diff --git a/.gitattributes b/.gitattributes index 11947ece23fdbfb374bb71a39490e78dbad3b934..01a34811c7ac69862847e5c6d56e620a59b3f212 100644 --- a/.gitattributes +++ b/.gitattributes @@ -3152,3 +3152,4 @@ platform/dbops/binaries/go/go/pkg/tool/linux_amd64/pprof filter=lfs diff=lfs mer platform/dbops/binaries/go/go/pkg/tool/linux_amd64/test2json filter=lfs diff=lfs merge=lfs -text platform/dbops/binaries/go/go/pkg/tool/linux_amd64/trace filter=lfs diff=lfs merge=lfs -text platform/dbops/binaries/go/go/pkg/tool/linux_amd64/vet filter=lfs diff=lfs merge=lfs -text +platform/dbops/binaries/go/go/src/cmd/compile/default.pgo filter=lfs diff=lfs merge=lfs -text diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-bad-mtime-file.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-bad-mtime-file.tar new file mode 100644 index 0000000000000000000000000000000000000000..a6a9b026c4d481511eadfb0248798b1bed68fc21 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-bad-mtime-file.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02e95784ed0c0a6c028eb2959c2c6393094d9687cfb44100fd7305955dbe6b0a +size 2560 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-global-records.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-global-records.tar new file mode 100644 index 0000000000000000000000000000000000000000..9dbf30f4e20ad1fd43153650f43241770f9916a1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-global-records.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4e6b8700915613e10edbfe16f31c8d3edfd80603fa4f12fd6eeee5881cbd881 +size 7168 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-multi-hdrs.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-multi-hdrs.tar new file mode 100644 index 0000000000000000000000000000000000000000..6138657592541bf5bf7b319838fcc21cb643c458 --- 
/dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-multi-hdrs.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7445b1987611850b4810e9b56a99188923f9dc467b7b240e0431f37f58cc6df3 +size 4608 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-nil-sparse-data.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-nil-sparse-data.tar new file mode 100644 index 0000000000000000000000000000000000000000..00633b73edc5989c4bda2553cc8cf0f6ae45f794 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-nil-sparse-data.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31585b656cf569c03f800d79e43b311c54c8ec88018a44e9e2ec05009abc8f5f +size 4096 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-nil-sparse-hole.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-nil-sparse-hole.tar new file mode 100644 index 0000000000000000000000000000000000000000..9b2c5eab9e4672b028c89f4228d6d372db2bac4c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-nil-sparse-hole.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68135f04e9b4fc6bc6a46508779912050028428f19f2eced56706a0bd297c13a +size 3072 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-nul-path.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-nul-path.tar new file mode 100644 index 0000000000000000000000000000000000000000..1d197a952c5760709da0316193f9f1cd088c2a6a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-nul-path.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a47d0adca51594bf284a2a1c50286f877a3718766dae1b262c4568ecd0bfa5e7 +size 2560 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-nul-xattrs.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-nul-xattrs.tar new file mode 100644 index 
0000000000000000000000000000000000000000..210fc1c85395ea555dbcb91666a61a72b2df0648 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-nul-xattrs.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2a5221cdcd6bd73be8301f02c266fb13a2cba400d6836cce86ba9b6e044937a +size 2560 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-path-hdr.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-path-hdr.tar new file mode 100644 index 0000000000000000000000000000000000000000..c547f24be7f212dd330117ee230b8dc1fae2841b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-path-hdr.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f434250f98a8aad5e77b8695d751b11bca1625d4d0264638113ad27ebeb11c7 +size 1024 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-pos-size-file.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-pos-size-file.tar new file mode 100644 index 0000000000000000000000000000000000000000..31537e1593325914470740ef0ba37d77fd877ca3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-pos-size-file.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7eafa3b2f293d90eca480a6630941775dbfe332f54c344162ff1913e3746a96f +size 2560 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-records.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-records.tar new file mode 100644 index 0000000000000000000000000000000000000000..b41a5f2b85c46f0ecf28dd35dc625b244d488dba --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-records.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:261f3807fa134322126c117bb3ef987d019d00bc25e882fb3ebdd81cf57c812e +size 2560 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-sparse-big.tar 
b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-sparse-big.tar new file mode 100644 index 0000000000000000000000000000000000000000..0dd0962426312078ad54bb6365805b9257f12232 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-sparse-big.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4afb4ca10dbf46d071752d82b2d148a82953f8b8776b7bc57f9f1f0e3c50a64 +size 6144 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax.tar new file mode 100644 index 0000000000000000000000000000000000000000..254c205642b6683006aebb00d0039305988baadc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/pax.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e313f478c14978e346fb2454f256876de83b154acf75fa49f498d7684964e8e1 +size 10240 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/small.txt b/platform/dbops/binaries/go/go/src/archive/tar/testdata/small.txt new file mode 100644 index 0000000000000000000000000000000000000000..b249bfc518a8c96f83747c9fde1ad3529fa3672d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/small.txt @@ -0,0 +1 @@ +Kilts \ No newline at end of file diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/small2.txt b/platform/dbops/binaries/go/go/src/archive/tar/testdata/small2.txt new file mode 100644 index 0000000000000000000000000000000000000000..394ee3ecd0edf3e17799ced62c774d17c1e57d31 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/small2.txt @@ -0,0 +1 @@ +Google.com diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/sparse-formats.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/sparse-formats.tar new file mode 100644 index 0000000000000000000000000000000000000000..35672b3441ba1e585dbf8fff32f44ee145097b1b --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/archive/tar/testdata/sparse-formats.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3dfc8596ba248ba48e18fc2c635753d033bfe8987d34121cea69a4a3a8e2ef40 +size 17920 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/star.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/star.tar new file mode 100644 index 0000000000000000000000000000000000000000..3aed38dcc9d082b2aaec1f243373e9836f1ef50d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/star.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a178aeda2cd08b0f738b53120ebd9c27767546736e03ca47516c80ceec1d299c +size 3072 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/trailing-slash.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/trailing-slash.tar new file mode 100644 index 0000000000000000000000000000000000000000..dc80073bcec3e4cf9007397be3702b7e816f7870 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/trailing-slash.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de161ee91e4f1bf9c98b8c5399ebf3d4f1f3d5be719bab8fe9ac8ac255715e02 +size 2560 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/ustar-file-devs.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/ustar-file-devs.tar new file mode 100644 index 0000000000000000000000000000000000000000..90b882819469dce6046ab3123fb0bf4aa6d671c5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/ustar-file-devs.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a14b8f41c96d4d00f36642455f734396766397fa5c636115f4881c5b53959b88 +size 1536 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/ustar-file-reg.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/ustar-file-reg.tar new file mode 100644 index 
0000000000000000000000000000000000000000..a616192f457fad22fe1db9d5c4e685dd8e08f240 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/ustar-file-reg.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e81dbec6483c207ff01013f682d96040c7f936083f5ca7c97456d20965778a89 +size 1536 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/ustar.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/ustar.tar new file mode 100644 index 0000000000000000000000000000000000000000..6776f31e299d3b38c78f21a585f614f65a657aa1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/ustar.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f58d4abcbc3a42dc21788e8aba382b3141ce34585100f7b2e2884601ce45273c +size 2048 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/v7.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/v7.tar new file mode 100644 index 0000000000000000000000000000000000000000..4de585be1ffbaa84ee6c7ab8bc0fa483cf707afe --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/v7.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59b6b112db4107ed60bf30cd4a0a81b96f4f955808c06ce029194eff31e8d8ce +size 3584 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/writer-big-long.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/writer-big-long.tar new file mode 100644 index 0000000000000000000000000000000000000000..2f5c47a223422ba2c17de33820769244f0ba0e19 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/writer-big-long.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36edba10087c389ffab0200dbf56013237af1ec8867a2442187c7147d899e7e4 +size 1536 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/writer-big.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/writer-big.tar new file mode 100644 index 
0000000000000000000000000000000000000000..dc86f85254b4a7004cb62005492ef13dc2f99bca --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/writer-big.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a0bee74a7bd0caa1d572ff1ae58c3c79310c8fc646a5e84fb5e1eea488ea4b5 +size 512 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/writer.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/writer.tar new file mode 100644 index 0000000000000000000000000000000000000000..191d66afdfc240c668acbb949ab59f9dfe368eb0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/writer.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c126eb856810b76ae112d2dfeddf2c9aa89531dfc6f5e7c878301495a08d1b6d +size 3584 diff --git a/platform/dbops/binaries/go/go/src/archive/tar/testdata/xattrs.tar b/platform/dbops/binaries/go/go/src/archive/tar/testdata/xattrs.tar new file mode 100644 index 0000000000000000000000000000000000000000..e19588aaa84f028c9d02f8b844d3fb5c75fe175c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/tar/testdata/xattrs.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:577d18c199858f40ddb297b18de9b31041e253c04019f00b06067c1015925605 +size 5120 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/comment-truncated.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/comment-truncated.zip new file mode 100644 index 0000000000000000000000000000000000000000..8c65ac7d75e38ac0e760815c12ce81062db5a3eb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/comment-truncated.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:898b98f14b279d1a79c9e9d3e15d92f7ec630bb6990226ecb693a80c4bf0696c +size 216 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/crc32-not-streamed.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/crc32-not-streamed.zip new 
file mode 100644 index 0000000000000000000000000000000000000000..587b81191c1e652c30ca210ca24252f15ad34c7f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/crc32-not-streamed.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbf109e827cd301e67ef3b08082db8e0bacd9d7fe7e517278bf41821d3207898 +size 314 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/dd.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/dd.zip new file mode 100644 index 0000000000000000000000000000000000000000..fe48e7ed4d38d624c9e2f168f4c72c2429b22c7c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/dd.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dcec917ea7852418f21c2b8fe67c8355fd6ff0e875c5bc3718e1b8684a064171 +size 154 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/dupdir.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/dupdir.zip new file mode 100644 index 0000000000000000000000000000000000000000..f9208bdd697a96de561d59fbf67424c28c222d9c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/dupdir.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80a6fbb7eb70527702bb1312d0ecf41c282b5c6910e47abe93858ce296cfbc93 +size 458 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/go-no-datadesc-sig.zip.base64 b/platform/dbops/binaries/go/go/src/archive/zip/testdata/go-no-datadesc-sig.zip.base64 new file mode 100644 index 0000000000000000000000000000000000000000..1c2c071fbe0e7a528f00215cd3134c1e8c016ecc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/go-no-datadesc-sig.zip.base64 @@ -0,0 +1 @@ 
+UEsDBBQACAAAAGWHaECoZTJ+BAAAAAQAAAAHABgAZm9vLnR4dFVUBQAD3lVZT3V4CwABBPUBAAAEFAAAAGZvbwqoZTJ+BAAAAAQAAABQSwMEFAAIAAAAZodoQOmzogQEAAAABAAAAAcAGABiYXIudHh0VVQFAAPgVVlPdXgLAAEE9QEAAAQUAAAAYmFyCumzogQEAAAABAAAAFBLAQIUAxQACAAAAGWHaECoZTJ+BAAAAAQAAAAHABgAAAAAAAAAAACkgQAAAABmb28udHh0VVQFAAPeVVlPdXgLAAEE9QEAAAQUAAAAUEsBAhQDFAAIAAAAZodoQOmzogQEAAAABAAAAAcAGAAAAAAAAAAAAKSBTQAAAGJhci50eHRVVAUAA+BVWU91eAsAAQT1AQAABBQAAABQSwUGAAAAAAIAAgCaAAAAmgAAAAAA diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/go-with-datadesc-sig.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/go-with-datadesc-sig.zip new file mode 100644 index 0000000000000000000000000000000000000000..3a2d05080d8e8cffab895231987431832bc490a5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/go-with-datadesc-sig.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6be41a66578438afdda77edfaec1857ca553fa091dc31a2bd7db0d8a3a3e9a30 +size 242 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/gophercolor16x16.png b/platform/dbops/binaries/go/go/src/archive/zip/testdata/gophercolor16x16.png new file mode 100644 index 0000000000000000000000000000000000000000..697eece0f0377412716b5018610f4c3f8d364cde --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/gophercolor16x16.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f29bb9177d08fb50027e74a37eb00a69906a5994dc55a041670703efe4ecfdc9 +size 785 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/readme.notzip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/readme.notzip new file mode 100644 index 0000000000000000000000000000000000000000..79b1cb6de33c6ae86451acedbd50df4207a5710e Binary files /dev/null and b/platform/dbops/binaries/go/go/src/archive/zip/testdata/readme.notzip differ diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/readme.zip 
b/platform/dbops/binaries/go/go/src/archive/zip/testdata/readme.zip new file mode 100644 index 0000000000000000000000000000000000000000..558413f10640ae6aaf5446d0842c97459baad830 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/readme.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ee76aa8a1c2ce04924b43424bf9d1b3138f812a80d6239f06f1133876f50d40 +size 1886 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/subdir.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/subdir.zip new file mode 100644 index 0000000000000000000000000000000000000000..fc974ab6fd1a7379da5ad7e3c5ca5eab3d0c181b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/subdir.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d57eff008fa79aab7d863c5e003d2bc35a5f734a9eac8b62289b9c6c395bb71f +size 428 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/symlink.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/symlink.zip new file mode 100644 index 0000000000000000000000000000000000000000..169a6e15d423e41a071270933ced25e80fd1dd6c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/symlink.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c871234a8d0c8787eacb357e53245d6663803fef1750518204a1aa97ec321f62 +size 173 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/test-badbase.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/test-badbase.zip new file mode 100644 index 0000000000000000000000000000000000000000..7173a1418268493fe499febbaa546e44f715d1bb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/test-badbase.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d11fa637a1153619ed3443401942d242238519feb24233bc1965a693b5f0d326 +size 1170 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/test-baddirsz.zip 
b/platform/dbops/binaries/go/go/src/archive/zip/testdata/test-baddirsz.zip new file mode 100644 index 0000000000000000000000000000000000000000..d5679ccdf1623949af73d98160fd69ecc1604542 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/test-baddirsz.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60452121e8533c13ae22ce41a2539278049ffb63a08e1fea3d7239b7e627e55a +size 1170 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/test-prefix.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/test-prefix.zip new file mode 100644 index 0000000000000000000000000000000000000000..0f0cb97b0931b60ba16c43512178e5dfb84909ff --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/test-prefix.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d88728cfbbc295b8e146bde1e21fd4514ea60cc9909eb9ddb7537b0d448bfcea +size 1227 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/test-trailing-junk.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/test-trailing-junk.zip new file mode 100644 index 0000000000000000000000000000000000000000..61f3dec31df033dd1329ccdd8a33b0a50862f482 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/test-trailing-junk.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acb77caa34accd11067629705ceb3d7cb01868134ae56fca3f9c6348dbd585d4 +size 1184 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/test.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/test.zip new file mode 100644 index 0000000000000000000000000000000000000000..fbb1e9957c1cdbc8261bcdc81e1316d42b63dbb7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/test.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e36d2ee9fbc41f7fe0b2717dc4b1fdc1978c9396dd28f3398b1f3a1a29dc146c +size 1170 diff --git 
a/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-22738.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-22738.zip new file mode 100644 index 0000000000000000000000000000000000000000..178f8572367d339ff2f9697ad3917f7ec9c045b5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-22738.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffecd2607b24eb31a12d3d94b8cf951c48b2e17c66b19d71150fb98da0a727e3 +size 140 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-7zip.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-7zip.zip new file mode 100644 index 0000000000000000000000000000000000000000..09da1cb25038826e17b01a4c92a6434a1d5ac2c4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-7zip.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12e1b7b71a0943547acca38516aef5d89d2466c79dae5279a915389b7beace4d +size 150 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-go.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-go.zip new file mode 100644 index 0000000000000000000000000000000000000000..2140d1c472d4fb763729526cdc871fddb29cc778 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-go.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45c2b43fd74e27b3a511a2f3f31de08651689366e4344ff5e4a644b3be35f4b9 +size 148 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-infozip.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-infozip.zip new file mode 100644 index 0000000000000000000000000000000000000000..44e8d4fc7f73697b979bfba62fc340ada4100e86 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-infozip.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e129eb30eeda61f7bec57f7708047750e4cf6add713a289d4b879b394459846 +size 166 diff 
--git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-osx.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-osx.zip new file mode 100644 index 0000000000000000000000000000000000000000..a4b54efb07310d8971d1a895e301a0dcd4a75db5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-osx.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c374ae7910a6b6b8499c0bb7757474456450c994bf39dbe4d9a1ab8361f50856 +size 142 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-win7.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-win7.zip new file mode 100644 index 0000000000000000000000000000000000000000..376f0ca3e5146acd31f354759fc3f284014ba427 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-win7.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28774f00d29804f65a6af708a125d9b56ea20a98b1681ae4ab0e001474710e2a +size 114 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-winrar.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-winrar.zip new file mode 100644 index 0000000000000000000000000000000000000000..a4ad441930598490526114bb3d8545b1ee9d10a7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-winrar.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:058b9c059e88fcd17d8e8bee4239f31073969c8affa4993498dbb7e5c1508518 +size 150 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-winzip.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-winzip.zip new file mode 100644 index 0000000000000000000000000000000000000000..5faa9708419cda93175bce655739e843f47dbd49 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/time-winzip.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1ae5e20dc1608e8fda463f6d19c667f1dd2ce290981565528a1e11ac7452bfa +size 150 
diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/unix.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/unix.zip new file mode 100644 index 0000000000000000000000000000000000000000..69f79d01a502a7a89da0f1b607e3e28b15d126f9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/unix.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae84fe91799e805724d20949195c4476aa4e7c124a9229c79987a4f7e0cdb178 +size 620 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/utf8-7zip.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/utf8-7zip.zip new file mode 100644 index 0000000000000000000000000000000000000000..8c95a9a9f90a61ce7fda70094db28e3a6699f09a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/utf8-7zip.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fda31b416bcacec89be0a8e1a9fe8f16b07b1761b34285a073f3cdde25209c4e +size 146 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/utf8-infozip.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/utf8-infozip.zip new file mode 100644 index 0000000000000000000000000000000000000000..2861f929b6aa8e289a287710bea1037619026a44 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/utf8-infozip.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d73791c790a2e4593a207d5e9ef7a5fdcb937db571be85747421b951655c98f6 +size 162 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/utf8-osx.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/utf8-osx.zip new file mode 100644 index 0000000000000000000000000000000000000000..11d3b7673c2f7cb5ab0f56b2e1b3ff7cb81a831d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/utf8-osx.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5da98564f0188c6ea25c5a681adac2bcf402a22b7e16bfdf087034be9e8b17a5 +size 138 diff --git 
a/platform/dbops/binaries/go/go/src/archive/zip/testdata/utf8-winrar.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/utf8-winrar.zip new file mode 100644 index 0000000000000000000000000000000000000000..63e6f378eca026653e80b14eeb81edb09b5f2ee0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/utf8-winrar.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f41036f0d011ae1c98de383360b1e26eb41f216cea17ab909f8fcf027ba20791 +size 146 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/utf8-winzip.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/utf8-winzip.zip new file mode 100644 index 0000000000000000000000000000000000000000..9306c9f6344ecd047d0724eef0eb1e995c4259bb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/utf8-winzip.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f04b2fca5ebaac681e7ca1f859bc0a1abe4193800255a449b2a3a871dc3f64a0 +size 146 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/winxp.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/winxp.zip new file mode 100644 index 0000000000000000000000000000000000000000..84c4fb870c19a4a7fb179b06db5bb4d8717c8c8f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/winxp.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d60fd70a6de4cc16363b5a424bcf703f5345f01c6a0595f5246ae12822bf719d +size 412 diff --git a/platform/dbops/binaries/go/go/src/archive/zip/testdata/zip64-2.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/zip64-2.zip new file mode 100644 index 0000000000000000000000000000000000000000..c89506db0efda52926a963ee100b946563c041f8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/zip64-2.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:941bf893b024dcb2269684357b6df18ca747ce873d6bdc626796572bf3a3f9e3 +size 266 diff --git 
a/platform/dbops/binaries/go/go/src/archive/zip/testdata/zip64.zip b/platform/dbops/binaries/go/go/src/archive/zip/testdata/zip64.zip new file mode 100644 index 0000000000000000000000000000000000000000..43481c44e08ad29e9d60b164b9e29aeff4d334be --- /dev/null +++ b/platform/dbops/binaries/go/go/src/archive/zip/testdata/zip64.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1a659d5a2d292682a17d789834d97b97650b6a8d188799ac1166fd85ace7dbd +size 242 diff --git a/platform/dbops/binaries/go/go/src/cmd/addr2line/addr2line_test.go b/platform/dbops/binaries/go/go/src/cmd/addr2line/addr2line_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0ea8994b6a0c757e6077ec81320a03187c86ca29 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/addr2line/addr2line_test.go @@ -0,0 +1,159 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "bytes" + "internal/testenv" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "testing" +) + +// TestMain executes the test binary as the addr2line command if +// GO_ADDR2LINETEST_IS_ADDR2LINE is set, and runs the tests otherwise. +func TestMain(m *testing.M) { + if os.Getenv("GO_ADDR2LINETEST_IS_ADDR2LINE") != "" { + main() + os.Exit(0) + } + + os.Setenv("GO_ADDR2LINETEST_IS_ADDR2LINE", "1") // Set for subprocesses to inherit. + os.Exit(m.Run()) +} + +// addr2linePath returns the path to the "addr2line" binary to run. 
+func addr2linePath(t testing.TB) string { + t.Helper() + testenv.MustHaveExec(t) + + addr2linePathOnce.Do(func() { + addr2lineExePath, addr2linePathErr = os.Executable() + }) + if addr2linePathErr != nil { + t.Fatal(addr2linePathErr) + } + return addr2lineExePath +} + +var ( + addr2linePathOnce sync.Once + addr2lineExePath string + addr2linePathErr error +) + +func loadSyms(t *testing.T, dbgExePath string) map[string]string { + cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "nm", dbgExePath) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("%v: %v\n%s", cmd, err, string(out)) + } + syms := make(map[string]string) + scanner := bufio.NewScanner(bytes.NewReader(out)) + for scanner.Scan() { + f := strings.Fields(scanner.Text()) + if len(f) < 3 { + continue + } + syms[f[2]] = f[0] + } + if err := scanner.Err(); err != nil { + t.Fatalf("error reading symbols: %v", err) + } + return syms +} + +func runAddr2Line(t *testing.T, dbgExePath, addr string) (funcname, path, lineno string) { + cmd := testenv.Command(t, addr2linePath(t), dbgExePath) + cmd.Stdin = strings.NewReader(addr) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("go tool addr2line %v: %v\n%s", os.Args[0], err, string(out)) + } + f := strings.Split(string(out), "\n") + if len(f) < 3 && f[2] == "" { + t.Fatal("addr2line output must have 2 lines") + } + funcname = f[0] + pathAndLineNo := f[1] + f = strings.Split(pathAndLineNo, ":") + if runtime.GOOS == "windows" && len(f) == 3 { + // Reattach drive letter. 
+ f = []string{f[0] + ":" + f[1], f[2]} + } + if len(f) != 2 { + t.Fatalf("no line number found in %q", pathAndLineNo) + } + return funcname, f[0], f[1] +} + +const symName = "cmd/addr2line.TestAddr2Line" + +func testAddr2Line(t *testing.T, dbgExePath, addr string) { + funcName, srcPath, srcLineNo := runAddr2Line(t, dbgExePath, addr) + if symName != funcName { + t.Fatalf("expected function name %v; got %v", symName, funcName) + } + fi1, err := os.Stat("addr2line_test.go") + if err != nil { + t.Fatalf("Stat failed: %v", err) + } + + // Debug paths are stored slash-separated, so convert to system-native. + srcPath = filepath.FromSlash(srcPath) + fi2, err := os.Stat(srcPath) + + // If GOROOT_FINAL is set and srcPath is not the file we expect, perhaps + // srcPath has had GOROOT_FINAL substituted for GOROOT and GOROOT hasn't been + // moved to its final location yet. If so, try the original location instead. + if gorootFinal := os.Getenv("GOROOT_FINAL"); gorootFinal != "" && + (os.IsNotExist(err) || (err == nil && !os.SameFile(fi1, fi2))) { + // srcPath is clean, but GOROOT_FINAL itself might not be. + // (See https://golang.org/issue/41447.) + gorootFinal = filepath.Clean(gorootFinal) + + if strings.HasPrefix(srcPath, gorootFinal) { + fi2, err = os.Stat(runtime.GOROOT() + strings.TrimPrefix(srcPath, gorootFinal)) + } + } + + if err != nil { + t.Fatalf("Stat failed: %v", err) + } + if !os.SameFile(fi1, fi2) { + t.Fatalf("addr2line_test.go and %s are not same file", srcPath) + } + if srcLineNo != "138" { + t.Fatalf("line number = %v; want 138", srcLineNo) + } +} + +// This is line 137. The test depends on that. +func TestAddr2Line(t *testing.T) { + testenv.MustHaveGoBuild(t) + + tmpDir, err := os.MkdirTemp("", "TestAddr2Line") + if err != nil { + t.Fatal("TempDir failed: ", err) + } + defer os.RemoveAll(tmpDir) + + // Build copy of test binary with debug symbols, + // since the one running now may not have them. 
+ exepath := filepath.Join(tmpDir, "testaddr2line_test.exe") + out, err := testenv.Command(t, testenv.GoToolPath(t), "test", "-c", "-o", exepath, "cmd/addr2line").CombinedOutput() + if err != nil { + t.Fatalf("go test -c -o %v cmd/addr2line: %v\n%s", exepath, err, string(out)) + } + + syms := loadSyms(t, exepath) + + testAddr2Line(t, exepath, syms[symName]) + testAddr2Line(t, exepath, "0x"+syms[symName]) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/addr2line/main.go b/platform/dbops/binaries/go/go/src/cmd/addr2line/main.go new file mode 100644 index 0000000000000000000000000000000000000000..6e005a8fac96d6d5715e6cc3033d4db2b5d46544 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/addr2line/main.go @@ -0,0 +1,97 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Addr2line is a minimal simulation of the GNU addr2line tool, +// just enough to support pprof. +// +// Usage: +// +// go tool addr2line binary +// +// Addr2line reads hexadecimal addresses, one per line and with optional 0x prefix, +// from standard input. For each input address, addr2line prints two output lines, +// first the name of the function containing the address and second the file:line +// of the source code corresponding to that address. +// +// This tool is intended for use only by pprof; its interface may change or +// it may be deleted entirely in future releases. 
+package main + +import ( + "bufio" + "flag" + "fmt" + "log" + "os" + "strconv" + "strings" + + "cmd/internal/objfile" +) + +func printUsage(w *os.File) { + fmt.Fprintf(w, "usage: addr2line binary\n") + fmt.Fprintf(w, "reads addresses from standard input and writes two lines for each:\n") + fmt.Fprintf(w, "\tfunction name\n") + fmt.Fprintf(w, "\tfile:line\n") +} + +func usage() { + printUsage(os.Stderr) + os.Exit(2) +} + +func main() { + log.SetFlags(0) + log.SetPrefix("addr2line: ") + + // pprof expects this behavior when checking for addr2line + if len(os.Args) > 1 && os.Args[1] == "--help" { + printUsage(os.Stdout) + os.Exit(0) + } + + flag.Usage = usage + flag.Parse() + if flag.NArg() != 1 { + usage() + } + + f, err := objfile.Open(flag.Arg(0)) + if err != nil { + log.Fatal(err) + } + defer f.Close() + + tab, err := f.PCLineTable() + if err != nil { + log.Fatalf("reading %s: %v", flag.Arg(0), err) + } + + stdin := bufio.NewScanner(os.Stdin) + stdout := bufio.NewWriter(os.Stdout) + + for stdin.Scan() { + p := stdin.Text() + if strings.Contains(p, ":") { + // Reverse translate file:line to pc. + // This was an extension in the old C version of 'go tool addr2line' + // and is probably not used by anyone, but recognize the syntax. + // We don't have an implementation. + fmt.Fprintf(stdout, "!reverse translation not implemented\n") + continue + } + pc, _ := strconv.ParseUint(strings.TrimPrefix(p, "0x"), 16, 64) + file, line, fn := tab.PCToLine(pc) + name := "?" + if fn != nil { + name = fn.Name + } else { + file = "?" + line = 0 + } + fmt.Fprintf(stdout, "%s\n%s:%d\n", name, file, line) + } + stdout.Flush() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/api/api_test.go b/platform/dbops/binaries/go/go/src/cmd/api/api_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ba358d364d51dce627346f9668e4f9c93cf6b5ab --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/api/api_test.go @@ -0,0 +1,313 @@ +// Copyright 2011 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "flag" + "fmt" + "go/build" + "internal/testenv" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "testing" +) + +var flagCheck = flag.Bool("check", false, "run API checks") + +func TestMain(m *testing.M) { + flag.Parse() + for _, c := range contexts { + c.Compiler = build.Default.Compiler + } + build.Default.GOROOT = testenv.GOROOT(nil) + + os.Exit(m.Run()) +} + +var ( + updateGolden = flag.Bool("updategolden", false, "update golden files") +) + +func TestGolden(t *testing.T) { + if *flagCheck { + // slow, not worth repeating in -check + t.Skip("skipping with -check set") + } + + testenv.MustHaveGoBuild(t) + + td, err := os.Open("testdata/src/pkg") + if err != nil { + t.Fatal(err) + } + fis, err := td.Readdir(0) + if err != nil { + t.Fatal(err) + } + for _, fi := range fis { + if !fi.IsDir() { + continue + } + + // TODO(gri) remove extra pkg directory eventually + goldenFile := filepath.Join("testdata", "src", "pkg", fi.Name(), "golden.txt") + w := NewWalker(nil, "testdata/src/pkg") + pkg, _ := w.import_(fi.Name()) + w.export(pkg) + + if *updateGolden { + os.Remove(goldenFile) + f, err := os.Create(goldenFile) + if err != nil { + t.Fatal(err) + } + for _, feat := range w.Features() { + fmt.Fprintf(f, "%s\n", feat) + } + f.Close() + } + + bs, err := os.ReadFile(goldenFile) + if err != nil { + t.Fatalf("opening golden.txt for package %q: %v", fi.Name(), err) + } + wanted := strings.Split(string(bs), "\n") + sort.Strings(wanted) + for _, feature := range wanted { + if feature == "" { + continue + } + _, ok := w.features[feature] + if !ok { + t.Errorf("package %s: missing feature %q", fi.Name(), feature) + } + delete(w.features, feature) + } + + for _, feature := range w.Features() { + t.Errorf("package %s: extra feature not in golden file: %q", fi.Name(), feature) + } + } +} + +func TestCompareAPI(t 
*testing.T) { + tests := []struct { + name string + features, required, exception []string + ok bool // want + out string // want + }{ + { + name: "equal", + features: []string{"A", "B", "C"}, + required: []string{"A", "B", "C"}, + ok: true, + out: "", + }, + { + name: "feature added", + features: []string{"A", "B", "C", "D", "E", "F"}, + required: []string{"B", "D"}, + ok: false, + out: "+A\n+C\n+E\n+F\n", + }, + { + name: "feature removed", + features: []string{"C", "A"}, + required: []string{"A", "B", "C"}, + ok: false, + out: "-B\n", + }, + { + name: "exception removal", + features: []string{"A", "C"}, + required: []string{"A", "B", "C"}, + exception: []string{"B"}, + ok: true, + out: "", + }, + + // Test that a feature required on a subset of ports is implicitly satisfied + // by the same feature being implemented on all ports. That is, it shouldn't + // say "pkg syscall (darwin-amd64), type RawSockaddrInet6 struct" is missing. + // See https://go.dev/issue/4303. + { + name: "contexts reconverging after api/next/* update", + features: []string{ + "A", + "pkg syscall, type RawSockaddrInet6 struct", + }, + required: []string{ + "A", + "pkg syscall (darwin-amd64), type RawSockaddrInet6 struct", // api/go1.n.txt + "pkg syscall, type RawSockaddrInet6 struct", // api/next/n.txt + }, + ok: true, + out: "", + }, + { + name: "contexts reconverging before api/next/* update", + features: []string{ + "A", + "pkg syscall, type RawSockaddrInet6 struct", + }, + required: []string{ + "A", + "pkg syscall (darwin-amd64), type RawSockaddrInet6 struct", + }, + ok: false, + out: "+pkg syscall, type RawSockaddrInet6 struct\n", + }, + } + for _, tt := range tests { + buf := new(strings.Builder) + gotOK := compareAPI(buf, tt.features, tt.required, tt.exception) + if gotOK != tt.ok { + t.Errorf("%s: ok = %v; want %v", tt.name, gotOK, tt.ok) + } + if got := buf.String(); got != tt.out { + t.Errorf("%s: output differs\nGOT:\n%s\nWANT:\n%s", tt.name, got, tt.out) + } + } +} + +func 
TestSkipInternal(t *testing.T) { + tests := []struct { + pkg string + want bool + }{ + {"net/http", true}, + {"net/http/internal-foo", true}, + {"net/http/internal", false}, + {"net/http/internal/bar", false}, + {"internal/foo", false}, + {"internal", false}, + } + for _, tt := range tests { + got := !internalPkg.MatchString(tt.pkg) + if got != tt.want { + t.Errorf("%s is internal = %v; want %v", tt.pkg, got, tt.want) + } + } +} + +func BenchmarkAll(b *testing.B) { + for i := 0; i < b.N; i++ { + for _, context := range contexts { + w := NewWalker(context, filepath.Join(testenv.GOROOT(b), "src")) + for _, name := range w.stdPackages { + pkg, _ := w.import_(name) + w.export(pkg) + } + w.Features() + } + } +} + +var warmupCache = sync.OnceFunc(func() { + // Warm up the import cache in parallel. + var wg sync.WaitGroup + for _, context := range contexts { + context := context + wg.Add(1) + go func() { + defer wg.Done() + _ = NewWalker(context, filepath.Join(testenv.GOROOT(nil), "src")) + }() + } + wg.Wait() +}) + +func TestIssue21181(t *testing.T) { + if testing.Short() { + t.Skip("skipping with -short") + } + if *flagCheck { + // slow, not worth repeating in -check + t.Skip("skipping with -check set") + } + testenv.MustHaveGoBuild(t) + + warmupCache() + + for _, context := range contexts { + w := NewWalker(context, "testdata/src/issue21181") + pkg, err := w.import_("p") + if err != nil { + t.Fatalf("%s: (%s-%s) %s %v", err, context.GOOS, context.GOARCH, + pkg.Name(), w.imported) + } + w.export(pkg) + } +} + +func TestIssue29837(t *testing.T) { + if testing.Short() { + t.Skip("skipping with -short") + } + if *flagCheck { + // slow, not worth repeating in -check + t.Skip("skipping with -check set") + } + testenv.MustHaveGoBuild(t) + + warmupCache() + + for _, context := range contexts { + w := NewWalker(context, "testdata/src/issue29837") + _, err := w.ImportFrom("p", "", 0) + if _, nogo := err.(*build.NoGoError); !nogo { + t.Errorf("expected *build.NoGoError, got %T", 
err) + } + } +} + +func TestIssue41358(t *testing.T) { + if *flagCheck { + // slow, not worth repeating in -check + t.Skip("skipping with -check set") + } + testenv.MustHaveGoBuild(t) + context := new(build.Context) + *context = build.Default + context.Dir = filepath.Join(testenv.GOROOT(t), "src") + + w := NewWalker(context, context.Dir) + for _, pkg := range w.stdPackages { + if strings.HasPrefix(pkg, "vendor/") || strings.HasPrefix(pkg, "golang.org/x/") { + t.Fatalf("stdPackages contains unexpected package %s", pkg) + } + } +} + +func TestIssue64958(t *testing.T) { + defer func() { + if x := recover(); x != nil { + t.Errorf("expected no panic; recovered %v", x) + } + }() + + testenv.MustHaveGoBuild(t) + + for _, context := range contexts { + w := NewWalker(context, "testdata/src/issue64958") + pkg, err := w.importFrom("p", "", 0) + if err != nil { + t.Errorf("expected no error importing; got %T", err) + } + w.export(pkg) + } +} + +func TestCheck(t *testing.T) { + if !*flagCheck { + t.Skip("-check not specified") + } + testenv.MustHaveGoBuild(t) + Check(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/api/boring_test.go b/platform/dbops/binaries/go/go/src/cmd/api/boring_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f0e3575637c62a955f118943cc752f44be8c5d7f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/api/boring_test.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build boringcrypto + +package main + +import ( + "fmt" + "os" +) + +func init() { + fmt.Printf("SKIP with boringcrypto enabled\n") + os.Exit(0) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/api/main_test.go b/platform/dbops/binaries/go/go/src/cmd/api/main_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7985055b5c0de2ecf8634f51fa48db9ae21cb8eb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/api/main_test.go @@ -0,0 +1,1231 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This package computes the exported API of a set of Go packages. +// It is only a test, not a command, nor a usefully importable package. + +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "go/types" + "internal/testenv" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "testing" +) + +const verbose = false + +func goCmd() string { + var exeSuffix string + if runtime.GOOS == "windows" { + exeSuffix = ".exe" + } + path := filepath.Join(testenv.GOROOT(nil), "bin", "go"+exeSuffix) + if _, err := os.Stat(path); err == nil { + return path + } + return "go" +} + +// contexts are the default contexts which are scanned. 
+var contexts = []*build.Context{ + {GOOS: "linux", GOARCH: "386", CgoEnabled: true}, + {GOOS: "linux", GOARCH: "386"}, + {GOOS: "linux", GOARCH: "amd64", CgoEnabled: true}, + {GOOS: "linux", GOARCH: "amd64"}, + {GOOS: "linux", GOARCH: "arm", CgoEnabled: true}, + {GOOS: "linux", GOARCH: "arm"}, + {GOOS: "darwin", GOARCH: "amd64", CgoEnabled: true}, + {GOOS: "darwin", GOARCH: "amd64"}, + {GOOS: "darwin", GOARCH: "arm64", CgoEnabled: true}, + {GOOS: "darwin", GOARCH: "arm64"}, + {GOOS: "windows", GOARCH: "amd64"}, + {GOOS: "windows", GOARCH: "386"}, + {GOOS: "freebsd", GOARCH: "386", CgoEnabled: true}, + {GOOS: "freebsd", GOARCH: "386"}, + {GOOS: "freebsd", GOARCH: "amd64", CgoEnabled: true}, + {GOOS: "freebsd", GOARCH: "amd64"}, + {GOOS: "freebsd", GOARCH: "arm", CgoEnabled: true}, + {GOOS: "freebsd", GOARCH: "arm"}, + {GOOS: "freebsd", GOARCH: "arm64", CgoEnabled: true}, + {GOOS: "freebsd", GOARCH: "arm64"}, + {GOOS: "freebsd", GOARCH: "riscv64", CgoEnabled: true}, + {GOOS: "freebsd", GOARCH: "riscv64"}, + {GOOS: "netbsd", GOARCH: "386", CgoEnabled: true}, + {GOOS: "netbsd", GOARCH: "386"}, + {GOOS: "netbsd", GOARCH: "amd64", CgoEnabled: true}, + {GOOS: "netbsd", GOARCH: "amd64"}, + {GOOS: "netbsd", GOARCH: "arm", CgoEnabled: true}, + {GOOS: "netbsd", GOARCH: "arm"}, + {GOOS: "netbsd", GOARCH: "arm64", CgoEnabled: true}, + {GOOS: "netbsd", GOARCH: "arm64"}, + {GOOS: "openbsd", GOARCH: "386", CgoEnabled: true}, + {GOOS: "openbsd", GOARCH: "386"}, + {GOOS: "openbsd", GOARCH: "amd64", CgoEnabled: true}, + {GOOS: "openbsd", GOARCH: "amd64"}, +} + +func contextName(c *build.Context) string { + s := c.GOOS + "-" + c.GOARCH + if c.CgoEnabled { + s += "-cgo" + } + if c.Dir != "" { + s += fmt.Sprintf(" [%s]", c.Dir) + } + return s +} + +var internalPkg = regexp.MustCompile(`(^|/)internal($|/)`) + +var exitCode = 0 + +func Check(t *testing.T) { + checkFiles, err := filepath.Glob(filepath.Join(testenv.GOROOT(t), "api/go1*.txt")) + if err != nil { + t.Fatal(err) + } + + var 
nextFiles []string + if v := runtime.Version(); strings.Contains(v, "devel") || strings.Contains(v, "beta") { + next, err := filepath.Glob(filepath.Join(testenv.GOROOT(t), "api/next/*.txt")) + if err != nil { + t.Fatal(err) + } + nextFiles = next + } + + for _, c := range contexts { + c.Compiler = build.Default.Compiler + } + + walkers := make([]*Walker, len(contexts)) + var wg sync.WaitGroup + for i, context := range contexts { + i, context := i, context + wg.Add(1) + go func() { + defer wg.Done() + walkers[i] = NewWalker(context, filepath.Join(testenv.GOROOT(t), "src")) + }() + } + wg.Wait() + + var featureCtx = make(map[string]map[string]bool) // feature -> context name -> true + for _, w := range walkers { + for _, name := range w.stdPackages { + pkg, err := w.import_(name) + if _, nogo := err.(*build.NoGoError); nogo { + continue + } + if err != nil { + log.Fatalf("Import(%q): %v", name, err) + } + w.export(pkg) + } + + ctxName := contextName(w.context) + for _, f := range w.Features() { + if featureCtx[f] == nil { + featureCtx[f] = make(map[string]bool) + } + featureCtx[f][ctxName] = true + } + } + + var features []string + for f, cmap := range featureCtx { + if len(cmap) == len(contexts) { + features = append(features, f) + continue + } + comma := strings.Index(f, ",") + for cname := range cmap { + f2 := fmt.Sprintf("%s (%s)%s", f[:comma], cname, f[comma:]) + features = append(features, f2) + } + } + + bw := bufio.NewWriter(os.Stdout) + defer bw.Flush() + + var required []string + for _, file := range checkFiles { + required = append(required, fileFeatures(file, needApproval(file))...) + } + for _, file := range nextFiles { + required = append(required, fileFeatures(file, true)...) 
+ } + exception := fileFeatures(filepath.Join(testenv.GOROOT(t), "api/except.txt"), false) + + if exitCode == 1 { + t.Errorf("API database problems found") + } + if !compareAPI(bw, features, required, exception) { + t.Errorf("API differences found") + } +} + +// export emits the exported package features. +func (w *Walker) export(pkg *apiPackage) { + if verbose { + log.Println(pkg) + } + pop := w.pushScope("pkg " + pkg.Path()) + w.current = pkg + w.collectDeprecated() + scope := pkg.Scope() + for _, name := range scope.Names() { + if token.IsExported(name) { + w.emitObj(scope.Lookup(name)) + } + } + pop() +} + +func set(items []string) map[string]bool { + s := make(map[string]bool) + for _, v := range items { + s[v] = true + } + return s +} + +var spaceParensRx = regexp.MustCompile(` \(\S+?\)`) + +func featureWithoutContext(f string) string { + if !strings.Contains(f, "(") { + return f + } + return spaceParensRx.ReplaceAllString(f, "") +} + +// portRemoved reports whether the given port-specific API feature is +// okay to no longer exist because its port was removed. +func portRemoved(feature string) bool { + return strings.Contains(feature, "(darwin-386)") || + strings.Contains(feature, "(darwin-386-cgo)") +} + +func compareAPI(w io.Writer, features, required, exception []string) (ok bool) { + ok = true + + featureSet := set(features) + exceptionSet := set(exception) + + sort.Strings(features) + sort.Strings(required) + + take := func(sl *[]string) string { + s := (*sl)[0] + *sl = (*sl)[1:] + return s + } + + for len(features) > 0 || len(required) > 0 { + switch { + case len(features) == 0 || (len(required) > 0 && required[0] < features[0]): + feature := take(&required) + if exceptionSet[feature] { + // An "unfortunate" case: the feature was once + // included in the API (e.g. go1.txt), but was + // subsequently removed. These are already + // acknowledged by being in the file + // "api/except.txt". No need to print them out + // here. 
+ } else if portRemoved(feature) { + // okay. + } else if featureSet[featureWithoutContext(feature)] { + // okay. + } else { + fmt.Fprintf(w, "-%s\n", feature) + ok = false // broke compatibility + } + case len(required) == 0 || (len(features) > 0 && required[0] > features[0]): + newFeature := take(&features) + fmt.Fprintf(w, "+%s\n", newFeature) + ok = false // feature not in api/next/* + default: + take(&required) + take(&features) + } + } + + return ok +} + +// aliasReplacer applies type aliases to earlier API files, +// to avoid misleading negative results. +// This makes all the references to os.FileInfo in go1.txt +// be read as if they said fs.FileInfo, since os.FileInfo is now an alias. +// If there are many of these, we could do a more general solution, +// but for now the replacer is fine. +var aliasReplacer = strings.NewReplacer( + "os.FileInfo", "fs.FileInfo", + "os.FileMode", "fs.FileMode", + "os.PathError", "fs.PathError", +) + +func fileFeatures(filename string, needApproval bool) []string { + bs, err := os.ReadFile(filename) + if err != nil { + log.Fatal(err) + } + s := string(bs) + + // Diagnose common mistakes people make, + // since there is no apifmt to format these files. + // The missing final newline is important for the + // final release step of cat next/*.txt >go1.X.txt. + // If the files don't end in full lines, the concatenation goes awry. + if strings.Contains(s, "\r") { + log.Printf("%s: contains CRLFs", filename) + exitCode = 1 + } + if filepath.Base(filename) == "go1.4.txt" { + // No use for blank lines in api files, except go1.4.txt + // used them in a reasonable way and we should let it be. 
+ } else if strings.HasPrefix(s, "\n") || strings.Contains(s, "\n\n") { + log.Printf("%s: contains a blank line", filename) + exitCode = 1 + } + if s == "" { + log.Printf("%s: empty file", filename) + exitCode = 1 + } else if s[len(s)-1] != '\n' { + log.Printf("%s: missing final newline", filename) + exitCode = 1 + } + s = aliasReplacer.Replace(s) + lines := strings.Split(s, "\n") + var nonblank []string + for i, line := range lines { + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + if needApproval { + feature, approval, ok := strings.Cut(line, "#") + if !ok { + log.Printf("%s:%d: missing proposal approval\n", filename, i+1) + exitCode = 1 + } else { + _, err := strconv.Atoi(approval) + if err != nil { + log.Printf("%s:%d: malformed proposal approval #%s\n", filename, i+1, approval) + exitCode = 1 + } + } + line = strings.TrimSpace(feature) + } else { + if strings.Contains(line, " #") { + log.Printf("%s:%d: unexpected approval\n", filename, i+1) + exitCode = 1 + } + } + nonblank = append(nonblank, line) + } + return nonblank +} + +var fset = token.NewFileSet() + +type Walker struct { + context *build.Context + root string + scope []string + current *apiPackage + deprecated map[token.Pos]bool + features map[string]bool // set + imported map[string]*apiPackage // packages already imported + stdPackages []string // names, omitting "unsafe", internal, and vendored packages + importMap map[string]map[string]string // importer dir -> import path -> canonical path + importDir map[string]string // canonical import path -> dir + +} + +func NewWalker(context *build.Context, root string) *Walker { + w := &Walker{ + context: context, + root: root, + features: map[string]bool{}, + imported: map[string]*apiPackage{"unsafe": &apiPackage{Package: types.Unsafe}}, + } + w.loadImports() + return w +} + +func (w *Walker) Features() (fs []string) { + for f := range w.features { + fs = append(fs, f) + } + sort.Strings(fs) + return +} + 
+var parsedFileCache = make(map[string]*ast.File) + +func (w *Walker) parseFile(dir, file string) (*ast.File, error) { + filename := filepath.Join(dir, file) + if f := parsedFileCache[filename]; f != nil { + return f, nil + } + + f, err := parser.ParseFile(fset, filename, nil, parser.ParseComments) + if err != nil { + return nil, err + } + parsedFileCache[filename] = f + + return f, nil +} + +// Disable before debugging non-obvious errors from the type-checker. +const usePkgCache = true + +var ( + pkgCache = map[string]*apiPackage{} // map tagKey to package + pkgTags = map[string][]string{} // map import dir to list of relevant tags +) + +// tagKey returns the tag-based key to use in the pkgCache. +// It is a comma-separated string; the first part is dir, the rest tags. +// The satisfied tags are derived from context but only those that +// matter (the ones listed in the tags argument plus GOOS and GOARCH) are used. +// The tags list, which came from go/build's Package.AllTags, +// is known to be sorted. +func tagKey(dir string, context *build.Context, tags []string) string { + ctags := map[string]bool{ + context.GOOS: true, + context.GOARCH: true, + } + if context.CgoEnabled { + ctags["cgo"] = true + } + for _, tag := range context.BuildTags { + ctags[tag] = true + } + // TODO: ReleaseTags (need to load default) + key := dir + + // explicit on GOOS and GOARCH as global cache will use "all" cached packages for + // an indirect imported package. See https://github.com/golang/go/issues/21181 + // for more detail. 
+ tags = append(tags, context.GOOS, context.GOARCH) + sort.Strings(tags) + + for _, tag := range tags { + if ctags[tag] { + key += "," + tag + ctags[tag] = false + } + } + return key +} + +type listImports struct { + stdPackages []string // names, omitting "unsafe", internal, and vendored packages + importDir map[string]string // canonical import path → directory + importMap map[string]map[string]string // import path → canonical import path +} + +var listCache sync.Map // map[string]listImports, keyed by contextName + +// listSem is a semaphore restricting concurrent invocations of 'go list'. 'go +// list' has its own internal concurrency, so we use a hard-coded constant (to +// allow the I/O-intensive phases of 'go list' to overlap) instead of scaling +// all the way up to GOMAXPROCS. +var listSem = make(chan semToken, 2) + +type semToken struct{} + +// loadImports populates w with information about the packages in the standard +// library and the packages they themselves import in w's build context. +// +// The source import path and expanded import path are identical except for vendored packages. +// For example, on return: +// +// w.importMap["math"] = "math" +// w.importDir["math"] = "/src/math" +// +// w.importMap["golang.org/x/net/route"] = "vendor/golang.org/x/net/route" +// w.importDir["vendor/golang.org/x/net/route"] = "/src/vendor/golang.org/x/net/route" +// +// Since the set of packages that exist depends on context, the result of +// loadImports also depends on context. However, to improve test running time +// the configuration for each environment is cached across runs. 
+func (w *Walker) loadImports() { + if w.context == nil { + return // test-only Walker; does not use the import map + } + + name := contextName(w.context) + + imports, ok := listCache.Load(name) + if !ok { + listSem <- semToken{} + defer func() { <-listSem }() + + cmd := exec.Command(goCmd(), "list", "-e", "-deps", "-json", "std") + cmd.Env = listEnv(w.context) + if w.context.Dir != "" { + cmd.Dir = w.context.Dir + } + cmd.Stderr = os.Stderr + out, err := cmd.Output() + if err != nil { + log.Fatalf("loading imports: %v\n%s", err, out) + } + + var stdPackages []string + importMap := make(map[string]map[string]string) + importDir := make(map[string]string) + dec := json.NewDecoder(bytes.NewReader(out)) + for { + var pkg struct { + ImportPath, Dir string + ImportMap map[string]string + Standard bool + } + err := dec.Decode(&pkg) + if err == io.EOF { + break + } + if err != nil { + log.Fatalf("go list: invalid output: %v", err) + } + + // - Package "unsafe" contains special signatures requiring + // extra care when printing them - ignore since it is not + // going to change w/o a language change. + // - Internal and vendored packages do not contribute to our + // API surface. (If we are running within the "std" module, + // vendored dependencies appear as themselves instead of + // their "vendor/" standard-library copies.) + // - 'go list std' does not include commands, which cannot be + // imported anyway. 
+ if ip := pkg.ImportPath; pkg.Standard && ip != "unsafe" && !strings.HasPrefix(ip, "vendor/") && !internalPkg.MatchString(ip) { + stdPackages = append(stdPackages, ip) + } + importDir[pkg.ImportPath] = pkg.Dir + if len(pkg.ImportMap) > 0 { + importMap[pkg.Dir] = make(map[string]string, len(pkg.ImportMap)) + } + for k, v := range pkg.ImportMap { + importMap[pkg.Dir][k] = v + } + } + + sort.Strings(stdPackages) + imports = listImports{ + stdPackages: stdPackages, + importMap: importMap, + importDir: importDir, + } + imports, _ = listCache.LoadOrStore(name, imports) + } + + li := imports.(listImports) + w.stdPackages = li.stdPackages + w.importDir = li.importDir + w.importMap = li.importMap +} + +// listEnv returns the process environment to use when invoking 'go list' for +// the given context. +func listEnv(c *build.Context) []string { + if c == nil { + return os.Environ() + } + + environ := append(os.Environ(), + "GOOS="+c.GOOS, + "GOARCH="+c.GOARCH) + if c.CgoEnabled { + environ = append(environ, "CGO_ENABLED=1") + } else { + environ = append(environ, "CGO_ENABLED=0") + } + return environ +} + +type apiPackage struct { + *types.Package + Files []*ast.File +} + +// Importing is a sentinel taking the place in Walker.imported +// for a package that is in the process of being imported. +var importing apiPackage + +// Import implements types.Importer. +func (w *Walker) Import(name string) (*types.Package, error) { + return w.ImportFrom(name, "", 0) +} + +// ImportFrom implements types.ImporterFrom. 
+func (w *Walker) ImportFrom(fromPath, fromDir string, mode types.ImportMode) (*types.Package, error) { + pkg, err := w.importFrom(fromPath, fromDir, mode) + if err != nil { + return nil, err + } + return pkg.Package, nil +} + +func (w *Walker) import_(name string) (*apiPackage, error) { + return w.importFrom(name, "", 0) +} + +func (w *Walker) importFrom(fromPath, fromDir string, mode types.ImportMode) (*apiPackage, error) { + name := fromPath + if canonical, ok := w.importMap[fromDir][fromPath]; ok { + name = canonical + } + + pkg := w.imported[name] + if pkg != nil { + if pkg == &importing { + log.Fatalf("cycle importing package %q", name) + } + return pkg, nil + } + w.imported[name] = &importing + + // Determine package files. + dir := w.importDir[name] + if dir == "" { + dir = filepath.Join(w.root, filepath.FromSlash(name)) + } + if fi, err := os.Stat(dir); err != nil || !fi.IsDir() { + log.Panicf("no source in tree for import %q (from import %s in %s): %v", name, fromPath, fromDir, err) + } + + context := w.context + if context == nil { + context = &build.Default + } + + // Look in cache. + // If we've already done an import with the same set + // of relevant tags, reuse the result. + var key string + if usePkgCache { + if tags, ok := pkgTags[dir]; ok { + key = tagKey(dir, context, tags) + if pkg := pkgCache[key]; pkg != nil { + w.imported[name] = pkg + return pkg, nil + } + } + } + + info, err := context.ImportDir(dir, 0) + if err != nil { + if _, nogo := err.(*build.NoGoError); nogo { + return nil, err + } + log.Fatalf("pkg %q, dir %q: ScanDir: %v", name, dir, err) + } + + // Save tags list first time we see a directory. + if usePkgCache { + if _, ok := pkgTags[dir]; !ok { + pkgTags[dir] = info.AllTags + key = tagKey(dir, context, info.AllTags) + } + } + + filenames := append(append([]string{}, info.GoFiles...), info.CgoFiles...) + + // Parse package files. 
+ var files []*ast.File + for _, file := range filenames { + f, err := w.parseFile(dir, file) + if err != nil { + log.Fatalf("error parsing package %s: %s", name, err) + } + files = append(files, f) + } + + // Type-check package files. + var sizes types.Sizes + if w.context != nil { + sizes = types.SizesFor(w.context.Compiler, w.context.GOARCH) + } + conf := types.Config{ + IgnoreFuncBodies: true, + FakeImportC: true, + Importer: w, + Sizes: sizes, + } + tpkg, err := conf.Check(name, fset, files, nil) + if err != nil { + ctxt := "" + if w.context != nil { + ctxt = fmt.Sprintf("%s-%s", w.context.GOOS, w.context.GOARCH) + } + log.Fatalf("error typechecking package %s: %s (%s)", name, err, ctxt) + } + pkg = &apiPackage{tpkg, files} + + if usePkgCache { + pkgCache[key] = pkg + } + + w.imported[name] = pkg + return pkg, nil +} + +// pushScope enters a new scope (walking a package, type, node, etc) +// and returns a function that will leave the scope (with sanity checking +// for mismatched pushes & pops) +func (w *Walker) pushScope(name string) (popFunc func()) { + w.scope = append(w.scope, name) + return func() { + if len(w.scope) == 0 { + log.Fatalf("attempt to leave scope %q with empty scope list", name) + } + if w.scope[len(w.scope)-1] != name { + log.Fatalf("attempt to leave scope %q, but scope is currently %#v", name, w.scope) + } + w.scope = w.scope[:len(w.scope)-1] + } +} + +func sortedMethodNames(typ *types.Interface) []string { + n := typ.NumMethods() + list := make([]string, n) + for i := range list { + list[i] = typ.Method(i).Name() + } + sort.Strings(list) + return list +} + +// sortedEmbeddeds returns constraint types embedded in an +// interface. It does not include embedded interface types or methods. 
+func (w *Walker) sortedEmbeddeds(typ *types.Interface) []string { + n := typ.NumEmbeddeds() + list := make([]string, 0, n) + for i := 0; i < n; i++ { + emb := typ.EmbeddedType(i) + switch emb := emb.(type) { + case *types.Interface: + list = append(list, w.sortedEmbeddeds(emb)...) + case *types.Union: + var buf bytes.Buffer + nu := emb.Len() + for i := 0; i < nu; i++ { + if i > 0 { + buf.WriteString(" | ") + } + term := emb.Term(i) + if term.Tilde() { + buf.WriteByte('~') + } + w.writeType(&buf, term.Type()) + } + list = append(list, buf.String()) + } + } + sort.Strings(list) + return list +} + +func (w *Walker) writeType(buf *bytes.Buffer, typ types.Type) { + switch typ := typ.(type) { + case *types.Basic: + s := typ.Name() + switch typ.Kind() { + case types.UnsafePointer: + s = "unsafe.Pointer" + case types.UntypedBool: + s = "ideal-bool" + case types.UntypedInt: + s = "ideal-int" + case types.UntypedRune: + // "ideal-char" for compatibility with old tool + // TODO(gri) change to "ideal-rune" + s = "ideal-char" + case types.UntypedFloat: + s = "ideal-float" + case types.UntypedComplex: + s = "ideal-complex" + case types.UntypedString: + s = "ideal-string" + case types.UntypedNil: + panic("should never see untyped nil type") + default: + switch s { + case "byte": + s = "uint8" + case "rune": + s = "int32" + } + } + buf.WriteString(s) + + case *types.Array: + fmt.Fprintf(buf, "[%d]", typ.Len()) + w.writeType(buf, typ.Elem()) + + case *types.Slice: + buf.WriteString("[]") + w.writeType(buf, typ.Elem()) + + case *types.Struct: + buf.WriteString("struct") + + case *types.Pointer: + buf.WriteByte('*') + w.writeType(buf, typ.Elem()) + + case *types.Tuple: + panic("should never see a tuple type") + + case *types.Signature: + buf.WriteString("func") + w.writeSignature(buf, typ) + + case *types.Interface: + buf.WriteString("interface{") + if typ.NumMethods() > 0 || typ.NumEmbeddeds() > 0 { + buf.WriteByte(' ') + } + if typ.NumMethods() > 0 { + 
buf.WriteString(strings.Join(sortedMethodNames(typ), ", ")) + } + if typ.NumEmbeddeds() > 0 { + buf.WriteString(strings.Join(w.sortedEmbeddeds(typ), ", ")) + } + if typ.NumMethods() > 0 || typ.NumEmbeddeds() > 0 { + buf.WriteByte(' ') + } + buf.WriteString("}") + + case *types.Map: + buf.WriteString("map[") + w.writeType(buf, typ.Key()) + buf.WriteByte(']') + w.writeType(buf, typ.Elem()) + + case *types.Chan: + var s string + switch typ.Dir() { + case types.SendOnly: + s = "chan<- " + case types.RecvOnly: + s = "<-chan " + case types.SendRecv: + s = "chan " + default: + panic("unreachable") + } + buf.WriteString(s) + w.writeType(buf, typ.Elem()) + + case *types.Named: + obj := typ.Obj() + pkg := obj.Pkg() + if pkg != nil && pkg != w.current.Package { + buf.WriteString(pkg.Name()) + buf.WriteByte('.') + } + buf.WriteString(typ.Obj().Name()) + + case *types.TypeParam: + // Type parameter names may change, so use a placeholder instead. + fmt.Fprintf(buf, "$%d", typ.Index()) + + default: + panic(fmt.Sprintf("unknown type %T", typ)) + } +} + +func (w *Walker) writeSignature(buf *bytes.Buffer, sig *types.Signature) { + if tparams := sig.TypeParams(); tparams != nil { + w.writeTypeParams(buf, tparams, true) + } + w.writeParams(buf, sig.Params(), sig.Variadic()) + switch res := sig.Results(); res.Len() { + case 0: + // nothing to do + case 1: + buf.WriteByte(' ') + w.writeType(buf, res.At(0).Type()) + default: + buf.WriteByte(' ') + w.writeParams(buf, res, false) + } +} + +func (w *Walker) writeTypeParams(buf *bytes.Buffer, tparams *types.TypeParamList, withConstraints bool) { + buf.WriteByte('[') + c := tparams.Len() + for i := 0; i < c; i++ { + if i > 0 { + buf.WriteString(", ") + } + tp := tparams.At(i) + w.writeType(buf, tp) + if withConstraints { + buf.WriteByte(' ') + w.writeType(buf, tp.Constraint()) + } + } + buf.WriteByte(']') +} + +func (w *Walker) writeParams(buf *bytes.Buffer, t *types.Tuple, variadic bool) { + buf.WriteByte('(') + for i, n := 0, t.Len(); i < 
n; i++ { + if i > 0 { + buf.WriteString(", ") + } + typ := t.At(i).Type() + if variadic && i+1 == n { + buf.WriteString("...") + typ = typ.(*types.Slice).Elem() + } + w.writeType(buf, typ) + } + buf.WriteByte(')') +} + +func (w *Walker) typeString(typ types.Type) string { + var buf bytes.Buffer + w.writeType(&buf, typ) + return buf.String() +} + +func (w *Walker) signatureString(sig *types.Signature) string { + var buf bytes.Buffer + w.writeSignature(&buf, sig) + return buf.String() +} + +func (w *Walker) emitObj(obj types.Object) { + switch obj := obj.(type) { + case *types.Const: + if w.isDeprecated(obj) { + w.emitf("const %s //deprecated", obj.Name()) + } + w.emitf("const %s %s", obj.Name(), w.typeString(obj.Type())) + x := obj.Val() + short := x.String() + exact := x.ExactString() + if short == exact { + w.emitf("const %s = %s", obj.Name(), short) + } else { + w.emitf("const %s = %s // %s", obj.Name(), short, exact) + } + case *types.Var: + if w.isDeprecated(obj) { + w.emitf("var %s //deprecated", obj.Name()) + } + w.emitf("var %s %s", obj.Name(), w.typeString(obj.Type())) + case *types.TypeName: + w.emitType(obj) + case *types.Func: + w.emitFunc(obj) + default: + panic("unknown object: " + obj.String()) + } +} + +func (w *Walker) emitType(obj *types.TypeName) { + name := obj.Name() + if w.isDeprecated(obj) { + w.emitf("type %s //deprecated", name) + } + typ := obj.Type() + if obj.IsAlias() { + w.emitf("type %s = %s", name, w.typeString(typ)) + return + } + if tparams := obj.Type().(*types.Named).TypeParams(); tparams != nil { + var buf bytes.Buffer + buf.WriteString(name) + w.writeTypeParams(&buf, tparams, true) + name = buf.String() + } + switch typ := typ.Underlying().(type) { + case *types.Struct: + w.emitStructType(name, typ) + case *types.Interface: + w.emitIfaceType(name, typ) + return // methods are handled by emitIfaceType + default: + w.emitf("type %s %s", name, w.typeString(typ.Underlying())) + } + + // emit methods with value receiver + var 
methodNames map[string]bool + vset := types.NewMethodSet(typ) + for i, n := 0, vset.Len(); i < n; i++ { + m := vset.At(i) + if m.Obj().Exported() { + w.emitMethod(m) + if methodNames == nil { + methodNames = make(map[string]bool) + } + methodNames[m.Obj().Name()] = true + } + } + + // emit methods with pointer receiver; exclude + // methods that we have emitted already + // (the method set of *T includes the methods of T) + pset := types.NewMethodSet(types.NewPointer(typ)) + for i, n := 0, pset.Len(); i < n; i++ { + m := pset.At(i) + if m.Obj().Exported() && !methodNames[m.Obj().Name()] { + w.emitMethod(m) + } + } +} + +func (w *Walker) emitStructType(name string, typ *types.Struct) { + typeStruct := fmt.Sprintf("type %s struct", name) + w.emitf(typeStruct) + defer w.pushScope(typeStruct)() + + for i := 0; i < typ.NumFields(); i++ { + f := typ.Field(i) + if !f.Exported() { + continue + } + typ := f.Type() + if f.Anonymous() { + if w.isDeprecated(f) { + w.emitf("embedded %s //deprecated", w.typeString(typ)) + } + w.emitf("embedded %s", w.typeString(typ)) + continue + } + if w.isDeprecated(f) { + w.emitf("%s //deprecated", f.Name()) + } + w.emitf("%s %s", f.Name(), w.typeString(typ)) + } +} + +func (w *Walker) emitIfaceType(name string, typ *types.Interface) { + pop := w.pushScope("type " + name + " interface") + + var methodNames []string + complete := true + mset := types.NewMethodSet(typ) + for i, n := 0, mset.Len(); i < n; i++ { + m := mset.At(i).Obj().(*types.Func) + if !m.Exported() { + complete = false + continue + } + methodNames = append(methodNames, m.Name()) + if w.isDeprecated(m) { + w.emitf("%s //deprecated", m.Name()) + } + w.emitf("%s%s", m.Name(), w.signatureString(m.Type().(*types.Signature))) + } + + if !complete { + // The method set has unexported methods, so all the + // implementations are provided by the same package, + // so the method set can be extended. 
Instead of recording + // the full set of names (below), record only that there were + // unexported methods. (If the interface shrinks, we will notice + // because a method signature emitted during the last loop + // will disappear.) + w.emitf("unexported methods") + } + + pop() + + if !complete { + return + } + + if len(methodNames) == 0 { + w.emitf("type %s interface {}", name) + return + } + + sort.Strings(methodNames) + w.emitf("type %s interface { %s }", name, strings.Join(methodNames, ", ")) +} + +func (w *Walker) emitFunc(f *types.Func) { + sig := f.Type().(*types.Signature) + if sig.Recv() != nil { + panic("method considered a regular function: " + f.String()) + } + if w.isDeprecated(f) { + w.emitf("func %s //deprecated", f.Name()) + } + w.emitf("func %s%s", f.Name(), w.signatureString(sig)) +} + +func (w *Walker) emitMethod(m *types.Selection) { + sig := m.Type().(*types.Signature) + recv := sig.Recv().Type() + // report exported methods with unexported receiver base type + if true { + base := recv + if p, _ := recv.(*types.Pointer); p != nil { + base = p.Elem() + } + if obj := base.(*types.Named).Obj(); !obj.Exported() { + log.Fatalf("exported method with unexported receiver base type: %s", m) + } + } + tps := "" + if rtp := sig.RecvTypeParams(); rtp != nil { + var buf bytes.Buffer + w.writeTypeParams(&buf, rtp, false) + tps = buf.String() + } + if w.isDeprecated(m.Obj()) { + w.emitf("method (%s%s) %s //deprecated", w.typeString(recv), tps, m.Obj().Name()) + } + w.emitf("method (%s%s) %s%s", w.typeString(recv), tps, m.Obj().Name(), w.signatureString(sig)) +} + +func (w *Walker) emitf(format string, args ...any) { + f := strings.Join(w.scope, ", ") + ", " + fmt.Sprintf(format, args...) 
+ if strings.Contains(f, "\n") { + panic("feature contains newlines: " + f) + } + + if _, dup := w.features[f]; dup { + panic("duplicate feature inserted: " + f) + } + w.features[f] = true + + if verbose { + log.Printf("feature: %s", f) + } +} + +func needApproval(filename string) bool { + name := filepath.Base(filename) + if name == "go1.txt" { + return false + } + minor := strings.TrimSuffix(strings.TrimPrefix(name, "go1."), ".txt") + n, err := strconv.Atoi(minor) + if err != nil { + log.Fatalf("unexpected api file: %v", name) + } + return n >= 19 // started tracking approvals in Go 1.19 +} + +func (w *Walker) collectDeprecated() { + isDeprecated := func(doc *ast.CommentGroup) bool { + if doc != nil { + for _, c := range doc.List { + if strings.HasPrefix(c.Text, "// Deprecated:") { + return true + } + } + } + return false + } + + w.deprecated = make(map[token.Pos]bool) + mark := func(id *ast.Ident) { + if id != nil { + w.deprecated[id.Pos()] = true + } + } + for _, file := range w.current.Files { + ast.Inspect(file, func(n ast.Node) bool { + switch n := n.(type) { + case *ast.File: + if isDeprecated(n.Doc) { + mark(n.Name) + } + return true + case *ast.GenDecl: + if isDeprecated(n.Doc) { + for _, spec := range n.Specs { + switch spec := spec.(type) { + case *ast.ValueSpec: + for _, id := range spec.Names { + mark(id) + } + case *ast.TypeSpec: + mark(spec.Name) + } + } + } + return true // look at specs + case *ast.FuncDecl: + if isDeprecated(n.Doc) { + mark(n.Name) + } + return false + case *ast.TypeSpec: + if isDeprecated(n.Doc) { + mark(n.Name) + } + return true // recurse into struct or interface type + case *ast.StructType: + return true // recurse into fields + case *ast.InterfaceType: + return true // recurse into methods + case *ast.FieldList: + return true // recurse into fields + case *ast.ValueSpec: + if isDeprecated(n.Doc) { + for _, id := range n.Names { + mark(id) + } + } + return false + case *ast.Field: + if isDeprecated(n.Doc) { + for _, id := 
range n.Names { + mark(id) + } + if len(n.Names) == 0 { + // embedded field T or *T? + typ := n.Type + if ptr, ok := typ.(*ast.StarExpr); ok { + typ = ptr.X + } + if id, ok := typ.(*ast.Ident); ok { + mark(id) + } + } + } + return false + default: + return false + } + }) + } +} + +func (w *Walker) isDeprecated(obj types.Object) bool { + return w.deprecated[obj.Pos()] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue21181/dep/p.go b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue21181/dep/p.go new file mode 100644 index 0000000000000000000000000000000000000000..2d8e0c4ccef954c9f214bff0d64fa4bdd94f8245 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue21181/dep/p.go @@ -0,0 +1,5 @@ +package dep + +type Interface interface { + N([]byte) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue21181/dep/p_amd64.go b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue21181/dep/p_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..8a2343a0e228abdc316f533b67ec3d1d12681cd7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue21181/dep/p_amd64.go @@ -0,0 +1 @@ +package dep diff --git a/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue21181/indirect/p.go b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue21181/indirect/p.go new file mode 100644 index 0000000000000000000000000000000000000000..e37cf3fc44a86b7221c4762c3f9b9cdfd745dffb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue21181/indirect/p.go @@ -0,0 +1,5 @@ +package indirect + +import "dep" + +func F(dep.Interface) {} diff --git a/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue21181/p/p.go b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue21181/p/p.go new file mode 100644 index 0000000000000000000000000000000000000000..a704160edcc0267cd7dc5a12422c961d23ef0829 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue21181/p/p.go @@ -0,0 +1,9 @@ +package p + +import ( + "dep" +) + +type algo struct { + indrt func(dep.Interface) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue21181/p/p_amd64.go b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue21181/p/p_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..02b4cbf036c26f821247b6c3cf95c2a8f293f871 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue21181/p/p_amd64.go @@ -0,0 +1,7 @@ +package p + +import "indirect" + +var in = []algo{ + {indirect.F}, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue21181/p/p_generic.go b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue21181/p/p_generic.go new file mode 100644 index 0000000000000000000000000000000000000000..ad6df20187e9c0bfbbf3104f4c81067259e8a4ff --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue21181/p/p_generic.go @@ -0,0 +1,12 @@ +//go:build !amd64 +// +build !amd64 + +package p + +import ( + "indirect" +) + +var in = []algo{ + {indirect.F}, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue29837/p/README b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue29837/p/README new file mode 100644 index 0000000000000000000000000000000000000000..770bc0f1b2dbad3162cb685687b5b17e58f4857a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue29837/p/README @@ -0,0 +1 @@ +Empty directory for test, see https://golang.org/issues/29837. 
\ No newline at end of file diff --git a/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue64958/p/p.go b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue64958/p/p.go new file mode 100644 index 0000000000000000000000000000000000000000..feba86797f1c9f2a59348aa3d9a22920cc3a04e2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/issue64958/p/p.go @@ -0,0 +1,3 @@ +package p + +type BasicAlias = uint8 diff --git a/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p1/golden.txt b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p1/golden.txt new file mode 100644 index 0000000000000000000000000000000000000000..65c4f35d2ca69dfaa9e64ef95b1f8f9e8a628b75 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p1/golden.txt @@ -0,0 +1,104 @@ +pkg p1, const A = 1 +pkg p1, const A ideal-int +pkg p1, const A //deprecated +pkg p1, const A64 = 1 +pkg p1, const A64 int64 +pkg p1, const AIsLowerA = 11 +pkg p1, const AIsLowerA ideal-int +pkg p1, const B0 = 2 +pkg p1, const B0 ideal-int +pkg p1, const ConstChase2 = 11 +pkg p1, const ConstChase2 ideal-int +pkg p1, const ConversionConst = 5 +pkg p1, const ConversionConst MyInt +pkg p1, const FloatConst = 1.5 // 3/2 +pkg p1, const FloatConst ideal-float +pkg p1, const StrConst = "foo" +pkg p1, const StrConst ideal-string +pkg p1, func Bar(int8, int16, int64) +pkg p1, func Bar1(int8, int16, int64) uint64 +pkg p1, func Bar2(int8, int16, int64) (uint8, uint64) +pkg p1, func BarE() Error +pkg p1, func Now() Time +pkg p1, func PlainFunc(int, int, string) (*B, error) +pkg p1, func TakesFunc(func(int) int) +pkg p1, method (*B) JustOnB() +pkg p1, method (*B) OnBothTandBPtr() +pkg p1, method (*Embedded) OnEmbedded() +pkg p1, method (*S2) SMethod(int8, int16, int64) +pkg p1, method (*S2) SMethod //deprecated +pkg p1, method (*T) JustOnT() +pkg p1, method (*T) OnBothTandBPtr() +pkg p1, method (B) OnBothTandBVal() +pkg p1, method (S) StructValueMethod() +pkg p1, 
method (S) StructValueMethodNamedRecv() +pkg p1, method (S2) StructValueMethod() +pkg p1, method (S2) StructValueMethodNamedRecv() +pkg p1, method (T) OnBothTandBVal() +pkg p1, method (TPtrExported) OnEmbedded() +pkg p1, method (TPtrUnexported) OnBothTandBPtr() +pkg p1, method (TPtrUnexported) OnBothTandBVal() +pkg p1, type B struct +pkg p1, type ByteStruct struct +pkg p1, type ByteStruct struct, B uint8 +pkg p1, type ByteStruct struct, R int32 +pkg p1, type Codec struct +pkg p1, type Codec struct, Func func(int, int) int +pkg p1, type EmbedSelector struct +pkg p1, type EmbedSelector struct, embedded Time +pkg p1, type EmbedURLPtr struct +pkg p1, type EmbedURLPtr struct, embedded *URL +pkg p1, type Embedded struct +pkg p1, type Error interface { Error, Temporary } +pkg p1, type Error interface, Error() string +pkg p1, type Error interface, Temporary() bool +pkg p1, type FuncType func(int, int, string) (*B, error) +pkg p1, type I interface, Get(string) int64 +pkg p1, type I interface, Get //deprecated +pkg p1, type I interface, GetNamed(string) int64 +pkg p1, type I interface, Name() string +pkg p1, type I interface, PackageTwoMeth() +pkg p1, type I interface, Set(string, int64) +pkg p1, type I interface, unexported methods +pkg p1, type MyInt int +pkg p1, type Namer interface { Name } +pkg p1, type Namer interface, Name() string +pkg p1, type Private interface, X() +pkg p1, type Private interface, unexported methods +pkg p1, type Private //deprecated +pkg p1, type Public interface { X, Y } +pkg p1, type Public interface, X() +pkg p1, type Public interface, Y() +pkg p1, type S struct +pkg p1, type S struct, Public *int +pkg p1, type S struct, Public //deprecated +pkg p1, type S struct, PublicTime Time +pkg p1, type S2 struct +pkg p1, type S2 struct, Extra bool +pkg p1, type S2 struct, embedded S +pkg p1, type S2 struct, embedded S //deprecated +pkg p1, type SI struct +pkg p1, type SI struct, I int +pkg p1, type T struct +pkg p1, type TPtrExported struct +pkg p1, 
type TPtrExported struct, embedded *Embedded +pkg p1, type TPtrUnexported struct +pkg p1, type Time struct +pkg p1, type URL struct +pkg p1, type URL //deprecated +pkg p1, var Byte uint8 +pkg p1, var ByteConv []uint8 +pkg p1, var ByteFunc func(uint8) int32 +pkg p1, var ChecksumError error +pkg p1, var SIPtr *SI +pkg p1, var SIPtr2 *SI +pkg p1, var SIVal SI +pkg p1, var StrConv string +pkg p1, var V string +pkg p1, var V1 uint64 +pkg p1, var V2 p2.Twoer +pkg p1, var VError Error +pkg p1, var VError //deprecated +pkg p1, var X I +pkg p1, var X0 int64 +pkg p1, var Y int diff --git a/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p1/p1.go b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p1/p1.go new file mode 100644 index 0000000000000000000000000000000000000000..025563dbf30ecf52d31fea083d48e9115123fbe9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p1/p1.go @@ -0,0 +1,224 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p1 + +import ( + ptwo "p2" +) + +const ( + ConstChase2 = constChase // forward declaration to unexported ident + constChase = AIsLowerA // forward declaration to exported ident + + // Deprecated: use B. + A = 1 + a = 11 + A64 int64 = 1 + + AIsLowerA = a // previously declared +) + +const ( + ConversionConst = MyInt(5) +) + +// Variables from function calls. +var ( + V = ptwo.F() + // Deprecated: use WError. + VError = BarE() + V1 = Bar1(1, 2, 3) + V2 = ptwo.G() +) + +// Variables with conversions: +var ( + StrConv = string("foo") + ByteConv = []byte("foo") +) + +var ChecksumError = ptwo.NewError("gzip checksum error") + +const B0 = 2 +const StrConst = "foo" +const FloatConst = 1.5 + +type myInt int + +type MyInt int + +type Time struct{} + +type S struct { + // Deprecated: use PublicTime. + Public *int + private *int + PublicTime Time +} + +// Deprecated: use URI. 
+type URL struct{} + +type EmbedURLPtr struct { + *URL +} + +type S2 struct { + // Deprecated: use T. + S + Extra bool +} + +var X0 int64 + +var ( + Y int + X I +) + +type Namer interface { + Name() string +} + +type I interface { + Namer + ptwo.Twoer + Set(name string, balance int64) + // Deprecated: use GetNamed. + Get(string) int64 + GetNamed(string) (balance int64) + private() +} + +type Public interface { + X() + Y() +} + +// Deprecated: Use Unexported. +type Private interface { + X() + y() +} + +type Error interface { + error + Temporary() bool +} + +func (myInt) privateTypeMethod() {} +func (myInt) CapitalMethodUnexportedType() {} + +// Deprecated: use TMethod. +func (s *S2) SMethod(x int8, y int16, z int64) {} + +type s struct{} + +func (s) method() +func (s) Method() + +func (S) StructValueMethod() +func (ignored S) StructValueMethodNamedRecv() + +func (s *S2) unexported(x int8, y int16, z int64) {} + +func Bar(x int8, y int16, z int64) {} +func Bar1(x int8, y int16, z int64) uint64 {} +func Bar2(x int8, y int16, z int64) (uint8, uint64) {} +func BarE() Error {} + +func unexported(x int8, y int16, z int64) {} + +func TakesFunc(f func(dontWantName int) int) + +type Codec struct { + Func func(x int, y int) (z int) +} + +type SI struct { + I int +} + +var SIVal = SI{} +var SIPtr = &SI{} +var SIPtr2 *SI + +type T struct { + common +} + +type B struct { + common +} + +type common struct { + i int +} + +type TPtrUnexported struct { + *common +} + +type TPtrExported struct { + *Embedded +} + +type FuncType func(x, y int, s string) (b *B, err error) + +type Embedded struct{} + +func PlainFunc(x, y int, s string) (b *B, err error) + +func (*Embedded) OnEmbedded() {} + +func (*T) JustOnT() {} +func (*B) JustOnB() {} +func (*common) OnBothTandBPtr() {} +func (common) OnBothTandBVal() {} + +type EmbedSelector struct { + Time +} + +const ( + foo = "foo" + foo2 string = "foo2" + truth = foo == "foo" || foo2 == "foo2" +) + +func ellipsis(...string) {} + +func Now() Time 
{ + var now Time + return now +} + +var x = &S{ + Public: nil, + private: nil, + PublicTime: Now(), +} + +var parenExpr = (1 + 5) + +var funcLit = func() {} + +var m map[string]int + +var chanVar chan int + +var ifaceVar any = 5 + +var assertVar = ifaceVar.(int) + +var indexVar = m["foo"] + +var Byte byte +var ByteFunc func(byte) rune + +type ByteStruct struct { + B byte + R rune +} diff --git a/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p2/golden.txt b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p2/golden.txt new file mode 100644 index 0000000000000000000000000000000000000000..735d6681666910f41c08b9472fe7a692b23f059e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p2/golden.txt @@ -0,0 +1,8 @@ +pkg p2, func F() string +pkg p2, func F //deprecated +pkg p2, func G() Twoer +pkg p2, func NewError(string) error +pkg p2, type Twoer interface { PackageTwoMeth } +pkg p2, type Twoer interface, PackageTwoMeth() +pkg p2, type Twoer interface, PackageTwoMeth //deprecated + diff --git a/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p2/p2.go b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p2/p2.go new file mode 100644 index 0000000000000000000000000000000000000000..2ce4e7587cbaba3a59214a2618a37f8787eb2e20 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p2/p2.go @@ -0,0 +1,17 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p2 + +type Twoer interface { + // Deprecated: No good. + PackageTwoMeth() +} + +// Deprecated: No good. 
+func F() string {} + +func G() Twoer {} + +func NewError(s string) error {} diff --git a/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p3/golden.txt b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p3/golden.txt new file mode 100644 index 0000000000000000000000000000000000000000..a7dcccd1bdb7dd50ce888340fbb279cb0c8674c0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p3/golden.txt @@ -0,0 +1,3 @@ +pkg p3, func BadHop(int, int, int) (bool, bool, *ThirdBase, *ThirdBase, error) +pkg p3, method (*ThirdBase) GoodPlayer() (int, int, int) +pkg p3, type ThirdBase struct diff --git a/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p3/p3.go b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p3/p3.go new file mode 100644 index 0000000000000000000000000000000000000000..3a0686abb01981d84aba69fc9912af5bd90d8d8e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p3/p3.go @@ -0,0 +1,10 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p3 + +type ThirdBase struct{} + +func (tb *ThirdBase) GoodPlayer() (i, j, k int) +func BadHop(i, j, k int) (l, m bool, n, o *ThirdBase, err error) diff --git a/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p4/golden.txt b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p4/golden.txt new file mode 100644 index 0000000000000000000000000000000000000000..eec0598dcd1da1ca822b83897026f7a3e15f992d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p4/golden.txt @@ -0,0 +1,6 @@ +pkg p4, func NewPair[$0 interface{ M }, $1 interface{ ~int }]($0, $1) Pair +pkg p4, method (Pair[$0, $1]) Second() $1 +pkg p4, method (Pair[$0, $1]) First() $0 +pkg p4, type Pair[$0 interface{ M }, $1 interface{ ~int }] struct +pkg p4, func Clone[$0 interface{ ~[]$1 }, $1 interface{}]($0) $0 +pkg p4, func Clone //deprecated diff --git a/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p4/p4.go b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p4/p4.go new file mode 100644 index 0000000000000000000000000000000000000000..6c93e3e6f1aa4640d4876856705353d78005edf3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/api/testdata/src/pkg/p4/p4.go @@ -0,0 +1,27 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p4 + +type Pair[T1 interface{ M() }, T2 ~int] struct { + f1 T1 + f2 T2 +} + +func NewPair[T1 interface{ M() }, T2 ~int](v1 T1, v2 T2) Pair[T1, T2] { + return Pair[T1, T2]{f1: v1, f2: v2} +} + +func (p Pair[X1, _]) First() X1 { + return p.f1 +} + +func (p Pair[_, X2]) Second() X2 { + return p.f2 +} + +// Deprecated: Use something else. +func Clone[S ~[]T, T any](s S) S { + return append(S(nil), s...) 
+} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/doc.go b/platform/dbops/binaries/go/go/src/cmd/asm/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..179ac1474e96b4accc52cd393f5608f85fd66834 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/doc.go @@ -0,0 +1,62 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Asm, typically invoked as “go tool asm”, assembles the source file into an object +file named for the basename of the argument source file with a .o suffix. The +object file can then be combined with other objects into a package archive. + +# Command Line + +Usage: + + go tool asm [flags] file + +The specified file must be a Go assembly file. +The same assembler is used for all target operating systems and architectures. +The GOOS and GOARCH environment variables set the desired target. + +Flags: + + -D name[=value] + Predefine symbol name with an optional simple value. + Can be repeated to define multiple symbols. + -I dir1 -I dir2 + Search for #include files in dir1, dir2, etc, + after consulting $GOROOT/pkg/$GOOS_$GOARCH. + -S + Print assembly and machine code. + -V + Print assembler version and exit. + -debug + Dump instructions as they are parsed. + -dynlink + Support references to Go symbols defined in other shared libraries. + -e + No limit on number of errors reported. + -gensymabis + Write symbol ABI information to output file. Don't assemble. + -o file + Write output to file. The default is foo.o for /a/b/c/foo.s. + -p pkgpath + Set expected package import to pkgpath. + -shared + Generate code that can be linked into a shared library. + -spectre list + Enable spectre mitigations in list (all, ret). + -trimpath prefix + Remove prefix from recorded source file paths. + -v + Print debug output. 
+ +Input language: + +The assembler uses mostly the same syntax for all architectures, +the main variation having to do with addressing modes. Input is +run through a simplified C preprocessor that implements #include, +#define, #ifdef/endif, but not #if or ##. + +For more information, see https://golang.org/doc/asm. +*/ +package main diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/arch.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/arch.go new file mode 100644 index 0000000000000000000000000000000000000000..11bb7af899c1fd35cccaa5158d591c6ac80ee17b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/arch.go @@ -0,0 +1,760 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package arch defines architecture-specific information and support functions. +package arch + +import ( + "cmd/internal/obj" + "cmd/internal/obj/arm" + "cmd/internal/obj/arm64" + "cmd/internal/obj/loong64" + "cmd/internal/obj/mips" + "cmd/internal/obj/ppc64" + "cmd/internal/obj/riscv" + "cmd/internal/obj/s390x" + "cmd/internal/obj/wasm" + "cmd/internal/obj/x86" + "fmt" + "strings" +) + +// Pseudo-registers whose names are the constant name without the leading R. +const ( + RFP = -(iota + 1) + RSB + RSP + RPC +) + +// Arch wraps the link architecture object with more architecture-specific information. +type Arch struct { + *obj.LinkArch + // Map of instruction names to enumeration. + Instructions map[string]obj.As + // Map of register names to enumeration. + Register map[string]int16 + // Table of register prefix names. These are things like R for R(0) and SPR for SPR(268). + RegisterPrefix map[string]bool + // RegisterNumber converts R(10) into arm.REG_R10. + RegisterNumber func(string, int16) (int16, bool) + // Instruction is a jump. 
+ IsJump func(word string) bool +} + +// nilRegisterNumber is the register number function for architectures +// that do not accept the R(N) notation. It always returns failure. +func nilRegisterNumber(name string, n int16) (int16, bool) { + return 0, false +} + +// Set configures the architecture specified by GOARCH and returns its representation. +// It returns nil if GOARCH is not recognized. +func Set(GOARCH string, shared bool) *Arch { + switch GOARCH { + case "386": + return archX86(&x86.Link386) + case "amd64": + return archX86(&x86.Linkamd64) + case "arm": + return archArm() + case "arm64": + return archArm64() + case "loong64": + return archLoong64(&loong64.Linkloong64) + case "mips": + return archMips(&mips.Linkmips) + case "mipsle": + return archMips(&mips.Linkmipsle) + case "mips64": + return archMips64(&mips.Linkmips64) + case "mips64le": + return archMips64(&mips.Linkmips64le) + case "ppc64": + return archPPC64(&ppc64.Linkppc64) + case "ppc64le": + return archPPC64(&ppc64.Linkppc64le) + case "riscv64": + return archRISCV64(shared) + case "s390x": + return archS390x() + case "wasm": + return archWasm() + } + return nil +} + +func jumpX86(word string) bool { + return word[0] == 'J' || word == "CALL" || strings.HasPrefix(word, "LOOP") || word == "XBEGIN" +} + +func jumpRISCV(word string) bool { + switch word { + case "BEQ", "BEQZ", "BGE", "BGEU", "BGEZ", "BGT", "BGTU", "BGTZ", "BLE", "BLEU", "BLEZ", + "BLT", "BLTU", "BLTZ", "BNE", "BNEZ", "CALL", "JAL", "JALR", "JMP": + return true + } + return false +} + +func jumpWasm(word string) bool { + return word == "JMP" || word == "CALL" || word == "Call" || word == "Br" || word == "BrIf" +} + +func archX86(linkArch *obj.LinkArch) *Arch { + register := make(map[string]int16) + // Create maps for easy lookup of instruction names etc. + for i, s := range x86.Register { + register[s] = int16(i + x86.REG_AL) + } + // Pseudo-registers. 
+ register["SB"] = RSB + register["FP"] = RFP + register["PC"] = RPC + if linkArch == &x86.Linkamd64 { + // Alias g to R14 + register["g"] = x86.REGG + } + // Register prefix not used on this architecture. + + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range x86.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABaseAMD64 + } + } + // Annoying aliases. + instructions["JA"] = x86.AJHI /* alternate */ + instructions["JAE"] = x86.AJCC /* alternate */ + instructions["JB"] = x86.AJCS /* alternate */ + instructions["JBE"] = x86.AJLS /* alternate */ + instructions["JC"] = x86.AJCS /* alternate */ + instructions["JCC"] = x86.AJCC /* carry clear (CF = 0) */ + instructions["JCS"] = x86.AJCS /* carry set (CF = 1) */ + instructions["JE"] = x86.AJEQ /* alternate */ + instructions["JEQ"] = x86.AJEQ /* equal (ZF = 1) */ + instructions["JG"] = x86.AJGT /* alternate */ + instructions["JGE"] = x86.AJGE /* greater than or equal (signed) (SF = OF) */ + instructions["JGT"] = x86.AJGT /* greater than (signed) (ZF = 0 && SF = OF) */ + instructions["JHI"] = x86.AJHI /* higher (unsigned) (CF = 0 && ZF = 0) */ + instructions["JHS"] = x86.AJCC /* alternate */ + instructions["JL"] = x86.AJLT /* alternate */ + instructions["JLE"] = x86.AJLE /* less than or equal (signed) (ZF = 1 || SF != OF) */ + instructions["JLO"] = x86.AJCS /* alternate */ + instructions["JLS"] = x86.AJLS /* lower or same (unsigned) (CF = 1 || ZF = 1) */ + instructions["JLT"] = x86.AJLT /* less than (signed) (SF != OF) */ + instructions["JMI"] = x86.AJMI /* negative (minus) (SF = 1) */ + instructions["JNA"] = x86.AJLS /* alternate */ + instructions["JNAE"] = x86.AJCS /* alternate */ + instructions["JNB"] = x86.AJCC /* alternate */ + instructions["JNBE"] = x86.AJHI /* alternate */ + instructions["JNC"] = x86.AJCC /* alternate */ + instructions["JNE"] = x86.AJNE /* not equal (ZF = 0) */ + instructions["JNG"] = 
x86.AJLE /* alternate */ + instructions["JNGE"] = x86.AJLT /* alternate */ + instructions["JNL"] = x86.AJGE /* alternate */ + instructions["JNLE"] = x86.AJGT /* alternate */ + instructions["JNO"] = x86.AJOC /* alternate */ + instructions["JNP"] = x86.AJPC /* alternate */ + instructions["JNS"] = x86.AJPL /* alternate */ + instructions["JNZ"] = x86.AJNE /* alternate */ + instructions["JO"] = x86.AJOS /* alternate */ + instructions["JOC"] = x86.AJOC /* overflow clear (OF = 0) */ + instructions["JOS"] = x86.AJOS /* overflow set (OF = 1) */ + instructions["JP"] = x86.AJPS /* alternate */ + instructions["JPC"] = x86.AJPC /* parity clear (PF = 0) */ + instructions["JPE"] = x86.AJPS /* alternate */ + instructions["JPL"] = x86.AJPL /* non-negative (plus) (SF = 0) */ + instructions["JPO"] = x86.AJPC /* alternate */ + instructions["JPS"] = x86.AJPS /* parity set (PF = 1) */ + instructions["JS"] = x86.AJMI /* alternate */ + instructions["JZ"] = x86.AJEQ /* alternate */ + instructions["MASKMOVDQU"] = x86.AMASKMOVOU + instructions["MOVD"] = x86.AMOVQ + instructions["MOVDQ2Q"] = x86.AMOVQ + instructions["MOVNTDQ"] = x86.AMOVNTO + instructions["MOVOA"] = x86.AMOVO + instructions["PSLLDQ"] = x86.APSLLO + instructions["PSRLDQ"] = x86.APSRLO + instructions["PADDD"] = x86.APADDL + // Spellings originally used in CL 97235. + instructions["MOVBELL"] = x86.AMOVBEL + instructions["MOVBEQQ"] = x86.AMOVBEQ + instructions["MOVBEWW"] = x86.AMOVBEW + + return &Arch{ + LinkArch: linkArch, + Instructions: instructions, + Register: register, + RegisterPrefix: nil, + RegisterNumber: nilRegisterNumber, + IsJump: jumpX86, + } +} + +func archArm() *Arch { + register := make(map[string]int16) + // Create maps for easy lookup of instruction names etc. + // Note that there is no list of names as there is for x86. + for i := arm.REG_R0; i < arm.REG_SPSR; i++ { + register[obj.Rconv(i)] = int16(i) + } + // Avoid unintentionally clobbering g using R10. 
+ delete(register, "R10") + register["g"] = arm.REG_R10 + for i := 0; i < 16; i++ { + register[fmt.Sprintf("C%d", i)] = int16(i) + } + + // Pseudo-registers. + register["SB"] = RSB + register["FP"] = RFP + register["PC"] = RPC + register["SP"] = RSP + registerPrefix := map[string]bool{ + "F": true, + "R": true, + } + + // special operands for DMB/DSB instructions + register["MB_SY"] = arm.REG_MB_SY + register["MB_ST"] = arm.REG_MB_ST + register["MB_ISH"] = arm.REG_MB_ISH + register["MB_ISHST"] = arm.REG_MB_ISHST + register["MB_NSH"] = arm.REG_MB_NSH + register["MB_NSHST"] = arm.REG_MB_NSHST + register["MB_OSH"] = arm.REG_MB_OSH + register["MB_OSHST"] = arm.REG_MB_OSHST + + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range arm.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABaseARM + } + } + // Annoying aliases. + instructions["B"] = obj.AJMP + instructions["BL"] = obj.ACALL + // MCR differs from MRC by the way fields of the word are encoded. + // (Details in arm.go). Here we add the instruction so parse will find + // it, but give it an opcode number known only to us. + instructions["MCR"] = aMCR + + return &Arch{ + LinkArch: &arm.Linkarm, + Instructions: instructions, + Register: register, + RegisterPrefix: registerPrefix, + RegisterNumber: armRegisterNumber, + IsJump: jumpArm, + } +} + +func archArm64() *Arch { + register := make(map[string]int16) + // Create maps for easy lookup of instruction names etc. + // Note that there is no list of names as there is for 386 and amd64. + register[obj.Rconv(arm64.REGSP)] = int16(arm64.REGSP) + for i := arm64.REG_R0; i <= arm64.REG_R31; i++ { + register[obj.Rconv(i)] = int16(i) + } + // Rename R18 to R18_PLATFORM to avoid accidental use. 
+ register["R18_PLATFORM"] = register["R18"] + delete(register, "R18") + for i := arm64.REG_F0; i <= arm64.REG_F31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := arm64.REG_V0; i <= arm64.REG_V31; i++ { + register[obj.Rconv(i)] = int16(i) + } + + // System registers. + for i := 0; i < len(arm64.SystemReg); i++ { + register[arm64.SystemReg[i].Name] = arm64.SystemReg[i].Reg + } + + register["LR"] = arm64.REGLINK + + // Pseudo-registers. + register["SB"] = RSB + register["FP"] = RFP + register["PC"] = RPC + register["SP"] = RSP + // Avoid unintentionally clobbering g using R28. + delete(register, "R28") + register["g"] = arm64.REG_R28 + registerPrefix := map[string]bool{ + "F": true, + "R": true, + "V": true, + } + + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range arm64.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABaseARM64 + } + } + // Annoying aliases. + instructions["B"] = arm64.AB + instructions["BL"] = arm64.ABL + + return &Arch{ + LinkArch: &arm64.Linkarm64, + Instructions: instructions, + Register: register, + RegisterPrefix: registerPrefix, + RegisterNumber: arm64RegisterNumber, + IsJump: jumpArm64, + } + +} + +func archPPC64(linkArch *obj.LinkArch) *Arch { + register := make(map[string]int16) + // Create maps for easy lookup of instruction names etc. + // Note that there is no list of names as there is for x86. 
+ for i := ppc64.REG_R0; i <= ppc64.REG_R31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := ppc64.REG_F0; i <= ppc64.REG_F31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := ppc64.REG_V0; i <= ppc64.REG_V31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := ppc64.REG_VS0; i <= ppc64.REG_VS63; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := ppc64.REG_A0; i <= ppc64.REG_A7; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := ppc64.REG_CR0; i <= ppc64.REG_CR7; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := ppc64.REG_MSR; i <= ppc64.REG_CR; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := ppc64.REG_CR0LT; i <= ppc64.REG_CR7SO; i++ { + register[obj.Rconv(i)] = int16(i) + } + register["CR"] = ppc64.REG_CR + register["XER"] = ppc64.REG_XER + register["LR"] = ppc64.REG_LR + register["CTR"] = ppc64.REG_CTR + register["FPSCR"] = ppc64.REG_FPSCR + register["MSR"] = ppc64.REG_MSR + // Pseudo-registers. + register["SB"] = RSB + register["FP"] = RFP + register["PC"] = RPC + // Avoid unintentionally clobbering g using R30. + delete(register, "R30") + register["g"] = ppc64.REG_R30 + registerPrefix := map[string]bool{ + "CR": true, + "F": true, + "R": true, + "SPR": true, + } + + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range ppc64.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABasePPC64 + } + } + // The opcodes generated by x/arch's ppc64map are listed in + // a separate slice, add them too. + for i, s := range ppc64.GenAnames { + instructions[s] = obj.As(i) + ppc64.AFIRSTGEN + } + // Annoying aliases. 
+ instructions["BR"] = ppc64.ABR + instructions["BL"] = ppc64.ABL + + return &Arch{ + LinkArch: linkArch, + Instructions: instructions, + Register: register, + RegisterPrefix: registerPrefix, + RegisterNumber: ppc64RegisterNumber, + IsJump: jumpPPC64, + } +} + +func archMips(linkArch *obj.LinkArch) *Arch { + register := make(map[string]int16) + // Create maps for easy lookup of instruction names etc. + // Note that there is no list of names as there is for x86. + for i := mips.REG_R0; i <= mips.REG_R31; i++ { + register[obj.Rconv(i)] = int16(i) + } + + for i := mips.REG_F0; i <= mips.REG_F31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := mips.REG_M0; i <= mips.REG_M31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := mips.REG_FCR0; i <= mips.REG_FCR31; i++ { + register[obj.Rconv(i)] = int16(i) + } + register["HI"] = mips.REG_HI + register["LO"] = mips.REG_LO + // Pseudo-registers. + register["SB"] = RSB + register["FP"] = RFP + register["PC"] = RPC + // Avoid unintentionally clobbering g using R30. + delete(register, "R30") + register["g"] = mips.REG_R30 + + registerPrefix := map[string]bool{ + "F": true, + "FCR": true, + "M": true, + "R": true, + } + + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range mips.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABaseMIPS + } + } + // Annoying alias. + instructions["JAL"] = mips.AJAL + + return &Arch{ + LinkArch: linkArch, + Instructions: instructions, + Register: register, + RegisterPrefix: registerPrefix, + RegisterNumber: mipsRegisterNumber, + IsJump: jumpMIPS, + } +} + +func archMips64(linkArch *obj.LinkArch) *Arch { + register := make(map[string]int16) + // Create maps for easy lookup of instruction names etc. + // Note that there is no list of names as there is for x86. 
+ for i := mips.REG_R0; i <= mips.REG_R31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := mips.REG_F0; i <= mips.REG_F31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := mips.REG_M0; i <= mips.REG_M31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := mips.REG_FCR0; i <= mips.REG_FCR31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := mips.REG_W0; i <= mips.REG_W31; i++ { + register[obj.Rconv(i)] = int16(i) + } + register["HI"] = mips.REG_HI + register["LO"] = mips.REG_LO + // Pseudo-registers. + register["SB"] = RSB + register["FP"] = RFP + register["PC"] = RPC + // Avoid unintentionally clobbering g using R30. + delete(register, "R30") + register["g"] = mips.REG_R30 + // Avoid unintentionally clobbering RSB using R28. + delete(register, "R28") + register["RSB"] = mips.REG_R28 + registerPrefix := map[string]bool{ + "F": true, + "FCR": true, + "M": true, + "R": true, + "W": true, + } + + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range mips.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABaseMIPS + } + } + // Annoying alias. + instructions["JAL"] = mips.AJAL + + return &Arch{ + LinkArch: linkArch, + Instructions: instructions, + Register: register, + RegisterPrefix: registerPrefix, + RegisterNumber: mipsRegisterNumber, + IsJump: jumpMIPS, + } +} + +func archLoong64(linkArch *obj.LinkArch) *Arch { + register := make(map[string]int16) + // Create maps for easy lookup of instruction names etc. + // Note that there is no list of names as there is for x86. 
+ for i := loong64.REG_R0; i <= loong64.REG_R31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := loong64.REG_F0; i <= loong64.REG_F31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := loong64.REG_FCSR0; i <= loong64.REG_FCSR31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := loong64.REG_FCC0; i <= loong64.REG_FCC31; i++ { + register[obj.Rconv(i)] = int16(i) + } + // Pseudo-registers. + register["SB"] = RSB + register["FP"] = RFP + register["PC"] = RPC + // Avoid unintentionally clobbering g using R22. + delete(register, "R22") + register["g"] = loong64.REG_R22 + registerPrefix := map[string]bool{ + "F": true, + "FCSR": true, + "FCC": true, + "R": true, + } + + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range loong64.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABaseLoong64 + } + } + // Annoying alias. + instructions["JAL"] = loong64.AJAL + + return &Arch{ + LinkArch: linkArch, + Instructions: instructions, + Register: register, + RegisterPrefix: registerPrefix, + RegisterNumber: loong64RegisterNumber, + IsJump: jumpLoong64, + } +} + +func archRISCV64(shared bool) *Arch { + register := make(map[string]int16) + + // Standard register names. + for i := riscv.REG_X0; i <= riscv.REG_X31; i++ { + // Disallow X3 in shared mode, as this will likely be used as the + // GP register, which could result in problems in non-Go code, + // including signal handlers. + if shared && i == riscv.REG_GP { + continue + } + if i == riscv.REG_TP || i == riscv.REG_G { + continue + } + name := fmt.Sprintf("X%d", i-riscv.REG_X0) + register[name] = int16(i) + } + for i := riscv.REG_F0; i <= riscv.REG_F31; i++ { + name := fmt.Sprintf("F%d", i-riscv.REG_F0) + register[name] = int16(i) + } + + // General registers with ABI names. 
+ register["ZERO"] = riscv.REG_ZERO + register["RA"] = riscv.REG_RA + register["SP"] = riscv.REG_SP + register["GP"] = riscv.REG_GP + register["TP"] = riscv.REG_TP + register["T0"] = riscv.REG_T0 + register["T1"] = riscv.REG_T1 + register["T2"] = riscv.REG_T2 + register["S0"] = riscv.REG_S0 + register["S1"] = riscv.REG_S1 + register["A0"] = riscv.REG_A0 + register["A1"] = riscv.REG_A1 + register["A2"] = riscv.REG_A2 + register["A3"] = riscv.REG_A3 + register["A4"] = riscv.REG_A4 + register["A5"] = riscv.REG_A5 + register["A6"] = riscv.REG_A6 + register["A7"] = riscv.REG_A7 + register["S2"] = riscv.REG_S2 + register["S3"] = riscv.REG_S3 + register["S4"] = riscv.REG_S4 + register["S5"] = riscv.REG_S5 + register["S6"] = riscv.REG_S6 + register["S7"] = riscv.REG_S7 + register["S8"] = riscv.REG_S8 + register["S9"] = riscv.REG_S9 + register["S10"] = riscv.REG_S10 + // Skip S11 as it is the g register. + register["T3"] = riscv.REG_T3 + register["T4"] = riscv.REG_T4 + register["T5"] = riscv.REG_T5 + register["T6"] = riscv.REG_T6 + + // Go runtime register names. + register["g"] = riscv.REG_G + register["CTXT"] = riscv.REG_CTXT + register["TMP"] = riscv.REG_TMP + + // ABI names for floating point register. 
+ register["FT0"] = riscv.REG_FT0 + register["FT1"] = riscv.REG_FT1 + register["FT2"] = riscv.REG_FT2 + register["FT3"] = riscv.REG_FT3 + register["FT4"] = riscv.REG_FT4 + register["FT5"] = riscv.REG_FT5 + register["FT6"] = riscv.REG_FT6 + register["FT7"] = riscv.REG_FT7 + register["FS0"] = riscv.REG_FS0 + register["FS1"] = riscv.REG_FS1 + register["FA0"] = riscv.REG_FA0 + register["FA1"] = riscv.REG_FA1 + register["FA2"] = riscv.REG_FA2 + register["FA3"] = riscv.REG_FA3 + register["FA4"] = riscv.REG_FA4 + register["FA5"] = riscv.REG_FA5 + register["FA6"] = riscv.REG_FA6 + register["FA7"] = riscv.REG_FA7 + register["FS2"] = riscv.REG_FS2 + register["FS3"] = riscv.REG_FS3 + register["FS4"] = riscv.REG_FS4 + register["FS5"] = riscv.REG_FS5 + register["FS6"] = riscv.REG_FS6 + register["FS7"] = riscv.REG_FS7 + register["FS8"] = riscv.REG_FS8 + register["FS9"] = riscv.REG_FS9 + register["FS10"] = riscv.REG_FS10 + register["FS11"] = riscv.REG_FS11 + register["FT8"] = riscv.REG_FT8 + register["FT9"] = riscv.REG_FT9 + register["FT10"] = riscv.REG_FT10 + register["FT11"] = riscv.REG_FT11 + + // Pseudo-registers. + register["SB"] = RSB + register["FP"] = RFP + register["PC"] = RPC + + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range riscv.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABaseRISCV + } + } + + return &Arch{ + LinkArch: &riscv.LinkRISCV64, + Instructions: instructions, + Register: register, + RegisterPrefix: nil, + RegisterNumber: nilRegisterNumber, + IsJump: jumpRISCV, + } +} + +func archS390x() *Arch { + register := make(map[string]int16) + // Create maps for easy lookup of instruction names etc. + // Note that there is no list of names as there is for x86. 
+ for i := s390x.REG_R0; i <= s390x.REG_R15; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := s390x.REG_F0; i <= s390x.REG_F15; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := s390x.REG_V0; i <= s390x.REG_V31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := s390x.REG_AR0; i <= s390x.REG_AR15; i++ { + register[obj.Rconv(i)] = int16(i) + } + register["LR"] = s390x.REG_LR + // Pseudo-registers. + register["SB"] = RSB + register["FP"] = RFP + register["PC"] = RPC + // Avoid unintentionally clobbering g using R13. + delete(register, "R13") + register["g"] = s390x.REG_R13 + registerPrefix := map[string]bool{ + "AR": true, + "F": true, + "R": true, + } + + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range s390x.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABaseS390X + } + } + // Annoying aliases. + instructions["BR"] = s390x.ABR + instructions["BL"] = s390x.ABL + + return &Arch{ + LinkArch: &s390x.Links390x, + Instructions: instructions, + Register: register, + RegisterPrefix: registerPrefix, + RegisterNumber: s390xRegisterNumber, + IsJump: jumpS390x, + } +} + +func archWasm() *Arch { + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range wasm.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABaseWasm + } + } + + return &Arch{ + LinkArch: &wasm.Linkwasm, + Instructions: instructions, + Register: wasm.Register, + RegisterPrefix: nil, + RegisterNumber: nilRegisterNumber, + IsJump: jumpWasm, + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/arm.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/arm.go new file mode 100644 index 0000000000000000000000000000000000000000..22ac483b9222ac9289bf9e5a712e5b9953c911d5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/arm.go @@ 
-0,0 +1,257 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file encapsulates some of the odd characteristics of the ARM +// instruction set, to minimize its interaction with the core of the +// assembler. + +package arch + +import ( + "strings" + + "cmd/internal/obj" + "cmd/internal/obj/arm" +) + +var armLS = map[string]uint8{ + "U": arm.C_UBIT, + "S": arm.C_SBIT, + "W": arm.C_WBIT, + "P": arm.C_PBIT, + "PW": arm.C_WBIT | arm.C_PBIT, + "WP": arm.C_WBIT | arm.C_PBIT, +} + +var armSCOND = map[string]uint8{ + "EQ": arm.C_SCOND_EQ, + "NE": arm.C_SCOND_NE, + "CS": arm.C_SCOND_HS, + "HS": arm.C_SCOND_HS, + "CC": arm.C_SCOND_LO, + "LO": arm.C_SCOND_LO, + "MI": arm.C_SCOND_MI, + "PL": arm.C_SCOND_PL, + "VS": arm.C_SCOND_VS, + "VC": arm.C_SCOND_VC, + "HI": arm.C_SCOND_HI, + "LS": arm.C_SCOND_LS, + "GE": arm.C_SCOND_GE, + "LT": arm.C_SCOND_LT, + "GT": arm.C_SCOND_GT, + "LE": arm.C_SCOND_LE, + "AL": arm.C_SCOND_NONE, + "U": arm.C_UBIT, + "S": arm.C_SBIT, + "W": arm.C_WBIT, + "P": arm.C_PBIT, + "PW": arm.C_WBIT | arm.C_PBIT, + "WP": arm.C_WBIT | arm.C_PBIT, + "F": arm.C_FBIT, + "IBW": arm.C_WBIT | arm.C_PBIT | arm.C_UBIT, + "IAW": arm.C_WBIT | arm.C_UBIT, + "DBW": arm.C_WBIT | arm.C_PBIT, + "DAW": arm.C_WBIT, + "IB": arm.C_PBIT | arm.C_UBIT, + "IA": arm.C_UBIT, + "DB": arm.C_PBIT, + "DA": 0, +} + +var armJump = map[string]bool{ + "B": true, + "BL": true, + "BX": true, + "BEQ": true, + "BNE": true, + "BCS": true, + "BHS": true, + "BCC": true, + "BLO": true, + "BMI": true, + "BPL": true, + "BVS": true, + "BVC": true, + "BHI": true, + "BLS": true, + "BGE": true, + "BLT": true, + "BGT": true, + "BLE": true, + "CALL": true, + "JMP": true, +} + +func jumpArm(word string) bool { + return armJump[word] +} + +// IsARMCMP reports whether the op (as defined by an arm.A* constant) is +// one of the comparison instructions that require special handling. 
+func IsARMCMP(op obj.As) bool { + switch op { + case arm.ACMN, arm.ACMP, arm.ATEQ, arm.ATST: + return true + } + return false +} + +// IsARMSTREX reports whether the op (as defined by an arm.A* constant) is +// one of the STREX-like instructions that require special handling. +func IsARMSTREX(op obj.As) bool { + switch op { + case arm.ASTREX, arm.ASTREXD, arm.ASWPW, arm.ASWPBU: + return true + } + return false +} + +// MCR is not defined by the obj/arm; instead we define it privately here. +// It is encoded as an MRC with a bit inside the instruction word, +// passed to arch.ARMMRCOffset. +const aMCR = arm.ALAST + 1 + +// IsARMMRC reports whether the op (as defined by an arm.A* constant) is +// MRC or MCR. +func IsARMMRC(op obj.As) bool { + switch op { + case arm.AMRC, aMCR: // Note: aMCR is defined in this package. + return true + } + return false +} + +// IsARMBFX reports whether the op (as defined by an arm.A* constant) is one the +// BFX-like instructions which are in the form of "op $width, $LSB, (Reg,) Reg". +func IsARMBFX(op obj.As) bool { + switch op { + case arm.ABFX, arm.ABFXU, arm.ABFC, arm.ABFI: + return true + } + return false +} + +// IsARMFloatCmp reports whether the op is a floating comparison instruction. +func IsARMFloatCmp(op obj.As) bool { + switch op { + case arm.ACMPF, arm.ACMPD: + return true + } + return false +} + +// ARMMRCOffset implements the peculiar encoding of the MRC and MCR instructions. +// The difference between MRC and MCR is represented by a bit high in the word, not +// in the usual way by the opcode itself. Asm must use AMRC for both instructions, so +// we return the opcode for MRC so that asm doesn't need to import obj/arm. 
+func ARMMRCOffset(op obj.As, cond string, x0, x1, x2, x3, x4, x5 int64) (offset int64, op0 obj.As, ok bool) { + op1 := int64(0) + if op == arm.AMRC { + op1 = 1 + } + bits, ok := ParseARMCondition(cond) + if !ok { + return + } + offset = (0xe << 24) | // opcode + (op1 << 20) | // MCR/MRC + ((int64(bits) ^ arm.C_SCOND_XOR) << 28) | // scond + ((x0 & 15) << 8) | //coprocessor number + ((x1 & 7) << 21) | // coprocessor operation + ((x2 & 15) << 12) | // ARM register + ((x3 & 15) << 16) | // Crn + ((x4 & 15) << 0) | // Crm + ((x5 & 7) << 5) | // coprocessor information + (1 << 4) /* must be set */ + return offset, arm.AMRC, true +} + +// IsARMMULA reports whether the op (as defined by an arm.A* constant) is +// MULA, MULS, MMULA, MMULS, MULABB, MULAWB or MULAWT, the 4-operand instructions. +func IsARMMULA(op obj.As) bool { + switch op { + case arm.AMULA, arm.AMULS, arm.AMMULA, arm.AMMULS, arm.AMULABB, arm.AMULAWB, arm.AMULAWT: + return true + } + return false +} + +var bcode = []obj.As{ + arm.ABEQ, + arm.ABNE, + arm.ABCS, + arm.ABCC, + arm.ABMI, + arm.ABPL, + arm.ABVS, + arm.ABVC, + arm.ABHI, + arm.ABLS, + arm.ABGE, + arm.ABLT, + arm.ABGT, + arm.ABLE, + arm.AB, + obj.ANOP, +} + +// ARMConditionCodes handles the special condition code situation for the ARM. +// It returns a boolean to indicate success; failure means cond was unrecognized. +func ARMConditionCodes(prog *obj.Prog, cond string) bool { + if cond == "" { + return true + } + bits, ok := ParseARMCondition(cond) + if !ok { + return false + } + /* hack to make B.NE etc. work: turn it into the corresponding conditional */ + if prog.As == arm.AB { + prog.As = bcode[(bits^arm.C_SCOND_XOR)&0xf] + bits = (bits &^ 0xf) | arm.C_SCOND_NONE + } + prog.Scond = bits + return true +} + +// ParseARMCondition parses the conditions attached to an ARM instruction. +// The input is a single string consisting of period-separated condition +// codes, such as ".P.W". An initial period is ignored. 
+func ParseARMCondition(cond string) (uint8, bool) { + return parseARMCondition(cond, armLS, armSCOND) +} + +func parseARMCondition(cond string, ls, scond map[string]uint8) (uint8, bool) { + cond = strings.TrimPrefix(cond, ".") + if cond == "" { + return arm.C_SCOND_NONE, true + } + names := strings.Split(cond, ".") + bits := uint8(0) + for _, name := range names { + if b, present := ls[name]; present { + bits |= b + continue + } + if b, present := scond[name]; present { + bits = (bits &^ arm.C_SCOND) | b + continue + } + return 0, false + } + return bits, true +} + +func armRegisterNumber(name string, n int16) (int16, bool) { + if n < 0 || 15 < n { + return 0, false + } + switch name { + case "R": + return arm.REG_R0 + n, true + case "F": + return arm.REG_F0 + n, true + } + return 0, false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/arm64.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..e63601de6476f0d826c61771a501511608112bfb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/arm64.go @@ -0,0 +1,401 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file encapsulates some of the odd characteristics of the ARM64 +// instruction set, to minimize its interaction with the core of the +// assembler. 
+ +package arch + +import ( + "cmd/internal/obj" + "cmd/internal/obj/arm64" + "errors" +) + +var arm64LS = map[string]uint8{ + "P": arm64.C_XPOST, + "W": arm64.C_XPRE, +} + +var arm64Jump = map[string]bool{ + "B": true, + "BL": true, + "BEQ": true, + "BNE": true, + "BCS": true, + "BHS": true, + "BCC": true, + "BLO": true, + "BMI": true, + "BPL": true, + "BVS": true, + "BVC": true, + "BHI": true, + "BLS": true, + "BGE": true, + "BLT": true, + "BGT": true, + "BLE": true, + "CALL": true, + "CBZ": true, + "CBZW": true, + "CBNZ": true, + "CBNZW": true, + "JMP": true, + "TBNZ": true, + "TBZ": true, + + // ADR isn't really a jump, but it takes a PC or label reference, + // which needs to patched like a jump. + "ADR": true, + "ADRP": true, +} + +func jumpArm64(word string) bool { + return arm64Jump[word] +} + +var arm64SpecialOperand map[string]arm64.SpecialOperand + +// GetARM64SpecialOperand returns the internal representation of a special operand. +func GetARM64SpecialOperand(name string) arm64.SpecialOperand { + if arm64SpecialOperand == nil { + // Generate the mapping automatically when the first time the function is called. + arm64SpecialOperand = map[string]arm64.SpecialOperand{} + for opd := arm64.SPOP_BEGIN; opd < arm64.SPOP_END; opd++ { + arm64SpecialOperand[opd.String()] = opd + } + + // Handle some special cases. + specialMapping := map[string]arm64.SpecialOperand{ + // The internal representation of CS(CC) and HS(LO) are the same. + "CS": arm64.SPOP_HS, + "CC": arm64.SPOP_LO, + } + for s, opd := range specialMapping { + arm64SpecialOperand[s] = opd + } + } + if opd, ok := arm64SpecialOperand[name]; ok { + return opd + } + return arm64.SPOP_END +} + +// IsARM64ADR reports whether the op (as defined by an arm64.A* constant) is +// one of the comparison instructions that require special handling. 
+func IsARM64ADR(op obj.As) bool { + switch op { + case arm64.AADR, arm64.AADRP: + return true + } + return false +} + +// IsARM64CMP reports whether the op (as defined by an arm64.A* constant) is +// one of the comparison instructions that require special handling. +func IsARM64CMP(op obj.As) bool { + switch op { + case arm64.ACMN, arm64.ACMP, arm64.ATST, + arm64.ACMNW, arm64.ACMPW, arm64.ATSTW, + arm64.AFCMPS, arm64.AFCMPD, + arm64.AFCMPES, arm64.AFCMPED: + return true + } + return false +} + +// IsARM64STLXR reports whether the op (as defined by an arm64.A* +// constant) is one of the STLXR-like instructions that require special +// handling. +func IsARM64STLXR(op obj.As) bool { + switch op { + case arm64.ASTLXRB, arm64.ASTLXRH, arm64.ASTLXRW, arm64.ASTLXR, + arm64.ASTXRB, arm64.ASTXRH, arm64.ASTXRW, arm64.ASTXR, + arm64.ASTXP, arm64.ASTXPW, arm64.ASTLXP, arm64.ASTLXPW: + return true + } + // LDADDx/SWPx/CASx atomic instructions + return arm64.IsAtomicInstruction(op) +} + +// IsARM64TBL reports whether the op (as defined by an arm64.A* +// constant) is one of the TBL-like instructions and one of its +// inputs does not fit into prog.Reg, so require special handling. +func IsARM64TBL(op obj.As) bool { + switch op { + case arm64.AVTBL, arm64.AVTBX, arm64.AVMOVQ: + return true + } + return false +} + +// IsARM64CASP reports whether the op (as defined by an arm64.A* +// constant) is one of the CASP-like instructions, and its 2nd +// destination is a register pair that require special handling. +func IsARM64CASP(op obj.As) bool { + switch op { + case arm64.ACASPD, arm64.ACASPW: + return true + } + return false +} + +// ARM64Suffix handles the special suffix for the ARM64. +// It returns a boolean to indicate success; failure means +// cond was unrecognized. 
+func ARM64Suffix(prog *obj.Prog, cond string) bool { + if cond == "" { + return true + } + bits, ok := parseARM64Suffix(cond) + if !ok { + return false + } + prog.Scond = bits + return true +} + +// parseARM64Suffix parses the suffix attached to an ARM64 instruction. +// The input is a single string consisting of period-separated condition +// codes, such as ".P.W". An initial period is ignored. +func parseARM64Suffix(cond string) (uint8, bool) { + if cond == "" { + return 0, true + } + return parseARMCondition(cond, arm64LS, nil) +} + +func arm64RegisterNumber(name string, n int16) (int16, bool) { + switch name { + case "F": + if 0 <= n && n <= 31 { + return arm64.REG_F0 + n, true + } + case "R": + if 0 <= n && n <= 30 { // not 31 + return arm64.REG_R0 + n, true + } + case "V": + if 0 <= n && n <= 31 { + return arm64.REG_V0 + n, true + } + } + return 0, false +} + +// ARM64RegisterShift constructs an ARM64 register with shift operation. +func ARM64RegisterShift(reg, op, count int16) (int64, error) { + // the base register of shift operations must be general register. + if reg > arm64.REG_R31 || reg < arm64.REG_R0 { + return 0, errors.New("invalid register for shift operation") + } + return int64(reg&31)<<16 | int64(op)<<22 | int64(uint16(count)), nil +} + +// ARM64RegisterExtension constructs an ARM64 register with extension or arrangement. 
+func ARM64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, isIndex bool) error { + Rnum := (reg & 31) + int16(num<<5) + if isAmount { + if num < 0 || num > 7 { + return errors.New("index shift amount is out of range") + } + } + if reg <= arm64.REG_R31 && reg >= arm64.REG_R0 { + if !isAmount { + return errors.New("invalid register extension") + } + switch ext { + case "UXTB": + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = arm64.REG_UXTB + Rnum + case "UXTH": + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = arm64.REG_UXTH + Rnum + case "UXTW": + // effective address of memory is a base register value and an offset register value. + if a.Type == obj.TYPE_MEM { + a.Index = arm64.REG_UXTW + Rnum + } else { + a.Reg = arm64.REG_UXTW + Rnum + } + case "UXTX": + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = arm64.REG_UXTX + Rnum + case "SXTB": + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = arm64.REG_SXTB + Rnum + case "SXTH": + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = arm64.REG_SXTH + Rnum + case "SXTW": + if a.Type == obj.TYPE_MEM { + a.Index = arm64.REG_SXTW + Rnum + } else { + a.Reg = arm64.REG_SXTW + Rnum + } + case "SXTX": + if a.Type == obj.TYPE_MEM { + a.Index = arm64.REG_SXTX + Rnum + } else { + a.Reg = arm64.REG_SXTX + Rnum + } + case "LSL": + a.Index = arm64.REG_LSL + Rnum + default: + return errors.New("unsupported general register extension type: " + ext) + + } + } else if reg <= arm64.REG_V31 && reg >= arm64.REG_V0 { + switch ext { + case "B8": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8B & 15) << 
5) + case "B16": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_16B & 15) << 5) + case "H4": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4H & 15) << 5) + case "H8": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8H & 15) << 5) + case "S2": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2S & 15) << 5) + case "S4": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4S & 15) << 5) + case "D1": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_1D & 15) << 5) + case "D2": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2D & 15) << 5) + case "Q1": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_1Q & 15) << 5) + case "B": + if !isIndex { + return nil + } + a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_B & 15) << 5) + a.Index = num + case "H": + if !isIndex { + return nil + } + a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_H & 15) << 5) + a.Index = num + case "S": + if !isIndex { + return nil + } + a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_S & 15) << 5) + a.Index = num + case "D": + if !isIndex { + return nil + } + a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_D & 15) << 5) + a.Index = num + default: + return errors.New("unsupported simd register extension type: " + ext) + } + } else { + return errors.New("invalid register and extension combination") + } + return nil +} + +// ARM64RegisterArrangement constructs an ARM64 vector register arrangement. 
+func ARM64RegisterArrangement(reg int16, name, arng string) (int64, error) { + var curQ, curSize uint16 + if name[0] != 'V' { + return 0, errors.New("expect V0 through V31; found: " + name) + } + if reg < 0 { + return 0, errors.New("invalid register number: " + name) + } + switch arng { + case "B8": + curSize = 0 + curQ = 0 + case "B16": + curSize = 0 + curQ = 1 + case "H4": + curSize = 1 + curQ = 0 + case "H8": + curSize = 1 + curQ = 1 + case "S2": + curSize = 2 + curQ = 0 + case "S4": + curSize = 2 + curQ = 1 + case "D1": + curSize = 3 + curQ = 0 + case "D2": + curSize = 3 + curQ = 1 + default: + return 0, errors.New("invalid arrangement in ARM64 register list") + } + return (int64(curQ) & 1 << 30) | (int64(curSize&3) << 10), nil +} + +// ARM64RegisterListOffset generates offset encoding according to AArch64 specification. +func ARM64RegisterListOffset(firstReg, regCnt int, arrangement int64) (int64, error) { + offset := int64(firstReg) + switch regCnt { + case 1: + offset |= 0x7 << 12 + case 2: + offset |= 0xa << 12 + case 3: + offset |= 0x6 << 12 + case 4: + offset |= 0x2 << 12 + default: + return 0, errors.New("invalid register numbers in ARM64 register list") + } + offset |= arrangement + // arm64 uses the 60th bit to differentiate from other archs + // For more details, refer to: obj/arm64/list7.go + offset |= 1 << 60 + return offset, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/loong64.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/loong64.go new file mode 100644 index 0000000000000000000000000000000000000000..2958ee1a8681789aef426fcca3060ece0587c039 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/loong64.go @@ -0,0 +1,78 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This file encapsulates some of the odd characteristics of the +// Loong64 (LoongArch64) instruction set, to minimize its interaction +// with the core of the assembler. + +package arch + +import ( + "cmd/internal/obj" + "cmd/internal/obj/loong64" +) + +func jumpLoong64(word string) bool { + switch word { + case "BEQ", "BFPF", "BFPT", "BLTZ", "BGEZ", "BLEZ", "BGTZ", "BLT", "BLTU", "JIRL", "BNE", "BGE", "BGEU", "JMP", "JAL", "CALL": + return true + } + return false +} + +// IsLoong64CMP reports whether the op (as defined by an loong64.A* constant) is +// one of the CMP instructions that require special handling. +func IsLoong64CMP(op obj.As) bool { + switch op { + case loong64.ACMPEQF, loong64.ACMPEQD, loong64.ACMPGEF, loong64.ACMPGED, + loong64.ACMPGTF, loong64.ACMPGTD: + return true + } + return false +} + +// IsLoong64MUL reports whether the op (as defined by an loong64.A* constant) is +// one of the MUL/DIV/REM instructions that require special handling. +func IsLoong64MUL(op obj.As) bool { + switch op { + case loong64.AMUL, loong64.AMULU, loong64.AMULV, loong64.AMULVU, + loong64.ADIV, loong64.ADIVU, loong64.ADIVV, loong64.ADIVVU, + loong64.AREM, loong64.AREMU, loong64.AREMV, loong64.AREMVU: + return true + } + return false +} + +// IsLoong64RDTIME reports whether the op (as defined by an loong64.A* +// constant) is one of the RDTIMELW/RDTIMEHW/RDTIMED instructions that +// require special handling. 
+func IsLoong64RDTIME(op obj.As) bool { + switch op { + case loong64.ARDTIMELW, loong64.ARDTIMEHW, loong64.ARDTIMED: + return true + } + return false +} + +func loong64RegisterNumber(name string, n int16) (int16, bool) { + switch name { + case "F": + if 0 <= n && n <= 31 { + return loong64.REG_F0 + n, true + } + case "FCSR": + if 0 <= n && n <= 31 { + return loong64.REG_FCSR0 + n, true + } + case "FCC": + if 0 <= n && n <= 31 { + return loong64.REG_FCC0 + n, true + } + case "R": + if 0 <= n && n <= 31 { + return loong64.REG_R0 + n, true + } + } + return 0, false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/mips.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/mips.go new file mode 100644 index 0000000000000000000000000000000000000000..5d71f40fbea6f8854fb4429d692696cf2666a4fc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/mips.go @@ -0,0 +1,72 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file encapsulates some of the odd characteristics of the +// MIPS (MIPS64) instruction set, to minimize its interaction +// with the core of the assembler. + +package arch + +import ( + "cmd/internal/obj" + "cmd/internal/obj/mips" +) + +func jumpMIPS(word string) bool { + switch word { + case "BEQ", "BFPF", "BFPT", "BGEZ", "BGEZAL", "BGTZ", "BLEZ", "BLTZ", "BLTZAL", "BNE", "JMP", "JAL", "CALL": + return true + } + return false +} + +// IsMIPSCMP reports whether the op (as defined by an mips.A* constant) is +// one of the CMP instructions that require special handling. 
+func IsMIPSCMP(op obj.As) bool { + switch op { + case mips.ACMPEQF, mips.ACMPEQD, mips.ACMPGEF, mips.ACMPGED, + mips.ACMPGTF, mips.ACMPGTD: + return true + } + return false +} + +// IsMIPSMUL reports whether the op (as defined by an mips.A* constant) is +// one of the MUL/DIV/REM/MADD/MSUB instructions that require special handling. +func IsMIPSMUL(op obj.As) bool { + switch op { + case mips.AMUL, mips.AMULU, mips.AMULV, mips.AMULVU, + mips.ADIV, mips.ADIVU, mips.ADIVV, mips.ADIVVU, + mips.AREM, mips.AREMU, mips.AREMV, mips.AREMVU, + mips.AMADD, mips.AMSUB: + return true + } + return false +} + +func mipsRegisterNumber(name string, n int16) (int16, bool) { + switch name { + case "F": + if 0 <= n && n <= 31 { + return mips.REG_F0 + n, true + } + case "FCR": + if 0 <= n && n <= 31 { + return mips.REG_FCR0 + n, true + } + case "M": + if 0 <= n && n <= 31 { + return mips.REG_M0 + n, true + } + case "R": + if 0 <= n && n <= 31 { + return mips.REG_R0 + n, true + } + case "W": + if 0 <= n && n <= 31 { + return mips.REG_W0 + n, true + } + } + return 0, false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/ppc64.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/ppc64.go new file mode 100644 index 0000000000000000000000000000000000000000..98a2bfedfdd0c54e2d3e7d37f4dafaf518a8eabe --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/ppc64.go @@ -0,0 +1,84 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file encapsulates some of the odd characteristics of the +// 64-bit PowerPC (PPC64) instruction set, to minimize its interaction +// with the core of the assembler. 
+ +package arch + +import ( + "cmd/internal/obj" + "cmd/internal/obj/ppc64" +) + +func jumpPPC64(word string) bool { + switch word { + case "BC", "BCL", "BEQ", "BGE", "BGT", "BL", "BLE", "BLT", "BNE", "BR", "BVC", "BVS", "BDNZ", "BDZ", "CALL", "JMP": + return true + } + return false +} + +// IsPPC64CMP reports whether the op (as defined by an ppc64.A* constant) is +// one of the CMP instructions that require special handling. +func IsPPC64CMP(op obj.As) bool { + switch op { + case ppc64.ACMP, ppc64.ACMPU, ppc64.ACMPW, ppc64.ACMPWU, ppc64.AFCMPU: + return true + } + return false +} + +// IsPPC64NEG reports whether the op (as defined by an ppc64.A* constant) is +// one of the NEG-like instructions that require special handling. +func IsPPC64NEG(op obj.As) bool { + switch op { + case ppc64.AADDMECC, ppc64.AADDMEVCC, ppc64.AADDMEV, ppc64.AADDME, + ppc64.AADDZECC, ppc64.AADDZEVCC, ppc64.AADDZEV, ppc64.AADDZE, + ppc64.ACNTLZDCC, ppc64.ACNTLZD, ppc64.ACNTLZWCC, ppc64.ACNTLZW, + ppc64.AEXTSBCC, ppc64.AEXTSB, ppc64.AEXTSHCC, ppc64.AEXTSH, + ppc64.AEXTSWCC, ppc64.AEXTSW, ppc64.ANEGCC, ppc64.ANEGVCC, + ppc64.ANEGV, ppc64.ANEG, ppc64.ASLBMFEE, ppc64.ASLBMFEV, + ppc64.ASLBMTE, ppc64.ASUBMECC, ppc64.ASUBMEVCC, ppc64.ASUBMEV, + ppc64.ASUBME, ppc64.ASUBZECC, ppc64.ASUBZEVCC, ppc64.ASUBZEV, + ppc64.ASUBZE: + return true + } + return false +} + +func ppc64RegisterNumber(name string, n int16) (int16, bool) { + switch name { + case "CR": + if 0 <= n && n <= 7 { + return ppc64.REG_CR0 + n, true + } + case "A": + if 0 <= n && n <= 8 { + return ppc64.REG_A0 + n, true + } + case "VS": + if 0 <= n && n <= 63 { + return ppc64.REG_VS0 + n, true + } + case "V": + if 0 <= n && n <= 31 { + return ppc64.REG_V0 + n, true + } + case "F": + if 0 <= n && n <= 31 { + return ppc64.REG_F0 + n, true + } + case "R": + if 0 <= n && n <= 31 { + return ppc64.REG_R0 + n, true + } + case "SPR": + if 0 <= n && n <= 1024 { + return ppc64.REG_SPR0 + n, true + } + } + return 0, false +} diff --git 
a/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/riscv64.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/riscv64.go new file mode 100644 index 0000000000000000000000000000000000000000..27a66c5e637bb7b6bd7f6f8e358298f50fc24e68 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/riscv64.go @@ -0,0 +1,28 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file encapsulates some of the odd characteristics of the RISCV64 +// instruction set, to minimize its interaction with the core of the +// assembler. + +package arch + +import ( + "cmd/internal/obj" + "cmd/internal/obj/riscv" +) + +// IsRISCV64AMO reports whether the op (as defined by a riscv.A* +// constant) is one of the AMO instructions that requires special +// handling. +func IsRISCV64AMO(op obj.As) bool { + switch op { + case riscv.ASCW, riscv.ASCD, riscv.AAMOSWAPW, riscv.AAMOSWAPD, riscv.AAMOADDW, riscv.AAMOADDD, + riscv.AAMOANDW, riscv.AAMOANDD, riscv.AAMOORW, riscv.AAMOORD, riscv.AAMOXORW, riscv.AAMOXORD, + riscv.AAMOMINW, riscv.AAMOMIND, riscv.AAMOMINUW, riscv.AAMOMINUD, + riscv.AAMOMAXW, riscv.AAMOMAXD, riscv.AAMOMAXUW, riscv.AAMOMAXUD: + return true + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/s390x.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/s390x.go new file mode 100644 index 0000000000000000000000000000000000000000..519d20877c06cf1d909521d2ebd5b2823bd8b692 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/arch/s390x.go @@ -0,0 +1,81 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file encapsulates some of the odd characteristics of the +// s390x instruction set, to minimize its interaction +// with the core of the assembler. 
+ +package arch + +import ( + "cmd/internal/obj/s390x" +) + +func jumpS390x(word string) bool { + switch word { + case "BRC", + "BC", + "BCL", + "BEQ", + "BGE", + "BGT", + "BL", + "BLE", + "BLEU", + "BLT", + "BLTU", + "BNE", + "BR", + "BVC", + "BVS", + "BRCT", + "BRCTG", + "CMPBEQ", + "CMPBGE", + "CMPBGT", + "CMPBLE", + "CMPBLT", + "CMPBNE", + "CMPUBEQ", + "CMPUBGE", + "CMPUBGT", + "CMPUBLE", + "CMPUBLT", + "CMPUBNE", + "CRJ", + "CGRJ", + "CLRJ", + "CLGRJ", + "CIJ", + "CGIJ", + "CLIJ", + "CLGIJ", + "CALL", + "JMP": + return true + } + return false +} + +func s390xRegisterNumber(name string, n int16) (int16, bool) { + switch name { + case "AR": + if 0 <= n && n <= 15 { + return s390x.REG_AR0 + n, true + } + case "F": + if 0 <= n && n <= 15 { + return s390x.REG_F0 + n, true + } + case "R": + if 0 <= n && n <= 15 { + return s390x.REG_R0 + n, true + } + case "V": + if 0 <= n && n <= 31 { + return s390x.REG_V0 + n, true + } + } + return 0, false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/asm.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/asm.go new file mode 100644 index 0000000000000000000000000000000000000000..375ef803bb7b724acd0ccf7c6be3663659768a65 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/asm.go @@ -0,0 +1,952 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asm + +import ( + "fmt" + "internal/abi" + "strconv" + "strings" + "text/scanner" + + "cmd/asm/internal/arch" + "cmd/asm/internal/flags" + "cmd/asm/internal/lex" + "cmd/internal/obj" + "cmd/internal/obj/ppc64" + "cmd/internal/obj/x86" + "cmd/internal/sys" +) + +// TODO: configure the architecture + +var testOut *strings.Builder // Gathers output when testing. + +// append adds the Prog to the end of the program-thus-far. +// If doLabel is set, it also defines the labels collect for this Prog. 
+func (p *Parser) append(prog *obj.Prog, cond string, doLabel bool) { + if cond != "" { + switch p.arch.Family { + case sys.ARM: + if !arch.ARMConditionCodes(prog, cond) { + p.errorf("unrecognized condition code .%q", cond) + return + } + + case sys.ARM64: + if !arch.ARM64Suffix(prog, cond) { + p.errorf("unrecognized suffix .%q", cond) + return + } + + case sys.AMD64, sys.I386: + if err := x86.ParseSuffix(prog, cond); err != nil { + p.errorf("%v", err) + return + } + + default: + p.errorf("unrecognized suffix .%q", cond) + return + } + } + if p.firstProg == nil { + p.firstProg = prog + } else { + p.lastProg.Link = prog + } + p.lastProg = prog + if doLabel { + p.pc++ + for _, label := range p.pendingLabels { + if p.labels[label] != nil { + p.errorf("label %q multiply defined", label) + return + } + p.labels[label] = prog + } + p.pendingLabels = p.pendingLabels[0:0] + } + prog.Pc = p.pc + if *flags.Debug { + fmt.Println(p.lineNum, prog) + } + if testOut != nil { + fmt.Fprintln(testOut, prog) + } +} + +// validSymbol checks that addr represents a valid name for a pseudo-op. +func (p *Parser) validSymbol(pseudo string, addr *obj.Addr, offsetOk bool) bool { + if addr.Sym == nil || addr.Name != obj.NAME_EXTERN && addr.Name != obj.NAME_STATIC || addr.Scale != 0 || addr.Reg != 0 { + p.errorf("%s symbol %q must be a symbol(SB)", pseudo, symbolName(addr)) + return false + } + if !offsetOk && addr.Offset != 0 { + p.errorf("%s symbol %q must not be offset from SB", pseudo, symbolName(addr)) + return false + } + return true +} + +// evalInteger evaluates an integer constant for a pseudo-op. +func (p *Parser) evalInteger(pseudo string, operands []lex.Token) int64 { + addr := p.address(operands) + return p.getConstantPseudo(pseudo, &addr) +} + +// validImmediate checks that addr represents an immediate constant. 
+func (p *Parser) validImmediate(pseudo string, addr *obj.Addr) bool { + if addr.Type != obj.TYPE_CONST || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 { + p.errorf("%s: expected immediate constant; found %s", pseudo, obj.Dconv(&emptyProg, addr)) + return false + } + return true +} + +// asmText assembles a TEXT pseudo-op. +// TEXT runtime·sigtramp(SB),4,$0-0 +func (p *Parser) asmText(operands [][]lex.Token) { + if len(operands) != 2 && len(operands) != 3 { + p.errorf("expect two or three operands for TEXT") + return + } + + // Labels are function scoped. Patch existing labels and + // create a new label space for this TEXT. + p.patch() + p.labels = make(map[string]*obj.Prog) + + // Operand 0 is the symbol name in the form foo(SB). + // That means symbol plus indirect on SB and no offset. + nameAddr := p.address(operands[0]) + if !p.validSymbol("TEXT", &nameAddr, false) { + return + } + name := symbolName(&nameAddr) + next := 1 + + // Next operand is the optional text flag, a literal integer. + var flag = int64(0) + if len(operands) == 3 { + flag = p.evalInteger("TEXT", operands[1]) + next++ + } + + // Issue an error if we see a function defined as ABIInternal + // without NOSPLIT. In ABIInternal, obj needs to know the function + // signature in order to construct the morestack path, so this + // currently isn't supported for asm functions. + if nameAddr.Sym.ABI() == obj.ABIInternal && flag&obj.NOSPLIT == 0 { + p.errorf("TEXT %q: ABIInternal requires NOSPLIT", name) + } + + // Next operand is the frame and arg size. + // Bizarre syntax: $frameSize-argSize is two words, not subtraction. + // Both frameSize and argSize must be simple integers; only frameSize + // can be negative. + // The "-argSize" may be missing; if so, set it to objabi.ArgsSizeUnknown. + // Parse left to right. 
+ op := operands[next] + if len(op) < 2 || op[0].ScanToken != '$' { + p.errorf("TEXT %s: frame size must be an immediate constant", name) + return + } + op = op[1:] + negative := false + if op[0].ScanToken == '-' { + negative = true + op = op[1:] + } + if len(op) == 0 || op[0].ScanToken != scanner.Int { + p.errorf("TEXT %s: frame size must be an immediate constant", name) + return + } + frameSize := p.positiveAtoi(op[0].String()) + if negative { + frameSize = -frameSize + } + op = op[1:] + argSize := int64(abi.ArgsSizeUnknown) + if len(op) > 0 { + // There is an argument size. It must be a minus sign followed by a non-negative integer literal. + if len(op) != 2 || op[0].ScanToken != '-' || op[1].ScanToken != scanner.Int { + p.errorf("TEXT %s: argument size must be of form -integer", name) + return + } + argSize = p.positiveAtoi(op[1].String()) + } + p.ctxt.InitTextSym(nameAddr.Sym, int(flag), p.pos()) + prog := &obj.Prog{ + Ctxt: p.ctxt, + As: obj.ATEXT, + Pos: p.pos(), + From: nameAddr, + To: obj.Addr{ + Type: obj.TYPE_TEXTSIZE, + Offset: frameSize, + // Argsize set below. + }, + } + nameAddr.Sym.Func().Text = prog + prog.To.Val = int32(argSize) + p.append(prog, "", true) +} + +// asmData assembles a DATA pseudo-op. +// DATA masks<>+0x00(SB)/4, $0x00000000 +func (p *Parser) asmData(operands [][]lex.Token) { + if len(operands) != 2 { + p.errorf("expect two operands for DATA") + return + } + + // Operand 0 has the general form foo<>+0x04(SB)/4. + op := operands[0] + n := len(op) + if n < 3 || op[n-2].ScanToken != '/' || op[n-1].ScanToken != scanner.Int { + p.errorf("expect /size for DATA argument") + return + } + szop := op[n-1].String() + sz, err := strconv.Atoi(szop) + if err != nil { + p.errorf("bad size for DATA argument: %q", szop) + } + op = op[:n-2] + nameAddr := p.address(op) + if !p.validSymbol("DATA", &nameAddr, true) { + return + } + name := symbolName(&nameAddr) + + // Operand 1 is an immediate constant or address. 
+ valueAddr := p.address(operands[1]) + switch valueAddr.Type { + case obj.TYPE_CONST, obj.TYPE_FCONST, obj.TYPE_SCONST, obj.TYPE_ADDR: + // OK + default: + p.errorf("DATA value must be an immediate constant or address") + return + } + + // The addresses must not overlap. Easiest test: require monotonicity. + if lastAddr, ok := p.dataAddr[name]; ok && nameAddr.Offset < lastAddr { + p.errorf("overlapping DATA entry for %s", name) + return + } + p.dataAddr[name] = nameAddr.Offset + int64(sz) + + switch valueAddr.Type { + case obj.TYPE_CONST: + switch sz { + case 1, 2, 4, 8: + nameAddr.Sym.WriteInt(p.ctxt, nameAddr.Offset, int(sz), valueAddr.Offset) + default: + p.errorf("bad int size for DATA argument: %d", sz) + } + case obj.TYPE_FCONST: + switch sz { + case 4: + nameAddr.Sym.WriteFloat32(p.ctxt, nameAddr.Offset, float32(valueAddr.Val.(float64))) + case 8: + nameAddr.Sym.WriteFloat64(p.ctxt, nameAddr.Offset, valueAddr.Val.(float64)) + default: + p.errorf("bad float size for DATA argument: %d", sz) + } + case obj.TYPE_SCONST: + nameAddr.Sym.WriteString(p.ctxt, nameAddr.Offset, int(sz), valueAddr.Val.(string)) + case obj.TYPE_ADDR: + if sz == p.arch.PtrSize { + nameAddr.Sym.WriteAddr(p.ctxt, nameAddr.Offset, int(sz), valueAddr.Sym, valueAddr.Offset) + } else { + p.errorf("bad addr size for DATA argument: %d", sz) + } + } +} + +// asmGlobl assembles a GLOBL pseudo-op. +// GLOBL shifts<>(SB),8,$256 +// GLOBL shifts<>(SB),$256 +func (p *Parser) asmGlobl(operands [][]lex.Token) { + if len(operands) != 2 && len(operands) != 3 { + p.errorf("expect two or three operands for GLOBL") + return + } + + // Operand 0 has the general form foo<>+0x04(SB). + nameAddr := p.address(operands[0]) + if !p.validSymbol("GLOBL", &nameAddr, false) { + return + } + next := 1 + + // Next operand is the optional flag, a literal integer. + var flag = int64(0) + if len(operands) == 3 { + flag = p.evalInteger("GLOBL", operands[1]) + next++ + } + + // Final operand is an immediate constant. 
+ addr := p.address(operands[next]) + if !p.validImmediate("GLOBL", &addr) { + return + } + + // log.Printf("GLOBL %s %d, $%d", name, flag, size) + p.ctxt.GloblPos(nameAddr.Sym, addr.Offset, int(flag), p.pos()) +} + +// asmPCData assembles a PCDATA pseudo-op. +// PCDATA $2, $705 +func (p *Parser) asmPCData(operands [][]lex.Token) { + if len(operands) != 2 { + p.errorf("expect two operands for PCDATA") + return + } + + // Operand 0 must be an immediate constant. + key := p.address(operands[0]) + if !p.validImmediate("PCDATA", &key) { + return + } + + // Operand 1 must be an immediate constant. + value := p.address(operands[1]) + if !p.validImmediate("PCDATA", &value) { + return + } + + // log.Printf("PCDATA $%d, $%d", key.Offset, value.Offset) + prog := &obj.Prog{ + Ctxt: p.ctxt, + As: obj.APCDATA, + Pos: p.pos(), + From: key, + To: value, + } + p.append(prog, "", true) +} + +// asmPCAlign assembles a PCALIGN pseudo-op. +// PCALIGN $16 +func (p *Parser) asmPCAlign(operands [][]lex.Token) { + if len(operands) != 1 { + p.errorf("expect one operand for PCALIGN") + return + } + + // Operand 0 must be an immediate constant. + key := p.address(operands[0]) + if !p.validImmediate("PCALIGN", &key) { + return + } + + prog := &obj.Prog{ + Ctxt: p.ctxt, + As: obj.APCALIGN, + From: key, + } + p.append(prog, "", true) +} + +// asmFuncData assembles a FUNCDATA pseudo-op. +// FUNCDATA $1, funcdata<>+4(SB) +func (p *Parser) asmFuncData(operands [][]lex.Token) { + if len(operands) != 2 { + p.errorf("expect two operands for FUNCDATA") + return + } + + // Operand 0 must be an immediate constant. + valueAddr := p.address(operands[0]) + if !p.validImmediate("FUNCDATA", &valueAddr) { + return + } + + // Operand 1 is a symbol name in the form foo(SB). 
+ nameAddr := p.address(operands[1]) + if !p.validSymbol("FUNCDATA", &nameAddr, true) { + return + } + + prog := &obj.Prog{ + Ctxt: p.ctxt, + As: obj.AFUNCDATA, + Pos: p.pos(), + From: valueAddr, + To: nameAddr, + } + p.append(prog, "", true) +} + +// asmJump assembles a jump instruction. +// JMP R1 +// JMP exit +// JMP 3(PC) +func (p *Parser) asmJump(op obj.As, cond string, a []obj.Addr) { + var target *obj.Addr + prog := &obj.Prog{ + Ctxt: p.ctxt, + Pos: p.pos(), + As: op, + } + targetAddr := &prog.To + switch len(a) { + case 0: + if p.arch.Family == sys.Wasm { + target = &obj.Addr{Type: obj.TYPE_NONE} + break + } + p.errorf("wrong number of arguments to %s instruction", op) + return + case 1: + target = &a[0] + case 2: + // Special 2-operand jumps. + if p.arch.Family == sys.ARM64 && arch.IsARM64ADR(op) { + // ADR label, R. Label is in From. + target = &a[0] + prog.To = a[1] + targetAddr = &prog.From + } else { + target = &a[1] + prog.From = a[0] + } + case 3: + if p.arch.Family == sys.PPC64 { + // Special 3-operand jumps. + // a[1] is a register number expressed as a constant or register value + target = &a[2] + prog.From = a[0] + if a[0].Type != obj.TYPE_CONST { + // Legacy code may use a plain constant, accept it, and coerce + // into a constant. E.g: + // BC 4,... + // into + // BC $4,... + prog.From = obj.Addr{ + Type: obj.TYPE_CONST, + Offset: p.getConstant(prog, op, &a[0]), + } + + } + + // Likewise, fixup usage like: + // BC x,LT,... + // BC x,foo+2,... + // BC x,4 + // BC x,$5 + // into + // BC x,CR0LT,... + // BC x,CR0EQ,... + // BC x,CR1LT,... + // BC x,CR1GT,... + // The first and second cases demonstrate a symbol name which is + // effectively discarded. In these cases, the offset determines + // the CR bit. + prog.Reg = a[1].Reg + if a[1].Type != obj.TYPE_REG { + // The CR bit is represented as a constant 0-31. Convert it to a Reg. 
+ c := p.getConstant(prog, op, &a[1]) + reg, success := ppc64.ConstantToCRbit(c) + if !success { + p.errorf("invalid CR bit register number %d", c) + } + prog.Reg = reg + } + break + } + if p.arch.Family == sys.MIPS || p.arch.Family == sys.MIPS64 || p.arch.Family == sys.RISCV64 { + // 3-operand jumps. + // First two must be registers + target = &a[2] + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + break + } + if p.arch.Family == sys.Loong64 { + // 3-operand jumps. + // First two must be registers + target = &a[2] + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + break + } + if p.arch.Family == sys.S390X { + // 3-operand jumps. + target = &a[2] + prog.From = a[0] + if a[1].Reg != 0 { + // Compare two registers and jump. + prog.Reg = p.getRegister(prog, op, &a[1]) + } else { + // Compare register with immediate and jump. + prog.AddRestSource(a[1]) + } + break + } + if p.arch.Family == sys.ARM64 { + // Special 3-operand jumps. + // a[0] must be immediate constant; a[1] is a register. + if a[0].Type != obj.TYPE_CONST { + p.errorf("%s: expected immediate constant; found %s", op, obj.Dconv(prog, &a[0])) + return + } + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + target = &a[2] + break + } + p.errorf("wrong number of arguments to %s instruction", op) + return + case 4: + if p.arch.Family == sys.S390X || p.arch.Family == sys.PPC64 { + // 4-operand compare-and-branch. + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + prog.AddRestSource(a[2]) + target = &a[3] + break + } + p.errorf("wrong number of arguments to %s instruction", op) + return + default: + p.errorf("wrong number of arguments to %s instruction", op) + return + } + switch { + case target.Type == obj.TYPE_BRANCH: + // JMP 4(PC) + *targetAddr = obj.Addr{ + Type: obj.TYPE_BRANCH, + Offset: p.pc + 1 + target.Offset, // +1 because p.pc is incremented in append, below. 
+ } + case target.Type == obj.TYPE_REG: + // JMP R1 + *targetAddr = *target + case target.Type == obj.TYPE_MEM && (target.Name == obj.NAME_EXTERN || target.Name == obj.NAME_STATIC): + // JMP main·morestack(SB) + *targetAddr = *target + case target.Type == obj.TYPE_INDIR && (target.Name == obj.NAME_EXTERN || target.Name == obj.NAME_STATIC): + // JMP *main·morestack(SB) + *targetAddr = *target + targetAddr.Type = obj.TYPE_INDIR + case target.Type == obj.TYPE_MEM && target.Reg == 0 && target.Offset == 0: + // JMP exit + if target.Sym == nil { + // Parse error left name unset. + return + } + targetProg := p.labels[target.Sym.Name] + if targetProg == nil { + p.toPatch = append(p.toPatch, Patch{targetAddr, target.Sym.Name}) + } else { + p.branch(targetAddr, targetProg) + } + case target.Type == obj.TYPE_MEM && target.Name == obj.NAME_NONE: + // JMP 4(R0) + *targetAddr = *target + // On the ppc64, 9a encodes BR (CTR) as BR CTR. We do the same. + if p.arch.Family == sys.PPC64 && target.Offset == 0 { + targetAddr.Type = obj.TYPE_REG + } + case target.Type == obj.TYPE_CONST: + // JMP $4 + *targetAddr = a[0] + case target.Type == obj.TYPE_NONE: + // JMP + default: + p.errorf("cannot assemble jump %+v", target) + return + } + + p.append(prog, cond, true) +} + +func (p *Parser) patch() { + for _, patch := range p.toPatch { + targetProg := p.labels[patch.label] + if targetProg == nil { + p.errorf("undefined label %s", patch.label) + return + } + p.branch(patch.addr, targetProg) + } + p.toPatch = p.toPatch[:0] +} + +func (p *Parser) branch(addr *obj.Addr, target *obj.Prog) { + *addr = obj.Addr{ + Type: obj.TYPE_BRANCH, + Index: 0, + } + addr.Val = target +} + +// asmInstruction assembles an instruction. +// MOVW R9, (R10) +func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { + // fmt.Printf("%s %+v\n", op, a) + prog := &obj.Prog{ + Ctxt: p.ctxt, + Pos: p.pos(), + As: op, + } + switch len(a) { + case 0: + // Nothing to do. 
+ case 1: + if p.arch.UnaryDst[op] || op == obj.ARET || op == obj.AGETCALLERPC { + // prog.From is no address. + prog.To = a[0] + } else { + prog.From = a[0] + // prog.To is no address. + } + if p.arch.Family == sys.PPC64 && arch.IsPPC64NEG(op) { + // NEG: From and To are both a[0]. + prog.To = a[0] + prog.From = a[0] + break + } + case 2: + if p.arch.Family == sys.ARM { + if arch.IsARMCMP(op) { + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + break + } + // Strange special cases. + if arch.IsARMFloatCmp(op) { + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + break + } + } else if p.arch.Family == sys.ARM64 && arch.IsARM64CMP(op) { + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + break + } else if p.arch.Family == sys.MIPS || p.arch.Family == sys.MIPS64 { + if arch.IsMIPSCMP(op) || arch.IsMIPSMUL(op) { + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + break + } + } else if p.arch.Family == sys.Loong64 { + if arch.IsLoong64CMP(op) { + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + break + } + + if arch.IsLoong64RDTIME(op) { + // The Loong64 RDTIME family of instructions is a bit special, + // in that both its register operands are outputs + prog.To = a[0] + if a[1].Type != obj.TYPE_REG { + p.errorf("invalid addressing modes for 2nd operand to %s instruction, must be register", op) + return + } + prog.RegTo2 = a[1].Reg + break + } + } + prog.From = a[0] + prog.To = a[1] + case 3: + switch p.arch.Family { + case sys.MIPS, sys.MIPS64: + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + prog.To = a[2] + case sys.Loong64: + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + prog.To = a[2] + case sys.ARM: + // Special cases. 
+ if arch.IsARMSTREX(op) { + /* + STREX x, (y), z + from=(y) reg=x to=z + */ + prog.From = a[1] + prog.Reg = p.getRegister(prog, op, &a[0]) + prog.To = a[2] + break + } + if arch.IsARMBFX(op) { + // a[0] and a[1] must be constants, a[2] must be a register + prog.From = a[0] + prog.AddRestSource(a[1]) + prog.To = a[2] + break + } + // Otherwise the 2nd operand (a[1]) must be a register. + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + prog.To = a[2] + case sys.AMD64: + prog.From = a[0] + prog.AddRestSource(a[1]) + prog.To = a[2] + case sys.ARM64: + switch { + case arch.IsARM64STLXR(op): + // ARM64 instructions with one input and two outputs. + prog.From = a[0] + prog.To = a[1] + if a[2].Type != obj.TYPE_REG { + p.errorf("invalid addressing modes for third operand to %s instruction, must be register", op) + return + } + prog.RegTo2 = a[2].Reg + case arch.IsARM64TBL(op): + // one of its inputs does not fit into prog.Reg. + prog.From = a[0] + prog.AddRestSource(a[1]) + prog.To = a[2] + case arch.IsARM64CASP(op): + prog.From = a[0] + prog.To = a[1] + // both 1st operand and 3rd operand are (Rs, Rs+1) register pair. + // And the register pair must be contiguous. + if (a[0].Type != obj.TYPE_REGREG) || (a[2].Type != obj.TYPE_REGREG) { + p.errorf("invalid addressing modes for 1st or 3rd operand to %s instruction, must be register pair", op) + return + } + // For ARM64 CASP-like instructions, its 2nd destination operand is register pair(Rt, Rt+1) that can + // not fit into prog.RegTo2, so save it to the prog.RestArgs. + prog.AddRestDest(a[2]) + default: + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + prog.To = a[2] + } + case sys.I386: + prog.From = a[0] + prog.AddRestSource(a[1]) + prog.To = a[2] + case sys.PPC64: + if arch.IsPPC64CMP(op) { + // CMPW etc.; third argument is a CR register that goes into prog.Reg. 
+ prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[2]) + prog.To = a[1] + break + } + + prog.From = a[0] + prog.To = a[2] + + // If the second argument is not a register argument, it must be + // passed RestArgs/AddRestSource + switch a[1].Type { + case obj.TYPE_REG: + prog.Reg = p.getRegister(prog, op, &a[1]) + default: + prog.AddRestSource(a[1]) + } + case sys.RISCV64: + // RISCV64 instructions with one input and two outputs. + if arch.IsRISCV64AMO(op) { + prog.From = a[0] + prog.To = a[1] + if a[2].Type != obj.TYPE_REG { + p.errorf("invalid addressing modes for third operand to %s instruction, must be register", op) + return + } + prog.RegTo2 = a[2].Reg + break + } + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + prog.To = a[2] + case sys.S390X: + prog.From = a[0] + if a[1].Type == obj.TYPE_REG { + prog.Reg = p.getRegister(prog, op, &a[1]) + } else { + prog.AddRestSource(a[1]) + } + prog.To = a[2] + default: + p.errorf("TODO: implement three-operand instructions for this architecture") + return + } + case 4: + if p.arch.Family == sys.ARM { + if arch.IsARMBFX(op) { + // a[0] and a[1] must be constants, a[2] and a[3] must be registers + prog.From = a[0] + prog.AddRestSource(a[1]) + prog.Reg = p.getRegister(prog, op, &a[2]) + prog.To = a[3] + break + } + if arch.IsARMMULA(op) { + // All must be registers. 
+ p.getRegister(prog, op, &a[0]) + r1 := p.getRegister(prog, op, &a[1]) + r2 := p.getRegister(prog, op, &a[2]) + p.getRegister(prog, op, &a[3]) + prog.From = a[0] + prog.To = a[3] + prog.To.Type = obj.TYPE_REGREG2 + prog.To.Offset = int64(r2) + prog.Reg = r1 + break + } + } + if p.arch.Family == sys.AMD64 { + prog.From = a[0] + prog.AddRestSourceArgs([]obj.Addr{a[1], a[2]}) + prog.To = a[3] + break + } + if p.arch.Family == sys.ARM64 { + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + prog.AddRestSource(a[2]) + prog.To = a[3] + break + } + if p.arch.Family == sys.PPC64 { + prog.From = a[0] + prog.To = a[3] + // If the second argument is not a register argument, it must be + // passed RestArgs/AddRestSource + if a[1].Type == obj.TYPE_REG { + prog.Reg = p.getRegister(prog, op, &a[1]) + prog.AddRestSource(a[2]) + } else { + // Don't set prog.Reg if a1 isn't a reg arg. + prog.AddRestSourceArgs([]obj.Addr{a[1], a[2]}) + } + break + } + if p.arch.Family == sys.RISCV64 { + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + prog.AddRestSource(a[2]) + prog.To = a[3] + break + } + if p.arch.Family == sys.S390X { + if a[1].Type != obj.TYPE_REG { + p.errorf("second operand must be a register in %s instruction", op) + return + } + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + prog.AddRestSource(a[2]) + prog.To = a[3] + break + } + p.errorf("can't handle %s instruction with 4 operands", op) + return + case 5: + if p.arch.Family == sys.PPC64 { + prog.From = a[0] + // Second arg is always a register type on ppc64. 
+ prog.Reg = p.getRegister(prog, op, &a[1]) + prog.AddRestSourceArgs([]obj.Addr{a[2], a[3]}) + prog.To = a[4] + break + } + if p.arch.Family == sys.AMD64 { + prog.From = a[0] + prog.AddRestSourceArgs([]obj.Addr{a[1], a[2], a[3]}) + prog.To = a[4] + break + } + if p.arch.Family == sys.S390X { + prog.From = a[0] + prog.AddRestSourceArgs([]obj.Addr{a[1], a[2], a[3]}) + prog.To = a[4] + break + } + p.errorf("can't handle %s instruction with 5 operands", op) + return + case 6: + if p.arch.Family == sys.ARM && arch.IsARMMRC(op) { + // Strange special case: MCR, MRC. + prog.To.Type = obj.TYPE_CONST + x0 := p.getConstant(prog, op, &a[0]) + x1 := p.getConstant(prog, op, &a[1]) + x2 := int64(p.getRegister(prog, op, &a[2])) + x3 := int64(p.getRegister(prog, op, &a[3])) + x4 := int64(p.getRegister(prog, op, &a[4])) + x5 := p.getConstant(prog, op, &a[5]) + // Cond is handled specially for this instruction. + offset, MRC, ok := arch.ARMMRCOffset(op, cond, x0, x1, x2, x3, x4, x5) + if !ok { + p.errorf("unrecognized condition code .%q", cond) + } + prog.To.Offset = offset + cond = "" + prog.As = MRC // Both instructions are coded as MRC. + break + } + if p.arch.Family == sys.PPC64 { + prog.From = a[0] + // Second arg is always a register type on ppc64. + prog.Reg = p.getRegister(prog, op, &a[1]) + prog.AddRestSourceArgs([]obj.Addr{a[2], a[3], a[4]}) + prog.To = a[5] + break + } + fallthrough + default: + p.errorf("can't handle %s instruction with %d operands", op, len(a)) + return + } + + p.append(prog, cond, true) +} + +// symbolName returns the symbol name, or an error string if none is available. +func symbolName(addr *obj.Addr) string { + if addr.Sym != nil { + return addr.Sym.Name + } + return "" +} + +var emptyProg obj.Prog + +// getConstantPseudo checks that addr represents a plain constant and returns its value. 
+func (p *Parser) getConstantPseudo(pseudo string, addr *obj.Addr) int64 { + if addr.Type != obj.TYPE_MEM || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 { + p.errorf("%s: expected integer constant; found %s", pseudo, obj.Dconv(&emptyProg, addr)) + } + return addr.Offset +} + +// getConstant checks that addr represents a plain constant and returns its value. +func (p *Parser) getConstant(prog *obj.Prog, op obj.As, addr *obj.Addr) int64 { + if addr.Type != obj.TYPE_MEM || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 { + p.errorf("%s: expected integer constant; found %s", op, obj.Dconv(prog, addr)) + } + return addr.Offset +} + +// getImmediate checks that addr represents an immediate constant and returns its value. +func (p *Parser) getImmediate(prog *obj.Prog, op obj.As, addr *obj.Addr) int64 { + if addr.Type != obj.TYPE_CONST || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 { + p.errorf("%s: expected immediate constant; found %s", op, obj.Dconv(prog, addr)) + } + return addr.Offset +} + +// getRegister checks that addr represents a register and returns its value. +func (p *Parser) getRegister(prog *obj.Prog, op obj.As, addr *obj.Addr) int16 { + if addr.Type != obj.TYPE_REG || addr.Offset != 0 || addr.Name != 0 || addr.Index != 0 { + p.errorf("%s: expected register; found %s", op, obj.Dconv(prog, addr)) + } + return addr.Reg +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/endtoend_test.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/endtoend_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6e1aa1cd95f42cd311b50e826e311bb5fc0bc0b8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/endtoend_test.go @@ -0,0 +1,492 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package asm + +import ( + "bufio" + "bytes" + "fmt" + "internal/buildcfg" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "testing" + + "cmd/asm/internal/lex" + "cmd/internal/obj" +) + +// An end-to-end test for the assembler: Do we print what we parse? +// Output is generated by, in effect, turning on -S and comparing the +// result against a golden file. + +func testEndToEnd(t *testing.T, goarch, file string) { + input := filepath.Join("testdata", file+".s") + architecture, ctxt := setArch(goarch) + architecture.Init(ctxt) + lexer := lex.NewLexer(input) + parser := NewParser(ctxt, architecture, lexer) + pList := new(obj.Plist) + var ok bool + testOut = new(strings.Builder) // The assembler writes test output to this buffer. + ctxt.Bso = bufio.NewWriter(os.Stdout) + ctxt.IsAsm = true + defer ctxt.Bso.Flush() + failed := false + ctxt.DiagFunc = func(format string, args ...interface{}) { + failed = true + t.Errorf(format, args...) + } + pList.Firstpc, ok = parser.Parse() + if !ok || failed { + t.Errorf("asm: %s assembly failed", goarch) + return + } + output := strings.Split(testOut.String(), "\n") + + // Reconstruct expected output by independently "parsing" the input. + data, err := os.ReadFile(input) + if err != nil { + t.Error(err) + return + } + lineno := 0 + seq := 0 + hexByLine := map[string]string{} + lines := strings.SplitAfter(string(data), "\n") +Diff: + for _, line := range lines { + lineno++ + + // Ignore include of textflag.h. + if strings.HasPrefix(line, "#include ") { + continue + } + + // Ignore GLOBL. 
+ if strings.HasPrefix(line, "GLOBL ") { + continue + } + + // The general form of a test input line is: + // // comment + // INST args [// printed form] [// hex encoding] + parts := strings.Split(line, "//") + printed := strings.TrimSpace(parts[0]) + if printed == "" || strings.HasSuffix(printed, ":") { // empty or label + continue + } + seq++ + + var hexes string + switch len(parts) { + default: + t.Errorf("%s:%d: unable to understand comments: %s", input, lineno, line) + case 1: + // no comment + case 2: + // might be printed form or hex + note := strings.TrimSpace(parts[1]) + if isHexes(note) { + hexes = note + } else { + printed = note + } + case 3: + // printed form, then hex + printed = strings.TrimSpace(parts[1]) + hexes = strings.TrimSpace(parts[2]) + if !isHexes(hexes) { + t.Errorf("%s:%d: malformed hex instruction encoding: %s", input, lineno, line) + } + } + + if hexes != "" { + hexByLine[fmt.Sprintf("%s:%d", input, lineno)] = hexes + } + + // Canonicalize spacing in printed form. + // First field is opcode, then tab, then arguments separated by spaces. + // Canonicalize spaces after commas first. + // Comma to separate argument gets a space; comma within does not. + var buf []byte + nest := 0 + for i := 0; i < len(printed); i++ { + c := printed[i] + switch c { + case '{', '[': + nest++ + case '}', ']': + nest-- + case ',': + buf = append(buf, ',') + if nest == 0 { + buf = append(buf, ' ') + } + for i+1 < len(printed) && (printed[i+1] == ' ' || printed[i+1] == '\t') { + i++ + } + continue + } + buf = append(buf, c) + } + + f := strings.Fields(string(buf)) + + // Turn relative (PC) into absolute (PC) automatically, + // so that most branch instructions don't need comments + // giving the absolute form. 
+ if len(f) > 0 && strings.Contains(printed, "(PC)") { + index := len(f) - 1 + suf := "(PC)" + for !strings.HasSuffix(f[index], suf) { + index-- + suf = "(PC)," + } + str := f[index] + n, err := strconv.Atoi(str[:len(str)-len(suf)]) + if err == nil { + f[index] = fmt.Sprintf("%d%s", seq+n, suf) + } + } + + if len(f) == 1 { + printed = f[0] + } else { + printed = f[0] + "\t" + strings.Join(f[1:], " ") + } + + want := fmt.Sprintf("%05d (%s:%d)\t%s", seq, input, lineno, printed) + for len(output) > 0 && (output[0] < want || output[0] != want && len(output[0]) >= 5 && output[0][:5] == want[:5]) { + if len(output[0]) >= 5 && output[0][:5] == want[:5] { + t.Errorf("mismatched output:\nhave %s\nwant %s", output[0], want) + output = output[1:] + continue Diff + } + t.Errorf("unexpected output: %q", output[0]) + output = output[1:] + } + if len(output) > 0 && output[0] == want { + output = output[1:] + } else { + t.Errorf("missing output: %q", want) + } + } + for len(output) > 0 { + if output[0] == "" { + // spurious blank caused by Split on "\n" + output = output[1:] + continue + } + t.Errorf("unexpected output: %q", output[0]) + output = output[1:] + } + + // Checked printing. + // Now check machine code layout. + + top := pList.Firstpc + var text *obj.LSym + ok = true + ctxt.DiagFunc = func(format string, args ...interface{}) { + t.Errorf(format, args...) 
+ ok = false + } + obj.Flushplist(ctxt, pList, nil) + + for p := top; p != nil; p = p.Link { + if p.As == obj.ATEXT { + text = p.From.Sym + } + hexes := hexByLine[p.Line()] + if hexes == "" { + continue + } + delete(hexByLine, p.Line()) + if text == nil { + t.Errorf("%s: instruction outside TEXT", p) + } + size := int64(len(text.P)) - p.Pc + if p.Link != nil { + size = p.Link.Pc - p.Pc + } else if p.Isize != 0 { + size = int64(p.Isize) + } + var code []byte + if p.Pc < int64(len(text.P)) { + code = text.P[p.Pc:] + if size < int64(len(code)) { + code = code[:size] + } + } + codeHex := fmt.Sprintf("%x", code) + if codeHex == "" { + codeHex = "empty" + } + ok := false + for _, hex := range strings.Split(hexes, " or ") { + if codeHex == hex { + ok = true + break + } + } + if !ok { + t.Errorf("%s: have encoding %s, want %s", p, codeHex, hexes) + } + } + + if len(hexByLine) > 0 { + var missing []string + for key := range hexByLine { + missing = append(missing, key) + } + sort.Strings(missing) + for _, line := range missing { + t.Errorf("%s: did not find instruction encoding", line) + } + } + +} + +func isHexes(s string) bool { + if s == "" { + return false + } + if s == "empty" { + return true + } + for _, f := range strings.Split(s, " or ") { + if f == "" || len(f)%2 != 0 || strings.TrimLeft(f, "0123456789abcdef") != "" { + return false + } + } + return true +} + +// It would be nice if the error messages always began with +// the standard file:line: prefix, +// but that's not where we are today. +// It might be at the beginning but it might be in the middle of the printed instruction. 
+var fileLineRE = regexp.MustCompile(`(?:^|\()(testdata[/\\][\da-z]+\.s:\d+)(?:$|\)|:)`) + +// Same as in test/run.go +var ( + errRE = regexp.MustCompile(`// ERROR ?(.*)`) + errQuotesRE = regexp.MustCompile(`"([^"]*)"`) +) + +func testErrors(t *testing.T, goarch, file string, flags ...string) { + input := filepath.Join("testdata", file+".s") + architecture, ctxt := setArch(goarch) + architecture.Init(ctxt) + lexer := lex.NewLexer(input) + parser := NewParser(ctxt, architecture, lexer) + pList := new(obj.Plist) + var ok bool + ctxt.Bso = bufio.NewWriter(os.Stdout) + ctxt.IsAsm = true + defer ctxt.Bso.Flush() + failed := false + var errBuf bytes.Buffer + parser.errorWriter = &errBuf + ctxt.DiagFunc = func(format string, args ...interface{}) { + failed = true + s := fmt.Sprintf(format, args...) + if !strings.HasSuffix(s, "\n") { + s += "\n" + } + errBuf.WriteString(s) + } + for _, flag := range flags { + switch flag { + case "dynlink": + ctxt.Flag_dynlink = true + default: + t.Errorf("unknown flag %s", flag) + } + } + pList.Firstpc, ok = parser.Parse() + obj.Flushplist(ctxt, pList, nil) + if ok && !failed { + t.Errorf("asm: %s had no errors", file) + } + + errors := map[string]string{} + for _, line := range strings.Split(errBuf.String(), "\n") { + if line == "" || strings.HasPrefix(line, "\t") { + continue + } + m := fileLineRE.FindStringSubmatch(line) + if m == nil { + t.Errorf("unexpected error: %v", line) + continue + } + fileline := m[1] + if errors[fileline] != "" && errors[fileline] != line { + t.Errorf("multiple errors on %s:\n\t%s\n\t%s", fileline, errors[fileline], line) + continue + } + errors[fileline] = line + } + + // Reconstruct expected errors by independently "parsing" the input. 
+ data, err := os.ReadFile(input) + if err != nil { + t.Error(err) + return + } + lineno := 0 + lines := strings.Split(string(data), "\n") + for _, line := range lines { + lineno++ + + fileline := fmt.Sprintf("%s:%d", input, lineno) + if m := errRE.FindStringSubmatch(line); m != nil { + all := m[1] + mm := errQuotesRE.FindAllStringSubmatch(all, -1) + if len(mm) != 1 { + t.Errorf("%s: invalid errorcheck line:\n%s", fileline, line) + } else if err := errors[fileline]; err == "" { + t.Errorf("%s: missing error, want %s", fileline, all) + } else if !strings.Contains(err, mm[0][1]) { + t.Errorf("%s: wrong error for %s:\n%s", fileline, all, err) + } + } else { + if errors[fileline] != "" { + t.Errorf("unexpected error on %s: %v", fileline, errors[fileline]) + } + } + delete(errors, fileline) + } + var extra []string + for key := range errors { + extra = append(extra, key) + } + sort.Strings(extra) + for _, fileline := range extra { + t.Errorf("unexpected error on %s: %v", fileline, errors[fileline]) + } +} + +func Test386EndToEnd(t *testing.T) { + testEndToEnd(t, "386", "386") +} + +func TestARMEndToEnd(t *testing.T) { + defer func(old int) { buildcfg.GOARM.Version = old }(buildcfg.GOARM.Version) + for _, goarm := range []int{5, 6, 7} { + t.Logf("GOARM=%d", goarm) + buildcfg.GOARM.Version = goarm + testEndToEnd(t, "arm", "arm") + if goarm == 6 { + testEndToEnd(t, "arm", "armv6") + } + } +} + +func TestGoBuildErrors(t *testing.T) { + testErrors(t, "amd64", "buildtagerror") +} + +func TestGenericErrors(t *testing.T) { + testErrors(t, "amd64", "duperror") +} + +func TestARMErrors(t *testing.T) { + testErrors(t, "arm", "armerror") +} + +func TestARM64EndToEnd(t *testing.T) { + testEndToEnd(t, "arm64", "arm64") +} + +func TestARM64Encoder(t *testing.T) { + testEndToEnd(t, "arm64", "arm64enc") +} + +func TestARM64Errors(t *testing.T) { + testErrors(t, "arm64", "arm64error") +} + +func TestAMD64EndToEnd(t *testing.T) { + testEndToEnd(t, "amd64", "amd64") +} + +func 
Test386Encoder(t *testing.T) { + testEndToEnd(t, "386", "386enc") +} + +func TestAMD64Encoder(t *testing.T) { + filenames := [...]string{ + "amd64enc", + "amd64enc_extra", + "avx512enc/aes_avx512f", + "avx512enc/gfni_avx512f", + "avx512enc/vpclmulqdq_avx512f", + "avx512enc/avx512bw", + "avx512enc/avx512cd", + "avx512enc/avx512dq", + "avx512enc/avx512er", + "avx512enc/avx512f", + "avx512enc/avx512pf", + "avx512enc/avx512_4fmaps", + "avx512enc/avx512_4vnniw", + "avx512enc/avx512_bitalg", + "avx512enc/avx512_ifma", + "avx512enc/avx512_vbmi", + "avx512enc/avx512_vbmi2", + "avx512enc/avx512_vnni", + "avx512enc/avx512_vpopcntdq", + } + for _, name := range filenames { + testEndToEnd(t, "amd64", name) + } +} + +func TestAMD64Errors(t *testing.T) { + testErrors(t, "amd64", "amd64error") +} + +func TestAMD64DynLinkErrors(t *testing.T) { + testErrors(t, "amd64", "amd64dynlinkerror", "dynlink") +} + +func TestMIPSEndToEnd(t *testing.T) { + testEndToEnd(t, "mips", "mips") + testEndToEnd(t, "mips64", "mips64") +} + +func TestLOONG64Encoder(t *testing.T) { + testEndToEnd(t, "loong64", "loong64enc1") + testEndToEnd(t, "loong64", "loong64enc2") + testEndToEnd(t, "loong64", "loong64enc3") + testEndToEnd(t, "loong64", "loong64") +} + +func TestPPC64EndToEnd(t *testing.T) { + defer func(old int) { buildcfg.GOPPC64 = old }(buildcfg.GOPPC64) + for _, goppc64 := range []int{8, 9, 10} { + t.Logf("GOPPC64=power%d", goppc64) + buildcfg.GOPPC64 = goppc64 + // Some pseudo-ops may assemble differently depending on GOPPC64 + testEndToEnd(t, "ppc64", "ppc64") + testEndToEnd(t, "ppc64", "ppc64_p10") + } +} + +func TestRISCVEndToEnd(t *testing.T) { + testEndToEnd(t, "riscv64", "riscv64") +} + +func TestRISCVErrors(t *testing.T) { + testErrors(t, "riscv64", "riscv64error") +} + +func TestS390XEndToEnd(t *testing.T) { + testEndToEnd(t, "s390x", "s390x") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/expr_test.go 
b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/expr_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1251594349fc01da5e074395e811a5e8da85c74d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/expr_test.go @@ -0,0 +1,121 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asm + +import ( + "cmd/asm/internal/lex" + "strings" + "testing" + "text/scanner" +) + +type exprTest struct { + input string + output int64 + atEOF bool +} + +var exprTests = []exprTest{ + // Simple + {"0", 0, true}, + {"3", 3, true}, + {"070", 8 * 7, true}, + {"0x0f", 15, true}, + {"0xFF", 255, true}, + {"9223372036854775807", 9223372036854775807, true}, // max int64 + // Unary + {"-0", 0, true}, + {"~0", -1, true}, + {"~0*0", 0, true}, + {"+3", 3, true}, + {"-3", -3, true}, + {"-9223372036854775808", -9223372036854775808, true}, // min int64 + // Binary + {"3+4", 3 + 4, true}, + {"3-4", 3 - 4, true}, + {"2|5", 2 | 5, true}, + {"3^4", 3 ^ 4, true}, + {"3*4", 3 * 4, true}, + {"14/4", 14 / 4, true}, + {"3<<4", 3 << 4, true}, + {"48>>3", 48 >> 3, true}, + {"3&9", 3 & 9, true}, + // General + {"3*2+3", 3*2 + 3, true}, + {"3+2*3", 3 + 2*3, true}, + {"3*(2+3)", 3 * (2 + 3), true}, + {"3*-(2+3)", 3 * -(2 + 3), true}, + {"3<<2+4", 3<<2 + 4, true}, + {"3<<2+4", 3<<2 + 4, true}, + {"3<<(2+4)", 3 << (2 + 4), true}, + // Junk at EOF. + {"3 x", 3, false}, + // Big number + {"4611686018427387904", 4611686018427387904, true}, +} + +func TestExpr(t *testing.T) { + p := NewParser(nil, nil, nil) // Expression evaluation uses none of these fields of the parser. 
+ for i, test := range exprTests { + p.start(lex.Tokenize(test.input)) + result := int64(p.expr()) + if result != test.output { + t.Errorf("%d: %q evaluated to %d; expected %d", i, test.input, result, test.output) + } + tok := p.next() + if test.atEOF && tok.ScanToken != scanner.EOF { + t.Errorf("%d: %q: at EOF got %s", i, test.input, tok) + } else if !test.atEOF && tok.ScanToken == scanner.EOF { + t.Errorf("%d: %q: expected not EOF but at EOF", i, test.input) + } + } +} + +type badExprTest struct { + input string + error string // Empty means no error. +} + +var badExprTests = []badExprTest{ + {"0/0", "division by zero"}, + {"3/0", "division by zero"}, + {"(1<<63)/0", "divide of value with high bit set"}, + {"3%0", "modulo by zero"}, + {"(1<<63)%0", "modulo of value with high bit set"}, + {"3<<-4", "negative left shift count"}, + {"3<<(1<<63)", "negative left shift count"}, + {"3>>-4", "negative right shift count"}, + {"3>>(1<<63)", "negative right shift count"}, + {"(1<<63)>>2", "right shift of value with high bit set"}, + {"(1<<62)>>2", ""}, + {`'\x80'`, "illegal UTF-8 encoding for character constant"}, + {"(23*4", "missing closing paren"}, + {")23*4", "unexpected ) evaluating expression"}, + {"18446744073709551616", "value out of range"}, +} + +func TestBadExpr(t *testing.T) { + for i, test := range badExprTests { + err := runBadTest(i, test, t) + if err == nil { + if test.error != "" { + t.Errorf("#%d: %q: expected error %q; got none", i, test.input, test.error) + } + continue + } + if !strings.Contains(err.Error(), test.error) { + t.Errorf("#%d: expected error %q; got %q", i, test.error, err) + continue + } + } +} + +func runBadTest(i int, test badExprTest, t *testing.T) (err error) { + p := NewParser(nil, nil, nil) // Expression evaluation uses none of these fields of the parser. 
+ p.start(lex.Tokenize(test.input)) + return tryParse(t, func() { + p.expr() + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/line_test.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/line_test.go new file mode 100644 index 0000000000000000000000000000000000000000..01b058bd956dca47b27dfce33582dbb31eb246b7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/line_test.go @@ -0,0 +1,55 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asm + +import ( + "cmd/asm/internal/lex" + "strings" + "testing" +) + +type badInstTest struct { + input, error string +} + +func TestAMD64BadInstParser(t *testing.T) { + testBadInstParser(t, "amd64", []badInstTest{ + // Test AVX512 suffixes. + {"VADDPD.A X0, X1, X2", `unknown suffix "A"`}, + {"VADDPD.A.A X0, X1, X2", `unknown suffix "A"; duplicate suffix "A"`}, + {"VADDPD.A.A.A X0, X1, X2", `unknown suffix "A"; duplicate suffix "A"`}, + {"VADDPD.A.B X0, X1, X2", `unknown suffix "A"; unknown suffix "B"`}, + {"VADDPD.Z.A X0, X1, X2", `Z suffix should be the last; unknown suffix "A"`}, + {"VADDPD.Z.Z X0, X1, X2", `Z suffix should be the last; duplicate suffix "Z"`}, + {"VADDPD.SAE.BCST X0, X1, X2", `can't combine rounding/SAE and broadcast`}, + {"VADDPD.BCST.SAE X0, X1, X2", `can't combine rounding/SAE and broadcast`}, + {"VADDPD.BCST.Z.SAE X0, X1, X2", `Z suffix should be the last; can't combine rounding/SAE and broadcast`}, + {"VADDPD.SAE.SAE X0, X1, X2", `duplicate suffix "SAE"`}, + {"VADDPD.RZ_SAE.SAE X0, X1, X2", `bad suffix combination`}, + + // BSWAP on 16-bit registers is undefined. 
See #29167, + {"BSWAPW DX", `unrecognized instruction`}, + {"BSWAPW R11", `unrecognized instruction`}, + }) +} + +func testBadInstParser(t *testing.T, goarch string, tests []badInstTest) { + for i, test := range tests { + arch, ctxt := setArch(goarch) + tokenizer := lex.NewTokenizer("", strings.NewReader(test.input+"\n"), nil) + parser := NewParser(ctxt, arch, tokenizer) + + err := tryParse(t, func() { + parser.Parse() + }) + + switch { + case err == nil: + t.Errorf("#%d: %q: want error %q; have none", i, test.input, test.error) + case !strings.Contains(err.Error(), test.error): + t.Errorf("#%d: %q: want error %q; have %q", i, test.input, test.error, err) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/operand_test.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/operand_test.go new file mode 100644 index 0000000000000000000000000000000000000000..579f5332233e0e8d2ec847df3a05f8c0ca05f2c6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/operand_test.go @@ -0,0 +1,1034 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asm + +import ( + "internal/buildcfg" + "strings" + "testing" + + "cmd/asm/internal/arch" + "cmd/asm/internal/lex" + "cmd/internal/obj" +) + +// A simple in-out test: Do we print what we parse? + +func setArch(goarch string) (*arch.Arch, *obj.Link) { + buildcfg.GOOS = "linux" // obj can handle this OS for all architectures. + buildcfg.GOARCH = goarch + architecture := arch.Set(goarch, false) + if architecture == nil { + panic("asm: unrecognized architecture " + goarch) + } + ctxt := obj.Linknew(architecture.LinkArch) + ctxt.Pkgpath = "pkg" + return architecture, ctxt +} + +func newParser(goarch string) *Parser { + architecture, ctxt := setArch(goarch) + return NewParser(ctxt, architecture, nil) +} + +// tryParse executes parse func in panicOnError=true context. 
+// parse is expected to call any parsing methods that may panic. +// Returns error gathered from recover; nil if no parse errors occurred. +// +// For unexpected panics, calls t.Fatal. +func tryParse(t *testing.T, parse func()) (err error) { + panicOnError = true + defer func() { + panicOnError = false + + e := recover() + var ok bool + if err, ok = e.(error); e != nil && !ok { + t.Fatal(e) + } + }() + + parse() + + return nil +} + +func testBadOperandParser(t *testing.T, parser *Parser, tests []badOperandTest) { + for _, test := range tests { + err := tryParse(t, func() { + parser.start(lex.Tokenize(test.input)) + addr := obj.Addr{} + parser.operand(&addr) + }) + + switch { + case err == nil: + t.Errorf("fail at %s: got no errors; expected %s\n", test.input, test.error) + case !strings.Contains(err.Error(), test.error): + t.Errorf("fail at %s: got %s; expected %s", test.input, err, test.error) + } + } +} + +func testOperandParser(t *testing.T, parser *Parser, tests []operandTest) { + for _, test := range tests { + parser.start(lex.Tokenize(test.input)) + addr := obj.Addr{} + parser.operand(&addr) + var result string + if parser.allowABI { + result = obj.DconvWithABIDetail(&emptyProg, &addr) + } else { + result = obj.Dconv(&emptyProg, &addr) + } + if result != test.output { + t.Errorf("fail at %s: got %s; expected %s\n", test.input, result, test.output) + } + } +} + +func TestAMD64OperandParser(t *testing.T) { + parser := newParser("amd64") + testOperandParser(t, parser, amd64OperandTests) + testBadOperandParser(t, parser, amd64BadOperandTests) + parser.allowABI = true + testOperandParser(t, parser, amd64RuntimeOperandTests) + testBadOperandParser(t, parser, amd64BadOperandRuntimeTests) +} + +func Test386OperandParser(t *testing.T) { + parser := newParser("386") + testOperandParser(t, parser, x86OperandTests) +} + +func TestARMOperandParser(t *testing.T) { + parser := newParser("arm") + testOperandParser(t, parser, armOperandTests) +} +func 
TestARM64OperandParser(t *testing.T) { + parser := newParser("arm64") + testOperandParser(t, parser, arm64OperandTests) +} + +func TestPPC64OperandParser(t *testing.T) { + parser := newParser("ppc64") + testOperandParser(t, parser, ppc64OperandTests) +} + +func TestMIPSOperandParser(t *testing.T) { + parser := newParser("mips") + testOperandParser(t, parser, mipsOperandTests) +} + +func TestMIPS64OperandParser(t *testing.T) { + parser := newParser("mips64") + testOperandParser(t, parser, mips64OperandTests) +} + +func TestLOONG64OperandParser(t *testing.T) { + parser := newParser("loong64") + testOperandParser(t, parser, loong64OperandTests) +} + +func TestS390XOperandParser(t *testing.T) { + parser := newParser("s390x") + testOperandParser(t, parser, s390xOperandTests) +} + +func TestFuncAddress(t *testing.T) { + type subtest struct { + arch string + tests []operandTest + } + for _, sub := range []subtest{ + {"amd64", amd64OperandTests}, + {"386", x86OperandTests}, + {"arm", armOperandTests}, + {"arm64", arm64OperandTests}, + {"ppc64", ppc64OperandTests}, + {"mips", mipsOperandTests}, + {"mips64", mips64OperandTests}, + {"loong64", loong64OperandTests}, + {"s390x", s390xOperandTests}, + } { + t.Run(sub.arch, func(t *testing.T) { + parser := newParser(sub.arch) + for _, test := range sub.tests { + parser.start(lex.Tokenize(test.input)) + name, _, ok := parser.funcAddress() + + isFuncSym := strings.HasSuffix(test.input, "(SB)") && + // Ignore static symbols. + !strings.Contains(test.input, "<>") + + wantName := "" + if isFuncSym { + // Strip $|* and (SB) and +Int. 
+ wantName = test.output[:len(test.output)-4] + if strings.HasPrefix(wantName, "$") || strings.HasPrefix(wantName, "*") { + wantName = wantName[1:] + } + if i := strings.Index(wantName, "+"); i >= 0 { + wantName = wantName[:i] + } + } + if ok != isFuncSym || name != wantName { + t.Errorf("fail at %s as function address: got %s, %v; expected %s, %v", test.input, name, ok, wantName, isFuncSym) + } + } + }) + } +} + +type operandTest struct { + input, output string +} + +type badOperandTest struct { + input, error string +} + +// Examples collected by scanning all the assembly in the standard repo. + +var amd64OperandTests = []operandTest{ + {"$(-1.0)", "$(-1.0)"}, + {"$(0.0)", "$(0.0)"}, + {"$(0x2000000+116)", "$33554548"}, + {"$(0x3F<<7)", "$8064"}, + {"$(112+8)", "$120"}, + {"$(1<<63)", "$-9223372036854775808"}, + {"$-1", "$-1"}, + {"$0", "$0"}, + {"$0-0", "$0"}, + {"$0-16", "$-16"}, + {"$0x000FFFFFFFFFFFFF", "$4503599627370495"}, + {"$0x01", "$1"}, + {"$0x02", "$2"}, + {"$0x04", "$4"}, + {"$0x3FE", "$1022"}, + {"$0x7fffffe00000", "$140737486258176"}, + {"$0xfffffffffffff001", "$-4095"}, + {"$1", "$1"}, + {"$1.0", "$(1.0)"}, + {"$10", "$10"}, + {"$1000", "$1000"}, + {"$1000000", "$1000000"}, + {"$1000000000", "$1000000000"}, + {"$__tsan_func_enter(SB)", "$__tsan_func_enter(SB)"}, + {"$main(SB)", "$main(SB)"}, + {"$masks<>(SB)", "$masks<>(SB)"}, + {"$setg_gcc<>(SB)", "$setg_gcc<>(SB)"}, + {"$shifts<>(SB)", "$shifts<>(SB)"}, + {"$~(1<<63)", "$9223372036854775807"}, + {"$~0x3F", "$-64"}, + {"$~15", "$-16"}, + {"(((8)&0xf)*4)(SP)", "32(SP)"}, + {"(((8-14)&0xf)*4)(SP)", "40(SP)"}, + {"(6+8)(AX)", "14(AX)"}, + {"(8*4)(BP)", "32(BP)"}, + {"(AX)", "(AX)"}, + {"(AX)(CX*8)", "(AX)(CX*8)"}, + {"(BP)(CX*4)", "(BP)(CX*4)"}, + {"(BP)(DX*4)", "(BP)(DX*4)"}, + {"(BP)(R8*4)", "(BP)(R8*4)"}, + {"(BX)", "(BX)"}, + {"(DI)", "(DI)"}, + {"(DI)(BX*1)", "(DI)(BX*1)"}, + {"(DX)", "(DX)"}, + {"(R9)", "(R9)"}, + {"(R9)(BX*8)", "(R9)(BX*8)"}, + {"(SI)", "(SI)"}, + {"(SI)(BX*1)", 
"(SI)(BX*1)"}, + {"(SI)(DX*1)", "(SI)(DX*1)"}, + {"(SP)", "(SP)"}, + {"(SP)(AX*4)", "(SP)(AX*4)"}, + {"32(SP)(BX*2)", "32(SP)(BX*2)"}, + {"32323(SP)(R8*4)", "32323(SP)(R8*4)"}, + {"+3(PC)", "3(PC)"}, + {"-1(DI)(BX*1)", "-1(DI)(BX*1)"}, + {"-3(PC)", "-3(PC)"}, + {"-64(SI)(BX*1)", "-64(SI)(BX*1)"}, + {"-96(SI)(BX*1)", "-96(SI)(BX*1)"}, + {"AL", "AL"}, + {"AX", "AX"}, + {"BP", "BP"}, + {"BX", "BX"}, + {"CX", "CX"}, + {"DI", "DI"}, + {"DX", "DX"}, + {"R10", "R10"}, + {"R10", "R10"}, + {"R11", "R11"}, + {"R12", "R12"}, + {"R13", "R13"}, + {"R14", "R14"}, + {"R15", "R15"}, + {"R8", "R8"}, + {"R9", "R9"}, + {"g", "R14"}, + {"SI", "SI"}, + {"SP", "SP"}, + {"X0", "X0"}, + {"X1", "X1"}, + {"X10", "X10"}, + {"X11", "X11"}, + {"X12", "X12"}, + {"X13", "X13"}, + {"X14", "X14"}, + {"X15", "X15"}, + {"X2", "X2"}, + {"X3", "X3"}, + {"X4", "X4"}, + {"X5", "X5"}, + {"X6", "X6"}, + {"X7", "X7"}, + {"X8", "X8"}, + {"X9", "X9"}, + {"_expand_key_128<>(SB)", "_expand_key_128<>(SB)"}, + {"_seek<>(SB)", "_seek<>(SB)"}, + {"a2+16(FP)", "a2+16(FP)"}, + {"addr2+24(FP)", "addr2+24(FP)"}, + {"asmcgocall<>(SB)", "asmcgocall<>(SB)"}, + {"b+24(FP)", "b+24(FP)"}, + {"b_len+32(FP)", "b_len+32(FP)"}, + {"racecall<>(SB)", "racecall<>(SB)"}, + {"rcv_name+20(FP)", "rcv_name+20(FP)"}, + {"retoffset+28(FP)", "retoffset+28(FP)"}, + {"runtime·_GetStdHandle(SB)", "runtime._GetStdHandle(SB)"}, + {"sync\u2215atomic·AddInt64(SB)", "sync/atomic.AddInt64(SB)"}, + {"timeout+20(FP)", "timeout+20(FP)"}, + {"ts+16(FP)", "ts+16(FP)"}, + {"x+24(FP)", "x+24(FP)"}, + {"x·y(SB)", "x.y(SB)"}, + {"x·y(SP)", "x.y(SP)"}, + {"x·y+8(SB)", "x.y+8(SB)"}, + {"x·y+8(SP)", "x.y+8(SP)"}, + {"y+56(FP)", "y+56(FP)"}, + {"·AddUint32(SB)", "pkg.AddUint32(SB)"}, + {"·callReflect(SB)", "pkg.callReflect(SB)"}, + {"[X0-X0]", "[X0-X0]"}, + {"[ Z9 - Z12 ]", "[Z9-Z12]"}, + {"[X0-AX]", "[X0-AX]"}, + {"[AX-X0]", "[AX-X0]"}, + {"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms. 
+} + +var amd64RuntimeOperandTests = []operandTest{ + {"$bar(SB)", "$bar(SB)"}, + {"$foo(SB)", "$foo(SB)"}, +} + +var amd64BadOperandTests = []badOperandTest{ + {"[", "register list: expected ']', found EOF"}, + {"[4", "register list: bad low register in `[4`"}, + {"[]", "register list: bad low register in `[]`"}, + {"[f-x]", "register list: bad low register in `[f`"}, + {"[r10-r13]", "register list: bad low register in `[r10`"}, + {"[k3-k6]", "register list: bad low register in `[k3`"}, + {"[X0]", "register list: expected '-' after `[X0`, found ']'"}, + {"[X0-]", "register list: bad high register in `[X0-]`"}, + {"[X0-x]", "register list: bad high register in `[X0-x`"}, + {"[X0-X1-X2]", "register list: expected ']' after `[X0-X1`, found '-'"}, + {"[X0,X3]", "register list: expected '-' after `[X0`, found ','"}, + {"[X0,X1,X2,X3]", "register list: expected '-' after `[X0`, found ','"}, + {"$foo", "ABI selector only permitted when compiling runtime, reference was to \"foo\""}, +} + +var amd64BadOperandRuntimeTests = []badOperandTest{ + {"$foo", "malformed ABI selector \"bletch\" in reference to \"foo\""}, +} + +var x86OperandTests = []operandTest{ + {"$(2.928932188134524e-01)", "$(0.29289321881345243)"}, + {"$-1", "$-1"}, + {"$0", "$0"}, + {"$0x00000000", "$0"}, + {"$runtime·badmcall(SB)", "$runtime.badmcall(SB)"}, + {"$setg_gcc<>(SB)", "$setg_gcc<>(SB)"}, + {"$~15", "$-16"}, + {"(-64*1024+104)(SP)", "-65432(SP)"}, + {"(0*4)(BP)", "(BP)"}, + {"(1*4)(DI)", "4(DI)"}, + {"(4*4)(BP)", "16(BP)"}, + {"(AX)", "(AX)"}, + {"(BP)(CX*4)", "(BP)(CX*4)"}, + {"(BP*8)", "0(BP*8)"}, + {"(BX)", "(BX)"}, + {"(SP)", "(SP)"}, + {"*AX", "AX"}, // TODO: Should make * illegal here; a simple alias for JMP AX. 
+ {"*runtime·_GetStdHandle(SB)", "*runtime._GetStdHandle(SB)"}, + {"-(4+12)(DI)", "-16(DI)"}, + {"-1(DI)(BX*1)", "-1(DI)(BX*1)"}, + {"-96(DI)(BX*1)", "-96(DI)(BX*1)"}, + {"0(AX)", "(AX)"}, + {"0(BP)", "(BP)"}, + {"0(BX)", "(BX)"}, + {"4(AX)", "4(AX)"}, + {"AL", "AL"}, + {"AX", "AX"}, + {"BP", "BP"}, + {"BX", "BX"}, + {"CX", "CX"}, + {"DI", "DI"}, + {"DX", "DX"}, + {"F0", "F0"}, + {"GS", "GS"}, + {"SI", "SI"}, + {"SP", "SP"}, + {"X0", "X0"}, + {"X1", "X1"}, + {"X2", "X2"}, + {"X3", "X3"}, + {"X4", "X4"}, + {"X5", "X5"}, + {"X6", "X6"}, + {"X7", "X7"}, + {"asmcgocall<>(SB)", "asmcgocall<>(SB)"}, + {"ax+4(FP)", "ax+4(FP)"}, + {"ptime-12(SP)", "ptime-12(SP)"}, + {"runtime·_NtWaitForSingleObject(SB)", "runtime._NtWaitForSingleObject(SB)"}, + {"s(FP)", "s(FP)"}, + {"sec+4(FP)", "sec+4(FP)"}, + {"shifts<>(SB)(CX*8)", "shifts<>(SB)(CX*8)"}, + {"x+4(FP)", "x+4(FP)"}, + {"·AddUint32(SB)", "pkg.AddUint32(SB)"}, + {"·reflectcall(SB)", "pkg.reflectcall(SB)"}, + {"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms. 
+} + +var armOperandTests = []operandTest{ + {"$0", "$0"}, + {"$256", "$256"}, + {"(R0)", "(R0)"}, + {"(R11)", "(R11)"}, + {"(g)", "(g)"}, + {"-12(R4)", "-12(R4)"}, + {"0(PC)", "0(PC)"}, + {"1024", "1024"}, + {"12(R(1))", "12(R1)"}, + {"12(R13)", "12(R13)"}, + {"R0", "R0"}, + {"R0->(32-1)", "R0->31"}, + {"R0<>R(1)", "R0>>R1"}, + {"R0@>(32-1)", "R0@>31"}, + {"R1", "R1"}, + {"R11", "R11"}, + {"R12", "R12"}, + {"R13", "R13"}, + {"R14", "R14"}, + {"R15", "R15"}, + {"R1<<2(R3)", "R1<<2(R3)"}, + {"R(1)<<2(R(3))", "R1<<2(R3)"}, + {"R2", "R2"}, + {"R3", "R3"}, + {"R4", "R4"}, + {"R(4)", "R4"}, + {"R5", "R5"}, + {"R6", "R6"}, + {"R7", "R7"}, + {"R8", "R8"}, + {"[R0,R1,g,R15]", "[R0,R1,g,R15]"}, + {"[R0-R7]", "[R0,R1,R2,R3,R4,R5,R6,R7]"}, + {"[R(0)-R(7)]", "[R0,R1,R2,R3,R4,R5,R6,R7]"}, + {"[R0]", "[R0]"}, + {"[R1-R12]", "[R1,R2,R3,R4,R5,R6,R7,R8,R9,g,R11,R12]"}, + {"armCAS64(SB)", "armCAS64(SB)"}, + {"asmcgocall<>(SB)", "asmcgocall<>(SB)"}, + {"c+28(FP)", "c+28(FP)"}, + {"g", "g"}, + {"gosave<>(SB)", "gosave<>(SB)"}, + {"retlo+12(FP)", "retlo+12(FP)"}, + {"runtime·gogo(SB)", "runtime.gogo(SB)"}, + {"·AddUint32(SB)", "pkg.AddUint32(SB)"}, + {"(R1, R3)", "(R1, R3)"}, + {"[R0,R1,g,R15", ""}, // Issue 11764 - asm hung parsing ']' missing register lists. + {"[):[o-FP", ""}, // Issue 12469 - there was no infinite loop for ARM; these are just sanity checks. + {"[):[R0-FP", ""}, + {"(", ""}, // Issue 12466 - backed up before beginning of line. 
+} + +var ppc64OperandTests = []operandTest{ + {"$((1<<63)-1)", "$9223372036854775807"}, + {"$(-64*1024)", "$-65536"}, + {"$(1024 * 8)", "$8192"}, + {"$-1", "$-1"}, + {"$-24(R4)", "$-24(R4)"}, + {"$0", "$0"}, + {"$0(R1)", "$(R1)"}, + {"$0.5", "$(0.5)"}, + {"$0x7000", "$28672"}, + {"$0x88888eef", "$2290650863"}, + {"$1", "$1"}, + {"$_main<>(SB)", "$_main<>(SB)"}, + {"$argframe(FP)", "$argframe(FP)"}, + {"$runtime·tlsg(SB)", "$runtime.tlsg(SB)"}, + {"$~3", "$-4"}, + {"(-288-3*8)(R1)", "-312(R1)"}, + {"(16)(R7)", "16(R7)"}, + {"(8)(g)", "8(g)"}, + {"(CTR)", "(CTR)"}, + {"(R0)", "(R0)"}, + {"(R3)", "(R3)"}, + {"(R4)", "(R4)"}, + {"(R5)", "(R5)"}, + {"(R5)(R6*1)", "(R5)(R6*1)"}, + {"(R5+R6)", "(R5)(R6)"}, + {"-1(R4)", "-1(R4)"}, + {"-1(R5)", "-1(R5)"}, + {"6(PC)", "6(PC)"}, + {"CR7", "CR7"}, + {"CTR", "CTR"}, + {"VS0", "VS0"}, + {"VS1", "VS1"}, + {"VS2", "VS2"}, + {"VS3", "VS3"}, + {"VS4", "VS4"}, + {"VS5", "VS5"}, + {"VS6", "VS6"}, + {"VS7", "VS7"}, + {"VS8", "VS8"}, + {"VS9", "VS9"}, + {"VS10", "VS10"}, + {"VS11", "VS11"}, + {"VS12", "VS12"}, + {"VS13", "VS13"}, + {"VS14", "VS14"}, + {"VS15", "VS15"}, + {"VS16", "VS16"}, + {"VS17", "VS17"}, + {"VS18", "VS18"}, + {"VS19", "VS19"}, + {"VS20", "VS20"}, + {"VS21", "VS21"}, + {"VS22", "VS22"}, + {"VS23", "VS23"}, + {"VS24", "VS24"}, + {"VS25", "VS25"}, + {"VS26", "VS26"}, + {"VS27", "VS27"}, + {"VS28", "VS28"}, + {"VS29", "VS29"}, + {"VS30", "VS30"}, + {"VS31", "VS31"}, + {"VS32", "VS32"}, + {"VS33", "VS33"}, + {"VS34", "VS34"}, + {"VS35", "VS35"}, + {"VS36", "VS36"}, + {"VS37", "VS37"}, + {"VS38", "VS38"}, + {"VS39", "VS39"}, + {"VS40", "VS40"}, + {"VS41", "VS41"}, + {"VS42", "VS42"}, + {"VS43", "VS43"}, + {"VS44", "VS44"}, + {"VS45", "VS45"}, + {"VS46", "VS46"}, + {"VS47", "VS47"}, + {"VS48", "VS48"}, + {"VS49", "VS49"}, + {"VS50", "VS50"}, + {"VS51", "VS51"}, + {"VS52", "VS52"}, + {"VS53", "VS53"}, + {"VS54", "VS54"}, + {"VS55", "VS55"}, + {"VS56", "VS56"}, + {"VS57", "VS57"}, + {"VS58", "VS58"}, + {"VS59", "VS59"}, + 
{"VS60", "VS60"}, + {"VS61", "VS61"}, + {"VS62", "VS62"}, + {"VS63", "VS63"}, + {"V0", "V0"}, + {"V1", "V1"}, + {"V2", "V2"}, + {"V3", "V3"}, + {"V4", "V4"}, + {"V5", "V5"}, + {"V6", "V6"}, + {"V7", "V7"}, + {"V8", "V8"}, + {"V9", "V9"}, + {"V10", "V10"}, + {"V11", "V11"}, + {"V12", "V12"}, + {"V13", "V13"}, + {"V14", "V14"}, + {"V15", "V15"}, + {"V16", "V16"}, + {"V17", "V17"}, + {"V18", "V18"}, + {"V19", "V19"}, + {"V20", "V20"}, + {"V21", "V21"}, + {"V22", "V22"}, + {"V23", "V23"}, + {"V24", "V24"}, + {"V25", "V25"}, + {"V26", "V26"}, + {"V27", "V27"}, + {"V28", "V28"}, + {"V29", "V29"}, + {"V30", "V30"}, + {"V31", "V31"}, + {"F14", "F14"}, + {"F15", "F15"}, + {"F16", "F16"}, + {"F17", "F17"}, + {"F18", "F18"}, + {"F19", "F19"}, + {"F20", "F20"}, + {"F21", "F21"}, + {"F22", "F22"}, + {"F23", "F23"}, + {"F24", "F24"}, + {"F25", "F25"}, + {"F26", "F26"}, + {"F27", "F27"}, + {"F28", "F28"}, + {"F29", "F29"}, + {"F30", "F30"}, + {"F31", "F31"}, + {"LR", "LR"}, + {"R0", "R0"}, + {"R1", "R1"}, + {"R11", "R11"}, + {"R12", "R12"}, + {"R13", "R13"}, + {"R14", "R14"}, + {"R15", "R15"}, + {"R16", "R16"}, + {"R17", "R17"}, + {"R18", "R18"}, + {"R19", "R19"}, + {"R2", "R2"}, + {"R20", "R20"}, + {"R21", "R21"}, + {"R22", "R22"}, + {"R23", "R23"}, + {"R24", "R24"}, + {"R25", "R25"}, + {"R26", "R26"}, + {"R27", "R27"}, + {"R28", "R28"}, + {"R29", "R29"}, + {"R3", "R3"}, + {"R31", "R31"}, + {"R4", "R4"}, + {"R5", "R5"}, + {"R6", "R6"}, + {"R7", "R7"}, + {"R8", "R8"}, + {"R9", "R9"}, + {"SPR(269)", "SPR(269)"}, + {"a(FP)", "a(FP)"}, + {"g", "g"}, + {"ret+8(FP)", "ret+8(FP)"}, + {"runtime·abort(SB)", "runtime.abort(SB)"}, + {"·AddUint32(SB)", "pkg.AddUint32(SB)"}, + {"·trunc(SB)", "pkg.trunc(SB)"}, + {"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms. 
+} + +var arm64OperandTests = []operandTest{ + {"$0", "$0"}, + {"$0.5", "$(0.5)"}, + {"0(R26)", "(R26)"}, + {"0(RSP)", "(RSP)"}, + {"$1", "$1"}, + {"$-1", "$-1"}, + {"$1000", "$1000"}, + {"$1000000000", "$1000000000"}, + {"$0x7fff3c000", "$34358935552"}, + {"$1234", "$1234"}, + {"$~15", "$-16"}, + {"$16", "$16"}, + {"-16(RSP)", "-16(RSP)"}, + {"16(RSP)", "16(RSP)"}, + {"1(R1)", "1(R1)"}, + {"-1(R4)", "-1(R4)"}, + {"18740(R5)", "18740(R5)"}, + {"$2", "$2"}, + {"$-24(R4)", "$-24(R4)"}, + {"-24(RSP)", "-24(RSP)"}, + {"$24(RSP)", "$24(RSP)"}, + {"-32(RSP)", "-32(RSP)"}, + {"$48", "$48"}, + {"$(-64*1024)(R7)", "$-65536(R7)"}, + {"$(8-1)", "$7"}, + {"a+0(FP)", "a(FP)"}, + {"a1+8(FP)", "a1+8(FP)"}, + {"·AddInt32(SB)", `pkg.AddInt32(SB)`}, + {"runtime·divWVW(SB)", "runtime.divWVW(SB)"}, + {"$argframe+0(FP)", "$argframe(FP)"}, + {"$asmcgocall<>(SB)", "$asmcgocall<>(SB)"}, + {"EQ", "EQ"}, + {"F29", "F29"}, + {"F3", "F3"}, + {"F30", "F30"}, + {"g", "g"}, + {"LR", "R30"}, + {"(LR)", "(R30)"}, + {"R0", "R0"}, + {"R10", "R10"}, + {"R11", "R11"}, + {"R18_PLATFORM", "R18"}, + {"$4503601774854144.0", "$(4503601774854144.0)"}, + {"$runtime·badsystemstack(SB)", "$runtime.badsystemstack(SB)"}, + {"ZR", "ZR"}, + {"(ZR)", "(ZR)"}, + {"(R29, RSP)", "(R29, RSP)"}, + {"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms. 
+} + +var mips64OperandTests = []operandTest{ + {"$((1<<63)-1)", "$9223372036854775807"}, + {"$(-64*1024)", "$-65536"}, + {"$(1024 * 8)", "$8192"}, + {"$-1", "$-1"}, + {"$-24(R4)", "$-24(R4)"}, + {"$0", "$0"}, + {"$0(R1)", "$(R1)"}, + {"$0.5", "$(0.5)"}, + {"$0x7000", "$28672"}, + {"$0x88888eef", "$2290650863"}, + {"$1", "$1"}, + {"$_main<>(SB)", "$_main<>(SB)"}, + {"$argframe(FP)", "$argframe(FP)"}, + {"$~3", "$-4"}, + {"(-288-3*8)(R1)", "-312(R1)"}, + {"(16)(R7)", "16(R7)"}, + {"(8)(g)", "8(g)"}, + {"(R0)", "(R0)"}, + {"(R3)", "(R3)"}, + {"(R4)", "(R4)"}, + {"(R5)", "(R5)"}, + {"-1(R4)", "-1(R4)"}, + {"-1(R5)", "-1(R5)"}, + {"6(PC)", "6(PC)"}, + {"F14", "F14"}, + {"F15", "F15"}, + {"F16", "F16"}, + {"F17", "F17"}, + {"F18", "F18"}, + {"F19", "F19"}, + {"F20", "F20"}, + {"F21", "F21"}, + {"F22", "F22"}, + {"F23", "F23"}, + {"F24", "F24"}, + {"F25", "F25"}, + {"F26", "F26"}, + {"F27", "F27"}, + {"F28", "F28"}, + {"F29", "F29"}, + {"F30", "F30"}, + {"F31", "F31"}, + {"R0", "R0"}, + {"R1", "R1"}, + {"R11", "R11"}, + {"R12", "R12"}, + {"R13", "R13"}, + {"R14", "R14"}, + {"R15", "R15"}, + {"R16", "R16"}, + {"R17", "R17"}, + {"R18", "R18"}, + {"R19", "R19"}, + {"R2", "R2"}, + {"R20", "R20"}, + {"R21", "R21"}, + {"R22", "R22"}, + {"R23", "R23"}, + {"R24", "R24"}, + {"R25", "R25"}, + {"R26", "R26"}, + {"R27", "R27"}, + {"R29", "R29"}, + {"R3", "R3"}, + {"R31", "R31"}, + {"R4", "R4"}, + {"R5", "R5"}, + {"R6", "R6"}, + {"R7", "R7"}, + {"R8", "R8"}, + {"R9", "R9"}, + {"LO", "LO"}, + {"a(FP)", "a(FP)"}, + {"g", "g"}, + {"RSB", "R28"}, + {"ret+8(FP)", "ret+8(FP)"}, + {"runtime·abort(SB)", "runtime.abort(SB)"}, + {"·AddUint32(SB)", "pkg.AddUint32(SB)"}, + {"·trunc(SB)", "pkg.trunc(SB)"}, + {"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms. 
+} + +var mipsOperandTests = []operandTest{ + {"$((1<<63)-1)", "$9223372036854775807"}, + {"$(-64*1024)", "$-65536"}, + {"$(1024 * 8)", "$8192"}, + {"$-1", "$-1"}, + {"$-24(R4)", "$-24(R4)"}, + {"$0", "$0"}, + {"$0(R1)", "$(R1)"}, + {"$0.5", "$(0.5)"}, + {"$0x7000", "$28672"}, + {"$0x88888eef", "$2290650863"}, + {"$1", "$1"}, + {"$_main<>(SB)", "$_main<>(SB)"}, + {"$argframe(FP)", "$argframe(FP)"}, + {"$~3", "$-4"}, + {"(-288-3*8)(R1)", "-312(R1)"}, + {"(16)(R7)", "16(R7)"}, + {"(8)(g)", "8(g)"}, + {"(R0)", "(R0)"}, + {"(R3)", "(R3)"}, + {"(R4)", "(R4)"}, + {"(R5)", "(R5)"}, + {"-1(R4)", "-1(R4)"}, + {"-1(R5)", "-1(R5)"}, + {"6(PC)", "6(PC)"}, + {"F14", "F14"}, + {"F15", "F15"}, + {"F16", "F16"}, + {"F17", "F17"}, + {"F18", "F18"}, + {"F19", "F19"}, + {"F20", "F20"}, + {"F21", "F21"}, + {"F22", "F22"}, + {"F23", "F23"}, + {"F24", "F24"}, + {"F25", "F25"}, + {"F26", "F26"}, + {"F27", "F27"}, + {"F28", "F28"}, + {"F29", "F29"}, + {"F30", "F30"}, + {"F31", "F31"}, + {"R0", "R0"}, + {"R1", "R1"}, + {"R11", "R11"}, + {"R12", "R12"}, + {"R13", "R13"}, + {"R14", "R14"}, + {"R15", "R15"}, + {"R16", "R16"}, + {"R17", "R17"}, + {"R18", "R18"}, + {"R19", "R19"}, + {"R2", "R2"}, + {"R20", "R20"}, + {"R21", "R21"}, + {"R22", "R22"}, + {"R23", "R23"}, + {"R24", "R24"}, + {"R25", "R25"}, + {"R26", "R26"}, + {"R27", "R27"}, + {"R28", "R28"}, + {"R29", "R29"}, + {"R3", "R3"}, + {"R31", "R31"}, + {"R4", "R4"}, + {"R5", "R5"}, + {"R6", "R6"}, + {"R7", "R7"}, + {"R8", "R8"}, + {"R9", "R9"}, + {"LO", "LO"}, + {"a(FP)", "a(FP)"}, + {"g", "g"}, + {"ret+8(FP)", "ret+8(FP)"}, + {"runtime·abort(SB)", "runtime.abort(SB)"}, + {"·AddUint32(SB)", "pkg.AddUint32(SB)"}, + {"·trunc(SB)", "pkg.trunc(SB)"}, + {"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms. 
+} + +var loong64OperandTests = []operandTest{ + {"$((1<<63)-1)", "$9223372036854775807"}, + {"$(-64*1024)", "$-65536"}, + {"$(1024 * 8)", "$8192"}, + {"$-1", "$-1"}, + {"$-24(R4)", "$-24(R4)"}, + {"$0", "$0"}, + {"$0(R1)", "$(R1)"}, + {"$0.5", "$(0.5)"}, + {"$0x7000", "$28672"}, + {"$0x88888eef", "$2290650863"}, + {"$1", "$1"}, + {"$_main<>(SB)", "$_main<>(SB)"}, + {"$argframe(FP)", "$argframe(FP)"}, + {"$~3", "$-4"}, + {"(-288-3*8)(R1)", "-312(R1)"}, + {"(16)(R7)", "16(R7)"}, + {"(8)(g)", "8(g)"}, + {"(R0)", "(R0)"}, + {"(R3)", "(R3)"}, + {"(R4)", "(R4)"}, + {"(R5)", "(R5)"}, + {"-1(R4)", "-1(R4)"}, + {"-1(R5)", "-1(R5)"}, + {"6(PC)", "6(PC)"}, + {"F14", "F14"}, + {"F15", "F15"}, + {"F16", "F16"}, + {"F17", "F17"}, + {"F18", "F18"}, + {"F19", "F19"}, + {"F20", "F20"}, + {"F21", "F21"}, + {"F22", "F22"}, + {"F23", "F23"}, + {"F24", "F24"}, + {"F25", "F25"}, + {"F26", "F26"}, + {"F27", "F27"}, + {"F28", "F28"}, + {"F29", "F29"}, + {"F30", "F30"}, + {"F31", "F31"}, + {"R0", "R0"}, + {"R1", "R1"}, + {"R11", "R11"}, + {"R12", "R12"}, + {"R13", "R13"}, + {"R14", "R14"}, + {"R15", "R15"}, + {"R16", "R16"}, + {"R17", "R17"}, + {"R18", "R18"}, + {"R19", "R19"}, + {"R2", "R2"}, + {"R20", "R20"}, + {"R21", "R21"}, + {"R23", "R23"}, + {"R24", "R24"}, + {"R25", "R25"}, + {"R26", "R26"}, + {"R27", "R27"}, + {"R28", "R28"}, + {"R29", "R29"}, + {"R3", "R3"}, + {"R30", "R30"}, + {"R31", "R31"}, + {"R4", "R4"}, + {"R5", "R5"}, + {"R6", "R6"}, + {"R7", "R7"}, + {"R8", "R8"}, + {"R9", "R9"}, + {"a(FP)", "a(FP)"}, + {"g", "g"}, + {"ret+8(FP)", "ret+8(FP)"}, + {"runtime·abort(SB)", "runtime.abort(SB)"}, + {"·AddUint32(SB)", "pkg.AddUint32(SB)"}, + {"·trunc(SB)", "pkg.trunc(SB)"}, + {"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms. 
+} + +var s390xOperandTests = []operandTest{ + {"$((1<<63)-1)", "$9223372036854775807"}, + {"$(-64*1024)", "$-65536"}, + {"$(1024 * 8)", "$8192"}, + {"$-1", "$-1"}, + {"$-24(R4)", "$-24(R4)"}, + {"$0", "$0"}, + {"$0(R1)", "$(R1)"}, + {"$0.5", "$(0.5)"}, + {"$0x7000", "$28672"}, + {"$0x88888eef", "$2290650863"}, + {"$1", "$1"}, + {"$_main<>(SB)", "$_main<>(SB)"}, + {"$argframe(FP)", "$argframe(FP)"}, + {"$~3", "$-4"}, + {"(-288-3*8)(R1)", "-312(R1)"}, + {"(16)(R7)", "16(R7)"}, + {"(8)(g)", "8(g)"}, + {"(R0)", "(R0)"}, + {"(R3)", "(R3)"}, + {"(R4)", "(R4)"}, + {"(R5)", "(R5)"}, + {"-1(R4)", "-1(R4)"}, + {"-1(R5)", "-1(R5)"}, + {"6(PC)", "6(PC)"}, + {"R0", "R0"}, + {"R1", "R1"}, + {"R2", "R2"}, + {"R3", "R3"}, + {"R4", "R4"}, + {"R5", "R5"}, + {"R6", "R6"}, + {"R7", "R7"}, + {"R8", "R8"}, + {"R9", "R9"}, + {"R10", "R10"}, + {"R11", "R11"}, + {"R12", "R12"}, + // {"R13", "R13"}, R13 is g + {"R14", "R14"}, + {"R15", "R15"}, + {"F0", "F0"}, + {"F1", "F1"}, + {"F2", "F2"}, + {"F3", "F3"}, + {"F4", "F4"}, + {"F5", "F5"}, + {"F6", "F6"}, + {"F7", "F7"}, + {"F8", "F8"}, + {"F9", "F9"}, + {"F10", "F10"}, + {"F11", "F11"}, + {"F12", "F12"}, + {"F13", "F13"}, + {"F14", "F14"}, + {"F15", "F15"}, + {"V0", "V0"}, + {"V1", "V1"}, + {"V2", "V2"}, + {"V3", "V3"}, + {"V4", "V4"}, + {"V5", "V5"}, + {"V6", "V6"}, + {"V7", "V7"}, + {"V8", "V8"}, + {"V9", "V9"}, + {"V10", "V10"}, + {"V11", "V11"}, + {"V12", "V12"}, + {"V13", "V13"}, + {"V14", "V14"}, + {"V15", "V15"}, + {"V16", "V16"}, + {"V17", "V17"}, + {"V18", "V18"}, + {"V19", "V19"}, + {"V20", "V20"}, + {"V21", "V21"}, + {"V22", "V22"}, + {"V23", "V23"}, + {"V24", "V24"}, + {"V25", "V25"}, + {"V26", "V26"}, + {"V27", "V27"}, + {"V28", "V28"}, + {"V29", "V29"}, + {"V30", "V30"}, + {"V31", "V31"}, + {"a(FP)", "a(FP)"}, + {"g", "g"}, + {"ret+8(FP)", "ret+8(FP)"}, + {"runtime·abort(SB)", "runtime.abort(SB)"}, + {"·AddUint32(SB)", "pkg.AddUint32(SB)"}, + {"·trunc(SB)", "pkg.trunc(SB)"}, + {"[):[o-FP", ""}, // Issue 12469 - asm hung 
parsing the o-FP range on non ARM platforms. +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/parse.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/parse.go new file mode 100644 index 0000000000000000000000000000000000000000..ef6c840dc27c40a9bd275b4557923a94a6acc090 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/parse.go @@ -0,0 +1,1469 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asm implements the parser and instruction generator for the assembler. +// TODO: Split apart? +package asm + +import ( + "fmt" + "io" + "log" + "os" + "strconv" + "strings" + "text/scanner" + "unicode/utf8" + + "cmd/asm/internal/arch" + "cmd/asm/internal/flags" + "cmd/asm/internal/lex" + "cmd/internal/obj" + "cmd/internal/obj/arm64" + "cmd/internal/obj/x86" + "cmd/internal/objabi" + "cmd/internal/src" + "cmd/internal/sys" +) + +type Parser struct { + lex lex.TokenReader + lineNum int // Line number in source file. + errorLine int // Line number of last error. + errorCount int // Number of errors. + sawCode bool // saw code in this file (as opposed to comments and blank lines) + pc int64 // virtual PC; count of Progs; doesn't advance for GLOBL or DATA. + input []lex.Token + inputPos int + pendingLabels []string // Labels to attach to next instruction. + labels map[string]*obj.Prog + toPatch []Patch + addr []obj.Addr + arch *arch.Arch + ctxt *obj.Link + firstProg *obj.Prog + lastProg *obj.Prog + dataAddr map[string]int64 // Most recent address for DATA for this symbol. + isJump bool // Instruction being assembled is a jump. + allowABI bool // Whether ABI selectors are allowed. + pkgPrefix string // Prefix to add to local symbols. 
+ errorWriter io.Writer +} + +type Patch struct { + addr *obj.Addr + label string +} + +func NewParser(ctxt *obj.Link, ar *arch.Arch, lexer lex.TokenReader) *Parser { + pkgPrefix := obj.UnlinkablePkg + if ctxt != nil { + pkgPrefix = objabi.PathToPrefix(ctxt.Pkgpath) + } + return &Parser{ + ctxt: ctxt, + arch: ar, + lex: lexer, + labels: make(map[string]*obj.Prog), + dataAddr: make(map[string]int64), + errorWriter: os.Stderr, + allowABI: ctxt != nil && objabi.LookupPkgSpecial(ctxt.Pkgpath).AllowAsmABI, + pkgPrefix: pkgPrefix, + } +} + +// panicOnError is enabled when testing to abort execution on the first error +// and turn it into a recoverable panic. +var panicOnError bool + +func (p *Parser) errorf(format string, args ...interface{}) { + if panicOnError { + panic(fmt.Errorf(format, args...)) + } + if p.lineNum == p.errorLine { + // Only one error per line. + return + } + p.errorLine = p.lineNum + if p.lex != nil { + // Put file and line information on head of message. + format = "%s:%d: " + format + "\n" + args = append([]interface{}{p.lex.File(), p.lineNum}, args...) + } + fmt.Fprintf(p.errorWriter, format, args...) + p.errorCount++ + if p.errorCount > 10 && !*flags.AllErrors { + log.Fatal("too many errors") + } +} + +func (p *Parser) pos() src.XPos { + return p.ctxt.PosTable.XPos(src.MakePos(p.lex.Base(), uint(p.lineNum), 0)) +} + +func (p *Parser) Parse() (*obj.Prog, bool) { + scratch := make([][]lex.Token, 0, 3) + for { + word, cond, operands, ok := p.line(scratch) + if !ok { + break + } + scratch = operands + + if p.pseudo(word, operands) { + continue + } + i, present := p.arch.Instructions[word] + if present { + p.instruction(i, word, cond, operands) + continue + } + p.errorf("unrecognized instruction %q", word) + } + if p.errorCount > 0 { + return nil, false + } + p.patch() + return p.firstProg, true +} + +// ParseSymABIs parses p's assembly code to find text symbol +// definitions and references and writes a symabis file to w. 
+func (p *Parser) ParseSymABIs(w io.Writer) bool { + operands := make([][]lex.Token, 0, 3) + for { + word, _, operands1, ok := p.line(operands) + if !ok { + break + } + operands = operands1 + + p.symDefRef(w, word, operands) + } + return p.errorCount == 0 +} + +// nextToken returns the next non-build-comment token from the lexer. +// It reports misplaced //go:build comments but otherwise discards them. +func (p *Parser) nextToken() lex.ScanToken { + for { + tok := p.lex.Next() + if tok == lex.BuildComment { + if p.sawCode { + p.errorf("misplaced //go:build comment") + } + continue + } + if tok != '\n' { + p.sawCode = true + } + if tok == '#' { + // A leftover wisp of a #include/#define/etc, + // to let us know that p.sawCode should be true now. + // Otherwise ignored. + continue + } + return tok + } +} + +// line consumes a single assembly line from p.lex of the form +// +// {label:} WORD[.cond] [ arg {, arg} ] (';' | '\n') +// +// It adds any labels to p.pendingLabels and returns the word, cond, +// operand list, and true. If there is an error or EOF, it returns +// ok=false. +// +// line may reuse the memory from scratch. +func (p *Parser) line(scratch [][]lex.Token) (word, cond string, operands [][]lex.Token, ok bool) { +next: + // Skip newlines. + var tok lex.ScanToken + for { + tok = p.nextToken() + // We save the line number here so error messages from this instruction + // are labeled with this line. Otherwise we complain after we've absorbed + // the terminating newline and the line numbers are off by one in errors. + p.lineNum = p.lex.Line() + switch tok { + case '\n', ';': + continue + case scanner.EOF: + return "", "", nil, false + } + break + } + // First item must be an identifier. + if tok != scanner.Ident { + p.errorf("expected identifier, found %q", p.lex.Text()) + return "", "", nil, false // Might as well stop now. + } + word, cond = p.lex.Text(), "" + operands = scratch[:0] + // Zero or more comma-separated operands, one per loop. 
+ nesting := 0 + colon := -1 + for tok != '\n' && tok != ';' { + // Process one operand. + var items []lex.Token + if cap(operands) > len(operands) { + // Reuse scratch items slice. + items = operands[:cap(operands)][len(operands)][:0] + } else { + items = make([]lex.Token, 0, 3) + } + for { + tok = p.nextToken() + if len(operands) == 0 && len(items) == 0 { + if p.arch.InFamily(sys.ARM, sys.ARM64, sys.AMD64, sys.I386) && tok == '.' { + // Suffixes: ARM conditionals or x86 modifiers. + tok = p.nextToken() + str := p.lex.Text() + if tok != scanner.Ident { + p.errorf("instruction suffix expected identifier, found %s", str) + } + cond = cond + "." + str + continue + } + if tok == ':' { + // Labels. + p.pendingLabels = append(p.pendingLabels, word) + goto next + } + } + if tok == scanner.EOF { + p.errorf("unexpected EOF") + return "", "", nil, false + } + // Split operands on comma. Also, the old syntax on x86 for a "register pair" + // was AX:DX, for which the new syntax is DX, AX. Note the reordering. + if tok == '\n' || tok == ';' || (nesting == 0 && (tok == ',' || tok == ':')) { + if tok == ':' { + // Remember this location so we can swap the operands below. + if colon >= 0 { + p.errorf("invalid ':' in operand") + return word, cond, operands, true + } + colon = len(operands) + } + break + } + if tok == '(' || tok == '[' { + nesting++ + } + if tok == ')' || tok == ']' { + nesting-- + } + items = append(items, lex.Make(tok, p.lex.Text())) + } + if len(items) > 0 { + operands = append(operands, items) + if colon >= 0 && len(operands) == colon+2 { + // AX:DX becomes DX, AX. + operands[colon], operands[colon+1] = operands[colon+1], operands[colon] + colon = -1 + } + } else if len(operands) > 0 || tok == ',' || colon >= 0 { + // Had a separator with nothing after. 
+ p.errorf("missing operand") + } + } + return word, cond, operands, true +} + +func (p *Parser) instruction(op obj.As, word, cond string, operands [][]lex.Token) { + p.addr = p.addr[0:0] + p.isJump = p.arch.IsJump(word) + for _, op := range operands { + addr := p.address(op) + if !p.isJump && addr.Reg < 0 { // Jumps refer to PC, a pseudo. + p.errorf("illegal use of pseudo-register in %s", word) + } + p.addr = append(p.addr, addr) + } + if p.isJump { + p.asmJump(op, cond, p.addr) + return + } + p.asmInstruction(op, cond, p.addr) +} + +func (p *Parser) pseudo(word string, operands [][]lex.Token) bool { + switch word { + case "DATA": + p.asmData(operands) + case "FUNCDATA": + p.asmFuncData(operands) + case "GLOBL": + p.asmGlobl(operands) + case "PCDATA": + p.asmPCData(operands) + case "PCALIGN": + p.asmPCAlign(operands) + case "TEXT": + p.asmText(operands) + default: + return false + } + return true +} + +// symDefRef scans a line for potential text symbol definitions and +// references and writes symabis information to w. +// +// The symabis format is documented at +// cmd/compile/internal/ssagen.ReadSymABIs. +func (p *Parser) symDefRef(w io.Writer, word string, operands [][]lex.Token) { + switch word { + case "TEXT": + // Defines text symbol in operands[0]. + if len(operands) > 0 { + p.start(operands[0]) + if name, abi, ok := p.funcAddress(); ok { + fmt.Fprintf(w, "def %s %s\n", name, abi) + } + } + return + case "GLOBL", "PCDATA": + // No text definitions or symbol references. + case "DATA", "FUNCDATA": + // For DATA, operands[0] is defined symbol. + // For FUNCDATA, operands[0] is an immediate constant. + // Remaining operands may have references. + if len(operands) < 2 { + return + } + operands = operands[1:] + } + // Search for symbol references. 
+ for _, op := range operands { + p.start(op) + if name, abi, ok := p.funcAddress(); ok { + fmt.Fprintf(w, "ref %s %s\n", name, abi) + } + } +} + +func (p *Parser) start(operand []lex.Token) { + p.input = operand + p.inputPos = 0 +} + +// address parses the operand into a link address structure. +func (p *Parser) address(operand []lex.Token) obj.Addr { + p.start(operand) + addr := obj.Addr{} + p.operand(&addr) + return addr +} + +// parseScale converts a decimal string into a valid scale factor. +func (p *Parser) parseScale(s string) int8 { + switch s { + case "1", "2", "4", "8": + return int8(s[0] - '0') + } + p.errorf("bad scale: %s", s) + return 0 +} + +// operand parses a general operand and stores the result in *a. +func (p *Parser) operand(a *obj.Addr) { + //fmt.Printf("Operand: %v\n", p.input) + if len(p.input) == 0 { + p.errorf("empty operand: cannot happen") + return + } + // General address (with a few exceptions) looks like + // $sym±offset(SB)(reg)(index*scale) + // Exceptions are: + // + // R1 + // offset + // $offset + // Every piece is optional, so we scan left to right and what + // we discover tells us where we are. + + // Prefix: $. + var prefix rune + switch tok := p.peek(); tok { + case '$', '*': + prefix = rune(tok) + p.next() + } + + // Symbol: sym±offset(SB) + tok := p.next() + name := tok.String() + if tok.ScanToken == scanner.Ident && !p.atStartOfRegister(name) { + switch p.arch.Family { + case sys.ARM64: + // arm64 special operands. + if opd := arch.GetARM64SpecialOperand(name); opd != arm64.SPOP_END { + a.Type = obj.TYPE_SPECIAL + a.Offset = int64(opd) + break + } + fallthrough + default: + // We have a symbol. 
Parse $sym±offset(symkind) + p.symbolReference(a, p.qualifySymbol(name), prefix) + } + // fmt.Printf("SYM %s\n", obj.Dconv(&emptyProg, 0, a)) + if p.peek() == scanner.EOF { + return + } + } + + // Special register list syntax for arm: [R1,R3-R7] + if tok.ScanToken == '[' { + if prefix != 0 { + p.errorf("illegal use of register list") + } + p.registerList(a) + p.expectOperandEnd() + return + } + + // Register: R1 + if tok.ScanToken == scanner.Ident && p.atStartOfRegister(name) { + if p.atRegisterShift() { + // ARM shifted register such as R1<>2. + a.Type = obj.TYPE_SHIFT + a.Offset = p.registerShift(tok.String(), prefix) + if p.peek() == '(' { + // Can only be a literal register here. + p.next() + tok := p.next() + name := tok.String() + if !p.atStartOfRegister(name) { + p.errorf("expected register; found %s", name) + } + a.Reg, _ = p.registerReference(name) + p.get(')') + } + } else if p.atRegisterExtension() { + a.Type = obj.TYPE_REG + p.registerExtension(a, tok.String(), prefix) + p.expectOperandEnd() + return + } else if r1, r2, scale, ok := p.register(tok.String(), prefix); ok { + if scale != 0 { + p.errorf("expected simple register reference") + } + a.Type = obj.TYPE_REG + a.Reg = r1 + if r2 != 0 { + // Form is R1:R2. It is on RHS and the second register + // needs to go into the LHS. + panic("cannot happen (Addr.Reg2)") + } + } + // fmt.Printf("REG %s\n", obj.Dconv(&emptyProg, 0, a)) + p.expectOperandEnd() + return + } + + // Constant. + haveConstant := false + switch tok.ScanToken { + case scanner.Int, scanner.Float, scanner.String, scanner.Char, '+', '-', '~': + haveConstant = true + case '(': + // Could be parenthesized expression or (R). Must be something, though. + tok := p.next() + if tok.ScanToken == scanner.EOF { + p.errorf("missing right parenthesis") + return + } + rname := tok.String() + p.back() + haveConstant = !p.atStartOfRegister(rname) + if !haveConstant { + p.back() // Put back the '('. 
+ } + } + if haveConstant { + p.back() + if p.have(scanner.Float) { + if prefix != '$' { + p.errorf("floating-point constant must be an immediate") + } + a.Type = obj.TYPE_FCONST + a.Val = p.floatExpr() + // fmt.Printf("FCONST %s\n", obj.Dconv(&emptyProg, 0, a)) + p.expectOperandEnd() + return + } + if p.have(scanner.String) { + if prefix != '$' { + p.errorf("string constant must be an immediate") + return + } + str, err := strconv.Unquote(p.get(scanner.String).String()) + if err != nil { + p.errorf("string parse error: %s", err) + } + a.Type = obj.TYPE_SCONST + a.Val = str + // fmt.Printf("SCONST %s\n", obj.Dconv(&emptyProg, 0, a)) + p.expectOperandEnd() + return + } + a.Offset = int64(p.expr()) + if p.peek() != '(' { + switch prefix { + case '$': + a.Type = obj.TYPE_CONST + case '*': + a.Type = obj.TYPE_INDIR // Can appear but is illegal, will be rejected by the linker. + default: + a.Type = obj.TYPE_MEM + } + // fmt.Printf("CONST %d %s\n", a.Offset, obj.Dconv(&emptyProg, 0, a)) + p.expectOperandEnd() + return + } + // fmt.Printf("offset %d \n", a.Offset) + } + + // Register indirection: (reg) or (index*scale). We are on the opening paren. + p.registerIndirect(a, prefix) + // fmt.Printf("DONE %s\n", p.arch.Dconv(&emptyProg, 0, a)) + + p.expectOperandEnd() + return +} + +// atStartOfRegister reports whether the parser is at the start of a register definition. +func (p *Parser) atStartOfRegister(name string) bool { + // Simple register: R10. + _, present := p.arch.Register[name] + if present { + return true + } + // Parenthesized register: R(10). + return p.arch.RegisterPrefix[name] && p.peek() == '(' +} + +// atRegisterShift reports whether we are at the start of an ARM shifted register. +// We have consumed the register or R prefix. +func (p *Parser) atRegisterShift() bool { + // ARM only. + if !p.arch.InFamily(sys.ARM, sys.ARM64) { + return false + } + // R1<<... + if lex.IsRegisterShift(p.peek()) { + return true + } + // R(1)<<... Ugly check. 
TODO: Rethink how we handle ARM register shifts to be + // less special. + if p.peek() != '(' || len(p.input)-p.inputPos < 4 { + return false + } + return p.at('(', scanner.Int, ')') && lex.IsRegisterShift(p.input[p.inputPos+3].ScanToken) +} + +// atRegisterExtension reports whether we are at the start of an ARM64 extended register. +// We have consumed the register or R prefix. +func (p *Parser) atRegisterExtension() bool { + // ARM64 only. + if p.arch.Family != sys.ARM64 { + return false + } + // R1.xxx + return p.peek() == '.' +} + +// registerReference parses a register given either the name, R10, or a parenthesized form, SPR(10). +func (p *Parser) registerReference(name string) (int16, bool) { + r, present := p.arch.Register[name] + if present { + return r, true + } + if !p.arch.RegisterPrefix[name] { + p.errorf("expected register; found %s", name) + return 0, false + } + p.get('(') + tok := p.get(scanner.Int) + num, err := strconv.ParseInt(tok.String(), 10, 16) + p.get(')') + if err != nil { + p.errorf("parsing register list: %s", err) + return 0, false + } + r, ok := p.arch.RegisterNumber(name, int16(num)) + if !ok { + p.errorf("illegal register %s(%d)", name, r) + return 0, false + } + return r, true +} + +// register parses a full register reference where there is no symbol present (as in 4(R0) or R(10) but not sym(SB)) +// including forms involving multiple registers such as R1:R2. +func (p *Parser) register(name string, prefix rune) (r1, r2 int16, scale int8, ok bool) { + // R1 or R(1) R1:R2 R1,R2 R1+R2, or R1*scale. + r1, ok = p.registerReference(name) + if !ok { + return + } + if prefix != 0 && prefix != '*' { // *AX is OK. + p.errorf("prefix %c not allowed for register: %c%s", prefix, prefix, name) + } + c := p.peek() + if c == ':' || c == ',' || c == '+' { + // 2nd register; syntax (R1+R2) etc. No two architectures agree. + // Check the architectures match the syntax. 
+ switch p.next().ScanToken { + case ',': + if !p.arch.InFamily(sys.ARM, sys.ARM64) { + p.errorf("(register,register) not supported on this architecture") + return + } + case '+': + if p.arch.Family != sys.PPC64 { + p.errorf("(register+register) not supported on this architecture") + return + } + } + name := p.next().String() + r2, ok = p.registerReference(name) + if !ok { + return + } + } + if p.peek() == '*' { + // Scale + p.next() + scale = p.parseScale(p.next().String()) + } + return r1, r2, scale, true +} + +// registerShift parses an ARM/ARM64 shifted register reference and returns the encoded representation. +// There is known to be a register (current token) and a shift operator (peeked token). +func (p *Parser) registerShift(name string, prefix rune) int64 { + if prefix != 0 { + p.errorf("prefix %c not allowed for shifted register: $%s", prefix, name) + } + // R1 op R2 or r1 op constant. + // op is: + // "<<" == 0 + // ">>" == 1 + // "->" == 2 + // "@>" == 3 + r1, ok := p.registerReference(name) + if !ok { + return 0 + } + var op int16 + switch p.next().ScanToken { + case lex.LSH: + op = 0 + case lex.RSH: + op = 1 + case lex.ARR: + op = 2 + case lex.ROT: + // following instructions on ARM64 support rotate right + // AND, ANDS, TST, BIC, BICS, EON, EOR, ORR, MVN, ORN + op = 3 + } + tok := p.next() + str := tok.String() + var count int16 + switch tok.ScanToken { + case scanner.Ident: + if p.arch.Family == sys.ARM64 { + p.errorf("rhs of shift must be integer: %s", str) + } else { + r2, ok := p.registerReference(str) + if !ok { + p.errorf("rhs of shift must be register or integer: %s", str) + } + count = (r2&15)<<8 | 1<<4 + } + case scanner.Int, '(': + p.back() + x := int64(p.expr()) + if p.arch.Family == sys.ARM64 { + if x >= 64 { + p.errorf("register shift count too large: %s", str) + } + count = int16((x & 63) << 10) + } else { + if x >= 32 { + p.errorf("register shift count too large: %s", str) + } + count = int16((x & 31) << 7) + } + default: + 
p.errorf("unexpected %s in register shift", tok.String()) + } + if p.arch.Family == sys.ARM64 { + off, err := arch.ARM64RegisterShift(r1, op, count) + if err != nil { + p.errorf(err.Error()) + } + return off + } else { + return int64((r1 & 15) | op<<5 | count) + } +} + +// registerExtension parses a register with extension or arrangement. +// There is known to be a register (current token) and an extension operator (peeked token). +func (p *Parser) registerExtension(a *obj.Addr, name string, prefix rune) { + if prefix != 0 { + p.errorf("prefix %c not allowed for shifted register: $%s", prefix, name) + } + + reg, ok := p.registerReference(name) + if !ok { + p.errorf("unexpected %s in register extension", name) + return + } + + isIndex := false + num := int16(0) + isAmount := true // Amount is zero by default + ext := "" + if p.peek() == lex.LSH { + // (Rn)(Rm<<2), the shifted offset register. + ext = "LSL" + } else { + // (Rn)(Rm.UXTW<1), the extended offset register. + // Rm.UXTW<<3, the extended register. + p.get('.') + tok := p.next() + ext = tok.String() + } + if p.peek() == lex.LSH { + // parses left shift amount applied after extension: < (indicates a static symbol) or + // (selecting text symbol with specific ABI). + doIssueError := true + isStatic, abi := p.symRefAttrs(name, doIssueError) + + if p.peek() == '+' || p.peek() == '-' { + a.Offset = int64(p.expr()) + } + if isStatic { + a.Sym = p.ctxt.LookupStatic(name) + } else { + a.Sym = p.ctxt.LookupABI(name, abi) + } + if p.peek() == scanner.EOF { + if prefix == 0 && p.isJump { + // Symbols without prefix or suffix are jump labels. + return + } + p.errorf("illegal or missing addressing mode for symbol %s", name) + return + } + // Expect (SB), (FP), (PC), or (SP) + p.get('(') + reg := p.get(scanner.Ident).String() + p.get(')') + p.setPseudoRegister(a, reg, isStatic, prefix) +} + +// setPseudoRegister sets the NAME field of addr for a pseudo-register reference such as (SB). 
+func (p *Parser) setPseudoRegister(addr *obj.Addr, reg string, isStatic bool, prefix rune) { + if addr.Reg != 0 { + p.errorf("internal error: reg %s already set in pseudo", reg) + } + switch reg { + case "FP": + addr.Name = obj.NAME_PARAM + case "PC": + if prefix != 0 { + p.errorf("illegal addressing mode for PC") + } + addr.Type = obj.TYPE_BRANCH // We set the type and leave NAME untouched. See asmJump. + case "SB": + addr.Name = obj.NAME_EXTERN + if isStatic { + addr.Name = obj.NAME_STATIC + } + case "SP": + addr.Name = obj.NAME_AUTO // The pseudo-stack. + default: + p.errorf("expected pseudo-register; found %s", reg) + } + if prefix == '$' { + addr.Type = obj.TYPE_ADDR + } +} + +// symRefAttrs parses an optional function symbol attribute clause for +// the function symbol 'name', logging an error for a malformed +// attribute clause if 'issueError' is true. The return value is a +// (boolean, ABI) pair indicating that the named symbol is either +// static or a particular ABI specification. +// +// The expected form of the attribute clause is: +// +// empty, yielding (false, obj.ABI0) +// "<>", yielding (true, obj.ABI0) +// "" yielding (false, obj.ABI0) +// "" yielding (false, obj.ABIInternal) +// +// Anything else beginning with "<" logs an error if issueError is +// true, otherwise returns (false, obj.ABI0). 
+func (p *Parser) symRefAttrs(name string, issueError bool) (bool, obj.ABI) { + abi := obj.ABI0 + isStatic := false + if p.peek() != '<' { + return isStatic, abi + } + p.next() + tok := p.peek() + if tok == '>' { + isStatic = true + } else if tok == scanner.Ident { + abistr := p.get(scanner.Ident).String() + if !p.allowABI { + if issueError { + p.errorf("ABI selector only permitted when compiling runtime, reference was to %q", name) + } + } else { + theabi, valid := obj.ParseABI(abistr) + if !valid { + if issueError { + p.errorf("malformed ABI selector %q in reference to %q", + abistr, name) + } + } else { + abi = theabi + } + } + } + p.get('>') + return isStatic, abi +} + +// funcAddress parses an external function address. This is a +// constrained form of the operand syntax that's always SB-based, +// non-static, and has at most a simple integer offset: +// +// [$|*]sym[][+Int](SB) +func (p *Parser) funcAddress() (string, obj.ABI, bool) { + switch p.peek() { + case '$', '*': + // Skip prefix. + p.next() + } + + tok := p.next() + name := tok.String() + if tok.ScanToken != scanner.Ident || p.atStartOfRegister(name) { + return "", obj.ABI0, false + } + name = p.qualifySymbol(name) + // Parse optional <> (indicates a static symbol) or + // (selecting text symbol with specific ABI). + noErrMsg := false + isStatic, abi := p.symRefAttrs(name, noErrMsg) + if isStatic { + return "", obj.ABI0, false // This function rejects static symbols. + } + tok = p.next() + if tok.ScanToken == '+' { + if p.next().ScanToken != scanner.Int { + return "", obj.ABI0, false + } + tok = p.next() + } + if tok.ScanToken != '(' { + return "", obj.ABI0, false + } + if reg := p.next(); reg.ScanToken != scanner.Ident || reg.String() != "SB" { + return "", obj.ABI0, false + } + if p.next().ScanToken != ')' || p.peek() != scanner.EOF { + return "", obj.ABI0, false + } + return name, abi, true +} + +// registerIndirect parses the general form of a register indirection. 
+// It can be (R1), (R2*scale), (R1)(R2*scale), (R1)(R2.SXTX<<3) or (R1)(R2<<3) +// where R1 may be a simple register or register pair R:R or (R, R) or (R+R). +// Or it might be a pseudo-indirection like (FP). +// We are sitting on the opening parenthesis. +func (p *Parser) registerIndirect(a *obj.Addr, prefix rune) { + p.get('(') + tok := p.next() + name := tok.String() + r1, r2, scale, ok := p.register(name, 0) + if !ok { + p.errorf("indirect through non-register %s", tok) + } + p.get(')') + a.Type = obj.TYPE_MEM + if r1 < 0 { + // Pseudo-register reference. + if r2 != 0 { + p.errorf("cannot use pseudo-register in pair") + return + } + // For SB, SP, and FP, there must be a name here. 0(FP) is not legal. + if name != "PC" && a.Name == obj.NAME_NONE { + p.errorf("cannot reference %s without a symbol", name) + } + p.setPseudoRegister(a, name, false, prefix) + return + } + a.Reg = r1 + if r2 != 0 { + // TODO: Consistency in the encoding would be nice here. + if p.arch.InFamily(sys.ARM, sys.ARM64) { + // Special form + // ARM: destination register pair (R1, R2). + // ARM64: register pair (R1, R2) for LDP/STP. + if prefix != 0 || scale != 0 { + p.errorf("illegal address mode for register pair") + return + } + a.Type = obj.TYPE_REGREG + a.Offset = int64(r2) + // Nothing may follow + return + } + if p.arch.Family == sys.PPC64 { + // Special form for PPC64: (R1+R2); alias for (R1)(R2). + if prefix != 0 || scale != 0 { + p.errorf("illegal address mode for register+register") + return + } + a.Type = obj.TYPE_MEM + a.Scale = 0 + a.Index = r2 + // Nothing may follow. + return + } + } + if r2 != 0 { + p.errorf("indirect through register pair") + } + if prefix == '$' { + a.Type = obj.TYPE_ADDR + } + if r1 == arch.RPC && prefix != 0 { + p.errorf("illegal addressing mode for PC") + } + if scale == 0 && p.peek() == '(' { + // General form (R)(R*scale). 
+ p.next() + tok := p.next() + if p.atRegisterExtension() { + p.registerExtension(a, tok.String(), prefix) + } else if p.atRegisterShift() { + // (R1)(R2<<3) + p.registerExtension(a, tok.String(), prefix) + } else { + r1, r2, scale, ok = p.register(tok.String(), 0) + if !ok { + p.errorf("indirect through non-register %s", tok) + } + if r2 != 0 { + p.errorf("unimplemented two-register form") + } + a.Index = r1 + if scale != 0 && scale != 1 && (p.arch.Family == sys.ARM64 || + p.arch.Family == sys.PPC64) { + // Support (R1)(R2) (no scaling) and (R1)(R2*1). + p.errorf("%s doesn't support scaled register format", p.arch.Name) + } else { + a.Scale = int16(scale) + } + } + p.get(')') + } else if scale != 0 { + if p.arch.Family == sys.ARM64 { + p.errorf("arm64 doesn't support scaled register format") + } + // First (R) was missing, all we have is (R*scale). + a.Reg = 0 + a.Index = r1 + a.Scale = int16(scale) + } +} + +// registerList parses an ARM or ARM64 register list expression, a list of +// registers in []. There may be comma-separated ranges or individual +// registers, as in [R1,R3-R5] or [V1.S4, V2.S4, V3.S4, V4.S4]. +// For ARM, only R0 through R15 may appear. +// For ARM64, V0 through V31 with arrangement may appear. +// +// For 386/AMD64 register list specifies 4VNNIW-style multi-source operand. +// For range of 4 elements, Intel manual uses "+3" notation, for example: +// +// VP4DPWSSDS zmm1{k1}{z}, zmm2+3, m128 +// +// Given asm line: +// +// VP4DPWSSDS Z5, [Z10-Z13], (AX) +// +// zmm2 is Z10, and Z13 is the only valid value for it (Z10+3). +// Only simple ranges are accepted, like [Z0-Z3]. +// +// The opening bracket has been consumed. +func (p *Parser) registerList(a *obj.Addr) { + if p.arch.InFamily(sys.I386, sys.AMD64) { + p.registerListX86(a) + } else { + p.registerListARM(a) + } +} + +func (p *Parser) registerListARM(a *obj.Addr) { + // One range per loop. 
+ var maxReg int + var bits uint16 + var arrangement int64 + switch p.arch.Family { + case sys.ARM: + maxReg = 16 + case sys.ARM64: + maxReg = 32 + default: + p.errorf("unexpected register list") + } + firstReg := -1 + nextReg := -1 + regCnt := 0 +ListLoop: + for { + tok := p.next() + switch tok.ScanToken { + case ']': + break ListLoop + case scanner.EOF: + p.errorf("missing ']' in register list") + return + } + switch p.arch.Family { + case sys.ARM64: + // Vn.T + name := tok.String() + r, ok := p.registerReference(name) + if !ok { + p.errorf("invalid register: %s", name) + } + reg := r - p.arch.Register["V0"] + p.get('.') + tok := p.next() + ext := tok.String() + curArrangement, err := arch.ARM64RegisterArrangement(reg, name, ext) + if err != nil { + p.errorf(err.Error()) + } + if firstReg == -1 { + // only record the first register and arrangement + firstReg = int(reg) + nextReg = firstReg + arrangement = curArrangement + } else if curArrangement != arrangement { + p.errorf("inconsistent arrangement in ARM64 register list") + } else if nextReg != int(reg) { + p.errorf("incontiguous register in ARM64 register list: %s", name) + } + regCnt++ + nextReg = (nextReg + 1) % 32 + case sys.ARM: + // Parse the upper and lower bounds. + lo := p.registerNumber(tok.String()) + hi := lo + if p.peek() == '-' { + p.next() + hi = p.registerNumber(p.next().String()) + } + if hi < lo { + lo, hi = hi, lo + } + // Check there are no duplicates in the register list. 
+ for i := 0; lo <= hi && i < maxReg; i++ { + if bits&(1<>' | '<<' | '&') factor +func (p *Parser) term() uint64 { + value := p.factor() + for { + switch p.peek() { + case '*': + p.next() + value *= p.factor() + case '/': + p.next() + if int64(value) < 0 { + p.errorf("divide of value with high bit set") + } + divisor := p.factor() + if divisor == 0 { + p.errorf("division by zero") + } else { + value /= divisor + } + case '%': + p.next() + divisor := p.factor() + if int64(value) < 0 { + p.errorf("modulo of value with high bit set") + } + if divisor == 0 { + p.errorf("modulo by zero") + } else { + value %= divisor + } + case lex.LSH: + p.next() + shift := p.factor() + if int64(shift) < 0 { + p.errorf("negative left shift count") + } + return value << shift + case lex.RSH: + p.next() + shift := p.term() + if int64(shift) < 0 { + p.errorf("negative right shift count") + } + if int64(value) < 0 { + p.errorf("right shift of value with high bit set") + } + value >>= shift + case '&': + p.next() + value &= p.factor() + default: + return value + } + } +} + +// factor = const | '+' factor | '-' factor | '~' factor | '(' expr ')' +func (p *Parser) factor() uint64 { + tok := p.next() + switch tok.ScanToken { + case scanner.Int: + return p.atoi(tok.String()) + case scanner.Char: + str, err := strconv.Unquote(tok.String()) + if err != nil { + p.errorf("%s", err) + } + r, w := utf8.DecodeRuneInString(str) + if w == 1 && r == utf8.RuneError { + p.errorf("illegal UTF-8 encoding for character constant") + } + return uint64(r) + case '+': + return +p.factor() + case '-': + return -p.factor() + case '~': + return ^p.factor() + case '(': + v := p.expr() + if p.next().ScanToken != ')' { + p.errorf("missing closing paren") + } + return v + } + p.errorf("unexpected %s evaluating expression", tok) + return 0 +} + +// positiveAtoi returns an int64 that must be >= 0. 
+func (p *Parser) positiveAtoi(str string) int64 { + value, err := strconv.ParseInt(str, 0, 64) + if err != nil { + p.errorf("%s", err) + } + if value < 0 { + p.errorf("%s overflows int64", str) + } + return value +} + +func (p *Parser) atoi(str string) uint64 { + value, err := strconv.ParseUint(str, 0, 64) + if err != nil { + p.errorf("%s", err) + } + return value +} + +func (p *Parser) atof(str string) float64 { + value, err := strconv.ParseFloat(str, 64) + if err != nil { + p.errorf("%s", err) + } + return value +} + +// EOF represents the end of input. +var EOF = lex.Make(scanner.EOF, "EOF") + +func (p *Parser) next() lex.Token { + if !p.more() { + return EOF + } + tok := p.input[p.inputPos] + p.inputPos++ + return tok +} + +func (p *Parser) back() { + if p.inputPos == 0 { + p.errorf("internal error: backing up before BOL") + } else { + p.inputPos-- + } +} + +func (p *Parser) peek() lex.ScanToken { + if p.more() { + return p.input[p.inputPos].ScanToken + } + return scanner.EOF +} + +func (p *Parser) more() bool { + return p.inputPos < len(p.input) +} + +// get verifies that the next item has the expected type and returns it. +func (p *Parser) get(expected lex.ScanToken) lex.Token { + p.expect(expected, expected.String()) + return p.next() +} + +// expectOperandEnd verifies that the parsing state is properly at the end of an operand. +func (p *Parser) expectOperandEnd() { + p.expect(scanner.EOF, "end of operand") +} + +// expect verifies that the next item has the expected type. It does not consume it. +func (p *Parser) expect(expectedToken lex.ScanToken, expectedMessage string) { + if p.peek() != expectedToken { + p.errorf("expected %s, found %s", expectedMessage, p.next()) + } +} + +// have reports whether the remaining tokens (including the current one) contain the specified token. 
+func (p *Parser) have(token lex.ScanToken) bool { + for i := p.inputPos; i < len(p.input); i++ { + if p.input[i].ScanToken == token { + return true + } + } + return false +} + +// at reports whether the next tokens are as requested. +func (p *Parser) at(next ...lex.ScanToken) bool { + if len(p.input)-p.inputPos < len(next) { + return false + } + for i, r := range next { + if p.input[p.inputPos+i].ScanToken != r { + return false + } + } + return true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/pseudo_test.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/pseudo_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b9be6a7b2db5b99726031f46aad0449cf0bb2497 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/pseudo_test.go @@ -0,0 +1,102 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asm + +import ( + "strings" + "testing" + + "cmd/asm/internal/lex" +) + +func tokenize(s string) [][]lex.Token { + res := [][]lex.Token{} + if len(s) == 0 { + return res + } + for _, o := range strings.Split(s, ",") { + res = append(res, lex.Tokenize(o)) + } + return res +} + +func TestErroneous(t *testing.T) { + + type errtest struct { + pseudo string + operands string + expected string + } + + nonRuntimeTests := []errtest{ + {"TEXT", "", "expect two or three operands for TEXT"}, + {"TEXT", "%", "expect two or three operands for TEXT"}, + {"TEXT", "1, 1", "TEXT symbol \"\" must be a symbol(SB)"}, + {"TEXT", "$\"foo\", 0, $1", "TEXT symbol \"\" must be a symbol(SB)"}, + {"TEXT", "$0É:0, 0, $1", "expected end of operand, found É"}, // Issue #12467. + {"TEXT", "$:0:(SB, 0, $1", "expected '(', found 0"}, // Issue 12468. + {"TEXT", "@B(SB),0,$0", "expected '(', found B"}, // Issue 23580. 
+ {"TEXT", "foo(SB),0", "ABI selector only permitted when compiling runtime, reference was to \"foo\""}, + {"FUNCDATA", "", "expect two operands for FUNCDATA"}, + {"FUNCDATA", "(SB ", "expect two operands for FUNCDATA"}, + {"DATA", "", "expect two operands for DATA"}, + {"DATA", "0", "expect two operands for DATA"}, + {"DATA", "(0), 1", "expect /size for DATA argument"}, + {"DATA", "@B(SB)/4,0", "expected '(', found B"}, // Issue 23580. + {"DATA", "·A(SB)/4,0", "DATA value must be an immediate constant or address"}, + {"DATA", "·B(SB)/4,$0", ""}, + {"DATA", "·C(SB)/5,$0", "bad int size for DATA argument: 5"}, + {"DATA", "·D(SB)/5,$0.0", "bad float size for DATA argument: 5"}, + {"DATA", "·E(SB)/4,$·A(SB)", "bad addr size for DATA argument: 4"}, + {"DATA", "·F(SB)/8,$·A(SB)", ""}, + {"DATA", "·G(SB)/5,$\"abcde\"", ""}, + {"GLOBL", "", "expect two or three operands for GLOBL"}, + {"GLOBL", "0,1", "GLOBL symbol \"\" must be a symbol(SB)"}, + {"GLOBL", "@B(SB), 0", "expected '(', found B"}, // Issue 23580. + {"PCDATA", "", "expect two operands for PCDATA"}, + {"PCDATA", "1", "expect two operands for PCDATA"}, + } + + runtimeTests := []errtest{ + {"TEXT", "foo(SB),0", "TEXT \"foo\": ABIInternal requires NOSPLIT"}, + } + + testcats := []struct { + allowABI bool + tests []errtest + }{ + { + allowABI: false, + tests: nonRuntimeTests, + }, + { + allowABI: true, + tests: runtimeTests, + }, + } + + // Note these errors should be independent of the architecture. + // Just run the test with amd64. 
+ parser := newParser("amd64") + var buf strings.Builder + parser.errorWriter = &buf + + for _, cat := range testcats { + for _, test := range cat.tests { + parser.allowABI = cat.allowABI + parser.errorCount = 0 + parser.lineNum++ + if !parser.pseudo(test.pseudo, tokenize(test.operands)) { + t.Fatalf("Wrong pseudo-instruction: %s", test.pseudo) + } + errorLine := buf.String() + if test.expected != errorLine { + t.Errorf("Unexpected error %q; expected %q", errorLine, test.expected) + } + buf.Reset() + } + } + +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/386.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/386.s new file mode 100644 index 0000000000000000000000000000000000000000..e0855f5e4b66cf7d61f30bd81ab91dbb50d1fce8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/386.s @@ -0,0 +1,98 @@ +// This input was created by taking the instruction productions in +// the old assembler's (8a's) grammar and hand-writing complete +// instructions for each rule, to guarantee we cover the same space. + +#include "../../../../../runtime/textflag.h" + +TEXT foo(SB), DUPOK|NOSPLIT, $0 + +// LTYPE1 nonrem { outcode(int($1), &$2); } + SETCC AX + SETCC foo+4(SB) + +// LTYPE2 rimnon { outcode(int($1), &$2); } + DIVB AX + DIVB foo+4(SB) + PUSHL $foo+4(SB) + POPL AX + +// LTYPE3 rimrem { outcode(int($1), &$2); } + SUBB $1, AX + SUBB $1, foo+4(SB) + SUBB BX, AX + SUBB BX, foo+4(SB) + +// LTYPE4 remrim { outcode(int($1), &$2); } + CMPB AX, $1 + CMPB foo+4(SB), $4 + CMPB BX, AX + CMPB foo+4(SB), BX + +// LTYPER nonrel { outcode(int($1), &$2); } +label: + JC label // JCS + JC -1(PC) // JCS -1(PC) + +// LTYPEC spec3 { outcode(int($1), &$2); } + CALL AX + JCS 2(PC) + JMP *AX // JMP AX + CALL *foo(SB) + JCS 2(PC) + JMP $4 + JCS 2(PC) + JMP label // JMP 16 + CALL foo(SB) +// CALL (AX*4) // TODO: This line is silently dropped on the floor! 
+ CALL foo+4(SB)(AX*4) + CALL *4(SP) // CALL 4(SP) + CALL *(AX) // CALL (AX) + CALL *(SP) // CALL (SP) +// CALL *(AX*4) // TODO: This line is silently dropped on the floor! + CALL *(AX)(AX*4) // CALL (AX)(AX*4) + CALL 4(SP) + CALL (AX) + CALL (SP) +// CALL (AX*4) // TODO: This line is silently dropped on the floor! + JCS 2(PC) + JMP (AX)(AX*4) + +// LTYPEN spec4 { outcode(int($1), &$2); } + NOP + NOP AX + NOP foo+4(SB) + +// LTYPES spec5 { outcode(int($1), &$2); } + SHLL $4, BX + SHLL $4, foo+4(SB) + SHLL $4, foo+4(SB):AX // SHLL $4, AX, foo+4(SB) + +// LTYPEM spec6 { outcode(int($1), &$2); } + MOVL AX, BX + MOVL $4, BX + +// LTYPEI spec7 { outcode(int($1), &$2); } + IMULL AX + IMULL $4, CX + IMULL AX, BX + +// LTYPEXC spec9 { outcode(int($1), &$2); } + CMPPD X0, X1, 4 + CMPPD foo+4(SB), X1, 4 + +// LTYPEX spec10 { outcode(int($1), &$2); } + PINSRD $1, (AX), X0 + PINSRD $2, foo+4(FP), X0 + +// Was bug: LOOP is a branch instruction. + JCS 2(PC) +loop: + LOOP loop // LOOP + +// Tests for TLS reference. + MOVL (TLS), AX + MOVL 8(TLS), DX + +// LTYPE0 nonnon { outcode(int($1), &$2); } + RET + RET foo(SB) diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/386enc.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/386enc.s new file mode 100644 index 0000000000000000000000000000000000000000..aacb40793e7d89e87dd16b9cb806a970fc3f2d27 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/386enc.s @@ -0,0 +1,40 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "../../../../../runtime/textflag.h" + +TEXT asmtest(SB),DUPOK|NOSPLIT,$0 + // Instructions that were encoded with BYTE sequences. + // Included to simplify validation of CL that fixed that. 
+ MOVQ (AX), M0 // 0f6f00 + MOVQ M0, 8(SP) // 0f7f442408 + MOVQ 8(SP), M0 // 0f6f442408 + MOVQ M0, (AX) // 0f7f00 + MOVQ M0, (BX) // 0f7f03 + // On non-64bit arch, Go asm allowed uint32 offsets instead of int32. + // These tests check that property for backwards-compatibility. + MOVL 2147483648(AX), AX // 8b8000000080 + MOVL -2147483648(AX), AX // 8b8000000080 + ADDL 2147483648(AX), AX // 038000000080 + ADDL -2147483648(AX), AX // 038000000080 + // Make sure MOV CR/DR continues to work after changing its movtabs. + MOVL CR0, AX // 0f20c0 + MOVL CR0, DX // 0f20c2 + MOVL CR4, DI // 0f20e7 + MOVL AX, CR0 // 0f22c0 + MOVL DX, CR0 // 0f22c2 + MOVL DI, CR4 // 0f22e7 + MOVL DR0, AX // 0f21c0 + MOVL DR6, DX // 0f21f2 + MOVL DR7, SI // 0f21fe + // Test other movtab entries. + PUSHL SS // 16 + PUSHL FS // 0fa0 + POPL FS // 0fa1 + POPL SS // 17 + + RDPID AX // f30fc7f8 + + // End of tests. + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/amd64.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..1dec7f4135a541c0c881ade29b900c281868ef7d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/amd64.s @@ -0,0 +1,152 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This input was created by taking the instruction productions in +// the old assembler's (6a's) grammar and hand-writing complete +// instructions for each rule, to guarantee we cover the same space. 
+ +#include "../../../../../runtime/textflag.h" + +TEXT foo(SB), DUPOK|NOSPLIT, $0 + +// LTYPE1 nonrem { outcode($1, &$2); } + NEGQ R11 + NEGQ 4(R11) + NEGQ foo+4(SB) + +// LTYPE2 rimnon { outcode($1, &$2); } + INT $4 + DIVB R11 + DIVB 4(R11) + DIVB foo+4(SB) + +// LTYPE3 rimrem { outcode($1, &$2); } + SUBQ $4, DI + SUBQ R11, DI + SUBQ 4(R11), DI + SUBQ foo+4(SB), DI + SUBQ $4, 8(R12) + SUBQ R11, 8(R12) + SUBQ R11, foo+4(SB) + +// LTYPE4 remrim { outcode($1, &$2); } + CMPB CX, $4 + +// LTYPER nonrel { outcode($1, &$2); } +label: + JB -4(PC) // JCS -4(PC) + JB label // JCS 17 + +// LTYPEC spec3 { outcode($1, &$2); } + JCS 2(PC) + JMP -4(PC) + JCS 2(PC) + JMP label // JMP 17 + JCS 2(PC) + JMP foo+4(SB) + JCS 2(PC) + JMP bar<>+4(SB) + JCS 2(PC) + JMP bar<>+4(SB)(R11*4) + JCS 2(PC) + JMP *4(SP) // JMP 4(SP) + JCS 2(PC) + JMP *(R12) // JMP (R12) + JCS 2(PC) +// JMP *(R12*4) // TODO: This line is silently dropped on the floor! + JCS 2(PC) + JMP *(R12)(R13*4) // JMP (R12)(R13*4) + JCS 2(PC) + JMP *(AX) // JMP (AX) + JCS 2(PC) + JMP *(SP) // JMP (SP) + JCS 2(PC) +// JMP *(AX*4) // TODO: This line is silently dropped on the floor! + JCS 2(PC) + JMP *(AX)(AX*4) // JMP (AX)(AX*4) + JCS 2(PC) + JMP 4(SP) + JCS 2(PC) + JMP (R12) + JCS 2(PC) +// JMP (R12*4) // TODO: This line is silently dropped on the floor! + JCS 2(PC) + JMP (R12)(R13*4) + JCS 2(PC) + JMP (AX) + JCS 2(PC) + JMP (SP) + JCS 2(PC) +// JMP (AX*4) // TODO: This line is silently dropped on the floor! + JCS 2(PC) + JMP (AX)(AX*4) + JCS 2(PC) + JMP R13 + +// LTYPEN spec4 { outcode($1, &$2); } + NOP + NOP AX + NOP foo+4(SB) + +// LTYPES spec5 { outcode($1, &$2); } + SHLL CX, R12 + SHLL CX, foo+4(SB) + // Old syntax, still accepted: + SHLL CX, R11:AX // SHLL CX, AX, R11 + +// LTYPEM spec6 { outcode($1, &$2); } + MOVL AX, R11 + MOVL $4, R11 +// MOVL AX, 0(AX):DS // no longer works - did it ever? 
+ +// LTYPEI spec7 { outcode($1, &$2); } + IMULB DX + IMULW DX, BX + IMULL R11, R12 + IMULQ foo+4(SB), R11 + +// LTYPEXC spec8 { outcode($1, &$2); } + CMPPD X1, X2, 4 + CMPPD foo+4(SB), X2, 4 + +// LTYPEX spec9 { outcode($1, &$2); } + PINSRW $4, AX, X2 + PINSRW $4, foo+4(SB), X2 + +// LTYPERT spec10 { outcode($1, &$2); } + JCS 2(PC) + RETFL $4 + +// Was bug: LOOP is a branch instruction. + JCS 2(PC) +loop: + LOOP loop // LOOP + + // Intel pseudonyms for our own renamings. + PADDD M2, M1 // PADDL M2, M1 + MOVDQ2Q X1, M1 // MOVQ X1, M1 + MOVNTDQ X1, (AX) // MOVNTO X1, (AX) + MOVOA (AX), X1 // MOVO (AX), X1 + +// Tests for SP indexed addresses. + MOVQ foo(SP)(AX*1), BX // 488b1c04 + MOVQ foo+32(SP)(CX*2), DX // 488b544c20 + MOVQ foo+32323(SP)(R8*4), R9 // 4e8b8c84437e0000 + MOVL foo(SP)(SI*8), DI // 8b3cf4 + MOVL foo+32(SP)(R10*1), R11 // 468b5c1420 + MOVL foo+32323(SP)(R12*2), R13 // 468bac64437e0000 + MOVW foo(SP)(AX*4), R8 // 66448b0484 + MOVW foo+32(SP)(R9*8), CX // 66428b4ccc20 + MOVW foo+32323(SP)(AX*1), DX // 668b9404437e0000 + MOVB foo(SP)(AX*2), AL // 8a0444 + MOVB foo+32(SP)(CX*4), AH // 8a648c20 + MOVB foo+32323(SP)(CX*8), R9 // 448a8ccc437e0000 + +// Tests for TLS reference. + MOVQ (TLS), AX + MOVQ 8(TLS), DX + +// LTYPE0 nonnon { outcode($1, &$2); } + RET // c3 + RET foo(SB) diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/amd64dynlinkerror.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/amd64dynlinkerror.s new file mode 100644 index 0000000000000000000000000000000000000000..4bf58a39a4326615b5cf94875dce0faa7f316bf0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/amd64dynlinkerror.s @@ -0,0 +1,171 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Test to make sure that if we use R15 after it is clobbered by +// a global variable access while dynamic linking, we get an error. +// See issue 43661. + +TEXT ·a1(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + MOVL $0, R15 + RET +TEXT ·a2(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + MOVQ $0, R15 + RET +TEXT ·a3(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + XORL R15, R15 + RET +TEXT ·a4(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + XORQ R15, R15 + RET +TEXT ·a5(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + XORL R15, R15 + RET +TEXT ·a6(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + POPQ R15 + PUSHQ R15 + RET +TEXT ·a7(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + MOVQ R15, AX // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" + RET +TEXT ·a8(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + ADDQ AX, R15 // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" + RET +TEXT ·a9(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + ORQ R15, R15 // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" + RET +TEXT ·a10(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + JEQ one + ORQ R15, R15 // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" +one: + RET +TEXT ·a11(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + JEQ one + JMP two +one: + ORQ R15, R15 // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" +two: + RET +TEXT ·a12(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + JMP one +two: + ORQ R15, R15 + RET +one: + MOVL $0, R15 + JMP two + +// Ensure 3-arg instructions get GOT-rewritten without errors. +// See issue 58735. +TEXT ·a13(SB), 0, $0-0 + MULXQ runtime·writeBarrier(SB), AX, CX + RET + +// Various special cases in the use-R15-after-global-access-when-dynlinking check. +// See issue 58632. 
+TEXT ·a14(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + MULXQ R15, AX, BX // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" + RET +TEXT ·a15(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + MULXQ AX, R15, BX + ADDQ $1, R15 + RET +TEXT ·a16(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + MULXQ AX, BX, R15 + ADDQ $1, R15 + RET +TEXT ·a17(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + MOVQ (R15), AX // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" + RET +TEXT ·a18(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + MOVQ (CX)(R15*1), AX // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" + RET +TEXT ·a19(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + MOVQ AX, (R15) // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" + RET +TEXT ·a20(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + MOVQ AX, (CX)(R15*1) // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" + RET +TEXT ·a21(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + MOVBLSX AX, R15 + ADDQ $1, R15 + RET +TEXT ·a22(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + PMOVMSKB X0, R15 + ADDQ $1, R15 + RET +TEXT ·a23(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + LEAQ (AX)(CX*1), R15 + RET +TEXT ·a24(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + LEAQ (R15)(AX*1), AX // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" + RET +TEXT ·a25(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + LEAQ (AX)(R15*1), AX // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" + RET +TEXT ·a26(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + IMUL3Q $33, AX, R15 + ADDQ $1, R15 + RET +TEXT ·a27(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + IMUL3Q $33, R15, AX // ERROR "when dynamic linking, R15 is clobbered by 
a global variable access and is used here" + RET +TEXT ·a28(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + PEXTRD $0, X0, R15 + ADDQ $1, R15 + RET +TEXT ·a29(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + VPEXTRD $0, X0, R15 + ADDQ $1, R15 + RET +TEXT ·a30(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + BSFQ R15, AX // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" + RET +TEXT ·a31(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + BSFQ AX, R15 + ADDQ $1, R15 + RET +TEXT ·a32(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + POPCNTL R15, AX // ERROR "when dynamic linking, R15 is clobbered by a global variable access and is used here" + RET +TEXT ·a33(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + POPCNTL AX, R15 + ADDQ $1, R15 + RET +TEXT ·a34(SB), 0, $0-0 + CMPL runtime·writeBarrier(SB), $0 + SHLXQ AX, CX, R15 + ADDQ $1, R15 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/amd64enc.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/amd64enc.s new file mode 100644 index 0000000000000000000000000000000000000000..5bba292dee43f6659b457c8de9177682b0fa5d82 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/amd64enc.s @@ -0,0 +1,10700 @@ +// generated by x86test -amd64 +// DO NOT EDIT + +#include "../../../../../runtime/textflag.h" + +TEXT asmtest(SB),DUPOK|NOSPLIT,$0 + ADCB $7, AL // 1407 + ADCW $61731, AX // 661523f1 + ADCL $4045620583, AX // 15674523f1 + ADCQ $-249346713, AX // 4815674523f1 + ADCW $61731, (BX) // 66811323f1 + ADCW $61731, (R11) // 6641811323f1 + ADCW $61731, DX // 6681d223f1 + ADCW $61731, R11 // 664181d323f1 + ADCW $7, (BX) // 66831307 + ADCW $7, (R11) // 6641831307 + ADCW $7, DX // 6683d207 + ADCW $7, R11 // 664183d307 + ADCW DX, (BX) // 661113 + ADCW R11, (BX) // 6644111b + ADCW DX, (R11) // 66411113 + ADCW R11, (R11) // 6645111b + ADCW DX, DX // 6611d2 or 6613d2 + ADCW R11, DX // 664411da or 664113d3 + 
ADCW DX, R11 // 664111d3 or 664413da + ADCW R11, R11 // 664511db or 664513db + ADCL $4045620583, (BX) // 8113674523f1 + ADCL $4045620583, (R11) // 418113674523f1 + ADCL $4045620583, DX // 81d2674523f1 + ADCL $4045620583, R11 // 4181d3674523f1 + ADCL $7, (BX) // 831307 + ADCL $7, (R11) // 41831307 + ADCL $7, DX // 83d207 + ADCL $7, R11 // 4183d307 + ADCL DX, (BX) // 1113 + ADCL R11, (BX) // 44111b + ADCL DX, (R11) // 411113 + ADCL R11, (R11) // 45111b + ADCL DX, DX // 11d2 or 13d2 + ADCL R11, DX // 4411da or 4113d3 + ADCL DX, R11 // 4111d3 or 4413da + ADCL R11, R11 // 4511db or 4513db + ADCQ $-249346713, (BX) // 488113674523f1 + ADCQ $-249346713, (R11) // 498113674523f1 + ADCQ $-249346713, DX // 4881d2674523f1 + ADCQ $-249346713, R11 // 4981d3674523f1 + ADCQ $7, (BX) // 48831307 + ADCQ $7, (R11) // 49831307 + ADCQ $7, DX // 4883d207 + ADCQ $7, R11 // 4983d307 + ADCQ DX, (BX) // 481113 + ADCQ R11, (BX) // 4c111b + ADCQ DX, (R11) // 491113 + ADCQ R11, (R11) // 4d111b + ADCQ DX, DX // 4811d2 or 4813d2 + ADCQ R11, DX // 4c11da or 4913d3 + ADCQ DX, R11 // 4911d3 or 4c13da + ADCQ R11, R11 // 4d11db or 4d13db + ADCB $7, (BX) // 801307 + ADCB $7, (R11) // 41801307 + ADCB $7, DL // 80d207 + ADCB $7, R11 // 4180d307 + ADCB DL, (BX) // 1013 + ADCB R11, (BX) // 44101b + ADCB DL, (R11) // 411013 + ADCB R11, (R11) // 45101b + ADCB DL, DL // 10d2 or 12d2 + ADCB R11, DL // 4410da or 4112d3 + ADCB DL, R11 // 4110d3 or 4412da + ADCB R11, R11 // 4510db or 4512db + ADCW (BX), DX // 661313 + ADCW (R11), DX // 66411313 + ADCW (BX), R11 // 6644131b + ADCW (R11), R11 // 6645131b + ADCL (BX), DX // 1313 + ADCL (R11), DX // 411313 + ADCL (BX), R11 // 44131b + ADCL (R11), R11 // 45131b + ADCQ (BX), DX // 481313 + ADCQ (R11), DX // 491313 + ADCQ (BX), R11 // 4c131b + ADCQ (R11), R11 // 4d131b + ADCB (BX), DL // 1213 + ADCB (R11), DL // 411213 + ADCB (BX), R11 // 44121b + ADCB (R11), R11 // 45121b + ADCXL (BX), DX // 660f38f613 + ADCXL (R11), DX // 66410f38f613 + ADCXL DX, DX // 660f38f6d2 + 
ADCXL R11, DX // 66410f38f6d3 + ADCXL (BX), R11 // 66440f38f61b + ADCXL (R11), R11 // 66450f38f61b + ADCXL DX, R11 // 66440f38f6da + ADCXL R11, R11 // 66450f38f6db + ADCXQ (BX), DX // 66480f38f613 + ADCXQ (R11), DX // 66490f38f613 + ADCXQ DX, DX // 66480f38f6d2 + ADCXQ R11, DX // 66490f38f6d3 + ADCXQ (BX), R11 // 664c0f38f61b + ADCXQ (R11), R11 // 664d0f38f61b + ADCXQ DX, R11 // 664c0f38f6da + ADCXQ R11, R11 // 664d0f38f6db + ADDB $7, AL // 0407 + ADDW $61731, AX // 660523f1 + ADDL $4045620583, AX // 05674523f1 + ADDQ $-249346713, AX // 4805674523f1 + ADDW $61731, (BX) // 66810323f1 + ADDW $61731, (R11) // 6641810323f1 + ADDW $61731, DX // 6681c223f1 + ADDW $61731, R11 // 664181c323f1 + ADDW $7, (BX) // 66830307 + ADDW $7, (R11) // 6641830307 + ADDW $7, DX // 6683c207 + ADDW $7, R11 // 664183c307 + ADDW DX, (BX) // 660113 + ADDW R11, (BX) // 6644011b + ADDW DX, (R11) // 66410113 + ADDW R11, (R11) // 6645011b + ADDW DX, DX // 6601d2 or 6603d2 + ADDW R11, DX // 664401da or 664103d3 + ADDW DX, R11 // 664101d3 or 664403da + ADDW R11, R11 // 664501db or 664503db + ADDL $4045620583, (BX) // 8103674523f1 + ADDL $4045620583, (R11) // 418103674523f1 + ADDL $4045620583, DX // 81c2674523f1 + ADDL $4045620583, R11 // 4181c3674523f1 + ADDL $7, (BX) // 830307 + ADDL $7, (R11) // 41830307 + ADDL $7, DX // 83c207 + ADDL $7, R11 // 4183c307 + ADDL DX, (BX) // 0113 + ADDL R11, (BX) // 44011b + ADDL DX, (R11) // 410113 + ADDL R11, (R11) // 45011b + ADDL DX, DX // 01d2 or 03d2 + ADDL R11, DX // 4401da or 4103d3 + ADDL DX, R11 // 4101d3 or 4403da + ADDL R11, R11 // 4501db or 4503db + ADDQ $-249346713, (BX) // 488103674523f1 + ADDQ $-249346713, (R11) // 498103674523f1 + ADDQ $-249346713, DX // 4881c2674523f1 + ADDQ $-249346713, R11 // 4981c3674523f1 + ADDQ $7, (BX) // 48830307 + ADDQ $7, (R11) // 49830307 + ADDQ $7, DX // 4883c207 + ADDQ $7, R11 // 4983c307 + ADDQ DX, (BX) // 480113 + ADDQ R11, (BX) // 4c011b + ADDQ DX, (R11) // 490113 + ADDQ R11, (R11) // 4d011b + ADDQ DX, DX // 4801d2 
or 4803d2 + ADDQ R11, DX // 4c01da or 4903d3 + ADDQ DX, R11 // 4901d3 or 4c03da + ADDQ R11, R11 // 4d01db or 4d03db + ADDB $7, (BX) // 800307 + ADDB $7, (R11) // 41800307 + ADDB $7, DL // 80c207 + ADDB $7, R11 // 4180c307 + ADDB DL, (BX) // 0013 + ADDB R11, (BX) // 44001b + ADDB DL, (R11) // 410013 + ADDB R11, (R11) // 45001b + ADDB DL, DL // 00d2 or 02d2 + ADDB R11, DL // 4400da or 4102d3 + ADDB DL, R11 // 4100d3 or 4402da + ADDB R11, R11 // 4500db or 4502db + ADDW (BX), DX // 660313 + ADDW (R11), DX // 66410313 + ADDW (BX), R11 // 6644031b + ADDW (R11), R11 // 6645031b + ADDL (BX), DX // 0313 + ADDL (R11), DX // 410313 + ADDL (BX), R11 // 44031b + ADDL (R11), R11 // 45031b + ADDQ (BX), DX // 480313 + ADDQ (R11), DX // 490313 + ADDQ (BX), R11 // 4c031b + ADDQ (R11), R11 // 4d031b + ADDB (BX), DL // 0213 + ADDB (R11), DL // 410213 + ADDB (BX), R11 // 44021b + ADDB (R11), R11 // 45021b + ADDPD (BX), X2 // 660f5813 + ADDPD (R11), X2 // 66410f5813 + ADDPD X2, X2 // 660f58d2 + ADDPD X11, X2 // 66410f58d3 + ADDPD (BX), X11 // 66440f581b + ADDPD (R11), X11 // 66450f581b + ADDPD X2, X11 // 66440f58da + ADDPD X11, X11 // 66450f58db + ADDPS (BX), X2 // 0f5813 + ADDPS (R11), X2 // 410f5813 + ADDPS X2, X2 // 0f58d2 + ADDPS X11, X2 // 410f58d3 + ADDPS (BX), X11 // 440f581b + ADDPS (R11), X11 // 450f581b + ADDPS X2, X11 // 440f58da + ADDPS X11, X11 // 450f58db + ADDSD (BX), X2 // f20f5813 + ADDSD (R11), X2 // f2410f5813 + ADDSD X2, X2 // f20f58d2 + ADDSD X11, X2 // f2410f58d3 + ADDSD (BX), X11 // f2440f581b + ADDSD (R11), X11 // f2450f581b + ADDSD X2, X11 // f2440f58da + ADDSD X11, X11 // f2450f58db + ADDSS (BX), X2 // f30f5813 + ADDSS (R11), X2 // f3410f5813 + ADDSS X2, X2 // f30f58d2 + ADDSS X11, X2 // f3410f58d3 + ADDSS (BX), X11 // f3440f581b + ADDSS (R11), X11 // f3450f581b + ADDSS X2, X11 // f3440f58da + ADDSS X11, X11 // f3450f58db + ADDSUBPD (BX), X2 // 660fd013 + ADDSUBPD (R11), X2 // 66410fd013 + ADDSUBPD X2, X2 // 660fd0d2 + ADDSUBPD X11, X2 // 66410fd0d3 + ADDSUBPD 
(BX), X11 // 66440fd01b + ADDSUBPD (R11), X11 // 66450fd01b + ADDSUBPD X2, X11 // 66440fd0da + ADDSUBPD X11, X11 // 66450fd0db + ADDSUBPS (BX), X2 // f20fd013 + ADDSUBPS (R11), X2 // f2410fd013 + ADDSUBPS X2, X2 // f20fd0d2 + ADDSUBPS X11, X2 // f2410fd0d3 + ADDSUBPS (BX), X11 // f2440fd01b + ADDSUBPS (R11), X11 // f2450fd01b + ADDSUBPS X2, X11 // f2440fd0da + ADDSUBPS X11, X11 // f2450fd0db + ADOXL (BX), DX // f30f38f613 + ADOXL (R11), DX // f3410f38f613 + ADOXL DX, DX // f30f38f6d2 + ADOXL R11, DX // f3410f38f6d3 + ADOXL (BX), R11 // f3440f38f61b + ADOXL (R11), R11 // f3450f38f61b + ADOXL DX, R11 // f3440f38f6da + ADOXL R11, R11 // f3450f38f6db + ADOXQ (BX), DX // f3480f38f613 + ADOXQ (R11), DX // f3490f38f613 + ADOXQ DX, DX // f3480f38f6d2 + ADOXQ R11, DX // f3490f38f6d3 + ADOXQ (BX), R11 // f34c0f38f61b + ADOXQ (R11), R11 // f34d0f38f61b + ADOXQ DX, R11 // f34c0f38f6da + ADOXQ R11, R11 // f34d0f38f6db + AESDEC (BX), X2 // 660f38de13 + AESDEC (R11), X2 // 66410f38de13 + AESDEC X2, X2 // 660f38ded2 + AESDEC X11, X2 // 66410f38ded3 + AESDEC (BX), X11 // 66440f38de1b + AESDEC (R11), X11 // 66450f38de1b + AESDEC X2, X11 // 66440f38deda + AESDEC X11, X11 // 66450f38dedb + AESDECLAST (BX), X2 // 660f38df13 + AESDECLAST (R11), X2 // 66410f38df13 + AESDECLAST X2, X2 // 660f38dfd2 + AESDECLAST X11, X2 // 66410f38dfd3 + AESDECLAST (BX), X11 // 66440f38df1b + AESDECLAST (R11), X11 // 66450f38df1b + AESDECLAST X2, X11 // 66440f38dfda + AESDECLAST X11, X11 // 66450f38dfdb + AESENC (BX), X2 // 660f38dc13 + AESENC (R11), X2 // 66410f38dc13 + AESENC X2, X2 // 660f38dcd2 + AESENC X11, X2 // 66410f38dcd3 + AESENC (BX), X11 // 66440f38dc1b + AESENC (R11), X11 // 66450f38dc1b + AESENC X2, X11 // 66440f38dcda + AESENC X11, X11 // 66450f38dcdb + AESENCLAST (BX), X2 // 660f38dd13 + AESENCLAST (R11), X2 // 66410f38dd13 + AESENCLAST X2, X2 // 660f38ddd2 + AESENCLAST X11, X2 // 66410f38ddd3 + AESENCLAST (BX), X11 // 66440f38dd1b + AESENCLAST (R11), X11 // 66450f38dd1b + AESENCLAST X2, 
X11 // 66440f38ddda + AESENCLAST X11, X11 // 66450f38dddb + AESIMC (BX), X2 // 660f38db13 + AESIMC (R11), X2 // 66410f38db13 + AESIMC X2, X2 // 660f38dbd2 + AESIMC X11, X2 // 66410f38dbd3 + AESIMC (BX), X11 // 66440f38db1b + AESIMC (R11), X11 // 66450f38db1b + AESIMC X2, X11 // 66440f38dbda + AESIMC X11, X11 // 66450f38dbdb + AESKEYGENASSIST $7, (BX), X2 // 660f3adf1307 + AESKEYGENASSIST $7, (R11), X2 // 66410f3adf1307 + AESKEYGENASSIST $7, X2, X2 // 660f3adfd207 + AESKEYGENASSIST $7, X11, X2 // 66410f3adfd307 + AESKEYGENASSIST $7, (BX), X11 // 66440f3adf1b07 + AESKEYGENASSIST $7, (R11), X11 // 66450f3adf1b07 + AESKEYGENASSIST $7, X2, X11 // 66440f3adfda07 + AESKEYGENASSIST $7, X11, X11 // 66450f3adfdb07 + ANDB $7, AL // 2407 + ANDW $61731, AX // 662523f1 + ANDL $4045620583, AX // 25674523f1 + ANDQ $-249346713, AX // 4825674523f1 + ANDW $61731, (BX) // 66812323f1 + ANDW $61731, (R11) // 6641812323f1 + ANDW $61731, DX // 6681e223f1 + ANDW $61731, R11 // 664181e323f1 + ANDW $7, (BX) // 66832307 + ANDW $7, (R11) // 6641832307 + ANDW $7, DX // 6683e207 + ANDW $7, R11 // 664183e307 + ANDW DX, (BX) // 662113 + ANDW R11, (BX) // 6644211b + ANDW DX, (R11) // 66412113 + ANDW R11, (R11) // 6645211b + ANDW DX, DX // 6621d2 or 6623d2 + ANDW R11, DX // 664421da or 664123d3 + ANDW DX, R11 // 664121d3 or 664423da + ANDW R11, R11 // 664521db or 664523db + ANDL $4045620583, (BX) // 8123674523f1 + ANDL $4045620583, (R11) // 418123674523f1 + ANDL $4045620583, DX // 81e2674523f1 + ANDL $4045620583, R11 // 4181e3674523f1 + ANDL $7, (BX) // 832307 + ANDL $7, (R11) // 41832307 + ANDL $7, DX // 83e207 + ANDL $7, R11 // 4183e307 + ANDL DX, (BX) // 2113 + ANDL R11, (BX) // 44211b + ANDL DX, (R11) // 412113 + ANDL R11, (R11) // 45211b + ANDL DX, DX // 21d2 or 23d2 + ANDL R11, DX // 4421da or 4123d3 + ANDL DX, R11 // 4121d3 or 4423da + ANDL R11, R11 // 4521db or 4523db + ANDQ $-249346713, (BX) // 488123674523f1 + ANDQ $-249346713, (R11) // 498123674523f1 + ANDQ $-249346713, DX // 
4881e2674523f1 + ANDQ $-249346713, R11 // 4981e3674523f1 + ANDQ $7, (BX) // 48832307 + ANDQ $7, (R11) // 49832307 + ANDQ $7, DX // 4883e207 + ANDQ $7, R11 // 4983e307 + ANDQ DX, (BX) // 482113 + ANDQ R11, (BX) // 4c211b + ANDQ DX, (R11) // 492113 + ANDQ R11, (R11) // 4d211b + ANDQ DX, DX // 4821d2 or 4823d2 + ANDQ R11, DX // 4c21da or 4923d3 + ANDQ DX, R11 // 4921d3 or 4c23da + ANDQ R11, R11 // 4d21db or 4d23db + ANDB $7, (BX) // 802307 + ANDB $7, (R11) // 41802307 + ANDB $7, DL // 80e207 + ANDB $7, R11 // 4180e307 + ANDB DL, (BX) // 2013 + ANDB R11, (BX) // 44201b + ANDB DL, (R11) // 412013 + ANDB R11, (R11) // 45201b + ANDB DL, DL // 20d2 or 22d2 + ANDB R11, DL // 4420da or 4122d3 + ANDB DL, R11 // 4120d3 or 4422da + ANDB R11, R11 // 4520db or 4522db + ANDW (BX), DX // 662313 + ANDW (R11), DX // 66412313 + ANDW (BX), R11 // 6644231b + ANDW (R11), R11 // 6645231b + ANDL (BX), DX // 2313 + ANDL (R11), DX // 412313 + ANDL (BX), R11 // 44231b + ANDL (R11), R11 // 45231b + ANDQ (BX), DX // 482313 + ANDQ (R11), DX // 492313 + ANDQ (BX), R11 // 4c231b + ANDQ (R11), R11 // 4d231b + ANDB (BX), DL // 2213 + ANDB (R11), DL // 412213 + ANDB (BX), R11 // 44221b + ANDB (R11), R11 // 45221b + ANDNL (BX), R9, DX // c4e230f213 + ANDNL (R11), R9, DX // c4c230f213 + ANDNL DX, R9, DX // c4e230f2d2 + ANDNL R11, R9, DX // c4c230f2d3 + ANDNL (BX), R9, R11 // c46230f21b + ANDNL (R11), R9, R11 // c44230f21b + ANDNL DX, R9, R11 // c46230f2da + ANDNL R11, R9, R11 // c44230f2db + ANDNQ (BX), R14, DX // c4e288f213 + ANDNQ (R11), R14, DX // c4c288f213 + ANDNQ DX, R14, DX // c4e288f2d2 + ANDNQ R11, R14, DX // c4c288f2d3 + ANDNQ (BX), R14, R11 // c46288f21b + ANDNQ (R11), R14, R11 // c44288f21b + ANDNQ DX, R14, R11 // c46288f2da + ANDNQ R11, R14, R11 // c44288f2db + ANDNPD (BX), X2 // 660f5513 + ANDNPD (R11), X2 // 66410f5513 + ANDNPD X2, X2 // 660f55d2 + ANDNPD X11, X2 // 66410f55d3 + ANDNPD (BX), X11 // 66440f551b + ANDNPD (R11), X11 // 66450f551b + ANDNPD X2, X11 // 66440f55da + ANDNPD X11, 
X11 // 66450f55db + ANDNPS (BX), X2 // 0f5513 + ANDNPS (R11), X2 // 410f5513 + ANDNPS X2, X2 // 0f55d2 + ANDNPS X11, X2 // 410f55d3 + ANDNPS (BX), X11 // 440f551b + ANDNPS (R11), X11 // 450f551b + ANDNPS X2, X11 // 440f55da + ANDNPS X11, X11 // 450f55db + ANDPD (BX), X2 // 660f5413 + ANDPD (R11), X2 // 66410f5413 + ANDPD X2, X2 // 660f54d2 + ANDPD X11, X2 // 66410f54d3 + ANDPD (BX), X11 // 66440f541b + ANDPD (R11), X11 // 66450f541b + ANDPD X2, X11 // 66440f54da + ANDPD X11, X11 // 66450f54db + ANDPS (BX), X2 // 0f5413 + ANDPS (R11), X2 // 410f5413 + ANDPS X2, X2 // 0f54d2 + ANDPS X11, X2 // 410f54d3 + ANDPS (BX), X11 // 440f541b + ANDPS (R11), X11 // 450f541b + ANDPS X2, X11 // 440f54da + ANDPS X11, X11 // 450f54db + BEXTRL R9, (BX), DX // c4e230f713 + BEXTRL R9, (R11), DX // c4c230f713 + BEXTRL R9, DX, DX // c4e230f7d2 + BEXTRL R9, R11, DX // c4c230f7d3 + BEXTRL R9, (BX), R11 // c46230f71b + BEXTRL R9, (R11), R11 // c44230f71b + BEXTRL R9, DX, R11 // c46230f7da + BEXTRL R9, R11, R11 // c44230f7db + BEXTRQ R14, (BX), DX // c4e288f713 + BEXTRQ R14, (R11), DX // c4c288f713 + BEXTRQ R14, DX, DX // c4e288f7d2 + BEXTRQ R14, R11, DX // c4c288f7d3 + BEXTRQ R14, (BX), R11 // c46288f71b + BEXTRQ R14, (R11), R11 // c44288f71b + BEXTRQ R14, DX, R11 // c46288f7da + BEXTRQ R14, R11, R11 // c44288f7db + BLENDPD $7, (BX), X2 // 660f3a0d1307 + BLENDPD $7, (R11), X2 // 66410f3a0d1307 + BLENDPD $7, X2, X2 // 660f3a0dd207 + BLENDPD $7, X11, X2 // 66410f3a0dd307 + BLENDPD $7, (BX), X11 // 66440f3a0d1b07 + BLENDPD $7, (R11), X11 // 66450f3a0d1b07 + BLENDPD $7, X2, X11 // 66440f3a0dda07 + BLENDPD $7, X11, X11 // 66450f3a0ddb07 + BLENDPS $7, (BX), X2 // 660f3a0c1307 + BLENDPS $7, (R11), X2 // 66410f3a0c1307 + BLENDPS $7, X2, X2 // 660f3a0cd207 + BLENDPS $7, X11, X2 // 66410f3a0cd307 + BLENDPS $7, (BX), X11 // 66440f3a0c1b07 + BLENDPS $7, (R11), X11 // 66450f3a0c1b07 + BLENDPS $7, X2, X11 // 66440f3a0cda07 + BLENDPS $7, X11, X11 // 66450f3a0cdb07 + BLENDVPD X0, (BX), X2 // 660f381513 + 
BLENDVPD X0, (R11), X2 // 66410f381513 + BLENDVPD X0, X2, X2 // 660f3815d2 + BLENDVPD X0, X11, X2 // 66410f3815d3 + BLENDVPD X0, (BX), X11 // 66440f38151b + BLENDVPD X0, (R11), X11 // 66450f38151b + BLENDVPD X0, X2, X11 // 66440f3815da + BLENDVPD X0, X11, X11 // 66450f3815db + BLENDVPS X0, (BX), X2 // 660f381413 + BLENDVPS X0, (R11), X2 // 66410f381413 + BLENDVPS X0, X2, X2 // 660f3814d2 + BLENDVPS X0, X11, X2 // 66410f3814d3 + BLENDVPS X0, (BX), X11 // 66440f38141b + BLENDVPS X0, (R11), X11 // 66450f38141b + BLENDVPS X0, X2, X11 // 66440f3814da + BLENDVPS X0, X11, X11 // 66450f3814db + BLSIL (BX), R9 // c4e230f31b + BLSIL (R11), R9 // c4c230f31b + BLSIL DX, R9 // c4e230f3da + BLSIL R11, R9 // c4c230f3db + BLSIQ (BX), R14 // c4e288f31b + BLSIQ (R11), R14 // c4c288f31b + BLSIQ DX, R14 // c4e288f3da + BLSIQ R11, R14 // c4c288f3db + BLSMSKL (BX), R9 // c4e230f313 + BLSMSKL (R11), R9 // c4c230f313 + BLSMSKL DX, R9 // c4e230f3d2 + BLSMSKL R11, R9 // c4c230f3d3 + BLSMSKQ (BX), R14 // c4e288f313 + BLSMSKQ (R11), R14 // c4c288f313 + BLSMSKQ DX, R14 // c4e288f3d2 + BLSMSKQ R11, R14 // c4c288f3d3 + BLSRL (BX), R9 // c4e230f30b + BLSRL (R11), R9 // c4c230f30b + BLSRL DX, R9 // c4e230f3ca + BLSRL R11, R9 // c4c230f3cb + BLSRQ (BX), R14 // c4e288f30b + BLSRQ (R11), R14 // c4c288f30b + BLSRQ DX, R14 // c4e288f3ca + BLSRQ R11, R14 // c4c288f3cb + //TODO: BNDCL (BX), BND2 // f30f1a13 + //TODO: BNDCL (R11), BND2 // f3410f1a13 + //TODO: BNDCL DX, BND2 // f30f1ad2 + //TODO: BNDCL R11, BND2 // f3410f1ad3 + //TODO: BNDCL (BX), BND3 // f30f1a1b + //TODO: BNDCL (R11), BND3 // f3410f1a1b + //TODO: BNDCL DX, BND3 // f30f1ada + //TODO: BNDCL R11, BND3 // f3410f1adb + //TODO: BNDCN (BX), BND2 // f20f1b13 + //TODO: BNDCN (R11), BND2 // f2410f1b13 + //TODO: BNDCN DX, BND2 // f20f1bd2 + //TODO: BNDCN R11, BND2 // f2410f1bd3 + //TODO: BNDCN (BX), BND3 // f20f1b1b + //TODO: BNDCN (R11), BND3 // f2410f1b1b + //TODO: BNDCN DX, BND3 // f20f1bda + //TODO: BNDCN R11, BND3 // f2410f1bdb + //TODO: BNDCU 
(BX), BND2 // f20f1a13 + //TODO: BNDCU (R11), BND2 // f2410f1a13 + //TODO: BNDCU DX, BND2 // f20f1ad2 + //TODO: BNDCU R11, BND2 // f2410f1ad3 + //TODO: BNDCU (BX), BND3 // f20f1a1b + //TODO: BNDCU (R11), BND3 // f2410f1a1b + //TODO: BNDCU DX, BND3 // f20f1ada + //TODO: BNDCU R11, BND3 // f2410f1adb + //TODO: BNDLDX (BX), BND2 // 0f1a13 + //TODO: BNDLDX (R11), BND2 // 410f1a13 + //TODO: BNDLDX (BX), BND3 // 0f1a1b + //TODO: BNDLDX (R11), BND3 // 410f1a1b + //TODO: BNDMK (BX), BND2 // f30f1b13 + //TODO: BNDMK (R11), BND2 // f3410f1b13 + //TODO: BNDMK (BX), BND3 // f30f1b1b + //TODO: BNDMK (R11), BND3 // f3410f1b1b + //TODO: BNDMOV (BX), BND2 // 660f1a13 + //TODO: BNDMOV (R11), BND2 // 66410f1a13 + //TODO: BNDMOV BND2, BND2 // 660f1ad2 or 660f1bd2 + //TODO: BNDMOV BND3, BND2 // 660f1ad3 or 660f1bda + //TODO: BNDMOV (BX), BND3 // 660f1a1b + //TODO: BNDMOV (R11), BND3 // 66410f1a1b + //TODO: BNDMOV BND2, BND3 // 660f1ada or 660f1bd3 + //TODO: BNDMOV BND3, BND3 // 660f1adb or 660f1bdb + //TODO: BNDMOV BND2, (BX) // 660f1b13 + //TODO: BNDMOV BND3, (BX) // 660f1b1b + //TODO: BNDMOV BND2, (R11) // 66410f1b13 + //TODO: BNDMOV BND3, (R11) // 66410f1b1b + //TODO: BNDSTX BND2, (BX) // 0f1b13 + //TODO: BNDSTX BND3, (BX) // 0f1b1b + //TODO: BNDSTX BND2, (R11) // 410f1b13 + //TODO: BNDSTX BND3, (R11) // 410f1b1b + BSFW (BX), DX // 660fbc13 + BSFW (R11), DX // 66410fbc13 + BSFW DX, DX // 660fbcd2 + BSFW R11, DX // 66410fbcd3 + BSFW (BX), R11 // 66440fbc1b + BSFW (R11), R11 // 66450fbc1b + BSFW DX, R11 // 66440fbcda + BSFW R11, R11 // 66450fbcdb + BSFL (BX), DX // 0fbc13 + BSFL (R11), DX // 410fbc13 + BSFL DX, DX // 0fbcd2 + BSFL R11, DX // 410fbcd3 + BSFL (BX), R11 // 440fbc1b + BSFL (R11), R11 // 450fbc1b + BSFL DX, R11 // 440fbcda + BSFL R11, R11 // 450fbcdb + BSFQ (BX), DX // 480fbc13 + BSFQ (R11), DX // 490fbc13 + BSFQ DX, DX // 480fbcd2 + BSFQ R11, DX // 490fbcd3 + BSFQ (BX), R11 // 4c0fbc1b + BSFQ (R11), R11 // 4d0fbc1b + BSFQ DX, R11 // 4c0fbcda + BSFQ R11, R11 // 4d0fbcdb + 
BSRW (BX), DX // 660fbd13 + BSRW (R11), DX // 66410fbd13 + BSRW DX, DX // 660fbdd2 + BSRW R11, DX // 66410fbdd3 + BSRW (BX), R11 // 66440fbd1b + BSRW (R11), R11 // 66450fbd1b + BSRW DX, R11 // 66440fbdda + BSRW R11, R11 // 66450fbddb + BSRL (BX), DX // 0fbd13 + BSRL (R11), DX // 410fbd13 + BSRL DX, DX // 0fbdd2 + BSRL R11, DX // 410fbdd3 + BSRL (BX), R11 // 440fbd1b + BSRL (R11), R11 // 450fbd1b + BSRL DX, R11 // 440fbdda + BSRL R11, R11 // 450fbddb + BSRQ (BX), DX // 480fbd13 + BSRQ (R11), DX // 490fbd13 + BSRQ DX, DX // 480fbdd2 + BSRQ R11, DX // 490fbdd3 + BSRQ (BX), R11 // 4c0fbd1b + BSRQ (R11), R11 // 4d0fbd1b + BSRQ DX, R11 // 4c0fbdda + BSRQ R11, R11 // 4d0fbddb + BSWAPL DX // 0fca + BSWAPL R11 // 410fcb + BSWAPQ DX // 480fca + BSWAPQ R11 // 490fcb + BTW $7, (BX) // 660fba2307 + BTW $7, (R11) // 66410fba2307 + BTW $7, DX // 660fbae207 + BTW $7, R11 // 66410fbae307 + BTW DX, (BX) // 660fa313 + BTW R11, (BX) // 66440fa31b + BTW DX, (R11) // 66410fa313 + BTW R11, (R11) // 66450fa31b + BTW DX, DX // 660fa3d2 + BTW R11, DX // 66440fa3da + BTW DX, R11 // 66410fa3d3 + BTW R11, R11 // 66450fa3db + BTL $7, (BX) // 0fba2307 + BTL $7, (R11) // 410fba2307 + BTL $7, DX // 0fbae207 + BTL $7, R11 // 410fbae307 + BTL DX, (BX) // 0fa313 + BTL R11, (BX) // 440fa31b + BTL DX, (R11) // 410fa313 + BTL R11, (R11) // 450fa31b + BTL DX, DX // 0fa3d2 + BTL R11, DX // 440fa3da + BTL DX, R11 // 410fa3d3 + BTL R11, R11 // 450fa3db + BTQ $7, (BX) // 480fba2307 + BTQ $7, (R11) // 490fba2307 + BTQ $7, DX // 480fbae207 + BTQ $7, R11 // 490fbae307 + BTQ DX, (BX) // 480fa313 + BTQ R11, (BX) // 4c0fa31b + BTQ DX, (R11) // 490fa313 + BTQ R11, (R11) // 4d0fa31b + BTQ DX, DX // 480fa3d2 + BTQ R11, DX // 4c0fa3da + BTQ DX, R11 // 490fa3d3 + BTQ R11, R11 // 4d0fa3db + BTCW $7, (BX) // 660fba3b07 + BTCW $7, (R11) // 66410fba3b07 + BTCW $7, DX // 660fbafa07 + BTCW $7, R11 // 66410fbafb07 + BTCW DX, (BX) // 660fbb13 + BTCW R11, (BX) // 66440fbb1b + BTCW DX, (R11) // 66410fbb13 + BTCW R11, (R11) // 
66450fbb1b + BTCW DX, DX // 660fbbd2 + BTCW R11, DX // 66440fbbda + BTCW DX, R11 // 66410fbbd3 + BTCW R11, R11 // 66450fbbdb + BTCL $7, (BX) // 0fba3b07 + BTCL $7, (R11) // 410fba3b07 + BTCL $7, DX // 0fbafa07 + BTCL $7, R11 // 410fbafb07 + BTCL DX, (BX) // 0fbb13 + BTCL R11, (BX) // 440fbb1b + BTCL DX, (R11) // 410fbb13 + BTCL R11, (R11) // 450fbb1b + BTCL DX, DX // 0fbbd2 + BTCL R11, DX // 440fbbda + BTCL DX, R11 // 410fbbd3 + BTCL R11, R11 // 450fbbdb + BTCQ $7, (BX) // 480fba3b07 + BTCQ $7, (R11) // 490fba3b07 + BTCQ $7, DX // 480fbafa07 + BTCQ $7, R11 // 490fbafb07 + BTCQ DX, (BX) // 480fbb13 + BTCQ R11, (BX) // 4c0fbb1b + BTCQ DX, (R11) // 490fbb13 + BTCQ R11, (R11) // 4d0fbb1b + BTCQ DX, DX // 480fbbd2 + BTCQ R11, DX // 4c0fbbda + BTCQ DX, R11 // 490fbbd3 + BTCQ R11, R11 // 4d0fbbdb + BTRW $7, (BX) // 660fba3307 + BTRW $7, (R11) // 66410fba3307 + BTRW $7, DX // 660fbaf207 + BTRW $7, R11 // 66410fbaf307 + BTRW DX, (BX) // 660fb313 + BTRW R11, (BX) // 66440fb31b + BTRW DX, (R11) // 66410fb313 + BTRW R11, (R11) // 66450fb31b + BTRW DX, DX // 660fb3d2 + BTRW R11, DX // 66440fb3da + BTRW DX, R11 // 66410fb3d3 + BTRW R11, R11 // 66450fb3db + BTRL $7, (BX) // 0fba3307 + BTRL $7, (R11) // 410fba3307 + BTRL $7, DX // 0fbaf207 + BTRL $7, R11 // 410fbaf307 + BTRL DX, (BX) // 0fb313 + BTRL R11, (BX) // 440fb31b + BTRL DX, (R11) // 410fb313 + BTRL R11, (R11) // 450fb31b + BTRL DX, DX // 0fb3d2 + BTRL R11, DX // 440fb3da + BTRL DX, R11 // 410fb3d3 + BTRL R11, R11 // 450fb3db + BTRQ $7, (BX) // 480fba3307 + BTRQ $7, (R11) // 490fba3307 + BTRQ $7, DX // 480fbaf207 + BTRQ $7, R11 // 490fbaf307 + BTRQ DX, (BX) // 480fb313 + BTRQ R11, (BX) // 4c0fb31b + BTRQ DX, (R11) // 490fb313 + BTRQ R11, (R11) // 4d0fb31b + BTRQ DX, DX // 480fb3d2 + BTRQ R11, DX // 4c0fb3da + BTRQ DX, R11 // 490fb3d3 + BTRQ R11, R11 // 4d0fb3db + BTSW $7, (BX) // 660fba2b07 + BTSW $7, (R11) // 66410fba2b07 + BTSW $7, DX // 660fbaea07 + BTSW $7, R11 // 66410fbaeb07 + BTSW DX, (BX) // 660fab13 + BTSW R11, 
(BX) // 66440fab1b + BTSW DX, (R11) // 66410fab13 + BTSW R11, (R11) // 66450fab1b + BTSW DX, DX // 660fabd2 + BTSW R11, DX // 66440fabda + BTSW DX, R11 // 66410fabd3 + BTSW R11, R11 // 66450fabdb + BTSL $7, (BX) // 0fba2b07 + BTSL $7, (R11) // 410fba2b07 + BTSL $7, DX // 0fbaea07 + BTSL $7, R11 // 410fbaeb07 + BTSL DX, (BX) // 0fab13 + BTSL R11, (BX) // 440fab1b + BTSL DX, (R11) // 410fab13 + BTSL R11, (R11) // 450fab1b + BTSL DX, DX // 0fabd2 + BTSL R11, DX // 440fabda + BTSL DX, R11 // 410fabd3 + BTSL R11, R11 // 450fabdb + BTSQ $7, (BX) // 480fba2b07 + BTSQ $7, (R11) // 490fba2b07 + BTSQ $7, DX // 480fbaea07 + BTSQ $7, R11 // 490fbaeb07 + BTSQ DX, (BX) // 480fab13 + BTSQ R11, (BX) // 4c0fab1b + BTSQ DX, (R11) // 490fab13 + BTSQ R11, (R11) // 4d0fab1b + BTSQ DX, DX // 480fabd2 + BTSQ R11, DX // 4c0fabda + BTSQ DX, R11 // 490fabd3 + BTSQ R11, R11 // 4d0fabdb + BZHIL R9, (BX), DX // c4e230f513 + BZHIL R9, (R11), DX // c4c230f513 + BZHIL R9, DX, DX // c4e230f5d2 + BZHIL R9, R11, DX // c4c230f5d3 + BZHIL R9, (BX), R11 // c46230f51b + BZHIL R9, (R11), R11 // c44230f51b + BZHIL R9, DX, R11 // c46230f5da + BZHIL R9, R11, R11 // c44230f5db + BZHIQ R14, (BX), DX // c4e288f513 + BZHIQ R14, (R11), DX // c4c288f513 + BZHIQ R14, DX, DX // c4e288f5d2 + BZHIQ R14, R11, DX // c4c288f5d3 + BZHIQ R14, (BX), R11 // c46288f51b + BZHIQ R14, (R11), R11 // c44288f51b + BZHIQ R14, DX, R11 // c46288f5da + BZHIQ R14, R11, R11 // c44288f5db + //TODO: CALLQ* (BX) // ff13 + //TODO: CALLQ* (R11) // 41ff13 + //TODO: CALLQ* DX // ffd2 + //TODO: CALLQ* R11 // 41ffd3 + //TODO: CALL .+$0x11223344 // e844332211 or 48e844332211 + //TODO: LCALLW* (BX) // 66ff1b + //TODO: LCALLW* (R11) // 6641ff1b + //TODO: LCALLL* (BX) // ff1b + //TODO: LCALLL* (R11) // 41ff1b + //TODO: LCALLQ* (BX) // 48ff1b + //TODO: LCALLQ* (R11) // 49ff1b + CBW // 6698 + CDQ // 99 + CDQE // 4898 + CLAC // 0f01ca + CLC // f8 + CLD // fc + CLFLUSH (BX) // 0fae3b + CLFLUSH (R11) // 410fae3b + CLFLUSHOPT (BX) // 660fae3b + CLFLUSHOPT 
(R11) // 66410fae3b + CLI // fa + CLTS // 0f06 + CMC // f5 + CMOVWHI (BX), DX // 660f4713 + CMOVWHI (R11), DX // 66410f4713 + CMOVWHI DX, DX // 660f47d2 + CMOVWHI R11, DX // 66410f47d3 + CMOVWHI (BX), R11 // 66440f471b + CMOVWHI (R11), R11 // 66450f471b + CMOVWHI DX, R11 // 66440f47da + CMOVWHI R11, R11 // 66450f47db + CMOVLHI (BX), DX // 0f4713 + CMOVLHI (R11), DX // 410f4713 + CMOVLHI DX, DX // 0f47d2 + CMOVLHI R11, DX // 410f47d3 + CMOVLHI (BX), R11 // 440f471b + CMOVLHI (R11), R11 // 450f471b + CMOVLHI DX, R11 // 440f47da + CMOVLHI R11, R11 // 450f47db + CMOVQHI (BX), DX // 480f4713 + CMOVQHI (R11), DX // 490f4713 + CMOVQHI DX, DX // 480f47d2 + CMOVQHI R11, DX // 490f47d3 + CMOVQHI (BX), R11 // 4c0f471b + CMOVQHI (R11), R11 // 4d0f471b + CMOVQHI DX, R11 // 4c0f47da + CMOVQHI R11, R11 // 4d0f47db + CMOVWCC (BX), DX // 660f4313 + CMOVWCC (R11), DX // 66410f4313 + CMOVWCC DX, DX // 660f43d2 + CMOVWCC R11, DX // 66410f43d3 + CMOVWCC (BX), R11 // 66440f431b + CMOVWCC (R11), R11 // 66450f431b + CMOVWCC DX, R11 // 66440f43da + CMOVWCC R11, R11 // 66450f43db + CMOVLCC (BX), DX // 0f4313 + CMOVLCC (R11), DX // 410f4313 + CMOVLCC DX, DX // 0f43d2 + CMOVLCC R11, DX // 410f43d3 + CMOVLCC (BX), R11 // 440f431b + CMOVLCC (R11), R11 // 450f431b + CMOVLCC DX, R11 // 440f43da + CMOVLCC R11, R11 // 450f43db + CMOVQCC (BX), DX // 480f4313 + CMOVQCC (R11), DX // 490f4313 + CMOVQCC DX, DX // 480f43d2 + CMOVQCC R11, DX // 490f43d3 + CMOVQCC (BX), R11 // 4c0f431b + CMOVQCC (R11), R11 // 4d0f431b + CMOVQCC DX, R11 // 4c0f43da + CMOVQCC R11, R11 // 4d0f43db + CMOVWCS (BX), DX // 660f4213 + CMOVWCS (R11), DX // 66410f4213 + CMOVWCS DX, DX // 660f42d2 + CMOVWCS R11, DX // 66410f42d3 + CMOVWCS (BX), R11 // 66440f421b + CMOVWCS (R11), R11 // 66450f421b + CMOVWCS DX, R11 // 66440f42da + CMOVWCS R11, R11 // 66450f42db + CMOVLCS (BX), DX // 0f4213 + CMOVLCS (R11), DX // 410f4213 + CMOVLCS DX, DX // 0f42d2 + CMOVLCS R11, DX // 410f42d3 + CMOVLCS (BX), R11 // 440f421b + CMOVLCS (R11), R11 // 
450f421b + CMOVLCS DX, R11 // 440f42da + CMOVLCS R11, R11 // 450f42db + CMOVQCS (BX), DX // 480f4213 + CMOVQCS (R11), DX // 490f4213 + CMOVQCS DX, DX // 480f42d2 + CMOVQCS R11, DX // 490f42d3 + CMOVQCS (BX), R11 // 4c0f421b + CMOVQCS (R11), R11 // 4d0f421b + CMOVQCS DX, R11 // 4c0f42da + CMOVQCS R11, R11 // 4d0f42db + CMOVWLS (BX), DX // 660f4613 + CMOVWLS (R11), DX // 66410f4613 + CMOVWLS DX, DX // 660f46d2 + CMOVWLS R11, DX // 66410f46d3 + CMOVWLS (BX), R11 // 66440f461b + CMOVWLS (R11), R11 // 66450f461b + CMOVWLS DX, R11 // 66440f46da + CMOVWLS R11, R11 // 66450f46db + CMOVLLS (BX), DX // 0f4613 + CMOVLLS (R11), DX // 410f4613 + CMOVLLS DX, DX // 0f46d2 + CMOVLLS R11, DX // 410f46d3 + CMOVLLS (BX), R11 // 440f461b + CMOVLLS (R11), R11 // 450f461b + CMOVLLS DX, R11 // 440f46da + CMOVLLS R11, R11 // 450f46db + CMOVQLS (BX), DX // 480f4613 + CMOVQLS (R11), DX // 490f4613 + CMOVQLS DX, DX // 480f46d2 + CMOVQLS R11, DX // 490f46d3 + CMOVQLS (BX), R11 // 4c0f461b + CMOVQLS (R11), R11 // 4d0f461b + CMOVQLS DX, R11 // 4c0f46da + CMOVQLS R11, R11 // 4d0f46db + CMOVWEQ (BX), DX // 660f4413 + CMOVWEQ (R11), DX // 66410f4413 + CMOVWEQ DX, DX // 660f44d2 + CMOVWEQ R11, DX // 66410f44d3 + CMOVWEQ (BX), R11 // 66440f441b + CMOVWEQ (R11), R11 // 66450f441b + CMOVWEQ DX, R11 // 66440f44da + CMOVWEQ R11, R11 // 66450f44db + CMOVLEQ (BX), DX // 0f4413 + CMOVLEQ (R11), DX // 410f4413 + CMOVLEQ DX, DX // 0f44d2 + CMOVLEQ R11, DX // 410f44d3 + CMOVLEQ (BX), R11 // 440f441b + CMOVLEQ (R11), R11 // 450f441b + CMOVLEQ DX, R11 // 440f44da + CMOVLEQ R11, R11 // 450f44db + CMOVQEQ (BX), DX // 480f4413 + CMOVQEQ (R11), DX // 490f4413 + CMOVQEQ DX, DX // 480f44d2 + CMOVQEQ R11, DX // 490f44d3 + CMOVQEQ (BX), R11 // 4c0f441b + CMOVQEQ (R11), R11 // 4d0f441b + CMOVQEQ DX, R11 // 4c0f44da + CMOVQEQ R11, R11 // 4d0f44db + CMOVWGT (BX), DX // 660f4f13 + CMOVWGT (R11), DX // 66410f4f13 + CMOVWGT DX, DX // 660f4fd2 + CMOVWGT R11, DX // 66410f4fd3 + CMOVWGT (BX), R11 // 66440f4f1b + CMOVWGT (R11), 
R11 // 66450f4f1b + CMOVWGT DX, R11 // 66440f4fda + CMOVWGT R11, R11 // 66450f4fdb + CMOVLGT (BX), DX // 0f4f13 + CMOVLGT (R11), DX // 410f4f13 + CMOVLGT DX, DX // 0f4fd2 + CMOVLGT R11, DX // 410f4fd3 + CMOVLGT (BX), R11 // 440f4f1b + CMOVLGT (R11), R11 // 450f4f1b + CMOVLGT DX, R11 // 440f4fda + CMOVLGT R11, R11 // 450f4fdb + CMOVQGT (BX), DX // 480f4f13 + CMOVQGT (R11), DX // 490f4f13 + CMOVQGT DX, DX // 480f4fd2 + CMOVQGT R11, DX // 490f4fd3 + CMOVQGT (BX), R11 // 4c0f4f1b + CMOVQGT (R11), R11 // 4d0f4f1b + CMOVQGT DX, R11 // 4c0f4fda + CMOVQGT R11, R11 // 4d0f4fdb + CMOVWGE (BX), DX // 660f4d13 + CMOVWGE (R11), DX // 66410f4d13 + CMOVWGE DX, DX // 660f4dd2 + CMOVWGE R11, DX // 66410f4dd3 + CMOVWGE (BX), R11 // 66440f4d1b + CMOVWGE (R11), R11 // 66450f4d1b + CMOVWGE DX, R11 // 66440f4dda + CMOVWGE R11, R11 // 66450f4ddb + CMOVLGE (BX), DX // 0f4d13 + CMOVLGE (R11), DX // 410f4d13 + CMOVLGE DX, DX // 0f4dd2 + CMOVLGE R11, DX // 410f4dd3 + CMOVLGE (BX), R11 // 440f4d1b + CMOVLGE (R11), R11 // 450f4d1b + CMOVLGE DX, R11 // 440f4dda + CMOVLGE R11, R11 // 450f4ddb + CMOVQGE (BX), DX // 480f4d13 + CMOVQGE (R11), DX // 490f4d13 + CMOVQGE DX, DX // 480f4dd2 + CMOVQGE R11, DX // 490f4dd3 + CMOVQGE (BX), R11 // 4c0f4d1b + CMOVQGE (R11), R11 // 4d0f4d1b + CMOVQGE DX, R11 // 4c0f4dda + CMOVQGE R11, R11 // 4d0f4ddb + CMOVWLT (BX), DX // 660f4c13 + CMOVWLT (R11), DX // 66410f4c13 + CMOVWLT DX, DX // 660f4cd2 + CMOVWLT R11, DX // 66410f4cd3 + CMOVWLT (BX), R11 // 66440f4c1b + CMOVWLT (R11), R11 // 66450f4c1b + CMOVWLT DX, R11 // 66440f4cda + CMOVWLT R11, R11 // 66450f4cdb + CMOVLLT (BX), DX // 0f4c13 + CMOVLLT (R11), DX // 410f4c13 + CMOVLLT DX, DX // 0f4cd2 + CMOVLLT R11, DX // 410f4cd3 + CMOVLLT (BX), R11 // 440f4c1b + CMOVLLT (R11), R11 // 450f4c1b + CMOVLLT DX, R11 // 440f4cda + CMOVLLT R11, R11 // 450f4cdb + CMOVQLT (BX), DX // 480f4c13 + CMOVQLT (R11), DX // 490f4c13 + CMOVQLT DX, DX // 480f4cd2 + CMOVQLT R11, DX // 490f4cd3 + CMOVQLT (BX), R11 // 4c0f4c1b + CMOVQLT 
(R11), R11 // 4d0f4c1b + CMOVQLT DX, R11 // 4c0f4cda + CMOVQLT R11, R11 // 4d0f4cdb + CMOVWLE (BX), DX // 660f4e13 + CMOVWLE (R11), DX // 66410f4e13 + CMOVWLE DX, DX // 660f4ed2 + CMOVWLE R11, DX // 66410f4ed3 + CMOVWLE (BX), R11 // 66440f4e1b + CMOVWLE (R11), R11 // 66450f4e1b + CMOVWLE DX, R11 // 66440f4eda + CMOVWLE R11, R11 // 66450f4edb + CMOVLLE (BX), DX // 0f4e13 + CMOVLLE (R11), DX // 410f4e13 + CMOVLLE DX, DX // 0f4ed2 + CMOVLLE R11, DX // 410f4ed3 + CMOVLLE (BX), R11 // 440f4e1b + CMOVLLE (R11), R11 // 450f4e1b + CMOVLLE DX, R11 // 440f4eda + CMOVLLE R11, R11 // 450f4edb + CMOVQLE (BX), DX // 480f4e13 + CMOVQLE (R11), DX // 490f4e13 + CMOVQLE DX, DX // 480f4ed2 + CMOVQLE R11, DX // 490f4ed3 + CMOVQLE (BX), R11 // 4c0f4e1b + CMOVQLE (R11), R11 // 4d0f4e1b + CMOVQLE DX, R11 // 4c0f4eda + CMOVQLE R11, R11 // 4d0f4edb + CMOVWNE (BX), DX // 660f4513 + CMOVWNE (R11), DX // 66410f4513 + CMOVWNE DX, DX // 660f45d2 + CMOVWNE R11, DX // 66410f45d3 + CMOVWNE (BX), R11 // 66440f451b + CMOVWNE (R11), R11 // 66450f451b + CMOVWNE DX, R11 // 66440f45da + CMOVWNE R11, R11 // 66450f45db + CMOVLNE (BX), DX // 0f4513 + CMOVLNE (R11), DX // 410f4513 + CMOVLNE DX, DX // 0f45d2 + CMOVLNE R11, DX // 410f45d3 + CMOVLNE (BX), R11 // 440f451b + CMOVLNE (R11), R11 // 450f451b + CMOVLNE DX, R11 // 440f45da + CMOVLNE R11, R11 // 450f45db + CMOVQNE (BX), DX // 480f4513 + CMOVQNE (R11), DX // 490f4513 + CMOVQNE DX, DX // 480f45d2 + CMOVQNE R11, DX // 490f45d3 + CMOVQNE (BX), R11 // 4c0f451b + CMOVQNE (R11), R11 // 4d0f451b + CMOVQNE DX, R11 // 4c0f45da + CMOVQNE R11, R11 // 4d0f45db + CMOVWOC (BX), DX // 660f4113 + CMOVWOC (R11), DX // 66410f4113 + CMOVWOC DX, DX // 660f41d2 + CMOVWOC R11, DX // 66410f41d3 + CMOVWOC (BX), R11 // 66440f411b + CMOVWOC (R11), R11 // 66450f411b + CMOVWOC DX, R11 // 66440f41da + CMOVWOC R11, R11 // 66450f41db + CMOVLOC (BX), DX // 0f4113 + CMOVLOC (R11), DX // 410f4113 + CMOVLOC DX, DX // 0f41d2 + CMOVLOC R11, DX // 410f41d3 + CMOVLOC (BX), R11 // 440f411b + 
CMOVLOC (R11), R11 // 450f411b + CMOVLOC DX, R11 // 440f41da + CMOVLOC R11, R11 // 450f41db + CMOVQOC (BX), DX // 480f4113 + CMOVQOC (R11), DX // 490f4113 + CMOVQOC DX, DX // 480f41d2 + CMOVQOC R11, DX // 490f41d3 + CMOVQOC (BX), R11 // 4c0f411b + CMOVQOC (R11), R11 // 4d0f411b + CMOVQOC DX, R11 // 4c0f41da + CMOVQOC R11, R11 // 4d0f41db + CMOVWPC (BX), DX // 660f4b13 + CMOVWPC (R11), DX // 66410f4b13 + CMOVWPC DX, DX // 660f4bd2 + CMOVWPC R11, DX // 66410f4bd3 + CMOVWPC (BX), R11 // 66440f4b1b + CMOVWPC (R11), R11 // 66450f4b1b + CMOVWPC DX, R11 // 66440f4bda + CMOVWPC R11, R11 // 66450f4bdb + CMOVLPC (BX), DX // 0f4b13 + CMOVLPC (R11), DX // 410f4b13 + CMOVLPC DX, DX // 0f4bd2 + CMOVLPC R11, DX // 410f4bd3 + CMOVLPC (BX), R11 // 440f4b1b + CMOVLPC (R11), R11 // 450f4b1b + CMOVLPC DX, R11 // 440f4bda + CMOVLPC R11, R11 // 450f4bdb + CMOVQPC (BX), DX // 480f4b13 + CMOVQPC (R11), DX // 490f4b13 + CMOVQPC DX, DX // 480f4bd2 + CMOVQPC R11, DX // 490f4bd3 + CMOVQPC (BX), R11 // 4c0f4b1b + CMOVQPC (R11), R11 // 4d0f4b1b + CMOVQPC DX, R11 // 4c0f4bda + CMOVQPC R11, R11 // 4d0f4bdb + CMOVWPL (BX), DX // 660f4913 + CMOVWPL (R11), DX // 66410f4913 + CMOVWPL DX, DX // 660f49d2 + CMOVWPL R11, DX // 66410f49d3 + CMOVWPL (BX), R11 // 66440f491b + CMOVWPL (R11), R11 // 66450f491b + CMOVWPL DX, R11 // 66440f49da + CMOVWPL R11, R11 // 66450f49db + CMOVLPL (BX), DX // 0f4913 + CMOVLPL (R11), DX // 410f4913 + CMOVLPL DX, DX // 0f49d2 + CMOVLPL R11, DX // 410f49d3 + CMOVLPL (BX), R11 // 440f491b + CMOVLPL (R11), R11 // 450f491b + CMOVLPL DX, R11 // 440f49da + CMOVLPL R11, R11 // 450f49db + CMOVQPL (BX), DX // 480f4913 + CMOVQPL (R11), DX // 490f4913 + CMOVQPL DX, DX // 480f49d2 + CMOVQPL R11, DX // 490f49d3 + CMOVQPL (BX), R11 // 4c0f491b + CMOVQPL (R11), R11 // 4d0f491b + CMOVQPL DX, R11 // 4c0f49da + CMOVQPL R11, R11 // 4d0f49db + CMOVWOS (BX), DX // 660f4013 + CMOVWOS (R11), DX // 66410f4013 + CMOVWOS DX, DX // 660f40d2 + CMOVWOS R11, DX // 66410f40d3 + CMOVWOS (BX), R11 // 
66440f401b + CMOVWOS (R11), R11 // 66450f401b + CMOVWOS DX, R11 // 66440f40da + CMOVWOS R11, R11 // 66450f40db + CMOVLOS (BX), DX // 0f4013 + CMOVLOS (R11), DX // 410f4013 + CMOVLOS DX, DX // 0f40d2 + CMOVLOS R11, DX // 410f40d3 + CMOVLOS (BX), R11 // 440f401b + CMOVLOS (R11), R11 // 450f401b + CMOVLOS DX, R11 // 440f40da + CMOVLOS R11, R11 // 450f40db + CMOVQOS (BX), DX // 480f4013 + CMOVQOS (R11), DX // 490f4013 + CMOVQOS DX, DX // 480f40d2 + CMOVQOS R11, DX // 490f40d3 + CMOVQOS (BX), R11 // 4c0f401b + CMOVQOS (R11), R11 // 4d0f401b + CMOVQOS DX, R11 // 4c0f40da + CMOVQOS R11, R11 // 4d0f40db + CMOVWPS (BX), DX // 660f4a13 + CMOVWPS (R11), DX // 66410f4a13 + CMOVWPS DX, DX // 660f4ad2 + CMOVWPS R11, DX // 66410f4ad3 + CMOVWPS (BX), R11 // 66440f4a1b + CMOVWPS (R11), R11 // 66450f4a1b + CMOVWPS DX, R11 // 66440f4ada + CMOVWPS R11, R11 // 66450f4adb + CMOVLPS (BX), DX // 0f4a13 + CMOVLPS (R11), DX // 410f4a13 + CMOVLPS DX, DX // 0f4ad2 + CMOVLPS R11, DX // 410f4ad3 + CMOVLPS (BX), R11 // 440f4a1b + CMOVLPS (R11), R11 // 450f4a1b + CMOVLPS DX, R11 // 440f4ada + CMOVLPS R11, R11 // 450f4adb + CMOVQPS (BX), DX // 480f4a13 + CMOVQPS (R11), DX // 490f4a13 + CMOVQPS DX, DX // 480f4ad2 + CMOVQPS R11, DX // 490f4ad3 + CMOVQPS (BX), R11 // 4c0f4a1b + CMOVQPS (R11), R11 // 4d0f4a1b + CMOVQPS DX, R11 // 4c0f4ada + CMOVQPS R11, R11 // 4d0f4adb + CMOVWMI (BX), DX // 660f4813 + CMOVWMI (R11), DX // 66410f4813 + CMOVWMI DX, DX // 660f48d2 + CMOVWMI R11, DX // 66410f48d3 + CMOVWMI (BX), R11 // 66440f481b + CMOVWMI (R11), R11 // 66450f481b + CMOVWMI DX, R11 // 66440f48da + CMOVWMI R11, R11 // 66450f48db + CMOVLMI (BX), DX // 0f4813 + CMOVLMI (R11), DX // 410f4813 + CMOVLMI DX, DX // 0f48d2 + CMOVLMI R11, DX // 410f48d3 + CMOVLMI (BX), R11 // 440f481b + CMOVLMI (R11), R11 // 450f481b + CMOVLMI DX, R11 // 440f48da + CMOVLMI R11, R11 // 450f48db + CMOVQMI (BX), DX // 480f4813 + CMOVQMI (R11), DX // 490f4813 + CMOVQMI DX, DX // 480f48d2 + CMOVQMI R11, DX // 490f48d3 + CMOVQMI (BX), 
R11 // 4c0f481b + CMOVQMI (R11), R11 // 4d0f481b + CMOVQMI DX, R11 // 4c0f48da + CMOVQMI R11, R11 // 4d0f48db + CMPB AL, $7 // 3c07 + CMPW AX, $61731 // 663d23f1 + CMPL AX, $4045620583 // 3d674523f1 + CMPQ AX, $-249346713 // 483d674523f1 + CMPW (BX), $61731 // 66813b23f1 + CMPW (R11), $61731 // 6641813b23f1 + CMPW DX, $61731 // 6681fa23f1 + CMPW R11, $61731 // 664181fb23f1 + CMPW (BX), $7 // 66833b07 + CMPW (R11), $7 // 6641833b07 + CMPW DX, $7 // 6683fa07 + CMPW R11, $7 // 664183fb07 + CMPW (BX), DX // 663913 + CMPW (BX), R11 // 6644391b + CMPW (R11), DX // 66413913 + CMPW (R11), R11 // 6645391b + CMPW DX, DX // 6639d2 or 663bd2 + CMPW DX, R11 // 664439da or 66413bd3 + CMPW R11, DX // 664139d3 or 66443bda + CMPW R11, R11 // 664539db or 66453bdb + CMPL (BX), $4045620583 // 813b674523f1 + CMPL (R11), $4045620583 // 41813b674523f1 + CMPL DX, $4045620583 // 81fa674523f1 + CMPL R11, $4045620583 // 4181fb674523f1 + CMPL (BX), $7 // 833b07 + CMPL (R11), $7 // 41833b07 + CMPL DX, $7 // 83fa07 + CMPL R11, $7 // 4183fb07 + CMPL (BX), DX // 3913 + CMPL (BX), R11 // 44391b + CMPL (R11), DX // 413913 + CMPL (R11), R11 // 45391b + CMPL DX, DX // 39d2 or 3bd2 + CMPL DX, R11 // 4439da or 413bd3 + CMPL R11, DX // 4139d3 or 443bda + CMPL R11, R11 // 4539db or 453bdb + CMPQ (BX), $-249346713 // 48813b674523f1 + CMPQ (R11), $-249346713 // 49813b674523f1 + CMPQ DX, $-249346713 // 4881fa674523f1 + CMPQ R11, $-249346713 // 4981fb674523f1 + CMPQ (BX), $7 // 48833b07 + CMPQ (R11), $7 // 49833b07 + CMPQ DX, $7 // 4883fa07 + CMPQ R11, $7 // 4983fb07 + CMPQ (BX), DX // 483913 + CMPQ (BX), R11 // 4c391b + CMPQ (R11), DX // 493913 + CMPQ (R11), R11 // 4d391b + CMPQ DX, DX // 4839d2 or 483bd2 + CMPQ DX, R11 // 4c39da or 493bd3 + CMPQ R11, DX // 4939d3 or 4c3bda + CMPQ R11, R11 // 4d39db or 4d3bdb + CMPB (BX), $7 // 803b07 + CMPB (R11), $7 // 41803b07 + CMPB DL, $7 // 80fa07 + CMPB R11, $7 // 4180fb07 + CMPB (BX), DL // 3813 + CMPB (BX), R11 // 44381b + CMPB (R11), DL // 413813 + CMPB (R11), R11 
// 45381b + CMPB DL, DL // 38d2 or 3ad2 + CMPB DL, R11 // 4438da or 413ad3 + CMPB R11, DL // 4138d3 or 443ada + CMPB R11, R11 // 4538db or 453adb + CMPW DX, (BX) // 663b13 + CMPW DX, (R11) // 66413b13 + CMPW R11, (BX) // 66443b1b + CMPW R11, (R11) // 66453b1b + CMPL DX, (BX) // 3b13 + CMPL DX, (R11) // 413b13 + CMPL R11, (BX) // 443b1b + CMPL R11, (R11) // 453b1b + CMPQ DX, (BX) // 483b13 + CMPQ DX, (R11) // 493b13 + CMPQ R11, (BX) // 4c3b1b + CMPQ R11, (R11) // 4d3b1b + CMPB DL, (BX) // 3a13 + CMPB DL, (R11) // 413a13 + CMPB R11, (BX) // 443a1b + CMPB R11, (R11) // 453a1b + CMPPD (BX), X2, $7 // 660fc21307 + CMPPD (R11), X2, $7 // 66410fc21307 + CMPPD X2, X2, $7 // 660fc2d207 + CMPPD X11, X2, $7 // 66410fc2d307 + CMPPD (BX), X11, $7 // 66440fc21b07 + CMPPD (R11), X11, $7 // 66450fc21b07 + CMPPD X2, X11, $7 // 66440fc2da07 + CMPPD X11, X11, $7 // 66450fc2db07 + CMPPS (BX), X2, $7 // 0fc21307 + CMPPS (R11), X2, $7 // 410fc21307 + CMPPS X2, X2, $7 // 0fc2d207 + CMPPS X11, X2, $7 // 410fc2d307 + CMPPS (BX), X11, $7 // 440fc21b07 + CMPPS (R11), X11, $7 // 450fc21b07 + CMPPS X2, X11, $7 // 440fc2da07 + CMPPS X11, X11, $7 // 450fc2db07 + CMPSB // a6 + CMPSL // a7 + CMPSD (BX), X2, $7 // f20fc21307 + CMPSD (R11), X2, $7 // f2410fc21307 + CMPSD X2, X2, $7 // f20fc2d207 + CMPSD X11, X2, $7 // f2410fc2d307 + CMPSD (BX), X11, $7 // f2440fc21b07 + CMPSD (R11), X11, $7 // f2450fc21b07 + CMPSD X2, X11, $7 // f2440fc2da07 + CMPSD X11, X11, $7 // f2450fc2db07 + CMPSQ // 48a7 + CMPSS (BX), X2, $7 // f30fc21307 + CMPSS (R11), X2, $7 // f3410fc21307 + CMPSS X2, X2, $7 // f30fc2d207 + CMPSS X11, X2, $7 // f3410fc2d307 + CMPSS (BX), X11, $7 // f3440fc21b07 + CMPSS (R11), X11, $7 // f3450fc21b07 + CMPSS X2, X11, $7 // f3440fc2da07 + CMPSS X11, X11, $7 // f3450fc2db07 + CMPSW // 66a7 + CMPXCHGW DX, (BX) // 660fb113 + CMPXCHGW R11, (BX) // 66440fb11b + CMPXCHGW DX, (R11) // 66410fb113 + CMPXCHGW R11, (R11) // 66450fb11b + CMPXCHGW DX, DX // 660fb1d2 + CMPXCHGW R11, DX // 66440fb1da + 
CMPXCHGW DX, R11 // 66410fb1d3 + CMPXCHGW R11, R11 // 66450fb1db + CMPXCHGL DX, (BX) // 0fb113 + CMPXCHGL R11, (BX) // 440fb11b + CMPXCHGL DX, (R11) // 410fb113 + CMPXCHGL R11, (R11) // 450fb11b + CMPXCHGL DX, DX // 0fb1d2 + CMPXCHGL R11, DX // 440fb1da + CMPXCHGL DX, R11 // 410fb1d3 + CMPXCHGL R11, R11 // 450fb1db + CMPXCHGQ DX, (BX) // 480fb113 + CMPXCHGQ R11, (BX) // 4c0fb11b + CMPXCHGQ DX, (R11) // 490fb113 + CMPXCHGQ R11, (R11) // 4d0fb11b + CMPXCHGQ DX, DX // 480fb1d2 + CMPXCHGQ R11, DX // 4c0fb1da + CMPXCHGQ DX, R11 // 490fb1d3 + CMPXCHGQ R11, R11 // 4d0fb1db + CMPXCHGB DL, (BX) // 0fb013 + CMPXCHGB R11, (BX) // 440fb01b + CMPXCHGB DL, (R11) // 410fb013 + CMPXCHGB R11, (R11) // 450fb01b + CMPXCHGB DL, DL // 0fb0d2 + CMPXCHGB R11, DL // 440fb0da + CMPXCHGB DL, R11 // 410fb0d3 + CMPXCHGB R11, R11 // 450fb0db + CMPXCHG16B (BX) // 480fc70b + CMPXCHG16B (R11) // 490fc70b + CMPXCHG8B (BX) // 0fc70b + CMPXCHG8B (R11) // 410fc70b + COMISD (BX), X2 // 660f2f13 + COMISD (R11), X2 // 66410f2f13 + COMISD X2, X2 // 660f2fd2 + COMISD X11, X2 // 66410f2fd3 + COMISD (BX), X11 // 66440f2f1b + COMISD (R11), X11 // 66450f2f1b + COMISD X2, X11 // 66440f2fda + COMISD X11, X11 // 66450f2fdb + COMISS (BX), X2 // 0f2f13 + COMISS (R11), X2 // 410f2f13 + COMISS X2, X2 // 0f2fd2 + COMISS X11, X2 // 410f2fd3 + COMISS (BX), X11 // 440f2f1b + COMISS (R11), X11 // 450f2f1b + COMISS X2, X11 // 440f2fda + COMISS X11, X11 // 450f2fdb + CPUID // 0fa2 + CQO // 4899 + CRC32W (BX), DX // 66f20f38f113 + CRC32W (R11), DX // 66f2410f38f113 + CRC32W DX, DX // 66f20f38f1d2 + CRC32W R11, DX // 66f2410f38f1d3 + CRC32W (BX), R11 // 66f2440f38f11b + CRC32W (R11), R11 // 66f2450f38f11b + CRC32W DX, R11 // 66f2440f38f1da + CRC32W R11, R11 // 66f2450f38f1db + CRC32L (BX), DX // f20f38f113 + CRC32L (R11), DX // f2410f38f113 + CRC32L DX, DX // f20f38f1d2 + CRC32L R11, DX // f2410f38f1d3 + CRC32L (BX), R11 // f2440f38f11b + CRC32L (R11), R11 // f2450f38f11b + CRC32L DX, R11 // f2440f38f1da + CRC32L R11, R11 // 
f2450f38f1db + CRC32B (BX), DX // f20f38f013 or f2480f38f013 + CRC32B (R11), DX // f2410f38f013 or f2490f38f013 + CRC32B DL, DX // f20f38f0d2 or f2480f38f0d2 + CRC32B R11, DX // f2410f38f0d3 or f2490f38f0d3 + CRC32B (BX), R11 // f2440f38f01b or f24c0f38f01b + CRC32B (R11), R11 // f2450f38f01b or f24d0f38f01b + CRC32B DL, R11 // f2440f38f0da or f24c0f38f0da + CRC32B R11, R11 // f2450f38f0db or f24d0f38f0db + CRC32Q (BX), DX // f2480f38f113 + CRC32Q (R11), DX // f2490f38f113 + CRC32Q DX, DX // f2480f38f1d2 + CRC32Q R11, DX // f2490f38f1d3 + CRC32Q (BX), R11 // f24c0f38f11b + CRC32Q (R11), R11 // f24d0f38f11b + CRC32Q DX, R11 // f24c0f38f1da + CRC32Q R11, R11 // f24d0f38f1db + CVTPL2PD (BX), X2 // f30fe613 + CVTPL2PD (R11), X2 // f3410fe613 + CVTPL2PD X2, X2 // f30fe6d2 + CVTPL2PD X11, X2 // f3410fe6d3 + CVTPL2PD (BX), X11 // f3440fe61b + CVTPL2PD (R11), X11 // f3450fe61b + CVTPL2PD X2, X11 // f3440fe6da + CVTPL2PD X11, X11 // f3450fe6db + CVTPL2PS (BX), X2 // 0f5b13 + CVTPL2PS (R11), X2 // 410f5b13 + CVTPL2PS X2, X2 // 0f5bd2 + CVTPL2PS X11, X2 // 410f5bd3 + CVTPL2PS (BX), X11 // 440f5b1b + CVTPL2PS (R11), X11 // 450f5b1b + CVTPL2PS X2, X11 // 440f5bda + CVTPL2PS X11, X11 // 450f5bdb + CVTPD2PL (BX), X2 // f20fe613 + CVTPD2PL (R11), X2 // f2410fe613 + CVTPD2PL X2, X2 // f20fe6d2 + CVTPD2PL X11, X2 // f2410fe6d3 + CVTPD2PL (BX), X11 // f2440fe61b + CVTPD2PL (R11), X11 // f2450fe61b + CVTPD2PL X2, X11 // f2440fe6da + CVTPD2PL X11, X11 // f2450fe6db + //TODO: CVTPD2PI (BX), M2 // 660f2d13 + //TODO: CVTPD2PI (R11), M2 // 66410f2d13 + //TODO: CVTPD2PI X2, M2 // 660f2dd2 + //TODO: CVTPD2PI X11, M2 // 66410f2dd3 + //TODO: CVTPD2PI (BX), M3 // 660f2d1b + //TODO: CVTPD2PI (R11), M3 // 66410f2d1b + //TODO: CVTPD2PI X2, M3 // 660f2dda + //TODO: CVTPD2PI X11, M3 // 66410f2ddb + CVTPD2PS (BX), X2 // 660f5a13 + CVTPD2PS (R11), X2 // 66410f5a13 + CVTPD2PS X2, X2 // 660f5ad2 + CVTPD2PS X11, X2 // 66410f5ad3 + CVTPD2PS (BX), X11 // 66440f5a1b + CVTPD2PS (R11), X11 // 66450f5a1b + 
CVTPD2PS X2, X11 // 66440f5ada + CVTPD2PS X11, X11 // 66450f5adb + //TODO: CVTPI2PD (BX), X2 // 660f2a13 + //TODO: CVTPI2PD (R11), X2 // 66410f2a13 + //TODO: CVTPI2PD M2, X2 // 660f2ad2 + //TODO: CVTPI2PD M3, X2 // 660f2ad3 + //TODO: CVTPI2PD (BX), X11 // 66440f2a1b + //TODO: CVTPI2PD (R11), X11 // 66450f2a1b + //TODO: CVTPI2PD M2, X11 // 66440f2ada + //TODO: CVTPI2PD M3, X11 // 66440f2adb + //TODO: CVTPI2PS (BX), X2 // 0f2a13 + //TODO: CVTPI2PS (R11), X2 // 410f2a13 + //TODO: CVTPI2PS M2, X2 // 0f2ad2 + //TODO: CVTPI2PS M3, X2 // 0f2ad3 + //TODO: CVTPI2PS (BX), X11 // 440f2a1b + //TODO: CVTPI2PS (R11), X11 // 450f2a1b + //TODO: CVTPI2PS M2, X11 // 440f2ada + //TODO: CVTPI2PS M3, X11 // 440f2adb + CVTPS2PL (BX), X2 // 660f5b13 + CVTPS2PL (R11), X2 // 66410f5b13 + CVTPS2PL X2, X2 // 660f5bd2 + CVTPS2PL X11, X2 // 66410f5bd3 + CVTPS2PL (BX), X11 // 66440f5b1b + CVTPS2PL (R11), X11 // 66450f5b1b + CVTPS2PL X2, X11 // 66440f5bda + CVTPS2PL X11, X11 // 66450f5bdb + CVTPS2PD (BX), X2 // 0f5a13 + CVTPS2PD (R11), X2 // 410f5a13 + CVTPS2PD X2, X2 // 0f5ad2 + CVTPS2PD X11, X2 // 410f5ad3 + CVTPS2PD (BX), X11 // 440f5a1b + CVTPS2PD (R11), X11 // 450f5a1b + CVTPS2PD X2, X11 // 440f5ada + CVTPS2PD X11, X11 // 450f5adb + //TODO: CVTPS2PI (BX), M2 // 0f2d13 + //TODO: CVTPS2PI (R11), M2 // 410f2d13 + //TODO: CVTPS2PI X2, M2 // 0f2dd2 + //TODO: CVTPS2PI X11, M2 // 410f2dd3 + //TODO: CVTPS2PI (BX), M3 // 0f2d1b + //TODO: CVTPS2PI (R11), M3 // 410f2d1b + //TODO: CVTPS2PI X2, M3 // 0f2dda + //TODO: CVTPS2PI X11, M3 // 410f2ddb + CVTSD2SL (BX), DX // f20f2d13 or f2480f2d13 + CVTSD2SL (R11), DX // f2410f2d13 or f2490f2d13 + CVTSD2SL X2, DX // f20f2dd2 or f2480f2dd2 + CVTSD2SL X11, DX // f2410f2dd3 or f2490f2dd3 + CVTSD2SL (BX), R11 // f2440f2d1b or f24c0f2d1b + CVTSD2SL (R11), R11 // f2450f2d1b or f24d0f2d1b + CVTSD2SL X2, R11 // f2440f2dda or f24c0f2dda + CVTSD2SL X11, R11 // f2450f2ddb or f24d0f2ddb + CVTSD2SS (BX), X2 // f20f5a13 + CVTSD2SS (R11), X2 // f2410f5a13 + CVTSD2SS X2, X2 
// f20f5ad2 + CVTSD2SS X11, X2 // f2410f5ad3 + CVTSD2SS (BX), X11 // f2440f5a1b + CVTSD2SS (R11), X11 // f2450f5a1b + CVTSD2SS X2, X11 // f2440f5ada + CVTSD2SS X11, X11 // f2450f5adb + CVTSL2SD (BX), X2 // f20f2a13 + CVTSL2SD (R11), X2 // f2410f2a13 + CVTSL2SD DX, X2 // f20f2ad2 + CVTSL2SD R11, X2 // f2410f2ad3 + CVTSL2SD (BX), X11 // f2440f2a1b + CVTSL2SD (R11), X11 // f2450f2a1b + CVTSL2SD DX, X11 // f2440f2ada + CVTSL2SD R11, X11 // f2450f2adb + CVTSQ2SD (BX), X2 // f2480f2a13 + CVTSQ2SD (R11), X2 // f2490f2a13 + CVTSQ2SD DX, X2 // f2480f2ad2 + CVTSQ2SD R11, X2 // f2490f2ad3 + CVTSQ2SD (BX), X11 // f24c0f2a1b + CVTSQ2SD (R11), X11 // f24d0f2a1b + CVTSQ2SD DX, X11 // f24c0f2ada + CVTSQ2SD R11, X11 // f24d0f2adb + CVTSL2SS (BX), X2 // f30f2a13 + CVTSL2SS (R11), X2 // f3410f2a13 + CVTSL2SS DX, X2 // f30f2ad2 + CVTSL2SS R11, X2 // f3410f2ad3 + CVTSL2SS (BX), X11 // f3440f2a1b + CVTSL2SS (R11), X11 // f3450f2a1b + CVTSL2SS DX, X11 // f3440f2ada + CVTSL2SS R11, X11 // f3450f2adb + CVTSQ2SS (BX), X2 // f3480f2a13 + CVTSQ2SS (R11), X2 // f3490f2a13 + CVTSQ2SS DX, X2 // f3480f2ad2 + CVTSQ2SS R11, X2 // f3490f2ad3 + CVTSQ2SS (BX), X11 // f34c0f2a1b + CVTSQ2SS (R11), X11 // f34d0f2a1b + CVTSQ2SS DX, X11 // f34c0f2ada + CVTSQ2SS R11, X11 // f34d0f2adb + CVTSS2SD (BX), X2 // f30f5a13 + CVTSS2SD (R11), X2 // f3410f5a13 + CVTSS2SD X2, X2 // f30f5ad2 + CVTSS2SD X11, X2 // f3410f5ad3 + CVTSS2SD (BX), X11 // f3440f5a1b + CVTSS2SD (R11), X11 // f3450f5a1b + CVTSS2SD X2, X11 // f3440f5ada + CVTSS2SD X11, X11 // f3450f5adb + CVTSS2SL (BX), DX // f30f2d13 or f3480f2d13 + CVTSS2SL (R11), DX // f3410f2d13 or f3490f2d13 + CVTSS2SL X2, DX // f30f2dd2 or f3480f2dd2 + CVTSS2SL X11, DX // f3410f2dd3 or f3490f2dd3 + CVTSS2SL (BX), R11 // f3440f2d1b or f34c0f2d1b + CVTSS2SL (R11), R11 // f3450f2d1b or f34d0f2d1b + CVTSS2SL X2, R11 // f3440f2dda or f34c0f2dda + CVTSS2SL X11, R11 // f3450f2ddb or f34d0f2ddb + CVTTPD2PL (BX), X2 // 660fe613 + CVTTPD2PL (R11), X2 // 66410fe613 + CVTTPD2PL X2, X2 
// 660fe6d2 + CVTTPD2PL X11, X2 // 66410fe6d3 + CVTTPD2PL (BX), X11 // 66440fe61b + CVTTPD2PL (R11), X11 // 66450fe61b + CVTTPD2PL X2, X11 // 66440fe6da + CVTTPD2PL X11, X11 // 66450fe6db + //TODO: CVTTPD2PI (BX), M2 // 660f2c13 + //TODO: CVTTPD2PI (R11), M2 // 66410f2c13 + //TODO: CVTTPD2PI X2, M2 // 660f2cd2 + //TODO: CVTTPD2PI X11, M2 // 66410f2cd3 + //TODO: CVTTPD2PI (BX), M3 // 660f2c1b + //TODO: CVTTPD2PI (R11), M3 // 66410f2c1b + //TODO: CVTTPD2PI X2, M3 // 660f2cda + //TODO: CVTTPD2PI X11, M3 // 66410f2cdb + CVTTPS2PL (BX), X2 // f30f5b13 + CVTTPS2PL (R11), X2 // f3410f5b13 + CVTTPS2PL X2, X2 // f30f5bd2 + CVTTPS2PL X11, X2 // f3410f5bd3 + CVTTPS2PL (BX), X11 // f3440f5b1b + CVTTPS2PL (R11), X11 // f3450f5b1b + CVTTPS2PL X2, X11 // f3440f5bda + CVTTPS2PL X11, X11 // f3450f5bdb + //TODO: CVTTPS2PI (BX), M2 // 0f2c13 + //TODO: CVTTPS2PI (R11), M2 // 410f2c13 + //TODO: CVTTPS2PI X2, M2 // 0f2cd2 + //TODO: CVTTPS2PI X11, M2 // 410f2cd3 + //TODO: CVTTPS2PI (BX), M3 // 0f2c1b + //TODO: CVTTPS2PI (R11), M3 // 410f2c1b + //TODO: CVTTPS2PI X2, M3 // 0f2cda + //TODO: CVTTPS2PI X11, M3 // 410f2cdb + CVTTSD2SL (BX), DX // f20f2c13 or f2480f2c13 + CVTTSD2SL (R11), DX // f2410f2c13 or f2490f2c13 + CVTTSD2SL X2, DX // f20f2cd2 or f2480f2cd2 + CVTTSD2SL X11, DX // f2410f2cd3 or f2490f2cd3 + CVTTSD2SL (BX), R11 // f2440f2c1b or f24c0f2c1b + CVTTSD2SL (R11), R11 // f2450f2c1b or f24d0f2c1b + CVTTSD2SL X2, R11 // f2440f2cda or f24c0f2cda + CVTTSD2SL X11, R11 // f2450f2cdb or f24d0f2cdb + CVTTSS2SL (BX), DX // f30f2c13 or f3480f2c13 + CVTTSS2SL (R11), DX // f3410f2c13 or f3490f2c13 + CVTTSS2SL X2, DX // f30f2cd2 or f3480f2cd2 + CVTTSS2SL X11, DX // f3410f2cd3 or f3490f2cd3 + CVTTSS2SL (BX), R11 // f3440f2c1b or f34c0f2c1b + CVTTSS2SL (R11), R11 // f3450f2c1b or f34d0f2c1b + CVTTSS2SL X2, R11 // f3440f2cda or f34c0f2cda + CVTTSS2SL X11, R11 // f3450f2cdb or f34d0f2cdb + CWD // 6699 + CWDE // 98 + DECW (BX) // 66ff0b + DECW (R11) // 6641ff0b + DECW DX // 66ffca + DECW R11 // 
6641ffcb + DECL (BX) // ff0b + DECL (R11) // 41ff0b + DECL DX // ffca + DECL R11 // 41ffcb + DECQ (BX) // 48ff0b + DECQ (R11) // 49ff0b + DECQ DX // 48ffca + DECQ R11 // 49ffcb + DECB (BX) // fe0b + DECB (R11) // 41fe0b + DECB DL // feca + DECB R11 // 41fecb + DIVW (BX) // 66f733 + DIVW (R11) // 6641f733 + DIVW DX // 66f7f2 + DIVW R11 // 6641f7f3 + DIVL (BX) // f733 + DIVL (R11) // 41f733 + DIVL DX // f7f2 + DIVL R11 // 41f7f3 + DIVQ (BX) // 48f733 + DIVQ (R11) // 49f733 + DIVQ DX // 48f7f2 + DIVQ R11 // 49f7f3 + DIVB (BX) // f633 + DIVB (R11) // 41f633 + DIVB DL // f6f2 + DIVB R11 // 41f6f3 + DIVPD (BX), X2 // 660f5e13 + DIVPD (R11), X2 // 66410f5e13 + DIVPD X2, X2 // 660f5ed2 + DIVPD X11, X2 // 66410f5ed3 + DIVPD (BX), X11 // 66440f5e1b + DIVPD (R11), X11 // 66450f5e1b + DIVPD X2, X11 // 66440f5eda + DIVPD X11, X11 // 66450f5edb + DIVPS (BX), X2 // 0f5e13 + DIVPS (R11), X2 // 410f5e13 + DIVPS X2, X2 // 0f5ed2 + DIVPS X11, X2 // 410f5ed3 + DIVPS (BX), X11 // 440f5e1b + DIVPS (R11), X11 // 450f5e1b + DIVPS X2, X11 // 440f5eda + DIVPS X11, X11 // 450f5edb + DIVSD (BX), X2 // f20f5e13 + DIVSD (R11), X2 // f2410f5e13 + DIVSD X2, X2 // f20f5ed2 + DIVSD X11, X2 // f2410f5ed3 + DIVSD (BX), X11 // f2440f5e1b + DIVSD (R11), X11 // f2450f5e1b + DIVSD X2, X11 // f2440f5eda + DIVSD X11, X11 // f2450f5edb + DIVSS (BX), X2 // f30f5e13 + DIVSS (R11), X2 // f3410f5e13 + DIVSS X2, X2 // f30f5ed2 + DIVSS X11, X2 // f3410f5ed3 + DIVSS (BX), X11 // f3440f5e1b + DIVSS (R11), X11 // f3450f5e1b + DIVSS X2, X11 // f3440f5eda + DIVSS X11, X11 // f3450f5edb + DPPD $7, (BX), X2 // 660f3a411307 + DPPD $7, (R11), X2 // 66410f3a411307 + DPPD $7, X2, X2 // 660f3a41d207 + DPPD $7, X11, X2 // 66410f3a41d307 + DPPD $7, (BX), X11 // 66440f3a411b07 + DPPD $7, (R11), X11 // 66450f3a411b07 + DPPD $7, X2, X11 // 66440f3a41da07 + DPPD $7, X11, X11 // 66450f3a41db07 + DPPS $7, (BX), X2 // 660f3a401307 + DPPS $7, (R11), X2 // 66410f3a401307 + DPPS $7, X2, X2 // 660f3a40d207 + DPPS $7, X11, X2 // 
66410f3a40d307 + DPPS $7, (BX), X11 // 66440f3a401b07 + DPPS $7, (R11), X11 // 66450f3a401b07 + DPPS $7, X2, X11 // 66440f3a40da07 + DPPS $7, X11, X11 // 66450f3a40db07 + EMMS // 0f77 + //TODO: ENTERQ $0x12, $0xf123 // c823f112 + EXTRACTPS $0, X2, (BX) // 660f3a171300 + EXTRACTPS $1, X11, (BX) // 66440f3a171b01 + EXTRACTPS $2, X2, (R11) // 66410f3a171302 + EXTRACTPS $3, X11, (R11) // 66450f3a171b03 + EXTRACTPS $3, X2, DX // 660f3a17d203 + EXTRACTPS $2, X11, DX // 66440f3a17da02 + EXTRACTPS $1, X2, R11 // 66410f3a17d301 + EXTRACTPS $0, X11, R11 // 66450f3a17db00 + F2XM1 // d9f0 + FABS // d9e1 + FADDD F2, F0 // d8c2 + FADDD F3, F0 // d8c3 + FADDD F0, F2 // dcc2 + FADDD F0, F3 // dcc3 + FADDD (BX), F0 // d803 or dc03 + FADDD (R11), F0 // 41d803 or 41dc03 + FADDDP F0, F2 // dec2 + FADDDP F0, F3 // dec3 + FBLD (BX) // df23 + FBLD (R11) // 41df23 + FBSTP (BX) // df33 + FBSTP (R11) // 41df33 + FCHS // d9e0 + FCMOVB F2, F0 // dac2 + FCMOVB F3, F0 // dac3 + FCMOVBE F2, F0 // dad2 + FCMOVBE F3, F0 // dad3 + FCMOVE F2, F0 // daca + FCMOVE F3, F0 // dacb + FCMOVNB F2, F0 // dbc2 + FCMOVNB F3, F0 // dbc3 + FCMOVNBE F2, F0 // dbd2 + FCMOVNBE F3, F0 // dbd3 + FCMOVNE F2, F0 // dbca + FCMOVNE F3, F0 // dbcb + FCMOVNU F2, F0 // dbda + FCMOVNU F3, F0 // dbdb + FCMOVU F2, F0 // dada + FCMOVU F3, F0 // dadb + FCOMD F2, F0 // d8d2 + FCOMD F3, F0 // d8d3 + FCOMD (BX), F0 // d813 or dc13 + FCOMD (R11), F0 // 41d813 or 41dc13 + FCOMI F2, F0 // dbf2 + FCOMI F3, F0 // dbf3 + FCOMIP F2, F0 // dff2 + FCOMIP F3, F0 // dff3 + //TODO: FCOMP F2 // d8da + //TODO: FCOMP F3 // d8db + //TODO: FCOMFP (BX) // d81b + //TODO: FCOMFP (R11) // 41d81b + //TODO: FCOMPL (BX) // dc1b + //TODO: FCOMPL (R11) // 41dc1b + //TODO: FCOMPP // ded9 + FCOS // d9ff + FDECSTP // d9f6 + FDIVD F2, F0 // d8f2 + FDIVD F3, F0 // d8f3 + FDIVD F0, F2 // dcfa or dcf2 + FDIVD F0, F3 // dcfb or dcf3 + FDIVD (BX), F0 // d833 or dc33 + FDIVD (R11), F0 // 41d833 or 41dc33 + //TODO: FDIVRP F0, F2 // defa + //TODO: FDIVRP F0, F3 // 
defb + //TODO: FDIVR F2, F0 // d8fa + //TODO: FDIVR F3, F0 // d8fb + //TODO: FDIVFR (BX) // d83b + //TODO: FDIVFR (R11) // 41d83b + //TODO: FDIVRL (BX) // dc3b + //TODO: FDIVRL (R11) // 41dc3b + //TODO: FDIVP F0, F2 // def2 + //TODO: FDIVP F0, F3 // def3 + //TODO: FFREE F2 // ddc2 + //TODO: FFREE F3 // ddc3 + //TODO: FFREEP F2 // dfc2 + //TODO: FFREEP F3 // dfc3 + //TODO: FIADD (BX) // de03 + //TODO: FIADD (R11) // 41de03 + //TODO: FIADDL (BX) // da03 + //TODO: FIADDL (R11) // 41da03 + //TODO: FICOM (BX) // de13 + //TODO: FICOM (R11) // 41de13 + //TODO: FICOML (BX) // da13 + //TODO: FICOML (R11) // 41da13 + //TODO: FICOMP (BX) // de1b + //TODO: FICOMP (R11) // 41de1b + //TODO: FICOMPL (BX) // da1b + //TODO: FICOMPL (R11) // 41da1b + //TODO: FIDIV (BX) // de33 + //TODO: FIDIV (R11) // 41de33 + //TODO: FIDIVL (BX) // da33 + //TODO: FIDIVL (R11) // 41da33 + //TODO: FIDIVR (BX) // de3b + //TODO: FIDIVR (R11) // 41de3b + //TODO: FIDIVRL (BX) // da3b + //TODO: FIDIVRL (R11) // 41da3b + //TODO: FILD (BX) // df03 + //TODO: FILD (R11) // 41df03 + //TODO: FILDL (BX) // db03 + //TODO: FILDL (R11) // 41db03 + //TODO: FILDLL (BX) // df2b + //TODO: FILDLL (R11) // 41df2b + //TODO: FIMUL (BX) // de0b + //TODO: FIMUL (R11) // 41de0b + //TODO: FIMULL (BX) // da0b + //TODO: FIMULL (R11) // 41da0b + FINCSTP // d9f7 + //TODO: FIST (BX) // df13 + //TODO: FIST (R11) // 41df13 + //TODO: FISTL (BX) // db13 + //TODO: FISTL (R11) // 41db13 + //TODO: FISTP (BX) // df1b + //TODO: FISTP (R11) // 41df1b + //TODO: FISTPL (BX) // db1b + //TODO: FISTPL (R11) // 41db1b + //TODO: FISTPLL (BX) // df3b + //TODO: FISTPLL (R11) // 41df3b + //TODO: FISTTP (BX) // df0b + //TODO: FISTTP (R11) // 41df0b + //TODO: FISTTPL (BX) // db0b + //TODO: FISTTPL (R11) // 41db0b + //TODO: FISTTPLL (BX) // dd0b + //TODO: FISTTPLL (R11) // 41dd0b + //TODO: FISUB (BX) // de23 + //TODO: FISUB (R11) // 41de23 + //TODO: FISUBL (BX) // da23 + //TODO: FISUBL (R11) // 41da23 + //TODO: FISUBR (BX) // de2b + //TODO: FISUBR (R11) 
// 41de2b + //TODO: FISUBRL (BX) // da2b + //TODO: FISUBRL (R11) // 41da2b + //TODO: FLD F2 // d9c2 + //TODO: FLD F3 // d9c3 + //TODO: FLDS (BX) // d903 + //TODO: FLDS (R11) // 41d903 + //TODO: FLDL (BX) // dd03 + //TODO: FLDL (R11) // 41dd03 + //TODO: FLDT (BX) // db2b + //TODO: FLDT (R11) // 41db2b + FLD1 // d9e8 + FLDCW (BX) // d92b + FLDCW (R11) // 41d92b + //TODO: FLDENVL (BX) // d923 + //TODO: FLDENVL (R11) // 41d923 + FLDL2E // d9ea + FLDL2T // d9e9 + FLDLG2 // d9ec + FLDPI // d9eb + //TODO: FMUL F2, F0 // d8ca + //TODO: FMUL F3, F0 // d8cb + //TODO: FMUL F0, F2 // dcca + //TODO: FMUL F0, F3 // dccb + //TODO: FMULS (BX) // d80b + //TODO: FMULS (R11) // 41d80b + //TODO: FMULL (BX) // dc0b + //TODO: FMULL (R11) // 41dc0b + //TODO: FMULP F0, F2 // deca + //TODO: FMULP F0, F3 // decb + //TODO: FNCLEX // dbe2 + //TODO: FNINIT // dbe3 + FNOP // d9d0 + //TODO: FNSAVEL (BX) // dd33 + //TODO: FNSAVEL (R11) // 41dd33 + //TODO: FNSTCW (BX) // d93b + //TODO: FNSTCW (R11) // 41d93b + //TODO: FNSTENVL (BX) // d933 + //TODO: FNSTENVL (R11) // 41d933 + //TODO: FNSTSW AX // dfe0 + //TODO: FNSTSW (BX) // dd3b + //TODO: FNSTSW (R11) // 41dd3b + FPATAN // d9f3 + FPREM // d9f8 + FPREM1 // d9f5 + FPTAN // d9f2 + FRNDINT // d9fc + //TODO: FRSTORL (BX) // dd23 + //TODO: FRSTORL (R11) // 41dd23 + FSCALE // d9fd + FSIN // d9fe + FSINCOS // d9fb + FSQRT // d9fa + //TODO: FST F2 // ddd2 + //TODO: FST F3 // ddd3 + //TODO: FSTS (BX) // d913 + //TODO: FSTS (R11) // 41d913 + //TODO: FSTL (BX) // dd13 + //TODO: FSTL (R11) // 41dd13 + //TODO: FSTP F2 // ddda + //TODO: FSTP F3 // dddb + //TODO: FSTPS (BX) // d91b + //TODO: FSTPS (R11) // 41d91b + //TODO: FSTPL (BX) // dd1b + //TODO: FSTPL (R11) // 41dd1b + //TODO: FSTPT (BX) // db3b + //TODO: FSTPT (R11) // 41db3b + //TODO: FSUB F2, F0 // d8e2 + //TODO: FSUB F3, F0 // d8e3 + //TODO: FSUBR F0, F2 // dcea + //TODO: FSUBR F0, F3 // dceb + //TODO: FSUBS (BX) // d823 + //TODO: FSUBS (R11) // 41d823 + //TODO: FSUBL (BX) // dc23 + //TODO: FSUBL 
(R11) // 41dc23 + //TODO: FSUBRP F0, F2 // deea + //TODO: FSUBRP F0, F3 // deeb + //TODO: FSUBR F2, F0 // d8ea + //TODO: FSUBR F3, F0 // d8eb + //TODO: FSUB F0, F2 // dce2 + //TODO: FSUB F0, F3 // dce3 + //TODO: FSUBRS (BX) // d82b + //TODO: FSUBRS (R11) // 41d82b + //TODO: FSUBRL (BX) // dc2b + //TODO: FSUBRL (R11) // 41dc2b + //TODO: FSUBP F0, F2 // dee2 + //TODO: FSUBP F0, F3 // dee3 + FTST // d9e4 + //TODO: FUCOM F2 // dde2 + //TODO: FUCOM F3 // dde3 + //TODO: FUCOMI F2, F0 // dbea + //TODO: FUCOMI F3, F0 // dbeb + //TODO: FUCOMIP F2, F0 // dfea + //TODO: FUCOMIP F3, F0 // dfeb + //TODO: FUCOMP F2 // ddea + //TODO: FUCOMP F3 // ddeb + //TODO: FUCOMPP // dae9 + //TODO: FWAIT // 9b + FXAM // d9e5 + //TODO: FXCH F2 // d9ca + //TODO: FXCH F3 // d9cb + FXRSTOR (BX) // 0fae0b + FXRSTOR (R11) // 410fae0b + FXRSTOR64 (BX) // 480fae0b + FXRSTOR64 (R11) // 490fae0b + FXSAVE (BX) // 0fae03 + FXSAVE (R11) // 410fae03 + FXSAVE64 (BX) // 480fae03 + FXSAVE64 (R11) // 490fae03 + FXTRACT // d9f4 + FYL2X // d9f1 + FYL2XP1 // d9f9 + HADDPD (BX), X2 // 660f7c13 + HADDPD (R11), X2 // 66410f7c13 + HADDPD X2, X2 // 660f7cd2 + HADDPD X11, X2 // 66410f7cd3 + HADDPD (BX), X11 // 66440f7c1b + HADDPD (R11), X11 // 66450f7c1b + HADDPD X2, X11 // 66440f7cda + HADDPD X11, X11 // 66450f7cdb + HADDPS (BX), X2 // f20f7c13 + HADDPS (R11), X2 // f2410f7c13 + HADDPS X2, X2 // f20f7cd2 + HADDPS X11, X2 // f2410f7cd3 + HADDPS (BX), X11 // f2440f7c1b + HADDPS (R11), X11 // f2450f7c1b + HADDPS X2, X11 // f2440f7cda + HADDPS X11, X11 // f2450f7cdb + HLT // f4 + HSUBPD (BX), X2 // 660f7d13 + HSUBPD (R11), X2 // 66410f7d13 + HSUBPD X2, X2 // 660f7dd2 + HSUBPD X11, X2 // 66410f7dd3 + HSUBPD (BX), X11 // 66440f7d1b + HSUBPD (R11), X11 // 66450f7d1b + HSUBPD X2, X11 // 66440f7dda + HSUBPD X11, X11 // 66450f7ddb + HSUBPS (BX), X2 // f20f7d13 + HSUBPS (R11), X2 // f2410f7d13 + HSUBPS X2, X2 // f20f7dd2 + HSUBPS X11, X2 // f2410f7dd3 + HSUBPS (BX), X11 // f2440f7d1b + HSUBPS (R11), X11 // f2450f7d1b + HSUBPS 
X2, X11 // f2440f7dda + HSUBPS X11, X11 // f2450f7ddb + ICEBP // f1 + IDIVW (BX) // 66f73b + IDIVW (R11) // 6641f73b + IDIVW DX // 66f7fa + IDIVW R11 // 6641f7fb + IDIVL (BX) // f73b + IDIVL (R11) // 41f73b + IDIVL DX // f7fa + IDIVL R11 // 41f7fb + IDIVQ (BX) // 48f73b + IDIVQ (R11) // 49f73b + IDIVQ DX // 48f7fa + IDIVQ R11 // 49f7fb + IDIVB (BX) // f63b + IDIVB (R11) // 41f63b + IDIVB DL // f6fa + IDIVB R11 // 41f6fb + IMULW (BX) // 66f72b + IMULW (R11) // 6641f72b + IMULW DX // 66f7ea + IMULW R11 // 6641f7eb + IMULL (BX) // f72b + IMULL (R11) // 41f72b + IMULL DX // f7ea + IMULL R11 // 41f7eb + IMULQ (BX) // 48f72b + IMULQ (R11) // 49f72b + IMULQ DX // 48f7ea + IMULQ R11 // 49f7eb + IMULB (BX) // f62b + IMULB (R11) // 41f62b + IMULB DL // f6ea + IMULB R11 // 41f6eb + IMULW (BX), DX // 660faf13 + IMULW (R11), DX // 66410faf13 + IMULW DX, DX // 660fafd2 + IMULW R11, DX // 66410fafd3 + IMULW (BX), R11 // 66440faf1b + IMULW (R11), R11 // 66450faf1b + IMULW DX, R11 // 66440fafda + IMULW R11, R11 // 66450fafdb + IMUL3W $61731, (BX), DX // 66691323f1 + IMUL3W $61731, (R11), DX // 6641691323f1 + IMUL3W $61731, DX, DX // 6669d223f1 + IMUL3W $61731, R11, DX // 664169d323f1 + IMUL3W $61731, (BX), R11 // 6644691b23f1 + IMUL3W $61731, (R11), R11 // 6645691b23f1 + IMUL3W $61731, DX, R11 // 664469da23f1 + IMUL3W $61731, R11, R11 // 664569db23f1 + IMUL3W $7, (BX), DX // 666b1307 + IMUL3W $7, (R11), DX // 66416b1307 + IMUL3W $7, DX, DX // 666bd207 + IMUL3W $7, R11, DX // 66416bd307 + IMUL3W $7, (BX), R11 // 66446b1b07 + IMUL3W $7, (R11), R11 // 66456b1b07 + IMUL3W $7, DX, R11 // 66446bda07 + IMUL3W $7, R11, R11 // 66456bdb07 + IMULL (BX), DX // 0faf13 + IMULL (R11), DX // 410faf13 + IMULL DX, DX // 0fafd2 + IMULL R11, DX // 410fafd3 + IMULL (BX), R11 // 440faf1b + IMULL (R11), R11 // 450faf1b + IMULL DX, R11 // 440fafda + IMULL R11, R11 // 450fafdb + IMUL3L $4045620583, (BX), DX // 6913674523f1 + IMUL3L $4045620583, (R11), DX // 416913674523f1 + IMUL3L $4045620583, DX, DX // 
69d2674523f1 + IMUL3L $4045620583, R11, DX // 4169d3674523f1 + IMUL3L $4045620583, (BX), R11 // 44691b674523f1 + IMUL3L $4045620583, (R11), R11 // 45691b674523f1 + IMUL3L $4045620583, DX, R11 // 4469da674523f1 + IMUL3L $4045620583, R11, R11 // 4569db674523f1 + IMUL3L $7, (BX), DX // 6b1307 + IMUL3L $7, (R11), DX // 416b1307 + IMUL3L $7, DX, DX // 6bd207 + IMUL3L $7, R11, DX // 416bd307 + IMUL3L $7, (BX), R11 // 446b1b07 + IMUL3L $7, (R11), R11 // 456b1b07 + IMUL3L $7, DX, R11 // 446bda07 + IMUL3L $7, R11, R11 // 456bdb07 + IMULQ (BX), DX // 480faf13 + IMULQ (R11), DX // 490faf13 + IMULQ DX, DX // 480fafd2 + IMULQ R11, DX // 490fafd3 + IMULQ (BX), R11 // 4c0faf1b + IMULQ (R11), R11 // 4d0faf1b + IMULQ DX, R11 // 4c0fafda + IMULQ R11, R11 // 4d0fafdb + IMUL3Q $-249346713, (BX), DX // 486913674523f1 + IMUL3Q $-249346713, (R11), DX // 496913674523f1 + IMUL3Q $-249346713, DX, DX // 4869d2674523f1 + IMUL3Q $-249346713, R11, DX // 4969d3674523f1 + IMUL3Q $-249346713, (BX), R11 // 4c691b674523f1 + IMUL3Q $-249346713, (R11), R11 // 4d691b674523f1 + IMUL3Q $-249346713, DX, R11 // 4c69da674523f1 + IMUL3Q $-249346713, R11, R11 // 4d69db674523f1 + IMUL3Q $7, (BX), DX // 486b1307 + IMUL3Q $7, (R11), DX // 496b1307 + IMUL3Q $7, DX, DX // 486bd207 + IMUL3Q $7, R11, DX // 496bd307 + IMUL3Q $7, (BX), R11 // 4c6b1b07 + IMUL3Q $7, (R11), R11 // 4d6b1b07 + IMUL3Q $7, DX, R11 // 4c6bda07 + IMUL3Q $7, R11, R11 // 4d6bdb07 + //TODO: INB DX, AL // ec + //TODO: INB $7, AL // e407 + //TODO: INW DX, AX // 66ed + //TODO: INW $7, AX // 66e507 + //TODO: INL DX, AX // ed + //TODO: INL $7, AX // e507 + INCW (BX) // 66ff03 + INCW (R11) // 6641ff03 + INCW DX // 66ffc2 + INCW R11 // 6641ffc3 + INCL (BX) // ff03 + INCL (R11) // 41ff03 + INCL DX // ffc2 + INCL R11 // 41ffc3 + INCQ (BX) // 48ff03 + INCQ (R11) // 49ff03 + INCQ DX // 48ffc2 + INCQ R11 // 49ffc3 + INCB (BX) // fe03 + INCB (R11) // 41fe03 + INCB DL // fec2 + INCB R11 // 41fec3 + INSB // 6c + INSL // 6d + INSERTPS $7, (BX), X2 // 
660f3a211307 + INSERTPS $7, (R11), X2 // 66410f3a211307 + INSERTPS $7, X2, X2 // 660f3a21d207 + INSERTPS $7, X11, X2 // 66410f3a21d307 + INSERTPS $7, (BX), X11 // 66440f3a211b07 + INSERTPS $7, (R11), X11 // 66450f3a211b07 + INSERTPS $7, X2, X11 // 66440f3a21da07 + INSERTPS $7, X11, X11 // 66450f3a21db07 + INSW // 666d + //TODO: INT $3 // cc + INT $7 // cd07 + INVD // 0f08 + INVLPG (BX) // 0f013b + INVLPG (R11) // 410f013b + INVPCID (BX), DX // 660f388213 + INVPCID (R11), DX // 66410f388213 + INVPCID (BX), R11 // 66440f38821b + INVPCID (R11), R11 // 66450f38821b + JCS 2(PC) + IRETW // 66cf + JCS 2(PC) + IRETL // cf + JCS 2(PC) + IRETQ // 48cf + //TODO: JA .+$0x11223344 // 480f8744332211 or 0f8744332211 + //TODO: JA .+$0x11 // 7711 + //TODO: JAE .+$0x11223344 // 0f8344332211 or 480f8344332211 + //TODO: JAE .+$0x11 // 7311 + //TODO: JB .+$0x11223344 // 480f8244332211 or 0f8244332211 + //TODO: JB .+$0x11 // 7211 + //TODO: JBE .+$0x11223344 // 0f8644332211 or 480f8644332211 + //TODO: JBE .+$0x11 // 7611 + //TODO: JE .+$0x11223344 // 480f8444332211 or 0f8444332211 + //TODO: JE .+$0x11 // 7411 + //TODO: JECXZ .+$0x11 // e311 + //TODO: JG .+$0x11223344 // 0f8f44332211 or 480f8f44332211 + //TODO: JG .+$0x11 // 7f11 + //TODO: JGE .+$0x11223344 // 480f8d44332211 or 0f8d44332211 + //TODO: JGE .+$0x11 // 7d11 + //TODO: JL .+$0x11223344 // 0f8c44332211 or 480f8c44332211 + //TODO: JL .+$0x11 // 7c11 + //TODO: JLE .+$0x11223344 // 0f8e44332211 or 480f8e44332211 + //TODO: JLE .+$0x11 // 7e11 + JCS 2(PC) + //TODO: JMPQ* (BX) // ff23 + JCS 2(PC) + //TODO: JMPQ* (R11) // 41ff23 + JCS 2(PC) + //TODO: JMPQ* DX // ffe2 + JCS 2(PC) + //TODO: JMPQ* R11 // 41ffe3 + JCS 2(PC) + //TODO: JMP .+$0x11223344 // 48e944332211 or e944332211 + JCS 2(PC) + JCS 2(PC) + //TODO: JMP .+$0x11 // eb11 + JCS 2(PC) + //TODO: LJMPW* (BX) // 66ff2b + JCS 2(PC) + //TODO: LJMPW* (R11) // 6641ff2b + JCS 2(PC) + //TODO: LJMPL* (BX) // ff2b + JCS 2(PC) + //TODO: LJMPL* (R11) // 41ff2b + JCS 2(PC) + //TODO: LJMPQ* 
(BX) // 48ff2b + JCS 2(PC) + //TODO: LJMPQ* (R11) // 49ff2b + //TODO: JNE .+$0x11223344 // 480f8544332211 or 0f8544332211 + //TODO: JNE .+$0x11 // 7511 + //TODO: JNO .+$0x11223344 // 480f8144332211 or 0f8144332211 + //TODO: JNO .+$0x11 // 7111 + //TODO: JNP .+$0x11223344 // 480f8b44332211 or 0f8b44332211 + //TODO: JNP .+$0x11 // 7b11 + //TODO: JNS .+$0x11223344 // 0f8944332211 or 480f8944332211 + //TODO: JNS .+$0x11 // 7911 + //TODO: JO .+$0x11223344 // 0f8044332211 or 480f8044332211 + //TODO: JO .+$0x11 // 7011 + //TODO: JP .+$0x11223344 // 480f8a44332211 or 0f8a44332211 + //TODO: JP .+$0x11 // 7a11 + //TODO: JRCXZ .+$0x11 // e311 + //TODO: JS .+$0x11223344 // 480f8844332211 or 0f8844332211 + //TODO: JS .+$0x11 // 7811 + LAHF // 9f + LARW (BX), DX // 660f0213 + LARW (R11), DX // 66410f0213 + LARW DX, DX // 660f02d2 + LARW R11, DX // 66410f02d3 + LARW (BX), R11 // 66440f021b + LARW (R11), R11 // 66450f021b + LARW DX, R11 // 66440f02da + LARW R11, R11 // 66450f02db + LARL (BX), DX // 0f0213 + LARL (R11), DX // 410f0213 + LARL DX, DX // 0f02d2 + LARL R11, DX // 410f02d3 + LARL (BX), R11 // 440f021b + LARL (R11), R11 // 450f021b + LARL DX, R11 // 440f02da + LARL R11, R11 // 450f02db + LARQ (BX), DX // 480f0213 + LARQ (R11), DX // 490f0213 + LARQ DX, DX // 480f02d2 + LARQ R11, DX // 490f02d3 + LARQ (BX), R11 // 4c0f021b + LARQ (R11), R11 // 4d0f021b + LARQ DX, R11 // 4c0f02da + LARQ R11, R11 // 4d0f02db + LDDQU (BX), X2 // f20ff013 + LDDQU (R11), X2 // f2410ff013 + LDDQU (BX), X11 // f2440ff01b + LDDQU (R11), X11 // f2450ff01b + LDMXCSR (BX) // 0fae13 + LDMXCSR (R11) // 410fae13 + LEAW (BX), DX // 668d13 + LEAW (R11), DX // 66418d13 + LEAW (BX), R11 // 66448d1b + LEAW (R11), R11 // 66458d1b + LEAL (BX), DX // 8d13 + LEAL (R11), DX // 418d13 + LEAL (BX), R11 // 448d1b + LEAL (R11), R11 // 458d1b + LEAQ (BX), DX // 488d13 + LEAQ (R11), DX // 498d13 + LEAQ (BX), R11 // 4c8d1b + LEAQ (R11), R11 // 4d8d1b + LEAVEQ // 66c9 or c9 + LFENCE // 0faee8 + LFSW (BX), DX // 660fb413 
+ LFSW (R11), DX // 66410fb413 + LFSW (BX), R11 // 66440fb41b + LFSW (R11), R11 // 66450fb41b + LFSL (BX), DX // 0fb413 + LFSL (R11), DX // 410fb413 + LFSL (BX), R11 // 440fb41b + LFSL (R11), R11 // 450fb41b + LFSQ (BX), DX // 480fb413 + LFSQ (R11), DX // 490fb413 + LFSQ (BX), R11 // 4c0fb41b + LFSQ (R11), R11 // 4d0fb41b + LGDT (BX) // 0f0113 + LGDT (R11) // 410f0113 + LGSW (BX), DX // 660fb513 + LGSW (R11), DX // 66410fb513 + LGSW (BX), R11 // 66440fb51b + LGSW (R11), R11 // 66450fb51b + LGSL (BX), DX // 0fb513 + LGSL (R11), DX // 410fb513 + LGSL (BX), R11 // 440fb51b + LGSL (R11), R11 // 450fb51b + LGSQ (BX), DX // 480fb513 + LGSQ (R11), DX // 490fb513 + LGSQ (BX), R11 // 4c0fb51b + LGSQ (R11), R11 // 4d0fb51b + LIDT (BX) // 0f011b + LIDT (R11) // 410f011b + LLDT (BX) // 0f0013 + LLDT (R11) // 410f0013 + LLDT DX // 0f00d2 + LLDT R11 // 410f00d3 + LMSW (BX) // 0f0133 + LMSW (R11) // 410f0133 + LMSW DX // 0f01f2 + LMSW R11 // 410f01f3 + LODSB // ac + LODSL // ad + LODSQ // 48ad + LODSW // 66ad + //TODO: LOOP .+$0x11 // e211 + //TODO: LOOPEQ .+$0x11 // e111 + //TODO: LOOPNE .+$0x11 // e011 + LSLW (BX), DX // 660f0313 + LSLW (R11), DX // 66410f0313 + LSLW DX, DX // 660f03d2 + LSLW R11, DX // 66410f03d3 + LSLW (BX), R11 // 66440f031b + LSLW (R11), R11 // 66450f031b + LSLW DX, R11 // 66440f03da + LSLW R11, R11 // 66450f03db + LSLL (BX), DX // 0f0313 + LSLL (R11), DX // 410f0313 + LSLL DX, DX // 0f03d2 + LSLL R11, DX // 410f03d3 + LSLL (BX), R11 // 440f031b + LSLL (R11), R11 // 450f031b + LSLL DX, R11 // 440f03da + LSLL R11, R11 // 450f03db + LSLQ (BX), DX // 480f0313 + LSLQ (R11), DX // 490f0313 + LSLQ DX, DX // 480f03d2 + LSLQ R11, DX // 490f03d3 + LSLQ (BX), R11 // 4c0f031b + LSLQ (R11), R11 // 4d0f031b + LSLQ DX, R11 // 4c0f03da + LSLQ R11, R11 // 4d0f03db + LSSW (BX), DX // 660fb213 + LSSW (R11), DX // 66410fb213 + LSSW (BX), R11 // 66440fb21b + LSSW (R11), R11 // 66450fb21b + LSSL (BX), DX // 0fb213 + LSSL (R11), DX // 410fb213 + LSSL (BX), R11 // 440fb21b + LSSL 
(R11), R11 // 450fb21b + LSSQ (BX), DX // 480fb213 + LSSQ (R11), DX // 490fb213 + LSSQ (BX), R11 // 4c0fb21b + LSSQ (R11), R11 // 4d0fb21b + LTR (BX) // 0f001b + LTR (R11) // 410f001b + LTR DX // 0f00da + LTR R11 // 410f00db + LZCNTW (BX), DX // 66f30fbd13 + LZCNTW (R11), DX // 66f3410fbd13 + LZCNTW DX, DX // 66f30fbdd2 + LZCNTW R11, DX // 66f3410fbdd3 + LZCNTW (BX), R11 // 66f3440fbd1b + LZCNTW (R11), R11 // 66f3450fbd1b + LZCNTW DX, R11 // 66f3440fbdda + LZCNTW R11, R11 // 66f3450fbddb + LZCNTL (BX), DX // f30fbd13 + LZCNTL (R11), DX // f3410fbd13 + LZCNTL DX, DX // f30fbdd2 + LZCNTL R11, DX // f3410fbdd3 + LZCNTL (BX), R11 // f3440fbd1b + LZCNTL (R11), R11 // f3450fbd1b + LZCNTL DX, R11 // f3440fbdda + LZCNTL R11, R11 // f3450fbddb + LZCNTQ (BX), DX // f3480fbd13 + LZCNTQ (R11), DX // f3490fbd13 + LZCNTQ DX, DX // f3480fbdd2 + LZCNTQ R11, DX // f3490fbdd3 + LZCNTQ (BX), R11 // f34c0fbd1b + LZCNTQ (R11), R11 // f34d0fbd1b + LZCNTQ DX, R11 // f34c0fbdda + LZCNTQ R11, R11 // f34d0fbddb + MASKMOVOU X2, X2 // 660ff7d2 + MASKMOVOU X11, X2 // 66410ff7d3 + MASKMOVOU X2, X11 // 66440ff7da + MASKMOVOU X11, X11 // 66450ff7db + MASKMOVQ M2, M2 // 0ff7d2 + MASKMOVQ M3, M2 // 0ff7d3 + MASKMOVQ M2, M3 // 0ff7da + MASKMOVQ M3, M3 // 0ff7db + MAXPD (BX), X2 // 660f5f13 + MAXPD (R11), X2 // 66410f5f13 + MAXPD X2, X2 // 660f5fd2 + MAXPD X11, X2 // 66410f5fd3 + MAXPD (BX), X11 // 66440f5f1b + MAXPD (R11), X11 // 66450f5f1b + MAXPD X2, X11 // 66440f5fda + MAXPD X11, X11 // 66450f5fdb + MAXPS (BX), X2 // 0f5f13 + MAXPS (R11), X2 // 410f5f13 + MAXPS X2, X2 // 0f5fd2 + MAXPS X11, X2 // 410f5fd3 + MAXPS (BX), X11 // 440f5f1b + MAXPS (R11), X11 // 450f5f1b + MAXPS X2, X11 // 440f5fda + MAXPS X11, X11 // 450f5fdb + MAXSD (BX), X2 // f20f5f13 + MAXSD (R11), X2 // f2410f5f13 + MAXSD X2, X2 // f20f5fd2 + MAXSD X11, X2 // f2410f5fd3 + MAXSD (BX), X11 // f2440f5f1b + MAXSD (R11), X11 // f2450f5f1b + MAXSD X2, X11 // f2440f5fda + MAXSD X11, X11 // f2450f5fdb + MAXSS (BX), X2 // f30f5f13 + MAXSS 
(R11), X2 // f3410f5f13 + MAXSS X2, X2 // f30f5fd2 + MAXSS X11, X2 // f3410f5fd3 + MAXSS (BX), X11 // f3440f5f1b + MAXSS (R11), X11 // f3450f5f1b + MAXSS X2, X11 // f3440f5fda + MAXSS X11, X11 // f3450f5fdb + MFENCE // 0faef0 + MINPD (BX), X2 // 660f5d13 + MINPD (R11), X2 // 66410f5d13 + MINPD X2, X2 // 660f5dd2 + MINPD X11, X2 // 66410f5dd3 + MINPD (BX), X11 // 66440f5d1b + MINPD (R11), X11 // 66450f5d1b + MINPD X2, X11 // 66440f5dda + MINPD X11, X11 // 66450f5ddb + MINPS (BX), X2 // 0f5d13 + MINPS (R11), X2 // 410f5d13 + MINPS X2, X2 // 0f5dd2 + MINPS X11, X2 // 410f5dd3 + MINPS (BX), X11 // 440f5d1b + MINPS (R11), X11 // 450f5d1b + MINPS X2, X11 // 440f5dda + MINPS X11, X11 // 450f5ddb + MINSD (BX), X2 // f20f5d13 + MINSD (R11), X2 // f2410f5d13 + MINSD X2, X2 // f20f5dd2 + MINSD X11, X2 // f2410f5dd3 + MINSD (BX), X11 // f2440f5d1b + MINSD (R11), X11 // f2450f5d1b + MINSD X2, X11 // f2440f5dda + MINSD X11, X11 // f2450f5ddb + MINSS (BX), X2 // f30f5d13 + MINSS (R11), X2 // f3410f5d13 + MINSS X2, X2 // f30f5dd2 + MINSS X11, X2 // f3410f5dd3 + MINSS (BX), X11 // f3440f5d1b + MINSS (R11), X11 // f3450f5d1b + MINSS X2, X11 // f3440f5dda + MINSS X11, X11 // f3450f5ddb + MONITOR // 0f01c8 + //TODO: MOVABSB 0x123456789abcdef1, AL // a0f1debc9a78563412 + //TODO: MOVW 0x123456789abcdef1, AX // 66a1f1debc9a78563412 + MOVQ DX, CR2 // 0f22d2 + MOVQ R11, CR2 // 410f22d3 + MOVQ DX, CR3 // 0f22da + MOVQ R11, CR3 // 410f22db + MOVQ DX, DR2 // 0f23d2 + MOVQ R11, DR2 // 410f23d3 + MOVQ DX, DR3 // 0f23da + MOVQ R11, DR3 // 410f23db + //TODO: MOVL 0x123456789abcdef1, AX // a1f1debc9a78563412 + //TODO: MOVQ 0x123456789abcdef1, AX // 48a1f1debc9a78563412 + //TODO: MOVW (BX), SS // 668e13 or 488e13 + //TODO: MOVW (R11), SS // 66418e13 or 498e13 + //TODO: MOVW DX, SS // 668ed2 or 488ed2 + //TODO: MOVW R11, SS // 66418ed3 or 498ed3 + //TODO: MOVW (BX), DS // 668e1b or 488e1b + //TODO: MOVW (R11), DS // 66418e1b or 498e1b + //TODO: MOVW DX, DS // 668eda or 488eda + //TODO: MOVW R11, DS 
// 66418edb or 498edb + //TODO: MOVL (BX), SS // 8e13 + //TODO: MOVL (R11), SS // 418e13 + //TODO: MOVL DX, SS // 8ed2 + //TODO: MOVL R11, SS // 418ed3 + //TODO: MOVL (BX), DS // 8e1b + //TODO: MOVL (R11), DS // 418e1b + //TODO: MOVL DX, DS // 8eda + //TODO: MOVL R11, DS // 418edb + //TODO: MOVW AX, 0x123456789abcdef1 // 66a3f1debc9a78563412 + //TODO: MOVL AX, 0x123456789abcdef1 // a3f1debc9a78563412 + //TODO: MOVQ AX, 0x123456789abcdef1 // 48a3f1debc9a78563412 + //TODO: MOVABSB AL, 0x123456789abcdef1 // a2f1debc9a78563412 + //TODO: MOVW SS, (BX) // 668c13 or 488c13 + //TODO: MOVW DS, (BX) // 668c1b or 488c1b + //TODO: MOVW SS, (R11) // 66418c13 or 498c13 + //TODO: MOVW DS, (R11) // 66418c1b or 498c1b + //TODO: MOVW SS, DX // 668cd2 or 488cd2 + //TODO: MOVW DS, DX // 668cda or 488cda + //TODO: MOVW SS, R11 // 66418cd3 or 498cd3 + //TODO: MOVW DS, R11 // 66418cdb or 498cdb + MOVW $61731, (BX) // 66c70323f1 + MOVW $61731, (R11) // 6641c70323f1 + MOVW $61731, DX // 66c7c223f1 or 66ba23f1 + MOVW $61731, R11 // 6641c7c323f1 or 6641bb23f1 + MOVW DX, (BX) // 668913 + MOVW R11, (BX) // 6644891b + MOVW DX, (R11) // 66418913 + MOVW R11, (R11) // 6645891b + MOVW DX, DX // 6689d2 or 668bd2 + MOVW R11, DX // 664489da or 66418bd3 + MOVW DX, R11 // 664189d3 or 66448bda + MOVW R11, R11 // 664589db or 66458bdb + //TODO: MOVL SS, (BX) // 8c13 + //TODO: MOVL DS, (BX) // 8c1b + //TODO: MOVL SS, (R11) // 418c13 + //TODO: MOVL DS, (R11) // 418c1b + //TODO: MOVL SS, DX // 8cd2 + //TODO: MOVL DS, DX // 8cda + //TODO: MOVL SS, R11 // 418cd3 + //TODO: MOVL DS, R11 // 418cdb + MOVL $4045620583, (BX) // c703674523f1 + MOVL $4045620583, (R11) // 41c703674523f1 + MOVL $4045620583, DX // c7c2674523f1 or ba674523f1 + MOVL $4045620583, R11 // 41c7c3674523f1 or 41bb674523f1 + MOVL DX, (BX) // 8913 + MOVL R11, (BX) // 44891b + MOVL DX, (R11) // 418913 + MOVL R11, (R11) // 45891b + MOVL DX, DX // 89d2 or 8bd2 + MOVL R11, DX // 4489da or 418bd3 + MOVL DX, R11 // 4189d3 or 448bda + MOVL R11, R11 // 
4589db or 458bdb + MOVQ $-249346713, (BX) // 48c703674523f1 + MOVQ $-249346713, (R11) // 49c703674523f1 + MOVQ $-249346713, DX // 48c7c2674523f1 + MOVQ $-249346713, R11 // 49c7c3674523f1 + MOVQ DX, (BX) // 488913 + MOVQ R11, (BX) // 4c891b + MOVQ DX, (R11) // 498913 + MOVQ R11, (R11) // 4d891b + MOVQ DX, DX // 4889d2 or 488bd2 + MOVQ R11, DX // 4c89da or 498bd3 + MOVQ DX, R11 // 4989d3 or 4c8bda + MOVQ R11, R11 // 4d89db or 4d8bdb + MOVB $7, (BX) // c60307 + MOVB $7, (R11) // 41c60307 + MOVB $7, DL // c6c207 or b207 + MOVB $7, R11 // 41c6c307 or 41b307 + MOVB DL, (BX) // 8813 + MOVB R11, (BX) // 44881b + MOVB DL, (R11) // 418813 + MOVB R11, (R11) // 45881b + MOVB DL, DL // 88d2 or 8ad2 + MOVB R11, DL // 4488da or 418ad3 + MOVB DL, R11 // 4188d3 or 448ada + MOVB R11, R11 // 4588db or 458adb + MOVW (BX), DX // 668b13 + MOVW (R11), DX // 66418b13 + MOVW (BX), R11 // 66448b1b + MOVW (R11), R11 // 66458b1b + MOVL (BX), DX // 8b13 + MOVL (R11), DX // 418b13 + MOVL (BX), R11 // 448b1b + MOVL (R11), R11 // 458b1b + MOVQ (BX), DX // 488b13 + MOVQ (R11), DX // 498b13 + MOVQ (BX), R11 // 4c8b1b + MOVQ (R11), R11 // 4d8b1b + MOVQ $-1070935975390360081, DX // 48baefcdab89674523f1 + MOVQ $-1070935975390360081, R11 // 49bbefcdab89674523f1 + MOVB (BX), DL // 8a13 + MOVB (R11), DL // 418a13 + MOVB (BX), R11 // 448a1b + MOVB (R11), R11 // 458a1b + MOVQ CR2, DX // 0f20d2 + MOVQ CR3, DX // 0f20da + MOVQ CR2, R11 // 410f20d3 + MOVQ CR3, R11 // 410f20db + MOVQ DR2, DX // 0f21d2 + MOVQ DR3, DX // 0f21da + MOVQ DR2, R11 // 410f21d3 + MOVQ DR3, R11 // 410f21db + MOVAPD (BX), X2 // 660f2813 + MOVAPD (R11), X2 // 66410f2813 + MOVAPD X2, X2 // 660f28d2 or 660f29d2 + MOVAPD X11, X2 // 66410f28d3 or 66440f29da + MOVAPD (BX), X11 // 66440f281b + MOVAPD (R11), X11 // 66450f281b + MOVAPD X2, X11 // 66440f28da or 66410f29d3 + MOVAPD X11, X11 // 66450f28db or 66450f29db + MOVAPD X2, (BX) // 660f2913 + MOVAPD X11, (BX) // 66440f291b + MOVAPD X2, (R11) // 66410f2913 + MOVAPD X11, (R11) // 66450f291b + 
MOVAPS (BX), X2 // 0f2813 + MOVAPS (R11), X2 // 410f2813 + MOVAPS X2, X2 // 0f28d2 or 0f29d2 + MOVAPS X11, X2 // 410f28d3 or 440f29da + MOVAPS (BX), X11 // 440f281b + MOVAPS (R11), X11 // 450f281b + MOVAPS X2, X11 // 440f28da or 410f29d3 + MOVAPS X11, X11 // 450f28db or 450f29db + MOVAPS X2, (BX) // 0f2913 + MOVAPS X11, (BX) // 440f291b + MOVAPS X2, (R11) // 410f2913 + MOVAPS X11, (R11) // 450f291b + MOVBEW DX, (BX) // 660f38f113 + MOVBEW R11, (BX) // 66440f38f11b + MOVBEW DX, (R11) // 66410f38f113 + MOVBEW R11, (R11) // 66450f38f11b + MOVBEW (BX), DX // 660f38f013 + MOVBEW (R11), DX // 66410f38f013 + MOVBEW (BX), R11 // 66440f38f01b + MOVBEW (R11), R11 // 66450f38f01b + MOVBEL DX, (BX) // 0f38f113 + MOVBEL R11, (BX) // 440f38f11b + MOVBEL DX, (R11) // 410f38f113 + MOVBEL R11, (R11) // 450f38f11b + MOVBEL (BX), DX // 0f38f013 + MOVBEL (R11), DX // 410f38f013 + MOVBEL (BX), R11 // 440f38f01b + MOVBEL (R11), R11 // 450f38f01b + MOVBEQ DX, (BX) // 480f38f113 + MOVBEQ R11, (BX) // 4c0f38f11b + MOVBEQ DX, (R11) // 490f38f113 + MOVBEQ R11, (R11) // 4d0f38f11b + MOVBEQ (BX), DX // 480f38f013 + MOVBEQ (R11), DX // 490f38f013 + MOVBEQ (BX), R11 // 4c0f38f01b + MOVBEQ (R11), R11 // 4d0f38f01b + MOVQ (BX), M2 // 0f6e13 or 0f6f13 or 480f6e13 + MOVQ (R11), M2 // 410f6e13 or 410f6f13 or 490f6e13 + MOVQ DX, M2 // 0f6ed2 or 480f6ed2 + MOVQ R11, M2 // 410f6ed3 or 490f6ed3 + MOVQ (BX), M3 // 0f6e1b or 0f6f1b or 480f6e1b + MOVQ (R11), M3 // 410f6e1b or 410f6f1b or 490f6e1b + MOVQ DX, M3 // 0f6eda or 480f6eda + MOVQ R11, M3 // 410f6edb or 490f6edb + MOVQ M2, (BX) // 0f7e13 or 0f7f13 or 480f7e13 + MOVQ M3, (BX) // 0f7e1b or 0f7f1b or 480f7e1b + MOVQ M2, (R11) // 410f7e13 or 410f7f13 or 490f7e13 + MOVQ M3, (R11) // 410f7e1b or 410f7f1b or 490f7e1b + MOVQ M2, DX // 0f7ed2 or 480f7ed2 + MOVQ M3, DX // 0f7eda or 480f7eda + MOVQ M2, R11 // 410f7ed3 or 490f7ed3 + MOVQ M3, R11 // 410f7edb or 490f7edb + MOVQ X2, (BX) // 660f7e13 or 66480f7e13 or 660fd613 + MOVQ X11, (BX) // 66440f7e1b or 
664c0f7e1b or 66440fd61b + MOVQ X2, (R11) // 66410f7e13 or 66490f7e13 or 66410fd613 + MOVQ X11, (R11) // 66450f7e1b or 664d0f7e1b or 66450fd61b + MOVQ X2, DX // 660f7ed2 or 66480f7ed2 + MOVQ X11, DX // 66440f7eda or 664c0f7eda + MOVQ X2, R11 // 66410f7ed3 or 66490f7ed3 + MOVQ X11, R11 // 66450f7edb or 664d0f7edb + MOVQ (BX), X2 // 660f6e13 or 66480f6e13 or f30f7e13 + MOVQ (R11), X2 // 66410f6e13 or 66490f6e13 or f3410f7e13 + MOVQ DX, X2 // 660f6ed2 or 66480f6ed2 + MOVQ R11, X2 // 66410f6ed3 or 66490f6ed3 + MOVQ (BX), X11 // 66440f6e1b or 664c0f6e1b or f3440f7e1b + MOVQ (R11), X11 // 66450f6e1b or 664d0f6e1b or f3450f7e1b + MOVQ DX, X11 // 66440f6eda or 664c0f6eda + MOVQ R11, X11 // 66450f6edb or 664d0f6edb + MOVDDUP (BX), X2 // f20f1213 + MOVDDUP (R11), X2 // f2410f1213 + MOVDDUP X2, X2 // f20f12d2 + MOVDDUP X11, X2 // f2410f12d3 + MOVDDUP (BX), X11 // f2440f121b + MOVDDUP (R11), X11 // f2450f121b + MOVDDUP X2, X11 // f2440f12da + MOVDDUP X11, X11 // f2450f12db + MOVQ X2, M2 // f20fd6d2 + MOVQ X11, M2 // f2410fd6d3 + MOVQ X2, M3 // f20fd6da + MOVQ X11, M3 // f2410fd6db + MOVO (BX), X2 // 660f6f13 + MOVO (R11), X2 // 66410f6f13 + MOVO X2, X2 // 660f6fd2 or 660f7fd2 + MOVO X11, X2 // 66410f6fd3 or 66440f7fda + MOVO (BX), X11 // 66440f6f1b + MOVO (R11), X11 // 66450f6f1b + MOVO X2, X11 // 66440f6fda or 66410f7fd3 + MOVO X11, X11 // 66450f6fdb or 66450f7fdb + MOVO X2, (BX) // 660f7f13 + MOVO X11, (BX) // 66440f7f1b + MOVO X2, (R11) // 66410f7f13 + MOVO X11, (R11) // 66450f7f1b + MOVOU (BX), X2 // f30f6f13 + MOVOU (R11), X2 // f3410f6f13 + MOVOU X2, X2 // f30f6fd2 or f30f7fd2 + MOVOU X11, X2 // f3410f6fd3 or f3440f7fda + MOVOU (BX), X11 // f3440f6f1b + MOVOU (R11), X11 // f3450f6f1b + MOVOU X2, X11 // f3440f6fda or f3410f7fd3 + MOVOU X11, X11 // f3450f6fdb or f3450f7fdb + MOVOU X2, (BX) // f30f7f13 + MOVOU X11, (BX) // f3440f7f1b + MOVOU X2, (R11) // f3410f7f13 + MOVOU X11, (R11) // f3450f7f1b + MOVHLPS X2, X2 // 0f12d2 + MOVHLPS X11, X2 // 410f12d3 + MOVHLPS X2, X11 // 
440f12da + MOVHLPS X11, X11 // 450f12db + MOVHPD X2, (BX) // 660f1713 + MOVHPD X11, (BX) // 66440f171b + MOVHPD X2, (R11) // 66410f1713 + MOVHPD X11, (R11) // 66450f171b + MOVHPD (BX), X2 // 660f1613 + MOVHPD (R11), X2 // 66410f1613 + MOVHPD (BX), X11 // 66440f161b + MOVHPD (R11), X11 // 66450f161b + MOVHPS X2, (BX) // 0f1713 + MOVHPS X11, (BX) // 440f171b + MOVHPS X2, (R11) // 410f1713 + MOVHPS X11, (R11) // 450f171b + MOVHPS (BX), X2 // 0f1613 + MOVHPS (R11), X2 // 410f1613 + MOVHPS (BX), X11 // 440f161b + MOVHPS (R11), X11 // 450f161b + MOVLHPS X2, X2 // 0f16d2 + MOVLHPS X11, X2 // 410f16d3 + MOVLHPS X2, X11 // 440f16da + MOVLHPS X11, X11 // 450f16db + MOVLPD X2, (BX) // 660f1313 + MOVLPD X11, (BX) // 66440f131b + MOVLPD X2, (R11) // 66410f1313 + MOVLPD X11, (R11) // 66450f131b + MOVLPD (BX), X2 // 660f1213 + MOVLPD (R11), X2 // 66410f1213 + MOVLPD (BX), X11 // 66440f121b + MOVLPD (R11), X11 // 66450f121b + MOVLPS X2, (BX) // 0f1313 + MOVLPS X11, (BX) // 440f131b + MOVLPS X2, (R11) // 410f1313 + MOVLPS X11, (R11) // 450f131b + MOVLPS (BX), X2 // 0f1213 + MOVLPS (R11), X2 // 410f1213 + MOVLPS (BX), X11 // 440f121b + MOVLPS (R11), X11 // 450f121b + MOVMSKPD X2, DX // 660f50d2 + MOVMSKPD X11, DX // 66410f50d3 + MOVMSKPD X2, R11 // 66440f50da + MOVMSKPD X11, R11 // 66450f50db + MOVMSKPS X2, DX // 0f50d2 + MOVMSKPS X11, DX // 410f50d3 + MOVMSKPS X2, R11 // 440f50da + MOVMSKPS X11, R11 // 450f50db + MOVNTO X2, (BX) // 660fe713 + MOVNTO X11, (BX) // 66440fe71b + MOVNTO X2, (R11) // 66410fe713 + MOVNTO X11, (R11) // 66450fe71b + MOVNTDQA (BX), X2 // 660f382a13 + MOVNTDQA (R11), X2 // 66410f382a13 + MOVNTDQA (BX), X11 // 66440f382a1b + MOVNTDQA (R11), X11 // 66450f382a1b + MOVNTIL DX, (BX) // 0fc313 + MOVNTIL R11, (BX) // 440fc31b + MOVNTIL DX, (R11) // 410fc313 + MOVNTIL R11, (R11) // 450fc31b + MOVNTIQ DX, (BX) // 480fc313 + MOVNTIQ R11, (BX) // 4c0fc31b + MOVNTIQ DX, (R11) // 490fc313 + MOVNTIQ R11, (R11) // 4d0fc31b + MOVNTPD X2, (BX) // 660f2b13 + MOVNTPD X11, (BX) 
// 66440f2b1b + MOVNTPD X2, (R11) // 66410f2b13 + MOVNTPD X11, (R11) // 66450f2b1b + MOVNTPS X2, (BX) // 0f2b13 + MOVNTPS X11, (BX) // 440f2b1b + MOVNTPS X2, (R11) // 410f2b13 + MOVNTPS X11, (R11) // 450f2b1b + MOVNTQ M2, (BX) // 0fe713 + MOVNTQ M3, (BX) // 0fe71b + MOVNTQ M2, (R11) // 410fe713 + MOVNTQ M3, (R11) // 410fe71b + //TODO: MOVNTSD X2, (BX) // f20f2b13 + //TODO: MOVNTSD X11, (BX) // f2440f2b1b + //TODO: MOVNTSD X2, (R11) // f2410f2b13 + //TODO: MOVNTSD X11, (R11) // f2450f2b1b + //TODO: MOVNTSS X2, (BX) // f30f2b13 + //TODO: MOVNTSS X11, (BX) // f3440f2b1b + //TODO: MOVNTSS X2, (R11) // f3410f2b13 + //TODO: MOVNTSS X11, (R11) // f3450f2b1b + //TODO: MOVQ M2, M2 // 0f6fd2 or 0f7fd2 + //TODO: MOVQ M3, M2 // 0f6fd3 or 0f7fda + //TODO: MOVQ M2, M3 // 0f6fda or 0f7fd3 + //TODO: MOVQ M3, M3 // 0f6fdb or 0f7fdb + MOVQ X2, X2 // f30f7ed2 or 660fd6d2 + MOVQ X11, X2 // f3410f7ed3 or 66440fd6da + MOVQ X2, X11 // f3440f7eda or 66410fd6d3 + MOVQ X11, X11 // f3450f7edb or 66450fd6db + MOVQOZX M2, X2 // f30fd6d2 + MOVQOZX M3, X2 // f30fd6d3 + MOVQOZX M2, X11 // f3440fd6da + MOVQOZX M3, X11 // f3440fd6db + MOVSB // a4 + MOVSL // a5 + MOVSD (BX), X2 // f20f1013 + MOVSD (R11), X2 // f2410f1013 + MOVSD X2, X2 // f20f10d2 or f20f11d2 + MOVSD X11, X2 // f2410f10d3 or f2440f11da + MOVSD (BX), X11 // f2440f101b + MOVSD (R11), X11 // f2450f101b + MOVSD X2, X11 // f2440f10da or f2410f11d3 + MOVSD X11, X11 // f2450f10db or f2450f11db + MOVSD X2, (BX) // f20f1113 + MOVSD X11, (BX) // f2440f111b + MOVSD X2, (R11) // f2410f1113 + MOVSD X11, (R11) // f2450f111b + MOVSHDUP (BX), X2 // f30f1613 + MOVSHDUP (R11), X2 // f3410f1613 + MOVSHDUP X2, X2 // f30f16d2 + MOVSHDUP X11, X2 // f3410f16d3 + MOVSHDUP (BX), X11 // f3440f161b + MOVSHDUP (R11), X11 // f3450f161b + MOVSHDUP X2, X11 // f3440f16da + MOVSHDUP X11, X11 // f3450f16db + MOVSLDUP (BX), X2 // f30f1213 + MOVSLDUP (R11), X2 // f3410f1213 + MOVSLDUP X2, X2 // f30f12d2 + MOVSLDUP X11, X2 // f3410f12d3 + MOVSLDUP (BX), X11 // 
f3440f121b + MOVSLDUP (R11), X11 // f3450f121b + MOVSLDUP X2, X11 // f3440f12da + MOVSLDUP X11, X11 // f3450f12db + MOVSQ // 48a5 + MOVSS (BX), X2 // f30f1013 + MOVSS (R11), X2 // f3410f1013 + MOVSS X2, X2 // f30f10d2 or f30f11d2 + MOVSS X11, X2 // f3410f10d3 or f3440f11da + MOVSS (BX), X11 // f3440f101b + MOVSS (R11), X11 // f3450f101b + MOVSS X2, X11 // f3440f10da or f3410f11d3 + MOVSS X11, X11 // f3450f10db or f3450f11db + MOVSS X2, (BX) // f30f1113 + MOVSS X11, (BX) // f3440f111b + MOVSS X2, (R11) // f3410f1113 + MOVSS X11, (R11) // f3450f111b + MOVSW // 66a5 + MOVSWW (BX), DX // 660fbf13 + MOVSWW (R11), DX // 66410fbf13 + MOVSWW DX, DX // 660fbfd2 + MOVSWW R11, DX // 66410fbfd3 + MOVSWW (BX), R11 // 66440fbf1b + MOVSWW (R11), R11 // 66450fbf1b + MOVSWW DX, R11 // 66440fbfda + MOVSWW R11, R11 // 66450fbfdb + MOVBWSX (BX), DX // 660fbe13 + MOVBWSX (R11), DX // 66410fbe13 + MOVBWSX DL, DX // 660fbed2 + MOVBWSX R11, DX // 66410fbed3 + MOVBWSX (BX), R11 // 66440fbe1b + MOVBWSX (R11), R11 // 66450fbe1b + MOVBWSX DL, R11 // 66440fbeda + MOVBWSX R11, R11 // 66450fbedb + MOVWLSX (BX), DX // 0fbf13 + MOVWLSX (R11), DX // 410fbf13 + MOVWLSX DX, DX // 0fbfd2 + MOVWLSX R11, DX // 410fbfd3 + MOVWLSX (BX), R11 // 440fbf1b + MOVWLSX (R11), R11 // 450fbf1b + MOVWLSX DX, R11 // 440fbfda + MOVWLSX R11, R11 // 450fbfdb + MOVBLSX (BX), DX // 0fbe13 + MOVBLSX (R11), DX // 410fbe13 + MOVBLSX DL, DX // 0fbed2 + MOVBLSX R11, DX // 410fbed3 + MOVBLSX (BX), R11 // 440fbe1b + MOVBLSX (R11), R11 // 450fbe1b + MOVBLSX DL, R11 // 440fbeda + MOVBLSX R11, R11 // 450fbedb + MOVWQSX (BX), DX // 480fbf13 or 666313 + MOVWQSX (R11), DX // 490fbf13 or 66416313 + MOVWQSX DX, DX // 480fbfd2 or 6663d2 + MOVWQSX R11, DX // 490fbfd3 or 664163d3 + MOVWQSX (BX), R11 // 4c0fbf1b or 6644631b + MOVWQSX (R11), R11 // 4d0fbf1b or 6645631b + MOVWQSX DX, R11 // 4c0fbfda or 664463da + MOVWQSX R11, R11 // 4d0fbfdb or 664563db + MOVBQSX (BX), DX // 480fbe13 + MOVBQSX (R11), DX // 490fbe13 + MOVBQSX DL, DX // 
480fbed2 + MOVBQSX R11, DX // 490fbed3 + MOVBQSX (BX), R11 // 4c0fbe1b + MOVBQSX (R11), R11 // 4d0fbe1b + MOVBQSX DL, R11 // 4c0fbeda + MOVBQSX R11, R11 // 4d0fbedb + MOVLQSX (BX), DX // 6313 or 486313 + MOVLQSX (R11), DX // 416313 or 496313 + MOVLQSX DX, DX // 63d2 or 4863d2 + MOVLQSX R11, DX // 4163d3 or 4963d3 + MOVLQSX (BX), R11 // 44631b or 4c631b + MOVLQSX (R11), R11 // 45631b or 4d631b + MOVLQSX DX, R11 // 4463da or 4c63da + MOVLQSX R11, R11 // 4563db or 4d63db + MOVUPD (BX), X2 // 660f1013 + MOVUPD (R11), X2 // 66410f1013 + MOVUPD X2, X2 // 660f10d2 or 660f11d2 + MOVUPD X11, X2 // 66410f10d3 or 66440f11da + MOVUPD (BX), X11 // 66440f101b + MOVUPD (R11), X11 // 66450f101b + MOVUPD X2, X11 // 66440f10da or 66410f11d3 + MOVUPD X11, X11 // 66450f10db or 66450f11db + MOVUPD X2, (BX) // 660f1113 + MOVUPD X11, (BX) // 66440f111b + MOVUPD X2, (R11) // 66410f1113 + MOVUPD X11, (R11) // 66450f111b + MOVUPS (BX), X2 // 0f1013 + MOVUPS (R11), X2 // 410f1013 + MOVUPS X2, X2 // 0f10d2 or 0f11d2 + MOVUPS X11, X2 // 410f10d3 or 440f11da + MOVUPS (BX), X11 // 440f101b + MOVUPS (R11), X11 // 450f101b + MOVUPS X2, X11 // 440f10da or 410f11d3 + MOVUPS X11, X11 // 450f10db or 450f11db + MOVUPS X2, (BX) // 0f1113 + MOVUPS X11, (BX) // 440f111b + MOVUPS X2, (R11) // 410f1113 + MOVUPS X11, (R11) // 450f111b + MOVZWW (BX), DX // 660fb713 + MOVZWW (R11), DX // 66410fb713 + MOVZWW DX, DX // 660fb7d2 + MOVZWW R11, DX // 66410fb7d3 + MOVZWW (BX), R11 // 66440fb71b + MOVZWW (R11), R11 // 66450fb71b + MOVZWW DX, R11 // 66440fb7da + MOVZWW R11, R11 // 66450fb7db + MOVBWZX (BX), DX // 660fb613 + MOVBWZX (R11), DX // 66410fb613 + MOVBWZX DL, DX // 660fb6d2 + MOVBWZX R11, DX // 66410fb6d3 + MOVBWZX (BX), R11 // 66440fb61b + MOVBWZX (R11), R11 // 66450fb61b + MOVBWZX DL, R11 // 66440fb6da + MOVBWZX R11, R11 // 66450fb6db + MOVWLZX (BX), DX // 0fb713 + MOVWLZX (R11), DX // 410fb713 + MOVWLZX DX, DX // 0fb7d2 + MOVWLZX R11, DX // 410fb7d3 + MOVWLZX (BX), R11 // 440fb71b + MOVWLZX (R11), R11 // 
450fb71b + MOVWLZX DX, R11 // 440fb7da + MOVWLZX R11, R11 // 450fb7db + MOVBLZX (BX), DX // 0fb613 + MOVBLZX (R11), DX // 410fb613 + MOVBLZX DL, DX // 0fb6d2 + MOVBLZX R11, DX // 410fb6d3 + MOVBLZX (BX), R11 // 440fb61b + MOVBLZX (R11), R11 // 450fb61b + MOVBLZX DL, R11 // 440fb6da + MOVBLZX R11, R11 // 450fb6db + MOVWQZX (BX), DX // 480fb713 + MOVWQZX (R11), DX // 490fb713 + MOVWQZX DX, DX // 480fb7d2 + MOVWQZX R11, DX // 490fb7d3 + MOVWQZX (BX), R11 // 4c0fb71b + MOVWQZX (R11), R11 // 4d0fb71b + MOVWQZX DX, R11 // 4c0fb7da + MOVWQZX R11, R11 // 4d0fb7db + MOVBQZX (BX), DX // 480fb613 + MOVBQZX (R11), DX // 490fb613 + MOVBQZX DL, DX // 480fb6d2 + MOVBQZX R11, DX // 490fb6d3 + MOVBQZX (BX), R11 // 4c0fb61b + MOVBQZX (R11), R11 // 4d0fb61b + MOVBQZX DL, R11 // 4c0fb6da + MOVBQZX R11, R11 // 4d0fb6db + MPSADBW $7, (BX), X2 // 660f3a421307 + MPSADBW $7, (R11), X2 // 66410f3a421307 + MPSADBW $7, X2, X2 // 660f3a42d207 + MPSADBW $7, X11, X2 // 66410f3a42d307 + MPSADBW $7, (BX), X11 // 66440f3a421b07 + MPSADBW $7, (R11), X11 // 66450f3a421b07 + MPSADBW $7, X2, X11 // 66440f3a42da07 + MPSADBW $7, X11, X11 // 66450f3a42db07 + MULW (BX) // 66f723 + MULW (R11) // 6641f723 + MULW DX // 66f7e2 + MULW R11 // 6641f7e3 + MULL (BX) // f723 + MULL (R11) // 41f723 + MULL DX // f7e2 + MULL R11 // 41f7e3 + MULQ (BX) // 48f723 + MULQ (R11) // 49f723 + MULQ DX // 48f7e2 + MULQ R11 // 49f7e3 + MULB (BX) // f623 + MULB (R11) // 41f623 + MULB DL // f6e2 + MULB R11 // 41f6e3 + MULPD (BX), X2 // 660f5913 + MULPD (R11), X2 // 66410f5913 + MULPD X2, X2 // 660f59d2 + MULPD X11, X2 // 66410f59d3 + MULPD (BX), X11 // 66440f591b + MULPD (R11), X11 // 66450f591b + MULPD X2, X11 // 66440f59da + MULPD X11, X11 // 66450f59db + MULPS (BX), X2 // 0f5913 + MULPS (R11), X2 // 410f5913 + MULPS X2, X2 // 0f59d2 + MULPS X11, X2 // 410f59d3 + MULPS (BX), X11 // 440f591b + MULPS (R11), X11 // 450f591b + MULPS X2, X11 // 440f59da + MULPS X11, X11 // 450f59db + MULSD (BX), X2 // f20f5913 + MULSD (R11), X2 // 
f2410f5913 + MULSD X2, X2 // f20f59d2 + MULSD X11, X2 // f2410f59d3 + MULSD (BX), X11 // f2440f591b + MULSD (R11), X11 // f2450f591b + MULSD X2, X11 // f2440f59da + MULSD X11, X11 // f2450f59db + MULSS (BX), X2 // f30f5913 + MULSS (R11), X2 // f3410f5913 + MULSS X2, X2 // f30f59d2 + MULSS X11, X2 // f3410f59d3 + MULSS (BX), X11 // f3440f591b + MULSS (R11), X11 // f3450f591b + MULSS X2, X11 // f3440f59da + MULSS X11, X11 // f3450f59db + MULXL (BX), R9, DX // c4e233f613 + MULXL (R11), R9, DX // c4c233f613 + MULXL DX, R9, DX // c4e233f6d2 + MULXL R11, R9, DX // c4c233f6d3 + MULXL (BX), R9, R11 // c46233f61b + MULXL (R11), R9, R11 // c44233f61b + MULXL DX, R9, R11 // c46233f6da + MULXL R11, R9, R11 // c44233f6db + MULXQ (BX), R14, DX // c4e28bf613 + MULXQ (R11), R14, DX // c4c28bf613 + MULXQ DX, R14, DX // c4e28bf6d2 + MULXQ R11, R14, DX // c4c28bf6d3 + MULXQ (BX), R14, R11 // c4628bf61b + MULXQ (R11), R14, R11 // c4428bf61b + MULXQ DX, R14, R11 // c4628bf6da + MULXQ R11, R14, R11 // c4428bf6db + MWAIT // 0f01c9 + NEGW (BX) // 66f71b + NEGW (R11) // 6641f71b + NEGW DX // 66f7da + NEGW R11 // 6641f7db + NEGL (BX) // f71b + NEGL (R11) // 41f71b + NEGL DX // f7da + NEGL R11 // 41f7db + NEGQ (BX) // 48f71b + NEGQ (R11) // 49f71b + NEGQ DX // 48f7da + NEGQ R11 // 49f7db + NEGB (BX) // f61b + NEGB (R11) // 41f61b + NEGB DL // f6da + NEGB R11 // 41f6db + NOPW (BX) // 660f1f03 + NOPW (R11) // 66410f1f03 + NOPW DX // 660f1fc2 + NOPW R11 // 66410f1fc3 + NOPL (BX) // 0f1f03 + NOPL (R11) // 410f1f03 + NOPL DX // 0f1fc2 + NOPL R11 // 410f1fc3 + NOTW (BX) // 66f713 + NOTW (R11) // 6641f713 + NOTW DX // 66f7d2 + NOTW R11 // 6641f7d3 + NOTL (BX) // f713 + NOTL (R11) // 41f713 + NOTL DX // f7d2 + NOTL R11 // 41f7d3 + NOTQ (BX) // 48f713 + NOTQ (R11) // 49f713 + NOTQ DX // 48f7d2 + NOTQ R11 // 49f7d3 + NOTB (BX) // f613 + NOTB (R11) // 41f613 + NOTB DL // f6d2 + NOTB R11 // 41f6d3 + ORB $7, AL // 0c07 + ORW $61731, AX // 660d23f1 + ORL $4045620583, AX // 0d674523f1 + ORQ $-249346713, AX 
// 480d674523f1 + ORW $61731, (BX) // 66810b23f1 + ORW $61731, (R11) // 6641810b23f1 + ORW $61731, DX // 6681ca23f1 + ORW $61731, R11 // 664181cb23f1 + ORW $7, (BX) // 66830b07 + ORW $7, (R11) // 6641830b07 + ORW $7, DX // 6683ca07 + ORW $7, R11 // 664183cb07 + ORW DX, (BX) // 660913 + ORW R11, (BX) // 6644091b + ORW DX, (R11) // 66410913 + ORW R11, (R11) // 6645091b + ORW DX, DX // 6609d2 or 660bd2 + ORW R11, DX // 664409da or 66410bd3 + ORW DX, R11 // 664109d3 or 66440bda + ORW R11, R11 // 664509db or 66450bdb + ORL $4045620583, (BX) // 810b674523f1 + ORL $4045620583, (R11) // 41810b674523f1 + ORL $4045620583, DX // 81ca674523f1 + ORL $4045620583, R11 // 4181cb674523f1 + ORL $7, (BX) // 830b07 + ORL $7, (R11) // 41830b07 + ORL $7, DX // 83ca07 + ORL $7, R11 // 4183cb07 + ORL DX, (BX) // 0913 + ORL R11, (BX) // 44091b + ORL DX, (R11) // 410913 + ORL R11, (R11) // 45091b + ORL DX, DX // 09d2 or 0bd2 + ORL R11, DX // 4409da or 410bd3 + ORL DX, R11 // 4109d3 or 440bda + ORL R11, R11 // 4509db or 450bdb + ORQ $-249346713, (BX) // 48810b674523f1 + ORQ $-249346713, (R11) // 49810b674523f1 + ORQ $-249346713, DX // 4881ca674523f1 + ORQ $-249346713, R11 // 4981cb674523f1 + ORQ $7, (BX) // 48830b07 + ORQ $7, (R11) // 49830b07 + ORQ $7, DX // 4883ca07 + ORQ $7, R11 // 4983cb07 + ORQ DX, (BX) // 480913 + ORQ R11, (BX) // 4c091b + ORQ DX, (R11) // 490913 + ORQ R11, (R11) // 4d091b + ORQ DX, DX // 4809d2 or 480bd2 + ORQ R11, DX // 4c09da or 490bd3 + ORQ DX, R11 // 4909d3 or 4c0bda + ORQ R11, R11 // 4d09db or 4d0bdb + ORB $7, (BX) // 800b07 + ORB $7, (R11) // 41800b07 + ORB $7, DL // 80ca07 + ORB $7, R11 // 4180cb07 + ORB DL, (BX) // 0813 + ORB R11, (BX) // 44081b + ORB DL, (R11) // 410813 + ORB R11, (R11) // 45081b + ORB DL, DL // 08d2 or 0ad2 + ORB R11, DL // 4408da or 410ad3 + ORB DL, R11 // 4108d3 or 440ada + ORB R11, R11 // 4508db or 450adb + ORW (BX), DX // 660b13 + ORW (R11), DX // 66410b13 + ORW (BX), R11 // 66440b1b + ORW (R11), R11 // 66450b1b + ORL (BX), DX // 0b13 + 
ORL (R11), DX // 410b13 + ORL (BX), R11 // 440b1b + ORL (R11), R11 // 450b1b + ORQ (BX), DX // 480b13 + ORQ (R11), DX // 490b13 + ORQ (BX), R11 // 4c0b1b + ORQ (R11), R11 // 4d0b1b + ORB (BX), DL // 0a13 + ORB (R11), DL // 410a13 + ORB (BX), R11 // 440a1b + ORB (R11), R11 // 450a1b + ORPD (BX), X2 // 660f5613 + ORPD (R11), X2 // 66410f5613 + ORPD X2, X2 // 660f56d2 + ORPD X11, X2 // 66410f56d3 + ORPD (BX), X11 // 66440f561b + ORPD (R11), X11 // 66450f561b + ORPD X2, X11 // 66440f56da + ORPD X11, X11 // 66450f56db + ORPS (BX), X2 // 0f5613 + ORPS (R11), X2 // 410f5613 + ORPS X2, X2 // 0f56d2 + ORPS X11, X2 // 410f56d3 + ORPS (BX), X11 // 440f561b + ORPS (R11), X11 // 450f561b + ORPS X2, X11 // 440f56da + ORPS X11, X11 // 450f56db + //TODO: OUTB AL, DX // ee + //TODO: OUTW AX, DX // 66ef + //TODO: OUTL AX, DX // ef + //TODO: OUTB AL, $7 // e607 + //TODO: OUTW AX, $7 // 66e707 + //TODO: OUTL AX, $7 // e707 + OUTSB // 6e + OUTSL // 6f + OUTSW // 666f + //TODO: PABSB (BX), M2 // 0f381c13 + //TODO: PABSB (R11), M2 // 410f381c13 + //TODO: PABSB M2, M2 // 0f381cd2 + //TODO: PABSB M3, M2 // 0f381cd3 + //TODO: PABSB (BX), M3 // 0f381c1b + //TODO: PABSB (R11), M3 // 410f381c1b + //TODO: PABSB M2, M3 // 0f381cda + //TODO: PABSB M3, M3 // 0f381cdb + PABSB (BX), X2 // 660f381c13 + PABSB (R11), X2 // 66410f381c13 + PABSB X2, X2 // 660f381cd2 + PABSB X11, X2 // 66410f381cd3 + PABSB (BX), X11 // 66440f381c1b + PABSB (R11), X11 // 66450f381c1b + PABSB X2, X11 // 66440f381cda + PABSB X11, X11 // 66450f381cdb + //TODO: PABSD (BX), M2 // 0f381e13 + //TODO: PABSD (R11), M2 // 410f381e13 + //TODO: PABSD M2, M2 // 0f381ed2 + //TODO: PABSD M3, M2 // 0f381ed3 + //TODO: PABSD (BX), M3 // 0f381e1b + //TODO: PABSD (R11), M3 // 410f381e1b + //TODO: PABSD M2, M3 // 0f381eda + //TODO: PABSD M3, M3 // 0f381edb + PABSD (BX), X2 // 660f381e13 + PABSD (R11), X2 // 66410f381e13 + PABSD X2, X2 // 660f381ed2 + PABSD X11, X2 // 66410f381ed3 + PABSD (BX), X11 // 66440f381e1b + PABSD (R11), X11 // 
66450f381e1b + PABSD X2, X11 // 66440f381eda + PABSD X11, X11 // 66450f381edb + //TODO: PABSW (BX), M2 // 0f381d13 + //TODO: PABSW (R11), M2 // 410f381d13 + //TODO: PABSW M2, M2 // 0f381dd2 + //TODO: PABSW M3, M2 // 0f381dd3 + //TODO: PABSW (BX), M3 // 0f381d1b + //TODO: PABSW (R11), M3 // 410f381d1b + //TODO: PABSW M2, M3 // 0f381dda + //TODO: PABSW M3, M3 // 0f381ddb + PABSW (BX), X2 // 660f381d13 + PABSW (R11), X2 // 66410f381d13 + PABSW X2, X2 // 660f381dd2 + PABSW X11, X2 // 66410f381dd3 + PABSW (BX), X11 // 66440f381d1b + PABSW (R11), X11 // 66450f381d1b + PABSW X2, X11 // 66440f381dda + PABSW X11, X11 // 66450f381ddb + PACKSSLW (BX), M2 // 0f6b13 + PACKSSLW (R11), M2 // 410f6b13 + PACKSSLW M2, M2 // 0f6bd2 + PACKSSLW M3, M2 // 0f6bd3 + PACKSSLW (BX), M3 // 0f6b1b + PACKSSLW (R11), M3 // 410f6b1b + PACKSSLW M2, M3 // 0f6bda + PACKSSLW M3, M3 // 0f6bdb + PACKSSLW (BX), X2 // 660f6b13 + PACKSSLW (R11), X2 // 66410f6b13 + PACKSSLW X2, X2 // 660f6bd2 + PACKSSLW X11, X2 // 66410f6bd3 + PACKSSLW (BX), X11 // 66440f6b1b + PACKSSLW (R11), X11 // 66450f6b1b + PACKSSLW X2, X11 // 66440f6bda + PACKSSLW X11, X11 // 66450f6bdb + PACKSSWB (BX), M2 // 0f6313 + PACKSSWB (R11), M2 // 410f6313 + PACKSSWB M2, M2 // 0f63d2 + PACKSSWB M3, M2 // 0f63d3 + PACKSSWB (BX), M3 // 0f631b + PACKSSWB (R11), M3 // 410f631b + PACKSSWB M2, M3 // 0f63da + PACKSSWB M3, M3 // 0f63db + PACKSSWB (BX), X2 // 660f6313 + PACKSSWB (R11), X2 // 66410f6313 + PACKSSWB X2, X2 // 660f63d2 + PACKSSWB X11, X2 // 66410f63d3 + PACKSSWB (BX), X11 // 66440f631b + PACKSSWB (R11), X11 // 66450f631b + PACKSSWB X2, X11 // 66440f63da + PACKSSWB X11, X11 // 66450f63db + PACKUSDW (BX), X2 // 660f382b13 + PACKUSDW (R11), X2 // 66410f382b13 + PACKUSDW X2, X2 // 660f382bd2 + PACKUSDW X11, X2 // 66410f382bd3 + PACKUSDW (BX), X11 // 66440f382b1b + PACKUSDW (R11), X11 // 66450f382b1b + PACKUSDW X2, X11 // 66440f382bda + PACKUSDW X11, X11 // 66450f382bdb + PACKUSWB (BX), M2 // 0f6713 + PACKUSWB (R11), M2 // 410f6713 + 
PACKUSWB M2, M2 // 0f67d2 + PACKUSWB M3, M2 // 0f67d3 + PACKUSWB (BX), M3 // 0f671b + PACKUSWB (R11), M3 // 410f671b + PACKUSWB M2, M3 // 0f67da + PACKUSWB M3, M3 // 0f67db + PACKUSWB (BX), X2 // 660f6713 + PACKUSWB (R11), X2 // 66410f6713 + PACKUSWB X2, X2 // 660f67d2 + PACKUSWB X11, X2 // 66410f67d3 + PACKUSWB (BX), X11 // 66440f671b + PACKUSWB (R11), X11 // 66450f671b + PACKUSWB X2, X11 // 66440f67da + PACKUSWB X11, X11 // 66450f67db + PADDB (BX), M2 // 0ffc13 + PADDB (R11), M2 // 410ffc13 + PADDB M2, M2 // 0ffcd2 + PADDB M3, M2 // 0ffcd3 + PADDB (BX), M3 // 0ffc1b + PADDB (R11), M3 // 410ffc1b + PADDB M2, M3 // 0ffcda + PADDB M3, M3 // 0ffcdb + PADDB (BX), X2 // 660ffc13 + PADDB (R11), X2 // 66410ffc13 + PADDB X2, X2 // 660ffcd2 + PADDB X11, X2 // 66410ffcd3 + PADDB (BX), X11 // 66440ffc1b + PADDB (R11), X11 // 66450ffc1b + PADDB X2, X11 // 66440ffcda + PADDB X11, X11 // 66450ffcdb + PADDL (BX), M2 // 0ffe13 + PADDL (R11), M2 // 410ffe13 + PADDL M2, M2 // 0ffed2 + PADDL M3, M2 // 0ffed3 + PADDL (BX), M3 // 0ffe1b + PADDL (R11), M3 // 410ffe1b + PADDL M2, M3 // 0ffeda + PADDL M3, M3 // 0ffedb + PADDL (BX), X2 // 660ffe13 + PADDL (R11), X2 // 66410ffe13 + PADDL X2, X2 // 660ffed2 + PADDL X11, X2 // 66410ffed3 + PADDL (BX), X11 // 66440ffe1b + PADDL (R11), X11 // 66450ffe1b + PADDL X2, X11 // 66440ffeda + PADDL X11, X11 // 66450ffedb + //TODO: PADDQ (BX), M2 // 0fd413 + //TODO: PADDQ (R11), M2 // 410fd413 + //TODO: PADDQ M2, M2 // 0fd4d2 + //TODO: PADDQ M3, M2 // 0fd4d3 + //TODO: PADDQ (BX), M3 // 0fd41b + //TODO: PADDQ (R11), M3 // 410fd41b + //TODO: PADDQ M2, M3 // 0fd4da + //TODO: PADDQ M3, M3 // 0fd4db + PADDQ (BX), X2 // 660fd413 + PADDQ (R11), X2 // 66410fd413 + PADDQ X2, X2 // 660fd4d2 + PADDQ X11, X2 // 66410fd4d3 + PADDQ (BX), X11 // 66440fd41b + PADDQ (R11), X11 // 66450fd41b + PADDQ X2, X11 // 66440fd4da + PADDQ X11, X11 // 66450fd4db + PADDSB (BX), M2 // 0fec13 + PADDSB (R11), M2 // 410fec13 + PADDSB M2, M2 // 0fecd2 + PADDSB M3, M2 // 0fecd3 + PADDSB 
(BX), M3 // 0fec1b + PADDSB (R11), M3 // 410fec1b + PADDSB M2, M3 // 0fecda + PADDSB M3, M3 // 0fecdb + PADDSB (BX), X2 // 660fec13 + PADDSB (R11), X2 // 66410fec13 + PADDSB X2, X2 // 660fecd2 + PADDSB X11, X2 // 66410fecd3 + PADDSB (BX), X11 // 66440fec1b + PADDSB (R11), X11 // 66450fec1b + PADDSB X2, X11 // 66440fecda + PADDSB X11, X11 // 66450fecdb + PADDSW (BX), M2 // 0fed13 + PADDSW (R11), M2 // 410fed13 + PADDSW M2, M2 // 0fedd2 + PADDSW M3, M2 // 0fedd3 + PADDSW (BX), M3 // 0fed1b + PADDSW (R11), M3 // 410fed1b + PADDSW M2, M3 // 0fedda + PADDSW M3, M3 // 0feddb + PADDSW (BX), X2 // 660fed13 + PADDSW (R11), X2 // 66410fed13 + PADDSW X2, X2 // 660fedd2 + PADDSW X11, X2 // 66410fedd3 + PADDSW (BX), X11 // 66440fed1b + PADDSW (R11), X11 // 66450fed1b + PADDSW X2, X11 // 66440fedda + PADDSW X11, X11 // 66450feddb + PADDUSB (BX), M2 // 0fdc13 + PADDUSB (R11), M2 // 410fdc13 + PADDUSB M2, M2 // 0fdcd2 + PADDUSB M3, M2 // 0fdcd3 + PADDUSB (BX), M3 // 0fdc1b + PADDUSB (R11), M3 // 410fdc1b + PADDUSB M2, M3 // 0fdcda + PADDUSB M3, M3 // 0fdcdb + PADDUSB (BX), X2 // 660fdc13 + PADDUSB (R11), X2 // 66410fdc13 + PADDUSB X2, X2 // 660fdcd2 + PADDUSB X11, X2 // 66410fdcd3 + PADDUSB (BX), X11 // 66440fdc1b + PADDUSB (R11), X11 // 66450fdc1b + PADDUSB X2, X11 // 66440fdcda + PADDUSB X11, X11 // 66450fdcdb + PADDUSW (BX), M2 // 0fdd13 + PADDUSW (R11), M2 // 410fdd13 + PADDUSW M2, M2 // 0fddd2 + PADDUSW M3, M2 // 0fddd3 + PADDUSW (BX), M3 // 0fdd1b + PADDUSW (R11), M3 // 410fdd1b + PADDUSW M2, M3 // 0fddda + PADDUSW M3, M3 // 0fdddb + PADDUSW (BX), X2 // 660fdd13 + PADDUSW (R11), X2 // 66410fdd13 + PADDUSW X2, X2 // 660fddd2 + PADDUSW X11, X2 // 66410fddd3 + PADDUSW (BX), X11 // 66440fdd1b + PADDUSW (R11), X11 // 66450fdd1b + PADDUSW X2, X11 // 66440fddda + PADDUSW X11, X11 // 66450fdddb + PADDW (BX), M2 // 0ffd13 + PADDW (R11), M2 // 410ffd13 + PADDW M2, M2 // 0ffdd2 + PADDW M3, M2 // 0ffdd3 + PADDW (BX), M3 // 0ffd1b + PADDW (R11), M3 // 410ffd1b + PADDW M2, M3 // 0ffdda + 
PADDW M3, M3 // 0ffddb + PADDW (BX), X2 // 660ffd13 + PADDW (R11), X2 // 66410ffd13 + PADDW X2, X2 // 660ffdd2 + PADDW X11, X2 // 66410ffdd3 + PADDW (BX), X11 // 66440ffd1b + PADDW (R11), X11 // 66450ffd1b + PADDW X2, X11 // 66440ffdda + PADDW X11, X11 // 66450ffddb + //TODO: PALIGNR $7, (BX), M2 // 0f3a0f1307 + //TODO: PALIGNR $7, (R11), M2 // 410f3a0f1307 + //TODO: PALIGNR $7, M2, M2 // 0f3a0fd207 + //TODO: PALIGNR $7, M3, M2 // 0f3a0fd307 + //TODO: PALIGNR $7, (BX), M3 // 0f3a0f1b07 + //TODO: PALIGNR $7, (R11), M3 // 410f3a0f1b07 + //TODO: PALIGNR $7, M2, M3 // 0f3a0fda07 + //TODO: PALIGNR $7, M3, M3 // 0f3a0fdb07 + PALIGNR $7, (BX), X2 // 660f3a0f1307 + PALIGNR $7, (R11), X2 // 66410f3a0f1307 + PALIGNR $7, X2, X2 // 660f3a0fd207 + PALIGNR $7, X11, X2 // 66410f3a0fd307 + PALIGNR $7, (BX), X11 // 66440f3a0f1b07 + PALIGNR $7, (R11), X11 // 66450f3a0f1b07 + PALIGNR $7, X2, X11 // 66440f3a0fda07 + PALIGNR $7, X11, X11 // 66450f3a0fdb07 + PAND (BX), M2 // 0fdb13 + PAND (R11), M2 // 410fdb13 + PAND M2, M2 // 0fdbd2 + PAND M3, M2 // 0fdbd3 + PAND (BX), M3 // 0fdb1b + PAND (R11), M3 // 410fdb1b + PAND M2, M3 // 0fdbda + PAND M3, M3 // 0fdbdb + PAND (BX), X2 // 660fdb13 + PAND (R11), X2 // 66410fdb13 + PAND X2, X2 // 660fdbd2 + PAND X11, X2 // 66410fdbd3 + PAND (BX), X11 // 66440fdb1b + PAND (R11), X11 // 66450fdb1b + PAND X2, X11 // 66440fdbda + PAND X11, X11 // 66450fdbdb + PANDN (BX), M2 // 0fdf13 + PANDN (R11), M2 // 410fdf13 + PANDN M2, M2 // 0fdfd2 + PANDN M3, M2 // 0fdfd3 + PANDN (BX), M3 // 0fdf1b + PANDN (R11), M3 // 410fdf1b + PANDN M2, M3 // 0fdfda + PANDN M3, M3 // 0fdfdb + PANDN (BX), X2 // 660fdf13 + PANDN (R11), X2 // 66410fdf13 + PANDN X2, X2 // 660fdfd2 + PANDN X11, X2 // 66410fdfd3 + PANDN (BX), X11 // 66440fdf1b + PANDN (R11), X11 // 66450fdf1b + PANDN X2, X11 // 66440fdfda + PANDN X11, X11 // 66450fdfdb + PAVGB (BX), M2 // 0fe013 + PAVGB (R11), M2 // 410fe013 + PAVGB M2, M2 // 0fe0d2 + PAVGB M3, M2 // 0fe0d3 + PAVGB (BX), M3 // 0fe01b + PAVGB (R11), 
M3 // 410fe01b + PAVGB M2, M3 // 0fe0da + PAVGB M3, M3 // 0fe0db + PAVGB (BX), X2 // 660fe013 + PAVGB (R11), X2 // 66410fe013 + PAVGB X2, X2 // 660fe0d2 + PAVGB X11, X2 // 66410fe0d3 + PAVGB (BX), X11 // 66440fe01b + PAVGB (R11), X11 // 66450fe01b + PAVGB X2, X11 // 66440fe0da + PAVGB X11, X11 // 66450fe0db + PAVGW (BX), M2 // 0fe313 + PAVGW (R11), M2 // 410fe313 + PAVGW M2, M2 // 0fe3d2 + PAVGW M3, M2 // 0fe3d3 + PAVGW (BX), M3 // 0fe31b + PAVGW (R11), M3 // 410fe31b + PAVGW M2, M3 // 0fe3da + PAVGW M3, M3 // 0fe3db + PAVGW (BX), X2 // 660fe313 + PAVGW (R11), X2 // 66410fe313 + PAVGW X2, X2 // 660fe3d2 + PAVGW X11, X2 // 66410fe3d3 + PAVGW (BX), X11 // 66440fe31b + PAVGW (R11), X11 // 66450fe31b + PAVGW X2, X11 // 66440fe3da + PAVGW X11, X11 // 66450fe3db + PBLENDVB X0, (BX), X2 // 660f381013 + PBLENDVB X0, (R11), X2 // 66410f381013 + PBLENDVB X0, X2, X2 // 660f3810d2 + PBLENDVB X0, X11, X2 // 66410f3810d3 + PBLENDVB X0, (BX), X11 // 66440f38101b + PBLENDVB X0, (R11), X11 // 66450f38101b + PBLENDVB X0, X2, X11 // 66440f3810da + PBLENDVB X0, X11, X11 // 66450f3810db + PBLENDW $7, (BX), X2 // 660f3a0e1307 + PBLENDW $7, (R11), X2 // 66410f3a0e1307 + PBLENDW $7, X2, X2 // 660f3a0ed207 + PBLENDW $7, X11, X2 // 66410f3a0ed307 + PBLENDW $7, (BX), X11 // 66440f3a0e1b07 + PBLENDW $7, (R11), X11 // 66450f3a0e1b07 + PBLENDW $7, X2, X11 // 66440f3a0eda07 + PBLENDW $7, X11, X11 // 66450f3a0edb07 + PCLMULQDQ $7, (BX), X2 // 660f3a441307 + PCLMULQDQ $7, (R11), X2 // 66410f3a441307 + PCLMULQDQ $7, X2, X2 // 660f3a44d207 + PCLMULQDQ $7, X11, X2 // 66410f3a44d307 + PCLMULQDQ $7, (BX), X11 // 66440f3a441b07 + PCLMULQDQ $7, (R11), X11 // 66450f3a441b07 + PCLMULQDQ $7, X2, X11 // 66440f3a44da07 + PCLMULQDQ $7, X11, X11 // 66450f3a44db07 + PCMPEQB (BX), M2 // 0f7413 + PCMPEQB (R11), M2 // 410f7413 + PCMPEQB M2, M2 // 0f74d2 + PCMPEQB M3, M2 // 0f74d3 + PCMPEQB (BX), M3 // 0f741b + PCMPEQB (R11), M3 // 410f741b + PCMPEQB M2, M3 // 0f74da + PCMPEQB M3, M3 // 0f74db + PCMPEQB (BX), X2 // 
660f7413 + PCMPEQB (R11), X2 // 66410f7413 + PCMPEQB X2, X2 // 660f74d2 + PCMPEQB X11, X2 // 66410f74d3 + PCMPEQB (BX), X11 // 66440f741b + PCMPEQB (R11), X11 // 66450f741b + PCMPEQB X2, X11 // 66440f74da + PCMPEQB X11, X11 // 66450f74db + PCMPEQL (BX), M2 // 0f7613 + PCMPEQL (R11), M2 // 410f7613 + PCMPEQL M2, M2 // 0f76d2 + PCMPEQL M3, M2 // 0f76d3 + PCMPEQL (BX), M3 // 0f761b + PCMPEQL (R11), M3 // 410f761b + PCMPEQL M2, M3 // 0f76da + PCMPEQL M3, M3 // 0f76db + PCMPEQL (BX), X2 // 660f7613 + PCMPEQL (R11), X2 // 66410f7613 + PCMPEQL X2, X2 // 660f76d2 + PCMPEQL X11, X2 // 66410f76d3 + PCMPEQL (BX), X11 // 66440f761b + PCMPEQL (R11), X11 // 66450f761b + PCMPEQL X2, X11 // 66440f76da + PCMPEQL X11, X11 // 66450f76db + PCMPEQQ (BX), X2 // 660f382913 + PCMPEQQ (R11), X2 // 66410f382913 + PCMPEQQ X2, X2 // 660f3829d2 + PCMPEQQ X11, X2 // 66410f3829d3 + PCMPEQQ (BX), X11 // 66440f38291b + PCMPEQQ (R11), X11 // 66450f38291b + PCMPEQQ X2, X11 // 66440f3829da + PCMPEQQ X11, X11 // 66450f3829db + PCMPEQW (BX), M2 // 0f7513 + PCMPEQW (R11), M2 // 410f7513 + PCMPEQW M2, M2 // 0f75d2 + PCMPEQW M3, M2 // 0f75d3 + PCMPEQW (BX), M3 // 0f751b + PCMPEQW (R11), M3 // 410f751b + PCMPEQW M2, M3 // 0f75da + PCMPEQW M3, M3 // 0f75db + PCMPEQW (BX), X2 // 660f7513 + PCMPEQW (R11), X2 // 66410f7513 + PCMPEQW X2, X2 // 660f75d2 + PCMPEQW X11, X2 // 66410f75d3 + PCMPEQW (BX), X11 // 66440f751b + PCMPEQW (R11), X11 // 66450f751b + PCMPEQW X2, X11 // 66440f75da + PCMPEQW X11, X11 // 66450f75db + PCMPESTRI $7, (BX), X2 // 660f3a611307 + PCMPESTRI $7, (R11), X2 // 66410f3a611307 + PCMPESTRI $7, X2, X2 // 660f3a61d207 + PCMPESTRI $7, X11, X2 // 66410f3a61d307 + PCMPESTRI $7, (BX), X11 // 66440f3a611b07 + PCMPESTRI $7, (R11), X11 // 66450f3a611b07 + PCMPESTRI $7, X2, X11 // 66440f3a61da07 + PCMPESTRI $7, X11, X11 // 66450f3a61db07 + PCMPESTRM $7, (BX), X2 // 660f3a601307 + PCMPESTRM $7, (R11), X2 // 66410f3a601307 + PCMPESTRM $7, X2, X2 // 660f3a60d207 + PCMPESTRM $7, X11, X2 // 66410f3a60d307 
+ PCMPESTRM $7, (BX), X11 // 66440f3a601b07 + PCMPESTRM $7, (R11), X11 // 66450f3a601b07 + PCMPESTRM $7, X2, X11 // 66440f3a60da07 + PCMPESTRM $7, X11, X11 // 66450f3a60db07 + PCMPGTB (BX), M2 // 0f6413 + PCMPGTB (R11), M2 // 410f6413 + PCMPGTB M2, M2 // 0f64d2 + PCMPGTB M3, M2 // 0f64d3 + PCMPGTB (BX), M3 // 0f641b + PCMPGTB (R11), M3 // 410f641b + PCMPGTB M2, M3 // 0f64da + PCMPGTB M3, M3 // 0f64db + PCMPGTB (BX), X2 // 660f6413 + PCMPGTB (R11), X2 // 66410f6413 + PCMPGTB X2, X2 // 660f64d2 + PCMPGTB X11, X2 // 66410f64d3 + PCMPGTB (BX), X11 // 66440f641b + PCMPGTB (R11), X11 // 66450f641b + PCMPGTB X2, X11 // 66440f64da + PCMPGTB X11, X11 // 66450f64db + PCMPGTL (BX), M2 // 0f6613 + PCMPGTL (R11), M2 // 410f6613 + PCMPGTL M2, M2 // 0f66d2 + PCMPGTL M3, M2 // 0f66d3 + PCMPGTL (BX), M3 // 0f661b + PCMPGTL (R11), M3 // 410f661b + PCMPGTL M2, M3 // 0f66da + PCMPGTL M3, M3 // 0f66db + PCMPGTL (BX), X2 // 660f6613 + PCMPGTL (R11), X2 // 66410f6613 + PCMPGTL X2, X2 // 660f66d2 + PCMPGTL X11, X2 // 66410f66d3 + PCMPGTL (BX), X11 // 66440f661b + PCMPGTL (R11), X11 // 66450f661b + PCMPGTL X2, X11 // 66440f66da + PCMPGTL X11, X11 // 66450f66db + PCMPGTQ (BX), X2 // 660f383713 + PCMPGTQ (R11), X2 // 66410f383713 + PCMPGTQ X2, X2 // 660f3837d2 + PCMPGTQ X11, X2 // 66410f3837d3 + PCMPGTQ (BX), X11 // 66440f38371b + PCMPGTQ (R11), X11 // 66450f38371b + PCMPGTQ X2, X11 // 66440f3837da + PCMPGTQ X11, X11 // 66450f3837db + PCMPGTW (BX), M2 // 0f6513 + PCMPGTW (R11), M2 // 410f6513 + PCMPGTW M2, M2 // 0f65d2 + PCMPGTW M3, M2 // 0f65d3 + PCMPGTW (BX), M3 // 0f651b + PCMPGTW (R11), M3 // 410f651b + PCMPGTW M2, M3 // 0f65da + PCMPGTW M3, M3 // 0f65db + PCMPGTW (BX), X2 // 660f6513 + PCMPGTW (R11), X2 // 66410f6513 + PCMPGTW X2, X2 // 660f65d2 + PCMPGTW X11, X2 // 66410f65d3 + PCMPGTW (BX), X11 // 66440f651b + PCMPGTW (R11), X11 // 66450f651b + PCMPGTW X2, X11 // 66440f65da + PCMPGTW X11, X11 // 66450f65db + PCMPISTRI $7, (BX), X2 // 660f3a631307 + PCMPISTRI $7, (R11), X2 // 
66410f3a631307 + PCMPISTRI $7, X2, X2 // 660f3a63d207 + PCMPISTRI $7, X11, X2 // 66410f3a63d307 + PCMPISTRI $7, (BX), X11 // 66440f3a631b07 + PCMPISTRI $7, (R11), X11 // 66450f3a631b07 + PCMPISTRI $7, X2, X11 // 66440f3a63da07 + PCMPISTRI $7, X11, X11 // 66450f3a63db07 + PCMPISTRM $7, (BX), X2 // 660f3a621307 + PCMPISTRM $7, (R11), X2 // 66410f3a621307 + PCMPISTRM $7, X2, X2 // 660f3a62d207 + PCMPISTRM $7, X11, X2 // 66410f3a62d307 + PCMPISTRM $7, (BX), X11 // 66440f3a621b07 + PCMPISTRM $7, (R11), X11 // 66450f3a621b07 + PCMPISTRM $7, X2, X11 // 66440f3a62da07 + PCMPISTRM $7, X11, X11 // 66450f3a62db07 + PDEPL (BX), R9, DX // c4e233f513 + PDEPL (R11), R9, DX // c4c233f513 + PDEPL DX, R9, DX // c4e233f5d2 + PDEPL R11, R9, DX // c4c233f5d3 + PDEPL (BX), R9, R11 // c46233f51b + PDEPL (R11), R9, R11 // c44233f51b + PDEPL DX, R9, R11 // c46233f5da + PDEPL R11, R9, R11 // c44233f5db + PDEPQ (BX), R14, DX // c4e28bf513 + PDEPQ (R11), R14, DX // c4c28bf513 + PDEPQ DX, R14, DX // c4e28bf5d2 + PDEPQ R11, R14, DX // c4c28bf5d3 + PDEPQ (BX), R14, R11 // c4628bf51b + PDEPQ (R11), R14, R11 // c4428bf51b + PDEPQ DX, R14, R11 // c4628bf5da + PDEPQ R11, R14, R11 // c4428bf5db + PEXTL (BX), R9, DX // c4e232f513 + PEXTL (R11), R9, DX // c4c232f513 + PEXTL DX, R9, DX // c4e232f5d2 + PEXTL R11, R9, DX // c4c232f5d3 + PEXTL (BX), R9, R11 // c46232f51b + PEXTL (R11), R9, R11 // c44232f51b + PEXTL DX, R9, R11 // c46232f5da + PEXTL R11, R9, R11 // c44232f5db + PEXTQ (BX), R14, DX // c4e28af513 + PEXTQ (R11), R14, DX // c4c28af513 + PEXTQ DX, R14, DX // c4e28af5d2 + PEXTQ R11, R14, DX // c4c28af5d3 + PEXTQ (BX), R14, R11 // c4628af51b + PEXTQ (R11), R14, R11 // c4428af51b + PEXTQ DX, R14, R11 // c4628af5da + PEXTQ R11, R14, R11 // c4428af5db + PEXTRB $7, X2, (BX) // 660f3a141307 + PEXTRB $7, X11, (BX) // 66440f3a141b07 + PEXTRB $7, X2, (R11) // 66410f3a141307 + PEXTRB $7, X11, (R11) // 66450f3a141b07 + PEXTRB $7, X2, DX // 660f3a14d207 + PEXTRB $7, X11, DX // 66440f3a14da07 + PEXTRB $7, X2, 
R11 // 66410f3a14d307 + PEXTRB $7, X11, R11 // 66450f3a14db07 + PEXTRD $7, X2, (BX) // 660f3a161307 + PEXTRD $7, X11, (BX) // 66440f3a161b07 + PEXTRD $7, X2, (R11) // 66410f3a161307 + PEXTRD $7, X11, (R11) // 66450f3a161b07 + PEXTRD $7, X2, DX // 660f3a16d207 + PEXTRD $7, X11, DX // 66440f3a16da07 + PEXTRD $7, X2, R11 // 66410f3a16d307 + PEXTRD $7, X11, R11 // 66450f3a16db07 + PEXTRQ $7, X2, (BX) // 66480f3a161307 + PEXTRQ $7, X11, (BX) // 664c0f3a161b07 + PEXTRQ $7, X2, (R11) // 66490f3a161307 + PEXTRQ $7, X11, (R11) // 664d0f3a161b07 + PEXTRQ $7, X2, DX // 66480f3a16d207 + PEXTRQ $7, X11, DX // 664c0f3a16da07 + PEXTRQ $7, X2, R11 // 66490f3a16d307 + PEXTRQ $7, X11, R11 // 664d0f3a16db07 + //TODO: PEXTRW $7, M2, DX // 0fc5d207 + //TODO: PEXTRW $7, M3, DX // 0fc5d307 + //TODO: PEXTRW $7, M2, R11 // 440fc5da07 + //TODO: PEXTRW $7, M3, R11 // 440fc5db07 + PEXTRW $7, X2, DX // 660fc5d207 or 660f3a15d207 + PEXTRW $7, X11, DX // 66410fc5d307 or 66440f3a15da07 + PEXTRW $7, X2, R11 // 66440fc5da07 or 66410f3a15d307 + PEXTRW $7, X11, R11 // 66450fc5db07 or 66450f3a15db07 + PEXTRW $7, X2, (BX) // 660f3a151307 + PEXTRW $7, X11, (BX) // 66440f3a151b07 + PEXTRW $7, X2, (R11) // 66410f3a151307 + PEXTRW $7, X11, (R11) // 66450f3a151b07 + PHADDD (BX), M2 // 0f380213 + PHADDD (R11), M2 // 410f380213 + PHADDD M2, M2 // 0f3802d2 + PHADDD M3, M2 // 0f3802d3 + PHADDD (BX), M3 // 0f38021b + PHADDD (R11), M3 // 410f38021b + PHADDD M2, M3 // 0f3802da + PHADDD M3, M3 // 0f3802db + PHADDD (BX), X2 // 660f380213 + PHADDD (R11), X2 // 66410f380213 + PHADDD X2, X2 // 660f3802d2 + PHADDD X11, X2 // 66410f3802d3 + PHADDD (BX), X11 // 66440f38021b + PHADDD (R11), X11 // 66450f38021b + PHADDD X2, X11 // 66440f3802da + PHADDD X11, X11 // 66450f3802db + //TODO: PHADDSW (BX), M2 // 0f380313 + //TODO: PHADDSW (R11), M2 // 410f380313 + //TODO: PHADDSW M2, M2 // 0f3803d2 + //TODO: PHADDSW M3, M2 // 0f3803d3 + //TODO: PHADDSW (BX), M3 // 0f38031b + //TODO: PHADDSW (R11), M3 // 410f38031b + //TODO: 
PHADDSW M2, M3 // 0f3803da + //TODO: PHADDSW M3, M3 // 0f3803db + PHADDSW (BX), X2 // 660f380313 + PHADDSW (R11), X2 // 66410f380313 + PHADDSW X2, X2 // 660f3803d2 + PHADDSW X11, X2 // 66410f3803d3 + PHADDSW (BX), X11 // 66440f38031b + PHADDSW (R11), X11 // 66450f38031b + PHADDSW X2, X11 // 66440f3803da + PHADDSW X11, X11 // 66450f3803db + //TODO: PHADDW (BX), M2 // 0f380113 + //TODO: PHADDW (R11), M2 // 410f380113 + //TODO: PHADDW M2, M2 // 0f3801d2 + //TODO: PHADDW M3, M2 // 0f3801d3 + //TODO: PHADDW (BX), M3 // 0f38011b + //TODO: PHADDW (R11), M3 // 410f38011b + //TODO: PHADDW M2, M3 // 0f3801da + //TODO: PHADDW M3, M3 // 0f3801db + PHADDW (BX), X2 // 660f380113 + PHADDW (R11), X2 // 66410f380113 + PHADDW X2, X2 // 660f3801d2 + PHADDW X11, X2 // 66410f3801d3 + PHADDW (BX), X11 // 66440f38011b + PHADDW (R11), X11 // 66450f38011b + PHADDW X2, X11 // 66440f3801da + PHADDW X11, X11 // 66450f3801db + PHMINPOSUW (BX), X2 // 660f384113 + PHMINPOSUW (R11), X2 // 66410f384113 + PHMINPOSUW X2, X2 // 660f3841d2 + PHMINPOSUW X11, X2 // 66410f3841d3 + PHMINPOSUW (BX), X11 // 66440f38411b + PHMINPOSUW (R11), X11 // 66450f38411b + PHMINPOSUW X2, X11 // 66440f3841da + PHMINPOSUW X11, X11 // 66450f3841db + //TODO: PHSUBD (BX), M2 // 0f380613 + //TODO: PHSUBD (R11), M2 // 410f380613 + //TODO: PHSUBD M2, M2 // 0f3806d2 + //TODO: PHSUBD M3, M2 // 0f3806d3 + //TODO: PHSUBD (BX), M3 // 0f38061b + //TODO: PHSUBD (R11), M3 // 410f38061b + //TODO: PHSUBD M2, M3 // 0f3806da + //TODO: PHSUBD M3, M3 // 0f3806db + PHSUBD (BX), X2 // 660f380613 + PHSUBD (R11), X2 // 66410f380613 + PHSUBD X2, X2 // 660f3806d2 + PHSUBD X11, X2 // 66410f3806d3 + PHSUBD (BX), X11 // 66440f38061b + PHSUBD (R11), X11 // 66450f38061b + PHSUBD X2, X11 // 66440f3806da + PHSUBD X11, X11 // 66450f3806db + //TODO: PHSUBSW (BX), M2 // 0f380713 + //TODO: PHSUBSW (R11), M2 // 410f380713 + //TODO: PHSUBSW M2, M2 // 0f3807d2 + //TODO: PHSUBSW M3, M2 // 0f3807d3 + //TODO: PHSUBSW (BX), M3 // 0f38071b + //TODO: PHSUBSW (R11), 
M3 // 410f38071b + //TODO: PHSUBSW M2, M3 // 0f3807da + //TODO: PHSUBSW M3, M3 // 0f3807db + PHSUBSW (BX), X2 // 660f380713 + PHSUBSW (R11), X2 // 66410f380713 + PHSUBSW X2, X2 // 660f3807d2 + PHSUBSW X11, X2 // 66410f3807d3 + PHSUBSW (BX), X11 // 66440f38071b + PHSUBSW (R11), X11 // 66450f38071b + PHSUBSW X2, X11 // 66440f3807da + PHSUBSW X11, X11 // 66450f3807db + //TODO: PHSUBW (BX), M2 // 0f380513 + //TODO: PHSUBW (R11), M2 // 410f380513 + //TODO: PHSUBW M2, M2 // 0f3805d2 + //TODO: PHSUBW M3, M2 // 0f3805d3 + //TODO: PHSUBW (BX), M3 // 0f38051b + //TODO: PHSUBW (R11), M3 // 410f38051b + //TODO: PHSUBW M2, M3 // 0f3805da + //TODO: PHSUBW M3, M3 // 0f3805db + PHSUBW (BX), X2 // 660f380513 + PHSUBW (R11), X2 // 66410f380513 + PHSUBW X2, X2 // 660f3805d2 + PHSUBW X11, X2 // 66410f3805d3 + PHSUBW (BX), X11 // 66440f38051b + PHSUBW (R11), X11 // 66450f38051b + PHSUBW X2, X11 // 66440f3805da + PHSUBW X11, X11 // 66450f3805db + PINSRB $7, (BX), X2 // 660f3a201307 + PINSRB $7, (R11), X2 // 66410f3a201307 + PINSRB $7, DX, X2 // 660f3a20d207 + PINSRB $7, R11, X2 // 66410f3a20d307 + PINSRB $7, (BX), X11 // 66440f3a201b07 + PINSRB $7, (R11), X11 // 66450f3a201b07 + PINSRB $7, DX, X11 // 66440f3a20da07 + PINSRB $7, R11, X11 // 66450f3a20db07 + PINSRD $7, (BX), X2 // 660f3a221307 + PINSRD $7, (R11), X2 // 66410f3a221307 + PINSRD $7, DX, X2 // 660f3a22d207 + PINSRD $7, R11, X2 // 66410f3a22d307 + PINSRD $7, (BX), X11 // 66440f3a221b07 + PINSRD $7, (R11), X11 // 66450f3a221b07 + PINSRD $7, DX, X11 // 66440f3a22da07 + PINSRD $7, R11, X11 // 66450f3a22db07 + PINSRQ $7, (BX), X2 // 66480f3a221307 + PINSRQ $7, (R11), X2 // 66490f3a221307 + PINSRQ $7, DX, X2 // 66480f3a22d207 + PINSRQ $7, R11, X2 // 66490f3a22d307 + PINSRQ $7, (BX), X11 // 664c0f3a221b07 + PINSRQ $7, (R11), X11 // 664d0f3a221b07 + PINSRQ $7, DX, X11 // 664c0f3a22da07 + PINSRQ $7, R11, X11 // 664d0f3a22db07 + //TODO: PINSRW $7, (BX), M2 // 0fc41307 + //TODO: PINSRW $7, (R11), M2 // 410fc41307 + //TODO: PINSRW $7, 
DX, M2 // 0fc4d207 + //TODO: PINSRW $7, R11, M2 // 410fc4d307 + //TODO: PINSRW $7, (BX), M3 // 0fc41b07 + //TODO: PINSRW $7, (R11), M3 // 410fc41b07 + //TODO: PINSRW $7, DX, M3 // 0fc4da07 + //TODO: PINSRW $7, R11, M3 // 410fc4db07 + PINSRW $7, (BX), X2 // 660fc41307 + PINSRW $7, (R11), X2 // 66410fc41307 + PINSRW $7, DX, X2 // 660fc4d207 + PINSRW $7, R11, X2 // 66410fc4d307 + PINSRW $7, (BX), X11 // 66440fc41b07 + PINSRW $7, (R11), X11 // 66450fc41b07 + PINSRW $7, DX, X11 // 66440fc4da07 + PINSRW $7, R11, X11 // 66450fc4db07 + //TODO: PMADDUBSW (BX), M2 // 0f380413 + //TODO: PMADDUBSW (R11), M2 // 410f380413 + //TODO: PMADDUBSW M2, M2 // 0f3804d2 + //TODO: PMADDUBSW M3, M2 // 0f3804d3 + //TODO: PMADDUBSW (BX), M3 // 0f38041b + //TODO: PMADDUBSW (R11), M3 // 410f38041b + //TODO: PMADDUBSW M2, M3 // 0f3804da + //TODO: PMADDUBSW M3, M3 // 0f3804db + PMADDUBSW (BX), X2 // 660f380413 + PMADDUBSW (R11), X2 // 66410f380413 + PMADDUBSW X2, X2 // 660f3804d2 + PMADDUBSW X11, X2 // 66410f3804d3 + PMADDUBSW (BX), X11 // 66440f38041b + PMADDUBSW (R11), X11 // 66450f38041b + PMADDUBSW X2, X11 // 66440f3804da + PMADDUBSW X11, X11 // 66450f3804db + PMADDWL (BX), M2 // 0ff513 + PMADDWL (R11), M2 // 410ff513 + PMADDWL M2, M2 // 0ff5d2 + PMADDWL M3, M2 // 0ff5d3 + PMADDWL (BX), M3 // 0ff51b + PMADDWL (R11), M3 // 410ff51b + PMADDWL M2, M3 // 0ff5da + PMADDWL M3, M3 // 0ff5db + PMADDWL (BX), X2 // 660ff513 + PMADDWL (R11), X2 // 66410ff513 + PMADDWL X2, X2 // 660ff5d2 + PMADDWL X11, X2 // 66410ff5d3 + PMADDWL (BX), X11 // 66440ff51b + PMADDWL (R11), X11 // 66450ff51b + PMADDWL X2, X11 // 66440ff5da + PMADDWL X11, X11 // 66450ff5db + PMAXSB (BX), X2 // 660f383c13 + PMAXSB (R11), X2 // 66410f383c13 + PMAXSB X2, X2 // 660f383cd2 + PMAXSB X11, X2 // 66410f383cd3 + PMAXSB (BX), X11 // 66440f383c1b + PMAXSB (R11), X11 // 66450f383c1b + PMAXSB X2, X11 // 66440f383cda + PMAXSB X11, X11 // 66450f383cdb + PMAXSD (BX), X2 // 660f383d13 + PMAXSD (R11), X2 // 66410f383d13 + PMAXSD X2, X2 // 
660f383dd2 + PMAXSD X11, X2 // 66410f383dd3 + PMAXSD (BX), X11 // 66440f383d1b + PMAXSD (R11), X11 // 66450f383d1b + PMAXSD X2, X11 // 66440f383dda + PMAXSD X11, X11 // 66450f383ddb + //TODO: PMAXSW (BX), M2 // 0fee13 + //TODO: PMAXSW (R11), M2 // 410fee13 + //TODO: PMAXSW M2, M2 // 0feed2 + //TODO: PMAXSW M3, M2 // 0feed3 + //TODO: PMAXSW (BX), M3 // 0fee1b + //TODO: PMAXSW (R11), M3 // 410fee1b + //TODO: PMAXSW M2, M3 // 0feeda + //TODO: PMAXSW M3, M3 // 0feedb + PMAXSW (BX), X2 // 660fee13 + PMAXSW (R11), X2 // 66410fee13 + PMAXSW X2, X2 // 660feed2 + PMAXSW X11, X2 // 66410feed3 + PMAXSW (BX), X11 // 66440fee1b + PMAXSW (R11), X11 // 66450fee1b + PMAXSW X2, X11 // 66440feeda + PMAXSW X11, X11 // 66450feedb + //TODO: PMAXUB (BX), M2 // 0fde13 + //TODO: PMAXUB (R11), M2 // 410fde13 + //TODO: PMAXUB M2, M2 // 0fded2 + //TODO: PMAXUB M3, M2 // 0fded3 + //TODO: PMAXUB (BX), M3 // 0fde1b + //TODO: PMAXUB (R11), M3 // 410fde1b + //TODO: PMAXUB M2, M3 // 0fdeda + //TODO: PMAXUB M3, M3 // 0fdedb + PMAXUB (BX), X2 // 660fde13 + PMAXUB (R11), X2 // 66410fde13 + PMAXUB X2, X2 // 660fded2 + PMAXUB X11, X2 // 66410fded3 + PMAXUB (BX), X11 // 66440fde1b + PMAXUB (R11), X11 // 66450fde1b + PMAXUB X2, X11 // 66440fdeda + PMAXUB X11, X11 // 66450fdedb + PMAXUD (BX), X2 // 660f383f13 + PMAXUD (R11), X2 // 66410f383f13 + PMAXUD X2, X2 // 660f383fd2 + PMAXUD X11, X2 // 66410f383fd3 + PMAXUD (BX), X11 // 66440f383f1b + PMAXUD (R11), X11 // 66450f383f1b + PMAXUD X2, X11 // 66440f383fda + PMAXUD X11, X11 // 66450f383fdb + PMAXUW (BX), X2 // 660f383e13 + PMAXUW (R11), X2 // 66410f383e13 + PMAXUW X2, X2 // 660f383ed2 + PMAXUW X11, X2 // 66410f383ed3 + PMAXUW (BX), X11 // 66440f383e1b + PMAXUW (R11), X11 // 66450f383e1b + PMAXUW X2, X11 // 66440f383eda + PMAXUW X11, X11 // 66450f383edb + PMINSB (BX), X2 // 660f383813 + PMINSB (R11), X2 // 66410f383813 + PMINSB X2, X2 // 660f3838d2 + PMINSB X11, X2 // 66410f3838d3 + PMINSB (BX), X11 // 66440f38381b + PMINSB (R11), X11 // 66450f38381b + 
PMINSB X2, X11 // 66440f3838da + PMINSB X11, X11 // 66450f3838db + PMINSD (BX), X2 // 660f383913 + PMINSD (R11), X2 // 66410f383913 + PMINSD X2, X2 // 660f3839d2 + PMINSD X11, X2 // 66410f3839d3 + PMINSD (BX), X11 // 66440f38391b + PMINSD (R11), X11 // 66450f38391b + PMINSD X2, X11 // 66440f3839da + PMINSD X11, X11 // 66450f3839db + //TODO: PMINSW (BX), M2 // 0fea13 + //TODO: PMINSW (R11), M2 // 410fea13 + //TODO: PMINSW M2, M2 // 0fead2 + //TODO: PMINSW M3, M2 // 0fead3 + //TODO: PMINSW (BX), M3 // 0fea1b + //TODO: PMINSW (R11), M3 // 410fea1b + //TODO: PMINSW M2, M3 // 0feada + //TODO: PMINSW M3, M3 // 0feadb + PMINSW (BX), X2 // 660fea13 + PMINSW (R11), X2 // 66410fea13 + PMINSW X2, X2 // 660fead2 + PMINSW X11, X2 // 66410fead3 + PMINSW (BX), X11 // 66440fea1b + PMINSW (R11), X11 // 66450fea1b + PMINSW X2, X11 // 66440feada + PMINSW X11, X11 // 66450feadb + //TODO: PMINUB (BX), M2 // 0fda13 + //TODO: PMINUB (R11), M2 // 410fda13 + //TODO: PMINUB M2, M2 // 0fdad2 + //TODO: PMINUB M3, M2 // 0fdad3 + //TODO: PMINUB (BX), M3 // 0fda1b + //TODO: PMINUB (R11), M3 // 410fda1b + //TODO: PMINUB M2, M3 // 0fdada + //TODO: PMINUB M3, M3 // 0fdadb + PMINUB (BX), X2 // 660fda13 + PMINUB (R11), X2 // 66410fda13 + PMINUB X2, X2 // 660fdad2 + PMINUB X11, X2 // 66410fdad3 + PMINUB (BX), X11 // 66440fda1b + PMINUB (R11), X11 // 66450fda1b + PMINUB X2, X11 // 66440fdada + PMINUB X11, X11 // 66450fdadb + PMINUD (BX), X2 // 660f383b13 + PMINUD (R11), X2 // 66410f383b13 + PMINUD X2, X2 // 660f383bd2 + PMINUD X11, X2 // 66410f383bd3 + PMINUD (BX), X11 // 66440f383b1b + PMINUD (R11), X11 // 66450f383b1b + PMINUD X2, X11 // 66440f383bda + PMINUD X11, X11 // 66450f383bdb + PMINUW (BX), X2 // 660f383a13 + PMINUW (R11), X2 // 66410f383a13 + PMINUW X2, X2 // 660f383ad2 + PMINUW X11, X2 // 66410f383ad3 + PMINUW (BX), X11 // 66440f383a1b + PMINUW (R11), X11 // 66450f383a1b + PMINUW X2, X11 // 66440f383ada + PMINUW X11, X11 // 66450f383adb + PMOVMSKB M2, DX // 0fd7d2 + PMOVMSKB M3, DX // 
0fd7d3 + PMOVMSKB M2, R11 // 440fd7da + PMOVMSKB M3, R11 // 440fd7db + PMOVMSKB X2, DX // 660fd7d2 + PMOVMSKB X11, DX // 66410fd7d3 + PMOVMSKB X2, R11 // 66440fd7da + PMOVMSKB X11, R11 // 66450fd7db + PMOVSXBD (BX), X2 // 660f382113 + PMOVSXBD (R11), X2 // 66410f382113 + PMOVSXBD X2, X2 // 660f3821d2 + PMOVSXBD X11, X2 // 66410f3821d3 + PMOVSXBD (BX), X11 // 66440f38211b + PMOVSXBD (R11), X11 // 66450f38211b + PMOVSXBD X2, X11 // 66440f3821da + PMOVSXBD X11, X11 // 66450f3821db + PMOVSXBQ (BX), X2 // 660f382213 + PMOVSXBQ (R11), X2 // 66410f382213 + PMOVSXBQ X2, X2 // 660f3822d2 + PMOVSXBQ X11, X2 // 66410f3822d3 + PMOVSXBQ (BX), X11 // 66440f38221b + PMOVSXBQ (R11), X11 // 66450f38221b + PMOVSXBQ X2, X11 // 66440f3822da + PMOVSXBQ X11, X11 // 66450f3822db + PMOVSXBW (BX), X2 // 660f382013 + PMOVSXBW (R11), X2 // 66410f382013 + PMOVSXBW X2, X2 // 660f3820d2 + PMOVSXBW X11, X2 // 66410f3820d3 + PMOVSXBW (BX), X11 // 66440f38201b + PMOVSXBW (R11), X11 // 66450f38201b + PMOVSXBW X2, X11 // 66440f3820da + PMOVSXBW X11, X11 // 66450f3820db + PMOVSXDQ (BX), X2 // 660f382513 + PMOVSXDQ (R11), X2 // 66410f382513 + PMOVSXDQ X2, X2 // 660f3825d2 + PMOVSXDQ X11, X2 // 66410f3825d3 + PMOVSXDQ (BX), X11 // 66440f38251b + PMOVSXDQ (R11), X11 // 66450f38251b + PMOVSXDQ X2, X11 // 66440f3825da + PMOVSXDQ X11, X11 // 66450f3825db + PMOVSXWD (BX), X2 // 660f382313 + PMOVSXWD (R11), X2 // 66410f382313 + PMOVSXWD X2, X2 // 660f3823d2 + PMOVSXWD X11, X2 // 66410f3823d3 + PMOVSXWD (BX), X11 // 66440f38231b + PMOVSXWD (R11), X11 // 66450f38231b + PMOVSXWD X2, X11 // 66440f3823da + PMOVSXWD X11, X11 // 66450f3823db + PMOVSXWQ (BX), X2 // 660f382413 + PMOVSXWQ (R11), X2 // 66410f382413 + PMOVSXWQ X2, X2 // 660f3824d2 + PMOVSXWQ X11, X2 // 66410f3824d3 + PMOVSXWQ (BX), X11 // 66440f38241b + PMOVSXWQ (R11), X11 // 66450f38241b + PMOVSXWQ X2, X11 // 66440f3824da + PMOVSXWQ X11, X11 // 66450f3824db + PMOVZXBD (BX), X2 // 660f383113 + PMOVZXBD (R11), X2 // 66410f383113 + PMOVZXBD X2, X2 // 
660f3831d2 + PMOVZXBD X11, X2 // 66410f3831d3 + PMOVZXBD (BX), X11 // 66440f38311b + PMOVZXBD (R11), X11 // 66450f38311b + PMOVZXBD X2, X11 // 66440f3831da + PMOVZXBD X11, X11 // 66450f3831db + PMOVZXBQ (BX), X2 // 660f383213 + PMOVZXBQ (R11), X2 // 66410f383213 + PMOVZXBQ X2, X2 // 660f3832d2 + PMOVZXBQ X11, X2 // 66410f3832d3 + PMOVZXBQ (BX), X11 // 66440f38321b + PMOVZXBQ (R11), X11 // 66450f38321b + PMOVZXBQ X2, X11 // 66440f3832da + PMOVZXBQ X11, X11 // 66450f3832db + PMOVZXBW (BX), X2 // 660f383013 + PMOVZXBW (R11), X2 // 66410f383013 + PMOVZXBW X2, X2 // 660f3830d2 + PMOVZXBW X11, X2 // 66410f3830d3 + PMOVZXBW (BX), X11 // 66440f38301b + PMOVZXBW (R11), X11 // 66450f38301b + PMOVZXBW X2, X11 // 66440f3830da + PMOVZXBW X11, X11 // 66450f3830db + PMOVZXDQ (BX), X2 // 660f383513 + PMOVZXDQ (R11), X2 // 66410f383513 + PMOVZXDQ X2, X2 // 660f3835d2 + PMOVZXDQ X11, X2 // 66410f3835d3 + PMOVZXDQ (BX), X11 // 66440f38351b + PMOVZXDQ (R11), X11 // 66450f38351b + PMOVZXDQ X2, X11 // 66440f3835da + PMOVZXDQ X11, X11 // 66450f3835db + PMOVZXWD (BX), X2 // 660f383313 + PMOVZXWD (R11), X2 // 66410f383313 + PMOVZXWD X2, X2 // 660f3833d2 + PMOVZXWD X11, X2 // 66410f3833d3 + PMOVZXWD (BX), X11 // 66440f38331b + PMOVZXWD (R11), X11 // 66450f38331b + PMOVZXWD X2, X11 // 66440f3833da + PMOVZXWD X11, X11 // 66450f3833db + PMOVZXWQ (BX), X2 // 660f383413 + PMOVZXWQ (R11), X2 // 66410f383413 + PMOVZXWQ X2, X2 // 660f3834d2 + PMOVZXWQ X11, X2 // 66410f3834d3 + PMOVZXWQ (BX), X11 // 66440f38341b + PMOVZXWQ (R11), X11 // 66450f38341b + PMOVZXWQ X2, X11 // 66440f3834da + PMOVZXWQ X11, X11 // 66450f3834db + PMULDQ (BX), X2 // 660f382813 + PMULDQ (R11), X2 // 66410f382813 + PMULDQ X2, X2 // 660f3828d2 + PMULDQ X11, X2 // 66410f3828d3 + PMULDQ (BX), X11 // 66440f38281b + PMULDQ (R11), X11 // 66450f38281b + PMULDQ X2, X11 // 66440f3828da + PMULDQ X11, X11 // 66450f3828db + //TODO: PMULHRSW (BX), M2 // 0f380b13 + //TODO: PMULHRSW (R11), M2 // 410f380b13 + //TODO: PMULHRSW M2, M2 // 
0f380bd2 + //TODO: PMULHRSW M3, M2 // 0f380bd3 + //TODO: PMULHRSW (BX), M3 // 0f380b1b + //TODO: PMULHRSW (R11), M3 // 410f380b1b + //TODO: PMULHRSW M2, M3 // 0f380bda + //TODO: PMULHRSW M3, M3 // 0f380bdb + PMULHRSW (BX), X2 // 660f380b13 + PMULHRSW (R11), X2 // 66410f380b13 + PMULHRSW X2, X2 // 660f380bd2 + PMULHRSW X11, X2 // 66410f380bd3 + PMULHRSW (BX), X11 // 66440f380b1b + PMULHRSW (R11), X11 // 66450f380b1b + PMULHRSW X2, X11 // 66440f380bda + PMULHRSW X11, X11 // 66450f380bdb + PMULHUW (BX), M2 // 0fe413 + PMULHUW (R11), M2 // 410fe413 + PMULHUW M2, M2 // 0fe4d2 + PMULHUW M3, M2 // 0fe4d3 + PMULHUW (BX), M3 // 0fe41b + PMULHUW (R11), M3 // 410fe41b + PMULHUW M2, M3 // 0fe4da + PMULHUW M3, M3 // 0fe4db + PMULHUW (BX), X2 // 660fe413 + PMULHUW (R11), X2 // 66410fe413 + PMULHUW X2, X2 // 660fe4d2 + PMULHUW X11, X2 // 66410fe4d3 + PMULHUW (BX), X11 // 66440fe41b + PMULHUW (R11), X11 // 66450fe41b + PMULHUW X2, X11 // 66440fe4da + PMULHUW X11, X11 // 66450fe4db + PMULHW (BX), M2 // 0fe513 + PMULHW (R11), M2 // 410fe513 + PMULHW M2, M2 // 0fe5d2 + PMULHW M3, M2 // 0fe5d3 + PMULHW (BX), M3 // 0fe51b + PMULHW (R11), M3 // 410fe51b + PMULHW M2, M3 // 0fe5da + PMULHW M3, M3 // 0fe5db + PMULHW (BX), X2 // 660fe513 + PMULHW (R11), X2 // 66410fe513 + PMULHW X2, X2 // 660fe5d2 + PMULHW X11, X2 // 66410fe5d3 + PMULHW (BX), X11 // 66440fe51b + PMULHW (R11), X11 // 66450fe51b + PMULHW X2, X11 // 66440fe5da + PMULHW X11, X11 // 66450fe5db + PMULLD (BX), X2 // 660f384013 + PMULLD (R11), X2 // 66410f384013 + PMULLD X2, X2 // 660f3840d2 + PMULLD X11, X2 // 66410f3840d3 + PMULLD (BX), X11 // 66440f38401b + PMULLD (R11), X11 // 66450f38401b + PMULLD X2, X11 // 66440f3840da + PMULLD X11, X11 // 66450f3840db + PMULLW (BX), M2 // 0fd513 + PMULLW (R11), M2 // 410fd513 + PMULLW M2, M2 // 0fd5d2 + PMULLW M3, M2 // 0fd5d3 + PMULLW (BX), M3 // 0fd51b + PMULLW (R11), M3 // 410fd51b + PMULLW M2, M3 // 0fd5da + PMULLW M3, M3 // 0fd5db + PMULLW (BX), X2 // 660fd513 + PMULLW (R11), X2 // 
66410fd513 + PMULLW X2, X2 // 660fd5d2 + PMULLW X11, X2 // 66410fd5d3 + PMULLW (BX), X11 // 66440fd51b + PMULLW (R11), X11 // 66450fd51b + PMULLW X2, X11 // 66440fd5da + PMULLW X11, X11 // 66450fd5db + PMULULQ (BX), M2 // 0ff413 + PMULULQ (R11), M2 // 410ff413 + PMULULQ M2, M2 // 0ff4d2 + PMULULQ M3, M2 // 0ff4d3 + PMULULQ (BX), M3 // 0ff41b + PMULULQ (R11), M3 // 410ff41b + PMULULQ M2, M3 // 0ff4da + PMULULQ M3, M3 // 0ff4db + PMULULQ (BX), X2 // 660ff413 + PMULULQ (R11), X2 // 66410ff413 + PMULULQ X2, X2 // 660ff4d2 + PMULULQ X11, X2 // 66410ff4d3 + PMULULQ (BX), X11 // 66440ff41b + PMULULQ (R11), X11 // 66450ff41b + PMULULQ X2, X11 // 66440ff4da + PMULULQ X11, X11 // 66450ff4db + PUSHQ AX + POPQ FS // 660fa1 or 0fa1 + PUSHQ AX + POPQ GS // 660fa9 or 0fa9 + PUSHW AX + POPW (BX) // 668f03 + PUSHW AX + POPW (R11) // 66418f03 + PUSHW AX + POPW DX // 668fc2 or 665a + PUSHW AX + POPW R11 // 66418fc3 or 66415b + PUSHQ AX + POPQ (BX) // 8f03 + PUSHQ AX + POPQ (R11) // 418f03 + PUSHQ AX + POPQ DX // 8fc2 or 5a + PUSHQ AX + POPQ R11 // 418fc3 or 415b + POPCNTW (BX), DX // 66f30fb813 + POPCNTW (R11), DX // 66f3410fb813 + POPCNTW DX, DX // 66f30fb8d2 + POPCNTW R11, DX // 66f3410fb8d3 + POPCNTW (BX), R11 // 66f3440fb81b + POPCNTW (R11), R11 // 66f3450fb81b + POPCNTW DX, R11 // 66f3440fb8da + POPCNTW R11, R11 // 66f3450fb8db + POPCNTL (BX), DX // f30fb813 + POPCNTL (R11), DX // f3410fb813 + POPCNTL DX, DX // f30fb8d2 + POPCNTL R11, DX // f3410fb8d3 + POPCNTL (BX), R11 // f3440fb81b + POPCNTL (R11), R11 // f3450fb81b + POPCNTL DX, R11 // f3440fb8da + POPCNTL R11, R11 // f3450fb8db + POPCNTQ (BX), DX // f3480fb813 + POPCNTQ (R11), DX // f3490fb813 + POPCNTQ DX, DX // f3480fb8d2 + POPCNTQ R11, DX // f3490fb8d3 + POPCNTQ (BX), R11 // f34c0fb81b + POPCNTQ (R11), R11 // f34d0fb81b + POPCNTQ DX, R11 // f34c0fb8da + POPCNTQ R11, R11 // f34d0fb8db + PUSHFW + POPFW // 669d + PUSHFQ + POPFQ // 9d + POR (BX), M2 // 0feb13 + POR (R11), M2 // 410feb13 + POR M2, M2 // 0febd2 + POR M3, M2 // 
0febd3 + POR (BX), M3 // 0feb1b + POR (R11), M3 // 410feb1b + POR M2, M3 // 0febda + POR M3, M3 // 0febdb + POR (BX), X2 // 660feb13 + POR (R11), X2 // 66410feb13 + POR X2, X2 // 660febd2 + POR X11, X2 // 66410febd3 + POR (BX), X11 // 66440feb1b + POR (R11), X11 // 66450feb1b + POR X2, X11 // 66440febda + POR X11, X11 // 66450febdb + PREFETCHNTA (BX) // 0f1803 + PREFETCHNTA (R11) // 410f1803 + PREFETCHT0 (BX) // 0f180b + PREFETCHT0 (R11) // 410f180b + PREFETCHT1 (BX) // 0f1813 + PREFETCHT1 (R11) // 410f1813 + PREFETCHT2 (BX) // 0f181b + PREFETCHT2 (R11) // 410f181b + //TODO: PREFETCHW (BX) // 0f0d0b + //TODO: PREFETCHW (R11) // 410f0d0b + //TODO: PREFETCHWT1 (BX) // 0f0d13 + //TODO: PREFETCHWT1 (R11) // 410f0d13 + //TODO: PSADBW (BX), M2 // 0ff613 + //TODO: PSADBW (R11), M2 // 410ff613 + //TODO: PSADBW M2, M2 // 0ff6d2 + //TODO: PSADBW M3, M2 // 0ff6d3 + //TODO: PSADBW (BX), M3 // 0ff61b + //TODO: PSADBW (R11), M3 // 410ff61b + //TODO: PSADBW M2, M3 // 0ff6da + //TODO: PSADBW M3, M3 // 0ff6db + PSADBW (BX), X2 // 660ff613 + PSADBW (R11), X2 // 66410ff613 + PSADBW X2, X2 // 660ff6d2 + PSADBW X11, X2 // 66410ff6d3 + PSADBW (BX), X11 // 66440ff61b + PSADBW (R11), X11 // 66450ff61b + PSADBW X2, X11 // 66440ff6da + PSADBW X11, X11 // 66450ff6db + //TODO: PSHUFB (BX), M2 // 0f380013 + //TODO: PSHUFB (R11), M2 // 410f380013 + //TODO: PSHUFB M2, M2 // 0f3800d2 + //TODO: PSHUFB M3, M2 // 0f3800d3 + //TODO: PSHUFB (BX), M3 // 0f38001b + //TODO: PSHUFB (R11), M3 // 410f38001b + //TODO: PSHUFB M2, M3 // 0f3800da + //TODO: PSHUFB M3, M3 // 0f3800db + PSHUFB (BX), X2 // 660f380013 + PSHUFB (R11), X2 // 66410f380013 + PSHUFB X2, X2 // 660f3800d2 + PSHUFB X11, X2 // 66410f3800d3 + PSHUFB (BX), X11 // 66440f38001b + PSHUFB (R11), X11 // 66450f38001b + PSHUFB X2, X11 // 66440f3800da + PSHUFB X11, X11 // 66450f3800db + PSHUFD $7, (BX), X2 // 660f701307 + PSHUFL $7, (BX), X2 // 660f701307 + PSHUFD $7, (R11), X2 // 66410f701307 + PSHUFL $7, (R11), X2 // 66410f701307 + PSHUFD $7, X2, X2 
// 660f70d207 + PSHUFL $7, X2, X2 // 660f70d207 + PSHUFD $7, X11, X2 // 66410f70d307 + PSHUFL $7, X11, X2 // 66410f70d307 + PSHUFD $7, (BX), X11 // 66440f701b07 + PSHUFL $7, (BX), X11 // 66440f701b07 + PSHUFD $7, (R11), X11 // 66450f701b07 + PSHUFL $7, (R11), X11 // 66450f701b07 + PSHUFD $7, X2, X11 // 66440f70da07 + PSHUFL $7, X2, X11 // 66440f70da07 + PSHUFD $7, X11, X11 // 66450f70db07 + PSHUFL $7, X11, X11 // 66450f70db07 + PSHUFHW $7, (BX), X2 // f30f701307 + PSHUFHW $7, (R11), X2 // f3410f701307 + PSHUFHW $7, X2, X2 // f30f70d207 + PSHUFHW $7, X11, X2 // f3410f70d307 + PSHUFHW $7, (BX), X11 // f3440f701b07 + PSHUFHW $7, (R11), X11 // f3450f701b07 + PSHUFHW $7, X2, X11 // f3440f70da07 + PSHUFHW $7, X11, X11 // f3450f70db07 + PSHUFLW $7, (BX), X2 // f20f701307 + PSHUFLW $7, (R11), X2 // f2410f701307 + PSHUFLW $7, X2, X2 // f20f70d207 + PSHUFLW $7, X11, X2 // f2410f70d307 + PSHUFLW $7, (BX), X11 // f2440f701b07 + PSHUFLW $7, (R11), X11 // f2450f701b07 + PSHUFLW $7, X2, X11 // f2440f70da07 + PSHUFLW $7, X11, X11 // f2450f70db07 + PSHUFW $7, (BX), M2 // 0f701307 + PSHUFW $7, (R11), M2 // 410f701307 + PSHUFW $7, M2, M2 // 0f70d207 + PSHUFW $7, M3, M2 // 0f70d307 + PSHUFW $7, (BX), M3 // 0f701b07 + PSHUFW $7, (R11), M3 // 410f701b07 + PSHUFW $7, M2, M3 // 0f70da07 + PSHUFW $7, M3, M3 // 0f70db07 + //TODO: PSIGNB (BX), M2 // 0f380813 + //TODO: PSIGNB (R11), M2 // 410f380813 + //TODO: PSIGNB M2, M2 // 0f3808d2 + //TODO: PSIGNB M3, M2 // 0f3808d3 + //TODO: PSIGNB (BX), M3 // 0f38081b + //TODO: PSIGNB (R11), M3 // 410f38081b + //TODO: PSIGNB M2, M3 // 0f3808da + //TODO: PSIGNB M3, M3 // 0f3808db + PSIGNB (BX), X2 // 660f380813 + PSIGNB (R11), X2 // 66410f380813 + PSIGNB X2, X2 // 660f3808d2 + PSIGNB X11, X2 // 66410f3808d3 + PSIGNB (BX), X11 // 66440f38081b + PSIGNB (R11), X11 // 66450f38081b + PSIGNB X2, X11 // 66440f3808da + PSIGNB X11, X11 // 66450f3808db + //TODO: PSIGND (BX), M2 // 0f380a13 + //TODO: PSIGND (R11), M2 // 410f380a13 + //TODO: PSIGND M2, M2 // 
0f380ad2 + //TODO: PSIGND M3, M2 // 0f380ad3 + //TODO: PSIGND (BX), M3 // 0f380a1b + //TODO: PSIGND (R11), M3 // 410f380a1b + //TODO: PSIGND M2, M3 // 0f380ada + //TODO: PSIGND M3, M3 // 0f380adb + PSIGND (BX), X2 // 660f380a13 + PSIGND (R11), X2 // 66410f380a13 + PSIGND X2, X2 // 660f380ad2 + PSIGND X11, X2 // 66410f380ad3 + PSIGND (BX), X11 // 66440f380a1b + PSIGND (R11), X11 // 66450f380a1b + PSIGND X2, X11 // 66440f380ada + PSIGND X11, X11 // 66450f380adb + //TODO: PSIGNW (BX), M2 // 0f380913 + //TODO: PSIGNW (R11), M2 // 410f380913 + //TODO: PSIGNW M2, M2 // 0f3809d2 + //TODO: PSIGNW M3, M2 // 0f3809d3 + //TODO: PSIGNW (BX), M3 // 0f38091b + //TODO: PSIGNW (R11), M3 // 410f38091b + //TODO: PSIGNW M2, M3 // 0f3809da + //TODO: PSIGNW M3, M3 // 0f3809db + PSIGNW (BX), X2 // 660f380913 + PSIGNW (R11), X2 // 66410f380913 + PSIGNW X2, X2 // 660f3809d2 + PSIGNW X11, X2 // 66410f3809d3 + PSIGNW (BX), X11 // 66440f38091b + PSIGNW (R11), X11 // 66450f38091b + PSIGNW X2, X11 // 66440f3809da + PSIGNW X11, X11 // 66450f3809db + PSLLL (BX), M2 // 0ff213 + PSLLL (R11), M2 // 410ff213 + PSLLL M2, M2 // 0ff2d2 + PSLLL M3, M2 // 0ff2d3 + PSLLL (BX), M3 // 0ff21b + PSLLL (R11), M3 // 410ff21b + PSLLL M2, M3 // 0ff2da + PSLLL M3, M3 // 0ff2db + PSLLL $7, M2 // 0f72f207 + PSLLL $7, M3 // 0f72f307 + PSLLL (BX), X2 // 660ff213 + PSLLL (R11), X2 // 66410ff213 + PSLLL X2, X2 // 660ff2d2 + PSLLL X11, X2 // 66410ff2d3 + PSLLL (BX), X11 // 66440ff21b + PSLLL (R11), X11 // 66450ff21b + PSLLL X2, X11 // 66440ff2da + PSLLL X11, X11 // 66450ff2db + PSLLL $7, X2 // 660f72f207 + PSLLL $7, X11 // 66410f72f307 + PSLLO $7, X2 // 660f73fa07 + PSLLO $7, X11 // 66410f73fb07 + PSLLQ (BX), M2 // 0ff313 + PSLLQ (R11), M2 // 410ff313 + PSLLQ M2, M2 // 0ff3d2 + PSLLQ M3, M2 // 0ff3d3 + PSLLQ (BX), M3 // 0ff31b + PSLLQ (R11), M3 // 410ff31b + PSLLQ M2, M3 // 0ff3da + PSLLQ M3, M3 // 0ff3db + PSLLQ $7, M2 // 0f73f207 + PSLLQ $7, M3 // 0f73f307 + PSLLQ (BX), X2 // 660ff313 + PSLLQ (R11), X2 // 66410ff313 + 
PSLLQ X2, X2 // 660ff3d2 + PSLLQ X11, X2 // 66410ff3d3 + PSLLQ (BX), X11 // 66440ff31b + PSLLQ (R11), X11 // 66450ff31b + PSLLQ X2, X11 // 66440ff3da + PSLLQ X11, X11 // 66450ff3db + PSLLQ $7, X2 // 660f73f207 + PSLLQ $7, X11 // 66410f73f307 + PSLLW (BX), M2 // 0ff113 + PSLLW (R11), M2 // 410ff113 + PSLLW M2, M2 // 0ff1d2 + PSLLW M3, M2 // 0ff1d3 + PSLLW (BX), M3 // 0ff11b + PSLLW (R11), M3 // 410ff11b + PSLLW M2, M3 // 0ff1da + PSLLW M3, M3 // 0ff1db + PSLLW $7, M2 // 0f71f207 + PSLLW $7, M3 // 0f71f307 + PSLLW (BX), X2 // 660ff113 + PSLLW (R11), X2 // 66410ff113 + PSLLW X2, X2 // 660ff1d2 + PSLLW X11, X2 // 66410ff1d3 + PSLLW (BX), X11 // 66440ff11b + PSLLW (R11), X11 // 66450ff11b + PSLLW X2, X11 // 66440ff1da + PSLLW X11, X11 // 66450ff1db + PSLLW $7, X2 // 660f71f207 + PSLLW $7, X11 // 66410f71f307 + PSRAL (BX), M2 // 0fe213 + PSRAL (R11), M2 // 410fe213 + PSRAL M2, M2 // 0fe2d2 + PSRAL M3, M2 // 0fe2d3 + PSRAL (BX), M3 // 0fe21b + PSRAL (R11), M3 // 410fe21b + PSRAL M2, M3 // 0fe2da + PSRAL M3, M3 // 0fe2db + PSRAL $7, M2 // 0f72e207 + PSRAL $7, M3 // 0f72e307 + PSRAL (BX), X2 // 660fe213 + PSRAL (R11), X2 // 66410fe213 + PSRAL X2, X2 // 660fe2d2 + PSRAL X11, X2 // 66410fe2d3 + PSRAL (BX), X11 // 66440fe21b + PSRAL (R11), X11 // 66450fe21b + PSRAL X2, X11 // 66440fe2da + PSRAL X11, X11 // 66450fe2db + PSRAL $7, X2 // 660f72e207 + PSRAL $7, X11 // 66410f72e307 + PSRAW (BX), M2 // 0fe113 + PSRAW (R11), M2 // 410fe113 + PSRAW M2, M2 // 0fe1d2 + PSRAW M3, M2 // 0fe1d3 + PSRAW (BX), M3 // 0fe11b + PSRAW (R11), M3 // 410fe11b + PSRAW M2, M3 // 0fe1da + PSRAW M3, M3 // 0fe1db + PSRAW $7, M2 // 0f71e207 + PSRAW $7, M3 // 0f71e307 + PSRAW (BX), X2 // 660fe113 + PSRAW (R11), X2 // 66410fe113 + PSRAW X2, X2 // 660fe1d2 + PSRAW X11, X2 // 66410fe1d3 + PSRAW (BX), X11 // 66440fe11b + PSRAW (R11), X11 // 66450fe11b + PSRAW X2, X11 // 66440fe1da + PSRAW X11, X11 // 66450fe1db + PSRAW $7, X2 // 660f71e207 + PSRAW $7, X11 // 66410f71e307 + PSRLL (BX), M2 // 0fd213 + PSRLL 
(R11), M2 // 410fd213 + PSRLL M2, M2 // 0fd2d2 + PSRLL M3, M2 // 0fd2d3 + PSRLL (BX), M3 // 0fd21b + PSRLL (R11), M3 // 410fd21b + PSRLL M2, M3 // 0fd2da + PSRLL M3, M3 // 0fd2db + PSRLL $7, M2 // 0f72d207 + PSRLL $7, M3 // 0f72d307 + PSRLL (BX), X2 // 660fd213 + PSRLL (R11), X2 // 66410fd213 + PSRLL X2, X2 // 660fd2d2 + PSRLL X11, X2 // 66410fd2d3 + PSRLL (BX), X11 // 66440fd21b + PSRLL (R11), X11 // 66450fd21b + PSRLL X2, X11 // 66440fd2da + PSRLL X11, X11 // 66450fd2db + PSRLL $7, X2 // 660f72d207 + PSRLL $7, X11 // 66410f72d307 + PSRLO $7, X2 // 660f73da07 + PSRLO $7, X11 // 66410f73db07 + PSRLQ (BX), M2 // 0fd313 + PSRLQ (R11), M2 // 410fd313 + PSRLQ M2, M2 // 0fd3d2 + PSRLQ M3, M2 // 0fd3d3 + PSRLQ (BX), M3 // 0fd31b + PSRLQ (R11), M3 // 410fd31b + PSRLQ M2, M3 // 0fd3da + PSRLQ M3, M3 // 0fd3db + PSRLQ $7, M2 // 0f73d207 + PSRLQ $7, M3 // 0f73d307 + PSRLQ (BX), X2 // 660fd313 + PSRLQ (R11), X2 // 66410fd313 + PSRLQ X2, X2 // 660fd3d2 + PSRLQ X11, X2 // 66410fd3d3 + PSRLQ (BX), X11 // 66440fd31b + PSRLQ (R11), X11 // 66450fd31b + PSRLQ X2, X11 // 66440fd3da + PSRLQ X11, X11 // 66450fd3db + PSRLQ $7, X2 // 660f73d207 + PSRLQ $7, X11 // 66410f73d307 + PSRLW (BX), M2 // 0fd113 + PSRLW (R11), M2 // 410fd113 + PSRLW M2, M2 // 0fd1d2 + PSRLW M3, M2 // 0fd1d3 + PSRLW (BX), M3 // 0fd11b + PSRLW (R11), M3 // 410fd11b + PSRLW M2, M3 // 0fd1da + PSRLW M3, M3 // 0fd1db + PSRLW $7, M2 // 0f71d207 + PSRLW $7, M3 // 0f71d307 + PSRLW (BX), X2 // 660fd113 + PSRLW (R11), X2 // 66410fd113 + PSRLW X2, X2 // 660fd1d2 + PSRLW X11, X2 // 66410fd1d3 + PSRLW (BX), X11 // 66440fd11b + PSRLW (R11), X11 // 66450fd11b + PSRLW X2, X11 // 66440fd1da + PSRLW X11, X11 // 66450fd1db + PSRLW $7, X2 // 660f71d207 + PSRLW $7, X11 // 66410f71d307 + //TODO: PSUBB (BX), M2 // 0ff813 + //TODO: PSUBB (R11), M2 // 410ff813 + //TODO: PSUBB M2, M2 // 0ff8d2 + //TODO: PSUBB M3, M2 // 0ff8d3 + //TODO: PSUBB (BX), M3 // 0ff81b + //TODO: PSUBB (R11), M3 // 410ff81b + //TODO: PSUBB M2, M3 // 0ff8da + //TODO: 
PSUBB M3, M3 // 0ff8db + PSUBB (BX), X2 // 660ff813 + PSUBB (R11), X2 // 66410ff813 + PSUBB X2, X2 // 660ff8d2 + PSUBB X11, X2 // 66410ff8d3 + PSUBB (BX), X11 // 66440ff81b + PSUBB (R11), X11 // 66450ff81b + PSUBB X2, X11 // 66440ff8da + PSUBB X11, X11 // 66450ff8db + //TODO: PSUBL (BX), M2 // 0ffa13 + //TODO: PSUBL (R11), M2 // 410ffa13 + //TODO: PSUBL M2, M2 // 0ffad2 + //TODO: PSUBL M3, M2 // 0ffad3 + //TODO: PSUBL (BX), M3 // 0ffa1b + //TODO: PSUBL (R11), M3 // 410ffa1b + //TODO: PSUBL M2, M3 // 0ffada + //TODO: PSUBL M3, M3 // 0ffadb + PSUBL (BX), X2 // 660ffa13 + PSUBL (R11), X2 // 66410ffa13 + PSUBL X2, X2 // 660ffad2 + PSUBL X11, X2 // 66410ffad3 + PSUBL (BX), X11 // 66440ffa1b + PSUBL (R11), X11 // 66450ffa1b + PSUBL X2, X11 // 66440ffada + PSUBL X11, X11 // 66450ffadb + //TODO: PSUBQ (BX), M2 // 0ffb13 + //TODO: PSUBQ (R11), M2 // 410ffb13 + //TODO: PSUBQ M2, M2 // 0ffbd2 + //TODO: PSUBQ M3, M2 // 0ffbd3 + //TODO: PSUBQ (BX), M3 // 0ffb1b + //TODO: PSUBQ (R11), M3 // 410ffb1b + //TODO: PSUBQ M2, M3 // 0ffbda + //TODO: PSUBQ M3, M3 // 0ffbdb + PSUBQ (BX), X2 // 660ffb13 + PSUBQ (R11), X2 // 66410ffb13 + PSUBQ X2, X2 // 660ffbd2 + PSUBQ X11, X2 // 66410ffbd3 + PSUBQ (BX), X11 // 66440ffb1b + PSUBQ (R11), X11 // 66450ffb1b + PSUBQ X2, X11 // 66440ffbda + PSUBQ X11, X11 // 66450ffbdb + //TODO: PSUBSB (BX), M2 // 0fe813 + //TODO: PSUBSB (R11), M2 // 410fe813 + //TODO: PSUBSB M2, M2 // 0fe8d2 + //TODO: PSUBSB M3, M2 // 0fe8d3 + //TODO: PSUBSB (BX), M3 // 0fe81b + //TODO: PSUBSB (R11), M3 // 410fe81b + //TODO: PSUBSB M2, M3 // 0fe8da + //TODO: PSUBSB M3, M3 // 0fe8db + PSUBSB (BX), X2 // 660fe813 + PSUBSB (R11), X2 // 66410fe813 + PSUBSB X2, X2 // 660fe8d2 + PSUBSB X11, X2 // 66410fe8d3 + PSUBSB (BX), X11 // 66440fe81b + PSUBSB (R11), X11 // 66450fe81b + PSUBSB X2, X11 // 66440fe8da + PSUBSB X11, X11 // 66450fe8db + //TODO: PSUBSW (BX), M2 // 0fe913 + //TODO: PSUBSW (R11), M2 // 410fe913 + //TODO: PSUBSW M2, M2 // 0fe9d2 + //TODO: PSUBSW M3, M2 // 0fe9d3 + 
//TODO: PSUBSW (BX), M3 // 0fe91b + //TODO: PSUBSW (R11), M3 // 410fe91b + //TODO: PSUBSW M2, M3 // 0fe9da + //TODO: PSUBSW M3, M3 // 0fe9db + PSUBSW (BX), X2 // 660fe913 + PSUBSW (R11), X2 // 66410fe913 + PSUBSW X2, X2 // 660fe9d2 + PSUBSW X11, X2 // 66410fe9d3 + PSUBSW (BX), X11 // 66440fe91b + PSUBSW (R11), X11 // 66450fe91b + PSUBSW X2, X11 // 66440fe9da + PSUBSW X11, X11 // 66450fe9db + //TODO: PSUBUSB (BX), M2 // 0fd813 + //TODO: PSUBUSB (R11), M2 // 410fd813 + //TODO: PSUBUSB M2, M2 // 0fd8d2 + //TODO: PSUBUSB M3, M2 // 0fd8d3 + //TODO: PSUBUSB (BX), M3 // 0fd81b + //TODO: PSUBUSB (R11), M3 // 410fd81b + //TODO: PSUBUSB M2, M3 // 0fd8da + //TODO: PSUBUSB M3, M3 // 0fd8db + PSUBUSB (BX), X2 // 660fd813 + PSUBUSB (R11), X2 // 66410fd813 + PSUBUSB X2, X2 // 660fd8d2 + PSUBUSB X11, X2 // 66410fd8d3 + PSUBUSB (BX), X11 // 66440fd81b + PSUBUSB (R11), X11 // 66450fd81b + PSUBUSB X2, X11 // 66440fd8da + PSUBUSB X11, X11 // 66450fd8db + //TODO: PSUBUSW (BX), M2 // 0fd913 + //TODO: PSUBUSW (R11), M2 // 410fd913 + //TODO: PSUBUSW M2, M2 // 0fd9d2 + //TODO: PSUBUSW M3, M2 // 0fd9d3 + //TODO: PSUBUSW (BX), M3 // 0fd91b + //TODO: PSUBUSW (R11), M3 // 410fd91b + //TODO: PSUBUSW M2, M3 // 0fd9da + //TODO: PSUBUSW M3, M3 // 0fd9db + PSUBUSW (BX), X2 // 660fd913 + PSUBUSW (R11), X2 // 66410fd913 + PSUBUSW X2, X2 // 660fd9d2 + PSUBUSW X11, X2 // 66410fd9d3 + PSUBUSW (BX), X11 // 66440fd91b + PSUBUSW (R11), X11 // 66450fd91b + PSUBUSW X2, X11 // 66440fd9da + PSUBUSW X11, X11 // 66450fd9db + //TODO: PSUBW (BX), M2 // 0ff913 + //TODO: PSUBW (R11), M2 // 410ff913 + //TODO: PSUBW M2, M2 // 0ff9d2 + //TODO: PSUBW M3, M2 // 0ff9d3 + //TODO: PSUBW (BX), M3 // 0ff91b + //TODO: PSUBW (R11), M3 // 410ff91b + //TODO: PSUBW M2, M3 // 0ff9da + //TODO: PSUBW M3, M3 // 0ff9db + PSUBW (BX), X2 // 660ff913 + PSUBW (R11), X2 // 66410ff913 + PSUBW X2, X2 // 660ff9d2 + PSUBW X11, X2 // 66410ff9d3 + PSUBW (BX), X11 // 66440ff91b + PSUBW (R11), X11 // 66450ff91b + PSUBW X2, X11 // 66440ff9da + PSUBW 
X11, X11 // 66450ff9db + PTEST (BX), X2 // 660f381713 + PTEST (R11), X2 // 66410f381713 + PTEST X2, X2 // 660f3817d2 + PTEST X11, X2 // 66410f3817d3 + PTEST (BX), X11 // 66440f38171b + PTEST (R11), X11 // 66450f38171b + PTEST X2, X11 // 66440f3817da + PTEST X11, X11 // 66450f3817db + PUNPCKHBW (BX), M2 // 0f6813 + PUNPCKHBW (R11), M2 // 410f6813 + PUNPCKHBW M2, M2 // 0f68d2 + PUNPCKHBW M3, M2 // 0f68d3 + PUNPCKHBW (BX), M3 // 0f681b + PUNPCKHBW (R11), M3 // 410f681b + PUNPCKHBW M2, M3 // 0f68da + PUNPCKHBW M3, M3 // 0f68db + PUNPCKHBW (BX), X2 // 660f6813 + PUNPCKHBW (R11), X2 // 66410f6813 + PUNPCKHBW X2, X2 // 660f68d2 + PUNPCKHBW X11, X2 // 66410f68d3 + PUNPCKHBW (BX), X11 // 66440f681b + PUNPCKHBW (R11), X11 // 66450f681b + PUNPCKHBW X2, X11 // 66440f68da + PUNPCKHBW X11, X11 // 66450f68db + PUNPCKHLQ (BX), M2 // 0f6a13 + PUNPCKHLQ (R11), M2 // 410f6a13 + PUNPCKHLQ M2, M2 // 0f6ad2 + PUNPCKHLQ M3, M2 // 0f6ad3 + PUNPCKHLQ (BX), M3 // 0f6a1b + PUNPCKHLQ (R11), M3 // 410f6a1b + PUNPCKHLQ M2, M3 // 0f6ada + PUNPCKHLQ M3, M3 // 0f6adb + PUNPCKHLQ (BX), X2 // 660f6a13 + PUNPCKHLQ (R11), X2 // 66410f6a13 + PUNPCKHLQ X2, X2 // 660f6ad2 + PUNPCKHLQ X11, X2 // 66410f6ad3 + PUNPCKHLQ (BX), X11 // 66440f6a1b + PUNPCKHLQ (R11), X11 // 66450f6a1b + PUNPCKHLQ X2, X11 // 66440f6ada + PUNPCKHLQ X11, X11 // 66450f6adb + PUNPCKHQDQ (BX), X2 // 660f6d13 + PUNPCKHQDQ (R11), X2 // 66410f6d13 + PUNPCKHQDQ X2, X2 // 660f6dd2 + PUNPCKHQDQ X11, X2 // 66410f6dd3 + PUNPCKHQDQ (BX), X11 // 66440f6d1b + PUNPCKHQDQ (R11), X11 // 66450f6d1b + PUNPCKHQDQ X2, X11 // 66440f6dda + PUNPCKHQDQ X11, X11 // 66450f6ddb + PUNPCKHWL (BX), M2 // 0f6913 + PUNPCKHWL (R11), M2 // 410f6913 + PUNPCKHWL M2, M2 // 0f69d2 + PUNPCKHWL M3, M2 // 0f69d3 + PUNPCKHWL (BX), M3 // 0f691b + PUNPCKHWL (R11), M3 // 410f691b + PUNPCKHWL M2, M3 // 0f69da + PUNPCKHWL M3, M3 // 0f69db + PUNPCKHWL (BX), X2 // 660f6913 + PUNPCKHWL (R11), X2 // 66410f6913 + PUNPCKHWL X2, X2 // 660f69d2 + PUNPCKHWL X11, X2 // 66410f69d3 + 
PUNPCKHWL (BX), X11 // 66440f691b + PUNPCKHWL (R11), X11 // 66450f691b + PUNPCKHWL X2, X11 // 66440f69da + PUNPCKHWL X11, X11 // 66450f69db + PUNPCKLBW (BX), M2 // 0f6013 + PUNPCKLBW (R11), M2 // 410f6013 + PUNPCKLBW M2, M2 // 0f60d2 + PUNPCKLBW M3, M2 // 0f60d3 + PUNPCKLBW (BX), M3 // 0f601b + PUNPCKLBW (R11), M3 // 410f601b + PUNPCKLBW M2, M3 // 0f60da + PUNPCKLBW M3, M3 // 0f60db + PUNPCKLBW (BX), X2 // 660f6013 + PUNPCKLBW (R11), X2 // 66410f6013 + PUNPCKLBW X2, X2 // 660f60d2 + PUNPCKLBW X11, X2 // 66410f60d3 + PUNPCKLBW (BX), X11 // 66440f601b + PUNPCKLBW (R11), X11 // 66450f601b + PUNPCKLBW X2, X11 // 66440f60da + PUNPCKLBW X11, X11 // 66450f60db + PUNPCKLLQ (BX), M2 // 0f6213 + PUNPCKLLQ (R11), M2 // 410f6213 + PUNPCKLLQ M2, M2 // 0f62d2 + PUNPCKLLQ M3, M2 // 0f62d3 + PUNPCKLLQ (BX), M3 // 0f621b + PUNPCKLLQ (R11), M3 // 410f621b + PUNPCKLLQ M2, M3 // 0f62da + PUNPCKLLQ M3, M3 // 0f62db + PUNPCKLLQ (BX), X2 // 660f6213 + PUNPCKLLQ (R11), X2 // 66410f6213 + PUNPCKLLQ X2, X2 // 660f62d2 + PUNPCKLLQ X11, X2 // 66410f62d3 + PUNPCKLLQ (BX), X11 // 66440f621b + PUNPCKLLQ (R11), X11 // 66450f621b + PUNPCKLLQ X2, X11 // 66440f62da + PUNPCKLLQ X11, X11 // 66450f62db + PUNPCKLQDQ (BX), X2 // 660f6c13 + PUNPCKLQDQ (R11), X2 // 66410f6c13 + PUNPCKLQDQ X2, X2 // 660f6cd2 + PUNPCKLQDQ X11, X2 // 66410f6cd3 + PUNPCKLQDQ (BX), X11 // 66440f6c1b + PUNPCKLQDQ (R11), X11 // 66450f6c1b + PUNPCKLQDQ X2, X11 // 66440f6cda + PUNPCKLQDQ X11, X11 // 66450f6cdb + PUNPCKLWL (BX), M2 // 0f6113 + PUNPCKLWL (R11), M2 // 410f6113 + PUNPCKLWL M2, M2 // 0f61d2 + PUNPCKLWL M3, M2 // 0f61d3 + PUNPCKLWL (BX), M3 // 0f611b + PUNPCKLWL (R11), M3 // 410f611b + PUNPCKLWL M2, M3 // 0f61da + PUNPCKLWL M3, M3 // 0f61db + PUNPCKLWL (BX), X2 // 660f6113 + PUNPCKLWL (R11), X2 // 66410f6113 + PUNPCKLWL X2, X2 // 660f61d2 + PUNPCKLWL X11, X2 // 66410f61d3 + PUNPCKLWL (BX), X11 // 66440f611b + PUNPCKLWL (R11), X11 // 66450f611b + PUNPCKLWL X2, X11 // 66440f61da + PUNPCKLWL X11, X11 // 66450f61db + PUSHQ 
FS // 0fa0 + POPQ AX + PUSHQ GS // 0fa8 + POPQ AX + PUSHW $61731 // 666823f1 + POPW AX + PUSHQ $4045620583 // 68674523f1 + POPQ AX + PUSHQ $7 // 6a07 + POPQ AX + PUSHW (BX) // 66ff33 + POPW AX + PUSHW (R11) // 6641ff33 + POPW AX + PUSHW DX // 66fff2 or 6652 + POPW AX + PUSHW R11 // 6641fff3 or 664153 + POPW AX + PUSHQ (BX) // ff33 + POPQ AX + PUSHQ (R11) // 41ff33 + POPQ AX + PUSHQ DX // fff2 or 52 + POPQ AX + PUSHQ R11 // 41fff3 or 4153 + POPQ AX + PUSHFW // 669c + POPFW + PUSHFQ // 9c + POPFQ + PXOR (BX), M2 // 0fef13 + PXOR (R11), M2 // 410fef13 + PXOR M2, M2 // 0fefd2 + PXOR M3, M2 // 0fefd3 + PXOR (BX), M3 // 0fef1b + PXOR (R11), M3 // 410fef1b + PXOR M2, M3 // 0fefda + PXOR M3, M3 // 0fefdb + PXOR (BX), X2 // 660fef13 + PXOR (R11), X2 // 66410fef13 + PXOR X2, X2 // 660fefd2 + PXOR X11, X2 // 66410fefd3 + PXOR (BX), X11 // 66440fef1b + PXOR (R11), X11 // 66450fef1b + PXOR X2, X11 // 66440fefda + PXOR X11, X11 // 66450fefdb + RCLW $1, (BX) // 66d113 + RCLW $1, (R11) // 6641d113 + RCLW $1, DX // 66d1d2 + RCLW $1, R11 // 6641d1d3 + RCLW CL, (BX) // 66d313 + RCLW CL, (R11) // 6641d313 + RCLW CL, DX // 66d3d2 + RCLW CL, R11 // 6641d3d3 + RCLW $7, (BX) // 66c11307 + RCLW $7, (R11) // 6641c11307 + RCLW $7, DX // 66c1d207 + RCLW $7, R11 // 6641c1d307 + RCLL $1, (BX) // d113 + RCLL $1, (R11) // 41d113 + RCLL $1, DX // d1d2 + RCLL $1, R11 // 41d1d3 + RCLL CL, (BX) // d313 + RCLL CL, (R11) // 41d313 + RCLL CL, DX // d3d2 + RCLL CL, R11 // 41d3d3 + RCLL $7, (BX) // c11307 + RCLL $7, (R11) // 41c11307 + RCLL $7, DX // c1d207 + RCLL $7, R11 // 41c1d307 + RCLQ $1, (BX) // 48d113 + RCLQ $1, (R11) // 49d113 + RCLQ $1, DX // 48d1d2 + RCLQ $1, R11 // 49d1d3 + RCLQ CL, (BX) // 48d313 + RCLQ CL, (R11) // 49d313 + RCLQ CL, DX // 48d3d2 + RCLQ CL, R11 // 49d3d3 + RCLQ $7, (BX) // 48c11307 + RCLQ $7, (R11) // 49c11307 + RCLQ $7, DX // 48c1d207 + RCLQ $7, R11 // 49c1d307 + RCLB $1, (BX) // d013 + RCLB $1, (R11) // 41d013 + RCLB $1, DL // d0d2 + RCLB $1, R11 // 41d0d3 + RCLB CL, (BX) 
// d213 + RCLB CL, (R11) // 41d213 + RCLB CL, DL // d2d2 + RCLB CL, R11 // 41d2d3 + RCLB $7, (BX) // c01307 + RCLB $7, (R11) // 41c01307 + RCLB $7, DL // c0d207 + RCLB $7, R11 // 41c0d307 + RCPPS (BX), X2 // 0f5313 + RCPPS (R11), X2 // 410f5313 + RCPPS X2, X2 // 0f53d2 + RCPPS X11, X2 // 410f53d3 + RCPPS (BX), X11 // 440f531b + RCPPS (R11), X11 // 450f531b + RCPPS X2, X11 // 440f53da + RCPPS X11, X11 // 450f53db + RCPSS (BX), X2 // f30f5313 + RCPSS (R11), X2 // f3410f5313 + RCPSS X2, X2 // f30f53d2 + RCPSS X11, X2 // f3410f53d3 + RCPSS (BX), X11 // f3440f531b + RCPSS (R11), X11 // f3450f531b + RCPSS X2, X11 // f3440f53da + RCPSS X11, X11 // f3450f53db + RCRW $1, (BX) // 66d11b + RCRW $1, (R11) // 6641d11b + RCRW $1, DX // 66d1da + RCRW $1, R11 // 6641d1db + RCRW CL, (BX) // 66d31b + RCRW CL, (R11) // 6641d31b + RCRW CL, DX // 66d3da + RCRW CL, R11 // 6641d3db + RCRW $7, (BX) // 66c11b07 + RCRW $7, (R11) // 6641c11b07 + RCRW $7, DX // 66c1da07 + RCRW $7, R11 // 6641c1db07 + RCRL $1, (BX) // d11b + RCRL $1, (R11) // 41d11b + RCRL $1, DX // d1da + RCRL $1, R11 // 41d1db + RCRL CL, (BX) // d31b + RCRL CL, (R11) // 41d31b + RCRL CL, DX // d3da + RCRL CL, R11 // 41d3db + RCRL $7, (BX) // c11b07 + RCRL $7, (R11) // 41c11b07 + RCRL $7, DX // c1da07 + RCRL $7, R11 // 41c1db07 + RCRQ $1, (BX) // 48d11b + RCRQ $1, (R11) // 49d11b + RCRQ $1, DX // 48d1da + RCRQ $1, R11 // 49d1db + RCRQ CL, (BX) // 48d31b + RCRQ CL, (R11) // 49d31b + RCRQ CL, DX // 48d3da + RCRQ CL, R11 // 49d3db + RCRQ $7, (BX) // 48c11b07 + RCRQ $7, (R11) // 49c11b07 + RCRQ $7, DX // 48c1da07 + RCRQ $7, R11 // 49c1db07 + RCRB $1, (BX) // d01b + RCRB $1, (R11) // 41d01b + RCRB $1, DL // d0da + RCRB $1, R11 // 41d0db + RCRB CL, (BX) // d21b + RCRB CL, (R11) // 41d21b + RCRB CL, DL // d2da + RCRB CL, R11 // 41d2db + RCRB $7, (BX) // c01b07 + RCRB $7, (R11) // 41c01b07 + RCRB $7, DL // c0da07 + RCRB $7, R11 // 41c0db07 + RDFSBASEL DX // f30faec2 + RDFSBASEL R11 // f3410faec3 + RDGSBASEL DX // f30faeca + RDGSBASEL 
R11 // f3410faecb + RDFSBASEQ DX // f3480faec2 + RDFSBASEQ R11 // f3490faec3 + RDGSBASEQ DX // f3480faeca + RDGSBASEQ R11 // f3490faecb + RDMSR // 0f32 + RDPKRU // 0f01ee + RDPMC // 0f33 + RDRANDW DX // 660fc7f2 + RDRANDW R11 // 66410fc7f3 + RDRANDL DX // 0fc7f2 + RDRANDL R11 // 410fc7f3 + RDRANDQ DX // 480fc7f2 + RDRANDQ R11 // 490fc7f3 + RDSEEDW DX // 660fc7fa + RDSEEDW R11 // 66410fc7fb + RDSEEDL DX // 0fc7fa + RDSEEDL R11 // 410fc7fb + RDSEEDQ DX // 480fc7fa + RDSEEDQ R11 // 490fc7fb + RDTSC // 0f31 + RDTSCP // 0f01f9 + JCS 2(PC) + //TODO: RETQ // c3 + JCS 2(PC) + //TODO: RETQ $0xf123 // c223f1 + JCS 2(PC) + //TODO: RETFQ // cb + JCS 2(PC) + //TODO: RETFQ $0xf123 // ca23f1 + ROLW $1, (BX) // 66d103 + ROLW $1, (R11) // 6641d103 + ROLW $1, DX // 66d1c2 + ROLW $1, R11 // 6641d1c3 + ROLW CL, (BX) // 66d303 + ROLW CL, (R11) // 6641d303 + ROLW CL, DX // 66d3c2 + ROLW CL, R11 // 6641d3c3 + ROLW $7, (BX) // 66c10307 + ROLW $7, (R11) // 6641c10307 + ROLW $7, DX // 66c1c207 + ROLW $7, R11 // 6641c1c307 + ROLL $1, (BX) // d103 + ROLL $1, (R11) // 41d103 + ROLL $1, DX // d1c2 + ROLL $1, R11 // 41d1c3 + ROLL CL, (BX) // d303 + ROLL CL, (R11) // 41d303 + ROLL CL, DX // d3c2 + ROLL CL, R11 // 41d3c3 + ROLL $7, (BX) // c10307 + ROLL $7, (R11) // 41c10307 + ROLL $7, DX // c1c207 + ROLL $7, R11 // 41c1c307 + ROLQ $1, (BX) // 48d103 + ROLQ $1, (R11) // 49d103 + ROLQ $1, DX // 48d1c2 + ROLQ $1, R11 // 49d1c3 + ROLQ CL, (BX) // 48d303 + ROLQ CL, (R11) // 49d303 + ROLQ CL, DX // 48d3c2 + ROLQ CL, R11 // 49d3c3 + ROLQ $7, (BX) // 48c10307 + ROLQ $7, (R11) // 49c10307 + ROLQ $7, DX // 48c1c207 + ROLQ $7, R11 // 49c1c307 + ROLB $1, (BX) // d003 + ROLB $1, (R11) // 41d003 + ROLB $1, DL // d0c2 + ROLB $1, R11 // 41d0c3 + ROLB CL, (BX) // d203 + ROLB CL, (R11) // 41d203 + ROLB CL, DL // d2c2 + ROLB CL, R11 // 41d2c3 + ROLB $7, (BX) // c00307 + ROLB $7, (R11) // 41c00307 + ROLB $7, DL // c0c207 + ROLB $7, R11 // 41c0c307 + RORW $1, (BX) // 66d10b + RORW $1, (R11) // 6641d10b + RORW $1, DX 
// 66d1ca + RORW $1, R11 // 6641d1cb + RORW CL, (BX) // 66d30b + RORW CL, (R11) // 6641d30b + RORW CL, DX // 66d3ca + RORW CL, R11 // 6641d3cb + RORW $7, (BX) // 66c10b07 + RORW $7, (R11) // 6641c10b07 + RORW $7, DX // 66c1ca07 + RORW $7, R11 // 6641c1cb07 + RORL $1, (BX) // d10b + RORL $1, (R11) // 41d10b + RORL $1, DX // d1ca + RORL $1, R11 // 41d1cb + RORL CL, (BX) // d30b + RORL CL, (R11) // 41d30b + RORL CL, DX // d3ca + RORL CL, R11 // 41d3cb + RORL $7, (BX) // c10b07 + RORL $7, (R11) // 41c10b07 + RORL $7, DX // c1ca07 + RORL $7, R11 // 41c1cb07 + RORQ $1, (BX) // 48d10b + RORQ $1, (R11) // 49d10b + RORQ $1, DX // 48d1ca + RORQ $1, R11 // 49d1cb + RORQ CL, (BX) // 48d30b + RORQ CL, (R11) // 49d30b + RORQ CL, DX // 48d3ca + RORQ CL, R11 // 49d3cb + RORQ $7, (BX) // 48c10b07 + RORQ $7, (R11) // 49c10b07 + RORQ $7, DX // 48c1ca07 + RORQ $7, R11 // 49c1cb07 + RORB $1, (BX) // d00b + RORB $1, (R11) // 41d00b + RORB $1, DL // d0ca + RORB $1, R11 // 41d0cb + RORB CL, (BX) // d20b + RORB CL, (R11) // 41d20b + RORB CL, DL // d2ca + RORB CL, R11 // 41d2cb + RORB $7, (BX) // c00b07 + RORB $7, (R11) // 41c00b07 + RORB $7, DL // c0ca07 + RORB $7, R11 // 41c0cb07 + RORXL $7, (BX), DX // c4e37bf01307 + RORXL $7, (R11), DX // c4c37bf01307 + RORXL $7, DX, DX // c4e37bf0d207 + RORXL $7, R11, DX // c4c37bf0d307 + RORXL $7, (BX), R11 // c4637bf01b07 + RORXL $7, (R11), R11 // c4437bf01b07 + RORXL $7, DX, R11 // c4637bf0da07 + RORXL $7, R11, R11 // c4437bf0db07 + RORXQ $7, (BX), DX // c4e3fbf01307 + RORXQ $7, (R11), DX // c4c3fbf01307 + RORXQ $7, DX, DX // c4e3fbf0d207 + RORXQ $7, R11, DX // c4c3fbf0d307 + RORXQ $7, (BX), R11 // c463fbf01b07 + RORXQ $7, (R11), R11 // c443fbf01b07 + RORXQ $7, DX, R11 // c463fbf0da07 + RORXQ $7, R11, R11 // c443fbf0db07 + ROUNDPD $7, (BX), X2 // 660f3a091307 + ROUNDPD $7, (R11), X2 // 66410f3a091307 + ROUNDPD $7, X2, X2 // 660f3a09d207 + ROUNDPD $7, X11, X2 // 66410f3a09d307 + ROUNDPD $7, (BX), X11 // 66440f3a091b07 + ROUNDPD $7, (R11), X11 // 
66450f3a091b07 + ROUNDPD $7, X2, X11 // 66440f3a09da07 + ROUNDPD $7, X11, X11 // 66450f3a09db07 + ROUNDPS $7, (BX), X2 // 660f3a081307 + ROUNDPS $7, (R11), X2 // 66410f3a081307 + ROUNDPS $7, X2, X2 // 660f3a08d207 + ROUNDPS $7, X11, X2 // 66410f3a08d307 + ROUNDPS $7, (BX), X11 // 66440f3a081b07 + ROUNDPS $7, (R11), X11 // 66450f3a081b07 + ROUNDPS $7, X2, X11 // 66440f3a08da07 + ROUNDPS $7, X11, X11 // 66450f3a08db07 + ROUNDSD $7, (BX), X2 // 660f3a0b1307 + ROUNDSD $7, (R11), X2 // 66410f3a0b1307 + ROUNDSD $7, X2, X2 // 660f3a0bd207 + ROUNDSD $7, X11, X2 // 66410f3a0bd307 + ROUNDSD $7, (BX), X11 // 66440f3a0b1b07 + ROUNDSD $7, (R11), X11 // 66450f3a0b1b07 + ROUNDSD $7, X2, X11 // 66440f3a0bda07 + ROUNDSD $7, X11, X11 // 66450f3a0bdb07 + ROUNDSS $7, (BX), X2 // 660f3a0a1307 + ROUNDSS $7, (R11), X2 // 66410f3a0a1307 + ROUNDSS $7, X2, X2 // 660f3a0ad207 + ROUNDSS $7, X11, X2 // 66410f3a0ad307 + ROUNDSS $7, (BX), X11 // 66440f3a0a1b07 + ROUNDSS $7, (R11), X11 // 66450f3a0a1b07 + ROUNDSS $7, X2, X11 // 66440f3a0ada07 + ROUNDSS $7, X11, X11 // 66450f3a0adb07 + RSM // 0faa + RSQRTPS (BX), X2 // 0f5213 + RSQRTPS (R11), X2 // 410f5213 + RSQRTPS X2, X2 // 0f52d2 + RSQRTPS X11, X2 // 410f52d3 + RSQRTPS (BX), X11 // 440f521b + RSQRTPS (R11), X11 // 450f521b + RSQRTPS X2, X11 // 440f52da + RSQRTPS X11, X11 // 450f52db + RSQRTSS (BX), X2 // f30f5213 + RSQRTSS (R11), X2 // f3410f5213 + RSQRTSS X2, X2 // f30f52d2 + RSQRTSS X11, X2 // f3410f52d3 + RSQRTSS (BX), X11 // f3440f521b + RSQRTSS (R11), X11 // f3450f521b + RSQRTSS X2, X11 // f3440f52da + RSQRTSS X11, X11 // f3450f52db + SAHF // 9e + SARW $1, (BX) // 66d13b + SARW $1, (R11) // 6641d13b + SARW $1, DX // 66d1fa + SARW $1, R11 // 6641d1fb + SARW CL, (BX) // 66d33b + SARW CL, (R11) // 6641d33b + SARW CL, DX // 66d3fa + SARW CL, R11 // 6641d3fb + SARW $7, (BX) // 66c13b07 + SARW $7, (R11) // 6641c13b07 + SARW $7, DX // 66c1fa07 + SARW $7, R11 // 6641c1fb07 + SARL $1, (BX) // d13b + SARL $1, (R11) // 41d13b + SARL $1, DX // d1fa + 
SARL $1, R11 // 41d1fb + SARL CL, (BX) // d33b + SARL CL, (R11) // 41d33b + SARL CL, DX // d3fa + SARL CL, R11 // 41d3fb + SARL $7, (BX) // c13b07 + SARL $7, (R11) // 41c13b07 + SARL $7, DX // c1fa07 + SARL $7, R11 // 41c1fb07 + SARQ $1, (BX) // 48d13b + SARQ $1, (R11) // 49d13b + SARQ $1, DX // 48d1fa + SARQ $1, R11 // 49d1fb + SARQ CL, (BX) // 48d33b + SARQ CL, (R11) // 49d33b + SARQ CL, DX // 48d3fa + SARQ CL, R11 // 49d3fb + SARQ $7, (BX) // 48c13b07 + SARQ $7, (R11) // 49c13b07 + SARQ $7, DX // 48c1fa07 + SARQ $7, R11 // 49c1fb07 + SARB $1, (BX) // d03b + SARB $1, (R11) // 41d03b + SARB $1, DL // d0fa + SARB $1, R11 // 41d0fb + SARB CL, (BX) // d23b + SARB CL, (R11) // 41d23b + SARB CL, DL // d2fa + SARB CL, R11 // 41d2fb + SARB $7, (BX) // c03b07 + SARB $7, (R11) // 41c03b07 + SARB $7, DL // c0fa07 + SARB $7, R11 // 41c0fb07 + SARXL R9, (BX), DX // c4e232f713 + SARXL R9, (R11), DX // c4c232f713 + SARXL R9, DX, DX // c4e232f7d2 + SARXL R9, R11, DX // c4c232f7d3 + SARXL R9, (BX), R11 // c46232f71b + SARXL R9, (R11), R11 // c44232f71b + SARXL R9, DX, R11 // c46232f7da + SARXL R9, R11, R11 // c44232f7db + SARXQ R14, (BX), DX // c4e28af713 + SARXQ R14, (R11), DX // c4c28af713 + SARXQ R14, DX, DX // c4e28af7d2 + SARXQ R14, R11, DX // c4c28af7d3 + SARXQ R14, (BX), R11 // c4628af71b + SARXQ R14, (R11), R11 // c4428af71b + SARXQ R14, DX, R11 // c4628af7da + SARXQ R14, R11, R11 // c4428af7db + SBBB $7, AL // 1c07 + SBBW $61731, AX // 661d23f1 + SBBL $4045620583, AX // 1d674523f1 + SBBQ $-249346713, AX // 481d674523f1 + SBBW $61731, (BX) // 66811b23f1 + SBBW $61731, (R11) // 6641811b23f1 + SBBW $61731, DX // 6681da23f1 + SBBW $61731, R11 // 664181db23f1 + SBBW $7, (BX) // 66831b07 + SBBW $7, (R11) // 6641831b07 + SBBW $7, DX // 6683da07 + SBBW $7, R11 // 664183db07 + SBBW DX, (BX) // 661913 + SBBW R11, (BX) // 6644191b + SBBW DX, (R11) // 66411913 + SBBW R11, (R11) // 6645191b + SBBW DX, DX // 6619d2 or 661bd2 + SBBW R11, DX // 664419da or 66411bd3 + SBBW DX, R11 // 
664119d3 or 66441bda + SBBW R11, R11 // 664519db or 66451bdb + SBBL $4045620583, (BX) // 811b674523f1 + SBBL $4045620583, (R11) // 41811b674523f1 + SBBL $4045620583, DX // 81da674523f1 + SBBL $4045620583, R11 // 4181db674523f1 + SBBL $7, (BX) // 831b07 + SBBL $7, (R11) // 41831b07 + SBBL $7, DX // 83da07 + SBBL $7, R11 // 4183db07 + SBBL DX, (BX) // 1913 + SBBL R11, (BX) // 44191b + SBBL DX, (R11) // 411913 + SBBL R11, (R11) // 45191b + SBBL DX, DX // 19d2 or 1bd2 + SBBL R11, DX // 4419da or 411bd3 + SBBL DX, R11 // 4119d3 or 441bda + SBBL R11, R11 // 4519db or 451bdb + SBBQ $-249346713, (BX) // 48811b674523f1 + SBBQ $-249346713, (R11) // 49811b674523f1 + SBBQ $-249346713, DX // 4881da674523f1 + SBBQ $-249346713, R11 // 4981db674523f1 + SBBQ $7, (BX) // 48831b07 + SBBQ $7, (R11) // 49831b07 + SBBQ $7, DX // 4883da07 + SBBQ $7, R11 // 4983db07 + SBBQ DX, (BX) // 481913 + SBBQ R11, (BX) // 4c191b + SBBQ DX, (R11) // 491913 + SBBQ R11, (R11) // 4d191b + SBBQ DX, DX // 4819d2 or 481bd2 + SBBQ R11, DX // 4c19da or 491bd3 + SBBQ DX, R11 // 4919d3 or 4c1bda + SBBQ R11, R11 // 4d19db or 4d1bdb + SBBB $7, (BX) // 801b07 + SBBB $7, (R11) // 41801b07 + SBBB $7, DL // 80da07 + SBBB $7, R11 // 4180db07 + SBBB DL, (BX) // 1813 + SBBB R11, (BX) // 44181b + SBBB DL, (R11) // 411813 + SBBB R11, (R11) // 45181b + SBBB DL, DL // 18d2 or 1ad2 + SBBB R11, DL // 4418da or 411ad3 + SBBB DL, R11 // 4118d3 or 441ada + SBBB R11, R11 // 4518db or 451adb + SBBW (BX), DX // 661b13 + SBBW (R11), DX // 66411b13 + SBBW (BX), R11 // 66441b1b + SBBW (R11), R11 // 66451b1b + SBBL (BX), DX // 1b13 + SBBL (R11), DX // 411b13 + SBBL (BX), R11 // 441b1b + SBBL (R11), R11 // 451b1b + SBBQ (BX), DX // 481b13 + SBBQ (R11), DX // 491b13 + SBBQ (BX), R11 // 4c1b1b + SBBQ (R11), R11 // 4d1b1b + SBBB (BX), DL // 1a13 + SBBB (R11), DL // 411a13 + SBBB (BX), R11 // 441a1b + SBBB (R11), R11 // 451a1b + SCASB // ae + SCASL // af + SCASQ // 48af + SCASW // 66af + SETHI (BX) // 0f9703 + SETHI (R11) // 410f9703 + 
SETHI DL // 0f97c2 + SETHI R11 // 410f97c3 + SETCC (BX) // 0f9303 + SETCC (R11) // 410f9303 + SETCC DL // 0f93c2 + SETCC R11 // 410f93c3 + SETCS (BX) // 0f9203 + SETCS (R11) // 410f9203 + SETCS DL // 0f92c2 + SETCS R11 // 410f92c3 + SETLS (BX) // 0f9603 + SETLS (R11) // 410f9603 + SETLS DL // 0f96c2 + SETLS R11 // 410f96c3 + SETEQ (BX) // 0f9403 + SETEQ (R11) // 410f9403 + SETEQ DL // 0f94c2 + SETEQ R11 // 410f94c3 + SETGT (BX) // 0f9f03 + SETGT (R11) // 410f9f03 + SETGT DL // 0f9fc2 + SETGT R11 // 410f9fc3 + SETGE (BX) // 0f9d03 + SETGE (R11) // 410f9d03 + SETGE DL // 0f9dc2 + SETGE R11 // 410f9dc3 + SETLT (BX) // 0f9c03 + SETLT (R11) // 410f9c03 + SETLT DL // 0f9cc2 + SETLT R11 // 410f9cc3 + SETLE (BX) // 0f9e03 + SETLE (R11) // 410f9e03 + SETLE DL // 0f9ec2 + SETLE R11 // 410f9ec3 + SETNE (BX) // 0f9503 + SETNE (R11) // 410f9503 + SETNE DL // 0f95c2 + SETNE R11 // 410f95c3 + SETOC (BX) // 0f9103 + SETOC (R11) // 410f9103 + SETOC DL // 0f91c2 + SETOC R11 // 410f91c3 + SETPC (BX) // 0f9b03 + SETPC (R11) // 410f9b03 + SETPC DL // 0f9bc2 + SETPC R11 // 410f9bc3 + SETPL (BX) // 0f9903 + SETPL (R11) // 410f9903 + SETPL DL // 0f99c2 + SETPL R11 // 410f99c3 + SETOS (BX) // 0f9003 + SETOS (R11) // 410f9003 + SETOS DL // 0f90c2 + SETOS R11 // 410f90c3 + SETPS (BX) // 0f9a03 + SETPS (R11) // 410f9a03 + SETPS DL // 0f9ac2 + SETPS R11 // 410f9ac3 + SETMI (BX) // 0f9803 + SETMI (R11) // 410f9803 + SETMI DL // 0f98c2 + SETMI R11 // 410f98c3 + SFENCE // 0faef8 + SGDT (BX) // 0f0103 + SGDT (R11) // 410f0103 + SHLW $1, (BX) // 66d123 + SHLW $1, (R11) // 6641d123 + SHLW $1, DX // 66d1e2 + SHLW $1, R11 // 6641d1e3 + SHLW CL, (BX) // 66d323 + SHLW CL, (R11) // 6641d323 + SHLW CL, DX // 66d3e2 + SHLW CL, R11 // 6641d3e3 + SHLW $7, (BX) // 66c12307 + SHLW $7, (R11) // 6641c12307 + SHLW $7, DX // 66c1e207 + SHLW $7, R11 // 6641c1e307 + SHLL $1, (BX) // d123 + SHLL $1, (R11) // 41d123 + SHLL $1, DX // d1e2 + SHLL $1, R11 // 41d1e3 + SHLL CL, (BX) // d323 + SHLL CL, (R11) // 41d323 + 
SHLL CL, DX // d3e2 + SHLL CL, R11 // 41d3e3 + SHLL $7, (BX) // c12307 + SHLL $7, (R11) // 41c12307 + SHLL $7, DX // c1e207 + SHLL $7, R11 // 41c1e307 + SHLQ $1, (BX) // 48d123 + SHLQ $1, (R11) // 49d123 + SHLQ $1, DX // 48d1e2 + SHLQ $1, R11 // 49d1e3 + SHLQ CL, (BX) // 48d323 + SHLQ CL, (R11) // 49d323 + SHLQ CL, DX // 48d3e2 + SHLQ CL, R11 // 49d3e3 + SHLQ $7, (BX) // 48c12307 + SHLQ $7, (R11) // 49c12307 + SHLQ $7, DX // 48c1e207 + SHLQ $7, R11 // 49c1e307 + SHLB $1, (BX) // d023 + SHLB $1, (R11) // 41d023 + SHLB $1, DL // d0e2 + SHLB $1, R11 // 41d0e3 + SHLB CL, (BX) // d223 + SHLB CL, (R11) // 41d223 + SHLB CL, DL // d2e2 + SHLB CL, R11 // 41d2e3 + SHLB $7, (BX) // c02307 + SHLB $7, (R11) // 41c02307 + SHLB $7, DL // c0e207 + SHLB $7, R11 // 41c0e307 + SHLW CL, DX, (BX) // 660fa513 + SHLW CL, R11, (BX) // 66440fa51b + SHLW CL, DX, (R11) // 66410fa513 + SHLW CL, R11, (R11) // 66450fa51b + SHLW CL, DX, DX // 660fa5d2 + SHLW CL, R11, DX // 66440fa5da + SHLW CL, DX, R11 // 66410fa5d3 + SHLW CL, R11, R11 // 66450fa5db + SHLW $7, DX, (BX) // 660fa41307 + SHLW $7, R11, (BX) // 66440fa41b07 + SHLW $7, DX, (R11) // 66410fa41307 + SHLW $7, R11, (R11) // 66450fa41b07 + SHLW $7, DX, DX // 660fa4d207 + SHLW $7, R11, DX // 66440fa4da07 + SHLW $7, DX, R11 // 66410fa4d307 + SHLW $7, R11, R11 // 66450fa4db07 + SHLL CL, DX, (BX) // 0fa513 + SHLL CL, R11, (BX) // 440fa51b + SHLL CL, DX, (R11) // 410fa513 + SHLL CL, R11, (R11) // 450fa51b + SHLL CL, DX, DX // 0fa5d2 + SHLL CL, R11, DX // 440fa5da + SHLL CL, DX, R11 // 410fa5d3 + SHLL CL, R11, R11 // 450fa5db + SHLL $7, DX, (BX) // 0fa41307 + SHLL $7, R11, (BX) // 440fa41b07 + SHLL $7, DX, (R11) // 410fa41307 + SHLL $7, R11, (R11) // 450fa41b07 + SHLL $7, DX, DX // 0fa4d207 + SHLL $7, R11, DX // 440fa4da07 + SHLL $7, DX, R11 // 410fa4d307 + SHLL $7, R11, R11 // 450fa4db07 + SHLQ CL, DX, (BX) // 480fa513 + SHLQ CL, R11, (BX) // 4c0fa51b + SHLQ CL, DX, (R11) // 490fa513 + SHLQ CL, R11, (R11) // 4d0fa51b + SHLQ CL, DX, DX // 
480fa5d2 + SHLQ CL, R11, DX // 4c0fa5da + SHLQ CL, DX, R11 // 490fa5d3 + SHLQ CL, R11, R11 // 4d0fa5db + SHLQ $7, DX, (BX) // 480fa41307 + SHLQ $7, R11, (BX) // 4c0fa41b07 + SHLQ $7, DX, (R11) // 490fa41307 + SHLQ $7, R11, (R11) // 4d0fa41b07 + SHLQ $7, DX, DX // 480fa4d207 + SHLQ $7, R11, DX // 4c0fa4da07 + SHLQ $7, DX, R11 // 490fa4d307 + SHLQ $7, R11, R11 // 4d0fa4db07 + SHLXL R9, (BX), DX // c4e231f713 + SHLXL R9, (R11), DX // c4c231f713 + SHLXL R9, DX, DX // c4e231f7d2 + SHLXL R9, R11, DX // c4c231f7d3 + SHLXL R9, (BX), R11 // c46231f71b + SHLXL R9, (R11), R11 // c44231f71b + SHLXL R9, DX, R11 // c46231f7da + SHLXL R9, R11, R11 // c44231f7db + SHLXQ R14, (BX), DX // c4e289f713 + SHLXQ R14, (R11), DX // c4c289f713 + SHLXQ R14, DX, DX // c4e289f7d2 + SHLXQ R14, R11, DX // c4c289f7d3 + SHLXQ R14, (BX), R11 // c46289f71b + SHLXQ R14, (R11), R11 // c44289f71b + SHLXQ R14, DX, R11 // c46289f7da + SHLXQ R14, R11, R11 // c44289f7db + SHRW $1, (BX) // 66d12b + SHRW $1, (R11) // 6641d12b + SHRW $1, DX // 66d1ea + SHRW $1, R11 // 6641d1eb + SHRW CL, (BX) // 66d32b + SHRW CL, (R11) // 6641d32b + SHRW CL, DX // 66d3ea + SHRW CL, R11 // 6641d3eb + SHRW $7, (BX) // 66c12b07 + SHRW $7, (R11) // 6641c12b07 + SHRW $7, DX // 66c1ea07 + SHRW $7, R11 // 6641c1eb07 + SHRL $1, (BX) // d12b + SHRL $1, (R11) // 41d12b + SHRL $1, DX // d1ea + SHRL $1, R11 // 41d1eb + SHRL CL, (BX) // d32b + SHRL CL, (R11) // 41d32b + SHRL CL, DX // d3ea + SHRL CL, R11 // 41d3eb + SHRL $7, (BX) // c12b07 + SHRL $7, (R11) // 41c12b07 + SHRL $7, DX // c1ea07 + SHRL $7, R11 // 41c1eb07 + SHRQ $1, (BX) // 48d12b + SHRQ $1, (R11) // 49d12b + SHRQ $1, DX // 48d1ea + SHRQ $1, R11 // 49d1eb + SHRQ CL, (BX) // 48d32b + SHRQ CL, (R11) // 49d32b + SHRQ CL, DX // 48d3ea + SHRQ CL, R11 // 49d3eb + SHRQ $7, (BX) // 48c12b07 + SHRQ $7, (R11) // 49c12b07 + SHRQ $7, DX // 48c1ea07 + SHRQ $7, R11 // 49c1eb07 + SHRB $1, (BX) // d02b + SHRB $1, (R11) // 41d02b + SHRB $1, DL // d0ea + SHRB $1, R11 // 41d0eb + SHRB CL, (BX) 
// d22b + SHRB CL, (R11) // 41d22b + SHRB CL, DL // d2ea + SHRB CL, R11 // 41d2eb + SHRB $7, (BX) // c02b07 + SHRB $7, (R11) // 41c02b07 + SHRB $7, DL // c0ea07 + SHRB $7, R11 // 41c0eb07 + SHRW CL, DX, (BX) // 660fad13 + SHRW CL, R11, (BX) // 66440fad1b + SHRW CL, DX, (R11) // 66410fad13 + SHRW CL, R11, (R11) // 66450fad1b + SHRW CL, DX, DX // 660fadd2 + SHRW CL, R11, DX // 66440fadda + SHRW CL, DX, R11 // 66410fadd3 + SHRW CL, R11, R11 // 66450faddb + SHRW $7, DX, (BX) // 660fac1307 + SHRW $7, R11, (BX) // 66440fac1b07 + SHRW $7, DX, (R11) // 66410fac1307 + SHRW $7, R11, (R11) // 66450fac1b07 + SHRW $7, DX, DX // 660facd207 + SHRW $7, R11, DX // 66440facda07 + SHRW $7, DX, R11 // 66410facd307 + SHRW $7, R11, R11 // 66450facdb07 + SHRL CL, DX, (BX) // 0fad13 + SHRL CL, R11, (BX) // 440fad1b + SHRL CL, DX, (R11) // 410fad13 + SHRL CL, R11, (R11) // 450fad1b + SHRL CL, DX, DX // 0fadd2 + SHRL CL, R11, DX // 440fadda + SHRL CL, DX, R11 // 410fadd3 + SHRL CL, R11, R11 // 450faddb + SHRL $7, DX, (BX) // 0fac1307 + SHRL $7, R11, (BX) // 440fac1b07 + SHRL $7, DX, (R11) // 410fac1307 + SHRL $7, R11, (R11) // 450fac1b07 + SHRL $7, DX, DX // 0facd207 + SHRL $7, R11, DX // 440facda07 + SHRL $7, DX, R11 // 410facd307 + SHRL $7, R11, R11 // 450facdb07 + SHRQ CL, DX, (BX) // 480fad13 + SHRQ CL, R11, (BX) // 4c0fad1b + SHRQ CL, DX, (R11) // 490fad13 + SHRQ CL, R11, (R11) // 4d0fad1b + SHRQ CL, DX, DX // 480fadd2 + SHRQ CL, R11, DX // 4c0fadda + SHRQ CL, DX, R11 // 490fadd3 + SHRQ CL, R11, R11 // 4d0faddb + SHRQ $7, DX, (BX) // 480fac1307 + SHRQ $7, R11, (BX) // 4c0fac1b07 + SHRQ $7, DX, (R11) // 490fac1307 + SHRQ $7, R11, (R11) // 4d0fac1b07 + SHRQ $7, DX, DX // 480facd207 + SHRQ $7, R11, DX // 4c0facda07 + SHRQ $7, DX, R11 // 490facd307 + SHRQ $7, R11, R11 // 4d0facdb07 + SHRXL R9, (BX), DX // c4e233f713 + SHRXL R9, (R11), DX // c4c233f713 + SHRXL R9, DX, DX // c4e233f7d2 + SHRXL R9, R11, DX // c4c233f7d3 + SHRXL R9, (BX), R11 // c46233f71b + SHRXL R9, (R11), R11 // c44233f71b 
+ SHRXL R9, DX, R11 // c46233f7da + SHRXL R9, R11, R11 // c44233f7db + SHRXQ R14, (BX), DX // c4e28bf713 + SHRXQ R14, (R11), DX // c4c28bf713 + SHRXQ R14, DX, DX // c4e28bf7d2 + SHRXQ R14, R11, DX // c4c28bf7d3 + SHRXQ R14, (BX), R11 // c4628bf71b + SHRXQ R14, (R11), R11 // c4428bf71b + SHRXQ R14, DX, R11 // c4628bf7da + SHRXQ R14, R11, R11 // c4428bf7db + SHUFPD $7, (BX), X2 // 660fc61307 + SHUFPD $7, (R11), X2 // 66410fc61307 + SHUFPD $7, X2, X2 // 660fc6d207 + SHUFPD $7, X11, X2 // 66410fc6d307 + SHUFPD $7, (BX), X11 // 66440fc61b07 + SHUFPD $7, (R11), X11 // 66450fc61b07 + SHUFPD $7, X2, X11 // 66440fc6da07 + SHUFPD $7, X11, X11 // 66450fc6db07 + SHUFPS $7, (BX), X2 // 0fc61307 + SHUFPS $7, (R11), X2 // 410fc61307 + SHUFPS $7, X2, X2 // 0fc6d207 + SHUFPS $7, X11, X2 // 410fc6d307 + SHUFPS $7, (BX), X11 // 440fc61b07 + SHUFPS $7, (R11), X11 // 450fc61b07 + SHUFPS $7, X2, X11 // 440fc6da07 + SHUFPS $7, X11, X11 // 450fc6db07 + SIDT (BX) // 0f010b + SIDT (R11) // 410f010b + SLDTW (BX) // 660f0003 + SLDTW (R11) // 66410f0003 + SLDTW DX // 660f00c2 + SLDTW R11 // 66410f00c3 + SLDTL (BX) // 0f0003 + SLDTL (R11) // 410f0003 + SLDTL DX // 0f00c2 + SLDTL R11 // 410f00c3 + SLDTQ (BX) // 480f0003 + SLDTQ (R11) // 490f0003 + SLDTQ DX // 480f00c2 + SLDTQ R11 // 490f00c3 + SMSWW (BX) // 660f0123 + SMSWW (R11) // 66410f0123 + SMSWW DX // 660f01e2 + SMSWW R11 // 66410f01e3 + SMSWL (BX) // 0f0123 + SMSWL (R11) // 410f0123 + SMSWL DX // 0f01e2 + SMSWL R11 // 410f01e3 + SMSWQ (BX) // 480f0123 + SMSWQ (R11) // 490f0123 + SMSWQ DX // 480f01e2 + SMSWQ R11 // 490f01e3 + SQRTPD (BX), X2 // 660f5113 + SQRTPD (R11), X2 // 66410f5113 + SQRTPD X2, X2 // 660f51d2 + SQRTPD X11, X2 // 66410f51d3 + SQRTPD (BX), X11 // 66440f511b + SQRTPD (R11), X11 // 66450f511b + SQRTPD X2, X11 // 66440f51da + SQRTPD X11, X11 // 66450f51db + SQRTPS (BX), X2 // 0f5113 + SQRTPS (R11), X2 // 410f5113 + SQRTPS X2, X2 // 0f51d2 + SQRTPS X11, X2 // 410f51d3 + SQRTPS (BX), X11 // 440f511b + SQRTPS (R11), X11 // 
450f511b + SQRTPS X2, X11 // 440f51da + SQRTPS X11, X11 // 450f51db + SQRTSD (BX), X2 // f20f5113 + SQRTSD (R11), X2 // f2410f5113 + SQRTSD X2, X2 // f20f51d2 + SQRTSD X11, X2 // f2410f51d3 + SQRTSD (BX), X11 // f2440f511b + SQRTSD (R11), X11 // f2450f511b + SQRTSD X2, X11 // f2440f51da + SQRTSD X11, X11 // f2450f51db + SQRTSS (BX), X2 // f30f5113 + SQRTSS (R11), X2 // f3410f5113 + SQRTSS X2, X2 // f30f51d2 + SQRTSS X11, X2 // f3410f51d3 + SQRTSS (BX), X11 // f3440f511b + SQRTSS (R11), X11 // f3450f511b + SQRTSS X2, X11 // f3440f51da + SQRTSS X11, X11 // f3450f51db + STAC // 0f01cb + STC // f9 + STD // fd + STI // fb + STMXCSR (BX) // 0fae1b + STMXCSR (R11) // 410fae1b + STOSB // aa + STOSL // ab + STOSQ // 48ab + STOSW // 66ab + STRW (BX) // 660f000b + STRW (R11) // 66410f000b + STRW DX // 660f00ca + STRW R11 // 66410f00cb + STRL (BX) // 0f000b + STRL (R11) // 410f000b + STRL DX // 0f00ca + STRL R11 // 410f00cb + STRQ (BX) // 480f000b + STRQ (R11) // 490f000b + STRQ DX // 480f00ca + STRQ R11 // 490f00cb + SUBB $7, AL // 2c07 + SUBW $61731, AX // 662d23f1 + SUBL $4045620583, AX // 2d674523f1 + SUBQ $-249346713, AX // 482d674523f1 + SUBW $61731, (BX) // 66812b23f1 + SUBW $61731, (R11) // 6641812b23f1 + SUBW $61731, DX // 6681ea23f1 + SUBW $61731, R11 // 664181eb23f1 + SUBW $7, (BX) // 66832b07 + SUBW $7, (R11) // 6641832b07 + SUBW $7, DX // 6683ea07 + SUBW $7, R11 // 664183eb07 + SUBW DX, (BX) // 662913 + SUBW R11, (BX) // 6644291b + SUBW DX, (R11) // 66412913 + SUBW R11, (R11) // 6645291b + SUBW DX, DX // 6629d2 or 662bd2 + SUBW R11, DX // 664429da or 66412bd3 + SUBW DX, R11 // 664129d3 or 66442bda + SUBW R11, R11 // 664529db or 66452bdb + SUBL $4045620583, (BX) // 812b674523f1 + SUBL $4045620583, (R11) // 41812b674523f1 + SUBL $4045620583, DX // 81ea674523f1 + SUBL $4045620583, R11 // 4181eb674523f1 + SUBL $7, (BX) // 832b07 + SUBL $7, (R11) // 41832b07 + SUBL $7, DX // 83ea07 + SUBL $7, R11 // 4183eb07 + SUBL DX, (BX) // 2913 + SUBL R11, (BX) // 44291b + SUBL DX, 
(R11) // 412913 + SUBL R11, (R11) // 45291b + SUBL DX, DX // 29d2 or 2bd2 + SUBL R11, DX // 4429da or 412bd3 + SUBL DX, R11 // 4129d3 or 442bda + SUBL R11, R11 // 4529db or 452bdb + SUBQ $-249346713, (BX) // 48812b674523f1 + SUBQ $-249346713, (R11) // 49812b674523f1 + SUBQ $-249346713, DX // 4881ea674523f1 + SUBQ $-249346713, R11 // 4981eb674523f1 + SUBQ $7, (BX) // 48832b07 + SUBQ $7, (R11) // 49832b07 + SUBQ $7, DX // 4883ea07 + SUBQ $7, R11 // 4983eb07 + SUBQ DX, (BX) // 482913 + SUBQ R11, (BX) // 4c291b + SUBQ DX, (R11) // 492913 + SUBQ R11, (R11) // 4d291b + SUBQ DX, DX // 4829d2 or 482bd2 + SUBQ R11, DX // 4c29da or 492bd3 + SUBQ DX, R11 // 4929d3 or 4c2bda + SUBQ R11, R11 // 4d29db or 4d2bdb + SUBB $7, (BX) // 802b07 + SUBB $7, (R11) // 41802b07 + SUBB $7, DL // 80ea07 + SUBB $7, R11 // 4180eb07 + SUBB DL, (BX) // 2813 + SUBB R11, (BX) // 44281b + SUBB DL, (R11) // 412813 + SUBB R11, (R11) // 45281b + SUBB DL, DL // 28d2 or 2ad2 + SUBB R11, DL // 4428da or 412ad3 + SUBB DL, R11 // 4128d3 or 442ada + SUBB R11, R11 // 4528db or 452adb + SUBW (BX), DX // 662b13 + SUBW (R11), DX // 66412b13 + SUBW (BX), R11 // 66442b1b + SUBW (R11), R11 // 66452b1b + SUBL (BX), DX // 2b13 + SUBL (R11), DX // 412b13 + SUBL (BX), R11 // 442b1b + SUBL (R11), R11 // 452b1b + SUBQ (BX), DX // 482b13 + SUBQ (R11), DX // 492b13 + SUBQ (BX), R11 // 4c2b1b + SUBQ (R11), R11 // 4d2b1b + SUBB (BX), DL // 2a13 + SUBB (R11), DL // 412a13 + SUBB (BX), R11 // 442a1b + SUBB (R11), R11 // 452a1b + SUBPD (BX), X2 // 660f5c13 + SUBPD (R11), X2 // 66410f5c13 + SUBPD X2, X2 // 660f5cd2 + SUBPD X11, X2 // 66410f5cd3 + SUBPD (BX), X11 // 66440f5c1b + SUBPD (R11), X11 // 66450f5c1b + SUBPD X2, X11 // 66440f5cda + SUBPD X11, X11 // 66450f5cdb + SUBPS (BX), X2 // 0f5c13 + SUBPS (R11), X2 // 410f5c13 + SUBPS X2, X2 // 0f5cd2 + SUBPS X11, X2 // 410f5cd3 + SUBPS (BX), X11 // 440f5c1b + SUBPS (R11), X11 // 450f5c1b + SUBPS X2, X11 // 440f5cda + SUBPS X11, X11 // 450f5cdb + SUBSD (BX), X2 // f20f5c13 + SUBSD 
(R11), X2 // f2410f5c13 + SUBSD X2, X2 // f20f5cd2 + SUBSD X11, X2 // f2410f5cd3 + SUBSD (BX), X11 // f2440f5c1b + SUBSD (R11), X11 // f2450f5c1b + SUBSD X2, X11 // f2440f5cda + SUBSD X11, X11 // f2450f5cdb + SUBSS (BX), X2 // f30f5c13 + SUBSS (R11), X2 // f3410f5c13 + SUBSS X2, X2 // f30f5cd2 + SUBSS X11, X2 // f3410f5cd3 + SUBSS (BX), X11 // f3440f5c1b + SUBSS (R11), X11 // f3450f5c1b + SUBSS X2, X11 // f3440f5cda + SUBSS X11, X11 // f3450f5cdb + SWAPGS // 0f01f8 + SYSCALL // 0f05 + SYSENTER // 0f34 + SYSENTER64 // 480f34 + SYSEXIT // 0f35 + SYSEXIT64 // 480f35 + SYSRET // 0f07 + TESTB $7, AL // a807 + TESTW $61731, AX // 66a923f1 + TESTL $4045620583, AX // a9674523f1 + TESTQ $-249346713, AX // 48a9674523f1 + TESTW $61731, (BX) // 66f70323f1 + TESTW $61731, (R11) // 6641f70323f1 + TESTW $61731, DX // 66f7c223f1 + TESTW $61731, R11 // 6641f7c323f1 + TESTW DX, (BX) // 668513 + TESTW R11, (BX) // 6644851b + TESTW DX, (R11) // 66418513 + TESTW R11, (R11) // 6645851b + TESTW DX, DX // 6685d2 + TESTW R11, DX // 664485da + TESTW DX, R11 // 664185d3 + TESTW R11, R11 // 664585db + TESTL $4045620583, (BX) // f703674523f1 + TESTL $4045620583, (R11) // 41f703674523f1 + TESTL $4045620583, DX // f7c2674523f1 + TESTL $4045620583, R11 // 41f7c3674523f1 + TESTL DX, (BX) // 8513 + TESTL R11, (BX) // 44851b + TESTL DX, (R11) // 418513 + TESTL R11, (R11) // 45851b + TESTL DX, DX // 85d2 + TESTL R11, DX // 4485da + TESTL DX, R11 // 4185d3 + TESTL R11, R11 // 4585db + TESTQ $-249346713, (BX) // 48f703674523f1 + TESTQ $-249346713, (R11) // 49f703674523f1 + TESTQ $-249346713, DX // 48f7c2674523f1 + TESTQ $-249346713, R11 // 49f7c3674523f1 + TESTQ DX, (BX) // 488513 + TESTQ R11, (BX) // 4c851b + TESTQ DX, (R11) // 498513 + TESTQ R11, (R11) // 4d851b + TESTQ DX, DX // 4885d2 + TESTQ R11, DX // 4c85da + TESTQ DX, R11 // 4985d3 + TESTQ R11, R11 // 4d85db + TESTB $7, (BX) // f60307 + TESTB $7, (R11) // 41f60307 + TESTB $7, DL // f6c207 + TESTB $7, R11 // 41f6c307 + TESTB DL, (BX) // 8413 + 
TESTB R11, (BX) // 44841b + TESTB DL, (R11) // 418413 + TESTB R11, (R11) // 45841b + TESTB DL, DL // 84d2 + TESTB R11, DL // 4484da + TESTB DL, R11 // 4184d3 + TESTB R11, R11 // 4584db + TZCNTW (BX), DX // 66f30fbc13 + TZCNTW (R11), DX // 66f3410fbc13 + TZCNTW DX, DX // 66f30fbcd2 + TZCNTW R11, DX // 66f3410fbcd3 + TZCNTW (BX), R11 // 66f3440fbc1b + TZCNTW (R11), R11 // 66f3450fbc1b + TZCNTW DX, R11 // 66f3440fbcda + TZCNTW R11, R11 // 66f3450fbcdb + TZCNTL (BX), DX // f30fbc13 + TZCNTL (R11), DX // f3410fbc13 + TZCNTL DX, DX // f30fbcd2 + TZCNTL R11, DX // f3410fbcd3 + TZCNTL (BX), R11 // f3440fbc1b + TZCNTL (R11), R11 // f3450fbc1b + TZCNTL DX, R11 // f3440fbcda + TZCNTL R11, R11 // f3450fbcdb + TZCNTQ (BX), DX // f3480fbc13 + TZCNTQ (R11), DX // f3490fbc13 + TZCNTQ DX, DX // f3480fbcd2 + TZCNTQ R11, DX // f3490fbcd3 + TZCNTQ (BX), R11 // f34c0fbc1b + TZCNTQ (R11), R11 // f34d0fbc1b + TZCNTQ DX, R11 // f34c0fbcda + TZCNTQ R11, R11 // f34d0fbcdb + UCOMISD (BX), X2 // 660f2e13 + UCOMISD (R11), X2 // 66410f2e13 + UCOMISD X2, X2 // 660f2ed2 + UCOMISD X11, X2 // 66410f2ed3 + UCOMISD (BX), X11 // 66440f2e1b + UCOMISD (R11), X11 // 66450f2e1b + UCOMISD X2, X11 // 66440f2eda + UCOMISD X11, X11 // 66450f2edb + UCOMISS (BX), X2 // 0f2e13 + UCOMISS (R11), X2 // 410f2e13 + UCOMISS X2, X2 // 0f2ed2 + UCOMISS X11, X2 // 410f2ed3 + UCOMISS (BX), X11 // 440f2e1b + UCOMISS (R11), X11 // 450f2e1b + UCOMISS X2, X11 // 440f2eda + UCOMISS X11, X11 // 450f2edb + UD1 // 0fb9 + UD2 // 0f0b + UNPCKHPD (BX), X2 // 660f1513 + UNPCKHPD (R11), X2 // 66410f1513 + UNPCKHPD X2, X2 // 660f15d2 + UNPCKHPD X11, X2 // 66410f15d3 + UNPCKHPD (BX), X11 // 66440f151b + UNPCKHPD (R11), X11 // 66450f151b + UNPCKHPD X2, X11 // 66440f15da + UNPCKHPD X11, X11 // 66450f15db + UNPCKHPS (BX), X2 // 0f1513 + UNPCKHPS (R11), X2 // 410f1513 + UNPCKHPS X2, X2 // 0f15d2 + UNPCKHPS X11, X2 // 410f15d3 + UNPCKHPS (BX), X11 // 440f151b + UNPCKHPS (R11), X11 // 450f151b + UNPCKHPS X2, X11 // 440f15da + UNPCKHPS X11, 
X11 // 450f15db + UNPCKLPD (BX), X2 // 660f1413 + UNPCKLPD (R11), X2 // 66410f1413 + UNPCKLPD X2, X2 // 660f14d2 + UNPCKLPD X11, X2 // 66410f14d3 + UNPCKLPD (BX), X11 // 66440f141b + UNPCKLPD (R11), X11 // 66450f141b + UNPCKLPD X2, X11 // 66440f14da + UNPCKLPD X11, X11 // 66450f14db + UNPCKLPS (BX), X2 // 0f1413 + UNPCKLPS (R11), X2 // 410f1413 + UNPCKLPS X2, X2 // 0f14d2 + UNPCKLPS X11, X2 // 410f14d3 + UNPCKLPS (BX), X11 // 440f141b + UNPCKLPS (R11), X11 // 450f141b + UNPCKLPS X2, X11 // 440f14da + UNPCKLPS X11, X11 // 450f14db + VADDPD (BX), X9, X2 // c4e1315813 or c5b15813 + VADDPD (R11), X9, X2 // c4c1315813 + VADDPD X2, X9, X2 // c4e13158d2 or c5b158d2 + VADDPD X11, X9, X2 // c4c13158d3 + VADDPD (BX), X9, X11 // c46131581b or c531581b + VADDPD (R11), X9, X11 // c44131581b + VADDPD X2, X9, X11 // c4613158da or c53158da + VADDPD X11, X9, X11 // c4413158db + VADDPD (BX), Y15, Y2 // c4e1055813 or c5855813 + VADDPD (R11), Y15, Y2 // c4c1055813 + VADDPD Y2, Y15, Y2 // c4e10558d2 or c58558d2 + VADDPD Y11, Y15, Y2 // c4c10558d3 + VADDPD (BX), Y15, Y11 // c46105581b or c505581b + VADDPD (R11), Y15, Y11 // c44105581b + VADDPD Y2, Y15, Y11 // c4610558da or c50558da + VADDPD Y11, Y15, Y11 // c4410558db + VADDPS (BX), X9, X2 // c4e1305813 or c5b05813 + VADDPS (R11), X9, X2 // c4c1305813 + VADDPS X2, X9, X2 // c4e13058d2 or c5b058d2 + VADDPS X11, X9, X2 // c4c13058d3 + VADDPS (BX), X9, X11 // c46130581b or c530581b + VADDPS (R11), X9, X11 // c44130581b + VADDPS X2, X9, X11 // c4613058da or c53058da + VADDPS X11, X9, X11 // c4413058db + VADDPS (BX), Y15, Y2 // c4e1045813 or c5845813 + VADDPS (R11), Y15, Y2 // c4c1045813 + VADDPS Y2, Y15, Y2 // c4e10458d2 or c58458d2 + VADDPS Y11, Y15, Y2 // c4c10458d3 + VADDPS (BX), Y15, Y11 // c46104581b or c504581b + VADDPS (R11), Y15, Y11 // c44104581b + VADDPS Y2, Y15, Y11 // c4610458da or c50458da + VADDPS Y11, Y15, Y11 // c4410458db + VADDSD (BX), X9, X2 // c4e1335813 or c5b35813 + VADDSD (R11), X9, X2 // c4c1335813 + VADDSD X2, X9, 
X2 // c4e13358d2 or c5b358d2 + VADDSD X11, X9, X2 // c4c13358d3 + VADDSD (BX), X9, X11 // c46133581b or c533581b + VADDSD (R11), X9, X11 // c44133581b + VADDSD X2, X9, X11 // c4613358da or c53358da + VADDSD X11, X9, X11 // c4413358db + VADDSS (BX), X9, X2 // c4e1325813 or c5b25813 + VADDSS (R11), X9, X2 // c4c1325813 + VADDSS X2, X9, X2 // c4e13258d2 or c5b258d2 + VADDSS X11, X9, X2 // c4c13258d3 + VADDSS (BX), X9, X11 // c46132581b or c532581b + VADDSS (R11), X9, X11 // c44132581b + VADDSS X2, X9, X11 // c4613258da or c53258da + VADDSS X11, X9, X11 // c4413258db + VADDSUBPD (BX), X9, X2 // c4e131d013 or c5b1d013 + VADDSUBPD (R11), X9, X2 // c4c131d013 + VADDSUBPD X2, X9, X2 // c4e131d0d2 or c5b1d0d2 + VADDSUBPD X11, X9, X2 // c4c131d0d3 + VADDSUBPD (BX), X9, X11 // c46131d01b or c531d01b + VADDSUBPD (R11), X9, X11 // c44131d01b + VADDSUBPD X2, X9, X11 // c46131d0da or c531d0da + VADDSUBPD X11, X9, X11 // c44131d0db + VADDSUBPD (BX), Y15, Y2 // c4e105d013 or c585d013 + VADDSUBPD (R11), Y15, Y2 // c4c105d013 + VADDSUBPD Y2, Y15, Y2 // c4e105d0d2 or c585d0d2 + VADDSUBPD Y11, Y15, Y2 // c4c105d0d3 + VADDSUBPD (BX), Y15, Y11 // c46105d01b or c505d01b + VADDSUBPD (R11), Y15, Y11 // c44105d01b + VADDSUBPD Y2, Y15, Y11 // c46105d0da or c505d0da + VADDSUBPD Y11, Y15, Y11 // c44105d0db + VADDSUBPS (BX), X9, X2 // c4e133d013 or c5b3d013 + VADDSUBPS (R11), X9, X2 // c4c133d013 + VADDSUBPS X2, X9, X2 // c4e133d0d2 or c5b3d0d2 + VADDSUBPS X11, X9, X2 // c4c133d0d3 + VADDSUBPS (BX), X9, X11 // c46133d01b or c533d01b + VADDSUBPS (R11), X9, X11 // c44133d01b + VADDSUBPS X2, X9, X11 // c46133d0da or c533d0da + VADDSUBPS X11, X9, X11 // c44133d0db + VADDSUBPS (BX), Y15, Y2 // c4e107d013 or c587d013 + VADDSUBPS (R11), Y15, Y2 // c4c107d013 + VADDSUBPS Y2, Y15, Y2 // c4e107d0d2 or c587d0d2 + VADDSUBPS Y11, Y15, Y2 // c4c107d0d3 + VADDSUBPS (BX), Y15, Y11 // c46107d01b or c507d01b + VADDSUBPS (R11), Y15, Y11 // c44107d01b + VADDSUBPS Y2, Y15, Y11 // c46107d0da or c507d0da + VADDSUBPS 
Y11, Y15, Y11 // c44107d0db + VAESDEC (BX), X9, X2 // c4e231de13 + VAESDEC (R11), X9, X2 // c4c231de13 + VAESDEC X2, X9, X2 // c4e231ded2 + VAESDEC X11, X9, X2 // c4c231ded3 + VAESDEC (BX), X9, X11 // c46231de1b + VAESDEC (R11), X9, X11 // c44231de1b + VAESDEC X2, X9, X11 // c46231deda + VAESDEC X11, X9, X11 // c44231dedb + VAESDECLAST (BX), X9, X2 // c4e231df13 + VAESDECLAST (R11), X9, X2 // c4c231df13 + VAESDECLAST X2, X9, X2 // c4e231dfd2 + VAESDECLAST X11, X9, X2 // c4c231dfd3 + VAESDECLAST (BX), X9, X11 // c46231df1b + VAESDECLAST (R11), X9, X11 // c44231df1b + VAESDECLAST X2, X9, X11 // c46231dfda + VAESDECLAST X11, X9, X11 // c44231dfdb + VAESENC (BX), X9, X2 // c4e231dc13 + VAESENC (R11), X9, X2 // c4c231dc13 + VAESENC X2, X9, X2 // c4e231dcd2 + VAESENC X11, X9, X2 // c4c231dcd3 + VAESENC (BX), X9, X11 // c46231dc1b + VAESENC (R11), X9, X11 // c44231dc1b + VAESENC X2, X9, X11 // c46231dcda + VAESENC X11, X9, X11 // c44231dcdb + VAESENCLAST (BX), X9, X2 // c4e231dd13 + VAESENCLAST (R11), X9, X2 // c4c231dd13 + VAESENCLAST X2, X9, X2 // c4e231ddd2 + VAESENCLAST X11, X9, X2 // c4c231ddd3 + VAESENCLAST (BX), X9, X11 // c46231dd1b + VAESENCLAST (R11), X9, X11 // c44231dd1b + VAESENCLAST X2, X9, X11 // c46231ddda + VAESENCLAST X11, X9, X11 // c44231dddb + VAESIMC (BX), X2 // c4e279db13 + VAESIMC (R11), X2 // c4c279db13 + VAESIMC X2, X2 // c4e279dbd2 + VAESIMC X11, X2 // c4c279dbd3 + VAESIMC (BX), X11 // c46279db1b + VAESIMC (R11), X11 // c44279db1b + VAESIMC X2, X11 // c46279dbda + VAESIMC X11, X11 // c44279dbdb + VAESKEYGENASSIST $7, (BX), X2 // c4e379df1307 + VAESKEYGENASSIST $7, (R11), X2 // c4c379df1307 + VAESKEYGENASSIST $7, X2, X2 // c4e379dfd207 + VAESKEYGENASSIST $7, X11, X2 // c4c379dfd307 + VAESKEYGENASSIST $7, (BX), X11 // c46379df1b07 + VAESKEYGENASSIST $7, (R11), X11 // c44379df1b07 + VAESKEYGENASSIST $7, X2, X11 // c46379dfda07 + VAESKEYGENASSIST $7, X11, X11 // c44379dfdb07 + VANDNPD (BX), X9, X2 // c4e1315513 or c5b15513 + VANDNPD (R11), X9, X2 // 
c4c1315513 + VANDNPD X2, X9, X2 // c4e13155d2 or c5b155d2 + VANDNPD X11, X9, X2 // c4c13155d3 + VANDNPD (BX), X9, X11 // c46131551b or c531551b + VANDNPD (R11), X9, X11 // c44131551b + VANDNPD X2, X9, X11 // c4613155da or c53155da + VANDNPD X11, X9, X11 // c4413155db + VANDNPD (BX), Y15, Y2 // c4e1055513 or c5855513 + VANDNPD (R11), Y15, Y2 // c4c1055513 + VANDNPD Y2, Y15, Y2 // c4e10555d2 or c58555d2 + VANDNPD Y11, Y15, Y2 // c4c10555d3 + VANDNPD (BX), Y15, Y11 // c46105551b or c505551b + VANDNPD (R11), Y15, Y11 // c44105551b + VANDNPD Y2, Y15, Y11 // c4610555da or c50555da + VANDNPD Y11, Y15, Y11 // c4410555db + VANDNPS (BX), X9, X2 // c4e1305513 or c5b05513 + VANDNPS (R11), X9, X2 // c4c1305513 + VANDNPS X2, X9, X2 // c4e13055d2 or c5b055d2 + VANDNPS X11, X9, X2 // c4c13055d3 + VANDNPS (BX), X9, X11 // c46130551b or c530551b + VANDNPS (R11), X9, X11 // c44130551b + VANDNPS X2, X9, X11 // c4613055da or c53055da + VANDNPS X11, X9, X11 // c4413055db + VANDNPS (BX), Y15, Y2 // c4e1045513 or c5845513 + VANDNPS (R11), Y15, Y2 // c4c1045513 + VANDNPS Y2, Y15, Y2 // c4e10455d2 or c58455d2 + VANDNPS Y11, Y15, Y2 // c4c10455d3 + VANDNPS (BX), Y15, Y11 // c46104551b or c504551b + VANDNPS (R11), Y15, Y11 // c44104551b + VANDNPS Y2, Y15, Y11 // c4610455da or c50455da + VANDNPS Y11, Y15, Y11 // c4410455db + VANDPD (BX), X9, X2 // c4e1315413 or c5b15413 + VANDPD (R11), X9, X2 // c4c1315413 + VANDPD X2, X9, X2 // c4e13154d2 or c5b154d2 + VANDPD X11, X9, X2 // c4c13154d3 + VANDPD (BX), X9, X11 // c46131541b or c531541b + VANDPD (R11), X9, X11 // c44131541b + VANDPD X2, X9, X11 // c4613154da or c53154da + VANDPD X11, X9, X11 // c4413154db + VANDPD (BX), Y15, Y2 // c4e1055413 or c5855413 + VANDPD (R11), Y15, Y2 // c4c1055413 + VANDPD Y2, Y15, Y2 // c4e10554d2 or c58554d2 + VANDPD Y11, Y15, Y2 // c4c10554d3 + VANDPD (BX), Y15, Y11 // c46105541b or c505541b + VANDPD (R11), Y15, Y11 // c44105541b + VANDPD Y2, Y15, Y11 // c4610554da or c50554da + VANDPD Y11, Y15, Y11 // c4410554db + 
VANDPS (BX), X9, X2 // c4e1305413 or c5b05413 + VANDPS (R11), X9, X2 // c4c1305413 + VANDPS X2, X9, X2 // c4e13054d2 or c5b054d2 + VANDPS X11, X9, X2 // c4c13054d3 + VANDPS (BX), X9, X11 // c46130541b or c530541b + VANDPS (R11), X9, X11 // c44130541b + VANDPS X2, X9, X11 // c4613054da or c53054da + VANDPS X11, X9, X11 // c4413054db + VANDPS (BX), Y15, Y2 // c4e1045413 or c5845413 + VANDPS (R11), Y15, Y2 // c4c1045413 + VANDPS Y2, Y15, Y2 // c4e10454d2 or c58454d2 + VANDPS Y11, Y15, Y2 // c4c10454d3 + VANDPS (BX), Y15, Y11 // c46104541b or c504541b + VANDPS (R11), Y15, Y11 // c44104541b + VANDPS Y2, Y15, Y11 // c4610454da or c50454da + VANDPS Y11, Y15, Y11 // c4410454db + VBLENDPD $7, (BX), X9, X2 // c4e3310d1307 + VBLENDPD $7, (R11), X9, X2 // c4c3310d1307 + VBLENDPD $7, X2, X9, X2 // c4e3310dd207 + VBLENDPD $7, X11, X9, X2 // c4c3310dd307 + VBLENDPD $7, (BX), X9, X11 // c463310d1b07 + VBLENDPD $7, (R11), X9, X11 // c443310d1b07 + VBLENDPD $7, X2, X9, X11 // c463310dda07 + VBLENDPD $7, X11, X9, X11 // c443310ddb07 + VBLENDPD $7, (BX), Y15, Y2 // c4e3050d1307 + VBLENDPD $7, (R11), Y15, Y2 // c4c3050d1307 + VBLENDPD $7, Y2, Y15, Y2 // c4e3050dd207 + VBLENDPD $7, Y11, Y15, Y2 // c4c3050dd307 + VBLENDPD $7, (BX), Y15, Y11 // c463050d1b07 + VBLENDPD $7, (R11), Y15, Y11 // c443050d1b07 + VBLENDPD $7, Y2, Y15, Y11 // c463050dda07 + VBLENDPD $7, Y11, Y15, Y11 // c443050ddb07 + VBLENDPS $7, (BX), X9, X2 // c4e3310c1307 + VBLENDPS $7, (R11), X9, X2 // c4c3310c1307 + VBLENDPS $7, X2, X9, X2 // c4e3310cd207 + VBLENDPS $7, X11, X9, X2 // c4c3310cd307 + VBLENDPS $7, (BX), X9, X11 // c463310c1b07 + VBLENDPS $7, (R11), X9, X11 // c443310c1b07 + VBLENDPS $7, X2, X9, X11 // c463310cda07 + VBLENDPS $7, X11, X9, X11 // c443310cdb07 + VBLENDPS $7, (BX), Y15, Y2 // c4e3050c1307 + VBLENDPS $7, (R11), Y15, Y2 // c4c3050c1307 + VBLENDPS $7, Y2, Y15, Y2 // c4e3050cd207 + VBLENDPS $7, Y11, Y15, Y2 // c4c3050cd307 + VBLENDPS $7, (BX), Y15, Y11 // c463050c1b07 + VBLENDPS $7, (R11), Y15, Y11 // 
c443050c1b07 + VBLENDPS $7, Y2, Y15, Y11 // c463050cda07 + VBLENDPS $7, Y11, Y15, Y11 // c443050cdb07 + VBLENDVPD X12, (BX), X9, X2 // c4e3314b13c0 + VBLENDVPD X12, (R11), X9, X2 // c4c3314b13c0 + VBLENDVPD X12, X2, X9, X2 // c4e3314bd2c0 + VBLENDVPD X12, X11, X9, X2 // c4c3314bd3c0 + VBLENDVPD X12, (BX), X9, X11 // c463314b1bc0 + VBLENDVPD X12, (R11), X9, X11 // c443314b1bc0 + VBLENDVPD X12, X2, X9, X11 // c463314bdac0 + VBLENDVPD X12, X11, X9, X11 // c443314bdbc0 + VBLENDVPD Y13, (BX), Y15, Y2 // c4e3054b13d0 + VBLENDVPD Y13, (R11), Y15, Y2 // c4c3054b13d0 + VBLENDVPD Y13, Y2, Y15, Y2 // c4e3054bd2d0 + VBLENDVPD Y13, Y11, Y15, Y2 // c4c3054bd3d0 + VBLENDVPD Y13, (BX), Y15, Y11 // c463054b1bd0 + VBLENDVPD Y13, (R11), Y15, Y11 // c443054b1bd0 + VBLENDVPD Y13, Y2, Y15, Y11 // c463054bdad0 + VBLENDVPD Y13, Y11, Y15, Y11 // c443054bdbd0 + VBLENDVPS X12, (BX), X9, X2 // c4e3314a13c0 + VBLENDVPS X12, (R11), X9, X2 // c4c3314a13c0 + VBLENDVPS X12, X2, X9, X2 // c4e3314ad2c0 + VBLENDVPS X12, X11, X9, X2 // c4c3314ad3c0 + VBLENDVPS X12, (BX), X9, X11 // c463314a1bc0 + VBLENDVPS X12, (R11), X9, X11 // c443314a1bc0 + VBLENDVPS X12, X2, X9, X11 // c463314adac0 + VBLENDVPS X12, X11, X9, X11 // c443314adbc0 + VBLENDVPS Y13, (BX), Y15, Y2 // c4e3054a13d0 + VBLENDVPS Y13, (R11), Y15, Y2 // c4c3054a13d0 + VBLENDVPS Y13, Y2, Y15, Y2 // c4e3054ad2d0 + VBLENDVPS Y13, Y11, Y15, Y2 // c4c3054ad3d0 + VBLENDVPS Y13, (BX), Y15, Y11 // c463054a1bd0 + VBLENDVPS Y13, (R11), Y15, Y11 // c443054a1bd0 + VBLENDVPS Y13, Y2, Y15, Y11 // c463054adad0 + VBLENDVPS Y13, Y11, Y15, Y11 // c443054adbd0 + VBROADCASTF128 (BX), Y2 // c4e27d1a13 + VBROADCASTF128 (R11), Y2 // c4c27d1a13 + VBROADCASTF128 (BX), Y11 // c4627d1a1b + VBROADCASTF128 (R11), Y11 // c4427d1a1b + VBROADCASTI128 (BX), Y2 // c4e27d5a13 + VBROADCASTI128 (R11), Y2 // c4c27d5a13 + VBROADCASTI128 (BX), Y11 // c4627d5a1b + VBROADCASTI128 (R11), Y11 // c4427d5a1b + VBROADCASTSD (BX), Y2 // c4e27d1913 + VBROADCASTSD (R11), Y2 // c4c27d1913 + 
VBROADCASTSD (BX), Y11 // c4627d191b + VBROADCASTSD (R11), Y11 // c4427d191b + VBROADCASTSD X2, Y2 // c4e27d19d2 + VBROADCASTSD X11, Y2 // c4c27d19d3 + VBROADCASTSD X2, Y11 // c4627d19da + VBROADCASTSD X11, Y11 // c4427d19db + VBROADCASTSS (BX), X2 // c4e2791813 + VBROADCASTSS (R11), X2 // c4c2791813 + VBROADCASTSS (BX), X11 // c46279181b + VBROADCASTSS (R11), X11 // c44279181b + VBROADCASTSS X2, X2 // c4e27918d2 + VBROADCASTSS X11, X2 // c4c27918d3 + VBROADCASTSS X2, X11 // c4627918da + VBROADCASTSS X11, X11 // c4427918db + VBROADCASTSS (BX), Y2 // c4e27d1813 + VBROADCASTSS (R11), Y2 // c4c27d1813 + VBROADCASTSS (BX), Y11 // c4627d181b + VBROADCASTSS (R11), Y11 // c4427d181b + VBROADCASTSS X2, Y2 // c4e27d18d2 + VBROADCASTSS X11, Y2 // c4c27d18d3 + VBROADCASTSS X2, Y11 // c4627d18da + VBROADCASTSS X11, Y11 // c4427d18db + VCMPPD $7, (BX), X9, X2 // c4e131c21307 or c5b1c21307 + VCMPPD $7, (R11), X9, X2 // c4c131c21307 + VCMPPD $7, X2, X9, X2 // c4e131c2d207 or c5b1c2d207 + VCMPPD $7, X11, X9, X2 // c4c131c2d307 + VCMPPD $7, (BX), X9, X11 // c46131c21b07 or c531c21b07 + VCMPPD $7, (R11), X9, X11 // c44131c21b07 + VCMPPD $7, X2, X9, X11 // c46131c2da07 or c531c2da07 + VCMPPD $7, X11, X9, X11 // c44131c2db07 + VCMPPD $7, (BX), Y15, Y2 // c4e105c21307 or c585c21307 + VCMPPD $7, (R11), Y15, Y2 // c4c105c21307 + VCMPPD $7, Y2, Y15, Y2 // c4e105c2d207 or c585c2d207 + VCMPPD $7, Y11, Y15, Y2 // c4c105c2d307 + VCMPPD $7, (BX), Y15, Y11 // c46105c21b07 or c505c21b07 + VCMPPD $7, (R11), Y15, Y11 // c44105c21b07 + VCMPPD $7, Y2, Y15, Y11 // c46105c2da07 or c505c2da07 + VCMPPD $7, Y11, Y15, Y11 // c44105c2db07 + VCMPPS $7, (BX), X9, X2 // c4e130c21307 or c5b0c21307 + VCMPPS $7, (R11), X9, X2 // c4c130c21307 + VCMPPS $7, X2, X9, X2 // c4e130c2d207 or c5b0c2d207 + VCMPPS $7, X11, X9, X2 // c4c130c2d307 + VCMPPS $7, (BX), X9, X11 // c46130c21b07 or c530c21b07 + VCMPPS $7, (R11), X9, X11 // c44130c21b07 + VCMPPS $7, X2, X9, X11 // c46130c2da07 or c530c2da07 + VCMPPS $7, X11, X9, 
X11 // c44130c2db07 + VCMPPS $7, (BX), Y15, Y2 // c4e104c21307 or c584c21307 + VCMPPS $7, (R11), Y15, Y2 // c4c104c21307 + VCMPPS $7, Y2, Y15, Y2 // c4e104c2d207 or c584c2d207 + VCMPPS $7, Y11, Y15, Y2 // c4c104c2d307 + VCMPPS $7, (BX), Y15, Y11 // c46104c21b07 or c504c21b07 + VCMPPS $7, (R11), Y15, Y11 // c44104c21b07 + VCMPPS $7, Y2, Y15, Y11 // c46104c2da07 or c504c2da07 + VCMPPS $7, Y11, Y15, Y11 // c44104c2db07 + VCMPSD $7, (BX), X9, X2 // c4e133c21307 or c5b3c21307 + VCMPSD $7, (R11), X9, X2 // c4c133c21307 + VCMPSD $7, X2, X9, X2 // c4e133c2d207 or c5b3c2d207 + VCMPSD $7, X11, X9, X2 // c4c133c2d307 + VCMPSD $7, (BX), X9, X11 // c46133c21b07 or c533c21b07 + VCMPSD $7, (R11), X9, X11 // c44133c21b07 + VCMPSD $7, X2, X9, X11 // c46133c2da07 or c533c2da07 + VCMPSD $7, X11, X9, X11 // c44133c2db07 + VCMPSS $7, (BX), X9, X2 // c4e132c21307 or c5b2c21307 + VCMPSS $7, (R11), X9, X2 // c4c132c21307 + VCMPSS $7, X2, X9, X2 // c4e132c2d207 or c5b2c2d207 + VCMPSS $7, X11, X9, X2 // c4c132c2d307 + VCMPSS $7, (BX), X9, X11 // c46132c21b07 or c532c21b07 + VCMPSS $7, (R11), X9, X11 // c44132c21b07 + VCMPSS $7, X2, X9, X11 // c46132c2da07 or c532c2da07 + VCMPSS $7, X11, X9, X11 // c44132c2db07 + VCOMISD (BX), X2 // c4e1792f13 or c5f92f13 + VCOMISD (R11), X2 // c4c1792f13 + VCOMISD X2, X2 // c4e1792fd2 or c5f92fd2 + VCOMISD X11, X2 // c4c1792fd3 + VCOMISD (BX), X11 // c461792f1b or c5792f1b + VCOMISD (R11), X11 // c441792f1b + VCOMISD X2, X11 // c461792fda or c5792fda + VCOMISD X11, X11 // c441792fdb + VCOMISS (BX), X2 // c4e1782f13 or c5f82f13 + VCOMISS (R11), X2 // c4c1782f13 + VCOMISS X2, X2 // c4e1782fd2 or c5f82fd2 + VCOMISS X11, X2 // c4c1782fd3 + VCOMISS (BX), X11 // c461782f1b or c5782f1b + VCOMISS (R11), X11 // c441782f1b + VCOMISS X2, X11 // c461782fda or c5782fda + VCOMISS X11, X11 // c441782fdb + VCVTDQ2PD (BX), X2 // c4e17ae613 or c5fae613 + VCVTDQ2PD (R11), X2 // c4c17ae613 + VCVTDQ2PD X2, X2 // c4e17ae6d2 or c5fae6d2 + VCVTDQ2PD X11, X2 // c4c17ae6d3 + 
VCVTDQ2PD (BX), X11 // c4617ae61b or c57ae61b + VCVTDQ2PD (R11), X11 // c4417ae61b + VCVTDQ2PD X2, X11 // c4617ae6da or c57ae6da + VCVTDQ2PD X11, X11 // c4417ae6db + VCVTDQ2PD (BX), Y2 // c4e17ee613 or c5fee613 + VCVTDQ2PD (R11), Y2 // c4c17ee613 + VCVTDQ2PD X2, Y2 // c4e17ee6d2 or c5fee6d2 + VCVTDQ2PD X11, Y2 // c4c17ee6d3 + VCVTDQ2PD (BX), Y11 // c4617ee61b or c57ee61b + VCVTDQ2PD (R11), Y11 // c4417ee61b + VCVTDQ2PD X2, Y11 // c4617ee6da or c57ee6da + VCVTDQ2PD X11, Y11 // c4417ee6db + VCVTDQ2PS (BX), X2 // c4e1785b13 or c5f85b13 + VCVTDQ2PS (R11), X2 // c4c1785b13 + VCVTDQ2PS X2, X2 // c4e1785bd2 or c5f85bd2 + VCVTDQ2PS X11, X2 // c4c1785bd3 + VCVTDQ2PS (BX), X11 // c461785b1b or c5785b1b + VCVTDQ2PS (R11), X11 // c441785b1b + VCVTDQ2PS X2, X11 // c461785bda or c5785bda + VCVTDQ2PS X11, X11 // c441785bdb + VCVTDQ2PS (BX), Y2 // c4e17c5b13 or c5fc5b13 + VCVTDQ2PS (R11), Y2 // c4c17c5b13 + VCVTDQ2PS Y2, Y2 // c4e17c5bd2 or c5fc5bd2 + VCVTDQ2PS Y11, Y2 // c4c17c5bd3 + VCVTDQ2PS (BX), Y11 // c4617c5b1b or c57c5b1b + VCVTDQ2PS (R11), Y11 // c4417c5b1b + VCVTDQ2PS Y2, Y11 // c4617c5bda or c57c5bda + VCVTDQ2PS Y11, Y11 // c4417c5bdb + VCVTPD2DQX (BX), X2 // c4e17be613 or c5fbe613 + VCVTPD2DQX (R11), X2 // c4c17be613 + VCVTPD2DQX X2, X2 // c4e17be6d2 or c5fbe6d2 + VCVTPD2DQX X11, X2 // c4c17be6d3 + VCVTPD2DQX (BX), X11 // c4617be61b or c57be61b + VCVTPD2DQX (R11), X11 // c4417be61b + VCVTPD2DQX X2, X11 // c4617be6da or c57be6da + VCVTPD2DQX X11, X11 // c4417be6db + VCVTPD2DQY (BX), X2 // c4e17fe613 or c5ffe613 + VCVTPD2DQY (R11), X2 // c4c17fe613 + VCVTPD2DQY Y2, X2 // c4e17fe6d2 or c5ffe6d2 + VCVTPD2DQY Y11, X2 // c4c17fe6d3 + VCVTPD2DQY (BX), X11 // c4617fe61b or c57fe61b + VCVTPD2DQY (R11), X11 // c4417fe61b + VCVTPD2DQY Y2, X11 // c4617fe6da or c57fe6da + VCVTPD2DQY Y11, X11 // c4417fe6db + VCVTPD2PSX (BX), X2 // c4e1795a13 or c5f95a13 + VCVTPD2PSX (R11), X2 // c4c1795a13 + VCVTPD2PSX X2, X2 // c4e1795ad2 or c5f95ad2 + VCVTPD2PSX X11, X2 // c4c1795ad3 + VCVTPD2PSX 
(BX), X11 // c461795a1b or c5795a1b + VCVTPD2PSX (R11), X11 // c441795a1b + VCVTPD2PSX X2, X11 // c461795ada or c5795ada + VCVTPD2PSX X11, X11 // c441795adb + VCVTPD2PSY (BX), X2 // c4e17d5a13 or c5fd5a13 + VCVTPD2PSY (R11), X2 // c4c17d5a13 + VCVTPD2PSY Y2, X2 // c4e17d5ad2 or c5fd5ad2 + VCVTPD2PSY Y11, X2 // c4c17d5ad3 + VCVTPD2PSY (BX), X11 // c4617d5a1b or c57d5a1b + VCVTPD2PSY (R11), X11 // c4417d5a1b + VCVTPD2PSY Y2, X11 // c4617d5ada or c57d5ada + VCVTPD2PSY Y11, X11 // c4417d5adb + VCVTPH2PS (BX), X2 // c4e2791313 + VCVTPH2PS (R11), X2 // c4c2791313 + VCVTPH2PS X2, X2 // c4e27913d2 + VCVTPH2PS X11, X2 // c4c27913d3 + VCVTPH2PS (BX), X11 // c46279131b + VCVTPH2PS (R11), X11 // c44279131b + VCVTPH2PS X2, X11 // c4627913da + VCVTPH2PS X11, X11 // c4427913db + VCVTPH2PS (BX), Y2 // c4e27d1313 + VCVTPH2PS (R11), Y2 // c4c27d1313 + VCVTPH2PS X2, Y2 // c4e27d13d2 + VCVTPH2PS X11, Y2 // c4c27d13d3 + VCVTPH2PS (BX), Y11 // c4627d131b + VCVTPH2PS (R11), Y11 // c4427d131b + VCVTPH2PS X2, Y11 // c4627d13da + VCVTPH2PS X11, Y11 // c4427d13db + VCVTPS2DQ (BX), X2 // c4e1795b13 or c5f95b13 + VCVTPS2DQ (R11), X2 // c4c1795b13 + VCVTPS2DQ X2, X2 // c4e1795bd2 or c5f95bd2 + VCVTPS2DQ X11, X2 // c4c1795bd3 + VCVTPS2DQ (BX), X11 // c461795b1b or c5795b1b + VCVTPS2DQ (R11), X11 // c441795b1b + VCVTPS2DQ X2, X11 // c461795bda or c5795bda + VCVTPS2DQ X11, X11 // c441795bdb + VCVTPS2DQ (BX), Y2 // c4e17d5b13 or c5fd5b13 + VCVTPS2DQ (R11), Y2 // c4c17d5b13 + VCVTPS2DQ Y2, Y2 // c4e17d5bd2 or c5fd5bd2 + VCVTPS2DQ Y11, Y2 // c4c17d5bd3 + VCVTPS2DQ (BX), Y11 // c4617d5b1b or c57d5b1b + VCVTPS2DQ (R11), Y11 // c4417d5b1b + VCVTPS2DQ Y2, Y11 // c4617d5bda or c57d5bda + VCVTPS2DQ Y11, Y11 // c4417d5bdb + VCVTPS2PD (BX), X2 // c4e1785a13 or c5f85a13 + VCVTPS2PD (R11), X2 // c4c1785a13 + VCVTPS2PD X2, X2 // c4e1785ad2 or c5f85ad2 + VCVTPS2PD X11, X2 // c4c1785ad3 + VCVTPS2PD (BX), X11 // c461785a1b or c5785a1b + VCVTPS2PD (R11), X11 // c441785a1b + VCVTPS2PD X2, X11 // c461785ada or 
c5785ada + VCVTPS2PD X11, X11 // c441785adb + VCVTPS2PD (BX), Y2 // c4e17c5a13 or c5fc5a13 + VCVTPS2PD (R11), Y2 // c4c17c5a13 + VCVTPS2PD X2, Y2 // c4e17c5ad2 or c5fc5ad2 + VCVTPS2PD X11, Y2 // c4c17c5ad3 + VCVTPS2PD (BX), Y11 // c4617c5a1b or c57c5a1b + VCVTPS2PD (R11), Y11 // c4417c5a1b + VCVTPS2PD X2, Y11 // c4617c5ada or c57c5ada + VCVTPS2PD X11, Y11 // c4417c5adb + VCVTPS2PH $7, Y2, (BX) // c4e37d1d1307 + VCVTPS2PH $7, Y11, (BX) // c4637d1d1b07 + VCVTPS2PH $7, Y2, (R11) // c4c37d1d1307 + VCVTPS2PH $7, Y11, (R11) // c4437d1d1b07 + VCVTPS2PH $7, Y2, X2 // c4e37d1dd207 + VCVTPS2PH $7, Y11, X2 // c4637d1dda07 + VCVTPS2PH $7, Y2, X11 // c4c37d1dd307 + VCVTPS2PH $7, Y11, X11 // c4437d1ddb07 + VCVTPS2PH $7, X2, (BX) // c4e3791d1307 + VCVTPS2PH $7, X11, (BX) // c463791d1b07 + VCVTPS2PH $7, X2, (R11) // c4c3791d1307 + VCVTPS2PH $7, X11, (R11) // c443791d1b07 + VCVTPS2PH $7, X2, X2 // c4e3791dd207 + VCVTPS2PH $7, X11, X2 // c463791dda07 + VCVTPS2PH $7, X2, X11 // c4c3791dd307 + VCVTPS2PH $7, X11, X11 // c443791ddb07 + VCVTSD2SI (BX), DX // c4e17b2d13 or c5fb2d13 + VCVTSD2SI (R11), DX // c4c17b2d13 + VCVTSD2SI X2, DX // c4e17b2dd2 or c5fb2dd2 + VCVTSD2SI X11, DX // c4c17b2dd3 + VCVTSD2SI (BX), R11 // c4617b2d1b or c57b2d1b + VCVTSD2SI (R11), R11 // c4417b2d1b + VCVTSD2SI X2, R11 // c4617b2dda or c57b2dda + VCVTSD2SI X11, R11 // c4417b2ddb + VCVTSD2SIQ (BX), DX // c4e1fb2d13 + VCVTSD2SIQ (R11), DX // c4c1fb2d13 + VCVTSD2SIQ X2, DX // c4e1fb2dd2 + VCVTSD2SIQ X11, DX // c4c1fb2dd3 + VCVTSD2SIQ (BX), R11 // c461fb2d1b + VCVTSD2SIQ (R11), R11 // c441fb2d1b + VCVTSD2SIQ X2, R11 // c461fb2dda + VCVTSD2SIQ X11, R11 // c441fb2ddb + VCVTSD2SS (BX), X9, X2 // c4e1335a13 or c5b35a13 + VCVTSD2SS (R11), X9, X2 // c4c1335a13 + VCVTSD2SS X2, X9, X2 // c4e1335ad2 or c5b35ad2 + VCVTSD2SS X11, X9, X2 // c4c1335ad3 + VCVTSD2SS (BX), X9, X11 // c461335a1b or c5335a1b + VCVTSD2SS (R11), X9, X11 // c441335a1b + VCVTSD2SS X2, X9, X11 // c461335ada or c5335ada + VCVTSD2SS X11, X9, X11 // 
c441335adb + VCVTSI2SDL (BX), X9, X2 // c4e1332a13 or c5b32a13 + VCVTSI2SDL (R11), X9, X2 // c4c1332a13 + VCVTSI2SDL DX, X9, X2 // c4e1332ad2 or c5b32ad2 + VCVTSI2SDL R11, X9, X2 // c4c1332ad3 + VCVTSI2SDL (BX), X9, X11 // c461332a1b or c5332a1b + VCVTSI2SDL (R11), X9, X11 // c441332a1b + VCVTSI2SDL DX, X9, X11 // c461332ada or c5332ada + VCVTSI2SDL R11, X9, X11 // c441332adb + VCVTSI2SDQ (BX), X9, X2 // c4e1b32a13 + VCVTSI2SDQ (R11), X9, X2 // c4c1b32a13 + VCVTSI2SDQ DX, X9, X2 // c4e1b32ad2 + VCVTSI2SDQ R11, X9, X2 // c4c1b32ad3 + VCVTSI2SDQ (BX), X9, X11 // c461b32a1b + VCVTSI2SDQ (R11), X9, X11 // c441b32a1b + VCVTSI2SDQ DX, X9, X11 // c461b32ada + VCVTSI2SDQ R11, X9, X11 // c441b32adb + VCVTSI2SSL (BX), X9, X2 // c4e1322a13 or c5b22a13 + VCVTSI2SSL (R11), X9, X2 // c4c1322a13 + VCVTSI2SSL DX, X9, X2 // c4e1322ad2 or c5b22ad2 + VCVTSI2SSL R11, X9, X2 // c4c1322ad3 + VCVTSI2SSL (BX), X9, X11 // c461322a1b or c5322a1b + VCVTSI2SSL (R11), X9, X11 // c441322a1b + VCVTSI2SSL DX, X9, X11 // c461322ada or c5322ada + VCVTSI2SSL R11, X9, X11 // c441322adb + VCVTSI2SSQ (BX), X9, X2 // c4e1b22a13 + VCVTSI2SSQ (R11), X9, X2 // c4c1b22a13 + VCVTSI2SSQ DX, X9, X2 // c4e1b22ad2 + VCVTSI2SSQ R11, X9, X2 // c4c1b22ad3 + VCVTSI2SSQ (BX), X9, X11 // c461b22a1b + VCVTSI2SSQ (R11), X9, X11 // c441b22a1b + VCVTSI2SSQ DX, X9, X11 // c461b22ada + VCVTSI2SSQ R11, X9, X11 // c441b22adb + VCVTSS2SD (BX), X9, X2 // c4e1325a13 or c5b25a13 + VCVTSS2SD (R11), X9, X2 // c4c1325a13 + VCVTSS2SD X2, X9, X2 // c4e1325ad2 or c5b25ad2 + VCVTSS2SD X11, X9, X2 // c4c1325ad3 + VCVTSS2SD (BX), X9, X11 // c461325a1b or c5325a1b + VCVTSS2SD (R11), X9, X11 // c441325a1b + VCVTSS2SD X2, X9, X11 // c461325ada or c5325ada + VCVTSS2SD X11, X9, X11 // c441325adb + VCVTSS2SI (BX), DX // c4e17a2d13 or c5fa2d13 + VCVTSS2SI (R11), DX // c4c17a2d13 + VCVTSS2SI X2, DX // c4e17a2dd2 or c5fa2dd2 + VCVTSS2SI X11, DX // c4c17a2dd3 + VCVTSS2SI (BX), R11 // c4617a2d1b or c57a2d1b + VCVTSS2SI (R11), R11 // c4417a2d1b + 
VCVTSS2SI X2, R11 // c4617a2dda or c57a2dda + VCVTSS2SI X11, R11 // c4417a2ddb + VCVTSS2SIQ (BX), DX // c4e1fa2d13 + VCVTSS2SIQ (R11), DX // c4c1fa2d13 + VCVTSS2SIQ X2, DX // c4e1fa2dd2 + VCVTSS2SIQ X11, DX // c4c1fa2dd3 + VCVTSS2SIQ (BX), R11 // c461fa2d1b + VCVTSS2SIQ (R11), R11 // c441fa2d1b + VCVTSS2SIQ X2, R11 // c461fa2dda + VCVTSS2SIQ X11, R11 // c441fa2ddb + VCVTTPD2DQX (BX), X2 // c4e179e613 or c5f9e613 + VCVTTPD2DQX (R11), X2 // c4c179e613 + VCVTTPD2DQX X2, X2 // c4e179e6d2 or c5f9e6d2 + VCVTTPD2DQX X11, X2 // c4c179e6d3 + VCVTTPD2DQX (BX), X11 // c46179e61b or c579e61b + VCVTTPD2DQX (R11), X11 // c44179e61b + VCVTTPD2DQX X2, X11 // c46179e6da or c579e6da + VCVTTPD2DQX X11, X11 // c44179e6db + VCVTTPD2DQY (BX), X2 // c4e17de613 or c5fde613 + VCVTTPD2DQY (R11), X2 // c4c17de613 + VCVTTPD2DQY Y2, X2 // c4e17de6d2 or c5fde6d2 + VCVTTPD2DQY Y11, X2 // c4c17de6d3 + VCVTTPD2DQY (BX), X11 // c4617de61b or c57de61b + VCVTTPD2DQY (R11), X11 // c4417de61b + VCVTTPD2DQY Y2, X11 // c4617de6da or c57de6da + VCVTTPD2DQY Y11, X11 // c4417de6db + VCVTTPS2DQ (BX), X2 // c4e17a5b13 or c5fa5b13 + VCVTTPS2DQ (R11), X2 // c4c17a5b13 + VCVTTPS2DQ X2, X2 // c4e17a5bd2 or c5fa5bd2 + VCVTTPS2DQ X11, X2 // c4c17a5bd3 + VCVTTPS2DQ (BX), X11 // c4617a5b1b or c57a5b1b + VCVTTPS2DQ (R11), X11 // c4417a5b1b + VCVTTPS2DQ X2, X11 // c4617a5bda or c57a5bda + VCVTTPS2DQ X11, X11 // c4417a5bdb + VCVTTPS2DQ (BX), Y2 // c4e17e5b13 or c5fe5b13 + VCVTTPS2DQ (R11), Y2 // c4c17e5b13 + VCVTTPS2DQ Y2, Y2 // c4e17e5bd2 or c5fe5bd2 + VCVTTPS2DQ Y11, Y2 // c4c17e5bd3 + VCVTTPS2DQ (BX), Y11 // c4617e5b1b or c57e5b1b + VCVTTPS2DQ (R11), Y11 // c4417e5b1b + VCVTTPS2DQ Y2, Y11 // c4617e5bda or c57e5bda + VCVTTPS2DQ Y11, Y11 // c4417e5bdb + VCVTTSD2SI (BX), DX // c4e17b2c13 or c5fb2c13 + VCVTTSD2SI (R11), DX // c4c17b2c13 + VCVTTSD2SI X2, DX // c4e17b2cd2 or c5fb2cd2 + VCVTTSD2SI X11, DX // c4c17b2cd3 + VCVTTSD2SI (BX), R11 // c4617b2c1b or c57b2c1b + VCVTTSD2SI (R11), R11 // c4417b2c1b + VCVTTSD2SI X2, 
R11 // c4617b2cda or c57b2cda + VCVTTSD2SI X11, R11 // c4417b2cdb + VCVTTSD2SIQ (BX), DX // c4e1fb2c13 + VCVTTSD2SIQ (R11), DX // c4c1fb2c13 + VCVTTSD2SIQ X2, DX // c4e1fb2cd2 + VCVTTSD2SIQ X11, DX // c4c1fb2cd3 + VCVTTSD2SIQ (BX), R11 // c461fb2c1b + VCVTTSD2SIQ (R11), R11 // c441fb2c1b + VCVTTSD2SIQ X2, R11 // c461fb2cda + VCVTTSD2SIQ X11, R11 // c441fb2cdb + VCVTTSS2SI (BX), DX // c4e17a2c13 or c5fa2c13 + VCVTTSS2SI (R11), DX // c4c17a2c13 + VCVTTSS2SI X2, DX // c4e17a2cd2 or c5fa2cd2 + VCVTTSS2SI X11, DX // c4c17a2cd3 + VCVTTSS2SI (BX), R11 // c4617a2c1b or c57a2c1b + VCVTTSS2SI (R11), R11 // c4417a2c1b + VCVTTSS2SI X2, R11 // c4617a2cda or c57a2cda + VCVTTSS2SI X11, R11 // c4417a2cdb + VCVTTSS2SIQ (BX), DX // c4e1fa2c13 + VCVTTSS2SIQ (R11), DX // c4c1fa2c13 + VCVTTSS2SIQ X2, DX // c4e1fa2cd2 + VCVTTSS2SIQ X11, DX // c4c1fa2cd3 + VCVTTSS2SIQ (BX), R11 // c461fa2c1b + VCVTTSS2SIQ (R11), R11 // c441fa2c1b + VCVTTSS2SIQ X2, R11 // c461fa2cda + VCVTTSS2SIQ X11, R11 // c441fa2cdb + VDIVPD (BX), X9, X2 // c4e1315e13 or c5b15e13 + VDIVPD (R11), X9, X2 // c4c1315e13 + VDIVPD X2, X9, X2 // c4e1315ed2 or c5b15ed2 + VDIVPD X11, X9, X2 // c4c1315ed3 + VDIVPD (BX), X9, X11 // c461315e1b or c5315e1b + VDIVPD (R11), X9, X11 // c441315e1b + VDIVPD X2, X9, X11 // c461315eda or c5315eda + VDIVPD X11, X9, X11 // c441315edb + VDIVPD (BX), Y15, Y2 // c4e1055e13 or c5855e13 + VDIVPD (R11), Y15, Y2 // c4c1055e13 + VDIVPD Y2, Y15, Y2 // c4e1055ed2 or c5855ed2 + VDIVPD Y11, Y15, Y2 // c4c1055ed3 + VDIVPD (BX), Y15, Y11 // c461055e1b or c5055e1b + VDIVPD (R11), Y15, Y11 // c441055e1b + VDIVPD Y2, Y15, Y11 // c461055eda or c5055eda + VDIVPD Y11, Y15, Y11 // c441055edb + VDIVPS (BX), X9, X2 // c4e1305e13 or c5b05e13 + VDIVPS (R11), X9, X2 // c4c1305e13 + VDIVPS X2, X9, X2 // c4e1305ed2 or c5b05ed2 + VDIVPS X11, X9, X2 // c4c1305ed3 + VDIVPS (BX), X9, X11 // c461305e1b or c5305e1b + VDIVPS (R11), X9, X11 // c441305e1b + VDIVPS X2, X9, X11 // c461305eda or c5305eda + VDIVPS X11, X9, X11 // 
c441305edb + VDIVPS (BX), Y15, Y2 // c4e1045e13 or c5845e13 + VDIVPS (R11), Y15, Y2 // c4c1045e13 + VDIVPS Y2, Y15, Y2 // c4e1045ed2 or c5845ed2 + VDIVPS Y11, Y15, Y2 // c4c1045ed3 + VDIVPS (BX), Y15, Y11 // c461045e1b or c5045e1b + VDIVPS (R11), Y15, Y11 // c441045e1b + VDIVPS Y2, Y15, Y11 // c461045eda or c5045eda + VDIVPS Y11, Y15, Y11 // c441045edb + VDIVSD (BX), X9, X2 // c4e1335e13 or c5b35e13 + VDIVSD (R11), X9, X2 // c4c1335e13 + VDIVSD X2, X9, X2 // c4e1335ed2 or c5b35ed2 + VDIVSD X11, X9, X2 // c4c1335ed3 + VDIVSD (BX), X9, X11 // c461335e1b or c5335e1b + VDIVSD (R11), X9, X11 // c441335e1b + VDIVSD X2, X9, X11 // c461335eda or c5335eda + VDIVSD X11, X9, X11 // c441335edb + VDIVSS (BX), X9, X2 // c4e1325e13 or c5b25e13 + VDIVSS (R11), X9, X2 // c4c1325e13 + VDIVSS X2, X9, X2 // c4e1325ed2 or c5b25ed2 + VDIVSS X11, X9, X2 // c4c1325ed3 + VDIVSS (BX), X9, X11 // c461325e1b or c5325e1b + VDIVSS (R11), X9, X11 // c441325e1b + VDIVSS X2, X9, X11 // c461325eda or c5325eda + VDIVSS X11, X9, X11 // c441325edb + VDPPD $7, (BX), X9, X2 // c4e331411307 + VDPPD $7, (R11), X9, X2 // c4c331411307 + VDPPD $7, X2, X9, X2 // c4e33141d207 + VDPPD $7, X11, X9, X2 // c4c33141d307 + VDPPD $7, (BX), X9, X11 // c46331411b07 + VDPPD $7, (R11), X9, X11 // c44331411b07 + VDPPD $7, X2, X9, X11 // c4633141da07 + VDPPD $7, X11, X9, X11 // c4433141db07 + VDPPS $7, (BX), X9, X2 // c4e331401307 + VDPPS $7, (R11), X9, X2 // c4c331401307 + VDPPS $7, X2, X9, X2 // c4e33140d207 + VDPPS $7, X11, X9, X2 // c4c33140d307 + VDPPS $7, (BX), X9, X11 // c46331401b07 + VDPPS $7, (R11), X9, X11 // c44331401b07 + VDPPS $7, X2, X9, X11 // c4633140da07 + VDPPS $7, X11, X9, X11 // c4433140db07 + VDPPS $7, (BX), Y15, Y2 // c4e305401307 + VDPPS $7, (R11), Y15, Y2 // c4c305401307 + VDPPS $7, Y2, Y15, Y2 // c4e30540d207 + VDPPS $7, Y11, Y15, Y2 // c4c30540d307 + VDPPS $7, (BX), Y15, Y11 // c46305401b07 + VDPPS $7, (R11), Y15, Y11 // c44305401b07 + VDPPS $7, Y2, Y15, Y11 // c4630540da07 + VDPPS $7, Y11, Y15, 
Y11 // c4430540db07 + VERR (BX) // 0f0023 + VERR (R11) // 410f0023 + VERR DX // 0f00e2 + VERR R11 // 410f00e3 + VERW (BX) // 0f002b + VERW (R11) // 410f002b + VERW DX // 0f00ea + VERW R11 // 410f00eb + VEXTRACTF128 $7, Y2, (BX) // c4e37d191307 + VEXTRACTF128 $7, Y11, (BX) // c4637d191b07 + VEXTRACTF128 $7, Y2, (R11) // c4c37d191307 + VEXTRACTF128 $7, Y11, (R11) // c4437d191b07 + VEXTRACTF128 $7, Y2, X2 // c4e37d19d207 + VEXTRACTF128 $7, Y11, X2 // c4637d19da07 + VEXTRACTF128 $7, Y2, X11 // c4c37d19d307 + VEXTRACTF128 $7, Y11, X11 // c4437d19db07 + VEXTRACTI128 $7, Y2, (BX) // c4e37d391307 + VEXTRACTI128 $7, Y11, (BX) // c4637d391b07 + VEXTRACTI128 $7, Y2, (R11) // c4c37d391307 + VEXTRACTI128 $7, Y11, (R11) // c4437d391b07 + VEXTRACTI128 $7, Y2, X2 // c4e37d39d207 + VEXTRACTI128 $7, Y11, X2 // c4637d39da07 + VEXTRACTI128 $7, Y2, X11 // c4c37d39d307 + VEXTRACTI128 $7, Y11, X11 // c4437d39db07 + VEXTRACTPS $7, X2, (BX) // c4e379171307 + VEXTRACTPS $7, X11, (BX) // c46379171b07 + VEXTRACTPS $7, X2, (R11) // c4c379171307 + VEXTRACTPS $7, X11, (R11) // c44379171b07 + VEXTRACTPS $7, X2, DX // c4e37917d207 + VEXTRACTPS $7, X11, DX // c4637917da07 + VEXTRACTPS $7, X2, R11 // c4c37917d307 + VEXTRACTPS $7, X11, R11 // c4437917db07 + VFMADD132PD (BX), X9, X2 // c4e2b19813 + VFMADD132PD (R11), X9, X2 // c4c2b19813 + VFMADD132PD X2, X9, X2 // c4e2b198d2 + VFMADD132PD X11, X9, X2 // c4c2b198d3 + VFMADD132PD (BX), X9, X11 // c462b1981b + VFMADD132PD (R11), X9, X11 // c442b1981b + VFMADD132PD X2, X9, X11 // c462b198da + VFMADD132PD X11, X9, X11 // c442b198db + VFMADD132PD (BX), Y15, Y2 // c4e2859813 + VFMADD132PD (R11), Y15, Y2 // c4c2859813 + VFMADD132PD Y2, Y15, Y2 // c4e28598d2 + VFMADD132PD Y11, Y15, Y2 // c4c28598d3 + VFMADD132PD (BX), Y15, Y11 // c46285981b + VFMADD132PD (R11), Y15, Y11 // c44285981b + VFMADD132PD Y2, Y15, Y11 // c4628598da + VFMADD132PD Y11, Y15, Y11 // c4428598db + VFMADD132PS (BX), X9, X2 // c4e2319813 + VFMADD132PS (R11), X9, X2 // c4c2319813 + 
VFMADD132PS X2, X9, X2 // c4e23198d2 + VFMADD132PS X11, X9, X2 // c4c23198d3 + VFMADD132PS (BX), X9, X11 // c46231981b + VFMADD132PS (R11), X9, X11 // c44231981b + VFMADD132PS X2, X9, X11 // c4623198da + VFMADD132PS X11, X9, X11 // c4423198db + VFMADD132PS (BX), Y15, Y2 // c4e2059813 + VFMADD132PS (R11), Y15, Y2 // c4c2059813 + VFMADD132PS Y2, Y15, Y2 // c4e20598d2 + VFMADD132PS Y11, Y15, Y2 // c4c20598d3 + VFMADD132PS (BX), Y15, Y11 // c46205981b + VFMADD132PS (R11), Y15, Y11 // c44205981b + VFMADD132PS Y2, Y15, Y11 // c4620598da + VFMADD132PS Y11, Y15, Y11 // c4420598db + VFMADD132SD (BX), X9, X2 // c4e2b19913 + VFMADD132SD (R11), X9, X2 // c4c2b19913 + VFMADD132SD X2, X9, X2 // c4e2b199d2 + VFMADD132SD X11, X9, X2 // c4c2b199d3 + VFMADD132SD (BX), X9, X11 // c462b1991b + VFMADD132SD (R11), X9, X11 // c442b1991b + VFMADD132SD X2, X9, X11 // c462b199da + VFMADD132SD X11, X9, X11 // c442b199db + VFMADD132SS (BX), X9, X2 // c4e2319913 + VFMADD132SS (R11), X9, X2 // c4c2319913 + VFMADD132SS X2, X9, X2 // c4e23199d2 + VFMADD132SS X11, X9, X2 // c4c23199d3 + VFMADD132SS (BX), X9, X11 // c46231991b + VFMADD132SS (R11), X9, X11 // c44231991b + VFMADD132SS X2, X9, X11 // c4623199da + VFMADD132SS X11, X9, X11 // c4423199db + VFMADD213PD (BX), X9, X2 // c4e2b1a813 + VFMADD213PD (R11), X9, X2 // c4c2b1a813 + VFMADD213PD X2, X9, X2 // c4e2b1a8d2 + VFMADD213PD X11, X9, X2 // c4c2b1a8d3 + VFMADD213PD (BX), X9, X11 // c462b1a81b + VFMADD213PD (R11), X9, X11 // c442b1a81b + VFMADD213PD X2, X9, X11 // c462b1a8da + VFMADD213PD X11, X9, X11 // c442b1a8db + VFMADD213PD (BX), Y15, Y2 // c4e285a813 + VFMADD213PD (R11), Y15, Y2 // c4c285a813 + VFMADD213PD Y2, Y15, Y2 // c4e285a8d2 + VFMADD213PD Y11, Y15, Y2 // c4c285a8d3 + VFMADD213PD (BX), Y15, Y11 // c46285a81b + VFMADD213PD (R11), Y15, Y11 // c44285a81b + VFMADD213PD Y2, Y15, Y11 // c46285a8da + VFMADD213PD Y11, Y15, Y11 // c44285a8db + VFMADD213PS (BX), X9, X2 // c4e231a813 + VFMADD213PS (R11), X9, X2 // c4c231a813 + VFMADD213PS X2, 
X9, X2 // c4e231a8d2 + VFMADD213PS X11, X9, X2 // c4c231a8d3 + VFMADD213PS (BX), X9, X11 // c46231a81b + VFMADD213PS (R11), X9, X11 // c44231a81b + VFMADD213PS X2, X9, X11 // c46231a8da + VFMADD213PS X11, X9, X11 // c44231a8db + VFMADD213PS (BX), Y15, Y2 // c4e205a813 + VFMADD213PS (R11), Y15, Y2 // c4c205a813 + VFMADD213PS Y2, Y15, Y2 // c4e205a8d2 + VFMADD213PS Y11, Y15, Y2 // c4c205a8d3 + VFMADD213PS (BX), Y15, Y11 // c46205a81b + VFMADD213PS (R11), Y15, Y11 // c44205a81b + VFMADD213PS Y2, Y15, Y11 // c46205a8da + VFMADD213PS Y11, Y15, Y11 // c44205a8db + VFMADD213SD (BX), X9, X2 // c4e2b1a913 + VFMADD213SD (R11), X9, X2 // c4c2b1a913 + VFMADD213SD X2, X9, X2 // c4e2b1a9d2 + VFMADD213SD X11, X9, X2 // c4c2b1a9d3 + VFMADD213SD (BX), X9, X11 // c462b1a91b + VFMADD213SD (R11), X9, X11 // c442b1a91b + VFMADD213SD X2, X9, X11 // c462b1a9da + VFMADD213SD X11, X9, X11 // c442b1a9db + VFMADD213SS (BX), X9, X2 // c4e231a913 + VFMADD213SS (R11), X9, X2 // c4c231a913 + VFMADD213SS X2, X9, X2 // c4e231a9d2 + VFMADD213SS X11, X9, X2 // c4c231a9d3 + VFMADD213SS (BX), X9, X11 // c46231a91b + VFMADD213SS (R11), X9, X11 // c44231a91b + VFMADD213SS X2, X9, X11 // c46231a9da + VFMADD213SS X11, X9, X11 // c44231a9db + VFMADD231PD (BX), X9, X2 // c4e2b1b813 + VFMADD231PD (R11), X9, X2 // c4c2b1b813 + VFMADD231PD X2, X9, X2 // c4e2b1b8d2 + VFMADD231PD X11, X9, X2 // c4c2b1b8d3 + VFMADD231PD (BX), X9, X11 // c462b1b81b + VFMADD231PD (R11), X9, X11 // c442b1b81b + VFMADD231PD X2, X9, X11 // c462b1b8da + VFMADD231PD X11, X9, X11 // c442b1b8db + VFMADD231PD (BX), Y15, Y2 // c4e285b813 + VFMADD231PD (R11), Y15, Y2 // c4c285b813 + VFMADD231PD Y2, Y15, Y2 // c4e285b8d2 + VFMADD231PD Y11, Y15, Y2 // c4c285b8d3 + VFMADD231PD (BX), Y15, Y11 // c46285b81b + VFMADD231PD (R11), Y15, Y11 // c44285b81b + VFMADD231PD Y2, Y15, Y11 // c46285b8da + VFMADD231PD Y11, Y15, Y11 // c44285b8db + VFMADD231PS (BX), X9, X2 // c4e231b813 + VFMADD231PS (R11), X9, X2 // c4c231b813 + VFMADD231PS X2, X9, X2 // 
c4e231b8d2 + VFMADD231PS X11, X9, X2 // c4c231b8d3 + VFMADD231PS (BX), X9, X11 // c46231b81b + VFMADD231PS (R11), X9, X11 // c44231b81b + VFMADD231PS X2, X9, X11 // c46231b8da + VFMADD231PS X11, X9, X11 // c44231b8db + VFMADD231PS (BX), Y15, Y2 // c4e205b813 + VFMADD231PS (R11), Y15, Y2 // c4c205b813 + VFMADD231PS Y2, Y15, Y2 // c4e205b8d2 + VFMADD231PS Y11, Y15, Y2 // c4c205b8d3 + VFMADD231PS (BX), Y15, Y11 // c46205b81b + VFMADD231PS (R11), Y15, Y11 // c44205b81b + VFMADD231PS Y2, Y15, Y11 // c46205b8da + VFMADD231PS Y11, Y15, Y11 // c44205b8db + VFMADD231SD (BX), X9, X2 // c4e2b1b913 + VFMADD231SD (R11), X9, X2 // c4c2b1b913 + VFMADD231SD X2, X9, X2 // c4e2b1b9d2 + VFMADD231SD X11, X9, X2 // c4c2b1b9d3 + VFMADD231SD (BX), X9, X11 // c462b1b91b + VFMADD231SD (R11), X9, X11 // c442b1b91b + VFMADD231SD X2, X9, X11 // c462b1b9da + VFMADD231SD X11, X9, X11 // c442b1b9db + VFMADD231SS (BX), X9, X2 // c4e231b913 + VFMADD231SS (R11), X9, X2 // c4c231b913 + VFMADD231SS X2, X9, X2 // c4e231b9d2 + VFMADD231SS X11, X9, X2 // c4c231b9d3 + VFMADD231SS (BX), X9, X11 // c46231b91b + VFMADD231SS (R11), X9, X11 // c44231b91b + VFMADD231SS X2, X9, X11 // c46231b9da + VFMADD231SS X11, X9, X11 // c44231b9db + VFMADDSUB132PD (BX), X9, X2 // c4e2b19613 + VFMADDSUB132PD (R11), X9, X2 // c4c2b19613 + VFMADDSUB132PD X2, X9, X2 // c4e2b196d2 + VFMADDSUB132PD X11, X9, X2 // c4c2b196d3 + VFMADDSUB132PD (BX), X9, X11 // c462b1961b + VFMADDSUB132PD (R11), X9, X11 // c442b1961b + VFMADDSUB132PD X2, X9, X11 // c462b196da + VFMADDSUB132PD X11, X9, X11 // c442b196db + VFMADDSUB132PD (BX), Y15, Y2 // c4e2859613 + VFMADDSUB132PD (R11), Y15, Y2 // c4c2859613 + VFMADDSUB132PD Y2, Y15, Y2 // c4e28596d2 + VFMADDSUB132PD Y11, Y15, Y2 // c4c28596d3 + VFMADDSUB132PD (BX), Y15, Y11 // c46285961b + VFMADDSUB132PD (R11), Y15, Y11 // c44285961b + VFMADDSUB132PD Y2, Y15, Y11 // c4628596da + VFMADDSUB132PD Y11, Y15, Y11 // c4428596db + VFMADDSUB132PS (BX), X9, X2 // c4e2319613 + VFMADDSUB132PS (R11), X9, X2 // 
c4c2319613 + VFMADDSUB132PS X2, X9, X2 // c4e23196d2 + VFMADDSUB132PS X11, X9, X2 // c4c23196d3 + VFMADDSUB132PS (BX), X9, X11 // c46231961b + VFMADDSUB132PS (R11), X9, X11 // c44231961b + VFMADDSUB132PS X2, X9, X11 // c4623196da + VFMADDSUB132PS X11, X9, X11 // c4423196db + VFMADDSUB132PS (BX), Y15, Y2 // c4e2059613 + VFMADDSUB132PS (R11), Y15, Y2 // c4c2059613 + VFMADDSUB132PS Y2, Y15, Y2 // c4e20596d2 + VFMADDSUB132PS Y11, Y15, Y2 // c4c20596d3 + VFMADDSUB132PS (BX), Y15, Y11 // c46205961b + VFMADDSUB132PS (R11), Y15, Y11 // c44205961b + VFMADDSUB132PS Y2, Y15, Y11 // c4620596da + VFMADDSUB132PS Y11, Y15, Y11 // c4420596db + VFMADDSUB213PD (BX), X9, X2 // c4e2b1a613 + VFMADDSUB213PD (R11), X9, X2 // c4c2b1a613 + VFMADDSUB213PD X2, X9, X2 // c4e2b1a6d2 + VFMADDSUB213PD X11, X9, X2 // c4c2b1a6d3 + VFMADDSUB213PD (BX), X9, X11 // c462b1a61b + VFMADDSUB213PD (R11), X9, X11 // c442b1a61b + VFMADDSUB213PD X2, X9, X11 // c462b1a6da + VFMADDSUB213PD X11, X9, X11 // c442b1a6db + VFMADDSUB213PD (BX), Y15, Y2 // c4e285a613 + VFMADDSUB213PD (R11), Y15, Y2 // c4c285a613 + VFMADDSUB213PD Y2, Y15, Y2 // c4e285a6d2 + VFMADDSUB213PD Y11, Y15, Y2 // c4c285a6d3 + VFMADDSUB213PD (BX), Y15, Y11 // c46285a61b + VFMADDSUB213PD (R11), Y15, Y11 // c44285a61b + VFMADDSUB213PD Y2, Y15, Y11 // c46285a6da + VFMADDSUB213PD Y11, Y15, Y11 // c44285a6db + VFMADDSUB213PS (BX), X9, X2 // c4e231a613 + VFMADDSUB213PS (R11), X9, X2 // c4c231a613 + VFMADDSUB213PS X2, X9, X2 // c4e231a6d2 + VFMADDSUB213PS X11, X9, X2 // c4c231a6d3 + VFMADDSUB213PS (BX), X9, X11 // c46231a61b + VFMADDSUB213PS (R11), X9, X11 // c44231a61b + VFMADDSUB213PS X2, X9, X11 // c46231a6da + VFMADDSUB213PS X11, X9, X11 // c44231a6db + VFMADDSUB213PS (BX), Y15, Y2 // c4e205a613 + VFMADDSUB213PS (R11), Y15, Y2 // c4c205a613 + VFMADDSUB213PS Y2, Y15, Y2 // c4e205a6d2 + VFMADDSUB213PS Y11, Y15, Y2 // c4c205a6d3 + VFMADDSUB213PS (BX), Y15, Y11 // c46205a61b + VFMADDSUB213PS (R11), Y15, Y11 // c44205a61b + VFMADDSUB213PS Y2, Y15, Y11 
// c46205a6da + VFMADDSUB213PS Y11, Y15, Y11 // c44205a6db + VFMADDSUB231PD (BX), X9, X2 // c4e2b1b613 + VFMADDSUB231PD (R11), X9, X2 // c4c2b1b613 + VFMADDSUB231PD X2, X9, X2 // c4e2b1b6d2 + VFMADDSUB231PD X11, X9, X2 // c4c2b1b6d3 + VFMADDSUB231PD (BX), X9, X11 // c462b1b61b + VFMADDSUB231PD (R11), X9, X11 // c442b1b61b + VFMADDSUB231PD X2, X9, X11 // c462b1b6da + VFMADDSUB231PD X11, X9, X11 // c442b1b6db + VFMADDSUB231PD (BX), Y15, Y2 // c4e285b613 + VFMADDSUB231PD (R11), Y15, Y2 // c4c285b613 + VFMADDSUB231PD Y2, Y15, Y2 // c4e285b6d2 + VFMADDSUB231PD Y11, Y15, Y2 // c4c285b6d3 + VFMADDSUB231PD (BX), Y15, Y11 // c46285b61b + VFMADDSUB231PD (R11), Y15, Y11 // c44285b61b + VFMADDSUB231PD Y2, Y15, Y11 // c46285b6da + VFMADDSUB231PD Y11, Y15, Y11 // c44285b6db + VFMADDSUB231PS (BX), X9, X2 // c4e231b613 + VFMADDSUB231PS (R11), X9, X2 // c4c231b613 + VFMADDSUB231PS X2, X9, X2 // c4e231b6d2 + VFMADDSUB231PS X11, X9, X2 // c4c231b6d3 + VFMADDSUB231PS (BX), X9, X11 // c46231b61b + VFMADDSUB231PS (R11), X9, X11 // c44231b61b + VFMADDSUB231PS X2, X9, X11 // c46231b6da + VFMADDSUB231PS X11, X9, X11 // c44231b6db + VFMADDSUB231PS (BX), Y15, Y2 // c4e205b613 + VFMADDSUB231PS (R11), Y15, Y2 // c4c205b613 + VFMADDSUB231PS Y2, Y15, Y2 // c4e205b6d2 + VFMADDSUB231PS Y11, Y15, Y2 // c4c205b6d3 + VFMADDSUB231PS (BX), Y15, Y11 // c46205b61b + VFMADDSUB231PS (R11), Y15, Y11 // c44205b61b + VFMADDSUB231PS Y2, Y15, Y11 // c46205b6da + VFMADDSUB231PS Y11, Y15, Y11 // c44205b6db + VFMSUB132PD (BX), X9, X2 // c4e2b19a13 + VFMSUB132PD (R11), X9, X2 // c4c2b19a13 + VFMSUB132PD X2, X9, X2 // c4e2b19ad2 + VFMSUB132PD X11, X9, X2 // c4c2b19ad3 + VFMSUB132PD (BX), X9, X11 // c462b19a1b + VFMSUB132PD (R11), X9, X11 // c442b19a1b + VFMSUB132PD X2, X9, X11 // c462b19ada + VFMSUB132PD X11, X9, X11 // c442b19adb + VFMSUB132PD (BX), Y15, Y2 // c4e2859a13 + VFMSUB132PD (R11), Y15, Y2 // c4c2859a13 + VFMSUB132PD Y2, Y15, Y2 // c4e2859ad2 + VFMSUB132PD Y11, Y15, Y2 // c4c2859ad3 + VFMSUB132PD (BX), 
Y15, Y11 // c462859a1b + VFMSUB132PD (R11), Y15, Y11 // c442859a1b + VFMSUB132PD Y2, Y15, Y11 // c462859ada + VFMSUB132PD Y11, Y15, Y11 // c442859adb + VFMSUB132PS (BX), X9, X2 // c4e2319a13 + VFMSUB132PS (R11), X9, X2 // c4c2319a13 + VFMSUB132PS X2, X9, X2 // c4e2319ad2 + VFMSUB132PS X11, X9, X2 // c4c2319ad3 + VFMSUB132PS (BX), X9, X11 // c462319a1b + VFMSUB132PS (R11), X9, X11 // c442319a1b + VFMSUB132PS X2, X9, X11 // c462319ada + VFMSUB132PS X11, X9, X11 // c442319adb + VFMSUB132PS (BX), Y15, Y2 // c4e2059a13 + VFMSUB132PS (R11), Y15, Y2 // c4c2059a13 + VFMSUB132PS Y2, Y15, Y2 // c4e2059ad2 + VFMSUB132PS Y11, Y15, Y2 // c4c2059ad3 + VFMSUB132PS (BX), Y15, Y11 // c462059a1b + VFMSUB132PS (R11), Y15, Y11 // c442059a1b + VFMSUB132PS Y2, Y15, Y11 // c462059ada + VFMSUB132PS Y11, Y15, Y11 // c442059adb + VFMSUB132SD (BX), X9, X2 // c4e2b19b13 + VFMSUB132SD (R11), X9, X2 // c4c2b19b13 + VFMSUB132SD X2, X9, X2 // c4e2b19bd2 + VFMSUB132SD X11, X9, X2 // c4c2b19bd3 + VFMSUB132SD (BX), X9, X11 // c462b19b1b + VFMSUB132SD (R11), X9, X11 // c442b19b1b + VFMSUB132SD X2, X9, X11 // c462b19bda + VFMSUB132SD X11, X9, X11 // c442b19bdb + VFMSUB132SS (BX), X9, X2 // c4e2319b13 + VFMSUB132SS (R11), X9, X2 // c4c2319b13 + VFMSUB132SS X2, X9, X2 // c4e2319bd2 + VFMSUB132SS X11, X9, X2 // c4c2319bd3 + VFMSUB132SS (BX), X9, X11 // c462319b1b + VFMSUB132SS (R11), X9, X11 // c442319b1b + VFMSUB132SS X2, X9, X11 // c462319bda + VFMSUB132SS X11, X9, X11 // c442319bdb + VFMSUB213PD (BX), X9, X2 // c4e2b1aa13 + VFMSUB213PD (R11), X9, X2 // c4c2b1aa13 + VFMSUB213PD X2, X9, X2 // c4e2b1aad2 + VFMSUB213PD X11, X9, X2 // c4c2b1aad3 + VFMSUB213PD (BX), X9, X11 // c462b1aa1b + VFMSUB213PD (R11), X9, X11 // c442b1aa1b + VFMSUB213PD X2, X9, X11 // c462b1aada + VFMSUB213PD X11, X9, X11 // c442b1aadb + VFMSUB213PD (BX), Y15, Y2 // c4e285aa13 + VFMSUB213PD (R11), Y15, Y2 // c4c285aa13 + VFMSUB213PD Y2, Y15, Y2 // c4e285aad2 + VFMSUB213PD Y11, Y15, Y2 // c4c285aad3 + VFMSUB213PD (BX), Y15, Y11 // 
c46285aa1b + VFMSUB213PD (R11), Y15, Y11 // c44285aa1b + VFMSUB213PD Y2, Y15, Y11 // c46285aada + VFMSUB213PD Y11, Y15, Y11 // c44285aadb + VFMSUB213PS (BX), X9, X2 // c4e231aa13 + VFMSUB213PS (R11), X9, X2 // c4c231aa13 + VFMSUB213PS X2, X9, X2 // c4e231aad2 + VFMSUB213PS X11, X9, X2 // c4c231aad3 + VFMSUB213PS (BX), X9, X11 // c46231aa1b + VFMSUB213PS (R11), X9, X11 // c44231aa1b + VFMSUB213PS X2, X9, X11 // c46231aada + VFMSUB213PS X11, X9, X11 // c44231aadb + VFMSUB213PS (BX), Y15, Y2 // c4e205aa13 + VFMSUB213PS (R11), Y15, Y2 // c4c205aa13 + VFMSUB213PS Y2, Y15, Y2 // c4e205aad2 + VFMSUB213PS Y11, Y15, Y2 // c4c205aad3 + VFMSUB213PS (BX), Y15, Y11 // c46205aa1b + VFMSUB213PS (R11), Y15, Y11 // c44205aa1b + VFMSUB213PS Y2, Y15, Y11 // c46205aada + VFMSUB213PS Y11, Y15, Y11 // c44205aadb + VFMSUB213SD (BX), X9, X2 // c4e2b1ab13 + VFMSUB213SD (R11), X9, X2 // c4c2b1ab13 + VFMSUB213SD X2, X9, X2 // c4e2b1abd2 + VFMSUB213SD X11, X9, X2 // c4c2b1abd3 + VFMSUB213SD (BX), X9, X11 // c462b1ab1b + VFMSUB213SD (R11), X9, X11 // c442b1ab1b + VFMSUB213SD X2, X9, X11 // c462b1abda + VFMSUB213SD X11, X9, X11 // c442b1abdb + VFMSUB213SS (BX), X9, X2 // c4e231ab13 + VFMSUB213SS (R11), X9, X2 // c4c231ab13 + VFMSUB213SS X2, X9, X2 // c4e231abd2 + VFMSUB213SS X11, X9, X2 // c4c231abd3 + VFMSUB213SS (BX), X9, X11 // c46231ab1b + VFMSUB213SS (R11), X9, X11 // c44231ab1b + VFMSUB213SS X2, X9, X11 // c46231abda + VFMSUB213SS X11, X9, X11 // c44231abdb + VFMSUB231PD (BX), X9, X2 // c4e2b1ba13 + VFMSUB231PD (R11), X9, X2 // c4c2b1ba13 + VFMSUB231PD X2, X9, X2 // c4e2b1bad2 + VFMSUB231PD X11, X9, X2 // c4c2b1bad3 + VFMSUB231PD (BX), X9, X11 // c462b1ba1b + VFMSUB231PD (R11), X9, X11 // c442b1ba1b + VFMSUB231PD X2, X9, X11 // c462b1bada + VFMSUB231PD X11, X9, X11 // c442b1badb + VFMSUB231PD (BX), Y15, Y2 // c4e285ba13 + VFMSUB231PD (R11), Y15, Y2 // c4c285ba13 + VFMSUB231PD Y2, Y15, Y2 // c4e285bad2 + VFMSUB231PD Y11, Y15, Y2 // c4c285bad3 + VFMSUB231PD (BX), Y15, Y11 // c46285ba1b + 
VFMSUB231PD (R11), Y15, Y11 // c44285ba1b + VFMSUB231PD Y2, Y15, Y11 // c46285bada + VFMSUB231PD Y11, Y15, Y11 // c44285badb + VFMSUB231PS (BX), X9, X2 // c4e231ba13 + VFMSUB231PS (R11), X9, X2 // c4c231ba13 + VFMSUB231PS X2, X9, X2 // c4e231bad2 + VFMSUB231PS X11, X9, X2 // c4c231bad3 + VFMSUB231PS (BX), X9, X11 // c46231ba1b + VFMSUB231PS (R11), X9, X11 // c44231ba1b + VFMSUB231PS X2, X9, X11 // c46231bada + VFMSUB231PS X11, X9, X11 // c44231badb + VFMSUB231PS (BX), Y15, Y2 // c4e205ba13 + VFMSUB231PS (R11), Y15, Y2 // c4c205ba13 + VFMSUB231PS Y2, Y15, Y2 // c4e205bad2 + VFMSUB231PS Y11, Y15, Y2 // c4c205bad3 + VFMSUB231PS (BX), Y15, Y11 // c46205ba1b + VFMSUB231PS (R11), Y15, Y11 // c44205ba1b + VFMSUB231PS Y2, Y15, Y11 // c46205bada + VFMSUB231PS Y11, Y15, Y11 // c44205badb + VFMSUB231SD (BX), X9, X2 // c4e2b1bb13 + VFMSUB231SD (R11), X9, X2 // c4c2b1bb13 + VFMSUB231SD X2, X9, X2 // c4e2b1bbd2 + VFMSUB231SD X11, X9, X2 // c4c2b1bbd3 + VFMSUB231SD (BX), X9, X11 // c462b1bb1b + VFMSUB231SD (R11), X9, X11 // c442b1bb1b + VFMSUB231SD X2, X9, X11 // c462b1bbda + VFMSUB231SD X11, X9, X11 // c442b1bbdb + VFMSUB231SS (BX), X9, X2 // c4e231bb13 + VFMSUB231SS (R11), X9, X2 // c4c231bb13 + VFMSUB231SS X2, X9, X2 // c4e231bbd2 + VFMSUB231SS X11, X9, X2 // c4c231bbd3 + VFMSUB231SS (BX), X9, X11 // c46231bb1b + VFMSUB231SS (R11), X9, X11 // c44231bb1b + VFMSUB231SS X2, X9, X11 // c46231bbda + VFMSUB231SS X11, X9, X11 // c44231bbdb + VFMSUBADD132PD (BX), X9, X2 // c4e2b19713 + VFMSUBADD132PD (R11), X9, X2 // c4c2b19713 + VFMSUBADD132PD X2, X9, X2 // c4e2b197d2 + VFMSUBADD132PD X11, X9, X2 // c4c2b197d3 + VFMSUBADD132PD (BX), X9, X11 // c462b1971b + VFMSUBADD132PD (R11), X9, X11 // c442b1971b + VFMSUBADD132PD X2, X9, X11 // c462b197da + VFMSUBADD132PD X11, X9, X11 // c442b197db + VFMSUBADD132PD (BX), Y15, Y2 // c4e2859713 + VFMSUBADD132PD (R11), Y15, Y2 // c4c2859713 + VFMSUBADD132PD Y2, Y15, Y2 // c4e28597d2 + VFMSUBADD132PD Y11, Y15, Y2 // c4c28597d3 + VFMSUBADD132PD (BX), 
Y15, Y11 // c46285971b + VFMSUBADD132PD (R11), Y15, Y11 // c44285971b + VFMSUBADD132PD Y2, Y15, Y11 // c4628597da + VFMSUBADD132PD Y11, Y15, Y11 // c4428597db + VFMSUBADD132PS (BX), X9, X2 // c4e2319713 + VFMSUBADD132PS (R11), X9, X2 // c4c2319713 + VFMSUBADD132PS X2, X9, X2 // c4e23197d2 + VFMSUBADD132PS X11, X9, X2 // c4c23197d3 + VFMSUBADD132PS (BX), X9, X11 // c46231971b + VFMSUBADD132PS (R11), X9, X11 // c44231971b + VFMSUBADD132PS X2, X9, X11 // c4623197da + VFMSUBADD132PS X11, X9, X11 // c4423197db + VFMSUBADD132PS (BX), Y15, Y2 // c4e2059713 + VFMSUBADD132PS (R11), Y15, Y2 // c4c2059713 + VFMSUBADD132PS Y2, Y15, Y2 // c4e20597d2 + VFMSUBADD132PS Y11, Y15, Y2 // c4c20597d3 + VFMSUBADD132PS (BX), Y15, Y11 // c46205971b + VFMSUBADD132PS (R11), Y15, Y11 // c44205971b + VFMSUBADD132PS Y2, Y15, Y11 // c4620597da + VFMSUBADD132PS Y11, Y15, Y11 // c4420597db + VFMSUBADD213PD (BX), X9, X2 // c4e2b1a713 + VFMSUBADD213PD (R11), X9, X2 // c4c2b1a713 + VFMSUBADD213PD X2, X9, X2 // c4e2b1a7d2 + VFMSUBADD213PD X11, X9, X2 // c4c2b1a7d3 + VFMSUBADD213PD (BX), X9, X11 // c462b1a71b + VFMSUBADD213PD (R11), X9, X11 // c442b1a71b + VFMSUBADD213PD X2, X9, X11 // c462b1a7da + VFMSUBADD213PD X11, X9, X11 // c442b1a7db + VFMSUBADD213PD (BX), Y15, Y2 // c4e285a713 + VFMSUBADD213PD (R11), Y15, Y2 // c4c285a713 + VFMSUBADD213PD Y2, Y15, Y2 // c4e285a7d2 + VFMSUBADD213PD Y11, Y15, Y2 // c4c285a7d3 + VFMSUBADD213PD (BX), Y15, Y11 // c46285a71b + VFMSUBADD213PD (R11), Y15, Y11 // c44285a71b + VFMSUBADD213PD Y2, Y15, Y11 // c46285a7da + VFMSUBADD213PD Y11, Y15, Y11 // c44285a7db + VFMSUBADD213PS (BX), X9, X2 // c4e231a713 + VFMSUBADD213PS (R11), X9, X2 // c4c231a713 + VFMSUBADD213PS X2, X9, X2 // c4e231a7d2 + VFMSUBADD213PS X11, X9, X2 // c4c231a7d3 + VFMSUBADD213PS (BX), X9, X11 // c46231a71b + VFMSUBADD213PS (R11), X9, X11 // c44231a71b + VFMSUBADD213PS X2, X9, X11 // c46231a7da + VFMSUBADD213PS X11, X9, X11 // c44231a7db + VFMSUBADD213PS (BX), Y15, Y2 // c4e205a713 + VFMSUBADD213PS 
(R11), Y15, Y2 // c4c205a713 + VFMSUBADD213PS Y2, Y15, Y2 // c4e205a7d2 + VFMSUBADD213PS Y11, Y15, Y2 // c4c205a7d3 + VFMSUBADD213PS (BX), Y15, Y11 // c46205a71b + VFMSUBADD213PS (R11), Y15, Y11 // c44205a71b + VFMSUBADD213PS Y2, Y15, Y11 // c46205a7da + VFMSUBADD213PS Y11, Y15, Y11 // c44205a7db + VFMSUBADD231PD (BX), X9, X2 // c4e2b1b713 + VFMSUBADD231PD (R11), X9, X2 // c4c2b1b713 + VFMSUBADD231PD X2, X9, X2 // c4e2b1b7d2 + VFMSUBADD231PD X11, X9, X2 // c4c2b1b7d3 + VFMSUBADD231PD (BX), X9, X11 // c462b1b71b + VFMSUBADD231PD (R11), X9, X11 // c442b1b71b + VFMSUBADD231PD X2, X9, X11 // c462b1b7da + VFMSUBADD231PD X11, X9, X11 // c442b1b7db + VFMSUBADD231PD (BX), Y15, Y2 // c4e285b713 + VFMSUBADD231PD (R11), Y15, Y2 // c4c285b713 + VFMSUBADD231PD Y2, Y15, Y2 // c4e285b7d2 + VFMSUBADD231PD Y11, Y15, Y2 // c4c285b7d3 + VFMSUBADD231PD (BX), Y15, Y11 // c46285b71b + VFMSUBADD231PD (R11), Y15, Y11 // c44285b71b + VFMSUBADD231PD Y2, Y15, Y11 // c46285b7da + VFMSUBADD231PD Y11, Y15, Y11 // c44285b7db + VFMSUBADD231PS (BX), X9, X2 // c4e231b713 + VFMSUBADD231PS (R11), X9, X2 // c4c231b713 + VFMSUBADD231PS X2, X9, X2 // c4e231b7d2 + VFMSUBADD231PS X11, X9, X2 // c4c231b7d3 + VFMSUBADD231PS (BX), X9, X11 // c46231b71b + VFMSUBADD231PS (R11), X9, X11 // c44231b71b + VFMSUBADD231PS X2, X9, X11 // c46231b7da + VFMSUBADD231PS X11, X9, X11 // c44231b7db + VFMSUBADD231PS (BX), Y15, Y2 // c4e205b713 + VFMSUBADD231PS (R11), Y15, Y2 // c4c205b713 + VFMSUBADD231PS Y2, Y15, Y2 // c4e205b7d2 + VFMSUBADD231PS Y11, Y15, Y2 // c4c205b7d3 + VFMSUBADD231PS (BX), Y15, Y11 // c46205b71b + VFMSUBADD231PS (R11), Y15, Y11 // c44205b71b + VFMSUBADD231PS Y2, Y15, Y11 // c46205b7da + VFMSUBADD231PS Y11, Y15, Y11 // c44205b7db + VFNMADD132PD (BX), X9, X2 // c4e2b19c13 + VFNMADD132PD (R11), X9, X2 // c4c2b19c13 + VFNMADD132PD X2, X9, X2 // c4e2b19cd2 + VFNMADD132PD X11, X9, X2 // c4c2b19cd3 + VFNMADD132PD (BX), X9, X11 // c462b19c1b + VFNMADD132PD (R11), X9, X11 // c442b19c1b + VFNMADD132PD X2, X9, 
X11 // c462b19cda + VFNMADD132PD X11, X9, X11 // c442b19cdb + VFNMADD132PD (BX), Y15, Y2 // c4e2859c13 + VFNMADD132PD (R11), Y15, Y2 // c4c2859c13 + VFNMADD132PD Y2, Y15, Y2 // c4e2859cd2 + VFNMADD132PD Y11, Y15, Y2 // c4c2859cd3 + VFNMADD132PD (BX), Y15, Y11 // c462859c1b + VFNMADD132PD (R11), Y15, Y11 // c442859c1b + VFNMADD132PD Y2, Y15, Y11 // c462859cda + VFNMADD132PD Y11, Y15, Y11 // c442859cdb + VFNMADD132PS (BX), X9, X2 // c4e2319c13 + VFNMADD132PS (R11), X9, X2 // c4c2319c13 + VFNMADD132PS X2, X9, X2 // c4e2319cd2 + VFNMADD132PS X11, X9, X2 // c4c2319cd3 + VFNMADD132PS (BX), X9, X11 // c462319c1b + VFNMADD132PS (R11), X9, X11 // c442319c1b + VFNMADD132PS X2, X9, X11 // c462319cda + VFNMADD132PS X11, X9, X11 // c442319cdb + VFNMADD132PS (BX), Y15, Y2 // c4e2059c13 + VFNMADD132PS (R11), Y15, Y2 // c4c2059c13 + VFNMADD132PS Y2, Y15, Y2 // c4e2059cd2 + VFNMADD132PS Y11, Y15, Y2 // c4c2059cd3 + VFNMADD132PS (BX), Y15, Y11 // c462059c1b + VFNMADD132PS (R11), Y15, Y11 // c442059c1b + VFNMADD132PS Y2, Y15, Y11 // c462059cda + VFNMADD132PS Y11, Y15, Y11 // c442059cdb + VFNMADD132SD (BX), X9, X2 // c4e2b19d13 + VFNMADD132SD (R11), X9, X2 // c4c2b19d13 + VFNMADD132SD X2, X9, X2 // c4e2b19dd2 + VFNMADD132SD X11, X9, X2 // c4c2b19dd3 + VFNMADD132SD (BX), X9, X11 // c462b19d1b + VFNMADD132SD (R11), X9, X11 // c442b19d1b + VFNMADD132SD X2, X9, X11 // c462b19dda + VFNMADD132SD X11, X9, X11 // c442b19ddb + VFNMADD132SS (BX), X9, X2 // c4e2319d13 + VFNMADD132SS (R11), X9, X2 // c4c2319d13 + VFNMADD132SS X2, X9, X2 // c4e2319dd2 + VFNMADD132SS X11, X9, X2 // c4c2319dd3 + VFNMADD132SS (BX), X9, X11 // c462319d1b + VFNMADD132SS (R11), X9, X11 // c442319d1b + VFNMADD132SS X2, X9, X11 // c462319dda + VFNMADD132SS X11, X9, X11 // c442319ddb + VFNMADD213PD (BX), X9, X2 // c4e2b1ac13 + VFNMADD213PD (R11), X9, X2 // c4c2b1ac13 + VFNMADD213PD X2, X9, X2 // c4e2b1acd2 + VFNMADD213PD X11, X9, X2 // c4c2b1acd3 + VFNMADD213PD (BX), X9, X11 // c462b1ac1b + VFNMADD213PD (R11), X9, X11 // 
c442b1ac1b + VFNMADD213PD X2, X9, X11 // c462b1acda + VFNMADD213PD X11, X9, X11 // c442b1acdb + VFNMADD213PD (BX), Y15, Y2 // c4e285ac13 + VFNMADD213PD (R11), Y15, Y2 // c4c285ac13 + VFNMADD213PD Y2, Y15, Y2 // c4e285acd2 + VFNMADD213PD Y11, Y15, Y2 // c4c285acd3 + VFNMADD213PD (BX), Y15, Y11 // c46285ac1b + VFNMADD213PD (R11), Y15, Y11 // c44285ac1b + VFNMADD213PD Y2, Y15, Y11 // c46285acda + VFNMADD213PD Y11, Y15, Y11 // c44285acdb + VFNMADD213PS (BX), X9, X2 // c4e231ac13 + VFNMADD213PS (R11), X9, X2 // c4c231ac13 + VFNMADD213PS X2, X9, X2 // c4e231acd2 + VFNMADD213PS X11, X9, X2 // c4c231acd3 + VFNMADD213PS (BX), X9, X11 // c46231ac1b + VFNMADD213PS (R11), X9, X11 // c44231ac1b + VFNMADD213PS X2, X9, X11 // c46231acda + VFNMADD213PS X11, X9, X11 // c44231acdb + VFNMADD213PS (BX), Y15, Y2 // c4e205ac13 + VFNMADD213PS (R11), Y15, Y2 // c4c205ac13 + VFNMADD213PS Y2, Y15, Y2 // c4e205acd2 + VFNMADD213PS Y11, Y15, Y2 // c4c205acd3 + VFNMADD213PS (BX), Y15, Y11 // c46205ac1b + VFNMADD213PS (R11), Y15, Y11 // c44205ac1b + VFNMADD213PS Y2, Y15, Y11 // c46205acda + VFNMADD213PS Y11, Y15, Y11 // c44205acdb + VFNMADD213SD (BX), X9, X2 // c4e2b1ad13 + VFNMADD213SD (R11), X9, X2 // c4c2b1ad13 + VFNMADD213SD X2, X9, X2 // c4e2b1add2 + VFNMADD213SD X11, X9, X2 // c4c2b1add3 + VFNMADD213SD (BX), X9, X11 // c462b1ad1b + VFNMADD213SD (R11), X9, X11 // c442b1ad1b + VFNMADD213SD X2, X9, X11 // c462b1adda + VFNMADD213SD X11, X9, X11 // c442b1addb + VFNMADD213SS (BX), X9, X2 // c4e231ad13 + VFNMADD213SS (R11), X9, X2 // c4c231ad13 + VFNMADD213SS X2, X9, X2 // c4e231add2 + VFNMADD213SS X11, X9, X2 // c4c231add3 + VFNMADD213SS (BX), X9, X11 // c46231ad1b + VFNMADD213SS (R11), X9, X11 // c44231ad1b + VFNMADD213SS X2, X9, X11 // c46231adda + VFNMADD213SS X11, X9, X11 // c44231addb + VFNMADD231PD (BX), X9, X2 // c4e2b1bc13 + VFNMADD231PD (R11), X9, X2 // c4c2b1bc13 + VFNMADD231PD X2, X9, X2 // c4e2b1bcd2 + VFNMADD231PD X11, X9, X2 // c4c2b1bcd3 + VFNMADD231PD (BX), X9, X11 // c462b1bc1b 
+ VFNMADD231PD (R11), X9, X11 // c442b1bc1b + VFNMADD231PD X2, X9, X11 // c462b1bcda + VFNMADD231PD X11, X9, X11 // c442b1bcdb + VFNMADD231PD (BX), Y15, Y2 // c4e285bc13 + VFNMADD231PD (R11), Y15, Y2 // c4c285bc13 + VFNMADD231PD Y2, Y15, Y2 // c4e285bcd2 + VFNMADD231PD Y11, Y15, Y2 // c4c285bcd3 + VFNMADD231PD (BX), Y15, Y11 // c46285bc1b + VFNMADD231PD (R11), Y15, Y11 // c44285bc1b + VFNMADD231PD Y2, Y15, Y11 // c46285bcda + VFNMADD231PD Y11, Y15, Y11 // c44285bcdb + VFNMADD231PS (BX), X9, X2 // c4e231bc13 + VFNMADD231PS (R11), X9, X2 // c4c231bc13 + VFNMADD231PS X2, X9, X2 // c4e231bcd2 + VFNMADD231PS X11, X9, X2 // c4c231bcd3 + VFNMADD231PS (BX), X9, X11 // c46231bc1b + VFNMADD231PS (R11), X9, X11 // c44231bc1b + VFNMADD231PS X2, X9, X11 // c46231bcda + VFNMADD231PS X11, X9, X11 // c44231bcdb + VFNMADD231PS (BX), Y15, Y2 // c4e205bc13 + VFNMADD231PS (R11), Y15, Y2 // c4c205bc13 + VFNMADD231PS Y2, Y15, Y2 // c4e205bcd2 + VFNMADD231PS Y11, Y15, Y2 // c4c205bcd3 + VFNMADD231PS (BX), Y15, Y11 // c46205bc1b + VFNMADD231PS (R11), Y15, Y11 // c44205bc1b + VFNMADD231PS Y2, Y15, Y11 // c46205bcda + VFNMADD231PS Y11, Y15, Y11 // c44205bcdb + VFNMADD231SD (BX), X9, X2 // c4e2b1bd13 + VFNMADD231SD (R11), X9, X2 // c4c2b1bd13 + VFNMADD231SD X2, X9, X2 // c4e2b1bdd2 + VFNMADD231SD X11, X9, X2 // c4c2b1bdd3 + VFNMADD231SD (BX), X9, X11 // c462b1bd1b + VFNMADD231SD (R11), X9, X11 // c442b1bd1b + VFNMADD231SD X2, X9, X11 // c462b1bdda + VFNMADD231SD X11, X9, X11 // c442b1bddb + VFNMADD231SS (BX), X9, X2 // c4e231bd13 + VFNMADD231SS (R11), X9, X2 // c4c231bd13 + VFNMADD231SS X2, X9, X2 // c4e231bdd2 + VFNMADD231SS X11, X9, X2 // c4c231bdd3 + VFNMADD231SS (BX), X9, X11 // c46231bd1b + VFNMADD231SS (R11), X9, X11 // c44231bd1b + VFNMADD231SS X2, X9, X11 // c46231bdda + VFNMADD231SS X11, X9, X11 // c44231bddb + VFNMSUB132PD (BX), X9, X2 // c4e2b19e13 + VFNMSUB132PD (R11), X9, X2 // c4c2b19e13 + VFNMSUB132PD X2, X9, X2 // c4e2b19ed2 + VFNMSUB132PD X11, X9, X2 // c4c2b19ed3 + 
VFNMSUB132PD (BX), X9, X11 // c462b19e1b + VFNMSUB132PD (R11), X9, X11 // c442b19e1b + VFNMSUB132PD X2, X9, X11 // c462b19eda + VFNMSUB132PD X11, X9, X11 // c442b19edb + VFNMSUB132PD (BX), Y15, Y2 // c4e2859e13 + VFNMSUB132PD (R11), Y15, Y2 // c4c2859e13 + VFNMSUB132PD Y2, Y15, Y2 // c4e2859ed2 + VFNMSUB132PD Y11, Y15, Y2 // c4c2859ed3 + VFNMSUB132PD (BX), Y15, Y11 // c462859e1b + VFNMSUB132PD (R11), Y15, Y11 // c442859e1b + VFNMSUB132PD Y2, Y15, Y11 // c462859eda + VFNMSUB132PD Y11, Y15, Y11 // c442859edb + VFNMSUB132PS (BX), X9, X2 // c4e2319e13 + VFNMSUB132PS (R11), X9, X2 // c4c2319e13 + VFNMSUB132PS X2, X9, X2 // c4e2319ed2 + VFNMSUB132PS X11, X9, X2 // c4c2319ed3 + VFNMSUB132PS (BX), X9, X11 // c462319e1b + VFNMSUB132PS (R11), X9, X11 // c442319e1b + VFNMSUB132PS X2, X9, X11 // c462319eda + VFNMSUB132PS X11, X9, X11 // c442319edb + VFNMSUB132PS (BX), Y15, Y2 // c4e2059e13 + VFNMSUB132PS (R11), Y15, Y2 // c4c2059e13 + VFNMSUB132PS Y2, Y15, Y2 // c4e2059ed2 + VFNMSUB132PS Y11, Y15, Y2 // c4c2059ed3 + VFNMSUB132PS (BX), Y15, Y11 // c462059e1b + VFNMSUB132PS (R11), Y15, Y11 // c442059e1b + VFNMSUB132PS Y2, Y15, Y11 // c462059eda + VFNMSUB132PS Y11, Y15, Y11 // c442059edb + VFNMSUB132SD (BX), X9, X2 // c4e2b19f13 + VFNMSUB132SD (R11), X9, X2 // c4c2b19f13 + VFNMSUB132SD X2, X9, X2 // c4e2b19fd2 + VFNMSUB132SD X11, X9, X2 // c4c2b19fd3 + VFNMSUB132SD (BX), X9, X11 // c462b19f1b + VFNMSUB132SD (R11), X9, X11 // c442b19f1b + VFNMSUB132SD X2, X9, X11 // c462b19fda + VFNMSUB132SD X11, X9, X11 // c442b19fdb + VFNMSUB132SS (BX), X9, X2 // c4e2319f13 + VFNMSUB132SS (R11), X9, X2 // c4c2319f13 + VFNMSUB132SS X2, X9, X2 // c4e2319fd2 + VFNMSUB132SS X11, X9, X2 // c4c2319fd3 + VFNMSUB132SS (BX), X9, X11 // c462319f1b + VFNMSUB132SS (R11), X9, X11 // c442319f1b + VFNMSUB132SS X2, X9, X11 // c462319fda + VFNMSUB132SS X11, X9, X11 // c442319fdb + VFNMSUB213PD (BX), X9, X2 // c4e2b1ae13 + VFNMSUB213PD (R11), X9, X2 // c4c2b1ae13 + VFNMSUB213PD X2, X9, X2 // c4e2b1aed2 + 
VFNMSUB213PD X11, X9, X2 // c4c2b1aed3 + VFNMSUB213PD (BX), X9, X11 // c462b1ae1b + VFNMSUB213PD (R11), X9, X11 // c442b1ae1b + VFNMSUB213PD X2, X9, X11 // c462b1aeda + VFNMSUB213PD X11, X9, X11 // c442b1aedb + VFNMSUB213PD (BX), Y15, Y2 // c4e285ae13 + VFNMSUB213PD (R11), Y15, Y2 // c4c285ae13 + VFNMSUB213PD Y2, Y15, Y2 // c4e285aed2 + VFNMSUB213PD Y11, Y15, Y2 // c4c285aed3 + VFNMSUB213PD (BX), Y15, Y11 // c46285ae1b + VFNMSUB213PD (R11), Y15, Y11 // c44285ae1b + VFNMSUB213PD Y2, Y15, Y11 // c46285aeda + VFNMSUB213PD Y11, Y15, Y11 // c44285aedb + VFNMSUB213PS (BX), X9, X2 // c4e231ae13 + VFNMSUB213PS (R11), X9, X2 // c4c231ae13 + VFNMSUB213PS X2, X9, X2 // c4e231aed2 + VFNMSUB213PS X11, X9, X2 // c4c231aed3 + VFNMSUB213PS (BX), X9, X11 // c46231ae1b + VFNMSUB213PS (R11), X9, X11 // c44231ae1b + VFNMSUB213PS X2, X9, X11 // c46231aeda + VFNMSUB213PS X11, X9, X11 // c44231aedb + VFNMSUB213PS (BX), Y15, Y2 // c4e205ae13 + VFNMSUB213PS (R11), Y15, Y2 // c4c205ae13 + VFNMSUB213PS Y2, Y15, Y2 // c4e205aed2 + VFNMSUB213PS Y11, Y15, Y2 // c4c205aed3 + VFNMSUB213PS (BX), Y15, Y11 // c46205ae1b + VFNMSUB213PS (R11), Y15, Y11 // c44205ae1b + VFNMSUB213PS Y2, Y15, Y11 // c46205aeda + VFNMSUB213PS Y11, Y15, Y11 // c44205aedb + VFNMSUB213SD (BX), X9, X2 // c4e2b1af13 + VFNMSUB213SD (R11), X9, X2 // c4c2b1af13 + VFNMSUB213SD X2, X9, X2 // c4e2b1afd2 + VFNMSUB213SD X11, X9, X2 // c4c2b1afd3 + VFNMSUB213SD (BX), X9, X11 // c462b1af1b + VFNMSUB213SD (R11), X9, X11 // c442b1af1b + VFNMSUB213SD X2, X9, X11 // c462b1afda + VFNMSUB213SD X11, X9, X11 // c442b1afdb + VFNMSUB213SS (BX), X9, X2 // c4e231af13 + VFNMSUB213SS (R11), X9, X2 // c4c231af13 + VFNMSUB213SS X2, X9, X2 // c4e231afd2 + VFNMSUB213SS X11, X9, X2 // c4c231afd3 + VFNMSUB213SS (BX), X9, X11 // c46231af1b + VFNMSUB213SS (R11), X9, X11 // c44231af1b + VFNMSUB213SS X2, X9, X11 // c46231afda + VFNMSUB213SS X11, X9, X11 // c44231afdb + VFNMSUB231PD (BX), X9, X2 // c4e2b1be13 + VFNMSUB231PD (R11), X9, X2 // c4c2b1be13 + 
VFNMSUB231PD X2, X9, X2 // c4e2b1bed2 + VFNMSUB231PD X11, X9, X2 // c4c2b1bed3 + VFNMSUB231PD (BX), X9, X11 // c462b1be1b + VFNMSUB231PD (R11), X9, X11 // c442b1be1b + VFNMSUB231PD X2, X9, X11 // c462b1beda + VFNMSUB231PD X11, X9, X11 // c442b1bedb + VFNMSUB231PD (BX), Y15, Y2 // c4e285be13 + VFNMSUB231PD (R11), Y15, Y2 // c4c285be13 + VFNMSUB231PD Y2, Y15, Y2 // c4e285bed2 + VFNMSUB231PD Y11, Y15, Y2 // c4c285bed3 + VFNMSUB231PD (BX), Y15, Y11 // c46285be1b + VFNMSUB231PD (R11), Y15, Y11 // c44285be1b + VFNMSUB231PD Y2, Y15, Y11 // c46285beda + VFNMSUB231PD Y11, Y15, Y11 // c44285bedb + VFNMSUB231PS (BX), X9, X2 // c4e231be13 + VFNMSUB231PS (R11), X9, X2 // c4c231be13 + VFNMSUB231PS X2, X9, X2 // c4e231bed2 + VFNMSUB231PS X11, X9, X2 // c4c231bed3 + VFNMSUB231PS (BX), X9, X11 // c46231be1b + VFNMSUB231PS (R11), X9, X11 // c44231be1b + VFNMSUB231PS X2, X9, X11 // c46231beda + VFNMSUB231PS X11, X9, X11 // c44231bedb + VFNMSUB231PS (BX), Y15, Y2 // c4e205be13 + VFNMSUB231PS (R11), Y15, Y2 // c4c205be13 + VFNMSUB231PS Y2, Y15, Y2 // c4e205bed2 + VFNMSUB231PS Y11, Y15, Y2 // c4c205bed3 + VFNMSUB231PS (BX), Y15, Y11 // c46205be1b + VFNMSUB231PS (R11), Y15, Y11 // c44205be1b + VFNMSUB231PS Y2, Y15, Y11 // c46205beda + VFNMSUB231PS Y11, Y15, Y11 // c44205bedb + VFNMSUB231SD (BX), X9, X2 // c4e2b1bf13 + VFNMSUB231SD (R11), X9, X2 // c4c2b1bf13 + VFNMSUB231SD X2, X9, X2 // c4e2b1bfd2 + VFNMSUB231SD X11, X9, X2 // c4c2b1bfd3 + VFNMSUB231SD (BX), X9, X11 // c462b1bf1b + VFNMSUB231SD (R11), X9, X11 // c442b1bf1b + VFNMSUB231SD X2, X9, X11 // c462b1bfda + VFNMSUB231SD X11, X9, X11 // c442b1bfdb + VFNMSUB231SS (BX), X9, X2 // c4e231bf13 + VFNMSUB231SS (R11), X9, X2 // c4c231bf13 + VFNMSUB231SS X2, X9, X2 // c4e231bfd2 + VFNMSUB231SS X11, X9, X2 // c4c231bfd3 + VFNMSUB231SS (BX), X9, X11 // c46231bf1b + VFNMSUB231SS (R11), X9, X11 // c44231bf1b + VFNMSUB231SS X2, X9, X11 // c46231bfda + VFNMSUB231SS X11, X9, X11 // c44231bfdb + VHADDPD (BX), X9, X2 // c4e1317c13 or c5b17c13 + 
VHADDPD (R11), X9, X2 // c4c1317c13 + VHADDPD X2, X9, X2 // c4e1317cd2 or c5b17cd2 + VHADDPD X11, X9, X2 // c4c1317cd3 + VHADDPD (BX), X9, X11 // c461317c1b or c5317c1b + VHADDPD (R11), X9, X11 // c441317c1b + VHADDPD X2, X9, X11 // c461317cda or c5317cda + VHADDPD X11, X9, X11 // c441317cdb + VHADDPD (BX), Y15, Y2 // c4e1057c13 or c5857c13 + VHADDPD (R11), Y15, Y2 // c4c1057c13 + VHADDPD Y2, Y15, Y2 // c4e1057cd2 or c5857cd2 + VHADDPD Y11, Y15, Y2 // c4c1057cd3 + VHADDPD (BX), Y15, Y11 // c461057c1b or c5057c1b + VHADDPD (R11), Y15, Y11 // c441057c1b + VHADDPD Y2, Y15, Y11 // c461057cda or c5057cda + VHADDPD Y11, Y15, Y11 // c441057cdb + VHADDPS (BX), X9, X2 // c4e1337c13 or c5b37c13 + VHADDPS (R11), X9, X2 // c4c1337c13 + VHADDPS X2, X9, X2 // c4e1337cd2 or c5b37cd2 + VHADDPS X11, X9, X2 // c4c1337cd3 + VHADDPS (BX), X9, X11 // c461337c1b or c5337c1b + VHADDPS (R11), X9, X11 // c441337c1b + VHADDPS X2, X9, X11 // c461337cda or c5337cda + VHADDPS X11, X9, X11 // c441337cdb + VHADDPS (BX), Y15, Y2 // c4e1077c13 or c5877c13 + VHADDPS (R11), Y15, Y2 // c4c1077c13 + VHADDPS Y2, Y15, Y2 // c4e1077cd2 or c5877cd2 + VHADDPS Y11, Y15, Y2 // c4c1077cd3 + VHADDPS (BX), Y15, Y11 // c461077c1b or c5077c1b + VHADDPS (R11), Y15, Y11 // c441077c1b + VHADDPS Y2, Y15, Y11 // c461077cda or c5077cda + VHADDPS Y11, Y15, Y11 // c441077cdb + VHSUBPD (BX), X9, X2 // c4e1317d13 or c5b17d13 + VHSUBPD (R11), X9, X2 // c4c1317d13 + VHSUBPD X2, X9, X2 // c4e1317dd2 or c5b17dd2 + VHSUBPD X11, X9, X2 // c4c1317dd3 + VHSUBPD (BX), X9, X11 // c461317d1b or c5317d1b + VHSUBPD (R11), X9, X11 // c441317d1b + VHSUBPD X2, X9, X11 // c461317dda or c5317dda + VHSUBPD X11, X9, X11 // c441317ddb + VHSUBPD (BX), Y15, Y2 // c4e1057d13 or c5857d13 + VHSUBPD (R11), Y15, Y2 // c4c1057d13 + VHSUBPD Y2, Y15, Y2 // c4e1057dd2 or c5857dd2 + VHSUBPD Y11, Y15, Y2 // c4c1057dd3 + VHSUBPD (BX), Y15, Y11 // c461057d1b or c5057d1b + VHSUBPD (R11), Y15, Y11 // c441057d1b + VHSUBPD Y2, Y15, Y11 // c461057dda or c5057dda 
+ VHSUBPD Y11, Y15, Y11 // c441057ddb + VHSUBPS (BX), X9, X2 // c4e1337d13 or c5b37d13 + VHSUBPS (R11), X9, X2 // c4c1337d13 + VHSUBPS X2, X9, X2 // c4e1337dd2 or c5b37dd2 + VHSUBPS X11, X9, X2 // c4c1337dd3 + VHSUBPS (BX), X9, X11 // c461337d1b or c5337d1b + VHSUBPS (R11), X9, X11 // c441337d1b + VHSUBPS X2, X9, X11 // c461337dda or c5337dda + VHSUBPS X11, X9, X11 // c441337ddb + VHSUBPS (BX), Y15, Y2 // c4e1077d13 or c5877d13 + VHSUBPS (R11), Y15, Y2 // c4c1077d13 + VHSUBPS Y2, Y15, Y2 // c4e1077dd2 or c5877dd2 + VHSUBPS Y11, Y15, Y2 // c4c1077dd3 + VHSUBPS (BX), Y15, Y11 // c461077d1b or c5077d1b + VHSUBPS (R11), Y15, Y11 // c441077d1b + VHSUBPS Y2, Y15, Y11 // c461077dda or c5077dda + VHSUBPS Y11, Y15, Y11 // c441077ddb + VINSERTF128 $7, (BX), Y15, Y2 // c4e305181307 + VINSERTF128 $7, (R11), Y15, Y2 // c4c305181307 + VINSERTF128 $7, X2, Y15, Y2 // c4e30518d207 + VINSERTF128 $7, X11, Y15, Y2 // c4c30518d307 + VINSERTF128 $7, (BX), Y15, Y11 // c46305181b07 + VINSERTF128 $7, (R11), Y15, Y11 // c44305181b07 + VINSERTF128 $7, X2, Y15, Y11 // c4630518da07 + VINSERTF128 $7, X11, Y15, Y11 // c4430518db07 + VINSERTI128 $7, (BX), Y15, Y2 // c4e305381307 + VINSERTI128 $7, (R11), Y15, Y2 // c4c305381307 + VINSERTI128 $7, X2, Y15, Y2 // c4e30538d207 + VINSERTI128 $7, X11, Y15, Y2 // c4c30538d307 + VINSERTI128 $7, (BX), Y15, Y11 // c46305381b07 + VINSERTI128 $7, (R11), Y15, Y11 // c44305381b07 + VINSERTI128 $7, X2, Y15, Y11 // c4630538da07 + VINSERTI128 $7, X11, Y15, Y11 // c4430538db07 + VINSERTPS $7, (BX), X9, X2 // c4e331211307 + VINSERTPS $7, (R11), X9, X2 // c4c331211307 + VINSERTPS $7, X2, X9, X2 // c4e33121d207 + VINSERTPS $7, X11, X9, X2 // c4c33121d307 + VINSERTPS $7, (BX), X9, X11 // c46331211b07 + VINSERTPS $7, (R11), X9, X11 // c44331211b07 + VINSERTPS $7, X2, X9, X11 // c4633121da07 + VINSERTPS $7, X11, X9, X11 // c4433121db07 + VLDDQU (BX), X2 // c4e17bf013 or c5fbf013 + VLDDQU (R11), X2 // c4c17bf013 + VLDDQU (BX), X11 // c4617bf01b or c57bf01b + VLDDQU (R11), 
X11 // c4417bf01b + VLDDQU (BX), Y2 // c4e17ff013 or c5fff013 + VLDDQU (R11), Y2 // c4c17ff013 + VLDDQU (BX), Y11 // c4617ff01b or c57ff01b + VLDDQU (R11), Y11 // c4417ff01b + VLDMXCSR (BX) // c4e178ae13 or c5f8ae13 + VLDMXCSR (R11) // c4c178ae13 + VMASKMOVDQU X2, X2 // c4e179f7d2 or c5f9f7d2 + VMASKMOVDQU X11, X2 // c4c179f7d3 + VMASKMOVDQU X2, X11 // c46179f7da or c579f7da + VMASKMOVDQU X11, X11 // c44179f7db + VMASKMOVPD X2, X9, (BX) // c4e2312f13 + VMASKMOVPD X11, X9, (BX) // c462312f1b + VMASKMOVPD X2, X9, (R11) // c4c2312f13 + VMASKMOVPD X11, X9, (R11) // c442312f1b + VMASKMOVPD Y2, Y15, (BX) // c4e2052f13 + VMASKMOVPD Y11, Y15, (BX) // c462052f1b + VMASKMOVPD Y2, Y15, (R11) // c4c2052f13 + VMASKMOVPD Y11, Y15, (R11) // c442052f1b + VMASKMOVPD (BX), X9, X2 // c4e2312d13 + VMASKMOVPD (R11), X9, X2 // c4c2312d13 + VMASKMOVPD (BX), X9, X11 // c462312d1b + VMASKMOVPD (R11), X9, X11 // c442312d1b + VMASKMOVPD (BX), Y15, Y2 // c4e2052d13 + VMASKMOVPD (R11), Y15, Y2 // c4c2052d13 + VMASKMOVPD (BX), Y15, Y11 // c462052d1b + VMASKMOVPD (R11), Y15, Y11 // c442052d1b + VMASKMOVPS X2, X9, (BX) // c4e2312e13 + VMASKMOVPS X11, X9, (BX) // c462312e1b + VMASKMOVPS X2, X9, (R11) // c4c2312e13 + VMASKMOVPS X11, X9, (R11) // c442312e1b + VMASKMOVPS Y2, Y15, (BX) // c4e2052e13 + VMASKMOVPS Y11, Y15, (BX) // c462052e1b + VMASKMOVPS Y2, Y15, (R11) // c4c2052e13 + VMASKMOVPS Y11, Y15, (R11) // c442052e1b + VMASKMOVPS (BX), X9, X2 // c4e2312c13 + VMASKMOVPS (R11), X9, X2 // c4c2312c13 + VMASKMOVPS (BX), X9, X11 // c462312c1b + VMASKMOVPS (R11), X9, X11 // c442312c1b + VMASKMOVPS (BX), Y15, Y2 // c4e2052c13 + VMASKMOVPS (R11), Y15, Y2 // c4c2052c13 + VMASKMOVPS (BX), Y15, Y11 // c462052c1b + VMASKMOVPS (R11), Y15, Y11 // c442052c1b + VMAXPD (BX), X9, X2 // c4e1315f13 or c5b15f13 + VMAXPD (R11), X9, X2 // c4c1315f13 + VMAXPD X2, X9, X2 // c4e1315fd2 or c5b15fd2 + VMAXPD X11, X9, X2 // c4c1315fd3 + VMAXPD (BX), X9, X11 // c461315f1b or c5315f1b + VMAXPD (R11), X9, X11 // c441315f1b + 
VMAXPD X2, X9, X11 // c461315fda or c5315fda + VMAXPD X11, X9, X11 // c441315fdb + VMAXPD (BX), Y15, Y2 // c4e1055f13 or c5855f13 + VMAXPD (R11), Y15, Y2 // c4c1055f13 + VMAXPD Y2, Y15, Y2 // c4e1055fd2 or c5855fd2 + VMAXPD Y11, Y15, Y2 // c4c1055fd3 + VMAXPD (BX), Y15, Y11 // c461055f1b or c5055f1b + VMAXPD (R11), Y15, Y11 // c441055f1b + VMAXPD Y2, Y15, Y11 // c461055fda or c5055fda + VMAXPD Y11, Y15, Y11 // c441055fdb + VMAXPS (BX), X9, X2 // c4e1305f13 or c5b05f13 + VMAXPS (R11), X9, X2 // c4c1305f13 + VMAXPS X2, X9, X2 // c4e1305fd2 or c5b05fd2 + VMAXPS X11, X9, X2 // c4c1305fd3 + VMAXPS (BX), X9, X11 // c461305f1b or c5305f1b + VMAXPS (R11), X9, X11 // c441305f1b + VMAXPS X2, X9, X11 // c461305fda or c5305fda + VMAXPS X11, X9, X11 // c441305fdb + VMAXPS (BX), Y15, Y2 // c4e1045f13 or c5845f13 + VMAXPS (R11), Y15, Y2 // c4c1045f13 + VMAXPS Y2, Y15, Y2 // c4e1045fd2 or c5845fd2 + VMAXPS Y11, Y15, Y2 // c4c1045fd3 + VMAXPS (BX), Y15, Y11 // c461045f1b or c5045f1b + VMAXPS (R11), Y15, Y11 // c441045f1b + VMAXPS Y2, Y15, Y11 // c461045fda or c5045fda + VMAXPS Y11, Y15, Y11 // c441045fdb + VMAXSD (BX), X9, X2 // c4e1335f13 or c5b35f13 + VMAXSD (R11), X9, X2 // c4c1335f13 + VMAXSD X2, X9, X2 // c4e1335fd2 or c5b35fd2 + VMAXSD X11, X9, X2 // c4c1335fd3 + VMAXSD (BX), X9, X11 // c461335f1b or c5335f1b + VMAXSD (R11), X9, X11 // c441335f1b + VMAXSD X2, X9, X11 // c461335fda or c5335fda + VMAXSD X11, X9, X11 // c441335fdb + VMAXSS (BX), X9, X2 // c4e1325f13 or c5b25f13 + VMAXSS (R11), X9, X2 // c4c1325f13 + VMAXSS X2, X9, X2 // c4e1325fd2 or c5b25fd2 + VMAXSS X11, X9, X2 // c4c1325fd3 + VMAXSS (BX), X9, X11 // c461325f1b or c5325f1b + VMAXSS (R11), X9, X11 // c441325f1b + VMAXSS X2, X9, X11 // c461325fda or c5325fda + VMAXSS X11, X9, X11 // c441325fdb + VMINPD (BX), X9, X2 // c4e1315d13 or c5b15d13 + VMINPD (R11), X9, X2 // c4c1315d13 + VMINPD X2, X9, X2 // c4e1315dd2 or c5b15dd2 + VMINPD X11, X9, X2 // c4c1315dd3 + VMINPD (BX), X9, X11 // c461315d1b or c5315d1b + 
VMINPD (R11), X9, X11 // c441315d1b + VMINPD X2, X9, X11 // c461315dda or c5315dda + VMINPD X11, X9, X11 // c441315ddb + VMINPD (BX), Y15, Y2 // c4e1055d13 or c5855d13 + VMINPD (R11), Y15, Y2 // c4c1055d13 + VMINPD Y2, Y15, Y2 // c4e1055dd2 or c5855dd2 + VMINPD Y11, Y15, Y2 // c4c1055dd3 + VMINPD (BX), Y15, Y11 // c461055d1b or c5055d1b + VMINPD (R11), Y15, Y11 // c441055d1b + VMINPD Y2, Y15, Y11 // c461055dda or c5055dda + VMINPD Y11, Y15, Y11 // c441055ddb + VMINPS (BX), X9, X2 // c4e1305d13 or c5b05d13 + VMINPS (R11), X9, X2 // c4c1305d13 + VMINPS X2, X9, X2 // c4e1305dd2 or c5b05dd2 + VMINPS X11, X9, X2 // c4c1305dd3 + VMINPS (BX), X9, X11 // c461305d1b or c5305d1b + VMINPS (R11), X9, X11 // c441305d1b + VMINPS X2, X9, X11 // c461305dda or c5305dda + VMINPS X11, X9, X11 // c441305ddb + VMINPS (BX), Y15, Y2 // c4e1045d13 or c5845d13 + VMINPS (R11), Y15, Y2 // c4c1045d13 + VMINPS Y2, Y15, Y2 // c4e1045dd2 or c5845dd2 + VMINPS Y11, Y15, Y2 // c4c1045dd3 + VMINPS (BX), Y15, Y11 // c461045d1b or c5045d1b + VMINPS (R11), Y15, Y11 // c441045d1b + VMINPS Y2, Y15, Y11 // c461045dda or c5045dda + VMINPS Y11, Y15, Y11 // c441045ddb + VMINSD (BX), X9, X2 // c4e1335d13 or c5b35d13 + VMINSD (R11), X9, X2 // c4c1335d13 + VMINSD X2, X9, X2 // c4e1335dd2 or c5b35dd2 + VMINSD X11, X9, X2 // c4c1335dd3 + VMINSD (BX), X9, X11 // c461335d1b or c5335d1b + VMINSD (R11), X9, X11 // c441335d1b + VMINSD X2, X9, X11 // c461335dda or c5335dda + VMINSD X11, X9, X11 // c441335ddb + VMINSS (BX), X9, X2 // c4e1325d13 or c5b25d13 + VMINSS (R11), X9, X2 // c4c1325d13 + VMINSS X2, X9, X2 // c4e1325dd2 or c5b25dd2 + VMINSS X11, X9, X2 // c4c1325dd3 + VMINSS (BX), X9, X11 // c461325d1b or c5325d1b + VMINSS (R11), X9, X11 // c441325d1b + VMINSS X2, X9, X11 // c461325dda or c5325dda + VMINSS X11, X9, X11 // c441325ddb + VMOVAPD (BX), X2 // c4e1792813 or c5f92813 + VMOVAPD (R11), X2 // c4c1792813 + VMOVAPD X2, X2 // c4e17928d2 or c5f928d2 or c4e17929d2 or c5f929d2 + VMOVAPD X11, X2 // c4c17928d3 or 
c4617929da or c57929da + VMOVAPD (BX), X11 // c46179281b or c579281b + VMOVAPD (R11), X11 // c44179281b + VMOVAPD X2, X11 // c4617928da or c57928da or c4c17929d3 + VMOVAPD X11, X11 // c4417928db or c4417929db + VMOVAPD X2, (BX) // c4e1792913 or c5f92913 + VMOVAPD X11, (BX) // c46179291b or c579291b + VMOVAPD X2, (R11) // c4c1792913 + VMOVAPD X11, (R11) // c44179291b + VMOVAPD (BX), Y2 // c4e17d2813 or c5fd2813 + VMOVAPD (R11), Y2 // c4c17d2813 + VMOVAPD Y2, Y2 // c4e17d28d2 or c5fd28d2 or c4e17d29d2 or c5fd29d2 + VMOVAPD Y11, Y2 // c4c17d28d3 or c4617d29da or c57d29da + VMOVAPD (BX), Y11 // c4617d281b or c57d281b + VMOVAPD (R11), Y11 // c4417d281b + VMOVAPD Y2, Y11 // c4617d28da or c57d28da or c4c17d29d3 + VMOVAPD Y11, Y11 // c4417d28db or c4417d29db + VMOVAPD Y2, (BX) // c4e17d2913 or c5fd2913 + VMOVAPD Y11, (BX) // c4617d291b or c57d291b + VMOVAPD Y2, (R11) // c4c17d2913 + VMOVAPD Y11, (R11) // c4417d291b + VMOVAPS (BX), X2 // c4e1782813 or c5f82813 + VMOVAPS (R11), X2 // c4c1782813 + VMOVAPS X2, X2 // c4e17828d2 or c5f828d2 or c4e17829d2 or c5f829d2 + VMOVAPS X11, X2 // c4c17828d3 or c4617829da or c57829da + VMOVAPS (BX), X11 // c46178281b or c578281b + VMOVAPS (R11), X11 // c44178281b + VMOVAPS X2, X11 // c4617828da or c57828da or c4c17829d3 + VMOVAPS X11, X11 // c4417828db or c4417829db + VMOVAPS X2, (BX) // c4e1782913 or c5f82913 + VMOVAPS X11, (BX) // c46178291b or c578291b + VMOVAPS X2, (R11) // c4c1782913 + VMOVAPS X11, (R11) // c44178291b + VMOVAPS (BX), Y2 // c4e17c2813 or c5fc2813 + VMOVAPS (R11), Y2 // c4c17c2813 + VMOVAPS Y2, Y2 // c4e17c28d2 or c5fc28d2 or c4e17c29d2 or c5fc29d2 + VMOVAPS Y11, Y2 // c4c17c28d3 or c4617c29da or c57c29da + VMOVAPS (BX), Y11 // c4617c281b or c57c281b + VMOVAPS (R11), Y11 // c4417c281b + VMOVAPS Y2, Y11 // c4617c28da or c57c28da or c4c17c29d3 + VMOVAPS Y11, Y11 // c4417c28db or c4417c29db + VMOVAPS Y2, (BX) // c4e17c2913 or c5fc2913 + VMOVAPS Y11, (BX) // c4617c291b or c57c291b + VMOVAPS Y2, (R11) // c4c17c2913 + VMOVAPS 
Y11, (R11) // c4417c291b + VMOVD X2, (BX) // c4e1797e13 or c5f97e13 + VMOVD X11, (BX) // c461797e1b or c5797e1b + VMOVD X2, (R11) // c4c1797e13 + VMOVD X11, (R11) // c441797e1b + VMOVD X2, DX // c4e1797ed2 or c5f97ed2 + VMOVD X11, DX // c461797eda or c5797eda + VMOVD X2, R11 // c4c1797ed3 + VMOVD X11, R11 // c441797edb + VMOVD (BX), X2 // c4e1796e13 or c5f96e13 + VMOVD (R11), X2 // c4c1796e13 + VMOVD DX, X2 // c4e1796ed2 or c5f96ed2 + VMOVD R11, X2 // c4c1796ed3 + VMOVD (BX), X11 // c461796e1b or c5796e1b + VMOVD (R11), X11 // c441796e1b + VMOVD DX, X11 // c461796eda or c5796eda + VMOVD R11, X11 // c441796edb + VMOVDDUP (BX), X2 // c4e17b1213 or c5fb1213 + VMOVDDUP (R11), X2 // c4c17b1213 + VMOVDDUP X2, X2 // c4e17b12d2 or c5fb12d2 + VMOVDDUP X11, X2 // c4c17b12d3 + VMOVDDUP (BX), X11 // c4617b121b or c57b121b + VMOVDDUP (R11), X11 // c4417b121b + VMOVDDUP X2, X11 // c4617b12da or c57b12da + VMOVDDUP X11, X11 // c4417b12db + VMOVDDUP (BX), Y2 // c4e17f1213 or c5ff1213 + VMOVDDUP (R11), Y2 // c4c17f1213 + VMOVDDUP Y2, Y2 // c4e17f12d2 or c5ff12d2 + VMOVDDUP Y11, Y2 // c4c17f12d3 + VMOVDDUP (BX), Y11 // c4617f121b or c57f121b + VMOVDDUP (R11), Y11 // c4417f121b + VMOVDDUP Y2, Y11 // c4617f12da or c57f12da + VMOVDDUP Y11, Y11 // c4417f12db + VMOVDQA (BX), X2 // c4e1796f13 or c5f96f13 + VMOVDQA (R11), X2 // c4c1796f13 + VMOVDQA X2, X2 // c4e1796fd2 or c5f96fd2 or c4e1797fd2 or c5f97fd2 + VMOVDQA X11, X2 // c4c1796fd3 or c461797fda or c5797fda + VMOVDQA (BX), X11 // c461796f1b or c5796f1b + VMOVDQA (R11), X11 // c441796f1b + VMOVDQA X2, X11 // c461796fda or c5796fda or c4c1797fd3 + VMOVDQA X11, X11 // c441796fdb or c441797fdb + VMOVDQA X2, (BX) // c4e1797f13 or c5f97f13 + VMOVDQA X11, (BX) // c461797f1b or c5797f1b + VMOVDQA X2, (R11) // c4c1797f13 + VMOVDQA X11, (R11) // c441797f1b + VMOVDQA (BX), Y2 // c4e17d6f13 or c5fd6f13 + VMOVDQA (R11), Y2 // c4c17d6f13 + VMOVDQA Y2, Y2 // c4e17d6fd2 or c5fd6fd2 or c4e17d7fd2 or c5fd7fd2 + VMOVDQA Y11, Y2 // c4c17d6fd3 or 
c4617d7fda or c57d7fda + VMOVDQA (BX), Y11 // c4617d6f1b or c57d6f1b + VMOVDQA (R11), Y11 // c4417d6f1b + VMOVDQA Y2, Y11 // c4617d6fda or c57d6fda or c4c17d7fd3 + VMOVDQA Y11, Y11 // c4417d6fdb or c4417d7fdb + VMOVDQA Y2, (BX) // c4e17d7f13 or c5fd7f13 + VMOVDQA Y11, (BX) // c4617d7f1b or c57d7f1b + VMOVDQA Y2, (R11) // c4c17d7f13 + VMOVDQA Y11, (R11) // c4417d7f1b + VMOVDQU (BX), X2 // c4e17a6f13 or c5fa6f13 + VMOVDQU (R11), X2 // c4c17a6f13 + VMOVDQU X2, X2 // c4e17a6fd2 or c5fa6fd2 or c4e17a7fd2 or c5fa7fd2 + VMOVDQU X11, X2 // c4c17a6fd3 or c4617a7fda or c57a7fda + VMOVDQU (BX), X11 // c4617a6f1b or c57a6f1b + VMOVDQU (R11), X11 // c4417a6f1b + VMOVDQU X2, X11 // c4617a6fda or c57a6fda or c4c17a7fd3 + VMOVDQU X11, X11 // c4417a6fdb or c4417a7fdb + VMOVDQU X2, (BX) // c4e17a7f13 or c5fa7f13 + VMOVDQU X11, (BX) // c4617a7f1b or c57a7f1b + VMOVDQU X2, (R11) // c4c17a7f13 + VMOVDQU X11, (R11) // c4417a7f1b + VMOVDQU (BX), Y2 // c4e17e6f13 or c5fe6f13 + VMOVDQU (R11), Y2 // c4c17e6f13 + VMOVDQU Y2, Y2 // c4e17e6fd2 or c5fe6fd2 or c4e17e7fd2 or c5fe7fd2 + VMOVDQU Y11, Y2 // c4c17e6fd3 or c4617e7fda or c57e7fda + VMOVDQU (BX), Y11 // c4617e6f1b or c57e6f1b + VMOVDQU (R11), Y11 // c4417e6f1b + VMOVDQU Y2, Y11 // c4617e6fda or c57e6fda or c4c17e7fd3 + VMOVDQU Y11, Y11 // c4417e6fdb or c4417e7fdb + VMOVDQU Y2, (BX) // c4e17e7f13 or c5fe7f13 + VMOVDQU Y11, (BX) // c4617e7f1b or c57e7f1b + VMOVDQU Y2, (R11) // c4c17e7f13 + VMOVDQU Y11, (R11) // c4417e7f1b + VMOVHLPS X2, X9, X2 // c4e13012d2 or c5b012d2 + VMOVHLPS X11, X9, X2 // c4c13012d3 + VMOVHLPS X2, X9, X11 // c4613012da or c53012da + VMOVHLPS X11, X9, X11 // c4413012db + VMOVHPD X2, (BX) // c4e1791713 or c5f91713 + VMOVHPD X11, (BX) // c46179171b or c579171b + VMOVHPD X2, (R11) // c4c1791713 + VMOVHPD X11, (R11) // c44179171b + VMOVHPD (BX), X9, X2 // c4e1311613 or c5b11613 + VMOVHPD (R11), X9, X2 // c4c1311613 + VMOVHPD (BX), X9, X11 // c46131161b or c531161b + VMOVHPD (R11), X9, X11 // c44131161b + VMOVHPS X2, (BX) 
// c4e1781713 or c5f81713 + VMOVHPS X11, (BX) // c46178171b or c578171b + VMOVHPS X2, (R11) // c4c1781713 + VMOVHPS X11, (R11) // c44178171b + VMOVHPS (BX), X9, X2 // c4e1301613 or c5b01613 + VMOVHPS (R11), X9, X2 // c4c1301613 + VMOVHPS (BX), X9, X11 // c46130161b or c530161b + VMOVHPS (R11), X9, X11 // c44130161b + VMOVLHPS X2, X9, X2 // c4e13016d2 or c5b016d2 + VMOVLHPS X11, X9, X2 // c4c13016d3 + VMOVLHPS X2, X9, X11 // c4613016da or c53016da + VMOVLHPS X11, X9, X11 // c4413016db + VMOVLPD X2, (BX) // c4e1791313 or c5f91313 + VMOVLPD X11, (BX) // c46179131b or c579131b + VMOVLPD X2, (R11) // c4c1791313 + VMOVLPD X11, (R11) // c44179131b + VMOVLPD (BX), X9, X2 // c4e1311213 or c5b11213 + VMOVLPD (R11), X9, X2 // c4c1311213 + VMOVLPD (BX), X9, X11 // c46131121b or c531121b + VMOVLPD (R11), X9, X11 // c44131121b + VMOVLPS X2, (BX) // c4e1781313 or c5f81313 + VMOVLPS X11, (BX) // c46178131b or c578131b + VMOVLPS X2, (R11) // c4c1781313 + VMOVLPS X11, (R11) // c44178131b + VMOVLPS (BX), X9, X2 // c4e1301213 or c5b01213 + VMOVLPS (R11), X9, X2 // c4c1301213 + VMOVLPS (BX), X9, X11 // c46130121b or c530121b + VMOVLPS (R11), X9, X11 // c44130121b + VMOVMSKPD X2, DX // c4e17950d2 or c5f950d2 + VMOVMSKPD X11, DX // c4c17950d3 + VMOVMSKPD X2, R11 // c4617950da or c57950da + VMOVMSKPD X11, R11 // c4417950db + VMOVMSKPD Y2, DX // c4e17d50d2 or c5fd50d2 + VMOVMSKPD Y11, DX // c4c17d50d3 + VMOVMSKPD Y2, R11 // c4617d50da or c57d50da + VMOVMSKPD Y11, R11 // c4417d50db + VMOVMSKPS X2, DX // c4e17850d2 or c5f850d2 + VMOVMSKPS X11, DX // c4c17850d3 + VMOVMSKPS X2, R11 // c4617850da or c57850da + VMOVMSKPS X11, R11 // c4417850db + VMOVMSKPS Y2, DX // c4e17c50d2 or c5fc50d2 + VMOVMSKPS Y11, DX // c4c17c50d3 + VMOVMSKPS Y2, R11 // c4617c50da or c57c50da + VMOVMSKPS Y11, R11 // c4417c50db + VMOVNTDQ X2, (BX) // c4e179e713 or c5f9e713 + VMOVNTDQ X11, (BX) // c46179e71b or c579e71b + VMOVNTDQ X2, (R11) // c4c179e713 + VMOVNTDQ X11, (R11) // c44179e71b + VMOVNTDQ Y2, (BX) // c4e17de713 
or c5fde713 + VMOVNTDQ Y11, (BX) // c4617de71b or c57de71b + VMOVNTDQ Y2, (R11) // c4c17de713 + VMOVNTDQ Y11, (R11) // c4417de71b + VMOVNTDQA (BX), X2 // c4e2792a13 + VMOVNTDQA (R11), X2 // c4c2792a13 + VMOVNTDQA (BX), X11 // c462792a1b + VMOVNTDQA (R11), X11 // c442792a1b + VMOVNTDQA (BX), Y2 // c4e27d2a13 + VMOVNTDQA (R11), Y2 // c4c27d2a13 + VMOVNTDQA (BX), Y11 // c4627d2a1b + VMOVNTDQA (R11), Y11 // c4427d2a1b + VMOVNTPD X2, (BX) // c4e1792b13 or c5f92b13 + VMOVNTPD X11, (BX) // c461792b1b or c5792b1b + VMOVNTPD X2, (R11) // c4c1792b13 + VMOVNTPD X11, (R11) // c441792b1b + VMOVNTPD Y2, (BX) // c4e17d2b13 or c5fd2b13 + VMOVNTPD Y11, (BX) // c4617d2b1b or c57d2b1b + VMOVNTPD Y2, (R11) // c4c17d2b13 + VMOVNTPD Y11, (R11) // c4417d2b1b + VMOVNTPS X2, (BX) // c4e1782b13 or c5f82b13 + VMOVNTPS X11, (BX) // c461782b1b or c5782b1b + VMOVNTPS X2, (R11) // c4c1782b13 + VMOVNTPS X11, (R11) // c441782b1b + VMOVNTPS Y2, (BX) // c4e17c2b13 or c5fc2b13 + VMOVNTPS Y11, (BX) // c4617c2b1b or c57c2b1b + VMOVNTPS Y2, (R11) // c4c17c2b13 + VMOVNTPS Y11, (R11) // c4417c2b1b + VMOVQ X2, (BX) // c4e1f97e13 or c4e179d613 or c5f9d613 + VMOVQ X11, (BX) // c461f97e1b or c46179d61b or c579d61b + VMOVQ X2, (R11) // c4c1f97e13 or c4c179d613 + VMOVQ X11, (R11) // c441f97e1b or c44179d61b + VMOVQ X2, DX // c4e1f97ed2 + VMOVQ X11, DX // c461f97eda + VMOVQ X2, R11 // c4c1f97ed3 + VMOVQ X11, R11 // c441f97edb + VMOVQ (BX), X2 // c4e17a7e13 or c5fa7e13 or c4e1f96e13 + VMOVQ (R11), X2 // c4c17a7e13 or c4c1f96e13 + VMOVQ (BX), X11 // c4617a7e1b or c57a7e1b or c461f96e1b + VMOVQ (R11), X11 // c4417a7e1b or c441f96e1b + VMOVQ DX, X2 // c4e1f96ed2 + VMOVQ R11, X2 // c4c1f96ed3 + VMOVQ DX, X11 // c461f96eda + VMOVQ R11, X11 // c441f96edb + VMOVQ X2, X2 // c4e17a7ed2 or c5fa7ed2 or c4e179d6d2 or c5f9d6d2 + VMOVQ X11, X2 // c4c17a7ed3 or c46179d6da or c579d6da + VMOVQ X2, X11 // c4617a7eda or c57a7eda or c4c179d6d3 + VMOVQ X11, X11 // c4417a7edb or c44179d6db + VMOVSD X2, (BX) // c4e17b1113 or c5fb1113 + 
VMOVSD X11, (BX) // c4617b111b or c57b111b + VMOVSD X2, (R11) // c4c17b1113 + VMOVSD X11, (R11) // c4417b111b + VMOVSD (BX), X2 // c4e17b1013 or c5fb1013 + VMOVSD (R11), X2 // c4c17b1013 + VMOVSD (BX), X11 // c4617b101b or c57b101b + VMOVSD (R11), X11 // c4417b101b + VMOVSD X2, X9, X2 // c4e13310d2 or c5b310d2 or c4e13311d2 or c5b311d2 + VMOVSD X11, X9, X2 // c4c13310d3 or c4613311da or c53311da + VMOVSD X2, X9, X11 // c4613310da or c53310da or c4c13311d3 + VMOVSD X11, X9, X11 // c4413310db or c4413311db + VMOVSHDUP (BX), X2 // c4e17a1613 or c5fa1613 + VMOVSHDUP (R11), X2 // c4c17a1613 + VMOVSHDUP X2, X2 // c4e17a16d2 or c5fa16d2 + VMOVSHDUP X11, X2 // c4c17a16d3 + VMOVSHDUP (BX), X11 // c4617a161b or c57a161b + VMOVSHDUP (R11), X11 // c4417a161b + VMOVSHDUP X2, X11 // c4617a16da or c57a16da + VMOVSHDUP X11, X11 // c4417a16db + VMOVSHDUP (BX), Y2 // c4e17e1613 or c5fe1613 + VMOVSHDUP (R11), Y2 // c4c17e1613 + VMOVSHDUP Y2, Y2 // c4e17e16d2 or c5fe16d2 + VMOVSHDUP Y11, Y2 // c4c17e16d3 + VMOVSHDUP (BX), Y11 // c4617e161b or c57e161b + VMOVSHDUP (R11), Y11 // c4417e161b + VMOVSHDUP Y2, Y11 // c4617e16da or c57e16da + VMOVSHDUP Y11, Y11 // c4417e16db + VMOVSLDUP (BX), X2 // c4e17a1213 or c5fa1213 + VMOVSLDUP (R11), X2 // c4c17a1213 + VMOVSLDUP X2, X2 // c4e17a12d2 or c5fa12d2 + VMOVSLDUP X11, X2 // c4c17a12d3 + VMOVSLDUP (BX), X11 // c4617a121b or c57a121b + VMOVSLDUP (R11), X11 // c4417a121b + VMOVSLDUP X2, X11 // c4617a12da or c57a12da + VMOVSLDUP X11, X11 // c4417a12db + VMOVSLDUP (BX), Y2 // c4e17e1213 or c5fe1213 + VMOVSLDUP (R11), Y2 // c4c17e1213 + VMOVSLDUP Y2, Y2 // c4e17e12d2 or c5fe12d2 + VMOVSLDUP Y11, Y2 // c4c17e12d3 + VMOVSLDUP (BX), Y11 // c4617e121b or c57e121b + VMOVSLDUP (R11), Y11 // c4417e121b + VMOVSLDUP Y2, Y11 // c4617e12da or c57e12da + VMOVSLDUP Y11, Y11 // c4417e12db + VMOVSS X2, (BX) // c4e17a1113 or c5fa1113 + VMOVSS X11, (BX) // c4617a111b or c57a111b + VMOVSS X2, (R11) // c4c17a1113 + VMOVSS X11, (R11) // c4417a111b + VMOVSS (BX), X2 // 
c4e17a1013 or c5fa1013 + VMOVSS (R11), X2 // c4c17a1013 + VMOVSS (BX), X11 // c4617a101b or c57a101b + VMOVSS (R11), X11 // c4417a101b + VMOVSS X2, X9, X2 // c4e13210d2 or c5b210d2 or c4e13211d2 or c5b211d2 + VMOVSS X11, X9, X2 // c4c13210d3 or c4613211da or c53211da + VMOVSS X2, X9, X11 // c4613210da or c53210da or c4c13211d3 + VMOVSS X11, X9, X11 // c4413210db or c4413211db + VMOVUPD (BX), X2 // c4e1791013 or c5f91013 + VMOVUPD (R11), X2 // c4c1791013 + VMOVUPD X2, X2 // c4e17910d2 or c5f910d2 or c4e17911d2 or c5f911d2 + VMOVUPD X11, X2 // c4c17910d3 or c4617911da or c57911da + VMOVUPD (BX), X11 // c46179101b or c579101b + VMOVUPD (R11), X11 // c44179101b + VMOVUPD X2, X11 // c4617910da or c57910da or c4c17911d3 + VMOVUPD X11, X11 // c4417910db or c4417911db + VMOVUPD X2, (BX) // c4e1791113 or c5f91113 + VMOVUPD X11, (BX) // c46179111b or c579111b + VMOVUPD X2, (R11) // c4c1791113 + VMOVUPD X11, (R11) // c44179111b + VMOVUPD (BX), Y2 // c4e17d1013 or c5fd1013 + VMOVUPD (R11), Y2 // c4c17d1013 + VMOVUPD Y2, Y2 // c4e17d10d2 or c5fd10d2 or c4e17d11d2 or c5fd11d2 + VMOVUPD Y11, Y2 // c4c17d10d3 or c4617d11da or c57d11da + VMOVUPD (BX), Y11 // c4617d101b or c57d101b + VMOVUPD (R11), Y11 // c4417d101b + VMOVUPD Y2, Y11 // c4617d10da or c57d10da or c4c17d11d3 + VMOVUPD Y11, Y11 // c4417d10db or c4417d11db + VMOVUPD Y2, (BX) // c4e17d1113 or c5fd1113 + VMOVUPD Y11, (BX) // c4617d111b or c57d111b + VMOVUPD Y2, (R11) // c4c17d1113 + VMOVUPD Y11, (R11) // c4417d111b + VMOVUPS (BX), X2 // c4e1781013 or c5f81013 + VMOVUPS (R11), X2 // c4c1781013 + VMOVUPS X2, X2 // c4e17810d2 or c5f810d2 or c4e17811d2 or c5f811d2 + VMOVUPS X11, X2 // c4c17810d3 or c4617811da or c57811da + VMOVUPS (BX), X11 // c46178101b or c578101b + VMOVUPS (R11), X11 // c44178101b + VMOVUPS X2, X11 // c4617810da or c57810da or c4c17811d3 + VMOVUPS X11, X11 // c4417810db or c4417811db + VMOVUPS X2, (BX) // c4e1781113 or c5f81113 + VMOVUPS X11, (BX) // c46178111b or c578111b + VMOVUPS X2, (R11) // c4c1781113 
+ VMOVUPS X11, (R11) // c44178111b + VMOVUPS (BX), Y2 // c4e17c1013 or c5fc1013 + VMOVUPS (R11), Y2 // c4c17c1013 + VMOVUPS Y2, Y2 // c4e17c10d2 or c5fc10d2 or c4e17c11d2 or c5fc11d2 + VMOVUPS Y11, Y2 // c4c17c10d3 or c4617c11da or c57c11da + VMOVUPS (BX), Y11 // c4617c101b or c57c101b + VMOVUPS (R11), Y11 // c4417c101b + VMOVUPS Y2, Y11 // c4617c10da or c57c10da or c4c17c11d3 + VMOVUPS Y11, Y11 // c4417c10db or c4417c11db + VMOVUPS Y2, (BX) // c4e17c1113 or c5fc1113 + VMOVUPS Y11, (BX) // c4617c111b or c57c111b + VMOVUPS Y2, (R11) // c4c17c1113 + VMOVUPS Y11, (R11) // c4417c111b + VMPSADBW $7, (BX), X9, X2 // c4e331421307 + VMPSADBW $7, (R11), X9, X2 // c4c331421307 + VMPSADBW $7, X2, X9, X2 // c4e33142d207 + VMPSADBW $7, X11, X9, X2 // c4c33142d307 + VMPSADBW $7, (BX), X9, X11 // c46331421b07 + VMPSADBW $7, (R11), X9, X11 // c44331421b07 + VMPSADBW $7, X2, X9, X11 // c4633142da07 + VMPSADBW $7, X11, X9, X11 // c4433142db07 + VMPSADBW $7, (BX), Y15, Y2 // c4e305421307 + VMPSADBW $7, (R11), Y15, Y2 // c4c305421307 + VMPSADBW $7, Y2, Y15, Y2 // c4e30542d207 + VMPSADBW $7, Y11, Y15, Y2 // c4c30542d307 + VMPSADBW $7, (BX), Y15, Y11 // c46305421b07 + VMPSADBW $7, (R11), Y15, Y11 // c44305421b07 + VMPSADBW $7, Y2, Y15, Y11 // c4630542da07 + VMPSADBW $7, Y11, Y15, Y11 // c4430542db07 + VMULPD (BX), X9, X2 // c4e1315913 or c5b15913 + VMULPD (R11), X9, X2 // c4c1315913 + VMULPD X2, X9, X2 // c4e13159d2 or c5b159d2 + VMULPD X11, X9, X2 // c4c13159d3 + VMULPD (BX), X9, X11 // c46131591b or c531591b + VMULPD (R11), X9, X11 // c44131591b + VMULPD X2, X9, X11 // c4613159da or c53159da + VMULPD X11, X9, X11 // c4413159db + VMULPD (BX), Y15, Y2 // c4e1055913 or c5855913 + VMULPD (R11), Y15, Y2 // c4c1055913 + VMULPD Y2, Y15, Y2 // c4e10559d2 or c58559d2 + VMULPD Y11, Y15, Y2 // c4c10559d3 + VMULPD (BX), Y15, Y11 // c46105591b or c505591b + VMULPD (R11), Y15, Y11 // c44105591b + VMULPD Y2, Y15, Y11 // c4610559da or c50559da + VMULPD Y11, Y15, Y11 // c4410559db + VMULPS (BX), X9, 
X2 // c4e1305913 or c5b05913 + VMULPS (R11), X9, X2 // c4c1305913 + VMULPS X2, X9, X2 // c4e13059d2 or c5b059d2 + VMULPS X11, X9, X2 // c4c13059d3 + VMULPS (BX), X9, X11 // c46130591b or c530591b + VMULPS (R11), X9, X11 // c44130591b + VMULPS X2, X9, X11 // c4613059da or c53059da + VMULPS X11, X9, X11 // c4413059db + VMULPS (BX), Y15, Y2 // c4e1045913 or c5845913 + VMULPS (R11), Y15, Y2 // c4c1045913 + VMULPS Y2, Y15, Y2 // c4e10459d2 or c58459d2 + VMULPS Y11, Y15, Y2 // c4c10459d3 + VMULPS (BX), Y15, Y11 // c46104591b or c504591b + VMULPS (R11), Y15, Y11 // c44104591b + VMULPS Y2, Y15, Y11 // c4610459da or c50459da + VMULPS Y11, Y15, Y11 // c4410459db + VMULSD (BX), X9, X2 // c4e1335913 or c5b35913 + VMULSD (R11), X9, X2 // c4c1335913 + VMULSD X2, X9, X2 // c4e13359d2 or c5b359d2 + VMULSD X11, X9, X2 // c4c13359d3 + VMULSD (BX), X9, X11 // c46133591b or c533591b + VMULSD (R11), X9, X11 // c44133591b + VMULSD X2, X9, X11 // c4613359da or c53359da + VMULSD X11, X9, X11 // c4413359db + VMULSS (BX), X9, X2 // c4e1325913 or c5b25913 + VMULSS (R11), X9, X2 // c4c1325913 + VMULSS X2, X9, X2 // c4e13259d2 or c5b259d2 + VMULSS X11, X9, X2 // c4c13259d3 + VMULSS (BX), X9, X11 // c46132591b or c532591b + VMULSS (R11), X9, X11 // c44132591b + VMULSS X2, X9, X11 // c4613259da or c53259da + VMULSS X11, X9, X11 // c4413259db + VORPD (BX), X9, X2 // c4e1315613 or c5b15613 + VORPD (R11), X9, X2 // c4c1315613 + VORPD X2, X9, X2 // c4e13156d2 or c5b156d2 + VORPD X11, X9, X2 // c4c13156d3 + VORPD (BX), X9, X11 // c46131561b or c531561b + VORPD (R11), X9, X11 // c44131561b + VORPD X2, X9, X11 // c4613156da or c53156da + VORPD X11, X9, X11 // c4413156db + VORPD (BX), Y15, Y2 // c4e1055613 or c5855613 + VORPD (R11), Y15, Y2 // c4c1055613 + VORPD Y2, Y15, Y2 // c4e10556d2 or c58556d2 + VORPD Y11, Y15, Y2 // c4c10556d3 + VORPD (BX), Y15, Y11 // c46105561b or c505561b + VORPD (R11), Y15, Y11 // c44105561b + VORPD Y2, Y15, Y11 // c4610556da or c50556da + VORPD Y11, Y15, Y11 // c4410556db + 
VORPS (BX), X9, X2 // c4e1305613 or c5b05613 + VORPS (R11), X9, X2 // c4c1305613 + VORPS X2, X9, X2 // c4e13056d2 or c5b056d2 + VORPS X11, X9, X2 // c4c13056d3 + VORPS (BX), X9, X11 // c46130561b or c530561b + VORPS (R11), X9, X11 // c44130561b + VORPS X2, X9, X11 // c4613056da or c53056da + VORPS X11, X9, X11 // c4413056db + VORPS (BX), Y15, Y2 // c4e1045613 or c5845613 + VORPS (R11), Y15, Y2 // c4c1045613 + VORPS Y2, Y15, Y2 // c4e10456d2 or c58456d2 + VORPS Y11, Y15, Y2 // c4c10456d3 + VORPS (BX), Y15, Y11 // c46104561b or c504561b + VORPS (R11), Y15, Y11 // c44104561b + VORPS Y2, Y15, Y11 // c4610456da or c50456da + VORPS Y11, Y15, Y11 // c4410456db + VPABSB (BX), X2 // c4e2791c13 + VPABSB (R11), X2 // c4c2791c13 + VPABSB X2, X2 // c4e2791cd2 + VPABSB X11, X2 // c4c2791cd3 + VPABSB (BX), X11 // c462791c1b + VPABSB (R11), X11 // c442791c1b + VPABSB X2, X11 // c462791cda + VPABSB X11, X11 // c442791cdb + VPABSB (BX), Y2 // c4e27d1c13 + VPABSB (R11), Y2 // c4c27d1c13 + VPABSB Y2, Y2 // c4e27d1cd2 + VPABSB Y11, Y2 // c4c27d1cd3 + VPABSB (BX), Y11 // c4627d1c1b + VPABSB (R11), Y11 // c4427d1c1b + VPABSB Y2, Y11 // c4627d1cda + VPABSB Y11, Y11 // c4427d1cdb + VPABSD (BX), X2 // c4e2791e13 + VPABSD (R11), X2 // c4c2791e13 + VPABSD X2, X2 // c4e2791ed2 + VPABSD X11, X2 // c4c2791ed3 + VPABSD (BX), X11 // c462791e1b + VPABSD (R11), X11 // c442791e1b + VPABSD X2, X11 // c462791eda + VPABSD X11, X11 // c442791edb + VPABSD (BX), Y2 // c4e27d1e13 + VPABSD (R11), Y2 // c4c27d1e13 + VPABSD Y2, Y2 // c4e27d1ed2 + VPABSD Y11, Y2 // c4c27d1ed3 + VPABSD (BX), Y11 // c4627d1e1b + VPABSD (R11), Y11 // c4427d1e1b + VPABSD Y2, Y11 // c4627d1eda + VPABSD Y11, Y11 // c4427d1edb + VPABSW (BX), X2 // c4e2791d13 + VPABSW (R11), X2 // c4c2791d13 + VPABSW X2, X2 // c4e2791dd2 + VPABSW X11, X2 // c4c2791dd3 + VPABSW (BX), X11 // c462791d1b + VPABSW (R11), X11 // c442791d1b + VPABSW X2, X11 // c462791dda + VPABSW X11, X11 // c442791ddb + VPABSW (BX), Y2 // c4e27d1d13 + VPABSW (R11), Y2 // 
c4c27d1d13 + VPABSW Y2, Y2 // c4e27d1dd2 + VPABSW Y11, Y2 // c4c27d1dd3 + VPABSW (BX), Y11 // c4627d1d1b + VPABSW (R11), Y11 // c4427d1d1b + VPABSW Y2, Y11 // c4627d1dda + VPABSW Y11, Y11 // c4427d1ddb + VPACKSSDW (BX), X9, X2 // c4e1316b13 or c5b16b13 + VPACKSSDW (R11), X9, X2 // c4c1316b13 + VPACKSSDW X2, X9, X2 // c4e1316bd2 or c5b16bd2 + VPACKSSDW X11, X9, X2 // c4c1316bd3 + VPACKSSDW (BX), X9, X11 // c461316b1b or c5316b1b + VPACKSSDW (R11), X9, X11 // c441316b1b + VPACKSSDW X2, X9, X11 // c461316bda or c5316bda + VPACKSSDW X11, X9, X11 // c441316bdb + VPACKSSDW (BX), Y15, Y2 // c4e1056b13 or c5856b13 + VPACKSSDW (R11), Y15, Y2 // c4c1056b13 + VPACKSSDW Y2, Y15, Y2 // c4e1056bd2 or c5856bd2 + VPACKSSDW Y11, Y15, Y2 // c4c1056bd3 + VPACKSSDW (BX), Y15, Y11 // c461056b1b or c5056b1b + VPACKSSDW (R11), Y15, Y11 // c441056b1b + VPACKSSDW Y2, Y15, Y11 // c461056bda or c5056bda + VPACKSSDW Y11, Y15, Y11 // c441056bdb + VPACKSSWB (BX), X9, X2 // c4e1316313 or c5b16313 + VPACKSSWB (R11), X9, X2 // c4c1316313 + VPACKSSWB X2, X9, X2 // c4e13163d2 or c5b163d2 + VPACKSSWB X11, X9, X2 // c4c13163d3 + VPACKSSWB (BX), X9, X11 // c46131631b or c531631b + VPACKSSWB (R11), X9, X11 // c44131631b + VPACKSSWB X2, X9, X11 // c4613163da or c53163da + VPACKSSWB X11, X9, X11 // c4413163db + VPACKSSWB (BX), Y15, Y2 // c4e1056313 or c5856313 + VPACKSSWB (R11), Y15, Y2 // c4c1056313 + VPACKSSWB Y2, Y15, Y2 // c4e10563d2 or c58563d2 + VPACKSSWB Y11, Y15, Y2 // c4c10563d3 + VPACKSSWB (BX), Y15, Y11 // c46105631b or c505631b + VPACKSSWB (R11), Y15, Y11 // c44105631b + VPACKSSWB Y2, Y15, Y11 // c4610563da or c50563da + VPACKSSWB Y11, Y15, Y11 // c4410563db + VPACKUSDW (BX), X9, X2 // c4e2312b13 + VPACKUSDW (R11), X9, X2 // c4c2312b13 + VPACKUSDW X2, X9, X2 // c4e2312bd2 + VPACKUSDW X11, X9, X2 // c4c2312bd3 + VPACKUSDW (BX), X9, X11 // c462312b1b + VPACKUSDW (R11), X9, X11 // c442312b1b + VPACKUSDW X2, X9, X11 // c462312bda + VPACKUSDW X11, X9, X11 // c442312bdb + VPACKUSDW (BX), Y15, Y2 // 
c4e2052b13 + VPACKUSDW (R11), Y15, Y2 // c4c2052b13 + VPACKUSDW Y2, Y15, Y2 // c4e2052bd2 + VPACKUSDW Y11, Y15, Y2 // c4c2052bd3 + VPACKUSDW (BX), Y15, Y11 // c462052b1b + VPACKUSDW (R11), Y15, Y11 // c442052b1b + VPACKUSDW Y2, Y15, Y11 // c462052bda + VPACKUSDW Y11, Y15, Y11 // c442052bdb + VPACKUSWB (BX), X9, X2 // c4e1316713 or c5b16713 + VPACKUSWB (R11), X9, X2 // c4c1316713 + VPACKUSWB X2, X9, X2 // c4e13167d2 or c5b167d2 + VPACKUSWB X11, X9, X2 // c4c13167d3 + VPACKUSWB (BX), X9, X11 // c46131671b or c531671b + VPACKUSWB (R11), X9, X11 // c44131671b + VPACKUSWB X2, X9, X11 // c4613167da or c53167da + VPACKUSWB X11, X9, X11 // c4413167db + VPACKUSWB (BX), Y15, Y2 // c4e1056713 or c5856713 + VPACKUSWB (R11), Y15, Y2 // c4c1056713 + VPACKUSWB Y2, Y15, Y2 // c4e10567d2 or c58567d2 + VPACKUSWB Y11, Y15, Y2 // c4c10567d3 + VPACKUSWB (BX), Y15, Y11 // c46105671b or c505671b + VPACKUSWB (R11), Y15, Y11 // c44105671b + VPACKUSWB Y2, Y15, Y11 // c4610567da or c50567da + VPACKUSWB Y11, Y15, Y11 // c4410567db + VPADDB (BX), X9, X2 // c4e131fc13 or c5b1fc13 + VPADDB (R11), X9, X2 // c4c131fc13 + VPADDB X2, X9, X2 // c4e131fcd2 or c5b1fcd2 + VPADDB X11, X9, X2 // c4c131fcd3 + VPADDB (BX), X9, X11 // c46131fc1b or c531fc1b + VPADDB (R11), X9, X11 // c44131fc1b + VPADDB X2, X9, X11 // c46131fcda or c531fcda + VPADDB X11, X9, X11 // c44131fcdb + VPADDB (BX), Y15, Y2 // c4e105fc13 or c585fc13 + VPADDB (R11), Y15, Y2 // c4c105fc13 + VPADDB Y2, Y15, Y2 // c4e105fcd2 or c585fcd2 + VPADDB Y11, Y15, Y2 // c4c105fcd3 + VPADDB (BX), Y15, Y11 // c46105fc1b or c505fc1b + VPADDB (R11), Y15, Y11 // c44105fc1b + VPADDB Y2, Y15, Y11 // c46105fcda or c505fcda + VPADDB Y11, Y15, Y11 // c44105fcdb + VPADDD (BX), X9, X2 // c4e131fe13 or c5b1fe13 + VPADDD (R11), X9, X2 // c4c131fe13 + VPADDD X2, X9, X2 // c4e131fed2 or c5b1fed2 + VPADDD X11, X9, X2 // c4c131fed3 + VPADDD (BX), X9, X11 // c46131fe1b or c531fe1b + VPADDD (R11), X9, X11 // c44131fe1b + VPADDD X2, X9, X11 // c46131feda or c531feda 
+ VPADDD X11, X9, X11 // c44131fedb + VPADDD (BX), Y15, Y2 // c4e105fe13 or c585fe13 + VPADDD (R11), Y15, Y2 // c4c105fe13 + VPADDD Y2, Y15, Y2 // c4e105fed2 or c585fed2 + VPADDD Y11, Y15, Y2 // c4c105fed3 + VPADDD (BX), Y15, Y11 // c46105fe1b or c505fe1b + VPADDD (R11), Y15, Y11 // c44105fe1b + VPADDD Y2, Y15, Y11 // c46105feda or c505feda + VPADDD Y11, Y15, Y11 // c44105fedb + VPADDQ (BX), X9, X2 // c4e131d413 or c5b1d413 + VPADDQ (R11), X9, X2 // c4c131d413 + VPADDQ X2, X9, X2 // c4e131d4d2 or c5b1d4d2 + VPADDQ X11, X9, X2 // c4c131d4d3 + VPADDQ (BX), X9, X11 // c46131d41b or c531d41b + VPADDQ (R11), X9, X11 // c44131d41b + VPADDQ X2, X9, X11 // c46131d4da or c531d4da + VPADDQ X11, X9, X11 // c44131d4db + VPADDQ (BX), Y15, Y2 // c4e105d413 or c585d413 + VPADDQ (R11), Y15, Y2 // c4c105d413 + VPADDQ Y2, Y15, Y2 // c4e105d4d2 or c585d4d2 + VPADDQ Y11, Y15, Y2 // c4c105d4d3 + VPADDQ (BX), Y15, Y11 // c46105d41b or c505d41b + VPADDQ (R11), Y15, Y11 // c44105d41b + VPADDQ Y2, Y15, Y11 // c46105d4da or c505d4da + VPADDQ Y11, Y15, Y11 // c44105d4db + VPADDSB (BX), X9, X2 // c4e131ec13 or c5b1ec13 + VPADDSB (R11), X9, X2 // c4c131ec13 + VPADDSB X2, X9, X2 // c4e131ecd2 or c5b1ecd2 + VPADDSB X11, X9, X2 // c4c131ecd3 + VPADDSB (BX), X9, X11 // c46131ec1b or c531ec1b + VPADDSB (R11), X9, X11 // c44131ec1b + VPADDSB X2, X9, X11 // c46131ecda or c531ecda + VPADDSB X11, X9, X11 // c44131ecdb + VPADDSB (BX), Y15, Y2 // c4e105ec13 or c585ec13 + VPADDSB (R11), Y15, Y2 // c4c105ec13 + VPADDSB Y2, Y15, Y2 // c4e105ecd2 or c585ecd2 + VPADDSB Y11, Y15, Y2 // c4c105ecd3 + VPADDSB (BX), Y15, Y11 // c46105ec1b or c505ec1b + VPADDSB (R11), Y15, Y11 // c44105ec1b + VPADDSB Y2, Y15, Y11 // c46105ecda or c505ecda + VPADDSB Y11, Y15, Y11 // c44105ecdb + VPADDSW (BX), X9, X2 // c4e131ed13 or c5b1ed13 + VPADDSW (R11), X9, X2 // c4c131ed13 + VPADDSW X2, X9, X2 // c4e131edd2 or c5b1edd2 + VPADDSW X11, X9, X2 // c4c131edd3 + VPADDSW (BX), X9, X11 // c46131ed1b or c531ed1b + VPADDSW (R11), X9, 
X11 // c44131ed1b + VPADDSW X2, X9, X11 // c46131edda or c531edda + VPADDSW X11, X9, X11 // c44131eddb + VPADDSW (BX), Y15, Y2 // c4e105ed13 or c585ed13 + VPADDSW (R11), Y15, Y2 // c4c105ed13 + VPADDSW Y2, Y15, Y2 // c4e105edd2 or c585edd2 + VPADDSW Y11, Y15, Y2 // c4c105edd3 + VPADDSW (BX), Y15, Y11 // c46105ed1b or c505ed1b + VPADDSW (R11), Y15, Y11 // c44105ed1b + VPADDSW Y2, Y15, Y11 // c46105edda or c505edda + VPADDSW Y11, Y15, Y11 // c44105eddb + VPADDUSB (BX), X9, X2 // c4e131dc13 or c5b1dc13 + VPADDUSB (R11), X9, X2 // c4c131dc13 + VPADDUSB X2, X9, X2 // c4e131dcd2 or c5b1dcd2 + VPADDUSB X11, X9, X2 // c4c131dcd3 + VPADDUSB (BX), X9, X11 // c46131dc1b or c531dc1b + VPADDUSB (R11), X9, X11 // c44131dc1b + VPADDUSB X2, X9, X11 // c46131dcda or c531dcda + VPADDUSB X11, X9, X11 // c44131dcdb + VPADDUSB (BX), Y15, Y2 // c4e105dc13 or c585dc13 + VPADDUSB (R11), Y15, Y2 // c4c105dc13 + VPADDUSB Y2, Y15, Y2 // c4e105dcd2 or c585dcd2 + VPADDUSB Y11, Y15, Y2 // c4c105dcd3 + VPADDUSB (BX), Y15, Y11 // c46105dc1b or c505dc1b + VPADDUSB (R11), Y15, Y11 // c44105dc1b + VPADDUSB Y2, Y15, Y11 // c46105dcda or c505dcda + VPADDUSB Y11, Y15, Y11 // c44105dcdb + VPADDUSW (BX), X9, X2 // c4e131dd13 or c5b1dd13 + VPADDUSW (R11), X9, X2 // c4c131dd13 + VPADDUSW X2, X9, X2 // c4e131ddd2 or c5b1ddd2 + VPADDUSW X11, X9, X2 // c4c131ddd3 + VPADDUSW (BX), X9, X11 // c46131dd1b or c531dd1b + VPADDUSW (R11), X9, X11 // c44131dd1b + VPADDUSW X2, X9, X11 // c46131ddda or c531ddda + VPADDUSW X11, X9, X11 // c44131dddb + VPADDUSW (BX), Y15, Y2 // c4e105dd13 or c585dd13 + VPADDUSW (R11), Y15, Y2 // c4c105dd13 + VPADDUSW Y2, Y15, Y2 // c4e105ddd2 or c585ddd2 + VPADDUSW Y11, Y15, Y2 // c4c105ddd3 + VPADDUSW (BX), Y15, Y11 // c46105dd1b or c505dd1b + VPADDUSW (R11), Y15, Y11 // c44105dd1b + VPADDUSW Y2, Y15, Y11 // c46105ddda or c505ddda + VPADDUSW Y11, Y15, Y11 // c44105dddb + VPADDW (BX), X9, X2 // c4e131fd13 or c5b1fd13 + VPADDW (R11), X9, X2 // c4c131fd13 + VPADDW X2, X9, X2 // c4e131fdd2 
or c5b1fdd2 + VPADDW X11, X9, X2 // c4c131fdd3 + VPADDW (BX), X9, X11 // c46131fd1b or c531fd1b + VPADDW (R11), X9, X11 // c44131fd1b + VPADDW X2, X9, X11 // c46131fdda or c531fdda + VPADDW X11, X9, X11 // c44131fddb + VPADDW (BX), Y15, Y2 // c4e105fd13 or c585fd13 + VPADDW (R11), Y15, Y2 // c4c105fd13 + VPADDW Y2, Y15, Y2 // c4e105fdd2 or c585fdd2 + VPADDW Y11, Y15, Y2 // c4c105fdd3 + VPADDW (BX), Y15, Y11 // c46105fd1b or c505fd1b + VPADDW (R11), Y15, Y11 // c44105fd1b + VPADDW Y2, Y15, Y11 // c46105fdda or c505fdda + VPADDW Y11, Y15, Y11 // c44105fddb + VPALIGNR $7, (BX), X9, X2 // c4e3310f1307 + VPALIGNR $7, (R11), X9, X2 // c4c3310f1307 + VPALIGNR $7, X2, X9, X2 // c4e3310fd207 + VPALIGNR $7, X11, X9, X2 // c4c3310fd307 + VPALIGNR $7, (BX), X9, X11 // c463310f1b07 + VPALIGNR $7, (R11), X9, X11 // c443310f1b07 + VPALIGNR $7, X2, X9, X11 // c463310fda07 + VPALIGNR $7, X11, X9, X11 // c443310fdb07 + VPALIGNR $7, (BX), Y15, Y2 // c4e3050f1307 + VPALIGNR $7, (R11), Y15, Y2 // c4c3050f1307 + VPALIGNR $7, Y2, Y15, Y2 // c4e3050fd207 + VPALIGNR $7, Y11, Y15, Y2 // c4c3050fd307 + VPALIGNR $7, (BX), Y15, Y11 // c463050f1b07 + VPALIGNR $7, (R11), Y15, Y11 // c443050f1b07 + VPALIGNR $7, Y2, Y15, Y11 // c463050fda07 + VPALIGNR $7, Y11, Y15, Y11 // c443050fdb07 + VPAND (BX), X9, X2 // c4e131db13 or c5b1db13 + VPAND (R11), X9, X2 // c4c131db13 + VPAND X2, X9, X2 // c4e131dbd2 or c5b1dbd2 + VPAND X11, X9, X2 // c4c131dbd3 + VPAND (BX), X9, X11 // c46131db1b or c531db1b + VPAND (R11), X9, X11 // c44131db1b + VPAND X2, X9, X11 // c46131dbda or c531dbda + VPAND X11, X9, X11 // c44131dbdb + VPAND (BX), Y15, Y2 // c4e105db13 or c585db13 + VPAND (R11), Y15, Y2 // c4c105db13 + VPAND Y2, Y15, Y2 // c4e105dbd2 or c585dbd2 + VPAND Y11, Y15, Y2 // c4c105dbd3 + VPAND (BX), Y15, Y11 // c46105db1b or c505db1b + VPAND (R11), Y15, Y11 // c44105db1b + VPAND Y2, Y15, Y11 // c46105dbda or c505dbda + VPAND Y11, Y15, Y11 // c44105dbdb + VPANDN (BX), X9, X2 // c4e131df13 or c5b1df13 + VPANDN 
(R11), X9, X2 // c4c131df13 + VPANDN X2, X9, X2 // c4e131dfd2 or c5b1dfd2 + VPANDN X11, X9, X2 // c4c131dfd3 + VPANDN (BX), X9, X11 // c46131df1b or c531df1b + VPANDN (R11), X9, X11 // c44131df1b + VPANDN X2, X9, X11 // c46131dfda or c531dfda + VPANDN X11, X9, X11 // c44131dfdb + VPANDN (BX), Y15, Y2 // c4e105df13 or c585df13 + VPANDN (R11), Y15, Y2 // c4c105df13 + VPANDN Y2, Y15, Y2 // c4e105dfd2 or c585dfd2 + VPANDN Y11, Y15, Y2 // c4c105dfd3 + VPANDN (BX), Y15, Y11 // c46105df1b or c505df1b + VPANDN (R11), Y15, Y11 // c44105df1b + VPANDN Y2, Y15, Y11 // c46105dfda or c505dfda + VPANDN Y11, Y15, Y11 // c44105dfdb + VPAVGB (BX), X9, X2 // c4e131e013 or c5b1e013 + VPAVGB (R11), X9, X2 // c4c131e013 + VPAVGB X2, X9, X2 // c4e131e0d2 or c5b1e0d2 + VPAVGB X11, X9, X2 // c4c131e0d3 + VPAVGB (BX), X9, X11 // c46131e01b or c531e01b + VPAVGB (R11), X9, X11 // c44131e01b + VPAVGB X2, X9, X11 // c46131e0da or c531e0da + VPAVGB X11, X9, X11 // c44131e0db + VPAVGB (BX), Y15, Y2 // c4e105e013 or c585e013 + VPAVGB (R11), Y15, Y2 // c4c105e013 + VPAVGB Y2, Y15, Y2 // c4e105e0d2 or c585e0d2 + VPAVGB Y11, Y15, Y2 // c4c105e0d3 + VPAVGB (BX), Y15, Y11 // c46105e01b or c505e01b + VPAVGB (R11), Y15, Y11 // c44105e01b + VPAVGB Y2, Y15, Y11 // c46105e0da or c505e0da + VPAVGB Y11, Y15, Y11 // c44105e0db + VPAVGW (BX), X9, X2 // c4e131e313 or c5b1e313 + VPAVGW (R11), X9, X2 // c4c131e313 + VPAVGW X2, X9, X2 // c4e131e3d2 or c5b1e3d2 + VPAVGW X11, X9, X2 // c4c131e3d3 + VPAVGW (BX), X9, X11 // c46131e31b or c531e31b + VPAVGW (R11), X9, X11 // c44131e31b + VPAVGW X2, X9, X11 // c46131e3da or c531e3da + VPAVGW X11, X9, X11 // c44131e3db + VPAVGW (BX), Y15, Y2 // c4e105e313 or c585e313 + VPAVGW (R11), Y15, Y2 // c4c105e313 + VPAVGW Y2, Y15, Y2 // c4e105e3d2 or c585e3d2 + VPAVGW Y11, Y15, Y2 // c4c105e3d3 + VPAVGW (BX), Y15, Y11 // c46105e31b or c505e31b + VPAVGW (R11), Y15, Y11 // c44105e31b + VPAVGW Y2, Y15, Y11 // c46105e3da or c505e3da + VPAVGW Y11, Y15, Y11 // c44105e3db + VPBLENDD $7, 
(BX), X9, X2 // c4e331021307 + VPBLENDD $7, (R11), X9, X2 // c4c331021307 + VPBLENDD $7, X2, X9, X2 // c4e33102d207 + VPBLENDD $7, X11, X9, X2 // c4c33102d307 + VPBLENDD $7, (BX), X9, X11 // c46331021b07 + VPBLENDD $7, (R11), X9, X11 // c44331021b07 + VPBLENDD $7, X2, X9, X11 // c4633102da07 + VPBLENDD $7, X11, X9, X11 // c4433102db07 + VPBLENDD $7, (BX), Y15, Y2 // c4e305021307 + VPBLENDD $7, (R11), Y15, Y2 // c4c305021307 + VPBLENDD $7, Y2, Y15, Y2 // c4e30502d207 + VPBLENDD $7, Y11, Y15, Y2 // c4c30502d307 + VPBLENDD $7, (BX), Y15, Y11 // c46305021b07 + VPBLENDD $7, (R11), Y15, Y11 // c44305021b07 + VPBLENDD $7, Y2, Y15, Y11 // c4630502da07 + VPBLENDD $7, Y11, Y15, Y11 // c4430502db07 + VPBLENDVB X12, (BX), X9, X2 // c4e3314c13c0 + VPBLENDVB X12, (R11), X9, X2 // c4c3314c13c0 + VPBLENDVB X12, X2, X9, X2 // c4e3314cd2c0 + VPBLENDVB X12, X11, X9, X2 // c4c3314cd3c0 + VPBLENDVB X12, (BX), X9, X11 // c463314c1bc0 + VPBLENDVB X12, (R11), X9, X11 // c443314c1bc0 + VPBLENDVB X12, X2, X9, X11 // c463314cdac0 + VPBLENDVB X12, X11, X9, X11 // c443314cdbc0 + VPBLENDVB Y13, (BX), Y15, Y2 // c4e3054c13d0 + VPBLENDVB Y13, (R11), Y15, Y2 // c4c3054c13d0 + VPBLENDVB Y13, Y2, Y15, Y2 // c4e3054cd2d0 + VPBLENDVB Y13, Y11, Y15, Y2 // c4c3054cd3d0 + VPBLENDVB Y13, (BX), Y15, Y11 // c463054c1bd0 + VPBLENDVB Y13, (R11), Y15, Y11 // c443054c1bd0 + VPBLENDVB Y13, Y2, Y15, Y11 // c463054cdad0 + VPBLENDVB Y13, Y11, Y15, Y11 // c443054cdbd0 + VPBLENDW $7, (BX), X9, X2 // c4e3310e1307 + VPBLENDW $7, (R11), X9, X2 // c4c3310e1307 + VPBLENDW $7, X2, X9, X2 // c4e3310ed207 + VPBLENDW $7, X11, X9, X2 // c4c3310ed307 + VPBLENDW $7, (BX), X9, X11 // c463310e1b07 + VPBLENDW $7, (R11), X9, X11 // c443310e1b07 + VPBLENDW $7, X2, X9, X11 // c463310eda07 + VPBLENDW $7, X11, X9, X11 // c443310edb07 + VPBLENDW $7, (BX), Y15, Y2 // c4e3050e1307 + VPBLENDW $7, (R11), Y15, Y2 // c4c3050e1307 + VPBLENDW $7, Y2, Y15, Y2 // c4e3050ed207 + VPBLENDW $7, Y11, Y15, Y2 // c4c3050ed307 + VPBLENDW $7, (BX), Y15, 
Y11 // c463050e1b07 + VPBLENDW $7, (R11), Y15, Y11 // c443050e1b07 + VPBLENDW $7, Y2, Y15, Y11 // c463050eda07 + VPBLENDW $7, Y11, Y15, Y11 // c443050edb07 + VPBROADCASTB (BX), X2 // c4e2797813 + VPBROADCASTB (R11), X2 // c4c2797813 + VPBROADCASTB X2, X2 // c4e27978d2 + VPBROADCASTB X11, X2 // c4c27978d3 + VPBROADCASTB (BX), X11 // c46279781b + VPBROADCASTB (R11), X11 // c44279781b + VPBROADCASTB X2, X11 // c4627978da + VPBROADCASTB X11, X11 // c4427978db + VPBROADCASTB (BX), Y2 // c4e27d7813 + VPBROADCASTB (R11), Y2 // c4c27d7813 + VPBROADCASTB X2, Y2 // c4e27d78d2 + VPBROADCASTB X11, Y2 // c4c27d78d3 + VPBROADCASTB (BX), Y11 // c4627d781b + VPBROADCASTB (R11), Y11 // c4427d781b + VPBROADCASTB X2, Y11 // c4627d78da + VPBROADCASTB X11, Y11 // c4427d78db + VPBROADCASTD (BX), X2 // c4e2795813 + VPBROADCASTD (R11), X2 // c4c2795813 + VPBROADCASTD X2, X2 // c4e27958d2 + VPBROADCASTD X11, X2 // c4c27958d3 + VPBROADCASTD (BX), X11 // c46279581b + VPBROADCASTD (R11), X11 // c44279581b + VPBROADCASTD X2, X11 // c4627958da + VPBROADCASTD X11, X11 // c4427958db + VPBROADCASTD (BX), Y2 // c4e27d5813 + VPBROADCASTD (R11), Y2 // c4c27d5813 + VPBROADCASTD X2, Y2 // c4e27d58d2 + VPBROADCASTD X11, Y2 // c4c27d58d3 + VPBROADCASTD (BX), Y11 // c4627d581b + VPBROADCASTD (R11), Y11 // c4427d581b + VPBROADCASTD X2, Y11 // c4627d58da + VPBROADCASTD X11, Y11 // c4427d58db + VPBROADCASTQ (BX), X2 // c4e2795913 + VPBROADCASTQ (R11), X2 // c4c2795913 + VPBROADCASTQ X2, X2 // c4e27959d2 + VPBROADCASTQ X11, X2 // c4c27959d3 + VPBROADCASTQ (BX), X11 // c46279591b + VPBROADCASTQ (R11), X11 // c44279591b + VPBROADCASTQ X2, X11 // c4627959da + VPBROADCASTQ X11, X11 // c4427959db + VPBROADCASTQ (BX), Y2 // c4e27d5913 + VPBROADCASTQ (R11), Y2 // c4c27d5913 + VPBROADCASTQ X2, Y2 // c4e27d59d2 + VPBROADCASTQ X11, Y2 // c4c27d59d3 + VPBROADCASTQ (BX), Y11 // c4627d591b + VPBROADCASTQ (R11), Y11 // c4427d591b + VPBROADCASTQ X2, Y11 // c4627d59da + VPBROADCASTQ X11, Y11 // c4427d59db + VPBROADCASTW 
(BX), X2 // c4e2797913 + VPBROADCASTW (R11), X2 // c4c2797913 + VPBROADCASTW X2, X2 // c4e27979d2 + VPBROADCASTW X11, X2 // c4c27979d3 + VPBROADCASTW (BX), X11 // c46279791b + VPBROADCASTW (R11), X11 // c44279791b + VPBROADCASTW X2, X11 // c4627979da + VPBROADCASTW X11, X11 // c4427979db + VPBROADCASTW (BX), Y2 // c4e27d7913 + VPBROADCASTW (R11), Y2 // c4c27d7913 + VPBROADCASTW X2, Y2 // c4e27d79d2 + VPBROADCASTW X11, Y2 // c4c27d79d3 + VPBROADCASTW (BX), Y11 // c4627d791b + VPBROADCASTW (R11), Y11 // c4427d791b + VPBROADCASTW X2, Y11 // c4627d79da + VPBROADCASTW X11, Y11 // c4427d79db + VPCLMULQDQ $7, (BX), X9, X2 // c4e331441307 + VPCLMULQDQ $7, (R11), X9, X2 // c4c331441307 + VPCLMULQDQ $7, X2, X9, X2 // c4e33144d207 + VPCLMULQDQ $7, X11, X9, X2 // c4c33144d307 + VPCLMULQDQ $7, (BX), X9, X11 // c46331441b07 + VPCLMULQDQ $7, (R11), X9, X11 // c44331441b07 + VPCLMULQDQ $7, X2, X9, X11 // c4633144da07 + VPCLMULQDQ $7, X11, X9, X11 // c4433144db07 + VPCMPEQB (BX), X9, X2 // c4e1317413 or c5b17413 + VPCMPEQB (R11), X9, X2 // c4c1317413 + VPCMPEQB X2, X9, X2 // c4e13174d2 or c5b174d2 + VPCMPEQB X11, X9, X2 // c4c13174d3 + VPCMPEQB (BX), X9, X11 // c46131741b or c531741b + VPCMPEQB (R11), X9, X11 // c44131741b + VPCMPEQB X2, X9, X11 // c4613174da or c53174da + VPCMPEQB X11, X9, X11 // c4413174db + VPCMPEQB (BX), Y15, Y2 // c4e1057413 or c5857413 + VPCMPEQB (R11), Y15, Y2 // c4c1057413 + VPCMPEQB Y2, Y15, Y2 // c4e10574d2 or c58574d2 + VPCMPEQB Y11, Y15, Y2 // c4c10574d3 + VPCMPEQB (BX), Y15, Y11 // c46105741b or c505741b + VPCMPEQB (R11), Y15, Y11 // c44105741b + VPCMPEQB Y2, Y15, Y11 // c4610574da or c50574da + VPCMPEQB Y11, Y15, Y11 // c4410574db + VPCMPEQD (BX), X9, X2 // c4e1317613 or c5b17613 + VPCMPEQD (R11), X9, X2 // c4c1317613 + VPCMPEQD X2, X9, X2 // c4e13176d2 or c5b176d2 + VPCMPEQD X11, X9, X2 // c4c13176d3 + VPCMPEQD (BX), X9, X11 // c46131761b or c531761b + VPCMPEQD (R11), X9, X11 // c44131761b + VPCMPEQD X2, X9, X11 // c4613176da or c53176da + VPCMPEQD 
X11, X9, X11 // c4413176db + VPCMPEQD (BX), Y15, Y2 // c4e1057613 or c5857613 + VPCMPEQD (R11), Y15, Y2 // c4c1057613 + VPCMPEQD Y2, Y15, Y2 // c4e10576d2 or c58576d2 + VPCMPEQD Y11, Y15, Y2 // c4c10576d3 + VPCMPEQD (BX), Y15, Y11 // c46105761b or c505761b + VPCMPEQD (R11), Y15, Y11 // c44105761b + VPCMPEQD Y2, Y15, Y11 // c4610576da or c50576da + VPCMPEQD Y11, Y15, Y11 // c4410576db + VPCMPEQQ (BX), X9, X2 // c4e2312913 + VPCMPEQQ (R11), X9, X2 // c4c2312913 + VPCMPEQQ X2, X9, X2 // c4e23129d2 + VPCMPEQQ X11, X9, X2 // c4c23129d3 + VPCMPEQQ (BX), X9, X11 // c46231291b + VPCMPEQQ (R11), X9, X11 // c44231291b + VPCMPEQQ X2, X9, X11 // c4623129da + VPCMPEQQ X11, X9, X11 // c4423129db + VPCMPEQQ (BX), Y15, Y2 // c4e2052913 + VPCMPEQQ (R11), Y15, Y2 // c4c2052913 + VPCMPEQQ Y2, Y15, Y2 // c4e20529d2 + VPCMPEQQ Y11, Y15, Y2 // c4c20529d3 + VPCMPEQQ (BX), Y15, Y11 // c46205291b + VPCMPEQQ (R11), Y15, Y11 // c44205291b + VPCMPEQQ Y2, Y15, Y11 // c4620529da + VPCMPEQQ Y11, Y15, Y11 // c4420529db + VPCMPEQW (BX), X9, X2 // c4e1317513 or c5b17513 + VPCMPEQW (R11), X9, X2 // c4c1317513 + VPCMPEQW X2, X9, X2 // c4e13175d2 or c5b175d2 + VPCMPEQW X11, X9, X2 // c4c13175d3 + VPCMPEQW (BX), X9, X11 // c46131751b or c531751b + VPCMPEQW (R11), X9, X11 // c44131751b + VPCMPEQW X2, X9, X11 // c4613175da or c53175da + VPCMPEQW X11, X9, X11 // c4413175db + VPCMPEQW (BX), Y15, Y2 // c4e1057513 or c5857513 + VPCMPEQW (R11), Y15, Y2 // c4c1057513 + VPCMPEQW Y2, Y15, Y2 // c4e10575d2 or c58575d2 + VPCMPEQW Y11, Y15, Y2 // c4c10575d3 + VPCMPEQW (BX), Y15, Y11 // c46105751b or c505751b + VPCMPEQW (R11), Y15, Y11 // c44105751b + VPCMPEQW Y2, Y15, Y11 // c4610575da or c50575da + VPCMPEQW Y11, Y15, Y11 // c4410575db + VPCMPESTRI $7, (BX), X2 // c4e379611307 + VPCMPESTRI $7, (R11), X2 // c4c379611307 + VPCMPESTRI $7, X2, X2 // c4e37961d207 + VPCMPESTRI $7, X11, X2 // c4c37961d307 + VPCMPESTRI $7, (BX), X11 // c46379611b07 + VPCMPESTRI $7, (R11), X11 // c44379611b07 + VPCMPESTRI $7, X2, X11 // 
c4637961da07 + VPCMPESTRI $7, X11, X11 // c4437961db07 + VPCMPESTRM $7, (BX), X2 // c4e379601307 + VPCMPESTRM $7, (R11), X2 // c4c379601307 + VPCMPESTRM $7, X2, X2 // c4e37960d207 + VPCMPESTRM $7, X11, X2 // c4c37960d307 + VPCMPESTRM $7, (BX), X11 // c46379601b07 + VPCMPESTRM $7, (R11), X11 // c44379601b07 + VPCMPESTRM $7, X2, X11 // c4637960da07 + VPCMPESTRM $7, X11, X11 // c4437960db07 + VPCMPGTB (BX), X9, X2 // c4e1316413 or c5b16413 + VPCMPGTB (R11), X9, X2 // c4c1316413 + VPCMPGTB X2, X9, X2 // c4e13164d2 or c5b164d2 + VPCMPGTB X11, X9, X2 // c4c13164d3 + VPCMPGTB (BX), X9, X11 // c46131641b or c531641b + VPCMPGTB (R11), X9, X11 // c44131641b + VPCMPGTB X2, X9, X11 // c4613164da or c53164da + VPCMPGTB X11, X9, X11 // c4413164db + VPCMPGTB (BX), Y15, Y2 // c4e1056413 or c5856413 + VPCMPGTB (R11), Y15, Y2 // c4c1056413 + VPCMPGTB Y2, Y15, Y2 // c4e10564d2 or c58564d2 + VPCMPGTB Y11, Y15, Y2 // c4c10564d3 + VPCMPGTB (BX), Y15, Y11 // c46105641b or c505641b + VPCMPGTB (R11), Y15, Y11 // c44105641b + VPCMPGTB Y2, Y15, Y11 // c4610564da or c50564da + VPCMPGTB Y11, Y15, Y11 // c4410564db + VPCMPGTD (BX), X9, X2 // c4e1316613 or c5b16613 + VPCMPGTD (R11), X9, X2 // c4c1316613 + VPCMPGTD X2, X9, X2 // c4e13166d2 or c5b166d2 + VPCMPGTD X11, X9, X2 // c4c13166d3 + VPCMPGTD (BX), X9, X11 // c46131661b or c531661b + VPCMPGTD (R11), X9, X11 // c44131661b + VPCMPGTD X2, X9, X11 // c4613166da or c53166da + VPCMPGTD X11, X9, X11 // c4413166db + VPCMPGTD (BX), Y15, Y2 // c4e1056613 or c5856613 + VPCMPGTD (R11), Y15, Y2 // c4c1056613 + VPCMPGTD Y2, Y15, Y2 // c4e10566d2 or c58566d2 + VPCMPGTD Y11, Y15, Y2 // c4c10566d3 + VPCMPGTD (BX), Y15, Y11 // c46105661b or c505661b + VPCMPGTD (R11), Y15, Y11 // c44105661b + VPCMPGTD Y2, Y15, Y11 // c4610566da or c50566da + VPCMPGTD Y11, Y15, Y11 // c4410566db + VPCMPGTQ (BX), X9, X2 // c4e2313713 + VPCMPGTQ (R11), X9, X2 // c4c2313713 + VPCMPGTQ X2, X9, X2 // c4e23137d2 + VPCMPGTQ X11, X9, X2 // c4c23137d3 + VPCMPGTQ (BX), X9, X11 // 
c46231371b + VPCMPGTQ (R11), X9, X11 // c44231371b + VPCMPGTQ X2, X9, X11 // c4623137da + VPCMPGTQ X11, X9, X11 // c4423137db + VPCMPGTQ (BX), Y15, Y2 // c4e2053713 + VPCMPGTQ (R11), Y15, Y2 // c4c2053713 + VPCMPGTQ Y2, Y15, Y2 // c4e20537d2 + VPCMPGTQ Y11, Y15, Y2 // c4c20537d3 + VPCMPGTQ (BX), Y15, Y11 // c46205371b + VPCMPGTQ (R11), Y15, Y11 // c44205371b + VPCMPGTQ Y2, Y15, Y11 // c4620537da + VPCMPGTQ Y11, Y15, Y11 // c4420537db + VPCMPGTW (BX), X9, X2 // c4e1316513 or c5b16513 + VPCMPGTW (R11), X9, X2 // c4c1316513 + VPCMPGTW X2, X9, X2 // c4e13165d2 or c5b165d2 + VPCMPGTW X11, X9, X2 // c4c13165d3 + VPCMPGTW (BX), X9, X11 // c46131651b or c531651b + VPCMPGTW (R11), X9, X11 // c44131651b + VPCMPGTW X2, X9, X11 // c4613165da or c53165da + VPCMPGTW X11, X9, X11 // c4413165db + VPCMPGTW (BX), Y15, Y2 // c4e1056513 or c5856513 + VPCMPGTW (R11), Y15, Y2 // c4c1056513 + VPCMPGTW Y2, Y15, Y2 // c4e10565d2 or c58565d2 + VPCMPGTW Y11, Y15, Y2 // c4c10565d3 + VPCMPGTW (BX), Y15, Y11 // c46105651b or c505651b + VPCMPGTW (R11), Y15, Y11 // c44105651b + VPCMPGTW Y2, Y15, Y11 // c4610565da or c50565da + VPCMPGTW Y11, Y15, Y11 // c4410565db + VPCMPISTRI $7, (BX), X2 // c4e379631307 + VPCMPISTRI $7, (R11), X2 // c4c379631307 + VPCMPISTRI $7, X2, X2 // c4e37963d207 + VPCMPISTRI $7, X11, X2 // c4c37963d307 + VPCMPISTRI $7, (BX), X11 // c46379631b07 + VPCMPISTRI $7, (R11), X11 // c44379631b07 + VPCMPISTRI $7, X2, X11 // c4637963da07 + VPCMPISTRI $7, X11, X11 // c4437963db07 + VPCMPISTRM $7, (BX), X2 // c4e379621307 + VPCMPISTRM $7, (R11), X2 // c4c379621307 + VPCMPISTRM $7, X2, X2 // c4e37962d207 + VPCMPISTRM $7, X11, X2 // c4c37962d307 + VPCMPISTRM $7, (BX), X11 // c46379621b07 + VPCMPISTRM $7, (R11), X11 // c44379621b07 + VPCMPISTRM $7, X2, X11 // c4637962da07 + VPCMPISTRM $7, X11, X11 // c4437962db07 + VPERM2F128 $7, (BX), Y15, Y2 // c4e305061307 + VPERM2F128 $7, (R11), Y15, Y2 // c4c305061307 + VPERM2F128 $7, Y2, Y15, Y2 // c4e30506d207 + VPERM2F128 $7, Y11, Y15, Y2 // 
c4c30506d307 + VPERM2F128 $7, (BX), Y15, Y11 // c46305061b07 + VPERM2F128 $7, (R11), Y15, Y11 // c44305061b07 + VPERM2F128 $7, Y2, Y15, Y11 // c4630506da07 + VPERM2F128 $7, Y11, Y15, Y11 // c4430506db07 + VPERM2I128 $7, (BX), Y15, Y2 // c4e305461307 + VPERM2I128 $7, (R11), Y15, Y2 // c4c305461307 + VPERM2I128 $7, Y2, Y15, Y2 // c4e30546d207 + VPERM2I128 $7, Y11, Y15, Y2 // c4c30546d307 + VPERM2I128 $7, (BX), Y15, Y11 // c46305461b07 + VPERM2I128 $7, (R11), Y15, Y11 // c44305461b07 + VPERM2I128 $7, Y2, Y15, Y11 // c4630546da07 + VPERM2I128 $7, Y11, Y15, Y11 // c4430546db07 + VPERMD (BX), Y15, Y2 // c4e2053613 + VPERMD (R11), Y15, Y2 // c4c2053613 + VPERMD Y2, Y15, Y2 // c4e20536d2 + VPERMD Y11, Y15, Y2 // c4c20536d3 + VPERMD (BX), Y15, Y11 // c46205361b + VPERMD (R11), Y15, Y11 // c44205361b + VPERMD Y2, Y15, Y11 // c4620536da + VPERMD Y11, Y15, Y11 // c4420536db + VPERMILPD $7, (BX), X2 // c4e379051307 + VPERMILPD $7, (R11), X2 // c4c379051307 + VPERMILPD $7, X2, X2 // c4e37905d207 + VPERMILPD $7, X11, X2 // c4c37905d307 + VPERMILPD $7, (BX), X11 // c46379051b07 + VPERMILPD $7, (R11), X11 // c44379051b07 + VPERMILPD $7, X2, X11 // c4637905da07 + VPERMILPD $7, X11, X11 // c4437905db07 + VPERMILPD (BX), X9, X2 // c4e2310d13 + VPERMILPD (R11), X9, X2 // c4c2310d13 + VPERMILPD X2, X9, X2 // c4e2310dd2 + VPERMILPD X11, X9, X2 // c4c2310dd3 + VPERMILPD (BX), X9, X11 // c462310d1b + VPERMILPD (R11), X9, X11 // c442310d1b + VPERMILPD X2, X9, X11 // c462310dda + VPERMILPD X11, X9, X11 // c442310ddb + VPERMILPD $7, (BX), Y2 // c4e37d051307 + VPERMILPD $7, (R11), Y2 // c4c37d051307 + VPERMILPD $7, Y2, Y2 // c4e37d05d207 + VPERMILPD $7, Y11, Y2 // c4c37d05d307 + VPERMILPD $7, (BX), Y11 // c4637d051b07 + VPERMILPD $7, (R11), Y11 // c4437d051b07 + VPERMILPD $7, Y2, Y11 // c4637d05da07 + VPERMILPD $7, Y11, Y11 // c4437d05db07 + VPERMILPD (BX), Y15, Y2 // c4e2050d13 + VPERMILPD (R11), Y15, Y2 // c4c2050d13 + VPERMILPD Y2, Y15, Y2 // c4e2050dd2 + VPERMILPD Y11, Y15, Y2 // 
c4c2050dd3 + VPERMILPD (BX), Y15, Y11 // c462050d1b + VPERMILPD (R11), Y15, Y11 // c442050d1b + VPERMILPD Y2, Y15, Y11 // c462050dda + VPERMILPD Y11, Y15, Y11 // c442050ddb + VPERMILPS $7, (BX), X2 // c4e379041307 + VPERMILPS $7, (R11), X2 // c4c379041307 + VPERMILPS $7, X2, X2 // c4e37904d207 + VPERMILPS $7, X11, X2 // c4c37904d307 + VPERMILPS $7, (BX), X11 // c46379041b07 + VPERMILPS $7, (R11), X11 // c44379041b07 + VPERMILPS $7, X2, X11 // c4637904da07 + VPERMILPS $7, X11, X11 // c4437904db07 + VPERMILPS (BX), X9, X2 // c4e2310c13 + VPERMILPS (R11), X9, X2 // c4c2310c13 + VPERMILPS X2, X9, X2 // c4e2310cd2 + VPERMILPS X11, X9, X2 // c4c2310cd3 + VPERMILPS (BX), X9, X11 // c462310c1b + VPERMILPS (R11), X9, X11 // c442310c1b + VPERMILPS X2, X9, X11 // c462310cda + VPERMILPS X11, X9, X11 // c442310cdb + VPERMILPS $7, (BX), Y2 // c4e37d041307 + VPERMILPS $7, (R11), Y2 // c4c37d041307 + VPERMILPS $7, Y2, Y2 // c4e37d04d207 + VPERMILPS $7, Y11, Y2 // c4c37d04d307 + VPERMILPS $7, (BX), Y11 // c4637d041b07 + VPERMILPS $7, (R11), Y11 // c4437d041b07 + VPERMILPS $7, Y2, Y11 // c4637d04da07 + VPERMILPS $7, Y11, Y11 // c4437d04db07 + VPERMILPS (BX), Y15, Y2 // c4e2050c13 + VPERMILPS (R11), Y15, Y2 // c4c2050c13 + VPERMILPS Y2, Y15, Y2 // c4e2050cd2 + VPERMILPS Y11, Y15, Y2 // c4c2050cd3 + VPERMILPS (BX), Y15, Y11 // c462050c1b + VPERMILPS (R11), Y15, Y11 // c442050c1b + VPERMILPS Y2, Y15, Y11 // c462050cda + VPERMILPS Y11, Y15, Y11 // c442050cdb + VPERMPD $7, (BX), Y2 // c4e3fd011307 + VPERMPD $7, (R11), Y2 // c4c3fd011307 + VPERMPD $7, Y2, Y2 // c4e3fd01d207 + VPERMPD $7, Y11, Y2 // c4c3fd01d307 + VPERMPD $7, (BX), Y11 // c463fd011b07 + VPERMPD $7, (R11), Y11 // c443fd011b07 + VPERMPD $7, Y2, Y11 // c463fd01da07 + VPERMPD $7, Y11, Y11 // c443fd01db07 + VPERMPS (BX), Y15, Y2 // c4e2051613 + VPERMPS (R11), Y15, Y2 // c4c2051613 + VPERMPS Y2, Y15, Y2 // c4e20516d2 + VPERMPS Y11, Y15, Y2 // c4c20516d3 + VPERMPS (BX), Y15, Y11 // c46205161b + VPERMPS (R11), Y15, Y11 // 
c44205161b + VPERMPS Y2, Y15, Y11 // c4620516da + VPERMPS Y11, Y15, Y11 // c4420516db + VPERMQ $7, (BX), Y2 // c4e3fd001307 + VPERMQ $7, (R11), Y2 // c4c3fd001307 + VPERMQ $7, Y2, Y2 // c4e3fd00d207 + VPERMQ $7, Y11, Y2 // c4c3fd00d307 + VPERMQ $7, (BX), Y11 // c463fd001b07 + VPERMQ $7, (R11), Y11 // c443fd001b07 + VPERMQ $7, Y2, Y11 // c463fd00da07 + VPERMQ $7, Y11, Y11 // c443fd00db07 + VPEXTRB $7, X2, (BX) // c4e379141307 + VPEXTRB $7, X11, (BX) // c46379141b07 + VPEXTRB $7, X2, (R11) // c4c379141307 + VPEXTRB $7, X11, (R11) // c44379141b07 + VPEXTRB $7, X2, DX // c4e37914d207 + VPEXTRB $7, X11, DX // c4637914da07 + VPEXTRB $7, X2, R11 // c4c37914d307 + VPEXTRB $7, X11, R11 // c4437914db07 + VPEXTRD $7, X2, (BX) // c4e379161307 + VPEXTRD $7, X11, (BX) // c46379161b07 + VPEXTRD $7, X2, (R11) // c4c379161307 + VPEXTRD $7, X11, (R11) // c44379161b07 + VPEXTRD $7, X2, DX // c4e37916d207 + VPEXTRD $7, X11, DX // c4637916da07 + VPEXTRD $7, X2, R11 // c4c37916d307 + VPEXTRD $7, X11, R11 // c4437916db07 + VPEXTRQ $7, X2, (BX) // c4e3f9161307 + VPEXTRQ $7, X11, (BX) // c463f9161b07 + VPEXTRQ $7, X2, (R11) // c4c3f9161307 + VPEXTRQ $7, X11, (R11) // c443f9161b07 + VPEXTRQ $7, X2, DX // c4e3f916d207 + VPEXTRQ $7, X11, DX // c463f916da07 + VPEXTRQ $7, X2, R11 // c4c3f916d307 + VPEXTRQ $7, X11, R11 // c443f916db07 + VPEXTRW $7, X2, DX // c4e179c5d207 or c5f9c5d207 or c4e37915d207 + VPEXTRW $7, X11, DX // c4c179c5d307 or c4637915da07 + VPEXTRW $7, X2, R11 // c46179c5da07 or c579c5da07 or c4c37915d307 + VPEXTRW $7, X11, R11 // c44179c5db07 or c4437915db07 + VPEXTRW $7, X2, (BX) // c4e379151307 + VPEXTRW $7, X11, (BX) // c46379151b07 + VPEXTRW $7, X2, (R11) // c4c379151307 + VPEXTRW $7, X11, (R11) // c44379151b07 + VPHADDD (BX), X9, X2 // c4e2310213 + VPHADDD (R11), X9, X2 // c4c2310213 + VPHADDD X2, X9, X2 // c4e23102d2 + VPHADDD X11, X9, X2 // c4c23102d3 + VPHADDD (BX), X9, X11 // c46231021b + VPHADDD (R11), X9, X11 // c44231021b + VPHADDD X2, X9, X11 // c4623102da + VPHADDD 
X11, X9, X11 // c4423102db + VPHADDD (BX), Y15, Y2 // c4e2050213 + VPHADDD (R11), Y15, Y2 // c4c2050213 + VPHADDD Y2, Y15, Y2 // c4e20502d2 + VPHADDD Y11, Y15, Y2 // c4c20502d3 + VPHADDD (BX), Y15, Y11 // c46205021b + VPHADDD (R11), Y15, Y11 // c44205021b + VPHADDD Y2, Y15, Y11 // c4620502da + VPHADDD Y11, Y15, Y11 // c4420502db + VPHADDSW (BX), X9, X2 // c4e2310313 + VPHADDSW (R11), X9, X2 // c4c2310313 + VPHADDSW X2, X9, X2 // c4e23103d2 + VPHADDSW X11, X9, X2 // c4c23103d3 + VPHADDSW (BX), X9, X11 // c46231031b + VPHADDSW (R11), X9, X11 // c44231031b + VPHADDSW X2, X9, X11 // c4623103da + VPHADDSW X11, X9, X11 // c4423103db + VPHADDSW (BX), Y15, Y2 // c4e2050313 + VPHADDSW (R11), Y15, Y2 // c4c2050313 + VPHADDSW Y2, Y15, Y2 // c4e20503d2 + VPHADDSW Y11, Y15, Y2 // c4c20503d3 + VPHADDSW (BX), Y15, Y11 // c46205031b + VPHADDSW (R11), Y15, Y11 // c44205031b + VPHADDSW Y2, Y15, Y11 // c4620503da + VPHADDSW Y11, Y15, Y11 // c4420503db + VPHADDW (BX), X9, X2 // c4e2310113 + VPHADDW (R11), X9, X2 // c4c2310113 + VPHADDW X2, X9, X2 // c4e23101d2 + VPHADDW X11, X9, X2 // c4c23101d3 + VPHADDW (BX), X9, X11 // c46231011b + VPHADDW (R11), X9, X11 // c44231011b + VPHADDW X2, X9, X11 // c4623101da + VPHADDW X11, X9, X11 // c4423101db + VPHADDW (BX), Y15, Y2 // c4e2050113 + VPHADDW (R11), Y15, Y2 // c4c2050113 + VPHADDW Y2, Y15, Y2 // c4e20501d2 + VPHADDW Y11, Y15, Y2 // c4c20501d3 + VPHADDW (BX), Y15, Y11 // c46205011b + VPHADDW (R11), Y15, Y11 // c44205011b + VPHADDW Y2, Y15, Y11 // c4620501da + VPHADDW Y11, Y15, Y11 // c4420501db + VPHMINPOSUW (BX), X2 // c4e2794113 + VPHMINPOSUW (R11), X2 // c4c2794113 + VPHMINPOSUW X2, X2 // c4e27941d2 + VPHMINPOSUW X11, X2 // c4c27941d3 + VPHMINPOSUW (BX), X11 // c46279411b + VPHMINPOSUW (R11), X11 // c44279411b + VPHMINPOSUW X2, X11 // c4627941da + VPHMINPOSUW X11, X11 // c4427941db + VPHSUBD (BX), X9, X2 // c4e2310613 + VPHSUBD (R11), X9, X2 // c4c2310613 + VPHSUBD X2, X9, X2 // c4e23106d2 + VPHSUBD X11, X9, X2 // c4c23106d3 + VPHSUBD 
(BX), X9, X11 // c46231061b + VPHSUBD (R11), X9, X11 // c44231061b + VPHSUBD X2, X9, X11 // c4623106da + VPHSUBD X11, X9, X11 // c4423106db + VPHSUBD (BX), Y15, Y2 // c4e2050613 + VPHSUBD (R11), Y15, Y2 // c4c2050613 + VPHSUBD Y2, Y15, Y2 // c4e20506d2 + VPHSUBD Y11, Y15, Y2 // c4c20506d3 + VPHSUBD (BX), Y15, Y11 // c46205061b + VPHSUBD (R11), Y15, Y11 // c44205061b + VPHSUBD Y2, Y15, Y11 // c4620506da + VPHSUBD Y11, Y15, Y11 // c4420506db + VPHSUBSW (BX), X9, X2 // c4e2310713 + VPHSUBSW (R11), X9, X2 // c4c2310713 + VPHSUBSW X2, X9, X2 // c4e23107d2 + VPHSUBSW X11, X9, X2 // c4c23107d3 + VPHSUBSW (BX), X9, X11 // c46231071b + VPHSUBSW (R11), X9, X11 // c44231071b + VPHSUBSW X2, X9, X11 // c4623107da + VPHSUBSW X11, X9, X11 // c4423107db + VPHSUBSW (BX), Y15, Y2 // c4e2050713 + VPHSUBSW (R11), Y15, Y2 // c4c2050713 + VPHSUBSW Y2, Y15, Y2 // c4e20507d2 + VPHSUBSW Y11, Y15, Y2 // c4c20507d3 + VPHSUBSW (BX), Y15, Y11 // c46205071b + VPHSUBSW (R11), Y15, Y11 // c44205071b + VPHSUBSW Y2, Y15, Y11 // c4620507da + VPHSUBSW Y11, Y15, Y11 // c4420507db + VPHSUBW (BX), X9, X2 // c4e2310513 + VPHSUBW (R11), X9, X2 // c4c2310513 + VPHSUBW X2, X9, X2 // c4e23105d2 + VPHSUBW X11, X9, X2 // c4c23105d3 + VPHSUBW (BX), X9, X11 // c46231051b + VPHSUBW (R11), X9, X11 // c44231051b + VPHSUBW X2, X9, X11 // c4623105da + VPHSUBW X11, X9, X11 // c4423105db + VPHSUBW (BX), Y15, Y2 // c4e2050513 + VPHSUBW (R11), Y15, Y2 // c4c2050513 + VPHSUBW Y2, Y15, Y2 // c4e20505d2 + VPHSUBW Y11, Y15, Y2 // c4c20505d3 + VPHSUBW (BX), Y15, Y11 // c46205051b + VPHSUBW (R11), Y15, Y11 // c44205051b + VPHSUBW Y2, Y15, Y11 // c4620505da + VPHSUBW Y11, Y15, Y11 // c4420505db + VPINSRB $7, (BX), X9, X2 // c4e331201307 + VPINSRB $7, (R11), X9, X2 // c4c331201307 + VPINSRB $7, DX, X9, X2 // c4e33120d207 + VPINSRB $7, R11, X9, X2 // c4c33120d307 + VPINSRB $7, (BX), X9, X11 // c46331201b07 + VPINSRB $7, (R11), X9, X11 // c44331201b07 + VPINSRB $7, DX, X9, X11 // c4633120da07 + VPINSRB $7, R11, X9, X11 // 
c4433120db07 + VPINSRD $7, (BX), X9, X2 // c4e331221307 + VPINSRD $7, (R11), X9, X2 // c4c331221307 + VPINSRD $7, DX, X9, X2 // c4e33122d207 + VPINSRD $7, R11, X9, X2 // c4c33122d307 + VPINSRD $7, (BX), X9, X11 // c46331221b07 + VPINSRD $7, (R11), X9, X11 // c44331221b07 + VPINSRD $7, DX, X9, X11 // c4633122da07 + VPINSRD $7, R11, X9, X11 // c4433122db07 + VPINSRQ $7, (BX), X9, X2 // c4e3b1221307 + VPINSRQ $7, (R11), X9, X2 // c4c3b1221307 + VPINSRQ $7, DX, X9, X2 // c4e3b122d207 + VPINSRQ $7, R11, X9, X2 // c4c3b122d307 + VPINSRQ $7, (BX), X9, X11 // c463b1221b07 + VPINSRQ $7, (R11), X9, X11 // c443b1221b07 + VPINSRQ $7, DX, X9, X11 // c463b122da07 + VPINSRQ $7, R11, X9, X11 // c443b122db07 + VPINSRW $7, (BX), X9, X2 // c4e131c41307 or c5b1c41307 + VPINSRW $7, (R11), X9, X2 // c4c131c41307 + VPINSRW $7, DX, X9, X2 // c4e131c4d207 or c5b1c4d207 + VPINSRW $7, R11, X9, X2 // c4c131c4d307 + VPINSRW $7, (BX), X9, X11 // c46131c41b07 or c531c41b07 + VPINSRW $7, (R11), X9, X11 // c44131c41b07 + VPINSRW $7, DX, X9, X11 // c46131c4da07 or c531c4da07 + VPINSRW $7, R11, X9, X11 // c44131c4db07 + VPMADDUBSW (BX), X9, X2 // c4e2310413 + VPMADDUBSW (R11), X9, X2 // c4c2310413 + VPMADDUBSW X2, X9, X2 // c4e23104d2 + VPMADDUBSW X11, X9, X2 // c4c23104d3 + VPMADDUBSW (BX), X9, X11 // c46231041b + VPMADDUBSW (R11), X9, X11 // c44231041b + VPMADDUBSW X2, X9, X11 // c4623104da + VPMADDUBSW X11, X9, X11 // c4423104db + VPMADDUBSW (BX), Y15, Y2 // c4e2050413 + VPMADDUBSW (R11), Y15, Y2 // c4c2050413 + VPMADDUBSW Y2, Y15, Y2 // c4e20504d2 + VPMADDUBSW Y11, Y15, Y2 // c4c20504d3 + VPMADDUBSW (BX), Y15, Y11 // c46205041b + VPMADDUBSW (R11), Y15, Y11 // c44205041b + VPMADDUBSW Y2, Y15, Y11 // c4620504da + VPMADDUBSW Y11, Y15, Y11 // c4420504db + VPMADDWD (BX), X9, X2 // c4e131f513 or c5b1f513 + VPMADDWD (R11), X9, X2 // c4c131f513 + VPMADDWD X2, X9, X2 // c4e131f5d2 or c5b1f5d2 + VPMADDWD X11, X9, X2 // c4c131f5d3 + VPMADDWD (BX), X9, X11 // c46131f51b or c531f51b + VPMADDWD (R11), X9, X11 
// c44131f51b + VPMADDWD X2, X9, X11 // c46131f5da or c531f5da + VPMADDWD X11, X9, X11 // c44131f5db + VPMADDWD (BX), Y15, Y2 // c4e105f513 or c585f513 + VPMADDWD (R11), Y15, Y2 // c4c105f513 + VPMADDWD Y2, Y15, Y2 // c4e105f5d2 or c585f5d2 + VPMADDWD Y11, Y15, Y2 // c4c105f5d3 + VPMADDWD (BX), Y15, Y11 // c46105f51b or c505f51b + VPMADDWD (R11), Y15, Y11 // c44105f51b + VPMADDWD Y2, Y15, Y11 // c46105f5da or c505f5da + VPMADDWD Y11, Y15, Y11 // c44105f5db + VPMASKMOVD X2, X9, (BX) // c4e2318e13 + VPMASKMOVD X11, X9, (BX) // c462318e1b + VPMASKMOVD X2, X9, (R11) // c4c2318e13 + VPMASKMOVD X11, X9, (R11) // c442318e1b + VPMASKMOVD Y2, Y15, (BX) // c4e2058e13 + VPMASKMOVD Y11, Y15, (BX) // c462058e1b + VPMASKMOVD Y2, Y15, (R11) // c4c2058e13 + VPMASKMOVD Y11, Y15, (R11) // c442058e1b + VPMASKMOVD (BX), X9, X2 // c4e2318c13 + VPMASKMOVD (R11), X9, X2 // c4c2318c13 + VPMASKMOVD (BX), X9, X11 // c462318c1b + VPMASKMOVD (R11), X9, X11 // c442318c1b + VPMASKMOVD (BX), Y15, Y2 // c4e2058c13 + VPMASKMOVD (R11), Y15, Y2 // c4c2058c13 + VPMASKMOVD (BX), Y15, Y11 // c462058c1b + VPMASKMOVD (R11), Y15, Y11 // c442058c1b + VPMASKMOVQ X2, X9, (BX) // c4e2b18e13 + VPMASKMOVQ X11, X9, (BX) // c462b18e1b + VPMASKMOVQ X2, X9, (R11) // c4c2b18e13 + VPMASKMOVQ X11, X9, (R11) // c442b18e1b + VPMASKMOVQ Y2, Y15, (BX) // c4e2858e13 + VPMASKMOVQ Y11, Y15, (BX) // c462858e1b + VPMASKMOVQ Y2, Y15, (R11) // c4c2858e13 + VPMASKMOVQ Y11, Y15, (R11) // c442858e1b + VPMASKMOVQ (BX), X9, X2 // c4e2b18c13 + VPMASKMOVQ (R11), X9, X2 // c4c2b18c13 + VPMASKMOVQ (BX), X9, X11 // c462b18c1b + VPMASKMOVQ (R11), X9, X11 // c442b18c1b + VPMASKMOVQ (BX), Y15, Y2 // c4e2858c13 + VPMASKMOVQ (R11), Y15, Y2 // c4c2858c13 + VPMASKMOVQ (BX), Y15, Y11 // c462858c1b + VPMASKMOVQ (R11), Y15, Y11 // c442858c1b + VPMAXSB (BX), X9, X2 // c4e2313c13 + VPMAXSB (R11), X9, X2 // c4c2313c13 + VPMAXSB X2, X9, X2 // c4e2313cd2 + VPMAXSB X11, X9, X2 // c4c2313cd3 + VPMAXSB (BX), X9, X11 // c462313c1b + VPMAXSB (R11), X9, X11 
// c442313c1b + VPMAXSB X2, X9, X11 // c462313cda + VPMAXSB X11, X9, X11 // c442313cdb + VPMAXSB (BX), Y15, Y2 // c4e2053c13 + VPMAXSB (R11), Y15, Y2 // c4c2053c13 + VPMAXSB Y2, Y15, Y2 // c4e2053cd2 + VPMAXSB Y11, Y15, Y2 // c4c2053cd3 + VPMAXSB (BX), Y15, Y11 // c462053c1b + VPMAXSB (R11), Y15, Y11 // c442053c1b + VPMAXSB Y2, Y15, Y11 // c462053cda + VPMAXSB Y11, Y15, Y11 // c442053cdb + VPMAXSD (BX), X9, X2 // c4e2313d13 + VPMAXSD (R11), X9, X2 // c4c2313d13 + VPMAXSD X2, X9, X2 // c4e2313dd2 + VPMAXSD X11, X9, X2 // c4c2313dd3 + VPMAXSD (BX), X9, X11 // c462313d1b + VPMAXSD (R11), X9, X11 // c442313d1b + VPMAXSD X2, X9, X11 // c462313dda + VPMAXSD X11, X9, X11 // c442313ddb + VPMAXSD (BX), Y15, Y2 // c4e2053d13 + VPMAXSD (R11), Y15, Y2 // c4c2053d13 + VPMAXSD Y2, Y15, Y2 // c4e2053dd2 + VPMAXSD Y11, Y15, Y2 // c4c2053dd3 + VPMAXSD (BX), Y15, Y11 // c462053d1b + VPMAXSD (R11), Y15, Y11 // c442053d1b + VPMAXSD Y2, Y15, Y11 // c462053dda + VPMAXSD Y11, Y15, Y11 // c442053ddb + VPMAXSW (BX), X9, X2 // c4e131ee13 or c5b1ee13 + VPMAXSW (R11), X9, X2 // c4c131ee13 + VPMAXSW X2, X9, X2 // c4e131eed2 or c5b1eed2 + VPMAXSW X11, X9, X2 // c4c131eed3 + VPMAXSW (BX), X9, X11 // c46131ee1b or c531ee1b + VPMAXSW (R11), X9, X11 // c44131ee1b + VPMAXSW X2, X9, X11 // c46131eeda or c531eeda + VPMAXSW X11, X9, X11 // c44131eedb + VPMAXSW (BX), Y15, Y2 // c4e105ee13 or c585ee13 + VPMAXSW (R11), Y15, Y2 // c4c105ee13 + VPMAXSW Y2, Y15, Y2 // c4e105eed2 or c585eed2 + VPMAXSW Y11, Y15, Y2 // c4c105eed3 + VPMAXSW (BX), Y15, Y11 // c46105ee1b or c505ee1b + VPMAXSW (R11), Y15, Y11 // c44105ee1b + VPMAXSW Y2, Y15, Y11 // c46105eeda or c505eeda + VPMAXSW Y11, Y15, Y11 // c44105eedb + VPMAXUB (BX), X9, X2 // c4e131de13 or c5b1de13 + VPMAXUB (R11), X9, X2 // c4c131de13 + VPMAXUB X2, X9, X2 // c4e131ded2 or c5b1ded2 + VPMAXUB X11, X9, X2 // c4c131ded3 + VPMAXUB (BX), X9, X11 // c46131de1b or c531de1b + VPMAXUB (R11), X9, X11 // c44131de1b + VPMAXUB X2, X9, X11 // c46131deda or c531deda + 
VPMAXUB X11, X9, X11 // c44131dedb + VPMAXUB (BX), Y15, Y2 // c4e105de13 or c585de13 + VPMAXUB (R11), Y15, Y2 // c4c105de13 + VPMAXUB Y2, Y15, Y2 // c4e105ded2 or c585ded2 + VPMAXUB Y11, Y15, Y2 // c4c105ded3 + VPMAXUB (BX), Y15, Y11 // c46105de1b or c505de1b + VPMAXUB (R11), Y15, Y11 // c44105de1b + VPMAXUB Y2, Y15, Y11 // c46105deda or c505deda + VPMAXUB Y11, Y15, Y11 // c44105dedb + VPMAXUD (BX), X9, X2 // c4e2313f13 + VPMAXUD (R11), X9, X2 // c4c2313f13 + VPMAXUD X2, X9, X2 // c4e2313fd2 + VPMAXUD X11, X9, X2 // c4c2313fd3 + VPMAXUD (BX), X9, X11 // c462313f1b + VPMAXUD (R11), X9, X11 // c442313f1b + VPMAXUD X2, X9, X11 // c462313fda + VPMAXUD X11, X9, X11 // c442313fdb + VPMAXUD (BX), Y15, Y2 // c4e2053f13 + VPMAXUD (R11), Y15, Y2 // c4c2053f13 + VPMAXUD Y2, Y15, Y2 // c4e2053fd2 + VPMAXUD Y11, Y15, Y2 // c4c2053fd3 + VPMAXUD (BX), Y15, Y11 // c462053f1b + VPMAXUD (R11), Y15, Y11 // c442053f1b + VPMAXUD Y2, Y15, Y11 // c462053fda + VPMAXUD Y11, Y15, Y11 // c442053fdb + VPMAXUW (BX), X9, X2 // c4e2313e13 + VPMAXUW (R11), X9, X2 // c4c2313e13 + VPMAXUW X2, X9, X2 // c4e2313ed2 + VPMAXUW X11, X9, X2 // c4c2313ed3 + VPMAXUW (BX), X9, X11 // c462313e1b + VPMAXUW (R11), X9, X11 // c442313e1b + VPMAXUW X2, X9, X11 // c462313eda + VPMAXUW X11, X9, X11 // c442313edb + VPMAXUW (BX), Y15, Y2 // c4e2053e13 + VPMAXUW (R11), Y15, Y2 // c4c2053e13 + VPMAXUW Y2, Y15, Y2 // c4e2053ed2 + VPMAXUW Y11, Y15, Y2 // c4c2053ed3 + VPMAXUW (BX), Y15, Y11 // c462053e1b + VPMAXUW (R11), Y15, Y11 // c442053e1b + VPMAXUW Y2, Y15, Y11 // c462053eda + VPMAXUW Y11, Y15, Y11 // c442053edb + VPMINSB (BX), X9, X2 // c4e2313813 + VPMINSB (R11), X9, X2 // c4c2313813 + VPMINSB X2, X9, X2 // c4e23138d2 + VPMINSB X11, X9, X2 // c4c23138d3 + VPMINSB (BX), X9, X11 // c46231381b + VPMINSB (R11), X9, X11 // c44231381b + VPMINSB X2, X9, X11 // c4623138da + VPMINSB X11, X9, X11 // c4423138db + VPMINSB (BX), Y15, Y2 // c4e2053813 + VPMINSB (R11), Y15, Y2 // c4c2053813 + VPMINSB Y2, Y15, Y2 // c4e20538d2 + 
VPMINSB Y11, Y15, Y2 // c4c20538d3 + VPMINSB (BX), Y15, Y11 // c46205381b + VPMINSB (R11), Y15, Y11 // c44205381b + VPMINSB Y2, Y15, Y11 // c4620538da + VPMINSB Y11, Y15, Y11 // c4420538db + VPMINSD (BX), X9, X2 // c4e2313913 + VPMINSD (R11), X9, X2 // c4c2313913 + VPMINSD X2, X9, X2 // c4e23139d2 + VPMINSD X11, X9, X2 // c4c23139d3 + VPMINSD (BX), X9, X11 // c46231391b + VPMINSD (R11), X9, X11 // c44231391b + VPMINSD X2, X9, X11 // c4623139da + VPMINSD X11, X9, X11 // c4423139db + VPMINSD (BX), Y15, Y2 // c4e2053913 + VPMINSD (R11), Y15, Y2 // c4c2053913 + VPMINSD Y2, Y15, Y2 // c4e20539d2 + VPMINSD Y11, Y15, Y2 // c4c20539d3 + VPMINSD (BX), Y15, Y11 // c46205391b + VPMINSD (R11), Y15, Y11 // c44205391b + VPMINSD Y2, Y15, Y11 // c4620539da + VPMINSD Y11, Y15, Y11 // c4420539db + VPMINSW (BX), X9, X2 // c4e131ea13 or c5b1ea13 + VPMINSW (R11), X9, X2 // c4c131ea13 + VPMINSW X2, X9, X2 // c4e131ead2 or c5b1ead2 + VPMINSW X11, X9, X2 // c4c131ead3 + VPMINSW (BX), X9, X11 // c46131ea1b or c531ea1b + VPMINSW (R11), X9, X11 // c44131ea1b + VPMINSW X2, X9, X11 // c46131eada or c531eada + VPMINSW X11, X9, X11 // c44131eadb + VPMINSW (BX), Y15, Y2 // c4e105ea13 or c585ea13 + VPMINSW (R11), Y15, Y2 // c4c105ea13 + VPMINSW Y2, Y15, Y2 // c4e105ead2 or c585ead2 + VPMINSW Y11, Y15, Y2 // c4c105ead3 + VPMINSW (BX), Y15, Y11 // c46105ea1b or c505ea1b + VPMINSW (R11), Y15, Y11 // c44105ea1b + VPMINSW Y2, Y15, Y11 // c46105eada or c505eada + VPMINSW Y11, Y15, Y11 // c44105eadb + VPMINUB (BX), X9, X2 // c4e131da13 or c5b1da13 + VPMINUB (R11), X9, X2 // c4c131da13 + VPMINUB X2, X9, X2 // c4e131dad2 or c5b1dad2 + VPMINUB X11, X9, X2 // c4c131dad3 + VPMINUB (BX), X9, X11 // c46131da1b or c531da1b + VPMINUB (R11), X9, X11 // c44131da1b + VPMINUB X2, X9, X11 // c46131dada or c531dada + VPMINUB X11, X9, X11 // c44131dadb + VPMINUB (BX), Y15, Y2 // c4e105da13 or c585da13 + VPMINUB (R11), Y15, Y2 // c4c105da13 + VPMINUB Y2, Y15, Y2 // c4e105dad2 or c585dad2 + VPMINUB Y11, Y15, Y2 // 
c4c105dad3 + VPMINUB (BX), Y15, Y11 // c46105da1b or c505da1b + VPMINUB (R11), Y15, Y11 // c44105da1b + VPMINUB Y2, Y15, Y11 // c46105dada or c505dada + VPMINUB Y11, Y15, Y11 // c44105dadb + VPMINUD (BX), X9, X2 // c4e2313b13 + VPMINUD (R11), X9, X2 // c4c2313b13 + VPMINUD X2, X9, X2 // c4e2313bd2 + VPMINUD X11, X9, X2 // c4c2313bd3 + VPMINUD (BX), X9, X11 // c462313b1b + VPMINUD (R11), X9, X11 // c442313b1b + VPMINUD X2, X9, X11 // c462313bda + VPMINUD X11, X9, X11 // c442313bdb + VPMINUD (BX), Y15, Y2 // c4e2053b13 + VPMINUD (R11), Y15, Y2 // c4c2053b13 + VPMINUD Y2, Y15, Y2 // c4e2053bd2 + VPMINUD Y11, Y15, Y2 // c4c2053bd3 + VPMINUD (BX), Y15, Y11 // c462053b1b + VPMINUD (R11), Y15, Y11 // c442053b1b + VPMINUD Y2, Y15, Y11 // c462053bda + VPMINUD Y11, Y15, Y11 // c442053bdb + VPMINUW (BX), X9, X2 // c4e2313a13 + VPMINUW (R11), X9, X2 // c4c2313a13 + VPMINUW X2, X9, X2 // c4e2313ad2 + VPMINUW X11, X9, X2 // c4c2313ad3 + VPMINUW (BX), X9, X11 // c462313a1b + VPMINUW (R11), X9, X11 // c442313a1b + VPMINUW X2, X9, X11 // c462313ada + VPMINUW X11, X9, X11 // c442313adb + VPMINUW (BX), Y15, Y2 // c4e2053a13 + VPMINUW (R11), Y15, Y2 // c4c2053a13 + VPMINUW Y2, Y15, Y2 // c4e2053ad2 + VPMINUW Y11, Y15, Y2 // c4c2053ad3 + VPMINUW (BX), Y15, Y11 // c462053a1b + VPMINUW (R11), Y15, Y11 // c442053a1b + VPMINUW Y2, Y15, Y11 // c462053ada + VPMINUW Y11, Y15, Y11 // c442053adb + VPMOVMSKB X2, DX // c4e179d7d2 or c5f9d7d2 + VPMOVMSKB X11, DX // c4c179d7d3 + VPMOVMSKB X2, R11 // c46179d7da or c579d7da + VPMOVMSKB X11, R11 // c44179d7db + VPMOVMSKB Y2, DX // c4e17dd7d2 or c5fdd7d2 + VPMOVMSKB Y11, DX // c4c17dd7d3 + VPMOVMSKB Y2, R11 // c4617dd7da or c57dd7da + VPMOVMSKB Y11, R11 // c4417dd7db + VPMOVSXBD (BX), X2 // c4e2792113 + VPMOVSXBD (R11), X2 // c4c2792113 + VPMOVSXBD X2, X2 // c4e27921d2 + VPMOVSXBD X11, X2 // c4c27921d3 + VPMOVSXBD (BX), X11 // c46279211b + VPMOVSXBD (R11), X11 // c44279211b + VPMOVSXBD X2, X11 // c4627921da + VPMOVSXBD X11, X11 // c4427921db + 
VPMOVSXBD (BX), Y2 // c4e27d2113 + VPMOVSXBD (R11), Y2 // c4c27d2113 + VPMOVSXBD X2, Y2 // c4e27d21d2 + VPMOVSXBD X11, Y2 // c4c27d21d3 + VPMOVSXBD (BX), Y11 // c4627d211b + VPMOVSXBD (R11), Y11 // c4427d211b + VPMOVSXBD X2, Y11 // c4627d21da + VPMOVSXBD X11, Y11 // c4427d21db + VPMOVSXBQ (BX), X2 // c4e2792213 + VPMOVSXBQ (R11), X2 // c4c2792213 + VPMOVSXBQ X2, X2 // c4e27922d2 + VPMOVSXBQ X11, X2 // c4c27922d3 + VPMOVSXBQ (BX), X11 // c46279221b + VPMOVSXBQ (R11), X11 // c44279221b + VPMOVSXBQ X2, X11 // c4627922da + VPMOVSXBQ X11, X11 // c4427922db + VPMOVSXBQ (BX), Y2 // c4e27d2213 + VPMOVSXBQ (R11), Y2 // c4c27d2213 + VPMOVSXBQ X2, Y2 // c4e27d22d2 + VPMOVSXBQ X11, Y2 // c4c27d22d3 + VPMOVSXBQ (BX), Y11 // c4627d221b + VPMOVSXBQ (R11), Y11 // c4427d221b + VPMOVSXBQ X2, Y11 // c4627d22da + VPMOVSXBQ X11, Y11 // c4427d22db + VPMOVSXBW (BX), X2 // c4e2792013 + VPMOVSXBW (R11), X2 // c4c2792013 + VPMOVSXBW X2, X2 // c4e27920d2 + VPMOVSXBW X11, X2 // c4c27920d3 + VPMOVSXBW (BX), X11 // c46279201b + VPMOVSXBW (R11), X11 // c44279201b + VPMOVSXBW X2, X11 // c4627920da + VPMOVSXBW X11, X11 // c4427920db + VPMOVSXBW (BX), Y2 // c4e27d2013 + VPMOVSXBW (R11), Y2 // c4c27d2013 + VPMOVSXBW X2, Y2 // c4e27d20d2 + VPMOVSXBW X11, Y2 // c4c27d20d3 + VPMOVSXBW (BX), Y11 // c4627d201b + VPMOVSXBW (R11), Y11 // c4427d201b + VPMOVSXBW X2, Y11 // c4627d20da + VPMOVSXBW X11, Y11 // c4427d20db + VPMOVSXDQ (BX), X2 // c4e2792513 + VPMOVSXDQ (R11), X2 // c4c2792513 + VPMOVSXDQ X2, X2 // c4e27925d2 + VPMOVSXDQ X11, X2 // c4c27925d3 + VPMOVSXDQ (BX), X11 // c46279251b + VPMOVSXDQ (R11), X11 // c44279251b + VPMOVSXDQ X2, X11 // c4627925da + VPMOVSXDQ X11, X11 // c4427925db + VPMOVSXDQ (BX), Y2 // c4e27d2513 + VPMOVSXDQ (R11), Y2 // c4c27d2513 + VPMOVSXDQ X2, Y2 // c4e27d25d2 + VPMOVSXDQ X11, Y2 // c4c27d25d3 + VPMOVSXDQ (BX), Y11 // c4627d251b + VPMOVSXDQ (R11), Y11 // c4427d251b + VPMOVSXDQ X2, Y11 // c4627d25da + VPMOVSXDQ X11, Y11 // c4427d25db + VPMOVSXWD (BX), X2 // c4e2792313 + 
VPMOVSXWD (R11), X2 // c4c2792313 + VPMOVSXWD X2, X2 // c4e27923d2 + VPMOVSXWD X11, X2 // c4c27923d3 + VPMOVSXWD (BX), X11 // c46279231b + VPMOVSXWD (R11), X11 // c44279231b + VPMOVSXWD X2, X11 // c4627923da + VPMOVSXWD X11, X11 // c4427923db + VPMOVSXWD (BX), Y2 // c4e27d2313 + VPMOVSXWD (R11), Y2 // c4c27d2313 + VPMOVSXWD X2, Y2 // c4e27d23d2 + VPMOVSXWD X11, Y2 // c4c27d23d3 + VPMOVSXWD (BX), Y11 // c4627d231b + VPMOVSXWD (R11), Y11 // c4427d231b + VPMOVSXWD X2, Y11 // c4627d23da + VPMOVSXWD X11, Y11 // c4427d23db + VPMOVSXWQ (BX), X2 // c4e2792413 + VPMOVSXWQ (R11), X2 // c4c2792413 + VPMOVSXWQ X2, X2 // c4e27924d2 + VPMOVSXWQ X11, X2 // c4c27924d3 + VPMOVSXWQ (BX), X11 // c46279241b + VPMOVSXWQ (R11), X11 // c44279241b + VPMOVSXWQ X2, X11 // c4627924da + VPMOVSXWQ X11, X11 // c4427924db + VPMOVSXWQ (BX), Y2 // c4e27d2413 + VPMOVSXWQ (R11), Y2 // c4c27d2413 + VPMOVSXWQ X2, Y2 // c4e27d24d2 + VPMOVSXWQ X11, Y2 // c4c27d24d3 + VPMOVSXWQ (BX), Y11 // c4627d241b + VPMOVSXWQ (R11), Y11 // c4427d241b + VPMOVSXWQ X2, Y11 // c4627d24da + VPMOVSXWQ X11, Y11 // c4427d24db + VPMOVZXBD (BX), X2 // c4e2793113 + VPMOVZXBD (R11), X2 // c4c2793113 + VPMOVZXBD X2, X2 // c4e27931d2 + VPMOVZXBD X11, X2 // c4c27931d3 + VPMOVZXBD (BX), X11 // c46279311b + VPMOVZXBD (R11), X11 // c44279311b + VPMOVZXBD X2, X11 // c4627931da + VPMOVZXBD X11, X11 // c4427931db + VPMOVZXBD (BX), Y2 // c4e27d3113 + VPMOVZXBD (R11), Y2 // c4c27d3113 + VPMOVZXBD X2, Y2 // c4e27d31d2 + VPMOVZXBD X11, Y2 // c4c27d31d3 + VPMOVZXBD (BX), Y11 // c4627d311b + VPMOVZXBD (R11), Y11 // c4427d311b + VPMOVZXBD X2, Y11 // c4627d31da + VPMOVZXBD X11, Y11 // c4427d31db + VPMOVZXBQ (BX), X2 // c4e2793213 + VPMOVZXBQ (R11), X2 // c4c2793213 + VPMOVZXBQ X2, X2 // c4e27932d2 + VPMOVZXBQ X11, X2 // c4c27932d3 + VPMOVZXBQ (BX), X11 // c46279321b + VPMOVZXBQ (R11), X11 // c44279321b + VPMOVZXBQ X2, X11 // c4627932da + VPMOVZXBQ X11, X11 // c4427932db + VPMOVZXBQ (BX), Y2 // c4e27d3213 + VPMOVZXBQ (R11), Y2 // c4c27d3213 + 
VPMOVZXBQ X2, Y2 // c4e27d32d2 + VPMOVZXBQ X11, Y2 // c4c27d32d3 + VPMOVZXBQ (BX), Y11 // c4627d321b + VPMOVZXBQ (R11), Y11 // c4427d321b + VPMOVZXBQ X2, Y11 // c4627d32da + VPMOVZXBQ X11, Y11 // c4427d32db + VPMOVZXBW (BX), X2 // c4e2793013 + VPMOVZXBW (R11), X2 // c4c2793013 + VPMOVZXBW X2, X2 // c4e27930d2 + VPMOVZXBW X11, X2 // c4c27930d3 + VPMOVZXBW (BX), X11 // c46279301b + VPMOVZXBW (R11), X11 // c44279301b + VPMOVZXBW X2, X11 // c4627930da + VPMOVZXBW X11, X11 // c4427930db + VPMOVZXBW (BX), Y2 // c4e27d3013 + VPMOVZXBW (R11), Y2 // c4c27d3013 + VPMOVZXBW X2, Y2 // c4e27d30d2 + VPMOVZXBW X11, Y2 // c4c27d30d3 + VPMOVZXBW (BX), Y11 // c4627d301b + VPMOVZXBW (R11), Y11 // c4427d301b + VPMOVZXBW X2, Y11 // c4627d30da + VPMOVZXBW X11, Y11 // c4427d30db + VPMOVZXDQ (BX), X2 // c4e2793513 + VPMOVZXDQ (R11), X2 // c4c2793513 + VPMOVZXDQ X2, X2 // c4e27935d2 + VPMOVZXDQ X11, X2 // c4c27935d3 + VPMOVZXDQ (BX), X11 // c46279351b + VPMOVZXDQ (R11), X11 // c44279351b + VPMOVZXDQ X2, X11 // c4627935da + VPMOVZXDQ X11, X11 // c4427935db + VPMOVZXDQ (BX), Y2 // c4e27d3513 + VPMOVZXDQ (R11), Y2 // c4c27d3513 + VPMOVZXDQ X2, Y2 // c4e27d35d2 + VPMOVZXDQ X11, Y2 // c4c27d35d3 + VPMOVZXDQ (BX), Y11 // c4627d351b + VPMOVZXDQ (R11), Y11 // c4427d351b + VPMOVZXDQ X2, Y11 // c4627d35da + VPMOVZXDQ X11, Y11 // c4427d35db + VPMOVZXWD (BX), X2 // c4e2793313 + VPMOVZXWD (R11), X2 // c4c2793313 + VPMOVZXWD X2, X2 // c4e27933d2 + VPMOVZXWD X11, X2 // c4c27933d3 + VPMOVZXWD (BX), X11 // c46279331b + VPMOVZXWD (R11), X11 // c44279331b + VPMOVZXWD X2, X11 // c4627933da + VPMOVZXWD X11, X11 // c4427933db + VPMOVZXWD (BX), Y2 // c4e27d3313 + VPMOVZXWD (R11), Y2 // c4c27d3313 + VPMOVZXWD X2, Y2 // c4e27d33d2 + VPMOVZXWD X11, Y2 // c4c27d33d3 + VPMOVZXWD (BX), Y11 // c4627d331b + VPMOVZXWD (R11), Y11 // c4427d331b + VPMOVZXWD X2, Y11 // c4627d33da + VPMOVZXWD X11, Y11 // c4427d33db + VPMOVZXWQ (BX), X2 // c4e2793413 + VPMOVZXWQ (R11), X2 // c4c2793413 + VPMOVZXWQ X2, X2 // c4e27934d2 + 
VPMOVZXWQ X11, X2 // c4c27934d3 + VPMOVZXWQ (BX), X11 // c46279341b + VPMOVZXWQ (R11), X11 // c44279341b + VPMOVZXWQ X2, X11 // c4627934da + VPMOVZXWQ X11, X11 // c4427934db + VPMOVZXWQ (BX), Y2 // c4e27d3413 + VPMOVZXWQ (R11), Y2 // c4c27d3413 + VPMOVZXWQ X2, Y2 // c4e27d34d2 + VPMOVZXWQ X11, Y2 // c4c27d34d3 + VPMOVZXWQ (BX), Y11 // c4627d341b + VPMOVZXWQ (R11), Y11 // c4427d341b + VPMOVZXWQ X2, Y11 // c4627d34da + VPMOVZXWQ X11, Y11 // c4427d34db + VPMULDQ (BX), X9, X2 // c4e2312813 + VPMULDQ (R11), X9, X2 // c4c2312813 + VPMULDQ X2, X9, X2 // c4e23128d2 + VPMULDQ X11, X9, X2 // c4c23128d3 + VPMULDQ (BX), X9, X11 // c46231281b + VPMULDQ (R11), X9, X11 // c44231281b + VPMULDQ X2, X9, X11 // c4623128da + VPMULDQ X11, X9, X11 // c4423128db + VPMULDQ (BX), Y15, Y2 // c4e2052813 + VPMULDQ (R11), Y15, Y2 // c4c2052813 + VPMULDQ Y2, Y15, Y2 // c4e20528d2 + VPMULDQ Y11, Y15, Y2 // c4c20528d3 + VPMULDQ (BX), Y15, Y11 // c46205281b + VPMULDQ (R11), Y15, Y11 // c44205281b + VPMULDQ Y2, Y15, Y11 // c4620528da + VPMULDQ Y11, Y15, Y11 // c4420528db + VPMULHRSW (BX), X9, X2 // c4e2310b13 + VPMULHRSW (R11), X9, X2 // c4c2310b13 + VPMULHRSW X2, X9, X2 // c4e2310bd2 + VPMULHRSW X11, X9, X2 // c4c2310bd3 + VPMULHRSW (BX), X9, X11 // c462310b1b + VPMULHRSW (R11), X9, X11 // c442310b1b + VPMULHRSW X2, X9, X11 // c462310bda + VPMULHRSW X11, X9, X11 // c442310bdb + VPMULHRSW (BX), Y15, Y2 // c4e2050b13 + VPMULHRSW (R11), Y15, Y2 // c4c2050b13 + VPMULHRSW Y2, Y15, Y2 // c4e2050bd2 + VPMULHRSW Y11, Y15, Y2 // c4c2050bd3 + VPMULHRSW (BX), Y15, Y11 // c462050b1b + VPMULHRSW (R11), Y15, Y11 // c442050b1b + VPMULHRSW Y2, Y15, Y11 // c462050bda + VPMULHRSW Y11, Y15, Y11 // c442050bdb + VPMULHUW (BX), X9, X2 // c4e131e413 or c5b1e413 + VPMULHUW (R11), X9, X2 // c4c131e413 + VPMULHUW X2, X9, X2 // c4e131e4d2 or c5b1e4d2 + VPMULHUW X11, X9, X2 // c4c131e4d3 + VPMULHUW (BX), X9, X11 // c46131e41b or c531e41b + VPMULHUW (R11), X9, X11 // c44131e41b + VPMULHUW X2, X9, X11 // c46131e4da or c531e4da 
+ VPMULHUW X11, X9, X11 // c44131e4db + VPMULHUW (BX), Y15, Y2 // c4e105e413 or c585e413 + VPMULHUW (R11), Y15, Y2 // c4c105e413 + VPMULHUW Y2, Y15, Y2 // c4e105e4d2 or c585e4d2 + VPMULHUW Y11, Y15, Y2 // c4c105e4d3 + VPMULHUW (BX), Y15, Y11 // c46105e41b or c505e41b + VPMULHUW (R11), Y15, Y11 // c44105e41b + VPMULHUW Y2, Y15, Y11 // c46105e4da or c505e4da + VPMULHUW Y11, Y15, Y11 // c44105e4db + VPMULHW (BX), X9, X2 // c4e131e513 or c5b1e513 + VPMULHW (R11), X9, X2 // c4c131e513 + VPMULHW X2, X9, X2 // c4e131e5d2 or c5b1e5d2 + VPMULHW X11, X9, X2 // c4c131e5d3 + VPMULHW (BX), X9, X11 // c46131e51b or c531e51b + VPMULHW (R11), X9, X11 // c44131e51b + VPMULHW X2, X9, X11 // c46131e5da or c531e5da + VPMULHW X11, X9, X11 // c44131e5db + VPMULHW (BX), Y15, Y2 // c4e105e513 or c585e513 + VPMULHW (R11), Y15, Y2 // c4c105e513 + VPMULHW Y2, Y15, Y2 // c4e105e5d2 or c585e5d2 + VPMULHW Y11, Y15, Y2 // c4c105e5d3 + VPMULHW (BX), Y15, Y11 // c46105e51b or c505e51b + VPMULHW (R11), Y15, Y11 // c44105e51b + VPMULHW Y2, Y15, Y11 // c46105e5da or c505e5da + VPMULHW Y11, Y15, Y11 // c44105e5db + VPMULLD (BX), X9, X2 // c4e2314013 + VPMULLD (R11), X9, X2 // c4c2314013 + VPMULLD X2, X9, X2 // c4e23140d2 + VPMULLD X11, X9, X2 // c4c23140d3 + VPMULLD (BX), X9, X11 // c46231401b + VPMULLD (R11), X9, X11 // c44231401b + VPMULLD X2, X9, X11 // c4623140da + VPMULLD X11, X9, X11 // c4423140db + VPMULLD (BX), Y15, Y2 // c4e2054013 + VPMULLD (R11), Y15, Y2 // c4c2054013 + VPMULLD Y2, Y15, Y2 // c4e20540d2 + VPMULLD Y11, Y15, Y2 // c4c20540d3 + VPMULLD (BX), Y15, Y11 // c46205401b + VPMULLD (R11), Y15, Y11 // c44205401b + VPMULLD Y2, Y15, Y11 // c4620540da + VPMULLD Y11, Y15, Y11 // c4420540db + VPMULLW (BX), X9, X2 // c4e131d513 or c5b1d513 + VPMULLW (R11), X9, X2 // c4c131d513 + VPMULLW X2, X9, X2 // c4e131d5d2 or c5b1d5d2 + VPMULLW X11, X9, X2 // c4c131d5d3 + VPMULLW (BX), X9, X11 // c46131d51b or c531d51b + VPMULLW (R11), X9, X11 // c44131d51b + VPMULLW X2, X9, X11 // c46131d5da or 
c531d5da + VPMULLW X11, X9, X11 // c44131d5db + VPMULLW (BX), Y15, Y2 // c4e105d513 or c585d513 + VPMULLW (R11), Y15, Y2 // c4c105d513 + VPMULLW Y2, Y15, Y2 // c4e105d5d2 or c585d5d2 + VPMULLW Y11, Y15, Y2 // c4c105d5d3 + VPMULLW (BX), Y15, Y11 // c46105d51b or c505d51b + VPMULLW (R11), Y15, Y11 // c44105d51b + VPMULLW Y2, Y15, Y11 // c46105d5da or c505d5da + VPMULLW Y11, Y15, Y11 // c44105d5db + VPMULUDQ (BX), X9, X2 // c4e131f413 or c5b1f413 + VPMULUDQ (R11), X9, X2 // c4c131f413 + VPMULUDQ X2, X9, X2 // c4e131f4d2 or c5b1f4d2 + VPMULUDQ X11, X9, X2 // c4c131f4d3 + VPMULUDQ (BX), X9, X11 // c46131f41b or c531f41b + VPMULUDQ (R11), X9, X11 // c44131f41b + VPMULUDQ X2, X9, X11 // c46131f4da or c531f4da + VPMULUDQ X11, X9, X11 // c44131f4db + VPMULUDQ (BX), Y15, Y2 // c4e105f413 or c585f413 + VPMULUDQ (R11), Y15, Y2 // c4c105f413 + VPMULUDQ Y2, Y15, Y2 // c4e105f4d2 or c585f4d2 + VPMULUDQ Y11, Y15, Y2 // c4c105f4d3 + VPMULUDQ (BX), Y15, Y11 // c46105f41b or c505f41b + VPMULUDQ (R11), Y15, Y11 // c44105f41b + VPMULUDQ Y2, Y15, Y11 // c46105f4da or c505f4da + VPMULUDQ Y11, Y15, Y11 // c44105f4db + VPOR (BX), X9, X2 // c4e131eb13 or c5b1eb13 + VPOR (R11), X9, X2 // c4c131eb13 + VPOR X2, X9, X2 // c4e131ebd2 or c5b1ebd2 + VPOR X11, X9, X2 // c4c131ebd3 + VPOR (BX), X9, X11 // c46131eb1b or c531eb1b + VPOR (R11), X9, X11 // c44131eb1b + VPOR X2, X9, X11 // c46131ebda or c531ebda + VPOR X11, X9, X11 // c44131ebdb + VPOR (BX), Y15, Y2 // c4e105eb13 or c585eb13 + VPOR (R11), Y15, Y2 // c4c105eb13 + VPOR Y2, Y15, Y2 // c4e105ebd2 or c585ebd2 + VPOR Y11, Y15, Y2 // c4c105ebd3 + VPOR (BX), Y15, Y11 // c46105eb1b or c505eb1b + VPOR (R11), Y15, Y11 // c44105eb1b + VPOR Y2, Y15, Y11 // c46105ebda or c505ebda + VPOR Y11, Y15, Y11 // c44105ebdb + VPSADBW (BX), X9, X2 // c4e131f613 or c5b1f613 + VPSADBW (R11), X9, X2 // c4c131f613 + VPSADBW X2, X9, X2 // c4e131f6d2 or c5b1f6d2 + VPSADBW X11, X9, X2 // c4c131f6d3 + VPSADBW (BX), X9, X11 // c46131f61b or c531f61b + VPSADBW (R11), X9, 
X11 // c44131f61b + VPSADBW X2, X9, X11 // c46131f6da or c531f6da + VPSADBW X11, X9, X11 // c44131f6db + VPSADBW (BX), Y15, Y2 // c4e105f613 or c585f613 + VPSADBW (R11), Y15, Y2 // c4c105f613 + VPSADBW Y2, Y15, Y2 // c4e105f6d2 or c585f6d2 + VPSADBW Y11, Y15, Y2 // c4c105f6d3 + VPSADBW (BX), Y15, Y11 // c46105f61b or c505f61b + VPSADBW (R11), Y15, Y11 // c44105f61b + VPSADBW Y2, Y15, Y11 // c46105f6da or c505f6da + VPSADBW Y11, Y15, Y11 // c44105f6db + VPSHUFB (BX), X9, X2 // c4e2310013 + VPSHUFB (R11), X9, X2 // c4c2310013 + VPSHUFB X2, X9, X2 // c4e23100d2 + VPSHUFB X11, X9, X2 // c4c23100d3 + VPSHUFB (BX), X9, X11 // c46231001b + VPSHUFB (R11), X9, X11 // c44231001b + VPSHUFB X2, X9, X11 // c4623100da + VPSHUFB X11, X9, X11 // c4423100db + VPSHUFB (BX), Y15, Y2 // c4e2050013 + VPSHUFB (R11), Y15, Y2 // c4c2050013 + VPSHUFB Y2, Y15, Y2 // c4e20500d2 + VPSHUFB Y11, Y15, Y2 // c4c20500d3 + VPSHUFB (BX), Y15, Y11 // c46205001b + VPSHUFB (R11), Y15, Y11 // c44205001b + VPSHUFB Y2, Y15, Y11 // c4620500da + VPSHUFB Y11, Y15, Y11 // c4420500db + VPSHUFD $7, (BX), X2 // c4e179701307 or c5f9701307 + VPSHUFD $7, (R11), X2 // c4c179701307 + VPSHUFD $7, X2, X2 // c4e17970d207 or c5f970d207 + VPSHUFD $7, X11, X2 // c4c17970d307 + VPSHUFD $7, (BX), X11 // c46179701b07 or c579701b07 + VPSHUFD $7, (R11), X11 // c44179701b07 + VPSHUFD $7, X2, X11 // c4617970da07 or c57970da07 + VPSHUFD $7, X11, X11 // c4417970db07 + VPSHUFD $7, (BX), Y2 // c4e17d701307 or c5fd701307 + VPSHUFD $7, (R11), Y2 // c4c17d701307 + VPSHUFD $7, Y2, Y2 // c4e17d70d207 or c5fd70d207 + VPSHUFD $7, Y11, Y2 // c4c17d70d307 + VPSHUFD $7, (BX), Y11 // c4617d701b07 or c57d701b07 + VPSHUFD $7, (R11), Y11 // c4417d701b07 + VPSHUFD $7, Y2, Y11 // c4617d70da07 or c57d70da07 + VPSHUFD $7, Y11, Y11 // c4417d70db07 + VPSHUFHW $7, (BX), X2 // c4e17a701307 or c5fa701307 + VPSHUFHW $7, (R11), X2 // c4c17a701307 + VPSHUFHW $7, X2, X2 // c4e17a70d207 or c5fa70d207 + VPSHUFHW $7, X11, X2 // c4c17a70d307 + VPSHUFHW $7, (BX), 
X11 // c4617a701b07 or c57a701b07 + VPSHUFHW $7, (R11), X11 // c4417a701b07 + VPSHUFHW $7, X2, X11 // c4617a70da07 or c57a70da07 + VPSHUFHW $7, X11, X11 // c4417a70db07 + VPSHUFHW $7, (BX), Y2 // c4e17e701307 or c5fe701307 + VPSHUFHW $7, (R11), Y2 // c4c17e701307 + VPSHUFHW $7, Y2, Y2 // c4e17e70d207 or c5fe70d207 + VPSHUFHW $7, Y11, Y2 // c4c17e70d307 + VPSHUFHW $7, (BX), Y11 // c4617e701b07 or c57e701b07 + VPSHUFHW $7, (R11), Y11 // c4417e701b07 + VPSHUFHW $7, Y2, Y11 // c4617e70da07 or c57e70da07 + VPSHUFHW $7, Y11, Y11 // c4417e70db07 + VPSHUFLW $7, (BX), X2 // c4e17b701307 or c5fb701307 + VPSHUFLW $7, (R11), X2 // c4c17b701307 + VPSHUFLW $7, X2, X2 // c4e17b70d207 or c5fb70d207 + VPSHUFLW $7, X11, X2 // c4c17b70d307 + VPSHUFLW $7, (BX), X11 // c4617b701b07 or c57b701b07 + VPSHUFLW $7, (R11), X11 // c4417b701b07 + VPSHUFLW $7, X2, X11 // c4617b70da07 or c57b70da07 + VPSHUFLW $7, X11, X11 // c4417b70db07 + VPSHUFLW $7, (BX), Y2 // c4e17f701307 or c5ff701307 + VPSHUFLW $7, (R11), Y2 // c4c17f701307 + VPSHUFLW $7, Y2, Y2 // c4e17f70d207 or c5ff70d207 + VPSHUFLW $7, Y11, Y2 // c4c17f70d307 + VPSHUFLW $7, (BX), Y11 // c4617f701b07 or c57f701b07 + VPSHUFLW $7, (R11), Y11 // c4417f701b07 + VPSHUFLW $7, Y2, Y11 // c4617f70da07 or c57f70da07 + VPSHUFLW $7, Y11, Y11 // c4417f70db07 + VPSIGNB (BX), X9, X2 // c4e2310813 + VPSIGNB (R11), X9, X2 // c4c2310813 + VPSIGNB X2, X9, X2 // c4e23108d2 + VPSIGNB X11, X9, X2 // c4c23108d3 + VPSIGNB (BX), X9, X11 // c46231081b + VPSIGNB (R11), X9, X11 // c44231081b + VPSIGNB X2, X9, X11 // c4623108da + VPSIGNB X11, X9, X11 // c4423108db + VPSIGNB (BX), Y15, Y2 // c4e2050813 + VPSIGNB (R11), Y15, Y2 // c4c2050813 + VPSIGNB Y2, Y15, Y2 // c4e20508d2 + VPSIGNB Y11, Y15, Y2 // c4c20508d3 + VPSIGNB (BX), Y15, Y11 // c46205081b + VPSIGNB (R11), Y15, Y11 // c44205081b + VPSIGNB Y2, Y15, Y11 // c4620508da + VPSIGNB Y11, Y15, Y11 // c4420508db + VPSIGND (BX), X9, X2 // c4e2310a13 + VPSIGND (R11), X9, X2 // c4c2310a13 + VPSIGND X2, X9, X2 // 
c4e2310ad2 + VPSIGND X11, X9, X2 // c4c2310ad3 + VPSIGND (BX), X9, X11 // c462310a1b + VPSIGND (R11), X9, X11 // c442310a1b + VPSIGND X2, X9, X11 // c462310ada + VPSIGND X11, X9, X11 // c442310adb + VPSIGND (BX), Y15, Y2 // c4e2050a13 + VPSIGND (R11), Y15, Y2 // c4c2050a13 + VPSIGND Y2, Y15, Y2 // c4e2050ad2 + VPSIGND Y11, Y15, Y2 // c4c2050ad3 + VPSIGND (BX), Y15, Y11 // c462050a1b + VPSIGND (R11), Y15, Y11 // c442050a1b + VPSIGND Y2, Y15, Y11 // c462050ada + VPSIGND Y11, Y15, Y11 // c442050adb + VPSIGNW (BX), X9, X2 // c4e2310913 + VPSIGNW (R11), X9, X2 // c4c2310913 + VPSIGNW X2, X9, X2 // c4e23109d2 + VPSIGNW X11, X9, X2 // c4c23109d3 + VPSIGNW (BX), X9, X11 // c46231091b + VPSIGNW (R11), X9, X11 // c44231091b + VPSIGNW X2, X9, X11 // c4623109da + VPSIGNW X11, X9, X11 // c4423109db + VPSIGNW (BX), Y15, Y2 // c4e2050913 + VPSIGNW (R11), Y15, Y2 // c4c2050913 + VPSIGNW Y2, Y15, Y2 // c4e20509d2 + VPSIGNW Y11, Y15, Y2 // c4c20509d3 + VPSIGNW (BX), Y15, Y11 // c46205091b + VPSIGNW (R11), Y15, Y11 // c44205091b + VPSIGNW Y2, Y15, Y11 // c4620509da + VPSIGNW Y11, Y15, Y11 // c4420509db + VPSLLD (BX), X9, X2 // c4e131f213 or c5b1f213 + VPSLLD (R11), X9, X2 // c4c131f213 + VPSLLD X2, X9, X2 // c4e131f2d2 or c5b1f2d2 + VPSLLD X11, X9, X2 // c4c131f2d3 + VPSLLD (BX), X9, X11 // c46131f21b or c531f21b + VPSLLD (R11), X9, X11 // c44131f21b + VPSLLD X2, X9, X11 // c46131f2da or c531f2da + VPSLLD X11, X9, X11 // c44131f2db + VPSLLD $7, X2, X9 // c4e13172f207 or c5b172f207 + VPSLLD $7, X11, X9 // c4c13172f307 + VPSLLDQ $7, X2, X9 // c4e13173fa07 or c5b173fa07 + VPSLLDQ $7, X11, X9 // c4c13173fb07 + VPSLLDQ $7, Y2, Y15 // c4e10573fa07 or c58573fa07 + VPSLLDQ $7, Y11, Y15 // c4c10573fb07 + VPSLLQ (BX), X9, X2 // c4e131f313 or c5b1f313 + VPSLLQ (R11), X9, X2 // c4c131f313 + VPSLLQ X2, X9, X2 // c4e131f3d2 or c5b1f3d2 + VPSLLQ X11, X9, X2 // c4c131f3d3 + VPSLLQ (BX), X9, X11 // c46131f31b or c531f31b + VPSLLQ (R11), X9, X11 // c44131f31b + VPSLLQ X2, X9, X11 // c46131f3da or 
c531f3da + VPSLLQ X11, X9, X11 // c44131f3db + VPSLLQ $7, X2, X9 // c4e13173f207 or c5b173f207 + VPSLLQ $7, X11, X9 // c4c13173f307 + VPSLLVD (BX), X9, X2 // c4e2314713 + VPSLLVD (R11), X9, X2 // c4c2314713 + VPSLLVD X2, X9, X2 // c4e23147d2 + VPSLLVD X11, X9, X2 // c4c23147d3 + VPSLLVD (BX), X9, X11 // c46231471b + VPSLLVD (R11), X9, X11 // c44231471b + VPSLLVD X2, X9, X11 // c4623147da + VPSLLVD X11, X9, X11 // c4423147db + VPSLLVD (BX), Y15, Y2 // c4e2054713 + VPSLLVD (R11), Y15, Y2 // c4c2054713 + VPSLLVD Y2, Y15, Y2 // c4e20547d2 + VPSLLVD Y11, Y15, Y2 // c4c20547d3 + VPSLLVD (BX), Y15, Y11 // c46205471b + VPSLLVD (R11), Y15, Y11 // c44205471b + VPSLLVD Y2, Y15, Y11 // c4620547da + VPSLLVD Y11, Y15, Y11 // c4420547db + VPSLLVQ (BX), X9, X2 // c4e2b14713 + VPSLLVQ (R11), X9, X2 // c4c2b14713 + VPSLLVQ X2, X9, X2 // c4e2b147d2 + VPSLLVQ X11, X9, X2 // c4c2b147d3 + VPSLLVQ (BX), X9, X11 // c462b1471b + VPSLLVQ (R11), X9, X11 // c442b1471b + VPSLLVQ X2, X9, X11 // c462b147da + VPSLLVQ X11, X9, X11 // c442b147db + VPSLLVQ (BX), Y15, Y2 // c4e2854713 + VPSLLVQ (R11), Y15, Y2 // c4c2854713 + VPSLLVQ Y2, Y15, Y2 // c4e28547d2 + VPSLLVQ Y11, Y15, Y2 // c4c28547d3 + VPSLLVQ (BX), Y15, Y11 // c46285471b + VPSLLVQ (R11), Y15, Y11 // c44285471b + VPSLLVQ Y2, Y15, Y11 // c4628547da + VPSLLVQ Y11, Y15, Y11 // c4428547db + VPSLLW (BX), X9, X2 // c4e131f113 or c5b1f113 + VPSLLW (R11), X9, X2 // c4c131f113 + VPSLLW X2, X9, X2 // c4e131f1d2 or c5b1f1d2 + VPSLLW X11, X9, X2 // c4c131f1d3 + VPSLLW (BX), X9, X11 // c46131f11b or c531f11b + VPSLLW (R11), X9, X11 // c44131f11b + VPSLLW X2, X9, X11 // c46131f1da or c531f1da + VPSLLW X11, X9, X11 // c44131f1db + VPSLLW $7, X2, X9 // c4e13171f207 or c5b171f207 + VPSLLW $7, X11, X9 // c4c13171f307 + VPSLLW (BX), Y15, Y2 // c4e105f113 or c585f113 + VPSLLW (R11), Y15, Y2 // c4c105f113 + VPSLLW X2, Y15, Y2 // c4e105f1d2 or c585f1d2 + VPSLLW X11, Y15, Y2 // c4c105f1d3 + VPSLLW (BX), Y15, Y11 // c46105f11b or c505f11b + VPSLLW (R11), Y15, Y11 
// c44105f11b + VPSLLW X2, Y15, Y11 // c46105f1da or c505f1da + VPSLLW X11, Y15, Y11 // c44105f1db + VPSLLW $7, Y2, Y15 // c4e10571f207 or c58571f207 + VPSLLW $7, Y11, Y15 // c4c10571f307 + VPSRAD (BX), X9, X2 // c4e131e213 or c5b1e213 + VPSRAD (R11), X9, X2 // c4c131e213 + VPSRAD X2, X9, X2 // c4e131e2d2 or c5b1e2d2 + VPSRAD X11, X9, X2 // c4c131e2d3 + VPSRAD (BX), X9, X11 // c46131e21b or c531e21b + VPSRAD (R11), X9, X11 // c44131e21b + VPSRAD X2, X9, X11 // c46131e2da or c531e2da + VPSRAD X11, X9, X11 // c44131e2db + VPSRAD $7, X2, X9 // c4e13172e207 or c5b172e207 + VPSRAD $7, X11, X9 // c4c13172e307 + VPSRAD (BX), Y15, Y2 // c4e105e213 or c585e213 + VPSRAD (R11), Y15, Y2 // c4c105e213 + VPSRAD X2, Y15, Y2 // c4e105e2d2 or c585e2d2 + VPSRAD X11, Y15, Y2 // c4c105e2d3 + VPSRAD (BX), Y15, Y11 // c46105e21b or c505e21b + VPSRAD (R11), Y15, Y11 // c44105e21b + VPSRAD X2, Y15, Y11 // c46105e2da or c505e2da + VPSRAD X11, Y15, Y11 // c44105e2db + VPSRAD $7, Y2, Y15 // c4e10572e207 or c58572e207 + VPSRAD $7, Y11, Y15 // c4c10572e307 + VPSRAVD (BX), X9, X2 // c4e2314613 + VPSRAVD (R11), X9, X2 // c4c2314613 + VPSRAVD X2, X9, X2 // c4e23146d2 + VPSRAVD X11, X9, X2 // c4c23146d3 + VPSRAVD (BX), X9, X11 // c46231461b + VPSRAVD (R11), X9, X11 // c44231461b + VPSRAVD X2, X9, X11 // c4623146da + VPSRAVD X11, X9, X11 // c4423146db + VPSRAVD (BX), Y15, Y2 // c4e2054613 + VPSRAVD (R11), Y15, Y2 // c4c2054613 + VPSRAVD Y2, Y15, Y2 // c4e20546d2 + VPSRAVD Y11, Y15, Y2 // c4c20546d3 + VPSRAVD (BX), Y15, Y11 // c46205461b + VPSRAVD (R11), Y15, Y11 // c44205461b + VPSRAVD Y2, Y15, Y11 // c4620546da + VPSRAVD Y11, Y15, Y11 // c4420546db + VPSRAW (BX), X9, X2 // c4e131e113 or c5b1e113 + VPSRAW (R11), X9, X2 // c4c131e113 + VPSRAW X2, X9, X2 // c4e131e1d2 or c5b1e1d2 + VPSRAW X11, X9, X2 // c4c131e1d3 + VPSRAW (BX), X9, X11 // c46131e11b or c531e11b + VPSRAW (R11), X9, X11 // c44131e11b + VPSRAW X2, X9, X11 // c46131e1da or c531e1da + VPSRAW X11, X9, X11 // c44131e1db + VPSRAW $7, X2, X9 
// c4e13171e207 or c5b171e207 + VPSRAW $7, X11, X9 // c4c13171e307 + VPSRAW (BX), Y15, Y2 // c4e105e113 or c585e113 + VPSRAW (R11), Y15, Y2 // c4c105e113 + VPSRAW X2, Y15, Y2 // c4e105e1d2 or c585e1d2 + VPSRAW X11, Y15, Y2 // c4c105e1d3 + VPSRAW (BX), Y15, Y11 // c46105e11b or c505e11b + VPSRAW (R11), Y15, Y11 // c44105e11b + VPSRAW X2, Y15, Y11 // c46105e1da or c505e1da + VPSRAW X11, Y15, Y11 // c44105e1db + VPSRAW $7, Y2, Y15 // c4e10571e207 or c58571e207 + VPSRAW $7, Y11, Y15 // c4c10571e307 + VPSRLD (BX), X9, X2 // c4e131d213 or c5b1d213 + VPSRLD (R11), X9, X2 // c4c131d213 + VPSRLD X2, X9, X2 // c4e131d2d2 or c5b1d2d2 + VPSRLD X11, X9, X2 // c4c131d2d3 + VPSRLD (BX), X9, X11 // c46131d21b or c531d21b + VPSRLD (R11), X9, X11 // c44131d21b + VPSRLD X2, X9, X11 // c46131d2da or c531d2da + VPSRLD X11, X9, X11 // c44131d2db + VPSRLD $7, X2, X9 // c4e13172d207 or c5b172d207 + VPSRLD $7, X11, X9 // c4c13172d307 + VPSRLDQ $7, X2, X9 // c4e13173da07 or c5b173da07 + VPSRLDQ $7, X11, X9 // c4c13173db07 + VPSRLDQ $7, Y2, Y15 // c4e10573da07 or c58573da07 + VPSRLDQ $7, Y11, Y15 // c4c10573db07 + VPSRLQ (BX), X9, X2 // c4e131d313 or c5b1d313 + VPSRLQ (R11), X9, X2 // c4c131d313 + VPSRLQ X2, X9, X2 // c4e131d3d2 or c5b1d3d2 + VPSRLQ X11, X9, X2 // c4c131d3d3 + VPSRLQ (BX), X9, X11 // c46131d31b or c531d31b + VPSRLQ (R11), X9, X11 // c44131d31b + VPSRLQ X2, X9, X11 // c46131d3da or c531d3da + VPSRLQ X11, X9, X11 // c44131d3db + VPSRLQ $7, X2, X9 // c4e13173d207 or c5b173d207 + VPSRLQ $7, X11, X9 // c4c13173d307 + VPSRLVD (BX), X9, X2 // c4e2314513 + VPSRLVD (R11), X9, X2 // c4c2314513 + VPSRLVD X2, X9, X2 // c4e23145d2 + VPSRLVD X11, X9, X2 // c4c23145d3 + VPSRLVD (BX), X9, X11 // c46231451b + VPSRLVD (R11), X9, X11 // c44231451b + VPSRLVD X2, X9, X11 // c4623145da + VPSRLVD X11, X9, X11 // c4423145db + VPSRLVD (BX), Y15, Y2 // c4e2054513 + VPSRLVD (R11), Y15, Y2 // c4c2054513 + VPSRLVD Y2, Y15, Y2 // c4e20545d2 + VPSRLVD Y11, Y15, Y2 // c4c20545d3 + VPSRLVD (BX), Y15, Y11 // 
c46205451b + VPSRLVD (R11), Y15, Y11 // c44205451b + VPSRLVD Y2, Y15, Y11 // c4620545da + VPSRLVD Y11, Y15, Y11 // c4420545db + VPSRLVQ (BX), X9, X2 // c4e2b14513 + VPSRLVQ (R11), X9, X2 // c4c2b14513 + VPSRLVQ X2, X9, X2 // c4e2b145d2 + VPSRLVQ X11, X9, X2 // c4c2b145d3 + VPSRLVQ (BX), X9, X11 // c462b1451b + VPSRLVQ (R11), X9, X11 // c442b1451b + VPSRLVQ X2, X9, X11 // c462b145da + VPSRLVQ X11, X9, X11 // c442b145db + VPSRLVQ (BX), Y15, Y2 // c4e2854513 + VPSRLVQ (R11), Y15, Y2 // c4c2854513 + VPSRLVQ Y2, Y15, Y2 // c4e28545d2 + VPSRLVQ Y11, Y15, Y2 // c4c28545d3 + VPSRLVQ (BX), Y15, Y11 // c46285451b + VPSRLVQ (R11), Y15, Y11 // c44285451b + VPSRLVQ Y2, Y15, Y11 // c4628545da + VPSRLVQ Y11, Y15, Y11 // c4428545db + VPSRLW (BX), X9, X2 // c4e131d113 or c5b1d113 + VPSRLW (R11), X9, X2 // c4c131d113 + VPSRLW X2, X9, X2 // c4e131d1d2 or c5b1d1d2 + VPSRLW X11, X9, X2 // c4c131d1d3 + VPSRLW (BX), X9, X11 // c46131d11b or c531d11b + VPSRLW (R11), X9, X11 // c44131d11b + VPSRLW X2, X9, X11 // c46131d1da or c531d1da + VPSRLW X11, X9, X11 // c44131d1db + VPSRLW $7, X2, X9 // c4e13171d207 or c5b171d207 + VPSRLW $7, X11, X9 // c4c13171d307 + VPSRLW (BX), Y15, Y2 // c4e105d113 or c585d113 + VPSRLW (R11), Y15, Y2 // c4c105d113 + VPSRLW X2, Y15, Y2 // c4e105d1d2 or c585d1d2 + VPSRLW X11, Y15, Y2 // c4c105d1d3 + VPSRLW (BX), Y15, Y11 // c46105d11b or c505d11b + VPSRLW (R11), Y15, Y11 // c44105d11b + VPSRLW X2, Y15, Y11 // c46105d1da or c505d1da + VPSRLW X11, Y15, Y11 // c44105d1db + VPSRLW $7, Y2, Y15 // c4e10571d207 or c58571d207 + VPSRLW $7, Y11, Y15 // c4c10571d307 + VPSUBB (BX), X9, X2 // c4e131f813 or c5b1f813 + VPSUBB (R11), X9, X2 // c4c131f813 + VPSUBB X2, X9, X2 // c4e131f8d2 or c5b1f8d2 + VPSUBB X11, X9, X2 // c4c131f8d3 + VPSUBB (BX), X9, X11 // c46131f81b or c531f81b + VPSUBB (R11), X9, X11 // c44131f81b + VPSUBB X2, X9, X11 // c46131f8da or c531f8da + VPSUBB X11, X9, X11 // c44131f8db + VPSUBB (BX), Y15, Y2 // c4e105f813 or c585f813 + VPSUBB (R11), Y15, Y2 // 
c4c105f813 + VPSUBB Y2, Y15, Y2 // c4e105f8d2 or c585f8d2 + VPSUBB Y11, Y15, Y2 // c4c105f8d3 + VPSUBB (BX), Y15, Y11 // c46105f81b or c505f81b + VPSUBB (R11), Y15, Y11 // c44105f81b + VPSUBB Y2, Y15, Y11 // c46105f8da or c505f8da + VPSUBB Y11, Y15, Y11 // c44105f8db + VPSUBD (BX), X9, X2 // c4e131fa13 or c5b1fa13 + VPSUBD (R11), X9, X2 // c4c131fa13 + VPSUBD X2, X9, X2 // c4e131fad2 or c5b1fad2 + VPSUBD X11, X9, X2 // c4c131fad3 + VPSUBD (BX), X9, X11 // c46131fa1b or c531fa1b + VPSUBD (R11), X9, X11 // c44131fa1b + VPSUBD X2, X9, X11 // c46131fada or c531fada + VPSUBD X11, X9, X11 // c44131fadb + VPSUBD (BX), Y15, Y2 // c4e105fa13 or c585fa13 + VPSUBD (R11), Y15, Y2 // c4c105fa13 + VPSUBD Y2, Y15, Y2 // c4e105fad2 or c585fad2 + VPSUBD Y11, Y15, Y2 // c4c105fad3 + VPSUBD (BX), Y15, Y11 // c46105fa1b or c505fa1b + VPSUBD (R11), Y15, Y11 // c44105fa1b + VPSUBD Y2, Y15, Y11 // c46105fada or c505fada + VPSUBD Y11, Y15, Y11 // c44105fadb + VPSUBQ (BX), X9, X2 // c4e131fb13 or c5b1fb13 + VPSUBQ (R11), X9, X2 // c4c131fb13 + VPSUBQ X2, X9, X2 // c4e131fbd2 or c5b1fbd2 + VPSUBQ X11, X9, X2 // c4c131fbd3 + VPSUBQ (BX), X9, X11 // c46131fb1b or c531fb1b + VPSUBQ (R11), X9, X11 // c44131fb1b + VPSUBQ X2, X9, X11 // c46131fbda or c531fbda + VPSUBQ X11, X9, X11 // c44131fbdb + VPSUBQ (BX), Y15, Y2 // c4e105fb13 or c585fb13 + VPSUBQ (R11), Y15, Y2 // c4c105fb13 + VPSUBQ Y2, Y15, Y2 // c4e105fbd2 or c585fbd2 + VPSUBQ Y11, Y15, Y2 // c4c105fbd3 + VPSUBQ (BX), Y15, Y11 // c46105fb1b or c505fb1b + VPSUBQ (R11), Y15, Y11 // c44105fb1b + VPSUBQ Y2, Y15, Y11 // c46105fbda or c505fbda + VPSUBQ Y11, Y15, Y11 // c44105fbdb + VPSUBSB (BX), X9, X2 // c4e131e813 or c5b1e813 + VPSUBSB (R11), X9, X2 // c4c131e813 + VPSUBSB X2, X9, X2 // c4e131e8d2 or c5b1e8d2 + VPSUBSB X11, X9, X2 // c4c131e8d3 + VPSUBSB (BX), X9, X11 // c46131e81b or c531e81b + VPSUBSB (R11), X9, X11 // c44131e81b + VPSUBSB X2, X9, X11 // c46131e8da or c531e8da + VPSUBSB X11, X9, X11 // c44131e8db + VPSUBSB (BX), Y15, Y2 // 
c4e105e813 or c585e813 + VPSUBSB (R11), Y15, Y2 // c4c105e813 + VPSUBSB Y2, Y15, Y2 // c4e105e8d2 or c585e8d2 + VPSUBSB Y11, Y15, Y2 // c4c105e8d3 + VPSUBSB (BX), Y15, Y11 // c46105e81b or c505e81b + VPSUBSB (R11), Y15, Y11 // c44105e81b + VPSUBSB Y2, Y15, Y11 // c46105e8da or c505e8da + VPSUBSB Y11, Y15, Y11 // c44105e8db + VPSUBSW (BX), X9, X2 // c4e131e913 or c5b1e913 + VPSUBSW (R11), X9, X2 // c4c131e913 + VPSUBSW X2, X9, X2 // c4e131e9d2 or c5b1e9d2 + VPSUBSW X11, X9, X2 // c4c131e9d3 + VPSUBSW (BX), X9, X11 // c46131e91b or c531e91b + VPSUBSW (R11), X9, X11 // c44131e91b + VPSUBSW X2, X9, X11 // c46131e9da or c531e9da + VPSUBSW X11, X9, X11 // c44131e9db + VPSUBSW (BX), Y15, Y2 // c4e105e913 or c585e913 + VPSUBSW (R11), Y15, Y2 // c4c105e913 + VPSUBSW Y2, Y15, Y2 // c4e105e9d2 or c585e9d2 + VPSUBSW Y11, Y15, Y2 // c4c105e9d3 + VPSUBSW (BX), Y15, Y11 // c46105e91b or c505e91b + VPSUBSW (R11), Y15, Y11 // c44105e91b + VPSUBSW Y2, Y15, Y11 // c46105e9da or c505e9da + VPSUBSW Y11, Y15, Y11 // c44105e9db + VPSUBUSB (BX), X9, X2 // c4e131d813 or c5b1d813 + VPSUBUSB (R11), X9, X2 // c4c131d813 + VPSUBUSB X2, X9, X2 // c4e131d8d2 or c5b1d8d2 + VPSUBUSB X11, X9, X2 // c4c131d8d3 + VPSUBUSB (BX), X9, X11 // c46131d81b or c531d81b + VPSUBUSB (R11), X9, X11 // c44131d81b + VPSUBUSB X2, X9, X11 // c46131d8da or c531d8da + VPSUBUSB X11, X9, X11 // c44131d8db + VPSUBUSB (BX), Y15, Y2 // c4e105d813 or c585d813 + VPSUBUSB (R11), Y15, Y2 // c4c105d813 + VPSUBUSB Y2, Y15, Y2 // c4e105d8d2 or c585d8d2 + VPSUBUSB Y11, Y15, Y2 // c4c105d8d3 + VPSUBUSB (BX), Y15, Y11 // c46105d81b or c505d81b + VPSUBUSB (R11), Y15, Y11 // c44105d81b + VPSUBUSB Y2, Y15, Y11 // c46105d8da or c505d8da + VPSUBUSB Y11, Y15, Y11 // c44105d8db + VPSUBUSW (BX), X9, X2 // c4e131d913 or c5b1d913 + VPSUBUSW (R11), X9, X2 // c4c131d913 + VPSUBUSW X2, X9, X2 // c4e131d9d2 or c5b1d9d2 + VPSUBUSW X11, X9, X2 // c4c131d9d3 + VPSUBUSW (BX), X9, X11 // c46131d91b or c531d91b + VPSUBUSW (R11), X9, X11 // c44131d91b + 
VPSUBUSW X2, X9, X11 // c46131d9da or c531d9da + VPSUBUSW X11, X9, X11 // c44131d9db + VPSUBUSW (BX), Y15, Y2 // c4e105d913 or c585d913 + VPSUBUSW (R11), Y15, Y2 // c4c105d913 + VPSUBUSW Y2, Y15, Y2 // c4e105d9d2 or c585d9d2 + VPSUBUSW Y11, Y15, Y2 // c4c105d9d3 + VPSUBUSW (BX), Y15, Y11 // c46105d91b or c505d91b + VPSUBUSW (R11), Y15, Y11 // c44105d91b + VPSUBUSW Y2, Y15, Y11 // c46105d9da or c505d9da + VPSUBUSW Y11, Y15, Y11 // c44105d9db + VPSUBW (BX), X9, X2 // c4e131f913 or c5b1f913 + VPSUBW (R11), X9, X2 // c4c131f913 + VPSUBW X2, X9, X2 // c4e131f9d2 or c5b1f9d2 + VPSUBW X11, X9, X2 // c4c131f9d3 + VPSUBW (BX), X9, X11 // c46131f91b or c531f91b + VPSUBW (R11), X9, X11 // c44131f91b + VPSUBW X2, X9, X11 // c46131f9da or c531f9da + VPSUBW X11, X9, X11 // c44131f9db + VPSUBW (BX), Y15, Y2 // c4e105f913 or c585f913 + VPSUBW (R11), Y15, Y2 // c4c105f913 + VPSUBW Y2, Y15, Y2 // c4e105f9d2 or c585f9d2 + VPSUBW Y11, Y15, Y2 // c4c105f9d3 + VPSUBW (BX), Y15, Y11 // c46105f91b or c505f91b + VPSUBW (R11), Y15, Y11 // c44105f91b + VPSUBW Y2, Y15, Y11 // c46105f9da or c505f9da + VPSUBW Y11, Y15, Y11 // c44105f9db + VPTEST (BX), X2 // c4e2791713 + VPTEST (R11), X2 // c4c2791713 + VPTEST X2, X2 // c4e27917d2 + VPTEST X11, X2 // c4c27917d3 + VPTEST (BX), X11 // c46279171b + VPTEST (R11), X11 // c44279171b + VPTEST X2, X11 // c4627917da + VPTEST X11, X11 // c4427917db + VPTEST (BX), Y2 // c4e27d1713 + VPTEST (R11), Y2 // c4c27d1713 + VPTEST Y2, Y2 // c4e27d17d2 + VPTEST Y11, Y2 // c4c27d17d3 + VPTEST (BX), Y11 // c4627d171b + VPTEST (R11), Y11 // c4427d171b + VPTEST Y2, Y11 // c4627d17da + VPTEST Y11, Y11 // c4427d17db + VPUNPCKHBW (BX), X9, X2 // c4e1316813 or c5b16813 + VPUNPCKHBW (R11), X9, X2 // c4c1316813 + VPUNPCKHBW X2, X9, X2 // c4e13168d2 or c5b168d2 + VPUNPCKHBW X11, X9, X2 // c4c13168d3 + VPUNPCKHBW (BX), X9, X11 // c46131681b or c531681b + VPUNPCKHBW (R11), X9, X11 // c44131681b + VPUNPCKHBW X2, X9, X11 // c4613168da or c53168da + VPUNPCKHBW X11, X9, X11 // 
c4413168db + VPUNPCKHBW (BX), Y15, Y2 // c4e1056813 or c5856813 + VPUNPCKHBW (R11), Y15, Y2 // c4c1056813 + VPUNPCKHBW Y2, Y15, Y2 // c4e10568d2 or c58568d2 + VPUNPCKHBW Y11, Y15, Y2 // c4c10568d3 + VPUNPCKHBW (BX), Y15, Y11 // c46105681b or c505681b + VPUNPCKHBW (R11), Y15, Y11 // c44105681b + VPUNPCKHBW Y2, Y15, Y11 // c4610568da or c50568da + VPUNPCKHBW Y11, Y15, Y11 // c4410568db + VPUNPCKHDQ (BX), X9, X2 // c4e1316a13 or c5b16a13 + VPUNPCKHDQ (R11), X9, X2 // c4c1316a13 + VPUNPCKHDQ X2, X9, X2 // c4e1316ad2 or c5b16ad2 + VPUNPCKHDQ X11, X9, X2 // c4c1316ad3 + VPUNPCKHDQ (BX), X9, X11 // c461316a1b or c5316a1b + VPUNPCKHDQ (R11), X9, X11 // c441316a1b + VPUNPCKHDQ X2, X9, X11 // c461316ada or c5316ada + VPUNPCKHDQ X11, X9, X11 // c441316adb + VPUNPCKHDQ (BX), Y15, Y2 // c4e1056a13 or c5856a13 + VPUNPCKHDQ (R11), Y15, Y2 // c4c1056a13 + VPUNPCKHDQ Y2, Y15, Y2 // c4e1056ad2 or c5856ad2 + VPUNPCKHDQ Y11, Y15, Y2 // c4c1056ad3 + VPUNPCKHDQ (BX), Y15, Y11 // c461056a1b or c5056a1b + VPUNPCKHDQ (R11), Y15, Y11 // c441056a1b + VPUNPCKHDQ Y2, Y15, Y11 // c461056ada or c5056ada + VPUNPCKHDQ Y11, Y15, Y11 // c441056adb + VPUNPCKHQDQ (BX), X9, X2 // c4e1316d13 or c5b16d13 + VPUNPCKHQDQ (R11), X9, X2 // c4c1316d13 + VPUNPCKHQDQ X2, X9, X2 // c4e1316dd2 or c5b16dd2 + VPUNPCKHQDQ X11, X9, X2 // c4c1316dd3 + VPUNPCKHQDQ (BX), X9, X11 // c461316d1b or c5316d1b + VPUNPCKHQDQ (R11), X9, X11 // c441316d1b + VPUNPCKHQDQ X2, X9, X11 // c461316dda or c5316dda + VPUNPCKHQDQ X11, X9, X11 // c441316ddb + VPUNPCKHQDQ (BX), Y15, Y2 // c4e1056d13 or c5856d13 + VPUNPCKHQDQ (R11), Y15, Y2 // c4c1056d13 + VPUNPCKHQDQ Y2, Y15, Y2 // c4e1056dd2 or c5856dd2 + VPUNPCKHQDQ Y11, Y15, Y2 // c4c1056dd3 + VPUNPCKHQDQ (BX), Y15, Y11 // c461056d1b or c5056d1b + VPUNPCKHQDQ (R11), Y15, Y11 // c441056d1b + VPUNPCKHQDQ Y2, Y15, Y11 // c461056dda or c5056dda + VPUNPCKHQDQ Y11, Y15, Y11 // c441056ddb + VPUNPCKHWD (BX), X9, X2 // c4e1316913 or c5b16913 + VPUNPCKHWD (R11), X9, X2 // c4c1316913 + VPUNPCKHWD 
X2, X9, X2 // c4e13169d2 or c5b169d2 + VPUNPCKHWD X11, X9, X2 // c4c13169d3 + VPUNPCKHWD (BX), X9, X11 // c46131691b or c531691b + VPUNPCKHWD (R11), X9, X11 // c44131691b + VPUNPCKHWD X2, X9, X11 // c4613169da or c53169da + VPUNPCKHWD X11, X9, X11 // c4413169db + VPUNPCKHWD (BX), Y15, Y2 // c4e1056913 or c5856913 + VPUNPCKHWD (R11), Y15, Y2 // c4c1056913 + VPUNPCKHWD Y2, Y15, Y2 // c4e10569d2 or c58569d2 + VPUNPCKHWD Y11, Y15, Y2 // c4c10569d3 + VPUNPCKHWD (BX), Y15, Y11 // c46105691b or c505691b + VPUNPCKHWD (R11), Y15, Y11 // c44105691b + VPUNPCKHWD Y2, Y15, Y11 // c4610569da or c50569da + VPUNPCKHWD Y11, Y15, Y11 // c4410569db + VPUNPCKLBW (BX), X9, X2 // c4e1316013 or c5b16013 + VPUNPCKLBW (R11), X9, X2 // c4c1316013 + VPUNPCKLBW X2, X9, X2 // c4e13160d2 or c5b160d2 + VPUNPCKLBW X11, X9, X2 // c4c13160d3 + VPUNPCKLBW (BX), X9, X11 // c46131601b or c531601b + VPUNPCKLBW (R11), X9, X11 // c44131601b + VPUNPCKLBW X2, X9, X11 // c4613160da or c53160da + VPUNPCKLBW X11, X9, X11 // c4413160db + VPUNPCKLBW (BX), Y15, Y2 // c4e1056013 or c5856013 + VPUNPCKLBW (R11), Y15, Y2 // c4c1056013 + VPUNPCKLBW Y2, Y15, Y2 // c4e10560d2 or c58560d2 + VPUNPCKLBW Y11, Y15, Y2 // c4c10560d3 + VPUNPCKLBW (BX), Y15, Y11 // c46105601b or c505601b + VPUNPCKLBW (R11), Y15, Y11 // c44105601b + VPUNPCKLBW Y2, Y15, Y11 // c4610560da or c50560da + VPUNPCKLBW Y11, Y15, Y11 // c4410560db + VPUNPCKLDQ (BX), X9, X2 // c4e1316213 or c5b16213 + VPUNPCKLDQ (R11), X9, X2 // c4c1316213 + VPUNPCKLDQ X2, X9, X2 // c4e13162d2 or c5b162d2 + VPUNPCKLDQ X11, X9, X2 // c4c13162d3 + VPUNPCKLDQ (BX), X9, X11 // c46131621b or c531621b + VPUNPCKLDQ (R11), X9, X11 // c44131621b + VPUNPCKLDQ X2, X9, X11 // c4613162da or c53162da + VPUNPCKLDQ X11, X9, X11 // c4413162db + VPUNPCKLDQ (BX), Y15, Y2 // c4e1056213 or c5856213 + VPUNPCKLDQ (R11), Y15, Y2 // c4c1056213 + VPUNPCKLDQ Y2, Y15, Y2 // c4e10562d2 or c58562d2 + VPUNPCKLDQ Y11, Y15, Y2 // c4c10562d3 + VPUNPCKLDQ (BX), Y15, Y11 // c46105621b or c505621b + 
VPUNPCKLDQ (R11), Y15, Y11 // c44105621b + VPUNPCKLDQ Y2, Y15, Y11 // c4610562da or c50562da + VPUNPCKLDQ Y11, Y15, Y11 // c4410562db + VPUNPCKLQDQ (BX), X9, X2 // c4e1316c13 or c5b16c13 + VPUNPCKLQDQ (R11), X9, X2 // c4c1316c13 + VPUNPCKLQDQ X2, X9, X2 // c4e1316cd2 or c5b16cd2 + VPUNPCKLQDQ X11, X9, X2 // c4c1316cd3 + VPUNPCKLQDQ (BX), X9, X11 // c461316c1b or c5316c1b + VPUNPCKLQDQ (R11), X9, X11 // c441316c1b + VPUNPCKLQDQ X2, X9, X11 // c461316cda or c5316cda + VPUNPCKLQDQ X11, X9, X11 // c441316cdb + VPUNPCKLQDQ (BX), Y15, Y2 // c4e1056c13 or c5856c13 + VPUNPCKLQDQ (R11), Y15, Y2 // c4c1056c13 + VPUNPCKLQDQ Y2, Y15, Y2 // c4e1056cd2 or c5856cd2 + VPUNPCKLQDQ Y11, Y15, Y2 // c4c1056cd3 + VPUNPCKLQDQ (BX), Y15, Y11 // c461056c1b or c5056c1b + VPUNPCKLQDQ (R11), Y15, Y11 // c441056c1b + VPUNPCKLQDQ Y2, Y15, Y11 // c461056cda or c5056cda + VPUNPCKLQDQ Y11, Y15, Y11 // c441056cdb + VPUNPCKLWD (BX), X9, X2 // c4e1316113 or c5b16113 + VPUNPCKLWD (R11), X9, X2 // c4c1316113 + VPUNPCKLWD X2, X9, X2 // c4e13161d2 or c5b161d2 + VPUNPCKLWD X11, X9, X2 // c4c13161d3 + VPUNPCKLWD (BX), X9, X11 // c46131611b or c531611b + VPUNPCKLWD (R11), X9, X11 // c44131611b + VPUNPCKLWD X2, X9, X11 // c4613161da or c53161da + VPUNPCKLWD X11, X9, X11 // c4413161db + VPUNPCKLWD (BX), Y15, Y2 // c4e1056113 or c5856113 + VPUNPCKLWD (R11), Y15, Y2 // c4c1056113 + VPUNPCKLWD Y2, Y15, Y2 // c4e10561d2 or c58561d2 + VPUNPCKLWD Y11, Y15, Y2 // c4c10561d3 + VPUNPCKLWD (BX), Y15, Y11 // c46105611b or c505611b + VPUNPCKLWD (R11), Y15, Y11 // c44105611b + VPUNPCKLWD Y2, Y15, Y11 // c4610561da or c50561da + VPUNPCKLWD Y11, Y15, Y11 // c4410561db + VPXOR (BX), X9, X2 // c4e131ef13 or c5b1ef13 + VPXOR (R11), X9, X2 // c4c131ef13 + VPXOR X2, X9, X2 // c4e131efd2 or c5b1efd2 + VPXOR X11, X9, X2 // c4c131efd3 + VPXOR (BX), X9, X11 // c46131ef1b or c531ef1b + VPXOR (R11), X9, X11 // c44131ef1b + VPXOR X2, X9, X11 // c46131efda or c531efda + VPXOR X11, X9, X11 // c44131efdb + VPXOR (BX), Y15, Y2 // 
c4e105ef13 or c585ef13 + VPXOR (R11), Y15, Y2 // c4c105ef13 + VPXOR Y2, Y15, Y2 // c4e105efd2 or c585efd2 + VPXOR Y11, Y15, Y2 // c4c105efd3 + VPXOR (BX), Y15, Y11 // c46105ef1b or c505ef1b + VPXOR (R11), Y15, Y11 // c44105ef1b + VPXOR Y2, Y15, Y11 // c46105efda or c505efda + VPXOR Y11, Y15, Y11 // c44105efdb + VRCPPS (BX), X2 // c4e1785313 or c5f85313 + VRCPPS (R11), X2 // c4c1785313 + VRCPPS X2, X2 // c4e17853d2 or c5f853d2 + VRCPPS X11, X2 // c4c17853d3 + VRCPPS (BX), X11 // c46178531b or c578531b + VRCPPS (R11), X11 // c44178531b + VRCPPS X2, X11 // c4617853da or c57853da + VRCPPS X11, X11 // c4417853db + VRCPPS (BX), Y2 // c4e17c5313 or c5fc5313 + VRCPPS (R11), Y2 // c4c17c5313 + VRCPPS Y2, Y2 // c4e17c53d2 or c5fc53d2 + VRCPPS Y11, Y2 // c4c17c53d3 + VRCPPS (BX), Y11 // c4617c531b or c57c531b + VRCPPS (R11), Y11 // c4417c531b + VRCPPS Y2, Y11 // c4617c53da or c57c53da + VRCPPS Y11, Y11 // c4417c53db + VRCPSS (BX), X9, X2 // c4e1325313 or c5b25313 + VRCPSS (R11), X9, X2 // c4c1325313 + VRCPSS X2, X9, X2 // c4e13253d2 or c5b253d2 + VRCPSS X11, X9, X2 // c4c13253d3 + VRCPSS (BX), X9, X11 // c46132531b or c532531b + VRCPSS (R11), X9, X11 // c44132531b + VRCPSS X2, X9, X11 // c4613253da or c53253da + VRCPSS X11, X9, X11 // c4413253db + VROUNDPD $7, (BX), X2 // c4e379091307 + VROUNDPD $7, (R11), X2 // c4c379091307 + VROUNDPD $7, X2, X2 // c4e37909d207 + VROUNDPD $7, X11, X2 // c4c37909d307 + VROUNDPD $7, (BX), X11 // c46379091b07 + VROUNDPD $7, (R11), X11 // c44379091b07 + VROUNDPD $7, X2, X11 // c4637909da07 + VROUNDPD $7, X11, X11 // c4437909db07 + VROUNDPD $7, (BX), Y2 // c4e37d091307 + VROUNDPD $7, (R11), Y2 // c4c37d091307 + VROUNDPD $7, Y2, Y2 // c4e37d09d207 + VROUNDPD $7, Y11, Y2 // c4c37d09d307 + VROUNDPD $7, (BX), Y11 // c4637d091b07 + VROUNDPD $7, (R11), Y11 // c4437d091b07 + VROUNDPD $7, Y2, Y11 // c4637d09da07 + VROUNDPD $7, Y11, Y11 // c4437d09db07 + VROUNDPS $7, (BX), X2 // c4e379081307 + VROUNDPS $7, (R11), X2 // c4c379081307 + VROUNDPS $7, X2, X2 
// c4e37908d207 + VROUNDPS $7, X11, X2 // c4c37908d307 + VROUNDPS $7, (BX), X11 // c46379081b07 + VROUNDPS $7, (R11), X11 // c44379081b07 + VROUNDPS $7, X2, X11 // c4637908da07 + VROUNDPS $7, X11, X11 // c4437908db07 + VROUNDPS $7, (BX), Y2 // c4e37d081307 + VROUNDPS $7, (R11), Y2 // c4c37d081307 + VROUNDPS $7, Y2, Y2 // c4e37d08d207 + VROUNDPS $7, Y11, Y2 // c4c37d08d307 + VROUNDPS $7, (BX), Y11 // c4637d081b07 + VROUNDPS $7, (R11), Y11 // c4437d081b07 + VROUNDPS $7, Y2, Y11 // c4637d08da07 + VROUNDPS $7, Y11, Y11 // c4437d08db07 + VROUNDSD $7, (BX), X9, X2 // c4e3310b1307 + VROUNDSD $7, (R11), X9, X2 // c4c3310b1307 + VROUNDSD $7, X2, X9, X2 // c4e3310bd207 + VROUNDSD $7, X11, X9, X2 // c4c3310bd307 + VROUNDSD $7, (BX), X9, X11 // c463310b1b07 + VROUNDSD $7, (R11), X9, X11 // c443310b1b07 + VROUNDSD $7, X2, X9, X11 // c463310bda07 + VROUNDSD $7, X11, X9, X11 // c443310bdb07 + VROUNDSS $7, (BX), X9, X2 // c4e3310a1307 + VROUNDSS $7, (R11), X9, X2 // c4c3310a1307 + VROUNDSS $7, X2, X9, X2 // c4e3310ad207 + VROUNDSS $7, X11, X9, X2 // c4c3310ad307 + VROUNDSS $7, (BX), X9, X11 // c463310a1b07 + VROUNDSS $7, (R11), X9, X11 // c443310a1b07 + VROUNDSS $7, X2, X9, X11 // c463310ada07 + VROUNDSS $7, X11, X9, X11 // c443310adb07 + VRSQRTPS (BX), X2 // c4e1785213 or c5f85213 + VRSQRTPS (R11), X2 // c4c1785213 + VRSQRTPS X2, X2 // c4e17852d2 or c5f852d2 + VRSQRTPS X11, X2 // c4c17852d3 + VRSQRTPS (BX), X11 // c46178521b or c578521b + VRSQRTPS (R11), X11 // c44178521b + VRSQRTPS X2, X11 // c4617852da or c57852da + VRSQRTPS X11, X11 // c4417852db + VRSQRTPS (BX), Y2 // c4e17c5213 or c5fc5213 + VRSQRTPS (R11), Y2 // c4c17c5213 + VRSQRTPS Y2, Y2 // c4e17c52d2 or c5fc52d2 + VRSQRTPS Y11, Y2 // c4c17c52d3 + VRSQRTPS (BX), Y11 // c4617c521b or c57c521b + VRSQRTPS (R11), Y11 // c4417c521b + VRSQRTPS Y2, Y11 // c4617c52da or c57c52da + VRSQRTPS Y11, Y11 // c4417c52db + VRSQRTSS (BX), X9, X2 // c4e1325213 or c5b25213 + VRSQRTSS (R11), X9, X2 // c4c1325213 + VRSQRTSS X2, X9, X2 // 
c4e13252d2 or c5b252d2 + VRSQRTSS X11, X9, X2 // c4c13252d3 + VRSQRTSS (BX), X9, X11 // c46132521b or c532521b + VRSQRTSS (R11), X9, X11 // c44132521b + VRSQRTSS X2, X9, X11 // c4613252da or c53252da + VRSQRTSS X11, X9, X11 // c4413252db + VSHUFPD $7, (BX), X9, X2 // c4e131c61307 or c5b1c61307 + VSHUFPD $7, (R11), X9, X2 // c4c131c61307 + VSHUFPD $7, X2, X9, X2 // c4e131c6d207 or c5b1c6d207 + VSHUFPD $7, X11, X9, X2 // c4c131c6d307 + VSHUFPD $7, (BX), X9, X11 // c46131c61b07 or c531c61b07 + VSHUFPD $7, (R11), X9, X11 // c44131c61b07 + VSHUFPD $7, X2, X9, X11 // c46131c6da07 or c531c6da07 + VSHUFPD $7, X11, X9, X11 // c44131c6db07 + VSHUFPD $7, (BX), Y15, Y2 // c4e105c61307 or c585c61307 + VSHUFPD $7, (R11), Y15, Y2 // c4c105c61307 + VSHUFPD $7, Y2, Y15, Y2 // c4e105c6d207 or c585c6d207 + VSHUFPD $7, Y11, Y15, Y2 // c4c105c6d307 + VSHUFPD $7, (BX), Y15, Y11 // c46105c61b07 or c505c61b07 + VSHUFPD $7, (R11), Y15, Y11 // c44105c61b07 + VSHUFPD $7, Y2, Y15, Y11 // c46105c6da07 or c505c6da07 + VSHUFPD $7, Y11, Y15, Y11 // c44105c6db07 + VSHUFPS $7, (BX), X9, X2 // c4e130c61307 or c5b0c61307 + VSHUFPS $7, (R11), X9, X2 // c4c130c61307 + VSHUFPS $7, X2, X9, X2 // c4e130c6d207 or c5b0c6d207 + VSHUFPS $7, X11, X9, X2 // c4c130c6d307 + VSHUFPS $7, (BX), X9, X11 // c46130c61b07 or c530c61b07 + VSHUFPS $7, (R11), X9, X11 // c44130c61b07 + VSHUFPS $7, X2, X9, X11 // c46130c6da07 or c530c6da07 + VSHUFPS $7, X11, X9, X11 // c44130c6db07 + VSHUFPS $7, (BX), Y15, Y2 // c4e104c61307 or c584c61307 + VSHUFPS $7, (R11), Y15, Y2 // c4c104c61307 + VSHUFPS $7, Y2, Y15, Y2 // c4e104c6d207 or c584c6d207 + VSHUFPS $7, Y11, Y15, Y2 // c4c104c6d307 + VSHUFPS $7, (BX), Y15, Y11 // c46104c61b07 or c504c61b07 + VSHUFPS $7, (R11), Y15, Y11 // c44104c61b07 + VSHUFPS $7, Y2, Y15, Y11 // c46104c6da07 or c504c6da07 + VSHUFPS $7, Y11, Y15, Y11 // c44104c6db07 + VSQRTPD (BX), X2 // c4e1795113 or c5f95113 + VSQRTPD (R11), X2 // c4c1795113 + VSQRTPD X2, X2 // c4e17951d2 or c5f951d2 + VSQRTPD X11, X2 // 
c4c17951d3 + VSQRTPD (BX), X11 // c46179511b or c579511b + VSQRTPD (R11), X11 // c44179511b + VSQRTPD X2, X11 // c4617951da or c57951da + VSQRTPD X11, X11 // c4417951db + VSQRTPD (BX), Y2 // c4e17d5113 or c5fd5113 + VSQRTPD (R11), Y2 // c4c17d5113 + VSQRTPD Y2, Y2 // c4e17d51d2 or c5fd51d2 + VSQRTPD Y11, Y2 // c4c17d51d3 + VSQRTPD (BX), Y11 // c4617d511b or c57d511b + VSQRTPD (R11), Y11 // c4417d511b + VSQRTPD Y2, Y11 // c4617d51da or c57d51da + VSQRTPD Y11, Y11 // c4417d51db + VSQRTPS (BX), X2 // c4e1785113 or c5f85113 + VSQRTPS (R11), X2 // c4c1785113 + VSQRTPS X2, X2 // c4e17851d2 or c5f851d2 + VSQRTPS X11, X2 // c4c17851d3 + VSQRTPS (BX), X11 // c46178511b or c578511b + VSQRTPS (R11), X11 // c44178511b + VSQRTPS X2, X11 // c4617851da or c57851da + VSQRTPS X11, X11 // c4417851db + VSQRTPS (BX), Y2 // c4e17c5113 or c5fc5113 + VSQRTPS (R11), Y2 // c4c17c5113 + VSQRTPS Y2, Y2 // c4e17c51d2 or c5fc51d2 + VSQRTPS Y11, Y2 // c4c17c51d3 + VSQRTPS (BX), Y11 // c4617c511b or c57c511b + VSQRTPS (R11), Y11 // c4417c511b + VSQRTPS Y2, Y11 // c4617c51da or c57c51da + VSQRTPS Y11, Y11 // c4417c51db + VSQRTSD (BX), X9, X2 // c4e1335113 or c5b35113 + VSQRTSD (R11), X9, X2 // c4c1335113 + VSQRTSD X2, X9, X2 // c4e13351d2 or c5b351d2 + VSQRTSD X11, X9, X2 // c4c13351d3 + VSQRTSD (BX), X9, X11 // c46133511b or c533511b + VSQRTSD (R11), X9, X11 // c44133511b + VSQRTSD X2, X9, X11 // c4613351da or c53351da + VSQRTSD X11, X9, X11 // c4413351db + VSQRTSS (BX), X9, X2 // c4e1325113 or c5b25113 + VSQRTSS (R11), X9, X2 // c4c1325113 + VSQRTSS X2, X9, X2 // c4e13251d2 or c5b251d2 + VSQRTSS X11, X9, X2 // c4c13251d3 + VSQRTSS (BX), X9, X11 // c46132511b or c532511b + VSQRTSS (R11), X9, X11 // c44132511b + VSQRTSS X2, X9, X11 // c4613251da or c53251da + VSQRTSS X11, X9, X11 // c4413251db + VSTMXCSR (BX) // c4e178ae1b or c5f8ae1b + VSTMXCSR (R11) // c4c178ae1b + VSUBPD (BX), X9, X2 // c4e1315c13 or c5b15c13 + VSUBPD (R11), X9, X2 // c4c1315c13 + VSUBPD X2, X9, X2 // c4e1315cd2 or c5b15cd2 + 
VSUBPD X11, X9, X2 // c4c1315cd3 + VSUBPD (BX), X9, X11 // c461315c1b or c5315c1b + VSUBPD (R11), X9, X11 // c441315c1b + VSUBPD X2, X9, X11 // c461315cda or c5315cda + VSUBPD X11, X9, X11 // c441315cdb + VSUBPD (BX), Y15, Y2 // c4e1055c13 or c5855c13 + VSUBPD (R11), Y15, Y2 // c4c1055c13 + VSUBPD Y2, Y15, Y2 // c4e1055cd2 or c5855cd2 + VSUBPD Y11, Y15, Y2 // c4c1055cd3 + VSUBPD (BX), Y15, Y11 // c461055c1b or c5055c1b + VSUBPD (R11), Y15, Y11 // c441055c1b + VSUBPD Y2, Y15, Y11 // c461055cda or c5055cda + VSUBPD Y11, Y15, Y11 // c441055cdb + VSUBPS (BX), X9, X2 // c4e1305c13 or c5b05c13 + VSUBPS (R11), X9, X2 // c4c1305c13 + VSUBPS X2, X9, X2 // c4e1305cd2 or c5b05cd2 + VSUBPS X11, X9, X2 // c4c1305cd3 + VSUBPS (BX), X9, X11 // c461305c1b or c5305c1b + VSUBPS (R11), X9, X11 // c441305c1b + VSUBPS X2, X9, X11 // c461305cda or c5305cda + VSUBPS X11, X9, X11 // c441305cdb + VSUBPS (BX), Y15, Y2 // c4e1045c13 or c5845c13 + VSUBPS (R11), Y15, Y2 // c4c1045c13 + VSUBPS Y2, Y15, Y2 // c4e1045cd2 or c5845cd2 + VSUBPS Y11, Y15, Y2 // c4c1045cd3 + VSUBPS (BX), Y15, Y11 // c461045c1b or c5045c1b + VSUBPS (R11), Y15, Y11 // c441045c1b + VSUBPS Y2, Y15, Y11 // c461045cda or c5045cda + VSUBPS Y11, Y15, Y11 // c441045cdb + VSUBSD (BX), X9, X2 // c4e1335c13 or c5b35c13 + VSUBSD (R11), X9, X2 // c4c1335c13 + VSUBSD X2, X9, X2 // c4e1335cd2 or c5b35cd2 + VSUBSD X11, X9, X2 // c4c1335cd3 + VSUBSD (BX), X9, X11 // c461335c1b or c5335c1b + VSUBSD (R11), X9, X11 // c441335c1b + VSUBSD X2, X9, X11 // c461335cda or c5335cda + VSUBSD X11, X9, X11 // c441335cdb + VSUBSS (BX), X9, X2 // c4e1325c13 or c5b25c13 + VSUBSS (R11), X9, X2 // c4c1325c13 + VSUBSS X2, X9, X2 // c4e1325cd2 or c5b25cd2 + VSUBSS X11, X9, X2 // c4c1325cd3 + VSUBSS (BX), X9, X11 // c461325c1b or c5325c1b + VSUBSS (R11), X9, X11 // c441325c1b + VSUBSS X2, X9, X11 // c461325cda or c5325cda + VSUBSS X11, X9, X11 // c441325cdb + VTESTPD (BX), X2 // c4e2790f13 + VTESTPD (R11), X2 // c4c2790f13 + VTESTPD X2, X2 // c4e2790fd2 + 
VTESTPD X11, X2 // c4c2790fd3 + VTESTPD (BX), X11 // c462790f1b + VTESTPD (R11), X11 // c442790f1b + VTESTPD X2, X11 // c462790fda + VTESTPD X11, X11 // c442790fdb + VTESTPD (BX), Y2 // c4e27d0f13 + VTESTPD (R11), Y2 // c4c27d0f13 + VTESTPD Y2, Y2 // c4e27d0fd2 + VTESTPD Y11, Y2 // c4c27d0fd3 + VTESTPD (BX), Y11 // c4627d0f1b + VTESTPD (R11), Y11 // c4427d0f1b + VTESTPD Y2, Y11 // c4627d0fda + VTESTPD Y11, Y11 // c4427d0fdb + VTESTPS (BX), X2 // c4e2790e13 + VTESTPS (R11), X2 // c4c2790e13 + VTESTPS X2, X2 // c4e2790ed2 + VTESTPS X11, X2 // c4c2790ed3 + VTESTPS (BX), X11 // c462790e1b + VTESTPS (R11), X11 // c442790e1b + VTESTPS X2, X11 // c462790eda + VTESTPS X11, X11 // c442790edb + VTESTPS (BX), Y2 // c4e27d0e13 + VTESTPS (R11), Y2 // c4c27d0e13 + VTESTPS Y2, Y2 // c4e27d0ed2 + VTESTPS Y11, Y2 // c4c27d0ed3 + VTESTPS (BX), Y11 // c4627d0e1b + VTESTPS (R11), Y11 // c4427d0e1b + VTESTPS Y2, Y11 // c4627d0eda + VTESTPS Y11, Y11 // c4427d0edb + VUCOMISD (BX), X2 // c4e1792e13 or c5f92e13 + VUCOMISD (R11), X2 // c4c1792e13 + VUCOMISD X2, X2 // c4e1792ed2 or c5f92ed2 + VUCOMISD X11, X2 // c4c1792ed3 + VUCOMISD (BX), X11 // c461792e1b or c5792e1b + VUCOMISD (R11), X11 // c441792e1b + VUCOMISD X2, X11 // c461792eda or c5792eda + VUCOMISD X11, X11 // c441792edb + VUCOMISS (BX), X2 // c4e1782e13 or c5f82e13 + VUCOMISS (R11), X2 // c4c1782e13 + VUCOMISS X2, X2 // c4e1782ed2 or c5f82ed2 + VUCOMISS X11, X2 // c4c1782ed3 + VUCOMISS (BX), X11 // c461782e1b or c5782e1b + VUCOMISS (R11), X11 // c441782e1b + VUCOMISS X2, X11 // c461782eda or c5782eda + VUCOMISS X11, X11 // c441782edb + VUNPCKHPD (BX), X9, X2 // c4e1311513 or c5b11513 + VUNPCKHPD (R11), X9, X2 // c4c1311513 + VUNPCKHPD X2, X9, X2 // c4e13115d2 or c5b115d2 + VUNPCKHPD X11, X9, X2 // c4c13115d3 + VUNPCKHPD (BX), X9, X11 // c46131151b or c531151b + VUNPCKHPD (R11), X9, X11 // c44131151b + VUNPCKHPD X2, X9, X11 // c4613115da or c53115da + VUNPCKHPD X11, X9, X11 // c4413115db + VUNPCKHPD (BX), Y15, Y2 // c4e1051513 or 
c5851513 + VUNPCKHPD (R11), Y15, Y2 // c4c1051513 + VUNPCKHPD Y2, Y15, Y2 // c4e10515d2 or c58515d2 + VUNPCKHPD Y11, Y15, Y2 // c4c10515d3 + VUNPCKHPD (BX), Y15, Y11 // c46105151b or c505151b + VUNPCKHPD (R11), Y15, Y11 // c44105151b + VUNPCKHPD Y2, Y15, Y11 // c4610515da or c50515da + VUNPCKHPD Y11, Y15, Y11 // c4410515db + VUNPCKHPS (BX), X9, X2 // c4e1301513 or c5b01513 + VUNPCKHPS (R11), X9, X2 // c4c1301513 + VUNPCKHPS X2, X9, X2 // c4e13015d2 or c5b015d2 + VUNPCKHPS X11, X9, X2 // c4c13015d3 + VUNPCKHPS (BX), X9, X11 // c46130151b or c530151b + VUNPCKHPS (R11), X9, X11 // c44130151b + VUNPCKHPS X2, X9, X11 // c4613015da or c53015da + VUNPCKHPS X11, X9, X11 // c4413015db + VUNPCKHPS (BX), Y15, Y2 // c4e1041513 or c5841513 + VUNPCKHPS (R11), Y15, Y2 // c4c1041513 + VUNPCKHPS Y2, Y15, Y2 // c4e10415d2 or c58415d2 + VUNPCKHPS Y11, Y15, Y2 // c4c10415d3 + VUNPCKHPS (BX), Y15, Y11 // c46104151b or c504151b + VUNPCKHPS (R11), Y15, Y11 // c44104151b + VUNPCKHPS Y2, Y15, Y11 // c4610415da or c50415da + VUNPCKHPS Y11, Y15, Y11 // c4410415db + VUNPCKLPD (BX), X9, X2 // c4e1311413 or c5b11413 + VUNPCKLPD (R11), X9, X2 // c4c1311413 + VUNPCKLPD X2, X9, X2 // c4e13114d2 or c5b114d2 + VUNPCKLPD X11, X9, X2 // c4c13114d3 + VUNPCKLPD (BX), X9, X11 // c46131141b or c531141b + VUNPCKLPD (R11), X9, X11 // c44131141b + VUNPCKLPD X2, X9, X11 // c4613114da or c53114da + VUNPCKLPD X11, X9, X11 // c4413114db + VUNPCKLPD (BX), Y15, Y2 // c4e1051413 or c5851413 + VUNPCKLPD (R11), Y15, Y2 // c4c1051413 + VUNPCKLPD Y2, Y15, Y2 // c4e10514d2 or c58514d2 + VUNPCKLPD Y11, Y15, Y2 // c4c10514d3 + VUNPCKLPD (BX), Y15, Y11 // c46105141b or c505141b + VUNPCKLPD (R11), Y15, Y11 // c44105141b + VUNPCKLPD Y2, Y15, Y11 // c4610514da or c50514da + VUNPCKLPD Y11, Y15, Y11 // c4410514db + VUNPCKLPS (BX), X9, X2 // c4e1301413 or c5b01413 + VUNPCKLPS (R11), X9, X2 // c4c1301413 + VUNPCKLPS X2, X9, X2 // c4e13014d2 or c5b014d2 + VUNPCKLPS X11, X9, X2 // c4c13014d3 + VUNPCKLPS (BX), X9, X11 // c46130141b 
or c530141b + VUNPCKLPS (R11), X9, X11 // c44130141b + VUNPCKLPS X2, X9, X11 // c4613014da or c53014da + VUNPCKLPS X11, X9, X11 // c4413014db + VUNPCKLPS (BX), Y15, Y2 // c4e1041413 or c5841413 + VUNPCKLPS (R11), Y15, Y2 // c4c1041413 + VUNPCKLPS Y2, Y15, Y2 // c4e10414d2 or c58414d2 + VUNPCKLPS Y11, Y15, Y2 // c4c10414d3 + VUNPCKLPS (BX), Y15, Y11 // c46104141b or c504141b + VUNPCKLPS (R11), Y15, Y11 // c44104141b + VUNPCKLPS Y2, Y15, Y11 // c4610414da or c50414da + VUNPCKLPS Y11, Y15, Y11 // c4410414db + VXORPD (BX), X9, X2 // c4e1315713 or c5b15713 + VXORPD (R11), X9, X2 // c4c1315713 + VXORPD X2, X9, X2 // c4e13157d2 or c5b157d2 + VXORPD X11, X9, X2 // c4c13157d3 + VXORPD (BX), X9, X11 // c46131571b or c531571b + VXORPD (R11), X9, X11 // c44131571b + VXORPD X2, X9, X11 // c4613157da or c53157da + VXORPD X11, X9, X11 // c4413157db + VXORPD (BX), Y15, Y2 // c4e1055713 or c5855713 + VXORPD (R11), Y15, Y2 // c4c1055713 + VXORPD Y2, Y15, Y2 // c4e10557d2 or c58557d2 + VXORPD Y11, Y15, Y2 // c4c10557d3 + VXORPD (BX), Y15, Y11 // c46105571b or c505571b + VXORPD (R11), Y15, Y11 // c44105571b + VXORPD Y2, Y15, Y11 // c4610557da or c50557da + VXORPD Y11, Y15, Y11 // c4410557db + VXORPS (BX), X9, X2 // c4e1305713 or c5b05713 + VXORPS (R11), X9, X2 // c4c1305713 + VXORPS X2, X9, X2 // c4e13057d2 or c5b057d2 + VXORPS X11, X9, X2 // c4c13057d3 + VXORPS (BX), X9, X11 // c46130571b or c530571b + VXORPS (R11), X9, X11 // c44130571b + VXORPS X2, X9, X11 // c4613057da or c53057da + VXORPS X11, X9, X11 // c4413057db + VXORPS (BX), Y15, Y2 // c4e1045713 or c5845713 + VXORPS (R11), Y15, Y2 // c4c1045713 + VXORPS Y2, Y15, Y2 // c4e10457d2 or c58457d2 + VXORPS Y11, Y15, Y2 // c4c10457d3 + VXORPS (BX), Y15, Y11 // c46104571b or c504571b + VXORPS (R11), Y15, Y11 // c44104571b + VXORPS Y2, Y15, Y11 // c4610457da or c50457da + VXORPS Y11, Y15, Y11 // c4410457db + VZEROALL // c4e17c77 or c5fc77 + VZEROUPPER // c4e17877 or c5f877 + WBINVD // 0f09 + WRFSBASEL DX // f30faed2 + WRFSBASEL R11 
// f3410faed3 + WRGSBASEL DX // f30faeda + WRGSBASEL R11 // f3410faedb + WRFSBASEQ DX // f3480faed2 + WRFSBASEQ R11 // f3490faed3 + WRGSBASEQ DX // f3480faeda + WRGSBASEQ R11 // f3490faedb + WRMSR // 0f30 + WRPKRU // 0f01ef + XABORT $7 // c6f807 + XADDW DX, (BX) // 660fc113 + XADDW R11, (BX) // 66440fc11b + XADDW DX, (R11) // 66410fc113 + XADDW R11, (R11) // 66450fc11b + XADDW DX, DX // 660fc1d2 + XADDW R11, DX // 66440fc1da + XADDW DX, R11 // 66410fc1d3 + XADDW R11, R11 // 66450fc1db + XADDL DX, (BX) // 0fc113 + XADDL R11, (BX) // 440fc11b + XADDL DX, (R11) // 410fc113 + XADDL R11, (R11) // 450fc11b + XADDL DX, DX // 0fc1d2 + XADDL R11, DX // 440fc1da + XADDL DX, R11 // 410fc1d3 + XADDL R11, R11 // 450fc1db + XADDQ DX, (BX) // 480fc113 + XADDQ R11, (BX) // 4c0fc11b + XADDQ DX, (R11) // 490fc113 + XADDQ R11, (R11) // 4d0fc11b + XADDQ DX, DX // 480fc1d2 + XADDQ R11, DX // 4c0fc1da + XADDQ DX, R11 // 490fc1d3 + XADDQ R11, R11 // 4d0fc1db + XADDB DL, (BX) // 0fc013 + XADDB R11, (BX) // 440fc01b + XADDB DL, (R11) // 410fc013 + XADDB R11, (R11) // 450fc01b + XADDB DL, DL // 0fc0d2 + XADDB R11, DL // 440fc0da + XADDB DL, R11 // 410fc0d3 + XADDB R11, R11 // 450fc0db + //TODO: XBEGIN .+$0x1122 // 66c7f82211 + //TODO: XBEGIN .+$0x11223344 // c7f844332211 + XCHGW DX, (BX) // 668713 + XCHGW R11, (BX) // 6644871b + XCHGW DX, (R11) // 66418713 + XCHGW R11, (R11) // 6645871b + XCHGW DX, DX // 6687d2 + XCHGW R11, DX // 664487da + XCHGW DX, R11 // 664187d3 + XCHGW R11, R11 // 664587db + XCHGL DX, (BX) // 8713 + XCHGL R11, (BX) // 44871b + XCHGL DX, (R11) // 418713 + XCHGL R11, (R11) // 45871b + XCHGL DX, DX // 87d2 + XCHGL R11, DX // 4487da + XCHGL DX, R11 // 4187d3 + XCHGL R11, R11 // 4587db + XCHGQ DX, (BX) // 488713 + XCHGQ R11, (BX) // 4c871b + XCHGQ DX, (R11) // 498713 + XCHGQ R11, (R11) // 4d871b + XCHGQ DX, DX // 4887d2 + XCHGQ R11, DX // 4c87da + XCHGQ DX, R11 // 4987d3 + XCHGQ R11, R11 // 4d87db + XCHGB DL, (BX) // 8613 + XCHGB R11, (BX) // 44861b + XCHGB DL, (R11) // 
418613 + XCHGB R11, (R11) // 45861b + XCHGB DL, DL // 86d2 + XCHGB R11, DL // 4486da + XCHGB DL, R11 // 4186d3 + XCHGB R11, R11 // 4586db + XCHGW AX, DX // 6692 + XCHGW AX, R11 // 664193 + XCHGL AX, DX // 92 + XCHGL AX, R11 // 4193 + XCHGQ AX, DX // 4892 + XCHGQ AX, R11 // 4993 + XEND // 0f01d5 + XGETBV // 0f01d0 + XLAT // d7 + XORB $7, AL // 3407 + XORW $61731, AX // 663523f1 + XORL $4045620583, AX // 35674523f1 + XORQ $-249346713, AX // 4835674523f1 + XORW $61731, (BX) // 66813323f1 + XORW $61731, (R11) // 6641813323f1 + XORW $61731, DX // 6681f223f1 + XORW $61731, R11 // 664181f323f1 + XORW $7, (BX) // 66833307 + XORW $7, (R11) // 6641833307 + XORW $7, DX // 6683f207 + XORW $7, R11 // 664183f307 + XORW DX, (BX) // 663113 + XORW R11, (BX) // 6644311b + XORW DX, (R11) // 66413113 + XORW R11, (R11) // 6645311b + XORW DX, DX // 6631d2 or 6633d2 + XORW R11, DX // 664431da or 664133d3 + XORW DX, R11 // 664131d3 or 664433da + XORW R11, R11 // 664531db or 664533db + XORL $4045620583, (BX) // 8133674523f1 + XORL $4045620583, (R11) // 418133674523f1 + XORL $4045620583, DX // 81f2674523f1 + XORL $4045620583, R11 // 4181f3674523f1 + XORL $7, (BX) // 833307 + XORL $7, (R11) // 41833307 + XORL $7, DX // 83f207 + XORL $7, R11 // 4183f307 + XORL DX, (BX) // 3113 + XORL R11, (BX) // 44311b + XORL DX, (R11) // 413113 + XORL R11, (R11) // 45311b + XORL DX, DX // 31d2 or 33d2 + XORL R11, DX // 4431da or 4133d3 + XORL DX, R11 // 4131d3 or 4433da + XORL R11, R11 // 4531db or 4533db + XORQ $-249346713, (BX) // 488133674523f1 + XORQ $-249346713, (R11) // 498133674523f1 + XORQ $-249346713, DX // 4881f2674523f1 + XORQ $-249346713, R11 // 4981f3674523f1 + XORQ $7, (BX) // 48833307 + XORQ $7, (R11) // 49833307 + XORQ $7, DX // 4883f207 + XORQ $7, R11 // 4983f307 + XORQ DX, (BX) // 483113 + XORQ R11, (BX) // 4c311b + XORQ DX, (R11) // 493113 + XORQ R11, (R11) // 4d311b + XORQ DX, DX // 4831d2 or 4833d2 + XORQ R11, DX // 4c31da or 4933d3 + XORQ DX, R11 // 4931d3 or 4c33da + XORQ R11, R11 // 
4d31db or 4d33db + XORB $7, (BX) // 803307 + XORB $7, (R11) // 41803307 + XORB $7, DL // 80f207 + XORB $7, R11 // 4180f307 + XORB DL, (BX) // 3013 + XORB R11, (BX) // 44301b + XORB DL, (R11) // 413013 + XORB R11, (R11) // 45301b + XORB DL, DL // 30d2 or 32d2 + XORB R11, DL // 4430da or 4132d3 + XORB DL, R11 // 4130d3 or 4432da + XORB R11, R11 // 4530db or 4532db + XORW (BX), DX // 663313 + XORW (R11), DX // 66413313 + XORW (BX), R11 // 6644331b + XORW (R11), R11 // 6645331b + XORL (BX), DX // 3313 + XORL (R11), DX // 413313 + XORL (BX), R11 // 44331b + XORL (R11), R11 // 45331b + XORQ (BX), DX // 483313 + XORQ (R11), DX // 493313 + XORQ (BX), R11 // 4c331b + XORQ (R11), R11 // 4d331b + XORB (BX), DL // 3213 + XORB (R11), DL // 413213 + XORB (BX), R11 // 44321b + XORB (R11), R11 // 45321b + XORPD (BX), X2 // 660f5713 + XORPD (R11), X2 // 66410f5713 + XORPD X2, X2 // 660f57d2 + XORPD X11, X2 // 66410f57d3 + XORPD (BX), X11 // 66440f571b + XORPD (R11), X11 // 66450f571b + XORPD X2, X11 // 66440f57da + XORPD X11, X11 // 66450f57db + XORPS (BX), X2 // 0f5713 + XORPS (R11), X2 // 410f5713 + XORPS X2, X2 // 0f57d2 + XORPS X11, X2 // 410f57d3 + XORPS (BX), X11 // 440f571b + XORPS (R11), X11 // 450f571b + XORPS X2, X11 // 440f57da + XORPS X11, X11 // 450f57db + XRSTOR (BX) // 0fae2b + XRSTOR (R11) // 410fae2b + XRSTOR64 (BX) // 480fae2b + XRSTOR64 (R11) // 490fae2b + XRSTORS (BX) // 0fc71b + XRSTORS (R11) // 410fc71b + XRSTORS64 (BX) // 480fc71b + XRSTORS64 (R11) // 490fc71b + XSAVE (BX) // 0fae23 + XSAVE (R11) // 410fae23 + XSAVE64 (BX) // 480fae23 + XSAVE64 (R11) // 490fae23 + XSAVEC (BX) // 0fc723 + XSAVEC (R11) // 410fc723 + XSAVEC64 (BX) // 480fc723 + XSAVEC64 (R11) // 490fc723 + XSAVEOPT (BX) // 0fae33 + XSAVEOPT (R11) // 410fae33 + XSAVEOPT64 (BX) // 480fae33 + XSAVEOPT64 (R11) // 490fae33 + XSAVES (BX) // 0fc72b + XSAVES (R11) // 410fc72b + XSAVES64 (BX) // 480fc72b + XSAVES64 (R11) // 490fc72b + XSETBV // 0f01d1 + XTEST // 0f01d6 + RET diff --git 
a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/amd64enc_extra.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/amd64enc_extra.s new file mode 100644 index 0000000000000000000000000000000000000000..08cb20c70718033424e12e53fbbe657d03b1f158 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/amd64enc_extra.s @@ -0,0 +1,1063 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This input extends auto-generated amd64enc.s test suite +// with manually added tests. + +#include "../../../../../runtime/textflag.h" + +TEXT asmtest(SB),DUPOK|NOSPLIT,$0 + // AVX2GATHER: basic combinations. + VPGATHERDQ Y2, (BP)(X7*2), Y1 // c4e2ed904c7d00 + VPGATHERDQ X12, (R13)(X14*2), X11 // c40299905c7500 + VPGATHERDQ Y12, (R13)(X14*2), Y11 // c4029d905c7500 + VPGATHERDQ Y0, 8(X4*1), Y6 // c4e2fd90342508000000 + VPGATHERDQ Y0, -8(X4*1), Y6 // c4e2fd903425f8ffffff + VPGATHERDQ Y0, 0(X4*1), Y6 // c4e2fd90342500000000 + VPGATHERDQ Y0, 664(X4*1), Y6 // c4e2fd90342598020000 + VPGATHERDQ Y0, 8(X4*8), Y6 // c4e2fd9034e508000000 + VPGATHERDQ Y0, -8(X4*8), Y6 // c4e2fd9034e5f8ffffff + VPGATHERDQ Y0, 0(X4*8), Y6 // c4e2fd9034e500000000 + VPGATHERDQ Y0, 664(X4*8), Y6 // c4e2fd9034e598020000 + VPGATHERDQ Y0, 8(X14*1), Y6 // c4a2fd90343508000000 + VPGATHERDQ Y0, -8(X14*1), Y6 // c4a2fd903435f8ffffff + VPGATHERDQ Y0, 0(X14*1), Y6 // c4a2fd90343500000000 + VPGATHERDQ Y0, 664(X14*1), Y6 // c4a2fd90343598020000 + VPGATHERDQ Y0, 8(X14*8), Y6 // c4a2fd9034f508000000 + VPGATHERDQ Y0, -8(X14*8), Y6 // c4a2fd9034f5f8ffffff + VPGATHERDQ Y0, 0(X14*8), Y6 // c4a2fd9034f500000000 + VPGATHERDQ Y0, 664(X14*8), Y6 // c4a2fd9034f598020000 + VPGATHERDQ X2, (BP)(X7*2), X1 // c4e2e9904c7d00 + VPGATHERDQ Y2, (BP)(X7*2), Y1 // c4e2ed904c7d00 + VPGATHERDQ X12, (R13)(X14*2), X11 // c40299905c7500 + VPGATHERDQ Y12, (R13)(X14*2), Y11 // 
c4029d905c7500 + VPGATHERDQ Y0, 8(X4*1), Y6 // c4e2fd90342508000000 + VPGATHERDQ Y0, -8(X4*1), Y6 // c4e2fd903425f8ffffff + VPGATHERDQ Y0, 0(X4*1), Y6 // c4e2fd90342500000000 + VPGATHERDQ Y0, 664(X4*1), Y6 // c4e2fd90342598020000 + VPGATHERDQ Y0, 8(X4*8), Y6 // c4e2fd9034e508000000 + VPGATHERDQ Y0, -8(X4*8), Y6 // c4e2fd9034e5f8ffffff + VPGATHERDQ Y0, 0(X4*8), Y6 // c4e2fd9034e500000000 + VPGATHERDQ Y0, 664(X4*8), Y6 // c4e2fd9034e598020000 + VPGATHERDQ Y0, 8(X14*1), Y6 // c4a2fd90343508000000 + VPGATHERDQ Y0, -8(X14*1), Y6 // c4a2fd903435f8ffffff + VPGATHERDQ Y0, 0(X14*1), Y6 // c4a2fd90343500000000 + VPGATHERDQ Y0, 664(X14*1), Y6 // c4a2fd90343598020000 + VPGATHERDQ Y0, 8(X14*8), Y6 // c4a2fd9034f508000000 + VPGATHERDQ Y0, -8(X14*8), Y6 // c4a2fd9034f5f8ffffff + VPGATHERDQ Y0, 0(X14*8), Y6 // c4a2fd9034f500000000 + VPGATHERDQ Y0, 664(X14*8), Y6 // c4a2fd9034f598020000 + VPGATHERQQ X2, (BP)(X7*2), X1 // c4e2e9914c7d00 + VPGATHERQQ Y2, (BP)(Y7*2), Y1 // c4e2ed914c7d00 + VPGATHERQQ X12, (R13)(X14*2), X11 // c40299915c7500 + VPGATHERQQ Y12, (R13)(Y14*2), Y11 // c4029d915c7500 + VPGATHERQQ X2, (BP)(X7*2), X1 // c4e2e9914c7d00 + VPGATHERQQ Y2, (BP)(Y7*2), Y1 // c4e2ed914c7d00 + VPGATHERQQ X12, (R13)(X14*2), X11 // c40299915c7500 + VPGATHERQQ Y12, (R13)(Y14*2), Y11 // c4029d915c7500 + VGATHERDPD X2, (BP)(X7*2), X1 // c4e2e9924c7d00 + VGATHERDPD Y2, (BP)(X7*2), Y1 // c4e2ed924c7d00 + VGATHERDPD X12, (R13)(X14*2), X11 // c40299925c7500 + VGATHERDPD Y12, (R13)(X14*2), Y11 // c4029d925c7500 + VGATHERDPD Y0, 8(X4*1), Y6 // c4e2fd92342508000000 + VGATHERDPD Y0, -8(X4*1), Y6 // c4e2fd923425f8ffffff + VGATHERDPD Y0, 0(X4*1), Y6 // c4e2fd92342500000000 + VGATHERDPD Y0, 664(X4*1), Y6 // c4e2fd92342598020000 + VGATHERDPD Y0, 8(X4*8), Y6 // c4e2fd9234e508000000 + VGATHERDPD Y0, -8(X4*8), Y6 // c4e2fd9234e5f8ffffff + VGATHERDPD Y0, 0(X4*8), Y6 // c4e2fd9234e500000000 + VGATHERDPD Y0, 664(X4*8), Y6 // c4e2fd9234e598020000 + VGATHERDPD Y0, 8(X14*1), Y6 // c4a2fd92343508000000 + 
VGATHERDPD Y0, -8(X14*1), Y6 // c4a2fd923435f8ffffff + VGATHERDPD Y0, 0(X14*1), Y6 // c4a2fd92343500000000 + VGATHERDPD Y0, 664(X14*1), Y6 // c4a2fd92343598020000 + VGATHERDPD Y0, 8(X14*8), Y6 // c4a2fd9234f508000000 + VGATHERDPD Y0, -8(X14*8), Y6 // c4a2fd9234f5f8ffffff + VGATHERDPD Y0, 0(X14*8), Y6 // c4a2fd9234f500000000 + VGATHERDPD Y0, 664(X14*8), Y6 // c4a2fd9234f598020000 + VGATHERDPD X2, (BP)(X7*2), X1 // c4e2e9924c7d00 + VGATHERDPD Y2, (BP)(X7*2), Y1 // c4e2ed924c7d00 + VGATHERDPD X12, (R13)(X14*2), X11 // c40299925c7500 + VGATHERDPD Y12, (R13)(X14*2), Y11 // c4029d925c7500 + VGATHERDPD Y0, 8(X4*1), Y6 // c4e2fd92342508000000 + VGATHERDPD Y0, -8(X4*1), Y6 // c4e2fd923425f8ffffff + VGATHERDPD Y0, 0(X4*1), Y6 // c4e2fd92342500000000 + VGATHERDPD Y0, 664(X4*1), Y6 // c4e2fd92342598020000 + VGATHERDPD Y0, 8(X4*8), Y6 // c4e2fd9234e508000000 + VGATHERDPD Y0, -8(X4*8), Y6 // c4e2fd9234e5f8ffffff + VGATHERDPD Y0, 0(X4*8), Y6 // c4e2fd9234e500000000 + VGATHERDPD Y0, 664(X4*8), Y6 // c4e2fd9234e598020000 + VGATHERDPD Y0, 8(X14*1), Y6 // c4a2fd92343508000000 + VGATHERDPD Y0, -8(X14*1), Y6 // c4a2fd923435f8ffffff + VGATHERDPD Y0, 0(X14*1), Y6 // c4a2fd92343500000000 + VGATHERDPD Y0, 664(X14*1), Y6 // c4a2fd92343598020000 + VGATHERDPD Y0, 8(X14*8), Y6 // c4a2fd9234f508000000 + VGATHERDPD Y0, -8(X14*8), Y6 // c4a2fd9234f5f8ffffff + VGATHERDPD Y0, 0(X14*8), Y6 // c4a2fd9234f500000000 + VGATHERDPD Y0, 664(X14*8), Y6 // c4a2fd9234f598020000 + VGATHERQPD X2, (BP)(X7*2), X1 // c4e2e9934c7d00 + VGATHERQPD Y2, (BP)(Y7*2), Y1 // c4e2ed934c7d00 + VGATHERQPD X12, (R13)(X14*2), X11 // c40299935c7500 + VGATHERQPD Y12, (R13)(Y14*2), Y11 // c4029d935c7500 + VGATHERQPD X2, (BP)(X7*2), X1 // c4e2e9934c7d00 + VGATHERQPD Y2, (BP)(Y7*2), Y1 // c4e2ed934c7d00 + VGATHERQPD X12, (R13)(X14*2), X11 // c40299935c7500 + VGATHERQPD Y12, (R13)(Y14*2), Y11 // c4029d935c7500 + VGATHERDPS X2, (BP)(X7*2), X1 // c4e269924c7d00 + VGATHERDPS Y2, (BP)(Y7*2), Y1 // c4e26d924c7d00 + VGATHERDPS X12, 
(R13)(X14*2), X11 // c40219925c7500 + VGATHERDPS Y12, (R13)(Y14*2), Y11 // c4021d925c7500 + VGATHERDPS X3, 8(X4*1), X6 // c4e26192342508000000 + VGATHERDPS X3, -8(X4*1), X6 // c4e261923425f8ffffff + VGATHERDPS X3, 0(X4*1), X6 // c4e26192342500000000 + VGATHERDPS X3, 664(X4*1), X6 // c4e26192342598020000 + VGATHERDPS X3, 8(X4*8), X6 // c4e2619234e508000000 + VGATHERDPS X3, -8(X4*8), X6 // c4e2619234e5f8ffffff + VGATHERDPS X3, 0(X4*8), X6 // c4e2619234e500000000 + VGATHERDPS X3, 664(X4*8), X6 // c4e2619234e598020000 + VGATHERDPS X3, 8(X14*1), X6 // c4a26192343508000000 + VGATHERDPS X3, -8(X14*1), X6 // c4a261923435f8ffffff + VGATHERDPS X3, 0(X14*1), X6 // c4a26192343500000000 + VGATHERDPS X3, 664(X14*1), X6 // c4a26192343598020000 + VGATHERDPS X3, 8(X14*8), X6 // c4a2619234f508000000 + VGATHERDPS X3, -8(X14*8), X6 // c4a2619234f5f8ffffff + VGATHERDPS X3, 0(X14*8), X6 // c4a2619234f500000000 + VGATHERDPS X3, 664(X14*8), X6 // c4a2619234f598020000 + VGATHERDPS X2, (BP)(X7*2), X1 // c4e269924c7d00 + VGATHERDPS Y2, (BP)(Y7*2), Y1 // c4e26d924c7d00 + VGATHERDPS X12, (R13)(X14*2), X11 // c40219925c7500 + VGATHERDPS Y12, (R13)(Y14*2), Y11 // c4021d925c7500 + VGATHERDPS X5, 8(X4*1), X6 // c4e25192342508000000 + VGATHERDPS X3, -8(X4*1), X6 // c4e261923425f8ffffff + VGATHERDPS X3, 0(X4*1), X6 // c4e26192342500000000 + VGATHERDPS X3, 664(X4*1), X6 // c4e26192342598020000 + VGATHERDPS X3, 8(X4*8), X6 // c4e2619234e508000000 + VGATHERDPS X3, -8(X4*8), X6 // c4e2619234e5f8ffffff + VGATHERDPS X3, 0(X4*8), X6 // c4e2619234e500000000 + VGATHERDPS X3, 664(X4*8), X6 // c4e2619234e598020000 + VGATHERDPS X3, 8(X14*1), X6 // c4a26192343508000000 + VGATHERDPS X3, -8(X14*1), X6 // c4a261923435f8ffffff + VGATHERDPS X3, 0(X14*1), X6 // c4a26192343500000000 + VGATHERDPS X3, 664(X14*1), X6 // c4a26192343598020000 + VGATHERDPS X3, 8(X14*8), X6 // c4a2619234f508000000 + VGATHERDPS X3, -8(X14*8), X6 // c4a2619234f5f8ffffff + VGATHERDPS X3, 0(X14*8), X6 // c4a2619234f500000000 + VGATHERDPS X3, 
664(X14*8), X6 // c4a2619234f598020000 + VGATHERQPS X2, (BP)(X7*2), X1 // c4e269934c7d00 + VGATHERQPS X2, (BP)(Y7*2), X1 // c4e26d934c7d00 + VGATHERQPS X12, (R13)(X14*2), X11 // c40219935c7500 + VGATHERQPS X12, (R13)(Y14*2), X11 // c4021d935c7500 + VGATHERQPS X2, (BP)(X7*2), X1 // c4e269934c7d00 + VGATHERQPS X2, (BP)(Y7*2), X1 // c4e26d934c7d00 + VGATHERQPS X12, (R13)(X14*2), X11 // c40219935c7500 + VGATHERQPS X12, (R13)(Y14*2), X11 // c4021d935c7500 + VPGATHERDD X2, (BP)(X7*2), X1 // c4e269904c7d00 + VPGATHERDD Y2, (BP)(Y7*2), Y1 // c4e26d904c7d00 + VPGATHERDD X12, (R13)(X14*2), X11 // c40219905c7500 + VPGATHERDD Y12, (R13)(Y14*2), Y11 // c4021d905c7500 + VPGATHERDD X3, 8(X4*1), X6 // c4e26190342508000000 + VPGATHERDD X3, -8(X4*1), X6 // c4e261903425f8ffffff + VPGATHERDD X3, 0(X4*1), X6 // c4e26190342500000000 + VPGATHERDD X3, 664(X4*1), X6 // c4e26190342598020000 + VPGATHERDD X3, 8(X4*8), X6 // c4e2619034e508000000 + VPGATHERDD X3, -8(X4*8), X6 // c4e2619034e5f8ffffff + VPGATHERDD X3, 0(X4*8), X6 // c4e2619034e500000000 + VPGATHERDD X3, 664(X4*8), X6 // c4e2619034e598020000 + VPGATHERDD X3, 8(X14*1), X6 // c4a26190343508000000 + VPGATHERDD X3, -8(X14*1), X6 // c4a261903435f8ffffff + VPGATHERDD X3, 0(X14*1), X6 // c4a26190343500000000 + VPGATHERDD X3, 664(X14*1), X6 // c4a26190343598020000 + VPGATHERDD X3, 8(X14*8), X6 // c4a2619034f508000000 + VPGATHERDD X3, -8(X14*8), X6 // c4a2619034f5f8ffffff + VPGATHERDD X3, 0(X14*8), X6 // c4a2619034f500000000 + VPGATHERDD X3, 664(X14*8), X6 // c4a2619034f598020000 + VPGATHERDD X2, (BP)(X7*2), X1 // c4e269904c7d00 + VPGATHERDD Y2, (BP)(Y7*2), Y1 // c4e26d904c7d00 + VPGATHERDD X12, (R13)(X14*2), X11 // c40219905c7500 + VPGATHERDD Y12, (R13)(Y14*2), Y11 // c4021d905c7500 + VPGATHERDD X3, 8(X4*1), X6 // c4e26190342508000000 + VPGATHERDD X3, -8(X4*1), X6 // c4e261903425f8ffffff + VPGATHERDD X3, 0(X4*1), X6 // c4e26190342500000000 + VPGATHERDD X3, 664(X4*1), X6 // c4e26190342598020000 + VPGATHERDD X3, 8(X4*8), X6 // 
c4e2619034e508000000 + VPGATHERDD X3, -8(X4*8), X6 // c4e2619034e5f8ffffff + VPGATHERDD X3, 0(X4*8), X6 // c4e2619034e500000000 + VPGATHERDD X3, 664(X4*8), X6 // c4e2619034e598020000 + VPGATHERDD X3, 8(X14*1), X6 // c4a26190343508000000 + VPGATHERDD X3, -8(X14*1), X6 // c4a261903435f8ffffff + VPGATHERDD X3, 0(X14*1), X6 // c4a26190343500000000 + VPGATHERDD X3, 664(X14*1), X6 // c4a26190343598020000 + VPGATHERDD X3, 8(X14*8), X6 // c4a2619034f508000000 + VPGATHERDD X3, -8(X14*8), X6 // c4a2619034f5f8ffffff + VPGATHERDD X3, 0(X14*8), X6 // c4a2619034f500000000 + VPGATHERDD X3, 664(X14*8), X6 // c4a2619034f598020000 + VPGATHERQD X2, (BP)(X7*2), X1 // c4e269914c7d00 + VPGATHERQD X2, (BP)(Y7*2), X1 // c4e26d914c7d00 + VPGATHERQD X12, (R13)(X14*2), X11 // c40219915c7500 + VPGATHERQD X12, (R13)(Y14*2), X11 // c4021d915c7500 + VPGATHERQD X2, (BP)(X7*2), X1 // c4e269914c7d00 + VPGATHERQD X2, (BP)(Y7*2), X1 // c4e26d914c7d00 + VPGATHERQD X12, (R13)(X14*2), X11 // c40219915c7500 + VPGATHERQD X12, (R13)(Y14*2), X11 // c4021d915c7500 + VPGATHERQQ X0, 0(X1*1), X2 // c4e2f991140d00000000 + VPGATHERQQ Y0, 0(Y1*1), Y2 // c4e2fd91140d00000000 + VPGATHERQQ X8, 0(X9*1), X10 // c422b991140d00000000 + VPGATHERQQ Y8, 0(Y9*1), Y10 // c422bd91140d00000000 + VPGATHERQQ X0, 0(X1*4), X2 // c4e2f991148d00000000 + VPGATHERQQ Y0, 0(Y1*4), Y2 // c4e2fd91148d00000000 + VPGATHERQQ X8, 0(X9*4), X10 // c422b991148d00000000 + VPGATHERQQ Y8, 0(Y9*4), Y10 // c422bd91148d00000000 + // AVX2GATHER: test SP/BP base with different displacements. 
+ VPGATHERQQ X0, (SP)(X1*1), X2 // c4e2f991140c + VPGATHERQQ X0, 16(SP)(X1*1), X2 // c4e2f991540c10 + VPGATHERQQ X0, 512(SP)(X1*1), X2 // c4e2f991940c00020000 + VPGATHERQQ X0, (R12)(X1*1), X2 // c4c2f991140c + VPGATHERQQ X0, 16(R12)(X1*1), X2 // c4c2f991540c10 + VPGATHERQQ X0, 512(R12)(X1*1), X2 // c4c2f991940c00020000 + VPGATHERQQ X0, (BP)(X1*1), X2 // c4e2f991540d00 + VPGATHERQQ X0, 16(BP)(X1*1), X2 // c4e2f991540d10 + VPGATHERQQ X0, 512(BP)(X1*1), X2 // c4e2f991940d00020000 + VPGATHERQQ X0, (R13)(X1*1), X2 // c4c2f991540d00 + VPGATHERQQ X0, 16(R13)(X1*1), X2 // c4c2f991540d10 + VPGATHERQQ X0, 512(R13)(X1*1), X2 // c4c2f991940d00020000 + VPGATHERQQ Y0, (SP)(Y1*1), Y2 // c4e2fd91140c + VPGATHERQQ Y0, 16(SP)(Y1*1), Y2 // c4e2fd91540c10 + VPGATHERQQ Y0, 512(SP)(Y1*1), Y2 // c4e2fd91940c00020000 + VPGATHERQQ Y0, (R12)(Y1*1), Y2 // c4c2fd91140c + VPGATHERQQ Y0, 16(R12)(Y1*1), Y2 // c4c2fd91540c10 + VPGATHERQQ Y0, 512(R12)(Y1*1), Y2 // c4c2fd91940c00020000 + VPGATHERQQ Y0, (BP)(Y1*1), Y2 // c4e2fd91540d00 + VPGATHERQQ Y0, 16(BP)(Y1*1), Y2 // c4e2fd91540d10 + VPGATHERQQ Y0, 512(BP)(Y1*1), Y2 // c4e2fd91940d00020000 + VPGATHERQQ Y0, (R13)(Y1*1), Y2 // c4c2fd91540d00 + VPGATHERQQ Y0, 16(R13)(Y1*1), Y2 // c4c2fd91540d10 + VPGATHERQQ Y0, 512(R13)(Y1*1), Y2 // c4c2fd91940d00020000 + // Test low-8 register for /is4 "hr" operand. + VPBLENDVB X0, (BX), X1, X2 // c4e3714c1300 + // /Yxr0 tests. + SHA256RNDS2 X0, (BX), X2 // 0f38cb13 + SHA256RNDS2 X0, (R11), X2 // 410f38cb13 + SHA256RNDS2 X0, X2, X2 // 0f38cbd2 + SHA256RNDS2 X0, X11, X2 // 410f38cbd3 + SHA256RNDS2 X0, (BX), X11 // 440f38cb1b + SHA256RNDS2 X0, (R11), X11 // 450f38cb1b + SHA256RNDS2 X0, X2, X11 // 440f38cbda + SHA256RNDS2 X0, X11, X11 // 450f38cbdb + // Rest SHA instructions tests. 
+ SHA1MSG1 (BX), X2 // 0f38c913 + SHA1MSG1 (R11), X2 // 410f38c913 + SHA1MSG1 X2, X2 // 0f38c9d2 + SHA1MSG1 X11, X2 // 410f38c9d3 + SHA1MSG1 (BX), X11 // 440f38c91b + SHA1MSG1 (R11), X11 // 450f38c91b + SHA1MSG1 X2, X11 // 440f38c9da + SHA1MSG1 X11, X11 // 450f38c9db + SHA1MSG2 (BX), X2 // 0f38ca13 + SHA1MSG2 (R11), X2 // 410f38ca13 + SHA1MSG2 X2, X2 // 0f38cad2 + SHA1MSG2 X11, X2 // 410f38cad3 + SHA1MSG2 (BX), X11 // 440f38ca1b + SHA1MSG2 (R11), X11 // 450f38ca1b + SHA1MSG2 X2, X11 // 440f38cada + SHA1MSG2 X11, X11 // 450f38cadb + SHA1NEXTE (BX), X2 // 0f38c813 + SHA1NEXTE (R11), X2 // 410f38c813 + SHA1NEXTE X2, X2 // 0f38c8d2 + SHA1NEXTE X11, X2 // 410f38c8d3 + SHA1NEXTE (BX), X11 // 440f38c81b + SHA1NEXTE (R11), X11 // 450f38c81b + SHA1NEXTE X2, X11 // 440f38c8da + SHA1NEXTE X11, X11 // 450f38c8db + SHA1RNDS4 $0, (BX), X2 // 0f3acc1300 + SHA1RNDS4 $0, (R11), X2 // 410f3acc1300 + SHA1RNDS4 $1, X2, X2 // 0f3accd201 + SHA1RNDS4 $1, X11, X2 // 410f3accd301 + SHA1RNDS4 $2, (BX), X11 // 440f3acc1b02 + SHA1RNDS4 $2, (R11), X11 // 450f3acc1b02 + SHA1RNDS4 $3, X2, X11 // 440f3accda03 + SHA1RNDS4 $3, X11, X11 // 450f3accdb03 + SHA256MSG1 (BX), X2 // 0f38cc13 + SHA256MSG1 (R11), X2 // 410f38cc13 + SHA256MSG1 X2, X2 // 0f38ccd2 + SHA256MSG1 X11, X2 // 410f38ccd3 + SHA256MSG1 (BX), X11 // 440f38cc1b + SHA256MSG1 (R11), X11 // 450f38cc1b + SHA256MSG1 X2, X11 // 440f38ccda + SHA256MSG1 X11, X11 // 450f38ccdb + SHA256MSG2 (BX), X2 // 0f38cd13 + SHA256MSG2 (R11), X2 // 410f38cd13 + SHA256MSG2 X2, X2 // 0f38cdd2 + SHA256MSG2 X11, X2 // 410f38cdd3 + SHA256MSG2 (BX), X11 // 440f38cd1b + SHA256MSG2 (R11), X11 // 450f38cd1b + SHA256MSG2 X2, X11 // 440f38cdda + SHA256MSG2 X11, X11 // 450f38cddb + // Test VPERMQ with both uint8 and int8 immediate args + VPERMQ $-40, Y8, Y8 // c443fd00c0d8 + VPERMQ $216, Y8, Y8 // c443fd00c0d8 + // Test that VPERMPD that shares ytab list with VPERMQ continues to work too. 
+ VPERMPD $-40, Y7, Y7 // c4e3fd01ffd8 + VPERMPD $216, Y7, Y7 // c4e3fd01ffd8 + // Check that LEAL is permitted to use overflowing offset. + LEAL 2400959708(BP)(R10*1), BP // 428dac15dcbc1b8f + LEAL 3395469782(AX)(R10*1), AX // 428d8410d6c162ca + // Make sure MOV CR/DR continues to work after changing its movtabs. + MOVQ CR0, AX // 0f20c0 + MOVQ CR0, DX // 0f20c2 + MOVQ CR4, DI // 0f20e7 + MOVQ AX, CR0 // 0f22c0 + MOVQ DX, CR0 // 0f22c2 + MOVQ DI, CR4 // 0f22e7 + MOVQ DR0, AX // 0f21c0 + MOVQ DR6, DX // 0f21f2 + MOVQ DR7, SI // 0f21fe + // Test other movtab entries. + PUSHQ GS // 0fa8 + PUSHQ FS // 0fa0 + POPQ FS // 0fa1 + POPQ GS // 0fa9 + // All instructions below semantically have unsigned operands, + // but previous assembler permitted negative arguments. + // This behavior is preserved for compatibility reasons. + VPSHUFD $-79, X7, X7 // c5f970ffb1 + RORXL $-1, (AX), DX // c4e37bf010ff + RORXQ $-1, (AX), DX // c4e3fbf010ff + VPSHUFD $-1, X1, X2 // c5f970d1ff + VPSHUFD $-1, Y1, Y2 // c5fd70d1ff + VPSHUFHW $-1, X1, X2 // c5fa70d1ff + VPSHUFHW $-1, Y1, Y2 // c5fe70d1ff + VPSHUFLW $-1, X1, X2 // c5fb70d1ff + VPSHUFLW $-1, Y1, Y2 // c5ff70d1ff + VROUNDPD $-1, X1, X2 // c4e37909d1ff + VROUNDPS $-1, Y1, Y2 // c4e37d08d1ff + VPSLLD $-1, X1, X2 // c5e972f1ff + VPSLLD $-1, Y1, Y2 // c5ed72f1ff + VPSLLDQ $-1, X1, X2 // c5e973f9ff + VPSLLDQ $-1, Y1, Y2 // c5ed73f9ff + VPSLLQ $-1, X1, X2 // c5e973f1ff + VPSLLQ $-1, Y1, Y2 // c5ed73f1ff + VPSRLD $-1, X1, X2 // c5e972d1ff + VPSRLD $-1, Y1, Y2 // c5ed72d1ff + VPSRLDQ $-1, X1, X2 // c5e973d9ff + VPSRLDQ $-1, Y1, Y2 // c5ed73d9ff + VPSRLQ $-1, X1, X2 // c5e973d1ff + VPSRLQ $-1, Y1, Y2 // c5ed73d1ff + VPEXTRW $-1, X1, (AX) // c4e3791508ff + VPEXTRW $-1, X1, AX // c4e37915c8ff + VEXTRACTF128 $-1, Y1, X2 // c4e37d19caff + VEXTRACTI128 $-1, Y1, X2 // c4e37d39caff + VAESKEYGENASSIST $-1, X1, X2 // c4e379dfd1ff + VPCMPESTRI $-1, X1, X2 // c4e37961d1ff + VPCMPESTRM $-1, X1, X2 // c4e37960d1ff + VPCMPISTRI $-1, X1, X2 // c4e37963d1ff + 
VPCMPISTRM $-1, X1, X2 // c4e37962d1ff + VPERMPD $-1, Y1, Y2 // c4e3fd01d1ff + VPERMILPD $-1, X1, X2 // c4e37905d1ff + VPERMILPD $-1, Y1, Y2 // c4e37d05d1ff + VPERMILPS $-1, X1, X2 // c4e37904d1ff + VPERMILPS $-1, Y1, Y2 // c4e37d04d1ff + VCVTPS2PH $-1, X1, X2 // c4e3791dcaff + VCVTPS2PH $-1, Y1, X2 // c4e37d1dcaff + VPSLLW $-1, X1, X2 // c5e971f1ff + VPSLLW $-1, Y1, Y2 // c5ed71f1ff + VPSRAD $-1, X1, X2 // c5e972e1ff + VPSRAD $-1, Y1, Y2 // c5ed72e1ff + VPSRAW $-1, X1, X2 // c5e971e1ff + VPSRAW $-1, Y1, Y2 // c5ed71e1ff + VPSRLW $-1, X1, X1 // c5f171d1ff + VPSRLW $-1, Y1, Y2 // c5ed71d1ff + VEXTRACTPS $-1, X1, AX // c4e37917c8ff + VPEXTRB $-1, X1, AX // c4e37914c8ff + VPEXTRD $-1, X1, AX // c4e37916c8ff + VPEXTRQ $-1, X1, AX // c4e3f916c8ff + // EVEX: High-16 X registers. + VADDPD X30, X1, X0 // 6291f50858c6 + VADDPD X2, X29, X0 // 62f1950058c2 + VADDPD X30, X29, X0 // 6291950058c6 + VADDPD X2, X1, X28 // 6261f50858e2 + VADDPD X30, X1, X28 // 6201f50858e6 + VADDPD X2, X29, X28 // 6261950058e2 + VADDPD X30, X29, X28 // 6201950058e6 + VADDPD X30, X11, X10 // 6211a50858d6 + VADDPD X12, X29, X10 // 6251950058d4 + VADDPD X30, X29, X10 // 6211950058d6 + VADDPD X12, X11, X28 // 6241a50858e4 + VADDPD X30, X11, X28 // 6201a50858e6 + VADDPD X12, X29, X28 // 6241950058e4 + VADDPD X30, X29, X28 // 6201950058e6 + VADDPD (AX), X29, X0 // 62f195005800 + VADDPD (AX), X1, X28 // 6261f5085820 + VADDPD (AX), X29, X28 // 626195005820 + VADDPD (AX), X29, X10 // 627195005810 + VADDPD (AX), X10, X28 // 6261ad085820 + VADDPD (CX)(AX*1), X29, X0 // 62f19500580401 + VADDPD (CX)(AX*1), X1, X28 // 6261f508582401 + VADDPD (CX)(AX*1), X29, X28 // 62619500582401 + VADDPD (CX)(AX*1), X29, X10 // 62719500581401 + VADDPD (CX)(AX*1), X10, X28 // 6261ad08582401 + VADDPD (CX)(AX*2), X29, X0 // 62f19500580441 + VADDPD (CX)(AX*2), X1, X28 // 6261f508582441 + VADDPD (CX)(AX*2), X29, X28 // 62619500582441 + VADDPD (CX)(AX*2), X29, X10 // 62719500581441 + VADDPD (CX)(AX*2), X10, X28 // 6261ad08582441 + // 
EVEX: displacement without Disp8. + VADDPD 15(AX), X29, X0 // 62f1950058800f000000 + VADDPD 15(AX), X1, X28 // 6261f50858a00f000000 + VADDPD 15(AX), X29, X28 // 6261950058a00f000000 + VADDPD 15(AX), X29, X10 // 6271950058900f000000 + VADDPD 15(AX), X10, X28 // 6261ad0858a00f000000 + VADDPD 15(CX)(AX*1), X29, X0 // 62f195005884010f000000 + VADDPD 15(CX)(AX*1), X1, X28 // 6261f50858a4010f000000 + VADDPD 15(CX)(AX*1), X29, X28 // 6261950058a4010f000000 + VADDPD 15(CX)(AX*1), X29, X10 // 627195005894010f000000 + VADDPD 15(CX)(AX*1), X10, X28 // 6261ad0858a4010f000000 + VADDPD 15(CX)(AX*2), X29, X0 // 62f195005884410f000000 + VADDPD 15(CX)(AX*2), X1, X28 // 6261f50858a4410f000000 + VADDPD 15(CX)(AX*2), X29, X28 // 6261950058a4410f000000 + VADDPD 15(CX)(AX*2), X29, X10 // 627195005894410f000000 + VADDPD 15(CX)(AX*2), X10, X28 // 6261ad0858a4410f000000 + // EVEX: compressed displacement (Disp8). + VADDPD 2032(DX), X29, X0 // 62f1950058427f + VADDPD 2032(DX), X1, X29 // 6261f508586a7f + VADDPD 2032(DX), X29, X28 // 6261950058627f + VADDPD 2032(DX)(AX*2), X29, X0 // 62f195005844427f + VADDPD 2032(DX)(AX*2), X1, X29 // 6261f508586c427f + VADDPD 2032(DX)(AX*2), X29, X28 // 626195005864427f + VADDPD 4064(DX), Y0, Y29 // 6261fd28586a7f + VADDPD 4064(DX), Y29, Y1 // 62f19520584a7f + VADDPD 4064(DX), Y28, Y29 // 62619d20586a7f + VADDPD 4064(DX)(AX*2), Y0, Y29 // 6261fd28586c427f + VADDPD 4064(DX)(AX*2), Y29, Y1 // 62f19520584c427f + VADDPD 8128(DX), Z0, Z29 // 6261fd48586a7f + VADDPD 8128(DX), Z29, Z1 // 62f19540584a7f + VADDPD 8128(DX), Z28, Z29 // 62619d40586a7f + VADDPD 8128(DX)(AX*2), Z0, Z29 // 6261fd48586c427f + VADDPD 8128(DX)(AX*2), Z29, Z1 // 62f19540584c427f + // EVEX: compressed displacement that does not fit into 8bits. 
+ VADDPD 2048(DX), X29, X0 // 62f19500588200080000 + VADDPD 2048(DX), X1, X29 // 6261f50858aa00080000 + VADDPD 2048(DX), X29, X28 // 6261950058a200080000 + VADDPD 2048(DX)(AX*2), X29, X0 // 62f1950058844200080000 + VADDPD 2048(DX)(AX*2), X1, X29 // 6261f50858ac4200080000 + VADDPD 2048(DX)(AX*2), X29, X28 // 6261950058a44200080000 + VADDPD 4096(DX), Y0, Y29 // 6261fd2858aa00100000 + VADDPD 4096(DX), Y29, Y1 // 62f19520588a00100000 + VADDPD 4096(DX), Y28, Y29 // 62619d2058aa00100000 + VADDPD 4096(DX)(AX*2), Y0, Y29 // 6261fd2858ac4200100000 + VADDPD 4096(DX)(AX*2), Y29, Y1 // 62f19520588c4200100000 + VADDPD 8192(DX), Z0, Z29 // 6261fd4858aa00200000 + VADDPD 8192(DX), Z29, Z1 // 62f19540588a00200000 + VADDPD 8192(DX), Z28, Z29 // 62619d4058aa00200000 + VADDPD 8192(DX)(AX*2), Z0, Z29 // 6261fd4858ac4200200000 + VADDPD 8192(DX)(AX*2), Z29, Z1 // 62f19540588c4200200000 + // EVEX: Y registers; VL=256. + VADDPD Y30, Y1, Y0 // 6291f52858c6 + VADDPD Y0, Y29, Y2 // 62f1952058d0 + VADDPD Y0, Y29, Y30 // 6261952058f0 + VADDPD Y28, Y1, Y2 // 6291f52858d4 + VADDPD Y28, Y1, Y30 // 6201f52858f4 + VADDPD Y28, Y29, Y2 // 6291952058d4 + VADDPD Y28, Y29, Y30 // 6201952058f4 + VADDPD Y10, Y11, Y30 // 6241a52858f2 + VADDPD Y10, Y29, Y12 // 6251952058e2 + VADDPD Y10, Y29, Y30 // 6241952058f2 + VADDPD Y28, Y11, Y12 // 6211a52858e4 + VADDPD Y28, Y11, Y30 // 6201a52858f4 + VADDPD Y28, Y29, Y12 // 6211952058e4 + VADDPD Y28, Y29, Y30 // 6201952058f4 + VADDPD (AX), Y29, Y0 // 62f195205800 + VADDPD (AX), Y1, Y28 // 6261f5285820 + VADDPD (AX), Y29, Y28 // 626195205820 + VADDPD (AX), Y29, Y10 // 627195205810 + VADDPD (AX), Y10, Y28 // 6261ad285820 + VADDPD (CX)(AX*1), Y29, Y0 // 62f19520580401 + VADDPD (CX)(AX*1), Y1, Y28 // 6261f528582401 + VADDPD (CX)(AX*1), Y29, Y28 // 62619520582401 + VADDPD (CX)(AX*1), Y29, Y10 // 62719520581401 + VADDPD (CX)(AX*1), Y10, Y28 // 6261ad28582401 + VADDPD (CX)(AX*2), Y29, Y0 // 62f19520580441 + VADDPD (CX)(AX*2), Y1, Y28 // 6261f528582441 + VADDPD (CX)(AX*2), 
Y29, Y28 // 62619520582441 + VADDPD (CX)(AX*2), Y29, Y10 // 62719520581441 + VADDPD (CX)(AX*2), Y10, Y28 // 6261ad28582441 + VADDPD 15(AX), Y0, Y29 // 6261fd2858a80f000000 + VADDPD 15(AX), Y28, Y1 // 62f19d2058880f000000 + VADDPD 15(AX), Y28, Y29 // 62619d2058a80f000000 + VADDPD 15(AX), Y10, Y29 // 6261ad2858a80f000000 + VADDPD 15(AX), Y28, Y10 // 62719d2058900f000000 + VADDPD 15(CX)(AX*1), Y0, Y29 // 6261fd2858ac010f000000 + VADDPD 15(CX)(AX*1), Y28, Y1 // 62f19d20588c010f000000 + VADDPD 15(CX)(AX*1), Y28, Y29 // 62619d2058ac010f000000 + VADDPD 15(CX)(AX*1), Y10, Y29 // 6261ad2858ac010f000000 + VADDPD 15(CX)(AX*1), Y28, Y10 // 62719d205894010f000000 + VADDPD 15(CX)(AX*2), Y0, Y29 // 6261fd2858ac410f000000 + VADDPD 15(CX)(AX*2), Y28, Y1 // 62f19d20588c410f000000 + VADDPD 15(CX)(AX*2), Y28, Y29 // 62619d2058ac410f000000 + VADDPD 15(CX)(AX*2), Y10, Y29 // 6261ad2858ac410f000000 + VADDPD 15(CX)(AX*2), Y28, Y10 // 62719d205894410f000000 + VADDPD 2048(DX), Y0, Y29 // 6261fd28586a40 + VADDPD 2048(DX), Y29, Y1 // 62f19520584a40 + VADDPD 2048(DX), Y28, Y29 // 62619d20586a40 + VADDPD 2048(DX)(AX*2), Y0, Y29 // 6261fd28586c4240 + VADDPD 2048(DX)(AX*2), Y29, Y1 // 62f19520584c4240 + VADDPD 2048(DX)(AX*2), Y28, Y29 // 62619d20586c4240 + // EVEX: Z registers; VL=512. 
+ VADDPD Z30, Z0, Z1 // 6291fd4858ce + VADDPD Z0, Z2, Z29 // 6261ed4858e8 + VADDPD Z0, Z30, Z29 // 62618d4058e8 + VADDPD Z28, Z2, Z1 // 6291ed4858cc + VADDPD Z28, Z30, Z1 // 62918d4058cc + VADDPD Z28, Z2, Z29 // 6201ed4858ec + VADDPD Z28, Z30, Z29 // 62018d4058ec + VADDPD Z10, Z30, Z11 // 62518d4058da + VADDPD Z10, Z12, Z29 // 62419d4858ea + VADDPD Z10, Z30, Z29 // 62418d4058ea + VADDPD Z28, Z12, Z11 // 62119d4858dc + VADDPD Z28, Z30, Z11 // 62118d4058dc + VADDPD Z28, Z12, Z29 // 62019d4858ec + VADDPD Z28, Z30, Z29 // 62018d4058ec + VADDPD (AX), Z0, Z29 // 6261fd485828 + VADDPD (AX), Z28, Z1 // 62f19d405808 + VADDPD (AX), Z28, Z29 // 62619d405828 + VADDPD (AX), Z10, Z29 // 6261ad485828 + VADDPD (AX), Z28, Z10 // 62719d405810 + VADDPD (CX)(AX*1), Z0, Z29 // 6261fd48582c01 + VADDPD (CX)(AX*1), Z28, Z1 // 62f19d40580c01 + VADDPD (CX)(AX*1), Z28, Z29 // 62619d40582c01 + VADDPD (CX)(AX*1), Z10, Z29 // 6261ad48582c01 + VADDPD (CX)(AX*1), Z28, Z10 // 62719d40581401 + VADDPD (CX)(AX*2), Z0, Z29 // 6261fd48582c41 + VADDPD (CX)(AX*2), Z28, Z1 // 62f19d40580c41 + VADDPD (CX)(AX*2), Z28, Z29 // 62619d40582c41 + VADDPD (CX)(AX*2), Z10, Z29 // 6261ad48582c41 + VADDPD (CX)(AX*2), Z28, Z10 // 62719d40581441 + VADDPD 15(AX), Z29, Z0 // 62f1954058800f000000 + VADDPD 15(AX), Z1, Z28 // 6261f54858a00f000000 + VADDPD 15(AX), Z29, Z28 // 6261954058a00f000000 + VADDPD 15(AX), Z29, Z10 // 6271954058900f000000 + VADDPD 15(AX), Z10, Z28 // 6261ad4858a00f000000 + VADDPD 15(CX)(AX*1), Z29, Z0 // 62f195405884010f000000 + VADDPD 15(CX)(AX*1), Z1, Z28 // 6261f54858a4010f000000 + VADDPD 15(CX)(AX*1), Z29, Z28 // 6261954058a4010f000000 + VADDPD 15(CX)(AX*1), Z29, Z10 // 627195405894010f000000 + VADDPD 15(CX)(AX*1), Z10, Z28 // 6261ad4858a4010f000000 + VADDPD 15(CX)(AX*2), Z29, Z0 // 62f195405884410f000000 + VADDPD 15(CX)(AX*2), Z1, Z28 // 6261f54858a4410f000000 + VADDPD 15(CX)(AX*2), Z29, Z28 // 6261954058a4410f000000 + VADDPD 15(CX)(AX*2), Z29, Z10 // 627195405894410f000000 + VADDPD 15(CX)(AX*2), 
Z10, Z28 // 6261ad4858a4410f000000 + VADDPD 2048(DX), Z29, Z0 // 62f19540584220 + VADDPD 2048(DX), Z1, Z29 // 6261f548586a20 + VADDPD 2048(DX), Z29, Z28 // 62619540586220 + VADDPD 2048(DX)(AX*2), Z29, Z0 // 62f1954058444220 + VADDPD 2048(DX)(AX*2), Z1, Z29 // 6261f548586c4220 + VADDPD 2048(DX)(AX*2), Z29, Z28 // 6261954058644220 + // EVEX: KOP (opmask) instructions. + KMOVB K0, K0 // c5f990c0 + KMOVB K7, K7 // c5f990ff + KMOVB K5, K1 // c5f990cd + KMOVB K1, K5 // c5f990e9 + KMOVB (AX), K1 // c5f99008 + KMOVB K0, (AX) // c5f99100 + KMOVB K7, (R10) // c4c179913a + KMOVB K5, AX // c5f993c5 + KMOVB K7, R10 // c57993d7 + KMOVB AX, K5 // c5f992e8 + KMOVB R10, K7 // c4c17992fa + KMOVW K0, K0 // c5f890c0 + KMOVW K7, K7 // c5f890ff + KMOVW K5, K1 // c5f890cd + KMOVW K1, K5 // c5f890e9 + KMOVW (AX), K1 // c5f89008 + KMOVW K0, (AX) // c5f89100 + KMOVW K7, (R10) // c4c178913a + KMOVW K5, AX // c5f893c5 + KMOVW K7, R10 // c57893d7 + KMOVW AX, K5 // c5f892e8 + KMOVW R10, K7 // c4c17892fa + KMOVD K0, K0 // c4e1f990c0 + KMOVD K7, K7 // c4e1f990ff + KMOVD K5, K1 // c4e1f990cd + KMOVD K1, K5 // c4e1f990e9 + KMOVD (AX), K1 // c4e1f99008 + KMOVD AX, K5 // c5fb92e8 + KMOVD R10, K7 // c4c17b92fa + KMOVD K0, (AX) // c4e1f99100 + KMOVD K7, (R10) // c4c1f9913a + KMOVD K5, AX // c5fb93c5 + KMOVD K7, R10 // c57b93d7 + KMOVQ K0, K0 // c4e1f890c0 + KMOVQ K7, K7 // c4e1f890ff + KMOVQ K5, K1 // c4e1f890cd + KMOVQ K1, K5 // c4e1f890e9 + KMOVQ (AX), K1 // c4e1f89008 + KMOVQ AX, K5 // c4e1fb92e8 + KMOVQ R10, K7 // c4c1fb92fa + KMOVQ K0, (AX) // c4e1f89100 + KMOVQ K7, (R10) // c4c1f8913a + KMOVQ K5, AX // c4e1fb93c5 + KMOVQ K7, R10 // c461fb93d7 + KNOTB K7, K0 // c5f944c7 + KNOTB K1, K5 // c5f944e9 + KNOTW K7, K0 // c5f844c7 + KNOTW K1, K5 // c5f844e9 + KNOTD K7, K0 // c4e1f944c7 + KNOTD K1, K5 // c4e1f944e9 + KNOTQ K7, K0 // c4e1f844c7 + KNOTQ K1, K5 // c4e1f844e9 + KORB K7, K5, K0 // c5d545c7 + KORB K0, K7, K5 // c5c545e8 + KORW K7, K5, K0 // c5d445c7 + KORW K0, K7, K5 // c5c445e8 + KORD K7, K5, 
K0 // c4e1d545c7 + KORD K0, K7, K5 // c4e1c545e8 + KORQ K7, K5, K0 // c4e1d445c7 + KORQ K0, K7, K5 // c4e1c445e8 + KSHIFTLB $0, K7, K0 // c4e37932c700 + KSHIFTLB $196, K1, K5 // c4e37932e9c4 + KSHIFTLW $0, K7, K0 // c4e3f932c700 + KSHIFTLW $196, K1, K5 // c4e3f932e9c4 + KSHIFTLD $0, K7, K0 // c4e37933c700 + KSHIFTLD $196, K1, K5 // c4e37933e9c4 + KSHIFTLQ $0, K7, K0 // c4e3f933c700 + KSHIFTLQ $196, K1, K5 // c4e3f933e9c4 + // EVEX: masking with K1-K7. + VADDPD X2, X1, K1, X0 // 62f1f50958c2 + VADDPD X12, X1, K4, X10 // 6251f50c58d4 + VADDPD X22, X1, K7, X20 // 62a1f50f58e6 + VADDPD (AX), X1, K1, X1 // 62f1f5095808 + VADDPD 8(R10), X10, K4, X10 // 6251ad0c589208000000 + VADDPD (R10)(AX*4), X20, K7, X20 // 62c1dd07582482 + VADDPD Y2, Y1, K1, Y0 // 62f1f52958c2 + VADDPD Y12, Y1, K4, Y10 // 6251f52c58d4 + VADDPD Y22, Y1, K7, Y20 // 62a1f52f58e6 + VADDPD (AX), Y1, K1, Y1 // 62f1f5295808 + VADDPD 8(R10), Y10, K4, Y10 // 6251ad2c589208000000 + VADDPD (R10)(AX*4), Y20, K7, Y20 // 62c1dd27582482 + VADDPD Z2, Z1, K1, Z0 // 62f1f54958c2 + VADDPD Z12, Z1, K4, Z10 // 6251f54c58d4 + VADDPD Z22, Z1, K7, Z20 // 62a1f54f58e6 + VADDPD (AX), Z1, K1, Z1 // 62f1f5495808 + VADDPD 8(R10), Z10, K4, Z10 // 6251ad4c589208000000 + VADDPD (R10)(AX*4), Z20, K7, Z20 // 62c1dd47582482 + // EVEX gather (also tests Z as VSIB index). 
+ VPGATHERDD 360(AX)(X2*4), K1, X1 // 62f27d09904c905a + VPGATHERDD 640(BP)(X15*8), K3, X14 // 62327d0b90b4fd80020000 + VPGATHERDD 960(R10)(X25*2), K7, X24 // 62027d0790844ac0030000 + VPGATHERDD 1280(R10)(X1*4), K4, X0 // 62d27d0c90848a00050000 + VPGATHERDD 360(AX)(Y2*4), K1, Y1 // 62f27d29904c905a + VPGATHERDD 640(BP)(Y15*8), K3, Y14 // 62327d2b90b4fd80020000 + VPGATHERDD 960(R10)(Y25*2), K7, Y24 // 62027d2790844ac0030000 + VPGATHERDD 1280(R10)(Y1*4), K4, Y0 // 62d27d2c90848a00050000 + VPGATHERDD 360(AX)(Z2*4), K1, Z1 // 62f27d49904c905a + VPGATHERDD 640(BP)(Z15*8), K3, Z14 // 62327d4b90b4fd80020000 + VPGATHERDD 960(R10)(Z25*2), K7, Z24 // 62027d4790844ac0030000 + VPGATHERDD 1280(R10)(Z1*4), K4, Z0 // 62d27d4c90848a00050000 + VPGATHERDQ 360(AX)(X2*4), K1, X1 // 62f2fd09904c902d + VPGATHERDQ 640(BP)(X15*8), K3, X14 // 6232fd0b9074fd50 + VPGATHERDQ 960(R10)(X25*2), K7, X24 // 6202fd0790444a78 + VPGATHERDQ 1280(R10)(X1*4), K4, X0 // 62d2fd0c90848a00050000 + VPGATHERDQ 360(AX)(X2*4), K1, Y1 // 62f2fd29904c902d + VPGATHERDQ 640(BP)(X15*8), K3, Y14 // 6232fd2b9074fd50 + VPGATHERDQ 960(R10)(X25*2), K7, Y24 // 6202fd2790444a78 + VPGATHERDQ 1280(R10)(X1*4), K4, Y0 // 62d2fd2c90848a00050000 + VPGATHERDQ 360(AX)(Y2*4), K1, Z1 // 62f2fd49904c902d + VPGATHERDQ 640(BP)(Y15*8), K3, Z14 // 6232fd4b9074fd50 + VPGATHERDQ 960(R10)(Y25*2), K7, Z24 // 6202fd4790444a78 + VPGATHERDQ 1280(R10)(Y1*4), K4, Z0 // 62d2fd4c90848a00050000 + VGATHERDPD 360(R15)(X30*2), K6, X20 // 6282fd069264772d + VGATHERDPD 640(R15)(X20*2), K6, X10 // 6252fd0692546750 + VGATHERDPD 960(R15)(X10*2), K6, X20 // 6282fd0e92645778 + VGATHERDPD 1280(R15)(X0*2), K6, X10 // 6252fd0e92944700050000 + VGATHERDPD 360(R15)(X30*2), K6, Y20 // 6282fd269264772d + VGATHERDPD 640(R15)(X20*2), K6, Y10 // 6252fd2692546750 + VGATHERDPD 960(R15)(X10*2), K6, Y20 // 6282fd2e92645778 + VGATHERDPD 1280(R15)(X0*2), K6, Y10 // 6252fd2e92944700050000 + VGATHERDPD 360(R15)(Y30*2), K6, Z20 // 6282fd469264772d + VGATHERDPD 640(R15)(Y20*2), 
K6, Z10 // 6252fd4692546750 + VGATHERDPD 960(R15)(Y10*2), K6, Z20 // 6282fd4e92645778 + VGATHERDPD 1280(R15)(Y0*2), K6, Z10 // 6252fd4e92944700050000 + VGATHERDPS 360(R15)(X30*2), K6, X20 // 62827d069264775a + VGATHERDPS 640(R15)(X20*2), K6, X10 // 62527d0692946780020000 + VGATHERDPS 960(R15)(X10*2), K6, X20 // 62827d0e92a457c0030000 + VGATHERDPS 1280(R15)(X0*2), K6, X10 // 62527d0e92944700050000 + VGATHERDPS 360(R15)(Y30*2), K6, Y20 // 62827d269264775a + VGATHERDPS 640(R15)(Y20*2), K6, Y10 // 62527d2692946780020000 + VGATHERDPS 960(R15)(Y10*2), K6, Y20 // 62827d2e92a457c0030000 + VGATHERDPS 1280(R15)(Y0*2), K6, Y10 // 62527d2e92944700050000 + VGATHERDPS 360(R15)(Z30*2), K6, Z20 // 62827d469264775a + VGATHERDPS 640(R15)(Z20*2), K6, Z10 // 62527d4692946780020000 + VGATHERDPS 960(R15)(Z10*2), K6, Z20 // 62827d4e92a457c0030000 + VGATHERDPS 1280(R15)(Z0*2), K6, Z10 // 62527d4e92944700050000 + VGATHERQPS 360(R15)(X30*2), K6, X20 // 62827d069364775a + VGATHERQPS 640(R15)(X20*2), K6, X10 // 62527d0693946780020000 + VGATHERQPS 960(R15)(X10*2), K6, X20 // 62827d0e93a457c0030000 + VGATHERQPS 1280(R15)(X0*2), K6, X10 // 62527d0e93944700050000 + VGATHERQPS 360(R15)(Y30*2), K6, X20 // 62827d269364775a + VGATHERQPS 640(R15)(Y20*2), K6, X10 // 62527d2693946780020000 + VGATHERQPS 960(R15)(Y10*2), K6, X20 // 62827d2e93a457c0030000 + VGATHERQPS 1280(R15)(Y0*2), K6, X10 // 62527d2e93944700050000 + VGATHERQPS 360(R15)(Z30*2), K6, Y20 // 62827d469364775a + VGATHERQPS 640(R15)(Z20*2), K6, Y10 // 62527d4693946780020000 + VGATHERQPS 960(R15)(Z10*2), K6, Y20 // 62827d4e93a457c0030000 + VGATHERQPS 1280(R15)(Z0*2), K6, Y10 // 62527d4e93944700050000 + VPGATHERQD 360(R15)(X30*2), K6, X20 // 62827d069164775a + VPGATHERQD 640(R15)(X20*2), K6, X10 // 62527d0691946780020000 + VPGATHERQD 960(R15)(X10*2), K6, X20 // 62827d0e91a457c0030000 + VPGATHERQD 1280(R15)(X0*2), K6, X10 // 62527d0e91944700050000 + VPGATHERQD 360(R15)(Y30*2), K6, X20 // 62827d269164775a + VPGATHERQD 640(R15)(Y20*2), K6, X10 // 
62527d2691946780020000 + VPGATHERQD 960(R15)(Y10*2), K6, X20 // 62827d2e91a457c0030000 + VPGATHERQD 1280(R15)(Y0*2), K6, X10 // 62527d2e91944700050000 + VPGATHERQD 360(R15)(Z30*2), K6, Y20 // 62827d469164775a + VPGATHERQD 640(R15)(Z20*2), K6, Y10 // 62527d4691946780020000 + VPGATHERQD 960(R15)(Z10*2), K6, Y20 // 62827d4e91a457c0030000 + VPGATHERQD 1280(R15)(Z0*2), K6, Y10 // 62527d4e91944700050000 + VPGATHERQQ 360(R15)(X30*2), K6, X20 // 6282fd069164772d + VPGATHERQQ 640(R15)(X20*2), K6, X10 // 6252fd0691546750 + VPGATHERQQ 960(R15)(X10*2), K6, X20 // 6282fd0e91645778 + VPGATHERQQ 1280(R15)(X0*2), K6, X10 // 6252fd0e91944700050000 + VPGATHERQQ 360(R15)(Y30*2), K6, Y20 // 6282fd269164772d + VPGATHERQQ 640(R15)(Y20*2), K6, Y10 // 6252fd2691546750 + VPGATHERQQ 960(R15)(Y10*2), K6, Y20 // 6282fd2e91645778 + VPGATHERQQ 1280(R15)(Y0*2), K6, Y10 // 6252fd2e91944700050000 + VPGATHERQQ 360(R15)(Z30*2), K6, Z20 // 6282fd469164772d + VPGATHERQQ 640(R15)(Z20*2), K6, Z10 // 6252fd4691546750 + VPGATHERQQ 960(R15)(Z10*2), K6, Z20 // 6282fd4e91645778 + VPGATHERQQ 1280(R15)(Z0*2), K6, Z10 // 6252fd4e91944700050000 + VGATHERQPD 360(R15)(X30*2), K6, X20 // 6282fd069364772d + VGATHERQPD 640(R15)(X20*2), K6, X10 // 6252fd0693546750 + VGATHERQPD 960(R15)(X10*2), K6, X20 // 6282fd0e93645778 + VGATHERQPD 1280(R15)(X0*2), K6, X10 // 6252fd0e93944700050000 + VGATHERQPD 360(R15)(Y30*2), K6, Y20 // 6282fd269364772d + VGATHERQPD 640(R15)(Y20*2), K6, Y10 // 6252fd2693546750 + VGATHERQPD 960(R15)(Y10*2), K6, Y20 // 6282fd2e93645778 + VGATHERQPD 1280(R15)(Y0*2), K6, Y10 // 6252fd2e93944700050000 + VGATHERQPD 360(R15)(Z30*2), K6, Z20 // 6282fd469364772d + VGATHERQPD 640(R15)(Z20*2), K6, Z10 // 6252fd4693546750 + VGATHERQPD 960(R15)(Z10*2), K6, Z20 // 6282fd4e93645778 + VGATHERQPD 1280(R15)(Z0*2), K6, Z10 // 6252fd4e93944700050000 + // EVEX: corner cases for High-16 registers. 
+ VADDPD X31, X16, X15 // 6211fd0058ff + VADDPD X23, X15, X16 // 62a1850858c7 + VADDPD Y31, Y16, Y15 // 6211fd2058ff + VADDPD Y23, Y15, Y16 // 62a1852858c7 + VADDPD Z31, Z16, Z15 // 6211fd4058ff + VADDPD Z23, Z15, Z16 // 62a1854858c7 + VGATHERQPD (DX)(X16*1),K1,X31 // 6262fd01933c02 + VGATHERQPD (DX)(X31*1),K1,X16 // 62a2fd0193043a + VGATHERQPD (DX)(X15*1),K1,X23 // 62a2fd09933c3a + VGATHERQPD (DX)(X23*1),K1,X15 // 6272fd01933c3a + VGATHERQPD (DX)(Y16*1),K1,Y31 // 6262fd21933c02 + VGATHERQPD (DX)(Y31*1),K1,Y16 // 62a2fd2193043a + VGATHERQPD (DX)(Y15*1),K1,Y23 // 62a2fd29933c3a + VGATHERQPD (DX)(Y23*1),K1,Y15 // 6272fd21933c3a + VGATHERQPD (DX)(Z16*1),K1,Z31 // 6262fd41933c02 + VGATHERQPD (DX)(Z31*1),K1,Z16 // 62a2fd4193043a + VGATHERQPD (DX)(Z15*1),K1,Z23 // 62a2fd49933c3a + VGATHERQPD (DX)(Z23*1),K1,Z15 // 6272fd41933c3a + // EVEX: VCVTPD2DQ with Y suffix (VL=2). + VCVTPD2DQY (BX), X20 // 62e1ff28e623 + VCVTPD2DQY (R11), X30 // 6241ff28e633 + // XED encoder uses EVEX.X=0 for these; most x86 tools use EVEX.X=1. + // Either way is OK. + VMOVQ SP, X20 // 62e1fd086ee4 or 62a1fd086ee4 + VMOVQ BP, X20 // 62e1fd086ee5 or 62a1fd086ee5 + VMOVQ R14, X20 // 62c1fd086ee6 or 6281fd086ee6 + // "VMOVQ r/m64, xmm1"/6E vs "VMOVQ xmm2/m64, xmm1"/7E with mem operand. + VMOVQ (AX), X20 // 62e1fd086e20 or 62e1fe087e20 + VMOVQ 7(DX), X20 // 62e1fd086ea207000000 or 62e1fe087ea207000000 + VMOVQ -15(R11)(CX*1), X20 // 62c1fd086ea40bf1ffffff or 62c1fe087ea40bf1ffffff + VMOVQ (SP)(AX*2), X20 // 62e1fd086e2444 or 62e1fe087e2444 + // "VMOVQ xmm1, r/m64"/7E vs "VMOVQ xmm1, xmm2/m64"/D6 with mem operand. + VMOVQ X20, (AX) // 62e1fd087e20 or 62e1fd08d620 + VMOVQ X20, 7(DX) // 62e1fd087ea207000000 or 62e1fd08d6a207000000 + VMOVQ X20, -15(R11)(CX*1) // 62c1fd087ea40bf1ffffff or 62c1fd08d6a40bf1ffffff + VMOVQ X20, (SP)(AX*2) // 62e1fd087e2444 or 62e1fd08d62444 + // VMOVHPD: overlapping VEX and EVEX variants. 
+ VMOVHPD (AX), X5, X5 // c5d11628 or c4e1d11628 or 62f1d5281628 or 62f1d5481628 + VMOVHPD 7(DX), X5, X5 // c5d1166a07 or 62f1d52816aa07000000 or 62f1d54816aa07000000 + VMOVHPD -15(R11)(CX*1), X5, X5 // c4c151166c0bf1 or 62d1d52816ac0bf1ffffff or 62d1d54816ac0bf1ffffff + VMOVHPD (SP)(AX*2), X5, X5 // c5d1162c44 or c4e1d1162c44 or 62f1d528162c44 or 62f1d548162c44 + VMOVHPD (AX), X8, X5 // c5b91628 or c4e1b91628 or 62f1bd281628 or 62f1bd481628 + VMOVHPD 7(DX), X8, X5 // c5b9166a07 or 62f1bd2816aa07000000 or 62f1bd4816aa07000000 + VMOVHPD -15(R11)(CX*1), X8, X5 // c4c139166c0bf1 or 62d1bd4816ac0bf1ffffff + VMOVHPD (SP)(AX*2), X8, X5 // c5b9162c44 or c4e1b9162c44 or 62f1bd28162c44 or 62f1bd48162c44 + VMOVHPD (AX), X20, X5 // 62f1dd001628 or 62f1dd201628 or 62f1dd401628 + VMOVHPD 7(DX), X20, X5 // 62f1dd0016aa07000000 or 62f1dd2016aa07000000 or 62f1dd4016aa07000000 + VMOVHPD -15(R11)(CX*1), X20, X5 // 62d1dd0016ac0bf1ffffff or 62d1dd2016ac0bf1ffffff or 62d1dd4016ac0bf1ffffff + VMOVHPD (SP)(AX*2), X20, X5 // 62f1dd00162c44 or 62f1dd20162c44 or 62f1dd40162c44 + VMOVHPD (AX), X5, X8 // c5511600 or c461d11600 or 6271d5281600 or 6271d5481600 + VMOVHPD 7(DX), X5, X8 // c551164207 or 6271d528168207000000 or 6271d548168207000000 + VMOVHPD -15(R11)(CX*1), X5, X8 // c4415116440bf1 or 6251d52816840bf1ffffff or 6251d54816840bf1ffffff + VMOVHPD (SP)(AX*2), X5, X8 // c551160444 or 6271d528160444 or 6271d548160444 + VMOVHPD (AX), X8, X8 // c5391600 or 6271bd281600 or 6271bd481600 + VMOVHPD 7(DX), X8, X8 // c539164207 or 6271bd28168207000000 or 6271bd48168207000000 + VMOVHPD -15(R11)(CX*1), X8, X8 // c4413916440bf1 or 6251bd2816840bf1ffffff or 6251bd4816840bf1ffffff + VMOVHPD (SP)(AX*2), X8, X8 // c539160444 or 6271bd28160444 or 6271bd48160444 + VMOVHPD (AX), X20, X8 // 6271dd001600 or 6271dd201600 or 6271dd401600 + VMOVHPD 7(DX), X20, X8 // 6271dd00168207000000 or 6271dd20168207000000 or 6271dd40168207000000 + VMOVHPD -15(R11)(CX*1), X20, X8 // 6251dd0016840bf1ffffff or 
6251dd2016840bf1ffffff or 6251dd4016840bf1ffffff + VMOVHPD (SP)(AX*2), X20, X8 // 6271dd00160444 or 6271dd20160444 or 6271dd40160444 + VMOVHPD (AX), X5, X20 // 62e1d5081620 or 62e1d5281620 or 62e1d5481620 + VMOVHPD 7(DX), X5, X20 // 62e1d50816a207000000 or 62e1d52816a207000000 or 62e1d54816a207000000 + VMOVHPD -15(R11)(CX*1), X5, X20 // 62c1d50816a40bf1ffffff or 62c1d52816a40bf1ffffff or 62c1d54816a40bf1ffffff + VMOVHPD (SP)(AX*2), X5, X20 // 62e1d508162444 or 62e1d528162444 or 62e1d548162444 + VMOVHPD (AX), X8, X20 // 62e1bd081620 or 62e1bd281620 or 62e1bd481620 + VMOVHPD 7(DX), X8, X20 // 62e1bd0816a207000000 or 62e1bd2816a207000000 or 62e1bd4816a207000000 + VMOVHPD -15(R11)(CX*1), X8, X20 // 62c1bd0816a40bf1ffffff or 62c1bd2816a40bf1ffffff or 62c1bd4816a40bf1ffffff + VMOVHPD (SP)(AX*2), X8, X20 // 62e1bd08162444 or 62e1bd28162444 or 62e1bd48162444 + VMOVHPD (AX), X20, X20 // 62e1dd001620 or 62e1dd201620 or 62e1dd401620 + VMOVHPD 7(DX), X20, X20 // 62e1dd0016a207000000 or 62e1dd2016a207000000 or 62e1dd4016a207000000 + VMOVHPD -15(R11)(CX*1), X20, X20 // 62c1dd0016a40bf1ffffff or 62c1dd2016a40bf1ffffff or 62c1dd4016a40bf1ffffff + VMOVHPD (SP)(AX*2), X20, X20 // 62e1dd00162444 or 62e1dd20162444 or 62e1dd40162444 + VMOVHPD X5, (AX) // c5f91728 or 62f1fd281728 or 62f1fd481728 + VMOVHPD X8, (AX) // c5791700 or 6271fd281700 or 6271fd481700 + VMOVHPD X20, (AX) // 62e1fd081720 or 62e1fd281720 or 62e1fd481720 + VMOVHPD X5, 7(DX) // c5f9176a07 or 62f1fd2817aa07000000 or 62f1fd4817aa07000000 + VMOVHPD X8, 7(DX) // c579174207 or 6271fd28178207000000 or 6271fd48178207000000 + VMOVHPD X20, 7(DX) // 62e1fd0817a207000000 or 62e1fd2817a207000000 or 62e1fd4817a207000000 + VMOVHPD X5, -15(R11)(CX*1) // c4c179176c0bf1 or 62d1fd2817ac0bf1ffffff or 62d1fd4817ac0bf1ffffff + VMOVHPD X8, -15(R11)(CX*1) // c4417917440bf1 or 6251fd2817840bf1ffffff or 6251fd4817840bf1ffffff + VMOVHPD X20, -15(R11)(CX*1) // 62c1fd0817a40bf1ffffff or 62c1fd2817a40bf1ffffff or 62c1fd4817a40bf1ffffff + VMOVHPD 
X5, (SP)(AX*2) // c5f9172c44 or 62f1fd28172c44 or 62f1fd48172c44 + VMOVHPD X8, (SP)(AX*2) // c579170444 or 6271fd28170444 or 6271fd48170444 + VMOVHPD X20, (SP)(AX*2) // 62e1fd08172444 or 62e1fd28172444 or 62e1fd48172444 + // VMOVLPD: overlapping VEX and EVEX variants. + VMOVLPD (AX), X5, X5 // c5d11228 or 62f1d5281228 or 62f1d5481228 + VMOVLPD 7(DX), X5, X5 // c5d1126a07 or 62f1d52812aa07000000 or 62f1d54812aa07000000 + VMOVLPD -15(R11)(CX*1), X5, X5 // c4c151126c0bf1 or 62d1d52812ac0bf1ffffff or 62d1d54812ac0bf1ffffff + VMOVLPD (SP)(AX*2), X5, X5 // c5d1122c44 or 62f1d528122c44 or 62f1d548122c44 + VMOVLPD (AX), X8, X5 // c5b91228 or 62f1bd281228 or 62f1bd481228 + VMOVLPD 7(DX), X8, X5 // c5b9126a07 or 62f1bd2812aa07000000 or 62f1bd4812aa07000000 + VMOVLPD -15(R11)(CX*1), X8, X5 // c4c139126c0bf1 or 62d1bd2812ac0bf1ffffff or 62d1bd4812ac0bf1ffffff + VMOVLPD (SP)(AX*2), X8, X5 // c5b9122c44 or 62f1bd28122c44 or 62f1bd48122c44 + VMOVLPD (AX), X20, X5 // 62f1dd001228 or 62f1dd201228 or 62f1dd401228 + VMOVLPD 7(DX), X20, X5 // 62f1dd0012aa07000000 or 62f1dd2012aa07000000 or 62f1dd4012aa07000000 + VMOVLPD -15(R11)(CX*1), X20, X5 // 62d1dd0012ac0bf1ffffff or 62d1dd2012ac0bf1ffffff or 62d1dd4012ac0bf1ffffff + VMOVLPD (SP)(AX*2), X20, X5 // 62f1dd00122c44 or 62f1dd20122c44 or 62f1dd40122c44 + VMOVLPD (AX), X5, X8 // c5511200 or 6271d5281200 or 6271d5481200 + VMOVLPD 7(DX), X5, X8 // c551124207 or 6271d528128207000000 or 6271d548128207000000 + VMOVLPD -15(R11)(CX*1), X5, X8 // c4415112440bf1 or 6251d52812840bf1ffffff or 6251d54812840bf1ffffff + VMOVLPD (SP)(AX*2), X5, X8 // c551120444 or 6271d528120444 or 6271d548120444 + VMOVLPD (AX), X8, X8 // c5391200 or 6271bd281200 or 6271bd481200 + VMOVLPD 7(DX), X8, X8 // c539124207 or 6271bd28128207000000 or 6271bd48128207000000 + VMOVLPD -15(R11)(CX*1), X8, X8 // c4413912440bf1 or 6251bd2812840bf1ffffff or 6251bd4812840bf1ffffff + VMOVLPD (SP)(AX*2), X8, X8 // c539120444 or 6271bd28120444 or 6271bd48120444 + VMOVLPD (AX), X20, X8 
// 6271dd001200 or 6271dd201200 or 6271dd401200 + VMOVLPD 7(DX), X20, X8 // 6271dd00128207000000 or 6271dd20128207000000 or 6271dd40128207000000 + VMOVLPD -15(R11)(CX*1), X20, X8 // 6251dd0012840bf1ffffff or 6251dd2012840bf1ffffff or 6251dd4012840bf1ffffff + VMOVLPD (SP)(AX*2), X20, X8 // 6271dd00120444 or 6271dd20120444 or 6271dd40120444 + VMOVLPD (AX), X5, X20 // 62e1d5081220 or 62e1d5281220 or 62e1d5481220 + VMOVLPD 7(DX), X5, X20 // 62e1d50812a207000000 or 62e1d52812a207000000 or 62e1d54812a207000000 + VMOVLPD -15(R11)(CX*1), X5, X20 // 62c1d50812a40bf1ffffff or 62c1d52812a40bf1ffffff or 62c1d54812a40bf1ffffff + VMOVLPD (SP)(AX*2), X5, X20 // 62e1d508122444 or 62e1d528122444 or 62e1d548122444 + VMOVLPD (AX), X8, X20 // 62e1bd081220 or 62e1bd281220 or 62e1bd481220 + VMOVLPD 7(DX), X8, X20 // 62e1bd0812a207000000 or 62e1bd2812a207000000 or 62e1bd4812a207000000 + VMOVLPD -15(R11)(CX*1), X8, X20 // 62c1bd0812a40bf1ffffff or 62c1bd2812a40bf1ffffff or 62c1bd4812a40bf1ffffff + VMOVLPD (SP)(AX*2), X8, X20 // 62e1bd08122444 or 62e1bd28122444 or 62e1bd48122444 + VMOVLPD (AX), X20, X20 // 62e1dd001220 or 62e1dd201220 or 62e1dd401220 + VMOVLPD 7(DX), X20, X20 // 62e1dd0012a207000000 or 62e1dd2012a207000000 or 62e1dd4012a207000000 + VMOVLPD -15(R11)(CX*1), X20, X20 // 62c1dd0012a40bf1ffffff or 62c1dd2012a40bf1ffffff or 62c1dd4012a40bf1ffffff + VMOVLPD (SP)(AX*2), X20, X20 // 62e1dd00122444 or 62e1dd20122444 or 62e1dd40122444 + VMOVLPD X5, (AX) // c5f91328 or 62f1fd281328 or 62f1fd481328 + VMOVLPD X8, (AX) // c5791300 or 6271fd281300 or 6271fd481300 + VMOVLPD X20, (AX) // 62e1fd081320 or 62e1fd281320 or 62e1fd481320 + VMOVLPD X5, 7(DX) // c5f9136a07 or 62f1fd2813aa07000000 or 62f1fd4813aa07000000 + VMOVLPD X8, 7(DX) // c579134207 or 6271fd28138207000000 or 6271fd48138207000000 + VMOVLPD X20, 7(DX) // 62e1fd0813a207000000 or 62e1fd2813a207000000 or 62e1fd4813a207000000 + VMOVLPD X5, -15(R11)(CX*1) // c4c179136c0bf1 or 62d1fd2813ac0bf1ffffff or 62d1fd4813ac0bf1ffffff + VMOVLPD 
X8, -15(R11)(CX*1) // c4417913440bf1 or 6251fd2813840bf1ffffff or 6251fd4813840bf1ffffff + VMOVLPD X20, -15(R11)(CX*1) // 62c1fd0813a40bf1ffffff or 62c1fd2813a40bf1ffffff or 62c1fd4813a40bf1ffffff + VMOVLPD X5, (SP)(AX*2) // c5f9132c44 or 62f1fd28132c44 or 62f1fd48132c44 + VMOVLPD X8, (SP)(AX*2) // c579130444 or 6271fd28130444 or 6271fd48130444 + VMOVLPD X20, (SP)(AX*2) // 62e1fd08132444 or 62e1fd28132444 or 62e1fd48132444 + // "VPEXTRW imm8u, xmm1, r32/m16"/15 vs "VPEXTRW imm8u, xmm2, r32"/C5. + VPEXTRW $17, X20, AX // 62b17d08c5c411 or 62e37d0815e011 or 62e3fd0815e011 + VPEXTRW $127, X20, AX // 62b17d08c5c47f or 62e37d0815e07f or 62e3fd0815e07f + VPEXTRW $17, X20, SP // 62b17d08c5e411 or 62e37d0815e411 or 62e3fd0815e411 + VPEXTRW $127, X20, SP // 62b17d08c5e47f or 62e37d0815e47f or 62e3fd0815e47f + VPEXTRW $17, X20, BP // 62b17d08c5ec11 or 62e37d0815e511 or 62e3fd0815e511 + VPEXTRW $127, X20, BP // 62b17d08c5ec7f or 62e37d0815e57f or 62e3fd0815e57f + VPEXTRW $17, X20, R14 // 62317d08c5f411 or 62c37d0815e611 or 62c3fd0815e611 + VPEXTRW $127, X20, R14 // 62317d08c5f47f or 62c37d0815e67f or 62c3fd0815e67f + VPEXTRW $17, X20, (AX) // 62e37d08152011 or 62e3fd08152011 + VPEXTRW $127, X20, (AX) // 62e37d0815207f or 62e3fd0815207f + VPEXTRW $17, X20, 7(DX) // 62e37d0815a20700000011 or 62e3fd0815a20700000011 + VPEXTRW $127, X20, 7(DX) // 62e37d0815a2070000007f or 62e3fd0815a2070000007f + VPEXTRW $17, X20, -15(R11)(CX*1) // 62c37d0815a40bf1ffffff11 or 62c3fd0815a40bf1ffffff11 + VPEXTRW $127, X20, -15(R11)(CX*1) // 62c37d0815a40bf1ffffff7f or 62c3fd0815a40bf1ffffff7f + VPEXTRW $17, X20, (SP)(AX*2) // 62e37d0815244411 or 62e3fd0815244411 + VPEXTRW $127, X20, (SP)(AX*2) // 62e37d081524447f or 62e3fd081524447f + // EVEX: embedded zeroing. + VADDPD.Z X30, X1, K7, X0 // 6291f58f58c6 + VMAXPD.Z (AX), Z2, K1, Z1 // 62f1edc95f08 + // EVEX: embedded rounding. 
+ VADDPD.RU_SAE Z3, Z2, K1, Z1 // 62f1ed5958cb + VADDPD.RD_SAE Z3, Z2, K1, Z1 // 62f1ed3958cb + VADDPD.RZ_SAE Z3, Z2, K1, Z1 // 62f1ed7958cb + VADDPD.RN_SAE Z3, Z2, K1, Z1 // 62f1ed1958cb + VADDPD.RU_SAE.Z Z3, Z2, K1, Z1 // 62f1edd958cb + VADDPD.RD_SAE.Z Z3, Z2, K1, Z1 // 62f1edb958cb + VADDPD.RZ_SAE.Z Z3, Z2, K1, Z1 // 62f1edf958cb + VADDPD.RN_SAE.Z Z3, Z2, K1, Z1 // 62f1ed9958cb + // EVEX: embedded broadcasting. + VADDPD.BCST (AX), X2, K1, X1 // 62f1ed195808 + VADDPD.BCST.Z (AX), X2, K1, X1 // 62f1ed995808 + VADDPD.BCST (AX), Y2, K1, Y1 // 62f1ed395808 + VADDPD.BCST.Z (AX), Y2, K1, Y1 // 62f1edb95808 + VADDPD.BCST (AX), Z2, K1, Z1 // 62f1ed595808 + VADDPD.BCST.Z (AX), Z2, K1, Z1 // 62f1edd95808 + VMAXPD.BCST (AX), Z2, K1, Z1 // 62f1ed595f08 + VMAXPD.BCST.Z (AX), Z2, K1, Z1 // 62f1edd95f08 + // EVEX: suppress all exceptions (SAE). + VMAXPD.SAE Z3, Z2, K1, Z1 // 62f1ed595fcb or 62f1ed195fcb + VMAXPD.SAE.Z Z3, Z2, K1, Z1 // 62f1edd95fcb or 62f1ed995fcb + VMAXPD (AX), Z2, K1, Z1 // 62f1ed495f08 + VCMPSD.SAE $0, X0, X2, K0 // 62f1ef18c2c000 + VCMPSD.SAE $0, X0, X2, K1, K0 // 62f1ef19c2c000 + // EVEX: broadcast-affected compressed displacement (Disp8). 
+ VADDPD.BCST 1016(DX), X0, X29 // 6261fd18586a7f + VADDPD.BCST 1016(DX), X29, X1 // 62f19510584a7f + VADDPD.BCST 1016(DX), X28, X29 // 62619d10586a7f + VADDPD.BCST 1016(DX)(AX*2), X0, X29 // 6261fd18586c427f + VADDPD.BCST 1016(DX)(AX*2), X29, X1 // 62f19510584c427f + VADDPD.BCST 1016(DX), Y0, Y29 // 6261fd38586a7f + VADDPD.BCST 1016(DX), Y29, Y1 // 62f19530584a7f + VADDPD.BCST 1016(DX), Y28, Y29 // 62619d30586a7f + VADDPD.BCST 1016(DX)(AX*2), Y0, Y29 // 6261fd38586c427f + VADDPD.BCST 1016(DX)(AX*2), Y29, Y1 // 62f19530584c427f + VADDPD.BCST 1016(DX), Z0, Z29 // 6261fd58586a7f + VADDPD.BCST 1016(DX), Z29, Z1 // 62f19550584a7f + VADDPD.BCST 1016(DX), Z28, Z29 // 62619d50586a7f + VADDPD.BCST 1016(DX)(AX*2), Z0, Z29 // 6261fd58586c427f + VADDPD.BCST 1016(DX)(AX*2), Z29, Z1 // 62f19550584c427f + VADDPS.BCST 508(DX), Z0, Z29 // 62617c58586a7f + VADDPS.BCST 508(DX), Z1, Z29 // 62617458586a7f + VADDPS.BCST 508(DX), Z28, Z29 // 62611c50586a7f + VADDPS.BCST 508(DX)(AX*2), Z0, Z29 // 62617c58586c427f + VADDPS.BCST 508(DX)(AX*2), Z1, Z29 // 62617458586c427f + // EVEX: broadcast-affected compressed displacement that does not fit into 8bits. 
+ VADDPD.BCST 2032(DX), X0, X29 // 6261fd1858aaf0070000 + VADDPD.BCST 2032(DX), X29, X1 // 62f19510588af0070000 + VADDPD.BCST 2032(DX), X28, X29 // 62619d1058aaf0070000 + VADDPD.BCST 2032(DX)(AX*2), X0, X29 // 6261fd1858ac42f0070000 + VADDPD.BCST 2032(DX)(AX*2), X29, X1 // 62f19510588c42f0070000 + VADDPD.BCST 2032(DX), Y0, Y29 // 6261fd3858aaf0070000 + VADDPD.BCST 2032(DX), Y29, Y1 // 62f19530588af0070000 + VADDPD.BCST 2032(DX), Y28, Y29 // 62619d3058aaf0070000 + VADDPD.BCST 2032(DX)(AX*2), Y0, Y29 // 6261fd3858ac42f0070000 + VADDPD.BCST 2032(DX)(AX*2), Y29, Y1 // 62f19530588c42f0070000 + VADDPD.BCST 2032(DX), Z0, Z29 // 6261fd5858aaf0070000 + VADDPD.BCST 2032(DX), Z29, Z1 // 62f19550588af0070000 + VADDPD.BCST 2032(DX), Z28, Z29 // 62619d5058aaf0070000 + VADDPD.BCST 2032(DX)(AX*2), Z0, Z29 // 6261fd5858ac42f0070000 + VADDPD.BCST 2032(DX)(AX*2), Z29, Z1 // 62f19550588c42f0070000 + VADDPS.BCST 2032(DX), Z0, Z29 // 62617c5858aaf0070000 + VADDPS.BCST 2032(DX), Z1, Z29 // 6261745858aaf0070000 + VADDPS.BCST 2032(DX), Z28, Z29 // 62611c5058aaf0070000 + VADDPS.BCST 2032(DX)(AX*2), Z0, Z29 // 62617c5858ac42f0070000 + VADDPS.BCST 2032(DX)(AX*2), Z1, Z29 // 6261745858ac42f0070000 + // Forced EVEX encoding due to suffixes. + VADDPD.BCST 2032(DX), X0, X0 // 62f1fd185882f0070000 + VADDPD.BCST 2032(DX), Y0, Y0 // 62f1fd385882f0070000 + // Test new Z-cases one-by-one. + // + // Zevex_i_r_k_rm. + VCVTPS2PH $1, Z2, K5, Y21 // 62b37d4d1dd501 + VCVTPS2PH $2, Z21, K4, Y2 // 62e37d4c1dea02 + // Zevex_i_r_rm. + VCVTPS2PH $1, Z2, Y21 // 62b37d481dd501 + VCVTPS2PH $2, Z21, Y2 // 62e37d481dea02 + // Zevex_i_rm_k_r. + VFPCLASSPDX $1, X2, K5, K3 // 62f3fd0d66da01 + VFPCLASSPDX $2, X21, K4, K1 // 62b3fd0c66cd02 + VFPCLASSPDX $1, (AX), K5, K3 // 62f3fd0d661801 + VFPCLASSPDX $2, (CX), K4, K1 // 62f3fd0c660902 + // Zevex_i_rm_k_vo. 
+ VPROLD $1, X2, K5, X21 // 62f1550572ca01 + VPROLD $2, Y21, K5, Y2 // 62b16d2d72cd02 + VPROLD $1, (AX), K5, X21 // 62f15505720801 + VPROLD $2, (CX), K5, Y2 // 62f16d2d720902 + // Zevex_i_rm_r. + VFPCLASSPDX $1, X2, K3 // 62f3fd0866da01 + VFPCLASSPDX $2, X21, K1 // 62b3fd0866cd02 + VFPCLASSPDX $1, (AX), K3 // 62f3fd08661801 + VFPCLASSPDX $2, (CX), K1 // 62f3fd08660902 + // Zevex_i_rm_v_k_r. + VALIGND $1, X2, X9, K5, X21 // 62e3350d03ea01 + VALIGND $2, Y21, Y2, K5, Y9 // 62336d2d03cd02 + VALIGND $3, Z9, Z21, K5, Z2 // 62d3554503d103 + VALIGND $1, (AX), X9, K5, X21 // 62e3350d032801 + VALIGND $2, (CX), Y2, K5, Y9 // 62736d2d030902 + VALIGND $3, (AX), Z21, K5, Z2 // 62f35545031003 + // Zevex_i_rm_v_r. + VALIGND $1, X2, X9, X21 // 62e3350803ea01 + VALIGND $2, Y21, Y2, Y9 // 62336d2803cd02 + VALIGND $3, Z9, Z21, Z2 // 62d3554003d103 + VALIGND $1, (AX), X9, X21 // 62e33508032801 + VALIGND $2, (CX), Y2, Y9 // 62736d28030902 + VALIGND $3, (AX), Z21, Z2 // 62f35540031003 + // Zevex_i_rm_vo. + VPROLD $1, X2, X21 // 62f1550072ca01 + VPROLD $2, Y21, Y2 // 62b16d2872cd02 + VPROLD $1, (AX), X21 // 62f15500720801 + VPROLD $2, (CX), Y2 // 62f16d28720902 + // Zevex_k_rmo. + VGATHERPF0DPD K5, (AX)(Y2*2) // 62f2fd4dc60c50 + VGATHERPF0DPD K3, (CX)(Y21*2) // 62f2fd43c60c69 + VSCATTERPF1DPD K5, (AX)(Y2*2) // 62f2fd4dc63450 + VSCATTERPF1DPD K3, (CX)(Y21*2) // 62f2fd43c63469 + // Zevex_r_k_rm. + VPSCATTERDD X2, K5, (AX)(X21*2) // 62f27d05a01468 + VPSCATTERDD X21, K5, (AX)(X2*2) // 62e27d0da02c50 + VPSCATTERDD Y2, K5, (AX)(Y21*2) // 62f27d25a01468 + VPSCATTERDD Y21, K5, (AX)(Y2*2) // 62e27d2da02c50 + VPSCATTERDD Z2, K5, (AX)(Z21*2) // 62f27d45a01468 + VPSCATTERDD Z21, K5, (AX)(Z2*2) // 62e27d4da02c50 + // Zevex_r_v_k_rm. + VMOVSD X2, X9, K5, X21 // 62b1b70d11d5 or 62e1b70d10ea + VMOVSD X21, X2, K5, X9 // 62c1ef0d11e9 or 6231ef0d10cd + VMOVSD X9, X21, K5, X2 // 6271d70511ca or 62d1d70510d1 + // Zevex_r_v_rm. 
+ VMOVSD X2, X9, X21 // 62b1b70811d5 or 62e1b70810ea + VMOVSD X21, X2, X9 // 62c1ef0811e9 or 6231ef0810cd + VMOVSD X9, X21, X2 // 6271d70011ca or 62d1d70010d1 + VPMOVDB X2, X21 // 62b27e0831d5 + VPMOVDB X21, X2 // 62e27e0831ea + VPMOVDB X2, (AX) // 62f27e083110 + VPMOVDB X21, (AX) // 62e27e083128 + // Zevex_rm_k_r. + VMOVDDUP X2, K5, X21 // 62e1ff0d12ea + VMOVDDUP X21, K5, X2 // 62b1ff0d12d5 + VMOVDDUP (AX), K5, X21 // 62e1ff0d1228 + VMOVDDUP (CX), K5, X2 // 62f1ff0d1211 + VMOVDDUP Y2, K5, Y21 // 62e1ff2d12ea + VMOVDDUP Y21, K5, Y2 // 62b1ff2d12d5 + VMOVDDUP (AX), K5, Y21 // 62e1ff2d1228 + VMOVDDUP (CX), K5, Y2 // 62f1ff2d1211 + VMOVDDUP Z2, K5, Z21 // 62e1ff4d12ea + VMOVDDUP Z21, K5, Z2 // 62b1ff4d12d5 + VMOVDDUP (AX), K5, Z21 // 62e1ff4d1228 + VMOVDDUP (CX), K5, Z2 // 62f1ff4d1211 + // Zevex_rm_v_k_r. + VADDPD Z2, Z9, K5, Z21 // 62e1b54d58ea + VADDPD Z21, Z2, K5, Z9 // 6231ed4d58cd + VADDPD Z9, Z21, K5, Z2 // 62d1d54558d1 + // Zevex_rm_v_r. + VADDPD Z2, Z9, Z21 // 62e1b54858ea + VADDPD Z21, Z2, Z9 // 6231ed4858cd + VADDPD Z9, Z21, Z2 // 62d1d54058d1 + + CLWB (BX) // 660fae33 + CLDEMOTE (BX) // 0f1c03 + TPAUSE BX // 660faef3 + UMONITOR BX // f30faef3 + UMWAIT BX // f20faef3 + + RDPID DX // f30fc7fa + RDPID R11 // f3410fc7fb + + // End of tests. + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/amd64error.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/amd64error.s new file mode 100644 index 0000000000000000000000000000000000000000..e9a1d0acb2dab000ab353f84831a42ae41c39850 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/amd64error.s @@ -0,0 +1,150 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +TEXT errors(SB),$0 + MOVL foo<>(SB)(AX), AX // ERROR "invalid instruction" + MOVL (AX)(SP*1), AX // ERROR "invalid instruction" + EXTRACTPS $4, X2, (BX) // ERROR "invalid instruction" + EXTRACTPS $-1, X2, (BX) // ERROR "invalid instruction" + // VSIB addressing does not permit non-vector (X/Y) + // scaled index register. + VPGATHERDQ X12,(R13)(AX*2), X11 // ERROR "invalid instruction" + VPGATHERDQ X2, 664(BX*1), X1 // ERROR "invalid instruction" + VPGATHERDQ Y2, (BP)(AX*2), Y1 // ERROR "invalid instruction" + VPGATHERDQ Y5, 664(DX*8), Y6 // ERROR "invalid instruction" + VPGATHERDQ Y5, (DX), Y0 // ERROR "invalid instruction" + // VM/X rejects Y index register. + VPGATHERDQ Y5, 664(Y14*8), Y6 // ERROR "invalid instruction" + VPGATHERQQ X2, (BP)(Y7*2), X1 // ERROR "invalid instruction" + // VM/Y rejects X index register. + VPGATHERQQ Y2, (BP)(X7*2), Y1 // ERROR "invalid instruction" + VPGATHERDD Y5, -8(X14*8), Y6 // ERROR "invalid instruction" + // No VSIB for legacy instructions. + MOVL (AX)(X0*1), AX // ERROR "invalid instruction" + MOVL (AX)(Y0*1), AX // ERROR "invalid instruction" + // VSIB/VM is invalid without vector index. + // TODO(quasilyte): improve error message (#21860). + // "invalid VSIB address (missing vector index)" + VPGATHERQQ Y2, (BP), Y1 // ERROR "invalid instruction" + // AVX2GATHER mask/index/dest #UD cases. 
+ VPGATHERQQ Y2, (BP)(X2*2), Y2 // ERROR "mask, index, and destination registers should be distinct" + VPGATHERQQ Y2, (BP)(X2*2), Y7 // ERROR "mask, index, and destination registers should be distinct" + VPGATHERQQ Y2, (BP)(X7*2), Y2 // ERROR "mask, index, and destination registers should be distinct" + VPGATHERQQ Y7, (BP)(X2*2), Y2 // ERROR "mask, index, and destination registers should be distinct" + VPGATHERDQ X2, 664(X2*8), X2 // ERROR "mask, index, and destination registers should be distinct" + VPGATHERDQ X2, 664(X2*8), X7 // ERROR "mask, index, and destination registers should be distinct" + VPGATHERDQ X2, 664(X7*8), X2 // ERROR "mask, index, and destination registers should be distinct" + VPGATHERDQ X7, 664(X2*8), X2 // ERROR "mask, index, and destination registers should be distinct" + // Non-X0 for Yxr0 should produce an error + BLENDVPD X1, (BX), X2 // ERROR "invalid instruction" + // Check offset overflow. Must fit in int32. + MOVQ 2147483647+1(AX), AX // ERROR "offset too large" + MOVQ 3395469782(R10), R8 // ERROR "offset too large" + LEAQ 3395469782(AX), AX // ERROR "offset too large" + ADDQ 3395469782(AX), AX // ERROR "offset too large" + ADDL 3395469782(AX), AX // ERROR "offset too large" + ADDW 3395469782(AX), AX // ERROR "offset too large" + LEAQ 433954697820(AX), AX // ERROR "offset too large" + ADDQ 433954697820(AX), AX // ERROR "offset too large" + ADDL 433954697820(AX), AX // ERROR "offset too large" + ADDW 433954697820(AX), AX // ERROR "offset too large" + // Pseudo-registers should not be used as scaled index. + CALL (AX)(PC*1) // ERROR "invalid instruction" + CALL (AX)(SB*1) // ERROR "invalid instruction" + CALL (AX)(FP*1) // ERROR "invalid instruction" + // Forbid memory operands for MOV CR/DR. See #24981. 
+ MOVQ CR0, (AX) // ERROR "invalid instruction" + MOVQ CR2, (AX) // ERROR "invalid instruction" + MOVQ CR3, (AX) // ERROR "invalid instruction" + MOVQ CR4, (AX) // ERROR "invalid instruction" + MOVQ CR8, (AX) // ERROR "invalid instruction" + MOVQ (AX), CR0 // ERROR "invalid instruction" + MOVQ (AX), CR2 // ERROR "invalid instruction" + MOVQ (AX), CR3 // ERROR "invalid instruction" + MOVQ (AX), CR4 // ERROR "invalid instruction" + MOVQ (AX), CR8 // ERROR "invalid instruction" + MOVQ DR0, (AX) // ERROR "invalid instruction" + MOVQ DR2, (AX) // ERROR "invalid instruction" + MOVQ DR3, (AX) // ERROR "invalid instruction" + MOVQ DR6, (AX) // ERROR "invalid instruction" + MOVQ DR7, (AX) // ERROR "invalid instruction" + MOVQ (AX), DR0 // ERROR "invalid instruction" + MOVQ (AX), DR2 // ERROR "invalid instruction" + MOVQ (AX), DR3 // ERROR "invalid instruction" + MOVQ (AX), DR6 // ERROR "invalid instruction" + MOVQ (AX), DR7 // ERROR "invalid instruction" + // AVX512GATHER index/index #UD cases. + VPGATHERQQ (BP)(X2*2), K1, X2 // ERROR "index and destination registers should be distinct" + VPGATHERQQ (BP)(Y15*2), K1, Y15 // ERROR "index and destination registers should be distinct" + VPGATHERQQ (BP)(Z20*2), K1, Z20 // ERROR "index and destination registers should be distinct" + VPGATHERDQ (BP)(X2*2), K1, X2 // ERROR "index and destination registers should be distinct" + VPGATHERDQ (BP)(X15*2), K1, Y15 // ERROR "index and destination registers should be distinct" + VPGATHERDQ (BP)(Y20*2), K1, Z20 // ERROR "index and destination registers should be distinct" + // Instructions without EVEX variant can't use High-16 registers. + VADDSUBPD X20, X1, X2 // ERROR "invalid instruction" + VADDSUBPS X0, X20, X2 // ERROR "invalid instruction" + // Use of K0 for write mask (Yknot0). + // TODO(quasilyte): improve error message (#21860). 
+ // "K0 can't be used for write mask" + VADDPD X0, X1, K0, X2 // ERROR "invalid instruction" + VADDPD Y0, Y1, K0, Y2 // ERROR "invalid instruction" + VADDPD Z0, Z1, K0, Z2 // ERROR "invalid instruction" + // VEX-encoded VSIB can't use High-16 registers as index (unlike EVEX). + // TODO(quasilyte): improve error message (#21860). + VPGATHERQQ X2, (BP)(X20*2), X3 // ERROR "invalid instruction" + VPGATHERQQ Y2, (BP)(Y20*2), Y3 // ERROR "invalid instruction" + // YzrMulti4 expects exactly 4 registers referenced by REG_LIST. + // TODO(quasilyte): improve error message (#21860). + V4FMADDPS (AX), [Z0-Z4], K1, Z7 // ERROR "invalid instruction" + V4FMADDPS (AX), [Z0-Z0], K1, Z7 // ERROR "invalid instruction" + // Invalid ranges in REG_LIST (low > high). + // TODO(quasilyte): improve error message (#21860). + V4FMADDPS (AX), [Z4-Z0], K1, Z7 // ERROR "invalid instruction" + V4FMADDPS (AX), [Z1-Z0], K1, Z7 // ERROR "invalid instruction" + // Mismatching registers in a range. + // TODO(quasilyte): improve error message (#21860). + V4FMADDPS (AX), [AX-Z3], K1, Z7 // ERROR "invalid instruction" + V4FMADDPS (AX), [Z0-AX], K1, Z7 // ERROR "invalid instruction" + // Usage of suffixes for non-EVEX instructions. + ADCB.Z $7, AL // ERROR "invalid instruction" + ADCB.RU_SAE $7, AL // ERROR "invalid instruction" + ADCB.RU_SAE.Z $7, AL // ERROR "invalid instruction" + // Usage of rounding with invalid operands. + VADDPD.RU_SAE X3, X2, K1, X1 // ERROR "unsupported rounding" + VADDPD.RD_SAE X3, X2, K1, X1 // ERROR "unsupported rounding" + VADDPD.RZ_SAE X3, X2, K1, X1 // ERROR "unsupported rounding" + VADDPD.RN_SAE X3, X2, K1, X1 // ERROR "unsupported rounding" + VADDPD.RU_SAE Y3, Y2, K1, Y1 // ERROR "unsupported rounding" + VADDPD.RD_SAE Y3, Y2, K1, Y1 // ERROR "unsupported rounding" + VADDPD.RZ_SAE Y3, Y2, K1, Y1 // ERROR "unsupported rounding" + VADDPD.RN_SAE Y3, Y2, K1, Y1 // ERROR "unsupported rounding" + // Unsupported SAE. 
+ VMAXPD.SAE (AX), Z2, K1, Z1 // ERROR "illegal SAE with memory argument" + VADDPD.SAE X3, X2, K1, X1 // ERROR "unsupported SAE" + // Unsupported zeroing. + VFPCLASSPDX.Z $0, (AX), K2, K1 // ERROR "unsupported zeroing" + VFPCLASSPDY.Z $0, (AX), K2, K1 // ERROR "unsupported zeroing" + // Unsupported broadcast. + VFPCLASSSD.BCST $0, (AX), K2, K1 // ERROR "unsupported broadcast" + VFPCLASSSS.BCST $0, (AX), K2, K1 // ERROR "unsupported broadcast" + // Broadcast without memory operand. + VADDPD.BCST X3, X2, K1, X1 // ERROR "illegal broadcast without memory argument" + VADDPD.BCST X3, X2, K1, X1 // ERROR "illegal broadcast without memory argument" + VADDPD.BCST X3, X2, K1, X1 // ERROR "illegal broadcast without memory argument" + // CLWB instructions: + CLWB BX // ERROR "invalid instruction" + // CLDEMOTE instructions: + CLDEMOTE BX // ERROR "invalid instruction" + // WAITPKG instructions: + TPAUSE (BX) // ERROR "invalid instruction" + UMONITOR (BX) // ERROR "invalid instruction" + UMWAIT (BX) // ERROR "invalid instruction" + // .Z instructions + VMOVDQA32.Z Z0, Z1 // ERROR "mask register must be specified for .Z instructions" + VMOVDQA32.Z Z0, K0, Z1 // ERROR "invalid instruction" + VMOVDQA32.Z Z0, K1, Z1 // ok + + RDPID (BX) // ERROR "invalid instruction" + + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/arm.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/arm.s new file mode 100644 index 0000000000000000000000000000000000000000..93edc8854ead472df0dc790c2d57c97f390c30ea --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/arm.s @@ -0,0 +1,1617 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This input was created by taking the instruction productions in +// the old assembler's (5a's) grammar and hand-writing complete +// instructions for each rule, to guarantee we cover the same space. + +#include "../../../../../runtime/textflag.h" + +TEXT foo(SB), DUPOK|NOSPLIT, $0 + +// ADD +// +// LTYPE1 cond imsr ',' spreg ',' reg +// { +// outcode($1, $2, &$3, $5, &$7); +// } +// Cover some operand space here too. + ADD $1, R2, R3 + ADD R1<>R2, R3, R4 + ADD R1@>R2, R3, R4 + ADD R1->R2, R3, R4 + ADD R1, R2, R3 + ADD R(1)<>R2, R3 + ADD R1@>R2, R3 + ADD R1->R2, R3 + ADD R1, R2 + +// +// MVN +// +// LTYPE2 cond imsr ',' reg +// { +// outcode($1, $2, &$3, 0, &$5); +// } + CLZ R1, R2 + +// +// MOVW +// +// LTYPE3 cond gen ',' gen +// { +// outcode($1, $2, &$3, 0, &$5); +// } + MOVW.S R1, R2 + MOVW $1, R2 + MOVW.S R1<(SB) // JMP bar<>(SB) + +// +// BX +// +// LTYPEBX comma ireg +// { +// outcode($1, Always, &nullgen, 0, &$3); +// } + BX (R0) + +// +// BEQ +// +// LTYPE5 comma rel +// { +// outcode($1, Always, &nullgen, 0, &$3); +// } + BEQ 1(PC) + +// +// SWI +// +// LTYPE6 cond comma gen +// { +// outcode($1, $2, &nullgen, 0, &$4); +// } + SWI $2 + SWI $3 +// SWI foo(SB) - TODO: classifying foo(SB) as C_TLS_LE + +// +// CMP +// +// LTYPE7 cond imsr ',' spreg +// { +// outcode($1, $2, &$3, $5, &nullgen); +// } + CMP $1, R2 + CMP R1<>28, R1, R2 // 202e01e0 + AND R0<<28, R1, R2 // 002e01e0 + AND R0->28, R1, R2 // 402e01e0 + AND R0@>28, R1, R2 // 602e01e0 + AND.S R0>>28, R1, R2 // 202e11e0 + AND.S R0<<28, R1, R2 // 002e11e0 + AND.S R0->28, R1, R2 // 402e11e0 + AND.S R0@>28, R1, R2 // 602e11e0 + AND R0<<28, R1 // 001e01e0 + AND R0>>28, R1 // 201e01e0 + AND R0->28, R1 // 401e01e0 + AND R0@>28, R1 // 601e01e0 + AND.S R0<<28, R1 // 001e11e0 + AND.S R0>>28, R1 // 201e11e0 + AND.S R0->28, R1 // 401e11e0 + AND.S R0@>28, R1 // 601e11e0 + AND R0<>R1, R2, R3 // 303102e0 + AND R0->R1, R2, R3 // 503102e0 + AND R0@>R1, R2, R3 // 703102e0 + AND.S R0<>R1, R2, R3 // 303112e0 + 
AND.S R0->R1, R2, R3 // 503112e0 + AND.S R0@>R1, R2, R3 // 703112e0 + AND R0<>R1, R2 // 302102e0 + AND R0->R1, R2 // 502102e0 + AND R0@>R1, R2 // 702102e0 + AND.S R0<>R1, R2 // 302112e0 + AND.S R0->R1, R2 // 502112e0 + AND.S R0@>R1, R2 // 702112e0 + +// EOR + EOR $255, R0, R1 // ff1020e2 + EOR $4278190080, R0, R1 // ff1420e2 + EOR.S $255, R0, R1 // ff1030e2 + EOR.S $4278190080, R0, R1 // ff1430e2 + EOR $255, R0 // ff0020e2 + EOR $4278190080, R0 // ff0420e2 + EOR.S $255, R0 // ff0030e2 + EOR.S $4278190080, R0 // ff0430e2 + EOR R0, R1, R2 // 002021e0 + EOR.S R0, R1, R2 // 002031e0 + EOR R0, R1 // 001021e0 + EOR.S R0, R1 // 001031e0 + EOR R0>>28, R1, R2 // 202e21e0 + EOR R0<<28, R1, R2 // 002e21e0 + EOR R0->28, R1, R2 // 402e21e0 + EOR R0@>28, R1, R2 // 602e21e0 + EOR.S R0>>28, R1, R2 // 202e31e0 + EOR.S R0<<28, R1, R2 // 002e31e0 + EOR.S R0->28, R1, R2 // 402e31e0 + EOR.S R0@>28, R1, R2 // 602e31e0 + EOR R0<<28, R1 // 001e21e0 + EOR R0>>28, R1 // 201e21e0 + EOR R0->28, R1 // 401e21e0 + EOR R0@>28, R1 // 601e21e0 + EOR.S R0<<28, R1 // 001e31e0 + EOR.S R0>>28, R1 // 201e31e0 + EOR.S R0->28, R1 // 401e31e0 + EOR.S R0@>28, R1 // 601e31e0 + EOR R0<>R1, R2, R3 // 303122e0 + EOR R0->R1, R2, R3 // 503122e0 + EOR R0@>R1, R2, R3 // 703122e0 + EOR.S R0<>R1, R2, R3 // 303132e0 + EOR.S R0->R1, R2, R3 // 503132e0 + EOR.S R0@>R1, R2, R3 // 703132e0 + EOR R0<>R1, R2 // 302122e0 + EOR R0->R1, R2 // 502122e0 + EOR R0@>R1, R2 // 702122e0 + EOR.S R0<>R1, R2 // 302132e0 + EOR.S R0->R1, R2 // 502132e0 + EOR.S R0@>R1, R2 // 702132e0 + +// ORR + ORR $255, R0, R1 // ff1080e3 + ORR $4278190080, R0, R1 // ff1480e3 + ORR.S $255, R0, R1 // ff1090e3 + ORR.S $4278190080, R0, R1 // ff1490e3 + ORR $255, R0 // ff0080e3 + ORR $4278190080, R0 // ff0480e3 + ORR.S $255, R0 // ff0090e3 + ORR.S $4278190080, R0 // ff0490e3 + ORR R0, R1, R2 // 002081e1 + ORR.S R0, R1, R2 // 002091e1 + ORR R0, R1 // 001081e1 + ORR.S R0, R1 // 001091e1 + ORR R0>>28, R1, R2 // 202e81e1 + ORR R0<<28, R1, R2 // 002e81e1 + ORR 
R0->28, R1, R2 // 402e81e1 + ORR R0@>28, R1, R2 // 602e81e1 + ORR.S R0>>28, R1, R2 // 202e91e1 + ORR.S R0<<28, R1, R2 // 002e91e1 + ORR.S R0->28, R1, R2 // 402e91e1 + ORR.S R0@>28, R1, R2 // 602e91e1 + ORR R0<<28, R1 // 001e81e1 + ORR R0>>28, R1 // 201e81e1 + ORR R0->28, R1 // 401e81e1 + ORR R0@>28, R1 // 601e81e1 + ORR.S R0<<28, R1 // 001e91e1 + ORR.S R0>>28, R1 // 201e91e1 + ORR.S R0->28, R1 // 401e91e1 + ORR.S R0@>28, R1 // 601e91e1 + ORR R0<>R1, R2, R3 // 303182e1 + ORR R0->R1, R2, R3 // 503182e1 + ORR R0@>R1, R2, R3 // 703182e1 + ORR.S R0<>R1, R2, R3 // 303192e1 + ORR.S R0->R1, R2, R3 // 503192e1 + ORR.S R0@>R1, R2, R3 // 703192e1 + ORR R0<>R1, R2 // 302182e1 + ORR R0->R1, R2 // 502182e1 + ORR R0@>R1, R2 // 702182e1 + ORR.S R0<>R1, R2 // 302192e1 + ORR.S R0->R1, R2 // 502192e1 + ORR.S R0@>R1, R2 // 702192e1 + +// SUB + SUB $255, R0, R1 // ff1040e2 + SUB $4278190080, R0, R1 // ff1440e2 + SUB.S $255, R0, R1 // ff1050e2 + SUB.S $4278190080, R0, R1 // ff1450e2 + SUB $255, R0 // ff0040e2 + SUB $4278190080, R0 // ff0440e2 + SUB.S $255, R0 // ff0050e2 + SUB.S $4278190080, R0 // ff0450e2 + SUB R0, R1, R2 // 002041e0 + SUB.S R0, R1, R2 // 002051e0 + SUB R0, R1 // 001041e0 + SUB.S R0, R1 // 001051e0 + SUB R0>>28, R1, R2 // 202e41e0 + SUB R0<<28, R1, R2 // 002e41e0 + SUB R0->28, R1, R2 // 402e41e0 + SUB R0@>28, R1, R2 // 602e41e0 + SUB.S R0>>28, R1, R2 // 202e51e0 + SUB.S R0<<28, R1, R2 // 002e51e0 + SUB.S R0->28, R1, R2 // 402e51e0 + SUB.S R0@>28, R1, R2 // 602e51e0 + SUB R0<<28, R1 // 001e41e0 + SUB R0>>28, R1 // 201e41e0 + SUB R0->28, R1 // 401e41e0 + SUB R0@>28, R1 // 601e41e0 + SUB.S R0<<28, R1 // 001e51e0 + SUB.S R0>>28, R1 // 201e51e0 + SUB.S R0->28, R1 // 401e51e0 + SUB.S R0@>28, R1 // 601e51e0 + SUB R0<>R1, R2, R3 // 303142e0 + SUB R0->R1, R2, R3 // 503142e0 + SUB R0@>R1, R2, R3 // 703142e0 + SUB.S R0<>R1, R2, R3 // 303152e0 + SUB.S R0->R1, R2, R3 // 503152e0 + SUB.S R0@>R1, R2, R3 // 703152e0 + SUB R0<>R1, R2 // 302142e0 + SUB R0->R1, R2 // 502142e0 + SUB 
R0@>R1, R2 // 702142e0 + SUB.S R0<>R1, R2 // 302152e0 + SUB.S R0->R1, R2 // 502152e0 + SUB.S R0@>R1, R2 // 702152e0 + +// SBC + SBC $255, R0, R1 // ff10c0e2 + SBC $4278190080, R0, R1 // ff14c0e2 + SBC.S $255, R0, R1 // ff10d0e2 + SBC.S $4278190080, R0, R1 // ff14d0e2 + SBC $255, R0 // ff00c0e2 + SBC $4278190080, R0 // ff04c0e2 + SBC.S $255, R0 // ff00d0e2 + SBC.S $4278190080, R0 // ff04d0e2 + SBC R0, R1, R2 // 0020c1e0 + SBC.S R0, R1, R2 // 0020d1e0 + SBC R0, R1 // 0010c1e0 + SBC.S R0, R1 // 0010d1e0 + SBC R0>>28, R1, R2 // 202ec1e0 + SBC R0<<28, R1, R2 // 002ec1e0 + SBC R0->28, R1, R2 // 402ec1e0 + SBC R0@>28, R1, R2 // 602ec1e0 + SBC.S R0>>28, R1, R2 // 202ed1e0 + SBC.S R0<<28, R1, R2 // 002ed1e0 + SBC.S R0->28, R1, R2 // 402ed1e0 + SBC.S R0@>28, R1, R2 // 602ed1e0 + SBC R0<<28, R1 // 001ec1e0 + SBC R0>>28, R1 // 201ec1e0 + SBC R0->28, R1 // 401ec1e0 + SBC R0@>28, R1 // 601ec1e0 + SBC.S R0<<28, R1 // 001ed1e0 + SBC.S R0>>28, R1 // 201ed1e0 + SBC.S R0->28, R1 // 401ed1e0 + SBC.S R0@>28, R1 // 601ed1e0 + SBC R0<>R1, R2, R3 // 3031c2e0 + SBC R0->R1, R2, R3 // 5031c2e0 + SBC R0@>R1, R2, R3 // 7031c2e0 + SBC.S R0<>R1, R2, R3 // 3031d2e0 + SBC.S R0->R1, R2, R3 // 5031d2e0 + SBC.S R0@>R1, R2, R3 // 7031d2e0 + SBC R0<>R1, R2 // 3021c2e0 + SBC R0->R1, R2 // 5021c2e0 + SBC R0@>R1, R2 // 7021c2e0 + SBC.S R0<>R1, R2 // 3021d2e0 + SBC.S R0->R1, R2 // 5021d2e0 + SBC.S R0@>R1, R2 // 7021d2e0 + +// RSB + RSB $255, R0, R1 // ff1060e2 + RSB $4278190080, R0, R1 // ff1460e2 + RSB.S $255, R0, R1 // ff1070e2 + RSB.S $4278190080, R0, R1 // ff1470e2 + RSB $255, R0 // ff0060e2 + RSB $4278190080, R0 // ff0460e2 + RSB.S $255, R0 // ff0070e2 + RSB.S $4278190080, R0 // ff0470e2 + RSB R0, R1, R2 // 002061e0 + RSB.S R0, R1, R2 // 002071e0 + RSB R0, R1 // 001061e0 + RSB.S R0, R1 // 001071e0 + RSB R0>>28, R1, R2 // 202e61e0 + RSB R0<<28, R1, R2 // 002e61e0 + RSB R0->28, R1, R2 // 402e61e0 + RSB R0@>28, R1, R2 // 602e61e0 + RSB.S R0>>28, R1, R2 // 202e71e0 + RSB.S R0<<28, R1, R2 // 002e71e0 + 
RSB.S R0->28, R1, R2 // 402e71e0 + RSB.S R0@>28, R1, R2 // 602e71e0 + RSB R0<<28, R1 // 001e61e0 + RSB R0>>28, R1 // 201e61e0 + RSB R0->28, R1 // 401e61e0 + RSB R0@>28, R1 // 601e61e0 + RSB.S R0<<28, R1 // 001e71e0 + RSB.S R0>>28, R1 // 201e71e0 + RSB.S R0->28, R1 // 401e71e0 + RSB.S R0@>28, R1 // 601e71e0 + RSB R0<>R1, R2, R3 // 303162e0 + RSB R0->R1, R2, R3 // 503162e0 + RSB R0@>R1, R2, R3 // 703162e0 + RSB.S R0<>R1, R2, R3 // 303172e0 + RSB.S R0->R1, R2, R3 // 503172e0 + RSB.S R0@>R1, R2, R3 // 703172e0 + RSB R0<>R1, R2 // 302162e0 + RSB R0->R1, R2 // 502162e0 + RSB R0@>R1, R2 // 702162e0 + RSB.S R0<>R1, R2 // 302172e0 + RSB.S R0->R1, R2 // 502172e0 + RSB.S R0@>R1, R2 // 702172e0 + +// RSC + RSC $255, R0, R1 // ff10e0e2 + RSC $4278190080, R0, R1 // ff14e0e2 + RSC.S $255, R0, R1 // ff10f0e2 + RSC.S $4278190080, R0, R1 // ff14f0e2 + RSC $255, R0 // ff00e0e2 + RSC $4278190080, R0 // ff04e0e2 + RSC.S $255, R0 // ff00f0e2 + RSC.S $4278190080, R0 // ff04f0e2 + RSC R0, R1, R2 // 0020e1e0 + RSC.S R0, R1, R2 // 0020f1e0 + RSC R0, R1 // 0010e1e0 + RSC.S R0, R1 // 0010f1e0 + RSC R0>>28, R1, R2 // 202ee1e0 + RSC R0<<28, R1, R2 // 002ee1e0 + RSC R0->28, R1, R2 // 402ee1e0 + RSC R0@>28, R1, R2 // 602ee1e0 + RSC.S R0>>28, R1, R2 // 202ef1e0 + RSC.S R0<<28, R1, R2 // 002ef1e0 + RSC.S R0->28, R1, R2 // 402ef1e0 + RSC.S R0@>28, R1, R2 // 602ef1e0 + RSC R0<<28, R1 // 001ee1e0 + RSC R0>>28, R1 // 201ee1e0 + RSC R0->28, R1 // 401ee1e0 + RSC R0@>28, R1 // 601ee1e0 + RSC.S R0<<28, R1 // 001ef1e0 + RSC.S R0>>28, R1 // 201ef1e0 + RSC.S R0->28, R1 // 401ef1e0 + RSC.S R0@>28, R1 // 601ef1e0 + RSC R0<>R1, R2, R3 // 3031e2e0 + RSC R0->R1, R2, R3 // 5031e2e0 + RSC R0@>R1, R2, R3 // 7031e2e0 + RSC.S R0<>R1, R2, R3 // 3031f2e0 + RSC.S R0->R1, R2, R3 // 5031f2e0 + RSC.S R0@>R1, R2, R3 // 7031f2e0 + RSC R0<>R1, R2 // 3021e2e0 + RSC R0->R1, R2 // 5021e2e0 + RSC R0@>R1, R2 // 7021e2e0 + RSC.S R0<>R1, R2 // 3021f2e0 + RSC.S R0->R1, R2 // 5021f2e0 + RSC.S R0@>R1, R2 // 7021f2e0 + +// ADD + ADD $255, 
R0, R1 // ff1080e2 + ADD $4278190080, R0, R1 // ff1480e2 + ADD.S $255, R0, R1 // ff1090e2 + ADD.S $4278190080, R0, R1 // ff1490e2 + ADD $255, R0 // ff0080e2 + ADD $4278190080, R0 // ff0480e2 + ADD.S $255, R0 // ff0090e2 + ADD.S $4278190080, R0 // ff0490e2 + ADD R0, R1, R2 // 002081e0 + ADD.S R0, R1, R2 // 002091e0 + ADD R0, R1 // 001081e0 + ADD.S R0, R1 // 001091e0 + ADD R0>>28, R1, R2 // 202e81e0 + ADD R0<<28, R1, R2 // 002e81e0 + ADD R0->28, R1, R2 // 402e81e0 + ADD R0@>28, R1, R2 // 602e81e0 + ADD.S R0>>28, R1, R2 // 202e91e0 + ADD.S R0<<28, R1, R2 // 002e91e0 + ADD.S R0->28, R1, R2 // 402e91e0 + ADD.S R0@>28, R1, R2 // 602e91e0 + ADD R0<<28, R1 // 001e81e0 + ADD R0>>28, R1 // 201e81e0 + ADD R0->28, R1 // 401e81e0 + ADD R0@>28, R1 // 601e81e0 + ADD.S R0<<28, R1 // 001e91e0 + ADD.S R0>>28, R1 // 201e91e0 + ADD.S R0->28, R1 // 401e91e0 + ADD.S R0@>28, R1 // 601e91e0 + ADD R0<>R1, R2, R3 // 303182e0 + ADD R0->R1, R2, R3 // 503182e0 + ADD R0@>R1, R2, R3 // 703182e0 + ADD.S R0<>R1, R2, R3 // 303192e0 + ADD.S R0->R1, R2, R3 // 503192e0 + ADD.S R0@>R1, R2, R3 // 703192e0 + ADD R0<>R1, R2 // 302182e0 + ADD R0->R1, R2 // 502182e0 + ADD R0@>R1, R2 // 702182e0 + ADD.S R0<>R1, R2 // 302192e0 + ADD.S R0->R1, R2 // 502192e0 + ADD.S R0@>R1, R2 // 702192e0 + +// ADC + ADC $255, R0, R1 // ff10a0e2 + ADC $4278190080, R0, R1 // ff14a0e2 + ADC.S $255, R0, R1 // ff10b0e2 + ADC.S $4278190080, R0, R1 // ff14b0e2 + ADC $255, R0 // ff00a0e2 + ADC $4278190080, R0 // ff04a0e2 + ADC.S $255, R0 // ff00b0e2 + ADC.S $4278190080, R0 // ff04b0e2 + ADC R0, R1, R2 // 0020a1e0 + ADC.S R0, R1, R2 // 0020b1e0 + ADC R0, R1 // 0010a1e0 + ADC.S R0, R1 // 0010b1e0 + ADC R0>>28, R1, R2 // 202ea1e0 + ADC R0<<28, R1, R2 // 002ea1e0 + ADC R0->28, R1, R2 // 402ea1e0 + ADC R0@>28, R1, R2 // 602ea1e0 + ADC.S R0>>28, R1, R2 // 202eb1e0 + ADC.S R0<<28, R1, R2 // 002eb1e0 + ADC.S R0->28, R1, R2 // 402eb1e0 + ADC.S R0@>28, R1, R2 // 602eb1e0 + ADC R0<<28, R1 // 001ea1e0 + ADC R0>>28, R1 // 201ea1e0 + ADC R0->28, 
R1 // 401ea1e0 + ADC R0@>28, R1 // 601ea1e0 + ADC.S R0<<28, R1 // 001eb1e0 + ADC.S R0>>28, R1 // 201eb1e0 + ADC.S R0->28, R1 // 401eb1e0 + ADC.S R0@>28, R1 // 601eb1e0 + ADC R0<>R1, R2, R3 // 3031a2e0 + ADC R0->R1, R2, R3 // 5031a2e0 + ADC R0@>R1, R2, R3 // 7031a2e0 + ADC.S R0<>R1, R2, R3 // 3031b2e0 + ADC.S R0->R1, R2, R3 // 5031b2e0 + ADC.S R0@>R1, R2, R3 // 7031b2e0 + ADC R0<>R1, R2 // 3021a2e0 + ADC R0->R1, R2 // 5021a2e0 + ADC R0@>R1, R2 // 7021a2e0 + ADC.S R0<>R1, R2 // 3021b2e0 + ADC.S R0->R1, R2 // 5021b2e0 + ADC.S R0@>R1, R2 // 7021b2e0 + +// TEQ + TEQ $255, R7 // ff0037e3 + TEQ $4278190080, R9 // ff0439e3 + TEQ R9<<30, R7 // 090f37e1 + TEQ R9>>30, R7 // 290f37e1 + TEQ R9->30, R7 // 490f37e1 + TEQ R9@>30, R7 // 690f37e1 + TEQ R9<>R8, R7 // 390837e1 + TEQ R9->R8, R7 // 590837e1 + TEQ R9@>R8, R7 // 790837e1 + +// TST + TST $255, R7 // ff0017e3 + TST $4278190080, R9 // ff0419e3 + TST R9<<30, R7 // 090f17e1 + TST R9>>30, R7 // 290f17e1 + TST R9->30, R7 // 490f17e1 + TST R9@>30, R7 // 690f17e1 + TST R9<>R8, R7 // 390817e1 + TST R9->R8, R7 // 590817e1 + TST R9@>R8, R7 // 790817e1 + +// CMP + CMP $255, R7 // ff0057e3 + CMP $4278190080, R9 // ff0459e3 + CMP R9<<30, R7 // 090f57e1 + CMP R9>>30, R7 // 290f57e1 + CMP R9->30, R7 // 490f57e1 + CMP R9@>30, R7 // 690f57e1 + CMP R9<>R8, R7 // 390857e1 + CMP R9->R8, R7 // 590857e1 + CMP R9@>R8, R7 // 790857e1 + +// CMN + CMN $255, R7 // ff0077e3 + CMN $4278190080, R9 // ff0479e3 + CMN R9<<30, R7 // 090f77e1 + CMN R9>>30, R7 // 290f77e1 + CMN R9->30, R7 // 490f77e1 + CMN R9@>30, R7 // 690f77e1 + CMN R9<>R8, R7 // 390877e1 + CMN R9->R8, R7 // 590877e1 + CMN R9@>R8, R7 // 790877e1 + +// B* + BEQ 14(PC) // BEQ 14(PC) // 0c00000a + BNE 13(PC) // BNE 13(PC) // 0b00001a + BCS 12(PC) // BCS 12(PC) // 0a00002a + BCC 11(PC) // BCC 11(PC) // 0900003a + BMI 10(PC) // BMI 10(PC) // 0800004a + BPL 9(PC) // BPL 9(PC) // 0700005a + BVS 8(PC) // BVS 8(PC) // 0600006a + BVC 7(PC) // BVC 7(PC) // 0500007a + BHI 6(PC) // BHI 6(PC) // 0400008a 
+ BLS 5(PC) // BLS 5(PC) // 0300009a + BGE 4(PC) // BGE 4(PC) // 020000aa + BLT 3(PC) // BLT 3(PC) // 010000ba + BGT 2(PC) // BGT 2(PC) // 000000ca + BLE 1(PC) // BLE 1(PC) // ffffffda + ADD $0, R0, R0 + B -1(PC) // JMP -1(PC) // fdffffea + B -2(PC) // JMP -2(PC) // fcffffea + B -3(PC) // JMP -3(PC) // fbffffea + B -4(PC) // JMP -4(PC) // faffffea + B -5(PC) // JMP -5(PC) // f9ffffea + B jmp_label_0 // JMP // 010000ea + B jmp_label_0 // JMP // 000000ea + B jmp_label_0 // JMP // ffffffea +jmp_label_0: + ADD $0, R0, R0 + BEQ jmp_label_0 // BEQ 519 // fdffff0a + BNE jmp_label_0 // BNE 519 // fcffff1a + BCS jmp_label_0 // BCS 519 // fbffff2a + BCC jmp_label_0 // BCC 519 // faffff3a + BMI jmp_label_0 // BMI 519 // f9ffff4a + BPL jmp_label_0 // BPL 519 // f8ffff5a + BVS jmp_label_0 // BVS 519 // f7ffff6a + BVC jmp_label_0 // BVC 519 // f6ffff7a + BHI jmp_label_0 // BHI 519 // f5ffff8a + BLS jmp_label_0 // BLS 519 // f4ffff9a + BGE jmp_label_0 // BGE 519 // f3ffffaa + BLT jmp_label_0 // BLT 519 // f2ffffba + BGT jmp_label_0 // BGT 519 // f1ffffca + BLE jmp_label_0 // BLE 519 // f0ffffda + B jmp_label_0 // JMP 519 // efffffea + B 0(PC) // JMP 0(PC) // feffffea +jmp_label_1: + B jmp_label_1 // JMP // feffffea + +// BL + BL.EQ 14(PC) // CALL.EQ 14(PC) // 0c00000b + BL.NE 13(PC) // CALL.NE 13(PC) // 0b00001b + BL.CS 12(PC) // CALL.CS 12(PC) // 0a00002b + BL.CC 11(PC) // CALL.CC 11(PC) // 0900003b + BL.MI 10(PC) // CALL.MI 10(PC) // 0800004b + BL.PL 9(PC) // CALL.PL 9(PC) // 0700005b + BL.VS 8(PC) // CALL.VS 8(PC) // 0600006b + BL.VC 7(PC) // CALL.VC 7(PC) // 0500007b + BL.HI 6(PC) // CALL.HI 6(PC) // 0400008b + BL.LS 5(PC) // CALL.LS 5(PC) // 0300009b + BL.GE 4(PC) // CALL.GE 4(PC) // 020000ab + BL.LT 3(PC) // CALL.LT 3(PC) // 010000bb + BL.GT 2(PC) // CALL.GT 2(PC) // 000000cb + BL.LE 1(PC) // CALL.LE 1(PC) // ffffffdb + ADD $0, R0, R0 + BL -1(PC) // CALL -1(PC) // fdffffeb + BL -2(PC) // CALL -2(PC) // fcffffeb + BL -3(PC) // CALL -3(PC) // fbffffeb + BL -4(PC) // CALL 
-4(PC) // faffffeb + BL -5(PC) // CALL -5(PC) // f9ffffeb + BL jmp_label_2 // CALL // 010000eb + BL jmp_label_2 // CALL // 000000eb + BL jmp_label_2 // CALL // ffffffeb +jmp_label_2: + ADD $0, R0, R0 + BL.EQ jmp_label_2 // CALL.EQ 560 // fdffff0b + BL.NE jmp_label_2 // CALL.NE 560 // fcffff1b + BL.CS jmp_label_2 // CALL.CS 560 // fbffff2b + BL.CC jmp_label_2 // CALL.CC 560 // faffff3b + BL.MI jmp_label_2 // CALL.MI 560 // f9ffff4b + BL.PL jmp_label_2 // CALL.PL 560 // f8ffff5b + BL.VS jmp_label_2 // CALL.VS 560 // f7ffff6b + BL.VC jmp_label_2 // CALL.VC 560 // f6ffff7b + BL.HI jmp_label_2 // CALL.HI 560 // f5ffff8b + BL.LS jmp_label_2 // CALL.LS 560 // f4ffff9b + BL.GE jmp_label_2 // CALL.GE 560 // f3ffffab + BL.LT jmp_label_2 // CALL.LT 560 // f2ffffbb + BL.GT jmp_label_2 // CALL.GT 560 // f1ffffcb + BL.LE jmp_label_2 // CALL.LE 560 // f0ffffdb + BL jmp_label_2 // CALL 560 // efffffeb + BL 0(PC) // CALL 0(PC) // feffffeb +jmp_label_3: + BL jmp_label_3 // CALL // feffffeb + +// BIC + BIC $255, R0, R1 // ff10c0e3 + BIC $4278190080, R0, R1 // ff14c0e3 + BIC.S $255, R0, R1 // ff10d0e3 + BIC.S $4278190080, R0, R1 // ff14d0e3 + BIC $255, R0 // ff00c0e3 + BIC $4278190080, R0 // ff04c0e3 + BIC.S $255, R0 // ff00d0e3 + BIC.S $4278190080, R0 // ff04d0e3 + BIC R0, R1, R2 // 0020c1e1 + BIC.S R0, R1, R2 // 0020d1e1 + BIC R0, R1 // 0010c1e1 + BIC.S R0, R1 // 0010d1e1 + BIC R0>>28, R1, R2 // 202ec1e1 + BIC R0<<28, R1, R2 // 002ec1e1 + BIC R0->28, R1, R2 // 402ec1e1 + BIC R0@>28, R1, R2 // 602ec1e1 + BIC.S R0>>28, R1, R2 // 202ed1e1 + BIC.S R0<<28, R1, R2 // 002ed1e1 + BIC.S R0->28, R1, R2 // 402ed1e1 + BIC.S R0@>28, R1, R2 // 602ed1e1 + BIC R0<<28, R1 // 001ec1e1 + BIC R0>>28, R1 // 201ec1e1 + BIC R0->28, R1 // 401ec1e1 + BIC R0@>28, R1 // 601ec1e1 + BIC.S R0<<28, R1 // 001ed1e1 + BIC.S R0>>28, R1 // 201ed1e1 + BIC.S R0->28, R1 // 401ed1e1 + BIC.S R0@>28, R1 // 601ed1e1 + BIC R0<>R1, R2, R3 // 3031c2e1 + BIC R0->R1, R2, R3 // 5031c2e1 + BIC R0@>R1, R2, R3 // 7031c2e1 + BIC.S 
R0<>R1, R2, R3 // 3031d2e1 + BIC.S R0->R1, R2, R3 // 5031d2e1 + BIC.S R0@>R1, R2, R3 // 7031d2e1 + BIC R0<>R1, R2 // 3021c2e1 + BIC R0->R1, R2 // 5021c2e1 + BIC R0@>R1, R2 // 7021c2e1 + BIC.S R0<>R1, R2 // 3021d2e1 + BIC.S R0->R1, R2 // 5021d2e1 + BIC.S R0@>R1, R2 // 7021d2e1 + +// SRL + SRL $0, R5, R6 // 0560a0e1 + SRL $1, R5, R6 // a560a0e1 + SRL $14, R5, R6 // 2567a0e1 + SRL $15, R5, R6 // a567a0e1 + SRL $30, R5, R6 // 256fa0e1 + SRL $31, R5, R6 // a56fa0e1 + SRL $32, R5, R6 // 2560a0e1 + SRL.S $14, R5, R6 // 2567b0e1 + SRL.S $15, R5, R6 // a567b0e1 + SRL.S $30, R5, R6 // 256fb0e1 + SRL.S $31, R5, R6 // a56fb0e1 + SRL $14, R5 // 2557a0e1 + SRL $15, R5 // a557a0e1 + SRL $30, R5 // 255fa0e1 + SRL $31, R5 // a55fa0e1 + SRL.S $14, R5 // 2557b0e1 + SRL.S $15, R5 // a557b0e1 + SRL.S $30, R5 // 255fb0e1 + SRL.S $31, R5 // a55fb0e1 + SRL R5, R6, R7 // 3675a0e1 + SRL.S R5, R6, R7 // 3675b0e1 + SRL R5, R7 // 3775a0e1 + SRL.S R5, R7 // 3775b0e1 + +// SRA + SRA $0, R5, R6 // 0560a0e1 + SRA $1, R5, R6 // c560a0e1 + SRA $14, R5, R6 // 4567a0e1 + SRA $15, R5, R6 // c567a0e1 + SRA $30, R5, R6 // 456fa0e1 + SRA $31, R5, R6 // c56fa0e1 + SRA $32, R5, R6 // 4560a0e1 + SRA.S $14, R5, R6 // 4567b0e1 + SRA.S $15, R5, R6 // c567b0e1 + SRA.S $30, R5, R6 // 456fb0e1 + SRA.S $31, R5, R6 // c56fb0e1 + SRA $14, R5 // 4557a0e1 + SRA $15, R5 // c557a0e1 + SRA $30, R5 // 455fa0e1 + SRA $31, R5 // c55fa0e1 + SRA.S $14, R5 // 4557b0e1 + SRA.S $15, R5 // c557b0e1 + SRA.S $30, R5 // 455fb0e1 + SRA.S $31, R5 // c55fb0e1 + SRA R5, R6, R7 // 5675a0e1 + SRA.S R5, R6, R7 // 5675b0e1 + SRA R5, R7 // 5775a0e1 + SRA.S R5, R7 // 5775b0e1 + +// SLL + SLL $0, R5, R6 // 0560a0e1 + SLL $1, R5, R6 // 8560a0e1 + SLL $14, R5, R6 // 0567a0e1 + SLL $15, R5, R6 // 8567a0e1 + SLL $30, R5, R6 // 056fa0e1 + SLL $31, R5, R6 // 856fa0e1 + SLL.S $14, R5, R6 // 0567b0e1 + SLL.S $15, R5, R6 // 8567b0e1 + SLL.S $30, R5, R6 // 056fb0e1 + SLL.S $31, R5, R6 // 856fb0e1 + SLL $14, R5 // 0557a0e1 + SLL $15, R5 // 8557a0e1 + SLL 
$30, R5 // 055fa0e1 + SLL $31, R5 // 855fa0e1 + SLL.S $14, R5 // 0557b0e1 + SLL.S $15, R5 // 8557b0e1 + SLL.S $30, R5 // 055fb0e1 + SLL.S $31, R5 // 855fb0e1 + SLL R5, R6, R7 // 1675a0e1 + SLL.S R5, R6, R7 // 1675b0e1 + SLL R5, R7 // 1775a0e1 + SLL.S R5, R7 // 1775b0e1 + +// Ops with zero shifts should encode as left shifts + ADD R0<<0, R1, R2 // 002081e0 + ADD R0>>0, R1, R2 // 002081e0 + ADD R0->0, R1, R2 // 002081e0 + ADD R0@>0, R1, R2 // 002081e0 + MOVW R0<<0(R1), R2 // 002091e7 + MOVW R0>>0(R1), R2 // 002091e7 + MOVW R0->0(R1), R2 // 002091e7 + MOVW R0@>0(R1), R2 // 002091e7 + MOVW R0, R1<<0(R2) // 010082e7 + MOVW R0, R1>>0(R2) // 010082e7 + MOVW R0, R1->0(R2) // 010082e7 + MOVW R0, R1@>0(R2) // 010082e7 + +// MULA / MULS + MULAWT R1, R2, R3, R4 // c23124e1 + MULAWB R1, R2, R3, R4 // 823124e1 + MULS R1, R2, R3, R4 // 923164e0 + MULA R1, R2, R3, R4 // 923124e0 + MULA.S R1, R2, R3, R4 // 923134e0 + MMULA R1, R2, R3, R4 // 123154e7 + MMULS R1, R2, R3, R4 // d23154e7 + MULABB R1, R2, R3, R4 // 823104e1 + MULAL R1, R2, (R4, R3) // 9231e4e0 + MULAL.S R1, R2, (R4, R3) // 9231f4e0 + MULALU R1, R2, (R4, R3) // 9231a4e0 + MULALU.S R1, R2, (R4, R3) // 9231b4e0 + +// MUL + MUL R2, R3, R4 // 930204e0 + MUL R2, R4 // 940204e0 + MUL R2, R4, R4 // 940204e0 + MUL.S R2, R3, R4 // 930214e0 + MUL.S R2, R4 // 940214e0 + MUL.S R2, R4, R4 // 940214e0 + MULU R5, R6, R7 // 960507e0 + MULU R5, R7 // 970507e0 + MULU R5, R7, R7 // 970507e0 + MULU.S R5, R6, R7 // 960517e0 + MULU.S R5, R7 // 970517e0 + MULU.S R5, R7, R7 // 970517e0 + MULLU R1, R2, (R4, R3) // 923184e0 + MULLU.S R1, R2, (R4, R3) // 923194e0 + MULL R1, R2, (R4, R3) // 9231c4e0 + MULL.S R1, R2, (R4, R3) // 9231d4e0 + MMUL R1, R2, R3 // 12f153e7 + MULBB R1, R2, R3 // 820163e1 + MULWB R1, R2, R3 // a20123e1 + MULWT R1, R2, R3 // e20123e1 + +// REV + REV R1, R2 // 312fbfe6 + REV16 R1, R2 // b12fbfe6 + REVSH R1, R2 // b12fffe6 + RBIT R1, R2 // 312fffe6 + +// XTAB/XTAH/XTABU/XTAHU + XTAB R2@>0, R8 // 7280a8e6 + XTAB R2@>8, R8 // 
7284a8e6 + XTAB R2@>16, R8 // 7288a8e6 + XTAB R2@>24, R8 // 728ca8e6 + XTAH R3@>0, R9 // 7390b9e6 + XTAH R3@>8, R9 // 7394b9e6 + XTAH R3@>16, R9 // 7398b9e6 + XTAH R3@>24, R9 // 739cb9e6 + XTABU R4@>0, R7 // 7470e7e6 + XTABU R4@>8, R7 // 7474e7e6 + XTABU R4@>16, R7 // 7478e7e6 + XTABU R4@>24, R7 // 747ce7e6 + XTAHU R5@>0, R1 // 7510f1e6 + XTAHU R5@>8, R1 // 7514f1e6 + XTAHU R5@>16, R1 // 7518f1e6 + XTAHU R5@>24, R1 // 751cf1e6 + XTAB R2@>0, R4, R8 // 7280a4e6 + XTAB R2@>8, R4, R8 // 7284a4e6 + XTAB R2@>16, R4, R8 // 7288a4e6 + XTAB R2@>24, R4, R8 // 728ca4e6 + XTAH R3@>0, R4, R9 // 7390b4e6 + XTAH R3@>8, R4, R9 // 7394b4e6 + XTAH R3@>16, R4, R9 // 7398b4e6 + XTAH R3@>24, R4, R9 // 739cb4e6 + XTABU R4@>0, R0, R7 // 7470e0e6 + XTABU R4@>8, R0, R7 // 7474e0e6 + XTABU R4@>16, R0, R7 // 7478e0e6 + XTABU R4@>24, R0, R7 // 747ce0e6 + XTAHU R5@>0, R9, R1 // 7510f9e6 + XTAHU R5@>8, R9, R1 // 7514f9e6 + XTAHU R5@>16, R9, R1 // 7518f9e6 + XTAHU R5@>24, R9, R1 // 751cf9e6 + +// DIVHW R0, R1, R2: R1 / R0 -> R2 + DIVHW R0, R1, R2 // 11f012e7 + DIVUHW R0, R1, R2 // 11f032e7 +// DIVHW R0, R1: R1 / R0 -> R1 + DIVHW R0, R1 // 11f011e7 + DIVUHW R0, R1 // 11f031e7 + +// misc + CLZ R1, R2 // 112f6fe1 + WORD $0 // 00000000 + WORD $4294967295 // ffffffff + WORD $2863311530 // aaaaaaaa + WORD $1431655765 // 55555555 + PLD 4080(R6) // f0ffd6f5 + PLD -4080(R9) // f0ff59f5 + RFE // 0080fde8 + SWPW R3, (R7), R9 // SWPW (R7), R3, R9 // 939007e1 + SWPBU R4, (R2), R8 // SWPBU (R2), R4, R8 // 948042e1 + SWI $0 // 000000ef + SWI $65535 // ffff00ef + SWI // 000000ef + +// BFX/BFXU/BFC/BFI + BFX $16, $8, R1, R2 // BFX $16, R1, $8, R2 // 5124afe7 + BFX $29, $2, R8 // 5881bce7 + BFXU $16, $8, R1, R2 // BFXU $16, R1, $8, R2 // 5124efe7 + BFXU $29, $2, R8 // 5881fce7 + BFC $29, $2, R8 // 1f81dee7 + BFI $29, $2, R8 // 1881dee7 + BFI $16, $8, R1, R2 // BFI $16, R1, $8, R2 // 1124d7e7 + +// synthetic arithmetic + ADD $0xffffffaa, R2, R3 // ADD $4294967210, R2, R3 // 55b0e0e30b3082e0 + ADD $0xffffff55, R5 
// ADD $4294967125, R5 // aab0e0e30b5085e0 + ADD.S $0xffffffab, R2, R3 // ADD.S $4294967211, R2, R3 // 54b0e0e30b3092e0 + ADD.S $0xffffff54, R5 // ADD.S $4294967124, R5 // abb0e0e30b5095e0 + ADC $0xffffffac, R2, R3 // ADC $4294967212, R2, R3 // 53b0e0e30b30a2e0 + ADC $0xffffff53, R5 // ADC $4294967123, R5 // acb0e0e30b50a5e0 + ADC.S $0xffffffad, R2, R3 // ADC.S $4294967213, R2, R3 // 52b0e0e30b30b2e0 + ADC.S $0xffffff52, R5 // ADC.S $4294967122, R5 // adb0e0e30b50b5e0 + SUB $0xffffffae, R2, R3 // SUB $4294967214, R2, R3 // 51b0e0e30b3042e0 + SUB $0xffffff51, R5 // SUB $4294967121, R5 // aeb0e0e30b5045e0 + SUB.S $0xffffffaf, R2, R3 // SUB.S $4294967215, R2, R3 // 50b0e0e30b3052e0 + SUB.S $0xffffff50, R5 // SUB.S $4294967120, R5 // afb0e0e30b5055e0 + SBC $0xffffffb0, R2, R3 // SBC $4294967216, R2, R3 // 4fb0e0e30b30c2e0 + SBC $0xffffff4f, R5 // SBC $4294967119, R5 // b0b0e0e30b50c5e0 + SBC.S $0xffffffb1, R2, R3 // SBC.S $4294967217, R2, R3 // 4eb0e0e30b30d2e0 + SBC.S $0xffffff4e, R5 // SBC.S $4294967118, R5 // b1b0e0e30b50d5e0 + RSB $0xffffffb2, R2, R3 // RSB $4294967218, R2, R3 // 4db0e0e30b3062e0 + RSB $0xffffff4d, R5 // RSB $4294967117, R5 // b2b0e0e30b5065e0 + RSB.S $0xffffffb3, R2, R3 // RSB.S $4294967219, R2, R3 // 4cb0e0e30b3072e0 + RSB.S $0xffffff4c, R5 // RSB.S $4294967116, R5 // b3b0e0e30b5075e0 + RSC $0xffffffb4, R2, R3 // RSC $4294967220, R2, R3 // 4bb0e0e30b30e2e0 + RSC $0xffffff4b, R5 // RSC $4294967115, R5 // b4b0e0e30b50e5e0 + RSC.S $0xffffffb5, R2, R3 // RSC.S $4294967221, R2, R3 // 4ab0e0e30b30f2e0 + RSC.S $0xffffff4a, R5 // RSC.S $4294967114, R5 // b5b0e0e30b50f5e0 + AND $0xffffffaa, R2, R3 // AND $4294967210, R2, R3 // 55b0e0e30b3002e0 + AND $0xffffff55, R5 // AND $4294967125, R5 // aab0e0e30b5005e0 + AND.S $0xffffffab, R2, R3 // AND.S $4294967211, R2, R3 // 54b0e0e30b3012e0 + AND.S $0xffffff54, R5 // AND.S $4294967124, R5 // abb0e0e30b5015e0 + ORR $0xffffffaa, R2, R3 // ORR $4294967210, R2, R3 // 55b0e0e30b3082e1 + ORR $0xffffff55, R5 // ORR 
$4294967125, R5 // aab0e0e30b5085e1 + ORR.S $0xffffffab, R2, R3 // ORR.S $4294967211, R2, R3 // 54b0e0e30b3092e1 + ORR.S $0xffffff54, R5 // ORR.S $4294967124, R5 // abb0e0e30b5095e1 + EOR $0xffffffaa, R2, R3 // EOR $4294967210, R2, R3 // 55b0e0e30b3022e0 + EOR $0xffffff55, R5 // EOR $4294967125, R5 // aab0e0e30b5025e0 + EOR.S $0xffffffab, R2, R3 // EOR.S $4294967211, R2, R3 // 54b0e0e30b3032e0 + EOR.S $0xffffff54, R5 // EOR.S $4294967124, R5 // abb0e0e30b5035e0 + BIC $0xffffffaa, R2, R3 // BIC $4294967210, R2, R3 // 55b0e0e30b30c2e1 + BIC $0xffffff55, R5 // BIC $4294967125, R5 // aab0e0e30b50c5e1 + BIC.S $0xffffffab, R2, R3 // BIC.S $4294967211, R2, R3 // 54b0e0e30b30d2e1 + BIC.S $0xffffff54, R5 // BIC.S $4294967124, R5 // abb0e0e30b50d5e1 + CMP $0xffffffab, R2 // CMP $4294967211, R2 // 54b0e0e30b0052e1 + CMN $0xffffffac, R3 // CMN $4294967212, R3 // 53b0e0e30b0073e1 + TST $0xffffffad, R4 // TST $4294967213, R4 // 52b0e0e30b0014e1 + TEQ $0xffffffae, R5 // TEQ $4294967214, R5 // 51b0e0e30b0035e1 + +// immediate decomposition + ADD $0xff0000ff, R0, R1 // ADD $4278190335, R0, R1 // ff1080e2ff1481e2 + EOR $0xff0000ff, R0, R1 // EOR $4278190335, R0, R1 // ff1020e2ff1421e2 + ORR $0xff0000ff, R0, R1 // ORR $4278190335, R0, R1 // ff1080e3ff1481e3 + SUB $0xff0000ff, R0, R1 // SUB $4278190335, R0, R1 // ff1040e2ff1441e2 + BIC $0xff0000ff, R0, R1 // BIC $4278190335, R0, R1 // ff10c0e3ff14c1e3 + RSB $0xff0000ff, R0, R1 // RSB $4278190335, R0, R1 // ff1060e2ff1481e2 + ADC $0xff0000ff, R0, R1 // ADC $4278190335, R0, R1 // ff10a0e2ff1481e2 + SBC $0xff0000ff, R0, R1 // SBC $4278190335, R0, R1 // ff10c0e2ff1441e2 + RSC $0xff0000ff, R0, R1 // RSC $4278190335, R0, R1 // ff10e0e2ff1481e2 + ADD $0x000fffff, R0, R1 // ADD $1048575, R0, R1 // 011680e2011041e2 + ADC $0x000fffff, R0, R1 // ADC $1048575, R0, R1 // 0116a0e2011041e2 + SUB $0x000fffff, R0, R1 // SUB $1048575, R0, R1 // 011640e2011081e2 + SBC $0x000fffff, R0, R1 // SBC $1048575, R0, R1 // 0116c0e2011081e2 + RSB $0x000fffff, R0, 
R1 // RSB $1048575, R0, R1 // 011660e2011041e2 + RSC $0x000fffff, R0, R1 // RSC $1048575, R0, R1 // 0116e0e2011041e2 + ADD $0xff0000ff, R1 // ADD $4278190335, R1 // ff1081e2ff1481e2 + EOR $0xff0000ff, R1 // EOR $4278190335, R1 // ff1021e2ff1421e2 + ORR $0xff0000ff, R1 // ORR $4278190335, R1 // ff1081e3ff1481e3 + SUB $0xff0000ff, R1 // SUB $4278190335, R1 // ff1041e2ff1441e2 + BIC $0xff0000ff, R1 // BIC $4278190335, R1 // ff10c1e3ff14c1e3 + RSB $0xff0000ff, R1 // RSB $4278190335, R1 // ff1061e2ff1481e2 + ADC $0xff0000ff, R1 // ADC $4278190335, R1 // ff10a1e2ff1481e2 + SBC $0xff0000ff, R1 // SBC $4278190335, R1 // ff10c1e2ff1441e2 + RSC $0xff0000ff, R1 // RSC $4278190335, R1 // ff10e1e2ff1481e2 + ADD $0x000fffff, R1 // ADD $1048575, R1 // 011681e2011041e2 + ADC $0x000fffff, R1 // ADC $1048575, R1 // 0116a1e2011041e2 + SUB $0x000fffff, R1 // SUB $1048575, R1 // 011641e2011081e2 + SBC $0x000fffff, R1 // SBC $1048575, R1 // 0116c1e2011081e2 + RSB $0x000fffff, R1 // RSB $1048575, R1 // 011661e2011041e2 + RSC $0x000fffff, R1 // RSC $1048575, R1 // 0116e1e2011041e2 + +// MVN + MVN $0xff, R1 // MVN $255, R1 // ff10e0e3 + MVN $0xff000000, R1 // MVN $4278190080, R1 // ff14e0e3 + MVN R9<<30, R7 // 097fe0e1 + MVN R9>>30, R7 // 297fe0e1 + MVN R9->30, R7 // 497fe0e1 + MVN R9@>30, R7 // 697fe0e1 + MVN.S R9<<30, R7 // 097ff0e1 + MVN.S R9>>30, R7 // 297ff0e1 + MVN.S R9->30, R7 // 497ff0e1 + MVN.S R9@>30, R7 // 697ff0e1 + MVN R9<>R8, R7 // 3978e0e1 + MVN R9->R8, R7 // 5978e0e1 + MVN R9@>R8, R7 // 7978e0e1 + MVN.S R9<>R8, R7 // 3978f0e1 + MVN.S R9->R8, R7 // 5978f0e1 + MVN.S R9@>R8, R7 // 7978f0e1 + MVN $0xffffffbe, R5 // MVN $4294967230, R5 // 4150a0e3 + +// MOVM + MOVM.IA [R0,R2,R4,R6], (R1) // MOVM.U [R0,R2,R4,R6], (R1) // 550081e8 + MOVM.IA [R0-R4,R6,R8,R9-R11], (R1) // MOVM.U [R0,R1,R2,R3,R4,R6,R8,R9,g,R11], (R1) // 5f0f81e8 + MOVM.IA.W [R0,R2,R4,R6], (R1) // MOVM.W.U [R0,R2,R4,R6], (R1) // 5500a1e8 + MOVM.IA.W [R0-R4,R6,R8,R9-R11], (R1) // MOVM.W.U 
[R0,R1,R2,R3,R4,R6,R8,R9,g,R11], (R1) // 5f0fa1e8 + MOVM.IA (R1), [R0,R2,R4,R6] // MOVM.U (R1), [R0,R2,R4,R6] // 550091e8 + MOVM.IA (R1), [R0-R4,R6,R8,R9-R11] // MOVM.U (R1), [R0,R1,R2,R3,R4,R6,R8,R9,g,R11] // 5f0f91e8 + MOVM.IA.W (R1), [R0,R2,R4,R6] // MOVM.W.U (R1), [R0,R2,R4,R6] // 5500b1e8 + MOVM.IA.W (R1), [R0-R4,R6,R8,R9-R11] // MOVM.W.U (R1), [R0,R1,R2,R3,R4,R6,R8,R9,g,R11] // 5f0fb1e8 + MOVM.DA [R0,R2,R4,R6], (R1) // MOVM [R0,R2,R4,R6], (R1) // 550001e8 + MOVM.DA [R0-R4,R6,R8,R9-R11], (R1) // MOVM [R0,R1,R2,R3,R4,R6,R8,R9,g,R11], (R1) // 5f0f01e8 + MOVM.DA.W [R0,R2,R4,R6], (R1) // MOVM.W [R0,R2,R4,R6], (R1) // 550021e8 + MOVM.DA.W [R0-R4,R6,R8,R9-R11], (R1) // MOVM.W [R0,R1,R2,R3,R4,R6,R8,R9,g,R11], (R1) // 5f0f21e8 + MOVM.DA (R1), [R0,R2,R4,R6] // MOVM (R1), [R0,R2,R4,R6] // 550011e8 + MOVM.DA (R1), [R0-R4,R6,R8,R9-R11] // MOVM (R1), [R0,R1,R2,R3,R4,R6,R8,R9,g,R11] // 5f0f11e8 + MOVM.DA.W (R1), [R0,R2,R4,R6] // MOVM.W (R1), [R0,R2,R4,R6] // 550031e8 + MOVM.DA.W (R1), [R0-R4,R6,R8,R9-R11] // MOVM.W (R1), [R0,R1,R2,R3,R4,R6,R8,R9,g,R11] // 5f0f31e8 + MOVM.DB [R0,R2,R4,R6], (R1) // MOVM.P [R0,R2,R4,R6], (R1) // 550001e9 + MOVM.DB [R0-R4,R6,R8,R9-R11], (R1) // MOVM.P [R0,R1,R2,R3,R4,R6,R8,R9,g,R11], (R1) // 5f0f01e9 + MOVM.DB.W [R0,R2,R4,R6], (R1) // MOVM.P.W [R0,R2,R4,R6], (R1) // 550021e9 + MOVM.DB.W [R0-R4,R6,R8,R9-R11], (R1) // MOVM.P.W [R0,R1,R2,R3,R4,R6,R8,R9,g,R11], (R1) // 5f0f21e9 + MOVM.DB (R1), [R0,R2,R4,R6] // MOVM.P (R1), [R0,R2,R4,R6] // 550011e9 + MOVM.DB (R1), [R0-R4,R6,R8,R9-R11] // MOVM.P (R1), [R0,R1,R2,R3,R4,R6,R8,R9,g,R11] // 5f0f11e9 + MOVM.DB.W (R1), [R0,R2,R4,R6] // MOVM.P.W (R1), [R0,R2,R4,R6] // 550031e9 + MOVM.DB.W (R1), [R0-R4,R6,R8,R9-R11] // MOVM.P.W (R1), [R0,R1,R2,R3,R4,R6,R8,R9,g,R11] // 5f0f31e9 + MOVM.IB [R0,R2,R4,R6], (g) // MOVM.P.U [R0,R2,R4,R6], (g) // 55008ae9 + MOVM.IB [R0-R4,R6,R8,R9-R11], (g) // MOVM.P.U [R0,R1,R2,R3,R4,R6,R8,R9,g,R11], (g) // 5f0f8ae9 + MOVM.IB.W [R0,R2,R4,R6], (g) // MOVM.P.W.U [R0,R2,R4,R6], (g) // 
5500aae9 + MOVM.IB.W [R0-R4,R6,R8,R9-R11], (g) // MOVM.P.W.U [R0,R1,R2,R3,R4,R6,R8,R9,g,R11], (g) // 5f0faae9 + MOVM.IB (g), [R0,R2,R4,R6] // MOVM.P.U (g), [R0,R2,R4,R6] // 55009ae9 + MOVM.IB (g), [R0-R4,R6,R8,R9-R11] // MOVM.P.U (g), [R0,R1,R2,R3,R4,R6,R8,R9,g,R11] // 5f0f9ae9 + MOVM.IB.W (g), [R0,R2,R4,R6] // MOVM.P.W.U (g), [R0,R2,R4,R6] // 5500bae9 + MOVM.IB.W (g), [R0-R4,R6,R8,R9-R11] // MOVM.P.W.U (g), [R0,R1,R2,R3,R4,R6,R8,R9,g,R11] // 5f0fbae9 + +// MOVW + MOVW R3, R4 // 0340a0e1 + MOVW.S R3, R4 // 0340b0e1 + MOVW R9, R2 // 0920a0e1 + MOVW.S R9, R2 // 0920b0e1 + MOVW R5>>1, R2 // a520a0e1 + MOVW.S R5>>1, R2 // a520b0e1 + MOVW R5<<1, R2 // 8520a0e1 + MOVW.S R5<<1, R2 // 8520b0e1 + MOVW R5->1, R2 // c520a0e1 + MOVW.S R5->1, R2 // c520b0e1 + MOVW R5@>1, R2 // e520a0e1 + MOVW.S R5@>1, R2 // e520b0e1 + MOVW $0xff, R9 // MOVW $255, R9 // ff90a0e3 + MOVW $0xff000000, R9 // MOVW $4278190080, R9 // ff94a0e3 + MOVW $0xff(R0), R1 // MOVW $255(R0), R1 // ff1080e2 + MOVW.S $0xff(R0), R1 // MOVW.S $255(R0), R1 // ff1090e2 + MOVW $-0xff(R0), R1 // MOVW $-255(R0), R1 // ff1040e2 + MOVW.S $-0xff(R0), R1 // MOVW.S $-255(R0), R1 // ff1050e2 + MOVW $0xffffffae, R1 // MOVW $4294967214, R1 // 5110e0e3 + MOVW $0xaaaaaaaa, R1 // MOVW $2863311530, R1 + MOVW R1, (R2) // 001082e5 + MOVW.P R1, (R2) // 001082e4 + MOVW.W R1, (R2) // 0010a2e5 + MOVW R1, 0x20(R2) // MOVW R1, 32(R2) // 201082e5 + MOVW.P R1, 0x20(R2) // MOVW.P R1, 32(R2) // 201082e4 + MOVW.W R1, 0x20(R2) // MOVW.W R1, 32(R2) // 2010a2e5 + MOVW R1, -0x20(R2) // MOVW R1, -32(R2) // 201002e5 + MOVW.P R1, -0x20(R2) // MOVW.P R1, -32(R2) // 201002e4 + MOVW.W R1, -0x20(R2) // MOVW.W R1, -32(R2) // 201022e5 + MOVW (R2), R1 // 001092e5 + MOVW.P (R2), R1 // 001092e4 + MOVW.W (R2), R1 // 0010b2e5 + MOVW 0x20(R2), R1 // MOVW 32(R2), R1 // 201092e5 + MOVW.P 0x20(R2), R1 // MOVW.P 32(R2), R1 // 201092e4 + MOVW.W 0x20(R2), R1 // MOVW.W 32(R2), R1 // 2010b2e5 + MOVW -0x20(R2), R1 // MOVW -32(R2), R1 // 201012e5 + MOVW.P -0x20(R2), R1 // 
MOVW.P -32(R2), R1 // 201012e4 + MOVW.W -0x20(R2), R1 // MOVW.W -32(R2), R1 // 201032e5 + MOVW R1, 0x00ffffff(R2) // MOVW R1, 16777215(R2) + MOVW 0x00ffffff(R2), R1 // MOVW 16777215(R2), R1 + MOVW CPSR, R1 // 00100fe1 + MOVW R1, CPSR // 01f02ce1 + MOVW $0xff, CPSR // MOVW $255, CPSR // fff02ce3 + MOVW $0xff000000, CPSR // MOVW $4278190080, CPSR // fff42ce3 + MOVW FPSR, R9 // 109af1ee + MOVW FPSR, g // 10aaf1ee + MOVW R9, FPSR // 109ae1ee + MOVW g, FPSR // 10aae1ee + MOVW R0>>28(R1), R2 // 202e91e7 + MOVW R0<<28(R1), R2 // 002e91e7 + MOVW R0->28(R1), R2 // 402e91e7 + MOVW R0@>28(R1), R2 // 602e91e7 + MOVW.U R0>>28(R1), R2 // 202e11e7 + MOVW.U R0<<28(R1), R2 // 002e11e7 + MOVW.U R0->28(R1), R2 // 402e11e7 + MOVW.U R0@>28(R1), R2 // 602e11e7 + MOVW.W R0>>28(R1), R2 // 202eb1e7 + MOVW.W R0<<28(R1), R2 // 002eb1e7 + MOVW.W R0->28(R1), R2 // 402eb1e7 + MOVW.W R0@>28(R1), R2 // 602eb1e7 + MOVW.P R0>>28(g), R2 // 202e9ae6 + MOVW.P R0<<28(g), R2 // 002e9ae6 + MOVW.P R0->28(g), R2 // 402e9ae6 + MOVW.P R0@>28(g), R2 // 602e9ae6 + MOVW R2, R0>>28(R1) // 202e81e7 + MOVW R2, R0<<28(R1) // 002e81e7 + MOVW R2, R0->28(R1) // 402e81e7 + MOVW R2, R0@>28(R1) // 602e81e7 + MOVW.U R2, R0>>28(R1) // 202e01e7 + MOVW.U R2, R0<<28(R1) // 002e01e7 + MOVW.U R2, R0->28(R1) // 402e01e7 + MOVW.U R2, R0@>28(R1) // 602e01e7 + MOVW.W R2, R0>>28(R1) // 202ea1e7 + MOVW.W R2, R0<<28(R1) // 002ea1e7 + MOVW.W R2, R0->28(R1) // 402ea1e7 + MOVW.W R2, R0@>28(R1) // 602ea1e7 + MOVW.P R2, R0>>28(R5) // 202e85e6 + MOVW.P R2, R0<<28(R5) // 002e85e6 + MOVW.P R2, R0->28(R5) // 402e85e6 + MOVW.P R2, R0@>28(R5) // 602e85e6 + MOVW R0, math·Exp(SB) // MOVW R0, math.Exp(SB) + MOVW math·Exp(SB), R0 // MOVW math.Exp(SB), R0 + +// MOVB + MOVB R3, R4 // 0340a0e1 + MOVB R9, R2 // 0920a0e1 + MOVBU R0, R1 // ff1000e2 + MOVBS R5, R6 // 056ca0e1466ca0e1 + MOVB R1, (R2) // 0010c2e5 + MOVB.P R1, (R2) // 0010c2e4 + MOVB.W R1, (R2) // 0010e2e5 + MOVB R1, 0x20(R2) // MOVB R1, 32(R2) // 2010c2e5 + MOVB.P R1, 0x20(R2) // MOVB.P R1, 
32(R2) // 2010c2e4 + MOVB.W R1, 0x20(R2) // MOVB.W R1, 32(R2) // 2010e2e5 + MOVB R1, -0x20(R2) // MOVB R1, -32(R2) // 201042e5 + MOVB.P R1, -0x20(R2) // MOVB.P R1, -32(R2) // 201042e4 + MOVB.W R1, -0x20(R2) // MOVB.W R1, -32(R2) // 201062e5 + MOVBS R1, (R2) // 0010c2e5 + MOVBS.P R1, (R2) // 0010c2e4 + MOVBS.W R1, (R2) // 0010e2e5 + MOVBS R1, 0x20(R2) // MOVBS R1, 32(R2) // 2010c2e5 + MOVBS.P R1, 0x20(R2) // MOVBS.P R1, 32(R2) // 2010c2e4 + MOVBS.W R1, 0x20(R2) // MOVBS.W R1, 32(R2) // 2010e2e5 + MOVBS R1, -0x20(R2) // MOVBS R1, -32(R2) // 201042e5 + MOVBS.P R1, -0x20(R2) // MOVBS.P R1, -32(R2) // 201042e4 + MOVBS.W R1, -0x20(R2) // MOVBS.W R1, -32(R2) // 201062e5 + MOVBU R1, (R2) // 0010c2e5 + MOVBU.P R1, (R2) // 0010c2e4 + MOVBU.W R1, (R2) // 0010e2e5 + MOVBU R1, 0x20(R2) // MOVBU R1, 32(R2) // 2010c2e5 + MOVBU.P R1, 0x20(R2) // MOVBU.P R1, 32(R2) // 2010c2e4 + MOVBU.W R1, 0x20(R2) // MOVBU.W R1, 32(R2) // 2010e2e5 + MOVBU R1, -0x20(R2) // MOVBU R1, -32(R2) // 201042e5 + MOVBU.P R1, -0x20(R2) // MOVBU.P R1, -32(R2) // 201042e4 + MOVBU.W R1, -0x20(R2) // MOVBU.W R1, -32(R2) // 201062e5 + MOVB (R2), R1 // d010d2e1 + MOVB.P (R2), R1 // d010d2e0 + MOVB.W (R2), R1 // d010f2e1 + MOVB 0x20(R2), R1 // MOVB 32(R2), R1 // d012d2e1 + MOVB.P 0x20(R2), R1 // MOVB.P 32(R2), R1 // d012d2e0 + MOVB.W 0x20(R2), R1 // MOVB.W 32(R2), R1 // d012f2e1 + MOVB -0x20(R2), R1 // MOVB -32(R2), R1 // d01252e1 + MOVB.P -0x20(R2), R1 // MOVB.P -32(R2), R1 // d01252e0 + MOVB.W -0x20(R2), R1 // MOVB.W -32(R2), R1 // d01272e1 + MOVBS (R2), R1 // d010d2e1 + MOVBS.P (R2), R1 // d010d2e0 + MOVBS.W (R2), R1 // d010f2e1 + MOVBS 0x20(R2), R1 // MOVBS 32(R2), R1 // d012d2e1 + MOVBS.P 0x20(R2), R1 // MOVBS.P 32(R2), R1 // d012d2e0 + MOVBS.W 0x20(R2), R1 // MOVBS.W 32(R2), R1 // d012f2e1 + MOVBS -0x20(R2), R1 // MOVBS -32(R2), R1 // d01252e1 + MOVBS.P -0x20(R2), R1 // MOVBS.P -32(R2), R1 // d01252e0 + MOVBS.W -0x20(R2), R1 // MOVBS.W -32(R2), R1 // d01272e1 + MOVBU (R2), R1 // 0010d2e5 + MOVBU.P (R2), R1 
// 0010d2e4 + MOVBU.W (R2), R1 // 0010f2e5 + MOVBU 0x20(R2), R1 // MOVBU 32(R2), R1 // 2010d2e5 + MOVBU.P 0x20(R2), R1 // MOVBU.P 32(R2), R1 // 2010d2e4 + MOVBU.W 0x20(R2), R1 // MOVBU.W 32(R2), R1 // 2010f2e5 + MOVBU -0x20(R2), R1 // MOVBU -32(R2), R1 // 201052e5 + MOVBU.P -0x20(R2), R1 // MOVBU.P -32(R2), R1 // 201052e4 + MOVBU.W -0x20(R2), R1 // MOVBU.W -32(R2), R1 // 201072e5 + MOVB R1, 0x00ffffff(R2) // MOVB R1, 16777215(R2) + MOVB.W R1, 0x00ffffff(R2) // MOVB.W R1, 16777215(R2) + MOVB.P R1, 0x00ffffff(R2) // MOVB.P R1, 16777215(R2) + MOVB R1, -0x00ffffff(R2) // MOVB R1, -16777215(R2) + MOVB.W R1, -0x00ffffff(R2) // MOVB.W R1, -16777215(R2) + MOVB.P R1, -0x00ffffff(R2) // MOVB.P R1, -16777215(R2) + MOVB 0x00ffffff(R2), R1 // MOVB 16777215(R2), R1 + MOVB.P 0x00ffffff(R2), R1 // MOVB.P 16777215(R2), R1 + MOVB.W 0x00ffffff(R2), R1 // MOVB.W 16777215(R2), R1 + MOVB -0x00ffffff(R2), R1 // MOVB -16777215(R2), R1 + MOVB.P -0x00ffffff(R2), R1 // MOVB.P -16777215(R2), R1 + MOVB.W -0x00ffffff(R2), R1 // MOVB.W -16777215(R2), R1 + MOVBS R1, 0x00ffffff(R2) // MOVBS R1, 16777215(R2) + MOVBS.W R1, 0x00ffffff(R2) // MOVBS.W R1, 16777215(R2) + MOVBS.P R1, 0x00ffffff(R2) // MOVBS.P R1, 16777215(R2) + MOVBS R1, -0x00ffffff(R2) // MOVBS R1, -16777215(R2) + MOVBS.W R1, -0x00ffffff(R2) // MOVBS.W R1, -16777215(R2) + MOVBS.P R1, -0x00ffffff(R2) // MOVBS.P R1, -16777215(R2) + MOVBS 0x00ffffff(R2), R1 // MOVBS 16777215(R2), R1 + MOVBS.P 0x00ffffff(R2), R1 // MOVBS.P 16777215(R2), R1 + MOVBS.W 0x00ffffff(R2), R1 // MOVBS.W 16777215(R2), R1 + MOVBS -0x00ffffff(R2), R1 // MOVBS -16777215(R2), R1 + MOVBS.P -0x00ffffff(R2), R1 // MOVBS.P -16777215(R2), R1 + MOVBS.W -0x00ffffff(R2), R1 // MOVBS.W -16777215(R2), R1 + MOVBU R1, 0x00ffffff(R2) // MOVBU R1, 16777215(R2) + MOVBU.W R1, 0x00ffffff(R2) // MOVBU.W R1, 16777215(R2) + MOVBU.P R1, 0x00ffffff(R2) // MOVBU.P R1, 16777215(R2) + MOVBU R1, -0x00ffffff(R2) // MOVBU R1, -16777215(R2) + MOVBU.W R1, -0x00ffffff(R2) // MOVBU.W R1, -16777215(R2) 
+ MOVBU.P R1, -0x00ffffff(R2) // MOVBU.P R1, -16777215(R2) + MOVBU 0x00ffffff(R2), R1 // MOVBU 16777215(R2), R1 + MOVBU.P 0x00ffffff(R2), R1 // MOVBU.P 16777215(R2), R1 + MOVBU.W 0x00ffffff(R2), R1 // MOVBU.W 16777215(R2), R1 + MOVBU -0x00ffffff(R2), R1 // MOVBU -16777215(R2), R1 + MOVBU.P -0x00ffffff(R2), R1 // MOVBU.P -16777215(R2), R1 + MOVBU.W -0x00ffffff(R2), R1 // MOVBU.W -16777215(R2), R1 + MOVB R0, math·Exp(SB) // MOVB R0, math.Exp(SB) + MOVB math·Exp(SB), R0 // MOVB math.Exp(SB), R0 + MOVBS R0, math·Exp(SB) // MOVBS R0, math.Exp(SB) + MOVBS math·Exp(SB), R0 // MOVBS math.Exp(SB), R0 + MOVBU R0, math·Exp(SB) // MOVBU R0, math.Exp(SB) + MOVBU math·Exp(SB), R0 // MOVBU math.Exp(SB), R0 + MOVB R2, R0>>28(R1) // 202ec1e7 + MOVB R2, R0<<28(R1) // 002ec1e7 + MOVB R2, R0->28(R1) // 402ec1e7 + MOVB R2, R0@>28(R1) // 602ec1e7 + MOVB.U R2, R0>>28(R1) // 202e41e7 + MOVB.U R2, R0<<28(R1) // 002e41e7 + MOVB.U R2, R0->28(R1) // 402e41e7 + MOVB.U R2, R0@>28(R1) // 602e41e7 + MOVB.W R2, R0>>28(R1) // 202ee1e7 + MOVB.W R2, R0<<28(R1) // 002ee1e7 + MOVB.W R2, R0->28(R1) // 402ee1e7 + MOVB.W R2, R0@>28(R1) // 602ee1e7 + MOVB.P R2, R0>>28(R5) // 202ec5e6 + MOVB.P R2, R0<<28(R5) // 002ec5e6 + MOVB.P R2, R0->28(R5) // 402ec5e6 + MOVB.P R2, R0@>28(R5) // 602ec5e6 + MOVBS R2, R0>>28(R1) // 202ec1e7 + MOVBS R2, R0<<28(R1) // 002ec1e7 + MOVBS R2, R0->28(R1) // 402ec1e7 + MOVBS R2, R0@>28(R1) // 602ec1e7 + MOVBS.U R2, R0>>28(R1) // 202e41e7 + MOVBS.U R2, R0<<28(R1) // 002e41e7 + MOVBS.U R2, R0->28(R1) // 402e41e7 + MOVBS.U R2, R0@>28(R1) // 602e41e7 + MOVBS.W R2, R0>>28(R1) // 202ee1e7 + MOVBS.W R2, R0<<28(R1) // 002ee1e7 + MOVBS.W R2, R0->28(R1) // 402ee1e7 + MOVBS.W R2, R0@>28(R1) // 602ee1e7 + MOVBS.P R2, R0>>28(R5) // 202ec5e6 + MOVBS.P R2, R0<<28(R5) // 002ec5e6 + MOVBS.P R2, R0->28(R5) // 402ec5e6 + MOVBS.P R2, R0@>28(R5) // 602ec5e6 + MOVBU R2, R0>>28(R1) // 202ec1e7 + MOVBU R2, R0<<28(R1) // 002ec1e7 + MOVBU R2, R0->28(R1) // 402ec1e7 + MOVBU R2, R0@>28(R1) // 602ec1e7 + 
MOVBU.U R2, R0>>28(R1) // 202e41e7 + MOVBU.U R2, R0<<28(R1) // 002e41e7 + MOVBU.U R2, R0->28(R1) // 402e41e7 + MOVBU.U R2, R0@>28(R1) // 602e41e7 + MOVBU.W R2, R0>>28(R1) // 202ee1e7 + MOVBU.W R2, R0<<28(R1) // 002ee1e7 + MOVBU.W R2, R0->28(R1) // 402ee1e7 + MOVBU.W R2, R0@>28(R1) // 602ee1e7 + MOVBU.P R2, R0>>28(R5) // 202ec5e6 + MOVBU.P R2, R0<<28(R5) // 002ec5e6 + MOVBU.P R2, R0->28(R5) // 402ec5e6 + MOVBU.P R2, R0@>28(R5) // 602ec5e6 + MOVBU R0>>28(R1), R2 // 202ed1e7 + MOVBU R0<<28(R1), R2 // 002ed1e7 + MOVBU R0->28(R1), R2 // 402ed1e7 + MOVBU R0@>28(R1), R2 // 602ed1e7 + MOVBU.U R0>>28(R1), R2 // 202e51e7 + MOVBU.U R0<<28(R1), R2 // 002e51e7 + MOVBU.U R0->28(R1), R2 // 402e51e7 + MOVBU.U R0@>28(R1), R2 // 602e51e7 + MOVBU.W R0>>28(R1), R2 // 202ef1e7 + MOVBU.W R0<<28(R1), R2 // 002ef1e7 + MOVBU.W R0->28(R1), R2 // 402ef1e7 + MOVBU.W R0@>28(R1), R2 // 602ef1e7 + MOVBU.P R0>>28(g), R2 // 202edae6 + MOVBU.P R0<<28(g), R2 // 002edae6 + MOVBU.P R0->28(g), R2 // 402edae6 + MOVBU.P R0@>28(g), R2 // 602edae6 + MOVBS R0<<0(R1), R2 // d02091e1 + MOVBS.U R0<<0(R1), R2 // d02011e1 + MOVBS.W R0<<0(R1), R2 // d020b1e1 + MOVBS.P R0<<0(R1), R2 // d02091e0 + MOVB R0<<0(R1), R2 // d02091e1 + MOVB.U R0<<0(R1), R2 // d02011e1 + MOVB.W R0<<0(R1), R2 // d020b1e1 + MOVB.P R0<<0(R1), R2 // d02091e0 + MOVBS R2@>0, R8 // 7280afe6 + MOVBS R2@>8, R8 // 7284afe6 + MOVBS R2@>16, R8 // 7288afe6 + MOVBS R2@>24, R8 // 728cafe6 + MOVB R2@>0, R8 // 7280afe6 + MOVB R2@>8, R8 // 7284afe6 + MOVB R2@>16, R8 // 7288afe6 + MOVB R2@>24, R8 // 728cafe6 + MOVBU R4@>0, R7 // 7470efe6 + MOVBU R4@>8, R7 // 7474efe6 + MOVBU R4@>16, R7 // 7478efe6 + MOVBU R4@>24, R7 // 747cefe6 + +// MOVH + MOVH R3, R4 // 0340a0e1 + MOVH R9, R2 // 0920a0e1 + MOVHS R5, R6 // 0568a0e14668a0e1 + MOVHU R5, R6 // 0568a0e12668a0e1 + MOVH R4, (R3) // b040c3e1 + MOVHS.W R4, (R3) // b040e3e1 + MOVHS.P R4, (R3) // b040c3e0 + MOVHS R4, (R3) // b040c3e1 + MOVHS.W R4, (R3) // b040e3e1 + MOVHS.P R4, (R3) // b040c3e0 + MOVHU R4, (R3) // 
b040c3e1 + MOVHU.W R4, (R3) // b040e3e1 + MOVHU.P R4, (R3) // b040c3e0 + MOVH R3, 0x20(R4) // MOVH R3, 32(R4) // b032c4e1 + MOVH.W R3, 0x20(R4) // MOVH.W R3, 32(R4) // b032e4e1 + MOVH.P R3, 0x20(R4) // MOVH.P R3, 32(R4) // b032c4e0 + MOVHS R3, 0x20(R4) // MOVHS R3, 32(R4) // b032c4e1 + MOVHS.W R3, 0x20(R4) // MOVHS.W R3, 32(R4) // b032e4e1 + MOVHS.P R3, 0x20(R4) // MOVHS.P R3, 32(R4) // b032c4e0 + MOVHU R3, 0x20(R4) // MOVHU R3, 32(R4) // b032c4e1 + MOVHU.W R3, 0x20(R4) // MOVHU.W R3, 32(R4) // b032e4e1 + MOVHU.P R3, 0x20(R4) // MOVHU.P R3, 32(R4) // b032c4e0 + MOVH R3, -0x20(R4) // MOVH R3, -32(R4) // b03244e1 + MOVH.W R3, -0x20(R4) // MOVH.W R3, -32(R4) // b03264e1 + MOVH.P R3, -0x20(R4) // MOVH.P R3, -32(R4) // b03244e0 + MOVHS R3, -0x20(R4) // MOVHS R3, -32(R4) // b03244e1 + MOVHS.W R3, -0x20(R4) // MOVHS.W R3, -32(R4) // b03264e1 + MOVHS.P R3, -0x20(R4) // MOVHS.P R3, -32(R4) // b03244e0 + MOVHU R3, -0x20(R4) // MOVHU R3, -32(R4) // b03244e1 + MOVHU.W R3, -0x20(R4) // MOVHU.W R3, -32(R4) // b03264e1 + MOVHU.P R3, -0x20(R4) // MOVHU.P R3, -32(R4) // b03244e0 + MOVHU (R9), R8 // b080d9e1 + MOVHU.W (R9), R8 // b080f9e1 + MOVHU.P (R9), R8 // b080d9e0 + MOVH (R9), R8 // f080d9e1 + MOVH.W (R9), R8 // f080f9e1 + MOVH.P (R9), R8 // f080d9e0 + MOVHS (R9), R8 // f080d9e1 + MOVHS.W (R9), R8 // f080f9e1 + MOVHS.P (R9), R8 // f080d9e0 + MOVHU 0x22(R9), R8 // MOVHU 34(R9), R8 // b282d9e1 + MOVHU.W 0x22(R9), R8 // MOVHU.W 34(R9), R8 // b282f9e1 + MOVHU.P 0x22(R9), R8 // MOVHU.P 34(R9), R8 // b282d9e0 + MOVH 0x22(R9), R8 // MOVH 34(R9), R8 // f282d9e1 + MOVH.W 0x22(R9), R8 // MOVH.W 34(R9), R8 // f282f9e1 + MOVH.P 0x22(R9), R8 // MOVH.P 34(R9), R8 // f282d9e0 + MOVHS 0x22(R9), R8 // MOVHS 34(R9), R8 // f282d9e1 + MOVHS.W 0x22(R9), R8 // MOVHS.W 34(R9), R8 // f282f9e1 + MOVHS.P 0x22(R9), R8 // MOVHS.P 34(R9), R8 // f282d9e0 + MOVHU -0x24(R9), R8 // MOVHU -36(R9), R8 // b48259e1 + MOVHU.W -0x24(R9), R8 // MOVHU.W -36(R9), R8 // b48279e1 + MOVHU.P -0x24(R9), R8 // MOVHU.P 
-36(R9), R8 // b48259e0 + MOVH -0x24(R9), R8 // MOVH -36(R9), R8 // f48259e1 + MOVH.W -0x24(R9), R8 // MOVH.W -36(R9), R8 // f48279e1 + MOVH.P -0x24(R9), R8 // MOVH.P -36(R9), R8 // f48259e0 + MOVHS -0x24(R9), R8 // MOVHS -36(R9), R8 // f48259e1 + MOVHS.W -0x24(R9), R8 // MOVHS.W -36(R9), R8 // f48279e1 + MOVHS.P -0x24(R9), R8 // MOVHS.P -36(R9), R8 // f48259e0 + MOVH R1, 0x00ffffff(R2) // MOVH R1, 16777215(R2) + MOVH.W R1, 0x00ffffff(R2) // MOVH.W R1, 16777215(R2) + MOVH.P R1, 0x00ffffff(R2) // MOVH.P R1, 16777215(R2) + MOVH R1, -0x00ffffff(R2) // MOVH R1, -16777215(R2) + MOVH.W R1, -0x00ffffff(R2) // MOVH.W R1, -16777215(R2) + MOVH.P R1, -0x00ffffff(R2) // MOVH.P R1, -16777215(R2) + MOVH 0x00ffffff(R2), R1 // MOVH 16777215(R2), R1 + MOVH.P 0x00ffffff(R2), R1 // MOVH.P 16777215(R2), R1 + MOVH.W 0x00ffffff(R2), R1 // MOVH.W 16777215(R2), R1 + MOVH -0x00ffffff(R2), R1 // MOVH -16777215(R2), R1 + MOVH.P -0x00ffffff(R2), R1 // MOVH.P -16777215(R2), R1 + MOVH.W -0x00ffffff(R2), R1 // MOVH.W -16777215(R2), R1 + MOVHS R1, 0x00ffffff(R2) // MOVHS R1, 16777215(R2) + MOVHS.W R1, 0x00ffffff(R2) // MOVHS.W R1, 16777215(R2) + MOVHS.P R1, 0x00ffffff(R2) // MOVHS.P R1, 16777215(R2) + MOVHS R1, -0x00ffffff(R2) // MOVHS R1, -16777215(R2) + MOVHS.W R1, -0x00ffffff(R2) // MOVHS.W R1, -16777215(R2) + MOVHS.P R1, -0x00ffffff(R2) // MOVHS.P R1, -16777215(R2) + MOVHS 0x00ffffff(R2), R1 // MOVHS 16777215(R2), R1 + MOVHS.P 0x00ffffff(R2), R1 // MOVHS.P 16777215(R2), R1 + MOVHS.W 0x00ffffff(R2), R1 // MOVHS.W 16777215(R2), R1 + MOVHS -0x00ffffff(R2), R1 // MOVHS -16777215(R2), R1 + MOVHS.P -0x00ffffff(R2), R1 // MOVHS.P -16777215(R2), R1 + MOVHS.W -0x00ffffff(R2), R1 // MOVHS.W -16777215(R2), R1 + MOVHU R1, 0x00ffffff(R2) // MOVHU R1, 16777215(R2) + MOVHU.W R1, 0x00ffffff(R2) // MOVHU.W R1, 16777215(R2) + MOVHU.P R1, 0x00ffffff(R2) // MOVHU.P R1, 16777215(R2) + MOVHU R1, -0x00ffffff(R2) // MOVHU R1, -16777215(R2) + MOVHU.W R1, -0x00ffffff(R2) // MOVHU.W R1, -16777215(R2) + MOVHU.P R1, 
-0x00ffffff(R2) // MOVHU.P R1, -16777215(R2) + MOVHU 0x00ffffff(R2), R1 // MOVHU 16777215(R2), R1 + MOVHU.P 0x00ffffff(R2), R1 // MOVHU.P 16777215(R2), R1 + MOVHU.W 0x00ffffff(R2), R1 // MOVHU.W 16777215(R2), R1 + MOVHU -0x00ffffff(R2), R1 // MOVHU -16777215(R2), R1 + MOVHU.P -0x00ffffff(R2), R1 // MOVHU.P -16777215(R2), R1 + MOVHU.W -0x00ffffff(R2), R1 // MOVHU.W -16777215(R2), R1 + MOVH R0, math·Exp(SB) // MOVH R0, math.Exp(SB) + MOVH math·Exp(SB), R0 // MOVH math.Exp(SB), R0 + MOVHS R0, math·Exp(SB) // MOVHS R0, math.Exp(SB) + MOVHS math·Exp(SB), R0 // MOVHS math.Exp(SB), R0 + MOVHU R0, math·Exp(SB) // MOVHU R0, math.Exp(SB) + MOVHU math·Exp(SB), R0 // MOVHU math.Exp(SB), R0 + MOVHS R0<<0(R1), R2 // f02091e1 + MOVHS.U R0<<0(R1), R2 // f02011e1 + MOVHS.W R0<<0(R1), R2 // f020b1e1 + MOVHS.P R0<<0(R1), R2 // f02091e0 + MOVH R0<<0(R1), R2 // f02091e1 + MOVH.U R0<<0(R1), R2 // f02011e1 + MOVH.W R0<<0(R1), R2 // f020b1e1 + MOVH.P R0<<0(R1), R2 // f02091e0 + MOVHU R0<<0(R1), R2 // b02091e1 + MOVHU.U R0<<0(R1), R2 // b02011e1 + MOVHU.W R0<<0(R1), R2 // b020b1e1 + MOVHU.P R0<<0(R1), R2 // b02091e0 + MOVHS R2, R5<<0(R1) // b52081e1 + MOVHS.U R2, R5<<0(R1) // b52001e1 + MOVHS.W R2, R5<<0(R1) // b520a1e1 + MOVHS.P R2, R5<<0(R1) // b52081e0 + MOVH R2, R5<<0(R1) // b52081e1 + MOVH.U R2, R5<<0(R1) // b52001e1 + MOVH.W R2, R5<<0(R1) // b520a1e1 + MOVH.P R2, R5<<0(R1) // b52081e0 + MOVHU R2, R5<<0(R1) // b52081e1 + MOVHU.U R2, R5<<0(R1) // b52001e1 + MOVHU.W R2, R5<<0(R1) // b520a1e1 + MOVHU.P R2, R5<<0(R1) // b52081e0 + MOVHS R3@>0, R9 // 7390bfe6 + MOVHS R3@>8, R9 // 7394bfe6 + MOVHS R3@>16, R9 // 7398bfe6 + MOVHS R3@>24, R9 // 739cbfe6 + MOVH R3@>0, R9 // 7390bfe6 + MOVH R3@>8, R9 // 7394bfe6 + MOVH R3@>16, R9 // 7398bfe6 + MOVH R3@>24, R9 // 739cbfe6 + MOVHU R5@>0, R1 // 7510ffe6 + MOVHU R5@>8, R1 // 7514ffe6 + MOVHU R5@>16, R1 // 7518ffe6 + MOVHU R5@>24, R1 // 751cffe6 + + RET foo(SB) + +// +// END +// +// LTYPEE +// { +// outcode($1, Always, &nullgen, 0, &nullgen); +// } + 
END diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/arm64.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..ecad08b37aa021ee87f83e77def30e590a496809 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/arm64.s @@ -0,0 +1,1886 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This input was created by taking the instruction productions in +// the old assembler's (7a's) grammar and hand-writing complete +// instructions for each rule, to guarantee we cover the same space. + +#include "../../../../../runtime/textflag.h" + +TEXT foo(SB), DUPOK|NOSPLIT, $-8 + +// arithmetic operations + ADDW $1, R2, R3 + ADDW R1, R2, R3 + ADDW R1, ZR, R3 + ADD $1, R2, R3 + ADD R1, R2, R3 + ADD R1, ZR, R3 + ADD $1, R2, R3 + ADDW $1, R2 + ADDW R1, R2 + ADD $1, R2 + ADD R1, R2 + ADD R1>>11, R2 + ADD R1<<22, R2 + ADD R1->33, R2 + ADD $0x000aaa, R2, R3 // ADD $2730, R2, R3 // 43a82a91 + ADD $0x000aaa, R2 // ADD $2730, R2 // 42a82a91 + ADD $0xaaa000, R2, R3 // ADD $11182080, R2, R3 // 43a86a91 + ADD $0xaaa000, R2 // ADD $11182080, R2 // 42a86a91 + ADD $0xaaaaaa, R2, R3 // ADD $11184810, R2, R3 // 43a82a9163a86a91 + ADD $0xaaaaaa, R2 // ADD $11184810, R2 // 42a82a9142a86a91 + SUB $0x000aaa, R2, R3 // SUB $2730, R2, R3 // 43a82ad1 + SUB $0x000aaa, R2 // SUB $2730, R2 // 42a82ad1 + SUB $0xaaa000, R2, R3 // SUB $11182080, R2, R3 // 43a86ad1 + SUB $0xaaa000, R2 // SUB $11182080, R2 // 42a86ad1 + SUB $0xaaaaaa, R2, R3 // SUB $11184810, R2, R3 // 43a82ad163a86ad1 + SUB $0xaaaaaa, R2 // SUB $11184810, R2 // 42a82ad142a86ad1 + ADDW $0x60060, R2 // ADDW $393312, R2 // 4280011142804111 + ADD $0x186a0, R2, R5 // ADD $100000, R2, R5 // 45801a91a5604091 + SUB $0xe7791f700, R3, R1 // SUB $62135596800, R3, R1 // 
1be09ed23bf2aef2db01c0f261001bcb + ADD $0x3fffffffc000, R5 // ADD $70368744161280, R5 // fb7f72b2a5001b8b + ADD R1>>11, R2, R3 + ADD R1<<22, R2, R3 + ADD R1->33, R2, R3 + AND R1@>33, R2, R3 + ADD R1.UXTB, R2, R3 // 4300218b + ADD R1.UXTB<<4, R2, R3 // 4310218b + ADD R2, RSP, RSP // ff63228b + ADD R2.SXTX<<1, RSP, RSP // ffe7228b + ADD ZR.SXTX<<1, R2, R3 // 43e43f8b + ADDW R2.SXTW, R10, R12 // 4cc1220b + ADD R19.UXTX, R14, R17 // d161338b + ADDSW R19.UXTW, R14, R17 // d141332b + ADDS R12.SXTX, R3, R1 // 61e02cab + SUB R19.UXTH<<4, R2, R21 // 553033cb + SUBW R1.UXTX<<1, R3, R2 // 6264214b + SUBS R3.UXTX, R8, R9 // 096123eb + SUBSW R17.UXTH, R15, R21 // f521316b + SUBW ZR<<14, R19, R13 // 6d3a1f4b + CMP R2.SXTH, R13 // bfa122eb + CMN R1.SXTX<<2, R10 // 5fe921ab + CMPW R2.UXTH<<3, R11 // 7f2d226b + CMNW R1.SXTB, R9 // 3f81212b + ADD R1<<1, RSP, R3 // e367218b + ADDW R1<<2, R3, RSP // 7f48210b + SUB R1<<3, RSP // ff6f21cb + SUBS R1<<4, RSP, R3 // e37321eb + ADDS R1<<1, RSP, R4 // e46721ab + CMP R1<<2, RSP // ff6b21eb + CMN R1<<3, RSP // ff6f21ab + ADDS R1<<1, ZR, R4 // e40701ab + ADD R3<<50, ZR, ZR // ffcb038b + CMP R4<<24, ZR // ff6304eb + CMPW $0x60060, R2 // CMPW $393312, R2 // 1b0c8052db00a0725f001b6b + CMPW $40960, R0 // 1f284071 + CMPW $27745, R2 // 3b8c8d525f001b6b + CMNW $0x3fffffc0, R2 // CMNW $1073741760, R2 // fb5f1a325f001b2b + CMPW $0xffff0, R1 // CMPW $1048560, R1 // fb3f1c323f001b6b + CMP $0xffffffffffa0, R3 // CMP $281474976710560, R3 // fb0b80921b00e0f27f001beb + CMP $0xf4240, R1 // CMP $1000000, R1 // 1b4888d2fb01a0f23f001beb + CMP $3343198598084851058, R3 // 5bae8ed2db8daef23badcdf2bbcce5f27f001beb + CMP $3, R2 + CMP R1, R2 + CMP R1->11, R2 + CMP R1>>22, R2 + CMP R1<<33, R2 + CMP R22.SXTX, RSP // ffe336eb + CMP $0x22220000, RSP // CMP $572653568, RSP // 5b44a4d2ff633beb + CMPW $0x22220000, RSP // CMPW $572653568, RSP // 5b44a452ff433b6b + CCMN MI, ZR, R1, $4 // e44341ba + // MADD Rn,Rm,Ra,Rd + MADD R1, R2, R3, R4 // 6408019b + // CLS + CLSW R1, R2 + 
CLS R1, R2 + SBC $0, R1 // 21001fda + SBCW $0, R1 // 21001f5a + SBCS $0, R1 // 21001ffa + SBCSW $0, R1 // 21001f7a + ADC $0, R1 // 21001f9a + ADCW $0, R1 // 21001f1a + ADCS $0, R1 // 21001fba + ADCSW $0, R1 // 21001f3a + +// fp/simd instructions. + VADDP V1.B16, V2.B16, V3.B16 // 43bc214e + VADDP V1.S4, V2.S4, V3.S4 // 43bca14e + VADDP V1.D2, V2.D2, V3.D2 // 43bce14e + VAND V21.B8, V12.B8, V3.B8 // 831d350e + VCMEQ V1.H4, V2.H4, V3.H4 // 438c612e + VORR V5.B16, V4.B16, V3.B16 // 831ca54e + VADD V16.S4, V5.S4, V9.S4 // a984b04e + VEOR V0.B16, V1.B16, V0.B16 // 201c206e + VADDV V0.S4, V0 // 00b8b14e + VMOVI $82, V0.B16 // 40e6024f + VUADDLV V6.B16, V6 // c638306e + VADD V1, V2, V3 // 4384e15e + VADD V1, V3, V3 // 6384e15e + VSUB V12, V30, V30 // de87ec7e + VSUB V12, V20, V30 // 9e86ec7e + VFMLA V1.D2, V12.D2, V1.D2 // 81cd614e + VFMLA V1.S2, V12.S2, V1.S2 // 81cd210e + VFMLA V1.S4, V12.S4, V1.S4 // 81cd214e + VFMLS V1.D2, V12.D2, V1.D2 // 81cde14e + VFMLS V1.S2, V12.S2, V1.S2 // 81cda10e + VFMLS V1.S4, V12.S4, V1.S4 // 81cda14e + VEXT $4, V2.B8, V1.B8, V3.B8 // 2320022e + VEXT $8, V2.B16, V1.B16, V3.B16 // 2340026e + VRBIT V24.B16, V24.B16 // 185b606e + VRBIT V24.B8, V24.B8 // 185b602e + VUSHR $56, V1.D2, V2.D2 // 2204486f + VUSHR $24, V1.S4, V2.S4 // 2204286f + VUSHR $24, V1.S2, V2.S2 // 2204282f + VUSHR $8, V1.H4, V2.H4 // 2204182f + VUSHR $8, V1.H8, V2.H8 // 2204186f + VUSHR $2, V1.B8, V2.B8 // 22040e2f + VUSHR $2, V1.B16, V2.B16 // 22040e6f + VSHL $56, V1.D2, V2.D2 // 2254784f + VSHL $24, V1.S4, V2.S4 // 2254384f + VSHL $24, V1.S2, V2.S2 // 2254380f + VSHL $8, V1.H4, V2.H4 // 2254180f + VSHL $8, V1.H8, V2.H8 // 2254184f + VSHL $2, V1.B8, V2.B8 // 22540a0f + VSHL $2, V1.B16, V2.B16 // 22540a4f + VSRI $56, V1.D2, V2.D2 // 2244486f + VSRI $24, V1.S4, V2.S4 // 2244286f + VSRI $24, V1.S2, V2.S2 // 2244282f + VSRI $8, V1.H4, V2.H4 // 2244182f + VSRI $8, V1.H8, V2.H8 // 2244186f + VSRI $2, V1.B8, V2.B8 // 22440e2f + VSRI $2, V1.B16, V2.B16 // 22440e6f + VSLI $7, V2.B16, 
V3.B16 // 43540f6f + VSLI $15, V3.H4, V4.H4 // 64541f2f + VSLI $31, V5.S4, V6.S4 // a6543f6f + VSLI $63, V7.D2, V8.D2 // e8547f6f + VUSRA $8, V2.B16, V3.B16 // 4314086f + VUSRA $16, V3.H4, V4.H4 // 6414102f + VUSRA $32, V5.S4, V6.S4 // a614206f + VUSRA $64, V7.D2, V8.D2 // e814406f + VTBL V22.B16, [V28.B16, V29.B16], V11.B16 // 8b23164e + VTBL V18.B8, [V17.B16, V18.B16, V19.B16], V22.B8 // 3642120e + VTBL V31.B8, [V14.B16, V15.B16, V16.B16, V17.B16], V15.B8 // cf611f0e + VTBL V14.B16, [V16.B16], V11.B16 // 0b020e4e + VTBL V28.B16, [V25.B16, V26.B16], V5.B16 // 25231c4e + VTBL V16.B8, [V4.B16, V5.B16, V6.B16], V12.B8 // 8c40100e + VTBL V4.B8, [V16.B16, V17.B16, V18.B16, V19.B16], V4.B8 // 0462040e + VTBL V15.B8, [V1.B16], V20.B8 // 34000f0e + VTBL V26.B16, [V2.B16, V3.B16], V26.B16 // 5a201a4e + VTBL V15.B8, [V6.B16, V7.B16, V8.B16], V2.B8 // c2400f0e + VTBL V2.B16, [V27.B16, V28.B16, V29.B16, V30.B16], V18.B16 // 7263024e + VTBL V11.B16, [V13.B16], V27.B16 // bb010b4e + VTBL V3.B8, [V7.B16, V8.B16], V25.B8 // f920030e + VTBL V14.B16, [V3.B16, V4.B16, V5.B16], V17.B16 // 71400e4e + VTBL V13.B16, [V29.B16, V30.B16, V31.B16, V0.B16], V28.B16 // bc630d4e + VTBL V3.B8, [V27.B16], V8.B8 // 6803030e + VTBX V22.B16, [V28.B16, V29.B16], V11.B16 // 8b33164e + VTBX V18.B8, [V17.B16, V18.B16, V19.B16], V22.B8 // 3652120e + VTBX V31.B8, [V14.B16, V15.B16, V16.B16, V17.B16], V15.B8 // cf711f0e + VTBX V14.B16, [V16.B16], V11.B16 // 0b120e4e + VTBX V28.B16, [V25.B16, V26.B16], V5.B16 // 25331c4e + VTBX V16.B8, [V4.B16, V5.B16, V6.B16], V12.B8 // 8c50100e + VTBX V4.B8, [V16.B16, V17.B16, V18.B16, V19.B16], V4.B8 // 0472040e + VTBX V15.B8, [V1.B16], V20.B8 // 34100f0e + VTBX V26.B16, [V2.B16, V3.B16], V26.B16 // 5a301a4e + VTBX V15.B8, [V6.B16, V7.B16, V8.B16], V2.B8 // c2500f0e + VTBX V2.B16, [V27.B16, V28.B16, V29.B16, V30.B16], V18.B16 // 7273024e + VTBX V11.B16, [V13.B16], V27.B16 // bb110b4e + VTBX V3.B8, [V7.B16, V8.B16], V25.B8 // f930030e + VTBX V14.B16, [V3.B16, V4.B16, 
V5.B16], V17.B16 // 71500e4e + VTBX V13.B16, [V29.B16, V30.B16, V31.B16, V0.B16], V28.B16 // bc730d4e + VTBX V3.B8, [V27.B16], V8.B8 // 6813030e + VZIP1 V16.H8, V3.H8, V19.H8 // 7338504e + VZIP2 V22.D2, V25.D2, V21.D2 // 357bd64e + VZIP1 V6.D2, V9.D2, V11.D2 // 2b39c64e + VZIP2 V10.D2, V13.D2, V3.D2 // a379ca4e + VZIP1 V17.S2, V4.S2, V26.S2 // 9a38910e + VZIP2 V25.S2, V14.S2, V25.S2 // d979990e + VUXTL V30.B8, V30.H8 // dea7082f + VUXTL V30.H4, V29.S4 // dda7102f + VUXTL V29.S2, V2.D2 // a2a7202f + VUXTL2 V30.H8, V30.S4 // dea7106f + VUXTL2 V29.S4, V2.D2 // a2a7206f + VUXTL2 V30.B16, V2.H8 // c2a7086f + VBIT V21.B16, V25.B16, V4.B16 // 241fb56e + VBSL V23.B16, V3.B16, V7.B16 // 671c776e + VCMTST V2.B8, V29.B8, V2.B8 // a28f220e + VCMTST V2.D2, V23.D2, V3.D2 // e38ee24e + VSUB V2.B8, V30.B8, V30.B8 // de87222e + VUZP1 V0.B8, V30.B8, V1.B8 // c11b000e + VUZP1 V1.B16, V29.B16, V2.B16 // a21b014e + VUZP1 V2.H4, V28.H4, V3.H4 // 831b420e + VUZP1 V3.H8, V27.H8, V4.H8 // 641b434e + VUZP1 V28.S2, V2.S2, V5.S2 // 45189c0e + VUZP1 V29.S4, V1.S4, V6.S4 // 26189d4e + VUZP1 V30.D2, V0.D2, V7.D2 // 0718de4e + VUZP2 V0.D2, V30.D2, V1.D2 // c15bc04e + VUZP2 V30.D2, V0.D2, V29.D2 // 1d58de4e + VUSHLL $0, V30.B8, V30.H8 // dea7082f + VUSHLL $0, V30.H4, V29.S4 // dda7102f + VUSHLL $0, V29.S2, V2.D2 // a2a7202f + VUSHLL2 $0, V30.B16, V2.H8 // c2a7086f + VUSHLL2 $0, V30.H8, V30.S4 // dea7106f + VUSHLL2 $0, V29.S4, V2.D2 // a2a7206f + VUSHLL $7, V30.B8, V30.H8 // dea70f2f + VUSHLL $15, V30.H4, V29.S4 // dda71f2f + VUSHLL2 $31, V30.S4, V2.D2 // c2a73f6f + VBIF V0.B8, V30.B8, V1.B8 // c11fe02e + VBIF V30.B16, V0.B16, V2.B16 // 021cfe6e + FMOVS $(4.0), F0 // 0010221e + FMOVD $(4.0), F0 // 0010621e + FMOVS $(0.265625), F1 // 01302a1e + FMOVD $(0.1796875), F2 // 02f0681e + FMOVS $(0.96875), F3 // 03f02d1e + FMOVD $(28.0), F4 // 0490671e + FMOVD $0, F0 // e003679e + FMOVS $0, F0 // e003271e + FMOVD ZR, F0 // e003679e + FMOVS ZR, F0 // e003271e + FMOVD F1, ZR // 3f00669e + FMOVS F1, ZR // 
3f00261e + VUADDW V9.B8, V12.H8, V14.H8 // 8e11292e + VUADDW V13.H4, V10.S4, V11.S4 // 4b116d2e + VUADDW V21.S2, V24.D2, V29.D2 // 1d13b52e + VUADDW2 V9.B16, V12.H8, V14.H8 // 8e11296e + VUADDW2 V13.H8, V20.S4, V30.S4 // 9e126d6e + VUADDW2 V21.S4, V24.D2, V29.D2 // 1d13b56e + VUMAX V3.B8, V2.B8, V1.B8 // 4164232e + VUMAX V3.B16, V2.B16, V1.B16 // 4164236e + VUMAX V3.H4, V2.H4, V1.H4 // 4164632e + VUMAX V3.H8, V2.H8, V1.H8 // 4164636e + VUMAX V3.S2, V2.S2, V1.S2 // 4164a32e + VUMAX V3.S4, V2.S4, V1.S4 // 4164a36e + VUMIN V3.B8, V2.B8, V1.B8 // 416c232e + VUMIN V3.B16, V2.B16, V1.B16 // 416c236e + VUMIN V3.H4, V2.H4, V1.H4 // 416c632e + VUMIN V3.H8, V2.H8, V1.H8 // 416c636e + VUMIN V3.S2, V2.S2, V1.S2 // 416ca32e + VUMIN V3.S4, V2.S4, V1.S4 // 416ca36e + FCCMPS LT, F1, F2, $1 // 41b4211e + FMADDS F1, F3, F2, F4 // 440c011f + FMADDD F4, F5, F4, F4 // 8414441f + FMSUBS F13, F21, F13, F19 // b3d50d1f + FMSUBD F11, F7, F15, F31 // ff9d4b1f + FNMADDS F1, F3, F2, F4 // 440c211f + FNMADDD F1, F3, F2, F4 // 440c611f + FNMSUBS F1, F3, F2, F4 // 448c211f + FNMSUBD F1, F3, F2, F4 // 448c611f + FADDS F2, F3, F4 // 6428221e + FADDD F1, F2 // 4228611e + VDUP V19.S[0], V17.S4 // 7106044e + VTRN1 V3.D2, V2.D2, V20.D2 // 5428c34e + VTRN2 V3.D2, V2.D2, V21.D2 // 5568c34e + VTRN1 V5.D2, V4.D2, V22.D2 // 9628c54e + VTRN2 V5.D2, V4.D2, V23.D2 // 9768c54e + + +// special + PRFM (R2), PLDL1KEEP // 400080f9 + PRFM 16(R2), PLDL1KEEP // 400880f9 + PRFM 48(R6), PSTL2STRM // d31880f9 + PRFM 8(R12), PLIL3STRM // 8d0580f9 + PRFM (R8), $25 // 190180f9 + PRFM 8(R9), $30 // 3e0580f9 + NOOP // 1f2003d5 + HINT $0 // 1f2003d5 + DMB $1 + SVC + +// encryption + SHA256H V9.S4, V3, V2 // 6240095e + SHA256H2 V9.S4, V4, V3 // 8350095e + SHA256SU0 V8.S4, V7.S4 // 0729285e + SHA256SU1 V6.S4, V5.S4, V7.S4 // a760065e + SHA1SU0 V11.S4, V8.S4, V6.S4 // 06310b5e + SHA1SU1 V5.S4, V1.S4 // a118285e + SHA1C V1.S4, V2, V3 // 4300015e + SHA1H V5, V4 // a408285e + SHA1M V8.S4, V7, V6 // e620085e + SHA1P V11.S4, V10, V9 
// 49110b5e + SHA512H V2.D2, V1, V0 // 208062ce + SHA512H2 V4.D2, V3, V2 // 628464ce + SHA512SU0 V9.D2, V8.D2 // 2881c0ce + SHA512SU1 V7.D2, V6.D2, V5.D2 // c58867ce + VRAX1 V26.D2, V29.D2, V30.D2 // be8f7ace + VXAR $63, V27.D2, V21.D2, V26.D2 // bafe9bce + VPMULL V2.D1, V1.D1, V3.Q1 // 23e0e20e + VPMULL2 V2.D2, V1.D2, V4.Q1 // 24e0e24e + VPMULL V2.B8, V1.B8, V3.H8 // 23e0220e + VPMULL2 V2.B16, V1.B16, V4.H8 // 24e0224e + VEOR3 V2.B16, V7.B16, V12.B16, V25.B16 // 990907ce + VBCAX V1.B16, V2.B16, V26.B16, V31.B16 // 5f0722ce + VREV32 V5.B16, V5.B16 // a508206e + VREV64 V2.S2, V3.S2 // 4308a00e + VREV64 V2.S4, V3.S4 // 4308a04e + +// logical ops +// +// make sure constants get encoded into an instruction when it could + AND R1@>33, R2 + AND $(1<<63), R1 // AND $-9223372036854775808, R1 // 21004192 + AND $(1<<63-1), R1 // AND $9223372036854775807, R1 // 21f84092 + ORR $(1<<63), R1 // ORR $-9223372036854775808, R1 // 210041b2 + ORR $(1<<63-1), R1 // ORR $9223372036854775807, R1 // 21f840b2 + EOR $(1<<63), R1 // EOR $-9223372036854775808, R1 // 210041d2 + EOR $(1<<63-1), R1 // EOR $9223372036854775807, R1 // 21f840d2 + ANDW $0x3ff00000, R2 // ANDW $1072693248, R2 // 42240c12 + BICW $0x3ff00000, R2 // BICW $1072693248, R2 // 42540212 + ORRW $0x3ff00000, R2 // ORRW $1072693248, R2 // 42240c32 + ORNW $0x3ff00000, R2 // ORNW $1072693248, R2 // 42540232 + EORW $0x3ff00000, R2 // EORW $1072693248, R2 // 42240c52 + EONW $0x3ff00000, R2 // EONW $1072693248, R2 // 42540252 + AND $0x22220000, R3, R4 // AND $572653568, R3, R4 // 5b44a4d264001b8a + ORR $0x22220000, R3, R4 // ORR $572653568, R3, R4 // 5b44a4d264001baa + EOR $0x22220000, R3, R4 // EOR $572653568, R3, R4 // 5b44a4d264001bca + BIC $0x22220000, R3, R4 // BIC $572653568, R3, R4 // 5b44a4d264003b8a + ORN $0x22220000, R3, R4 // ORN $572653568, R3, R4 // 5b44a4d264003baa + EON $0x22220000, R3, R4 // EON $572653568, R3, R4 // 5b44a4d264003bca + ANDS $0x22220000, R3, R4 // ANDS $572653568, R3, R4 // 5b44a4d264001bea + BICS 
$0x22220000, R3, R4 // BICS $572653568, R3, R4 // 5b44a4d264003bea + EOR $0xe03fffffffffffff, R20, R22 // EOR $-2287828610704211969, R20, R22 // 96e243d2 + TSTW $0x600000006, R1 // TSTW $25769803782, R1 // 3f041f72 + TST $0x4900000049, R0 // TST $313532612681, R0 // 3b0980d23b09c0f21f001bea + ORR $0x170000, R2, R1 // ORR $1507328, R2, R1 // fb02a0d241001baa + AND $0xff00ff, R2 // AND $16711935, R2 // fb1f80d2fb1fa0f242001b8a + AND $0xff00ffff, R1 // AND $4278255615, R1 // fbff9fd21be0bff221001b8a + ANDS $0xffff, R2 // ANDS $65535, R2 // 423c40f2 + AND $0x7fffffff, R3 // AND $2147483647, R3 // 63784092 + ANDS $0x0ffffffff80000000, R2 // ANDS $-2147483648, R2 // 428061f2 + AND $0xfffff, R2 // AND $1048575, R2 // 424c4092 + ANDW $0xf00fffff, R1 // ANDW $4027580415, R1 // 215c0412 + ANDSW $0xff00ffff, R1 // ANDSW $4278255615, R1 // 215c0872 + TST $0x11223344, R2 // TST $287454020, R2 // 9b6886d25b24a2f25f001bea + TSTW $0xa000, R3 // TSTW $40960, R3 // 1b0094527f001b6a + BICW $0xa000, R3 // BICW $40960, R3 // 1b00945263003b0a + ORRW $0x1b000, R2, R3 // ORRW $110592, R2, R3 // 1b0096523b00a07243001b2a + TSTW $0x500000, R1 // TSTW $5242880, R1 // 1b0aa0523f001b6a + TSTW $0xff00ff, R1 // TSTW $16711935, R1 // 3f9c0072 + TSTW $0x60060, R5 // TSTW $393312, R5 // 1b0c8052db00a072bf001b6a + TSTW $0x6006000060060, R5 // TSTW $1689262177517664, R5 // 1b0c8052db00a072bf001b6a + ANDW $0x6006000060060, R5 // ANDW $1689262177517664, R5 // 1b0c8052db00a072a5001b0a + ANDSW $0x6006000060060, R5 // ANDSW $1689262177517664, R5 // 1b0c8052db00a072a5001b6a + EORW $0x6006000060060, R5 // EORW $1689262177517664, R5 // 1b0c8052db00a072a5001b4a + ORRW $0x6006000060060, R5 // ORRW $1689262177517664, R5 // 1b0c8052db00a072a5001b2a + BICW $0x6006000060060, R5 // BICW $1689262177517664, R5 // 1b0c8052db00a072a5003b0a + EONW $0x6006000060060, R5 // EONW $1689262177517664, R5 // 1b0c8052db00a072a5003b4a + ORNW $0x6006000060060, R5 // ORNW $1689262177517664, R5 // 1b0c8052db00a072a5003b2a + BICSW 
$0x6006000060060, R5 // BICSW $1689262177517664, R5 // 1b0c8052db00a072a5003b6a + AND $1, ZR // fb0340b2ff031b8a + ANDW $1, ZR // fb030032ff031b0a + // TODO: this could have better encoding + ANDW $-1, R10 // 1b0080124a011b0a + AND $8, R0, RSP // 1f007d92 + ORR $8, R0, RSP // 1f007db2 + EOR $8, R0, RSP // 1f007dd2 + BIC $8, R0, RSP // 1ff87c92 + ORN $8, R0, RSP // 1ff87cb2 + EON $8, R0, RSP // 1ff87cd2 + TST $15, R2 // 5f0c40f2 + TST R1, R2 // 5f0001ea + TST R1->11, R2 // 5f2c81ea + TST R1>>22, R2 // 5f5841ea + TST R1<<33, R2 // 5f8401ea + TST $0x22220000, R3 // TST $572653568, R3 // 5b44a4d27f001bea + +// move an immediate to a Rn. + MOVD $0x3fffffffc000, R0 // MOVD $70368744161280, R0 // e07f72b2 + MOVW $1000000, R4 // 04488852e401a072 + MOVW $0xaaaa0000, R1 // MOVW $2863267840, R1 // 4155b552 + MOVW $0xaaaaffff, R1 // MOVW $2863333375, R1 // a1aaaa12 + MOVW $0xaaaa, R1 // MOVW $43690, R1 // 41559552 + MOVW $0xffffaaaa, R1 // MOVW $4294945450, R1 // a1aa8a12 + MOVW $0xffff0000, R1 // MOVW $4294901760, R1 // e1ffbf52 + MOVD $0xffff00000000000, R1 // MOVD $1152903912420802560, R1 // e13f54b2 + MOVD $0x1111000000001111, R1 // MOVD $1229764173248860433, R1 // 212282d22122e2f2 + MOVD $0x1111ffff1111ffff, R1 // MOVD $1230045644216991743, R1 // c1ddbd922122e2f2 + MOVD $0x1111222233334444, R1 // MOVD $1229801703532086340, R1 // 818888d26166a6f24144c4f22122e2f2 + MOVD $0xaaaaffff, R1 // MOVD $2863333375, R1 // e1ff9fd24155b5f2 + MOVD $0x11110000, R1 // MOVD $286326784, R1 // 2122a2d2 + MOVD $0xaaaa0000aaaa1111, R1 // MOVD $-6149102338357718767, R1 // 212282d24155b5f24155f5f2 + MOVD $0x1111ffff1111aaaa, R1 // MOVD $1230045644216969898, R1 // a1aa8a922122a2f22122e2f2 + MOVD $0, R1 // e1031faa + MOVD $-1, R1 // 01008092 + MOVD $0x210000, R0 // MOVD $2162688, R0 // 2004a0d2 + MOVD $0xffffffffffffaaaa, R1 // MOVD $-21846, R1 // a1aa8a92 + MOVW $1, ZR // 3f008052 + MOVW $1, R1 + MOVD $1, ZR // 3f0080d2 + MOVD $1, R1 + MOVK $1, R1 + MOVD $0x1000100010001000, RSP // MOVD 
$1152939097061330944, RSP // ff8304b2 + MOVW $0x10001000, RSP // MOVW $268439552, RSP // ff830432 + ADDW $0x10001000, R1 // ADDW $268439552, R1 // fb83043221001b0b + ADDW $0x22220000, RSP, R3 // ADDW $572653568, RSP, R3 // 5b44a452e3433b0b + +// move a large constant to a Vd. + VMOVS $0x80402010, V11 // VMOVS $2151686160, V11 + VMOVD $0x8040201008040201, V20 // VMOVD $-9205322385119247871, V20 + VMOVQ $0x7040201008040201, $0x8040201008040201, V10 // VMOVQ $8088500183983456769, $-9205322385119247871, V10 + VMOVQ $0x8040201008040202, $0x7040201008040201, V20 // VMOVQ $-9205322385119247870, $8088500183983456769, V20 + +// mov(to/from sp) + MOVD $0x1002(RSP), R1 // MOVD $4098(RSP), R1 // e107409121080091 + MOVD $0x1708(RSP), RSP // MOVD $5896(RSP), RSP // ff074091ff231c91 + MOVD $0x2001(R7), R1 // MOVD $8193(R7), R1 // e108409121040091 + MOVD $0xffffff(R7), R1 // MOVD $16777215(R7), R1 // e1fc7f9121fc3f91 + MOVD $-0x1(R7), R1 // MOVD $-1(R7), R1 // e10400d1 + MOVD $-0x30(R7), R1 // MOVD $-48(R7), R1 // e1c000d1 + MOVD $-0x708(R7), R1 // MOVD $-1800(R7), R1 // e1201cd1 + MOVD $-0x2000(RSP), R1 // MOVD $-8192(RSP), R1 // e10b40d1 + MOVD $-0x10000(RSP), RSP // MOVD $-65536(RSP), RSP // ff4340d1 + MOVW R1, R2 + MOVW ZR, R1 + MOVW R1, ZR + MOVD R1, R2 + MOVD ZR, R1 + +// store and load +// +// LD1/ST1 + VLD1 (R8), [V1.B16, V2.B16] // 01a1404c + VLD1.P (R3), [V31.H8, V0.H8] // 7fa4df4c + VLD1.P (R8)(R20), [V21.B16, V22.B16] // 15a1d44c + VLD1.P 64(R1), [V5.B16, V6.B16, V7.B16, V8.B16] // 2520df4c + VLD1.P 1(R0), V4.B[15] // 041cdf4d + VLD1.P 2(R0), V4.H[7] // 0458df4d + VLD1.P 4(R0), V4.S[3] // 0490df4d + VLD1.P 8(R0), V4.D[1] // 0484df4d + VLD1.P (R0)(R1), V4.D[1] // 0484c14d + VLD1 (R0), V4.D[1] // 0484404d + VST1.P [V4.S4, V5.S4], 32(R1) // 24a89f4c + VST1 [V0.S4, V1.S4], (R0) // 00a8004c + VLD1 (R30), [V15.S2, V16.S2] // cfab400c + VLD1.P 24(R30), [V3.S2,V4.S2,V5.S2] // c36bdf0c + VLD2 (R29), [V23.H8, V24.H8] // b787404c + VLD2.P 16(R0), [V18.B8, V19.B8] // 1280df0c + 
VLD2.P (R1)(R2), [V15.S2, V16.S2] // 2f88c20c + VLD3 (R27), [V11.S4, V12.S4, V13.S4] // 6b4b404c + VLD3.P 48(RSP), [V11.S4, V12.S4, V13.S4] // eb4bdf4c + VLD3.P (R30)(R2), [V14.D2, V15.D2, V16.D2] // ce4fc24c + VLD4 (R15), [V10.H4, V11.H4, V12.H4, V13.H4] // ea05400c + VLD4.P 32(R24), [V31.B8, V0.B8, V1.B8, V2.B8] // 1f03df0c + VLD4.P (R13)(R9), [V14.S2, V15.S2, V16.S2, V17.S2] // ae09c90c + VLD1R (R1), [V9.B8] // 29c0400d + VLD1R.P (R1), [V9.B8] // 29c0df0d + VLD1R.P 1(R1), [V2.B8] // 22c0df0d + VLD1R.P 2(R1), [V2.H4] // 22c4df0d + VLD1R (R0), [V0.B16] // 00c0404d + VLD1R.P (R0), [V0.B16] // 00c0df4d + VLD1R.P (R15)(R1), [V15.H4] // efc5c10d + VLD2R (R15), [V15.H4, V16.H4] // efc5600d + VLD2R.P 16(R0), [V0.D2, V1.D2] // 00ccff4d + VLD2R.P (R0)(R5), [V31.D1, V0.D1] // 1fcce50d + VLD3R (RSP), [V31.S2, V0.S2, V1.S2] // ffeb400d + VLD3R.P 6(R15), [V15.H4, V16.H4, V17.H4] // efe5df0d + VLD3R.P (R15)(R6), [V15.H8, V16.H8, V17.H8] // efe5c64d + VLD4R (R0), [V0.B8, V1.B8, V2.B8, V3.B8] // 00e0600d + VLD4R.P 16(RSP), [V31.S4, V0.S4, V1.S4, V2.S4] // ffebff4d + VLD4R.P (R15)(R9), [V15.H4, V16.H4, V17.H4, V18.H4] // efe5e90d + VST1.P [V24.S2], 8(R2) // 58789f0c + VST1 [V29.S2, V30.S2], (R29) // bdab000c + VST1 [V14.H4, V15.H4, V16.H4], (R27) // 6e67000c + VST1.P V4.B[15], 1(R0) // 041c9f4d + VST1.P V4.H[7], 2(R0) // 04589f4d + VST1.P V4.S[3], 4(R0) // 04909f4d + VST1.P V4.D[1], 8(R0) // 04849f4d + VST1.P V4.D[1], (R0)(R1) // 0484814d + VST1 V4.D[1], (R0) // 0484004d + VST2 [V22.H8, V23.H8], (R23) // f686004c + VST2.P [V14.H4, V15.H4], 16(R17) // 2e869f0c + VST2.P [V14.H4, V15.H4], (R3)(R17) // 6e84910c + VST3 [V1.D2, V2.D2, V3.D2], (R11) // 614d004c + VST3.P [V18.S4, V19.S4, V20.S4], 48(R25) // 324b9f4c + VST3.P [V19.B8, V20.B8, V21.B8], (R3)(R7) // 7340870c + VST4 [V22.D2, V23.D2, V24.D2, V25.D2], (R3) // 760c004c + VST4.P [V14.D2, V15.D2, V16.D2, V17.D2], 64(R15) // ee0d9f4c + VST4.P [V24.B8, V25.B8, V26.B8, V27.B8], (R3)(R23) // 7800970c + +// pre/post-indexed + FMOVS.P 
F20, 4(R0) // 144400bc + FMOVS.W F20, 4(R0) // 144c00bc + FMOVD.P F20, 8(R1) // 348400fc + FMOVQ.P F13, 11(R10) // 4db5803c + FMOVQ.W F15, 11(R20) // 8fbe803c + + FMOVS.P 8(R0), F20 // 148440bc + FMOVS.W 8(R0), F20 // 148c40bc + FMOVD.W 8(R1), F20 // 348c40fc + FMOVQ.P 11(R10), F13 // 4db5c03c + FMOVQ.W 11(R20), F15 // 8fbec03c + +// storing $0 to memory, $0 will be replaced with ZR. + MOVD $0, (R1) // 3f0000f9 + MOVW $0, (R1) // 3f0000b9 + MOVWU $0, (R1) // 3f0000b9 + MOVH $0, (R1) // 3f000079 + MOVHU $0, (R1) // 3f000079 + MOVB $0, (R1) // 3f000039 + MOVBU $0, (R1) // 3f000039 + +// small offset fits into instructions + MOVB R1, 1(R2) // 41040039 + MOVH R1, 1(R2) // 41100078 + MOVH R1, 2(R2) // 41040079 + MOVW R1, 1(R2) // 411000b8 + MOVW R1, 4(R2) // 410400b9 + MOVD R1, 1(R2) // 411000f8 + MOVD R1, 8(R2) // 410400f9 + MOVD ZR, (R1) + MOVW ZR, (R1) + FMOVS F1, 1(R2) // 411000bc + FMOVS F1, 4(R2) // 410400bd + FMOVS F20, (R0) // 140000bd + FMOVD F1, 1(R2) // 411000fc + FMOVD F1, 8(R2) // 410400fd + FMOVD F20, (R2) // 540000fd + FMOVQ F0, 32(R5)// a008803d + FMOVQ F10, 65520(R10) // 4afdbf3d + FMOVQ F11, 64(RSP) // eb13803d + FMOVQ F11, 8(R20) // 8b82803c + FMOVQ F11, 4(R20) // 8b42803c + + MOVB 1(R1), R2 // 22048039 + MOVH 1(R1), R2 // 22108078 + MOVH 2(R1), R2 // 22048079 + MOVW 1(R1), R2 // 221080b8 + MOVW 4(R1), R2 // 220480b9 + MOVD 1(R1), R2 // 221040f8 + MOVD 8(R1), R2 // 220440f9 + FMOVS (R0), F20 // 140040bd + FMOVS 1(R1), F2 // 221040bc + FMOVS 4(R1), F2 // 220440bd + FMOVD 1(R1), F2 // 221040fc + FMOVD 8(R1), F2 // 220440fd + FMOVQ 32(R5), F2 // a208c03d + FMOVQ 65520(R10), F10 // 4afdff3d + FMOVQ 64(RSP), F11 // eb13c03d + +// medium offsets that either fit a single instruction or can use add+ldr/str + MOVD -4095(R17), R3 // 3bfe3fd1630340f9 + MOVD -391(R17), R3 // 3b1e06d1630340f9 + MOVD -257(R17), R3 // 3b0604d1630340f9 + MOVD -256(R17), R3 // 230250f8 + MOVD 255(R17), R3 // 23f24ff8 + MOVD 256(R17), R3 // 238240f9 + MOVD 257(R17), R3 // 
3b060491630340f9 + MOVD 391(R17), R3 // 3b1e0691630340f9 + MOVD 4095(R17), R3 // 3bfe3f91630340f9 + + MOVD R0, -4095(R17) // 3bfe3fd1600300f9 + MOVD R0, -391(R17) // 3b1e06d1600300f9 + MOVD R0, -257(R17) // 3b0604d1600300f9 + MOVD R0, -256(R17) // 200210f8 + MOVD R0, 255(R17) // 20f20ff8 + MOVD R0, 256(R17) // 208200f9 + MOVD R0, 257(R17) // 3b060491600300f9 + MOVD R0, 391(R17) // 3b1e0691600300f9 + MOVD R0, 4095(R17) // 3bfe3f91600300f9 + MOVD R0, 4096(R17) // 200208f9 + MOVD R3, -4095(R17) // 3bfe3fd1630300f9 + MOVD R3, -391(R17) // 3b1e06d1630300f9 + MOVD R3, -257(R17) // 3b0604d1630300f9 + MOVD R3, -256(R17) // 230210f8 + MOVD R3, 255(R17) // 23f20ff8 + MOVD R3, 256(R17) // 238200f9 + MOVD R3, 257(R17) // 3b060491630300f9 + MOVD R3, 391(R17) // 3b1e0691630300f9 + MOVD R3, 4095(R17) // 3bfe3f91630300f9 + +// large aligned offset, use two instructions(add+ldr/str). + MOVB R1, 0x1001(R2) // MOVB R1, 4097(R2) // 5b04409161070039 + MOVB R1, 0xffffff(R2) // MOVB R1, 16777215(R2) // 5bfc7f9161ff3f39 + MOVH R1, 0x2002(R2) // MOVH R1, 8194(R2) // 5b08409161070079 + MOVH R1, 0x1000ffe(R2) // MOVH R1, 16781310(R2) // 5bfc7f9161ff3f79 + MOVW R1, 0x4004(R2) // MOVW R1, 16388(R2) // 5b104091610700b9 + MOVW R1, 0x1002ffc(R2) // MOVW R1, 16789500(R2) // 5bfc7f9161ff3fb9 + MOVD R1, 0x8008(R2) // MOVD R1, 32776(R2) // 5b204091610700f9 + MOVD R1, 0x1006ff8(R2) // MOVD R1, 16805880(R2) // 5bfc7f9161ff3ff9 + FMOVS F1, 0x4004(R2) // FMOVS F1, 16388(R2) // 5b104091610700bd + FMOVS F1, 0x1002ffc(R2) // FMOVS F1, 16789500(R2) // 5bfc7f9161ff3fbd + FMOVD F1, 0x8008(R2) // FMOVD F1, 32776(R2) // 5b204091610700fd + FMOVD F1, 0x1006ff8(R2) // FMOVD F1, 16805880(R2) // 5bfc7f9161ff3ffd + + MOVB 0x1001(R1), R2 // MOVB 4097(R1), R2 // 3b04409162078039 + MOVB 0xffffff(R1), R2 // MOVB 16777215(R1), R2 // 3bfc7f9162ffbf39 + MOVH 0x2002(R1), R2 // MOVH 8194(R1), R2 // 3b08409162078079 + MOVH 0x1000ffe(R1), R2 // MOVH 16781310(R1), R2 // 3bfc7f9162ffbf79 + MOVW 0x4004(R1), R2 // MOVW 16388(R1), R2 
// 3b104091620780b9 + MOVW 0x1002ffc(R1), R2 // MOVW 16789500(R1), R2 // 3bfc7f9162ffbfb9 + MOVD 0x8008(R1), R2 // MOVD 32776(R1), R2 // 3b204091620740f9 + MOVD 0x1006ff8(R1), R2 // MOVD 16805880(R1), R2 // 3bfc7f9162ff7ff9 + FMOVS 0x4004(R1), F2 // FMOVS 16388(R1), F2 // 3b104091620740bd + FMOVS 0x1002ffc(R1), F2 // FMOVS 16789500(R1), F2 // 3bfc7f9162ff7fbd + FMOVD 0x8008(R1), F2 // FMOVD 32776(R1), F2 // 3b204091620740fd + FMOVD 0x1006ff8(R1), F2 // FMOVD 16805880(R1), F2 // 3bfc7f9162ff7ffd + +// very large or unaligned offset uses constant pool. +// the encoding cannot be checked as the address of the constant pool is unknown. +// here we only test that they can be assembled. + MOVB R1, 0x1000000(R2) // MOVB R1, 16777216(R2) + MOVB R1, 0x44332211(R2) // MOVB R1, 1144201745(R2) + MOVH R1, 0x1001000(R2) // MOVH R1, 16781312(R2) + MOVH R1, 0x44332211(R2) // MOVH R1, 1144201745(R2) + MOVW R1, 0x1003000(R2) // MOVW R1, 16789504(R2) + MOVW R1, 0x44332211(R2) // MOVW R1, 1144201745(R2) + MOVD R1, 0x1007000(R2) // MOVD R1, 16805888(R2) + MOVD R1, 0x44332211(R2) // MOVD R1, 1144201745(R2) + FMOVS F1, 0x1003000(R2) // FMOVS F1, 16789504(R2) + FMOVS F1, 0x44332211(R2) // FMOVS F1, 1144201745(R2) + FMOVD F1, 0x1007000(R2) // FMOVD F1, 16805888(R2) + FMOVD F1, 0x44332211(R2) // FMOVD F1, 1144201745(R2) + + MOVB 0x1000000(R1), R2 // MOVB 16777216(R1), R2 + MOVB 0x44332211(R1), R2 // MOVB 1144201745(R1), R2 + MOVH 0x1000000(R1), R2 // MOVH 16777216(R1), R2 + MOVH 0x44332211(R1), R2 // MOVH 1144201745(R1), R2 + MOVW 0x1000000(R1), R2 // MOVW 16777216(R1), R2 + MOVW 0x44332211(R1), R2 // MOVW 1144201745(R1), R2 + MOVD 0x1000000(R1), R2 // MOVD 16777216(R1), R2 + MOVD 0x44332211(R1), R2 // MOVD 1144201745(R1), R2 + FMOVS 0x1000000(R1), F2 // FMOVS 16777216(R1), F2 + FMOVS 0x44332211(R1), F2 // FMOVS 1144201745(R1), F2 + FMOVD 0x1000000(R1), F2 // FMOVD 16777216(R1), F2 + FMOVD 0x44332211(R1), F2 // FMOVD 1144201745(R1), F2 + +// shifted or extended register offset. 
+ MOVD (R2)(R6.SXTW), R4 // 44c866f8 + MOVD (R3)(R6), R5 // 656866f8 + MOVD (R3)(R6*1), R5 // 656866f8 + MOVD (R2)(R6), R4 // 446866f8 + MOVWU (R19)(R20<<2), R20 // 747a74b8 + MOVD (R2)(R3<<0), R1 // 416863f8 + MOVD (R2)(R6<<3), R4 // 447866f8 + MOVD (R3)(R7.SXTX<<3), R8 // 68f867f8 + MOVWU (R5)(R4.UXTW), R10 // aa4864b8 + MOVBU (R3)(R9.UXTW), R8 // 68486938 + MOVBU (R5)(R8), R10 // aa686838 + MOVHU (R2)(R7.SXTW<<1), R11 // 4bd86778 + MOVHU (R1)(R2<<1), R5 // 25786278 + MOVB (R9)(R3.UXTW), R6 // 2649a338 + MOVB (R10)(R6), R15 // 4f69a638 + MOVB (R29)(R30<<0), R14 // ae6bbe38 + MOVB (R29)(R30), R14 // ae6bbe38 + MOVH (R5)(R7.SXTX<<1), R19 // b3f8a778 + MOVH (R8)(R4<<1), R10 // 0a79a478 + MOVW (R9)(R8.SXTW<<2), R19 // 33d9a8b8 + MOVW (R1)(R4.SXTX), R11 // 2be8a4b8 + MOVW (R1)(R4.SXTX), ZR // 3fe8a4b8 + MOVW (R2)(R5), R12 // 4c68a5b8 + FMOVS (R2)(R6), F4 // 446866bc + FMOVS (R2)(R6<<2), F4 // 447866bc + FMOVD (R2)(R6), F4 // 446866fc + FMOVD (R2)(R6<<3), F4 // 447866fc + + MOVD R5, (R2)(R6<<3) // 457826f8 + MOVD R9, (R6)(R7.SXTX<<3) // c9f827f8 + MOVD ZR, (R6)(R7.SXTX<<3) // dff827f8 + MOVW R8, (R2)(R3.UXTW<<2) // 485823b8 + MOVW R7, (R3)(R4.SXTW) // 67c824b8 + MOVB R4, (R2)(R6.SXTX) // 44e82638 + MOVB R8, (R3)(R9.UXTW) // 68482938 + MOVB R10, (R5)(R8) // aa682838 + MOVB R10, (R5)(R8*1) // aa682838 + MOVH R11, (R2)(R7.SXTW<<1) // 4bd82778 + MOVH R5, (R1)(R2<<1) // 25782278 + MOVH R7, (R2)(R5.SXTX<<1) // 47f82578 + MOVH R8, (R3)(R6.UXTW) // 68482678 + MOVB R4, (R2)(R6.SXTX) // 44e82638 + FMOVS F4, (R2)(R6) // 446826bc + FMOVS F4, (R2)(R6<<2) // 447826bc + FMOVD F4, (R2)(R6) // 446826fc + FMOVD F4, (R2)(R6<<3) // 447826fc + +// vmov + VMOV V8.S[1], R1 // 013d0c0e + VMOV V0.D[0], R11 // 0b3c084e + VMOV V0.D[1], R11 // 0b3c184e + VMOV R20, V1.S[0] // 811e044e + VMOV R20, V1.S[1] // 811e0c4e + VMOV R1, V9.H4 // 290c020e + VDUP R1, V9.H4 // 290c020e + VMOV R22, V11.D2 // cb0e084e + VDUP R22, V11.D2 // cb0e084e + VMOV V2.B16, V4.B16 // 441ca24e + VMOV V20.S[0], V20 // 
9406045e + VDUP V20.S[0], V20 // 9406045e + VMOV V12.D[0], V12.D[1] // 8c05186e + VMOV V10.S[0], V12.S[1] // 4c050c6e + VMOV V9.H[0], V12.H[1] // 2c05066e + VMOV V8.B[0], V12.B[1] // 0c05036e + VMOV V8.B[7], V4.B[8] // 043d116e + +// CBZ +again: + CBZ R1, again // CBZ R1 + +// conditional operations + CSET GT, R1 // e1d79f9a + CSETW HI, R2 // e2979f1a + CSEL LT, R1, R2, ZR // 3fb0829a + CSELW LT, R2, R3, R4 // 44b0831a + CSINC GT, R1, ZR, R3 // 23c49f9a + CSNEG MI, R1, R2, R3 // 234482da + CSINV CS, R1, R2, R3 // CSINV HS, R1, R2, R3 // 232082da + CSINV HS, R1, R2, R3 // 232082da + CSINVW MI, R2, ZR, R2 // 42409f5a + CINC EQ, R4, R9 // 8914849a + CINCW PL, R2, ZR // 5f44821a + CINV PL, R11, R22 // 76418bda + CINVW LS, R7, R13 // ed80875a + CNEG LS, R13, R7 // a7858dda + CNEGW EQ, R8, R13 // 0d15885a + +// atomic ops + LDARB (R25), R2 // 22ffdf08 + LDARH (R5), R7 // a7fcdf48 + LDAXPW (R10), (R20, R16) // 54c17f88 + LDAXP (R25), (R30, R11) // 3eaf7fc8 + LDAXRW (R0), R2 // 02fc5f88 + LDXPW (R24), (R23, R11) // 172f7f88 + LDXP (R0), (R16, R13) // 10347fc8 + STLRB R11, (R22) // cbfe9f08 + STLRH R16, (R23) // f0fe9f48 + STLXP (R6, R3), (R10), R2 // 468d22c8 + STLXPW (R6, R11), (R22), R21 // c6ae3588 + STLXRW R1, (R0), R3 // 01fc0388 + STXP (R1, R2), (R3), R10 // 61082ac8 + STXP (R1, R2), (RSP), R10 // e10b2ac8 + STXPW (R1, R2), (R3), R10 // 61082a88 + STXPW (R1, R2), (RSP), R10 // e10b2a88 + SWPAD R5, (R6), R7 // c780a5f8 + SWPAD R5, (RSP), R7 // e783a5f8 + SWPAW R5, (R6), R7 // c780a5b8 + SWPAW R5, (RSP), R7 // e783a5b8 + SWPAH R5, (R6), R7 // c780a578 + SWPAH R5, (RSP), R7 // e783a578 + SWPAB R5, (R6), R7 // c780a538 + SWPAB R5, (RSP), R7 // e783a538 + SWPALD R5, (R6), R7 // c780e5f8 + SWPALD R5, (RSP), R7 // e783e5f8 + SWPALW R5, (R6), R7 // c780e5b8 + SWPALW R5, (RSP), R7 // e783e5b8 + SWPALH R5, (R6), R7 // c780e578 + SWPALH R5, (RSP), R7 // e783e578 + SWPALB R5, (R6), R7 // c780e538 + SWPALB R5, (RSP), R7 // e783e538 + SWPD R5, (R6), R7 // c78025f8 + SWPD R5, 
(RSP), R7 // e78325f8 + SWPW R5, (R6), R7 // c78025b8 + SWPW R5, (RSP), R7 // e78325b8 + SWPH R5, (R6), R7 // c7802578 + SWPH R5, (RSP), R7 // e7832578 + SWPB R5, (R6), R7 // c7802538 + SWPB R5, (RSP), R7 // e7832538 + SWPLD R5, (R6), R7 // c78065f8 + SWPLD R5, (RSP), R7 // e78365f8 + SWPLW R5, (R6), R7 // c78065b8 + SWPLW R5, (RSP), R7 // e78365b8 + SWPLH R5, (R6), R7 // c7806578 + SWPLH R5, (RSP), R7 // e7836578 + SWPLB R5, (R6), R7 // c7806538 + SWPLB R5, (RSP), R7 // e7836538 + LDADDAD R5, (R6), R7 // c700a5f8 + LDADDAD R5, (RSP), R7 // e703a5f8 + LDADDAW R5, (R6), R7 // c700a5b8 + LDADDAW R5, (RSP), R7 // e703a5b8 + LDADDAH R5, (R6), R7 // c700a578 + LDADDAH R5, (RSP), R7 // e703a578 + LDADDAB R5, (R6), R7 // c700a538 + LDADDAB R5, (RSP), R7 // e703a538 + LDADDALD R5, (R6), R7 // c700e5f8 + LDADDALD R5, (RSP), R7 // e703e5f8 + LDADDALW R5, (R6), R7 // c700e5b8 + LDADDALW R5, (RSP), R7 // e703e5b8 + LDADDALH R5, (R6), R7 // c700e578 + LDADDALH R5, (RSP), R7 // e703e578 + LDADDALB R5, (R6), R7 // c700e538 + LDADDALB R5, (RSP), R7 // e703e538 + LDADDD R5, (R6), R7 // c70025f8 + LDADDD R5, (RSP), R7 // e70325f8 + LDADDW R5, (R6), R7 // c70025b8 + LDADDW R5, (RSP), R7 // e70325b8 + LDADDH R5, (R6), R7 // c7002578 + LDADDH R5, (RSP), R7 // e7032578 + LDADDB R5, (R6), R7 // c7002538 + LDADDB R5, (RSP), R7 // e7032538 + LDADDLD R5, (R6), R7 // c70065f8 + LDADDLD R5, (RSP), R7 // e70365f8 + LDADDLW R5, (R6), R7 // c70065b8 + LDADDLW R5, (RSP), R7 // e70365b8 + LDADDLH R5, (R6), R7 // c7006578 + LDADDLH R5, (RSP), R7 // e7036578 + LDADDLB R5, (R6), R7 // c7006538 + LDADDLB R5, (RSP), R7 // e7036538 + LDCLRAD R5, (R6), R7 // c710a5f8 + LDCLRAD R5, (RSP), R7 // e713a5f8 + LDCLRAW R5, (R6), R7 // c710a5b8 + LDCLRAW R5, (RSP), R7 // e713a5b8 + LDCLRAH R5, (R6), R7 // c710a578 + LDCLRAH R5, (RSP), R7 // e713a578 + LDCLRAB R5, (R6), R7 // c710a538 + LDCLRAB R5, (RSP), R7 // e713a538 + LDCLRALD R5, (R6), R7 // c710e5f8 + LDCLRALD R5, (RSP), R7 // e713e5f8 + LDCLRALW R5, (R6), 
R7 // c710e5b8 + LDCLRALW R5, (RSP), R7 // e713e5b8 + LDCLRALH R5, (R6), R7 // c710e578 + LDCLRALH R5, (RSP), R7 // e713e578 + LDCLRALB R5, (R6), R7 // c710e538 + LDCLRALB R5, (RSP), R7 // e713e538 + LDCLRD R5, (R6), R7 // c71025f8 + LDCLRD R5, (RSP), R7 // e71325f8 + LDCLRW R5, (R6), R7 // c71025b8 + LDCLRW R5, (RSP), R7 // e71325b8 + LDCLRH R5, (R6), R7 // c7102578 + LDCLRH R5, (RSP), R7 // e7132578 + LDCLRB R5, (R6), R7 // c7102538 + LDCLRB R5, (RSP), R7 // e7132538 + LDCLRLD R5, (R6), R7 // c71065f8 + LDCLRLD R5, (RSP), R7 // e71365f8 + LDCLRLW R5, (R6), R7 // c71065b8 + LDCLRLW R5, (RSP), R7 // e71365b8 + LDCLRLH R5, (R6), R7 // c7106578 + LDCLRLH R5, (RSP), R7 // e7136578 + LDCLRLB R5, (R6), R7 // c7106538 + LDCLRLB R5, (RSP), R7 // e7136538 + LDEORAD R5, (R6), R7 // c720a5f8 + LDEORAD R5, (RSP), R7 // e723a5f8 + LDEORAW R5, (R6), R7 // c720a5b8 + LDEORAW R5, (RSP), R7 // e723a5b8 + LDEORAH R5, (R6), R7 // c720a578 + LDEORAH R5, (RSP), R7 // e723a578 + LDEORAB R5, (R6), R7 // c720a538 + LDEORAB R5, (RSP), R7 // e723a538 + LDEORALD R5, (R6), R7 // c720e5f8 + LDEORALD R5, (RSP), R7 // e723e5f8 + LDEORALW R5, (R6), R7 // c720e5b8 + LDEORALW R5, (RSP), R7 // e723e5b8 + LDEORALH R5, (R6), R7 // c720e578 + LDEORALH R5, (RSP), R7 // e723e578 + LDEORALB R5, (R6), R7 // c720e538 + LDEORALB R5, (RSP), R7 // e723e538 + LDEORD R5, (R6), R7 // c72025f8 + LDEORD R5, (RSP), R7 // e72325f8 + LDEORW R5, (R6), R7 // c72025b8 + LDEORW R5, (RSP), R7 // e72325b8 + LDEORH R5, (R6), R7 // c7202578 + LDEORH R5, (RSP), R7 // e7232578 + LDEORB R5, (R6), R7 // c7202538 + LDEORB R5, (RSP), R7 // e7232538 + LDEORLD R5, (R6), R7 // c72065f8 + LDEORLD R5, (RSP), R7 // e72365f8 + LDEORLW R5, (R6), R7 // c72065b8 + LDEORLW R5, (RSP), R7 // e72365b8 + LDEORLH R5, (R6), R7 // c7206578 + LDEORLH R5, (RSP), R7 // e7236578 + LDEORLB R5, (R6), R7 // c7206538 + LDEORLB R5, (RSP), R7 // e7236538 + LDADDD R5, (R6), ZR // df0025f8 + LDADDW R5, (R6), ZR // df0025b8 + LDADDH R5, (R6), ZR // df002578 + 
LDADDB R5, (R6), ZR // df002538 + LDADDLD R5, (R6), ZR // df0065f8 + LDADDLW R5, (R6), ZR // df0065b8 + LDADDLH R5, (R6), ZR // df006578 + LDADDLB R5, (R6), ZR // df006538 + LDCLRD R5, (R6), ZR // df1025f8 + LDCLRW R5, (R6), ZR // df1025b8 + LDCLRH R5, (R6), ZR // df102578 + LDCLRB R5, (R6), ZR // df102538 + LDCLRLD R5, (R6), ZR // df1065f8 + LDCLRLW R5, (R6), ZR // df1065b8 + LDCLRLH R5, (R6), ZR // df106578 + LDCLRLB R5, (R6), ZR // df106538 + LDEORD R5, (R6), ZR // df2025f8 + LDEORW R5, (R6), ZR // df2025b8 + LDEORH R5, (R6), ZR // df202578 + LDEORB R5, (R6), ZR // df202538 + LDEORLD R5, (R6), ZR // df2065f8 + LDEORLW R5, (R6), ZR // df2065b8 + LDEORLH R5, (R6), ZR // df206578 + LDEORLB R5, (R6), ZR // df206538 + LDORD R5, (R6), ZR // df3025f8 + LDORW R5, (R6), ZR // df3025b8 + LDORH R5, (R6), ZR // df302578 + LDORB R5, (R6), ZR // df302538 + LDORLD R5, (R6), ZR // df3065f8 + LDORLW R5, (R6), ZR // df3065b8 + LDORLH R5, (R6), ZR // df306578 + LDORLB R5, (R6), ZR // df306538 + LDORAD R5, (R6), R7 // c730a5f8 + LDORAD R5, (RSP), R7 // e733a5f8 + LDORAW R5, (R6), R7 // c730a5b8 + LDORAW R5, (RSP), R7 // e733a5b8 + LDORAH R5, (R6), R7 // c730a578 + LDORAH R5, (RSP), R7 // e733a578 + LDORAB R5, (R6), R7 // c730a538 + LDORAB R5, (RSP), R7 // e733a538 + LDORALD R5, (R6), R7 // c730e5f8 + LDORALD R5, (RSP), R7 // e733e5f8 + LDORALW R5, (R6), R7 // c730e5b8 + LDORALW R5, (RSP), R7 // e733e5b8 + LDORALH R5, (R6), R7 // c730e578 + LDORALH R5, (RSP), R7 // e733e578 + LDORALB R5, (R6), R7 // c730e538 + LDORALB R5, (RSP), R7 // e733e538 + LDORD R5, (R6), R7 // c73025f8 + LDORD R5, (RSP), R7 // e73325f8 + LDORW R5, (R6), R7 // c73025b8 + LDORW R5, (RSP), R7 // e73325b8 + LDORH R5, (R6), R7 // c7302578 + LDORH R5, (RSP), R7 // e7332578 + LDORB R5, (R6), R7 // c7302538 + LDORB R5, (RSP), R7 // e7332538 + LDORLD R5, (R6), R7 // c73065f8 + LDORLD R5, (RSP), R7 // e73365f8 + LDORLW R5, (R6), R7 // c73065b8 + LDORLW R5, (RSP), R7 // e73365b8 + LDORLH R5, (R6), R7 // c7306578 + 
LDORLH R5, (RSP), R7 // e7336578 + LDORLB R5, (R6), R7 // c7306538 + LDORLB R5, (RSP), R7 // e7336538 + CASD R1, (R2), ZR // 5f7ca1c8 + CASW R1, (RSP), ZR // ff7fa188 + CASB ZR, (R5), R3 // a37cbf08 + CASH R3, (RSP), ZR // ff7fa348 + CASW R5, (R7), R6 // e67ca588 + CASLD ZR, (RSP), R8 // e8ffbfc8 + CASLW R9, (R10), ZR // 5ffda988 + CASAD R7, (R11), R15 // 6f7de7c8 + CASAW R10, (RSP), R19 // f37fea88 + CASALD R5, (R6), R7 // c7fce5c8 + CASALD R5, (RSP), R7 // e7ffe5c8 + CASALW R5, (R6), R7 // c7fce588 + CASALW R5, (RSP), R7 // e7ffe588 + CASALH ZR, (R5), R8 // a8fcff48 + CASALB R8, (R9), ZR // 3ffde808 + CASPD (R30, ZR), (RSP), (R8, R9) // e87f3e48 + CASPW (R6, R7), (R8), (R4, R5) // 047d2608 + CASPD (R2, R3), (R2), (R8, R9) // 487c2248 + +// RET + RET + RET foo(SB) + +// B/BL/B.cond cases, and canonical names JMP, CALL. + BL 1(PC) // CALL 1(PC) + BL (R2) // CALL (R2) + BL foo(SB) // CALL foo(SB) + BL bar<>(SB) // CALL bar<>(SB) + B foo(SB) // JMP foo(SB) + BEQ 1(PC) + BEQ 2(PC) + TBZ $1, R1, 2(PC) + TBNZ $2, R2, 2(PC) + JMP foo(SB) + CALL foo(SB) + +// ADR + ADR next, R11 // ADR R11 // 2b000010 +next: + NOP + ADR -2(PC), R10 // 0a000010 + ADR 2(PC), R16 // 10000010 + ADR -26(PC), R1 // 01000010 + ADR 12(PC), R2 // 02000010 + ADRP -2(PC), R10 // 0a000090 + ADRP 2(PC), R16 // 10000090 + ADRP -26(PC), R1 // 01000090 + ADRP 12(PC), R2 // 02000090 + +// LDP/STP + LDP (R0), (R0, R1) // 000440a9 + LDP (R0), (R1, R2) // 010840a9 + LDP 8(R0), (R1, R2) // 018840a9 + LDP -8(R0), (R1, R2) // 01887fa9 + LDP 11(R0), (R1, R2) // 1b2c0091610b40a9 + LDP 1024(R0), (R1, R2) // 1b001091610b40a9 + LDP.W 8(R0), (R1, R2) // 0188c0a9 + LDP.P 8(R0), (R1, R2) // 0188c0a8 + LDP (RSP), (R1, R2) // e10b40a9 + LDP 8(RSP), (R1, R2) // e18b40a9 + LDP -8(RSP), (R1, R2) // e18b7fa9 + LDP 11(RSP), (R1, R2) // fb2f0091610b40a9 + LDP 1024(RSP), (R1, R2) // fb031091610b40a9 + LDP.W 8(RSP), (R1, R2) // e18bc0a9 + LDP.P 8(RSP), (R1, R2) // e18bc0a8 + LDP -31(R0), (R1, R2) // 1b7c00d1610b40a9 + LDP 
-4(R0), (R1, R2) // 1b1000d1610b40a9 + LDP -8(R0), (R1, R2) // 01887fa9 + LDP x(SB), (R1, R2) + LDP x+8(SB), (R1, R2) + LDP 8(R1), (ZR, R2) // 3f8840a9 + LDPW -5(R0), (R1, R2) // 1b1400d1610b4029 + LDPW (R0), (R1, R2) // 01084029 + LDPW 4(R0), (R1, R2) // 01884029 + LDPW -4(R0), (R1, R2) // 01887f29 + LDPW.W 4(R0), (R1, R2) // 0188c029 + LDPW.P 4(R0), (R1, R2) // 0188c028 + LDPW 11(R0), (R1, R2) // 1b2c0091610b4029 + LDPW 1024(R0), (R1, R2) // 1b001091610b4029 + LDPW (RSP), (R1, R2) // e10b4029 + LDPW 4(RSP), (R1, R2) // e18b4029 + LDPW -4(RSP), (R1, R2) // e18b7f29 + LDPW.W 4(RSP), (R1, R2) // e18bc029 + LDPW.P 4(RSP), (R1, R2) // e18bc028 + LDPW 11(RSP), (R1, R2) // fb2f0091610b4029 + LDPW 1024(RSP), (R1, R2) // fb031091610b4029 + LDPW x(SB), (R1, R2) + LDPW x+8(SB), (R1, R2) + LDPW 8(R1), (ZR, R2) // 3f084129 + LDPSW (R0), (R1, R2) // 01084069 + LDPSW 4(R0), (R1, R2) // 01884069 + LDPSW -4(R0), (R1, R2) // 01887f69 + LDPSW.W 4(R0), (R1, R2) // 0188c069 + LDPSW.P 4(R0), (R1, R2) // 0188c068 + LDPSW 11(R0), (R1, R2) // 1b2c0091610b4069 + LDPSW 1024(R0), (R1, R2) // 1b001091610b4069 + LDPSW (RSP), (R1, R2) // e10b4069 + LDPSW 4(RSP), (R1, R2) // e18b4069 + LDPSW -4(RSP), (R1, R2) // e18b7f69 + LDPSW.W 4(RSP), (R1, R2) // e18bc069 + LDPSW.P 4(RSP), (R1, R2) // e18bc068 + LDPSW 11(RSP), (R1, R2) // fb2f0091610b4069 + LDPSW 1024(RSP), (R1, R2) // fb031091610b4069 + LDPSW x(SB), (R1, R2) + LDPSW x+8(SB), (R1, R2) + LDPSW 8(R1), (ZR, R2) // 3f084169 + STP (R3, R4), (R5) // a31000a9 + STP (R3, R4), 8(R5) // a39000a9 + STP.W (R3, R4), 8(R5) // a39080a9 + STP.P (R3, R4), 8(R5) // a39080a8 + STP (R3, R4), -8(R5) // a3903fa9 + STP (R3, R4), -4(R5) // bb1000d1631300a9 + STP (R3, R4), 11(R0) // 1b2c0091631300a9 + STP (R3, R4), 1024(R0) // 1b001091631300a9 + STP (R3, R4), (RSP) // e31300a9 + STP (R3, R4), 8(RSP) // e39300a9 + STP.W (R3, R4), 8(RSP) // e39380a9 + STP.P (R3, R4), 8(RSP) // e39380a8 + STP (R3, R4), -8(RSP) // e3933fa9 + STP (R3, R4), 11(RSP) // fb2f0091631300a9 + 
STP (R3, R4), 1024(RSP) // fb031091631300a9 + STP (R3, R4), x(SB) + STP (R3, R4), x+8(SB) + STPW (R3, R4), (R5) // a3100029 + STPW (R3, R4), 4(R5) // a3900029 + STPW.W (R3, R4), 4(R5) // a3908029 + STPW.P (R3, R4), 4(R5) // a3908028 + STPW (R3, R4), -4(R5) // a3903f29 + STPW (R3, R4), -5(R5) // bb1400d163130029 + STPW (R3, R4), 11(R0) // 1b2c009163130029 + STPW (R3, R4), 1024(R0) // 1b00109163130029 + STPW (R3, R4), (RSP) // e3130029 + STPW (R3, R4), 4(RSP) // e3930029 + STPW.W (R3, R4), 4(RSP) // e3938029 + STPW.P (R3, R4), 4(RSP) // e3938028 + STPW (R3, R4), -4(RSP) // e3933f29 + STPW (R3, R4), 11(RSP) // fb2f009163130029 + STPW (R3, R4), 1024(RSP) // fb03109163130029 + STPW (R3, R4), x(SB) + STPW (R3, R4), x+8(SB) + +// bit field operation + BFI $0, R1, $1, R2 // 220040b3 + BFIW $0, R1, $1, R2 // 22000033 + SBFIZ $0, R1, $1, R2 // 22004093 + SBFIZW $0, R1, $1, R2 // 22000013 + UBFIZ $0, R1, $1, R2 // 220040d3 + UBFIZW $0, R1, $1, R2 // 22000053 + +// FSTPD/FSTPS/FLDPD/FLDPS + FLDPD (R0), (F1, F2) // 0108406d + FLDPD 8(R0), (F1, F2) // 0188406d + FLDPD -8(R0), (F1, F2) // 01887f6d + FLDPD 11(R0), (F1, F2) // 1b2c0091610b406d + FLDPD 1024(R0), (F1, F2) // 1b001091610b406d + FLDPD.W 8(R0), (F1, F2) // 0188c06d + FLDPD.P 8(R0), (F1, F2) // 0188c06c + FLDPD (RSP), (F1, F2) // e10b406d + FLDPD 8(RSP), (F1, F2) // e18b406d + FLDPD -8(RSP), (F1, F2) // e18b7f6d + FLDPD 11(RSP), (F1, F2) // fb2f0091610b406d + FLDPD 1024(RSP), (F1, F2) // fb031091610b406d + FLDPD.W 8(RSP), (F1, F2) // e18bc06d + FLDPD.P 8(RSP), (F1, F2) // e18bc06c + FLDPD -31(R0), (F1, F2) // 1b7c00d1610b406d + FLDPD -4(R0), (F1, F2) // 1b1000d1610b406d + FLDPD -8(R0), (F1, F2) // 01887f6d + FLDPD x(SB), (F1, F2) + FLDPD x+8(SB), (F1, F2) + FLDPS -5(R0), (F1, F2) // 1b1400d1610b402d + FLDPS (R0), (F1, F2) // 0108402d + FLDPS 4(R0), (F1, F2) // 0188402d + FLDPS -4(R0), (F1, F2) // 01887f2d + FLDPS.W 4(R0), (F1, F2) // 0188c02d + FLDPS.P 4(R0), (F1, F2) // 0188c02c + FLDPS 11(R0), (F1, F2) // 
1b2c0091610b402d + FLDPS 1024(R0), (F1, F2) // 1b001091610b402d + FLDPS (RSP), (F1, F2) // e10b402d + FLDPS 4(RSP), (F1, F2) // e18b402d + FLDPS -4(RSP), (F1, F2) // e18b7f2d + FLDPS.W 4(RSP), (F1, F2) // e18bc02d + FLDPS.P 4(RSP), (F1, F2) // e18bc02c + FLDPS 11(RSP), (F1, F2) // fb2f0091610b402d + FLDPS 1024(RSP), (F1, F2) // fb031091610b402d + FLDPS x(SB), (F1, F2) + FLDPS x+8(SB), (F1, F2) + FSTPD (F3, F4), (R5) // a310006d + FSTPD (F3, F4), 8(R5) // a390006d + FSTPD.W (F3, F4), 8(R5) // a390806d + FSTPD.P (F3, F4), 8(R5) // a390806c + FSTPD (F3, F4), -8(R5) // a3903f6d + FSTPD (F3, F4), -4(R5) // bb1000d16313006d + FSTPD (F3, F4), 11(R0) // 1b2c00916313006d + FSTPD (F3, F4), 1024(R0) // 1b0010916313006d + FSTPD (F3, F4), (RSP) // e313006d + FSTPD (F3, F4), 8(RSP) // e393006d + FSTPD.W (F3, F4), 8(RSP) // e393806d + FSTPD.P (F3, F4), 8(RSP) // e393806c + FSTPD (F3, F4), -8(RSP) // e3933f6d + FSTPD (F3, F4), 11(RSP) // fb2f00916313006d + FSTPD (F3, F4), 1024(RSP) // fb0310916313006d + FSTPD (F3, F4), x(SB) + FSTPD (F3, F4), x+8(SB) + FSTPS (F3, F4), (R5) // a310002d + FSTPS (F3, F4), 4(R5) // a390002d + FSTPS.W (F3, F4), 4(R5) // a390802d + FSTPS.P (F3, F4), 4(R5) // a390802c + FSTPS (F3, F4), -4(R5) // a3903f2d + FSTPS (F3, F4), -5(R5) // bb1400d16313002d + FSTPS (F3, F4), 11(R0) // 1b2c00916313002d + FSTPS (F3, F4), 1024(R0) // 1b0010916313002d + FSTPS (F3, F4), (RSP) // e313002d + FSTPS (F3, F4), 4(RSP) // e393002d + FSTPS.W (F3, F4), 4(RSP) // e393802d + FSTPS.P (F3, F4), 4(RSP) // e393802c + FSTPS (F3, F4), -4(RSP) // e3933f2d + FSTPS (F3, F4), 11(RSP) // fb2f00916313002d + FSTPS (F3, F4), 1024(RSP) // fb0310916313002d + FSTPS (F3, F4), x(SB) + FSTPS (F3, F4), x+8(SB) + +// FLDPQ/FSTPQ + FLDPQ -4000(R0), (F1, F2) // 1b803ed1610b40ad + FLDPQ -1024(R0), (F1, F2) // 010860ad + FLDPQ (R0), (F1, F2) // 010840ad + FLDPQ 16(R0), (F1, F2) // 018840ad + FLDPQ -16(R0), (F1, F2) // 01887fad + FLDPQ.W 32(R0), (F1, F2) // 0108c1ad + FLDPQ.P 32(R0), (F1, F2) // 0108c1ac 
+ FLDPQ 11(R0), (F1, F2) // 1b2c0091610b40ad + FLDPQ 1024(R0), (F1, F2) // 1b001091610b40ad + FLDPQ 4104(R0), (F1, F2) + FLDPQ -4000(RSP), (F1, F2) // fb833ed1610b40ad + FLDPQ -1024(RSP), (F1, F2) // e10b60ad + FLDPQ (RSP), (F1, F2) // e10b40ad + FLDPQ 16(RSP), (F1, F2) // e18b40ad + FLDPQ -16(RSP), (F1, F2) // e18b7fad + FLDPQ.W 32(RSP), (F1, F2) // e10bc1ad + FLDPQ.P 32(RSP), (F1, F2) // e10bc1ac + FLDPQ 11(RSP), (F1, F2) // fb2f0091610b40ad + FLDPQ 1024(RSP), (F1, F2) // fb031091610b40ad + FLDPQ 4104(RSP), (F1, F2) + FLDPQ -31(R0), (F1, F2) // 1b7c00d1610b40ad + FLDPQ -4(R0), (F1, F2) // 1b1000d1610b40ad + FLDPQ x(SB), (F1, F2) + FLDPQ x+8(SB), (F1, F2) + FSTPQ (F3, F4), -4000(R5) // bb803ed1631300ad + FSTPQ (F3, F4), -1024(R5) // a31020ad + FSTPQ (F3, F4), (R5) // a31000ad + FSTPQ (F3, F4), 16(R5) // a39000ad + FSTPQ (F3, F4), -16(R5) // a3903fad + FSTPQ.W (F3, F4), 32(R5) // a31081ad + FSTPQ.P (F3, F4), 32(R5) // a31081ac + FSTPQ (F3, F4), 11(R5) // bb2c0091631300ad + FSTPQ (F3, F4), 1024(R5) // bb001091631300ad + FSTPQ (F3, F4), 4104(R5) + FSTPQ (F3, F4), -4000(RSP) // fb833ed1631300ad + FSTPQ (F3, F4), -1024(RSP) // e31320ad + FSTPQ (F3, F4), (RSP) // e31300ad + FSTPQ (F3, F4), 16(RSP) // e39300ad + FSTPQ (F3, F4), -16(RSP) // e3933fad + FSTPQ.W (F3, F4), 32(RSP) // e31381ad + FSTPQ.P (F3, F4), 32(RSP) // e31381ac + FSTPQ (F3, F4), 11(RSP) // fb2f0091631300ad + FSTPQ (F3, F4), 1024(RSP) // fb031091631300ad + FSTPQ (F3, F4), 4104(RSP) + FSTPQ (F3, F4), x(SB) + FSTPQ (F3, F4), x+8(SB) + +// System Register + MSR $1, SPSel // bf4100d5 + MSR $9, DAIFSet // df4903d5 + MSR $6, DAIFClr // ff4603d5 + MSR $0, CPACR_EL1 // 5f1018d5 + MRS ELR_EL1, R8 // 284038d5 + MSR R16, ELR_EL1 // 304018d5 + MSR R2, ACTLR_EL1 // 221018d5 + MRS TCR_EL1, R5 // 452038d5 + MRS PMEVCNTR15_EL0, R12 // ece93bd5 + MSR R20, PMEVTYPER26_EL0 // 54ef1bd5 + MSR R10, DBGBCR15_EL1 // aa0f10d5 + MRS ACTLR_EL1, R3 // 231038d5 + MSR R9, ACTLR_EL1 // 291018d5 + MRS AFSR0_EL1, R10 // 0a5138d5 + MSR R1, 
AFSR0_EL1 // 015118d5 + MRS AFSR0_EL1, R9 // 095138d5 + MSR R30, AFSR0_EL1 // 1e5118d5 + MRS AFSR1_EL1, R0 // 205138d5 + MSR R1, AFSR1_EL1 // 215118d5 + MRS AFSR1_EL1, R8 // 285138d5 + MSR R19, AFSR1_EL1 // 335118d5 + MRS AIDR_EL1, R11 // eb0039d5 + MRS AMAIR_EL1, R0 // 00a338d5 + MSR R22, AMAIR_EL1 // 16a318d5 + MRS AMAIR_EL1, R14 // 0ea338d5 + MSR R0, AMAIR_EL1 // 00a318d5 + MRS APDAKeyHi_EL1, R16 // 302238d5 + MSR R26, APDAKeyHi_EL1 // 3a2218d5 + MRS APDAKeyLo_EL1, R21 // 152238d5 + MSR R22, APDAKeyLo_EL1 // 162218d5 + MRS APDBKeyHi_EL1, R2 // 622238d5 + MSR R6, APDBKeyHi_EL1 // 662218d5 + MRS APDBKeyLo_EL1, R5 // 452238d5 + MSR R22, APDBKeyLo_EL1 // 562218d5 + MRS APGAKeyHi_EL1, R22 // 362338d5 + MSR R5, APGAKeyHi_EL1 // 252318d5 + MRS APGAKeyLo_EL1, R16 // 102338d5 + MSR R22, APGAKeyLo_EL1 // 162318d5 + MRS APIAKeyHi_EL1, R23 // 372138d5 + MSR R17, APIAKeyHi_EL1 // 312118d5 + MRS APIAKeyLo_EL1, R16 // 102138d5 + MSR R6, APIAKeyLo_EL1 // 062118d5 + MRS APIBKeyHi_EL1, R10 // 6a2138d5 + MSR R11, APIBKeyHi_EL1 // 6b2118d5 + MRS APIBKeyLo_EL1, R25 // 592138d5 + MSR R22, APIBKeyLo_EL1 // 562118d5 + MRS CCSIDR_EL1, R25 // 190039d5 + MRS CLIDR_EL1, R16 // 300039d5 + MRS CNTFRQ_EL0, R20 // 14e03bd5 + MSR R16, CNTFRQ_EL0 // 10e01bd5 + MRS CNTKCTL_EL1, R26 // 1ae138d5 + MSR R0, CNTKCTL_EL1 // 00e118d5 + MRS CNTP_CTL_EL0, R14 // 2ee23bd5 + MSR R17, CNTP_CTL_EL0 // 31e21bd5 + MRS CNTP_CVAL_EL0, R15 // 4fe23bd5 + MSR R8, CNTP_CVAL_EL0 // 48e21bd5 + MRS CNTP_TVAL_EL0, R6 // 06e23bd5 + MSR R29, CNTP_TVAL_EL0 // 1de21bd5 + MRS CNTP_CTL_EL0, R22 // 36e23bd5 + MSR R0, CNTP_CTL_EL0 // 20e21bd5 + MRS CNTP_CVAL_EL0, R9 // 49e23bd5 + MSR R4, CNTP_CVAL_EL0 // 44e21bd5 + MRS CNTP_TVAL_EL0, R27 // 1be23bd5 + MSR R17, CNTP_TVAL_EL0 // 11e21bd5 + MRS CNTV_CTL_EL0, R27 // 3be33bd5 + MSR R2, CNTV_CTL_EL0 // 22e31bd5 + MRS CNTV_CVAL_EL0, R16 // 50e33bd5 + MSR R27, CNTV_CVAL_EL0 // 5be31bd5 + MRS CNTV_TVAL_EL0, R12 // 0ce33bd5 + MSR R19, CNTV_TVAL_EL0 // 13e31bd5 + MRS CNTV_CTL_EL0, R14 // 
2ee33bd5 + MSR R2, CNTV_CTL_EL0 // 22e31bd5 + MRS CNTV_CVAL_EL0, R8 // 48e33bd5 + MSR R26, CNTV_CVAL_EL0 // 5ae31bd5 + MRS CNTV_TVAL_EL0, R6 // 06e33bd5 + MSR R19, CNTV_TVAL_EL0 // 13e31bd5 + MRS CNTKCTL_EL1, R16 // 10e138d5 + MSR R26, CNTKCTL_EL1 // 1ae118d5 + MRS CNTPCT_EL0, R9 // 29e03bd5 + MRS CNTPS_CTL_EL1, R30 // 3ee23fd5 + MSR R26, CNTPS_CTL_EL1 // 3ae21fd5 + MRS CNTPS_CVAL_EL1, R8 // 48e23fd5 + MSR R26, CNTPS_CVAL_EL1 // 5ae21fd5 + MRS CNTPS_TVAL_EL1, R7 // 07e23fd5 + MSR R13, CNTPS_TVAL_EL1 // 0de21fd5 + MRS CNTP_CTL_EL0, R2 // 22e23bd5 + MSR R10, CNTP_CTL_EL0 // 2ae21bd5 + MRS CNTP_CVAL_EL0, R6 // 46e23bd5 + MSR R21, CNTP_CVAL_EL0 // 55e21bd5 + MRS CNTP_TVAL_EL0, R27 // 1be23bd5 + MSR R29, CNTP_TVAL_EL0 // 1de21bd5 + MRS CNTVCT_EL0, R13 // 4de03bd5 + MRS CNTV_CTL_EL0, R30 // 3ee33bd5 + MSR R19, CNTV_CTL_EL0 // 33e31bd5 + MRS CNTV_CVAL_EL0, R27 // 5be33bd5 + MSR R24, CNTV_CVAL_EL0 // 58e31bd5 + MRS CNTV_TVAL_EL0, R24 // 18e33bd5 + MSR R5, CNTV_TVAL_EL0 // 05e31bd5 + MRS CONTEXTIDR_EL1, R15 // 2fd038d5 + MSR R27, CONTEXTIDR_EL1 // 3bd018d5 + MRS CONTEXTIDR_EL1, R29 // 3dd038d5 + MSR R24, CONTEXTIDR_EL1 // 38d018d5 + MRS CPACR_EL1, R10 // 4a1038d5 + MSR R14, CPACR_EL1 // 4e1018d5 + MRS CPACR_EL1, R27 // 5b1038d5 + MSR R22, CPACR_EL1 // 561018d5 + MRS CSSELR_EL1, R3 // 03003ad5 + MSR R4, CSSELR_EL1 // 04001ad5 + MRS CTR_EL0, R15 // 2f003bd5 + MRS CurrentEL, R1 // 414238d5 + MRS DAIF, R24 // 38423bd5 + MSR R9, DAIF // 29421bd5 + MRS DBGAUTHSTATUS_EL1, R5 // c57e30d5 + MRS DBGBCR0_EL1, R29 // bd0030d5 + MRS DBGBCR1_EL1, R13 // ad0130d5 + MRS DBGBCR2_EL1, R22 // b60230d5 + MRS DBGBCR3_EL1, R8 // a80330d5 + MRS DBGBCR4_EL1, R2 // a20430d5 + MRS DBGBCR5_EL1, R4 // a40530d5 + MRS DBGBCR6_EL1, R2 // a20630d5 + MRS DBGBCR7_EL1, R6 // a60730d5 + MRS DBGBCR8_EL1, R1 // a10830d5 + MRS DBGBCR9_EL1, R16 // b00930d5 + MRS DBGBCR10_EL1, R23 // b70a30d5 + MRS DBGBCR11_EL1, R3 // a30b30d5 + MRS DBGBCR12_EL1, R6 // a60c30d5 + MRS DBGBCR13_EL1, R16 // b00d30d5 + MRS 
DBGBCR14_EL1, R4 // a40e30d5 + MRS DBGBCR15_EL1, R9 // a90f30d5 + MSR R4, DBGBCR0_EL1 // a40010d5 + MSR R14, DBGBCR1_EL1 // ae0110d5 + MSR R7, DBGBCR2_EL1 // a70210d5 + MSR R12, DBGBCR3_EL1 // ac0310d5 + MSR R6, DBGBCR4_EL1 // a60410d5 + MSR R11, DBGBCR5_EL1 // ab0510d5 + MSR R6, DBGBCR6_EL1 // a60610d5 + MSR R13, DBGBCR7_EL1 // ad0710d5 + MSR R17, DBGBCR8_EL1 // b10810d5 + MSR R17, DBGBCR9_EL1 // b10910d5 + MSR R22, DBGBCR10_EL1 // b60a10d5 + MSR R16, DBGBCR11_EL1 // b00b10d5 + MSR R24, DBGBCR12_EL1 // b80c10d5 + MSR R29, DBGBCR13_EL1 // bd0d10d5 + MSR R1, DBGBCR14_EL1 // a10e10d5 + MSR R10, DBGBCR15_EL1 // aa0f10d5 + MRS DBGBVR0_EL1, R16 // 900030d5 + MRS DBGBVR1_EL1, R21 // 950130d5 + MRS DBGBVR2_EL1, R13 // 8d0230d5 + MRS DBGBVR3_EL1, R12 // 8c0330d5 + MRS DBGBVR4_EL1, R20 // 940430d5 + MRS DBGBVR5_EL1, R21 // 950530d5 + MRS DBGBVR6_EL1, R27 // 9b0630d5 + MRS DBGBVR7_EL1, R6 // 860730d5 + MRS DBGBVR8_EL1, R14 // 8e0830d5 + MRS DBGBVR9_EL1, R5 // 850930d5 + MRS DBGBVR10_EL1, R9 // 890a30d5 + MRS DBGBVR11_EL1, R25 // 990b30d5 + MRS DBGBVR12_EL1, R30 // 9e0c30d5 + MRS DBGBVR13_EL1, R1 // 810d30d5 + MRS DBGBVR14_EL1, R17 // 910e30d5 + MRS DBGBVR15_EL1, R25 // 990f30d5 + MSR R15, DBGBVR0_EL1 // 8f0010d5 + MSR R6, DBGBVR1_EL1 // 860110d5 + MSR R24, DBGBVR2_EL1 // 980210d5 + MSR R17, DBGBVR3_EL1 // 910310d5 + MSR R3, DBGBVR4_EL1 // 830410d5 + MSR R21, DBGBVR5_EL1 // 950510d5 + MSR R5, DBGBVR6_EL1 // 850610d5 + MSR R6, DBGBVR7_EL1 // 860710d5 + MSR R25, DBGBVR8_EL1 // 990810d5 + MSR R4, DBGBVR9_EL1 // 840910d5 + MSR R25, DBGBVR10_EL1 // 990a10d5 + MSR R17, DBGBVR11_EL1 // 910b10d5 + MSR R0, DBGBVR12_EL1 // 800c10d5 + MSR R5, DBGBVR13_EL1 // 850d10d5 + MSR R9, DBGBVR14_EL1 // 890e10d5 + MSR R12, DBGBVR15_EL1 // 8c0f10d5 + MRS DBGCLAIMCLR_EL1, R27 // db7930d5 + MSR R0, DBGCLAIMCLR_EL1 // c07910d5 + MRS DBGCLAIMSET_EL1, R7 // c77830d5 + MSR R13, DBGCLAIMSET_EL1 // cd7810d5 + MRS DBGDTRRX_EL0, R0 // 000533d5 + MSR R29, DBGDTRTX_EL0 // 1d0513d5 + MRS DBGDTR_EL0, R27 // 
1b0433d5 + MSR R30, DBGDTR_EL0 // 1e0413d5 + MRS DBGPRCR_EL1, R4 // 841430d5 + MSR R0, DBGPRCR_EL1 // 801410d5 + MRS DBGWCR0_EL1, R24 // f80030d5 + MRS DBGWCR1_EL1, R19 // f30130d5 + MRS DBGWCR2_EL1, R25 // f90230d5 + MRS DBGWCR3_EL1, R0 // e00330d5 + MRS DBGWCR4_EL1, R13 // ed0430d5 + MRS DBGWCR5_EL1, R8 // e80530d5 + MRS DBGWCR6_EL1, R22 // f60630d5 + MRS DBGWCR7_EL1, R11 // eb0730d5 + MRS DBGWCR8_EL1, R11 // eb0830d5 + MRS DBGWCR9_EL1, R3 // e30930d5 + MRS DBGWCR10_EL1, R17 // f10a30d5 + MRS DBGWCR11_EL1, R21 // f50b30d5 + MRS DBGWCR12_EL1, R10 // ea0c30d5 + MRS DBGWCR13_EL1, R22 // f60d30d5 + MRS DBGWCR14_EL1, R11 // eb0e30d5 + MRS DBGWCR15_EL1, R0 // e00f30d5 + MSR R24, DBGWCR0_EL1 // f80010d5 + MSR R8, DBGWCR1_EL1 // e80110d5 + MSR R17, DBGWCR2_EL1 // f10210d5 + MSR R29, DBGWCR3_EL1 // fd0310d5 + MSR R13, DBGWCR4_EL1 // ed0410d5 + MSR R22, DBGWCR5_EL1 // f60510d5 + MSR R3, DBGWCR6_EL1 // e30610d5 + MSR R4, DBGWCR7_EL1 // e40710d5 + MSR R7, DBGWCR8_EL1 // e70810d5 + MSR R29, DBGWCR9_EL1 // fd0910d5 + MSR R3, DBGWCR10_EL1 // e30a10d5 + MSR R11, DBGWCR11_EL1 // eb0b10d5 + MSR R20, DBGWCR12_EL1 // f40c10d5 + MSR R6, DBGWCR13_EL1 // e60d10d5 + MSR R22, DBGWCR14_EL1 // f60e10d5 + MSR R25, DBGWCR15_EL1 // f90f10d5 + MRS DBGWVR0_EL1, R14 // ce0030d5 + MRS DBGWVR1_EL1, R16 // d00130d5 + MRS DBGWVR2_EL1, R15 // cf0230d5 + MRS DBGWVR3_EL1, R1 // c10330d5 + MRS DBGWVR4_EL1, R26 // da0430d5 + MRS DBGWVR5_EL1, R14 // ce0530d5 + MRS DBGWVR6_EL1, R17 // d10630d5 + MRS DBGWVR7_EL1, R22 // d60730d5 + MRS DBGWVR8_EL1, R4 // c40830d5 + MRS DBGWVR9_EL1, R3 // c30930d5 + MRS DBGWVR10_EL1, R16 // d00a30d5 + MRS DBGWVR11_EL1, R2 // c20b30d5 + MRS DBGWVR12_EL1, R5 // c50c30d5 + MRS DBGWVR13_EL1, R23 // d70d30d5 + MRS DBGWVR14_EL1, R5 // c50e30d5 + MRS DBGWVR15_EL1, R6 // c60f30d5 + MSR R24, DBGWVR0_EL1 // d80010d5 + MSR R6, DBGWVR1_EL1 // c60110d5 + MSR R1, DBGWVR2_EL1 // c10210d5 + MSR R24, DBGWVR3_EL1 // d80310d5 + MSR R24, DBGWVR4_EL1 // d80410d5 + MSR R0, DBGWVR5_EL1 // c00510d5 
+ MSR R10, DBGWVR6_EL1 // ca0610d5 + MSR R17, DBGWVR7_EL1 // d10710d5 + MSR R7, DBGWVR8_EL1 // c70810d5 + MSR R8, DBGWVR9_EL1 // c80910d5 + MSR R15, DBGWVR10_EL1 // cf0a10d5 + MSR R8, DBGWVR11_EL1 // c80b10d5 + MSR R7, DBGWVR12_EL1 // c70c10d5 + MSR R14, DBGWVR13_EL1 // ce0d10d5 + MSR R16, DBGWVR14_EL1 // d00e10d5 + MSR R5, DBGWVR15_EL1 // c50f10d5 + MRS DCZID_EL0, R21 // f5003bd5 + MRS DISR_EL1, R8 // 28c138d5 + MSR R5, DISR_EL1 // 25c118d5 + MRS DIT, R29 // bd423bd5 + MSR R22, DIT // b6421bd5 + MRS DLR_EL0, R25 // 39453bd5 + MSR R9, DLR_EL0 // 29451bd5 + MRS DSPSR_EL0, R3 // 03453bd5 + MSR R10, DSPSR_EL0 // 0a451bd5 + MRS ELR_EL1, R24 // 384038d5 + MSR R3, ELR_EL1 // 234018d5 + MRS ELR_EL1, R13 // 2d4038d5 + MSR R27, ELR_EL1 // 3b4018d5 + MRS ERRIDR_EL1, R30 // 1e5338d5 + MRS ERRSELR_EL1, R21 // 355338d5 + MSR R22, ERRSELR_EL1 // 365318d5 + MRS ERXADDR_EL1, R30 // 7e5438d5 + MSR R0, ERXADDR_EL1 // 605418d5 + MRS ERXCTLR_EL1, R6 // 265438d5 + MSR R9, ERXCTLR_EL1 // 295418d5 + MRS ERXFR_EL1, R19 // 135438d5 + MRS ERXMISC0_EL1, R20 // 145538d5 + MSR R24, ERXMISC0_EL1 // 185518d5 + MRS ERXMISC1_EL1, R15 // 2f5538d5 + MSR R10, ERXMISC1_EL1 // 2a5518d5 + MRS ERXSTATUS_EL1, R30 // 5e5438d5 + MSR R3, ERXSTATUS_EL1 // 435418d5 + MRS ESR_EL1, R6 // 065238d5 + MSR R21, ESR_EL1 // 155218d5 + MRS ESR_EL1, R17 // 115238d5 + MSR R12, ESR_EL1 // 0c5218d5 + MRS FAR_EL1, R3 // 036038d5 + MSR R17, FAR_EL1 // 116018d5 + MRS FAR_EL1, R9 // 096038d5 + MSR R25, FAR_EL1 // 196018d5 + MRS FPCR, R1 // 01443bd5 + MSR R27, FPCR // 1b441bd5 + MRS FPSR, R5 // 25443bd5 + MSR R15, FPSR // 2f441bd5 + MRS ID_AA64AFR0_EL1, R19 // 930538d5 + MRS ID_AA64AFR1_EL1, R24 // b80538d5 + MRS ID_AA64DFR0_EL1, R21 // 150538d5 + MRS ID_AA64DFR1_EL1, R20 // 340538d5 + MRS ID_AA64ISAR0_EL1, R4 // 040638d5 + MRS ID_AA64ISAR1_EL1, R6 // 260638d5 + MRS ID_AA64MMFR0_EL1, R0 // 000738d5 + MRS ID_AA64MMFR1_EL1, R17 // 310738d5 + MRS ID_AA64MMFR2_EL1, R23 // 570738d5 + MRS ID_AA64PFR0_EL1, R20 // 140438d5 + MRS 
ID_AA64PFR1_EL1, R26 // 3a0438d5 + MRS ID_AA64ZFR0_EL1, R26 // 9a0438d5 + MRS ID_AFR0_EL1, R21 // 750138d5 + MRS ID_DFR0_EL1, R15 // 4f0138d5 + MRS ID_ISAR0_EL1, R11 // 0b0238d5 + MRS ID_ISAR1_EL1, R16 // 300238d5 + MRS ID_ISAR2_EL1, R10 // 4a0238d5 + MRS ID_ISAR3_EL1, R13 // 6d0238d5 + MRS ID_ISAR4_EL1, R24 // 980238d5 + MRS ID_ISAR5_EL1, R29 // bd0238d5 + MRS ID_MMFR0_EL1, R10 // 8a0138d5 + MRS ID_MMFR1_EL1, R29 // bd0138d5 + MRS ID_MMFR2_EL1, R16 // d00138d5 + MRS ID_MMFR3_EL1, R10 // ea0138d5 + MRS ID_MMFR4_EL1, R23 // d70238d5 + MRS ID_PFR0_EL1, R4 // 040138d5 + MRS ID_PFR1_EL1, R12 // 2c0138d5 + MRS ISR_EL1, R24 // 18c138d5 + MRS MAIR_EL1, R20 // 14a238d5 + MSR R21, MAIR_EL1 // 15a218d5 + MRS MAIR_EL1, R20 // 14a238d5 + MSR R5, MAIR_EL1 // 05a218d5 + MRS MDCCINT_EL1, R23 // 170230d5 + MSR R27, MDCCINT_EL1 // 1b0210d5 + MRS MDCCSR_EL0, R19 // 130133d5 + MRS MDRAR_EL1, R12 // 0c1030d5 + MRS MDSCR_EL1, R15 // 4f0230d5 + MSR R15, MDSCR_EL1 // 4f0210d5 + MRS MIDR_EL1, R26 // 1a0038d5 + MRS MPIDR_EL1, R25 // b90038d5 + MRS MVFR0_EL1, R29 // 1d0338d5 + MRS MVFR1_EL1, R7 // 270338d5 + MRS MVFR2_EL1, R19 // 530338d5 + MRS NZCV, R11 // 0b423bd5 + MSR R10, NZCV // 0a421bd5 + MRS OSDLR_EL1, R16 // 901330d5 + MSR R21, OSDLR_EL1 // 951310d5 + MRS OSDTRRX_EL1, R5 // 450030d5 + MSR R30, OSDTRRX_EL1 // 5e0010d5 + MRS OSDTRTX_EL1, R3 // 430330d5 + MSR R13, OSDTRTX_EL1 // 4d0310d5 + MRS OSECCR_EL1, R2 // 420630d5 + MSR R17, OSECCR_EL1 // 510610d5 + MSR R3, OSLAR_EL1 // 831010d5 + MRS OSLSR_EL1, R15 // 8f1130d5 + MRS PAN, R14 // 6e4238d5 + MSR R0, PAN // 604218d5 + MRS PAR_EL1, R27 // 1b7438d5 + MSR R3, PAR_EL1 // 037418d5 + MRS PMCCFILTR_EL0, R10 // eaef3bd5 + MSR R16, PMCCFILTR_EL0 // f0ef1bd5 + MRS PMCCNTR_EL0, R17 // 119d3bd5 + MSR R13, PMCCNTR_EL0 // 0d9d1bd5 + MRS PMCEID0_EL0, R8 // c89c3bd5 + MRS PMCEID1_EL0, R30 // fe9c3bd5 + MRS PMCNTENCLR_EL0, R11 // 4b9c3bd5 + MSR R21, PMCNTENCLR_EL0 // 559c1bd5 + MRS PMCNTENSET_EL0, R25 // 399c3bd5 + MSR R13, PMCNTENSET_EL0 // 
2d9c1bd5 + MRS PMCR_EL0, R23 // 179c3bd5 + MSR R11, PMCR_EL0 // 0b9c1bd5 + MRS PMEVCNTR0_EL0, R27 // 1be83bd5 + MRS PMEVCNTR1_EL0, R23 // 37e83bd5 + MRS PMEVCNTR2_EL0, R26 // 5ae83bd5 + MRS PMEVCNTR3_EL0, R11 // 6be83bd5 + MRS PMEVCNTR4_EL0, R14 // 8ee83bd5 + MRS PMEVCNTR5_EL0, R9 // a9e83bd5 + MRS PMEVCNTR6_EL0, R30 // dee83bd5 + MRS PMEVCNTR7_EL0, R19 // f3e83bd5 + MRS PMEVCNTR8_EL0, R5 // 05e93bd5 + MRS PMEVCNTR9_EL0, R27 // 3be93bd5 + MRS PMEVCNTR10_EL0, R23 // 57e93bd5 + MRS PMEVCNTR11_EL0, R27 // 7be93bd5 + MRS PMEVCNTR12_EL0, R0 // 80e93bd5 + MRS PMEVCNTR13_EL0, R13 // ade93bd5 + MRS PMEVCNTR14_EL0, R27 // dbe93bd5 + MRS PMEVCNTR15_EL0, R16 // f0e93bd5 + MRS PMEVCNTR16_EL0, R16 // 10ea3bd5 + MRS PMEVCNTR17_EL0, R14 // 2eea3bd5 + MRS PMEVCNTR18_EL0, R10 // 4aea3bd5 + MRS PMEVCNTR19_EL0, R12 // 6cea3bd5 + MRS PMEVCNTR20_EL0, R5 // 85ea3bd5 + MRS PMEVCNTR21_EL0, R26 // baea3bd5 + MRS PMEVCNTR22_EL0, R19 // d3ea3bd5 + MRS PMEVCNTR23_EL0, R5 // e5ea3bd5 + MRS PMEVCNTR24_EL0, R17 // 11eb3bd5 + MRS PMEVCNTR25_EL0, R0 // 20eb3bd5 + MRS PMEVCNTR26_EL0, R20 // 54eb3bd5 + MRS PMEVCNTR27_EL0, R12 // 6ceb3bd5 + MRS PMEVCNTR28_EL0, R29 // 9deb3bd5 + MRS PMEVCNTR29_EL0, R22 // b6eb3bd5 + MRS PMEVCNTR30_EL0, R22 // d6eb3bd5 + MSR R30, PMEVCNTR0_EL0 // 1ee81bd5 + MSR R1, PMEVCNTR1_EL0 // 21e81bd5 + MSR R20, PMEVCNTR2_EL0 // 54e81bd5 + MSR R9, PMEVCNTR3_EL0 // 69e81bd5 + MSR R8, PMEVCNTR4_EL0 // 88e81bd5 + MSR R2, PMEVCNTR5_EL0 // a2e81bd5 + MSR R30, PMEVCNTR6_EL0 // dee81bd5 + MSR R14, PMEVCNTR7_EL0 // eee81bd5 + MSR R1, PMEVCNTR8_EL0 // 01e91bd5 + MSR R15, PMEVCNTR9_EL0 // 2fe91bd5 + MSR R15, PMEVCNTR10_EL0 // 4fe91bd5 + MSR R14, PMEVCNTR11_EL0 // 6ee91bd5 + MSR R15, PMEVCNTR12_EL0 // 8fe91bd5 + MSR R25, PMEVCNTR13_EL0 // b9e91bd5 + MSR R26, PMEVCNTR14_EL0 // dae91bd5 + MSR R21, PMEVCNTR15_EL0 // f5e91bd5 + MSR R29, PMEVCNTR16_EL0 // 1dea1bd5 + MSR R11, PMEVCNTR17_EL0 // 2bea1bd5 + MSR R16, PMEVCNTR18_EL0 // 50ea1bd5 + MSR R2, PMEVCNTR19_EL0 // 62ea1bd5 + MSR R19, 
PMEVCNTR20_EL0 // 93ea1bd5 + MSR R17, PMEVCNTR21_EL0 // b1ea1bd5 + MSR R7, PMEVCNTR22_EL0 // c7ea1bd5 + MSR R23, PMEVCNTR23_EL0 // f7ea1bd5 + MSR R15, PMEVCNTR24_EL0 // 0feb1bd5 + MSR R27, PMEVCNTR25_EL0 // 3beb1bd5 + MSR R13, PMEVCNTR26_EL0 // 4deb1bd5 + MSR R2, PMEVCNTR27_EL0 // 62eb1bd5 + MSR R15, PMEVCNTR28_EL0 // 8feb1bd5 + MSR R14, PMEVCNTR29_EL0 // aeeb1bd5 + MSR R23, PMEVCNTR30_EL0 // d7eb1bd5 + MRS PMEVTYPER0_EL0, R23 // 17ec3bd5 + MRS PMEVTYPER1_EL0, R30 // 3eec3bd5 + MRS PMEVTYPER2_EL0, R12 // 4cec3bd5 + MRS PMEVTYPER3_EL0, R13 // 6dec3bd5 + MRS PMEVTYPER4_EL0, R25 // 99ec3bd5 + MRS PMEVTYPER5_EL0, R23 // b7ec3bd5 + MRS PMEVTYPER6_EL0, R8 // c8ec3bd5 + MRS PMEVTYPER7_EL0, R2 // e2ec3bd5 + MRS PMEVTYPER8_EL0, R23 // 17ed3bd5 + MRS PMEVTYPER9_EL0, R25 // 39ed3bd5 + MRS PMEVTYPER10_EL0, R0 // 40ed3bd5 + MRS PMEVTYPER11_EL0, R30 // 7eed3bd5 + MRS PMEVTYPER12_EL0, R0 // 80ed3bd5 + MRS PMEVTYPER13_EL0, R9 // a9ed3bd5 + MRS PMEVTYPER14_EL0, R15 // cfed3bd5 + MRS PMEVTYPER15_EL0, R13 // eded3bd5 + MRS PMEVTYPER16_EL0, R11 // 0bee3bd5 + MRS PMEVTYPER17_EL0, R19 // 33ee3bd5 + MRS PMEVTYPER18_EL0, R3 // 43ee3bd5 + MRS PMEVTYPER19_EL0, R17 // 71ee3bd5 + MRS PMEVTYPER20_EL0, R8 // 88ee3bd5 + MRS PMEVTYPER21_EL0, R2 // a2ee3bd5 + MRS PMEVTYPER22_EL0, R5 // c5ee3bd5 + MRS PMEVTYPER23_EL0, R17 // f1ee3bd5 + MRS PMEVTYPER24_EL0, R22 // 16ef3bd5 + MRS PMEVTYPER25_EL0, R3 // 23ef3bd5 + MRS PMEVTYPER26_EL0, R23 // 57ef3bd5 + MRS PMEVTYPER27_EL0, R19 // 73ef3bd5 + MRS PMEVTYPER28_EL0, R24 // 98ef3bd5 + MRS PMEVTYPER29_EL0, R3 // a3ef3bd5 + MRS PMEVTYPER30_EL0, R1 // c1ef3bd5 + MSR R20, PMEVTYPER0_EL0 // 14ec1bd5 + MSR R20, PMEVTYPER1_EL0 // 34ec1bd5 + MSR R14, PMEVTYPER2_EL0 // 4eec1bd5 + MSR R26, PMEVTYPER3_EL0 // 7aec1bd5 + MSR R11, PMEVTYPER4_EL0 // 8bec1bd5 + MSR R16, PMEVTYPER5_EL0 // b0ec1bd5 + MSR R29, PMEVTYPER6_EL0 // ddec1bd5 + MSR R3, PMEVTYPER7_EL0 // e3ec1bd5 + MSR R30, PMEVTYPER8_EL0 // 1eed1bd5 + MSR R17, PMEVTYPER9_EL0 // 31ed1bd5 + MSR R10, PMEVTYPER10_EL0 
// 4aed1bd5 + MSR R19, PMEVTYPER11_EL0 // 73ed1bd5 + MSR R13, PMEVTYPER12_EL0 // 8ded1bd5 + MSR R23, PMEVTYPER13_EL0 // b7ed1bd5 + MSR R13, PMEVTYPER14_EL0 // cded1bd5 + MSR R9, PMEVTYPER15_EL0 // e9ed1bd5 + MSR R1, PMEVTYPER16_EL0 // 01ee1bd5 + MSR R19, PMEVTYPER17_EL0 // 33ee1bd5 + MSR R22, PMEVTYPER18_EL0 // 56ee1bd5 + MSR R23, PMEVTYPER19_EL0 // 77ee1bd5 + MSR R30, PMEVTYPER20_EL0 // 9eee1bd5 + MSR R9, PMEVTYPER21_EL0 // a9ee1bd5 + MSR R3, PMEVTYPER22_EL0 // c3ee1bd5 + MSR R1, PMEVTYPER23_EL0 // e1ee1bd5 + MSR R16, PMEVTYPER24_EL0 // 10ef1bd5 + MSR R12, PMEVTYPER25_EL0 // 2cef1bd5 + MSR R7, PMEVTYPER26_EL0 // 47ef1bd5 + MSR R9, PMEVTYPER27_EL0 // 69ef1bd5 + MSR R10, PMEVTYPER28_EL0 // 8aef1bd5 + MSR R5, PMEVTYPER29_EL0 // a5ef1bd5 + MSR R12, PMEVTYPER30_EL0 // ccef1bd5 + MRS PMINTENCLR_EL1, R24 // 589e38d5 + MSR R15, PMINTENCLR_EL1 // 4f9e18d5 + MRS PMINTENSET_EL1, R1 // 219e38d5 + MSR R4, PMINTENSET_EL1 // 249e18d5 + MRS PMOVSCLR_EL0, R6 // 669c3bd5 + MSR R30, PMOVSCLR_EL0 // 7e9c1bd5 + MRS PMOVSSET_EL0, R16 // 709e3bd5 + MSR R12, PMOVSSET_EL0 // 6c9e1bd5 + MRS PMSELR_EL0, R30 // be9c3bd5 + MSR R5, PMSELR_EL0 // a59c1bd5 + MSR R27, PMSWINC_EL0 // 9b9c1bd5 + MRS PMUSERENR_EL0, R8 // 089e3bd5 + MSR R6, PMUSERENR_EL0 // 069e1bd5 + MRS PMXEVCNTR_EL0, R26 // 5a9d3bd5 + MSR R10, PMXEVCNTR_EL0 // 4a9d1bd5 + MRS PMXEVTYPER_EL0, R4 // 249d3bd5 + MSR R4, PMXEVTYPER_EL0 // 249d1bd5 + MRS REVIDR_EL1, R29 // dd0038d5 + MRS RMR_EL1, R4 // 44c038d5 + MSR R0, RMR_EL1 // 40c018d5 + MRS RVBAR_EL1, R7 // 27c038d5 + MRS SCTLR_EL1, R8 // 081038d5 + MSR R0, SCTLR_EL1 // 001018d5 + MRS SCTLR_EL1, R30 // 1e1038d5 + MSR R13, SCTLR_EL1 // 0d1018d5 + MRS SPSR_EL1, R1 // 014038d5 + MSR R2, SPSR_EL1 // 024018d5 + MRS SPSR_EL1, R3 // 034038d5 + MSR R14, SPSR_EL1 // 0e4018d5 + MRS SPSR_abt, R12 // 2c433cd5 + MSR R4, SPSR_abt // 24431cd5 + MRS SPSR_fiq, R17 // 71433cd5 + MSR R9, SPSR_fiq // 69431cd5 + MRS SPSR_irq, R12 // 0c433cd5 + MSR R23, SPSR_irq // 17431cd5 + MRS SPSR_und, R29 // 
5d433cd5 + MSR R3, SPSR_und // 43431cd5 + MRS SPSel, R29 // 1d4238d5 + MSR R1, SPSel // 014218d5 + MRS SP_EL0, R10 // 0a4138d5 + MSR R4, SP_EL0 // 044118d5 + MRS SP_EL1, R22 // 16413cd5 + MSR R17, SP_EL1 // 11411cd5 + MRS TCR_EL1, R17 // 512038d5 + MSR R23, TCR_EL1 // 572018d5 + MRS TCR_EL1, R14 // 4e2038d5 + MSR R29, TCR_EL1 // 5d2018d5 + MRS TPIDRRO_EL0, R26 // 7ad03bd5 + MSR R16, TPIDRRO_EL0 // 70d01bd5 + MRS TPIDR_EL0, R23 // 57d03bd5 + MSR R5, TPIDR_EL0 // 45d01bd5 + MRS TPIDR_EL1, R17 // 91d038d5 + MSR R22, TPIDR_EL1 // 96d018d5 + MRS TTBR0_EL1, R30 // 1e2038d5 + MSR R29, TTBR0_EL1 // 1d2018d5 + MRS TTBR0_EL1, R23 // 172038d5 + MSR R15, TTBR0_EL1 // 0f2018d5 + MRS TTBR1_EL1, R5 // 252038d5 + MSR R26, TTBR1_EL1 // 3a2018d5 + MRS TTBR1_EL1, R19 // 332038d5 + MSR R23, TTBR1_EL1 // 372018d5 + MRS UAO, R22 // 964238d5 + MSR R4, UAO // 844218d5 + MRS VBAR_EL1, R23 // 17c038d5 + MSR R2, VBAR_EL1 // 02c018d5 + MRS VBAR_EL1, R6 // 06c038d5 + MSR R3, VBAR_EL1 // 03c018d5 + MRS DISR_EL1, R12 // 2cc138d5 + MSR R24, DISR_EL1 // 38c118d5 + MRS MPIDR_EL1, R1 // a10038d5 + MRS MIDR_EL1, R13 // 0d0038d5 + MRS ZCR_EL1, R24 // 181238d5 + MSR R13, ZCR_EL1 // 0d1218d5 + MRS ZCR_EL1, R23 // 171238d5 + MSR R17, ZCR_EL1 // 111218d5 + SYS $32768, R1 // 018008d5 + SYS $32768 // 1f8008d5 + +// TLBI instruction + TLBI VMALLE1IS // 1f8308d5 + TLBI VMALLE1 // 1f8708d5 + TLBI ALLE2IS // 1f830cd5 + TLBI ALLE1IS // 9f830cd5 + TLBI VMALLS12E1IS // df830cd5 + TLBI ALLE2 // 1f870cd5 + TLBI ALLE1 // 9f870cd5 + TLBI VMALLS12E1 // df870cd5 + TLBI ALLE3IS // 1f830ed5 + TLBI ALLE3 // 1f870ed5 + TLBI VMALLE1OS // 1f8108d5 + TLBI ALLE2OS // 1f810cd5 + TLBI ALLE1OS // 9f810cd5 + TLBI VMALLS12E1OS // df810cd5 + TLBI ALLE3OS // 1f810ed5 + TLBI VAE1IS, R0 // 208308d5 + TLBI ASIDE1IS, R1 // 418308d5 + TLBI VAAE1IS, R2 // 628308d5 + TLBI VALE1IS, R3 // a38308d5 + TLBI VAALE1IS, R4 // e48308d5 + TLBI VAE1, R5 // 258708d5 + TLBI ASIDE1, R6 // 468708d5 + TLBI VAAE1, R7 // 678708d5 + TLBI VALE1, R8 // a88708d5 
+ TLBI VAALE1, R9 // e98708d5 + TLBI IPAS2E1IS, R10 // 2a800cd5 + TLBI IPAS2LE1IS, R11 // ab800cd5 + TLBI VAE2IS, R12 // 2c830cd5 + TLBI VALE2IS, R13 // ad830cd5 + TLBI IPAS2E1, R14 // 2e840cd5 + TLBI IPAS2LE1, R15 // af840cd5 + TLBI VAE2, R16 // 30870cd5 + TLBI VALE2, R17 // b1870cd5 + TLBI VAE3IS, ZR // 3f830ed5 + TLBI VALE3IS, R19 // b3830ed5 + TLBI VAE3, R20 // 34870ed5 + TLBI VALE3, R21 // b5870ed5 + TLBI VAE1OS, R22 // 368108d5 + TLBI ASIDE1OS, R23 // 578108d5 + TLBI VAAE1OS, R24 // 788108d5 + TLBI VALE1OS, R25 // b98108d5 + TLBI VAALE1OS, R26 // fa8108d5 + TLBI RVAE1IS, R27 // 3b8208d5 + TLBI RVAAE1IS, ZR // 7f8208d5 + TLBI RVALE1IS, R29 // bd8208d5 + TLBI RVAALE1IS, R30 // fe8208d5 + TLBI RVAE1OS, ZR // 3f8508d5 + TLBI RVAAE1OS, R0 // 608508d5 + TLBI RVALE1OS, R1 // a18508d5 + TLBI RVAALE1OS, R2 // e28508d5 + TLBI RVAE1, R3 // 238608d5 + TLBI RVAAE1, R4 // 648608d5 + TLBI RVALE1, R5 // a58608d5 + TLBI RVAALE1, R6 // e68608d5 + TLBI RIPAS2E1IS, R7 // 47800cd5 + TLBI RIPAS2LE1IS, R8 // c8800cd5 + TLBI VAE2OS, R9 // 29810cd5 + TLBI VALE2OS, R10 // aa810cd5 + TLBI RVAE2IS, R11 // 2b820cd5 + TLBI RVALE2IS, R12 // ac820cd5 + TLBI IPAS2E1OS, R13 // 0d840cd5 + TLBI RIPAS2E1, R14 // 4e840cd5 + TLBI RIPAS2E1OS, R15 // 6f840cd5 + TLBI IPAS2LE1OS, R16 // 90840cd5 + TLBI RIPAS2LE1, R17 // d1840cd5 + TLBI RIPAS2LE1OS, ZR // ff840cd5 + TLBI RVAE2OS, R19 // 33850cd5 + TLBI RVALE2OS, R20 // b4850cd5 + TLBI RVAE2, R21 // 35860cd5 + TLBI RVALE2, R22 // b6860cd5 + TLBI VAE3OS, R23 // 37810ed5 + TLBI VALE3OS, R24 // b8810ed5 + TLBI RVAE3IS, R25 // 39820ed5 + TLBI RVALE3IS, R26 // ba820ed5 + TLBI RVAE3OS, R27 // 3b850ed5 + TLBI RVALE3OS, ZR // bf850ed5 + TLBI RVAE3, R29 // 3d860ed5 + TLBI RVALE3, R30 // be860ed5 + +// DC instruction + DC IVAC, R0 // 207608d5 + DC ISW, R1 // 417608d5 + DC CSW, R2 // 427a08d5 + DC CISW, R3 // 437e08d5 + DC ZVA, R4 // 24740bd5 + DC CVAC, R5 // 257a0bd5 + DC CVAU, R6 // 267b0bd5 + DC CIVAC, R7 // 277e0bd5 + DC IGVAC, R8 // 687608d5 + DC IGSW, R9 // 
897608d5 + DC IGDVAC, R10 // aa7608d5 + DC IGDSW, R11 // cb7608d5 + DC CGSW, R12 // 8c7a08d5 + DC CGDSW, R13 // cd7a08d5 + DC CIGSW, R14 // 8e7e08d5 + DC CIGDSW, R15 // cf7e08d5 + DC GVA, R16 // 70740bd5 + DC GZVA, R17 // 91740bd5 + DC CGVAC, ZR // 7f7a0bd5 + DC CGDVAC, R19 // b37a0bd5 + DC CGVAP, R20 // 747c0bd5 + DC CGDVAP, R21 // b57c0bd5 + DC CGVADP, R22 // 767d0bd5 + DC CGDVADP, R23 // b77d0bd5 + DC CIGVAC, R24 // 787e0bd5 + DC CIGDVAC, R25 // b97e0bd5 + DC CVAP, R26 // 3a7c0bd5 + DC CVADP, R27 // 3b7d0bd5 + END diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/arm64enc.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/arm64enc.s new file mode 100644 index 0000000000000000000000000000000000000000..cc002a1584d2c7afb3fba07d8544913eb5d7da23 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/arm64enc.s @@ -0,0 +1,766 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The cases are auto-generated by disassembler. +// The uncommented cases means they can be handled by assembler +// and they are consistent with disassembler decoding. +// TODO means they cannot be handled by current assembler. 
+ +#include "../../../../../runtime/textflag.h" + +TEXT asmtest(SB),DUPOK|NOSPLIT,$-8 + + AND $(1<<63), R1 // AND $-9223372036854775808, R1 // 21004192 + ADCW ZR, R8, R10 // 0a011f1a + ADC R0, R2, R12 // 4c00009a + ADCSW R9, R21, R6 // a602093a + ADCS R23, R22, R22 // d60217ba + ADDW R5.UXTH, R8, R9 // 0921250b + ADD R8.SXTB<<3, R23, R14 // ee8e288b + ADDW $3076, R17, R3 // 23123011 + ADDW $(3076<<12), R17, R3 // ADDW $12599296, R17, R3 // 23127011 + ADD $2280, R25, R11 // 2ba32391 + ADD $(2280<<12), R25, R11 // ADD $9338880, R25, R11 // 2ba36391 + ADDW R13->5, R11, R7 // 67158d0b + ADD R25<<54, R17, R16 // 30da198b + ADDSW R12.SXTX<<1, R29, R7 // a7e72c2b + ADDS R24.UXTX<<4, R25, R21 // 357338ab + ADDSW $(3525<<12), R3, R11 // ADDSW $14438400, R3, R11 // 6b147731 + ADDS $(3525<<12), R3, R11 // ADDS $14438400, R3, R11 // 6b1477b1 + ADDSW R7->22, R14, R13 // cd59872b + ADDS R14>>7, ZR, R4 // e41f4eab + AND $-9223372036854775808, R1, R1 // 21004192 + ANDW $4026540031, R29, R2 // a2430412 + AND $34903429696192636, R12, R19 // 93910e92 + ANDW R9@>7, R19, R26 // 7a1ec90a + AND R9@>7, R19, R26 // 7a1ec98a + TSTW $2863311530, R24 // 1ff30172 + TST R2, R0 // 1f0002ea + TST $7, R2 // 5f0840f2 + ANDS R2, R0, ZR // 1f0002ea + ANDS $7, R2, ZR // 5f0840f2 + ANDSW $2863311530, R24, ZR // 1ff30172 + ANDSW $2863311530, R24, R23 // 17f30172 + ANDS $-140737488289793, R2, R5 // 458051f2 + ANDSW R26->24, R21, R15 // af629a6a + ANDS R30@>44, R3, R26 // 7ab0deea + ASRW R12, R27, R25 // 792bcc1a + ASR R14, R27, R7 // 672bce9a + ASR $11, R27, R25 // 79ff4b93 + ASRW $11, R27, R25 // 797f0b13 + BLT -1(PC) // ebffff54 + JMP -1(PC) // ffffff17 + BFIW $16, R20, $6, R0 // 80161033 + BFI $27, R21, $21, R25 // b95265b3 + BFXILW $3, R27, $23, R14 // 6e670333 + BFXIL $26, R8, $16, R20 // 14a55ab3 + BICW R7@>15, R5, R16 // b03ce70a + BIC R12@>13, R12, R19 // 9335ec8a + BICSW R25->20, R3, R20 // 7450b96a + BICS R19->12, R1, R23 // 3730b3ea + BICS R19, R1, R23 // 370033ea + BICS R19>>0, R1, R23 // 
370073ea + CALL -1(PC) // ffffff97 + CALL (R15) // e0013fd6 + JMP (R29) // a0031fd6 + BRK $35943 // e08c31d4 + CBNZW R2, -1(PC) // e2ffff35 + CBNZ R7, -1(PC) // e7ffffb5 + CBZW R15, -1(PC) // efffff34 + CBZ R1, -1(PC) // e1ffffb4 + CCMN MI, ZR, R1, $4 // e44341ba + CCMNW AL, R26, $20, $11 // 4beb543a + CCMN PL, R24, $6, $1 // 015b46ba + CCMNW EQ, R20, R6, $6 // 8602463a + CCMN LE, R30, R12, $6 // c6d34cba + CCMPW VS, R29, $15, $7 // a76b4f7a + CCMP LE, R7, $19, $3 // e3d853fa + CCMPW HS, R19, R6, $0 // 6022467a + CCMP LT, R30, R6, $7 // c7b346fa + CCMN MI, ZR, R1, $4 // e44341ba + CSINCW HS, ZR, R27, R14 // ee279b1a + CSINC VC, R2, R1, R1 // 4174819a + CSINVW EQ, R2, R21, R17 // 5100955a + CSINV LO, R2, R19, R23 // 573093da + CINCW LO, R27, R14 // 6e279b1a + CINCW HS, R27, ZR // 7f379b1a + CINVW EQ, R2, R17 // 5110825a + CINV VS, R12, R7 // 87718cda + CINV VS, R30, R30 // de739eda + CLREX $4 // 5f3403d5 + CLREX $0 // 5f3003d5 + CLSW R15, R6 // e615c05a + CLS R15, ZR // ff15c0da + CLZW R1, R14 // 2e10c05a + CLZ R21, R9 // a912c0da + CMNW R21.UXTB<<4, R15 // ff11352b + CMN R0.UXTW<<4, R16 // 1f5220ab + CMNW R13>>8, R9 // 3f214d2b + CMN R6->17, R3 // 7f4486ab + CMNW $(2<<12), R5 // CMNW $8192, R5 // bf084031 + CMN $(8<<12), R12 // CMN $32768, R12 // 9f2140b1 + CMN R6->0, R3 // 7f0086ab + CMN R6, R3 // 7f0006ab + CMNW R30, R5 // bf001e2b + CMNW $2, R5 // bf080031 + CMN ZR, R3 // 7f001fab + CMN R0, R3 // 7f0000ab + CMPW R6.UXTB, R23 // ff02266b + CMP R25.SXTH<<2, R26 // 5fab39eb + CMP $3817, R29 // bfa73bf1 + CMP R7>>23, R3 // 7f5c47eb + CNEGW PL, R9, R14 // 2e45895a + CSNEGW HS, R5, R9, R14 // ae24895a + CSNEG PL, R14, R21, R3 // c35595da + CNEG LO, R7, R15 // ef2487da + CRC32B R17, R8, R16 // 1041d11a + CRC32H R3, R21, R27 // bb46c31a + CRC32W R22, R30, R9 // c94bd61a + CRC32X R20, R4, R15 // 8f4cd49a + CRC32CB R19, R27, R22 // 7653d31a + CRC32CH R21, R0, R20 // 1454d51a + CRC32CW R9, R3, R21 // 7558c91a + CRC32CX R11, R0, R24 // 185ccb9a + CSELW LO, R4, R20, R12 // 
8c30941a + CSEL GE, R0, R12, R14 // 0ea08c9a + CSETW GE, R3 // e3b79f1a + CSET LT, R30 // fea79f9a + CSETMW VC, R5 // e5639f5a + CSETM VS, R4 // e4739fda + CSINCW LE, R5, R24, R26 // bad4981a + CSINC VS, R26, R16, R17 // 5167909a + CSINVW AL, R23, R21, R5 // e5e2955a + CSINV LO, R2, R11, R14 // 4e308bda + CSNEGW HS, R16, R29, R10 // 0a269d5a + CSNEG NE, R21, R19, R11 // ab1693da + DC IVAC, R1 // 217608d5 + DCPS1 $11378 // 418ea5d4 + DCPS2 $10699 // 6239a5d4 + DCPS3 $24415 // e3ebabd4 + DMB $1 // bf3103d5 + DMB $0 // bf3003d5 + DRPS // e003bfd6 + DSB $1 // 9f3103d5 + EONW R21<<29, R6, R9 // c974354a + EON R14>>46, R4, R9 // 89b86eca + EOR $-2287828610704211969, R27, R22 // 76e343d2 + EORW R12->27, R10, R19 // 536d8c4a + EOR R2<<59, R30, R17 // d1ef02ca + ERET // e0039fd6 + EXTRW $7, R8, R10, R25 // 591d8813 + EXTR $35, R22, R12, R8 // 888dd693 + SEVL // bf2003d5 + HINT $6 // df2003d5 + HINT $0 // 1f2003d5 + HLT $65509 // a0fc5fd4 + HVC $61428 // 82fe1dd4 + ISB $1 // df3103d5 + ISB $15 // df3f03d5 + LDARW (R12), R29 // 9dfddf88 + LDARW (R30), R22 // d6ffdf88 + LDARW (RSP), R22 // f6ffdf88 + LDAR (R27), R22 // 76ffdfc8 + LDARB (R25), R2 // 22ffdf08 + LDARH (R5), R7 // a7fcdf48 + LDAXPW (R10), (R20, R16) // 54c17f88 + LDAXP (R25), (R30, R11) // 3eaf7fc8 + LDAXRW (R15), R2 // e2fd5f88 + LDAXR (R15), R21 // f5fd5fc8 + LDAXRB (R19), R16 // 70fe5f08 + LDAXRH (R5), R8 // a8fc5f48 + //TODO LDNP 0xcc(RSP), ZR, R12 // ecff5928 + //TODO LDNP 0x40(R28), R9, R5 // 852744a8 + //TODO LDPSW -0xd0(R2), R0, R12 // 4c00e668 + //TODO LDPSW 0x5c(R4), R8, R5 // 85a0cb69 + //TODO LDPSW 0x6c(R12), R2, R27 // 9b894d69 + MOVWU.P -84(R15), R9 // e9c55ab8 + MOVD.P -46(R10), R8 // 48255df8 + MOVD.P (R10), R8 // 480540f8 + MOVWU.W -141(R3), R16 // 703c57b8 + MOVD.W -134(R0), R29 // 1dac57f8 + MOVWU 4156(R1), R25 // 393c50b9 + MOVD 14616(R10), R9 // 498d5cf9 + MOVWU (R4)(R12.SXTW<<2), R7 // 87d86cb8 + MOVD (R7)(R11.UXTW<<3), R25 // f9586bf8 + MOVBU.P 42(R2), R12 // 4ca44238 + MOVBU.W -27(R2), R14 
// 4e5c5e38 + MOVBU 2916(R24), R3 // 03936d39 + MOVBU (R19)(R14<<0), R23 // 776a6e38 + MOVBU (R2)(R8.SXTX), R19 // 53e86838 + MOVBU (R27)(R23), R14 // 6e6b7738 + MOVHU.P 107(R14), R13 // cdb54678 + MOVHU.W 192(R3), R2 // 620c4c78 + MOVHU 6844(R4), R19 // 93787579 + MOVHU (R5)(R25.SXTW), R15 // afc87978 + //TODO MOVBW.P 77(R19), R11 // 6bd6c438 + MOVB.P 36(RSP), R27 // fb478238 + //TODO MOVBW.W -57(R19), R13 // 6d7edc38 + MOVB.W -178(R16), R24 // 18ee9438 + //TODO MOVBW 430(R8), R22 // 16b9c639 + MOVB 997(R9), R23 // 37958f39 + //TODO MOVBW (R2<<1)(R21), R15 // af7ae238 + //TODO MOVBW (R26)(R0), R21 // 1568fa38 + MOVB (R5)(R15), R16 // b068af38 + MOVB (R19)(R26.SXTW), R19 // 73caba38 + MOVB (R29)(R30), R14 // ae6bbe38 + //TODO MOVHW.P 218(R22), R25 // d9a6cd78 + MOVH.P 179(R23), R5 // e5368b78 + //TODO MOVHW.W 136(R2), R27 // 5b8cc878 + MOVH.W -63(R25), R22 // 361f9c78 + //TODO MOVHW 5708(R25), R21 // 359bec79 + MOVH 54(R2), R13 // 4d6c8079 + //TODO MOVHW (R22)(R24.SXTX), R4 // c4eaf878 + MOVH (R26)(R30.UXTW<<1), ZR // 5f5bbe78 + MOVW.P -58(R16), R2 // 02669cb8 + MOVW.W -216(R19), R8 // 688e92b8 + MOVW 4764(R23), R10 // ea9e92b9 + MOVW (R8)(R3.UXTW), R17 // 1149a3b8 + //TODO LDTR -0x1e(R3), R4 // 64285eb8 + //TODO LDTR -0xe5(R3), R10 // 6ab851f8 + //TODO LDTRB 0xf0(R13), R10 // aa094f38 + //TODO LDTRH 0xe8(R13), R23 // b7894e78 + //TODO LDTRSB -0x24(R20), R5 // 85cadd38 + //TODO LDTRSB -0x75(R9), R13 // 2db99838 + //TODO LDTRSH 0xef(R3), LR // 7ef8ce78 + //TODO LDTRSH 0x96(R19), R24 // 786a8978 + //TODO LDTRSW 0x1e(LR), R5 // c5eb81b8 + //TODO LDUR 0xbf(R13), R1 // a1f14bb8 + //TODO LDUR -0x3c(R22), R3 // c3425cf8 + //TODO LDURB -0xff(R17), R14 // 2e125038 + //TODO LDURH 0x80(R1), R6 // 26004878 + //TODO LDURSB 0xde(LR), R3 // c3e3cd38 + //TODO LDURSB 0x96(R9), R7 // 27618938 + //TODO LDURSH -0x49(R11), R28 // 7c71db78 + //TODO LDURSH -0x1f(R0), R29 // 1d109e78 + //TODO LDURSW 0x48(R6), R20 // d48084b8 + LDXPW (R24), (R23, R11) // 172f7f88 + LDXP (R0), (R16, R13) // 
10347fc8 + LDXRW (RSP), R30 // fe7f5f88 + LDXR (R27), R12 // 6c7f5fc8 + LDXRB (R0), R4 // 047c5f08 + LDXRH (R12), R26 // 9a7d5f48 + LSLW R11, R10, R15 // 4f21cb1a + LSL R27, R24, R21 // 1523db9a + LSLW $5, R7, R22 // f6681b53 + LSL $57, R17, R2 // 221a47d3 + LSRW R9, R3, R12 // 6c24c91a + LSR R10, R5, R2 // a224ca9a + LSRW $1, R3, R16 // 707c0153 + LSR $12, R1, R20 // 34fc4cd3 + MADDW R13, R23, R3, R10 // 6a5c0d1b + MADD R5, R23, R10, R4 // 445d059b + MNEGW R0, R9, R21 // 35fd001b + MNEG R14, R27, R23 // 77ff0e9b + MOVD R2, R7 // e70302aa + MOVW $-24, R20 // f4028012 + MOVD $-51096, ZR // fff29892 + MOVW $2507014144, R20 // d4adb252 + MOVD $1313925191285342208, R7 // 8747e2d2 + ORRW $16252928, ZR, R21 // f5130d32 + MOVD $-4260607558625, R11 // eb6b16b2 + MOVD R30, R7 // e7031eaa + MOVKW $(3905<<0), R21 // MOVKW $3905, R21 // 35e88172 + MOVKW $(3905<<16), R21 // MOVKW $255918080, R21 // 35e8a172 + MOVK $(3905<<32), R21 // MOVK $16771847290880, R21 // 35e8c1f2 + MOVD $0, R5 // e5031faa + MSR $1, SPSel // bf4100d5 + MSR $9, DAIFSet // df4903d5 + MSR $6, DAIFClr // ff4603d5 + MRS ELR_EL1, R8 // 284038d5 + MSR R16, ELR_EL1 // 304018d5 + MRS DCZID_EL0, R3 // e3003bd5 + MSUBW R1, R1, R12, R5 // 8585011b + MSUB R19, R16, R26, R2 // 42c3139b + MULW R26, R5, R22 // b67c1a1b + MUL R4, R3, R0 // 607c049b + MVNW R3@>13, R8 // e837e32a + MVN R13>>31, R9 // e97f6daa + NEGSW R23<<1, R30 // fe07176b + NEGS R20>>35, R22 // f68f54eb + NGCW R13, R8 // e8030d5a + NGC R2, R7 // e70302da + NGCSW R10, R5 // e5030a7a + NGCS R24, R16 // f00318fa + NOOP // 1f2003d5 + ORNW R4@>11, R16, R3 // 032ee42a + ORN R22@>19, R3, R3 // 634cf6aa + ORRW $4294443071, R15, R24 // f8490d32 + ORR $-3458764513820540929, R12, R22 // 96f542b2 + ORRW R13<<4, R8, R26 // 1a110d2a + ORR R3<<22, R5, R6 // a65803aa + PRFM (R8), $25 // 190180f9 + PRFM (R2), PLDL1KEEP // 400080f9 + //TODO PRFM (R27)(R30.SXTW<<3), PLDL2STRM // 63dbbff8 + //TODO PRFUM 22(R16), PSTL1KEEP // 106281f8 + RBITW R9, R22 // 3601c05a + RBIT R11, 
R4 // 6401c0da + RET // c0035fd6 + REVW R8, R10 // 0a09c05a + REV R1, R2 // 220cc0da + REV16W R21, R19 // b306c05a + REV16 R25, R4 // 2407c0da + REV32 R27, R21 // 750bc0da + EXTRW $27, R4, R25, R19 // 336f8413 + EXTR $17, R10, R29, R15 // af47ca93 + ROR $14, R14, R15 // cf39ce93 + RORW $28, R14, R15 // cf718e13 + RORW R3, R12, R3 // 832dc31a + ROR R0, R23, R2 // e22ec09a + SBCW R4, R8, R24 // 1801045a + SBC R25, R10, R26 // 5a0119da + SBCSW R27, R19, R19 // 73021b7a + SBCS R5, R9, R5 // 250105fa + SBFIZW $9, R10, $18, R22 // 56451713 + SBFIZ $6, R11, $15, R20 // 74397a93 + SBFXW $8, R15, $10, R20 // f4450813 + SBFX $2, R27, $54, R7 // 67df4293 + SDIVW R22, R14, R9 // c90dd61a + SDIV R13, R21, R9 // a90ecd9a + SEV // 9f2003d5 + SEVL // bf2003d5 + SMADDL R3, R7, R11, R9 // 691d239b + SMSUBL R5, R19, R11, R29 // 7dcd259b + SMNEGL R26, R3, R15 // 6ffc3a9b + SMULH R17, R21, R21 // b57e519b + SMULL R0, R5, R0 // a07c209b + SMC $37977 // 238b12d4 + STLRW R16, (R22) // d0fe9f88 + STLR R3, (R24) // 03ff9fc8 + STLRB R11, (R22) // cbfe9f08 + STLRH R16, (R23) // f0fe9f48 + STLXR R7, (R27), R8 // 67ff08c8 + STLXRW R13, (R15), R14 // edfd0e88 + STLXRB R24, (R23), R8 // f8fe0808 + STLXRH R19, (R27), R11 // 73ff0b48 + STLXP (R6, R3), (R10), R2 // 468d22c8 + STLXPW (R6, R11), (R22), R21 // c6ae3588 + //TODO STNPW 44(R1), R3, R10 // 2a8c0528 + //TODO STNP 0x108(R3), ZR, R7 // 67fc10a8 + LDP.P -384(R3), (R22, R26) // 7668e8a8 + LDP.W 280(R8), (R19, R11) // 13add1a9 + STP.P (R22, R27), 352(R0) // 166c96a8 + STP.W (R17, R11), 96(R8) // 112d86a9 + MOVW.P R20, -28(R1) // 34441eb8 + MOVD.P R17, 191(R16) // 11f60bf8 + MOVW.W R1, -171(R14) // c15d15b8 + MOVD.W R14, -220(R13) // ae4d12f8 + MOVW R3, 14828(R24) // 03ef39b9 + MOVD R0, 20736(R17) // 208228f9 + MOVB.P ZR, -117(R7) // ffb41838 + MOVB.W R27, -96(R13) // bb0d1a38 + MOVB R17, 2200(R13) // b1612239 + MOVH.P R7, -72(R4) // 87841b78 + MOVH.W R12, -125(R14) // cc3d1878 + MOVH R19, 3686(R26) // 53cf1c79 + MOVW R21, 34(R0) // 152002b8 + 
MOVD R25, -137(R17) // 397217f8 + MOVW R4, (R12)(R22.UXTW<<2) // 845936b8 + MOVD R27, (R5)(R15.UXTW<<3) // bb582ff8 + MOVB R2, (R10)(R16) // 42693038 + MOVB R2, (R29)(R26) // a26b3a38 + MOVH R11, -80(R23) // eb021b78 + MOVH R11, (R27)(R14.SXTW<<1) // 6bdb2e78 + MOVB R19, (R0)(R4) // 13682438 + MOVB R1, (R6)(R4) // c1682438 + MOVH R3, (R11)(R13<<1) // 63792d78 + //TODO STTR 55(R4), R29 // 9d7803b8 + //TODO STTR 124(R5), R25 // b9c807f8 + //TODO STTRB -28(R23), R16 // f04a1e38 + //TODO STTRH 9(R10), R19 // 53990078 + STXP (R1, R2), (R3), R10 // 61082ac8 + STXP (R1, R2), (RSP), R10 // e10b2ac8 + STXPW (R1, R2), (R3), R10 // 61082a88 + STXPW (R1, R2), (RSP), R10 // e10b2a88 + STXRW R2, (R19), R20 // 627e1488 + STXR R15, (R21), R13 // af7e0dc8 + STXRB R7, (R9), R24 // 277d1808 + STXRH R12, (R3), R8 // 6c7c0848 + SUBW R20.UXTW<<2, R23, R19 // f34a344b + SUB R5.SXTW<<2, R1, R26 // 3ac825cb + SUB $(1923<<12), R4, R27 // SUB $7876608, R4, R27 // 9b0c5ed1 + SUBW $(1923<<12), R4, R27 // SUBW $7876608, R4, R27 // 9b0c5e51 + SUBW R12<<29, R7, R8 // e8740c4b + SUB R12<<61, R7, R8 // e8f40ccb + SUBSW R2.SXTH<<3, R13, R6 // a6ad226b + SUBS R21.UXTX<<2, R27, R4 // 646b35eb + SUBSW $(44<<12), R6, R9 // SUBSW $180224, R6, R9 // c9b04071 + SUBS $(1804<<12), R13, R9 // SUBS $7389184, R13, R9 // a9315cf1 + SUBSW R22->28, R6, R7 // c770966b + SUBSW R22>>28, R6, R7 // c770566b + SUBS R26<<15, R6, R16 // d03c1aeb + SVC $0 // 010000d4 + SVC $7165 // a17f03d4 + SXTBW R8, R25 // 191d0013 + SXTB R13, R9 // a91d4093 + SXTHW R8, R8 // 083d0013 + SXTH R17, R25 // 393e4093 + SXTW R0, R27 // 1b7c4093 + SYSL $285440, R12 // 0c5b2cd5 + TLBI VAE1IS, R1 // 218308d5 + TSTW $0x80000007, R9 // TSTW $2147483655, R9 // 3f0d0172 + TST $0xfffffff0, LR // TST $4294967280, R30 // df6f7cf2 + TSTW R10@>21, R2 // 5f54ca6a + TST R17<<11, R24 // 1f2f11ea + ANDSW $0x80000007, R9, ZR // ANDSW $2147483655, R9, ZR // 3f0d0172 + ANDS $0xfffffff0, LR, ZR // ANDS $4294967280, R30, ZR // df6f7cf2 + ANDSW R10@>21, R2, ZR // 
5f54ca6a + ANDS R17<<11, R24, ZR // 1f2f11ea + UBFIZW $3, R19, $14, R14 // 6e361d53 + UBFIZ $3, R22, $14, R4 // c4367dd3 + UBFXW $3, R7, $20, R15 // ef580353 + UBFX $33, R17, $25, R5 // 25e661d3 + UDIVW R8, R21, R15 // af0ac81a + UDIV R2, R19, R21 // 750ac29a + UMADDL R0, R20, R17, R17 // 3152a09b + UMSUBL R22, R4, R3, R7 // 6790b69b + UMNEGL R3, R19, R1 // 61fea39b + UMULH R24, R20, R24 // 987ed89b + UMULL R19, R22, R19 // d37eb39b + UXTBW R2, R6 // 461c0053 + UXTHW R7, R20 // f43c0053 + VCNT V0.B8, V0.B8 // 0058200e + VCNT V0.B16, V0.B16 // 0058204e + WFE // 5f2003d5 + WFI // 7f2003d5 + YIELD // 3f2003d5 + //TODO FABD F0, F5, F11 // abd4a07e + //TODO VFABD V30.S2, V8.S2, V24.S2 // 18d5be2e + //TODO VFABS V5.S4, V24.S4 // b8f8a04e + FABSS F2, F28 // 5cc0201e + FABSD F0, F14 // 0ec0601e + //TODO FACGE F25, F16, F0 // 00ee797e + //TODO VFACGE V11.S2, V15.S2, V9.S2 // e9ed2b2e + //TODO FACGT F20, F16, F27 // 1beef47e + //TODO VFACGT V15.S4, V25.S4, V22.S4 // 36efaf6e + //TODO VFADD V21.D2, V10.D2, V21.D2 // 55d5754e + FADDS F12, F2, F10 // 4a282c1e + FADDD F24, F14, F12 // cc29781e + //TODO VFADDP V4.D2, F13 // 8dd8707e + //TODO VFADDP V30.S4, V3.S4, V11.S4 // 6bd43e6e + FCCMPS LE, F17, F12, $14 // 8ed5311e + FCCMPD HI, F11, F15, $15 // ef856b1e + FCCMPES HS, F28, F13, $13 // bd253c1e + FCCMPED LT, F20, F4, $9 // 99b4741e + //TODO FCMEQ F7, F11, F26 // 7ae5675e + //TODO VFCMEQ V29.S4, V26.S4, V30.S4 // 5ee73d4e + //TODO FCMEQ $0, F17, F22 // 36daa05e + //TODO VFCMEQ $0, V17.D2, V22.D2 // 36dae04e + //TODO FCMGE F29, F31, F13 // ede77d7e + //TODO VFCMGE V8.S2, V31.S2, V2.S2 // e2e7282e + //TODO FCMGE $0, F18, F27 // e2e7282e + //TODO VFCMGE $0, V14.S2, V8.S2 // c8c9a02e + //TODO FCMGT F20, F2, F8 // 48e4b47e + //TODO VFCMGT V26.D2, V15.D2, V23.D2 // f7e5fa6e + //TODO FCMGT $0, F14, F3 // c3c9e05e + //TODO VFCMGT $0, V6.S2, V28.S2 // dcc8a00e + //TODO FCMLE $0, F26, F25 // 59dba07e + //TODO VFCMLE $0, V28.S2, V20.S2 // 94dba02e + //TODO FCMLT $0, F17, F3 // 23eae05e + 
//TODO VFCMLT $0, V8.S4, V7.S4 // 07e9a04e + FCMPS F3, F17 // 2022231e + FCMPS $(0.0), F8 // 0821201e + FCMPD F11, F27 // 60236b1e + FCMPD $(0.0), F25 // 2823601e + FCMPES F16, F30 // d023301e + FCMPES $(0.0), F29 // b823201e + FCMPED F13, F10 // 50216d1e + FCMPED $(0.0), F25 // 3823601e + FCSELS EQ, F26, F27, F25 // 590f3b1e + FCSELD PL, F8, F22, F7 // 075d761e + //TODO FCVTAS F4, F28 // 9cc8215e + //TODO VFCVTAS V21.D2, V27.D2 // bbca614e + //TODO FCVTAS F27, R7 // 6703241e + //TODO FCVTAS F19, R26 // 7a02249e + //TODO FCVTAS F4, R0 // 8000641e + //TODO FCVTAS F3, R19 // 7300649e + //TODO FCVTAU F18, F28 // 5cca217e + //TODO VFCVTAU V30.S4, V27.S4 // dbcb216e + //TODO FCVTAU F0, R2 // 0200251e + //TODO FCVTAU F0, R24 // 1800259e + //TODO FCVTAU F31, R10 // ea03651e + //TODO FCVTAU F3, R8 // 6800659e + //TODO VFCVTL V11.S2, V21.D2 // 7579610e + //TODO VFCVTL2 V15.H8, V25.S4 // f979214e + //TODO FCVTMS F21, F28 // bcba215e + //TODO VFCVTMS V5.D2, V2.D2 // a2b8614e + //TODO FCVTMS F31, R19 // f303301e + //TODO FCVTMS F23, R16 // f002309e + //TODO FCVTMS F16, R22 // 1602701e + //TODO FCVTMS F14, R19 // d301709e + //TODO FCVTMU F14, F8 // c8b9217e + //TODO VFCVTMU V7.D2, V1.D2 // e1b8616e + //TODO FCVTMU F2, R0 // 4000311e + //TODO FCVTMU F23, R19 // f302319e + //TODO FCVTMU F16, R17 // 1102711e + //TODO FCVTMU F12, R19 // 9301719e + //TODO VFCVTN V23.D2, V26.S2 // fa6a610e + //TODO VFCVTN2 V2.D2, V31.S4 // 5f68614e + //TODO FCVTNS F3, F27 // 7ba8215e + //TODO VFCVTNS V11.S2, V12.S2 // 6ca9210e + //TODO FCVTNS F14, R9 // c901201e + //TODO FCVTNS F0, R27 // 1b00209e + //TODO FCVTNS F23, R0 // e002601e + //TODO FCVTNS F6, R30 // de00609e + //TODO FCVTNU F12, F9 // 89a9217e + //TODO VFCVTNU V3.D2, V20.D2 // 74a8616e + //TODO FCVTNU F20, R11 // 8b02211e + //TODO FCVTNU F23, R19 // f302219e + //TODO FCVTNU F4, R5 // 8500611e + //TODO FCVTNU F11, R19 // 7301619e + //TODO FCVTPS F20, F26 // 9aaae15e + //TODO VFCVTPS V29.S4, V13.S4 // adaba14e + //TODO FCVTPS F5, R29 // 
bd00281e + //TODO FCVTPS F3, R3 // 6300289e + //TODO FCVTPS F4, R25 // 9900681e + //TODO FCVTPS F29, R15 // af03689e + //TODO FCVTPU F13, F3 // a3a9e17e + //TODO VFCVTPU V6.S4, V24.S4 // d8a8a16e + //TODO FCVTPU F17, R17 // 3102291e + //TODO FCVTPU F7, R23 // f700299e + //TODO FCVTPU F10, R3 // 4301691e + //TODO FCVTPU F24, R27 // 1b03699e + //TODO FCVTXN F14, F0 // c069617e + //TODO VFCVTXN V1.D2, V17.S2 // 3168612e + //TODO VFCVTXN2 V0.D2, V21.S4 // 1568616e + //TODO FCVTZS $26, F29, F19 // b3ff665f + //TODO VFCVTZS $45, V14.D2, V18.D2 // d2fd534f + //TODO FCVTZS F8, F7 // 07b9a15e + //TODO VFCVTZS V2.S2, V4.S2 // 44b8a10e + //TODO FCVTZS $26, F7, R11 // eb98181e + //TODO FCVTZS $7, F4, ZR // 9fe4189e + //TODO FCVTZS $28, F13, R14 // ae91581e + //TODO FCVTZS $8, F27, R3 // 63e3589e + FCVTZSSW F7, R15 // ef00381e + FCVTZSS F16, ZR // 1f02389e + FCVTZSDW F19, R3 // 6302781e + FCVTZSD F7, R7 // e700789e + //TODO FCVTZU $17, F18, F28 // 5cfe2f7f + //TODO VFCVTZU $19, V20.D2, V11.D2 // 8bfe6d6f + //TODO FCVTZU F22, F8 // c8bae17e + //TODO VFCVTZU V0.S4, V1.S4 // 01b8a16e + //TODO FCVTZU $14, F24, R20 // 14cb191e + //TODO FCVTZU $6, F25, R17 // 31eb199e + //TODO FCVTZU $5, F17, R10 // 2aee591e + //TODO FCVTZU $6, F7, R19 // f3e8599e + FCVTZUSW F2, R9 // 4900391e + FCVTZUS F12, R29 // 9d01399e + FCVTZUDW F27, R22 // 7603791e + FCVTZUD F25, R22 // 3603799e + //TODO VFDIV V6.D2, V1.D2, V27.D2 // 3bfc666e + FDIVS F16, F10, F20 // 5419301e + FDIVD F11, F25, F30 // 3e1b6b1e + FMADDS F15, F2, F8, F1 // 01090f1f + FMADDD F15, F21, F25, F9 // 29574f1f + //TODO VFMAX V23.D2, V27.D2, V14.D2 // 6ef7774e + FMAXS F5, F28, F27 // 9b4b251e + FMAXD F12, F31, F31 // ff4b6c1e + //TODO VFMAXNM V3.D2, V12.D2, V27.D2 // 9bc5634e + FMAXNMS F11, F24, F12 // 0c6b2b1e + FMAXNMD F20, F6, F16 // d068741e + //TODO VFMAXNMP V3.S2, F2 // 62c8307e + //TODO VFMAXNMP V25.S2, V4.S2, V2.S2 // 82c4392e + //TODO VFMAXNMV V14.S4, F15 // cfc9306e + //TODO VFMAXP V3.S2, F27 // 7bf8307e + //TODO VFMAXP V29.S2, 
V30.S2, V9.S2 // c9f73d2e + //TODO VFMAXV V13.S4, F14 // aef9306e + //TODO VFMIN V19.D2, V30.D2, V7.D2 // c7f7f34e + FMINS F26, F18, F30 // 5e5a3a1e + FMIND F29, F4, F21 // 95587d1e + //TODO VFMINNM V21.S4, V5.S4, V1.S4 // a1c4b54e + FMINNMS F23, F20, F1 // 817a371e + FMINNMD F8, F3, F24 // 7878681e + //TODO VFMINNMP V16.D2, F12 // 0ccaf07e + //TODO VFMINNMP V10.S4, V25.S4, V27.S4 // 3bc7aa6e + //TODO VFMINNMV V8.S4, F3 // 03c9b06e + //TODO VFMINP V10.S2, F20 // 54f9b07e + //TODO VFMINP V1.D2, V10.D2, V3.D2 // 43f5e16e + //TODO VFMINV V11.S4, F9 // 69f9b06e + //TODO VFMLA V6.S[0], F2, F14 // 4e10865f + //TODO VFMLA V28.S[2], V2.S2, V30.S2 // 5e189c0f + VFMLA V29.S2, V20.S2, V14.S2 // 8ece3d0e + //TODO VFMLS V24.D[1], F3, F17 // 7158d85f + //TODO VFMLS V10.S[0], V11.S2, V10.S2 // 6a518a0f + VFMLS V29.S2, V27.S2, V17.S2 // 71cfbd0e + //TODO FMOVS $(-1.625), F13 // 0d503f1e + //TODO FMOVD $12.5, F30 // 1e30651e + //TODO VFMOV R7, V25.D[1] // f900af9e + FMOVD F2, R15 // 4f00669e + FMOVD R3, F11 // 6b00679e + FMOVS F20, R29 // 9d02261e + FMOVS R8, F15 // 0f01271e + FMOVD F2, F9 // 4940601e + FMOVS F4, F27 // 9b40201e + //TODO VFMOV $3.125, V8.D2 // 28f5006f + FMSUBS F13, F21, F13, F19 // b3d50d1f + FMSUBD F11, F7, F15, F31 // ff9d4b1f + //TODO VFMUL V9.S[2], F21, F19 // b39a895f + //TODO VFMUL V26.S[2], V26.S2, V2.S2 // 429b9a0f + //TODO VFMUL V21.D2, V17.D2, V25.D2 // 39de756e + FMULS F0, F6, F24 // d808201e + FMULD F5, F29, F9 // a90b651e + //TODO VFMULX V26.S[2], F20, F8 // 889a9a7f + //TODO VFMULX V12.D[1], V21.D2, V31.D2 // bf9acc6f + //TODO FMULX F16, F1, F31 // 3fdc705e + //TODO VFMULX V29.S2, V13.S2, V31.S2 // bfdd3d0e + //TODO VFNEG V18.S2, V12.S2 // 4cfaa02e + FNEGS F16, F5 // 0542211e + FNEGD F31, F31 // ff43611e + FNMADDS F17, F22, F6, F20 // d458311f + FNMADDD F15, F0, F26, F20 // 54036f1f + FNMSUBS F14, F16, F27, F14 // 6ec32e1f + FNMSUBD F29, F25, F8, F10 // 0ae57d1f + FNMULS F24, F22, F18 // d28a381e + FNMULD F14, F30, F7 // c78b6e1e + //TODO FRECPE F9, 
F2 // 22d9e15e + //TODO VFRECPE V0.S2, V28.S2 // 1cd8a10e + //TODO FRECPS F28, F10, F9 // 49fd3c5e + //TODO VFRECPS V27.D2, V12.D2, V24.D2 // 98fd7b4e + //TODO FRECPX F28, F3 // 83fbe15e + //TODO VFRINTA V14.S2, V25.S2 // d989212e + FRINTAS F0, F21 // 1540261e + FRINTAD F8, F22 // 1641661e + //TODO VFRINTI V21.D2, V31.D2 // bf9ae16e + FRINTIS F17, F17 // 31c2271e + FRINTID F9, F15 // 2fc1671e + //TODO VFRINTM V9.D2, V27.D2 // 3b99614e + FRINTMS F24, F16 // 1043251e + FRINTMD F5, F2 // a240651e + //TODO VFRINTN V30.S4, V2.S4 // c28b214e + FRINTNS F26, F14 // 4e43241e + FRINTND F28, F12 // 8c43641e + //TODO VFRINTP V27.D2, V31.D2 // 7f8be14e + FRINTPS F27, F4 // 64c3241e + FRINTPD F6, F22 // d6c0641e + //TODO VFRINTX V25.D2, V0.D2 // 209b616e + FRINTXS F26, F10 // 4a43271e + FRINTXD F16, F12 // 0c42671e + //TODO VFRINTZ V25.S4, V27.S4 // 3b9ba14e + FRINTZS F3, F28 // 7cc0251e + FRINTZD F24, F6 // 06c3651e + //TODO FRSQRTE F29, F5 // a5dbe17e + //TODO VFRSQRTE V18.S2, V1.S2 // 41daa12e + //TODO FRSQRTS F17, F7, F24 // f8fcf15e + //TODO VFRSQRTS V14.S2, V10.S2, V24.S2 // 58fdae0e + //TODO VFSQRT V2.D2, V21.D2 // 55f8e16e + FSQRTS F0, F9 // 09c0211e + FSQRTD F14, F27 // dbc1611e + FSUBS F25, F23, F0 // e03a391e + FSUBD F11, F13, F24 // b8396b1e + //TODO SCVTFSS F30, F20 // d4db215e + //TODO VSCVTF V7.S2, V17.S2 // f1d8210e + SCVTFWS R3, F16 // 7000221e + SCVTFWD R20, F4 // 8402621e + SCVTFS R16, F12 // 0c02229e + SCVTFD R26, F14 // 4e03629e + UCVTFWS R6, F4 // c400231e + UCVTFWD R10, F23 // 5701631e + UCVTFS R24, F29 // 1d03239e + UCVTFD R20, F11 // 8b02639e + VADD V16, V19, V14 // 6e86f05e + VADD V5.H8, V18.H8, V9.H8 // 4986654e + VADDP V7.H8, V25.H8, V17.H8 // 31bf674e + VADDV V3.H8, V0 // 60b8714e + AESD V22.B16, V19.B16 // d35a284e + AESE V31.B16, V29.B16 // fd4b284e + AESIMC V12.B16, V27.B16 // 9b79284e + AESMC V14.B16, V28.B16 // dc69284e + VAND V4.B16, V4.B16, V9.B16 // 891c244e + VCMEQ V24.S4, V13.S4, V12.S4 // ac8db86e + VCNT V13.B8, V11.B8 // ab59200e + VMOV 
V31.B[15], V18 // f2071f5e + VDUP V31.B[15], V18 // f2071f5e + VDUP V31.B[13], V20.B16 // f4071b4e + VEOR V4.B8, V18.B8, V7.B8 // 471e242e + VEXT $4, V2.B8, V1.B8, V3.B8 // 2320022e + VEXT $8, V2.B16, V1.B16, V3.B16 // 2340026e + VMOV V11.B[11], V16.B[12] // 705d196e + VMOV R20, V21.B[2] // 951e054e + VLD1 (R2), [V21.B16] // 5570404c + VLD1 (R24), [V18.D1, V19.D1, V20.D1] // 126f400c + VLD1 (R29), [V14.D1, V15.D1, V16.D1, V17.D1] // ae2f400c + VLD1.P 16(R23), [V1.B16] // e172df4c + VLD1.P (R6)(R11), [V31.D1] // df7ccb0c + VLD1.P 16(R7), [V31.D1, V0.D1] // ffacdf0c + VLD1.P (R19)(R4), [V24.B8, V25.B8] // 78a2c40c + VLD1.P (R20)(R8), [V7.H8, V8.H8, V9.H8] // 8766c84c + VLD1.P 32(R30), [V5.B8, V6.B8, V7.B8, V8.B8] // c523df0c + VLD1 (R19), V14.B[15] // 6e1e404d + VLD1 (R29), V0.H[1] // a04b400d + VLD1 (R27), V2.S[0] // 6283400d + VLD1 (R21), V5.D[1] // a586404d + VLD1.P 1(R19), V10.B[14] // 6a1adf4d + VLD1.P (R3)(R14), V16.B[11] // 700cce4d + VLD1.P 2(R1), V28.H[2] // 3c50df0d + VLD1.P (R13)(R20), V9.H[2] // a951d40d + VLD1.P 4(R17), V1.S[3] // 2192df4d + VLD1.P (R14)(R2), V17.S[2] // d181c24d + VLD1.P 8(R5), V30.D[1] // be84df4d + VLD1.P (R27)(R13), V27.D[0] // 7b87cd0d + //TODO FMOVS.P -29(RSP), F8 // e8375ebc + //TODO FMOVS.W 71(R29), F28 // bc7f44bc + FMOVS 6160(R4), F23 // 971058bd + VMOV V18.B[10], V27 // 5b06155e + VDUP V18.B[10], V27 // 5b06155e + VMOV V12.B[2], V28.B[12] // 9c15196e + VMOV R30, V4.B[13] // c41f1b4e + VMOV V2.B16, V4.B16 // 441ca24e + VMOV V13.S[0], R20 // b43d040e + VMOV V13.D[0], R20 // b43d084e + VMOVI $146, V22.B16 // 56e6044f + VORR V25.B16, V22.B16, V15.B16 // cf1eb94e + VPMULL V2.D1, V1.D1, V3.Q1 // 23e0e20e + VPMULL2 V2.D2, V1.D2, V4.Q1 // 24e0e24e + VPMULL V2.B8, V1.B8, V3.H8 // 23e0220e + VPMULL2 V2.B16, V1.B16, V4.H8 // 24e0224e + VRBIT V10.B16, V21.B16 // 5559606e + VREV32 V2.H8, V1.H8 // 4108606e + VREV16 V2.B8, V1.B8 // 4118200e + VREV16 V5.B16, V16.B16 // b018204e + SCVTFWS R6, F17 // d100221e + SCVTFWD R3, F15 // 6f00621e + 
SCVTFS R20, F25 // 9902229e + SCVTFD R13, F9 // a901629e + SHA1C V8.S4, V8, V2 // 0201085e + SHA1H V17, V25 // 390a285e + SHA1M V0.S4, V27, V27 // 7b23005e + SHA1P V3.S4, V20, V27 // 9b12035e + SHA1SU0 V17.S4, V13.S4, V16.S4 // b031115e + SHA1SU1 V24.S4, V23.S4 // 171b285e + SHA256H2 V6.S4, V16, V11 // 0b52065e + SHA256H V4.S4, V2, V11 // 4b40045e + SHA256SU0 V0.S4, V16.S4 // 1028285e + SHA256SU1 V31.S4, V3.S4, V15.S4 // 6f601f5e + VSHL $7, V22.D2, V25.D2 // d956474f + VST1 [V14.H4, V15.H4, V16.H4], (R27) // 6e67000c + VST1 [V2.S4, V3.S4, V4.S4, V5.S4], (R14) // c229004c + VST1.P [V25.S4], (R7)(R29) // f9789d4c + VST1.P [V25.D2, V26.D2], 32(R7) // f9ac9f4c + VST1.P [V14.D1, V15.D1], (R7)(R23) // eeac970c + VST1.P [V25.D2, V26.D2, V27.D2], 48(R27) // 796f9f4c + VST1.P [V13.H8, V14.H8, V15.H8], (R3)(R14) // 6d648e4c + VST1.P [V16.S4, V17.S4, V18.S4, V19.S4], 64(R6) // d0289f4c + VST1.P [V19.H4, V20.H4, V21.H4, V22.H4], (R4)(R16) // 9324900c + VST1 V12.B[3], (R1) // 2c0c000d + VST1 V12.B[3], (R1) // 2c0c000d + VST1 V25.S[2], (R20) // 9982004d + VST1 V9.D[1], (RSP) // e987004d + VST1.P V30.B[6], 1(R3) // 7e189f0d + VST1.P V8.B[0], (R3)(R21) // 6800950d + VST1.P V15.H[5], 2(R10) // 4f499f4d + VST1.P V1.H[7], (R23)(R11) // e15a8b4d + VST1.P V26.S[0], 4(R11) // 7a819f0d + VST1.P V9.S[1], (R16)(R21) // 0992950d + VST1.P V16.D[0], 8(R9) // 30859f0d + VST1.P V23.D[1], (R21)(R16) // b786904d + VSUB V1, V12, V23 // 9785e17e + VUADDLV V31.S4, V11 // eb3bb06e + UCVTFWS R11, F19 // 7301231e + UCVTFWD R26, F13 // 4d03631e + UCVTFS R23, F11 // eb02239e + UCVTFD R5, F29 // bd00639e + VMOV V0.B[1], R11 // 0b3c030e + VMOV V1.H[3], R12 // 2c3c0e0e + VUSHR $6, V22.H8, V23.H8 // d7061a6f + + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/arm64error.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/arm64error.s new file mode 100644 index 0000000000000000000000000000000000000000..3ac87884245c56a22e1c921fd000f027a8d13a08 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/arm64error.s @@ -0,0 +1,423 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +TEXT errors(SB),$0 + AND $1, RSP // ERROR "illegal source register" + ANDS $1, R0, RSP // ERROR "illegal combination" + ADDSW R7->32, R14, R13 // ERROR "shift amount out of range 0 to 31" + ADD R1.UXTB<<5, R2, R3 // ERROR "shift amount out of range 0 to 4" + ADDS R1.UXTX<<7, R2, R3 // ERROR "shift amount out of range 0 to 4" + ADDS R5, R6, RSP // ERROR "illegal destination register" + SUBS R5, R6, RSP // ERROR "illegal destination register" + ADDSW R5, R6, RSP // ERROR "illegal destination register" + SUBSW R5, R6, RSP // ERROR "illegal destination register" + ADDS $0xff, R6, RSP // ERROR "illegal destination register" + ADDS $0xffff0, R6, RSP // ERROR "illegal destination register" + ADDS $0x1000100010001000, R6, RSP // ERROR "illegal destination register" + ADDS $0x10001000100011, R6, RSP // ERROR "illegal destination register" + ADDSW $0xff, R6, RSP // ERROR "illegal destination register" + ADDSW $0xffff0, R6, RSP // ERROR "illegal destination register" + ADDSW $0x1000100010001000, R6, RSP // ERROR "illegal destination register" + ADDSW $0x10001000100011, R6, RSP // ERROR "illegal destination register" + SUBS $0xff, R6, RSP // ERROR "illegal destination register" + SUBS $0xffff0, R6, RSP // ERROR "illegal destination register" + SUBS $0x1000100010001000, R6, RSP // ERROR "illegal destination register" + SUBS $0x10001000100011, R6, RSP // ERROR "illegal destination register" + SUBSW $0xff, R6, RSP // ERROR "illegal destination register" + SUBSW $0xffff0, R6, RSP // ERROR "illegal destination register" + SUBSW $0x1000100010001000, R6, RSP // ERROR "illegal destination register" + SUBSW $0x10001000100011, R6, RSP // ERROR "illegal destination register" + AND $0x22220000, R2, RSP // ERROR "illegal 
combination" + ANDS $0x22220000, R2, RSP // ERROR "illegal combination" + ADD R1, R2, R3, R4 // ERROR "illegal combination" + BICW R7@>33, R5, R16 // ERROR "shift amount out of range 0 to 31" + NEGW R7<<33, R5 // ERROR "shift amount out of range 0 to 31" + NEGSW R7<<33, R5 // ERROR "shift amount out of range 0 to 31" + ADD R7@>2, R5, R16 // ERROR "unsupported shift operator" + ADDW R7@>2, R5, R16 // ERROR "unsupported shift operator" + ADDS R7@>2, R5, R16 // ERROR "unsupported shift operator" + ADDSW R7@>2, R5, R16 // ERROR "unsupported shift operator" + SUB R7@>2, R5, R16 // ERROR "unsupported shift operator" + SUBW R7@>2, R5, R16 // ERROR "unsupported shift operator" + SUBS R7@>2, R5, R16 // ERROR "unsupported shift operator" + SUBSW R7@>2, R5, R16 // ERROR "unsupported shift operator" + CMP R7@>2, R5 // ERROR "unsupported shift operator" + CMPW R7@>2, R5 // ERROR "unsupported shift operator" + CMN R7@>2, R5 // ERROR "unsupported shift operator" + CMNW R7@>2, R5 // ERROR "unsupported shift operator" + NEG R7@>2, R5 // ERROR "unsupported shift operator" + NEGW R7@>2, R5 // ERROR "unsupported shift operator" + NEGS R7@>2, R5 // ERROR "unsupported shift operator" + NEGSW R7@>2, R5 // ERROR "unsupported shift operator" + CINC CS, R2, R3, R4 // ERROR "illegal combination" + CSEL LT, R1, R2 // ERROR "illegal combination" + CINC AL, R2, R3 // ERROR "invalid condition" + CINC NV, R2, R3 // ERROR "invalid condition" + CINVW AL, R2, R3 // ERROR "invalid condition" + CINV NV, R2, R3 // ERROR "invalid condition" + CNEG AL, R2, R3 // ERROR "invalid condition" + CNEGW NV, R2, R3 // ERROR "invalid condition" + CSET AL, R2 // ERROR "invalid condition" + CSET NV, R2 // ERROR "invalid condition" + CSETMW AL, R2 // ERROR "invalid condition" + CSETM NV, R2 // ERROR "invalid condition" + LDP.P 8(R2), (R2, R3) // ERROR "constrained unpredictable behavior" + LDP.W 8(R3), (R2, R3) // ERROR "constrained unpredictable behavior" + LDP (R1), (R2, R2) // ERROR "constrained unpredictable 
behavior" + LDP (R0), (F0, F1) // ERROR "invalid register pair" + LDXPW (RSP), (R2, R2) // ERROR "constrained unpredictable behavior" + LDAXPW (R5), (R2, R2) // ERROR "constrained unpredictable behavior" + MOVD.P 300(R2), R3 // ERROR "offset out of range [-256,255]" + MOVD.P R3, 344(R2) // ERROR "offset out of range [-256,255]" + MOVD (R3)(R7.SXTX<<2), R8 // ERROR "invalid index shift amount" + MOVWU (R5)(R4.UXTW<<3), R10 // ERROR "invalid index shift amount" + MOVWU (R5)(R4<<1), R10 // ERROR "invalid index shift amount" + MOVB (R5)(R4.SXTW<<5), R10 // ERROR "invalid index shift amount" + MOVH R5, (R6)(R2<<3) // ERROR "invalid index shift amount" + MADD R1, R2, R3 // ERROR "illegal combination" + MOVD.P R1, 8(R1) // ERROR "constrained unpredictable behavior" + MOVD.W 16(R2), R2 // ERROR "constrained unpredictable behavior" + STP (F2, F3), (R0) // ERROR "invalid register pair" + STP.W (R1, R2), 8(R1) // ERROR "constrained unpredictable behavior" + STP.P (R1, R2), 8(R2) // ERROR "constrained unpredictable behavior" + STLXP (R6, R11), (RSP), R6 // ERROR "constrained unpredictable behavior" + STXP (R6, R11), (R2), R2 // ERROR "constrained unpredictable behavior" + STLXR R3, (RSP), R3 // ERROR "constrained unpredictable behavior" + STXR R3, (R4), R4 // ERROR "constrained unpredictable behavior" + STLXRB R2, (R5), R5 // ERROR "constrained unpredictable behavior" + VLD1 (R8)(R13), [V2.B16] // ERROR "illegal combination" + VLD1 8(R9), [V2.B16] // ERROR "illegal combination" + VST1 [V1.B16], (R8)(R13) // ERROR "illegal combination" + VST1 [V1.B16], 9(R2) // ERROR "illegal combination" + VLD1 8(R8)(R13), [V2.B16] // ERROR "illegal combination" + VMOV V8.D[2], V12.D[1] // ERROR "register element index out of range 0 to 1" + VMOV V8.S[4], V12.S[1] // ERROR "register element index out of range 0 to 3" + VMOV V8.H[8], V12.H[1] // ERROR "register element index out of range 0 to 7" + VMOV V8.B[16], V12.B[1] // ERROR "register element index out of range 0 to 15" + VMOV V8.D[0], 
V12.S[1] // ERROR "operand mismatch" + VMOV V8.D[0], V12.H[1] // ERROR "operand mismatch" + VMOV V8.D[0], V12.B[1] // ERROR "operand mismatch" + VMOV V8.S[0], V12.H[1] // ERROR "operand mismatch" + VMOV V8.S[0], V12.B[1] // ERROR "operand mismatch" + VMOV V8.H[0], V12.B[1] // ERROR "operand mismatch" + VMOV V8.B[16], R3 // ERROR "register element index out of range 0 to 15" + VMOV V8.H[9], R3 // ERROR "register element index out of range 0 to 7" + VMOV V8.S[4], R3 // ERROR "register element index out of range 0 to 3" + VMOV V8.D[2], R3 // ERROR "register element index out of range 0 to 1" + VDUP V8.B[16], V3.B16 // ERROR "register element index out of range 0 to 15" + VDUP V8.B[17], V3.B8 // ERROR "register element index out of range 0 to 15" + VDUP V8.H[9], V3.H4 // ERROR "register element index out of range 0 to 7" + VDUP V8.H[9], V3.H8 // ERROR "register element index out of range 0 to 7" + VDUP V8.S[4], V3.S2 // ERROR "register element index out of range 0 to 3" + VDUP V8.S[4], V3.S4 // ERROR "register element index out of range 0 to 3" + VDUP V8.D[2], V3.D2 // ERROR "register element index out of range 0 to 1" + VFMLA V1.D2, V12.D2, V3.S2 // ERROR "operand mismatch" + VFMLA V1.S2, V12.S2, V3.D2 // ERROR "operand mismatch" + VFMLA V1.S4, V12.S2, V3.D2 // ERROR "operand mismatch" + VFMLA V1.H4, V12.H4, V3.D2 // ERROR "operand mismatch" + VFMLS V1.S2, V12.S2, V3.S4 // ERROR "operand mismatch" + VFMLS V1.S2, V12.D2, V3.S4 // ERROR "operand mismatch" + VFMLS V1.S2, V12.S4, V3.D2 // ERROR "operand mismatch" + VFMLA V1.B8, V12.B8, V3.B8 // ERROR "invalid arrangement" + VFMLA V1.B16, V12.B16, V3.B16 // ERROR "invalid arrangement" + VFMLA V1.H4, V12.H4, V3.H4 // ERROR "invalid arrangement" + VFMLA V1.H8, V12.H8, V3.H8 // ERROR "invalid arrangement" + VFMLA V1.H4, V12.H4, V3.H4 // ERROR "invalid arrangement" + VFMLS V1.B8, V12.B8, V3.B8 // ERROR "invalid arrangement" + VFMLS V1.B16, V12.B16, V3.B16 // ERROR "invalid arrangement" + VFMLS V1.H4, V12.H4, V3.H4 // ERROR 
"invalid arrangement" + VFMLS V1.H8, V12.H8, V3.H8 // ERROR "invalid arrangement" + VFMLS V1.H4, V12.H4, V3.H4 // ERROR "invalid arrangement" + VST1.P [V4.S4,V5.S4], 48(R1) // ERROR "invalid post-increment offset" + VST1.P [V4.S4], 8(R1) // ERROR "invalid post-increment offset" + VLD1.P 32(R1), [V8.S4, V9.S4, V10.S4] // ERROR "invalid post-increment offset" + VLD1.P 48(R1), [V7.S4, V8.S4, V9.S4, V10.S4] // ERROR "invalid post-increment offset" + VPMULL V1.D1, V2.H4, V3.Q1 // ERROR "invalid arrangement" + VPMULL V1.H4, V2.H4, V3.Q1 // ERROR "operand mismatch" + VPMULL V1.D2, V2.D2, V3.Q1 // ERROR "operand mismatch" + VPMULL V1.B16, V2.B16, V3.H8 // ERROR "operand mismatch" + VPMULL2 V1.D2, V2.H4, V3.Q1 // ERROR "invalid arrangement" + VPMULL2 V1.H4, V2.H4, V3.Q1 // ERROR "operand mismatch" + VPMULL2 V1.D1, V2.D1, V3.Q1 // ERROR "operand mismatch" + VPMULL2 V1.B8, V2.B8, V3.H8 // ERROR "operand mismatch" + VEXT $8, V1.B16, V2.B8, V2.B16 // ERROR "invalid arrangement" + VEXT $8, V1.H8, V2.H8, V2.H8 // ERROR "invalid arrangement" + VRBIT V1.B16, V2.B8 // ERROR "invalid arrangement" + VRBIT V1.H4, V2.H4 // ERROR "invalid arrangement" + VUSHR $56, V1.D2, V2.H4 // ERROR "invalid arrangement" + VUSHR $127, V1.D2, V2.D2 // ERROR "shift out of range" + VLD1.P (R8)(R9.SXTX<<2), [V2.B16] // ERROR "invalid extended register" + VLD1.P (R8)(R9<<2), [V2.B16] // ERROR "invalid extended register" + VST1.P [V1.B16], (R8)(R9.UXTW) // ERROR "invalid extended register" + VST1.P [V1.B16], (R8)(R9<<1) // ERROR "invalid extended register" + VREV64 V1.H4, V2.H8 // ERROR "invalid arrangement" + VREV64 V1.D1, V2.D1 // ERROR "invalid arrangement" + VREV16 V1.D1, V2.D1 // ERROR "invalid arrangement" + VREV16 V1.B8, V2.B16 // ERROR "invalid arrangement" + VREV16 V1.H4, V2.H4 // ERROR "invalid arrangement" + FLDPQ (R0), (R1, R2) // ERROR "invalid register pair" + FLDPQ (R1), (F2, F2) // ERROR "constrained unpredictable behavior" + FSTPQ (R1, R2), (R0) // ERROR "invalid register pair" + FLDPD 
(R0), (R1, R2) // ERROR "invalid register pair" + FLDPD (R1), (F2, F2) // ERROR "constrained unpredictable behavior" + FLDPS (R2), (F3, F3) // ERROR "constrained unpredictable behavior" + FSTPD (R1, R2), (R0) // ERROR "invalid register pair" + FMOVS (F2), F0 // ERROR "illegal combination" + FMOVD F0, (F1) // ERROR "illegal combination" + LDADDAD R5, (R6), RSP // ERROR "illegal combination" + LDADDAW R5, (R6), RSP // ERROR "illegal combination" + LDADDAH R5, (R6), RSP // ERROR "illegal combination" + LDADDAB R5, (R6), RSP // ERROR "illegal combination" + LDADDALD R5, (R6), RSP // ERROR "illegal combination" + LDADDALW R5, (R6), RSP // ERROR "illegal combination" + LDADDALH R5, (R6), RSP // ERROR "illegal combination" + LDADDALB R5, (R6), RSP // ERROR "illegal combination" + LDADDD R5, (R6), RSP // ERROR "illegal combination" + LDADDW R5, (R6), RSP // ERROR "illegal combination" + LDADDH R5, (R6), RSP // ERROR "illegal combination" + LDADDB R5, (R6), RSP // ERROR "illegal combination" + LDADDLD R5, (R6), RSP // ERROR "illegal combination" + LDADDLW R5, (R6), RSP // ERROR "illegal combination" + LDADDLH R5, (R6), RSP // ERROR "illegal combination" + LDADDLB R5, (R6), RSP // ERROR "illegal combination" + LDCLRAD R5, (R6), RSP // ERROR "illegal combination" + LDCLRAW R5, (R6), RSP // ERROR "illegal combination" + LDCLRAH R5, (R6), RSP // ERROR "illegal combination" + LDCLRAB R5, (R6), RSP // ERROR "illegal combination" + LDCLRALD R5, (R6), RSP // ERROR "illegal combination" + LDCLRALW R5, (R6), RSP // ERROR "illegal combination" + LDCLRALH R5, (R6), RSP // ERROR "illegal combination" + LDCLRALB R5, (R6), RSP // ERROR "illegal combination" + LDCLRD R5, (R6), RSP // ERROR "illegal combination" + LDCLRW R5, (R6), RSP // ERROR "illegal combination" + LDCLRH R5, (R6), RSP // ERROR "illegal combination" + LDCLRB R5, (R6), RSP // ERROR "illegal combination" + LDCLRLD R5, (R6), RSP // ERROR "illegal combination" + LDCLRLW R5, (R6), RSP // ERROR "illegal combination" + LDCLRLH 
R5, (R6), RSP // ERROR "illegal combination" + LDCLRLB R5, (R6), RSP // ERROR "illegal combination" + LDEORAD R5, (R6), RSP // ERROR "illegal combination" + LDEORAW R5, (R6), RSP // ERROR "illegal combination" + LDEORAH R5, (R6), RSP // ERROR "illegal combination" + LDEORAB R5, (R6), RSP // ERROR "illegal combination" + LDEORALD R5, (R6), RSP // ERROR "illegal combination" + LDEORALW R5, (R6), RSP // ERROR "illegal combination" + LDEORALH R5, (R6), RSP // ERROR "illegal combination" + LDEORALB R5, (R6), RSP // ERROR "illegal combination" + LDEORD R5, (R6), RSP // ERROR "illegal combination" + LDEORW R5, (R6), RSP // ERROR "illegal combination" + LDEORH R5, (R6), RSP // ERROR "illegal combination" + LDEORB R5, (R6), RSP // ERROR "illegal combination" + LDEORLD R5, (R6), RSP // ERROR "illegal combination" + LDEORLW R5, (R6), RSP // ERROR "illegal combination" + LDEORLH R5, (R6), RSP // ERROR "illegal combination" + LDEORLB R5, (R6), RSP // ERROR "illegal combination" + LDORAD R5, (R6), RSP // ERROR "illegal combination" + LDORAW R5, (R6), RSP // ERROR "illegal combination" + LDORAH R5, (R6), RSP // ERROR "illegal combination" + LDORAB R5, (R6), RSP // ERROR "illegal combination" + LDORALD R5, (R6), RSP // ERROR "illegal combination" + LDORALW R5, (R6), RSP // ERROR "illegal combination" + LDORALH R5, (R6), RSP // ERROR "illegal combination" + LDORALB R5, (R6), RSP // ERROR "illegal combination" + LDORD R5, (R6), RSP // ERROR "illegal combination" + LDORW R5, (R6), RSP // ERROR "illegal combination" + LDORH R5, (R6), RSP // ERROR "illegal combination" + LDORB R5, (R6), RSP // ERROR "illegal combination" + LDORLD R5, (R6), RSP // ERROR "illegal combination" + LDORLW R5, (R6), RSP // ERROR "illegal combination" + LDORLH R5, (R6), RSP // ERROR "illegal combination" + LDORLB R5, (R6), RSP // ERROR "illegal combination" + SWPAD R5, (R6), RSP // ERROR "illegal combination" + SWPAW R5, (R6), RSP // ERROR "illegal combination" + SWPAH R5, (R6), RSP // ERROR "illegal 
combination" + SWPAB R5, (R6), RSP // ERROR "illegal combination" + SWPALD R5, (R6), RSP // ERROR "illegal combination" + SWPALW R5, (R6), RSP // ERROR "illegal combination" + SWPALH R5, (R6), RSP // ERROR "illegal combination" + SWPALB R5, (R6), RSP // ERROR "illegal combination" + SWPD R5, (R6), RSP // ERROR "illegal combination" + SWPW R5, (R6), RSP // ERROR "illegal combination" + SWPH R5, (R6), RSP // ERROR "illegal combination" + SWPB R5, (R6), RSP // ERROR "illegal combination" + SWPLD R5, (R6), RSP // ERROR "illegal combination" + SWPLW R5, (R6), RSP // ERROR "illegal combination" + SWPLH R5, (R6), RSP // ERROR "illegal combination" + SWPLB R5, (R6), RSP // ERROR "illegal combination" + STXR R5, (R6), RSP // ERROR "illegal combination" + STXRW R5, (R6), RSP // ERROR "illegal combination" + STLXR R5, (R6), RSP // ERROR "illegal combination" + STLXRW R5, (R6), RSP // ERROR "illegal combination" + STXP (R5, R7), (R6), RSP // ERROR "illegal combination" + STXPW (R5, R7), (R6), RSP // ERROR "illegal combination" + STLXP (R5, R7), (R6), RSP // ERROR "illegal combination" + STLXP (R5, R7), (R6), RSP // ERROR "illegal combination" + MSR OSLAR_EL1, R5 // ERROR "illegal combination" + MRS R11, AIDR_EL1 // ERROR "illegal combination" + MSR R6, AIDR_EL1 // ERROR "system register is not writable" + MSR R6, AMCFGR_EL0 // ERROR "system register is not writable" + MSR R6, AMCGCR_EL0 // ERROR "system register is not writable" + MSR R6, AMEVTYPER00_EL0 // ERROR "system register is not writable" + MSR R6, AMEVTYPER01_EL0 // ERROR "system register is not writable" + MSR R6, AMEVTYPER02_EL0 // ERROR "system register is not writable" + MSR R6, AMEVTYPER03_EL0 // ERROR "system register is not writable" + MSR R6, AMEVTYPER04_EL0 // ERROR "system register is not writable" + MSR R6, AMEVTYPER05_EL0 // ERROR "system register is not writable" + MSR R6, AMEVTYPER06_EL0 // ERROR "system register is not writable" + MSR R6, AMEVTYPER07_EL0 // ERROR "system register is not writable" + MSR 
R6, AMEVTYPER08_EL0 // ERROR "system register is not writable" + MSR R6, AMEVTYPER09_EL0 // ERROR "system register is not writable" + MSR R6, AMEVTYPER010_EL0 // ERROR "system register is not writable" + MSR R6, AMEVTYPER011_EL0 // ERROR "system register is not writable" + MSR R6, AMEVTYPER012_EL0 // ERROR "system register is not writable" + MSR R6, AMEVTYPER013_EL0 // ERROR "system register is not writable" + MSR R6, AMEVTYPER014_EL0 // ERROR "system register is not writable" + MSR R6, AMEVTYPER015_EL0 // ERROR "system register is not writable" + MSR R6, CCSIDR2_EL1 // ERROR "system register is not writable" + MSR R6, CCSIDR_EL1 // ERROR "system register is not writable" + MSR R6, CLIDR_EL1 // ERROR "system register is not writable" + MSR R6, CNTPCT_EL0 // ERROR "system register is not writable" + MSR R6, CNTVCT_EL0 // ERROR "system register is not writable" + MSR R6, CTR_EL0 // ERROR "system register is not writable" + MSR R6, CurrentEL // ERROR "system register is not writable" + MSR R6, DBGAUTHSTATUS_EL1 // ERROR "system register is not writable" + MSR R6, DBGDTRRX_EL0 // ERROR "system register is not writable" + MSR R6, DCZID_EL0 // ERROR "system register is not writable" + MSR R6, ERRIDR_EL1 // ERROR "system register is not writable" + MSR R6, ERXFR_EL1 // ERROR "system register is not writable" + MSR R6, ERXPFGF_EL1 // ERROR "system register is not writable" + MSR R6, GMID_EL1 // ERROR "system register is not writable" + MSR R6, ICC_HPPIR0_EL1 // ERROR "system register is not writable" + MSR R6, ICC_HPPIR1_EL1 // ERROR "system register is not writable" + MSR R6, ICC_IAR0_EL1 // ERROR "system register is not writable" + MSR R6, ICC_IAR1_EL1 // ERROR "system register is not writable" + MSR R6, ICC_RPR_EL1 // ERROR "system register is not writable" + MSR R6, ICV_HPPIR0_EL1 // ERROR "system register is not writable" + MSR R6, ICV_HPPIR1_EL1 // ERROR "system register is not writable" + MSR R6, ICV_IAR0_EL1 // ERROR "system register is not writable" + MSR R6, 
ICV_IAR1_EL1 // ERROR "system register is not writable" + MSR R6, ICV_RPR_EL1 // ERROR "system register is not writable" + MSR R6, ID_AA64AFR0_EL1 // ERROR "system register is not writable" + MSR R6, ID_AA64AFR1_EL1 // ERROR "system register is not writable" + MSR R6, ID_AA64DFR0_EL1 // ERROR "system register is not writable" + MSR R6, ID_AA64DFR1_EL1 // ERROR "system register is not writable" + MSR R6, ID_AA64ISAR0_EL1 // ERROR "system register is not writable" + MSR R6, ID_AA64ISAR1_EL1 // ERROR "system register is not writable" + MSR R6, ID_AA64MMFR0_EL1 // ERROR "system register is not writable" + MSR R6, ID_AA64MMFR1_EL1 // ERROR "system register is not writable" + MSR R6, ID_AA64MMFR2_EL1 // ERROR "system register is not writable" + MSR R6, ID_AA64PFR0_EL1 // ERROR "system register is not writable" + MSR R6, ID_AA64PFR1_EL1 // ERROR "system register is not writable" + MSR R6, ID_AA64ZFR0_EL1 // ERROR "system register is not writable" + MSR R6, ID_AFR0_EL1 // ERROR "system register is not writable" + MSR R6, ID_DFR0_EL1 // ERROR "system register is not writable" + MSR R6, ID_ISAR0_EL1 // ERROR "system register is not writable" + MSR R6, ID_ISAR1_EL1 // ERROR "system register is not writable" + MSR R6, ID_ISAR2_EL1 // ERROR "system register is not writable" + MSR R6, ID_ISAR3_EL1 // ERROR "system register is not writable" + MSR R6, ID_ISAR4_EL1 // ERROR "system register is not writable" + MSR R6, ID_ISAR5_EL1 // ERROR "system register is not writable" + MSR R6, ID_ISAR6_EL1 // ERROR "system register is not writable" + MSR R6, ID_MMFR0_EL1 // ERROR "system register is not writable" + MSR R6, ID_MMFR1_EL1 // ERROR "system register is not writable" + MSR R6, ID_MMFR2_EL1 // ERROR "system register is not writable" + MSR R6, ID_MMFR3_EL1 // ERROR "system register is not writable" + MSR R6, ID_MMFR4_EL1 // ERROR "system register is not writable" + MSR R6, ID_PFR0_EL1 // ERROR "system register is not writable" + MSR R6, ID_PFR1_EL1 // ERROR "system register is not 
writable" + MSR R6, ID_PFR2_EL1 // ERROR "system register is not writable" + MSR R6, ISR_EL1 // ERROR "system register is not writable" + MSR R6, LORID_EL1 // ERROR "system register is not writable" + MSR R6, MDCCSR_EL0 // ERROR "system register is not writable" + MSR R6, MDRAR_EL1 // ERROR "system register is not writable" + MSR R6, MIDR_EL1 // ERROR "system register is not writable" + MSR R6, MPAMIDR_EL1 // ERROR "system register is not writable" + MSR R6, MPIDR_EL1 // ERROR "system register is not writable" + MSR R6, MVFR0_EL1 // ERROR "system register is not writable" + MSR R6, MVFR1_EL1 // ERROR "system register is not writable" + MSR R6, MVFR2_EL1 // ERROR "system register is not writable" + MSR R6, OSLSR_EL1 // ERROR "system register is not writable" + MSR R6, PMBIDR_EL1 // ERROR "system register is not writable" + MSR R6, PMCEID0_EL0 // ERROR "system register is not writable" + MSR R6, PMCEID1_EL0 // ERROR "system register is not writable" + MSR R6, PMMIR_EL1 // ERROR "system register is not writable" + MSR R6, PMSIDR_EL1 // ERROR "system register is not writable" + MSR R6, REVIDR_EL1 // ERROR "system register is not writable" + MSR R6, RNDR // ERROR "system register is not writable" + MRS DBGDTRTX_EL0, R5 // ERROR "system register is not readable" + MRS ICV_DIR_EL1, R5 // ERROR "system register is not readable" + MRS ICC_SGI1R_EL1, R5 // ERROR "system register is not readable" + MRS ICC_SGI0R_EL1, R5 // ERROR "system register is not readable" + MRS ICC_EOIR1_EL1, R5 // ERROR "system register is not readable" + MRS ICC_EOIR0_EL1, R5 // ERROR "system register is not readable" + MRS ICC_DIR_EL1, R5 // ERROR "system register is not readable" + MRS ICC_ASGI1R_EL1, R5 // ERROR "system register is not readable" + MRS ICV_EOIR0_EL1, R3 // ERROR "system register is not readable" + MRS ICV_EOIR1_EL1, R3 // ERROR "system register is not readable" + MRS PMSWINC_EL0, R3 // ERROR "system register is not readable" + MRS OSLAR_EL1, R3 // ERROR "system register is not 
readable" + VLD3R.P 24(R15), [V15.H4,V16.H4,V17.H4] // ERROR "invalid post-increment offset" + VBIT V1.H4, V12.H4, V3.H4 // ERROR "invalid arrangement" + VBSL V1.D2, V12.D2, V3.D2 // ERROR "invalid arrangement" + VUXTL V30.D2, V30.H8 // ERROR "operand mismatch" + VUXTL2 V20.B8, V21.H8 // ERROR "operand mismatch" + VUXTL V3.D2, V4.B8 // ERROR "operand mismatch" + VUZP1 V0.B8, V30.B8, V1.B16 // ERROR "operand mismatch" + VUZP2 V0.Q1, V30.Q1, V1.Q1 // ERROR "invalid arrangement" + VUSHLL $0, V30.D2, V30.H8 // ERROR "operand mismatch" + VUSHLL2 $0, V20.B8, V21.H8 // ERROR "operand mismatch" + VUSHLL $8, V30.B8, V30.H8 // ERROR "shift amount out of range" + VUSHLL2 $32, V30.S4, V2.D2 // ERROR "shift amount out of range" + VBIF V0.B8, V1.B8, V2.B16 // ERROR "operand mismatch" + VBIF V0.D2, V1.D2, V2.D2 // ERROR "invalid arrangement" + VUADDW V9.B8, V12.H8, V14.B8 // ERROR "invalid arrangement" + VUADDW2 V9.B8, V12.S4, V14.S4 // ERROR "operand mismatch" + VUMAX V1.D2, V2.D2, V3.D2 // ERROR "invalid arrangement" + VUMIN V1.D2, V2.D2, V3.D2 // ERROR "invalid arrangement" + VUMAX V1.B8, V2.B8, V3.B16 // ERROR "operand mismatch" + VUMIN V1.H4, V2.S4, V3.H4 // ERROR "operand mismatch" + VSLI $64, V7.D2, V8.D2 // ERROR "shift out of range" + VUSRA $0, V7.D2, V8.D2 // ERROR "shift out of range" + CASPD (R3, R4), (R2), (R8, R9) // ERROR "source register pair must start from even register" + CASPD (R2, R3), (R2), (R9, R10) // ERROR "destination register pair must start from even register" + CASPD (R2, R4), (R2), (R8, R9) // ERROR "source register pair must be contiguous" + CASPD (R2, R3), (R2), (R8, R10) // ERROR "destination register pair must be contiguous" + ADD R1>>2, RSP, R3 // ERROR "illegal combination" + ADDS R2<<3, R3, RSP // ERROR "illegal destination register" + CMP R1<<5, RSP // ERROR "shift amount out of range 0 to 4" + MOVD.P y+8(FP), R1 // ERROR "illegal combination" + MOVD.W x-8(SP), R1 // ERROR "illegal combination" + LDP.P x+8(FP), (R0, R1) // ERROR "illegal 
combination" + LDP.W x+8(SP), (R0, R1) // ERROR "illegal combination" + ADD $0x1234567, R27, R3 // ERROR "cannot use REGTMP as source" + ADD $0x3fffffffc000, R27, R5 // ERROR "cannot use REGTMP as source" + AND $0x22220000, R27, R4 // ERROR "cannot use REGTMP as source" + ANDW $0x6006000060060, R27, R5 // ERROR "cannot use REGTMP as source" + STP (R3, R4), 0x1234567(R27) // ERROR "REGTMP used in large offset store" + LDP 0x1234567(R27), (R3, R4) // ERROR "REGTMP used in large offset load" + STP (R26, R27), 700(R2) // ERROR "cannot use REGTMP as source" + MOVK $0, R10 // ERROR "zero shifts cannot be handled correctly" + MOVK $(0<<32), R10 // ERROR "zero shifts cannot be handled correctly" + TLBI PLDL1KEEP // ERROR "illegal argument" + TLBI VMALLE1IS, R0 // ERROR "extraneous register at operand 2" + TLBI ALLE3OS, ZR // ERROR "extraneous register at operand 2" + TLBI VAE1IS // ERROR "missing register at operand 2" + TLBI RVALE3 // ERROR "missing register at operand 2" + DC PLDL1KEEP // ERROR "illegal argument" + DC VMALLE1IS // ERROR "illegal argument" + DC VAE1IS // ERROR "illegal argument" + DC VAE1IS, R0 // ERROR "illegal argument" + DC IVAC // ERROR "missing register at operand 2" + AESD V1.B8, V2.B8 // ERROR "invalid arrangement" + AESE V1.D2, V2.D2 // ERROR "invalid arrangement" + AESIMC V1.S4, V2.S4 // ERROR "invalid arrangement" + SHA1SU1 V1.B16, V2.B16 // ERROR "invalid arrangement" + SHA256SU1 V1.B16, V2.B16, V3.B16 // ERROR "invalid arrangement" + SHA512SU1 V1.S4, V2.S4, V3.S4 // ERROR "invalid arrangement" + SHA256H V1.D2, V2, V3 // ERROR "invalid arrangement" + SHA512H V1.S4, V2, V3 // ERROR "invalid arrangement" + AESE V1.B16, V2.B8 // ERROR "invalid arrangement" + SHA256SU1 V1.S4, V2.B16, V3.S4 // ERROR "invalid arrangement" + SHA1H V1.B16, V2.B16 // ERROR "invalid operands" + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/armerror.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/armerror.s new file 
mode 100644 index 0000000000000000000000000000000000000000..f2bed8d1c37fab9dc33d396871cfb80cec7530e4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/armerror.s @@ -0,0 +1,264 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +TEXT errors(SB),$0 + MOVW (F0), R1 // ERROR "illegal base register" + MOVB (F0), R1 // ERROR "illegal base register" + MOVH (F0), R1 // ERROR "illegal base register" + MOVF (F0), F1 // ERROR "illegal base register" + MOVD (F0), F1 // ERROR "illegal base register" + MOVW R1, (F0) // ERROR "illegal base register" + MOVB R2, (F0) // ERROR "illegal base register" + MOVH R3, (F0) // ERROR "illegal base register" + MOVF F4, (F0) // ERROR "illegal base register" + MOVD F5, (F0) // ERROR "illegal base register" + MOVM.IA (F1), [R0-R4] // ERROR "illegal base register" + MOVM.DA (F1), [R0-R4] // ERROR "illegal base register" + MOVM.IB (F1), [R0-R4] // ERROR "illegal base register" + MOVM.DB (F1), [R0-R4] // ERROR "illegal base register" + MOVM.IA [R0-R4], (F1) // ERROR "illegal base register" + MOVM.DA [R0-R4], (F1) // ERROR "illegal base register" + MOVM.IB [R0-R4], (F1) // ERROR "illegal base register" + MOVM.DB [R0-R4], (F1) // ERROR "illegal base register" + MOVW R0<<0(F1), R1 // ERROR "illegal base register" + MOVB R0<<0(F1), R1 // ERROR "illegal base register" + MOVW R1, R0<<0(F1) // ERROR "illegal base register" + MOVB R2, R0<<0(F1) // ERROR "illegal base register" + MOVF 0x00ffffff(F2), F1 // ERROR "illegal base register" + MOVD 0x00ffffff(F2), F1 // ERROR "illegal base register" + MOVF F2, 0x00ffffff(F2) // ERROR "illegal base register" + MOVD F2, 0x00ffffff(F2) // ERROR "illegal base register" + MULS.S R1, R2, R3, R4 // ERROR "invalid .S suffix" + ADD.P R1, R2, R3 // ERROR "invalid .P suffix" + SUB.W R2, R3 // ERROR "invalid .W suffix" + BL 4(R4) // ERROR "non-zero offset" + ADDF F0, R1, F2 // 
ERROR "illegal combination" + SWI (R0) // ERROR "illegal combination" + MULAD F0, F1 // ERROR "illegal combination" + MULAF F0, F1 // ERROR "illegal combination" + MULSD F0, F1 // ERROR "illegal combination" + MULSF F0, F1 // ERROR "illegal combination" + NMULAD F0, F1 // ERROR "illegal combination" + NMULAF F0, F1 // ERROR "illegal combination" + NMULSD F0, F1 // ERROR "illegal combination" + NMULSF F0, F1 // ERROR "illegal combination" + FMULAD F0, F1 // ERROR "illegal combination" + FMULAF F0, F1 // ERROR "illegal combination" + FMULSD F0, F1 // ERROR "illegal combination" + FMULSF F0, F1 // ERROR "illegal combination" + FNMULAD F0, F1 // ERROR "illegal combination" + FNMULAF F0, F1 // ERROR "illegal combination" + FNMULSD F0, F1 // ERROR "illegal combination" + FNMULSF F0, F1 // ERROR "illegal combination" + NEGF F0, F1, F2 // ERROR "illegal combination" + NEGD F0, F1, F2 // ERROR "illegal combination" + ABSF F0, F1, F2 // ERROR "illegal combination" + ABSD F0, F1, F2 // ERROR "illegal combination" + SQRTF F0, F1, F2 // ERROR "illegal combination" + SQRTD F0, F1, F2 // ERROR "illegal combination" + MOVF F0, F1, F2 // ERROR "illegal combination" + MOVD F0, F1, F2 // ERROR "illegal combination" + MOVDF F0, F1, F2 // ERROR "illegal combination" + MOVFD F0, F1, F2 // ERROR "illegal combination" + MOVM.IA 4(R1), [R0-R4] // ERROR "offset must be zero" + MOVM.DA 4(R1), [R0-R4] // ERROR "offset must be zero" + MOVM.IB 4(R1), [R0-R4] // ERROR "offset must be zero" + MOVM.DB 4(R1), [R0-R4] // ERROR "offset must be zero" + MOVM.IA [R0-R4], 4(R1) // ERROR "offset must be zero" + MOVM.DA [R0-R4], 4(R1) // ERROR "offset must be zero" + MOVM.IB [R0-R4], 4(R1) // ERROR "offset must be zero" + MOVM.DB [R0-R4], 4(R1) // ERROR "offset must be zero" + MOVW CPSR, FPSR // ERROR "illegal combination" + MOVW FPSR, CPSR // ERROR "illegal combination" + MOVW CPSR, errors(SB) // ERROR "illegal combination" + MOVW errors(SB), CPSR // ERROR "illegal combination" + MOVW FPSR, errors(SB) // 
ERROR "illegal combination" + MOVW errors(SB), FPSR // ERROR "illegal combination" + MOVW F0, errors(SB) // ERROR "illegal combination" + MOVW errors(SB), F0 // ERROR "illegal combination" + MOVW $20, errors(SB) // ERROR "illegal combination" + MOVW errors(SB), $20 // ERROR "illegal combination" + MOVW (R1), [R0-R4] // ERROR "illegal combination" + MOVW [R0-R4], (R1) // ERROR "illegal combination" + MOVB $245, R1 // ERROR "illegal combination" + MOVH $245, R1 // ERROR "illegal combination" + MOVB $0xff000000, R1 // ERROR "illegal combination" + MOVH $0xff000000, R1 // ERROR "illegal combination" + MOVB $0x00ffffff, R1 // ERROR "illegal combination" + MOVH $0x00ffffff, R1 // ERROR "illegal combination" + MOVB FPSR, g // ERROR "illegal combination" + MOVH FPSR, g // ERROR "illegal combination" + MOVB g, FPSR // ERROR "illegal combination" + MOVH g, FPSR // ERROR "illegal combination" + MOVB CPSR, g // ERROR "illegal combination" + MOVH CPSR, g // ERROR "illegal combination" + MOVB g, CPSR // ERROR "illegal combination" + MOVH g, CPSR // ERROR "illegal combination" + MOVB $0xff000000, CPSR // ERROR "illegal combination" + MOVH $0xff000000, CPSR // ERROR "illegal combination" + MOVB $0xff000000, FPSR // ERROR "illegal combination" + MOVH $0xff000000, FPSR // ERROR "illegal combination" + MOVB $0xffffff00, CPSR // ERROR "illegal combination" + MOVH $0xffffff00, CPSR // ERROR "illegal combination" + MOVB $0xfffffff0, FPSR // ERROR "illegal combination" + MOVH $0xfffffff0, FPSR // ERROR "illegal combination" + MOVB (R1), [R0-R4] // ERROR "illegal combination" + MOVB [R0-R4], (R1) // ERROR "illegal combination" + MOVH (R1), [R0-R4] // ERROR "illegal combination" + MOVH [R0-R4], (R1) // ERROR "illegal combination" + MOVB $0xff(R0), R1 // ERROR "illegal combination" + MOVH $0xff(R0), R1 // ERROR "illegal combination" + MOVB $errors(SB), R2 // ERROR "illegal combination" + MOVH $errors(SB), R2 // ERROR "illegal combination" + MOVB F0, R0 // ERROR "illegal combination" + MOVH 
F0, R0 // ERROR "illegal combination" + MOVB R0, F0 // ERROR "illegal combination" + MOVH R0, F0 // ERROR "illegal combination" + MOVB R0>>0(R1), R2 // ERROR "bad shift" + MOVB R0->0(R1), R2 // ERROR "bad shift" + MOVB R0@>0(R1), R2 // ERROR "bad shift" + MOVBS R0>>0(R1), R2 // ERROR "bad shift" + MOVBS R0->0(R1), R2 // ERROR "bad shift" + MOVBS R0@>0(R1), R2 // ERROR "bad shift" + MOVF CPSR, F1 // ERROR "illegal combination" + MOVD R1, CPSR // ERROR "illegal combination" + MOVW F1, F2 // ERROR "illegal combination" + MOVB F1, F2 // ERROR "illegal combination" + MOVH F1, F2 // ERROR "illegal combination" + MOVF R1, F2 // ERROR "illegal combination" + MOVD R1, F2 // ERROR "illegal combination" + MOVF R1, R1 // ERROR "illegal combination" + MOVD R1, R2 // ERROR "illegal combination" + MOVFW R1, R2 // ERROR "illegal combination" + MOVDW R1, R2 // ERROR "illegal combination" + MOVWF R1, R2 // ERROR "illegal combination" + MOVWD R1, R2 // ERROR "illegal combination" + MOVWD CPSR, R2 // ERROR "illegal combination" + MOVWF CPSR, R2 // ERROR "illegal combination" + MOVWD R1, CPSR // ERROR "illegal combination" + MOVWF R1, CPSR // ERROR "illegal combination" + MOVDW CPSR, R2 // ERROR "illegal combination" + MOVFW CPSR, R2 // ERROR "illegal combination" + MOVDW R1, CPSR // ERROR "illegal combination" + MOVFW R1, CPSR // ERROR "illegal combination" + BFX $12, $41, R2, R3 // ERROR "wrong width or LSB" + BFX $12, $-2, R2 // ERROR "wrong width or LSB" + BFXU $40, $4, R2, R3 // ERROR "wrong width or LSB" + BFXU $-40, $4, R2 // ERROR "wrong width or LSB" + BFX $-2, $4, R2, R3 // ERROR "wrong width or LSB" + BFXU $4, R2, R5, R2 // ERROR "missing or wrong LSB" + BFXU $4, R2, R5 // ERROR "missing or wrong LSB" + BFC $12, $8, R2, R3 // ERROR "illegal combination" + MOVB R0>>8, R2 // ERROR "illegal shift" + MOVH R0<<16, R2 // ERROR "illegal shift" + MOVBS R0->8, R2 // ERROR "illegal shift" + MOVHS R0<<24, R2 // ERROR "illegal shift" + MOVBU R0->24, R2 // ERROR "illegal shift" + MOVHU 
R0@>1, R2 // ERROR "illegal shift" + XTAB R0>>8, R2 // ERROR "illegal shift" + XTAH R0<<16, R2 // ERROR "illegal shift" + XTABU R0->24, R2 // ERROR "illegal shift" + XTAHU R0@>1, R2 // ERROR "illegal shift" + XTAB R0>>8, R5, R2 // ERROR "illegal shift" + XTAH R0<<16, R5, R2 // ERROR "illegal shift" + XTABU R0->24, R5, R2 // ERROR "illegal shift" + XTAHU R0@>1, R5, R2 // ERROR "illegal shift" + AND.W R0, R1 // ERROR "invalid .W suffix" + ORR.P R2, R3, R4 // ERROR "invalid .P suffix" + CMP.S R1, R2 // ERROR "invalid .S suffix" + BIC.P $124, R1, R2 // ERROR "invalid .P suffix" + MOVW.S $124, R1 // ERROR "invalid .S suffix" + MVN.S $123, g // ERROR "invalid .S suffix" + RSB.U $0, R9 // ERROR "invalid .U suffix" + CMP.S $29, g // ERROR "invalid .S suffix" + ADD.W R1<R2, R1 // ERROR "invalid .S suffix" + SLL.P R1, R2, R3 // ERROR "invalid .P suffix" + SRA.U R2, R8 // ERROR "invalid .U suffix" + SWI.S // ERROR "invalid .S suffix" + SWI.P $0 // ERROR "invalid .P suffix" + MOVW.S $0xaaaaaaaa, R7 // ERROR "invalid .S suffix" + MOVW.P $0xffffff44, R1 // ERROR "invalid .P suffix" + MOVW.S $0xffffff77, R1 // ERROR "invalid .S suffix" + MVN.S $0xffffffaa, R8 // ERROR "invalid .S suffix" + MVN.S $0xaaaaaaaa, R8 // ERROR "invalid .S suffix" + ADD.U $0xaaaaaaaa, R4 // ERROR "invalid .U suffix" + ORR.P $0x555555, R7, R3 // ERROR "invalid .P suffix" + TST.S $0xabcd1234, R2 // ERROR "invalid .S suffix" + MOVB.S R1, R2 // ERROR "invalid .S suffix" + MOVBU.P R1, R2 // ERROR "invalid .P suffix" + MOVBS.U R1, R2 // ERROR "invalid .U suffix" + MOVH.S R1, R2 // ERROR "invalid .S suffix" + MOVHU.P R1, R2 // ERROR "invalid .P suffix" + MOVHS.U R1, R2 // ERROR "invalid .U suffix" + MUL.P R0, R1, R2 // ERROR "invalid .P suffix" + MULU.W R1, R2 // ERROR "invalid .W suffix" + DIVHW.S R0, R1, R2 // ERROR "invalid .S suffix" + DIVHW.W R1, R2 // ERROR "invalid .W suffix" + MULL.W R2, R0, (R5, R8) // ERROR "invalid .W suffix" + MULLU.U R2, R0, (R5, R8) // ERROR "invalid .U suffix" + BFX.S $2, $4, R3 
// ERROR "invalid .S suffix" + BFXU.W $2, $4, R3, R0 // ERROR "invalid .W suffix" + MOVB.S R1, 4(R2) // ERROR "invalid .S suffix" + MOVHU.S R1, 4(R2) // ERROR "invalid .S suffix" + MOVW.S R1, 4(R2) // ERROR "invalid .S suffix" + MOVBU.S 4(R2), R3 // ERROR "invalid .S suffix" + MOVH.S 4(R2), R3 // ERROR "invalid .S suffix" + MOVW.S 4(R2), R3 // ERROR "invalid .S suffix" + XTAB.S R0@>0, R2 // ERROR "invalid .S suffix" + XTAB.W R0@>8, R2, R9 // ERROR "invalid .W suffix" + MOVBU.S R0@>24, R1 // ERROR "invalid .S suffix" + MOVHS.S R0@>16, R1 // ERROR "invalid .S suffix" + MOVB.S R1, 0xaaaa(R2) // ERROR "invalid .S suffix" + MOVHU.S R1, 0xaaaa(R2) // ERROR "invalid .S suffix" + MOVW.S R1, 0xaaaa(R2) // ERROR "invalid .S suffix" + MOVBU.S 0xaaaa(R2), R3 // ERROR "invalid .S suffix" + MOVH.S 0xaaaa(R2), R3 // ERROR "invalid .S suffix" + MOVW.S 0xaaaa(R2), R3 // ERROR "invalid .S suffix" + MOVW.S CPSR, R1 // ERROR "invalid .S suffix" + MOVW.S R3, CPSR // ERROR "invalid .S suffix" + MOVW.S $0, CPSR // ERROR "invalid .S suffix" + MOVM.S (R0), [R2-R4] // ERROR "invalid .S suffix" + MOVM.S [R1-R6], (R9) // ERROR "invalid .S suffix" + SWPW.S R1, (R2), R3 // ERROR "invalid .S suffix" + MOVF.S (R0), F1 // ERROR "invalid .S suffix" + MOVF.S F9, (R4) // ERROR "invalid .S suffix" + MOVF.S 0xfff0(R0), F1 // ERROR "invalid .S suffix" + MOVF.S F9, 0xfff0(R4) // ERROR "invalid .S suffix" + ADDF.S F1, F2, F3 // ERROR "invalid .S suffix" + SUBD.U F1, F2 // ERROR "invalid .U suffix" + NEGF.W F9, F10 // ERROR "invalid .W suffix" + ABSD.P F9, F10 // ERROR "invalid .P suffix" + MOVW.S FPSR, R0 // ERROR "invalid .S suffix" + MOVW.P g, FPSR // ERROR "invalid .P suffix" + MOVW.S R1->4(R6), R2 // ERROR "invalid .S suffix" + MOVB.S R9, R2<<8(R4) // ERROR "invalid .S suffix" + MOVHU.S R9, R2<<0(R4) // ERROR "invalid .S suffix" + STREX.S R0, (R1), R2 // ERROR "invalid .S suffix" + LDREX.S (R2), R8 // ERROR "invalid .S suffix" + MOVF.S $0.0, F3 // ERROR "invalid .S suffix" + CMPF.S F1, F2 // ERROR 
"invalid .S suffix" + MOVFW.S F0, F9 // ERROR "invalid .S suffix" + MOVWF.W F3, F1 // ERROR "invalid .W suffix" + MOVFW.P F0, R9 // ERROR "invalid .P suffix" + MOVWF.W R3, F1 // ERROR "invalid .W suffix" + MOVW.S F0, R9 // ERROR "invalid .S suffix" + MOVW.U R3, F1 // ERROR "invalid .U suffix" + PLD.S 4(R1) // ERROR "invalid .S suffix" + CLZ.S R1, R2 // ERROR "invalid .S suffix" + MULBB.S R0, R1, R2 // ERROR "invalid .S suffix" + MULA.W R9, R6, R1, g // ERROR "invalid .W suffix" + MULS.S R2, R3, R4, g // ERROR "invalid .S suffix" + + STREX R1, (R0) // ERROR "illegal combination" + STREX (R1), R0 // ERROR "illegal combination" + STREX R1, (R0), R1 // ERROR "cannot use same register as both source and destination" + STREX R1, (R0), R0 // ERROR "cannot use same register as both source and destination" + STREXD R0, (R2), R0 // ERROR "cannot use same register as both source and destination" + STREXD R0, (R2), R1 // ERROR "cannot use same register as both source and destination" + STREXD R0, (R2), R2 // ERROR "cannot use same register as both source and destination" + STREXD R1, (R4), R7 // ERROR "must be even" + + END diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/armv6.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/armv6.s new file mode 100644 index 0000000000000000000000000000000000000000..361867fdc2fcfbb4a29eb323974ae1b82a612a6a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/armv6.s @@ -0,0 +1,110 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "../../../../../runtime/textflag.h" + +TEXT foo(SB), DUPOK|NOSPLIT, $0 + + ADDF F0, F1, F2 // 002a31ee + ADDD.EQ F3, F4, F5 // 035b340e + ADDF.NE F0, F2 // 002a321e + ADDD F3, F5 // 035b35ee + SUBF F0, F1, F2 // 402a31ee + SUBD.EQ F3, F4, F5 // 435b340e + SUBF.NE F0, F2 // 402a321e + SUBD F3, F5 // 435b35ee + MULF F0, F1, F2 // 002a21ee + MULD.EQ F3, F4, F5 // 035b240e + MULF.NE F0, F2 // 002a221e + MULD F3, F5 // 035b25ee + NMULF F0, F1, F2 // 402a21ee + NMULF F3, F7 // 437a27ee + NMULD F0, F1, F2 // 402b21ee + NMULD F3, F7 // 437b27ee + MULAF F5, F6, F7 // 057a06ee + MULAD F5, F6, F7 // 057b06ee + MULSF F5, F6, F7 // 457a06ee + MULSD F5, F6, F7 // 457b06ee + NMULAF F5, F6, F7 // 457a16ee + NMULAD F5, F6, F7 // 457b16ee + NMULSF F5, F6, F7 // 057a16ee + NMULSD F5, F6, F7 // 057b16ee + FMULAF F5, F6, F7 // 057aa6ee + FMULAD F5, F6, F7 // 057ba6ee + FMULSF F5, F6, F7 // 457aa6ee + FMULSD F5, F6, F7 // 457ba6ee + FNMULAF F5, F6, F7 // 457a96ee + FNMULAD F5, F6, F7 // 457b96ee + FNMULSF F5, F6, F7 // 057a96ee + FNMULSD F5, F6, F7 // 057b96ee + DIVF F0, F1, F2 // 002a81ee + DIVD.EQ F3, F4, F5 // 035b840e + DIVF.NE F0, F2 // 002a821e + DIVD F3, F5 // 035b85ee + NEGF F0, F1 // 401ab1ee + NEGD F4, F5 // 445bb1ee + ABSF F0, F1 // c01ab0ee + ABSD F4, F5 // c45bb0ee + SQRTF F0, F1 // c01ab1ee + SQRTD F4, F5 // c45bb1ee + MOVFD F0, F1 // c01ab7ee + MOVDF F4, F5 // c45bb7ee + + LDREX (R8), R9 // 9f9f98e1 + LDREXD (R11), R12 // 9fcfbbe1 + STREX R3, (R4), R5 // STREX (R4), R3, R5 // 935f84e1 + STREXD R8, (R9), g // STREXD (R9), R8, g // 98afa9e1 + + CMPF F8, F9 // c89ab4ee10faf1ee + CMPD.CS F4, F5 // c45bb42e10faf12e + CMPF.VS F7 // c07ab56e10faf16e + CMPD F6 // c06bb5ee10faf1ee + + MOVW R4, F8 // 104b08ee + MOVW F4, R8 // 108b14ee + + MOVF (R4), F9 // 009a94ed + MOVD.EQ (R4), F9 // 009b940d + MOVF.NE (g), F3 // 003a9a1d + MOVD (g), F3 // 003b9aed + MOVF 0x20(R3), F9 // MOVF 32(R3), F9 // 089a93ed + MOVD.EQ 0x20(R4), F9 // MOVD.EQ 32(R4), F9 // 089b940d + MOVF.NE 
-0x20(g), F3 // MOVF.NE -32(g), F3 // 083a1a1d + MOVD -0x20(g), F3 // MOVD -32(g), F3 // 083b1aed + MOVF F9, (R4) // 009a84ed + MOVD.EQ F9, (R4) // 009b840d + MOVF.NE F3, (g) // 003a8a1d + MOVD F3, (g) // 003b8aed + MOVF F9, 0x20(R3) // MOVF F9, 32(R3) // 089a83ed + MOVD.EQ F9, 0x20(R4) // MOVD.EQ F9, 32(R4) // 089b840d + MOVF.NE F3, -0x20(g) // MOVF.NE F3, -32(g) // 083a0a1d + MOVD F3, -0x20(g) // MOVD F3, -32(g) // 083b0aed + MOVF 0x00ffffff(R2), F1 // MOVF 16777215(R2), F1 + MOVD 0x00ffffff(R2), F1 // MOVD 16777215(R2), F1 + MOVF F2, 0x00ffffff(R2) // MOVF F2, 16777215(R2) + MOVD F2, 0x00ffffff(R2) // MOVD F2, 16777215(R2) + MOVF F0, math·Exp(SB) // MOVF F0, math.Exp(SB) + MOVF math·Exp(SB), F0 // MOVF math.Exp(SB), F0 + MOVD F0, math·Exp(SB) // MOVD F0, math.Exp(SB) + MOVD math·Exp(SB), F0 // MOVD math.Exp(SB), F0 + MOVF F4, F5 // 445ab0ee + MOVD F6, F7 // 467bb0ee + MOVFW F6, F8 // c68abdee + MOVFW F6, R8 // c6fabdee108b1fee + MOVFW.U F6, F8 // c68abcee + MOVFW.U F6, R8 // c6fabcee108b1fee + MOVDW F6, F8 // c68bbdee + MOVDW F6, R8 // c6fbbdee108b1fee + MOVDW.U F6, F8 // c68bbcee + MOVDW.U F6, R8 // c6fbbcee108b1fee + MOVWF F6, F8 // c68ab8ee + MOVWF R6, F8 // 106b0feecf8ab8ee + MOVWF.U F6, F8 // 468ab8ee + MOVWF.U R6, F8 // 106b0fee4f8ab8ee + MOVWD F6, F8 // c68bb8ee + MOVWD R6, F8 // 106b0feecf8bb8ee + MOVWD.U F6, F8 // 468bb8ee + MOVWD.U R6, F8 // 106b0fee4f8bb8ee + + END diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/aes_avx512f.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/aes_avx512f.s new file mode 100644 index 0000000000000000000000000000000000000000..87fa5f718a49fe9de5ddc60c79a6b2eaf2014cd1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/aes_avx512f.s @@ -0,0 +1,336 @@ +// Code generated by avx512test. DO NOT EDIT. 
+ +#include "../../../../../../runtime/textflag.h" + +TEXT asmtest_aes_avx512f(SB), NOSPLIT, $0 + VAESDEC X24, X7, X11 // 62124508ded8 or 6212c508ded8 + VAESDEC X20, X7, X11 // 62324508dedc or 6232c508dedc + VAESDEC X24, X0, X11 // 62127d08ded8 or 6212fd08ded8 + VAESDEC X20, X0, X11 // 62327d08dedc or 6232fd08dedc + VAESDEC X24, X7, X31 // 62024508def8 or 6202c508def8 + VAESDEC X20, X7, X31 // 62224508defc or 6222c508defc + VAESDEC X7, X7, X31 // 62624508deff or 6262c508deff + VAESDEC -7(DI)(R8*1), X7, X31 // 62224508debc07f9ffffff or 6222c508debc07f9ffffff + VAESDEC (SP), X7, X31 // 62624508de3c24 or 6262c508de3c24 + VAESDEC X24, X0, X31 // 62027d08def8 or 6202fd08def8 + VAESDEC X20, X0, X31 // 62227d08defc or 6222fd08defc + VAESDEC X7, X0, X31 // 62627d08deff or 6262fd08deff + VAESDEC -7(DI)(R8*1), X0, X31 // 62227d08debc07f9ffffff or 6222fd08debc07f9ffffff + VAESDEC (SP), X0, X31 // 62627d08de3c24 or 6262fd08de3c24 + VAESDEC X24, X7, X3 // 62924508ded8 or 6292c508ded8 + VAESDEC X20, X7, X3 // 62b24508dedc or 62b2c508dedc + VAESDEC X24, X0, X3 // 62927d08ded8 or 6292fd08ded8 + VAESDEC X20, X0, X3 // 62b27d08dedc or 62b2fd08dedc + VAESDEC Y5, Y31, Y22 // 62e20520def5 or 62e28520def5 + VAESDEC Y19, Y31, Y22 // 62a20520def3 or 62a28520def3 + VAESDEC Y31, Y31, Y22 // 62820520def7 or 62828520def7 + VAESDEC 99(R15)(R15*1), Y31, Y22 // 62820520deb43f63000000 or 62828520deb43f63000000 + VAESDEC (DX), Y31, Y22 // 62e20520de32 or 62e28520de32 + VAESDEC Y5, Y5, Y22 // 62e25528def5 or 62e2d528def5 + VAESDEC Y19, Y5, Y22 // 62a25528def3 or 62a2d528def3 + VAESDEC Y31, Y5, Y22 // 62825528def7 or 6282d528def7 + VAESDEC 99(R15)(R15*1), Y5, Y22 // 62825528deb43f63000000 or 6282d528deb43f63000000 + VAESDEC (DX), Y5, Y22 // 62e25528de32 or 62e2d528de32 + VAESDEC Y5, Y0, Y22 // 62e27d28def5 or 62e2fd28def5 + VAESDEC Y19, Y0, Y22 // 62a27d28def3 or 62a2fd28def3 + VAESDEC Y31, Y0, Y22 // 62827d28def7 or 6282fd28def7 + VAESDEC 99(R15)(R15*1), Y0, Y22 // 62827d28deb43f63000000 or 
6282fd28deb43f63000000 + VAESDEC (DX), Y0, Y22 // 62e27d28de32 or 62e2fd28de32 + VAESDEC Y5, Y31, Y9 // 62720520decd or 62728520decd + VAESDEC Y19, Y31, Y9 // 62320520decb or 62328520decb + VAESDEC Y31, Y31, Y9 // 62120520decf or 62128520decf + VAESDEC 99(R15)(R15*1), Y31, Y9 // 62120520de8c3f63000000 or 62128520de8c3f63000000 + VAESDEC (DX), Y31, Y9 // 62720520de0a or 62728520de0a + VAESDEC Y19, Y5, Y9 // 62325528decb or 6232d528decb + VAESDEC Y31, Y5, Y9 // 62125528decf or 6212d528decf + VAESDEC Y19, Y0, Y9 // 62327d28decb or 6232fd28decb + VAESDEC Y31, Y0, Y9 // 62127d28decf or 6212fd28decf + VAESDEC Y5, Y31, Y23 // 62e20520defd or 62e28520defd + VAESDEC Y19, Y31, Y23 // 62a20520defb or 62a28520defb + VAESDEC Y31, Y31, Y23 // 62820520deff or 62828520deff + VAESDEC 99(R15)(R15*1), Y31, Y23 // 62820520debc3f63000000 or 62828520debc3f63000000 + VAESDEC (DX), Y31, Y23 // 62e20520de3a or 62e28520de3a + VAESDEC Y5, Y5, Y23 // 62e25528defd or 62e2d528defd + VAESDEC Y19, Y5, Y23 // 62a25528defb or 62a2d528defb + VAESDEC Y31, Y5, Y23 // 62825528deff or 6282d528deff + VAESDEC 99(R15)(R15*1), Y5, Y23 // 62825528debc3f63000000 or 6282d528debc3f63000000 + VAESDEC (DX), Y5, Y23 // 62e25528de3a or 62e2d528de3a + VAESDEC Y5, Y0, Y23 // 62e27d28defd or 62e2fd28defd + VAESDEC Y19, Y0, Y23 // 62a27d28defb or 62a2fd28defb + VAESDEC Y31, Y0, Y23 // 62827d28deff or 6282fd28deff + VAESDEC 99(R15)(R15*1), Y0, Y23 // 62827d28debc3f63000000 or 6282fd28debc3f63000000 + VAESDEC (DX), Y0, Y23 // 62e27d28de3a or 62e2fd28de3a + VAESDEC Z27, Z3, Z11 // 62126548dedb or 6212e548dedb + VAESDEC Z15, Z3, Z11 // 62526548dedf or 6252e548dedf + VAESDEC 99(R15)(R15*1), Z3, Z11 // 62126548de9c3f63000000 or 6212e548de9c3f63000000 + VAESDEC (DX), Z3, Z11 // 62726548de1a or 6272e548de1a + VAESDEC Z27, Z12, Z11 // 62121d48dedb or 62129d48dedb + VAESDEC Z15, Z12, Z11 // 62521d48dedf or 62529d48dedf + VAESDEC 99(R15)(R15*1), Z12, Z11 // 62121d48de9c3f63000000 or 62129d48de9c3f63000000 + VAESDEC (DX), Z12, Z11 
// 62721d48de1a or 62729d48de1a + VAESDEC Z27, Z3, Z25 // 62026548decb or 6202e548decb + VAESDEC Z15, Z3, Z25 // 62426548decf or 6242e548decf + VAESDEC 99(R15)(R15*1), Z3, Z25 // 62026548de8c3f63000000 or 6202e548de8c3f63000000 + VAESDEC (DX), Z3, Z25 // 62626548de0a or 6262e548de0a + VAESDEC Z27, Z12, Z25 // 62021d48decb or 62029d48decb + VAESDEC Z15, Z12, Z25 // 62421d48decf or 62429d48decf + VAESDEC 99(R15)(R15*1), Z12, Z25 // 62021d48de8c3f63000000 or 62029d48de8c3f63000000 + VAESDEC (DX), Z12, Z25 // 62621d48de0a or 62629d48de0a + VAESDECLAST X21, X5, X9 // 62325508dfcd or 6232d508dfcd + VAESDECLAST X21, X31, X9 // 62320500dfcd or 62328500dfcd + VAESDECLAST X1, X31, X9 // 62720500dfc9 or 62728500dfc9 + VAESDECLAST X11, X31, X9 // 62520500dfcb or 62528500dfcb + VAESDECLAST -7(CX), X31, X9 // 62720500df89f9ffffff or 62728500df89f9ffffff + VAESDECLAST 15(DX)(BX*4), X31, X9 // 62720500df8c9a0f000000 or 62728500df8c9a0f000000 + VAESDECLAST X21, X3, X9 // 62326508dfcd or 6232e508dfcd + VAESDECLAST X21, X5, X7 // 62b25508dffd or 62b2d508dffd + VAESDECLAST X21, X31, X7 // 62b20500dffd or 62b28500dffd + VAESDECLAST X1, X31, X7 // 62f20500dff9 or 62f28500dff9 + VAESDECLAST X11, X31, X7 // 62d20500dffb or 62d28500dffb + VAESDECLAST -7(CX), X31, X7 // 62f20500dfb9f9ffffff or 62f28500dfb9f9ffffff + VAESDECLAST 15(DX)(BX*4), X31, X7 // 62f20500dfbc9a0f000000 or 62f28500dfbc9a0f000000 + VAESDECLAST X21, X3, X7 // 62b26508dffd or 62b2e508dffd + VAESDECLAST X21, X5, X14 // 62325508dff5 or 6232d508dff5 + VAESDECLAST X21, X31, X14 // 62320500dff5 or 62328500dff5 + VAESDECLAST X1, X31, X14 // 62720500dff1 or 62728500dff1 + VAESDECLAST X11, X31, X14 // 62520500dff3 or 62528500dff3 + VAESDECLAST -7(CX), X31, X14 // 62720500dfb1f9ffffff or 62728500dfb1f9ffffff + VAESDECLAST 15(DX)(BX*4), X31, X14 // 62720500dfb49a0f000000 or 62728500dfb49a0f000000 + VAESDECLAST X21, X3, X14 // 62326508dff5 or 6232e508dff5 + VAESDECLAST Y31, Y27, Y28 // 62022520dfe7 or 6202a520dfe7 + VAESDECLAST Y3, 
Y27, Y28 // 62622520dfe3 or 6262a520dfe3 + VAESDECLAST Y14, Y27, Y28 // 62422520dfe6 or 6242a520dfe6 + VAESDECLAST -17(BP)(SI*8), Y27, Y28 // 62622520dfa4f5efffffff or 6262a520dfa4f5efffffff + VAESDECLAST (R15), Y27, Y28 // 62422520df27 or 6242a520df27 + VAESDECLAST Y31, Y0, Y28 // 62027d28dfe7 or 6202fd28dfe7 + VAESDECLAST Y3, Y0, Y28 // 62627d28dfe3 or 6262fd28dfe3 + VAESDECLAST Y14, Y0, Y28 // 62427d28dfe6 or 6242fd28dfe6 + VAESDECLAST -17(BP)(SI*8), Y0, Y28 // 62627d28dfa4f5efffffff or 6262fd28dfa4f5efffffff + VAESDECLAST (R15), Y0, Y28 // 62427d28df27 or 6242fd28df27 + VAESDECLAST Y31, Y11, Y28 // 62022528dfe7 or 6202a528dfe7 + VAESDECLAST Y3, Y11, Y28 // 62622528dfe3 or 6262a528dfe3 + VAESDECLAST Y14, Y11, Y28 // 62422528dfe6 or 6242a528dfe6 + VAESDECLAST -17(BP)(SI*8), Y11, Y28 // 62622528dfa4f5efffffff or 6262a528dfa4f5efffffff + VAESDECLAST (R15), Y11, Y28 // 62422528df27 or 6242a528df27 + VAESDECLAST Y31, Y27, Y2 // 62922520dfd7 or 6292a520dfd7 + VAESDECLAST Y3, Y27, Y2 // 62f22520dfd3 or 62f2a520dfd3 + VAESDECLAST Y14, Y27, Y2 // 62d22520dfd6 or 62d2a520dfd6 + VAESDECLAST -17(BP)(SI*8), Y27, Y2 // 62f22520df94f5efffffff or 62f2a520df94f5efffffff + VAESDECLAST (R15), Y27, Y2 // 62d22520df17 or 62d2a520df17 + VAESDECLAST Y31, Y0, Y2 // 62927d28dfd7 or 6292fd28dfd7 + VAESDECLAST Y31, Y11, Y2 // 62922528dfd7 or 6292a528dfd7 + VAESDECLAST Y31, Y27, Y24 // 62022520dfc7 or 6202a520dfc7 + VAESDECLAST Y3, Y27, Y24 // 62622520dfc3 or 6262a520dfc3 + VAESDECLAST Y14, Y27, Y24 // 62422520dfc6 or 6242a520dfc6 + VAESDECLAST -17(BP)(SI*8), Y27, Y24 // 62622520df84f5efffffff or 6262a520df84f5efffffff + VAESDECLAST (R15), Y27, Y24 // 62422520df07 or 6242a520df07 + VAESDECLAST Y31, Y0, Y24 // 62027d28dfc7 or 6202fd28dfc7 + VAESDECLAST Y3, Y0, Y24 // 62627d28dfc3 or 6262fd28dfc3 + VAESDECLAST Y14, Y0, Y24 // 62427d28dfc6 or 6242fd28dfc6 + VAESDECLAST -17(BP)(SI*8), Y0, Y24 // 62627d28df84f5efffffff or 6262fd28df84f5efffffff + VAESDECLAST (R15), Y0, Y24 // 62427d28df07 or 
6242fd28df07 + VAESDECLAST Y31, Y11, Y24 // 62022528dfc7 or 6202a528dfc7 + VAESDECLAST Y3, Y11, Y24 // 62622528dfc3 or 6262a528dfc3 + VAESDECLAST Y14, Y11, Y24 // 62422528dfc6 or 6242a528dfc6 + VAESDECLAST -17(BP)(SI*8), Y11, Y24 // 62622528df84f5efffffff or 6262a528df84f5efffffff + VAESDECLAST (R15), Y11, Y24 // 62422528df07 or 6242a528df07 + VAESDECLAST Z8, Z23, Z23 // 62c24540dff8 or 62c2c540dff8 + VAESDECLAST Z28, Z23, Z23 // 62824540dffc or 6282c540dffc + VAESDECLAST -17(BP)(SI*8), Z23, Z23 // 62e24540dfbcf5efffffff or 62e2c540dfbcf5efffffff + VAESDECLAST (R15), Z23, Z23 // 62c24540df3f or 62c2c540df3f + VAESDECLAST Z8, Z6, Z23 // 62c24d48dff8 or 62c2cd48dff8 + VAESDECLAST Z28, Z6, Z23 // 62824d48dffc or 6282cd48dffc + VAESDECLAST -17(BP)(SI*8), Z6, Z23 // 62e24d48dfbcf5efffffff or 62e2cd48dfbcf5efffffff + VAESDECLAST (R15), Z6, Z23 // 62c24d48df3f or 62c2cd48df3f + VAESDECLAST Z8, Z23, Z5 // 62d24540dfe8 or 62d2c540dfe8 + VAESDECLAST Z28, Z23, Z5 // 62924540dfec or 6292c540dfec + VAESDECLAST -17(BP)(SI*8), Z23, Z5 // 62f24540dfacf5efffffff or 62f2c540dfacf5efffffff + VAESDECLAST (R15), Z23, Z5 // 62d24540df2f or 62d2c540df2f + VAESDECLAST Z8, Z6, Z5 // 62d24d48dfe8 or 62d2cd48dfe8 + VAESDECLAST Z28, Z6, Z5 // 62924d48dfec or 6292cd48dfec + VAESDECLAST -17(BP)(SI*8), Z6, Z5 // 62f24d48dfacf5efffffff or 62f2cd48dfacf5efffffff + VAESDECLAST (R15), Z6, Z5 // 62d24d48df2f or 62d2cd48df2f + VAESENC X14, X16, X13 // 62527d00dcee or 6252fd00dcee + VAESENC X19, X16, X13 // 62327d00dceb or 6232fd00dceb + VAESENC X8, X16, X13 // 62527d00dce8 or 6252fd00dce8 + VAESENC 99(R15)(R15*8), X16, X13 // 62127d00dcacff63000000 or 6212fd00dcacff63000000 + VAESENC 7(AX)(CX*8), X16, X13 // 62727d00dcacc807000000 or 6272fd00dcacc807000000 + VAESENC X19, X14, X13 // 62320d08dceb or 62328d08dceb + VAESENC X19, X11, X13 // 62322508dceb or 6232a508dceb + VAESENC X14, X16, X0 // 62d27d00dcc6 or 62d2fd00dcc6 + VAESENC X19, X16, X0 // 62b27d00dcc3 or 62b2fd00dcc3 + VAESENC X8, X16, X0 // 
62d27d00dcc0 or 62d2fd00dcc0 + VAESENC 99(R15)(R15*8), X16, X0 // 62927d00dc84ff63000000 or 6292fd00dc84ff63000000 + VAESENC 7(AX)(CX*8), X16, X0 // 62f27d00dc84c807000000 or 62f2fd00dc84c807000000 + VAESENC X19, X14, X0 // 62b20d08dcc3 or 62b28d08dcc3 + VAESENC X19, X11, X0 // 62b22508dcc3 or 62b2a508dcc3 + VAESENC X14, X16, X30 // 62427d00dcf6 or 6242fd00dcf6 + VAESENC X19, X16, X30 // 62227d00dcf3 or 6222fd00dcf3 + VAESENC X8, X16, X30 // 62427d00dcf0 or 6242fd00dcf0 + VAESENC 99(R15)(R15*8), X16, X30 // 62027d00dcb4ff63000000 or 6202fd00dcb4ff63000000 + VAESENC 7(AX)(CX*8), X16, X30 // 62627d00dcb4c807000000 or 6262fd00dcb4c807000000 + VAESENC X14, X14, X30 // 62420d08dcf6 or 62428d08dcf6 + VAESENC X19, X14, X30 // 62220d08dcf3 or 62228d08dcf3 + VAESENC X8, X14, X30 // 62420d08dcf0 or 62428d08dcf0 + VAESENC 99(R15)(R15*8), X14, X30 // 62020d08dcb4ff63000000 or 62028d08dcb4ff63000000 + VAESENC 7(AX)(CX*8), X14, X30 // 62620d08dcb4c807000000 or 62628d08dcb4c807000000 + VAESENC X14, X11, X30 // 62422508dcf6 or 6242a508dcf6 + VAESENC X19, X11, X30 // 62222508dcf3 or 6222a508dcf3 + VAESENC X8, X11, X30 // 62422508dcf0 or 6242a508dcf0 + VAESENC 99(R15)(R15*8), X11, X30 // 62022508dcb4ff63000000 or 6202a508dcb4ff63000000 + VAESENC 7(AX)(CX*8), X11, X30 // 62622508dcb4c807000000 or 6262a508dcb4c807000000 + VAESENC Y18, Y15, Y2 // 62b20528dcd2 or 62b28528dcd2 + VAESENC Y24, Y15, Y2 // 62920528dcd0 or 62928528dcd0 + VAESENC Y18, Y22, Y2 // 62b24d20dcd2 or 62b2cd20dcd2 + VAESENC Y24, Y22, Y2 // 62924d20dcd0 or 6292cd20dcd0 + VAESENC Y9, Y22, Y2 // 62d24d20dcd1 or 62d2cd20dcd1 + VAESENC 7(SI)(DI*8), Y22, Y2 // 62f24d20dc94fe07000000 or 62f2cd20dc94fe07000000 + VAESENC -15(R14), Y22, Y2 // 62d24d20dc96f1ffffff or 62d2cd20dc96f1ffffff + VAESENC Y18, Y20, Y2 // 62b25d20dcd2 or 62b2dd20dcd2 + VAESENC Y24, Y20, Y2 // 62925d20dcd0 or 6292dd20dcd0 + VAESENC Y9, Y20, Y2 // 62d25d20dcd1 or 62d2dd20dcd1 + VAESENC 7(SI)(DI*8), Y20, Y2 // 62f25d20dc94fe07000000 or 
62f2dd20dc94fe07000000 + VAESENC -15(R14), Y20, Y2 // 62d25d20dc96f1ffffff or 62d2dd20dc96f1ffffff + VAESENC Y18, Y15, Y13 // 62320528dcea or 62328528dcea + VAESENC Y24, Y15, Y13 // 62120528dce8 or 62128528dce8 + VAESENC Y18, Y22, Y13 // 62324d20dcea or 6232cd20dcea + VAESENC Y24, Y22, Y13 // 62124d20dce8 or 6212cd20dce8 + VAESENC Y9, Y22, Y13 // 62524d20dce9 or 6252cd20dce9 + VAESENC 7(SI)(DI*8), Y22, Y13 // 62724d20dcacfe07000000 or 6272cd20dcacfe07000000 + VAESENC -15(R14), Y22, Y13 // 62524d20dcaef1ffffff or 6252cd20dcaef1ffffff + VAESENC Y18, Y20, Y13 // 62325d20dcea or 6232dd20dcea + VAESENC Y24, Y20, Y13 // 62125d20dce8 or 6212dd20dce8 + VAESENC Y9, Y20, Y13 // 62525d20dce9 or 6252dd20dce9 + VAESENC 7(SI)(DI*8), Y20, Y13 // 62725d20dcacfe07000000 or 6272dd20dcacfe07000000 + VAESENC -15(R14), Y20, Y13 // 62525d20dcaef1ffffff or 6252dd20dcaef1ffffff + VAESENC Y18, Y15, Y27 // 62220528dcda or 62228528dcda + VAESENC Y24, Y15, Y27 // 62020528dcd8 or 62028528dcd8 + VAESENC Y9, Y15, Y27 // 62420528dcd9 or 62428528dcd9 + VAESENC 7(SI)(DI*8), Y15, Y27 // 62620528dc9cfe07000000 or 62628528dc9cfe07000000 + VAESENC -15(R14), Y15, Y27 // 62420528dc9ef1ffffff or 62428528dc9ef1ffffff + VAESENC Y18, Y22, Y27 // 62224d20dcda or 6222cd20dcda + VAESENC Y24, Y22, Y27 // 62024d20dcd8 or 6202cd20dcd8 + VAESENC Y9, Y22, Y27 // 62424d20dcd9 or 6242cd20dcd9 + VAESENC 7(SI)(DI*8), Y22, Y27 // 62624d20dc9cfe07000000 or 6262cd20dc9cfe07000000 + VAESENC -15(R14), Y22, Y27 // 62424d20dc9ef1ffffff or 6242cd20dc9ef1ffffff + VAESENC Y18, Y20, Y27 // 62225d20dcda or 6222dd20dcda + VAESENC Y24, Y20, Y27 // 62025d20dcd8 or 6202dd20dcd8 + VAESENC Y9, Y20, Y27 // 62425d20dcd9 or 6242dd20dcd9 + VAESENC 7(SI)(DI*8), Y20, Y27 // 62625d20dc9cfe07000000 or 6262dd20dc9cfe07000000 + VAESENC -15(R14), Y20, Y27 // 62425d20dc9ef1ffffff or 6242dd20dc9ef1ffffff + VAESENC Z12, Z16, Z21 // 62c27d40dcec or 62c2fd40dcec + VAESENC Z27, Z16, Z21 // 62827d40dceb or 6282fd40dceb + VAESENC 7(SI)(DI*8), Z16, Z21 // 
62e27d40dcacfe07000000 or 62e2fd40dcacfe07000000 + VAESENC -15(R14), Z16, Z21 // 62c27d40dcaef1ffffff or 62c2fd40dcaef1ffffff + VAESENC Z12, Z13, Z21 // 62c21548dcec or 62c29548dcec + VAESENC Z27, Z13, Z21 // 62821548dceb or 62829548dceb + VAESENC 7(SI)(DI*8), Z13, Z21 // 62e21548dcacfe07000000 or 62e29548dcacfe07000000 + VAESENC -15(R14), Z13, Z21 // 62c21548dcaef1ffffff or 62c29548dcaef1ffffff + VAESENC Z12, Z16, Z5 // 62d27d40dcec or 62d2fd40dcec + VAESENC Z27, Z16, Z5 // 62927d40dceb or 6292fd40dceb + VAESENC 7(SI)(DI*8), Z16, Z5 // 62f27d40dcacfe07000000 or 62f2fd40dcacfe07000000 + VAESENC -15(R14), Z16, Z5 // 62d27d40dcaef1ffffff or 62d2fd40dcaef1ffffff + VAESENC Z12, Z13, Z5 // 62d21548dcec or 62d29548dcec + VAESENC Z27, Z13, Z5 // 62921548dceb or 62929548dceb + VAESENC 7(SI)(DI*8), Z13, Z5 // 62f21548dcacfe07000000 or 62f29548dcacfe07000000 + VAESENC -15(R14), Z13, Z5 // 62d21548dcaef1ffffff or 62d29548dcaef1ffffff + VAESENCLAST X23, X12, X8 // 62321d08ddc7 or 62329d08ddc7 + VAESENCLAST X31, X12, X8 // 62121d08ddc7 or 62129d08ddc7 + VAESENCLAST X23, X16, X8 // 62327d00ddc7 or 6232fd00ddc7 + VAESENCLAST X11, X16, X8 // 62527d00ddc3 or 6252fd00ddc3 + VAESENCLAST X31, X16, X8 // 62127d00ddc7 or 6212fd00ddc7 + VAESENCLAST (AX), X16, X8 // 62727d00dd00 or 6272fd00dd00 + VAESENCLAST 7(SI), X16, X8 // 62727d00dd8607000000 or 6272fd00dd8607000000 + VAESENCLAST X23, X23, X8 // 62324500ddc7 or 6232c500ddc7 + VAESENCLAST X11, X23, X8 // 62524500ddc3 or 6252c500ddc3 + VAESENCLAST X31, X23, X8 // 62124500ddc7 or 6212c500ddc7 + VAESENCLAST (AX), X23, X8 // 62724500dd00 or 6272c500dd00 + VAESENCLAST 7(SI), X23, X8 // 62724500dd8607000000 or 6272c500dd8607000000 + VAESENCLAST X23, X12, X26 // 62221d08ddd7 or 62229d08ddd7 + VAESENCLAST X11, X12, X26 // 62421d08ddd3 or 62429d08ddd3 + VAESENCLAST X31, X12, X26 // 62021d08ddd7 or 62029d08ddd7 + VAESENCLAST (AX), X12, X26 // 62621d08dd10 or 62629d08dd10 + VAESENCLAST 7(SI), X12, X26 // 62621d08dd9607000000 or 
62629d08dd9607000000 + VAESENCLAST X23, X16, X26 // 62227d00ddd7 or 6222fd00ddd7 + VAESENCLAST X11, X16, X26 // 62427d00ddd3 or 6242fd00ddd3 + VAESENCLAST X31, X16, X26 // 62027d00ddd7 or 6202fd00ddd7 + VAESENCLAST (AX), X16, X26 // 62627d00dd10 or 6262fd00dd10 + VAESENCLAST 7(SI), X16, X26 // 62627d00dd9607000000 or 6262fd00dd9607000000 + VAESENCLAST X23, X23, X26 // 62224500ddd7 or 6222c500ddd7 + VAESENCLAST X11, X23, X26 // 62424500ddd3 or 6242c500ddd3 + VAESENCLAST X31, X23, X26 // 62024500ddd7 or 6202c500ddd7 + VAESENCLAST (AX), X23, X26 // 62624500dd10 or 6262c500dd10 + VAESENCLAST 7(SI), X23, X26 // 62624500dd9607000000 or 6262c500dd9607000000 + VAESENCLAST X23, X12, X23 // 62a21d08ddff or 62a29d08ddff + VAESENCLAST X11, X12, X23 // 62c21d08ddfb or 62c29d08ddfb + VAESENCLAST X31, X12, X23 // 62821d08ddff or 62829d08ddff + VAESENCLAST (AX), X12, X23 // 62e21d08dd38 or 62e29d08dd38 + VAESENCLAST 7(SI), X12, X23 // 62e21d08ddbe07000000 or 62e29d08ddbe07000000 + VAESENCLAST X23, X16, X23 // 62a27d00ddff or 62a2fd00ddff + VAESENCLAST X11, X16, X23 // 62c27d00ddfb or 62c2fd00ddfb + VAESENCLAST X31, X16, X23 // 62827d00ddff or 6282fd00ddff + VAESENCLAST (AX), X16, X23 // 62e27d00dd38 or 62e2fd00dd38 + VAESENCLAST 7(SI), X16, X23 // 62e27d00ddbe07000000 or 62e2fd00ddbe07000000 + VAESENCLAST X23, X23, X23 // 62a24500ddff or 62a2c500ddff + VAESENCLAST X11, X23, X23 // 62c24500ddfb or 62c2c500ddfb + VAESENCLAST X31, X23, X23 // 62824500ddff or 6282c500ddff + VAESENCLAST (AX), X23, X23 // 62e24500dd38 or 62e2c500dd38 + VAESENCLAST 7(SI), X23, X23 // 62e24500ddbe07000000 or 62e2c500ddbe07000000 + VAESENCLAST Y5, Y19, Y3 // 62f26520dddd or 62f2e520dddd + VAESENCLAST Y16, Y19, Y3 // 62b26520ddd8 or 62b2e520ddd8 + VAESENCLAST Y2, Y19, Y3 // 62f26520ddda or 62f2e520ddda + VAESENCLAST 7(SI)(DI*1), Y19, Y3 // 62f26520dd9c3e07000000 or 62f2e520dd9c3e07000000 + VAESENCLAST 15(DX)(BX*8), Y19, Y3 // 62f26520dd9cda0f000000 or 62f2e520dd9cda0f000000 + VAESENCLAST Y16, Y14, Y3 // 
62b20d28ddd8 or 62b28d28ddd8 + VAESENCLAST Y5, Y21, Y3 // 62f25520dddd or 62f2d520dddd + VAESENCLAST Y16, Y21, Y3 // 62b25520ddd8 or 62b2d520ddd8 + VAESENCLAST Y2, Y21, Y3 // 62f25520ddda or 62f2d520ddda + VAESENCLAST 7(SI)(DI*1), Y21, Y3 // 62f25520dd9c3e07000000 or 62f2d520dd9c3e07000000 + VAESENCLAST 15(DX)(BX*8), Y21, Y3 // 62f25520dd9cda0f000000 or 62f2d520dd9cda0f000000 + VAESENCLAST Y5, Y19, Y19 // 62e26520dddd or 62e2e520dddd + VAESENCLAST Y16, Y19, Y19 // 62a26520ddd8 or 62a2e520ddd8 + VAESENCLAST Y2, Y19, Y19 // 62e26520ddda or 62e2e520ddda + VAESENCLAST 7(SI)(DI*1), Y19, Y19 // 62e26520dd9c3e07000000 or 62e2e520dd9c3e07000000 + VAESENCLAST 15(DX)(BX*8), Y19, Y19 // 62e26520dd9cda0f000000 or 62e2e520dd9cda0f000000 + VAESENCLAST Y5, Y14, Y19 // 62e20d28dddd or 62e28d28dddd + VAESENCLAST Y16, Y14, Y19 // 62a20d28ddd8 or 62a28d28ddd8 + VAESENCLAST Y2, Y14, Y19 // 62e20d28ddda or 62e28d28ddda + VAESENCLAST 7(SI)(DI*1), Y14, Y19 // 62e20d28dd9c3e07000000 or 62e28d28dd9c3e07000000 + VAESENCLAST 15(DX)(BX*8), Y14, Y19 // 62e20d28dd9cda0f000000 or 62e28d28dd9cda0f000000 + VAESENCLAST Y5, Y21, Y19 // 62e25520dddd or 62e2d520dddd + VAESENCLAST Y16, Y21, Y19 // 62a25520ddd8 or 62a2d520ddd8 + VAESENCLAST Y2, Y21, Y19 // 62e25520ddda or 62e2d520ddda + VAESENCLAST 7(SI)(DI*1), Y21, Y19 // 62e25520dd9c3e07000000 or 62e2d520dd9c3e07000000 + VAESENCLAST 15(DX)(BX*8), Y21, Y19 // 62e25520dd9cda0f000000 or 62e2d520dd9cda0f000000 + VAESENCLAST Y5, Y19, Y23 // 62e26520ddfd or 62e2e520ddfd + VAESENCLAST Y16, Y19, Y23 // 62a26520ddf8 or 62a2e520ddf8 + VAESENCLAST Y2, Y19, Y23 // 62e26520ddfa or 62e2e520ddfa + VAESENCLAST 7(SI)(DI*1), Y19, Y23 // 62e26520ddbc3e07000000 or 62e2e520ddbc3e07000000 + VAESENCLAST 15(DX)(BX*8), Y19, Y23 // 62e26520ddbcda0f000000 or 62e2e520ddbcda0f000000 + VAESENCLAST Y5, Y14, Y23 // 62e20d28ddfd or 62e28d28ddfd + VAESENCLAST Y16, Y14, Y23 // 62a20d28ddf8 or 62a28d28ddf8 + VAESENCLAST Y2, Y14, Y23 // 62e20d28ddfa or 62e28d28ddfa + VAESENCLAST 
7(SI)(DI*1), Y14, Y23 // 62e20d28ddbc3e07000000 or 62e28d28ddbc3e07000000 + VAESENCLAST 15(DX)(BX*8), Y14, Y23 // 62e20d28ddbcda0f000000 or 62e28d28ddbcda0f000000 + VAESENCLAST Y5, Y21, Y23 // 62e25520ddfd or 62e2d520ddfd + VAESENCLAST Y16, Y21, Y23 // 62a25520ddf8 or 62a2d520ddf8 + VAESENCLAST Y2, Y21, Y23 // 62e25520ddfa or 62e2d520ddfa + VAESENCLAST 7(SI)(DI*1), Y21, Y23 // 62e25520ddbc3e07000000 or 62e2d520ddbc3e07000000 + VAESENCLAST 15(DX)(BX*8), Y21, Y23 // 62e25520ddbcda0f000000 or 62e2d520ddbcda0f000000 + VAESENCLAST Z25, Z6, Z22 // 62824d48ddf1 or 6282cd48ddf1 + VAESENCLAST Z12, Z6, Z22 // 62c24d48ddf4 or 62c2cd48ddf4 + VAESENCLAST 7(SI)(DI*1), Z6, Z22 // 62e24d48ddb43e07000000 or 62e2cd48ddb43e07000000 + VAESENCLAST 15(DX)(BX*8), Z6, Z22 // 62e24d48ddb4da0f000000 or 62e2cd48ddb4da0f000000 + VAESENCLAST Z25, Z8, Z22 // 62823d48ddf1 or 6282bd48ddf1 + VAESENCLAST Z12, Z8, Z22 // 62c23d48ddf4 or 62c2bd48ddf4 + VAESENCLAST 7(SI)(DI*1), Z8, Z22 // 62e23d48ddb43e07000000 or 62e2bd48ddb43e07000000 + VAESENCLAST 15(DX)(BX*8), Z8, Z22 // 62e23d48ddb4da0f000000 or 62e2bd48ddb4da0f000000 + VAESENCLAST Z25, Z6, Z11 // 62124d48ddd9 or 6212cd48ddd9 + VAESENCLAST Z12, Z6, Z11 // 62524d48dddc or 6252cd48dddc + VAESENCLAST 7(SI)(DI*1), Z6, Z11 // 62724d48dd9c3e07000000 or 6272cd48dd9c3e07000000 + VAESENCLAST 15(DX)(BX*8), Z6, Z11 // 62724d48dd9cda0f000000 or 6272cd48dd9cda0f000000 + VAESENCLAST Z25, Z8, Z11 // 62123d48ddd9 or 6212bd48ddd9 + VAESENCLAST Z12, Z8, Z11 // 62523d48dddc or 6252bd48dddc + VAESENCLAST 7(SI)(DI*1), Z8, Z11 // 62723d48dd9c3e07000000 or 6272bd48dd9c3e07000000 + VAESENCLAST 15(DX)(BX*8), Z8, Z11 // 62723d48dd9cda0f000000 or 6272bd48dd9cda0f000000 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_4fmaps.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_4fmaps.s new file mode 100644 index 
0000000000000000000000000000000000000000..e30f41d679dd37af154ebfe0dbe51d270037ff9b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_4fmaps.s @@ -0,0 +1,66 @@ +// Code generated by avx512test. DO NOT EDIT. + +#include "../../../../../../runtime/textflag.h" + +TEXT asmtest_avx512_4fmaps(SB), NOSPLIT, $0 + V4FMADDPS 17(SP), [Z0-Z3], K2, Z0 // 62f27f4a9a842411000000 + V4FMADDPS -17(BP)(SI*4), [Z0-Z3], K2, Z0 // 62f27f4a9a84b5efffffff + V4FMADDPS 17(SP), [Z10-Z13], K2, Z0 // 62f22f4a9a842411000000 + V4FMADDPS -17(BP)(SI*4), [Z10-Z13], K2, Z0 // 62f22f4a9a84b5efffffff + V4FMADDPS 17(SP), [Z20-Z23], K2, Z0 // 62f25f429a842411000000 + V4FMADDPS -17(BP)(SI*4), [Z20-Z23], K2, Z0 // 62f25f429a84b5efffffff + V4FMADDPS 17(SP), [Z0-Z3], K2, Z8 // 62727f4a9a842411000000 + V4FMADDPS -17(BP)(SI*4), [Z0-Z3], K2, Z8 // 62727f4a9a84b5efffffff + V4FMADDPS 17(SP), [Z10-Z13], K2, Z8 // 62722f4a9a842411000000 + V4FMADDPS -17(BP)(SI*4), [Z10-Z13], K2, Z8 // 62722f4a9a84b5efffffff + V4FMADDPS 17(SP), [Z20-Z23], K2, Z8 // 62725f429a842411000000 + V4FMADDPS -17(BP)(SI*4), [Z20-Z23], K2, Z8 // 62725f429a84b5efffffff + V4FMADDSS 7(AX), [X0-X3], K5, X22 // 62e27f0d9bb007000000 or 62e27f2d9bb007000000 or 62e27f4d9bb007000000 + V4FMADDSS (DI), [X0-X3], K5, X22 // 62e27f0d9b37 or 62e27f2d9b37 or 62e27f4d9b37 + V4FMADDSS 7(AX), [X10-X13], K5, X22 // 62e22f0d9bb007000000 or 62e22f2d9bb007000000 or 62e22f4d9bb007000000 + V4FMADDSS (DI), [X10-X13], K5, X22 // 62e22f0d9b37 or 62e22f2d9b37 or 62e22f4d9b37 + V4FMADDSS 7(AX), [X20-X23], K5, X22 // 62e25f059bb007000000 or 62e25f259bb007000000 or 62e25f459bb007000000 + V4FMADDSS (DI), [X20-X23], K5, X22 // 62e25f059b37 or 62e25f259b37 or 62e25f459b37 + V4FMADDSS 7(AX), [X0-X3], K5, X30 // 62627f0d9bb007000000 or 62627f2d9bb007000000 or 62627f4d9bb007000000 + V4FMADDSS (DI), [X0-X3], K5, X30 // 62627f0d9b37 or 62627f2d9b37 or 62627f4d9b37 + V4FMADDSS 7(AX), [X10-X13], K5, X30 // 62622f0d9bb007000000 or 
62622f2d9bb007000000 or 62622f4d9bb007000000 + V4FMADDSS (DI), [X10-X13], K5, X30 // 62622f0d9b37 or 62622f2d9b37 or 62622f4d9b37 + V4FMADDSS 7(AX), [X20-X23], K5, X30 // 62625f059bb007000000 or 62625f259bb007000000 or 62625f459bb007000000 + V4FMADDSS (DI), [X20-X23], K5, X30 // 62625f059b37 or 62625f259b37 or 62625f459b37 + V4FMADDSS 7(AX), [X0-X3], K5, X3 // 62f27f0d9b9807000000 or 62f27f2d9b9807000000 or 62f27f4d9b9807000000 + V4FMADDSS (DI), [X0-X3], K5, X3 // 62f27f0d9b1f or 62f27f2d9b1f or 62f27f4d9b1f + V4FMADDSS 7(AX), [X10-X13], K5, X3 // 62f22f0d9b9807000000 or 62f22f2d9b9807000000 or 62f22f4d9b9807000000 + V4FMADDSS (DI), [X10-X13], K5, X3 // 62f22f0d9b1f or 62f22f2d9b1f or 62f22f4d9b1f + V4FMADDSS 7(AX), [X20-X23], K5, X3 // 62f25f059b9807000000 or 62f25f259b9807000000 or 62f25f459b9807000000 + V4FMADDSS (DI), [X20-X23], K5, X3 // 62f25f059b1f or 62f25f259b1f or 62f25f459b1f + V4FNMADDPS 99(R15)(R15*1), [Z1-Z4], K3, Z15 // 6212774baabc3f63000000 + V4FNMADDPS (DX), [Z1-Z4], K3, Z15 // 6272774baa3a + V4FNMADDPS 99(R15)(R15*1), [Z11-Z14], K3, Z15 // 6212274baabc3f63000000 + V4FNMADDPS (DX), [Z11-Z14], K3, Z15 // 6272274baa3a + V4FNMADDPS 99(R15)(R15*1), [Z21-Z24], K3, Z15 // 62125743aabc3f63000000 + V4FNMADDPS (DX), [Z21-Z24], K3, Z15 // 62725743aa3a + V4FNMADDPS 99(R15)(R15*1), [Z1-Z4], K3, Z12 // 6212774baaa43f63000000 + V4FNMADDPS (DX), [Z1-Z4], K3, Z12 // 6272774baa22 + V4FNMADDPS 99(R15)(R15*1), [Z11-Z14], K3, Z12 // 6212274baaa43f63000000 + V4FNMADDPS (DX), [Z11-Z14], K3, Z12 // 6272274baa22 + V4FNMADDPS 99(R15)(R15*1), [Z21-Z24], K3, Z12 // 62125743aaa43f63000000 + V4FNMADDPS (DX), [Z21-Z24], K3, Z12 // 62725743aa22 + V4FNMADDSS -17(BP)(SI*8), [X1-X4], K4, X11 // 6272770cab9cf5efffffff or 6272772cab9cf5efffffff or 6272774cab9cf5efffffff + V4FNMADDSS (R15), [X1-X4], K4, X11 // 6252770cab1f or 6252772cab1f or 6252774cab1f + V4FNMADDSS -17(BP)(SI*8), [X11-X14], K4, X11 // 6272270cab9cf5efffffff or 6272272cab9cf5efffffff or 6272274cab9cf5efffffff + 
V4FNMADDSS (R15), [X11-X14], K4, X11 // 6252270cab1f or 6252272cab1f or 6252274cab1f + V4FNMADDSS -17(BP)(SI*8), [X21-X24], K4, X11 // 62725704ab9cf5efffffff or 62725724ab9cf5efffffff or 62725744ab9cf5efffffff + V4FNMADDSS (R15), [X21-X24], K4, X11 // 62525704ab1f or 62525724ab1f or 62525744ab1f + V4FNMADDSS -17(BP)(SI*8), [X1-X4], K4, X15 // 6272770cabbcf5efffffff or 6272772cabbcf5efffffff or 6272774cabbcf5efffffff + V4FNMADDSS (R15), [X1-X4], K4, X15 // 6252770cab3f or 6252772cab3f or 6252774cab3f + V4FNMADDSS -17(BP)(SI*8), [X11-X14], K4, X15 // 6272270cabbcf5efffffff or 6272272cabbcf5efffffff or 6272274cabbcf5efffffff + V4FNMADDSS (R15), [X11-X14], K4, X15 // 6252270cab3f or 6252272cab3f or 6252274cab3f + V4FNMADDSS -17(BP)(SI*8), [X21-X24], K4, X15 // 62725704abbcf5efffffff or 62725724abbcf5efffffff or 62725744abbcf5efffffff + V4FNMADDSS (R15), [X21-X24], K4, X15 // 62525704ab3f or 62525724ab3f or 62525744ab3f + V4FNMADDSS -17(BP)(SI*8), [X1-X4], K4, X30 // 6262770cabb4f5efffffff or 6262772cabb4f5efffffff or 6262774cabb4f5efffffff + V4FNMADDSS (R15), [X1-X4], K4, X30 // 6242770cab37 or 6242772cab37 or 6242774cab37 + V4FNMADDSS -17(BP)(SI*8), [X11-X14], K4, X30 // 6262270cabb4f5efffffff or 6262272cabb4f5efffffff or 6262274cabb4f5efffffff + V4FNMADDSS (R15), [X11-X14], K4, X30 // 6242270cab37 or 6242272cab37 or 6242274cab37 + V4FNMADDSS -17(BP)(SI*8), [X21-X24], K4, X30 // 62625704abb4f5efffffff or 62625724abb4f5efffffff or 62625744abb4f5efffffff + V4FNMADDSS (R15), [X21-X24], K4, X30 // 62425704ab37 or 62425724ab37 or 62425744ab37 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_4vnniw.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_4vnniw.s new file mode 100644 index 0000000000000000000000000000000000000000..5a80ed0f458022f2192ba1e2a6b3991682e89682 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_4vnniw.s @@ -0,0 +1,30 @@ 
+// Code generated by avx512test. DO NOT EDIT. + +#include "../../../../../../runtime/textflag.h" + +TEXT asmtest_avx512_4vnniw(SB), NOSPLIT, $0 + VP4DPWSSD 7(SI)(DI*1), [Z2-Z5], K4, Z17 // 62e26f4c528c3e07000000 + VP4DPWSSD 15(DX)(BX*8), [Z2-Z5], K4, Z17 // 62e26f4c528cda0f000000 + VP4DPWSSD 7(SI)(DI*1), [Z12-Z15], K4, Z17 // 62e21f4c528c3e07000000 + VP4DPWSSD 15(DX)(BX*8), [Z12-Z15], K4, Z17 // 62e21f4c528cda0f000000 + VP4DPWSSD 7(SI)(DI*1), [Z22-Z25], K4, Z17 // 62e24f44528c3e07000000 + VP4DPWSSD 15(DX)(BX*8), [Z22-Z25], K4, Z17 // 62e24f44528cda0f000000 + VP4DPWSSD 7(SI)(DI*1), [Z2-Z5], K4, Z23 // 62e26f4c52bc3e07000000 + VP4DPWSSD 15(DX)(BX*8), [Z2-Z5], K4, Z23 // 62e26f4c52bcda0f000000 + VP4DPWSSD 7(SI)(DI*1), [Z12-Z15], K4, Z23 // 62e21f4c52bc3e07000000 + VP4DPWSSD 15(DX)(BX*8), [Z12-Z15], K4, Z23 // 62e21f4c52bcda0f000000 + VP4DPWSSD 7(SI)(DI*1), [Z22-Z25], K4, Z23 // 62e24f4452bc3e07000000 + VP4DPWSSD 15(DX)(BX*8), [Z22-Z25], K4, Z23 // 62e24f4452bcda0f000000 + VP4DPWSSDS -7(DI)(R8*1), [Z4-Z7], K1, Z31 // 62225f4953bc07f9ffffff + VP4DPWSSDS (SP), [Z4-Z7], K1, Z31 // 62625f49533c24 + VP4DPWSSDS -7(DI)(R8*1), [Z14-Z17], K1, Z31 // 62220f4953bc07f9ffffff + VP4DPWSSDS (SP), [Z14-Z17], K1, Z31 // 62620f49533c24 + VP4DPWSSDS -7(DI)(R8*1), [Z24-Z27], K1, Z31 // 62223f4153bc07f9ffffff + VP4DPWSSDS (SP), [Z24-Z27], K1, Z31 // 62623f41533c24 + VP4DPWSSDS -7(DI)(R8*1), [Z4-Z7], K1, Z0 // 62b25f49538407f9ffffff + VP4DPWSSDS (SP), [Z4-Z7], K1, Z0 // 62f25f49530424 + VP4DPWSSDS -7(DI)(R8*1), [Z14-Z17], K1, Z0 // 62b20f49538407f9ffffff + VP4DPWSSDS (SP), [Z14-Z17], K1, Z0 // 62f20f49530424 + VP4DPWSSDS -7(DI)(R8*1), [Z24-Z27], K1, Z0 // 62b23f41538407f9ffffff + VP4DPWSSDS (SP), [Z24-Z27], K1, Z0 // 62f23f41530424 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_bitalg.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_bitalg.s new file mode 100644 index 
0000000000000000000000000000000000000000..fc9dd0cecdc5ed8f0760ea1771719122b08a65cb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_bitalg.s @@ -0,0 +1,154 @@ +// Code generated by avx512test. DO NOT EDIT. + +#include "../../../../../../runtime/textflag.h" + +TEXT asmtest_avx512_bitalg(SB), NOSPLIT, $0 + VPOPCNTB X14, K4, X16 // 62c27d0c54c6 + VPOPCNTB X19, K4, X16 // 62a27d0c54c3 + VPOPCNTB X8, K4, X16 // 62c27d0c54c0 + VPOPCNTB 15(R8)(R14*1), K4, X16 // 62827d0c5484300f000000 + VPOPCNTB 15(R8)(R14*2), K4, X16 // 62827d0c5484700f000000 + VPOPCNTB X14, K4, X14 // 62527d0c54f6 + VPOPCNTB X19, K4, X14 // 62327d0c54f3 + VPOPCNTB X8, K4, X14 // 62527d0c54f0 + VPOPCNTB 15(R8)(R14*1), K4, X14 // 62127d0c54b4300f000000 + VPOPCNTB 15(R8)(R14*2), K4, X14 // 62127d0c54b4700f000000 + VPOPCNTB X14, K4, X11 // 62527d0c54de + VPOPCNTB X19, K4, X11 // 62327d0c54db + VPOPCNTB X8, K4, X11 // 62527d0c54d8 + VPOPCNTB 15(R8)(R14*1), K4, X11 // 62127d0c549c300f000000 + VPOPCNTB 15(R8)(R14*2), K4, X11 // 62127d0c549c700f000000 + VPOPCNTB Y14, K4, Y24 // 62427d2c54c6 + VPOPCNTB Y21, K4, Y24 // 62227d2c54c5 + VPOPCNTB Y1, K4, Y24 // 62627d2c54c1 + VPOPCNTB 15(R8)(R14*8), K4, Y24 // 62027d2c5484f00f000000 + VPOPCNTB -15(R14)(R15*2), K4, Y24 // 62027d2c54847ef1ffffff + VPOPCNTB Y14, K4, Y13 // 62527d2c54ee + VPOPCNTB Y21, K4, Y13 // 62327d2c54ed + VPOPCNTB Y1, K4, Y13 // 62727d2c54e9 + VPOPCNTB 15(R8)(R14*8), K4, Y13 // 62127d2c54acf00f000000 + VPOPCNTB -15(R14)(R15*2), K4, Y13 // 62127d2c54ac7ef1ffffff + VPOPCNTB Y14, K4, Y20 // 62c27d2c54e6 + VPOPCNTB Y21, K4, Y20 // 62a27d2c54e5 + VPOPCNTB Y1, K4, Y20 // 62e27d2c54e1 + VPOPCNTB 15(R8)(R14*8), K4, Y20 // 62827d2c54a4f00f000000 + VPOPCNTB -15(R14)(R15*2), K4, Y20 // 62827d2c54a47ef1ffffff + VPOPCNTB Z18, K7, Z13 // 62327d4f54ea + VPOPCNTB Z8, K7, Z13 // 62527d4f54e8 + VPOPCNTB 17(SP)(BP*8), K7, Z13 // 62727d4f54acec11000000 + VPOPCNTB 17(SP)(BP*4), K7, Z13 // 62727d4f54acac11000000 + 
VPOPCNTW X20, K3, X11 // 6232fd0b54dc + VPOPCNTW X5, K3, X11 // 6272fd0b54dd + VPOPCNTW X25, K3, X11 // 6212fd0b54d9 + VPOPCNTW (CX), K3, X11 // 6272fd0b5419 + VPOPCNTW 99(R15), K3, X11 // 6252fd0b549f63000000 + VPOPCNTW X20, K3, X23 // 62a2fd0b54fc + VPOPCNTW X5, K3, X23 // 62e2fd0b54fd + VPOPCNTW X25, K3, X23 // 6282fd0b54f9 + VPOPCNTW (CX), K3, X23 // 62e2fd0b5439 + VPOPCNTW 99(R15), K3, X23 // 62c2fd0b54bf63000000 + VPOPCNTW X20, K3, X2 // 62b2fd0b54d4 + VPOPCNTW X5, K3, X2 // 62f2fd0b54d5 + VPOPCNTW X25, K3, X2 // 6292fd0b54d1 + VPOPCNTW (CX), K3, X2 // 62f2fd0b5411 + VPOPCNTW 99(R15), K3, X2 // 62d2fd0b549763000000 + VPOPCNTW Y13, K3, Y21 // 62c2fd2b54ed + VPOPCNTW Y18, K3, Y21 // 62a2fd2b54ea + VPOPCNTW Y24, K3, Y21 // 6282fd2b54e8 + VPOPCNTW (SI), K3, Y21 // 62e2fd2b542e + VPOPCNTW 7(SI)(DI*2), K3, Y21 // 62e2fd2b54ac7e07000000 + VPOPCNTW Y13, K3, Y7 // 62d2fd2b54fd + VPOPCNTW Y18, K3, Y7 // 62b2fd2b54fa + VPOPCNTW Y24, K3, Y7 // 6292fd2b54f8 + VPOPCNTW (SI), K3, Y7 // 62f2fd2b543e + VPOPCNTW 7(SI)(DI*2), K3, Y7 // 62f2fd2b54bc7e07000000 + VPOPCNTW Y13, K3, Y30 // 6242fd2b54f5 + VPOPCNTW Y18, K3, Y30 // 6222fd2b54f2 + VPOPCNTW Y24, K3, Y30 // 6202fd2b54f0 + VPOPCNTW (SI), K3, Y30 // 6262fd2b5436 + VPOPCNTW 7(SI)(DI*2), K3, Y30 // 6262fd2b54b47e07000000 + VPOPCNTW Z28, K3, Z12 // 6212fd4b54e4 + VPOPCNTW Z13, K3, Z12 // 6252fd4b54e5 + VPOPCNTW 7(AX), K3, Z12 // 6272fd4b54a007000000 + VPOPCNTW (DI), K3, Z12 // 6272fd4b5427 + VPOPCNTW Z28, K3, Z16 // 6282fd4b54c4 + VPOPCNTW Z13, K3, Z16 // 62c2fd4b54c5 + VPOPCNTW 7(AX), K3, Z16 // 62e2fd4b548007000000 + VPOPCNTW (DI), K3, Z16 // 62e2fd4b5407 + VPSHUFBITQMB X24, X7, K6, K0 // 6292450e8fc0 + VPSHUFBITQMB X7, X7, K6, K0 // 62f2450e8fc7 + VPSHUFBITQMB X0, X7, K6, K0 // 62f2450e8fc0 + VPSHUFBITQMB (R8), X7, K6, K0 // 62d2450e8f00 + VPSHUFBITQMB 15(DX)(BX*2), X7, K6, K0 // 62f2450e8f845a0f000000 + VPSHUFBITQMB X24, X13, K6, K0 // 6292150e8fc0 + VPSHUFBITQMB X7, X13, K6, K0 // 62f2150e8fc7 + VPSHUFBITQMB X0, X13, K6, 
K0 // 62f2150e8fc0 + VPSHUFBITQMB (R8), X13, K6, K0 // 62d2150e8f00 + VPSHUFBITQMB 15(DX)(BX*2), X13, K6, K0 // 62f2150e8f845a0f000000 + VPSHUFBITQMB X24, X8, K6, K0 // 62923d0e8fc0 + VPSHUFBITQMB X7, X8, K6, K0 // 62f23d0e8fc7 + VPSHUFBITQMB X0, X8, K6, K0 // 62f23d0e8fc0 + VPSHUFBITQMB (R8), X8, K6, K0 // 62d23d0e8f00 + VPSHUFBITQMB 15(DX)(BX*2), X8, K6, K0 // 62f23d0e8f845a0f000000 + VPSHUFBITQMB X24, X7, K6, K5 // 6292450e8fe8 + VPSHUFBITQMB X7, X7, K6, K5 // 62f2450e8fef + VPSHUFBITQMB X0, X7, K6, K5 // 62f2450e8fe8 + VPSHUFBITQMB (R8), X7, K6, K5 // 62d2450e8f28 + VPSHUFBITQMB 15(DX)(BX*2), X7, K6, K5 // 62f2450e8fac5a0f000000 + VPSHUFBITQMB X24, X13, K6, K5 // 6292150e8fe8 + VPSHUFBITQMB X7, X13, K6, K5 // 62f2150e8fef + VPSHUFBITQMB X0, X13, K6, K5 // 62f2150e8fe8 + VPSHUFBITQMB (R8), X13, K6, K5 // 62d2150e8f28 + VPSHUFBITQMB 15(DX)(BX*2), X13, K6, K5 // 62f2150e8fac5a0f000000 + VPSHUFBITQMB X24, X8, K6, K5 // 62923d0e8fe8 + VPSHUFBITQMB X7, X8, K6, K5 // 62f23d0e8fef + VPSHUFBITQMB X0, X8, K6, K5 // 62f23d0e8fe8 + VPSHUFBITQMB (R8), X8, K6, K5 // 62d23d0e8f28 + VPSHUFBITQMB 15(DX)(BX*2), X8, K6, K5 // 62f23d0e8fac5a0f000000 + VPSHUFBITQMB Y14, Y2, K3, K6 // 62d26d2b8ff6 + VPSHUFBITQMB Y8, Y2, K3, K6 // 62d26d2b8ff0 + VPSHUFBITQMB Y20, Y2, K3, K6 // 62b26d2b8ff4 + VPSHUFBITQMB -17(BP), Y2, K3, K6 // 62f26d2b8fb5efffffff + VPSHUFBITQMB -15(R14)(R15*8), Y2, K3, K6 // 62926d2b8fb4fef1ffffff + VPSHUFBITQMB Y14, Y7, K3, K6 // 62d2452b8ff6 + VPSHUFBITQMB Y8, Y7, K3, K6 // 62d2452b8ff0 + VPSHUFBITQMB Y20, Y7, K3, K6 // 62b2452b8ff4 + VPSHUFBITQMB -17(BP), Y7, K3, K6 // 62f2452b8fb5efffffff + VPSHUFBITQMB -15(R14)(R15*8), Y7, K3, K6 // 6292452b8fb4fef1ffffff + VPSHUFBITQMB Y14, Y21, K3, K6 // 62d255238ff6 + VPSHUFBITQMB Y8, Y21, K3, K6 // 62d255238ff0 + VPSHUFBITQMB Y20, Y21, K3, K6 // 62b255238ff4 + VPSHUFBITQMB -17(BP), Y21, K3, K6 // 62f255238fb5efffffff + VPSHUFBITQMB -15(R14)(R15*8), Y21, K3, K6 // 629255238fb4fef1ffffff + VPSHUFBITQMB Y14, Y2, K3, K5 // 
62d26d2b8fee + VPSHUFBITQMB Y8, Y2, K3, K5 // 62d26d2b8fe8 + VPSHUFBITQMB Y20, Y2, K3, K5 // 62b26d2b8fec + VPSHUFBITQMB -17(BP), Y2, K3, K5 // 62f26d2b8fadefffffff + VPSHUFBITQMB -15(R14)(R15*8), Y2, K3, K5 // 62926d2b8facfef1ffffff + VPSHUFBITQMB Y14, Y7, K3, K5 // 62d2452b8fee + VPSHUFBITQMB Y8, Y7, K3, K5 // 62d2452b8fe8 + VPSHUFBITQMB Y20, Y7, K3, K5 // 62b2452b8fec + VPSHUFBITQMB -17(BP), Y7, K3, K5 // 62f2452b8fadefffffff + VPSHUFBITQMB -15(R14)(R15*8), Y7, K3, K5 // 6292452b8facfef1ffffff + VPSHUFBITQMB Y14, Y21, K3, K5 // 62d255238fee + VPSHUFBITQMB Y8, Y21, K3, K5 // 62d255238fe8 + VPSHUFBITQMB Y20, Y21, K3, K5 // 62b255238fec + VPSHUFBITQMB -17(BP), Y21, K3, K5 // 62f255238fadefffffff + VPSHUFBITQMB -15(R14)(R15*8), Y21, K3, K5 // 629255238facfef1ffffff + VPSHUFBITQMB Z3, Z6, K7, K1 // 62f24d4f8fcb + VPSHUFBITQMB Z21, Z6, K7, K1 // 62b24d4f8fcd + VPSHUFBITQMB -15(R14)(R15*1), Z6, K7, K1 // 62924d4f8f8c3ef1ffffff + VPSHUFBITQMB -15(BX), Z6, K7, K1 // 62f24d4f8f8bf1ffffff + VPSHUFBITQMB Z3, Z25, K7, K1 // 62f235478fcb + VPSHUFBITQMB Z21, Z25, K7, K1 // 62b235478fcd + VPSHUFBITQMB -15(R14)(R15*1), Z25, K7, K1 // 629235478f8c3ef1ffffff + VPSHUFBITQMB -15(BX), Z25, K7, K1 // 62f235478f8bf1ffffff + VPSHUFBITQMB Z3, Z6, K7, K5 // 62f24d4f8feb + VPSHUFBITQMB Z21, Z6, K7, K5 // 62b24d4f8fed + VPSHUFBITQMB -15(R14)(R15*1), Z6, K7, K5 // 62924d4f8fac3ef1ffffff + VPSHUFBITQMB -15(BX), Z6, K7, K5 // 62f24d4f8fabf1ffffff + VPSHUFBITQMB Z3, Z25, K7, K5 // 62f235478feb + VPSHUFBITQMB Z21, Z25, K7, K5 // 62b235478fed + VPSHUFBITQMB -15(R14)(R15*1), Z25, K7, K5 // 629235478fac3ef1ffffff + VPSHUFBITQMB -15(BX), Z25, K7, K5 // 62f235478fabf1ffffff + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_ifma.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_ifma.s new file mode 100644 index 0000000000000000000000000000000000000000..6a1e5baadc3e2d14a33056cd221d0f117ce11958 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_ifma.s @@ -0,0 +1,194 @@ +// Code generated by avx512test. DO NOT EDIT. + +#include "../../../../../../runtime/textflag.h" + +TEXT asmtest_avx512_ifma(SB), NOSPLIT, $0 + VPMADD52HUQ X7, X11, K1, X18 // 62e2a509b5d7 + VPMADD52HUQ X0, X11, K1, X18 // 62e2a509b5d0 + VPMADD52HUQ 17(SP)(BP*2), X11, K1, X18 // 62e2a509b5946c11000000 + VPMADD52HUQ -7(DI)(R8*4), X11, K1, X18 // 62a2a509b59487f9ffffff + VPMADD52HUQ X7, X31, K1, X18 // 62e28501b5d7 + VPMADD52HUQ X0, X31, K1, X18 // 62e28501b5d0 + VPMADD52HUQ 17(SP)(BP*2), X31, K1, X18 // 62e28501b5946c11000000 + VPMADD52HUQ -7(DI)(R8*4), X31, K1, X18 // 62a28501b59487f9ffffff + VPMADD52HUQ X7, X3, K1, X18 // 62e2e509b5d7 + VPMADD52HUQ X0, X3, K1, X18 // 62e2e509b5d0 + VPMADD52HUQ 17(SP)(BP*2), X3, K1, X18 // 62e2e509b5946c11000000 + VPMADD52HUQ -7(DI)(R8*4), X3, K1, X18 // 62a2e509b59487f9ffffff + VPMADD52HUQ X7, X11, K1, X21 // 62e2a509b5ef + VPMADD52HUQ X0, X11, K1, X21 // 62e2a509b5e8 + VPMADD52HUQ 17(SP)(BP*2), X11, K1, X21 // 62e2a509b5ac6c11000000 + VPMADD52HUQ -7(DI)(R8*4), X11, K1, X21 // 62a2a509b5ac87f9ffffff + VPMADD52HUQ X7, X31, K1, X21 // 62e28501b5ef + VPMADD52HUQ X0, X31, K1, X21 // 62e28501b5e8 + VPMADD52HUQ 17(SP)(BP*2), X31, K1, X21 // 62e28501b5ac6c11000000 + VPMADD52HUQ -7(DI)(R8*4), X31, K1, X21 // 62a28501b5ac87f9ffffff + VPMADD52HUQ X7, X3, K1, X21 // 62e2e509b5ef + VPMADD52HUQ X0, X3, K1, X21 // 62e2e509b5e8 + VPMADD52HUQ 17(SP)(BP*2), X3, K1, X21 // 62e2e509b5ac6c11000000 + VPMADD52HUQ -7(DI)(R8*4), X3, K1, X21 // 62a2e509b5ac87f9ffffff + VPMADD52HUQ X7, X11, K1, X1 // 62f2a509b5cf + VPMADD52HUQ X0, X11, K1, X1 // 62f2a509b5c8 + VPMADD52HUQ 17(SP)(BP*2), X11, K1, X1 // 62f2a509b58c6c11000000 + VPMADD52HUQ -7(DI)(R8*4), X11, K1, X1 // 62b2a509b58c87f9ffffff + VPMADD52HUQ X7, X31, K1, X1 // 62f28501b5cf + VPMADD52HUQ X0, X31, K1, X1 // 62f28501b5c8 + VPMADD52HUQ 17(SP)(BP*2), X31, K1, X1 // 62f28501b58c6c11000000 + 
VPMADD52HUQ -7(DI)(R8*4), X31, K1, X1 // 62b28501b58c87f9ffffff + VPMADD52HUQ X7, X3, K1, X1 // 62f2e509b5cf + VPMADD52HUQ X0, X3, K1, X1 // 62f2e509b5c8 + VPMADD52HUQ 17(SP)(BP*2), X3, K1, X1 // 62f2e509b58c6c11000000 + VPMADD52HUQ -7(DI)(R8*4), X3, K1, X1 // 62b2e509b58c87f9ffffff + VPMADD52HUQ Y28, Y31, K7, Y17 // 62828527b5cc + VPMADD52HUQ Y13, Y31, K7, Y17 // 62c28527b5cd + VPMADD52HUQ Y7, Y31, K7, Y17 // 62e28527b5cf + VPMADD52HUQ (R8), Y31, K7, Y17 // 62c28527b508 + VPMADD52HUQ 15(DX)(BX*2), Y31, K7, Y17 // 62e28527b58c5a0f000000 + VPMADD52HUQ Y28, Y8, K7, Y17 // 6282bd2fb5cc + VPMADD52HUQ Y13, Y8, K7, Y17 // 62c2bd2fb5cd + VPMADD52HUQ Y7, Y8, K7, Y17 // 62e2bd2fb5cf + VPMADD52HUQ (R8), Y8, K7, Y17 // 62c2bd2fb508 + VPMADD52HUQ 15(DX)(BX*2), Y8, K7, Y17 // 62e2bd2fb58c5a0f000000 + VPMADD52HUQ Y28, Y1, K7, Y17 // 6282f52fb5cc + VPMADD52HUQ Y13, Y1, K7, Y17 // 62c2f52fb5cd + VPMADD52HUQ Y7, Y1, K7, Y17 // 62e2f52fb5cf + VPMADD52HUQ (R8), Y1, K7, Y17 // 62c2f52fb508 + VPMADD52HUQ 15(DX)(BX*2), Y1, K7, Y17 // 62e2f52fb58c5a0f000000 + VPMADD52HUQ Y28, Y31, K7, Y7 // 62928527b5fc + VPMADD52HUQ Y13, Y31, K7, Y7 // 62d28527b5fd + VPMADD52HUQ Y7, Y31, K7, Y7 // 62f28527b5ff + VPMADD52HUQ (R8), Y31, K7, Y7 // 62d28527b538 + VPMADD52HUQ 15(DX)(BX*2), Y31, K7, Y7 // 62f28527b5bc5a0f000000 + VPMADD52HUQ Y28, Y8, K7, Y7 // 6292bd2fb5fc + VPMADD52HUQ Y13, Y8, K7, Y7 // 62d2bd2fb5fd + VPMADD52HUQ Y7, Y8, K7, Y7 // 62f2bd2fb5ff + VPMADD52HUQ (R8), Y8, K7, Y7 // 62d2bd2fb538 + VPMADD52HUQ 15(DX)(BX*2), Y8, K7, Y7 // 62f2bd2fb5bc5a0f000000 + VPMADD52HUQ Y28, Y1, K7, Y7 // 6292f52fb5fc + VPMADD52HUQ Y13, Y1, K7, Y7 // 62d2f52fb5fd + VPMADD52HUQ Y7, Y1, K7, Y7 // 62f2f52fb5ff + VPMADD52HUQ (R8), Y1, K7, Y7 // 62d2f52fb538 + VPMADD52HUQ 15(DX)(BX*2), Y1, K7, Y7 // 62f2f52fb5bc5a0f000000 + VPMADD52HUQ Y28, Y31, K7, Y9 // 62128527b5cc + VPMADD52HUQ Y13, Y31, K7, Y9 // 62528527b5cd + VPMADD52HUQ Y7, Y31, K7, Y9 // 62728527b5cf + VPMADD52HUQ (R8), Y31, K7, Y9 // 62528527b508 + 
VPMADD52HUQ 15(DX)(BX*2), Y31, K7, Y9 // 62728527b58c5a0f000000 + VPMADD52HUQ Y28, Y8, K7, Y9 // 6212bd2fb5cc + VPMADD52HUQ Y13, Y8, K7, Y9 // 6252bd2fb5cd + VPMADD52HUQ Y7, Y8, K7, Y9 // 6272bd2fb5cf + VPMADD52HUQ (R8), Y8, K7, Y9 // 6252bd2fb508 + VPMADD52HUQ 15(DX)(BX*2), Y8, K7, Y9 // 6272bd2fb58c5a0f000000 + VPMADD52HUQ Y28, Y1, K7, Y9 // 6212f52fb5cc + VPMADD52HUQ Y13, Y1, K7, Y9 // 6252f52fb5cd + VPMADD52HUQ Y7, Y1, K7, Y9 // 6272f52fb5cf + VPMADD52HUQ (R8), Y1, K7, Y9 // 6252f52fb508 + VPMADD52HUQ 15(DX)(BX*2), Y1, K7, Y9 // 6272f52fb58c5a0f000000 + VPMADD52HUQ Z23, Z23, K1, Z27 // 6222c541b5df + VPMADD52HUQ Z6, Z23, K1, Z27 // 6262c541b5de + VPMADD52HUQ 17(SP), Z23, K1, Z27 // 6262c541b59c2411000000 + VPMADD52HUQ -17(BP)(SI*4), Z23, K1, Z27 // 6262c541b59cb5efffffff + VPMADD52HUQ Z23, Z5, K1, Z27 // 6222d549b5df + VPMADD52HUQ Z6, Z5, K1, Z27 // 6262d549b5de + VPMADD52HUQ 17(SP), Z5, K1, Z27 // 6262d549b59c2411000000 + VPMADD52HUQ -17(BP)(SI*4), Z5, K1, Z27 // 6262d549b59cb5efffffff + VPMADD52HUQ Z23, Z23, K1, Z15 // 6232c541b5ff + VPMADD52HUQ Z6, Z23, K1, Z15 // 6272c541b5fe + VPMADD52HUQ 17(SP), Z23, K1, Z15 // 6272c541b5bc2411000000 + VPMADD52HUQ -17(BP)(SI*4), Z23, K1, Z15 // 6272c541b5bcb5efffffff + VPMADD52HUQ Z23, Z5, K1, Z15 // 6232d549b5ff + VPMADD52HUQ Z6, Z5, K1, Z15 // 6272d549b5fe + VPMADD52HUQ 17(SP), Z5, K1, Z15 // 6272d549b5bc2411000000 + VPMADD52HUQ -17(BP)(SI*4), Z5, K1, Z15 // 6272d549b5bcb5efffffff + VPMADD52LUQ X5, X9, K1, X24 // 6262b509b4c5 + VPMADD52LUQ X31, X9, K1, X24 // 6202b509b4c7 + VPMADD52LUQ X3, X9, K1, X24 // 6262b509b4c3 + VPMADD52LUQ 15(R8), X9, K1, X24 // 6242b509b4800f000000 + VPMADD52LUQ (BP), X9, K1, X24 // 6262b509b44500 + VPMADD52LUQ X5, X7, K1, X24 // 6262c509b4c5 + VPMADD52LUQ X31, X7, K1, X24 // 6202c509b4c7 + VPMADD52LUQ X3, X7, K1, X24 // 6262c509b4c3 + VPMADD52LUQ 15(R8), X7, K1, X24 // 6242c509b4800f000000 + VPMADD52LUQ (BP), X7, K1, X24 // 6262c509b44500 + VPMADD52LUQ X5, X14, K1, X24 // 62628d09b4c5 + 
VPMADD52LUQ X31, X14, K1, X24 // 62028d09b4c7 + VPMADD52LUQ X3, X14, K1, X24 // 62628d09b4c3 + VPMADD52LUQ 15(R8), X14, K1, X24 // 62428d09b4800f000000 + VPMADD52LUQ (BP), X14, K1, X24 // 62628d09b44500 + VPMADD52LUQ X5, X9, K1, X20 // 62e2b509b4e5 + VPMADD52LUQ X31, X9, K1, X20 // 6282b509b4e7 + VPMADD52LUQ X3, X9, K1, X20 // 62e2b509b4e3 + VPMADD52LUQ 15(R8), X9, K1, X20 // 62c2b509b4a00f000000 + VPMADD52LUQ (BP), X9, K1, X20 // 62e2b509b46500 + VPMADD52LUQ X5, X7, K1, X20 // 62e2c509b4e5 + VPMADD52LUQ X31, X7, K1, X20 // 6282c509b4e7 + VPMADD52LUQ X3, X7, K1, X20 // 62e2c509b4e3 + VPMADD52LUQ 15(R8), X7, K1, X20 // 62c2c509b4a00f000000 + VPMADD52LUQ (BP), X7, K1, X20 // 62e2c509b46500 + VPMADD52LUQ X5, X14, K1, X20 // 62e28d09b4e5 + VPMADD52LUQ X31, X14, K1, X20 // 62828d09b4e7 + VPMADD52LUQ X3, X14, K1, X20 // 62e28d09b4e3 + VPMADD52LUQ 15(R8), X14, K1, X20 // 62c28d09b4a00f000000 + VPMADD52LUQ (BP), X14, K1, X20 // 62e28d09b46500 + VPMADD52LUQ X5, X9, K1, X7 // 62f2b509b4fd + VPMADD52LUQ X31, X9, K1, X7 // 6292b509b4ff + VPMADD52LUQ X3, X9, K1, X7 // 62f2b509b4fb + VPMADD52LUQ 15(R8), X9, K1, X7 // 62d2b509b4b80f000000 + VPMADD52LUQ (BP), X9, K1, X7 // 62f2b509b47d00 + VPMADD52LUQ X5, X7, K1, X7 // 62f2c509b4fd + VPMADD52LUQ X31, X7, K1, X7 // 6292c509b4ff + VPMADD52LUQ X3, X7, K1, X7 // 62f2c509b4fb + VPMADD52LUQ 15(R8), X7, K1, X7 // 62d2c509b4b80f000000 + VPMADD52LUQ (BP), X7, K1, X7 // 62f2c509b47d00 + VPMADD52LUQ X5, X14, K1, X7 // 62f28d09b4fd + VPMADD52LUQ X31, X14, K1, X7 // 62928d09b4ff + VPMADD52LUQ X3, X14, K1, X7 // 62f28d09b4fb + VPMADD52LUQ 15(R8), X14, K1, X7 // 62d28d09b4b80f000000 + VPMADD52LUQ (BP), X14, K1, X7 // 62f28d09b47d00 + VPMADD52LUQ Y3, Y9, K1, Y2 // 62f2b529b4d3 + VPMADD52LUQ Y2, Y9, K1, Y2 // 62f2b529b4d2 + VPMADD52LUQ Y9, Y9, K1, Y2 // 62d2b529b4d1 + VPMADD52LUQ 17(SP)(BP*1), Y9, K1, Y2 // 62f2b529b4942c11000000 + VPMADD52LUQ -7(CX)(DX*8), Y9, K1, Y2 // 62f2b529b494d1f9ffffff + VPMADD52LUQ Y3, Y1, K1, Y2 // 62f2f529b4d3 + 
VPMADD52LUQ Y2, Y1, K1, Y2 // 62f2f529b4d2 + VPMADD52LUQ Y9, Y1, K1, Y2 // 62d2f529b4d1 + VPMADD52LUQ 17(SP)(BP*1), Y1, K1, Y2 // 62f2f529b4942c11000000 + VPMADD52LUQ -7(CX)(DX*8), Y1, K1, Y2 // 62f2f529b494d1f9ffffff + VPMADD52LUQ Y3, Y9, K1, Y21 // 62e2b529b4eb + VPMADD52LUQ Y2, Y9, K1, Y21 // 62e2b529b4ea + VPMADD52LUQ Y9, Y9, K1, Y21 // 62c2b529b4e9 + VPMADD52LUQ 17(SP)(BP*1), Y9, K1, Y21 // 62e2b529b4ac2c11000000 + VPMADD52LUQ -7(CX)(DX*8), Y9, K1, Y21 // 62e2b529b4acd1f9ffffff + VPMADD52LUQ Y3, Y1, K1, Y21 // 62e2f529b4eb + VPMADD52LUQ Y2, Y1, K1, Y21 // 62e2f529b4ea + VPMADD52LUQ Y9, Y1, K1, Y21 // 62c2f529b4e9 + VPMADD52LUQ 17(SP)(BP*1), Y1, K1, Y21 // 62e2f529b4ac2c11000000 + VPMADD52LUQ -7(CX)(DX*8), Y1, K1, Y21 // 62e2f529b4acd1f9ffffff + VPMADD52LUQ Y3, Y9, K1, Y12 // 6272b529b4e3 + VPMADD52LUQ Y2, Y9, K1, Y12 // 6272b529b4e2 + VPMADD52LUQ Y9, Y9, K1, Y12 // 6252b529b4e1 + VPMADD52LUQ 17(SP)(BP*1), Y9, K1, Y12 // 6272b529b4a42c11000000 + VPMADD52LUQ -7(CX)(DX*8), Y9, K1, Y12 // 6272b529b4a4d1f9ffffff + VPMADD52LUQ Y3, Y1, K1, Y12 // 6272f529b4e3 + VPMADD52LUQ Y2, Y1, K1, Y12 // 6272f529b4e2 + VPMADD52LUQ Y9, Y1, K1, Y12 // 6252f529b4e1 + VPMADD52LUQ 17(SP)(BP*1), Y1, K1, Y12 // 6272f529b4a42c11000000 + VPMADD52LUQ -7(CX)(DX*8), Y1, K1, Y12 // 6272f529b4a4d1f9ffffff + VPMADD52LUQ Z16, Z21, K7, Z8 // 6232d547b4c0 + VPMADD52LUQ Z13, Z21, K7, Z8 // 6252d547b4c5 + VPMADD52LUQ 7(AX), Z21, K7, Z8 // 6272d547b48007000000 + VPMADD52LUQ (DI), Z21, K7, Z8 // 6272d547b407 + VPMADD52LUQ Z16, Z5, K7, Z8 // 6232d54fb4c0 + VPMADD52LUQ Z13, Z5, K7, Z8 // 6252d54fb4c5 + VPMADD52LUQ 7(AX), Z5, K7, Z8 // 6272d54fb48007000000 + VPMADD52LUQ (DI), Z5, K7, Z8 // 6272d54fb407 + VPMADD52LUQ Z16, Z21, K7, Z28 // 6222d547b4e0 + VPMADD52LUQ Z13, Z21, K7, Z28 // 6242d547b4e5 + VPMADD52LUQ 7(AX), Z21, K7, Z28 // 6262d547b4a007000000 + VPMADD52LUQ (DI), Z21, K7, Z28 // 6262d547b427 + VPMADD52LUQ Z16, Z5, K7, Z28 // 6222d54fb4e0 + VPMADD52LUQ Z13, Z5, K7, Z28 // 6242d54fb4e5 + 
VPMADD52LUQ 7(AX), Z5, K7, Z28 // 6262d54fb4a007000000 + VPMADD52LUQ (DI), Z5, K7, Z28 // 6262d54fb427 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_vbmi.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_vbmi.s new file mode 100644 index 0000000000000000000000000000000000000000..d598acbd7ea2a678b23c53c3c9ec3d90fe3ce152 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_vbmi.s @@ -0,0 +1,415 @@ +// Code generated by avx512test. DO NOT EDIT. + +#include "../../../../../../runtime/textflag.h" + +TEXT asmtest_avx512_vbmi(SB), NOSPLIT, $0 + VPERMB X26, X20, K1, X23 // 62825d018dfa + VPERMB X19, X20, K1, X23 // 62a25d018dfb + VPERMB X0, X20, K1, X23 // 62e25d018df8 + VPERMB 7(SI)(DI*4), X20, K1, X23 // 62e25d018dbcbe07000000 + VPERMB -7(DI)(R8*2), X20, K1, X23 // 62a25d018dbc47f9ffffff + VPERMB X26, X2, K1, X23 // 62826d098dfa + VPERMB X19, X2, K1, X23 // 62a26d098dfb + VPERMB X0, X2, K1, X23 // 62e26d098df8 + VPERMB 7(SI)(DI*4), X2, K1, X23 // 62e26d098dbcbe07000000 + VPERMB -7(DI)(R8*2), X2, K1, X23 // 62a26d098dbc47f9ffffff + VPERMB X26, X9, K1, X23 // 628235098dfa + VPERMB X19, X9, K1, X23 // 62a235098dfb + VPERMB X0, X9, K1, X23 // 62e235098df8 + VPERMB 7(SI)(DI*4), X9, K1, X23 // 62e235098dbcbe07000000 + VPERMB -7(DI)(R8*2), X9, K1, X23 // 62a235098dbc47f9ffffff + VPERMB X26, X20, K1, X30 // 62025d018df2 + VPERMB X19, X20, K1, X30 // 62225d018df3 + VPERMB X0, X20, K1, X30 // 62625d018df0 + VPERMB 7(SI)(DI*4), X20, K1, X30 // 62625d018db4be07000000 + VPERMB -7(DI)(R8*2), X20, K1, X30 // 62225d018db447f9ffffff + VPERMB X26, X2, K1, X30 // 62026d098df2 + VPERMB X19, X2, K1, X30 // 62226d098df3 + VPERMB X0, X2, K1, X30 // 62626d098df0 + VPERMB 7(SI)(DI*4), X2, K1, X30 // 62626d098db4be07000000 + VPERMB -7(DI)(R8*2), X2, K1, X30 // 62226d098db447f9ffffff + VPERMB X26, X9, K1, X30 // 620235098df2 + VPERMB X19, X9, K1, X30 // 
622235098df3 + VPERMB X0, X9, K1, X30 // 626235098df0 + VPERMB 7(SI)(DI*4), X9, K1, X30 // 626235098db4be07000000 + VPERMB -7(DI)(R8*2), X9, K1, X30 // 622235098db447f9ffffff + VPERMB X26, X20, K1, X8 // 62125d018dc2 + VPERMB X19, X20, K1, X8 // 62325d018dc3 + VPERMB X0, X20, K1, X8 // 62725d018dc0 + VPERMB 7(SI)(DI*4), X20, K1, X8 // 62725d018d84be07000000 + VPERMB -7(DI)(R8*2), X20, K1, X8 // 62325d018d8447f9ffffff + VPERMB X26, X2, K1, X8 // 62126d098dc2 + VPERMB X19, X2, K1, X8 // 62326d098dc3 + VPERMB X0, X2, K1, X8 // 62726d098dc0 + VPERMB 7(SI)(DI*4), X2, K1, X8 // 62726d098d84be07000000 + VPERMB -7(DI)(R8*2), X2, K1, X8 // 62326d098d8447f9ffffff + VPERMB X26, X9, K1, X8 // 621235098dc2 + VPERMB X19, X9, K1, X8 // 623235098dc3 + VPERMB X0, X9, K1, X8 // 627235098dc0 + VPERMB 7(SI)(DI*4), X9, K1, X8 // 627235098d84be07000000 + VPERMB -7(DI)(R8*2), X9, K1, X8 // 623235098d8447f9ffffff + VPERMB Y5, Y31, K7, Y22 // 62e205278df5 + VPERMB Y19, Y31, K7, Y22 // 62a205278df3 + VPERMB Y31, Y31, K7, Y22 // 628205278df7 + VPERMB 17(SP)(BP*1), Y31, K7, Y22 // 62e205278db42c11000000 + VPERMB -7(CX)(DX*8), Y31, K7, Y22 // 62e205278db4d1f9ffffff + VPERMB Y5, Y5, K7, Y22 // 62e2552f8df5 + VPERMB Y19, Y5, K7, Y22 // 62a2552f8df3 + VPERMB Y31, Y5, K7, Y22 // 6282552f8df7 + VPERMB 17(SP)(BP*1), Y5, K7, Y22 // 62e2552f8db42c11000000 + VPERMB -7(CX)(DX*8), Y5, K7, Y22 // 62e2552f8db4d1f9ffffff + VPERMB Y5, Y0, K7, Y22 // 62e27d2f8df5 + VPERMB Y19, Y0, K7, Y22 // 62a27d2f8df3 + VPERMB Y31, Y0, K7, Y22 // 62827d2f8df7 + VPERMB 17(SP)(BP*1), Y0, K7, Y22 // 62e27d2f8db42c11000000 + VPERMB -7(CX)(DX*8), Y0, K7, Y22 // 62e27d2f8db4d1f9ffffff + VPERMB Y5, Y31, K7, Y9 // 627205278dcd + VPERMB Y19, Y31, K7, Y9 // 623205278dcb + VPERMB Y31, Y31, K7, Y9 // 621205278dcf + VPERMB 17(SP)(BP*1), Y31, K7, Y9 // 627205278d8c2c11000000 + VPERMB -7(CX)(DX*8), Y31, K7, Y9 // 627205278d8cd1f9ffffff + VPERMB Y5, Y5, K7, Y9 // 6272552f8dcd + VPERMB Y19, Y5, K7, Y9 // 6232552f8dcb + VPERMB Y31, Y5, K7, 
Y9 // 6212552f8dcf + VPERMB 17(SP)(BP*1), Y5, K7, Y9 // 6272552f8d8c2c11000000 + VPERMB -7(CX)(DX*8), Y5, K7, Y9 // 6272552f8d8cd1f9ffffff + VPERMB Y5, Y0, K7, Y9 // 62727d2f8dcd + VPERMB Y19, Y0, K7, Y9 // 62327d2f8dcb + VPERMB Y31, Y0, K7, Y9 // 62127d2f8dcf + VPERMB 17(SP)(BP*1), Y0, K7, Y9 // 62727d2f8d8c2c11000000 + VPERMB -7(CX)(DX*8), Y0, K7, Y9 // 62727d2f8d8cd1f9ffffff + VPERMB Y5, Y31, K7, Y23 // 62e205278dfd + VPERMB Y19, Y31, K7, Y23 // 62a205278dfb + VPERMB Y31, Y31, K7, Y23 // 628205278dff + VPERMB 17(SP)(BP*1), Y31, K7, Y23 // 62e205278dbc2c11000000 + VPERMB -7(CX)(DX*8), Y31, K7, Y23 // 62e205278dbcd1f9ffffff + VPERMB Y5, Y5, K7, Y23 // 62e2552f8dfd + VPERMB Y19, Y5, K7, Y23 // 62a2552f8dfb + VPERMB Y31, Y5, K7, Y23 // 6282552f8dff + VPERMB 17(SP)(BP*1), Y5, K7, Y23 // 62e2552f8dbc2c11000000 + VPERMB -7(CX)(DX*8), Y5, K7, Y23 // 62e2552f8dbcd1f9ffffff + VPERMB Y5, Y0, K7, Y23 // 62e27d2f8dfd + VPERMB Y19, Y0, K7, Y23 // 62a27d2f8dfb + VPERMB Y31, Y0, K7, Y23 // 62827d2f8dff + VPERMB 17(SP)(BP*1), Y0, K7, Y23 // 62e27d2f8dbc2c11000000 + VPERMB -7(CX)(DX*8), Y0, K7, Y23 // 62e27d2f8dbcd1f9ffffff + VPERMB Z3, Z8, K1, Z3 // 62f23d498ddb + VPERMB Z27, Z8, K1, Z3 // 62923d498ddb + VPERMB 7(AX), Z8, K1, Z3 // 62f23d498d9807000000 + VPERMB (DI), Z8, K1, Z3 // 62f23d498d1f + VPERMB Z3, Z2, K1, Z3 // 62f26d498ddb + VPERMB Z27, Z2, K1, Z3 // 62926d498ddb + VPERMB 7(AX), Z2, K1, Z3 // 62f26d498d9807000000 + VPERMB (DI), Z2, K1, Z3 // 62f26d498d1f + VPERMB Z3, Z8, K1, Z21 // 62e23d498deb + VPERMB Z27, Z8, K1, Z21 // 62823d498deb + VPERMB 7(AX), Z8, K1, Z21 // 62e23d498da807000000 + VPERMB (DI), Z8, K1, Z21 // 62e23d498d2f + VPERMB Z3, Z2, K1, Z21 // 62e26d498deb + VPERMB Z27, Z2, K1, Z21 // 62826d498deb + VPERMB 7(AX), Z2, K1, Z21 // 62e26d498da807000000 + VPERMB (DI), Z2, K1, Z21 // 62e26d498d2f + VPERMI2B X15, X8, K7, X31 // 62423d0f75ff + VPERMI2B X0, X8, K7, X31 // 62623d0f75f8 + VPERMI2B X16, X8, K7, X31 // 62223d0f75f8 + VPERMI2B 17(SP), X8, K7, X31 // 
62623d0f75bc2411000000 + VPERMI2B -17(BP)(SI*4), X8, K7, X31 // 62623d0f75bcb5efffffff + VPERMI2B X15, X1, K7, X31 // 6242750f75ff + VPERMI2B X0, X1, K7, X31 // 6262750f75f8 + VPERMI2B X16, X1, K7, X31 // 6222750f75f8 + VPERMI2B 17(SP), X1, K7, X31 // 6262750f75bc2411000000 + VPERMI2B -17(BP)(SI*4), X1, K7, X31 // 6262750f75bcb5efffffff + VPERMI2B X15, X0, K7, X31 // 62427d0f75ff + VPERMI2B X0, X0, K7, X31 // 62627d0f75f8 + VPERMI2B X16, X0, K7, X31 // 62227d0f75f8 + VPERMI2B 17(SP), X0, K7, X31 // 62627d0f75bc2411000000 + VPERMI2B -17(BP)(SI*4), X0, K7, X31 // 62627d0f75bcb5efffffff + VPERMI2B X15, X8, K7, X16 // 62c23d0f75c7 + VPERMI2B X0, X8, K7, X16 // 62e23d0f75c0 + VPERMI2B X16, X8, K7, X16 // 62a23d0f75c0 + VPERMI2B 17(SP), X8, K7, X16 // 62e23d0f75842411000000 + VPERMI2B -17(BP)(SI*4), X8, K7, X16 // 62e23d0f7584b5efffffff + VPERMI2B X15, X1, K7, X16 // 62c2750f75c7 + VPERMI2B X0, X1, K7, X16 // 62e2750f75c0 + VPERMI2B X16, X1, K7, X16 // 62a2750f75c0 + VPERMI2B 17(SP), X1, K7, X16 // 62e2750f75842411000000 + VPERMI2B -17(BP)(SI*4), X1, K7, X16 // 62e2750f7584b5efffffff + VPERMI2B X15, X0, K7, X16 // 62c27d0f75c7 + VPERMI2B X0, X0, K7, X16 // 62e27d0f75c0 + VPERMI2B X16, X0, K7, X16 // 62a27d0f75c0 + VPERMI2B 17(SP), X0, K7, X16 // 62e27d0f75842411000000 + VPERMI2B -17(BP)(SI*4), X0, K7, X16 // 62e27d0f7584b5efffffff + VPERMI2B X15, X8, K7, X7 // 62d23d0f75ff + VPERMI2B X0, X8, K7, X7 // 62f23d0f75f8 + VPERMI2B X16, X8, K7, X7 // 62b23d0f75f8 + VPERMI2B 17(SP), X8, K7, X7 // 62f23d0f75bc2411000000 + VPERMI2B -17(BP)(SI*4), X8, K7, X7 // 62f23d0f75bcb5efffffff + VPERMI2B X15, X1, K7, X7 // 62d2750f75ff + VPERMI2B X0, X1, K7, X7 // 62f2750f75f8 + VPERMI2B X16, X1, K7, X7 // 62b2750f75f8 + VPERMI2B 17(SP), X1, K7, X7 // 62f2750f75bc2411000000 + VPERMI2B -17(BP)(SI*4), X1, K7, X7 // 62f2750f75bcb5efffffff + VPERMI2B X15, X0, K7, X7 // 62d27d0f75ff + VPERMI2B X0, X0, K7, X7 // 62f27d0f75f8 + VPERMI2B X16, X0, K7, X7 // 62b27d0f75f8 + VPERMI2B 17(SP), X0, K7, X7 
// 62f27d0f75bc2411000000 + VPERMI2B -17(BP)(SI*4), X0, K7, X7 // 62f27d0f75bcb5efffffff + VPERMI2B Y18, Y15, K2, Y2 // 62b2052a75d2 + VPERMI2B Y24, Y15, K2, Y2 // 6292052a75d0 + VPERMI2B Y9, Y15, K2, Y2 // 62d2052a75d1 + VPERMI2B 15(R8)(R14*1), Y15, K2, Y2 // 6292052a7594300f000000 + VPERMI2B 15(R8)(R14*2), Y15, K2, Y2 // 6292052a7594700f000000 + VPERMI2B Y18, Y22, K2, Y2 // 62b24d2275d2 + VPERMI2B Y24, Y22, K2, Y2 // 62924d2275d0 + VPERMI2B Y9, Y22, K2, Y2 // 62d24d2275d1 + VPERMI2B 15(R8)(R14*1), Y22, K2, Y2 // 62924d227594300f000000 + VPERMI2B 15(R8)(R14*2), Y22, K2, Y2 // 62924d227594700f000000 + VPERMI2B Y18, Y20, K2, Y2 // 62b25d2275d2 + VPERMI2B Y24, Y20, K2, Y2 // 62925d2275d0 + VPERMI2B Y9, Y20, K2, Y2 // 62d25d2275d1 + VPERMI2B 15(R8)(R14*1), Y20, K2, Y2 // 62925d227594300f000000 + VPERMI2B 15(R8)(R14*2), Y20, K2, Y2 // 62925d227594700f000000 + VPERMI2B Y18, Y15, K2, Y13 // 6232052a75ea + VPERMI2B Y24, Y15, K2, Y13 // 6212052a75e8 + VPERMI2B Y9, Y15, K2, Y13 // 6252052a75e9 + VPERMI2B 15(R8)(R14*1), Y15, K2, Y13 // 6212052a75ac300f000000 + VPERMI2B 15(R8)(R14*2), Y15, K2, Y13 // 6212052a75ac700f000000 + VPERMI2B Y18, Y22, K2, Y13 // 62324d2275ea + VPERMI2B Y24, Y22, K2, Y13 // 62124d2275e8 + VPERMI2B Y9, Y22, K2, Y13 // 62524d2275e9 + VPERMI2B 15(R8)(R14*1), Y22, K2, Y13 // 62124d2275ac300f000000 + VPERMI2B 15(R8)(R14*2), Y22, K2, Y13 // 62124d2275ac700f000000 + VPERMI2B Y18, Y20, K2, Y13 // 62325d2275ea + VPERMI2B Y24, Y20, K2, Y13 // 62125d2275e8 + VPERMI2B Y9, Y20, K2, Y13 // 62525d2275e9 + VPERMI2B 15(R8)(R14*1), Y20, K2, Y13 // 62125d2275ac300f000000 + VPERMI2B 15(R8)(R14*2), Y20, K2, Y13 // 62125d2275ac700f000000 + VPERMI2B Y18, Y15, K2, Y27 // 6222052a75da + VPERMI2B Y24, Y15, K2, Y27 // 6202052a75d8 + VPERMI2B Y9, Y15, K2, Y27 // 6242052a75d9 + VPERMI2B 15(R8)(R14*1), Y15, K2, Y27 // 6202052a759c300f000000 + VPERMI2B 15(R8)(R14*2), Y15, K2, Y27 // 6202052a759c700f000000 + VPERMI2B Y18, Y22, K2, Y27 // 62224d2275da + VPERMI2B Y24, Y22, K2, Y27 // 
62024d2275d8 + VPERMI2B Y9, Y22, K2, Y27 // 62424d2275d9 + VPERMI2B 15(R8)(R14*1), Y22, K2, Y27 // 62024d22759c300f000000 + VPERMI2B 15(R8)(R14*2), Y22, K2, Y27 // 62024d22759c700f000000 + VPERMI2B Y18, Y20, K2, Y27 // 62225d2275da + VPERMI2B Y24, Y20, K2, Y27 // 62025d2275d8 + VPERMI2B Y9, Y20, K2, Y27 // 62425d2275d9 + VPERMI2B 15(R8)(R14*1), Y20, K2, Y27 // 62025d22759c300f000000 + VPERMI2B 15(R8)(R14*2), Y20, K2, Y27 // 62025d22759c700f000000 + VPERMI2B Z12, Z9, K4, Z3 // 62d2354c75dc + VPERMI2B Z22, Z9, K4, Z3 // 62b2354c75de + VPERMI2B -17(BP)(SI*8), Z9, K4, Z3 // 62f2354c759cf5efffffff + VPERMI2B (R15), Z9, K4, Z3 // 62d2354c751f + VPERMI2B Z12, Z19, K4, Z3 // 62d2654475dc + VPERMI2B Z22, Z19, K4, Z3 // 62b2654475de + VPERMI2B -17(BP)(SI*8), Z19, K4, Z3 // 62f26544759cf5efffffff + VPERMI2B (R15), Z19, K4, Z3 // 62d26544751f + VPERMI2B Z12, Z9, K4, Z30 // 6242354c75f4 + VPERMI2B Z22, Z9, K4, Z30 // 6222354c75f6 + VPERMI2B -17(BP)(SI*8), Z9, K4, Z30 // 6262354c75b4f5efffffff + VPERMI2B (R15), Z9, K4, Z30 // 6242354c7537 + VPERMI2B Z12, Z19, K4, Z30 // 6242654475f4 + VPERMI2B Z22, Z19, K4, Z30 // 6222654475f6 + VPERMI2B -17(BP)(SI*8), Z19, K4, Z30 // 6262654475b4f5efffffff + VPERMI2B (R15), Z19, K4, Z30 // 624265447537 + VPERMT2B X2, X0, K7, X20 // 62e27d0f7de2 + VPERMT2B X8, X0, K7, X20 // 62c27d0f7de0 + VPERMT2B X9, X0, K7, X20 // 62c27d0f7de1 + VPERMT2B (BX), X0, K7, X20 // 62e27d0f7d23 + VPERMT2B -17(BP)(SI*1), X0, K7, X20 // 62e27d0f7da435efffffff + VPERMT2B X2, X9, K7, X20 // 62e2350f7de2 + VPERMT2B X8, X9, K7, X20 // 62c2350f7de0 + VPERMT2B X9, X9, K7, X20 // 62c2350f7de1 + VPERMT2B (BX), X9, K7, X20 // 62e2350f7d23 + VPERMT2B -17(BP)(SI*1), X9, K7, X20 // 62e2350f7da435efffffff + VPERMT2B X2, X13, K7, X20 // 62e2150f7de2 + VPERMT2B X8, X13, K7, X20 // 62c2150f7de0 + VPERMT2B X9, X13, K7, X20 // 62c2150f7de1 + VPERMT2B (BX), X13, K7, X20 // 62e2150f7d23 + VPERMT2B -17(BP)(SI*1), X13, K7, X20 // 62e2150f7da435efffffff + VPERMT2B X2, X0, K7, X5 // 
62f27d0f7dea + VPERMT2B X8, X0, K7, X5 // 62d27d0f7de8 + VPERMT2B X9, X0, K7, X5 // 62d27d0f7de9 + VPERMT2B (BX), X0, K7, X5 // 62f27d0f7d2b + VPERMT2B -17(BP)(SI*1), X0, K7, X5 // 62f27d0f7dac35efffffff + VPERMT2B X2, X9, K7, X5 // 62f2350f7dea + VPERMT2B X8, X9, K7, X5 // 62d2350f7de8 + VPERMT2B X9, X9, K7, X5 // 62d2350f7de9 + VPERMT2B (BX), X9, K7, X5 // 62f2350f7d2b + VPERMT2B -17(BP)(SI*1), X9, K7, X5 // 62f2350f7dac35efffffff + VPERMT2B X2, X13, K7, X5 // 62f2150f7dea + VPERMT2B X8, X13, K7, X5 // 62d2150f7de8 + VPERMT2B X9, X13, K7, X5 // 62d2150f7de9 + VPERMT2B (BX), X13, K7, X5 // 62f2150f7d2b + VPERMT2B -17(BP)(SI*1), X13, K7, X5 // 62f2150f7dac35efffffff + VPERMT2B X2, X0, K7, X25 // 62627d0f7dca + VPERMT2B X8, X0, K7, X25 // 62427d0f7dc8 + VPERMT2B X9, X0, K7, X25 // 62427d0f7dc9 + VPERMT2B (BX), X0, K7, X25 // 62627d0f7d0b + VPERMT2B -17(BP)(SI*1), X0, K7, X25 // 62627d0f7d8c35efffffff + VPERMT2B X2, X9, K7, X25 // 6262350f7dca + VPERMT2B X8, X9, K7, X25 // 6242350f7dc8 + VPERMT2B X9, X9, K7, X25 // 6242350f7dc9 + VPERMT2B (BX), X9, K7, X25 // 6262350f7d0b + VPERMT2B -17(BP)(SI*1), X9, K7, X25 // 6262350f7d8c35efffffff + VPERMT2B X2, X13, K7, X25 // 6262150f7dca + VPERMT2B X8, X13, K7, X25 // 6242150f7dc8 + VPERMT2B X9, X13, K7, X25 // 6242150f7dc9 + VPERMT2B (BX), X13, K7, X25 // 6262150f7d0b + VPERMT2B -17(BP)(SI*1), X13, K7, X25 // 6262150f7d8c35efffffff + VPERMT2B Y14, Y2, K6, Y18 // 62c26d2e7dd6 + VPERMT2B Y8, Y2, K6, Y18 // 62c26d2e7dd0 + VPERMT2B Y20, Y2, K6, Y18 // 62a26d2e7dd4 + VPERMT2B 7(SI)(DI*4), Y2, K6, Y18 // 62e26d2e7d94be07000000 + VPERMT2B -7(DI)(R8*2), Y2, K6, Y18 // 62a26d2e7d9447f9ffffff + VPERMT2B Y14, Y7, K6, Y18 // 62c2452e7dd6 + VPERMT2B Y8, Y7, K6, Y18 // 62c2452e7dd0 + VPERMT2B Y20, Y7, K6, Y18 // 62a2452e7dd4 + VPERMT2B 7(SI)(DI*4), Y7, K6, Y18 // 62e2452e7d94be07000000 + VPERMT2B -7(DI)(R8*2), Y7, K6, Y18 // 62a2452e7d9447f9ffffff + VPERMT2B Y14, Y21, K6, Y18 // 62c255267dd6 + VPERMT2B Y8, Y21, K6, Y18 // 62c255267dd0 + 
VPERMT2B Y20, Y21, K6, Y18 // 62a255267dd4 + VPERMT2B 7(SI)(DI*4), Y21, K6, Y18 // 62e255267d94be07000000 + VPERMT2B -7(DI)(R8*2), Y21, K6, Y18 // 62a255267d9447f9ffffff + VPERMT2B Y14, Y2, K6, Y3 // 62d26d2e7dde + VPERMT2B Y8, Y2, K6, Y3 // 62d26d2e7dd8 + VPERMT2B Y20, Y2, K6, Y3 // 62b26d2e7ddc + VPERMT2B 7(SI)(DI*4), Y2, K6, Y3 // 62f26d2e7d9cbe07000000 + VPERMT2B -7(DI)(R8*2), Y2, K6, Y3 // 62b26d2e7d9c47f9ffffff + VPERMT2B Y14, Y7, K6, Y3 // 62d2452e7dde + VPERMT2B Y8, Y7, K6, Y3 // 62d2452e7dd8 + VPERMT2B Y20, Y7, K6, Y3 // 62b2452e7ddc + VPERMT2B 7(SI)(DI*4), Y7, K6, Y3 // 62f2452e7d9cbe07000000 + VPERMT2B -7(DI)(R8*2), Y7, K6, Y3 // 62b2452e7d9c47f9ffffff + VPERMT2B Y14, Y21, K6, Y3 // 62d255267dde + VPERMT2B Y8, Y21, K6, Y3 // 62d255267dd8 + VPERMT2B Y20, Y21, K6, Y3 // 62b255267ddc + VPERMT2B 7(SI)(DI*4), Y21, K6, Y3 // 62f255267d9cbe07000000 + VPERMT2B -7(DI)(R8*2), Y21, K6, Y3 // 62b255267d9c47f9ffffff + VPERMT2B Y14, Y2, K6, Y24 // 62426d2e7dc6 + VPERMT2B Y8, Y2, K6, Y24 // 62426d2e7dc0 + VPERMT2B Y20, Y2, K6, Y24 // 62226d2e7dc4 + VPERMT2B 7(SI)(DI*4), Y2, K6, Y24 // 62626d2e7d84be07000000 + VPERMT2B -7(DI)(R8*2), Y2, K6, Y24 // 62226d2e7d8447f9ffffff + VPERMT2B Y14, Y7, K6, Y24 // 6242452e7dc6 + VPERMT2B Y8, Y7, K6, Y24 // 6242452e7dc0 + VPERMT2B Y20, Y7, K6, Y24 // 6222452e7dc4 + VPERMT2B 7(SI)(DI*4), Y7, K6, Y24 // 6262452e7d84be07000000 + VPERMT2B -7(DI)(R8*2), Y7, K6, Y24 // 6222452e7d8447f9ffffff + VPERMT2B Y14, Y21, K6, Y24 // 624255267dc6 + VPERMT2B Y8, Y21, K6, Y24 // 624255267dc0 + VPERMT2B Y20, Y21, K6, Y24 // 622255267dc4 + VPERMT2B 7(SI)(DI*4), Y21, K6, Y24 // 626255267d84be07000000 + VPERMT2B -7(DI)(R8*2), Y21, K6, Y24 // 622255267d8447f9ffffff + VPERMT2B Z20, Z1, K3, Z6 // 62b2754b7df4 + VPERMT2B Z9, Z1, K3, Z6 // 62d2754b7df1 + VPERMT2B (CX), Z1, K3, Z6 // 62f2754b7d31 + VPERMT2B 99(R15), Z1, K3, Z6 // 62d2754b7db763000000 + VPERMT2B Z20, Z9, K3, Z6 // 62b2354b7df4 + VPERMT2B Z9, Z9, K3, Z6 // 62d2354b7df1 + VPERMT2B (CX), Z9, K3, Z6 
// 62f2354b7d31 + VPERMT2B 99(R15), Z9, K3, Z6 // 62d2354b7db763000000 + VPERMT2B Z20, Z1, K3, Z9 // 6232754b7dcc + VPERMT2B Z9, Z1, K3, Z9 // 6252754b7dc9 + VPERMT2B (CX), Z1, K3, Z9 // 6272754b7d09 + VPERMT2B 99(R15), Z1, K3, Z9 // 6252754b7d8f63000000 + VPERMT2B Z20, Z9, K3, Z9 // 6232354b7dcc + VPERMT2B Z9, Z9, K3, Z9 // 6252354b7dc9 + VPERMT2B (CX), Z9, K3, Z9 // 6272354b7d09 + VPERMT2B 99(R15), Z9, K3, Z9 // 6252354b7d8f63000000 + VPMULTISHIFTQB X9, X24, K5, X7 // 62d2bd0583f9 + VPMULTISHIFTQB X7, X24, K5, X7 // 62f2bd0583ff + VPMULTISHIFTQB X14, X24, K5, X7 // 62d2bd0583fe + VPMULTISHIFTQB 17(SP)(BP*1), X24, K5, X7 // 62f2bd0583bc2c11000000 + VPMULTISHIFTQB -7(CX)(DX*8), X24, K5, X7 // 62f2bd0583bcd1f9ffffff + VPMULTISHIFTQB X9, X20, K5, X7 // 62d2dd0583f9 + VPMULTISHIFTQB X7, X20, K5, X7 // 62f2dd0583ff + VPMULTISHIFTQB X14, X20, K5, X7 // 62d2dd0583fe + VPMULTISHIFTQB 17(SP)(BP*1), X20, K5, X7 // 62f2dd0583bc2c11000000 + VPMULTISHIFTQB -7(CX)(DX*8), X20, K5, X7 // 62f2dd0583bcd1f9ffffff + VPMULTISHIFTQB X9, X7, K5, X7 // 62d2c50d83f9 + VPMULTISHIFTQB X7, X7, K5, X7 // 62f2c50d83ff + VPMULTISHIFTQB X14, X7, K5, X7 // 62d2c50d83fe + VPMULTISHIFTQB 17(SP)(BP*1), X7, K5, X7 // 62f2c50d83bc2c11000000 + VPMULTISHIFTQB -7(CX)(DX*8), X7, K5, X7 // 62f2c50d83bcd1f9ffffff + VPMULTISHIFTQB X9, X24, K5, X0 // 62d2bd0583c1 + VPMULTISHIFTQB X7, X24, K5, X0 // 62f2bd0583c7 + VPMULTISHIFTQB X14, X24, K5, X0 // 62d2bd0583c6 + VPMULTISHIFTQB 17(SP)(BP*1), X24, K5, X0 // 62f2bd0583842c11000000 + VPMULTISHIFTQB -7(CX)(DX*8), X24, K5, X0 // 62f2bd058384d1f9ffffff + VPMULTISHIFTQB X9, X20, K5, X0 // 62d2dd0583c1 + VPMULTISHIFTQB X7, X20, K5, X0 // 62f2dd0583c7 + VPMULTISHIFTQB X14, X20, K5, X0 // 62d2dd0583c6 + VPMULTISHIFTQB 17(SP)(BP*1), X20, K5, X0 // 62f2dd0583842c11000000 + VPMULTISHIFTQB -7(CX)(DX*8), X20, K5, X0 // 62f2dd058384d1f9ffffff + VPMULTISHIFTQB X9, X7, K5, X0 // 62d2c50d83c1 + VPMULTISHIFTQB X7, X7, K5, X0 // 62f2c50d83c7 + VPMULTISHIFTQB X14, X7, K5, X0 // 
62d2c50d83c6 + VPMULTISHIFTQB 17(SP)(BP*1), X7, K5, X0 // 62f2c50d83842c11000000 + VPMULTISHIFTQB -7(CX)(DX*8), X7, K5, X0 // 62f2c50d8384d1f9ffffff + VPMULTISHIFTQB Y16, Y30, K7, Y12 // 62328d2783e0 + VPMULTISHIFTQB Y1, Y30, K7, Y12 // 62728d2783e1 + VPMULTISHIFTQB Y30, Y30, K7, Y12 // 62128d2783e6 + VPMULTISHIFTQB 17(SP)(BP*2), Y30, K7, Y12 // 62728d2783a46c11000000 + VPMULTISHIFTQB -7(DI)(R8*4), Y30, K7, Y12 // 62328d2783a487f9ffffff + VPMULTISHIFTQB Y16, Y26, K7, Y12 // 6232ad2783e0 + VPMULTISHIFTQB Y1, Y26, K7, Y12 // 6272ad2783e1 + VPMULTISHIFTQB Y30, Y26, K7, Y12 // 6212ad2783e6 + VPMULTISHIFTQB 17(SP)(BP*2), Y26, K7, Y12 // 6272ad2783a46c11000000 + VPMULTISHIFTQB -7(DI)(R8*4), Y26, K7, Y12 // 6232ad2783a487f9ffffff + VPMULTISHIFTQB Y16, Y7, K7, Y12 // 6232c52f83e0 + VPMULTISHIFTQB Y1, Y7, K7, Y12 // 6272c52f83e1 + VPMULTISHIFTQB Y30, Y7, K7, Y12 // 6212c52f83e6 + VPMULTISHIFTQB 17(SP)(BP*2), Y7, K7, Y12 // 6272c52f83a46c11000000 + VPMULTISHIFTQB -7(DI)(R8*4), Y7, K7, Y12 // 6232c52f83a487f9ffffff + VPMULTISHIFTQB Y16, Y30, K7, Y21 // 62a28d2783e8 + VPMULTISHIFTQB Y1, Y30, K7, Y21 // 62e28d2783e9 + VPMULTISHIFTQB Y30, Y30, K7, Y21 // 62828d2783ee + VPMULTISHIFTQB 17(SP)(BP*2), Y30, K7, Y21 // 62e28d2783ac6c11000000 + VPMULTISHIFTQB -7(DI)(R8*4), Y30, K7, Y21 // 62a28d2783ac87f9ffffff + VPMULTISHIFTQB Y16, Y26, K7, Y21 // 62a2ad2783e8 + VPMULTISHIFTQB Y1, Y26, K7, Y21 // 62e2ad2783e9 + VPMULTISHIFTQB Y30, Y26, K7, Y21 // 6282ad2783ee + VPMULTISHIFTQB 17(SP)(BP*2), Y26, K7, Y21 // 62e2ad2783ac6c11000000 + VPMULTISHIFTQB -7(DI)(R8*4), Y26, K7, Y21 // 62a2ad2783ac87f9ffffff + VPMULTISHIFTQB Y16, Y7, K7, Y21 // 62a2c52f83e8 + VPMULTISHIFTQB Y1, Y7, K7, Y21 // 62e2c52f83e9 + VPMULTISHIFTQB Y30, Y7, K7, Y21 // 6282c52f83ee + VPMULTISHIFTQB 17(SP)(BP*2), Y7, K7, Y21 // 62e2c52f83ac6c11000000 + VPMULTISHIFTQB -7(DI)(R8*4), Y7, K7, Y21 // 62a2c52f83ac87f9ffffff + VPMULTISHIFTQB Y16, Y30, K7, Y14 // 62328d2783f0 + VPMULTISHIFTQB Y1, Y30, K7, Y14 // 62728d2783f1 + 
VPMULTISHIFTQB Y30, Y30, K7, Y14 // 62128d2783f6 + VPMULTISHIFTQB 17(SP)(BP*2), Y30, K7, Y14 // 62728d2783b46c11000000 + VPMULTISHIFTQB -7(DI)(R8*4), Y30, K7, Y14 // 62328d2783b487f9ffffff + VPMULTISHIFTQB Y16, Y26, K7, Y14 // 6232ad2783f0 + VPMULTISHIFTQB Y1, Y26, K7, Y14 // 6272ad2783f1 + VPMULTISHIFTQB Y30, Y26, K7, Y14 // 6212ad2783f6 + VPMULTISHIFTQB 17(SP)(BP*2), Y26, K7, Y14 // 6272ad2783b46c11000000 + VPMULTISHIFTQB -7(DI)(R8*4), Y26, K7, Y14 // 6232ad2783b487f9ffffff + VPMULTISHIFTQB Y16, Y7, K7, Y14 // 6232c52f83f0 + VPMULTISHIFTQB Y1, Y7, K7, Y14 // 6272c52f83f1 + VPMULTISHIFTQB Y30, Y7, K7, Y14 // 6212c52f83f6 + VPMULTISHIFTQB 17(SP)(BP*2), Y7, K7, Y14 // 6272c52f83b46c11000000 + VPMULTISHIFTQB -7(DI)(R8*4), Y7, K7, Y14 // 6232c52f83b487f9ffffff + VPMULTISHIFTQB Z7, Z2, K7, Z18 // 62e2ed4f83d7 + VPMULTISHIFTQB Z13, Z2, K7, Z18 // 62c2ed4f83d5 + VPMULTISHIFTQB 7(AX)(CX*4), Z2, K7, Z18 // 62e2ed4f83948807000000 + VPMULTISHIFTQB 7(AX)(CX*1), Z2, K7, Z18 // 62e2ed4f83940807000000 + VPMULTISHIFTQB Z7, Z21, K7, Z18 // 62e2d54783d7 + VPMULTISHIFTQB Z13, Z21, K7, Z18 // 62c2d54783d5 + VPMULTISHIFTQB 7(AX)(CX*4), Z21, K7, Z18 // 62e2d54783948807000000 + VPMULTISHIFTQB 7(AX)(CX*1), Z21, K7, Z18 // 62e2d54783940807000000 + VPMULTISHIFTQB Z7, Z2, K7, Z24 // 6262ed4f83c7 + VPMULTISHIFTQB Z13, Z2, K7, Z24 // 6242ed4f83c5 + VPMULTISHIFTQB 7(AX)(CX*4), Z2, K7, Z24 // 6262ed4f83848807000000 + VPMULTISHIFTQB 7(AX)(CX*1), Z2, K7, Z24 // 6262ed4f83840807000000 + VPMULTISHIFTQB Z7, Z21, K7, Z24 // 6262d54783c7 + VPMULTISHIFTQB Z13, Z21, K7, Z24 // 6242d54783c5 + VPMULTISHIFTQB 7(AX)(CX*4), Z21, K7, Z24 // 6262d54783848807000000 + VPMULTISHIFTQB 7(AX)(CX*1), Z21, K7, Z24 // 6262d54783840807000000 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_vbmi2.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_vbmi2.s new file mode 100644 index 
0000000000000000000000000000000000000000..3f49fab161d86e9896ba9fbe11f3820ca9a9d06e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_vbmi2.s @@ -0,0 +1,1386 @@ +// Code generated by avx512test. DO NOT EDIT. + +#include "../../../../../../runtime/textflag.h" + +TEXT asmtest_avx512_vbmi2(SB), NOSPLIT, $0 + VPCOMPRESSB X7, K1, X15 // 62d27d0963ff + VPCOMPRESSB X13, K1, X15 // 62527d0963ef + VPCOMPRESSB X8, K1, X15 // 62527d0963c7 + VPCOMPRESSB X7, K1, X28 // 62927d0963fc + VPCOMPRESSB X13, K1, X28 // 62127d0963ec + VPCOMPRESSB X8, K1, X28 // 62127d0963c4 + VPCOMPRESSB X7, K1, -7(CX)(DX*1) // 62f27d09637c11f9 + VPCOMPRESSB X13, K1, -7(CX)(DX*1) // 62727d09636c11f9 + VPCOMPRESSB X8, K1, -7(CX)(DX*1) // 62727d09634411f9 + VPCOMPRESSB X7, K1, -15(R14)(R15*4) // 62927d09637cbef1 + VPCOMPRESSB X13, K1, -15(R14)(R15*4) // 62127d09636cbef1 + VPCOMPRESSB X8, K1, -15(R14)(R15*4) // 62127d096344bef1 + VPCOMPRESSB Y5, K1, Y8 // 62d27d2963e8 + VPCOMPRESSB Y24, K1, Y8 // 62427d2963c0 + VPCOMPRESSB Y21, K1, Y8 // 62c27d2963e8 + VPCOMPRESSB Y5, K1, Y11 // 62d27d2963eb + VPCOMPRESSB Y24, K1, Y11 // 62427d2963c3 + VPCOMPRESSB Y21, K1, Y11 // 62c27d2963eb + VPCOMPRESSB Y5, K1, Y24 // 62927d2963e8 + VPCOMPRESSB Y24, K1, Y24 // 62027d2963c0 + VPCOMPRESSB Y21, K1, Y24 // 62827d2963e8 + VPCOMPRESSB Y5, K1, -17(BP)(SI*8) // 62f27d29636cf5ef + VPCOMPRESSB Y24, K1, -17(BP)(SI*8) // 62627d296344f5ef + VPCOMPRESSB Y21, K1, -17(BP)(SI*8) // 62e27d29636cf5ef + VPCOMPRESSB Y5, K1, (R15) // 62d27d29632f + VPCOMPRESSB Y24, K1, (R15) // 62427d296307 + VPCOMPRESSB Y21, K1, (R15) // 62c27d29632f + VPCOMPRESSB Z2, K1, Z5 // 62f27d4963d5 + VPCOMPRESSB Z2, K1, Z23 // 62b27d4963d7 + VPCOMPRESSB Z2, K1, -17(BP) // 62f27d496355ef + VPCOMPRESSB Z2, K1, -15(R14)(R15*8) // 62927d496354fef1 + VPCOMPRESSW X20, K5, X20 // 62a2fd0d63e4 + VPCOMPRESSW X16, K5, X20 // 62a2fd0d63c4 + VPCOMPRESSW X12, K5, X20 // 6232fd0d63e4 + VPCOMPRESSW X20, K5, X24 // 6282fd0d63e0 + 
VPCOMPRESSW X16, K5, X24 // 6282fd0d63c0 + VPCOMPRESSW X12, K5, X24 // 6212fd0d63e0 + VPCOMPRESSW X20, K5, X7 // 62e2fd0d63e7 + VPCOMPRESSW X16, K5, X7 // 62e2fd0d63c7 + VPCOMPRESSW X12, K5, X7 // 6272fd0d63e7 + VPCOMPRESSW X20, K5, 17(SP)(BP*2) // 62e2fd0d63a46c11000000 + VPCOMPRESSW X16, K5, 17(SP)(BP*2) // 62e2fd0d63846c11000000 + VPCOMPRESSW X12, K5, 17(SP)(BP*2) // 6272fd0d63a46c11000000 + VPCOMPRESSW X20, K5, -7(DI)(R8*4) // 62a2fd0d63a487f9ffffff + VPCOMPRESSW X16, K5, -7(DI)(R8*4) // 62a2fd0d638487f9ffffff + VPCOMPRESSW X12, K5, -7(DI)(R8*4) // 6232fd0d63a487f9ffffff + VPCOMPRESSW Y18, K7, Y14 // 62c2fd2f63d6 + VPCOMPRESSW Y3, K7, Y14 // 62d2fd2f63de + VPCOMPRESSW Y24, K7, Y14 // 6242fd2f63c6 + VPCOMPRESSW Y18, K7, Y18 // 62a2fd2f63d2 + VPCOMPRESSW Y3, K7, Y18 // 62b2fd2f63da + VPCOMPRESSW Y24, K7, Y18 // 6222fd2f63c2 + VPCOMPRESSW Y18, K7, Y31 // 6282fd2f63d7 + VPCOMPRESSW Y3, K7, Y31 // 6292fd2f63df + VPCOMPRESSW Y24, K7, Y31 // 6202fd2f63c7 + VPCOMPRESSW Y18, K7, -7(DI)(R8*1) // 62a2fd2f639407f9ffffff + VPCOMPRESSW Y3, K7, -7(DI)(R8*1) // 62b2fd2f639c07f9ffffff + VPCOMPRESSW Y24, K7, -7(DI)(R8*1) // 6222fd2f638407f9ffffff + VPCOMPRESSW Y18, K7, (SP) // 62e2fd2f631424 + VPCOMPRESSW Y3, K7, (SP) // 62f2fd2f631c24 + VPCOMPRESSW Y24, K7, (SP) // 6262fd2f630424 + VPCOMPRESSW Z3, K7, Z26 // 6292fd4f63da + VPCOMPRESSW Z0, K7, Z26 // 6292fd4f63c2 + VPCOMPRESSW Z3, K7, Z3 // 62f2fd4f63db + VPCOMPRESSW Z0, K7, Z3 // 62f2fd4f63c3 + VPCOMPRESSW Z3, K7, 15(R8)(R14*8) // 6292fd4f639cf00f000000 + VPCOMPRESSW Z0, K7, 15(R8)(R14*8) // 6292fd4f6384f00f000000 + VPCOMPRESSW Z3, K7, -15(R14)(R15*2) // 6292fd4f639c7ef1ffffff + VPCOMPRESSW Z0, K7, -15(R14)(R15*2) // 6292fd4f63847ef1ffffff + VPEXPANDB X16, K1, X6 // 62b27d0962f0 + VPEXPANDB X28, K1, X6 // 62927d0962f4 + VPEXPANDB X8, K1, X6 // 62d27d0962f0 + VPEXPANDB 99(R15)(R15*4), K1, X6 // 62927d096274bf63 + VPEXPANDB 15(DX), K1, X6 // 62f27d0962720f + VPEXPANDB X16, K1, X22 // 62a27d0962f0 + VPEXPANDB X28, K1, X22 // 
62827d0962f4 + VPEXPANDB X8, K1, X22 // 62c27d0962f0 + VPEXPANDB 99(R15)(R15*4), K1, X22 // 62827d096274bf63 + VPEXPANDB 15(DX), K1, X22 // 62e27d0962720f + VPEXPANDB X16, K1, X12 // 62327d0962e0 + VPEXPANDB X28, K1, X12 // 62127d0962e4 + VPEXPANDB X8, K1, X12 // 62527d0962e0 + VPEXPANDB 99(R15)(R15*4), K1, X12 // 62127d096264bf63 + VPEXPANDB 15(DX), K1, X12 // 62727d0962620f + VPEXPANDB Y31, K1, Y27 // 62027d2962df + VPEXPANDB Y3, K1, Y27 // 62627d2962db + VPEXPANDB Y14, K1, Y27 // 62427d2962de + VPEXPANDB -7(DI)(R8*1), K1, Y27 // 62227d29625c07f9 + VPEXPANDB (SP), K1, Y27 // 62627d29621c24 + VPEXPANDB Y31, K1, Y0 // 62927d2962c7 + VPEXPANDB Y3, K1, Y0 // 62f27d2962c3 + VPEXPANDB Y14, K1, Y0 // 62d27d2962c6 + VPEXPANDB -7(DI)(R8*1), K1, Y0 // 62b27d29624407f9 + VPEXPANDB (SP), K1, Y0 // 62f27d29620424 + VPEXPANDB Y31, K1, Y11 // 62127d2962df + VPEXPANDB Y3, K1, Y11 // 62727d2962db + VPEXPANDB Y14, K1, Y11 // 62527d2962de + VPEXPANDB -7(DI)(R8*1), K1, Y11 // 62327d29625c07f9 + VPEXPANDB (SP), K1, Y11 // 62727d29621c24 + VPEXPANDB Z14, K1, Z15 // 62527d4962fe + VPEXPANDB Z27, K1, Z15 // 62127d4962fb + VPEXPANDB 15(R8)(R14*8), K1, Z15 // 62127d49627cf00f + VPEXPANDB -15(R14)(R15*2), K1, Z15 // 62127d49627c7ef1 + VPEXPANDB Z14, K1, Z12 // 62527d4962e6 + VPEXPANDB Z27, K1, Z12 // 62127d4962e3 + VPEXPANDB 15(R8)(R14*8), K1, Z12 // 62127d496264f00f + VPEXPANDB -15(R14)(R15*2), K1, Z12 // 62127d4962647ef1 + VPEXPANDW X2, K5, X18 // 62e2fd0d62d2 + VPEXPANDW X24, K5, X18 // 6282fd0d62d0 + VPEXPANDW -7(CX)(DX*1), K5, X18 // 62e2fd0d629411f9ffffff + VPEXPANDW -15(R14)(R15*4), K5, X18 // 6282fd0d6294bef1ffffff + VPEXPANDW X2, K5, X11 // 6272fd0d62da + VPEXPANDW X24, K5, X11 // 6212fd0d62d8 + VPEXPANDW -7(CX)(DX*1), K5, X11 // 6272fd0d629c11f9ffffff + VPEXPANDW -15(R14)(R15*4), K5, X11 // 6212fd0d629cbef1ffffff + VPEXPANDW X2, K5, X9 // 6272fd0d62ca + VPEXPANDW X24, K5, X9 // 6212fd0d62c8 + VPEXPANDW -7(CX)(DX*1), K5, X9 // 6272fd0d628c11f9ffffff + VPEXPANDW -15(R14)(R15*4), 
K5, X9 // 6212fd0d628cbef1ffffff + VPEXPANDW Y5, K7, Y19 // 62e2fd2f62dd + VPEXPANDW Y16, K7, Y19 // 62a2fd2f62d8 + VPEXPANDW Y2, K7, Y19 // 62e2fd2f62da + VPEXPANDW (AX), K7, Y19 // 62e2fd2f6218 + VPEXPANDW 7(SI), K7, Y19 // 62e2fd2f629e07000000 + VPEXPANDW Y5, K7, Y14 // 6272fd2f62f5 + VPEXPANDW Y16, K7, Y14 // 6232fd2f62f0 + VPEXPANDW Y2, K7, Y14 // 6272fd2f62f2 + VPEXPANDW (AX), K7, Y14 // 6272fd2f6230 + VPEXPANDW 7(SI), K7, Y14 // 6272fd2f62b607000000 + VPEXPANDW Y5, K7, Y21 // 62e2fd2f62ed + VPEXPANDW Y16, K7, Y21 // 62a2fd2f62e8 + VPEXPANDW Y2, K7, Y21 // 62e2fd2f62ea + VPEXPANDW (AX), K7, Y21 // 62e2fd2f6228 + VPEXPANDW 7(SI), K7, Y21 // 62e2fd2f62ae07000000 + VPEXPANDW Z26, K7, Z6 // 6292fd4f62f2 + VPEXPANDW Z14, K7, Z6 // 62d2fd4f62f6 + VPEXPANDW (SI), K7, Z6 // 62f2fd4f6236 + VPEXPANDW 7(SI)(DI*2), K7, Z6 // 62f2fd4f62b47e07000000 + VPEXPANDW Z26, K7, Z14 // 6212fd4f62f2 + VPEXPANDW Z14, K7, Z14 // 6252fd4f62f6 + VPEXPANDW (SI), K7, Z14 // 6272fd4f6236 + VPEXPANDW 7(SI)(DI*2), K7, Z14 // 6272fd4f62b47e07000000 + VPSHLDD $47, X8, X31, K4, X26 // 6243050471d02f + VPSHLDD $47, X1, X31, K4, X26 // 6263050471d12f + VPSHLDD $47, X0, X31, K4, X26 // 6263050471d02f + VPSHLDD $47, 7(SI)(DI*4), X31, K4, X26 // 626305047194be070000002f + VPSHLDD $47, -7(DI)(R8*2), X31, K4, X26 // 62230504719447f9ffffff2f + VPSHLDD $47, X8, X16, K4, X26 // 62437d0471d02f + VPSHLDD $47, X1, X16, K4, X26 // 62637d0471d12f + VPSHLDD $47, X0, X16, K4, X26 // 62637d0471d02f + VPSHLDD $47, 7(SI)(DI*4), X16, K4, X26 // 62637d047194be070000002f + VPSHLDD $47, -7(DI)(R8*2), X16, K4, X26 // 62237d04719447f9ffffff2f + VPSHLDD $47, X8, X7, K4, X26 // 6243450c71d02f + VPSHLDD $47, X1, X7, K4, X26 // 6263450c71d12f + VPSHLDD $47, X0, X7, K4, X26 // 6263450c71d02f + VPSHLDD $47, 7(SI)(DI*4), X7, K4, X26 // 6263450c7194be070000002f + VPSHLDD $47, -7(DI)(R8*2), X7, K4, X26 // 6223450c719447f9ffffff2f + VPSHLDD $47, X8, X31, K4, X19 // 62c3050471d82f + VPSHLDD $47, X1, X31, K4, X19 // 62e3050471d92f 
+ VPSHLDD $47, X0, X31, K4, X19 // 62e3050471d82f + VPSHLDD $47, 7(SI)(DI*4), X31, K4, X19 // 62e30504719cbe070000002f + VPSHLDD $47, -7(DI)(R8*2), X31, K4, X19 // 62a30504719c47f9ffffff2f + VPSHLDD $47, X8, X16, K4, X19 // 62c37d0471d82f + VPSHLDD $47, X1, X16, K4, X19 // 62e37d0471d92f + VPSHLDD $47, X0, X16, K4, X19 // 62e37d0471d82f + VPSHLDD $47, 7(SI)(DI*4), X16, K4, X19 // 62e37d04719cbe070000002f + VPSHLDD $47, -7(DI)(R8*2), X16, K4, X19 // 62a37d04719c47f9ffffff2f + VPSHLDD $47, X8, X7, K4, X19 // 62c3450c71d82f + VPSHLDD $47, X1, X7, K4, X19 // 62e3450c71d92f + VPSHLDD $47, X0, X7, K4, X19 // 62e3450c71d82f + VPSHLDD $47, 7(SI)(DI*4), X7, K4, X19 // 62e3450c719cbe070000002f + VPSHLDD $47, -7(DI)(R8*2), X7, K4, X19 // 62a3450c719c47f9ffffff2f + VPSHLDD $47, X8, X31, K4, X0 // 62d3050471c02f + VPSHLDD $47, X1, X31, K4, X0 // 62f3050471c12f + VPSHLDD $47, X0, X31, K4, X0 // 62f3050471c02f + VPSHLDD $47, 7(SI)(DI*4), X31, K4, X0 // 62f305047184be070000002f + VPSHLDD $47, -7(DI)(R8*2), X31, K4, X0 // 62b30504718447f9ffffff2f + VPSHLDD $47, X8, X16, K4, X0 // 62d37d0471c02f + VPSHLDD $47, X1, X16, K4, X0 // 62f37d0471c12f + VPSHLDD $47, X0, X16, K4, X0 // 62f37d0471c02f + VPSHLDD $47, 7(SI)(DI*4), X16, K4, X0 // 62f37d047184be070000002f + VPSHLDD $47, -7(DI)(R8*2), X16, K4, X0 // 62b37d04718447f9ffffff2f + VPSHLDD $47, X8, X7, K4, X0 // 62d3450c71c02f + VPSHLDD $47, X1, X7, K4, X0 // 62f3450c71c12f + VPSHLDD $47, X0, X7, K4, X0 // 62f3450c71c02f + VPSHLDD $47, 7(SI)(DI*4), X7, K4, X0 // 62f3450c7184be070000002f + VPSHLDD $47, -7(DI)(R8*2), X7, K4, X0 // 62b3450c718447f9ffffff2f + VPSHLDD $82, Y5, Y19, K1, Y3 // 62f3652171dd52 + VPSHLDD $82, Y16, Y19, K1, Y3 // 62b3652171d852 + VPSHLDD $82, Y2, Y19, K1, Y3 // 62f3652171da52 + VPSHLDD $82, (AX), Y19, K1, Y3 // 62f36521711852 + VPSHLDD $82, 7(SI), Y19, K1, Y3 // 62f36521719e0700000052 + VPSHLDD $82, Y5, Y14, K1, Y3 // 62f30d2971dd52 + VPSHLDD $82, Y16, Y14, K1, Y3 // 62b30d2971d852 + VPSHLDD $82, Y2, Y14, K1, Y3 
// 62f30d2971da52 + VPSHLDD $82, (AX), Y14, K1, Y3 // 62f30d29711852 + VPSHLDD $82, 7(SI), Y14, K1, Y3 // 62f30d29719e0700000052 + VPSHLDD $82, Y5, Y21, K1, Y3 // 62f3552171dd52 + VPSHLDD $82, Y16, Y21, K1, Y3 // 62b3552171d852 + VPSHLDD $82, Y2, Y21, K1, Y3 // 62f3552171da52 + VPSHLDD $82, (AX), Y21, K1, Y3 // 62f35521711852 + VPSHLDD $82, 7(SI), Y21, K1, Y3 // 62f35521719e0700000052 + VPSHLDD $82, Y5, Y19, K1, Y19 // 62e3652171dd52 + VPSHLDD $82, Y16, Y19, K1, Y19 // 62a3652171d852 + VPSHLDD $82, Y2, Y19, K1, Y19 // 62e3652171da52 + VPSHLDD $82, (AX), Y19, K1, Y19 // 62e36521711852 + VPSHLDD $82, 7(SI), Y19, K1, Y19 // 62e36521719e0700000052 + VPSHLDD $82, Y5, Y14, K1, Y19 // 62e30d2971dd52 + VPSHLDD $82, Y16, Y14, K1, Y19 // 62a30d2971d852 + VPSHLDD $82, Y2, Y14, K1, Y19 // 62e30d2971da52 + VPSHLDD $82, (AX), Y14, K1, Y19 // 62e30d29711852 + VPSHLDD $82, 7(SI), Y14, K1, Y19 // 62e30d29719e0700000052 + VPSHLDD $82, Y5, Y21, K1, Y19 // 62e3552171dd52 + VPSHLDD $82, Y16, Y21, K1, Y19 // 62a3552171d852 + VPSHLDD $82, Y2, Y21, K1, Y19 // 62e3552171da52 + VPSHLDD $82, (AX), Y21, K1, Y19 // 62e35521711852 + VPSHLDD $82, 7(SI), Y21, K1, Y19 // 62e35521719e0700000052 + VPSHLDD $82, Y5, Y19, K1, Y23 // 62e3652171fd52 + VPSHLDD $82, Y16, Y19, K1, Y23 // 62a3652171f852 + VPSHLDD $82, Y2, Y19, K1, Y23 // 62e3652171fa52 + VPSHLDD $82, (AX), Y19, K1, Y23 // 62e36521713852 + VPSHLDD $82, 7(SI), Y19, K1, Y23 // 62e3652171be0700000052 + VPSHLDD $82, Y5, Y14, K1, Y23 // 62e30d2971fd52 + VPSHLDD $82, Y16, Y14, K1, Y23 // 62a30d2971f852 + VPSHLDD $82, Y2, Y14, K1, Y23 // 62e30d2971fa52 + VPSHLDD $82, (AX), Y14, K1, Y23 // 62e30d29713852 + VPSHLDD $82, 7(SI), Y14, K1, Y23 // 62e30d2971be0700000052 + VPSHLDD $82, Y5, Y21, K1, Y23 // 62e3552171fd52 + VPSHLDD $82, Y16, Y21, K1, Y23 // 62a3552171f852 + VPSHLDD $82, Y2, Y21, K1, Y23 // 62e3552171fa52 + VPSHLDD $82, (AX), Y21, K1, Y23 // 62e35521713852 + VPSHLDD $82, 7(SI), Y21, K1, Y23 // 62e3552171be0700000052 + VPSHLDD $126, Z27, Z2, 
K3, Z21 // 62836d4b71eb7e + VPSHLDD $126, Z25, Z2, K3, Z21 // 62836d4b71e97e + VPSHLDD $126, 17(SP)(BP*1), Z2, K3, Z21 // 62e36d4b71ac2c110000007e + VPSHLDD $126, -7(CX)(DX*8), Z2, K3, Z21 // 62e36d4b71acd1f9ffffff7e + VPSHLDD $126, Z27, Z7, K3, Z21 // 6283454b71eb7e + VPSHLDD $126, Z25, Z7, K3, Z21 // 6283454b71e97e + VPSHLDD $126, 17(SP)(BP*1), Z7, K3, Z21 // 62e3454b71ac2c110000007e + VPSHLDD $126, -7(CX)(DX*8), Z7, K3, Z21 // 62e3454b71acd1f9ffffff7e + VPSHLDD $126, Z27, Z2, K3, Z9 // 62136d4b71cb7e + VPSHLDD $126, Z25, Z2, K3, Z9 // 62136d4b71c97e + VPSHLDD $126, 17(SP)(BP*1), Z2, K3, Z9 // 62736d4b718c2c110000007e + VPSHLDD $126, -7(CX)(DX*8), Z2, K3, Z9 // 62736d4b718cd1f9ffffff7e + VPSHLDD $126, Z27, Z7, K3, Z9 // 6213454b71cb7e + VPSHLDD $126, Z25, Z7, K3, Z9 // 6213454b71c97e + VPSHLDD $126, 17(SP)(BP*1), Z7, K3, Z9 // 6273454b718c2c110000007e + VPSHLDD $126, -7(CX)(DX*8), Z7, K3, Z9 // 6273454b718cd1f9ffffff7e + VPSHLDQ $94, X22, X21, K4, X15 // 6233d50471fe5e + VPSHLDQ $94, X7, X21, K4, X15 // 6273d50471ff5e + VPSHLDQ $94, X19, X21, K4, X15 // 6233d50471fb5e + VPSHLDQ $94, 17(SP), X21, K4, X15 // 6273d50471bc24110000005e + VPSHLDQ $94, -17(BP)(SI*4), X21, K4, X15 // 6273d50471bcb5efffffff5e + VPSHLDQ $94, X22, X0, K4, X15 // 6233fd0c71fe5e + VPSHLDQ $94, X7, X0, K4, X15 // 6273fd0c71ff5e + VPSHLDQ $94, X19, X0, K4, X15 // 6233fd0c71fb5e + VPSHLDQ $94, 17(SP), X0, K4, X15 // 6273fd0c71bc24110000005e + VPSHLDQ $94, -17(BP)(SI*4), X0, K4, X15 // 6273fd0c71bcb5efffffff5e + VPSHLDQ $94, X22, X28, K4, X15 // 62339d0471fe5e + VPSHLDQ $94, X7, X28, K4, X15 // 62739d0471ff5e + VPSHLDQ $94, X19, X28, K4, X15 // 62339d0471fb5e + VPSHLDQ $94, 17(SP), X28, K4, X15 // 62739d0471bc24110000005e + VPSHLDQ $94, -17(BP)(SI*4), X28, K4, X15 // 62739d0471bcb5efffffff5e + VPSHLDQ $94, X22, X21, K4, X0 // 62b3d50471c65e + VPSHLDQ $94, X7, X21, K4, X0 // 62f3d50471c75e + VPSHLDQ $94, X19, X21, K4, X0 // 62b3d50471c35e + VPSHLDQ $94, 17(SP), X21, K4, X0 // 
62f3d504718424110000005e + VPSHLDQ $94, -17(BP)(SI*4), X21, K4, X0 // 62f3d5047184b5efffffff5e + VPSHLDQ $94, X22, X0, K4, X0 // 62b3fd0c71c65e + VPSHLDQ $94, X7, X0, K4, X0 // 62f3fd0c71c75e + VPSHLDQ $94, X19, X0, K4, X0 // 62b3fd0c71c35e + VPSHLDQ $94, 17(SP), X0, K4, X0 // 62f3fd0c718424110000005e + VPSHLDQ $94, -17(BP)(SI*4), X0, K4, X0 // 62f3fd0c7184b5efffffff5e + VPSHLDQ $94, X22, X28, K4, X0 // 62b39d0471c65e + VPSHLDQ $94, X7, X28, K4, X0 // 62f39d0471c75e + VPSHLDQ $94, X19, X28, K4, X0 // 62b39d0471c35e + VPSHLDQ $94, 17(SP), X28, K4, X0 // 62f39d04718424110000005e + VPSHLDQ $94, -17(BP)(SI*4), X28, K4, X0 // 62f39d047184b5efffffff5e + VPSHLDQ $94, X22, X21, K4, X16 // 62a3d50471c65e + VPSHLDQ $94, X7, X21, K4, X16 // 62e3d50471c75e + VPSHLDQ $94, X19, X21, K4, X16 // 62a3d50471c35e + VPSHLDQ $94, 17(SP), X21, K4, X16 // 62e3d504718424110000005e + VPSHLDQ $94, -17(BP)(SI*4), X21, K4, X16 // 62e3d5047184b5efffffff5e + VPSHLDQ $94, X22, X0, K4, X16 // 62a3fd0c71c65e + VPSHLDQ $94, X7, X0, K4, X16 // 62e3fd0c71c75e + VPSHLDQ $94, X19, X0, K4, X16 // 62a3fd0c71c35e + VPSHLDQ $94, 17(SP), X0, K4, X16 // 62e3fd0c718424110000005e + VPSHLDQ $94, -17(BP)(SI*4), X0, K4, X16 // 62e3fd0c7184b5efffffff5e + VPSHLDQ $94, X22, X28, K4, X16 // 62a39d0471c65e + VPSHLDQ $94, X7, X28, K4, X16 // 62e39d0471c75e + VPSHLDQ $94, X19, X28, K4, X16 // 62a39d0471c35e + VPSHLDQ $94, 17(SP), X28, K4, X16 // 62e39d04718424110000005e + VPSHLDQ $94, -17(BP)(SI*4), X28, K4, X16 // 62e39d047184b5efffffff5e + VPSHLDQ $121, Y19, Y31, K5, Y21 // 62a3852571eb79 + VPSHLDQ $121, Y7, Y31, K5, Y21 // 62e3852571ef79 + VPSHLDQ $121, Y6, Y31, K5, Y21 // 62e3852571ee79 + VPSHLDQ $121, (BX), Y31, K5, Y21 // 62e38525712b79 + VPSHLDQ $121, -17(BP)(SI*1), Y31, K5, Y21 // 62e3852571ac35efffffff79 + VPSHLDQ $121, Y19, Y6, K5, Y21 // 62a3cd2d71eb79 + VPSHLDQ $121, Y7, Y6, K5, Y21 // 62e3cd2d71ef79 + VPSHLDQ $121, Y6, Y6, K5, Y21 // 62e3cd2d71ee79 + VPSHLDQ $121, (BX), Y6, K5, Y21 // 62e3cd2d712b79 + 
VPSHLDQ $121, -17(BP)(SI*1), Y6, K5, Y21 // 62e3cd2d71ac35efffffff79 + VPSHLDQ $121, Y19, Y11, K5, Y21 // 62a3a52d71eb79 + VPSHLDQ $121, Y7, Y11, K5, Y21 // 62e3a52d71ef79 + VPSHLDQ $121, Y6, Y11, K5, Y21 // 62e3a52d71ee79 + VPSHLDQ $121, (BX), Y11, K5, Y21 // 62e3a52d712b79 + VPSHLDQ $121, -17(BP)(SI*1), Y11, K5, Y21 // 62e3a52d71ac35efffffff79 + VPSHLDQ $121, Y19, Y31, K5, Y20 // 62a3852571e379 + VPSHLDQ $121, Y7, Y31, K5, Y20 // 62e3852571e779 + VPSHLDQ $121, Y6, Y31, K5, Y20 // 62e3852571e679 + VPSHLDQ $121, (BX), Y31, K5, Y20 // 62e38525712379 + VPSHLDQ $121, -17(BP)(SI*1), Y31, K5, Y20 // 62e3852571a435efffffff79 + VPSHLDQ $121, Y19, Y6, K5, Y20 // 62a3cd2d71e379 + VPSHLDQ $121, Y7, Y6, K5, Y20 // 62e3cd2d71e779 + VPSHLDQ $121, Y6, Y6, K5, Y20 // 62e3cd2d71e679 + VPSHLDQ $121, (BX), Y6, K5, Y20 // 62e3cd2d712379 + VPSHLDQ $121, -17(BP)(SI*1), Y6, K5, Y20 // 62e3cd2d71a435efffffff79 + VPSHLDQ $121, Y19, Y11, K5, Y20 // 62a3a52d71e379 + VPSHLDQ $121, Y7, Y11, K5, Y20 // 62e3a52d71e779 + VPSHLDQ $121, Y6, Y11, K5, Y20 // 62e3a52d71e679 + VPSHLDQ $121, (BX), Y11, K5, Y20 // 62e3a52d712379 + VPSHLDQ $121, -17(BP)(SI*1), Y11, K5, Y20 // 62e3a52d71a435efffffff79 + VPSHLDQ $121, Y19, Y31, K5, Y6 // 62b3852571f379 + VPSHLDQ $121, Y7, Y31, K5, Y6 // 62f3852571f779 + VPSHLDQ $121, Y6, Y31, K5, Y6 // 62f3852571f679 + VPSHLDQ $121, (BX), Y31, K5, Y6 // 62f38525713379 + VPSHLDQ $121, -17(BP)(SI*1), Y31, K5, Y6 // 62f3852571b435efffffff79 + VPSHLDQ $121, Y19, Y6, K5, Y6 // 62b3cd2d71f379 + VPSHLDQ $121, Y7, Y6, K5, Y6 // 62f3cd2d71f779 + VPSHLDQ $121, Y6, Y6, K5, Y6 // 62f3cd2d71f679 + VPSHLDQ $121, (BX), Y6, K5, Y6 // 62f3cd2d713379 + VPSHLDQ $121, -17(BP)(SI*1), Y6, K5, Y6 // 62f3cd2d71b435efffffff79 + VPSHLDQ $121, Y19, Y11, K5, Y6 // 62b3a52d71f379 + VPSHLDQ $121, Y7, Y11, K5, Y6 // 62f3a52d71f779 + VPSHLDQ $121, Y6, Y11, K5, Y6 // 62f3a52d71f679 + VPSHLDQ $121, (BX), Y11, K5, Y6 // 62f3a52d713379 + VPSHLDQ $121, -17(BP)(SI*1), Y11, K5, Y6 // 62f3a52d71b435efffffff79 + 
VPSHLDQ $13, Z3, Z27, K7, Z23 // 62e3a54771fb0d + VPSHLDQ $13, Z0, Z27, K7, Z23 // 62e3a54771f80d + VPSHLDQ $13, -17(BP)(SI*2), Z27, K7, Z23 // 62e3a54771bc75efffffff0d + VPSHLDQ $13, 7(AX)(CX*2), Z27, K7, Z23 // 62e3a54771bc48070000000d + VPSHLDQ $13, Z3, Z14, K7, Z23 // 62e38d4f71fb0d + VPSHLDQ $13, Z0, Z14, K7, Z23 // 62e38d4f71f80d + VPSHLDQ $13, -17(BP)(SI*2), Z14, K7, Z23 // 62e38d4f71bc75efffffff0d + VPSHLDQ $13, 7(AX)(CX*2), Z14, K7, Z23 // 62e38d4f71bc48070000000d + VPSHLDQ $13, Z3, Z27, K7, Z9 // 6273a54771cb0d + VPSHLDQ $13, Z0, Z27, K7, Z9 // 6273a54771c80d + VPSHLDQ $13, -17(BP)(SI*2), Z27, K7, Z9 // 6273a547718c75efffffff0d + VPSHLDQ $13, 7(AX)(CX*2), Z27, K7, Z9 // 6273a547718c48070000000d + VPSHLDQ $13, Z3, Z14, K7, Z9 // 62738d4f71cb0d + VPSHLDQ $13, Z0, Z14, K7, Z9 // 62738d4f71c80d + VPSHLDQ $13, -17(BP)(SI*2), Z14, K7, Z9 // 62738d4f718c75efffffff0d + VPSHLDQ $13, 7(AX)(CX*2), Z14, K7, Z9 // 62738d4f718c48070000000d + VPSHLDVD X15, X1, K7, X7 // 62d2750f71ff + VPSHLDVD X12, X1, K7, X7 // 62d2750f71fc + VPSHLDVD X0, X1, K7, X7 // 62f2750f71f8 + VPSHLDVD 7(AX), X1, K7, X7 // 62f2750f71b807000000 + VPSHLDVD (DI), X1, K7, X7 // 62f2750f713f + VPSHLDVD X15, X7, K7, X7 // 62d2450f71ff + VPSHLDVD X12, X7, K7, X7 // 62d2450f71fc + VPSHLDVD X0, X7, K7, X7 // 62f2450f71f8 + VPSHLDVD 7(AX), X7, K7, X7 // 62f2450f71b807000000 + VPSHLDVD (DI), X7, K7, X7 // 62f2450f713f + VPSHLDVD X15, X9, K7, X7 // 62d2350f71ff + VPSHLDVD X12, X9, K7, X7 // 62d2350f71fc + VPSHLDVD X0, X9, K7, X7 // 62f2350f71f8 + VPSHLDVD 7(AX), X9, K7, X7 // 62f2350f71b807000000 + VPSHLDVD (DI), X9, K7, X7 // 62f2350f713f + VPSHLDVD X15, X1, K7, X16 // 62c2750f71c7 + VPSHLDVD X12, X1, K7, X16 // 62c2750f71c4 + VPSHLDVD X0, X1, K7, X16 // 62e2750f71c0 + VPSHLDVD 7(AX), X1, K7, X16 // 62e2750f718007000000 + VPSHLDVD (DI), X1, K7, X16 // 62e2750f7107 + VPSHLDVD X15, X7, K7, X16 // 62c2450f71c7 + VPSHLDVD X12, X7, K7, X16 // 62c2450f71c4 + VPSHLDVD X0, X7, K7, X16 // 62e2450f71c0 + VPSHLDVD 
7(AX), X7, K7, X16 // 62e2450f718007000000 + VPSHLDVD (DI), X7, K7, X16 // 62e2450f7107 + VPSHLDVD X15, X9, K7, X16 // 62c2350f71c7 + VPSHLDVD X12, X9, K7, X16 // 62c2350f71c4 + VPSHLDVD X0, X9, K7, X16 // 62e2350f71c0 + VPSHLDVD 7(AX), X9, K7, X16 // 62e2350f718007000000 + VPSHLDVD (DI), X9, K7, X16 // 62e2350f7107 + VPSHLDVD X15, X1, K7, X31 // 6242750f71ff + VPSHLDVD X12, X1, K7, X31 // 6242750f71fc + VPSHLDVD X0, X1, K7, X31 // 6262750f71f8 + VPSHLDVD 7(AX), X1, K7, X31 // 6262750f71b807000000 + VPSHLDVD (DI), X1, K7, X31 // 6262750f713f + VPSHLDVD X15, X7, K7, X31 // 6242450f71ff + VPSHLDVD X12, X7, K7, X31 // 6242450f71fc + VPSHLDVD X0, X7, K7, X31 // 6262450f71f8 + VPSHLDVD 7(AX), X7, K7, X31 // 6262450f71b807000000 + VPSHLDVD (DI), X7, K7, X31 // 6262450f713f + VPSHLDVD X15, X9, K7, X31 // 6242350f71ff + VPSHLDVD X12, X9, K7, X31 // 6242350f71fc + VPSHLDVD X0, X9, K7, X31 // 6262350f71f8 + VPSHLDVD 7(AX), X9, K7, X31 // 6262350f71b807000000 + VPSHLDVD (DI), X9, K7, X31 // 6262350f713f + VPSHLDVD Y5, Y20, K6, Y0 // 62f25d2671c5 + VPSHLDVD Y28, Y20, K6, Y0 // 62925d2671c4 + VPSHLDVD Y7, Y20, K6, Y0 // 62f25d2671c7 + VPSHLDVD 15(R8)(R14*4), Y20, K6, Y0 // 62925d267184b00f000000 + VPSHLDVD -7(CX)(DX*4), Y20, K6, Y0 // 62f25d26718491f9ffffff + VPSHLDVD Y5, Y12, K6, Y0 // 62f21d2e71c5 + VPSHLDVD Y28, Y12, K6, Y0 // 62921d2e71c4 + VPSHLDVD Y7, Y12, K6, Y0 // 62f21d2e71c7 + VPSHLDVD 15(R8)(R14*4), Y12, K6, Y0 // 62921d2e7184b00f000000 + VPSHLDVD -7(CX)(DX*4), Y12, K6, Y0 // 62f21d2e718491f9ffffff + VPSHLDVD Y5, Y3, K6, Y0 // 62f2652e71c5 + VPSHLDVD Y28, Y3, K6, Y0 // 6292652e71c4 + VPSHLDVD Y7, Y3, K6, Y0 // 62f2652e71c7 + VPSHLDVD 15(R8)(R14*4), Y3, K6, Y0 // 6292652e7184b00f000000 + VPSHLDVD -7(CX)(DX*4), Y3, K6, Y0 // 62f2652e718491f9ffffff + VPSHLDVD Y5, Y20, K6, Y3 // 62f25d2671dd + VPSHLDVD Y28, Y20, K6, Y3 // 62925d2671dc + VPSHLDVD Y7, Y20, K6, Y3 // 62f25d2671df + VPSHLDVD 15(R8)(R14*4), Y20, K6, Y3 // 62925d26719cb00f000000 + VPSHLDVD -7(CX)(DX*4), Y20, 
K6, Y3 // 62f25d26719c91f9ffffff + VPSHLDVD Y5, Y12, K6, Y3 // 62f21d2e71dd + VPSHLDVD Y28, Y12, K6, Y3 // 62921d2e71dc + VPSHLDVD Y7, Y12, K6, Y3 // 62f21d2e71df + VPSHLDVD 15(R8)(R14*4), Y12, K6, Y3 // 62921d2e719cb00f000000 + VPSHLDVD -7(CX)(DX*4), Y12, K6, Y3 // 62f21d2e719c91f9ffffff + VPSHLDVD Y5, Y3, K6, Y3 // 62f2652e71dd + VPSHLDVD Y28, Y3, K6, Y3 // 6292652e71dc + VPSHLDVD Y7, Y3, K6, Y3 // 62f2652e71df + VPSHLDVD 15(R8)(R14*4), Y3, K6, Y3 // 6292652e719cb00f000000 + VPSHLDVD -7(CX)(DX*4), Y3, K6, Y3 // 62f2652e719c91f9ffffff + VPSHLDVD Y5, Y20, K6, Y5 // 62f25d2671ed + VPSHLDVD Y28, Y20, K6, Y5 // 62925d2671ec + VPSHLDVD Y7, Y20, K6, Y5 // 62f25d2671ef + VPSHLDVD 15(R8)(R14*4), Y20, K6, Y5 // 62925d2671acb00f000000 + VPSHLDVD -7(CX)(DX*4), Y20, K6, Y5 // 62f25d2671ac91f9ffffff + VPSHLDVD Y5, Y12, K6, Y5 // 62f21d2e71ed + VPSHLDVD Y28, Y12, K6, Y5 // 62921d2e71ec + VPSHLDVD Y7, Y12, K6, Y5 // 62f21d2e71ef + VPSHLDVD 15(R8)(R14*4), Y12, K6, Y5 // 62921d2e71acb00f000000 + VPSHLDVD -7(CX)(DX*4), Y12, K6, Y5 // 62f21d2e71ac91f9ffffff + VPSHLDVD Y5, Y3, K6, Y5 // 62f2652e71ed + VPSHLDVD Y28, Y3, K6, Y5 // 6292652e71ec + VPSHLDVD Y7, Y3, K6, Y5 // 62f2652e71ef + VPSHLDVD 15(R8)(R14*4), Y3, K6, Y5 // 6292652e71acb00f000000 + VPSHLDVD -7(CX)(DX*4), Y3, K6, Y5 // 62f2652e71ac91f9ffffff + VPSHLDVD Z22, Z8, K3, Z14 // 62323d4b71f6 + VPSHLDVD Z25, Z8, K3, Z14 // 62123d4b71f1 + VPSHLDVD 15(R8)(R14*1), Z8, K3, Z14 // 62123d4b71b4300f000000 + VPSHLDVD 15(R8)(R14*2), Z8, K3, Z14 // 62123d4b71b4700f000000 + VPSHLDVD Z22, Z24, K3, Z14 // 62323d4371f6 + VPSHLDVD Z25, Z24, K3, Z14 // 62123d4371f1 + VPSHLDVD 15(R8)(R14*1), Z24, K3, Z14 // 62123d4371b4300f000000 + VPSHLDVD 15(R8)(R14*2), Z24, K3, Z14 // 62123d4371b4700f000000 + VPSHLDVD Z22, Z8, K3, Z7 // 62b23d4b71fe + VPSHLDVD Z25, Z8, K3, Z7 // 62923d4b71f9 + VPSHLDVD 15(R8)(R14*1), Z8, K3, Z7 // 62923d4b71bc300f000000 + VPSHLDVD 15(R8)(R14*2), Z8, K3, Z7 // 62923d4b71bc700f000000 + VPSHLDVD Z22, Z24, K3, Z7 // 62b23d4371fe 
+ VPSHLDVD Z25, Z24, K3, Z7 // 62923d4371f9 + VPSHLDVD 15(R8)(R14*1), Z24, K3, Z7 // 62923d4371bc300f000000 + VPSHLDVD 15(R8)(R14*2), Z24, K3, Z7 // 62923d4371bc700f000000 + VPSHLDVQ X3, X17, K7, X12 // 6272f50771e3 + VPSHLDVQ X26, X17, K7, X12 // 6212f50771e2 + VPSHLDVQ X23, X17, K7, X12 // 6232f50771e7 + VPSHLDVQ 99(R15)(R15*1), X17, K7, X12 // 6212f50771a43f63000000 + VPSHLDVQ (DX), X17, K7, X12 // 6272f5077122 + VPSHLDVQ X3, X15, K7, X12 // 6272850f71e3 + VPSHLDVQ X26, X15, K7, X12 // 6212850f71e2 + VPSHLDVQ X23, X15, K7, X12 // 6232850f71e7 + VPSHLDVQ 99(R15)(R15*1), X15, K7, X12 // 6212850f71a43f63000000 + VPSHLDVQ (DX), X15, K7, X12 // 6272850f7122 + VPSHLDVQ X3, X8, K7, X12 // 6272bd0f71e3 + VPSHLDVQ X26, X8, K7, X12 // 6212bd0f71e2 + VPSHLDVQ X23, X8, K7, X12 // 6232bd0f71e7 + VPSHLDVQ 99(R15)(R15*1), X8, K7, X12 // 6212bd0f71a43f63000000 + VPSHLDVQ (DX), X8, K7, X12 // 6272bd0f7122 + VPSHLDVQ X3, X17, K7, X14 // 6272f50771f3 + VPSHLDVQ X26, X17, K7, X14 // 6212f50771f2 + VPSHLDVQ X23, X17, K7, X14 // 6232f50771f7 + VPSHLDVQ 99(R15)(R15*1), X17, K7, X14 // 6212f50771b43f63000000 + VPSHLDVQ (DX), X17, K7, X14 // 6272f5077132 + VPSHLDVQ X3, X15, K7, X14 // 6272850f71f3 + VPSHLDVQ X26, X15, K7, X14 // 6212850f71f2 + VPSHLDVQ X23, X15, K7, X14 // 6232850f71f7 + VPSHLDVQ 99(R15)(R15*1), X15, K7, X14 // 6212850f71b43f63000000 + VPSHLDVQ (DX), X15, K7, X14 // 6272850f7132 + VPSHLDVQ X3, X8, K7, X14 // 6272bd0f71f3 + VPSHLDVQ X26, X8, K7, X14 // 6212bd0f71f2 + VPSHLDVQ X23, X8, K7, X14 // 6232bd0f71f7 + VPSHLDVQ 99(R15)(R15*1), X8, K7, X14 // 6212bd0f71b43f63000000 + VPSHLDVQ (DX), X8, K7, X14 // 6272bd0f7132 + VPSHLDVQ X3, X17, K7, X5 // 62f2f50771eb + VPSHLDVQ X26, X17, K7, X5 // 6292f50771ea + VPSHLDVQ X23, X17, K7, X5 // 62b2f50771ef + VPSHLDVQ 99(R15)(R15*1), X17, K7, X5 // 6292f50771ac3f63000000 + VPSHLDVQ (DX), X17, K7, X5 // 62f2f507712a + VPSHLDVQ X3, X15, K7, X5 // 62f2850f71eb + VPSHLDVQ X26, X15, K7, X5 // 6292850f71ea + VPSHLDVQ X23, X15, K7, X5 // 
62b2850f71ef + VPSHLDVQ 99(R15)(R15*1), X15, K7, X5 // 6292850f71ac3f63000000 + VPSHLDVQ (DX), X15, K7, X5 // 62f2850f712a + VPSHLDVQ X3, X8, K7, X5 // 62f2bd0f71eb + VPSHLDVQ X26, X8, K7, X5 // 6292bd0f71ea + VPSHLDVQ X23, X8, K7, X5 // 62b2bd0f71ef + VPSHLDVQ 99(R15)(R15*1), X8, K7, X5 // 6292bd0f71ac3f63000000 + VPSHLDVQ (DX), X8, K7, X5 // 62f2bd0f712a + VPSHLDVQ Y17, Y12, K4, Y0 // 62b29d2c71c1 + VPSHLDVQ Y7, Y12, K4, Y0 // 62f29d2c71c7 + VPSHLDVQ Y9, Y12, K4, Y0 // 62d29d2c71c1 + VPSHLDVQ (R8), Y12, K4, Y0 // 62d29d2c7100 + VPSHLDVQ 15(DX)(BX*2), Y12, K4, Y0 // 62f29d2c71845a0f000000 + VPSHLDVQ Y17, Y1, K4, Y0 // 62b2f52c71c1 + VPSHLDVQ Y7, Y1, K4, Y0 // 62f2f52c71c7 + VPSHLDVQ Y9, Y1, K4, Y0 // 62d2f52c71c1 + VPSHLDVQ (R8), Y1, K4, Y0 // 62d2f52c7100 + VPSHLDVQ 15(DX)(BX*2), Y1, K4, Y0 // 62f2f52c71845a0f000000 + VPSHLDVQ Y17, Y14, K4, Y0 // 62b28d2c71c1 + VPSHLDVQ Y7, Y14, K4, Y0 // 62f28d2c71c7 + VPSHLDVQ Y9, Y14, K4, Y0 // 62d28d2c71c1 + VPSHLDVQ (R8), Y14, K4, Y0 // 62d28d2c7100 + VPSHLDVQ 15(DX)(BX*2), Y14, K4, Y0 // 62f28d2c71845a0f000000 + VPSHLDVQ Y17, Y12, K4, Y22 // 62a29d2c71f1 + VPSHLDVQ Y7, Y12, K4, Y22 // 62e29d2c71f7 + VPSHLDVQ Y9, Y12, K4, Y22 // 62c29d2c71f1 + VPSHLDVQ (R8), Y12, K4, Y22 // 62c29d2c7130 + VPSHLDVQ 15(DX)(BX*2), Y12, K4, Y22 // 62e29d2c71b45a0f000000 + VPSHLDVQ Y17, Y1, K4, Y22 // 62a2f52c71f1 + VPSHLDVQ Y7, Y1, K4, Y22 // 62e2f52c71f7 + VPSHLDVQ Y9, Y1, K4, Y22 // 62c2f52c71f1 + VPSHLDVQ (R8), Y1, K4, Y22 // 62c2f52c7130 + VPSHLDVQ 15(DX)(BX*2), Y1, K4, Y22 // 62e2f52c71b45a0f000000 + VPSHLDVQ Y17, Y14, K4, Y22 // 62a28d2c71f1 + VPSHLDVQ Y7, Y14, K4, Y22 // 62e28d2c71f7 + VPSHLDVQ Y9, Y14, K4, Y22 // 62c28d2c71f1 + VPSHLDVQ (R8), Y14, K4, Y22 // 62c28d2c7130 + VPSHLDVQ 15(DX)(BX*2), Y14, K4, Y22 // 62e28d2c71b45a0f000000 + VPSHLDVQ Y17, Y12, K4, Y13 // 62329d2c71e9 + VPSHLDVQ Y7, Y12, K4, Y13 // 62729d2c71ef + VPSHLDVQ Y9, Y12, K4, Y13 // 62529d2c71e9 + VPSHLDVQ (R8), Y12, K4, Y13 // 62529d2c7128 + VPSHLDVQ 15(DX)(BX*2), 
Y12, K4, Y13 // 62729d2c71ac5a0f000000 + VPSHLDVQ Y17, Y1, K4, Y13 // 6232f52c71e9 + VPSHLDVQ Y7, Y1, K4, Y13 // 6272f52c71ef + VPSHLDVQ Y9, Y1, K4, Y13 // 6252f52c71e9 + VPSHLDVQ (R8), Y1, K4, Y13 // 6252f52c7128 + VPSHLDVQ 15(DX)(BX*2), Y1, K4, Y13 // 6272f52c71ac5a0f000000 + VPSHLDVQ Y17, Y14, K4, Y13 // 62328d2c71e9 + VPSHLDVQ Y7, Y14, K4, Y13 // 62728d2c71ef + VPSHLDVQ Y9, Y14, K4, Y13 // 62528d2c71e9 + VPSHLDVQ (R8), Y14, K4, Y13 // 62528d2c7128 + VPSHLDVQ 15(DX)(BX*2), Y14, K4, Y13 // 62728d2c71ac5a0f000000 + VPSHLDVQ Z0, Z6, K4, Z1 // 62f2cd4c71c8 + VPSHLDVQ Z8, Z6, K4, Z1 // 62d2cd4c71c8 + VPSHLDVQ (R14), Z6, K4, Z1 // 62d2cd4c710e + VPSHLDVQ -7(DI)(R8*8), Z6, K4, Z1 // 62b2cd4c718cc7f9ffffff + VPSHLDVQ Z0, Z2, K4, Z1 // 62f2ed4c71c8 + VPSHLDVQ Z8, Z2, K4, Z1 // 62d2ed4c71c8 + VPSHLDVQ (R14), Z2, K4, Z1 // 62d2ed4c710e + VPSHLDVQ -7(DI)(R8*8), Z2, K4, Z1 // 62b2ed4c718cc7f9ffffff + VPSHLDVQ Z0, Z6, K4, Z16 // 62e2cd4c71c0 + VPSHLDVQ Z8, Z6, K4, Z16 // 62c2cd4c71c0 + VPSHLDVQ (R14), Z6, K4, Z16 // 62c2cd4c7106 + VPSHLDVQ -7(DI)(R8*8), Z6, K4, Z16 // 62a2cd4c7184c7f9ffffff + VPSHLDVQ Z0, Z2, K4, Z16 // 62e2ed4c71c0 + VPSHLDVQ Z8, Z2, K4, Z16 // 62c2ed4c71c0 + VPSHLDVQ (R14), Z2, K4, Z16 // 62c2ed4c7106 + VPSHLDVQ -7(DI)(R8*8), Z2, K4, Z16 // 62a2ed4c7184c7f9ffffff + VPSHLDVW X18, X9, K7, X13 // 6232b50f70ea + VPSHLDVW X21, X9, K7, X13 // 6232b50f70ed + VPSHLDVW X1, X9, K7, X13 // 6272b50f70e9 + VPSHLDVW -17(BP)(SI*8), X9, K7, X13 // 6272b50f70acf5efffffff + VPSHLDVW (R15), X9, K7, X13 // 6252b50f702f + VPSHLDVW X18, X15, K7, X13 // 6232850f70ea + VPSHLDVW X21, X15, K7, X13 // 6232850f70ed + VPSHLDVW X1, X15, K7, X13 // 6272850f70e9 + VPSHLDVW -17(BP)(SI*8), X15, K7, X13 // 6272850f70acf5efffffff + VPSHLDVW (R15), X15, K7, X13 // 6252850f702f + VPSHLDVW X18, X26, K7, X13 // 6232ad0770ea + VPSHLDVW X21, X26, K7, X13 // 6232ad0770ed + VPSHLDVW X1, X26, K7, X13 // 6272ad0770e9 + VPSHLDVW -17(BP)(SI*8), X26, K7, X13 // 6272ad0770acf5efffffff + VPSHLDVW (R15), 
X26, K7, X13 // 6252ad07702f + VPSHLDVW X18, X9, K7, X28 // 6222b50f70e2 + VPSHLDVW X21, X9, K7, X28 // 6222b50f70e5 + VPSHLDVW X1, X9, K7, X28 // 6262b50f70e1 + VPSHLDVW -17(BP)(SI*8), X9, K7, X28 // 6262b50f70a4f5efffffff + VPSHLDVW (R15), X9, K7, X28 // 6242b50f7027 + VPSHLDVW X18, X15, K7, X28 // 6222850f70e2 + VPSHLDVW X21, X15, K7, X28 // 6222850f70e5 + VPSHLDVW X1, X15, K7, X28 // 6262850f70e1 + VPSHLDVW -17(BP)(SI*8), X15, K7, X28 // 6262850f70a4f5efffffff + VPSHLDVW (R15), X15, K7, X28 // 6242850f7027 + VPSHLDVW X18, X26, K7, X28 // 6222ad0770e2 + VPSHLDVW X21, X26, K7, X28 // 6222ad0770e5 + VPSHLDVW X1, X26, K7, X28 // 6262ad0770e1 + VPSHLDVW -17(BP)(SI*8), X26, K7, X28 // 6262ad0770a4f5efffffff + VPSHLDVW (R15), X26, K7, X28 // 6242ad077027 + VPSHLDVW X18, X9, K7, X24 // 6222b50f70c2 + VPSHLDVW X21, X9, K7, X24 // 6222b50f70c5 + VPSHLDVW X1, X9, K7, X24 // 6262b50f70c1 + VPSHLDVW -17(BP)(SI*8), X9, K7, X24 // 6262b50f7084f5efffffff + VPSHLDVW (R15), X9, K7, X24 // 6242b50f7007 + VPSHLDVW X18, X15, K7, X24 // 6222850f70c2 + VPSHLDVW X21, X15, K7, X24 // 6222850f70c5 + VPSHLDVW X1, X15, K7, X24 // 6262850f70c1 + VPSHLDVW -17(BP)(SI*8), X15, K7, X24 // 6262850f7084f5efffffff + VPSHLDVW (R15), X15, K7, X24 // 6242850f7007 + VPSHLDVW X18, X26, K7, X24 // 6222ad0770c2 + VPSHLDVW X21, X26, K7, X24 // 6222ad0770c5 + VPSHLDVW X1, X26, K7, X24 // 6262ad0770c1 + VPSHLDVW -17(BP)(SI*8), X26, K7, X24 // 6262ad077084f5efffffff + VPSHLDVW (R15), X26, K7, X24 // 6242ad077007 + VPSHLDVW Y2, Y28, K2, Y31 // 62629d2270fa + VPSHLDVW Y21, Y28, K2, Y31 // 62229d2270fd + VPSHLDVW Y12, Y28, K2, Y31 // 62429d2270fc + VPSHLDVW 17(SP)(BP*1), Y28, K2, Y31 // 62629d2270bc2c11000000 + VPSHLDVW -7(CX)(DX*8), Y28, K2, Y31 // 62629d2270bcd1f9ffffff + VPSHLDVW Y2, Y13, K2, Y31 // 6262952a70fa + VPSHLDVW Y21, Y13, K2, Y31 // 6222952a70fd + VPSHLDVW Y12, Y13, K2, Y31 // 6242952a70fc + VPSHLDVW 17(SP)(BP*1), Y13, K2, Y31 // 6262952a70bc2c11000000 + VPSHLDVW -7(CX)(DX*8), Y13, K2, Y31 // 
6262952a70bcd1f9ffffff + VPSHLDVW Y2, Y7, K2, Y31 // 6262c52a70fa + VPSHLDVW Y21, Y7, K2, Y31 // 6222c52a70fd + VPSHLDVW Y12, Y7, K2, Y31 // 6242c52a70fc + VPSHLDVW 17(SP)(BP*1), Y7, K2, Y31 // 6262c52a70bc2c11000000 + VPSHLDVW -7(CX)(DX*8), Y7, K2, Y31 // 6262c52a70bcd1f9ffffff + VPSHLDVW Y2, Y28, K2, Y8 // 62729d2270c2 + VPSHLDVW Y21, Y28, K2, Y8 // 62329d2270c5 + VPSHLDVW Y12, Y28, K2, Y8 // 62529d2270c4 + VPSHLDVW 17(SP)(BP*1), Y28, K2, Y8 // 62729d2270842c11000000 + VPSHLDVW -7(CX)(DX*8), Y28, K2, Y8 // 62729d227084d1f9ffffff + VPSHLDVW Y2, Y13, K2, Y8 // 6272952a70c2 + VPSHLDVW Y21, Y13, K2, Y8 // 6232952a70c5 + VPSHLDVW Y12, Y13, K2, Y8 // 6252952a70c4 + VPSHLDVW 17(SP)(BP*1), Y13, K2, Y8 // 6272952a70842c11000000 + VPSHLDVW -7(CX)(DX*8), Y13, K2, Y8 // 6272952a7084d1f9ffffff + VPSHLDVW Y2, Y7, K2, Y8 // 6272c52a70c2 + VPSHLDVW Y21, Y7, K2, Y8 // 6232c52a70c5 + VPSHLDVW Y12, Y7, K2, Y8 // 6252c52a70c4 + VPSHLDVW 17(SP)(BP*1), Y7, K2, Y8 // 6272c52a70842c11000000 + VPSHLDVW -7(CX)(DX*8), Y7, K2, Y8 // 6272c52a7084d1f9ffffff + VPSHLDVW Y2, Y28, K2, Y1 // 62f29d2270ca + VPSHLDVW Y21, Y28, K2, Y1 // 62b29d2270cd + VPSHLDVW Y12, Y28, K2, Y1 // 62d29d2270cc + VPSHLDVW 17(SP)(BP*1), Y28, K2, Y1 // 62f29d22708c2c11000000 + VPSHLDVW -7(CX)(DX*8), Y28, K2, Y1 // 62f29d22708cd1f9ffffff + VPSHLDVW Y2, Y13, K2, Y1 // 62f2952a70ca + VPSHLDVW Y21, Y13, K2, Y1 // 62b2952a70cd + VPSHLDVW Y12, Y13, K2, Y1 // 62d2952a70cc + VPSHLDVW 17(SP)(BP*1), Y13, K2, Y1 // 62f2952a708c2c11000000 + VPSHLDVW -7(CX)(DX*8), Y13, K2, Y1 // 62f2952a708cd1f9ffffff + VPSHLDVW Y2, Y7, K2, Y1 // 62f2c52a70ca + VPSHLDVW Y21, Y7, K2, Y1 // 62b2c52a70cd + VPSHLDVW Y12, Y7, K2, Y1 // 62d2c52a70cc + VPSHLDVW 17(SP)(BP*1), Y7, K2, Y1 // 62f2c52a708c2c11000000 + VPSHLDVW -7(CX)(DX*8), Y7, K2, Y1 // 62f2c52a708cd1f9ffffff + VPSHLDVW Z11, Z14, K5, Z15 // 62528d4d70fb + VPSHLDVW Z5, Z14, K5, Z15 // 62728d4d70fd + VPSHLDVW 99(R15)(R15*4), Z14, K5, Z15 // 62128d4d70bcbf63000000 + VPSHLDVW 15(DX), Z14, K5, Z15 
// 62728d4d70ba0f000000 + VPSHLDVW Z11, Z27, K5, Z15 // 6252a54570fb + VPSHLDVW Z5, Z27, K5, Z15 // 6272a54570fd + VPSHLDVW 99(R15)(R15*4), Z27, K5, Z15 // 6212a54570bcbf63000000 + VPSHLDVW 15(DX), Z27, K5, Z15 // 6272a54570ba0f000000 + VPSHLDVW Z11, Z14, K5, Z12 // 62528d4d70e3 + VPSHLDVW Z5, Z14, K5, Z12 // 62728d4d70e5 + VPSHLDVW 99(R15)(R15*4), Z14, K5, Z12 // 62128d4d70a4bf63000000 + VPSHLDVW 15(DX), Z14, K5, Z12 // 62728d4d70a20f000000 + VPSHLDVW Z11, Z27, K5, Z12 // 6252a54570e3 + VPSHLDVW Z5, Z27, K5, Z12 // 6272a54570e5 + VPSHLDVW 99(R15)(R15*4), Z27, K5, Z12 // 6212a54570a4bf63000000 + VPSHLDVW 15(DX), Z27, K5, Z12 // 6272a54570a20f000000 + VPSHLDW $65, X24, X7, K3, X11 // 6213c50b70d841 + VPSHLDW $65, X20, X7, K3, X11 // 6233c50b70dc41 + VPSHLDW $65, X7, X7, K3, X11 // 6273c50b70df41 + VPSHLDW $65, 7(SI)(DI*8), X7, K3, X11 // 6273c50b709cfe0700000041 + VPSHLDW $65, -15(R14), X7, K3, X11 // 6253c50b709ef1ffffff41 + VPSHLDW $65, X24, X0, K3, X11 // 6213fd0b70d841 + VPSHLDW $65, X20, X0, K3, X11 // 6233fd0b70dc41 + VPSHLDW $65, X7, X0, K3, X11 // 6273fd0b70df41 + VPSHLDW $65, 7(SI)(DI*8), X0, K3, X11 // 6273fd0b709cfe0700000041 + VPSHLDW $65, -15(R14), X0, K3, X11 // 6253fd0b709ef1ffffff41 + VPSHLDW $65, X24, X7, K3, X31 // 6203c50b70f841 + VPSHLDW $65, X20, X7, K3, X31 // 6223c50b70fc41 + VPSHLDW $65, X7, X7, K3, X31 // 6263c50b70ff41 + VPSHLDW $65, 7(SI)(DI*8), X7, K3, X31 // 6263c50b70bcfe0700000041 + VPSHLDW $65, -15(R14), X7, K3, X31 // 6243c50b70bef1ffffff41 + VPSHLDW $65, X24, X0, K3, X31 // 6203fd0b70f841 + VPSHLDW $65, X20, X0, K3, X31 // 6223fd0b70fc41 + VPSHLDW $65, X7, X0, K3, X31 // 6263fd0b70ff41 + VPSHLDW $65, 7(SI)(DI*8), X0, K3, X31 // 6263fd0b70bcfe0700000041 + VPSHLDW $65, -15(R14), X0, K3, X31 // 6243fd0b70bef1ffffff41 + VPSHLDW $65, X24, X7, K3, X3 // 6293c50b70d841 + VPSHLDW $65, X20, X7, K3, X3 // 62b3c50b70dc41 + VPSHLDW $65, X7, X7, K3, X3 // 62f3c50b70df41 + VPSHLDW $65, 7(SI)(DI*8), X7, K3, X3 // 62f3c50b709cfe0700000041 + VPSHLDW 
$65, -15(R14), X7, K3, X3 // 62d3c50b709ef1ffffff41 + VPSHLDW $65, X24, X0, K3, X3 // 6293fd0b70d841 + VPSHLDW $65, X20, X0, K3, X3 // 62b3fd0b70dc41 + VPSHLDW $65, X7, X0, K3, X3 // 62f3fd0b70df41 + VPSHLDW $65, 7(SI)(DI*8), X0, K3, X3 // 62f3fd0b709cfe0700000041 + VPSHLDW $65, -15(R14), X0, K3, X3 // 62d3fd0b709ef1ffffff41 + VPSHLDW $67, Y12, Y3, K4, Y9 // 6253e52c70cc43 + VPSHLDW $67, Y21, Y3, K4, Y9 // 6233e52c70cd43 + VPSHLDW $67, Y14, Y3, K4, Y9 // 6253e52c70ce43 + VPSHLDW $67, -17(BP)(SI*2), Y3, K4, Y9 // 6273e52c708c75efffffff43 + VPSHLDW $67, 7(AX)(CX*2), Y3, K4, Y9 // 6273e52c708c480700000043 + VPSHLDW $67, Y12, Y2, K4, Y9 // 6253ed2c70cc43 + VPSHLDW $67, Y21, Y2, K4, Y9 // 6233ed2c70cd43 + VPSHLDW $67, Y14, Y2, K4, Y9 // 6253ed2c70ce43 + VPSHLDW $67, -17(BP)(SI*2), Y2, K4, Y9 // 6273ed2c708c75efffffff43 + VPSHLDW $67, 7(AX)(CX*2), Y2, K4, Y9 // 6273ed2c708c480700000043 + VPSHLDW $67, Y12, Y9, K4, Y9 // 6253b52c70cc43 + VPSHLDW $67, Y21, Y9, K4, Y9 // 6233b52c70cd43 + VPSHLDW $67, Y14, Y9, K4, Y9 // 6253b52c70ce43 + VPSHLDW $67, -17(BP)(SI*2), Y9, K4, Y9 // 6273b52c708c75efffffff43 + VPSHLDW $67, 7(AX)(CX*2), Y9, K4, Y9 // 6273b52c708c480700000043 + VPSHLDW $67, Y12, Y3, K4, Y1 // 62d3e52c70cc43 + VPSHLDW $67, Y21, Y3, K4, Y1 // 62b3e52c70cd43 + VPSHLDW $67, Y14, Y3, K4, Y1 // 62d3e52c70ce43 + VPSHLDW $67, -17(BP)(SI*2), Y3, K4, Y1 // 62f3e52c708c75efffffff43 + VPSHLDW $67, 7(AX)(CX*2), Y3, K4, Y1 // 62f3e52c708c480700000043 + VPSHLDW $67, Y12, Y2, K4, Y1 // 62d3ed2c70cc43 + VPSHLDW $67, Y21, Y2, K4, Y1 // 62b3ed2c70cd43 + VPSHLDW $67, Y14, Y2, K4, Y1 // 62d3ed2c70ce43 + VPSHLDW $67, -17(BP)(SI*2), Y2, K4, Y1 // 62f3ed2c708c75efffffff43 + VPSHLDW $67, 7(AX)(CX*2), Y2, K4, Y1 // 62f3ed2c708c480700000043 + VPSHLDW $67, Y12, Y9, K4, Y1 // 62d3b52c70cc43 + VPSHLDW $67, Y21, Y9, K4, Y1 // 62b3b52c70cd43 + VPSHLDW $67, Y14, Y9, K4, Y1 // 62d3b52c70ce43 + VPSHLDW $67, -17(BP)(SI*2), Y9, K4, Y1 // 62f3b52c708c75efffffff43 + VPSHLDW $67, 7(AX)(CX*2), Y9, K4, Y1 // 
62f3b52c708c480700000043 + VPSHLDW $127, Z2, Z5, K2, Z13 // 6273d54a70ea7f + VPSHLDW $127, (CX), Z5, K2, Z13 // 6273d54a70297f + VPSHLDW $127, 99(R15), Z5, K2, Z13 // 6253d54a70af630000007f + VPSHLDW $127, Z2, Z23, K2, Z13 // 6273c54270ea7f + VPSHLDW $127, (CX), Z23, K2, Z13 // 6273c54270297f + VPSHLDW $127, 99(R15), Z23, K2, Z13 // 6253c54270af630000007f + VPSHLDW $127, Z2, Z5, K2, Z14 // 6273d54a70f27f + VPSHLDW $127, (CX), Z5, K2, Z14 // 6273d54a70317f + VPSHLDW $127, 99(R15), Z5, K2, Z14 // 6253d54a70b7630000007f + VPSHLDW $127, Z2, Z23, K2, Z14 // 6273c54270f27f + VPSHLDW $127, (CX), Z23, K2, Z14 // 6273c54270317f + VPSHLDW $127, 99(R15), Z23, K2, Z14 // 6253c54270b7630000007f + VPSHRDD $0, X21, X5, K2, X9 // 6233550a73cd00 + VPSHRDD $0, X1, X5, K2, X9 // 6273550a73c900 + VPSHRDD $0, X11, X5, K2, X9 // 6253550a73cb00 + VPSHRDD $0, 7(SI)(DI*1), X5, K2, X9 // 6273550a738c3e0700000000 + VPSHRDD $0, 15(DX)(BX*8), X5, K2, X9 // 6273550a738cda0f00000000 + VPSHRDD $0, X21, X31, K2, X9 // 6233050273cd00 + VPSHRDD $0, X1, X31, K2, X9 // 6273050273c900 + VPSHRDD $0, X11, X31, K2, X9 // 6253050273cb00 + VPSHRDD $0, 7(SI)(DI*1), X31, K2, X9 // 62730502738c3e0700000000 + VPSHRDD $0, 15(DX)(BX*8), X31, K2, X9 // 62730502738cda0f00000000 + VPSHRDD $0, X21, X3, K2, X9 // 6233650a73cd00 + VPSHRDD $0, X1, X3, K2, X9 // 6273650a73c900 + VPSHRDD $0, X11, X3, K2, X9 // 6253650a73cb00 + VPSHRDD $0, 7(SI)(DI*1), X3, K2, X9 // 6273650a738c3e0700000000 + VPSHRDD $0, 15(DX)(BX*8), X3, K2, X9 // 6273650a738cda0f00000000 + VPSHRDD $0, X21, X5, K2, X7 // 62b3550a73fd00 + VPSHRDD $0, X1, X5, K2, X7 // 62f3550a73f900 + VPSHRDD $0, X11, X5, K2, X7 // 62d3550a73fb00 + VPSHRDD $0, 7(SI)(DI*1), X5, K2, X7 // 62f3550a73bc3e0700000000 + VPSHRDD $0, 15(DX)(BX*8), X5, K2, X7 // 62f3550a73bcda0f00000000 + VPSHRDD $0, X21, X31, K2, X7 // 62b3050273fd00 + VPSHRDD $0, X1, X31, K2, X7 // 62f3050273f900 + VPSHRDD $0, X11, X31, K2, X7 // 62d3050273fb00 + VPSHRDD $0, 7(SI)(DI*1), X31, K2, X7 // 
62f3050273bc3e0700000000 + VPSHRDD $0, 15(DX)(BX*8), X31, K2, X7 // 62f3050273bcda0f00000000 + VPSHRDD $0, X21, X3, K2, X7 // 62b3650a73fd00 + VPSHRDD $0, X1, X3, K2, X7 // 62f3650a73f900 + VPSHRDD $0, X11, X3, K2, X7 // 62d3650a73fb00 + VPSHRDD $0, 7(SI)(DI*1), X3, K2, X7 // 62f3650a73bc3e0700000000 + VPSHRDD $0, 15(DX)(BX*8), X3, K2, X7 // 62f3650a73bcda0f00000000 + VPSHRDD $0, X21, X5, K2, X14 // 6233550a73f500 + VPSHRDD $0, X1, X5, K2, X14 // 6273550a73f100 + VPSHRDD $0, X11, X5, K2, X14 // 6253550a73f300 + VPSHRDD $0, 7(SI)(DI*1), X5, K2, X14 // 6273550a73b43e0700000000 + VPSHRDD $0, 15(DX)(BX*8), X5, K2, X14 // 6273550a73b4da0f00000000 + VPSHRDD $0, X21, X31, K2, X14 // 6233050273f500 + VPSHRDD $0, X1, X31, K2, X14 // 6273050273f100 + VPSHRDD $0, X11, X31, K2, X14 // 6253050273f300 + VPSHRDD $0, 7(SI)(DI*1), X31, K2, X14 // 6273050273b43e0700000000 + VPSHRDD $0, 15(DX)(BX*8), X31, K2, X14 // 6273050273b4da0f00000000 + VPSHRDD $0, X21, X3, K2, X14 // 6233650a73f500 + VPSHRDD $0, X1, X3, K2, X14 // 6273650a73f100 + VPSHRDD $0, X11, X3, K2, X14 // 6253650a73f300 + VPSHRDD $0, 7(SI)(DI*1), X3, K2, X14 // 6273650a73b43e0700000000 + VPSHRDD $0, 15(DX)(BX*8), X3, K2, X14 // 6273650a73b4da0f00000000 + VPSHRDD $97, Y31, Y16, K3, Y30 // 62037d2373f761 + VPSHRDD $97, Y22, Y16, K3, Y30 // 62237d2373f661 + VPSHRDD $97, Y6, Y16, K3, Y30 // 62637d2373f661 + VPSHRDD $97, 15(R8)(R14*1), Y16, K3, Y30 // 62037d2373b4300f00000061 + VPSHRDD $97, 15(R8)(R14*2), Y16, K3, Y30 // 62037d2373b4700f00000061 + VPSHRDD $97, Y31, Y1, K3, Y30 // 6203752b73f761 + VPSHRDD $97, Y22, Y1, K3, Y30 // 6223752b73f661 + VPSHRDD $97, Y6, Y1, K3, Y30 // 6263752b73f661 + VPSHRDD $97, 15(R8)(R14*1), Y1, K3, Y30 // 6203752b73b4300f00000061 + VPSHRDD $97, 15(R8)(R14*2), Y1, K3, Y30 // 6203752b73b4700f00000061 + VPSHRDD $97, Y31, Y30, K3, Y30 // 62030d2373f761 + VPSHRDD $97, Y22, Y30, K3, Y30 // 62230d2373f661 + VPSHRDD $97, Y6, Y30, K3, Y30 // 62630d2373f661 + VPSHRDD $97, 15(R8)(R14*1), Y30, K3, Y30 // 
62030d2373b4300f00000061 + VPSHRDD $97, 15(R8)(R14*2), Y30, K3, Y30 // 62030d2373b4700f00000061 + VPSHRDD $97, Y31, Y16, K3, Y26 // 62037d2373d761 + VPSHRDD $97, Y22, Y16, K3, Y26 // 62237d2373d661 + VPSHRDD $97, Y6, Y16, K3, Y26 // 62637d2373d661 + VPSHRDD $97, 15(R8)(R14*1), Y16, K3, Y26 // 62037d237394300f00000061 + VPSHRDD $97, 15(R8)(R14*2), Y16, K3, Y26 // 62037d237394700f00000061 + VPSHRDD $97, Y31, Y1, K3, Y26 // 6203752b73d761 + VPSHRDD $97, Y22, Y1, K3, Y26 // 6223752b73d661 + VPSHRDD $97, Y6, Y1, K3, Y26 // 6263752b73d661 + VPSHRDD $97, 15(R8)(R14*1), Y1, K3, Y26 // 6203752b7394300f00000061 + VPSHRDD $97, 15(R8)(R14*2), Y1, K3, Y26 // 6203752b7394700f00000061 + VPSHRDD $97, Y31, Y30, K3, Y26 // 62030d2373d761 + VPSHRDD $97, Y22, Y30, K3, Y26 // 62230d2373d661 + VPSHRDD $97, Y6, Y30, K3, Y26 // 62630d2373d661 + VPSHRDD $97, 15(R8)(R14*1), Y30, K3, Y26 // 62030d237394300f00000061 + VPSHRDD $97, 15(R8)(R14*2), Y30, K3, Y26 // 62030d237394700f00000061 + VPSHRDD $97, Y31, Y16, K3, Y7 // 62937d2373ff61 + VPSHRDD $97, Y22, Y16, K3, Y7 // 62b37d2373fe61 + VPSHRDD $97, Y6, Y16, K3, Y7 // 62f37d2373fe61 + VPSHRDD $97, 15(R8)(R14*1), Y16, K3, Y7 // 62937d2373bc300f00000061 + VPSHRDD $97, 15(R8)(R14*2), Y16, K3, Y7 // 62937d2373bc700f00000061 + VPSHRDD $97, Y31, Y1, K3, Y7 // 6293752b73ff61 + VPSHRDD $97, Y22, Y1, K3, Y7 // 62b3752b73fe61 + VPSHRDD $97, Y6, Y1, K3, Y7 // 62f3752b73fe61 + VPSHRDD $97, 15(R8)(R14*1), Y1, K3, Y7 // 6293752b73bc300f00000061 + VPSHRDD $97, 15(R8)(R14*2), Y1, K3, Y7 // 6293752b73bc700f00000061 + VPSHRDD $97, Y31, Y30, K3, Y7 // 62930d2373ff61 + VPSHRDD $97, Y22, Y30, K3, Y7 // 62b30d2373fe61 + VPSHRDD $97, Y6, Y30, K3, Y7 // 62f30d2373fe61 + VPSHRDD $97, 15(R8)(R14*1), Y30, K3, Y7 // 62930d2373bc300f00000061 + VPSHRDD $97, 15(R8)(R14*2), Y30, K3, Y7 // 62930d2373bc700f00000061 + VPSHRDD $81, Z28, Z26, K3, Z6 // 62932d4373f451 + VPSHRDD $81, Z6, Z26, K3, Z6 // 62f32d4373f651 + VPSHRDD $81, 99(R15)(R15*2), Z26, K3, Z6 // 
62932d4373b47f6300000051 + VPSHRDD $81, -7(DI), Z26, K3, Z6 // 62f32d4373b7f9ffffff51 + VPSHRDD $81, Z28, Z14, K3, Z6 // 62930d4b73f451 + VPSHRDD $81, Z6, Z14, K3, Z6 // 62f30d4b73f651 + VPSHRDD $81, 99(R15)(R15*2), Z14, K3, Z6 // 62930d4b73b47f6300000051 + VPSHRDD $81, -7(DI), Z14, K3, Z6 // 62f30d4b73b7f9ffffff51 + VPSHRDD $81, Z28, Z26, K3, Z14 // 62132d4373f451 + VPSHRDD $81, Z6, Z26, K3, Z14 // 62732d4373f651 + VPSHRDD $81, 99(R15)(R15*2), Z26, K3, Z14 // 62132d4373b47f6300000051 + VPSHRDD $81, -7(DI), Z26, K3, Z14 // 62732d4373b7f9ffffff51 + VPSHRDD $81, Z28, Z14, K3, Z14 // 62130d4b73f451 + VPSHRDD $81, Z6, Z14, K3, Z14 // 62730d4b73f651 + VPSHRDD $81, 99(R15)(R15*2), Z14, K3, Z14 // 62130d4b73b47f6300000051 + VPSHRDD $81, -7(DI), Z14, K3, Z14 // 62730d4b73b7f9ffffff51 + VPSHRDQ $42, X14, X16, K3, X13 // 6253fd0373ee2a + VPSHRDQ $42, X19, X16, K3, X13 // 6233fd0373eb2a + VPSHRDQ $42, X8, X16, K3, X13 // 6253fd0373e82a + VPSHRDQ $42, -7(DI)(R8*1), X16, K3, X13 // 6233fd0373ac07f9ffffff2a + VPSHRDQ $42, (SP), X16, K3, X13 // 6273fd03732c242a + VPSHRDQ $42, X14, X14, K3, X13 // 62538d0b73ee2a + VPSHRDQ $42, X19, X14, K3, X13 // 62338d0b73eb2a + VPSHRDQ $42, X8, X14, K3, X13 // 62538d0b73e82a + VPSHRDQ $42, -7(DI)(R8*1), X14, K3, X13 // 62338d0b73ac07f9ffffff2a + VPSHRDQ $42, (SP), X14, K3, X13 // 62738d0b732c242a + VPSHRDQ $42, X14, X11, K3, X13 // 6253a50b73ee2a + VPSHRDQ $42, X19, X11, K3, X13 // 6233a50b73eb2a + VPSHRDQ $42, X8, X11, K3, X13 // 6253a50b73e82a + VPSHRDQ $42, -7(DI)(R8*1), X11, K3, X13 // 6233a50b73ac07f9ffffff2a + VPSHRDQ $42, (SP), X11, K3, X13 // 6273a50b732c242a + VPSHRDQ $42, X14, X16, K3, X0 // 62d3fd0373c62a + VPSHRDQ $42, X19, X16, K3, X0 // 62b3fd0373c32a + VPSHRDQ $42, X8, X16, K3, X0 // 62d3fd0373c02a + VPSHRDQ $42, -7(DI)(R8*1), X16, K3, X0 // 62b3fd03738407f9ffffff2a + VPSHRDQ $42, (SP), X16, K3, X0 // 62f3fd037304242a + VPSHRDQ $42, X14, X14, K3, X0 // 62d38d0b73c62a + VPSHRDQ $42, X19, X14, K3, X0 // 62b38d0b73c32a + VPSHRDQ 
$42, X8, X14, K3, X0 // 62d38d0b73c02a + VPSHRDQ $42, -7(DI)(R8*1), X14, K3, X0 // 62b38d0b738407f9ffffff2a + VPSHRDQ $42, (SP), X14, K3, X0 // 62f38d0b7304242a + VPSHRDQ $42, X14, X11, K3, X0 // 62d3a50b73c62a + VPSHRDQ $42, X19, X11, K3, X0 // 62b3a50b73c32a + VPSHRDQ $42, X8, X11, K3, X0 // 62d3a50b73c02a + VPSHRDQ $42, -7(DI)(R8*1), X11, K3, X0 // 62b3a50b738407f9ffffff2a + VPSHRDQ $42, (SP), X11, K3, X0 // 62f3a50b7304242a + VPSHRDQ $42, X14, X16, K3, X30 // 6243fd0373f62a + VPSHRDQ $42, X19, X16, K3, X30 // 6223fd0373f32a + VPSHRDQ $42, X8, X16, K3, X30 // 6243fd0373f02a + VPSHRDQ $42, -7(DI)(R8*1), X16, K3, X30 // 6223fd0373b407f9ffffff2a + VPSHRDQ $42, (SP), X16, K3, X30 // 6263fd037334242a + VPSHRDQ $42, X14, X14, K3, X30 // 62438d0b73f62a + VPSHRDQ $42, X19, X14, K3, X30 // 62238d0b73f32a + VPSHRDQ $42, X8, X14, K3, X30 // 62438d0b73f02a + VPSHRDQ $42, -7(DI)(R8*1), X14, K3, X30 // 62238d0b73b407f9ffffff2a + VPSHRDQ $42, (SP), X14, K3, X30 // 62638d0b7334242a + VPSHRDQ $42, X14, X11, K3, X30 // 6243a50b73f62a + VPSHRDQ $42, X19, X11, K3, X30 // 6223a50b73f32a + VPSHRDQ $42, X8, X11, K3, X30 // 6243a50b73f02a + VPSHRDQ $42, -7(DI)(R8*1), X11, K3, X30 // 6223a50b73b407f9ffffff2a + VPSHRDQ $42, (SP), X11, K3, X30 // 6263a50b7334242a + VPSHRDQ $79, Y24, Y28, K2, Y21 // 62839d2273e84f + VPSHRDQ $79, Y13, Y28, K2, Y21 // 62c39d2273ed4f + VPSHRDQ $79, Y20, Y28, K2, Y21 // 62a39d2273ec4f + VPSHRDQ $79, (R14), Y28, K2, Y21 // 62c39d22732e4f + VPSHRDQ $79, -7(DI)(R8*8), Y28, K2, Y21 // 62a39d2273acc7f9ffffff4f + VPSHRDQ $79, Y24, Y20, K2, Y21 // 6283dd2273e84f + VPSHRDQ $79, Y13, Y20, K2, Y21 // 62c3dd2273ed4f + VPSHRDQ $79, Y20, Y20, K2, Y21 // 62a3dd2273ec4f + VPSHRDQ $79, (R14), Y20, K2, Y21 // 62c3dd22732e4f + VPSHRDQ $79, -7(DI)(R8*8), Y20, K2, Y21 // 62a3dd2273acc7f9ffffff4f + VPSHRDQ $79, Y24, Y14, K2, Y21 // 62838d2a73e84f + VPSHRDQ $79, Y13, Y14, K2, Y21 // 62c38d2a73ed4f + VPSHRDQ $79, Y20, Y14, K2, Y21 // 62a38d2a73ec4f + VPSHRDQ $79, (R14), Y14, K2, Y21 
// 62c38d2a732e4f + VPSHRDQ $79, -7(DI)(R8*8), Y14, K2, Y21 // 62a38d2a73acc7f9ffffff4f + VPSHRDQ $79, Y24, Y28, K2, Y7 // 62939d2273f84f + VPSHRDQ $79, Y13, Y28, K2, Y7 // 62d39d2273fd4f + VPSHRDQ $79, Y20, Y28, K2, Y7 // 62b39d2273fc4f + VPSHRDQ $79, (R14), Y28, K2, Y7 // 62d39d22733e4f + VPSHRDQ $79, -7(DI)(R8*8), Y28, K2, Y7 // 62b39d2273bcc7f9ffffff4f + VPSHRDQ $79, Y24, Y20, K2, Y7 // 6293dd2273f84f + VPSHRDQ $79, Y13, Y20, K2, Y7 // 62d3dd2273fd4f + VPSHRDQ $79, Y20, Y20, K2, Y7 // 62b3dd2273fc4f + VPSHRDQ $79, (R14), Y20, K2, Y7 // 62d3dd22733e4f + VPSHRDQ $79, -7(DI)(R8*8), Y20, K2, Y7 // 62b3dd2273bcc7f9ffffff4f + VPSHRDQ $79, Y24, Y14, K2, Y7 // 62938d2a73f84f + VPSHRDQ $79, Y13, Y14, K2, Y7 // 62d38d2a73fd4f + VPSHRDQ $79, Y20, Y14, K2, Y7 // 62b38d2a73fc4f + VPSHRDQ $79, (R14), Y14, K2, Y7 // 62d38d2a733e4f + VPSHRDQ $79, -7(DI)(R8*8), Y14, K2, Y7 // 62b38d2a73bcc7f9ffffff4f + VPSHRDQ $79, Y24, Y28, K2, Y0 // 62939d2273c04f + VPSHRDQ $79, Y13, Y28, K2, Y0 // 62d39d2273c54f + VPSHRDQ $79, Y20, Y28, K2, Y0 // 62b39d2273c44f + VPSHRDQ $79, (R14), Y28, K2, Y0 // 62d39d2273064f + VPSHRDQ $79, -7(DI)(R8*8), Y28, K2, Y0 // 62b39d227384c7f9ffffff4f + VPSHRDQ $79, Y24, Y20, K2, Y0 // 6293dd2273c04f + VPSHRDQ $79, Y13, Y20, K2, Y0 // 62d3dd2273c54f + VPSHRDQ $79, Y20, Y20, K2, Y0 // 62b3dd2273c44f + VPSHRDQ $79, (R14), Y20, K2, Y0 // 62d3dd2273064f + VPSHRDQ $79, -7(DI)(R8*8), Y20, K2, Y0 // 62b3dd227384c7f9ffffff4f + VPSHRDQ $79, Y24, Y14, K2, Y0 // 62938d2a73c04f + VPSHRDQ $79, Y13, Y14, K2, Y0 // 62d38d2a73c54f + VPSHRDQ $79, Y20, Y14, K2, Y0 // 62b38d2a73c44f + VPSHRDQ $79, (R14), Y14, K2, Y0 // 62d38d2a73064f + VPSHRDQ $79, -7(DI)(R8*8), Y14, K2, Y0 // 62b38d2a7384c7f9ffffff4f + VPSHRDQ $64, Z3, Z26, K1, Z13 // 6273ad4173eb40 + VPSHRDQ $64, Z0, Z26, K1, Z13 // 6273ad4173e840 + VPSHRDQ $64, -7(CX)(DX*1), Z26, K1, Z13 // 6273ad4173ac11f9ffffff40 + VPSHRDQ $64, -15(R14)(R15*4), Z26, K1, Z13 // 6213ad4173acbef1ffffff40 + VPSHRDQ $64, Z3, Z3, K1, Z13 // 
6273e54973eb40 + VPSHRDQ $64, Z0, Z3, K1, Z13 // 6273e54973e840 + VPSHRDQ $64, -7(CX)(DX*1), Z3, K1, Z13 // 6273e54973ac11f9ffffff40 + VPSHRDQ $64, -15(R14)(R15*4), Z3, K1, Z13 // 6213e54973acbef1ffffff40 + VPSHRDQ $64, Z3, Z26, K1, Z21 // 62e3ad4173eb40 + VPSHRDQ $64, Z0, Z26, K1, Z21 // 62e3ad4173e840 + VPSHRDQ $64, -7(CX)(DX*1), Z26, K1, Z21 // 62e3ad4173ac11f9ffffff40 + VPSHRDQ $64, -15(R14)(R15*4), Z26, K1, Z21 // 6283ad4173acbef1ffffff40 + VPSHRDQ $64, Z3, Z3, K1, Z21 // 62e3e54973eb40 + VPSHRDQ $64, Z0, Z3, K1, Z21 // 62e3e54973e840 + VPSHRDQ $64, -7(CX)(DX*1), Z3, K1, Z21 // 62e3e54973ac11f9ffffff40 + VPSHRDQ $64, -15(R14)(R15*4), Z3, K1, Z21 // 6283e54973acbef1ffffff40 + VPSHRDVD X23, X12, K2, X8 // 62321d0a73c7 + VPSHRDVD X11, X12, K2, X8 // 62521d0a73c3 + VPSHRDVD X31, X12, K2, X8 // 62121d0a73c7 + VPSHRDVD -7(CX), X12, K2, X8 // 62721d0a7381f9ffffff + VPSHRDVD 15(DX)(BX*4), X12, K2, X8 // 62721d0a73849a0f000000 + VPSHRDVD X23, X16, K2, X8 // 62327d0273c7 + VPSHRDVD X11, X16, K2, X8 // 62527d0273c3 + VPSHRDVD X31, X16, K2, X8 // 62127d0273c7 + VPSHRDVD -7(CX), X16, K2, X8 // 62727d027381f9ffffff + VPSHRDVD 15(DX)(BX*4), X16, K2, X8 // 62727d0273849a0f000000 + VPSHRDVD X23, X23, K2, X8 // 6232450273c7 + VPSHRDVD X11, X23, K2, X8 // 6252450273c3 + VPSHRDVD X31, X23, K2, X8 // 6212450273c7 + VPSHRDVD -7(CX), X23, K2, X8 // 627245027381f9ffffff + VPSHRDVD 15(DX)(BX*4), X23, K2, X8 // 6272450273849a0f000000 + VPSHRDVD X23, X12, K2, X26 // 62221d0a73d7 + VPSHRDVD X11, X12, K2, X26 // 62421d0a73d3 + VPSHRDVD X31, X12, K2, X26 // 62021d0a73d7 + VPSHRDVD -7(CX), X12, K2, X26 // 62621d0a7391f9ffffff + VPSHRDVD 15(DX)(BX*4), X12, K2, X26 // 62621d0a73949a0f000000 + VPSHRDVD X23, X16, K2, X26 // 62227d0273d7 + VPSHRDVD X11, X16, K2, X26 // 62427d0273d3 + VPSHRDVD X31, X16, K2, X26 // 62027d0273d7 + VPSHRDVD -7(CX), X16, K2, X26 // 62627d027391f9ffffff + VPSHRDVD 15(DX)(BX*4), X16, K2, X26 // 62627d0273949a0f000000 + VPSHRDVD X23, X23, K2, X26 // 6222450273d7 + 
VPSHRDVD X11, X23, K2, X26 // 6242450273d3 + VPSHRDVD X31, X23, K2, X26 // 6202450273d7 + VPSHRDVD -7(CX), X23, K2, X26 // 626245027391f9ffffff + VPSHRDVD 15(DX)(BX*4), X23, K2, X26 // 6262450273949a0f000000 + VPSHRDVD X23, X12, K2, X23 // 62a21d0a73ff + VPSHRDVD X11, X12, K2, X23 // 62c21d0a73fb + VPSHRDVD X31, X12, K2, X23 // 62821d0a73ff + VPSHRDVD -7(CX), X12, K2, X23 // 62e21d0a73b9f9ffffff + VPSHRDVD 15(DX)(BX*4), X12, K2, X23 // 62e21d0a73bc9a0f000000 + VPSHRDVD X23, X16, K2, X23 // 62a27d0273ff + VPSHRDVD X11, X16, K2, X23 // 62c27d0273fb + VPSHRDVD X31, X16, K2, X23 // 62827d0273ff + VPSHRDVD -7(CX), X16, K2, X23 // 62e27d0273b9f9ffffff + VPSHRDVD 15(DX)(BX*4), X16, K2, X23 // 62e27d0273bc9a0f000000 + VPSHRDVD X23, X23, K2, X23 // 62a2450273ff + VPSHRDVD X11, X23, K2, X23 // 62c2450273fb + VPSHRDVD X31, X23, K2, X23 // 6282450273ff + VPSHRDVD -7(CX), X23, K2, X23 // 62e2450273b9f9ffffff + VPSHRDVD 15(DX)(BX*4), X23, K2, X23 // 62e2450273bc9a0f000000 + VPSHRDVD Y22, Y26, K1, Y14 // 62322d2173f6 + VPSHRDVD Y3, Y26, K1, Y14 // 62722d2173f3 + VPSHRDVD Y15, Y26, K1, Y14 // 62522d2173f7 + VPSHRDVD 99(R15)(R15*4), Y26, K1, Y14 // 62122d2173b4bf63000000 + VPSHRDVD 15(DX), Y26, K1, Y14 // 62722d2173b20f000000 + VPSHRDVD Y22, Y30, K1, Y14 // 62320d2173f6 + VPSHRDVD Y3, Y30, K1, Y14 // 62720d2173f3 + VPSHRDVD Y15, Y30, K1, Y14 // 62520d2173f7 + VPSHRDVD 99(R15)(R15*4), Y30, K1, Y14 // 62120d2173b4bf63000000 + VPSHRDVD 15(DX), Y30, K1, Y14 // 62720d2173b20f000000 + VPSHRDVD Y22, Y12, K1, Y14 // 62321d2973f6 + VPSHRDVD Y3, Y12, K1, Y14 // 62721d2973f3 + VPSHRDVD Y15, Y12, K1, Y14 // 62521d2973f7 + VPSHRDVD 99(R15)(R15*4), Y12, K1, Y14 // 62121d2973b4bf63000000 + VPSHRDVD 15(DX), Y12, K1, Y14 // 62721d2973b20f000000 + VPSHRDVD Y22, Y26, K1, Y21 // 62a22d2173ee + VPSHRDVD Y3, Y26, K1, Y21 // 62e22d2173eb + VPSHRDVD Y15, Y26, K1, Y21 // 62c22d2173ef + VPSHRDVD 99(R15)(R15*4), Y26, K1, Y21 // 62822d2173acbf63000000 + VPSHRDVD 15(DX), Y26, K1, Y21 // 62e22d2173aa0f000000 + 
VPSHRDVD Y22, Y30, K1, Y21 // 62a20d2173ee + VPSHRDVD Y3, Y30, K1, Y21 // 62e20d2173eb + VPSHRDVD Y15, Y30, K1, Y21 // 62c20d2173ef + VPSHRDVD 99(R15)(R15*4), Y30, K1, Y21 // 62820d2173acbf63000000 + VPSHRDVD 15(DX), Y30, K1, Y21 // 62e20d2173aa0f000000 + VPSHRDVD Y22, Y12, K1, Y21 // 62a21d2973ee + VPSHRDVD Y3, Y12, K1, Y21 // 62e21d2973eb + VPSHRDVD Y15, Y12, K1, Y21 // 62c21d2973ef + VPSHRDVD 99(R15)(R15*4), Y12, K1, Y21 // 62821d2973acbf63000000 + VPSHRDVD 15(DX), Y12, K1, Y21 // 62e21d2973aa0f000000 + VPSHRDVD Y22, Y26, K1, Y1 // 62b22d2173ce + VPSHRDVD Y3, Y26, K1, Y1 // 62f22d2173cb + VPSHRDVD Y15, Y26, K1, Y1 // 62d22d2173cf + VPSHRDVD 99(R15)(R15*4), Y26, K1, Y1 // 62922d21738cbf63000000 + VPSHRDVD 15(DX), Y26, K1, Y1 // 62f22d21738a0f000000 + VPSHRDVD Y22, Y30, K1, Y1 // 62b20d2173ce + VPSHRDVD Y3, Y30, K1, Y1 // 62f20d2173cb + VPSHRDVD Y15, Y30, K1, Y1 // 62d20d2173cf + VPSHRDVD 99(R15)(R15*4), Y30, K1, Y1 // 62920d21738cbf63000000 + VPSHRDVD 15(DX), Y30, K1, Y1 // 62f20d21738a0f000000 + VPSHRDVD Y22, Y12, K1, Y1 // 62b21d2973ce + VPSHRDVD Y3, Y12, K1, Y1 // 62f21d2973cb + VPSHRDVD Y15, Y12, K1, Y1 // 62d21d2973cf + VPSHRDVD 99(R15)(R15*4), Y12, K1, Y1 // 62921d29738cbf63000000 + VPSHRDVD 15(DX), Y12, K1, Y1 // 62f21d29738a0f000000 + VPSHRDVD Z3, Z11, K7, Z21 // 62e2254f73eb + VPSHRDVD Z12, Z11, K7, Z21 // 62c2254f73ec + VPSHRDVD 15(DX)(BX*1), Z11, K7, Z21 // 62e2254f73ac1a0f000000 + VPSHRDVD -7(CX)(DX*2), Z11, K7, Z21 // 62e2254f73ac51f9ffffff + VPSHRDVD Z3, Z25, K7, Z21 // 62e2354773eb + VPSHRDVD Z12, Z25, K7, Z21 // 62c2354773ec + VPSHRDVD 15(DX)(BX*1), Z25, K7, Z21 // 62e2354773ac1a0f000000 + VPSHRDVD -7(CX)(DX*2), Z25, K7, Z21 // 62e2354773ac51f9ffffff + VPSHRDVD Z3, Z11, K7, Z13 // 6272254f73eb + VPSHRDVD Z12, Z11, K7, Z13 // 6252254f73ec + VPSHRDVD 15(DX)(BX*1), Z11, K7, Z13 // 6272254f73ac1a0f000000 + VPSHRDVD -7(CX)(DX*2), Z11, K7, Z13 // 6272254f73ac51f9ffffff + VPSHRDVD Z3, Z25, K7, Z13 // 6272354773eb + VPSHRDVD Z12, Z25, K7, Z13 // 
6252354773ec + VPSHRDVD 15(DX)(BX*1), Z25, K7, Z13 // 6272354773ac1a0f000000 + VPSHRDVD -7(CX)(DX*2), Z25, K7, Z13 // 6272354773ac51f9ffffff + VPSHRDVQ X20, X11, K1, X24 // 6222a50973c4 + VPSHRDVQ X5, X11, K1, X24 // 6262a50973c5 + VPSHRDVQ X25, X11, K1, X24 // 6202a50973c1 + VPSHRDVQ 99(R15)(R15*8), X11, K1, X24 // 6202a5097384ff63000000 + VPSHRDVQ 7(AX)(CX*8), X11, K1, X24 // 6262a5097384c807000000 + VPSHRDVQ X20, X23, K1, X24 // 6222c50173c4 + VPSHRDVQ X5, X23, K1, X24 // 6262c50173c5 + VPSHRDVQ X25, X23, K1, X24 // 6202c50173c1 + VPSHRDVQ 99(R15)(R15*8), X23, K1, X24 // 6202c5017384ff63000000 + VPSHRDVQ 7(AX)(CX*8), X23, K1, X24 // 6262c5017384c807000000 + VPSHRDVQ X20, X2, K1, X24 // 6222ed0973c4 + VPSHRDVQ X5, X2, K1, X24 // 6262ed0973c5 + VPSHRDVQ X25, X2, K1, X24 // 6202ed0973c1 + VPSHRDVQ 99(R15)(R15*8), X2, K1, X24 // 6202ed097384ff63000000 + VPSHRDVQ 7(AX)(CX*8), X2, K1, X24 // 6262ed097384c807000000 + VPSHRDVQ X20, X11, K1, X14 // 6232a50973f4 + VPSHRDVQ X5, X11, K1, X14 // 6272a50973f5 + VPSHRDVQ X25, X11, K1, X14 // 6212a50973f1 + VPSHRDVQ 99(R15)(R15*8), X11, K1, X14 // 6212a50973b4ff63000000 + VPSHRDVQ 7(AX)(CX*8), X11, K1, X14 // 6272a50973b4c807000000 + VPSHRDVQ X20, X23, K1, X14 // 6232c50173f4 + VPSHRDVQ X5, X23, K1, X14 // 6272c50173f5 + VPSHRDVQ X25, X23, K1, X14 // 6212c50173f1 + VPSHRDVQ 99(R15)(R15*8), X23, K1, X14 // 6212c50173b4ff63000000 + VPSHRDVQ 7(AX)(CX*8), X23, K1, X14 // 6272c50173b4c807000000 + VPSHRDVQ X20, X2, K1, X14 // 6232ed0973f4 + VPSHRDVQ X5, X2, K1, X14 // 6272ed0973f5 + VPSHRDVQ X25, X2, K1, X14 // 6212ed0973f1 + VPSHRDVQ 99(R15)(R15*8), X2, K1, X14 // 6212ed0973b4ff63000000 + VPSHRDVQ 7(AX)(CX*8), X2, K1, X14 // 6272ed0973b4c807000000 + VPSHRDVQ X20, X11, K1, X0 // 62b2a50973c4 + VPSHRDVQ X5, X11, K1, X0 // 62f2a50973c5 + VPSHRDVQ X25, X11, K1, X0 // 6292a50973c1 + VPSHRDVQ 99(R15)(R15*8), X11, K1, X0 // 6292a5097384ff63000000 + VPSHRDVQ 7(AX)(CX*8), X11, K1, X0 // 62f2a5097384c807000000 + VPSHRDVQ X20, X23, K1, X0 // 
62b2c50173c4 + VPSHRDVQ X5, X23, K1, X0 // 62f2c50173c5 + VPSHRDVQ X25, X23, K1, X0 // 6292c50173c1 + VPSHRDVQ 99(R15)(R15*8), X23, K1, X0 // 6292c5017384ff63000000 + VPSHRDVQ 7(AX)(CX*8), X23, K1, X0 // 62f2c5017384c807000000 + VPSHRDVQ X20, X2, K1, X0 // 62b2ed0973c4 + VPSHRDVQ X5, X2, K1, X0 // 62f2ed0973c5 + VPSHRDVQ X25, X2, K1, X0 // 6292ed0973c1 + VPSHRDVQ 99(R15)(R15*8), X2, K1, X0 // 6292ed097384ff63000000 + VPSHRDVQ 7(AX)(CX*8), X2, K1, X0 // 62f2ed097384c807000000 + VPSHRDVQ Y21, Y5, K1, Y1 // 62b2d52973cd + VPSHRDVQ Y7, Y5, K1, Y1 // 62f2d52973cf + VPSHRDVQ Y30, Y5, K1, Y1 // 6292d52973ce + VPSHRDVQ (CX), Y5, K1, Y1 // 62f2d5297309 + VPSHRDVQ 99(R15), Y5, K1, Y1 // 62d2d529738f63000000 + VPSHRDVQ Y21, Y17, K1, Y1 // 62b2f52173cd + VPSHRDVQ Y7, Y17, K1, Y1 // 62f2f52173cf + VPSHRDVQ Y30, Y17, K1, Y1 // 6292f52173ce + VPSHRDVQ (CX), Y17, K1, Y1 // 62f2f5217309 + VPSHRDVQ 99(R15), Y17, K1, Y1 // 62d2f521738f63000000 + VPSHRDVQ Y21, Y13, K1, Y1 // 62b2952973cd + VPSHRDVQ Y7, Y13, K1, Y1 // 62f2952973cf + VPSHRDVQ Y30, Y13, K1, Y1 // 6292952973ce + VPSHRDVQ (CX), Y13, K1, Y1 // 62f295297309 + VPSHRDVQ 99(R15), Y13, K1, Y1 // 62d29529738f63000000 + VPSHRDVQ Y21, Y5, K1, Y27 // 6222d52973dd + VPSHRDVQ Y7, Y5, K1, Y27 // 6262d52973df + VPSHRDVQ Y30, Y5, K1, Y27 // 6202d52973de + VPSHRDVQ (CX), Y5, K1, Y27 // 6262d5297319 + VPSHRDVQ 99(R15), Y5, K1, Y27 // 6242d529739f63000000 + VPSHRDVQ Y21, Y17, K1, Y27 // 6222f52173dd + VPSHRDVQ Y7, Y17, K1, Y27 // 6262f52173df + VPSHRDVQ Y30, Y17, K1, Y27 // 6202f52173de + VPSHRDVQ (CX), Y17, K1, Y27 // 6262f5217319 + VPSHRDVQ 99(R15), Y17, K1, Y27 // 6242f521739f63000000 + VPSHRDVQ Y21, Y13, K1, Y27 // 6222952973dd + VPSHRDVQ Y7, Y13, K1, Y27 // 6262952973df + VPSHRDVQ Y30, Y13, K1, Y27 // 6202952973de + VPSHRDVQ (CX), Y13, K1, Y27 // 626295297319 + VPSHRDVQ 99(R15), Y13, K1, Y27 // 62429529739f63000000 + VPSHRDVQ Y21, Y5, K1, Y19 // 62a2d52973dd + VPSHRDVQ Y7, Y5, K1, Y19 // 62e2d52973df + VPSHRDVQ Y30, Y5, K1, Y19 // 
6282d52973de + VPSHRDVQ (CX), Y5, K1, Y19 // 62e2d5297319 + VPSHRDVQ 99(R15), Y5, K1, Y19 // 62c2d529739f63000000 + VPSHRDVQ Y21, Y17, K1, Y19 // 62a2f52173dd + VPSHRDVQ Y7, Y17, K1, Y19 // 62e2f52173df + VPSHRDVQ Y30, Y17, K1, Y19 // 6282f52173de + VPSHRDVQ (CX), Y17, K1, Y19 // 62e2f5217319 + VPSHRDVQ 99(R15), Y17, K1, Y19 // 62c2f521739f63000000 + VPSHRDVQ Y21, Y13, K1, Y19 // 62a2952973dd + VPSHRDVQ Y7, Y13, K1, Y19 // 62e2952973df + VPSHRDVQ Y30, Y13, K1, Y19 // 6282952973de + VPSHRDVQ (CX), Y13, K1, Y19 // 62e295297319 + VPSHRDVQ 99(R15), Y13, K1, Y19 // 62c29529739f63000000 + VPSHRDVQ Z23, Z23, K1, Z27 // 6222c54173df + VPSHRDVQ Z6, Z23, K1, Z27 // 6262c54173de + VPSHRDVQ -17(BP), Z23, K1, Z27 // 6262c541739defffffff + VPSHRDVQ -15(R14)(R15*8), Z23, K1, Z27 // 6202c541739cfef1ffffff + VPSHRDVQ Z23, Z5, K1, Z27 // 6222d54973df + VPSHRDVQ Z6, Z5, K1, Z27 // 6262d54973de + VPSHRDVQ -17(BP), Z5, K1, Z27 // 6262d549739defffffff + VPSHRDVQ -15(R14)(R15*8), Z5, K1, Z27 // 6202d549739cfef1ffffff + VPSHRDVQ Z23, Z23, K1, Z15 // 6232c54173ff + VPSHRDVQ Z6, Z23, K1, Z15 // 6272c54173fe + VPSHRDVQ -17(BP), Z23, K1, Z15 // 6272c54173bdefffffff + VPSHRDVQ -15(R14)(R15*8), Z23, K1, Z15 // 6212c54173bcfef1ffffff + VPSHRDVQ Z23, Z5, K1, Z15 // 6232d54973ff + VPSHRDVQ Z6, Z5, K1, Z15 // 6272d54973fe + VPSHRDVQ -17(BP), Z5, K1, Z15 // 6272d54973bdefffffff + VPSHRDVQ -15(R14)(R15*8), Z5, K1, Z15 // 6212d54973bcfef1ffffff + VPSHRDVW X2, X2, K7, X0 // 62f2ed0f72c2 + VPSHRDVW X31, X2, K7, X0 // 6292ed0f72c7 + VPSHRDVW X11, X2, K7, X0 // 62d2ed0f72c3 + VPSHRDVW (AX), X2, K7, X0 // 62f2ed0f7200 + VPSHRDVW 7(SI), X2, K7, X0 // 62f2ed0f728607000000 + VPSHRDVW X2, X8, K7, X0 // 62f2bd0f72c2 + VPSHRDVW X31, X8, K7, X0 // 6292bd0f72c7 + VPSHRDVW X11, X8, K7, X0 // 62d2bd0f72c3 + VPSHRDVW (AX), X8, K7, X0 // 62f2bd0f7200 + VPSHRDVW 7(SI), X8, K7, X0 // 62f2bd0f728607000000 + VPSHRDVW X2, X9, K7, X0 // 62f2b50f72c2 + VPSHRDVW X31, X9, K7, X0 // 6292b50f72c7 + VPSHRDVW X11, X9, K7, X0 // 
62d2b50f72c3 + VPSHRDVW (AX), X9, K7, X0 // 62f2b50f7200 + VPSHRDVW 7(SI), X9, K7, X0 // 62f2b50f728607000000 + VPSHRDVW X2, X2, K7, X9 // 6272ed0f72ca + VPSHRDVW X31, X2, K7, X9 // 6212ed0f72cf + VPSHRDVW X11, X2, K7, X9 // 6252ed0f72cb + VPSHRDVW (AX), X2, K7, X9 // 6272ed0f7208 + VPSHRDVW 7(SI), X2, K7, X9 // 6272ed0f728e07000000 + VPSHRDVW X2, X8, K7, X9 // 6272bd0f72ca + VPSHRDVW X31, X8, K7, X9 // 6212bd0f72cf + VPSHRDVW X11, X8, K7, X9 // 6252bd0f72cb + VPSHRDVW (AX), X8, K7, X9 // 6272bd0f7208 + VPSHRDVW 7(SI), X8, K7, X9 // 6272bd0f728e07000000 + VPSHRDVW X2, X9, K7, X9 // 6272b50f72ca + VPSHRDVW X31, X9, K7, X9 // 6212b50f72cf + VPSHRDVW X11, X9, K7, X9 // 6252b50f72cb + VPSHRDVW (AX), X9, K7, X9 // 6272b50f7208 + VPSHRDVW 7(SI), X9, K7, X9 // 6272b50f728e07000000 + VPSHRDVW X2, X2, K7, X13 // 6272ed0f72ea + VPSHRDVW X31, X2, K7, X13 // 6212ed0f72ef + VPSHRDVW X11, X2, K7, X13 // 6252ed0f72eb + VPSHRDVW (AX), X2, K7, X13 // 6272ed0f7228 + VPSHRDVW 7(SI), X2, K7, X13 // 6272ed0f72ae07000000 + VPSHRDVW X2, X8, K7, X13 // 6272bd0f72ea + VPSHRDVW X31, X8, K7, X13 // 6212bd0f72ef + VPSHRDVW X11, X8, K7, X13 // 6252bd0f72eb + VPSHRDVW (AX), X8, K7, X13 // 6272bd0f7228 + VPSHRDVW 7(SI), X8, K7, X13 // 6272bd0f72ae07000000 + VPSHRDVW X2, X9, K7, X13 // 6272b50f72ea + VPSHRDVW X31, X9, K7, X13 // 6212b50f72ef + VPSHRDVW X11, X9, K7, X13 // 6252b50f72eb + VPSHRDVW (AX), X9, K7, X13 // 6272b50f7228 + VPSHRDVW 7(SI), X9, K7, X13 // 6272b50f72ae07000000 + VPSHRDVW Y5, Y8, K2, Y13 // 6272bd2a72ed + VPSHRDVW Y24, Y8, K2, Y13 // 6212bd2a72e8 + VPSHRDVW Y21, Y8, K2, Y13 // 6232bd2a72ed + VPSHRDVW 99(R15)(R15*2), Y8, K2, Y13 // 6212bd2a72ac7f63000000 + VPSHRDVW -7(DI), Y8, K2, Y13 // 6272bd2a72aff9ffffff + VPSHRDVW Y5, Y11, K2, Y13 // 6272a52a72ed + VPSHRDVW Y24, Y11, K2, Y13 // 6212a52a72e8 + VPSHRDVW Y21, Y11, K2, Y13 // 6232a52a72ed + VPSHRDVW 99(R15)(R15*2), Y11, K2, Y13 // 6212a52a72ac7f63000000 + VPSHRDVW -7(DI), Y11, K2, Y13 // 6272a52a72aff9ffffff + VPSHRDVW Y5, 
Y24, K2, Y13 // 6272bd2272ed + VPSHRDVW Y24, Y24, K2, Y13 // 6212bd2272e8 + VPSHRDVW Y21, Y24, K2, Y13 // 6232bd2272ed + VPSHRDVW 99(R15)(R15*2), Y24, K2, Y13 // 6212bd2272ac7f63000000 + VPSHRDVW -7(DI), Y24, K2, Y13 // 6272bd2272aff9ffffff + VPSHRDVW Y5, Y8, K2, Y18 // 62e2bd2a72d5 + VPSHRDVW Y24, Y8, K2, Y18 // 6282bd2a72d0 + VPSHRDVW Y21, Y8, K2, Y18 // 62a2bd2a72d5 + VPSHRDVW 99(R15)(R15*2), Y8, K2, Y18 // 6282bd2a72947f63000000 + VPSHRDVW -7(DI), Y8, K2, Y18 // 62e2bd2a7297f9ffffff + VPSHRDVW Y5, Y11, K2, Y18 // 62e2a52a72d5 + VPSHRDVW Y24, Y11, K2, Y18 // 6282a52a72d0 + VPSHRDVW Y21, Y11, K2, Y18 // 62a2a52a72d5 + VPSHRDVW 99(R15)(R15*2), Y11, K2, Y18 // 6282a52a72947f63000000 + VPSHRDVW -7(DI), Y11, K2, Y18 // 62e2a52a7297f9ffffff + VPSHRDVW Y5, Y24, K2, Y18 // 62e2bd2272d5 + VPSHRDVW Y24, Y24, K2, Y18 // 6282bd2272d0 + VPSHRDVW Y21, Y24, K2, Y18 // 62a2bd2272d5 + VPSHRDVW 99(R15)(R15*2), Y24, K2, Y18 // 6282bd2272947f63000000 + VPSHRDVW -7(DI), Y24, K2, Y18 // 62e2bd227297f9ffffff + VPSHRDVW Y5, Y8, K2, Y24 // 6262bd2a72c5 + VPSHRDVW Y24, Y8, K2, Y24 // 6202bd2a72c0 + VPSHRDVW Y21, Y8, K2, Y24 // 6222bd2a72c5 + VPSHRDVW 99(R15)(R15*2), Y8, K2, Y24 // 6202bd2a72847f63000000 + VPSHRDVW -7(DI), Y8, K2, Y24 // 6262bd2a7287f9ffffff + VPSHRDVW Y5, Y11, K2, Y24 // 6262a52a72c5 + VPSHRDVW Y24, Y11, K2, Y24 // 6202a52a72c0 + VPSHRDVW Y21, Y11, K2, Y24 // 6222a52a72c5 + VPSHRDVW 99(R15)(R15*2), Y11, K2, Y24 // 6202a52a72847f63000000 + VPSHRDVW -7(DI), Y11, K2, Y24 // 6262a52a7287f9ffffff + VPSHRDVW Y5, Y24, K2, Y24 // 6262bd2272c5 + VPSHRDVW Y24, Y24, K2, Y24 // 6202bd2272c0 + VPSHRDVW Y21, Y24, K2, Y24 // 6222bd2272c5 + VPSHRDVW 99(R15)(R15*2), Y24, K2, Y24 // 6202bd2272847f63000000 + VPSHRDVW -7(DI), Y24, K2, Y24 // 6262bd227287f9ffffff + VPSHRDVW Z16, Z21, K4, Z8 // 6232d54472c0 + VPSHRDVW Z13, Z21, K4, Z8 // 6252d54472c5 + VPSHRDVW 17(SP)(BP*2), Z21, K4, Z8 // 6272d54472846c11000000 + VPSHRDVW -7(DI)(R8*4), Z21, K4, Z8 // 6232d544728487f9ffffff + VPSHRDVW Z16, 
Z5, K4, Z8 // 6232d54c72c0 + VPSHRDVW Z13, Z5, K4, Z8 // 6252d54c72c5 + VPSHRDVW 17(SP)(BP*2), Z5, K4, Z8 // 6272d54c72846c11000000 + VPSHRDVW -7(DI)(R8*4), Z5, K4, Z8 // 6232d54c728487f9ffffff + VPSHRDVW Z16, Z21, K4, Z28 // 6222d54472e0 + VPSHRDVW Z13, Z21, K4, Z28 // 6242d54472e5 + VPSHRDVW 17(SP)(BP*2), Z21, K4, Z28 // 6262d54472a46c11000000 + VPSHRDVW -7(DI)(R8*4), Z21, K4, Z28 // 6222d54472a487f9ffffff + VPSHRDVW Z16, Z5, K4, Z28 // 6222d54c72e0 + VPSHRDVW Z13, Z5, K4, Z28 // 6242d54c72e5 + VPSHRDVW 17(SP)(BP*2), Z5, K4, Z28 // 6262d54c72a46c11000000 + VPSHRDVW -7(DI)(R8*4), Z5, K4, Z28 // 6222d54c72a487f9ffffff + VPSHRDW $27, X15, X0, K1, X22 // 62c3fd0972f71b + VPSHRDW $27, X11, X0, K1, X22 // 62c3fd0972f31b + VPSHRDW $27, X0, X0, K1, X22 // 62e3fd0972f01b + VPSHRDW $27, (BX), X0, K1, X22 // 62e3fd0972331b + VPSHRDW $27, -17(BP)(SI*1), X0, K1, X22 // 62e3fd0972b435efffffff1b + VPSHRDW $27, X15, X17, K1, X22 // 62c3f50172f71b + VPSHRDW $27, X11, X17, K1, X22 // 62c3f50172f31b + VPSHRDW $27, X0, X17, K1, X22 // 62e3f50172f01b + VPSHRDW $27, (BX), X17, K1, X22 // 62e3f50172331b + VPSHRDW $27, -17(BP)(SI*1), X17, K1, X22 // 62e3f50172b435efffffff1b + VPSHRDW $27, X15, X7, K1, X22 // 62c3c50972f71b + VPSHRDW $27, X11, X7, K1, X22 // 62c3c50972f31b + VPSHRDW $27, X0, X7, K1, X22 // 62e3c50972f01b + VPSHRDW $27, (BX), X7, K1, X22 // 62e3c50972331b + VPSHRDW $27, -17(BP)(SI*1), X7, K1, X22 // 62e3c50972b435efffffff1b + VPSHRDW $27, X15, X0, K1, X5 // 62d3fd0972ef1b + VPSHRDW $27, X11, X0, K1, X5 // 62d3fd0972eb1b + VPSHRDW $27, X0, X0, K1, X5 // 62f3fd0972e81b + VPSHRDW $27, (BX), X0, K1, X5 // 62f3fd09722b1b + VPSHRDW $27, -17(BP)(SI*1), X0, K1, X5 // 62f3fd0972ac35efffffff1b + VPSHRDW $27, X15, X17, K1, X5 // 62d3f50172ef1b + VPSHRDW $27, X11, X17, K1, X5 // 62d3f50172eb1b + VPSHRDW $27, X0, X17, K1, X5 // 62f3f50172e81b + VPSHRDW $27, (BX), X17, K1, X5 // 62f3f501722b1b + VPSHRDW $27, -17(BP)(SI*1), X17, K1, X5 // 62f3f50172ac35efffffff1b + VPSHRDW $27, X15, X7, 
K1, X5 // 62d3c50972ef1b + VPSHRDW $27, X11, X7, K1, X5 // 62d3c50972eb1b + VPSHRDW $27, X0, X7, K1, X5 // 62f3c50972e81b + VPSHRDW $27, (BX), X7, K1, X5 // 62f3c509722b1b + VPSHRDW $27, -17(BP)(SI*1), X7, K1, X5 // 62f3c50972ac35efffffff1b + VPSHRDW $27, X15, X0, K1, X14 // 6253fd0972f71b + VPSHRDW $27, X11, X0, K1, X14 // 6253fd0972f31b + VPSHRDW $27, X0, X0, K1, X14 // 6273fd0972f01b + VPSHRDW $27, (BX), X0, K1, X14 // 6273fd0972331b + VPSHRDW $27, -17(BP)(SI*1), X0, K1, X14 // 6273fd0972b435efffffff1b + VPSHRDW $27, X15, X17, K1, X14 // 6253f50172f71b + VPSHRDW $27, X11, X17, K1, X14 // 6253f50172f31b + VPSHRDW $27, X0, X17, K1, X14 // 6273f50172f01b + VPSHRDW $27, (BX), X17, K1, X14 // 6273f50172331b + VPSHRDW $27, -17(BP)(SI*1), X17, K1, X14 // 6273f50172b435efffffff1b + VPSHRDW $27, X15, X7, K1, X14 // 6253c50972f71b + VPSHRDW $27, X11, X7, K1, X14 // 6253c50972f31b + VPSHRDW $27, X0, X7, K1, X14 // 6273c50972f01b + VPSHRDW $27, (BX), X7, K1, X14 // 6273c50972331b + VPSHRDW $27, -17(BP)(SI*1), X7, K1, X14 // 6273c50972b435efffffff1b + VPSHRDW $47, Y7, Y9, K3, Y16 // 62e3b52b72c72f + VPSHRDW $47, Y6, Y9, K3, Y16 // 62e3b52b72c62f + VPSHRDW $47, Y26, Y9, K3, Y16 // 6283b52b72c22f + VPSHRDW $47, -7(CX)(DX*1), Y9, K3, Y16 // 62e3b52b728411f9ffffff2f + VPSHRDW $47, -15(R14)(R15*4), Y9, K3, Y16 // 6283b52b7284bef1ffffff2f + VPSHRDW $47, Y7, Y6, K3, Y16 // 62e3cd2b72c72f + VPSHRDW $47, Y6, Y6, K3, Y16 // 62e3cd2b72c62f + VPSHRDW $47, Y26, Y6, K3, Y16 // 6283cd2b72c22f + VPSHRDW $47, -7(CX)(DX*1), Y6, K3, Y16 // 62e3cd2b728411f9ffffff2f + VPSHRDW $47, -15(R14)(R15*4), Y6, K3, Y16 // 6283cd2b7284bef1ffffff2f + VPSHRDW $47, Y7, Y3, K3, Y16 // 62e3e52b72c72f + VPSHRDW $47, Y6, Y3, K3, Y16 // 62e3e52b72c62f + VPSHRDW $47, Y26, Y3, K3, Y16 // 6283e52b72c22f + VPSHRDW $47, -7(CX)(DX*1), Y3, K3, Y16 // 62e3e52b728411f9ffffff2f + VPSHRDW $47, -15(R14)(R15*4), Y3, K3, Y16 // 6283e52b7284bef1ffffff2f + VPSHRDW $47, Y7, Y9, K3, Y9 // 6273b52b72cf2f + VPSHRDW $47, Y6, Y9, K3, 
Y9 // 6273b52b72ce2f + VPSHRDW $47, Y26, Y9, K3, Y9 // 6213b52b72ca2f + VPSHRDW $47, -7(CX)(DX*1), Y9, K3, Y9 // 6273b52b728c11f9ffffff2f + VPSHRDW $47, -15(R14)(R15*4), Y9, K3, Y9 // 6213b52b728cbef1ffffff2f + VPSHRDW $47, Y7, Y6, K3, Y9 // 6273cd2b72cf2f + VPSHRDW $47, Y6, Y6, K3, Y9 // 6273cd2b72ce2f + VPSHRDW $47, Y26, Y6, K3, Y9 // 6213cd2b72ca2f + VPSHRDW $47, -7(CX)(DX*1), Y6, K3, Y9 // 6273cd2b728c11f9ffffff2f + VPSHRDW $47, -15(R14)(R15*4), Y6, K3, Y9 // 6213cd2b728cbef1ffffff2f + VPSHRDW $47, Y7, Y3, K3, Y9 // 6273e52b72cf2f + VPSHRDW $47, Y6, Y3, K3, Y9 // 6273e52b72ce2f + VPSHRDW $47, Y26, Y3, K3, Y9 // 6213e52b72ca2f + VPSHRDW $47, -7(CX)(DX*1), Y3, K3, Y9 // 6273e52b728c11f9ffffff2f + VPSHRDW $47, -15(R14)(R15*4), Y3, K3, Y9 // 6213e52b728cbef1ffffff2f + VPSHRDW $47, Y7, Y9, K3, Y13 // 6273b52b72ef2f + VPSHRDW $47, Y6, Y9, K3, Y13 // 6273b52b72ee2f + VPSHRDW $47, Y26, Y9, K3, Y13 // 6213b52b72ea2f + VPSHRDW $47, -7(CX)(DX*1), Y9, K3, Y13 // 6273b52b72ac11f9ffffff2f + VPSHRDW $47, -15(R14)(R15*4), Y9, K3, Y13 // 6213b52b72acbef1ffffff2f + VPSHRDW $47, Y7, Y6, K3, Y13 // 6273cd2b72ef2f + VPSHRDW $47, Y6, Y6, K3, Y13 // 6273cd2b72ee2f + VPSHRDW $47, Y26, Y6, K3, Y13 // 6213cd2b72ea2f + VPSHRDW $47, -7(CX)(DX*1), Y6, K3, Y13 // 6273cd2b72ac11f9ffffff2f + VPSHRDW $47, -15(R14)(R15*4), Y6, K3, Y13 // 6213cd2b72acbef1ffffff2f + VPSHRDW $47, Y7, Y3, K3, Y13 // 6273e52b72ef2f + VPSHRDW $47, Y6, Y3, K3, Y13 // 6273e52b72ee2f + VPSHRDW $47, Y26, Y3, K3, Y13 // 6213e52b72ea2f + VPSHRDW $47, -7(CX)(DX*1), Y3, K3, Y13 // 6273e52b72ac11f9ffffff2f + VPSHRDW $47, -15(R14)(R15*4), Y3, K3, Y13 // 6213e52b72acbef1ffffff2f + VPSHRDW $82, Z6, Z22, K4, Z12 // 6273cd4472e652 + VPSHRDW $82, Z8, Z22, K4, Z12 // 6253cd4472e052 + VPSHRDW $82, 15(R8), Z22, K4, Z12 // 6253cd4472a00f00000052 + VPSHRDW $82, (BP), Z22, K4, Z12 // 6273cd4472650052 + VPSHRDW $82, Z6, Z11, K4, Z12 // 6273a54c72e652 + VPSHRDW $82, Z8, Z11, K4, Z12 // 6253a54c72e052 + VPSHRDW $82, 15(R8), Z11, K4, Z12 // 
6253a54c72a00f00000052 + VPSHRDW $82, (BP), Z11, K4, Z12 // 6273a54c72650052 + VPSHRDW $82, Z6, Z22, K4, Z27 // 6263cd4472de52 + VPSHRDW $82, Z8, Z22, K4, Z27 // 6243cd4472d852 + VPSHRDW $82, 15(R8), Z22, K4, Z27 // 6243cd4472980f00000052 + VPSHRDW $82, (BP), Z22, K4, Z27 // 6263cd44725d0052 + VPSHRDW $82, Z6, Z11, K4, Z27 // 6263a54c72de52 + VPSHRDW $82, Z8, Z11, K4, Z27 // 6243a54c72d852 + VPSHRDW $82, 15(R8), Z11, K4, Z27 // 6243a54c72980f00000052 + VPSHRDW $82, (BP), Z11, K4, Z27 // 6263a54c725d0052 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_vnni.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_vnni.s new file mode 100644 index 0000000000000000000000000000000000000000..ce450a5247aed4df46b7678c7933ac82640fedf7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_vnni.s @@ -0,0 +1,400 @@ +// Code generated by avx512test. DO NOT EDIT. + +#include "../../../../../../runtime/textflag.h" + +TEXT asmtest_avx512_vnni(SB), NOSPLIT, $0 + VPDPBUSD X15, X16, K2, X6 // 62d27d0250f7 + VPDPBUSD X11, X16, K2, X6 // 62d27d0250f3 + VPDPBUSD X1, X16, K2, X6 // 62f27d0250f1 + VPDPBUSD -15(R14)(R15*1), X16, K2, X6 // 62927d0250b43ef1ffffff + VPDPBUSD -15(BX), X16, K2, X6 // 62f27d0250b3f1ffffff + VPDPBUSD X15, X28, K2, X6 // 62d21d0250f7 + VPDPBUSD X11, X28, K2, X6 // 62d21d0250f3 + VPDPBUSD X1, X28, K2, X6 // 62f21d0250f1 + VPDPBUSD -15(R14)(R15*1), X28, K2, X6 // 62921d0250b43ef1ffffff + VPDPBUSD -15(BX), X28, K2, X6 // 62f21d0250b3f1ffffff + VPDPBUSD X15, X8, K2, X6 // 62d23d0a50f7 + VPDPBUSD X11, X8, K2, X6 // 62d23d0a50f3 + VPDPBUSD X1, X8, K2, X6 // 62f23d0a50f1 + VPDPBUSD -15(R14)(R15*1), X8, K2, X6 // 62923d0a50b43ef1ffffff + VPDPBUSD -15(BX), X8, K2, X6 // 62f23d0a50b3f1ffffff + VPDPBUSD X15, X16, K2, X22 // 62c27d0250f7 + VPDPBUSD X11, X16, K2, X22 // 62c27d0250f3 + VPDPBUSD X1, X16, K2, X22 // 62e27d0250f1 + VPDPBUSD 
-15(R14)(R15*1), X16, K2, X22 // 62827d0250b43ef1ffffff + VPDPBUSD -15(BX), X16, K2, X22 // 62e27d0250b3f1ffffff + VPDPBUSD X15, X28, K2, X22 // 62c21d0250f7 + VPDPBUSD X11, X28, K2, X22 // 62c21d0250f3 + VPDPBUSD X1, X28, K2, X22 // 62e21d0250f1 + VPDPBUSD -15(R14)(R15*1), X28, K2, X22 // 62821d0250b43ef1ffffff + VPDPBUSD -15(BX), X28, K2, X22 // 62e21d0250b3f1ffffff + VPDPBUSD X15, X8, K2, X22 // 62c23d0a50f7 + VPDPBUSD X11, X8, K2, X22 // 62c23d0a50f3 + VPDPBUSD X1, X8, K2, X22 // 62e23d0a50f1 + VPDPBUSD -15(R14)(R15*1), X8, K2, X22 // 62823d0a50b43ef1ffffff + VPDPBUSD -15(BX), X8, K2, X22 // 62e23d0a50b3f1ffffff + VPDPBUSD X15, X16, K2, X12 // 62527d0250e7 + VPDPBUSD X11, X16, K2, X12 // 62527d0250e3 + VPDPBUSD X1, X16, K2, X12 // 62727d0250e1 + VPDPBUSD -15(R14)(R15*1), X16, K2, X12 // 62127d0250a43ef1ffffff + VPDPBUSD -15(BX), X16, K2, X12 // 62727d0250a3f1ffffff + VPDPBUSD X15, X28, K2, X12 // 62521d0250e7 + VPDPBUSD X11, X28, K2, X12 // 62521d0250e3 + VPDPBUSD X1, X28, K2, X12 // 62721d0250e1 + VPDPBUSD -15(R14)(R15*1), X28, K2, X12 // 62121d0250a43ef1ffffff + VPDPBUSD -15(BX), X28, K2, X12 // 62721d0250a3f1ffffff + VPDPBUSD X15, X8, K2, X12 // 62523d0a50e7 + VPDPBUSD X11, X8, K2, X12 // 62523d0a50e3 + VPDPBUSD X1, X8, K2, X12 // 62723d0a50e1 + VPDPBUSD -15(R14)(R15*1), X8, K2, X12 // 62123d0a50a43ef1ffffff + VPDPBUSD -15(BX), X8, K2, X12 // 62723d0a50a3f1ffffff + VPDPBUSD Y11, Y28, K5, Y20 // 62c21d2550e3 + VPDPBUSD Y27, Y28, K5, Y20 // 62821d2550e3 + VPDPBUSD Y17, Y28, K5, Y20 // 62a21d2550e1 + VPDPBUSD (AX), Y28, K5, Y20 // 62e21d255020 + VPDPBUSD 7(SI), Y28, K5, Y20 // 62e21d2550a607000000 + VPDPBUSD Y11, Y1, K5, Y20 // 62c2752d50e3 + VPDPBUSD Y27, Y1, K5, Y20 // 6282752d50e3 + VPDPBUSD Y17, Y1, K5, Y20 // 62a2752d50e1 + VPDPBUSD (AX), Y1, K5, Y20 // 62e2752d5020 + VPDPBUSD 7(SI), Y1, K5, Y20 // 62e2752d50a607000000 + VPDPBUSD Y11, Y8, K5, Y20 // 62c23d2d50e3 + VPDPBUSD Y27, Y8, K5, Y20 // 62823d2d50e3 + VPDPBUSD Y17, Y8, K5, Y20 // 62a23d2d50e1 + 
VPDPBUSD (AX), Y8, K5, Y20 // 62e23d2d5020 + VPDPBUSD 7(SI), Y8, K5, Y20 // 62e23d2d50a607000000 + VPDPBUSD Y11, Y28, K5, Y9 // 62521d2550cb + VPDPBUSD Y27, Y28, K5, Y9 // 62121d2550cb + VPDPBUSD Y17, Y28, K5, Y9 // 62321d2550c9 + VPDPBUSD (AX), Y28, K5, Y9 // 62721d255008 + VPDPBUSD 7(SI), Y28, K5, Y9 // 62721d25508e07000000 + VPDPBUSD Y11, Y1, K5, Y9 // 6252752d50cb + VPDPBUSD Y27, Y1, K5, Y9 // 6212752d50cb + VPDPBUSD Y17, Y1, K5, Y9 // 6232752d50c9 + VPDPBUSD (AX), Y1, K5, Y9 // 6272752d5008 + VPDPBUSD 7(SI), Y1, K5, Y9 // 6272752d508e07000000 + VPDPBUSD Y11, Y8, K5, Y9 // 62523d2d50cb + VPDPBUSD Y27, Y8, K5, Y9 // 62123d2d50cb + VPDPBUSD Y17, Y8, K5, Y9 // 62323d2d50c9 + VPDPBUSD (AX), Y8, K5, Y9 // 62723d2d5008 + VPDPBUSD 7(SI), Y8, K5, Y9 // 62723d2d508e07000000 + VPDPBUSD Y11, Y28, K5, Y28 // 62421d2550e3 + VPDPBUSD Y27, Y28, K5, Y28 // 62021d2550e3 + VPDPBUSD Y17, Y28, K5, Y28 // 62221d2550e1 + VPDPBUSD (AX), Y28, K5, Y28 // 62621d255020 + VPDPBUSD 7(SI), Y28, K5, Y28 // 62621d2550a607000000 + VPDPBUSD Y11, Y1, K5, Y28 // 6242752d50e3 + VPDPBUSD Y27, Y1, K5, Y28 // 6202752d50e3 + VPDPBUSD Y17, Y1, K5, Y28 // 6222752d50e1 + VPDPBUSD (AX), Y1, K5, Y28 // 6262752d5020 + VPDPBUSD 7(SI), Y1, K5, Y28 // 6262752d50a607000000 + VPDPBUSD Y11, Y8, K5, Y28 // 62423d2d50e3 + VPDPBUSD Y27, Y8, K5, Y28 // 62023d2d50e3 + VPDPBUSD Y17, Y8, K5, Y28 // 62223d2d50e1 + VPDPBUSD (AX), Y8, K5, Y28 // 62623d2d5020 + VPDPBUSD 7(SI), Y8, K5, Y28 // 62623d2d50a607000000 + VPDPBUSD Z8, Z23, K3, Z23 // 62c2454350f8 + VPDPBUSD Z28, Z23, K3, Z23 // 6282454350fc + VPDPBUSD (SI), Z23, K3, Z23 // 62e24543503e + VPDPBUSD 7(SI)(DI*2), Z23, K3, Z23 // 62e2454350bc7e07000000 + VPDPBUSD Z8, Z6, K3, Z23 // 62c24d4b50f8 + VPDPBUSD Z28, Z6, K3, Z23 // 62824d4b50fc + VPDPBUSD (SI), Z6, K3, Z23 // 62e24d4b503e + VPDPBUSD 7(SI)(DI*2), Z6, K3, Z23 // 62e24d4b50bc7e07000000 + VPDPBUSD Z8, Z23, K3, Z5 // 62d2454350e8 + VPDPBUSD Z28, Z23, K3, Z5 // 6292454350ec + VPDPBUSD (SI), Z23, K3, Z5 // 
62f24543502e + VPDPBUSD 7(SI)(DI*2), Z23, K3, Z5 // 62f2454350ac7e07000000 + VPDPBUSD Z8, Z6, K3, Z5 // 62d24d4b50e8 + VPDPBUSD Z28, Z6, K3, Z5 // 62924d4b50ec + VPDPBUSD (SI), Z6, K3, Z5 // 62f24d4b502e + VPDPBUSD 7(SI)(DI*2), Z6, K3, Z5 // 62f24d4b50ac7e07000000 + VPDPBUSDS X25, X14, K4, X19 // 62820d0c51d9 + VPDPBUSDS X11, X14, K4, X19 // 62c20d0c51db + VPDPBUSDS X17, X14, K4, X19 // 62a20d0c51d9 + VPDPBUSDS 7(AX)(CX*4), X14, K4, X19 // 62e20d0c519c8807000000 + VPDPBUSDS 7(AX)(CX*1), X14, K4, X19 // 62e20d0c519c0807000000 + VPDPBUSDS X25, X0, K4, X19 // 62827d0c51d9 + VPDPBUSDS X11, X0, K4, X19 // 62c27d0c51db + VPDPBUSDS X17, X0, K4, X19 // 62a27d0c51d9 + VPDPBUSDS 7(AX)(CX*4), X0, K4, X19 // 62e27d0c519c8807000000 + VPDPBUSDS 7(AX)(CX*1), X0, K4, X19 // 62e27d0c519c0807000000 + VPDPBUSDS X25, X14, K4, X13 // 62120d0c51e9 + VPDPBUSDS X11, X14, K4, X13 // 62520d0c51eb + VPDPBUSDS X17, X14, K4, X13 // 62320d0c51e9 + VPDPBUSDS 7(AX)(CX*4), X14, K4, X13 // 62720d0c51ac8807000000 + VPDPBUSDS 7(AX)(CX*1), X14, K4, X13 // 62720d0c51ac0807000000 + VPDPBUSDS X25, X0, K4, X13 // 62127d0c51e9 + VPDPBUSDS X11, X0, K4, X13 // 62527d0c51eb + VPDPBUSDS X17, X0, K4, X13 // 62327d0c51e9 + VPDPBUSDS 7(AX)(CX*4), X0, K4, X13 // 62727d0c51ac8807000000 + VPDPBUSDS 7(AX)(CX*1), X0, K4, X13 // 62727d0c51ac0807000000 + VPDPBUSDS X25, X14, K4, X2 // 62920d0c51d1 + VPDPBUSDS X11, X14, K4, X2 // 62d20d0c51d3 + VPDPBUSDS X17, X14, K4, X2 // 62b20d0c51d1 + VPDPBUSDS 7(AX)(CX*4), X14, K4, X2 // 62f20d0c51948807000000 + VPDPBUSDS 7(AX)(CX*1), X14, K4, X2 // 62f20d0c51940807000000 + VPDPBUSDS X25, X0, K4, X2 // 62927d0c51d1 + VPDPBUSDS X11, X0, K4, X2 // 62d27d0c51d3 + VPDPBUSDS X17, X0, K4, X2 // 62b27d0c51d1 + VPDPBUSDS 7(AX)(CX*4), X0, K4, X2 // 62f27d0c51948807000000 + VPDPBUSDS 7(AX)(CX*1), X0, K4, X2 // 62f27d0c51940807000000 + VPDPBUSDS Y28, Y26, K2, Y16 // 62822d2251c4 + VPDPBUSDS Y1, Y26, K2, Y16 // 62e22d2251c1 + VPDPBUSDS Y23, Y26, K2, Y16 // 62a22d2251c7 + VPDPBUSDS (BX), Y26, K2, 
Y16 // 62e22d225103 + VPDPBUSDS -17(BP)(SI*1), Y26, K2, Y16 // 62e22d22518435efffffff + VPDPBUSDS Y28, Y3, K2, Y16 // 6282652a51c4 + VPDPBUSDS Y1, Y3, K2, Y16 // 62e2652a51c1 + VPDPBUSDS Y23, Y3, K2, Y16 // 62a2652a51c7 + VPDPBUSDS (BX), Y3, K2, Y16 // 62e2652a5103 + VPDPBUSDS -17(BP)(SI*1), Y3, K2, Y16 // 62e2652a518435efffffff + VPDPBUSDS Y28, Y8, K2, Y16 // 62823d2a51c4 + VPDPBUSDS Y1, Y8, K2, Y16 // 62e23d2a51c1 + VPDPBUSDS Y23, Y8, K2, Y16 // 62a23d2a51c7 + VPDPBUSDS (BX), Y8, K2, Y16 // 62e23d2a5103 + VPDPBUSDS -17(BP)(SI*1), Y8, K2, Y16 // 62e23d2a518435efffffff + VPDPBUSDS Y28, Y26, K2, Y12 // 62122d2251e4 + VPDPBUSDS Y1, Y26, K2, Y12 // 62722d2251e1 + VPDPBUSDS Y23, Y26, K2, Y12 // 62322d2251e7 + VPDPBUSDS (BX), Y26, K2, Y12 // 62722d225123 + VPDPBUSDS -17(BP)(SI*1), Y26, K2, Y12 // 62722d2251a435efffffff + VPDPBUSDS Y28, Y3, K2, Y12 // 6212652a51e4 + VPDPBUSDS Y1, Y3, K2, Y12 // 6272652a51e1 + VPDPBUSDS Y23, Y3, K2, Y12 // 6232652a51e7 + VPDPBUSDS (BX), Y3, K2, Y12 // 6272652a5123 + VPDPBUSDS -17(BP)(SI*1), Y3, K2, Y12 // 6272652a51a435efffffff + VPDPBUSDS Y28, Y8, K2, Y12 // 62123d2a51e4 + VPDPBUSDS Y1, Y8, K2, Y12 // 62723d2a51e1 + VPDPBUSDS Y23, Y8, K2, Y12 // 62323d2a51e7 + VPDPBUSDS (BX), Y8, K2, Y12 // 62723d2a5123 + VPDPBUSDS -17(BP)(SI*1), Y8, K2, Y12 // 62723d2a51a435efffffff + VPDPBUSDS Y28, Y26, K2, Y6 // 62922d2251f4 + VPDPBUSDS Y1, Y26, K2, Y6 // 62f22d2251f1 + VPDPBUSDS Y23, Y26, K2, Y6 // 62b22d2251f7 + VPDPBUSDS (BX), Y26, K2, Y6 // 62f22d225133 + VPDPBUSDS -17(BP)(SI*1), Y26, K2, Y6 // 62f22d2251b435efffffff + VPDPBUSDS Y28, Y3, K2, Y6 // 6292652a51f4 + VPDPBUSDS Y1, Y3, K2, Y6 // 62f2652a51f1 + VPDPBUSDS Y23, Y3, K2, Y6 // 62b2652a51f7 + VPDPBUSDS (BX), Y3, K2, Y6 // 62f2652a5133 + VPDPBUSDS -17(BP)(SI*1), Y3, K2, Y6 // 62f2652a51b435efffffff + VPDPBUSDS Y28, Y8, K2, Y6 // 62923d2a51f4 + VPDPBUSDS Y1, Y8, K2, Y6 // 62f23d2a51f1 + VPDPBUSDS Y23, Y8, K2, Y6 // 62b23d2a51f7 + VPDPBUSDS (BX), Y8, K2, Y6 // 62f23d2a5133 + VPDPBUSDS 
-17(BP)(SI*1), Y8, K2, Y6 // 62f23d2a51b435efffffff + VPDPBUSDS Z12, Z16, K2, Z21 // 62c27d4251ec + VPDPBUSDS Z27, Z16, K2, Z21 // 62827d4251eb + VPDPBUSDS 17(SP)(BP*8), Z16, K2, Z21 // 62e27d4251acec11000000 + VPDPBUSDS 17(SP)(BP*4), Z16, K2, Z21 // 62e27d4251acac11000000 + VPDPBUSDS Z12, Z13, K2, Z21 // 62c2154a51ec + VPDPBUSDS Z27, Z13, K2, Z21 // 6282154a51eb + VPDPBUSDS 17(SP)(BP*8), Z13, K2, Z21 // 62e2154a51acec11000000 + VPDPBUSDS 17(SP)(BP*4), Z13, K2, Z21 // 62e2154a51acac11000000 + VPDPBUSDS Z12, Z16, K2, Z5 // 62d27d4251ec + VPDPBUSDS Z27, Z16, K2, Z5 // 62927d4251eb + VPDPBUSDS 17(SP)(BP*8), Z16, K2, Z5 // 62f27d4251acec11000000 + VPDPBUSDS 17(SP)(BP*4), Z16, K2, Z5 // 62f27d4251acac11000000 + VPDPBUSDS Z12, Z13, K2, Z5 // 62d2154a51ec + VPDPBUSDS Z27, Z13, K2, Z5 // 6292154a51eb + VPDPBUSDS 17(SP)(BP*8), Z13, K2, Z5 // 62f2154a51acec11000000 + VPDPBUSDS 17(SP)(BP*4), Z13, K2, Z5 // 62f2154a51acac11000000 + VPDPWSSD X2, X2, K3, X18 // 62e26d0b52d2 + VPDPWSSD X27, X2, K3, X18 // 62826d0b52d3 + VPDPWSSD X26, X2, K3, X18 // 62826d0b52d2 + VPDPWSSD (SI), X2, K3, X18 // 62e26d0b5216 + VPDPWSSD 7(SI)(DI*2), X2, K3, X18 // 62e26d0b52947e07000000 + VPDPWSSD X2, X24, K3, X18 // 62e23d0352d2 + VPDPWSSD X27, X24, K3, X18 // 62823d0352d3 + VPDPWSSD X26, X24, K3, X18 // 62823d0352d2 + VPDPWSSD (SI), X24, K3, X18 // 62e23d035216 + VPDPWSSD 7(SI)(DI*2), X24, K3, X18 // 62e23d0352947e07000000 + VPDPWSSD X2, X2, K3, X11 // 62726d0b52da + VPDPWSSD X27, X2, K3, X11 // 62126d0b52db + VPDPWSSD X26, X2, K3, X11 // 62126d0b52da + VPDPWSSD (SI), X2, K3, X11 // 62726d0b521e + VPDPWSSD 7(SI)(DI*2), X2, K3, X11 // 62726d0b529c7e07000000 + VPDPWSSD X2, X24, K3, X11 // 62723d0352da + VPDPWSSD X27, X24, K3, X11 // 62123d0352db + VPDPWSSD X26, X24, K3, X11 // 62123d0352da + VPDPWSSD (SI), X24, K3, X11 // 62723d03521e + VPDPWSSD 7(SI)(DI*2), X24, K3, X11 // 62723d03529c7e07000000 + VPDPWSSD X2, X2, K3, X9 // 62726d0b52ca + VPDPWSSD X27, X2, K3, X9 // 62126d0b52cb + VPDPWSSD X26, X2, 
K3, X9 // 62126d0b52ca + VPDPWSSD (SI), X2, K3, X9 // 62726d0b520e + VPDPWSSD 7(SI)(DI*2), X2, K3, X9 // 62726d0b528c7e07000000 + VPDPWSSD X2, X24, K3, X9 // 62723d0352ca + VPDPWSSD X27, X24, K3, X9 // 62123d0352cb + VPDPWSSD X26, X24, K3, X9 // 62123d0352ca + VPDPWSSD (SI), X24, K3, X9 // 62723d03520e + VPDPWSSD 7(SI)(DI*2), X24, K3, X9 // 62723d03528c7e07000000 + VPDPWSSD Y8, Y2, K3, Y14 // 62526d2b52f0 + VPDPWSSD Y9, Y2, K3, Y14 // 62526d2b52f1 + VPDPWSSD Y22, Y2, K3, Y14 // 62326d2b52f6 + VPDPWSSD 15(R8)(R14*4), Y2, K3, Y14 // 62126d2b52b4b00f000000 + VPDPWSSD -7(CX)(DX*4), Y2, K3, Y14 // 62726d2b52b491f9ffffff + VPDPWSSD Y8, Y22, K3, Y14 // 62524d2352f0 + VPDPWSSD Y9, Y22, K3, Y14 // 62524d2352f1 + VPDPWSSD Y22, Y22, K3, Y14 // 62324d2352f6 + VPDPWSSD 15(R8)(R14*4), Y22, K3, Y14 // 62124d2352b4b00f000000 + VPDPWSSD -7(CX)(DX*4), Y22, K3, Y14 // 62724d2352b491f9ffffff + VPDPWSSD Y8, Y27, K3, Y14 // 6252252352f0 + VPDPWSSD Y9, Y27, K3, Y14 // 6252252352f1 + VPDPWSSD Y22, Y27, K3, Y14 // 6232252352f6 + VPDPWSSD 15(R8)(R14*4), Y27, K3, Y14 // 6212252352b4b00f000000 + VPDPWSSD -7(CX)(DX*4), Y27, K3, Y14 // 6272252352b491f9ffffff + VPDPWSSD Y8, Y2, K3, Y31 // 62426d2b52f8 + VPDPWSSD Y9, Y2, K3, Y31 // 62426d2b52f9 + VPDPWSSD Y22, Y2, K3, Y31 // 62226d2b52fe + VPDPWSSD 15(R8)(R14*4), Y2, K3, Y31 // 62026d2b52bcb00f000000 + VPDPWSSD -7(CX)(DX*4), Y2, K3, Y31 // 62626d2b52bc91f9ffffff + VPDPWSSD Y8, Y22, K3, Y31 // 62424d2352f8 + VPDPWSSD Y9, Y22, K3, Y31 // 62424d2352f9 + VPDPWSSD Y22, Y22, K3, Y31 // 62224d2352fe + VPDPWSSD 15(R8)(R14*4), Y22, K3, Y31 // 62024d2352bcb00f000000 + VPDPWSSD -7(CX)(DX*4), Y22, K3, Y31 // 62624d2352bc91f9ffffff + VPDPWSSD Y8, Y27, K3, Y31 // 6242252352f8 + VPDPWSSD Y9, Y27, K3, Y31 // 6242252352f9 + VPDPWSSD Y22, Y27, K3, Y31 // 6222252352fe + VPDPWSSD 15(R8)(R14*4), Y27, K3, Y31 // 6202252352bcb00f000000 + VPDPWSSD -7(CX)(DX*4), Y27, K3, Y31 // 6262252352bc91f9ffffff + VPDPWSSD Y8, Y2, K3, Y25 // 62426d2b52c8 + VPDPWSSD Y9, Y2, K3, Y25 
// 62426d2b52c9 + VPDPWSSD Y22, Y2, K3, Y25 // 62226d2b52ce + VPDPWSSD 15(R8)(R14*4), Y2, K3, Y25 // 62026d2b528cb00f000000 + VPDPWSSD -7(CX)(DX*4), Y2, K3, Y25 // 62626d2b528c91f9ffffff + VPDPWSSD Y8, Y22, K3, Y25 // 62424d2352c8 + VPDPWSSD Y9, Y22, K3, Y25 // 62424d2352c9 + VPDPWSSD Y22, Y22, K3, Y25 // 62224d2352ce + VPDPWSSD 15(R8)(R14*4), Y22, K3, Y25 // 62024d23528cb00f000000 + VPDPWSSD -7(CX)(DX*4), Y22, K3, Y25 // 62624d23528c91f9ffffff + VPDPWSSD Y8, Y27, K3, Y25 // 6242252352c8 + VPDPWSSD Y9, Y27, K3, Y25 // 6242252352c9 + VPDPWSSD Y22, Y27, K3, Y25 // 6222252352ce + VPDPWSSD 15(R8)(R14*4), Y27, K3, Y25 // 62022523528cb00f000000 + VPDPWSSD -7(CX)(DX*4), Y27, K3, Y25 // 62622523528c91f9ffffff + VPDPWSSD Z25, Z6, K3, Z22 // 62824d4b52f1 + VPDPWSSD Z12, Z6, K3, Z22 // 62c24d4b52f4 + VPDPWSSD 7(SI)(DI*4), Z6, K3, Z22 // 62e24d4b52b4be07000000 + VPDPWSSD -7(DI)(R8*2), Z6, K3, Z22 // 62a24d4b52b447f9ffffff + VPDPWSSD Z25, Z8, K3, Z22 // 62823d4b52f1 + VPDPWSSD Z12, Z8, K3, Z22 // 62c23d4b52f4 + VPDPWSSD 7(SI)(DI*4), Z8, K3, Z22 // 62e23d4b52b4be07000000 + VPDPWSSD -7(DI)(R8*2), Z8, K3, Z22 // 62a23d4b52b447f9ffffff + VPDPWSSD Z25, Z6, K3, Z11 // 62124d4b52d9 + VPDPWSSD Z12, Z6, K3, Z11 // 62524d4b52dc + VPDPWSSD 7(SI)(DI*4), Z6, K3, Z11 // 62724d4b529cbe07000000 + VPDPWSSD -7(DI)(R8*2), Z6, K3, Z11 // 62324d4b529c47f9ffffff + VPDPWSSD Z25, Z8, K3, Z11 // 62123d4b52d9 + VPDPWSSD Z12, Z8, K3, Z11 // 62523d4b52dc + VPDPWSSD 7(SI)(DI*4), Z8, K3, Z11 // 62723d4b529cbe07000000 + VPDPWSSD -7(DI)(R8*2), Z8, K3, Z11 // 62323d4b529c47f9ffffff + VPDPWSSDS X13, X11, K2, X22 // 62c2250a53f5 + VPDPWSSDS X6, X11, K2, X22 // 62e2250a53f6 + VPDPWSSDS X12, X11, K2, X22 // 62c2250a53f4 + VPDPWSSDS 17(SP)(BP*8), X11, K2, X22 // 62e2250a53b4ec11000000 + VPDPWSSDS 17(SP)(BP*4), X11, K2, X22 // 62e2250a53b4ac11000000 + VPDPWSSDS X13, X15, K2, X22 // 62c2050a53f5 + VPDPWSSDS X6, X15, K2, X22 // 62e2050a53f6 + VPDPWSSDS X12, X15, K2, X22 // 62c2050a53f4 + VPDPWSSDS 17(SP)(BP*8), X15, 
K2, X22 // 62e2050a53b4ec11000000 + VPDPWSSDS 17(SP)(BP*4), X15, K2, X22 // 62e2050a53b4ac11000000 + VPDPWSSDS X13, X30, K2, X22 // 62c20d0253f5 + VPDPWSSDS X6, X30, K2, X22 // 62e20d0253f6 + VPDPWSSDS X12, X30, K2, X22 // 62c20d0253f4 + VPDPWSSDS 17(SP)(BP*8), X30, K2, X22 // 62e20d0253b4ec11000000 + VPDPWSSDS 17(SP)(BP*4), X30, K2, X22 // 62e20d0253b4ac11000000 + VPDPWSSDS X13, X11, K2, X30 // 6242250a53f5 + VPDPWSSDS X6, X11, K2, X30 // 6262250a53f6 + VPDPWSSDS X12, X11, K2, X30 // 6242250a53f4 + VPDPWSSDS 17(SP)(BP*8), X11, K2, X30 // 6262250a53b4ec11000000 + VPDPWSSDS 17(SP)(BP*4), X11, K2, X30 // 6262250a53b4ac11000000 + VPDPWSSDS X13, X15, K2, X30 // 6242050a53f5 + VPDPWSSDS X6, X15, K2, X30 // 6262050a53f6 + VPDPWSSDS X12, X15, K2, X30 // 6242050a53f4 + VPDPWSSDS 17(SP)(BP*8), X15, K2, X30 // 6262050a53b4ec11000000 + VPDPWSSDS 17(SP)(BP*4), X15, K2, X30 // 6262050a53b4ac11000000 + VPDPWSSDS X13, X30, K2, X30 // 62420d0253f5 + VPDPWSSDS X6, X30, K2, X30 // 62620d0253f6 + VPDPWSSDS X12, X30, K2, X30 // 62420d0253f4 + VPDPWSSDS 17(SP)(BP*8), X30, K2, X30 // 62620d0253b4ec11000000 + VPDPWSSDS 17(SP)(BP*4), X30, K2, X30 // 62620d0253b4ac11000000 + VPDPWSSDS X13, X11, K2, X3 // 62d2250a53dd + VPDPWSSDS X6, X11, K2, X3 // 62f2250a53de + VPDPWSSDS X12, X11, K2, X3 // 62d2250a53dc + VPDPWSSDS 17(SP)(BP*8), X11, K2, X3 // 62f2250a539cec11000000 + VPDPWSSDS 17(SP)(BP*4), X11, K2, X3 // 62f2250a539cac11000000 + VPDPWSSDS X13, X15, K2, X3 // 62d2050a53dd + VPDPWSSDS X6, X15, K2, X3 // 62f2050a53de + VPDPWSSDS X12, X15, K2, X3 // 62d2050a53dc + VPDPWSSDS 17(SP)(BP*8), X15, K2, X3 // 62f2050a539cec11000000 + VPDPWSSDS 17(SP)(BP*4), X15, K2, X3 // 62f2050a539cac11000000 + VPDPWSSDS X13, X30, K2, X3 // 62d20d0253dd + VPDPWSSDS X6, X30, K2, X3 // 62f20d0253de + VPDPWSSDS X12, X30, K2, X3 // 62d20d0253dc + VPDPWSSDS 17(SP)(BP*8), X30, K2, X3 // 62f20d02539cec11000000 + VPDPWSSDS 17(SP)(BP*4), X30, K2, X3 // 62f20d02539cac11000000 + VPDPWSSDS Y0, Y6, K1, Y9 // 62724d2953c8 + 
VPDPWSSDS Y19, Y6, K1, Y9 // 62324d2953cb + VPDPWSSDS Y31, Y6, K1, Y9 // 62124d2953cf + VPDPWSSDS (R8), Y6, K1, Y9 // 62524d295308 + VPDPWSSDS 15(DX)(BX*2), Y6, K1, Y9 // 62724d29538c5a0f000000 + VPDPWSSDS Y0, Y1, K1, Y9 // 6272752953c8 + VPDPWSSDS Y19, Y1, K1, Y9 // 6232752953cb + VPDPWSSDS Y31, Y1, K1, Y9 // 6212752953cf + VPDPWSSDS (R8), Y1, K1, Y9 // 625275295308 + VPDPWSSDS 15(DX)(BX*2), Y1, K1, Y9 // 62727529538c5a0f000000 + VPDPWSSDS Y0, Y9, K1, Y9 // 6272352953c8 + VPDPWSSDS Y19, Y9, K1, Y9 // 6232352953cb + VPDPWSSDS Y31, Y9, K1, Y9 // 6212352953cf + VPDPWSSDS (R8), Y9, K1, Y9 // 625235295308 + VPDPWSSDS 15(DX)(BX*2), Y9, K1, Y9 // 62723529538c5a0f000000 + VPDPWSSDS Y0, Y6, K1, Y14 // 62724d2953f0 + VPDPWSSDS Y19, Y6, K1, Y14 // 62324d2953f3 + VPDPWSSDS Y31, Y6, K1, Y14 // 62124d2953f7 + VPDPWSSDS (R8), Y6, K1, Y14 // 62524d295330 + VPDPWSSDS 15(DX)(BX*2), Y6, K1, Y14 // 62724d2953b45a0f000000 + VPDPWSSDS Y0, Y1, K1, Y14 // 6272752953f0 + VPDPWSSDS Y19, Y1, K1, Y14 // 6232752953f3 + VPDPWSSDS Y31, Y1, K1, Y14 // 6212752953f7 + VPDPWSSDS (R8), Y1, K1, Y14 // 625275295330 + VPDPWSSDS 15(DX)(BX*2), Y1, K1, Y14 // 6272752953b45a0f000000 + VPDPWSSDS Y0, Y9, K1, Y14 // 6272352953f0 + VPDPWSSDS Y19, Y9, K1, Y14 // 6232352953f3 + VPDPWSSDS Y31, Y9, K1, Y14 // 6212352953f7 + VPDPWSSDS (R8), Y9, K1, Y14 // 625235295330 + VPDPWSSDS 15(DX)(BX*2), Y9, K1, Y14 // 6272352953b45a0f000000 + VPDPWSSDS Y0, Y6, K1, Y1 // 62f24d2953c8 + VPDPWSSDS Y19, Y6, K1, Y1 // 62b24d2953cb + VPDPWSSDS Y31, Y6, K1, Y1 // 62924d2953cf + VPDPWSSDS (R8), Y6, K1, Y1 // 62d24d295308 + VPDPWSSDS 15(DX)(BX*2), Y6, K1, Y1 // 62f24d29538c5a0f000000 + VPDPWSSDS Y0, Y1, K1, Y1 // 62f2752953c8 + VPDPWSSDS Y19, Y1, K1, Y1 // 62b2752953cb + VPDPWSSDS Y31, Y1, K1, Y1 // 6292752953cf + VPDPWSSDS (R8), Y1, K1, Y1 // 62d275295308 + VPDPWSSDS 15(DX)(BX*2), Y1, K1, Y1 // 62f27529538c5a0f000000 + VPDPWSSDS Y0, Y9, K1, Y1 // 62f2352953c8 + VPDPWSSDS Y19, Y9, K1, Y1 // 62b2352953cb + VPDPWSSDS Y31, Y9, K1, Y1 // 
6292352953cf + VPDPWSSDS (R8), Y9, K1, Y1 // 62d235295308 + VPDPWSSDS 15(DX)(BX*2), Y9, K1, Y1 // 62f23529538c5a0f000000 + VPDPWSSDS Z6, Z9, K2, Z12 // 6272354a53e6 + VPDPWSSDS Z25, Z9, K2, Z12 // 6212354a53e1 + VPDPWSSDS 17(SP), Z9, K2, Z12 // 6272354a53a42411000000 + VPDPWSSDS -17(BP)(SI*4), Z9, K2, Z12 // 6272354a53a4b5efffffff + VPDPWSSDS Z6, Z12, K2, Z12 // 62721d4a53e6 + VPDPWSSDS Z25, Z12, K2, Z12 // 62121d4a53e1 + VPDPWSSDS 17(SP), Z12, K2, Z12 // 62721d4a53a42411000000 + VPDPWSSDS -17(BP)(SI*4), Z12, K2, Z12 // 62721d4a53a4b5efffffff + VPDPWSSDS Z6, Z9, K2, Z17 // 62e2354a53ce + VPDPWSSDS Z25, Z9, K2, Z17 // 6282354a53c9 + VPDPWSSDS 17(SP), Z9, K2, Z17 // 62e2354a538c2411000000 + VPDPWSSDS -17(BP)(SI*4), Z9, K2, Z17 // 62e2354a538cb5efffffff + VPDPWSSDS Z6, Z12, K2, Z17 // 62e21d4a53ce + VPDPWSSDS Z25, Z12, K2, Z17 // 62821d4a53c9 + VPDPWSSDS 17(SP), Z12, K2, Z17 // 62e21d4a538c2411000000 + VPDPWSSDS -17(BP)(SI*4), Z12, K2, Z17 // 62e21d4a538cb5efffffff + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_vpopcntdq.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_vpopcntdq.s new file mode 100644 index 0000000000000000000000000000000000000000..d71faec10f5901a419ece1f7c6faea39f0cfc493 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512_vpopcntdq.s @@ -0,0 +1,82 @@ +// Code generated by avx512test. DO NOT EDIT. 
+ +#include "../../../../../../runtime/textflag.h" + +TEXT asmtest_avx512_vpopcntdq(SB), NOSPLIT, $0 + VPOPCNTD X12, K2, X8 // 62527d0a55c4 + VPOPCNTD X16, K2, X8 // 62327d0a55c0 + VPOPCNTD X23, K2, X8 // 62327d0a55c7 + VPOPCNTD (R14), K2, X8 // 62527d0a5506 + VPOPCNTD -7(DI)(R8*8), K2, X8 // 62327d0a5584c7f9ffffff + VPOPCNTD X12, K2, X26 // 62427d0a55d4 + VPOPCNTD X16, K2, X26 // 62227d0a55d0 + VPOPCNTD X23, K2, X26 // 62227d0a55d7 + VPOPCNTD (R14), K2, X26 // 62427d0a5516 + VPOPCNTD -7(DI)(R8*8), K2, X26 // 62227d0a5594c7f9ffffff + VPOPCNTD X12, K2, X23 // 62c27d0a55fc + VPOPCNTD X16, K2, X23 // 62a27d0a55f8 + VPOPCNTD X23, K2, X23 // 62a27d0a55ff + VPOPCNTD (R14), K2, X23 // 62c27d0a553e + VPOPCNTD -7(DI)(R8*8), K2, X23 // 62a27d0a55bcc7f9ffffff + VPOPCNTD Y22, K5, Y26 // 62227d2d55d6 + VPOPCNTD Y3, K5, Y26 // 62627d2d55d3 + VPOPCNTD Y15, K5, Y26 // 62427d2d55d7 + VPOPCNTD -15(R14)(R15*1), K5, Y26 // 62027d2d55943ef1ffffff + VPOPCNTD -15(BX), K5, Y26 // 62627d2d5593f1ffffff + VPOPCNTD Y22, K5, Y30 // 62227d2d55f6 + VPOPCNTD Y3, K5, Y30 // 62627d2d55f3 + VPOPCNTD Y15, K5, Y30 // 62427d2d55f7 + VPOPCNTD -15(R14)(R15*1), K5, Y30 // 62027d2d55b43ef1ffffff + VPOPCNTD -15(BX), K5, Y30 // 62627d2d55b3f1ffffff + VPOPCNTD Y22, K5, Y12 // 62327d2d55e6 + VPOPCNTD Y3, K5, Y12 // 62727d2d55e3 + VPOPCNTD Y15, K5, Y12 // 62527d2d55e7 + VPOPCNTD -15(R14)(R15*1), K5, Y12 // 62127d2d55a43ef1ffffff + VPOPCNTD -15(BX), K5, Y12 // 62727d2d55a3f1ffffff + VPOPCNTD Z2, K3, Z22 // 62e27d4b55f2 + VPOPCNTD Z31, K3, Z22 // 62827d4b55f7 + VPOPCNTD 7(SI)(DI*4), K3, Z22 // 62e27d4b55b4be07000000 + VPOPCNTD -7(DI)(R8*2), K3, Z22 // 62a27d4b55b447f9ffffff + VPOPCNTD Z2, K3, Z7 // 62f27d4b55fa + VPOPCNTD Z31, K3, Z7 // 62927d4b55ff + VPOPCNTD 7(SI)(DI*4), K3, Z7 // 62f27d4b55bcbe07000000 + VPOPCNTD -7(DI)(R8*2), K3, Z7 // 62b27d4b55bc47f9ffffff + VPOPCNTQ X24, K4, X23 // 6282fd0c55f8 + VPOPCNTQ X14, K4, X23 // 62c2fd0c55fe + VPOPCNTQ X0, K4, X23 // 62e2fd0c55f8 + VPOPCNTQ 99(R15)(R15*4), K4, X23 
// 6282fd0c55bcbf63000000 + VPOPCNTQ 15(DX), K4, X23 // 62e2fd0c55ba0f000000 + VPOPCNTQ X24, K4, X11 // 6212fd0c55d8 + VPOPCNTQ X14, K4, X11 // 6252fd0c55de + VPOPCNTQ X0, K4, X11 // 6272fd0c55d8 + VPOPCNTQ 99(R15)(R15*4), K4, X11 // 6212fd0c559cbf63000000 + VPOPCNTQ 15(DX), K4, X11 // 6272fd0c559a0f000000 + VPOPCNTQ X24, K4, X31 // 6202fd0c55f8 + VPOPCNTQ X14, K4, X31 // 6242fd0c55fe + VPOPCNTQ X0, K4, X31 // 6262fd0c55f8 + VPOPCNTQ 99(R15)(R15*4), K4, X31 // 6202fd0c55bcbf63000000 + VPOPCNTQ 15(DX), K4, X31 // 6262fd0c55ba0f000000 + VPOPCNTQ Y5, K2, Y1 // 62f2fd2a55cd + VPOPCNTQ Y17, K2, Y1 // 62b2fd2a55c9 + VPOPCNTQ Y13, K2, Y1 // 62d2fd2a55cd + VPOPCNTQ 7(AX)(CX*4), K2, Y1 // 62f2fd2a558c8807000000 + VPOPCNTQ 7(AX)(CX*1), K2, Y1 // 62f2fd2a558c0807000000 + VPOPCNTQ Y5, K2, Y27 // 6262fd2a55dd + VPOPCNTQ Y17, K2, Y27 // 6222fd2a55d9 + VPOPCNTQ Y13, K2, Y27 // 6242fd2a55dd + VPOPCNTQ 7(AX)(CX*4), K2, Y27 // 6262fd2a559c8807000000 + VPOPCNTQ 7(AX)(CX*1), K2, Y27 // 6262fd2a559c0807000000 + VPOPCNTQ Y5, K2, Y19 // 62e2fd2a55dd + VPOPCNTQ Y17, K2, Y19 // 62a2fd2a55d9 + VPOPCNTQ Y13, K2, Y19 // 62c2fd2a55dd + VPOPCNTQ 7(AX)(CX*4), K2, Y19 // 62e2fd2a559c8807000000 + VPOPCNTQ 7(AX)(CX*1), K2, Y19 // 62e2fd2a559c0807000000 + VPOPCNTQ Z1, K2, Z20 // 62e2fd4a55e1 + VPOPCNTQ Z3, K2, Z20 // 62e2fd4a55e3 + VPOPCNTQ 17(SP), K2, Z20 // 62e2fd4a55a42411000000 + VPOPCNTQ -17(BP)(SI*4), K2, Z20 // 62e2fd4a55a4b5efffffff + VPOPCNTQ Z1, K2, Z9 // 6272fd4a55c9 + VPOPCNTQ Z3, K2, Z9 // 6272fd4a55cb + VPOPCNTQ 17(SP), K2, Z9 // 6272fd4a558c2411000000 + VPOPCNTQ -17(BP)(SI*4), K2, Z9 // 6272fd4a558cb5efffffff + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512bw.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512bw.s new file mode 100644 index 0000000000000000000000000000000000000000..e1ffb72589e6b4a65d71b01221c9dac4cdfab653 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512bw.s @@ -0,0 +1,1939 @@ +// Code generated by avx512test. DO NOT EDIT. + +#include "../../../../../../runtime/textflag.h" + +TEXT asmtest_avx512bw(SB), NOSPLIT, $0 + KADDD K4, K7, K5 // c4e1c54aec + KADDD K6, K7, K5 // c4e1c54aee + KADDD K4, K6, K5 // c4e1cd4aec + KADDD K6, K6, K5 // c4e1cd4aee + KADDD K4, K7, K4 // c4e1c54ae4 + KADDD K6, K7, K4 // c4e1c54ae6 + KADDD K4, K6, K4 // c4e1cd4ae4 + KADDD K6, K6, K4 // c4e1cd4ae6 + KADDQ K4, K5, K0 // c4e1d44ac4 + KADDQ K6, K5, K0 // c4e1d44ac6 + KADDQ K4, K4, K0 // c4e1dc4ac4 + KADDQ K6, K4, K0 // c4e1dc4ac6 + KADDQ K4, K5, K7 // c4e1d44afc + KADDQ K6, K5, K7 // c4e1d44afe + KADDQ K4, K4, K7 // c4e1dc4afc + KADDQ K6, K4, K7 // c4e1dc4afe + KANDD K1, K6, K0 // c4e1cd41c1 + KANDD K5, K6, K0 // c4e1cd41c5 + KANDD K1, K5, K0 // c4e1d541c1 + KANDD K5, K5, K0 // c4e1d541c5 + KANDD K1, K6, K5 // c4e1cd41e9 + KANDD K5, K6, K5 // c4e1cd41ed + KANDD K1, K5, K5 // c4e1d541e9 + KANDD K5, K5, K5 // c4e1d541ed + KANDND K5, K0, K4 // c4e1fd42e5 + KANDND K4, K0, K4 // c4e1fd42e4 + KANDND K5, K7, K4 // c4e1c542e5 + KANDND K4, K7, K4 // c4e1c542e4 + KANDND K5, K0, K6 // c4e1fd42f5 + KANDND K4, K0, K6 // c4e1fd42f4 + KANDND K5, K7, K6 // c4e1c542f5 + KANDND K4, K7, K6 // c4e1c542f4 + KANDNQ K6, K1, K4 // c4e1f442e6 + KANDNQ K7, K1, K4 // c4e1f442e7 + KANDNQ K6, K3, K4 // c4e1e442e6 + KANDNQ K7, K3, K4 // c4e1e442e7 + KANDNQ K6, K1, K6 // c4e1f442f6 + KANDNQ K7, K1, K6 // c4e1f442f7 + KANDNQ K6, K3, K6 // c4e1e442f6 + KANDNQ K7, K3, K6 // c4e1e442f7 + KANDQ K6, K0, K2 // c4e1fc41d6 + KANDQ K5, K0, K2 // c4e1fc41d5 + KANDQ K6, K5, K2 // c4e1d441d6 + KANDQ K5, K5, K2 // c4e1d441d5 + KANDQ K6, K0, K7 // c4e1fc41fe + KANDQ K5, K0, K7 // c4e1fc41fd + KANDQ K6, K5, K7 // c4e1d441fe + KANDQ K5, K5, K7 // c4e1d441fd + KMOVD K1, 17(SP) // c4e1f9914c2411 + KMOVD K3, 17(SP) // c4e1f9915c2411 + KMOVD K1, -17(BP)(SI*4) // c4e1f9914cb5ef + KMOVD K3, -17(BP)(SI*4) // 
c4e1f9915cb5ef + KMOVD K6, R14 // c57b93f6 + KMOVD K7, R14 // c57b93f7 + KMOVD K6, AX // c5fb93c6 + KMOVD K7, AX // c5fb93c7 + KMOVD K4, K6 // c4e1f990f4 + KMOVD K6, K6 // c4e1f990f6 + KMOVD 7(AX), K6 // c4e1f9907007 + KMOVD (DI), K6 // c4e1f99037 + KMOVD K4, K4 // c4e1f990e4 + KMOVD K6, K4 // c4e1f990e6 + KMOVD 7(AX), K4 // c4e1f9906007 + KMOVD (DI), K4 // c4e1f99027 + KMOVD R9, K4 // c4c17b92e1 + KMOVD CX, K4 // c5fb92e1 + KMOVD R9, K5 // c4c17b92e9 + KMOVD CX, K5 // c5fb92e9 + KMOVQ K2, 17(SP) // c4e1f891542411 + KMOVQ K7, 17(SP) // c4e1f8917c2411 + KMOVQ K2, -17(BP)(SI*4) // c4e1f89154b5ef + KMOVQ K7, -17(BP)(SI*4) // c4e1f8917cb5ef + KMOVQ K0, DX // c4e1fb93d0 + KMOVQ K5, DX // c4e1fb93d5 + KMOVQ K0, BP // c4e1fb93e8 + KMOVQ K5, BP // c4e1fb93ed + KMOVQ K1, K6 // c4e1f890f1 + KMOVQ K5, K6 // c4e1f890f5 + KMOVQ 7(AX), K6 // c4e1f8907007 + KMOVQ (DI), K6 // c4e1f89037 + KMOVQ K1, K5 // c4e1f890e9 + KMOVQ K5, K5 // c4e1f890ed + KMOVQ 7(AX), K5 // c4e1f8906807 + KMOVQ (DI), K5 // c4e1f8902f + KMOVQ R10, K3 // c4c1fb92da + KMOVQ CX, K3 // c4e1fb92d9 + KMOVQ R10, K1 // c4c1fb92ca + KMOVQ CX, K1 // c4e1fb92c9 + KNOTD K6, K6 // c4e1f944f6 + KNOTD K4, K6 // c4e1f944f4 + KNOTD K6, K7 // c4e1f944fe + KNOTD K4, K7 // c4e1f944fc + KNOTQ K4, K4 // c4e1f844e4 + KNOTQ K5, K4 // c4e1f844e5 + KNOTQ K4, K6 // c4e1f844f4 + KNOTQ K5, K6 // c4e1f844f5 + KORD K4, K7, K5 // c4e1c545ec + KORD K6, K7, K5 // c4e1c545ee + KORD K4, K6, K5 // c4e1cd45ec + KORD K6, K6, K5 // c4e1cd45ee + KORD K4, K7, K4 // c4e1c545e4 + KORD K6, K7, K4 // c4e1c545e6 + KORD K4, K6, K4 // c4e1cd45e4 + KORD K6, K6, K4 // c4e1cd45e6 + KORQ K4, K5, K0 // c4e1d445c4 + KORQ K6, K5, K0 // c4e1d445c6 + KORQ K4, K4, K0 // c4e1dc45c4 + KORQ K6, K4, K0 // c4e1dc45c6 + KORQ K4, K5, K7 // c4e1d445fc + KORQ K6, K5, K7 // c4e1d445fe + KORQ K4, K4, K7 // c4e1dc45fc + KORQ K6, K4, K7 // c4e1dc45fe + KORTESTD K4, K6 // c4e1f998f4 + KORTESTD K6, K6 // c4e1f998f6 + KORTESTD K4, K4 // c4e1f998e4 + KORTESTD K6, K4 // c4e1f998e6 + 
KORTESTQ K2, K4 // c4e1f898e2 + KORTESTQ K7, K4 // c4e1f898e7 + KORTESTQ K2, K5 // c4e1f898ea + KORTESTQ K7, K5 // c4e1f898ef + KSHIFTLD $0, K5, K0 // c4e37933c500 + KSHIFTLD $0, K4, K0 // c4e37933c400 + KSHIFTLD $0, K5, K7 // c4e37933fd00 + KSHIFTLD $0, K4, K7 // c4e37933fc00 + KSHIFTLQ $97, K1, K4 // c4e3f933e161 + KSHIFTLQ $97, K3, K4 // c4e3f933e361 + KSHIFTLQ $97, K1, K6 // c4e3f933f161 + KSHIFTLQ $97, K3, K6 // c4e3f933f361 + KSHIFTRD $79, K0, K2 // c4e37931d04f + KSHIFTRD $79, K5, K2 // c4e37931d54f + KSHIFTRD $79, K0, K7 // c4e37931f84f + KSHIFTRD $79, K5, K7 // c4e37931fd4f + KSHIFTRQ $64, K1, K6 // c4e3f931f140 + KSHIFTRQ $64, K5, K6 // c4e3f931f540 + KSHIFTRQ $64, K1, K5 // c4e3f931e940 + KSHIFTRQ $64, K5, K5 // c4e3f931ed40 + KTESTD K5, K0 // c4e1f999c5 + KTESTD K4, K0 // c4e1f999c4 + KTESTD K5, K7 // c4e1f999fd + KTESTD K4, K7 // c4e1f999fc + KTESTQ K1, K4 // c4e1f899e1 + KTESTQ K3, K4 // c4e1f899e3 + KTESTQ K1, K6 // c4e1f899f1 + KTESTQ K3, K6 // c4e1f899f3 + KUNPCKDQ K1, K6, K0 // c4e1cc4bc1 + KUNPCKDQ K5, K6, K0 // c4e1cc4bc5 + KUNPCKDQ K1, K5, K0 // c4e1d44bc1 + KUNPCKDQ K5, K5, K0 // c4e1d44bc5 + KUNPCKDQ K1, K6, K5 // c4e1cc4be9 + KUNPCKDQ K5, K6, K5 // c4e1cc4bed + KUNPCKDQ K1, K5, K5 // c4e1d44be9 + KUNPCKDQ K5, K5, K5 // c4e1d44bed + KUNPCKWD K7, K5, K3 // c5d44bdf + KUNPCKWD K6, K5, K3 // c5d44bde + KUNPCKWD K7, K4, K3 // c5dc4bdf + KUNPCKWD K6, K4, K3 // c5dc4bde + KUNPCKWD K7, K5, K1 // c5d44bcf + KUNPCKWD K6, K5, K1 // c5d44bce + KUNPCKWD K7, K4, K1 // c5dc4bcf + KUNPCKWD K6, K4, K1 // c5dc4bce + KXNORD K6, K1, K4 // c4e1f546e6 + KXNORD K7, K1, K4 // c4e1f546e7 + KXNORD K6, K3, K4 // c4e1e546e6 + KXNORD K7, K3, K4 // c4e1e546e7 + KXNORD K6, K1, K6 // c4e1f546f6 + KXNORD K7, K1, K6 // c4e1f546f7 + KXNORD K6, K3, K6 // c4e1e546f6 + KXNORD K7, K3, K6 // c4e1e546f7 + KXNORQ K4, K4, K6 // c4e1dc46f4 + KXNORQ K5, K4, K6 // c4e1dc46f5 + KXNORQ K4, K6, K6 // c4e1cc46f4 + KXNORQ K5, K6, K6 // c4e1cc46f5 + KXNORQ K4, K4, K4 // c4e1dc46e4 + KXNORQ 
K5, K4, K4 // c4e1dc46e5 + KXNORQ K4, K6, K4 // c4e1cc46e4 + KXNORQ K5, K6, K4 // c4e1cc46e5 + KXORD K0, K4, K7 // c4e1dd47f8 + KXORD K7, K4, K7 // c4e1dd47ff + KXORD K0, K6, K7 // c4e1cd47f8 + KXORD K7, K6, K7 // c4e1cd47ff + KXORD K0, K4, K6 // c4e1dd47f0 + KXORD K7, K4, K6 // c4e1dd47f7 + KXORD K0, K6, K6 // c4e1cd47f0 + KXORD K7, K6, K6 // c4e1cd47f7 + KXORQ K1, K4, K5 // c4e1dc47e9 + KXORQ K3, K4, K5 // c4e1dc47eb + KXORQ K1, K6, K5 // c4e1cc47e9 + KXORQ K3, K6, K5 // c4e1cc47eb + KXORQ K1, K4, K4 // c4e1dc47e1 + KXORQ K3, K4, K4 // c4e1dc47e3 + KXORQ K1, K6, K4 // c4e1cc47e1 + KXORQ K3, K6, K4 // c4e1cc47e3 + VDBPSADBW $65, X15, X17, K3, X5 // 62d3750342ef41 + VDBPSADBW $65, 7(AX)(CX*4), X17, K3, X5 // 62f3750342ac880700000041 + VDBPSADBW $65, 7(AX)(CX*1), X17, K3, X5 // 62f3750342ac080700000041 + VDBPSADBW $67, Y17, Y5, K4, Y19 // 62a3552c42d943 + VDBPSADBW $67, 99(R15)(R15*2), Y5, K4, Y19 // 6283552c429c7f6300000043 + VDBPSADBW $67, -7(DI), Y5, K4, Y19 // 62e3552c429ff9ffffff43 + VDBPSADBW $127, Z3, Z5, K2, Z19 // 62e3554a42db7f + VDBPSADBW $127, Z5, Z5, K2, Z19 // 62e3554a42dd7f + VDBPSADBW $127, 17(SP)(BP*1), Z5, K2, Z19 // 62e3554a429c2c110000007f + VDBPSADBW $127, -7(CX)(DX*8), Z5, K2, Z19 // 62e3554a429cd1f9ffffff7f + VDBPSADBW $127, Z3, Z1, K2, Z19 // 62e3754a42db7f + VDBPSADBW $127, Z5, Z1, K2, Z19 // 62e3754a42dd7f + VDBPSADBW $127, 17(SP)(BP*1), Z1, K2, Z19 // 62e3754a429c2c110000007f + VDBPSADBW $127, -7(CX)(DX*8), Z1, K2, Z19 // 62e3754a429cd1f9ffffff7f + VDBPSADBW $127, Z3, Z5, K2, Z15 // 6273554a42fb7f + VDBPSADBW $127, Z5, Z5, K2, Z15 // 6273554a42fd7f + VDBPSADBW $127, 17(SP)(BP*1), Z5, K2, Z15 // 6273554a42bc2c110000007f + VDBPSADBW $127, -7(CX)(DX*8), Z5, K2, Z15 // 6273554a42bcd1f9ffffff7f + VDBPSADBW $127, Z3, Z1, K2, Z15 // 6273754a42fb7f + VDBPSADBW $127, Z5, Z1, K2, Z15 // 6273754a42fd7f + VDBPSADBW $127, 17(SP)(BP*1), Z1, K2, Z15 // 6273754a42bc2c110000007f + VDBPSADBW $127, -7(CX)(DX*8), Z1, K2, Z15 // 6273754a42bcd1f9ffffff7f + 
VMOVDQU16 X14, K1, X16 // 6231ff097ff0 + VMOVDQU16 X14, K1, -17(BP)(SI*2) // 6271ff097fb475efffffff + VMOVDQU16 X14, K1, 7(AX)(CX*2) // 6271ff097fb44807000000 + VMOVDQU16 X14, K1, X11 // 6251ff097ff3 + VMOVDQU16 15(R8)(R14*1), K1, X11 // 6211ff096f9c300f000000 + VMOVDQU16 15(R8)(R14*2), K1, X11 // 6211ff096f9c700f000000 + VMOVDQU16 Y24, K7, Y18 // 6221ff2f7fc2 + VMOVDQU16 Y24, K7, 7(SI)(DI*4) // 6261ff2f7f84be07000000 + VMOVDQU16 Y24, K7, -7(DI)(R8*2) // 6221ff2f7f8447f9ffffff + VMOVDQU16 Y11, K2, Y8 // 6251ff2a7fd8 + VMOVDQU16 17(SP), K2, Y8 // 6271ff2a6f842411000000 + VMOVDQU16 -17(BP)(SI*4), K2, Y8 // 6271ff2a6f84b5efffffff + VMOVDQU16 Z6, K4, Z22 // 62b1ff4c7ff6 + VMOVDQU16 Z8, K4, Z22 // 6231ff4c7fc6 + VMOVDQU16 Z6, K4, Z11 // 62d1ff4c7ff3 + VMOVDQU16 Z8, K4, Z11 // 6251ff4c7fc3 + VMOVDQU16 Z6, K4, (CX) // 62f1ff4c7f31 + VMOVDQU16 Z8, K4, (CX) // 6271ff4c7f01 + VMOVDQU16 Z6, K4, 99(R15) // 62d1ff4c7fb763000000 + VMOVDQU16 Z8, K4, 99(R15) // 6251ff4c7f8763000000 + VMOVDQU16 Z12, K1, Z25 // 6211ff497fe1 + VMOVDQU16 Z17, K1, Z25 // 6281ff497fc9 + VMOVDQU16 99(R15)(R15*2), K1, Z25 // 6201ff496f8c7f63000000 + VMOVDQU16 -7(DI), K1, Z25 // 6261ff496f8ff9ffffff + VMOVDQU16 Z12, K1, Z12 // 6251ff497fe4 + VMOVDQU16 Z17, K1, Z12 // 62c1ff497fcc + VMOVDQU16 99(R15)(R15*2), K1, Z12 // 6211ff496fa47f63000000 + VMOVDQU16 -7(DI), K1, Z12 // 6271ff496fa7f9ffffff + VMOVDQU8 X11, K5, X23 // 62317f0d7fdf + VMOVDQU8 X11, K5, -7(CX)(DX*1) // 62717f0d7f9c11f9ffffff + VMOVDQU8 X11, K5, -15(R14)(R15*4) // 62117f0d7f9cbef1ffffff + VMOVDQU8 X24, K3, X31 // 62017f0b7fc7 + VMOVDQU8 15(DX)(BX*1), K3, X31 // 62617f0b6fbc1a0f000000 + VMOVDQU8 -7(CX)(DX*2), K3, X31 // 62617f0b6fbc51f9ffffff + VMOVDQU8 Y3, K4, Y6 // 62f17f2c7fde + VMOVDQU8 Y3, K4, 7(SI)(DI*1) // 62f17f2c7f9c3e07000000 + VMOVDQU8 Y3, K4, 15(DX)(BX*8) // 62f17f2c7f9cda0f000000 + VMOVDQU8 Y6, K2, Y7 // 62f17f2a7ff7 + VMOVDQU8 -7(DI)(R8*1), K2, Y7 // 62b17f2a6fbc07f9ffffff + VMOVDQU8 (SP), K2, Y7 // 62f17f2a6f3c24 + VMOVDQU8 Z9, 
K2, Z3 // 62717f4a7fcb + VMOVDQU8 Z19, K2, Z3 // 62e17f4a7fdb + VMOVDQU8 Z9, K2, Z30 // 62117f4a7fce + VMOVDQU8 Z19, K2, Z30 // 62817f4a7fde + VMOVDQU8 Z9, K2, 15(R8) // 62517f4a7f880f000000 + VMOVDQU8 Z19, K2, 15(R8) // 62c17f4a7f980f000000 + VMOVDQU8 Z9, K2, (BP) // 62717f4a7f4d00 + VMOVDQU8 Z19, K2, (BP) // 62e17f4a7f5d00 + VMOVDQU8 Z11, K3, Z12 // 62517f4b7fdc + VMOVDQU8 Z5, K3, Z12 // 62d17f4b7fec + VMOVDQU8 15(R8)(R14*8), K3, Z12 // 62117f4b6fa4f00f000000 + VMOVDQU8 -15(R14)(R15*2), K3, Z12 // 62117f4b6fa47ef1ffffff + VMOVDQU8 Z11, K3, Z22 // 62317f4b7fde + VMOVDQU8 Z5, K3, Z22 // 62b17f4b7fee + VMOVDQU8 15(R8)(R14*8), K3, Z22 // 62817f4b6fb4f00f000000 + VMOVDQU8 -15(R14)(R15*2), K3, Z22 // 62817f4b6fb47ef1ffffff + VPABSB X22, K3, X6 // 62b27d0b1cf6 or 62b2fd0b1cf6 + VPABSB -7(CX), K3, X6 // 62f27d0b1cb1f9ffffff or 62f2fd0b1cb1f9ffffff + VPABSB 15(DX)(BX*4), K3, X6 // 62f27d0b1cb49a0f000000 or 62f2fd0b1cb49a0f000000 + VPABSB Y27, K4, Y11 // 62127d2c1cdb or 6212fd2c1cdb + VPABSB 15(DX)(BX*1), K4, Y11 // 62727d2c1c9c1a0f000000 or 6272fd2c1c9c1a0f000000 + VPABSB -7(CX)(DX*2), K4, Y11 // 62727d2c1c9c51f9ffffff or 6272fd2c1c9c51f9ffffff + VPABSB Z6, K5, Z21 // 62e27d4d1cee or 62e2fd4d1cee + VPABSB Z9, K5, Z21 // 62c27d4d1ce9 or 62c2fd4d1ce9 + VPABSB (AX), K5, Z21 // 62e27d4d1c28 or 62e2fd4d1c28 + VPABSB 7(SI), K5, Z21 // 62e27d4d1cae07000000 or 62e2fd4d1cae07000000 + VPABSB Z6, K5, Z9 // 62727d4d1cce or 6272fd4d1cce + VPABSB Z9, K5, Z9 // 62527d4d1cc9 or 6252fd4d1cc9 + VPABSB (AX), K5, Z9 // 62727d4d1c08 or 6272fd4d1c08 + VPABSB 7(SI), K5, Z9 // 62727d4d1c8e07000000 or 6272fd4d1c8e07000000 + VPABSW X11, K4, X15 // 62527d0c1dfb or 6252fd0c1dfb + VPABSW (BX), K4, X15 // 62727d0c1d3b or 6272fd0c1d3b + VPABSW -17(BP)(SI*1), K4, X15 // 62727d0c1dbc35efffffff or 6272fd0c1dbc35efffffff + VPABSW Y3, K7, Y26 // 62627d2f1dd3 or 6262fd2f1dd3 + VPABSW 15(R8), K7, Y26 // 62427d2f1d900f000000 or 6242fd2f1d900f000000 + VPABSW (BP), K7, Y26 // 62627d2f1d5500 or 6262fd2f1d5500 + 
VPABSW Z16, K2, Z7 // 62b27d4a1df8 or 62b2fd4a1df8 + VPABSW Z25, K2, Z7 // 62927d4a1df9 or 6292fd4a1df9 + VPABSW (R8), K2, Z7 // 62d27d4a1d38 or 62d2fd4a1d38 + VPABSW 15(DX)(BX*2), K2, Z7 // 62f27d4a1dbc5a0f000000 or 62f2fd4a1dbc5a0f000000 + VPABSW Z16, K2, Z21 // 62a27d4a1de8 or 62a2fd4a1de8 + VPABSW Z25, K2, Z21 // 62827d4a1de9 or 6282fd4a1de9 + VPABSW (R8), K2, Z21 // 62c27d4a1d28 or 62c2fd4a1d28 + VPABSW 15(DX)(BX*2), K2, Z21 // 62e27d4a1dac5a0f000000 or 62e2fd4a1dac5a0f000000 + VPACKSSDW X13, X19, K5, X1 // 62d165056bcd + VPACKSSDW 15(R8)(R14*4), X19, K5, X1 // 629165056b8cb00f000000 + VPACKSSDW -7(CX)(DX*4), X19, K5, X1 // 62f165056b8c91f9ffffff + VPACKSSDW Y1, Y28, K3, Y8 // 62711d236bc1 + VPACKSSDW 15(R8)(R14*8), Y28, K3, Y8 // 62111d236b84f00f000000 + VPACKSSDW -15(R14)(R15*2), Y28, K3, Y8 // 62111d236b847ef1ffffff + VPACKSSDW Z21, Z12, K4, Z14 // 62311d4c6bf5 + VPACKSSDW Z9, Z12, K4, Z14 // 62511d4c6bf1 + VPACKSSDW 17(SP)(BP*1), Z12, K4, Z14 // 62711d4c6bb42c11000000 + VPACKSSDW -7(CX)(DX*8), Z12, K4, Z14 // 62711d4c6bb4d1f9ffffff + VPACKSSDW Z21, Z13, K4, Z14 // 6231154c6bf5 + VPACKSSDW Z9, Z13, K4, Z14 // 6251154c6bf1 + VPACKSSDW 17(SP)(BP*1), Z13, K4, Z14 // 6271154c6bb42c11000000 + VPACKSSDW -7(CX)(DX*8), Z13, K4, Z14 // 6271154c6bb4d1f9ffffff + VPACKSSDW Z21, Z12, K4, Z13 // 62311d4c6bed + VPACKSSDW Z9, Z12, K4, Z13 // 62511d4c6be9 + VPACKSSDW 17(SP)(BP*1), Z12, K4, Z13 // 62711d4c6bac2c11000000 + VPACKSSDW -7(CX)(DX*8), Z12, K4, Z13 // 62711d4c6bacd1f9ffffff + VPACKSSDW Z21, Z13, K4, Z13 // 6231154c6bed + VPACKSSDW Z9, Z13, K4, Z13 // 6251154c6be9 + VPACKSSDW 17(SP)(BP*1), Z13, K4, Z13 // 6271154c6bac2c11000000 + VPACKSSDW -7(CX)(DX*8), Z13, K4, Z13 // 6271154c6bacd1f9ffffff + VPACKSSWB X0, X14, K2, X2 // 62f10d0a63d0 or 62f18d0a63d0 + VPACKSSWB (R8), X14, K2, X2 // 62d10d0a6310 or 62d18d0a6310 + VPACKSSWB 15(DX)(BX*2), X14, K2, X2 // 62f10d0a63945a0f000000 or 62f18d0a63945a0f000000 + VPACKSSWB Y31, Y14, K2, Y23 // 62810d2a63ff or 62818d2a63ff + 
VPACKSSWB -15(R14)(R15*1), Y14, K2, Y23 // 62810d2a63bc3ef1ffffff or 62818d2a63bc3ef1ffffff + VPACKSSWB -15(BX), Y14, K2, Y23 // 62e10d2a63bbf1ffffff or 62e18d2a63bbf1ffffff + VPACKSSWB Z23, Z27, K3, Z2 // 62b1254363d7 or 62b1a54363d7 + VPACKSSWB Z9, Z27, K3, Z2 // 62d1254363d1 or 62d1a54363d1 + VPACKSSWB -17(BP)(SI*2), Z27, K3, Z2 // 62f12543639475efffffff or 62f1a543639475efffffff + VPACKSSWB 7(AX)(CX*2), Z27, K3, Z2 // 62f1254363944807000000 or 62f1a54363944807000000 + VPACKSSWB Z23, Z25, K3, Z2 // 62b1354363d7 or 62b1b54363d7 + VPACKSSWB Z9, Z25, K3, Z2 // 62d1354363d1 or 62d1b54363d1 + VPACKSSWB -17(BP)(SI*2), Z25, K3, Z2 // 62f13543639475efffffff or 62f1b543639475efffffff + VPACKSSWB 7(AX)(CX*2), Z25, K3, Z2 // 62f1354363944807000000 or 62f1b54363944807000000 + VPACKSSWB Z23, Z27, K3, Z7 // 62b1254363ff or 62b1a54363ff + VPACKSSWB Z9, Z27, K3, Z7 // 62d1254363f9 or 62d1a54363f9 + VPACKSSWB -17(BP)(SI*2), Z27, K3, Z7 // 62f1254363bc75efffffff or 62f1a54363bc75efffffff + VPACKSSWB 7(AX)(CX*2), Z27, K3, Z7 // 62f1254363bc4807000000 or 62f1a54363bc4807000000 + VPACKSSWB Z23, Z25, K3, Z7 // 62b1354363ff or 62b1b54363ff + VPACKSSWB Z9, Z25, K3, Z7 // 62d1354363f9 or 62d1b54363f9 + VPACKSSWB -17(BP)(SI*2), Z25, K3, Z7 // 62f1354363bc75efffffff or 62f1b54363bc75efffffff + VPACKSSWB 7(AX)(CX*2), Z25, K3, Z7 // 62f1354363bc4807000000 or 62f1b54363bc4807000000 + VPACKUSDW X11, X25, K3, X0 // 62d235032bc3 + VPACKUSDW 17(SP)(BP*1), X25, K3, X0 // 62f235032b842c11000000 + VPACKUSDW -7(CX)(DX*8), X25, K3, X0 // 62f235032b84d1f9ffffff + VPACKUSDW Y22, Y2, K3, Y25 // 62226d2b2bce + VPACKUSDW 7(AX)(CX*4), Y2, K3, Y25 // 62626d2b2b8c8807000000 + VPACKUSDW 7(AX)(CX*1), Y2, K3, Y25 // 62626d2b2b8c0807000000 + VPACKUSDW Z14, Z3, K2, Z27 // 6242654a2bde + VPACKUSDW Z7, Z3, K2, Z27 // 6262654a2bdf + VPACKUSDW 15(R8)(R14*1), Z3, K2, Z27 // 6202654a2b9c300f000000 + VPACKUSDW 15(R8)(R14*2), Z3, K2, Z27 // 6202654a2b9c700f000000 + VPACKUSDW Z14, Z0, K2, Z27 // 62427d4a2bde + VPACKUSDW 
Z7, Z0, K2, Z27 // 62627d4a2bdf + VPACKUSDW 15(R8)(R14*1), Z0, K2, Z27 // 62027d4a2b9c300f000000 + VPACKUSDW 15(R8)(R14*2), Z0, K2, Z27 // 62027d4a2b9c700f000000 + VPACKUSDW Z14, Z3, K2, Z14 // 6252654a2bf6 + VPACKUSDW Z7, Z3, K2, Z14 // 6272654a2bf7 + VPACKUSDW 15(R8)(R14*1), Z3, K2, Z14 // 6212654a2bb4300f000000 + VPACKUSDW 15(R8)(R14*2), Z3, K2, Z14 // 6212654a2bb4700f000000 + VPACKUSDW Z14, Z0, K2, Z14 // 62527d4a2bf6 + VPACKUSDW Z7, Z0, K2, Z14 // 62727d4a2bf7 + VPACKUSDW 15(R8)(R14*1), Z0, K2, Z14 // 62127d4a2bb4300f000000 + VPACKUSDW 15(R8)(R14*2), Z0, K2, Z14 // 62127d4a2bb4700f000000 + VPACKUSWB X11, X18, K1, X17 // 62c16d0167cb or 62c1ed0167cb + VPACKUSWB -17(BP)(SI*2), X18, K1, X17 // 62e16d01678c75efffffff or 62e1ed01678c75efffffff + VPACKUSWB 7(AX)(CX*2), X18, K1, X17 // 62e16d01678c4807000000 or 62e1ed01678c4807000000 + VPACKUSWB Y9, Y8, K2, Y27 // 62413d2a67d9 or 6241bd2a67d9 + VPACKUSWB (SI), Y8, K2, Y27 // 62613d2a671e or 6261bd2a671e + VPACKUSWB 7(SI)(DI*2), Y8, K2, Y27 // 62613d2a679c7e07000000 or 6261bd2a679c7e07000000 + VPACKUSWB Z1, Z22, K1, Z8 // 62714d4167c1 or 6271cd4167c1 + VPACKUSWB Z16, Z22, K1, Z8 // 62314d4167c0 or 6231cd4167c0 + VPACKUSWB (R14), Z22, K1, Z8 // 62514d416706 or 6251cd416706 + VPACKUSWB -7(DI)(R8*8), Z22, K1, Z8 // 62314d416784c7f9ffffff or 6231cd416784c7f9ffffff + VPACKUSWB Z1, Z25, K1, Z8 // 6271354167c1 or 6271b54167c1 + VPACKUSWB Z16, Z25, K1, Z8 // 6231354167c0 or 6231b54167c0 + VPACKUSWB (R14), Z25, K1, Z8 // 625135416706 or 6251b5416706 + VPACKUSWB -7(DI)(R8*8), Z25, K1, Z8 // 623135416784c7f9ffffff or 6231b5416784c7f9ffffff + VPACKUSWB Z1, Z22, K1, Z24 // 62614d4167c1 or 6261cd4167c1 + VPACKUSWB Z16, Z22, K1, Z24 // 62214d4167c0 or 6221cd4167c0 + VPACKUSWB (R14), Z22, K1, Z24 // 62414d416706 or 6241cd416706 + VPACKUSWB -7(DI)(R8*8), Z22, K1, Z24 // 62214d416784c7f9ffffff or 6221cd416784c7f9ffffff + VPACKUSWB Z1, Z25, K1, Z24 // 6261354167c1 or 6261b54167c1 + VPACKUSWB Z16, Z25, K1, Z24 // 6221354167c0 or 
6221b54167c0 + VPACKUSWB (R14), Z25, K1, Z24 // 624135416706 or 6241b5416706 + VPACKUSWB -7(DI)(R8*8), Z25, K1, Z24 // 622135416784c7f9ffffff or 6221b5416784c7f9ffffff + VPADDB X24, X2, K7, X9 // 62116d0ffcc8 or 6211ed0ffcc8 + VPADDB 15(R8)(R14*1), X2, K7, X9 // 62116d0ffc8c300f000000 or 6211ed0ffc8c300f000000 + VPADDB 15(R8)(R14*2), X2, K7, X9 // 62116d0ffc8c700f000000 or 6211ed0ffc8c700f000000 + VPADDB Y14, Y9, K1, Y22 // 62c13529fcf6 or 62c1b529fcf6 + VPADDB 17(SP)(BP*8), Y9, K1, Y22 // 62e13529fcb4ec11000000 or 62e1b529fcb4ec11000000 + VPADDB 17(SP)(BP*4), Y9, K1, Y22 // 62e13529fcb4ac11000000 or 62e1b529fcb4ac11000000 + VPADDB Z15, Z0, K1, Z6 // 62d17d49fcf7 or 62d1fd49fcf7 + VPADDB Z12, Z0, K1, Z6 // 62d17d49fcf4 or 62d1fd49fcf4 + VPADDB 99(R15)(R15*4), Z0, K1, Z6 // 62917d49fcb4bf63000000 or 6291fd49fcb4bf63000000 + VPADDB 15(DX), Z0, K1, Z6 // 62f17d49fcb20f000000 or 62f1fd49fcb20f000000 + VPADDB Z15, Z8, K1, Z6 // 62d13d49fcf7 or 62d1bd49fcf7 + VPADDB Z12, Z8, K1, Z6 // 62d13d49fcf4 or 62d1bd49fcf4 + VPADDB 99(R15)(R15*4), Z8, K1, Z6 // 62913d49fcb4bf63000000 or 6291bd49fcb4bf63000000 + VPADDB 15(DX), Z8, K1, Z6 // 62f13d49fcb20f000000 or 62f1bd49fcb20f000000 + VPADDB Z15, Z0, K1, Z2 // 62d17d49fcd7 or 62d1fd49fcd7 + VPADDB Z12, Z0, K1, Z2 // 62d17d49fcd4 or 62d1fd49fcd4 + VPADDB 99(R15)(R15*4), Z0, K1, Z2 // 62917d49fc94bf63000000 or 6291fd49fc94bf63000000 + VPADDB 15(DX), Z0, K1, Z2 // 62f17d49fc920f000000 or 62f1fd49fc920f000000 + VPADDB Z15, Z8, K1, Z2 // 62d13d49fcd7 or 62d1bd49fcd7 + VPADDB Z12, Z8, K1, Z2 // 62d13d49fcd4 or 62d1bd49fcd4 + VPADDB 99(R15)(R15*4), Z8, K1, Z2 // 62913d49fc94bf63000000 or 6291bd49fc94bf63000000 + VPADDB 15(DX), Z8, K1, Z2 // 62f13d49fc920f000000 or 62f1bd49fc920f000000 + VPADDSB X15, X11, K4, X3 // 62d1250cecdf or 62d1a50cecdf + VPADDSB (CX), X11, K4, X3 // 62f1250cec19 or 62f1a50cec19 + VPADDSB 99(R15), X11, K4, X3 // 62d1250cec9f63000000 or 62d1a50cec9f63000000 + VPADDSB Y9, Y22, K5, Y31 // 62414d25ecf9 or 6241cd25ecf9 
+ VPADDSB 7(AX), Y22, K5, Y31 // 62614d25ecb807000000 or 6261cd25ecb807000000 + VPADDSB (DI), Y22, K5, Y31 // 62614d25ec3f or 6261cd25ec3f + VPADDSB Z13, Z28, K7, Z26 // 62411d47ecd5 or 62419d47ecd5 + VPADDSB Z21, Z28, K7, Z26 // 62211d47ecd5 or 62219d47ecd5 + VPADDSB -7(CX)(DX*1), Z28, K7, Z26 // 62611d47ec9411f9ffffff or 62619d47ec9411f9ffffff + VPADDSB -15(R14)(R15*4), Z28, K7, Z26 // 62011d47ec94bef1ffffff or 62019d47ec94bef1ffffff + VPADDSB Z13, Z6, K7, Z26 // 62414d4fecd5 or 6241cd4fecd5 + VPADDSB Z21, Z6, K7, Z26 // 62214d4fecd5 or 6221cd4fecd5 + VPADDSB -7(CX)(DX*1), Z6, K7, Z26 // 62614d4fec9411f9ffffff or 6261cd4fec9411f9ffffff + VPADDSB -15(R14)(R15*4), Z6, K7, Z26 // 62014d4fec94bef1ffffff or 6201cd4fec94bef1ffffff + VPADDSB Z13, Z28, K7, Z14 // 62511d47ecf5 or 62519d47ecf5 + VPADDSB Z21, Z28, K7, Z14 // 62311d47ecf5 or 62319d47ecf5 + VPADDSB -7(CX)(DX*1), Z28, K7, Z14 // 62711d47ecb411f9ffffff or 62719d47ecb411f9ffffff + VPADDSB -15(R14)(R15*4), Z28, K7, Z14 // 62111d47ecb4bef1ffffff or 62119d47ecb4bef1ffffff + VPADDSB Z13, Z6, K7, Z14 // 62514d4fecf5 or 6251cd4fecf5 + VPADDSB Z21, Z6, K7, Z14 // 62314d4fecf5 or 6231cd4fecf5 + VPADDSB -7(CX)(DX*1), Z6, K7, Z14 // 62714d4fecb411f9ffffff or 6271cd4fecb411f9ffffff + VPADDSB -15(R14)(R15*4), Z6, K7, Z14 // 62114d4fecb4bef1ffffff or 6211cd4fecb4bef1ffffff + VPADDSW X6, X13, K7, X30 // 6261150fedf6 or 6261950fedf6 + VPADDSW 99(R15)(R15*2), X13, K7, X30 // 6201150fedb47f63000000 or 6201950fedb47f63000000 + VPADDSW -7(DI), X13, K7, X30 // 6261150fedb7f9ffffff or 6261950fedb7f9ffffff + VPADDSW Y5, Y31, K6, Y23 // 62e10526edfd or 62e18526edfd + VPADDSW 99(R15)(R15*1), Y31, K6, Y23 // 62810526edbc3f63000000 or 62818526edbc3f63000000 + VPADDSW (DX), Y31, K6, Y23 // 62e10526ed3a or 62e18526ed3a + VPADDSW Z21, Z3, K3, Z26 // 6221654bedd5 or 6221e54bedd5 + VPADDSW Z13, Z3, K3, Z26 // 6241654bedd5 or 6241e54bedd5 + VPADDSW 15(DX)(BX*1), Z3, K3, Z26 // 6261654bed941a0f000000 or 6261e54bed941a0f000000 + VPADDSW 
-7(CX)(DX*2), Z3, K3, Z26 // 6261654bed9451f9ffffff or 6261e54bed9451f9ffffff + VPADDSW Z21, Z0, K3, Z26 // 62217d4bedd5 or 6221fd4bedd5 + VPADDSW Z13, Z0, K3, Z26 // 62417d4bedd5 or 6241fd4bedd5 + VPADDSW 15(DX)(BX*1), Z0, K3, Z26 // 62617d4bed941a0f000000 or 6261fd4bed941a0f000000 + VPADDSW -7(CX)(DX*2), Z0, K3, Z26 // 62617d4bed9451f9ffffff or 6261fd4bed9451f9ffffff + VPADDSW Z21, Z3, K3, Z3 // 62b1654beddd or 62b1e54beddd + VPADDSW Z13, Z3, K3, Z3 // 62d1654beddd or 62d1e54beddd + VPADDSW 15(DX)(BX*1), Z3, K3, Z3 // 62f1654bed9c1a0f000000 or 62f1e54bed9c1a0f000000 + VPADDSW -7(CX)(DX*2), Z3, K3, Z3 // 62f1654bed9c51f9ffffff or 62f1e54bed9c51f9ffffff + VPADDSW Z21, Z0, K3, Z3 // 62b17d4beddd or 62b1fd4beddd + VPADDSW Z13, Z0, K3, Z3 // 62d17d4beddd or 62d1fd4beddd + VPADDSW 15(DX)(BX*1), Z0, K3, Z3 // 62f17d4bed9c1a0f000000 or 62f1fd4bed9c1a0f000000 + VPADDSW -7(CX)(DX*2), Z0, K3, Z3 // 62f17d4bed9c51f9ffffff or 62f1fd4bed9c51f9ffffff + VPADDUSB X30, X23, K7, X12 // 62114507dce6 or 6211c507dce6 + VPADDUSB -7(CX)(DX*1), X23, K7, X12 // 62714507dca411f9ffffff or 6271c507dca411f9ffffff + VPADDUSB -15(R14)(R15*4), X23, K7, X12 // 62114507dca4bef1ffffff or 6211c507dca4bef1ffffff + VPADDUSB Y19, Y5, K4, Y0 // 62b1552cdcc3 or 62b1d52cdcc3 + VPADDUSB -17(BP)(SI*8), Y5, K4, Y0 // 62f1552cdc84f5efffffff or 62f1d52cdc84f5efffffff + VPADDUSB (R15), Y5, K4, Y0 // 62d1552cdc07 or 62d1d52cdc07 + VPADDUSB Z27, Z3, K4, Z11 // 6211654cdcdb or 6211e54cdcdb + VPADDUSB Z15, Z3, K4, Z11 // 6251654cdcdf or 6251e54cdcdf + VPADDUSB -17(BP), Z3, K4, Z11 // 6271654cdc9defffffff or 6271e54cdc9defffffff + VPADDUSB -15(R14)(R15*8), Z3, K4, Z11 // 6211654cdc9cfef1ffffff or 6211e54cdc9cfef1ffffff + VPADDUSB Z27, Z12, K4, Z11 // 62111d4cdcdb or 62119d4cdcdb + VPADDUSB Z15, Z12, K4, Z11 // 62511d4cdcdf or 62519d4cdcdf + VPADDUSB -17(BP), Z12, K4, Z11 // 62711d4cdc9defffffff or 62719d4cdc9defffffff + VPADDUSB -15(R14)(R15*8), Z12, K4, Z11 // 62111d4cdc9cfef1ffffff or 62119d4cdc9cfef1ffffff + 
VPADDUSB Z27, Z3, K4, Z25 // 6201654cdccb or 6201e54cdccb + VPADDUSB Z15, Z3, K4, Z25 // 6241654cdccf or 6241e54cdccf + VPADDUSB -17(BP), Z3, K4, Z25 // 6261654cdc8defffffff or 6261e54cdc8defffffff + VPADDUSB -15(R14)(R15*8), Z3, K4, Z25 // 6201654cdc8cfef1ffffff or 6201e54cdc8cfef1ffffff + VPADDUSB Z27, Z12, K4, Z25 // 62011d4cdccb or 62019d4cdccb + VPADDUSB Z15, Z12, K4, Z25 // 62411d4cdccf or 62419d4cdccf + VPADDUSB -17(BP), Z12, K4, Z25 // 62611d4cdc8defffffff or 62619d4cdc8defffffff + VPADDUSB -15(R14)(R15*8), Z12, K4, Z25 // 62011d4cdc8cfef1ffffff or 62019d4cdc8cfef1ffffff + VPADDUSW X2, X20, K7, X8 // 62715d07ddc2 or 6271dd07ddc2 + VPADDUSW 15(DX)(BX*1), X20, K7, X8 // 62715d07dd841a0f000000 or 6271dd07dd841a0f000000 + VPADDUSW -7(CX)(DX*2), X20, K7, X8 // 62715d07dd8451f9ffffff or 6271dd07dd8451f9ffffff + VPADDUSW Y2, Y28, K2, Y31 // 62611d22ddfa or 62619d22ddfa + VPADDUSW 7(SI)(DI*8), Y28, K2, Y31 // 62611d22ddbcfe07000000 or 62619d22ddbcfe07000000 + VPADDUSW -15(R14), Y28, K2, Y31 // 62411d22ddbef1ffffff or 62419d22ddbef1ffffff + VPADDUSW Z8, Z23, K5, Z23 // 62c14545ddf8 or 62c1c545ddf8 + VPADDUSW Z28, Z23, K5, Z23 // 62814545ddfc or 6281c545ddfc + VPADDUSW 17(SP)(BP*2), Z23, K5, Z23 // 62e14545ddbc6c11000000 or 62e1c545ddbc6c11000000 + VPADDUSW -7(DI)(R8*4), Z23, K5, Z23 // 62a14545ddbc87f9ffffff or 62a1c545ddbc87f9ffffff + VPADDUSW Z8, Z6, K5, Z23 // 62c14d4dddf8 or 62c1cd4dddf8 + VPADDUSW Z28, Z6, K5, Z23 // 62814d4dddfc or 6281cd4dddfc + VPADDUSW 17(SP)(BP*2), Z6, K5, Z23 // 62e14d4dddbc6c11000000 or 62e1cd4dddbc6c11000000 + VPADDUSW -7(DI)(R8*4), Z6, K5, Z23 // 62a14d4dddbc87f9ffffff or 62a1cd4dddbc87f9ffffff + VPADDUSW Z8, Z23, K5, Z5 // 62d14545dde8 or 62d1c545dde8 + VPADDUSW Z28, Z23, K5, Z5 // 62914545ddec or 6291c545ddec + VPADDUSW 17(SP)(BP*2), Z23, K5, Z5 // 62f14545ddac6c11000000 or 62f1c545ddac6c11000000 + VPADDUSW -7(DI)(R8*4), Z23, K5, Z5 // 62b14545ddac87f9ffffff or 62b1c545ddac87f9ffffff + VPADDUSW Z8, Z6, K5, Z5 // 62d14d4ddde8 or 
62d1cd4ddde8 + VPADDUSW Z28, Z6, K5, Z5 // 62914d4dddec or 6291cd4dddec + VPADDUSW 17(SP)(BP*2), Z6, K5, Z5 // 62f14d4dddac6c11000000 or 62f1cd4dddac6c11000000 + VPADDUSW -7(DI)(R8*4), Z6, K5, Z5 // 62b14d4dddac87f9ffffff or 62b1cd4dddac87f9ffffff + VPADDW X19, X26, K3, X9 // 62312d03fdcb or 6231ad03fdcb + VPADDW -17(BP), X26, K3, X9 // 62712d03fd8defffffff or 6271ad03fd8defffffff + VPADDW -15(R14)(R15*8), X26, K3, X9 // 62112d03fd8cfef1ffffff or 6211ad03fd8cfef1ffffff + VPADDW Y0, Y27, K4, Y24 // 62612524fdc0 or 6261a524fdc0 + VPADDW 7(SI)(DI*1), Y27, K4, Y24 // 62612524fd843e07000000 or 6261a524fd843e07000000 + VPADDW 15(DX)(BX*8), Y27, K4, Y24 // 62612524fd84da0f000000 or 6261a524fd84da0f000000 + VPADDW Z12, Z16, K2, Z21 // 62c17d42fdec or 62c1fd42fdec + VPADDW Z27, Z16, K2, Z21 // 62817d42fdeb or 6281fd42fdeb + VPADDW 15(R8), Z16, K2, Z21 // 62c17d42fda80f000000 or 62c1fd42fda80f000000 + VPADDW (BP), Z16, K2, Z21 // 62e17d42fd6d00 or 62e1fd42fd6d00 + VPADDW Z12, Z13, K2, Z21 // 62c1154afdec or 62c1954afdec + VPADDW Z27, Z13, K2, Z21 // 6281154afdeb or 6281954afdeb + VPADDW 15(R8), Z13, K2, Z21 // 62c1154afda80f000000 or 62c1954afda80f000000 + VPADDW (BP), Z13, K2, Z21 // 62e1154afd6d00 or 62e1954afd6d00 + VPADDW Z12, Z16, K2, Z5 // 62d17d42fdec or 62d1fd42fdec + VPADDW Z27, Z16, K2, Z5 // 62917d42fdeb or 6291fd42fdeb + VPADDW 15(R8), Z16, K2, Z5 // 62d17d42fda80f000000 or 62d1fd42fda80f000000 + VPADDW (BP), Z16, K2, Z5 // 62f17d42fd6d00 or 62f1fd42fd6d00 + VPADDW Z12, Z13, K2, Z5 // 62d1154afdec or 62d1954afdec + VPADDW Z27, Z13, K2, Z5 // 6291154afdeb or 6291954afdeb + VPADDW 15(R8), Z13, K2, Z5 // 62d1154afda80f000000 or 62d1954afda80f000000 + VPADDW (BP), Z13, K2, Z5 // 62f1154afd6d00 or 62f1954afd6d00 + VPALIGNR $13, X16, X31, K2, X0 // 62b305020fc00d or 62b385020fc00d + VPALIGNR $13, 17(SP)(BP*2), X31, K2, X0 // 62f305020f846c110000000d or 62f385020f846c110000000d + VPALIGNR $13, -7(DI)(R8*4), X31, K2, X0 // 62b305020f8487f9ffffff0d or 
62b385020f8487f9ffffff0d + VPALIGNR $65, Y3, Y31, K3, Y11 // 627305230fdb41 or 627385230fdb41 + VPALIGNR $65, -7(DI)(R8*1), Y31, K3, Y11 // 623305230f9c07f9ffffff41 or 623385230f9c07f9ffffff41 + VPALIGNR $65, (SP), Y31, K3, Y11 // 627305230f1c2441 or 627385230f1c2441 + VPALIGNR $67, Z25, Z6, K3, Z22 // 62834d4b0ff143 or 6283cd4b0ff143 + VPALIGNR $67, Z12, Z6, K3, Z22 // 62c34d4b0ff443 or 62c3cd4b0ff443 + VPALIGNR $67, 15(R8)(R14*8), Z6, K3, Z22 // 62834d4b0fb4f00f00000043 or 6283cd4b0fb4f00f00000043 + VPALIGNR $67, -15(R14)(R15*2), Z6, K3, Z22 // 62834d4b0fb47ef1ffffff43 or 6283cd4b0fb47ef1ffffff43 + VPALIGNR $67, Z25, Z8, K3, Z22 // 62833d4b0ff143 or 6283bd4b0ff143 + VPALIGNR $67, Z12, Z8, K3, Z22 // 62c33d4b0ff443 or 62c3bd4b0ff443 + VPALIGNR $67, 15(R8)(R14*8), Z8, K3, Z22 // 62833d4b0fb4f00f00000043 or 6283bd4b0fb4f00f00000043 + VPALIGNR $67, -15(R14)(R15*2), Z8, K3, Z22 // 62833d4b0fb47ef1ffffff43 or 6283bd4b0fb47ef1ffffff43 + VPALIGNR $67, Z25, Z6, K3, Z11 // 62134d4b0fd943 or 6213cd4b0fd943 + VPALIGNR $67, Z12, Z6, K3, Z11 // 62534d4b0fdc43 or 6253cd4b0fdc43 + VPALIGNR $67, 15(R8)(R14*8), Z6, K3, Z11 // 62134d4b0f9cf00f00000043 or 6213cd4b0f9cf00f00000043 + VPALIGNR $67, -15(R14)(R15*2), Z6, K3, Z11 // 62134d4b0f9c7ef1ffffff43 or 6213cd4b0f9c7ef1ffffff43 + VPALIGNR $67, Z25, Z8, K3, Z11 // 62133d4b0fd943 or 6213bd4b0fd943 + VPALIGNR $67, Z12, Z8, K3, Z11 // 62533d4b0fdc43 or 6253bd4b0fdc43 + VPALIGNR $67, 15(R8)(R14*8), Z8, K3, Z11 // 62133d4b0f9cf00f00000043 or 6213bd4b0f9cf00f00000043 + VPALIGNR $67, -15(R14)(R15*2), Z8, K3, Z11 // 62133d4b0f9c7ef1ffffff43 or 6213bd4b0f9c7ef1ffffff43 + VPAVGB X16, X7, K1, X19 // 62a14509e0d8 or 62a1c509e0d8 + VPAVGB (SI), X7, K1, X19 // 62e14509e01e or 62e1c509e01e + VPAVGB 7(SI)(DI*2), X7, K1, X19 // 62e14509e09c7e07000000 or 62e1c509e09c7e07000000 + VPAVGB Y14, Y19, K3, Y23 // 62c16523e0fe or 62c1e523e0fe + VPAVGB 15(R8)(R14*4), Y19, K3, Y23 // 62816523e0bcb00f000000 or 6281e523e0bcb00f000000 + VPAVGB -7(CX)(DX*4), Y19, 
K3, Y23 // 62e16523e0bc91f9ffffff or 62e1e523e0bc91f9ffffff + VPAVGB Z2, Z18, K4, Z11 // 62716d44e0da or 6271ed44e0da + VPAVGB Z21, Z18, K4, Z11 // 62316d44e0dd or 6231ed44e0dd + VPAVGB 7(SI)(DI*4), Z18, K4, Z11 // 62716d44e09cbe07000000 or 6271ed44e09cbe07000000 + VPAVGB -7(DI)(R8*2), Z18, K4, Z11 // 62316d44e09c47f9ffffff or 6231ed44e09c47f9ffffff + VPAVGB Z2, Z24, K4, Z11 // 62713d44e0da or 6271bd44e0da + VPAVGB Z21, Z24, K4, Z11 // 62313d44e0dd or 6231bd44e0dd + VPAVGB 7(SI)(DI*4), Z24, K4, Z11 // 62713d44e09cbe07000000 or 6271bd44e09cbe07000000 + VPAVGB -7(DI)(R8*2), Z24, K4, Z11 // 62313d44e09c47f9ffffff or 6231bd44e09c47f9ffffff + VPAVGB Z2, Z18, K4, Z5 // 62f16d44e0ea or 62f1ed44e0ea + VPAVGB Z21, Z18, K4, Z5 // 62b16d44e0ed or 62b1ed44e0ed + VPAVGB 7(SI)(DI*4), Z18, K4, Z5 // 62f16d44e0acbe07000000 or 62f1ed44e0acbe07000000 + VPAVGB -7(DI)(R8*2), Z18, K4, Z5 // 62b16d44e0ac47f9ffffff or 62b1ed44e0ac47f9ffffff + VPAVGB Z2, Z24, K4, Z5 // 62f13d44e0ea or 62f1bd44e0ea + VPAVGB Z21, Z24, K4, Z5 // 62b13d44e0ed or 62b1bd44e0ed + VPAVGB 7(SI)(DI*4), Z24, K4, Z5 // 62f13d44e0acbe07000000 or 62f1bd44e0acbe07000000 + VPAVGB -7(DI)(R8*2), Z24, K4, Z5 // 62b13d44e0ac47f9ffffff or 62b1bd44e0ac47f9ffffff + VPAVGW X7, X1, K5, X31 // 6261750de3ff or 6261f50de3ff + VPAVGW 17(SP)(BP*8), X1, K5, X31 // 6261750de3bcec11000000 or 6261f50de3bcec11000000 + VPAVGW 17(SP)(BP*4), X1, K5, X31 // 6261750de3bcac11000000 or 6261f50de3bcac11000000 + VPAVGW Y16, Y5, K7, Y21 // 62a1552fe3e8 or 62a1d52fe3e8 + VPAVGW (R8), Y5, K7, Y21 // 62c1552fe328 or 62c1d52fe328 + VPAVGW 15(DX)(BX*2), Y5, K7, Y21 // 62e1552fe3ac5a0f000000 or 62e1d52fe3ac5a0f000000 + VPAVGW Z6, Z6, K7, Z7 // 62f14d4fe3fe or 62f1cd4fe3fe + VPAVGW Z22, Z6, K7, Z7 // 62b14d4fe3fe or 62b1cd4fe3fe + VPAVGW 17(SP), Z6, K7, Z7 // 62f14d4fe3bc2411000000 or 62f1cd4fe3bc2411000000 + VPAVGW -17(BP)(SI*4), Z6, K7, Z7 // 62f14d4fe3bcb5efffffff or 62f1cd4fe3bcb5efffffff + VPAVGW Z6, Z16, K7, Z7 // 62f17d47e3fe or 62f1fd47e3fe + 
VPAVGW Z22, Z16, K7, Z7 // 62b17d47e3fe or 62b1fd47e3fe + VPAVGW 17(SP), Z16, K7, Z7 // 62f17d47e3bc2411000000 or 62f1fd47e3bc2411000000 + VPAVGW -17(BP)(SI*4), Z16, K7, Z7 // 62f17d47e3bcb5efffffff or 62f1fd47e3bcb5efffffff + VPAVGW Z6, Z6, K7, Z13 // 62714d4fe3ee or 6271cd4fe3ee + VPAVGW Z22, Z6, K7, Z13 // 62314d4fe3ee or 6231cd4fe3ee + VPAVGW 17(SP), Z6, K7, Z13 // 62714d4fe3ac2411000000 or 6271cd4fe3ac2411000000 + VPAVGW -17(BP)(SI*4), Z6, K7, Z13 // 62714d4fe3acb5efffffff or 6271cd4fe3acb5efffffff + VPAVGW Z6, Z16, K7, Z13 // 62717d47e3ee or 6271fd47e3ee + VPAVGW Z22, Z16, K7, Z13 // 62317d47e3ee or 6231fd47e3ee + VPAVGW 17(SP), Z16, K7, Z13 // 62717d47e3ac2411000000 or 6271fd47e3ac2411000000 + VPAVGW -17(BP)(SI*4), Z16, K7, Z13 // 62717d47e3acb5efffffff or 6271fd47e3acb5efffffff + VPBLENDMB X12, X15, K6, X9 // 6252050e66cc + VPBLENDMB 7(SI)(DI*4), X15, K6, X9 // 6272050e668cbe07000000 + VPBLENDMB -7(DI)(R8*2), X15, K6, X9 // 6232050e668c47f9ffffff + VPBLENDMB Y20, Y21, K3, Y2 // 62b2552366d4 + VPBLENDMB 17(SP)(BP*1), Y21, K3, Y2 // 62f2552366942c11000000 + VPBLENDMB -7(CX)(DX*8), Y21, K3, Y2 // 62f255236694d1f9ffffff + VPBLENDMB Z18, Z13, K7, Z1 // 62b2154f66ca + VPBLENDMB Z8, Z13, K7, Z1 // 62d2154f66c8 + VPBLENDMB 7(AX), Z13, K7, Z1 // 62f2154f668807000000 + VPBLENDMB (DI), Z13, K7, Z1 // 62f2154f660f + VPBLENDMB Z18, Z13, K7, Z15 // 6232154f66fa + VPBLENDMB Z8, Z13, K7, Z15 // 6252154f66f8 + VPBLENDMB 7(AX), Z13, K7, Z15 // 6272154f66b807000000 + VPBLENDMB (DI), Z13, K7, Z15 // 6272154f663f + VPBLENDMW X26, X3, K4, X8 // 6212e50c66c2 + VPBLENDMW 99(R15)(R15*1), X3, K4, X8 // 6212e50c66843f63000000 + VPBLENDMW (DX), X3, K4, X8 // 6272e50c6602 + VPBLENDMW Y3, Y0, K2, Y6 // 62f2fd2a66f3 + VPBLENDMW (R14), Y0, K2, Y6 // 62d2fd2a6636 + VPBLENDMW -7(DI)(R8*8), Y0, K2, Y6 // 62b2fd2a66b4c7f9ffffff + VPBLENDMW Z15, Z3, K2, Z14 // 6252e54a66f7 + VPBLENDMW Z30, Z3, K2, Z14 // 6212e54a66f6 + VPBLENDMW 7(SI)(DI*8), Z3, K2, Z14 // 6272e54a66b4fe07000000 + VPBLENDMW 
-15(R14), Z3, K2, Z14 // 6252e54a66b6f1ffffff + VPBLENDMW Z15, Z12, K2, Z14 // 62529d4a66f7 + VPBLENDMW Z30, Z12, K2, Z14 // 62129d4a66f6 + VPBLENDMW 7(SI)(DI*8), Z12, K2, Z14 // 62729d4a66b4fe07000000 + VPBLENDMW -15(R14), Z12, K2, Z14 // 62529d4a66b6f1ffffff + VPBLENDMW Z15, Z3, K2, Z28 // 6242e54a66e7 + VPBLENDMW Z30, Z3, K2, Z28 // 6202e54a66e6 + VPBLENDMW 7(SI)(DI*8), Z3, K2, Z28 // 6262e54a66a4fe07000000 + VPBLENDMW -15(R14), Z3, K2, Z28 // 6242e54a66a6f1ffffff + VPBLENDMW Z15, Z12, K2, Z28 // 62429d4a66e7 + VPBLENDMW Z30, Z12, K2, Z28 // 62029d4a66e6 + VPBLENDMW 7(SI)(DI*8), Z12, K2, Z28 // 62629d4a66a4fe07000000 + VPBLENDMW -15(R14), Z12, K2, Z28 // 62429d4a66a6f1ffffff + VPBROADCASTB CX, K3, X23 // 62e27d0b7af9 + VPBROADCASTB SP, K3, X23 // 62e27d0b7afc + VPBROADCASTB R14, K3, Y5 // 62d27d2b7aee + VPBROADCASTB AX, K3, Y5 // 62f27d2b7ae8 + VPBROADCASTB R9, K3, Z19 // 62c27d4b7ad9 + VPBROADCASTB CX, K3, Z19 // 62e27d4b7ad9 + VPBROADCASTB R9, K3, Z15 // 62527d4b7af9 + VPBROADCASTB CX, K3, Z15 // 62727d4b7af9 + VPBROADCASTB X28, K2, X13 // 62127d0a78ec + VPBROADCASTB 99(R15)(R15*1), K2, X13 // 62127d0a786c3f63 + VPBROADCASTB (DX), K2, X13 // 62727d0a782a + VPBROADCASTB X24, K1, Y20 // 62827d2978e0 + VPBROADCASTB -17(BP)(SI*8), K1, Y20 // 62e27d297864f5ef + VPBROADCASTB (R15), K1, Y20 // 62c27d297827 + VPBROADCASTB X9, K2, Z5 // 62d27d4a78e9 + VPBROADCASTB 7(SI)(DI*8), K2, Z5 // 62f27d4a786cfe07 + VPBROADCASTB -15(R14), K2, Z5 // 62d27d4a786ef1 + VPBROADCASTB X9, K2, Z1 // 62d27d4a78c9 + VPBROADCASTB 7(SI)(DI*8), K2, Z1 // 62f27d4a784cfe07 + VPBROADCASTB -15(R14), K2, Z1 // 62d27d4a784ef1 + VPBROADCASTW R14, K7, X20 // 62c27d0f7be6 + VPBROADCASTW AX, K7, X20 // 62e27d0f7be0 + VPBROADCASTW R9, K7, Y22 // 62c27d2f7bf1 + VPBROADCASTW CX, K7, Y22 // 62e27d2f7bf1 + VPBROADCASTW SP, K6, Z0 // 62f27d4e7bc4 + VPBROADCASTW R14, K6, Z0 // 62d27d4e7bc6 + VPBROADCASTW SP, K6, Z11 // 62727d4e7bdc + VPBROADCASTW R14, K6, Z11 // 62527d4e7bde + VPBROADCASTW X9, K3, X7 // 
62d27d0b79f9 + VPBROADCASTW 99(R15)(R15*1), K3, X7 // 62927d0b79bc3f63000000 + VPBROADCASTW (DX), K3, X7 // 62f27d0b793a + VPBROADCASTW X7, K7, Y13 // 62727d2f79ef + VPBROADCASTW -17(BP)(SI*8), K7, Y13 // 62727d2f79acf5efffffff + VPBROADCASTW (R15), K7, Y13 // 62527d2f792f + VPBROADCASTW X14, K4, Z0 // 62d27d4c79c6 + VPBROADCASTW 7(SI)(DI*8), K4, Z0 // 62f27d4c7984fe07000000 + VPBROADCASTW -15(R14), K4, Z0 // 62d27d4c7986f1ffffff + VPBROADCASTW X14, K4, Z25 // 62427d4c79ce + VPBROADCASTW 7(SI)(DI*8), K4, Z25 // 62627d4c798cfe07000000 + VPBROADCASTW -15(R14), K4, Z25 // 62427d4c798ef1ffffff + VPCMPB $81, X1, X21, K4, K5 // 62f355043fe951 + VPCMPB $81, 7(SI)(DI*8), X21, K4, K5 // 62f355043facfe0700000051 + VPCMPB $81, -15(R14), X21, K4, K5 // 62d355043faef1ffffff51 + VPCMPB $81, X1, X21, K4, K4 // 62f355043fe151 + VPCMPB $81, 7(SI)(DI*8), X21, K4, K4 // 62f355043fa4fe0700000051 + VPCMPB $81, -15(R14), X21, K4, K4 // 62d355043fa6f1ffffff51 + VPCMPB $42, Y7, Y17, K7, K4 // 62f375273fe72a + VPCMPB $42, (CX), Y17, K7, K4 // 62f375273f212a + VPCMPB $42, 99(R15), Y17, K7, K4 // 62d375273fa7630000002a + VPCMPB $42, Y7, Y17, K7, K6 // 62f375273ff72a + VPCMPB $42, (CX), Y17, K7, K6 // 62f375273f312a + VPCMPB $42, 99(R15), Y17, K7, K6 // 62d375273fb7630000002a + VPCMPB $79, Z9, Z9, K2, K1 // 62d3354a3fc94f + VPCMPB $79, Z28, Z9, K2, K1 // 6293354a3fcc4f + VPCMPB $79, -7(DI)(R8*1), Z9, K2, K1 // 62b3354a3f8c07f9ffffff4f + VPCMPB $79, (SP), Z9, K2, K1 // 62f3354a3f0c244f + VPCMPB $79, Z9, Z25, K2, K1 // 62d335423fc94f + VPCMPB $79, Z28, Z25, K2, K1 // 629335423fcc4f + VPCMPB $79, -7(DI)(R8*1), Z25, K2, K1 // 62b335423f8c07f9ffffff4f + VPCMPB $79, (SP), Z25, K2, K1 // 62f335423f0c244f + VPCMPB $79, Z9, Z9, K2, K3 // 62d3354a3fd94f + VPCMPB $79, Z28, Z9, K2, K3 // 6293354a3fdc4f + VPCMPB $79, -7(DI)(R8*1), Z9, K2, K3 // 62b3354a3f9c07f9ffffff4f + VPCMPB $79, (SP), Z9, K2, K3 // 62f3354a3f1c244f + VPCMPB $79, Z9, Z25, K2, K3 // 62d335423fd94f + VPCMPB $79, Z28, Z25, K2, K3 // 
629335423fdc4f + VPCMPB $79, -7(DI)(R8*1), Z25, K2, K3 // 62b335423f9c07f9ffffff4f + VPCMPB $79, (SP), Z25, K2, K3 // 62f335423f1c244f + VPCMPEQB X30, X0, K2, K4 // 62917d0a74e6 or 6291fd0a74e6 + VPCMPEQB -7(DI)(R8*1), X0, K2, K4 // 62b17d0a74a407f9ffffff or 62b1fd0a74a407f9ffffff + VPCMPEQB (SP), X0, K2, K4 // 62f17d0a742424 or 62f1fd0a742424 + VPCMPEQB X30, X0, K2, K5 // 62917d0a74ee or 6291fd0a74ee + VPCMPEQB -7(DI)(R8*1), X0, K2, K5 // 62b17d0a74ac07f9ffffff or 62b1fd0a74ac07f9ffffff + VPCMPEQB (SP), X0, K2, K5 // 62f17d0a742c24 or 62f1fd0a742c24 + VPCMPEQB Y1, Y8, K2, K2 // 62f13d2a74d1 or 62f1bd2a74d1 + VPCMPEQB -7(CX)(DX*1), Y8, K2, K2 // 62f13d2a749411f9ffffff or 62f1bd2a749411f9ffffff + VPCMPEQB -15(R14)(R15*4), Y8, K2, K2 // 62913d2a7494bef1ffffff or 6291bd2a7494bef1ffffff + VPCMPEQB Y1, Y8, K2, K7 // 62f13d2a74f9 or 62f1bd2a74f9 + VPCMPEQB -7(CX)(DX*1), Y8, K2, K7 // 62f13d2a74bc11f9ffffff or 62f1bd2a74bc11f9ffffff + VPCMPEQB -15(R14)(R15*4), Y8, K2, K7 // 62913d2a74bcbef1ffffff or 6291bd2a74bcbef1ffffff + VPCMPEQB Z31, Z17, K3, K0 // 6291754374c7 or 6291f54374c7 + VPCMPEQB Z0, Z17, K3, K0 // 62f1754374c0 or 62f1f54374c0 + VPCMPEQB 99(R15)(R15*8), Z17, K3, K0 // 629175437484ff63000000 or 6291f5437484ff63000000 + VPCMPEQB 7(AX)(CX*8), Z17, K3, K0 // 62f175437484c807000000 or 62f1f5437484c807000000 + VPCMPEQB Z31, Z23, K3, K0 // 6291454374c7 or 6291c54374c7 + VPCMPEQB Z0, Z23, K3, K0 // 62f1454374c0 or 62f1c54374c0 + VPCMPEQB 99(R15)(R15*8), Z23, K3, K0 // 629145437484ff63000000 or 6291c5437484ff63000000 + VPCMPEQB 7(AX)(CX*8), Z23, K3, K0 // 62f145437484c807000000 or 62f1c5437484c807000000 + VPCMPEQB Z31, Z17, K3, K5 // 6291754374ef or 6291f54374ef + VPCMPEQB Z0, Z17, K3, K5 // 62f1754374e8 or 62f1f54374e8 + VPCMPEQB 99(R15)(R15*8), Z17, K3, K5 // 6291754374acff63000000 or 6291f54374acff63000000 + VPCMPEQB 7(AX)(CX*8), Z17, K3, K5 // 62f1754374acc807000000 or 62f1f54374acc807000000 + VPCMPEQB Z31, Z23, K3, K5 // 6291454374ef or 6291c54374ef + VPCMPEQB Z0, 
Z23, K3, K5 // 62f1454374e8 or 62f1c54374e8 + VPCMPEQB 99(R15)(R15*8), Z23, K3, K5 // 6291454374acff63000000 or 6291c54374acff63000000 + VPCMPEQB 7(AX)(CX*8), Z23, K3, K5 // 62f1454374acc807000000 or 62f1c54374acc807000000 + VPCMPEQW X8, X19, K7, K0 // 62d1650775c0 or 62d1e50775c0 + VPCMPEQW (AX), X19, K7, K0 // 62f165077500 or 62f1e5077500 + VPCMPEQW 7(SI), X19, K7, K0 // 62f16507758607000000 or 62f1e507758607000000 + VPCMPEQW X8, X19, K7, K7 // 62d1650775f8 or 62d1e50775f8 + VPCMPEQW (AX), X19, K7, K7 // 62f165077538 or 62f1e5077538 + VPCMPEQW 7(SI), X19, K7, K7 // 62f1650775be07000000 or 62f1e50775be07000000 + VPCMPEQW Y12, Y21, K1, K5 // 62d1552175ec or 62d1d52175ec + VPCMPEQW 17(SP)(BP*2), Y21, K1, K5 // 62f1552175ac6c11000000 or 62f1d52175ac6c11000000 + VPCMPEQW -7(DI)(R8*4), Y21, K1, K5 // 62b1552175ac87f9ffffff or 62b1d52175ac87f9ffffff + VPCMPEQW Y12, Y21, K1, K4 // 62d1552175e4 or 62d1d52175e4 + VPCMPEQW 17(SP)(BP*2), Y21, K1, K4 // 62f1552175a46c11000000 or 62f1d52175a46c11000000 + VPCMPEQW -7(DI)(R8*4), Y21, K1, K4 // 62b1552175a487f9ffffff or 62b1d52175a487f9ffffff + VPCMPEQW Z26, Z30, K1, K4 // 62910d4175e2 or 62918d4175e2 + VPCMPEQW Z22, Z30, K1, K4 // 62b10d4175e6 or 62b18d4175e6 + VPCMPEQW 15(R8)(R14*4), Z30, K1, K4 // 62910d4175a4b00f000000 or 62918d4175a4b00f000000 + VPCMPEQW -7(CX)(DX*4), Z30, K1, K4 // 62f10d4175a491f9ffffff or 62f18d4175a491f9ffffff + VPCMPEQW Z26, Z5, K1, K4 // 6291554975e2 or 6291d54975e2 + VPCMPEQW Z22, Z5, K1, K4 // 62b1554975e6 or 62b1d54975e6 + VPCMPEQW 15(R8)(R14*4), Z5, K1, K4 // 6291554975a4b00f000000 or 6291d54975a4b00f000000 + VPCMPEQW -7(CX)(DX*4), Z5, K1, K4 // 62f1554975a491f9ffffff or 62f1d54975a491f9ffffff + VPCMPEQW Z26, Z30, K1, K6 // 62910d4175f2 or 62918d4175f2 + VPCMPEQW Z22, Z30, K1, K6 // 62b10d4175f6 or 62b18d4175f6 + VPCMPEQW 15(R8)(R14*4), Z30, K1, K6 // 62910d4175b4b00f000000 or 62918d4175b4b00f000000 + VPCMPEQW -7(CX)(DX*4), Z30, K1, K6 // 62f10d4175b491f9ffffff or 62f18d4175b491f9ffffff + VPCMPEQW 
Z26, Z5, K1, K6 // 6291554975f2 or 6291d54975f2 + VPCMPEQW Z22, Z5, K1, K6 // 62b1554975f6 or 62b1d54975f6 + VPCMPEQW 15(R8)(R14*4), Z5, K1, K6 // 6291554975b4b00f000000 or 6291d54975b4b00f000000 + VPCMPEQW -7(CX)(DX*4), Z5, K1, K6 // 62f1554975b491f9ffffff or 62f1d54975b491f9ffffff + VPCMPGTB X26, X8, K1, K1 // 62913d0964ca or 6291bd0964ca + VPCMPGTB (BX), X8, K1, K1 // 62f13d09640b or 62f1bd09640b + VPCMPGTB -17(BP)(SI*1), X8, K1, K1 // 62f13d09648c35efffffff or 62f1bd09648c35efffffff + VPCMPGTB X26, X8, K1, K3 // 62913d0964da or 6291bd0964da + VPCMPGTB (BX), X8, K1, K3 // 62f13d09641b or 62f1bd09641b + VPCMPGTB -17(BP)(SI*1), X8, K1, K3 // 62f13d09649c35efffffff or 62f1bd09649c35efffffff + VPCMPGTB Y1, Y9, K7, K6 // 62f1352f64f1 or 62f1b52f64f1 + VPCMPGTB 15(R8), Y9, K7, K6 // 62d1352f64b00f000000 or 62d1b52f64b00f000000 + VPCMPGTB (BP), Y9, K7, K6 // 62f1352f647500 or 62f1b52f647500 + VPCMPGTB Y1, Y9, K7, K7 // 62f1352f64f9 or 62f1b52f64f9 + VPCMPGTB 15(R8), Y9, K7, K7 // 62d1352f64b80f000000 or 62d1b52f64b80f000000 + VPCMPGTB (BP), Y9, K7, K7 // 62f1352f647d00 or 62f1b52f647d00 + VPCMPGTB Z16, Z7, K2, K6 // 62b1454a64f0 or 62b1c54a64f0 + VPCMPGTB Z25, Z7, K2, K6 // 6291454a64f1 or 6291c54a64f1 + VPCMPGTB (R8), Z7, K2, K6 // 62d1454a6430 or 62d1c54a6430 + VPCMPGTB 15(DX)(BX*2), Z7, K2, K6 // 62f1454a64b45a0f000000 or 62f1c54a64b45a0f000000 + VPCMPGTB Z16, Z21, K2, K6 // 62b1554264f0 or 62b1d54264f0 + VPCMPGTB Z25, Z21, K2, K6 // 6291554264f1 or 6291d54264f1 + VPCMPGTB (R8), Z21, K2, K6 // 62d155426430 or 62d1d5426430 + VPCMPGTB 15(DX)(BX*2), Z21, K2, K6 // 62f1554264b45a0f000000 or 62f1d54264b45a0f000000 + VPCMPGTB Z16, Z7, K2, K4 // 62b1454a64e0 or 62b1c54a64e0 + VPCMPGTB Z25, Z7, K2, K4 // 6291454a64e1 or 6291c54a64e1 + VPCMPGTB (R8), Z7, K2, K4 // 62d1454a6420 or 62d1c54a6420 + VPCMPGTB 15(DX)(BX*2), Z7, K2, K4 // 62f1454a64a45a0f000000 or 62f1c54a64a45a0f000000 + VPCMPGTB Z16, Z21, K2, K4 // 62b1554264e0 or 62b1d54264e0 + VPCMPGTB Z25, Z21, K2, K4 // 
6291554264e1 or 6291d54264e1 + VPCMPGTB (R8), Z21, K2, K4 // 62d155426420 or 62d1d5426420 + VPCMPGTB 15(DX)(BX*2), Z21, K2, K4 // 62f1554264a45a0f000000 or 62f1d54264a45a0f000000 + VPCMPGTW X11, X23, K7, K3 // 62d1450765db or 62d1c50765db + VPCMPGTW 17(SP)(BP*1), X23, K7, K3 // 62f14507659c2c11000000 or 62f1c507659c2c11000000 + VPCMPGTW -7(CX)(DX*8), X23, K7, K3 // 62f14507659cd1f9ffffff or 62f1c507659cd1f9ffffff + VPCMPGTW X11, X23, K7, K1 // 62d1450765cb or 62d1c50765cb + VPCMPGTW 17(SP)(BP*1), X23, K7, K1 // 62f14507658c2c11000000 or 62f1c507658c2c11000000 + VPCMPGTW -7(CX)(DX*8), X23, K7, K1 // 62f14507658cd1f9ffffff or 62f1c507658cd1f9ffffff + VPCMPGTW Y21, Y12, K6, K5 // 62b11d2e65ed or 62b19d2e65ed + VPCMPGTW 7(AX)(CX*4), Y12, K6, K5 // 62f11d2e65ac8807000000 or 62f19d2e65ac8807000000 + VPCMPGTW 7(AX)(CX*1), Y12, K6, K5 // 62f11d2e65ac0807000000 or 62f19d2e65ac0807000000 + VPCMPGTW Y21, Y12, K6, K4 // 62b11d2e65e5 or 62b19d2e65e5 + VPCMPGTW 7(AX)(CX*4), Y12, K6, K4 // 62f11d2e65a48807000000 or 62f19d2e65a48807000000 + VPCMPGTW 7(AX)(CX*1), Y12, K6, K4 // 62f11d2e65a40807000000 or 62f19d2e65a40807000000 + VPCMPGTW Z23, Z27, K3, K7 // 62b1254365ff or 62b1a54365ff + VPCMPGTW Z9, Z27, K3, K7 // 62d1254365f9 or 62d1a54365f9 + VPCMPGTW 15(R8)(R14*1), Z27, K3, K7 // 6291254365bc300f000000 or 6291a54365bc300f000000 + VPCMPGTW 15(R8)(R14*2), Z27, K3, K7 // 6291254365bc700f000000 or 6291a54365bc700f000000 + VPCMPGTW Z23, Z25, K3, K7 // 62b1354365ff or 62b1b54365ff + VPCMPGTW Z9, Z25, K3, K7 // 62d1354365f9 or 62d1b54365f9 + VPCMPGTW 15(R8)(R14*1), Z25, K3, K7 // 6291354365bc300f000000 or 6291b54365bc300f000000 + VPCMPGTW 15(R8)(R14*2), Z25, K3, K7 // 6291354365bc700f000000 or 6291b54365bc700f000000 + VPCMPGTW Z23, Z27, K3, K6 // 62b1254365f7 or 62b1a54365f7 + VPCMPGTW Z9, Z27, K3, K6 // 62d1254365f1 or 62d1a54365f1 + VPCMPGTW 15(R8)(R14*1), Z27, K3, K6 // 6291254365b4300f000000 or 6291a54365b4300f000000 + VPCMPGTW 15(R8)(R14*2), Z27, K3, K6 // 6291254365b4700f000000 
or 6291a54365b4700f000000 + VPCMPGTW Z23, Z25, K3, K6 // 62b1354365f7 or 62b1b54365f7 + VPCMPGTW Z9, Z25, K3, K6 // 62d1354365f1 or 62d1b54365f1 + VPCMPGTW 15(R8)(R14*1), Z25, K3, K6 // 6291354365b4300f000000 or 6291b54365b4300f000000 + VPCMPGTW 15(R8)(R14*2), Z25, K3, K6 // 6291354365b4700f000000 or 6291b54365b4700f000000 + VPCMPUB $121, X0, X14, K7, K4 // 62f30d0f3ee079 + VPCMPUB $121, 15(R8)(R14*1), X14, K7, K4 // 62930d0f3ea4300f00000079 + VPCMPUB $121, 15(R8)(R14*2), X14, K7, K4 // 62930d0f3ea4700f00000079 + VPCMPUB $121, X0, X14, K7, K6 // 62f30d0f3ef079 + VPCMPUB $121, 15(R8)(R14*1), X14, K7, K6 // 62930d0f3eb4300f00000079 + VPCMPUB $121, 15(R8)(R14*2), X14, K7, K6 // 62930d0f3eb4700f00000079 + VPCMPUB $13, Y7, Y26, K2, K1 // 62f32d223ecf0d + VPCMPUB $13, 17(SP)(BP*8), Y26, K2, K1 // 62f32d223e8cec110000000d + VPCMPUB $13, 17(SP)(BP*4), Y26, K2, K1 // 62f32d223e8cac110000000d + VPCMPUB $13, Y7, Y26, K2, K3 // 62f32d223edf0d + VPCMPUB $13, 17(SP)(BP*8), Y26, K2, K3 // 62f32d223e9cec110000000d + VPCMPUB $13, 17(SP)(BP*4), Y26, K2, K3 // 62f32d223e9cac110000000d + VPCMPUB $65, Z8, Z14, K5, K6 // 62d30d4d3ef041 + VPCMPUB $65, Z24, Z14, K5, K6 // 62930d4d3ef041 + VPCMPUB $65, 99(R15)(R15*4), Z14, K5, K6 // 62930d4d3eb4bf6300000041 + VPCMPUB $65, 15(DX), Z14, K5, K6 // 62f30d4d3eb20f00000041 + VPCMPUB $65, Z8, Z7, K5, K6 // 62d3454d3ef041 + VPCMPUB $65, Z24, Z7, K5, K6 // 6293454d3ef041 + VPCMPUB $65, 99(R15)(R15*4), Z7, K5, K6 // 6293454d3eb4bf6300000041 + VPCMPUB $65, 15(DX), Z7, K5, K6 // 62f3454d3eb20f00000041 + VPCMPUB $65, Z8, Z14, K5, K7 // 62d30d4d3ef841 + VPCMPUB $65, Z24, Z14, K5, K7 // 62930d4d3ef841 + VPCMPUB $65, 99(R15)(R15*4), Z14, K5, K7 // 62930d4d3ebcbf6300000041 + VPCMPUB $65, 15(DX), Z14, K5, K7 // 62f30d4d3eba0f00000041 + VPCMPUB $65, Z8, Z7, K5, K7 // 62d3454d3ef841 + VPCMPUB $65, Z24, Z7, K5, K7 // 6293454d3ef841 + VPCMPUB $65, 99(R15)(R15*4), Z7, K5, K7 // 6293454d3ebcbf6300000041 + VPCMPUB $65, 15(DX), Z7, K5, K7 // 62f3454d3eba0f00000041 
+ VPCMPUW $79, X25, X5, K3, K1 // 6293d50b3ec94f + VPCMPUW $79, (CX), X5, K3, K1 // 62f3d50b3e094f + VPCMPUW $79, 99(R15), X5, K3, K1 // 62d3d50b3e8f630000004f + VPCMPUW $79, X25, X5, K3, K5 // 6293d50b3ee94f + VPCMPUW $79, (CX), X5, K3, K5 // 62f3d50b3e294f + VPCMPUW $79, 99(R15), X5, K3, K5 // 62d3d50b3eaf630000004f + VPCMPUW $64, Y6, Y22, K2, K3 // 62f3cd223ede40 + VPCMPUW $64, 7(AX), Y22, K2, K3 // 62f3cd223e980700000040 + VPCMPUW $64, (DI), Y22, K2, K3 // 62f3cd223e1f40 + VPCMPUW $64, Y6, Y22, K2, K1 // 62f3cd223ece40 + VPCMPUW $64, 7(AX), Y22, K2, K1 // 62f3cd223e880700000040 + VPCMPUW $64, (DI), Y22, K2, K1 // 62f3cd223e0f40 + VPCMPUW $27, Z14, Z15, K1, K5 // 62d385493eee1b + VPCMPUW $27, Z27, Z15, K1, K5 // 629385493eeb1b + VPCMPUW $27, -7(CX)(DX*1), Z15, K1, K5 // 62f385493eac11f9ffffff1b + VPCMPUW $27, -15(R14)(R15*4), Z15, K1, K5 // 629385493eacbef1ffffff1b + VPCMPUW $27, Z14, Z12, K1, K5 // 62d39d493eee1b + VPCMPUW $27, Z27, Z12, K1, K5 // 62939d493eeb1b + VPCMPUW $27, -7(CX)(DX*1), Z12, K1, K5 // 62f39d493eac11f9ffffff1b + VPCMPUW $27, -15(R14)(R15*4), Z12, K1, K5 // 62939d493eacbef1ffffff1b + VPCMPUW $27, Z14, Z15, K1, K4 // 62d385493ee61b + VPCMPUW $27, Z27, Z15, K1, K4 // 629385493ee31b + VPCMPUW $27, -7(CX)(DX*1), Z15, K1, K4 // 62f385493ea411f9ffffff1b + VPCMPUW $27, -15(R14)(R15*4), Z15, K1, K4 // 629385493ea4bef1ffffff1b + VPCMPUW $27, Z14, Z12, K1, K4 // 62d39d493ee61b + VPCMPUW $27, Z27, Z12, K1, K4 // 62939d493ee31b + VPCMPUW $27, -7(CX)(DX*1), Z12, K1, K4 // 62f39d493ea411f9ffffff1b + VPCMPUW $27, -15(R14)(R15*4), Z12, K1, K4 // 62939d493ea4bef1ffffff1b + VPCMPW $47, X9, X0, K2, K7 // 62d3fd0a3ff92f + VPCMPW $47, 99(R15)(R15*2), X0, K2, K7 // 6293fd0a3fbc7f630000002f + VPCMPW $47, -7(DI), X0, K2, K7 // 62f3fd0a3fbff9ffffff2f + VPCMPW $47, X9, X0, K2, K6 // 62d3fd0a3ff12f + VPCMPW $47, 99(R15)(R15*2), X0, K2, K6 // 6293fd0a3fb47f630000002f + VPCMPW $47, -7(DI), X0, K2, K6 // 62f3fd0a3fb7f9ffffff2f + VPCMPW $82, Y7, Y21, K1, K4 // 
62f3d5213fe752 + VPCMPW $82, 99(R15)(R15*1), Y21, K1, K4 // 6293d5213fa43f6300000052 + VPCMPW $82, (DX), Y21, K1, K4 // 62f3d5213f2252 + VPCMPW $82, Y7, Y21, K1, K6 // 62f3d5213ff752 + VPCMPW $82, 99(R15)(R15*1), Y21, K1, K6 // 6293d5213fb43f6300000052 + VPCMPW $82, (DX), Y21, K1, K6 // 62f3d5213f3252 + VPCMPW $126, Z13, Z11, K7, K0 // 62d3a54f3fc57e + VPCMPW $126, Z14, Z11, K7, K0 // 62d3a54f3fc67e + VPCMPW $126, 15(DX)(BX*1), Z11, K7, K0 // 62f3a54f3f841a0f0000007e + VPCMPW $126, -7(CX)(DX*2), Z11, K7, K0 // 62f3a54f3f8451f9ffffff7e + VPCMPW $126, Z13, Z5, K7, K0 // 62d3d54f3fc57e + VPCMPW $126, Z14, Z5, K7, K0 // 62d3d54f3fc67e + VPCMPW $126, 15(DX)(BX*1), Z5, K7, K0 // 62f3d54f3f841a0f0000007e + VPCMPW $126, -7(CX)(DX*2), Z5, K7, K0 // 62f3d54f3f8451f9ffffff7e + VPCMPW $126, Z13, Z11, K7, K7 // 62d3a54f3ffd7e + VPCMPW $126, Z14, Z11, K7, K7 // 62d3a54f3ffe7e + VPCMPW $126, 15(DX)(BX*1), Z11, K7, K7 // 62f3a54f3fbc1a0f0000007e + VPCMPW $126, -7(CX)(DX*2), Z11, K7, K7 // 62f3a54f3fbc51f9ffffff7e + VPCMPW $126, Z13, Z5, K7, K7 // 62d3d54f3ffd7e + VPCMPW $126, Z14, Z5, K7, K7 // 62d3d54f3ffe7e + VPCMPW $126, 15(DX)(BX*1), Z5, K7, K7 // 62f3d54f3fbc1a0f0000007e + VPCMPW $126, -7(CX)(DX*2), Z5, K7, K7 // 62f3d54f3fbc51f9ffffff7e + VPERMI2W X16, X20, K2, X7 // 62b2dd0275f8 + VPERMI2W 7(SI)(DI*1), X20, K2, X7 // 62f2dd0275bc3e07000000 + VPERMI2W 15(DX)(BX*8), X20, K2, X7 // 62f2dd0275bcda0f000000 + VPERMI2W Y18, Y14, K5, Y12 // 62328d2d75e2 + VPERMI2W -7(CX)(DX*1), Y14, K5, Y12 // 62728d2d75a411f9ffffff + VPERMI2W -15(R14)(R15*4), Y14, K5, Y12 // 62128d2d75a4bef1ffffff + VPERMI2W Z28, Z12, K3, Z1 // 62929d4b75cc + VPERMI2W Z13, Z12, K3, Z1 // 62d29d4b75cd + VPERMI2W 99(R15)(R15*8), Z12, K3, Z1 // 62929d4b758cff63000000 + VPERMI2W 7(AX)(CX*8), Z12, K3, Z1 // 62f29d4b758cc807000000 + VPERMI2W Z28, Z16, K3, Z1 // 6292fd4375cc + VPERMI2W Z13, Z16, K3, Z1 // 62d2fd4375cd + VPERMI2W 99(R15)(R15*8), Z16, K3, Z1 // 6292fd43758cff63000000 + VPERMI2W 7(AX)(CX*8), Z16, K3, Z1 // 
62f2fd43758cc807000000 + VPERMI2W Z28, Z12, K3, Z3 // 62929d4b75dc + VPERMI2W Z13, Z12, K3, Z3 // 62d29d4b75dd + VPERMI2W 99(R15)(R15*8), Z12, K3, Z3 // 62929d4b759cff63000000 + VPERMI2W 7(AX)(CX*8), Z12, K3, Z3 // 62f29d4b759cc807000000 + VPERMI2W Z28, Z16, K3, Z3 // 6292fd4375dc + VPERMI2W Z13, Z16, K3, Z3 // 62d2fd4375dd + VPERMI2W 99(R15)(R15*8), Z16, K3, Z3 // 6292fd43759cff63000000 + VPERMI2W 7(AX)(CX*8), Z16, K3, Z3 // 62f2fd43759cc807000000 + VPERMT2W X0, X0, K3, X14 // 6272fd0b7df0 + VPERMT2W 15(R8)(R14*1), X0, K3, X14 // 6212fd0b7db4300f000000 + VPERMT2W 15(R8)(R14*2), X0, K3, X14 // 6212fd0b7db4700f000000 + VPERMT2W Y8, Y27, K2, Y22 // 62c2a5227df0 + VPERMT2W 7(SI)(DI*8), Y27, K2, Y22 // 62e2a5227db4fe07000000 + VPERMT2W -15(R14), Y27, K2, Y22 // 62c2a5227db6f1ffffff + VPERMT2W Z22, Z8, K1, Z14 // 6232bd497df6 + VPERMT2W Z25, Z8, K1, Z14 // 6212bd497df1 + VPERMT2W 17(SP)(BP*2), Z8, K1, Z14 // 6272bd497db46c11000000 + VPERMT2W -7(DI)(R8*4), Z8, K1, Z14 // 6232bd497db487f9ffffff + VPERMT2W Z22, Z24, K1, Z14 // 6232bd417df6 + VPERMT2W Z25, Z24, K1, Z14 // 6212bd417df1 + VPERMT2W 17(SP)(BP*2), Z24, K1, Z14 // 6272bd417db46c11000000 + VPERMT2W -7(DI)(R8*4), Z24, K1, Z14 // 6232bd417db487f9ffffff + VPERMT2W Z22, Z8, K1, Z7 // 62b2bd497dfe + VPERMT2W Z25, Z8, K1, Z7 // 6292bd497df9 + VPERMT2W 17(SP)(BP*2), Z8, K1, Z7 // 62f2bd497dbc6c11000000 + VPERMT2W -7(DI)(R8*4), Z8, K1, Z7 // 62b2bd497dbc87f9ffffff + VPERMT2W Z22, Z24, K1, Z7 // 62b2bd417dfe + VPERMT2W Z25, Z24, K1, Z7 // 6292bd417df9 + VPERMT2W 17(SP)(BP*2), Z24, K1, Z7 // 62f2bd417dbc6c11000000 + VPERMT2W -7(DI)(R8*4), Z24, K1, Z7 // 62b2bd417dbc87f9ffffff + VPERMW X17, X11, K2, X25 // 6222a50a8dc9 + VPERMW (R14), X11, K2, X25 // 6242a50a8d0e + VPERMW -7(DI)(R8*8), X11, K2, X25 // 6222a50a8d8cc7f9ffffff + VPERMW Y9, Y22, K1, Y9 // 6252cd218dc9 + VPERMW 7(SI)(DI*1), Y22, K1, Y9 // 6272cd218d8c3e07000000 + VPERMW 15(DX)(BX*8), Y22, K1, Y9 // 6272cd218d8cda0f000000 + VPERMW Z0, Z6, K7, Z1 // 62f2cd4f8dc8 + 
VPERMW Z8, Z6, K7, Z1 // 62d2cd4f8dc8 + VPERMW 15(R8), Z6, K7, Z1 // 62d2cd4f8d880f000000 + VPERMW (BP), Z6, K7, Z1 // 62f2cd4f8d4d00 + VPERMW Z0, Z2, K7, Z1 // 62f2ed4f8dc8 + VPERMW Z8, Z2, K7, Z1 // 62d2ed4f8dc8 + VPERMW 15(R8), Z2, K7, Z1 // 62d2ed4f8d880f000000 + VPERMW (BP), Z2, K7, Z1 // 62f2ed4f8d4d00 + VPERMW Z0, Z6, K7, Z16 // 62e2cd4f8dc0 + VPERMW Z8, Z6, K7, Z16 // 62c2cd4f8dc0 + VPERMW 15(R8), Z6, K7, Z16 // 62c2cd4f8d800f000000 + VPERMW (BP), Z6, K7, Z16 // 62e2cd4f8d4500 + VPERMW Z0, Z2, K7, Z16 // 62e2ed4f8dc0 + VPERMW Z8, Z2, K7, Z16 // 62c2ed4f8dc0 + VPERMW 15(R8), Z2, K7, Z16 // 62c2ed4f8d800f000000 + VPERMW (BP), Z2, K7, Z16 // 62e2ed4f8d4500 + VPEXTRB $79, X26, AX // 62637d0814d04f or 6263fd0814d04f + VPEXTRB $79, X26, R9 // 62437d0814d14f or 6243fd0814d14f + VPEXTRB $79, X26, 7(SI)(DI*1) // 62637d0814543e074f or 6263fd0814543e074f + VPEXTRB $79, X26, 15(DX)(BX*8) // 62637d081454da0f4f or 6263fd081454da0f4f + VPMADDUBSW X21, X16, K2, X0 // 62b27d0204c5 or 62b2fd0204c5 + VPMADDUBSW 15(R8)(R14*8), X16, K2, X0 // 62927d020484f00f000000 or 6292fd020484f00f000000 + VPMADDUBSW -15(R14)(R15*2), X16, K2, X0 // 62927d0204847ef1ffffff or 6292fd0204847ef1ffffff + VPMADDUBSW Y3, Y31, K4, Y11 // 6272052404db or 6272852404db + VPMADDUBSW -17(BP)(SI*2), Y31, K4, Y11 // 62720524049c75efffffff or 62728524049c75efffffff + VPMADDUBSW 7(AX)(CX*2), Y31, K4, Y11 // 62720524049c4807000000 or 62728524049c4807000000 + VPMADDUBSW Z6, Z22, K1, Z12 // 62724d4104e6 or 6272cd4104e6 + VPMADDUBSW Z8, Z22, K1, Z12 // 62524d4104e0 or 6252cd4104e0 + VPMADDUBSW 99(R15)(R15*1), Z22, K1, Z12 // 62124d4104a43f63000000 or 6212cd4104a43f63000000 + VPMADDUBSW (DX), Z22, K1, Z12 // 62724d410422 or 6272cd410422 + VPMADDUBSW Z6, Z11, K1, Z12 // 6272254904e6 or 6272a54904e6 + VPMADDUBSW Z8, Z11, K1, Z12 // 6252254904e0 or 6252a54904e0 + VPMADDUBSW 99(R15)(R15*1), Z11, K1, Z12 // 6212254904a43f63000000 or 6212a54904a43f63000000 + VPMADDUBSW (DX), Z11, K1, Z12 // 627225490422 or 6272a5490422 
+ VPMADDUBSW Z6, Z22, K1, Z27 // 62624d4104de or 6262cd4104de + VPMADDUBSW Z8, Z22, K1, Z27 // 62424d4104d8 or 6242cd4104d8 + VPMADDUBSW 99(R15)(R15*1), Z22, K1, Z27 // 62024d41049c3f63000000 or 6202cd41049c3f63000000 + VPMADDUBSW (DX), Z22, K1, Z27 // 62624d41041a or 6262cd41041a + VPMADDUBSW Z6, Z11, K1, Z27 // 6262254904de or 6262a54904de + VPMADDUBSW Z8, Z11, K1, Z27 // 6242254904d8 or 6242a54904d8 + VPMADDUBSW 99(R15)(R15*1), Z11, K1, Z27 // 62022549049c3f63000000 or 6202a549049c3f63000000 + VPMADDUBSW (DX), Z11, K1, Z27 // 62622549041a or 6262a549041a + VPMADDWD X22, X28, K3, X0 // 62b11d03f5c6 or 62b19d03f5c6 + VPMADDWD -15(R14)(R15*1), X28, K3, X0 // 62911d03f5843ef1ffffff or 62919d03f5843ef1ffffff + VPMADDWD -15(BX), X28, K3, X0 // 62f11d03f583f1ffffff or 62f19d03f583f1ffffff + VPMADDWD Y13, Y2, K4, Y14 // 62516d2cf5f5 or 6251ed2cf5f5 + VPMADDWD 15(R8)(R14*1), Y2, K4, Y14 // 62116d2cf5b4300f000000 or 6211ed2cf5b4300f000000 + VPMADDWD 15(R8)(R14*2), Y2, K4, Y14 // 62116d2cf5b4700f000000 or 6211ed2cf5b4700f000000 + VPMADDWD Z9, Z12, K5, Z25 // 62411d4df5c9 or 62419d4df5c9 + VPMADDWD Z12, Z12, K5, Z25 // 62411d4df5cc or 62419d4df5cc + VPMADDWD -17(BP)(SI*8), Z12, K5, Z25 // 62611d4df58cf5efffffff or 62619d4df58cf5efffffff + VPMADDWD (R15), Z12, K5, Z25 // 62411d4df50f or 62419d4df50f + VPMADDWD Z9, Z17, K5, Z25 // 62417545f5c9 or 6241f545f5c9 + VPMADDWD Z12, Z17, K5, Z25 // 62417545f5cc or 6241f545f5cc + VPMADDWD -17(BP)(SI*8), Z17, K5, Z25 // 62617545f58cf5efffffff or 6261f545f58cf5efffffff + VPMADDWD (R15), Z17, K5, Z25 // 62417545f50f or 6241f545f50f + VPMADDWD Z9, Z12, K5, Z12 // 62511d4df5e1 or 62519d4df5e1 + VPMADDWD Z12, Z12, K5, Z12 // 62511d4df5e4 or 62519d4df5e4 + VPMADDWD -17(BP)(SI*8), Z12, K5, Z12 // 62711d4df5a4f5efffffff or 62719d4df5a4f5efffffff + VPMADDWD (R15), Z12, K5, Z12 // 62511d4df527 or 62519d4df527 + VPMADDWD Z9, Z17, K5, Z12 // 62517545f5e1 or 6251f545f5e1 + VPMADDWD Z12, Z17, K5, Z12 // 62517545f5e4 or 6251f545f5e4 + VPMADDWD 
-17(BP)(SI*8), Z17, K5, Z12 // 62717545f5a4f5efffffff or 6271f545f5a4f5efffffff + VPMADDWD (R15), Z17, K5, Z12 // 62517545f527 or 6251f545f527 + VPMAXSB X7, X19, K7, X7 // 62f265073cff or 62f2e5073cff + VPMAXSB 7(AX)(CX*4), X19, K7, X7 // 62f265073cbc8807000000 or 62f2e5073cbc8807000000 + VPMAXSB 7(AX)(CX*1), X19, K7, X7 // 62f265073cbc0807000000 or 62f2e5073cbc0807000000 + VPMAXSB Y22, Y15, K7, Y27 // 6222052f3cde or 6222852f3cde + VPMAXSB (R14), Y15, K7, Y27 // 6242052f3c1e or 6242852f3c1e + VPMAXSB -7(DI)(R8*8), Y15, K7, Y27 // 6222052f3c9cc7f9ffffff or 6222852f3c9cc7f9ffffff + VPMAXSB Z8, Z3, K6, Z6 // 62d2654e3cf0 or 62d2e54e3cf0 + VPMAXSB Z2, Z3, K6, Z6 // 62f2654e3cf2 or 62f2e54e3cf2 + VPMAXSB 7(SI)(DI*8), Z3, K6, Z6 // 62f2654e3cb4fe07000000 or 62f2e54e3cb4fe07000000 + VPMAXSB -15(R14), Z3, K6, Z6 // 62d2654e3cb6f1ffffff or 62d2e54e3cb6f1ffffff + VPMAXSB Z8, Z21, K6, Z6 // 62d255463cf0 or 62d2d5463cf0 + VPMAXSB Z2, Z21, K6, Z6 // 62f255463cf2 or 62f2d5463cf2 + VPMAXSB 7(SI)(DI*8), Z21, K6, Z6 // 62f255463cb4fe07000000 or 62f2d5463cb4fe07000000 + VPMAXSB -15(R14), Z21, K6, Z6 // 62d255463cb6f1ffffff or 62d2d5463cb6f1ffffff + VPMAXSB Z8, Z3, K6, Z25 // 6242654e3cc8 or 6242e54e3cc8 + VPMAXSB Z2, Z3, K6, Z25 // 6262654e3cca or 6262e54e3cca + VPMAXSB 7(SI)(DI*8), Z3, K6, Z25 // 6262654e3c8cfe07000000 or 6262e54e3c8cfe07000000 + VPMAXSB -15(R14), Z3, K6, Z25 // 6242654e3c8ef1ffffff or 6242e54e3c8ef1ffffff + VPMAXSB Z8, Z21, K6, Z25 // 624255463cc8 or 6242d5463cc8 + VPMAXSB Z2, Z21, K6, Z25 // 626255463cca or 6262d5463cca + VPMAXSB 7(SI)(DI*8), Z21, K6, Z25 // 626255463c8cfe07000000 or 6262d5463c8cfe07000000 + VPMAXSB -15(R14), Z21, K6, Z25 // 624255463c8ef1ffffff or 6242d5463c8ef1ffffff + VPMAXSW X12, X0, K5, X12 // 62517d0deee4 or 6251fd0deee4 + VPMAXSW 7(SI)(DI*4), X0, K5, X12 // 62717d0deea4be07000000 or 6271fd0deea4be07000000 + VPMAXSW -7(DI)(R8*2), X0, K5, X12 // 62317d0deea447f9ffffff or 6231fd0deea447f9ffffff + VPMAXSW Y14, Y19, K3, Y23 // 62c16523eefe or 
62c1e523eefe + VPMAXSW 99(R15)(R15*2), Y19, K3, Y23 // 62816523eebc7f63000000 or 6281e523eebc7f63000000 + VPMAXSW -7(DI), Y19, K3, Y23 // 62e16523eebff9ffffff or 62e1e523eebff9ffffff + VPMAXSW Z18, Z11, K4, Z12 // 6231254ceee2 or 6231a54ceee2 + VPMAXSW Z24, Z11, K4, Z12 // 6211254ceee0 or 6211a54ceee0 + VPMAXSW -7(CX), Z11, K4, Z12 // 6271254ceea1f9ffffff or 6271a54ceea1f9ffffff + VPMAXSW 15(DX)(BX*4), Z11, K4, Z12 // 6271254ceea49a0f000000 or 6271a54ceea49a0f000000 + VPMAXSW Z18, Z5, K4, Z12 // 6231554ceee2 or 6231d54ceee2 + VPMAXSW Z24, Z5, K4, Z12 // 6211554ceee0 or 6211d54ceee0 + VPMAXSW -7(CX), Z5, K4, Z12 // 6271554ceea1f9ffffff or 6271d54ceea1f9ffffff + VPMAXSW 15(DX)(BX*4), Z5, K4, Z12 // 6271554ceea49a0f000000 or 6271d54ceea49a0f000000 + VPMAXSW Z18, Z11, K4, Z22 // 62a1254ceef2 or 62a1a54ceef2 + VPMAXSW Z24, Z11, K4, Z22 // 6281254ceef0 or 6281a54ceef0 + VPMAXSW -7(CX), Z11, K4, Z22 // 62e1254ceeb1f9ffffff or 62e1a54ceeb1f9ffffff + VPMAXSW 15(DX)(BX*4), Z11, K4, Z22 // 62e1254ceeb49a0f000000 or 62e1a54ceeb49a0f000000 + VPMAXSW Z18, Z5, K4, Z22 // 62a1554ceef2 or 62a1d54ceef2 + VPMAXSW Z24, Z5, K4, Z22 // 6281554ceef0 or 6281d54ceef0 + VPMAXSW -7(CX), Z5, K4, Z22 // 62e1554ceeb1f9ffffff or 62e1d54ceeb1f9ffffff + VPMAXSW 15(DX)(BX*4), Z5, K4, Z22 // 62e1554ceeb49a0f000000 or 62e1d54ceeb49a0f000000 + VPMAXUB X17, X5, K2, X14 // 6231550adef1 or 6231d50adef1 + VPMAXUB 17(SP), X5, K2, X14 // 6271550adeb42411000000 or 6271d50adeb42411000000 + VPMAXUB -17(BP)(SI*4), X5, K2, X14 // 6271550adeb4b5efffffff or 6271d50adeb4b5efffffff + VPMAXUB Y16, Y5, K2, Y21 // 62a1552adee8 or 62a1d52adee8 + VPMAXUB -7(CX)(DX*1), Y5, K2, Y21 // 62e1552adeac11f9ffffff or 62e1d52adeac11f9ffffff + VPMAXUB -15(R14)(R15*4), Y5, K2, Y21 // 6281552adeacbef1ffffff or 6281d52adeacbef1ffffff + VPMAXUB Z6, Z7, K3, Z2 // 62f1454bded6 or 62f1c54bded6 + VPMAXUB Z16, Z7, K3, Z2 // 62b1454bded0 or 62b1c54bded0 + VPMAXUB 99(R15)(R15*8), Z7, K3, Z2 // 6291454bde94ff63000000 or 6291c54bde94ff63000000 
+ VPMAXUB 7(AX)(CX*8), Z7, K3, Z2 // 62f1454bde94c807000000 or 62f1c54bde94c807000000 + VPMAXUB Z6, Z13, K3, Z2 // 62f1154bded6 or 62f1954bded6 + VPMAXUB Z16, Z13, K3, Z2 // 62b1154bded0 or 62b1954bded0 + VPMAXUB 99(R15)(R15*8), Z13, K3, Z2 // 6291154bde94ff63000000 or 6291954bde94ff63000000 + VPMAXUB 7(AX)(CX*8), Z13, K3, Z2 // 62f1154bde94c807000000 or 62f1954bde94c807000000 + VPMAXUB Z6, Z7, K3, Z21 // 62e1454bdeee or 62e1c54bdeee + VPMAXUB Z16, Z7, K3, Z21 // 62a1454bdee8 or 62a1c54bdee8 + VPMAXUB 99(R15)(R15*8), Z7, K3, Z21 // 6281454bdeacff63000000 or 6281c54bdeacff63000000 + VPMAXUB 7(AX)(CX*8), Z7, K3, Z21 // 62e1454bdeacc807000000 or 62e1c54bdeacc807000000 + VPMAXUB Z6, Z13, K3, Z21 // 62e1154bdeee or 62e1954bdeee + VPMAXUB Z16, Z13, K3, Z21 // 62a1154bdee8 or 62a1954bdee8 + VPMAXUB 99(R15)(R15*8), Z13, K3, Z21 // 6281154bdeacff63000000 or 6281954bdeacff63000000 + VPMAXUB 7(AX)(CX*8), Z13, K3, Z21 // 62e1154bdeacc807000000 or 62e1954bdeacc807000000 + VPMAXUW X9, X24, K7, X28 // 62423d073ee1 or 6242bd073ee1 + VPMAXUW -17(BP)(SI*8), X24, K7, X28 // 62623d073ea4f5efffffff or 6262bd073ea4f5efffffff + VPMAXUW (R15), X24, K7, X28 // 62423d073e27 or 6242bd073e27 + VPMAXUW Y7, Y19, K1, Y11 // 627265213edf or 6272e5213edf + VPMAXUW 17(SP)(BP*2), Y19, K1, Y11 // 627265213e9c6c11000000 or 6272e5213e9c6c11000000 + VPMAXUW -7(DI)(R8*4), Y19, K1, Y11 // 623265213e9c87f9ffffff or 6232e5213e9c87f9ffffff + VPMAXUW Z12, Z1, K1, Z20 // 62c275493ee4 or 62c2f5493ee4 + VPMAXUW Z16, Z1, K1, Z20 // 62a275493ee0 or 62a2f5493ee0 + VPMAXUW 15(R8)(R14*4), Z1, K1, Z20 // 628275493ea4b00f000000 or 6282f5493ea4b00f000000 + VPMAXUW -7(CX)(DX*4), Z1, K1, Z20 // 62e275493ea491f9ffffff or 62e2f5493ea491f9ffffff + VPMAXUW Z12, Z3, K1, Z20 // 62c265493ee4 or 62c2e5493ee4 + VPMAXUW Z16, Z3, K1, Z20 // 62a265493ee0 or 62a2e5493ee0 + VPMAXUW 15(R8)(R14*4), Z3, K1, Z20 // 628265493ea4b00f000000 or 6282e5493ea4b00f000000 + VPMAXUW -7(CX)(DX*4), Z3, K1, Z20 // 62e265493ea491f9ffffff or 
62e2e5493ea491f9ffffff + VPMAXUW Z12, Z1, K1, Z9 // 625275493ecc or 6252f5493ecc + VPMAXUW Z16, Z1, K1, Z9 // 623275493ec8 or 6232f5493ec8 + VPMAXUW 15(R8)(R14*4), Z1, K1, Z9 // 621275493e8cb00f000000 or 6212f5493e8cb00f000000 + VPMAXUW -7(CX)(DX*4), Z1, K1, Z9 // 627275493e8c91f9ffffff or 6272f5493e8c91f9ffffff + VPMAXUW Z12, Z3, K1, Z9 // 625265493ecc or 6252e5493ecc + VPMAXUW Z16, Z3, K1, Z9 // 623265493ec8 or 6232e5493ec8 + VPMAXUW 15(R8)(R14*4), Z3, K1, Z9 // 621265493e8cb00f000000 or 6212e5493e8cb00f000000 + VPMAXUW -7(CX)(DX*4), Z3, K1, Z9 // 627265493e8c91f9ffffff or 6272e5493e8c91f9ffffff + VPMINSB X18, X26, K1, X15 // 62322d0138fa or 6232ad0138fa + VPMINSB 7(SI)(DI*8), X26, K1, X15 // 62722d0138bcfe07000000 or 6272ad0138bcfe07000000 + VPMINSB -15(R14), X26, K1, X15 // 62522d0138bef1ffffff or 6252ad0138bef1ffffff + VPMINSB Y3, Y0, K7, Y6 // 62f27d2f38f3 or 62f2fd2f38f3 + VPMINSB 15(R8), Y0, K7, Y6 // 62d27d2f38b00f000000 or 62d2fd2f38b00f000000 + VPMINSB (BP), Y0, K7, Y6 // 62f27d2f387500 or 62f2fd2f387500 + VPMINSB Z3, Z14, K2, Z28 // 62620d4a38e3 or 62628d4a38e3 + VPMINSB Z12, Z14, K2, Z28 // 62420d4a38e4 or 62428d4a38e4 + VPMINSB (R8), Z14, K2, Z28 // 62420d4a3820 or 62428d4a3820 + VPMINSB 15(DX)(BX*2), Z14, K2, Z28 // 62620d4a38a45a0f000000 or 62628d4a38a45a0f000000 + VPMINSB Z3, Z28, K2, Z28 // 62621d4238e3 or 62629d4238e3 + VPMINSB Z12, Z28, K2, Z28 // 62421d4238e4 or 62429d4238e4 + VPMINSB (R8), Z28, K2, Z28 // 62421d423820 or 62429d423820 + VPMINSB 15(DX)(BX*2), Z28, K2, Z28 // 62621d4238a45a0f000000 or 62629d4238a45a0f000000 + VPMINSB Z3, Z14, K2, Z13 // 62720d4a38eb or 62728d4a38eb + VPMINSB Z12, Z14, K2, Z13 // 62520d4a38ec or 62528d4a38ec + VPMINSB (R8), Z14, K2, Z13 // 62520d4a3828 or 62528d4a3828 + VPMINSB 15(DX)(BX*2), Z14, K2, Z13 // 62720d4a38ac5a0f000000 or 62728d4a38ac5a0f000000 + VPMINSB Z3, Z28, K2, Z13 // 62721d4238eb or 62729d4238eb + VPMINSB Z12, Z28, K2, Z13 // 62521d4238ec or 62529d4238ec + VPMINSB (R8), Z28, K2, Z13 // 
62521d423828 or 62529d423828 + VPMINSB 15(DX)(BX*2), Z28, K2, Z13 // 62721d4238ac5a0f000000 or 62729d4238ac5a0f000000 + VPMINSW X24, X0, K7, X0 // 62917d0feac0 or 6291fd0feac0 + VPMINSW -7(CX), X0, K7, X0 // 62f17d0fea81f9ffffff or 62f1fd0fea81f9ffffff + VPMINSW 15(DX)(BX*4), X0, K7, X0 // 62f17d0fea849a0f000000 or 62f1fd0fea849a0f000000 + VPMINSW Y22, Y0, K6, Y7 // 62b17d2eeafe or 62b1fd2eeafe + VPMINSW 7(AX)(CX*4), Y0, K6, Y7 // 62f17d2eeabc8807000000 or 62f1fd2eeabc8807000000 + VPMINSW 7(AX)(CX*1), Y0, K6, Y7 // 62f17d2eeabc0807000000 or 62f1fd2eeabc0807000000 + VPMINSW Z23, Z20, K3, Z16 // 62a15d43eac7 or 62a1dd43eac7 + VPMINSW Z19, Z20, K3, Z16 // 62a15d43eac3 or 62a1dd43eac3 + VPMINSW 15(R8)(R14*1), Z20, K3, Z16 // 62815d43ea84300f000000 or 6281dd43ea84300f000000 + VPMINSW 15(R8)(R14*2), Z20, K3, Z16 // 62815d43ea84700f000000 or 6281dd43ea84700f000000 + VPMINSW Z23, Z0, K3, Z16 // 62a17d4beac7 or 62a1fd4beac7 + VPMINSW Z19, Z0, K3, Z16 // 62a17d4beac3 or 62a1fd4beac3 + VPMINSW 15(R8)(R14*1), Z0, K3, Z16 // 62817d4bea84300f000000 or 6281fd4bea84300f000000 + VPMINSW 15(R8)(R14*2), Z0, K3, Z16 // 62817d4bea84700f000000 or 6281fd4bea84700f000000 + VPMINSW Z23, Z20, K3, Z9 // 62315d43eacf or 6231dd43eacf + VPMINSW Z19, Z20, K3, Z9 // 62315d43eacb or 6231dd43eacb + VPMINSW 15(R8)(R14*1), Z20, K3, Z9 // 62115d43ea8c300f000000 or 6211dd43ea8c300f000000 + VPMINSW 15(R8)(R14*2), Z20, K3, Z9 // 62115d43ea8c700f000000 or 6211dd43ea8c700f000000 + VPMINSW Z23, Z0, K3, Z9 // 62317d4beacf or 6231fd4beacf + VPMINSW Z19, Z0, K3, Z9 // 62317d4beacb or 6231fd4beacb + VPMINSW 15(R8)(R14*1), Z0, K3, Z9 // 62117d4bea8c300f000000 or 6211fd4bea8c300f000000 + VPMINSW 15(R8)(R14*2), Z0, K3, Z9 // 62117d4bea8c700f000000 or 6211fd4bea8c700f000000 + VPMINUB X9, X7, K7, X20 // 62c1450fdae1 or 62c1c50fdae1 + VPMINUB 99(R15)(R15*8), X7, K7, X20 // 6281450fdaa4ff63000000 or 6281c50fdaa4ff63000000 + VPMINUB 7(AX)(CX*8), X7, K7, X20 // 62e1450fdaa4c807000000 or 62e1c50fdaa4c807000000 + VPMINUB 
Y1, Y12, K4, Y13 // 62711d2cdae9 or 62719d2cdae9 + VPMINUB (SI), Y12, K4, Y13 // 62711d2cda2e or 62719d2cda2e + VPMINUB 7(SI)(DI*2), Y12, K4, Y13 // 62711d2cdaac7e07000000 or 62719d2cdaac7e07000000 + VPMINUB Z24, Z0, K4, Z0 // 62917d4cdac0 or 6291fd4cdac0 + VPMINUB Z12, Z0, K4, Z0 // 62d17d4cdac4 or 62d1fd4cdac4 + VPMINUB (R14), Z0, K4, Z0 // 62d17d4cda06 or 62d1fd4cda06 + VPMINUB -7(DI)(R8*8), Z0, K4, Z0 // 62b17d4cda84c7f9ffffff or 62b1fd4cda84c7f9ffffff + VPMINUB Z24, Z25, K4, Z0 // 62913544dac0 or 6291b544dac0 + VPMINUB Z12, Z25, K4, Z0 // 62d13544dac4 or 62d1b544dac4 + VPMINUB (R14), Z25, K4, Z0 // 62d13544da06 or 62d1b544da06 + VPMINUB -7(DI)(R8*8), Z25, K4, Z0 // 62b13544da84c7f9ffffff or 62b1b544da84c7f9ffffff + VPMINUB Z24, Z0, K4, Z11 // 62117d4cdad8 or 6211fd4cdad8 + VPMINUB Z12, Z0, K4, Z11 // 62517d4cdadc or 6251fd4cdadc + VPMINUB (R14), Z0, K4, Z11 // 62517d4cda1e or 6251fd4cda1e + VPMINUB -7(DI)(R8*8), Z0, K4, Z11 // 62317d4cda9cc7f9ffffff or 6231fd4cda9cc7f9ffffff + VPMINUB Z24, Z25, K4, Z11 // 62113544dad8 or 6211b544dad8 + VPMINUB Z12, Z25, K4, Z11 // 62513544dadc or 6251b544dadc + VPMINUB (R14), Z25, K4, Z11 // 62513544da1e or 6251b544da1e + VPMINUB -7(DI)(R8*8), Z25, K4, Z11 // 62313544da9cc7f9ffffff or 6231b544da9cc7f9ffffff + VPMINUW X13, X11, K2, X1 // 62d2250a3acd or 62d2a50a3acd + VPMINUW 15(R8)(R14*4), X11, K2, X1 // 6292250a3a8cb00f000000 or 6292a50a3a8cb00f000000 + VPMINUW -7(CX)(DX*4), X11, K2, X1 // 62f2250a3a8c91f9ffffff or 62f2a50a3a8c91f9ffffff + VPMINUW Y13, Y28, K3, Y1 // 62d21d233acd or 62d29d233acd + VPMINUW 17(SP), Y28, K3, Y1 // 62f21d233a8c2411000000 or 62f29d233a8c2411000000 + VPMINUW -17(BP)(SI*4), Y28, K3, Y1 // 62f21d233a8cb5efffffff or 62f29d233a8cb5efffffff + VPMINUW Z21, Z31, K3, Z17 // 62a205433acd or 62a285433acd + VPMINUW Z9, Z31, K3, Z17 // 62c205433ac9 or 62c285433ac9 + VPMINUW 99(R15)(R15*2), Z31, K3, Z17 // 628205433a8c7f63000000 or 628285433a8c7f63000000 + VPMINUW -7(DI), Z31, K3, Z17 // 62e205433a8ff9ffffff or 
62e285433a8ff9ffffff + VPMINUW Z21, Z0, K3, Z17 // 62a27d4b3acd or 62a2fd4b3acd + VPMINUW Z9, Z0, K3, Z17 // 62c27d4b3ac9 or 62c2fd4b3ac9 + VPMINUW 99(R15)(R15*2), Z0, K3, Z17 // 62827d4b3a8c7f63000000 or 6282fd4b3a8c7f63000000 + VPMINUW -7(DI), Z0, K3, Z17 // 62e27d4b3a8ff9ffffff or 62e2fd4b3a8ff9ffffff + VPMINUW Z21, Z31, K3, Z23 // 62a205433afd or 62a285433afd + VPMINUW Z9, Z31, K3, Z23 // 62c205433af9 or 62c285433af9 + VPMINUW 99(R15)(R15*2), Z31, K3, Z23 // 628205433abc7f63000000 or 628285433abc7f63000000 + VPMINUW -7(DI), Z31, K3, Z23 // 62e205433abff9ffffff or 62e285433abff9ffffff + VPMINUW Z21, Z0, K3, Z23 // 62a27d4b3afd or 62a2fd4b3afd + VPMINUW Z9, Z0, K3, Z23 // 62c27d4b3af9 or 62c2fd4b3af9 + VPMINUW 99(R15)(R15*2), Z0, K3, Z23 // 62827d4b3abc7f63000000 or 6282fd4b3abc7f63000000 + VPMINUW -7(DI), Z0, K3, Z23 // 62e27d4b3abff9ffffff or 62e2fd4b3abff9ffffff + VPMOVB2M X0, K5 // 62f27e0829e8 + VPMOVB2M X0, K4 // 62f27e0829e0 + VPMOVB2M Y7, K4 // 62f27e2829e7 + VPMOVB2M Y7, K6 // 62f27e2829f7 + VPMOVB2M Z6, K1 // 62f27e4829ce + VPMOVB2M Z9, K1 // 62d27e4829c9 + VPMOVB2M Z6, K3 // 62f27e4829de + VPMOVB2M Z9, K3 // 62d27e4829d9 + VPMOVM2B K4, X26 // 62627e0828d4 + VPMOVM2B K5, X26 // 62627e0828d5 + VPMOVM2B K2, Y1 // 62f27e2828ca + VPMOVM2B K7, Y1 // 62f27e2828cf + VPMOVM2B K0, Z26 // 62627e4828d0 + VPMOVM2B K5, Z26 // 62627e4828d5 + VPMOVM2B K0, Z22 // 62e27e4828f0 + VPMOVM2B K5, Z22 // 62e27e4828f5 + VPMOVM2W K0, X16 // 62e2fe0828c0 + VPMOVM2W K7, X16 // 62e2fe0828c7 + VPMOVM2W K5, Y2 // 62f2fe2828d5 + VPMOVM2W K4, Y2 // 62f2fe2828d4 + VPMOVM2W K4, Z14 // 6272fe4828f4 + VPMOVM2W K6, Z14 // 6272fe4828f6 + VPMOVM2W K4, Z13 // 6272fe4828ec + VPMOVM2W K6, Z13 // 6272fe4828ee + VPMOVSWB X18, K3, X0 // 62e27e0b20d0 + VPMOVSWB X18, K3, -7(CX) // 62e27e0b2091f9ffffff + VPMOVSWB X18, K3, 15(DX)(BX*4) // 62e27e0b20949a0f000000 + VPMOVSWB Y6, K3, X8 // 62d27e2b20f0 + VPMOVSWB Y6, K3, -7(CX)(DX*1) // 62f27e2b20b411f9ffffff + VPMOVSWB Y6, K3, -15(R14)(R15*4) // 
62927e2b20b4bef1ffffff + VPMOVSWB Z22, K3, Y21 // 62a27e4b20f5 + VPMOVSWB Z25, K3, Y21 // 62227e4b20cd + VPMOVSWB Z22, K3, 7(SI)(DI*1) // 62e27e4b20b43e07000000 + VPMOVSWB Z25, K3, 7(SI)(DI*1) // 62627e4b208c3e07000000 + VPMOVSWB Z22, K3, 15(DX)(BX*8) // 62e27e4b20b4da0f000000 + VPMOVSWB Z25, K3, 15(DX)(BX*8) // 62627e4b208cda0f000000 + VPMOVSXBW X13, K1, Y28 // 62427d2920e5 or 6242fd2920e5 + VPMOVSXBW -17(BP), K1, Y28 // 62627d2920a5efffffff or 6262fd2920a5efffffff + VPMOVSXBW -15(R14)(R15*8), K1, Y28 // 62027d2920a4fef1ffffff or 6202fd2920a4fef1ffffff + VPMOVSXBW X24, K1, X8 // 62127d0920c0 or 6212fd0920c0 + VPMOVSXBW (BX), K1, X8 // 62727d092003 or 6272fd092003 + VPMOVSXBW -17(BP)(SI*1), K1, X8 // 62727d09208435efffffff or 6272fd09208435efffffff + VPMOVSXBW Y20, K7, Z0 // 62b27d4f20c4 or 62b2fd4f20c4 + VPMOVSXBW -7(DI)(R8*1), K7, Z0 // 62b27d4f208407f9ffffff or 62b2fd4f208407f9ffffff + VPMOVSXBW (SP), K7, Z0 // 62f27d4f200424 or 62f2fd4f200424 + VPMOVSXBW Y20, K7, Z8 // 62327d4f20c4 or 6232fd4f20c4 + VPMOVSXBW -7(DI)(R8*1), K7, Z8 // 62327d4f208407f9ffffff or 6232fd4f208407f9ffffff + VPMOVSXBW (SP), K7, Z8 // 62727d4f200424 or 6272fd4f200424 + VPMOVUSWB X6, K1, X6 // 62f27e0910f6 + VPMOVUSWB X6, K1, 99(R15)(R15*2) // 62927e0910b47f63000000 + VPMOVUSWB X6, K1, -7(DI) // 62f27e0910b7f9ffffff + VPMOVUSWB Y15, K2, X22 // 62327e2a10fe + VPMOVUSWB Y15, K2, 7(SI)(DI*4) // 62727e2a10bcbe07000000 + VPMOVUSWB Y15, K2, -7(DI)(R8*2) // 62327e2a10bc47f9ffffff + VPMOVUSWB Z28, K1, Y1 // 62627e4910e1 + VPMOVUSWB Z6, K1, Y1 // 62f27e4910f1 + VPMOVUSWB Z28, K1, 15(R8)(R14*4) // 62027e4910a4b00f000000 + VPMOVUSWB Z6, K1, 15(R8)(R14*4) // 62927e4910b4b00f000000 + VPMOVUSWB Z28, K1, -7(CX)(DX*4) // 62627e4910a491f9ffffff + VPMOVUSWB Z6, K1, -7(CX)(DX*4) // 62f27e4910b491f9ffffff + VPMOVW2M X12, K4 // 62d2fe0829e4 + VPMOVW2M X12, K6 // 62d2fe0829f4 + VPMOVW2M Y27, K4 // 6292fe2829e3 + VPMOVW2M Y27, K5 // 6292fe2829eb + VPMOVW2M Z13, K2 // 62d2fe4829d5 + VPMOVW2M Z21, K2 // 
62b2fe4829d5 + VPMOVW2M Z13, K7 // 62d2fe4829fd + VPMOVW2M Z21, K7 // 62b2fe4829fd + VPMOVWB X28, K7, X16 // 62227e0f30e0 + VPMOVWB X28, K7, -7(CX)(DX*1) // 62627e0f30a411f9ffffff + VPMOVWB X28, K7, -15(R14)(R15*4) // 62027e0f30a4bef1ffffff + VPMOVWB Y19, K1, X8 // 62c27e2930d8 + VPMOVWB Y19, K1, 17(SP) // 62e27e29309c2411000000 + VPMOVWB Y19, K1, -17(BP)(SI*4) // 62e27e29309cb5efffffff + VPMOVWB Z26, K1, Y5 // 62627e4930d5 + VPMOVWB Z3, K1, Y5 // 62f27e4930dd + VPMOVWB Z26, K1, (R8) // 62427e493010 + VPMOVWB Z3, K1, (R8) // 62d27e493018 + VPMOVWB Z26, K1, 15(DX)(BX*2) // 62627e4930945a0f000000 + VPMOVWB Z3, K1, 15(DX)(BX*2) // 62f27e49309c5a0f000000 + VPMOVZXBW X0, K4, Y21 // 62e27d2c30e8 or 62e2fd2c30e8 + VPMOVZXBW 99(R15)(R15*1), K4, Y21 // 62827d2c30ac3f63000000 or 6282fd2c30ac3f63000000 + VPMOVZXBW (DX), K4, Y21 // 62e27d2c302a or 62e2fd2c302a + VPMOVZXBW X11, K5, X25 // 62427d0d30cb or 6242fd0d30cb + VPMOVZXBW 17(SP)(BP*2), K5, X25 // 62627d0d308c6c11000000 or 6262fd0d308c6c11000000 + VPMOVZXBW -7(DI)(R8*4), K5, X25 // 62227d0d308c87f9ffffff or 6222fd0d308c87f9ffffff + VPMOVZXBW Y7, K7, Z11 // 62727d4f30df or 6272fd4f30df + VPMOVZXBW 17(SP)(BP*1), K7, Z11 // 62727d4f309c2c11000000 or 6272fd4f309c2c11000000 + VPMOVZXBW -7(CX)(DX*8), K7, Z11 // 62727d4f309cd1f9ffffff or 6272fd4f309cd1f9ffffff + VPMOVZXBW Y7, K7, Z25 // 62627d4f30cf or 6262fd4f30cf + VPMOVZXBW 17(SP)(BP*1), K7, Z25 // 62627d4f308c2c11000000 or 6262fd4f308c2c11000000 + VPMOVZXBW -7(CX)(DX*8), K7, Z25 // 62627d4f308cd1f9ffffff or 6262fd4f308cd1f9ffffff + VPMULHRSW X30, X15, K2, X11 // 6212050a0bde or 6212850a0bde + VPMULHRSW -7(CX), X15, K2, X11 // 6272050a0b99f9ffffff or 6272850a0b99f9ffffff + VPMULHRSW 15(DX)(BX*4), X15, K2, X11 // 6272050a0b9c9a0f000000 or 6272850a0b9c9a0f000000 + VPMULHRSW Y16, Y21, K3, Y24 // 622255230bc0 or 6222d5230bc0 + VPMULHRSW 99(R15)(R15*4), Y21, K3, Y24 // 620255230b84bf63000000 or 6202d5230b84bf63000000 + VPMULHRSW 15(DX), Y21, K3, Y24 // 626255230b820f000000 or 
6262d5230b820f000000 + VPMULHRSW Z22, Z12, K3, Z16 // 62a21d4b0bc6 or 62a29d4b0bc6 + VPMULHRSW Z11, Z12, K3, Z16 // 62c21d4b0bc3 or 62c29d4b0bc3 + VPMULHRSW 15(DX)(BX*1), Z12, K3, Z16 // 62e21d4b0b841a0f000000 or 62e29d4b0b841a0f000000 + VPMULHRSW -7(CX)(DX*2), Z12, K3, Z16 // 62e21d4b0b8451f9ffffff or 62e29d4b0b8451f9ffffff + VPMULHRSW Z22, Z27, K3, Z16 // 62a225430bc6 or 62a2a5430bc6 + VPMULHRSW Z11, Z27, K3, Z16 // 62c225430bc3 or 62c2a5430bc3 + VPMULHRSW 15(DX)(BX*1), Z27, K3, Z16 // 62e225430b841a0f000000 or 62e2a5430b841a0f000000 + VPMULHRSW -7(CX)(DX*2), Z27, K3, Z16 // 62e225430b8451f9ffffff or 62e2a5430b8451f9ffffff + VPMULHRSW Z22, Z12, K3, Z13 // 62321d4b0bee or 62329d4b0bee + VPMULHRSW Z11, Z12, K3, Z13 // 62521d4b0beb or 62529d4b0beb + VPMULHRSW 15(DX)(BX*1), Z12, K3, Z13 // 62721d4b0bac1a0f000000 or 62729d4b0bac1a0f000000 + VPMULHRSW -7(CX)(DX*2), Z12, K3, Z13 // 62721d4b0bac51f9ffffff or 62729d4b0bac51f9ffffff + VPMULHRSW Z22, Z27, K3, Z13 // 623225430bee or 6232a5430bee + VPMULHRSW Z11, Z27, K3, Z13 // 625225430beb or 6252a5430beb + VPMULHRSW 15(DX)(BX*1), Z27, K3, Z13 // 627225430bac1a0f000000 or 6272a5430bac1a0f000000 + VPMULHRSW -7(CX)(DX*2), Z27, K3, Z13 // 627225430bac51f9ffffff or 6272a5430bac51f9ffffff + VPMULHUW X12, X6, K3, X13 // 62514d0be4ec or 6251cd0be4ec + VPMULHUW 99(R15)(R15*8), X6, K3, X13 // 62114d0be4acff63000000 or 6211cd0be4acff63000000 + VPMULHUW 7(AX)(CX*8), X6, K3, X13 // 62714d0be4acc807000000 or 6271cd0be4acc807000000 + VPMULHUW Y9, Y13, K2, Y9 // 6251152ae4c9 or 6251952ae4c9 + VPMULHUW (CX), Y13, K2, Y9 // 6271152ae409 or 6271952ae409 + VPMULHUW 99(R15), Y13, K2, Y9 // 6251152ae48f63000000 or 6251952ae48f63000000 + VPMULHUW Z12, Z25, K1, Z6 // 62d13541e4f4 or 62d1b541e4f4 + VPMULHUW Z17, Z25, K1, Z6 // 62b13541e4f1 or 62b1b541e4f1 + VPMULHUW -17(BP), Z25, K1, Z6 // 62f13541e4b5efffffff or 62f1b541e4b5efffffff + VPMULHUW -15(R14)(R15*8), Z25, K1, Z6 // 62913541e4b4fef1ffffff or 6291b541e4b4fef1ffffff + VPMULHUW Z12, Z12, 
K1, Z6 // 62d11d49e4f4 or 62d19d49e4f4 + VPMULHUW Z17, Z12, K1, Z6 // 62b11d49e4f1 or 62b19d49e4f1 + VPMULHUW -17(BP), Z12, K1, Z6 // 62f11d49e4b5efffffff or 62f19d49e4b5efffffff + VPMULHUW -15(R14)(R15*8), Z12, K1, Z6 // 62911d49e4b4fef1ffffff or 62919d49e4b4fef1ffffff + VPMULHUW Z12, Z25, K1, Z8 // 62513541e4c4 or 6251b541e4c4 + VPMULHUW Z17, Z25, K1, Z8 // 62313541e4c1 or 6231b541e4c1 + VPMULHUW -17(BP), Z25, K1, Z8 // 62713541e485efffffff or 6271b541e485efffffff + VPMULHUW -15(R14)(R15*8), Z25, K1, Z8 // 62113541e484fef1ffffff or 6211b541e484fef1ffffff + VPMULHUW Z12, Z12, K1, Z8 // 62511d49e4c4 or 62519d49e4c4 + VPMULHUW Z17, Z12, K1, Z8 // 62311d49e4c1 or 62319d49e4c1 + VPMULHUW -17(BP), Z12, K1, Z8 // 62711d49e485efffffff or 62719d49e485efffffff + VPMULHUW -15(R14)(R15*8), Z12, K1, Z8 // 62111d49e484fef1ffffff or 62119d49e484fef1ffffff + VPMULHW X8, X30, K2, X23 // 62c10d02e5f8 or 62c18d02e5f8 + VPMULHW (AX), X30, K2, X23 // 62e10d02e538 or 62e18d02e538 + VPMULHW 7(SI), X30, K2, X23 // 62e10d02e5be07000000 or 62e18d02e5be07000000 + VPMULHW Y7, Y3, K1, Y6 // 62f16529e5f7 or 62f1e529e5f7 + VPMULHW 99(R15)(R15*2), Y3, K1, Y6 // 62916529e5b47f63000000 or 6291e529e5b47f63000000 + VPMULHW -7(DI), Y3, K1, Y6 // 62f16529e5b7f9ffffff or 62f1e529e5b7f9ffffff + VPMULHW Z3, Z6, K7, Z9 // 62714d4fe5cb or 6271cd4fe5cb + VPMULHW Z21, Z6, K7, Z9 // 62314d4fe5cd or 6231cd4fe5cd + VPMULHW 17(SP)(BP*2), Z6, K7, Z9 // 62714d4fe58c6c11000000 or 6271cd4fe58c6c11000000 + VPMULHW -7(DI)(R8*4), Z6, K7, Z9 // 62314d4fe58c87f9ffffff or 6231cd4fe58c87f9ffffff + VPMULHW Z3, Z25, K7, Z9 // 62713547e5cb or 6271b547e5cb + VPMULHW Z21, Z25, K7, Z9 // 62313547e5cd or 6231b547e5cd + VPMULHW 17(SP)(BP*2), Z25, K7, Z9 // 62713547e58c6c11000000 or 6271b547e58c6c11000000 + VPMULHW -7(DI)(R8*4), Z25, K7, Z9 // 62313547e58c87f9ffffff or 6231b547e58c87f9ffffff + VPMULHW Z3, Z6, K7, Z12 // 62714d4fe5e3 or 6271cd4fe5e3 + VPMULHW Z21, Z6, K7, Z12 // 62314d4fe5e5 or 6231cd4fe5e5 + VPMULHW 17(SP)(BP*2), 
Z6, K7, Z12 // 62714d4fe5a46c11000000 or 6271cd4fe5a46c11000000 + VPMULHW -7(DI)(R8*4), Z6, K7, Z12 // 62314d4fe5a487f9ffffff or 6231cd4fe5a487f9ffffff + VPMULHW Z3, Z25, K7, Z12 // 62713547e5e3 or 6271b547e5e3 + VPMULHW Z21, Z25, K7, Z12 // 62313547e5e5 or 6231b547e5e5 + VPMULHW 17(SP)(BP*2), Z25, K7, Z12 // 62713547e5a46c11000000 or 6271b547e5a46c11000000 + VPMULHW -7(DI)(R8*4), Z25, K7, Z12 // 62313547e5a487f9ffffff or 6231b547e5a487f9ffffff + VPMULLW X7, X16, K1, X31 // 62617d01d5ff or 6261fd01d5ff + VPMULLW (R8), X16, K1, X31 // 62417d01d538 or 6241fd01d538 + VPMULLW 15(DX)(BX*2), X16, K1, X31 // 62617d01d5bc5a0f000000 or 6261fd01d5bc5a0f000000 + VPMULLW Y18, Y31, K3, Y18 // 62a10523d5d2 or 62a18523d5d2 + VPMULLW -17(BP), Y31, K3, Y18 // 62e10523d595efffffff or 62e18523d595efffffff + VPMULLW -15(R14)(R15*8), Y31, K3, Y18 // 62810523d594fef1ffffff or 62818523d594fef1ffffff + VPMULLW Z11, Z12, K4, Z9 // 62511d4cd5cb or 62519d4cd5cb + VPMULLW Z5, Z12, K4, Z9 // 62711d4cd5cd or 62719d4cd5cd + VPMULLW -15(R14)(R15*1), Z12, K4, Z9 // 62111d4cd58c3ef1ffffff or 62119d4cd58c3ef1ffffff + VPMULLW -15(BX), Z12, K4, Z9 // 62711d4cd58bf1ffffff or 62719d4cd58bf1ffffff + VPMULLW Z11, Z22, K4, Z9 // 62514d44d5cb or 6251cd44d5cb + VPMULLW Z5, Z22, K4, Z9 // 62714d44d5cd or 6271cd44d5cd + VPMULLW -15(R14)(R15*1), Z22, K4, Z9 // 62114d44d58c3ef1ffffff or 6211cd44d58c3ef1ffffff + VPMULLW -15(BX), Z22, K4, Z9 // 62714d44d58bf1ffffff or 6271cd44d58bf1ffffff + VPMULLW Z11, Z12, K4, Z19 // 62c11d4cd5db or 62c19d4cd5db + VPMULLW Z5, Z12, K4, Z19 // 62e11d4cd5dd or 62e19d4cd5dd + VPMULLW -15(R14)(R15*1), Z12, K4, Z19 // 62811d4cd59c3ef1ffffff or 62819d4cd59c3ef1ffffff + VPMULLW -15(BX), Z12, K4, Z19 // 62e11d4cd59bf1ffffff or 62e19d4cd59bf1ffffff + VPMULLW Z11, Z22, K4, Z19 // 62c14d44d5db or 62c1cd44d5db + VPMULLW Z5, Z22, K4, Z19 // 62e14d44d5dd or 62e1cd44d5dd + VPMULLW -15(R14)(R15*1), Z22, K4, Z19 // 62814d44d59c3ef1ffffff or 6281cd44d59c3ef1ffffff + VPMULLW -15(BX), Z22, K4, Z19 
// 62e14d44d59bf1ffffff or 62e1cd44d59bf1ffffff + VPSADBW X7, X3, X31 // 62616508f6ff or 6261e508f6ff + VPSADBW 17(SP)(BP*8), X3, X31 // 62616508f6bcec11000000 or 6261e508f6bcec11000000 + VPSADBW 17(SP)(BP*4), X3, X31 // 62616508f6bcac11000000 or 6261e508f6bcac11000000 + VPSADBW Y14, Y9, Y22 // 62c13528f6f6 or 62c1b528f6f6 + VPSADBW 99(R15)(R15*8), Y9, Y22 // 62813528f6b4ff63000000 or 6281b528f6b4ff63000000 + VPSADBW 7(AX)(CX*8), Y9, Y22 // 62e13528f6b4c807000000 or 62e1b528f6b4c807000000 + VPSADBW Z7, Z26, Z30 // 62612d40f6f7 or 6261ad40f6f7 + VPSADBW Z21, Z26, Z30 // 62212d40f6f5 or 6221ad40f6f5 + VPSADBW (R8), Z26, Z30 // 62412d40f630 or 6241ad40f630 + VPSADBW 15(DX)(BX*2), Z26, Z30 // 62612d40f6b45a0f000000 or 6261ad40f6b45a0f000000 + VPSADBW Z7, Z22, Z30 // 62614d40f6f7 or 6261cd40f6f7 + VPSADBW Z21, Z22, Z30 // 62214d40f6f5 or 6221cd40f6f5 + VPSADBW (R8), Z22, Z30 // 62414d40f630 or 6241cd40f630 + VPSADBW 15(DX)(BX*2), Z22, Z30 // 62614d40f6b45a0f000000 or 6261cd40f6b45a0f000000 + VPSADBW Z7, Z26, Z5 // 62f12d40f6ef or 62f1ad40f6ef + VPSADBW Z21, Z26, Z5 // 62b12d40f6ed or 62b1ad40f6ed + VPSADBW (R8), Z26, Z5 // 62d12d40f628 or 62d1ad40f628 + VPSADBW 15(DX)(BX*2), Z26, Z5 // 62f12d40f6ac5a0f000000 or 62f1ad40f6ac5a0f000000 + VPSADBW Z7, Z22, Z5 // 62f14d40f6ef or 62f1cd40f6ef + VPSADBW Z21, Z22, Z5 // 62b14d40f6ed or 62b1cd40f6ed + VPSADBW (R8), Z22, Z5 // 62d14d40f628 or 62d1cd40f628 + VPSADBW 15(DX)(BX*2), Z22, Z5 // 62f14d40f6ac5a0f000000 or 62f1cd40f6ac5a0f000000 + VPSHUFB X13, X9, K5, X0 // 62d2350d00c5 or 62d2b50d00c5 + VPSHUFB 15(R8)(R14*4), X9, K5, X0 // 6292350d0084b00f000000 or 6292b50d0084b00f000000 + VPSHUFB -7(CX)(DX*4), X9, K5, X0 // 62f2350d008491f9ffffff or 62f2b50d008491f9ffffff + VPSHUFB Y2, Y16, K7, Y5 // 62f27d2700ea or 62f2fd2700ea + VPSHUFB 15(DX)(BX*1), Y16, K7, Y5 // 62f27d2700ac1a0f000000 or 62f2fd2700ac1a0f000000 + VPSHUFB -7(CX)(DX*2), Y16, K7, Y5 // 62f27d2700ac51f9ffffff or 62f2fd2700ac51f9ffffff + VPSHUFB Z9, Z12, K7, Z25 // 
62421d4f00c9 or 62429d4f00c9 + VPSHUFB Z12, Z12, K7, Z25 // 62421d4f00cc or 62429d4f00cc + VPSHUFB 15(R8)(R14*8), Z12, K7, Z25 // 62021d4f008cf00f000000 or 62029d4f008cf00f000000 + VPSHUFB -15(R14)(R15*2), Z12, K7, Z25 // 62021d4f008c7ef1ffffff or 62029d4f008c7ef1ffffff + VPSHUFB Z9, Z17, K7, Z25 // 6242754700c9 or 6242f54700c9 + VPSHUFB Z12, Z17, K7, Z25 // 6242754700cc or 6242f54700cc + VPSHUFB 15(R8)(R14*8), Z17, K7, Z25 // 62027547008cf00f000000 or 6202f547008cf00f000000 + VPSHUFB -15(R14)(R15*2), Z17, K7, Z25 // 62027547008c7ef1ffffff or 6202f547008c7ef1ffffff + VPSHUFB Z9, Z12, K7, Z12 // 62521d4f00e1 or 62529d4f00e1 + VPSHUFB Z12, Z12, K7, Z12 // 62521d4f00e4 or 62529d4f00e4 + VPSHUFB 15(R8)(R14*8), Z12, K7, Z12 // 62121d4f00a4f00f000000 or 62129d4f00a4f00f000000 + VPSHUFB -15(R14)(R15*2), Z12, K7, Z12 // 62121d4f00a47ef1ffffff or 62129d4f00a47ef1ffffff + VPSHUFB Z9, Z17, K7, Z12 // 6252754700e1 or 6252f54700e1 + VPSHUFB Z12, Z17, K7, Z12 // 6252754700e4 or 6252f54700e4 + VPSHUFB 15(R8)(R14*8), Z17, K7, Z12 // 6212754700a4f00f000000 or 6212f54700a4f00f000000 + VPSHUFB -15(R14)(R15*2), Z17, K7, Z12 // 6212754700a47ef1ffffff or 6212f54700a47ef1ffffff + VPSHUFHW $13, X11, K2, X31 // 62417e0a70fb0d or 6241fe0a70fb0d + VPSHUFHW $13, -17(BP)(SI*2), K2, X31 // 62617e0a70bc75efffffff0d or 6261fe0a70bc75efffffff0d + VPSHUFHW $13, 7(AX)(CX*2), K2, X31 // 62617e0a70bc48070000000d or 6261fe0a70bc48070000000d + VPSHUFHW $65, Y11, K5, Y6 // 62d17e2d70f341 or 62d1fe2d70f341 + VPSHUFHW $65, 15(R8), K5, Y6 // 62d17e2d70b00f00000041 or 62d1fe2d70b00f00000041 + VPSHUFHW $65, (BP), K5, Y6 // 62f17e2d70750041 or 62f1fe2d70750041 + VPSHUFHW $67, Z0, K3, Z7 // 62f17e4b70f843 or 62f1fe4b70f843 + VPSHUFHW $67, Z6, K3, Z7 // 62f17e4b70fe43 or 62f1fe4b70fe43 + VPSHUFHW $67, (SI), K3, Z7 // 62f17e4b703e43 or 62f1fe4b703e43 + VPSHUFHW $67, 7(SI)(DI*2), K3, Z7 // 62f17e4b70bc7e0700000043 or 62f1fe4b70bc7e0700000043 + VPSHUFHW $67, Z0, K3, Z9 // 62717e4b70c843 or 6271fe4b70c843 + VPSHUFHW 
$67, Z6, K3, Z9 // 62717e4b70ce43 or 6271fe4b70ce43 + VPSHUFHW $67, (SI), K3, Z9 // 62717e4b700e43 or 6271fe4b700e43 + VPSHUFHW $67, 7(SI)(DI*2), K3, Z9 // 62717e4b708c7e0700000043 or 6271fe4b708c7e0700000043 + VPSHUFLW $127, X5, K4, X22 // 62e17f0c70f57f or 62e1ff0c70f57f + VPSHUFLW $127, 15(R8)(R14*1), K4, X22 // 62817f0c70b4300f0000007f or 6281ff0c70b4300f0000007f + VPSHUFLW $127, 15(R8)(R14*2), K4, X22 // 62817f0c70b4700f0000007f or 6281ff0c70b4700f0000007f + VPSHUFLW $0, Y7, K2, Y19 // 62e17f2a70df00 or 62e1ff2a70df00 + VPSHUFLW $0, 15(R8)(R14*8), K2, Y19 // 62817f2a709cf00f00000000 or 6281ff2a709cf00f00000000 + VPSHUFLW $0, -15(R14)(R15*2), K2, Y19 // 62817f2a709c7ef1ffffff00 or 6281ff2a709c7ef1ffffff00 + VPSHUFLW $97, Z3, K2, Z20 // 62e17f4a70e361 or 62e1ff4a70e361 + VPSHUFLW $97, Z30, K2, Z20 // 62817f4a70e661 or 6281ff4a70e661 + VPSHUFLW $97, 17(SP)(BP*8), K2, Z20 // 62e17f4a70a4ec1100000061 or 62e1ff4a70a4ec1100000061 + VPSHUFLW $97, 17(SP)(BP*4), K2, Z20 // 62e17f4a70a4ac1100000061 or 62e1ff4a70a4ac1100000061 + VPSHUFLW $97, Z3, K2, Z28 // 62617f4a70e361 or 6261ff4a70e361 + VPSHUFLW $97, Z30, K2, Z28 // 62017f4a70e661 or 6201ff4a70e661 + VPSHUFLW $97, 17(SP)(BP*8), K2, Z28 // 62617f4a70a4ec1100000061 or 6261ff4a70a4ec1100000061 + VPSHUFLW $97, 17(SP)(BP*4), K2, Z28 // 62617f4a70a4ac1100000061 or 6261ff4a70a4ac1100000061 + VPSLLDQ $64, X8, X18 // 62d16d0073f840 or 62d1ed0073f840 + VPSLLDQ $64, -7(CX)(DX*1), X18 // 62f16d0073bc11f9ffffff40 or 62f1ed0073bc11f9ffffff40 + VPSLLDQ $64, -15(R14)(R15*4), X18 // 62916d0073bcbef1ffffff40 or 6291ed0073bcbef1ffffff40 + VPSLLDQ $27, Y12, Y20 // 62d15d2073fc1b or 62d1dd2073fc1b + VPSLLDQ $27, 7(AX)(CX*4), Y20 // 62f15d2073bc88070000001b or 62f1dd2073bc88070000001b + VPSLLDQ $27, 7(AX)(CX*1), Y20 // 62f15d2073bc08070000001b or 62f1dd2073bc08070000001b + VPSLLDQ $47, Z7, Z2 // 62f16d4873ff2f or 62f1ed4873ff2f + VPSLLDQ $47, Z13, Z2 // 62d16d4873fd2f or 62d1ed4873fd2f + VPSLLDQ $47, 17(SP), Z2 // 62f16d4873bc24110000002f 
or 62f1ed4873bc24110000002f + VPSLLDQ $47, -17(BP)(SI*4), Z2 // 62f16d4873bcb5efffffff2f or 62f1ed4873bcb5efffffff2f + VPSLLDQ $47, Z7, Z21 // 62f1554073ff2f or 62f1d54073ff2f + VPSLLDQ $47, Z13, Z21 // 62d1554073fd2f or 62d1d54073fd2f + VPSLLDQ $47, 17(SP), Z21 // 62f1554073bc24110000002f or 62f1d54073bc24110000002f + VPSLLDQ $47, -17(BP)(SI*4), Z21 // 62f1554073bcb5efffffff2f or 62f1d54073bcb5efffffff2f + VPSLLVW X11, X1, K7, X22 // 62c2f50f12f3 + VPSLLVW 7(AX)(CX*4), X1, K7, X22 // 62e2f50f12b48807000000 + VPSLLVW 7(AX)(CX*1), X1, K7, X22 // 62e2f50f12b40807000000 + VPSLLVW Y9, Y7, K7, Y17 // 62c2c52f12c9 + VPSLLVW 17(SP), Y7, K7, Y17 // 62e2c52f128c2411000000 + VPSLLVW -17(BP)(SI*4), Y7, K7, Y17 // 62e2c52f128cb5efffffff + VPSLLVW Z3, Z14, K6, Z28 // 62628d4e12e3 + VPSLLVW Z12, Z14, K6, Z28 // 62428d4e12e4 + VPSLLVW 7(SI)(DI*8), Z14, K6, Z28 // 62628d4e12a4fe07000000 + VPSLLVW -15(R14), Z14, K6, Z28 // 62428d4e12a6f1ffffff + VPSLLVW Z3, Z28, K6, Z28 // 62629d4612e3 + VPSLLVW Z12, Z28, K6, Z28 // 62429d4612e4 + VPSLLVW 7(SI)(DI*8), Z28, K6, Z28 // 62629d4612a4fe07000000 + VPSLLVW -15(R14), Z28, K6, Z28 // 62429d4612a6f1ffffff + VPSLLVW Z3, Z14, K6, Z13 // 62728d4e12eb + VPSLLVW Z12, Z14, K6, Z13 // 62528d4e12ec + VPSLLVW 7(SI)(DI*8), Z14, K6, Z13 // 62728d4e12acfe07000000 + VPSLLVW -15(R14), Z14, K6, Z13 // 62528d4e12aef1ffffff + VPSLLVW Z3, Z28, K6, Z13 // 62729d4612eb + VPSLLVW Z12, Z28, K6, Z13 // 62529d4612ec + VPSLLVW 7(SI)(DI*8), Z28, K6, Z13 // 62729d4612acfe07000000 + VPSLLVW -15(R14), Z28, K6, Z13 // 62529d4612aef1ffffff + VPSLLW $121, X7, K3, X6 // 62f14d0b71f779 or 62f1cd0b71f779 + VPSLLW $121, (SI), K3, X6 // 62f14d0b713679 or 62f1cd0b713679 + VPSLLW $121, 7(SI)(DI*2), K3, X6 // 62f14d0b71b47e0700000079 or 62f1cd0b71b47e0700000079 + VPSLLW $13, Y8, K7, Y31 // 62d1052771f00d or 62d1852771f00d + VPSLLW $13, 7(AX), K7, Y31 // 62f1052771b0070000000d or 62f1852771b0070000000d + VPSLLW $13, (DI), K7, Y31 // 62f1052771370d or 62f1852771370d + VPSLLW $65, 
Z19, K4, Z15 // 62b1054c71f341 or 62b1854c71f341 + VPSLLW $65, Z15, K4, Z15 // 62d1054c71f741 or 62d1854c71f741 + VPSLLW $65, 7(SI)(DI*1), K4, Z15 // 62f1054c71b43e0700000041 or 62f1854c71b43e0700000041 + VPSLLW $65, 15(DX)(BX*8), K4, Z15 // 62f1054c71b4da0f00000041 or 62f1854c71b4da0f00000041 + VPSLLW $65, Z19, K4, Z30 // 62b10d4471f341 or 62b18d4471f341 + VPSLLW $65, Z15, K4, Z30 // 62d10d4471f741 or 62d18d4471f741 + VPSLLW $65, 7(SI)(DI*1), K4, Z30 // 62f10d4471b43e0700000041 or 62f18d4471b43e0700000041 + VPSLLW $65, 15(DX)(BX*8), K4, Z30 // 62f10d4471b4da0f00000041 or 62f18d4471b4da0f00000041 + VPSLLW X3, X31, K4, X8 // 62710504f1c3 or 62718504f1c3 + VPSLLW 17(SP)(BP*8), X31, K4, X8 // 62710504f184ec11000000 or 62718504f184ec11000000 + VPSLLW 17(SP)(BP*4), X31, K4, X8 // 62710504f184ac11000000 or 62718504f184ac11000000 + VPSLLW X28, Y28, K7, Y1 // 62911d27f1cc or 62919d27f1cc + VPSLLW 7(SI)(DI*4), Y28, K7, Y1 // 62f11d27f18cbe07000000 or 62f19d27f18cbe07000000 + VPSLLW -7(DI)(R8*2), Y28, K7, Y1 // 62b11d27f18c47f9ffffff or 62b19d27f18c47f9ffffff + VPSLLW X20, Z3, K2, Z5 // 62b1654af1ec or 62b1e54af1ec + VPSLLW 17(SP), Z3, K2, Z5 // 62f1654af1ac2411000000 or 62f1e54af1ac2411000000 + VPSLLW -17(BP)(SI*4), Z3, K2, Z5 // 62f1654af1acb5efffffff or 62f1e54af1acb5efffffff + VPSLLW X20, Z5, K2, Z5 // 62b1554af1ec or 62b1d54af1ec + VPSLLW 17(SP), Z5, K2, Z5 // 62f1554af1ac2411000000 or 62f1d54af1ac2411000000 + VPSLLW -17(BP)(SI*4), Z5, K2, Z5 // 62f1554af1acb5efffffff or 62f1d54af1acb5efffffff + VPSLLW X20, Z3, K2, Z1 // 62b1654af1cc or 62b1e54af1cc + VPSLLW 17(SP), Z3, K2, Z1 // 62f1654af18c2411000000 or 62f1e54af18c2411000000 + VPSLLW -17(BP)(SI*4), Z3, K2, Z1 // 62f1654af18cb5efffffff or 62f1e54af18cb5efffffff + VPSLLW X20, Z5, K2, Z1 // 62b1554af1cc or 62b1d54af1cc + VPSLLW 17(SP), Z5, K2, Z1 // 62f1554af18c2411000000 or 62f1d54af18c2411000000 + VPSLLW -17(BP)(SI*4), Z5, K2, Z1 // 62f1554af18cb5efffffff or 62f1d54af18cb5efffffff + VPSRAVW X8, X28, K4, X16 // 
62c29d0411c0 + VPSRAVW 15(R8)(R14*4), X28, K4, X16 // 62829d041184b00f000000 + VPSRAVW -7(CX)(DX*4), X28, K4, X16 // 62e29d04118491f9ffffff + VPSRAVW Y7, Y26, K1, Y30 // 6262ad2111f7 + VPSRAVW -7(DI)(R8*1), Y26, K1, Y30 // 6222ad2111b407f9ffffff + VPSRAVW (SP), Y26, K1, Y30 // 6262ad21113424 + VPSRAVW Z21, Z31, K3, Z17 // 62a2854311cd + VPSRAVW Z9, Z31, K3, Z17 // 62c2854311c9 + VPSRAVW (BX), Z31, K3, Z17 // 62e28543110b + VPSRAVW -17(BP)(SI*1), Z31, K3, Z17 // 62e28543118c35efffffff + VPSRAVW Z21, Z0, K3, Z17 // 62a2fd4b11cd + VPSRAVW Z9, Z0, K3, Z17 // 62c2fd4b11c9 + VPSRAVW (BX), Z0, K3, Z17 // 62e2fd4b110b + VPSRAVW -17(BP)(SI*1), Z0, K3, Z17 // 62e2fd4b118c35efffffff + VPSRAVW Z21, Z31, K3, Z23 // 62a2854311fd + VPSRAVW Z9, Z31, K3, Z23 // 62c2854311f9 + VPSRAVW (BX), Z31, K3, Z23 // 62e28543113b + VPSRAVW -17(BP)(SI*1), Z31, K3, Z23 // 62e2854311bc35efffffff + VPSRAVW Z21, Z0, K3, Z23 // 62a2fd4b11fd + VPSRAVW Z9, Z0, K3, Z23 // 62c2fd4b11f9 + VPSRAVW (BX), Z0, K3, Z23 // 62e2fd4b113b + VPSRAVW -17(BP)(SI*1), Z0, K3, Z23 // 62e2fd4b11bc35efffffff + VPSRAW $79, X11, K4, X15 // 62d1050c71e34f or 62d1850c71e34f + VPSRAW $79, (R8), K4, X15 // 62d1050c71204f or 62d1850c71204f + VPSRAW $79, 15(DX)(BX*2), K4, X15 // 62f1050c71a45a0f0000004f or 62f1850c71a45a0f0000004f + VPSRAW $64, Y1, K5, Y16 // 62f17d2571e140 or 62f1fd2571e140 + VPSRAW $64, -7(CX), K5, Y16 // 62f17d2571a1f9ffffff40 or 62f1fd2571a1f9ffffff40 + VPSRAW $64, 15(DX)(BX*4), K5, Y16 // 62f17d2571a49a0f00000040 or 62f1fd2571a49a0f00000040 + VPSRAW $27, Z1, K7, Z6 // 62f14d4f71e11b or 62f1cd4f71e11b + VPSRAW $27, Z9, K7, Z6 // 62d14d4f71e11b or 62d1cd4f71e11b + VPSRAW $27, 15(R8)(R14*4), K7, Z6 // 62914d4f71a4b00f0000001b or 6291cd4f71a4b00f0000001b + VPSRAW $27, -7(CX)(DX*4), K7, Z6 // 62f14d4f71a491f9ffffff1b or 62f1cd4f71a491f9ffffff1b + VPSRAW $27, Z1, K7, Z9 // 62f1354f71e11b or 62f1b54f71e11b + VPSRAW $27, Z9, K7, Z9 // 62d1354f71e11b or 62d1b54f71e11b + VPSRAW $27, 15(R8)(R14*4), K7, Z9 // 
6291354f71a4b00f0000001b or 6291b54f71a4b00f0000001b + VPSRAW $27, -7(CX)(DX*4), K7, Z9 // 62f1354f71a491f9ffffff1b or 62f1b54f71a491f9ffffff1b + VPSRAW X13, X19, K7, X1 // 62d16507e1cd or 62d1e507e1cd + VPSRAW 17(SP)(BP*1), X19, K7, X1 // 62f16507e18c2c11000000 or 62f1e507e18c2c11000000 + VPSRAW -7(CX)(DX*8), X19, K7, X1 // 62f16507e18cd1f9ffffff or 62f1e507e18cd1f9ffffff + VPSRAW X2, Y31, K6, Y30 // 62610526e1f2 or 62618526e1f2 + VPSRAW -17(BP)(SI*2), Y31, K6, Y30 // 62610526e1b475efffffff or 62618526e1b475efffffff + VPSRAW 7(AX)(CX*2), Y31, K6, Y30 // 62610526e1b44807000000 or 62618526e1b44807000000 + VPSRAW X14, Z30, K3, Z20 // 62c10d43e1e6 or 62c18d43e1e6 + VPSRAW 15(R8)(R14*1), Z30, K3, Z20 // 62810d43e1a4300f000000 or 62818d43e1a4300f000000 + VPSRAW 15(R8)(R14*2), Z30, K3, Z20 // 62810d43e1a4700f000000 or 62818d43e1a4700f000000 + VPSRAW X14, Z5, K3, Z20 // 62c1554be1e6 or 62c1d54be1e6 + VPSRAW 15(R8)(R14*1), Z5, K3, Z20 // 6281554be1a4300f000000 or 6281d54be1a4300f000000 + VPSRAW 15(R8)(R14*2), Z5, K3, Z20 // 6281554be1a4700f000000 or 6281d54be1a4700f000000 + VPSRAW X14, Z30, K3, Z9 // 62510d43e1ce or 62518d43e1ce + VPSRAW 15(R8)(R14*1), Z30, K3, Z9 // 62110d43e18c300f000000 or 62118d43e18c300f000000 + VPSRAW 15(R8)(R14*2), Z30, K3, Z9 // 62110d43e18c700f000000 or 62118d43e18c700f000000 + VPSRAW X14, Z5, K3, Z9 // 6251554be1ce or 6251d54be1ce + VPSRAW 15(R8)(R14*1), Z5, K3, Z9 // 6211554be18c300f000000 or 6211d54be18c300f000000 + VPSRAW 15(R8)(R14*2), Z5, K3, Z9 // 6211554be18c700f000000 or 6211d54be18c700f000000 + VPSRLDQ $94, -7(CX)(DX*1), X9 // 62f13508739c11f9ffffff5e or 62f1b508739c11f9ffffff5e + VPSRLDQ $94, -15(R14)(R15*4), X9 // 62913508739cbef1ffffff5e or 6291b508739cbef1ffffff5e + VPSRLDQ $121, Y28, Y0 // 62917d2873dc79 or 6291fd2873dc79 + VPSRLDQ $121, (AX), Y0 // 62f17d28731879 or 62f1fd28731879 + VPSRLDQ $121, 7(SI), Y0 // 62f17d28739e0700000079 or 62f1fd28739e0700000079 + VPSRLDQ $13, Z21, Z12 // 62b11d4873dd0d or 62b19d4873dd0d + VPSRLDQ $13, 
Z9, Z12 // 62d11d4873d90d or 62d19d4873d90d + VPSRLDQ $13, 17(SP)(BP*1), Z12 // 62f11d48739c2c110000000d or 62f19d48739c2c110000000d + VPSRLDQ $13, -7(CX)(DX*8), Z12 // 62f11d48739cd1f9ffffff0d or 62f19d48739cd1f9ffffff0d + VPSRLDQ $13, Z21, Z13 // 62b1154873dd0d or 62b1954873dd0d + VPSRLDQ $13, Z9, Z13 // 62d1154873d90d or 62d1954873d90d + VPSRLDQ $13, 17(SP)(BP*1), Z13 // 62f11548739c2c110000000d or 62f19548739c2c110000000d + VPSRLDQ $13, -7(CX)(DX*8), Z13 // 62f11548739cd1f9ffffff0d or 62f19548739cd1f9ffffff0d + VPSRLVW X30, X23, K1, X12 // 6212c50110e6 + VPSRLVW 7(AX)(CX*4), X23, K1, X12 // 6272c50110a48807000000 + VPSRLVW 7(AX)(CX*1), X23, K1, X12 // 6272c50110a40807000000 + VPSRLVW Y3, Y22, K1, Y12 // 6272cd2110e3 + VPSRLVW 17(SP)(BP*1), Y22, K1, Y12 // 6272cd2110a42c11000000 + VPSRLVW -7(CX)(DX*8), Y22, K1, Y12 // 6272cd2110a4d1f9ffffff + VPSRLVW Z14, Z15, K1, Z0 // 62d2854910c6 + VPSRLVW Z27, Z15, K1, Z0 // 6292854910c3 + VPSRLVW 99(R15)(R15*4), Z15, K1, Z0 // 629285491084bf63000000 + VPSRLVW 15(DX), Z15, K1, Z0 // 62f2854910820f000000 + VPSRLVW Z14, Z12, K1, Z0 // 62d29d4910c6 + VPSRLVW Z27, Z12, K1, Z0 // 62929d4910c3 + VPSRLVW 99(R15)(R15*4), Z12, K1, Z0 // 62929d491084bf63000000 + VPSRLVW 15(DX), Z12, K1, Z0 // 62f29d4910820f000000 + VPSRLVW Z14, Z15, K1, Z8 // 6252854910c6 + VPSRLVW Z27, Z15, K1, Z8 // 6212854910c3 + VPSRLVW 99(R15)(R15*4), Z15, K1, Z8 // 621285491084bf63000000 + VPSRLVW 15(DX), Z15, K1, Z8 // 6272854910820f000000 + VPSRLVW Z14, Z12, K1, Z8 // 62529d4910c6 + VPSRLVW Z27, Z12, K1, Z8 // 62129d4910c3 + VPSRLVW 99(R15)(R15*4), Z12, K1, Z8 // 62129d491084bf63000000 + VPSRLVW 15(DX), Z12, K1, Z8 // 62729d4910820f000000 + VPSRLW $0, X20, K7, X8 // 62b13d0f71d400 or 62b1bd0f71d400 + VPSRLW $0, (SI), K7, X8 // 62f13d0f711600 or 62f1bd0f711600 + VPSRLW $0, 7(SI)(DI*2), K7, X8 // 62f13d0f71947e0700000000 or 62f1bd0f71947e0700000000 + VPSRLW $97, Y1, K2, Y15 // 62f1052a71d161 or 62f1852a71d161 + VPSRLW $97, -17(BP)(SI*2), K2, Y15 // 
62f1052a719475efffffff61 or 62f1852a719475efffffff61 + VPSRLW $97, 7(AX)(CX*2), K2, Y15 // 62f1052a7194480700000061 or 62f1852a7194480700000061 + VPSRLW $81, Z13, K4, Z11 // 62d1254c71d551 or 62d1a54c71d551 + VPSRLW $81, Z14, K4, Z11 // 62d1254c71d651 or 62d1a54c71d651 + VPSRLW $81, (CX), K4, Z11 // 62f1254c711151 or 62f1a54c711151 + VPSRLW $81, 99(R15), K4, Z11 // 62d1254c71976300000051 or 62d1a54c71976300000051 + VPSRLW $81, Z13, K4, Z5 // 62d1554c71d551 or 62d1d54c71d551 + VPSRLW $81, Z14, K4, Z5 // 62d1554c71d651 or 62d1d54c71d651 + VPSRLW $81, (CX), K4, Z5 // 62f1554c711151 or 62f1d54c711151 + VPSRLW $81, 99(R15), K4, Z5 // 62d1554c71976300000051 or 62d1d54c71976300000051 + VPSRLW X26, X9, K1, X2 // 62913509d1d2 or 6291b509d1d2 + VPSRLW 17(SP)(BP*8), X9, K1, X2 // 62f13509d194ec11000000 or 62f1b509d194ec11000000 + VPSRLW 17(SP)(BP*4), X9, K1, X2 // 62f13509d194ac11000000 or 62f1b509d194ac11000000 + VPSRLW X19, Y19, K3, Y27 // 62216523d1db or 6221e523d1db + VPSRLW 7(SI)(DI*4), Y19, K3, Y27 // 62616523d19cbe07000000 or 6261e523d19cbe07000000 + VPSRLW -7(DI)(R8*2), Y19, K3, Y27 // 62216523d19c47f9ffffff or 6221e523d19c47f9ffffff + VPSRLW X0, Z2, K4, Z5 // 62f16d4cd1e8 or 62f1ed4cd1e8 + VPSRLW 17(SP), Z2, K4, Z5 // 62f16d4cd1ac2411000000 or 62f1ed4cd1ac2411000000 + VPSRLW -17(BP)(SI*4), Z2, K4, Z5 // 62f16d4cd1acb5efffffff or 62f1ed4cd1acb5efffffff + VPSRLW X0, Z2, K4, Z23 // 62e16d4cd1f8 or 62e1ed4cd1f8 + VPSRLW 17(SP), Z2, K4, Z23 // 62e16d4cd1bc2411000000 or 62e1ed4cd1bc2411000000 + VPSRLW -17(BP)(SI*4), Z2, K4, Z23 // 62e16d4cd1bcb5efffffff or 62e1ed4cd1bcb5efffffff + VPSUBB X7, X16, K5, X31 // 62617d05f8ff or 6261fd05f8ff + VPSUBB 7(AX), X16, K5, X31 // 62617d05f8b807000000 or 6261fd05f8b807000000 + VPSUBB (DI), X16, K5, X31 // 62617d05f83f or 6261fd05f83f + VPSUBB Y13, Y17, K7, Y5 // 62d17527f8ed or 62d1f527f8ed + VPSUBB 15(R8)(R14*1), Y17, K7, Y5 // 62917527f8ac300f000000 or 6291f527f8ac300f000000 + VPSUBB 15(R8)(R14*2), Y17, K7, Y5 // 
62917527f8ac700f000000 or 6291f527f8ac700f000000 + VPSUBB Z28, Z26, K7, Z6 // 62912d47f8f4 or 6291ad47f8f4 + VPSUBB Z6, Z26, K7, Z6 // 62f12d47f8f6 or 62f1ad47f8f6 + VPSUBB 99(R15)(R15*2), Z26, K7, Z6 // 62912d47f8b47f63000000 or 6291ad47f8b47f63000000 + VPSUBB -7(DI), Z26, K7, Z6 // 62f12d47f8b7f9ffffff or 62f1ad47f8b7f9ffffff + VPSUBB Z28, Z14, K7, Z6 // 62910d4ff8f4 or 62918d4ff8f4 + VPSUBB Z6, Z14, K7, Z6 // 62f10d4ff8f6 or 62f18d4ff8f6 + VPSUBB 99(R15)(R15*2), Z14, K7, Z6 // 62910d4ff8b47f63000000 or 62918d4ff8b47f63000000 + VPSUBB -7(DI), Z14, K7, Z6 // 62f10d4ff8b7f9ffffff or 62f18d4ff8b7f9ffffff + VPSUBB Z28, Z26, K7, Z14 // 62112d47f8f4 or 6211ad47f8f4 + VPSUBB Z6, Z26, K7, Z14 // 62712d47f8f6 or 6271ad47f8f6 + VPSUBB 99(R15)(R15*2), Z26, K7, Z14 // 62112d47f8b47f63000000 or 6211ad47f8b47f63000000 + VPSUBB -7(DI), Z26, K7, Z14 // 62712d47f8b7f9ffffff or 6271ad47f8b7f9ffffff + VPSUBB Z28, Z14, K7, Z14 // 62110d4ff8f4 or 62118d4ff8f4 + VPSUBB Z6, Z14, K7, Z14 // 62710d4ff8f6 or 62718d4ff8f6 + VPSUBB 99(R15)(R15*2), Z14, K7, Z14 // 62110d4ff8b47f63000000 or 62118d4ff8b47f63000000 + VPSUBB -7(DI), Z14, K7, Z14 // 62710d4ff8b7f9ffffff or 62718d4ff8b7f9ffffff + VPSUBSB X28, X0, K2, X21 // 62817d0ae8ec or 6281fd0ae8ec + VPSUBSB 7(SI)(DI*8), X0, K2, X21 // 62e17d0ae8acfe07000000 or 62e1fd0ae8acfe07000000 + VPSUBSB -15(R14), X0, K2, X21 // 62c17d0ae8aef1ffffff or 62c1fd0ae8aef1ffffff + VPSUBSB Y24, Y11, K5, Y8 // 6211252de8c0 or 6211a52de8c0 + VPSUBSB (CX), Y11, K5, Y8 // 6271252de801 or 6271a52de801 + VPSUBSB 99(R15), Y11, K5, Y8 // 6251252de88763000000 or 6251a52de88763000000 + VPSUBSB Z23, Z23, K3, Z27 // 62214543e8df or 6221c543e8df + VPSUBSB Z6, Z23, K3, Z27 // 62614543e8de or 6261c543e8de + VPSUBSB -17(BP), Z23, K3, Z27 // 62614543e89defffffff or 6261c543e89defffffff + VPSUBSB -15(R14)(R15*8), Z23, K3, Z27 // 62014543e89cfef1ffffff or 6201c543e89cfef1ffffff + VPSUBSB Z23, Z5, K3, Z27 // 6221554be8df or 6221d54be8df + VPSUBSB Z6, Z5, K3, Z27 // 6261554be8de or 
6261d54be8de + VPSUBSB -17(BP), Z5, K3, Z27 // 6261554be89defffffff or 6261d54be89defffffff + VPSUBSB -15(R14)(R15*8), Z5, K3, Z27 // 6201554be89cfef1ffffff or 6201d54be89cfef1ffffff + VPSUBSB Z23, Z23, K3, Z15 // 62314543e8ff or 6231c543e8ff + VPSUBSB Z6, Z23, K3, Z15 // 62714543e8fe or 6271c543e8fe + VPSUBSB -17(BP), Z23, K3, Z15 // 62714543e8bdefffffff or 6271c543e8bdefffffff + VPSUBSB -15(R14)(R15*8), Z23, K3, Z15 // 62114543e8bcfef1ffffff or 6211c543e8bcfef1ffffff + VPSUBSB Z23, Z5, K3, Z15 // 6231554be8ff or 6231d54be8ff + VPSUBSB Z6, Z5, K3, Z15 // 6271554be8fe or 6271d54be8fe + VPSUBSB -17(BP), Z5, K3, Z15 // 6271554be8bdefffffff or 6271d54be8bdefffffff + VPSUBSB -15(R14)(R15*8), Z5, K3, Z15 // 6211554be8bcfef1ffffff or 6211d54be8bcfef1ffffff + VPSUBSW X19, X7, K4, X22 // 62a1450ce9f3 or 62a1c50ce9f3 + VPSUBSW 7(SI)(DI*1), X7, K4, X22 // 62e1450ce9b43e07000000 or 62e1c50ce9b43e07000000 + VPSUBSW 15(DX)(BX*8), X7, K4, X22 // 62e1450ce9b4da0f000000 or 62e1c50ce9b4da0f000000 + VPSUBSW Y21, Y24, K2, Y5 // 62b13d22e9ed or 62b1bd22e9ed + VPSUBSW 99(R15)(R15*2), Y24, K2, Y5 // 62913d22e9ac7f63000000 or 6291bd22e9ac7f63000000 + VPSUBSW -7(DI), Y24, K2, Y5 // 62f13d22e9aff9ffffff or 62f1bd22e9aff9ffffff + VPSUBSW Z16, Z21, K2, Z8 // 62315542e9c0 or 6231d542e9c0 + VPSUBSW Z13, Z21, K2, Z8 // 62515542e9c5 or 6251d542e9c5 + VPSUBSW 17(SP)(BP*2), Z21, K2, Z8 // 62715542e9846c11000000 or 6271d542e9846c11000000 + VPSUBSW -7(DI)(R8*4), Z21, K2, Z8 // 62315542e98487f9ffffff or 6231d542e98487f9ffffff + VPSUBSW Z16, Z5, K2, Z8 // 6231554ae9c0 or 6231d54ae9c0 + VPSUBSW Z13, Z5, K2, Z8 // 6251554ae9c5 or 6251d54ae9c5 + VPSUBSW 17(SP)(BP*2), Z5, K2, Z8 // 6271554ae9846c11000000 or 6271d54ae9846c11000000 + VPSUBSW -7(DI)(R8*4), Z5, K2, Z8 // 6231554ae98487f9ffffff or 6231d54ae98487f9ffffff + VPSUBSW Z16, Z21, K2, Z28 // 62215542e9e0 or 6221d542e9e0 + VPSUBSW Z13, Z21, K2, Z28 // 62415542e9e5 or 6241d542e9e5 + VPSUBSW 17(SP)(BP*2), Z21, K2, Z28 // 62615542e9a46c11000000 or 
6261d542e9a46c11000000 + VPSUBSW -7(DI)(R8*4), Z21, K2, Z28 // 62215542e9a487f9ffffff or 6221d542e9a487f9ffffff + VPSUBSW Z16, Z5, K2, Z28 // 6221554ae9e0 or 6221d54ae9e0 + VPSUBSW Z13, Z5, K2, Z28 // 6241554ae9e5 or 6241d54ae9e5 + VPSUBSW 17(SP)(BP*2), Z5, K2, Z28 // 6261554ae9a46c11000000 or 6261d54ae9a46c11000000 + VPSUBSW -7(DI)(R8*4), Z5, K2, Z28 // 6221554ae9a487f9ffffff or 6221d54ae9a487f9ffffff + VPSUBUSB X31, X16, K3, X7 // 62917d03d8ff or 6291fd03d8ff + VPSUBUSB -7(DI)(R8*1), X16, K3, X7 // 62b17d03d8bc07f9ffffff or 62b1fd03d8bc07f9ffffff + VPSUBUSB (SP), X16, K3, X7 // 62f17d03d83c24 or 62f1fd03d83c24 + VPSUBUSB Y13, Y9, K3, Y16 // 62c1352bd8c5 or 62c1b52bd8c5 + VPSUBUSB -7(CX)(DX*1), Y9, K3, Y16 // 62e1352bd88411f9ffffff or 62e1b52bd88411f9ffffff + VPSUBUSB -15(R14)(R15*4), Y9, K3, Y16 // 6281352bd884bef1ffffff or 6281b52bd884bef1ffffff + VPSUBUSB Z6, Z22, K3, Z12 // 62714d43d8e6 or 6271cd43d8e6 + VPSUBUSB Z8, Z22, K3, Z12 // 62514d43d8e0 or 6251cd43d8e0 + VPSUBUSB 15(R8), Z22, K3, Z12 // 62514d43d8a00f000000 or 6251cd43d8a00f000000 + VPSUBUSB (BP), Z22, K3, Z12 // 62714d43d86500 or 6271cd43d86500 + VPSUBUSB Z6, Z11, K3, Z12 // 6271254bd8e6 or 6271a54bd8e6 + VPSUBUSB Z8, Z11, K3, Z12 // 6251254bd8e0 or 6251a54bd8e0 + VPSUBUSB 15(R8), Z11, K3, Z12 // 6251254bd8a00f000000 or 6251a54bd8a00f000000 + VPSUBUSB (BP), Z11, K3, Z12 // 6271254bd86500 or 6271a54bd86500 + VPSUBUSB Z6, Z22, K3, Z27 // 62614d43d8de or 6261cd43d8de + VPSUBUSB Z8, Z22, K3, Z27 // 62414d43d8d8 or 6241cd43d8d8 + VPSUBUSB 15(R8), Z22, K3, Z27 // 62414d43d8980f000000 or 6241cd43d8980f000000 + VPSUBUSB (BP), Z22, K3, Z27 // 62614d43d85d00 or 6261cd43d85d00 + VPSUBUSB Z6, Z11, K3, Z27 // 6261254bd8de or 6261a54bd8de + VPSUBUSB Z8, Z11, K3, Z27 // 6241254bd8d8 or 6241a54bd8d8 + VPSUBUSB 15(R8), Z11, K3, Z27 // 6241254bd8980f000000 or 6241a54bd8980f000000 + VPSUBUSB (BP), Z11, K3, Z27 // 6261254bd85d00 or 6261a54bd85d00 + VPSUBUSW X9, X7, K2, X1 // 62d1450ad9c9 or 62d1c50ad9c9 + VPSUBUSW 
-7(CX), X7, K2, X1 // 62f1450ad989f9ffffff or 62f1c50ad989f9ffffff + VPSUBUSW 15(DX)(BX*4), X7, K2, X1 // 62f1450ad98c9a0f000000 or 62f1c50ad98c9a0f000000 + VPSUBUSW Y3, Y6, K1, Y9 // 62714d29d9cb or 6271cd29d9cb + VPSUBUSW 15(DX)(BX*1), Y6, K1, Y9 // 62714d29d98c1a0f000000 or 6271cd29d98c1a0f000000 + VPSUBUSW -7(CX)(DX*2), Y6, K1, Y9 // 62714d29d98c51f9ffffff or 6271cd29d98c51f9ffffff + VPSUBUSW Z9, Z12, K2, Z25 // 62411d4ad9c9 or 62419d4ad9c9 + VPSUBUSW Z12, Z12, K2, Z25 // 62411d4ad9cc or 62419d4ad9cc + VPSUBUSW 15(R8)(R14*8), Z12, K2, Z25 // 62011d4ad98cf00f000000 or 62019d4ad98cf00f000000 + VPSUBUSW -15(R14)(R15*2), Z12, K2, Z25 // 62011d4ad98c7ef1ffffff or 62019d4ad98c7ef1ffffff + VPSUBUSW Z9, Z17, K2, Z25 // 62417542d9c9 or 6241f542d9c9 + VPSUBUSW Z12, Z17, K2, Z25 // 62417542d9cc or 6241f542d9cc + VPSUBUSW 15(R8)(R14*8), Z17, K2, Z25 // 62017542d98cf00f000000 or 6201f542d98cf00f000000 + VPSUBUSW -15(R14)(R15*2), Z17, K2, Z25 // 62017542d98c7ef1ffffff or 6201f542d98c7ef1ffffff + VPSUBUSW Z9, Z12, K2, Z12 // 62511d4ad9e1 or 62519d4ad9e1 + VPSUBUSW Z12, Z12, K2, Z12 // 62511d4ad9e4 or 62519d4ad9e4 + VPSUBUSW 15(R8)(R14*8), Z12, K2, Z12 // 62111d4ad9a4f00f000000 or 62119d4ad9a4f00f000000 + VPSUBUSW -15(R14)(R15*2), Z12, K2, Z12 // 62111d4ad9a47ef1ffffff or 62119d4ad9a47ef1ffffff + VPSUBUSW Z9, Z17, K2, Z12 // 62517542d9e1 or 6251f542d9e1 + VPSUBUSW Z12, Z17, K2, Z12 // 62517542d9e4 or 6251f542d9e4 + VPSUBUSW 15(R8)(R14*8), Z17, K2, Z12 // 62117542d9a4f00f000000 or 6211f542d9a4f00f000000 + VPSUBUSW -15(R14)(R15*2), Z17, K2, Z12 // 62117542d9a47ef1ffffff or 6211f542d9a47ef1ffffff + VPSUBW X0, X12, K1, X15 // 62711d09f9f8 or 62719d09f9f8 + VPSUBW 99(R15)(R15*8), X12, K1, X15 // 62111d09f9bcff63000000 or 62119d09f9bcff63000000 + VPSUBW 7(AX)(CX*8), X12, K1, X15 // 62711d09f9bcc807000000 or 62719d09f9bcc807000000 + VPSUBW Y26, Y6, K7, Y7 // 62914d2ff9fa or 6291cd2ff9fa + VPSUBW -17(BP), Y6, K7, Y7 // 62f14d2ff9bdefffffff or 62f1cd2ff9bdefffffff + VPSUBW 
-15(R14)(R15*8), Y6, K7, Y7 // 62914d2ff9bcfef1ffffff or 6291cd2ff9bcfef1ffffff + VPSUBW Z8, Z3, K1, Z6 // 62d16549f9f0 or 62d1e549f9f0 + VPSUBW Z2, Z3, K1, Z6 // 62f16549f9f2 or 62f1e549f9f2 + VPSUBW -15(R14)(R15*1), Z3, K1, Z6 // 62916549f9b43ef1ffffff or 6291e549f9b43ef1ffffff + VPSUBW -15(BX), Z3, K1, Z6 // 62f16549f9b3f1ffffff or 62f1e549f9b3f1ffffff + VPSUBW Z8, Z21, K1, Z6 // 62d15541f9f0 or 62d1d541f9f0 + VPSUBW Z2, Z21, K1, Z6 // 62f15541f9f2 or 62f1d541f9f2 + VPSUBW -15(R14)(R15*1), Z21, K1, Z6 // 62915541f9b43ef1ffffff or 6291d541f9b43ef1ffffff + VPSUBW -15(BX), Z21, K1, Z6 // 62f15541f9b3f1ffffff or 62f1d541f9b3f1ffffff + VPSUBW Z8, Z3, K1, Z25 // 62416549f9c8 or 6241e549f9c8 + VPSUBW Z2, Z3, K1, Z25 // 62616549f9ca or 6261e549f9ca + VPSUBW -15(R14)(R15*1), Z3, K1, Z25 // 62016549f98c3ef1ffffff or 6201e549f98c3ef1ffffff + VPSUBW -15(BX), Z3, K1, Z25 // 62616549f98bf1ffffff or 6261e549f98bf1ffffff + VPSUBW Z8, Z21, K1, Z25 // 62415541f9c8 or 6241d541f9c8 + VPSUBW Z2, Z21, K1, Z25 // 62615541f9ca or 6261d541f9ca + VPSUBW -15(R14)(R15*1), Z21, K1, Z25 // 62015541f98c3ef1ffffff or 6201d541f98c3ef1ffffff + VPSUBW -15(BX), Z21, K1, Z25 // 62615541f98bf1ffffff or 6261d541f98bf1ffffff + VPTESTMB X26, X3, K3, K3 // 6292650b26da + VPTESTMB 15(R8)(R14*4), X3, K3, K3 // 6292650b269cb00f000000 + VPTESTMB -7(CX)(DX*4), X3, K3, K3 // 62f2650b269c91f9ffffff + VPTESTMB X26, X3, K3, K1 // 6292650b26ca + VPTESTMB 15(R8)(R14*4), X3, K3, K1 // 6292650b268cb00f000000 + VPTESTMB -7(CX)(DX*4), X3, K3, K1 // 62f2650b268c91f9ffffff + VPTESTMB Y3, Y18, K4, K5 // 62f26d2426eb + VPTESTMB 15(R8)(R14*8), Y18, K4, K5 // 62926d2426acf00f000000 + VPTESTMB -15(R14)(R15*2), Y18, K4, K5 // 62926d2426ac7ef1ffffff + VPTESTMB Y3, Y18, K4, K4 // 62f26d2426e3 + VPTESTMB 15(R8)(R14*8), Y18, K4, K4 // 62926d2426a4f00f000000 + VPTESTMB -15(R14)(R15*2), Y18, K4, K4 // 62926d2426a47ef1ffffff + VPTESTMB Z11, Z12, K5, K7 // 62d21d4d26fb + VPTESTMB Z5, Z12, K5, K7 // 62f21d4d26fd + VPTESTMB 
17(SP)(BP*8), Z12, K5, K7 // 62f21d4d26bcec11000000 + VPTESTMB 17(SP)(BP*4), Z12, K5, K7 // 62f21d4d26bcac11000000 + VPTESTMB Z11, Z22, K5, K7 // 62d24d4526fb + VPTESTMB Z5, Z22, K5, K7 // 62f24d4526fd + VPTESTMB 17(SP)(BP*8), Z22, K5, K7 // 62f24d4526bcec11000000 + VPTESTMB 17(SP)(BP*4), Z22, K5, K7 // 62f24d4526bcac11000000 + VPTESTMB Z11, Z12, K5, K6 // 62d21d4d26f3 + VPTESTMB Z5, Z12, K5, K6 // 62f21d4d26f5 + VPTESTMB 17(SP)(BP*8), Z12, K5, K6 // 62f21d4d26b4ec11000000 + VPTESTMB 17(SP)(BP*4), Z12, K5, K6 // 62f21d4d26b4ac11000000 + VPTESTMB Z11, Z22, K5, K6 // 62d24d4526f3 + VPTESTMB Z5, Z22, K5, K6 // 62f24d4526f5 + VPTESTMB 17(SP)(BP*8), Z22, K5, K6 // 62f24d4526b4ec11000000 + VPTESTMB 17(SP)(BP*4), Z22, K5, K6 // 62f24d4526b4ac11000000 + VPTESTMW X15, X9, K4, K6 // 62d2b50c26f7 + VPTESTMW -17(BP)(SI*2), X9, K4, K6 // 62f2b50c26b475efffffff + VPTESTMW 7(AX)(CX*2), X9, K4, K6 // 62f2b50c26b44807000000 + VPTESTMW X15, X9, K4, K4 // 62d2b50c26e7 + VPTESTMW -17(BP)(SI*2), X9, K4, K4 // 62f2b50c26a475efffffff + VPTESTMW 7(AX)(CX*2), X9, K4, K4 // 62f2b50c26a44807000000 + VPTESTMW Y8, Y14, K7, K4 // 62d28d2f26e0 + VPTESTMW (SI), Y14, K7, K4 // 62f28d2f2626 + VPTESTMW 7(SI)(DI*2), Y14, K7, K4 // 62f28d2f26a47e07000000 + VPTESTMW Y8, Y14, K7, K6 // 62d28d2f26f0 + VPTESTMW (SI), Y14, K7, K6 // 62f28d2f2636 + VPTESTMW 7(SI)(DI*2), Y14, K7, K6 // 62f28d2f26b47e07000000 + VPTESTMW Z1, Z6, K2, K4 // 62f2cd4a26e1 + VPTESTMW Z15, Z6, K2, K4 // 62d2cd4a26e7 + VPTESTMW 7(AX), Z6, K2, K4 // 62f2cd4a26a007000000 + VPTESTMW (DI), Z6, K2, K4 // 62f2cd4a2627 + VPTESTMW Z1, Z22, K2, K4 // 62f2cd4226e1 + VPTESTMW Z15, Z22, K2, K4 // 62d2cd4226e7 + VPTESTMW 7(AX), Z22, K2, K4 // 62f2cd4226a007000000 + VPTESTMW (DI), Z22, K2, K4 // 62f2cd422627 + VPTESTMW Z1, Z6, K2, K5 // 62f2cd4a26e9 + VPTESTMW Z15, Z6, K2, K5 // 62d2cd4a26ef + VPTESTMW 7(AX), Z6, K2, K5 // 62f2cd4a26a807000000 + VPTESTMW (DI), Z6, K2, K5 // 62f2cd4a262f + VPTESTMW Z1, Z22, K2, K5 // 62f2cd4226e9 + VPTESTMW Z15, 
Z22, K2, K5 // 62d2cd4226ef + VPTESTMW 7(AX), Z22, K2, K5 // 62f2cd4226a807000000 + VPTESTMW (DI), Z22, K2, K5 // 62f2cd42262f + VPTESTNMB X18, X26, K5, K2 // 62b22e0526d2 + VPTESTNMB 15(R8)(R14*1), X26, K5, K2 // 62922e052694300f000000 + VPTESTNMB 15(R8)(R14*2), X26, K5, K2 // 62922e052694700f000000 + VPTESTNMB X18, X26, K5, K7 // 62b22e0526fa + VPTESTNMB 15(R8)(R14*1), X26, K5, K7 // 62922e0526bc300f000000 + VPTESTNMB 15(R8)(R14*2), X26, K5, K7 // 62922e0526bc700f000000 + VPTESTNMB Y11, Y20, K3, K0 // 62d25e2326c3 + VPTESTNMB 17(SP)(BP*8), Y20, K3, K0 // 62f25e232684ec11000000 + VPTESTNMB 17(SP)(BP*4), Y20, K3, K0 // 62f25e232684ac11000000 + VPTESTNMB Y11, Y20, K3, K5 // 62d25e2326eb + VPTESTNMB 17(SP)(BP*8), Y20, K3, K5 // 62f25e2326acec11000000 + VPTESTNMB 17(SP)(BP*4), Y20, K3, K5 // 62f25e2326acac11000000 + VPTESTNMB Z18, Z13, K4, K6 // 62b2164c26f2 + VPTESTNMB Z8, Z13, K4, K6 // 62d2164c26f0 + VPTESTNMB 99(R15)(R15*1), Z13, K4, K6 // 6292164c26b43f63000000 + VPTESTNMB (DX), Z13, K4, K6 // 62f2164c2632 + VPTESTNMB Z18, Z13, K4, K5 // 62b2164c26ea + VPTESTNMB Z8, Z13, K4, K5 // 62d2164c26e8 + VPTESTNMB 99(R15)(R15*1), Z13, K4, K5 // 6292164c26ac3f63000000 + VPTESTNMB (DX), Z13, K4, K5 // 62f2164c262a + VPTESTNMW X7, X3, K1, K5 // 62f2e60926ef + VPTESTNMW (CX), X3, K1, K5 // 62f2e6092629 + VPTESTNMW 99(R15), X3, K1, K5 // 62d2e60926af63000000 + VPTESTNMW X7, X3, K1, K4 // 62f2e60926e7 + VPTESTNMW (CX), X3, K1, K4 // 62f2e6092621 + VPTESTNMW 99(R15), X3, K1, K4 // 62d2e60926a763000000 + VPTESTNMW Y20, Y20, K2, K4 // 62b2de2226e4 + VPTESTNMW 7(AX), Y20, K2, K4 // 62f2de2226a007000000 + VPTESTNMW (DI), Y20, K2, K4 // 62f2de222627 + VPTESTNMW Y20, Y20, K2, K6 // 62b2de2226f4 + VPTESTNMW 7(AX), Y20, K2, K6 // 62f2de2226b007000000 + VPTESTNMW (DI), Y20, K2, K6 // 62f2de222637 + VPTESTNMW Z28, Z12, K1, K1 // 62929e4926cc + VPTESTNMW Z13, Z12, K1, K1 // 62d29e4926cd + VPTESTNMW 7(SI)(DI*1), Z12, K1, K1 // 62f29e49268c3e07000000 + VPTESTNMW 15(DX)(BX*8), Z12, K1, K1 // 
62f29e49268cda0f000000 + VPTESTNMW Z28, Z16, K1, K1 // 6292fe4126cc + VPTESTNMW Z13, Z16, K1, K1 // 62d2fe4126cd + VPTESTNMW 7(SI)(DI*1), Z16, K1, K1 // 62f2fe41268c3e07000000 + VPTESTNMW 15(DX)(BX*8), Z16, K1, K1 // 62f2fe41268cda0f000000 + VPTESTNMW Z28, Z12, K1, K3 // 62929e4926dc + VPTESTNMW Z13, Z12, K1, K3 // 62d29e4926dd + VPTESTNMW 7(SI)(DI*1), Z12, K1, K3 // 62f29e49269c3e07000000 + VPTESTNMW 15(DX)(BX*8), Z12, K1, K3 // 62f29e49269cda0f000000 + VPTESTNMW Z28, Z16, K1, K3 // 6292fe4126dc + VPTESTNMW Z13, Z16, K1, K3 // 62d2fe4126dd + VPTESTNMW 7(SI)(DI*1), Z16, K1, K3 // 62f2fe41269c3e07000000 + VPTESTNMW 15(DX)(BX*8), Z16, K1, K3 // 62f2fe41269cda0f000000 + VPUNPCKHBW X24, X0, K7, X0 // 62917d0f68c0 or 6291fd0f68c0 + VPUNPCKHBW 99(R15)(R15*2), X0, K7, X0 // 62917d0f68847f63000000 or 6291fd0f68847f63000000 + VPUNPCKHBW -7(DI), X0, K7, X0 // 62f17d0f6887f9ffffff or 62f1fd0f6887f9ffffff + VPUNPCKHBW Y28, Y28, K1, Y9 // 62111d2168cc or 62119d2168cc + VPUNPCKHBW 99(R15)(R15*1), Y28, K1, Y9 // 62111d21688c3f63000000 or 62119d21688c3f63000000 + VPUNPCKHBW (DX), Y28, K1, Y9 // 62711d21680a or 62719d21680a + VPUNPCKHBW Z15, Z3, K1, Z14 // 6251654968f7 or 6251e54968f7 + VPUNPCKHBW Z30, Z3, K1, Z14 // 6211654968f6 or 6211e54968f6 + VPUNPCKHBW -7(DI)(R8*1), Z3, K1, Z14 // 6231654968b407f9ffffff or 6231e54968b407f9ffffff + VPUNPCKHBW (SP), Z3, K1, Z14 // 62716549683424 or 6271e549683424 + VPUNPCKHBW Z15, Z12, K1, Z14 // 62511d4968f7 or 62519d4968f7 + VPUNPCKHBW Z30, Z12, K1, Z14 // 62111d4968f6 or 62119d4968f6 + VPUNPCKHBW -7(DI)(R8*1), Z12, K1, Z14 // 62311d4968b407f9ffffff or 62319d4968b407f9ffffff + VPUNPCKHBW (SP), Z12, K1, Z14 // 62711d49683424 or 62719d49683424 + VPUNPCKHBW Z15, Z3, K1, Z28 // 6241654968e7 or 6241e54968e7 + VPUNPCKHBW Z30, Z3, K1, Z28 // 6201654968e6 or 6201e54968e6 + VPUNPCKHBW -7(DI)(R8*1), Z3, K1, Z28 // 6221654968a407f9ffffff or 6221e54968a407f9ffffff + VPUNPCKHBW (SP), Z3, K1, Z28 // 62616549682424 or 6261e549682424 + VPUNPCKHBW Z15, Z12, 
K1, Z28 // 62411d4968e7 or 62419d4968e7 + VPUNPCKHBW Z30, Z12, K1, Z28 // 62011d4968e6 or 62019d4968e6 + VPUNPCKHBW -7(DI)(R8*1), Z12, K1, Z28 // 62211d4968a407f9ffffff or 62219d4968a407f9ffffff + VPUNPCKHBW (SP), Z12, K1, Z28 // 62611d49682424 or 62619d49682424 + VPUNPCKHWD X21, X3, K4, X31 // 6221650c69fd or 6221e50c69fd + VPUNPCKHWD -17(BP), X3, K4, X31 // 6261650c69bdefffffff or 6261e50c69bdefffffff + VPUNPCKHWD -15(R14)(R15*8), X3, K4, X31 // 6201650c69bcfef1ffffff or 6201e50c69bcfef1ffffff + VPUNPCKHWD Y26, Y6, K5, Y12 // 62114d2d69e2 or 6211cd2d69e2 + VPUNPCKHWD 7(SI)(DI*1), Y6, K5, Y12 // 62714d2d69a43e07000000 or 6271cd2d69a43e07000000 + VPUNPCKHWD 15(DX)(BX*8), Y6, K5, Y12 // 62714d2d69a4da0f000000 or 6271cd2d69a4da0f000000 + VPUNPCKHWD Z0, Z23, K7, Z20 // 62e1454769e0 or 62e1c54769e0 + VPUNPCKHWD Z11, Z23, K7, Z20 // 62c1454769e3 or 62c1c54769e3 + VPUNPCKHWD (AX), Z23, K7, Z20 // 62e145476920 or 62e1c5476920 + VPUNPCKHWD 7(SI), Z23, K7, Z20 // 62e1454769a607000000 or 62e1c54769a607000000 + VPUNPCKHWD Z0, Z19, K7, Z20 // 62e1654769e0 or 62e1e54769e0 + VPUNPCKHWD Z11, Z19, K7, Z20 // 62c1654769e3 or 62c1e54769e3 + VPUNPCKHWD (AX), Z19, K7, Z20 // 62e165476920 or 62e1e5476920 + VPUNPCKHWD 7(SI), Z19, K7, Z20 // 62e1654769a607000000 or 62e1e54769a607000000 + VPUNPCKHWD Z0, Z23, K7, Z0 // 62f1454769c0 or 62f1c54769c0 + VPUNPCKHWD Z11, Z23, K7, Z0 // 62d1454769c3 or 62d1c54769c3 + VPUNPCKHWD (AX), Z23, K7, Z0 // 62f145476900 or 62f1c5476900 + VPUNPCKHWD 7(SI), Z23, K7, Z0 // 62f14547698607000000 or 62f1c547698607000000 + VPUNPCKHWD Z0, Z19, K7, Z0 // 62f1654769c0 or 62f1e54769c0 + VPUNPCKHWD Z11, Z19, K7, Z0 // 62d1654769c3 or 62d1e54769c3 + VPUNPCKHWD (AX), Z19, K7, Z0 // 62f165476900 or 62f1e5476900 + VPUNPCKHWD 7(SI), Z19, K7, Z0 // 62f16547698607000000 or 62f1e547698607000000 + VPUNPCKLBW X13, X11, K7, X1 // 62d1250f60cd or 62d1a50f60cd + VPUNPCKLBW 17(SP)(BP*2), X11, K7, X1 // 62f1250f608c6c11000000 or 62f1a50f608c6c11000000 + VPUNPCKLBW -7(DI)(R8*4), 
X11, K7, X1 // 62b1250f608c87f9ffffff or 62b1a50f608c87f9ffffff + VPUNPCKLBW Y28, Y8, K6, Y3 // 62913d2e60dc or 6291bd2e60dc + VPUNPCKLBW -7(DI)(R8*1), Y8, K6, Y3 // 62b13d2e609c07f9ffffff or 62b1bd2e609c07f9ffffff + VPUNPCKLBW (SP), Y8, K6, Y3 // 62f13d2e601c24 or 62f1bd2e601c24 + VPUNPCKLBW Z0, Z24, K3, Z0 // 62f13d4360c0 or 62f1bd4360c0 + VPUNPCKLBW Z26, Z24, K3, Z0 // 62913d4360c2 or 6291bd4360c2 + VPUNPCKLBW (BX), Z24, K3, Z0 // 62f13d436003 or 62f1bd436003 + VPUNPCKLBW -17(BP)(SI*1), Z24, K3, Z0 // 62f13d43608435efffffff or 62f1bd43608435efffffff + VPUNPCKLBW Z0, Z12, K3, Z0 // 62f11d4b60c0 or 62f19d4b60c0 + VPUNPCKLBW Z26, Z12, K3, Z0 // 62911d4b60c2 or 62919d4b60c2 + VPUNPCKLBW (BX), Z12, K3, Z0 // 62f11d4b6003 or 62f19d4b6003 + VPUNPCKLBW -17(BP)(SI*1), Z12, K3, Z0 // 62f11d4b608435efffffff or 62f19d4b608435efffffff + VPUNPCKLBW Z0, Z24, K3, Z25 // 62613d4360c8 or 6261bd4360c8 + VPUNPCKLBW Z26, Z24, K3, Z25 // 62013d4360ca or 6201bd4360ca + VPUNPCKLBW (BX), Z24, K3, Z25 // 62613d43600b or 6261bd43600b + VPUNPCKLBW -17(BP)(SI*1), Z24, K3, Z25 // 62613d43608c35efffffff or 6261bd43608c35efffffff + VPUNPCKLBW Z0, Z12, K3, Z25 // 62611d4b60c8 or 62619d4b60c8 + VPUNPCKLBW Z26, Z12, K3, Z25 // 62011d4b60ca or 62019d4b60ca + VPUNPCKLBW (BX), Z12, K3, Z25 // 62611d4b600b or 62619d4b600b + VPUNPCKLBW -17(BP)(SI*1), Z12, K3, Z25 // 62611d4b608c35efffffff or 62619d4b608c35efffffff + VPUNPCKLWD X8, X8, K3, X19 // 62c13d0b61d8 or 62c1bd0b61d8 + VPUNPCKLWD -15(R14)(R15*1), X8, K3, X19 // 62813d0b619c3ef1ffffff or 6281bd0b619c3ef1ffffff + VPUNPCKLWD -15(BX), X8, K3, X19 // 62e13d0b619bf1ffffff or 62e1bd0b619bf1ffffff + VPUNPCKLWD Y8, Y27, K4, Y22 // 62c1252461f0 or 62c1a52461f0 + VPUNPCKLWD (AX), Y27, K4, Y22 // 62e125246130 or 62e1a5246130 + VPUNPCKLWD 7(SI), Y27, K4, Y22 // 62e1252461b607000000 or 62e1a52461b607000000 + VPUNPCKLWD Z6, Z21, K2, Z31 // 6261554261fe or 6261d54261fe + VPUNPCKLWD Z9, Z21, K2, Z31 // 6241554261f9 or 6241d54261f9 + VPUNPCKLWD 17(SP)(BP*1), 
Z21, K2, Z31 // 6261554261bc2c11000000 or 6261d54261bc2c11000000 + VPUNPCKLWD -7(CX)(DX*8), Z21, K2, Z31 // 6261554261bcd1f9ffffff or 6261d54261bcd1f9ffffff + VPUNPCKLWD Z6, Z9, K2, Z31 // 6261354a61fe or 6261b54a61fe + VPUNPCKLWD Z9, Z9, K2, Z31 // 6241354a61f9 or 6241b54a61f9 + VPUNPCKLWD 17(SP)(BP*1), Z9, K2, Z31 // 6261354a61bc2c11000000 or 6261b54a61bc2c11000000 + VPUNPCKLWD -7(CX)(DX*8), Z9, K2, Z31 // 6261354a61bcd1f9ffffff or 6261b54a61bcd1f9ffffff + VPUNPCKLWD Z6, Z21, K2, Z0 // 62f1554261c6 or 62f1d54261c6 + VPUNPCKLWD Z9, Z21, K2, Z0 // 62d1554261c1 or 62d1d54261c1 + VPUNPCKLWD 17(SP)(BP*1), Z21, K2, Z0 // 62f1554261842c11000000 or 62f1d54261842c11000000 + VPUNPCKLWD -7(CX)(DX*8), Z21, K2, Z0 // 62f155426184d1f9ffffff or 62f1d5426184d1f9ffffff + VPUNPCKLWD Z6, Z9, K2, Z0 // 62f1354a61c6 or 62f1b54a61c6 + VPUNPCKLWD Z9, Z9, K2, Z0 // 62d1354a61c1 or 62d1b54a61c1 + VPUNPCKLWD 17(SP)(BP*1), Z9, K2, Z0 // 62f1354a61842c11000000 or 62f1b54a61842c11000000 + VPUNPCKLWD -7(CX)(DX*8), Z9, K2, Z0 // 62f1354a6184d1f9ffffff or 62f1b54a6184d1f9ffffff + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512cd.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512cd.s new file mode 100644 index 0000000000000000000000000000000000000000..9b8b9fdafb73e69b0a55b30237b9f745e00651b5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512cd.s @@ -0,0 +1,190 @@ +// Code generated by avx512test. DO NOT EDIT. 
+ +#include "../../../../../../runtime/textflag.h" + +TEXT asmtest_avx512cd(SB), NOSPLIT, $0 + VPBROADCASTMB2Q K1, X25 // 6262fe082ac9 + VPBROADCASTMB2Q K5, X25 // 6262fe082acd + VPBROADCASTMB2Q K1, X11 // 6272fe082ad9 + VPBROADCASTMB2Q K5, X11 // 6272fe082add + VPBROADCASTMB2Q K1, X17 // 62e2fe082ac9 + VPBROADCASTMB2Q K5, X17 // 62e2fe082acd + VPBROADCASTMB2Q K3, Y0 // 62f2fe282ac3 + VPBROADCASTMB2Q K1, Y0 // 62f2fe282ac1 + VPBROADCASTMB2Q K3, Y19 // 62e2fe282adb + VPBROADCASTMB2Q K1, Y19 // 62e2fe282ad9 + VPBROADCASTMB2Q K3, Y31 // 6262fe282afb + VPBROADCASTMB2Q K1, Y31 // 6262fe282af9 + VPBROADCASTMB2Q K5, Z21 // 62e2fe482aed + VPBROADCASTMB2Q K4, Z21 // 62e2fe482aec + VPBROADCASTMB2Q K5, Z8 // 6272fe482ac5 + VPBROADCASTMB2Q K4, Z8 // 6272fe482ac4 + VPBROADCASTMW2D K7, X18 // 62e27e083ad7 + VPBROADCASTMW2D K6, X18 // 62e27e083ad6 + VPBROADCASTMW2D K7, X11 // 62727e083adf + VPBROADCASTMW2D K6, X11 // 62727e083ade + VPBROADCASTMW2D K7, X9 // 62727e083acf + VPBROADCASTMW2D K6, X9 // 62727e083ace + VPBROADCASTMW2D K4, Y22 // 62e27e283af4 + VPBROADCASTMW2D K6, Y22 // 62e27e283af6 + VPBROADCASTMW2D K4, Y9 // 62727e283acc + VPBROADCASTMW2D K6, Y9 // 62727e283ace + VPBROADCASTMW2D K4, Y23 // 62e27e283afc + VPBROADCASTMW2D K6, Y23 // 62e27e283afe + VPBROADCASTMW2D K0, Z16 // 62e27e483ac0 + VPBROADCASTMW2D K7, Z16 // 62e27e483ac7 + VPBROADCASTMW2D K0, Z9 // 62727e483ac8 + VPBROADCASTMW2D K7, Z9 // 62727e483acf + VPCONFLICTD X6, K6, X6 // 62f27d0ec4f6 + VPCONFLICTD X1, K6, X6 // 62f27d0ec4f1 + VPCONFLICTD X8, K6, X6 // 62d27d0ec4f0 + VPCONFLICTD 15(R8), K6, X6 // 62d27d0ec4b00f000000 + VPCONFLICTD (BP), K6, X6 // 62f27d0ec47500 + VPCONFLICTD X6, K6, X17 // 62e27d0ec4ce + VPCONFLICTD X1, K6, X17 // 62e27d0ec4c9 + VPCONFLICTD X8, K6, X17 // 62c27d0ec4c8 + VPCONFLICTD 15(R8), K6, X17 // 62c27d0ec4880f000000 + VPCONFLICTD (BP), K6, X17 // 62e27d0ec44d00 + VPCONFLICTD X6, K6, X28 // 62627d0ec4e6 + VPCONFLICTD X1, K6, X28 // 62627d0ec4e1 + VPCONFLICTD X8, K6, X28 // 62427d0ec4e0 
+ VPCONFLICTD 15(R8), K6, X28 // 62427d0ec4a00f000000 + VPCONFLICTD (BP), K6, X28 // 62627d0ec46500 + VPCONFLICTD Y14, K3, Y2 // 62d27d2bc4d6 + VPCONFLICTD Y8, K3, Y2 // 62d27d2bc4d0 + VPCONFLICTD Y20, K3, Y2 // 62b27d2bc4d4 + VPCONFLICTD -7(CX), K3, Y2 // 62f27d2bc491f9ffffff + VPCONFLICTD 15(DX)(BX*4), K3, Y2 // 62f27d2bc4949a0f000000 + VPCONFLICTD Y14, K3, Y7 // 62d27d2bc4fe + VPCONFLICTD Y8, K3, Y7 // 62d27d2bc4f8 + VPCONFLICTD Y20, K3, Y7 // 62b27d2bc4fc + VPCONFLICTD -7(CX), K3, Y7 // 62f27d2bc4b9f9ffffff + VPCONFLICTD 15(DX)(BX*4), K3, Y7 // 62f27d2bc4bc9a0f000000 + VPCONFLICTD Y14, K3, Y21 // 62c27d2bc4ee + VPCONFLICTD Y8, K3, Y21 // 62c27d2bc4e8 + VPCONFLICTD Y20, K3, Y21 // 62a27d2bc4ec + VPCONFLICTD -7(CX), K3, Y21 // 62e27d2bc4a9f9ffffff + VPCONFLICTD 15(DX)(BX*4), K3, Y21 // 62e27d2bc4ac9a0f000000 + VPCONFLICTD Z11, K7, Z21 // 62c27d4fc4eb + VPCONFLICTD Z25, K7, Z21 // 62827d4fc4e9 + VPCONFLICTD -15(R14)(R15*1), K7, Z21 // 62827d4fc4ac3ef1ffffff + VPCONFLICTD -15(BX), K7, Z21 // 62e27d4fc4abf1ffffff + VPCONFLICTD Z11, K7, Z13 // 62527d4fc4eb + VPCONFLICTD Z25, K7, Z13 // 62127d4fc4e9 + VPCONFLICTD -15(R14)(R15*1), K7, Z13 // 62127d4fc4ac3ef1ffffff + VPCONFLICTD -15(BX), K7, Z13 // 62727d4fc4abf1ffffff + VPCONFLICTQ X11, K4, X8 // 6252fd0cc4c3 + VPCONFLICTQ X16, K4, X8 // 6232fd0cc4c0 + VPCONFLICTQ X6, K4, X8 // 6272fd0cc4c6 + VPCONFLICTQ 15(R8)(R14*8), K4, X8 // 6212fd0cc484f00f000000 + VPCONFLICTQ -15(R14)(R15*2), K4, X8 // 6212fd0cc4847ef1ffffff + VPCONFLICTQ X11, K4, X6 // 62d2fd0cc4f3 + VPCONFLICTQ X16, K4, X6 // 62b2fd0cc4f0 + VPCONFLICTQ X6, K4, X6 // 62f2fd0cc4f6 + VPCONFLICTQ 15(R8)(R14*8), K4, X6 // 6292fd0cc4b4f00f000000 + VPCONFLICTQ -15(R14)(R15*2), K4, X6 // 6292fd0cc4b47ef1ffffff + VPCONFLICTQ X11, K4, X0 // 62d2fd0cc4c3 + VPCONFLICTQ X16, K4, X0 // 62b2fd0cc4c0 + VPCONFLICTQ X6, K4, X0 // 62f2fd0cc4c6 + VPCONFLICTQ 15(R8)(R14*8), K4, X0 // 6292fd0cc484f00f000000 + VPCONFLICTQ -15(R14)(R15*2), K4, X0 // 6292fd0cc4847ef1ffffff + 
VPCONFLICTQ Y5, K4, Y11 // 6272fd2cc4dd + VPCONFLICTQ Y18, K4, Y11 // 6232fd2cc4da + VPCONFLICTQ Y20, K4, Y11 // 6232fd2cc4dc + VPCONFLICTQ 99(R15)(R15*8), K4, Y11 // 6212fd2cc49cff63000000 + VPCONFLICTQ 7(AX)(CX*8), K4, Y11 // 6272fd2cc49cc807000000 + VPCONFLICTQ Y5, K4, Y24 // 6262fd2cc4c5 + VPCONFLICTQ Y18, K4, Y24 // 6222fd2cc4c2 + VPCONFLICTQ Y20, K4, Y24 // 6222fd2cc4c4 + VPCONFLICTQ 99(R15)(R15*8), K4, Y24 // 6202fd2cc484ff63000000 + VPCONFLICTQ 7(AX)(CX*8), K4, Y24 // 6262fd2cc484c807000000 + VPCONFLICTQ Y5, K4, Y1 // 62f2fd2cc4cd + VPCONFLICTQ Y18, K4, Y1 // 62b2fd2cc4ca + VPCONFLICTQ Y20, K4, Y1 // 62b2fd2cc4cc + VPCONFLICTQ 99(R15)(R15*8), K4, Y1 // 6292fd2cc48cff63000000 + VPCONFLICTQ 7(AX)(CX*8), K4, Y1 // 62f2fd2cc48cc807000000 + VPCONFLICTQ Z27, K7, Z3 // 6292fd4fc4db + VPCONFLICTQ Z15, K7, Z3 // 62d2fd4fc4df + VPCONFLICTQ 7(AX)(CX*4), K7, Z3 // 62f2fd4fc49c8807000000 + VPCONFLICTQ 7(AX)(CX*1), K7, Z3 // 62f2fd4fc49c0807000000 + VPCONFLICTQ Z27, K7, Z12 // 6212fd4fc4e3 + VPCONFLICTQ Z15, K7, Z12 // 6252fd4fc4e7 + VPCONFLICTQ 7(AX)(CX*4), K7, Z12 // 6272fd4fc4a48807000000 + VPCONFLICTQ 7(AX)(CX*1), K7, Z12 // 6272fd4fc4a40807000000 + VPLZCNTD X3, K3, X17 // 62e27d0b44cb + VPLZCNTD X26, K3, X17 // 62827d0b44ca + VPLZCNTD X23, K3, X17 // 62a27d0b44cf + VPLZCNTD 15(DX)(BX*1), K3, X17 // 62e27d0b448c1a0f000000 + VPLZCNTD -7(CX)(DX*2), K3, X17 // 62e27d0b448c51f9ffffff + VPLZCNTD X3, K3, X15 // 62727d0b44fb + VPLZCNTD X26, K3, X15 // 62127d0b44fa + VPLZCNTD X23, K3, X15 // 62327d0b44ff + VPLZCNTD 15(DX)(BX*1), K3, X15 // 62727d0b44bc1a0f000000 + VPLZCNTD -7(CX)(DX*2), K3, X15 // 62727d0b44bc51f9ffffff + VPLZCNTD X3, K3, X8 // 62727d0b44c3 + VPLZCNTD X26, K3, X8 // 62127d0b44c2 + VPLZCNTD X23, K3, X8 // 62327d0b44c7 + VPLZCNTD 15(DX)(BX*1), K3, X8 // 62727d0b44841a0f000000 + VPLZCNTD -7(CX)(DX*2), K3, X8 // 62727d0b448451f9ffffff + VPLZCNTD Y5, K3, Y20 // 62e27d2b44e5 + VPLZCNTD Y28, K3, Y20 // 62827d2b44e4 + VPLZCNTD Y7, K3, Y20 // 62e27d2b44e7 + VPLZCNTD 
(BX), K3, Y20 // 62e27d2b4423 + VPLZCNTD -17(BP)(SI*1), K3, Y20 // 62e27d2b44a435efffffff + VPLZCNTD Y5, K3, Y12 // 62727d2b44e5 + VPLZCNTD Y28, K3, Y12 // 62127d2b44e4 + VPLZCNTD Y7, K3, Y12 // 62727d2b44e7 + VPLZCNTD (BX), K3, Y12 // 62727d2b4423 + VPLZCNTD -17(BP)(SI*1), K3, Y12 // 62727d2b44a435efffffff + VPLZCNTD Y5, K3, Y3 // 62f27d2b44dd + VPLZCNTD Y28, K3, Y3 // 62927d2b44dc + VPLZCNTD Y7, K3, Y3 // 62f27d2b44df + VPLZCNTD (BX), K3, Y3 // 62f27d2b441b + VPLZCNTD -17(BP)(SI*1), K3, Y3 // 62f27d2b449c35efffffff + VPLZCNTD Z21, K3, Z3 // 62b27d4b44dd + VPLZCNTD Z13, K3, Z3 // 62d27d4b44dd + VPLZCNTD 17(SP)(BP*8), K3, Z3 // 62f27d4b449cec11000000 + VPLZCNTD 17(SP)(BP*4), K3, Z3 // 62f27d4b449cac11000000 + VPLZCNTD Z21, K3, Z0 // 62b27d4b44c5 + VPLZCNTD Z13, K3, Z0 // 62d27d4b44c5 + VPLZCNTD 17(SP)(BP*8), K3, Z0 // 62f27d4b4484ec11000000 + VPLZCNTD 17(SP)(BP*4), K3, Z0 // 62f27d4b4484ac11000000 + VPLZCNTQ X9, K2, X13 // 6252fd0a44e9 + VPLZCNTQ X15, K2, X13 // 6252fd0a44ef + VPLZCNTQ X26, K2, X13 // 6212fd0a44ea + VPLZCNTQ -17(BP), K2, X13 // 6272fd0a44adefffffff + VPLZCNTQ -15(R14)(R15*8), K2, X13 // 6212fd0a44acfef1ffffff + VPLZCNTQ X9, K2, X28 // 6242fd0a44e1 + VPLZCNTQ X15, K2, X28 // 6242fd0a44e7 + VPLZCNTQ X26, K2, X28 // 6202fd0a44e2 + VPLZCNTQ -17(BP), K2, X28 // 6262fd0a44a5efffffff + VPLZCNTQ -15(R14)(R15*8), K2, X28 // 6202fd0a44a4fef1ffffff + VPLZCNTQ X9, K2, X24 // 6242fd0a44c1 + VPLZCNTQ X15, K2, X24 // 6242fd0a44c7 + VPLZCNTQ X26, K2, X24 // 6202fd0a44c2 + VPLZCNTQ -17(BP), K2, X24 // 6262fd0a4485efffffff + VPLZCNTQ -15(R14)(R15*8), K2, X24 // 6202fd0a4484fef1ffffff + VPLZCNTQ Y12, K1, Y0 // 62d2fd2944c4 + VPLZCNTQ Y1, K1, Y0 // 62f2fd2944c1 + VPLZCNTQ Y14, K1, Y0 // 62d2fd2944c6 + VPLZCNTQ 15(R8)(R14*4), K1, Y0 // 6292fd294484b00f000000 + VPLZCNTQ -7(CX)(DX*4), K1, Y0 // 62f2fd29448491f9ffffff + VPLZCNTQ Y12, K1, Y22 // 62c2fd2944f4 + VPLZCNTQ Y1, K1, Y22 // 62e2fd2944f1 + VPLZCNTQ Y14, K1, Y22 // 62c2fd2944f6 + VPLZCNTQ 15(R8)(R14*4), K1, Y22 // 
6282fd2944b4b00f000000 + VPLZCNTQ -7(CX)(DX*4), K1, Y22 // 62e2fd2944b491f9ffffff + VPLZCNTQ Y12, K1, Y13 // 6252fd2944ec + VPLZCNTQ Y1, K1, Y13 // 6272fd2944e9 + VPLZCNTQ Y14, K1, Y13 // 6252fd2944ee + VPLZCNTQ 15(R8)(R14*4), K1, Y13 // 6212fd2944acb00f000000 + VPLZCNTQ -7(CX)(DX*4), K1, Y13 // 6272fd2944ac91f9ffffff + VPLZCNTQ Z3, K2, Z11 // 6272fd4a44db + VPLZCNTQ Z12, K2, Z11 // 6252fd4a44dc + VPLZCNTQ 7(SI)(DI*4), K2, Z11 // 6272fd4a449cbe07000000 + VPLZCNTQ -7(DI)(R8*2), K2, Z11 // 6232fd4a449c47f9ffffff + VPLZCNTQ Z3, K2, Z25 // 6262fd4a44cb + VPLZCNTQ Z12, K2, Z25 // 6242fd4a44cc + VPLZCNTQ 7(SI)(DI*4), K2, Z25 // 6262fd4a448cbe07000000 + VPLZCNTQ -7(DI)(R8*2), K2, Z25 // 6222fd4a448c47f9ffffff + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512dq.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512dq.s new file mode 100644 index 0000000000000000000000000000000000000000..9861f4adddb5dcdcbbd8166508e6a73ee5fc3aa4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512dq.s @@ -0,0 +1,2668 @@ +// Code generated by avx512test. DO NOT EDIT. 
+ +#include "../../../../../../runtime/textflag.h" + +TEXT asmtest_avx512dq(SB), NOSPLIT, $0 + KADDB K3, K1, K6 // c5f54af3 + KADDB K1, K1, K6 // c5f54af1 + KADDB K3, K5, K6 // c5d54af3 + KADDB K1, K5, K6 // c5d54af1 + KADDB K3, K1, K5 // c5f54aeb + KADDB K1, K1, K5 // c5f54ae9 + KADDB K3, K5, K5 // c5d54aeb + KADDB K1, K5, K5 // c5d54ae9 + KADDW K6, K6, K1 // c5cc4ace + KADDW K4, K6, K1 // c5cc4acc + KADDW K6, K7, K1 // c5c44ace + KADDW K4, K7, K1 // c5c44acc + KADDW K6, K6, K3 // c5cc4ade + KADDW K4, K6, K3 // c5cc4adc + KADDW K6, K7, K3 // c5c44ade + KADDW K4, K7, K3 // c5c44adc + KANDB K2, K4, K4 // c5dd41e2 + KANDB K7, K4, K4 // c5dd41e7 + KANDB K2, K5, K4 // c5d541e2 + KANDB K7, K5, K4 // c5d541e7 + KANDB K2, K4, K6 // c5dd41f2 + KANDB K7, K4, K6 // c5dd41f7 + KANDB K2, K5, K6 // c5d541f2 + KANDB K7, K5, K6 // c5d541f7 + KANDNB K7, K5, K3 // c5d542df + KANDNB K6, K5, K3 // c5d542de + KANDNB K7, K4, K3 // c5dd42df + KANDNB K6, K4, K3 // c5dd42de + KANDNB K7, K5, K1 // c5d542cf + KANDNB K6, K5, K1 // c5d542ce + KANDNB K7, K4, K1 // c5dd42cf + KANDNB K6, K4, K1 // c5dd42ce + KMOVB K7, 17(SP) // c5f9917c2411 + KMOVB K6, 17(SP) // c5f991742411 + KMOVB K7, -17(BP)(SI*4) // c5f9917cb5ef + KMOVB K6, -17(BP)(SI*4) // c5f99174b5ef + KMOVB K4, AX // c5f993c4 + KMOVB K6, AX // c5f993c6 + KMOVB K4, R9 // c57993cc + KMOVB K6, R9 // c57993ce + KMOVB K5, K0 // c5f990c5 + KMOVB K4, K0 // c5f990c4 + KMOVB 7(AX), K0 // c5f9904007 + KMOVB (DI), K0 // c5f99007 + KMOVB K5, K7 // c5f990fd + KMOVB K4, K7 // c5f990fc + KMOVB 7(AX), K7 // c5f9907807 + KMOVB (DI), K7 // c5f9903f + KMOVB CX, K4 // c5f992e1 + KMOVB SP, K4 // c5f992e4 + KMOVB CX, K6 // c5f992f1 + KMOVB SP, K6 // c5f992f4 + KNOTB K1, K4 // c5f944e1 + KNOTB K3, K4 // c5f944e3 + KNOTB K1, K6 // c5f944f1 + KNOTB K3, K6 // c5f944f3 + KORB K3, K1, K6 // c5f545f3 + KORB K1, K1, K6 // c5f545f1 + KORB K3, K5, K6 // c5d545f3 + KORB K1, K5, K6 // c5d545f1 + KORB K3, K1, K5 // c5f545eb + KORB K1, K1, K5 // c5f545e9 + KORB K3, K5, K5 
// c5d545eb + KORB K1, K5, K5 // c5d545e9 + KORTESTB K6, K1 // c5f998ce + KORTESTB K7, K1 // c5f998cf + KORTESTB K6, K3 // c5f998de + KORTESTB K7, K3 // c5f998df + KSHIFTLB $127, K4, K7 // c4e37932fc7f + KSHIFTLB $127, K6, K7 // c4e37932fe7f + KSHIFTLB $127, K4, K6 // c4e37932f47f + KSHIFTLB $127, K6, K6 // c4e37932f67f + KSHIFTRB $42, K4, K4 // c4e37930e42a + KSHIFTRB $42, K5, K4 // c4e37930e52a + KSHIFTRB $42, K4, K6 // c4e37930f42a + KSHIFTRB $42, K5, K6 // c4e37930f52a + KTESTB K4, K7 // c5f999fc + KTESTB K6, K7 // c5f999fe + KTESTB K4, K6 // c5f999f4 + KTESTB K6, K6 // c5f999f6 + KTESTW K6, K6 // c5f899f6 + KTESTW K4, K6 // c5f899f4 + KTESTW K6, K7 // c5f899fe + KTESTW K4, K7 // c5f899fc + KXNORB K5, K0, K4 // c5fd46e5 + KXNORB K4, K0, K4 // c5fd46e4 + KXNORB K5, K7, K4 // c5c546e5 + KXNORB K4, K7, K4 // c5c546e4 + KXNORB K5, K0, K6 // c5fd46f5 + KXNORB K4, K0, K6 // c5fd46f4 + KXNORB K5, K7, K6 // c5c546f5 + KXNORB K4, K7, K6 // c5c546f4 + KXORB K5, K3, K1 // c5e547cd + KXORB K4, K3, K1 // c5e547cc + KXORB K5, K1, K1 // c5f547cd + KXORB K4, K1, K1 // c5f547cc + KXORB K5, K3, K5 // c5e547ed + KXORB K4, K3, K5 // c5e547ec + KXORB K5, K1, K5 // c5f547ed + KXORB K4, K1, K5 // c5f547ec + VANDNPD X15, X0, K4, X22 // 62c1fd0c55f7 + VANDNPD X11, X0, K4, X22 // 62c1fd0c55f3 + VANDNPD X0, X0, K4, X22 // 62e1fd0c55f0 + VANDNPD (R8), X0, K4, X22 // 62c1fd0c5530 + VANDNPD 15(DX)(BX*2), X0, K4, X22 // 62e1fd0c55b45a0f000000 + VANDNPD X15, X17, K4, X22 // 62c1f50455f7 + VANDNPD X11, X17, K4, X22 // 62c1f50455f3 + VANDNPD X0, X17, K4, X22 // 62e1f50455f0 + VANDNPD (R8), X17, K4, X22 // 62c1f5045530 + VANDNPD 15(DX)(BX*2), X17, K4, X22 // 62e1f50455b45a0f000000 + VANDNPD X15, X7, K4, X22 // 62c1c50c55f7 + VANDNPD X11, X7, K4, X22 // 62c1c50c55f3 + VANDNPD X0, X7, K4, X22 // 62e1c50c55f0 + VANDNPD (R8), X7, K4, X22 // 62c1c50c5530 + VANDNPD 15(DX)(BX*2), X7, K4, X22 // 62e1c50c55b45a0f000000 + VANDNPD X15, X0, K4, X5 // 62d1fd0c55ef + VANDNPD X11, X0, K4, X5 // 62d1fd0c55eb + 
VANDNPD X0, X0, K4, X5 // 62f1fd0c55e8 + VANDNPD (R8), X0, K4, X5 // 62d1fd0c5528 + VANDNPD 15(DX)(BX*2), X0, K4, X5 // 62f1fd0c55ac5a0f000000 + VANDNPD X15, X17, K4, X5 // 62d1f50455ef + VANDNPD X11, X17, K4, X5 // 62d1f50455eb + VANDNPD X0, X17, K4, X5 // 62f1f50455e8 + VANDNPD (R8), X17, K4, X5 // 62d1f5045528 + VANDNPD 15(DX)(BX*2), X17, K4, X5 // 62f1f50455ac5a0f000000 + VANDNPD X15, X7, K4, X5 // 62d1c50c55ef + VANDNPD X11, X7, K4, X5 // 62d1c50c55eb + VANDNPD X0, X7, K4, X5 // 62f1c50c55e8 + VANDNPD (R8), X7, K4, X5 // 62d1c50c5528 + VANDNPD 15(DX)(BX*2), X7, K4, X5 // 62f1c50c55ac5a0f000000 + VANDNPD X15, X0, K4, X14 // 6251fd0c55f7 + VANDNPD X11, X0, K4, X14 // 6251fd0c55f3 + VANDNPD X0, X0, K4, X14 // 6271fd0c55f0 + VANDNPD (R8), X0, K4, X14 // 6251fd0c5530 + VANDNPD 15(DX)(BX*2), X0, K4, X14 // 6271fd0c55b45a0f000000 + VANDNPD X15, X17, K4, X14 // 6251f50455f7 + VANDNPD X11, X17, K4, X14 // 6251f50455f3 + VANDNPD X0, X17, K4, X14 // 6271f50455f0 + VANDNPD (R8), X17, K4, X14 // 6251f5045530 + VANDNPD 15(DX)(BX*2), X17, K4, X14 // 6271f50455b45a0f000000 + VANDNPD X15, X7, K4, X14 // 6251c50c55f7 + VANDNPD X11, X7, K4, X14 // 6251c50c55f3 + VANDNPD X0, X7, K4, X14 // 6271c50c55f0 + VANDNPD (R8), X7, K4, X14 // 6251c50c5530 + VANDNPD 15(DX)(BX*2), X7, K4, X14 // 6271c50c55b45a0f000000 + VANDNPD Y17, Y12, K5, Y0 // 62b19d2d55c1 + VANDNPD Y7, Y12, K5, Y0 // 62f19d2d55c7 + VANDNPD Y9, Y12, K5, Y0 // 62d19d2d55c1 + VANDNPD 99(R15)(R15*8), Y12, K5, Y0 // 62919d2d5584ff63000000 + VANDNPD 7(AX)(CX*8), Y12, K5, Y0 // 62f19d2d5584c807000000 + VANDNPD Y17, Y1, K5, Y0 // 62b1f52d55c1 + VANDNPD Y7, Y1, K5, Y0 // 62f1f52d55c7 + VANDNPD Y9, Y1, K5, Y0 // 62d1f52d55c1 + VANDNPD 99(R15)(R15*8), Y1, K5, Y0 // 6291f52d5584ff63000000 + VANDNPD 7(AX)(CX*8), Y1, K5, Y0 // 62f1f52d5584c807000000 + VANDNPD Y17, Y14, K5, Y0 // 62b18d2d55c1 + VANDNPD Y7, Y14, K5, Y0 // 62f18d2d55c7 + VANDNPD Y9, Y14, K5, Y0 // 62d18d2d55c1 + VANDNPD 99(R15)(R15*8), Y14, K5, Y0 // 
62918d2d5584ff63000000 + VANDNPD 7(AX)(CX*8), Y14, K5, Y0 // 62f18d2d5584c807000000 + VANDNPD Y17, Y12, K5, Y22 // 62a19d2d55f1 + VANDNPD Y7, Y12, K5, Y22 // 62e19d2d55f7 + VANDNPD Y9, Y12, K5, Y22 // 62c19d2d55f1 + VANDNPD 99(R15)(R15*8), Y12, K5, Y22 // 62819d2d55b4ff63000000 + VANDNPD 7(AX)(CX*8), Y12, K5, Y22 // 62e19d2d55b4c807000000 + VANDNPD Y17, Y1, K5, Y22 // 62a1f52d55f1 + VANDNPD Y7, Y1, K5, Y22 // 62e1f52d55f7 + VANDNPD Y9, Y1, K5, Y22 // 62c1f52d55f1 + VANDNPD 99(R15)(R15*8), Y1, K5, Y22 // 6281f52d55b4ff63000000 + VANDNPD 7(AX)(CX*8), Y1, K5, Y22 // 62e1f52d55b4c807000000 + VANDNPD Y17, Y14, K5, Y22 // 62a18d2d55f1 + VANDNPD Y7, Y14, K5, Y22 // 62e18d2d55f7 + VANDNPD Y9, Y14, K5, Y22 // 62c18d2d55f1 + VANDNPD 99(R15)(R15*8), Y14, K5, Y22 // 62818d2d55b4ff63000000 + VANDNPD 7(AX)(CX*8), Y14, K5, Y22 // 62e18d2d55b4c807000000 + VANDNPD Y17, Y12, K5, Y13 // 62319d2d55e9 + VANDNPD Y7, Y12, K5, Y13 // 62719d2d55ef + VANDNPD Y9, Y12, K5, Y13 // 62519d2d55e9 + VANDNPD 99(R15)(R15*8), Y12, K5, Y13 // 62119d2d55acff63000000 + VANDNPD 7(AX)(CX*8), Y12, K5, Y13 // 62719d2d55acc807000000 + VANDNPD Y17, Y1, K5, Y13 // 6231f52d55e9 + VANDNPD Y7, Y1, K5, Y13 // 6271f52d55ef + VANDNPD Y9, Y1, K5, Y13 // 6251f52d55e9 + VANDNPD 99(R15)(R15*8), Y1, K5, Y13 // 6211f52d55acff63000000 + VANDNPD 7(AX)(CX*8), Y1, K5, Y13 // 6271f52d55acc807000000 + VANDNPD Y17, Y14, K5, Y13 // 62318d2d55e9 + VANDNPD Y7, Y14, K5, Y13 // 62718d2d55ef + VANDNPD Y9, Y14, K5, Y13 // 62518d2d55e9 + VANDNPD 99(R15)(R15*8), Y14, K5, Y13 // 62118d2d55acff63000000 + VANDNPD 7(AX)(CX*8), Y14, K5, Y13 // 62718d2d55acc807000000 + VANDNPD Z20, Z0, K7, Z7 // 62b1fd4f55fc + VANDNPD Z28, Z0, K7, Z7 // 6291fd4f55fc + VANDNPD 99(R15)(R15*8), Z0, K7, Z7 // 6291fd4f55bcff63000000 + VANDNPD 7(AX)(CX*8), Z0, K7, Z7 // 62f1fd4f55bcc807000000 + VANDNPD Z20, Z6, K7, Z7 // 62b1cd4f55fc + VANDNPD Z28, Z6, K7, Z7 // 6291cd4f55fc + VANDNPD 99(R15)(R15*8), Z6, K7, Z7 // 6291cd4f55bcff63000000 + VANDNPD 7(AX)(CX*8), Z6, 
K7, Z7 // 62f1cd4f55bcc807000000 + VANDNPD Z20, Z0, K7, Z9 // 6231fd4f55cc + VANDNPD Z28, Z0, K7, Z9 // 6211fd4f55cc + VANDNPD 99(R15)(R15*8), Z0, K7, Z9 // 6211fd4f558cff63000000 + VANDNPD 7(AX)(CX*8), Z0, K7, Z9 // 6271fd4f558cc807000000 + VANDNPD Z20, Z6, K7, Z9 // 6231cd4f55cc + VANDNPD Z28, Z6, K7, Z9 // 6211cd4f55cc + VANDNPD 99(R15)(R15*8), Z6, K7, Z9 // 6211cd4f558cff63000000 + VANDNPD 7(AX)(CX*8), Z6, K7, Z9 // 6271cd4f558cc807000000 + VANDNPS X15, X25, K7, X18 // 62c1340755d7 + VANDNPS X28, X25, K7, X18 // 6281340755d4 + VANDNPS 17(SP)(BP*1), X25, K7, X18 // 62e1340755942c11000000 + VANDNPS -7(CX)(DX*8), X25, K7, X18 // 62e134075594d1f9ffffff + VANDNPS X15, X3, K7, X18 // 62c1640f55d7 + VANDNPS X28, X3, K7, X18 // 6281640f55d4 + VANDNPS 17(SP)(BP*1), X3, K7, X18 // 62e1640f55942c11000000 + VANDNPS -7(CX)(DX*8), X3, K7, X18 // 62e1640f5594d1f9ffffff + VANDNPS X15, X18, K7, X18 // 62c16c0755d7 + VANDNPS X28, X18, K7, X18 // 62816c0755d4 + VANDNPS 17(SP)(BP*1), X18, K7, X18 // 62e16c0755942c11000000 + VANDNPS -7(CX)(DX*8), X18, K7, X18 // 62e16c075594d1f9ffffff + VANDNPS X15, X25, K7, X8 // 6251340755c7 + VANDNPS X28, X25, K7, X8 // 6211340755c4 + VANDNPS 17(SP)(BP*1), X25, K7, X8 // 6271340755842c11000000 + VANDNPS -7(CX)(DX*8), X25, K7, X8 // 627134075584d1f9ffffff + VANDNPS X15, X3, K7, X8 // 6251640f55c7 + VANDNPS X28, X3, K7, X8 // 6211640f55c4 + VANDNPS 17(SP)(BP*1), X3, K7, X8 // 6271640f55842c11000000 + VANDNPS -7(CX)(DX*8), X3, K7, X8 // 6271640f5584d1f9ffffff + VANDNPS X15, X18, K7, X8 // 62516c0755c7 + VANDNPS X28, X18, K7, X8 // 62116c0755c4 + VANDNPS 17(SP)(BP*1), X18, K7, X8 // 62716c0755842c11000000 + VANDNPS -7(CX)(DX*8), X18, K7, X8 // 62716c075584d1f9ffffff + VANDNPS X15, X25, K7, X27 // 6241340755df + VANDNPS X28, X25, K7, X27 // 6201340755dc + VANDNPS 17(SP)(BP*1), X25, K7, X27 // 62613407559c2c11000000 + VANDNPS -7(CX)(DX*8), X25, K7, X27 // 62613407559cd1f9ffffff + VANDNPS X15, X3, K7, X27 // 6241640f55df + VANDNPS X28, X3, K7, X27 // 
6201640f55dc + VANDNPS 17(SP)(BP*1), X3, K7, X27 // 6261640f559c2c11000000 + VANDNPS -7(CX)(DX*8), X3, K7, X27 // 6261640f559cd1f9ffffff + VANDNPS X15, X18, K7, X27 // 62416c0755df + VANDNPS X28, X18, K7, X27 // 62016c0755dc + VANDNPS 17(SP)(BP*1), X18, K7, X27 // 62616c07559c2c11000000 + VANDNPS -7(CX)(DX*8), X18, K7, X27 // 62616c07559cd1f9ffffff + VANDNPS Y2, Y28, K6, Y31 // 62611c2655fa + VANDNPS Y21, Y28, K6, Y31 // 62211c2655fd + VANDNPS Y12, Y28, K6, Y31 // 62411c2655fc + VANDNPS (AX), Y28, K6, Y31 // 62611c265538 + VANDNPS 7(SI), Y28, K6, Y31 // 62611c2655be07000000 + VANDNPS Y2, Y13, K6, Y31 // 6261142e55fa + VANDNPS Y21, Y13, K6, Y31 // 6221142e55fd + VANDNPS Y12, Y13, K6, Y31 // 6241142e55fc + VANDNPS (AX), Y13, K6, Y31 // 6261142e5538 + VANDNPS 7(SI), Y13, K6, Y31 // 6261142e55be07000000 + VANDNPS Y2, Y7, K6, Y31 // 6261442e55fa + VANDNPS Y21, Y7, K6, Y31 // 6221442e55fd + VANDNPS Y12, Y7, K6, Y31 // 6241442e55fc + VANDNPS (AX), Y7, K6, Y31 // 6261442e5538 + VANDNPS 7(SI), Y7, K6, Y31 // 6261442e55be07000000 + VANDNPS Y2, Y28, K6, Y8 // 62711c2655c2 + VANDNPS Y21, Y28, K6, Y8 // 62311c2655c5 + VANDNPS Y12, Y28, K6, Y8 // 62511c2655c4 + VANDNPS (AX), Y28, K6, Y8 // 62711c265500 + VANDNPS 7(SI), Y28, K6, Y8 // 62711c26558607000000 + VANDNPS Y2, Y13, K6, Y8 // 6271142e55c2 + VANDNPS Y21, Y13, K6, Y8 // 6231142e55c5 + VANDNPS Y12, Y13, K6, Y8 // 6251142e55c4 + VANDNPS (AX), Y13, K6, Y8 // 6271142e5500 + VANDNPS 7(SI), Y13, K6, Y8 // 6271142e558607000000 + VANDNPS Y2, Y7, K6, Y8 // 6271442e55c2 + VANDNPS Y21, Y7, K6, Y8 // 6231442e55c5 + VANDNPS Y12, Y7, K6, Y8 // 6251442e55c4 + VANDNPS (AX), Y7, K6, Y8 // 6271442e5500 + VANDNPS 7(SI), Y7, K6, Y8 // 6271442e558607000000 + VANDNPS Y2, Y28, K6, Y1 // 62f11c2655ca + VANDNPS Y21, Y28, K6, Y1 // 62b11c2655cd + VANDNPS Y12, Y28, K6, Y1 // 62d11c2655cc + VANDNPS (AX), Y28, K6, Y1 // 62f11c265508 + VANDNPS 7(SI), Y28, K6, Y1 // 62f11c26558e07000000 + VANDNPS Y2, Y13, K6, Y1 // 62f1142e55ca + VANDNPS Y21, Y13, K6, Y1 
// 62b1142e55cd + VANDNPS Y12, Y13, K6, Y1 // 62d1142e55cc + VANDNPS (AX), Y13, K6, Y1 // 62f1142e5508 + VANDNPS 7(SI), Y13, K6, Y1 // 62f1142e558e07000000 + VANDNPS Y2, Y7, K6, Y1 // 62f1442e55ca + VANDNPS Y21, Y7, K6, Y1 // 62b1442e55cd + VANDNPS Y12, Y7, K6, Y1 // 62d1442e55cc + VANDNPS (AX), Y7, K6, Y1 // 62f1442e5508 + VANDNPS 7(SI), Y7, K6, Y1 // 62f1442e558e07000000 + VANDNPS Z12, Z9, K3, Z3 // 62d1344b55dc + VANDNPS Z22, Z9, K3, Z3 // 62b1344b55de + VANDNPS (AX), Z9, K3, Z3 // 62f1344b5518 + VANDNPS 7(SI), Z9, K3, Z3 // 62f1344b559e07000000 + VANDNPS Z12, Z19, K3, Z3 // 62d1644355dc + VANDNPS Z22, Z19, K3, Z3 // 62b1644355de + VANDNPS (AX), Z19, K3, Z3 // 62f164435518 + VANDNPS 7(SI), Z19, K3, Z3 // 62f16443559e07000000 + VANDNPS Z12, Z9, K3, Z30 // 6241344b55f4 + VANDNPS Z22, Z9, K3, Z30 // 6221344b55f6 + VANDNPS (AX), Z9, K3, Z30 // 6261344b5530 + VANDNPS 7(SI), Z9, K3, Z30 // 6261344b55b607000000 + VANDNPS Z12, Z19, K3, Z30 // 6241644355f4 + VANDNPS Z22, Z19, K3, Z30 // 6221644355f6 + VANDNPS (AX), Z19, K3, Z30 // 626164435530 + VANDNPS 7(SI), Z19, K3, Z30 // 6261644355b607000000 + VANDPD X22, X24, K7, X7 // 62b1bd0754fe + VANDPD X1, X24, K7, X7 // 62f1bd0754f9 + VANDPD X11, X24, K7, X7 // 62d1bd0754fb + VANDPD -17(BP)(SI*2), X24, K7, X7 // 62f1bd0754bc75efffffff + VANDPD 7(AX)(CX*2), X24, K7, X7 // 62f1bd0754bc4807000000 + VANDPD X22, X7, K7, X7 // 62b1c50f54fe + VANDPD X1, X7, K7, X7 // 62f1c50f54f9 + VANDPD X11, X7, K7, X7 // 62d1c50f54fb + VANDPD -17(BP)(SI*2), X7, K7, X7 // 62f1c50f54bc75efffffff + VANDPD 7(AX)(CX*2), X7, K7, X7 // 62f1c50f54bc4807000000 + VANDPD X22, X0, K7, X7 // 62b1fd0f54fe + VANDPD X1, X0, K7, X7 // 62f1fd0f54f9 + VANDPD X11, X0, K7, X7 // 62d1fd0f54fb + VANDPD -17(BP)(SI*2), X0, K7, X7 // 62f1fd0f54bc75efffffff + VANDPD 7(AX)(CX*2), X0, K7, X7 // 62f1fd0f54bc4807000000 + VANDPD X22, X24, K7, X13 // 6231bd0754ee + VANDPD X1, X24, K7, X13 // 6271bd0754e9 + VANDPD X11, X24, K7, X13 // 6251bd0754eb + VANDPD -17(BP)(SI*2), X24, K7, 
X13 // 6271bd0754ac75efffffff + VANDPD 7(AX)(CX*2), X24, K7, X13 // 6271bd0754ac4807000000 + VANDPD X22, X7, K7, X13 // 6231c50f54ee + VANDPD X1, X7, K7, X13 // 6271c50f54e9 + VANDPD X11, X7, K7, X13 // 6251c50f54eb + VANDPD -17(BP)(SI*2), X7, K7, X13 // 6271c50f54ac75efffffff + VANDPD 7(AX)(CX*2), X7, K7, X13 // 6271c50f54ac4807000000 + VANDPD X22, X0, K7, X13 // 6231fd0f54ee + VANDPD X1, X0, K7, X13 // 6271fd0f54e9 + VANDPD X11, X0, K7, X13 // 6251fd0f54eb + VANDPD -17(BP)(SI*2), X0, K7, X13 // 6271fd0f54ac75efffffff + VANDPD 7(AX)(CX*2), X0, K7, X13 // 6271fd0f54ac4807000000 + VANDPD X22, X24, K7, X8 // 6231bd0754c6 + VANDPD X1, X24, K7, X8 // 6271bd0754c1 + VANDPD X11, X24, K7, X8 // 6251bd0754c3 + VANDPD -17(BP)(SI*2), X24, K7, X8 // 6271bd07548475efffffff + VANDPD 7(AX)(CX*2), X24, K7, X8 // 6271bd0754844807000000 + VANDPD X22, X7, K7, X8 // 6231c50f54c6 + VANDPD X1, X7, K7, X8 // 6271c50f54c1 + VANDPD X11, X7, K7, X8 // 6251c50f54c3 + VANDPD -17(BP)(SI*2), X7, K7, X8 // 6271c50f548475efffffff + VANDPD 7(AX)(CX*2), X7, K7, X8 // 6271c50f54844807000000 + VANDPD X22, X0, K7, X8 // 6231fd0f54c6 + VANDPD X1, X0, K7, X8 // 6271fd0f54c1 + VANDPD X11, X0, K7, X8 // 6251fd0f54c3 + VANDPD -17(BP)(SI*2), X0, K7, X8 // 6271fd0f548475efffffff + VANDPD 7(AX)(CX*2), X0, K7, X8 // 6271fd0f54844807000000 + VANDPD Y12, Y3, K4, Y9 // 6251e52c54cc + VANDPD Y21, Y3, K4, Y9 // 6231e52c54cd + VANDPD Y14, Y3, K4, Y9 // 6251e52c54ce + VANDPD (BX), Y3, K4, Y9 // 6271e52c540b + VANDPD -17(BP)(SI*1), Y3, K4, Y9 // 6271e52c548c35efffffff + VANDPD Y12, Y2, K4, Y9 // 6251ed2c54cc + VANDPD Y21, Y2, K4, Y9 // 6231ed2c54cd + VANDPD Y14, Y2, K4, Y9 // 6251ed2c54ce + VANDPD (BX), Y2, K4, Y9 // 6271ed2c540b + VANDPD -17(BP)(SI*1), Y2, K4, Y9 // 6271ed2c548c35efffffff + VANDPD Y12, Y9, K4, Y9 // 6251b52c54cc + VANDPD Y21, Y9, K4, Y9 // 6231b52c54cd + VANDPD Y14, Y9, K4, Y9 // 6251b52c54ce + VANDPD (BX), Y9, K4, Y9 // 6271b52c540b + VANDPD -17(BP)(SI*1), Y9, K4, Y9 // 6271b52c548c35efffffff + 
VANDPD Y12, Y3, K4, Y1 // 62d1e52c54cc + VANDPD Y21, Y3, K4, Y1 // 62b1e52c54cd + VANDPD Y14, Y3, K4, Y1 // 62d1e52c54ce + VANDPD (BX), Y3, K4, Y1 // 62f1e52c540b + VANDPD -17(BP)(SI*1), Y3, K4, Y1 // 62f1e52c548c35efffffff + VANDPD Y12, Y2, K4, Y1 // 62d1ed2c54cc + VANDPD Y21, Y2, K4, Y1 // 62b1ed2c54cd + VANDPD Y14, Y2, K4, Y1 // 62d1ed2c54ce + VANDPD (BX), Y2, K4, Y1 // 62f1ed2c540b + VANDPD -17(BP)(SI*1), Y2, K4, Y1 // 62f1ed2c548c35efffffff + VANDPD Y12, Y9, K4, Y1 // 62d1b52c54cc + VANDPD Y21, Y9, K4, Y1 // 62b1b52c54cd + VANDPD Y14, Y9, K4, Y1 // 62d1b52c54ce + VANDPD (BX), Y9, K4, Y1 // 62f1b52c540b + VANDPD -17(BP)(SI*1), Y9, K4, Y1 // 62f1b52c548c35efffffff + VANDPD Z2, Z18, K4, Z11 // 6271ed4454da + VANDPD Z21, Z18, K4, Z11 // 6231ed4454dd + VANDPD (BX), Z18, K4, Z11 // 6271ed44541b + VANDPD -17(BP)(SI*1), Z18, K4, Z11 // 6271ed44549c35efffffff + VANDPD Z2, Z24, K4, Z11 // 6271bd4454da + VANDPD Z21, Z24, K4, Z11 // 6231bd4454dd + VANDPD (BX), Z24, K4, Z11 // 6271bd44541b + VANDPD -17(BP)(SI*1), Z24, K4, Z11 // 6271bd44549c35efffffff + VANDPD Z2, Z18, K4, Z5 // 62f1ed4454ea + VANDPD Z21, Z18, K4, Z5 // 62b1ed4454ed + VANDPD (BX), Z18, K4, Z5 // 62f1ed44542b + VANDPD -17(BP)(SI*1), Z18, K4, Z5 // 62f1ed4454ac35efffffff + VANDPD Z2, Z24, K4, Z5 // 62f1bd4454ea + VANDPD Z21, Z24, K4, Z5 // 62b1bd4454ed + VANDPD (BX), Z24, K4, Z5 // 62f1bd44542b + VANDPD -17(BP)(SI*1), Z24, K4, Z5 // 62f1bd4454ac35efffffff + VANDPS X20, X31, K7, X6 // 62b1040754f4 + VANDPS X24, X31, K7, X6 // 6291040754f0 + VANDPS X7, X31, K7, X6 // 62f1040754f7 + VANDPS 15(R8)(R14*1), X31, K7, X6 // 6291040754b4300f000000 + VANDPS 15(R8)(R14*2), X31, K7, X6 // 6291040754b4700f000000 + VANDPS X20, X3, K7, X6 // 62b1640f54f4 + VANDPS X24, X3, K7, X6 // 6291640f54f0 + VANDPS X7, X3, K7, X6 // 62f1640f54f7 + VANDPS 15(R8)(R14*1), X3, K7, X6 // 6291640f54b4300f000000 + VANDPS 15(R8)(R14*2), X3, K7, X6 // 6291640f54b4700f000000 + VANDPS X20, X28, K7, X6 // 62b11c0754f4 + VANDPS X24, X28, K7, X6 // 
62911c0754f0 + VANDPS X7, X28, K7, X6 // 62f11c0754f7 + VANDPS 15(R8)(R14*1), X28, K7, X6 // 62911c0754b4300f000000 + VANDPS 15(R8)(R14*2), X28, K7, X6 // 62911c0754b4700f000000 + VANDPS X20, X31, K7, X7 // 62b1040754fc + VANDPS X24, X31, K7, X7 // 6291040754f8 + VANDPS X7, X31, K7, X7 // 62f1040754ff + VANDPS 15(R8)(R14*1), X31, K7, X7 // 6291040754bc300f000000 + VANDPS 15(R8)(R14*2), X31, K7, X7 // 6291040754bc700f000000 + VANDPS X20, X3, K7, X7 // 62b1640f54fc + VANDPS X24, X3, K7, X7 // 6291640f54f8 + VANDPS X7, X3, K7, X7 // 62f1640f54ff + VANDPS 15(R8)(R14*1), X3, K7, X7 // 6291640f54bc300f000000 + VANDPS 15(R8)(R14*2), X3, K7, X7 // 6291640f54bc700f000000 + VANDPS X20, X28, K7, X7 // 62b11c0754fc + VANDPS X24, X28, K7, X7 // 62911c0754f8 + VANDPS X7, X28, K7, X7 // 62f11c0754ff + VANDPS 15(R8)(R14*1), X28, K7, X7 // 62911c0754bc300f000000 + VANDPS 15(R8)(R14*2), X28, K7, X7 // 62911c0754bc700f000000 + VANDPS X20, X31, K7, X8 // 6231040754c4 + VANDPS X24, X31, K7, X8 // 6211040754c0 + VANDPS X7, X31, K7, X8 // 6271040754c7 + VANDPS 15(R8)(R14*1), X31, K7, X8 // 621104075484300f000000 + VANDPS 15(R8)(R14*2), X31, K7, X8 // 621104075484700f000000 + VANDPS X20, X3, K7, X8 // 6231640f54c4 + VANDPS X24, X3, K7, X8 // 6211640f54c0 + VANDPS X7, X3, K7, X8 // 6271640f54c7 + VANDPS 15(R8)(R14*1), X3, K7, X8 // 6211640f5484300f000000 + VANDPS 15(R8)(R14*2), X3, K7, X8 // 6211640f5484700f000000 + VANDPS X20, X28, K7, X8 // 62311c0754c4 + VANDPS X24, X28, K7, X8 // 62111c0754c0 + VANDPS X7, X28, K7, X8 // 62711c0754c7 + VANDPS 15(R8)(R14*1), X28, K7, X8 // 62111c075484300f000000 + VANDPS 15(R8)(R14*2), X28, K7, X8 // 62111c075484700f000000 + VANDPS Y31, Y16, K2, Y30 // 62017c2254f7 + VANDPS Y22, Y16, K2, Y30 // 62217c2254f6 + VANDPS Y6, Y16, K2, Y30 // 62617c2254f6 + VANDPS 15(R8)(R14*4), Y16, K2, Y30 // 62017c2254b4b00f000000 + VANDPS -7(CX)(DX*4), Y16, K2, Y30 // 62617c2254b491f9ffffff + VANDPS Y31, Y1, K2, Y30 // 6201742a54f7 + VANDPS Y22, Y1, K2, Y30 // 6221742a54f6 
+ VANDPS Y6, Y1, K2, Y30 // 6261742a54f6 + VANDPS 15(R8)(R14*4), Y1, K2, Y30 // 6201742a54b4b00f000000 + VANDPS -7(CX)(DX*4), Y1, K2, Y30 // 6261742a54b491f9ffffff + VANDPS Y31, Y30, K2, Y30 // 62010c2254f7 + VANDPS Y22, Y30, K2, Y30 // 62210c2254f6 + VANDPS Y6, Y30, K2, Y30 // 62610c2254f6 + VANDPS 15(R8)(R14*4), Y30, K2, Y30 // 62010c2254b4b00f000000 + VANDPS -7(CX)(DX*4), Y30, K2, Y30 // 62610c2254b491f9ffffff + VANDPS Y31, Y16, K2, Y26 // 62017c2254d7 + VANDPS Y22, Y16, K2, Y26 // 62217c2254d6 + VANDPS Y6, Y16, K2, Y26 // 62617c2254d6 + VANDPS 15(R8)(R14*4), Y16, K2, Y26 // 62017c225494b00f000000 + VANDPS -7(CX)(DX*4), Y16, K2, Y26 // 62617c22549491f9ffffff + VANDPS Y31, Y1, K2, Y26 // 6201742a54d7 + VANDPS Y22, Y1, K2, Y26 // 6221742a54d6 + VANDPS Y6, Y1, K2, Y26 // 6261742a54d6 + VANDPS 15(R8)(R14*4), Y1, K2, Y26 // 6201742a5494b00f000000 + VANDPS -7(CX)(DX*4), Y1, K2, Y26 // 6261742a549491f9ffffff + VANDPS Y31, Y30, K2, Y26 // 62010c2254d7 + VANDPS Y22, Y30, K2, Y26 // 62210c2254d6 + VANDPS Y6, Y30, K2, Y26 // 62610c2254d6 + VANDPS 15(R8)(R14*4), Y30, K2, Y26 // 62010c225494b00f000000 + VANDPS -7(CX)(DX*4), Y30, K2, Y26 // 62610c22549491f9ffffff + VANDPS Y31, Y16, K2, Y7 // 62917c2254ff + VANDPS Y22, Y16, K2, Y7 // 62b17c2254fe + VANDPS Y6, Y16, K2, Y7 // 62f17c2254fe + VANDPS 15(R8)(R14*4), Y16, K2, Y7 // 62917c2254bcb00f000000 + VANDPS -7(CX)(DX*4), Y16, K2, Y7 // 62f17c2254bc91f9ffffff + VANDPS Y31, Y1, K2, Y7 // 6291742a54ff + VANDPS Y22, Y1, K2, Y7 // 62b1742a54fe + VANDPS Y6, Y1, K2, Y7 // 62f1742a54fe + VANDPS 15(R8)(R14*4), Y1, K2, Y7 // 6291742a54bcb00f000000 + VANDPS -7(CX)(DX*4), Y1, K2, Y7 // 62f1742a54bc91f9ffffff + VANDPS Y31, Y30, K2, Y7 // 62910c2254ff + VANDPS Y22, Y30, K2, Y7 // 62b10c2254fe + VANDPS Y6, Y30, K2, Y7 // 62f10c2254fe + VANDPS 15(R8)(R14*4), Y30, K2, Y7 // 62910c2254bcb00f000000 + VANDPS -7(CX)(DX*4), Y30, K2, Y7 // 62f10c2254bc91f9ffffff + VANDPS Z6, Z6, K5, Z7 // 62f14c4d54fe + VANDPS Z22, Z6, K5, Z7 // 62b14c4d54fe + VANDPS 
15(R8)(R14*4), Z6, K5, Z7 // 62914c4d54bcb00f000000 + VANDPS -7(CX)(DX*4), Z6, K5, Z7 // 62f14c4d54bc91f9ffffff + VANDPS Z6, Z16, K5, Z7 // 62f17c4554fe + VANDPS Z22, Z16, K5, Z7 // 62b17c4554fe + VANDPS 15(R8)(R14*4), Z16, K5, Z7 // 62917c4554bcb00f000000 + VANDPS -7(CX)(DX*4), Z16, K5, Z7 // 62f17c4554bc91f9ffffff + VANDPS Z6, Z6, K5, Z13 // 62714c4d54ee + VANDPS Z22, Z6, K5, Z13 // 62314c4d54ee + VANDPS 15(R8)(R14*4), Z6, K5, Z13 // 62114c4d54acb00f000000 + VANDPS -7(CX)(DX*4), Z6, K5, Z13 // 62714c4d54ac91f9ffffff + VANDPS Z6, Z16, K5, Z13 // 62717c4554ee + VANDPS Z22, Z16, K5, Z13 // 62317c4554ee + VANDPS 15(R8)(R14*4), Z16, K5, Z13 // 62117c4554acb00f000000 + VANDPS -7(CX)(DX*4), Z16, K5, Z13 // 62717c4554ac91f9ffffff + VBROADCASTF32X2 X16, K3, Y1 // 62b27d2b19c8 + VBROADCASTF32X2 X28, K3, Y1 // 62927d2b19cc + VBROADCASTF32X2 X8, K3, Y1 // 62d27d2b19c8 + VBROADCASTF32X2 -17(BP)(SI*8), K3, Y1 // 62f27d2b198cf5efffffff + VBROADCASTF32X2 (R15), K3, Y1 // 62d27d2b190f + VBROADCASTF32X2 X16, K3, Y27 // 62227d2b19d8 + VBROADCASTF32X2 X28, K3, Y27 // 62027d2b19dc + VBROADCASTF32X2 X8, K3, Y27 // 62427d2b19d8 + VBROADCASTF32X2 -17(BP)(SI*8), K3, Y27 // 62627d2b199cf5efffffff + VBROADCASTF32X2 (R15), K3, Y27 // 62427d2b191f + VBROADCASTF32X2 X16, K3, Y19 // 62a27d2b19d8 + VBROADCASTF32X2 X28, K3, Y19 // 62827d2b19dc + VBROADCASTF32X2 X8, K3, Y19 // 62c27d2b19d8 + VBROADCASTF32X2 -17(BP)(SI*8), K3, Y19 // 62e27d2b199cf5efffffff + VBROADCASTF32X2 (R15), K3, Y19 // 62c27d2b191f + VBROADCASTF32X2 X15, K2, Z1 // 62d27d4a19cf + VBROADCASTF32X2 X11, K2, Z1 // 62d27d4a19cb + VBROADCASTF32X2 X1, K2, Z1 // 62f27d4a19c9 + VBROADCASTF32X2 7(SI)(DI*8), K2, Z1 // 62f27d4a198cfe07000000 + VBROADCASTF32X2 -15(R14), K2, Z1 // 62d27d4a198ef1ffffff + VBROADCASTF32X2 X15, K2, Z3 // 62d27d4a19df + VBROADCASTF32X2 X11, K2, Z3 // 62d27d4a19db + VBROADCASTF32X2 X1, K2, Z3 // 62f27d4a19d9 + VBROADCASTF32X2 7(SI)(DI*8), K2, Z3 // 62f27d4a199cfe07000000 + VBROADCASTF32X2 -15(R14), K2, Z3 // 
62d27d4a199ef1ffffff + VBROADCASTF32X8 -17(BP)(SI*2), K1, Z28 // 62627d491ba475efffffff + VBROADCASTF32X8 7(AX)(CX*2), K1, Z28 // 62627d491ba44807000000 + VBROADCASTF32X8 -17(BP)(SI*2), K1, Z13 // 62727d491bac75efffffff + VBROADCASTF32X8 7(AX)(CX*2), K1, Z13 // 62727d491bac4807000000 + VBROADCASTF64X2 -7(CX)(DX*1), K7, Y21 // 62e2fd2f1aac11f9ffffff + VBROADCASTF64X2 -15(R14)(R15*4), K7, Y21 // 6282fd2f1aacbef1ffffff + VBROADCASTF64X2 -7(CX)(DX*1), K7, Y7 // 62f2fd2f1abc11f9ffffff + VBROADCASTF64X2 -15(R14)(R15*4), K7, Y7 // 6292fd2f1abcbef1ffffff + VBROADCASTF64X2 -7(CX)(DX*1), K7, Y30 // 6262fd2f1ab411f9ffffff + VBROADCASTF64X2 -15(R14)(R15*4), K7, Y30 // 6202fd2f1ab4bef1ffffff + VBROADCASTF64X2 15(DX)(BX*1), K1, Z14 // 6272fd491ab41a0f000000 + VBROADCASTF64X2 -7(CX)(DX*2), K1, Z14 // 6272fd491ab451f9ffffff + VBROADCASTF64X2 15(DX)(BX*1), K1, Z28 // 6262fd491aa41a0f000000 + VBROADCASTF64X2 -7(CX)(DX*2), K1, Z28 // 6262fd491aa451f9ffffff + VBROADCASTI32X2 X14, K1, X19 // 62c27d0959de + VBROADCASTI32X2 X0, K1, X19 // 62e27d0959d8 + VBROADCASTI32X2 7(SI)(DI*1), K1, X19 // 62e27d09599c3e07000000 + VBROADCASTI32X2 15(DX)(BX*8), K1, X19 // 62e27d09599cda0f000000 + VBROADCASTI32X2 X14, K1, X13 // 62527d0959ee + VBROADCASTI32X2 X0, K1, X13 // 62727d0959e8 + VBROADCASTI32X2 7(SI)(DI*1), K1, X13 // 62727d0959ac3e07000000 + VBROADCASTI32X2 15(DX)(BX*8), K1, X13 // 62727d0959acda0f000000 + VBROADCASTI32X2 X14, K1, X2 // 62d27d0959d6 + VBROADCASTI32X2 X0, K1, X2 // 62f27d0959d0 + VBROADCASTI32X2 7(SI)(DI*1), K1, X2 // 62f27d0959943e07000000 + VBROADCASTI32X2 15(DX)(BX*8), K1, X2 // 62f27d095994da0f000000 + VBROADCASTI32X2 X25, K7, Y13 // 62127d2f59e9 + VBROADCASTI32X2 X11, K7, Y13 // 62527d2f59eb + VBROADCASTI32X2 X17, K7, Y13 // 62327d2f59e9 + VBROADCASTI32X2 -7(DI)(R8*1), K7, Y13 // 62327d2f59ac07f9ffffff + VBROADCASTI32X2 (SP), K7, Y13 // 62727d2f592c24 + VBROADCASTI32X2 X25, K7, Y18 // 62827d2f59d1 + VBROADCASTI32X2 X11, K7, Y18 // 62c27d2f59d3 + VBROADCASTI32X2 X17, K7, 
Y18 // 62a27d2f59d1 + VBROADCASTI32X2 -7(DI)(R8*1), K7, Y18 // 62a27d2f599407f9ffffff + VBROADCASTI32X2 (SP), K7, Y18 // 62e27d2f591424 + VBROADCASTI32X2 X25, K7, Y24 // 62027d2f59c1 + VBROADCASTI32X2 X11, K7, Y24 // 62427d2f59c3 + VBROADCASTI32X2 X17, K7, Y24 // 62227d2f59c1 + VBROADCASTI32X2 -7(DI)(R8*1), K7, Y24 // 62227d2f598407f9ffffff + VBROADCASTI32X2 (SP), K7, Y24 // 62627d2f590424 + VBROADCASTI32X2 X18, K2, Z15 // 62327d4a59fa + VBROADCASTI32X2 X11, K2, Z15 // 62527d4a59fb + VBROADCASTI32X2 X9, K2, Z15 // 62527d4a59f9 + VBROADCASTI32X2 -7(CX), K2, Z15 // 62727d4a59b9f9ffffff + VBROADCASTI32X2 15(DX)(BX*4), K2, Z15 // 62727d4a59bc9a0f000000 + VBROADCASTI32X2 X18, K2, Z30 // 62227d4a59f2 + VBROADCASTI32X2 X11, K2, Z30 // 62427d4a59f3 + VBROADCASTI32X2 X9, K2, Z30 // 62427d4a59f1 + VBROADCASTI32X2 -7(CX), K2, Z30 // 62627d4a59b1f9ffffff + VBROADCASTI32X2 15(DX)(BX*4), K2, Z30 // 62627d4a59b49a0f000000 + VBROADCASTI32X8 (R14), K3, Z5 // 62d27d4b5b2e + VBROADCASTI32X8 -7(DI)(R8*8), K3, Z5 // 62b27d4b5bacc7f9ffffff + VBROADCASTI32X8 (R14), K3, Z1 // 62d27d4b5b0e + VBROADCASTI32X8 -7(DI)(R8*8), K3, Z1 // 62b27d4b5b8cc7f9ffffff + VBROADCASTI64X2 15(R8), K4, Y5 // 62d2fd2c5aa80f000000 + VBROADCASTI64X2 (BP), K4, Y5 // 62f2fd2c5a6d00 + VBROADCASTI64X2 15(R8), K4, Y24 // 6242fd2c5a800f000000 + VBROADCASTI64X2 (BP), K4, Y24 // 6262fd2c5a4500 + VBROADCASTI64X2 15(R8), K4, Y21 // 62c2fd2c5aa80f000000 + VBROADCASTI64X2 (BP), K4, Y21 // 62e2fd2c5a6d00 + VBROADCASTI64X2 15(R8)(R14*8), K5, Z3 // 6292fd4d5a9cf00f000000 + VBROADCASTI64X2 -15(R14)(R15*2), K5, Z3 // 6292fd4d5a9c7ef1ffffff + VBROADCASTI64X2 15(R8)(R14*8), K5, Z5 // 6292fd4d5aacf00f000000 + VBROADCASTI64X2 -15(R14)(R15*2), K5, Z5 // 6292fd4d5aac7ef1ffffff + VCVTPD2QQ X15, K7, X0 // 62d1fd0f7bc7 + VCVTPD2QQ X11, K7, X0 // 62d1fd0f7bc3 + VCVTPD2QQ X0, K7, X0 // 62f1fd0f7bc0 + VCVTPD2QQ -17(BP)(SI*8), K7, X0 // 62f1fd0f7b84f5efffffff + VCVTPD2QQ (R15), K7, X0 // 62d1fd0f7b07 + VCVTPD2QQ X15, K7, X17 // 62c1fd0f7bcf 
+ VCVTPD2QQ X11, K7, X17 // 62c1fd0f7bcb + VCVTPD2QQ X0, K7, X17 // 62e1fd0f7bc8 + VCVTPD2QQ -17(BP)(SI*8), K7, X17 // 62e1fd0f7b8cf5efffffff + VCVTPD2QQ (R15), K7, X17 // 62c1fd0f7b0f + VCVTPD2QQ X15, K7, X7 // 62d1fd0f7bff + VCVTPD2QQ X11, K7, X7 // 62d1fd0f7bfb + VCVTPD2QQ X0, K7, X7 // 62f1fd0f7bf8 + VCVTPD2QQ -17(BP)(SI*8), K7, X7 // 62f1fd0f7bbcf5efffffff + VCVTPD2QQ (R15), K7, X7 // 62d1fd0f7b3f + VCVTPD2QQ Y0, K2, Y6 // 62f1fd2a7bf0 + VCVTPD2QQ Y19, K2, Y6 // 62b1fd2a7bf3 + VCVTPD2QQ Y31, K2, Y6 // 6291fd2a7bf7 + VCVTPD2QQ -15(R14)(R15*1), K2, Y6 // 6291fd2a7bb43ef1ffffff + VCVTPD2QQ -15(BX), K2, Y6 // 62f1fd2a7bb3f1ffffff + VCVTPD2QQ Y0, K2, Y1 // 62f1fd2a7bc8 + VCVTPD2QQ Y19, K2, Y1 // 62b1fd2a7bcb + VCVTPD2QQ Y31, K2, Y1 // 6291fd2a7bcf + VCVTPD2QQ -15(R14)(R15*1), K2, Y1 // 6291fd2a7b8c3ef1ffffff + VCVTPD2QQ -15(BX), K2, Y1 // 62f1fd2a7b8bf1ffffff + VCVTPD2QQ Y0, K2, Y9 // 6271fd2a7bc8 + VCVTPD2QQ Y19, K2, Y9 // 6231fd2a7bcb + VCVTPD2QQ Y31, K2, Y9 // 6211fd2a7bcf + VCVTPD2QQ -15(R14)(R15*1), K2, Y9 // 6211fd2a7b8c3ef1ffffff + VCVTPD2QQ -15(BX), K2, Y9 // 6271fd2a7b8bf1ffffff + VCVTPD2QQ Z12, K5, Z14 // 6251fd4d7bf4 + VCVTPD2QQ Z13, K5, Z14 // 6251fd4d7bf5 + VCVTPD2QQ Z12, K5, Z13 // 6251fd4d7bec + VCVTPD2QQ Z13, K5, Z13 // 6251fd4d7bed + VCVTPD2QQ Z2, K3, Z21 // 62e1fd4b7bea + VCVTPD2QQ Z7, K3, Z21 // 62e1fd4b7bef + VCVTPD2QQ -17(BP), K3, Z21 // 62e1fd4b7badefffffff + VCVTPD2QQ -15(R14)(R15*8), K3, Z21 // 6281fd4b7bacfef1ffffff + VCVTPD2QQ Z2, K3, Z9 // 6271fd4b7bca + VCVTPD2QQ Z7, K3, Z9 // 6271fd4b7bcf + VCVTPD2QQ -17(BP), K3, Z9 // 6271fd4b7b8defffffff + VCVTPD2QQ -15(R14)(R15*8), K3, Z9 // 6211fd4b7b8cfef1ffffff + VCVTPD2UQQ X24, K3, X7 // 6291fd0b79f8 + VCVTPD2UQQ X7, K3, X7 // 62f1fd0b79ff + VCVTPD2UQQ X0, K3, X7 // 62f1fd0b79f8 + VCVTPD2UQQ 7(SI)(DI*1), K3, X7 // 62f1fd0b79bc3e07000000 + VCVTPD2UQQ 15(DX)(BX*8), K3, X7 // 62f1fd0b79bcda0f000000 + VCVTPD2UQQ X24, K3, X13 // 6211fd0b79e8 + VCVTPD2UQQ X7, K3, X13 // 6271fd0b79ef + VCVTPD2UQQ X0, 
K3, X13 // 6271fd0b79e8 + VCVTPD2UQQ 7(SI)(DI*1), K3, X13 // 6271fd0b79ac3e07000000 + VCVTPD2UQQ 15(DX)(BX*8), K3, X13 // 6271fd0b79acda0f000000 + VCVTPD2UQQ X24, K3, X8 // 6211fd0b79c0 + VCVTPD2UQQ X7, K3, X8 // 6271fd0b79c7 + VCVTPD2UQQ X0, K3, X8 // 6271fd0b79c0 + VCVTPD2UQQ 7(SI)(DI*1), K3, X8 // 6271fd0b79843e07000000 + VCVTPD2UQQ 15(DX)(BX*8), K3, X8 // 6271fd0b7984da0f000000 + VCVTPD2UQQ Y27, K3, Y28 // 6201fd2b79e3 + VCVTPD2UQQ Y0, K3, Y28 // 6261fd2b79e0 + VCVTPD2UQQ Y11, K3, Y28 // 6241fd2b79e3 + VCVTPD2UQQ (SI), K3, Y28 // 6261fd2b7926 + VCVTPD2UQQ 7(SI)(DI*2), K3, Y28 // 6261fd2b79a47e07000000 + VCVTPD2UQQ Y27, K3, Y2 // 6291fd2b79d3 + VCVTPD2UQQ Y0, K3, Y2 // 62f1fd2b79d0 + VCVTPD2UQQ Y11, K3, Y2 // 62d1fd2b79d3 + VCVTPD2UQQ (SI), K3, Y2 // 62f1fd2b7916 + VCVTPD2UQQ 7(SI)(DI*2), K3, Y2 // 62f1fd2b79947e07000000 + VCVTPD2UQQ Y27, K3, Y24 // 6201fd2b79c3 + VCVTPD2UQQ Y0, K3, Y24 // 6261fd2b79c0 + VCVTPD2UQQ Y11, K3, Y24 // 6241fd2b79c3 + VCVTPD2UQQ (SI), K3, Y24 // 6261fd2b7906 + VCVTPD2UQQ 7(SI)(DI*2), K3, Y24 // 6261fd2b79847e07000000 + VCVTPD2UQQ Z3, K2, Z27 // 6261fd4a79db + VCVTPD2UQQ Z0, K2, Z27 // 6261fd4a79d8 + VCVTPD2UQQ Z3, K2, Z14 // 6271fd4a79f3 + VCVTPD2UQQ Z0, K2, Z14 // 6271fd4a79f0 + VCVTPD2UQQ Z8, K1, Z14 // 6251fd4979f0 + VCVTPD2UQQ Z24, K1, Z14 // 6211fd4979f0 + VCVTPD2UQQ 15(R8), K1, Z14 // 6251fd4979b00f000000 + VCVTPD2UQQ (BP), K1, Z14 // 6271fd49797500 + VCVTPD2UQQ Z8, K1, Z7 // 62d1fd4979f8 + VCVTPD2UQQ Z24, K1, Z7 // 6291fd4979f8 + VCVTPD2UQQ 15(R8), K1, Z7 // 62d1fd4979b80f000000 + VCVTPD2UQQ (BP), K1, Z7 // 62f1fd49797d00 + VCVTPS2QQ X19, K3, X15 // 62317d0b7bfb + VCVTPS2QQ X13, K3, X15 // 62517d0b7bfd + VCVTPS2QQ X2, K3, X15 // 62717d0b7bfa + VCVTPS2QQ (BX), K3, X15 // 62717d0b7b3b + VCVTPS2QQ -17(BP)(SI*1), K3, X15 // 62717d0b7bbc35efffffff + VCVTPS2QQ X19, K3, X11 // 62317d0b7bdb + VCVTPS2QQ X13, K3, X11 // 62517d0b7bdd + VCVTPS2QQ X2, K3, X11 // 62717d0b7bda + VCVTPS2QQ (BX), K3, X11 // 62717d0b7b1b + VCVTPS2QQ 
-17(BP)(SI*1), K3, X11 // 62717d0b7b9c35efffffff + VCVTPS2QQ X19, K3, X1 // 62b17d0b7bcb + VCVTPS2QQ X13, K3, X1 // 62d17d0b7bcd + VCVTPS2QQ X2, K3, X1 // 62f17d0b7bca + VCVTPS2QQ (BX), K3, X1 // 62f17d0b7b0b + VCVTPS2QQ -17(BP)(SI*1), K3, X1 // 62f17d0b7b8c35efffffff + VCVTPS2QQ X14, K7, Y20 // 62c17d2f7be6 + VCVTPS2QQ X0, K7, Y20 // 62e17d2f7be0 + VCVTPS2QQ 99(R15)(R15*1), K7, Y20 // 62817d2f7ba43f63000000 + VCVTPS2QQ (DX), K7, Y20 // 62e17d2f7b22 + VCVTPS2QQ X14, K7, Y12 // 62517d2f7be6 + VCVTPS2QQ X0, K7, Y12 // 62717d2f7be0 + VCVTPS2QQ 99(R15)(R15*1), K7, Y12 // 62117d2f7ba43f63000000 + VCVTPS2QQ (DX), K7, Y12 // 62717d2f7b22 + VCVTPS2QQ X14, K7, Y3 // 62d17d2f7bde + VCVTPS2QQ X0, K7, Y3 // 62f17d2f7bd8 + VCVTPS2QQ 99(R15)(R15*1), K7, Y3 // 62917d2f7b9c3f63000000 + VCVTPS2QQ (DX), K7, Y3 // 62f17d2f7b1a + VCVTPS2QQ Y5, K4, Z6 // 62f17d4c7bf5 + VCVTPS2QQ Y28, K4, Z6 // 62917d4c7bf4 + VCVTPS2QQ Y7, K4, Z6 // 62f17d4c7bf7 + VCVTPS2QQ Y5, K4, Z14 // 62717d4c7bf5 + VCVTPS2QQ Y28, K4, Z14 // 62117d4c7bf4 + VCVTPS2QQ Y7, K4, Z14 // 62717d4c7bf7 + VCVTPS2QQ Y0, K4, Z26 // 62617d4c7bd0 + VCVTPS2QQ Y22, K4, Z26 // 62217d4c7bd6 + VCVTPS2QQ Y13, K4, Z26 // 62417d4c7bd5 + VCVTPS2QQ 7(AX)(CX*4), K4, Z26 // 62617d4c7b948807000000 + VCVTPS2QQ 7(AX)(CX*1), K4, Z26 // 62617d4c7b940807000000 + VCVTPS2QQ Y0, K4, Z14 // 62717d4c7bf0 + VCVTPS2QQ Y22, K4, Z14 // 62317d4c7bf6 + VCVTPS2QQ Y13, K4, Z14 // 62517d4c7bf5 + VCVTPS2QQ 7(AX)(CX*4), K4, Z14 // 62717d4c7bb48807000000 + VCVTPS2QQ 7(AX)(CX*1), K4, Z14 // 62717d4c7bb40807000000 + VCVTPS2UQQ X2, K4, X2 // 62f17d0c79d2 + VCVTPS2UQQ X27, K4, X2 // 62917d0c79d3 + VCVTPS2UQQ X26, K4, X2 // 62917d0c79d2 + VCVTPS2UQQ (R8), K4, X2 // 62d17d0c7910 + VCVTPS2UQQ 15(DX)(BX*2), K4, X2 // 62f17d0c79945a0f000000 + VCVTPS2UQQ X2, K4, X24 // 62617d0c79c2 + VCVTPS2UQQ X27, K4, X24 // 62017d0c79c3 + VCVTPS2UQQ X26, K4, X24 // 62017d0c79c2 + VCVTPS2UQQ (R8), K4, X24 // 62417d0c7900 + VCVTPS2UQQ 15(DX)(BX*2), K4, X24 // 62617d0c79845a0f000000 + 
VCVTPS2UQQ X22, K2, Y31 // 62217d2a79fe + VCVTPS2UQQ X30, K2, Y31 // 62017d2a79fe + VCVTPS2UQQ X3, K2, Y31 // 62617d2a79fb + VCVTPS2UQQ 7(SI)(DI*8), K2, Y31 // 62617d2a79bcfe07000000 + VCVTPS2UQQ -15(R14), K2, Y31 // 62417d2a79bef1ffffff + VCVTPS2UQQ X22, K2, Y8 // 62317d2a79c6 + VCVTPS2UQQ X30, K2, Y8 // 62117d2a79c6 + VCVTPS2UQQ X3, K2, Y8 // 62717d2a79c3 + VCVTPS2UQQ 7(SI)(DI*8), K2, Y8 // 62717d2a7984fe07000000 + VCVTPS2UQQ -15(R14), K2, Y8 // 62517d2a7986f1ffffff + VCVTPS2UQQ X22, K2, Y1 // 62b17d2a79ce + VCVTPS2UQQ X30, K2, Y1 // 62917d2a79ce + VCVTPS2UQQ X3, K2, Y1 // 62f17d2a79cb + VCVTPS2UQQ 7(SI)(DI*8), K2, Y1 // 62f17d2a798cfe07000000 + VCVTPS2UQQ -15(R14), K2, Y1 // 62d17d2a798ef1ffffff + VCVTPS2UQQ Y28, K2, Z21 // 62817d4a79ec + VCVTPS2UQQ Y13, K2, Z21 // 62c17d4a79ed + VCVTPS2UQQ Y7, K2, Z21 // 62e17d4a79ef + VCVTPS2UQQ Y28, K2, Z13 // 62117d4a79ec + VCVTPS2UQQ Y13, K2, Z13 // 62517d4a79ed + VCVTPS2UQQ Y7, K2, Z13 // 62717d4a79ef + VCVTPS2UQQ Y2, K3, Z11 // 62717d4b79da + VCVTPS2UQQ Y21, K3, Z11 // 62317d4b79dd + VCVTPS2UQQ Y12, K3, Z11 // 62517d4b79dc + VCVTPS2UQQ 17(SP)(BP*8), K3, Z11 // 62717d4b799cec11000000 + VCVTPS2UQQ 17(SP)(BP*4), K3, Z11 // 62717d4b799cac11000000 + VCVTPS2UQQ Y2, K3, Z25 // 62617d4b79ca + VCVTPS2UQQ Y21, K3, Z25 // 62217d4b79cd + VCVTPS2UQQ Y12, K3, Z25 // 62417d4b79cc + VCVTPS2UQQ 17(SP)(BP*8), K3, Z25 // 62617d4b798cec11000000 + VCVTPS2UQQ 17(SP)(BP*4), K3, Z25 // 62617d4b798cac11000000 + VCVTQQ2PD X13, K3, X11 // 6251fe0be6dd + VCVTQQ2PD X6, K3, X11 // 6271fe0be6de + VCVTQQ2PD X12, K3, X11 // 6251fe0be6dc + VCVTQQ2PD 17(SP)(BP*1), K3, X11 // 6271fe0be69c2c11000000 + VCVTQQ2PD -7(CX)(DX*8), K3, X11 // 6271fe0be69cd1f9ffffff + VCVTQQ2PD X13, K3, X15 // 6251fe0be6fd + VCVTQQ2PD X6, K3, X15 // 6271fe0be6fe + VCVTQQ2PD X12, K3, X15 // 6251fe0be6fc + VCVTQQ2PD 17(SP)(BP*1), K3, X15 // 6271fe0be6bc2c11000000 + VCVTQQ2PD -7(CX)(DX*8), K3, X15 // 6271fe0be6bcd1f9ffffff + VCVTQQ2PD X13, K3, X30 // 6241fe0be6f5 + VCVTQQ2PD X6, K3, 
X30 // 6261fe0be6f6 + VCVTQQ2PD X12, K3, X30 // 6241fe0be6f4 + VCVTQQ2PD 17(SP)(BP*1), K3, X30 // 6261fe0be6b42c11000000 + VCVTQQ2PD -7(CX)(DX*8), K3, X30 // 6261fe0be6b4d1f9ffffff + VCVTQQ2PD Y3, K3, Y9 // 6271fe2be6cb + VCVTQQ2PD Y2, K3, Y9 // 6271fe2be6ca + VCVTQQ2PD Y9, K3, Y9 // 6251fe2be6c9 + VCVTQQ2PD 7(SI)(DI*1), K3, Y9 // 6271fe2be68c3e07000000 + VCVTQQ2PD 15(DX)(BX*8), K3, Y9 // 6271fe2be68cda0f000000 + VCVTQQ2PD Y3, K3, Y1 // 62f1fe2be6cb + VCVTQQ2PD Y2, K3, Y1 // 62f1fe2be6ca + VCVTQQ2PD Y9, K3, Y1 // 62d1fe2be6c9 + VCVTQQ2PD 7(SI)(DI*1), K3, Y1 // 62f1fe2be68c3e07000000 + VCVTQQ2PD 15(DX)(BX*8), K3, Y1 // 62f1fe2be68cda0f000000 + VCVTQQ2PD Z27, K2, Z3 // 6291fe4ae6db + VCVTQQ2PD Z15, K2, Z3 // 62d1fe4ae6df + VCVTQQ2PD Z27, K2, Z12 // 6211fe4ae6e3 + VCVTQQ2PD Z15, K2, Z12 // 6251fe4ae6e7 + VCVTQQ2PD Z23, K1, Z23 // 62a1fe49e6ff + VCVTQQ2PD Z6, K1, Z23 // 62e1fe49e6fe + VCVTQQ2PD 7(SI)(DI*4), K1, Z23 // 62e1fe49e6bcbe07000000 + VCVTQQ2PD -7(DI)(R8*2), K1, Z23 // 62a1fe49e6bc47f9ffffff + VCVTQQ2PD Z23, K1, Z5 // 62b1fe49e6ef + VCVTQQ2PD Z6, K1, Z5 // 62f1fe49e6ee + VCVTQQ2PD 7(SI)(DI*4), K1, Z5 // 62f1fe49e6acbe07000000 + VCVTQQ2PD -7(DI)(R8*2), K1, Z5 // 62b1fe49e6ac47f9ffffff + VCVTQQ2PS Z8, K2, Y12 // 6251fc4a5be0 + VCVTQQ2PS Z28, K2, Y12 // 6211fc4a5be4 + VCVTQQ2PS Z8, K2, Y21 // 62c1fc4a5be8 + VCVTQQ2PS Z28, K2, Y21 // 6281fc4a5bec + VCVTQQ2PS Z8, K2, Y14 // 6251fc4a5bf0 + VCVTQQ2PS Z28, K2, Y14 // 6211fc4a5bf4 + VCVTQQ2PS Z21, K1, Y30 // 6221fc495bf5 + VCVTQQ2PS Z5, K1, Y30 // 6261fc495bf5 + VCVTQQ2PS 17(SP), K1, Y30 // 6261fc495bb42411000000 + VCVTQQ2PS -17(BP)(SI*4), K1, Y30 // 6261fc495bb4b5efffffff + VCVTQQ2PS Z21, K1, Y26 // 6221fc495bd5 + VCVTQQ2PS Z5, K1, Y26 // 6261fc495bd5 + VCVTQQ2PS 17(SP), K1, Y26 // 6261fc495b942411000000 + VCVTQQ2PS -17(BP)(SI*4), K1, Y26 // 6261fc495b94b5efffffff + VCVTQQ2PS Z21, K1, Y7 // 62b1fc495bfd + VCVTQQ2PS Z5, K1, Y7 // 62f1fc495bfd + VCVTQQ2PS 17(SP), K1, Y7 // 62f1fc495bbc2411000000 + VCVTQQ2PS 
-17(BP)(SI*4), K1, Y7 // 62f1fc495bbcb5efffffff + VCVTQQ2PSX X20, K7, X23 // 62a1fc0f5bfc + VCVTQQ2PSX X2, K7, X23 // 62e1fc0f5bfa + VCVTQQ2PSX X9, K7, X23 // 62c1fc0f5bf9 + VCVTQQ2PSX -17(BP)(SI*2), K7, X23 // 62e1fc0f5bbc75efffffff + VCVTQQ2PSX 7(AX)(CX*2), K7, X23 // 62e1fc0f5bbc4807000000 + VCVTQQ2PSX X20, K7, X30 // 6221fc0f5bf4 + VCVTQQ2PSX X2, K7, X30 // 6261fc0f5bf2 + VCVTQQ2PSX X9, K7, X30 // 6241fc0f5bf1 + VCVTQQ2PSX -17(BP)(SI*2), K7, X30 // 6261fc0f5bb475efffffff + VCVTQQ2PSX 7(AX)(CX*2), K7, X30 // 6261fc0f5bb44807000000 + VCVTQQ2PSX X20, K7, X8 // 6231fc0f5bc4 + VCVTQQ2PSX X2, K7, X8 // 6271fc0f5bc2 + VCVTQQ2PSX X9, K7, X8 // 6251fc0f5bc1 + VCVTQQ2PSX -17(BP)(SI*2), K7, X8 // 6271fc0f5b8475efffffff + VCVTQQ2PSX 7(AX)(CX*2), K7, X8 // 6271fc0f5b844807000000 + VCVTQQ2PSY Y16, K1, X26 // 6221fc295bd0 + VCVTQQ2PSY Y1, K1, X26 // 6261fc295bd1 + VCVTQQ2PSY Y30, K1, X26 // 6201fc295bd6 + VCVTQQ2PSY -7(DI)(R8*1), K1, X26 // 6221fc295b9407f9ffffff + VCVTQQ2PSY (SP), K1, X26 // 6261fc295b1424 + VCVTQQ2PSY Y16, K1, X19 // 62a1fc295bd8 + VCVTQQ2PSY Y1, K1, X19 // 62e1fc295bd9 + VCVTQQ2PSY Y30, K1, X19 // 6281fc295bde + VCVTQQ2PSY -7(DI)(R8*1), K1, X19 // 62a1fc295b9c07f9ffffff + VCVTQQ2PSY (SP), K1, X19 // 62e1fc295b1c24 + VCVTQQ2PSY Y16, K1, X0 // 62b1fc295bc0 + VCVTQQ2PSY Y1, K1, X0 // 62f1fc295bc1 + VCVTQQ2PSY Y30, K1, X0 // 6291fc295bc6 + VCVTQQ2PSY -7(DI)(R8*1), K1, X0 // 62b1fc295b8407f9ffffff + VCVTQQ2PSY (SP), K1, X0 // 62f1fc295b0424 + VCVTTPD2QQ X6, K5, X6 // 62f1fd0d7af6 + VCVTTPD2QQ X1, K5, X6 // 62f1fd0d7af1 + VCVTTPD2QQ X8, K5, X6 // 62d1fd0d7af0 + VCVTTPD2QQ (R14), K5, X6 // 62d1fd0d7a36 + VCVTTPD2QQ -7(DI)(R8*8), K5, X6 // 62b1fd0d7ab4c7f9ffffff + VCVTTPD2QQ X6, K5, X17 // 62e1fd0d7ace + VCVTTPD2QQ X1, K5, X17 // 62e1fd0d7ac9 + VCVTTPD2QQ X8, K5, X17 // 62c1fd0d7ac8 + VCVTTPD2QQ (R14), K5, X17 // 62c1fd0d7a0e + VCVTTPD2QQ -7(DI)(R8*8), K5, X17 // 62a1fd0d7a8cc7f9ffffff + VCVTTPD2QQ X6, K5, X28 // 6261fd0d7ae6 + VCVTTPD2QQ X1, K5, X28 // 
6261fd0d7ae1 + VCVTTPD2QQ X8, K5, X28 // 6241fd0d7ae0 + VCVTTPD2QQ (R14), K5, X28 // 6241fd0d7a26 + VCVTTPD2QQ -7(DI)(R8*8), K5, X28 // 6221fd0d7aa4c7f9ffffff + VCVTTPD2QQ Y14, K7, Y24 // 6241fd2f7ac6 + VCVTTPD2QQ Y21, K7, Y24 // 6221fd2f7ac5 + VCVTTPD2QQ Y1, K7, Y24 // 6261fd2f7ac1 + VCVTTPD2QQ 99(R15)(R15*8), K7, Y24 // 6201fd2f7a84ff63000000 + VCVTTPD2QQ 7(AX)(CX*8), K7, Y24 // 6261fd2f7a84c807000000 + VCVTTPD2QQ Y14, K7, Y13 // 6251fd2f7aee + VCVTTPD2QQ Y21, K7, Y13 // 6231fd2f7aed + VCVTTPD2QQ Y1, K7, Y13 // 6271fd2f7ae9 + VCVTTPD2QQ 99(R15)(R15*8), K7, Y13 // 6211fd2f7aacff63000000 + VCVTTPD2QQ 7(AX)(CX*8), K7, Y13 // 6271fd2f7aacc807000000 + VCVTTPD2QQ Y14, K7, Y20 // 62c1fd2f7ae6 + VCVTTPD2QQ Y21, K7, Y20 // 62a1fd2f7ae5 + VCVTTPD2QQ Y1, K7, Y20 // 62e1fd2f7ae1 + VCVTTPD2QQ 99(R15)(R15*8), K7, Y20 // 6281fd2f7aa4ff63000000 + VCVTTPD2QQ 7(AX)(CX*8), K7, Y20 // 62e1fd2f7aa4c807000000 + VCVTTPD2QQ Z6, K7, Z22 // 62e1fd4f7af6 + VCVTTPD2QQ Z8, K7, Z22 // 62c1fd4f7af0 + VCVTTPD2QQ Z6, K7, Z11 // 6271fd4f7ade + VCVTTPD2QQ Z8, K7, Z11 // 6251fd4f7ad8 + VCVTTPD2QQ Z12, K6, Z25 // 6241fd4e7acc + VCVTTPD2QQ Z17, K6, Z25 // 6221fd4e7ac9 + VCVTTPD2QQ 99(R15)(R15*1), K6, Z25 // 6201fd4e7a8c3f63000000 + VCVTTPD2QQ (DX), K6, Z25 // 6261fd4e7a0a + VCVTTPD2QQ Z12, K6, Z12 // 6251fd4e7ae4 + VCVTTPD2QQ Z17, K6, Z12 // 6231fd4e7ae1 + VCVTTPD2QQ 99(R15)(R15*1), K6, Z12 // 6211fd4e7aa43f63000000 + VCVTTPD2QQ (DX), K6, Z12 // 6271fd4e7a22 + VCVTTPD2UQQ X15, K7, X16 // 62c1fd0f78c7 + VCVTTPD2UQQ X11, K7, X16 // 62c1fd0f78c3 + VCVTTPD2UQQ X1, K7, X16 // 62e1fd0f78c1 + VCVTTPD2UQQ (CX), K7, X16 // 62e1fd0f7801 + VCVTTPD2UQQ 99(R15), K7, X16 // 62c1fd0f788763000000 + VCVTTPD2UQQ X15, K7, X28 // 6241fd0f78e7 + VCVTTPD2UQQ X11, K7, X28 // 6241fd0f78e3 + VCVTTPD2UQQ X1, K7, X28 // 6261fd0f78e1 + VCVTTPD2UQQ (CX), K7, X28 // 6261fd0f7821 + VCVTTPD2UQQ 99(R15), K7, X28 // 6241fd0f78a763000000 + VCVTTPD2UQQ X15, K7, X8 // 6251fd0f78c7 + VCVTTPD2UQQ X11, K7, X8 // 6251fd0f78c3 + VCVTTPD2UQQ 
X1, K7, X8 // 6271fd0f78c1 + VCVTTPD2UQQ (CX), K7, X8 // 6271fd0f7801 + VCVTTPD2UQQ 99(R15), K7, X8 // 6251fd0f788763000000 + VCVTTPD2UQQ Y21, K2, Y5 // 62b1fd2a78ed + VCVTTPD2UQQ Y7, K2, Y5 // 62f1fd2a78ef + VCVTTPD2UQQ Y30, K2, Y5 // 6291fd2a78ee + VCVTTPD2UQQ (BX), K2, Y5 // 62f1fd2a782b + VCVTTPD2UQQ -17(BP)(SI*1), K2, Y5 // 62f1fd2a78ac35efffffff + VCVTTPD2UQQ Y21, K2, Y17 // 62a1fd2a78cd + VCVTTPD2UQQ Y7, K2, Y17 // 62e1fd2a78cf + VCVTTPD2UQQ Y30, K2, Y17 // 6281fd2a78ce + VCVTTPD2UQQ (BX), K2, Y17 // 62e1fd2a780b + VCVTTPD2UQQ -17(BP)(SI*1), K2, Y17 // 62e1fd2a788c35efffffff + VCVTTPD2UQQ Y21, K2, Y13 // 6231fd2a78ed + VCVTTPD2UQQ Y7, K2, Y13 // 6271fd2a78ef + VCVTTPD2UQQ Y30, K2, Y13 // 6211fd2a78ee + VCVTTPD2UQQ (BX), K2, Y13 // 6271fd2a782b + VCVTTPD2UQQ -17(BP)(SI*1), K2, Y13 // 6271fd2a78ac35efffffff + VCVTTPD2UQQ Z8, K5, Z3 // 62d1fd4d78d8 + VCVTTPD2UQQ Z2, K5, Z3 // 62f1fd4d78da + VCVTTPD2UQQ Z8, K5, Z21 // 62c1fd4d78e8 + VCVTTPD2UQQ Z2, K5, Z21 // 62e1fd4d78ea + VCVTTPD2UQQ Z7, K3, Z3 // 62f1fd4b78df + VCVTTPD2UQQ Z9, K3, Z3 // 62d1fd4b78d9 + VCVTTPD2UQQ 7(SI)(DI*8), K3, Z3 // 62f1fd4b789cfe07000000 + VCVTTPD2UQQ -15(R14), K3, Z3 // 62d1fd4b789ef1ffffff + VCVTTPD2UQQ Z7, K3, Z27 // 6261fd4b78df + VCVTTPD2UQQ Z9, K3, Z27 // 6241fd4b78d9 + VCVTTPD2UQQ 7(SI)(DI*8), K3, Z27 // 6261fd4b789cfe07000000 + VCVTTPD2UQQ -15(R14), K3, Z27 // 6241fd4b789ef1ffffff + VCVTTPS2QQ X18, K3, X25 // 62217d0b7aca + VCVTTPS2QQ X11, K3, X25 // 62417d0b7acb + VCVTTPS2QQ X9, K3, X25 // 62417d0b7ac9 + VCVTTPS2QQ -7(CX)(DX*1), K3, X25 // 62617d0b7a8c11f9ffffff + VCVTTPS2QQ -15(R14)(R15*4), K3, X25 // 62017d0b7a8cbef1ffffff + VCVTTPS2QQ X18, K3, X11 // 62317d0b7ada + VCVTTPS2QQ X11, K3, X11 // 62517d0b7adb + VCVTTPS2QQ X9, K3, X11 // 62517d0b7ad9 + VCVTTPS2QQ -7(CX)(DX*1), K3, X11 // 62717d0b7a9c11f9ffffff + VCVTTPS2QQ -15(R14)(R15*4), K3, X11 // 62117d0b7a9cbef1ffffff + VCVTTPS2QQ X18, K3, X17 // 62a17d0b7aca + VCVTTPS2QQ X11, K3, X17 // 62c17d0b7acb + VCVTTPS2QQ X9, K3, X17 // 
62c17d0b7ac9 + VCVTTPS2QQ -7(CX)(DX*1), K3, X17 // 62e17d0b7a8c11f9ffffff + VCVTTPS2QQ -15(R14)(R15*4), K3, X17 // 62817d0b7a8cbef1ffffff + VCVTTPS2QQ X2, K3, Y5 // 62f17d2b7aea + VCVTTPS2QQ X24, K3, Y5 // 62917d2b7ae8 + VCVTTPS2QQ (R8), K3, Y5 // 62d17d2b7a28 + VCVTTPS2QQ 15(DX)(BX*2), K3, Y5 // 62f17d2b7aac5a0f000000 + VCVTTPS2QQ X2, K3, Y24 // 62617d2b7ac2 + VCVTTPS2QQ X24, K3, Y24 // 62017d2b7ac0 + VCVTTPS2QQ (R8), K3, Y24 // 62417d2b7a00 + VCVTTPS2QQ 15(DX)(BX*2), K3, Y24 // 62617d2b7a845a0f000000 + VCVTTPS2QQ X2, K3, Y21 // 62e17d2b7aea + VCVTTPS2QQ X24, K3, Y21 // 62817d2b7ae8 + VCVTTPS2QQ (R8), K3, Y21 // 62c17d2b7a28 + VCVTTPS2QQ 15(DX)(BX*2), K3, Y21 // 62e17d2b7aac5a0f000000 + VCVTTPS2QQ Y16, K2, Z12 // 62317d4a7ae0 + VCVTTPS2QQ Y9, K2, Z12 // 62517d4a7ae1 + VCVTTPS2QQ Y13, K2, Z12 // 62517d4a7ae5 + VCVTTPS2QQ Y16, K2, Z22 // 62a17d4a7af0 + VCVTTPS2QQ Y9, K2, Z22 // 62c17d4a7af1 + VCVTTPS2QQ Y13, K2, Z22 // 62c17d4a7af5 + VCVTTPS2QQ Y9, K1, Z11 // 62517d497ad9 + VCVTTPS2QQ Y6, K1, Z11 // 62717d497ade + VCVTTPS2QQ Y3, K1, Z11 // 62717d497adb + VCVTTPS2QQ -7(DI)(R8*1), K1, Z11 // 62317d497a9c07f9ffffff + VCVTTPS2QQ (SP), K1, Z11 // 62717d497a1c24 + VCVTTPS2QQ Y9, K1, Z5 // 62d17d497ae9 + VCVTTPS2QQ Y6, K1, Z5 // 62f17d497aee + VCVTTPS2QQ Y3, K1, Z5 // 62f17d497aeb + VCVTTPS2QQ -7(DI)(R8*1), K1, Z5 // 62b17d497aac07f9ffffff + VCVTTPS2QQ (SP), K1, Z5 // 62f17d497a2c24 + VCVTTPS2UQQ X13, K1, X11 // 62517d0978dd + VCVTTPS2UQQ X6, K1, X11 // 62717d0978de + VCVTTPS2UQQ X12, K1, X11 // 62517d0978dc + VCVTTPS2UQQ -17(BP), K1, X11 // 62717d09789defffffff + VCVTTPS2UQQ -15(R14)(R15*8), K1, X11 // 62117d09789cfef1ffffff + VCVTTPS2UQQ X13, K1, X15 // 62517d0978fd + VCVTTPS2UQQ X6, K1, X15 // 62717d0978fe + VCVTTPS2UQQ X12, K1, X15 // 62517d0978fc + VCVTTPS2UQQ -17(BP), K1, X15 // 62717d0978bdefffffff + VCVTTPS2UQQ -15(R14)(R15*8), K1, X15 // 62117d0978bcfef1ffffff + VCVTTPS2UQQ X13, K1, X30 // 62417d0978f5 + VCVTTPS2UQQ X6, K1, X30 // 62617d0978f6 + VCVTTPS2UQQ X12, 
K1, X30 // 62417d0978f4 + VCVTTPS2UQQ -17(BP), K1, X30 // 62617d0978b5efffffff + VCVTTPS2UQQ -15(R14)(R15*8), K1, X30 // 62017d0978b4fef1ffffff + VCVTTPS2UQQ X23, K1, Y14 // 62317d2978f7 + VCVTTPS2UQQ X30, K1, Y14 // 62117d2978f6 + VCVTTPS2UQQ X8, K1, Y14 // 62517d2978f0 + VCVTTPS2UQQ -17(BP)(SI*2), K1, Y14 // 62717d2978b475efffffff + VCVTTPS2UQQ 7(AX)(CX*2), K1, Y14 // 62717d2978b44807000000 + VCVTTPS2UQQ X23, K1, Y18 // 62a17d2978d7 + VCVTTPS2UQQ X30, K1, Y18 // 62817d2978d6 + VCVTTPS2UQQ X8, K1, Y18 // 62c17d2978d0 + VCVTTPS2UQQ -17(BP)(SI*2), K1, Y18 // 62e17d29789475efffffff + VCVTTPS2UQQ 7(AX)(CX*2), K1, Y18 // 62e17d2978944807000000 + VCVTTPS2UQQ X23, K1, Y31 // 62217d2978ff + VCVTTPS2UQQ X30, K1, Y31 // 62017d2978fe + VCVTTPS2UQQ X8, K1, Y31 // 62417d2978f8 + VCVTTPS2UQQ -17(BP)(SI*2), K1, Y31 // 62617d2978bc75efffffff + VCVTTPS2UQQ 7(AX)(CX*2), K1, Y31 // 62617d2978bc4807000000 + VCVTTPS2UQQ Y18, K7, Z6 // 62b17d4f78f2 + VCVTTPS2UQQ Y3, K7, Z6 // 62f17d4f78f3 + VCVTTPS2UQQ Y24, K7, Z6 // 62917d4f78f0 + VCVTTPS2UQQ Y18, K7, Z22 // 62a17d4f78f2 + VCVTTPS2UQQ Y3, K7, Z22 // 62e17d4f78f3 + VCVTTPS2UQQ Y24, K7, Z22 // 62817d4f78f0 + VCVTTPS2UQQ Y2, K2, Z1 // 62f17d4a78ca + VCVTTPS2UQQ Y7, K2, Z1 // 62f17d4a78cf + VCVTTPS2UQQ Y21, K2, Z1 // 62b17d4a78cd + VCVTTPS2UQQ 99(R15)(R15*8), K2, Z1 // 62917d4a788cff63000000 + VCVTTPS2UQQ 7(AX)(CX*8), K2, Z1 // 62f17d4a788cc807000000 + VCVTTPS2UQQ Y2, K2, Z15 // 62717d4a78fa + VCVTTPS2UQQ Y7, K2, Z15 // 62717d4a78ff + VCVTTPS2UQQ Y21, K2, Z15 // 62317d4a78fd + VCVTTPS2UQQ 99(R15)(R15*8), K2, Z15 // 62117d4a78bcff63000000 + VCVTTPS2UQQ 7(AX)(CX*8), K2, Z15 // 62717d4a78bcc807000000 + VCVTUQQ2PD X13, K6, X21 // 62c1fe0e7aed + VCVTUQQ2PD X0, K6, X21 // 62e1fe0e7ae8 + VCVTUQQ2PD X30, K6, X21 // 6281fe0e7aee + VCVTUQQ2PD 15(R8)(R14*8), K6, X21 // 6281fe0e7aacf00f000000 + VCVTUQQ2PD -15(R14)(R15*2), K6, X21 // 6281fe0e7aac7ef1ffffff + VCVTUQQ2PD X13, K6, X1 // 62d1fe0e7acd + VCVTUQQ2PD X0, K6, X1 // 62f1fe0e7ac8 + VCVTUQQ2PD 
X30, K6, X1 // 6291fe0e7ace + VCVTUQQ2PD 15(R8)(R14*8), K6, X1 // 6291fe0e7a8cf00f000000 + VCVTUQQ2PD -15(R14)(R15*2), K6, X1 // 6291fe0e7a8c7ef1ffffff + VCVTUQQ2PD X13, K6, X11 // 6251fe0e7add + VCVTUQQ2PD X0, K6, X11 // 6271fe0e7ad8 + VCVTUQQ2PD X30, K6, X11 // 6211fe0e7ade + VCVTUQQ2PD 15(R8)(R14*8), K6, X11 // 6211fe0e7a9cf00f000000 + VCVTUQQ2PD -15(R14)(R15*2), K6, X11 // 6211fe0e7a9c7ef1ffffff + VCVTUQQ2PD Y11, K3, Y28 // 6241fe2b7ae3 + VCVTUQQ2PD Y27, K3, Y28 // 6201fe2b7ae3 + VCVTUQQ2PD Y17, K3, Y28 // 6221fe2b7ae1 + VCVTUQQ2PD 99(R15)(R15*4), K3, Y28 // 6201fe2b7aa4bf63000000 + VCVTUQQ2PD 15(DX), K3, Y28 // 6261fe2b7aa20f000000 + VCVTUQQ2PD Y11, K3, Y1 // 62d1fe2b7acb + VCVTUQQ2PD Y27, K3, Y1 // 6291fe2b7acb + VCVTUQQ2PD Y17, K3, Y1 // 62b1fe2b7ac9 + VCVTUQQ2PD 99(R15)(R15*4), K3, Y1 // 6291fe2b7a8cbf63000000 + VCVTUQQ2PD 15(DX), K3, Y1 // 62f1fe2b7a8a0f000000 + VCVTUQQ2PD Y11, K3, Y8 // 6251fe2b7ac3 + VCVTUQQ2PD Y27, K3, Y8 // 6211fe2b7ac3 + VCVTUQQ2PD Y17, K3, Y8 // 6231fe2b7ac1 + VCVTUQQ2PD 99(R15)(R15*4), K3, Y8 // 6211fe2b7a84bf63000000 + VCVTUQQ2PD 15(DX), K3, Y8 // 6271fe2b7a820f000000 + VCVTUQQ2PD Z12, K7, Z1 // 62d1fe4f7acc + VCVTUQQ2PD Z16, K7, Z1 // 62b1fe4f7ac8 + VCVTUQQ2PD Z12, K7, Z3 // 62d1fe4f7adc + VCVTUQQ2PD Z16, K7, Z3 // 62b1fe4f7ad8 + VCVTUQQ2PD Z14, K4, Z28 // 6241fe4c7ae6 + VCVTUQQ2PD Z28, K4, Z28 // 6201fe4c7ae4 + VCVTUQQ2PD 15(R8)(R14*4), K4, Z28 // 6201fe4c7aa4b00f000000 + VCVTUQQ2PD -7(CX)(DX*4), K4, Z28 // 6261fe4c7aa491f9ffffff + VCVTUQQ2PD Z14, K4, Z13 // 6251fe4c7aee + VCVTUQQ2PD Z28, K4, Z13 // 6211fe4c7aec + VCVTUQQ2PD 15(R8)(R14*4), K4, Z13 // 6211fe4c7aacb00f000000 + VCVTUQQ2PD -7(CX)(DX*4), K4, Z13 // 6271fe4c7aac91f9ffffff + VCVTUQQ2PS Z3, K4, Y16 // 62e1ff4c7ac3 + VCVTUQQ2PS Z12, K4, Y16 // 62c1ff4c7ac4 + VCVTUQQ2PS Z3, K4, Y12 // 6271ff4c7ae3 + VCVTUQQ2PS Z12, K4, Y12 // 6251ff4c7ae4 + VCVTUQQ2PS Z3, K4, Y6 // 62f1ff4c7af3 + VCVTUQQ2PS Z12, K4, Y6 // 62d1ff4c7af4 + VCVTUQQ2PS Z15, K7, Y26 // 6241ff4f7ad7 + VCVTUQQ2PS 
Z30, K7, Y26 // 6201ff4f7ad6 + VCVTUQQ2PS (R8), K7, Y26 // 6241ff4f7a10 + VCVTUQQ2PS 15(DX)(BX*2), K7, Y26 // 6261ff4f7a945a0f000000 + VCVTUQQ2PS Z15, K7, Y3 // 62d1ff4f7adf + VCVTUQQ2PS Z30, K7, Y3 // 6291ff4f7ade + VCVTUQQ2PS (R8), K7, Y3 // 62d1ff4f7a18 + VCVTUQQ2PS 15(DX)(BX*2), K7, Y3 // 62f1ff4f7a9c5a0f000000 + VCVTUQQ2PS Z15, K7, Y8 // 6251ff4f7ac7 + VCVTUQQ2PS Z30, K7, Y8 // 6211ff4f7ac6 + VCVTUQQ2PS (R8), K7, Y8 // 6251ff4f7a00 + VCVTUQQ2PS 15(DX)(BX*2), K7, Y8 // 6271ff4f7a845a0f000000 + VCVTUQQ2PSX X14, K2, X16 // 62c1ff0a7ac6 + VCVTUQQ2PSX X19, K2, X16 // 62a1ff0a7ac3 + VCVTUQQ2PSX X8, K2, X16 // 62c1ff0a7ac0 + VCVTUQQ2PSX -15(R14)(R15*1), K2, X16 // 6281ff0a7a843ef1ffffff + VCVTUQQ2PSX -15(BX), K2, X16 // 62e1ff0a7a83f1ffffff + VCVTUQQ2PSX X14, K2, X14 // 6251ff0a7af6 + VCVTUQQ2PSX X19, K2, X14 // 6231ff0a7af3 + VCVTUQQ2PSX X8, K2, X14 // 6251ff0a7af0 + VCVTUQQ2PSX -15(R14)(R15*1), K2, X14 // 6211ff0a7ab43ef1ffffff + VCVTUQQ2PSX -15(BX), K2, X14 // 6271ff0a7ab3f1ffffff + VCVTUQQ2PSX X14, K2, X11 // 6251ff0a7ade + VCVTUQQ2PSX X19, K2, X11 // 6231ff0a7adb + VCVTUQQ2PSX X8, K2, X11 // 6251ff0a7ad8 + VCVTUQQ2PSX -15(R14)(R15*1), K2, X11 // 6211ff0a7a9c3ef1ffffff + VCVTUQQ2PSX -15(BX), K2, X11 // 6271ff0a7a9bf1ffffff + VCVTUQQ2PSY Y28, K5, X8 // 6211ff2d7ac4 + VCVTUQQ2PSY Y1, K5, X8 // 6271ff2d7ac1 + VCVTUQQ2PSY Y23, K5, X8 // 6231ff2d7ac7 + VCVTUQQ2PSY (CX), K5, X8 // 6271ff2d7a01 + VCVTUQQ2PSY 99(R15), K5, X8 // 6251ff2d7a8763000000 + VCVTUQQ2PSY Y28, K5, X26 // 6201ff2d7ad4 + VCVTUQQ2PSY Y1, K5, X26 // 6261ff2d7ad1 + VCVTUQQ2PSY Y23, K5, X26 // 6221ff2d7ad7 + VCVTUQQ2PSY (CX), K5, X26 // 6261ff2d7a11 + VCVTUQQ2PSY 99(R15), K5, X26 // 6241ff2d7a9763000000 + VCVTUQQ2PSY Y28, K5, X23 // 6281ff2d7afc + VCVTUQQ2PSY Y1, K5, X23 // 62e1ff2d7af9 + VCVTUQQ2PSY Y23, K5, X23 // 62a1ff2d7aff + VCVTUQQ2PSY (CX), K5, X23 // 62e1ff2d7a39 + VCVTUQQ2PSY 99(R15), K5, X23 // 62c1ff2d7abf63000000 + VEXTRACTF32X8 $0, Z12, K4, Y18 // 62337d4c1be200 + VEXTRACTF32X8 $0, Z13, 
K4, Y18 // 62337d4c1bea00 + VEXTRACTF32X8 $0, Z12, K4, Y24 // 62137d4c1be000 + VEXTRACTF32X8 $0, Z13, K4, Y24 // 62137d4c1be800 + VEXTRACTF32X8 $0, Z12, K4, Y9 // 62537d4c1be100 + VEXTRACTF32X8 $0, Z13, K4, Y9 // 62537d4c1be900 + VEXTRACTF32X8 $0, Z12, K4, 15(R8) // 62537d4c1ba00f00000000 + VEXTRACTF32X8 $0, Z13, K4, 15(R8) // 62537d4c1ba80f00000000 + VEXTRACTF32X8 $0, Z12, K4, (BP) // 62737d4c1b650000 + VEXTRACTF32X8 $0, Z13, K4, (BP) // 62737d4c1b6d0000 + VEXTRACTF64X2 $1, Y3, K4, X8 // 62d3fd2c19d801 + VEXTRACTF64X2 $1, Y19, K4, X8 // 62c3fd2c19d801 + VEXTRACTF64X2 $1, Y23, K4, X8 // 62c3fd2c19f801 + VEXTRACTF64X2 $1, Y3, K4, X1 // 62f3fd2c19d901 + VEXTRACTF64X2 $1, Y19, K4, X1 // 62e3fd2c19d901 + VEXTRACTF64X2 $1, Y23, K4, X1 // 62e3fd2c19f901 + VEXTRACTF64X2 $1, Y3, K4, X0 // 62f3fd2c19d801 + VEXTRACTF64X2 $1, Y19, K4, X0 // 62e3fd2c19d801 + VEXTRACTF64X2 $1, Y23, K4, X0 // 62e3fd2c19f801 + VEXTRACTF64X2 $1, Y3, K4, -17(BP)(SI*8) // 62f3fd2c199cf5efffffff01 + VEXTRACTF64X2 $1, Y19, K4, -17(BP)(SI*8) // 62e3fd2c199cf5efffffff01 + VEXTRACTF64X2 $1, Y23, K4, -17(BP)(SI*8) // 62e3fd2c19bcf5efffffff01 + VEXTRACTF64X2 $1, Y3, K4, (R15) // 62d3fd2c191f01 + VEXTRACTF64X2 $1, Y19, K4, (R15) // 62c3fd2c191f01 + VEXTRACTF64X2 $1, Y23, K4, (R15) // 62c3fd2c193f01 + VEXTRACTF64X2 $0, Z21, K7, X15 // 62c3fd4f19ef00 + VEXTRACTF64X2 $0, Z9, K7, X15 // 6253fd4f19cf00 + VEXTRACTF64X2 $0, Z21, K7, X0 // 62e3fd4f19e800 + VEXTRACTF64X2 $0, Z9, K7, X0 // 6273fd4f19c800 + VEXTRACTF64X2 $0, Z21, K7, X16 // 62a3fd4f19e800 + VEXTRACTF64X2 $0, Z9, K7, X16 // 6233fd4f19c800 + VEXTRACTF64X2 $0, Z21, K7, 7(SI)(DI*8) // 62e3fd4f19acfe0700000000 + VEXTRACTF64X2 $0, Z9, K7, 7(SI)(DI*8) // 6273fd4f198cfe0700000000 + VEXTRACTF64X2 $0, Z21, K7, -15(R14) // 62c3fd4f19aef1ffffff00 + VEXTRACTF64X2 $0, Z9, K7, -15(R14) // 6253fd4f198ef1ffffff00 + VEXTRACTI32X8 $1, Z23, K4, Y21 // 62a37d4c3bfd01 + VEXTRACTI32X8 $1, Z9, K4, Y21 // 62337d4c3bcd01 + VEXTRACTI32X8 $1, Z23, K4, Y20 // 62a37d4c3bfc01 + 
VEXTRACTI32X8 $1, Z9, K4, Y20 // 62337d4c3bcc01 + VEXTRACTI32X8 $1, Z23, K4, Y6 // 62e37d4c3bfe01 + VEXTRACTI32X8 $1, Z9, K4, Y6 // 62737d4c3bce01 + VEXTRACTI32X8 $1, Z23, K4, -15(R14)(R15*1) // 62837d4c3bbc3ef1ffffff01 + VEXTRACTI32X8 $1, Z9, K4, -15(R14)(R15*1) // 62137d4c3b8c3ef1ffffff01 + VEXTRACTI32X8 $1, Z23, K4, -15(BX) // 62e37d4c3bbbf1ffffff01 + VEXTRACTI32X8 $1, Z9, K4, -15(BX) // 62737d4c3b8bf1ffffff01 + VEXTRACTI64X2 $0, Y31, K2, X7 // 6263fd2a39ff00 + VEXTRACTI64X2 $0, Y6, K2, X7 // 62f3fd2a39f700 + VEXTRACTI64X2 $0, Y11, K2, X7 // 6273fd2a39df00 + VEXTRACTI64X2 $0, Y31, K2, X16 // 6223fd2a39f800 + VEXTRACTI64X2 $0, Y6, K2, X16 // 62b3fd2a39f000 + VEXTRACTI64X2 $0, Y11, K2, X16 // 6233fd2a39d800 + VEXTRACTI64X2 $0, Y31, K2, X31 // 6203fd2a39ff00 + VEXTRACTI64X2 $0, Y6, K2, X31 // 6293fd2a39f700 + VEXTRACTI64X2 $0, Y11, K2, X31 // 6213fd2a39df00 + VEXTRACTI64X2 $0, Y31, K2, -7(CX) // 6263fd2a39b9f9ffffff00 + VEXTRACTI64X2 $0, Y6, K2, -7(CX) // 62f3fd2a39b1f9ffffff00 + VEXTRACTI64X2 $0, Y11, K2, -7(CX) // 6273fd2a3999f9ffffff00 + VEXTRACTI64X2 $0, Y31, K2, 15(DX)(BX*4) // 6263fd2a39bc9a0f00000000 + VEXTRACTI64X2 $0, Y6, K2, 15(DX)(BX*4) // 62f3fd2a39b49a0f00000000 + VEXTRACTI64X2 $0, Y11, K2, 15(DX)(BX*4) // 6273fd2a399c9a0f00000000 + VEXTRACTI64X2 $2, Z27, K2, X1 // 6263fd4a39d902 + VEXTRACTI64X2 $2, Z14, K2, X1 // 6273fd4a39f102 + VEXTRACTI64X2 $2, Z27, K2, X7 // 6263fd4a39df02 + VEXTRACTI64X2 $2, Z14, K2, X7 // 6273fd4a39f702 + VEXTRACTI64X2 $2, Z27, K2, X9 // 6243fd4a39d902 + VEXTRACTI64X2 $2, Z14, K2, X9 // 6253fd4a39f102 + VEXTRACTI64X2 $2, Z27, K2, 99(R15)(R15*8) // 6203fd4a399cff6300000002 + VEXTRACTI64X2 $2, Z14, K2, 99(R15)(R15*8) // 6213fd4a39b4ff6300000002 + VEXTRACTI64X2 $2, Z27, K2, 7(AX)(CX*8) // 6263fd4a399cc80700000002 + VEXTRACTI64X2 $2, Z14, K2, 7(AX)(CX*8) // 6273fd4a39b4c80700000002 + VFPCLASSPDX $65, X14, K4, K1 // 62d3fd0c66ce41 + VFPCLASSPDX $65, X19, K4, K1 // 62b3fd0c66cb41 + VFPCLASSPDX $65, X8, K4, K1 // 62d3fd0c66c841 + 
VFPCLASSPDX $65, (R14), K4, K1 // 62d3fd0c660e41 + VFPCLASSPDX $65, -7(DI)(R8*8), K4, K1 // 62b3fd0c668cc7f9ffffff41 + VFPCLASSPDX $65, X14, K4, K3 // 62d3fd0c66de41 + VFPCLASSPDX $65, X19, K4, K3 // 62b3fd0c66db41 + VFPCLASSPDX $65, X8, K4, K3 // 62d3fd0c66d841 + VFPCLASSPDX $65, (R14), K4, K3 // 62d3fd0c661e41 + VFPCLASSPDX $65, -7(DI)(R8*8), K4, K3 // 62b3fd0c669cc7f9ffffff41 + VFPCLASSPDY $67, Y31, K1, K6 // 6293fd2966f743 + VFPCLASSPDY $67, Y5, K1, K6 // 62f3fd2966f543 + VFPCLASSPDY $67, Y0, K1, K6 // 62f3fd2966f043 + VFPCLASSPDY $67, 7(SI)(DI*8), K1, K6 // 62f3fd2966b4fe0700000043 + VFPCLASSPDY $67, -15(R14), K1, K6 // 62d3fd2966b6f1ffffff43 + VFPCLASSPDY $67, Y31, K1, K7 // 6293fd2966ff43 + VFPCLASSPDY $67, Y5, K1, K7 // 62f3fd2966fd43 + VFPCLASSPDY $67, Y0, K1, K7 // 62f3fd2966f843 + VFPCLASSPDY $67, 7(SI)(DI*8), K1, K7 // 62f3fd2966bcfe0700000043 + VFPCLASSPDY $67, -15(R14), K1, K7 // 62d3fd2966bef1ffffff43 + VFPCLASSPDZ $127, Z3, K3, K6 // 62f3fd4b66f37f + VFPCLASSPDZ $127, Z27, K3, K6 // 6293fd4b66f37f + VFPCLASSPDZ $127, 7(AX)(CX*4), K3, K6 // 62f3fd4b66b488070000007f + VFPCLASSPDZ $127, 7(AX)(CX*1), K3, K6 // 62f3fd4b66b408070000007f + VFPCLASSPDZ $127, Z3, K3, K4 // 62f3fd4b66e37f + VFPCLASSPDZ $127, Z27, K3, K4 // 6293fd4b66e37f + VFPCLASSPDZ $127, 7(AX)(CX*4), K3, K4 // 62f3fd4b66a488070000007f + VFPCLASSPDZ $127, 7(AX)(CX*1), K3, K4 // 62f3fd4b66a408070000007f + VFPCLASSPSX $0, X8, K4, K4 // 62d37d0c66e000 + VFPCLASSPSX $0, X26, K4, K4 // 62937d0c66e200 + VFPCLASSPSX $0, X23, K4, K4 // 62b37d0c66e700 + VFPCLASSPSX $0, 99(R15)(R15*4), K4, K4 // 62937d0c66a4bf6300000000 + VFPCLASSPSX $0, 15(DX), K4, K4 // 62f37d0c66a20f00000000 + VFPCLASSPSX $0, X8, K4, K6 // 62d37d0c66f000 + VFPCLASSPSX $0, X26, K4, K6 // 62937d0c66f200 + VFPCLASSPSX $0, X23, K4, K6 // 62b37d0c66f700 + VFPCLASSPSX $0, 99(R15)(R15*4), K4, K6 // 62937d0c66b4bf6300000000 + VFPCLASSPSX $0, 15(DX), K4, K6 // 62f37d0c66b20f00000000 + VFPCLASSPSY $97, Y5, K5, K4 // 62f37d2d66e561 + 
VFPCLASSPSY $97, Y19, K5, K4 // 62b37d2d66e361 + VFPCLASSPSY $97, Y31, K5, K4 // 62937d2d66e761 + VFPCLASSPSY $97, 7(SI)(DI*1), K5, K4 // 62f37d2d66a43e0700000061 + VFPCLASSPSY $97, 15(DX)(BX*8), K5, K4 // 62f37d2d66a4da0f00000061 + VFPCLASSPSY $97, Y5, K5, K5 // 62f37d2d66ed61 + VFPCLASSPSY $97, Y19, K5, K5 // 62b37d2d66eb61 + VFPCLASSPSY $97, Y31, K5, K5 // 62937d2d66ef61 + VFPCLASSPSY $97, 7(SI)(DI*1), K5, K5 // 62f37d2d66ac3e0700000061 + VFPCLASSPSY $97, 15(DX)(BX*8), K5, K5 // 62f37d2d66acda0f00000061 + VFPCLASSPSZ $81, Z7, K7, K2 // 62f37d4f66d751 + VFPCLASSPSZ $81, Z9, K7, K2 // 62d37d4f66d151 + VFPCLASSPSZ $81, (SI), K7, K2 // 62f37d4f661651 + VFPCLASSPSZ $81, 7(SI)(DI*2), K7, K2 // 62f37d4f66947e0700000051 + VFPCLASSPSZ $81, Z7, K7, K7 // 62f37d4f66ff51 + VFPCLASSPSZ $81, Z9, K7, K7 // 62d37d4f66f951 + VFPCLASSPSZ $81, (SI), K7, K7 // 62f37d4f663e51 + VFPCLASSPSZ $81, 7(SI)(DI*2), K7, K7 // 62f37d4f66bc7e0700000051 + VFPCLASSSD $42, X12, K7, K0 // 62d3fd0f67c42a or 62d3fd2f67c42a or 62d3fd4f67c42a + VFPCLASSSD $42, X16, K7, K0 // 62b3fd0f67c02a or 62b3fd2f67c02a or 62b3fd4f67c02a + VFPCLASSSD $42, X23, K7, K0 // 62b3fd0f67c72a or 62b3fd2f67c72a or 62b3fd4f67c72a + VFPCLASSSD $42, (BX), K7, K0 // 62f3fd0f67032a or 62f3fd2f67032a or 62f3fd4f67032a + VFPCLASSSD $42, -17(BP)(SI*1), K7, K0 // 62f3fd0f678435efffffff2a or 62f3fd2f678435efffffff2a or 62f3fd4f678435efffffff2a + VFPCLASSSD $42, X12, K7, K5 // 62d3fd0f67ec2a or 62d3fd2f67ec2a or 62d3fd4f67ec2a + VFPCLASSSD $42, X16, K7, K5 // 62b3fd0f67e82a or 62b3fd2f67e82a or 62b3fd4f67e82a + VFPCLASSSD $42, X23, K7, K5 // 62b3fd0f67ef2a or 62b3fd2f67ef2a or 62b3fd4f67ef2a + VFPCLASSSD $42, (BX), K7, K5 // 62f3fd0f672b2a or 62f3fd2f672b2a or 62f3fd4f672b2a + VFPCLASSSD $42, -17(BP)(SI*1), K7, K5 // 62f3fd0f67ac35efffffff2a or 62f3fd2f67ac35efffffff2a or 62f3fd4f67ac35efffffff2a + VFPCLASSSS $79, X23, K6, K6 // 62b37d0e67f74f or 62b37d2e67f74f or 62b37d4e67f74f + VFPCLASSSS $79, X11, K6, K6 // 62d37d0e67f34f or 
62d37d2e67f34f or 62d37d4e67f34f + VFPCLASSSS $79, X31, K6, K6 // 62937d0e67f74f or 62937d2e67f74f or 62937d4e67f74f + VFPCLASSSS $79, 7(SI)(DI*1), K6, K6 // 62f37d0e67b43e070000004f or 62f37d2e67b43e070000004f or 62f37d4e67b43e070000004f + VFPCLASSSS $79, 15(DX)(BX*8), K6, K6 // 62f37d0e67b4da0f0000004f or 62f37d2e67b4da0f0000004f or 62f37d4e67b4da0f0000004f + VFPCLASSSS $79, X23, K6, K5 // 62b37d0e67ef4f or 62b37d2e67ef4f or 62b37d4e67ef4f + VFPCLASSSS $79, X11, K6, K5 // 62d37d0e67eb4f or 62d37d2e67eb4f or 62d37d4e67eb4f + VFPCLASSSS $79, X31, K6, K5 // 62937d0e67ef4f or 62937d2e67ef4f or 62937d4e67ef4f + VFPCLASSSS $79, 7(SI)(DI*1), K6, K5 // 62f37d0e67ac3e070000004f or 62f37d2e67ac3e070000004f or 62f37d4e67ac3e070000004f + VFPCLASSSS $79, 15(DX)(BX*8), K6, K5 // 62f37d0e67acda0f0000004f or 62f37d2e67acda0f0000004f or 62f37d4e67acda0f0000004f + VINSERTF32X8 $1, Y12, Z0, K2, Z23 // 62c37d4a1afc01 + VINSERTF32X8 $1, Y21, Z0, K2, Z23 // 62a37d4a1afd01 + VINSERTF32X8 $1, Y14, Z0, K2, Z23 // 62c37d4a1afe01 + VINSERTF32X8 $1, 17(SP)(BP*1), Z0, K2, Z23 // 62e37d4a1abc2c1100000001 + VINSERTF32X8 $1, -7(CX)(DX*8), Z0, K2, Z23 // 62e37d4a1abcd1f9ffffff01 + VINSERTF32X8 $1, Y12, Z11, K2, Z23 // 62c3254a1afc01 + VINSERTF32X8 $1, Y21, Z11, K2, Z23 // 62a3254a1afd01 + VINSERTF32X8 $1, Y14, Z11, K2, Z23 // 62c3254a1afe01 + VINSERTF32X8 $1, 17(SP)(BP*1), Z11, K2, Z23 // 62e3254a1abc2c1100000001 + VINSERTF32X8 $1, -7(CX)(DX*8), Z11, K2, Z23 // 62e3254a1abcd1f9ffffff01 + VINSERTF32X8 $1, Y12, Z0, K2, Z19 // 62c37d4a1adc01 + VINSERTF32X8 $1, Y21, Z0, K2, Z19 // 62a37d4a1add01 + VINSERTF32X8 $1, Y14, Z0, K2, Z19 // 62c37d4a1ade01 + VINSERTF32X8 $1, 17(SP)(BP*1), Z0, K2, Z19 // 62e37d4a1a9c2c1100000001 + VINSERTF32X8 $1, -7(CX)(DX*8), Z0, K2, Z19 // 62e37d4a1a9cd1f9ffffff01 + VINSERTF32X8 $1, Y12, Z11, K2, Z19 // 62c3254a1adc01 + VINSERTF32X8 $1, Y21, Z11, K2, Z19 // 62a3254a1add01 + VINSERTF32X8 $1, Y14, Z11, K2, Z19 // 62c3254a1ade01 + VINSERTF32X8 $1, 17(SP)(BP*1), Z11, K2, Z19 
// 62e3254a1a9c2c1100000001 + VINSERTF32X8 $1, -7(CX)(DX*8), Z11, K2, Z19 // 62e3254a1a9cd1f9ffffff01 + VINSERTF64X2 $0, X3, Y16, K4, Y30 // 6263fd2418f300 + VINSERTF64X2 $0, X26, Y16, K4, Y30 // 6203fd2418f200 + VINSERTF64X2 $0, X23, Y16, K4, Y30 // 6223fd2418f700 + VINSERTF64X2 $0, 7(AX)(CX*4), Y16, K4, Y30 // 6263fd2418b4880700000000 + VINSERTF64X2 $0, 7(AX)(CX*1), Y16, K4, Y30 // 6263fd2418b4080700000000 + VINSERTF64X2 $0, X3, Y1, K4, Y30 // 6263f52c18f300 + VINSERTF64X2 $0, X26, Y1, K4, Y30 // 6203f52c18f200 + VINSERTF64X2 $0, X23, Y1, K4, Y30 // 6223f52c18f700 + VINSERTF64X2 $0, 7(AX)(CX*4), Y1, K4, Y30 // 6263f52c18b4880700000000 + VINSERTF64X2 $0, 7(AX)(CX*1), Y1, K4, Y30 // 6263f52c18b4080700000000 + VINSERTF64X2 $0, X3, Y30, K4, Y30 // 62638d2418f300 + VINSERTF64X2 $0, X26, Y30, K4, Y30 // 62038d2418f200 + VINSERTF64X2 $0, X23, Y30, K4, Y30 // 62238d2418f700 + VINSERTF64X2 $0, 7(AX)(CX*4), Y30, K4, Y30 // 62638d2418b4880700000000 + VINSERTF64X2 $0, 7(AX)(CX*1), Y30, K4, Y30 // 62638d2418b4080700000000 + VINSERTF64X2 $0, X3, Y16, K4, Y26 // 6263fd2418d300 + VINSERTF64X2 $0, X26, Y16, K4, Y26 // 6203fd2418d200 + VINSERTF64X2 $0, X23, Y16, K4, Y26 // 6223fd2418d700 + VINSERTF64X2 $0, 7(AX)(CX*4), Y16, K4, Y26 // 6263fd241894880700000000 + VINSERTF64X2 $0, 7(AX)(CX*1), Y16, K4, Y26 // 6263fd241894080700000000 + VINSERTF64X2 $0, X3, Y1, K4, Y26 // 6263f52c18d300 + VINSERTF64X2 $0, X26, Y1, K4, Y26 // 6203f52c18d200 + VINSERTF64X2 $0, X23, Y1, K4, Y26 // 6223f52c18d700 + VINSERTF64X2 $0, 7(AX)(CX*4), Y1, K4, Y26 // 6263f52c1894880700000000 + VINSERTF64X2 $0, 7(AX)(CX*1), Y1, K4, Y26 // 6263f52c1894080700000000 + VINSERTF64X2 $0, X3, Y30, K4, Y26 // 62638d2418d300 + VINSERTF64X2 $0, X26, Y30, K4, Y26 // 62038d2418d200 + VINSERTF64X2 $0, X23, Y30, K4, Y26 // 62238d2418d700 + VINSERTF64X2 $0, 7(AX)(CX*4), Y30, K4, Y26 // 62638d241894880700000000 + VINSERTF64X2 $0, 7(AX)(CX*1), Y30, K4, Y26 // 62638d241894080700000000 + VINSERTF64X2 $0, X3, Y16, K4, Y7 // 
62f3fd2418fb00 + VINSERTF64X2 $0, X26, Y16, K4, Y7 // 6293fd2418fa00 + VINSERTF64X2 $0, X23, Y16, K4, Y7 // 62b3fd2418ff00 + VINSERTF64X2 $0, 7(AX)(CX*4), Y16, K4, Y7 // 62f3fd2418bc880700000000 + VINSERTF64X2 $0, 7(AX)(CX*1), Y16, K4, Y7 // 62f3fd2418bc080700000000 + VINSERTF64X2 $0, X3, Y1, K4, Y7 // 62f3f52c18fb00 + VINSERTF64X2 $0, X26, Y1, K4, Y7 // 6293f52c18fa00 + VINSERTF64X2 $0, X23, Y1, K4, Y7 // 62b3f52c18ff00 + VINSERTF64X2 $0, 7(AX)(CX*4), Y1, K4, Y7 // 62f3f52c18bc880700000000 + VINSERTF64X2 $0, 7(AX)(CX*1), Y1, K4, Y7 // 62f3f52c18bc080700000000 + VINSERTF64X2 $0, X3, Y30, K4, Y7 // 62f38d2418fb00 + VINSERTF64X2 $0, X26, Y30, K4, Y7 // 62938d2418fa00 + VINSERTF64X2 $0, X23, Y30, K4, Y7 // 62b38d2418ff00 + VINSERTF64X2 $0, 7(AX)(CX*4), Y30, K4, Y7 // 62f38d2418bc880700000000 + VINSERTF64X2 $0, 7(AX)(CX*1), Y30, K4, Y7 // 62f38d2418bc080700000000 + VINSERTF64X2 $1, X13, Z24, K1, Z0 // 62d3bd4118c501 + VINSERTF64X2 $1, X28, Z24, K1, Z0 // 6293bd4118c401 + VINSERTF64X2 $1, X24, Z24, K1, Z0 // 6293bd4118c001 + VINSERTF64X2 $1, (SI), Z24, K1, Z0 // 62f3bd41180601 + VINSERTF64X2 $1, 7(SI)(DI*2), Z24, K1, Z0 // 62f3bd4118847e0700000001 + VINSERTF64X2 $1, X13, Z12, K1, Z0 // 62d39d4918c501 + VINSERTF64X2 $1, X28, Z12, K1, Z0 // 62939d4918c401 + VINSERTF64X2 $1, X24, Z12, K1, Z0 // 62939d4918c001 + VINSERTF64X2 $1, (SI), Z12, K1, Z0 // 62f39d49180601 + VINSERTF64X2 $1, 7(SI)(DI*2), Z12, K1, Z0 // 62f39d4918847e0700000001 + VINSERTF64X2 $1, X13, Z24, K1, Z25 // 6243bd4118cd01 + VINSERTF64X2 $1, X28, Z24, K1, Z25 // 6203bd4118cc01 + VINSERTF64X2 $1, X24, Z24, K1, Z25 // 6203bd4118c801 + VINSERTF64X2 $1, (SI), Z24, K1, Z25 // 6263bd41180e01 + VINSERTF64X2 $1, 7(SI)(DI*2), Z24, K1, Z25 // 6263bd41188c7e0700000001 + VINSERTF64X2 $1, X13, Z12, K1, Z25 // 62439d4918cd01 + VINSERTF64X2 $1, X28, Z12, K1, Z25 // 62039d4918cc01 + VINSERTF64X2 $1, X24, Z12, K1, Z25 // 62039d4918c801 + VINSERTF64X2 $1, (SI), Z12, K1, Z25 // 62639d49180e01 + VINSERTF64X2 $1, 7(SI)(DI*2), 
Z12, K1, Z25 // 62639d49188c7e0700000001 + VINSERTI32X8 $1, Y24, Z17, K7, Z20 // 628375473ae001 + VINSERTI32X8 $1, Y13, Z17, K7, Z20 // 62c375473ae501 + VINSERTI32X8 $1, Y20, Z17, K7, Z20 // 62a375473ae401 + VINSERTI32X8 $1, 15(R8)(R14*1), Z17, K7, Z20 // 628375473aa4300f00000001 + VINSERTI32X8 $1, 15(R8)(R14*2), Z17, K7, Z20 // 628375473aa4700f00000001 + VINSERTI32X8 $1, Y24, Z0, K7, Z20 // 62837d4f3ae001 + VINSERTI32X8 $1, Y13, Z0, K7, Z20 // 62c37d4f3ae501 + VINSERTI32X8 $1, Y20, Z0, K7, Z20 // 62a37d4f3ae401 + VINSERTI32X8 $1, 15(R8)(R14*1), Z0, K7, Z20 // 62837d4f3aa4300f00000001 + VINSERTI32X8 $1, 15(R8)(R14*2), Z0, K7, Z20 // 62837d4f3aa4700f00000001 + VINSERTI32X8 $1, Y24, Z17, K7, Z0 // 629375473ac001 + VINSERTI32X8 $1, Y13, Z17, K7, Z0 // 62d375473ac501 + VINSERTI32X8 $1, Y20, Z17, K7, Z0 // 62b375473ac401 + VINSERTI32X8 $1, 15(R8)(R14*1), Z17, K7, Z0 // 629375473a84300f00000001 + VINSERTI32X8 $1, 15(R8)(R14*2), Z17, K7, Z0 // 629375473a84700f00000001 + VINSERTI32X8 $1, Y24, Z0, K7, Z0 // 62937d4f3ac001 + VINSERTI32X8 $1, Y13, Z0, K7, Z0 // 62d37d4f3ac501 + VINSERTI32X8 $1, Y20, Z0, K7, Z0 // 62b37d4f3ac401 + VINSERTI32X8 $1, 15(R8)(R14*1), Z0, K7, Z0 // 62937d4f3a84300f00000001 + VINSERTI32X8 $1, 15(R8)(R14*2), Z0, K7, Z0 // 62937d4f3a84700f00000001 + VINSERTI64X2 $0, X11, Y26, K7, Y14 // 6253ad2738f300 + VINSERTI64X2 $0, X31, Y26, K7, Y14 // 6213ad2738f700 + VINSERTI64X2 $0, X3, Y26, K7, Y14 // 6273ad2738f300 + VINSERTI64X2 $0, 17(SP), Y26, K7, Y14 // 6273ad2738b4241100000000 + VINSERTI64X2 $0, -17(BP)(SI*4), Y26, K7, Y14 // 6273ad2738b4b5efffffff00 + VINSERTI64X2 $0, X11, Y30, K7, Y14 // 62538d2738f300 + VINSERTI64X2 $0, X31, Y30, K7, Y14 // 62138d2738f700 + VINSERTI64X2 $0, X3, Y30, K7, Y14 // 62738d2738f300 + VINSERTI64X2 $0, 17(SP), Y30, K7, Y14 // 62738d2738b4241100000000 + VINSERTI64X2 $0, -17(BP)(SI*4), Y30, K7, Y14 // 62738d2738b4b5efffffff00 + VINSERTI64X2 $0, X11, Y12, K7, Y14 // 62539d2f38f300 + VINSERTI64X2 $0, X31, Y12, K7, Y14 // 
62139d2f38f700 + VINSERTI64X2 $0, X3, Y12, K7, Y14 // 62739d2f38f300 + VINSERTI64X2 $0, 17(SP), Y12, K7, Y14 // 62739d2f38b4241100000000 + VINSERTI64X2 $0, -17(BP)(SI*4), Y12, K7, Y14 // 62739d2f38b4b5efffffff00 + VINSERTI64X2 $0, X11, Y26, K7, Y21 // 62c3ad2738eb00 + VINSERTI64X2 $0, X31, Y26, K7, Y21 // 6283ad2738ef00 + VINSERTI64X2 $0, X3, Y26, K7, Y21 // 62e3ad2738eb00 + VINSERTI64X2 $0, 17(SP), Y26, K7, Y21 // 62e3ad2738ac241100000000 + VINSERTI64X2 $0, -17(BP)(SI*4), Y26, K7, Y21 // 62e3ad2738acb5efffffff00 + VINSERTI64X2 $0, X11, Y30, K7, Y21 // 62c38d2738eb00 + VINSERTI64X2 $0, X31, Y30, K7, Y21 // 62838d2738ef00 + VINSERTI64X2 $0, X3, Y30, K7, Y21 // 62e38d2738eb00 + VINSERTI64X2 $0, 17(SP), Y30, K7, Y21 // 62e38d2738ac241100000000 + VINSERTI64X2 $0, -17(BP)(SI*4), Y30, K7, Y21 // 62e38d2738acb5efffffff00 + VINSERTI64X2 $0, X11, Y12, K7, Y21 // 62c39d2f38eb00 + VINSERTI64X2 $0, X31, Y12, K7, Y21 // 62839d2f38ef00 + VINSERTI64X2 $0, X3, Y12, K7, Y21 // 62e39d2f38eb00 + VINSERTI64X2 $0, 17(SP), Y12, K7, Y21 // 62e39d2f38ac241100000000 + VINSERTI64X2 $0, -17(BP)(SI*4), Y12, K7, Y21 // 62e39d2f38acb5efffffff00 + VINSERTI64X2 $0, X11, Y26, K7, Y1 // 62d3ad2738cb00 + VINSERTI64X2 $0, X31, Y26, K7, Y1 // 6293ad2738cf00 + VINSERTI64X2 $0, X3, Y26, K7, Y1 // 62f3ad2738cb00 + VINSERTI64X2 $0, 17(SP), Y26, K7, Y1 // 62f3ad27388c241100000000 + VINSERTI64X2 $0, -17(BP)(SI*4), Y26, K7, Y1 // 62f3ad27388cb5efffffff00 + VINSERTI64X2 $0, X11, Y30, K7, Y1 // 62d38d2738cb00 + VINSERTI64X2 $0, X31, Y30, K7, Y1 // 62938d2738cf00 + VINSERTI64X2 $0, X3, Y30, K7, Y1 // 62f38d2738cb00 + VINSERTI64X2 $0, 17(SP), Y30, K7, Y1 // 62f38d27388c241100000000 + VINSERTI64X2 $0, -17(BP)(SI*4), Y30, K7, Y1 // 62f38d27388cb5efffffff00 + VINSERTI64X2 $0, X11, Y12, K7, Y1 // 62d39d2f38cb00 + VINSERTI64X2 $0, X31, Y12, K7, Y1 // 62939d2f38cf00 + VINSERTI64X2 $0, X3, Y12, K7, Y1 // 62f39d2f38cb00 + VINSERTI64X2 $0, 17(SP), Y12, K7, Y1 // 62f39d2f388c241100000000 + VINSERTI64X2 $0, -17(BP)(SI*4), 
Y12, K7, Y1 // 62f39d2f388cb5efffffff00 + VINSERTI64X2 $3, X7, Z31, K6, Z17 // 62e3854638cf03 + VINSERTI64X2 $3, X0, Z31, K6, Z17 // 62e3854638c803 + VINSERTI64X2 $3, 7(AX), Z31, K6, Z17 // 62e3854638880700000003 + VINSERTI64X2 $3, (DI), Z31, K6, Z17 // 62e38546380f03 + VINSERTI64X2 $3, X7, Z0, K6, Z17 // 62e3fd4e38cf03 + VINSERTI64X2 $3, X0, Z0, K6, Z17 // 62e3fd4e38c803 + VINSERTI64X2 $3, 7(AX), Z0, K6, Z17 // 62e3fd4e38880700000003 + VINSERTI64X2 $3, (DI), Z0, K6, Z17 // 62e3fd4e380f03 + VINSERTI64X2 $3, X7, Z31, K6, Z23 // 62e3854638ff03 + VINSERTI64X2 $3, X0, Z31, K6, Z23 // 62e3854638f803 + VINSERTI64X2 $3, 7(AX), Z31, K6, Z23 // 62e3854638b80700000003 + VINSERTI64X2 $3, (DI), Z31, K6, Z23 // 62e38546383f03 + VINSERTI64X2 $3, X7, Z0, K6, Z23 // 62e3fd4e38ff03 + VINSERTI64X2 $3, X0, Z0, K6, Z23 // 62e3fd4e38f803 + VINSERTI64X2 $3, 7(AX), Z0, K6, Z23 // 62e3fd4e38b80700000003 + VINSERTI64X2 $3, (DI), Z0, K6, Z23 // 62e3fd4e383f03 + VORPD X11, X24, K7, X23 // 62c1bd0756fb + VORPD X23, X24, K7, X23 // 62a1bd0756ff + VORPD X2, X24, K7, X23 // 62e1bd0756fa + VORPD -17(BP)(SI*8), X24, K7, X23 // 62e1bd0756bcf5efffffff + VORPD (R15), X24, K7, X23 // 62c1bd07563f + VORPD X11, X14, K7, X23 // 62c18d0f56fb + VORPD X23, X14, K7, X23 // 62a18d0f56ff + VORPD X2, X14, K7, X23 // 62e18d0f56fa + VORPD -17(BP)(SI*8), X14, K7, X23 // 62e18d0f56bcf5efffffff + VORPD (R15), X14, K7, X23 // 62c18d0f563f + VORPD X11, X0, K7, X23 // 62c1fd0f56fb + VORPD X23, X0, K7, X23 // 62a1fd0f56ff + VORPD X2, X0, K7, X23 // 62e1fd0f56fa + VORPD -17(BP)(SI*8), X0, K7, X23 // 62e1fd0f56bcf5efffffff + VORPD (R15), X0, K7, X23 // 62c1fd0f563f + VORPD X11, X24, K7, X11 // 6251bd0756db + VORPD X23, X24, K7, X11 // 6231bd0756df + VORPD X2, X24, K7, X11 // 6271bd0756da + VORPD -17(BP)(SI*8), X24, K7, X11 // 6271bd07569cf5efffffff + VORPD (R15), X24, K7, X11 // 6251bd07561f + VORPD X11, X14, K7, X11 // 62518d0f56db + VORPD X23, X14, K7, X11 // 62318d0f56df + VORPD X2, X14, K7, X11 // 62718d0f56da + VORPD 
-17(BP)(SI*8), X14, K7, X11 // 62718d0f569cf5efffffff + VORPD (R15), X14, K7, X11 // 62518d0f561f + VORPD X11, X0, K7, X11 // 6251fd0f56db + VORPD X23, X0, K7, X11 // 6231fd0f56df + VORPD X2, X0, K7, X11 // 6271fd0f56da + VORPD -17(BP)(SI*8), X0, K7, X11 // 6271fd0f569cf5efffffff + VORPD (R15), X0, K7, X11 // 6251fd0f561f + VORPD X11, X24, K7, X31 // 6241bd0756fb + VORPD X23, X24, K7, X31 // 6221bd0756ff + VORPD X2, X24, K7, X31 // 6261bd0756fa + VORPD -17(BP)(SI*8), X24, K7, X31 // 6261bd0756bcf5efffffff + VORPD (R15), X24, K7, X31 // 6241bd07563f + VORPD X11, X14, K7, X31 // 62418d0f56fb + VORPD X23, X14, K7, X31 // 62218d0f56ff + VORPD X2, X14, K7, X31 // 62618d0f56fa + VORPD -17(BP)(SI*8), X14, K7, X31 // 62618d0f56bcf5efffffff + VORPD (R15), X14, K7, X31 // 62418d0f563f + VORPD X11, X0, K7, X31 // 6241fd0f56fb + VORPD X23, X0, K7, X31 // 6221fd0f56ff + VORPD X2, X0, K7, X31 // 6261fd0f56fa + VORPD -17(BP)(SI*8), X0, K7, X31 // 6261fd0f56bcf5efffffff + VORPD (R15), X0, K7, X31 // 6241fd0f563f + VORPD Y16, Y5, K1, Y8 // 6231d52956c0 + VORPD Y9, Y5, K1, Y8 // 6251d52956c1 + VORPD Y13, Y5, K1, Y8 // 6251d52956c5 + VORPD 99(R15)(R15*2), Y5, K1, Y8 // 6211d52956847f63000000 + VORPD -7(DI), Y5, K1, Y8 // 6271d5295687f9ffffff + VORPD Y16, Y24, K1, Y8 // 6231bd2156c0 + VORPD Y9, Y24, K1, Y8 // 6251bd2156c1 + VORPD Y13, Y24, K1, Y8 // 6251bd2156c5 + VORPD 99(R15)(R15*2), Y24, K1, Y8 // 6211bd2156847f63000000 + VORPD -7(DI), Y24, K1, Y8 // 6271bd215687f9ffffff + VORPD Y16, Y21, K1, Y8 // 6231d52156c0 + VORPD Y9, Y21, K1, Y8 // 6251d52156c1 + VORPD Y13, Y21, K1, Y8 // 6251d52156c5 + VORPD 99(R15)(R15*2), Y21, K1, Y8 // 6211d52156847f63000000 + VORPD -7(DI), Y21, K1, Y8 // 6271d5215687f9ffffff + VORPD Y16, Y5, K1, Y11 // 6231d52956d8 + VORPD Y9, Y5, K1, Y11 // 6251d52956d9 + VORPD Y13, Y5, K1, Y11 // 6251d52956dd + VORPD 99(R15)(R15*2), Y5, K1, Y11 // 6211d529569c7f63000000 + VORPD -7(DI), Y5, K1, Y11 // 6271d529569ff9ffffff + VORPD Y16, Y24, K1, Y11 // 6231bd2156d8 + 
VORPD Y9, Y24, K1, Y11 // 6251bd2156d9 + VORPD Y13, Y24, K1, Y11 // 6251bd2156dd + VORPD 99(R15)(R15*2), Y24, K1, Y11 // 6211bd21569c7f63000000 + VORPD -7(DI), Y24, K1, Y11 // 6271bd21569ff9ffffff + VORPD Y16, Y21, K1, Y11 // 6231d52156d8 + VORPD Y9, Y21, K1, Y11 // 6251d52156d9 + VORPD Y13, Y21, K1, Y11 // 6251d52156dd + VORPD 99(R15)(R15*2), Y21, K1, Y11 // 6211d521569c7f63000000 + VORPD -7(DI), Y21, K1, Y11 // 6271d521569ff9ffffff + VORPD Y16, Y5, K1, Y24 // 6221d52956c0 + VORPD Y9, Y5, K1, Y24 // 6241d52956c1 + VORPD Y13, Y5, K1, Y24 // 6241d52956c5 + VORPD 99(R15)(R15*2), Y5, K1, Y24 // 6201d52956847f63000000 + VORPD -7(DI), Y5, K1, Y24 // 6261d5295687f9ffffff + VORPD Y16, Y24, K1, Y24 // 6221bd2156c0 + VORPD Y9, Y24, K1, Y24 // 6241bd2156c1 + VORPD Y13, Y24, K1, Y24 // 6241bd2156c5 + VORPD 99(R15)(R15*2), Y24, K1, Y24 // 6201bd2156847f63000000 + VORPD -7(DI), Y24, K1, Y24 // 6261bd215687f9ffffff + VORPD Y16, Y21, K1, Y24 // 6221d52156c0 + VORPD Y9, Y21, K1, Y24 // 6241d52156c1 + VORPD Y13, Y21, K1, Y24 // 6241d52156c5 + VORPD 99(R15)(R15*2), Y21, K1, Y24 // 6201d52156847f63000000 + VORPD -7(DI), Y21, K1, Y24 // 6261d5215687f9ffffff + VORPD Z9, Z9, K1, Z0 // 62d1b54956c1 + VORPD Z25, Z9, K1, Z0 // 6291b54956c1 + VORPD -7(CX), Z9, K1, Z0 // 62f1b5495681f9ffffff + VORPD 15(DX)(BX*4), Z9, K1, Z0 // 62f1b54956849a0f000000 + VORPD Z9, Z3, K1, Z0 // 62d1e54956c1 + VORPD Z25, Z3, K1, Z0 // 6291e54956c1 + VORPD -7(CX), Z3, K1, Z0 // 62f1e5495681f9ffffff + VORPD 15(DX)(BX*4), Z3, K1, Z0 // 62f1e54956849a0f000000 + VORPD Z9, Z9, K1, Z26 // 6241b54956d1 + VORPD Z25, Z9, K1, Z26 // 6201b54956d1 + VORPD -7(CX), Z9, K1, Z26 // 6261b5495691f9ffffff + VORPD 15(DX)(BX*4), Z9, K1, Z26 // 6261b54956949a0f000000 + VORPD Z9, Z3, K1, Z26 // 6241e54956d1 + VORPD Z25, Z3, K1, Z26 // 6201e54956d1 + VORPD -7(CX), Z3, K1, Z26 // 6261e5495691f9ffffff + VORPD 15(DX)(BX*4), Z3, K1, Z26 // 6261e54956949a0f000000 + VORPS X2, X0, K1, X20 // 62e17c0956e2 + VORPS X8, X0, K1, X20 // 62c17c0956e0 
+ VORPS X9, X0, K1, X20 // 62c17c0956e1 + VORPS 7(SI)(DI*8), X0, K1, X20 // 62e17c0956a4fe07000000 + VORPS -15(R14), X0, K1, X20 // 62c17c0956a6f1ffffff + VORPS X2, X9, K1, X20 // 62e1340956e2 + VORPS X8, X9, K1, X20 // 62c1340956e0 + VORPS X9, X9, K1, X20 // 62c1340956e1 + VORPS 7(SI)(DI*8), X9, K1, X20 // 62e1340956a4fe07000000 + VORPS -15(R14), X9, K1, X20 // 62c1340956a6f1ffffff + VORPS X2, X13, K1, X20 // 62e1140956e2 + VORPS X8, X13, K1, X20 // 62c1140956e0 + VORPS X9, X13, K1, X20 // 62c1140956e1 + VORPS 7(SI)(DI*8), X13, K1, X20 // 62e1140956a4fe07000000 + VORPS -15(R14), X13, K1, X20 // 62c1140956a6f1ffffff + VORPS X2, X0, K1, X5 // 62f17c0956ea + VORPS X8, X0, K1, X5 // 62d17c0956e8 + VORPS X9, X0, K1, X5 // 62d17c0956e9 + VORPS 7(SI)(DI*8), X0, K1, X5 // 62f17c0956acfe07000000 + VORPS -15(R14), X0, K1, X5 // 62d17c0956aef1ffffff + VORPS X2, X9, K1, X5 // 62f1340956ea + VORPS X8, X9, K1, X5 // 62d1340956e8 + VORPS X9, X9, K1, X5 // 62d1340956e9 + VORPS 7(SI)(DI*8), X9, K1, X5 // 62f1340956acfe07000000 + VORPS -15(R14), X9, K1, X5 // 62d1340956aef1ffffff + VORPS X2, X13, K1, X5 // 62f1140956ea + VORPS X8, X13, K1, X5 // 62d1140956e8 + VORPS X9, X13, K1, X5 // 62d1140956e9 + VORPS 7(SI)(DI*8), X13, K1, X5 // 62f1140956acfe07000000 + VORPS -15(R14), X13, K1, X5 // 62d1140956aef1ffffff + VORPS X2, X0, K1, X25 // 62617c0956ca + VORPS X8, X0, K1, X25 // 62417c0956c8 + VORPS X9, X0, K1, X25 // 62417c0956c9 + VORPS 7(SI)(DI*8), X0, K1, X25 // 62617c09568cfe07000000 + VORPS -15(R14), X0, K1, X25 // 62417c09568ef1ffffff + VORPS X2, X9, K1, X25 // 6261340956ca + VORPS X8, X9, K1, X25 // 6241340956c8 + VORPS X9, X9, K1, X25 // 6241340956c9 + VORPS 7(SI)(DI*8), X9, K1, X25 // 62613409568cfe07000000 + VORPS -15(R14), X9, K1, X25 // 62413409568ef1ffffff + VORPS X2, X13, K1, X25 // 6261140956ca + VORPS X8, X13, K1, X25 // 6241140956c8 + VORPS X9, X13, K1, X25 // 6241140956c9 + VORPS 7(SI)(DI*8), X13, K1, X25 // 62611409568cfe07000000 + VORPS -15(R14), X13, K1, X25 // 
62411409568ef1ffffff + VORPS Y11, Y7, K7, Y9 // 6251442f56cb + VORPS Y26, Y7, K7, Y9 // 6211442f56ca + VORPS Y12, Y7, K7, Y9 // 6251442f56cc + VORPS -7(CX)(DX*1), Y7, K7, Y9 // 6271442f568c11f9ffffff + VORPS -15(R14)(R15*4), Y7, K7, Y9 // 6211442f568cbef1ffffff + VORPS Y11, Y6, K7, Y9 // 62514c2f56cb + VORPS Y26, Y6, K7, Y9 // 62114c2f56ca + VORPS Y12, Y6, K7, Y9 // 62514c2f56cc + VORPS -7(CX)(DX*1), Y6, K7, Y9 // 62714c2f568c11f9ffffff + VORPS -15(R14)(R15*4), Y6, K7, Y9 // 62114c2f568cbef1ffffff + VORPS Y11, Y26, K7, Y9 // 62512c2756cb + VORPS Y26, Y26, K7, Y9 // 62112c2756ca + VORPS Y12, Y26, K7, Y9 // 62512c2756cc + VORPS -7(CX)(DX*1), Y26, K7, Y9 // 62712c27568c11f9ffffff + VORPS -15(R14)(R15*4), Y26, K7, Y9 // 62112c27568cbef1ffffff + VORPS Y11, Y7, K7, Y6 // 62d1442f56f3 + VORPS Y26, Y7, K7, Y6 // 6291442f56f2 + VORPS Y12, Y7, K7, Y6 // 62d1442f56f4 + VORPS -7(CX)(DX*1), Y7, K7, Y6 // 62f1442f56b411f9ffffff + VORPS -15(R14)(R15*4), Y7, K7, Y6 // 6291442f56b4bef1ffffff + VORPS Y11, Y6, K7, Y6 // 62d14c2f56f3 + VORPS Y26, Y6, K7, Y6 // 62914c2f56f2 + VORPS Y12, Y6, K7, Y6 // 62d14c2f56f4 + VORPS -7(CX)(DX*1), Y6, K7, Y6 // 62f14c2f56b411f9ffffff + VORPS -15(R14)(R15*4), Y6, K7, Y6 // 62914c2f56b4bef1ffffff + VORPS Y11, Y26, K7, Y6 // 62d12c2756f3 + VORPS Y26, Y26, K7, Y6 // 62912c2756f2 + VORPS Y12, Y26, K7, Y6 // 62d12c2756f4 + VORPS -7(CX)(DX*1), Y26, K7, Y6 // 62f12c2756b411f9ffffff + VORPS -15(R14)(R15*4), Y26, K7, Y6 // 62912c2756b4bef1ffffff + VORPS Y11, Y7, K7, Y3 // 62d1442f56db + VORPS Y26, Y7, K7, Y3 // 6291442f56da + VORPS Y12, Y7, K7, Y3 // 62d1442f56dc + VORPS -7(CX)(DX*1), Y7, K7, Y3 // 62f1442f569c11f9ffffff + VORPS -15(R14)(R15*4), Y7, K7, Y3 // 6291442f569cbef1ffffff + VORPS Y11, Y6, K7, Y3 // 62d14c2f56db + VORPS Y26, Y6, K7, Y3 // 62914c2f56da + VORPS Y12, Y6, K7, Y3 // 62d14c2f56dc + VORPS -7(CX)(DX*1), Y6, K7, Y3 // 62f14c2f569c11f9ffffff + VORPS -15(R14)(R15*4), Y6, K7, Y3 // 62914c2f569cbef1ffffff + VORPS Y11, Y26, K7, Y3 // 62d12c2756db 
+ VORPS Y26, Y26, K7, Y3 // 62912c2756da + VORPS Y12, Y26, K7, Y3 // 62d12c2756dc + VORPS -7(CX)(DX*1), Y26, K7, Y3 // 62f12c27569c11f9ffffff + VORPS -15(R14)(R15*4), Y26, K7, Y3 // 62912c27569cbef1ffffff + VORPS Z17, Z20, K2, Z9 // 62315c4256c9 + VORPS Z0, Z20, K2, Z9 // 62715c4256c8 + VORPS 99(R15)(R15*8), Z20, K2, Z9 // 62115c42568cff63000000 + VORPS 7(AX)(CX*8), Z20, K2, Z9 // 62715c42568cc807000000 + VORPS Z17, Z0, K2, Z9 // 62317c4a56c9 + VORPS Z0, Z0, K2, Z9 // 62717c4a56c8 + VORPS 99(R15)(R15*8), Z0, K2, Z9 // 62117c4a568cff63000000 + VORPS 7(AX)(CX*8), Z0, K2, Z9 // 62717c4a568cc807000000 + VORPS Z17, Z20, K2, Z28 // 62215c4256e1 + VORPS Z0, Z20, K2, Z28 // 62615c4256e0 + VORPS 99(R15)(R15*8), Z20, K2, Z28 // 62015c4256a4ff63000000 + VORPS 7(AX)(CX*8), Z20, K2, Z28 // 62615c4256a4c807000000 + VORPS Z17, Z0, K2, Z28 // 62217c4a56e1 + VORPS Z0, Z0, K2, Z28 // 62617c4a56e0 + VORPS 99(R15)(R15*8), Z0, K2, Z28 // 62017c4a56a4ff63000000 + VORPS 7(AX)(CX*8), Z0, K2, Z28 // 62617c4a56a4c807000000 + VPEXTRD $64, X22, CX // 62e37d0816f140 + VPEXTRD $64, X30, CX // 62637d0816f140 + VPEXTRD $64, X22, SP // 62e37d0816f440 + VPEXTRD $64, X30, SP // 62637d0816f440 + VPEXTRD $64, X22, 99(R15)(R15*2) // 62837d0816b47f6300000040 + VPEXTRD $64, X30, 99(R15)(R15*2) // 62037d0816b47f6300000040 + VPEXTRD $64, X22, -7(DI) // 62e37d0816b7f9ffffff40 + VPEXTRD $64, X30, -7(DI) // 62637d0816b7f9ffffff40 + VPEXTRQ $27, X30, R9 // 6243fd0816f11b + VPEXTRQ $27, X30, R13 // 6243fd0816f51b + VPEXTRQ $27, X30, -15(R14)(R15*1) // 6203fd0816b43ef1ffffff1b + VPEXTRQ $27, X30, -15(BX) // 6263fd0816b3f1ffffff1b + VPINSRD $82, R9, X22, X21 // 62c34d0022e952 + VPINSRD $82, CX, X22, X21 // 62e34d0022e952 + VPINSRD $82, -7(CX)(DX*1), X22, X21 // 62e34d0022ac11f9ffffff52 + VPINSRD $82, -15(R14)(R15*4), X22, X21 // 62834d0022acbef1ffffff52 + VPINSRD $82, R9, X7, X21 // 62c3450822e952 + VPINSRD $82, CX, X7, X21 // 62e3450822e952 + VPINSRD $82, -7(CX)(DX*1), X7, X21 // 62e3450822ac11f9ffffff52 + 
VPINSRD $82, -15(R14)(R15*4), X7, X21 // 6283450822acbef1ffffff52 + VPINSRD $82, R9, X19, X21 // 62c3650022e952 + VPINSRD $82, CX, X19, X21 // 62e3650022e952 + VPINSRD $82, -7(CX)(DX*1), X19, X21 // 62e3650022ac11f9ffffff52 + VPINSRD $82, -15(R14)(R15*4), X19, X21 // 6283650022acbef1ffffff52 + VPINSRD $82, R9, X22, X0 // 62d34d0022c152 + VPINSRD $82, CX, X22, X0 // 62f34d0022c152 + VPINSRD $82, -7(CX)(DX*1), X22, X0 // 62f34d00228411f9ffffff52 + VPINSRD $82, -15(R14)(R15*4), X22, X0 // 62934d002284bef1ffffff52 + VPINSRD $82, R9, X19, X0 // 62d3650022c152 + VPINSRD $82, CX, X19, X0 // 62f3650022c152 + VPINSRD $82, -7(CX)(DX*1), X19, X0 // 62f36500228411f9ffffff52 + VPINSRD $82, -15(R14)(R15*4), X19, X0 // 629365002284bef1ffffff52 + VPINSRD $82, R9, X22, X28 // 62434d0022e152 + VPINSRD $82, CX, X22, X28 // 62634d0022e152 + VPINSRD $82, -7(CX)(DX*1), X22, X28 // 62634d0022a411f9ffffff52 + VPINSRD $82, -15(R14)(R15*4), X22, X28 // 62034d0022a4bef1ffffff52 + VPINSRD $82, R9, X7, X28 // 6243450822e152 + VPINSRD $82, CX, X7, X28 // 6263450822e152 + VPINSRD $82, -7(CX)(DX*1), X7, X28 // 6263450822a411f9ffffff52 + VPINSRD $82, -15(R14)(R15*4), X7, X28 // 6203450822a4bef1ffffff52 + VPINSRD $82, R9, X19, X28 // 6243650022e152 + VPINSRD $82, CX, X19, X28 // 6263650022e152 + VPINSRD $82, -7(CX)(DX*1), X19, X28 // 6263650022a411f9ffffff52 + VPINSRD $82, -15(R14)(R15*4), X19, X28 // 6203650022a4bef1ffffff52 + VPINSRQ $126, DX, X1, X16 // 62e3f50822c27e + VPINSRQ $126, BP, X1, X16 // 62e3f50822c57e + VPINSRQ $126, 7(AX)(CX*4), X1, X16 // 62e3f508228488070000007e + VPINSRQ $126, 7(AX)(CX*1), X1, X16 // 62e3f508228408070000007e + VPINSRQ $126, DX, X7, X16 // 62e3c50822c27e + VPINSRQ $126, BP, X7, X16 // 62e3c50822c57e + VPINSRQ $126, 7(AX)(CX*4), X7, X16 // 62e3c508228488070000007e + VPINSRQ $126, 7(AX)(CX*1), X7, X16 // 62e3c508228408070000007e + VPINSRQ $126, DX, X9, X16 // 62e3b50822c27e + VPINSRQ $126, BP, X9, X16 // 62e3b50822c57e + VPINSRQ $126, 7(AX)(CX*4), X9, X16 // 
62e3b508228488070000007e + VPINSRQ $126, 7(AX)(CX*1), X9, X16 // 62e3b508228408070000007e + VPINSRQ $126, DX, X1, X31 // 6263f50822fa7e + VPINSRQ $126, BP, X1, X31 // 6263f50822fd7e + VPINSRQ $126, 7(AX)(CX*4), X1, X31 // 6263f50822bc88070000007e + VPINSRQ $126, 7(AX)(CX*1), X1, X31 // 6263f50822bc08070000007e + VPINSRQ $126, DX, X7, X31 // 6263c50822fa7e + VPINSRQ $126, BP, X7, X31 // 6263c50822fd7e + VPINSRQ $126, 7(AX)(CX*4), X7, X31 // 6263c50822bc88070000007e + VPINSRQ $126, 7(AX)(CX*1), X7, X31 // 6263c50822bc08070000007e + VPINSRQ $126, DX, X9, X31 // 6263b50822fa7e + VPINSRQ $126, BP, X9, X31 // 6263b50822fd7e + VPINSRQ $126, 7(AX)(CX*4), X9, X31 // 6263b50822bc88070000007e + VPINSRQ $126, 7(AX)(CX*1), X9, X31 // 6263b50822bc08070000007e + VPMOVD2M X3, K6 // 62f27e0839f3 + VPMOVD2M X26, K6 // 62927e0839f2 + VPMOVD2M X23, K6 // 62b27e0839f7 + VPMOVD2M X3, K7 // 62f27e0839fb + VPMOVD2M X26, K7 // 62927e0839fa + VPMOVD2M X23, K7 // 62b27e0839ff + VPMOVD2M Y5, K6 // 62f27e2839f5 + VPMOVD2M Y28, K6 // 62927e2839f4 + VPMOVD2M Y7, K6 // 62f27e2839f7 + VPMOVD2M Y5, K4 // 62f27e2839e5 + VPMOVD2M Y28, K4 // 62927e2839e4 + VPMOVD2M Y7, K4 // 62f27e2839e7 + VPMOVD2M Z1, K4 // 62f27e4839e1 + VPMOVD2M Z9, K4 // 62d27e4839e1 + VPMOVD2M Z1, K6 // 62f27e4839f1 + VPMOVD2M Z9, K6 // 62d27e4839f1 + VPMOVM2D K6, X21 // 62e27e0838ee + VPMOVM2D K5, X21 // 62e27e0838ed + VPMOVM2D K6, X1 // 62f27e0838ce + VPMOVM2D K5, X1 // 62f27e0838cd + VPMOVM2D K6, X11 // 62727e0838de + VPMOVM2D K5, X11 // 62727e0838dd + VPMOVM2D K1, Y28 // 62627e2838e1 + VPMOVM2D K5, Y28 // 62627e2838e5 + VPMOVM2D K1, Y13 // 62727e2838e9 + VPMOVM2D K5, Y13 // 62727e2838ed + VPMOVM2D K1, Y7 // 62f27e2838f9 + VPMOVM2D K5, Y7 // 62f27e2838fd + VPMOVM2D K3, Z7 // 62f27e4838fb + VPMOVM2D K1, Z7 // 62f27e4838f9 + VPMOVM2D K3, Z21 // 62e27e4838eb + VPMOVM2D K1, Z21 // 62e27e4838e9 + VPMOVM2Q K5, X13 // 6272fe0838ed + VPMOVM2Q K4, X13 // 6272fe0838ec + VPMOVM2Q K5, X0 // 62f2fe0838c5 + VPMOVM2Q K4, X0 // 62f2fe0838c4 + 
VPMOVM2Q K5, X30 // 6262fe0838f5 + VPMOVM2Q K4, X30 // 6262fe0838f4 + VPMOVM2Q K7, Y2 // 62f2fe2838d7 + VPMOVM2Q K6, Y2 // 62f2fe2838d6 + VPMOVM2Q K7, Y21 // 62e2fe2838ef + VPMOVM2Q K6, Y21 // 62e2fe2838ee + VPMOVM2Q K7, Y12 // 6272fe2838e7 + VPMOVM2Q K6, Y12 // 6272fe2838e6 + VPMOVM2Q K4, Z16 // 62e2fe4838c4 + VPMOVM2Q K6, Z16 // 62e2fe4838c6 + VPMOVM2Q K4, Z25 // 6262fe4838cc + VPMOVM2Q K6, Z25 // 6262fe4838ce + VPMOVQ2M X14, K1 // 62d2fe0839ce + VPMOVQ2M X19, K1 // 62b2fe0839cb + VPMOVQ2M X8, K1 // 62d2fe0839c8 + VPMOVQ2M X14, K3 // 62d2fe0839de + VPMOVQ2M X19, K3 // 62b2fe0839db + VPMOVQ2M X8, K3 // 62d2fe0839d8 + VPMOVQ2M Y3, K6 // 62f2fe2839f3 + VPMOVQ2M Y2, K6 // 62f2fe2839f2 + VPMOVQ2M Y9, K6 // 62d2fe2839f1 + VPMOVQ2M Y3, K7 // 62f2fe2839fb + VPMOVQ2M Y2, K7 // 62f2fe2839fa + VPMOVQ2M Y9, K7 // 62d2fe2839f9 + VPMOVQ2M Z12, K6 // 62d2fe4839f4 + VPMOVQ2M Z13, K6 // 62d2fe4839f5 + VPMOVQ2M Z12, K4 // 62d2fe4839e4 + VPMOVQ2M Z13, K4 // 62d2fe4839e5 + VPMULLQ X13, X3, K7, X17 // 62c2e50f40cd + VPMULLQ X28, X3, K7, X17 // 6282e50f40cc + VPMULLQ X24, X3, K7, X17 // 6282e50f40c8 + VPMULLQ 15(R8)(R14*4), X3, K7, X17 // 6282e50f408cb00f000000 + VPMULLQ -7(CX)(DX*4), X3, K7, X17 // 62e2e50f408c91f9ffffff + VPMULLQ X13, X26, K7, X17 // 62c2ad0740cd + VPMULLQ X28, X26, K7, X17 // 6282ad0740cc + VPMULLQ X24, X26, K7, X17 // 6282ad0740c8 + VPMULLQ 15(R8)(R14*4), X26, K7, X17 // 6282ad07408cb00f000000 + VPMULLQ -7(CX)(DX*4), X26, K7, X17 // 62e2ad07408c91f9ffffff + VPMULLQ X13, X23, K7, X17 // 62c2c50740cd + VPMULLQ X28, X23, K7, X17 // 6282c50740cc + VPMULLQ X24, X23, K7, X17 // 6282c50740c8 + VPMULLQ 15(R8)(R14*4), X23, K7, X17 // 6282c507408cb00f000000 + VPMULLQ -7(CX)(DX*4), X23, K7, X17 // 62e2c507408c91f9ffffff + VPMULLQ X13, X3, K7, X15 // 6252e50f40fd + VPMULLQ X28, X3, K7, X15 // 6212e50f40fc + VPMULLQ X24, X3, K7, X15 // 6212e50f40f8 + VPMULLQ 15(R8)(R14*4), X3, K7, X15 // 6212e50f40bcb00f000000 + VPMULLQ -7(CX)(DX*4), X3, K7, X15 // 6272e50f40bc91f9ffffff + 
VPMULLQ X13, X26, K7, X15 // 6252ad0740fd + VPMULLQ X28, X26, K7, X15 // 6212ad0740fc + VPMULLQ X24, X26, K7, X15 // 6212ad0740f8 + VPMULLQ 15(R8)(R14*4), X26, K7, X15 // 6212ad0740bcb00f000000 + VPMULLQ -7(CX)(DX*4), X26, K7, X15 // 6272ad0740bc91f9ffffff + VPMULLQ X13, X23, K7, X15 // 6252c50740fd + VPMULLQ X28, X23, K7, X15 // 6212c50740fc + VPMULLQ X24, X23, K7, X15 // 6212c50740f8 + VPMULLQ 15(R8)(R14*4), X23, K7, X15 // 6212c50740bcb00f000000 + VPMULLQ -7(CX)(DX*4), X23, K7, X15 // 6272c50740bc91f9ffffff + VPMULLQ X13, X3, K7, X8 // 6252e50f40c5 + VPMULLQ X28, X3, K7, X8 // 6212e50f40c4 + VPMULLQ X24, X3, K7, X8 // 6212e50f40c0 + VPMULLQ 15(R8)(R14*4), X3, K7, X8 // 6212e50f4084b00f000000 + VPMULLQ -7(CX)(DX*4), X3, K7, X8 // 6272e50f408491f9ffffff + VPMULLQ X13, X26, K7, X8 // 6252ad0740c5 + VPMULLQ X28, X26, K7, X8 // 6212ad0740c4 + VPMULLQ X24, X26, K7, X8 // 6212ad0740c0 + VPMULLQ 15(R8)(R14*4), X26, K7, X8 // 6212ad074084b00f000000 + VPMULLQ -7(CX)(DX*4), X26, K7, X8 // 6272ad07408491f9ffffff + VPMULLQ X13, X23, K7, X8 // 6252c50740c5 + VPMULLQ X28, X23, K7, X8 // 6212c50740c4 + VPMULLQ X24, X23, K7, X8 // 6212c50740c0 + VPMULLQ 15(R8)(R14*4), X23, K7, X8 // 6212c5074084b00f000000 + VPMULLQ -7(CX)(DX*4), X23, K7, X8 // 6272c507408491f9ffffff + VPMULLQ Y28, Y31, K2, Y17 // 6282852240cc + VPMULLQ Y13, Y31, K2, Y17 // 62c2852240cd + VPMULLQ Y7, Y31, K2, Y17 // 62e2852240cf + VPMULLQ 15(DX)(BX*1), Y31, K2, Y17 // 62e28522408c1a0f000000 + VPMULLQ -7(CX)(DX*2), Y31, K2, Y17 // 62e28522408c51f9ffffff + VPMULLQ Y28, Y8, K2, Y17 // 6282bd2a40cc + VPMULLQ Y13, Y8, K2, Y17 // 62c2bd2a40cd + VPMULLQ Y7, Y8, K2, Y17 // 62e2bd2a40cf + VPMULLQ 15(DX)(BX*1), Y8, K2, Y17 // 62e2bd2a408c1a0f000000 + VPMULLQ -7(CX)(DX*2), Y8, K2, Y17 // 62e2bd2a408c51f9ffffff + VPMULLQ Y28, Y1, K2, Y17 // 6282f52a40cc + VPMULLQ Y13, Y1, K2, Y17 // 62c2f52a40cd + VPMULLQ Y7, Y1, K2, Y17 // 62e2f52a40cf + VPMULLQ 15(DX)(BX*1), Y1, K2, Y17 // 62e2f52a408c1a0f000000 + VPMULLQ -7(CX)(DX*2), Y1, 
K2, Y17 // 62e2f52a408c51f9ffffff + VPMULLQ Y28, Y31, K2, Y7 // 6292852240fc + VPMULLQ Y13, Y31, K2, Y7 // 62d2852240fd + VPMULLQ Y7, Y31, K2, Y7 // 62f2852240ff + VPMULLQ 15(DX)(BX*1), Y31, K2, Y7 // 62f2852240bc1a0f000000 + VPMULLQ -7(CX)(DX*2), Y31, K2, Y7 // 62f2852240bc51f9ffffff + VPMULLQ Y28, Y8, K2, Y7 // 6292bd2a40fc + VPMULLQ Y13, Y8, K2, Y7 // 62d2bd2a40fd + VPMULLQ Y7, Y8, K2, Y7 // 62f2bd2a40ff + VPMULLQ 15(DX)(BX*1), Y8, K2, Y7 // 62f2bd2a40bc1a0f000000 + VPMULLQ -7(CX)(DX*2), Y8, K2, Y7 // 62f2bd2a40bc51f9ffffff + VPMULLQ Y28, Y1, K2, Y7 // 6292f52a40fc + VPMULLQ Y13, Y1, K2, Y7 // 62d2f52a40fd + VPMULLQ Y7, Y1, K2, Y7 // 62f2f52a40ff + VPMULLQ 15(DX)(BX*1), Y1, K2, Y7 // 62f2f52a40bc1a0f000000 + VPMULLQ -7(CX)(DX*2), Y1, K2, Y7 // 62f2f52a40bc51f9ffffff + VPMULLQ Y28, Y31, K2, Y9 // 6212852240cc + VPMULLQ Y13, Y31, K2, Y9 // 6252852240cd + VPMULLQ Y7, Y31, K2, Y9 // 6272852240cf + VPMULLQ 15(DX)(BX*1), Y31, K2, Y9 // 62728522408c1a0f000000 + VPMULLQ -7(CX)(DX*2), Y31, K2, Y9 // 62728522408c51f9ffffff + VPMULLQ Y28, Y8, K2, Y9 // 6212bd2a40cc + VPMULLQ Y13, Y8, K2, Y9 // 6252bd2a40cd + VPMULLQ Y7, Y8, K2, Y9 // 6272bd2a40cf + VPMULLQ 15(DX)(BX*1), Y8, K2, Y9 // 6272bd2a408c1a0f000000 + VPMULLQ -7(CX)(DX*2), Y8, K2, Y9 // 6272bd2a408c51f9ffffff + VPMULLQ Y28, Y1, K2, Y9 // 6212f52a40cc + VPMULLQ Y13, Y1, K2, Y9 // 6252f52a40cd + VPMULLQ Y7, Y1, K2, Y9 // 6272f52a40cf + VPMULLQ 15(DX)(BX*1), Y1, K2, Y9 // 6272f52a408c1a0f000000 + VPMULLQ -7(CX)(DX*2), Y1, K2, Y9 // 6272f52a408c51f9ffffff + VPMULLQ Z3, Z20, K4, Z0 // 62f2dd4440c3 + VPMULLQ Z30, Z20, K4, Z0 // 6292dd4440c6 + VPMULLQ 15(R8)(R14*8), Z20, K4, Z0 // 6292dd444084f00f000000 + VPMULLQ -15(R14)(R15*2), Z20, K4, Z0 // 6292dd4440847ef1ffffff + VPMULLQ Z3, Z28, K4, Z0 // 62f29d4440c3 + VPMULLQ Z30, Z28, K4, Z0 // 62929d4440c6 + VPMULLQ 15(R8)(R14*8), Z28, K4, Z0 // 62929d444084f00f000000 + VPMULLQ -15(R14)(R15*2), Z28, K4, Z0 // 62929d4440847ef1ffffff + VPMULLQ Z3, Z20, K4, Z6 // 62f2dd4440f3 + 
VPMULLQ Z30, Z20, K4, Z6 // 6292dd4440f6 + VPMULLQ 15(R8)(R14*8), Z20, K4, Z6 // 6292dd4440b4f00f000000 + VPMULLQ -15(R14)(R15*2), Z20, K4, Z6 // 6292dd4440b47ef1ffffff + VPMULLQ Z3, Z28, K4, Z6 // 62f29d4440f3 + VPMULLQ Z30, Z28, K4, Z6 // 62929d4440f6 + VPMULLQ 15(R8)(R14*8), Z28, K4, Z6 // 62929d4440b4f00f000000 + VPMULLQ -15(R14)(R15*2), Z28, K4, Z6 // 62929d4440b47ef1ffffff + VRANGEPD $11, X24, X23, K2, X12 // 6213c50250e00b + VRANGEPD $11, X14, X23, K2, X12 // 6253c50250e60b + VRANGEPD $11, X0, X23, K2, X12 // 6273c50250e00b + VRANGEPD $11, 17(SP)(BP*8), X23, K2, X12 // 6273c50250a4ec110000000b + VRANGEPD $11, 17(SP)(BP*4), X23, K2, X12 // 6273c50250a4ac110000000b + VRANGEPD $11, X24, X11, K2, X12 // 6213a50a50e00b + VRANGEPD $11, X14, X11, K2, X12 // 6253a50a50e60b + VRANGEPD $11, X0, X11, K2, X12 // 6273a50a50e00b + VRANGEPD $11, 17(SP)(BP*8), X11, K2, X12 // 6273a50a50a4ec110000000b + VRANGEPD $11, 17(SP)(BP*4), X11, K2, X12 // 6273a50a50a4ac110000000b + VRANGEPD $11, X24, X31, K2, X12 // 6213850250e00b + VRANGEPD $11, X14, X31, K2, X12 // 6253850250e60b + VRANGEPD $11, X0, X31, K2, X12 // 6273850250e00b + VRANGEPD $11, 17(SP)(BP*8), X31, K2, X12 // 6273850250a4ec110000000b + VRANGEPD $11, 17(SP)(BP*4), X31, K2, X12 // 6273850250a4ac110000000b + VRANGEPD $11, X24, X23, K2, X16 // 6283c50250c00b + VRANGEPD $11, X14, X23, K2, X16 // 62c3c50250c60b + VRANGEPD $11, X0, X23, K2, X16 // 62e3c50250c00b + VRANGEPD $11, 17(SP)(BP*8), X23, K2, X16 // 62e3c5025084ec110000000b + VRANGEPD $11, 17(SP)(BP*4), X23, K2, X16 // 62e3c5025084ac110000000b + VRANGEPD $11, X24, X11, K2, X16 // 6283a50a50c00b + VRANGEPD $11, X14, X11, K2, X16 // 62c3a50a50c60b + VRANGEPD $11, X0, X11, K2, X16 // 62e3a50a50c00b + VRANGEPD $11, 17(SP)(BP*8), X11, K2, X16 // 62e3a50a5084ec110000000b + VRANGEPD $11, 17(SP)(BP*4), X11, K2, X16 // 62e3a50a5084ac110000000b + VRANGEPD $11, X24, X31, K2, X16 // 6283850250c00b + VRANGEPD $11, X14, X31, K2, X16 // 62c3850250c60b + VRANGEPD $11, X0, X31, K2, 
X16 // 62e3850250c00b + VRANGEPD $11, 17(SP)(BP*8), X31, K2, X16 // 62e385025084ec110000000b + VRANGEPD $11, 17(SP)(BP*4), X31, K2, X16 // 62e385025084ac110000000b + VRANGEPD $11, X24, X23, K2, X23 // 6283c50250f80b + VRANGEPD $11, X14, X23, K2, X23 // 62c3c50250fe0b + VRANGEPD $11, X0, X23, K2, X23 // 62e3c50250f80b + VRANGEPD $11, 17(SP)(BP*8), X23, K2, X23 // 62e3c50250bcec110000000b + VRANGEPD $11, 17(SP)(BP*4), X23, K2, X23 // 62e3c50250bcac110000000b + VRANGEPD $11, X24, X11, K2, X23 // 6283a50a50f80b + VRANGEPD $11, X14, X11, K2, X23 // 62c3a50a50fe0b + VRANGEPD $11, X0, X11, K2, X23 // 62e3a50a50f80b + VRANGEPD $11, 17(SP)(BP*8), X11, K2, X23 // 62e3a50a50bcec110000000b + VRANGEPD $11, 17(SP)(BP*4), X11, K2, X23 // 62e3a50a50bcac110000000b + VRANGEPD $11, X24, X31, K2, X23 // 6283850250f80b + VRANGEPD $11, X14, X31, K2, X23 // 62c3850250fe0b + VRANGEPD $11, X0, X31, K2, X23 // 62e3850250f80b + VRANGEPD $11, 17(SP)(BP*8), X31, K2, X23 // 62e3850250bcec110000000b + VRANGEPD $11, 17(SP)(BP*4), X31, K2, X23 // 62e3850250bcac110000000b + VRANGEPD $12, Y3, Y18, K1, Y15 // 6273ed2150fb0c + VRANGEPD $12, Y19, Y18, K1, Y15 // 6233ed2150fb0c + VRANGEPD $12, Y23, Y18, K1, Y15 // 6233ed2150ff0c + VRANGEPD $12, (R8), Y18, K1, Y15 // 6253ed2150380c + VRANGEPD $12, 15(DX)(BX*2), Y18, K1, Y15 // 6273ed2150bc5a0f0000000c + VRANGEPD $12, Y3, Y24, K1, Y15 // 6273bd2150fb0c + VRANGEPD $12, Y19, Y24, K1, Y15 // 6233bd2150fb0c + VRANGEPD $12, Y23, Y24, K1, Y15 // 6233bd2150ff0c + VRANGEPD $12, (R8), Y24, K1, Y15 // 6253bd2150380c + VRANGEPD $12, 15(DX)(BX*2), Y24, K1, Y15 // 6273bd2150bc5a0f0000000c + VRANGEPD $12, Y3, Y9, K1, Y15 // 6273b52950fb0c + VRANGEPD $12, Y19, Y9, K1, Y15 // 6233b52950fb0c + VRANGEPD $12, Y23, Y9, K1, Y15 // 6233b52950ff0c + VRANGEPD $12, (R8), Y9, K1, Y15 // 6253b52950380c + VRANGEPD $12, 15(DX)(BX*2), Y9, K1, Y15 // 6273b52950bc5a0f0000000c + VRANGEPD $12, Y3, Y18, K1, Y22 // 62e3ed2150f30c + VRANGEPD $12, Y19, Y18, K1, Y22 // 62a3ed2150f30c + 
VRANGEPD $12, Y23, Y18, K1, Y22 // 62a3ed2150f70c + VRANGEPD $12, (R8), Y18, K1, Y22 // 62c3ed2150300c + VRANGEPD $12, 15(DX)(BX*2), Y18, K1, Y22 // 62e3ed2150b45a0f0000000c + VRANGEPD $12, Y3, Y24, K1, Y22 // 62e3bd2150f30c + VRANGEPD $12, Y19, Y24, K1, Y22 // 62a3bd2150f30c + VRANGEPD $12, Y23, Y24, K1, Y22 // 62a3bd2150f70c + VRANGEPD $12, (R8), Y24, K1, Y22 // 62c3bd2150300c + VRANGEPD $12, 15(DX)(BX*2), Y24, K1, Y22 // 62e3bd2150b45a0f0000000c + VRANGEPD $12, Y3, Y9, K1, Y22 // 62e3b52950f30c + VRANGEPD $12, Y19, Y9, K1, Y22 // 62a3b52950f30c + VRANGEPD $12, Y23, Y9, K1, Y22 // 62a3b52950f70c + VRANGEPD $12, (R8), Y9, K1, Y22 // 62c3b52950300c + VRANGEPD $12, 15(DX)(BX*2), Y9, K1, Y22 // 62e3b52950b45a0f0000000c + VRANGEPD $12, Y3, Y18, K1, Y20 // 62e3ed2150e30c + VRANGEPD $12, Y19, Y18, K1, Y20 // 62a3ed2150e30c + VRANGEPD $12, Y23, Y18, K1, Y20 // 62a3ed2150e70c + VRANGEPD $12, (R8), Y18, K1, Y20 // 62c3ed2150200c + VRANGEPD $12, 15(DX)(BX*2), Y18, K1, Y20 // 62e3ed2150a45a0f0000000c + VRANGEPD $12, Y3, Y24, K1, Y20 // 62e3bd2150e30c + VRANGEPD $12, Y19, Y24, K1, Y20 // 62a3bd2150e30c + VRANGEPD $12, Y23, Y24, K1, Y20 // 62a3bd2150e70c + VRANGEPD $12, (R8), Y24, K1, Y20 // 62c3bd2150200c + VRANGEPD $12, 15(DX)(BX*2), Y24, K1, Y20 // 62e3bd2150a45a0f0000000c + VRANGEPD $12, Y3, Y9, K1, Y20 // 62e3b52950e30c + VRANGEPD $12, Y19, Y9, K1, Y20 // 62a3b52950e30c + VRANGEPD $12, Y23, Y9, K1, Y20 // 62a3b52950e70c + VRANGEPD $12, (R8), Y9, K1, Y20 // 62c3b52950200c + VRANGEPD $12, 15(DX)(BX*2), Y9, K1, Y20 // 62e3b52950a45a0f0000000c + VRANGEPD $13, Z21, Z12, K7, Z14 // 62339d4f50f50d + VRANGEPD $13, Z9, Z12, K7, Z14 // 62539d4f50f10d + VRANGEPD $13, Z21, Z13, K7, Z14 // 6233954f50f50d + VRANGEPD $13, Z9, Z13, K7, Z14 // 6253954f50f10d + VRANGEPD $13, Z21, Z12, K7, Z13 // 62339d4f50ed0d + VRANGEPD $13, Z9, Z12, K7, Z13 // 62539d4f50e90d + VRANGEPD $13, Z21, Z13, K7, Z13 // 6233954f50ed0d + VRANGEPD $13, Z9, Z13, K7, Z13 // 6253954f50e90d + VRANGEPD $14, Z23, Z27, 
K1, Z2 // 62b3a54150d70e + VRANGEPD $14, Z9, Z27, K1, Z2 // 62d3a54150d10e + VRANGEPD $14, (R14), Z27, K1, Z2 // 62d3a54150160e + VRANGEPD $14, -7(DI)(R8*8), Z27, K1, Z2 // 62b3a5415094c7f9ffffff0e + VRANGEPD $14, Z23, Z25, K1, Z2 // 62b3b54150d70e + VRANGEPD $14, Z9, Z25, K1, Z2 // 62d3b54150d10e + VRANGEPD $14, (R14), Z25, K1, Z2 // 62d3b54150160e + VRANGEPD $14, -7(DI)(R8*8), Z25, K1, Z2 // 62b3b5415094c7f9ffffff0e + VRANGEPD $14, Z23, Z27, K1, Z7 // 62b3a54150ff0e + VRANGEPD $14, Z9, Z27, K1, Z7 // 62d3a54150f90e + VRANGEPD $14, (R14), Z27, K1, Z7 // 62d3a541503e0e + VRANGEPD $14, -7(DI)(R8*8), Z27, K1, Z7 // 62b3a54150bcc7f9ffffff0e + VRANGEPD $14, Z23, Z25, K1, Z7 // 62b3b54150ff0e + VRANGEPD $14, Z9, Z25, K1, Z7 // 62d3b54150f90e + VRANGEPD $14, (R14), Z25, K1, Z7 // 62d3b541503e0e + VRANGEPD $14, -7(DI)(R8*8), Z25, K1, Z7 // 62b3b54150bcc7f9ffffff0e + VRANGEPS $15, X0, X20, K1, X11 // 62735d0150d80f + VRANGEPS $15, X9, X20, K1, X11 // 62535d0150d90f + VRANGEPS $15, X13, X20, K1, X11 // 62535d0150dd0f + VRANGEPS $15, 7(SI)(DI*4), X20, K1, X11 // 62735d01509cbe070000000f + VRANGEPS $15, -7(DI)(R8*2), X20, K1, X11 // 62335d01509c47f9ffffff0f + VRANGEPS $15, X0, X5, K1, X11 // 6273550950d80f + VRANGEPS $15, X9, X5, K1, X11 // 6253550950d90f + VRANGEPS $15, X13, X5, K1, X11 // 6253550950dd0f + VRANGEPS $15, 7(SI)(DI*4), X5, K1, X11 // 62735509509cbe070000000f + VRANGEPS $15, -7(DI)(R8*2), X5, K1, X11 // 62335509509c47f9ffffff0f + VRANGEPS $15, X0, X25, K1, X11 // 6273350150d80f + VRANGEPS $15, X9, X25, K1, X11 // 6253350150d90f + VRANGEPS $15, X13, X25, K1, X11 // 6253350150dd0f + VRANGEPS $15, 7(SI)(DI*4), X25, K1, X11 // 62733501509cbe070000000f + VRANGEPS $15, -7(DI)(R8*2), X25, K1, X11 // 62333501509c47f9ffffff0f + VRANGEPS $15, X0, X20, K1, X23 // 62e35d0150f80f + VRANGEPS $15, X9, X20, K1, X23 // 62c35d0150f90f + VRANGEPS $15, X13, X20, K1, X23 // 62c35d0150fd0f + VRANGEPS $15, 7(SI)(DI*4), X20, K1, X23 // 62e35d0150bcbe070000000f + VRANGEPS $15, 
-7(DI)(R8*2), X20, K1, X23 // 62a35d0150bc47f9ffffff0f + VRANGEPS $15, X0, X5, K1, X23 // 62e3550950f80f + VRANGEPS $15, X9, X5, K1, X23 // 62c3550950f90f + VRANGEPS $15, X13, X5, K1, X23 // 62c3550950fd0f + VRANGEPS $15, 7(SI)(DI*4), X5, K1, X23 // 62e3550950bcbe070000000f + VRANGEPS $15, -7(DI)(R8*2), X5, K1, X23 // 62a3550950bc47f9ffffff0f + VRANGEPS $15, X0, X25, K1, X23 // 62e3350150f80f + VRANGEPS $15, X9, X25, K1, X23 // 62c3350150f90f + VRANGEPS $15, X13, X25, K1, X23 // 62c3350150fd0f + VRANGEPS $15, 7(SI)(DI*4), X25, K1, X23 // 62e3350150bcbe070000000f + VRANGEPS $15, -7(DI)(R8*2), X25, K1, X23 // 62a3350150bc47f9ffffff0f + VRANGEPS $15, X0, X20, K1, X2 // 62f35d0150d00f + VRANGEPS $15, X9, X20, K1, X2 // 62d35d0150d10f + VRANGEPS $15, X13, X20, K1, X2 // 62d35d0150d50f + VRANGEPS $15, 7(SI)(DI*4), X20, K1, X2 // 62f35d015094be070000000f + VRANGEPS $15, -7(DI)(R8*2), X20, K1, X2 // 62b35d01509447f9ffffff0f + VRANGEPS $15, X0, X5, K1, X2 // 62f3550950d00f + VRANGEPS $15, X9, X5, K1, X2 // 62d3550950d10f + VRANGEPS $15, X13, X5, K1, X2 // 62d3550950d50f + VRANGEPS $15, 7(SI)(DI*4), X5, K1, X2 // 62f355095094be070000000f + VRANGEPS $15, -7(DI)(R8*2), X5, K1, X2 // 62b35509509447f9ffffff0f + VRANGEPS $15, X0, X25, K1, X2 // 62f3350150d00f + VRANGEPS $15, X9, X25, K1, X2 // 62d3350150d10f + VRANGEPS $15, X13, X25, K1, X2 // 62d3350150d50f + VRANGEPS $15, 7(SI)(DI*4), X25, K1, X2 // 62f335015094be070000000f + VRANGEPS $15, -7(DI)(R8*2), X25, K1, X2 // 62b33501509447f9ffffff0f + VRANGEPS $0, Y21, Y5, K1, Y19 // 62a3552950dd00 + VRANGEPS $0, Y20, Y5, K1, Y19 // 62a3552950dc00 + VRANGEPS $0, Y6, Y5, K1, Y19 // 62e3552950de00 + VRANGEPS $0, 17(SP)(BP*1), Y5, K1, Y19 // 62e35529509c2c1100000000 + VRANGEPS $0, -7(CX)(DX*8), Y5, K1, Y19 // 62e35529509cd1f9ffffff00 + VRANGEPS $0, Y21, Y16, K1, Y19 // 62a37d2150dd00 + VRANGEPS $0, Y20, Y16, K1, Y19 // 62a37d2150dc00 + VRANGEPS $0, Y6, Y16, K1, Y19 // 62e37d2150de00 + VRANGEPS $0, 17(SP)(BP*1), Y16, K1, Y19 // 
62e37d21509c2c1100000000 + VRANGEPS $0, -7(CX)(DX*8), Y16, K1, Y19 // 62e37d21509cd1f9ffffff00 + VRANGEPS $0, Y21, Y2, K1, Y19 // 62a36d2950dd00 + VRANGEPS $0, Y20, Y2, K1, Y19 // 62a36d2950dc00 + VRANGEPS $0, Y6, Y2, K1, Y19 // 62e36d2950de00 + VRANGEPS $0, 17(SP)(BP*1), Y2, K1, Y19 // 62e36d29509c2c1100000000 + VRANGEPS $0, -7(CX)(DX*8), Y2, K1, Y19 // 62e36d29509cd1f9ffffff00 + VRANGEPS $0, Y21, Y5, K1, Y14 // 6233552950f500 + VRANGEPS $0, Y20, Y5, K1, Y14 // 6233552950f400 + VRANGEPS $0, Y6, Y5, K1, Y14 // 6273552950f600 + VRANGEPS $0, 17(SP)(BP*1), Y5, K1, Y14 // 6273552950b42c1100000000 + VRANGEPS $0, -7(CX)(DX*8), Y5, K1, Y14 // 6273552950b4d1f9ffffff00 + VRANGEPS $0, Y21, Y16, K1, Y14 // 62337d2150f500 + VRANGEPS $0, Y20, Y16, K1, Y14 // 62337d2150f400 + VRANGEPS $0, Y6, Y16, K1, Y14 // 62737d2150f600 + VRANGEPS $0, 17(SP)(BP*1), Y16, K1, Y14 // 62737d2150b42c1100000000 + VRANGEPS $0, -7(CX)(DX*8), Y16, K1, Y14 // 62737d2150b4d1f9ffffff00 + VRANGEPS $0, Y21, Y2, K1, Y14 // 62336d2950f500 + VRANGEPS $0, Y20, Y2, K1, Y14 // 62336d2950f400 + VRANGEPS $0, Y6, Y2, K1, Y14 // 62736d2950f600 + VRANGEPS $0, 17(SP)(BP*1), Y2, K1, Y14 // 62736d2950b42c1100000000 + VRANGEPS $0, -7(CX)(DX*8), Y2, K1, Y14 // 62736d2950b4d1f9ffffff00 + VRANGEPS $0, Y21, Y5, K1, Y21 // 62a3552950ed00 + VRANGEPS $0, Y20, Y5, K1, Y21 // 62a3552950ec00 + VRANGEPS $0, Y6, Y5, K1, Y21 // 62e3552950ee00 + VRANGEPS $0, 17(SP)(BP*1), Y5, K1, Y21 // 62e3552950ac2c1100000000 + VRANGEPS $0, -7(CX)(DX*8), Y5, K1, Y21 // 62e3552950acd1f9ffffff00 + VRANGEPS $0, Y21, Y16, K1, Y21 // 62a37d2150ed00 + VRANGEPS $0, Y20, Y16, K1, Y21 // 62a37d2150ec00 + VRANGEPS $0, Y6, Y16, K1, Y21 // 62e37d2150ee00 + VRANGEPS $0, 17(SP)(BP*1), Y16, K1, Y21 // 62e37d2150ac2c1100000000 + VRANGEPS $0, -7(CX)(DX*8), Y16, K1, Y21 // 62e37d2150acd1f9ffffff00 + VRANGEPS $0, Y21, Y2, K1, Y21 // 62a36d2950ed00 + VRANGEPS $0, Y20, Y2, K1, Y21 // 62a36d2950ec00 + VRANGEPS $0, Y6, Y2, K1, Y21 // 62e36d2950ee00 + VRANGEPS $0, 
17(SP)(BP*1), Y2, K1, Y21 // 62e36d2950ac2c1100000000 + VRANGEPS $0, -7(CX)(DX*8), Y2, K1, Y21 // 62e36d2950acd1f9ffffff00 + VRANGEPS $1, Z14, Z3, K7, Z27 // 6243654f50de01 + VRANGEPS $1, Z7, Z3, K7, Z27 // 6263654f50df01 + VRANGEPS $1, Z14, Z0, K7, Z27 // 62437d4f50de01 + VRANGEPS $1, Z7, Z0, K7, Z27 // 62637d4f50df01 + VRANGEPS $1, Z14, Z3, K7, Z14 // 6253654f50f601 + VRANGEPS $1, Z7, Z3, K7, Z14 // 6273654f50f701 + VRANGEPS $1, Z14, Z0, K7, Z14 // 62537d4f50f601 + VRANGEPS $1, Z7, Z0, K7, Z14 // 62737d4f50f701 + VRANGEPS $2, Z1, Z22, K2, Z8 // 62734d4250c102 + VRANGEPS $2, Z16, Z22, K2, Z8 // 62334d4250c002 + VRANGEPS $2, 99(R15)(R15*4), Z22, K2, Z8 // 62134d425084bf6300000002 + VRANGEPS $2, 15(DX), Z22, K2, Z8 // 62734d4250820f00000002 + VRANGEPS $2, Z1, Z25, K2, Z8 // 6273354250c102 + VRANGEPS $2, Z16, Z25, K2, Z8 // 6233354250c002 + VRANGEPS $2, 99(R15)(R15*4), Z25, K2, Z8 // 621335425084bf6300000002 + VRANGEPS $2, 15(DX), Z25, K2, Z8 // 6273354250820f00000002 + VRANGEPS $2, Z1, Z22, K2, Z24 // 62634d4250c102 + VRANGEPS $2, Z16, Z22, K2, Z24 // 62234d4250c002 + VRANGEPS $2, 99(R15)(R15*4), Z22, K2, Z24 // 62034d425084bf6300000002 + VRANGEPS $2, 15(DX), Z22, K2, Z24 // 62634d4250820f00000002 + VRANGEPS $2, Z1, Z25, K2, Z24 // 6263354250c102 + VRANGEPS $2, Z16, Z25, K2, Z24 // 6223354250c002 + VRANGEPS $2, 99(R15)(R15*4), Z25, K2, Z24 // 620335425084bf6300000002 + VRANGEPS $2, 15(DX), Z25, K2, Z24 // 6263354250820f00000002 + VRANGESD $3, X22, X2, K4, X2 // 62b3ed0c51d603 + VRANGESD $3, X5, X2, K4, X2 // 62f3ed0c51d503 + VRANGESD $3, X14, X2, K4, X2 // 62d3ed0c51d603 + VRANGESD $3, X22, X31, K4, X2 // 62b3850451d603 + VRANGESD $3, X5, X31, K4, X2 // 62f3850451d503 + VRANGESD $3, X14, X31, K4, X2 // 62d3850451d603 + VRANGESD $3, X22, X11, K4, X2 // 62b3a50c51d603 + VRANGESD $3, X5, X11, K4, X2 // 62f3a50c51d503 + VRANGESD $3, X14, X11, K4, X2 // 62d3a50c51d603 + VRANGESD $3, X22, X2, K4, X8 // 6233ed0c51c603 + VRANGESD $3, X5, X2, K4, X8 // 6273ed0c51c503 + 
VRANGESD $3, X14, X2, K4, X8 // 6253ed0c51c603 + VRANGESD $3, X22, X31, K4, X8 // 6233850451c603 + VRANGESD $3, X5, X31, K4, X8 // 6273850451c503 + VRANGESD $3, X14, X31, K4, X8 // 6253850451c603 + VRANGESD $3, X22, X11, K4, X8 // 6233a50c51c603 + VRANGESD $3, X5, X11, K4, X8 // 6273a50c51c503 + VRANGESD $3, X14, X11, K4, X8 // 6253a50c51c603 + VRANGESD $3, X22, X2, K4, X9 // 6233ed0c51ce03 + VRANGESD $3, X5, X2, K4, X9 // 6273ed0c51cd03 + VRANGESD $3, X14, X2, K4, X9 // 6253ed0c51ce03 + VRANGESD $3, X22, X31, K4, X9 // 6233850451ce03 + VRANGESD $3, X5, X31, K4, X9 // 6273850451cd03 + VRANGESD $3, X14, X31, K4, X9 // 6253850451ce03 + VRANGESD $3, X22, X11, K4, X9 // 6233a50c51ce03 + VRANGESD $3, X5, X11, K4, X9 // 6273a50c51cd03 + VRANGESD $3, X14, X11, K4, X9 // 6253a50c51ce03 + VRANGESD $4, X18, X15, K1, X0 // 62b3850951c204 or 62b3852951c204 or 62b3854951c204 + VRANGESD $4, X8, X15, K1, X0 // 62d3850951c004 or 62d3852951c004 or 62d3854951c004 + VRANGESD $4, X27, X15, K1, X0 // 6293850951c304 or 6293852951c304 or 6293854951c304 + VRANGESD $4, 7(AX)(CX*4), X15, K1, X0 // 62f385095184880700000004 or 62f385295184880700000004 or 62f385495184880700000004 + VRANGESD $4, 7(AX)(CX*1), X15, K1, X0 // 62f385095184080700000004 or 62f385295184080700000004 or 62f385495184080700000004 + VRANGESD $4, X18, X11, K1, X0 // 62b3a50951c204 or 62b3a52951c204 or 62b3a54951c204 + VRANGESD $4, X8, X11, K1, X0 // 62d3a50951c004 or 62d3a52951c004 or 62d3a54951c004 + VRANGESD $4, X27, X11, K1, X0 // 6293a50951c304 or 6293a52951c304 or 6293a54951c304 + VRANGESD $4, 7(AX)(CX*4), X11, K1, X0 // 62f3a5095184880700000004 or 62f3a5295184880700000004 or 62f3a5495184880700000004 + VRANGESD $4, 7(AX)(CX*1), X11, K1, X0 // 62f3a5095184080700000004 or 62f3a5295184080700000004 or 62f3a5495184080700000004 + VRANGESD $4, X18, X0, K1, X0 // 62b3fd0951c204 or 62b3fd2951c204 or 62b3fd4951c204 + VRANGESD $4, X8, X0, K1, X0 // 62d3fd0951c004 or 62d3fd2951c004 or 62d3fd4951c004 + VRANGESD $4, X27, X0, K1, X0 
// 6293fd0951c304 or 6293fd2951c304 or 6293fd4951c304 + VRANGESD $4, 7(AX)(CX*4), X0, K1, X0 // 62f3fd095184880700000004 or 62f3fd295184880700000004 or 62f3fd495184880700000004 + VRANGESD $4, 7(AX)(CX*1), X0, K1, X0 // 62f3fd095184080700000004 or 62f3fd295184080700000004 or 62f3fd495184080700000004 + VRANGESD $4, X18, X15, K1, X17 // 62a3850951ca04 or 62a3852951ca04 or 62a3854951ca04 + VRANGESD $4, X8, X15, K1, X17 // 62c3850951c804 or 62c3852951c804 or 62c3854951c804 + VRANGESD $4, X27, X15, K1, X17 // 6283850951cb04 or 6283852951cb04 or 6283854951cb04 + VRANGESD $4, 7(AX)(CX*4), X15, K1, X17 // 62e38509518c880700000004 or 62e38529518c880700000004 or 62e38549518c880700000004 + VRANGESD $4, 7(AX)(CX*1), X15, K1, X17 // 62e38509518c080700000004 or 62e38529518c080700000004 or 62e38549518c080700000004 + VRANGESD $4, X18, X11, K1, X17 // 62a3a50951ca04 or 62a3a52951ca04 or 62a3a54951ca04 + VRANGESD $4, X8, X11, K1, X17 // 62c3a50951c804 or 62c3a52951c804 or 62c3a54951c804 + VRANGESD $4, X27, X11, K1, X17 // 6283a50951cb04 or 6283a52951cb04 or 6283a54951cb04 + VRANGESD $4, 7(AX)(CX*4), X11, K1, X17 // 62e3a509518c880700000004 or 62e3a529518c880700000004 or 62e3a549518c880700000004 + VRANGESD $4, 7(AX)(CX*1), X11, K1, X17 // 62e3a509518c080700000004 or 62e3a529518c080700000004 or 62e3a549518c080700000004 + VRANGESD $4, X18, X0, K1, X17 // 62a3fd0951ca04 or 62a3fd2951ca04 or 62a3fd4951ca04 + VRANGESD $4, X8, X0, K1, X17 // 62c3fd0951c804 or 62c3fd2951c804 or 62c3fd4951c804 + VRANGESD $4, X27, X0, K1, X17 // 6283fd0951cb04 or 6283fd2951cb04 or 6283fd4951cb04 + VRANGESD $4, 7(AX)(CX*4), X0, K1, X17 // 62e3fd09518c880700000004 or 62e3fd29518c880700000004 or 62e3fd49518c880700000004 + VRANGESD $4, 7(AX)(CX*1), X0, K1, X17 // 62e3fd09518c080700000004 or 62e3fd29518c080700000004 or 62e3fd49518c080700000004 + VRANGESD $4, X18, X15, K1, X7 // 62b3850951fa04 or 62b3852951fa04 or 62b3854951fa04 + VRANGESD $4, X8, X15, K1, X7 // 62d3850951f804 or 62d3852951f804 or 62d3854951f804 + 
VRANGESD $4, X27, X15, K1, X7 // 6293850951fb04 or 6293852951fb04 or 6293854951fb04 + VRANGESD $4, 7(AX)(CX*4), X15, K1, X7 // 62f3850951bc880700000004 or 62f3852951bc880700000004 or 62f3854951bc880700000004 + VRANGESD $4, 7(AX)(CX*1), X15, K1, X7 // 62f3850951bc080700000004 or 62f3852951bc080700000004 or 62f3854951bc080700000004 + VRANGESD $4, X18, X11, K1, X7 // 62b3a50951fa04 or 62b3a52951fa04 or 62b3a54951fa04 + VRANGESD $4, X8, X11, K1, X7 // 62d3a50951f804 or 62d3a52951f804 or 62d3a54951f804 + VRANGESD $4, X27, X11, K1, X7 // 6293a50951fb04 or 6293a52951fb04 or 6293a54951fb04 + VRANGESD $4, 7(AX)(CX*4), X11, K1, X7 // 62f3a50951bc880700000004 or 62f3a52951bc880700000004 or 62f3a54951bc880700000004 + VRANGESD $4, 7(AX)(CX*1), X11, K1, X7 // 62f3a50951bc080700000004 or 62f3a52951bc080700000004 or 62f3a54951bc080700000004 + VRANGESD $4, X18, X0, K1, X7 // 62b3fd0951fa04 or 62b3fd2951fa04 or 62b3fd4951fa04 + VRANGESD $4, X8, X0, K1, X7 // 62d3fd0951f804 or 62d3fd2951f804 or 62d3fd4951f804 + VRANGESD $4, X27, X0, K1, X7 // 6293fd0951fb04 or 6293fd2951fb04 or 6293fd4951fb04 + VRANGESD $4, 7(AX)(CX*4), X0, K1, X7 // 62f3fd0951bc880700000004 or 62f3fd2951bc880700000004 or 62f3fd4951bc880700000004 + VRANGESD $4, 7(AX)(CX*1), X0, K1, X7 // 62f3fd0951bc080700000004 or 62f3fd2951bc080700000004 or 62f3fd4951bc080700000004 + VRANGESS $5, X7, X15, K3, X25 // 6263050b51cf05 + VRANGESS $5, X13, X15, K3, X25 // 6243050b51cd05 + VRANGESS $5, X8, X15, K3, X25 // 6243050b51c805 + VRANGESS $5, X7, X28, K3, X25 // 62631d0351cf05 + VRANGESS $5, X13, X28, K3, X25 // 62431d0351cd05 + VRANGESS $5, X8, X28, K3, X25 // 62431d0351c805 + VRANGESS $5, X7, X15, K3, X3 // 62f3050b51df05 + VRANGESS $5, X13, X15, K3, X3 // 62d3050b51dd05 + VRANGESS $5, X8, X15, K3, X3 // 62d3050b51d805 + VRANGESS $5, X7, X28, K3, X3 // 62f31d0351df05 + VRANGESS $5, X13, X28, K3, X3 // 62d31d0351dd05 + VRANGESS $5, X8, X28, K3, X3 // 62d31d0351d805 + VRANGESS $5, X7, X15, K3, X18 // 62e3050b51d705 + VRANGESS $5, 
X13, X15, K3, X18 // 62c3050b51d505 + VRANGESS $5, X8, X15, K3, X18 // 62c3050b51d005 + VRANGESS $5, X7, X28, K3, X18 // 62e31d0351d705 + VRANGESS $5, X13, X28, K3, X18 // 62c31d0351d505 + VRANGESS $5, X8, X28, K3, X18 // 62c31d0351d005 + VRANGESS $6, X6, X22, K4, X24 // 62634d0451c606 or 62634d2451c606 or 62634d4451c606 + VRANGESS $6, X7, X22, K4, X24 // 62634d0451c706 or 62634d2451c706 or 62634d4451c706 + VRANGESS $6, X8, X22, K4, X24 // 62434d0451c006 or 62434d2451c006 or 62434d4451c006 + VRANGESS $6, 7(SI)(DI*1), X22, K4, X24 // 62634d0451843e0700000006 or 62634d2451843e0700000006 or 62634d4451843e0700000006 + VRANGESS $6, 15(DX)(BX*8), X22, K4, X24 // 62634d045184da0f00000006 or 62634d245184da0f00000006 or 62634d445184da0f00000006 + VRANGESS $6, X6, X1, K4, X24 // 6263750c51c606 or 6263752c51c606 or 6263754c51c606 + VRANGESS $6, X7, X1, K4, X24 // 6263750c51c706 or 6263752c51c706 or 6263754c51c706 + VRANGESS $6, X8, X1, K4, X24 // 6243750c51c006 or 6243752c51c006 or 6243754c51c006 + VRANGESS $6, 7(SI)(DI*1), X1, K4, X24 // 6263750c51843e0700000006 or 6263752c51843e0700000006 or 6263754c51843e0700000006 + VRANGESS $6, 15(DX)(BX*8), X1, K4, X24 // 6263750c5184da0f00000006 or 6263752c5184da0f00000006 or 6263754c5184da0f00000006 + VRANGESS $6, X6, X11, K4, X24 // 6263250c51c606 or 6263252c51c606 or 6263254c51c606 + VRANGESS $6, X7, X11, K4, X24 // 6263250c51c706 or 6263252c51c706 or 6263254c51c706 + VRANGESS $6, X8, X11, K4, X24 // 6243250c51c006 or 6243252c51c006 or 6243254c51c006 + VRANGESS $6, 7(SI)(DI*1), X11, K4, X24 // 6263250c51843e0700000006 or 6263252c51843e0700000006 or 6263254c51843e0700000006 + VRANGESS $6, 15(DX)(BX*8), X11, K4, X24 // 6263250c5184da0f00000006 or 6263252c5184da0f00000006 or 6263254c5184da0f00000006 + VRANGESS $6, X6, X22, K4, X7 // 62f34d0451fe06 or 62f34d2451fe06 or 62f34d4451fe06 + VRANGESS $6, X7, X22, K4, X7 // 62f34d0451ff06 or 62f34d2451ff06 or 62f34d4451ff06 + VRANGESS $6, X8, X22, K4, X7 // 62d34d0451f806 or 62d34d2451f806 or 
62d34d4451f806 + VRANGESS $6, 7(SI)(DI*1), X22, K4, X7 // 62f34d0451bc3e0700000006 or 62f34d2451bc3e0700000006 or 62f34d4451bc3e0700000006 + VRANGESS $6, 15(DX)(BX*8), X22, K4, X7 // 62f34d0451bcda0f00000006 or 62f34d2451bcda0f00000006 or 62f34d4451bcda0f00000006 + VRANGESS $6, X6, X1, K4, X7 // 62f3750c51fe06 or 62f3752c51fe06 or 62f3754c51fe06 + VRANGESS $6, X7, X1, K4, X7 // 62f3750c51ff06 or 62f3752c51ff06 or 62f3754c51ff06 + VRANGESS $6, X8, X1, K4, X7 // 62d3750c51f806 or 62d3752c51f806 or 62d3754c51f806 + VRANGESS $6, 7(SI)(DI*1), X1, K4, X7 // 62f3750c51bc3e0700000006 or 62f3752c51bc3e0700000006 or 62f3754c51bc3e0700000006 + VRANGESS $6, 15(DX)(BX*8), X1, K4, X7 // 62f3750c51bcda0f00000006 or 62f3752c51bcda0f00000006 or 62f3754c51bcda0f00000006 + VRANGESS $6, X6, X11, K4, X7 // 62f3250c51fe06 or 62f3252c51fe06 or 62f3254c51fe06 + VRANGESS $6, X7, X11, K4, X7 // 62f3250c51ff06 or 62f3252c51ff06 or 62f3254c51ff06 + VRANGESS $6, X8, X11, K4, X7 // 62d3250c51f806 or 62d3252c51f806 or 62d3254c51f806 + VRANGESS $6, 7(SI)(DI*1), X11, K4, X7 // 62f3250c51bc3e0700000006 or 62f3252c51bc3e0700000006 or 62f3254c51bc3e0700000006 + VRANGESS $6, 15(DX)(BX*8), X11, K4, X7 // 62f3250c51bcda0f00000006 or 62f3252c51bcda0f00000006 or 62f3254c51bcda0f00000006 + VRANGESS $6, X6, X22, K4, X0 // 62f34d0451c606 or 62f34d2451c606 or 62f34d4451c606 + VRANGESS $6, X7, X22, K4, X0 // 62f34d0451c706 or 62f34d2451c706 or 62f34d4451c706 + VRANGESS $6, X8, X22, K4, X0 // 62d34d0451c006 or 62d34d2451c006 or 62d34d4451c006 + VRANGESS $6, 7(SI)(DI*1), X22, K4, X0 // 62f34d0451843e0700000006 or 62f34d2451843e0700000006 or 62f34d4451843e0700000006 + VRANGESS $6, 15(DX)(BX*8), X22, K4, X0 // 62f34d045184da0f00000006 or 62f34d245184da0f00000006 or 62f34d445184da0f00000006 + VRANGESS $6, X6, X1, K4, X0 // 62f3750c51c606 or 62f3752c51c606 or 62f3754c51c606 + VRANGESS $6, X7, X1, K4, X0 // 62f3750c51c706 or 62f3752c51c706 or 62f3754c51c706 + VRANGESS $6, X8, X1, K4, X0 // 62d3750c51c006 or 
62d3752c51c006 or 62d3754c51c006 + VRANGESS $6, 7(SI)(DI*1), X1, K4, X0 // 62f3750c51843e0700000006 or 62f3752c51843e0700000006 or 62f3754c51843e0700000006 + VRANGESS $6, 15(DX)(BX*8), X1, K4, X0 // 62f3750c5184da0f00000006 or 62f3752c5184da0f00000006 or 62f3754c5184da0f00000006 + VRANGESS $6, X6, X11, K4, X0 // 62f3250c51c606 or 62f3252c51c606 or 62f3254c51c606 + VRANGESS $6, X7, X11, K4, X0 // 62f3250c51c706 or 62f3252c51c706 or 62f3254c51c706 + VRANGESS $6, X8, X11, K4, X0 // 62d3250c51c006 or 62d3252c51c006 or 62d3254c51c006 + VRANGESS $6, 7(SI)(DI*1), X11, K4, X0 // 62f3250c51843e0700000006 or 62f3252c51843e0700000006 or 62f3254c51843e0700000006 + VRANGESS $6, 15(DX)(BX*8), X11, K4, X0 // 62f3250c5184da0f00000006 or 62f3252c5184da0f00000006 or 62f3254c5184da0f00000006 + VREDUCEPD $126, X8, K3, X31 // 6243fd0b56f87e + VREDUCEPD $126, X1, K3, X31 // 6263fd0b56f97e + VREDUCEPD $126, X0, K3, X31 // 6263fd0b56f87e + VREDUCEPD $126, 99(R15)(R15*1), K3, X31 // 6203fd0b56bc3f630000007e + VREDUCEPD $126, (DX), K3, X31 // 6263fd0b563a7e + VREDUCEPD $126, X8, K3, X16 // 62c3fd0b56c07e + VREDUCEPD $126, X1, K3, X16 // 62e3fd0b56c17e + VREDUCEPD $126, X0, K3, X16 // 62e3fd0b56c07e + VREDUCEPD $126, 99(R15)(R15*1), K3, X16 // 6283fd0b56843f630000007e + VREDUCEPD $126, (DX), K3, X16 // 62e3fd0b56027e + VREDUCEPD $126, X8, K3, X7 // 62d3fd0b56f87e + VREDUCEPD $126, X1, K3, X7 // 62f3fd0b56f97e + VREDUCEPD $126, X0, K3, X7 // 62f3fd0b56f87e + VREDUCEPD $126, 99(R15)(R15*1), K3, X7 // 6293fd0b56bc3f630000007e + VREDUCEPD $126, (DX), K3, X7 // 62f3fd0b563a7e + VREDUCEPD $94, Y0, K3, Y5 // 62f3fd2b56e85e + VREDUCEPD $94, Y22, K3, Y5 // 62b3fd2b56ee5e + VREDUCEPD $94, Y13, K3, Y5 // 62d3fd2b56ed5e + VREDUCEPD $94, (R14), K3, Y5 // 62d3fd2b562e5e + VREDUCEPD $94, -7(DI)(R8*8), K3, Y5 // 62b3fd2b56acc7f9ffffff5e + VREDUCEPD $94, Y0, K3, Y28 // 6263fd2b56e05e + VREDUCEPD $94, Y22, K3, Y28 // 6223fd2b56e65e + VREDUCEPD $94, Y13, K3, Y28 // 6243fd2b56e55e + VREDUCEPD $94, (R14), K3, 
Y28 // 6243fd2b56265e + VREDUCEPD $94, -7(DI)(R8*8), K3, Y28 // 6223fd2b56a4c7f9ffffff5e + VREDUCEPD $94, Y0, K3, Y7 // 62f3fd2b56f85e + VREDUCEPD $94, Y22, K3, Y7 // 62b3fd2b56fe5e + VREDUCEPD $94, Y13, K3, Y7 // 62d3fd2b56fd5e + VREDUCEPD $94, (R14), K3, Y7 // 62d3fd2b563e5e + VREDUCEPD $94, -7(DI)(R8*8), K3, Y7 // 62b3fd2b56bcc7f9ffffff5e + VREDUCEPD $121, Z3, K2, Z26 // 6263fd4a56d379 + VREDUCEPD $121, Z0, K2, Z26 // 6263fd4a56d079 + VREDUCEPD $121, Z3, K2, Z3 // 62f3fd4a56db79 + VREDUCEPD $121, Z0, K2, Z3 // 62f3fd4a56d879 + VREDUCEPD $13, Z11, K1, Z21 // 62c3fd4956eb0d + VREDUCEPD $13, Z25, K1, Z21 // 6283fd4956e90d + VREDUCEPD $13, -17(BP), K1, Z21 // 62e3fd4956adefffffff0d + VREDUCEPD $13, -15(R14)(R15*8), K1, Z21 // 6283fd4956acfef1ffffff0d + VREDUCEPD $13, Z11, K1, Z13 // 6253fd4956eb0d + VREDUCEPD $13, Z25, K1, Z13 // 6213fd4956e90d + VREDUCEPD $13, -17(BP), K1, Z13 // 6273fd4956adefffffff0d + VREDUCEPD $13, -15(R14)(R15*8), K1, Z13 // 6213fd4956acfef1ffffff0d + VREDUCEPS $65, X21, K2, X15 // 62337d0a56fd41 + VREDUCEPS $65, X0, K2, X15 // 62737d0a56f841 + VREDUCEPS $65, X28, K2, X15 // 62137d0a56fc41 + VREDUCEPS $65, -17(BP)(SI*8), K2, X15 // 62737d0a56bcf5efffffff41 + VREDUCEPS $65, (R15), K2, X15 // 62537d0a563f41 + VREDUCEPS $65, X21, K2, X0 // 62b37d0a56c541 + VREDUCEPS $65, X0, K2, X0 // 62f37d0a56c041 + VREDUCEPS $65, X28, K2, X0 // 62937d0a56c441 + VREDUCEPS $65, -17(BP)(SI*8), K2, X0 // 62f37d0a5684f5efffffff41 + VREDUCEPS $65, (R15), K2, X0 // 62d37d0a560741 + VREDUCEPS $65, X21, K2, X16 // 62a37d0a56c541 + VREDUCEPS $65, X0, K2, X16 // 62e37d0a56c041 + VREDUCEPS $65, X28, K2, X16 // 62837d0a56c441 + VREDUCEPS $65, -17(BP)(SI*8), K2, X16 // 62e37d0a5684f5efffffff41 + VREDUCEPS $65, (R15), K2, X16 // 62c37d0a560741 + VREDUCEPS $67, Y17, K1, Y12 // 62337d2956e143 + VREDUCEPS $67, Y7, K1, Y12 // 62737d2956e743 + VREDUCEPS $67, Y9, K1, Y12 // 62537d2956e143 + VREDUCEPS $67, 99(R15)(R15*4), K1, Y12 // 62137d2956a4bf6300000043 + VREDUCEPS $67, 15(DX), 
K1, Y12 // 62737d2956a20f00000043 + VREDUCEPS $67, Y17, K1, Y1 // 62b37d2956c943 + VREDUCEPS $67, Y7, K1, Y1 // 62f37d2956cf43 + VREDUCEPS $67, Y9, K1, Y1 // 62d37d2956c943 + VREDUCEPS $67, 99(R15)(R15*4), K1, Y1 // 62937d29568cbf6300000043 + VREDUCEPS $67, 15(DX), K1, Y1 // 62f37d29568a0f00000043 + VREDUCEPS $67, Y17, K1, Y14 // 62337d2956f143 + VREDUCEPS $67, Y7, K1, Y14 // 62737d2956f743 + VREDUCEPS $67, Y9, K1, Y14 // 62537d2956f143 + VREDUCEPS $67, 99(R15)(R15*4), K1, Y14 // 62137d2956b4bf6300000043 + VREDUCEPS $67, 15(DX), K1, Y14 // 62737d2956b20f00000043 + VREDUCEPS $127, Z27, K7, Z3 // 62937d4f56db7f + VREDUCEPS $127, Z15, K7, Z3 // 62d37d4f56df7f + VREDUCEPS $127, Z27, K7, Z12 // 62137d4f56e37f + VREDUCEPS $127, Z15, K7, Z12 // 62537d4f56e77f + VREDUCEPS $0, Z23, K1, Z23 // 62a37d4956ff00 + VREDUCEPS $0, Z6, K1, Z23 // 62e37d4956fe00 + VREDUCEPS $0, 17(SP)(BP*2), K1, Z23 // 62e37d4956bc6c1100000000 + VREDUCEPS $0, -7(DI)(R8*4), K1, Z23 // 62a37d4956bc87f9ffffff00 + VREDUCEPS $0, Z23, K1, Z5 // 62b37d4956ef00 + VREDUCEPS $0, Z6, K1, Z5 // 62f37d4956ee00 + VREDUCEPS $0, 17(SP)(BP*2), K1, Z5 // 62f37d4956ac6c1100000000 + VREDUCEPS $0, -7(DI)(R8*4), K1, Z5 // 62b37d4956ac87f9ffffff00 + VREDUCESD $97, X1, X7, K1, X22 // 62e3c50957f161 + VREDUCESD $97, X7, X7, K1, X22 // 62e3c50957f761 + VREDUCESD $97, X9, X7, K1, X22 // 62c3c50957f161 + VREDUCESD $97, X1, X16, K1, X22 // 62e3fd0157f161 + VREDUCESD $97, X7, X16, K1, X22 // 62e3fd0157f761 + VREDUCESD $97, X9, X16, K1, X22 // 62c3fd0157f161 + VREDUCESD $97, X1, X31, K1, X22 // 62e3850157f161 + VREDUCESD $97, X7, X31, K1, X22 // 62e3850157f761 + VREDUCESD $97, X9, X31, K1, X22 // 62c3850157f161 + VREDUCESD $97, X1, X7, K1, X7 // 62f3c50957f961 + VREDUCESD $97, X7, X7, K1, X7 // 62f3c50957ff61 + VREDUCESD $97, X9, X7, K1, X7 // 62d3c50957f961 + VREDUCESD $97, X1, X16, K1, X7 // 62f3fd0157f961 + VREDUCESD $97, X7, X16, K1, X7 // 62f3fd0157ff61 + VREDUCESD $97, X9, X16, K1, X7 // 62d3fd0157f961 + VREDUCESD $97, X1, 
X31, K1, X7 // 62f3850157f961 + VREDUCESD $97, X7, X31, K1, X7 // 62f3850157ff61 + VREDUCESD $97, X9, X31, K1, X7 // 62d3850157f961 + VREDUCESD $97, X1, X7, K1, X19 // 62e3c50957d961 + VREDUCESD $97, X7, X7, K1, X19 // 62e3c50957df61 + VREDUCESD $97, X9, X7, K1, X19 // 62c3c50957d961 + VREDUCESD $97, X1, X16, K1, X19 // 62e3fd0157d961 + VREDUCESD $97, X7, X16, K1, X19 // 62e3fd0157df61 + VREDUCESD $97, X9, X16, K1, X19 // 62c3fd0157d961 + VREDUCESD $97, X1, X31, K1, X19 // 62e3850157d961 + VREDUCESD $97, X7, X31, K1, X19 // 62e3850157df61 + VREDUCESD $97, X9, X31, K1, X19 // 62c3850157d961 + VREDUCESD $81, X17, X12, K1, X15 // 62339d0957f951 or 62339d2957f951 or 62339d4957f951 + VREDUCESD $81, X15, X12, K1, X15 // 62539d0957ff51 or 62539d2957ff51 or 62539d4957ff51 + VREDUCESD $81, X8, X12, K1, X15 // 62539d0957f851 or 62539d2957f851 or 62539d4957f851 + VREDUCESD $81, 7(SI)(DI*4), X12, K1, X15 // 62739d0957bcbe0700000051 or 62739d2957bcbe0700000051 or 62739d4957bcbe0700000051 + VREDUCESD $81, -7(DI)(R8*2), X12, K1, X15 // 62339d0957bc47f9ffffff51 or 62339d2957bc47f9ffffff51 or 62339d4957bc47f9ffffff51 + VREDUCESD $81, X17, X14, K1, X15 // 62338d0957f951 or 62338d2957f951 or 62338d4957f951 + VREDUCESD $81, X15, X14, K1, X15 // 62538d0957ff51 or 62538d2957ff51 or 62538d4957ff51 + VREDUCESD $81, X8, X14, K1, X15 // 62538d0957f851 or 62538d2957f851 or 62538d4957f851 + VREDUCESD $81, 7(SI)(DI*4), X14, K1, X15 // 62738d0957bcbe0700000051 or 62738d2957bcbe0700000051 or 62738d4957bcbe0700000051 + VREDUCESD $81, -7(DI)(R8*2), X14, K1, X15 // 62338d0957bc47f9ffffff51 or 62338d2957bc47f9ffffff51 or 62338d4957bc47f9ffffff51 + VREDUCESD $81, X17, X5, K1, X15 // 6233d50957f951 or 6233d52957f951 or 6233d54957f951 + VREDUCESD $81, X15, X5, K1, X15 // 6253d50957ff51 or 6253d52957ff51 or 6253d54957ff51 + VREDUCESD $81, X8, X5, K1, X15 // 6253d50957f851 or 6253d52957f851 or 6253d54957f851 + VREDUCESD $81, 7(SI)(DI*4), X5, K1, X15 // 6273d50957bcbe0700000051 or 6273d52957bcbe0700000051 
or 6273d54957bcbe0700000051 + VREDUCESD $81, -7(DI)(R8*2), X5, K1, X15 // 6233d50957bc47f9ffffff51 or 6233d52957bc47f9ffffff51 or 6233d54957bc47f9ffffff51 + VREDUCESD $81, X17, X12, K1, X12 // 62339d0957e151 or 62339d2957e151 or 62339d4957e151 + VREDUCESD $81, X15, X12, K1, X12 // 62539d0957e751 or 62539d2957e751 or 62539d4957e751 + VREDUCESD $81, X8, X12, K1, X12 // 62539d0957e051 or 62539d2957e051 or 62539d4957e051 + VREDUCESD $81, 7(SI)(DI*4), X12, K1, X12 // 62739d0957a4be0700000051 or 62739d2957a4be0700000051 or 62739d4957a4be0700000051 + VREDUCESD $81, -7(DI)(R8*2), X12, K1, X12 // 62339d0957a447f9ffffff51 or 62339d2957a447f9ffffff51 or 62339d4957a447f9ffffff51 + VREDUCESD $81, X17, X14, K1, X12 // 62338d0957e151 or 62338d2957e151 or 62338d4957e151 + VREDUCESD $81, X15, X14, K1, X12 // 62538d0957e751 or 62538d2957e751 or 62538d4957e751 + VREDUCESD $81, X8, X14, K1, X12 // 62538d0957e051 or 62538d2957e051 or 62538d4957e051 + VREDUCESD $81, 7(SI)(DI*4), X14, K1, X12 // 62738d0957a4be0700000051 or 62738d2957a4be0700000051 or 62738d4957a4be0700000051 + VREDUCESD $81, -7(DI)(R8*2), X14, K1, X12 // 62338d0957a447f9ffffff51 or 62338d2957a447f9ffffff51 or 62338d4957a447f9ffffff51 + VREDUCESD $81, X17, X5, K1, X12 // 6233d50957e151 or 6233d52957e151 or 6233d54957e151 + VREDUCESD $81, X15, X5, K1, X12 // 6253d50957e751 or 6253d52957e751 or 6253d54957e751 + VREDUCESD $81, X8, X5, K1, X12 // 6253d50957e051 or 6253d52957e051 or 6253d54957e051 + VREDUCESD $81, 7(SI)(DI*4), X5, K1, X12 // 6273d50957a4be0700000051 or 6273d52957a4be0700000051 or 6273d54957a4be0700000051 + VREDUCESD $81, -7(DI)(R8*2), X5, K1, X12 // 6233d50957a447f9ffffff51 or 6233d52957a447f9ffffff51 or 6233d54957a447f9ffffff51 + VREDUCESD $81, X17, X12, K1, X0 // 62b39d0957c151 or 62b39d2957c151 or 62b39d4957c151 + VREDUCESD $81, X15, X12, K1, X0 // 62d39d0957c751 or 62d39d2957c751 or 62d39d4957c751 + VREDUCESD $81, X8, X12, K1, X0 // 62d39d0957c051 or 62d39d2957c051 or 62d39d4957c051 + VREDUCESD $81, 
7(SI)(DI*4), X12, K1, X0 // 62f39d095784be0700000051 or 62f39d295784be0700000051 or 62f39d495784be0700000051 + VREDUCESD $81, -7(DI)(R8*2), X12, K1, X0 // 62b39d09578447f9ffffff51 or 62b39d29578447f9ffffff51 or 62b39d49578447f9ffffff51 + VREDUCESD $81, X17, X14, K1, X0 // 62b38d0957c151 or 62b38d2957c151 or 62b38d4957c151 + VREDUCESD $81, X15, X14, K1, X0 // 62d38d0957c751 or 62d38d2957c751 or 62d38d4957c751 + VREDUCESD $81, X8, X14, K1, X0 // 62d38d0957c051 or 62d38d2957c051 or 62d38d4957c051 + VREDUCESD $81, 7(SI)(DI*4), X14, K1, X0 // 62f38d095784be0700000051 or 62f38d295784be0700000051 or 62f38d495784be0700000051 + VREDUCESD $81, -7(DI)(R8*2), X14, K1, X0 // 62b38d09578447f9ffffff51 or 62b38d29578447f9ffffff51 or 62b38d49578447f9ffffff51 + VREDUCESD $81, X17, X5, K1, X0 // 62b3d50957c151 or 62b3d52957c151 or 62b3d54957c151 + VREDUCESD $81, X15, X5, K1, X0 // 62d3d50957c751 or 62d3d52957c751 or 62d3d54957c751 + VREDUCESD $81, X8, X5, K1, X0 // 62d3d50957c051 or 62d3d52957c051 or 62d3d54957c051 + VREDUCESD $81, 7(SI)(DI*4), X5, K1, X0 // 62f3d5095784be0700000051 or 62f3d5295784be0700000051 or 62f3d5495784be0700000051 + VREDUCESD $81, -7(DI)(R8*2), X5, K1, X0 // 62b3d509578447f9ffffff51 or 62b3d529578447f9ffffff51 or 62b3d549578447f9ffffff51 + VREDUCESS $42, X9, X13, K7, X3 // 62d3150f57d92a + VREDUCESS $42, X15, X13, K7, X3 // 62d3150f57df2a + VREDUCESS $42, X26, X13, K7, X3 // 6293150f57da2a + VREDUCESS $42, X9, X28, K7, X3 // 62d31d0757d92a + VREDUCESS $42, X15, X28, K7, X3 // 62d31d0757df2a + VREDUCESS $42, X26, X28, K7, X3 // 62931d0757da2a + VREDUCESS $42, X9, X24, K7, X3 // 62d33d0757d92a + VREDUCESS $42, X15, X24, K7, X3 // 62d33d0757df2a + VREDUCESS $42, X26, X24, K7, X3 // 62933d0757da2a + VREDUCESS $42, X9, X13, K7, X26 // 6243150f57d12a + VREDUCESS $42, X15, X13, K7, X26 // 6243150f57d72a + VREDUCESS $42, X26, X13, K7, X26 // 6203150f57d22a + VREDUCESS $42, X9, X28, K7, X26 // 62431d0757d12a + VREDUCESS $42, X15, X28, K7, X26 // 62431d0757d72a + 
VREDUCESS $42, X26, X28, K7, X26 // 62031d0757d22a + VREDUCESS $42, X9, X24, K7, X26 // 62433d0757d12a + VREDUCESS $42, X15, X24, K7, X26 // 62433d0757d72a + VREDUCESS $42, X26, X24, K7, X26 // 62033d0757d22a + VREDUCESS $42, X9, X13, K7, X23 // 62c3150f57f92a + VREDUCESS $42, X15, X13, K7, X23 // 62c3150f57ff2a + VREDUCESS $42, X26, X13, K7, X23 // 6283150f57fa2a + VREDUCESS $42, X9, X28, K7, X23 // 62c31d0757f92a + VREDUCESS $42, X15, X28, K7, X23 // 62c31d0757ff2a + VREDUCESS $42, X26, X28, K7, X23 // 62831d0757fa2a + VREDUCESS $42, X9, X24, K7, X23 // 62c33d0757f92a + VREDUCESS $42, X15, X24, K7, X23 // 62c33d0757ff2a + VREDUCESS $42, X26, X24, K7, X23 // 62833d0757fa2a + VREDUCESS $79, X7, X11, K2, X18 // 62e3250a57d74f or 62e3252a57d74f or 62e3254a57d74f + VREDUCESS $79, X0, X11, K2, X18 // 62e3250a57d04f or 62e3252a57d04f or 62e3254a57d04f + VREDUCESS $79, 99(R15)(R15*8), X11, K2, X18 // 6283250a5794ff630000004f or 6283252a5794ff630000004f or 6283254a5794ff630000004f + VREDUCESS $79, 7(AX)(CX*8), X11, K2, X18 // 62e3250a5794c8070000004f or 62e3252a5794c8070000004f or 62e3254a5794c8070000004f + VREDUCESS $79, X7, X31, K2, X18 // 62e3050257d74f or 62e3052257d74f or 62e3054257d74f + VREDUCESS $79, X0, X31, K2, X18 // 62e3050257d04f or 62e3052257d04f or 62e3054257d04f + VREDUCESS $79, 99(R15)(R15*8), X31, K2, X18 // 628305025794ff630000004f or 628305225794ff630000004f or 628305425794ff630000004f + VREDUCESS $79, 7(AX)(CX*8), X31, K2, X18 // 62e305025794c8070000004f or 62e305225794c8070000004f or 62e305425794c8070000004f + VREDUCESS $79, X7, X3, K2, X18 // 62e3650a57d74f or 62e3652a57d74f or 62e3654a57d74f + VREDUCESS $79, X0, X3, K2, X18 // 62e3650a57d04f or 62e3652a57d04f or 62e3654a57d04f + VREDUCESS $79, 99(R15)(R15*8), X3, K2, X18 // 6283650a5794ff630000004f or 6283652a5794ff630000004f or 6283654a5794ff630000004f + VREDUCESS $79, 7(AX)(CX*8), X3, K2, X18 // 62e3650a5794c8070000004f or 62e3652a5794c8070000004f or 62e3654a5794c8070000004f + VREDUCESS $79, X7, 
X11, K2, X21 // 62e3250a57ef4f or 62e3252a57ef4f or 62e3254a57ef4f + VREDUCESS $79, X0, X11, K2, X21 // 62e3250a57e84f or 62e3252a57e84f or 62e3254a57e84f + VREDUCESS $79, 99(R15)(R15*8), X11, K2, X21 // 6283250a57acff630000004f or 6283252a57acff630000004f or 6283254a57acff630000004f + VREDUCESS $79, 7(AX)(CX*8), X11, K2, X21 // 62e3250a57acc8070000004f or 62e3252a57acc8070000004f or 62e3254a57acc8070000004f + VREDUCESS $79, X7, X31, K2, X21 // 62e3050257ef4f or 62e3052257ef4f or 62e3054257ef4f + VREDUCESS $79, X0, X31, K2, X21 // 62e3050257e84f or 62e3052257e84f or 62e3054257e84f + VREDUCESS $79, 99(R15)(R15*8), X31, K2, X21 // 6283050257acff630000004f or 6283052257acff630000004f or 6283054257acff630000004f + VREDUCESS $79, 7(AX)(CX*8), X31, K2, X21 // 62e3050257acc8070000004f or 62e3052257acc8070000004f or 62e3054257acc8070000004f + VREDUCESS $79, X7, X3, K2, X21 // 62e3650a57ef4f or 62e3652a57ef4f or 62e3654a57ef4f + VREDUCESS $79, X0, X3, K2, X21 // 62e3650a57e84f or 62e3652a57e84f or 62e3654a57e84f + VREDUCESS $79, 99(R15)(R15*8), X3, K2, X21 // 6283650a57acff630000004f or 6283652a57acff630000004f or 6283654a57acff630000004f + VREDUCESS $79, 7(AX)(CX*8), X3, K2, X21 // 62e3650a57acc8070000004f or 62e3652a57acc8070000004f or 62e3654a57acc8070000004f + VREDUCESS $79, X7, X11, K2, X1 // 62f3250a57cf4f or 62f3252a57cf4f or 62f3254a57cf4f + VREDUCESS $79, X0, X11, K2, X1 // 62f3250a57c84f or 62f3252a57c84f or 62f3254a57c84f + VREDUCESS $79, 99(R15)(R15*8), X11, K2, X1 // 6293250a578cff630000004f or 6293252a578cff630000004f or 6293254a578cff630000004f + VREDUCESS $79, 7(AX)(CX*8), X11, K2, X1 // 62f3250a578cc8070000004f or 62f3252a578cc8070000004f or 62f3254a578cc8070000004f + VREDUCESS $79, X7, X31, K2, X1 // 62f3050257cf4f or 62f3052257cf4f or 62f3054257cf4f + VREDUCESS $79, X0, X31, K2, X1 // 62f3050257c84f or 62f3052257c84f or 62f3054257c84f + VREDUCESS $79, 99(R15)(R15*8), X31, K2, X1 // 62930502578cff630000004f or 62930522578cff630000004f or 
62930542578cff630000004f + VREDUCESS $79, 7(AX)(CX*8), X31, K2, X1 // 62f30502578cc8070000004f or 62f30522578cc8070000004f or 62f30542578cc8070000004f + VREDUCESS $79, X7, X3, K2, X1 // 62f3650a57cf4f or 62f3652a57cf4f or 62f3654a57cf4f + VREDUCESS $79, X0, X3, K2, X1 // 62f3650a57c84f or 62f3652a57c84f or 62f3654a57c84f + VREDUCESS $79, 99(R15)(R15*8), X3, K2, X1 // 6293650a578cff630000004f or 6293652a578cff630000004f or 6293654a578cff630000004f + VREDUCESS $79, 7(AX)(CX*8), X3, K2, X1 // 62f3650a578cc8070000004f or 62f3652a578cc8070000004f or 62f3654a578cc8070000004f + VXORPD X13, X3, K5, X17 // 62c1e50d57cd + VXORPD X28, X3, K5, X17 // 6281e50d57cc + VXORPD X24, X3, K5, X17 // 6281e50d57c8 + VXORPD -7(CX)(DX*1), X3, K5, X17 // 62e1e50d578c11f9ffffff + VXORPD -15(R14)(R15*4), X3, K5, X17 // 6281e50d578cbef1ffffff + VXORPD X13, X26, K5, X17 // 62c1ad0557cd + VXORPD X28, X26, K5, X17 // 6281ad0557cc + VXORPD X24, X26, K5, X17 // 6281ad0557c8 + VXORPD -7(CX)(DX*1), X26, K5, X17 // 62e1ad05578c11f9ffffff + VXORPD -15(R14)(R15*4), X26, K5, X17 // 6281ad05578cbef1ffffff + VXORPD X13, X23, K5, X17 // 62c1c50557cd + VXORPD X28, X23, K5, X17 // 6281c50557cc + VXORPD X24, X23, K5, X17 // 6281c50557c8 + VXORPD -7(CX)(DX*1), X23, K5, X17 // 62e1c505578c11f9ffffff + VXORPD -15(R14)(R15*4), X23, K5, X17 // 6281c505578cbef1ffffff + VXORPD X13, X3, K5, X15 // 6251e50d57fd + VXORPD X28, X3, K5, X15 // 6211e50d57fc + VXORPD X24, X3, K5, X15 // 6211e50d57f8 + VXORPD -7(CX)(DX*1), X3, K5, X15 // 6271e50d57bc11f9ffffff + VXORPD -15(R14)(R15*4), X3, K5, X15 // 6211e50d57bcbef1ffffff + VXORPD X13, X26, K5, X15 // 6251ad0557fd + VXORPD X28, X26, K5, X15 // 6211ad0557fc + VXORPD X24, X26, K5, X15 // 6211ad0557f8 + VXORPD -7(CX)(DX*1), X26, K5, X15 // 6271ad0557bc11f9ffffff + VXORPD -15(R14)(R15*4), X26, K5, X15 // 6211ad0557bcbef1ffffff + VXORPD X13, X23, K5, X15 // 6251c50557fd + VXORPD X28, X23, K5, X15 // 6211c50557fc + VXORPD X24, X23, K5, X15 // 6211c50557f8 + VXORPD -7(CX)(DX*1), 
X23, K5, X15 // 6271c50557bc11f9ffffff + VXORPD -15(R14)(R15*4), X23, K5, X15 // 6211c50557bcbef1ffffff + VXORPD X13, X3, K5, X8 // 6251e50d57c5 + VXORPD X28, X3, K5, X8 // 6211e50d57c4 + VXORPD X24, X3, K5, X8 // 6211e50d57c0 + VXORPD -7(CX)(DX*1), X3, K5, X8 // 6271e50d578411f9ffffff + VXORPD -15(R14)(R15*4), X3, K5, X8 // 6211e50d5784bef1ffffff + VXORPD X13, X26, K5, X8 // 6251ad0557c5 + VXORPD X28, X26, K5, X8 // 6211ad0557c4 + VXORPD X24, X26, K5, X8 // 6211ad0557c0 + VXORPD -7(CX)(DX*1), X26, K5, X8 // 6271ad05578411f9ffffff + VXORPD -15(R14)(R15*4), X26, K5, X8 // 6211ad055784bef1ffffff + VXORPD X13, X23, K5, X8 // 6251c50557c5 + VXORPD X28, X23, K5, X8 // 6211c50557c4 + VXORPD X24, X23, K5, X8 // 6211c50557c0 + VXORPD -7(CX)(DX*1), X23, K5, X8 // 6271c505578411f9ffffff + VXORPD -15(R14)(R15*4), X23, K5, X8 // 6211c5055784bef1ffffff + VXORPD Y5, Y20, K3, Y0 // 62f1dd2357c5 + VXORPD Y28, Y20, K3, Y0 // 6291dd2357c4 + VXORPD Y7, Y20, K3, Y0 // 62f1dd2357c7 + VXORPD -7(CX), Y20, K3, Y0 // 62f1dd235781f9ffffff + VXORPD 15(DX)(BX*4), Y20, K3, Y0 // 62f1dd2357849a0f000000 + VXORPD Y5, Y12, K3, Y0 // 62f19d2b57c5 + VXORPD Y28, Y12, K3, Y0 // 62919d2b57c4 + VXORPD Y7, Y12, K3, Y0 // 62f19d2b57c7 + VXORPD -7(CX), Y12, K3, Y0 // 62f19d2b5781f9ffffff + VXORPD 15(DX)(BX*4), Y12, K3, Y0 // 62f19d2b57849a0f000000 + VXORPD Y5, Y3, K3, Y0 // 62f1e52b57c5 + VXORPD Y28, Y3, K3, Y0 // 6291e52b57c4 + VXORPD Y7, Y3, K3, Y0 // 62f1e52b57c7 + VXORPD -7(CX), Y3, K3, Y0 // 62f1e52b5781f9ffffff + VXORPD 15(DX)(BX*4), Y3, K3, Y0 // 62f1e52b57849a0f000000 + VXORPD Y5, Y20, K3, Y3 // 62f1dd2357dd + VXORPD Y28, Y20, K3, Y3 // 6291dd2357dc + VXORPD Y7, Y20, K3, Y3 // 62f1dd2357df + VXORPD -7(CX), Y20, K3, Y3 // 62f1dd235799f9ffffff + VXORPD 15(DX)(BX*4), Y20, K3, Y3 // 62f1dd23579c9a0f000000 + VXORPD Y5, Y12, K3, Y3 // 62f19d2b57dd + VXORPD Y28, Y12, K3, Y3 // 62919d2b57dc + VXORPD Y7, Y12, K3, Y3 // 62f19d2b57df + VXORPD -7(CX), Y12, K3, Y3 // 62f19d2b5799f9ffffff + VXORPD 15(DX)(BX*4), 
Y12, K3, Y3 // 62f19d2b579c9a0f000000 + VXORPD Y5, Y3, K3, Y3 // 62f1e52b57dd + VXORPD Y28, Y3, K3, Y3 // 6291e52b57dc + VXORPD Y7, Y3, K3, Y3 // 62f1e52b57df + VXORPD -7(CX), Y3, K3, Y3 // 62f1e52b5799f9ffffff + VXORPD 15(DX)(BX*4), Y3, K3, Y3 // 62f1e52b579c9a0f000000 + VXORPD Y5, Y20, K3, Y5 // 62f1dd2357ed + VXORPD Y28, Y20, K3, Y5 // 6291dd2357ec + VXORPD Y7, Y20, K3, Y5 // 62f1dd2357ef + VXORPD -7(CX), Y20, K3, Y5 // 62f1dd2357a9f9ffffff + VXORPD 15(DX)(BX*4), Y20, K3, Y5 // 62f1dd2357ac9a0f000000 + VXORPD Y5, Y12, K3, Y5 // 62f19d2b57ed + VXORPD Y28, Y12, K3, Y5 // 62919d2b57ec + VXORPD Y7, Y12, K3, Y5 // 62f19d2b57ef + VXORPD -7(CX), Y12, K3, Y5 // 62f19d2b57a9f9ffffff + VXORPD 15(DX)(BX*4), Y12, K3, Y5 // 62f19d2b57ac9a0f000000 + VXORPD Y5, Y3, K3, Y5 // 62f1e52b57ed + VXORPD Y28, Y3, K3, Y5 // 6291e52b57ec + VXORPD Y7, Y3, K3, Y5 // 62f1e52b57ef + VXORPD -7(CX), Y3, K3, Y5 // 62f1e52b57a9f9ffffff + VXORPD 15(DX)(BX*4), Y3, K3, Y5 // 62f1e52b57ac9a0f000000 + VXORPD Z13, Z28, K4, Z26 // 62419d4457d5 + VXORPD Z21, Z28, K4, Z26 // 62219d4457d5 + VXORPD 15(R8)(R14*1), Z28, K4, Z26 // 62019d445794300f000000 + VXORPD 15(R8)(R14*2), Z28, K4, Z26 // 62019d445794700f000000 + VXORPD Z13, Z6, K4, Z26 // 6241cd4c57d5 + VXORPD Z21, Z6, K4, Z26 // 6221cd4c57d5 + VXORPD 15(R8)(R14*1), Z6, K4, Z26 // 6201cd4c5794300f000000 + VXORPD 15(R8)(R14*2), Z6, K4, Z26 // 6201cd4c5794700f000000 + VXORPD Z13, Z28, K4, Z14 // 62519d4457f5 + VXORPD Z21, Z28, K4, Z14 // 62319d4457f5 + VXORPD 15(R8)(R14*1), Z28, K4, Z14 // 62119d4457b4300f000000 + VXORPD 15(R8)(R14*2), Z28, K4, Z14 // 62119d4457b4700f000000 + VXORPD Z13, Z6, K4, Z14 // 6251cd4c57f5 + VXORPD Z21, Z6, K4, Z14 // 6231cd4c57f5 + VXORPD 15(R8)(R14*1), Z6, K4, Z14 // 6211cd4c57b4300f000000 + VXORPD 15(R8)(R14*2), Z6, K4, Z14 // 6211cd4c57b4700f000000 + VXORPS X11, X18, K2, X9 // 62516c0257cb + VXORPS X31, X18, K2, X9 // 62116c0257cf + VXORPS X3, X18, K2, X9 // 62716c0257cb + VXORPS 15(DX)(BX*1), X18, K2, X9 // 
62716c02578c1a0f000000 + VXORPS -7(CX)(DX*2), X18, K2, X9 // 62716c02578c51f9ffffff + VXORPS X11, X21, K2, X9 // 6251540257cb + VXORPS X31, X21, K2, X9 // 6211540257cf + VXORPS X3, X21, K2, X9 // 6271540257cb + VXORPS 15(DX)(BX*1), X21, K2, X9 // 62715402578c1a0f000000 + VXORPS -7(CX)(DX*2), X21, K2, X9 // 62715402578c51f9ffffff + VXORPS X11, X1, K2, X9 // 6251740a57cb + VXORPS X31, X1, K2, X9 // 6211740a57cf + VXORPS X3, X1, K2, X9 // 6271740a57cb + VXORPS 15(DX)(BX*1), X1, K2, X9 // 6271740a578c1a0f000000 + VXORPS -7(CX)(DX*2), X1, K2, X9 // 6271740a578c51f9ffffff + VXORPS X11, X18, K2, X15 // 62516c0257fb + VXORPS X31, X18, K2, X15 // 62116c0257ff + VXORPS X3, X18, K2, X15 // 62716c0257fb + VXORPS 15(DX)(BX*1), X18, K2, X15 // 62716c0257bc1a0f000000 + VXORPS -7(CX)(DX*2), X18, K2, X15 // 62716c0257bc51f9ffffff + VXORPS X11, X21, K2, X15 // 6251540257fb + VXORPS X31, X21, K2, X15 // 6211540257ff + VXORPS X3, X21, K2, X15 // 6271540257fb + VXORPS 15(DX)(BX*1), X21, K2, X15 // 6271540257bc1a0f000000 + VXORPS -7(CX)(DX*2), X21, K2, X15 // 6271540257bc51f9ffffff + VXORPS X11, X1, K2, X15 // 6251740a57fb + VXORPS X31, X1, K2, X15 // 6211740a57ff + VXORPS X3, X1, K2, X15 // 6271740a57fb + VXORPS 15(DX)(BX*1), X1, K2, X15 // 6271740a57bc1a0f000000 + VXORPS -7(CX)(DX*2), X1, K2, X15 // 6271740a57bc51f9ffffff + VXORPS X11, X18, K2, X26 // 62416c0257d3 + VXORPS X31, X18, K2, X26 // 62016c0257d7 + VXORPS X3, X18, K2, X26 // 62616c0257d3 + VXORPS 15(DX)(BX*1), X18, K2, X26 // 62616c0257941a0f000000 + VXORPS -7(CX)(DX*2), X18, K2, X26 // 62616c02579451f9ffffff + VXORPS X11, X21, K2, X26 // 6241540257d3 + VXORPS X31, X21, K2, X26 // 6201540257d7 + VXORPS X3, X21, K2, X26 // 6261540257d3 + VXORPS 15(DX)(BX*1), X21, K2, X26 // 6261540257941a0f000000 + VXORPS -7(CX)(DX*2), X21, K2, X26 // 62615402579451f9ffffff + VXORPS X11, X1, K2, X26 // 6241740a57d3 + VXORPS X31, X1, K2, X26 // 6201740a57d7 + VXORPS X3, X1, K2, X26 // 6261740a57d3 + VXORPS 15(DX)(BX*1), X1, K2, X26 // 
6261740a57941a0f000000 + VXORPS -7(CX)(DX*2), X1, K2, X26 // 6261740a579451f9ffffff + VXORPS Y17, Y12, K2, Y0 // 62b11c2a57c1 + VXORPS Y7, Y12, K2, Y0 // 62f11c2a57c7 + VXORPS Y9, Y12, K2, Y0 // 62d11c2a57c1 + VXORPS 99(R15)(R15*8), Y12, K2, Y0 // 62911c2a5784ff63000000 + VXORPS 7(AX)(CX*8), Y12, K2, Y0 // 62f11c2a5784c807000000 + VXORPS Y17, Y1, K2, Y0 // 62b1742a57c1 + VXORPS Y7, Y1, K2, Y0 // 62f1742a57c7 + VXORPS Y9, Y1, K2, Y0 // 62d1742a57c1 + VXORPS 99(R15)(R15*8), Y1, K2, Y0 // 6291742a5784ff63000000 + VXORPS 7(AX)(CX*8), Y1, K2, Y0 // 62f1742a5784c807000000 + VXORPS Y17, Y14, K2, Y0 // 62b10c2a57c1 + VXORPS Y7, Y14, K2, Y0 // 62f10c2a57c7 + VXORPS Y9, Y14, K2, Y0 // 62d10c2a57c1 + VXORPS 99(R15)(R15*8), Y14, K2, Y0 // 62910c2a5784ff63000000 + VXORPS 7(AX)(CX*8), Y14, K2, Y0 // 62f10c2a5784c807000000 + VXORPS Y17, Y12, K2, Y22 // 62a11c2a57f1 + VXORPS Y7, Y12, K2, Y22 // 62e11c2a57f7 + VXORPS Y9, Y12, K2, Y22 // 62c11c2a57f1 + VXORPS 99(R15)(R15*8), Y12, K2, Y22 // 62811c2a57b4ff63000000 + VXORPS 7(AX)(CX*8), Y12, K2, Y22 // 62e11c2a57b4c807000000 + VXORPS Y17, Y1, K2, Y22 // 62a1742a57f1 + VXORPS Y7, Y1, K2, Y22 // 62e1742a57f7 + VXORPS Y9, Y1, K2, Y22 // 62c1742a57f1 + VXORPS 99(R15)(R15*8), Y1, K2, Y22 // 6281742a57b4ff63000000 + VXORPS 7(AX)(CX*8), Y1, K2, Y22 // 62e1742a57b4c807000000 + VXORPS Y17, Y14, K2, Y22 // 62a10c2a57f1 + VXORPS Y7, Y14, K2, Y22 // 62e10c2a57f7 + VXORPS Y9, Y14, K2, Y22 // 62c10c2a57f1 + VXORPS 99(R15)(R15*8), Y14, K2, Y22 // 62810c2a57b4ff63000000 + VXORPS 7(AX)(CX*8), Y14, K2, Y22 // 62e10c2a57b4c807000000 + VXORPS Y17, Y12, K2, Y13 // 62311c2a57e9 + VXORPS Y7, Y12, K2, Y13 // 62711c2a57ef + VXORPS Y9, Y12, K2, Y13 // 62511c2a57e9 + VXORPS 99(R15)(R15*8), Y12, K2, Y13 // 62111c2a57acff63000000 + VXORPS 7(AX)(CX*8), Y12, K2, Y13 // 62711c2a57acc807000000 + VXORPS Y17, Y1, K2, Y13 // 6231742a57e9 + VXORPS Y7, Y1, K2, Y13 // 6271742a57ef + VXORPS Y9, Y1, K2, Y13 // 6251742a57e9 + VXORPS 99(R15)(R15*8), Y1, K2, Y13 // 
6211742a57acff63000000 + VXORPS 7(AX)(CX*8), Y1, K2, Y13 // 6271742a57acc807000000 + VXORPS Y17, Y14, K2, Y13 // 62310c2a57e9 + VXORPS Y7, Y14, K2, Y13 // 62710c2a57ef + VXORPS Y9, Y14, K2, Y13 // 62510c2a57e9 + VXORPS 99(R15)(R15*8), Y14, K2, Y13 // 62110c2a57acff63000000 + VXORPS 7(AX)(CX*8), Y14, K2, Y13 // 62710c2a57acc807000000 + VXORPS Z21, Z3, K3, Z26 // 6221644b57d5 + VXORPS Z13, Z3, K3, Z26 // 6241644b57d5 + VXORPS (R14), Z3, K3, Z26 // 6241644b5716 + VXORPS -7(DI)(R8*8), Z3, K3, Z26 // 6221644b5794c7f9ffffff + VXORPS Z21, Z0, K3, Z26 // 62217c4b57d5 + VXORPS Z13, Z0, K3, Z26 // 62417c4b57d5 + VXORPS (R14), Z0, K3, Z26 // 62417c4b5716 + VXORPS -7(DI)(R8*8), Z0, K3, Z26 // 62217c4b5794c7f9ffffff + VXORPS Z21, Z3, K3, Z3 // 62b1644b57dd + VXORPS Z13, Z3, K3, Z3 // 62d1644b57dd + VXORPS (R14), Z3, K3, Z3 // 62d1644b571e + VXORPS -7(DI)(R8*8), Z3, K3, Z3 // 62b1644b579cc7f9ffffff + VXORPS Z21, Z0, K3, Z3 // 62b17c4b57dd + VXORPS Z13, Z0, K3, Z3 // 62d17c4b57dd + VXORPS (R14), Z0, K3, Z3 // 62d17c4b571e + VXORPS -7(DI)(R8*8), Z0, K3, Z3 // 62b17c4b579cc7f9ffffff + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512er.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512er.s new file mode 100644 index 0000000000000000000000000000000000000000..855a8d9ea383f8f3a5d92b198dfbdb3a5b28f403 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512er.s @@ -0,0 +1,331 @@ +// Code generated by avx512test. DO NOT EDIT. 
+ +#include "../../../../../../runtime/textflag.h" + +TEXT asmtest_avx512er(SB), NOSPLIT, $0 + VEXP2PD Z17, K7, Z20 // 62a2fd4fc8e1 + VEXP2PD Z0, K7, Z20 // 62e2fd4fc8e0 + VEXP2PD Z17, K7, Z0 // 62b2fd4fc8c1 + VEXP2PD Z0, K7, Z0 // 62f2fd4fc8c0 + VEXP2PD Z31, K2, Z17 // 6282fd4ac8cf + VEXP2PD Z0, K2, Z17 // 62e2fd4ac8c8 + VEXP2PD (R14), K2, Z17 // 62c2fd4ac80e + VEXP2PD -7(DI)(R8*8), K2, Z17 // 62a2fd4ac88cc7f9ffffff + VEXP2PD Z31, K2, Z23 // 6282fd4ac8ff + VEXP2PD Z0, K2, Z23 // 62e2fd4ac8f8 + VEXP2PD (R14), K2, Z23 // 62c2fd4ac83e + VEXP2PD -7(DI)(R8*8), K2, Z23 // 62a2fd4ac8bcc7f9ffffff + VEXP2PS Z6, K4, Z21 // 62e27d4cc8ee + VEXP2PS Z9, K4, Z21 // 62c27d4cc8e9 + VEXP2PS Z6, K4, Z9 // 62727d4cc8ce + VEXP2PS Z9, K4, Z9 // 62527d4cc8c9 + VEXP2PS Z20, K1, Z1 // 62b27d49c8cc + VEXP2PS Z9, K1, Z1 // 62d27d49c8c9 + VEXP2PS 99(R15)(R15*4), K1, Z1 // 62927d49c88cbf63000000 + VEXP2PS 15(DX), K1, Z1 // 62f27d49c88a0f000000 + VEXP2PS Z20, K1, Z9 // 62327d49c8cc + VEXP2PS Z9, K1, Z9 // 62527d49c8c9 + VEXP2PS 99(R15)(R15*4), K1, Z9 // 62127d49c88cbf63000000 + VEXP2PS 15(DX), K1, Z9 // 62727d49c88a0f000000 + VRCP28PD Z13, K7, Z11 // 6252fd4fcadd + VRCP28PD Z14, K7, Z11 // 6252fd4fcade + VRCP28PD Z13, K7, Z5 // 62d2fd4fcaed + VRCP28PD Z14, K7, Z5 // 62d2fd4fcaee + VRCP28PD Z2, K2, Z5 // 62f2fd4acaea + VRCP28PD -7(CX)(DX*1), K2, Z5 // 62f2fd4acaac11f9ffffff + VRCP28PD -15(R14)(R15*4), K2, Z5 // 6292fd4acaacbef1ffffff + VRCP28PD Z2, K2, Z23 // 62e2fd4acafa + VRCP28PD -7(CX)(DX*1), K2, Z23 // 62e2fd4acabc11f9ffffff + VRCP28PD -15(R14)(R15*4), K2, Z23 // 6282fd4acabcbef1ffffff + VRCP28PS Z26, K5, Z6 // 62927d4dcaf2 + VRCP28PS Z14, K5, Z6 // 62d27d4dcaf6 + VRCP28PS Z26, K5, Z14 // 62127d4dcaf2 + VRCP28PS Z14, K5, Z14 // 62527d4dcaf6 + VRCP28PS Z13, K3, Z28 // 62427d4bcae5 + VRCP28PS Z21, K3, Z28 // 62227d4bcae5 + VRCP28PS 15(DX)(BX*1), K3, Z28 // 62627d4bcaa41a0f000000 + VRCP28PS -7(CX)(DX*2), K3, Z28 // 62627d4bcaa451f9ffffff + VRCP28PS Z13, K3, Z6 // 62d27d4bcaf5 + VRCP28PS Z21, 
K3, Z6 // 62b27d4bcaf5 + VRCP28PS 15(DX)(BX*1), K3, Z6 // 62f27d4bcab41a0f000000 + VRCP28PS -7(CX)(DX*2), K3, Z6 // 62f27d4bcab451f9ffffff + VRCP28SD X25, X14, K4, X19 // 62828d0ccbd9 + VRCP28SD X11, X14, K4, X19 // 62c28d0ccbdb + VRCP28SD X17, X14, K4, X19 // 62a28d0ccbd9 + VRCP28SD X25, X0, K4, X19 // 6282fd0ccbd9 + VRCP28SD X11, X0, K4, X19 // 62c2fd0ccbdb + VRCP28SD X17, X0, K4, X19 // 62a2fd0ccbd9 + VRCP28SD X25, X14, K4, X13 // 62128d0ccbe9 + VRCP28SD X11, X14, K4, X13 // 62528d0ccbeb + VRCP28SD X17, X14, K4, X13 // 62328d0ccbe9 + VRCP28SD X25, X0, K4, X13 // 6212fd0ccbe9 + VRCP28SD X11, X0, K4, X13 // 6252fd0ccbeb + VRCP28SD X17, X0, K4, X13 // 6232fd0ccbe9 + VRCP28SD X25, X14, K4, X2 // 62928d0ccbd1 + VRCP28SD X11, X14, K4, X2 // 62d28d0ccbd3 + VRCP28SD X17, X14, K4, X2 // 62b28d0ccbd1 + VRCP28SD X25, X0, K4, X2 // 6292fd0ccbd1 + VRCP28SD X11, X0, K4, X2 // 62d2fd0ccbd3 + VRCP28SD X17, X0, K4, X2 // 62b2fd0ccbd1 + VRCP28SD X2, X2, K2, X18 // 62e2ed0acbd2 or 62e2ed2acbd2 or 62e2ed4acbd2 + VRCP28SD X27, X2, K2, X18 // 6282ed0acbd3 or 6282ed2acbd3 or 6282ed4acbd3 + VRCP28SD X26, X2, K2, X18 // 6282ed0acbd2 or 6282ed2acbd2 or 6282ed4acbd2 + VRCP28SD 17(SP)(BP*8), X2, K2, X18 // 62e2ed0acb94ec11000000 or 62e2ed2acb94ec11000000 or 62e2ed4acb94ec11000000 + VRCP28SD 17(SP)(BP*4), X2, K2, X18 // 62e2ed0acb94ac11000000 or 62e2ed2acb94ac11000000 or 62e2ed4acb94ac11000000 + VRCP28SD X2, X24, K2, X18 // 62e2bd02cbd2 or 62e2bd22cbd2 or 62e2bd42cbd2 + VRCP28SD X27, X24, K2, X18 // 6282bd02cbd3 or 6282bd22cbd3 or 6282bd42cbd3 + VRCP28SD X26, X24, K2, X18 // 6282bd02cbd2 or 6282bd22cbd2 or 6282bd42cbd2 + VRCP28SD 17(SP)(BP*8), X24, K2, X18 // 62e2bd02cb94ec11000000 or 62e2bd22cb94ec11000000 or 62e2bd42cb94ec11000000 + VRCP28SD 17(SP)(BP*4), X24, K2, X18 // 62e2bd02cb94ac11000000 or 62e2bd22cb94ac11000000 or 62e2bd42cb94ac11000000 + VRCP28SD X2, X2, K2, X11 // 6272ed0acbda or 6272ed2acbda or 6272ed4acbda + VRCP28SD X27, X2, K2, X11 // 6212ed0acbdb or 6212ed2acbdb or 
6212ed4acbdb + VRCP28SD X26, X2, K2, X11 // 6212ed0acbda or 6212ed2acbda or 6212ed4acbda + VRCP28SD 17(SP)(BP*8), X2, K2, X11 // 6272ed0acb9cec11000000 or 6272ed2acb9cec11000000 or 6272ed4acb9cec11000000 + VRCP28SD 17(SP)(BP*4), X2, K2, X11 // 6272ed0acb9cac11000000 or 6272ed2acb9cac11000000 or 6272ed4acb9cac11000000 + VRCP28SD X2, X24, K2, X11 // 6272bd02cbda or 6272bd22cbda or 6272bd42cbda + VRCP28SD X27, X24, K2, X11 // 6212bd02cbdb or 6212bd22cbdb or 6212bd42cbdb + VRCP28SD X26, X24, K2, X11 // 6212bd02cbda or 6212bd22cbda or 6212bd42cbda + VRCP28SD 17(SP)(BP*8), X24, K2, X11 // 6272bd02cb9cec11000000 or 6272bd22cb9cec11000000 or 6272bd42cb9cec11000000 + VRCP28SD 17(SP)(BP*4), X24, K2, X11 // 6272bd02cb9cac11000000 or 6272bd22cb9cac11000000 or 6272bd42cb9cac11000000 + VRCP28SD X2, X2, K2, X9 // 6272ed0acbca or 6272ed2acbca or 6272ed4acbca + VRCP28SD X27, X2, K2, X9 // 6212ed0acbcb or 6212ed2acbcb or 6212ed4acbcb + VRCP28SD X26, X2, K2, X9 // 6212ed0acbca or 6212ed2acbca or 6212ed4acbca + VRCP28SD 17(SP)(BP*8), X2, K2, X9 // 6272ed0acb8cec11000000 or 6272ed2acb8cec11000000 or 6272ed4acb8cec11000000 + VRCP28SD 17(SP)(BP*4), X2, K2, X9 // 6272ed0acb8cac11000000 or 6272ed2acb8cac11000000 or 6272ed4acb8cac11000000 + VRCP28SD X2, X24, K2, X9 // 6272bd02cbca or 6272bd22cbca or 6272bd42cbca + VRCP28SD X27, X24, K2, X9 // 6212bd02cbcb or 6212bd22cbcb or 6212bd42cbcb + VRCP28SD X26, X24, K2, X9 // 6212bd02cbca or 6212bd22cbca or 6212bd42cbca + VRCP28SD 17(SP)(BP*8), X24, K2, X9 // 6272bd02cb8cec11000000 or 6272bd22cb8cec11000000 or 6272bd42cb8cec11000000 + VRCP28SD 17(SP)(BP*4), X24, K2, X9 // 6272bd02cb8cac11000000 or 6272bd22cb8cac11000000 or 6272bd42cb8cac11000000 + VRCP28SS X13, X11, K2, X22 // 62c2250acbf5 + VRCP28SS X6, X11, K2, X22 // 62e2250acbf6 + VRCP28SS X12, X11, K2, X22 // 62c2250acbf4 + VRCP28SS X13, X15, K2, X22 // 62c2050acbf5 + VRCP28SS X6, X15, K2, X22 // 62e2050acbf6 + VRCP28SS X12, X15, K2, X22 // 62c2050acbf4 + VRCP28SS X13, X30, K2, X22 // 
62c20d02cbf5 + VRCP28SS X6, X30, K2, X22 // 62e20d02cbf6 + VRCP28SS X12, X30, K2, X22 // 62c20d02cbf4 + VRCP28SS X13, X11, K2, X30 // 6242250acbf5 + VRCP28SS X6, X11, K2, X30 // 6262250acbf6 + VRCP28SS X12, X11, K2, X30 // 6242250acbf4 + VRCP28SS X13, X15, K2, X30 // 6242050acbf5 + VRCP28SS X6, X15, K2, X30 // 6262050acbf6 + VRCP28SS X12, X15, K2, X30 // 6242050acbf4 + VRCP28SS X13, X30, K2, X30 // 62420d02cbf5 + VRCP28SS X6, X30, K2, X30 // 62620d02cbf6 + VRCP28SS X12, X30, K2, X30 // 62420d02cbf4 + VRCP28SS X13, X11, K2, X3 // 62d2250acbdd + VRCP28SS X6, X11, K2, X3 // 62f2250acbde + VRCP28SS X12, X11, K2, X3 // 62d2250acbdc + VRCP28SS X13, X15, K2, X3 // 62d2050acbdd + VRCP28SS X6, X15, K2, X3 // 62f2050acbde + VRCP28SS X12, X15, K2, X3 // 62d2050acbdc + VRCP28SS X13, X30, K2, X3 // 62d20d02cbdd + VRCP28SS X6, X30, K2, X3 // 62f20d02cbde + VRCP28SS X12, X30, K2, X3 // 62d20d02cbdc + VRCP28SS X26, X20, K3, X23 // 62825d03cbfa or 62825d23cbfa or 62825d43cbfa + VRCP28SS X19, X20, K3, X23 // 62a25d03cbfb or 62a25d23cbfb or 62a25d43cbfb + VRCP28SS X0, X20, K3, X23 // 62e25d03cbf8 or 62e25d23cbf8 or 62e25d43cbf8 + VRCP28SS -7(CX), X20, K3, X23 // 62e25d03cbb9f9ffffff or 62e25d23cbb9f9ffffff or 62e25d43cbb9f9ffffff + VRCP28SS 15(DX)(BX*4), X20, K3, X23 // 62e25d03cbbc9a0f000000 or 62e25d23cbbc9a0f000000 or 62e25d43cbbc9a0f000000 + VRCP28SS X26, X2, K3, X23 // 62826d0bcbfa or 62826d2bcbfa or 62826d4bcbfa + VRCP28SS X19, X2, K3, X23 // 62a26d0bcbfb or 62a26d2bcbfb or 62a26d4bcbfb + VRCP28SS X0, X2, K3, X23 // 62e26d0bcbf8 or 62e26d2bcbf8 or 62e26d4bcbf8 + VRCP28SS -7(CX), X2, K3, X23 // 62e26d0bcbb9f9ffffff or 62e26d2bcbb9f9ffffff or 62e26d4bcbb9f9ffffff + VRCP28SS 15(DX)(BX*4), X2, K3, X23 // 62e26d0bcbbc9a0f000000 or 62e26d2bcbbc9a0f000000 or 62e26d4bcbbc9a0f000000 + VRCP28SS X26, X9, K3, X23 // 6282350bcbfa or 6282352bcbfa or 6282354bcbfa + VRCP28SS X19, X9, K3, X23 // 62a2350bcbfb or 62a2352bcbfb or 62a2354bcbfb + VRCP28SS X0, X9, K3, X23 // 62e2350bcbf8 or 
62e2352bcbf8 or 62e2354bcbf8 + VRCP28SS -7(CX), X9, K3, X23 // 62e2350bcbb9f9ffffff or 62e2352bcbb9f9ffffff or 62e2354bcbb9f9ffffff + VRCP28SS 15(DX)(BX*4), X9, K3, X23 // 62e2350bcbbc9a0f000000 or 62e2352bcbbc9a0f000000 or 62e2354bcbbc9a0f000000 + VRCP28SS X26, X20, K3, X30 // 62025d03cbf2 or 62025d23cbf2 or 62025d43cbf2 + VRCP28SS X19, X20, K3, X30 // 62225d03cbf3 or 62225d23cbf3 or 62225d43cbf3 + VRCP28SS X0, X20, K3, X30 // 62625d03cbf0 or 62625d23cbf0 or 62625d43cbf0 + VRCP28SS -7(CX), X20, K3, X30 // 62625d03cbb1f9ffffff or 62625d23cbb1f9ffffff or 62625d43cbb1f9ffffff + VRCP28SS 15(DX)(BX*4), X20, K3, X30 // 62625d03cbb49a0f000000 or 62625d23cbb49a0f000000 or 62625d43cbb49a0f000000 + VRCP28SS X26, X2, K3, X30 // 62026d0bcbf2 or 62026d2bcbf2 or 62026d4bcbf2 + VRCP28SS X19, X2, K3, X30 // 62226d0bcbf3 or 62226d2bcbf3 or 62226d4bcbf3 + VRCP28SS X0, X2, K3, X30 // 62626d0bcbf0 or 62626d2bcbf0 or 62626d4bcbf0 + VRCP28SS -7(CX), X2, K3, X30 // 62626d0bcbb1f9ffffff or 62626d2bcbb1f9ffffff or 62626d4bcbb1f9ffffff + VRCP28SS 15(DX)(BX*4), X2, K3, X30 // 62626d0bcbb49a0f000000 or 62626d2bcbb49a0f000000 or 62626d4bcbb49a0f000000 + VRCP28SS X26, X9, K3, X30 // 6202350bcbf2 or 6202352bcbf2 or 6202354bcbf2 + VRCP28SS X19, X9, K3, X30 // 6222350bcbf3 or 6222352bcbf3 or 6222354bcbf3 + VRCP28SS X0, X9, K3, X30 // 6262350bcbf0 or 6262352bcbf0 or 6262354bcbf0 + VRCP28SS -7(CX), X9, K3, X30 // 6262350bcbb1f9ffffff or 6262352bcbb1f9ffffff or 6262354bcbb1f9ffffff + VRCP28SS 15(DX)(BX*4), X9, K3, X30 // 6262350bcbb49a0f000000 or 6262352bcbb49a0f000000 or 6262354bcbb49a0f000000 + VRCP28SS X26, X20, K3, X8 // 62125d03cbc2 or 62125d23cbc2 or 62125d43cbc2 + VRCP28SS X19, X20, K3, X8 // 62325d03cbc3 or 62325d23cbc3 or 62325d43cbc3 + VRCP28SS X0, X20, K3, X8 // 62725d03cbc0 or 62725d23cbc0 or 62725d43cbc0 + VRCP28SS -7(CX), X20, K3, X8 // 62725d03cb81f9ffffff or 62725d23cb81f9ffffff or 62725d43cb81f9ffffff + VRCP28SS 15(DX)(BX*4), X20, K3, X8 // 62725d03cb849a0f000000 or 
62725d23cb849a0f000000 or 62725d43cb849a0f000000 + VRCP28SS X26, X2, K3, X8 // 62126d0bcbc2 or 62126d2bcbc2 or 62126d4bcbc2 + VRCP28SS X19, X2, K3, X8 // 62326d0bcbc3 or 62326d2bcbc3 or 62326d4bcbc3 + VRCP28SS X0, X2, K3, X8 // 62726d0bcbc0 or 62726d2bcbc0 or 62726d4bcbc0 + VRCP28SS -7(CX), X2, K3, X8 // 62726d0bcb81f9ffffff or 62726d2bcb81f9ffffff or 62726d4bcb81f9ffffff + VRCP28SS 15(DX)(BX*4), X2, K3, X8 // 62726d0bcb849a0f000000 or 62726d2bcb849a0f000000 or 62726d4bcb849a0f000000 + VRCP28SS X26, X9, K3, X8 // 6212350bcbc2 or 6212352bcbc2 or 6212354bcbc2 + VRCP28SS X19, X9, K3, X8 // 6232350bcbc3 or 6232352bcbc3 or 6232354bcbc3 + VRCP28SS X0, X9, K3, X8 // 6272350bcbc0 or 6272352bcbc0 or 6272354bcbc0 + VRCP28SS -7(CX), X9, K3, X8 // 6272350bcb81f9ffffff or 6272352bcb81f9ffffff or 6272354bcb81f9ffffff + VRCP28SS 15(DX)(BX*4), X9, K3, X8 // 6272350bcb849a0f000000 or 6272352bcb849a0f000000 or 6272354bcb849a0f000000 + VRSQRT28PD Z7, K3, Z3 // 62f2fd4bccdf + VRSQRT28PD Z9, K3, Z3 // 62d2fd4bccd9 + VRSQRT28PD Z7, K3, Z27 // 6262fd4bccdf + VRSQRT28PD Z9, K3, Z27 // 6242fd4bccd9 + VRSQRT28PD Z20, K3, Z0 // 62b2fd4bccc4 + VRSQRT28PD Z28, K3, Z0 // 6292fd4bccc4 + VRSQRT28PD (SI), K3, Z0 // 62f2fd4bcc06 + VRSQRT28PD 7(SI)(DI*2), K3, Z0 // 62f2fd4bcc847e07000000 + VRSQRT28PD Z20, K3, Z6 // 62b2fd4bccf4 + VRSQRT28PD Z28, K3, Z6 // 6292fd4bccf4 + VRSQRT28PD (SI), K3, Z6 // 62f2fd4bcc36 + VRSQRT28PD 7(SI)(DI*2), K3, Z6 // 62f2fd4bccb47e07000000 + VRSQRT28PS Z9, K2, Z3 // 62d27d4accd9 + VRSQRT28PS Z19, K2, Z3 // 62b27d4accdb + VRSQRT28PS Z9, K2, Z30 // 62427d4accf1 + VRSQRT28PS Z19, K2, Z30 // 62227d4accf3 + VRSQRT28PS Z11, K1, Z12 // 62527d49cce3 + VRSQRT28PS Z5, K1, Z12 // 62727d49cce5 + VRSQRT28PS 17(SP)(BP*8), K1, Z12 // 62727d49cca4ec11000000 + VRSQRT28PS 17(SP)(BP*4), K1, Z12 // 62727d49cca4ac11000000 + VRSQRT28PS Z11, K1, Z22 // 62c27d49ccf3 + VRSQRT28PS Z5, K1, Z22 // 62e27d49ccf5 + VRSQRT28PS 17(SP)(BP*8), K1, Z22 // 62e27d49ccb4ec11000000 + VRSQRT28PS 17(SP)(BP*4), 
K1, Z22 // 62e27d49ccb4ac11000000 + VRSQRT28SD X20, X20, K2, X31 // 6222dd02cdfc + VRSQRT28SD X16, X20, K2, X31 // 6222dd02cdf8 + VRSQRT28SD X12, X20, K2, X31 // 6242dd02cdfc + VRSQRT28SD X20, X24, K2, X31 // 6222bd02cdfc + VRSQRT28SD X16, X24, K2, X31 // 6222bd02cdf8 + VRSQRT28SD X12, X24, K2, X31 // 6242bd02cdfc + VRSQRT28SD X20, X7, K2, X31 // 6222c50acdfc + VRSQRT28SD X16, X7, K2, X31 // 6222c50acdf8 + VRSQRT28SD X12, X7, K2, X31 // 6242c50acdfc + VRSQRT28SD X20, X20, K2, X3 // 62b2dd02cddc + VRSQRT28SD X16, X20, K2, X3 // 62b2dd02cdd8 + VRSQRT28SD X12, X20, K2, X3 // 62d2dd02cddc + VRSQRT28SD X20, X24, K2, X3 // 62b2bd02cddc + VRSQRT28SD X16, X24, K2, X3 // 62b2bd02cdd8 + VRSQRT28SD X12, X24, K2, X3 // 62d2bd02cddc + VRSQRT28SD X20, X7, K2, X3 // 62b2c50acddc + VRSQRT28SD X16, X7, K2, X3 // 62b2c50acdd8 + VRSQRT28SD X12, X7, K2, X3 // 62d2c50acddc + VRSQRT28SD X20, X20, K2, X28 // 6222dd02cde4 + VRSQRT28SD X16, X20, K2, X28 // 6222dd02cde0 + VRSQRT28SD X12, X20, K2, X28 // 6242dd02cde4 + VRSQRT28SD X20, X24, K2, X28 // 6222bd02cde4 + VRSQRT28SD X16, X24, K2, X28 // 6222bd02cde0 + VRSQRT28SD X12, X24, K2, X28 // 6242bd02cde4 + VRSQRT28SD X20, X7, K2, X28 // 6222c50acde4 + VRSQRT28SD X16, X7, K2, X28 // 6222c50acde0 + VRSQRT28SD X12, X7, K2, X28 // 6242c50acde4 + VRSQRT28SD X8, X6, K1, X6 // 62d2cd09cdf0 or 62d2cd29cdf0 or 62d2cd49cdf0 + VRSQRT28SD X6, X6, K1, X6 // 62f2cd09cdf6 or 62f2cd29cdf6 or 62f2cd49cdf6 + VRSQRT28SD X0, X6, K1, X6 // 62f2cd09cdf0 or 62f2cd29cdf0 or 62f2cd49cdf0 + VRSQRT28SD 99(R15)(R15*1), X6, K1, X6 // 6292cd09cdb43f63000000 or 6292cd29cdb43f63000000 or 6292cd49cdb43f63000000 + VRSQRT28SD (DX), X6, K1, X6 // 62f2cd09cd32 or 62f2cd29cd32 or 62f2cd49cd32 + VRSQRT28SD X8, X1, K1, X6 // 62d2f509cdf0 or 62d2f529cdf0 or 62d2f549cdf0 + VRSQRT28SD X6, X1, K1, X6 // 62f2f509cdf6 or 62f2f529cdf6 or 62f2f549cdf6 + VRSQRT28SD X0, X1, K1, X6 // 62f2f509cdf0 or 62f2f529cdf0 or 62f2f549cdf0 + VRSQRT28SD 99(R15)(R15*1), X1, K1, X6 // 
6292f509cdb43f63000000 or 6292f529cdb43f63000000 or 6292f549cdb43f63000000 + VRSQRT28SD (DX), X1, K1, X6 // 62f2f509cd32 or 62f2f529cd32 or 62f2f549cd32 + VRSQRT28SD X8, X8, K1, X6 // 62d2bd09cdf0 or 62d2bd29cdf0 or 62d2bd49cdf0 + VRSQRT28SD X6, X8, K1, X6 // 62f2bd09cdf6 or 62f2bd29cdf6 or 62f2bd49cdf6 + VRSQRT28SD X0, X8, K1, X6 // 62f2bd09cdf0 or 62f2bd29cdf0 or 62f2bd49cdf0 + VRSQRT28SD 99(R15)(R15*1), X8, K1, X6 // 6292bd09cdb43f63000000 or 6292bd29cdb43f63000000 or 6292bd49cdb43f63000000 + VRSQRT28SD (DX), X8, K1, X6 // 62f2bd09cd32 or 62f2bd29cd32 or 62f2bd49cd32 + VRSQRT28SD X8, X6, K1, X17 // 62c2cd09cdc8 or 62c2cd29cdc8 or 62c2cd49cdc8 + VRSQRT28SD X6, X6, K1, X17 // 62e2cd09cdce or 62e2cd29cdce or 62e2cd49cdce + VRSQRT28SD X0, X6, K1, X17 // 62e2cd09cdc8 or 62e2cd29cdc8 or 62e2cd49cdc8 + VRSQRT28SD 99(R15)(R15*1), X6, K1, X17 // 6282cd09cd8c3f63000000 or 6282cd29cd8c3f63000000 or 6282cd49cd8c3f63000000 + VRSQRT28SD (DX), X6, K1, X17 // 62e2cd09cd0a or 62e2cd29cd0a or 62e2cd49cd0a + VRSQRT28SD X8, X1, K1, X17 // 62c2f509cdc8 or 62c2f529cdc8 or 62c2f549cdc8 + VRSQRT28SD X6, X1, K1, X17 // 62e2f509cdce or 62e2f529cdce or 62e2f549cdce + VRSQRT28SD X0, X1, K1, X17 // 62e2f509cdc8 or 62e2f529cdc8 or 62e2f549cdc8 + VRSQRT28SD 99(R15)(R15*1), X1, K1, X17 // 6282f509cd8c3f63000000 or 6282f529cd8c3f63000000 or 6282f549cd8c3f63000000 + VRSQRT28SD (DX), X1, K1, X17 // 62e2f509cd0a or 62e2f529cd0a or 62e2f549cd0a + VRSQRT28SD X8, X8, K1, X17 // 62c2bd09cdc8 or 62c2bd29cdc8 or 62c2bd49cdc8 + VRSQRT28SD X6, X8, K1, X17 // 62e2bd09cdce or 62e2bd29cdce or 62e2bd49cdce + VRSQRT28SD X0, X8, K1, X17 // 62e2bd09cdc8 or 62e2bd29cdc8 or 62e2bd49cdc8 + VRSQRT28SD 99(R15)(R15*1), X8, K1, X17 // 6282bd09cd8c3f63000000 or 6282bd29cd8c3f63000000 or 6282bd49cd8c3f63000000 + VRSQRT28SD (DX), X8, K1, X17 // 62e2bd09cd0a or 62e2bd29cd0a or 62e2bd49cd0a + VRSQRT28SD X8, X6, K1, X28 // 6242cd09cde0 or 6242cd29cde0 or 6242cd49cde0 + VRSQRT28SD X6, X6, K1, X28 // 6262cd09cde6 or 
6262cd29cde6 or 6262cd49cde6 + VRSQRT28SD X0, X6, K1, X28 // 6262cd09cde0 or 6262cd29cde0 or 6262cd49cde0 + VRSQRT28SD 99(R15)(R15*1), X6, K1, X28 // 6202cd09cda43f63000000 or 6202cd29cda43f63000000 or 6202cd49cda43f63000000 + VRSQRT28SD (DX), X6, K1, X28 // 6262cd09cd22 or 6262cd29cd22 or 6262cd49cd22 + VRSQRT28SD X8, X1, K1, X28 // 6242f509cde0 or 6242f529cde0 or 6242f549cde0 + VRSQRT28SD X6, X1, K1, X28 // 6262f509cde6 or 6262f529cde6 or 6262f549cde6 + VRSQRT28SD X0, X1, K1, X28 // 6262f509cde0 or 6262f529cde0 or 6262f549cde0 + VRSQRT28SD 99(R15)(R15*1), X1, K1, X28 // 6202f509cda43f63000000 or 6202f529cda43f63000000 or 6202f549cda43f63000000 + VRSQRT28SD (DX), X1, K1, X28 // 6262f509cd22 or 6262f529cd22 or 6262f549cd22 + VRSQRT28SD X8, X8, K1, X28 // 6242bd09cde0 or 6242bd29cde0 or 6242bd49cde0 + VRSQRT28SD X6, X8, K1, X28 // 6262bd09cde6 or 6262bd29cde6 or 6262bd49cde6 + VRSQRT28SD X0, X8, K1, X28 // 6262bd09cde0 or 6262bd29cde0 or 6262bd49cde0 + VRSQRT28SD 99(R15)(R15*1), X8, K1, X28 // 6202bd09cda43f63000000 or 6202bd29cda43f63000000 or 6202bd49cda43f63000000 + VRSQRT28SD (DX), X8, K1, X28 // 6262bd09cd22 or 6262bd29cd22 or 6262bd49cd22 + VRSQRT28SS X16, X6, K7, X11 // 62324d0fcdd8 + VRSQRT28SS X28, X6, K7, X11 // 62124d0fcddc + VRSQRT28SS X8, X6, K7, X11 // 62524d0fcdd8 + VRSQRT28SS X16, X22, K7, X11 // 62324d07cdd8 + VRSQRT28SS X28, X22, K7, X11 // 62124d07cddc + VRSQRT28SS X8, X22, K7, X11 // 62524d07cdd8 + VRSQRT28SS X16, X12, K7, X11 // 62321d0fcdd8 + VRSQRT28SS X28, X12, K7, X11 // 62121d0fcddc + VRSQRT28SS X8, X12, K7, X11 // 62521d0fcdd8 + VRSQRT28SS X16, X6, K7, X16 // 62a24d0fcdc0 + VRSQRT28SS X28, X6, K7, X16 // 62824d0fcdc4 + VRSQRT28SS X8, X6, K7, X16 // 62c24d0fcdc0 + VRSQRT28SS X16, X22, K7, X16 // 62a24d07cdc0 + VRSQRT28SS X28, X22, K7, X16 // 62824d07cdc4 + VRSQRT28SS X8, X22, K7, X16 // 62c24d07cdc0 + VRSQRT28SS X16, X12, K7, X16 // 62a21d0fcdc0 + VRSQRT28SS X28, X12, K7, X16 // 62821d0fcdc4 + VRSQRT28SS X8, X12, K7, X16 // 62c21d0fcdc0 + 
VRSQRT28SS X16, X6, K7, X6 // 62b24d0fcdf0 + VRSQRT28SS X28, X6, K7, X6 // 62924d0fcdf4 + VRSQRT28SS X8, X6, K7, X6 // 62d24d0fcdf0 + VRSQRT28SS X16, X22, K7, X6 // 62b24d07cdf0 + VRSQRT28SS X28, X22, K7, X6 // 62924d07cdf4 + VRSQRT28SS X8, X22, K7, X6 // 62d24d07cdf0 + VRSQRT28SS X16, X12, K7, X6 // 62b21d0fcdf0 + VRSQRT28SS X28, X12, K7, X6 // 62921d0fcdf4 + VRSQRT28SS X8, X12, K7, X6 // 62d21d0fcdf0 + VRSQRT28SS X14, X19, K1, X15 // 62526501cdfe or 62526521cdfe or 62526541cdfe + VRSQRT28SS X0, X19, K1, X15 // 62726501cdf8 or 62726521cdf8 or 62726541cdf8 + VRSQRT28SS 15(R8)(R14*4), X19, K1, X15 // 62126501cdbcb00f000000 or 62126521cdbcb00f000000 or 62126541cdbcb00f000000 + VRSQRT28SS -7(CX)(DX*4), X19, K1, X15 // 62726501cdbc91f9ffffff or 62726521cdbc91f9ffffff or 62726541cdbc91f9ffffff + VRSQRT28SS X14, X13, K1, X15 // 62521509cdfe or 62521529cdfe or 62521549cdfe + VRSQRT28SS X0, X13, K1, X15 // 62721509cdf8 or 62721529cdf8 or 62721549cdf8 + VRSQRT28SS 15(R8)(R14*4), X13, K1, X15 // 62121509cdbcb00f000000 or 62121529cdbcb00f000000 or 62121549cdbcb00f000000 + VRSQRT28SS -7(CX)(DX*4), X13, K1, X15 // 62721509cdbc91f9ffffff or 62721529cdbc91f9ffffff or 62721549cdbc91f9ffffff + VRSQRT28SS X14, X2, K1, X15 // 62526d09cdfe or 62526d29cdfe or 62526d49cdfe + VRSQRT28SS X0, X2, K1, X15 // 62726d09cdf8 or 62726d29cdf8 or 62726d49cdf8 + VRSQRT28SS 15(R8)(R14*4), X2, K1, X15 // 62126d09cdbcb00f000000 or 62126d29cdbcb00f000000 or 62126d49cdbcb00f000000 + VRSQRT28SS -7(CX)(DX*4), X2, K1, X15 // 62726d09cdbc91f9ffffff or 62726d29cdbc91f9ffffff or 62726d49cdbc91f9ffffff + VRSQRT28SS X14, X19, K1, X11 // 62526501cdde or 62526521cdde or 62526541cdde + VRSQRT28SS X0, X19, K1, X11 // 62726501cdd8 or 62726521cdd8 or 62726541cdd8 + VRSQRT28SS 15(R8)(R14*4), X19, K1, X11 // 62126501cd9cb00f000000 or 62126521cd9cb00f000000 or 62126541cd9cb00f000000 + VRSQRT28SS -7(CX)(DX*4), X19, K1, X11 // 62726501cd9c91f9ffffff or 62726521cd9c91f9ffffff or 62726541cd9c91f9ffffff + VRSQRT28SS X14, 
X13, K1, X11 // 62521509cdde or 62521529cdde or 62521549cdde + VRSQRT28SS X0, X13, K1, X11 // 62721509cdd8 or 62721529cdd8 or 62721549cdd8 + VRSQRT28SS 15(R8)(R14*4), X13, K1, X11 // 62121509cd9cb00f000000 or 62121529cd9cb00f000000 or 62121549cd9cb00f000000 + VRSQRT28SS -7(CX)(DX*4), X13, K1, X11 // 62721509cd9c91f9ffffff or 62721529cd9c91f9ffffff or 62721549cd9c91f9ffffff + VRSQRT28SS X14, X2, K1, X11 // 62526d09cdde or 62526d29cdde or 62526d49cdde + VRSQRT28SS X0, X2, K1, X11 // 62726d09cdd8 or 62726d29cdd8 or 62726d49cdd8 + VRSQRT28SS 15(R8)(R14*4), X2, K1, X11 // 62126d09cd9cb00f000000 or 62126d29cd9cb00f000000 or 62126d49cd9cb00f000000 + VRSQRT28SS -7(CX)(DX*4), X2, K1, X11 // 62726d09cd9c91f9ffffff or 62726d29cd9c91f9ffffff or 62726d49cd9c91f9ffffff + VRSQRT28SS X14, X19, K1, X1 // 62d26501cdce or 62d26521cdce or 62d26541cdce + VRSQRT28SS X0, X19, K1, X1 // 62f26501cdc8 or 62f26521cdc8 or 62f26541cdc8 + VRSQRT28SS 15(R8)(R14*4), X19, K1, X1 // 62926501cd8cb00f000000 or 62926521cd8cb00f000000 or 62926541cd8cb00f000000 + VRSQRT28SS -7(CX)(DX*4), X19, K1, X1 // 62f26501cd8c91f9ffffff or 62f26521cd8c91f9ffffff or 62f26541cd8c91f9ffffff + VRSQRT28SS X14, X13, K1, X1 // 62d21509cdce or 62d21529cdce or 62d21549cdce + VRSQRT28SS X0, X13, K1, X1 // 62f21509cdc8 or 62f21529cdc8 or 62f21549cdc8 + VRSQRT28SS 15(R8)(R14*4), X13, K1, X1 // 62921509cd8cb00f000000 or 62921529cd8cb00f000000 or 62921549cd8cb00f000000 + VRSQRT28SS -7(CX)(DX*4), X13, K1, X1 // 62f21509cd8c91f9ffffff or 62f21529cd8c91f9ffffff or 62f21549cd8c91f9ffffff + VRSQRT28SS X14, X2, K1, X1 // 62d26d09cdce or 62d26d29cdce or 62d26d49cdce + VRSQRT28SS X0, X2, K1, X1 // 62f26d09cdc8 or 62f26d29cdc8 or 62f26d49cdc8 + VRSQRT28SS 15(R8)(R14*4), X2, K1, X1 // 62926d09cd8cb00f000000 or 62926d29cd8cb00f000000 or 62926d49cd8cb00f000000 + VRSQRT28SS -7(CX)(DX*4), X2, K1, X1 // 62f26d09cd8c91f9ffffff or 62f26d29cd8c91f9ffffff or 62f26d49cd8c91f9ffffff + RET diff --git 
a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512f.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512f.s new file mode 100644 index 0000000000000000000000000000000000000000..71b5764c5b79c8ba9370d23aa1935c0128d53705 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512f.s @@ -0,0 +1,5639 @@ +// Code generated by avx512test. DO NOT EDIT. + +#include "../../../../../../runtime/textflag.h" + +TEXT asmtest_avx512f(SB), NOSPLIT, $0 + KANDNW K4, K4, K6 // c5dc42f4 + KANDNW K5, K4, K6 // c5dc42f5 + KANDNW K4, K6, K6 // c5cc42f4 + KANDNW K5, K6, K6 // c5cc42f5 + KANDNW K4, K4, K4 // c5dc42e4 + KANDNW K5, K4, K4 // c5dc42e5 + KANDNW K4, K6, K4 // c5cc42e4 + KANDNW K5, K6, K4 // c5cc42e5 + KANDW K5, K3, K1 // c5e441cd + KANDW K4, K3, K1 // c5e441cc + KANDW K5, K1, K1 // c5f441cd + KANDW K4, K1, K1 // c5f441cc + KANDW K5, K3, K5 // c5e441ed + KANDW K4, K3, K5 // c5e441ec + KANDW K5, K1, K5 // c5f441ed + KANDW K4, K1, K5 // c5f441ec + KMOVW K5, 17(SP) // c5f8916c2411 + KMOVW K4, 17(SP) // c5f891642411 + KMOVW K5, -17(BP)(SI*4) // c5f8916cb5ef + KMOVW K4, -17(BP)(SI*4) // c5f89164b5ef + KMOVW K7, SP // c5f893e7 + KMOVW K6, SP // c5f893e6 + KMOVW K7, R14 // c57893f7 + KMOVW K6, R14 // c57893f6 + KMOVW K0, K4 // c5f890e0 + KMOVW K7, K4 // c5f890e7 + KMOVW 7(AX), K4 // c5f8906007 + KMOVW (DI), K4 // c5f89027 + KMOVW K0, K6 // c5f890f0 + KMOVW K7, K6 // c5f890f7 + KMOVW 7(AX), K6 // c5f8907007 + KMOVW (DI), K6 // c5f89037 + KMOVW AX, K5 // c5f892e8 + KMOVW R9, K5 // c4c17892e9 + KMOVW AX, K4 // c5f892e0 + KMOVW R9, K4 // c4c17892e1 + KNOTW K0, K2 // c5f844d0 + KNOTW K5, K2 // c5f844d5 + KNOTW K0, K7 // c5f844f8 + KNOTW K5, K7 // c5f844fd + KORTESTW K6, K0 // c5f898c6 + KORTESTW K5, K0 // c5f898c5 + KORTESTW K6, K5 // c5f898ee + KORTESTW K5, K5 // c5f898ed + KORW K5, K3, K1 // c5e445cd + KORW K4, K3, K1 // c5e445cc + KORW K5, K1, K1 // c5f445cd + KORW K4, K1, K1 // 
c5f445cc + KORW K5, K3, K5 // c5e445ed + KORW K4, K3, K5 // c5e445ec + KORW K5, K1, K5 // c5f445ed + KORW K4, K1, K5 // c5f445ec + KSHIFTLW $81, K6, K6 // c4e3f932f651 + KSHIFTLW $81, K4, K6 // c4e3f932f451 + KSHIFTLW $81, K6, K7 // c4e3f932fe51 + KSHIFTLW $81, K4, K7 // c4e3f932fc51 + KSHIFTRW $27, K5, K3 // c4e3f930dd1b + KSHIFTRW $27, K4, K3 // c4e3f930dc1b + KSHIFTRW $27, K5, K1 // c4e3f930cd1b + KSHIFTRW $27, K4, K1 // c4e3f930cc1b + KUNPCKBW K2, K4, K4 // c5dd4be2 + KUNPCKBW K7, K4, K4 // c5dd4be7 + KUNPCKBW K2, K5, K4 // c5d54be2 + KUNPCKBW K7, K5, K4 // c5d54be7 + KUNPCKBW K2, K4, K6 // c5dd4bf2 + KUNPCKBW K7, K4, K6 // c5dd4bf7 + KUNPCKBW K2, K5, K6 // c5d54bf2 + KUNPCKBW K7, K5, K6 // c5d54bf7 + KXNORW K6, K0, K2 // c5fc46d6 + KXNORW K5, K0, K2 // c5fc46d5 + KXNORW K6, K5, K2 // c5d446d6 + KXNORW K5, K5, K2 // c5d446d5 + KXNORW K6, K0, K7 // c5fc46fe + KXNORW K5, K0, K7 // c5fc46fd + KXNORW K6, K5, K7 // c5d446fe + KXNORW K5, K5, K7 // c5d446fd + KXORW K4, K6, K6 // c5cc47f4 + KXORW K6, K6, K6 // c5cc47f6 + KXORW K4, K4, K6 // c5dc47f4 + KXORW K6, K4, K6 // c5dc47f6 + KXORW K4, K6, K7 // c5cc47fc + KXORW K6, K6, K7 // c5cc47fe + KXORW K4, K4, K7 // c5dc47fc + KXORW K6, K4, K7 // c5dc47fe + VADDPD X15, X11, K2, X3 // 62d1a50a58df + VADDPD 7(SI)(DI*8), X11, K2, X3 // 62f1a50a589cfe07000000 + VADDPD -15(R14), X11, K2, X3 // 62d1a50a589ef1ffffff + VADDPD Y25, Y31, K2, Y14 // 6211852258f1 + VADDPD 17(SP), Y31, K2, Y14 // 6271852258b42411000000 + VADDPD -17(BP)(SI*4), Y31, K2, Y14 // 6271852258b4b5efffffff + VADDPD Z13, Z11, K3, Z14 // 6251a54b58f5 + VADDPD Z14, Z11, K3, Z14 // 6251a54b58f6 + VADDPD Z13, Z5, K3, Z14 // 6251d54b58f5 + VADDPD Z14, Z5, K3, Z14 // 6251d54b58f6 + VADDPD Z13, Z11, K3, Z27 // 6241a54b58dd + VADDPD Z14, Z11, K3, Z27 // 6241a54b58de + VADDPD Z13, Z5, K3, Z27 // 6241d54b58dd + VADDPD Z14, Z5, K3, Z27 // 6241d54b58de + VADDPD Z6, Z2, K3, Z5 // 62f1ed4b58ee + VADDPD Z14, Z2, K3, Z5 // 62d1ed4b58ee + VADDPD 17(SP), Z2, K3, Z5 // 
62f1ed4b58ac2411000000 + VADDPD -17(BP)(SI*4), Z2, K3, Z5 // 62f1ed4b58acb5efffffff + VADDPD Z6, Z2, K3, Z23 // 62e1ed4b58fe + VADDPD Z14, Z2, K3, Z23 // 62c1ed4b58fe + VADDPD 17(SP), Z2, K3, Z23 // 62e1ed4b58bc2411000000 + VADDPD -17(BP)(SI*4), Z2, K3, Z23 // 62e1ed4b58bcb5efffffff + VADDPS X6, X13, K3, X30 // 6261140b58f6 + VADDPS 7(SI)(DI*1), X13, K3, X30 // 6261140b58b43e07000000 + VADDPS 15(DX)(BX*8), X13, K3, X30 // 6261140b58b4da0f000000 + VADDPS Y27, Y22, K2, Y2 // 62914c2258d3 + VADDPS 7(AX), Y22, K2, Y2 // 62f14c22589007000000 + VADDPS (DI), Y22, K2, Y2 // 62f14c225817 + VADDPS Z13, Z28, K1, Z26 // 62411c4158d5 + VADDPS Z21, Z28, K1, Z26 // 62211c4158d5 + VADDPS Z13, Z6, K1, Z26 // 62414c4958d5 + VADDPS Z21, Z6, K1, Z26 // 62214c4958d5 + VADDPS Z13, Z28, K1, Z14 // 62511c4158f5 + VADDPS Z21, Z28, K1, Z14 // 62311c4158f5 + VADDPS Z13, Z6, K1, Z14 // 62514c4958f5 + VADDPS Z21, Z6, K1, Z14 // 62314c4958f5 + VADDPS Z21, Z3, K2, Z26 // 6221644a58d5 + VADDPS Z13, Z3, K2, Z26 // 6241644a58d5 + VADDPS 7(AX), Z3, K2, Z26 // 6261644a589007000000 + VADDPS (DI), Z3, K2, Z26 // 6261644a5817 + VADDPS Z21, Z0, K2, Z26 // 62217c4a58d5 + VADDPS Z13, Z0, K2, Z26 // 62417c4a58d5 + VADDPS 7(AX), Z0, K2, Z26 // 62617c4a589007000000 + VADDPS (DI), Z0, K2, Z26 // 62617c4a5817 + VADDPS Z21, Z3, K2, Z3 // 62b1644a58dd + VADDPS Z13, Z3, K2, Z3 // 62d1644a58dd + VADDPS 7(AX), Z3, K2, Z3 // 62f1644a589807000000 + VADDPS (DI), Z3, K2, Z3 // 62f1644a581f + VADDPS Z21, Z0, K2, Z3 // 62b17c4a58dd + VADDPS Z13, Z0, K2, Z3 // 62d17c4a58dd + VADDPS 7(AX), Z0, K2, Z3 // 62f17c4a589807000000 + VADDPS (DI), Z0, K2, Z3 // 62f17c4a581f + VADDSD X30, X23, K1, X12 // 6211c70158e6 + VADDSD X2, X20, K7, X8 // 6271df0758c2 or 6271df2758c2 or 6271df4758c2 + VADDSD 99(R15)(R15*1), X20, K7, X8 // 6211df0758843f63000000 or 6211df2758843f63000000 or 6211df4758843f63000000 + VADDSD (DX), X20, K7, X8 // 6271df075802 or 6271df275802 or 6271df475802 + VADDSS X19, X26, K1, X9 // 62312e0158cb + VADDSS X16, 
X31, K1, X0 // 62b1060158c0 or 62b1062158c0 or 62b1064158c0 + VADDSS 99(R15)(R15*1), X31, K1, X0 // 6291060158843f63000000 or 6291062158843f63000000 or 6291064158843f63000000 + VADDSS (DX), X31, K1, X0 // 62f106015802 or 62f106215802 or 62f106415802 + VALIGND $47, X16, X7, K1, X19 // 62a3450903d82f + VALIGND $47, (BX), X7, K1, X19 // 62e34509031b2f + VALIGND $47, -17(BP)(SI*1), X7, K1, X19 // 62e34509039c35efffffff2f + VALIGND $82, Y23, Y9, K7, Y22 // 62a3352f03f752 + VALIGND $82, -7(DI)(R8*1), Y9, K7, Y22 // 62a3352f03b407f9ffffff52 + VALIGND $82, (SP), Y9, K7, Y22 // 62e3352f03342452 + VALIGND $126, Z6, Z9, K2, Z12 // 6273354a03e67e + VALIGND $126, Z25, Z9, K2, Z12 // 6213354a03e17e + VALIGND $126, -7(DI)(R8*1), Z9, K2, Z12 // 6233354a03a407f9ffffff7e + VALIGND $126, (SP), Z9, K2, Z12 // 6273354a0324247e + VALIGND $126, Z6, Z12, K2, Z12 // 62731d4a03e67e + VALIGND $126, Z25, Z12, K2, Z12 // 62131d4a03e17e + VALIGND $126, -7(DI)(R8*1), Z12, K2, Z12 // 62331d4a03a407f9ffffff7e + VALIGND $126, (SP), Z12, K2, Z12 // 62731d4a0324247e + VALIGND $126, Z6, Z9, K2, Z17 // 62e3354a03ce7e + VALIGND $126, Z25, Z9, K2, Z17 // 6283354a03c97e + VALIGND $126, -7(DI)(R8*1), Z9, K2, Z17 // 62a3354a038c07f9ffffff7e + VALIGND $126, (SP), Z9, K2, Z17 // 62e3354a030c247e + VALIGND $126, Z6, Z12, K2, Z17 // 62e31d4a03ce7e + VALIGND $126, Z25, Z12, K2, Z17 // 62831d4a03c97e + VALIGND $126, -7(DI)(R8*1), Z12, K2, Z17 // 62a31d4a038c07f9ffffff7e + VALIGND $126, (SP), Z12, K2, Z17 // 62e31d4a030c247e + VALIGNQ $94, X7, X1, K4, X31 // 6263f50c03ff5e + VALIGNQ $94, 15(R8)(R14*4), X1, K4, X31 // 6203f50c03bcb00f0000005e + VALIGNQ $94, -7(CX)(DX*4), X1, K4, X31 // 6263f50c03bc91f9ffffff5e + VALIGNQ $121, Y0, Y5, K1, Y31 // 6263d52903f879 + VALIGNQ $121, -7(CX), Y5, K1, Y31 // 6263d52903b9f9ffffff79 + VALIGNQ $121, 15(DX)(BX*4), Y5, K1, Y31 // 6263d52903bc9a0f00000079 + VALIGNQ $13, Z3, Z8, K3, Z3 // 62f3bd4b03db0d + VALIGNQ $13, Z27, Z8, K3, Z3 // 6293bd4b03db0d + VALIGNQ $13, -7(CX), Z8, K3, 
Z3 // 62f3bd4b0399f9ffffff0d + VALIGNQ $13, 15(DX)(BX*4), Z8, K3, Z3 // 62f3bd4b039c9a0f0000000d + VALIGNQ $13, Z3, Z2, K3, Z3 // 62f3ed4b03db0d + VALIGNQ $13, Z27, Z2, K3, Z3 // 6293ed4b03db0d + VALIGNQ $13, -7(CX), Z2, K3, Z3 // 62f3ed4b0399f9ffffff0d + VALIGNQ $13, 15(DX)(BX*4), Z2, K3, Z3 // 62f3ed4b039c9a0f0000000d + VALIGNQ $13, Z3, Z8, K3, Z21 // 62e3bd4b03eb0d + VALIGNQ $13, Z27, Z8, K3, Z21 // 6283bd4b03eb0d + VALIGNQ $13, -7(CX), Z8, K3, Z21 // 62e3bd4b03a9f9ffffff0d + VALIGNQ $13, 15(DX)(BX*4), Z8, K3, Z21 // 62e3bd4b03ac9a0f0000000d + VALIGNQ $13, Z3, Z2, K3, Z21 // 62e3ed4b03eb0d + VALIGNQ $13, Z27, Z2, K3, Z21 // 6283ed4b03eb0d + VALIGNQ $13, -7(CX), Z2, K3, Z21 // 62e3ed4b03a9f9ffffff0d + VALIGNQ $13, 15(DX)(BX*4), Z2, K3, Z21 // 62e3ed4b03ac9a0f0000000d + VBLENDMPD X28, X13, K3, X23 // 6282950b65fc + VBLENDMPD (R14), X13, K3, X23 // 62c2950b653e + VBLENDMPD -7(DI)(R8*8), X13, K3, X23 // 62a2950b65bcc7f9ffffff + VBLENDMPD Y27, Y13, K4, Y2 // 6292952c65d3 + VBLENDMPD (R8), Y13, K4, Y2 // 62d2952c6510 + VBLENDMPD 15(DX)(BX*2), Y13, K4, Y2 // 62f2952c65945a0f000000 + VBLENDMPD Z18, Z13, K2, Z1 // 62b2954a65ca + VBLENDMPD Z8, Z13, K2, Z1 // 62d2954a65c8 + VBLENDMPD (R8), Z13, K2, Z1 // 62d2954a6508 + VBLENDMPD 15(DX)(BX*2), Z13, K2, Z1 // 62f2954a658c5a0f000000 + VBLENDMPD Z18, Z13, K2, Z15 // 6232954a65fa + VBLENDMPD Z8, Z13, K2, Z15 // 6252954a65f8 + VBLENDMPD (R8), Z13, K2, Z15 // 6252954a6538 + VBLENDMPD 15(DX)(BX*2), Z13, K2, Z15 // 6272954a65bc5a0f000000 + VBLENDMPS X15, X9, K2, X24 // 6242350a65c7 + VBLENDMPS 99(R15)(R15*4), X9, K2, X24 // 6202350a6584bf63000000 + VBLENDMPS 15(DX), X9, K2, X24 // 6262350a65820f000000 + VBLENDMPS Y20, Y22, K3, Y15 // 62324d2365fc + VBLENDMPS 17(SP)(BP*1), Y22, K3, Y15 // 62724d2365bc2c11000000 + VBLENDMPS -7(CX)(DX*8), Y22, K3, Y15 // 62724d2365bcd1f9ffffff + VBLENDMPS Z20, Z2, K3, Z22 // 62a26d4b65f4 + VBLENDMPS Z9, Z2, K3, Z22 // 62c26d4b65f1 + VBLENDMPS 17(SP)(BP*1), Z2, K3, Z22 // 62e26d4b65b42c11000000 + 
VBLENDMPS -7(CX)(DX*8), Z2, K3, Z22 // 62e26d4b65b4d1f9ffffff + VBLENDMPS Z20, Z31, K3, Z22 // 62a2054365f4 + VBLENDMPS Z9, Z31, K3, Z22 // 62c2054365f1 + VBLENDMPS 17(SP)(BP*1), Z31, K3, Z22 // 62e2054365b42c11000000 + VBLENDMPS -7(CX)(DX*8), Z31, K3, Z22 // 62e2054365b4d1f9ffffff + VBLENDMPS Z20, Z2, K3, Z7 // 62b26d4b65fc + VBLENDMPS Z9, Z2, K3, Z7 // 62d26d4b65f9 + VBLENDMPS 17(SP)(BP*1), Z2, K3, Z7 // 62f26d4b65bc2c11000000 + VBLENDMPS -7(CX)(DX*8), Z2, K3, Z7 // 62f26d4b65bcd1f9ffffff + VBLENDMPS Z20, Z31, K3, Z7 // 62b2054365fc + VBLENDMPS Z9, Z31, K3, Z7 // 62d2054365f9 + VBLENDMPS 17(SP)(BP*1), Z31, K3, Z7 // 62f2054365bc2c11000000 + VBLENDMPS -7(CX)(DX*8), Z31, K3, Z7 // 62f2054365bcd1f9ffffff + VBROADCASTF32X4 (CX), K1, Y24 // 62627d291a01 + VBROADCASTF32X4 99(R15), K1, Y24 // 62427d291a8763000000 + VBROADCASTF32X4 99(R15)(R15*2), K2, Z12 // 62127d4a1aa47f63000000 + VBROADCASTF32X4 -7(DI), K2, Z12 // 62727d4a1aa7f9ffffff + VBROADCASTF32X4 99(R15)(R15*2), K2, Z16 // 62827d4a1a847f63000000 + VBROADCASTF32X4 -7(DI), K2, Z16 // 62e27d4a1a87f9ffffff + VBROADCASTF64X4 15(R8)(R14*1), K1, Z3 // 6292fd491b9c300f000000 + VBROADCASTF64X4 15(R8)(R14*2), K1, Z3 // 6292fd491b9c700f000000 + VBROADCASTF64X4 15(R8)(R14*1), K1, Z12 // 6212fd491ba4300f000000 + VBROADCASTF64X4 15(R8)(R14*2), K1, Z12 // 6212fd491ba4700f000000 + VBROADCASTI32X4 -17(BP), K4, Y19 // 62e27d2c5a9defffffff + VBROADCASTI32X4 -15(R14)(R15*8), K4, Y19 // 62827d2c5a9cfef1ffffff + VBROADCASTI32X4 17(SP)(BP*2), K1, Z19 // 62e27d495a9c6c11000000 + VBROADCASTI32X4 -7(DI)(R8*4), K1, Z19 // 62a27d495a9c87f9ffffff + VBROADCASTI32X4 17(SP)(BP*2), K1, Z15 // 62727d495abc6c11000000 + VBROADCASTI32X4 -7(DI)(R8*4), K1, Z15 // 62327d495abc87f9ffffff + VBROADCASTI64X4 99(R15)(R15*4), K7, Z14 // 6212fd4f5bb4bf63000000 + VBROADCASTI64X4 15(DX), K7, Z14 // 6272fd4f5bb20f000000 + VBROADCASTI64X4 99(R15)(R15*4), K7, Z15 // 6212fd4f5bbcbf63000000 + VBROADCASTI64X4 15(DX), K7, Z15 // 6272fd4f5bba0f000000 + VBROADCASTSD 
X3, K7, Y19 // 62e2fd2f19db + VBROADCASTSD 99(R15)(R15*8), K7, Y19 // 6282fd2f199cff63000000 + VBROADCASTSD 7(AX)(CX*8), K7, Y19 // 62e2fd2f199cc807000000 + VBROADCASTSD X7, K6, Z21 // 62e2fd4e19ef + VBROADCASTSD (AX), K6, Z21 // 62e2fd4e1928 + VBROADCASTSD 7(SI), K6, Z21 // 62e2fd4e19ae07000000 + VBROADCASTSD X7, K6, Z8 // 6272fd4e19c7 + VBROADCASTSD (AX), K6, Z8 // 6272fd4e1900 + VBROADCASTSD 7(SI), K6, Z8 // 6272fd4e198607000000 + VBROADCASTSS X0, K3, X0 // 62f27d0b18c0 + VBROADCASTSS -17(BP)(SI*8), K3, X0 // 62f27d0b1884f5efffffff + VBROADCASTSS (R15), K3, X0 // 62d27d0b1807 + VBROADCASTSS X24, K7, Y14 // 62127d2f18f0 + VBROADCASTSS 7(SI)(DI*8), K7, Y14 // 62727d2f18b4fe07000000 + VBROADCASTSS -15(R14), K7, Y14 // 62527d2f18b6f1ffffff + VBROADCASTSS X20, K4, Z16 // 62a27d4c18c4 + VBROADCASTSS 7(SI)(DI*1), K4, Z16 // 62e27d4c18843e07000000 + VBROADCASTSS 15(DX)(BX*8), K4, Z16 // 62e27d4c1884da0f000000 + VBROADCASTSS X20, K4, Z9 // 62327d4c18cc + VBROADCASTSS 7(SI)(DI*1), K4, Z9 // 62727d4c188c3e07000000 + VBROADCASTSS 15(DX)(BX*8), K4, Z9 // 62727d4c188cda0f000000 + VCMPPD $65, X9, X7, K4, K4 // 62d1c50cc2e141 + VCMPPD $65, -15(R14)(R15*1), X7, K4, K4 // 6291c50cc2a43ef1ffffff41 + VCMPPD $65, -15(BX), X7, K4, K4 // 62f1c50cc2a3f1ffffff41 + VCMPPD $65, X9, X7, K4, K5 // 62d1c50cc2e941 + VCMPPD $65, -15(R14)(R15*1), X7, K4, K5 // 6291c50cc2ac3ef1ffffff41 + VCMPPD $65, -15(BX), X7, K4, K5 // 62f1c50cc2abf1ffffff41 + VCMPPD $67, Y5, Y21, K7, K2 // 62f1d527c2d543 + VCMPPD $67, (CX), Y21, K7, K2 // 62f1d527c21143 + VCMPPD $67, 99(R15), Y21, K7, K2 // 62d1d527c2976300000043 + VCMPPD $67, Y5, Y21, K7, K7 // 62f1d527c2fd43 + VCMPPD $67, (CX), Y21, K7, K7 // 62f1d527c23943 + VCMPPD $67, 99(R15), Y21, K7, K7 // 62d1d527c2bf6300000043 + VCMPPD $127, Z23, Z20, K2, K0 // 62b1dd42c2c77f + VCMPPD $127, Z19, Z20, K2, K0 // 62b1dd42c2c37f + VCMPPD $127, Z23, Z0, K2, K0 // 62b1fd4ac2c77f + VCMPPD $127, Z19, Z0, K2, K0 // 62b1fd4ac2c37f + VCMPPD $127, Z23, Z20, K2, K5 // 
62b1dd42c2ef7f + VCMPPD $127, Z19, Z20, K2, K5 // 62b1dd42c2eb7f + VCMPPD $127, Z23, Z0, K2, K5 // 62b1fd4ac2ef7f + VCMPPD $127, Z19, Z0, K2, K5 // 62b1fd4ac2eb7f + VCMPPD $0, Z0, Z0, K5, K6 // 62f1fd4dc2f000 + VCMPPD $0, Z25, Z0, K5, K6 // 6291fd4dc2f100 + VCMPPD $0, -17(BP)(SI*2), Z0, K5, K6 // 62f1fd4dc2b475efffffff00 + VCMPPD $0, 7(AX)(CX*2), Z0, K5, K6 // 62f1fd4dc2b4480700000000 + VCMPPD $0, Z0, Z11, K5, K6 // 62f1a54dc2f000 + VCMPPD $0, Z25, Z11, K5, K6 // 6291a54dc2f100 + VCMPPD $0, -17(BP)(SI*2), Z11, K5, K6 // 62f1a54dc2b475efffffff00 + VCMPPD $0, 7(AX)(CX*2), Z11, K5, K6 // 62f1a54dc2b4480700000000 + VCMPPD $0, Z0, Z0, K5, K5 // 62f1fd4dc2e800 + VCMPPD $0, Z25, Z0, K5, K5 // 6291fd4dc2e900 + VCMPPD $0, -17(BP)(SI*2), Z0, K5, K5 // 62f1fd4dc2ac75efffffff00 + VCMPPD $0, 7(AX)(CX*2), Z0, K5, K5 // 62f1fd4dc2ac480700000000 + VCMPPD $0, Z0, Z11, K5, K5 // 62f1a54dc2e800 + VCMPPD $0, Z25, Z11, K5, K5 // 6291a54dc2e900 + VCMPPD $0, -17(BP)(SI*2), Z11, K5, K5 // 62f1a54dc2ac75efffffff00 + VCMPPD $0, 7(AX)(CX*2), Z11, K5, K5 // 62f1a54dc2ac480700000000 + VCMPPS $97, X14, X7, K3, K1 // 62d1440bc2ce61 + VCMPPS $97, 7(AX)(CX*4), X7, K3, K1 // 62f1440bc28c880700000061 + VCMPPS $97, 7(AX)(CX*1), X7, K3, K1 // 62f1440bc28c080700000061 + VCMPPS $97, X14, X7, K3, K5 // 62d1440bc2ee61 + VCMPPS $97, 7(AX)(CX*4), X7, K3, K5 // 62f1440bc2ac880700000061 + VCMPPS $97, 7(AX)(CX*1), X7, K3, K5 // 62f1440bc2ac080700000061 + VCMPPS $81, Y2, Y16, K4, K3 // 62f17c24c2da51 + VCMPPS $81, 99(R15)(R15*2), Y16, K4, K3 // 62917c24c29c7f6300000051 + VCMPPS $81, -7(DI), Y16, K4, K3 // 62f17c24c29ff9ffffff51 + VCMPPS $81, Y2, Y16, K4, K1 // 62f17c24c2ca51 + VCMPPS $81, 99(R15)(R15*2), Y16, K4, K1 // 62917c24c28c7f6300000051 + VCMPPS $81, -7(DI), Y16, K4, K1 // 62f17c24c28ff9ffffff51 + VCMPPS $42, Z0, Z24, K2, K5 // 62f13c42c2e82a + VCMPPS $42, Z26, Z24, K2, K5 // 62913c42c2ea2a + VCMPPS $42, Z0, Z12, K2, K5 // 62f11c4ac2e82a + VCMPPS $42, Z26, Z12, K2, K5 // 62911c4ac2ea2a + VCMPPS $42, Z0, 
Z24, K2, K4 // 62f13c42c2e02a + VCMPPS $42, Z26, Z24, K2, K4 // 62913c42c2e22a + VCMPPS $42, Z0, Z12, K2, K4 // 62f11c4ac2e02a + VCMPPS $42, Z26, Z12, K2, K4 // 62911c4ac2e22a + VCMPPS $79, Z9, Z9, K2, K7 // 62d1344ac2f94f + VCMPPS $79, Z25, Z9, K2, K7 // 6291344ac2f94f + VCMPPS $79, 15(R8)(R14*1), Z9, K2, K7 // 6291344ac2bc300f0000004f + VCMPPS $79, 15(R8)(R14*2), Z9, K2, K7 // 6291344ac2bc700f0000004f + VCMPPS $79, Z9, Z3, K2, K7 // 62d1644ac2f94f + VCMPPS $79, Z25, Z3, K2, K7 // 6291644ac2f94f + VCMPPS $79, 15(R8)(R14*1), Z3, K2, K7 // 6291644ac2bc300f0000004f + VCMPPS $79, 15(R8)(R14*2), Z3, K2, K7 // 6291644ac2bc700f0000004f + VCMPPS $79, Z9, Z9, K2, K6 // 62d1344ac2f14f + VCMPPS $79, Z25, Z9, K2, K6 // 6291344ac2f14f + VCMPPS $79, 15(R8)(R14*1), Z9, K2, K6 // 6291344ac2b4300f0000004f + VCMPPS $79, 15(R8)(R14*2), Z9, K2, K6 // 6291344ac2b4700f0000004f + VCMPPS $79, Z9, Z3, K2, K6 // 62d1644ac2f14f + VCMPPS $79, Z25, Z3, K2, K6 // 6291644ac2f14f + VCMPPS $79, 15(R8)(R14*1), Z3, K2, K6 // 6291644ac2b4300f0000004f + VCMPPS $79, 15(R8)(R14*2), Z3, K2, K6 // 6291644ac2b4700f0000004f + VCMPSD $64, X31, X5, K3, K4 // 6291d70bc2e740 + VCMPSD $64, X31, X5, K3, K6 // 6291d70bc2f740 + VCMPSD $27, X21, X3, K3, K0 // 62b1e70bc2c51b or 62b1e72bc2c51b or 62b1e74bc2c51b + VCMPSD $27, (BX), X3, K3, K0 // 62f1e70bc2031b or 62f1e72bc2031b or 62f1e74bc2031b + VCMPSD $27, -17(BP)(SI*1), X3, K3, K0 // 62f1e70bc28435efffffff1b or 62f1e72bc28435efffffff1b or 62f1e74bc28435efffffff1b + VCMPSD $27, X21, X3, K3, K7 // 62b1e70bc2fd1b or 62b1e72bc2fd1b or 62b1e74bc2fd1b + VCMPSD $27, (BX), X3, K3, K7 // 62f1e70bc23b1b or 62f1e72bc23b1b or 62f1e74bc23b1b + VCMPSD $27, -17(BP)(SI*1), X3, K3, K7 // 62f1e70bc2bc35efffffff1b or 62f1e72bc2bc35efffffff1b or 62f1e74bc2bc35efffffff1b + VCMPSS $47, X11, X1, K3, K5 // 62d1760bc2eb2f + VCMPSS $47, X11, X1, K3, K4 // 62d1760bc2e32f + VCMPSS $82, X0, X13, K2, K4 // 62f1160ac2e052 or 62f1162ac2e052 or 62f1164ac2e052 + VCMPSS $82, -7(DI)(R8*1), X13, K2, 
K4 // 62b1160ac2a407f9ffffff52 or 62b1162ac2a407f9ffffff52 or 62b1164ac2a407f9ffffff52 + VCMPSS $82, (SP), X13, K2, K4 // 62f1160ac2242452 or 62f1162ac2242452 or 62f1164ac2242452 + VCMPSS $82, X0, X13, K2, K6 // 62f1160ac2f052 or 62f1162ac2f052 or 62f1164ac2f052 + VCMPSS $82, -7(DI)(R8*1), X13, K2, K6 // 62b1160ac2b407f9ffffff52 or 62b1162ac2b407f9ffffff52 or 62b1164ac2b407f9ffffff52 + VCMPSS $82, (SP), X13, K2, K6 // 62f1160ac2342452 or 62f1162ac2342452 or 62f1164ac2342452 + VCOMISD X16, X30 // 6221fd082ff0 + VCOMISS X19, X14 // 62317c082ff3 + VCOMPRESSPD X23, K1, X26 // 6282fd098afa + VCOMPRESSPD X23, K1, (SI) // 62e2fd098a3e + VCOMPRESSPD X23, K1, 7(SI)(DI*2) // 62e2fd098abc7e07000000 + VCOMPRESSPD Y20, K2, Y21 // 62a2fd2a8ae5 + VCOMPRESSPD Y20, K2, -7(CX)(DX*1) // 62e2fd2a8aa411f9ffffff + VCOMPRESSPD Y20, K2, -15(R14)(R15*4) // 6282fd2a8aa4bef1ffffff + VCOMPRESSPD Z20, K1, Z9 // 62c2fd498ae1 + VCOMPRESSPD Z0, K1, Z9 // 62d2fd498ac1 + VCOMPRESSPD Z20, K1, Z28 // 6282fd498ae4 + VCOMPRESSPD Z0, K1, Z28 // 6292fd498ac4 + VCOMPRESSPD Z20, K1, (R14) // 62c2fd498a26 + VCOMPRESSPD Z0, K1, (R14) // 62d2fd498a06 + VCOMPRESSPD Z20, K1, -7(DI)(R8*8) // 62a2fd498aa4c7f9ffffff + VCOMPRESSPD Z0, K1, -7(DI)(R8*8) // 62b2fd498a84c7f9ffffff + VCOMPRESSPS X16, K7, X12 // 62c27d0f8ac4 + VCOMPRESSPS X16, K7, 17(SP)(BP*8) // 62e27d0f8a84ec11000000 + VCOMPRESSPS X16, K7, 17(SP)(BP*4) // 62e27d0f8a84ac11000000 + VCOMPRESSPS Y31, K1, Y6 // 62627d298afe + VCOMPRESSPS Y31, K1, 15(DX)(BX*1) // 62627d298abc1a0f000000 + VCOMPRESSPS Y31, K1, -7(CX)(DX*2) // 62627d298abc51f9ffffff + VCOMPRESSPS Z17, K1, Z17 // 62a27d498ac9 + VCOMPRESSPS Z23, K1, Z17 // 62a27d498af9 + VCOMPRESSPS Z17, K1, Z0 // 62e27d498ac8 + VCOMPRESSPS Z23, K1, Z0 // 62e27d498af8 + VCOMPRESSPS Z17, K1, 99(R15)(R15*4) // 62827d498a8cbf63000000 + VCOMPRESSPS Z23, K1, 99(R15)(R15*4) // 62827d498abcbf63000000 + VCOMPRESSPS Z17, K1, 15(DX) // 62e27d498a8a0f000000 + VCOMPRESSPS Z23, K1, 15(DX) // 62e27d498aba0f000000 + VCVTDQ2PD 
X23, K1, X23 // 62a17e09e6ff + VCVTDQ2PD 7(SI)(DI*4), K1, X23 // 62e17e09e6bcbe07000000 + VCVTDQ2PD -7(DI)(R8*2), K1, X23 // 62a17e09e6bc47f9ffffff + VCVTDQ2PD X11, K7, Y6 // 62d17e2fe6f3 + VCVTDQ2PD -17(BP), K7, Y6 // 62f17e2fe6b5efffffff + VCVTDQ2PD -15(R14)(R15*8), K7, Y6 // 62917e2fe6b4fef1ffffff + VCVTDQ2PD Y11, K2, Z31 // 62417e4ae6fb + VCVTDQ2PD (CX), K2, Z31 // 62617e4ae639 + VCVTDQ2PD 99(R15), K2, Z31 // 62417e4ae6bf63000000 + VCVTDQ2PD Y11, K2, Z0 // 62d17e4ae6c3 + VCVTDQ2PD (CX), K2, Z0 // 62f17e4ae601 + VCVTDQ2PD 99(R15), K2, Z0 // 62d17e4ae68763000000 + VCVTDQ2PS X24, K4, X31 // 62017c0c5bf8 + VCVTDQ2PS 17(SP), K4, X31 // 62617c0c5bbc2411000000 + VCVTDQ2PS -17(BP)(SI*4), K4, X31 // 62617c0c5bbcb5efffffff + VCVTDQ2PS Y7, K1, Y19 // 62e17c295bdf + VCVTDQ2PS 17(SP)(BP*2), K1, Y19 // 62e17c295b9c6c11000000 + VCVTDQ2PS -7(DI)(R8*4), K1, Y19 // 62a17c295b9c87f9ffffff + VCVTDQ2PS Z6, K3, Z21 // 62e17c4b5bee + VCVTDQ2PS Z9, K3, Z21 // 62c17c4b5be9 + VCVTDQ2PS Z6, K3, Z9 // 62717c4b5bce + VCVTDQ2PS Z9, K3, Z9 // 62517c4b5bc9 + VCVTDQ2PS Z20, K4, Z1 // 62b17c4c5bcc + VCVTDQ2PS Z9, K4, Z1 // 62d17c4c5bc9 + VCVTDQ2PS 99(R15)(R15*2), K4, Z1 // 62917c4c5b8c7f63000000 + VCVTDQ2PS -7(DI), K4, Z1 // 62f17c4c5b8ff9ffffff + VCVTDQ2PS Z20, K4, Z9 // 62317c4c5bcc + VCVTDQ2PS Z9, K4, Z9 // 62517c4c5bc9 + VCVTDQ2PS 99(R15)(R15*2), K4, Z9 // 62117c4c5b8c7f63000000 + VCVTDQ2PS -7(DI), K4, Z9 // 62717c4c5b8ff9ffffff + VCVTPD2DQ Z30, K5, Y6 // 6291ff4de6f6 + VCVTPD2DQ Z5, K5, Y6 // 62f1ff4de6f5 + VCVTPD2DQ Z26, K7, Y0 // 6291ff4fe6c2 + VCVTPD2DQ Z22, K7, Y0 // 62b1ff4fe6c6 + VCVTPD2DQ -7(CX)(DX*1), K7, Y0 // 62f1ff4fe68411f9ffffff + VCVTPD2DQ -15(R14)(R15*4), K7, Y0 // 6291ff4fe684bef1ffffff + VCVTPD2DQX X0, K7, X14 // 6271ff0fe6f0 + VCVTPD2DQX 7(AX), K7, X14 // 6271ff0fe6b007000000 + VCVTPD2DQX (DI), K7, X14 // 6271ff0fe637 + VCVTPD2DQY Y3, K6, X11 // 6271ff2ee6db + VCVTPD2DQY 15(R8), K6, X11 // 6251ff2ee6980f000000 + VCVTPD2DQY (BP), K6, X11 // 6271ff2ee65d00 + VCVTPD2PS Z7, 
K3, Y5 // 62f1fd4b5aef + VCVTPD2PS Z21, K3, Y5 // 62b1fd4b5aed + VCVTPD2PS Z16, K7, Y20 // 62a1fd4f5ae0 + VCVTPD2PS Z25, K7, Y20 // 6281fd4f5ae1 + VCVTPD2PS 15(DX)(BX*1), K7, Y20 // 62e1fd4f5aa41a0f000000 + VCVTPD2PS -7(CX)(DX*2), K7, Y20 // 62e1fd4f5aa451f9ffffff + VCVTPD2PSX X2, K4, X23 // 62e1fd0c5afa + VCVTPD2PSX 99(R15)(R15*1), K4, X23 // 6281fd0c5abc3f63000000 + VCVTPD2PSX (DX), K4, X23 // 62e1fd0c5a3a + VCVTPD2PSY Y12, K4, X20 // 62c1fd2c5ae4 + VCVTPD2PSY 15(R8)(R14*8), K4, X20 // 6281fd2c5aa4f00f000000 + VCVTPD2PSY -15(R14)(R15*2), K4, X20 // 6281fd2c5aa47ef1ffffff + VCVTPD2UDQ Z27, K4, Y28 // 6201fc4c79e3 + VCVTPD2UDQ Z25, K4, Y28 // 6201fc4c79e1 + VCVTPD2UDQ Z23, K2, Y7 // 62b1fc4a79ff + VCVTPD2UDQ Z9, K2, Y7 // 62d1fc4a79f9 + VCVTPD2UDQ 17(SP)(BP*2), K2, Y7 // 62f1fc4a79bc6c11000000 + VCVTPD2UDQ -7(DI)(R8*4), K2, Y7 // 62b1fc4a79bc87f9ffffff + VCVTPD2UDQX X9, K2, X0 // 62d1fc0a79c1 + VCVTPD2UDQX 7(SI)(DI*8), K2, X0 // 62f1fc0a7984fe07000000 + VCVTPD2UDQX -15(R14), K2, X0 // 62d1fc0a7986f1ffffff + VCVTPD2UDQY Y0, K3, X13 // 6271fc2b79e8 + VCVTPD2UDQY 7(AX)(CX*4), K3, X13 // 6271fc2b79ac8807000000 + VCVTPD2UDQY 7(AX)(CX*1), K3, X13 // 6271fc2b79ac0807000000 + VCVTPH2PS X9, K2, Y12 // 62527d2a13e1 + VCVTPH2PS -7(DI)(R8*1), K2, Y12 // 62327d2a13a407f9ffffff + VCVTPH2PS (SP), K2, Y12 // 62727d2a132424 + VCVTPH2PS X31, K1, X2 // 62927d0913d7 + VCVTPH2PS (R8), K1, X2 // 62d27d091310 + VCVTPH2PS 15(DX)(BX*2), K1, X2 // 62f27d0913945a0f000000 + VCVTPH2PS Y1, K7, Z22 // 62e27d4f13f1 + VCVTPH2PS Y1, K7, Z25 // 62627d4f13c9 + VCVTPH2PS Y14, K1, Z1 // 62d27d4913ce + VCVTPH2PS 17(SP)(BP*8), K1, Z1 // 62f27d49138cec11000000 + VCVTPH2PS 17(SP)(BP*4), K1, Z1 // 62f27d49138cac11000000 + VCVTPH2PS Y14, K1, Z16 // 62c27d4913c6 + VCVTPH2PS 17(SP)(BP*8), K1, Z16 // 62e27d491384ec11000000 + VCVTPH2PS 17(SP)(BP*4), K1, Z16 // 62e27d491384ac11000000 + VCVTPS2DQ X22, K1, X11 // 62317d095bde + VCVTPS2DQ -7(CX), K1, X11 // 62717d095b99f9ffffff + VCVTPS2DQ 15(DX)(BX*4), K1, X11 // 
62717d095b9c9a0f000000 + VCVTPS2DQ Y7, K1, Y17 // 62e17d295bcf + VCVTPS2DQ 7(SI)(DI*4), K1, Y17 // 62e17d295b8cbe07000000 + VCVTPS2DQ -7(DI)(R8*2), K1, Y17 // 62a17d295b8c47f9ffffff + VCVTPS2DQ Z0, K7, Z6 // 62f17d4f5bf0 + VCVTPS2DQ Z8, K7, Z6 // 62d17d4f5bf0 + VCVTPS2DQ Z0, K7, Z2 // 62f17d4f5bd0 + VCVTPS2DQ Z8, K7, Z2 // 62d17d4f5bd0 + VCVTPS2DQ Z14, K2, Z15 // 62517d4a5bfe + VCVTPS2DQ Z27, K2, Z15 // 62117d4a5bfb + VCVTPS2DQ 15(R8)(R14*8), K2, Z15 // 62117d4a5bbcf00f000000 + VCVTPS2DQ -15(R14)(R15*2), K2, Z15 // 62117d4a5bbc7ef1ffffff + VCVTPS2DQ Z14, K2, Z12 // 62517d4a5be6 + VCVTPS2DQ Z27, K2, Z12 // 62117d4a5be3 + VCVTPS2DQ 15(R8)(R14*8), K2, Z12 // 62117d4a5ba4f00f000000 + VCVTPS2DQ -15(R14)(R15*2), K2, Z12 // 62117d4a5ba47ef1ffffff + VCVTPS2PD X14, K4, X5 // 62d17c0c5aee + VCVTPS2PD 99(R15)(R15*8), K4, X5 // 62917c0c5aacff63000000 + VCVTPS2PD 7(AX)(CX*8), K4, X5 // 62f17c0c5aacc807000000 + VCVTPS2PD X0, K1, Y9 // 62717c295ac8 + VCVTPS2PD 17(SP), K1, Y9 // 62717c295a8c2411000000 + VCVTPS2PD -17(BP)(SI*4), K1, Y9 // 62717c295a8cb5efffffff + VCVTPS2PD Y31, K3, Z11 // 62117c4b5adf + VCVTPS2PD Y31, K3, Z5 // 62917c4b5aef + VCVTPS2PD Y8, K4, Z13 // 62517c4c5ae8 + VCVTPS2PD -15(R14)(R15*1), K4, Z13 // 62117c4c5aac3ef1ffffff + VCVTPS2PD -15(BX), K4, Z13 // 62717c4c5aabf1ffffff + VCVTPS2PD Y8, K4, Z14 // 62517c4c5af0 + VCVTPS2PD -15(R14)(R15*1), K4, Z14 // 62117c4c5ab43ef1ffffff + VCVTPS2PD -15(BX), K4, Z14 // 62717c4c5ab3f1ffffff + VCVTPS2PH $126, X7, K5, X17 // 62b37d0d1df97e + VCVTPS2PH $126, X7, K5, 17(SP)(BP*1) // 62f37d0d1dbc2c110000007e + VCVTPS2PH $126, X7, K5, -7(CX)(DX*8) // 62f37d0d1dbcd1f9ffffff7e + VCVTPS2PH $94, Y1, K7, X15 // 62d37d2f1dcf5e + VCVTPS2PH $94, Y1, K7, (AX) // 62f37d2f1d085e + VCVTPS2PH $94, Y1, K7, 7(SI) // 62f37d2f1d8e070000005e + VCVTPS2PH $121, Z5, K7, Y28 // 62937d4f1dec79 + VCVTPS2PH $121, Z23, K7, Y28 // 62837d4f1dfc79 + VCVTPS2PH $121, Z5, K7, 7(AX) // 62f37d4f1da80700000079 + VCVTPS2PH $121, Z23, K7, 7(AX) // 
62e37d4f1db80700000079 + VCVTPS2PH $121, Z5, K7, (DI) // 62f37d4f1d2f79 + VCVTPS2PH $121, Z23, K7, (DI) // 62e37d4f1d3f79 + VCVTPS2PH $13, Z2, K6, Y13 // 62d37d4e1dd50d + VCVTPS2UDQ X27, K7, X8 // 62117c0f79c3 + VCVTPS2UDQ 15(R8)(R14*4), K7, X8 // 62117c0f7984b00f000000 + VCVTPS2UDQ -7(CX)(DX*4), K7, X8 // 62717c0f798491f9ffffff + VCVTPS2UDQ Y9, K2, Y12 // 62517c2a79e1 + VCVTPS2UDQ -17(BP)(SI*8), K2, Y12 // 62717c2a79a4f5efffffff + VCVTPS2UDQ (R15), K2, Y12 // 62517c2a7927 + VCVTPS2UDQ Z13, K5, Z28 // 62417c4d79e5 + VCVTPS2UDQ Z21, K5, Z28 // 62217c4d79e5 + VCVTPS2UDQ Z13, K5, Z6 // 62d17c4d79f5 + VCVTPS2UDQ Z21, K5, Z6 // 62b17c4d79f5 + VCVTPS2UDQ Z3, K3, Z26 // 62617c4b79d3 + VCVTPS2UDQ Z0, K3, Z26 // 62617c4b79d0 + VCVTPS2UDQ (SI), K3, Z26 // 62617c4b7916 + VCVTPS2UDQ 7(SI)(DI*2), K3, Z26 // 62617c4b79947e07000000 + VCVTPS2UDQ Z3, K3, Z3 // 62f17c4b79db + VCVTPS2UDQ Z0, K3, Z3 // 62f17c4b79d8 + VCVTPS2UDQ (SI), K3, Z3 // 62f17c4b791e + VCVTPS2UDQ 7(SI)(DI*2), K3, Z3 // 62f17c4b799c7e07000000 + VCVTSD2SI X24, R14 // 62117f082df0 or 62117f282df0 or 62117f482df0 + VCVTSD2SI X24, AX // 62917f082dc0 or 62917f282dc0 or 62917f482dc0 + VCVTSD2SS X11, X1, K1, X22 // 62c1f7095af3 + VCVTSD2SS X8, X7, K1, X6 // 62d1c7095af0 or 62d1c7295af0 or 62d1c7495af0 + VCVTSD2SS (R14), X7, K1, X6 // 62d1c7095a36 or 62d1c7295a36 or 62d1c7495a36 + VCVTSD2SS -7(DI)(R8*8), X7, K1, X6 // 62b1c7095ab4c7f9ffffff or 62b1c7295ab4c7f9ffffff or 62b1c7495ab4c7f9ffffff + VCVTSD2USIL X31, R9 // 62117f0879cf + VCVTSD2USIL X31, CX // 62917f0879cf + VCVTSD2USIL X3, SP // 62f17f0879e3 or 62f17f2879e3 or 62f17f4879e3 + VCVTSD2USIL 99(R15)(R15*4), SP // 62917f0879a4bf63000000 or 62917f2879a4bf63000000 or 62917f4879a4bf63000000 + VCVTSD2USIL 15(DX), SP // 62f17f0879a20f000000 or 62f17f2879a20f000000 or 62f17f4879a20f000000 + VCVTSD2USIL X3, R14 // 62717f0879f3 or 62717f2879f3 or 62717f4879f3 + VCVTSD2USIL 99(R15)(R15*4), R14 // 62117f0879b4bf63000000 or 62117f2879b4bf63000000 or 62117f4879b4bf63000000 + 
VCVTSD2USIL 15(DX), R14 // 62717f0879b20f000000 or 62717f2879b20f000000 or 62717f4879b20f000000 + VCVTSD2USIQ X28, R10 // 6211ff0879d4 + VCVTSD2USIQ X28, CX // 6291ff0879cc + VCVTSD2USIQ X20, R9 // 6231ff0879cc or 6231ff2879cc or 6231ff4879cc + VCVTSD2USIQ (CX), R9 // 6271ff087909 or 6271ff287909 or 6271ff487909 + VCVTSD2USIQ 99(R15), R9 // 6251ff08798f63000000 or 6251ff28798f63000000 or 6251ff48798f63000000 + VCVTSD2USIQ X20, R13 // 6231ff0879ec or 6231ff2879ec or 6231ff4879ec + VCVTSD2USIQ (CX), R13 // 6271ff087929 or 6271ff287929 or 6271ff487929 + VCVTSD2USIQ 99(R15), R13 // 6251ff0879af63000000 or 6251ff2879af63000000 or 6251ff4879af63000000 + VCVTSI2SDL AX, X7, X24 // 626147082ac0 or 626147282ac0 or 626147482ac0 + VCVTSI2SDL R9, X7, X24 // 624147082ac1 or 624147282ac1 or 624147482ac1 + VCVTSI2SDL 99(R15)(R15*8), X7, X24 // 620147082a84ff63000000 or 620147282a84ff63000000 or 620147482a84ff63000000 + VCVTSI2SDL 7(AX)(CX*8), X7, X24 // 626147082a84c807000000 or 626147282a84c807000000 or 626147482a84c807000000 + VCVTSI2SDQ DX, X16, X20 // 62e1ff002ae2 or 62e1ff202ae2 or 62e1ff402ae2 + VCVTSI2SDQ BP, X16, X20 // 62e1ff002ae5 or 62e1ff202ae5 or 62e1ff402ae5 + VCVTSI2SDQ 99(R15)(R15*2), X16, X20 // 6281ff002aa47f63000000 or 6281ff202aa47f63000000 or 6281ff402aa47f63000000 + VCVTSI2SDQ -7(DI), X16, X20 // 62e1ff002aa7f9ffffff or 62e1ff202aa7f9ffffff or 62e1ff402aa7f9ffffff + VCVTSI2SSL CX, X28, X17 // 62e11e002ac9 or 62e11e202ac9 or 62e11e402ac9 + VCVTSI2SSL SP, X28, X17 // 62e11e002acc or 62e11e202acc or 62e11e402acc + VCVTSI2SSL (AX), X28, X17 // 62e11e002a08 or 62e11e202a08 or 62e11e402a08 + VCVTSI2SSL 7(SI), X28, X17 // 62e11e002a8e07000000 or 62e11e202a8e07000000 or 62e11e402a8e07000000 + VCVTSS2SD X6, X16, K7, X11 // 62717e075ade + VCVTSS2SD X12, X22, K2, X6 // 62d14e025af4 or 62d14e225af4 or 62d14e425af4 + VCVTSS2SD (BX), X22, K2, X6 // 62f14e025a33 or 62f14e225a33 or 62f14e425a33 + VCVTSS2SD -17(BP)(SI*1), X22, K2, X6 // 62f14e025ab435efffffff or 
62f14e225ab435efffffff or 62f14e425ab435efffffff + VCVTSS2SI X16, R9 // 62317e082dc8 + VCVTSS2SI X16, CX // 62b17e082dc8 + VCVTSS2SI X28, SP // 62917e082de4 or 62917e282de4 or 62917e482de4 + VCVTSS2SI X28, R14 // 62117e082df4 or 62117e282df4 or 62117e482df4 + VCVTSS2USIL X11, AX // 62d17e0879c3 + VCVTSS2USIL X11, R9 // 62517e0879cb + VCVTSS2USIL X1, CX // 62f17e0879c9 or 62f17e2879c9 or 62f17e4879c9 + VCVTSS2USIL 17(SP)(BP*1), CX // 62f17e08798c2c11000000 or 62f17e28798c2c11000000 or 62f17e48798c2c11000000 + VCVTSS2USIL -7(CX)(DX*8), CX // 62f17e08798cd1f9ffffff or 62f17e28798cd1f9ffffff or 62f17e48798cd1f9ffffff + VCVTSS2USIL X1, SP // 62f17e0879e1 or 62f17e2879e1 or 62f17e4879e1 + VCVTSS2USIL 17(SP)(BP*1), SP // 62f17e0879a42c11000000 or 62f17e2879a42c11000000 or 62f17e4879a42c11000000 + VCVTSS2USIL -7(CX)(DX*8), SP // 62f17e0879a4d1f9ffffff or 62f17e2879a4d1f9ffffff or 62f17e4879a4d1f9ffffff + VCVTSS2USIQ X19, DX // 62b1fe0879d3 + VCVTSS2USIQ X19, BP // 62b1fe0879eb + VCVTSS2USIQ X13, R10 // 6251fe0879d5 or 6251fe2879d5 or 6251fe4879d5 + VCVTSS2USIQ -17(BP)(SI*2), R10 // 6271fe08799475efffffff or 6271fe28799475efffffff or 6271fe48799475efffffff + VCVTSS2USIQ 7(AX)(CX*2), R10 // 6271fe0879944807000000 or 6271fe2879944807000000 or 6271fe4879944807000000 + VCVTSS2USIQ X13, CX // 62d1fe0879cd or 62d1fe2879cd or 62d1fe4879cd + VCVTSS2USIQ -17(BP)(SI*2), CX // 62f1fe08798c75efffffff or 62f1fe28798c75efffffff or 62f1fe48798c75efffffff + VCVTSS2USIQ 7(AX)(CX*2), CX // 62f1fe08798c4807000000 or 62f1fe28798c4807000000 or 62f1fe48798c4807000000 + VCVTTPD2DQ Z16, K4, Y30 // 6221fd4ce6f0 + VCVTTPD2DQ Z13, K4, Y30 // 6241fd4ce6f5 + VCVTTPD2DQ Z12, K1, Y26 // 6241fd49e6d4 + VCVTTPD2DQ Z27, K1, Y26 // 6201fd49e6d3 + VCVTTPD2DQ 7(AX), K1, Y26 // 6261fd49e69007000000 + VCVTTPD2DQ (DI), K1, Y26 // 6261fd49e617 + VCVTTPD2DQX X14, K3, X2 // 62d1fd0be6d6 + VCVTTPD2DQX 15(R8)(R14*1), K3, X2 // 6291fd0be694300f000000 + VCVTTPD2DQX 15(R8)(R14*2), K3, X2 // 6291fd0be694700f000000 + 
VCVTTPD2DQY Y7, K4, X0 // 62f1fd2ce6c7 + VCVTTPD2DQY -7(CX), K4, X0 // 62f1fd2ce681f9ffffff + VCVTTPD2DQY 15(DX)(BX*4), K4, X0 // 62f1fd2ce6849a0f000000 + VCVTTPD2UDQ Z9, K3, Y30 // 6241fc4b78f1 + VCVTTPD2UDQ Z12, K3, Y30 // 6241fc4b78f4 + VCVTTPD2UDQ Z6, K7, Y31 // 6261fc4f78fe + VCVTTPD2UDQ Z25, K7, Y31 // 6201fc4f78f9 + VCVTTPD2UDQ -17(BP)(SI*8), K7, Y31 // 6261fc4f78bcf5efffffff + VCVTTPD2UDQ (R15), K7, Y31 // 6241fc4f783f + VCVTTPD2UDQX X17, K4, X11 // 6231fc0c78d9 + VCVTTPD2UDQX 99(R15)(R15*4), K4, X11 // 6211fc0c789cbf63000000 + VCVTTPD2UDQX 15(DX), K4, X11 // 6271fc0c789a0f000000 + VCVTTPD2UDQY Y22, K4, X18 // 62a1fc2c78d6 + VCVTTPD2UDQY (AX), K4, X18 // 62e1fc2c7810 + VCVTTPD2UDQY 7(SI), K4, X18 // 62e1fc2c789607000000 + VCVTTPS2DQ X24, K4, X2 // 62917e0c5bd0 + VCVTTPS2DQ 99(R15)(R15*2), K4, X2 // 62917e0c5b947f63000000 + VCVTTPS2DQ -7(DI), K4, X2 // 62f17e0c5b97f9ffffff + VCVTTPS2DQ Y0, K2, Y7 // 62f17e2a5bf8 + VCVTTPS2DQ 15(R8)(R14*4), K2, Y7 // 62917e2a5bbcb00f000000 + VCVTTPS2DQ -7(CX)(DX*4), K2, Y7 // 62f17e2a5bbc91f9ffffff + VCVTTPS2DQ Z20, K2, Z0 // 62b17e4a5bc4 + VCVTTPS2DQ Z28, K2, Z0 // 62917e4a5bc4 + VCVTTPS2DQ Z20, K2, Z6 // 62b17e4a5bf4 + VCVTTPS2DQ Z28, K2, Z6 // 62917e4a5bf4 + VCVTTPS2DQ Z9, K3, Z3 // 62d17e4b5bd9 + VCVTTPS2DQ Z19, K3, Z3 // 62b17e4b5bdb + VCVTTPS2DQ 7(SI)(DI*1), K3, Z3 // 62f17e4b5b9c3e07000000 + VCVTTPS2DQ 15(DX)(BX*8), K3, Z3 // 62f17e4b5b9cda0f000000 + VCVTTPS2DQ Z9, K3, Z30 // 62417e4b5bf1 + VCVTTPS2DQ Z19, K3, Z30 // 62217e4b5bf3 + VCVTTPS2DQ 7(SI)(DI*1), K3, Z30 // 62617e4b5bb43e07000000 + VCVTTPS2DQ 15(DX)(BX*8), K3, Z30 // 62617e4b5bb4da0f000000 + VCVTTPS2UDQ X22, K2, X26 // 62217c0a78d6 + VCVTTPS2UDQ 15(DX)(BX*1), K2, X26 // 62617c0a78941a0f000000 + VCVTTPS2UDQ -7(CX)(DX*2), K2, X26 // 62617c0a789451f9ffffff + VCVTTPS2UDQ Y13, K1, Y24 // 62417c2978c5 + VCVTTPS2UDQ 17(SP)(BP*1), K1, Y24 // 62617c2978842c11000000 + VCVTTPS2UDQ -7(CX)(DX*8), K1, Y24 // 62617c297884d1f9ffffff + VCVTTPS2UDQ Z2, K7, Z18 // 62e17c4f78d2 + 
VCVTTPS2UDQ Z21, K7, Z18 // 62a17c4f78d5 + VCVTTPS2UDQ Z2, K7, Z24 // 62617c4f78c2 + VCVTTPS2UDQ Z21, K7, Z24 // 62217c4f78c5 + VCVTTPS2UDQ Z6, K1, Z7 // 62f17c4978fe + VCVTTPS2UDQ Z16, K1, Z7 // 62b17c4978f8 + VCVTTPS2UDQ -7(CX), K1, Z7 // 62f17c4978b9f9ffffff + VCVTTPS2UDQ 15(DX)(BX*4), K1, Z7 // 62f17c4978bc9a0f000000 + VCVTTPS2UDQ Z6, K1, Z13 // 62717c4978ee + VCVTTPS2UDQ Z16, K1, Z13 // 62317c4978e8 + VCVTTPS2UDQ -7(CX), K1, Z13 // 62717c4978a9f9ffffff + VCVTTPS2UDQ 15(DX)(BX*4), K1, Z13 // 62717c4978ac9a0f000000 + VCVTTSD2SI X30, R9 // 62117f082cce or 62117f282cce or 62117f482cce + VCVTTSD2SI X30, CX // 62917f082cce or 62917f282cce or 62917f482cce + VCVTTSD2USIL X12, SP // 62d17f0878e4 + VCVTTSD2USIL X12, R14 // 62517f0878f4 + VCVTTSD2USIL X23, AX // 62b17f0878c7 or 62b17f2878c7 or 62b17f4878c7 + VCVTTSD2USIL 17(SP)(BP*2), AX // 62f17f0878846c11000000 or 62f17f2878846c11000000 or 62f17f4878846c11000000 + VCVTTSD2USIL -7(DI)(R8*4), AX // 62b17f08788487f9ffffff or 62b17f28788487f9ffffff or 62b17f48788487f9ffffff + VCVTTSD2USIL X23, R9 // 62317f0878cf or 62317f2878cf or 62317f4878cf + VCVTTSD2USIL 17(SP)(BP*2), R9 // 62717f08788c6c11000000 or 62717f28788c6c11000000 or 62717f48788c6c11000000 + VCVTTSD2USIL -7(DI)(R8*4), R9 // 62317f08788c87f9ffffff or 62317f28788c87f9ffffff or 62317f48788c87f9ffffff + VCVTTSD2USIQ X30, R10 // 6211ff0878d6 + VCVTTSD2USIQ X30, CX // 6291ff0878ce + VCVTTSD2USIQ X8, R9 // 6251ff0878c8 or 6251ff2878c8 or 6251ff4878c8 + VCVTTSD2USIQ 15(R8), R9 // 6251ff0878880f000000 or 6251ff2878880f000000 or 6251ff4878880f000000 + VCVTTSD2USIQ (BP), R9 // 6271ff08784d00 or 6271ff28784d00 or 6271ff48784d00 + VCVTTSD2USIQ X8, R13 // 6251ff0878e8 or 6251ff2878e8 or 6251ff4878e8 + VCVTTSD2USIQ 15(R8), R13 // 6251ff0878a80f000000 or 6251ff2878a80f000000 or 6251ff4878a80f000000 + VCVTTSD2USIQ (BP), R13 // 6271ff08786d00 or 6271ff28786d00 or 6271ff48786d00 + VCVTTSS2SI X20, CX // 62b17e082ccc + VCVTTSS2SI X20, SP // 62b17e082ce4 + VCVTTSS2SIQ X26, R10 // 
6211fe082cd2 or 6211fe282cd2 or 6211fe482cd2 + VCVTTSS2SIQ X26, CX // 6291fe082cca or 6291fe282cca or 6291fe482cca + VCVTTSS2USIL X19, R9 // 62317e0878cb + VCVTTSS2USIL X19, CX // 62b17e0878cb + VCVTTSS2USIL X0, SP // 62f17e0878e0 or 62f17e2878e0 or 62f17e4878e0 + VCVTTSS2USIL 99(R15)(R15*4), SP // 62917e0878a4bf63000000 or 62917e2878a4bf63000000 or 62917e4878a4bf63000000 + VCVTTSS2USIL 15(DX), SP // 62f17e0878a20f000000 or 62f17e2878a20f000000 or 62f17e4878a20f000000 + VCVTTSS2USIL X0, R14 // 62717e0878f0 or 62717e2878f0 or 62717e4878f0 + VCVTTSS2USIL 99(R15)(R15*4), R14 // 62117e0878b4bf63000000 or 62117e2878b4bf63000000 or 62117e4878b4bf63000000 + VCVTTSS2USIL 15(DX), R14 // 62717e0878b20f000000 or 62717e2878b20f000000 or 62717e4878b20f000000 + VCVTTSS2USIQ X31, R9 // 6211fe0878cf + VCVTTSS2USIQ X31, R13 // 6211fe0878ef + VCVTTSS2USIQ X16, DX // 62b1fe0878d0 or 62b1fe2878d0 or 62b1fe4878d0 + VCVTTSS2USIQ (CX), DX // 62f1fe087811 or 62f1fe287811 or 62f1fe487811 + VCVTTSS2USIQ 99(R15), DX // 62d1fe08789763000000 or 62d1fe28789763000000 or 62d1fe48789763000000 + VCVTTSS2USIQ X16, BP // 62b1fe0878e8 or 62b1fe2878e8 or 62b1fe4878e8 + VCVTTSS2USIQ (CX), BP // 62f1fe087829 or 62f1fe287829 or 62f1fe487829 + VCVTTSS2USIQ 99(R15), BP // 62d1fe0878af63000000 or 62d1fe2878af63000000 or 62d1fe4878af63000000 + VCVTUDQ2PD X8, K4, X7 // 62d17e0c7af8 + VCVTUDQ2PD 17(SP)(BP*2), K4, X7 // 62f17e0c7abc6c11000000 + VCVTUDQ2PD -7(DI)(R8*4), K4, X7 // 62b17e0c7abc87f9ffffff + VCVTUDQ2PD X1, K1, Y1 // 62f17e297ac9 + VCVTUDQ2PD 15(R8)(R14*1), K1, Y1 // 62917e297a8c300f000000 + VCVTUDQ2PD 15(R8)(R14*2), K1, Y1 // 62917e297a8c700f000000 + VCVTUDQ2PD Y26, K3, Z13 // 62117e4b7aea + VCVTUDQ2PD (AX), K3, Z13 // 62717e4b7a28 + VCVTUDQ2PD 7(SI), K3, Z13 // 62717e4b7aae07000000 + VCVTUDQ2PS X15, K4, X0 // 62d17f0c7ac7 + VCVTUDQ2PS 15(R8), K4, X0 // 62d17f0c7a800f000000 + VCVTUDQ2PS (BP), K4, X0 // 62f17f0c7a4500 + VCVTUDQ2PS Y12, K5, Y30 // 62417f2d7af4 + VCVTUDQ2PS (R14), K5, Y30 // 
62417f2d7a36 + VCVTUDQ2PS -7(DI)(R8*8), K5, Y30 // 62217f2d7ab4c7f9ffffff + VCVTUDQ2PS Z22, K7, Z18 // 62a17f4f7ad6 + VCVTUDQ2PS Z7, K7, Z18 // 62e17f4f7ad7 + VCVTUDQ2PS Z22, K7, Z8 // 62317f4f7ac6 + VCVTUDQ2PS Z7, K7, Z8 // 62717f4f7ac7 + VCVTUDQ2PS Z20, K7, Z2 // 62b17f4f7ad4 + VCVTUDQ2PS Z9, K7, Z2 // 62d17f4f7ad1 + VCVTUDQ2PS (BX), K7, Z2 // 62f17f4f7a13 + VCVTUDQ2PS -17(BP)(SI*1), K7, Z2 // 62f17f4f7a9435efffffff + VCVTUDQ2PS Z20, K7, Z31 // 62217f4f7afc + VCVTUDQ2PS Z9, K7, Z31 // 62417f4f7af9 + VCVTUDQ2PS (BX), K7, Z31 // 62617f4f7a3b + VCVTUDQ2PS -17(BP)(SI*1), K7, Z31 // 62617f4f7abc35efffffff + VCVTUSI2SDL AX, X7, X22 // 62e147087bf0 or 62e147287bf0 or 62e147487bf0 + VCVTUSI2SDL R9, X7, X22 // 62c147087bf1 or 62c147287bf1 or 62c147487bf1 + VCVTUSI2SDL 99(R15)(R15*2), X7, X22 // 628147087bb47f63000000 or 628147287bb47f63000000 or 628147487bb47f63000000 + VCVTUSI2SDL -7(DI), X7, X22 // 62e147087bb7f9ffffff or 62e147287bb7f9ffffff or 62e147487bb7f9ffffff + VCVTUSI2SDQ R10, X7, X19 // 62c1c7087bda or 62c1c7287bda or 62c1c7487bda + VCVTUSI2SDQ CX, X7, X19 // 62e1c7087bd9 or 62e1c7287bd9 or 62e1c7487bd9 + VCVTUSI2SDQ 15(R8)(R14*8), X7, X19 // 6281c7087b9cf00f000000 or 6281c7287b9cf00f000000 or 6281c7487b9cf00f000000 + VCVTUSI2SDQ -15(R14)(R15*2), X7, X19 // 6281c7087b9c7ef1ffffff or 6281c7287b9c7ef1ffffff or 6281c7487b9c7ef1ffffff + VCVTUSI2SDQ R9, X31, X16 // 62c187007bc1 + VCVTUSI2SDQ R13, X31, X16 // 62c187007bc5 + VCVTUSI2SSL CX, X7, X1 // 62f146087bc9 or 62f146287bc9 or 62f146487bc9 + VCVTUSI2SSL SP, X7, X1 // 62f146087bcc or 62f146287bcc or 62f146487bcc + VCVTUSI2SSL -7(CX)(DX*1), X7, X1 // 62f146087b8c11f9ffffff or 62f146287b8c11f9ffffff or 62f146487b8c11f9ffffff + VCVTUSI2SSL -15(R14)(R15*4), X7, X1 // 629146087b8cbef1ffffff or 629146287b8cbef1ffffff or 629146487b8cbef1ffffff + VCVTUSI2SSL R14, X15, X9 // 625106087bce + VCVTUSI2SSL AX, X15, X9 // 627106087bc8 + VCVTUSI2SSQ DX, X0, X12 // 6271fe087be2 or 6271fe287be2 or 6271fe487be2 + VCVTUSI2SSQ BP, X0, 
X12 // 6271fe087be5 or 6271fe287be5 or 6271fe487be5 + VCVTUSI2SSQ -15(R14)(R15*1), X0, X12 // 6211fe087ba43ef1ffffff or 6211fe287ba43ef1ffffff or 6211fe487ba43ef1ffffff + VCVTUSI2SSQ -15(BX), X0, X12 // 6271fe087ba3f1ffffff or 6271fe287ba3f1ffffff or 6271fe487ba3f1ffffff + VCVTUSI2SSQ R10, X14, X12 // 62518e087be2 + VCVTUSI2SSQ CX, X14, X12 // 62718e087be1 + VDIVPD X26, X3, K2, X8 // 6211e50a5ec2 + VDIVPD (SI), X3, K2, X8 // 6271e50a5e06 + VDIVPD 7(SI)(DI*2), X3, K2, X8 // 6271e50a5e847e07000000 + VDIVPD Y7, Y21, K3, Y13 // 6271d5235eef + VDIVPD -7(CX)(DX*1), Y21, K3, Y13 // 6271d5235eac11f9ffffff + VDIVPD -15(R14)(R15*4), Y21, K3, Y13 // 6211d5235eacbef1ffffff + VDIVPD Z16, Z21, K3, Z14 // 6231d5435ef0 + VDIVPD Z9, Z21, K3, Z14 // 6251d5435ef1 + VDIVPD Z16, Z8, K3, Z14 // 6231bd4b5ef0 + VDIVPD Z9, Z8, K3, Z14 // 6251bd4b5ef1 + VDIVPD Z16, Z21, K3, Z15 // 6231d5435ef8 + VDIVPD Z9, Z21, K3, Z15 // 6251d5435ef9 + VDIVPD Z16, Z8, K3, Z15 // 6231bd4b5ef8 + VDIVPD Z9, Z8, K3, Z15 // 6251bd4b5ef9 + VDIVPD Z0, Z23, K3, Z20 // 62e1c5435ee0 + VDIVPD Z11, Z23, K3, Z20 // 62c1c5435ee3 + VDIVPD -17(BP)(SI*2), Z23, K3, Z20 // 62e1c5435ea475efffffff + VDIVPD 7(AX)(CX*2), Z23, K3, Z20 // 62e1c5435ea44807000000 + VDIVPD Z0, Z19, K3, Z20 // 62e1e5435ee0 + VDIVPD Z11, Z19, K3, Z20 // 62c1e5435ee3 + VDIVPD -17(BP)(SI*2), Z19, K3, Z20 // 62e1e5435ea475efffffff + VDIVPD 7(AX)(CX*2), Z19, K3, Z20 // 62e1e5435ea44807000000 + VDIVPD Z0, Z23, K3, Z0 // 62f1c5435ec0 + VDIVPD Z11, Z23, K3, Z0 // 62d1c5435ec3 + VDIVPD -17(BP)(SI*2), Z23, K3, Z0 // 62f1c5435e8475efffffff + VDIVPD 7(AX)(CX*2), Z23, K3, Z0 // 62f1c5435e844807000000 + VDIVPD Z0, Z19, K3, Z0 // 62f1e5435ec0 + VDIVPD Z11, Z19, K3, Z0 // 62d1e5435ec3 + VDIVPD -17(BP)(SI*2), Z19, K3, Z0 // 62f1e5435e8475efffffff + VDIVPD 7(AX)(CX*2), Z19, K3, Z0 // 62f1e5435e844807000000 + VDIVPS X28, X13, K2, X23 // 6281140a5efc + VDIVPS 17(SP)(BP*8), X13, K2, X23 // 62e1140a5ebcec11000000 + VDIVPS 17(SP)(BP*4), X13, K2, X23 // 
62e1140a5ebcac11000000 + VDIVPS Y18, Y13, K1, Y30 // 622114295ef2 + VDIVPS 15(DX)(BX*1), Y13, K1, Y30 // 626114295eb41a0f000000 + VDIVPS -7(CX)(DX*2), Y13, K1, Y30 // 626114295eb451f9ffffff + VDIVPS Z0, Z24, K2, Z0 // 62f13c425ec0 + VDIVPS Z26, Z24, K2, Z0 // 62913c425ec2 + VDIVPS Z0, Z12, K2, Z0 // 62f11c4a5ec0 + VDIVPS Z26, Z12, K2, Z0 // 62911c4a5ec2 + VDIVPS Z0, Z24, K2, Z25 // 62613c425ec8 + VDIVPS Z26, Z24, K2, Z25 // 62013c425eca + VDIVPS Z0, Z12, K2, Z25 // 62611c4a5ec8 + VDIVPS Z26, Z12, K2, Z25 // 62011c4a5eca + VDIVPS Z9, Z9, K1, Z9 // 625134495ec9 + VDIVPS Z28, Z9, K1, Z9 // 621134495ecc + VDIVPS 15(R8)(R14*1), Z9, K1, Z9 // 621134495e8c300f000000 + VDIVPS 15(R8)(R14*2), Z9, K1, Z9 // 621134495e8c700f000000 + VDIVPS Z9, Z25, K1, Z9 // 625134415ec9 + VDIVPS Z28, Z25, K1, Z9 // 621134415ecc + VDIVPS 15(R8)(R14*1), Z25, K1, Z9 // 621134415e8c300f000000 + VDIVPS 15(R8)(R14*2), Z25, K1, Z9 // 621134415e8c700f000000 + VDIVPS Z9, Z9, K1, Z3 // 62d134495ed9 + VDIVPS Z28, Z9, K1, Z3 // 629134495edc + VDIVPS 15(R8)(R14*1), Z9, K1, Z3 // 629134495e9c300f000000 + VDIVPS 15(R8)(R14*2), Z9, K1, Z3 // 629134495e9c700f000000 + VDIVPS Z9, Z25, K1, Z3 // 62d134415ed9 + VDIVPS Z28, Z25, K1, Z3 // 629134415edc + VDIVPS 15(R8)(R14*1), Z25, K1, Z3 // 629134415e9c300f000000 + VDIVPS 15(R8)(R14*2), Z25, K1, Z3 // 629134415e9c700f000000 + VDIVSD X15, X9, K7, X24 // 6241b70f5ec7 + VDIVSD X21, X18, K1, X26 // 6221ef015ed5 or 6221ef215ed5 or 6221ef415ed5 + VDIVSD 7(AX)(CX*4), X18, K1, X26 // 6261ef015e948807000000 or 6261ef215e948807000000 or 6261ef415e948807000000 + VDIVSD 7(AX)(CX*1), X18, K1, X26 // 6261ef015e940807000000 or 6261ef215e940807000000 or 6261ef415e940807000000 + VDIVSS X31, X11, K1, X1 // 629126095ecf + VDIVSS X0, X7, K1, X3 // 62f146095ed8 or 62f146295ed8 or 62f146495ed8 + VDIVSS 15(DX)(BX*1), X7, K1, X3 // 62f146095e9c1a0f000000 or 62f146295e9c1a0f000000 or 62f146495e9c1a0f000000 + VDIVSS -7(CX)(DX*2), X7, K1, X3 // 62f146095e9c51f9ffffff or 
62f146295e9c51f9ffffff or 62f146495e9c51f9ffffff + VEXPANDPD X24, K3, X0 // 6292fd0b88c0 + VEXPANDPD 7(SI)(DI*4), K3, X0 // 62f2fd0b8884be07000000 + VEXPANDPD -7(DI)(R8*2), K3, X0 // 62b2fd0b888447f9ffffff + VEXPANDPD Y8, K4, Y24 // 6242fd2c88c0 + VEXPANDPD -17(BP), K4, Y24 // 6262fd2c8885efffffff + VEXPANDPD -15(R14)(R15*8), K4, Y24 // 6202fd2c8884fef1ffffff + VEXPANDPD Z26, K5, Z30 // 6202fd4d88f2 + VEXPANDPD Z22, K5, Z30 // 6222fd4d88f6 + VEXPANDPD (CX), K5, Z30 // 6262fd4d8831 + VEXPANDPD 99(R15), K5, Z30 // 6242fd4d88b763000000 + VEXPANDPD Z26, K5, Z5 // 6292fd4d88ea + VEXPANDPD Z22, K5, Z5 // 62b2fd4d88ee + VEXPANDPD (CX), K5, Z5 // 62f2fd4d8829 + VEXPANDPD 99(R15), K5, Z5 // 62d2fd4d88af63000000 + VEXPANDPS X7, K7, X20 // 62e27d0f88e7 + VEXPANDPS 17(SP), K7, X20 // 62e27d0f88a42411000000 + VEXPANDPS -17(BP)(SI*4), K7, X20 // 62e27d0f88a4b5efffffff + VEXPANDPS Y24, K7, Y11 // 62127d2f88d8 + VEXPANDPS 17(SP)(BP*2), K7, Y11 // 62727d2f889c6c11000000 + VEXPANDPS -7(DI)(R8*4), K7, Y11 // 62327d2f889c87f9ffffff + VEXPANDPS Z16, K6, Z7 // 62b27d4e88f8 + VEXPANDPS Z25, K6, Z7 // 62927d4e88f9 + VEXPANDPS 99(R15)(R15*2), K6, Z7 // 62927d4e88bc7f63000000 + VEXPANDPS -7(DI), K6, Z7 // 62f27d4e88bff9ffffff + VEXPANDPS Z16, K6, Z21 // 62a27d4e88e8 + VEXPANDPS Z25, K6, Z21 // 62827d4e88e9 + VEXPANDPS 99(R15)(R15*2), K6, Z21 // 62827d4e88ac7f63000000 + VEXPANDPS -7(DI), K6, Z21 // 62e27d4e88aff9ffffff + VEXTRACTF32X4 $1, Y5, K3, X9 // 62d37d2b19e901 + VEXTRACTF32X4 $1, Y5, K3, 7(AX) // 62f37d2b19a80700000001 + VEXTRACTF32X4 $1, Y5, K3, (DI) // 62f37d2b192f01 + VEXTRACTF32X4 $3, Z14, K7, X7 // 62737d4f19f703 + VEXTRACTF32X4 $3, Z13, K7, X7 // 62737d4f19ef03 + VEXTRACTF32X4 $3, Z14, K7, 99(R15)(R15*1) // 62137d4f19b43f6300000003 + VEXTRACTF32X4 $3, Z13, K7, 99(R15)(R15*1) // 62137d4f19ac3f6300000003 + VEXTRACTF32X4 $3, Z14, K7, (DX) // 62737d4f193203 + VEXTRACTF32X4 $3, Z13, K7, (DX) // 62737d4f192a03 + VEXTRACTF64X4 $0, Z2, K2, Y16 // 62b3fd4a1bd000 + VEXTRACTF64X4 $0, Z7, 
K2, Y16 // 62b3fd4a1bf800 + VEXTRACTF64X4 $0, Z2, K2, 15(R8)(R14*8) // 6293fd4a1b94f00f00000000 + VEXTRACTF64X4 $0, Z7, K2, 15(R8)(R14*8) // 6293fd4a1bbcf00f00000000 + VEXTRACTF64X4 $0, Z2, K2, -15(R14)(R15*2) // 6293fd4a1b947ef1ffffff00 + VEXTRACTF64X4 $0, Z7, K2, -15(R14)(R15*2) // 6293fd4a1bbc7ef1ffffff00 + VEXTRACTI32X4 $0, Y9, K5, X31 // 62137d2d39cf00 + VEXTRACTI32X4 $0, Y9, K5, 7(SI)(DI*1) // 62737d2d398c3e0700000000 + VEXTRACTI32X4 $0, Y9, K5, 15(DX)(BX*8) // 62737d2d398cda0f00000000 + VEXTRACTI32X4 $1, Z27, K3, X3 // 62637d4b39db01 + VEXTRACTI32X4 $1, Z25, K3, X3 // 62637d4b39cb01 + VEXTRACTI32X4 $1, Z27, K3, -7(DI)(R8*1) // 62237d4b399c07f9ffffff01 + VEXTRACTI32X4 $1, Z25, K3, -7(DI)(R8*1) // 62237d4b398c07f9ffffff01 + VEXTRACTI32X4 $1, Z27, K3, (SP) // 62637d4b391c2401 + VEXTRACTI32X4 $1, Z25, K3, (SP) // 62637d4b390c2401 + VEXTRACTI64X4 $1, Z3, K3, Y6 // 62f3fd4b3bde01 + VEXTRACTI64X4 $1, Z0, K3, Y6 // 62f3fd4b3bc601 + VEXTRACTI64X4 $1, Z3, K3, 7(AX)(CX*4) // 62f3fd4b3b9c880700000001 + VEXTRACTI64X4 $1, Z0, K3, 7(AX)(CX*4) // 62f3fd4b3b84880700000001 + VEXTRACTI64X4 $1, Z3, K3, 7(AX)(CX*1) // 62f3fd4b3b9c080700000001 + VEXTRACTI64X4 $1, Z0, K3, 7(AX)(CX*1) // 62f3fd4b3b84080700000001 + VFIXUPIMMPD $97, X30, X0, K3, X13 // 6213fd0b54ee61 + VFIXUPIMMPD $97, (AX), X0, K3, X13 // 6273fd0b542861 + VFIXUPIMMPD $97, 7(SI), X0, K3, X13 // 6273fd0b54ae0700000061 + VFIXUPIMMPD $81, Y6, Y7, K3, Y3 // 62f3c52b54de51 + VFIXUPIMMPD $81, (SI), Y7, K3, Y3 // 62f3c52b541e51 + VFIXUPIMMPD $81, 7(SI)(DI*2), Y7, K3, Y3 // 62f3c52b549c7e0700000051 + VFIXUPIMMPD $42, Z22, Z8, K2, Z14 // 6233bd4a54f62a + VFIXUPIMMPD $42, Z25, Z8, K2, Z14 // 6213bd4a54f12a + VFIXUPIMMPD $42, Z22, Z24, K2, Z14 // 6233bd4254f62a + VFIXUPIMMPD $42, Z25, Z24, K2, Z14 // 6213bd4254f12a + VFIXUPIMMPD $42, Z22, Z8, K2, Z7 // 62b3bd4a54fe2a + VFIXUPIMMPD $42, Z25, Z8, K2, Z7 // 6293bd4a54f92a + VFIXUPIMMPD $42, Z22, Z24, K2, Z7 // 62b3bd4254fe2a + VFIXUPIMMPD $42, Z25, Z24, K2, Z7 // 6293bd4254f92a + 
VFIXUPIMMPD $79, Z0, Z6, K1, Z1 // 62f3cd4954c84f + VFIXUPIMMPD $79, Z8, Z6, K1, Z1 // 62d3cd4954c84f + VFIXUPIMMPD $79, -7(CX)(DX*1), Z6, K1, Z1 // 62f3cd49548c11f9ffffff4f + VFIXUPIMMPD $79, -15(R14)(R15*4), Z6, K1, Z1 // 6293cd49548cbef1ffffff4f + VFIXUPIMMPD $79, Z0, Z2, K1, Z1 // 62f3ed4954c84f + VFIXUPIMMPD $79, Z8, Z2, K1, Z1 // 62d3ed4954c84f + VFIXUPIMMPD $79, -7(CX)(DX*1), Z2, K1, Z1 // 62f3ed49548c11f9ffffff4f + VFIXUPIMMPD $79, -15(R14)(R15*4), Z2, K1, Z1 // 6293ed49548cbef1ffffff4f + VFIXUPIMMPD $79, Z0, Z6, K1, Z16 // 62e3cd4954c04f + VFIXUPIMMPD $79, Z8, Z6, K1, Z16 // 62c3cd4954c04f + VFIXUPIMMPD $79, -7(CX)(DX*1), Z6, K1, Z16 // 62e3cd49548411f9ffffff4f + VFIXUPIMMPD $79, -15(R14)(R15*4), Z6, K1, Z16 // 6283cd495484bef1ffffff4f + VFIXUPIMMPD $79, Z0, Z2, K1, Z16 // 62e3ed4954c04f + VFIXUPIMMPD $79, Z8, Z2, K1, Z16 // 62c3ed4954c04f + VFIXUPIMMPD $79, -7(CX)(DX*1), Z2, K1, Z16 // 62e3ed49548411f9ffffff4f + VFIXUPIMMPD $79, -15(R14)(R15*4), Z2, K1, Z16 // 6283ed495484bef1ffffff4f + VFIXUPIMMPS $64, X11, X14, K2, X16 // 62c30d0a54c340 + VFIXUPIMMPS $64, (BX), X14, K2, X16 // 62e30d0a540340 + VFIXUPIMMPS $64, -17(BP)(SI*1), X14, K2, X16 // 62e30d0a548435efffffff40 + VFIXUPIMMPS $27, Y26, Y11, K1, Y26 // 6203252954d21b + VFIXUPIMMPS $27, 17(SP)(BP*8), Y11, K1, Y26 // 626325295494ec110000001b + VFIXUPIMMPS $27, 17(SP)(BP*4), Y11, K1, Y26 // 626325295494ac110000001b + VFIXUPIMMPS $47, Z11, Z14, K7, Z15 // 62530d4f54fb2f + VFIXUPIMMPS $47, Z5, Z14, K7, Z15 // 62730d4f54fd2f + VFIXUPIMMPS $47, Z11, Z27, K7, Z15 // 6253254754fb2f + VFIXUPIMMPS $47, Z5, Z27, K7, Z15 // 6273254754fd2f + VFIXUPIMMPS $47, Z11, Z14, K7, Z12 // 62530d4f54e32f + VFIXUPIMMPS $47, Z5, Z14, K7, Z12 // 62730d4f54e52f + VFIXUPIMMPS $47, Z11, Z27, K7, Z12 // 6253254754e32f + VFIXUPIMMPS $47, Z5, Z27, K7, Z12 // 6273254754e52f + VFIXUPIMMPS $82, Z2, Z5, K1, Z13 // 6273554954ea52 + VFIXUPIMMPS $82, 15(DX)(BX*1), Z5, K1, Z13 // 6273554954ac1a0f00000052 + VFIXUPIMMPS $82, -7(CX)(DX*2), Z5, 
K1, Z13 // 6273554954ac51f9ffffff52 + VFIXUPIMMPS $82, Z2, Z23, K1, Z13 // 6273454154ea52 + VFIXUPIMMPS $82, 15(DX)(BX*1), Z23, K1, Z13 // 6273454154ac1a0f00000052 + VFIXUPIMMPS $82, -7(CX)(DX*2), Z23, K1, Z13 // 6273454154ac51f9ffffff52 + VFIXUPIMMPS $82, Z2, Z5, K1, Z14 // 6273554954f252 + VFIXUPIMMPS $82, 15(DX)(BX*1), Z5, K1, Z14 // 6273554954b41a0f00000052 + VFIXUPIMMPS $82, -7(CX)(DX*2), Z5, K1, Z14 // 6273554954b451f9ffffff52 + VFIXUPIMMPS $82, Z2, Z23, K1, Z14 // 6273454154f252 + VFIXUPIMMPS $82, 15(DX)(BX*1), Z23, K1, Z14 // 6273454154b41a0f00000052 + VFIXUPIMMPS $82, -7(CX)(DX*2), Z23, K1, Z14 // 6273454154b451f9ffffff52 + VFIXUPIMMSD $126, X8, X19, K1, X14 // 6253e50155f07e + VFIXUPIMMSD $94, X23, X26, K1, X8 // 6233ad0155c75e or 6233ad2155c75e or 6233ad4155c75e + VFIXUPIMMSD $94, (SI), X26, K1, X8 // 6273ad0155065e or 6273ad2155065e or 6273ad4155065e + VFIXUPIMMSD $94, 7(SI)(DI*2), X26, K1, X8 // 6273ad0155847e070000005e or 6273ad2155847e070000005e or 6273ad4155847e070000005e + VFIXUPIMMSS $121, X23, X16, K7, X12 // 62337d0755e779 + VFIXUPIMMSS $13, X31, X11, K2, X23 // 6283250a55ff0d or 6283252a55ff0d or 6283254a55ff0d + VFIXUPIMMSS $13, 17(SP)(BP*2), X11, K2, X23 // 62e3250a55bc6c110000000d or 62e3252a55bc6c110000000d or 62e3254a55bc6c110000000d + VFIXUPIMMSS $13, -7(DI)(R8*4), X11, K2, X23 // 62a3250a55bc87f9ffffff0d or 62a3252a55bc87f9ffffff0d or 62a3254a55bc87f9ffffff0d + VFMADD132PD X0, X14, K4, X24 // 62628d0c98c0 + VFMADD132PD 15(R8)(R14*4), X14, K4, X24 // 62028d0c9884b00f000000 + VFMADD132PD -7(CX)(DX*4), X14, K4, X24 // 62628d0c988491f9ffffff + VFMADD132PD Y18, Y14, K1, Y12 // 62328d2998e2 + VFMADD132PD 7(SI)(DI*4), Y14, K1, Y12 // 62728d2998a4be07000000 + VFMADD132PD -7(DI)(R8*2), Y14, K1, Y12 // 62328d2998a447f9ffffff + VFMADD132PD Z28, Z26, K3, Z6 // 6292ad4398f4 + VFMADD132PD Z6, Z26, K3, Z6 // 62f2ad4398f6 + VFMADD132PD Z28, Z14, K3, Z6 // 62928d4b98f4 + VFMADD132PD Z6, Z14, K3, Z6 // 62f28d4b98f6 + VFMADD132PD Z28, Z26, K3, Z14 // 
6212ad4398f4 + VFMADD132PD Z6, Z26, K3, Z14 // 6272ad4398f6 + VFMADD132PD Z28, Z14, K3, Z14 // 62128d4b98f4 + VFMADD132PD Z6, Z14, K3, Z14 // 62728d4b98f6 + VFMADD132PD Z3, Z26, K4, Z13 // 6272ad4498eb + VFMADD132PD Z0, Z26, K4, Z13 // 6272ad4498e8 + VFMADD132PD -17(BP), Z26, K4, Z13 // 6272ad4498adefffffff + VFMADD132PD -15(R14)(R15*8), Z26, K4, Z13 // 6212ad4498acfef1ffffff + VFMADD132PD Z3, Z3, K4, Z13 // 6272e54c98eb + VFMADD132PD Z0, Z3, K4, Z13 // 6272e54c98e8 + VFMADD132PD -17(BP), Z3, K4, Z13 // 6272e54c98adefffffff + VFMADD132PD -15(R14)(R15*8), Z3, K4, Z13 // 6212e54c98acfef1ffffff + VFMADD132PD Z3, Z26, K4, Z21 // 62e2ad4498eb + VFMADD132PD Z0, Z26, K4, Z21 // 62e2ad4498e8 + VFMADD132PD -17(BP), Z26, K4, Z21 // 62e2ad4498adefffffff + VFMADD132PD -15(R14)(R15*8), Z26, K4, Z21 // 6282ad4498acfef1ffffff + VFMADD132PD Z3, Z3, K4, Z21 // 62e2e54c98eb + VFMADD132PD Z0, Z3, K4, Z21 // 62e2e54c98e8 + VFMADD132PD -17(BP), Z3, K4, Z21 // 62e2e54c98adefffffff + VFMADD132PD -15(R14)(R15*8), Z3, K4, Z21 // 6282e54c98acfef1ffffff + VFMADD132PS X2, X23, K5, X11 // 6272450598da + VFMADD132PS (R8), X23, K5, X11 // 625245059818 + VFMADD132PS 15(DX)(BX*2), X23, K5, X11 // 62724505989c5a0f000000 + VFMADD132PS Y3, Y18, K7, Y31 // 62626d2798fb + VFMADD132PS 17(SP), Y18, K7, Y31 // 62626d2798bc2411000000 + VFMADD132PS -17(BP)(SI*4), Y18, K7, Y31 // 62626d2798bcb5efffffff + VFMADD132PS Z3, Z11, K7, Z21 // 62e2254f98eb + VFMADD132PS Z12, Z11, K7, Z21 // 62c2254f98ec + VFMADD132PS Z3, Z25, K7, Z21 // 62e2354798eb + VFMADD132PS Z12, Z25, K7, Z21 // 62c2354798ec + VFMADD132PS Z3, Z11, K7, Z13 // 6272254f98eb + VFMADD132PS Z12, Z11, K7, Z13 // 6252254f98ec + VFMADD132PS Z3, Z25, K7, Z13 // 6272354798eb + VFMADD132PS Z12, Z25, K7, Z13 // 6252354798ec + VFMADD132PS Z23, Z23, K6, Z27 // 6222454698df + VFMADD132PS Z6, Z23, K6, Z27 // 6262454698de + VFMADD132PS 17(SP)(BP*2), Z23, K6, Z27 // 62624546989c6c11000000 + VFMADD132PS -7(DI)(R8*4), Z23, K6, Z27 // 62224546989c87f9ffffff + 
VFMADD132PS Z23, Z5, K6, Z27 // 6222554e98df + VFMADD132PS Z6, Z5, K6, Z27 // 6262554e98de + VFMADD132PS 17(SP)(BP*2), Z5, K6, Z27 // 6262554e989c6c11000000 + VFMADD132PS -7(DI)(R8*4), Z5, K6, Z27 // 6222554e989c87f9ffffff + VFMADD132PS Z23, Z23, K6, Z15 // 6232454698ff + VFMADD132PS Z6, Z23, K6, Z15 // 6272454698fe + VFMADD132PS 17(SP)(BP*2), Z23, K6, Z15 // 6272454698bc6c11000000 + VFMADD132PS -7(DI)(R8*4), Z23, K6, Z15 // 6232454698bc87f9ffffff + VFMADD132PS Z23, Z5, K6, Z15 // 6232554e98ff + VFMADD132PS Z6, Z5, K6, Z15 // 6272554e98fe + VFMADD132PS 17(SP)(BP*2), Z5, K6, Z15 // 6272554e98bc6c11000000 + VFMADD132PS -7(DI)(R8*4), Z5, K6, Z15 // 6232554e98bc87f9ffffff + VFMADD132SD X25, X5, K3, X20 // 6282d50b99e1 + VFMADD132SD X13, X9, K7, X0 // 62d2b50f99c5 or 62d2b52f99c5 or 62d2b54f99c5 + VFMADD132SD 17(SP)(BP*8), X9, K7, X0 // 62f2b50f9984ec11000000 or 62f2b52f9984ec11000000 or 62f2b54f9984ec11000000 + VFMADD132SD 17(SP)(BP*4), X9, K7, X0 // 62f2b50f9984ac11000000 or 62f2b52f9984ac11000000 or 62f2b54f9984ac11000000 + VFMADD132SS X9, X8, K4, X2 // 62d23d0c99d1 + VFMADD132SS X11, X31, K4, X2 // 62d2050499d3 or 62d2052499d3 or 62d2054499d3 + VFMADD132SS 15(R8), X31, K4, X2 // 62d2050499900f000000 or 62d2052499900f000000 or 62d2054499900f000000 + VFMADD132SS (BP), X31, K4, X2 // 62f20504995500 or 62f20524995500 or 62f20544995500 + VFMADD213PD X14, X5, K7, X22 // 62c2d50fa8f6 + VFMADD213PD 17(SP)(BP*1), X5, K7, X22 // 62e2d50fa8b42c11000000 + VFMADD213PD -7(CX)(DX*8), X5, K7, X22 // 62e2d50fa8b4d1f9ffffff + VFMADD213PD Y7, Y2, K2, Y24 // 6262ed2aa8c7 + VFMADD213PD 7(AX), Y2, K2, Y24 // 6262ed2aa88007000000 + VFMADD213PD (DI), Y2, K2, Y24 // 6262ed2aa807 + VFMADD213PD Z16, Z21, K5, Z8 // 6232d545a8c0 + VFMADD213PD Z13, Z21, K5, Z8 // 6252d545a8c5 + VFMADD213PD Z16, Z5, K5, Z8 // 6232d54da8c0 + VFMADD213PD Z13, Z5, K5, Z8 // 6252d54da8c5 + VFMADD213PD Z16, Z21, K5, Z28 // 6222d545a8e0 + VFMADD213PD Z13, Z21, K5, Z28 // 6242d545a8e5 + VFMADD213PD Z16, Z5, K5, Z28 // 
6222d54da8e0 + VFMADD213PD Z13, Z5, K5, Z28 // 6242d54da8e5 + VFMADD213PD Z6, Z22, K3, Z12 // 6272cd43a8e6 + VFMADD213PD Z8, Z22, K3, Z12 // 6252cd43a8e0 + VFMADD213PD 15(R8), Z22, K3, Z12 // 6252cd43a8a00f000000 + VFMADD213PD (BP), Z22, K3, Z12 // 6272cd43a86500 + VFMADD213PD Z6, Z11, K3, Z12 // 6272a54ba8e6 + VFMADD213PD Z8, Z11, K3, Z12 // 6252a54ba8e0 + VFMADD213PD 15(R8), Z11, K3, Z12 // 6252a54ba8a00f000000 + VFMADD213PD (BP), Z11, K3, Z12 // 6272a54ba86500 + VFMADD213PD Z6, Z22, K3, Z27 // 6262cd43a8de + VFMADD213PD Z8, Z22, K3, Z27 // 6242cd43a8d8 + VFMADD213PD 15(R8), Z22, K3, Z27 // 6242cd43a8980f000000 + VFMADD213PD (BP), Z22, K3, Z27 // 6262cd43a85d00 + VFMADD213PD Z6, Z11, K3, Z27 // 6262a54ba8de + VFMADD213PD Z8, Z11, K3, Z27 // 6242a54ba8d8 + VFMADD213PD 15(R8), Z11, K3, Z27 // 6242a54ba8980f000000 + VFMADD213PD (BP), Z11, K3, Z27 // 6262a54ba85d00 + VFMADD213PS X7, X17, K4, X0 // 62f27504a8c7 + VFMADD213PS -17(BP)(SI*2), X17, K4, X0 // 62f27504a88475efffffff + VFMADD213PS 7(AX)(CX*2), X17, K4, X0 // 62f27504a8844807000000 + VFMADD213PS Y8, Y14, K2, Y21 // 62c20d2aa8e8 + VFMADD213PS 99(R15)(R15*1), Y14, K2, Y21 // 62820d2aa8ac3f63000000 + VFMADD213PS (DX), Y14, K2, Y21 // 62e20d2aa82a + VFMADD213PS Z9, Z12, K2, Z25 // 62421d4aa8c9 + VFMADD213PS Z12, Z12, K2, Z25 // 62421d4aa8cc + VFMADD213PS Z9, Z17, K2, Z25 // 62427542a8c9 + VFMADD213PS Z12, Z17, K2, Z25 // 62427542a8cc + VFMADD213PS Z9, Z12, K2, Z12 // 62521d4aa8e1 + VFMADD213PS Z12, Z12, K2, Z12 // 62521d4aa8e4 + VFMADD213PS Z9, Z17, K2, Z12 // 62527542a8e1 + VFMADD213PS Z12, Z17, K2, Z12 // 62527542a8e4 + VFMADD213PS Z8, Z3, K3, Z6 // 62d2654ba8f0 + VFMADD213PS Z2, Z3, K3, Z6 // 62f2654ba8f2 + VFMADD213PS 15(R8)(R14*8), Z3, K3, Z6 // 6292654ba8b4f00f000000 + VFMADD213PS -15(R14)(R15*2), Z3, K3, Z6 // 6292654ba8b47ef1ffffff + VFMADD213PS Z8, Z21, K3, Z6 // 62d25543a8f0 + VFMADD213PS Z2, Z21, K3, Z6 // 62f25543a8f2 + VFMADD213PS 15(R8)(R14*8), Z21, K3, Z6 // 62925543a8b4f00f000000 + VFMADD213PS 
-15(R14)(R15*2), Z21, K3, Z6 // 62925543a8b47ef1ffffff + VFMADD213PS Z8, Z3, K3, Z25 // 6242654ba8c8 + VFMADD213PS Z2, Z3, K3, Z25 // 6262654ba8ca + VFMADD213PS 15(R8)(R14*8), Z3, K3, Z25 // 6202654ba88cf00f000000 + VFMADD213PS -15(R14)(R15*2), Z3, K3, Z25 // 6202654ba88c7ef1ffffff + VFMADD213PS Z8, Z21, K3, Z25 // 62425543a8c8 + VFMADD213PS Z2, Z21, K3, Z25 // 62625543a8ca + VFMADD213PS 15(R8)(R14*8), Z21, K3, Z25 // 62025543a88cf00f000000 + VFMADD213PS -15(R14)(R15*2), Z21, K3, Z25 // 62025543a88c7ef1ffffff + VFMADD213SD X0, X11, K3, X15 // 6272a50ba9f8 + VFMADD213SD X27, X8, K3, X18 // 6282bd0ba9d3 or 6282bd2ba9d3 or 6282bd4ba9d3 + VFMADD213SD 7(SI)(DI*4), X8, K3, X18 // 62e2bd0ba994be07000000 or 62e2bd2ba994be07000000 or 62e2bd4ba994be07000000 + VFMADD213SD -7(DI)(R8*2), X8, K3, X18 // 62a2bd0ba99447f9ffffff or 62a2bd2ba99447f9ffffff or 62a2bd4ba99447f9ffffff + VFMADD213SS X18, X3, K2, X25 // 6222650aa9ca + VFMADD213SS X15, X28, K1, X15 // 62521d01a9ff or 62521d21a9ff or 62521d41a9ff + VFMADD213SS 15(R8)(R14*8), X28, K1, X15 // 62121d01a9bcf00f000000 or 62121d21a9bcf00f000000 or 62121d41a9bcf00f000000 + VFMADD213SS -15(R14)(R15*2), X28, K1, X15 // 62121d01a9bc7ef1ffffff or 62121d21a9bc7ef1ffffff or 62121d41a9bc7ef1ffffff + VFMADD231PD X8, X13, K2, X7 // 62d2950ab8f8 + VFMADD231PD 15(R8)(R14*1), X13, K2, X7 // 6292950ab8bc300f000000 + VFMADD231PD 15(R8)(R14*2), X13, K2, X7 // 6292950ab8bc700f000000 + VFMADD231PD Y24, Y11, K1, Y20 // 6282a529b8e0 + VFMADD231PD -17(BP)(SI*8), Y11, K1, Y20 // 62e2a529b8a4f5efffffff + VFMADD231PD (R15), Y11, K1, Y20 // 62c2a529b827 + VFMADD231PD Z0, Z7, K7, Z3 // 62f2c54fb8d8 + VFMADD231PD Z6, Z7, K7, Z3 // 62f2c54fb8de + VFMADD231PD Z0, Z9, K7, Z3 // 62f2b54fb8d8 + VFMADD231PD Z6, Z9, K7, Z3 // 62f2b54fb8de + VFMADD231PD Z0, Z7, K7, Z27 // 6262c54fb8d8 + VFMADD231PD Z6, Z7, K7, Z27 // 6262c54fb8de + VFMADD231PD Z0, Z9, K7, Z27 // 6262b54fb8d8 + VFMADD231PD Z6, Z9, K7, Z27 // 6262b54fb8de + VFMADD231PD Z9, Z3, K1, Z20 // 
62c2e549b8e1 + VFMADD231PD Z19, Z3, K1, Z20 // 62a2e549b8e3 + VFMADD231PD -15(R14)(R15*1), Z3, K1, Z20 // 6282e549b8a43ef1ffffff + VFMADD231PD -15(BX), Z3, K1, Z20 // 62e2e549b8a3f1ffffff + VFMADD231PD Z9, Z30, K1, Z20 // 62c28d41b8e1 + VFMADD231PD Z19, Z30, K1, Z20 // 62a28d41b8e3 + VFMADD231PD -15(R14)(R15*1), Z30, K1, Z20 // 62828d41b8a43ef1ffffff + VFMADD231PD -15(BX), Z30, K1, Z20 // 62e28d41b8a3f1ffffff + VFMADD231PD Z9, Z3, K1, Z28 // 6242e549b8e1 + VFMADD231PD Z19, Z3, K1, Z28 // 6222e549b8e3 + VFMADD231PD -15(R14)(R15*1), Z3, K1, Z28 // 6202e549b8a43ef1ffffff + VFMADD231PD -15(BX), Z3, K1, Z28 // 6262e549b8a3f1ffffff + VFMADD231PD Z9, Z30, K1, Z28 // 62428d41b8e1 + VFMADD231PD Z19, Z30, K1, Z28 // 62228d41b8e3 + VFMADD231PD -15(R14)(R15*1), Z30, K1, Z28 // 62028d41b8a43ef1ffffff + VFMADD231PD -15(BX), Z30, K1, Z28 // 62628d41b8a3f1ffffff + VFMADD231PS X0, X7, K1, X24 // 62624509b8c0 + VFMADD231PS (R14), X7, K1, X24 // 62424509b806 + VFMADD231PS -7(DI)(R8*8), X7, K1, X24 // 62224509b884c7f9ffffff + VFMADD231PS Y18, Y5, K1, Y1 // 62b25529b8ca + VFMADD231PS 7(SI)(DI*8), Y5, K1, Y1 // 62f25529b88cfe07000000 + VFMADD231PS -15(R14), Y5, K1, Y1 // 62d25529b88ef1ffffff + VFMADD231PS Z18, Z11, K7, Z12 // 6232254fb8e2 + VFMADD231PS Z24, Z11, K7, Z12 // 6212254fb8e0 + VFMADD231PS Z18, Z5, K7, Z12 // 6232554fb8e2 + VFMADD231PS Z24, Z5, K7, Z12 // 6212554fb8e0 + VFMADD231PS Z18, Z11, K7, Z22 // 62a2254fb8f2 + VFMADD231PS Z24, Z11, K7, Z22 // 6282254fb8f0 + VFMADD231PS Z18, Z5, K7, Z22 // 62a2554fb8f2 + VFMADD231PS Z24, Z5, K7, Z22 // 6282554fb8f0 + VFMADD231PS Z6, Z7, K2, Z2 // 62f2454ab8d6 + VFMADD231PS Z16, Z7, K2, Z2 // 62b2454ab8d0 + VFMADD231PS 7(AX)(CX*4), Z7, K2, Z2 // 62f2454ab8948807000000 + VFMADD231PS 7(AX)(CX*1), Z7, K2, Z2 // 62f2454ab8940807000000 + VFMADD231PS Z6, Z13, K2, Z2 // 62f2154ab8d6 + VFMADD231PS Z16, Z13, K2, Z2 // 62b2154ab8d0 + VFMADD231PS 7(AX)(CX*4), Z13, K2, Z2 // 62f2154ab8948807000000 + VFMADD231PS 7(AX)(CX*1), Z13, K2, Z2 // 
62f2154ab8940807000000 + VFMADD231PS Z6, Z7, K2, Z21 // 62e2454ab8ee + VFMADD231PS Z16, Z7, K2, Z21 // 62a2454ab8e8 + VFMADD231PS 7(AX)(CX*4), Z7, K2, Z21 // 62e2454ab8ac8807000000 + VFMADD231PS 7(AX)(CX*1), Z7, K2, Z21 // 62e2454ab8ac0807000000 + VFMADD231PS Z6, Z13, K2, Z21 // 62e2154ab8ee + VFMADD231PS Z16, Z13, K2, Z21 // 62a2154ab8e8 + VFMADD231PS 7(AX)(CX*4), Z13, K2, Z21 // 62e2154ab8ac8807000000 + VFMADD231PS 7(AX)(CX*1), Z13, K2, Z21 // 62e2154ab8ac0807000000 + VFMADD231SD X11, X1, K4, X22 // 62c2f50cb9f3 + VFMADD231SD X8, X7, K1, X6 // 62d2c509b9f0 or 62d2c529b9f0 or 62d2c549b9f0 + VFMADD231SD 17(SP), X7, K1, X6 // 62f2c509b9b42411000000 or 62f2c529b9b42411000000 or 62f2c549b9b42411000000 + VFMADD231SD -17(BP)(SI*4), X7, K1, X6 // 62f2c509b9b4b5efffffff or 62f2c529b9b4b5efffffff or 62f2c549b9b4b5efffffff + VFMADD231SS X28, X3, K3, X31 // 6202650bb9fc + VFMADD231SS X7, X24, K4, X20 // 62e23d04b9e7 or 62e23d24b9e7 or 62e23d44b9e7 + VFMADD231SS -15(R14)(R15*1), X24, K4, X20 // 62823d04b9a43ef1ffffff or 62823d24b9a43ef1ffffff or 62823d44b9a43ef1ffffff + VFMADD231SS -15(BX), X24, K4, X20 // 62e23d04b9a3f1ffffff or 62e23d24b9a3f1ffffff or 62e23d44b9a3f1ffffff + VFMADDSUB132PD X12, X16, K5, X20 // 62c2fd0596e4 + VFMADDSUB132PD 99(R15)(R15*4), X16, K5, X20 // 6282fd0596a4bf63000000 + VFMADDSUB132PD 15(DX), X16, K5, X20 // 62e2fd0596a20f000000 + VFMADDSUB132PD Y9, Y20, K7, Y20 // 62c2dd2796e1 + VFMADDSUB132PD 7(SI)(DI*1), Y20, K7, Y20 // 62e2dd2796a43e07000000 + VFMADDSUB132PD 15(DX)(BX*8), Y20, K7, Y20 // 62e2dd2796a4da0f000000 + VFMADDSUB132PD Z13, Z1, K7, Z6 // 62d2f54f96f5 + VFMADDSUB132PD Z13, Z15, K7, Z6 // 62d2854f96f5 + VFMADDSUB132PD Z13, Z1, K7, Z22 // 62c2f54f96f5 + VFMADDSUB132PD Z13, Z15, K7, Z22 // 62c2854f96f5 + VFMADDSUB132PD Z2, Z22, K6, Z18 // 62e2cd4696d2 + VFMADDSUB132PD Z31, Z22, K6, Z18 // 6282cd4696d7 + VFMADDSUB132PD (SI), Z22, K6, Z18 // 62e2cd469616 + VFMADDSUB132PD 7(SI)(DI*2), Z22, K6, Z18 // 62e2cd4696947e07000000 + VFMADDSUB132PD Z2, 
Z7, K6, Z18 // 62e2c54e96d2 + VFMADDSUB132PD Z31, Z7, K6, Z18 // 6282c54e96d7 + VFMADDSUB132PD (SI), Z7, K6, Z18 // 62e2c54e9616 + VFMADDSUB132PD 7(SI)(DI*2), Z7, K6, Z18 // 62e2c54e96947e07000000 + VFMADDSUB132PD Z2, Z22, K6, Z8 // 6272cd4696c2 + VFMADDSUB132PD Z31, Z22, K6, Z8 // 6212cd4696c7 + VFMADDSUB132PD (SI), Z22, K6, Z8 // 6272cd469606 + VFMADDSUB132PD 7(SI)(DI*2), Z22, K6, Z8 // 6272cd4696847e07000000 + VFMADDSUB132PD Z2, Z7, K6, Z8 // 6272c54e96c2 + VFMADDSUB132PD Z31, Z7, K6, Z8 // 6212c54e96c7 + VFMADDSUB132PD (SI), Z7, K6, Z8 // 6272c54e9606 + VFMADDSUB132PD 7(SI)(DI*2), Z7, K6, Z8 // 6272c54e96847e07000000 + VFMADDSUB132PS X28, X17, K3, X6 // 6292750396f4 + VFMADDSUB132PS (CX), X17, K3, X6 // 62f275039631 + VFMADDSUB132PS 99(R15), X17, K3, X6 // 62d2750396b763000000 + VFMADDSUB132PS Y1, Y28, K7, Y28 // 62621d2796e1 + VFMADDSUB132PS -7(DI)(R8*1), Y28, K7, Y28 // 62221d2796a407f9ffffff + VFMADDSUB132PS (SP), Y28, K7, Y28 // 62621d27962424 + VFMADDSUB132PS Z12, Z1, K4, Z20 // 62c2754c96e4 + VFMADDSUB132PS Z16, Z1, K4, Z20 // 62a2754c96e0 + VFMADDSUB132PS Z12, Z3, K4, Z20 // 62c2654c96e4 + VFMADDSUB132PS Z16, Z3, K4, Z20 // 62a2654c96e0 + VFMADDSUB132PS Z12, Z1, K4, Z9 // 6252754c96cc + VFMADDSUB132PS Z16, Z1, K4, Z9 // 6232754c96c8 + VFMADDSUB132PS Z12, Z3, K4, Z9 // 6252654c96cc + VFMADDSUB132PS Z16, Z3, K4, Z9 // 6232654c96c8 + VFMADDSUB132PS Z3, Z14, K4, Z28 // 62620d4c96e3 + VFMADDSUB132PS Z12, Z14, K4, Z28 // 62420d4c96e4 + VFMADDSUB132PS 17(SP)(BP*8), Z14, K4, Z28 // 62620d4c96a4ec11000000 + VFMADDSUB132PS 17(SP)(BP*4), Z14, K4, Z28 // 62620d4c96a4ac11000000 + VFMADDSUB132PS Z3, Z28, K4, Z28 // 62621d4496e3 + VFMADDSUB132PS Z12, Z28, K4, Z28 // 62421d4496e4 + VFMADDSUB132PS 17(SP)(BP*8), Z28, K4, Z28 // 62621d4496a4ec11000000 + VFMADDSUB132PS 17(SP)(BP*4), Z28, K4, Z28 // 62621d4496a4ac11000000 + VFMADDSUB132PS Z3, Z14, K4, Z13 // 62720d4c96eb + VFMADDSUB132PS Z12, Z14, K4, Z13 // 62520d4c96ec + VFMADDSUB132PS 17(SP)(BP*8), Z14, K4, Z13 // 
62720d4c96acec11000000 + VFMADDSUB132PS 17(SP)(BP*4), Z14, K4, Z13 // 62720d4c96acac11000000 + VFMADDSUB132PS Z3, Z28, K4, Z13 // 62721d4496eb + VFMADDSUB132PS Z12, Z28, K4, Z13 // 62521d4496ec + VFMADDSUB132PS 17(SP)(BP*8), Z28, K4, Z13 // 62721d4496acec11000000 + VFMADDSUB132PS 17(SP)(BP*4), Z28, K4, Z13 // 62721d4496acac11000000 + VFMADDSUB213PD X8, X1, K7, X6 // 62d2f50fa6f0 + VFMADDSUB213PD 99(R15)(R15*2), X1, K7, X6 // 6292f50fa6b47f63000000 + VFMADDSUB213PD -7(DI), X1, K7, X6 // 62f2f50fa6b7f9ffffff + VFMADDSUB213PD Y27, Y11, K2, Y8 // 6212a52aa6c3 + VFMADDSUB213PD -7(CX), Y11, K2, Y8 // 6272a52aa681f9ffffff + VFMADDSUB213PD 15(DX)(BX*4), Y11, K2, Y8 // 6272a52aa6849a0f000000 + VFMADDSUB213PD Z5, Z19, K5, Z15 // 6272e545a6fd + VFMADDSUB213PD Z1, Z19, K5, Z15 // 6272e545a6f9 + VFMADDSUB213PD Z5, Z15, K5, Z15 // 6272854da6fd + VFMADDSUB213PD Z1, Z15, K5, Z15 // 6272854da6f9 + VFMADDSUB213PD Z5, Z19, K5, Z30 // 6262e545a6f5 + VFMADDSUB213PD Z1, Z19, K5, Z30 // 6262e545a6f1 + VFMADDSUB213PD Z5, Z15, K5, Z30 // 6262854da6f5 + VFMADDSUB213PD Z1, Z15, K5, Z30 // 6262854da6f1 + VFMADDSUB213PD Z21, Z14, K3, Z3 // 62b28d4ba6dd + VFMADDSUB213PD Z8, Z14, K3, Z3 // 62d28d4ba6d8 + VFMADDSUB213PD 7(SI)(DI*4), Z14, K3, Z3 // 62f28d4ba69cbe07000000 + VFMADDSUB213PD -7(DI)(R8*2), Z14, K3, Z3 // 62b28d4ba69c47f9ffffff + VFMADDSUB213PD Z21, Z15, K3, Z3 // 62b2854ba6dd + VFMADDSUB213PD Z8, Z15, K3, Z3 // 62d2854ba6d8 + VFMADDSUB213PD 7(SI)(DI*4), Z15, K3, Z3 // 62f2854ba69cbe07000000 + VFMADDSUB213PD -7(DI)(R8*2), Z15, K3, Z3 // 62b2854ba69c47f9ffffff + VFMADDSUB213PD Z21, Z14, K3, Z5 // 62b28d4ba6ed + VFMADDSUB213PD Z8, Z14, K3, Z5 // 62d28d4ba6e8 + VFMADDSUB213PD 7(SI)(DI*4), Z14, K3, Z5 // 62f28d4ba6acbe07000000 + VFMADDSUB213PD -7(DI)(R8*2), Z14, K3, Z5 // 62b28d4ba6ac47f9ffffff + VFMADDSUB213PD Z21, Z15, K3, Z5 // 62b2854ba6ed + VFMADDSUB213PD Z8, Z15, K3, Z5 // 62d2854ba6e8 + VFMADDSUB213PD 7(SI)(DI*4), Z15, K3, Z5 // 62f2854ba6acbe07000000 + VFMADDSUB213PD -7(DI)(R8*2), 
Z15, K3, Z5 // 62b2854ba6ac47f9ffffff + VFMADDSUB213PS X0, X6, K4, X8 // 62724d0ca6c0 + VFMADDSUB213PS -7(CX)(DX*1), X6, K4, X8 // 62724d0ca68411f9ffffff + VFMADDSUB213PS -15(R14)(R15*4), X6, K4, X8 // 62124d0ca684bef1ffffff + VFMADDSUB213PS Y12, Y16, K2, Y17 // 62c27d22a6cc + VFMADDSUB213PS 99(R15)(R15*8), Y16, K2, Y17 // 62827d22a68cff63000000 + VFMADDSUB213PS 7(AX)(CX*8), Y16, K2, Y17 // 62e27d22a68cc807000000 + VFMADDSUB213PS Z23, Z20, K2, Z16 // 62a25d42a6c7 + VFMADDSUB213PS Z19, Z20, K2, Z16 // 62a25d42a6c3 + VFMADDSUB213PS Z23, Z0, K2, Z16 // 62a27d4aa6c7 + VFMADDSUB213PS Z19, Z0, K2, Z16 // 62a27d4aa6c3 + VFMADDSUB213PS Z23, Z20, K2, Z9 // 62325d42a6cf + VFMADDSUB213PS Z19, Z20, K2, Z9 // 62325d42a6cb + VFMADDSUB213PS Z23, Z0, K2, Z9 // 62327d4aa6cf + VFMADDSUB213PS Z19, Z0, K2, Z9 // 62327d4aa6cb + VFMADDSUB213PS Z24, Z0, K3, Z0 // 62927d4ba6c0 + VFMADDSUB213PS Z12, Z0, K3, Z0 // 62d27d4ba6c4 + VFMADDSUB213PS 17(SP), Z0, K3, Z0 // 62f27d4ba6842411000000 + VFMADDSUB213PS -17(BP)(SI*4), Z0, K3, Z0 // 62f27d4ba684b5efffffff + VFMADDSUB213PS Z24, Z25, K3, Z0 // 62923543a6c0 + VFMADDSUB213PS Z12, Z25, K3, Z0 // 62d23543a6c4 + VFMADDSUB213PS 17(SP), Z25, K3, Z0 // 62f23543a6842411000000 + VFMADDSUB213PS -17(BP)(SI*4), Z25, K3, Z0 // 62f23543a684b5efffffff + VFMADDSUB213PS Z24, Z0, K3, Z11 // 62127d4ba6d8 + VFMADDSUB213PS Z12, Z0, K3, Z11 // 62527d4ba6dc + VFMADDSUB213PS 17(SP), Z0, K3, Z11 // 62727d4ba69c2411000000 + VFMADDSUB213PS -17(BP)(SI*4), Z0, K3, Z11 // 62727d4ba69cb5efffffff + VFMADDSUB213PS Z24, Z25, K3, Z11 // 62123543a6d8 + VFMADDSUB213PS Z12, Z25, K3, Z11 // 62523543a6dc + VFMADDSUB213PS 17(SP), Z25, K3, Z11 // 62723543a69c2411000000 + VFMADDSUB213PS -17(BP)(SI*4), Z25, K3, Z11 // 62723543a69cb5efffffff + VFMADDSUB231PD X6, X16, K3, X11 // 6272fd03b6de + VFMADDSUB231PD 15(DX)(BX*1), X16, K3, X11 // 6272fd03b69c1a0f000000 + VFMADDSUB231PD -7(CX)(DX*2), X16, K3, X11 // 6272fd03b69c51f9ffffff + VFMADDSUB231PD Y3, Y26, K3, Y6 // 62f2ad23b6f3 + 
VFMADDSUB231PD (AX), Y26, K3, Y6 // 62f2ad23b630 + VFMADDSUB231PD 7(SI), Y26, K3, Y6 // 62f2ad23b6b607000000 + VFMADDSUB231PD Z9, Z9, K2, Z0 // 62d2b54ab6c1 + VFMADDSUB231PD Z25, Z9, K2, Z0 // 6292b54ab6c1 + VFMADDSUB231PD Z9, Z3, K2, Z0 // 62d2e54ab6c1 + VFMADDSUB231PD Z25, Z3, K2, Z0 // 6292e54ab6c1 + VFMADDSUB231PD Z9, Z9, K2, Z26 // 6242b54ab6d1 + VFMADDSUB231PD Z25, Z9, K2, Z26 // 6202b54ab6d1 + VFMADDSUB231PD Z9, Z3, K2, Z26 // 6242e54ab6d1 + VFMADDSUB231PD Z25, Z3, K2, Z26 // 6202e54ab6d1 + VFMADDSUB231PD Z17, Z20, K1, Z9 // 6232dd41b6c9 + VFMADDSUB231PD Z0, Z20, K1, Z9 // 6272dd41b6c8 + VFMADDSUB231PD 7(AX), Z20, K1, Z9 // 6272dd41b68807000000 + VFMADDSUB231PD (DI), Z20, K1, Z9 // 6272dd41b60f + VFMADDSUB231PD Z17, Z0, K1, Z9 // 6232fd49b6c9 + VFMADDSUB231PD Z0, Z0, K1, Z9 // 6272fd49b6c8 + VFMADDSUB231PD 7(AX), Z0, K1, Z9 // 6272fd49b68807000000 + VFMADDSUB231PD (DI), Z0, K1, Z9 // 6272fd49b60f + VFMADDSUB231PD Z17, Z20, K1, Z28 // 6222dd41b6e1 + VFMADDSUB231PD Z0, Z20, K1, Z28 // 6262dd41b6e0 + VFMADDSUB231PD 7(AX), Z20, K1, Z28 // 6262dd41b6a007000000 + VFMADDSUB231PD (DI), Z20, K1, Z28 // 6262dd41b627 + VFMADDSUB231PD Z17, Z0, K1, Z28 // 6222fd49b6e1 + VFMADDSUB231PD Z0, Z0, K1, Z28 // 6262fd49b6e0 + VFMADDSUB231PD 7(AX), Z0, K1, Z28 // 6262fd49b6a007000000 + VFMADDSUB231PD (DI), Z0, K1, Z28 // 6262fd49b627 + VFMADDSUB231PS X12, X22, K2, X6 // 62d24d02b6f4 + VFMADDSUB231PS -17(BP), X22, K2, X6 // 62f24d02b6b5efffffff + VFMADDSUB231PS -15(R14)(R15*8), X22, K2, X6 // 62924d02b6b4fef1ffffff + VFMADDSUB231PS Y1, Y28, K1, Y8 // 62721d21b6c1 + VFMADDSUB231PS (BX), Y28, K1, Y8 // 62721d21b603 + VFMADDSUB231PS -17(BP)(SI*1), Y28, K1, Y8 // 62721d21b68435efffffff + VFMADDSUB231PS Z21, Z31, K7, Z17 // 62a20547b6cd + VFMADDSUB231PS Z9, Z31, K7, Z17 // 62c20547b6c9 + VFMADDSUB231PS Z21, Z0, K7, Z17 // 62a27d4fb6cd + VFMADDSUB231PS Z9, Z0, K7, Z17 // 62c27d4fb6c9 + VFMADDSUB231PS Z21, Z31, K7, Z23 // 62a20547b6fd + VFMADDSUB231PS Z9, Z31, K7, Z23 // 62c20547b6f9 + 
VFMADDSUB231PS Z21, Z0, K7, Z23 // 62a27d4fb6fd + VFMADDSUB231PS Z9, Z0, K7, Z23 // 62c27d4fb6f9 + VFMADDSUB231PS Z20, Z1, K1, Z6 // 62b27549b6f4 + VFMADDSUB231PS Z9, Z1, K1, Z6 // 62d27549b6f1 + VFMADDSUB231PS 99(R15)(R15*1), Z1, K1, Z6 // 62927549b6b43f63000000 + VFMADDSUB231PS (DX), Z1, K1, Z6 // 62f27549b632 + VFMADDSUB231PS Z20, Z9, K1, Z6 // 62b23549b6f4 + VFMADDSUB231PS Z9, Z9, K1, Z6 // 62d23549b6f1 + VFMADDSUB231PS 99(R15)(R15*1), Z9, K1, Z6 // 62923549b6b43f63000000 + VFMADDSUB231PS (DX), Z9, K1, Z6 // 62f23549b632 + VFMADDSUB231PS Z20, Z1, K1, Z9 // 62327549b6cc + VFMADDSUB231PS Z9, Z1, K1, Z9 // 62527549b6c9 + VFMADDSUB231PS 99(R15)(R15*1), Z1, K1, Z9 // 62127549b68c3f63000000 + VFMADDSUB231PS (DX), Z1, K1, Z9 // 62727549b60a + VFMADDSUB231PS Z20, Z9, K1, Z9 // 62323549b6cc + VFMADDSUB231PS Z9, Z9, K1, Z9 // 62523549b6c9 + VFMADDSUB231PS 99(R15)(R15*1), Z9, K1, Z9 // 62123549b68c3f63000000 + VFMADDSUB231PS (DX), Z9, K1, Z9 // 62723549b60a + VFMSUB132PD X8, X28, K1, X16 // 62c29d019ac0 + VFMSUB132PD 17(SP)(BP*2), X28, K1, X16 // 62e29d019a846c11000000 + VFMSUB132PD -7(DI)(R8*4), X28, K1, X16 // 62a29d019a8487f9ffffff + VFMSUB132PD Y31, Y14, K1, Y23 // 62828d299aff + VFMSUB132PD 15(R8)(R14*4), Y14, K1, Y23 // 62828d299abcb00f000000 + VFMSUB132PD -7(CX)(DX*4), Y14, K1, Y23 // 62e28d299abc91f9ffffff + VFMSUB132PD Z7, Z26, K7, Z30 // 6262ad479af7 + VFMSUB132PD Z21, Z26, K7, Z30 // 6222ad479af5 + VFMSUB132PD Z7, Z22, K7, Z30 // 6262cd479af7 + VFMSUB132PD Z21, Z22, K7, Z30 // 6222cd479af5 + VFMSUB132PD Z7, Z26, K7, Z5 // 62f2ad479aef + VFMSUB132PD Z21, Z26, K7, Z5 // 62b2ad479aed + VFMSUB132PD Z7, Z22, K7, Z5 // 62f2cd479aef + VFMSUB132PD Z21, Z22, K7, Z5 // 62b2cd479aed + VFMSUB132PD Z12, Z14, K2, Z16 // 62c28d4a9ac4 + VFMSUB132PD Z13, Z14, K2, Z16 // 62c28d4a9ac5 + VFMSUB132PD -17(BP)(SI*8), Z14, K2, Z16 // 62e28d4a9a84f5efffffff + VFMSUB132PD (R15), Z14, K2, Z16 // 62c28d4a9a07 + VFMSUB132PD Z12, Z13, K2, Z16 // 62c2954a9ac4 + VFMSUB132PD Z13, Z13, K2, Z16 
// 62c2954a9ac5 + VFMSUB132PD -17(BP)(SI*8), Z13, K2, Z16 // 62e2954a9a84f5efffffff + VFMSUB132PD (R15), Z13, K2, Z16 // 62c2954a9a07 + VFMSUB132PD Z12, Z14, K2, Z25 // 62428d4a9acc + VFMSUB132PD Z13, Z14, K2, Z25 // 62428d4a9acd + VFMSUB132PD -17(BP)(SI*8), Z14, K2, Z25 // 62628d4a9a8cf5efffffff + VFMSUB132PD (R15), Z14, K2, Z25 // 62428d4a9a0f + VFMSUB132PD Z12, Z13, K2, Z25 // 6242954a9acc + VFMSUB132PD Z13, Z13, K2, Z25 // 6242954a9acd + VFMSUB132PD -17(BP)(SI*8), Z13, K2, Z25 // 6262954a9a8cf5efffffff + VFMSUB132PD (R15), Z13, K2, Z25 // 6242954a9a0f + VFMSUB132PS X1, X11, K4, X15 // 6272250c9af9 + VFMSUB132PS 15(R8), X11, K4, X15 // 6252250c9ab80f000000 + VFMSUB132PS (BP), X11, K4, X15 // 6272250c9a7d00 + VFMSUB132PS Y22, Y2, K1, Y25 // 62226d299ace + VFMSUB132PS (R8), Y2, K1, Y25 // 62426d299a08 + VFMSUB132PS 15(DX)(BX*2), Y2, K1, Y25 // 62626d299a8c5a0f000000 + VFMSUB132PS Z27, Z2, K3, Z21 // 62826d4b9aeb + VFMSUB132PS Z25, Z2, K3, Z21 // 62826d4b9ae9 + VFMSUB132PS Z27, Z7, K3, Z21 // 6282454b9aeb + VFMSUB132PS Z25, Z7, K3, Z21 // 6282454b9ae9 + VFMSUB132PS Z27, Z2, K3, Z9 // 62126d4b9acb + VFMSUB132PS Z25, Z2, K3, Z9 // 62126d4b9ac9 + VFMSUB132PS Z27, Z7, K3, Z9 // 6212454b9acb + VFMSUB132PS Z25, Z7, K3, Z9 // 6212454b9ac9 + VFMSUB132PS Z3, Z27, K4, Z23 // 62e225449afb + VFMSUB132PS Z0, Z27, K4, Z23 // 62e225449af8 + VFMSUB132PS 7(SI)(DI*8), Z27, K4, Z23 // 62e225449abcfe07000000 + VFMSUB132PS -15(R14), Z27, K4, Z23 // 62c225449abef1ffffff + VFMSUB132PS Z3, Z14, K4, Z23 // 62e20d4c9afb + VFMSUB132PS Z0, Z14, K4, Z23 // 62e20d4c9af8 + VFMSUB132PS 7(SI)(DI*8), Z14, K4, Z23 // 62e20d4c9abcfe07000000 + VFMSUB132PS -15(R14), Z14, K4, Z23 // 62c20d4c9abef1ffffff + VFMSUB132PS Z3, Z27, K4, Z9 // 627225449acb + VFMSUB132PS Z0, Z27, K4, Z9 // 627225449ac8 + VFMSUB132PS 7(SI)(DI*8), Z27, K4, Z9 // 627225449a8cfe07000000 + VFMSUB132PS -15(R14), Z27, K4, Z9 // 625225449a8ef1ffffff + VFMSUB132PS Z3, Z14, K4, Z9 // 62720d4c9acb + VFMSUB132PS Z0, Z14, K4, Z9 // 
62720d4c9ac8 + VFMSUB132PS 7(SI)(DI*8), Z14, K4, Z9 // 62720d4c9a8cfe07000000 + VFMSUB132PS -15(R14), Z14, K4, Z9 // 62520d4c9a8ef1ffffff + VFMSUB132SD X2, X13, K5, X19 // 62e2950d9bda + VFMSUB132SD X0, X0, K7, X14 // 6272fd0f9bf0 or 6272fd2f9bf0 or 6272fd4f9bf0 + VFMSUB132SD 7(AX), X0, K7, X14 // 6272fd0f9bb007000000 or 6272fd2f9bb007000000 or 6272fd4f9bb007000000 + VFMSUB132SD (DI), X0, K7, X14 // 6272fd0f9b37 or 6272fd2f9b37 or 6272fd4f9b37 + VFMSUB132SS X17, X11, K7, X25 // 6222250f9bc9 + VFMSUB132SS X9, X11, K6, X18 // 62c2250e9bd1 or 62c2252e9bd1 or 62c2254e9bd1 + VFMSUB132SS 7(AX)(CX*4), X11, K6, X18 // 62e2250e9b948807000000 or 62e2252e9b948807000000 or 62e2254e9b948807000000 + VFMSUB132SS 7(AX)(CX*1), X11, K6, X18 // 62e2250e9b940807000000 or 62e2252e9b940807000000 or 62e2254e9b940807000000 + VFMSUB213PD X2, X24, K3, X2 // 62f2bd03aad2 + VFMSUB213PD 15(R8)(R14*8), X24, K3, X2 // 6292bd03aa94f00f000000 + VFMSUB213PD -15(R14)(R15*2), X24, K3, X2 // 6292bd03aa947ef1ffffff + VFMSUB213PD Y9, Y8, K7, Y27 // 6242bd2faad9 + VFMSUB213PD 17(SP)(BP*1), Y8, K7, Y27 // 6262bd2faa9c2c11000000 + VFMSUB213PD -7(CX)(DX*8), Y8, K7, Y27 // 6262bd2faa9cd1f9ffffff + VFMSUB213PD Z22, Z8, K4, Z14 // 6232bd4caaf6 + VFMSUB213PD Z25, Z8, K4, Z14 // 6212bd4caaf1 + VFMSUB213PD Z22, Z24, K4, Z14 // 6232bd44aaf6 + VFMSUB213PD Z25, Z24, K4, Z14 // 6212bd44aaf1 + VFMSUB213PD Z22, Z8, K4, Z7 // 62b2bd4caafe + VFMSUB213PD Z25, Z8, K4, Z7 // 6292bd4caaf9 + VFMSUB213PD Z22, Z24, K4, Z7 // 62b2bd44aafe + VFMSUB213PD Z25, Z24, K4, Z7 // 6292bd44aaf9 + VFMSUB213PD Z0, Z6, K4, Z1 // 62f2cd4caac8 + VFMSUB213PD Z8, Z6, K4, Z1 // 62d2cd4caac8 + VFMSUB213PD 7(SI)(DI*1), Z6, K4, Z1 // 62f2cd4caa8c3e07000000 + VFMSUB213PD 15(DX)(BX*8), Z6, K4, Z1 // 62f2cd4caa8cda0f000000 + VFMSUB213PD Z0, Z2, K4, Z1 // 62f2ed4caac8 + VFMSUB213PD Z8, Z2, K4, Z1 // 62d2ed4caac8 + VFMSUB213PD 7(SI)(DI*1), Z2, K4, Z1 // 62f2ed4caa8c3e07000000 + VFMSUB213PD 15(DX)(BX*8), Z2, K4, Z1 // 62f2ed4caa8cda0f000000 + VFMSUB213PD 
Z0, Z6, K4, Z16 // 62e2cd4caac0 + VFMSUB213PD Z8, Z6, K4, Z16 // 62c2cd4caac0 + VFMSUB213PD 7(SI)(DI*1), Z6, K4, Z16 // 62e2cd4caa843e07000000 + VFMSUB213PD 15(DX)(BX*8), Z6, K4, Z16 // 62e2cd4caa84da0f000000 + VFMSUB213PD Z0, Z2, K4, Z16 // 62e2ed4caac0 + VFMSUB213PD Z8, Z2, K4, Z16 // 62c2ed4caac0 + VFMSUB213PD 7(SI)(DI*1), Z2, K4, Z16 // 62e2ed4caa843e07000000 + VFMSUB213PD 15(DX)(BX*8), Z2, K4, Z16 // 62e2ed4caa84da0f000000 + VFMSUB213PS X26, X27, K7, X2 // 62922507aad2 + VFMSUB213PS -15(R14)(R15*1), X27, K7, X2 // 62922507aa943ef1ffffff + VFMSUB213PS -15(BX), X27, K7, X2 // 62f22507aa93f1ffffff + VFMSUB213PS Y14, Y9, K2, Y22 // 62c2352aaaf6 + VFMSUB213PS -17(BP)(SI*2), Y9, K2, Y22 // 62e2352aaab475efffffff + VFMSUB213PS 7(AX)(CX*2), Y9, K2, Y22 // 62e2352aaab44807000000 + VFMSUB213PS Z11, Z14, K5, Z15 // 62520d4daafb + VFMSUB213PS Z5, Z14, K5, Z15 // 62720d4daafd + VFMSUB213PS Z11, Z27, K5, Z15 // 62522545aafb + VFMSUB213PS Z5, Z27, K5, Z15 // 62722545aafd + VFMSUB213PS Z11, Z14, K5, Z12 // 62520d4daae3 + VFMSUB213PS Z5, Z14, K5, Z12 // 62720d4daae5 + VFMSUB213PS Z11, Z27, K5, Z12 // 62522545aae3 + VFMSUB213PS Z5, Z27, K5, Z12 // 62722545aae5 + VFMSUB213PS Z2, Z5, K3, Z13 // 6272554baaea + VFMSUB213PS -7(DI)(R8*1), Z5, K3, Z13 // 6232554baaac07f9ffffff + VFMSUB213PS (SP), Z5, K3, Z13 // 6272554baa2c24 + VFMSUB213PS Z2, Z23, K3, Z13 // 62724543aaea + VFMSUB213PS -7(DI)(R8*1), Z23, K3, Z13 // 62324543aaac07f9ffffff + VFMSUB213PS (SP), Z23, K3, Z13 // 62724543aa2c24 + VFMSUB213PS Z2, Z5, K3, Z14 // 6272554baaf2 + VFMSUB213PS -7(DI)(R8*1), Z5, K3, Z14 // 6232554baab407f9ffffff + VFMSUB213PS (SP), Z5, K3, Z14 // 6272554baa3424 + VFMSUB213PS Z2, Z23, K3, Z14 // 62724543aaf2 + VFMSUB213PS -7(DI)(R8*1), Z23, K3, Z14 // 62324543aab407f9ffffff + VFMSUB213PS (SP), Z23, K3, Z14 // 62724543aa3424 + VFMSUB213SD X3, X30, K4, X22 // 62e28d04abf3 + VFMSUB213SD X30, X15, K2, X11 // 6212850aabde or 6212852aabde or 6212854aabde + VFMSUB213SD 99(R15)(R15*1), X15, K2, X11 // 
6212850aab9c3f63000000 or 6212852aab9c3f63000000 or 6212854aab9c3f63000000 + VFMSUB213SD (DX), X15, K2, X11 // 6272850aab1a or 6272852aab1a or 6272854aab1a + VFMSUB213SS X12, X6, K2, X13 // 62524d0aabec + VFMSUB213SS X8, X30, K3, X23 // 62c20d03abf8 or 62c20d23abf8 or 62c20d43abf8 + VFMSUB213SS (SI), X30, K3, X23 // 62e20d03ab3e or 62e20d23ab3e or 62e20d43ab3e + VFMSUB213SS 7(SI)(DI*2), X30, K3, X23 // 62e20d03abbc7e07000000 or 62e20d23abbc7e07000000 or 62e20d43abbc7e07000000 + VFMSUB231PD X9, X2, K3, X20 // 62c2ed0bbae1 + VFMSUB231PD 7(AX)(CX*4), X2, K3, X20 // 62e2ed0bbaa48807000000 + VFMSUB231PD 7(AX)(CX*1), X2, K3, X20 // 62e2ed0bbaa40807000000 + VFMSUB231PD Y1, Y6, K3, Y1 // 62f2cd2bbac9 + VFMSUB231PD 15(R8)(R14*1), Y6, K3, Y1 // 6292cd2bba8c300f000000 + VFMSUB231PD 15(R8)(R14*2), Y6, K3, Y1 // 6292cd2bba8c700f000000 + VFMSUB231PD Z28, Z26, K2, Z6 // 6292ad42baf4 + VFMSUB231PD Z6, Z26, K2, Z6 // 62f2ad42baf6 + VFMSUB231PD Z28, Z14, K2, Z6 // 62928d4abaf4 + VFMSUB231PD Z6, Z14, K2, Z6 // 62f28d4abaf6 + VFMSUB231PD Z28, Z26, K2, Z14 // 6212ad42baf4 + VFMSUB231PD Z6, Z26, K2, Z14 // 6272ad42baf6 + VFMSUB231PD Z28, Z14, K2, Z14 // 62128d4abaf4 + VFMSUB231PD Z6, Z14, K2, Z14 // 62728d4abaf6 + VFMSUB231PD Z3, Z26, K1, Z13 // 6272ad41baeb + VFMSUB231PD Z0, Z26, K1, Z13 // 6272ad41bae8 + VFMSUB231PD -7(CX), Z26, K1, Z13 // 6272ad41baa9f9ffffff + VFMSUB231PD 15(DX)(BX*4), Z26, K1, Z13 // 6272ad41baac9a0f000000 + VFMSUB231PD Z3, Z3, K1, Z13 // 6272e549baeb + VFMSUB231PD Z0, Z3, K1, Z13 // 6272e549bae8 + VFMSUB231PD -7(CX), Z3, K1, Z13 // 6272e549baa9f9ffffff + VFMSUB231PD 15(DX)(BX*4), Z3, K1, Z13 // 6272e549baac9a0f000000 + VFMSUB231PD Z3, Z26, K1, Z21 // 62e2ad41baeb + VFMSUB231PD Z0, Z26, K1, Z21 // 62e2ad41bae8 + VFMSUB231PD -7(CX), Z26, K1, Z21 // 62e2ad41baa9f9ffffff + VFMSUB231PD 15(DX)(BX*4), Z26, K1, Z21 // 62e2ad41baac9a0f000000 + VFMSUB231PD Z3, Z3, K1, Z21 // 62e2e549baeb + VFMSUB231PD Z0, Z3, K1, Z21 // 62e2e549bae8 + VFMSUB231PD -7(CX), Z3, K1, Z21 // 
62e2e549baa9f9ffffff + VFMSUB231PD 15(DX)(BX*4), Z3, K1, Z21 // 62e2e549baac9a0f000000 + VFMSUB231PS X0, X19, K2, X26 // 62626502bad0 + VFMSUB231PS (SI), X19, K2, X26 // 62626502ba16 + VFMSUB231PS 7(SI)(DI*2), X19, K2, X26 // 62626502ba947e07000000 + VFMSUB231PS Y19, Y0, K1, Y9 // 62327d29bacb + VFMSUB231PS (R14), Y0, K1, Y9 // 62527d29ba0e + VFMSUB231PS -7(DI)(R8*8), Y0, K1, Y9 // 62327d29ba8cc7f9ffffff + VFMSUB231PS Z3, Z11, K7, Z21 // 62e2254fbaeb + VFMSUB231PS Z12, Z11, K7, Z21 // 62c2254fbaec + VFMSUB231PS Z3, Z25, K7, Z21 // 62e23547baeb + VFMSUB231PS Z12, Z25, K7, Z21 // 62c23547baec + VFMSUB231PS Z3, Z11, K7, Z13 // 6272254fbaeb + VFMSUB231PS Z12, Z11, K7, Z13 // 6252254fbaec + VFMSUB231PS Z3, Z25, K7, Z13 // 62723547baeb + VFMSUB231PS Z12, Z25, K7, Z13 // 62523547baec + VFMSUB231PS Z23, Z23, K1, Z27 // 62224541badf + VFMSUB231PS Z6, Z23, K1, Z27 // 62624541bade + VFMSUB231PS 99(R15)(R15*8), Z23, K1, Z27 // 62024541ba9cff63000000 + VFMSUB231PS 7(AX)(CX*8), Z23, K1, Z27 // 62624541ba9cc807000000 + VFMSUB231PS Z23, Z5, K1, Z27 // 62225549badf + VFMSUB231PS Z6, Z5, K1, Z27 // 62625549bade + VFMSUB231PS 99(R15)(R15*8), Z5, K1, Z27 // 62025549ba9cff63000000 + VFMSUB231PS 7(AX)(CX*8), Z5, K1, Z27 // 62625549ba9cc807000000 + VFMSUB231PS Z23, Z23, K1, Z15 // 62324541baff + VFMSUB231PS Z6, Z23, K1, Z15 // 62724541bafe + VFMSUB231PS 99(R15)(R15*8), Z23, K1, Z15 // 62124541babcff63000000 + VFMSUB231PS 7(AX)(CX*8), Z23, K1, Z15 // 62724541babcc807000000 + VFMSUB231PS Z23, Z5, K1, Z15 // 62325549baff + VFMSUB231PS Z6, Z5, K1, Z15 // 62725549bafe + VFMSUB231PS 99(R15)(R15*8), Z5, K1, Z15 // 62125549babcff63000000 + VFMSUB231PS 7(AX)(CX*8), Z5, K1, Z15 // 62725549babcc807000000 + VFMSUB231SD X7, X16, K1, X31 // 6262fd01bbff + VFMSUB231SD X0, X1, K1, X8 // 6272f509bbc0 or 6272f529bbc0 or 6272f549bbc0 + VFMSUB231SD -17(BP)(SI*8), X1, K1, X8 // 6272f509bb84f5efffffff or 6272f529bb84f5efffffff or 6272f549bb84f5efffffff + VFMSUB231SD (R15), X1, K1, X8 // 6252f509bb07 or 
6252f529bb07 or 6252f549bb07 + VFMSUB231SS X16, X0, K7, X15 // 62327d0fbbf8 + VFMSUB231SS X28, X0, K2, X21 // 62827d0abbec or 62827d2abbec or 62827d4abbec + VFMSUB231SS 17(SP)(BP*8), X0, K2, X21 // 62e27d0abbacec11000000 or 62e27d2abbacec11000000 or 62e27d4abbacec11000000 + VFMSUB231SS 17(SP)(BP*4), X0, K2, X21 // 62e27d0abbacac11000000 or 62e27d2abbacac11000000 or 62e27d4abbacac11000000 + VFMSUBADD132PD X19, X7, K4, X22 // 62a2c50c97f3 + VFMSUBADD132PD 17(SP)(BP*8), X7, K4, X22 // 62e2c50c97b4ec11000000 + VFMSUBADD132PD 17(SP)(BP*4), X7, K4, X22 // 62e2c50c97b4ac11000000 + VFMSUBADD132PD Y9, Y22, K1, Y31 // 6242cd2197f9 + VFMSUBADD132PD 99(R15)(R15*4), Y22, K1, Y31 // 6202cd2197bcbf63000000 + VFMSUBADD132PD 15(DX), Y22, K1, Y31 // 6262cd2197ba0f000000 + VFMSUBADD132PD Z16, Z21, K3, Z8 // 6232d54397c0 + VFMSUBADD132PD Z13, Z21, K3, Z8 // 6252d54397c5 + VFMSUBADD132PD Z16, Z5, K3, Z8 // 6232d54b97c0 + VFMSUBADD132PD Z13, Z5, K3, Z8 // 6252d54b97c5 + VFMSUBADD132PD Z16, Z21, K3, Z28 // 6222d54397e0 + VFMSUBADD132PD Z13, Z21, K3, Z28 // 6242d54397e5 + VFMSUBADD132PD Z16, Z5, K3, Z28 // 6222d54b97e0 + VFMSUBADD132PD Z13, Z5, K3, Z28 // 6242d54b97e5 + VFMSUBADD132PD Z6, Z22, K4, Z12 // 6272cd4497e6 + VFMSUBADD132PD Z8, Z22, K4, Z12 // 6252cd4497e0 + VFMSUBADD132PD (AX), Z22, K4, Z12 // 6272cd449720 + VFMSUBADD132PD 7(SI), Z22, K4, Z12 // 6272cd4497a607000000 + VFMSUBADD132PD Z6, Z11, K4, Z12 // 6272a54c97e6 + VFMSUBADD132PD Z8, Z11, K4, Z12 // 6252a54c97e0 + VFMSUBADD132PD (AX), Z11, K4, Z12 // 6272a54c9720 + VFMSUBADD132PD 7(SI), Z11, K4, Z12 // 6272a54c97a607000000 + VFMSUBADD132PD Z6, Z22, K4, Z27 // 6262cd4497de + VFMSUBADD132PD Z8, Z22, K4, Z27 // 6242cd4497d8 + VFMSUBADD132PD (AX), Z22, K4, Z27 // 6262cd449718 + VFMSUBADD132PD 7(SI), Z22, K4, Z27 // 6262cd44979e07000000 + VFMSUBADD132PD Z6, Z11, K4, Z27 // 6262a54c97de + VFMSUBADD132PD Z8, Z11, K4, Z27 // 6242a54c97d8 + VFMSUBADD132PD (AX), Z11, K4, Z27 // 6262a54c9718 + VFMSUBADD132PD 7(SI), Z11, K4, Z27 // 
6262a54c979e07000000 + VFMSUBADD132PS X31, X16, K5, X7 // 62927d0597ff + VFMSUBADD132PS 7(SI)(DI*4), X16, K5, X7 // 62f27d0597bcbe07000000 + VFMSUBADD132PS -7(DI)(R8*2), X16, K5, X7 // 62b27d0597bc47f9ffffff + VFMSUBADD132PS Y5, Y31, K7, Y23 // 62e2052797fd + VFMSUBADD132PS (CX), Y31, K7, Y23 // 62e205279739 + VFMSUBADD132PS 99(R15), Y31, K7, Y23 // 62c2052797bf63000000 + VFMSUBADD132PS Z9, Z12, K7, Z25 // 62421d4f97c9 + VFMSUBADD132PS Z12, Z12, K7, Z25 // 62421d4f97cc + VFMSUBADD132PS Z9, Z17, K7, Z25 // 6242754797c9 + VFMSUBADD132PS Z12, Z17, K7, Z25 // 6242754797cc + VFMSUBADD132PS Z9, Z12, K7, Z12 // 62521d4f97e1 + VFMSUBADD132PS Z12, Z12, K7, Z12 // 62521d4f97e4 + VFMSUBADD132PS Z9, Z17, K7, Z12 // 6252754797e1 + VFMSUBADD132PS Z12, Z17, K7, Z12 // 6252754797e4 + VFMSUBADD132PS Z8, Z3, K6, Z6 // 62d2654e97f0 + VFMSUBADD132PS Z2, Z3, K6, Z6 // 62f2654e97f2 + VFMSUBADD132PS (BX), Z3, K6, Z6 // 62f2654e9733 + VFMSUBADD132PS -17(BP)(SI*1), Z3, K6, Z6 // 62f2654e97b435efffffff + VFMSUBADD132PS Z8, Z21, K6, Z6 // 62d2554697f0 + VFMSUBADD132PS Z2, Z21, K6, Z6 // 62f2554697f2 + VFMSUBADD132PS (BX), Z21, K6, Z6 // 62f255469733 + VFMSUBADD132PS -17(BP)(SI*1), Z21, K6, Z6 // 62f2554697b435efffffff + VFMSUBADD132PS Z8, Z3, K6, Z25 // 6242654e97c8 + VFMSUBADD132PS Z2, Z3, K6, Z25 // 6262654e97ca + VFMSUBADD132PS (BX), Z3, K6, Z25 // 6262654e970b + VFMSUBADD132PS -17(BP)(SI*1), Z3, K6, Z25 // 6262654e978c35efffffff + VFMSUBADD132PS Z8, Z21, K6, Z25 // 6242554697c8 + VFMSUBADD132PS Z2, Z21, K6, Z25 // 6262554697ca + VFMSUBADD132PS (BX), Z21, K6, Z25 // 62625546970b + VFMSUBADD132PS -17(BP)(SI*1), Z21, K6, Z25 // 62625546978c35efffffff + VFMSUBADD213PD X9, X7, K3, X1 // 62d2c50ba7c9 + VFMSUBADD213PD 17(SP), X7, K3, X1 // 62f2c50ba78c2411000000 + VFMSUBADD213PD -17(BP)(SI*4), X7, K3, X1 // 62f2c50ba78cb5efffffff + VFMSUBADD213PD Y19, Y5, K7, Y0 // 62b2d52fa7c3 + VFMSUBADD213PD 99(R15)(R15*2), Y5, K7, Y0 // 6292d52fa7847f63000000 + VFMSUBADD213PD -7(DI), Y5, K7, Y0 // 
62f2d52fa787f9ffffff + VFMSUBADD213PD Z0, Z7, K4, Z3 // 62f2c54ca7d8 + VFMSUBADD213PD Z6, Z7, K4, Z3 // 62f2c54ca7de + VFMSUBADD213PD Z0, Z9, K4, Z3 // 62f2b54ca7d8 + VFMSUBADD213PD Z6, Z9, K4, Z3 // 62f2b54ca7de + VFMSUBADD213PD Z0, Z7, K4, Z27 // 6262c54ca7d8 + VFMSUBADD213PD Z6, Z7, K4, Z27 // 6262c54ca7de + VFMSUBADD213PD Z0, Z9, K4, Z27 // 6262b54ca7d8 + VFMSUBADD213PD Z6, Z9, K4, Z27 // 6262b54ca7de + VFMSUBADD213PD Z9, Z3, K4, Z20 // 62c2e54ca7e1 + VFMSUBADD213PD Z19, Z3, K4, Z20 // 62a2e54ca7e3 + VFMSUBADD213PD 15(R8)(R14*4), Z3, K4, Z20 // 6282e54ca7a4b00f000000 + VFMSUBADD213PD -7(CX)(DX*4), Z3, K4, Z20 // 62e2e54ca7a491f9ffffff + VFMSUBADD213PD Z9, Z30, K4, Z20 // 62c28d44a7e1 + VFMSUBADD213PD Z19, Z30, K4, Z20 // 62a28d44a7e3 + VFMSUBADD213PD 15(R8)(R14*4), Z30, K4, Z20 // 62828d44a7a4b00f000000 + VFMSUBADD213PD -7(CX)(DX*4), Z30, K4, Z20 // 62e28d44a7a491f9ffffff + VFMSUBADD213PD Z9, Z3, K4, Z28 // 6242e54ca7e1 + VFMSUBADD213PD Z19, Z3, K4, Z28 // 6222e54ca7e3 + VFMSUBADD213PD 15(R8)(R14*4), Z3, K4, Z28 // 6202e54ca7a4b00f000000 + VFMSUBADD213PD -7(CX)(DX*4), Z3, K4, Z28 // 6262e54ca7a491f9ffffff + VFMSUBADD213PD Z9, Z30, K4, Z28 // 62428d44a7e1 + VFMSUBADD213PD Z19, Z30, K4, Z28 // 62228d44a7e3 + VFMSUBADD213PD 15(R8)(R14*4), Z30, K4, Z28 // 62028d44a7a4b00f000000 + VFMSUBADD213PD -7(CX)(DX*4), Z30, K4, Z28 // 62628d44a7a491f9ffffff + VFMSUBADD213PS X0, X12, K7, X15 // 62721d0fa7f8 + VFMSUBADD213PS 7(AX), X12, K7, X15 // 62721d0fa7b807000000 + VFMSUBADD213PS (DI), X12, K7, X15 // 62721d0fa73f + VFMSUBADD213PS Y2, Y28, K2, Y31 // 62621d22a7fa + VFMSUBADD213PS -7(CX)(DX*1), Y28, K2, Y31 // 62621d22a7bc11f9ffffff + VFMSUBADD213PS -15(R14)(R15*4), Y28, K2, Y31 // 62021d22a7bcbef1ffffff + VFMSUBADD213PS Z18, Z11, K5, Z12 // 6232254da7e2 + VFMSUBADD213PS Z24, Z11, K5, Z12 // 6212254da7e0 + VFMSUBADD213PS Z18, Z5, K5, Z12 // 6232554da7e2 + VFMSUBADD213PS Z24, Z5, K5, Z12 // 6212554da7e0 + VFMSUBADD213PS Z18, Z11, K5, Z22 // 62a2254da7f2 + VFMSUBADD213PS Z24, 
Z11, K5, Z22 // 6282254da7f0 + VFMSUBADD213PS Z18, Z5, K5, Z22 // 62a2554da7f2 + VFMSUBADD213PS Z24, Z5, K5, Z22 // 6282554da7f0 + VFMSUBADD213PS Z6, Z7, K3, Z2 // 62f2454ba7d6 + VFMSUBADD213PS Z16, Z7, K3, Z2 // 62b2454ba7d0 + VFMSUBADD213PS (R8), Z7, K3, Z2 // 62d2454ba710 + VFMSUBADD213PS 15(DX)(BX*2), Z7, K3, Z2 // 62f2454ba7945a0f000000 + VFMSUBADD213PS Z6, Z13, K3, Z2 // 62f2154ba7d6 + VFMSUBADD213PS Z16, Z13, K3, Z2 // 62b2154ba7d0 + VFMSUBADD213PS (R8), Z13, K3, Z2 // 62d2154ba710 + VFMSUBADD213PS 15(DX)(BX*2), Z13, K3, Z2 // 62f2154ba7945a0f000000 + VFMSUBADD213PS Z6, Z7, K3, Z21 // 62e2454ba7ee + VFMSUBADD213PS Z16, Z7, K3, Z21 // 62a2454ba7e8 + VFMSUBADD213PS (R8), Z7, K3, Z21 // 62c2454ba728 + VFMSUBADD213PS 15(DX)(BX*2), Z7, K3, Z21 // 62e2454ba7ac5a0f000000 + VFMSUBADD213PS Z6, Z13, K3, Z21 // 62e2154ba7ee + VFMSUBADD213PS Z16, Z13, K3, Z21 // 62a2154ba7e8 + VFMSUBADD213PS (R8), Z13, K3, Z21 // 62c2154ba728 + VFMSUBADD213PS 15(DX)(BX*2), Z13, K3, Z21 // 62e2154ba7ac5a0f000000 + VFMSUBADD231PD X5, X14, K4, X12 // 62728d0cb7e5 + VFMSUBADD231PD 99(R15)(R15*1), X14, K4, X12 // 62128d0cb7a43f63000000 + VFMSUBADD231PD (DX), X14, K4, X12 // 62728d0cb722 + VFMSUBADD231PD Y0, Y27, K2, Y24 // 6262a522b7c0 + VFMSUBADD231PD 15(DX)(BX*1), Y27, K2, Y24 // 6262a522b7841a0f000000 + VFMSUBADD231PD -7(CX)(DX*2), Y27, K2, Y24 // 6262a522b78451f9ffffff + VFMSUBADD231PD Z13, Z1, K2, Z6 // 62d2f54ab7f5 + VFMSUBADD231PD Z13, Z15, K2, Z6 // 62d2854ab7f5 + VFMSUBADD231PD Z13, Z1, K2, Z22 // 62c2f54ab7f5 + VFMSUBADD231PD Z13, Z15, K2, Z22 // 62c2854ab7f5 + VFMSUBADD231PD Z2, Z22, K3, Z18 // 62e2cd43b7d2 + VFMSUBADD231PD Z31, Z22, K3, Z18 // 6282cd43b7d7 + VFMSUBADD231PD 17(SP)(BP*1), Z22, K3, Z18 // 62e2cd43b7942c11000000 + VFMSUBADD231PD -7(CX)(DX*8), Z22, K3, Z18 // 62e2cd43b794d1f9ffffff + VFMSUBADD231PD Z2, Z7, K3, Z18 // 62e2c54bb7d2 + VFMSUBADD231PD Z31, Z7, K3, Z18 // 6282c54bb7d7 + VFMSUBADD231PD 17(SP)(BP*1), Z7, K3, Z18 // 62e2c54bb7942c11000000 + VFMSUBADD231PD 
-7(CX)(DX*8), Z7, K3, Z18 // 62e2c54bb794d1f9ffffff + VFMSUBADD231PD Z2, Z22, K3, Z8 // 6272cd43b7c2 + VFMSUBADD231PD Z31, Z22, K3, Z8 // 6212cd43b7c7 + VFMSUBADD231PD 17(SP)(BP*1), Z22, K3, Z8 // 6272cd43b7842c11000000 + VFMSUBADD231PD -7(CX)(DX*8), Z22, K3, Z8 // 6272cd43b784d1f9ffffff + VFMSUBADD231PD Z2, Z7, K3, Z8 // 6272c54bb7c2 + VFMSUBADD231PD Z31, Z7, K3, Z8 // 6212c54bb7c7 + VFMSUBADD231PD 17(SP)(BP*1), Z7, K3, Z8 // 6272c54bb7842c11000000 + VFMSUBADD231PD -7(CX)(DX*8), Z7, K3, Z8 // 6272c54bb784d1f9ffffff + VFMSUBADD231PS X8, X15, K3, X17 // 62c2050bb7c8 + VFMSUBADD231PS -17(BP)(SI*8), X15, K3, X17 // 62e2050bb78cf5efffffff + VFMSUBADD231PS (R15), X15, K3, X17 // 62c2050bb70f + VFMSUBADD231PS Y3, Y31, K3, Y11 // 62720523b7db + VFMSUBADD231PS -17(BP), Y31, K3, Y11 // 62720523b79defffffff + VFMSUBADD231PS -15(R14)(R15*8), Y31, K3, Y11 // 62120523b79cfef1ffffff + VFMSUBADD231PS Z12, Z1, K2, Z20 // 62c2754ab7e4 + VFMSUBADD231PS Z16, Z1, K2, Z20 // 62a2754ab7e0 + VFMSUBADD231PS Z12, Z3, K2, Z20 // 62c2654ab7e4 + VFMSUBADD231PS Z16, Z3, K2, Z20 // 62a2654ab7e0 + VFMSUBADD231PS Z12, Z1, K2, Z9 // 6252754ab7cc + VFMSUBADD231PS Z16, Z1, K2, Z9 // 6232754ab7c8 + VFMSUBADD231PS Z12, Z3, K2, Z9 // 6252654ab7cc + VFMSUBADD231PS Z16, Z3, K2, Z9 // 6232654ab7c8 + VFMSUBADD231PS Z3, Z14, K1, Z28 // 62620d49b7e3 + VFMSUBADD231PS Z12, Z14, K1, Z28 // 62420d49b7e4 + VFMSUBADD231PS -17(BP)(SI*2), Z14, K1, Z28 // 62620d49b7a475efffffff + VFMSUBADD231PS 7(AX)(CX*2), Z14, K1, Z28 // 62620d49b7a44807000000 + VFMSUBADD231PS Z3, Z28, K1, Z28 // 62621d41b7e3 + VFMSUBADD231PS Z12, Z28, K1, Z28 // 62421d41b7e4 + VFMSUBADD231PS -17(BP)(SI*2), Z28, K1, Z28 // 62621d41b7a475efffffff + VFMSUBADD231PS 7(AX)(CX*2), Z28, K1, Z28 // 62621d41b7a44807000000 + VFMSUBADD231PS Z3, Z14, K1, Z13 // 62720d49b7eb + VFMSUBADD231PS Z12, Z14, K1, Z13 // 62520d49b7ec + VFMSUBADD231PS -17(BP)(SI*2), Z14, K1, Z13 // 62720d49b7ac75efffffff + VFMSUBADD231PS 7(AX)(CX*2), Z14, K1, Z13 // 
62720d49b7ac4807000000 + VFMSUBADD231PS Z3, Z28, K1, Z13 // 62721d41b7eb + VFMSUBADD231PS Z12, Z28, K1, Z13 // 62521d41b7ec + VFMSUBADD231PS -17(BP)(SI*2), Z28, K1, Z13 // 62721d41b7ac75efffffff + VFMSUBADD231PS 7(AX)(CX*2), Z28, K1, Z13 // 62721d41b7ac4807000000 + VFNMADD132PD X23, X26, K2, X3 // 62b2ad029cdf + VFNMADD132PD 7(SI)(DI*8), X26, K2, X3 // 62f2ad029c9cfe07000000 + VFNMADD132PD -15(R14), X26, K2, X3 // 62d2ad029c9ef1ffffff + VFNMADD132PD Y13, Y2, K1, Y14 // 6252ed299cf5 + VFNMADD132PD 17(SP)(BP*2), Y2, K1, Y14 // 6272ed299cb46c11000000 + VFNMADD132PD -7(DI)(R8*4), Y2, K1, Y14 // 6232ed299cb487f9ffffff + VFNMADD132PD Z5, Z19, K7, Z15 // 6272e5479cfd + VFNMADD132PD Z1, Z19, K7, Z15 // 6272e5479cf9 + VFNMADD132PD Z5, Z15, K7, Z15 // 6272854f9cfd + VFNMADD132PD Z1, Z15, K7, Z15 // 6272854f9cf9 + VFNMADD132PD Z5, Z19, K7, Z30 // 6262e5479cf5 + VFNMADD132PD Z1, Z19, K7, Z30 // 6262e5479cf1 + VFNMADD132PD Z5, Z15, K7, Z30 // 6262854f9cf5 + VFNMADD132PD Z1, Z15, K7, Z30 // 6262854f9cf1 + VFNMADD132PD Z21, Z14, K1, Z3 // 62b28d499cdd + VFNMADD132PD Z8, Z14, K1, Z3 // 62d28d499cd8 + VFNMADD132PD 15(R8)(R14*1), Z14, K1, Z3 // 62928d499c9c300f000000 + VFNMADD132PD 15(R8)(R14*2), Z14, K1, Z3 // 62928d499c9c700f000000 + VFNMADD132PD Z21, Z15, K1, Z3 // 62b285499cdd + VFNMADD132PD Z8, Z15, K1, Z3 // 62d285499cd8 + VFNMADD132PD 15(R8)(R14*1), Z15, K1, Z3 // 629285499c9c300f000000 + VFNMADD132PD 15(R8)(R14*2), Z15, K1, Z3 // 629285499c9c700f000000 + VFNMADD132PD Z21, Z14, K1, Z5 // 62b28d499ced + VFNMADD132PD Z8, Z14, K1, Z5 // 62d28d499ce8 + VFNMADD132PD 15(R8)(R14*1), Z14, K1, Z5 // 62928d499cac300f000000 + VFNMADD132PD 15(R8)(R14*2), Z14, K1, Z5 // 62928d499cac700f000000 + VFNMADD132PD Z21, Z15, K1, Z5 // 62b285499ced + VFNMADD132PD Z8, Z15, K1, Z5 // 62d285499ce8 + VFNMADD132PD 15(R8)(R14*1), Z15, K1, Z5 // 629285499cac300f000000 + VFNMADD132PD 15(R8)(R14*2), Z15, K1, Z5 // 629285499cac700f000000 + VFNMADD132PS X24, X28, K1, X13 // 62121d019ce8 + VFNMADD132PS 
7(SI)(DI*1), X28, K1, X13 // 62721d019cac3e07000000 + VFNMADD132PS 15(DX)(BX*8), X28, K1, X13 // 62721d019cacda0f000000 + VFNMADD132PS Y22, Y15, K1, Y27 // 622205299cde + VFNMADD132PS 15(R8), Y15, K1, Y27 // 624205299c980f000000 + VFNMADD132PS (BP), Y15, K1, Y27 // 626205299c5d00 + VFNMADD132PS Z23, Z20, K7, Z16 // 62a25d479cc7 + VFNMADD132PS Z19, Z20, K7, Z16 // 62a25d479cc3 + VFNMADD132PS Z23, Z0, K7, Z16 // 62a27d4f9cc7 + VFNMADD132PS Z19, Z0, K7, Z16 // 62a27d4f9cc3 + VFNMADD132PS Z23, Z20, K7, Z9 // 62325d479ccf + VFNMADD132PS Z19, Z20, K7, Z9 // 62325d479ccb + VFNMADD132PS Z23, Z0, K7, Z9 // 62327d4f9ccf + VFNMADD132PS Z19, Z0, K7, Z9 // 62327d4f9ccb + VFNMADD132PS Z24, Z0, K2, Z0 // 62927d4a9cc0 + VFNMADD132PS Z12, Z0, K2, Z0 // 62d27d4a9cc4 + VFNMADD132PS (R14), Z0, K2, Z0 // 62d27d4a9c06 + VFNMADD132PS -7(DI)(R8*8), Z0, K2, Z0 // 62b27d4a9c84c7f9ffffff + VFNMADD132PS Z24, Z25, K2, Z0 // 629235429cc0 + VFNMADD132PS Z12, Z25, K2, Z0 // 62d235429cc4 + VFNMADD132PS (R14), Z25, K2, Z0 // 62d235429c06 + VFNMADD132PS -7(DI)(R8*8), Z25, K2, Z0 // 62b235429c84c7f9ffffff + VFNMADD132PS Z24, Z0, K2, Z11 // 62127d4a9cd8 + VFNMADD132PS Z12, Z0, K2, Z11 // 62527d4a9cdc + VFNMADD132PS (R14), Z0, K2, Z11 // 62527d4a9c1e + VFNMADD132PS -7(DI)(R8*8), Z0, K2, Z11 // 62327d4a9c9cc7f9ffffff + VFNMADD132PS Z24, Z25, K2, Z11 // 621235429cd8 + VFNMADD132PS Z12, Z25, K2, Z11 // 625235429cdc + VFNMADD132PS (R14), Z25, K2, Z11 // 625235429c1e + VFNMADD132PS -7(DI)(R8*8), Z25, K2, Z11 // 623235429c9cc7f9ffffff + VFNMADD132SD X26, X15, K4, X9 // 6212850c9dca + VFNMADD132SD X1, X21, K1, X18 // 62e2d5019dd1 or 62e2d5219dd1 or 62e2d5419dd1 + VFNMADD132SD 7(SI)(DI*8), X21, K1, X18 // 62e2d5019d94fe07000000 or 62e2d5219d94fe07000000 or 62e2d5419d94fe07000000 + VFNMADD132SD -15(R14), X21, K1, X18 // 62c2d5019d96f1ffffff or 62c2d5219d96f1ffffff or 62c2d5419d96f1ffffff + VFNMADD132SS X3, X31, K3, X11 // 627205039ddb + VFNMADD132SS X0, X0, K4, X7 // 62f27d0c9df8 or 62f27d2c9df8 or 62f27d4c9df8 
+ VFNMADD132SS 7(SI)(DI*4), X0, K4, X7 // 62f27d0c9dbcbe07000000 or 62f27d2c9dbcbe07000000 or 62f27d4c9dbcbe07000000 + VFNMADD132SS -7(DI)(R8*2), X0, K4, X7 // 62b27d0c9dbc47f9ffffff or 62b27d2c9dbc47f9ffffff or 62b27d4c9dbc47f9ffffff + VFNMADD213PD X7, X20, K5, X24 // 6262dd05acc7 + VFNMADD213PD -7(DI)(R8*1), X20, K5, X24 // 6222dd05ac8407f9ffffff + VFNMADD213PD (SP), X20, K5, X24 // 6262dd05ac0424 + VFNMADD213PD Y24, Y18, K7, Y20 // 6282ed27ace0 + VFNMADD213PD 15(R8)(R14*8), Y18, K7, Y20 // 6282ed27aca4f00f000000 + VFNMADD213PD -15(R14)(R15*2), Y18, K7, Y20 // 6282ed27aca47ef1ffffff + VFNMADD213PD Z9, Z9, K7, Z0 // 62d2b54facc1 + VFNMADD213PD Z25, Z9, K7, Z0 // 6292b54facc1 + VFNMADD213PD Z9, Z3, K7, Z0 // 62d2e54facc1 + VFNMADD213PD Z25, Z3, K7, Z0 // 6292e54facc1 + VFNMADD213PD Z9, Z9, K7, Z26 // 6242b54facd1 + VFNMADD213PD Z25, Z9, K7, Z26 // 6202b54facd1 + VFNMADD213PD Z9, Z3, K7, Z26 // 6242e54facd1 + VFNMADD213PD Z25, Z3, K7, Z26 // 6202e54facd1 + VFNMADD213PD Z17, Z20, K6, Z9 // 6232dd46acc9 + VFNMADD213PD Z0, Z20, K6, Z9 // 6272dd46acc8 + VFNMADD213PD 99(R15)(R15*4), Z20, K6, Z9 // 6212dd46ac8cbf63000000 + VFNMADD213PD 15(DX), Z20, K6, Z9 // 6272dd46ac8a0f000000 + VFNMADD213PD Z17, Z0, K6, Z9 // 6232fd4eacc9 + VFNMADD213PD Z0, Z0, K6, Z9 // 6272fd4eacc8 + VFNMADD213PD 99(R15)(R15*4), Z0, K6, Z9 // 6212fd4eac8cbf63000000 + VFNMADD213PD 15(DX), Z0, K6, Z9 // 6272fd4eac8a0f000000 + VFNMADD213PD Z17, Z20, K6, Z28 // 6222dd46ace1 + VFNMADD213PD Z0, Z20, K6, Z28 // 6262dd46ace0 + VFNMADD213PD 99(R15)(R15*4), Z20, K6, Z28 // 6202dd46aca4bf63000000 + VFNMADD213PD 15(DX), Z20, K6, Z28 // 6262dd46aca20f000000 + VFNMADD213PD Z17, Z0, K6, Z28 // 6222fd4eace1 + VFNMADD213PD Z0, Z0, K6, Z28 // 6262fd4eace0 + VFNMADD213PD 99(R15)(R15*4), Z0, K6, Z28 // 6202fd4eaca4bf63000000 + VFNMADD213PD 15(DX), Z0, K6, Z28 // 6262fd4eaca20f000000 + VFNMADD213PS X14, X7, K3, X9 // 6252450bacce + VFNMADD213PS -7(CX), X7, K3, X9 // 6272450bac89f9ffffff + VFNMADD213PS 15(DX)(BX*4), X7, 
K3, X9 // 6272450bac8c9a0f000000 + VFNMADD213PS Y19, Y3, K7, Y9 // 6232652faccb + VFNMADD213PS -15(R14)(R15*1), Y3, K7, Y9 // 6212652fac8c3ef1ffffff + VFNMADD213PS -15(BX), Y3, K7, Y9 // 6272652fac8bf1ffffff + VFNMADD213PS Z21, Z31, K4, Z17 // 62a20544accd + VFNMADD213PS Z9, Z31, K4, Z17 // 62c20544acc9 + VFNMADD213PS Z21, Z0, K4, Z17 // 62a27d4caccd + VFNMADD213PS Z9, Z0, K4, Z17 // 62c27d4cacc9 + VFNMADD213PS Z21, Z31, K4, Z23 // 62a20544acfd + VFNMADD213PS Z9, Z31, K4, Z23 // 62c20544acf9 + VFNMADD213PS Z21, Z0, K4, Z23 // 62a27d4cacfd + VFNMADD213PS Z9, Z0, K4, Z23 // 62c27d4cacf9 + VFNMADD213PS Z20, Z1, K4, Z6 // 62b2754cacf4 + VFNMADD213PS Z9, Z1, K4, Z6 // 62d2754cacf1 + VFNMADD213PS (CX), Z1, K4, Z6 // 62f2754cac31 + VFNMADD213PS 99(R15), Z1, K4, Z6 // 62d2754cacb763000000 + VFNMADD213PS Z20, Z9, K4, Z6 // 62b2354cacf4 + VFNMADD213PS Z9, Z9, K4, Z6 // 62d2354cacf1 + VFNMADD213PS (CX), Z9, K4, Z6 // 62f2354cac31 + VFNMADD213PS 99(R15), Z9, K4, Z6 // 62d2354cacb763000000 + VFNMADD213PS Z20, Z1, K4, Z9 // 6232754caccc + VFNMADD213PS Z9, Z1, K4, Z9 // 6252754cacc9 + VFNMADD213PS (CX), Z1, K4, Z9 // 6272754cac09 + VFNMADD213PS 99(R15), Z1, K4, Z9 // 6252754cac8f63000000 + VFNMADD213PS Z20, Z9, K4, Z9 // 6232354caccc + VFNMADD213PS Z9, Z9, K4, Z9 // 6252354cacc9 + VFNMADD213PS (CX), Z9, K4, Z9 // 6272354cac09 + VFNMADD213PS 99(R15), Z9, K4, Z9 // 6252354cac8f63000000 + VFNMADD213SD X3, X31, K7, X5 // 62f28507adeb + VFNMADD213SD X11, X1, K2, X21 // 62c2f50aadeb or 62c2f52aadeb or 62c2f54aadeb + VFNMADD213SD 7(SI)(DI*1), X1, K2, X21 // 62e2f50aadac3e07000000 or 62e2f52aadac3e07000000 or 62e2f54aadac3e07000000 + VFNMADD213SD 15(DX)(BX*8), X1, K2, X21 // 62e2f50aadacda0f000000 or 62e2f52aadacda0f000000 or 62e2f54aadacda0f000000 + VFNMADD213SS X30, X0, K5, X13 // 62127d0dadee + VFNMADD213SS X11, X14, K3, X16 // 62c20d0badc3 or 62c20d2badc3 or 62c20d4badc3 + VFNMADD213SS 17(SP), X14, K3, X16 // 62e20d0bad842411000000 or 62e20d2bad842411000000 or 62e20d4bad842411000000 
+ VFNMADD213SS -17(BP)(SI*4), X14, K3, X16 // 62e20d0bad84b5efffffff or 62e20d2bad84b5efffffff or 62e20d4bad84b5efffffff + VFNMADD231PD X8, X19, K4, X14 // 6252e504bcf0 + VFNMADD231PD 99(R15)(R15*8), X19, K4, X14 // 6212e504bcb4ff63000000 + VFNMADD231PD 7(AX)(CX*8), X19, K4, X14 // 6272e504bcb4c807000000 + VFNMADD231PD Y14, Y19, K2, Y23 // 62c2e522bcfe + VFNMADD231PD 7(AX)(CX*4), Y19, K2, Y23 // 62e2e522bcbc8807000000 + VFNMADD231PD 7(AX)(CX*1), Y19, K2, Y23 // 62e2e522bcbc0807000000 + VFNMADD231PD Z7, Z26, K2, Z30 // 6262ad42bcf7 + VFNMADD231PD Z21, Z26, K2, Z30 // 6222ad42bcf5 + VFNMADD231PD Z7, Z22, K2, Z30 // 6262cd42bcf7 + VFNMADD231PD Z21, Z22, K2, Z30 // 6222cd42bcf5 + VFNMADD231PD Z7, Z26, K2, Z5 // 62f2ad42bcef + VFNMADD231PD Z21, Z26, K2, Z5 // 62b2ad42bced + VFNMADD231PD Z7, Z22, K2, Z5 // 62f2cd42bcef + VFNMADD231PD Z21, Z22, K2, Z5 // 62b2cd42bced + VFNMADD231PD Z12, Z14, K3, Z16 // 62c28d4bbcc4 + VFNMADD231PD Z13, Z14, K3, Z16 // 62c28d4bbcc5 + VFNMADD231PD 99(R15)(R15*2), Z14, K3, Z16 // 62828d4bbc847f63000000 + VFNMADD231PD -7(DI), Z14, K3, Z16 // 62e28d4bbc87f9ffffff + VFNMADD231PD Z12, Z13, K3, Z16 // 62c2954bbcc4 + VFNMADD231PD Z13, Z13, K3, Z16 // 62c2954bbcc5 + VFNMADD231PD 99(R15)(R15*2), Z13, K3, Z16 // 6282954bbc847f63000000 + VFNMADD231PD -7(DI), Z13, K3, Z16 // 62e2954bbc87f9ffffff + VFNMADD231PD Z12, Z14, K3, Z25 // 62428d4bbccc + VFNMADD231PD Z13, Z14, K3, Z25 // 62428d4bbccd + VFNMADD231PD 99(R15)(R15*2), Z14, K3, Z25 // 62028d4bbc8c7f63000000 + VFNMADD231PD -7(DI), Z14, K3, Z25 // 62628d4bbc8ff9ffffff + VFNMADD231PD Z12, Z13, K3, Z25 // 6242954bbccc + VFNMADD231PD Z13, Z13, K3, Z25 // 6242954bbccd + VFNMADD231PD 99(R15)(R15*2), Z13, K3, Z25 // 6202954bbc8c7f63000000 + VFNMADD231PD -7(DI), Z13, K3, Z25 // 6262954bbc8ff9ffffff + VFNMADD231PS X23, X26, K3, X8 // 62322d03bcc7 + VFNMADD231PS (AX), X26, K3, X8 // 62722d03bc00 + VFNMADD231PS 7(SI), X26, K3, X8 // 62722d03bc8607000000 + VFNMADD231PS Y16, Y5, K3, Y21 // 62a2552bbce8 + 
VFNMADD231PS (SI), Y5, K3, Y21 // 62e2552bbc2e + VFNMADD231PS 7(SI)(DI*2), Y5, K3, Y21 // 62e2552bbcac7e07000000 + VFNMADD231PS Z27, Z2, K2, Z21 // 62826d4abceb + VFNMADD231PS Z25, Z2, K2, Z21 // 62826d4abce9 + VFNMADD231PS Z27, Z7, K2, Z21 // 6282454abceb + VFNMADD231PS Z25, Z7, K2, Z21 // 6282454abce9 + VFNMADD231PS Z27, Z2, K2, Z9 // 62126d4abccb + VFNMADD231PS Z25, Z2, K2, Z9 // 62126d4abcc9 + VFNMADD231PS Z27, Z7, K2, Z9 // 6212454abccb + VFNMADD231PS Z25, Z7, K2, Z9 // 6212454abcc9 + VFNMADD231PS Z3, Z27, K1, Z23 // 62e22541bcfb + VFNMADD231PS Z0, Z27, K1, Z23 // 62e22541bcf8 + VFNMADD231PS -7(CX)(DX*1), Z27, K1, Z23 // 62e22541bcbc11f9ffffff + VFNMADD231PS -15(R14)(R15*4), Z27, K1, Z23 // 62822541bcbcbef1ffffff + VFNMADD231PS Z3, Z14, K1, Z23 // 62e20d49bcfb + VFNMADD231PS Z0, Z14, K1, Z23 // 62e20d49bcf8 + VFNMADD231PS -7(CX)(DX*1), Z14, K1, Z23 // 62e20d49bcbc11f9ffffff + VFNMADD231PS -15(R14)(R15*4), Z14, K1, Z23 // 62820d49bcbcbef1ffffff + VFNMADD231PS Z3, Z27, K1, Z9 // 62722541bccb + VFNMADD231PS Z0, Z27, K1, Z9 // 62722541bcc8 + VFNMADD231PS -7(CX)(DX*1), Z27, K1, Z9 // 62722541bc8c11f9ffffff + VFNMADD231PS -15(R14)(R15*4), Z27, K1, Z9 // 62122541bc8cbef1ffffff + VFNMADD231PS Z3, Z14, K1, Z9 // 62720d49bccb + VFNMADD231PS Z0, Z14, K1, Z9 // 62720d49bcc8 + VFNMADD231PS -7(CX)(DX*1), Z14, K1, Z9 // 62720d49bc8c11f9ffffff + VFNMADD231PS -15(R14)(R15*4), Z14, K1, Z9 // 62120d49bc8cbef1ffffff + VFNMADD231SD X23, X16, K2, X12 // 6232fd02bde7 + VFNMADD231SD X31, X11, K1, X23 // 6282a509bdff or 6282a529bdff or 6282a549bdff + VFNMADD231SD -7(DI)(R8*1), X11, K1, X23 // 62a2a509bdbc07f9ffffff or 62a2a529bdbc07f9ffffff or 62a2a549bdbc07f9ffffff + VFNMADD231SD (SP), X11, K1, X23 // 62e2a509bd3c24 or 62e2a529bd3c24 or 62e2a549bd3c24 + VFNMADD231SS X0, X14, K7, X24 // 62620d0fbdc0 + VFNMADD231SS X2, X23, K1, X11 // 62724501bdda or 62724521bdda or 62724541bdda + VFNMADD231SS 7(AX), X23, K1, X11 // 62724501bd9807000000 or 62724521bd9807000000 or 62724541bd9807000000 + 
VFNMADD231SS (DI), X23, K1, X11 // 62724501bd1f or 62724521bd1f or 62724541bd1f + VFNMSUB132PD X25, X5, K1, X20 // 6282d5099ee1 + VFNMSUB132PD (BX), X5, K1, X20 // 62e2d5099e23 + VFNMSUB132PD -17(BP)(SI*1), X5, K1, X20 // 62e2d5099ea435efffffff + VFNMSUB132PD Y20, Y21, K1, Y2 // 62b2d5219ed4 + VFNMSUB132PD 17(SP)(BP*8), Y21, K1, Y2 // 62f2d5219e94ec11000000 + VFNMSUB132PD 17(SP)(BP*4), Y21, K1, Y2 // 62f2d5219e94ac11000000 + VFNMSUB132PD Z22, Z8, K7, Z14 // 6232bd4f9ef6 + VFNMSUB132PD Z25, Z8, K7, Z14 // 6212bd4f9ef1 + VFNMSUB132PD Z22, Z24, K7, Z14 // 6232bd479ef6 + VFNMSUB132PD Z25, Z24, K7, Z14 // 6212bd479ef1 + VFNMSUB132PD Z22, Z8, K7, Z7 // 62b2bd4f9efe + VFNMSUB132PD Z25, Z8, K7, Z7 // 6292bd4f9ef9 + VFNMSUB132PD Z22, Z24, K7, Z7 // 62b2bd479efe + VFNMSUB132PD Z25, Z24, K7, Z7 // 6292bd479ef9 + VFNMSUB132PD Z0, Z6, K2, Z1 // 62f2cd4a9ec8 + VFNMSUB132PD Z8, Z6, K2, Z1 // 62d2cd4a9ec8 + VFNMSUB132PD 15(DX)(BX*1), Z6, K2, Z1 // 62f2cd4a9e8c1a0f000000 + VFNMSUB132PD -7(CX)(DX*2), Z6, K2, Z1 // 62f2cd4a9e8c51f9ffffff + VFNMSUB132PD Z0, Z2, K2, Z1 // 62f2ed4a9ec8 + VFNMSUB132PD Z8, Z2, K2, Z1 // 62d2ed4a9ec8 + VFNMSUB132PD 15(DX)(BX*1), Z2, K2, Z1 // 62f2ed4a9e8c1a0f000000 + VFNMSUB132PD -7(CX)(DX*2), Z2, K2, Z1 // 62f2ed4a9e8c51f9ffffff + VFNMSUB132PD Z0, Z6, K2, Z16 // 62e2cd4a9ec0 + VFNMSUB132PD Z8, Z6, K2, Z16 // 62c2cd4a9ec0 + VFNMSUB132PD 15(DX)(BX*1), Z6, K2, Z16 // 62e2cd4a9e841a0f000000 + VFNMSUB132PD -7(CX)(DX*2), Z6, K2, Z16 // 62e2cd4a9e8451f9ffffff + VFNMSUB132PD Z0, Z2, K2, Z16 // 62e2ed4a9ec0 + VFNMSUB132PD Z8, Z2, K2, Z16 // 62c2ed4a9ec0 + VFNMSUB132PD 15(DX)(BX*1), Z2, K2, Z16 // 62e2ed4a9e841a0f000000 + VFNMSUB132PD -7(CX)(DX*2), Z2, K2, Z16 // 62e2ed4a9e8451f9ffffff + VFNMSUB132PS X13, X9, K4, X0 // 62d2350c9ec5 + VFNMSUB132PS 15(R8)(R14*4), X9, K4, X0 // 6292350c9e84b00f000000 + VFNMSUB132PS -7(CX)(DX*4), X9, K4, X0 // 62f2350c9e8491f9ffffff + VFNMSUB132PS Y6, Y31, K1, Y6 // 62f205219ef6 + VFNMSUB132PS 7(SI)(DI*4), Y31, K1, Y6 // 
62f205219eb4be07000000 + VFNMSUB132PS -7(DI)(R8*2), Y31, K1, Y6 // 62b205219eb447f9ffffff + VFNMSUB132PS Z11, Z14, K3, Z15 // 62520d4b9efb + VFNMSUB132PS Z5, Z14, K3, Z15 // 62720d4b9efd + VFNMSUB132PS Z11, Z27, K3, Z15 // 625225439efb + VFNMSUB132PS Z5, Z27, K3, Z15 // 627225439efd + VFNMSUB132PS Z11, Z14, K3, Z12 // 62520d4b9ee3 + VFNMSUB132PS Z5, Z14, K3, Z12 // 62720d4b9ee5 + VFNMSUB132PS Z11, Z27, K3, Z12 // 625225439ee3 + VFNMSUB132PS Z5, Z27, K3, Z12 // 627225439ee5 + VFNMSUB132PS Z2, Z5, K4, Z13 // 6272554c9eea + VFNMSUB132PS -17(BP), Z5, K4, Z13 // 6272554c9eadefffffff + VFNMSUB132PS -15(R14)(R15*8), Z5, K4, Z13 // 6212554c9eacfef1ffffff + VFNMSUB132PS Z2, Z23, K4, Z13 // 627245449eea + VFNMSUB132PS -17(BP), Z23, K4, Z13 // 627245449eadefffffff + VFNMSUB132PS -15(R14)(R15*8), Z23, K4, Z13 // 621245449eacfef1ffffff + VFNMSUB132PS Z2, Z5, K4, Z14 // 6272554c9ef2 + VFNMSUB132PS -17(BP), Z5, K4, Z14 // 6272554c9eb5efffffff + VFNMSUB132PS -15(R14)(R15*8), Z5, K4, Z14 // 6212554c9eb4fef1ffffff + VFNMSUB132PS Z2, Z23, K4, Z14 // 627245449ef2 + VFNMSUB132PS -17(BP), Z23, K4, Z14 // 627245449eb5efffffff + VFNMSUB132PS -15(R14)(R15*8), Z23, K4, Z14 // 621245449eb4fef1ffffff + VFNMSUB132SD X9, X8, K5, X2 // 62d2bd0d9fd1 + VFNMSUB132SD X11, X31, K7, X2 // 62d285079fd3 or 62d285279fd3 or 62d285479fd3 + VFNMSUB132SD -7(CX), X31, K7, X2 // 62f285079f91f9ffffff or 62f285279f91f9ffffff or 62f285479f91f9ffffff + VFNMSUB132SD 15(DX)(BX*4), X31, K7, X2 // 62f285079f949a0f000000 or 62f285279f949a0f000000 or 62f285479f949a0f000000 + VFNMSUB132SS X14, X5, K7, X22 // 62c2550f9ff6 + VFNMSUB132SS X7, X17, K6, X0 // 62f275069fc7 or 62f275269fc7 or 62f275469fc7 + VFNMSUB132SS 99(R15)(R15*1), X17, K6, X0 // 629275069f843f63000000 or 629275269f843f63000000 or 629275469f843f63000000 + VFNMSUB132SS (DX), X17, K6, X0 // 62f275069f02 or 62f275269f02 or 62f275469f02 + VFNMSUB213PD X0, X11, K3, X15 // 6272a50baef8 + VFNMSUB213PD (R8), X11, K3, X15 // 6252a50bae38 + VFNMSUB213PD 15(DX)(BX*2), 
X11, K3, X15 // 6272a50baebc5a0f000000 + VFNMSUB213PD Y7, Y19, K7, Y11 // 6272e527aedf + VFNMSUB213PD 17(SP), Y19, K7, Y11 // 6272e527ae9c2411000000 + VFNMSUB213PD -17(BP)(SI*4), Y19, K7, Y11 // 6272e527ae9cb5efffffff + VFNMSUB213PD Z28, Z26, K4, Z6 // 6292ad44aef4 + VFNMSUB213PD Z6, Z26, K4, Z6 // 62f2ad44aef6 + VFNMSUB213PD Z28, Z14, K4, Z6 // 62928d4caef4 + VFNMSUB213PD Z6, Z14, K4, Z6 // 62f28d4caef6 + VFNMSUB213PD Z28, Z26, K4, Z14 // 6212ad44aef4 + VFNMSUB213PD Z6, Z26, K4, Z14 // 6272ad44aef6 + VFNMSUB213PD Z28, Z14, K4, Z14 // 62128d4caef4 + VFNMSUB213PD Z6, Z14, K4, Z14 // 62728d4caef6 + VFNMSUB213PD Z3, Z26, K4, Z13 // 6272ad44aeeb + VFNMSUB213PD Z0, Z26, K4, Z13 // 6272ad44aee8 + VFNMSUB213PD 17(SP)(BP*2), Z26, K4, Z13 // 6272ad44aeac6c11000000 + VFNMSUB213PD -7(DI)(R8*4), Z26, K4, Z13 // 6232ad44aeac87f9ffffff + VFNMSUB213PD Z3, Z3, K4, Z13 // 6272e54caeeb + VFNMSUB213PD Z0, Z3, K4, Z13 // 6272e54caee8 + VFNMSUB213PD 17(SP)(BP*2), Z3, K4, Z13 // 6272e54caeac6c11000000 + VFNMSUB213PD -7(DI)(R8*4), Z3, K4, Z13 // 6232e54caeac87f9ffffff + VFNMSUB213PD Z3, Z26, K4, Z21 // 62e2ad44aeeb + VFNMSUB213PD Z0, Z26, K4, Z21 // 62e2ad44aee8 + VFNMSUB213PD 17(SP)(BP*2), Z26, K4, Z21 // 62e2ad44aeac6c11000000 + VFNMSUB213PD -7(DI)(R8*4), Z26, K4, Z21 // 62a2ad44aeac87f9ffffff + VFNMSUB213PD Z3, Z3, K4, Z21 // 62e2e54caeeb + VFNMSUB213PD Z0, Z3, K4, Z21 // 62e2e54caee8 + VFNMSUB213PD 17(SP)(BP*2), Z3, K4, Z21 // 62e2e54caeac6c11000000 + VFNMSUB213PD -7(DI)(R8*4), Z3, K4, Z21 // 62a2e54caeac87f9ffffff + VFNMSUB213PS X27, X8, K7, X18 // 62823d0faed3 + VFNMSUB213PS 17(SP)(BP*1), X8, K7, X18 // 62e23d0fae942c11000000 + VFNMSUB213PS -7(CX)(DX*8), X8, K7, X18 // 62e23d0fae94d1f9ffffff + VFNMSUB213PS Y3, Y0, K2, Y6 // 62f27d2aaef3 + VFNMSUB213PS 7(AX), Y0, K2, Y6 // 62f27d2aaeb007000000 + VFNMSUB213PS (DI), Y0, K2, Y6 // 62f27d2aae37 + VFNMSUB213PS Z3, Z11, K5, Z21 // 62e2254daeeb + VFNMSUB213PS Z12, Z11, K5, Z21 // 62c2254daeec + VFNMSUB213PS Z3, Z25, K5, Z21 // 62e23545aeeb 
+ VFNMSUB213PS Z12, Z25, K5, Z21 // 62c23545aeec + VFNMSUB213PS Z3, Z11, K5, Z13 // 6272254daeeb + VFNMSUB213PS Z12, Z11, K5, Z13 // 6252254daeec + VFNMSUB213PS Z3, Z25, K5, Z13 // 62723545aeeb + VFNMSUB213PS Z12, Z25, K5, Z13 // 62523545aeec + VFNMSUB213PS Z23, Z23, K3, Z27 // 62224543aedf + VFNMSUB213PS Z6, Z23, K3, Z27 // 62624543aede + VFNMSUB213PS 15(R8), Z23, K3, Z27 // 62424543ae980f000000 + VFNMSUB213PS (BP), Z23, K3, Z27 // 62624543ae5d00 + VFNMSUB213PS Z23, Z5, K3, Z27 // 6222554baedf + VFNMSUB213PS Z6, Z5, K3, Z27 // 6262554baede + VFNMSUB213PS 15(R8), Z5, K3, Z27 // 6242554bae980f000000 + VFNMSUB213PS (BP), Z5, K3, Z27 // 6262554bae5d00 + VFNMSUB213PS Z23, Z23, K3, Z15 // 62324543aeff + VFNMSUB213PS Z6, Z23, K3, Z15 // 62724543aefe + VFNMSUB213PS 15(R8), Z23, K3, Z15 // 62524543aeb80f000000 + VFNMSUB213PS (BP), Z23, K3, Z15 // 62724543ae7d00 + VFNMSUB213PS Z23, Z5, K3, Z15 // 6232554baeff + VFNMSUB213PS Z6, Z5, K3, Z15 // 6272554baefe + VFNMSUB213PS 15(R8), Z5, K3, Z15 // 6252554baeb80f000000 + VFNMSUB213PS (BP), Z5, K3, Z15 // 6272554bae7d00 + VFNMSUB213SD X18, X3, K4, X25 // 6222e50cafca + VFNMSUB213SD X15, X28, K2, X15 // 62529d02afff or 62529d22afff or 62529d42afff + VFNMSUB213SD 99(R15)(R15*8), X28, K2, X15 // 62129d02afbcff63000000 or 62129d22afbcff63000000 or 62129d42afbcff63000000 + VFNMSUB213SD 7(AX)(CX*8), X28, K2, X15 // 62729d02afbcc807000000 or 62729d22afbcc807000000 or 62729d42afbcc807000000 + VFNMSUB213SS X8, X13, K2, X7 // 62d2150aaff8 + VFNMSUB213SS X0, X7, K3, X24 // 6262450bafc0 or 6262452bafc0 or 6262454bafc0 + VFNMSUB213SS -17(BP)(SI*8), X7, K3, X24 // 6262450baf84f5efffffff or 6262452baf84f5efffffff or 6262454baf84f5efffffff + VFNMSUB213SS (R15), X7, K3, X24 // 6242450baf07 or 6242452baf07 or 6242454baf07 + VFNMSUB231PD X11, X1, K3, X22 // 62c2f50bbef3 + VFNMSUB231PD -17(BP)(SI*2), X1, K3, X22 // 62e2f50bbeb475efffffff + VFNMSUB231PD 7(AX)(CX*2), X1, K3, X22 // 62e2f50bbeb44807000000 + VFNMSUB231PD Y12, Y20, K3, Y5 // 62d2dd23beec 
+ VFNMSUB231PD 99(R15)(R15*1), Y20, K3, Y5 // 6292dd23beac3f63000000 + VFNMSUB231PD (DX), Y20, K3, Y5 // 62f2dd23be2a + VFNMSUB231PD Z16, Z21, K2, Z8 // 6232d542bec0 + VFNMSUB231PD Z13, Z21, K2, Z8 // 6252d542bec5 + VFNMSUB231PD Z16, Z5, K2, Z8 // 6232d54abec0 + VFNMSUB231PD Z13, Z5, K2, Z8 // 6252d54abec5 + VFNMSUB231PD Z16, Z21, K2, Z28 // 6222d542bee0 + VFNMSUB231PD Z13, Z21, K2, Z28 // 6242d542bee5 + VFNMSUB231PD Z16, Z5, K2, Z28 // 6222d54abee0 + VFNMSUB231PD Z13, Z5, K2, Z28 // 6242d54abee5 + VFNMSUB231PD Z6, Z22, K1, Z12 // 6272cd41bee6 + VFNMSUB231PD Z8, Z22, K1, Z12 // 6252cd41bee0 + VFNMSUB231PD 15(R8)(R14*8), Z22, K1, Z12 // 6212cd41bea4f00f000000 + VFNMSUB231PD -15(R14)(R15*2), Z22, K1, Z12 // 6212cd41bea47ef1ffffff + VFNMSUB231PD Z6, Z11, K1, Z12 // 6272a549bee6 + VFNMSUB231PD Z8, Z11, K1, Z12 // 6252a549bee0 + VFNMSUB231PD 15(R8)(R14*8), Z11, K1, Z12 // 6212a549bea4f00f000000 + VFNMSUB231PD -15(R14)(R15*2), Z11, K1, Z12 // 6212a549bea47ef1ffffff + VFNMSUB231PD Z6, Z22, K1, Z27 // 6262cd41bede + VFNMSUB231PD Z8, Z22, K1, Z27 // 6242cd41bed8 + VFNMSUB231PD 15(R8)(R14*8), Z22, K1, Z27 // 6202cd41be9cf00f000000 + VFNMSUB231PD -15(R14)(R15*2), Z22, K1, Z27 // 6202cd41be9c7ef1ffffff + VFNMSUB231PD Z6, Z11, K1, Z27 // 6262a549bede + VFNMSUB231PD Z8, Z11, K1, Z27 // 6242a549bed8 + VFNMSUB231PD 15(R8)(R14*8), Z11, K1, Z27 // 6202a549be9cf00f000000 + VFNMSUB231PD -15(R14)(R15*2), Z11, K1, Z27 // 6202a549be9c7ef1ffffff + VFNMSUB231PS X8, X7, K2, X6 // 62d2450abef0 + VFNMSUB231PS 15(R8)(R14*1), X7, K2, X6 // 6292450abeb4300f000000 + VFNMSUB231PS 15(R8)(R14*2), X7, K2, X6 // 6292450abeb4700f000000 + VFNMSUB231PS Y28, Y5, K1, Y3 // 62925529bedc + VFNMSUB231PS -17(BP)(SI*8), Y5, K1, Y3 // 62f25529be9cf5efffffff + VFNMSUB231PS (R15), Y5, K1, Y3 // 62d25529be1f + VFNMSUB231PS Z9, Z12, K7, Z25 // 62421d4fbec9 + VFNMSUB231PS Z12, Z12, K7, Z25 // 62421d4fbecc + VFNMSUB231PS Z9, Z17, K7, Z25 // 62427547bec9 + VFNMSUB231PS Z12, Z17, K7, Z25 // 62427547becc + VFNMSUB231PS 
Z9, Z12, K7, Z12 // 62521d4fbee1 + VFNMSUB231PS Z12, Z12, K7, Z12 // 62521d4fbee4 + VFNMSUB231PS Z9, Z17, K7, Z12 // 62527547bee1 + VFNMSUB231PS Z12, Z17, K7, Z12 // 62527547bee4 + VFNMSUB231PS Z8, Z3, K1, Z6 // 62d26549bef0 + VFNMSUB231PS Z2, Z3, K1, Z6 // 62f26549bef2 + VFNMSUB231PS -15(R14)(R15*1), Z3, K1, Z6 // 62926549beb43ef1ffffff + VFNMSUB231PS -15(BX), Z3, K1, Z6 // 62f26549beb3f1ffffff + VFNMSUB231PS Z8, Z21, K1, Z6 // 62d25541bef0 + VFNMSUB231PS Z2, Z21, K1, Z6 // 62f25541bef2 + VFNMSUB231PS -15(R14)(R15*1), Z21, K1, Z6 // 62925541beb43ef1ffffff + VFNMSUB231PS -15(BX), Z21, K1, Z6 // 62f25541beb3f1ffffff + VFNMSUB231PS Z8, Z3, K1, Z25 // 62426549bec8 + VFNMSUB231PS Z2, Z3, K1, Z25 // 62626549beca + VFNMSUB231PS -15(R14)(R15*1), Z3, K1, Z25 // 62026549be8c3ef1ffffff + VFNMSUB231PS -15(BX), Z3, K1, Z25 // 62626549be8bf1ffffff + VFNMSUB231PS Z8, Z21, K1, Z25 // 62425541bec8 + VFNMSUB231PS Z2, Z21, K1, Z25 // 62625541beca + VFNMSUB231PS -15(R14)(R15*1), Z21, K1, Z25 // 62025541be8c3ef1ffffff + VFNMSUB231PS -15(BX), Z21, K1, Z25 // 62625541be8bf1ffffff + VFNMSUB231SD X28, X3, K1, X31 // 6202e509bffc + VFNMSUB231SD X7, X24, K1, X20 // 62e2bd01bfe7 or 62e2bd21bfe7 or 62e2bd41bfe7 + VFNMSUB231SD (AX), X24, K1, X20 // 62e2bd01bf20 or 62e2bd21bf20 or 62e2bd41bf20 + VFNMSUB231SD 7(SI), X24, K1, X20 // 62e2bd01bfa607000000 or 62e2bd21bfa607000000 or 62e2bd41bfa607000000 + VFNMSUB231SS X12, X16, K7, X20 // 62c27d07bfe4 + VFNMSUB231SS X28, X17, K2, X6 // 62927502bff4 or 62927522bff4 or 62927542bff4 + VFNMSUB231SS 7(SI)(DI*8), X17, K2, X6 // 62f27502bfb4fe07000000 or 62f27522bfb4fe07000000 or 62f27542bfb4fe07000000 + VFNMSUB231SS -15(R14), X17, K2, X6 // 62d27502bfb6f1ffffff or 62d27522bfb6f1ffffff or 62d27542bfb6f1ffffff + VGATHERDPD (AX)(X4*1), K3, X6 // 62f2fd0b923420 + VGATHERDPD (BP)(X10*2), K3, X6 // 62b2fd0b92745500 + VGATHERDPD (R10)(X29*8), K3, X6 // 6292fd039234ea + VGATHERDPD (DX)(X10*4), K7, Y22 // 62a2fd2f923492 + VGATHERDPD (SP)(X4*2), K7, Y22 // 
62e2fd2f923464 + VGATHERDPD (R14)(X29*8), K7, Y22 // 6282fd279234ee + VGATHERDPD (R10)(Y29*8), K4, Z0 // 6292fd449204ea + VGATHERDPD (SP)(Y4*2), K4, Z0 // 62f2fd4c920464 + VGATHERDPD (DX)(Y10*4), K4, Z0 // 62b2fd4c920492 + VGATHERDPD (R10)(Y29*8), K4, Z6 // 6292fd449234ea + VGATHERDPD (SP)(Y4*2), K4, Z6 // 62f2fd4c923464 + VGATHERDPD (DX)(Y10*4), K4, Z6 // 62b2fd4c923492 + VGATHERDPS (AX)(X4*1), K4, X0 // 62f27d0c920420 + VGATHERDPS (BP)(X10*2), K4, X0 // 62b27d0c92445500 + VGATHERDPS (R10)(X29*8), K4, X0 // 62927d049204ea + VGATHERDPS (R14)(Y29*8), K7, Y13 // 62127d27922cee + VGATHERDPS (AX)(Y4*1), K7, Y13 // 62727d2f922c20 + VGATHERDPS (BP)(Y10*2), K7, Y13 // 62327d2f926c5500 + VGATHERDPS (DX)(Z10*4), K2, Z20 // 62a27d4a922492 + VGATHERDPS (AX)(Z4*1), K2, Z20 // 62e27d4a922420 + VGATHERDPS (SP)(Z4*2), K2, Z20 // 62e27d4a922464 + VGATHERDPS (DX)(Z10*4), K2, Z28 // 62227d4a922492 + VGATHERDPS (AX)(Z4*1), K2, Z28 // 62627d4a922420 + VGATHERDPS (SP)(Z4*2), K2, Z28 // 62627d4a922464 + VGATHERQPD (AX)(X4*1), K2, X11 // 6272fd0a931c20 + VGATHERQPD (BP)(X10*2), K2, X11 // 6232fd0a935c5500 + VGATHERQPD (R10)(X29*8), K2, X11 // 6212fd02931cea + VGATHERQPD (R10)(Y29*8), K1, Y12 // 6212fd219324ea + VGATHERQPD (SP)(Y4*2), K1, Y12 // 6272fd29932464 + VGATHERQPD (DX)(Y10*4), K1, Y12 // 6232fd29932492 + VGATHERQPD (DX)(Z10*4), K2, Z3 // 62b2fd4a931c92 + VGATHERQPD (AX)(Z4*1), K2, Z3 // 62f2fd4a931c20 + VGATHERQPD (SP)(Z4*2), K2, Z3 // 62f2fd4a931c64 + VGATHERQPD (DX)(Z10*4), K2, Z30 // 6222fd4a933492 + VGATHERQPD (AX)(Z4*1), K2, Z30 // 6262fd4a933420 + VGATHERQPD (SP)(Z4*2), K2, Z30 // 6262fd4a933464 + VGATHERQPS (DX)(X10*4), K1, X16 // 62a27d09930492 + VGATHERQPS (SP)(X4*2), K1, X16 // 62e27d09930464 + VGATHERQPS (R14)(X29*8), K1, X16 // 62827d019304ee + VGATHERQPS (R14)(Y29*8), K7, X6 // 62927d279334ee + VGATHERQPS (AX)(Y4*1), K7, X6 // 62f27d2f933420 + VGATHERQPS (BP)(Y10*2), K7, X6 // 62b27d2f93745500 + VGATHERQPS (BP)(Z10*2), K1, Y1 // 62b27d49934c5500 + VGATHERQPS 
(R10)(Z29*8), K1, Y1 // 62927d41930cea + VGATHERQPS (R14)(Z29*8), K1, Y1 // 62927d41930cee + VGETEXPPD X22, K1, X6 // 62b2fd0942f6 + VGETEXPPD (CX), K1, X6 // 62f2fd094231 + VGETEXPPD 99(R15), K1, X6 // 62d2fd0942b763000000 + VGETEXPPD Y17, K1, Y14 // 6232fd2942f1 + VGETEXPPD -7(DI)(R8*1), K1, Y14 // 6232fd2942b407f9ffffff + VGETEXPPD (SP), K1, Y14 // 6272fd29423424 + VGETEXPPD Z12, K7, Z9 // 6252fd4f42cc + VGETEXPPD Z22, K7, Z9 // 6232fd4f42ce + VGETEXPPD Z12, K7, Z19 // 62c2fd4f42dc + VGETEXPPD Z22, K7, Z19 // 62a2fd4f42de + VGETEXPPD Z18, K2, Z11 // 6232fd4a42da + VGETEXPPD Z24, K2, Z11 // 6212fd4a42d8 + VGETEXPPD 17(SP)(BP*8), K2, Z11 // 6272fd4a429cec11000000 + VGETEXPPD 17(SP)(BP*4), K2, Z11 // 6272fd4a429cac11000000 + VGETEXPPD Z18, K2, Z5 // 62b2fd4a42ea + VGETEXPPD Z24, K2, Z5 // 6292fd4a42e8 + VGETEXPPD 17(SP)(BP*8), K2, Z5 // 62f2fd4a42acec11000000 + VGETEXPPD 17(SP)(BP*4), K2, Z5 // 62f2fd4a42acac11000000 + VGETEXPPS X16, K4, X12 // 62327d0c42e0 + VGETEXPPS 99(R15)(R15*2), K4, X12 // 62127d0c42a47f63000000 + VGETEXPPS -7(DI), K4, X12 // 62727d0c42a7f9ffffff + VGETEXPPS Y9, K1, Y7 // 62d27d2942f9 + VGETEXPPS -7(CX), K1, Y7 // 62f27d2942b9f9ffffff + VGETEXPPS 15(DX)(BX*4), K1, Y7 // 62f27d2942bc9a0f000000 + VGETEXPPS Z7, K3, Z2 // 62f27d4b42d7 + VGETEXPPS Z13, K3, Z2 // 62d27d4b42d5 + VGETEXPPS Z7, K3, Z21 // 62e27d4b42ef + VGETEXPPS Z13, K3, Z21 // 62c27d4b42ed + VGETEXPPS Z6, K4, Z6 // 62f27d4c42f6 + VGETEXPPS Z22, K4, Z6 // 62b27d4c42f6 + VGETEXPPS 7(SI)(DI*4), K4, Z6 // 62f27d4c42b4be07000000 + VGETEXPPS -7(DI)(R8*2), K4, Z6 // 62b27d4c42b447f9ffffff + VGETEXPPS Z6, K4, Z16 // 62e27d4c42c6 + VGETEXPPS Z22, K4, Z16 // 62a27d4c42c6 + VGETEXPPS 7(SI)(DI*4), K4, Z16 // 62e27d4c4284be07000000 + VGETEXPPS -7(DI)(R8*2), K4, Z16 // 62a27d4c428447f9ffffff + VGETEXPSD X15, X8, K5, X28 // 6242bd0d43e7 + VGETEXPSD X19, X1, K7, X11 // 6232f50f43db or 6232f52f43db or 6232f54f43db + VGETEXPSD 15(R8)(R14*4), X1, K7, X11 // 6212f50f439cb00f000000 or 
6212f52f439cb00f000000 or 6212f54f439cb00f000000 + VGETEXPSD -7(CX)(DX*4), X1, K7, X11 // 6272f50f439c91f9ffffff or 6272f52f439c91f9ffffff or 6272f54f439c91f9ffffff + VGETEXPSS X14, X2, K7, X13 // 62526d0f43ee + VGETEXPSS X25, X0, K6, X0 // 62927d0e43c1 or 62927d2e43c1 or 62927d4e43c1 + VGETEXPSS -7(DI)(R8*1), X0, K6, X0 // 62b27d0e438407f9ffffff or 62b27d2e438407f9ffffff or 62b27d4e438407f9ffffff + VGETEXPSS (SP), X0, K6, X0 // 62f27d0e430424 or 62f27d2e430424 or 62f27d4e430424 + VGETMANTPD $15, X17, K3, X11 // 6233fd0b26d90f + VGETMANTPD $15, -7(CX)(DX*1), K3, X11 // 6273fd0b269c11f9ffffff0f + VGETMANTPD $15, -15(R14)(R15*4), K3, X11 // 6213fd0b269cbef1ffffff0f + VGETMANTPD $0, Y8, K7, Y31 // 6243fd2f26f800 + VGETMANTPD $0, 99(R15)(R15*8), K7, Y31 // 6203fd2f26bcff6300000000 + VGETMANTPD $0, 7(AX)(CX*8), K7, Y31 // 6263fd2f26bcc80700000000 + VGETMANTPD $1, Z13, K4, Z1 // 62d3fd4c26cd01 + VGETMANTPD $1, Z13, K4, Z15 // 6253fd4c26fd01 + VGETMANTPD $2, Z22, K4, Z18 // 62a3fd4c26d602 + VGETMANTPD $2, Z7, K4, Z18 // 62e3fd4c26d702 + VGETMANTPD $2, 17(SP), K4, Z18 // 62e3fd4c2694241100000002 + VGETMANTPD $2, -17(BP)(SI*4), K4, Z18 // 62e3fd4c2694b5efffffff02 + VGETMANTPD $2, Z22, K4, Z8 // 6233fd4c26c602 + VGETMANTPD $2, Z7, K4, Z8 // 6273fd4c26c702 + VGETMANTPD $2, 17(SP), K4, Z8 // 6273fd4c2684241100000002 + VGETMANTPD $2, -17(BP)(SI*4), K4, Z8 // 6273fd4c2684b5efffffff02 + VGETMANTPS $3, X11, K7, X18 // 62c37d0f26d303 + VGETMANTPS $3, 15(DX)(BX*1), K7, X18 // 62e37d0f26941a0f00000003 + VGETMANTPS $3, -7(CX)(DX*2), K7, X18 // 62e37d0f269451f9ffffff03 + VGETMANTPS $4, Y28, K2, Y1 // 62937d2a26cc04 + VGETMANTPS $4, (AX), K2, Y1 // 62f37d2a260804 + VGETMANTPS $4, 7(SI), K2, Y1 // 62f37d2a268e0700000004 + VGETMANTPS $5, Z20, K5, Z2 // 62b37d4d26d405 + VGETMANTPS $5, Z9, K5, Z2 // 62d37d4d26d105 + VGETMANTPS $5, Z20, K5, Z31 // 62237d4d26fc05 + VGETMANTPS $5, Z9, K5, Z31 // 62437d4d26f905 + VGETMANTPS $6, Z12, K3, Z1 // 62d37d4b26cc06 + VGETMANTPS $6, Z16, K3, Z1 // 
62b37d4b26c806 + VGETMANTPS $6, 7(AX), K3, Z1 // 62f37d4b26880700000006 + VGETMANTPS $6, (DI), K3, Z1 // 62f37d4b260f06 + VGETMANTPS $6, Z12, K3, Z3 // 62d37d4b26dc06 + VGETMANTPS $6, Z16, K3, Z3 // 62b37d4b26d806 + VGETMANTPS $6, 7(AX), K3, Z3 // 62f37d4b26980700000006 + VGETMANTPS $6, (DI), K3, Z3 // 62f37d4b261f06 + VGETMANTSD $7, X24, X2, K4, X9 // 6213ed0c27c807 + VGETMANTSD $8, X27, X2, K2, X2 // 6293ed0a27d308 or 6293ed2a27d308 or 6293ed4a27d308 + VGETMANTSD $8, (R8), X2, K2, X2 // 62d3ed0a271008 or 62d3ed2a271008 or 62d3ed4a271008 + VGETMANTSD $8, 15(DX)(BX*2), X2, K2, X2 // 62f3ed0a27945a0f00000008 or 62f3ed2a27945a0f00000008 or 62f3ed4a27945a0f00000008 + VGETMANTSS $9, X30, X22, K2, X26 // 62034d0227d609 + VGETMANTSS $10, X15, X11, K3, X3 // 62d3250b27df0a or 62d3252b27df0a or 62d3254b27df0a + VGETMANTSS $10, -7(CX), X11, K3, X3 // 62f3250b2799f9ffffff0a or 62f3252b2799f9ffffff0a or 62f3254b2799f9ffffff0a + VGETMANTSS $10, 15(DX)(BX*4), X11, K3, X3 // 62f3250b279c9a0f0000000a or 62f3252b279c9a0f0000000a or 62f3254b279c9a0f0000000a + VINSERTF32X4 $0, X9, Y9, K1, Y2 // 62d3352918d100 + VINSERTF32X4 $0, 15(R8)(R14*8), Y9, K1, Y2 // 629335291894f00f00000000 + VINSERTF32X4 $0, -15(R14)(R15*2), Y9, K1, Y2 // 6293352918947ef1ffffff00 + VINSERTF32X4 $0, X26, Z20, K7, Z16 // 62835d4718c200 + VINSERTF32X4 $0, -15(R14)(R15*1), Z20, K7, Z16 // 62835d4718843ef1ffffff00 + VINSERTF32X4 $0, -15(BX), Z20, K7, Z16 // 62e35d471883f1ffffff00 + VINSERTF32X4 $0, X26, Z0, K7, Z16 // 62837d4f18c200 + VINSERTF32X4 $0, -15(R14)(R15*1), Z0, K7, Z16 // 62837d4f18843ef1ffffff00 + VINSERTF32X4 $0, -15(BX), Z0, K7, Z16 // 62e37d4f1883f1ffffff00 + VINSERTF32X4 $0, X26, Z20, K7, Z9 // 62135d4718ca00 + VINSERTF32X4 $0, -15(R14)(R15*1), Z20, K7, Z9 // 62135d47188c3ef1ffffff00 + VINSERTF32X4 $0, -15(BX), Z20, K7, Z9 // 62735d47188bf1ffffff00 + VINSERTF32X4 $0, X26, Z0, K7, Z9 // 62137d4f18ca00 + VINSERTF32X4 $0, -15(R14)(R15*1), Z0, K7, Z9 // 62137d4f188c3ef1ffffff00 + VINSERTF32X4 $0, 
-15(BX), Z0, K7, Z9 // 62737d4f188bf1ffffff00 + VINSERTF64X4 $1, Y30, Z9, K3, Z0 // 6293b54b1ac601 + VINSERTF64X4 $1, -17(BP)(SI*2), Z9, K3, Z0 // 62f3b54b1a8475efffffff01 + VINSERTF64X4 $1, 7(AX)(CX*2), Z9, K3, Z0 // 62f3b54b1a84480700000001 + VINSERTF64X4 $1, Y30, Z3, K3, Z0 // 6293e54b1ac601 + VINSERTF64X4 $1, -17(BP)(SI*2), Z3, K3, Z0 // 62f3e54b1a8475efffffff01 + VINSERTF64X4 $1, 7(AX)(CX*2), Z3, K3, Z0 // 62f3e54b1a84480700000001 + VINSERTF64X4 $1, Y30, Z9, K3, Z26 // 6203b54b1ad601 + VINSERTF64X4 $1, -17(BP)(SI*2), Z9, K3, Z26 // 6263b54b1a9475efffffff01 + VINSERTF64X4 $1, 7(AX)(CX*2), Z9, K3, Z26 // 6263b54b1a94480700000001 + VINSERTF64X4 $1, Y30, Z3, K3, Z26 // 6203e54b1ad601 + VINSERTF64X4 $1, -17(BP)(SI*2), Z3, K3, Z26 // 6263e54b1a9475efffffff01 + VINSERTF64X4 $1, 7(AX)(CX*2), Z3, K3, Z26 // 6263e54b1a94480700000001 + VINSERTI32X4 $0, X31, Y7, K4, Y26 // 6203452c38d700 + VINSERTI32X4 $0, 17(SP)(BP*8), Y7, K4, Y26 // 6263452c3894ec1100000000 + VINSERTI32X4 $0, 17(SP)(BP*4), Y7, K4, Y26 // 6263452c3894ac1100000000 + VINSERTI32X4 $2, X16, Z9, K5, Z9 // 6233354d38c802 + VINSERTI32X4 $2, 7(SI)(DI*4), Z9, K5, Z9 // 6273354d388cbe0700000002 + VINSERTI32X4 $2, -7(DI)(R8*2), Z9, K5, Z9 // 6233354d388c47f9ffffff02 + VINSERTI32X4 $2, X16, Z28, K5, Z9 // 62331d4538c802 + VINSERTI32X4 $2, 7(SI)(DI*4), Z28, K5, Z9 // 62731d45388cbe0700000002 + VINSERTI32X4 $2, -7(DI)(R8*2), Z28, K5, Z9 // 62331d45388c47f9ffffff02 + VINSERTI32X4 $2, X16, Z9, K5, Z25 // 6223354d38c802 + VINSERTI32X4 $2, 7(SI)(DI*4), Z9, K5, Z25 // 6263354d388cbe0700000002 + VINSERTI32X4 $2, -7(DI)(R8*2), Z9, K5, Z25 // 6223354d388c47f9ffffff02 + VINSERTI32X4 $2, X16, Z28, K5, Z25 // 62231d4538c802 + VINSERTI32X4 $2, 7(SI)(DI*4), Z28, K5, Z25 // 62631d45388cbe0700000002 + VINSERTI32X4 $2, -7(DI)(R8*2), Z28, K5, Z25 // 62231d45388c47f9ffffff02 + VINSERTI64X4 $1, Y31, Z6, K3, Z21 // 6283cd4b3aef01 + VINSERTI64X4 $1, (R14), Z6, K3, Z21 // 62c3cd4b3a2e01 + VINSERTI64X4 $1, -7(DI)(R8*8), Z6, K3, Z21 // 
62a3cd4b3aacc7f9ffffff01 + VINSERTI64X4 $1, Y31, Z9, K3, Z21 // 6283b54b3aef01 + VINSERTI64X4 $1, (R14), Z9, K3, Z21 // 62c3b54b3a2e01 + VINSERTI64X4 $1, -7(DI)(R8*8), Z9, K3, Z21 // 62a3b54b3aacc7f9ffffff01 + VINSERTI64X4 $1, Y31, Z6, K3, Z9 // 6213cd4b3acf01 + VINSERTI64X4 $1, (R14), Z6, K3, Z9 // 6253cd4b3a0e01 + VINSERTI64X4 $1, -7(DI)(R8*8), Z6, K3, Z9 // 6233cd4b3a8cc7f9ffffff01 + VINSERTI64X4 $1, Y31, Z9, K3, Z9 // 6213b54b3acf01 + VINSERTI64X4 $1, (R14), Z9, K3, Z9 // 6253b54b3a0e01 + VINSERTI64X4 $1, -7(DI)(R8*8), Z9, K3, Z9 // 6233b54b3a8cc7f9ffffff01 + VMAXPD X21, X16, K7, X0 // 62b1fd075fc5 + VMAXPD 99(R15)(R15*1), X16, K7, X0 // 6291fd075f843f63000000 + VMAXPD (DX), X16, K7, X0 // 62f1fd075f02 + VMAXPD Y21, Y6, K4, Y22 // 62a1cd2c5ff5 + VMAXPD 99(R15)(R15*4), Y6, K4, Y22 // 6281cd2c5fb4bf63000000 + VMAXPD 15(DX), Y6, K4, Y22 // 62e1cd2c5fb20f000000 + VMAXPD Z30, Z20, K4, Z1 // 6291dd445fce + VMAXPD Z5, Z20, K4, Z1 // 62f1dd445fcd + VMAXPD Z30, Z9, K4, Z1 // 6291b54c5fce + VMAXPD Z5, Z9, K4, Z1 // 62f1b54c5fcd + VMAXPD Z30, Z20, K4, Z9 // 6211dd445fce + VMAXPD Z5, Z20, K4, Z9 // 6271dd445fcd + VMAXPD Z30, Z9, K4, Z9 // 6211b54c5fce + VMAXPD Z5, Z9, K4, Z9 // 6271b54c5fcd + VMAXPD Z16, Z7, K7, Z26 // 6221c54f5fd0 + VMAXPD Z25, Z7, K7, Z26 // 6201c54f5fd1 + VMAXPD 7(SI)(DI*1), Z7, K7, Z26 // 6261c54f5f943e07000000 + VMAXPD 15(DX)(BX*8), Z7, K7, Z26 // 6261c54f5f94da0f000000 + VMAXPD Z16, Z21, K7, Z26 // 6221d5475fd0 + VMAXPD Z25, Z21, K7, Z26 // 6201d5475fd1 + VMAXPD 7(SI)(DI*1), Z21, K7, Z26 // 6261d5475f943e07000000 + VMAXPD 15(DX)(BX*8), Z21, K7, Z26 // 6261d5475f94da0f000000 + VMAXPD Z16, Z7, K7, Z22 // 62a1c54f5ff0 + VMAXPD Z25, Z7, K7, Z22 // 6281c54f5ff1 + VMAXPD 7(SI)(DI*1), Z7, K7, Z22 // 62e1c54f5fb43e07000000 + VMAXPD 15(DX)(BX*8), Z7, K7, Z22 // 62e1c54f5fb4da0f000000 + VMAXPD Z16, Z21, K7, Z22 // 62a1d5475ff0 + VMAXPD Z25, Z21, K7, Z22 // 6281d5475ff1 + VMAXPD 7(SI)(DI*1), Z21, K7, Z22 // 62e1d5475fb43e07000000 + VMAXPD 15(DX)(BX*8), Z21, K7, 
Z22 // 62e1d5475fb4da0f000000 + VMAXPS X22, X28, K2, X0 // 62b11c025fc6 + VMAXPS -17(BP)(SI*8), X28, K2, X0 // 62f11c025f84f5efffffff + VMAXPS (R15), X28, K2, X0 // 62d11c025f07 + VMAXPS Y28, Y0, K5, Y7 // 62917c2d5ffc + VMAXPS (CX), Y0, K5, Y7 // 62f17c2d5f39 + VMAXPS 99(R15), Y0, K5, Y7 // 62d17c2d5fbf63000000 + VMAXPS Z21, Z12, K3, Z14 // 62311c4b5ff5 + VMAXPS Z9, Z12, K3, Z14 // 62511c4b5ff1 + VMAXPS Z21, Z13, K3, Z14 // 6231144b5ff5 + VMAXPS Z9, Z13, K3, Z14 // 6251144b5ff1 + VMAXPS Z21, Z12, K3, Z13 // 62311c4b5fed + VMAXPS Z9, Z12, K3, Z13 // 62511c4b5fe9 + VMAXPS Z21, Z13, K3, Z13 // 6231144b5fed + VMAXPS Z9, Z13, K3, Z13 // 6251144b5fe9 + VMAXPS Z23, Z27, K4, Z2 // 62b124445fd7 + VMAXPS Z9, Z27, K4, Z2 // 62d124445fd1 + VMAXPS -7(DI)(R8*1), Z27, K4, Z2 // 62b124445f9407f9ffffff + VMAXPS (SP), Z27, K4, Z2 // 62f124445f1424 + VMAXPS Z23, Z25, K4, Z2 // 62b134445fd7 + VMAXPS Z9, Z25, K4, Z2 // 62d134445fd1 + VMAXPS -7(DI)(R8*1), Z25, K4, Z2 // 62b134445f9407f9ffffff + VMAXPS (SP), Z25, K4, Z2 // 62f134445f1424 + VMAXPS Z23, Z27, K4, Z7 // 62b124445fff + VMAXPS Z9, Z27, K4, Z7 // 62d124445ff9 + VMAXPS -7(DI)(R8*1), Z27, K4, Z7 // 62b124445fbc07f9ffffff + VMAXPS (SP), Z27, K4, Z7 // 62f124445f3c24 + VMAXPS Z23, Z25, K4, Z7 // 62b134445fff + VMAXPS Z9, Z25, K4, Z7 // 62d134445ff9 + VMAXPS -7(DI)(R8*1), Z25, K4, Z7 // 62b134445fbc07f9ffffff + VMAXPS (SP), Z25, K4, Z7 // 62f134445f3c24 + VMAXSD X7, X19, K2, X7 // 62f1e7025fff + VMAXSD X1, X31, K2, X16 // 62e187025fc1 or 62e187225fc1 or 62e187425fc1 + VMAXSD 17(SP)(BP*1), X31, K2, X16 // 62e187025f842c11000000 or 62e187225f842c11000000 or 62e187425f842c11000000 + VMAXSD -7(CX)(DX*8), X31, K2, X16 // 62e187025f84d1f9ffffff or 62e187225f84d1f9ffffff or 62e187425f84d1f9ffffff + VMAXSS X15, X9, K3, X7 // 62d1360b5fff + VMAXSS X12, X0, K3, X12 // 62517e0b5fe4 or 62517e2b5fe4 or 62517e4b5fe4 + VMAXSS (AX), X0, K3, X12 // 62717e0b5f20 or 62717e2b5f20 or 62717e4b5f20 + VMAXSS 7(SI), X0, K3, X12 // 62717e0b5fa607000000 or 
62717e2b5fa607000000 or 62717e4b5fa607000000 + VMINPD X17, X5, K3, X14 // 6231d50b5df1 + VMINPD 7(SI)(DI*8), X5, K3, X14 // 6271d50b5db4fe07000000 + VMINPD -15(R14), X5, K3, X14 // 6251d50b5db6f1ffffff + VMINPD Y24, Y14, K2, Y20 // 62818d2a5de0 + VMINPD 99(R15)(R15*2), Y14, K2, Y20 // 62818d2a5da47f63000000 + VMINPD -7(DI), Y14, K2, Y20 // 62e18d2a5da7f9ffffff + VMINPD Z14, Z3, K1, Z27 // 6241e5495dde + VMINPD Z7, Z3, K1, Z27 // 6261e5495ddf + VMINPD Z14, Z0, K1, Z27 // 6241fd495dde + VMINPD Z7, Z0, K1, Z27 // 6261fd495ddf + VMINPD Z14, Z3, K1, Z14 // 6251e5495df6 + VMINPD Z7, Z3, K1, Z14 // 6271e5495df7 + VMINPD Z14, Z0, K1, Z14 // 6251fd495df6 + VMINPD Z7, Z0, K1, Z14 // 6271fd495df7 + VMINPD Z1, Z22, K2, Z8 // 6271cd425dc1 + VMINPD Z16, Z22, K2, Z8 // 6231cd425dc0 + VMINPD -7(CX), Z22, K2, Z8 // 6271cd425d81f9ffffff + VMINPD 15(DX)(BX*4), Z22, K2, Z8 // 6271cd425d849a0f000000 + VMINPD Z1, Z25, K2, Z8 // 6271b5425dc1 + VMINPD Z16, Z25, K2, Z8 // 6231b5425dc0 + VMINPD -7(CX), Z25, K2, Z8 // 6271b5425d81f9ffffff + VMINPD 15(DX)(BX*4), Z25, K2, Z8 // 6271b5425d849a0f000000 + VMINPD Z1, Z22, K2, Z24 // 6261cd425dc1 + VMINPD Z16, Z22, K2, Z24 // 6221cd425dc0 + VMINPD -7(CX), Z22, K2, Z24 // 6261cd425d81f9ffffff + VMINPD 15(DX)(BX*4), Z22, K2, Z24 // 6261cd425d849a0f000000 + VMINPD Z1, Z25, K2, Z24 // 6261b5425dc1 + VMINPD Z16, Z25, K2, Z24 // 6221b5425dc0 + VMINPD -7(CX), Z25, K2, Z24 // 6261b5425d81f9ffffff + VMINPD 15(DX)(BX*4), Z25, K2, Z24 // 6261b5425d849a0f000000 + VMINPS X3, X8, K1, X15 // 62713c095dfb + VMINPS 7(SI)(DI*1), X8, K1, X15 // 62713c095dbc3e07000000 + VMINPS 15(DX)(BX*8), X8, K1, X15 // 62713c095dbcda0f000000 + VMINPS Y14, Y20, K7, Y13 // 62515c275dee + VMINPS -7(CX)(DX*1), Y20, K7, Y13 // 62715c275dac11f9ffffff + VMINPS -15(R14)(R15*4), Y20, K7, Y13 // 62115c275dacbef1ffffff + VMINPS Z15, Z0, K1, Z6 // 62d17c495df7 + VMINPS Z12, Z0, K1, Z6 // 62d17c495df4 + VMINPS Z15, Z8, K1, Z6 // 62d13c495df7 + VMINPS Z12, Z8, K1, Z6 // 62d13c495df4 + VMINPS 
Z15, Z0, K1, Z2 // 62d17c495dd7 + VMINPS Z12, Z0, K1, Z2 // 62d17c495dd4 + VMINPS Z15, Z8, K1, Z2 // 62d13c495dd7 + VMINPS Z12, Z8, K1, Z2 // 62d13c495dd4 + VMINPS Z13, Z11, K1, Z14 // 625124495df5 + VMINPS Z14, Z11, K1, Z14 // 625124495df6 + VMINPS 99(R15)(R15*8), Z11, K1, Z14 // 621124495db4ff63000000 + VMINPS 7(AX)(CX*8), Z11, K1, Z14 // 627124495db4c807000000 + VMINPS Z13, Z5, K1, Z14 // 625154495df5 + VMINPS Z14, Z5, K1, Z14 // 625154495df6 + VMINPS 99(R15)(R15*8), Z5, K1, Z14 // 621154495db4ff63000000 + VMINPS 7(AX)(CX*8), Z5, K1, Z14 // 627154495db4c807000000 + VMINPS Z13, Z11, K1, Z27 // 624124495ddd + VMINPS Z14, Z11, K1, Z27 // 624124495dde + VMINPS 99(R15)(R15*8), Z11, K1, Z27 // 620124495d9cff63000000 + VMINPS 7(AX)(CX*8), Z11, K1, Z27 // 626124495d9cc807000000 + VMINPS Z13, Z5, K1, Z27 // 624154495ddd + VMINPS Z14, Z5, K1, Z27 // 624154495dde + VMINPS 99(R15)(R15*8), Z5, K1, Z27 // 620154495d9cff63000000 + VMINPS 7(AX)(CX*8), Z5, K1, Z27 // 626154495d9cc807000000 + VMINSD X13, X23, K1, X26 // 6241c7015dd5 + VMINSD X9, X24, K7, X28 // 6241bf075de1 or 6241bf275de1 or 6241bf475de1 + VMINSD -17(BP)(SI*2), X24, K7, X28 // 6261bf075da475efffffff or 6261bf275da475efffffff or 6261bf475da475efffffff + VMINSD 7(AX)(CX*2), X24, K7, X28 // 6261bf075da44807000000 or 6261bf275da44807000000 or 6261bf475da44807000000 + VMINSS X18, X26, K2, X15 // 62312e025dfa + VMINSS X11, X1, K4, X21 // 62c1760c5deb or 62c1762c5deb or 62c1764c5deb + VMINSS (BX), X1, K4, X21 // 62e1760c5d2b or 62e1762c5d2b or 62e1764c5d2b + VMINSS -17(BP)(SI*1), X1, K4, X21 // 62e1760c5dac35efffffff or 62e1762c5dac35efffffff or 62e1764c5dac35efffffff + VMOVAPD X3, K1, X31 // 6291fd0929df + VMOVAPD X3, K1, -7(DI)(R8*1) // 62b1fd09299c07f9ffffff + VMOVAPD X3, K1, (SP) // 62f1fd09291c24 + VMOVAPD X0, K3, X7 // 62f1fd0b29c7 + VMOVAPD -7(CX), K3, X7 // 62f1fd0b28b9f9ffffff + VMOVAPD 15(DX)(BX*4), K3, X7 // 62f1fd0b28bc9a0f000000 + VMOVAPD Y1, K4, Y21 // 62b1fd2c29cd + VMOVAPD Y1, K4, 15(DX)(BX*1) // 
62f1fd2c298c1a0f000000 + VMOVAPD Y1, K4, -7(CX)(DX*2) // 62f1fd2c298c51f9ffffff + VMOVAPD Y30, K5, Y26 // 6201fd2d29f2 + VMOVAPD -17(BP), K5, Y26 // 6261fd2d2895efffffff + VMOVAPD -15(R14)(R15*8), K5, Y26 // 6201fd2d2894fef1ffffff + VMOVAPD Z2, K7, Z5 // 62f1fd4f29d5 + VMOVAPD Z2, K7, Z23 // 62b1fd4f29d7 + VMOVAPD Z2, K7, (AX) // 62f1fd4f2910 + VMOVAPD Z2, K7, 7(SI) // 62f1fd4f299607000000 + VMOVAPD Z26, K7, Z6 // 6261fd4f29d6 + VMOVAPD Z14, K7, Z6 // 6271fd4f29f6 + VMOVAPD (BX), K7, Z6 // 62f1fd4f2833 + VMOVAPD -17(BP)(SI*1), K7, Z6 // 62f1fd4f28b435efffffff + VMOVAPD Z26, K7, Z14 // 6241fd4f29d6 + VMOVAPD Z14, K7, Z14 // 6251fd4f29f6 + VMOVAPD (BX), K7, Z14 // 6271fd4f2833 + VMOVAPD -17(BP)(SI*1), K7, Z14 // 6271fd4f28b435efffffff + VMOVAPS X24, K6, X0 // 62617c0e29c0 + VMOVAPS X24, K6, 99(R15)(R15*8) // 62017c0e2984ff63000000 + VMOVAPS X24, K6, 7(AX)(CX*8) // 62617c0e2984c807000000 + VMOVAPS X7, K3, X20 // 62b17c0b29fc + VMOVAPS (AX), K3, X20 // 62e17c0b2820 + VMOVAPS 7(SI), K3, X20 // 62e17c0b28a607000000 + VMOVAPS Y22, K7, Y12 // 62c17c2f29f4 + VMOVAPS Y22, K7, 17(SP)(BP*2) // 62e17c2f29b46c11000000 + VMOVAPS Y22, K7, -7(DI)(R8*4) // 62a17c2f29b487f9ffffff + VMOVAPS Y15, K4, Y3 // 62717c2c29fb + VMOVAPS 15(R8), K4, Y3 // 62d17c2c28980f000000 + VMOVAPS (BP), K4, Y3 // 62f17c2c285d00 + VMOVAPS Z13, K4, Z28 // 62117c4c29ec + VMOVAPS Z21, K4, Z28 // 62817c4c29ec + VMOVAPS Z13, K4, Z6 // 62717c4c29ee + VMOVAPS Z21, K4, Z6 // 62e17c4c29ee + VMOVAPS Z13, K4, 15(R8)(R14*4) // 62117c4c29acb00f000000 + VMOVAPS Z21, K4, 15(R8)(R14*4) // 62817c4c29acb00f000000 + VMOVAPS Z13, K4, -7(CX)(DX*4) // 62717c4c29ac91f9ffffff + VMOVAPS Z21, K4, -7(CX)(DX*4) // 62e17c4c29ac91f9ffffff + VMOVAPS Z3, K7, Z26 // 62917c4f29da + VMOVAPS Z0, K7, Z26 // 62917c4f29c2 + VMOVAPS (R8), K7, Z26 // 62417c4f2810 + VMOVAPS 15(DX)(BX*2), K7, Z26 // 62617c4f28945a0f000000 + VMOVAPS Z3, K7, Z3 // 62f17c4f29db + VMOVAPS Z0, K7, Z3 // 62f17c4f29c3 + VMOVAPS (R8), K7, Z3 // 62d17c4f2818 + VMOVAPS 
15(DX)(BX*2), K7, Z3 // 62f17c4f289c5a0f000000 + VMOVDDUP X5, K2, X14 // 6271ff0a12f5 + VMOVDDUP 15(R8)(R14*1), K2, X14 // 6211ff0a12b4300f000000 + VMOVDDUP 15(R8)(R14*2), K2, X14 // 6211ff0a12b4700f000000 + VMOVDDUP Y27, K5, Y1 // 6291ff2d12cb + VMOVDDUP 15(R8)(R14*8), K5, Y1 // 6291ff2d128cf00f000000 + VMOVDDUP -15(R14)(R15*2), K5, Y1 // 6291ff2d128c7ef1ffffff + VMOVDDUP Z11, K3, Z21 // 62c1ff4b12eb + VMOVDDUP Z25, K3, Z21 // 6281ff4b12e9 + VMOVDDUP 17(SP)(BP*1), K3, Z21 // 62e1ff4b12ac2c11000000 + VMOVDDUP -7(CX)(DX*8), K3, Z21 // 62e1ff4b12acd1f9ffffff + VMOVDDUP Z11, K3, Z13 // 6251ff4b12eb + VMOVDDUP Z25, K3, Z13 // 6211ff4b12e9 + VMOVDDUP 17(SP)(BP*1), K3, Z13 // 6271ff4b12ac2c11000000 + VMOVDDUP -7(CX)(DX*8), K3, Z13 // 6271ff4b12acd1f9ffffff + VMOVDQA32 X3, K4, X31 // 62917d0c7fdf + VMOVDQA32 X3, K4, (BX) // 62f17d0c7f1b + VMOVDQA32 X3, K4, -17(BP)(SI*1) // 62f17d0c7f9c35efffffff + VMOVDQA32 X1, K2, X21 // 62b17d0a7fcd + VMOVDQA32 15(R8)(R14*4), K2, X21 // 62817d0a6facb00f000000 + VMOVDQA32 -7(CX)(DX*4), K2, X21 // 62e17d0a6fac91f9ffffff + VMOVDQA32 Y5, K2, Y19 // 62b17d2a7feb + VMOVDQA32 Y5, K2, -15(R14)(R15*1) // 62917d2a7fac3ef1ffffff + VMOVDQA32 Y5, K2, -15(BX) // 62f17d2a7fabf1ffffff + VMOVDQA32 Y13, K3, Y17 // 62317d2b7fe9 + VMOVDQA32 7(AX)(CX*4), K3, Y17 // 62e17d2b6f8c8807000000 + VMOVDQA32 7(AX)(CX*1), K3, Y17 // 62e17d2b6f8c0807000000 + VMOVDQA32 Z27, K3, Z3 // 62617d4b7fdb + VMOVDQA32 Z15, K3, Z3 // 62717d4b7ffb + VMOVDQA32 Z27, K3, Z12 // 62417d4b7fdc + VMOVDQA32 Z15, K3, Z12 // 62517d4b7ffc + VMOVDQA32 Z27, K3, -17(BP)(SI*2) // 62617d4b7f9c75efffffff + VMOVDQA32 Z15, K3, -17(BP)(SI*2) // 62717d4b7fbc75efffffff + VMOVDQA32 Z27, K3, 7(AX)(CX*2) // 62617d4b7f9c4807000000 + VMOVDQA32 Z15, K3, 7(AX)(CX*2) // 62717d4b7fbc4807000000 + VMOVDQA32 Z23, K3, Z23 // 62a17d4b7fff + VMOVDQA32 Z6, K3, Z23 // 62b17d4b7ff7 + VMOVDQA32 15(R8)(R14*1), K3, Z23 // 62817d4b6fbc300f000000 + VMOVDQA32 15(R8)(R14*2), K3, Z23 // 62817d4b6fbc700f000000 + VMOVDQA32 Z23, 
K3, Z5 // 62e17d4b7ffd + VMOVDQA32 Z6, K3, Z5 // 62f17d4b7ff5 + VMOVDQA32 15(R8)(R14*1), K3, Z5 // 62917d4b6fac300f000000 + VMOVDQA32 15(R8)(R14*2), K3, Z5 // 62917d4b6fac700f000000 + VMOVDQA64 X13, K2, X11 // 6251fd0a7feb + VMOVDQA64 X13, K2, (R8) // 6251fd0a7f28 + VMOVDQA64 X13, K2, 15(DX)(BX*2) // 6271fd0a7fac5a0f000000 + VMOVDQA64 X30, K1, X0 // 6261fd097ff0 + VMOVDQA64 17(SP)(BP*1), K1, X0 // 62f1fd096f842c11000000 + VMOVDQA64 -7(CX)(DX*8), K1, X0 // 62f1fd096f84d1f9ffffff + VMOVDQA64 Y7, K2, Y21 // 62b1fd2a7ffd + VMOVDQA64 Y7, K2, (SI) // 62f1fd2a7f3e + VMOVDQA64 Y7, K2, 7(SI)(DI*2) // 62f1fd2a7fbc7e07000000 + VMOVDQA64 Y13, K1, Y30 // 6211fd297fee + VMOVDQA64 17(SP)(BP*8), K1, Y30 // 6261fd296fb4ec11000000 + VMOVDQA64 17(SP)(BP*4), K1, Y30 // 6261fd296fb4ac11000000 + VMOVDQA64 Z21, K7, Z8 // 62c1fd4f7fe8 + VMOVDQA64 Z5, K7, Z8 // 62d1fd4f7fe8 + VMOVDQA64 Z21, K7, Z28 // 6281fd4f7fec + VMOVDQA64 Z5, K7, Z28 // 6291fd4f7fec + VMOVDQA64 Z21, K7, (R14) // 62c1fd4f7f2e + VMOVDQA64 Z5, K7, (R14) // 62d1fd4f7f2e + VMOVDQA64 Z21, K7, -7(DI)(R8*8) // 62a1fd4f7facc7f9ffffff + VMOVDQA64 Z5, K7, -7(DI)(R8*8) // 62b1fd4f7facc7f9ffffff + VMOVDQA64 Z12, K1, Z16 // 6231fd497fe0 + VMOVDQA64 Z27, K1, Z16 // 6221fd497fd8 + VMOVDQA64 99(R15)(R15*4), K1, Z16 // 6281fd496f84bf63000000 + VMOVDQA64 15(DX), K1, Z16 // 62e1fd496f820f000000 + VMOVDQA64 Z12, K1, Z13 // 6251fd497fe5 + VMOVDQA64 Z27, K1, Z13 // 6241fd497fdd + VMOVDQA64 99(R15)(R15*4), K1, Z13 // 6211fd496facbf63000000 + VMOVDQA64 15(DX), K1, Z13 // 6271fd496faa0f000000 + VMOVDQU32 X8, K3, X19 // 62317e0b7fc3 + VMOVDQU32 X8, K3, (R14) // 62517e0b7f06 + VMOVDQU32 X8, K3, -7(DI)(R8*8) // 62317e0b7f84c7f9ffffff + VMOVDQU32 X26, K4, X8 // 62417e0c7fd0 + VMOVDQU32 99(R15)(R15*4), K4, X8 // 62117e0c6f84bf63000000 + VMOVDQU32 15(DX), K4, X8 // 62717e0c6f820f000000 + VMOVDQU32 Y5, K5, Y24 // 62917e2d7fe8 + VMOVDQU32 Y5, K5, 7(AX) // 62f17e2d7fa807000000 + VMOVDQU32 Y5, K5, (DI) // 62f17e2d7f2f + VMOVDQU32 Y21, K7, Y24 // 
62817e2f7fe8 + VMOVDQU32 99(R15)(R15*1), K7, Y24 // 62017e2f6f843f63000000 + VMOVDQU32 (DX), K7, Y24 // 62617e2f6f02 + VMOVDQU32 Z6, K7, Z9 // 62d17e4f7ff1 + VMOVDQU32 Z25, K7, Z9 // 62417e4f7fc9 + VMOVDQU32 Z6, K7, Z12 // 62d17e4f7ff4 + VMOVDQU32 Z25, K7, Z12 // 62417e4f7fcc + VMOVDQU32 Z6, K7, -7(CX)(DX*1) // 62f17e4f7fb411f9ffffff + VMOVDQU32 Z25, K7, -7(CX)(DX*1) // 62617e4f7f8c11f9ffffff + VMOVDQU32 Z6, K7, -15(R14)(R15*4) // 62917e4f7fb4bef1ffffff + VMOVDQU32 Z25, K7, -15(R14)(R15*4) // 62017e4f7f8cbef1ffffff + VMOVDQU32 Z8, K6, Z3 // 62717e4e7fc3 + VMOVDQU32 Z2, K6, Z3 // 62f17e4e7fd3 + VMOVDQU32 15(DX)(BX*1), K6, Z3 // 62f17e4e6f9c1a0f000000 + VMOVDQU32 -7(CX)(DX*2), K6, Z3 // 62f17e4e6f9c51f9ffffff + VMOVDQU32 Z8, K6, Z21 // 62317e4e7fc5 + VMOVDQU32 Z2, K6, Z21 // 62b17e4e7fd5 + VMOVDQU32 15(DX)(BX*1), K6, Z21 // 62e17e4e6fac1a0f000000 + VMOVDQU32 -7(CX)(DX*2), K6, Z21 // 62e17e4e6fac51f9ffffff + VMOVDQU64 X12, K3, X23 // 6231fe0b7fe7 + VMOVDQU64 X12, K3, (CX) // 6271fe0b7f21 + VMOVDQU64 X12, K3, 99(R15) // 6251fe0b7fa763000000 + VMOVDQU64 X23, K7, X16 // 62a1fe0f7ff8 + VMOVDQU64 99(R15)(R15*2), K7, X16 // 6281fe0f6f847f63000000 + VMOVDQU64 -7(DI), K7, X16 // 62e1fe0f6f87f9ffffff + VMOVDQU64 Y9, K4, Y16 // 6231fe2c7fc8 + VMOVDQU64 Y9, K4, -17(BP)(SI*8) // 6271fe2c7f8cf5efffffff + VMOVDQU64 Y9, K4, (R15) // 6251fe2c7f0f + VMOVDQU64 Y9, K4, Y13 // 6251fe2c7fcd + VMOVDQU64 7(SI)(DI*8), K4, Y13 // 6271fe2c6facfe07000000 + VMOVDQU64 -15(R14), K4, Y13 // 6251fe2c6faef1ffffff + VMOVDQU64 Z7, K7, Z3 // 62f1fe4f7ffb + VMOVDQU64 Z9, K7, Z3 // 6271fe4f7fcb + VMOVDQU64 Z7, K7, Z27 // 6291fe4f7ffb + VMOVDQU64 Z9, K7, Z27 // 6211fe4f7fcb + VMOVDQU64 Z7, K7, -17(BP) // 62f1fe4f7fbdefffffff + VMOVDQU64 Z9, K7, -17(BP) // 6271fe4f7f8defffffff + VMOVDQU64 Z7, K7, -15(R14)(R15*8) // 6291fe4f7fbcfef1ffffff + VMOVDQU64 Z9, K7, -15(R14)(R15*8) // 6211fe4f7f8cfef1ffffff + VMOVDQU64 Z20, K2, Z0 // 62e1fe4a7fe0 + VMOVDQU64 Z28, K2, Z0 // 6261fe4a7fe0 + VMOVDQU64 17(SP)(BP*2), K2, 
Z0 // 62f1fe4a6f846c11000000 + VMOVDQU64 -7(DI)(R8*4), K2, Z0 // 62b1fe4a6f8487f9ffffff + VMOVDQU64 Z20, K2, Z6 // 62e1fe4a7fe6 + VMOVDQU64 Z28, K2, Z6 // 6261fe4a7fe6 + VMOVDQU64 17(SP)(BP*2), K2, Z6 // 62f1fe4a6fb46c11000000 + VMOVDQU64 -7(DI)(R8*4), K2, Z6 // 62b1fe4a6fb487f9ffffff + VMOVHPS (R14), X2, X23 // 62c16c08163e + VMOVHPS -7(DI)(R8*8), X2, X23 // 62a16c0816bcc7f9ffffff + VMOVHPS X20, 99(R15)(R15*4) // 62817c0817a4bf63000000 + VMOVHPS X20, 15(DX) // 62e17c0817a20f000000 + VMOVLHPS X0, X25, X5 // 62f1340016e8 + VMOVNTDQ Y26, -7(CX) // 62617d28e791f9ffffff + VMOVNTDQ Y26, 15(DX)(BX*4) // 62617d28e7949a0f000000 + VMOVNTDQ Z18, -15(R14)(R15*1) // 62817d48e7943ef1ffffff + VMOVNTDQ Z24, -15(R14)(R15*1) // 62017d48e7843ef1ffffff + VMOVNTDQ Z18, -15(BX) // 62e17d48e793f1ffffff + VMOVNTDQ Z24, -15(BX) // 62617d48e783f1ffffff + VMOVNTDQA 7(AX)(CX*4), Z2 // 62f27d482a948807000000 + VMOVNTDQA 7(AX)(CX*1), Z2 // 62f27d482a940807000000 + VMOVNTDQA 7(AX)(CX*4), Z21 // 62e27d482aac8807000000 + VMOVNTDQA 7(AX)(CX*1), Z21 // 62e27d482aac0807000000 + VMOVNTPD Y26, (AX) // 6261fd282b10 + VMOVNTPD Y26, 7(SI) // 6261fd282b9607000000 + VMOVNTPD Z7, (SI) // 62f1fd482b3e + VMOVNTPD Z13, (SI) // 6271fd482b2e + VMOVNTPD Z7, 7(SI)(DI*2) // 62f1fd482bbc7e07000000 + VMOVNTPD Z13, 7(SI)(DI*2) // 6271fd482bac7e07000000 + VMOVNTPS X31, 15(R8)(R14*8) // 62017c082bbcf00f000000 + VMOVNTPS X31, -15(R14)(R15*2) // 62017c082bbc7ef1ffffff + VMOVNTPS Z6, 17(SP)(BP*8) // 62f17c482bb4ec11000000 + VMOVNTPS Z16, 17(SP)(BP*8) // 62e17c482b84ec11000000 + VMOVNTPS Z6, 17(SP)(BP*4) // 62f17c482bb4ac11000000 + VMOVNTPS Z16, 17(SP)(BP*4) // 62e17c482b84ac11000000 + VMOVSD -7(CX)(DX*1), K3, X11 // 6271ff0b109c11f9ffffff or 6271ff2b109c11f9ffffff or 6271ff4b109c11f9ffffff + VMOVSD -15(R14)(R15*4), K3, X11 // 6211ff0b109cbef1ffffff or 6211ff2b109cbef1ffffff or 6211ff4b109cbef1ffffff + VMOVSD X14, X5, K3, X22 // 6231d70b11f6 or 6231d72b11f6 or 6231d74b11f6 + VMOVSD X0, K2, 15(DX)(BX*1) // 
62f1ff0a11841a0f000000 or 62f1ff2a11841a0f000000 or 62f1ff4a11841a0f000000 + VMOVSD X0, K2, -7(CX)(DX*2) // 62f1ff0a118451f9ffffff or 62f1ff2a118451f9ffffff or 62f1ff4a118451f9ffffff + VMOVSD X15, X7, K1, X17 // 6231c70911f9 or 6231c72911f9 or 6231c74911f9 + VMOVSHDUP X0, K2, X11 // 62717e0a16d8 + VMOVSHDUP -15(R14)(R15*1), K2, X11 // 62117e0a169c3ef1ffffff + VMOVSHDUP -15(BX), K2, X11 // 62717e0a169bf1ffffff + VMOVSHDUP Y18, K1, Y14 // 62317e2916f2 + VMOVSHDUP 15(R8)(R14*4), K1, Y14 // 62117e2916b4b00f000000 + VMOVSHDUP -7(CX)(DX*4), K1, Y14 // 62717e2916b491f9ffffff + VMOVSHDUP Z1, K7, Z6 // 62f17e4f16f1 + VMOVSHDUP Z15, K7, Z6 // 62d17e4f16f7 + VMOVSHDUP 7(SI)(DI*4), K7, Z6 // 62f17e4f16b4be07000000 + VMOVSHDUP -7(DI)(R8*2), K7, Z6 // 62b17e4f16b447f9ffffff + VMOVSHDUP Z1, K7, Z22 // 62e17e4f16f1 + VMOVSHDUP Z15, K7, Z22 // 62c17e4f16f7 + VMOVSHDUP 7(SI)(DI*4), K7, Z22 // 62e17e4f16b4be07000000 + VMOVSHDUP -7(DI)(R8*2), K7, Z22 // 62a17e4f16b447f9ffffff + VMOVSLDUP X8, K1, X18 // 62c17e0912d0 + VMOVSLDUP 7(AX)(CX*4), K1, X18 // 62e17e0912948807000000 + VMOVSLDUP 7(AX)(CX*1), K1, X18 // 62e17e0912940807000000 + VMOVSLDUP Y18, K1, Y31 // 62217e2912fa + VMOVSLDUP (R8), K1, Y31 // 62417e291238 + VMOVSLDUP 15(DX)(BX*2), K1, Y31 // 62617e2912bc5a0f000000 + VMOVSLDUP Z18, K1, Z13 // 62317e4912ea + VMOVSLDUP Z8, K1, Z13 // 62517e4912e8 + VMOVSLDUP 17(SP), K1, Z13 // 62717e4912ac2411000000 + VMOVSLDUP -17(BP)(SI*4), K1, Z13 // 62717e4912acb5efffffff + VMOVSS 17(SP)(BP*1), K7, X27 // 62617e0f109c2c11000000 or 62617e2f109c2c11000000 or 62617e4f109c2c11000000 + VMOVSS -7(CX)(DX*8), K7, X27 // 62617e0f109cd1f9ffffff or 62617e2f109cd1f9ffffff or 62617e4f109cd1f9ffffff + VMOVSS X18, X3, K2, X25 // 6281660a11d1 or 6281662a11d1 or 6281664a11d1 + VMOVSS X15, K4, -17(BP)(SI*2) // 62717e0c11bc75efffffff or 62717e2c11bc75efffffff or 62717e4c11bc75efffffff + VMOVSS X15, K4, 7(AX)(CX*2) // 62717e0c11bc4807000000 or 62717e2c11bc4807000000 or 62717e4c11bc4807000000 + VMOVSS X7, X15, K1, 
X28 // 6291060911fc or 6291062911fc or 6291064911fc + VMOVUPD X8, K3, X13 // 6251fd0b11c5 + VMOVUPD X8, K3, (SI) // 6271fd0b1106 + VMOVUPD X8, K3, 7(SI)(DI*2) // 6271fd0b11847e07000000 + VMOVUPD X7, K4, X24 // 6291fd0c11f8 + VMOVUPD 17(SP)(BP*8), K4, X24 // 6261fd0c1084ec11000000 + VMOVUPD 17(SP)(BP*4), K4, X24 // 6261fd0c1084ac11000000 + VMOVUPD Y24, K5, Y3 // 6261fd2d11c3 + VMOVUPD Y24, K5, 17(SP)(BP*1) // 6261fd2d11842c11000000 + VMOVUPD Y24, K5, -7(CX)(DX*8) // 6261fd2d1184d1f9ffffff + VMOVUPD Y7, K7, Y2 // 62f1fd2f11fa + VMOVUPD -17(BP)(SI*2), K7, Y2 // 62f1fd2f109475efffffff + VMOVUPD 7(AX)(CX*2), K7, Y2 // 62f1fd2f10944807000000 + VMOVUPD Z2, K7, Z22 // 62b1fd4f11d6 + VMOVUPD Z31, K7, Z22 // 6221fd4f11fe + VMOVUPD Z2, K7, Z7 // 62f1fd4f11d7 + VMOVUPD Z31, K7, Z7 // 6261fd4f11ff + VMOVUPD Z2, K7, 7(AX) // 62f1fd4f119007000000 + VMOVUPD Z31, K7, 7(AX) // 6261fd4f11b807000000 + VMOVUPD Z2, K7, (DI) // 62f1fd4f1117 + VMOVUPD Z31, K7, (DI) // 6261fd4f113f + VMOVUPD Z1, K6, Z20 // 62b1fd4e11cc + VMOVUPD Z3, K6, Z20 // 62b1fd4e11dc + VMOVUPD 99(R15)(R15*1), K6, Z20 // 6281fd4e10a43f63000000 + VMOVUPD (DX), K6, Z20 // 62e1fd4e1022 + VMOVUPD Z1, K6, Z9 // 62d1fd4e11c9 + VMOVUPD Z3, K6, Z9 // 62d1fd4e11d9 + VMOVUPD 99(R15)(R15*1), K6, Z9 // 6211fd4e108c3f63000000 + VMOVUPD (DX), K6, Z9 // 6271fd4e100a + VMOVUPS X22, K3, X0 // 62e17c0b11f0 + VMOVUPS X22, K3, 7(SI)(DI*4) // 62e17c0b11b4be07000000 + VMOVUPS X22, K3, -7(DI)(R8*2) // 62a17c0b11b447f9ffffff + VMOVUPS X11, K7, X1 // 62717c0f11d9 + VMOVUPS 17(SP), K7, X1 // 62f17c0f108c2411000000 + VMOVUPS -17(BP)(SI*4), K7, X1 // 62f17c0f108cb5efffffff + VMOVUPS Y14, K4, Y21 // 62317c2c11f5 + VMOVUPS Y14, K4, 15(R8)(R14*1) // 62117c2c11b4300f000000 + VMOVUPS Y14, K4, 15(R8)(R14*2) // 62117c2c11b4700f000000 + VMOVUPS Y20, K4, Y8 // 62c17c2c11e0 + VMOVUPS (R14), K4, Y8 // 62517c2c1006 + VMOVUPS -7(DI)(R8*8), K4, Y8 // 62317c2c1084c7f9ffffff + VMOVUPS Z28, K7, Z12 // 62417c4f11e4 + VMOVUPS Z13, K7, Z12 // 62517c4f11ec + VMOVUPS 
Z28, K7, Z16 // 62217c4f11e0 + VMOVUPS Z13, K7, Z16 // 62317c4f11e8 + VMOVUPS Z28, K7, -17(BP)(SI*8) // 62617c4f11a4f5efffffff + VMOVUPS Z13, K7, -17(BP)(SI*8) // 62717c4f11acf5efffffff + VMOVUPS Z28, K7, (R15) // 62417c4f1127 + VMOVUPS Z13, K7, (R15) // 62517c4f112f + VMOVUPS Z3, K2, Z14 // 62d17c4a11de + VMOVUPS Z12, K2, Z14 // 62517c4a11e6 + VMOVUPS 7(SI)(DI*8), K2, Z14 // 62717c4a10b4fe07000000 + VMOVUPS -15(R14), K2, Z14 // 62517c4a10b6f1ffffff + VMOVUPS Z3, K2, Z28 // 62917c4a11dc + VMOVUPS Z12, K2, Z28 // 62117c4a11e4 + VMOVUPS 7(SI)(DI*8), K2, Z28 // 62617c4a10a4fe07000000 + VMOVUPS -15(R14), K2, Z28 // 62417c4a10a6f1ffffff + VMULPD X8, X7, K5, X6 // 62d1c50d59f0 + VMULPD 7(AX), X7, K5, X6 // 62f1c50d59b007000000 + VMULPD (DI), X7, K5, X6 // 62f1c50d5937 + VMULPD Y1, Y24, K3, Y11 // 6271bd2359d9 + VMULPD 99(R15)(R15*4), Y24, K3, Y11 // 6211bd23599cbf63000000 + VMULPD 15(DX), Y24, K3, Y11 // 6271bd23599a0f000000 + VMULPD Z5, Z19, K4, Z15 // 6271e54459fd + VMULPD Z1, Z19, K4, Z15 // 6271e54459f9 + VMULPD Z5, Z15, K4, Z15 // 6271854c59fd + VMULPD Z1, Z15, K4, Z15 // 6271854c59f9 + VMULPD Z5, Z19, K4, Z30 // 6261e54459f5 + VMULPD Z1, Z19, K4, Z30 // 6261e54459f1 + VMULPD Z5, Z15, K4, Z30 // 6261854c59f5 + VMULPD Z1, Z15, K4, Z30 // 6261854c59f1 + VMULPD Z21, Z14, K2, Z3 // 62b18d4a59dd + VMULPD Z8, Z14, K2, Z3 // 62d18d4a59d8 + VMULPD 7(SI)(DI*1), Z14, K2, Z3 // 62f18d4a599c3e07000000 + VMULPD 15(DX)(BX*8), Z14, K2, Z3 // 62f18d4a599cda0f000000 + VMULPD Z21, Z15, K2, Z3 // 62b1854a59dd + VMULPD Z8, Z15, K2, Z3 // 62d1854a59d8 + VMULPD 7(SI)(DI*1), Z15, K2, Z3 // 62f1854a599c3e07000000 + VMULPD 15(DX)(BX*8), Z15, K2, Z3 // 62f1854a599cda0f000000 + VMULPD Z21, Z14, K2, Z5 // 62b18d4a59ed + VMULPD Z8, Z14, K2, Z5 // 62d18d4a59e8 + VMULPD 7(SI)(DI*1), Z14, K2, Z5 // 62f18d4a59ac3e07000000 + VMULPD 15(DX)(BX*8), Z14, K2, Z5 // 62f18d4a59acda0f000000 + VMULPD Z21, Z15, K2, Z5 // 62b1854a59ed + VMULPD Z8, Z15, K2, Z5 // 62d1854a59e8 + VMULPD 7(SI)(DI*1), Z15, K2, Z5 
// 62f1854a59ac3e07000000 + VMULPD 15(DX)(BX*8), Z15, K2, Z5 // 62f1854a59acda0f000000 + VMULPS X28, X3, K2, X31 // 6201640a59fc + VMULPS 99(R15)(R15*1), X3, K2, X31 // 6201640a59bc3f63000000 + VMULPS (DX), X3, K2, X31 // 6261640a593a + VMULPS Y20, Y18, K3, Y5 // 62b16c2359ec + VMULPS (CX), Y18, K3, Y5 // 62f16c235929 + VMULPS 99(R15), Y18, K3, Y5 // 62d16c2359af63000000 + VMULPS Z23, Z20, K3, Z16 // 62a15c4359c7 + VMULPS Z19, Z20, K3, Z16 // 62a15c4359c3 + VMULPS Z23, Z0, K3, Z16 // 62a17c4b59c7 + VMULPS Z19, Z0, K3, Z16 // 62a17c4b59c3 + VMULPS Z23, Z20, K3, Z9 // 62315c4359cf + VMULPS Z19, Z20, K3, Z9 // 62315c4359cb + VMULPS Z23, Z0, K3, Z9 // 62317c4b59cf + VMULPS Z19, Z0, K3, Z9 // 62317c4b59cb + VMULPS Z24, Z0, K3, Z0 // 62917c4b59c0 + VMULPS Z12, Z0, K3, Z0 // 62d17c4b59c4 + VMULPS -7(DI)(R8*1), Z0, K3, Z0 // 62b17c4b598407f9ffffff + VMULPS (SP), Z0, K3, Z0 // 62f17c4b590424 + VMULPS Z24, Z25, K3, Z0 // 6291344359c0 + VMULPS Z12, Z25, K3, Z0 // 62d1344359c4 + VMULPS -7(DI)(R8*1), Z25, K3, Z0 // 62b13443598407f9ffffff + VMULPS (SP), Z25, K3, Z0 // 62f13443590424 + VMULPS Z24, Z0, K3, Z11 // 62117c4b59d8 + VMULPS Z12, Z0, K3, Z11 // 62517c4b59dc + VMULPS -7(DI)(R8*1), Z0, K3, Z11 // 62317c4b599c07f9ffffff + VMULPS (SP), Z0, K3, Z11 // 62717c4b591c24 + VMULPS Z24, Z25, K3, Z11 // 6211344359d8 + VMULPS Z12, Z25, K3, Z11 // 6251344359dc + VMULPS -7(DI)(R8*1), Z25, K3, Z11 // 62313443599c07f9ffffff + VMULPS (SP), Z25, K3, Z11 // 62713443591c24 + VMULSD X7, X24, K2, X20 // 62e1bf0259e7 + VMULSD X12, X16, K1, X20 // 62c1ff0159e4 or 62c1ff2159e4 or 62c1ff4159e4 + VMULSD -17(BP), X16, K1, X20 // 62e1ff0159a5efffffff or 62e1ff2159a5efffffff or 62e1ff4159a5efffffff + VMULSD -15(R14)(R15*8), X16, K1, X20 // 6281ff0159a4fef1ffffff or 6281ff2159a4fef1ffffff or 6281ff4159a4fef1ffffff + VMULSS X28, X17, K2, X6 // 6291760259f4 + VMULSS X8, X1, K1, X6 // 62d1760959f0 or 62d1762959f0 or 62d1764959f0 + VMULSS 15(R8)(R14*1), X1, K1, X6 // 6291760959b4300f000000 or 
6291762959b4300f000000 or 6291764959b4300f000000 + VMULSS 15(R8)(R14*2), X1, K1, X6 // 6291760959b4700f000000 or 6291762959b4700f000000 or 6291764959b4700f000000 + VPABSD X16, K7, X12 // 62327d0f1ee0 + VPABSD 99(R15)(R15*8), K7, X12 // 62127d0f1ea4ff63000000 + VPABSD 7(AX)(CX*8), K7, X12 // 62727d0f1ea4c807000000 + VPABSD Y16, K7, Y17 // 62a27d2f1ec8 + VPABSD -17(BP), K7, Y17 // 62e27d2f1e8defffffff + VPABSD -15(R14)(R15*8), K7, Y17 // 62827d2f1e8cfef1ffffff + VPABSD Z20, K6, Z1 // 62b27d4e1ecc + VPABSD Z9, K6, Z1 // 62d27d4e1ec9 + VPABSD (BX), K6, Z1 // 62f27d4e1e0b + VPABSD -17(BP)(SI*1), K6, Z1 // 62f27d4e1e8c35efffffff + VPABSD Z20, K6, Z9 // 62327d4e1ecc + VPABSD Z9, K6, Z9 // 62527d4e1ec9 + VPABSD (BX), K6, Z9 // 62727d4e1e0b + VPABSD -17(BP)(SI*1), K6, Z9 // 62727d4e1e8c35efffffff + VPABSQ X8, K3, X28 // 6242fd0b1fe0 + VPABSQ (AX), K3, X28 // 6262fd0b1f20 + VPABSQ 7(SI), K3, X28 // 6262fd0b1fa607000000 + VPABSQ Y6, K7, Y12 // 6272fd2f1fe6 + VPABSQ 17(SP)(BP*2), K7, Y12 // 6272fd2f1fa46c11000000 + VPABSQ -7(DI)(R8*4), K7, Y12 // 6232fd2f1fa487f9ffffff + VPABSQ Z26, K4, Z30 // 6202fd4c1ff2 + VPABSQ Z22, K4, Z30 // 6222fd4c1ff6 + VPABSQ 15(R8)(R14*4), K4, Z30 // 6202fd4c1fb4b00f000000 + VPABSQ -7(CX)(DX*4), K4, Z30 // 6262fd4c1fb491f9ffffff + VPABSQ Z26, K4, Z5 // 6292fd4c1fea + VPABSQ Z22, K4, Z5 // 62b2fd4c1fee + VPABSQ 15(R8)(R14*4), K4, Z5 // 6292fd4c1facb00f000000 + VPABSQ -7(CX)(DX*4), K4, Z5 // 62f2fd4c1fac91f9ffffff + VPADDD X27, X2, K1, X2 // 62916d09fed3 + VPADDD (R14), X2, K1, X2 // 62d16d09fe16 + VPADDD -7(DI)(R8*8), X2, K1, X2 // 62b16d09fe94c7f9ffffff + VPADDD Y1, Y6, K7, Y1 // 62f14d2ffec9 + VPADDD 7(SI)(DI*4), Y6, K7, Y1 // 62f14d2ffe8cbe07000000 + VPADDD -7(DI)(R8*2), Y6, K7, Y1 // 62b14d2ffe8c47f9ffffff + VPADDD Z13, Z11, K2, Z14 // 6251254afef5 + VPADDD Z14, Z11, K2, Z14 // 6251254afef6 + VPADDD (CX), Z11, K2, Z14 // 6271254afe31 + VPADDD 99(R15), Z11, K2, Z14 // 6251254afeb763000000 + VPADDD Z13, Z5, K2, Z14 // 6251554afef5 + VPADDD Z14, Z5, 
K2, Z14 // 6251554afef6 + VPADDD (CX), Z5, K2, Z14 // 6271554afe31 + VPADDD 99(R15), Z5, K2, Z14 // 6251554afeb763000000 + VPADDD Z13, Z11, K2, Z27 // 6241254afedd + VPADDD Z14, Z11, K2, Z27 // 6241254afede + VPADDD (CX), Z11, K2, Z27 // 6261254afe19 + VPADDD 99(R15), Z11, K2, Z27 // 6241254afe9f63000000 + VPADDD Z13, Z5, K2, Z27 // 6241554afedd + VPADDD Z14, Z5, K2, Z27 // 6241554afede + VPADDD (CX), Z5, K2, Z27 // 6261554afe19 + VPADDD 99(R15), Z5, K2, Z27 // 6241554afe9f63000000 + VPADDQ X30, X22, K4, X26 // 6201cd04d4d6 + VPADDQ 99(R15)(R15*4), X22, K4, X26 // 6201cd04d494bf63000000 + VPADDQ 15(DX), X22, K4, X26 // 6261cd04d4920f000000 + VPADDQ Y19, Y0, K1, Y9 // 6231fd29d4cb + VPADDQ 17(SP), Y0, K1, Y9 // 6271fd29d48c2411000000 + VPADDQ -17(BP)(SI*4), Y0, K1, Y9 // 6271fd29d48cb5efffffff + VPADDQ Z6, Z2, K3, Z5 // 62f1ed4bd4ee + VPADDQ Z14, Z2, K3, Z5 // 62d1ed4bd4ee + VPADDQ 99(R15)(R15*2), Z2, K3, Z5 // 6291ed4bd4ac7f63000000 + VPADDQ -7(DI), Z2, K3, Z5 // 62f1ed4bd4aff9ffffff + VPADDQ Z6, Z2, K3, Z23 // 62e1ed4bd4fe + VPADDQ Z14, Z2, K3, Z23 // 62c1ed4bd4fe + VPADDQ 99(R15)(R15*2), Z2, K3, Z23 // 6281ed4bd4bc7f63000000 + VPADDQ -7(DI), Z2, K3, Z23 // 62e1ed4bd4bff9ffffff + VPANDD X1, X8, K3, X7 // 62f13d0bdbf9 + VPANDD 15(R8), X8, K3, X7 // 62d13d0bdbb80f000000 + VPANDD (BP), X8, K3, X7 // 62f13d0bdb7d00 + VPANDD Y13, Y2, K2, Y14 // 62516d2adbf5 + VPANDD -7(CX), Y2, K2, Y14 // 62716d2adbb1f9ffffff + VPANDD 15(DX)(BX*4), Y2, K2, Y14 // 62716d2adbb49a0f000000 + VPANDD Z6, Z9, K1, Z12 // 62713549dbe6 + VPANDD Z25, Z9, K1, Z12 // 62113549dbe1 + VPANDD -15(R14)(R15*1), Z9, K1, Z12 // 62113549dba43ef1ffffff + VPANDD -15(BX), Z9, K1, Z12 // 62713549dba3f1ffffff + VPANDD Z6, Z12, K1, Z12 // 62711d49dbe6 + VPANDD Z25, Z12, K1, Z12 // 62111d49dbe1 + VPANDD -15(R14)(R15*1), Z12, K1, Z12 // 62111d49dba43ef1ffffff + VPANDD -15(BX), Z12, K1, Z12 // 62711d49dba3f1ffffff + VPANDD Z6, Z9, K1, Z17 // 62e13549dbce + VPANDD Z25, Z9, K1, Z17 // 62813549dbc9 + VPANDD 
-15(R14)(R15*1), Z9, K1, Z17 // 62813549db8c3ef1ffffff + VPANDD -15(BX), Z9, K1, Z17 // 62e13549db8bf1ffffff + VPANDD Z6, Z12, K1, Z17 // 62e11d49dbce + VPANDD Z25, Z12, K1, Z17 // 62811d49dbc9 + VPANDD -15(R14)(R15*1), Z12, K1, Z17 // 62811d49db8c3ef1ffffff + VPANDD -15(BX), Z12, K1, Z17 // 62e11d49db8bf1ffffff + VPANDND X0, X15, K2, X0 // 62f1050adfc0 + VPANDND 15(R8)(R14*8), X15, K2, X0 // 6291050adf84f00f000000 + VPANDND -15(R14)(R15*2), X15, K2, X0 // 6291050adf847ef1ffffff + VPANDND Y22, Y15, K1, Y27 // 62210529dfde + VPANDND 99(R15)(R15*8), Y15, K1, Y27 // 62010529df9cff63000000 + VPANDND 7(AX)(CX*8), Y15, K1, Y27 // 62610529df9cc807000000 + VPANDND Z3, Z8, K7, Z3 // 62f13d4fdfdb + VPANDND Z27, Z8, K7, Z3 // 62913d4fdfdb + VPANDND 7(AX)(CX*4), Z8, K7, Z3 // 62f13d4fdf9c8807000000 + VPANDND 7(AX)(CX*1), Z8, K7, Z3 // 62f13d4fdf9c0807000000 + VPANDND Z3, Z2, K7, Z3 // 62f16d4fdfdb + VPANDND Z27, Z2, K7, Z3 // 62916d4fdfdb + VPANDND 7(AX)(CX*4), Z2, K7, Z3 // 62f16d4fdf9c8807000000 + VPANDND 7(AX)(CX*1), Z2, K7, Z3 // 62f16d4fdf9c0807000000 + VPANDND Z3, Z8, K7, Z21 // 62e13d4fdfeb + VPANDND Z27, Z8, K7, Z21 // 62813d4fdfeb + VPANDND 7(AX)(CX*4), Z8, K7, Z21 // 62e13d4fdfac8807000000 + VPANDND 7(AX)(CX*1), Z8, K7, Z21 // 62e13d4fdfac0807000000 + VPANDND Z3, Z2, K7, Z21 // 62e16d4fdfeb + VPANDND Z27, Z2, K7, Z21 // 62816d4fdfeb + VPANDND 7(AX)(CX*4), Z2, K7, Z21 // 62e16d4fdfac8807000000 + VPANDND 7(AX)(CX*1), Z2, K7, Z21 // 62e16d4fdfac0807000000 + VPANDNQ X0, X21, K1, X16 // 62e1d501dfc0 + VPANDNQ -15(R14)(R15*1), X21, K1, X16 // 6281d501df843ef1ffffff + VPANDNQ -15(BX), X21, K1, X16 // 62e1d501df83f1ffffff + VPANDNQ Y24, Y18, K1, Y20 // 6281ed21dfe0 + VPANDNQ (AX), Y18, K1, Y20 // 62e1ed21df20 + VPANDNQ 7(SI), Y18, K1, Y20 // 62e1ed21dfa607000000 + VPANDNQ Z20, Z0, K1, Z7 // 62b1fd49dffc + VPANDNQ Z28, Z0, K1, Z7 // 6291fd49dffc + VPANDNQ (SI), Z0, K1, Z7 // 62f1fd49df3e + VPANDNQ 7(SI)(DI*2), Z0, K1, Z7 // 62f1fd49dfbc7e07000000 + VPANDNQ Z20, Z6, K1, Z7 // 
62b1cd49dffc + VPANDNQ Z28, Z6, K1, Z7 // 6291cd49dffc + VPANDNQ (SI), Z6, K1, Z7 // 62f1cd49df3e + VPANDNQ 7(SI)(DI*2), Z6, K1, Z7 // 62f1cd49dfbc7e07000000 + VPANDNQ Z20, Z0, K1, Z9 // 6231fd49dfcc + VPANDNQ Z28, Z0, K1, Z9 // 6211fd49dfcc + VPANDNQ (SI), Z0, K1, Z9 // 6271fd49df0e + VPANDNQ 7(SI)(DI*2), Z0, K1, Z9 // 6271fd49df8c7e07000000 + VPANDNQ Z20, Z6, K1, Z9 // 6231cd49dfcc + VPANDNQ Z28, Z6, K1, Z9 // 6211cd49dfcc + VPANDNQ (SI), Z6, K1, Z9 // 6271cd49df0e + VPANDNQ 7(SI)(DI*2), Z6, K1, Z9 // 6271cd49df8c7e07000000 + VPANDQ X7, X22, K7, X28 // 6261cd07dbe7 + VPANDQ 7(AX)(CX*4), X22, K7, X28 // 6261cd07dba48807000000 + VPANDQ 7(AX)(CX*1), X22, K7, X28 // 6261cd07dba40807000000 + VPANDQ Y19, Y3, K2, Y9 // 6231e52adbcb + VPANDQ (BX), Y3, K2, Y9 // 6271e52adb0b + VPANDQ -17(BP)(SI*1), Y3, K2, Y9 // 6271e52adb8c35efffffff + VPANDQ Z12, Z9, K4, Z3 // 62d1b54cdbdc + VPANDQ Z22, Z9, K4, Z3 // 62b1b54cdbde + VPANDQ 17(SP)(BP*8), Z9, K4, Z3 // 62f1b54cdb9cec11000000 + VPANDQ 17(SP)(BP*4), Z9, K4, Z3 // 62f1b54cdb9cac11000000 + VPANDQ Z12, Z19, K4, Z3 // 62d1e544dbdc + VPANDQ Z22, Z19, K4, Z3 // 62b1e544dbde + VPANDQ 17(SP)(BP*8), Z19, K4, Z3 // 62f1e544db9cec11000000 + VPANDQ 17(SP)(BP*4), Z19, K4, Z3 // 62f1e544db9cac11000000 + VPANDQ Z12, Z9, K4, Z30 // 6241b54cdbf4 + VPANDQ Z22, Z9, K4, Z30 // 6221b54cdbf6 + VPANDQ 17(SP)(BP*8), Z9, K4, Z30 // 6261b54cdbb4ec11000000 + VPANDQ 17(SP)(BP*4), Z9, K4, Z30 // 6261b54cdbb4ac11000000 + VPANDQ Z12, Z19, K4, Z30 // 6241e544dbf4 + VPANDQ Z22, Z19, K4, Z30 // 6221e544dbf6 + VPANDQ 17(SP)(BP*8), Z19, K4, Z30 // 6261e544dbb4ec11000000 + VPANDQ 17(SP)(BP*4), Z19, K4, Z30 // 6261e544dbb4ac11000000 + VPBLENDMD X14, X12, K4, X0 // 62d21d0c64c6 + VPBLENDMD 17(SP), X12, K4, X0 // 62f21d0c64842411000000 + VPBLENDMD -17(BP)(SI*4), X12, K4, X0 // 62f21d0c6484b5efffffff + VPBLENDMD Y6, Y31, K4, Y6 // 62f2052464f6 + VPBLENDMD -17(BP)(SI*2), Y31, K4, Y6 // 62f2052464b475efffffff + VPBLENDMD 7(AX)(CX*2), Y31, K4, Y6 // 
62f2052464b44807000000 + VPBLENDMD Z20, Z2, K7, Z22 // 62a26d4f64f4 + VPBLENDMD Z9, Z2, K7, Z22 // 62c26d4f64f1 + VPBLENDMD 99(R15)(R15*1), Z2, K7, Z22 // 62826d4f64b43f63000000 + VPBLENDMD (DX), Z2, K7, Z22 // 62e26d4f6432 + VPBLENDMD Z20, Z31, K7, Z22 // 62a2054764f4 + VPBLENDMD Z9, Z31, K7, Z22 // 62c2054764f1 + VPBLENDMD 99(R15)(R15*1), Z31, K7, Z22 // 6282054764b43f63000000 + VPBLENDMD (DX), Z31, K7, Z22 // 62e205476432 + VPBLENDMD Z20, Z2, K7, Z7 // 62b26d4f64fc + VPBLENDMD Z9, Z2, K7, Z7 // 62d26d4f64f9 + VPBLENDMD 99(R15)(R15*1), Z2, K7, Z7 // 62926d4f64bc3f63000000 + VPBLENDMD (DX), Z2, K7, Z7 // 62f26d4f643a + VPBLENDMD Z20, Z31, K7, Z7 // 62b2054764fc + VPBLENDMD Z9, Z31, K7, Z7 // 62d2054764f9 + VPBLENDMD 99(R15)(R15*1), Z31, K7, Z7 // 6292054764bc3f63000000 + VPBLENDMD (DX), Z31, K7, Z7 // 62f20547643a + VPBLENDMQ X15, X17, K2, X5 // 62d2f50264ef + VPBLENDMQ 7(AX), X17, K2, X5 // 62f2f50264a807000000 + VPBLENDMQ (DI), X17, K2, X5 // 62f2f502642f + VPBLENDMQ Y7, Y19, K5, Y11 // 6272e52564df + VPBLENDMQ 15(R8)(R14*1), Y19, K5, Y11 // 6212e525649c300f000000 + VPBLENDMQ 15(R8)(R14*2), Y19, K5, Y11 // 6212e525649c700f000000 + VPBLENDMQ Z28, Z12, K3, Z1 // 62929d4b64cc + VPBLENDMQ Z13, Z12, K3, Z1 // 62d29d4b64cd + VPBLENDMQ -17(BP)(SI*8), Z12, K3, Z1 // 62f29d4b648cf5efffffff + VPBLENDMQ (R15), Z12, K3, Z1 // 62d29d4b640f + VPBLENDMQ Z28, Z16, K3, Z1 // 6292fd4364cc + VPBLENDMQ Z13, Z16, K3, Z1 // 62d2fd4364cd + VPBLENDMQ -17(BP)(SI*8), Z16, K3, Z1 // 62f2fd43648cf5efffffff + VPBLENDMQ (R15), Z16, K3, Z1 // 62d2fd43640f + VPBLENDMQ Z28, Z12, K3, Z3 // 62929d4b64dc + VPBLENDMQ Z13, Z12, K3, Z3 // 62d29d4b64dd + VPBLENDMQ -17(BP)(SI*8), Z12, K3, Z3 // 62f29d4b649cf5efffffff + VPBLENDMQ (R15), Z12, K3, Z3 // 62d29d4b641f + VPBLENDMQ Z28, Z16, K3, Z3 // 6292fd4364dc + VPBLENDMQ Z13, Z16, K3, Z3 // 62d2fd4364dd + VPBLENDMQ -17(BP)(SI*8), Z16, K3, Z3 // 62f2fd43649cf5efffffff + VPBLENDMQ (R15), Z16, K3, Z3 // 62d2fd43641f + VPBROADCASTD SP, K1, X15 // 
62727d097cfc + VPBROADCASTD R14, K1, X15 // 62527d097cfe + VPBROADCASTD AX, K7, Y12 // 62727d2f7ce0 + VPBROADCASTD R9, K7, Y12 // 62527d2f7ce1 + VPBROADCASTD CX, K1, Z3 // 62f27d497cd9 + VPBROADCASTD SP, K1, Z3 // 62f27d497cdc + VPBROADCASTD CX, K1, Z5 // 62f27d497ce9 + VPBROADCASTD SP, K1, Z5 // 62f27d497cec + VPBROADCASTD X18, K1, X26 // 62227d0958d2 + VPBROADCASTD (R14), K1, X26 // 62427d095816 + VPBROADCASTD -7(DI)(R8*8), K1, X26 // 62227d095894c7f9ffffff + VPBROADCASTD X21, K1, Y3 // 62b27d2958dd + VPBROADCASTD 99(R15)(R15*4), K1, Y3 // 62927d29589cbf63000000 + VPBROADCASTD 15(DX), K1, Y3 // 62f27d29589a0f000000 + VPBROADCASTD X1, K7, Z14 // 62727d4f58f1 + VPBROADCASTD (CX), K7, Z14 // 62727d4f5831 + VPBROADCASTD 99(R15), K7, Z14 // 62527d4f58b763000000 + VPBROADCASTD X1, K7, Z15 // 62727d4f58f9 + VPBROADCASTD (CX), K7, Z15 // 62727d4f5839 + VPBROADCASTD 99(R15), K7, Z15 // 62527d4f58bf63000000 + VPBROADCASTQ R9, K2, X3 // 62d2fd0a7cd9 + VPBROADCASTQ R13, K2, X3 // 62d2fd0a7cdd + VPBROADCASTQ DX, K4, Y7 // 62f2fd2c7cfa + VPBROADCASTQ BP, K4, Y7 // 62f2fd2c7cfd + VPBROADCASTQ R10, K1, Z20 // 62c2fd497ce2 + VPBROADCASTQ CX, K1, Z20 // 62e2fd497ce1 + VPBROADCASTQ R10, K1, Z0 // 62d2fd497cc2 + VPBROADCASTQ CX, K1, Z0 // 62f2fd497cc1 + VPBROADCASTQ X0, K3, X7 // 62f2fd0b59f8 + VPBROADCASTQ 17(SP)(BP*2), K3, X7 // 62f2fd0b59bc6c11000000 + VPBROADCASTQ -7(DI)(R8*4), K3, X7 // 62b2fd0b59bc87f9ffffff + VPBROADCASTQ X0, K4, Y0 // 62f2fd2c59c0 + VPBROADCASTQ 15(R8), K4, Y0 // 62d2fd2c59800f000000 + VPBROADCASTQ (BP), K4, Y0 // 62f2fd2c594500 + VPBROADCASTQ X24, K5, Z23 // 6282fd4d59f8 + VPBROADCASTQ 15(R8)(R14*8), K5, Z23 // 6282fd4d59bcf00f000000 + VPBROADCASTQ -15(R14)(R15*2), K5, Z23 // 6282fd4d59bc7ef1ffffff + VPBROADCASTQ X24, K5, Z19 // 6282fd4d59d8 + VPBROADCASTQ 15(R8)(R14*8), K5, Z19 // 6282fd4d599cf00f000000 + VPBROADCASTQ -15(R14)(R15*2), K5, Z19 // 6282fd4d599c7ef1ffffff + VPCMPD $64, X13, X11, K5, K6 // 62d3250d1ff540 + VPCMPD $64, 7(SI)(DI*1), X11, K5, K6 
// 62f3250d1fb43e0700000040 + VPCMPD $64, 15(DX)(BX*8), X11, K5, K6 // 62f3250d1fb4da0f00000040 + VPCMPD $64, X13, X11, K5, K7 // 62d3250d1ffd40 + VPCMPD $64, 7(SI)(DI*1), X11, K5, K7 // 62f3250d1fbc3e0700000040 + VPCMPD $64, 15(DX)(BX*8), X11, K5, K7 // 62f3250d1fbcda0f00000040 + VPCMPD $27, Y31, Y9, K3, K6 // 6293352b1ff71b + VPCMPD $27, 99(R15)(R15*2), Y9, K3, K6 // 6293352b1fb47f630000001b + VPCMPD $27, -7(DI), Y9, K3, K6 // 62f3352b1fb7f9ffffff1b + VPCMPD $27, Y31, Y9, K3, K4 // 6293352b1fe71b + VPCMPD $27, 99(R15)(R15*2), Y9, K3, K4 // 6293352b1fa47f630000001b + VPCMPD $27, -7(DI), Y9, K3, K4 // 62f3352b1fa7f9ffffff1b + VPCMPD $47, Z17, Z20, K4, K4 // 62b35d441fe12f + VPCMPD $47, Z0, Z20, K4, K4 // 62f35d441fe02f + VPCMPD $47, -7(CX), Z20, K4, K4 // 62f35d441fa1f9ffffff2f + VPCMPD $47, 15(DX)(BX*4), Z20, K4, K4 // 62f35d441fa49a0f0000002f + VPCMPD $47, Z17, Z0, K4, K4 // 62b37d4c1fe12f + VPCMPD $47, Z0, Z0, K4, K4 // 62f37d4c1fe02f + VPCMPD $47, -7(CX), Z0, K4, K4 // 62f37d4c1fa1f9ffffff2f + VPCMPD $47, 15(DX)(BX*4), Z0, K4, K4 // 62f37d4c1fa49a0f0000002f + VPCMPD $47, Z17, Z20, K4, K6 // 62b35d441ff12f + VPCMPD $47, Z0, Z20, K4, K6 // 62f35d441ff02f + VPCMPD $47, -7(CX), Z20, K4, K6 // 62f35d441fb1f9ffffff2f + VPCMPD $47, 15(DX)(BX*4), Z20, K4, K6 // 62f35d441fb49a0f0000002f + VPCMPD $47, Z17, Z0, K4, K6 // 62b37d4c1ff12f + VPCMPD $47, Z0, Z0, K4, K6 // 62f37d4c1ff02f + VPCMPD $47, -7(CX), Z0, K4, K6 // 62f37d4c1fb1f9ffffff2f + VPCMPD $47, 15(DX)(BX*4), Z0, K4, K6 // 62f37d4c1fb49a0f0000002f + VPCMPEQD X14, X16, K3, K6 // 62d17d0376f6 + VPCMPEQD -7(CX), X16, K3, K6 // 62f17d0376b1f9ffffff + VPCMPEQD 15(DX)(BX*4), X16, K3, K6 // 62f17d0376b49a0f000000 + VPCMPEQD X14, X16, K3, K5 // 62d17d0376ee + VPCMPEQD -7(CX), X16, K3, K5 // 62f17d0376a9f9ffffff + VPCMPEQD 15(DX)(BX*4), X16, K3, K5 // 62f17d0376ac9a0f000000 + VPCMPEQD Y13, Y28, K3, K1 // 62d11d2376cd + VPCMPEQD 15(DX)(BX*1), Y28, K3, K1 // 62f11d23768c1a0f000000 + VPCMPEQD -7(CX)(DX*2), Y28, K3, K1 // 
62f11d23768c51f9ffffff + VPCMPEQD Y13, Y28, K3, K5 // 62d11d2376ed + VPCMPEQD 15(DX)(BX*1), Y28, K3, K5 // 62f11d2376ac1a0f000000 + VPCMPEQD -7(CX)(DX*2), Y28, K3, K5 // 62f11d2376ac51f9ffffff + VPCMPEQD Z6, Z21, K2, K3 // 62f1554276de + VPCMPEQD Z9, Z21, K2, K3 // 62d1554276d9 + VPCMPEQD (AX), Z21, K2, K3 // 62f155427618 + VPCMPEQD 7(SI), Z21, K2, K3 // 62f15542769e07000000 + VPCMPEQD Z6, Z9, K2, K3 // 62f1354a76de + VPCMPEQD Z9, Z9, K2, K3 // 62d1354a76d9 + VPCMPEQD (AX), Z9, K2, K3 // 62f1354a7618 + VPCMPEQD 7(SI), Z9, K2, K3 // 62f1354a769e07000000 + VPCMPEQD Z6, Z21, K2, K1 // 62f1554276ce + VPCMPEQD Z9, Z21, K2, K1 // 62d1554276c9 + VPCMPEQD (AX), Z21, K2, K1 // 62f155427608 + VPCMPEQD 7(SI), Z21, K2, K1 // 62f15542768e07000000 + VPCMPEQD Z6, Z9, K2, K1 // 62f1354a76ce + VPCMPEQD Z9, Z9, K2, K1 // 62d1354a76c9 + VPCMPEQD (AX), Z9, K2, K1 // 62f1354a7608 + VPCMPEQD 7(SI), Z9, K2, K1 // 62f1354a768e07000000 + VPCMPEQQ X14, X11, K1, K5 // 62d2a50929ee + VPCMPEQQ 99(R15)(R15*8), X11, K1, K5 // 6292a50929acff63000000 + VPCMPEQQ 7(AX)(CX*8), X11, K1, K5 // 62f2a50929acc807000000 + VPCMPEQQ X14, X11, K1, K4 // 62d2a50929e6 + VPCMPEQQ 99(R15)(R15*8), X11, K1, K4 // 6292a50929a4ff63000000 + VPCMPEQQ 7(AX)(CX*8), X11, K1, K4 // 62f2a50929a4c807000000 + VPCMPEQQ Y2, Y7, K2, K7 // 62f2c52a29fa + VPCMPEQQ -17(BP), Y7, K2, K7 // 62f2c52a29bdefffffff + VPCMPEQQ -15(R14)(R15*8), Y7, K2, K7 // 6292c52a29bcfef1ffffff + VPCMPEQQ Y2, Y7, K2, K6 // 62f2c52a29f2 + VPCMPEQQ -17(BP), Y7, K2, K6 // 62f2c52a29b5efffffff + VPCMPEQQ -15(R14)(R15*8), Y7, K2, K6 // 6292c52a29b4fef1ffffff + VPCMPEQQ Z20, Z1, K1, K4 // 62b2f54929e4 + VPCMPEQQ Z9, Z1, K1, K4 // 62d2f54929e1 + VPCMPEQQ (BX), Z1, K1, K4 // 62f2f5492923 + VPCMPEQQ -17(BP)(SI*1), Z1, K1, K4 // 62f2f54929a435efffffff + VPCMPEQQ Z20, Z9, K1, K4 // 62b2b54929e4 + VPCMPEQQ Z9, Z9, K1, K4 // 62d2b54929e1 + VPCMPEQQ (BX), Z9, K1, K4 // 62f2b5492923 + VPCMPEQQ -17(BP)(SI*1), Z9, K1, K4 // 62f2b54929a435efffffff + VPCMPEQQ Z20, Z1, K1, 
K6 // 62b2f54929f4 + VPCMPEQQ Z9, Z1, K1, K6 // 62d2f54929f1 + VPCMPEQQ (BX), Z1, K1, K6 // 62f2f5492933 + VPCMPEQQ -17(BP)(SI*1), Z1, K1, K6 // 62f2f54929b435efffffff + VPCMPEQQ Z20, Z9, K1, K6 // 62b2b54929f4 + VPCMPEQQ Z9, Z9, K1, K6 // 62d2b54929f1 + VPCMPEQQ (BX), Z9, K1, K6 // 62f2b5492933 + VPCMPEQQ -17(BP)(SI*1), Z9, K1, K6 // 62f2b54929b435efffffff + VPCMPGTD X12, X23, K4, K4 // 62d1450466e4 + VPCMPGTD 15(R8)(R14*4), X23, K4, K4 // 6291450466a4b00f000000 + VPCMPGTD -7(CX)(DX*4), X23, K4, K4 // 62f1450466a491f9ffffff + VPCMPGTD X12, X23, K4, K6 // 62d1450466f4 + VPCMPGTD 15(R8)(R14*4), X23, K4, K6 // 6291450466b4b00f000000 + VPCMPGTD -7(CX)(DX*4), X23, K4, K6 // 62f1450466b491f9ffffff + VPCMPGTD Y3, Y9, K1, K4 // 62f1352966e3 + VPCMPGTD 15(R8)(R14*8), Y9, K1, K4 // 6291352966a4f00f000000 + VPCMPGTD -15(R14)(R15*2), Y9, K1, K4 // 6291352966a47ef1ffffff + VPCMPGTD Y3, Y9, K1, K5 // 62f1352966eb + VPCMPGTD 15(R8)(R14*8), Y9, K1, K5 // 6291352966acf00f000000 + VPCMPGTD -15(R14)(R15*2), Y9, K1, K5 // 6291352966ac7ef1ffffff + VPCMPGTD Z12, Z14, K3, K2 // 62d10d4b66d4 + VPCMPGTD Z13, Z14, K3, K2 // 62d10d4b66d5 + VPCMPGTD 17(SP)(BP*1), Z14, K3, K2 // 62f10d4b66942c11000000 + VPCMPGTD -7(CX)(DX*8), Z14, K3, K2 // 62f10d4b6694d1f9ffffff + VPCMPGTD Z12, Z13, K3, K2 // 62d1154b66d4 + VPCMPGTD Z13, Z13, K3, K2 // 62d1154b66d5 + VPCMPGTD 17(SP)(BP*1), Z13, K3, K2 // 62f1154b66942c11000000 + VPCMPGTD -7(CX)(DX*8), Z13, K3, K2 // 62f1154b6694d1f9ffffff + VPCMPGTD Z12, Z14, K3, K7 // 62d10d4b66fc + VPCMPGTD Z13, Z14, K3, K7 // 62d10d4b66fd + VPCMPGTD 17(SP)(BP*1), Z14, K3, K7 // 62f10d4b66bc2c11000000 + VPCMPGTD -7(CX)(DX*8), Z14, K3, K7 // 62f10d4b66bcd1f9ffffff + VPCMPGTD Z12, Z13, K3, K7 // 62d1154b66fc + VPCMPGTD Z13, Z13, K3, K7 // 62d1154b66fd + VPCMPGTD 17(SP)(BP*1), Z13, K3, K7 // 62f1154b66bc2c11000000 + VPCMPGTD -7(CX)(DX*8), Z13, K3, K7 // 62f1154b66bcd1f9ffffff + VPCMPGTQ X23, X16, K4, K0 // 62b2fd0437c7 + VPCMPGTQ (R8), X16, K4, K0 // 62d2fd043700 + VPCMPGTQ 
15(DX)(BX*2), X16, K4, K0 // 62f2fd0437845a0f000000 + VPCMPGTQ X23, X16, K4, K5 // 62b2fd0437ef + VPCMPGTQ (R8), X16, K4, K5 // 62d2fd043728 + VPCMPGTQ 15(DX)(BX*2), X16, K4, K5 // 62f2fd0437ac5a0f000000 + VPCMPGTQ Y9, Y2, K5, K6 // 62d2ed2d37f1 + VPCMPGTQ -15(R14)(R15*1), Y2, K5, K6 // 6292ed2d37b43ef1ffffff + VPCMPGTQ -15(BX), Y2, K5, K6 // 62f2ed2d37b3f1ffffff + VPCMPGTQ Y9, Y2, K5, K5 // 62d2ed2d37e9 + VPCMPGTQ -15(R14)(R15*1), Y2, K5, K5 // 6292ed2d37ac3ef1ffffff + VPCMPGTQ -15(BX), Y2, K5, K5 // 62f2ed2d37abf1ffffff + VPCMPGTQ Z2, Z21, K7, K1 // 62f2d54737ca + VPCMPGTQ Z7, Z21, K7, K1 // 62f2d54737cf + VPCMPGTQ -17(BP)(SI*2), Z21, K7, K1 // 62f2d547378c75efffffff + VPCMPGTQ 7(AX)(CX*2), Z21, K7, K1 // 62f2d547378c4807000000 + VPCMPGTQ Z2, Z9, K7, K1 // 62f2b54f37ca + VPCMPGTQ Z7, Z9, K7, K1 // 62f2b54f37cf + VPCMPGTQ -17(BP)(SI*2), Z9, K7, K1 // 62f2b54f378c75efffffff + VPCMPGTQ 7(AX)(CX*2), Z9, K7, K1 // 62f2b54f378c4807000000 + VPCMPGTQ Z2, Z21, K7, K5 // 62f2d54737ea + VPCMPGTQ Z7, Z21, K7, K5 // 62f2d54737ef + VPCMPGTQ -17(BP)(SI*2), Z21, K7, K5 // 62f2d54737ac75efffffff + VPCMPGTQ 7(AX)(CX*2), Z21, K7, K5 // 62f2d54737ac4807000000 + VPCMPGTQ Z2, Z9, K7, K5 // 62f2b54f37ea + VPCMPGTQ Z7, Z9, K7, K5 // 62f2b54f37ef + VPCMPGTQ -17(BP)(SI*2), Z9, K7, K5 // 62f2b54f37ac75efffffff + VPCMPGTQ 7(AX)(CX*2), Z9, K7, K5 // 62f2b54f37ac4807000000 + VPCMPQ $82, X24, X31, K7, K4 // 629385071fe052 + VPCMPQ $82, -17(BP)(SI*2), X31, K7, K4 // 62f385071fa475efffffff52 + VPCMPQ $82, 7(AX)(CX*2), X31, K7, K4 // 62f385071fa4480700000052 + VPCMPQ $82, X24, X31, K7, K6 // 629385071ff052 + VPCMPQ $82, -17(BP)(SI*2), X31, K7, K6 // 62f385071fb475efffffff52 + VPCMPQ $82, 7(AX)(CX*2), X31, K7, K6 // 62f385071fb4480700000052 + VPCMPQ $126, Y30, Y14, K4, K0 // 62938d2c1fc67e + VPCMPQ $126, (SI), Y14, K4, K0 // 62f38d2c1f067e + VPCMPQ $126, 7(SI)(DI*2), Y14, K4, K0 // 62f38d2c1f847e070000007e + VPCMPQ $126, Y30, Y14, K4, K7 // 62938d2c1ffe7e + VPCMPQ $126, (SI), Y14, K4, K7 // 
62f38d2c1f3e7e + VPCMPQ $126, 7(SI)(DI*2), Y14, K4, K7 // 62f38d2c1fbc7e070000007e + VPCMPQ $94, Z3, Z27, K4, K5 // 62f3a5441feb5e + VPCMPQ $94, Z0, Z27, K4, K5 // 62f3a5441fe85e + VPCMPQ $94, (R14), Z27, K4, K5 // 62d3a5441f2e5e + VPCMPQ $94, -7(DI)(R8*8), Z27, K4, K5 // 62b3a5441facc7f9ffffff5e + VPCMPQ $94, Z3, Z14, K4, K5 // 62f38d4c1feb5e + VPCMPQ $94, Z0, Z14, K4, K5 // 62f38d4c1fe85e + VPCMPQ $94, (R14), Z14, K4, K5 // 62d38d4c1f2e5e + VPCMPQ $94, -7(DI)(R8*8), Z14, K4, K5 // 62b38d4c1facc7f9ffffff5e + VPCMPQ $94, Z3, Z27, K4, K4 // 62f3a5441fe35e + VPCMPQ $94, Z0, Z27, K4, K4 // 62f3a5441fe05e + VPCMPQ $94, (R14), Z27, K4, K4 // 62d3a5441f265e + VPCMPQ $94, -7(DI)(R8*8), Z27, K4, K4 // 62b3a5441fa4c7f9ffffff5e + VPCMPQ $94, Z3, Z14, K4, K4 // 62f38d4c1fe35e + VPCMPQ $94, Z0, Z14, K4, K4 // 62f38d4c1fe05e + VPCMPQ $94, (R14), Z14, K4, K4 // 62d38d4c1f265e + VPCMPQ $94, -7(DI)(R8*8), Z14, K4, K4 // 62b38d4c1fa4c7f9ffffff5e + VPCMPUD $67, X23, X11, K3, K6 // 62b3250b1ef743 + VPCMPUD $67, (R14), X11, K3, K6 // 62d3250b1e3643 + VPCMPUD $67, -7(DI)(R8*8), X11, K3, K6 // 62b3250b1eb4c7f9ffffff43 + VPCMPUD $67, X23, X11, K3, K4 // 62b3250b1ee743 + VPCMPUD $67, (R14), X11, K3, K4 // 62d3250b1e2643 + VPCMPUD $67, -7(DI)(R8*8), X11, K3, K4 // 62b3250b1ea4c7f9ffffff43 + VPCMPUD $127, Y1, Y16, K4, K4 // 62f37d241ee17f + VPCMPUD $127, 7(SI)(DI*4), Y16, K4, K4 // 62f37d241ea4be070000007f + VPCMPUD $127, -7(DI)(R8*2), Y16, K4, K4 // 62b37d241ea447f9ffffff7f + VPCMPUD $127, Y1, Y16, K4, K6 // 62f37d241ef17f + VPCMPUD $127, 7(SI)(DI*4), Y16, K4, K6 // 62f37d241eb4be070000007f + VPCMPUD $127, -7(DI)(R8*2), Y16, K4, K6 // 62b37d241eb447f9ffffff7f + VPCMPUD $0, Z1, Z22, K2, K4 // 62f34d421ee100 + VPCMPUD $0, Z16, Z22, K2, K4 // 62b34d421ee000 + VPCMPUD $0, (CX), Z22, K2, K4 // 62f34d421e2100 + VPCMPUD $0, 99(R15), Z22, K2, K4 // 62d34d421ea76300000000 + VPCMPUD $0, Z1, Z25, K2, K4 // 62f335421ee100 + VPCMPUD $0, Z16, Z25, K2, K4 // 62b335421ee000 + VPCMPUD $0, (CX), Z25, K2, K4 
// 62f335421e2100 + VPCMPUD $0, 99(R15), Z25, K2, K4 // 62d335421ea76300000000 + VPCMPUD $0, Z1, Z22, K2, K5 // 62f34d421ee900 + VPCMPUD $0, Z16, Z22, K2, K5 // 62b34d421ee800 + VPCMPUD $0, (CX), Z22, K2, K5 // 62f34d421e2900 + VPCMPUD $0, 99(R15), Z22, K2, K5 // 62d34d421eaf6300000000 + VPCMPUD $0, Z1, Z25, K2, K5 // 62f335421ee900 + VPCMPUD $0, Z16, Z25, K2, K5 // 62b335421ee800 + VPCMPUD $0, (CX), Z25, K2, K5 // 62f335421e2900 + VPCMPUD $0, 99(R15), Z25, K2, K5 // 62d335421eaf6300000000 + VPCMPUQ $97, X20, X2, K2, K2 // 62b3ed0a1ed461 + VPCMPUQ $97, 99(R15)(R15*4), X2, K2, K2 // 6293ed0a1e94bf6300000061 + VPCMPUQ $97, 15(DX), X2, K2, K2 // 62f3ed0a1e920f00000061 + VPCMPUQ $97, X20, X2, K2, K7 // 62b3ed0a1efc61 + VPCMPUQ $97, 99(R15)(R15*4), X2, K2, K7 // 6293ed0a1ebcbf6300000061 + VPCMPUQ $97, 15(DX), X2, K2, K7 // 62f3ed0a1eba0f00000061 + VPCMPUQ $81, Y31, Y30, K3, K0 // 62938d231ec751 + VPCMPUQ $81, 17(SP), Y30, K3, K0 // 62f38d231e84241100000051 + VPCMPUQ $81, -17(BP)(SI*4), Y30, K3, K0 // 62f38d231e84b5efffffff51 + VPCMPUQ $81, Y31, Y30, K3, K5 // 62938d231eef51 + VPCMPUQ $81, 17(SP), Y30, K3, K5 // 62f38d231eac241100000051 + VPCMPUQ $81, -17(BP)(SI*4), Y30, K3, K5 // 62f38d231eacb5efffffff51 + VPCMPUQ $42, Z0, Z6, K3, K6 // 62f3cd4b1ef02a + VPCMPUQ $42, Z8, Z6, K3, K6 // 62d3cd4b1ef02a + VPCMPUQ $42, 99(R15)(R15*2), Z6, K3, K6 // 6293cd4b1eb47f630000002a + VPCMPUQ $42, -7(DI), Z6, K3, K6 // 62f3cd4b1eb7f9ffffff2a + VPCMPUQ $42, Z0, Z2, K3, K6 // 62f3ed4b1ef02a + VPCMPUQ $42, Z8, Z2, K3, K6 // 62d3ed4b1ef02a + VPCMPUQ $42, 99(R15)(R15*2), Z2, K3, K6 // 6293ed4b1eb47f630000002a + VPCMPUQ $42, -7(DI), Z2, K3, K6 // 62f3ed4b1eb7f9ffffff2a + VPCMPUQ $42, Z0, Z6, K3, K5 // 62f3cd4b1ee82a + VPCMPUQ $42, Z8, Z6, K3, K5 // 62d3cd4b1ee82a + VPCMPUQ $42, 99(R15)(R15*2), Z6, K3, K5 // 6293cd4b1eac7f630000002a + VPCMPUQ $42, -7(DI), Z6, K3, K5 // 62f3cd4b1eaff9ffffff2a + VPCMPUQ $42, Z0, Z2, K3, K5 // 62f3ed4b1ee82a + VPCMPUQ $42, Z8, Z2, K3, K5 // 62d3ed4b1ee82a + 
VPCMPUQ $42, 99(R15)(R15*2), Z2, K3, K5 // 6293ed4b1eac7f630000002a + VPCMPUQ $42, -7(DI), Z2, K3, K5 // 62f3ed4b1eaff9ffffff2a + VPCOMPRESSD X9, K7, X8 // 62527d0f8bc8 + VPCOMPRESSD X9, K7, 15(DX)(BX*1) // 62727d0f8b8c1a0f000000 + VPCOMPRESSD X9, K7, -7(CX)(DX*2) // 62727d0f8b8c51f9ffffff + VPCOMPRESSD Y14, K2, Y20 // 62327d2a8bf4 + VPCOMPRESSD Y14, K2, 7(SI)(DI*8) // 62727d2a8bb4fe07000000 + VPCOMPRESSD Y14, K2, -15(R14) // 62527d2a8bb6f1ffffff + VPCOMPRESSD Z26, K4, Z6 // 62627d4c8bd6 + VPCOMPRESSD Z14, K4, Z6 // 62727d4c8bf6 + VPCOMPRESSD Z26, K4, Z14 // 62427d4c8bd6 + VPCOMPRESSD Z14, K4, Z14 // 62527d4c8bf6 + VPCOMPRESSD Z26, K4, 17(SP)(BP*2) // 62627d4c8b946c11000000 + VPCOMPRESSD Z14, K4, 17(SP)(BP*2) // 62727d4c8bb46c11000000 + VPCOMPRESSD Z26, K4, -7(DI)(R8*4) // 62227d4c8b9487f9ffffff + VPCOMPRESSD Z14, K4, -7(DI)(R8*4) // 62327d4c8bb487f9ffffff + VPCOMPRESSQ X31, K1, X2 // 6262fd098bfa + VPCOMPRESSQ X31, K1, -17(BP) // 6262fd098bbdefffffff + VPCOMPRESSQ X31, K1, -15(R14)(R15*8) // 6202fd098bbcfef1ffffff + VPCOMPRESSQ Y13, K3, Y24 // 6212fd2b8be8 + VPCOMPRESSQ Y13, K3, 7(SI)(DI*1) // 6272fd2b8bac3e07000000 + VPCOMPRESSQ Y13, K3, 15(DX)(BX*8) // 6272fd2b8bacda0f000000 + VPCOMPRESSQ Z13, K4, Z28 // 6212fd4c8bec + VPCOMPRESSQ Z21, K4, Z28 // 6282fd4c8bec + VPCOMPRESSQ Z13, K4, Z6 // 6272fd4c8bee + VPCOMPRESSQ Z21, K4, Z6 // 62e2fd4c8bee + VPCOMPRESSQ Z13, K4, 15(R8) // 6252fd4c8ba80f000000 + VPCOMPRESSQ Z21, K4, 15(R8) // 62c2fd4c8ba80f000000 + VPCOMPRESSQ Z13, K4, (BP) // 6272fd4c8b6d00 + VPCOMPRESSQ Z21, K4, (BP) // 62e2fd4c8b6d00 + VPERMD Y11, Y8, K1, Y24 // 62423d2936c3 + VPERMD -17(BP)(SI*2), Y8, K1, Y24 // 62623d29368475efffffff + VPERMD 7(AX)(CX*2), Y8, K1, Y24 // 62623d2936844807000000 + VPERMD Z20, Z0, K1, Z7 // 62b27d4936fc + VPERMD Z28, Z0, K1, Z7 // 62927d4936fc + VPERMD 99(R15)(R15*1), Z0, K1, Z7 // 62927d4936bc3f63000000 + VPERMD (DX), Z0, K1, Z7 // 62f27d49363a + VPERMD Z20, Z6, K1, Z7 // 62b24d4936fc + VPERMD Z28, Z6, K1, Z7 // 62924d4936fc 
+ VPERMD 99(R15)(R15*1), Z6, K1, Z7 // 62924d4936bc3f63000000 + VPERMD (DX), Z6, K1, Z7 // 62f24d49363a + VPERMD Z20, Z0, K1, Z9 // 62327d4936cc + VPERMD Z28, Z0, K1, Z9 // 62127d4936cc + VPERMD 99(R15)(R15*1), Z0, K1, Z9 // 62127d49368c3f63000000 + VPERMD (DX), Z0, K1, Z9 // 62727d49360a + VPERMD Z20, Z6, K1, Z9 // 62324d4936cc + VPERMD Z28, Z6, K1, Z9 // 62124d4936cc + VPERMD 99(R15)(R15*1), Z6, K1, Z9 // 62124d49368c3f63000000 + VPERMD (DX), Z6, K1, Z9 // 62724d49360a + VPERMI2D X1, X22, K1, X0 // 62f24d0176c1 + VPERMI2D 7(AX), X22, K1, X0 // 62f24d01768007000000 + VPERMI2D (DI), X22, K1, X0 // 62f24d017607 + VPERMI2D Y9, Y16, K3, Y21 // 62c27d2376e9 + VPERMI2D (R14), Y16, K3, Y21 // 62c27d23762e + VPERMI2D -7(DI)(R8*8), Y16, K3, Y21 // 62a27d2376acc7f9ffffff + VPERMI2D Z2, Z18, K4, Z11 // 62726d4476da + VPERMI2D Z21, Z18, K4, Z11 // 62326d4476dd + VPERMI2D 7(SI)(DI*8), Z18, K4, Z11 // 62726d44769cfe07000000 + VPERMI2D -15(R14), Z18, K4, Z11 // 62526d44769ef1ffffff + VPERMI2D Z2, Z24, K4, Z11 // 62723d4476da + VPERMI2D Z21, Z24, K4, Z11 // 62323d4476dd + VPERMI2D 7(SI)(DI*8), Z24, K4, Z11 // 62723d44769cfe07000000 + VPERMI2D -15(R14), Z24, K4, Z11 // 62523d44769ef1ffffff + VPERMI2D Z2, Z18, K4, Z5 // 62f26d4476ea + VPERMI2D Z21, Z18, K4, Z5 // 62b26d4476ed + VPERMI2D 7(SI)(DI*8), Z18, K4, Z5 // 62f26d4476acfe07000000 + VPERMI2D -15(R14), Z18, K4, Z5 // 62d26d4476aef1ffffff + VPERMI2D Z2, Z24, K4, Z5 // 62f23d4476ea + VPERMI2D Z21, Z24, K4, Z5 // 62b23d4476ed + VPERMI2D 7(SI)(DI*8), Z24, K4, Z5 // 62f23d4476acfe07000000 + VPERMI2D -15(R14), Z24, K4, Z5 // 62d23d4476aef1ffffff + VPERMI2PD X7, X6, K5, X11 // 6272cd0d77df + VPERMI2PD 99(R15)(R15*1), X6, K5, X11 // 6212cd0d779c3f63000000 + VPERMI2PD (DX), X6, K5, X11 // 6272cd0d771a + VPERMI2PD Y6, Y9, K7, Y13 // 6272b52f77ee + VPERMI2PD 99(R15)(R15*4), Y9, K7, Y13 // 6212b52f77acbf63000000 + VPERMI2PD 15(DX), Y9, K7, Y13 // 6272b52f77aa0f000000 + VPERMI2PD Z6, Z6, K7, Z7 // 62f2cd4f77fe + VPERMI2PD Z22, Z6, K7, Z7 
// 62b2cd4f77fe + VPERMI2PD 7(SI)(DI*1), Z6, K7, Z7 // 62f2cd4f77bc3e07000000 + VPERMI2PD 15(DX)(BX*8), Z6, K7, Z7 // 62f2cd4f77bcda0f000000 + VPERMI2PD Z6, Z16, K7, Z7 // 62f2fd4777fe + VPERMI2PD Z22, Z16, K7, Z7 // 62b2fd4777fe + VPERMI2PD 7(SI)(DI*1), Z16, K7, Z7 // 62f2fd4777bc3e07000000 + VPERMI2PD 15(DX)(BX*8), Z16, K7, Z7 // 62f2fd4777bcda0f000000 + VPERMI2PD Z6, Z6, K7, Z13 // 6272cd4f77ee + VPERMI2PD Z22, Z6, K7, Z13 // 6232cd4f77ee + VPERMI2PD 7(SI)(DI*1), Z6, K7, Z13 // 6272cd4f77ac3e07000000 + VPERMI2PD 15(DX)(BX*8), Z6, K7, Z13 // 6272cd4f77acda0f000000 + VPERMI2PD Z6, Z16, K7, Z13 // 6272fd4777ee + VPERMI2PD Z22, Z16, K7, Z13 // 6232fd4777ee + VPERMI2PD 7(SI)(DI*1), Z16, K7, Z13 // 6272fd4777ac3e07000000 + VPERMI2PD 15(DX)(BX*8), Z16, K7, Z13 // 6272fd4777acda0f000000 + VPERMI2PS X3, X31, K6, X8 // 6272050677c3 + VPERMI2PS -17(BP)(SI*8), X31, K6, X8 // 627205067784f5efffffff + VPERMI2PS (R15), X31, K6, X8 // 625205067707 + VPERMI2PS Y6, Y7, K3, Y3 // 62f2452b77de + VPERMI2PS (CX), Y7, K3, Y3 // 62f2452b7719 + VPERMI2PS 99(R15), Y7, K3, Y3 // 62d2452b779f63000000 + VPERMI2PS Z18, Z13, K7, Z1 // 62b2154f77ca + VPERMI2PS Z8, Z13, K7, Z1 // 62d2154f77c8 + VPERMI2PS -7(DI)(R8*1), Z13, K7, Z1 // 62b2154f778c07f9ffffff + VPERMI2PS (SP), Z13, K7, Z1 // 62f2154f770c24 + VPERMI2PS Z18, Z13, K7, Z15 // 6232154f77fa + VPERMI2PS Z8, Z13, K7, Z15 // 6252154f77f8 + VPERMI2PS -7(DI)(R8*1), Z13, K7, Z15 // 6232154f77bc07f9ffffff + VPERMI2PS (SP), Z13, K7, Z15 // 6272154f773c24 + VPERMI2Q X24, X20, K4, X28 // 6202dd0476e0 + VPERMI2Q 7(SI)(DI*8), X20, K4, X28 // 6262dd0476a4fe07000000 + VPERMI2Q -15(R14), X20, K4, X28 // 6242dd0476a6f1ffffff + VPERMI2Q Y26, Y11, K4, Y26 // 6202a52c76d2 + VPERMI2Q 99(R15)(R15*2), Y11, K4, Y26 // 6202a52c76947f63000000 + VPERMI2Q -7(DI), Y11, K4, Y26 // 6262a52c7697f9ffffff + VPERMI2Q Z20, Z2, K7, Z22 // 62a2ed4f76f4 + VPERMI2Q Z9, Z2, K7, Z22 // 62c2ed4f76f1 + VPERMI2Q -7(CX), Z2, K7, Z22 // 62e2ed4f76b1f9ffffff + VPERMI2Q 15(DX)(BX*4), 
Z2, K7, Z22 // 62e2ed4f76b49a0f000000 + VPERMI2Q Z20, Z31, K7, Z22 // 62a2854776f4 + VPERMI2Q Z9, Z31, K7, Z22 // 62c2854776f1 + VPERMI2Q -7(CX), Z31, K7, Z22 // 62e2854776b1f9ffffff + VPERMI2Q 15(DX)(BX*4), Z31, K7, Z22 // 62e2854776b49a0f000000 + VPERMI2Q Z20, Z2, K7, Z7 // 62b2ed4f76fc + VPERMI2Q Z9, Z2, K7, Z7 // 62d2ed4f76f9 + VPERMI2Q -7(CX), Z2, K7, Z7 // 62f2ed4f76b9f9ffffff + VPERMI2Q 15(DX)(BX*4), Z2, K7, Z7 // 62f2ed4f76bc9a0f000000 + VPERMI2Q Z20, Z31, K7, Z7 // 62b2854776fc + VPERMI2Q Z9, Z31, K7, Z7 // 62d2854776f9 + VPERMI2Q -7(CX), Z31, K7, Z7 // 62f2854776b9f9ffffff + VPERMI2Q 15(DX)(BX*4), Z31, K7, Z7 // 62f2854776bc9a0f000000 + VPERMILPD $94, X6, K4, X12 // 6273fd0c05e65e + VPERMILPD $94, -7(DI)(R8*1), K4, X12 // 6233fd0c05a407f9ffffff5e + VPERMILPD $94, (SP), K4, X12 // 6273fd0c0524245e + VPERMILPD $121, Y18, K2, Y31 // 6223fd2a05fa79 + VPERMILPD $121, 15(DX)(BX*1), K2, Y31 // 6263fd2a05bc1a0f00000079 + VPERMILPD $121, -7(CX)(DX*2), K2, Y31 // 6263fd2a05bc51f9ffffff79 + VPERMILPD $13, Z3, K2, Z14 // 6273fd4a05f30d + VPERMILPD $13, Z12, K2, Z14 // 6253fd4a05f40d + VPERMILPD $13, (AX), K2, Z14 // 6273fd4a05300d + VPERMILPD $13, 7(SI), K2, Z14 // 6273fd4a05b6070000000d + VPERMILPD $13, Z3, K2, Z28 // 6263fd4a05e30d + VPERMILPD $13, Z12, K2, Z28 // 6243fd4a05e40d + VPERMILPD $13, (AX), K2, Z28 // 6263fd4a05200d + VPERMILPD $13, 7(SI), K2, Z28 // 6263fd4a05a6070000000d + VPERMILPD X6, X28, K3, X17 // 62e29d030dce + VPERMILPD -7(CX), X28, K3, X17 // 62e29d030d89f9ffffff + VPERMILPD 15(DX)(BX*4), X28, K3, X17 // 62e29d030d8c9a0f000000 + VPERMILPD Y2, Y24, K3, Y3 // 62f2bd230dda + VPERMILPD -17(BP), Y24, K3, Y3 // 62f2bd230d9defffffff + VPERMILPD -15(R14)(R15*8), Y24, K3, Y3 // 6292bd230d9cfef1ffffff + VPERMILPD Z5, Z19, K3, Z15 // 6272e5430dfd + VPERMILPD Z1, Z19, K3, Z15 // 6272e5430df9 + VPERMILPD (BX), Z19, K3, Z15 // 6272e5430d3b + VPERMILPD -17(BP)(SI*1), Z19, K3, Z15 // 6272e5430dbc35efffffff + VPERMILPD Z5, Z15, K3, Z15 // 6272854b0dfd + 
VPERMILPD Z1, Z15, K3, Z15 // 6272854b0df9 + VPERMILPD (BX), Z15, K3, Z15 // 6272854b0d3b + VPERMILPD -17(BP)(SI*1), Z15, K3, Z15 // 6272854b0dbc35efffffff + VPERMILPD Z5, Z19, K3, Z30 // 6262e5430df5 + VPERMILPD Z1, Z19, K3, Z30 // 6262e5430df1 + VPERMILPD (BX), Z19, K3, Z30 // 6262e5430d33 + VPERMILPD -17(BP)(SI*1), Z19, K3, Z30 // 6262e5430db435efffffff + VPERMILPD Z5, Z15, K3, Z30 // 6262854b0df5 + VPERMILPD Z1, Z15, K3, Z30 // 6262854b0df1 + VPERMILPD (BX), Z15, K3, Z30 // 6262854b0d33 + VPERMILPD -17(BP)(SI*1), Z15, K3, Z30 // 6262854b0db435efffffff + VPERMILPS $65, X8, K2, X1 // 62d37d0a04c841 + VPERMILPS $65, 99(R15)(R15*8), K2, X1 // 62937d0a048cff6300000041 + VPERMILPS $65, 7(AX)(CX*8), K2, X1 // 62f37d0a048cc80700000041 + VPERMILPS $67, Y21, K1, Y7 // 62b37d2904fd43 + VPERMILPS $67, 17(SP)(BP*2), K1, Y7 // 62f37d2904bc6c1100000043 + VPERMILPS $67, -7(DI)(R8*4), K1, Y7 // 62b37d2904bc87f9ffffff43 + VPERMILPS $127, Z14, K2, Z3 // 62d37d4a04de7f + VPERMILPS $127, Z15, K2, Z3 // 62d37d4a04df7f + VPERMILPS $127, 15(R8)(R14*4), K2, Z3 // 62937d4a049cb00f0000007f + VPERMILPS $127, -7(CX)(DX*4), K2, Z3 // 62f37d4a049c91f9ffffff7f + VPERMILPS $127, Z14, K2, Z5 // 62d37d4a04ee7f + VPERMILPS $127, Z15, K2, Z5 // 62d37d4a04ef7f + VPERMILPS $127, 15(R8)(R14*4), K2, Z5 // 62937d4a04acb00f0000007f + VPERMILPS $127, -7(CX)(DX*4), K2, Z5 // 62f37d4a04ac91f9ffffff7f + VPERMILPS X0, X6, K1, X8 // 62724d090cc0 + VPERMILPS (AX), X6, K1, X8 // 62724d090c00 + VPERMILPS 7(SI), X6, K1, X8 // 62724d090c8607000000 + VPERMILPS Y20, Y8, K7, Y14 // 62323d2f0cf4 + VPERMILPS 15(R8), Y8, K7, Y14 // 62523d2f0cb00f000000 + VPERMILPS (BP), Y8, K7, Y14 // 62723d2f0c7500 + VPERMILPS Z20, Z16, K1, Z21 // 62a27d410cec + VPERMILPS Z0, Z16, K1, Z21 // 62e27d410ce8 + VPERMILPS (R8), Z16, K1, Z21 // 62c27d410c28 + VPERMILPS 15(DX)(BX*2), Z16, K1, Z21 // 62e27d410cac5a0f000000 + VPERMILPS Z20, Z9, K1, Z21 // 62a235490cec + VPERMILPS Z0, Z9, K1, Z21 // 62e235490ce8 + VPERMILPS (R8), Z9, K1, Z21 // 
62c235490c28 + VPERMILPS 15(DX)(BX*2), Z9, K1, Z21 // 62e235490cac5a0f000000 + VPERMILPS Z20, Z16, K1, Z8 // 62327d410cc4 + VPERMILPS Z0, Z16, K1, Z8 // 62727d410cc0 + VPERMILPS (R8), Z16, K1, Z8 // 62527d410c00 + VPERMILPS 15(DX)(BX*2), Z16, K1, Z8 // 62727d410c845a0f000000 + VPERMILPS Z20, Z9, K1, Z8 // 623235490cc4 + VPERMILPS Z0, Z9, K1, Z8 // 627235490cc0 + VPERMILPS (R8), Z9, K1, Z8 // 625235490c00 + VPERMILPS 15(DX)(BX*2), Z9, K1, Z8 // 627235490c845a0f000000 + VPERMPD $0, Y24, K1, Y11 // 6213fd2901d800 + VPERMPD $0, 15(R8)(R14*8), K1, Y11 // 6213fd29019cf00f00000000 + VPERMPD $0, -15(R14)(R15*2), K1, Y11 // 6213fd29019c7ef1ffffff00 + VPERMPD $97, Z0, K1, Z23 // 62e3fd4901f861 + VPERMPD $97, Z11, K1, Z23 // 62c3fd4901fb61 + VPERMPD $97, 17(SP)(BP*1), K1, Z23 // 62e3fd4901bc2c1100000061 + VPERMPD $97, -7(CX)(DX*8), K1, Z23 // 62e3fd4901bcd1f9ffffff61 + VPERMPD $97, Z0, K1, Z19 // 62e3fd4901d861 + VPERMPD $97, Z11, K1, Z19 // 62c3fd4901db61 + VPERMPD $97, 17(SP)(BP*1), K1, Z19 // 62e3fd49019c2c1100000061 + VPERMPD $97, -7(CX)(DX*8), K1, Z19 // 62e3fd49019cd1f9ffffff61 + VPERMPD Y18, Y5, K7, Y1 // 62b2d52f16ca + VPERMPD -15(R14)(R15*1), Y5, K7, Y1 // 6292d52f168c3ef1ffffff + VPERMPD -15(BX), Y5, K7, Y1 // 62f2d52f168bf1ffffff + VPERMPD Z0, Z24, K2, Z0 // 62f2bd4216c0 + VPERMPD Z26, Z24, K2, Z0 // 6292bd4216c2 + VPERMPD -17(BP)(SI*2), Z24, K2, Z0 // 62f2bd42168475efffffff + VPERMPD 7(AX)(CX*2), Z24, K2, Z0 // 62f2bd4216844807000000 + VPERMPD Z0, Z12, K2, Z0 // 62f29d4a16c0 + VPERMPD Z26, Z12, K2, Z0 // 62929d4a16c2 + VPERMPD -17(BP)(SI*2), Z12, K2, Z0 // 62f29d4a168475efffffff + VPERMPD 7(AX)(CX*2), Z12, K2, Z0 // 62f29d4a16844807000000 + VPERMPD Z0, Z24, K2, Z25 // 6262bd4216c8 + VPERMPD Z26, Z24, K2, Z25 // 6202bd4216ca + VPERMPD -17(BP)(SI*2), Z24, K2, Z25 // 6262bd42168c75efffffff + VPERMPD 7(AX)(CX*2), Z24, K2, Z25 // 6262bd42168c4807000000 + VPERMPD Z0, Z12, K2, Z25 // 62629d4a16c8 + VPERMPD Z26, Z12, K2, Z25 // 62029d4a16ca + VPERMPD -17(BP)(SI*2), Z12, 
K2, Z25 // 62629d4a168c75efffffff + VPERMPD 7(AX)(CX*2), Z12, K2, Z25 // 62629d4a168c4807000000 + VPERMPS Y9, Y20, K4, Y20 // 62c25d2416e1 + VPERMPS 7(AX)(CX*4), Y20, K4, Y20 // 62e25d2416a48807000000 + VPERMPS 7(AX)(CX*1), Y20, K4, Y20 // 62e25d2416a40807000000 + VPERMPS Z9, Z9, K1, Z9 // 6252354916c9 + VPERMPS Z28, Z9, K1, Z9 // 6212354916cc + VPERMPS 15(R8)(R14*1), Z9, K1, Z9 // 62123549168c300f000000 + VPERMPS 15(R8)(R14*2), Z9, K1, Z9 // 62123549168c700f000000 + VPERMPS Z9, Z25, K1, Z9 // 6252354116c9 + VPERMPS Z28, Z25, K1, Z9 // 6212354116cc + VPERMPS 15(R8)(R14*1), Z25, K1, Z9 // 62123541168c300f000000 + VPERMPS 15(R8)(R14*2), Z25, K1, Z9 // 62123541168c700f000000 + VPERMPS Z9, Z9, K1, Z3 // 62d2354916d9 + VPERMPS Z28, Z9, K1, Z3 // 6292354916dc + VPERMPS 15(R8)(R14*1), Z9, K1, Z3 // 62923549169c300f000000 + VPERMPS 15(R8)(R14*2), Z9, K1, Z3 // 62923549169c700f000000 + VPERMPS Z9, Z25, K1, Z3 // 62d2354116d9 + VPERMPS Z28, Z25, K1, Z3 // 6292354116dc + VPERMPS 15(R8)(R14*1), Z25, K1, Z3 // 62923541169c300f000000 + VPERMPS 15(R8)(R14*2), Z25, K1, Z3 // 62923541169c700f000000 + VPERMQ $81, Y28, K3, Y28 // 6203fd2b00e451 + VPERMQ $81, (SI), K3, Y28 // 6263fd2b002651 + VPERMQ $81, 7(SI)(DI*2), K3, Y28 // 6263fd2b00a47e0700000051 + VPERMQ $42, Z17, K4, Z20 // 62a3fd4c00e12a + VPERMQ $42, Z0, K4, Z20 // 62e3fd4c00e02a + VPERMQ $42, (R14), K4, Z20 // 62c3fd4c00262a + VPERMQ $42, -7(DI)(R8*8), K4, Z20 // 62a3fd4c00a4c7f9ffffff2a + VPERMQ $42, Z17, K4, Z0 // 62b3fd4c00c12a + VPERMQ $42, Z0, K4, Z0 // 62f3fd4c00c02a + VPERMQ $42, (R14), K4, Z0 // 62d3fd4c00062a + VPERMQ $42, -7(DI)(R8*8), K4, Z0 // 62b3fd4c0084c7f9ffffff2a + VPERMQ Y11, Y8, K5, Y1 // 62d2bd2d36cb + VPERMQ 17(SP)(BP*8), Y8, K5, Y1 // 62f2bd2d368cec11000000 + VPERMQ 17(SP)(BP*4), Y8, K5, Y1 // 62f2bd2d368cac11000000 + VPERMQ Z21, Z31, K7, Z17 // 62a2854736cd + VPERMQ Z9, Z31, K7, Z17 // 62c2854736c9 + VPERMQ 99(R15)(R15*4), Z31, K7, Z17 // 62828547368cbf63000000 + VPERMQ 15(DX), Z31, K7, Z17 // 
62e28547368a0f000000 + VPERMQ Z21, Z0, K7, Z17 // 62a2fd4f36cd + VPERMQ Z9, Z0, K7, Z17 // 62c2fd4f36c9 + VPERMQ 99(R15)(R15*4), Z0, K7, Z17 // 6282fd4f368cbf63000000 + VPERMQ 15(DX), Z0, K7, Z17 // 62e2fd4f368a0f000000 + VPERMQ Z21, Z31, K7, Z23 // 62a2854736fd + VPERMQ Z9, Z31, K7, Z23 // 62c2854736f9 + VPERMQ 99(R15)(R15*4), Z31, K7, Z23 // 6282854736bcbf63000000 + VPERMQ 15(DX), Z31, K7, Z23 // 62e2854736ba0f000000 + VPERMQ Z21, Z0, K7, Z23 // 62a2fd4f36fd + VPERMQ Z9, Z0, K7, Z23 // 62c2fd4f36f9 + VPERMQ 99(R15)(R15*4), Z0, K7, Z23 // 6282fd4f36bcbf63000000 + VPERMQ 15(DX), Z0, K7, Z23 // 62e2fd4f36ba0f000000 + VPERMT2D X12, X22, K7, X6 // 62d24d077ef4 + VPERMT2D 15(R8)(R14*4), X22, K7, X6 // 62924d077eb4b00f000000 + VPERMT2D -7(CX)(DX*4), X22, K7, X6 // 62f24d077eb491f9ffffff + VPERMT2D Y26, Y6, K4, Y12 // 62124d2c7ee2 + VPERMT2D 17(SP), Y6, K4, Y12 // 62724d2c7ea42411000000 + VPERMT2D -17(BP)(SI*4), Y6, K4, Y12 // 62724d2c7ea4b5efffffff + VPERMT2D Z7, Z26, K4, Z30 // 62622d447ef7 + VPERMT2D Z21, Z26, K4, Z30 // 62222d447ef5 + VPERMT2D 99(R15)(R15*2), Z26, K4, Z30 // 62022d447eb47f63000000 + VPERMT2D -7(DI), Z26, K4, Z30 // 62622d447eb7f9ffffff + VPERMT2D Z7, Z22, K4, Z30 // 62624d447ef7 + VPERMT2D Z21, Z22, K4, Z30 // 62224d447ef5 + VPERMT2D 99(R15)(R15*2), Z22, K4, Z30 // 62024d447eb47f63000000 + VPERMT2D -7(DI), Z22, K4, Z30 // 62624d447eb7f9ffffff + VPERMT2D Z7, Z26, K4, Z5 // 62f22d447eef + VPERMT2D Z21, Z26, K4, Z5 // 62b22d447eed + VPERMT2D 99(R15)(R15*2), Z26, K4, Z5 // 62922d447eac7f63000000 + VPERMT2D -7(DI), Z26, K4, Z5 // 62f22d447eaff9ffffff + VPERMT2D Z7, Z22, K4, Z5 // 62f24d447eef + VPERMT2D Z21, Z22, K4, Z5 // 62b24d447eed + VPERMT2D 99(R15)(R15*2), Z22, K4, Z5 // 62924d447eac7f63000000 + VPERMT2D -7(DI), Z22, K4, Z5 // 62f24d447eaff9ffffff + VPERMT2PD X8, X28, K7, X16 // 62c29d077fc0 + VPERMT2PD (R8), X28, K7, X16 // 62c29d077f00 + VPERMT2PD 15(DX)(BX*2), X28, K7, X16 // 62e29d077f845a0f000000 + VPERMT2PD Y28, Y8, K2, Y3 // 6292bd2a7fdc + 
VPERMT2PD 7(AX), Y8, K2, Y3 // 62f2bd2a7f9807000000 + VPERMT2PD (DI), Y8, K2, Y3 // 62f2bd2a7f1f + VPERMT2PD Z12, Z14, K5, Z16 // 62c28d4d7fc4 + VPERMT2PD Z13, Z14, K5, Z16 // 62c28d4d7fc5 + VPERMT2PD -7(CX)(DX*1), Z14, K5, Z16 // 62e28d4d7f8411f9ffffff + VPERMT2PD -15(R14)(R15*4), Z14, K5, Z16 // 62828d4d7f84bef1ffffff + VPERMT2PD Z12, Z13, K5, Z16 // 62c2954d7fc4 + VPERMT2PD Z13, Z13, K5, Z16 // 62c2954d7fc5 + VPERMT2PD -7(CX)(DX*1), Z13, K5, Z16 // 62e2954d7f8411f9ffffff + VPERMT2PD -15(R14)(R15*4), Z13, K5, Z16 // 6282954d7f84bef1ffffff + VPERMT2PD Z12, Z14, K5, Z25 // 62428d4d7fcc + VPERMT2PD Z13, Z14, K5, Z25 // 62428d4d7fcd + VPERMT2PD -7(CX)(DX*1), Z14, K5, Z25 // 62628d4d7f8c11f9ffffff + VPERMT2PD -15(R14)(R15*4), Z14, K5, Z25 // 62028d4d7f8cbef1ffffff + VPERMT2PD Z12, Z13, K5, Z25 // 6242954d7fcc + VPERMT2PD Z13, Z13, K5, Z25 // 6242954d7fcd + VPERMT2PD -7(CX)(DX*1), Z13, K5, Z25 // 6262954d7f8c11f9ffffff + VPERMT2PD -15(R14)(R15*4), Z13, K5, Z25 // 6202954d7f8cbef1ffffff + VPERMT2PS X1, X11, K3, X15 // 6272250b7ff9 + VPERMT2PS 17(SP)(BP*1), X11, K3, X15 // 6272250b7fbc2c11000000 + VPERMT2PS -7(CX)(DX*8), X11, K3, X15 // 6272250b7fbcd1f9ffffff + VPERMT2PS Y14, Y23, K4, Y1 // 62d245247fce + VPERMT2PS 99(R15)(R15*1), Y23, K4, Y1 // 629245247f8c3f63000000 + VPERMT2PS (DX), Y23, K4, Y1 // 62f245247f0a + VPERMT2PS Z27, Z2, K2, Z21 // 62826d4a7feb + VPERMT2PS Z25, Z2, K2, Z21 // 62826d4a7fe9 + VPERMT2PS 15(DX)(BX*1), Z2, K2, Z21 // 62e26d4a7fac1a0f000000 + VPERMT2PS -7(CX)(DX*2), Z2, K2, Z21 // 62e26d4a7fac51f9ffffff + VPERMT2PS Z27, Z7, K2, Z21 // 6282454a7feb + VPERMT2PS Z25, Z7, K2, Z21 // 6282454a7fe9 + VPERMT2PS 15(DX)(BX*1), Z7, K2, Z21 // 62e2454a7fac1a0f000000 + VPERMT2PS -7(CX)(DX*2), Z7, K2, Z21 // 62e2454a7fac51f9ffffff + VPERMT2PS Z27, Z2, K2, Z9 // 62126d4a7fcb + VPERMT2PS Z25, Z2, K2, Z9 // 62126d4a7fc9 + VPERMT2PS 15(DX)(BX*1), Z2, K2, Z9 // 62726d4a7f8c1a0f000000 + VPERMT2PS -7(CX)(DX*2), Z2, K2, Z9 // 62726d4a7f8c51f9ffffff + VPERMT2PS Z27, Z7, 
K2, Z9 // 6212454a7fcb + VPERMT2PS Z25, Z7, K2, Z9 // 6212454a7fc9 + VPERMT2PS 15(DX)(BX*1), Z7, K2, Z9 // 6272454a7f8c1a0f000000 + VPERMT2PS -7(CX)(DX*2), Z7, K2, Z9 // 6272454a7f8c51f9ffffff + VPERMT2Q X2, X13, K2, X19 // 62e2950a7eda + VPERMT2Q -17(BP)(SI*2), X13, K2, X19 // 62e2950a7e9c75efffffff + VPERMT2Q 7(AX)(CX*2), X13, K2, X19 // 62e2950a7e9c4807000000 + VPERMT2Q Y2, Y25, K3, Y31 // 6262b5237efa + VPERMT2Q -17(BP)(SI*8), Y25, K3, Y31 // 6262b5237ebcf5efffffff + VPERMT2Q (R15), Y25, K3, Y31 // 6242b5237e3f + VPERMT2Q Z3, Z27, K3, Z23 // 62e2a5437efb + VPERMT2Q Z0, Z27, K3, Z23 // 62e2a5437ef8 + VPERMT2Q -17(BP), Z27, K3, Z23 // 62e2a5437ebdefffffff + VPERMT2Q -15(R14)(R15*8), Z27, K3, Z23 // 6282a5437ebcfef1ffffff + VPERMT2Q Z3, Z14, K3, Z23 // 62e28d4b7efb + VPERMT2Q Z0, Z14, K3, Z23 // 62e28d4b7ef8 + VPERMT2Q -17(BP), Z14, K3, Z23 // 62e28d4b7ebdefffffff + VPERMT2Q -15(R14)(R15*8), Z14, K3, Z23 // 62828d4b7ebcfef1ffffff + VPERMT2Q Z3, Z27, K3, Z9 // 6272a5437ecb + VPERMT2Q Z0, Z27, K3, Z9 // 6272a5437ec8 + VPERMT2Q -17(BP), Z27, K3, Z9 // 6272a5437e8defffffff + VPERMT2Q -15(R14)(R15*8), Z27, K3, Z9 // 6212a5437e8cfef1ffffff + VPERMT2Q Z3, Z14, K3, Z9 // 62728d4b7ecb + VPERMT2Q Z0, Z14, K3, Z9 // 62728d4b7ec8 + VPERMT2Q -17(BP), Z14, K3, Z9 // 62728d4b7e8defffffff + VPERMT2Q -15(R14)(R15*8), Z14, K3, Z9 // 62128d4b7e8cfef1ffffff + VPEXPANDD X2, K7, X9 // 62727d0f89ca + VPEXPANDD (CX), K7, X9 // 62727d0f8909 + VPEXPANDD 99(R15), K7, X9 // 62527d0f898f63000000 + VPEXPANDD Y1, K2, Y6 // 62f27d2a89f1 + VPEXPANDD -7(CX), K2, Y6 // 62f27d2a89b1f9ffffff + VPEXPANDD 15(DX)(BX*4), K2, Y6 // 62f27d2a89b49a0f000000 + VPEXPANDD Z13, K4, Z11 // 62527d4c89dd + VPEXPANDD Z14, K4, Z11 // 62527d4c89de + VPEXPANDD -15(R14)(R15*1), K4, Z11 // 62127d4c899c3ef1ffffff + VPEXPANDD -15(BX), K4, Z11 // 62727d4c899bf1ffffff + VPEXPANDD Z13, K4, Z5 // 62d27d4c89ed + VPEXPANDD Z14, K4, Z5 // 62d27d4c89ee + VPEXPANDD -15(R14)(R15*1), K4, Z5 // 62927d4c89ac3ef1ffffff + VPEXPANDD 
-15(BX), K4, Z5 // 62f27d4c89abf1ffffff + VPEXPANDQ X2, K1, X24 // 6262fd0989c2 + VPEXPANDQ 99(R15)(R15*2), K1, X24 // 6202fd0989847f63000000 + VPEXPANDQ -7(DI), K1, X24 // 6262fd098987f9ffffff + VPEXPANDQ Y0, K3, Y9 // 6272fd2b89c8 + VPEXPANDQ 99(R15)(R15*8), K3, Y9 // 6212fd2b898cff63000000 + VPEXPANDQ 7(AX)(CX*8), K3, Y9 // 6272fd2b898cc807000000 + VPEXPANDQ Z2, K4, Z5 // 62f2fd4c89ea + VPEXPANDQ 7(AX)(CX*4), K4, Z5 // 62f2fd4c89ac8807000000 + VPEXPANDQ 7(AX)(CX*1), K4, Z5 // 62f2fd4c89ac0807000000 + VPEXPANDQ Z2, K4, Z23 // 62e2fd4c89fa + VPEXPANDQ 7(AX)(CX*4), K4, Z23 // 62e2fd4c89bc8807000000 + VPEXPANDQ 7(AX)(CX*1), K4, Z23 // 62e2fd4c89bc0807000000 + VPGATHERDD (DX)(X10*4), K6, X3 // 62b27d0e901c92 + VPGATHERDD (SP)(X4*2), K6, X3 // 62f27d0e901c64 + VPGATHERDD (R14)(X29*8), K6, X3 // 62927d06901cee + VPGATHERDD (R10)(Y29*8), K3, Y22 // 62827d239034ea + VPGATHERDD (SP)(Y4*2), K3, Y22 // 62e27d2b903464 + VPGATHERDD (DX)(Y10*4), K3, Y22 // 62a27d2b903492 + VPGATHERDD (BP)(Z10*2), K7, Z28 // 62227d4f90645500 + VPGATHERDD (R10)(Z29*8), K7, Z28 // 62027d479024ea + VPGATHERDD (R14)(Z29*8), K7, Z28 // 62027d479024ee + VPGATHERDD (BP)(Z10*2), K7, Z6 // 62b27d4f90745500 + VPGATHERDD (R10)(Z29*8), K7, Z6 // 62927d479034ea + VPGATHERDD (R14)(Z29*8), K7, Z6 // 62927d479034ee + VPGATHERDQ (AX)(X4*1), K4, X11 // 6272fd0c901c20 + VPGATHERDQ (BP)(X10*2), K4, X11 // 6232fd0c905c5500 + VPGATHERDQ (R10)(X29*8), K4, X11 // 6212fd04901cea + VPGATHERDQ (DX)(X10*4), K4, Y9 // 6232fd2c900c92 + VPGATHERDQ (SP)(X4*2), K4, Y9 // 6272fd2c900c64 + VPGATHERDQ (R14)(X29*8), K4, Y9 // 6212fd24900cee + VPGATHERDQ (R14)(Y29*8), K7, Z13 // 6212fd47902cee + VPGATHERDQ (AX)(Y4*1), K7, Z13 // 6272fd4f902c20 + VPGATHERDQ (BP)(Y10*2), K7, Z13 // 6232fd4f906c5500 + VPGATHERDQ (R14)(Y29*8), K7, Z21 // 6282fd47902cee + VPGATHERDQ (AX)(Y4*1), K7, Z21 // 62e2fd4f902c20 + VPGATHERDQ (BP)(Y10*2), K7, Z21 // 62a2fd4f906c5500 + VPGATHERQD (AX)(X4*1), K2, X15 // 62727d0a913c20 + VPGATHERQD (BP)(X10*2), K2, 
X15 // 62327d0a917c5500 + VPGATHERQD (R10)(X29*8), K2, X15 // 62127d02913cea + VPGATHERQD (R10)(Y29*8), K5, X30 // 62027d259134ea + VPGATHERQD (SP)(Y4*2), K5, X30 // 62627d2d913464 + VPGATHERQD (DX)(Y10*4), K5, X30 // 62227d2d913492 + VPGATHERQD (DX)(Z10*4), K3, Y23 // 62a27d4b913c92 + VPGATHERQD (AX)(Z4*1), K3, Y23 // 62e27d4b913c20 + VPGATHERQD (SP)(Z4*2), K3, Y23 // 62e27d4b913c64 + VPGATHERQQ (DX)(X10*4), K4, X13 // 6232fd0c912c92 + VPGATHERQQ (SP)(X4*2), K4, X13 // 6272fd0c912c64 + VPGATHERQQ (R14)(X29*8), K4, X13 // 6212fd04912cee + VPGATHERQQ (R14)(Y29*8), K2, Y31 // 6202fd22913cee + VPGATHERQQ (AX)(Y4*1), K2, Y31 // 6262fd2a913c20 + VPGATHERQQ (BP)(Y10*2), K2, Y31 // 6222fd2a917c5500 + VPGATHERQQ (BP)(Z10*2), K2, Z26 // 6222fd4a91545500 + VPGATHERQQ (R10)(Z29*8), K2, Z26 // 6202fd429114ea + VPGATHERQQ (R14)(Z29*8), K2, Z26 // 6202fd429114ee + VPGATHERQQ (BP)(Z10*2), K2, Z3 // 62b2fd4a915c5500 + VPGATHERQQ (R10)(Z29*8), K2, Z3 // 6292fd42911cea + VPGATHERQQ (R14)(Z29*8), K2, Z3 // 6292fd42911cee + VPMAXSD X1, X31, K3, X16 // 62e205033dc1 + VPMAXSD (SI), X31, K3, X16 // 62e205033d06 + VPMAXSD 7(SI)(DI*2), X31, K3, X16 // 62e205033d847e07000000 + VPMAXSD Y24, Y18, K7, Y20 // 62826d273de0 + VPMAXSD 99(R15)(R15*4), Y18, K7, Y20 // 62826d273da4bf63000000 + VPMAXSD 15(DX), Y18, K7, Y20 // 62e26d273da20f000000 + VPMAXSD Z0, Z7, K4, Z3 // 62f2454c3dd8 + VPMAXSD Z6, Z7, K4, Z3 // 62f2454c3dde + VPMAXSD 7(SI)(DI*1), Z7, K4, Z3 // 62f2454c3d9c3e07000000 + VPMAXSD 15(DX)(BX*8), Z7, K4, Z3 // 62f2454c3d9cda0f000000 + VPMAXSD Z0, Z9, K4, Z3 // 62f2354c3dd8 + VPMAXSD Z6, Z9, K4, Z3 // 62f2354c3dde + VPMAXSD 7(SI)(DI*1), Z9, K4, Z3 // 62f2354c3d9c3e07000000 + VPMAXSD 15(DX)(BX*8), Z9, K4, Z3 // 62f2354c3d9cda0f000000 + VPMAXSD Z0, Z7, K4, Z27 // 6262454c3dd8 + VPMAXSD Z6, Z7, K4, Z27 // 6262454c3dde + VPMAXSD 7(SI)(DI*1), Z7, K4, Z27 // 6262454c3d9c3e07000000 + VPMAXSD 15(DX)(BX*8), Z7, K4, Z27 // 6262454c3d9cda0f000000 + VPMAXSD Z0, Z9, K4, Z27 // 6262354c3dd8 + VPMAXSD 
Z6, Z9, K4, Z27 // 6262354c3dde + VPMAXSD 7(SI)(DI*1), Z9, K4, Z27 // 6262354c3d9c3e07000000 + VPMAXSD 15(DX)(BX*8), Z9, K4, Z27 // 6262354c3d9cda0f000000 + VPMAXSQ X15, X9, K4, X7 // 62d2b50c3dff + VPMAXSQ 17(SP)(BP*8), X9, K4, X7 // 62f2b50c3dbcec11000000 + VPMAXSQ 17(SP)(BP*4), X9, K4, X7 // 62f2b50c3dbcac11000000 + VPMAXSQ Y19, Y3, K7, Y9 // 6232e52f3dcb + VPMAXSQ (CX), Y3, K7, Y9 // 6272e52f3d09 + VPMAXSQ 99(R15), Y3, K7, Y9 // 6252e52f3d8f63000000 + VPMAXSQ Z9, Z3, K2, Z20 // 62c2e54a3de1 + VPMAXSQ Z19, Z3, K2, Z20 // 62a2e54a3de3 + VPMAXSQ -7(DI)(R8*1), Z3, K2, Z20 // 62a2e54a3da407f9ffffff + VPMAXSQ (SP), Z3, K2, Z20 // 62e2e54a3d2424 + VPMAXSQ Z9, Z30, K2, Z20 // 62c28d423de1 + VPMAXSQ Z19, Z30, K2, Z20 // 62a28d423de3 + VPMAXSQ -7(DI)(R8*1), Z30, K2, Z20 // 62a28d423da407f9ffffff + VPMAXSQ (SP), Z30, K2, Z20 // 62e28d423d2424 + VPMAXSQ Z9, Z3, K2, Z28 // 6242e54a3de1 + VPMAXSQ Z19, Z3, K2, Z28 // 6222e54a3de3 + VPMAXSQ -7(DI)(R8*1), Z3, K2, Z28 // 6222e54a3da407f9ffffff + VPMAXSQ (SP), Z3, K2, Z28 // 6262e54a3d2424 + VPMAXSQ Z9, Z30, K2, Z28 // 62428d423de1 + VPMAXSQ Z19, Z30, K2, Z28 // 62228d423de3 + VPMAXSQ -7(DI)(R8*1), Z30, K2, Z28 // 62228d423da407f9ffffff + VPMAXSQ (SP), Z30, K2, Z28 // 62628d423d2424 + VPMAXUD X3, X8, K3, X15 // 62723d0b3ffb + VPMAXUD 7(AX), X8, K3, X15 // 62723d0b3fb807000000 + VPMAXUD (DI), X8, K3, X15 // 62723d0b3f3f + VPMAXUD Y20, Y21, K3, Y2 // 62b255233fd4 + VPMAXUD 15(DX)(BX*1), Y21, K3, Y2 // 62f255233f941a0f000000 + VPMAXUD -7(CX)(DX*2), Y21, K3, Y2 // 62f255233f9451f9ffffff + VPMAXUD Z13, Z1, K2, Z6 // 62d2754a3ff5 + VPMAXUD (AX), Z1, K2, Z6 // 62f2754a3f30 + VPMAXUD 7(SI), Z1, K2, Z6 // 62f2754a3fb607000000 + VPMAXUD Z13, Z15, K2, Z6 // 62d2054a3ff5 + VPMAXUD (AX), Z15, K2, Z6 // 62f2054a3f30 + VPMAXUD 7(SI), Z15, K2, Z6 // 62f2054a3fb607000000 + VPMAXUD Z13, Z1, K2, Z22 // 62c2754a3ff5 + VPMAXUD (AX), Z1, K2, Z22 // 62e2754a3f30 + VPMAXUD 7(SI), Z1, K2, Z22 // 62e2754a3fb607000000 + VPMAXUD Z13, Z15, K2, Z22 // 
62c2054a3ff5 + VPMAXUD (AX), Z15, K2, Z22 // 62e2054a3f30 + VPMAXUD 7(SI), Z15, K2, Z22 // 62e2054a3fb607000000 + VPMAXUQ X13, X23, K1, X26 // 6242c5013fd5 + VPMAXUQ 99(R15)(R15*1), X23, K1, X26 // 6202c5013f943f63000000 + VPMAXUQ (DX), X23, K1, X26 // 6262c5013f12 + VPMAXUQ Y6, Y31, K2, Y6 // 62f285223ff6 + VPMAXUQ -17(BP), Y31, K2, Y6 // 62f285223fb5efffffff + VPMAXUQ -15(R14)(R15*8), Y31, K2, Y6 // 629285223fb4fef1ffffff + VPMAXUQ Z2, Z22, K1, Z18 // 62e2cd413fd2 + VPMAXUQ Z31, Z22, K1, Z18 // 6282cd413fd7 + VPMAXUQ (BX), Z22, K1, Z18 // 62e2cd413f13 + VPMAXUQ -17(BP)(SI*1), Z22, K1, Z18 // 62e2cd413f9435efffffff + VPMAXUQ Z2, Z7, K1, Z18 // 62e2c5493fd2 + VPMAXUQ Z31, Z7, K1, Z18 // 6282c5493fd7 + VPMAXUQ (BX), Z7, K1, Z18 // 62e2c5493f13 + VPMAXUQ -17(BP)(SI*1), Z7, K1, Z18 // 62e2c5493f9435efffffff + VPMAXUQ Z2, Z22, K1, Z8 // 6272cd413fc2 + VPMAXUQ Z31, Z22, K1, Z8 // 6212cd413fc7 + VPMAXUQ (BX), Z22, K1, Z8 // 6272cd413f03 + VPMAXUQ -17(BP)(SI*1), Z22, K1, Z8 // 6272cd413f8435efffffff + VPMAXUQ Z2, Z7, K1, Z8 // 6272c5493fc2 + VPMAXUQ Z31, Z7, K1, Z8 // 6212c5493fc7 + VPMAXUQ (BX), Z7, K1, Z8 // 6272c5493f03 + VPMAXUQ -17(BP)(SI*1), Z7, K1, Z8 // 6272c5493f8435efffffff + VPMINSD X11, X1, K4, X21 // 62c2750c39eb + VPMINSD 7(SI)(DI*1), X1, K4, X21 // 62e2750c39ac3e07000000 + VPMINSD 15(DX)(BX*8), X1, K4, X21 // 62e2750c39acda0f000000 + VPMINSD Y12, Y20, K1, Y5 // 62d25d2139ec + VPMINSD 15(R8)(R14*8), Y20, K1, Y5 // 62925d2139acf00f000000 + VPMINSD -15(R14)(R15*2), Y20, K1, Y5 // 62925d2139ac7ef1ffffff + VPMINSD Z5, Z19, K3, Z15 // 6272654339fd + VPMINSD Z1, Z19, K3, Z15 // 6272654339f9 + VPMINSD 17(SP)(BP*1), Z19, K3, Z15 // 6272654339bc2c11000000 + VPMINSD -7(CX)(DX*8), Z19, K3, Z15 // 6272654339bcd1f9ffffff + VPMINSD Z5, Z15, K3, Z15 // 6272054b39fd + VPMINSD Z1, Z15, K3, Z15 // 6272054b39f9 + VPMINSD 17(SP)(BP*1), Z15, K3, Z15 // 6272054b39bc2c11000000 + VPMINSD -7(CX)(DX*8), Z15, K3, Z15 // 6272054b39bcd1f9ffffff + VPMINSD Z5, Z19, K3, Z30 // 6262654339f5 
+ VPMINSD Z1, Z19, K3, Z30 // 6262654339f1 + VPMINSD 17(SP)(BP*1), Z19, K3, Z30 // 6262654339b42c11000000 + VPMINSD -7(CX)(DX*8), Z19, K3, Z30 // 6262654339b4d1f9ffffff + VPMINSD Z5, Z15, K3, Z30 // 6262054b39f5 + VPMINSD Z1, Z15, K3, Z30 // 6262054b39f1 + VPMINSD 17(SP)(BP*1), Z15, K3, Z30 // 6262054b39b42c11000000 + VPMINSD -7(CX)(DX*8), Z15, K3, Z30 // 6262054b39b4d1f9ffffff + VPMINSQ X7, X3, K4, X31 // 6262e50c39ff + VPMINSQ -7(DI)(R8*1), X3, K4, X31 // 6222e50c39bc07f9ffffff + VPMINSQ (SP), X3, K4, X31 // 6262e50c393c24 + VPMINSQ Y28, Y5, K5, Y3 // 6292d52d39dc + VPMINSQ -15(R14)(R15*1), Y5, K5, Y3 // 6292d52d399c3ef1ffffff + VPMINSQ -15(BX), Y5, K5, Y3 // 62f2d52d399bf1ffffff + VPMINSQ Z21, Z14, K7, Z3 // 62b28d4f39dd + VPMINSQ Z8, Z14, K7, Z3 // 62d28d4f39d8 + VPMINSQ -17(BP)(SI*2), Z14, K7, Z3 // 62f28d4f399c75efffffff + VPMINSQ 7(AX)(CX*2), Z14, K7, Z3 // 62f28d4f399c4807000000 + VPMINSQ Z21, Z15, K7, Z3 // 62b2854f39dd + VPMINSQ Z8, Z15, K7, Z3 // 62d2854f39d8 + VPMINSQ -17(BP)(SI*2), Z15, K7, Z3 // 62f2854f399c75efffffff + VPMINSQ 7(AX)(CX*2), Z15, K7, Z3 // 62f2854f399c4807000000 + VPMINSQ Z21, Z14, K7, Z5 // 62b28d4f39ed + VPMINSQ Z8, Z14, K7, Z5 // 62d28d4f39e8 + VPMINSQ -17(BP)(SI*2), Z14, K7, Z5 // 62f28d4f39ac75efffffff + VPMINSQ 7(AX)(CX*2), Z14, K7, Z5 // 62f28d4f39ac4807000000 + VPMINSQ Z21, Z15, K7, Z5 // 62b2854f39ed + VPMINSQ Z8, Z15, K7, Z5 // 62d2854f39e8 + VPMINSQ -17(BP)(SI*2), Z15, K7, Z5 // 62f2854f39ac75efffffff + VPMINSQ 7(AX)(CX*2), Z15, K7, Z5 // 62f2854f39ac4807000000 + VPMINUD X5, X14, K7, X7 // 62f20d0f3bfd + VPMINUD (AX), X14, K7, X7 // 62f20d0f3b38 + VPMINUD 7(SI), X14, K7, X7 // 62f20d0f3bbe07000000 + VPMINUD Y7, Y17, K2, Y14 // 627275223bf7 + VPMINUD 17(SP)(BP*8), Y17, K2, Y14 // 627275223bb4ec11000000 + VPMINUD 17(SP)(BP*4), Y17, K2, Y14 // 627275223bb4ac11000000 + VPMINUD Z9, Z9, K5, Z0 // 62d2354d3bc1 + VPMINUD Z25, Z9, K5, Z0 // 6292354d3bc1 + VPMINUD 99(R15)(R15*4), Z9, K5, Z0 // 6292354d3b84bf63000000 + VPMINUD 15(DX), 
Z9, K5, Z0 // 62f2354d3b820f000000 + VPMINUD Z9, Z3, K5, Z0 // 62d2654d3bc1 + VPMINUD Z25, Z3, K5, Z0 // 6292654d3bc1 + VPMINUD 99(R15)(R15*4), Z3, K5, Z0 // 6292654d3b84bf63000000 + VPMINUD 15(DX), Z3, K5, Z0 // 62f2654d3b820f000000 + VPMINUD Z9, Z9, K5, Z26 // 6242354d3bd1 + VPMINUD Z25, Z9, K5, Z26 // 6202354d3bd1 + VPMINUD 99(R15)(R15*4), Z9, K5, Z26 // 6202354d3b94bf63000000 + VPMINUD 15(DX), Z9, K5, Z26 // 6262354d3b920f000000 + VPMINUD Z9, Z3, K5, Z26 // 6242654d3bd1 + VPMINUD Z25, Z3, K5, Z26 // 6202654d3bd1 + VPMINUD 99(R15)(R15*4), Z3, K5, Z26 // 6202654d3b94bf63000000 + VPMINUD 15(DX), Z3, K5, Z26 // 6262654d3b920f000000 + VPMINUQ X21, X3, K3, X31 // 6222e50b3bfd + VPMINUQ (BX), X3, K3, X31 // 6262e50b3b3b + VPMINUQ -17(BP)(SI*1), X3, K3, X31 // 6262e50b3bbc35efffffff + VPMINUQ Y8, Y31, K4, Y9 // 625285243bc8 + VPMINUQ 7(SI)(DI*4), Y31, K4, Y9 // 627285243b8cbe07000000 + VPMINUQ -7(DI)(R8*2), Y31, K4, Y9 // 623285243b8c47f9ffffff + VPMINUQ Z17, Z20, K2, Z9 // 6232dd423bc9 + VPMINUQ Z0, Z20, K2, Z9 // 6272dd423bc8 + VPMINUQ (CX), Z20, K2, Z9 // 6272dd423b09 + VPMINUQ 99(R15), Z20, K2, Z9 // 6252dd423b8f63000000 + VPMINUQ Z17, Z0, K2, Z9 // 6232fd4a3bc9 + VPMINUQ Z0, Z0, K2, Z9 // 6272fd4a3bc8 + VPMINUQ (CX), Z0, K2, Z9 // 6272fd4a3b09 + VPMINUQ 99(R15), Z0, K2, Z9 // 6252fd4a3b8f63000000 + VPMINUQ Z17, Z20, K2, Z28 // 6222dd423be1 + VPMINUQ Z0, Z20, K2, Z28 // 6262dd423be0 + VPMINUQ (CX), Z20, K2, Z28 // 6262dd423b21 + VPMINUQ 99(R15), Z20, K2, Z28 // 6242dd423ba763000000 + VPMINUQ Z17, Z0, K2, Z28 // 6222fd4a3be1 + VPMINUQ Z0, Z0, K2, Z28 // 6262fd4a3be0 + VPMINUQ (CX), Z0, K2, Z28 // 6262fd4a3b21 + VPMINUQ 99(R15), Z0, K2, Z28 // 6242fd4a3ba763000000 + VPMOVDB X14, K3, X16 // 62327e0b31f0 + VPMOVDB X14, K3, 15(DX)(BX*1) // 62727e0b31b41a0f000000 + VPMOVDB X14, K3, -7(CX)(DX*2) // 62727e0b31b451f9ffffff + VPMOVDB Y21, K2, X11 // 62c27e2a31eb + VPMOVDB Y21, K2, (SI) // 62e27e2a312e + VPMOVDB Y21, K2, 7(SI)(DI*2) // 62e27e2a31ac7e07000000 + VPMOVDB Z20, 
K1, X14 // 62c27e4931e6 + VPMOVDB Z9, K1, X14 // 62527e4931ce + VPMOVDB Z20, K1, (R8) // 62c27e493120 + VPMOVDB Z9, K1, (R8) // 62527e493108 + VPMOVDB Z20, K1, 15(DX)(BX*2) // 62e27e4931a45a0f000000 + VPMOVDB Z9, K1, 15(DX)(BX*2) // 62727e49318c5a0f000000 + VPMOVDW X8, K2, X19 // 62327e0a33c3 + VPMOVDW X8, K2, 17(SP)(BP*8) // 62727e0a3384ec11000000 + VPMOVDW X8, K2, 17(SP)(BP*4) // 62727e0a3384ac11000000 + VPMOVDW Y12, K1, X8 // 62527e2933e0 + VPMOVDW Y12, K1, 17(SP)(BP*1) // 62727e2933a42c11000000 + VPMOVDW Y12, K1, -7(CX)(DX*8) // 62727e2933a4d1f9ffffff + VPMOVDW Z30, K7, Y9 // 62427e4f33f1 + VPMOVDW Z5, K7, Y9 // 62d27e4f33e9 + VPMOVDW Z30, K7, 7(AX) // 62627e4f33b007000000 + VPMOVDW Z5, K7, 7(AX) // 62f27e4f33a807000000 + VPMOVDW Z30, K7, (DI) // 62627e4f3337 + VPMOVDW Z5, K7, (DI) // 62f27e4f332f + VPMOVQB X11, K1, X23 // 62327e0932df + VPMOVQB X11, K1, -7(DI)(R8*1) // 62327e09329c07f9ffffff + VPMOVQB X11, K1, (SP) // 62727e09321c24 + VPMOVQB Y12, K1, X31 // 62127e2932e7 + VPMOVQB Y12, K1, -17(BP) // 62727e2932a5efffffff + VPMOVQB Y12, K1, -15(R14)(R15*8) // 62127e2932a4fef1ffffff + VPMOVQB Z21, K1, X24 // 62827e4932e8 + VPMOVQB Z9, K1, X24 // 62127e4932c8 + VPMOVQB Z21, K1, 7(SI)(DI*4) // 62e27e4932acbe07000000 + VPMOVQB Z9, K1, 7(SI)(DI*4) // 62727e49328cbe07000000 + VPMOVQB Z21, K1, -7(DI)(R8*2) // 62a27e4932ac47f9ffffff + VPMOVQB Z9, K1, -7(DI)(R8*2) // 62327e49328c47f9ffffff + VPMOVQD X0, K7, X14 // 62d27e0f35c6 + VPMOVQD X0, K7, 17(SP) // 62f27e0f35842411000000 + VPMOVQD X0, K7, -17(BP)(SI*4) // 62f27e0f3584b5efffffff + VPMOVQD Y21, K2, X11 // 62c27e2a35eb + VPMOVQD Y21, K2, -17(BP)(SI*2) // 62e27e2a35ac75efffffff + VPMOVQD Y21, K2, 7(AX)(CX*2) // 62e27e2a35ac4807000000 + VPMOVQD Z2, K4, Y14 // 62d27e4c35d6 + VPMOVQD Z7, K4, Y14 // 62d27e4c35fe + VPMOVQD Z2, K4, 99(R15)(R15*1) // 62927e4c35943f63000000 + VPMOVQD Z7, K4, 99(R15)(R15*1) // 62927e4c35bc3f63000000 + VPMOVQD Z2, K4, (DX) // 62f27e4c3512 + VPMOVQD Z7, K4, (DX) // 62f27e4c353a + VPMOVQW X2, K1, 
X23 // 62b27e0934d7 + VPMOVQW X2, K1, 17(SP)(BP*2) // 62f27e0934946c11000000 + VPMOVQW X2, K1, -7(DI)(R8*4) // 62b27e09349487f9ffffff + VPMOVQW Y30, K3, X20 // 62227e2b34f4 + VPMOVQW Y30, K3, 7(AX) // 62627e2b34b007000000 + VPMOVQW Y30, K3, (DI) // 62627e2b3437 + VPMOVQW Z27, K4, X5 // 62627e4c34dd + VPMOVQW Z25, K4, X5 // 62627e4c34cd + VPMOVQW Z27, K4, 15(R8)(R14*1) // 62027e4c349c300f000000 + VPMOVQW Z25, K4, 15(R8)(R14*1) // 62027e4c348c300f000000 + VPMOVQW Z27, K4, 15(R8)(R14*2) // 62027e4c349c700f000000 + VPMOVQW Z25, K4, 15(R8)(R14*2) // 62027e4c348c700f000000 + VPMOVSDB X0, K5, X25 // 62927e0d21c1 + VPMOVSDB X0, K5, 15(R8) // 62d27e0d21800f000000 + VPMOVSDB X0, K5, (BP) // 62f27e0d214500 + VPMOVSDB Y26, K7, X9 // 62427e2f21d1 + VPMOVSDB Y26, K7, 99(R15)(R15*1) // 62027e2f21943f63000000 + VPMOVSDB Y26, K7, (DX) // 62627e2f2112 + VPMOVSDB Z23, K7, X13 // 62c27e4f21fd + VPMOVSDB Z9, K7, X13 // 62527e4f21cd + VPMOVSDB Z23, K7, (R14) // 62c27e4f213e + VPMOVSDB Z9, K7, (R14) // 62527e4f210e + VPMOVSDB Z23, K7, -7(DI)(R8*8) // 62a27e4f21bcc7f9ffffff + VPMOVSDB Z9, K7, -7(DI)(R8*8) // 62327e4f218cc7f9ffffff + VPMOVSDW X8, K6, X2 // 62727e0e23c2 + VPMOVSDW X8, K6, -17(BP)(SI*8) // 62727e0e2384f5efffffff + VPMOVSDW X8, K6, (R15) // 62527e0e2307 + VPMOVSDW Y7, K3, X9 // 62d27e2b23f9 + VPMOVSDW Y7, K3, 99(R15)(R15*4) // 62927e2b23bcbf63000000 + VPMOVSDW Y7, K3, 15(DX) // 62f27e2b23ba0f000000 + VPMOVSDW Z27, K7, Y16 // 62227e4f23d8 + VPMOVSDW Z14, K7, Y16 // 62327e4f23f0 + VPMOVSDW Z27, K7, -17(BP)(SI*8) // 62627e4f239cf5efffffff + VPMOVSDW Z14, K7, -17(BP)(SI*8) // 62727e4f23b4f5efffffff + VPMOVSDW Z27, K7, (R15) // 62427e4f231f + VPMOVSDW Z14, K7, (R15) // 62527e4f2337 + VPMOVSQB X31, K4, X2 // 62627e0c22fa + VPMOVSQB X31, K4, -7(CX) // 62627e0c22b9f9ffffff + VPMOVSQB X31, K4, 15(DX)(BX*4) // 62627e0c22bc9a0f000000 + VPMOVSQB Y1, K4, X11 // 62d27e2c22cb + VPMOVSQB Y1, K4, 15(R8)(R14*8) // 62927e2c228cf00f000000 + VPMOVSQB Y1, K4, -15(R14)(R15*2) // 
62927e2c228c7ef1ffffff + VPMOVSQB Z3, K7, X22 // 62b27e4f22de + VPMOVSQB Z0, K7, X22 // 62b27e4f22c6 + VPMOVSQB Z3, K7, 7(SI)(DI*8) // 62f27e4f229cfe07000000 + VPMOVSQB Z0, K7, 7(SI)(DI*8) // 62f27e4f2284fe07000000 + VPMOVSQB Z3, K7, -15(R14) // 62d27e4f229ef1ffffff + VPMOVSQB Z0, K7, -15(R14) // 62d27e4f2286f1ffffff + VPMOVSQD X14, K2, X5 // 62727e0a25f5 + VPMOVSQD X14, K2, 7(SI)(DI*1) // 62727e0a25b43e07000000 + VPMOVSQD X14, K2, 15(DX)(BX*8) // 62727e0a25b4da0f000000 + VPMOVSQD Y30, K5, X0 // 62627e2d25f0 + VPMOVSQD Y30, K5, (CX) // 62627e2d2531 + VPMOVSQD Y30, K5, 99(R15) // 62427e2d25b763000000 + VPMOVSQD Z14, K3, Y31 // 62127e4b25f7 + VPMOVSQD Z7, K3, Y31 // 62927e4b25ff + VPMOVSQD Z14, K3, 7(SI)(DI*8) // 62727e4b25b4fe07000000 + VPMOVSQD Z7, K3, 7(SI)(DI*8) // 62f27e4b25bcfe07000000 + VPMOVSQD Z14, K3, -15(R14) // 62527e4b25b6f1ffffff + VPMOVSQD Z7, K3, -15(R14) // 62d27e4b25bef1ffffff + VPMOVSQW X7, K4, X17 // 62b27e0c24f9 + VPMOVSQW X7, K4, -15(R14)(R15*1) // 62927e0c24bc3ef1ffffff + VPMOVSQW X7, K4, -15(BX) // 62f27e0c24bbf1ffffff + VPMOVSQW Y22, K2, X15 // 62c27e2a24f7 + VPMOVSQW Y22, K2, -7(DI)(R8*1) // 62a27e2a24b407f9ffffff + VPMOVSQW Y22, K2, (SP) // 62e27e2a243424 + VPMOVSQW Z8, K2, X11 // 62527e4a24c3 + VPMOVSQW Z24, K2, X11 // 62427e4a24c3 + VPMOVSQW Z8, K2, 99(R15)(R15*2) // 62127e4a24847f63000000 + VPMOVSQW Z24, K2, 99(R15)(R15*2) // 62027e4a24847f63000000 + VPMOVSQW Z8, K2, -7(DI) // 62727e4a2487f9ffffff + VPMOVSQW Z24, K2, -7(DI) // 62627e4a2487f9ffffff + VPMOVSXBD X27, K2, Z1 // 62927d4a21cb or 6292fd4a21cb + VPMOVSXBD 15(DX)(BX*1), K2, Z1 // 62f27d4a218c1a0f000000 or 62f2fd4a218c1a0f000000 + VPMOVSXBD -7(CX)(DX*2), K2, Z1 // 62f27d4a218c51f9ffffff or 62f2fd4a218c51f9ffffff + VPMOVSXBD X27, K2, Z16 // 62827d4a21c3 or 6282fd4a21c3 + VPMOVSXBD 15(DX)(BX*1), K2, Z16 // 62e27d4a21841a0f000000 or 62e2fd4a21841a0f000000 + VPMOVSXBD -7(CX)(DX*2), K2, Z16 // 62e27d4a218451f9ffffff or 62e2fd4a218451f9ffffff + VPMOVSXBD X3, K1, X25 // 62627d0921cb or 
6262fd0921cb + VPMOVSXBD 7(AX)(CX*4), K1, X25 // 62627d09218c8807000000 or 6262fd09218c8807000000 + VPMOVSXBD 7(AX)(CX*1), K1, X25 // 62627d09218c0807000000 or 6262fd09218c0807000000 + VPMOVSXBD X18, K2, Y7 // 62b27d2a21fa or 62b2fd2a21fa + VPMOVSXBD 99(R15)(R15*8), K2, Y7 // 62927d2a21bcff63000000 or 6292fd2a21bcff63000000 + VPMOVSXBD 7(AX)(CX*8), K2, Y7 // 62f27d2a21bcc807000000 or 62f2fd2a21bcc807000000 + VPMOVSXBQ X28, K1, X15 // 62127d0922fc or 6212fd0922fc + VPMOVSXBQ 99(R15)(R15*8), K1, X15 // 62127d0922bcff63000000 or 6212fd0922bcff63000000 + VPMOVSXBQ 7(AX)(CX*8), K1, X15 // 62727d0922bcc807000000 or 6272fd0922bcc807000000 + VPMOVSXBQ X15, K7, Y0 // 62d27d2f22c7 or 62d2fd2f22c7 + VPMOVSXBQ (SI), K7, Y0 // 62f27d2f2206 or 62f2fd2f2206 + VPMOVSXBQ 7(SI)(DI*2), K7, Y0 // 62f27d2f22847e07000000 or 62f2fd2f22847e07000000 + VPMOVSXBQ X7, K1, Z6 // 62f27d4922f7 or 62f2fd4922f7 + VPMOVSXBQ (AX), K1, Z6 // 62f27d492230 or 62f2fd492230 + VPMOVSXBQ 7(SI), K1, Z6 // 62f27d4922b607000000 or 62f2fd4922b607000000 + VPMOVSXBQ X7, K1, Z2 // 62f27d4922d7 or 62f2fd4922d7 + VPMOVSXBQ (AX), K1, Z2 // 62f27d492210 or 62f2fd492210 + VPMOVSXBQ 7(SI), K1, Z2 // 62f27d49229607000000 or 62f2fd49229607000000 + VPMOVSXDQ X7, K2, Y14 // 62727d2a25f7 + VPMOVSXDQ 17(SP)(BP*2), K2, Y14 // 62727d2a25b46c11000000 + VPMOVSXDQ -7(DI)(R8*4), K2, Y14 // 62327d2a25b487f9ffffff + VPMOVSXDQ X22, K4, X0 // 62b27d0c25c6 + VPMOVSXDQ 15(R8)(R14*4), K4, X0 // 62927d0c2584b00f000000 + VPMOVSXDQ -7(CX)(DX*4), K4, X0 // 62f27d0c258491f9ffffff + VPMOVSXDQ Y24, K1, Z15 // 62127d4925f8 + VPMOVSXDQ -7(CX), K1, Z15 // 62727d4925b9f9ffffff + VPMOVSXDQ 15(DX)(BX*4), K1, Z15 // 62727d4925bc9a0f000000 + VPMOVSXDQ Y24, K1, Z12 // 62127d4925e0 + VPMOVSXDQ -7(CX), K1, Z12 // 62727d4925a1f9ffffff + VPMOVSXDQ 15(DX)(BX*4), K1, Z12 // 62727d4925a49a0f000000 + VPMOVSXWD X1, K3, Y13 // 62727d2b23e9 or 6272fd2b23e9 + VPMOVSXWD 15(R8), K3, Y13 // 62527d2b23a80f000000 or 6252fd2b23a80f000000 + VPMOVSXWD (BP), K3, Y13 // 
62727d2b236d00 or 6272fd2b236d00 + VPMOVSXWD X6, K4, X11 // 62727d0c23de or 6272fd0c23de + VPMOVSXWD (R8), K4, X11 // 62527d0c2318 or 6252fd0c2318 + VPMOVSXWD 15(DX)(BX*2), K4, X11 // 62727d0c239c5a0f000000 or 6272fd0c239c5a0f000000 + VPMOVSXWD Y20, K5, Z14 // 62327d4d23f4 or 6232fd4d23f4 + VPMOVSXWD 99(R15)(R15*8), K5, Z14 // 62127d4d23b4ff63000000 or 6212fd4d23b4ff63000000 + VPMOVSXWD 7(AX)(CX*8), K5, Z14 // 62727d4d23b4c807000000 or 6272fd4d23b4c807000000 + VPMOVSXWD Y20, K5, Z27 // 62227d4d23dc or 6222fd4d23dc + VPMOVSXWD 99(R15)(R15*8), K5, Z27 // 62027d4d239cff63000000 or 6202fd4d239cff63000000 + VPMOVSXWD 7(AX)(CX*8), K5, Z27 // 62627d4d239cc807000000 or 6262fd4d239cc807000000 + VPMOVSXWQ X7, K7, Z11 // 62727d4f24df or 6272fd4f24df + VPMOVSXWQ 15(R8)(R14*8), K7, Z11 // 62127d4f249cf00f000000 or 6212fd4f249cf00f000000 + VPMOVSXWQ -15(R14)(R15*2), K7, Z11 // 62127d4f249c7ef1ffffff or 6212fd4f249c7ef1ffffff + VPMOVSXWQ X7, K7, Z5 // 62f27d4f24ef or 62f2fd4f24ef + VPMOVSXWQ 15(R8)(R14*8), K7, Z5 // 62927d4f24acf00f000000 or 6292fd4f24acf00f000000 + VPMOVSXWQ -15(R14)(R15*2), K7, Z5 // 62927d4f24ac7ef1ffffff or 6292fd4f24ac7ef1ffffff + VPMOVSXWQ X31, K7, X8 // 62127d0f24c7 or 6212fd0f24c7 + VPMOVSXWQ 17(SP)(BP*8), K7, X8 // 62727d0f2484ec11000000 or 6272fd0f2484ec11000000 + VPMOVSXWQ 17(SP)(BP*4), K7, X8 // 62727d0f2484ac11000000 or 6272fd0f2484ac11000000 + VPMOVSXWQ X3, K6, Y14 // 62727d2e24f3 or 6272fd2e24f3 + VPMOVSXWQ 17(SP)(BP*1), K6, Y14 // 62727d2e24b42c11000000 or 6272fd2e24b42c11000000 + VPMOVSXWQ -7(CX)(DX*8), K6, Y14 // 62727d2e24b4d1f9ffffff or 6272fd2e24b4d1f9ffffff + VPMOVUSDB X20, K3, X28 // 62827e0b11e4 + VPMOVUSDB X20, K3, 7(SI)(DI*4) // 62e27e0b11a4be07000000 + VPMOVUSDB X20, K3, -7(DI)(R8*2) // 62a27e0b11a447f9ffffff + VPMOVUSDB Y21, K7, X24 // 62827e2f11e8 + VPMOVUSDB Y21, K7, -17(BP)(SI*2) // 62e27e2f11ac75efffffff + VPMOVUSDB Y21, K7, 7(AX)(CX*2) // 62e27e2f11ac4807000000 + VPMOVUSDB Z13, K4, X7 // 62727e4c11ef + VPMOVUSDB Z14, K4, X7 // 
62727e4c11f7 + VPMOVUSDB Z13, K4, -15(R14)(R15*1) // 62127e4c11ac3ef1ffffff + VPMOVUSDB Z14, K4, -15(R14)(R15*1) // 62127e4c11b43ef1ffffff + VPMOVUSDB Z13, K4, -15(BX) // 62727e4c11abf1ffffff + VPMOVUSDB Z14, K4, -15(BX) // 62727e4c11b3f1ffffff + VPMOVUSDW X16, K4, X20 // 62a27e0c13c4 + VPMOVUSDW X16, K4, 15(R8)(R14*1) // 62827e0c1384300f000000 + VPMOVUSDW X16, K4, 15(R8)(R14*2) // 62827e0c1384700f000000 + VPMOVUSDW Y1, K7, X12 // 62d27e2f13cc + VPMOVUSDW Y1, K7, 7(AX)(CX*4) // 62f27e2f138c8807000000 + VPMOVUSDW Y1, K7, 7(AX)(CX*1) // 62f27e2f138c0807000000 + VPMOVUSDW Z5, K2, Y26 // 62927e4a13ea + VPMOVUSDW Z23, K2, Y26 // 62827e4a13fa + VPMOVUSDW Z5, K2, (AX) // 62f27e4a1328 + VPMOVUSDW Z23, K2, (AX) // 62e27e4a1338 + VPMOVUSDW Z5, K2, 7(SI) // 62f27e4a13ae07000000 + VPMOVUSDW Z23, K2, 7(SI) // 62e27e4a13be07000000 + VPMOVUSQB X17, K5, X6 // 62e27e0d12ce + VPMOVUSQB X17, K5, (AX) // 62e27e0d1208 + VPMOVUSQB X17, K5, 7(SI) // 62e27e0d128e07000000 + VPMOVUSQB Y30, K3, X28 // 62027e2b12f4 + VPMOVUSQB Y30, K3, 17(SP) // 62627e2b12b42411000000 + VPMOVUSQB Y30, K3, -17(BP)(SI*4) // 62627e2b12b4b5efffffff + VPMOVUSQB Z2, K4, X6 // 62f27e4c12d6 + VPMOVUSQB Z2, K4, (R14) // 62d27e4c1216 + VPMOVUSQB Z2, K4, -7(DI)(R8*8) // 62b27e4c1294c7f9ffffff + VPMOVUSQD X8, K2, X1 // 62727e0a15c1 + VPMOVUSQD X8, K2, 99(R15)(R15*4) // 62127e0a1584bf63000000 + VPMOVUSQD X8, K2, 15(DX) // 62727e0a15820f000000 + VPMOVUSQD Y12, K2, X8 // 62527e2a15e0 + VPMOVUSQD Y12, K2, (SI) // 62727e2a1526 + VPMOVUSQD Y12, K2, 7(SI)(DI*2) // 62727e2a15a47e07000000 + VPMOVUSQD Z6, K3, Y22 // 62b27e4b15f6 + VPMOVUSQD Z14, K3, Y22 // 62327e4b15f6 + VPMOVUSQD Z6, K3, (BX) // 62f27e4b1533 + VPMOVUSQD Z14, K3, (BX) // 62727e4b1533 + VPMOVUSQD Z6, K3, -17(BP)(SI*1) // 62f27e4b15b435efffffff + VPMOVUSQD Z14, K3, -17(BP)(SI*1) // 62727e4b15b435efffffff + VPMOVUSQW X0, K3, X6 // 62f27e0b14c6 + VPMOVUSQW X0, K3, 7(AX) // 62f27e0b148007000000 + VPMOVUSQW X0, K3, (DI) // 62f27e0b1407 + VPMOVUSQW Y3, K3, X11 // 
62d27e2b14db + VPMOVUSQW Y3, K3, (CX) // 62f27e2b1419 + VPMOVUSQW Y3, K3, 99(R15) // 62d27e2b149f63000000 + VPMOVUSQW Z26, K2, X16 // 62227e4a14d0 + VPMOVUSQW Z14, K2, X16 // 62327e4a14f0 + VPMOVUSQW Z26, K2, 17(SP)(BP*8) // 62627e4a1494ec11000000 + VPMOVUSQW Z14, K2, 17(SP)(BP*8) // 62727e4a14b4ec11000000 + VPMOVUSQW Z26, K2, 17(SP)(BP*4) // 62627e4a1494ac11000000 + VPMOVUSQW Z14, K2, 17(SP)(BP*4) // 62727e4a14b4ac11000000 + VPMOVZXBD X15, K1, Z3 // 62d27d4931df or 62d2fd4931df + VPMOVZXBD 7(AX), K1, Z3 // 62f27d49319807000000 or 62f2fd49319807000000 + VPMOVZXBD (DI), K1, Z3 // 62f27d49311f or 62f2fd49311f + VPMOVZXBD X15, K1, Z0 // 62d27d4931c7 or 62d2fd4931c7 + VPMOVZXBD 7(AX), K1, Z0 // 62f27d49318007000000 or 62f2fd49318007000000 + VPMOVZXBD (DI), K1, Z0 // 62f27d493107 or 62f2fd493107 + VPMOVZXBD X1, K7, X11 // 62727d0f31d9 or 6272fd0f31d9 + VPMOVZXBD 99(R15)(R15*1), K7, X11 // 62127d0f319c3f63000000 or 6212fd0f319c3f63000000 + VPMOVZXBD (DX), K7, X11 // 62727d0f311a or 6272fd0f311a + VPMOVZXBD X19, K2, Y17 // 62a27d2a31cb or 62a2fd2a31cb + VPMOVZXBD 15(DX)(BX*1), K2, Y17 // 62e27d2a318c1a0f000000 or 62e2fd2a318c1a0f000000 + VPMOVZXBD -7(CX)(DX*2), K2, Y17 // 62e27d2a318c51f9ffffff or 62e2fd2a318c51f9ffffff + VPMOVZXBQ X2, K4, X13 // 62727d0c32ea or 6272fd0c32ea + VPMOVZXBQ (BX), K4, X13 // 62727d0c322b or 6272fd0c322b + VPMOVZXBQ -17(BP)(SI*1), K4, X13 // 62727d0c32ac35efffffff or 6272fd0c32ac35efffffff + VPMOVZXBQ X14, K1, Y13 // 62527d2932ee or 6252fd2932ee + VPMOVZXBQ -17(BP)(SI*8), K1, Y13 // 62727d2932acf5efffffff or 6272fd2932acf5efffffff + VPMOVZXBQ (R15), K1, Y13 // 62527d29322f or 6252fd29322f + VPMOVZXBQ X0, K3, Z21 // 62e27d4b32e8 or 62e2fd4b32e8 + VPMOVZXBQ -17(BP), K3, Z21 // 62e27d4b32adefffffff or 62e2fd4b32adefffffff + VPMOVZXBQ -15(R14)(R15*8), K3, Z21 // 62827d4b32acfef1ffffff or 6282fd4b32acfef1ffffff + VPMOVZXBQ X0, K3, Z13 // 62727d4b32e8 or 6272fd4b32e8 + VPMOVZXBQ -17(BP), K3, Z13 // 62727d4b32adefffffff or 6272fd4b32adefffffff + 
VPMOVZXBQ -15(R14)(R15*8), K3, Z13 // 62127d4b32acfef1ffffff or 6212fd4b32acfef1ffffff + VPMOVZXDQ X17, K7, Y30 // 62227d2f35f1 + VPMOVZXDQ -17(BP)(SI*8), K7, Y30 // 62627d2f35b4f5efffffff + VPMOVZXDQ (R15), K7, Y30 // 62427d2f3537 + VPMOVZXDQ X11, K6, X18 // 62c27d0e35d3 + VPMOVZXDQ 15(R8), K6, X18 // 62c27d0e35900f000000 + VPMOVZXDQ (BP), K6, X18 // 62e27d0e355500 + VPMOVZXDQ Y13, K3, Z3 // 62d27d4b35dd + VPMOVZXDQ -17(BP)(SI*2), K3, Z3 // 62f27d4b359c75efffffff + VPMOVZXDQ 7(AX)(CX*2), K3, Z3 // 62f27d4b359c4807000000 + VPMOVZXDQ Y13, K3, Z12 // 62527d4b35e5 + VPMOVZXDQ -17(BP)(SI*2), K3, Z12 // 62727d4b35a475efffffff + VPMOVZXDQ 7(AX)(CX*2), K3, Z12 // 62727d4b35a44807000000 + VPMOVZXWD X9, K7, Y18 // 62c27d2f33d1 or 62c2fd2f33d1 + VPMOVZXWD 7(SI)(DI*8), K7, Y18 // 62e27d2f3394fe07000000 or 62e2fd2f3394fe07000000 + VPMOVZXWD -15(R14), K7, Y18 // 62c27d2f3396f1ffffff or 62c2fd2f3396f1ffffff + VPMOVZXWD X24, K4, X2 // 62927d0c33d0 or 6292fd0c33d0 + VPMOVZXWD 15(R8)(R14*8), K4, X2 // 62927d0c3394f00f000000 or 6292fd0c3394f00f000000 + VPMOVZXWD -15(R14)(R15*2), K4, X2 // 62927d0c33947ef1ffffff or 6292fd0c33947ef1ffffff + VPMOVZXWD Y24, K4, Z27 // 62027d4c33d8 or 6202fd4c33d8 + VPMOVZXWD 15(R8)(R14*1), K4, Z27 // 62027d4c339c300f000000 or 6202fd4c339c300f000000 + VPMOVZXWD 15(R8)(R14*2), K4, Z27 // 62027d4c339c700f000000 or 6202fd4c339c700f000000 + VPMOVZXWD Y24, K4, Z15 // 62127d4c33f8 or 6212fd4c33f8 + VPMOVZXWD 15(R8)(R14*1), K4, Z15 // 62127d4c33bc300f000000 or 6212fd4c33bc300f000000 + VPMOVZXWD 15(R8)(R14*2), K4, Z15 // 62127d4c33bc700f000000 or 6212fd4c33bc700f000000 + VPMOVZXWQ X2, K7, Z23 // 62e27d4f34fa or 62e2fd4f34fa + VPMOVZXWQ 7(SI)(DI*1), K7, Z23 // 62e27d4f34bc3e07000000 or 62e2fd4f34bc3e07000000 + VPMOVZXWQ 15(DX)(BX*8), K7, Z23 // 62e27d4f34bcda0f000000 or 62e2fd4f34bcda0f000000 + VPMOVZXWQ X2, K7, Z5 // 62f27d4f34ea or 62f2fd4f34ea + VPMOVZXWQ 7(SI)(DI*1), K7, Z5 // 62f27d4f34ac3e07000000 or 62f2fd4f34ac3e07000000 + VPMOVZXWQ 15(DX)(BX*8), K7, Z5 
// 62f27d4f34acda0f000000 or 62f2fd4f34acda0f000000 + VPMOVZXWQ X27, K2, X2 // 62927d0a34d3 or 6292fd0a34d3 + VPMOVZXWQ 7(SI)(DI*8), K2, X2 // 62f27d0a3494fe07000000 or 62f2fd0a3494fe07000000 + VPMOVZXWQ -15(R14), K2, X2 // 62d27d0a3496f1ffffff or 62d2fd0a3496f1ffffff + VPMOVZXWQ X26, K5, Y8 // 62127d2d34c2 or 6212fd2d34c2 + VPMOVZXWQ -15(R14)(R15*1), K5, Y8 // 62127d2d34843ef1ffffff or 6212fd2d34843ef1ffffff + VPMOVZXWQ -15(BX), K5, Y8 // 62727d2d3483f1ffffff or 6272fd2d3483f1ffffff + VPMULDQ X3, X30, K3, X22 // 62e28d0328f3 + VPMULDQ -7(DI)(R8*1), X30, K3, X22 // 62a28d0328b407f9ffffff + VPMULDQ (SP), X30, K3, X22 // 62e28d03283424 + VPMULDQ Y5, Y24, K4, Y11 // 6272bd2428dd + VPMULDQ (R14), Y24, K4, Y11 // 6252bd24281e + VPMULDQ -7(DI)(R8*8), Y24, K4, Y11 // 6232bd24289cc7f9ffffff + VPMULDQ Z21, Z8, K2, Z23 // 62a2bd4a28fd + VPMULDQ Z5, Z8, K2, Z23 // 62e2bd4a28fd + VPMULDQ -7(CX)(DX*1), Z8, K2, Z23 // 62e2bd4a28bc11f9ffffff + VPMULDQ -15(R14)(R15*4), Z8, K2, Z23 // 6282bd4a28bcbef1ffffff + VPMULDQ Z21, Z28, K2, Z23 // 62a29d4228fd + VPMULDQ Z5, Z28, K2, Z23 // 62e29d4228fd + VPMULDQ -7(CX)(DX*1), Z28, K2, Z23 // 62e29d4228bc11f9ffffff + VPMULDQ -15(R14)(R15*4), Z28, K2, Z23 // 62829d4228bcbef1ffffff + VPMULDQ Z21, Z8, K2, Z6 // 62b2bd4a28f5 + VPMULDQ Z5, Z8, K2, Z6 // 62f2bd4a28f5 + VPMULDQ -7(CX)(DX*1), Z8, K2, Z6 // 62f2bd4a28b411f9ffffff + VPMULDQ -15(R14)(R15*4), Z8, K2, Z6 // 6292bd4a28b4bef1ffffff + VPMULDQ Z21, Z28, K2, Z6 // 62b29d4228f5 + VPMULDQ Z5, Z28, K2, Z6 // 62f29d4228f5 + VPMULDQ -7(CX)(DX*1), Z28, K2, Z6 // 62f29d4228b411f9ffffff + VPMULDQ -15(R14)(R15*4), Z28, K2, Z6 // 62929d4228b4bef1ffffff + VPMULLD X9, X2, K1, X20 // 62c26d0940e1 + VPMULLD (BX), X2, K1, X20 // 62e26d094023 + VPMULLD -17(BP)(SI*1), X2, K1, X20 // 62e26d0940a435efffffff + VPMULLD Y11, Y26, K1, Y6 // 62d22d2140f3 + VPMULLD -7(CX)(DX*1), Y26, K1, Y6 // 62f22d2140b411f9ffffff + VPMULLD -15(R14)(R15*4), Y26, K1, Y6 // 62922d2140b4bef1ffffff + VPMULLD Z7, Z3, K1, Z8 // 
6272654940c7 + VPMULLD Z9, Z3, K1, Z8 // 6252654940c1 + VPMULLD 15(R8), Z3, K1, Z8 // 6252654940800f000000 + VPMULLD (BP), Z3, K1, Z8 // 62726549404500 + VPMULLD Z7, Z27, K1, Z8 // 6272254140c7 + VPMULLD Z9, Z27, K1, Z8 // 6252254140c1 + VPMULLD 15(R8), Z27, K1, Z8 // 6252254140800f000000 + VPMULLD (BP), Z27, K1, Z8 // 62722541404500 + VPMULLD Z7, Z3, K1, Z2 // 62f2654940d7 + VPMULLD Z9, Z3, K1, Z2 // 62d2654940d1 + VPMULLD 15(R8), Z3, K1, Z2 // 62d2654940900f000000 + VPMULLD (BP), Z3, K1, Z2 // 62f26549405500 + VPMULLD Z7, Z27, K1, Z2 // 62f2254140d7 + VPMULLD Z9, Z27, K1, Z2 // 62d2254140d1 + VPMULLD 15(R8), Z27, K1, Z2 // 62d2254140900f000000 + VPMULLD (BP), Z27, K1, Z2 // 62f22541405500 + VPMULUDQ X16, X0, K6, X15 // 6231fd0ef4f8 + VPMULUDQ -17(BP)(SI*2), X0, K6, X15 // 6271fd0ef4bc75efffffff + VPMULUDQ 7(AX)(CX*2), X0, K6, X15 // 6271fd0ef4bc4807000000 + VPMULUDQ Y14, Y21, K3, Y7 // 62d1d523f4fe + VPMULUDQ 15(R8), Y21, K3, Y7 // 62d1d523f4b80f000000 + VPMULUDQ (BP), Y21, K3, Y7 // 62f1d523f47d00 + VPMULUDQ Z1, Z6, K7, Z6 // 62f1cd4ff4f1 + VPMULUDQ Z15, Z6, K7, Z6 // 62d1cd4ff4f7 + VPMULUDQ (SI), Z6, K7, Z6 // 62f1cd4ff436 + VPMULUDQ 7(SI)(DI*2), Z6, K7, Z6 // 62f1cd4ff4b47e07000000 + VPMULUDQ Z1, Z22, K7, Z6 // 62f1cd47f4f1 + VPMULUDQ Z15, Z22, K7, Z6 // 62d1cd47f4f7 + VPMULUDQ (SI), Z22, K7, Z6 // 62f1cd47f436 + VPMULUDQ 7(SI)(DI*2), Z22, K7, Z6 // 62f1cd47f4b47e07000000 + VPMULUDQ Z1, Z6, K7, Z16 // 62e1cd4ff4c1 + VPMULUDQ Z15, Z6, K7, Z16 // 62c1cd4ff4c7 + VPMULUDQ (SI), Z6, K7, Z16 // 62e1cd4ff406 + VPMULUDQ 7(SI)(DI*2), Z6, K7, Z16 // 62e1cd4ff4847e07000000 + VPMULUDQ Z1, Z22, K7, Z16 // 62e1cd47f4c1 + VPMULUDQ Z15, Z22, K7, Z16 // 62c1cd47f4c7 + VPMULUDQ (SI), Z22, K7, Z16 // 62e1cd47f406 + VPMULUDQ 7(SI)(DI*2), Z22, K7, Z16 // 62e1cd47f4847e07000000 + VPORD X7, X1, K2, X31 // 6261750aebff + VPORD 99(R15)(R15*2), X1, K2, X31 // 6201750aebbc7f63000000 + VPORD -7(DI), X1, K2, X31 // 6261750aebbff9ffffff + VPORD Y28, Y9, K1, Y20 // 62813529ebe4 + VPORD 
17(SP)(BP*8), Y9, K1, Y20 // 62e13529eba4ec11000000 + VPORD 17(SP)(BP*4), Y9, K1, Y20 // 62e13529eba4ac11000000 + VPORD Z15, Z3, K2, Z14 // 6251654aebf7 + VPORD Z30, Z3, K2, Z14 // 6211654aebf6 + VPORD 99(R15)(R15*1), Z3, K2, Z14 // 6211654aebb43f63000000 + VPORD (DX), Z3, K2, Z14 // 6271654aeb32 + VPORD Z15, Z12, K2, Z14 // 62511d4aebf7 + VPORD Z30, Z12, K2, Z14 // 62111d4aebf6 + VPORD 99(R15)(R15*1), Z12, K2, Z14 // 62111d4aebb43f63000000 + VPORD (DX), Z12, K2, Z14 // 62711d4aeb32 + VPORD Z15, Z3, K2, Z28 // 6241654aebe7 + VPORD Z30, Z3, K2, Z28 // 6201654aebe6 + VPORD 99(R15)(R15*1), Z3, K2, Z28 // 6201654aeba43f63000000 + VPORD (DX), Z3, K2, Z28 // 6261654aeb22 + VPORD Z15, Z12, K2, Z28 // 62411d4aebe7 + VPORD Z30, Z12, K2, Z28 // 62011d4aebe6 + VPORD 99(R15)(R15*1), Z12, K2, Z28 // 62011d4aeba43f63000000 + VPORD (DX), Z12, K2, Z28 // 62611d4aeb22 + VPORQ X12, X15, K1, X9 // 62518509ebcc + VPORQ -7(CX)(DX*1), X15, K1, X9 // 62718509eb8c11f9ffffff + VPORQ -15(R14)(R15*4), X15, K1, X9 // 62118509eb8cbef1ffffff + VPORQ Y8, Y1, K7, Y28 // 6241f52febe0 + VPORQ 7(SI)(DI*4), Y1, K7, Y28 // 6261f52feba4be07000000 + VPORQ -7(DI)(R8*2), Y1, K7, Y28 // 6221f52feba447f9ffffff + VPORQ Z3, Z5, K1, Z19 // 62e1d549ebdb + VPORQ Z5, Z5, K1, Z19 // 62e1d549ebdd + VPORQ -17(BP)(SI*8), Z5, K1, Z19 // 62e1d549eb9cf5efffffff + VPORQ (R15), Z5, K1, Z19 // 62c1d549eb1f + VPORQ Z3, Z1, K1, Z19 // 62e1f549ebdb + VPORQ Z5, Z1, K1, Z19 // 62e1f549ebdd + VPORQ -17(BP)(SI*8), Z1, K1, Z19 // 62e1f549eb9cf5efffffff + VPORQ (R15), Z1, K1, Z19 // 62c1f549eb1f + VPORQ Z3, Z5, K1, Z15 // 6271d549ebfb + VPORQ Z5, Z5, K1, Z15 // 6271d549ebfd + VPORQ -17(BP)(SI*8), Z5, K1, Z15 // 6271d549ebbcf5efffffff + VPORQ (R15), Z5, K1, Z15 // 6251d549eb3f + VPORQ Z3, Z1, K1, Z15 // 6271f549ebfb + VPORQ Z5, Z1, K1, Z15 // 6271f549ebfd + VPORQ -17(BP)(SI*8), Z1, K1, Z15 // 6271f549ebbcf5efffffff + VPORQ (R15), Z1, K1, Z15 // 6251f549eb3f + VPROLD $121, X12, K1, X0 // 62d17d0972cc79 + VPROLD $121, 15(DX)(BX*1), 
K1, X0 // 62f17d09728c1a0f00000079 + VPROLD $121, -7(CX)(DX*2), K1, X0 // 62f17d09728c51f9ffffff79 + VPROLD $13, Y27, K1, Y11 // 6291252972cb0d + VPROLD $13, 17(SP), K1, Y11 // 62f12529728c24110000000d + VPROLD $13, -17(BP)(SI*4), K1, Y11 // 62f12529728cb5efffffff0d + VPROLD $65, Z21, K7, Z14 // 62b10d4f72cd41 + VPROLD $65, Z8, K7, Z14 // 62d10d4f72c841 + VPROLD $65, 7(SI)(DI*8), K7, Z14 // 62f10d4f728cfe0700000041 + VPROLD $65, -15(R14), K7, Z14 // 62d10d4f728ef1ffffff41 + VPROLD $65, Z21, K7, Z15 // 62b1054f72cd41 + VPROLD $65, Z8, K7, Z15 // 62d1054f72c841 + VPROLD $65, 7(SI)(DI*8), K7, Z15 // 62f1054f728cfe0700000041 + VPROLD $65, -15(R14), K7, Z15 // 62d1054f728ef1ffffff41 + VPROLQ $67, X5, K2, X14 // 62f18d0a72cd43 + VPROLQ $67, -17(BP), K2, X14 // 62f18d0a728defffffff43 + VPROLQ $67, -15(R14)(R15*8), K2, X14 // 62918d0a728cfef1ffffff43 + VPROLQ $127, Y16, K4, Y17 // 62b1f52472c87f + VPROLQ $127, 7(AX), K4, Y17 // 62f1f5247288070000007f + VPROLQ $127, (DI), K4, Y17 // 62f1f524720f7f + VPROLQ $0, Z20, K1, Z16 // 62b1fd4172cc00 + VPROLQ $0, Z0, K1, Z16 // 62f1fd4172c800 + VPROLQ $0, 7(SI)(DI*1), K1, Z16 // 62f1fd41728c3e0700000000 + VPROLQ $0, 15(DX)(BX*8), K1, Z16 // 62f1fd41728cda0f00000000 + VPROLQ $0, Z20, K1, Z9 // 62b1b54972cc00 + VPROLQ $0, Z0, K1, Z9 // 62f1b54972c800 + VPROLQ $0, 7(SI)(DI*1), K1, Z9 // 62f1b549728c3e0700000000 + VPROLQ $0, 15(DX)(BX*8), K1, Z9 // 62f1b549728cda0f00000000 + VPROLVD X8, X15, K3, X17 // 62c2050b15c8 + VPROLVD 17(SP)(BP*2), X15, K3, X17 // 62e2050b158c6c11000000 + VPROLVD -7(DI)(R8*4), X15, K3, X17 // 62a2050b158c87f9ffffff + VPROLVD Y26, Y6, K4, Y12 // 62124d2c15e2 + VPROLVD 99(R15)(R15*1), Y6, K4, Y12 // 62124d2c15a43f63000000 + VPROLVD (DX), Y6, K4, Y12 // 62724d2c1522 + VPROLVD Z0, Z0, K5, Z23 // 62e27d4d15f8 + VPROLVD Z25, Z0, K5, Z23 // 62827d4d15f9 + VPROLVD -7(DI)(R8*1), Z0, K5, Z23 // 62a27d4d15bc07f9ffffff + VPROLVD (SP), Z0, K5, Z23 // 62e27d4d153c24 + VPROLVD Z0, Z11, K5, Z23 // 62e2254d15f8 + VPROLVD Z25, Z11, 
K5, Z23 // 6282254d15f9 + VPROLVD -7(DI)(R8*1), Z11, K5, Z23 // 62a2254d15bc07f9ffffff + VPROLVD (SP), Z11, K5, Z23 // 62e2254d153c24 + VPROLVD Z0, Z0, K5, Z19 // 62e27d4d15d8 + VPROLVD Z25, Z0, K5, Z19 // 62827d4d15d9 + VPROLVD -7(DI)(R8*1), Z0, K5, Z19 // 62a27d4d159c07f9ffffff + VPROLVD (SP), Z0, K5, Z19 // 62e27d4d151c24 + VPROLVD Z0, Z11, K5, Z19 // 62e2254d15d8 + VPROLVD Z25, Z11, K5, Z19 // 6282254d15d9 + VPROLVD -7(DI)(R8*1), Z11, K5, Z19 // 62a2254d159c07f9ffffff + VPROLVD (SP), Z11, K5, Z19 // 62e2254d151c24 + VPROLVQ X23, X26, K7, X3 // 62b2ad0715df + VPROLVQ 15(R8), X26, K7, X3 // 62d2ad0715980f000000 + VPROLVQ (BP), X26, K7, X3 // 62f2ad07155d00 + VPROLVQ Y28, Y8, K7, Y3 // 6292bd2f15dc + VPROLVQ -17(BP)(SI*8), Y8, K7, Y3 // 62f2bd2f159cf5efffffff + VPROLVQ (R15), Y8, K7, Y3 // 62d2bd2f151f + VPROLVQ Z9, Z0, K6, Z24 // 6242fd4e15c1 + VPROLVQ Z3, Z0, K6, Z24 // 6262fd4e15c3 + VPROLVQ -7(CX), Z0, K6, Z24 // 6262fd4e1581f9ffffff + VPROLVQ 15(DX)(BX*4), Z0, K6, Z24 // 6262fd4e15849a0f000000 + VPROLVQ Z9, Z26, K6, Z24 // 6242ad4615c1 + VPROLVQ Z3, Z26, K6, Z24 // 6262ad4615c3 + VPROLVQ -7(CX), Z26, K6, Z24 // 6262ad461581f9ffffff + VPROLVQ 15(DX)(BX*4), Z26, K6, Z24 // 6262ad4615849a0f000000 + VPROLVQ Z9, Z0, K6, Z12 // 6252fd4e15e1 + VPROLVQ Z3, Z0, K6, Z12 // 6272fd4e15e3 + VPROLVQ -7(CX), Z0, K6, Z12 // 6272fd4e15a1f9ffffff + VPROLVQ 15(DX)(BX*4), Z0, K6, Z12 // 6272fd4e15a49a0f000000 + VPROLVQ Z9, Z26, K6, Z12 // 6252ad4615e1 + VPROLVQ Z3, Z26, K6, Z12 // 6272ad4615e3 + VPROLVQ -7(CX), Z26, K6, Z12 // 6272ad4615a1f9ffffff + VPROLVQ 15(DX)(BX*4), Z26, K6, Z12 // 6272ad4615a49a0f000000 + VPRORD $97, X28, K3, X13 // 6291150b72c461 + VPRORD $97, 15(R8)(R14*8), K3, X13 // 6291150b7284f00f00000061 + VPRORD $97, -15(R14)(R15*2), K3, X13 // 6291150b72847ef1ffffff61 + VPRORD $81, Y23, K7, Y1 // 62b1752f72c751 + VPRORD $81, 7(SI)(DI*8), K7, Y1 // 62f1752f7284fe0700000051 + VPRORD $81, -15(R14), K7, Y1 // 62d1752f7286f1ffffff51 + VPRORD $42, Z9, K4, Z9 // 
62d1354c72c12a + VPRORD $42, Z28, K4, Z9 // 6291354c72c42a + VPRORD $42, 99(R15)(R15*8), K4, Z9 // 6291354c7284ff630000002a + VPRORD $42, 7(AX)(CX*8), K4, Z9 // 62f1354c7284c8070000002a + VPRORD $42, Z9, K4, Z25 // 62d1354472c12a + VPRORD $42, Z28, K4, Z25 // 6291354472c42a + VPRORD $42, 99(R15)(R15*8), K4, Z25 // 629135447284ff630000002a + VPRORD $42, 7(AX)(CX*8), K4, Z25 // 62f135447284c8070000002a + VPRORQ $79, X9, K4, X24 // 62d1bd0472c14f + VPRORQ $79, -15(R14)(R15*1), K4, X24 // 6291bd0472843ef1ffffff4f + VPRORQ $79, -15(BX), K4, X24 // 62f1bd047283f1ffffff4f + VPRORQ $64, Y31, K7, Y14 // 62918d2f72c740 + VPRORQ $64, 7(SI)(DI*1), K7, Y14 // 62f18d2f72843e0700000040 + VPRORQ $64, 15(DX)(BX*8), K7, Y14 // 62f18d2f7284da0f00000040 + VPRORQ $27, Z17, K2, Z20 // 62b1dd4272c11b + VPRORQ $27, Z0, K2, Z20 // 62f1dd4272c01b + VPRORQ $27, (AX), K2, Z20 // 62f1dd4272001b + VPRORQ $27, 7(SI), K2, Z20 // 62f1dd427286070000001b + VPRORQ $27, Z17, K2, Z0 // 62b1fd4a72c11b + VPRORQ $27, Z0, K2, Z0 // 62f1fd4a72c01b + VPRORQ $27, (AX), K2, Z0 // 62f1fd4a72001b + VPRORQ $27, 7(SI), K2, Z0 // 62f1fd4a7286070000001b + VPRORVD X18, X26, K5, X15 // 62322d0514fa + VPRORVD 7(AX)(CX*4), X26, K5, X15 // 62722d0514bc8807000000 + VPRORVD 7(AX)(CX*1), X26, K5, X15 // 62722d0514bc0807000000 + VPRORVD Y22, Y2, K3, Y25 // 62226d2b14ce + VPRORVD -7(DI)(R8*1), Y2, K3, Y25 // 62226d2b148c07f9ffffff + VPRORVD (SP), Y2, K3, Y25 // 62626d2b140c24 + VPRORVD Z21, Z31, K4, Z17 // 62a2054414cd + VPRORVD Z9, Z31, K4, Z17 // 62c2054414c9 + VPRORVD (BX), Z31, K4, Z17 // 62e20544140b + VPRORVD -17(BP)(SI*1), Z31, K4, Z17 // 62e20544148c35efffffff + VPRORVD Z21, Z0, K4, Z17 // 62a27d4c14cd + VPRORVD Z9, Z0, K4, Z17 // 62c27d4c14c9 + VPRORVD (BX), Z0, K4, Z17 // 62e27d4c140b + VPRORVD -17(BP)(SI*1), Z0, K4, Z17 // 62e27d4c148c35efffffff + VPRORVD Z21, Z31, K4, Z23 // 62a2054414fd + VPRORVD Z9, Z31, K4, Z23 // 62c2054414f9 + VPRORVD (BX), Z31, K4, Z23 // 62e20544143b + VPRORVD -17(BP)(SI*1), Z31, K4, Z23 // 
62e2054414bc35efffffff + VPRORVD Z21, Z0, K4, Z23 // 62a27d4c14fd + VPRORVD Z9, Z0, K4, Z23 // 62c27d4c14f9 + VPRORVD (BX), Z0, K4, Z23 // 62e27d4c143b + VPRORVD -17(BP)(SI*1), Z0, K4, Z23 // 62e27d4c14bc35efffffff + VPRORVQ X11, X1, K2, X21 // 62c2f50a14eb + VPRORVQ (SI), X1, K2, X21 // 62e2f50a142e + VPRORVQ 7(SI)(DI*2), X1, K2, X21 // 62e2f50a14ac7e07000000 + VPRORVQ Y9, Y8, K2, Y27 // 6242bd2a14d9 + VPRORVQ -7(CX), Y8, K2, Y27 // 6262bd2a1499f9ffffff + VPRORVQ 15(DX)(BX*4), Y8, K2, Y27 // 6262bd2a149c9a0f000000 + VPRORVQ Z20, Z1, K3, Z6 // 62b2f54b14f4 + VPRORVQ Z9, Z1, K3, Z6 // 62d2f54b14f1 + VPRORVQ 15(R8)(R14*4), Z1, K3, Z6 // 6292f54b14b4b00f000000 + VPRORVQ -7(CX)(DX*4), Z1, K3, Z6 // 62f2f54b14b491f9ffffff + VPRORVQ Z20, Z9, K3, Z6 // 62b2b54b14f4 + VPRORVQ Z9, Z9, K3, Z6 // 62d2b54b14f1 + VPRORVQ 15(R8)(R14*4), Z9, K3, Z6 // 6292b54b14b4b00f000000 + VPRORVQ -7(CX)(DX*4), Z9, K3, Z6 // 62f2b54b14b491f9ffffff + VPRORVQ Z20, Z1, K3, Z9 // 6232f54b14cc + VPRORVQ Z9, Z1, K3, Z9 // 6252f54b14c9 + VPRORVQ 15(R8)(R14*4), Z1, K3, Z9 // 6212f54b148cb00f000000 + VPRORVQ -7(CX)(DX*4), Z1, K3, Z9 // 6272f54b148c91f9ffffff + VPRORVQ Z20, Z9, K3, Z9 // 6232b54b14cc + VPRORVQ Z9, Z9, K3, Z9 // 6252b54b14c9 + VPRORVQ 15(R8)(R14*4), Z9, K3, Z9 // 6212b54b148cb00f000000 + VPRORVQ -7(CX)(DX*4), Z9, K3, Z9 // 6272b54b148c91f9ffffff + VPSCATTERDD X0, K3, (AX)(X4*1) // 62f27d0ba00420 + VPSCATTERDD X0, K3, (BP)(X10*2) // 62b27d0ba0445500 + VPSCATTERDD X0, K3, (R10)(X29*8) // 62927d03a004ea + VPSCATTERDD Y1, K3, (R10)(Y29*8) // 62927d23a00cea + VPSCATTERDD Y1, K3, (SP)(Y4*2) // 62f27d2ba00c64 + VPSCATTERDD Y1, K3, (DX)(Y10*4) // 62b27d2ba00c92 + VPSCATTERDD Z16, K2, (DX)(Z10*4) // 62a27d4aa00492 + VPSCATTERDD Z25, K2, (DX)(Z10*4) // 62227d4aa00c92 + VPSCATTERDD Z16, K2, (AX)(Z4*1) // 62e27d4aa00420 + VPSCATTERDD Z25, K2, (AX)(Z4*1) // 62627d4aa00c20 + VPSCATTERDD Z16, K2, (SP)(Z4*2) // 62e27d4aa00464 + VPSCATTERDD Z25, K2, (SP)(Z4*2) // 62627d4aa00c64 + VPSCATTERDQ X0, K1, 
(DX)(X10*4) // 62b2fd09a00492 + VPSCATTERDQ X0, K1, (SP)(X4*2) // 62f2fd09a00464 + VPSCATTERDQ X0, K1, (R14)(X29*8) // 6292fd01a004ee + VPSCATTERDQ Y6, K2, (AX)(X4*1) // 62f2fd2aa03420 + VPSCATTERDQ Y6, K2, (BP)(X10*2) // 62b2fd2aa0745500 + VPSCATTERDQ Y6, K2, (R10)(X29*8) // 6292fd22a034ea + VPSCATTERDQ Z14, K1, (R14)(Y29*8) // 6212fd41a034ee + VPSCATTERDQ Z13, K1, (R14)(Y29*8) // 6212fd41a02cee + VPSCATTERDQ Z14, K1, (AX)(Y4*1) // 6272fd49a03420 + VPSCATTERDQ Z13, K1, (AX)(Y4*1) // 6272fd49a02c20 + VPSCATTERDQ Z14, K1, (BP)(Y10*2) // 6232fd49a0745500 + VPSCATTERDQ Z13, K1, (BP)(Y10*2) // 6232fd49a06c5500 + VPSCATTERQD X24, K7, (AX)(X4*1) // 62627d0fa10420 + VPSCATTERQD X24, K7, (BP)(X10*2) // 62227d0fa1445500 + VPSCATTERQD X24, K7, (R10)(X29*8) // 62027d07a104ea + VPSCATTERQD X20, K1, (R10)(Y29*8) // 62827d21a124ea + VPSCATTERQD X20, K1, (SP)(Y4*2) // 62e27d29a12464 + VPSCATTERQD X20, K1, (DX)(Y10*4) // 62a27d29a12492 + VPSCATTERQD Y1, K1, (DX)(Z10*4) // 62b27d49a10c92 + VPSCATTERQD Y1, K1, (AX)(Z4*1) // 62f27d49a10c20 + VPSCATTERQD Y1, K1, (SP)(Z4*2) // 62f27d49a10c64 + VPSCATTERQQ X7, K1, (DX)(X10*4) // 62b2fd09a13c92 + VPSCATTERQQ X7, K1, (SP)(X4*2) // 62f2fd09a13c64 + VPSCATTERQQ X7, K1, (R14)(X29*8) // 6292fd01a13cee + VPSCATTERQQ Y9, K7, (R14)(Y29*8) // 6212fd27a10cee + VPSCATTERQQ Y9, K7, (AX)(Y4*1) // 6272fd2fa10c20 + VPSCATTERQQ Y9, K7, (BP)(Y10*2) // 6232fd2fa14c5500 + VPSCATTERQQ Z12, K2, (BP)(Z10*2) // 6232fd4aa1645500 + VPSCATTERQQ Z13, K2, (BP)(Z10*2) // 6232fd4aa16c5500 + VPSCATTERQQ Z12, K2, (R10)(Z29*8) // 6212fd42a124ea + VPSCATTERQQ Z13, K2, (R10)(Z29*8) // 6212fd42a12cea + VPSCATTERQQ Z12, K2, (R14)(Z29*8) // 6212fd42a124ee + VPSCATTERQQ Z13, K2, (R14)(Z29*8) // 6212fd42a12cee + VPSHUFD $126, X2, K4, X9 // 62717d0c70ca7e + VPSHUFD $126, 17(SP)(BP*1), K4, X9 // 62717d0c708c2c110000007e + VPSHUFD $126, -7(CX)(DX*8), K4, X9 // 62717d0c708cd1f9ffffff7e + VPSHUFD $94, Y31, K4, Y6 // 62917d2c70f75e + VPSHUFD $94, 17(SP)(BP*2), K4, Y6 // 
62f17d2c70b46c110000005e + VPSHUFD $94, -7(DI)(R8*4), K4, Y6 // 62b17d2c70b487f9ffffff5e + VPSHUFD $121, Z3, K7, Z8 // 62717d4f70c379 + VPSHUFD $121, Z27, K7, Z8 // 62117d4f70c379 + VPSHUFD $121, 7(AX)(CX*4), K7, Z8 // 62717d4f7084880700000079 + VPSHUFD $121, 7(AX)(CX*1), K7, Z8 // 62717d4f7084080700000079 + VPSHUFD $121, Z3, K7, Z2 // 62f17d4f70d379 + VPSHUFD $121, Z27, K7, Z2 // 62917d4f70d379 + VPSHUFD $121, 7(AX)(CX*4), K7, Z2 // 62f17d4f7094880700000079 + VPSHUFD $121, 7(AX)(CX*1), K7, Z2 // 62f17d4f7094080700000079 + VPSLLD $81, X0, K3, X14 // 62f10d0b72f051 + VPSLLD $81, (R14), K3, X14 // 62d10d0b723651 + VPSLLD $81, -7(DI)(R8*8), K3, X14 // 62b10d0b72b4c7f9ffffff51 + VPSLLD $42, Y0, K3, Y6 // 62f14d2b72f02a + VPSLLD $42, -15(R14)(R15*1), K3, Y6 // 62914d2b72b43ef1ffffff2a + VPSLLD $42, -15(BX), K3, Y6 // 62f14d2b72b3f1ffffff2a + VPSLLD $79, Z12, K3, Z9 // 62d1354b72f44f + VPSLLD $79, Z22, K3, Z9 // 62b1354b72f64f + VPSLLD $79, 7(SI)(DI*4), K3, Z9 // 62f1354b72b4be070000004f + VPSLLD $79, -7(DI)(R8*2), K3, Z9 // 62b1354b72b447f9ffffff4f + VPSLLD $79, Z12, K3, Z19 // 62d1654372f44f + VPSLLD $79, Z22, K3, Z19 // 62b1654372f64f + VPSLLD $79, 7(SI)(DI*4), K3, Z19 // 62f1654372b4be070000004f + VPSLLD $79, -7(DI)(R8*2), K3, Z19 // 62b1654372b447f9ffffff4f + VPSLLD X15, X7, K2, X17 // 62c1450af2cf + VPSLLD 99(R15)(R15*4), X7, K2, X17 // 6281450af28cbf63000000 + VPSLLD 15(DX), X7, K2, X17 // 62e1450af28a0f000000 + VPSLLD X11, Y5, K1, Y3 // 62d15529f2db + VPSLLD (CX), Y5, K1, Y3 // 62f15529f219 + VPSLLD 99(R15), Y5, K1, Y3 // 62d15529f29f63000000 + VPSLLD X0, Z18, K2, Z11 // 62716d42f2d8 + VPSLLD 99(R15)(R15*2), Z18, K2, Z11 // 62116d42f29c7f63000000 + VPSLLD -7(DI), Z18, K2, Z11 // 62716d42f29ff9ffffff + VPSLLD X0, Z24, K2, Z11 // 62713d42f2d8 + VPSLLD 99(R15)(R15*2), Z24, K2, Z11 // 62113d42f29c7f63000000 + VPSLLD -7(DI), Z24, K2, Z11 // 62713d42f29ff9ffffff + VPSLLD X0, Z18, K2, Z5 // 62f16d42f2e8 + VPSLLD 99(R15)(R15*2), Z18, K2, Z5 // 62916d42f2ac7f63000000 + 
VPSLLD -7(DI), Z18, K2, Z5 // 62f16d42f2aff9ffffff + VPSLLD X0, Z24, K2, Z5 // 62f13d42f2e8 + VPSLLD 99(R15)(R15*2), Z24, K2, Z5 // 62913d42f2ac7f63000000 + VPSLLD -7(DI), Z24, K2, Z5 // 62f13d42f2aff9ffffff + VPSLLQ $82, X25, K1, X27 // 6291a50173f152 + VPSLLQ $82, 15(DX)(BX*1), K1, X27 // 62f1a50173b41a0f00000052 + VPSLLQ $82, -7(CX)(DX*2), K1, X27 // 62f1a50173b451f9ffffff52 + VPSLLQ $126, Y5, K7, Y3 // 62f1e52f73f57e + VPSLLQ $126, (SI), K7, Y3 // 62f1e52f73367e + VPSLLQ $126, 7(SI)(DI*2), K7, Y3 // 62f1e52f73b47e070000007e + VPSLLQ $94, Z6, K1, Z6 // 62f1cd4973f65e + VPSLLQ $94, Z22, K1, Z6 // 62b1cd4973f65e + VPSLLQ $94, 7(AX), K1, Z6 // 62f1cd4973b0070000005e + VPSLLQ $94, (DI), K1, Z6 // 62f1cd4973375e + VPSLLQ $94, Z6, K1, Z16 // 62f1fd4173f65e + VPSLLQ $94, Z22, K1, Z16 // 62b1fd4173f65e + VPSLLQ $94, 7(AX), K1, Z16 // 62f1fd4173b0070000005e + VPSLLQ $94, (DI), K1, Z16 // 62f1fd4173375e + VPSLLQ X15, X18, K1, X3 // 62d1ed01f3df + VPSLLQ -17(BP), X18, K1, X3 // 62f1ed01f39defffffff + VPSLLQ -15(R14)(R15*8), X18, K1, X3 // 6291ed01f39cfef1ffffff + VPSLLQ X28, Y7, K1, Y28 // 6201c529f3e4 + VPSLLQ 17(SP)(BP*2), Y7, K1, Y28 // 6261c529f3a46c11000000 + VPSLLQ -7(DI)(R8*4), Y7, K1, Y28 // 6221c529f3a487f9ffffff + VPSLLQ X15, Z13, K7, Z1 // 62d1954ff3cf + VPSLLQ 15(R8), Z13, K7, Z1 // 62d1954ff3880f000000 + VPSLLQ (BP), Z13, K7, Z1 // 62f1954ff34d00 + VPSLLQ X15, Z13, K7, Z15 // 6251954ff3ff + VPSLLQ 15(R8), Z13, K7, Z15 // 6251954ff3b80f000000 + VPSLLQ (BP), Z13, K7, Z15 // 6271954ff37d00 + VPSLLVD X8, X13, K2, X7 // 62d2150a47f8 + VPSLLVD 15(R8)(R14*8), X13, K2, X7 // 6292150a47bcf00f000000 + VPSLLVD -15(R14)(R15*2), X13, K2, X7 // 6292150a47bc7ef1ffffff + VPSLLVD Y13, Y22, K4, Y0 // 62d24d2447c5 + VPSLLVD 17(SP)(BP*8), Y22, K4, Y0 // 62f24d244784ec11000000 + VPSLLVD 17(SP)(BP*4), Y22, K4, Y0 // 62f24d244784ac11000000 + VPSLLVD Z2, Z22, K1, Z18 // 62e24d4147d2 + VPSLLVD Z31, Z22, K1, Z18 // 62824d4147d7 + VPSLLVD 99(R15)(R15*1), Z22, K1, Z18 // 
62824d4147943f63000000 + VPSLLVD (DX), Z22, K1, Z18 // 62e24d414712 + VPSLLVD Z2, Z7, K1, Z18 // 62e2454947d2 + VPSLLVD Z31, Z7, K1, Z18 // 6282454947d7 + VPSLLVD 99(R15)(R15*1), Z7, K1, Z18 // 6282454947943f63000000 + VPSLLVD (DX), Z7, K1, Z18 // 62e245494712 + VPSLLVD Z2, Z22, K1, Z8 // 62724d4147c2 + VPSLLVD Z31, Z22, K1, Z8 // 62124d4147c7 + VPSLLVD 99(R15)(R15*1), Z22, K1, Z8 // 62124d4147843f63000000 + VPSLLVD (DX), Z22, K1, Z8 // 62724d414702 + VPSLLVD Z2, Z7, K1, Z8 // 6272454947c2 + VPSLLVD Z31, Z7, K1, Z8 // 6212454947c7 + VPSLLVD 99(R15)(R15*1), Z7, K1, Z8 // 6212454947843f63000000 + VPSLLVD (DX), Z7, K1, Z8 // 627245494702 + VPSLLVQ X0, X7, K3, X24 // 6262c50b47c0 + VPSLLVQ -15(R14)(R15*1), X7, K3, X24 // 6202c50b47843ef1ffffff + VPSLLVQ -15(BX), X7, K3, X24 // 6262c50b4783f1ffffff + VPSLLVQ Y14, Y1, K4, Y12 // 6252f52c47e6 + VPSLLVQ 7(SI)(DI*4), Y1, K4, Y12 // 6272f52c47a4be07000000 + VPSLLVQ -7(DI)(R8*2), Y1, K4, Y12 // 6232f52c47a447f9ffffff + VPSLLVQ Z12, Z1, K5, Z20 // 62c2f54d47e4 + VPSLLVQ Z16, Z1, K5, Z20 // 62a2f54d47e0 + VPSLLVQ -17(BP)(SI*8), Z1, K5, Z20 // 62e2f54d47a4f5efffffff + VPSLLVQ (R15), Z1, K5, Z20 // 62c2f54d4727 + VPSLLVQ Z12, Z3, K5, Z20 // 62c2e54d47e4 + VPSLLVQ Z16, Z3, K5, Z20 // 62a2e54d47e0 + VPSLLVQ -17(BP)(SI*8), Z3, K5, Z20 // 62e2e54d47a4f5efffffff + VPSLLVQ (R15), Z3, K5, Z20 // 62c2e54d4727 + VPSLLVQ Z12, Z1, K5, Z9 // 6252f54d47cc + VPSLLVQ Z16, Z1, K5, Z9 // 6232f54d47c8 + VPSLLVQ -17(BP)(SI*8), Z1, K5, Z9 // 6272f54d478cf5efffffff + VPSLLVQ (R15), Z1, K5, Z9 // 6252f54d470f + VPSLLVQ Z12, Z3, K5, Z9 // 6252e54d47cc + VPSLLVQ Z16, Z3, K5, Z9 // 6232e54d47c8 + VPSLLVQ -17(BP)(SI*8), Z3, K5, Z9 // 6272e54d478cf5efffffff + VPSLLVQ (R15), Z3, K5, Z9 // 6252e54d470f + VPSRAD $67, X7, K5, X24 // 62f13d0572e743 + VPSRAD $67, 7(AX), K5, X24 // 62f13d0572a00700000043 + VPSRAD $67, (DI), K5, X24 // 62f13d05722743 + VPSRAD $127, Y7, K3, Y13 // 62f1152b72e77f + VPSRAD $127, 99(R15)(R15*1), K3, Y13 // 6291152b72a43f630000007f + 
VPSRAD $127, (DX), K3, Y13 // 62f1152b72227f + VPSRAD $0, Z21, K4, Z14 // 62b10d4c72e500 + VPSRAD $0, Z8, K4, Z14 // 62d10d4c72e000 + VPSRAD $0, -7(DI)(R8*1), K4, Z14 // 62b10d4c72a407f9ffffff00 + VPSRAD $0, (SP), K4, Z14 // 62f10d4c72242400 + VPSRAD $0, Z21, K4, Z15 // 62b1054c72e500 + VPSRAD $0, Z8, K4, Z15 // 62d1054c72e000 + VPSRAD $0, -7(DI)(R8*1), K4, Z15 // 62b1054c72a407f9ffffff00 + VPSRAD $0, (SP), K4, Z15 // 62f1054c72242400 + VPSRAD X12, X16, K2, X20 // 62c17d02e2e4 + VPSRAD 99(R15)(R15*1), X16, K2, X20 // 62817d02e2a43f63000000 + VPSRAD (DX), X16, K2, X20 // 62e17d02e222 + VPSRAD X6, Y21, K2, Y2 // 62f15522e2d6 + VPSRAD -17(BP)(SI*8), Y21, K2, Y2 // 62f15522e294f5efffffff + VPSRAD (R15), Y21, K2, Y2 // 62d15522e217 + VPSRAD X17, Z20, K3, Z16 // 62a15d43e2c1 + VPSRAD 7(SI)(DI*8), Z20, K3, Z16 // 62e15d43e284fe07000000 + VPSRAD -15(R14), Z20, K3, Z16 // 62c15d43e286f1ffffff + VPSRAD X17, Z0, K3, Z16 // 62a17d4be2c1 + VPSRAD 7(SI)(DI*8), Z0, K3, Z16 // 62e17d4be284fe07000000 + VPSRAD -15(R14), Z0, K3, Z16 // 62c17d4be286f1ffffff + VPSRAD X17, Z20, K3, Z9 // 62315d43e2c9 + VPSRAD 7(SI)(DI*8), Z20, K3, Z9 // 62715d43e28cfe07000000 + VPSRAD -15(R14), Z20, K3, Z9 // 62515d43e28ef1ffffff + VPSRAD X17, Z0, K3, Z9 // 62317d4be2c9 + VPSRAD 7(SI)(DI*8), Z0, K3, Z9 // 62717d4be28cfe07000000 + VPSRAD -15(R14), Z0, K3, Z9 // 62517d4be28ef1ffffff + VPSRAQ $97, X6, K3, X28 // 62f19d0372e661 + VPSRAQ $97, 7(SI)(DI*1), K3, X28 // 62f19d0372a43e0700000061 + VPSRAQ $97, 15(DX)(BX*8), K3, X28 // 62f19d0372a4da0f00000061 + VPSRAQ $81, Y9, K3, Y12 // 62d19d2b72e151 + VPSRAQ $81, -17(BP)(SI*8), K3, Y12 // 62f19d2b72a4f5efffffff51 + VPSRAQ $81, (R15), K3, Y12 // 62d19d2b722751 + VPSRAQ $42, Z0, K2, Z23 // 62f1c54272e02a + VPSRAQ $42, Z11, K2, Z23 // 62d1c54272e32a + VPSRAQ $42, -7(CX), K2, Z23 // 62f1c54272a1f9ffffff2a + VPSRAQ $42, 15(DX)(BX*4), K2, Z23 // 62f1c54272a49a0f0000002a + VPSRAQ $42, Z0, K2, Z19 // 62f1e54272e02a + VPSRAQ $42, Z11, K2, Z19 // 62d1e54272e32a + VPSRAQ 
$42, -7(CX), K2, Z19 // 62f1e54272a1f9ffffff2a + VPSRAQ $42, 15(DX)(BX*4), K2, Z19 // 62f1e54272a49a0f0000002a + VPSRAQ X8, X8, K1, X1 // 62d1bd09e2c8 + VPSRAQ -7(DI)(R8*1), X8, K1, X1 // 62b1bd09e28c07f9ffffff + VPSRAQ (SP), X8, K1, X1 // 62f1bd09e20c24 + VPSRAQ X6, Y9, K2, Y1 // 62f1b52ae2ce + VPSRAQ -7(CX), Y9, K2, Y1 // 62f1b52ae289f9ffffff + VPSRAQ 15(DX)(BX*4), Y9, K2, Y1 // 62f1b52ae28c9a0f000000 + VPSRAQ X0, Z24, K1, Z0 // 62f1bd41e2c0 + VPSRAQ 99(R15)(R15*8), Z24, K1, Z0 // 6291bd41e284ff63000000 + VPSRAQ 7(AX)(CX*8), Z24, K1, Z0 // 62f1bd41e284c807000000 + VPSRAQ X0, Z12, K1, Z0 // 62f19d49e2c0 + VPSRAQ 99(R15)(R15*8), Z12, K1, Z0 // 62919d49e284ff63000000 + VPSRAQ 7(AX)(CX*8), Z12, K1, Z0 // 62f19d49e284c807000000 + VPSRAQ X0, Z24, K1, Z25 // 6261bd41e2c8 + VPSRAQ 99(R15)(R15*8), Z24, K1, Z25 // 6201bd41e28cff63000000 + VPSRAQ 7(AX)(CX*8), Z24, K1, Z25 // 6261bd41e28cc807000000 + VPSRAQ X0, Z12, K1, Z25 // 62619d49e2c8 + VPSRAQ 99(R15)(R15*8), Z12, K1, Z25 // 62019d49e28cff63000000 + VPSRAQ 7(AX)(CX*8), Z12, K1, Z25 // 62619d49e28cc807000000 + VPSRAVD X6, X16, K7, X11 // 62727d0746de + VPSRAVD (AX), X16, K7, X11 // 62727d074618 + VPSRAVD 7(SI), X16, K7, X11 // 62727d07469e07000000 + VPSRAVD Y9, Y2, K1, Y3 // 62d26d2946d9 + VPSRAVD 7(SI)(DI*8), Y2, K1, Y3 // 62f26d29469cfe07000000 + VPSRAVD -15(R14), Y2, K1, Y3 // 62d26d29469ef1ffffff + VPSRAVD Z9, Z9, K1, Z0 // 62d2354946c1 + VPSRAVD Z25, Z9, K1, Z0 // 6292354946c1 + VPSRAVD 99(R15)(R15*8), Z9, K1, Z0 // 629235494684ff63000000 + VPSRAVD 7(AX)(CX*8), Z9, K1, Z0 // 62f235494684c807000000 + VPSRAVD Z9, Z3, K1, Z0 // 62d2654946c1 + VPSRAVD Z25, Z3, K1, Z0 // 6292654946c1 + VPSRAVD 99(R15)(R15*8), Z3, K1, Z0 // 629265494684ff63000000 + VPSRAVD 7(AX)(CX*8), Z3, K1, Z0 // 62f265494684c807000000 + VPSRAVD Z9, Z9, K1, Z26 // 6242354946d1 + VPSRAVD Z25, Z9, K1, Z26 // 6202354946d1 + VPSRAVD 99(R15)(R15*8), Z9, K1, Z26 // 620235494694ff63000000 + VPSRAVD 7(AX)(CX*8), Z9, K1, Z26 // 626235494694c807000000 + VPSRAVD 
Z9, Z3, K1, Z26 // 6242654946d1 + VPSRAVD Z25, Z3, K1, Z26 // 6202654946d1 + VPSRAVD 99(R15)(R15*8), Z3, K1, Z26 // 620265494694ff63000000 + VPSRAVD 7(AX)(CX*8), Z3, K1, Z26 // 626265494694c807000000 + VPSRAVQ X12, X22, K1, X6 // 62d2cd0146f4 + VPSRAVQ (BX), X22, K1, X6 // 62f2cd014633 + VPSRAVQ -17(BP)(SI*1), X22, K1, X6 // 62f2cd0146b435efffffff + VPSRAVQ Y14, Y21, K7, Y12 // 6252d52746e6 + VPSRAVQ 7(SI)(DI*1), Y21, K7, Y12 // 6272d52746a43e07000000 + VPSRAVQ 15(DX)(BX*8), Y21, K7, Y12 // 6272d52746a4da0f000000 + VPSRAVQ Z17, Z20, K2, Z9 // 6232dd4246c9 + VPSRAVQ Z0, Z20, K2, Z9 // 6272dd4246c8 + VPSRAVQ (AX), Z20, K2, Z9 // 6272dd424608 + VPSRAVQ 7(SI), Z20, K2, Z9 // 6272dd42468e07000000 + VPSRAVQ Z17, Z0, K2, Z9 // 6232fd4a46c9 + VPSRAVQ Z0, Z0, K2, Z9 // 6272fd4a46c8 + VPSRAVQ (AX), Z0, K2, Z9 // 6272fd4a4608 + VPSRAVQ 7(SI), Z0, K2, Z9 // 6272fd4a468e07000000 + VPSRAVQ Z17, Z20, K2, Z28 // 6222dd4246e1 + VPSRAVQ Z0, Z20, K2, Z28 // 6262dd4246e0 + VPSRAVQ (AX), Z20, K2, Z28 // 6262dd424620 + VPSRAVQ 7(SI), Z20, K2, Z28 // 6262dd4246a607000000 + VPSRAVQ Z17, Z0, K2, Z28 // 6222fd4a46e1 + VPSRAVQ Z0, Z0, K2, Z28 // 6262fd4a46e0 + VPSRAVQ (AX), Z0, K2, Z28 // 6262fd4a4620 + VPSRAVQ 7(SI), Z0, K2, Z28 // 6262fd4a46a607000000 + VPSRLD $47, X0, K7, X0 // 62f17d0f72d02f + VPSRLD $47, (R14), K7, X0 // 62d17d0f72162f + VPSRLD $47, -7(DI)(R8*8), K7, X0 // 62b17d0f7294c7f9ffffff2f + VPSRLD $82, Y6, K4, Y22 // 62f14d2472d652 + VPSRLD $82, 99(R15)(R15*8), K4, Y22 // 62914d247294ff6300000052 + VPSRLD $82, 7(AX)(CX*8), K4, Y22 // 62f14d247294c80700000052 + VPSRLD $126, Z7, K4, Z26 // 62f12d4472d77e + VPSRLD $126, Z21, K4, Z26 // 62b12d4472d57e + VPSRLD $126, (R8), K4, Z26 // 62d12d4472107e + VPSRLD $126, 15(DX)(BX*2), K4, Z26 // 62f12d4472945a0f0000007e + VPSRLD $126, Z7, K4, Z22 // 62f14d4472d77e + VPSRLD $126, Z21, K4, Z22 // 62b14d4472d57e + VPSRLD $126, (R8), K4, Z22 // 62d14d4472107e + VPSRLD $126, 15(DX)(BX*2), K4, Z22 // 62f14d4472945a0f0000007e + VPSRLD X17, X11, 
K7, X25 // 6221250fd2c9 + VPSRLD 99(R15)(R15*4), X11, K7, X25 // 6201250fd28cbf63000000 + VPSRLD 15(DX), X11, K7, X25 // 6261250fd28a0f000000 + VPSRLD X18, Y7, K2, Y21 // 62a1452ad2ea + VPSRLD (CX), Y7, K2, Y21 // 62e1452ad229 + VPSRLD 99(R15), Y7, K2, Y21 // 62c1452ad2af63000000 + VPSRLD X11, Z14, K5, Z16 // 62c10d4dd2c3 + VPSRLD 99(R15)(R15*2), Z14, K5, Z16 // 62810d4dd2847f63000000 + VPSRLD -7(DI), Z14, K5, Z16 // 62e10d4dd287f9ffffff + VPSRLD X11, Z13, K5, Z16 // 62c1154dd2c3 + VPSRLD 99(R15)(R15*2), Z13, K5, Z16 // 6281154dd2847f63000000 + VPSRLD -7(DI), Z13, K5, Z16 // 62e1154dd287f9ffffff + VPSRLD X11, Z14, K5, Z25 // 62410d4dd2cb + VPSRLD 99(R15)(R15*2), Z14, K5, Z25 // 62010d4dd28c7f63000000 + VPSRLD -7(DI), Z14, K5, Z25 // 62610d4dd28ff9ffffff + VPSRLD X11, Z13, K5, Z25 // 6241154dd2cb + VPSRLD 99(R15)(R15*2), Z13, K5, Z25 // 6201154dd28c7f63000000 + VPSRLD -7(DI), Z13, K5, Z25 // 6261154dd28ff9ffffff + VPSRLQ $65, X2, K3, X24 // 62f1bd0373d241 + VPSRLQ $65, 15(DX)(BX*1), K3, X24 // 62f1bd0373941a0f00000041 + VPSRLQ $65, -7(CX)(DX*2), K3, X24 // 62f1bd03739451f9ffffff41 + VPSRLQ $67, Y14, K4, Y20 // 62d1dd2473d643 + VPSRLQ $67, (BX), K4, Y20 // 62f1dd24731343 + VPSRLQ $67, -17(BP)(SI*1), K4, Y20 // 62f1dd24739435efffffff43 + VPSRLQ $127, Z27, K2, Z2 // 6291ed4a73d37f + VPSRLQ $127, Z25, K2, Z2 // 6291ed4a73d17f + VPSRLQ $127, -17(BP)(SI*2), K2, Z2 // 62f1ed4a739475efffffff7f + VPSRLQ $127, 7(AX)(CX*2), K2, Z2 // 62f1ed4a739448070000007f + VPSRLQ $127, Z27, K2, Z7 // 6291c54a73d37f + VPSRLQ $127, Z25, K2, Z7 // 6291c54a73d17f + VPSRLQ $127, -17(BP)(SI*2), K2, Z7 // 62f1c54a739475efffffff7f + VPSRLQ $127, 7(AX)(CX*2), K2, Z7 // 62f1c54a739448070000007f + VPSRLQ X26, X27, K2, X2 // 6291a502d3d2 + VPSRLQ -17(BP), X27, K2, X2 // 62f1a502d395efffffff + VPSRLQ -15(R14)(R15*8), X27, K2, X2 // 6291a502d394fef1ffffff + VPSRLQ X22, Y13, K3, Y24 // 6221952bd3c6 + VPSRLQ 17(SP)(BP*2), Y13, K3, Y24 // 6261952bd3846c11000000 + VPSRLQ -7(DI)(R8*4), Y13, K3, Y24 // 
6221952bd38487f9ffffff + VPSRLQ X30, Z27, K3, Z23 // 6281a543d3fe + VPSRLQ 15(R8), Z27, K3, Z23 // 62c1a543d3b80f000000 + VPSRLQ (BP), Z27, K3, Z23 // 62e1a543d37d00 + VPSRLQ X30, Z14, K3, Z23 // 62818d4bd3fe + VPSRLQ 15(R8), Z14, K3, Z23 // 62c18d4bd3b80f000000 + VPSRLQ (BP), Z14, K3, Z23 // 62e18d4bd37d00 + VPSRLQ X30, Z27, K3, Z9 // 6211a543d3ce + VPSRLQ 15(R8), Z27, K3, Z9 // 6251a543d3880f000000 + VPSRLQ (BP), Z27, K3, Z9 // 6271a543d34d00 + VPSRLQ X30, Z14, K3, Z9 // 62118d4bd3ce + VPSRLQ 15(R8), Z14, K3, Z9 // 62518d4bd3880f000000 + VPSRLQ (BP), Z14, K3, Z9 // 62718d4bd34d00 + VPSRLVD X15, X11, K3, X3 // 62d2250b45df + VPSRLVD 15(R8)(R14*8), X11, K3, X3 // 6292250b459cf00f000000 + VPSRLVD -15(R14)(R15*2), X11, K3, X3 // 6292250b459c7ef1ffffff + VPSRLVD Y21, Y14, K2, Y20 // 62a20d2a45e5 + VPSRLVD 15(R8)(R14*4), Y14, K2, Y20 // 62820d2a45a4b00f000000 + VPSRLVD -7(CX)(DX*4), Y14, K2, Y20 // 62e20d2a45a491f9ffffff + VPSRLVD Z8, Z14, K1, Z3 // 62d20d4945d8 + VPSRLVD Z24, Z14, K1, Z3 // 62920d4945d8 + VPSRLVD 15(R8)(R14*1), Z14, K1, Z3 // 62920d49459c300f000000 + VPSRLVD 15(R8)(R14*2), Z14, K1, Z3 // 62920d49459c700f000000 + VPSRLVD Z8, Z7, K1, Z3 // 62d2454945d8 + VPSRLVD Z24, Z7, K1, Z3 // 6292454945d8 + VPSRLVD 15(R8)(R14*1), Z7, K1, Z3 // 62924549459c300f000000 + VPSRLVD 15(R8)(R14*2), Z7, K1, Z3 // 62924549459c700f000000 + VPSRLVD Z8, Z14, K1, Z0 // 62d20d4945c0 + VPSRLVD Z24, Z14, K1, Z0 // 62920d4945c0 + VPSRLVD 15(R8)(R14*1), Z14, K1, Z0 // 62920d494584300f000000 + VPSRLVD 15(R8)(R14*2), Z14, K1, Z0 // 62920d494584700f000000 + VPSRLVD Z8, Z7, K1, Z0 // 62d2454945c0 + VPSRLVD Z24, Z7, K1, Z0 // 6292454945c0 + VPSRLVD 15(R8)(R14*1), Z7, K1, Z0 // 629245494584300f000000 + VPSRLVD 15(R8)(R14*2), Z7, K1, Z0 // 629245494584700f000000 + VPSRLVQ X6, X13, K2, X30 // 6262950a45f6 + VPSRLVQ -15(R14)(R15*1), X13, K2, X30 // 6202950a45b43ef1ffffff + VPSRLVQ -15(BX), X13, K2, X30 // 6262950a45b3f1ffffff + VPSRLVQ Y30, Y26, K1, Y1 // 6292ad2145ce + VPSRLVQ (R8), Y26, K1, 
Y1 // 62d2ad214508 + VPSRLVQ 15(DX)(BX*2), Y26, K1, Y1 // 62f2ad21458c5a0f000000 + VPSRLVQ Z6, Z1, K7, Z22 // 62e2f54f45f6 + VPSRLVQ Z2, Z1, K7, Z22 // 62e2f54f45f2 + VPSRLVQ (R14), Z1, K7, Z22 // 62c2f54f4536 + VPSRLVQ -7(DI)(R8*8), Z1, K7, Z22 // 62a2f54f45b4c7f9ffffff + VPSRLVQ Z6, Z16, K7, Z22 // 62e2fd4745f6 + VPSRLVQ Z2, Z16, K7, Z22 // 62e2fd4745f2 + VPSRLVQ (R14), Z16, K7, Z22 // 62c2fd474536 + VPSRLVQ -7(DI)(R8*8), Z16, K7, Z22 // 62a2fd4745b4c7f9ffffff + VPSRLVQ Z6, Z1, K7, Z25 // 6262f54f45ce + VPSRLVQ Z2, Z1, K7, Z25 // 6262f54f45ca + VPSRLVQ (R14), Z1, K7, Z25 // 6242f54f450e + VPSRLVQ -7(DI)(R8*8), Z1, K7, Z25 // 6222f54f458cc7f9ffffff + VPSRLVQ Z6, Z16, K7, Z25 // 6262fd4745ce + VPSRLVQ Z2, Z16, K7, Z25 // 6262fd4745ca + VPSRLVQ (R14), Z16, K7, Z25 // 6242fd47450e + VPSRLVQ -7(DI)(R8*8), Z16, K7, Z25 // 6222fd47458cc7f9ffffff + VPSUBD X0, X1, K6, X8 // 6271750efac0 + VPSUBD 99(R15)(R15*1), X1, K6, X8 // 6211750efa843f63000000 + VPSUBD (DX), X1, K6, X8 // 6271750efa02 + VPSUBD Y30, Y7, K3, Y21 // 6281452bfaee + VPSUBD (R14), Y7, K3, Y21 // 62c1452bfa2e + VPSUBD -7(DI)(R8*8), Y7, K3, Y21 // 62a1452bfaacc7f9ffffff + VPSUBD Z3, Z26, K7, Z13 // 62712d47faeb + VPSUBD Z0, Z26, K7, Z13 // 62712d47fae8 + VPSUBD -7(CX)(DX*1), Z26, K7, Z13 // 62712d47faac11f9ffffff + VPSUBD -15(R14)(R15*4), Z26, K7, Z13 // 62112d47faacbef1ffffff + VPSUBD Z3, Z3, K7, Z13 // 6271654ffaeb + VPSUBD Z0, Z3, K7, Z13 // 6271654ffae8 + VPSUBD -7(CX)(DX*1), Z3, K7, Z13 // 6271654ffaac11f9ffffff + VPSUBD -15(R14)(R15*4), Z3, K7, Z13 // 6211654ffaacbef1ffffff + VPSUBD Z3, Z26, K7, Z21 // 62e12d47faeb + VPSUBD Z0, Z26, K7, Z21 // 62e12d47fae8 + VPSUBD -7(CX)(DX*1), Z26, K7, Z21 // 62e12d47faac11f9ffffff + VPSUBD -15(R14)(R15*4), Z26, K7, Z21 // 62812d47faacbef1ffffff + VPSUBD Z3, Z3, K7, Z21 // 62e1654ffaeb + VPSUBD Z0, Z3, K7, Z21 // 62e1654ffae8 + VPSUBD -7(CX)(DX*1), Z3, K7, Z21 // 62e1654ffaac11f9ffffff + VPSUBD -15(R14)(R15*4), Z3, K7, Z21 // 6281654ffaacbef1ffffff + VPSUBQ X16, X0, 
K4, X15 // 6231fd0cfbf8 + VPSUBQ -17(BP)(SI*8), X0, K4, X15 // 6271fd0cfbbcf5efffffff + VPSUBQ (R15), X0, K4, X15 // 6251fd0cfb3f + VPSUBQ Y24, Y18, K4, Y13 // 6211ed24fbe8 + VPSUBQ 99(R15)(R15*4), Y18, K4, Y13 // 6211ed24fbacbf63000000 + VPSUBQ 15(DX), Y18, K4, Y13 // 6271ed24fbaa0f000000 + VPSUBQ Z3, Z11, K7, Z21 // 62e1a54ffbeb + VPSUBQ Z12, Z11, K7, Z21 // 62c1a54ffbec + VPSUBQ 15(DX)(BX*1), Z11, K7, Z21 // 62e1a54ffbac1a0f000000 + VPSUBQ -7(CX)(DX*2), Z11, K7, Z21 // 62e1a54ffbac51f9ffffff + VPSUBQ Z3, Z25, K7, Z21 // 62e1b547fbeb + VPSUBQ Z12, Z25, K7, Z21 // 62c1b547fbec + VPSUBQ 15(DX)(BX*1), Z25, K7, Z21 // 62e1b547fbac1a0f000000 + VPSUBQ -7(CX)(DX*2), Z25, K7, Z21 // 62e1b547fbac51f9ffffff + VPSUBQ Z3, Z11, K7, Z13 // 6271a54ffbeb + VPSUBQ Z12, Z11, K7, Z13 // 6251a54ffbec + VPSUBQ 15(DX)(BX*1), Z11, K7, Z13 // 6271a54ffbac1a0f000000 + VPSUBQ -7(CX)(DX*2), Z11, K7, Z13 // 6271a54ffbac51f9ffffff + VPSUBQ Z3, Z25, K7, Z13 // 6271b547fbeb + VPSUBQ Z12, Z25, K7, Z13 // 6251b547fbec + VPSUBQ 15(DX)(BX*1), Z25, K7, Z13 // 6271b547fbac1a0f000000 + VPSUBQ -7(CX)(DX*2), Z25, K7, Z13 // 6271b547fbac51f9ffffff + VPTERNLOGD $42, X5, X14, K1, X12 // 62730d0925e52a + VPTERNLOGD $42, (AX), X14, K1, X12 // 62730d0925202a + VPTERNLOGD $42, 7(SI), X14, K1, X12 // 62730d0925a6070000002a + VPTERNLOGD $79, Y12, Y26, K1, Y11 // 62532d2125dc4f + VPTERNLOGD $79, 17(SP)(BP*2), Y26, K1, Y11 // 62732d21259c6c110000004f + VPTERNLOGD $79, -7(DI)(R8*4), Y26, K1, Y11 // 62332d21259c87f9ffffff4f + VPTERNLOGD $64, Z0, Z7, K7, Z3 // 62f3454f25d840 + VPTERNLOGD $64, Z6, Z7, K7, Z3 // 62f3454f25de40 + VPTERNLOGD $64, 7(AX)(CX*4), Z7, K7, Z3 // 62f3454f259c880700000040 + VPTERNLOGD $64, 7(AX)(CX*1), Z7, K7, Z3 // 62f3454f259c080700000040 + VPTERNLOGD $64, Z0, Z9, K7, Z3 // 62f3354f25d840 + VPTERNLOGD $64, Z6, Z9, K7, Z3 // 62f3354f25de40 + VPTERNLOGD $64, 7(AX)(CX*4), Z9, K7, Z3 // 62f3354f259c880700000040 + VPTERNLOGD $64, 7(AX)(CX*1), Z9, K7, Z3 // 62f3354f259c080700000040 + VPTERNLOGD 
$64, Z0, Z7, K7, Z27 // 6263454f25d840 + VPTERNLOGD $64, Z6, Z7, K7, Z27 // 6263454f25de40 + VPTERNLOGD $64, 7(AX)(CX*4), Z7, K7, Z27 // 6263454f259c880700000040 + VPTERNLOGD $64, 7(AX)(CX*1), Z7, K7, Z27 // 6263454f259c080700000040 + VPTERNLOGD $64, Z0, Z9, K7, Z27 // 6263354f25d840 + VPTERNLOGD $64, Z6, Z9, K7, Z27 // 6263354f25de40 + VPTERNLOGD $64, 7(AX)(CX*4), Z9, K7, Z27 // 6263354f259c880700000040 + VPTERNLOGD $64, 7(AX)(CX*1), Z9, K7, Z27 // 6263354f259c080700000040 + VPTERNLOGQ $27, X8, X15, K2, X17 // 62c3850a25c81b + VPTERNLOGQ $27, (BX), X15, K2, X17 // 62e3850a250b1b + VPTERNLOGQ $27, -17(BP)(SI*1), X15, K2, X17 // 62e3850a258c35efffffff1b + VPTERNLOGQ $47, Y31, Y18, K4, Y14 // 6213ed2425f72f + VPTERNLOGQ $47, 15(R8), Y18, K4, Y14 // 6253ed2425b00f0000002f + VPTERNLOGQ $47, (BP), Y18, K4, Y14 // 6273ed242575002f + VPTERNLOGQ $82, Z9, Z3, K1, Z20 // 62c3e54925e152 + VPTERNLOGQ $82, Z19, Z3, K1, Z20 // 62a3e54925e352 + VPTERNLOGQ $82, (SI), Z3, K1, Z20 // 62e3e549252652 + VPTERNLOGQ $82, 7(SI)(DI*2), Z3, K1, Z20 // 62e3e54925a47e0700000052 + VPTERNLOGQ $82, Z9, Z30, K1, Z20 // 62c38d4125e152 + VPTERNLOGQ $82, Z19, Z30, K1, Z20 // 62a38d4125e352 + VPTERNLOGQ $82, (SI), Z30, K1, Z20 // 62e38d41252652 + VPTERNLOGQ $82, 7(SI)(DI*2), Z30, K1, Z20 // 62e38d4125a47e0700000052 + VPTERNLOGQ $82, Z9, Z3, K1, Z28 // 6243e54925e152 + VPTERNLOGQ $82, Z19, Z3, K1, Z28 // 6223e54925e352 + VPTERNLOGQ $82, (SI), Z3, K1, Z28 // 6263e549252652 + VPTERNLOGQ $82, 7(SI)(DI*2), Z3, K1, Z28 // 6263e54925a47e0700000052 + VPTERNLOGQ $82, Z9, Z30, K1, Z28 // 62438d4125e152 + VPTERNLOGQ $82, Z19, Z30, K1, Z28 // 62238d4125e352 + VPTERNLOGQ $82, (SI), Z30, K1, Z28 // 62638d41252652 + VPTERNLOGQ $82, 7(SI)(DI*2), Z30, K1, Z28 // 62638d4125a47e0700000052 + VPTESTMD X13, X23, K7, K4 // 62d2450727e5 + VPTESTMD (R8), X23, K7, K4 // 62d245072720 + VPTESTMD 15(DX)(BX*2), X23, K7, K4 // 62f2450727a45a0f000000 + VPTESTMD X13, X23, K7, K6 // 62d2450727f5 + VPTESTMD (R8), X23, K7, K6 // 
62d245072730 + VPTESTMD 15(DX)(BX*2), X23, K7, K6 // 62f2450727b45a0f000000 + VPTESTMD Y2, Y24, K7, K0 // 62f23d2727c2 + VPTESTMD -15(R14)(R15*1), Y24, K7, K0 // 62923d2727843ef1ffffff + VPTESTMD -15(BX), Y24, K7, K0 // 62f23d272783f1ffffff + VPTESTMD Y2, Y24, K7, K7 // 62f23d2727fa + VPTESTMD -15(R14)(R15*1), Y24, K7, K7 // 62923d2727bc3ef1ffffff + VPTESTMD -15(BX), Y24, K7, K7 // 62f23d2727bbf1ffffff + VPTESTMD Z2, Z18, K6, K5 // 62f26d4627ea + VPTESTMD Z21, Z18, K6, K5 // 62b26d4627ed + VPTESTMD 7(SI)(DI*4), Z18, K6, K5 // 62f26d4627acbe07000000 + VPTESTMD -7(DI)(R8*2), Z18, K6, K5 // 62b26d4627ac47f9ffffff + VPTESTMD Z2, Z24, K6, K5 // 62f23d4627ea + VPTESTMD Z21, Z24, K6, K5 // 62b23d4627ed + VPTESTMD 7(SI)(DI*4), Z24, K6, K5 // 62f23d4627acbe07000000 + VPTESTMD -7(DI)(R8*2), Z24, K6, K5 // 62b23d4627ac47f9ffffff + VPTESTMD Z2, Z18, K6, K4 // 62f26d4627e2 + VPTESTMD Z21, Z18, K6, K4 // 62b26d4627e5 + VPTESTMD 7(SI)(DI*4), Z18, K6, K4 // 62f26d4627a4be07000000 + VPTESTMD -7(DI)(R8*2), Z18, K6, K4 // 62b26d4627a447f9ffffff + VPTESTMD Z2, Z24, K6, K4 // 62f23d4627e2 + VPTESTMD Z21, Z24, K6, K4 // 62b23d4627e5 + VPTESTMD 7(SI)(DI*4), Z24, K6, K4 // 62f23d4627a4be07000000 + VPTESTMD -7(DI)(R8*2), Z24, K6, K4 // 62b23d4627a447f9ffffff + VPTESTMQ X24, X28, K3, K4 // 62929d0327e0 + VPTESTMQ 17(SP)(BP*1), X28, K3, K4 // 62f29d0327a42c11000000 + VPTESTMQ -7(CX)(DX*8), X28, K3, K4 // 62f29d0327a4d1f9ffffff + VPTESTMQ X24, X28, K3, K6 // 62929d0327f0 + VPTESTMQ 17(SP)(BP*1), X28, K3, K6 // 62f29d0327b42c11000000 + VPTESTMQ -7(CX)(DX*8), X28, K3, K6 // 62f29d0327b4d1f9ffffff + VPTESTMQ Y21, Y7, K7, K1 // 62b2c52f27cd + VPTESTMQ 7(AX)(CX*4), Y7, K7, K1 // 62f2c52f278c8807000000 + VPTESTMQ 7(AX)(CX*1), Y7, K7, K1 // 62f2c52f278c0807000000 + VPTESTMQ Y21, Y7, K7, K3 // 62b2c52f27dd + VPTESTMQ 7(AX)(CX*4), Y7, K7, K3 // 62f2c52f279c8807000000 + VPTESTMQ 7(AX)(CX*1), Y7, K7, K3 // 62f2c52f279c0807000000 + VPTESTMQ Z6, Z7, K4, K6 // 62f2c54c27f6 + VPTESTMQ Z16, Z7, K4, K6 // 
62b2c54c27f0 + VPTESTMQ 17(SP), Z7, K4, K6 // 62f2c54c27b42411000000 + VPTESTMQ -17(BP)(SI*4), Z7, K4, K6 // 62f2c54c27b4b5efffffff + VPTESTMQ Z6, Z13, K4, K6 // 62f2954c27f6 + VPTESTMQ Z16, Z13, K4, K6 // 62b2954c27f0 + VPTESTMQ 17(SP), Z13, K4, K6 // 62f2954c27b42411000000 + VPTESTMQ -17(BP)(SI*4), Z13, K4, K6 // 62f2954c27b4b5efffffff + VPTESTMQ Z6, Z7, K4, K7 // 62f2c54c27fe + VPTESTMQ Z16, Z7, K4, K7 // 62b2c54c27f8 + VPTESTMQ 17(SP), Z7, K4, K7 // 62f2c54c27bc2411000000 + VPTESTMQ -17(BP)(SI*4), Z7, K4, K7 // 62f2c54c27bcb5efffffff + VPTESTMQ Z6, Z13, K4, K7 // 62f2954c27fe + VPTESTMQ Z16, Z13, K4, K7 // 62b2954c27f8 + VPTESTMQ 17(SP), Z13, K4, K7 // 62f2954c27bc2411000000 + VPTESTMQ -17(BP)(SI*4), Z13, K4, K7 // 62f2954c27bcb5efffffff + VPTESTNMD X1, X21, K2, K1 // 62f2560227c9 + VPTESTNMD (R14), X21, K2, K1 // 62d25602270e + VPTESTNMD -7(DI)(R8*8), X21, K2, K1 // 62b25602278cc7f9ffffff + VPTESTNMD X1, X21, K2, K5 // 62f2560227e9 + VPTESTNMD (R14), X21, K2, K5 // 62d25602272e + VPTESTNMD -7(DI)(R8*8), X21, K2, K5 // 62b2560227acc7f9ffffff + VPTESTNMD Y1, Y24, K2, K3 // 62f23e2227d9 + VPTESTNMD 7(SI)(DI*4), Y24, K2, K3 // 62f23e22279cbe07000000 + VPTESTNMD -7(DI)(R8*2), Y24, K2, K3 // 62b23e22279c47f9ffffff + VPTESTNMD Y1, Y24, K2, K1 // 62f23e2227c9 + VPTESTNMD 7(SI)(DI*4), Y24, K2, K1 // 62f23e22278cbe07000000 + VPTESTNMD -7(DI)(R8*2), Y24, K2, K1 // 62b23e22278c47f9ffffff + VPTESTNMD Z2, Z22, K3, K5 // 62f24e4327ea + VPTESTNMD Z31, Z22, K3, K5 // 62924e4327ef + VPTESTNMD -17(BP)(SI*8), Z22, K3, K5 // 62f24e4327acf5efffffff + VPTESTNMD (R15), Z22, K3, K5 // 62d24e43272f + VPTESTNMD Z2, Z7, K3, K5 // 62f2464b27ea + VPTESTNMD Z31, Z7, K3, K5 // 6292464b27ef + VPTESTNMD -17(BP)(SI*8), Z7, K3, K5 // 62f2464b27acf5efffffff + VPTESTNMD (R15), Z7, K3, K5 // 62d2464b272f + VPTESTNMD Z2, Z22, K3, K4 // 62f24e4327e2 + VPTESTNMD Z31, Z22, K3, K4 // 62924e4327e7 + VPTESTNMD -17(BP)(SI*8), Z22, K3, K4 // 62f24e4327a4f5efffffff + VPTESTNMD (R15), Z22, K3, K4 // 
62d24e432727 + VPTESTNMD Z2, Z7, K3, K4 // 62f2464b27e2 + VPTESTNMD Z31, Z7, K3, K4 // 6292464b27e7 + VPTESTNMD -17(BP)(SI*8), Z7, K3, K4 // 62f2464b27a4f5efffffff + VPTESTNMD (R15), Z7, K3, K4 // 62d2464b2727 + VPTESTNMQ X31, X11, K3, K7 // 6292a60b27ff + VPTESTNMQ 99(R15)(R15*4), X11, K3, K7 // 6292a60b27bcbf63000000 + VPTESTNMQ 15(DX), X11, K3, K7 // 62f2a60b27ba0f000000 + VPTESTNMQ X31, X11, K3, K6 // 6292a60b27f7 + VPTESTNMQ 99(R15)(R15*4), X11, K3, K6 // 6292a60b27b4bf63000000 + VPTESTNMQ 15(DX), X11, K3, K6 // 62f2a60b27b20f000000 + VPTESTNMQ Y18, Y5, K3, K4 // 62b2d62b27e2 + VPTESTNMQ 17(SP), Y5, K3, K4 // 62f2d62b27a42411000000 + VPTESTNMQ -17(BP)(SI*4), Y5, K3, K4 // 62f2d62b27a4b5efffffff + VPTESTNMQ Y18, Y5, K3, K6 // 62b2d62b27f2 + VPTESTNMQ 17(SP), Y5, K3, K6 // 62f2d62b27b42411000000 + VPTESTNMQ -17(BP)(SI*4), Y5, K3, K6 // 62f2d62b27b4b5efffffff + VPTESTNMQ Z1, Z20, K2, K0 // 62f2de4227c1 + VPTESTNMQ Z3, Z20, K2, K0 // 62f2de4227c3 + VPTESTNMQ 7(SI)(DI*8), Z20, K2, K0 // 62f2de422784fe07000000 + VPTESTNMQ -15(R14), Z20, K2, K0 // 62d2de422786f1ffffff + VPTESTNMQ Z1, Z9, K2, K0 // 62f2b64a27c1 + VPTESTNMQ Z3, Z9, K2, K0 // 62f2b64a27c3 + VPTESTNMQ 7(SI)(DI*8), Z9, K2, K0 // 62f2b64a2784fe07000000 + VPTESTNMQ -15(R14), Z9, K2, K0 // 62d2b64a2786f1ffffff + VPTESTNMQ Z1, Z20, K2, K7 // 62f2de4227f9 + VPTESTNMQ Z3, Z20, K2, K7 // 62f2de4227fb + VPTESTNMQ 7(SI)(DI*8), Z20, K2, K7 // 62f2de4227bcfe07000000 + VPTESTNMQ -15(R14), Z20, K2, K7 // 62d2de4227bef1ffffff + VPTESTNMQ Z1, Z9, K2, K7 // 62f2b64a27f9 + VPTESTNMQ Z3, Z9, K2, K7 // 62f2b64a27fb + VPTESTNMQ 7(SI)(DI*8), Z9, K2, K7 // 62f2b64a27bcfe07000000 + VPTESTNMQ -15(R14), Z9, K2, K7 // 62d2b64a27bef1ffffff + VPUNPCKHDQ X9, X7, K1, X20 // 62c145096ae1 + VPUNPCKHDQ -7(CX)(DX*1), X7, K1, X20 // 62e145096aa411f9ffffff + VPUNPCKHDQ -15(R14)(R15*4), X7, K1, X20 // 628145096aa4bef1ffffff + VPUNPCKHDQ Y11, Y8, K7, Y1 // 62d13d2f6acb + VPUNPCKHDQ -17(BP)(SI*8), Y8, K7, Y1 // 62f13d2f6a8cf5efffffff + 
VPUNPCKHDQ (R15), Y8, K7, Y1 // 62d13d2f6a0f + VPUNPCKHDQ Z3, Z5, K2, Z19 // 62e1554a6adb + VPUNPCKHDQ Z5, Z5, K2, Z19 // 62e1554a6add + VPUNPCKHDQ -7(CX), Z5, K2, Z19 // 62e1554a6a99f9ffffff + VPUNPCKHDQ 15(DX)(BX*4), Z5, K2, Z19 // 62e1554a6a9c9a0f000000 + VPUNPCKHDQ Z3, Z1, K2, Z19 // 62e1754a6adb + VPUNPCKHDQ Z5, Z1, K2, Z19 // 62e1754a6add + VPUNPCKHDQ -7(CX), Z1, K2, Z19 // 62e1754a6a99f9ffffff + VPUNPCKHDQ 15(DX)(BX*4), Z1, K2, Z19 // 62e1754a6a9c9a0f000000 + VPUNPCKHDQ Z3, Z5, K2, Z15 // 6271554a6afb + VPUNPCKHDQ Z5, Z5, K2, Z15 // 6271554a6afd + VPUNPCKHDQ -7(CX), Z5, K2, Z15 // 6271554a6ab9f9ffffff + VPUNPCKHDQ 15(DX)(BX*4), Z5, K2, Z15 // 6271554a6abc9a0f000000 + VPUNPCKHDQ Z3, Z1, K2, Z15 // 6271754a6afb + VPUNPCKHDQ Z5, Z1, K2, Z15 // 6271754a6afd + VPUNPCKHDQ -7(CX), Z1, K2, Z15 // 6271754a6ab9f9ffffff + VPUNPCKHDQ 15(DX)(BX*4), Z1, K2, Z15 // 6271754a6abc9a0f000000 + VPUNPCKHQDQ X5, X14, K4, X7 // 62f18d0c6dfd + VPUNPCKHQDQ 15(DX)(BX*1), X14, K4, X7 // 62f18d0c6dbc1a0f000000 + VPUNPCKHQDQ -7(CX)(DX*2), X14, K4, X7 // 62f18d0c6dbc51f9ffffff + VPUNPCKHQDQ Y16, Y17, K1, Y27 // 6221f5216dd8 + VPUNPCKHQDQ 7(SI)(DI*8), Y17, K1, Y27 // 6261f5216d9cfe07000000 + VPUNPCKHQDQ -15(R14), Y17, K1, Y27 // 6241f5216d9ef1ffffff + VPUNPCKHQDQ Z16, Z21, K3, Z14 // 6231d5436df0 + VPUNPCKHQDQ Z9, Z21, K3, Z14 // 6251d5436df1 + VPUNPCKHQDQ 99(R15)(R15*8), Z21, K3, Z14 // 6211d5436db4ff63000000 + VPUNPCKHQDQ 7(AX)(CX*8), Z21, K3, Z14 // 6271d5436db4c807000000 + VPUNPCKHQDQ Z16, Z8, K3, Z14 // 6231bd4b6df0 + VPUNPCKHQDQ Z9, Z8, K3, Z14 // 6251bd4b6df1 + VPUNPCKHQDQ 99(R15)(R15*8), Z8, K3, Z14 // 6211bd4b6db4ff63000000 + VPUNPCKHQDQ 7(AX)(CX*8), Z8, K3, Z14 // 6271bd4b6db4c807000000 + VPUNPCKHQDQ Z16, Z21, K3, Z15 // 6231d5436df8 + VPUNPCKHQDQ Z9, Z21, K3, Z15 // 6251d5436df9 + VPUNPCKHQDQ 99(R15)(R15*8), Z21, K3, Z15 // 6211d5436dbcff63000000 + VPUNPCKHQDQ 7(AX)(CX*8), Z21, K3, Z15 // 6271d5436dbcc807000000 + VPUNPCKHQDQ Z16, Z8, K3, Z15 // 6231bd4b6df8 + VPUNPCKHQDQ Z9, 
Z8, K3, Z15 // 6251bd4b6df9 + VPUNPCKHQDQ 99(R15)(R15*8), Z8, K3, Z15 // 6211bd4b6dbcff63000000 + VPUNPCKHQDQ 7(AX)(CX*8), Z8, K3, Z15 // 6271bd4b6dbcc807000000 + VPUNPCKLDQ X16, X30, K7, X0 // 62b10d0762c0 + VPUNPCKLDQ 15(R8), X30, K7, X0 // 62d10d0762800f000000 + VPUNPCKLDQ (BP), X30, K7, X0 // 62f10d07624500 + VPUNPCKLDQ Y14, Y23, K4, Y1 // 62d1452462ce + VPUNPCKLDQ -7(CX), Y23, K4, Y1 // 62f145246289f9ffffff + VPUNPCKLDQ 15(DX)(BX*4), Y23, K4, Y1 // 62f14524628c9a0f000000 + VPUNPCKLDQ Z9, Z9, K4, Z9 // 6251354c62c9 + VPUNPCKLDQ Z28, Z9, K4, Z9 // 6211354c62cc + VPUNPCKLDQ 15(R8)(R14*4), Z9, K4, Z9 // 6211354c628cb00f000000 + VPUNPCKLDQ -7(CX)(DX*4), Z9, K4, Z9 // 6271354c628c91f9ffffff + VPUNPCKLDQ Z9, Z25, K4, Z9 // 6251354462c9 + VPUNPCKLDQ Z28, Z25, K4, Z9 // 6211354462cc + VPUNPCKLDQ 15(R8)(R14*4), Z25, K4, Z9 // 62113544628cb00f000000 + VPUNPCKLDQ -7(CX)(DX*4), Z25, K4, Z9 // 62713544628c91f9ffffff + VPUNPCKLDQ Z9, Z9, K4, Z3 // 62d1354c62d9 + VPUNPCKLDQ Z28, Z9, K4, Z3 // 6291354c62dc + VPUNPCKLDQ 15(R8)(R14*4), Z9, K4, Z3 // 6291354c629cb00f000000 + VPUNPCKLDQ -7(CX)(DX*4), Z9, K4, Z3 // 62f1354c629c91f9ffffff + VPUNPCKLDQ Z9, Z25, K4, Z3 // 62d1354462d9 + VPUNPCKLDQ Z28, Z25, K4, Z3 // 6291354462dc + VPUNPCKLDQ 15(R8)(R14*4), Z25, K4, Z3 // 62913544629cb00f000000 + VPUNPCKLDQ -7(CX)(DX*4), Z25, K4, Z3 // 62f13544629c91f9ffffff + VPUNPCKLQDQ X14, X11, K7, X14 // 6251a50f6cf6 + VPUNPCKLQDQ 15(R8)(R14*8), X11, K7, X14 // 6211a50f6cb4f00f000000 + VPUNPCKLQDQ -15(R14)(R15*2), X11, K7, X14 // 6211a50f6cb47ef1ffffff + VPUNPCKLQDQ Y2, Y25, K2, Y31 // 6261b5226cfa + VPUNPCKLQDQ 99(R15)(R15*8), Y25, K2, Y31 // 6201b5226cbcff63000000 + VPUNPCKLQDQ 7(AX)(CX*8), Y25, K2, Y31 // 6261b5226cbcc807000000 + VPUNPCKLQDQ Z17, Z17, K5, Z20 // 62a1f5456ce1 + VPUNPCKLQDQ Z23, Z17, K5, Z20 // 62a1f5456ce7 + VPUNPCKLQDQ (R8), Z17, K5, Z20 // 62c1f5456c20 + VPUNPCKLQDQ 15(DX)(BX*2), Z17, K5, Z20 // 62e1f5456ca45a0f000000 + VPUNPCKLQDQ Z17, Z0, K5, Z20 // 62a1fd4d6ce1 + 
VPUNPCKLQDQ Z23, Z0, K5, Z20 // 62a1fd4d6ce7 + VPUNPCKLQDQ (R8), Z0, K5, Z20 // 62c1fd4d6c20 + VPUNPCKLQDQ 15(DX)(BX*2), Z0, K5, Z20 // 62e1fd4d6ca45a0f000000 + VPUNPCKLQDQ Z17, Z17, K5, Z0 // 62b1f5456cc1 + VPUNPCKLQDQ Z23, Z17, K5, Z0 // 62b1f5456cc7 + VPUNPCKLQDQ (R8), Z17, K5, Z0 // 62d1f5456c00 + VPUNPCKLQDQ 15(DX)(BX*2), Z17, K5, Z0 // 62f1f5456c845a0f000000 + VPUNPCKLQDQ Z17, Z0, K5, Z0 // 62b1fd4d6cc1 + VPUNPCKLQDQ Z23, Z0, K5, Z0 // 62b1fd4d6cc7 + VPUNPCKLQDQ (R8), Z0, K5, Z0 // 62d1fd4d6c00 + VPUNPCKLQDQ 15(DX)(BX*2), Z0, K5, Z0 // 62f1fd4d6c845a0f000000 + VPXORD X12, X23, K2, X26 // 62414502efd4 + VPXORD 7(AX)(CX*4), X23, K2, X26 // 62614502ef948807000000 + VPXORD 7(AX)(CX*1), X23, K2, X26 // 62614502ef940807000000 + VPXORD Y9, Y22, K3, Y9 // 62514d23efc9 + VPXORD (BX), Y22, K3, Y9 // 62714d23ef0b + VPXORD -17(BP)(SI*1), Y22, K3, Y9 // 62714d23ef8c35efffffff + VPXORD Z30, Z20, K3, Z1 // 62915d43efce + VPXORD Z5, Z20, K3, Z1 // 62f15d43efcd + VPXORD -17(BP)(SI*2), Z20, K3, Z1 // 62f15d43ef8c75efffffff + VPXORD 7(AX)(CX*2), Z20, K3, Z1 // 62f15d43ef8c4807000000 + VPXORD Z30, Z9, K3, Z1 // 6291354befce + VPXORD Z5, Z9, K3, Z1 // 62f1354befcd + VPXORD -17(BP)(SI*2), Z9, K3, Z1 // 62f1354bef8c75efffffff + VPXORD 7(AX)(CX*2), Z9, K3, Z1 // 62f1354bef8c4807000000 + VPXORD Z30, Z20, K3, Z9 // 62115d43efce + VPXORD Z5, Z20, K3, Z9 // 62715d43efcd + VPXORD -17(BP)(SI*2), Z20, K3, Z9 // 62715d43ef8c75efffffff + VPXORD 7(AX)(CX*2), Z20, K3, Z9 // 62715d43ef8c4807000000 + VPXORD Z30, Z9, K3, Z9 // 6211354befce + VPXORD Z5, Z9, K3, Z9 // 6271354befcd + VPXORD -17(BP)(SI*2), Z9, K3, Z9 // 6271354bef8c75efffffff + VPXORD 7(AX)(CX*2), Z9, K3, Z9 // 6271354bef8c4807000000 + VPXORQ X23, X23, K3, X16 // 62a1c503efc7 + VPXORQ (SI), X23, K3, X16 // 62e1c503ef06 + VPXORQ 7(SI)(DI*2), X23, K3, X16 // 62e1c503ef847e07000000 + VPXORQ Y6, Y1, K2, Y14 // 6271f52aeff6 + VPXORQ 15(R8)(R14*4), Y1, K2, Y14 // 6211f52aefb4b00f000000 + VPXORQ -7(CX)(DX*4), Y1, K2, Y14 // 
6271f52aefb491f9ffffff + VPXORQ Z16, Z7, K1, Z26 // 6221c549efd0 + VPXORQ Z25, Z7, K1, Z26 // 6201c549efd1 + VPXORQ 15(R8)(R14*1), Z7, K1, Z26 // 6201c549ef94300f000000 + VPXORQ 15(R8)(R14*2), Z7, K1, Z26 // 6201c549ef94700f000000 + VPXORQ Z16, Z21, K1, Z26 // 6221d541efd0 + VPXORQ Z25, Z21, K1, Z26 // 6201d541efd1 + VPXORQ 15(R8)(R14*1), Z21, K1, Z26 // 6201d541ef94300f000000 + VPXORQ 15(R8)(R14*2), Z21, K1, Z26 // 6201d541ef94700f000000 + VPXORQ Z16, Z7, K1, Z22 // 62a1c549eff0 + VPXORQ Z25, Z7, K1, Z22 // 6281c549eff1 + VPXORQ 15(R8)(R14*1), Z7, K1, Z22 // 6281c549efb4300f000000 + VPXORQ 15(R8)(R14*2), Z7, K1, Z22 // 6281c549efb4700f000000 + VPXORQ Z16, Z21, K1, Z22 // 62a1d541eff0 + VPXORQ Z25, Z21, K1, Z22 // 6281d541eff1 + VPXORQ 15(R8)(R14*1), Z21, K1, Z22 // 6281d541efb4300f000000 + VPXORQ 15(R8)(R14*2), Z21, K1, Z22 // 6281d541efb4700f000000 + VRCP14PD X11, K5, X31 // 6242fd0d4cfb + VRCP14PD 17(SP), K5, X31 // 6262fd0d4cbc2411000000 + VRCP14PD -17(BP)(SI*4), K5, X31 // 6262fd0d4cbcb5efffffff + VRCP14PD Y23, K7, Y9 // 6232fd2f4ccf + VRCP14PD -17(BP)(SI*2), K7, Y9 // 6272fd2f4c8c75efffffff + VRCP14PD 7(AX)(CX*2), K7, Y9 // 6272fd2f4c8c4807000000 + VRCP14PD Z0, K7, Z6 // 62f2fd4f4cf0 + VRCP14PD Z8, K7, Z6 // 62d2fd4f4cf0 + VRCP14PD (CX), K7, Z6 // 62f2fd4f4c31 + VRCP14PD 99(R15), K7, Z6 // 62d2fd4f4cb763000000 + VRCP14PD Z0, K7, Z2 // 62f2fd4f4cd0 + VRCP14PD Z8, K7, Z2 // 62d2fd4f4cd0 + VRCP14PD (CX), K7, Z2 // 62f2fd4f4c11 + VRCP14PD 99(R15), K7, Z2 // 62d2fd4f4c9763000000 + VRCP14PS X5, K6, X22 // 62e27d0e4cf5 + VRCP14PS 7(AX), K6, X22 // 62e27d0e4cb007000000 + VRCP14PS (DI), K6, X22 // 62e27d0e4c37 + VRCP14PS Y5, K3, Y31 // 62627d2b4cfd + VRCP14PS 15(R8)(R14*1), K3, Y31 // 62027d2b4cbc300f000000 + VRCP14PS 15(R8)(R14*2), K3, Y31 // 62027d2b4cbc700f000000 + VRCP14PS Z14, K7, Z15 // 62527d4f4cfe + VRCP14PS Z27, K7, Z15 // 62127d4f4cfb + VRCP14PS 99(R15)(R15*2), K7, Z15 // 62127d4f4cbc7f63000000 + VRCP14PS -7(DI), K7, Z15 // 62727d4f4cbff9ffffff + VRCP14PS 
Z14, K7, Z12 // 62527d4f4ce6 + VRCP14PS Z27, K7, Z12 // 62127d4f4ce3 + VRCP14PS 99(R15)(R15*2), K7, Z12 // 62127d4f4ca47f63000000 + VRCP14PS -7(DI), K7, Z12 // 62727d4f4ca7f9ffffff + VRCP14SD X17, X0, K4, X14 // 6232fd0c4df1 or 6232fd2c4df1 or 6232fd4c4df1 + VRCP14SD (SI), X0, K4, X14 // 6272fd0c4d36 or 6272fd2c4d36 or 6272fd4c4d36 + VRCP14SD 7(SI)(DI*2), X0, K4, X14 // 6272fd0c4db47e07000000 or 6272fd2c4db47e07000000 or 6272fd4c4db47e07000000 + VRCP14SS X11, X15, K4, X7 // 62d2050c4dfb or 62d2052c4dfb or 62d2054c4dfb + VRCP14SS -7(DI)(R8*1), X15, K4, X7 // 62b2050c4dbc07f9ffffff or 62b2052c4dbc07f9ffffff or 62b2054c4dbc07f9ffffff + VRCP14SS (SP), X15, K4, X7 // 62f2050c4d3c24 or 62f2052c4d3c24 or 62f2054c4d3c24 + VRNDSCALEPD $64, X16, K4, X20 // 62a3fd0c09e040 + VRNDSCALEPD $64, 7(SI)(DI*8), K4, X20 // 62e3fd0c09a4fe0700000040 + VRNDSCALEPD $64, -15(R14), K4, X20 // 62c3fd0c09a6f1ffffff40 + VRNDSCALEPD $27, Y2, K1, Y28 // 6263fd2909e21b + VRNDSCALEPD $27, (CX), K1, Y28 // 6263fd2909211b + VRNDSCALEPD $27, 99(R15), K1, Y28 // 6243fd2909a7630000001b + VRNDSCALEPD $47, Z21, K3, Z8 // 6233fd4b09c52f + VRNDSCALEPD $47, Z5, K3, Z8 // 6273fd4b09c52f + VRNDSCALEPD $47, Z21, K3, Z28 // 6223fd4b09e52f + VRNDSCALEPD $47, Z5, K3, Z28 // 6263fd4b09e52f + VRNDSCALEPD $82, Z12, K4, Z16 // 62c3fd4c09c452 + VRNDSCALEPD $82, Z27, K4, Z16 // 6283fd4c09c352 + VRNDSCALEPD $82, 15(R8), K4, Z16 // 62c3fd4c09800f00000052 + VRNDSCALEPD $82, (BP), K4, Z16 // 62e3fd4c09450052 + VRNDSCALEPD $82, Z12, K4, Z13 // 6253fd4c09ec52 + VRNDSCALEPD $82, Z27, K4, Z13 // 6213fd4c09eb52 + VRNDSCALEPD $82, 15(R8), K4, Z13 // 6253fd4c09a80f00000052 + VRNDSCALEPD $82, (BP), K4, Z13 // 6273fd4c096d0052 + VRNDSCALEPS $126, X6, K5, X12 // 62737d0d08e67e + VRNDSCALEPS $126, 7(SI)(DI*1), K5, X12 // 62737d0d08a43e070000007e + VRNDSCALEPS $126, 15(DX)(BX*8), K5, X12 // 62737d0d08a4da0f0000007e + VRNDSCALEPS $94, Y27, K7, Y24 // 62037d2f08c35e + VRNDSCALEPS $94, 99(R15)(R15*2), K7, Y24 // 62037d2f08847f630000005e 
+ VRNDSCALEPS $94, -7(DI), K7, Y24 // 62637d2f0887f9ffffff5e + VRNDSCALEPS $121, Z6, K7, Z22 // 62e37d4f08f679 + VRNDSCALEPS $121, Z8, K7, Z22 // 62c37d4f08f079 + VRNDSCALEPS $121, Z6, K7, Z11 // 62737d4f08de79 + VRNDSCALEPS $121, Z8, K7, Z11 // 62537d4f08d879 + VRNDSCALEPS $13, Z12, K6, Z25 // 62437d4e08cc0d + VRNDSCALEPS $13, Z17, K6, Z25 // 62237d4e08c90d + VRNDSCALEPS $13, 15(R8)(R14*8), K6, Z25 // 62037d4e088cf00f0000000d + VRNDSCALEPS $13, -15(R14)(R15*2), K6, Z25 // 62037d4e088c7ef1ffffff0d + VRNDSCALEPS $13, Z12, K6, Z12 // 62537d4e08e40d + VRNDSCALEPS $13, Z17, K6, Z12 // 62337d4e08e10d + VRNDSCALEPS $13, 15(R8)(R14*8), K6, Z12 // 62137d4e08a4f00f0000000d + VRNDSCALEPS $13, -15(R14)(R15*2), K6, Z12 // 62137d4e08a47ef1ffffff0d + VRNDSCALESD $65, X6, X28, K3, X17 // 62e39d030bce41 + VRNDSCALESD $67, X8, X8, K7, X1 // 62d3bd0f0bc843 or 62d3bd2f0bc843 or 62d3bd4f0bc843 + VRNDSCALESD $67, 17(SP), X8, K7, X1 // 62f3bd0f0b8c241100000043 or 62f3bd2f0b8c241100000043 or 62f3bd4f0b8c241100000043 + VRNDSCALESD $67, -17(BP)(SI*4), X8, K7, X1 // 62f3bd0f0b8cb5efffffff43 or 62f3bd2f0b8cb5efffffff43 or 62f3bd4f0b8cb5efffffff43 + VRNDSCALESS $127, X11, X0, K4, X6 // 62d37d0c0af37f + VRNDSCALESS $0, X6, X6, K4, X16 // 62e34d0c0ac600 or 62e34d2c0ac600 or 62e34d4c0ac600 + VRNDSCALESS $0, (AX), X6, K4, X16 // 62e34d0c0a0000 or 62e34d2c0a0000 or 62e34d4c0a0000 + VRNDSCALESS $0, 7(SI), X6, K4, X16 // 62e34d0c0a860700000000 or 62e34d2c0a860700000000 or 62e34d4c0a860700000000 + VRSQRT14PD X12, K7, X22 // 62c2fd0f4ef4 + VRSQRT14PD -7(DI)(R8*1), K7, X22 // 62a2fd0f4eb407f9ffffff + VRSQRT14PD (SP), K7, X22 // 62e2fd0f4e3424 + VRSQRT14PD Y11, K2, Y0 // 62d2fd2a4ec3 + VRSQRT14PD -7(CX)(DX*1), K2, Y0 // 62f2fd2a4e8411f9ffffff + VRSQRT14PD -15(R14)(R15*4), K2, Y0 // 6292fd2a4e84bef1ffffff + VRSQRT14PD Z6, K5, Z9 // 6272fd4d4ece + VRSQRT14PD Z25, K5, Z9 // 6212fd4d4ec9 + VRSQRT14PD -15(R14)(R15*1), K5, Z9 // 6212fd4d4e8c3ef1ffffff + VRSQRT14PD -15(BX), K5, Z9 // 6272fd4d4e8bf1ffffff + 
VRSQRT14PD Z6, K5, Z12 // 6272fd4d4ee6 + VRSQRT14PD Z25, K5, Z12 // 6212fd4d4ee1 + VRSQRT14PD -15(R14)(R15*1), K5, Z12 // 6212fd4d4ea43ef1ffffff + VRSQRT14PD -15(BX), K5, Z12 // 6272fd4d4ea3f1ffffff + VRSQRT14PS X28, K3, X16 // 62827d0b4ec4 + VRSQRT14PS -7(CX), K3, X16 // 62e27d0b4e81f9ffffff + VRSQRT14PS 15(DX)(BX*4), K3, X16 // 62e27d0b4e849a0f000000 + VRSQRT14PS Y3, K4, Y31 // 62627d2c4efb + VRSQRT14PS 15(DX)(BX*1), K4, Y31 // 62627d2c4ebc1a0f000000 + VRSQRT14PS -7(CX)(DX*2), K4, Y31 // 62627d2c4ebc51f9ffffff + VRSQRT14PS Z8, K2, Z3 // 62d27d4a4ed8 + VRSQRT14PS Z2, K2, Z3 // 62f27d4a4eda + VRSQRT14PS 7(AX)(CX*4), K2, Z3 // 62f27d4a4e9c8807000000 + VRSQRT14PS 7(AX)(CX*1), K2, Z3 // 62f27d4a4e9c0807000000 + VRSQRT14PS Z8, K2, Z21 // 62c27d4a4ee8 + VRSQRT14PS Z2, K2, Z21 // 62e27d4a4eea + VRSQRT14PS 7(AX)(CX*4), K2, Z21 // 62e27d4a4eac8807000000 + VRSQRT14PS 7(AX)(CX*1), K2, Z21 // 62e27d4a4eac0807000000 + VRSQRT14SD X11, X15, K2, X8 // 6252850a4fc3 or 6252852a4fc3 or 6252854a4fc3 + VRSQRT14SD 7(AX), X15, K2, X8 // 6272850a4f8007000000 or 6272852a4f8007000000 or 6272854a4f8007000000 + VRSQRT14SD (DI), X15, K2, X8 // 6272850a4f07 or 6272852a4f07 or 6272854a4f07 + VRSQRT14SS X13, X19, K3, X1 // 62d265034fcd or 62d265234fcd or 62d265434fcd + VRSQRT14SS (BX), X19, K3, X1 // 62f265034f0b or 62f265234f0b or 62f265434f0b + VRSQRT14SS -17(BP)(SI*1), X19, K3, X1 // 62f265034f8c35efffffff or 62f265234f8c35efffffff or 62f265434f8c35efffffff + VSCALEFPD X27, X2, K1, X2 // 6292ed092cd3 + VSCALEFPD 99(R15)(R15*8), X2, K1, X2 // 6292ed092c94ff63000000 + VSCALEFPD 7(AX)(CX*8), X2, K1, X2 // 62f2ed092c94c807000000 + VSCALEFPD Y13, Y2, K1, Y14 // 6252ed292cf5 + VSCALEFPD -17(BP), Y2, K1, Y14 // 6272ed292cb5efffffff + VSCALEFPD -15(R14)(R15*8), Y2, K1, Y14 // 6212ed292cb4fef1ffffff + VSCALEFPD Z7, Z2, K7, Z18 // 62e2ed4f2cd7 + VSCALEFPD Z13, Z2, K7, Z18 // 62c2ed4f2cd5 + VSCALEFPD Z7, Z21, K7, Z18 // 62e2d5472cd7 + VSCALEFPD Z13, Z21, K7, Z18 // 62c2d5472cd5 + VSCALEFPD Z7, Z2, K7, 
Z24 // 6262ed4f2cc7 + VSCALEFPD Z13, Z2, K7, Z24 // 6242ed4f2cc5 + VSCALEFPD Z7, Z21, K7, Z24 // 6262d5472cc7 + VSCALEFPD Z13, Z21, K7, Z24 // 6242d5472cc5 + VSCALEFPD Z1, Z6, K2, Z6 // 62f2cd4a2cf1 + VSCALEFPD Z15, Z6, K2, Z6 // 62d2cd4a2cf7 + VSCALEFPD 7(SI)(DI*4), Z6, K2, Z6 // 62f2cd4a2cb4be07000000 + VSCALEFPD -7(DI)(R8*2), Z6, K2, Z6 // 62b2cd4a2cb447f9ffffff + VSCALEFPD Z1, Z22, K2, Z6 // 62f2cd422cf1 + VSCALEFPD Z15, Z22, K2, Z6 // 62d2cd422cf7 + VSCALEFPD 7(SI)(DI*4), Z22, K2, Z6 // 62f2cd422cb4be07000000 + VSCALEFPD -7(DI)(R8*2), Z22, K2, Z6 // 62b2cd422cb447f9ffffff + VSCALEFPD Z1, Z6, K2, Z16 // 62e2cd4a2cc1 + VSCALEFPD Z15, Z6, K2, Z16 // 62c2cd4a2cc7 + VSCALEFPD 7(SI)(DI*4), Z6, K2, Z16 // 62e2cd4a2c84be07000000 + VSCALEFPD -7(DI)(R8*2), Z6, K2, Z16 // 62a2cd4a2c8447f9ffffff + VSCALEFPD Z1, Z22, K2, Z16 // 62e2cd422cc1 + VSCALEFPD Z15, Z22, K2, Z16 // 62c2cd422cc7 + VSCALEFPD 7(SI)(DI*4), Z22, K2, Z16 // 62e2cd422c84be07000000 + VSCALEFPD -7(DI)(R8*2), Z22, K2, Z16 // 62a2cd422c8447f9ffffff + VSCALEFPS X30, X22, K4, X26 // 62024d042cd6 + VSCALEFPS (AX), X22, K4, X26 // 62624d042c10 + VSCALEFPS 7(SI), X22, K4, X26 // 62624d042c9607000000 + VSCALEFPS Y22, Y15, K1, Y27 // 622205292cde + VSCALEFPS 17(SP)(BP*2), Y15, K1, Y27 // 626205292c9c6c11000000 + VSCALEFPS -7(DI)(R8*4), Y15, K1, Y27 // 622205292c9c87f9ffffff + VSCALEFPS Z22, Z18, K3, Z13 // 62326d432cee + VSCALEFPS Z7, Z18, K3, Z13 // 62726d432cef + VSCALEFPS Z22, Z8, K3, Z13 // 62323d4b2cee + VSCALEFPS Z7, Z8, K3, Z13 // 62723d4b2cef + VSCALEFPS Z1, Z20, K4, Z2 // 62f25d442cd1 + VSCALEFPS Z3, Z20, K4, Z2 // 62f25d442cd3 + VSCALEFPS 17(SP), Z20, K4, Z2 // 62f25d442c942411000000 + VSCALEFPS -17(BP)(SI*4), Z20, K4, Z2 // 62f25d442c94b5efffffff + VSCALEFPS Z1, Z9, K4, Z2 // 62f2354c2cd1 + VSCALEFPS Z3, Z9, K4, Z2 // 62f2354c2cd3 + VSCALEFPS 17(SP), Z9, K4, Z2 // 62f2354c2c942411000000 + VSCALEFPS -17(BP)(SI*4), Z9, K4, Z2 // 62f2354c2c94b5efffffff + VSCALEFPS Z1, Z20, K4, Z31 // 62625d442cf9 + VSCALEFPS 
Z3, Z20, K4, Z31 // 62625d442cfb + VSCALEFPS 17(SP), Z20, K4, Z31 // 62625d442cbc2411000000 + VSCALEFPS -17(BP)(SI*4), Z20, K4, Z31 // 62625d442cbcb5efffffff + VSCALEFPS Z1, Z9, K4, Z31 // 6262354c2cf9 + VSCALEFPS Z3, Z9, K4, Z31 // 6262354c2cfb + VSCALEFPS 17(SP), Z9, K4, Z31 // 6262354c2cbc2411000000 + VSCALEFPS -17(BP)(SI*4), Z9, K4, Z31 // 6262354c2cbcb5efffffff + VSCALEFSD X15, X11, K5, X3 // 62d2a50d2ddf + VSCALEFSD X6, X13, K7, X30 // 6262950f2df6 or 6262952f2df6 or 6262954f2df6 + VSCALEFSD -17(BP)(SI*8), X13, K7, X30 // 6262950f2db4f5efffffff or 6262952f2db4f5efffffff or 6262954f2db4f5efffffff + VSCALEFSD (R15), X13, K7, X30 // 6242950f2d37 or 6242952f2d37 or 6242954f2d37 + VSCALEFSS X30, X23, K7, X12 // 621245072de6 + VSCALEFSS X2, X20, K6, X8 // 62725d062dc2 or 62725d262dc2 or 62725d462dc2 + VSCALEFSS (R8), X20, K6, X8 // 62525d062d00 or 62525d262d00 or 62525d462d00 + VSCALEFSS 15(DX)(BX*2), X20, K6, X8 // 62725d062d845a0f000000 or 62725d262d845a0f000000 or 62725d462d845a0f000000 + VSCATTERDPD X9, K3, (DX)(X10*4) // 6232fd0ba20c92 + VSCATTERDPD X9, K3, (SP)(X4*2) // 6272fd0ba20c64 + VSCATTERDPD X9, K3, (R14)(X29*8) // 6212fd03a20cee + VSCATTERDPD Y20, K7, (AX)(X4*1) // 62e2fd2fa22420 + VSCATTERDPD Y20, K7, (BP)(X10*2) // 62a2fd2fa2645500 + VSCATTERDPD Y20, K7, (R10)(X29*8) // 6282fd27a224ea + VSCATTERDPD Z12, K4, (R10)(Y29*8) // 6212fd44a224ea + VSCATTERDPD Z16, K4, (R10)(Y29*8) // 6282fd44a204ea + VSCATTERDPD Z12, K4, (SP)(Y4*2) // 6272fd4ca22464 + VSCATTERDPD Z16, K4, (SP)(Y4*2) // 62e2fd4ca20464 + VSCATTERDPD Z12, K4, (DX)(Y10*4) // 6232fd4ca22492 + VSCATTERDPD Z16, K4, (DX)(Y10*4) // 62a2fd4ca20492 + VSCATTERDPS X26, K4, (DX)(X10*4) // 62227d0ca21492 + VSCATTERDPS X26, K4, (SP)(X4*2) // 62627d0ca21464 + VSCATTERDPS X26, K4, (R14)(X29*8) // 62027d04a214ee + VSCATTERDPS Y18, K7, (R14)(Y29*8) // 62827d27a214ee + VSCATTERDPS Y18, K7, (AX)(Y4*1) // 62e27d2fa21420 + VSCATTERDPS Y18, K7, (BP)(Y10*2) // 62a27d2fa2545500 + VSCATTERDPS Z28, K2, (BP)(Z10*2) // 
62227d4aa2645500 + VSCATTERDPS Z13, K2, (BP)(Z10*2) // 62327d4aa26c5500 + VSCATTERDPS Z28, K2, (R10)(Z29*8) // 62027d42a224ea + VSCATTERDPS Z13, K2, (R10)(Z29*8) // 62127d42a22cea + VSCATTERDPS Z28, K2, (R14)(Z29*8) // 62027d42a224ee + VSCATTERDPS Z13, K2, (R14)(Z29*8) // 62127d42a22cee + VSCATTERQPD X19, K2, (AX)(X4*1) // 62e2fd0aa31c20 + VSCATTERQPD X19, K2, (BP)(X10*2) // 62a2fd0aa35c5500 + VSCATTERQPD X19, K2, (R10)(X29*8) // 6282fd02a31cea + VSCATTERQPD Y24, K1, (R10)(Y29*8) // 6202fd21a304ea + VSCATTERQPD Y24, K1, (SP)(Y4*2) // 6262fd29a30464 + VSCATTERQPD Y24, K1, (DX)(Y10*4) // 6222fd29a30492 + VSCATTERQPD Z14, K2, (DX)(Z10*4) // 6232fd4aa33492 + VSCATTERQPD Z28, K2, (DX)(Z10*4) // 6222fd4aa32492 + VSCATTERQPD Z14, K2, (AX)(Z4*1) // 6272fd4aa33420 + VSCATTERQPD Z28, K2, (AX)(Z4*1) // 6262fd4aa32420 + VSCATTERQPD Z14, K2, (SP)(Z4*2) // 6272fd4aa33464 + VSCATTERQPD Z28, K2, (SP)(Z4*2) // 6262fd4aa32464 + VSCATTERQPS X0, K1, (DX)(X10*4) // 62b27d09a30492 + VSCATTERQPS X0, K1, (SP)(X4*2) // 62f27d09a30464 + VSCATTERQPS X0, K1, (R14)(X29*8) // 62927d01a304ee + VSCATTERQPS X31, K7, (R14)(Y29*8) // 62027d27a33cee + VSCATTERQPS X31, K7, (AX)(Y4*1) // 62627d2fa33c20 + VSCATTERQPS X31, K7, (BP)(Y10*2) // 62227d2fa37c5500 + VSCATTERQPS Y9, K1, (BP)(Z10*2) // 62327d49a34c5500 + VSCATTERQPS Y9, K1, (R10)(Z29*8) // 62127d41a30cea + VSCATTERQPS Y9, K1, (R14)(Z29*8) // 62127d41a30cee + VSHUFF32X4 $97, Y23, Y19, K1, Y3 // 62b3652123df61 + VSHUFF32X4 $97, 15(R8), Y19, K1, Y3 // 62d3652123980f00000061 + VSHUFF32X4 $97, (BP), Y19, K1, Y3 // 62f36521235d0061 + VSHUFF32X4 $81, Z19, Z15, K1, Z3 // 62b3054923db51 + VSHUFF32X4 $81, Z15, Z15, K1, Z3 // 62d3054923df51 + VSHUFF32X4 $81, 7(AX), Z15, K1, Z3 // 62f3054923980700000051 + VSHUFF32X4 $81, (DI), Z15, K1, Z3 // 62f30549231f51 + VSHUFF32X4 $81, Z19, Z30, K1, Z3 // 62b30d4123db51 + VSHUFF32X4 $81, Z15, Z30, K1, Z3 // 62d30d4123df51 + VSHUFF32X4 $81, 7(AX), Z30, K1, Z3 // 62f30d4123980700000051 + VSHUFF32X4 $81, (DI), Z30, K1, Z3 
// 62f30d41231f51 + VSHUFF32X4 $81, Z19, Z15, K1, Z12 // 6233054923e351 + VSHUFF32X4 $81, Z15, Z15, K1, Z12 // 6253054923e751 + VSHUFF32X4 $81, 7(AX), Z15, K1, Z12 // 6273054923a00700000051 + VSHUFF32X4 $81, (DI), Z15, K1, Z12 // 62730549232751 + VSHUFF32X4 $81, Z19, Z30, K1, Z12 // 62330d4123e351 + VSHUFF32X4 $81, Z15, Z30, K1, Z12 // 62530d4123e751 + VSHUFF32X4 $81, 7(AX), Z30, K1, Z12 // 62730d4123a00700000051 + VSHUFF32X4 $81, (DI), Z30, K1, Z12 // 62730d41232751 + VSHUFF64X2 $42, Y21, Y14, K7, Y19 // 62a38d2f23dd2a + VSHUFF64X2 $42, 15(R8)(R14*8), Y14, K7, Y19 // 62838d2f239cf00f0000002a + VSHUFF64X2 $42, -15(R14)(R15*2), Y14, K7, Y19 // 62838d2f239c7ef1ffffff2a + VSHUFF64X2 $79, Z14, Z3, K2, Z5 // 62d3e54a23ee4f + VSHUFF64X2 $79, Z15, Z3, K2, Z5 // 62d3e54a23ef4f + VSHUFF64X2 $79, 99(R15)(R15*1), Z3, K2, Z5 // 6293e54a23ac3f630000004f + VSHUFF64X2 $79, (DX), Z3, K2, Z5 // 62f3e54a232a4f + VSHUFF64X2 $79, Z14, Z5, K2, Z5 // 62d3d54a23ee4f + VSHUFF64X2 $79, Z15, Z5, K2, Z5 // 62d3d54a23ef4f + VSHUFF64X2 $79, 99(R15)(R15*1), Z5, K2, Z5 // 6293d54a23ac3f630000004f + VSHUFF64X2 $79, (DX), Z5, K2, Z5 // 62f3d54a232a4f + VSHUFF64X2 $79, Z14, Z3, K2, Z1 // 62d3e54a23ce4f + VSHUFF64X2 $79, Z15, Z3, K2, Z1 // 62d3e54a23cf4f + VSHUFF64X2 $79, 99(R15)(R15*1), Z3, K2, Z1 // 6293e54a238c3f630000004f + VSHUFF64X2 $79, (DX), Z3, K2, Z1 // 62f3e54a230a4f + VSHUFF64X2 $79, Z14, Z5, K2, Z1 // 62d3d54a23ce4f + VSHUFF64X2 $79, Z15, Z5, K2, Z1 // 62d3d54a23cf4f + VSHUFF64X2 $79, 99(R15)(R15*1), Z5, K2, Z1 // 6293d54a238c3f630000004f + VSHUFF64X2 $79, (DX), Z5, K2, Z1 // 62f3d54a230a4f + VSHUFI32X4 $64, Y2, Y16, K4, Y5 // 62f37d2443ea40 + VSHUFI32X4 $64, -15(R14)(R15*1), Y16, K4, Y5 // 62937d2443ac3ef1ffffff40 + VSHUFI32X4 $64, -15(BX), Y16, K4, Y5 // 62f37d2443abf1ffffff40 + VSHUFI32X4 $27, Z20, Z16, K1, Z21 // 62a37d4143ec1b + VSHUFI32X4 $27, Z0, Z16, K1, Z21 // 62e37d4143e81b + VSHUFI32X4 $27, -17(BP)(SI*8), Z16, K1, Z21 // 62e37d4143acf5efffffff1b + VSHUFI32X4 $27, (R15), Z16, 
K1, Z21 // 62c37d41432f1b + VSHUFI32X4 $27, Z20, Z9, K1, Z21 // 62a3354943ec1b + VSHUFI32X4 $27, Z0, Z9, K1, Z21 // 62e3354943e81b + VSHUFI32X4 $27, -17(BP)(SI*8), Z9, K1, Z21 // 62e3354943acf5efffffff1b + VSHUFI32X4 $27, (R15), Z9, K1, Z21 // 62c33549432f1b + VSHUFI32X4 $27, Z20, Z16, K1, Z8 // 62337d4143c41b + VSHUFI32X4 $27, Z0, Z16, K1, Z8 // 62737d4143c01b + VSHUFI32X4 $27, -17(BP)(SI*8), Z16, K1, Z8 // 62737d414384f5efffffff1b + VSHUFI32X4 $27, (R15), Z16, K1, Z8 // 62537d4143071b + VSHUFI32X4 $27, Z20, Z9, K1, Z8 // 6233354943c41b + VSHUFI32X4 $27, Z0, Z9, K1, Z8 // 6273354943c01b + VSHUFI32X4 $27, -17(BP)(SI*8), Z9, K1, Z8 // 627335494384f5efffffff1b + VSHUFI32X4 $27, (R15), Z9, K1, Z8 // 6253354943071b + VSHUFI64X2 $47, Y6, Y20, K3, Y21 // 62e3dd2343ee2f + VSHUFI64X2 $47, 7(AX)(CX*4), Y20, K3, Y21 // 62e3dd2343ac88070000002f + VSHUFI64X2 $47, 7(AX)(CX*1), Y20, K3, Y21 // 62e3dd2343ac08070000002f + VSHUFI64X2 $82, Z0, Z0, K4, Z23 // 62e3fd4c43f852 + VSHUFI64X2 $82, Z25, Z0, K4, Z23 // 6283fd4c43f952 + VSHUFI64X2 $82, 7(SI)(DI*8), Z0, K4, Z23 // 62e3fd4c43bcfe0700000052 + VSHUFI64X2 $82, -15(R14), Z0, K4, Z23 // 62c3fd4c43bef1ffffff52 + VSHUFI64X2 $82, Z0, Z11, K4, Z23 // 62e3a54c43f852 + VSHUFI64X2 $82, Z25, Z11, K4, Z23 // 6283a54c43f952 + VSHUFI64X2 $82, 7(SI)(DI*8), Z11, K4, Z23 // 62e3a54c43bcfe0700000052 + VSHUFI64X2 $82, -15(R14), Z11, K4, Z23 // 62c3a54c43bef1ffffff52 + VSHUFI64X2 $82, Z0, Z0, K4, Z19 // 62e3fd4c43d852 + VSHUFI64X2 $82, Z25, Z0, K4, Z19 // 6283fd4c43d952 + VSHUFI64X2 $82, 7(SI)(DI*8), Z0, K4, Z19 // 62e3fd4c439cfe0700000052 + VSHUFI64X2 $82, -15(R14), Z0, K4, Z19 // 62c3fd4c439ef1ffffff52 + VSHUFI64X2 $82, Z0, Z11, K4, Z19 // 62e3a54c43d852 + VSHUFI64X2 $82, Z25, Z11, K4, Z19 // 6283a54c43d952 + VSHUFI64X2 $82, 7(SI)(DI*8), Z11, K4, Z19 // 62e3a54c439cfe0700000052 + VSHUFI64X2 $82, -15(R14), Z11, K4, Z19 // 62c3a54c439ef1ffffff52 + VSHUFPD $126, X8, X7, K5, X16 // 62c1c50dc6c07e + VSHUFPD $126, (BX), X7, K5, X16 // 62e1c50dc6037e + 
VSHUFPD $126, -17(BP)(SI*1), X7, K5, X16 // 62e1c50dc68435efffffff7e + VSHUFPD $94, Y11, Y6, K7, Y31 // 6241cd2fc6fb5e + VSHUFPD $94, (SI), Y6, K7, Y31 // 6261cd2fc63e5e + VSHUFPD $94, 7(SI)(DI*2), Y6, K7, Y31 // 6261cd2fc6bc7e070000005e + VSHUFPD $121, Z9, Z0, K7, Z24 // 6241fd4fc6c179 + VSHUFPD $121, Z3, Z0, K7, Z24 // 6261fd4fc6c379 + VSHUFPD $121, 7(SI)(DI*1), Z0, K7, Z24 // 6261fd4fc6843e0700000079 + VSHUFPD $121, 15(DX)(BX*8), Z0, K7, Z24 // 6261fd4fc684da0f00000079 + VSHUFPD $121, Z9, Z26, K7, Z24 // 6241ad47c6c179 + VSHUFPD $121, Z3, Z26, K7, Z24 // 6261ad47c6c379 + VSHUFPD $121, 7(SI)(DI*1), Z26, K7, Z24 // 6261ad47c6843e0700000079 + VSHUFPD $121, 15(DX)(BX*8), Z26, K7, Z24 // 6261ad47c684da0f00000079 + VSHUFPD $121, Z9, Z0, K7, Z12 // 6251fd4fc6e179 + VSHUFPD $121, Z3, Z0, K7, Z12 // 6271fd4fc6e379 + VSHUFPD $121, 7(SI)(DI*1), Z0, K7, Z12 // 6271fd4fc6a43e0700000079 + VSHUFPD $121, 15(DX)(BX*8), Z0, K7, Z12 // 6271fd4fc6a4da0f00000079 + VSHUFPD $121, Z9, Z26, K7, Z12 // 6251ad47c6e179 + VSHUFPD $121, Z3, Z26, K7, Z12 // 6271ad47c6e379 + VSHUFPD $121, 7(SI)(DI*1), Z26, K7, Z12 // 6271ad47c6a43e0700000079 + VSHUFPD $121, 15(DX)(BX*8), Z26, K7, Z12 // 6271ad47c6a4da0f00000079 + VSHUFPS $13, X15, X0, K6, X1 // 62d17c0ec6cf0d + VSHUFPS $13, 15(R8)(R14*4), X0, K6, X1 // 62917c0ec68cb00f0000000d + VSHUFPS $13, -7(CX)(DX*4), X0, K6, X1 // 62f17c0ec68c91f9ffffff0d + VSHUFPS $65, Y6, Y7, K3, Y19 // 62e1442bc6de41 + VSHUFPS $65, 17(SP)(BP*8), Y7, K3, Y19 // 62e1442bc69cec1100000041 + VSHUFPS $65, 17(SP)(BP*4), Y7, K3, Y19 // 62e1442bc69cac1100000041 + VSHUFPS $67, Z20, Z9, K7, Z9 // 6231344fc6cc43 + VSHUFPS $67, Z0, Z9, K7, Z9 // 6271344fc6c843 + VSHUFPS $67, -7(DI)(R8*1), Z9, K7, Z9 // 6231344fc68c07f9ffffff43 + VSHUFPS $67, (SP), Z9, K7, Z9 // 6271344fc60c2443 + VSHUFPS $67, Z20, Z28, K7, Z9 // 62311c47c6cc43 + VSHUFPS $67, Z0, Z28, K7, Z9 // 62711c47c6c843 + VSHUFPS $67, -7(DI)(R8*1), Z28, K7, Z9 // 62311c47c68c07f9ffffff43 + VSHUFPS $67, (SP), Z28, K7, Z9 // 
62711c47c60c2443 + VSHUFPS $67, Z20, Z9, K7, Z25 // 6221344fc6cc43 + VSHUFPS $67, Z0, Z9, K7, Z25 // 6261344fc6c843 + VSHUFPS $67, -7(DI)(R8*1), Z9, K7, Z25 // 6221344fc68c07f9ffffff43 + VSHUFPS $67, (SP), Z9, K7, Z25 // 6261344fc60c2443 + VSHUFPS $67, Z20, Z28, K7, Z25 // 62211c47c6cc43 + VSHUFPS $67, Z0, Z28, K7, Z25 // 62611c47c6c843 + VSHUFPS $67, -7(DI)(R8*1), Z28, K7, Z25 // 62211c47c68c07f9ffffff43 + VSHUFPS $67, (SP), Z28, K7, Z25 // 62611c47c60c2443 + VSQRTPD X16, K4, X0 // 62b1fd0c51c0 + VSQRTPD (R8), K4, X0 // 62d1fd0c5100 + VSQRTPD 15(DX)(BX*2), K4, X0 // 62f1fd0c51845a0f000000 + VSQRTPD Y3, K4, Y0 // 62f1fd2c51c3 + VSQRTPD 7(SI)(DI*4), K4, Y0 // 62f1fd2c5184be07000000 + VSQRTPD -7(DI)(R8*2), K4, Y0 // 62b1fd2c518447f9ffffff + VSQRTPD Z17, K7, Z17 // 62a1fd4f51c9 + VSQRTPD Z23, K7, Z17 // 62a1fd4f51cf + VSQRTPD Z17, K7, Z0 // 62b1fd4f51c1 + VSQRTPD Z23, K7, Z0 // 62b1fd4f51c7 + VSQRTPD Z21, K2, Z31 // 6221fd4a51fd + VSQRTPD Z9, K2, Z31 // 6241fd4a51f9 + VSQRTPD -7(CX), K2, Z31 // 6261fd4a51b9f9ffffff + VSQRTPD 15(DX)(BX*4), K2, Z31 // 6261fd4a51bc9a0f000000 + VSQRTPD Z21, K2, Z0 // 62b1fd4a51c5 + VSQRTPD Z9, K2, Z0 // 62d1fd4a51c1 + VSQRTPD -7(CX), K2, Z0 // 62f1fd4a5181f9ffffff + VSQRTPD 15(DX)(BX*4), K2, Z0 // 62f1fd4a51849a0f000000 + VSQRTPS X0, K5, X21 // 62e17c0d51e8 + VSQRTPS 17(SP)(BP*1), K5, X21 // 62e17c0d51ac2c11000000 + VSQRTPS -7(CX)(DX*8), K5, X21 // 62e17c0d51acd1f9ffffff + VSQRTPS Y20, K3, Y5 // 62b17c2b51ec + VSQRTPS 17(SP), K3, Y5 // 62f17c2b51ac2411000000 + VSQRTPS -17(BP)(SI*4), K3, Y5 // 62f17c2b51acb5efffffff + VSQRTPS Z1, K4, Z6 // 62f17c4c51f1 + VSQRTPS Z9, K4, Z6 // 62d17c4c51f1 + VSQRTPS Z1, K4, Z9 // 62717c4c51c9 + VSQRTPS Z9, K4, Z9 // 62517c4c51c9 + VSQRTPS Z30, K2, Z20 // 62817c4a51e6 + VSQRTPS Z5, K2, Z20 // 62e17c4a51e5 + VSQRTPS 99(R15)(R15*8), K2, Z20 // 62817c4a51a4ff63000000 + VSQRTPS 7(AX)(CX*8), K2, Z20 // 62e17c4a51a4c807000000 + VSQRTPS Z30, K2, Z9 // 62117c4a51ce + VSQRTPS Z5, K2, Z9 // 62717c4a51cd + VSQRTPS 
99(R15)(R15*8), K2, Z9 // 62117c4a518cff63000000 + VSQRTPS 7(AX)(CX*8), K2, Z9 // 62717c4a518cc807000000 + VSQRTSD X7, X22, K2, X28 // 6261cf0251e7 + VSQRTSD X16, X7, K3, X19 // 62a1c70b51d8 or 62a1c72b51d8 or 62a1c74b51d8 + VSQRTSD 7(SI)(DI*8), X7, K3, X19 // 62e1c70b519cfe07000000 or 62e1c72b519cfe07000000 or 62e1c74b519cfe07000000 + VSQRTSD -15(R14), X7, K3, X19 // 62c1c70b519ef1ffffff or 62c1c72b519ef1ffffff or 62c1c74b519ef1ffffff + VSQRTSS X7, X1, K3, X31 // 6261760b51ff + VSQRTSS X12, X15, K3, X9 // 6251060b51cc or 6251062b51cc or 6251064b51cc + VSQRTSS 17(SP)(BP*1), X15, K3, X9 // 6271060b518c2c11000000 or 6271062b518c2c11000000 or 6271064b518c2c11000000 + VSQRTSS -7(CX)(DX*8), X15, K3, X9 // 6271060b518cd1f9ffffff or 6271062b518cd1f9ffffff or 6271064b518cd1f9ffffff + VSUBPD X14, X12, K2, X0 // 62d19d0a5cc6 + VSUBPD -17(BP)(SI*2), X12, K2, X0 // 62f19d0a5c8475efffffff + VSUBPD 7(AX)(CX*2), X12, K2, X0 // 62f19d0a5c844807000000 + VSUBPD Y5, Y3, K1, Y12 // 6271e5295ce5 + VSUBPD 7(AX), Y3, K1, Y12 // 6271e5295ca007000000 + VSUBPD (DI), Y3, K1, Y12 // 6271e5295c27 + VSUBPD Z16, Z7, K2, Z26 // 6221c54a5cd0 + VSUBPD Z25, Z7, K2, Z26 // 6201c54a5cd1 + VSUBPD Z16, Z21, K2, Z26 // 6221d5425cd0 + VSUBPD Z25, Z21, K2, Z26 // 6201d5425cd1 + VSUBPD Z16, Z7, K2, Z22 // 62a1c54a5cf0 + VSUBPD Z25, Z7, K2, Z22 // 6281c54a5cf1 + VSUBPD Z16, Z21, K2, Z22 // 62a1d5425cf0 + VSUBPD Z25, Z21, K2, Z22 // 6281d5425cf1 + VSUBPD Z21, Z12, K1, Z14 // 62319d495cf5 + VSUBPD Z9, Z12, K1, Z14 // 62519d495cf1 + VSUBPD (AX), Z12, K1, Z14 // 62719d495c30 + VSUBPD 7(SI), Z12, K1, Z14 // 62719d495cb607000000 + VSUBPD Z21, Z13, K1, Z14 // 623195495cf5 + VSUBPD Z9, Z13, K1, Z14 // 625195495cf1 + VSUBPD (AX), Z13, K1, Z14 // 627195495c30 + VSUBPD 7(SI), Z13, K1, Z14 // 627195495cb607000000 + VSUBPD Z21, Z12, K1, Z13 // 62319d495ced + VSUBPD Z9, Z12, K1, Z13 // 62519d495ce9 + VSUBPD (AX), Z12, K1, Z13 // 62719d495c28 + VSUBPD 7(SI), Z12, K1, Z13 // 62719d495cae07000000 + VSUBPD Z21, Z13, K1, Z13 
// 623195495ced + VSUBPD Z9, Z13, K1, Z13 // 625195495ce9 + VSUBPD (AX), Z13, K1, Z13 // 627195495c28 + VSUBPD 7(SI), Z13, K1, Z13 // 627195495cae07000000 + VSUBPS X15, X17, K7, X5 // 62d174075cef + VSUBPS 15(R8)(R14*1), X17, K7, X5 // 629174075cac300f000000 + VSUBPS 15(R8)(R14*2), X17, K7, X5 // 629174075cac700f000000 + VSUBPS Y0, Y7, K1, Y28 // 626144295ce0 + VSUBPS 99(R15)(R15*1), Y7, K1, Y28 // 620144295ca43f63000000 + VSUBPS (DX), Y7, K1, Y28 // 626144295c22 + VSUBPS Z23, Z27, K1, Z2 // 62b124415cd7 + VSUBPS Z9, Z27, K1, Z2 // 62d124415cd1 + VSUBPS Z23, Z25, K1, Z2 // 62b134415cd7 + VSUBPS Z9, Z25, K1, Z2 // 62d134415cd1 + VSUBPS Z23, Z27, K1, Z7 // 62b124415cff + VSUBPS Z9, Z27, K1, Z7 // 62d124415cf9 + VSUBPS Z23, Z25, K1, Z7 // 62b134415cff + VSUBPS Z9, Z25, K1, Z7 // 62d134415cf9 + VSUBPS Z14, Z3, K1, Z27 // 624164495cde + VSUBPS Z7, Z3, K1, Z27 // 626164495cdf + VSUBPS (BX), Z3, K1, Z27 // 626164495c1b + VSUBPS -17(BP)(SI*1), Z3, K1, Z27 // 626164495c9c35efffffff + VSUBPS Z14, Z0, K1, Z27 // 62417c495cde + VSUBPS Z7, Z0, K1, Z27 // 62617c495cdf + VSUBPS (BX), Z0, K1, Z27 // 62617c495c1b + VSUBPS -17(BP)(SI*1), Z0, K1, Z27 // 62617c495c9c35efffffff + VSUBPS Z14, Z3, K1, Z14 // 625164495cf6 + VSUBPS Z7, Z3, K1, Z14 // 627164495cf7 + VSUBPS (BX), Z3, K1, Z14 // 627164495c33 + VSUBPS -17(BP)(SI*1), Z3, K1, Z14 // 627164495cb435efffffff + VSUBPS Z14, Z0, K1, Z14 // 62517c495cf6 + VSUBPS Z7, Z0, K1, Z14 // 62717c495cf7 + VSUBPS (BX), Z0, K1, Z14 // 62717c495c33 + VSUBPS -17(BP)(SI*1), Z0, K1, Z14 // 62717c495cb435efffffff + VSUBSD X26, X3, K7, X8 // 6211e70f5cc2 + VSUBSD X28, X13, K2, X23 // 6281970a5cfc or 6281972a5cfc or 6281974a5cfc + VSUBSD 7(SI)(DI*1), X13, K2, X23 // 62e1970a5cbc3e07000000 or 62e1972a5cbc3e07000000 or 62e1974a5cbc3e07000000 + VSUBSD 15(DX)(BX*8), X13, K2, X23 // 62e1970a5cbcda0f000000 or 62e1972a5cbcda0f000000 or 62e1974a5cbcda0f000000 + VSUBSS X15, X9, K4, X24 // 6241360c5cc7 + VSUBSS X21, X18, K1, X26 // 62216e015cd5 or 62216e215cd5 or 
62216e415cd5 + VSUBSS -17(BP)(SI*2), X18, K1, X26 // 62616e015c9475efffffff or 62616e215c9475efffffff or 62616e415c9475efffffff + VSUBSS 7(AX)(CX*2), X18, K1, X26 // 62616e015c944807000000 or 62616e215c944807000000 or 62616e415c944807000000 + VUCOMISD X3, X31 // 6261fd082efb or 6261fd282efb or 6261fd482efb + VUCOMISD -7(DI)(R8*1), X31 // 6221fd082ebc07f9ffffff or 6221fd282ebc07f9ffffff or 6221fd482ebc07f9ffffff + VUCOMISD (SP), X31 // 6261fd082e3c24 or 6261fd282e3c24 or 6261fd482e3c24 + VUCOMISS X24, X0 // 62917c082ec0 or 62917c282ec0 or 62917c482ec0 + VUNPCKHPD X9, X7, K3, X20 // 62c1c50b15e1 + VUNPCKHPD (R14), X7, K3, X20 // 62c1c50b1526 + VUNPCKHPD -7(DI)(R8*8), X7, K3, X20 // 62a1c50b15a4c7f9ffffff + VUNPCKHPD Y12, Y13, K4, Y22 // 62c1952c15f4 + VUNPCKHPD -17(BP)(SI*8), Y13, K4, Y22 // 62e1952c15b4f5efffffff + VUNPCKHPD (R15), Y13, K4, Y22 // 62c1952c1537 + VUNPCKHPD Z1, Z22, K5, Z8 // 6271cd4515c1 + VUNPCKHPD Z16, Z22, K5, Z8 // 6231cd4515c0 + VUNPCKHPD 15(R8)(R14*4), Z22, K5, Z8 // 6211cd451584b00f000000 + VUNPCKHPD -7(CX)(DX*4), Z22, K5, Z8 // 6271cd45158491f9ffffff + VUNPCKHPD Z1, Z25, K5, Z8 // 6271b54515c1 + VUNPCKHPD Z16, Z25, K5, Z8 // 6231b54515c0 + VUNPCKHPD 15(R8)(R14*4), Z25, K5, Z8 // 6211b5451584b00f000000 + VUNPCKHPD -7(CX)(DX*4), Z25, K5, Z8 // 6271b545158491f9ffffff + VUNPCKHPD Z1, Z22, K5, Z24 // 6261cd4515c1 + VUNPCKHPD Z16, Z22, K5, Z24 // 6221cd4515c0 + VUNPCKHPD 15(R8)(R14*4), Z22, K5, Z24 // 6201cd451584b00f000000 + VUNPCKHPD -7(CX)(DX*4), Z22, K5, Z24 // 6261cd45158491f9ffffff + VUNPCKHPD Z1, Z25, K5, Z24 // 6261b54515c1 + VUNPCKHPD Z16, Z25, K5, Z24 // 6221b54515c0 + VUNPCKHPD 15(R8)(R14*4), Z25, K5, Z24 // 6201b5451584b00f000000 + VUNPCKHPD -7(CX)(DX*4), Z25, K5, Z24 // 6261b545158491f9ffffff + VUNPCKHPS X5, X14, K7, X7 // 62f10c0f15fd + VUNPCKHPS 99(R15)(R15*4), X14, K7, X7 // 62910c0f15bcbf63000000 + VUNPCKHPS 15(DX), X14, K7, X7 // 62f10c0f15ba0f000000 + VUNPCKHPS Y17, Y14, K7, Y1 // 62b10c2f15c9 + VUNPCKHPS 7(SI)(DI*8), Y14, K7, Y1 
// 62f10c2f158cfe07000000 + VUNPCKHPS -15(R14), Y14, K7, Y1 // 62d10c2f158ef1ffffff + VUNPCKHPS Z15, Z0, K6, Z6 // 62d17c4e15f7 + VUNPCKHPS Z12, Z0, K6, Z6 // 62d17c4e15f4 + VUNPCKHPS (R8), Z0, K6, Z6 // 62d17c4e1530 + VUNPCKHPS 15(DX)(BX*2), Z0, K6, Z6 // 62f17c4e15b45a0f000000 + VUNPCKHPS Z15, Z8, K6, Z6 // 62d13c4e15f7 + VUNPCKHPS Z12, Z8, K6, Z6 // 62d13c4e15f4 + VUNPCKHPS (R8), Z8, K6, Z6 // 62d13c4e1530 + VUNPCKHPS 15(DX)(BX*2), Z8, K6, Z6 // 62f13c4e15b45a0f000000 + VUNPCKHPS Z15, Z0, K6, Z2 // 62d17c4e15d7 + VUNPCKHPS Z12, Z0, K6, Z2 // 62d17c4e15d4 + VUNPCKHPS (R8), Z0, K6, Z2 // 62d17c4e1510 + VUNPCKHPS 15(DX)(BX*2), Z0, K6, Z2 // 62f17c4e15945a0f000000 + VUNPCKHPS Z15, Z8, K6, Z2 // 62d13c4e15d7 + VUNPCKHPS Z12, Z8, K6, Z2 // 62d13c4e15d4 + VUNPCKHPS (R8), Z8, K6, Z2 // 62d13c4e1510 + VUNPCKHPS 15(DX)(BX*2), Z8, K6, Z2 // 62f13c4e15945a0f000000 + VUNPCKLPD X21, X3, K3, X31 // 6221e50b14fd + VUNPCKLPD (CX), X3, K3, X31 // 6261e50b1439 + VUNPCKLPD 99(R15), X3, K3, X31 // 6241e50b14bf63000000 + VUNPCKLPD Y31, Y9, K7, Y7 // 6291b52f14ff + VUNPCKLPD 7(SI)(DI*1), Y9, K7, Y7 // 62f1b52f14bc3e07000000 + VUNPCKLPD 15(DX)(BX*8), Y9, K7, Y7 // 62f1b52f14bcda0f000000 + VUNPCKLPD Z13, Z11, K4, Z14 // 6251a54c14f5 + VUNPCKLPD Z14, Z11, K4, Z14 // 6251a54c14f6 + VUNPCKLPD 17(SP)(BP*1), Z11, K4, Z14 // 6271a54c14b42c11000000 + VUNPCKLPD -7(CX)(DX*8), Z11, K4, Z14 // 6271a54c14b4d1f9ffffff + VUNPCKLPD Z13, Z5, K4, Z14 // 6251d54c14f5 + VUNPCKLPD Z14, Z5, K4, Z14 // 6251d54c14f6 + VUNPCKLPD 17(SP)(BP*1), Z5, K4, Z14 // 6271d54c14b42c11000000 + VUNPCKLPD -7(CX)(DX*8), Z5, K4, Z14 // 6271d54c14b4d1f9ffffff + VUNPCKLPD Z13, Z11, K4, Z27 // 6241a54c14dd + VUNPCKLPD Z14, Z11, K4, Z27 // 6241a54c14de + VUNPCKLPD 17(SP)(BP*1), Z11, K4, Z27 // 6261a54c149c2c11000000 + VUNPCKLPD -7(CX)(DX*8), Z11, K4, Z27 // 6261a54c149cd1f9ffffff + VUNPCKLPD Z13, Z5, K4, Z27 // 6241d54c14dd + VUNPCKLPD Z14, Z5, K4, Z27 // 6241d54c14de + VUNPCKLPD 17(SP)(BP*1), Z5, K4, Z27 // 
6261d54c149c2c11000000 + VUNPCKLPD -7(CX)(DX*8), Z5, K4, Z27 // 6261d54c149cd1f9ffffff + VUNPCKLPS X13, X11, K4, X1 // 62d1240c14cd + VUNPCKLPS 99(R15)(R15*2), X11, K4, X1 // 6291240c148c7f63000000 + VUNPCKLPS -7(DI), X11, K4, X1 // 62f1240c148ff9ffffff + VUNPCKLPS Y28, Y1, K7, Y8 // 6211742f14c4 + VUNPCKLPS -7(DI)(R8*1), Y1, K7, Y8 // 6231742f148407f9ffffff + VUNPCKLPS (SP), Y1, K7, Y8 // 6271742f140424 + VUNPCKLPS Z6, Z2, K2, Z5 // 62f16c4a14ee + VUNPCKLPS Z14, Z2, K2, Z5 // 62d16c4a14ee + VUNPCKLPS -17(BP)(SI*2), Z2, K2, Z5 // 62f16c4a14ac75efffffff + VUNPCKLPS 7(AX)(CX*2), Z2, K2, Z5 // 62f16c4a14ac4807000000 + VUNPCKLPS Z6, Z2, K2, Z23 // 62e16c4a14fe + VUNPCKLPS Z14, Z2, K2, Z23 // 62c16c4a14fe + VUNPCKLPS -17(BP)(SI*2), Z2, K2, Z23 // 62e16c4a14bc75efffffff + VUNPCKLPS 7(AX)(CX*2), Z2, K2, Z23 // 62e16c4a14bc4807000000 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512pf.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512pf.s new file mode 100644 index 0000000000000000000000000000000000000000..1b3cce7217bed4495340d96270cf374eb15bba9b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/avx512pf.s @@ -0,0 +1,54 @@ +// Code generated by avx512test. DO NOT EDIT. 
+ +#include "../../../../../../runtime/textflag.h" + +TEXT asmtest_avx512pf(SB), NOSPLIT, $0 + VGATHERPF0DPD K5, (R10)(Y29*8) // 6292fd45c60cea + VGATHERPF0DPD K5, (SP)(Y4*2) // 62f2fd4dc60c64 + VGATHERPF0DPD K5, (DX)(Y10*4) // 62b2fd4dc60c92 + VGATHERPF0DPS K3, (BP)(Z10*2) // 62b27d4bc64c5500 + VGATHERPF0DPS K3, (R10)(Z29*8) // 62927d43c60cea + VGATHERPF0DPS K3, (R14)(Z29*8) // 62927d43c60cee + VGATHERPF0QPD K4, (DX)(Z10*4) // 62b2fd4cc70c92 + VGATHERPF0QPD K4, (AX)(Z4*1) // 62f2fd4cc70c20 + VGATHERPF0QPD K4, (SP)(Z4*2) // 62f2fd4cc70c64 + VGATHERPF0QPS K2, (BP)(Z10*2) // 62b27d4ac74c5500 + VGATHERPF0QPS K2, (R10)(Z29*8) // 62927d42c70cea + VGATHERPF0QPS K2, (R14)(Z29*8) // 62927d42c70cee + VGATHERPF1DPD K2, (R14)(Y29*8) // 6292fd42c614ee + VGATHERPF1DPD K2, (AX)(Y4*1) // 62f2fd4ac61420 + VGATHERPF1DPD K2, (BP)(Y10*2) // 62b2fd4ac6545500 + VGATHERPF1DPS K3, (DX)(Z10*4) // 62b27d4bc61492 + VGATHERPF1DPS K3, (AX)(Z4*1) // 62f27d4bc61420 + VGATHERPF1DPS K3, (SP)(Z4*2) // 62f27d4bc61464 + VGATHERPF1QPD K3, (DX)(Z10*4) // 62b2fd4bc71492 + VGATHERPF1QPD K3, (AX)(Z4*1) // 62f2fd4bc71420 + VGATHERPF1QPD K3, (SP)(Z4*2) // 62f2fd4bc71464 + VGATHERPF1QPS K3, (BP)(Z10*2) // 62b27d4bc7545500 + VGATHERPF1QPS K3, (R10)(Z29*8) // 62927d43c714ea + VGATHERPF1QPS K3, (R14)(Z29*8) // 62927d43c714ee + VSCATTERPF0DPD K5, (R10)(Y29*8) // 6292fd45c62cea + VSCATTERPF0DPD K5, (SP)(Y4*2) // 62f2fd4dc62c64 + VSCATTERPF0DPD K5, (DX)(Y10*4) // 62b2fd4dc62c92 + VSCATTERPF0DPS K3, (DX)(Z10*4) // 62b27d4bc62c92 + VSCATTERPF0DPS K3, (AX)(Z4*1) // 62f27d4bc62c20 + VSCATTERPF0DPS K3, (SP)(Z4*2) // 62f27d4bc62c64 + VSCATTERPF0QPD K4, (DX)(Z10*4) // 62b2fd4cc72c92 + VSCATTERPF0QPD K4, (AX)(Z4*1) // 62f2fd4cc72c20 + VSCATTERPF0QPD K4, (SP)(Z4*2) // 62f2fd4cc72c64 + VSCATTERPF0QPS K2, (BP)(Z10*2) // 62b27d4ac76c5500 + VSCATTERPF0QPS K2, (R10)(Z29*8) // 62927d42c72cea + VSCATTERPF0QPS K2, (R14)(Z29*8) // 62927d42c72cee + VSCATTERPF1DPD K2, (R14)(Y29*8) // 6292fd42c634ee + VSCATTERPF1DPD K2, (AX)(Y4*1) // 
62f2fd4ac63420 + VSCATTERPF1DPD K2, (BP)(Y10*2) // 62b2fd4ac6745500 + VSCATTERPF1DPS K3, (BP)(Z10*2) // 62b27d4bc6745500 + VSCATTERPF1DPS K3, (R10)(Z29*8) // 62927d43c634ea + VSCATTERPF1DPS K3, (R14)(Z29*8) // 62927d43c634ee + VSCATTERPF1QPD K3, (DX)(Z10*4) // 62b2fd4bc73492 + VSCATTERPF1QPD K3, (AX)(Z4*1) // 62f2fd4bc73420 + VSCATTERPF1QPD K3, (SP)(Z4*2) // 62f2fd4bc73464 + VSCATTERPF1QPS K3, (BP)(Z10*2) // 62b27d4bc7745500 + VSCATTERPF1QPS K3, (R10)(Z29*8) // 62927d43c734ea + VSCATTERPF1QPS K3, (R14)(Z29*8) // 62927d43c734ee + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/gfni_avx512f.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/gfni_avx512f.s new file mode 100644 index 0000000000000000000000000000000000000000..9df5f0ed5249e3e7995f723affcd987139974bfb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/gfni_avx512f.s @@ -0,0 +1,324 @@ +// Code generated by avx512test. DO NOT EDIT. 
+ +#include "../../../../../../runtime/textflag.h" + +TEXT asmtest_gfni_avx512f(SB), NOSPLIT, $0 + VGF2P8AFFINEINVQB $64, X8, X31, K3, X26 // 62438503cfd040 + VGF2P8AFFINEINVQB $64, X1, X31, K3, X26 // 62638503cfd140 + VGF2P8AFFINEINVQB $64, X0, X31, K3, X26 // 62638503cfd040 + VGF2P8AFFINEINVQB $64, -17(BP), X31, K3, X26 // 62638503cf95efffffff40 + VGF2P8AFFINEINVQB $64, -15(R14)(R15*8), X31, K3, X26 // 62038503cf94fef1ffffff40 + VGF2P8AFFINEINVQB $64, X8, X16, K3, X26 // 6243fd03cfd040 + VGF2P8AFFINEINVQB $64, X1, X16, K3, X26 // 6263fd03cfd140 + VGF2P8AFFINEINVQB $64, X0, X16, K3, X26 // 6263fd03cfd040 + VGF2P8AFFINEINVQB $64, -17(BP), X16, K3, X26 // 6263fd03cf95efffffff40 + VGF2P8AFFINEINVQB $64, -15(R14)(R15*8), X16, K3, X26 // 6203fd03cf94fef1ffffff40 + VGF2P8AFFINEINVQB $64, X8, X7, K3, X26 // 6243c50bcfd040 + VGF2P8AFFINEINVQB $64, X1, X7, K3, X26 // 6263c50bcfd140 + VGF2P8AFFINEINVQB $64, X0, X7, K3, X26 // 6263c50bcfd040 + VGF2P8AFFINEINVQB $64, -17(BP), X7, K3, X26 // 6263c50bcf95efffffff40 + VGF2P8AFFINEINVQB $64, -15(R14)(R15*8), X7, K3, X26 // 6203c50bcf94fef1ffffff40 + VGF2P8AFFINEINVQB $64, X8, X31, K3, X19 // 62c38503cfd840 + VGF2P8AFFINEINVQB $64, X1, X31, K3, X19 // 62e38503cfd940 + VGF2P8AFFINEINVQB $64, X0, X31, K3, X19 // 62e38503cfd840 + VGF2P8AFFINEINVQB $64, -17(BP), X31, K3, X19 // 62e38503cf9defffffff40 + VGF2P8AFFINEINVQB $64, -15(R14)(R15*8), X31, K3, X19 // 62838503cf9cfef1ffffff40 + VGF2P8AFFINEINVQB $64, X8, X16, K3, X19 // 62c3fd03cfd840 + VGF2P8AFFINEINVQB $64, X1, X16, K3, X19 // 62e3fd03cfd940 + VGF2P8AFFINEINVQB $64, X0, X16, K3, X19 // 62e3fd03cfd840 + VGF2P8AFFINEINVQB $64, -17(BP), X16, K3, X19 // 62e3fd03cf9defffffff40 + VGF2P8AFFINEINVQB $64, -15(R14)(R15*8), X16, K3, X19 // 6283fd03cf9cfef1ffffff40 + VGF2P8AFFINEINVQB $64, X8, X7, K3, X19 // 62c3c50bcfd840 + VGF2P8AFFINEINVQB $64, X1, X7, K3, X19 // 62e3c50bcfd940 + VGF2P8AFFINEINVQB $64, X0, X7, K3, X19 // 62e3c50bcfd840 + VGF2P8AFFINEINVQB $64, -17(BP), X7, K3, X19 // 
62e3c50bcf9defffffff40 + VGF2P8AFFINEINVQB $64, -15(R14)(R15*8), X7, K3, X19 // 6283c50bcf9cfef1ffffff40 + VGF2P8AFFINEINVQB $64, X8, X31, K3, X0 // 62d38503cfc040 + VGF2P8AFFINEINVQB $64, X1, X31, K3, X0 // 62f38503cfc140 + VGF2P8AFFINEINVQB $64, X0, X31, K3, X0 // 62f38503cfc040 + VGF2P8AFFINEINVQB $64, -17(BP), X31, K3, X0 // 62f38503cf85efffffff40 + VGF2P8AFFINEINVQB $64, -15(R14)(R15*8), X31, K3, X0 // 62938503cf84fef1ffffff40 + VGF2P8AFFINEINVQB $64, X8, X16, K3, X0 // 62d3fd03cfc040 + VGF2P8AFFINEINVQB $64, X1, X16, K3, X0 // 62f3fd03cfc140 + VGF2P8AFFINEINVQB $64, X0, X16, K3, X0 // 62f3fd03cfc040 + VGF2P8AFFINEINVQB $64, -17(BP), X16, K3, X0 // 62f3fd03cf85efffffff40 + VGF2P8AFFINEINVQB $64, -15(R14)(R15*8), X16, K3, X0 // 6293fd03cf84fef1ffffff40 + VGF2P8AFFINEINVQB $64, X8, X7, K3, X0 // 62d3c50bcfc040 + VGF2P8AFFINEINVQB $64, X1, X7, K3, X0 // 62f3c50bcfc140 + VGF2P8AFFINEINVQB $64, X0, X7, K3, X0 // 62f3c50bcfc040 + VGF2P8AFFINEINVQB $64, -17(BP), X7, K3, X0 // 62f3c50bcf85efffffff40 + VGF2P8AFFINEINVQB $64, -15(R14)(R15*8), X7, K3, X0 // 6293c50bcf84fef1ffffff40 + VGF2P8AFFINEINVQB $27, Y5, Y20, K3, Y0 // 62f3dd23cfc51b + VGF2P8AFFINEINVQB $27, Y28, Y20, K3, Y0 // 6293dd23cfc41b + VGF2P8AFFINEINVQB $27, Y7, Y20, K3, Y0 // 62f3dd23cfc71b + VGF2P8AFFINEINVQB $27, (BX), Y20, K3, Y0 // 62f3dd23cf031b + VGF2P8AFFINEINVQB $27, -17(BP)(SI*1), Y20, K3, Y0 // 62f3dd23cf8435efffffff1b + VGF2P8AFFINEINVQB $27, Y5, Y12, K3, Y0 // 62f39d2bcfc51b + VGF2P8AFFINEINVQB $27, Y28, Y12, K3, Y0 // 62939d2bcfc41b + VGF2P8AFFINEINVQB $27, Y7, Y12, K3, Y0 // 62f39d2bcfc71b + VGF2P8AFFINEINVQB $27, (BX), Y12, K3, Y0 // 62f39d2bcf031b + VGF2P8AFFINEINVQB $27, -17(BP)(SI*1), Y12, K3, Y0 // 62f39d2bcf8435efffffff1b + VGF2P8AFFINEINVQB $27, Y5, Y3, K3, Y0 // 62f3e52bcfc51b + VGF2P8AFFINEINVQB $27, Y28, Y3, K3, Y0 // 6293e52bcfc41b + VGF2P8AFFINEINVQB $27, Y7, Y3, K3, Y0 // 62f3e52bcfc71b + VGF2P8AFFINEINVQB $27, (BX), Y3, K3, Y0 // 62f3e52bcf031b + VGF2P8AFFINEINVQB $27, 
-17(BP)(SI*1), Y3, K3, Y0 // 62f3e52bcf8435efffffff1b + VGF2P8AFFINEINVQB $27, Y5, Y20, K3, Y3 // 62f3dd23cfdd1b + VGF2P8AFFINEINVQB $27, Y28, Y20, K3, Y3 // 6293dd23cfdc1b + VGF2P8AFFINEINVQB $27, Y7, Y20, K3, Y3 // 62f3dd23cfdf1b + VGF2P8AFFINEINVQB $27, (BX), Y20, K3, Y3 // 62f3dd23cf1b1b + VGF2P8AFFINEINVQB $27, -17(BP)(SI*1), Y20, K3, Y3 // 62f3dd23cf9c35efffffff1b + VGF2P8AFFINEINVQB $27, Y5, Y12, K3, Y3 // 62f39d2bcfdd1b + VGF2P8AFFINEINVQB $27, Y28, Y12, K3, Y3 // 62939d2bcfdc1b + VGF2P8AFFINEINVQB $27, Y7, Y12, K3, Y3 // 62f39d2bcfdf1b + VGF2P8AFFINEINVQB $27, (BX), Y12, K3, Y3 // 62f39d2bcf1b1b + VGF2P8AFFINEINVQB $27, -17(BP)(SI*1), Y12, K3, Y3 // 62f39d2bcf9c35efffffff1b + VGF2P8AFFINEINVQB $27, Y5, Y3, K3, Y3 // 62f3e52bcfdd1b + VGF2P8AFFINEINVQB $27, Y28, Y3, K3, Y3 // 6293e52bcfdc1b + VGF2P8AFFINEINVQB $27, Y7, Y3, K3, Y3 // 62f3e52bcfdf1b + VGF2P8AFFINEINVQB $27, (BX), Y3, K3, Y3 // 62f3e52bcf1b1b + VGF2P8AFFINEINVQB $27, -17(BP)(SI*1), Y3, K3, Y3 // 62f3e52bcf9c35efffffff1b + VGF2P8AFFINEINVQB $27, Y5, Y20, K3, Y5 // 62f3dd23cfed1b + VGF2P8AFFINEINVQB $27, Y28, Y20, K3, Y5 // 6293dd23cfec1b + VGF2P8AFFINEINVQB $27, Y7, Y20, K3, Y5 // 62f3dd23cfef1b + VGF2P8AFFINEINVQB $27, (BX), Y20, K3, Y5 // 62f3dd23cf2b1b + VGF2P8AFFINEINVQB $27, -17(BP)(SI*1), Y20, K3, Y5 // 62f3dd23cfac35efffffff1b + VGF2P8AFFINEINVQB $27, Y5, Y12, K3, Y5 // 62f39d2bcfed1b + VGF2P8AFFINEINVQB $27, Y28, Y12, K3, Y5 // 62939d2bcfec1b + VGF2P8AFFINEINVQB $27, Y7, Y12, K3, Y5 // 62f39d2bcfef1b + VGF2P8AFFINEINVQB $27, (BX), Y12, K3, Y5 // 62f39d2bcf2b1b + VGF2P8AFFINEINVQB $27, -17(BP)(SI*1), Y12, K3, Y5 // 62f39d2bcfac35efffffff1b + VGF2P8AFFINEINVQB $27, Y5, Y3, K3, Y5 // 62f3e52bcfed1b + VGF2P8AFFINEINVQB $27, Y28, Y3, K3, Y5 // 6293e52bcfec1b + VGF2P8AFFINEINVQB $27, Y7, Y3, K3, Y5 // 62f3e52bcfef1b + VGF2P8AFFINEINVQB $27, (BX), Y3, K3, Y5 // 62f3e52bcf2b1b + VGF2P8AFFINEINVQB $27, -17(BP)(SI*1), Y3, K3, Y5 // 62f3e52bcfac35efffffff1b + VGF2P8AFFINEINVQB $47, Z3, Z14, K2, Z28 
// 62638d4acfe32f + VGF2P8AFFINEINVQB $47, Z12, Z14, K2, Z28 // 62438d4acfe42f + VGF2P8AFFINEINVQB $47, 99(R15)(R15*1), Z14, K2, Z28 // 62038d4acfa43f630000002f + VGF2P8AFFINEINVQB $47, (DX), Z14, K2, Z28 // 62638d4acf222f + VGF2P8AFFINEINVQB $47, Z3, Z28, K2, Z28 // 62639d42cfe32f + VGF2P8AFFINEINVQB $47, Z12, Z28, K2, Z28 // 62439d42cfe42f + VGF2P8AFFINEINVQB $47, 99(R15)(R15*1), Z28, K2, Z28 // 62039d42cfa43f630000002f + VGF2P8AFFINEINVQB $47, (DX), Z28, K2, Z28 // 62639d42cf222f + VGF2P8AFFINEINVQB $47, Z3, Z14, K2, Z13 // 62738d4acfeb2f + VGF2P8AFFINEINVQB $47, Z12, Z14, K2, Z13 // 62538d4acfec2f + VGF2P8AFFINEINVQB $47, 99(R15)(R15*1), Z14, K2, Z13 // 62138d4acfac3f630000002f + VGF2P8AFFINEINVQB $47, (DX), Z14, K2, Z13 // 62738d4acf2a2f + VGF2P8AFFINEINVQB $47, Z3, Z28, K2, Z13 // 62739d42cfeb2f + VGF2P8AFFINEINVQB $47, Z12, Z28, K2, Z13 // 62539d42cfec2f + VGF2P8AFFINEINVQB $47, 99(R15)(R15*1), Z28, K2, Z13 // 62139d42cfac3f630000002f + VGF2P8AFFINEINVQB $47, (DX), Z28, K2, Z13 // 62739d42cf2a2f + VGF2P8AFFINEQB $82, X22, X21, K1, X15 // 6233d501cefe52 + VGF2P8AFFINEQB $82, X7, X21, K1, X15 // 6273d501ceff52 + VGF2P8AFFINEQB $82, X19, X21, K1, X15 // 6233d501cefb52 + VGF2P8AFFINEQB $82, 17(SP)(BP*2), X21, K1, X15 // 6273d501cebc6c1100000052 + VGF2P8AFFINEQB $82, -7(DI)(R8*4), X21, K1, X15 // 6233d501cebc87f9ffffff52 + VGF2P8AFFINEQB $82, X22, X0, K1, X15 // 6233fd09cefe52 + VGF2P8AFFINEQB $82, X7, X0, K1, X15 // 6273fd09ceff52 + VGF2P8AFFINEQB $82, X19, X0, K1, X15 // 6233fd09cefb52 + VGF2P8AFFINEQB $82, 17(SP)(BP*2), X0, K1, X15 // 6273fd09cebc6c1100000052 + VGF2P8AFFINEQB $82, -7(DI)(R8*4), X0, K1, X15 // 6233fd09cebc87f9ffffff52 + VGF2P8AFFINEQB $82, X22, X28, K1, X15 // 62339d01cefe52 + VGF2P8AFFINEQB $82, X7, X28, K1, X15 // 62739d01ceff52 + VGF2P8AFFINEQB $82, X19, X28, K1, X15 // 62339d01cefb52 + VGF2P8AFFINEQB $82, 17(SP)(BP*2), X28, K1, X15 // 62739d01cebc6c1100000052 + VGF2P8AFFINEQB $82, -7(DI)(R8*4), X28, K1, X15 // 62339d01cebc87f9ffffff52 + 
VGF2P8AFFINEQB $82, X22, X21, K1, X0 // 62b3d501cec652 + VGF2P8AFFINEQB $82, X7, X21, K1, X0 // 62f3d501cec752 + VGF2P8AFFINEQB $82, X19, X21, K1, X0 // 62b3d501cec352 + VGF2P8AFFINEQB $82, 17(SP)(BP*2), X21, K1, X0 // 62f3d501ce846c1100000052 + VGF2P8AFFINEQB $82, -7(DI)(R8*4), X21, K1, X0 // 62b3d501ce8487f9ffffff52 + VGF2P8AFFINEQB $82, X22, X0, K1, X0 // 62b3fd09cec652 + VGF2P8AFFINEQB $82, X7, X0, K1, X0 // 62f3fd09cec752 + VGF2P8AFFINEQB $82, X19, X0, K1, X0 // 62b3fd09cec352 + VGF2P8AFFINEQB $82, 17(SP)(BP*2), X0, K1, X0 // 62f3fd09ce846c1100000052 + VGF2P8AFFINEQB $82, -7(DI)(R8*4), X0, K1, X0 // 62b3fd09ce8487f9ffffff52 + VGF2P8AFFINEQB $82, X22, X28, K1, X0 // 62b39d01cec652 + VGF2P8AFFINEQB $82, X7, X28, K1, X0 // 62f39d01cec752 + VGF2P8AFFINEQB $82, X19, X28, K1, X0 // 62b39d01cec352 + VGF2P8AFFINEQB $82, 17(SP)(BP*2), X28, K1, X0 // 62f39d01ce846c1100000052 + VGF2P8AFFINEQB $82, -7(DI)(R8*4), X28, K1, X0 // 62b39d01ce8487f9ffffff52 + VGF2P8AFFINEQB $82, X22, X21, K1, X16 // 62a3d501cec652 + VGF2P8AFFINEQB $82, X7, X21, K1, X16 // 62e3d501cec752 + VGF2P8AFFINEQB $82, X19, X21, K1, X16 // 62a3d501cec352 + VGF2P8AFFINEQB $82, 17(SP)(BP*2), X21, K1, X16 // 62e3d501ce846c1100000052 + VGF2P8AFFINEQB $82, -7(DI)(R8*4), X21, K1, X16 // 62a3d501ce8487f9ffffff52 + VGF2P8AFFINEQB $82, X22, X0, K1, X16 // 62a3fd09cec652 + VGF2P8AFFINEQB $82, X7, X0, K1, X16 // 62e3fd09cec752 + VGF2P8AFFINEQB $82, X19, X0, K1, X16 // 62a3fd09cec352 + VGF2P8AFFINEQB $82, 17(SP)(BP*2), X0, K1, X16 // 62e3fd09ce846c1100000052 + VGF2P8AFFINEQB $82, -7(DI)(R8*4), X0, K1, X16 // 62a3fd09ce8487f9ffffff52 + VGF2P8AFFINEQB $82, X22, X28, K1, X16 // 62a39d01cec652 + VGF2P8AFFINEQB $82, X7, X28, K1, X16 // 62e39d01cec752 + VGF2P8AFFINEQB $82, X19, X28, K1, X16 // 62a39d01cec352 + VGF2P8AFFINEQB $82, 17(SP)(BP*2), X28, K1, X16 // 62e39d01ce846c1100000052 + VGF2P8AFFINEQB $82, -7(DI)(R8*4), X28, K1, X16 // 62a39d01ce8487f9ffffff52 + VGF2P8AFFINEQB $126, Y17, Y12, K2, Y0 // 62b39d2acec17e + 
VGF2P8AFFINEQB $126, Y7, Y12, K2, Y0 // 62f39d2acec77e + VGF2P8AFFINEQB $126, Y9, Y12, K2, Y0 // 62d39d2acec17e + VGF2P8AFFINEQB $126, 15(R8)(R14*4), Y12, K2, Y0 // 62939d2ace84b00f0000007e + VGF2P8AFFINEQB $126, -7(CX)(DX*4), Y12, K2, Y0 // 62f39d2ace8491f9ffffff7e + VGF2P8AFFINEQB $126, Y17, Y1, K2, Y0 // 62b3f52acec17e + VGF2P8AFFINEQB $126, Y7, Y1, K2, Y0 // 62f3f52acec77e + VGF2P8AFFINEQB $126, Y9, Y1, K2, Y0 // 62d3f52acec17e + VGF2P8AFFINEQB $126, 15(R8)(R14*4), Y1, K2, Y0 // 6293f52ace84b00f0000007e + VGF2P8AFFINEQB $126, -7(CX)(DX*4), Y1, K2, Y0 // 62f3f52ace8491f9ffffff7e + VGF2P8AFFINEQB $126, Y17, Y14, K2, Y0 // 62b38d2acec17e + VGF2P8AFFINEQB $126, Y7, Y14, K2, Y0 // 62f38d2acec77e + VGF2P8AFFINEQB $126, Y9, Y14, K2, Y0 // 62d38d2acec17e + VGF2P8AFFINEQB $126, 15(R8)(R14*4), Y14, K2, Y0 // 62938d2ace84b00f0000007e + VGF2P8AFFINEQB $126, -7(CX)(DX*4), Y14, K2, Y0 // 62f38d2ace8491f9ffffff7e + VGF2P8AFFINEQB $126, Y17, Y12, K2, Y22 // 62a39d2acef17e + VGF2P8AFFINEQB $126, Y7, Y12, K2, Y22 // 62e39d2acef77e + VGF2P8AFFINEQB $126, Y9, Y12, K2, Y22 // 62c39d2acef17e + VGF2P8AFFINEQB $126, 15(R8)(R14*4), Y12, K2, Y22 // 62839d2aceb4b00f0000007e + VGF2P8AFFINEQB $126, -7(CX)(DX*4), Y12, K2, Y22 // 62e39d2aceb491f9ffffff7e + VGF2P8AFFINEQB $126, Y17, Y1, K2, Y22 // 62a3f52acef17e + VGF2P8AFFINEQB $126, Y7, Y1, K2, Y22 // 62e3f52acef77e + VGF2P8AFFINEQB $126, Y9, Y1, K2, Y22 // 62c3f52acef17e + VGF2P8AFFINEQB $126, 15(R8)(R14*4), Y1, K2, Y22 // 6283f52aceb4b00f0000007e + VGF2P8AFFINEQB $126, -7(CX)(DX*4), Y1, K2, Y22 // 62e3f52aceb491f9ffffff7e + VGF2P8AFFINEQB $126, Y17, Y14, K2, Y22 // 62a38d2acef17e + VGF2P8AFFINEQB $126, Y7, Y14, K2, Y22 // 62e38d2acef77e + VGF2P8AFFINEQB $126, Y9, Y14, K2, Y22 // 62c38d2acef17e + VGF2P8AFFINEQB $126, 15(R8)(R14*4), Y14, K2, Y22 // 62838d2aceb4b00f0000007e + VGF2P8AFFINEQB $126, -7(CX)(DX*4), Y14, K2, Y22 // 62e38d2aceb491f9ffffff7e + VGF2P8AFFINEQB $126, Y17, Y12, K2, Y13 // 62339d2acee97e + VGF2P8AFFINEQB $126, Y7, Y12, 
K2, Y13 // 62739d2aceef7e + VGF2P8AFFINEQB $126, Y9, Y12, K2, Y13 // 62539d2acee97e + VGF2P8AFFINEQB $126, 15(R8)(R14*4), Y12, K2, Y13 // 62139d2aceacb00f0000007e + VGF2P8AFFINEQB $126, -7(CX)(DX*4), Y12, K2, Y13 // 62739d2aceac91f9ffffff7e + VGF2P8AFFINEQB $126, Y17, Y1, K2, Y13 // 6233f52acee97e + VGF2P8AFFINEQB $126, Y7, Y1, K2, Y13 // 6273f52aceef7e + VGF2P8AFFINEQB $126, Y9, Y1, K2, Y13 // 6253f52acee97e + VGF2P8AFFINEQB $126, 15(R8)(R14*4), Y1, K2, Y13 // 6213f52aceacb00f0000007e + VGF2P8AFFINEQB $126, -7(CX)(DX*4), Y1, K2, Y13 // 6273f52aceac91f9ffffff7e + VGF2P8AFFINEQB $126, Y17, Y14, K2, Y13 // 62338d2acee97e + VGF2P8AFFINEQB $126, Y7, Y14, K2, Y13 // 62738d2aceef7e + VGF2P8AFFINEQB $126, Y9, Y14, K2, Y13 // 62538d2acee97e + VGF2P8AFFINEQB $126, 15(R8)(R14*4), Y14, K2, Y13 // 62138d2aceacb00f0000007e + VGF2P8AFFINEQB $126, -7(CX)(DX*4), Y14, K2, Y13 // 62738d2aceac91f9ffffff7e + VGF2P8AFFINEQB $94, Z5, Z19, K1, Z15 // 6273e541cefd5e + VGF2P8AFFINEQB $94, Z1, Z19, K1, Z15 // 6273e541cef95e + VGF2P8AFFINEQB $94, -17(BP)(SI*8), Z19, K1, Z15 // 6273e541cebcf5efffffff5e + VGF2P8AFFINEQB $94, (R15), Z19, K1, Z15 // 6253e541ce3f5e + VGF2P8AFFINEQB $94, Z5, Z15, K1, Z15 // 62738549cefd5e + VGF2P8AFFINEQB $94, Z1, Z15, K1, Z15 // 62738549cef95e + VGF2P8AFFINEQB $94, -17(BP)(SI*8), Z15, K1, Z15 // 62738549cebcf5efffffff5e + VGF2P8AFFINEQB $94, (R15), Z15, K1, Z15 // 62538549ce3f5e + VGF2P8AFFINEQB $94, Z5, Z19, K1, Z30 // 6263e541cef55e + VGF2P8AFFINEQB $94, Z1, Z19, K1, Z30 // 6263e541cef15e + VGF2P8AFFINEQB $94, -17(BP)(SI*8), Z19, K1, Z30 // 6263e541ceb4f5efffffff5e + VGF2P8AFFINEQB $94, (R15), Z19, K1, Z30 // 6243e541ce375e + VGF2P8AFFINEQB $94, Z5, Z15, K1, Z30 // 62638549cef55e + VGF2P8AFFINEQB $94, Z1, Z15, K1, Z30 // 62638549cef15e + VGF2P8AFFINEQB $94, -17(BP)(SI*8), Z15, K1, Z30 // 62638549ceb4f5efffffff5e + VGF2P8AFFINEQB $94, (R15), Z15, K1, Z30 // 62438549ce375e + VGF2P8MULB X15, X1, K7, X7 // 62d2750fcfff + VGF2P8MULB X12, X1, K7, X7 // 62d2750fcffc + 
VGF2P8MULB X0, X1, K7, X7 // 62f2750fcff8 + VGF2P8MULB 15(R8), X1, K7, X7 // 62d2750fcfb80f000000 + VGF2P8MULB (BP), X1, K7, X7 // 62f2750fcf7d00 + VGF2P8MULB X15, X7, K7, X7 // 62d2450fcfff + VGF2P8MULB X12, X7, K7, X7 // 62d2450fcffc + VGF2P8MULB X0, X7, K7, X7 // 62f2450fcff8 + VGF2P8MULB 15(R8), X7, K7, X7 // 62d2450fcfb80f000000 + VGF2P8MULB (BP), X7, K7, X7 // 62f2450fcf7d00 + VGF2P8MULB X15, X9, K7, X7 // 62d2350fcfff + VGF2P8MULB X12, X9, K7, X7 // 62d2350fcffc + VGF2P8MULB X0, X9, K7, X7 // 62f2350fcff8 + VGF2P8MULB 15(R8), X9, K7, X7 // 62d2350fcfb80f000000 + VGF2P8MULB (BP), X9, K7, X7 // 62f2350fcf7d00 + VGF2P8MULB X15, X1, K7, X16 // 62c2750fcfc7 + VGF2P8MULB X12, X1, K7, X16 // 62c2750fcfc4 + VGF2P8MULB X0, X1, K7, X16 // 62e2750fcfc0 + VGF2P8MULB 15(R8), X1, K7, X16 // 62c2750fcf800f000000 + VGF2P8MULB (BP), X1, K7, X16 // 62e2750fcf4500 + VGF2P8MULB X15, X7, K7, X16 // 62c2450fcfc7 + VGF2P8MULB X12, X7, K7, X16 // 62c2450fcfc4 + VGF2P8MULB X0, X7, K7, X16 // 62e2450fcfc0 + VGF2P8MULB 15(R8), X7, K7, X16 // 62c2450fcf800f000000 + VGF2P8MULB (BP), X7, K7, X16 // 62e2450fcf4500 + VGF2P8MULB X15, X9, K7, X16 // 62c2350fcfc7 + VGF2P8MULB X12, X9, K7, X16 // 62c2350fcfc4 + VGF2P8MULB X0, X9, K7, X16 // 62e2350fcfc0 + VGF2P8MULB 15(R8), X9, K7, X16 // 62c2350fcf800f000000 + VGF2P8MULB (BP), X9, K7, X16 // 62e2350fcf4500 + VGF2P8MULB X15, X1, K7, X31 // 6242750fcfff + VGF2P8MULB X12, X1, K7, X31 // 6242750fcffc + VGF2P8MULB X0, X1, K7, X31 // 6262750fcff8 + VGF2P8MULB 15(R8), X1, K7, X31 // 6242750fcfb80f000000 + VGF2P8MULB (BP), X1, K7, X31 // 6262750fcf7d00 + VGF2P8MULB X15, X7, K7, X31 // 6242450fcfff + VGF2P8MULB X12, X7, K7, X31 // 6242450fcffc + VGF2P8MULB X0, X7, K7, X31 // 6262450fcff8 + VGF2P8MULB 15(R8), X7, K7, X31 // 6242450fcfb80f000000 + VGF2P8MULB (BP), X7, K7, X31 // 6262450fcf7d00 + VGF2P8MULB X15, X9, K7, X31 // 6242350fcfff + VGF2P8MULB X12, X9, K7, X31 // 6242350fcffc + VGF2P8MULB X0, X9, K7, X31 // 6262350fcff8 + VGF2P8MULB 15(R8), X9, 
K7, X31 // 6242350fcfb80f000000 + VGF2P8MULB (BP), X9, K7, X31 // 6262350fcf7d00 + VGF2P8MULB Y2, Y28, K1, Y31 // 62621d21cffa + VGF2P8MULB Y21, Y28, K1, Y31 // 62221d21cffd + VGF2P8MULB Y12, Y28, K1, Y31 // 62421d21cffc + VGF2P8MULB (R8), Y28, K1, Y31 // 62421d21cf38 + VGF2P8MULB 15(DX)(BX*2), Y28, K1, Y31 // 62621d21cfbc5a0f000000 + VGF2P8MULB Y2, Y13, K1, Y31 // 62621529cffa + VGF2P8MULB Y21, Y13, K1, Y31 // 62221529cffd + VGF2P8MULB Y12, Y13, K1, Y31 // 62421529cffc + VGF2P8MULB (R8), Y13, K1, Y31 // 62421529cf38 + VGF2P8MULB 15(DX)(BX*2), Y13, K1, Y31 // 62621529cfbc5a0f000000 + VGF2P8MULB Y2, Y7, K1, Y31 // 62624529cffa + VGF2P8MULB Y21, Y7, K1, Y31 // 62224529cffd + VGF2P8MULB Y12, Y7, K1, Y31 // 62424529cffc + VGF2P8MULB (R8), Y7, K1, Y31 // 62424529cf38 + VGF2P8MULB 15(DX)(BX*2), Y7, K1, Y31 // 62624529cfbc5a0f000000 + VGF2P8MULB Y2, Y28, K1, Y8 // 62721d21cfc2 + VGF2P8MULB Y21, Y28, K1, Y8 // 62321d21cfc5 + VGF2P8MULB Y12, Y28, K1, Y8 // 62521d21cfc4 + VGF2P8MULB (R8), Y28, K1, Y8 // 62521d21cf00 + VGF2P8MULB 15(DX)(BX*2), Y28, K1, Y8 // 62721d21cf845a0f000000 + VGF2P8MULB Y2, Y13, K1, Y8 // 62721529cfc2 + VGF2P8MULB Y21, Y13, K1, Y8 // 62321529cfc5 + VGF2P8MULB Y12, Y13, K1, Y8 // 62521529cfc4 + VGF2P8MULB (R8), Y13, K1, Y8 // 62521529cf00 + VGF2P8MULB 15(DX)(BX*2), Y13, K1, Y8 // 62721529cf845a0f000000 + VGF2P8MULB Y2, Y7, K1, Y8 // 62724529cfc2 + VGF2P8MULB Y21, Y7, K1, Y8 // 62324529cfc5 + VGF2P8MULB Y12, Y7, K1, Y8 // 62524529cfc4 + VGF2P8MULB (R8), Y7, K1, Y8 // 62524529cf00 + VGF2P8MULB 15(DX)(BX*2), Y7, K1, Y8 // 62724529cf845a0f000000 + VGF2P8MULB Y2, Y28, K1, Y1 // 62f21d21cfca + VGF2P8MULB Y21, Y28, K1, Y1 // 62b21d21cfcd + VGF2P8MULB Y12, Y28, K1, Y1 // 62d21d21cfcc + VGF2P8MULB (R8), Y28, K1, Y1 // 62d21d21cf08 + VGF2P8MULB 15(DX)(BX*2), Y28, K1, Y1 // 62f21d21cf8c5a0f000000 + VGF2P8MULB Y2, Y13, K1, Y1 // 62f21529cfca + VGF2P8MULB Y21, Y13, K1, Y1 // 62b21529cfcd + VGF2P8MULB Y12, Y13, K1, Y1 // 62d21529cfcc + VGF2P8MULB (R8), Y13, K1, Y1 // 
62d21529cf08 + VGF2P8MULB 15(DX)(BX*2), Y13, K1, Y1 // 62f21529cf8c5a0f000000 + VGF2P8MULB Y2, Y7, K1, Y1 // 62f24529cfca + VGF2P8MULB Y21, Y7, K1, Y1 // 62b24529cfcd + VGF2P8MULB Y12, Y7, K1, Y1 // 62d24529cfcc + VGF2P8MULB (R8), Y7, K1, Y1 // 62d24529cf08 + VGF2P8MULB 15(DX)(BX*2), Y7, K1, Y1 // 62f24529cf8c5a0f000000 + VGF2P8MULB Z21, Z14, K1, Z3 // 62b20d49cfdd + VGF2P8MULB Z8, Z14, K1, Z3 // 62d20d49cfd8 + VGF2P8MULB 7(SI)(DI*8), Z14, K1, Z3 // 62f20d49cf9cfe07000000 + VGF2P8MULB -15(R14), Z14, K1, Z3 // 62d20d49cf9ef1ffffff + VGF2P8MULB Z21, Z15, K1, Z3 // 62b20549cfdd + VGF2P8MULB Z8, Z15, K1, Z3 // 62d20549cfd8 + VGF2P8MULB 7(SI)(DI*8), Z15, K1, Z3 // 62f20549cf9cfe07000000 + VGF2P8MULB -15(R14), Z15, K1, Z3 // 62d20549cf9ef1ffffff + VGF2P8MULB Z21, Z14, K1, Z5 // 62b20d49cfed + VGF2P8MULB Z8, Z14, K1, Z5 // 62d20d49cfe8 + VGF2P8MULB 7(SI)(DI*8), Z14, K1, Z5 // 62f20d49cfacfe07000000 + VGF2P8MULB -15(R14), Z14, K1, Z5 // 62d20d49cfaef1ffffff + VGF2P8MULB Z21, Z15, K1, Z5 // 62b20549cfed + VGF2P8MULB Z8, Z15, K1, Z5 // 62d20549cfe8 + VGF2P8MULB 7(SI)(DI*8), Z15, K1, Z5 // 62f20549cfacfe07000000 + VGF2P8MULB -15(R14), Z15, K1, Z5 // 62d20549cfaef1ffffff + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/vpclmulqdq_avx512f.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/vpclmulqdq_avx512f.s new file mode 100644 index 0000000000000000000000000000000000000000..86579d682ee62d93a3551daf5e160baab34864c6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/avx512enc/vpclmulqdq_avx512f.s @@ -0,0 +1,94 @@ +// Code generated by avx512test. DO NOT EDIT. 
+ +#include "../../../../../../runtime/textflag.h" + +TEXT asmtest_vpclmulqdq_avx512f(SB), NOSPLIT, $0 + VPCLMULQDQ $127, X22, X21, X15 // 6233550044fe7f or 6233d50044fe7f + VPCLMULQDQ $127, X7, X21, X15 // 6273550044ff7f or 6273d50044ff7f + VPCLMULQDQ $127, X19, X21, X15 // 6233550044fb7f or 6233d50044fb7f + VPCLMULQDQ $127, -17(BP)(SI*8), X21, X15 // 6273550044bcf5efffffff7f or 6273d50044bcf5efffffff7f + VPCLMULQDQ $127, (R15), X21, X15 // 62535500443f7f or 6253d500443f7f + VPCLMULQDQ $127, X22, X0, X15 // 62337d0844fe7f or 6233fd0844fe7f + VPCLMULQDQ $127, X19, X0, X15 // 62337d0844fb7f or 6233fd0844fb7f + VPCLMULQDQ $127, X22, X28, X15 // 62331d0044fe7f or 62339d0044fe7f + VPCLMULQDQ $127, X7, X28, X15 // 62731d0044ff7f or 62739d0044ff7f + VPCLMULQDQ $127, X19, X28, X15 // 62331d0044fb7f or 62339d0044fb7f + VPCLMULQDQ $127, -17(BP)(SI*8), X28, X15 // 62731d0044bcf5efffffff7f or 62739d0044bcf5efffffff7f + VPCLMULQDQ $127, (R15), X28, X15 // 62531d00443f7f or 62539d00443f7f + VPCLMULQDQ $127, X22, X21, X0 // 62b3550044c67f or 62b3d50044c67f + VPCLMULQDQ $127, X7, X21, X0 // 62f3550044c77f or 62f3d50044c77f + VPCLMULQDQ $127, X19, X21, X0 // 62b3550044c37f or 62b3d50044c37f + VPCLMULQDQ $127, -17(BP)(SI*8), X21, X0 // 62f355004484f5efffffff7f or 62f3d5004484f5efffffff7f + VPCLMULQDQ $127, (R15), X21, X0 // 62d3550044077f or 62d3d50044077f + VPCLMULQDQ $127, X22, X0, X0 // 62b37d0844c67f or 62b3fd0844c67f + VPCLMULQDQ $127, X19, X0, X0 // 62b37d0844c37f or 62b3fd0844c37f + VPCLMULQDQ $127, X22, X28, X0 // 62b31d0044c67f or 62b39d0044c67f + VPCLMULQDQ $127, X7, X28, X0 // 62f31d0044c77f or 62f39d0044c77f + VPCLMULQDQ $127, X19, X28, X0 // 62b31d0044c37f or 62b39d0044c37f + VPCLMULQDQ $127, -17(BP)(SI*8), X28, X0 // 62f31d004484f5efffffff7f or 62f39d004484f5efffffff7f + VPCLMULQDQ $127, (R15), X28, X0 // 62d31d0044077f or 62d39d0044077f + VPCLMULQDQ $127, X22, X21, X16 // 62a3550044c67f or 62a3d50044c67f + VPCLMULQDQ $127, X7, X21, X16 // 62e3550044c77f or 
62e3d50044c77f + VPCLMULQDQ $127, X19, X21, X16 // 62a3550044c37f or 62a3d50044c37f + VPCLMULQDQ $127, -17(BP)(SI*8), X21, X16 // 62e355004484f5efffffff7f or 62e3d5004484f5efffffff7f + VPCLMULQDQ $127, (R15), X21, X16 // 62c3550044077f or 62c3d50044077f + VPCLMULQDQ $127, X22, X0, X16 // 62a37d0844c67f or 62a3fd0844c67f + VPCLMULQDQ $127, X7, X0, X16 // 62e37d0844c77f or 62e3fd0844c77f + VPCLMULQDQ $127, X19, X0, X16 // 62a37d0844c37f or 62a3fd0844c37f + VPCLMULQDQ $127, -17(BP)(SI*8), X0, X16 // 62e37d084484f5efffffff7f or 62e3fd084484f5efffffff7f + VPCLMULQDQ $127, (R15), X0, X16 // 62c37d0844077f or 62c3fd0844077f + VPCLMULQDQ $127, X22, X28, X16 // 62a31d0044c67f or 62a39d0044c67f + VPCLMULQDQ $127, X7, X28, X16 // 62e31d0044c77f or 62e39d0044c77f + VPCLMULQDQ $127, X19, X28, X16 // 62a31d0044c37f or 62a39d0044c37f + VPCLMULQDQ $127, -17(BP)(SI*8), X28, X16 // 62e31d004484f5efffffff7f or 62e39d004484f5efffffff7f + VPCLMULQDQ $127, (R15), X28, X16 // 62c31d0044077f or 62c39d0044077f + VPCLMULQDQ $0, Y15, Y2, Y31 // 62436d2844ff00 or 6243ed2844ff00 + VPCLMULQDQ $0, Y22, Y2, Y31 // 62236d2844fe00 or 6223ed2844fe00 + VPCLMULQDQ $0, Y20, Y2, Y31 // 62236d2844fc00 or 6223ed2844fc00 + VPCLMULQDQ $0, 99(R15)(R15*4), Y2, Y31 // 62036d2844bcbf6300000000 or 6203ed2844bcbf6300000000 + VPCLMULQDQ $0, 15(DX), Y2, Y31 // 62636d2844ba0f00000000 or 6263ed2844ba0f00000000 + VPCLMULQDQ $0, Y15, Y13, Y31 // 6243152844ff00 or 6243952844ff00 + VPCLMULQDQ $0, Y22, Y13, Y31 // 6223152844fe00 or 6223952844fe00 + VPCLMULQDQ $0, Y20, Y13, Y31 // 6223152844fc00 or 6223952844fc00 + VPCLMULQDQ $0, 99(R15)(R15*4), Y13, Y31 // 6203152844bcbf6300000000 or 6203952844bcbf6300000000 + VPCLMULQDQ $0, 15(DX), Y13, Y31 // 6263152844ba0f00000000 or 6263952844ba0f00000000 + VPCLMULQDQ $0, Y15, Y27, Y31 // 6243252044ff00 or 6243a52044ff00 + VPCLMULQDQ $0, Y22, Y27, Y31 // 6223252044fe00 or 6223a52044fe00 + VPCLMULQDQ $0, Y20, Y27, Y31 // 6223252044fc00 or 6223a52044fc00 + VPCLMULQDQ $0, 99(R15)(R15*4), 
Y27, Y31 // 6203252044bcbf6300000000 or 6203a52044bcbf6300000000 + VPCLMULQDQ $0, 15(DX), Y27, Y31 // 6263252044ba0f00000000 or 6263a52044ba0f00000000 + VPCLMULQDQ $0, Y22, Y2, Y3 // 62b36d2844de00 or 62b3ed2844de00 + VPCLMULQDQ $0, Y20, Y2, Y3 // 62b36d2844dc00 or 62b3ed2844dc00 + VPCLMULQDQ $0, Y22, Y13, Y3 // 62b3152844de00 or 62b3952844de00 + VPCLMULQDQ $0, Y20, Y13, Y3 // 62b3152844dc00 or 62b3952844dc00 + VPCLMULQDQ $0, Y15, Y27, Y3 // 62d3252044df00 or 62d3a52044df00 + VPCLMULQDQ $0, Y22, Y27, Y3 // 62b3252044de00 or 62b3a52044de00 + VPCLMULQDQ $0, Y20, Y27, Y3 // 62b3252044dc00 or 62b3a52044dc00 + VPCLMULQDQ $0, 99(R15)(R15*4), Y27, Y3 // 62932520449cbf6300000000 or 6293a520449cbf6300000000 + VPCLMULQDQ $0, 15(DX), Y27, Y3 // 62f32520449a0f00000000 or 62f3a520449a0f00000000 + VPCLMULQDQ $0, Y22, Y2, Y14 // 62336d2844f600 or 6233ed2844f600 + VPCLMULQDQ $0, Y20, Y2, Y14 // 62336d2844f400 or 6233ed2844f400 + VPCLMULQDQ $0, Y22, Y13, Y14 // 6233152844f600 or 6233952844f600 + VPCLMULQDQ $0, Y20, Y13, Y14 // 6233152844f400 or 6233952844f400 + VPCLMULQDQ $0, Y15, Y27, Y14 // 6253252044f700 or 6253a52044f700 + VPCLMULQDQ $0, Y22, Y27, Y14 // 6233252044f600 or 6233a52044f600 + VPCLMULQDQ $0, Y20, Y27, Y14 // 6233252044f400 or 6233a52044f400 + VPCLMULQDQ $0, 99(R15)(R15*4), Y27, Y14 // 6213252044b4bf6300000000 or 6213a52044b4bf6300000000 + VPCLMULQDQ $0, 15(DX), Y27, Y14 // 6273252044b20f00000000 or 6273a52044b20f00000000 + VPCLMULQDQ $97, Z9, Z0, Z24 // 62437d4844c161 or 6243fd4844c161 + VPCLMULQDQ $97, Z3, Z0, Z24 // 62637d4844c361 or 6263fd4844c361 + VPCLMULQDQ $97, 7(SI)(DI*1), Z0, Z24 // 62637d4844843e0700000061 or 6263fd4844843e0700000061 + VPCLMULQDQ $97, 15(DX)(BX*8), Z0, Z24 // 62637d484484da0f00000061 or 6263fd484484da0f00000061 + VPCLMULQDQ $97, Z9, Z26, Z24 // 62432d4044c161 or 6243ad4044c161 + VPCLMULQDQ $97, Z3, Z26, Z24 // 62632d4044c361 or 6263ad4044c361 + VPCLMULQDQ $97, 7(SI)(DI*1), Z26, Z24 // 62632d4044843e0700000061 or 6263ad4044843e0700000061 + 
VPCLMULQDQ $97, 15(DX)(BX*8), Z26, Z24 // 62632d404484da0f00000061 or 6263ad404484da0f00000061 + VPCLMULQDQ $97, Z9, Z0, Z12 // 62537d4844e161 or 6253fd4844e161 + VPCLMULQDQ $97, Z3, Z0, Z12 // 62737d4844e361 or 6273fd4844e361 + VPCLMULQDQ $97, 7(SI)(DI*1), Z0, Z12 // 62737d4844a43e0700000061 or 6273fd4844a43e0700000061 + VPCLMULQDQ $97, 15(DX)(BX*8), Z0, Z12 // 62737d4844a4da0f00000061 or 6273fd4844a4da0f00000061 + VPCLMULQDQ $97, Z9, Z26, Z12 // 62532d4044e161 or 6253ad4044e161 + VPCLMULQDQ $97, Z3, Z26, Z12 // 62732d4044e361 or 6273ad4044e361 + VPCLMULQDQ $97, 7(SI)(DI*1), Z26, Z12 // 62732d4044a43e0700000061 or 6273ad4044a43e0700000061 + VPCLMULQDQ $97, 15(DX)(BX*8), Z26, Z12 // 62732d4044a4da0f00000061 or 6273ad4044a4da0f00000061 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/buildtagerror.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/buildtagerror.s new file mode 100644 index 0000000000000000000000000000000000000000..5a2d65b97846be09a4d6efac68f6381fcc1091ae --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/buildtagerror.s @@ -0,0 +1,8 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#define X 1 + +//go:build x // ERROR "misplaced //go:build comment" + diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/duperror.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/duperror.s new file mode 100644 index 0000000000000000000000000000000000000000..cd5934b01a87dd4330044486037c25c3d5973354 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/duperror.s @@ -0,0 +1,14 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +TEXT foo(SB), 0, $0 + RET +TEXT foo(SB), 0, $0 // ERROR "symbol foo redeclared" + RET + +GLOBL bar(SB), 0, $8 +GLOBL bar(SB), 0, $8 // ERROR "symbol bar redeclared" + +DATA bar+0(SB)/8, $0 +DATA bar+0(SB)/8, $0 // ERROR "overlapping DATA entry for bar" diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/loong64.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/loong64.s new file mode 100644 index 0000000000000000000000000000000000000000..51b195b4b01673d99781b4d2a336854df88dfd82 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/loong64.s @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "../../../../../runtime/textflag.h" +// TODO: cover more instruction + +TEXT foo(SB),DUPOK|NOSPLIT,$0 + JAL 1(PC) //CALL 1(PC) //00040054 + JAL (R4) //CALL (R4) //8100004c + // relocation in play so the assembled offset should be 0 + JAL foo(SB) //CALL foo(SB) //00000054 diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/loong64enc1.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/loong64enc1.s new file mode 100644 index 0000000000000000000000000000000000000000..ea6c569f9dface39837fe02613da0e57357ae9c5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/loong64enc1.s @@ -0,0 +1,235 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "../../../../../runtime/textflag.h" + +TEXT asmtest(SB),DUPOK|NOSPLIT,$0 +lable1: + BFPT 1(PC) // 00050048 + BFPT lable1 // BFPT 2 //1ffdff4b + +lable2: + BFPF 1(PC) // 00040048 + BFPF lable2 // BFPF 4 // 1ffcff4b + + // relocation in play so the assembled offset should be 0 + JMP foo(SB) // 00000050 + + JMP (R4) // 8000004c + JMP 1(PC) // 00040050 + MOVW $65536, R4 // 04020014 + MOVW $4096, R4 // 24000014 + MOVV $65536, R4 // 04020014 + MOVV $4096, R4 // 24000014 + MOVW R4, R5 // 85001700 + MOVV R4, R5 // 85001500 + MOVBU R4, R5 // 85fc4303 + SUB R4, R5, R6 // a6101100 + SUBV R4, R5, R6 // a6901100 + ADD R4, R5, R6 // a6101000 + ADDV R4, R5, R6 // a6901000 + AND R4, R5, R6 // a6901400 + SUB R4, R5 // a5101100 + SUBV R4, R5 // a5901100 + ADD R4, R5 // a5101000 + ADDV R4, R5 // a5901000 + AND R4, R5 // a5901400 + NEGW R4, R5 // 05101100 + NEGV R4, R5 // 05901100 + SLL R4, R5 // a5101700 + SLL R4, R5, R6 // a6101700 + SRL R4, R5 // a5901700 + SRL R4, R5, R6 // a6901700 + SRA R4, R5 // a5101800 + SRA R4, R5, R6 // a6101800 + ROTR R4, R5 // a5101b00 + ROTR R4, R5, R6 // a6101b00 + SLLV R4, R5 // a5901800 + SLLV R4, R5, R6 // a6901800 + ROTRV R4, R5 // a5901b00 + ROTRV R4, R5, R6 // a6901b00 + CLO R4, R5 // 85100000 + CLZ R4, R5 // 85140000 + ADDF F4, F5 // a5900001 + ADDF F4, R5, F6 // a6900001 + CMPEQF F4, R5 // a010120c + ABSF F4, F5 // 85041401 + MOVVF F4, F5 // 85181d01 + MOVF F4, F5 // 85941401 + MOVD F4, F5 // 85981401 + MOVW R4, result+16(FP) // 64608029 + MOVWU R4, result+16(FP) // 64608029 + MOVV R4, result+16(FP) // 6460c029 + MOVB R4, result+16(FP) // 64600029 + MOVBU R4, result+16(FP) // 64600029 + MOVWL R4, result+16(FP) // 6460002f + MOVVL R4, result+16(FP) // 6460802f + MOVW R4, 1(R5) // a4048029 + MOVWU R4, 1(R5) // a4048029 + MOVV R4, 1(R5) // a404c029 + MOVB R4, 1(R5) // a4040029 + MOVBU R4, 1(R5) // a4040029 + MOVWL R4, 1(R5) // a404002f + MOVVL R4, 1(R5) // a404802f + SC R4, 1(R5) // a4040021 + SCV R4, 1(R5) // a4040023 + MOVW y+8(FP), 
R4 // 64408028 + MOVWU y+8(FP), R4 // 6440802a + MOVV y+8(FP), R4 // 6440c028 + MOVB y+8(FP), R4 // 64400028 + MOVBU y+8(FP), R4 // 6440002a + MOVWL y+8(FP), R4 // 6440002e + MOVVL y+8(FP), R4 // 6440802e + MOVW 1(R5), R4 // a4048028 + MOVWU 1(R5), R4 // a404802a + MOVV 1(R5), R4 // a404c028 + MOVB 1(R5), R4 // a4040028 + MOVBU 1(R5), R4 // a404002a + MOVWL 1(R5), R4 // a404002e + MOVVL 1(R5), R4 // a404802e + LL 1(R5), R4 // a4040020 + LLV 1(R5), R4 // a4040022 + MOVW $4(R4), R5 // 8510c002 + MOVV $4(R4), R5 // 8510c002 + MOVW $-1, R4 // 04fcff02 + MOVV $-1, R4 // 04fcff02 + MOVW $1, R4 // 0404c002 + MOVV $1, R4 // 0404c002 + ADD $-1, R4, R5 // 85fcbf02 + ADD $-1, R4 // 84fcbf02 + ADDV $-1, R4, R5 // 85fcff02 + ADDV $-1, R4 // 84fcff02 + AND $1, R4, R5 // 85044003 + AND $1, R4 // 84044003 + SLL $4, R4, R5 // 85904000 + SLL $4, R4 // 84904000 + SRL $4, R4, R5 // 85904400 + SRL $4, R4 // 84904400 + SRA $4, R4, R5 // 85904800 + SRA $4, R4 // 84904800 + ROTR $4, R4, R5 // 85904c00 + ROTR $4, R4 // 84904c00 + SLLV $4, R4, R5 // 85104100 + SLLV $4, R4 // 84104100 + ROTRV $4, R4, R5 // 85104d00 + ROTRV $4, R4 // 84104d00 + SYSCALL // 00002b00 + BEQ R4, R5, 1(PC) // 85040058 + BEQ R4, 1(PC) // 80040040 + BEQ R4, R0, 1(PC) // 80040040 + BEQ R0, R4, 1(PC) // 80040040 + BNE R4, R5, 1(PC) // 8504005c + BNE R4, 1(PC) // 80040044 + BNE R4, R0, 1(PC) // 80040044 + BNE R0, R4, 1(PC) // 80040044 + BLTU R4, 1(PC) // 80040068 + MOVW y+8(FP), F4 // 6440002b + MOVF y+8(FP), F4 // 6440002b + MOVD y+8(FP), F4 // 6440802b + MOVW 1(F5), F4 // a404002b + MOVF 1(F5), F4 // a404002b + MOVD 1(F5), F4 // a404802b + MOVW F4, result+16(FP) // 6460402b + MOVF F4, result+16(FP) // 6460402b + MOVD F4, result+16(FP) // 6460c02b + MOVW F4, 1(F5) // a404402b + MOVF F4, 1(F5) // a404402b + MOVD F4, 1(F5) // a404c02b + MOVW R4, F5 // 85a41401 + MOVW F4, R5 // 85b41401 + MOVV R4, F5 // 85a81401 + MOVV F4, R5 // 85b81401 + WORD $74565 // 45230100 + BREAK R4, result+16(FP) // 64600006 + BREAK R4, 1(R5) // 
a4040006 + BREAK // 00002a00 + UNDEF // 00002a00 + + // mul + MUL R4, R5 // a5101c00 + MUL R4, R5, R6 // a6101c00 + MULV R4, R5 // a5901d00 + MULV R4, R5, R6 // a6901d00 + MULVU R4, R5 // a5901d00 + MULVU R4, R5, R6 // a6901d00 + MULHV R4, R5 // a5101e00 + MULHV R4, R5, R6 // a6101e00 + MULHVU R4, R5 // a5901e00 + MULHVU R4, R5, R6 // a6901e00 + REMV R4, R5 // a5902200 + REMV R4, R5, R6 // a6902200 + REMVU R4, R5 // a5902300 + REMVU R4, R5, R6 // a6902300 + DIVV R4, R5 // a5102200 + DIVV R4, R5, R6 // a6102200 + DIVVU R4, R5 // a5102300 + DIVVU R4, R5, R6 // a6102300 + + MOVH R4, result+16(FP) // 64604029 + MOVH R4, 1(R5) // a4044029 + MOVH y+8(FP), R4 // 64404028 + MOVH 1(R5), R4 // a4044028 + MOVHU R4, R5 // 8500cf00 + MOVHU R4, result+16(FP) // 64604029 + MOVHU R4, 1(R5) // a4044029 + MOVHU y+8(FP), R4 // 6440402a + MOVHU 1(R5), R4 // a404402a + MULU R4, R5 // a5101c00 + MULU R4, R5, R6 // a6101c00 + MULH R4, R5 // a5901c00 + MULH R4, R5, R6 // a6901c00 + MULHU R4, R5 // a5101d00 + MULHU R4, R5, R6 // a6101d00 + REM R4, R5 // a5902000 + REM R4, R5, R6 // a6902000 + REMU R4, R5 // a5902100 + REMU R4, R5, R6 // a6902100 + DIV R4, R5 // a5102000 + DIV R4, R5, R6 // a6102000 + DIVU R4, R5 // a5102100 + DIVU R4, R5, R6 // a6102100 + SRLV R4, R5 // a5101900 + SRLV R4, R5, R6 // a6101900 + SRLV $4, R4, R5 // 85104500 + SRLV $4, R4 // 84104500 + SRLV $32, R4, R5 // 85804500 + SRLV $32, R4 // 84804500 + + MASKEQZ R4, R5, R6 // a6101300 + MASKNEZ R4, R5, R6 // a6901300 + + MOVFD F4, F5 // 85241901 + MOVDF F4, F5 // 85181901 + MOVWF F4, F5 // 85101d01 + MOVFW F4, F5 // 85041b01 + MOVWD F4, F5 // 85201d01 + MOVDW F4, F5 // 85081b01 + NEGF F4, F5 // 85141401 + NEGD F4, F5 // 85181401 + ABSD F4, F5 // 85081401 + TRUNCDW F4, F5 // 85881a01 + TRUNCFW F4, F5 // 85841a01 + SQRTF F4, F5 // 85441401 + SQRTD F4, F5 // 85481401 + + DBAR // 00007238 + NOOP // 00004003 + + MOVWR R4, result+16(FP) // 6460402f + MOVWR R4, 1(R5) // a404402f + MOVWR y+8(FP), R4 // 6440402e + MOVWR 1(R5), 
R4 // a404402e + + CMPGTF F4, R5 // a090110c + CMPGTD F4, R5 // a090210c + CMPGEF F4, R5 // a090130c + CMPGED F4, R5 // a090230c + CMPEQD F4, R5 // a010220c + + RDTIMELW R4, R0 // 80600000 + RDTIMEHW R4, R0 // 80640000 + RDTIMED R4, R5 // 85680000 + + MOVV FCC0, R4 // 04dc1401 + MOVV R4, FCC0 // 80d81401 diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/loong64enc2.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/loong64enc2.s new file mode 100644 index 0000000000000000000000000000000000000000..00768365b698e560d586952e68483e1db4db87aa --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/loong64enc2.s @@ -0,0 +1,82 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "../../../../../runtime/textflag.h" + +TEXT asmtest(SB),DUPOK|NOSPLIT,$0 + MOVB R4, R5 // 85e04000a5e04800 + MOVWU R4, R5 // 85804100a5804500 + MOVW $74565, R4 // 4402001484148d03 + MOVW $4097, R4 // 2400001484048003 + MOVV $74565, R4 // 4402001484148d03 + MOVV $4097, R4 // 2400001484048003 + AND $-1, R4, R5 // 1efcbf0285f81400 + AND $-1, R4 // 1efcbf0284f81400 + MOVW $-1, F4 // 1efcbf02c4a71401 + MOVW $1, F4 // 1e048002c4a71401 + TEQ $4, R4, R5 // 8508005c04002a00 + TEQ $4, R4 // 0408005c04002a00 + TNE $4, R4, R5 // 8508005804002a00 + TNE $4, R4 // 0408005804002a00 + ADD $65536, R4, R5 // 1e02001485781000 + ADD $4096, R4, R5 // 3e00001485781000 + ADD $65536, R4 // 1e02001484781000 + ADD $4096, R4 // 3e00001484781000 + ADDV $65536, R4, R5 // 1e02001485f81000 + ADDV $4096, R4, R5 // 3e00001485f81000 + ADDV $65536, R4 // 1e02001484f81000 + ADDV $4096, R4 // 3e00001484f81000 + AND $65536, R4, R5 // 1e02001485f81400 + AND $4096, R4, R5 // 3e00001485f81400 + AND $65536, R4 // 1e02001484f81400 + AND $4096, R4 // 3e00001484f81400 + SGT $65536, R4, R5 // 1e02001485781200 + SGT $4096, R4, R5 // 3e00001485781200 + 
SGT $65536, R4 // 1e02001484781200 + SGT $4096, R4 // 3e00001484781200 + SGTU $65536, R4, R5 // 1e02001485f81200 + SGTU $4096, R4, R5 // 3e00001485f81200 + SGTU $65536, R4 // 1e02001484f81200 + SGTU $4096, R4 // 3e00001484f81200 + ADDU $65536, R4, R5 // 1e02001485781000 + ADDU $4096, R4, R5 // 3e00001485781000 + ADDU $65536, R4 // 1e02001484781000 + ADDU $4096, R4 // 3e00001484781000 + ADDVU $65536, R4, R5 // 1e02001485f81000 + ADDVU $4096, R4, R5 // 3e00001485f81000 + ADDVU $65536, R4 // 1e02001484f81000 + ADDVU $4096, R4 // 3e00001484f81000 + OR $65536, R4, R5 // 1e02001485781500 + OR $4096, R4, R5 // 3e00001485781500 + OR $65536, R4 // 1e02001484781500 + OR $4096, R4 // 3e00001484781500 + OR $-1, R4, R5 // 1efcbf0285781500 + OR $-1, R4 // 1efcbf0284781500 + XOR $65536, R4, R5 // 1e02001485f81500 + XOR $4096, R4, R5 // 3e00001485f81500 + XOR $65536, R4 // 1e02001484f81500 + XOR $4096, R4 // 3e00001484f81500 + XOR $-1, R4, R5 // 1efcbf0285f81500 + XOR $-1, R4 // 1efcbf0284f81500 + MOVH R4, R5 // 85c04000a5c04800 + + // relocation instructions + MOVW R4, name(SB) // 1e00001ac4038029 + MOVWU R4, name(SB) // 1e00001ac4038029 + MOVV R4, name(SB) // 1e00001ac403c029 + MOVB R4, name(SB) // 1e00001ac4030029 + MOVBU R4, name(SB) // 1e00001ac4030029 + MOVF F4, name(SB) // 1e00001ac403402b + MOVD F4, name(SB) // 1e00001ac403c02b + MOVW name(SB), R4 // 1e00001ac4038028 + MOVWU name(SB), R4 // 1e00001ac403802a + MOVV name(SB), R4 // 1e00001ac403c028 + MOVB name(SB), R4 // 1e00001ac4030028 + MOVBU name(SB), R4 // 1e00001ac403002a + MOVF name(SB), F4 // 1e00001ac403002b + MOVD name(SB), F4 // 1e00001ac403802b + MOVH R4, name(SB) // 1e00001ac4034029 + MOVH name(SB), R4 // 1e00001ac4034028 + MOVHU R4, name(SB) // 1e00001ac4034029 + MOVHU name(SB), R4 // 1e00001ac403402a diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/loong64enc3.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/loong64enc3.s new file mode 100644 index 
0000000000000000000000000000000000000000..eceb0d71d001c6215f1c4f3113d8223de8b0a90c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/loong64enc3.s @@ -0,0 +1,131 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "../../../../../runtime/textflag.h" + +TEXT asmtest(SB),DUPOK|NOSPLIT,$0 + MOVW $65536(R4), R5 // 1e020014de03800385f81000 + MOVW $4096(R4), R5 // 3e000014de03800385f81000 + MOVV $65536(R4), R5 // 1e020014de03800385f81000 + MOVV $4096(R4), R5 // 3e000014de03800385f81000 + ADD $74565, R4 // 5e020014de178d0384781000 + ADD $4097, R4 // 3e000014de07800384781000 + ADDV $74565, R4 // 5e020014de178d0384f81000 + ADDV $4097, R4 // 3e000014de07800384f81000 + AND $74565, R4 // 5e020014de178d0384f81400 + AND $4097, R4 // 3e000014de07800384f81400 + ADD $74565, R4, R5 // 5e020014de178d0385781000 + ADD $4097, R4, R5 // 3e000014de07800385781000 + ADDV $74565, R4, R5 // 5e020014de178d0385f81000 + ADDV $4097, R4, R5 // 3e000014de07800385f81000 + AND $74565, R4, R5 // 5e020014de178d0385f81400 + AND $4097, R4, R5 // 3e000014de07800385f81400 + + MOVW R4, result+65540(FP) // 1e020014de8f1000c4338029 + MOVW R4, result+4097(FP) // 3e000014de8f1000c4278029 + MOVWU R4, result+65540(FP) // 1e020014de8f1000c4338029 + MOVWU R4, result+4097(FP) // 3e000014de8f1000c4278029 + MOVV R4, result+65540(FP) // 1e020014de8f1000c433c029 + MOVV R4, result+4097(FP) // 3e000014de8f1000c427c029 + MOVB R4, result+65540(FP) // 1e020014de8f1000c4330029 + MOVB R4, result+4097(FP) // 3e000014de8f1000c4270029 + MOVBU R4, result+65540(FP) // 1e020014de8f1000c4330029 + MOVBU R4, result+4097(FP) // 3e000014de8f1000c4270029 + MOVW R4, 65536(R5) // 1e020014de971000c4038029 + MOVW R4, 4096(R5) // 3e000014de971000c4038029 + MOVWU R4, 65536(R5) // 1e020014de971000c4038029 + MOVWU R4, 4096(R5) // 3e000014de971000c4038029 + MOVV R4, 65536(R5) // 
1e020014de971000c403c029 + MOVV R4, 4096(R5) // 3e000014de971000c403c029 + MOVB R4, 65536(R5) // 1e020014de971000c4030029 + MOVB R4, 4096(R5) // 3e000014de971000c4030029 + MOVBU R4, 65536(R5) // 1e020014de971000c4030029 + MOVBU R4, 4096(R5) // 3e000014de971000c4030029 + SC R4, 65536(R5) // 1e020014de971000c4030021 + SC R4, 4096(R5) // 3e000014de971000c4030021 + MOVW y+65540(FP), R4 // 1e020014de8f1000c4338028 + MOVWU y+65540(FP), R4 // 1e020014de8f1000c433802a + MOVV y+65540(FP), R4 // 1e020014de8f1000c433c028 + MOVB y+65540(FP), R4 // 1e020014de8f1000c4330028 + MOVBU y+65540(FP), R4 // 1e020014de8f1000c433002a + MOVW y+4097(FP), R4 // 3e000014de8f1000c4278028 + MOVWU y+4097(FP), R4 // 3e000014de8f1000c427802a + MOVV y+4097(FP), R4 // 3e000014de8f1000c427c028 + MOVB y+4097(FP), R4 // 3e000014de8f1000c4270028 + MOVBU y+4097(FP), R4 // 3e000014de8f1000c427002a + MOVW 65536(R5), R4 // 1e020014de971000c4038028 + MOVWU 65536(R5), R4 // 1e020014de971000c403802a + MOVV 65536(R5), R4 // 1e020014de971000c403c028 + MOVB 65536(R5), R4 // 1e020014de971000c4030028 + MOVBU 65536(R5), R4 // 1e020014de971000c403002a + MOVW 4096(R5), R4 // 3e000014de971000c4038028 + MOVWU 4096(R5), R4 // 3e000014de971000c403802a + MOVV 4096(R5), R4 // 3e000014de971000c403c028 + MOVB 4096(R5), R4 // 3e000014de971000c4030028 + MOVBU 4096(R5), R4 // 3e000014de971000c403002a + MOVW y+65540(FP), F4 // 1e020014de8f1000c433002b + MOVF y+65540(FP), F4 // 1e020014de8f1000c433002b + MOVD y+65540(FP), F4 // 1e020014de8f1000c433802b + MOVW y+4097(FP), F4 // 3e000014de8f1000c427002b + MOVF y+4097(FP), F4 // 3e000014de8f1000c427002b + MOVD y+4097(FP), F4 // 3e000014de8f1000c427802b + MOVW 65536(R5), F4 // 1e020014de971000c403002b + MOVF 65536(R5), F4 // 1e020014de971000c403002b + MOVD 65536(R5), F4 // 1e020014de971000c403802b + MOVW 4096(R5), F4 // 3e000014de971000c403002b + MOVF 4096(R5), F4 // 3e000014de971000c403002b + MOVD 4096(R5), F4 // 3e000014de971000c403802b + MOVW F4, result+65540(FP) // 
1e020014de8f1000c433402b + MOVF F4, result+65540(FP) // 1e020014de8f1000c433402b + MOVD F4, result+65540(FP) // 1e020014de8f1000c433c02b + MOVW F4, result+4097(FP) // 3e000014de8f1000c427402b + MOVF F4, result+4097(FP) // 3e000014de8f1000c427402b + MOVD F4, result+4097(FP) // 3e000014de8f1000c427c02b + MOVW F4, 65536(R5) // 1e020014de971000c403402b + MOVF F4, 65536(R5) // 1e020014de971000c403402b + MOVD F4, 65536(R5) // 1e020014de971000c403c02b + MOVW F4, 4096(R5) // 3e000014de971000c403402b + MOVF F4, 4096(R5) // 3e000014de971000c403402b + MOVD F4, 4096(R5) // 3e000014de971000c403c02b + + MOVH R4, result+65540(FP) // 1e020014de8f1000c4334029 + MOVH R4, 65536(R5) // 1e020014de971000c4034029 + MOVH y+65540(FP), R4 // 1e020014de8f1000c4334028 + MOVH 65536(R5), R4 // 1e020014de971000c4034028 + MOVH R4, result+4097(FP) // 3e000014de8f1000c4274029 + MOVH R4, 4096(R5) // 3e000014de971000c4034029 + MOVH y+4097(FP), R4 // 3e000014de8f1000c4274028 + MOVH 4096(R5), R4 // 3e000014de971000c4034028 + MOVHU R4, result+65540(FP) // 1e020014de8f1000c4334029 + MOVHU R4, 65536(R5) // 1e020014de971000c4034029 + MOVHU y+65540(FP), R4 // 1e020014de8f1000c433402a + MOVHU 65536(R5), R4 // 1e020014de971000c403402a + MOVHU R4, result+4097(FP) // 3e000014de8f1000c4274029 + MOVHU R4, 4096(R5) // 3e000014de971000c4034029 + MOVHU y+4097(FP), R4 // 3e000014de8f1000c427402a + MOVHU 4096(R5), R4 // 3e000014de971000c403402a + SGT $74565, R4 // 5e020014de178d0384781200 + SGT $74565, R4, R5 // 5e020014de178d0385781200 + SGT $4097, R4 // 3e000014de07800384781200 + SGT $4097, R4, R5 // 3e000014de07800385781200 + SGTU $74565, R4 // 5e020014de178d0384f81200 + SGTU $74565, R4, R5 // 5e020014de178d0385f81200 + SGTU $4097, R4 // 3e000014de07800384f81200 + SGTU $4097, R4, R5 // 3e000014de07800385f81200 + ADDU $74565, R4 // 5e020014de178d0384781000 + ADDU $74565, R4, R5 // 5e020014de178d0385781000 + ADDU $4097, R4 // 3e000014de07800384781000 + ADDU $4097, R4, R5 // 3e000014de07800385781000 + ADDVU $4097, R4 
// 3e000014de07800384f81000 + ADDVU $4097, R4, R5 // 3e000014de07800385f81000 + ADDVU $74565, R4 // 5e020014de178d0384f81000 + ADDVU $74565, R4, R5 // 5e020014de178d0385f81000 + OR $74565, R4 // 5e020014de178d0384781500 + OR $74565, R4, R5 // 5e020014de178d0385781500 + OR $4097, R4 // 3e000014de07800384781500 + OR $4097, R4, R5 // 3e000014de07800385781500 + XOR $74565, R4 // 5e020014de178d0384f81500 + XOR $74565, R4, R5 // 5e020014de178d0385f81500 + XOR $4097, R4 // 3e000014de07800384f81500 + XOR $4097, R4, R5 // 3e000014de07800385f81500 diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/mips.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/mips.s new file mode 100644 index 0000000000000000000000000000000000000000..f65eba07ba338fdc192bdeb198f7993d1b50d6da --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/mips.s @@ -0,0 +1,448 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This input was created by taking the mips64 testcase and modified +// by hand. 
+ +#include "../../../../../runtime/textflag.h" + +TEXT foo(SB),DUPOK|NOSPLIT,$0 + + //inst: + // + // load ints and bytes + // + // LMOVW rreg ',' rreg + // { + // outcode(int($1), &$2, 0, &$4); + // } + MOVW R1, R2 + MOVW LO, R1 + MOVW HI, R1 + MOVW R1, LO + MOVW R1, HI + MOVW R1, R2 + MOVW LO, R1 + MOVW HI, R1 + MOVW R1, LO + MOVW R1, HI + + // LMOVW addr ',' rreg + // { + // outcode(int($1), &$2, 0, &$4); + // } + MOVW foo<>+3(SB), R2 + MOVW 16(R1), R2 + MOVW (R1), R2 + MOVW foo<>+3(SB), R2 + MOVW 16(R1), R2 + MOVW (R1), R2 + LL (R1), R2 + + // LMOVB rreg ',' rreg + // { + // outcode(int($1), &$2, 0, &$4); + // } + MOVB R1, R2 + + // LMOVB addr ',' rreg + // { + // outcode(int($1), &$2, 0, &$4); + // } + MOVB foo<>+3(SB), R2 + MOVB 16(R1), R2 + MOVB (R1), R2 + + // + // load floats + // + // LFMOV addr ',' freg + // { + // outcode(int($1), &$2, 0, &$4); + // } + MOVF foo<>+3(SB), F2 + MOVF 16(R1), F2 + MOVF (R1), F2 + + // LFMOV fimm ',' freg + // { + // outcode(int($1), &$2, 0, &$4); + // } + MOVF $0.1, F2 // MOVF $(0.10000000000000001), F2 + + // LFMOV freg ',' freg + // { + // outcode(int($1), &$2, 0, &$4); + // } + MOVF F1, F2 + + // LFMOV freg ',' addr + // { + // outcode(int($1), &$2, 0, &$4); + // } + MOVF F2, foo<>+3(SB) + MOVF F2, 16(R1) + MOVF F2, (R1) + + // + // store ints and bytes + // + // LMOVW rreg ',' addr + // { + // outcode(int($1), &$2, 0, &$4); + // } + MOVW R1, foo<>+3(SB) + MOVW R1, 16(R2) + MOVW R1, (R2) + MOVW R1, foo<>+3(SB) + MOVW R1, 16(R2) + MOVW R1, (R2) + SC R1, (R2) + + // LMOVB rreg ',' addr + // { + // outcode(int($1), &$2, 0, &$4); + // } + MOVB R1, foo<>+3(SB) + MOVB R1, 16(R2) + MOVB R1, (R2) + + // + // store floats + // + // LMOVW freg ',' addr + // { + // outcode(int($1), &$2, 0, &$4); + // } + MOVD F1, foo<>+3(SB) + MOVD F1, 16(R2) + MOVD F1, (R2) + + // + // floating point status + // + // LMOVW fpscr ',' freg + // { + // outcode(int($1), &$2, 0, &$4); + // } + MOVW FCR0, R1 + + // LMOVW freg ',' fpscr + // { + // 
outcode(int($1), &$2, 0, &$4); + // } + MOVW R1, FCR0 + + // LMOVW rreg ',' mreg + // { + // outcode(int($1), &$2, 0, &$4); + // } + MOVW R1, M1 + MOVW R1, M1 + + // LMOVW mreg ',' rreg + // { + // outcode(int($1), &$2, 0, &$4); + // } + MOVW M1, R1 + MOVW M1, R1 + + + // + // integer operations + // logical instructions + // shift instructions + // unary instructions + // + // LADDW rreg ',' sreg ',' rreg + // { + // outcode(int($1), &$2, int($4), &$6); + // } + ADD R1, R2, R3 + + // LADDW imm ',' sreg ',' rreg + // { + // outcode(int($1), &$2, int($4), &$6); + // } + ADD $1, R2, R3 + + // LADDW rreg ',' rreg + // { + // outcode(int($1), &$2, 0, &$4); + // } + ADD R1, R2 + + // LADDW imm ',' rreg + // { + // outcode(int($1), &$2, 0, &$4); + // } + ADD $4, R1 + + // LMUL rreg ',' rreg + // { + // outcode(int($1), &$2, 0, &$4); + // } + MUL R1, R2 + + // LSHW rreg ',' sreg ',' rreg + // { + // outcode(int($1), &$2, int($4), &$6); + // } + SLL R1, R2, R3 + + // LSHW rreg ',' rreg + // { + // outcode(int($1), &$2, 0, &$4); + // } + SLL R1, R2 + + // LSHW imm ',' sreg ',' rreg + // { + // outcode(int($1), &$2, int($4), &$6); + // } + SLL $4, R1, R2 + + // LSHW imm ',' rreg + // { + // outcode(int($1), &$2, 0, &$4); + // } + SLL $4, R1 + + // + // move immediate: macro for lui+or, addi, addis, and other combinations + // + // LMOVW imm ',' rreg + // { + // outcode(int($1), &$2, 0, &$4); + // } + MOVW $1, R1 + MOVW $1, R1 + + // LMOVW ximm ',' rreg + // { + // outcode(int($1), &$2, 0, &$4); + // } + MOVW $1, R1 + MOVW $foo(SB), R1 + MOVW $1, R1 + MOVW $foo(SB), R1 + + + // + // branch + // + // LBRA rel + // { + // outcode(int($1), &nullgen, 0, &$2); + // } + BEQ R1, 2(PC) +label0: + JMP 1(PC) + BEQ R1, 2(PC) + JMP label0+0 // JMP 66 + BEQ R1, 2(PC) + JAL 1(PC) // CALL 1(PC) + BEQ R1, 2(PC) + JAL label0+0 // CALL 66 + + // LBRA addr + // { + // outcode(int($1), &nullgen, 0, &$2); + // } + BEQ R1, 2(PC) + JMP 0(R1) // JMP (R1) + BEQ R1, 2(PC) + JMP foo+0(SB) // JMP 
foo(SB) + BEQ R1, 2(PC) + JAL 0(R1) // CALL (R1) + BEQ R1, 2(PC) + JAL foo+0(SB) // CALL foo(SB) + +// +// BEQ/BNE +// +// LBRA rreg ',' rel +// { +// outcode(int($1), &$2, 0, &$4); +// } +label1: + BEQ R1, 1(PC) + BEQ R1, label1 // BEQ R1, 81 + +// LBRA rreg ',' sreg ',' rel +// { +// outcode(int($1), &$2, 0, &$4); +// } +label2: + BEQ R1, R2, 1(PC) + BEQ R1, R2, label2 // BEQ R1, R2, 83 + +// +// other integer conditional branch +// +// LBRA rreg ',' rel +// { +// outcode(int($1), &$2, 0, &$4); +// } +label3: + BLTZ R1, 1(PC) + BLTZ R1, label3 // BLTZ R1, 85 + +// +// floating point conditional branch +// +// LBRA rel +label4: + BFPT 1(PC) + BFPT label4 // BFPT 87 + + + // + // floating point operate + // + // LFCONV freg ',' freg + // { + // outcode(int($1), &$2, 0, &$4); + // } + ABSD F1, F2 + + // LFADD freg ',' freg + // { + // outcode(int($1), &$2, 0, &$4); + // } + ADDD F1, F2 + + // LFADD freg ',' freg ',' freg + // { + // outcode(int($1), &$2, int($4.Reg), &$6); + // } + ADDD F1, F2, F3 + + // LFCMP freg ',' freg + // { + // outcode(int($1), &$2, 0, &$4); + // } + CMPEQD F1, F2 + + + // + // WORD + // + WORD $1 + + // + // NOP + // + // LNOP comma // asm doesn't support the trailing comma. + // { + // outcode(int($1), &nullgen, 0, &nullgen); + // } + NOP + + // LNOP rreg comma // asm doesn't support the trailing comma. + // { + // outcode(int($1), &$2, 0, &nullgen); + // } + NOP R2 + + // LNOP freg comma // asm doesn't support the trailing comma. + // { + // outcode(int($1), &$2, 0, &nullgen); + // } + NOP F2 + + // LNOP ',' rreg // asm doesn't support the leading comma. + // { + // outcode(int($1), &nullgen, 0, &$3); + // } + NOP R2 + + // LNOP ',' freg // asm doesn't support the leading comma. 
+ // { + // outcode(int($1), &nullgen, 0, &$3); + // } + NOP F2 + + // LNOP imm + // { + // outcode(int($1), &$2, 0, &nullgen); + // } + NOP $4 + + // + // special + // + SYSCALL + BREAK + SYNC + + // + // conditional move on zero/nonzero gp value + // + CMOVN R1, R2, R3 + CMOVZ R1, R2, R3 + + // + // conditional move on fp false/true + // + CMOVF R1, R2 + CMOVT R1, R2 + + // + // conditional traps + // + TEQ $1, R1, R2 + TEQ $1, R1 + + + // + // other + // + CLO R1, R2 + SQRTD F0, F1 + MUL R1, R2, R3 + + + // + // RET + // + // LRETRN comma // asm doesn't support the trailing comma. + // { + // outcode(int($1), &nullgen, 0, &nullgen); + // } + SYSCALL + BEQ R1, 2(PC) + RET + + + // More JMP/JAL cases, and canonical names JMP, CALL. + + JAL foo(SB) // CALL foo(SB) + BEQ R1, 2(PC) + JMP foo(SB) + CALL foo(SB) + RET foo(SB) + + // unary operation + NEGW R1, R2 // 00011023 + CLZ R1, R2 // 70221020 + CLO R1, R2 // 70221021 + + WSBH R1, R2 // 7c0110a0 + + SEB R1, R2 // 7c011420 + SEH R1, R2 // 7c011620 + + // to (Hi, Lo) + MADD R2, R1 // 70220000 + MSUB R2, R1 // 70220004 + MUL R2, R1 // 00220018 + + // END + // + // LEND comma // asm doesn't support the trailing comma. + // { + // outcode(int($1), &nullgen, 0, &nullgen); + // } + END diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/mips64.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/mips64.s new file mode 100644 index 0000000000000000000000000000000000000000..ea4bb80aecbce5741372c24f6b92394d57535fa3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/mips64.s @@ -0,0 +1,642 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This input was created by taking the ppc64 testcase and modified +// by hand. 
+ +#include "../../../../../runtime/textflag.h" + +TEXT foo(SB),DUPOK|NOSPLIT,$0 +// +// branch +// +// LBRA rel +// { +// outcode(int($1), &nullgen, 0, &$2); +// } + BEQ R1, 2(PC) +label0: + JMP 1(PC) // JMP 1(PC) // 10000001 + BEQ R1, 2(PC) + JMP label0+0 // JMP 3 // 1000fffd + BEQ R1, 2(PC) + JAL 1(PC) // CALL 1(PC) // 0c00000f + BEQ R1, 2(PC) + JAL label0+0 // CALL 3 // 0c000007 + +// LBRA addr +// { +// outcode(int($1), &nullgen, 0, &$2); +// } + BEQ R1, 2(PC) + JMP 0(R1) // JMP (R1) // 00200008 + BEQ R1, 2(PC) + JMP foo+0(SB) // JMP foo(SB) // 08000019 + BEQ R1, 2(PC) + JAL 0(R1) // CALL (R1) // 0020f809 + BEQ R1, 2(PC) + JAL foo+0(SB) // CALL foo(SB) // 0c000021 + +// +// BEQ/BNE +// +// LBRA rreg ',' rel +// { +// outcode(int($1), &$2, 0, &$4); +// } +label1: + BEQ R1, 1(PC) // BEQ R1, 1(PC) // 10200001 + BEQ R1, label1 // BEQ R1, 18 // 1020fffd + +// LBRA rreg ',' sreg ',' rel +// { +// outcode(int($1), &$2, 0, &$4); +// } +label2: + BEQ R1, R2, 1(PC) // BEQ R1, R2, 1(PC) // 10220001 + BEQ R1, R2, label2 // BEQ R1, R2, 20 // 1022fffd + +// +// other integer conditional branch +// +// LBRA rreg ',' rel +// { +// outcode(int($1), &$2, 0, &$4); +// } +label3: + BLTZ R1, 1(PC) // BLTZ R1, 1(PC) // 04200001 + BLTZ R1, label3 // BLTZ R1, 22 // 0420fffd + +// +// floating point conditional branch +// +// LBRA rel +label4: + BFPT 1(PC) // BFPT 1(PC) // 4501000100000000 + BFPT label4 // BFPT 24 // 4501fffd00000000 + +//inst: +// +// load ints and bytes +// +// LMOVV rreg ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVV R25, R17 // 00198825 + MOVV R1, R2 // 00011025 + MOVV LO, R1 // 00000812 + MOVV HI, R1 // 00000810 + MOVV R1, LO // 00200013 + MOVV R1, HI // 00200011 + + +// LMOVW rreg ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVW R1, R2 // 00011004 + MOVW LO, R1 // 00000812 + MOVW HI, R1 // 00000810 + MOVW R1, LO // 00200013 + MOVW R1, HI // 00200011 + MOVWU R14, R27 // 000ed83c001bd83e + +// LMOVH rreg ',' rreg +// { +// 
outcode(int($1), &$2, 0, &$4); +// } + MOVH R16, R27 // 0010dc00001bdc03 + MOVHU R1, R3 // 3023ffff + +// LMOVB rreg ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVB R8, R9 // 00084e0000094e03 + MOVBU R12, R17 // 319100ff + +// LMOVV addr ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVV foo<>+3(SB), R2 + MOVV (R5), R18 // dcb20000 + MOVV 8(R16), R4 // de040008 + MOVV -32(R14), R1 // ddc1ffe0 + LLV (R1), R2 // d0220000 + +// LMOVW addr ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVW foo<>+3(SB), R2 + MOVW (R11), R22 // 8d760000 + MOVW 1(R9), R24 // 8d380001 + MOVW -17(R24), R8 // 8f08ffef + MOVWU (R11), R22 // 9d760000 + MOVWU 1(R9), R24 // 9d380001 + MOVWU -17(R24), R8 // 9f08ffef + LL (R1), R2 // c0220000 + +// LMOVH addr ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVH foo<>+3(SB), R2 + MOVH (R20), R7 // 86870000 + MOVH 54(R11), R26 // 857a0036 + MOVH -42(R3), R20 // 8474ffd6 + MOVHU (R20), R7 // 96870000 + MOVHU 54(R11), R26 // 957a0036 + MOVHU -42(R3), R20 // 9474ffd6 + +// LMOVB addr ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVB foo<>+3(SB), R2 + MOVB (R4), R21 // 80950000 + MOVB 9(R19), R18 // 82720009 + MOVB -10(R19), R18 // 8272fff6 + MOVBU (R4), R21 // 90950000 + MOVBU 9(R19), R18 // 92720009 + MOVBU -10(R19), R18 // 9272fff6 + +// +// load floats +// +// LFMOV addr ',' freg +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVD foo<>+3(SB), F2 + MOVD 16(R1), F2 + MOVD (R1), F2 + +// LFMOV fimm ',' freg +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVD $0.1, F2 // MOVD $(0.10000000000000001), F2 + +// LFMOV freg ',' freg +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVD F1, F2 + +// LFMOV freg ',' addr +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVD F2, foo<>+3(SB) + MOVD F2, 16(R1) + MOVD F2, (R1) + +// +// store ints and bytes +// +// LMOVV rreg ',' addr +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVV R1, foo<>+3(SB) + MOVV R18, (R5) // fcb20000 + MOVV R4, 8(R16) 
// fe040008 + MOVV R1, -32(R14) // fdc1ffe0 + SCV R1, (R2) // f0410000 + +// LMOVW rreg ',' addr +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVW R1, foo<>+3(SB) + MOVW R8, (R3) // ac680000 + MOVW R11, 19(R2) // ac4b0013 + MOVW R25, -89(R22) // aed9ffa7 + MOVWU R8, (R3) // ac680000 + MOVWU R11, 19(R2) // ac4b0013 + MOVWU R25, -89(R22) // aed9ffa7 + SC R1, (R2) // e0410000 + +// LMOVH rreg ',' addr +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVH R13, (R7) // a4ed0000 + MOVH R10, 61(R23) // a6ea003d + MOVH R8, -33(R12) // a588ffdf + MOVHU R13, (R7) // a4ed0000 + MOVHU R10, 61(R23) // a6ea003d + MOVHU R8, -33(R12) // a588ffdf + +// LMOVB rreg ',' addr +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVB R1, foo<>+3(SB) + MOVB R5, -18(R4) // a085ffee + MOVB R10, 9(R13) // a1aa0009 + MOVB R15, (R13) // a1af0000 + MOVBU R5, -18(R4) // a085ffee + MOVBU R10, 9(R13) // a1aa0009 + MOVBU R15, (R13) // a1af0000 + +// +// store floats +// +// LMOVW freg ',' addr +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVD F1, foo<>+3(SB) + MOVD F1, 16(R2) + MOVD F1, (R2) + +// +// floating point status +// +// LMOVW fpscr ',' freg +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVW FCR31, R1 // 4441f800 + +// LMOVW freg ',' fpscr +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVW R1, FCR31 // 44c1f800 + +// LMOVW rreg ',' mreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVW R1, M1 // 40810800 + MOVV R1, M1 // 40a10800 + +// LMOVW mreg ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVW M1, R1 // 40010800 + MOVV M1, R1 // 40210800 + + +// +// integer operations +// logical instructions +// shift instructions +// unary instructions +// +// LADDW rreg ',' sreg ',' rreg +// { +// outcode(int($1), &$2, int($4), &$6); +// } + ADD R5, R9, R10 // 01255020 + ADDU R13, R14, R19 // 01cd9821 + ADDV R5, R9, R10 // 0125502c + ADDVU R13, R14, R19 // 01cd982d + +// LADDW imm ',' sreg ',' rreg +// { +// outcode(int($1), &$2, int($4), &$6); +// } + ADD $15176, R14, R9 
// 21c93b48 + ADD $-9, R5, R8 // 20a8fff7 + ADDU $10, R9, R9 // 2529000a + ADDV $15176, R14, R9 // 61c93b48 + ADDV $-9, R5, R8 // 60a8fff7 + ADDVU $10, R9, R9 // 6529000a + +// LADDW rreg ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + ADD R1, R2 // 00411020 + ADDU R1, R2 // 00411021 + ADDV R1, R2 // 0041102c + ADDVU R1, R2 // 0041102d + +// LADDW imm ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + ADD $4, R1 // 20210004 + ADDV $4, R1 // 60210004 + ADDU $4, R1 // 24210004 + ADDVU $4, R1 // 64210004 + ADD $-7193, R24 // 2318e3e7 + ADDV $-7193, R24 // 6318e3e7 + +// LSUBW rreg ',' sreg ',' rreg +// { +// outcode(int($1), &$2, int($4), &$6); +// } + SUB R6, R26, R27 // 0346d822 + SUBU R6, R26, R27 // 0346d823 + SUBV R16, R17, R26 // 0230d02e + SUBVU R16, R17, R26 // 0230d02f + +// LSUBW imm ',' sreg ',' rreg +// { +// outcode(int($1), &$2, int($4), &$6); +// } + SUB $-3126, R17, R22 // 22360c36 + SUB $3126, R17, R22 // 2236f3ca + SUBU $16384, R17, R12 // 262cc000 + SUBV $-6122, R10, R9 // 614917ea + SUBV $6122, R10, R9 // 6149e816 + SUBVU $1203, R17, R12 // 662cfb4d + +// LSUBW rreg ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + SUB R14, R13 // 01ae6822 + SUBU R14, R13 // 01ae6823 + SUBV R4, R3 // 0064182e + SUBVU R4, R3 // 0064182f +// LSUBW imm ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + SUB $6512, R13 // 21ade690 + SUB $-6512, R13 // 21ad1970 + SUBU $6512, R13 // 25ade690 + SUBV $9531, R16 // 6210dac5 + SUBV $-9531, R13 // 61ad253b + SUBVU $9531, R16 // 6610dac5 + +// LMUL rreg ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + MUL R19, R8 // 01130018 + MULU R21, R13 // 01b50019 + MULV R19, R8 // 0113001c + MULVU R21, R13 // 01b5001d + +// LDIV rreg ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + DIV R18, R22 // 02d2001a + DIVU R14, R9 // 012e001b + DIVV R8, R13 // 01a8001e + DIVVU R16, R19 // 0270001f + +// LREM rreg ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + REM R18, R22 // 02d2001a + 
REMU R14, R9 // 012e001b + REMV R8, R13 // 01a8001e + REMVU R16, R19 // 0270001f + +// LSHW rreg ',' sreg ',' rreg +// { +// outcode(int($1), &$2, int($4), &$6); +// } + SLL R1, R2, R3 // 00221804 + SLLV R10, R22, R21 // 0156a814 + SRL R27, R6, R17 // 03668806 + SRLV R27, R6, R17 // 03668816 + SRA R11, R19, R20 // 0173a007 + SRAV R20, R19, R19 // 02939817 + ROTR R19, R18, R20 // 0272a046 + ROTRV R9, R13, R16 // 012d8056 + +// LSHW rreg ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + SLL R1, R2 // 00221004 + SLLV R10, R22 // 0156b014 + SRL R27, R6 // 03663006 + SRLV R27, R6 // 03663016 + SRA R11, R19 // 01739807 + SRAV R20, R19 // 02939817 + ROTR R20, R19 // 02939846 + ROTRV R16, R9 // 02094856 + +// LSHW imm ',' sreg ',' rreg +// { +// outcode(int($1), &$2, int($4), &$6); +// } + SLL $19, R22, R21 // 0016acc0 + SLLV $19, R22, R21 // 0016acf8 + SRL $31, R6, R17 // 00068fc2 + SRLV $31, R6, R17 // 00068ffa + SRA $8, R8, R19 // 00089a03 + SRAV $19, R8, R7 // 00083cfb + ROTR $12, R8, R3 // 00281b02 + ROTRV $8, R22, R22 // 0036b23a + +// LSHW imm ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + SLL $19, R21 // 0015acc0 + SLLV $19, R21 // 0015acf8 + SRL $31, R17 // 00118fc2 + SRLV $31, R17 // 00118ffa + SRA $3, R12 // 000c60c3 + SRAV $12, R3 // 00031b3b + ROTR $12, R8 // 00284302 + ROTRV $63, R22 // 0036b7fe + + +// LAND/LXOR/LNOR/LOR rreg ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + AND R14, R8 // 010e4024 + XOR R15, R9 // 012f4826 + NOR R16, R10 // 01505027 + OR R17, R11 // 01715825 + +// LAND/LXOR/LOR imm ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + AND $11, R17, R7 // 3227000b + XOR $341, R1, R23 // 38370155 + OR $254, R25, R13 // 372d00fe +// +// move immediate: macro for lui+or, addi, addis, and other combinations +// +// LMOVW imm ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVW $1, R1 + MOVV $1, R1 + +// LMOVW ximm ',' rreg +// { +// outcode(int($1), &$2, 0, &$4); +// } + MOVW $1, R1 + MOVW $foo(SB), R1 + 
MOVV $1, R1 + MOVV $foo(SB), R1 + +// +// floating point operate +// +// LFCONV freg ',' freg +// { +// outcode(int($1), &$2, 0, &$4); +// } + ABSD F1, F2 + +// LFADD freg ',' freg +// { +// outcode(int($1), &$2, 0, &$4); +// } + ADDD F1, F2 + +// LFADD freg ',' freg ',' freg +// { +// outcode(int($1), &$2, int($4.Reg), &$6); +// } + ADDD F1, F2, F3 + +// LFCMP freg ',' freg +// { +// outcode(int($1), &$2, 0, &$4); +// } + CMPEQD F1, F2 + + +// +// WORD +// + WORD $1 // 00000001 + NOOP // 00000000 + SYNC // 0000000f + +// +// NOP +// +// LNOP comma // asm doesn't support the trailing comma. +// { +// outcode(int($1), &nullgen, 0, &nullgen); +// } + NOP + +// LNOP rreg comma // asm doesn't support the trailing comma. +// { +// outcode(int($1), &$2, 0, &nullgen); +// } + NOP R2 + +// LNOP freg comma // asm doesn't support the trailing comma. +// { +// outcode(int($1), &$2, 0, &nullgen); +// } + NOP F2 + +// LNOP ',' rreg // asm doesn't support the leading comma. +// { +// outcode(int($1), &nullgen, 0, &$3); +// } + NOP R2 + +// LNOP ',' freg // asm doesn't support the leading comma. +// { +// outcode(int($1), &nullgen, 0, &$3); +// } + NOP F2 + +// LNOP imm +// { +// outcode(int($1), &$2, 0, &nullgen); +// } + NOP $4 + +// +// special +// + SYSCALL + BREAK + // overloaded cache opcode: + BREAK R1, (R1) + +// +// RET +// +// LRETRN comma // asm doesn't support the trailing comma. +// { +// outcode(int($1), &nullgen, 0, &nullgen); +// } + SYSCALL + BEQ R1, 2(PC) + RET + + +// More JMP/JAL cases, and canonical names JMP, CALL. 
+ + JAL foo(SB) // CALL foo(SB) + BEQ R1, 2(PC) + JMP foo(SB) + CALL foo(SB) + RET foo(SB) + + // unary operation + NEGW R1, R2 // 00011023 + NEGV R1, R2 // 0001102f + + WSBH R1, R2 // 7c0110a0 + DSBH R1, R2 // 7c0110a4 + DSHD R1, R2 // 7c011164 + + SEB R1, R2 // 7c011420 + SEH R1, R2 // 7c011620 + + RET + +// MSA VMOVI + VMOVB $511, W0 // 7b0ff807 + VMOVH $24, W23 // 7b20c5c7 + VMOVW $-24, W15 // 7b5f43c7 + VMOVD $-511, W31 // 7b700fc7 + + VMOVB (R0), W8 // 78000220 + VMOVB 511(R3), W0 // 79ff1820 + VMOVB -512(R12), W21 // 7a006560 + VMOVH (R24), W12 // 7800c321 + VMOVH 110(R19), W8 // 78379a21 + VMOVH -70(R12), W3 // 7bdd60e1 + VMOVW (R3), W31 // 78001fe2 + VMOVW 64(R20), W16 // 7810a422 + VMOVW -104(R17), W24 // 7be68e22 + VMOVD (R3), W2 // 780018a3 + VMOVD 128(R23), W19 // 7810bce3 + VMOVD -256(R31), W0 // 7be0f823 + + VMOVB W8, (R0) // 78000224 + VMOVB W0, 511(R3) // 79ff1824 + VMOVB W21, -512(R12) // 7a006564 + VMOVH W12, (R24) // 7800c325 + VMOVH W8, 110(R19) // 78379a25 + VMOVH W3, -70(R12) // 7bdd60e5 + VMOVW W31, (R3) // 78001fe6 + VMOVW W16, 64(R20) // 7810a426 + VMOVW W24, -104(R17) // 7be68e26 + VMOVD W2, (R3) // 780018a7 + VMOVD W19, 128(R23) // 7810bce7 + VMOVD W0, -256(R31) // 7be0f827 + RET + +// END +// +// LEND comma // asm doesn't support the trailing comma. +// { +// outcode(int($1), &nullgen, 0, &nullgen); +// } + END diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/ppc64.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/ppc64.s new file mode 100644 index 0000000000000000000000000000000000000000..f84bc1491439a7ec6f26118950a1e0944e33a28f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/ppc64.s @@ -0,0 +1,1141 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This contains the majority of valid opcode combinations +// available in cmd/internal/obj/ppc64/asm9.go with +// their valid instruction encodings. + +#include "../../../../../runtime/textflag.h" + +// In case of index mode instructions, usage of +// (Rx)(R0) is equivalent to (Rx+R0) +// In case of base+displacement mode instructions if +// the offset is 0, usage of (Rx) is equivalent to 0(Rx) +TEXT asmtest(SB),DUPOK|NOSPLIT,$0 + // move constants + MOVD $1, R3 // 38600001 + MOVD $-1, R4 // 3880ffff + MOVD $65535, R5 // 6005ffff + MOVD $65536, R6 // 3cc00001 + MOVD $-32767, R5 // 38a08001 + MOVD $-32768, R6 // 38c08000 + MOVD $1234567, R5 // 6405001260a5d687 or 0600001238a0d687 + MOVW $1, R3 // 38600001 + MOVW $-1, R4 // 3880ffff + MOVW $65535, R5 // 6005ffff + MOVW $65536, R6 // 3cc00001 + MOVW $-32767, R5 // 38a08001 + MOVW $-32768, R6 // 38c08000 + MOVW $1234567, R5 // 6405001260a5d687 or 0600001238a0d687 + // Hex constant 0x80000001 + MOVW $2147483649, R5 // 6405800060a50001 or 0600800038a00001 + MOVD $2147483649, R5 // 6405800060a50001 or 0600800038a00001 + // Hex constant 0xFFFFFFFF80000001 + MOVD $-2147483647, R5 // 3ca0800060a50001 or 0603800038a00001 + // Hex constant 0xFFFFFFFE00000002 (load of constant on < power10, pli on >= power10 + MOVD $-8589934590, R5 // 3ca00000e8a50000 or 0602000038a00002 + + // For backwards compatibility, MOVW $const,Rx and MOVWZ $const,Rx assemble identically + // and accept the same constants. + MOVW $2147483648, R5 // 64058000 + MOVWZ $-2147483648, R5 // 3ca08000 + + // TODO: These are preprocessed by the assembler into MOVD $const>>shift, R5; SLD $shift, R5. + // This only captures the MOVD. Should the SLD be appended to the encoding by the test? + // Hex constant 0x20004000000 + MOVD $2199090364416, R5 // 60058001 + // Hex constant 0xFFFFFE0004000000 + MOVD $-2198956146688, R5 // 38a08001 + // TODO: On GOPPC64={power8,power9}, this is preprocessed into MOVD $-1, R5; RLDC R5, $33, $63, R5. 
+ // This only captures the MOVD. Should the RLDC be appended to the encoding by the test? + // Hex constant 0xFFFFFFFE00000001 + MOVD $-8589934591, R5 // 38a0ffff or 0602000038a00001 + + // For #66955. Verify this opcode turns into a load and assembles. + MOVD $-6795364578871345152, R5 // 3ca00000e8a50000 or 04100000e4a00000 + + MOVD 8(R3), R4 // e8830008 + MOVD (R3)(R4), R5 // 7ca4182a + MOVD (R3)(R0), R5 // 7ca0182a + MOVD (R3), R5 // e8a30000 + MOVW 4(R3), R4 // e8830006 + MOVW (R3)(R4), R5 // 7ca41aaa + MOVW (R3)(R0), R5 // 7ca01aaa + MOVW (R3), R5 // e8a30002 + MOVWZ 4(R3), R4 // 80830004 + MOVWZ (R3)(R4), R5 // 7ca4182e + MOVWZ (R3)(R0), R5 // 7ca0182e + MOVWZ (R3), R5 // 80a30000 + MOVH 4(R3), R4 // a8830004 + MOVH (R3)(R4), R5 // 7ca41aae + MOVH (R3)(R0), R5 // 7ca01aae + MOVH (R3), R5 // a8a30000 + + MOVHZ 2(R3), R4 // a0830002 + MOVHZ (R3)(R4), R5 // 7ca41a2e + MOVHZ (R3)(R0), R5 // 7ca01a2e + MOVHZ (R3), R5 // a0a30000 + MOVB 1(R3), R4 // 888300017c840774 + MOVB (R3)(R4), R5 // 7ca418ae7ca50774 + MOVB (R3)(R0), R5 // 7ca018ae7ca50774 + MOVB (R3), R5 // 88a300007ca50774 + MOVBZ 1(R3), R4 // 88830001 + MOVBZ (R3)(R4), R5 // 7ca418ae + MOVBZ (R3)(R0), R5 // 7ca018ae + MOVBZ (R3), R5 // 88a30000 + MOVDBR (R3)(R4), R5 // 7ca41c28 + MOVDBR (R3)(R0), R5 // 7ca01c28 + MOVDBR (R3), R5 // 7ca01c28 + MOVWBR (R3)(R4), R5 // 7ca41c2c + MOVWBR (R3)(R0), R5 // 7ca01c2c + MOVWBR (R3), R5 // 7ca01c2c + MOVHBR (R3)(R4), R5 // 7ca41e2c + MOVHBR (R3)(R0), R5 // 7ca01e2c + MOVHBR (R3), R5 // 7ca01e2c + OR $0, R0, R0 + MOVD $foo+4009806848(FP), R5 // 3ca1ef0138a5cc40 or 0600ef0038a1cc40 + MOVD $foo(SB), R5 // 3ca0000038a50000 or 0610000038a00000 + + MOVDU 8(R3), R4 // e8830009 + MOVDU (R3)(R4), R5 // 7ca4186a + MOVDU (R3)(R0), R5 // 7ca0186a + MOVDU (R3), R5 // e8a30001 + MOVWU (R3)(R4), R5 // 7ca41aea + MOVWU (R3)(R0), R5 // 7ca01aea + MOVWZU 4(R3), R4 // 84830004 + MOVWZU (R3)(R4), R5 // 7ca4186e + MOVWZU (R3)(R0), R5 // 7ca0186e + MOVWZU (R3), R5 // 84a30000 + MOVHU 
2(R3), R4 // ac830002 + MOVHU (R3)(R4), R5 // 7ca41aee + MOVHU (R3)(R0), R5 // 7ca01aee + MOVHU (R3), R5 // aca30000 + MOVHZU 2(R3), R4 // a4830002 + MOVHZU (R3)(R4), R5 // 7ca41a6e + MOVHZU (R3)(R0), R5 // 7ca01a6e + MOVHZU (R3), R5 // a4a30000 + MOVBU 1(R3), R4 // 8c8300017c840774 + MOVBU (R3)(R4), R5 // 7ca418ee7ca50774 + MOVBU (R3)(R0), R5 // 7ca018ee7ca50774 + MOVBU (R3), R5 // 8ca300007ca50774 + MOVBZU 1(R3), R4 // 8c830001 + MOVBZU (R3)(R4), R5 // 7ca418ee + MOVBZU (R3)(R0), R5 // 7ca018ee + MOVBZU (R3), R5 // 8ca30000 + + MOVD R4, 8(R3) // f8830008 + MOVD R5, (R3)(R4) // 7ca4192a + MOVD R5, (R3)(R0) // 7ca0192a + MOVD R5, (R3) // f8a30000 + MOVW R4, 4(R3) // 90830004 + MOVW R5, (R3)(R4) // 7ca4192e + MOVW R5, (R3)(R0) // 7ca0192e + MOVW R5, (R3) // 90a30000 + MOVH R4, 2(R3) // b0830002 + MOVH R5, (R3)(R4) // 7ca41b2e + MOVH R5, (R3)(R0) // 7ca01b2e + MOVH R5, (R3) // b0a30000 + MOVB R4, 1(R3) // 98830001 + MOVB R5, (R3)(R4) // 7ca419ae + MOVB R5, (R3)(R0) // 7ca019ae + MOVB R5, (R3) // 98a30000 + MOVDBR R5, (R3)(R4) // 7ca41d28 + MOVDBR R5, (R3)(R0) // 7ca01d28 + MOVDBR R5, (R3) // 7ca01d28 + MOVWBR R5, (R3)(R4) // 7ca41d2c + MOVWBR R5, (R3)(R0) // 7ca01d2c + MOVWBR R5, (R3) // 7ca01d2c + MOVHBR R5, (R3)(R4) // 7ca41f2c + MOVHBR R5, (R3)(R0) // 7ca01f2c + MOVHBR R5, (R3) // 7ca01f2c + + MOVDU R4, 8(R3) // f8830009 + MOVDU R5, (R3)(R4) // 7ca4196a + MOVDU R5, (R3)(R0) // 7ca0196a + MOVDU R5, (R3) // f8a30001 + MOVWU R4, 4(R3) // 94830004 + MOVWU R5, (R3)(R4) // 7ca4196e + MOVWU R5, (R3)(R0) // 7ca0196e + MOVHU R4, 2(R3) // b4830002 + MOVHU R5, (R3)(R4) // 7ca41b6e + MOVHU R5, (R3)(R0) // 7ca01b6e + MOVHU R5, (R3) // b4a30000 + MOVBU R4, 1(R3) // 9c830001 + MOVBU R5, (R3)(R4) // 7ca419ee + MOVBU R5, (R3)(R0) // 7ca019ee + MOVBU R5, (R3) // 9ca30000 + + MOVB $0, R4 // 38800000 + MOVBZ $0, R4 // 38800000 + MOVH $0, R4 // 38800000 + MOVHZ $0, R4 // 38800000 + MOVW $0, R4 // 38800000 + MOVWZ $0, R4 // 38800000 + MOVD $0, R4 // 38800000 + MOVD $0, R0 // 38000000 + 
+ ADD $1, R3 // 38630001 + ADD $1, R3, R4 // 38830001 + ADD $-1, R4 // 3884ffff + ADD $-1, R4, R5 // 38a4ffff + ADD $65535, R5 // 601fffff7cbf2a14 or 0600000038a5ffff + ADD $65535, R5, R6 // 601fffff7cdf2a14 or 0600000038c5ffff + ADD $65536, R6 // 3cc60001 + ADD $65536, R6, R7 // 3ce60001 + ADD $-32767, R5 // 38a58001 + ADD $-32767, R5, R4 // 38858001 + ADD $-32768, R6 // 38c68000 + ADD $-32768, R6, R5 // 38a68000 + // Hex constant 0xFFFFFFFE00000000 + ADD $-8589934592, R5 // 3fe0fffe600000007bff83e4600000007cbf2a14 or 0602000038a50000 + // Hex constant 0xFFFFFFFE00010001 + ADD $-8589869055, R5 // 3fe0fffe63ff00017bff83e463ff00017cbf2a14 or 0602000138a50001 + + //TODO: this compiles to add r5,r6,r0. It should be addi r5,r6,0. + // this is OK since r0 == $0, but the latter is preferred. + ADD $0, R6, R5 // 7ca60214 + + ADD $1234567, R5 // 641f001263ffd6877cbf2a14 or 0600001238a5d687 + ADD $1234567, R5, R6 // 641f001263ffd6877cdf2a14 or 0600001238c5d687 + ADDEX R3, R5, $3, R6 // 7cc32f54 + ADDEX R3, $3, R5, R6 // 7cc32f54 + ADDIS $8, R3 // 3c630008 + ADD $524288, R3 // 3c630008 + ADDIS $1000, R3, R4 // 3c8303e8 + + ANDCC $1, R3 // 70630001 + ANDCC $1, R3, R4 // 70640001 + ANDCC $-1, R4 // 3be0ffff7fe42039 + ANDCC $-1, R4, R5 // 3be0ffff7fe52039 + ANDCC $65535, R5 // 70a5ffff + ANDCC $65535, R5, R6 // 70a6ffff + ANDCC $65536, R6 // 74c60001 + ANDCC $65536, R6, R7 // 74c70001 + ANDCC $-32767, R5 // 3be080017fe52839 + ANDCC $-32767, R5, R4 // 3be080017fe42839 + ANDCC $-32768, R6 // 3be080007fe63039 + ANDCC $-32768, R5, R6 // 3be080007fe62839 + ANDCC $1234567, R5 // 641f001263ffd6877fe52839 + ANDCC $1234567, R5, R6 // 641f001263ffd6877fe62839 + ANDISCC $1, R3 // 74630001 + ANDISCC $1000, R3, R4 // 746403e8 + ANDCC $65536000, R3, R4 // 746403e8 + + OR $1, R3 // 60630001 + OR $1, R3, R4 // 60640001 + OR $-1, R4 // 3be0ffff7fe42378 + OR $-1, R4, R5 // 3be0ffff7fe52378 + OR $65535, R5 // 60a5ffff + OR $65535, R5, R6 // 60a6ffff + OR $65536, R6 // 64c60001 + OR $65536, R6, R7 
// 64c70001 + OR $-32767, R5 // 3be080017fe52b78 + OR $-32767, R5, R6 // 3be080017fe62b78 + OR $-32768, R6 // 3be080007fe63378 + OR $-32768, R6, R7 // 3be080007fe73378 + OR $1234567, R5 // 641f001263ffd6877fe52b78 + OR $1234567, R5, R3 // 641f001263ffd6877fe32b78 + OR $2147483648, R5, R3 // 64a38000 + OR $2147483649, R5, R3 // 641f800063ff00017fe32b78 + ORIS $255, R3, R4 // 646400ff + OR $16711680, R3, R4 // 646400ff + + XOR $1, R3 // 68630001 + XOR $1, R3, R4 // 68640001 + XOR $-1, R4 // 3be0ffff7fe42278 + XOR $-1, R4, R5 // 3be0ffff7fe52278 + XOR $65535, R5 // 68a5ffff + XOR $65535, R5, R6 // 68a6ffff + XOR $65536, R6 // 6cc60001 + XOR $65536, R6, R7 // 6cc70001 + XOR $-32767, R5 // 3be080017fe52a78 + XOR $-32767, R5, R6 // 3be080017fe62a78 + XOR $-32768, R6 // 3be080007fe63278 + XOR $-32768, R6, R7 // 3be080007fe73278 + XOR $1234567, R5 // 641f001263ffd6877fe52a78 + XOR $1234567, R5, R3 // 641f001263ffd6877fe32a78 + XORIS $15, R3, R4 // 6c64000f + XOR $983040, R3, R4 // 6c64000f + + // TODO: the order of CR operands don't match + CMP R3, R4 // 7c232000 + CMPU R3, R4 // 7c232040 + CMPW R3, R4 // 7c032000 + CMPWU R3, R4 // 7c032040 + CMPB R3,R4,R4 // 7c6423f8 + CMPEQB R3,R4,CR6 // 7f0321c0 + + ADD R3, R4 // 7c841a14 + ADD R3, R4, R5 // 7ca41a14 + ADDC R3, R4 // 7c841814 + ADDC R3, R4, R5 // 7ca41814 + ADDCC R3, R4, R5 // 7ca41a15 + ADDE R3, R4 // 7c841914 + ADDECC R3, R4 // 7c841915 + ADDEV R3, R4 // 7c841d14 + ADDEVCC R3, R4 // 7c841d15 + ADDV R3, R4 // 7c841e14 + ADDVCC R3, R4 // 7c841e15 + ADDCCC R3, R4, R5 // 7ca41815 + ADDCCC $65536, R4, R5 // 641f0001600000007cbf2015 + ADDCCC $65537, R4, R5 // 641f000163ff00017cbf2015 + ADDME R3, R4 // 7c8301d4 + ADDMECC R3, R4 // 7c8301d5 + ADDMEV R3, R4 // 7c8305d4 + ADDMEVCC R3, R4 // 7c8305d5 + ADDCV R3, R4 // 7c841c14 + ADDCVCC R3, R4 // 7c841c15 + ADDZE R3, R4 // 7c830194 + ADDZECC R3, R4 // 7c830195 + ADDZEV R3, R4 // 7c830594 + ADDZEVCC R3, R4 // 7c830595 + SUBME R3, R4 // 7c8301d0 + SUBMECC R3, R4 // 7c8301d1 + 
SUBMEV R3, R4 // 7c8305d0 + SUBZE R3, R4 // 7c830190 + SUBZECC R3, R4 // 7c830191 + SUBZEV R3, R4 // 7c830590 + SUBZEVCC R3, R4 // 7c830591 + + AND R3, R4 // 7c841838 + AND R3, R4, R5 // 7c851838 + ANDN R3, R4, R5 // 7c851878 + ANDCC R3, R4, R5 // 7c851839 + ANDNCC R3, R4, R5 // 7c851879 + OR R3, R4 // 7c841b78 + OR R3, R4, R5 // 7c851b78 + ORN R3, R4, R5 // 7c851b38 + ORCC R3, R4, R5 // 7c851b79 + ORNCC R3, R4, R5 // 7c851b39 + XOR R3, R4 // 7c841a78 + XOR R3, R4, R5 // 7c851a78 + XORCC R3, R4, R5 // 7c851a79 + NAND R3, R4, R5 // 7c851bb8 + NANDCC R3, R4, R5 // 7c851bb9 + EQV R3, R4, R5 // 7c851a38 + EQVCC R3, R4, R5 // 7c851a39 + NOR R3, R4, R5 // 7c8518f8 + NORCC R3, R4, R5 // 7c8518f9 + + SUB R3, R4 // 7c832050 + SUB R3, R4, R5 // 7ca32050 + SUBC R3, R4 // 7c832010 + SUBC R3, R4, R5 // 7ca32010 + SUBCC R3, R4, R5 // 7ca32051 + SUBVCC R3, R4, R5 // 7ca32451 + SUBCCC R3, R4, R5 // 7ca32011 + SUBCV R3, R4, R5 // 7ca32410 + SUBCVCC R3, R4, R5 // 7ca32411 + SUBMEVCC R3, R4 // 7c8305d1 + SUBV R3, R4, R5 // 7ca32450 + SUBE R3, R4, R5 // 7ca32110 + SUBECC R3, R4, R5 // 7ca32111 + SUBEV R3, R4, R5 // 7ca32510 + SUBEVCC R3, R4, R5 // 7ca32511 + SUBC R3, $65536, R4 // 3fe00001600000007c83f810 + SUBC R3, $65537, R4 // 3fe0000163ff00017c83f810 + + MULLW R3, R4 // 7c8419d6 + MULLW R3, R4, R5 // 7ca419d6 + MULLW $10, R3 // 1c63000a + MULLW $10000000, R3 // 641f009863ff96807c7f19d6 + + MULLWCC R3, R4, R5 // 7ca419d7 + MULHW R3, R4, R5 // 7ca41896 + + MULHWU R3, R4, R5 // 7ca41816 + MULLD R3, R4 // 7c8419d2 + MULLD R4, R4, R5 // 7ca421d2 + MULLD $20, R4 // 1c840014 + MULLD $200000000, R4 // 641f0beb63ffc2007c9f21d2 + + MULLDCC R3, R4, R5 // 7ca419d3 + MULHD R3, R4, R5 // 7ca41892 + MULHDCC R3, R4, R5 // 7ca41893 + MULHDU R3, R4, R5 // 7ca41812 + MULHDUCC R3, R4, R5 // 7ca41813 + + MULLWV R3, R4 // 7c841dd6 + MULLWV R3, R4, R5 // 7ca41dd6 + MULLWVCC R3, R4, R5 // 7ca41dd7 + MULHWUCC R3, R4, R5 // 7ca41817 + MULLDV R3, R4, R5 // 7ca41dd2 + MULLDVCC R3, R4, R5 // 7ca41dd3 + + DIVD 
R3,R4 // 7c841bd2 + DIVD R3, R4, R5 // 7ca41bd2 + DIVW R3, R4 // 7c841bd6 + DIVW R3, R4, R5 // 7ca41bd6 + DIVDCC R3,R4, R5 // 7ca41bd3 + DIVWCC R3,R4, R5 // 7ca41bd7 + DIVDU R3, R4, R5 // 7ca41b92 + DIVWU R3, R4, R5 // 7ca41b96 + DIVDV R3, R4, R5 // 7ca41fd2 + DIVWV R3, R4, R5 // 7ca41fd6 + DIVDUCC R3, R4, R5 // 7ca41b93 + DIVWUCC R3, R4, R5 // 7ca41b97 + DIVDVCC R3, R4, R5 // 7ca41fd3 + DIVWVCC R3, R4, R5 // 7ca41fd7 + DIVDUV R3, R4, R5 // 7ca41f92 + DIVDUVCC R3, R4, R5 // 7ca41f93 + DIVWUVCC R3, R4, R5 // 7ca41f97 + DIVWUV R3, R4, R5 // 7ca41f96 + DIVDE R3, R4, R5 // 7ca41b52 + DIVDECC R3, R4, R5 // 7ca41b53 + DIVDEU R3, R4, R5 // 7ca41b12 + DIVDEUCC R3, R4, R5 // 7ca41b13 + + REM R3, R4, R5 // 7fe41bd67fff19d67cbf2050 + REMU R3, R4, R5 // 7fe41b967fff19d67bff00287cbf2050 + REMD R3, R4, R5 // 7fe41bd27fff19d27cbf2050 + REMDU R3, R4, R5 // 7fe41b927fff19d27cbf2050 + + MADDHD R3,R4,R5,R6 // 10c32170 + MADDHDU R3,R4,R5,R6 // 10c32171 + + MODUD R3, R4, R5 // 7ca41a12 + MODUW R3, R4, R5 // 7ca41a16 + MODSD R3, R4, R5 // 7ca41e12 + MODSW R3, R4, R5 // 7ca41e16 + + SLW $8, R3, R4 // 5464402e + SLW R3, R4, R5 // 7c851830 + SLWCC R3, R4 // 7c841831 + SLD $16, R3, R4 // 786483e4 + SLD R3, R4, R5 // 7c851836 + SLDCC R3, R4 // 7c841837 + + SRW $8, R3, R4 // 5464c23e + SRW R3, R4, R5 // 7c851c30 + SRWCC R3, R4 // 7c841c31 + SRAW $8, R3, R4 // 7c644670 + SRAW R3, R4, R5 // 7c851e30 + SRAWCC R3, R4 // 7c841e31 + SRD $16, R3, R4 // 78648402 + SRD R3, R4, R5 // 7c851c36 + SRDCC R3, R4 // 7c841c37 + SRAD $16, R3, R4 // 7c648674 + SRAD R3, R4, R5 // 7c851e34 + SRDCC R3, R4 // 7c841c37 + ROTLW $16, R3, R4 // 5464803e + ROTLW R3, R4, R5 // 5c85183e + ROTL $16, R3, R4 // 78648000 + EXTSWSLI $3, R4, R5 // 7c851ef4 + EXTSWSLICC $16, R3, R4 // 7c6486f5 + EXTSB R3, R4 // 7c640774 + EXTSBCC R3, R4 // 7c640775 + EXTSH R3, R4 // 7c640734 + EXTSHCC R3, R4 // 7c640735 + EXTSW R3, R4 // 7c6407b4 + EXTSWCC R3, R4 // 7c6407b5 + RLWMI $7, R3, $4026531855, R6 // 50663f06 + RLWMI $7, R3, $1, R6 // 
50663ffe + RLWMI $7, R3, $2147483648, R6 // 50663800 + RLWMI $7, R3, $65535, R6 // 50663c3e + RLWMI $7, R3, $16, $31, R6 // 50663c3e + RLWMICC $7, R3, $65535, R6 // 50663c3f + RLWMICC $7, R3, $16, $31, R6 // 50663c3f + RLWNM $3, R4, $7, R6 // 54861f7e + RLWNM $0, R4, $7, R6 // 5486077e + RLWNM R0, R4, $7, R6 // 5c86077e + RLWNM $3, R4, $29, $31, R6 // 54861f7e + RLWNM $0, R4, $29, $31, R6 // 5486077e + RLWNM R0, R4, $29, $31, R6 // 5c86077e + RLWNM R3, R4, $7, R6 // 5c861f7e + RLWNM R3, R4, $29, $31, R6 // 5c861f7e + RLWNMCC $3, R4, $7, R6 // 54861f7f + RLWNMCC $3, R4, $29, $31, R6 // 54861f7f + RLWNMCC R3, R4, $7, R6 // 5c861f7f + RLWNMCC R3, R4, $29, $31, R6 // 5c861f7f + RLDMI $0, R4, $7, R6 // 7886076c + RLDMICC $0, R4, $7, R6 // 7886076d + RLDIMI $0, R4, $7, R6 // 788601cc + RLDIMICC $0, R4, $7, R6 // 788601cd + RLDC $0, R4, $15, R6 // 78860728 + RLDC R3, $32, $12, R4 // 7864030a + RLDC R3, $8, $32, R4 // 78644028 + RLDCCC R3, $32, $12, R4 // 7864030b + RLDCCC R3, $8, $32, R4 // 78644029 + RLDCCC $0, R4, $15, R6 // 78860729 + RLDCL $0, R4, $7, R6 // 78860770 + RLDCLCC $0, R4, $15, R6 // 78860721 + RLDCR $0, R4, $-16, R6 // 788606f2 + RLDCRCC $0, R4, $-16, R6 // 788606f3 + RLDICL $0, R4, $15, R6 // 788603c0 + RLDICLCC $0, R4, $15, R6 // 788603c1 + RLDICR $0, R4, $15, R6 // 788603c4 + RLDICRCC $0, R4, $15, R6 // 788603c5 + RLDIC $0, R4, $15, R6 // 788603c8 + RLDICCC $0, R4, $15, R6 // 788603c9 + CLRLSLWI $16, R5, $8, R4 // 54a4422e + CLRLSLDI $24, R4, $2, R3 // 78831588 + RLDCR $1, R1, $-16, R1 // 78210ee4 + RLDCRCC $1, R1, $-16, R1 // 78210ee5 + CNTLZW R3,R4 // 7c640034 + CNTLZWCC R3,R4 // 7c640035 + CNTLZD R3, R4 // 7c640074 + CNTLZDCC R3, R4 // 7c640075 + CNTTZW R3,R4 // 7c640434 + CNTTZWCC R3,R4 // 7c640435 + CNTTZD R3,R4 // 7c640474 + CNTTZDCC R3,R4 // 7c640475 + NEG R3, R4 // 7c8300d0 + NEGCC R3, R4 // 7c8300d1 + NEGV R3, R4 // 7c8304d0 + NEGVCC R3, R4 // 7c8304d1 + + BEQ 0(PC) // 41820000 + BEQ CR1,0(PC) // 41860000 + BGE 0(PC) // 40800000 + BGE CR2,0(PC) 
// 40880000 + BGT 4(PC) // 41810010 + BGT CR3,4(PC) // 418d0010 + BLE 0(PC) // 40810000 + BLE CR4,0(PC) // 40910000 + BLT 0(PC) // 41800000 + BLT CR5,0(PC) // 41940000 + BNE 0(PC) // 40820000 + BLT CR6,0(PC) // 41980000 + BVC 0(PC) // 40830000 + BVS 0(PC) // 41830000 + JMP 8(PC) // 48000010 + + NOP + NOP R2 + NOP F2 + NOP $4 + + CRAND CR0GT, CR0EQ, CR0SO // 4c620a02 + CRANDN CR0GT, CR0EQ, CR0SO // 4c620902 + CREQV CR0GT, CR0EQ, CR0SO // 4c620a42 + CRNAND CR0GT, CR0EQ, CR0SO // 4c6209c2 + CRNOR CR0GT, CR0EQ, CR0SO // 4c620842 + CROR CR0GT, CR0EQ, CR0SO // 4c620b82 + CRORN CR0GT, CR0EQ, CR0SO // 4c620b42 + CRXOR CR0GT, CR0EQ, CR0SO // 4c620982 + + ISEL $0, R3, R4, R5 // 7ca3201e + ISEL $1, R3, R4, R5 // 7ca3205e + ISEL $2, R3, R4, R5 // 7ca3209e + ISEL $3, R3, R4, R5 // 7ca320de + ISEL $4, R3, R4, R5 // 7ca3211e + ISEL $31, R3, R4, R5 // 7ca327de + ISEL CR0LT, R3, R4, R5 // 7ca3201e + ISEL CR0GT, R3, R4, R5 // 7ca3205e + ISEL CR0EQ, R3, R4, R5 // 7ca3209e + ISEL CR0SO, R3, R4, R5 // 7ca320de + ISEL CR1LT, R3, R4, R5 // 7ca3211e + ISEL CR7SO, R3, R4, R5 // 7ca327de + POPCNTB R3, R4 // 7c6400f4 + POPCNTW R3, R4 // 7c6402f4 + POPCNTD R3, R4 // 7c6403f4 + + PASTECC R3, R4 // 7c23270d + COPY R3, R4 // 7c23260c + + // load-and-reserve + LBAR (R4)(R3*1),$1,R5 // 7ca32069 + LBAR (R4)(R0),$1,R5 // 7ca02069 + LBAR (R4),$0,R5 // 7ca02068 + LBAR (R3),R5 // 7ca01868 + LHAR (R4)(R3*1),$1,R5 // 7ca320e9 + LHAR (R4)(R0),$1,R5 // 7ca020e9 + LHAR (R4),$0,R5 // 7ca020e8 + LHAR (R3),R5 // 7ca018e8 + LWAR (R4)(R3*1),$1,R5 // 7ca32029 + LWAR (R4)(R0),$1,R5 // 7ca02029 + LWAR (R4),$0,R5 // 7ca02028 + LWAR (R3),R5 // 7ca01828 + LDAR (R4)(R3*1),$1,R5 // 7ca320a9 + LDAR (R4)(R0),$1,R5 // 7ca020a9 + LDAR (R4),$0,R5 // 7ca020a8 + LDAR (R3),R5 // 7ca018a8 + + LSW (R3)(R4), R5 // 7ca41c2a + LSW (R3)(R0), R5 // 7ca01c2a + LSW (R3), R5 // 7ca01c2a + + STBCCC R3, (R4)(R5) // 7c65256d + STBCCC R3, (R4)(R0) // 7c60256d + STBCCC R3, (R4) // 7c60256d + STWCCC R3, (R4)(R5) // 7c65212d + STWCCC R3, 
(R4)(R0) // 7c60212d + STWCCC R3, (R4) // 7c60212d + STDCCC R3, (R4)(R5) // 7c6521ad + STDCCC R3, (R4)(R0) // 7c6021ad + STDCCC R3, (R4) // 7c6021ad + STHCCC R3, (R4)(R5) // 7c6525ad + STHCCC R3, (R4)(R0) // 7c6025ad + STHCCC R3, (R4) // 7c6025ad + STSW R3, (R4)(R5) // 7c65252a + STSW R3, (R4)(R0) // 7c60252a + STSW R3, (R4) // 7c60252a + + SYNC // 7c0004ac + ISYNC // 4c00012c + LWSYNC // 7c2004ac + EIEIO // 7c0006ac + PTESYNC // 7c4004ac + TLBIE R3 // 7c001a64 + TLBIEL R3 // 7c001a24 + TLBSYNC // 7c00046c + HRFID // 4c000224 + SLBIA // 7c0003e4 + SLBIE R3 // 7c001b64 + SLBMFEE R3, R4 // 7c801f26 + SLBMFEV R3, R4 // 7c801ea6 + SLBMTE R3, R4 // 7c801b24 + + TW $31, R0, R0 // 7fe00008 + TD $31, R0, R0 // 7fe00088 + DARN $1, R5 // 7ca105e6 + + DCBF (R3)(R4) // 7c0418ac + DCBF (R3)(R0) // 7c0018ac + DCBF (R3) // 7c0018ac + + DCBST (R3)(R4) // 7c04186c + DCBST (R3)(R0) // 7c00186c + DCBST (R3) // 7c00186c + DCBZ (R3)(R4) // 7c041fec + DCBZ (R3)(R0) // 7c001fec + DCBZ (R3) // 7c001fec + DCBT (R3)(R4) // 7c041a2c + DCBT (R3)(R0) // 7c001a2c + DCBT (R3) // 7c001a2c + ICBI (R3)(R4) // 7c041fac + ICBI (R3)(R0) // 7c001fac + ICBI (R3) // 7c001fac + + // float constants + FMOVD $(0.0), F1 // f0210cd0 + FMOVD $(-0.0), F1 // f0210cd0fc200850 + + FMOVD 8(R3), F1 // c8230008 + FMOVD (R3)(R4), F1 // 7c241cae + FMOVD (R3)(R0), F1 // 7c201cae + FMOVD (R3), F1 // c8230000 + FMOVDU 8(R3), F1 // cc230008 + FMOVDU (R3)(R4), F1 // 7c241cee + FMOVDU (R3)(R0), F1 // 7c201cee + FMOVDU (R3), F1 // cc230000 + FMOVS 4(R3), F1 // c0230004 + FMOVS (R3)(R4), F1 // 7c241c2e + FMOVS (R3)(R0), F1 // 7c201c2e + FMOVS (R3), F1 // c0230000 + FMOVSU 4(R3), F1 // c4230004 + FMOVSU (R3)(R4), F1 // 7c241c6e + FMOVSU (R3)(R0), F1 // 7c201c6e + FMOVSU (R3), F1 // c4230000 + FMOVSX (R3)(R4), F1 // 7c241eae + FMOVSX (R3)(R0), F1 // 7c201eae + FMOVSX (R3), F1 // 7c201eae + FMOVSZ (R3)(R4), F1 // 7c241eee + FMOVSZ (R3)(R0), F1 // 7c201eee + FMOVSZ (R3), F1 // 7c201eee + + FMOVD F1, 8(R3) // d8230008 + FMOVD F1, 
(R3)(R4) // 7c241dae + FMOVD F1, (R3)(R0) // 7c201dae + FMOVD F1, (R3) // d8230000 + FMOVDU F1, 8(R3) // dc230008 + FMOVDU F1, (R3)(R4) // 7c241dee + FMOVDU F1, (R3)(R0) // 7c201dee + FMOVDU F1, (R3) // dc230000 + FMOVS F1, 4(R3) // d0230004 + FMOVS F1, (R3)(R4) // 7c241d2e + FMOVS F1, (R3)(R0) // 7c201d2e + FMOVS F1, (R3) // d0230000 + FMOVSU F1, 4(R3) // d4230004 + FMOVSU F1, (R3)(R4) // 7c241d6e + FMOVSU F1, (R3)(R0) // 7c201d6e + FMOVSU F1, (R3) // d4230000 + FMOVSX F1, (R3)(R4) // 7c241fae + FMOVSX F1, (R3)(R0) // 7c201fae + FMOVSX F1, (R3) // 7c201fae + FADD F1, F2 // fc42082a + FADD F1, F2, F3 // fc62082a + FADDCC F1, F2, F3 // fc62082b + FMOVDCC F1, F2 // fc400891 + FADDS F1, F2 // ec42082a + FADDS F1, F2, F3 // ec62082a + FADDSCC F1, F2, F3 // ec62082b + FSUB F1, F2 // fc420828 + FSUB F1, F2, F3 // fc620828 + FSUBCC F1, F2, F3 // fc620829 + FSUBS F1, F2 // ec420828 + FSUBS F1, F2, F3 // ec620828 + FSUBCC F1, F2, F3 // fc620829 + FSUBSCC F1, F2, F3 // ec620829 + FMUL F1, F2 // fc420072 + FMUL F1, F2, F3 // fc620072 + FMULCC F1, F2, F3 // fc620073 + FMULS F1, F2 // ec420072 + FMULS F1, F2, F3 // ec620072 + FMULSCC F1, F2, F3 // ec620073 + FDIV F1, F2 // fc420824 + FDIV F1, F2, F3 // fc620824 + FDIVCC F1, F2, F3 // fc620825 + FDIVS F1, F2 // ec420824 + FDIVS F1, F2, F3 // ec620824 + FDIVSCC F1, F2, F3 // ec620825 + FTDIV F1, F2, $2 // fd011100 + FTSQRT F1, $2 // fd000940 + FMADD F1, F2, F3, F4 // fc8110fa + FMADDCC F1, F2, F3, F4 // fc8110fb + FMADDS F1, F2, F3, F4 // ec8110fa + FMADDSCC F1, F2, F3, F4 // ec8110fb + FMSUB F1, F2, F3, F4 // fc8110f8 + FMSUBCC F1, F2, F3, F4 // fc8110f9 + FMSUBS F1, F2, F3, F4 // ec8110f8 + FMSUBSCC F1, F2, F3, F4 // ec8110f9 + FNMADD F1, F2, F3, F4 // fc8110fe + FNMADDCC F1, F2, F3, F4 // fc8110ff + FNMADDS F1, F2, F3, F4 // ec8110fe + FNMADDSCC F1, F2, F3, F4 // ec8110ff + FNMSUB F1, F2, F3, F4 // fc8110fc + FNMSUBCC F1, F2, F3, F4 // fc8110fd + FNMSUBS F1, F2, F3, F4 // ec8110fc + FNMSUBSCC F1, F2, F3, F4 // ec8110fd + FSEL 
F1, F2, F3, F4 // fc8110ee + FSELCC F1, F2, F3, F4 // fc8110ef + FABS F1, F2 // fc400a10 + FNABS F1, F2 // fc400910 + FABSCC F1, F2 // fc400a11 + FNABSCC F1, F2 // fc400911 + FNEG F1, F2 // fc400850 + FNEGCC F1, F2 // fc400851 + FABSCC F1, F2 // fc400a11 + FRSP F1, F2 // fc400818 + FRSPCC F1, F2 // fc400819 + FCTIW F1, F2 // fc40081c + FCTIWCC F1, F2 // fc40081d + FCTIWZ F1, F2 // fc40081e + FCTIWZCC F1, F2 // fc40081f + FCTID F1, F2 // fc400e5c + FCTIDCC F1, F2 // fc400e5d + FCTIDZ F1, F2 // fc400e5e + FCTIDZCC F1, F2 // fc400e5f + FCFID F1, F2 // fc400e9c + FCFIDCC F1, F2 // fc400e9d + FCFIDU F1, F2 // fc400f9c + FCFIDUCC F1, F2 // fc400f9d + FCFIDS F1, F2 // ec400e9c + FCFIDSCC F1, F2 // ec400e9d + FRES F1, F2 // ec400830 + FRESCC F1, F2 // ec400831 + FRIM F1, F2 // fc400bd0 + FRIMCC F1, F2 // fc400bd1 + FRIP F1, F2 // fc400b90 + FRIPCC F1, F2 // fc400b91 + FRIZ F1, F2 // fc400b50 + FRIZCC F1, F2 // fc400b51 + FRIN F1, F2 // fc400b10 + FRINCC F1, F2 // fc400b11 + FRSQRTE F1, F2 // fc400834 + FRSQRTECC F1, F2 // fc400835 + FSQRT F1, F2 // fc40082c + FSQRTCC F1, F2 // fc40082d + FSQRTS F1, F2 // ec40082c + FSQRTSCC F1, F2 // ec40082d + FCPSGN F1, F2 // fc420810 + FCPSGNCC F1, F2 // fc420811 + FCMPO F1, F2 // fc011040 + FCMPU F1, F2 // fc011000 + LVX (R3)(R4), V1 // 7c2418ce + LVX (R3)(R0), V1 // 7c2018ce + LVX (R3), V1 // 7c2018ce + LVXL (R3)(R4), V1 // 7c241ace + LVXL (R3)(R0), V1 // 7c201ace + LVXL (R3), V1 // 7c201ace + LVSL (R3)(R4), V1 // 7c24180c + LVSL (R3)(R0), V1 // 7c20180c + LVSL (R3), V1 // 7c20180c + LVSR (R3)(R4), V1 // 7c24184c + LVSR (R3)(R0), V1 // 7c20184c + LVSR (R3), V1 // 7c20184c + LVEBX (R3)(R4), V1 // 7c24180e + LVEBX (R3)(R0), V1 // 7c20180e + LVEBX (R3), V1 // 7c20180e + LVEHX (R3)(R4), V1 // 7c24184e + LVEHX (R3)(R0), V1 // 7c20184e + LVEHX (R3), V1 // 7c20184e + LVEWX (R3)(R4), V1 // 7c24188e + LVEWX (R3)(R0), V1 // 7c20188e + LVEWX (R3), V1 // 7c20188e + STVX V1, (R3)(R4) // 7c2419ce + STVX V1, (R3)(R0) // 7c2019ce + STVX V1, (R3) // 
7c2019ce + STVXL V1, (R3)(R4) // 7c241bce + STVXL V1, (R3)(R0) // 7c201bce + STVXL V1, (R3) // 7c201bce + STVEBX V1, (R3)(R4) // 7c24190e + STVEBX V1, (R3)(R0) // 7c20190e + STVEBX V1, (R3) // 7c20190e + STVEHX V1, (R3)(R4) // 7c24194e + STVEHX V1, (R3)(R0) // 7c20194e + STVEHX V1, (R3) // 7c20194e + STVEWX V1, (R3)(R4) // 7c24198e + STVEWX V1, (R3)(R0) // 7c20198e + STVEWX V1, (R3) // 7c20198e + + VAND V1, V2, V3 // 10611404 + VANDC V1, V2, V3 // 10611444 + VNAND V1, V2, V3 // 10611584 + VOR V1, V2, V3 // 10611484 + VORC V1, V2, V3 // 10611544 + VXOR V1, V2, V3 // 106114c4 + VNOR V1, V2, V3 // 10611504 + VEQV V1, V2, V3 // 10611684 + VADDUBM V1, V2, V3 // 10611000 + VADDUHM V1, V2, V3 // 10611040 + VADDUWM V1, V2, V3 // 10611080 + VADDUDM V1, V2, V3 // 106110c0 + VADDUQM V1, V2, V3 // 10611100 + VADDCUQ V1, V2, V3 // 10611140 + VADDCUW V1, V2, V3 // 10611180 + VADDUBS V1, V2, V3 // 10611200 + VADDUHS V1, V2, V3 // 10611240 + VADDUWS V1, V2, V3 // 10611280 + VADDSBS V1, V2, V3 // 10611300 + VADDSHS V1, V2, V3 // 10611340 + VADDSWS V1, V2, V3 // 10611380 + VADDEUQM V1, V2, V3, V4 // 108110fc + VADDECUQ V1, V2, V3, V4 // 108110fd + VSUBUBM V1, V2, V3 // 10611400 + VSUBUHM V1, V2, V3 // 10611440 + VSUBUWM V1, V2, V3 // 10611480 + VSUBUDM V1, V2, V3 // 106114c0 + VSUBUQM V1, V2, V3 // 10611500 + VSUBCUQ V1, V2, V3 // 10611540 + VSUBCUW V1, V2, V3 // 10611580 + VSUBUBS V1, V2, V3 // 10611600 + VSUBUHS V1, V2, V3 // 10611640 + VSUBUWS V1, V2, V3 // 10611680 + VSUBSBS V1, V2, V3 // 10611700 + VSUBSHS V1, V2, V3 // 10611740 + VSUBSWS V1, V2, V3 // 10611780 + VSUBEUQM V1, V2, V3, V4 // 108110fe + VSUBECUQ V1, V2, V3, V4 // 108110ff + VMULESB V1, V2, V3 // 10611308 + VMULESW V1, V2, V3 // 10611388 + VMULOSB V1, V2, V3 // 10611108 + VMULEUB V1, V2, V3 // 10611208 + VMULOUB V1, V2, V3 // 10611008 + VMULESH V1, V2, V3 // 10611348 + VMULOSH V1, V2, V3 // 10611148 + VMULEUH V1, V2, V3 // 10611248 + VMULOUH V1, V2, V3 // 10611048 + VMULESH V1, V2, V3 // 10611348 + VMULOSW V1, V2, 
V3 // 10611188 + VMULEUW V1, V2, V3 // 10611288 + VMULOUW V1, V2, V3 // 10611088 + VMULUWM V1, V2, V3 // 10611089 + VPMSUMB V1, V2, V3 // 10611408 + VPMSUMH V1, V2, V3 // 10611448 + VPMSUMW V1, V2, V3 // 10611488 + VPMSUMD V1, V2, V3 // 106114c8 + VMSUMUDM V1, V2, V3, V4 // 108110e3 + VRLB V1, V2, V3 // 10611004 + VRLH V1, V2, V3 // 10611044 + VRLW V1, V2, V3 // 10611084 + VRLD V1, V2, V3 // 106110c4 + VSLB V1, V2, V3 // 10611104 + VSLH V1, V2, V3 // 10611144 + VSLW V1, V2, V3 // 10611184 + VSL V1, V2, V3 // 106111c4 + VSLO V1, V2, V3 // 1061140c + VSRB V1, V2, V3 // 10611204 + VSRH V1, V2, V3 // 10611244 + VSRW V1, V2, V3 // 10611284 + VSRD V1, V2, V3 // 106116c4 + VSR V1, V2, V3 // 106112c4 + VSRO V1, V2, V3 // 1061144c + VSLD V1, V2, V3 // 106115c4 + VSRAB V1, V2, V3 // 10611304 + VSRAH V1, V2, V3 // 10611344 + VSRAW V1, V2, V3 // 10611384 + VSRAD V1, V2, V3 // 106113c4 + VSLDOI $3, V1, V2, V3 // 106110ec + VCLZB V1, V2 // 10400f02 + VCLZH V1, V2 // 10400f42 + VCLZW V1, V2 // 10400f82 + VCLZD V1, V2 // 10400fc2 + VPOPCNTB V1, V2 // 10400f03 + VPOPCNTH V1, V2 // 10400f43 + VPOPCNTW V1, V2 // 10400f83 + VPOPCNTD V1, V2 // 10400fc3 + VCMPEQUB V1, V2, V3 // 10611006 + VCMPEQUBCC V1, V2, V3 // 10611406 + VCMPEQUH V1, V2, V3 // 10611046 + VCMPEQUHCC V1, V2, V3 // 10611446 + VCMPEQUW V1, V2, V3 // 10611086 + VCMPEQUWCC V1, V2, V3 // 10611486 + VCMPEQUD V1, V2, V3 // 106110c7 + VCMPEQUDCC V1, V2, V3 // 106114c7 + VCMPGTUB V1, V2, V3 // 10611206 + VCMPGTUBCC V1, V2, V3 // 10611606 + VCMPGTUH V1, V2, V3 // 10611246 + VCMPGTUHCC V1, V2, V3 // 10611646 + VCMPGTUW V1, V2, V3 // 10611286 + VCMPGTUWCC V1, V2, V3 // 10611686 + VCMPGTUD V1, V2, V3 // 106112c7 + VCMPGTUDCC V1, V2, V3 // 106116c7 + VCMPGTSB V1, V2, V3 // 10611306 + VCMPGTSBCC V1, V2, V3 // 10611706 + VCMPGTSH V1, V2, V3 // 10611346 + VCMPGTSHCC V1, V2, V3 // 10611746 + VCMPGTSW V1, V2, V3 // 10611386 + VCMPGTSWCC V1, V2, V3 // 10611786 + VCMPGTSD V1, V2, V3 // 106113c7 + VCMPGTSDCC V1, V2, V3 // 106117c7 + 
VCMPNEZB V1, V2, V3 // 10611107 + VCMPNEZBCC V1, V2, V3 // 10611507 + VCMPNEB V1, V2, V3 // 10611007 + VCMPNEBCC V1, V2, V3 // 10611407 + VCMPNEH V1, V2, V3 // 10611047 + VCMPNEHCC V1, V2, V3 // 10611447 + VCMPNEW V1, V2, V3 // 10611087 + VCMPNEWCC V1, V2, V3 // 10611487 + VPERM V1, V2, V3, V4 // 108110eb + VPERMR V1, V2, V3, V4 // 108110fb + VPERMXOR V1, V2, V3, V4 // 108110ed + VBPERMQ V1, V2, V3 // 1061154c + VBPERMD V1, V2, V3 // 106115cc + VSEL V1, V2, V3, V4 // 108110ea + VSPLTB $1, V1, V2 // 10410a0c + VSPLTH $1, V1, V2 // 10410a4c + VSPLTW $1, V1, V2 // 10410a8c + VSPLTISB $1, V1 // 1021030c + VSPLTISW $1, V1 // 1021038c + VSPLTISH $1, V1 // 1021034c + VCIPHER V1, V2, V3 // 10611508 + VCIPHERLAST V1, V2, V3 // 10611509 + VNCIPHER V1, V2, V3 // 10611548 + VNCIPHERLAST V1, V2, V3 // 10611549 + VSBOX V1, V2 // 104105c8 + VSHASIGMAW $1, V1, $15, V2 // 10418e82 + VSHASIGMAW $1, $15, V1, V2 // 10418e82 + VSHASIGMAD $2, V1, $15, V2 // 104196c2 + VSHASIGMAD $2, $15, V1, V2 // 104196c2 + + LXVD2X (R3)(R4), VS1 // 7c241e98 + LXVD2X (R3)(R0), VS1 // 7c201e98 + LXVD2X (R3), VS1 // 7c201e98 + LXVDSX (R3)(R4), VS1 // 7c241a98 + LXVDSX (R3)(R0), VS1 // 7c201a98 + LXVDSX (R3), VS1 // 7c201a98 + LXVH8X (R3)(R4), VS1 // 7c241e58 + LXVH8X (R3)(R0), VS1 // 7c201e58 + LXVH8X (R3), VS1 // 7c201e58 + LXVB16X (R3)(R4), VS1 // 7c241ed8 + LXVB16X (R3)(R0), VS1 // 7c201ed8 + LXVB16X (R3), VS1 // 7c201ed8 + LXVW4X (R3)(R4), VS1 // 7c241e18 + LXVW4X (R3)(R0), VS1 // 7c201e18 + LXVW4X (R3), VS1 // 7c201e18 + LXV 16(R3), VS1 // f4230011 + LXV (R3), VS1 // f4230001 + LXV 16(R3), VS33 // f4230019 + LXV (R3), VS33 // f4230009 + LXV 16(R3), V1 // f4230019 + LXV (R3), V1 // f4230009 + LXVL R3, R4, VS1 // 7c23221a + LXVLL R3, R4, VS1 // 7c23225a + LXVX R3, R4, VS1 // 7c232218 + LXSDX (R3)(R4), VS1 // 7c241c98 + LXSDX (R3)(R0), VS1 // 7c201c98 + LXSDX (R3), VS1 // 7c201c98 + STXVD2X VS1, (R3)(R4) // 7c241f98 + STXVD2X VS1, (R3)(R0) // 7c201f98 + STXVD2X VS1, (R3) // 7c201f98 + STXVW4X VS1, 
(R3)(R4) // 7c241f18 + STXVW4X VS1, (R3)(R0) // 7c201f18 + STXVW4X VS1, (R3) // 7c201f18 + STXV VS1,16(R3) // f4230015 + STXV VS1,(R3) // f4230005 + STXVL VS1, R3, R4 // 7c23231a + STXVLL VS1, R3, R4 // 7c23235a + STXVX VS1, R3, R4 // 7c232318 + STXVB16X VS1, (R4)(R5) // 7c2527d8 + STXVB16X VS1, (R4)(R0) // 7c2027d8 + STXVB16X VS1, (R4) // 7c2027d8 + STXVH8X VS1, (R4)(R5) // 7c252758 + STXVH8X VS1, (R4)(R0) // 7c202758 + STXVH8X VS1, (R4) // 7c202758 + STXSDX VS1, (R3)(R4) // 7c241d98 + STXSDX VS1, (R4)(R0) // 7c202598 + STXSDX VS1, (R4) // 7c202598 + LXSIWAX (R3)(R4), VS1 // 7c241898 + LXSIWAX (R3)(R0), VS1 // 7c201898 + LXSIWAX (R3), VS1 // 7c201898 + LXSIWZX (R3)(R4), VS1 // 7c241818 + LXSIWZX (R3)(R0), VS1 // 7c201818 + LXSIWZX (R3), VS1 // 7c201818 + STXSIWX VS1, (R3)(R4) // 7c241918 + STXSIWX VS1, (R3)(R0) // 7c201918 + STXSIWX VS1, (R3) // 7c201918 + MFVSRD VS1, R3 // 7c230066 + MTFPRD R3, F0 // 7c030166 + MFVRD V0, R3 // 7c030067 + MFVSRLD VS63,R4 // 7fe40267 + MFVSRLD V31,R4 // 7fe40267 + MFVSRWZ VS33,R4 // 7c2400e7 + MFVSRWZ V1,R4 // 7c2400e7 + MTVSRD R3, VS1 // 7c230166 + MTVSRDD R3, R4, VS1 // 7c232366 + MTVSRDD R3, R4, VS33 // 7c232367 + MTVSRDD R3, R4, V1 // 7c232367 + MTVRD R3, V13 // 7da30167 + MTVSRWA R4, VS31 // 7fe401a6 + MTVSRWS R4, VS32 // 7c040327 + MTVSRWZ R4, VS63 // 7fe401e7 + MTFSB0 $2 // fc40008c + MTFSB0CC $2 // fc40008d + MTFSB1 $2 // fc40004c + MTFSB1CC $2 // fc40004d + XXBRQ VS0, VS1 // f03f076c + XXBRD VS0, VS1 // f037076c + XXBRW VS1, VS2 // f04f0f6c + XXBRH VS2, VS3 // f067176c + XXLAND VS1, VS2, VS3 // f0611410 + XXLAND V1, V2, V3 // f0611417 + XXLAND VS33, VS34, VS35 // f0611417 + XXLANDC VS1, VS2, VS3 // f0611450 + XXLEQV VS0, VS1, VS2 // f0400dd0 + XXLNAND VS0, VS1, VS2 // f0400d90 + XXLNOR VS0, VS1, VS32 // f0000d11 + XXLOR VS1, VS2, VS3 // f0611490 + XXLORC VS1, VS2, VS3 // f0611550 + XXLORQ VS1, VS2, VS3 // f0611490 + XXLXOR VS1, VS2, VS3 // f06114d0 + XXSEL VS1, VS2, VS3, VS4 // f08110f0 + XXSEL VS33, VS34, VS35, VS36 // 
f08110ff + XXSEL V1, V2, V3, V4 // f08110ff + XXMRGHW VS1, VS2, VS3 // f0611090 + XXMRGLW VS1, VS2, VS3 // f0611190 + XXSPLTW VS1, $1, VS2 // f0410a90 + XXSPLTW VS33, $1, VS34 // f0410a93 + XXSPLTW V1, $1, V2 // f0410a93 + XXPERM VS1, VS2, VS3 // f06110d0 + XXSLDWI VS1, VS2, $1, VS3 // f0611110 + XXSLDWI V1, V2, $1, V3 // f0611117 + XXSLDWI V1, $1, V2, V3 // f0611117 + XXSLDWI VS33, VS34, $1, VS35 // f0611117 + XXSLDWI VS33, $1, VS34, VS35 // f0611117 + XXPERMDI VS33, VS34, $1, VS35 // f0611157 + XXPERMDI VS33, $1, VS34, VS35 // f0611157 + XSCVDPSP VS1, VS2 // f0400c24 + XVCVDPSP VS1, VS2 // f0400e24 + XSCVSXDDP VS1, VS2 // f0400de0 + XVCVDPSXDS VS1, VS2 // f0400f60 + XVCVSXDDP VS1, VS2 // f0400fe0 + XSCVDPSPN VS1,VS32 // f0000c2d + XSCVDPSP VS1,VS32 // f0000c25 + XSCVDPSXDS VS1,VS32 // f0000d61 + XSCVDPSXWS VS1,VS32 // f0000961 + XSCVDPUXDS VS1,VS32 // f0000d21 + XSCVDPUXWS VS1,VS32 // f0000921 + XSCVSPDPN VS1,VS32 // f0000d2d + XSCVSPDP VS1,VS32 // f0000d25 + XSCVSXDDP VS1,VS32 // f0000de1 + XSCVSXDSP VS1,VS32 // f0000ce1 + XSCVUXDDP VS1,VS32 // f0000da1 + XSCVUXDSP VS1,VS32 // f0000ca1 + XVCVDPSP VS1,VS32 // f0000e25 + XVCVDPSXDS VS1,VS32 // f0000f61 + XVCVDPSXWS VS1,VS32 // f0000b61 + XVCVDPUXDS VS1,VS32 // f0000f21 + XVCVDPUXWS VS1,VS32 // f0000b21 + XVCVSPDP VS1,VS32 // f0000f25 + XVCVSPSXDS VS1,VS32 // f0000e61 + XVCVSPSXWS VS1,VS32 // f0000a61 + XVCVSPUXDS VS1,VS32 // f0000e21 + XVCVSPUXWS VS1,VS32 // f0000a21 + XVCVSXDDP VS1,VS32 // f0000fe1 + XVCVSXDSP VS1,VS32 // f0000ee1 + XVCVSXWDP VS1,VS32 // f0000be1 + XVCVSXWSP VS1,VS32 // f0000ae1 + XVCVUXDDP VS1,VS32 // f0000fa1 + XVCVUXDSP VS1,VS32 // f0000ea1 + XVCVUXWDP VS1,VS32 // f0000ba1 + XVCVUXWSP VS1,VS32 // f0000aa1 + + MOVD R3, LR // 7c6803a6 + MOVD R3, CTR // 7c6903a6 + MOVD R3, XER // 7c6103a6 + MOVD LR, R3 // 7c6802a6 + MOVD CTR, R3 // 7c6902a6 + MOVD XER, R3 // 7c6102a6 + MOVFL CR3, CR1 // 4c8c0000 + + MOVW CR0, R1 // 7c380026 + MOVW CR7, R1 // 7c301026 + MOVW CR, R1 // 7c200026 + + MOVW R1, CR // 
7c2ff120 + MOVFL R1, CR // 7c2ff120 + MOVW R1, CR2 // 7c320120 + MOVFL R1, CR2 // 7c320120 + MOVFL R1, $255 // 7c2ff120 + MOVFL R1, $1 // 7c301120 + MOVFL R1, $128 // 7c380120 + MOVFL R1, $3 // 7c203120 + MOVMW 4(R3), R4 // b8830004 + + + // Verify supported bdnz/bdz encodings. + BC 16,0,0(PC) // BC $16, CR0LT, 0(PC) // 42000000 + BDNZ 0(PC) // 42000000 + BDZ 0(PC) // 42400000 + BC 18,0,0(PC) // BC $18, CR0LT, 0(PC) // 42400000 + + // Verify the supported forms of bcclr[l] + BC $20,CR0LT,$1,LR // 4e800820 + BC $20,CR0LT,$0,LR // 4e800020 + BC $20,CR0LT,LR // 4e800020 + BC $20,CR0GT,LR // 4e810020 + BC 20,CR0LT,LR // BC $20,CR0LT,LR // 4e800020 + BC 20,undefined_symbol,LR // BC $20,CR0LT,LR // 4e800020 + BC 20,undefined_symbol+1,LR // BC $20,CR0GT,LR // 4e810020 + JMP LR // 4e800020 + BR LR // JMP LR // 4e800020 + BCL $20,CR0LT,$1,LR // 4e800821 + BCL $20,CR0LT,$0,LR // 4e800021 + BCL $20,CR0LT,LR // 4e800021 + BCL $20,CR0GT,LR // 4e810021 + BCL 20,CR0LT,LR // BCL $20,CR0LT,LR // 4e800021 + BCL 20,undefined_symbol,LR // BCL $20,CR0LT,LR // 4e800021 + BCL 20,undefined_symbol+1,LR // BCL $20,CR0GT,LR // 4e810021 + + // Verify the supported forms of bcctr[l] + BC $20,CR0LT,CTR // 4e800420 + BC $20,CR0GT,CTR // 4e810420 + BC 20,CR0LT,CTR // BC $20,CR0LT,CTR // 4e800420 + BC 20,undefined_symbol,CTR // BC $20,CR0LT,CTR // 4e800420 + BC 20,undefined_symbol+1,CTR // BC $20,CR0GT,CTR // 4e810420 + JMP CTR // 4e800420 + BR CTR // JMP CTR // 4e800420 + BCL $20,CR0LT,CTR // 4e800421 + BCL $20,CR0GT,CTR // 4e810421 + BCL 20,CR0LT,CTR // BCL $20,CR0LT,CTR // 4e800421 + BCL 20,undefined_symbol,CTR // BCL $20,CR0LT,CTR // 4e800421 + BCL 20,undefined_symbol+1,CTR // BCL $20,CR0GT,CTR // 4e810421 + + // Verify bc encoding (without pic enabled) + BC $16,CR0LT,0(PC) // 42000000 + BCL $16,CR0LT,0(PC) // 42000001 + BC $18,CR0LT,0(PC) // 42400000 + + MOVD SPR(3), 4(R1) // 7fe302a6fbe10004 + MOVD XER, 4(R1) // 7fe102a6fbe10004 + MOVD 4(R1), SPR(3) // ebe100047fe303a6 + MOVD 4(R1), XER // 
ebe100047fe103a6 + PNOP // 0700000000000000 + + SETB CR1,R3 // 7c640100 + VCLZLSBB V1, R2 // 10400e02 + VCTZLSBB V1, R2 // 10410e02 + + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/ppc64_p10.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/ppc64_p10.s new file mode 100644 index 0000000000000000000000000000000000000000..4419aa045a39c04063aacb5f0e2cf65ff20753ad --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/ppc64_p10.s @@ -0,0 +1,273 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This contains the valid opcode combinations available +// in cmd/internal/obj/ppc64/asm9.go which exist for +// POWER10/ISA 3.1. + +#include "../../../../../runtime/textflag.h" + +TEXT asmtest(SB), DUPOK|NOSPLIT, $0 + BRD R1, R2 // 7c220176 + BRH R1, R2 // 7c2201b6 + BRW R1, R2 // 7c220136 + CFUGED R1, R2, R3 // 7c2311b8 + CNTLZDM R2, R3, R1 // 7c411876 + CNTTZDM R2, R3, R1 // 7c411c76 + DCFFIXQQ V1, F2 // fc400fc4 + DCTFIXQQ F2, V3 // fc6117c4 + LXVKQ $0, VS33 // f03f02d1 + LXVP 12352(R5), VS6 // 18c53040 + LXVPX (R1)(R2), VS4 // 7c820a9a + LXVRBX (R1)(R2), VS4 // 7c82081a + LXVRDX (R1)(R2), VS4 // 7c8208da + LXVRHX (R1)(R2), VS4 // 7c82085a + LXVRWX (R1)(R2), VS4 // 7c82089a + MTVSRBM R1, V1 // 10300e42 + MTVSRBMI $5, V1 // 10220015 + MTVSRDM R1, V1 // 10330e42 + MTVSRHM R1, V1 // 10310e42 + MTVSRQM R1, V1 // 10340e42 + MTVSRWM R1, V1 // 10320e42 + PADDI R3, $1234567890, $1, R4 // 06104996388302d2 + PADDI R0, $1234567890, $0, R4 // 06004996388002d2 + PADDI R0, $1234567890, $1, R4 // 06104996388002d2 + PDEPD R1, R2, R3 // 7c231138 + PEXTD R1, R2, R3 // 7c231178 + PLBZ 1234(R1), $0, R3 // 06000000886104d260000000 + // Note, PLD crosses a 64B boundary, and a nop is inserted between PLBZ and PLD + PLD 1234(R1), $0, R3 // 04000000e46104d2 + PLFD 1234(R1), $0, F3 // 06000000c86104d2 
+ PLFS 1234567890(R4), $0, F3 // 06004996c06402d2 + PLFS 1234567890(R0), $1, F3 // 06104996c06002d2 + PLHA 1234(R1), $0, R3 // 06000000a86104d2 + PLHZ 1234(R1), $0, R3 // 06000000a06104d2 + PLQ 1234(R1), $0, R4 // 04000000e08104d2 + PLWA 1234(R1), $0, R3 // 04000000a46104d2 + PLWZ 1234567890(R4), $0, R3 // 06004996806402d2 + PLWZ 1234567890(R0), $1, R3 // 06104996806002d2 + PLXSD 1234(R1), $0, V1 // 04000000a82104d2 + PLXSSP 5(R1), $0, V2 // 04000000ac410005 + PLXSSP 5(R0), $1, V2 // 04100000ac400005 + PLXV 12346891(R6), $1, VS44 // 041000bccd86660b + PLXVP 12345678(R4), $1, VS4 // 041000bce884614e + PMXVBF16GER2 VS1, VS2, $1, $2, $3, A1 // 0790c012ec811198 + PMXVBF16GER2NN VS1, VS2, $1, $2, $3, A1 // 0790c012ec811790 + PMXVBF16GER2NP VS1, VS2, $1, $2, $3, A1 // 0790c012ec811390 + PMXVBF16GER2PN VS1, VS2, $1, $2, $3, A1 // 0790c012ec811590 + PMXVBF16GER2PP VS1, VS2, $1, $2, $3, A1 // 0790c012ec811190 + PMXVF16GER2 VS1, VS2, $1, $2, $3, A1 // 0790c012ec811098 + PMXVF16GER2NN VS1, VS2, $1, $2, $3, A1 // 0790c012ec811690 + PMXVF16GER2NP VS1, VS2, $1, $2, $3, A1 // 0790c012ec811290 + PMXVF16GER2PN VS1, VS2, $1, $2, $3, A1 // 0790c012ec811490 + PMXVF16GER2PP VS1, VS2, $1, $2, $3, A1 // 0790c012ec811090 + PMXVF32GER VS1, VS2, $1, $2, A1 // 07900012ec8110d8 + PMXVF32GERNN VS1, VS2, $1, $2, A1 // 07900012ec8116d0 + PMXVF32GERNP VS1, VS2, $1, $2, A1 // 07900012ec8112d0 + PMXVF32GERPN VS1, VS2, $1, $2, A1 // 07900012ec8114d0 + PMXVF32GERPP VS1, VS2, $1, $2, A1 // 07900012ec8110d0 + PMXVF64GER VS4, VS2, $1, $2, A1 // 07900018ec8411d8 + PMXVF64GERNN VS4, VS2, $1, $2, A1 // 07900018ec8417d0 + PMXVF64GERNP VS4, VS2, $1, $2, A1 // 07900018ec8413d0 + PMXVF64GERPN VS4, VS2, $1, $2, A1 // 07900018ec8415d0 + PMXVF64GERPP VS4, VS2, $1, $2, A1 // 07900018ec8411d0 + PMXVI16GER2 VS1, VS2, $1, $2, $3, A1 // 0790c012ec811258 + PMXVI16GER2PP VS1, VS2, $1, $2, $3, A1 // 0790c012ec811358 + PMXVI16GER2S VS1, VS2, $1, $2, $3, A1 // 0790c012ec811158 + PMXVI16GER2SPP VS1, VS2, $1, $2, $3, A1 // 
0790c012ec811150 + PMXVI4GER8 VS1, VS2, $1, $2, $3, A1 // 07900312ec811118 + PMXVI4GER8PP VS1, VS2, $1, $2, $3, A1 // 07900312ec811110 + PMXVI8GER4 VS1, VS2, $1, $2, $3, A1 // 07903012ec811018 + PMXVI8GER4PP VS1, VS2, $1, $2, $3, A1 // 07903012ec811010 + PMXVI8GER4SPP VS1, VS2, $1, $2, $3, A1 // 07903012ec811318 + PNOP // 0700000000000000 + PSTB R1, $1, 12345678(R2) // 061000bc9822614e + PSTD R1, $1, 12345678(R2) // 041000bcf422614e + PSTFD F1, $1, 12345678(R2) // 061000bcd822614e + PSTFS F1, $1, 123456789(R7) // 0610075bd027cd15 + PSTH R1, $1, 12345678(R2) // 061000bcb022614e + PSTQ R2, $1, 12345678(R2) // 041000bcf042614e + PSTW R1, $1, 12345678(R2) // 061000bc9022614e + PSTW R24, $0, 45(R13) // 06000000930d002d + PSTXSD V1, $1, 12345678(R2) // 041000bcb822614e + PSTXSSP V1, $1, 1234567890(R0) // 04104996bc2002d2 + PSTXSSP V1, $1, 1234567890(R1) // 04104996bc2102d2 + PSTXSSP V1, $0, 1234567890(R3) // 04004996bc2302d2 + PSTXV VS6, $1, 1234567890(R5) // 04104996d8c502d2 + PSTXVP VS2, $1, 12345678(R2) // 041000bcf842614e + PSTXVP VS62, $0, 5555555(R3) // 04000054fbe3c563 + SETBC CR2EQ, R2 // 7c4a0300 + SETBCR CR2LT, R2 // 7c480340 + SETNBC CR2GT, R2 // 7c490380 + SETNBCR CR6SO, R2 // 7c5b03c0 + STXVP VS6, 12352(R5) // 18c53041 + STXVPX VS22, (R1)(R2) // 7ec20b9a + STXVRBX VS2, (R1)(R2) // 7c42091a + STXVRDX VS2, (R1)(R2) // 7c4209da + STXVRHX VS2, (R1)(R2) // 7c42095a + STXVRWX VS2, (R1)(R2) // 7c42099a + VCFUGED V1, V2, V3 // 1061154d + VCLRLB V1, R2, V3 // 1061118d + VCLRRB V1, R2, V3 // 106111cd + VCLZDM V1, V2, V3 // 10611784 + VCMPEQUQ V1, V2, V3 // 106111c7 + VCMPEQUQCC V1, V2, V3 // 106115c7 + VCMPGTSQ V1, V2, V3 // 10611387 + VCMPGTSQCC V1, V2, V3 // 10611787 + VCMPGTUQ V1, V2, V3 // 10611287 + VCMPGTUQCC V1, V2, V3 // 10611687 + VCMPSQ V1, V2, CR2 // 11011141 + VCMPUQ V1, V2, CR3 // 11811101 + VCNTMBB V1, $1, R3 // 10790e42 + VCNTMBD V1, $1, R3 // 107f0e42 + VCNTMBH V1, $1, R3 // 107b0e42 + VCNTMBW V1, $1, R3 // 107d0e42 + VCTZDM V1, V2, V3 // 106117c4 + 
VDIVESD V1, V2, V3 // 106113cb + VDIVESQ V1, V2, V3 // 1061130b + VDIVESW V1, V2, V3 // 1061138b + VDIVEUD V1, V2, V3 // 106112cb + VDIVEUQ V1, V2, V3 // 1061120b + VDIVEUW V1, V2, V3 // 1061128b + VDIVSD V1, V2, V3 // 106111cb + VDIVSQ V1, V2, V3 // 1061110b + VDIVSW V1, V2, V3 // 1061118b + VDIVUD V1, V2, V3 // 106110cb + VDIVUQ V1, V2, V3 // 1061100b + VDIVUW V1, V2, V3 // 1061108b + VEXPANDBM V1, V2 // 10400e42 + VEXPANDDM V1, V2 // 10430e42 + VEXPANDHM V1, V2 // 10410e42 + VEXPANDQM V1, V2 // 10440e42 + VEXPANDWM V1, V2 // 10420e42 + VEXTDDVLX V1, V2, R3, V4 // 108110de + VEXTDDVRX V1, V2, R3, V4 // 108110df + VEXTDUBVLX V1, V2, R3, V4 // 108110d8 + VEXTDUBVRX V1, V2, R3, V4 // 108110d9 + VEXTDUHVLX V1, V2, R3, V4 // 108110da + VEXTDUHVRX V1, V2, R3, V4 // 108110db + VEXTDUWVLX V1, V2, R3, V4 // 108110dc + VEXTDUWVRX V1, V2, R5, V3 // 1061115d + VEXTRACTBM V1, R2 // 10480e42 + VEXTRACTDM V1, R2 // 104b0e42 + VEXTRACTHM V1, R2 // 10490e42 + VEXTRACTQM V1, R2 // 104c0e42 + VEXTRACTWM V1, R6 // 10ca0e42 + VEXTSD2Q V1, V2 // 105b0e02 + VGNB V1, $1, R31 // 13e10ccc + VINSBLX R1, R2, V3 // 1061120f + VINSBRX R1, R2, V3 // 1061130f + VINSBVLX R1, V1, V2 // 1041080f + VINSBVRX R1, V1, V2 // 1041090f + VINSD R1, $2, V2 // 104209cf + VINSDLX R1, R2, V3 // 106112cf + VINSDRX R1, R2, V3 // 106113cf + VINSHLX R1, R2, V3 // 1061124f + VINSHRX R1, R2, V3 // 1061134f + VINSHVLX R1, V2, V3 // 1061104f + VINSHVRX R1, V2, V3 // 1061114f + VINSW R1, $4, V3 // 106408cf + VINSWLX R1, R2, V3 // 1061128f + VINSWRX R1, R2, V3 // 1061138f + VINSWVLX R1, V2, V3 // 1061108f + VINSWVRX R1, V2, V3 // 1061118f + VMODSD V1, V2, V3 // 106117cb + VMODSQ V1, V2, V3 // 1061170b + VMODSW V1, V2, V3 // 1061178b + VMODUD V1, V2, V3 // 106116cb + VMODUQ V1, V2, V3 // 1061160b + VMODUW V1, V2, V3 // 1061168b + VMSUMCUD V1, V2, V3, V4 // 108110d7 + VMULESD V1, V2, V3 // 106113c8 + VMULEUD V1, V2, V3 // 106112c8 + VMULHSD V1, V2, V3 // 106113c9 + VMULHSW V1, V2, V3 // 10611389 + VMULHUD V1, V2, V3 // 
106112c9 + VMULHUW V1, V2, V3 // 10611289 + VMULLD V1, V2, V3 // 106111c9 + VMULOSD V1, V2, V3 // 106111c8 + VMULOUD V1, V2, V3 // 106110c8 + VPDEPD V1, V2, V3 // 106115cd + VPEXTD V1, V2, V3 // 1061158d + VRLQ V1, V2, V3 // 10611005 + VRLQMI V1, V2, V3 // 10611045 + VRLQNM V1, V2, V3 // 10611145 + VSLDBI V1, V2, $3, V3 // 106110d6 + VSLQ V1, V2, V3 // 10611105 + VSRAQ V1, V2, V3 // 10611305 + VSRDBI V1, V2, $3, V4 // 108112d6 + VSRQ V1, V2, V3 // 10611205 + VSTRIBL V1, V2 // 1040080d + VSTRIBLCC V1, V2 // 10400c0d + VSTRIBR V1, V2 // 1041080d + VSTRIBRCC V1, V2 // 10410c0d + VSTRIHL V1, V2 // 1042080d + VSTRIHLCC V1, V2 // 10420c0d + VSTRIHR V1, V2 // 1043080d + VSTRIHRCC V1, V2 // 10430c0d + XSCMPEQQP V1, V2, V3 // fc611088 + XSCMPGEQP V1, V2, V3 // fc611188 + XSCMPGTQP V1, V2, V3 // fc6111c8 + XSCVQPSQZ V1, V2 // fc480e88 + XSCVQPUQZ V1, V2 // fc400e88 + XSCVSQQP V1, V2 // fc4b0e88 + XSCVUQQP V2, V3 // fc631688 + XSMAXCQP V1, V2, V3 // fc611548 + XSMINCQP V1, V2, V4 // fc8115c8 + XVBF16GER2 VS1, VS2, A1 // ec811198 + XVBF16GER2NN VS1, VS2, A1 // ec811790 + XVBF16GER2NP VS1, VS2, A1 // ec811390 + XVBF16GER2PN VS1, VS2, A1 // ec811590 + XVBF16GER2PP VS1, VS2, A1 // ec811190 + XVCVBF16SPN VS2, VS3 // f070176c + XVCVSPBF16 VS1, VS4 // f0910f6c + XVF16GER2 VS1, VS2, A1 // ec811098 + XVF16GER2NN VS1, VS2, A1 // ec811690 + XVF16GER2NP VS1, VS2, A1 // ec811290 + XVF16GER2PN VS1, VS2, A1 // ec811490 + XVF16GER2PP VS1, VS2, A1 // ec811090 + XVF32GER VS1, VS2, A1 // ec8110d8 + XVF32GERNN VS1, VS2, A1 // ec8116d0 + XVF32GERNP VS1, VS2, A1 // ec8112d0 + XVF32GERPN VS1, VS2, A1 // ec8114d0 + XVF32GERPP VS1, VS2, A1 // ec8110d0 + XVF64GER VS2, VS1, A1 // ec8209d8 + XVF64GERNN VS2, VS1, A1 // ec820fd0 + XVF64GERNP VS2, VS1, A1 // ec820bd0 + XVF64GERPN VS2, VS1, A1 // ec820dd0 + XVF64GERPP VS2, VS1, A1 // ec8209d0 + XVI16GER2 VS1, VS2, A1 // ec811258 + XVI16GER2PP VS1, VS2, A1 // ec811358 + XVI16GER2S VS1, VS2, A1 // ec811158 + XVI16GER2SPP VS1, VS2, A1 // ec811150 + XVI4GER8 
VS1, VS2, A1 // ec811118 + XVI4GER8PP VS1, VS2, A1 // ec811110 + XVI8GER4 VS1, VS2, A1 // ec811018 + XVI8GER4PP VS1, VS2, A1 // ec811010 + XVI8GER4SPP VS4, VS6, A1 // ec843318 + XVTLSBB VS1, CR2 // f1020f6c + XXBLENDVB VS1, VS3, VS7, VS11 // 05000000856119c0 + XXBLENDVD VS1, VS3, VS7, VS11 // 05000000856119f0 + XXBLENDVH VS1, VS3, VS7, VS11 // 05000000856119d0 + XXBLENDVW VS1, VS3, VS7, VS11 // 05000000856119e0 + XXEVAL VS1, VS2, VS3, $2, VS4 // 05000002888110d0 + XXGENPCVBM V2, $2, VS3 // f0621728 + XXGENPCVDM V2, $2, VS3 // f062176a + XXGENPCVHM V2, $2, VS3 // f062172a + XXGENPCVWM V2, $2, VS3 // f0621768 + XXMFACC A1 // 7c800162 + XXMTACC A1 // 7c810162 + XXPERMX VS1, VS34, VS2, $2, VS3 // 0500000288611082 + XXSETACCZ A1 // 7c830162 + XXSPLTI32DX $1, $1234, VS3 // 05000000806204d2 + XXSPLTIDP $12345678, VS4 // 050000bc8084614e + XXSPLTIW $123456, VS3 // 050000018066e240 + + // ISA 3.1B + HASHST R2, -8(R1) // 7fe115a5 + HASHSTP R2, -8(R1) // 7fe11525 + HASHCHK -8(R1), R2 // 7fe115e5 + HASHCHKP -8(R1), R2 // 7fe11565 + + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/riscv64.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/riscv64.s new file mode 100644 index 0000000000000000000000000000000000000000..072302b2257d72029c274d3d6458bf4dc866e073 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/riscv64.s @@ -0,0 +1,424 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "../../../../../runtime/textflag.h" + +TEXT asmtest(SB),DUPOK|NOSPLIT,$0 +start: + // Unprivileged ISA + + // 2.4: Integer Computational Instructions + + ADDI $2047, X5 // 9382f27f + ADDI $-2048, X5 // 93820280 + ADDI $2048, X5 // 9382024093820240 + ADDI $-2049, X5 // 938202c09382f2bf + ADDI $4094, X5 // 9382f27f9382f27f + ADDI $-4096, X5 // 9382028093820280 + ADDI $4095, X5 // b71f00009b8fffffb382f201 + ADDI $-4097, X5 // b7ffffff9b8fffffb382f201 + ADDI $2047, X5, X6 // 1383f27f + ADDI $-2048, X5, X6 // 13830280 + ADDI $2048, X5, X6 // 1383024013030340 + ADDI $-2049, X5, X6 // 138302c01303f3bf + ADDI $4094, X5, X6 // 1383f27f1303f37f + ADDI $-4096, X5, X6 // 1383028013030380 + ADDI $4095, X5, X6 // b71f00009b8fffff3383f201 + ADDI $-4097, X5, X6 // b7ffffff9b8fffff3383f201 + + SLTI $55, X5, X7 // 93a37203 + SLTIU $55, X5, X7 // 93b37203 + + ANDI $1, X5, X6 // 13f31200 + ANDI $1, X5 // 93f21200 + ANDI $2048, X5 // b71f00009b8f0f80b3f2f201 + ORI $1, X5, X6 // 13e31200 + ORI $1, X5 // 93e21200 + ORI $2048, X5 // b71f00009b8f0f80b3e2f201 + XORI $1, X5, X6 // 13c31200 + XORI $1, X5 // 93c21200 + XORI $2048, X5 // b71f00009b8f0f80b3c2f201 + + SLLI $1, X5, X6 // 13931200 + SLLI $1, X5 // 93921200 + SRLI $1, X5, X6 // 13d31200 + SRLI $1, X5 // 93d21200 + SRAI $1, X5, X6 // 13d31240 + SRAI $1, X5 // 93d21240 + + ADD X6, X5, X7 // b3836200 + ADD X5, X6 // 33035300 + ADD $2047, X5, X6 // 1383f27f + ADD $-2048, X5, X6 // 13830280 + ADD $2047, X5 // 9382f27f + ADD $-2048, X5 // 93820280 + + SLT X6, X5, X7 // b3a36200 + SLT $55, X5, X7 // 93a37203 + SLTU X6, X5, X7 // b3b36200 + SLTU $55, X5, X7 // 93b37203 + + AND X6, X5, X7 // b3f36200 + AND X5, X6 // 33735300 + AND $1, X5, X6 // 13f31200 + AND $1, X5 // 93f21200 + OR X6, X5, X7 // b3e36200 + OR X5, X6 // 33635300 + OR $1, X5, X6 // 13e31200 + OR $1, X5 // 93e21200 + XOR X6, X5, X7 // b3c36200 + XOR X5, X6 // 33435300 + XOR $1, X5, X6 // 13c31200 + XOR $1, X5 // 93c21200 + + AUIPC $0, X10 // 17050000 + AUIPC $0, 
X11 // 97050000 + AUIPC $1, X10 // 17150000 + AUIPC $-524288, X15 // 97070080 + AUIPC $524287, X10 // 17f5ff7f + + LUI $0, X15 // b7070000 + LUI $167, X15 // b7770a00 + LUI $-524288, X15 // b7070080 + LUI $524287, X15 // b7f7ff7f + + SLL X6, X5, X7 // b3936200 + SLL X5, X6 // 33135300 + SLL $1, X5, X6 // 13931200 + SLL $1, X5 // 93921200 + SRL X6, X5, X7 // b3d36200 + SRL X5, X6 // 33535300 + SRL $1, X5, X6 // 13d31200 + SRL $1, X5 // 93d21200 + + SUB X6, X5, X7 // b3836240 + SUB X5, X6 // 33035340 + SUB $-2047, X5, X6 // 1383f27f + SUB $2048, X5, X6 // 13830280 + SUB $-2047, X5 // 9382f27f + SUB $2048, X5 // 93820280 + + SRA X6, X5, X7 // b3d36240 + SRA X5, X6 // 33535340 + SRA $1, X5, X6 // 13d31240 + SRA $1, X5 // 93d21240 + + // 2.5: Control Transfer Instructions + JAL X5, 2(PC) // ef028000 + JALR X6, (X5) // 67830200 + JALR X6, 4(X5) // 67834200 + BEQ X5, X6, 2(PC) // 63846200 + BNE X5, X6, 2(PC) // 63946200 + BLT X5, X6, 2(PC) // 63c46200 + BLTU X5, X6, 2(PC) // 63e46200 + BGE X5, X6, 2(PC) // 63d46200 + BGEU X5, X6, 2(PC) // 63f46200 + + // 2.6: Load and Store Instructions + LW (X5), X6 // 03a30200 + LW 4(X5), X6 // 03a34200 + LWU (X5), X6 // 03e30200 + LWU 4(X5), X6 // 03e34200 + LH (X5), X6 // 03930200 + LH 4(X5), X6 // 03934200 + LHU (X5), X6 // 03d30200 + LHU 4(X5), X6 // 03d34200 + LB (X5), X6 // 03830200 + LB 4(X5), X6 // 03834200 + LBU (X5), X6 // 03c30200 + LBU 4(X5), X6 // 03c34200 + + SW X5, (X6) // 23205300 + SW X5, 4(X6) // 23225300 + SH X5, (X6) // 23105300 + SH X5, 4(X6) // 23125300 + SB X5, (X6) // 23005300 + SB X5, 4(X6) // 23025300 + + // 2.7: Memory Ordering Instructions + FENCE // 0f00f00f + + // 5.2: Integer Computational Instructions (RV64I) + ADDIW $1, X5, X6 // 1b831200 + SLLIW $1, X5, X6 // 1b931200 + SRLIW $1, X5, X6 // 1bd31200 + SRAIW $1, X5, X6 // 1bd31240 + ADDW X5, X6, X7 // bb035300 + SLLW X5, X6, X7 // bb135300 + SRLW X5, X6, X7 // bb535300 + SUBW X5, X6, X7 // bb035340 + SRAW X5, X6, X7 // bb535340 + ADDIW $1, X6 // 1b031300 
+ SLLIW $1, X6 // 1b131300 + SRLIW $1, X6 // 1b531300 + SRAIW $1, X6 // 1b531340 + ADDW X5, X7 // bb835300 + SLLW X5, X7 // bb935300 + SRLW X5, X7 // bbd35300 + SUBW X5, X7 // bb835340 + SRAW X5, X7 // bbd35340 + ADDW $1, X6 // 1b031300 + SLLW $1, X6 // 1b131300 + SRLW $1, X6 // 1b531300 + SUBW $1, X6 // 1b03f3ff + SRAW $1, X6 // 1b531340 + + // 5.3: Load and Store Instructions (RV64I) + LD (X5), X6 // 03b30200 + LD 4(X5), X6 // 03b34200 + SD X5, (X6) // 23305300 + SD X5, 4(X6) // 23325300 + + // 7.1: Multiplication Operations + MUL X5, X6, X7 // b3035302 + MULH X5, X6, X7 // b3135302 + MULHU X5, X6, X7 // b3335302 + MULHSU X5, X6, X7 // b3235302 + MULW X5, X6, X7 // bb035302 + DIV X5, X6, X7 // b3435302 + DIVU X5, X6, X7 // b3535302 + REM X5, X6, X7 // b3635302 + REMU X5, X6, X7 // b3735302 + DIVW X5, X6, X7 // bb435302 + DIVUW X5, X6, X7 // bb535302 + REMW X5, X6, X7 // bb635302 + REMUW X5, X6, X7 // bb735302 + + // 8.2: Load-Reserved/Store-Conditional + LRW (X5), X6 // 2fa30214 + LRD (X5), X6 // 2fb30214 + SCW X5, (X6), X7 // af23531a + SCD X5, (X6), X7 // af33531a + + // 8.3: Atomic Memory Operations + AMOSWAPW X5, (X6), X7 // af23530e + AMOSWAPD X5, (X6), X7 // af33530e + AMOADDW X5, (X6), X7 // af235306 + AMOADDD X5, (X6), X7 // af335306 + AMOANDW X5, (X6), X7 // af235366 + AMOANDD X5, (X6), X7 // af335366 + AMOORW X5, (X6), X7 // af235346 + AMOORD X5, (X6), X7 // af335346 + AMOXORW X5, (X6), X7 // af235326 + AMOXORD X5, (X6), X7 // af335326 + AMOMAXW X5, (X6), X7 // af2353a6 + AMOMAXD X5, (X6), X7 // af3353a6 + AMOMAXUW X5, (X6), X7 // af2353e6 + AMOMAXUD X5, (X6), X7 // af3353e6 + AMOMINW X5, (X6), X7 // af235386 + AMOMIND X5, (X6), X7 // af335386 + AMOMINUW X5, (X6), X7 // af2353c6 + AMOMINUD X5, (X6), X7 // af3353c6 + + // 10.1: Base Counters and Timers + RDCYCLE X5 // f32200c0 + RDTIME X5 // f32210c0 + RDINSTRET X5 // f32220c0 + + // 11.5: Single-Precision Load and Store Instructions + FLW (X5), F0 // 07a00200 + FLW 4(X5), F0 // 07a04200 + FSW F0, (X5) 
// 27a00200 + FSW F0, 4(X5) // 27a20200 + + // 11.6: Single-Precision Floating-Point Computational Instructions + FADDS F1, F0, F2 // 53011000 + FSUBS F1, F0, F2 // 53011008 + FMULS F1, F0, F2 // 53011010 + FDIVS F1, F0, F2 // 53011018 + FMINS F1, F0, F2 // 53011028 + FMAXS F1, F0, F2 // 53111028 + FSQRTS F0, F1 // d3000058 + + // 11.7: Single-Precision Floating-Point Conversion and Move Instructions + FCVTWS F0, X5 // d31200c0 + FCVTLS F0, X5 // d31220c0 + FCVTSW X5, F0 // 538002d0 + FCVTSL X5, F0 // 538022d0 + FCVTWUS F0, X5 // d31210c0 + FCVTLUS F0, X5 // d31230c0 + FCVTSWU X5, F0 // 538012d0 + FCVTSLU X5, F0 // 538032d0 + FSGNJS F1, F0, F2 // 53011020 + FSGNJNS F1, F0, F2 // 53111020 + FSGNJXS F1, F0, F2 // 53211020 + FMVXS F0, X5 // d30200e0 + FMVSX X5, F0 // 538002f0 + FMVXW F0, X5 // d30200e0 + FMVWX X5, F0 // 538002f0 + FMADDS F1, F2, F3, F4 // 43822018 + FMSUBS F1, F2, F3, F4 // 47822018 + FNMSUBS F1, F2, F3, F4 // 4b822018 + FNMADDS F1, F2, F3, F4 // 4f822018 + + // 11.8: Single-Precision Floating-Point Compare Instructions + FEQS F0, F1, X7 // d3a300a0 + FLTS F0, F1, X7 // d39300a0 + FLES F0, F1, X7 // d38300a0 + + // 11.9: Single-Precision Floating-Point Classify Instruction + FCLASSS F0, X5 // d31200e0 + + // 12.3: Double-Precision Load and Store Instructions + FLD (X5), F0 // 07b00200 + FLD 4(X5), F0 // 07b04200 + FSD F0, (X5) // 27b00200 + FSD F0, 4(X5) // 27b20200 + + // 12.4: Double-Precision Floating-Point Computational Instructions + FADDD F1, F0, F2 // 53011002 + FSUBD F1, F0, F2 // 5301100a + FMULD F1, F0, F2 // 53011012 + FDIVD F1, F0, F2 // 5301101a + FMIND F1, F0, F2 // 5301102a + FMAXD F1, F0, F2 // 5311102a + FSQRTD F0, F1 // d300005a + + // 12.5: Double-Precision Floating-Point Conversion and Move Instructions + FCVTWD F0, X5 // d31200c2 + FCVTLD F0, X5 // d31220c2 + FCVTDW X5, F0 // 538002d2 + FCVTDL X5, F0 // 538022d2 + FCVTWUD F0, X5 // d31210c2 + FCVTLUD F0, X5 // d31230c2 + FCVTDWU X5, F0 // 538012d2 + FCVTDLU X5, F0 // 538032d2 + 
FCVTSD F0, F1 // d3001040 + FCVTDS F0, F1 // d3000042 + FSGNJD F1, F0, F2 // 53011022 + FSGNJND F1, F0, F2 // 53111022 + FSGNJXD F1, F0, F2 // 53211022 + FMVXD F0, X5 // d30200e2 + FMVDX X5, F0 // 538002f2 + FMADDD F1, F2, F3, F4 // 4382201a + FMSUBD F1, F2, F3, F4 // 4782201a + FNMSUBD F1, F2, F3, F4 // 4b82201a + FNMADDD F1, F2, F3, F4 // 4f82201a + + // 12.6: Double-Precision Floating-Point Classify Instruction + FCLASSD F0, X5 // d31200e2 + + // Privileged ISA + + // 3.2.1: Environment Call and Breakpoint + ECALL // 73000000 + SCALL // 73000000 + EBREAK // 73001000 + SBREAK // 73001000 + + // Arbitrary bytes (entered in little-endian mode) + WORD $0x12345678 // WORD $305419896 // 78563412 + WORD $0x9abcdef0 // WORD $2596069104 // f0debc9a + + // MOV pseudo-instructions + MOV X5, X6 // 13830200 + MOV $2047, X5 // 9302f07f + MOV $-2048, X5 // 93020080 + MOV $2048, X5 // b71200009b820280 + MOV $-2049, X5 // b7f2ffff9b82f27f + MOV $4096, X5 // b7120000 + MOV $2147479552, X5 // b7f2ff7f + MOV $2147483647, X5 // b70200809b82f2ff + MOV $-2147483647, X5 // b70200809b821200 + + // Converted to load of symbol (AUIPC + LD) + MOV $4294967295, X5 // 9702000083b20200 + // Converted to MOV $1, X5 + SLLI $32, X5 + MOV $4294967296, X5 // 9302100093920202 + + MOV (X5), X6 // 03b30200 + MOV 4(X5), X6 // 03b34200 + MOVB (X5), X6 // 03830200 + MOVB 4(X5), X6 // 03834200 + MOVH (X5), X6 // 03930200 + MOVH 4(X5), X6 // 03934200 + MOVW (X5), X6 // 03a30200 + MOVW 4(X5), X6 // 03a34200 + MOV X5, (X6) // 23305300 + MOV X5, 4(X6) // 23325300 + MOVB X5, (X6) // 23005300 + MOVB X5, 4(X6) // 23025300 + MOVH X5, (X6) // 23105300 + MOVH X5, 4(X6) // 23125300 + MOVW X5, (X6) // 23205300 + MOVW X5, 4(X6) // 23225300 + + MOVB X5, X6 // 1393820313538343 + MOVH X5, X6 // 1393020313530343 + MOVW X5, X6 // 1b830200 + MOVBU X5, X6 // 13f3f20f + MOVHU X5, X6 // 1393020313530303 + MOVWU X5, X6 // 1393020213530302 + + MOVF 4(X5), F0 // 07a04200 + MOVF F0, 4(X5) // 27a20200 + MOVF F0, F1 // d3000020 + + 
MOVD 4(X5), F0 // 07b04200 + MOVD F0, 4(X5) // 27b20200 + MOVD F0, F1 // d3000022 + + // TLS load with local-exec (LUI + ADDIW + ADD of TP + load) + MOV tls(SB), X5 // b70f00009b8f0f00b38f4f0083b20f00 + MOVB tls(SB), X5 // b70f00009b8f0f00b38f4f0083820f00 + + // TLS store with local-exec (LUI + ADDIW + ADD of TP + store) + MOV X5, tls(SB) // b70f00009b8f0f00b38f4f0023b05f00 + MOVB X5, tls(SB) // b70f00009b8f0f00b38f4f0023805f00 + + // NOT pseudo-instruction + NOT X5 // 93c2f2ff + NOT X5, X6 // 13c3f2ff + + // NEG/NEGW pseudo-instructions + NEG X5 // b3025040 + NEG X5, X6 // 33035040 + NEGW X5 // bb025040 + NEGW X5, X6 // 3b035040 + + // This jumps to the second instruction in the function (the + // first instruction is an invisible stack pointer adjustment). + JMP start // JMP 2 + + JMP 2(PC) // 6f008000 + JMP (X5) // 67800200 + JMP 4(X5) // 67804200 + + // CALL and JMP to symbol are encoded as JAL (using LR or ZERO + // respectively), with a R_RISCV_JAL relocation. The linker resolves + // the real address and updates the immediate, using a trampoline in + // the case where the address is not directly reachable. 
+ CALL asmtest(SB) // ef000000 + JMP asmtest(SB) // 6f000000 + + // Branch pseudo-instructions + BEQZ X5, 2(PC) // 63840200 + BGEZ X5, 2(PC) // 63d40200 + BGT X5, X6, 2(PC) // 63445300 + BGTU X5, X6, 2(PC) // 63645300 + BGTZ X5, 2(PC) // 63445000 + BLE X5, X6, 2(PC) // 63545300 + BLEU X5, X6, 2(PC) // 63745300 + BLEZ X5, 2(PC) // 63545000 + BLTZ X5, 2(PC) // 63c40200 + BNEZ X5, 2(PC) // 63940200 + + // Set pseudo-instructions + SEQZ X15, X15 // 93b71700 + SNEZ X15, X15 // b337f000 + + // F extension + FABSS F0, F1 // d3200020 + FNEGS F0, F1 // d3100020 + FNES F0, F1, X7 // d3a300a093c31300 + + // D extension + FABSD F0, F1 // d3200022 + FNEGD F0, F1 // d3100022 + FNED F0, F1, X5 // d3a200a293c21200 + FLTD F0, F1, X5 // d39200a2 + FLED F0, F1, X5 // d38200a2 + FEQD F0, F1, X5 // d3a200a2 + +GLOBL tls(SB), TLSBSS, $8 diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/riscv64error.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/riscv64error.s new file mode 100644 index 0000000000000000000000000000000000000000..2dc9db3fb1f6be9542a7a52c583fe9e18a593501 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/riscv64error.s @@ -0,0 +1,45 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +TEXT errors(SB),$0 + MOV $errors(SB), (X5) // ERROR "address load must target register" + MOV $8(SP), (X5) // ERROR "address load must target register" + MOVB $8(SP), X5 // ERROR "unsupported address load" + MOVH $8(SP), X5 // ERROR "unsupported address load" + MOVW $8(SP), X5 // ERROR "unsupported address load" + MOVF $8(SP), X5 // ERROR "unsupported address load" + MOV $1234, 0(SP) // ERROR "constant load must target register" + MOV $1234, 8(SP) // ERROR "constant load must target register" + MOV $0, 0(SP) // ERROR "constant load must target register" + MOV $0, 8(SP) // ERROR "constant load must target register" + MOV $1234, 0(SP) // ERROR "constant load must target register" + MOV $1234, 8(SP) // ERROR "constant load must target register" + MOVB $1, X5 // ERROR "unsupported constant load" + MOVH $1, X5 // ERROR "unsupported constant load" + MOVW $1, X5 // ERROR "unsupported constant load" + MOVF $1, X5 // ERROR "unsupported constant load" + MOVBU X5, (X6) // ERROR "unsupported unsigned store" + MOVHU X5, (X6) // ERROR "unsupported unsigned store" + MOVWU X5, (X6) // ERROR "unsupported unsigned store" + MOVF F0, F1, F2 // ERROR "illegal MOV instruction" + MOVD F0, F1, F2 // ERROR "illegal MOV instruction" + MOV X10, X11, X12 // ERROR "illegal MOV instruction" + MOVW X10, X11, X12 // ERROR "illegal MOV instruction" + SLLI $64, X5, X6 // ERROR "shift amount out of range 0 to 63" + SRLI $64, X5, X6 // ERROR "shift amount out of range 0 to 63" + SRAI $64, X5, X6 // ERROR "shift amount out of range 0 to 63" + SLLI $-1, X5, X6 // ERROR "shift amount out of range 0 to 63" + SRLI $-1, X5, X6 // ERROR "shift amount out of range 0 to 63" + SRAI $-1, X5, X6 // ERROR "shift amount out of range 0 to 63" + SLLIW $32, X5, X6 // ERROR "shift amount out of range 0 to 31" + SRLIW $32, X5, X6 // ERROR "shift amount out of range 0 to 31" + SRAIW $32, X5, X6 // ERROR "shift amount out of range 0 to 31" + SLLIW $-1, X5, X6 // ERROR "shift amount out of range 0 to 31" + SRLIW $-1, 
X5, X6 // ERROR "shift amount out of range 0 to 31" + SRAIW $-1, X5, X6 // ERROR "shift amount out of range 0 to 31" + SD X5, 4294967296(X6) // ERROR "constant 4294967296 too large" + SRLI $1, X5, F1 // ERROR "expected integer register in rd position but got non-integer register F1" + SRLI $1, F1, X5 // ERROR "expected integer register in rs1 position but got non-integer register F1" + FNES F1, (X5) // ERROR "needs an integer register output" + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/s390x.s b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/s390x.s new file mode 100644 index 0000000000000000000000000000000000000000..977190678f9585234b98bcdd5d0f0f122404faf7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/asm/testdata/s390x.s @@ -0,0 +1,533 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "../../../../../runtime/textflag.h" + +TEXT main·foo(SB),DUPOK|NOSPLIT,$16-0 // TEXT main.foo(SB), DUPOK|NOSPLIT, $16-0 + MOVD R1, R2 // b9040021 + MOVW R3, R4 // b9140043 + MOVH R5, R6 // b9070065 + MOVB R7, R8 // b9060087 + MOVWZ R1, R2 // b9160021 + MOVHZ R2, R3 // b9850032 + MOVBZ R4, R5 // b9840054 + MOVDBR R1, R2 // b90f0021 + MOVWBR R3, R4 // b91f0043 + + MOVDEQ R0, R1 // b9e28010 + MOVDGE R2, R3 // b9e2a032 + MOVDGT R4, R5 // b9e22054 + MOVDLE R6, R7 // b9e2c076 + MOVDLT R8, R9 // b9e24098 + MOVDNE R10, R11 // b9e270ba + + LOCR $3, R2, R1 // b9f23012 + LOCGR $7, R5, R6 // b9e27065 + + MOVD (R15), R1 // e310f0000004 + MOVW (R15), R2 // e320f0000014 + MOVH (R15), R3 // e330f0000015 + MOVB (R15), R4 // e340f0000077 + MOVWZ (R15), R5 // e350f0000016 + MOVHZ (R15), R6 // e360f0000091 + MOVBZ (R15), R7 // e370f0000090 + MOVDBR (R15), R8 // e380f000000f + MOVWBR (R15), R9 // e390f000001e + + MOVD R1, n-8(SP) // e310f0100024 + MOVW R2, n-8(SP) // 5020f010 + MOVH R3, n-8(SP) // 
4030f010 + MOVB R4, n-8(SP) // 4240f010 + MOVWZ R5, n-8(SP) // 5050f010 + MOVHZ R6, n-8(SP) // 4060f010 + MOVBZ R7, n-8(SP) // 4270f010 + MOVDBR R8, n-8(SP) // e380f010002f + MOVWBR R9, n-8(SP) // e390f010003e + + MOVD $-8589934592, R1 // c01efffffffe + MOVW $-131072, R2 // c021fffe0000 + MOVH $-512, R3 // a739fe00 + MOVB $-1, R4 // a749ffff + + MOVD $32767, n-8(SP) // e548f0107fff + MOVD $-1, -524288(R1) // e3a010008071e548a000ffff + MOVW $32767, n-8(SP) // e54cf0107fff + MOVW $-32768, 4096(R2) // e3a020000171e54ca0008000 + MOVH $512, n-8(SP) // e544f0100200 + MOVH $-512, 524288(R3) // c0a10008000041aa3000e544a000fe00 + MOVB $-1, n-8(SP) // 92fff010 + MOVB $255, 4096(R4) // ebff40000152 + MOVB $-128, -524288(R5) // eb8050008052 + MOVB $1, -524289(R6) // c0a1fff7ffff41aa60009201a000 + + // RX (12-bit displacement) and RXY (20-bit displacement) instruction encoding (e.g: ST vs STY) + MOVW R1, 4095(R2)(R3) // 50132fff + MOVW R1, 4096(R2)(R3) // e31320000150 + MOVWZ R1, 4095(R2)(R3) // 50132fff + MOVWZ R1, 4096(R2)(R3) // e31320000150 + MOVH R1, 4095(R2)(R3) // 40132fff + MOVHZ R1, 4095(R2)(R3) // 40132fff + MOVH R1, 4096(R2)(R3) // e31320000170 + MOVHZ R1, 4096(R2)(R3) // e31320000170 + MOVB R1, 4095(R2)(R3) // 42132fff + MOVBZ R1, 4095(R2)(R3) // 42132fff + MOVB R1, 4096(R2)(R3) // e31320000172 + MOVBZ R1, 4096(R2)(R3) // e31320000172 + + ADD R1, R2 // b9e81022 + ADD R1, R2, R3 // b9e81032 + ADD $8192, R1 // a71b2000 + ADD $8192, R1, R2 // ec21200000d9 + ADD $32768, R1 // c21800008000 + ADD $32768, R1, R2 // b9040021c22800008000 + ADDC R1, R2 // b9ea1022 + ADDC $1, R1, R2 // ec21000100db + ADDC $-1, R1, R2 // ec21ffff00db + ADDC R1, R2, R3 // b9ea1032 + ADDW R1, R2 // 1a21 + ADDW R1, R2, R3 // b9f81032 + ADDW $8192, R1 // a71a2000 + ADDW $8192, R1, R2 // ec21200000d8 + ADDE R1, R2 // b9880021 + SUB R3, R4 // b9090043 + SUB R3, R4, R5 // b9e93054 + SUB $8192, R3 // a73be000 + SUB $8192, R3, R4 // ec43e00000d9 + SUBC R1, R2 // b90b0021 + SUBC $1, R1, R2 // 
ec21ffff00db + SUBC R2, R3, R4 // b9eb2043 + SUBW R3, R4 // 1b43 + SUBW R3, R4, R5 // b9f93054 + SUBW $8192, R1 // c21500002000 + SUBW $8192, R1, R2 // 1821c22500002000 + MULLW R6, R7 // b91c0076 + MULLW R6, R7, R8 // b9040087b91c0086 + MULLW $8192, R6 // a76c2000 + MULLW $8192, R6, R7 // 1876a77c2000 + MULLW $-32769, R8 // c281ffff7fff + MULLW $-32769, R8, R9 // 1898c291ffff7fff + MULLD $-2147483648, R1 // c21080000000 + MULLD $-2147483648, R1, R2 // b9040021c22080000000 + MULHD R9, R8 // b90400b8b98600a9ebb9003f000ab98000b8b90900abebb8003f000ab98000b9b9e9b08a + MULHD R7, R2, R1 // b90400b2b98600a7ebb7003f000ab98000b2b90900abebb2003f000ab98000b7b9e9b01a + MULHDU R3, R4 // b90400b4b98600a3b904004a + MULHDU R5, R6, R7 // b90400b6b98600a5b904007a + MLGR R1, R2 // b9860021 + DIVD R1, R2 // b90400b2b90d00a1b904002b + DIVD R1, R2, R3 // b90400b2b90d00a1b904003b + DIVW R4, R5 // b90400b5b91d00a4b904005b + DIVW R4, R5, R6 // b90400b5b91d00a4b904006b + DIVDU R7, R8 // a7a90000b90400b8b98700a7b904008b + DIVDU R7, R8, R9 // a7a90000b90400b8b98700a7b904009b + DIVWU R1, R2 // a7a90000b90400b2b99700a1b904002b + DIVWU R1, R2, R3 // a7a90000b90400b2b99700a1b904003b + MODD R1, R2 // b90400b2b90d00a1b904002a + MODD R1, R2, R3 // b90400b2b90d00a1b904003a + MODW R4, R5 // b90400b5b91d00a4b904005a + MODW R4, R5, R6 // b90400b5b91d00a4b904006a + MODDU R7, R8 // a7a90000b90400b8b98700a7b904008a + MODDU R7, R8, R9 // a7a90000b90400b8b98700a7b904009a + MODWU R1, R2 // a7a90000b90400b2b99700a1b904002a + MODWU R1, R2, R3 // a7a90000b90400b2b99700a1b904003a + NEG R1 // b9030011 + NEG R1, R2 // b9030021 + NEGW R1 // b9130011 + NEGW R1, R2 // b9130021 + FLOGR R2, R2 // b9830022 + POPCNT R3, R4 // b9e10043 + + AND R1, R2 // b9800021 + AND R1, R2, R3 // b9e42031 + AND $-2, R1 // a517fffe + AND $-65536, R1 // c01bffff0000 + AND $1, R1 // c0a100000001b980001a + ANDW R1, R2 // 1421 + ANDW R1, R2, R3 // b9f42031 + ANDW $1, R1 // c01b00000001 + ANDW $131071, R1 // a5160001 + ANDW $65536, R1 // 
c01b00010000 + ANDW $-2, R1 // a517fffe + OR R1, R2 // b9810021 + OR R1, R2, R3 // b9e62031 + OR $1, R1 // a51b0001 + OR $131071, R1 // c01d0001ffff + OR $65536, R1 // c01d00010000 + OR $-2, R1 // c0a1fffffffeb981001a + ORW R1, R2 // 1621 + ORW R1, R2, R3 // b9f62031 + ORW $1, R1 // a51b0001 + ORW $131071, R1 // c01d0001ffff + ORW $65536, R1 // a51a0001 + ORW $-2, R1 // c01dfffffffe + XOR R1, R2 // b9820021 + XOR R1, R2, R3 // b9e72031 + XOR $1, R1 // c01700000001 + XOR $131071, R1 // c0170001ffff + XOR $65536, R1 // c01700010000 + XOR $-2, R1 // c0a1fffffffeb982001a + XORW R1, R2 // 1721 + XORW R1, R2, R3 // b9f72031 + XORW $1, R1 // c01700000001 + XORW $131071, R1 // c0170001ffff + XORW $65536, R1 // c01700010000 + XORW $-2, R1 // c017fffffffe + + ADD -524288(R1), R2 // e32010008008 + ADD 524287(R3), R4 // e3403fff7f08 + ADD -524289(R1), R2 // c0a1fff7ffffe32a10000008 + ADD 524288(R3), R4 // c0a100080000e34a30000008 + ADD -524289(R1)(R2*1), R3 // c0a1fff7ffff41aa2000e33a10000008 + ADD 524288(R3)(R4*1), R5 // c0a10008000041aa4000e35a30000008 + ADDC (R1), R2 // e3201000000a + ADDW (R5), R6 // 5a605000 + ADDW 4095(R7), R8 // 5a807fff + ADDW -1(R1), R2 // e3201fffff5a + ADDW 4096(R3), R4 // e3403000015a + ADDE 4096(R3), R4 // e34030000188 + ADDE 4096(R3)(R2*1), R4 // e34230000188 + ADDE 524288(R3)(R4*1), R5 // c0a10008000041aa4000e35a30000088 + MULLD (R1)(R2*1), R3 // e3321000000c + MULLW (R3)(R4*1), R5 // 71543000 + MULLW 4096(R3), R4 // e34030000151 + SUB (R1), R2 // e32010000009 + SUBC (R1), R2 // e3201000000b + SUBE (R1), R2 // e32010000089 + SUBW (R1), R2 // 5b201000 + SUBW -1(R1), R2 // e3201fffff5b + AND (R1), R2 // e32010000080 + ANDW (R1), R2 // 54201000 + ANDW -1(R1), R2 // e3201fffff54 + OR (R1), R2 // e32010000081 + ORW (R1), R2 // 56201000 + ORW -1(R1), R2 // e3201fffff56 + XOR (R1), R2 // e32010000082 + XORW (R1), R2 // 57201000 + XORW -1(R1), R2 // e3201fffff57 + + // shift and rotate instructions + SRD $4, R4, R7 // eb740004000c + SRD R1, R4, R7 // 
eb741000000c + SRW $4, R4, R7 // eb74000400de + SRW R1, R4, R7 // eb74100000de + SLW $4, R3, R6 // eb63000400df + SLW R2, R3, R6 // eb63200000df + SLD $4, R3, R6 // eb630004000d + SLD R2, R3, R6 // eb632000000d + SRAD $4, R5, R8 // eb850004000a + SRAD R3, R5, R8 // eb853000000a + SRAW $4, R5, R8 // eb85000400dc + SRAW R3, R5, R8 // eb85300000dc + RLL R1, R2, R3 // eb321000001d + RLL $4, R2, R3 // eb320004001d + RLLG R1, R2, R3 // eb321000001c + RLLG $4, R2, R3 // eb320004001c + + RNSBG $0, $31, $32, R1, R2 // ec21001f2054 + RXSBG $17, $8, $16, R3, R4 // ec4311081057 + ROSBG $9, $24, $11, R5, R6 // ec6509180b56 + RNSBGT $0, $31, $32, R7, R8 // ec87801f2054 + RXSBGT $17, $8, $16, R9, R10 // eca991081057 + ROSBGT $9, $24, $11, R11, R0 // ec0b89180b56 + RISBG $0, $31, $32, R1, R2 // ec21001f2055 + RISBGN $17, $8, $16, R3, R4 // ec4311081059 + RISBGZ $9, $24, $11, R5, R6 // ec6509980b55 + RISBGNZ $0, $31, $32, R7, R8 // ec87009f2059 + RISBHG $17, $8, $16, R9, R10 // eca91108105d + RISBLG $9, $24, $11, R11, R0 // ec0b09180b51 + RISBHGZ $17, $8, $16, R9, R10 // eca91188105d + RISBLGZ $9, $24, $11, R11, R0 // ec0b09980b51 + + LAA R1, R2, 524287(R3) // eb213fff7ff8 + LAAG R4, R5, -524288(R6) // eb54600080e8 + LAAL R7, R8, 8192(R9) // eb87900002fa + LAALG R10, R11, -8192(R12) // ebbac000feea + LAN R1, R2, (R3) // eb21300000f4 + LANG R4, R5, (R6) // eb54600000e4 + LAX R7, R8, (R9) // eb87900000f7 + LAXG R10, R11, (R12) // ebbac00000e7 + LAO R1, R2, (R3) // eb21300000f6 + LAOG R4, R5, (R6) // eb54600000e6 + + // load and store multiple + LMG n-8(SP), R3, R4 // eb34f0100004 + LMG -5(R5), R3, R4 // eb345ffbff04 + LMY n-8(SP), R3, R4 // 9834f010 + LMY 4096(R1), R3, R4 // eb3410000198 + STMG R1, R2, n-8(SP) // eb12f0100024 + STMG R1, R2, -5(R3) // eb123ffbff24 + STMY R1, R2, n-8(SP) // 9012f010 + STMY R1, R2, 4096(R3) // eb1230000190 + + XC $8, (R15), n-8(SP) // d707f010f000 + NC $8, (R15), n-8(SP) // d407f010f000 + OC $8, (R15), n-8(SP) // d607f010f000 + MVC $8, (R15), n-8(SP) // 
d207f010f000 + MVCIN $8, (R15), n-8(SP) // e807f010f000 + CLC $8, (R15), n-8(SP) // d507f000f010 + XC $256, -8(R15), -8(R15) // b90400afc2a8fffffff8d7ffa000a000 + MVC $256, 8192(R1), 8192(R2) // b90400a2c2a800002000b90400b1c2b800002000d2ffa000b000 + + CMP R1, R2 // b9200012 + CMP R3, $32767 // a73f7fff + CMP R3, $32768 // c23c00008000 + CMP R3, $-2147483648 // c23c80000000 + CMPU R4, R5 // b9210045 + CMPU R6, $4294967295 // c26effffffff + CMPW R7, R8 // 1978 + CMPW R9, $-32768 // a79e8000 + CMPW R9, $-32769 // c29dffff7fff + CMPW R9, $-2147483648 // c29d80000000 + CMPWU R1, R2 // 1512 + CMPWU R3, $4294967295 // c23fffffffff + + TMHH R1, $65535 // a712ffff + TMHL R2, $1 // a7230001 + TMLH R3, $0 // a7300000 + TMLL R4, $32768 // a7418000 + + IPM R3 // b2220030 + IPM R12 // b22200c0 + + SPM R1 // 0410 + SPM R10 // 04a0 + + BRC $7, 0(PC) // a7740000 + BNE 0(PC) // a7740000 + BEQ 0(PC) // a7840000 + BLT 0(PC) // a7440000 + BLE 0(PC) // a7c40000 + BGT 0(PC) // a7240000 + BGE 0(PC) // a7a40000 + BLTU 0(PC) // a7540000 + BLEU 0(PC) // a7d40000 + + BRCT R1, 0(PC) // a7160000 + BRCTG R2, 0(PC) // a7270000 + + CMPBNE R1, R2, 0(PC) // ec1200007064 + CMPBEQ R3, R4, 0(PC) // ec3400008064 + CMPBLT R5, R6, 0(PC) // ec5600004064 + CMPBLE R7, R8, 0(PC) // ec780000c064 + CMPBGT R9, R1, 0(PC) // ec9100002064 + CMPBGE R2, R3, 0(PC) // ec230000a064 + + CMPBNE R1, $-127, 0(PC) // ec170000817c + CMPBEQ R3, $0, 0(PC) // ec380000007c + CMPBLT R5, $128, 0(PC) // ec540000807c + CMPBLE R7, $127, 0(PC) // ec7c00007f7c + CMPBGT R9, $0, 0(PC) // ec920000007c + CMPBGE R2, $128, 0(PC) // ec2a0000807c + + CMPUBNE R1, R2, 0(PC) // ec1200007065 + CMPUBEQ R3, R4, 0(PC) // ec3400008065 + CMPUBLT R5, R6, 0(PC) // ec5600004065 + CMPUBLE R7, R8, 0(PC) // ec780000c065 + CMPUBGT R9, R1, 0(PC) // ec9100002065 + CMPUBGE R2, R3, 0(PC) // ec230000a065 + + CMPUBNE R1, $256, 0(PC) // ec170000007d + CMPUBEQ R3, $0, 0(PC) // ec380000007d + CMPUBLT R5, $256, 0(PC) // ec540000007d + CMPUBLE R7, $0, 0(PC) // 
ec7c0000007d + CMPUBGT R9, $256, 0(PC) // ec920000007d + CMPUBGE R2, $0, 0(PC) // ec2a0000007d + + CRJ $15, R1, R2, 0(PC) // ec120000f076 + CGRJ $12, R3, R4, 0(PC) // ec340000c064 + CLRJ $3, R5, R6, 0(PC) // ec5600003077 + CLGRJ $0, R7, R8, 0(PC) // ec7800000065 + + CIJ $4, R9, $127, 0(PC) // ec9400007f7e + CGIJ $8, R11, $-128, 0(PC) // ecb80000807c + CLIJ $1, R1, $255, 0(PC) // ec110000ff7f + CLGIJ $2, R3, $0, 0(PC) // ec320000007d + + LGDR F1, R12 // b3cd00c1 + LDGR R2, F15 // b3c100f2 + + CEFBRA R0, F15 // b39400f0 + CDFBRA R1, F14 // b39500e1 + CEGBRA R2, F13 // b3a400d2 + CDGBRA R3, F12 // b3a500c3 + + CELFBR R0, F15 // b39000f0 + CDLFBR R1, F14 // b39100e1 + CELGBR R2, F13 // b3a000d2 + CDLGBR R3, F12 // b3a100c3 + + CFEBRA F15, R1 // b398501f + CFDBRA F14, R2 // b399502e + CGEBRA F13, R3 // b3a8503d + CGDBRA F12, R4 // b3a9504c + + CLFEBR F15, R1 // b39c501f + CLFDBR F14, R2 // b39d502e + CLGEBR F13, R3 // b3ac503d + CLGDBR F12, R4 // b3ad504c + + FMOVS $0, F11 // b37400b0 + FMOVD $0, F12 // b37500c0 + FMOVS (R1)(R2*1), F0 // 78021000 + FMOVS n-8(SP), F15 // 78f0f010 + FMOVD -9999999(R8)(R9*1), F8 // c0a1ff67698141aa9000688a8000 + FMOVD F4, F5 // 2854 + + // RX (12-bit displacement) and RXY (20-bit displacement) instruction encoding (e.g. 
LD vs LDY) + FMOVD (R1), F0 // 68001000 + FMOVD 4095(R2), F13 // 68d02fff + FMOVD 4096(R2), F15 // edf020000165 + FMOVS 4095(R2)(R3), F13 // 78d32fff + FMOVS 4096(R2)(R4), F15 // edf420000164 + FMOVD F0, 4095(R1) // 60001fff + FMOVD F0, 4096(R1) // ed0010000167 + FMOVS F13, 4095(R2)(R3) // 70d32fff + FMOVS F13, 4096(R2)(R3) // edd320000166 + + FADDS F0, F15 // b30a00f0 + FADD F1, F14 // b31a00e1 + FSUBS F2, F13 // b30b00d2 + FSUB F3, F12 // b31b00c3 + FMULS F4, F11 // b31700b4 + FMUL F5, F10 // b31c00a5 + FDIVS F6, F9 // b30d0096 + FDIV F7, F8 // b31d0087 + FABS F1, F2 // b3100021 + FSQRTS F3, F4 // b3140043 + FSQRT F5, F15 // b31500f5 + FIEBR $0, F0, F1 // b3570010 + FIDBR $7, F2, F3 // b35f7032 + FMADD F1, F1, F1 // b31e1011 + FMADDS F1, F2, F3 // b30e3012 + FMSUB F4, F5, F5 // b31f5045 + FMSUBS F6, F6, F7 // b30f7066 + LPDFR F1, F2 // b3700021 + LNDFR F3, F4 // b3710043 + CPSDR F5, F6, F7 // b3725076 + LTEBR F1, F2 // b3020021 + LTDBR F3, F4 // b3120043 + TCEB F5, $8 // ed5000080010 + TCDB F15, $4095 // edf00fff0011 + + UNDEF // 00000000 + BRRK // 0001 + NOPH // 0700 + + SYNC // 07e0 + + KM R2, R4 // b92e0024 + KMC R2, R6 // b92f0026 + KLMD R2, R8 // b93f0028 + KIMD R0, R4 // b93e0004 + KDSA R0, R8 // b93a0008 + KMA R2, R6, R4 // b9296024 + KMCTR R2, R6, R4 // b92d6024 + + // vector add and sub instructions + VAB V3, V4, V4 // e743400000f3 + VAH V3, V4, V4 // e743400010f3 + VAF V3, V4, V4 // e743400020f3 + VAG V3, V4, V4 // e743400030f3 + VAQ V3, V4, V4 // e743400040f3 + VAB V1, V2 // e721200000f3 + VAH V1, V2 // e721200010f3 + VAF V1, V2 // e721200020f3 + VAG V1, V2 // e721200030f3 + VAQ V1, V2 // e721200040f3 + VSB V3, V4, V4 // e744300000f7 + VSH V3, V4, V4 // e744300010f7 + VSF V3, V4, V4 // e744300020f7 + VSG V3, V4, V4 // e744300030f7 + VSQ V3, V4, V4 // e744300040f7 + VSB V1, V2 // e722100000f7 + VSH V1, V2 // e722100010f7 + VSF V1, V2 // e722100020f7 + VSG V1, V2 // e722100030f7 + VSQ V1, V2 // e722100040f7 + + VCEQB V1, V3, V3 // e731300000f8 + VL 
(R15), V1 // e710f0000006 + VST V1, (R15) // e710f000000e + VL (R15), V31 // e7f0f0000806 + VST V31, (R15) // e7f0f000080e + VESLB $5, V14 // e7ee00050030 + VESRAG $0, V15, V16 // e70f0000383a + VLM (R15), V8, V23 // e787f0000436 + VSTM V8, V23, (R15) // e787f000043e + VONE V1 // e710ffff0044 + VZERO V16 // e70000000844 + VGBM $52428, V31 // e7f0cccc0844 + VREPIB $255, V4 // e74000ff0045 + VREPIH $-1, V16 // e700ffff1845 + VREPIF $-32768, V0 // e70080002045 + VREPIG $32767, V31 // e7f07fff3845 + VREPG $1, V4, V16 // e7040001384d + VREPB $4, V31, V1 // e71f0004044d + VFTCIDB $4095, V1, V2 // e721fff0304a + WFTCIDB $3276, V15, V16 // e70fccc8384a + VPOPCT V8, V19 // e73800000850 + VFEEZBS V1, V2, V31 // e7f120300880 + WFCHDBS V22, V23, V4 // e746701836eb + VMNH V1, V2, V30 // e7e1200018fe + VERLLVF V2, V30, V27 // e7be20002c73 + VSCBIB V0, V23, V24 // e78700000cf5 + VN V2, V1, V0 // e70210000068 + VNC V2, V1, V0 // e70210000069 + VO V2, V1, V0 // e7021000006a + VX V2, V1, V0 // e7021000006d + VN V16, V1 // e71010000468 + VNC V16, V1 // e71010000469 + VO V16, V1 // e7101000046a + VX V16, V1 // e7101000046d + VNOT V16, V1 // e7101000046b + VCLZF V16, V17 // e71000002c53 + VLVGP R3, R4, V8 // e78340000062 + VGEG $1, 8(R15)(V30*1), V31 // e7fef0081c12 + VSCEG $1, V31, 16(R15)(V30*1) // e7fef0101c1a + VGEF $0, 2048(R15)(V1*1), V2 // e721f8000013 + VSCEF $0, V2, 4095(R15)(V1*1) // e721ffff001b + VLL R0, (R15), V1 // e710f0000037 + VSTL R0, V16, (R15) // e700f000083f + VGMH $8, $16, V12 // e7c008101046 + VLEIB $15, $255, V0 // e70000fff040 + VLEIH $7, $-32768, V15 // e7f080007041 + VLEIF $2, $-43, V16 // e700ffd52843 + VLEIG $1, $32767, V31 // e7f07fff1842 + VSLDB $3, V1, V16, V18 // e72100030a77 + VERIMB $2, V31, V1, V2 // e72f10020472 + VSEL V1, V2, V3, V4 // e7412000308d + VGFMAH V21, V31, V24, V0 // e705f10087bc + VFMADB V16, V8, V9, V10 // e7a08300948f + WFMADB V17, V18, V19, V20 // e74123083f8f + VFMSDB V2, V25, V24, V31 // e7f293008b8e + WFMSDB V31, V2, V3, V4 // 
e74f2308348e + VPERM V31, V0, V2, V3 // e73f0000248c + VPDI $1, V2, V31, V1 // e712f0001284 + VLEG $1, (R3), V1 // e71030001002 + VLEF $2, (R0), V31 // e7f000002803 + VLEH $3, (R12), V16 // e700c0003801 + VLEB $15, 4095(R9), V15 // e7f09ffff000 + VSTEG $1, V30, (R1)(R2*1) // e7e21000180a + VSTEF $3, V2, (R9) // e7209000300b + VSTEH $7, V31, (R2) // e7f020007809 + VSTEB $15, V29, 4094(R12) // e7d0cffef808 + VMSLG V21, V22, V23, V24 // e78563007fb8 + VMSLEG V21, V22, V23, V24 // e78563807fb8 + VMSLOG V21, V22, V23, V24 // e78563407fb8 + VMSLEOG V21, V22, V23, V24 // e78563c07fb8 + VSUMGH V1, V2, V3 // e73120001065 + VSUMGF V16, V17, V18 // e72010002e65 + VSUMQF V4, V5, V6 // e76450002067 + VSUMQG V19, V20, V21 // e75340003e67 + VSUMB V7, V8, V9 // e79780000064 + VSUMH V22, V23, V24 // e78670001e64 + + RET + RET foo(SB) + +TEXT main·init(SB),DUPOK|NOSPLIT,$0 // TEXT main.init(SB), DUPOK|NOSPLIT, $0 + RET + +TEXT main·main(SB),DUPOK|NOSPLIT,$0 // TEXT main.main(SB), DUPOK|NOSPLIT, $0 + BL main·foo(SB) // CALL main.foo(SB) + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/flags/flags.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/flags/flags.go new file mode 100644 index 0000000000000000000000000000000000000000..e15a062749fab5351ce4e85e5f041ac566417702 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/flags/flags.go @@ -0,0 +1,89 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package flags implements top-level flags and the usage message for the assembler. 
+package flags + +import ( + "cmd/internal/obj" + "cmd/internal/objabi" + "flag" + "fmt" + "os" + "path/filepath" + "strings" +) + +var ( + Debug = flag.Bool("debug", false, "dump instructions as they are parsed") + OutputFile = flag.String("o", "", "output file; default foo.o for /a/b/c/foo.s as first argument") + TrimPath = flag.String("trimpath", "", "remove prefix from recorded source file paths") + Shared = flag.Bool("shared", false, "generate code that can be linked into a shared library") + Dynlink = flag.Bool("dynlink", false, "support references to Go symbols defined in other shared libraries") + Linkshared = flag.Bool("linkshared", false, "generate code that will be linked against Go shared libraries") + AllErrors = flag.Bool("e", false, "no limit on number of errors reported") + SymABIs = flag.Bool("gensymabis", false, "write symbol ABI information to output file, don't assemble") + Importpath = flag.String("p", obj.UnlinkablePkg, "set expected package import to path") + Spectre = flag.String("spectre", "", "enable spectre mitigations in `list` (all, ret)") +) + +var DebugFlags struct { + MayMoreStack string `help:"call named function before all stack growth checks"` + PCTab string `help:"print named pc-value table\nOne of: pctospadj, pctofile, pctoline, pctoinline, pctopcdata"` +} + +var ( + D MultiFlag + I MultiFlag + PrintOut int + DebugV bool +) + +func init() { + flag.Var(&D, "D", "predefined symbol with optional simple value -D=identifier=value; can be set multiple times") + flag.Var(&I, "I", "include directory; can be set multiple times") + flag.BoolVar(&DebugV, "v", false, "print debug output") + flag.Var(objabi.NewDebugFlag(&DebugFlags, nil), "d", "enable debugging settings; try -d help") + objabi.AddVersionFlag() // -V + objabi.Flagcount("S", "print assembly and machine code", &PrintOut) +} + +// MultiFlag allows setting a value multiple times to collect a list, as in -I=dir1 -I=dir2. 
+type MultiFlag []string + +func (m *MultiFlag) String() string { + if len(*m) == 0 { + return "" + } + return fmt.Sprint(*m) +} + +func (m *MultiFlag) Set(val string) error { + (*m) = append(*m, val) + return nil +} + +func Usage() { + fmt.Fprintf(os.Stderr, "usage: asm [options] file.s ...\n") + fmt.Fprintf(os.Stderr, "Flags:\n") + flag.PrintDefaults() + os.Exit(2) +} + +func Parse() { + objabi.Flagparse(Usage) + if flag.NArg() == 0 { + flag.Usage() + } + + // Flag refinement. + if *OutputFile == "" { + if flag.NArg() != 1 { + flag.Usage() + } + input := filepath.Base(flag.Arg(0)) + input = strings.TrimSuffix(input, ".s") + *OutputFile = fmt.Sprintf("%s.o", input) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/lex/input.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/lex/input.go new file mode 100644 index 0000000000000000000000000000000000000000..da4ebe6d6e3447fa5a672863b68e459b720646ec --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/lex/input.go @@ -0,0 +1,486 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lex + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "text/scanner" + + "cmd/asm/internal/flags" + "cmd/internal/objabi" + "cmd/internal/src" +) + +// Input is the main input: a stack of readers and some macro definitions. +// It also handles #include processing (by pushing onto the input stack) +// and parses and instantiates macro definitions. +type Input struct { + Stack + includes []string + beginningOfLine bool + ifdefStack []bool + macros map[string]*Macro + text string // Text of last token returned by Next. + peek bool + peekToken ScanToken + peekText string +} + +// NewInput returns an Input from the given path. +func NewInput(name string) *Input { + return &Input{ + // include directories: look in source dir, then -I directories. 
+ includes: append([]string{filepath.Dir(name)}, flags.I...), + beginningOfLine: true, + macros: predefine(flags.D), + } +} + +// predefine installs the macros set by the -D flag on the command line. +func predefine(defines flags.MultiFlag) map[string]*Macro { + macros := make(map[string]*Macro) + for _, name := range defines { + value := "1" + i := strings.IndexRune(name, '=') + if i > 0 { + name, value = name[:i], name[i+1:] + } + tokens := Tokenize(name) + if len(tokens) != 1 || tokens[0].ScanToken != scanner.Ident { + fmt.Fprintf(os.Stderr, "asm: parsing -D: %q is not a valid identifier name\n", tokens[0]) + flags.Usage() + } + macros[name] = &Macro{ + name: name, + args: nil, + tokens: Tokenize(value), + } + } + return macros +} + +var panicOnError bool // For testing. + +func (in *Input) Error(args ...interface{}) { + if panicOnError { + panic(fmt.Errorf("%s:%d: %s", in.File(), in.Line(), fmt.Sprintln(args...))) + } + fmt.Fprintf(os.Stderr, "%s:%d: %s", in.File(), in.Line(), fmt.Sprintln(args...)) + os.Exit(1) +} + +// expectText is like Error but adds "got XXX" where XXX is a quoted representation of the most recent token. +func (in *Input) expectText(args ...interface{}) { + in.Error(append(args, "; got", strconv.Quote(in.Stack.Text()))...) +} + +// enabled reports whether the input is enabled by an ifdef, or is at the top level. +func (in *Input) enabled() bool { + return len(in.ifdefStack) == 0 || in.ifdefStack[len(in.ifdefStack)-1] +} + +func (in *Input) expectNewline(directive string) { + tok := in.Stack.Next() + if tok != '\n' { + in.expectText("expected newline after", directive) + } +} + +func (in *Input) Next() ScanToken { + if in.peek { + in.peek = false + tok := in.peekToken + in.text = in.peekText + return tok + } + // If we cannot generate a token after 100 macro invocations, we're in trouble. + // The usual case is caught by Push, below, but be safe. 
+ for nesting := 0; nesting < 100; { + tok := in.Stack.Next() + switch tok { + case '#': + if !in.beginningOfLine { + in.Error("'#' must be first item on line") + } + in.beginningOfLine = in.hash() + in.text = "#" + return '#' + + case scanner.Ident: + // Is it a macro name? + name := in.Stack.Text() + macro := in.macros[name] + if macro != nil { + nesting++ + in.invokeMacro(macro) + continue + } + fallthrough + default: + if tok == scanner.EOF && len(in.ifdefStack) > 0 { + // We're skipping text but have run out of input with no #endif. + in.Error("unclosed #ifdef or #ifndef") + } + in.beginningOfLine = tok == '\n' + if in.enabled() { + in.text = in.Stack.Text() + return tok + } + } + } + in.Error("recursive macro invocation") + return 0 +} + +func (in *Input) Text() string { + return in.text +} + +// hash processes a # preprocessor directive. It reports whether it completes. +func (in *Input) hash() bool { + // We have a '#'; it must be followed by a known word (define, include, etc.). + tok := in.Stack.Next() + if tok != scanner.Ident { + in.expectText("expected identifier after '#'") + } + if !in.enabled() { + // Can only start including again if we are at #else or #endif but also + // need to keep track of nested #if[n]defs. + // We let #line through because it might affect errors. + switch in.Stack.Text() { + case "else", "endif", "ifdef", "ifndef", "line": + // Press on. + default: + return false + } + } + switch in.Stack.Text() { + case "define": + in.define() + case "else": + in.else_() + case "endif": + in.endif() + case "ifdef": + in.ifdef(true) + case "ifndef": + in.ifdef(false) + case "include": + in.include() + case "line": + in.line() + case "undef": + in.undef() + default: + in.Error("unexpected token after '#':", in.Stack.Text()) + } + return true +} + +// macroName returns the name for the macro being referenced. +func (in *Input) macroName() string { + // We use the Stack's input method; no macro processing at this stage. 
+ tok := in.Stack.Next() + if tok != scanner.Ident { + in.expectText("expected identifier after # directive") + } + // Name is alphanumeric by definition. + return in.Stack.Text() +} + +// #define processing. +func (in *Input) define() { + name := in.macroName() + args, tokens := in.macroDefinition(name) + in.defineMacro(name, args, tokens) +} + +// defineMacro stores the macro definition in the Input. +func (in *Input) defineMacro(name string, args []string, tokens []Token) { + if in.macros[name] != nil { + in.Error("redefinition of macro:", name) + } + in.macros[name] = &Macro{ + name: name, + args: args, + tokens: tokens, + } +} + +// macroDefinition returns the list of formals and the tokens of the definition. +// The argument list is nil for no parens on the definition; otherwise a list of +// formal argument names. +func (in *Input) macroDefinition(name string) ([]string, []Token) { + prevCol := in.Stack.Col() + tok := in.Stack.Next() + if tok == '\n' || tok == scanner.EOF { + return nil, nil // No definition for macro + } + var args []string + // The C preprocessor treats + // #define A(x) + // and + // #define A (x) + // distinctly: the first is a macro with arguments, the second without. + // Distinguish these cases using the column number, since we don't + // see the space itself. Note that text/scanner reports the position at the + // end of the token. It's where you are now, and you just read this token. + if tok == '(' && in.Stack.Col() == prevCol+1 { + // Macro has arguments. Scan list of formals. + acceptArg := true + args = []string{} // Zero length but not nil. + Loop: + for { + tok = in.Stack.Next() + switch tok { + case ')': + tok = in.Stack.Next() // First token of macro definition. 
+ break Loop + case ',': + if acceptArg { + in.Error("bad syntax in definition for macro:", name) + } + acceptArg = true + case scanner.Ident: + if !acceptArg { + in.Error("bad syntax in definition for macro:", name) + } + arg := in.Stack.Text() + if i := lookup(args, arg); i >= 0 { + in.Error("duplicate argument", arg, "in definition for macro:", name) + } + args = append(args, arg) + acceptArg = false + default: + in.Error("bad definition for macro:", name) + } + } + } + var tokens []Token + // Scan to newline. Backslashes escape newlines. + for tok != '\n' { + if tok == scanner.EOF { + in.Error("missing newline in definition for macro:", name) + } + if tok == '\\' { + tok = in.Stack.Next() + if tok != '\n' && tok != '\\' { + in.Error(`can only escape \ or \n in definition for macro:`, name) + } + } + tokens = append(tokens, Make(tok, in.Stack.Text())) + tok = in.Stack.Next() + } + return args, tokens +} + +func lookup(args []string, arg string) int { + for i, a := range args { + if a == arg { + return i + } + } + return -1 +} + +// invokeMacro pushes onto the input Stack a Slice that holds the macro definition with the actual +// parameters substituted for the formals. +// Invoking a macro does not touch the PC/line history. +func (in *Input) invokeMacro(macro *Macro) { + // If the macro has no arguments, just substitute the text. + if macro.args == nil { + in.Push(NewSlice(in.Base(), in.Line(), macro.tokens)) + return + } + tok := in.Stack.Next() + if tok != '(' { + // If the macro has arguments but is invoked without them, all we push is the macro name. + // First, put back the token. 
+ in.peekToken = tok + in.peekText = in.text + in.peek = true + in.Push(NewSlice(in.Base(), in.Line(), []Token{Make(macroName, macro.name)})) + return + } + actuals := in.argsFor(macro) + var tokens []Token + for _, tok := range macro.tokens { + if tok.ScanToken != scanner.Ident { + tokens = append(tokens, tok) + continue + } + substitution := actuals[tok.text] + if substitution == nil { + tokens = append(tokens, tok) + continue + } + tokens = append(tokens, substitution...) + } + in.Push(NewSlice(in.Base(), in.Line(), tokens)) +} + +// argsFor returns a map from formal name to actual value for this argumented macro invocation. +// The opening parenthesis has been absorbed. +func (in *Input) argsFor(macro *Macro) map[string][]Token { + var args [][]Token + // One macro argument per iteration. Collect them all and check counts afterwards. + for argNum := 0; ; argNum++ { + tokens, tok := in.collectArgument(macro) + args = append(args, tokens) + if tok == ')' { + break + } + } + // Zero-argument macros are tricky. + if len(macro.args) == 0 && len(args) == 1 && args[0] == nil { + args = nil + } else if len(args) != len(macro.args) { + in.Error("wrong arg count for macro", macro.name) + } + argMap := make(map[string][]Token) + for i, arg := range args { + argMap[macro.args[i]] = arg + } + return argMap +} + +// collectArgument returns the actual tokens for a single argument of a macro. +// It also returns the token that terminated the argument, which will always +// be either ',' or ')'. The starting '(' has been scanned. 
+func (in *Input) collectArgument(macro *Macro) ([]Token, ScanToken) { + nesting := 0 + var tokens []Token + for { + tok := in.Stack.Next() + if tok == scanner.EOF || tok == '\n' { + in.Error("unterminated arg list invoking macro:", macro.name) + } + if nesting == 0 && (tok == ')' || tok == ',') { + return tokens, tok + } + if tok == '(' { + nesting++ + } + if tok == ')' { + nesting-- + } + tokens = append(tokens, Make(tok, in.Stack.Text())) + } +} + +// #ifdef and #ifndef processing. +func (in *Input) ifdef(truth bool) { + name := in.macroName() + in.expectNewline("#if[n]def") + if !in.enabled() { + truth = false + } else if _, defined := in.macros[name]; !defined { + truth = !truth + } + in.ifdefStack = append(in.ifdefStack, truth) +} + +// #else processing +func (in *Input) else_() { + in.expectNewline("#else") + if len(in.ifdefStack) == 0 { + in.Error("unmatched #else") + } + if len(in.ifdefStack) == 1 || in.ifdefStack[len(in.ifdefStack)-2] { + in.ifdefStack[len(in.ifdefStack)-1] = !in.ifdefStack[len(in.ifdefStack)-1] + } +} + +// #endif processing. +func (in *Input) endif() { + in.expectNewline("#endif") + if len(in.ifdefStack) == 0 { + in.Error("unmatched #endif") + } + in.ifdefStack = in.ifdefStack[:len(in.ifdefStack)-1] +} + +// #include processing. +func (in *Input) include() { + // Find and parse string. + tok := in.Stack.Next() + if tok != scanner.String { + in.expectText("expected string after #include") + } + name, err := strconv.Unquote(in.Stack.Text()) + if err != nil { + in.Error("unquoting include file name: ", err) + } + in.expectNewline("#include") + // Push tokenizer for file onto stack. + fd, err := os.Open(name) + if err != nil { + for _, dir := range in.includes { + fd, err = os.Open(filepath.Join(dir, name)) + if err == nil { + break + } + } + if err != nil { + in.Error("#include:", err) + } + } + in.Push(NewTokenizer(name, fd, fd)) +} + +// #line processing. 
+func (in *Input) line() { + // Only need to handle Plan 9 format: #line 337 "filename" + tok := in.Stack.Next() + if tok != scanner.Int { + in.expectText("expected line number after #line") + } + line, err := strconv.Atoi(in.Stack.Text()) + if err != nil { + in.Error("error parsing #line (cannot happen):", err) + } + tok = in.Stack.Next() + if tok != scanner.String { + in.expectText("expected file name in #line") + } + file, err := strconv.Unquote(in.Stack.Text()) + if err != nil { + in.Error("unquoting #line file name: ", err) + } + tok = in.Stack.Next() + if tok != '\n' { + in.Error("unexpected token at end of #line: ", tok) + } + pos := src.MakePos(in.Base(), uint(in.Line())+1, 1) // +1 because #line nnn means line nnn starts on next line + in.Stack.SetBase(src.NewLinePragmaBase(pos, file, objabi.AbsFile(objabi.WorkingDir(), file, *flags.TrimPath), uint(line), 1)) +} + +// #undef processing +func (in *Input) undef() { + name := in.macroName() + if in.macros[name] == nil { + in.Error("#undef for undefined macro:", name) + } + // Newline must be next. + tok := in.Stack.Next() + if tok != '\n' { + in.Error("syntax error in #undef for macro:", name) + } + delete(in.macros, name) +} + +func (in *Input) Push(r TokenReader) { + if len(in.tr) > 100 { + in.Error("input recursion") + } + in.Stack.Push(r) +} + +func (in *Input) Close() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/lex/lex.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/lex/lex.go new file mode 100644 index 0000000000000000000000000000000000000000..f1923bee05c0e8a1341a277c7c1740df4a13541d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/lex/lex.go @@ -0,0 +1,137 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lex implements lexical analysis for the assembler. 
+package lex + +import ( + "fmt" + "log" + "os" + "strings" + "text/scanner" + + "cmd/internal/src" +) + +// A ScanToken represents an input item. It is a simple wrapping of rune, as +// returned by text/scanner.Scanner, plus a couple of extra values. +type ScanToken rune + +const ( + // Asm defines some two-character lexemes. We make up + // a rune/ScanToken value for them - ugly but simple. + LSH ScanToken = -1000 - iota // << Left shift. + RSH // >> Logical right shift. + ARR // -> Used on ARM for shift type 3, arithmetic right shift. + ROT // @> Used on ARM for shift type 4, rotate right. + Include // included file started here + BuildComment // //go:build or +build comment + macroName // name of macro that should not be expanded +) + +// IsRegisterShift reports whether the token is one of the ARM register shift operators. +func IsRegisterShift(r ScanToken) bool { + return ROT <= r && r <= LSH // Order looks backwards because these are negative. +} + +func (t ScanToken) String() string { + switch t { + case scanner.EOF: + return "EOF" + case scanner.Ident: + return "identifier" + case scanner.Int: + return "integer constant" + case scanner.Float: + return "float constant" + case scanner.Char: + return "rune constant" + case scanner.String: + return "string constant" + case scanner.RawString: + return "raw string constant" + case scanner.Comment: + return "comment" + default: + return fmt.Sprintf("%q", rune(t)) + } +} + +// NewLexer returns a lexer for the named file and the given link context. +func NewLexer(name string) TokenReader { + input := NewInput(name) + fd, err := os.Open(name) + if err != nil { + log.Fatalf("%s\n", err) + } + input.Push(NewTokenizer(name, fd, fd)) + return input +} + +// The other files in this directory each contain an implementation of TokenReader. + +// A TokenReader is like a reader, but returns lex tokens of type Token. It also can tell you what +// the text of the most recently returned token is, and where it was found. 
+// The underlying scanner elides all spaces except newline, so the input looks like a stream of +// Tokens; original spacing is lost but we don't need it. +type TokenReader interface { + // Next returns the next token. + Next() ScanToken + // The following methods all refer to the most recent token returned by Next. + // Text returns the original string representation of the token. + Text() string + // File reports the source file name of the token. + File() string + // Base reports the position base of the token. + Base() *src.PosBase + // SetBase sets the position base. + SetBase(*src.PosBase) + // Line reports the source line number of the token. + Line() int + // Col reports the source column number of the token. + Col() int + // Close does any teardown required. + Close() +} + +// A Token is a scan token plus its string value. +// A macro is stored as a sequence of Tokens with spaces stripped. +type Token struct { + ScanToken + text string +} + +// Make returns a Token with the given rune (ScanToken) and text representation. +func Make(token ScanToken, text string) Token { + // Substitute the substitutes for . and /. + text = strings.ReplaceAll(text, "\u00B7", ".") + text = strings.ReplaceAll(text, "\u2215", "/") + return Token{ScanToken: token, text: text} +} + +func (l Token) String() string { + return l.text +} + +// A Macro represents the definition of a #defined macro. +type Macro struct { + name string // The #define name. + args []string // Formal arguments. + tokens []Token // Body of macro. +} + +// Tokenize turns a string into a list of Tokens; used to parse the -D flag and in tests. 
+func Tokenize(str string) []Token { + t := NewTokenizer("command line", strings.NewReader(str), nil) + var tokens []Token + for { + tok := t.Next() + if tok == scanner.EOF { + break + } + tokens = append(tokens, Make(tok, t.Text())) + } + return tokens +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/lex/lex_test.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/lex/lex_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e8dcf4b22f1218dc3e403d3271ba22945a447e87 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/lex/lex_test.go @@ -0,0 +1,364 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lex + +import ( + "strings" + "testing" + "text/scanner" +) + +type lexTest struct { + name string + input string + output string +} + +var lexTests = []lexTest{ + { + "empty", + "", + "", + }, + { + "simple", + "1 (a)", + "1.(.a.)", + }, + { + "simple define", + lines( + "#define A 1234", + "A", + ), + "1234.\n", + }, + { + "define without value", + "#define A", + "", + }, + { + "macro without arguments", + "#define A() 1234\n" + "A()\n", + "1234.\n", + }, + { + "macro with just parens as body", + "#define A () \n" + "A\n", + "(.).\n", + }, + { + "macro with parens but no arguments", + "#define A (x) \n" + "A\n", + "(.x.).\n", + }, + { + "macro with arguments", + "#define A(x, y, z) x+z+y\n" + "A(1, 2, 3)\n", + "1.+.3.+.2.\n", + }, + { + "argumented macro invoked without arguments", + lines( + "#define X() foo ", + "X()", + "X", + ), + "foo.\n.X.\n", + }, + { + "multiline macro without arguments", + lines( + "#define A 1\\", + "\t2\\", + "\t3", + "before", + "A", + "after", + ), + "before.\n.1.\n.2.\n.3.\n.after.\n", + }, + { + "multiline macro with arguments", + lines( + "#define A(a, b, c) a\\", + "\tb\\", + "\tc", + "before", + "A(1, 2, 3)", + "after", + ), + 
"before.\n.1.\n.2.\n.3.\n.after.\n", + }, + { + "LOAD macro", + lines( + "#define LOAD(off, reg) \\", + "\tMOVBLZX (off*4)(R12), reg \\", + "\tADDB reg, DX", + "", + "LOAD(8, AX)", + ), + "\n.\n.MOVBLZX.(.8.*.4.).(.R12.).,.AX.\n.ADDB.AX.,.DX.\n", + }, + { + "nested multiline macro", + lines( + "#define KEYROUND(xmm, load, off, r1, r2, index) \\", + "\tMOVBLZX (BP)(DX*4), R8 \\", + "\tload((off+1), r2) \\", + "\tMOVB R8, (off*4)(R12) \\", + "\tPINSRW $index, (BP)(R8*4), xmm", + "#define LOAD(off, reg) \\", + "\tMOVBLZX (off*4)(R12), reg \\", + "\tADDB reg, DX", + "KEYROUND(X0, LOAD, 8, AX, BX, 0)", + ), + "\n.MOVBLZX.(.BP.).(.DX.*.4.).,.R8.\n.\n.MOVBLZX.(.(.8.+.1.).*.4.).(.R12.).,.BX.\n.ADDB.BX.,.DX.\n.MOVB.R8.,.(.8.*.4.).(.R12.).\n.PINSRW.$.0.,.(.BP.).(.R8.*.4.).,.X0.\n", + }, + { + "taken #ifdef", + lines( + "#define A", + "#ifdef A", + "#define B 1234", + "#endif", + "B", + ), + "1234.\n", + }, + { + "not taken #ifdef", + lines( + "#ifdef A", + "#define B 1234", + "#endif", + "B", + ), + "B.\n", + }, + { + "taken #ifdef with else", + lines( + "#define A", + "#ifdef A", + "#define B 1234", + "#else", + "#define B 5678", + "#endif", + "B", + ), + "1234.\n", + }, + { + "not taken #ifdef with else", + lines( + "#ifdef A", + "#define B 1234", + "#else", + "#define B 5678", + "#endif", + "B", + ), + "5678.\n", + }, + { + "nested taken/taken #ifdef", + lines( + "#define A", + "#define B", + "#ifdef A", + "#ifdef B", + "#define C 1234", + "#else", + "#define C 5678", + "#endif", + "#endif", + "C", + ), + "1234.\n", + }, + { + "nested taken/not-taken #ifdef", + lines( + "#define A", + "#ifdef A", + "#ifdef B", + "#define C 1234", + "#else", + "#define C 5678", + "#endif", + "#endif", + "C", + ), + "5678.\n", + }, + { + "nested not-taken/would-be-taken #ifdef", + lines( + "#define B", + "#ifdef A", + "#ifdef B", + "#define C 1234", + "#else", + "#define C 5678", + "#endif", + "#endif", + "C", + ), + "C.\n", + }, + { + "nested not-taken/not-taken #ifdef", + lines( + "#ifdef 
A", + "#ifdef B", + "#define C 1234", + "#else", + "#define C 5678", + "#endif", + "#endif", + "C", + ), + "C.\n", + }, + { + "nested #define", + lines( + "#define A #define B THIS", + "A", + "B", + ), + "THIS.\n", + }, + { + "nested #define with args", + lines( + "#define A #define B(x) x", + "A", + "B(THIS)", + ), + "THIS.\n", + }, + /* This one fails. See comment in Slice.Col. + { + "nested #define with args", + lines( + "#define A #define B (x) x", + "A", + "B(THIS)", + ), + "x.\n", + }, + */ +} + +func TestLex(t *testing.T) { + for _, test := range lexTests { + input := NewInput(test.name) + input.Push(NewTokenizer(test.name, strings.NewReader(test.input), nil)) + result := drain(input) + if result != test.output { + t.Errorf("%s: got %q expected %q", test.name, result, test.output) + } + } +} + +// lines joins the arguments together as complete lines. +func lines(a ...string) string { + return strings.Join(a, "\n") + "\n" +} + +// drain returns a single string representing the processed input tokens. 
+func drain(input *Input) string { + var buf strings.Builder + for { + tok := input.Next() + if tok == scanner.EOF { + return buf.String() + } + if tok == '#' { + continue + } + if buf.Len() > 0 { + buf.WriteByte('.') + } + buf.WriteString(input.Text()) + } +} + +type badLexTest struct { + input string + error string +} + +var badLexTests = []badLexTest{ + { + "3 #define foo bar\n", + "'#' must be first item on line", + }, + { + "#ifdef foo\nhello", + "unclosed #ifdef or #ifndef", + }, + { + "#ifndef foo\nhello", + "unclosed #ifdef or #ifndef", + }, + { + "#ifdef foo\nhello\n#else\nbye", + "unclosed #ifdef or #ifndef", + }, + { + "#define A() A()\nA()", + "recursive macro invocation", + }, + { + "#define A a\n#define A a\n", + "redefinition of macro", + }, + { + "#define A a", + "no newline after macro definition", + }, +} + +func TestBadLex(t *testing.T) { + for _, test := range badLexTests { + input := NewInput(test.error) + input.Push(NewTokenizer(test.error, strings.NewReader(test.input), nil)) + err := firstError(input) + if err == nil { + t.Errorf("%s: got no error", test.error) + continue + } + if !strings.Contains(err.Error(), test.error) { + t.Errorf("got error %q expected %q", err.Error(), test.error) + } + } +} + +// firstError returns the first error value triggered by the input. +func firstError(input *Input) (err error) { + panicOnError = true + defer func() { + panicOnError = false + switch e := recover(); e := e.(type) { + case nil: + case error: + err = e + default: + panic(e) + } + }() + + for { + tok := input.Next() + if tok == scanner.EOF { + return + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/lex/slice.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/lex/slice.go new file mode 100644 index 0000000000000000000000000000000000000000..61b15dd963191f8145948ab7c3881386d327c196 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/lex/slice.go @@ -0,0 +1,74 @@ +// Copyright 2015 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lex + +import ( + "text/scanner" + + "cmd/internal/src" +) + +// A Slice reads from a slice of Tokens. +type Slice struct { + tokens []Token + base *src.PosBase + line int + pos int +} + +func NewSlice(base *src.PosBase, line int, tokens []Token) *Slice { + return &Slice{ + tokens: tokens, + base: base, + line: line, + pos: -1, // Next will advance to zero. + } +} + +func (s *Slice) Next() ScanToken { + s.pos++ + if s.pos >= len(s.tokens) { + return scanner.EOF + } + return s.tokens[s.pos].ScanToken +} + +func (s *Slice) Text() string { + return s.tokens[s.pos].text +} + +func (s *Slice) File() string { + return s.base.Filename() +} + +func (s *Slice) Base() *src.PosBase { + return s.base +} + +func (s *Slice) SetBase(base *src.PosBase) { + // Cannot happen because we only have slices of already-scanned text, + // but be prepared. + s.base = base +} + +func (s *Slice) Line() int { + return s.line +} + +func (s *Slice) Col() int { + // TODO: Col is only called when defining a macro and all it cares about is increasing + // position to discover whether there is a blank before the parenthesis. + // We only get here if defining a macro inside a macro. + // This imperfect implementation means we cannot tell the difference between + // #define A #define B(x) x + // and + // #define A #define B (x) x + // The first definition of B has an argument, the second doesn't. Because we let + // text/scanner strip the blanks for us, this is extremely rare, hard to fix, and not worth it. 
+ return s.pos +} + +func (s *Slice) Close() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/lex/stack.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/lex/stack.go new file mode 100644 index 0000000000000000000000000000000000000000..929e5281b4a392aba4ea74bc88da4329f5631929 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/lex/stack.go @@ -0,0 +1,61 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lex + +import ( + "text/scanner" + + "cmd/internal/src" +) + +// A Stack is a stack of TokenReaders. As the top TokenReader hits EOF, +// it resumes reading the next one down. +type Stack struct { + tr []TokenReader +} + +// Push adds tr to the top (end) of the input stack. (Popping happens automatically.) +func (s *Stack) Push(tr TokenReader) { + s.tr = append(s.tr, tr) +} + +func (s *Stack) Next() ScanToken { + tos := s.tr[len(s.tr)-1] + tok := tos.Next() + for tok == scanner.EOF && len(s.tr) > 1 { + tos.Close() + // Pop the topmost item from the stack and resume with the next one down. + s.tr = s.tr[:len(s.tr)-1] + tok = s.Next() + } + return tok +} + +func (s *Stack) Text() string { + return s.tr[len(s.tr)-1].Text() +} + +func (s *Stack) File() string { + return s.Base().Filename() +} + +func (s *Stack) Base() *src.PosBase { + return s.tr[len(s.tr)-1].Base() +} + +func (s *Stack) SetBase(base *src.PosBase) { + s.tr[len(s.tr)-1].SetBase(base) +} + +func (s *Stack) Line() int { + return s.tr[len(s.tr)-1].Line() +} + +func (s *Stack) Col() int { + return s.tr[len(s.tr)-1].Col() +} + +func (s *Stack) Close() { // Unused. 
+} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/internal/lex/tokenizer.go b/platform/dbops/binaries/go/go/src/cmd/asm/internal/lex/tokenizer.go new file mode 100644 index 0000000000000000000000000000000000000000..f60f7a11af3c33bf50cda80f34456fe9ef294e98 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/internal/lex/tokenizer.go @@ -0,0 +1,153 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lex + +import ( + "go/build/constraint" + "io" + "os" + "strings" + "text/scanner" + "unicode" + + "cmd/asm/internal/flags" + "cmd/internal/objabi" + "cmd/internal/src" +) + +// A Tokenizer is a simple wrapping of text/scanner.Scanner, configured +// for our purposes and made a TokenReader. It forms the lowest level, +// turning text from readers into tokens. +type Tokenizer struct { + tok ScanToken + s *scanner.Scanner + base *src.PosBase + line int + file *os.File // If non-nil, file descriptor to close. +} + +func NewTokenizer(name string, r io.Reader, file *os.File) *Tokenizer { + var s scanner.Scanner + s.Init(r) + // Newline is like a semicolon; other space characters are fine. + s.Whitespace = 1<<'\t' | 1<<'\r' | 1<<' ' + // Don't skip comments: we need to count newlines. + s.Mode = scanner.ScanChars | + scanner.ScanFloats | + scanner.ScanIdents | + scanner.ScanInts | + scanner.ScanStrings | + scanner.ScanComments + s.Position.Filename = name + s.IsIdentRune = isIdentRune + return &Tokenizer{ + s: &s, + base: src.NewFileBase(name, objabi.AbsFile(objabi.WorkingDir(), name, *flags.TrimPath)), + line: 1, + file: file, + } +} + +// We want center dot (·) and division slash (∕) to work as identifier characters. +func isIdentRune(ch rune, i int) bool { + if unicode.IsLetter(ch) { + return true + } + switch ch { + case '_': // Underscore; traditional. + return true + case '\u00B7': // Represents the period in runtime.exit. 
U+00B7 '·' middle dot + return true + case '\u2215': // Represents the slash in runtime/debug.setGCPercent. U+2215 '∕' division slash + return true + } + // Digits are OK only after the first character. + return i > 0 && unicode.IsDigit(ch) +} + +func (t *Tokenizer) Text() string { + switch t.tok { + case LSH: + return "<<" + case RSH: + return ">>" + case ARR: + return "->" + case ROT: + return "@>" + } + return t.s.TokenText() +} + +func (t *Tokenizer) File() string { + return t.base.Filename() +} + +func (t *Tokenizer) Base() *src.PosBase { + return t.base +} + +func (t *Tokenizer) SetBase(base *src.PosBase) { + t.base = base +} + +func (t *Tokenizer) Line() int { + return t.line +} + +func (t *Tokenizer) Col() int { + return t.s.Pos().Column +} + +func (t *Tokenizer) Next() ScanToken { + s := t.s + for { + t.tok = ScanToken(s.Scan()) + if t.tok != scanner.Comment { + break + } + text := s.TokenText() + t.line += strings.Count(text, "\n") + if constraint.IsGoBuild(text) { + t.tok = BuildComment + break + } + } + switch t.tok { + case '\n': + t.line++ + case '-': + if s.Peek() == '>' { + s.Next() + t.tok = ARR + return ARR + } + case '@': + if s.Peek() == '>' { + s.Next() + t.tok = ROT + return ROT + } + case '<': + if s.Peek() == '<' { + s.Next() + t.tok = LSH + return LSH + } + case '>': + if s.Peek() == '>' { + s.Next() + t.tok = RSH + return RSH + } + } + return t.tok +} + +func (t *Tokenizer) Close() { + if t.file != nil { + t.file.Close() + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/asm/main.go b/platform/dbops/binaries/go/go/src/cmd/asm/main.go new file mode 100644 index 0000000000000000000000000000000000000000..ba69195056a9ef94f763ccfdbda447f97db62c73 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/asm/main.go @@ -0,0 +1,124 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bufio" + "flag" + "fmt" + "internal/buildcfg" + "log" + "os" + + "cmd/asm/internal/arch" + "cmd/asm/internal/asm" + "cmd/asm/internal/flags" + "cmd/asm/internal/lex" + + "cmd/internal/bio" + "cmd/internal/obj" + "cmd/internal/objabi" +) + +func main() { + log.SetFlags(0) + log.SetPrefix("asm: ") + + buildcfg.Check() + GOARCH := buildcfg.GOARCH + + flags.Parse() + + architecture := arch.Set(GOARCH, *flags.Shared || *flags.Dynlink) + if architecture == nil { + log.Fatalf("unrecognized architecture %s", GOARCH) + } + ctxt := obj.Linknew(architecture.LinkArch) + ctxt.Debugasm = flags.PrintOut + ctxt.Debugvlog = flags.DebugV + ctxt.Flag_dynlink = *flags.Dynlink + ctxt.Flag_linkshared = *flags.Linkshared + ctxt.Flag_shared = *flags.Shared || *flags.Dynlink + ctxt.Flag_maymorestack = flags.DebugFlags.MayMoreStack + ctxt.Debugpcln = flags.DebugFlags.PCTab + ctxt.IsAsm = true + ctxt.Pkgpath = *flags.Importpath + switch *flags.Spectre { + default: + log.Printf("unknown setting -spectre=%s", *flags.Spectre) + os.Exit(2) + case "": + // nothing + case "index": + // known to compiler; ignore here so people can use + // the same list with -gcflags=-spectre=LIST and -asmflags=-spectrre=LIST + case "all", "ret": + ctxt.Retpoline = true + } + + ctxt.Bso = bufio.NewWriter(os.Stdout) + defer ctxt.Bso.Flush() + + architecture.Init(ctxt) + + // Create object file, write header. + buf, err := bio.Create(*flags.OutputFile) + if err != nil { + log.Fatal(err) + } + defer buf.Close() + + if !*flags.SymABIs { + buf.WriteString(objabi.HeaderString()) + fmt.Fprintf(buf, "!\n") + } + + // Set macros for GOEXPERIMENTs so we can easily switch + // runtime assembly code based on them. 
+ if objabi.LookupPkgSpecial(ctxt.Pkgpath).AllowAsmABI { + for _, exp := range buildcfg.Experiment.Enabled() { + flags.D = append(flags.D, "GOEXPERIMENT_"+exp) + } + } + + var ok, diag bool + var failedFile string + for _, f := range flag.Args() { + lexer := lex.NewLexer(f) + parser := asm.NewParser(ctxt, architecture, lexer) + ctxt.DiagFunc = func(format string, args ...interface{}) { + diag = true + log.Printf(format, args...) + } + if *flags.SymABIs { + ok = parser.ParseSymABIs(buf) + } else { + pList := new(obj.Plist) + pList.Firstpc, ok = parser.Parse() + // reports errors to parser.Errorf + if ok { + obj.Flushplist(ctxt, pList, nil) + } + } + if !ok { + failedFile = f + break + } + } + if ok && !*flags.SymABIs { + ctxt.NumberSyms() + obj.WriteObjFile(ctxt, buf) + } + if !ok || diag { + if failedFile != "" { + log.Printf("assembly of %s failed", failedFile) + } else { + log.Print("assembly failed") + } + buf.Close() + os.Remove(*flags.OutputFile) + os.Exit(1) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/buildid/buildid.go b/platform/dbops/binaries/go/go/src/cmd/buildid/buildid.go new file mode 100644 index 0000000000000000000000000000000000000000..72ad80dbbba02f094aaac43d1e924c5fdf9b39f9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/buildid/buildid.go @@ -0,0 +1,80 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "flag" + "fmt" + "log" + "os" + "strings" + + "cmd/internal/buildid" +) + +func usage() { + fmt.Fprintf(os.Stderr, "usage: go tool buildid [-w] file\n") + flag.PrintDefaults() + os.Exit(2) +} + +var wflag = flag.Bool("w", false, "write build ID") + +func main() { + log.SetPrefix("buildid: ") + log.SetFlags(0) + flag.Usage = usage + flag.Parse() + if flag.NArg() != 1 { + usage() + } + + file := flag.Arg(0) + id, err := buildid.ReadFile(file) + if err != nil { + log.Fatal(err) + } + if !*wflag { + fmt.Printf("%s\n", id) + return + } + + // Keep in sync with src/cmd/go/internal/work/buildid.go:updateBuildID + + f, err := os.Open(file) + if err != nil { + log.Fatal(err) + } + matches, hash, err := buildid.FindAndHash(f, id, 0) + f.Close() + if err != nil { + log.Fatal(err) + } + + // <= go 1.7 doesn't embed the contentID or actionID, so no slash is present + if !strings.Contains(id, "/") { + log.Fatalf("%s: build ID is a legacy format...binary too old for this tool", file) + } + + newID := id[:strings.LastIndex(id, "/")] + "/" + buildid.HashToString(hash) + if len(newID) != len(id) { + log.Fatalf("%s: build ID length mismatch %q vs %q", file, id, newID) + } + + if len(matches) == 0 { + return + } + + f, err = os.OpenFile(file, os.O_RDWR, 0) + if err != nil { + log.Fatal(err) + } + if err := buildid.Rewrite(f, matches, newID); err != nil { + log.Fatal(err) + } + if err := f.Close(); err != nil { + log.Fatal(err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/buildid/doc.go b/platform/dbops/binaries/go/go/src/cmd/buildid/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..a554d798c062bb070d648783fbf2afb28ec6c269 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/buildid/doc.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +/* +Buildid displays or updates the build ID stored in a Go package or binary. + +Usage: + + go tool buildid [-w] file + +By default, buildid prints the build ID found in the named file. +If the -w option is given, buildid rewrites the build ID found in +the file to accurately record a content hash of the file. + +This tool is only intended for use by the go command or +other build systems. +*/ +package main diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/ast.go b/platform/dbops/binaries/go/go/src/cmd/cgo/ast.go new file mode 100644 index 0000000000000000000000000000000000000000..3cbbeafdca87358d6ac8a9259c058372069b67cb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/ast.go @@ -0,0 +1,577 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Parse input AST and prepare Prog structure. + +package main + +import ( + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/scanner" + "go/token" + "os" + "strings" +) + +func parse(name string, src []byte, flags parser.Mode) *ast.File { + ast1, err := parser.ParseFile(fset, name, src, flags) + if err != nil { + if list, ok := err.(scanner.ErrorList); ok { + // If err is a scanner.ErrorList, its String will print just + // the first error and then (+n more errors). + // Instead, turn it into a new Error that will return + // details for all the errors. + for _, e := range list { + fmt.Fprintln(os.Stderr, e) + } + os.Exit(2) + } + fatalf("parsing %s: %s", name, err) + } + return ast1 +} + +func sourceLine(n ast.Node) int { + return fset.Position(n.Pos()).Line +} + +// ParseGo populates f with information learned from the Go source code +// which was read from the named file. It gathers the C preamble +// attached to the import "C" comment, a list of references to C.xxx, +// a list of exported functions, and the actual AST, to be rewritten and +// printed. 
+func (f *File) ParseGo(abspath string, src []byte) { + // Two different parses: once with comments, once without. + // The printer is not good enough at printing comments in the + // right place when we start editing the AST behind its back, + // so we use ast1 to look for the doc comments on import "C" + // and on exported functions, and we use ast2 for translating + // and reprinting. + // In cgo mode, we ignore ast2 and just apply edits directly + // the text behind ast1. In godefs mode we modify and print ast2. + ast1 := parse(abspath, src, parser.SkipObjectResolution|parser.ParseComments) + ast2 := parse(abspath, src, parser.SkipObjectResolution) + + f.Package = ast1.Name.Name + f.Name = make(map[string]*Name) + f.NamePos = make(map[*Name]token.Pos) + + // In ast1, find the import "C" line and get any extra C preamble. + sawC := false + for _, decl := range ast1.Decls { + switch decl := decl.(type) { + case *ast.GenDecl: + for _, spec := range decl.Specs { + s, ok := spec.(*ast.ImportSpec) + if !ok || s.Path.Value != `"C"` { + continue + } + sawC = true + if s.Name != nil { + error_(s.Path.Pos(), `cannot rename import "C"`) + } + cg := s.Doc + if cg == nil && len(decl.Specs) == 1 { + cg = decl.Doc + } + if cg != nil { + if strings.ContainsAny(abspath, "\r\n") { + // This should have been checked when the file path was first resolved, + // but we double check here just to be sure. + fatalf("internal error: ParseGo: abspath contains unexpected newline character: %q", abspath) + } + f.Preamble += fmt.Sprintf("#line %d %q\n", sourceLine(cg), abspath) + f.Preamble += commentText(cg) + "\n" + f.Preamble += "#line 1 \"cgo-generated-wrapper\"\n" + } + } + + case *ast.FuncDecl: + // Also, reject attempts to declare methods on C.T or *C.T. + // (The generated code would otherwise accept this + // invalid input; see issue #57926.) 
+ if decl.Recv != nil && len(decl.Recv.List) > 0 { + recvType := decl.Recv.List[0].Type + if recvType != nil { + t := recvType + if star, ok := unparen(t).(*ast.StarExpr); ok { + t = star.X + } + if sel, ok := unparen(t).(*ast.SelectorExpr); ok { + var buf strings.Builder + format.Node(&buf, fset, recvType) + error_(sel.Pos(), `cannot define new methods on non-local type %s`, &buf) + } + } + } + } + + } + if !sawC { + error_(ast1.Package, `cannot find import "C"`) + } + + // In ast2, strip the import "C" line. + if *godefs { + w := 0 + for _, decl := range ast2.Decls { + d, ok := decl.(*ast.GenDecl) + if !ok { + ast2.Decls[w] = decl + w++ + continue + } + ws := 0 + for _, spec := range d.Specs { + s, ok := spec.(*ast.ImportSpec) + if !ok || s.Path.Value != `"C"` { + d.Specs[ws] = spec + ws++ + } + } + if ws == 0 { + continue + } + d.Specs = d.Specs[0:ws] + ast2.Decls[w] = d + w++ + } + ast2.Decls = ast2.Decls[0:w] + } else { + for _, decl := range ast2.Decls { + d, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + for _, spec := range d.Specs { + if s, ok := spec.(*ast.ImportSpec); ok && s.Path.Value == `"C"` { + // Replace "C" with _ "unsafe", to keep program valid. + // (Deleting import statement or clause is not safe if it is followed + // in the source by an explicit semicolon.) + f.Edit.Replace(f.offset(s.Path.Pos()), f.offset(s.Path.End()), `_ "unsafe"`) + } + } + } + } + + // Accumulate pointers to uses of C.x. + if f.Ref == nil { + f.Ref = make([]*Ref, 0, 8) + } + f.walk(ast2, ctxProg, (*File).validateIdents) + f.walk(ast2, ctxProg, (*File).saveExprs) + + // Accumulate exported functions. + // The comments are only on ast1 but we need to + // save the function bodies from ast2. + // The first walk fills in ExpFunc, and the + // second walk changes the entries to + // refer to ast2 instead. 
+ f.walk(ast1, ctxProg, (*File).saveExport) + f.walk(ast2, ctxProg, (*File).saveExport2) + + f.Comments = ast1.Comments + f.AST = ast2 +} + +// Like ast.CommentGroup's Text method but preserves +// leading blank lines, so that line numbers line up. +func commentText(g *ast.CommentGroup) string { + var pieces []string + for _, com := range g.List { + c := com.Text + // Remove comment markers. + // The parser has given us exactly the comment text. + switch c[1] { + case '/': + //-style comment (no newline at the end) + c = c[2:] + "\n" + case '*': + /*-style comment */ + c = c[2 : len(c)-2] + } + pieces = append(pieces, c) + } + return strings.Join(pieces, "") +} + +func (f *File) validateIdents(x interface{}, context astContext) { + if x, ok := x.(*ast.Ident); ok { + if f.isMangledName(x.Name) { + error_(x.Pos(), "identifier %q may conflict with identifiers generated by cgo", x.Name) + } + } +} + +// Save various references we are going to need later. +func (f *File) saveExprs(x interface{}, context astContext) { + switch x := x.(type) { + case *ast.Expr: + switch (*x).(type) { + case *ast.SelectorExpr: + f.saveRef(x, context) + } + case *ast.CallExpr: + f.saveCall(x, context) + } +} + +// Save references to C.xxx for later processing. +func (f *File) saveRef(n *ast.Expr, context astContext) { + sel := (*n).(*ast.SelectorExpr) + // For now, assume that the only instance of capital C is when + // used as the imported package identifier. + // The parser should take care of scoping in the future, so + // that we will be able to distinguish a "top-level C" from a + // local C. 
+ if l, ok := sel.X.(*ast.Ident); !ok || l.Name != "C" { + return + } + if context == ctxAssign2 { + context = ctxExpr + } + if context == ctxEmbedType { + error_(sel.Pos(), "cannot embed C type") + } + goname := sel.Sel.Name + if goname == "errno" { + error_(sel.Pos(), "cannot refer to errno directly; see documentation") + return + } + if goname == "_CMalloc" { + error_(sel.Pos(), "cannot refer to C._CMalloc; use C.malloc") + return + } + if goname == "malloc" { + goname = "_CMalloc" + } + name := f.Name[goname] + if name == nil { + name = &Name{ + Go: goname, + } + f.Name[goname] = name + f.NamePos[name] = sel.Pos() + } + f.Ref = append(f.Ref, &Ref{ + Name: name, + Expr: n, + Context: context, + }) +} + +// Save calls to C.xxx for later processing. +func (f *File) saveCall(call *ast.CallExpr, context astContext) { + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return + } + if l, ok := sel.X.(*ast.Ident); !ok || l.Name != "C" { + return + } + c := &Call{Call: call, Deferred: context == ctxDefer} + f.Calls = append(f.Calls, c) +} + +// If a function should be exported add it to ExpFunc. +func (f *File) saveExport(x interface{}, context astContext) { + n, ok := x.(*ast.FuncDecl) + if !ok { + return + } + + if n.Doc == nil { + return + } + for _, c := range n.Doc.List { + if !strings.HasPrefix(c.Text, "//export ") { + continue + } + + name := strings.TrimSpace(c.Text[9:]) + if name == "" { + error_(c.Pos(), "export missing name") + } + + if name != n.Name.Name { + error_(c.Pos(), "export comment has wrong name %q, want %q", name, n.Name.Name) + } + + doc := "" + for _, c1 := range n.Doc.List { + if c1 != c { + doc += c1.Text + "\n" + } + } + + f.ExpFunc = append(f.ExpFunc, &ExpFunc{ + Func: n, + ExpName: name, + Doc: doc, + }) + break + } +} + +// Make f.ExpFunc[i] point at the Func from this AST instead of the other one. 
+func (f *File) saveExport2(x interface{}, context astContext) { + n, ok := x.(*ast.FuncDecl) + if !ok { + return + } + + for _, exp := range f.ExpFunc { + if exp.Func.Name.Name == n.Name.Name { + exp.Func = n + break + } + } +} + +type astContext int + +const ( + ctxProg astContext = iota + ctxEmbedType + ctxType + ctxStmt + ctxExpr + ctxField + ctxParam + ctxAssign2 // assignment of a single expression to two variables + ctxSwitch + ctxTypeSwitch + ctxFile + ctxDecl + ctxSpec + ctxDefer + ctxCall // any function call other than ctxCall2 + ctxCall2 // function call whose result is assigned to two variables + ctxSelector +) + +// walk walks the AST x, calling visit(f, x, context) for each node. +func (f *File) walk(x interface{}, context astContext, visit func(*File, interface{}, astContext)) { + visit(f, x, context) + switch n := x.(type) { + case *ast.Expr: + f.walk(*n, context, visit) + + // everything else just recurs + default: + f.walkUnexpected(x, context, visit) + + case nil: + + // These are ordered and grouped to match ../../go/ast/ast.go + case *ast.Field: + if len(n.Names) == 0 && context == ctxField { + f.walk(&n.Type, ctxEmbedType, visit) + } else { + f.walk(&n.Type, ctxType, visit) + } + case *ast.FieldList: + for _, field := range n.List { + f.walk(field, context, visit) + } + case *ast.BadExpr: + case *ast.Ident: + case *ast.Ellipsis: + f.walk(&n.Elt, ctxType, visit) + case *ast.BasicLit: + case *ast.FuncLit: + f.walk(n.Type, ctxType, visit) + f.walk(n.Body, ctxStmt, visit) + case *ast.CompositeLit: + f.walk(&n.Type, ctxType, visit) + f.walk(n.Elts, ctxExpr, visit) + case *ast.ParenExpr: + f.walk(&n.X, context, visit) + case *ast.SelectorExpr: + f.walk(&n.X, ctxSelector, visit) + case *ast.IndexExpr: + f.walk(&n.X, ctxExpr, visit) + f.walk(&n.Index, ctxExpr, visit) + case *ast.SliceExpr: + f.walk(&n.X, ctxExpr, visit) + if n.Low != nil { + f.walk(&n.Low, ctxExpr, visit) + } + if n.High != nil { + f.walk(&n.High, ctxExpr, visit) + } + if n.Max != 
nil { + f.walk(&n.Max, ctxExpr, visit) + } + case *ast.TypeAssertExpr: + f.walk(&n.X, ctxExpr, visit) + f.walk(&n.Type, ctxType, visit) + case *ast.CallExpr: + if context == ctxAssign2 { + f.walk(&n.Fun, ctxCall2, visit) + } else { + f.walk(&n.Fun, ctxCall, visit) + } + f.walk(n.Args, ctxExpr, visit) + case *ast.StarExpr: + f.walk(&n.X, context, visit) + case *ast.UnaryExpr: + f.walk(&n.X, ctxExpr, visit) + case *ast.BinaryExpr: + f.walk(&n.X, ctxExpr, visit) + f.walk(&n.Y, ctxExpr, visit) + case *ast.KeyValueExpr: + f.walk(&n.Key, ctxExpr, visit) + f.walk(&n.Value, ctxExpr, visit) + + case *ast.ArrayType: + f.walk(&n.Len, ctxExpr, visit) + f.walk(&n.Elt, ctxType, visit) + case *ast.StructType: + f.walk(n.Fields, ctxField, visit) + case *ast.FuncType: + if tparams := funcTypeTypeParams(n); tparams != nil { + f.walk(tparams, ctxParam, visit) + } + f.walk(n.Params, ctxParam, visit) + if n.Results != nil { + f.walk(n.Results, ctxParam, visit) + } + case *ast.InterfaceType: + f.walk(n.Methods, ctxField, visit) + case *ast.MapType: + f.walk(&n.Key, ctxType, visit) + f.walk(&n.Value, ctxType, visit) + case *ast.ChanType: + f.walk(&n.Value, ctxType, visit) + + case *ast.BadStmt: + case *ast.DeclStmt: + f.walk(n.Decl, ctxDecl, visit) + case *ast.EmptyStmt: + case *ast.LabeledStmt: + f.walk(n.Stmt, ctxStmt, visit) + case *ast.ExprStmt: + f.walk(&n.X, ctxExpr, visit) + case *ast.SendStmt: + f.walk(&n.Chan, ctxExpr, visit) + f.walk(&n.Value, ctxExpr, visit) + case *ast.IncDecStmt: + f.walk(&n.X, ctxExpr, visit) + case *ast.AssignStmt: + f.walk(n.Lhs, ctxExpr, visit) + if len(n.Lhs) == 2 && len(n.Rhs) == 1 { + f.walk(n.Rhs, ctxAssign2, visit) + } else { + f.walk(n.Rhs, ctxExpr, visit) + } + case *ast.GoStmt: + f.walk(n.Call, ctxExpr, visit) + case *ast.DeferStmt: + f.walk(n.Call, ctxDefer, visit) + case *ast.ReturnStmt: + f.walk(n.Results, ctxExpr, visit) + case *ast.BranchStmt: + case *ast.BlockStmt: + f.walk(n.List, context, visit) + case *ast.IfStmt: + f.walk(n.Init, 
ctxStmt, visit) + f.walk(&n.Cond, ctxExpr, visit) + f.walk(n.Body, ctxStmt, visit) + f.walk(n.Else, ctxStmt, visit) + case *ast.CaseClause: + if context == ctxTypeSwitch { + context = ctxType + } else { + context = ctxExpr + } + f.walk(n.List, context, visit) + f.walk(n.Body, ctxStmt, visit) + case *ast.SwitchStmt: + f.walk(n.Init, ctxStmt, visit) + f.walk(&n.Tag, ctxExpr, visit) + f.walk(n.Body, ctxSwitch, visit) + case *ast.TypeSwitchStmt: + f.walk(n.Init, ctxStmt, visit) + f.walk(n.Assign, ctxStmt, visit) + f.walk(n.Body, ctxTypeSwitch, visit) + case *ast.CommClause: + f.walk(n.Comm, ctxStmt, visit) + f.walk(n.Body, ctxStmt, visit) + case *ast.SelectStmt: + f.walk(n.Body, ctxStmt, visit) + case *ast.ForStmt: + f.walk(n.Init, ctxStmt, visit) + f.walk(&n.Cond, ctxExpr, visit) + f.walk(n.Post, ctxStmt, visit) + f.walk(n.Body, ctxStmt, visit) + case *ast.RangeStmt: + f.walk(&n.Key, ctxExpr, visit) + f.walk(&n.Value, ctxExpr, visit) + f.walk(&n.X, ctxExpr, visit) + f.walk(n.Body, ctxStmt, visit) + + case *ast.ImportSpec: + case *ast.ValueSpec: + f.walk(&n.Type, ctxType, visit) + if len(n.Names) == 2 && len(n.Values) == 1 { + f.walk(&n.Values[0], ctxAssign2, visit) + } else { + f.walk(n.Values, ctxExpr, visit) + } + case *ast.TypeSpec: + if tparams := typeSpecTypeParams(n); tparams != nil { + f.walk(tparams, ctxParam, visit) + } + f.walk(&n.Type, ctxType, visit) + + case *ast.BadDecl: + case *ast.GenDecl: + f.walk(n.Specs, ctxSpec, visit) + case *ast.FuncDecl: + if n.Recv != nil { + f.walk(n.Recv, ctxParam, visit) + } + f.walk(n.Type, ctxType, visit) + if n.Body != nil { + f.walk(n.Body, ctxStmt, visit) + } + + case *ast.File: + f.walk(n.Decls, ctxDecl, visit) + + case *ast.Package: + for _, file := range n.Files { + f.walk(file, ctxFile, visit) + } + + case []ast.Decl: + for _, d := range n { + f.walk(d, context, visit) + } + case []ast.Expr: + for i := range n { + f.walk(&n[i], context, visit) + } + case []ast.Stmt: + for _, s := range n { + f.walk(s, context, 
visit) + } + case []ast.Spec: + for _, s := range n { + f.walk(s, context, visit) + } + } +} + +// If x is of the form (T), unparen returns unparen(T), otherwise it returns x. +func unparen(x ast.Expr) ast.Expr { + if p, isParen := x.(*ast.ParenExpr); isParen { + x = unparen(p.X) + } + return x +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/ast_go1.go b/platform/dbops/binaries/go/go/src/cmd/cgo/ast_go1.go new file mode 100644 index 0000000000000000000000000000000000000000..2f65f0f718356fd81210f441eef57c349a680f43 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/ast_go1.go @@ -0,0 +1,25 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build compiler_bootstrap + +package main + +import ( + "go/ast" + "go/token" +) + +func (f *File) walkUnexpected(x interface{}, context astContext, visit func(*File, interface{}, astContext)) { + error_(token.NoPos, "unexpected type %T in walk", x) + panic("unexpected type") +} + +func funcTypeTypeParams(n *ast.FuncType) *ast.FieldList { + return nil +} + +func typeSpecTypeParams(n *ast.TypeSpec) *ast.FieldList { + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/ast_go118.go b/platform/dbops/binaries/go/go/src/cmd/cgo/ast_go118.go new file mode 100644 index 0000000000000000000000000000000000000000..ced30728dc9a798b63f4c15608ca914b993626db --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/ast_go118.go @@ -0,0 +1,32 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !compiler_bootstrap + +package main + +import ( + "go/ast" + "go/token" +) + +func (f *File) walkUnexpected(x interface{}, context astContext, visit func(*File, interface{}, astContext)) { + switch n := x.(type) { + default: + error_(token.NoPos, "unexpected type %T in walk", x) + panic("unexpected type") + + case *ast.IndexListExpr: + f.walk(&n.X, ctxExpr, visit) + f.walk(n.Indices, ctxExpr, visit) + } +} + +func funcTypeTypeParams(n *ast.FuncType) *ast.FieldList { + return n.TypeParams +} + +func typeSpecTypeParams(n *ast.TypeSpec) *ast.FieldList { + return n.TypeParams +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/doc.go b/platform/dbops/binaries/go/go/src/cmd/cgo/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..c2e375165c6731903225624d293a9beb52e4ee95 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/doc.go @@ -0,0 +1,1064 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Cgo enables the creation of Go packages that call C code. + +# Using cgo with the go command + +To use cgo write normal Go code that imports a pseudo-package "C". +The Go code can then refer to types such as C.size_t, variables such +as C.stdout, or functions such as C.putchar. + +If the import of "C" is immediately preceded by a comment, that +comment, called the preamble, is used as a header when compiling +the C parts of the package. For example: + + // #include + // #include + import "C" + +The preamble may contain any C code, including function and variable +declarations and definitions. These may then be referred to from Go +code as though they were defined in the package "C". All names +declared in the preamble may be used, even if they start with a +lower-case letter. Exception: static variables in the preamble may +not be referenced from Go code; static functions are permitted. 
+ +See $GOROOT/cmd/cgo/internal/teststdio and $GOROOT/misc/cgo/gmp for examples. See +"C? Go? Cgo!" for an introduction to using cgo: +https://golang.org/doc/articles/c_go_cgo.html. + +CFLAGS, CPPFLAGS, CXXFLAGS, FFLAGS and LDFLAGS may be defined with pseudo +#cgo directives within these comments to tweak the behavior of the C, C++ +or Fortran compiler. Values defined in multiple directives are concatenated +together. The directive can include a list of build constraints limiting its +effect to systems satisfying one of the constraints +(see https://golang.org/pkg/go/build/#hdr-Build_Constraints for details about the constraint syntax). +For example: + + // #cgo CFLAGS: -DPNG_DEBUG=1 + // #cgo amd64 386 CFLAGS: -DX86=1 + // #cgo LDFLAGS: -lpng + // #include + import "C" + +Alternatively, CPPFLAGS and LDFLAGS may be obtained via the pkg-config tool +using a '#cgo pkg-config:' directive followed by the package names. +For example: + + // #cgo pkg-config: png cairo + // #include + import "C" + +The default pkg-config tool may be changed by setting the PKG_CONFIG environment variable. + +For security reasons, only a limited set of flags are allowed, notably -D, -U, -I, and -l. +To allow additional flags, set CGO_CFLAGS_ALLOW to a regular expression +matching the new flags. To disallow flags that would otherwise be allowed, +set CGO_CFLAGS_DISALLOW to a regular expression matching arguments +that must be disallowed. In both cases the regular expression must match +a full argument: to allow -mfoo=bar, use CGO_CFLAGS_ALLOW='-mfoo.*', +not just CGO_CFLAGS_ALLOW='-mfoo'. Similarly named variables control +the allowed CPPFLAGS, CXXFLAGS, FFLAGS, and LDFLAGS. + +Also for security reasons, only a limited set of characters are +permitted, notably alphanumeric characters and a few symbols, such as +'.', that will not be interpreted in unexpected ways. Attempts to use +forbidden characters will get a "malformed #cgo argument" error. 
+ +When building, the CGO_CFLAGS, CGO_CPPFLAGS, CGO_CXXFLAGS, CGO_FFLAGS and +CGO_LDFLAGS environment variables are added to the flags derived from +these directives. Package-specific flags should be set using the +directives, not the environment variables, so that builds work in +unmodified environments. Flags obtained from environment variables +are not subject to the security limitations described above. + +All the cgo CPPFLAGS and CFLAGS directives in a package are concatenated and +used to compile C files in that package. All the CPPFLAGS and CXXFLAGS +directives in a package are concatenated and used to compile C++ files in that +package. All the CPPFLAGS and FFLAGS directives in a package are concatenated +and used to compile Fortran files in that package. All the LDFLAGS directives +in any package in the program are concatenated and used at link time. All the +pkg-config directives are concatenated and sent to pkg-config simultaneously +to add to each appropriate set of command-line flags. + +When the cgo directives are parsed, any occurrence of the string ${SRCDIR} +will be replaced by the absolute path to the directory containing the source +file. This allows pre-compiled static libraries to be included in the package +directory and linked properly. +For example if package foo is in the directory /go/src/foo: + + // #cgo LDFLAGS: -L${SRCDIR}/libs -lfoo + +Will be expanded to: + + // #cgo LDFLAGS: -L/go/src/foo/libs -lfoo + +When the Go tool sees that one or more Go files use the special import +"C", it will look for other non-Go files in the directory and compile +them as part of the Go package. Any .c, .s, .S or .sx files will be +compiled with the C compiler. Any .cc, .cpp, or .cxx files will be +compiled with the C++ compiler. Any .f, .F, .for or .f90 files will be +compiled with the fortran compiler. 
Any .h, .hh, .hpp, or .hxx files will +not be compiled separately, but, if these header files are changed, +the package (including its non-Go source files) will be recompiled. +Note that changes to files in other directories do not cause the package +to be recompiled, so all non-Go source code for the package should be +stored in the package directory, not in subdirectories. +The default C and C++ compilers may be changed by the CC and CXX +environment variables, respectively; those environment variables +may include command line options. + +The cgo tool will always invoke the C compiler with the source file's +directory in the include path; i.e. -I${SRCDIR} is always implied. This +means that if a header file foo/bar.h exists both in the source +directory and also in the system include directory (or some other place +specified by a -I flag), then "#include " will always find the +local version in preference to any other version. + +The cgo tool is enabled by default for native builds on systems where +it is expected to work. It is disabled by default when cross-compiling +as well as when the CC environment variable is unset and the default +C compiler (typically gcc or clang) cannot be found on the system PATH. +You can override the default by setting the CGO_ENABLED +environment variable when running the go tool: set it to 1 to enable +the use of cgo, and to 0 to disable it. The go tool will set the +build constraint "cgo" if cgo is enabled. The special import "C" +implies the "cgo" build constraint, as though the file also said +"//go:build cgo". Therefore, if cgo is disabled, files that import +"C" will not be built by the go tool. (For more about build constraints +see https://golang.org/pkg/go/build/#hdr-Build_Constraints). + +When cross-compiling, you must specify a C cross-compiler for cgo to +use. 
You can do this by setting the generic CC_FOR_TARGET or the +more specific CC_FOR_${GOOS}_${GOARCH} (for example, CC_FOR_linux_arm) +environment variable when building the toolchain using make.bash, +or you can set the CC environment variable any time you run the go tool. + +The CXX_FOR_TARGET, CXX_FOR_${GOOS}_${GOARCH}, and CXX +environment variables work in a similar way for C++ code. + +# Go references to C + +Within the Go file, C's struct field names that are keywords in Go +can be accessed by prefixing them with an underscore: if x points at a C +struct with a field named "type", x._type accesses the field. +C struct fields that cannot be expressed in Go, such as bit fields +or misaligned data, are omitted in the Go struct, replaced by +appropriate padding to reach the next field or the end of the struct. + +The standard C numeric types are available under the names +C.char, C.schar (signed char), C.uchar (unsigned char), +C.short, C.ushort (unsigned short), C.int, C.uint (unsigned int), +C.long, C.ulong (unsigned long), C.longlong (long long), +C.ulonglong (unsigned long long), C.float, C.double, +C.complexfloat (complex float), and C.complexdouble (complex double). +The C type void* is represented by Go's unsafe.Pointer. +The C types __int128_t and __uint128_t are represented by [16]byte. + +A few special C types which would normally be represented by a pointer +type in Go are instead represented by a uintptr. See the Special +cases section below. + +To access a struct, union, or enum type directly, prefix it with +struct_, union_, or enum_, as in C.struct_stat. + +The size of any C type T is available as C.sizeof_T, as in +C.sizeof_struct_stat. + +A C function may be declared in the Go file with a parameter type of +the special name _GoString_. This function may be called with an +ordinary Go string value. 
The string length, and a pointer to the +string contents, may be accessed by calling the C functions + + size_t _GoStringLen(_GoString_ s); + const char *_GoStringPtr(_GoString_ s); + +These functions are only available in the preamble, not in other C +files. The C code must not modify the contents of the pointer returned +by _GoStringPtr. Note that the string contents may not have a trailing +NUL byte. + +As Go doesn't have support for C's union type in the general case, +C's union types are represented as a Go byte array with the same length. + +Go structs cannot embed fields with C types. + +Go code cannot refer to zero-sized fields that occur at the end of +non-empty C structs. To get the address of such a field (which is the +only operation you can do with a zero-sized field) you must take the +address of the struct and add the size of the struct. + +Cgo translates C types into equivalent unexported Go types. +Because the translations are unexported, a Go package should not +expose C types in its exported API: a C type used in one Go package +is different from the same C type used in another. + +Any C function (even void functions) may be called in a multiple +assignment context to retrieve both the return value (if any) and the +C errno variable as an error (use _ to skip the result value if the +function returns void). For example: + + n, err = C.sqrt(-1) + _, err := C.voidFunc() + var n, err = C.sqrt(1) + +Calling C function pointers is currently not supported, however you can +declare Go variables which hold C function pointers and pass them +back and forth between Go and C. C code may call function pointers +received from Go. 
For example: + + package main + + // typedef int (*intFunc) (); + // + // int + // bridge_int_func(intFunc f) + // { + // return f(); + // } + // + // int fortytwo() + // { + // return 42; + // } + import "C" + import "fmt" + + func main() { + f := C.intFunc(C.fortytwo) + fmt.Println(int(C.bridge_int_func(f))) + // Output: 42 + } + +In C, a function argument written as a fixed size array +actually requires a pointer to the first element of the array. +C compilers are aware of this calling convention and adjust +the call accordingly, but Go cannot. In Go, you must pass +the pointer to the first element explicitly: C.f(&C.x[0]). + +Calling variadic C functions is not supported. It is possible to +circumvent this by using a C function wrapper. For example: + + package main + + // #include + // #include + // + // static void myprint(char* s) { + // printf("%s\n", s); + // } + import "C" + import "unsafe" + + func main() { + cs := C.CString("Hello from stdio") + C.myprint(cs) + C.free(unsafe.Pointer(cs)) + } + +A few special functions convert between Go and C types +by making copies of the data. In pseudo-Go definitions: + + // Go string to C string + // The C string is allocated in the C heap using malloc. + // It is the caller's responsibility to arrange for it to be + // freed, such as by calling C.free (be sure to include stdlib.h + // if C.free is needed). + func C.CString(string) *C.char + + // Go []byte slice to C array + // The C array is allocated in the C heap using malloc. + // It is the caller's responsibility to arrange for it to be + // freed, such as by calling C.free (be sure to include stdlib.h + // if C.free is needed). 
+ func C.CBytes([]byte) unsafe.Pointer + + // C string to Go string + func C.GoString(*C.char) string + + // C data with explicit length to Go string + func C.GoStringN(*C.char, C.int) string + + // C data with explicit length to Go []byte + func C.GoBytes(unsafe.Pointer, C.int) []byte + +As a special case, C.malloc does not call the C library malloc directly +but instead calls a Go helper function that wraps the C library malloc +but guarantees never to return nil. If C's malloc indicates out of memory, +the helper function crashes the program, like when Go itself runs out +of memory. Because C.malloc cannot fail, it has no two-result form +that returns errno. + +# C references to Go + +Go functions can be exported for use by C code in the following way: + + //export MyFunction + func MyFunction(arg1, arg2 int, arg3 string) int64 {...} + + //export MyFunction2 + func MyFunction2(arg1, arg2 int, arg3 string) (int64, *C.char) {...} + +They will be available in the C code as: + + extern GoInt64 MyFunction(int arg1, int arg2, GoString arg3); + extern struct MyFunction2_return MyFunction2(int arg1, int arg2, GoString arg3); + +found in the _cgo_export.h generated header, after any preambles +copied from the cgo input files. Functions with multiple +return values are mapped to functions returning a struct. + +Not all Go types can be mapped to C types in a useful way. +Go struct types are not supported; use a C struct type. +Go array types are not supported; use a C pointer. + +Go functions that take arguments of type string may be called with the +C type _GoString_, described above. The _GoString_ type will be +automatically defined in the preamble. Note that there is no way for C +code to create a value of this type; this is only useful for passing +string values from Go to C and back to Go. + +Using //export in a file places a restriction on the preamble: +since it is copied into two different C output files, it must not +contain any definitions, only declarations. 
If a file contains both +definitions and declarations, then the two output files will produce +duplicate symbols and the linker will fail. To avoid this, definitions +must be placed in preambles in other files, or in C source files. + +# Passing pointers + +Go is a garbage collected language, and the garbage collector needs to +know the location of every pointer to Go memory. Because of this, +there are restrictions on passing pointers between Go and C. + +In this section the term Go pointer means a pointer to memory +allocated by Go (such as by using the & operator or calling the +predefined new function) and the term C pointer means a pointer to +memory allocated by C (such as by a call to C.malloc). Whether a +pointer is a Go pointer or a C pointer is a dynamic property +determined by how the memory was allocated; it has nothing to do with +the type of the pointer. + +Note that values of some Go types, other than the type's zero value, +always include Go pointers. This is true of string, slice, interface, +channel, map, and function types. A pointer type may hold a Go pointer +or a C pointer. Array and struct types may or may not include Go +pointers, depending on the element types. All the discussion below +about Go pointers applies not just to pointer types, but also to other +types that include Go pointers. + +All Go pointers passed to C must point to pinned Go memory. Go pointers +passed as function arguments to C functions have the memory they point to +implicitly pinned for the duration of the call. Go memory reachable from +these function arguments must be pinned as long as the C code has access +to it. Whether Go memory is pinned is a dynamic property of that memory +region; it has nothing to do with the type of the pointer. + +Go values created by calling new, by taking the address of a composite +literal, or by taking the address of a local variable may also have their +memory pinned using [runtime.Pinner]. 
This type may be used to manage +the duration of the memory's pinned status, potentially beyond the +duration of a C function call. Memory may be pinned more than once and +must be unpinned exactly the same number of times it has been pinned. + +Go code may pass a Go pointer to C provided the memory to which it +points does not contain any Go pointers to memory that is unpinned. When +passing a pointer to a field in a struct, the Go memory in question is +the memory occupied by the field, not the entire struct. When passing a +pointer to an element in an array or slice, the Go memory in question is +the entire array or the entire backing array of the slice. + +C code may keep a copy of a Go pointer only as long as the memory it +points to is pinned. + +C code may not keep a copy of a Go pointer after the call returns, +unless the memory it points to is pinned with [runtime.Pinner] and the +Pinner is not unpinned while the Go pointer is stored in C memory. +This implies that C code may not keep a copy of a string, slice, +channel, and so forth, because they cannot be pinned with +[runtime.Pinner]. + +The _GoString_ type also may not be pinned with [runtime.Pinner]. +Because it includes a Go pointer, the memory it points to is only pinned +for the duration of the call; _GoString_ values may not be retained by C +code. + +A Go function called by C code may return a Go pointer to pinned memory +(which implies that it may not return a string, slice, channel, and so +forth). A Go function called by C code may take C pointers as arguments, +and it may store non-pointer data, C pointers, or Go pointers to pinned +memory through those pointers. It may not store a Go pointer to unpinned +memory in memory pointed to by a C pointer (which again, implies that it +may not store a string, slice, channel, and so forth). 
A Go function +called by C code may take a Go pointer but it must preserve the property +that the Go memory to which it points (and the Go memory to which that +memory points, and so on) is pinned. + +These rules are checked dynamically at runtime. The checking is +controlled by the cgocheck setting of the GODEBUG environment +variable. The default setting is GODEBUG=cgocheck=1, which implements +reasonably cheap dynamic checks. These checks may be disabled +entirely using GODEBUG=cgocheck=0. Complete checking of pointer +handling, at some cost in run time, is available via GODEBUG=cgocheck=2. + +It is possible to defeat this enforcement by using the unsafe package, +and of course there is nothing stopping the C code from doing anything +it likes. However, programs that break these rules are likely to fail +in unexpected and unpredictable ways. + +The runtime/cgo.Handle type can be used to safely pass Go values +between Go and C. See the runtime/cgo package documentation for details. + +Note: the current implementation has a bug. While Go code is permitted +to write nil or a C pointer (but not a Go pointer) to C memory, the +current implementation may sometimes cause a runtime error if the +contents of the C memory appear to be a Go pointer. Therefore, avoid +passing uninitialized C memory to Go code if the Go code is going to +store pointer values in it. Zero out the memory in C before passing it +to Go. + +# Special cases + +A few special C types which would normally be represented by a pointer +type in Go are instead represented by a uintptr. Those include: + +1. The *Ref types on Darwin, rooted at CoreFoundation's CFTypeRef type. + +2. The object types from Java's JNI interface: + + jobject + jclass + jthrowable + jstring + jarray + jbooleanArray + jbyteArray + jcharArray + jshortArray + jintArray + jlongArray + jfloatArray + jdoubleArray + jobjectArray + jweak + +3. The EGLDisplay and EGLConfig types from the EGL API. 
+ +These types are uintptr on the Go side because they would otherwise +confuse the Go garbage collector; they are sometimes not really +pointers but data structures encoded in a pointer type. All operations +on these types must happen in C. The proper constant to initialize an +empty such reference is 0, not nil. + +These special cases were introduced in Go 1.10. For auto-updating code +from Go 1.9 and earlier, use the cftype or jni rewrites in the Go fix tool: + + go tool fix -r cftype + go tool fix -r jni + +It will replace nil with 0 in the appropriate places. + +The EGLDisplay case was introduced in Go 1.12. Use the egl rewrite +to auto-update code from Go 1.11 and earlier: + + go tool fix -r egl + +The EGLConfig case was introduced in Go 1.15. Use the eglconf rewrite +to auto-update code from Go 1.14 and earlier: + + go tool fix -r eglconf + +# Using cgo directly + +Usage: + + go tool cgo [cgo options] [-- compiler options] gofiles... + +Cgo transforms the specified input Go source files into several output +Go and C source files. + +The compiler options are passed through uninterpreted when +invoking the C compiler to compile the C parts of the package. + +The following options are available when running cgo directly: + + -V + Print cgo version and exit. + -debug-define + Debugging option. Print #defines. + -debug-gcc + Debugging option. Trace C compiler execution and output. + -dynimport file + Write list of symbols imported by file. Write to + -dynout argument or to standard output. Used by go + build when building a cgo package. + -dynlinker + Write dynamic linker as part of -dynimport output. + -dynout file + Write -dynimport output to file. + -dynpackage package + Set Go package for -dynimport output. + -exportheader file + If there are any exported functions, write the + generated export declarations to file. + C code can #include this to see the declarations. + -importpath string + The import path for the Go package. 
Optional; used for + nicer comments in the generated files. + -import_runtime_cgo + If set (which it is by default) import runtime/cgo in + generated output. + -import_syscall + If set (which it is by default) import syscall in + generated output. + -gccgo + Generate output for the gccgo compiler rather than the + gc compiler. + -gccgoprefix prefix + The -fgo-prefix option to be used with gccgo. + -gccgopkgpath path + The -fgo-pkgpath option to be used with gccgo. + -gccgo_define_cgoincomplete + Define cgo.Incomplete locally rather than importing it from + the "runtime/cgo" package. Used for old gccgo versions. + -godefs + Write out input file in Go syntax replacing C package + names with real values. Used to generate files in the + syscall package when bootstrapping a new target. + -objdir directory + Put all generated files in directory. + -srcdir directory +*/ +package main + +/* +Implementation details. + +Cgo provides a way for Go programs to call C code linked into the same +address space. This comment explains the operation of cgo. + +Cgo reads a set of Go source files and looks for statements saying +import "C". If the import has a doc comment, that comment is +taken as literal C code to be used as a preamble to any C code +generated by cgo. A typical preamble #includes necessary definitions: + + // #include + import "C" + +For more details about the usage of cgo, see the documentation +comment at the top of this file. + +Understanding C + +Cgo scans the Go source files that import "C" for uses of that +package, such as C.puts. It collects all such identifiers. The next +step is to determine each kind of name. In C.xxx the xxx might refer +to a type, a function, a constant, or a global variable. Cgo must +decide which. + +The obvious thing for cgo to do is to process the preamble, expanding +#includes and processing the corresponding C code. 
That would require +a full C parser and type checker that was also aware of any extensions +known to the system compiler (for example, all the GNU C extensions) as +well as the system-specific header locations and system-specific +pre-#defined macros. This is certainly possible to do, but it is an +enormous amount of work. + +Cgo takes a different approach. It determines the meaning of C +identifiers not by parsing C code but by feeding carefully constructed +programs into the system C compiler and interpreting the generated +error messages, debug information, and object files. In practice, +parsing these is significantly less work and more robust than parsing +C source. + +Cgo first invokes gcc -E -dM on the preamble, in order to find out +about simple #defines for constants and the like. These are recorded +for later use. + +Next, cgo needs to identify the kinds for each identifier. For the +identifiers C.foo, cgo generates this C program: + + + #line 1 "not-declared" + void __cgo_f_1_1(void) { __typeof__(foo) *__cgo_undefined__1; } + #line 1 "not-type" + void __cgo_f_1_2(void) { foo *__cgo_undefined__2; } + #line 1 "not-int-const" + void __cgo_f_1_3(void) { enum { __cgo_undefined__3 = (foo)*1 }; } + #line 1 "not-num-const" + void __cgo_f_1_4(void) { static const double __cgo_undefined__4 = (foo); } + #line 1 "not-str-lit" + void __cgo_f_1_5(void) { static const char __cgo_undefined__5[] = (foo); } + +This program will not compile, but cgo can use the presence or absence +of an error message on a given line to deduce the information it +needs. The program is syntactically valid regardless of whether each +name is a type or an ordinary identifier, so there will be no syntax +errors that might stop parsing early. + +An error on not-declared:1 indicates that foo is undeclared. +An error on not-type:1 indicates that foo is not a type (if declared at all, it is an identifier). +An error on not-int-const:1 indicates that foo is not an integer constant. 
+An error on not-num-const:1 indicates that foo is not a number constant. +An error on not-str-lit:1 indicates that foo is not a string literal. +An error on not-signed-int-const:1 indicates that foo is not a signed integer constant. + +The line number specifies the name involved. In the example, 1 is foo. + +Next, cgo must learn the details of each type, variable, function, or +constant. It can do this by reading object files. If cgo has decided +that t1 is a type, v2 and v3 are variables or functions, and i4, i5 +are integer constants, u6 is an unsigned integer constant, and f7 and f8 +are float constants, and s9 and s10 are string constants, it generates: + + + __typeof__(t1) *__cgo__1; + __typeof__(v2) *__cgo__2; + __typeof__(v3) *__cgo__3; + __typeof__(i4) *__cgo__4; + enum { __cgo_enum__4 = i4 }; + __typeof__(i5) *__cgo__5; + enum { __cgo_enum__5 = i5 }; + __typeof__(u6) *__cgo__6; + enum { __cgo_enum__6 = u6 }; + __typeof__(f7) *__cgo__7; + __typeof__(f8) *__cgo__8; + __typeof__(s9) *__cgo__9; + __typeof__(s10) *__cgo__10; + + long long __cgodebug_ints[] = { + 0, // t1 + 0, // v2 + 0, // v3 + i4, + i5, + u6, + 0, // f7 + 0, // f8 + 0, // s9 + 0, // s10 + 1 + }; + + double __cgodebug_floats[] = { + 0, // t1 + 0, // v2 + 0, // v3 + 0, // i4 + 0, // i5 + 0, // u6 + f7, + f8, + 0, // s9 + 0, // s10 + 1 + }; + + const char __cgodebug_str__9[] = s9; + const unsigned long long __cgodebug_strlen__9 = sizeof(s9)-1; + const char __cgodebug_str__10[] = s10; + const unsigned long long __cgodebug_strlen__10 = sizeof(s10)-1; + +and again invokes the system C compiler, to produce an object file +containing debug information. Cgo parses the DWARF debug information +for __cgo__N to learn the type of each identifier. (The types also +distinguish functions from global variables.) Cgo reads the constant +values from the __cgodebug_* from the object file's data segment. + +At this point cgo knows the meaning of each C.xxx well enough to start +the translation process. 
+ +Translating Go + +Given the input Go files x.go and y.go, cgo generates these source +files: + + x.cgo1.go # for gc (cmd/compile) + y.cgo1.go # for gc + _cgo_gotypes.go # for gc + _cgo_import.go # for gc (if -dynout _cgo_import.go) + x.cgo2.c # for gcc + y.cgo2.c # for gcc + _cgo_defun.c # for gcc (if -gccgo) + _cgo_export.c # for gcc + _cgo_export.h # for gcc + _cgo_main.c # for gcc + _cgo_flags # for build tool (if -gccgo) + +The file x.cgo1.go is a copy of x.go with the import "C" removed and +references to C.xxx replaced with names like _Cfunc_xxx or _Ctype_xxx. +The definitions of those identifiers, written as Go functions, types, +or variables, are provided in _cgo_gotypes.go. + +Here is a _cgo_gotypes.go containing definitions for needed C types: + + type _Ctype_char int8 + type _Ctype_int int32 + type _Ctype_void [0]byte + +The _cgo_gotypes.go file also contains the definitions of the +functions. They all have similar bodies that invoke runtime·cgocall +to make a switch from the Go runtime world to the system C (GCC-based) +world. + +For example, here is the definition of _Cfunc_puts: + + //go:cgo_import_static _cgo_be59f0f25121_Cfunc_puts + //go:linkname __cgofn__cgo_be59f0f25121_Cfunc_puts _cgo_be59f0f25121_Cfunc_puts + var __cgofn__cgo_be59f0f25121_Cfunc_puts byte + var _cgo_be59f0f25121_Cfunc_puts = unsafe.Pointer(&__cgofn__cgo_be59f0f25121_Cfunc_puts) + + func _Cfunc_puts(p0 *_Ctype_char) (r1 _Ctype_int) { + _cgo_runtime_cgocall(_cgo_be59f0f25121_Cfunc_puts, uintptr(unsafe.Pointer(&p0))) + return + } + +The hexadecimal number is a hash of cgo's input, chosen to be +deterministic yet unlikely to collide with other uses. 
The actual +function _cgo_be59f0f25121_Cfunc_puts is implemented in a C source +file compiled by gcc, the file x.cgo2.c: + + void + _cgo_be59f0f25121_Cfunc_puts(void *v) + { + struct { + char* p0; + int r; + char __pad12[4]; + } __attribute__((__packed__, __gcc_struct__)) *a = v; + a->r = puts((void*)a->p0); + } + +It extracts the arguments from the pointer to _Cfunc_puts's argument +frame, invokes the system C function (in this case, puts), stores the +result in the frame, and returns. + +Linking + +Once the _cgo_export.c and *.cgo2.c files have been compiled with gcc, +they need to be linked into the final binary, along with the libraries +they might depend on (in the case of puts, stdio). cmd/link has been +extended to understand basic ELF files, but it does not understand ELF +in the full complexity that modern C libraries embrace, so it cannot +in general generate direct references to the system libraries. + +Instead, the build process generates an object file using dynamic +linkage to the desired libraries. The main function is provided by +_cgo_main.c: + + int main() { return 0; } + void crosscall2(void(*fn)(void*), void *a, int c, uintptr_t ctxt) { } + uintptr_t _cgo_wait_runtime_init_done(void) { return 0; } + void _cgo_release_context(uintptr_t ctxt) { } + char* _cgo_topofstack(void) { return (char*)0; } + void _cgo_allocate(void *a, int c) { } + void _cgo_panic(void *a, int c) { } + void _cgo_reginit(void) { } + +The extra functions here are stubs to satisfy the references in the C +code generated for gcc. The build process links this stub, along with +_cgo_export.c and *.cgo2.c, into a dynamic executable and then lets +cgo examine the executable. 
Cgo records the list of shared library +references and resolved names and writes them into a new file +_cgo_import.go, which looks like: + + //go:cgo_dynamic_linker "/lib64/ld-linux-x86-64.so.2" + //go:cgo_import_dynamic puts puts#GLIBC_2.2.5 "libc.so.6" + //go:cgo_import_dynamic __libc_start_main __libc_start_main#GLIBC_2.2.5 "libc.so.6" + //go:cgo_import_dynamic stdout stdout#GLIBC_2.2.5 "libc.so.6" + //go:cgo_import_dynamic fflush fflush#GLIBC_2.2.5 "libc.so.6" + //go:cgo_import_dynamic _ _ "libpthread.so.0" + //go:cgo_import_dynamic _ _ "libc.so.6" + +In the end, the compiled Go package, which will eventually be +presented to cmd/link as part of a larger program, contains: + + _go_.o # gc-compiled object for _cgo_gotypes.go, _cgo_import.go, *.cgo1.go + _all.o # gcc-compiled object for _cgo_export.c, *.cgo2.c + +If there is an error generating the _cgo_import.go file, then, instead +of adding _cgo_import.go to the package, the go tool adds an empty +file named dynimportfail. The _cgo_import.go file is only needed when +using internal linking mode, which is not the default when linking +programs that use cgo (as described below). If the linker sees a file +named dynimportfail it reports an error if it has been told to use +internal linking mode. This approach is taken because generating +_cgo_import.go requires doing a full C link of the package, which can +fail for reasons that are irrelevant when using external linking mode. + +The final program will be a dynamic executable, so that cmd/link can avoid +needing to process arbitrary .o files. It only needs to process the .o +files generated from C files that cgo writes, and those are much more +limited in the ELF or other features that they use. + +In essence, the _cgo_import.o file includes the extra linking +directives that cmd/link is not sophisticated enough to derive from _all.o +on its own. 
Similarly, the _all.o uses dynamic references to real +system object code because cmd/link is not sophisticated enough to process +the real code. + +The main benefits of this system are that cmd/link remains relatively simple +(it does not need to implement a complete ELF and Mach-O linker) and +that gcc is not needed after the package is compiled. For example, +package net uses cgo for access to name resolution functions provided +by libc. Although gcc is needed to compile package net, gcc is not +needed to link programs that import package net. + +Runtime + +When using cgo, Go must not assume that it owns all details of the +process. In particular it needs to coordinate with C in the use of +threads and thread-local storage. The runtime package declares a few +variables: + + var ( + iscgo bool + _cgo_init unsafe.Pointer + _cgo_thread_start unsafe.Pointer + ) + +Any package using cgo imports "runtime/cgo", which provides +initializations for these variables. It sets iscgo to true, _cgo_init +to a gcc-compiled function that can be called early during program +startup, and _cgo_thread_start to a gcc-compiled function that can be +used to create a new thread, in place of the runtime's usual direct +system calls. + +Internal and External Linking + +The text above describes "internal" linking, in which cmd/link parses and +links host object files (ELF, Mach-O, PE, and so on) into the final +executable itself. Keeping cmd/link simple means we cannot possibly +implement the full semantics of the host linker, so the kinds of +objects that can be linked directly into the binary is limited (other +code can only be used as a dynamic library). On the other hand, when +using internal linking, cmd/link can generate Go binaries by itself. + +In order to allow linking arbitrary object files without requiring +dynamic libraries, cgo supports an "external" linking mode too. In +external linking mode, cmd/link does not process any host object files. 
+Instead, it collects all the Go code and writes a single go.o object +file containing it. Then it invokes the host linker (usually gcc) to +combine the go.o object file and any supporting non-Go code into a +final executable. External linking avoids the dynamic library +requirement but introduces a requirement that the host linker be +present to create such a binary. + +Most builds both compile source code and invoke the linker to create a +binary. When cgo is involved, the compile step already requires gcc, so +it is not problematic for the link step to require gcc too. + +An important exception is builds using a pre-compiled copy of the +standard library. In particular, package net uses cgo on most systems, +and we want to preserve the ability to compile pure Go code that +imports net without requiring gcc to be present at link time. (In this +case, the dynamic library requirement is less significant, because the +only library involved is libc.so, which can usually be assumed +present.) + +This conflict between functionality and the gcc requirement means we +must support both internal and external linking, depending on the +circumstances: if net is the only cgo-using package, then internal +linking is probably fine, but if other packages are involved, so that there +are dependencies on libraries beyond libc, external linking is likely +to work better. The compilation of a package records the relevant +information to support both linking modes, leaving the decision +to be made when linking the final binary. + +Linking Directives + +In either linking mode, package-specific directives must be passed +through to cmd/link. These are communicated by writing //go: directives in a +Go source file compiled by gc. The directives are copied into the .o +object file and then processed by the linker. 
+ +The directives are: + +//go:cgo_import_dynamic [ [""]] + + In internal linking mode, allow an unresolved reference to + , assuming it will be resolved by a dynamic library + symbol. The optional specifies the symbol's name and + possibly version in the dynamic library, and the optional "" + names the specific library where the symbol should be found. + + On AIX, the library pattern is slightly different. It must be + "lib.a/obj.o" with obj.o the member of this library exporting + this symbol. + + In the , # or @ can be used to introduce a symbol version. + + Examples: + //go:cgo_import_dynamic puts + //go:cgo_import_dynamic puts puts#GLIBC_2.2.5 + //go:cgo_import_dynamic puts puts#GLIBC_2.2.5 "libc.so.6" + + A side effect of the cgo_import_dynamic directive with a + library is to make the final binary depend on that dynamic + library. To get the dependency without importing any specific + symbols, use _ for local and remote. + + Example: + //go:cgo_import_dynamic _ _ "libc.so.6" + + For compatibility with current versions of SWIG, + #pragma dynimport is an alias for //go:cgo_import_dynamic. + +//go:cgo_dynamic_linker "" + + In internal linking mode, use "" as the dynamic linker + in the final binary. This directive is only needed from one + package when constructing a binary; by convention it is + supplied by runtime/cgo. + + Example: + //go:cgo_dynamic_linker "/lib/ld-linux.so.2" + +//go:cgo_export_dynamic + + In internal linking mode, put the Go symbol + named into the program's exported symbol table as + , so that C code can refer to it by that name. This + mechanism makes it possible for C code to call back into Go or + to share Go's data. + + For compatibility with current versions of SWIG, + #pragma dynexport is an alias for //go:cgo_export_dynamic. 
+ +//go:cgo_import_static + + In external linking mode, allow unresolved references to + in the go.o object file prepared for the host linker, + under the assumption that will be supplied by the + other object files that will be linked with go.o. + + Example: + //go:cgo_import_static puts_wrapper + +//go:cgo_export_static + + In external linking mode, put the Go symbol + named into the program's exported symbol table as + , so that C code can refer to it by that name. This + mechanism makes it possible for C code to call back into Go or + to share Go's data. + +//go:cgo_ldflag "" + + In external linking mode, invoke the host linker (usually gcc) + with "" as a command-line argument following the .o files. + Note that the arguments are for "gcc", not "ld". + + Example: + //go:cgo_ldflag "-lpthread" + //go:cgo_ldflag "-L/usr/local/sqlite3/lib" + +A package compiled with cgo will include directives for both +internal and external linking; the linker will select the appropriate +subset for the chosen linking mode. + +Example + +As a simple example, consider a package that uses cgo to call C.sin. +The following code will be generated by cgo: + + // compiled by gc + + //go:cgo_ldflag "-lm" + + type _Ctype_double float64 + + //go:cgo_import_static _cgo_gcc_Cfunc_sin + //go:linkname __cgo_gcc_Cfunc_sin _cgo_gcc_Cfunc_sin + var __cgo_gcc_Cfunc_sin byte + var _cgo_gcc_Cfunc_sin = unsafe.Pointer(&__cgo_gcc_Cfunc_sin) + + func _Cfunc_sin(p0 _Ctype_double) (r1 _Ctype_double) { + _cgo_runtime_cgocall(_cgo_gcc_Cfunc_sin, uintptr(unsafe.Pointer(&p0))) + return + } + + // compiled by gcc, into foo.cgo2.o + + void + _cgo_gcc_Cfunc_sin(void *v) + { + struct { + double p0; + double r; + } __attribute__((__packed__)) *a = v; + a->r = sin(a->p0); + } + +What happens at link time depends on whether the final binary is linked +using the internal or external mode. If other packages are compiled in +"external only" mode, then the final link will be an external one. 
+Otherwise the link will be an internal one. + +The linking directives are used according to the kind of final link +used. + +In internal mode, cmd/link itself processes all the host object files, in +particular foo.cgo2.o. To do so, it uses the cgo_import_dynamic and +cgo_dynamic_linker directives to learn that the otherwise undefined +reference to sin in foo.cgo2.o should be rewritten to refer to the +symbol sin with version GLIBC_2.2.5 from the dynamic library +"libm.so.6", and the binary should request "/lib/ld-linux.so.2" as its +runtime dynamic linker. + +In external mode, cmd/link does not process any host object files, in +particular foo.cgo2.o. It links together the gc-generated object +files, along with any other Go code, into a go.o file. While doing +that, cmd/link will discover that there is no definition for +_cgo_gcc_Cfunc_sin, referred to by the gc-compiled source file. This +is okay, because cmd/link also processes the cgo_import_static directive and +knows that _cgo_gcc_Cfunc_sin is expected to be supplied by a host +object file, so cmd/link does not treat the missing symbol as an error when +creating go.o. Indeed, the definition for _cgo_gcc_Cfunc_sin will be +provided to the host linker by foo2.cgo.o, which in turn will need the +symbol 'sin'. cmd/link also processes the cgo_ldflag directives, so that it +knows that the eventual host link command must include the -lm +argument, so that the host linker will be able to find 'sin' in the +math library. + +cmd/link Command Line Interface + +The go command and any other Go-aware build systems invoke cmd/link +to link a collection of packages into a single binary. By default, cmd/link will +present the same interface it does today: + + cmd/link main.a + +produces a file named a.out, even if cmd/link does so by invoking the host +linker in external linking mode. 
+ +By default, cmd/link will decide the linking mode as follows: if the only +packages using cgo are those on a list of known standard library +packages (net, os/user, runtime/cgo), cmd/link will use internal linking +mode. Otherwise, there are non-standard cgo packages involved, and cmd/link +will use external linking mode. The first rule means that a build of +the godoc binary, which uses net but no other cgo, can run without +needing gcc available. The second rule means that a build of a +cgo-wrapped library like sqlite3 can generate a standalone executable +instead of needing to refer to a dynamic library. The specific choice +can be overridden using a command line flag: cmd/link -linkmode=internal or +cmd/link -linkmode=external. + +In an external link, cmd/link will create a temporary directory, write any +host object files found in package archives to that directory (renamed +to avoid conflicts), write the go.o file to that directory, and invoke +the host linker. The default value for the host linker is $CC, split +into fields, or else "gcc". The specific host linker command line can +be overridden using command line flags: cmd/link -extld=clang +-extldflags='-ggdb -O3'. If any package in a build includes a .cc or +other file compiled by the C++ compiler, the go tool will use the +-extld option to set the host linker to the C++ compiler. + +These defaults mean that Go-aware build systems can ignore the linking +changes and keep running plain 'cmd/link' and get reasonable results, but +they can also control the linking details if desired. + +*/ diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/gcc.go b/platform/dbops/binaries/go/go/src/cmd/cgo/gcc.go new file mode 100644 index 0000000000000000000000000000000000000000..6e7556de9606281571b1955bfa400d0fb6ca5c86 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/gcc.go @@ -0,0 +1,3536 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Annotate Ref in Prog with C types by parsing gcc debug output. +// Conversion of debug output to Go types. + +package main + +import ( + "bytes" + "debug/dwarf" + "debug/elf" + "debug/macho" + "debug/pe" + "encoding/binary" + "errors" + "flag" + "fmt" + "go/ast" + "go/parser" + "go/token" + "internal/xcoff" + "math" + "os" + "os/exec" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "cmd/internal/quoted" +) + +var debugDefine = flag.Bool("debug-define", false, "print relevant #defines") +var debugGcc = flag.Bool("debug-gcc", false, "print gcc invocations") + +var nameToC = map[string]string{ + "schar": "signed char", + "uchar": "unsigned char", + "ushort": "unsigned short", + "uint": "unsigned int", + "ulong": "unsigned long", + "longlong": "long long", + "ulonglong": "unsigned long long", + "complexfloat": "float _Complex", + "complexdouble": "double _Complex", +} + +var incomplete = "_cgopackage.Incomplete" + +// cname returns the C name to use for C.s. +// The expansions are listed in nameToC and also +// struct_foo becomes "struct foo", and similarly for +// union and enum. +func cname(s string) string { + if t, ok := nameToC[s]; ok { + return t + } + + if strings.HasPrefix(s, "struct_") { + return "struct " + s[len("struct_"):] + } + if strings.HasPrefix(s, "union_") { + return "union " + s[len("union_"):] + } + if strings.HasPrefix(s, "enum_") { + return "enum " + s[len("enum_"):] + } + if strings.HasPrefix(s, "sizeof_") { + return "sizeof(" + cname(s[len("sizeof_"):]) + ")" + } + return s +} + +// ProcessCgoDirectives processes the import C preamble: +// 1. discards all #cgo CFLAGS, LDFLAGS, nocallback and noescape directives, +// so they don't make their way into _cgo_export.h. +// 2. parse the nocallback and noescape directives. 
+func (f *File) ProcessCgoDirectives() { + linesIn := strings.Split(f.Preamble, "\n") + linesOut := make([]string, 0, len(linesIn)) + f.NoCallbacks = make(map[string]bool) + f.NoEscapes = make(map[string]bool) + for _, line := range linesIn { + l := strings.TrimSpace(line) + if len(l) < 5 || l[:4] != "#cgo" || !unicode.IsSpace(rune(l[4])) { + linesOut = append(linesOut, line) + } else { + linesOut = append(linesOut, "") + + // #cgo (nocallback|noescape) + if fields := strings.Fields(l); len(fields) == 3 { + directive := fields[1] + funcName := fields[2] + if directive == "nocallback" { + fatalf("#cgo nocallback disabled until Go 1.23") + f.NoCallbacks[funcName] = true + } else if directive == "noescape" { + fatalf("#cgo noescape disabled until Go 1.23") + f.NoEscapes[funcName] = true + } + } + } + } + f.Preamble = strings.Join(linesOut, "\n") +} + +// addToFlag appends args to flag. +func (p *Package) addToFlag(flag string, args []string) { + if flag == "CFLAGS" { + // We'll also need these when preprocessing for dwarf information. + // However, discard any -g options: we need to be able + // to parse the debug info, so stick to what we expect. + for _, arg := range args { + if !strings.HasPrefix(arg, "-g") { + p.GccOptions = append(p.GccOptions, arg) + } + } + } + if flag == "LDFLAGS" { + p.LdFlags = append(p.LdFlags, args...) + } +} + +// splitQuoted splits the string s around each instance of one or more consecutive +// white space characters while taking into account quotes and escaping, and +// returns an array of substrings of s or an empty list if s contains only white space. +// Single quotes and double quotes are recognized to prevent splitting within the +// quoted region, and are removed from the resulting substrings. If a quote in s +// isn't closed err will be set and r will have the unclosed argument as the +// last element. The backslash is used for escaping. 
+// +// For example, the following string: +// +// `a b:"c d" 'e''f' "g\""` +// +// Would be parsed as: +// +// []string{"a", "b:c d", "ef", `g"`} +func splitQuoted(s string) (r []string, err error) { + var args []string + arg := make([]rune, len(s)) + escaped := false + quoted := false + quote := '\x00' + i := 0 + for _, r := range s { + switch { + case escaped: + escaped = false + case r == '\\': + escaped = true + continue + case quote != 0: + if r == quote { + quote = 0 + continue + } + case r == '"' || r == '\'': + quoted = true + quote = r + continue + case unicode.IsSpace(r): + if quoted || i > 0 { + quoted = false + args = append(args, string(arg[:i])) + i = 0 + } + continue + } + arg[i] = r + i++ + } + if quoted || i > 0 { + args = append(args, string(arg[:i])) + } + if quote != 0 { + err = errors.New("unclosed quote") + } else if escaped { + err = errors.New("unfinished escaping") + } + return args, err +} + +// Translate rewrites f.AST, the original Go input, to remove +// references to the imported package C, replacing them with +// references to the equivalent Go types, functions, and variables. +func (p *Package) Translate(f *File) { + for _, cref := range f.Ref { + // Convert C.ulong to C.unsigned long, etc. + cref.Name.C = cname(cref.Name.Go) + } + + var conv typeConv + conv.Init(p.PtrSize, p.IntSize) + + p.loadDefines(f) + p.typedefs = map[string]bool{} + p.typedefList = nil + numTypedefs := -1 + for len(p.typedefs) > numTypedefs { + numTypedefs = len(p.typedefs) + // Also ask about any typedefs we've seen so far. 
+ for _, info := range p.typedefList { + if f.Name[info.typedef] != nil { + continue + } + n := &Name{ + Go: info.typedef, + C: info.typedef, + } + f.Name[info.typedef] = n + f.NamePos[n] = info.pos + } + needType := p.guessKinds(f) + if len(needType) > 0 { + p.loadDWARF(f, &conv, needType) + } + + // In godefs mode we're OK with the typedefs, which + // will presumably also be defined in the file, we + // don't want to resolve them to their base types. + if *godefs { + break + } + } + p.prepareNames(f) + if p.rewriteCalls(f) { + // Add `import _cgo_unsafe "unsafe"` after the package statement. + f.Edit.Insert(f.offset(f.AST.Name.End()), "; import _cgo_unsafe \"unsafe\"") + } + p.rewriteRef(f) +} + +// loadDefines coerces gcc into spitting out the #defines in use +// in the file f and saves relevant renamings in f.Name[name].Define. +func (p *Package) loadDefines(f *File) { + var b bytes.Buffer + b.WriteString(builtinProlog) + b.WriteString(f.Preamble) + stdout := p.gccDefines(b.Bytes()) + + for _, line := range strings.Split(stdout, "\n") { + if len(line) < 9 || line[0:7] != "#define" { + continue + } + + line = strings.TrimSpace(line[8:]) + + var key, val string + spaceIndex := strings.Index(line, " ") + tabIndex := strings.Index(line, "\t") + + if spaceIndex == -1 && tabIndex == -1 { + continue + } else if tabIndex == -1 || (spaceIndex != -1 && spaceIndex < tabIndex) { + key = line[0:spaceIndex] + val = strings.TrimSpace(line[spaceIndex:]) + } else { + key = line[0:tabIndex] + val = strings.TrimSpace(line[tabIndex:]) + } + + if key == "__clang__" { + p.GccIsClang = true + } + + if n := f.Name[key]; n != nil { + if *debugDefine { + fmt.Fprintf(os.Stderr, "#define %s %s\n", key, val) + } + n.Define = val + } + } +} + +// guessKinds tricks gcc into revealing the kind of each +// name xxx for the references C.xxx in the Go input. +// The kind is either a constant, type, or variable. 
+func (p *Package) guessKinds(f *File) []*Name { + // Determine kinds for names we already know about, + // like #defines or 'struct foo', before bothering with gcc. + var names, needType []*Name + optional := map[*Name]bool{} + for _, key := range nameKeys(f.Name) { + n := f.Name[key] + // If we've already found this name as a #define + // and we can translate it as a constant value, do so. + if n.Define != "" { + if i, err := strconv.ParseInt(n.Define, 0, 64); err == nil { + n.Kind = "iconst" + // Turn decimal into hex, just for consistency + // with enum-derived constants. Otherwise + // in the cgo -godefs output half the constants + // are in hex and half are in whatever the #define used. + n.Const = fmt.Sprintf("%#x", i) + } else if n.Define[0] == '\'' { + if _, err := parser.ParseExpr(n.Define); err == nil { + n.Kind = "iconst" + n.Const = n.Define + } + } else if n.Define[0] == '"' { + if _, err := parser.ParseExpr(n.Define); err == nil { + n.Kind = "sconst" + n.Const = n.Define + } + } + + if n.IsConst() { + continue + } + } + + // If this is a struct, union, or enum type name, no need to guess the kind. + if strings.HasPrefix(n.C, "struct ") || strings.HasPrefix(n.C, "union ") || strings.HasPrefix(n.C, "enum ") { + n.Kind = "type" + needType = append(needType, n) + continue + } + + if (goos == "darwin" || goos == "ios") && strings.HasSuffix(n.C, "Ref") { + // For FooRef, find out if FooGetTypeID exists. + s := n.C[:len(n.C)-3] + "GetTypeID" + n := &Name{Go: s, C: s} + names = append(names, n) + optional[n] = true + } + + // Otherwise, we'll need to find out from gcc. + names = append(names, n) + } + + // Bypass gcc if there's nothing left to find out. + if len(names) == 0 { + return needType + } + + // Coerce gcc into telling us whether each name is a type, a value, or undeclared. + // For names, find out whether they are integer constants. 
+ // We used to look at specific warning or error messages here, but that tied the + // behavior too closely to specific versions of the compilers. + // Instead, arrange that we can infer what we need from only the presence or absence + // of an error on a specific line. + // + // For each name, we generate these lines, where xxx is the index in toSniff plus one. + // + // #line xxx "not-declared" + // void __cgo_f_xxx_1(void) { __typeof__(name) *__cgo_undefined__1; } + // #line xxx "not-type" + // void __cgo_f_xxx_2(void) { name *__cgo_undefined__2; } + // #line xxx "not-int-const" + // void __cgo_f_xxx_3(void) { enum { __cgo_undefined__3 = (name)*1 }; } + // #line xxx "not-num-const" + // void __cgo_f_xxx_4(void) { static const double __cgo_undefined__4 = (name); } + // #line xxx "not-str-lit" + // void __cgo_f_xxx_5(void) { static const char __cgo_undefined__5[] = (name); } + // + // If we see an error at not-declared:xxx, the corresponding name is not declared. + // If we see an error at not-type:xxx, the corresponding name is not a type. + // If we see an error at not-int-const:xxx, the corresponding name is not an integer constant. + // If we see an error at not-num-const:xxx, the corresponding name is not a number constant. + // If we see an error at not-str-lit:xxx, the corresponding name is not a string literal. + // + // The specific input forms are chosen so that they are valid C syntax regardless of + // whether name denotes a type or an expression. 
+ + var b bytes.Buffer + b.WriteString(builtinProlog) + b.WriteString(f.Preamble) + + for i, n := range names { + fmt.Fprintf(&b, "#line %d \"not-declared\"\n"+ + "void __cgo_f_%d_1(void) { __typeof__(%s) *__cgo_undefined__1; }\n"+ + "#line %d \"not-type\"\n"+ + "void __cgo_f_%d_2(void) { %s *__cgo_undefined__2; }\n"+ + "#line %d \"not-int-const\"\n"+ + "void __cgo_f_%d_3(void) { enum { __cgo_undefined__3 = (%s)*1 }; }\n"+ + "#line %d \"not-num-const\"\n"+ + "void __cgo_f_%d_4(void) { static const double __cgo_undefined__4 = (%s); }\n"+ + "#line %d \"not-str-lit\"\n"+ + "void __cgo_f_%d_5(void) { static const char __cgo_undefined__5[] = (%s); }\n", + i+1, i+1, n.C, + i+1, i+1, n.C, + i+1, i+1, n.C, + i+1, i+1, n.C, + i+1, i+1, n.C, + ) + } + fmt.Fprintf(&b, "#line 1 \"completed\"\n"+ + "int __cgo__1 = __cgo__2;\n") + + // We need to parse the output from this gcc command, so ensure that it + // doesn't have any ANSI escape sequences in it. (TERM=dumb is + // insufficient; if the user specifies CGO_CFLAGS=-fdiagnostics-color, + // GCC will ignore TERM, and GCC can also be configured at compile-time + // to ignore TERM.) + stderr := p.gccErrors(b.Bytes(), "-fdiagnostics-color=never") + if strings.Contains(stderr, "unrecognized command line option") { + // We're using an old version of GCC that doesn't understand + // -fdiagnostics-color. Those versions can't print color anyway, + // so just rerun without that option. 
+ stderr = p.gccErrors(b.Bytes()) + } + if stderr == "" { + fatalf("%s produced no output\non input:\n%s", gccBaseCmd[0], b.Bytes()) + } + + completed := false + sniff := make([]int, len(names)) + const ( + notType = 1 << iota + notIntConst + notNumConst + notStrLiteral + notDeclared + ) + sawUnmatchedErrors := false + for _, line := range strings.Split(stderr, "\n") { + // Ignore warnings and random comments, with one + // exception: newer GCC versions will sometimes emit + // an error on a macro #define with a note referring + // to where the expansion occurs. We care about where + // the expansion occurs, so in that case treat the note + // as an error. + isError := strings.Contains(line, ": error:") + isErrorNote := strings.Contains(line, ": note:") && sawUnmatchedErrors + if !isError && !isErrorNote { + continue + } + + c1 := strings.Index(line, ":") + if c1 < 0 { + continue + } + c2 := strings.Index(line[c1+1:], ":") + if c2 < 0 { + continue + } + c2 += c1 + 1 + + filename := line[:c1] + i, _ := strconv.Atoi(line[c1+1 : c2]) + i-- + if i < 0 || i >= len(names) { + if isError { + sawUnmatchedErrors = true + } + continue + } + + switch filename { + case "completed": + // Strictly speaking, there is no guarantee that seeing the error at completed:1 + // (at the end of the file) means we've seen all the errors from earlier in the file, + // but usually it does. Certainly if we don't see the completed:1 error, we did + // not get all the errors we expected. 
+ completed = true + + case "not-declared": + sniff[i] |= notDeclared + case "not-type": + sniff[i] |= notType + case "not-int-const": + sniff[i] |= notIntConst + case "not-num-const": + sniff[i] |= notNumConst + case "not-str-lit": + sniff[i] |= notStrLiteral + default: + if isError { + sawUnmatchedErrors = true + } + continue + } + + sawUnmatchedErrors = false + } + + if !completed { + fatalf("%s did not produce error at completed:1\non input:\n%s\nfull error output:\n%s", gccBaseCmd[0], b.Bytes(), stderr) + } + + for i, n := range names { + switch sniff[i] { + default: + if sniff[i]¬Declared != 0 && optional[n] { + // Ignore optional undeclared identifiers. + // Don't report an error, and skip adding n to the needType array. + continue + } + error_(f.NamePos[n], "could not determine kind of name for C.%s", fixGo(n.Go)) + case notStrLiteral | notType: + n.Kind = "iconst" + case notIntConst | notStrLiteral | notType: + n.Kind = "fconst" + case notIntConst | notNumConst | notType: + n.Kind = "sconst" + case notIntConst | notNumConst | notStrLiteral: + n.Kind = "type" + case notIntConst | notNumConst | notStrLiteral | notType: + n.Kind = "not-type" + } + needType = append(needType, n) + } + if nerrors > 0 { + // Check if compiling the preamble by itself causes any errors, + // because the messages we've printed out so far aren't helpful + // to users debugging preamble mistakes. See issue 8442. + preambleErrors := p.gccErrors([]byte(builtinProlog + f.Preamble)) + if len(preambleErrors) > 0 { + error_(token.NoPos, "\n%s errors for preamble:\n%s", gccBaseCmd[0], preambleErrors) + } + + fatalf("unresolved names") + } + + return needType +} + +// loadDWARF parses the DWARF debug information generated +// by gcc to learn the details of the constants, variables, and types +// being referred to as C.xxx. +func (p *Package) loadDWARF(f *File, conv *typeConv, names []*Name) { + // Extract the types from the DWARF section of an object + // from a well-formed C program. 
Gcc only generates DWARF info + // for symbols in the object file, so it is not enough to print the + // preamble and hope the symbols we care about will be there. + // Instead, emit + // __typeof__(names[i]) *__cgo__i; + // for each entry in names and then dereference the type we + // learn for __cgo__i. + var b bytes.Buffer + b.WriteString(builtinProlog) + b.WriteString(f.Preamble) + b.WriteString("#line 1 \"cgo-dwarf-inference\"\n") + for i, n := range names { + fmt.Fprintf(&b, "__typeof__(%s) *__cgo__%d;\n", n.C, i) + if n.Kind == "iconst" { + fmt.Fprintf(&b, "enum { __cgo_enum__%d = %s };\n", i, n.C) + } + } + + // We create a data block initialized with the values, + // so we can read them out of the object file. + fmt.Fprintf(&b, "long long __cgodebug_ints[] = {\n") + for _, n := range names { + if n.Kind == "iconst" { + fmt.Fprintf(&b, "\t%s,\n", n.C) + } else { + fmt.Fprintf(&b, "\t0,\n") + } + } + // for the last entry, we cannot use 0, otherwise + // in case all __cgodebug_data is zero initialized, + // LLVM-based gcc will place the it in the __DATA.__common + // zero-filled section (our debug/macho doesn't support + // this) + fmt.Fprintf(&b, "\t1\n") + fmt.Fprintf(&b, "};\n") + + // do the same work for floats. + fmt.Fprintf(&b, "double __cgodebug_floats[] = {\n") + for _, n := range names { + if n.Kind == "fconst" { + fmt.Fprintf(&b, "\t%s,\n", n.C) + } else { + fmt.Fprintf(&b, "\t0,\n") + } + } + fmt.Fprintf(&b, "\t1\n") + fmt.Fprintf(&b, "};\n") + + // do the same work for strings. + for i, n := range names { + if n.Kind == "sconst" { + fmt.Fprintf(&b, "const char __cgodebug_str__%d[] = %s;\n", i, n.C) + fmt.Fprintf(&b, "const unsigned long long __cgodebug_strlen__%d = sizeof(%s)-1;\n", i, n.C) + } + } + + d, ints, floats, strs := p.gccDebug(b.Bytes(), len(names)) + + // Scan DWARF info for top-level TagVariable entries with AttrName __cgo__i. 
+ types := make([]dwarf.Type, len(names)) + r := d.Reader() + for { + e, err := r.Next() + if err != nil { + fatalf("reading DWARF entry: %s", err) + } + if e == nil { + break + } + switch e.Tag { + case dwarf.TagVariable: + name, _ := e.Val(dwarf.AttrName).(string) + // As of https://reviews.llvm.org/D123534, clang + // now emits DW_TAG_variable DIEs that have + // no name (so as to be able to describe the + // type and source locations of constant strings) + // like the second arg in the call below: + // + // myfunction(42, "foo") + // + // If a var has no name we won't see attempts to + // refer to it via "C.", so skip these vars + // + // See issue 53000 for more context. + if name == "" { + break + } + typOff, _ := e.Val(dwarf.AttrType).(dwarf.Offset) + if typOff == 0 { + if e.Val(dwarf.AttrSpecification) != nil { + // Since we are reading all the DWARF, + // assume we will see the variable elsewhere. + break + } + fatalf("malformed DWARF TagVariable entry") + } + if !strings.HasPrefix(name, "__cgo__") { + break + } + typ, err := d.Type(typOff) + if err != nil { + fatalf("loading DWARF type: %s", err) + } + t, ok := typ.(*dwarf.PtrType) + if !ok || t == nil { + fatalf("internal error: %s has non-pointer type", name) + } + i, err := strconv.Atoi(name[7:]) + if err != nil { + fatalf("malformed __cgo__ name: %s", name) + } + types[i] = t.Type + p.recordTypedefs(t.Type, f.NamePos[names[i]]) + } + if e.Tag != dwarf.TagCompileUnit { + r.SkipChildren() + } + } + + // Record types and typedef information. 
+ for i, n := range names { + if strings.HasSuffix(n.Go, "GetTypeID") && types[i].String() == "func() CFTypeID" { + conv.getTypeIDs[n.Go[:len(n.Go)-9]] = true + } + } + for i, n := range names { + if types[i] == nil { + continue + } + pos := f.NamePos[n] + f, fok := types[i].(*dwarf.FuncType) + if n.Kind != "type" && fok { + n.Kind = "func" + n.FuncType = conv.FuncType(f, pos) + } else { + n.Type = conv.Type(types[i], pos) + switch n.Kind { + case "iconst": + if i < len(ints) { + if _, ok := types[i].(*dwarf.UintType); ok { + n.Const = fmt.Sprintf("%#x", uint64(ints[i])) + } else { + n.Const = fmt.Sprintf("%#x", ints[i]) + } + } + case "fconst": + if i >= len(floats) { + break + } + switch base(types[i]).(type) { + case *dwarf.IntType, *dwarf.UintType: + // This has an integer type so it's + // not really a floating point + // constant. This can happen when the + // C compiler complains about using + // the value as an integer constant, + // but not as a general constant. + // Treat this as a variable of the + // appropriate type, not a constant, + // to get C-style type handling, + // avoiding the problem that C permits + // uint64(-1) but Go does not. + // See issue 26066. + n.Kind = "var" + default: + n.Const = fmt.Sprintf("%f", floats[i]) + } + case "sconst": + if i < len(strs) { + n.Const = fmt.Sprintf("%q", strs[i]) + } + } + } + conv.FinishType(pos) + } +} + +// recordTypedefs remembers in p.typedefs all the typedefs used in dtypes and its children. +func (p *Package) recordTypedefs(dtype dwarf.Type, pos token.Pos) { + p.recordTypedefs1(dtype, pos, map[dwarf.Type]bool{}) +} + +func (p *Package) recordTypedefs1(dtype dwarf.Type, pos token.Pos, visited map[dwarf.Type]bool) { + if dtype == nil { + return + } + if visited[dtype] { + return + } + visited[dtype] = true + switch dt := dtype.(type) { + case *dwarf.TypedefType: + if strings.HasPrefix(dt.Name, "__builtin") { + // Don't look inside builtin types. There be dragons. 
+ return + } + if !p.typedefs[dt.Name] { + p.typedefs[dt.Name] = true + p.typedefList = append(p.typedefList, typedefInfo{dt.Name, pos}) + p.recordTypedefs1(dt.Type, pos, visited) + } + case *dwarf.PtrType: + p.recordTypedefs1(dt.Type, pos, visited) + case *dwarf.ArrayType: + p.recordTypedefs1(dt.Type, pos, visited) + case *dwarf.QualType: + p.recordTypedefs1(dt.Type, pos, visited) + case *dwarf.FuncType: + p.recordTypedefs1(dt.ReturnType, pos, visited) + for _, a := range dt.ParamType { + p.recordTypedefs1(a, pos, visited) + } + case *dwarf.StructType: + for _, f := range dt.Field { + p.recordTypedefs1(f.Type, pos, visited) + } + } +} + +// prepareNames finalizes the Kind field of not-type names and sets +// the mangled name of all names. +func (p *Package) prepareNames(f *File) { + for _, n := range f.Name { + if n.Kind == "not-type" { + if n.Define == "" { + n.Kind = "var" + } else { + n.Kind = "macro" + n.FuncType = &FuncType{ + Result: n.Type, + Go: &ast.FuncType{ + Results: &ast.FieldList{List: []*ast.Field{{Type: n.Type.Go}}}, + }, + } + } + } + p.mangleName(n) + if n.Kind == "type" && typedef[n.Mangle] == nil { + typedef[n.Mangle] = n.Type + } + } +} + +// mangleName does name mangling to translate names +// from the original Go source files to the names +// used in the final Go files generated by cgo. +func (p *Package) mangleName(n *Name) { + // When using gccgo variables have to be + // exported so that they become global symbols + // that the C code can refer to. + prefix := "_C" + if *gccgo && n.IsVar() { + prefix = "C" + } + n.Mangle = prefix + n.Kind + "_" + n.Go +} + +func (f *File) isMangledName(s string) bool { + prefix := "_C" + if strings.HasPrefix(s, prefix) { + t := s[len(prefix):] + for _, k := range nameKinds { + if strings.HasPrefix(t, k+"_") { + return true + } + } + } + return false +} + +// rewriteCalls rewrites all calls that pass pointers to check that +// they follow the rules for passing pointers between Go and C. 
+// This reports whether the package needs to import unsafe as _cgo_unsafe. +func (p *Package) rewriteCalls(f *File) bool { + needsUnsafe := false + // Walk backward so that in C.f1(C.f2()) we rewrite C.f2 first. + for _, call := range f.Calls { + if call.Done { + continue + } + start := f.offset(call.Call.Pos()) + end := f.offset(call.Call.End()) + str, nu := p.rewriteCall(f, call) + if str != "" { + f.Edit.Replace(start, end, str) + if nu { + needsUnsafe = true + } + } + } + return needsUnsafe +} + +// rewriteCall rewrites one call to add pointer checks. +// If any pointer checks are required, we rewrite the call into a +// function literal that calls _cgoCheckPointer for each pointer +// argument and then calls the original function. +// This returns the rewritten call and whether the package needs to +// import unsafe as _cgo_unsafe. +// If it returns the empty string, the call did not need to be rewritten. +func (p *Package) rewriteCall(f *File, call *Call) (string, bool) { + // This is a call to C.xxx; set goname to "xxx". + // It may have already been mangled by rewriteName. + var goname string + switch fun := call.Call.Fun.(type) { + case *ast.SelectorExpr: + goname = fun.Sel.Name + case *ast.Ident: + goname = strings.TrimPrefix(fun.Name, "_C2func_") + goname = strings.TrimPrefix(goname, "_Cfunc_") + } + if goname == "" || goname == "malloc" { + return "", false + } + name := f.Name[goname] + if name == nil || name.Kind != "func" { + // Probably a type conversion. + return "", false + } + + params := name.FuncType.Params + args := call.Call.Args + end := call.Call.End() + + // Avoid a crash if the number of arguments doesn't match + // the number of parameters. + // This will be caught when the generated file is compiled. 
+ if len(args) != len(params) { + return "", false + } + + any := false + for i, param := range params { + if p.needsPointerCheck(f, param.Go, args[i]) { + any = true + break + } + } + if !any { + return "", false + } + + // We need to rewrite this call. + // + // Rewrite C.f(p) to + // func() { + // _cgo0 := p + // _cgoCheckPointer(_cgo0, nil) + // C.f(_cgo0) + // }() + // Using a function literal like this lets us evaluate the + // function arguments only once while doing pointer checks. + // This is particularly useful when passing additional arguments + // to _cgoCheckPointer, as done in checkIndex and checkAddr. + // + // When the function argument is a conversion to unsafe.Pointer, + // we unwrap the conversion before checking the pointer, + // and then wrap again when calling C.f. This lets us check + // the real type of the pointer in some cases. See issue #25941. + // + // When the call to C.f is deferred, we use an additional function + // literal to evaluate the arguments at the right time. + // defer func() func() { + // _cgo0 := p + // return func() { + // _cgoCheckPointer(_cgo0, nil) + // C.f(_cgo0) + // } + // }()() + // This works because the defer statement evaluates the first + // function literal in order to get the function to call. + + var sb bytes.Buffer + sb.WriteString("func() ") + if call.Deferred { + sb.WriteString("func() ") + } + + needsUnsafe := false + result := false + twoResults := false + if !call.Deferred { + // Check whether this call expects two results. + for _, ref := range f.Ref { + if ref.Expr != &call.Call.Fun { + continue + } + if ref.Context == ctxCall2 { + sb.WriteString("(") + result = true + twoResults = true + } + break + } + + // Add the result type, if any. + if name.FuncType.Result != nil { + rtype := p.rewriteUnsafe(name.FuncType.Result.Go) + if rtype != name.FuncType.Result.Go { + needsUnsafe = true + } + sb.WriteString(gofmtLine(rtype)) + result = true + } + + // Add the second result type, if any. 
+ if twoResults { + if name.FuncType.Result == nil { + // An explicit void result looks odd but it + // seems to be how cgo has worked historically. + sb.WriteString("_Ctype_void") + } + sb.WriteString(", error)") + } + } + + sb.WriteString("{ ") + + // Define _cgoN for each argument value. + // Write _cgoCheckPointer calls to sbCheck. + var sbCheck bytes.Buffer + for i, param := range params { + origArg := args[i] + arg, nu := p.mangle(f, &args[i], true) + if nu { + needsUnsafe = true + } + + // Use "var x T = ..." syntax to explicitly convert untyped + // constants to the parameter type, to avoid a type mismatch. + ptype := p.rewriteUnsafe(param.Go) + + if !p.needsPointerCheck(f, param.Go, args[i]) || param.BadPointer || p.checkUnsafeStringData(args[i]) { + if ptype != param.Go { + needsUnsafe = true + } + fmt.Fprintf(&sb, "var _cgo%d %s = %s; ", i, + gofmtLine(ptype), gofmtPos(arg, origArg.Pos())) + continue + } + + // Check for &a[i]. + if p.checkIndex(&sb, &sbCheck, arg, i) { + continue + } + + // Check for &x. + if p.checkAddr(&sb, &sbCheck, arg, i) { + continue + } + + // Check for a[:]. + if p.checkSlice(&sb, &sbCheck, arg, i) { + continue + } + + fmt.Fprintf(&sb, "_cgo%d := %s; ", i, gofmtPos(arg, origArg.Pos())) + fmt.Fprintf(&sbCheck, "_cgoCheckPointer(_cgo%d, nil); ", i) + } + + if call.Deferred { + sb.WriteString("return func() { ") + } + + // Write out the calls to _cgoCheckPointer. 
+ sb.WriteString(sbCheck.String()) + + if result { + sb.WriteString("return ") + } + + m, nu := p.mangle(f, &call.Call.Fun, false) + if nu { + needsUnsafe = true + } + sb.WriteString(gofmtPos(m, end)) + + sb.WriteString("(") + for i := range params { + if i > 0 { + sb.WriteString(", ") + } + fmt.Fprintf(&sb, "_cgo%d", i) + } + sb.WriteString("); ") + if call.Deferred { + sb.WriteString("}") + } + sb.WriteString("}") + if call.Deferred { + sb.WriteString("()") + } + sb.WriteString("()") + + return sb.String(), needsUnsafe +} + +// needsPointerCheck reports whether the type t needs a pointer check. +// This is true if t is a pointer and if the value to which it points +// might contain a pointer. +func (p *Package) needsPointerCheck(f *File, t ast.Expr, arg ast.Expr) bool { + // An untyped nil does not need a pointer check, and when + // _cgoCheckPointer returns the untyped nil the type assertion we + // are going to insert will fail. Easier to just skip nil arguments. + // TODO: Note that this fails if nil is shadowed. + if id, ok := arg.(*ast.Ident); ok && id.Name == "nil" { + return false + } + + return p.hasPointer(f, t, true) +} + +// hasPointer is used by needsPointerCheck. If top is true it returns +// whether t is or contains a pointer that might point to a pointer. +// If top is false it reports whether t is or contains a pointer. +// f may be nil. +func (p *Package) hasPointer(f *File, t ast.Expr, top bool) bool { + switch t := t.(type) { + case *ast.ArrayType: + if t.Len == nil { + if !top { + return true + } + return p.hasPointer(f, t.Elt, false) + } + return p.hasPointer(f, t.Elt, top) + case *ast.StructType: + for _, field := range t.Fields.List { + if p.hasPointer(f, field.Type, top) { + return true + } + } + return false + case *ast.StarExpr: // Pointer type. + if !top { + return true + } + // Check whether this is a pointer to a C union (or class) + // type that contains a pointer. 
+ if unionWithPointer[t.X] { + return true + } + return p.hasPointer(f, t.X, false) + case *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType: + return true + case *ast.Ident: + // TODO: Handle types defined within function. + for _, d := range p.Decl { + gd, ok := d.(*ast.GenDecl) + if !ok || gd.Tok != token.TYPE { + continue + } + for _, spec := range gd.Specs { + ts, ok := spec.(*ast.TypeSpec) + if !ok { + continue + } + if ts.Name.Name == t.Name { + return p.hasPointer(f, ts.Type, top) + } + } + } + if def := typedef[t.Name]; def != nil { + return p.hasPointer(f, def.Go, top) + } + if t.Name == "string" { + return !top + } + if t.Name == "error" { + return true + } + if goTypes[t.Name] != nil { + return false + } + // We can't figure out the type. Conservative + // approach is to assume it has a pointer. + return true + case *ast.SelectorExpr: + if l, ok := t.X.(*ast.Ident); !ok || l.Name != "C" { + // Type defined in a different package. + // Conservative approach is to assume it has a + // pointer. + return true + } + if f == nil { + // Conservative approach: assume pointer. + return true + } + name := f.Name[t.Sel.Name] + if name != nil && name.Kind == "type" && name.Type != nil && name.Type.Go != nil { + return p.hasPointer(f, name.Type.Go, top) + } + // We can't figure out the type. Conservative + // approach is to assume it has a pointer. + return true + default: + error_(t.Pos(), "could not understand type %s", gofmt(t)) + return true + } +} + +// mangle replaces references to C names in arg with the mangled names, +// rewriting calls when it finds them. +// It removes the corresponding references in f.Ref and f.Calls, so that we +// don't try to do the replacement again in rewriteRef or rewriteCall. +// If addPosition is true, add position info to the idents of C names in arg. 
+func (p *Package) mangle(f *File, arg *ast.Expr, addPosition bool) (ast.Expr, bool) { + needsUnsafe := false + f.walk(arg, ctxExpr, func(f *File, arg interface{}, context astContext) { + px, ok := arg.(*ast.Expr) + if !ok { + return + } + sel, ok := (*px).(*ast.SelectorExpr) + if ok { + if l, ok := sel.X.(*ast.Ident); !ok || l.Name != "C" { + return + } + + for _, r := range f.Ref { + if r.Expr == px { + *px = p.rewriteName(f, r, addPosition) + r.Done = true + break + } + } + + return + } + + call, ok := (*px).(*ast.CallExpr) + if !ok { + return + } + + for _, c := range f.Calls { + if !c.Done && c.Call.Lparen == call.Lparen { + cstr, nu := p.rewriteCall(f, c) + if cstr != "" { + // Smuggle the rewritten call through an ident. + *px = ast.NewIdent(cstr) + if nu { + needsUnsafe = true + } + c.Done = true + } + } + } + }) + return *arg, needsUnsafe +} + +// checkIndex checks whether arg has the form &a[i], possibly inside +// type conversions. If so, then in the general case it writes +// +// _cgoIndexNN := a +// _cgoNN := &cgoIndexNN[i] // with type conversions, if any +// +// to sb, and writes +// +// _cgoCheckPointer(_cgoNN, _cgoIndexNN) +// +// to sbCheck, and returns true. If a is a simple variable or field reference, +// it writes +// +// _cgoIndexNN := &a +// +// and dereferences the uses of _cgoIndexNN. Taking the address avoids +// making a copy of an array. +// +// This tells _cgoCheckPointer to check the complete contents of the +// slice or array being indexed, but no other part of the memory allocation. +func (p *Package) checkIndex(sb, sbCheck *bytes.Buffer, arg ast.Expr, i int) bool { + // Strip type conversions. 
+ x := arg + for { + c, ok := x.(*ast.CallExpr) + if !ok || len(c.Args) != 1 { + break + } + if !p.isType(c.Fun) && !p.isUnsafeData(c.Fun, false) { + break + } + x = c.Args[0] + } + u, ok := x.(*ast.UnaryExpr) + if !ok || u.Op != token.AND { + return false + } + index, ok := u.X.(*ast.IndexExpr) + if !ok { + return false + } + + addr := "" + deref := "" + if p.isVariable(index.X) { + addr = "&" + deref = "*" + } + + fmt.Fprintf(sb, "_cgoIndex%d := %s%s; ", i, addr, gofmtPos(index.X, index.X.Pos())) + origX := index.X + index.X = ast.NewIdent(fmt.Sprintf("_cgoIndex%d", i)) + if deref == "*" { + index.X = &ast.StarExpr{X: index.X} + } + fmt.Fprintf(sb, "_cgo%d := %s; ", i, gofmtPos(arg, arg.Pos())) + index.X = origX + + fmt.Fprintf(sbCheck, "_cgoCheckPointer(_cgo%d, %s_cgoIndex%d); ", i, deref, i) + + return true +} + +// checkAddr checks whether arg has the form &x, possibly inside type +// conversions. If so, it writes +// +// _cgoBaseNN := &x +// _cgoNN := _cgoBaseNN // with type conversions, if any +// +// to sb, and writes +// +// _cgoCheckPointer(_cgoBaseNN, true) +// +// to sbCheck, and returns true. This tells _cgoCheckPointer to check +// just the contents of the pointer being passed, not any other part +// of the memory allocation. This is run after checkIndex, which looks +// for the special case of &a[i], which requires different checks. +func (p *Package) checkAddr(sb, sbCheck *bytes.Buffer, arg ast.Expr, i int) bool { + // Strip type conversions. 
+ px := &arg + for { + c, ok := (*px).(*ast.CallExpr) + if !ok || len(c.Args) != 1 { + break + } + if !p.isType(c.Fun) && !p.isUnsafeData(c.Fun, false) { + break + } + px = &c.Args[0] + } + if u, ok := (*px).(*ast.UnaryExpr); !ok || u.Op != token.AND { + return false + } + + fmt.Fprintf(sb, "_cgoBase%d := %s; ", i, gofmtPos(*px, (*px).Pos())) + + origX := *px + *px = ast.NewIdent(fmt.Sprintf("_cgoBase%d", i)) + fmt.Fprintf(sb, "_cgo%d := %s; ", i, gofmtPos(arg, arg.Pos())) + *px = origX + + // Use "0 == 0" to do the right thing in the unlikely event + // that "true" is shadowed. + fmt.Fprintf(sbCheck, "_cgoCheckPointer(_cgoBase%d, 0 == 0); ", i) + + return true +} + +// checkSlice checks whether arg has the form x[i:j], possibly inside +// type conversions. If so, it writes +// +// _cgoSliceNN := x[i:j] +// _cgoNN := _cgoSliceNN // with type conversions, if any +// +// to sb, and writes +// +// _cgoCheckPointer(_cgoSliceNN, true) +// +// to sbCheck, and returns true. This tells _cgoCheckPointer to check +// just the contents of the slice being passed, not any other part +// of the memory allocation. +func (p *Package) checkSlice(sb, sbCheck *bytes.Buffer, arg ast.Expr, i int) bool { + // Strip type conversions. + px := &arg + for { + c, ok := (*px).(*ast.CallExpr) + if !ok || len(c.Args) != 1 { + break + } + if !p.isType(c.Fun) && !p.isUnsafeData(c.Fun, false) { + break + } + px = &c.Args[0] + } + if _, ok := (*px).(*ast.SliceExpr); !ok { + return false + } + + fmt.Fprintf(sb, "_cgoSlice%d := %s; ", i, gofmtPos(*px, (*px).Pos())) + + origX := *px + *px = ast.NewIdent(fmt.Sprintf("_cgoSlice%d", i)) + fmt.Fprintf(sb, "_cgo%d := %s; ", i, gofmtPos(arg, arg.Pos())) + *px = origX + + // Use 0 == 0 to do the right thing in the unlikely event + // that "true" is shadowed. + fmt.Fprintf(sbCheck, "_cgoCheckPointer(_cgoSlice%d, 0 == 0); ", i) + + return true +} + +// checkUnsafeStringData checks for a call to unsafe.StringData. 
// The result of that call can't contain a pointer so there is
// no need to call _cgoCheckPointer.
func (p *Package) checkUnsafeStringData(arg ast.Expr) bool {
	x := arg
	// Unwrap single-argument calls until we either hit
	// unsafe.StringData or something that is not a type conversion.
	for {
		c, ok := x.(*ast.CallExpr)
		if !ok || len(c.Args) != 1 {
			break
		}
		if p.isUnsafeData(c.Fun, true) {
			return true
		}
		if !p.isType(c.Fun) {
			break
		}
		x = c.Args[0]
	}
	return false
}

// isType reports whether the expression is definitely a type.
// This is conservative--it returns false for an unknown identifier.
func (p *Package) isType(t ast.Expr) bool {
	switch t := t.(type) {
	case *ast.SelectorExpr:
		id, ok := t.X.(*ast.Ident)
		if !ok {
			return false
		}
		if id.Name == "unsafe" && t.Sel.Name == "Pointer" {
			return true
		}
		// C.foo is a type only if cgo has recorded a typedef for it.
		if id.Name == "C" && typedef["_Ctype_"+t.Sel.Name] != nil {
			return true
		}
		return false
	case *ast.Ident:
		// TODO: This ignores shadowing.
		switch t.Name {
		case "unsafe.Pointer", "bool", "byte",
			"complex64", "complex128",
			"error",
			"float32", "float64",
			"int", "int8", "int16", "int32", "int64",
			"rune", "string",
			"uint", "uint8", "uint16", "uint32", "uint64", "uintptr":

			return true
		}
		if strings.HasPrefix(t.Name, "_Ctype_") {
			return true
		}
	case *ast.ParenExpr:
		return p.isType(t.X)
	case *ast.StarExpr:
		return p.isType(t.X)
	case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType,
		*ast.MapType, *ast.ChanType:

		return true
	}
	return false
}

// isUnsafeData reports whether the expression is unsafe.StringData
// or unsafe.SliceData. We can ignore these when checking for pointers
// because they don't change whether or not their argument contains
// any Go pointers. If onlyStringData is true we only check for StringData.
func (p *Package) isUnsafeData(x ast.Expr, onlyStringData bool) bool {
	st, ok := x.(*ast.SelectorExpr)
	if !ok {
		return false
	}
	id, ok := st.X.(*ast.Ident)
	if !ok {
		return false
	}
	if id.Name != "unsafe" {
		return false
	}
	if !onlyStringData && st.Sel.Name == "SliceData" {
		return true
	}
	return st.Sel.Name == "StringData"
}

// isVariable reports whether x is a variable, possibly with field references.
func (p *Package) isVariable(x ast.Expr) bool {
	switch x := x.(type) {
	case *ast.Ident:
		return true
	case *ast.SelectorExpr:
		return p.isVariable(x.X)
	case *ast.IndexExpr:
		return true
	}
	return false
}

// rewriteUnsafe returns a version of t with references to unsafe.Pointer
// rewritten to use _cgo_unsafe.Pointer instead.
// It copies nodes on change, so unmodified subtrees are shared with t.
func (p *Package) rewriteUnsafe(t ast.Expr) ast.Expr {
	switch t := t.(type) {
	case *ast.Ident:
		// We don't see a SelectorExpr for unsafe.Pointer;
		// this is created by code in this file.
		if t.Name == "unsafe.Pointer" {
			return ast.NewIdent("_cgo_unsafe.Pointer")
		}
	case *ast.ArrayType:
		t1 := p.rewriteUnsafe(t.Elt)
		if t1 != t.Elt {
			r := *t
			r.Elt = t1
			return &r
		}
	case *ast.StructType:
		changed := false
		fields := *t.Fields
		fields.List = nil
		for _, f := range t.Fields.List {
			ft := p.rewriteUnsafe(f.Type)
			if ft == f.Type {
				fields.List = append(fields.List, f)
			} else {
				fn := *f
				fn.Type = ft
				fields.List = append(fields.List, &fn)
				changed = true
			}
		}
		if changed {
			r := *t
			r.Fields = &fields
			return &r
		}
	case *ast.StarExpr: // Pointer type.
		x1 := p.rewriteUnsafe(t.X)
		if x1 != t.X {
			r := *t
			r.X = x1
			return &r
		}
	}
	return t
}

// rewriteRef rewrites all the C.xxx references in f.AST to refer to the
// Go equivalents, now that we have figured out the meaning of all
// the xxx. In *godefs mode, rewriteRef replaces the names
// with full definitions instead of mangled names.
func (p *Package) rewriteRef(f *File) {
	// Keep a list of all the functions, to remove the ones
	// only used as expressions and avoid generating bridge
	// code for them.
	functions := make(map[string]bool)

	for _, n := range f.Name {
		if n.Kind == "func" {
			functions[n.Go] = false
		}
	}

	// Now that we have all the name types filled in,
	// scan through the Refs to identify the ones that
	// are trying to do a ,err call. Also check that
	// functions are only used in calls.
	for _, r := range f.Ref {
		if r.Name.IsConst() && r.Name.Const == "" {
			error_(r.Pos(), "unable to find value of constant C.%s", fixGo(r.Name.Go))
		}

		if r.Name.Kind == "func" {
			switch r.Context {
			case ctxCall, ctxCall2:
				functions[r.Name.Go] = true
			}
		}

		expr := p.rewriteName(f, r, false)

		if *godefs {
			// Substitute definition for mangled type name.
			if r.Name.Type != nil && r.Name.Kind == "type" {
				expr = r.Name.Type.Go
			}
			if id, ok := expr.(*ast.Ident); ok {
				if t := typedef[id.Name]; t != nil {
					expr = t.Go
				}
				if id.Name == r.Name.Mangle && r.Name.Const != "" {
					expr = ast.NewIdent(r.Name.Const)
				}
			}
		}

		// Copy position information from old expr into new expr,
		// in case expression being replaced is first on line.
		// See golang.org/issue/6563.
		pos := (*r.Expr).Pos()
		if x, ok := expr.(*ast.Ident); ok {
			expr = &ast.Ident{NamePos: pos, Name: x.Name}
		}

		// Change AST, because some later processing depends on it,
		// and also because -godefs mode still prints the AST.
		old := *r.Expr
		*r.Expr = expr

		// Record source-level edit for cgo output.
		if !r.Done {
			// Prepend a space in case the earlier code ends
			// with '/', which would give us a "//" comment.
			repl := " " + gofmtPos(expr, old.Pos())
			end := fset.Position(old.End())
			// Subtract 1 from the column if we are going to
			// append a close parenthesis. That will set the
			// correct column for the following characters.
			sub := 0
			if r.Name.Kind != "type" {
				sub = 1
			}
			if end.Column > sub {
				repl = fmt.Sprintf("%s /*line :%d:%d*/", repl, end.Line, end.Column-sub)
			}
			if r.Name.Kind != "type" {
				repl = "(" + repl + ")"
			}
			f.Edit.Replace(f.offset(old.Pos()), f.offset(old.End()), repl)
		}
	}

	// Remove functions only used as expressions, so their respective
	// bridge functions are not generated.
	for name, used := range functions {
		if !used {
			delete(f.Name, name)
		}
	}
}

// rewriteName returns the expression used to rewrite a reference.
// If addPosition is true, add position info in the ident name.
func (p *Package) rewriteName(f *File, r *Ref, addPosition bool) ast.Expr {
	getNewIdent := ast.NewIdent
	if addPosition {
		// Append a /*line*/ comment to the mangled name so error
		// positions still point at the original reference.
		getNewIdent = func(newName string) *ast.Ident {
			mangledIdent := ast.NewIdent(newName)
			if len(newName) == len(r.Name.Go) {
				return mangledIdent
			}
			p := fset.Position((*r.Expr).End())
			if p.Column == 0 {
				return mangledIdent
			}
			return ast.NewIdent(fmt.Sprintf("%s /*line :%d:%d*/", newName, p.Line, p.Column))
		}
	}
	var expr ast.Expr = getNewIdent(r.Name.Mangle) // default
	switch r.Context {
	case ctxCall, ctxCall2:
		if r.Name.Kind != "func" {
			if r.Name.Kind == "type" {
				r.Context = ctxType
				if r.Name.Type == nil {
					error_(r.Pos(), "invalid conversion to C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C)
				}
				break
			}
			error_(r.Pos(), "call of non-function C.%s", fixGo(r.Name.Go))
			break
		}
		if r.Context == ctxCall2 {
			if r.Name.Go == "_CMalloc" {
				error_(r.Pos(), "no two-result form for C.malloc")
				break
			}
			// Invent new Name for the two-result function.
			n := f.Name["2"+r.Name.Go]
			if n == nil {
				n = new(Name)
				*n = *r.Name
				n.AddError = true
				n.Mangle = "_C2func_" + n.Go
				f.Name["2"+r.Name.Go] = n
			}
			expr = getNewIdent(n.Mangle)
			r.Name = n
			break
		}
	case ctxExpr:
		switch r.Name.Kind {
		case "func":
			if builtinDefs[r.Name.C] != "" {
				error_(r.Pos(), "use of builtin '%s' not in function call", fixGo(r.Name.C))
			}

			// Function is being used in an expression, to e.g. pass around a C function pointer.
			// Create a new Name for this Ref which causes the variable to be declared in Go land.
			fpName := "fp_" + r.Name.Go
			name := f.Name[fpName]
			if name == nil {
				name = &Name{
					Go:   fpName,
					C:    r.Name.C,
					Kind: "fpvar",
					Type: &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("void*"), Go: ast.NewIdent("unsafe.Pointer")},
				}
				p.mangleName(name)
				f.Name[fpName] = name
			}
			r.Name = name
			// Rewrite into call to _Cgo_ptr to prevent assignments. The _Cgo_ptr
			// function is defined in out.go and simply returns its argument. See
			// issue 7757.
			expr = &ast.CallExpr{
				Fun:  &ast.Ident{NamePos: (*r.Expr).Pos(), Name: "_Cgo_ptr"},
				Args: []ast.Expr{getNewIdent(name.Mangle)},
			}
		case "type":
			// Okay - might be new(T), T(x), Generic[T], etc.
			if r.Name.Type == nil {
				error_(r.Pos(), "expression C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C)
			}
		case "var":
			expr = &ast.StarExpr{Star: (*r.Expr).Pos(), X: expr}
		case "macro":
			expr = &ast.CallExpr{Fun: expr}
		}
	case ctxSelector:
		if r.Name.Kind == "var" {
			expr = &ast.StarExpr{Star: (*r.Expr).Pos(), X: expr}
		} else {
			error_(r.Pos(), "only C variables allowed in selector expression %s", fixGo(r.Name.Go))
		}
	case ctxType:
		if r.Name.Kind != "type" {
			error_(r.Pos(), "expression C.%s used as type", fixGo(r.Name.Go))
		} else if r.Name.Type == nil {
			// Use of C.enum_x, C.struct_x or C.union_x without C definition.
			// GCC won't raise an error when using pointers to such unknown types.
			error_(r.Pos(), "type C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C)
		}
	default:
		if r.Name.Kind == "func" {
			error_(r.Pos(), "must call C.%s", fixGo(r.Name.Go))
		}
	}
	return expr
}

// gofmtPos returns the gofmt-formatted string for an AST node,
// with a comment setting the position before the node.
func gofmtPos(n ast.Expr, pos token.Pos) string {
	s := gofmtLine(n)
	p := fset.Position(pos)
	if p.Column == 0 {
		return s
	}
	return fmt.Sprintf("/*line :%d:%d*/%s", p.Line, p.Column, s)
}

// checkGCCBaseCmd returns the start of the compiler command line.
// It uses $CC if set, or else $GCC, or else the compiler recorded
// during the initial build as defaultCC.
// defaultCC is defined in zdefaultcc.go, written by cmd/dist.
//
// The compiler command line is split into arguments on whitespace. Quotes
// are understood, so arguments may contain whitespace.
//
// checkGCCBaseCmd confirms that the compiler exists in PATH, returning
// an error if it does not.
func checkGCCBaseCmd() ([]string, error) {
	// Use $CC if set, since that's what the build uses.
	value := os.Getenv("CC")
	if value == "" {
		// Try $GCC if set, since that's what we used to use.
		value = os.Getenv("GCC")
	}
	if value == "" {
		value = defaultCC(goos, goarch)
	}
	args, err := quoted.Split(value)
	if err != nil {
		return nil, err
	}
	if len(args) == 0 {
		return nil, errors.New("CC not set and no default found")
	}
	if _, err := exec.LookPath(args[0]); err != nil {
		return nil, fmt.Errorf("C compiler %q not found: %v", args[0], err)
	}
	// Full slice expression so callers cannot append into our backing array.
	return args[:len(args):len(args)], nil
}

// gccMachine returns the gcc -m flag to use, either "-m32", "-m64" or "-marm".
+func (p *Package) gccMachine() []string { + switch goarch { + case "amd64": + if goos == "darwin" { + return []string{"-arch", "x86_64", "-m64"} + } + return []string{"-m64"} + case "arm64": + if goos == "darwin" { + return []string{"-arch", "arm64"} + } + case "386": + return []string{"-m32"} + case "arm": + return []string{"-marm"} // not thumb + case "s390": + return []string{"-m31"} + case "s390x": + return []string{"-m64"} + case "mips64", "mips64le": + if gomips64 == "hardfloat" { + return []string{"-mabi=64", "-mhard-float"} + } else if gomips64 == "softfloat" { + return []string{"-mabi=64", "-msoft-float"} + } + case "mips", "mipsle": + if gomips == "hardfloat" { + return []string{"-mabi=32", "-mfp32", "-mhard-float", "-mno-odd-spreg"} + } else if gomips == "softfloat" { + return []string{"-mabi=32", "-msoft-float"} + } + case "loong64": + return []string{"-mabi=lp64d"} + } + return nil +} + +func gccTmp() string { + return *objDir + "_cgo_.o" +} + +// gccCmd returns the gcc command line to use for compiling +// the input. +func (p *Package) gccCmd() []string { + c := append(gccBaseCmd, + "-w", // no warnings + "-Wno-error", // warnings are not errors + "-o"+gccTmp(), // write object to tmp + "-gdwarf-2", // generate DWARF v2 debugging symbols + "-c", // do not link + "-xc", // input language is C + ) + if p.GccIsClang { + c = append(c, + "-ferror-limit=0", + // Apple clang version 1.7 (tags/Apple/clang-77) (based on LLVM 2.9svn) + // doesn't have -Wno-unneeded-internal-declaration, so we need yet another + // flag to disable the warning. Yes, really good diagnostics, clang. + "-Wno-unknown-warning-option", + "-Wno-unneeded-internal-declaration", + "-Wno-unused-function", + "-Qunused-arguments", + // Clang embeds prototypes for some builtin functions, + // like malloc and calloc, but all size_t parameters are + // incorrectly typed unsigned long. 
We work around that + // by disabling the builtin functions (this is safe as + // it won't affect the actual compilation of the C code). + // See: https://golang.org/issue/6506. + "-fno-builtin", + ) + } + + c = append(c, p.GccOptions...) + c = append(c, p.gccMachine()...) + if goos == "aix" { + c = append(c, "-maix64") + c = append(c, "-mcmodel=large") + } + // disable LTO so we get an object whose symbols we can read + c = append(c, "-fno-lto") + c = append(c, "-") //read input from standard input + return c +} + +// gccDebug runs gcc -gdwarf-2 over the C program stdin and +// returns the corresponding DWARF data and, if present, debug data block. +func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int64, floats []float64, strs []string) { + runGcc(stdin, p.gccCmd()) + + isDebugInts := func(s string) bool { + // Some systems use leading _ to denote non-assembly symbols. + return s == "__cgodebug_ints" || s == "___cgodebug_ints" + } + isDebugFloats := func(s string) bool { + // Some systems use leading _ to denote non-assembly symbols. + return s == "__cgodebug_floats" || s == "___cgodebug_floats" + } + indexOfDebugStr := func(s string) int { + // Some systems use leading _ to denote non-assembly symbols. + if strings.HasPrefix(s, "___") { + s = s[1:] + } + if strings.HasPrefix(s, "__cgodebug_str__") { + if n, err := strconv.Atoi(s[len("__cgodebug_str__"):]); err == nil { + return n + } + } + return -1 + } + indexOfDebugStrlen := func(s string) int { + // Some systems use leading _ to denote non-assembly symbols. 
+ if strings.HasPrefix(s, "___") { + s = s[1:] + } + if strings.HasPrefix(s, "__cgodebug_strlen__") { + if n, err := strconv.Atoi(s[len("__cgodebug_strlen__"):]); err == nil { + return n + } + } + return -1 + } + + strs = make([]string, nnames) + + strdata := make(map[int]string, nnames) + strlens := make(map[int]int, nnames) + + buildStrings := func() { + for n, strlen := range strlens { + data := strdata[n] + if len(data) <= strlen { + fatalf("invalid string literal") + } + strs[n] = data[:strlen] + } + } + + if f, err := macho.Open(gccTmp()); err == nil { + defer f.Close() + d, err := f.DWARF() + if err != nil { + fatalf("cannot load DWARF output from %s: %v", gccTmp(), err) + } + bo := f.ByteOrder + if f.Symtab != nil { + for i := range f.Symtab.Syms { + s := &f.Symtab.Syms[i] + switch { + case isDebugInts(s.Name): + // Found it. Now find data section. + if i := int(s.Sect) - 1; 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[s.Value-sect.Addr:] + ints = make([]int64, len(data)/8) + for i := range ints { + ints[i] = int64(bo.Uint64(data[i*8:])) + } + } + } + } + case isDebugFloats(s.Name): + // Found it. Now find data section. + if i := int(s.Sect) - 1; 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[s.Value-sect.Addr:] + floats = make([]float64, len(data)/8) + for i := range floats { + floats[i] = math.Float64frombits(bo.Uint64(data[i*8:])) + } + } + } + } + default: + if n := indexOfDebugStr(s.Name); n != -1 { + // Found it. Now find data section. 
+ if i := int(s.Sect) - 1; 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[s.Value-sect.Addr:] + strdata[n] = string(data) + } + } + } + break + } + if n := indexOfDebugStrlen(s.Name); n != -1 { + // Found it. Now find data section. + if i := int(s.Sect) - 1; 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[s.Value-sect.Addr:] + strlen := bo.Uint64(data[:8]) + if strlen > (1<<(uint(p.IntSize*8)-1) - 1) { // greater than MaxInt? + fatalf("string literal too big") + } + strlens[n] = int(strlen) + } + } + } + break + } + } + } + + buildStrings() + } + return d, ints, floats, strs + } + + if f, err := elf.Open(gccTmp()); err == nil { + defer f.Close() + d, err := f.DWARF() + if err != nil { + fatalf("cannot load DWARF output from %s: %v", gccTmp(), err) + } + bo := f.ByteOrder + symtab, err := f.Symbols() + if err == nil { + // Check for use of -fsanitize=hwaddress (issue 53285). + removeTag := func(v uint64) uint64 { return v } + if goarch == "arm64" { + for i := range symtab { + if symtab[i].Name == "__hwasan_init" { + // -fsanitize=hwaddress on ARM + // uses the upper byte of a + // memory address as a hardware + // tag. Remove it so that + // we can find the associated + // data. + removeTag = func(v uint64) uint64 { return v &^ (0xff << (64 - 8)) } + break + } + } + } + + for i := range symtab { + s := &symtab[i] + switch { + case isDebugInts(s.Name): + // Found it. Now find data section. 
+ if i := int(s.Section); 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + val := removeTag(s.Value) + if sect.Addr <= val && val < sect.Addr+sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[val-sect.Addr:] + ints = make([]int64, len(data)/8) + for i := range ints { + ints[i] = int64(bo.Uint64(data[i*8:])) + } + } + } + } + case isDebugFloats(s.Name): + // Found it. Now find data section. + if i := int(s.Section); 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + val := removeTag(s.Value) + if sect.Addr <= val && val < sect.Addr+sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[val-sect.Addr:] + floats = make([]float64, len(data)/8) + for i := range floats { + floats[i] = math.Float64frombits(bo.Uint64(data[i*8:])) + } + } + } + } + default: + if n := indexOfDebugStr(s.Name); n != -1 { + // Found it. Now find data section. + if i := int(s.Section); 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + val := removeTag(s.Value) + if sect.Addr <= val && val < sect.Addr+sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[val-sect.Addr:] + strdata[n] = string(data) + } + } + } + break + } + if n := indexOfDebugStrlen(s.Name); n != -1 { + // Found it. Now find data section. + if i := int(s.Section); 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + val := removeTag(s.Value) + if sect.Addr <= val && val < sect.Addr+sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[val-sect.Addr:] + strlen := bo.Uint64(data[:8]) + if strlen > (1<<(uint(p.IntSize*8)-1) - 1) { // greater than MaxInt? 
+ fatalf("string literal too big") + } + strlens[n] = int(strlen) + } + } + } + break + } + } + } + + buildStrings() + } + return d, ints, floats, strs + } + + if f, err := pe.Open(gccTmp()); err == nil { + defer f.Close() + d, err := f.DWARF() + if err != nil { + fatalf("cannot load DWARF output from %s: %v", gccTmp(), err) + } + bo := binary.LittleEndian + for _, s := range f.Symbols { + switch { + case isDebugInts(s.Name): + if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + if s.Value < sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[s.Value:] + ints = make([]int64, len(data)/8) + for i := range ints { + ints[i] = int64(bo.Uint64(data[i*8:])) + } + } + } + } + case isDebugFloats(s.Name): + if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + if s.Value < sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[s.Value:] + floats = make([]float64, len(data)/8) + for i := range floats { + floats[i] = math.Float64frombits(bo.Uint64(data[i*8:])) + } + } + } + } + default: + if n := indexOfDebugStr(s.Name); n != -1 { + if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + if s.Value < sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[s.Value:] + strdata[n] = string(data) + } + } + } + break + } + if n := indexOfDebugStrlen(s.Name); n != -1 { + if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + if s.Value < sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[s.Value:] + strlen := bo.Uint64(data[:8]) + if strlen > (1<<(uint(p.IntSize*8)-1) - 1) { // greater than MaxInt? 
+ fatalf("string literal too big") + } + strlens[n] = int(strlen) + } + } + } + break + } + } + } + + buildStrings() + + return d, ints, floats, strs + } + + if f, err := xcoff.Open(gccTmp()); err == nil { + defer f.Close() + d, err := f.DWARF() + if err != nil { + fatalf("cannot load DWARF output from %s: %v", gccTmp(), err) + } + bo := binary.BigEndian + for _, s := range f.Symbols { + switch { + case isDebugInts(s.Name): + if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + if s.Value < sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[s.Value:] + ints = make([]int64, len(data)/8) + for i := range ints { + ints[i] = int64(bo.Uint64(data[i*8:])) + } + } + } + } + case isDebugFloats(s.Name): + if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + if s.Value < sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[s.Value:] + floats = make([]float64, len(data)/8) + for i := range floats { + floats[i] = math.Float64frombits(bo.Uint64(data[i*8:])) + } + } + } + } + default: + if n := indexOfDebugStr(s.Name); n != -1 { + if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + if s.Value < sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[s.Value:] + strdata[n] = string(data) + } + } + } + break + } + if n := indexOfDebugStrlen(s.Name); n != -1 { + if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) { + sect := f.Sections[i] + if s.Value < sect.Size { + if sdat, err := sect.Data(); err == nil { + data := sdat[s.Value:] + strlen := bo.Uint64(data[:8]) + if strlen > (1<<(uint(p.IntSize*8)-1) - 1) { // greater than MaxInt? 
+ fatalf("string literal too big") + } + strlens[n] = int(strlen) + } + } + } + break + } + } + } + + buildStrings() + return d, ints, floats, strs + } + fatalf("cannot parse gcc output %s as ELF, Mach-O, PE, XCOFF object", gccTmp()) + panic("not reached") +} + +// gccDefines runs gcc -E -dM -xc - over the C program stdin +// and returns the corresponding standard output, which is the +// #defines that gcc encountered while processing the input +// and its included files. +func (p *Package) gccDefines(stdin []byte) string { + base := append(gccBaseCmd, "-E", "-dM", "-xc") + base = append(base, p.gccMachine()...) + stdout, _ := runGcc(stdin, append(append(base, p.GccOptions...), "-")) + return stdout +} + +// gccErrors runs gcc over the C program stdin and returns +// the errors that gcc prints. That is, this function expects +// gcc to fail. +func (p *Package) gccErrors(stdin []byte, extraArgs ...string) string { + // TODO(rsc): require failure + args := p.gccCmd() + + // Optimization options can confuse the error messages; remove them. + nargs := make([]string, 0, len(args)+len(extraArgs)) + for _, arg := range args { + if !strings.HasPrefix(arg, "-O") { + nargs = append(nargs, arg) + } + } + + // Force -O0 optimization and append extra arguments, but keep the + // trailing "-" at the end. + li := len(nargs) - 1 + last := nargs[li] + nargs[li] = "-O0" + nargs = append(nargs, extraArgs...) + nargs = append(nargs, last) + + if *debugGcc { + fmt.Fprintf(os.Stderr, "$ %s < 0 { + dtype := c.ptrKeys[0] + dtypeKey := dtype.String() + c.ptrKeys = c.ptrKeys[1:] + ptrs := c.ptrs[dtypeKey] + delete(c.ptrs, dtypeKey) + + // Note Type might invalidate c.ptrs[dtypeKey]. + t := c.Type(dtype, pos) + for _, ptr := range ptrs { + ptr.Go.(*ast.StarExpr).X = t.Go + ptr.C.Set("%s*", t.C) + } + } +} + +// Type returns a *Type with the same memory layout as +// dtype when used as the type of a variable or a struct field. 
func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type {
	return c.loadType(dtype, pos, "")
}

// loadType recursively loads the requested dtype and its dependency graph.
func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Type {
	// Always recompute bad pointer typedefs, as the set of such
	// typedefs changes as we see more types.
	checkCache := true
	if dtt, ok := dtype.(*dwarf.TypedefType); ok && c.badPointerTypedef(dtt) {
		checkCache = false
	}

	// The cache key should be relative to its parent.
	// See issue https://golang.org/issue/31891
	key := parent + " > " + dtype.String()

	if checkCache {
		if t, ok := c.m[key]; ok {
			// A cached entry with nil Go means we re-entered this
			// dtype before finishing it: a conversion cycle.
			if t.Go == nil {
				fatalf("%s: type conversion loop at %s", lineno(pos), dtype)
			}
			return t
		}
	}

	t := new(Type)
	t.Size = dtype.Size() // note: wrong for array of pointers, corrected below
	t.Align = -1
	t.C = &TypeRepr{Repr: dtype.Common().Name}
	// Publish the (still incomplete) type before recursing so that
	// self-referential types terminate via the loop check above.
	c.m[key] = t

	switch dt := dtype.(type) {
	default:
		fatalf("%s: unexpected type: %s", lineno(pos), dtype)

	case *dwarf.AddrType:
		if t.Size != c.ptrSize {
			fatalf("%s: unexpected: %d-byte address type - %s", lineno(pos), t.Size, dtype)
		}
		t.Go = c.uintptr
		t.Align = t.Size

	case *dwarf.ArrayType:
		if dt.StrideBitSize > 0 {
			// Cannot represent bit-sized elements in Go.
			t.Go = c.Opaque(t.Size)
			break
		}
		count := dt.Count
		if count == -1 {
			// Indicates flexible array member, which Go doesn't support.
			// Translate to zero-length array instead.
			count = 0
		}
		sub := c.Type(dt.Type, pos)
		t.Align = sub.Align
		t.Go = &ast.ArrayType{
			Len: c.intExpr(count),
			Elt: sub.Go,
		}
		// Recalculate t.Size now that we know sub.Size.
		t.Size = count * sub.Size
		t.C.Set("__typeof__(%s[%d])", sub.C, dt.Count)

	case *dwarf.BoolType:
		t.Go = c.bool
		t.Align = 1

	case *dwarf.CharType:
		if t.Size != 1 {
			fatalf("%s: unexpected: %d-byte char type - %s", lineno(pos), t.Size, dtype)
		}
		t.Go = c.int8
		t.Align = 1

	case *dwarf.EnumType:
		if t.Align = t.Size; t.Align >= c.ptrSize {
			t.Align = c.ptrSize
		}
		t.C.Set("enum " + dt.EnumName)
		// signedDelta offsets the size switch below so signed and
		// unsigned enums of the same size select different Go types.
		signed := 0
		t.EnumValues = make(map[string]int64)
		for _, ev := range dt.Val {
			t.EnumValues[ev.Name] = ev.Val
			if ev.Val < 0 {
				signed = signedDelta
			}
		}
		switch t.Size + int64(signed) {
		default:
			fatalf("%s: unexpected: %d-byte enum type - %s", lineno(pos), t.Size, dtype)
		case 1:
			t.Go = c.uint8
		case 2:
			t.Go = c.uint16
		case 4:
			t.Go = c.uint32
		case 8:
			t.Go = c.uint64
		case 1 + signedDelta:
			t.Go = c.int8
		case 2 + signedDelta:
			t.Go = c.int16
		case 4 + signedDelta:
			t.Go = c.int32
		case 8 + signedDelta:
			t.Go = c.int64
		}

	case *dwarf.FloatType:
		switch t.Size {
		default:
			fatalf("%s: unexpected: %d-byte float type - %s", lineno(pos), t.Size, dtype)
		case 4:
			t.Go = c.float32
		case 8:
			t.Go = c.float64
		}
		if t.Align = t.Size; t.Align >= c.ptrSize {
			t.Align = c.ptrSize
		}

	case *dwarf.ComplexType:
		switch t.Size {
		default:
			fatalf("%s: unexpected: %d-byte complex type - %s", lineno(pos), t.Size, dtype)
		case 8:
			t.Go = c.complex64
		case 16:
			t.Go = c.complex128
		}
		if t.Align = t.Size / 2; t.Align >= c.ptrSize {
			t.Align = c.ptrSize
		}

	case *dwarf.FuncType:
		// No attempt at translation: would enable calls
		// directly between worlds, but we need to moderate those.
		t.Go = c.uintptr
		t.Align = c.ptrSize

	case *dwarf.IntType:
		if dt.BitSize > 0 {
			fatalf("%s: unexpected: %d-bit int type - %s", lineno(pos), dt.BitSize, dtype)
		}
		switch t.Size {
		default:
			fatalf("%s: unexpected: %d-byte int type - %s", lineno(pos), t.Size, dtype)
		case 1:
			t.Go = c.int8
		case 2:
			t.Go = c.int16
		case 4:
			t.Go = c.int32
		case 8:
			t.Go = c.int64
		case 16:
			t.Go = &ast.ArrayType{
				Len: c.intExpr(t.Size),
				Elt: c.uint8,
			}
		}
		if t.Align = t.Size; t.Align >= c.ptrSize {
			t.Align = c.ptrSize
		}

	case *dwarf.PtrType:
		// Clang doesn't emit DW_AT_byte_size for pointer types.
		if t.Size != c.ptrSize && t.Size != -1 {
			fatalf("%s: unexpected: %d-byte pointer type - %s", lineno(pos), t.Size, dtype)
		}
		t.Size = c.ptrSize
		t.Align = c.ptrSize

		if _, ok := base(dt.Type).(*dwarf.VoidType); ok {
			t.Go = c.goVoidPtr
			t.C.Set("void*")
			dq := dt.Type
			for {
				if d, ok := dq.(*dwarf.QualType); ok {
					t.C.Set(d.Qual + " " + t.C.String())
					dq = d.Type
				} else {
					break
				}
			}
			break
		}

		// Placeholder initialization; completed in FinishType.
		t.Go = &ast.StarExpr{}
		t.C.Set("*")
		key := dt.Type.String()
		if _, ok := c.ptrs[key]; !ok {
			c.ptrKeys = append(c.ptrKeys, dt.Type)
		}
		c.ptrs[key] = append(c.ptrs[key], t)

	case *dwarf.QualType:
		t1 := c.Type(dt.Type, pos)
		t.Size = t1.Size
		t.Align = t1.Align
		t.Go = t1.Go
		if unionWithPointer[t1.Go] {
			unionWithPointer[t.Go] = true
		}
		t.EnumValues = nil
		t.Typedef = ""
		t.C.Set("%s "+dt.Qual, t1.C)
		return t

	case *dwarf.StructType:
		// Convert to Go struct, being careful about alignment.
		// Have to give it a name to simulate C "struct foo" references.
		tag := dt.StructName
		if dt.ByteSize < 0 && tag == "" { // opaque unnamed struct - should not be possible
			break
		}
		if tag == "" {
			tag = anonymousStructTag[dt]
			if tag == "" {
				tag = "__" + strconv.Itoa(tagGen)
				tagGen++
				anonymousStructTag[dt] = tag
			}
		} else if t.C.Empty() {
			t.C.Set(dt.Kind + " " + tag)
		}
		name := c.Ident("_Ctype_" + dt.Kind + "_" + tag)
		t.Go = name // publish before recursive calls
		goIdent[name.Name] = name
		if dt.ByteSize < 0 {
			// Don't override old type
			if _, ok := typedef[name.Name]; ok {
				break
			}

			// Size calculation in c.Struct/c.Opaque will die with size=-1 (unknown),
			// so execute the basic things that the struct case would do
			// other than try to determine a Go representation.
			tt := *t
			tt.C = &TypeRepr{"%s %s", []interface{}{dt.Kind, tag}}
			// We don't know what the representation of this struct is, so don't let
			// anyone allocate one on the Go side. As a side effect of this annotation,
			// pointers to this type will not be considered pointers in Go. They won't
			// get writebarrier-ed or adjusted during a stack copy. This should handle
			// all the cases badPointerTypedef used to handle, but hopefully will
			// continue to work going forward without any more need for cgo changes.
			tt.Go = c.Ident(incomplete)
			typedef[name.Name] = &tt
			break
		}
		switch dt.Kind {
		case "class", "union":
			t.Go = c.Opaque(t.Size)
			if c.dwarfHasPointer(dt, pos) {
				unionWithPointer[t.Go] = true
			}
			if t.C.Empty() {
				t.C.Set("__typeof__(unsigned char[%d])", t.Size)
			}
			t.Align = 1 // TODO: should probably base this on field alignment.
			typedef[name.Name] = t
		case "struct":
			g, csyntax, align := c.Struct(dt, pos)
			if t.C.Empty() {
				t.C.Set(csyntax)
			}
			t.Align = align
			tt := *t
			if tag != "" {
				tt.C = &TypeRepr{"struct %s", []interface{}{tag}}
			}
			tt.Go = g
			if c.incompleteStructs[tag] {
				tt.Go = c.Ident(incomplete)
			}
			typedef[name.Name] = &tt
		}

	case *dwarf.TypedefType:
		// Record typedef for printing.
		if dt.Name == "_GoString_" {
			// Special C name for Go string type.
			// Knows string layout used by compilers: pointer plus length,
			// which rounds up to 2 pointers after alignment.
			t.Go = c.string
			t.Size = c.ptrSize * 2
			t.Align = c.ptrSize
			break
		}
		if dt.Name == "_GoBytes_" {
			// Special C name for Go []byte type.
			// Knows slice layout used by compilers: pointer, length, cap.
			t.Go = c.Ident("[]byte")
			t.Size = c.ptrSize + 4 + 4
			t.Align = c.ptrSize
			break
		}
		name := c.Ident("_Ctype_" + dt.Name)
		goIdent[name.Name] = name
		akey := ""
		if c.anonymousStructTypedef(dt) {
			// only load type recursively for typedefs of anonymous
			// structs, see issues 37479 and 37621.
			akey = key
		}
		sub := c.loadType(dt.Type, pos, akey)
		if c.badPointerTypedef(dt) {
			// Treat this typedef as a uintptr.
			s := *sub
			s.Go = c.uintptr
			s.BadPointer = true
			sub = &s
			// Make sure we update any previously computed type.
			if oldType := typedef[name.Name]; oldType != nil {
				oldType.Go = sub.Go
				oldType.BadPointer = true
			}
		}
		if c.badVoidPointerTypedef(dt) {
			// Treat this typedef as a pointer to a _cgopackage.Incomplete.
			s := *sub
			s.Go = c.Ident("*" + incomplete)
			sub = &s
			// Make sure we update any previously computed type.
			if oldType := typedef[name.Name]; oldType != nil {
				oldType.Go = sub.Go
			}
		}
		// Check for non-pointer "struct {...}; typedef struct *"
		// typedefs that should be marked Incomplete.
		if ptr, ok := dt.Type.(*dwarf.PtrType); ok {
			if strct, ok := ptr.Type.(*dwarf.StructType); ok {
				if c.badStructPointerTypedef(dt.Name, strct) {
					c.incompleteStructs[strct.StructName] = true
					// Make sure we update any previously computed type.
					name := "_Ctype_struct_" + strct.StructName
					if oldType := typedef[name]; oldType != nil {
						oldType.Go = c.Ident(incomplete)
					}
				}
			}
		}
		t.Go = name
		t.BadPointer = sub.BadPointer
		if unionWithPointer[sub.Go] {
			unionWithPointer[t.Go] = true
		}
		t.Size = sub.Size
		t.Align = sub.Align
		oldType := typedef[name.Name]
		if oldType == nil {
			tt := *t
			tt.Go = sub.Go
			tt.BadPointer = sub.BadPointer
			typedef[name.Name] = &tt
		}

		// If sub.Go.Name is "_Ctype_struct_foo" or "_Ctype_union_foo" or "_Ctype_class_foo",
		// use that as the Go form for this typedef too, so that the typedef will be interchangeable
		// with the base type.
		// In -godefs mode, do this for all typedefs.
		if isStructUnionClass(sub.Go) || *godefs {
			t.Go = sub.Go

			if isStructUnionClass(sub.Go) {
				// Use the typedef name for C code.
				typedef[sub.Go.(*ast.Ident).Name].C = t.C
			}

			// If we've seen this typedef before, and it
			// was an anonymous struct/union/class before
			// too, use the old definition.
			// TODO: it would be safer to only do this if
			// we verify that the types are the same.
			if oldType != nil && isStructUnionClass(oldType.Go) {
				t.Go = oldType.Go
			}
		}

	case *dwarf.UcharType:
		if t.Size != 1 {
			fatalf("%s: unexpected: %d-byte uchar type - %s", lineno(pos), t.Size, dtype)
		}
		t.Go = c.uint8
		t.Align = 1

	case *dwarf.UintType:
		if dt.BitSize > 0 {
			fatalf("%s: unexpected: %d-bit uint type - %s", lineno(pos), dt.BitSize, dtype)
		}
		switch t.Size {
		default:
			fatalf("%s: unexpected: %d-byte uint type - %s", lineno(pos), t.Size, dtype)
		case 1:
			t.Go = c.uint8
		case 2:
			t.Go = c.uint16
		case 4:
			t.Go = c.uint32
		case 8:
			t.Go = c.uint64
		case 16:
			t.Go = &ast.ArrayType{
				Len: c.intExpr(t.Size),
				Elt: c.uint8,
			}
		}
		if t.Align = t.Size; t.Align >= c.ptrSize {
			t.Align = c.ptrSize
		}

	case *dwarf.VoidType:
		t.Go = c.goVoid
		t.C.Set("void")
		t.Align = 1
	}

	// For basic types with a DWARF name, also record a named
	// "_Ctype_..." typedef so C names print nicely in Go output.
	switch dtype.(type) {
	case *dwarf.AddrType, *dwarf.BoolType, *dwarf.CharType, *dwarf.ComplexType, *dwarf.IntType, *dwarf.FloatType, *dwarf.UcharType, *dwarf.UintType:
		s := dtype.Common().Name
		if s != "" {
			if ss, ok := dwarfToName[s]; ok {
				s = ss
			}
			s = strings.Replace(s, " ", "", -1)
			name := c.Ident("_Ctype_" + s)
			tt := *t
			typedef[name.Name] = &tt
			if !*godefs {
				t.Go = name
			}
		}
	}

	if t.Size < 0 {
		// Unsized types are [0]byte, unless they're typedefs of other types
		// or structs with tags.
		// if so, use the name we've already defined.
		t.Size = 0
		switch dt := dtype.(type) {
		case *dwarf.TypedefType:
			// ok
		case *dwarf.StructType:
			if dt.StructName != "" {
				break
			}
			t.Go = c.Opaque(0)
		default:
			t.Go = c.Opaque(0)
		}
		if t.C.Empty() {
			t.C.Set("void")
		}
	}

	if t.C.Empty() {
		fatalf("%s: internal error: did not create C name for %s", lineno(pos), dtype)
	}

	return t
}

// isStructUnionClass reports whether the type described by the Go syntax x
// is a struct, union, or class with a tag.
+func isStructUnionClass(x ast.Expr) bool { + id, ok := x.(*ast.Ident) + if !ok { + return false + } + name := id.Name + return strings.HasPrefix(name, "_Ctype_struct_") || + strings.HasPrefix(name, "_Ctype_union_") || + strings.HasPrefix(name, "_Ctype_class_") +} + +// FuncArg returns a Go type with the same memory layout as +// dtype when used as the type of a C function argument. +func (c *typeConv) FuncArg(dtype dwarf.Type, pos token.Pos) *Type { + t := c.Type(unqual(dtype), pos) + switch dt := dtype.(type) { + case *dwarf.ArrayType: + // Arrays are passed implicitly as pointers in C. + // In Go, we must be explicit. + tr := &TypeRepr{} + tr.Set("%s*", t.C) + return &Type{ + Size: c.ptrSize, + Align: c.ptrSize, + Go: &ast.StarExpr{X: t.Go}, + C: tr, + } + case *dwarf.TypedefType: + // C has much more relaxed rules than Go for + // implicit type conversions. When the parameter + // is type T defined as *X, simulate a little of the + // laxness of C by making the argument *X instead of T. + if ptr, ok := base(dt.Type).(*dwarf.PtrType); ok { + // Unless the typedef happens to point to void* since + // Go has special rules around using unsafe.Pointer. + if _, void := base(ptr.Type).(*dwarf.VoidType); void { + break + } + // ...or the typedef is one in which we expect bad pointers. + // It will be a uintptr instead of *X. + if c.baseBadPointerTypedef(dt) { + break + } + + t = c.Type(ptr, pos) + if t == nil { + return nil + } + + // For a struct/union/class, remember the C spelling, + // in case it has __attribute__((unavailable)). + // See issue 2888. + if isStructUnionClass(t.Go) { + t.Typedef = dt.Name + } + } + } + return t +} + +// FuncType returns the Go type analogous to dtype. +// There is no guarantee about matching memory layout. 
+func (c *typeConv) FuncType(dtype *dwarf.FuncType, pos token.Pos) *FuncType { + p := make([]*Type, len(dtype.ParamType)) + gp := make([]*ast.Field, len(dtype.ParamType)) + for i, f := range dtype.ParamType { + // gcc's DWARF generator outputs a single DotDotDotType parameter for + // function pointers that specify no parameters (e.g. void + // (*__cgo_0)()). Treat this special case as void. This case is + // invalid according to ISO C anyway (i.e. void (*__cgo_1)(...) is not + // legal). + if _, ok := f.(*dwarf.DotDotDotType); ok && i == 0 { + p, gp = nil, nil + break + } + p[i] = c.FuncArg(f, pos) + gp[i] = &ast.Field{Type: p[i].Go} + } + var r *Type + var gr []*ast.Field + if _, ok := base(dtype.ReturnType).(*dwarf.VoidType); ok { + gr = []*ast.Field{{Type: c.goVoid}} + } else if dtype.ReturnType != nil { + r = c.Type(unqual(dtype.ReturnType), pos) + gr = []*ast.Field{{Type: r.Go}} + } + return &FuncType{ + Params: p, + Result: r, + Go: &ast.FuncType{ + Params: &ast.FieldList{List: gp}, + Results: &ast.FieldList{List: gr}, + }, + } +} + +// Identifier +func (c *typeConv) Ident(s string) *ast.Ident { + return ast.NewIdent(s) +} + +// Opaque type of n bytes. +func (c *typeConv) Opaque(n int64) ast.Expr { + return &ast.ArrayType{ + Len: c.intExpr(n), + Elt: c.byte, + } +} + +// Expr for integer n. +func (c *typeConv) intExpr(n int64) ast.Expr { + return &ast.BasicLit{ + Kind: token.INT, + Value: strconv.FormatInt(n, 10), + } +} + +// Add padding of given size to fld. +func (c *typeConv) pad(fld []*ast.Field, sizes []int64, size int64) ([]*ast.Field, []int64) { + n := len(fld) + fld = fld[0 : n+1] + fld[n] = &ast.Field{Names: []*ast.Ident{c.Ident("_")}, Type: c.Opaque(size)} + sizes = sizes[0 : n+1] + sizes[n] = size + return fld, sizes +} + +// Struct conversion: return Go and (gc) C syntax for type. +func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.StructType, csyntax string, align int64) { + // Minimum alignment for a struct is 1 byte. 
+ align = 1 + + var buf strings.Builder + buf.WriteString("struct {") + fld := make([]*ast.Field, 0, 2*len(dt.Field)+1) // enough for padding around every field + sizes := make([]int64, 0, 2*len(dt.Field)+1) + off := int64(0) + + // Rename struct fields that happen to be named Go keywords into + // _{keyword}. Create a map from C ident -> Go ident. The Go ident will + // be mangled. Any existing identifier that already has the same name on + // the C-side will cause the Go-mangled version to be prefixed with _. + // (e.g. in a struct with fields '_type' and 'type', the latter would be + // rendered as '__type' in Go). + ident := make(map[string]string) + used := make(map[string]bool) + for _, f := range dt.Field { + ident[f.Name] = f.Name + used[f.Name] = true + } + + if !*godefs { + for cid, goid := range ident { + if token.Lookup(goid).IsKeyword() { + // Avoid keyword + goid = "_" + goid + + // Also avoid existing fields + for _, exist := used[goid]; exist; _, exist = used[goid] { + goid = "_" + goid + } + + used[goid] = true + ident[cid] = goid + } + } + } + + anon := 0 + for _, f := range dt.Field { + name := f.Name + ft := f.Type + + // In godefs mode, if this field is a C11 + // anonymous union then treat the first field in the + // union as the field in the struct. This handles + // cases like the glibc file; see + // issue 6677. + if *godefs { + if st, ok := f.Type.(*dwarf.StructType); ok && name == "" && st.Kind == "union" && len(st.Field) > 0 && !used[st.Field[0].Name] { + name = st.Field[0].Name + ident[name] = name + ft = st.Field[0].Type + } + } + + // TODO: Handle fields that are anonymous structs by + // promoting the fields of the inner struct. + + t := c.Type(ft, pos) + tgo := t.Go + size := t.Size + talign := t.Align + if f.BitOffset > 0 || f.BitSize > 0 { + // The layout of bitfields is implementation defined, + // so we don't know how they correspond to Go fields + // even if they are aligned at byte boundaries. 
+ continue + } + + if talign > 0 && f.ByteOffset%talign != 0 { + // Drop misaligned fields, the same way we drop integer bit fields. + // The goal is to make available what can be made available. + // Otherwise one bad and unneeded field in an otherwise okay struct + // makes the whole program not compile. Much of the time these + // structs are in system headers that cannot be corrected. + continue + } + + // Round off up to talign, assumed to be a power of 2. + off = (off + talign - 1) &^ (talign - 1) + + if f.ByteOffset > off { + fld, sizes = c.pad(fld, sizes, f.ByteOffset-off) + off = f.ByteOffset + } + if f.ByteOffset < off { + // Drop a packed field that we can't represent. + continue + } + + n := len(fld) + fld = fld[0 : n+1] + if name == "" { + name = fmt.Sprintf("anon%d", anon) + anon++ + ident[name] = name + } + fld[n] = &ast.Field{Names: []*ast.Ident{c.Ident(ident[name])}, Type: tgo} + sizes = sizes[0 : n+1] + sizes[n] = size + off += size + buf.WriteString(t.C.String()) + buf.WriteString(" ") + buf.WriteString(name) + buf.WriteString("; ") + if talign > align { + align = talign + } + } + if off < dt.ByteSize { + fld, sizes = c.pad(fld, sizes, dt.ByteSize-off) + off = dt.ByteSize + } + + // If the last field in a non-zero-sized struct is zero-sized + // the compiler is going to pad it by one (see issue 9401). + // We can't permit that, because then the size of the Go + // struct will not be the same as the size of the C struct. + // Our only option in such a case is to remove the field, + // which means that it cannot be referenced from Go. 
+ for off > 0 && sizes[len(sizes)-1] == 0 { + n := len(sizes) + fld = fld[0 : n-1] + sizes = sizes[0 : n-1] + } + + if off != dt.ByteSize { + fatalf("%s: struct size calculation error off=%d bytesize=%d", lineno(pos), off, dt.ByteSize) + } + buf.WriteString("}") + csyntax = buf.String() + + if *godefs { + godefsFields(fld) + } + expr = &ast.StructType{Fields: &ast.FieldList{List: fld}} + return +} + +// dwarfHasPointer reports whether the DWARF type dt contains a pointer. +func (c *typeConv) dwarfHasPointer(dt dwarf.Type, pos token.Pos) bool { + switch dt := dt.(type) { + default: + fatalf("%s: unexpected type: %s", lineno(pos), dt) + return false + + case *dwarf.AddrType, *dwarf.BoolType, *dwarf.CharType, *dwarf.EnumType, + *dwarf.FloatType, *dwarf.ComplexType, *dwarf.FuncType, + *dwarf.IntType, *dwarf.UcharType, *dwarf.UintType, *dwarf.VoidType: + + return false + + case *dwarf.ArrayType: + return c.dwarfHasPointer(dt.Type, pos) + + case *dwarf.PtrType: + return true + + case *dwarf.QualType: + return c.dwarfHasPointer(dt.Type, pos) + + case *dwarf.StructType: + for _, f := range dt.Field { + if c.dwarfHasPointer(f.Type, pos) { + return true + } + } + return false + + case *dwarf.TypedefType: + if dt.Name == "_GoString_" || dt.Name == "_GoBytes_" { + return true + } + return c.dwarfHasPointer(dt.Type, pos) + } +} + +func upper(s string) string { + if s == "" { + return "" + } + r, size := utf8.DecodeRuneInString(s) + if r == '_' { + return "X" + s + } + return string(unicode.ToUpper(r)) + s[size:] +} + +// godefsFields rewrites field names for use in Go or C definitions. +// It strips leading common prefixes (like tv_ in tv_sec, tv_usec) +// converts names to upper case, and rewrites _ into Pad_godefs_n, +// so that all fields are exported. +func godefsFields(fld []*ast.Field) { + prefix := fieldPrefix(fld) + + // Issue 48396: check for duplicate field names. 
+ if prefix != "" { + names := make(map[string]bool) + fldLoop: + for _, f := range fld { + for _, n := range f.Names { + name := n.Name + if name == "_" { + continue + } + if name != prefix { + name = strings.TrimPrefix(n.Name, prefix) + } + name = upper(name) + if names[name] { + // Field name conflict: don't remove prefix. + prefix = "" + break fldLoop + } + names[name] = true + } + } + } + + npad := 0 + for _, f := range fld { + for _, n := range f.Names { + if n.Name != prefix { + n.Name = strings.TrimPrefix(n.Name, prefix) + } + if n.Name == "_" { + // Use exported name instead. + n.Name = "Pad_cgo_" + strconv.Itoa(npad) + npad++ + } + n.Name = upper(n.Name) + } + } +} + +// fieldPrefix returns the prefix that should be removed from all the +// field names when generating the C or Go code. For generated +// C, we leave the names as is (tv_sec, tv_usec), since that's what +// people are used to seeing in C. For generated Go code, such as +// package syscall's data structures, we drop a common prefix +// (so sec, usec, which will get turned into Sec, Usec for exporting). +func fieldPrefix(fld []*ast.Field) string { + prefix := "" + for _, f := range fld { + for _, n := range f.Names { + // Ignore field names that don't have the prefix we're + // looking for. It is common in C headers to have fields + // named, say, _pad in an otherwise prefixed header. + // If the struct has 3 fields tv_sec, tv_usec, _pad1, then we + // still want to remove the tv_ prefix. + // The check for "orig_" here handles orig_eax in the + // x86 ptrace register sets, which otherwise have all fields + // with reg_ prefixes. + if strings.HasPrefix(n.Name, "orig_") || strings.HasPrefix(n.Name, "_") { + continue + } + i := strings.Index(n.Name, "_") + if i < 0 { + continue + } + if prefix == "" { + prefix = n.Name[:i+1] + } else if prefix != n.Name[:i+1] { + return "" + } + } + } + return prefix +} + +// anonymousStructTypedef reports whether dt is a C typedef for an anonymous +// struct. 
+func (c *typeConv) anonymousStructTypedef(dt *dwarf.TypedefType) bool { + st, ok := dt.Type.(*dwarf.StructType) + return ok && st.StructName == "" +} + +// badPointerTypedef reports whether dt is a C typedef that should not be +// considered a pointer in Go. A typedef is bad if C code sometimes stores +// non-pointers in this type. +// TODO: Currently our best solution is to find these manually and list them as +// they come up. A better solution is desired. +// Note: DEPRECATED. There is now a better solution. Search for incomplete in this file. +func (c *typeConv) badPointerTypedef(dt *dwarf.TypedefType) bool { + if c.badCFType(dt) { + return true + } + if c.badJNI(dt) { + return true + } + if c.badEGLType(dt) { + return true + } + return false +} + +// badVoidPointerTypedef is like badPointerTypeDef, but for "void *" typedefs that should be _cgopackage.Incomplete. +func (c *typeConv) badVoidPointerTypedef(dt *dwarf.TypedefType) bool { + // Match the Windows HANDLE type (#42018). + if goos != "windows" || dt.Name != "HANDLE" { + return false + } + // Check that the typedef is "typedef void *". + if ptr, ok := dt.Type.(*dwarf.PtrType); ok { + if _, ok := ptr.Type.(*dwarf.VoidType); ok { + return true + } + } + return false +} + +// badStructPointerTypedef is like badVoidPointerTypedef but for structs. +func (c *typeConv) badStructPointerTypedef(name string, dt *dwarf.StructType) bool { + // Windows handle types can all potentially contain non-pointers. + // badVoidPointerTypedef handles the "void *" HANDLE type, but other + // handles are defined as + // + // struct __{int unused;}; typedef struct __ *name; + // + // by the DECLARE_HANDLE macro in STRICT mode. 
The macro is declared in + // the Windows ntdef.h header, + // + // https://github.com/tpn/winsdk-10/blob/master/Include/10.0.16299.0/shared/ntdef.h#L779 + if goos != "windows" { + return false + } + if len(dt.Field) != 1 { + return false + } + if dt.StructName != name+"__" { + return false + } + if f := dt.Field[0]; f.Name != "unused" || f.Type.Common().Name != "int" { + return false + } + return true +} + +// baseBadPointerTypedef reports whether the base of a chain of typedefs is a bad typedef +// as badPointerTypedef reports. +func (c *typeConv) baseBadPointerTypedef(dt *dwarf.TypedefType) bool { + for { + if t, ok := dt.Type.(*dwarf.TypedefType); ok { + dt = t + continue + } + break + } + return c.badPointerTypedef(dt) +} + +func (c *typeConv) badCFType(dt *dwarf.TypedefType) bool { + // The real bad types are CFNumberRef and CFDateRef. + // Sometimes non-pointers are stored in these types. + // CFTypeRef is a supertype of those, so it can have bad pointers in it as well. + // We return true for the other *Ref types just so casting between them is easier. + // We identify the correct set of types as those ending in Ref and for which + // there exists a corresponding GetTypeID function. + // See comment below for details about the bad pointers. + if goos != "darwin" && goos != "ios" { + return false + } + s := dt.Name + if !strings.HasSuffix(s, "Ref") { + return false + } + s = s[:len(s)-3] + if s == "CFType" { + return true + } + if c.getTypeIDs[s] { + return true + } + if i := strings.Index(s, "Mutable"); i >= 0 && c.getTypeIDs[s[:i]+s[i+7:]] { + // Mutable and immutable variants share a type ID. + return true + } + return false +} + +// Comment from Darwin's CFInternal.h +/* +// Tagged pointer support +// Low-bit set means tagged object, next 3 bits (currently) +// define the tagged object class, next 4 bits are for type +// information for the specific tagged object class. 
Thus, +// the low byte is for type info, and the rest of a pointer +// (32 or 64-bit) is for payload, whatever the tagged class. +// +// Note that the specific integers used to identify the +// specific tagged classes can and will change from release +// to release (that's why this stuff is in CF*Internal*.h), +// as can the definition of type info vs payload above. +// +#if __LP64__ +#define CF_IS_TAGGED_OBJ(PTR) ((uintptr_t)(PTR) & 0x1) +#define CF_TAGGED_OBJ_TYPE(PTR) ((uintptr_t)(PTR) & 0xF) +#else +#define CF_IS_TAGGED_OBJ(PTR) 0 +#define CF_TAGGED_OBJ_TYPE(PTR) 0 +#endif + +enum { + kCFTaggedObjectID_Invalid = 0, + kCFTaggedObjectID_Atom = (0 << 1) + 1, + kCFTaggedObjectID_Undefined3 = (1 << 1) + 1, + kCFTaggedObjectID_Undefined2 = (2 << 1) + 1, + kCFTaggedObjectID_Integer = (3 << 1) + 1, + kCFTaggedObjectID_DateTS = (4 << 1) + 1, + kCFTaggedObjectID_ManagedObjectID = (5 << 1) + 1, // Core Data + kCFTaggedObjectID_Date = (6 << 1) + 1, + kCFTaggedObjectID_Undefined7 = (7 << 1) + 1, +}; +*/ + +func (c *typeConv) badJNI(dt *dwarf.TypedefType) bool { + // In Dalvik and ART, the jobject type in the JNI interface of the JVM has the + // property that it is sometimes (always?) a small integer instead of a real pointer. + // Note: although only the android JVMs are bad in this respect, we declare the JNI types + // bad regardless of platform, so the same Go code compiles on both android and non-android. + if parent, ok := jniTypes[dt.Name]; ok { + // Try to make sure we're talking about a JNI type, not just some random user's + // type that happens to use the same name. + // C doesn't have the notion of a package, so it's hard to be certain. + + // Walk up to jobject, checking each typedef on the way. 
+ w := dt + for parent != "" { + t, ok := w.Type.(*dwarf.TypedefType) + if !ok || t.Name != parent { + return false + } + w = t + parent, ok = jniTypes[w.Name] + if !ok { + return false + } + } + + // Check that the typedef is either: + // 1: + // struct _jobject; + // typedef struct _jobject *jobject; + // 2: (in NDK16 in C++) + // class _jobject {}; + // typedef _jobject* jobject; + // 3: (in NDK16 in C) + // typedef void* jobject; + if ptr, ok := w.Type.(*dwarf.PtrType); ok { + switch v := ptr.Type.(type) { + case *dwarf.VoidType: + return true + case *dwarf.StructType: + if v.StructName == "_jobject" && len(v.Field) == 0 { + switch v.Kind { + case "struct": + if v.Incomplete { + return true + } + case "class": + if !v.Incomplete { + return true + } + } + } + } + } + } + return false +} + +func (c *typeConv) badEGLType(dt *dwarf.TypedefType) bool { + if dt.Name != "EGLDisplay" && dt.Name != "EGLConfig" { + return false + } + // Check that the typedef is "typedef void *". + if ptr, ok := dt.Type.(*dwarf.PtrType); ok { + if _, ok := ptr.Type.(*dwarf.VoidType); ok { + return true + } + } + return false +} + +// jniTypes maps from JNI types that we want to be uintptrs, to the underlying type to which +// they are mapped. The base "jobject" maps to the empty string. 
+var jniTypes = map[string]string{ + "jobject": "", + "jclass": "jobject", + "jthrowable": "jobject", + "jstring": "jobject", + "jarray": "jobject", + "jbooleanArray": "jarray", + "jbyteArray": "jarray", + "jcharArray": "jarray", + "jshortArray": "jarray", + "jintArray": "jarray", + "jlongArray": "jarray", + "jfloatArray": "jarray", + "jdoubleArray": "jarray", + "jobjectArray": "jarray", + "jweak": "jobject", +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/godefs.go b/platform/dbops/binaries/go/go/src/cmd/cgo/godefs.go new file mode 100644 index 0000000000000000000000000000000000000000..f62867053f58211c680c2a186a29aa9b680794ac --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/godefs.go @@ -0,0 +1,170 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "go/ast" + "go/printer" + "go/token" + "os" + "path/filepath" + "strings" +) + +// godefs returns the output for -godefs mode. +func (p *Package) godefs(f *File, args []string) string { + var buf strings.Builder + + fmt.Fprintf(&buf, "// Code generated by cmd/cgo -godefs; DO NOT EDIT.\n") + fmt.Fprintf(&buf, "// %s %s\n", filepath.Base(args[0]), strings.Join(args[1:], " ")) + fmt.Fprintf(&buf, "\n") + + override := make(map[string]string) + + // Allow source file to specify override mappings. 
+ // For example, the socket data structures refer + // to in_addr and in_addr6 structs but we want to be + // able to treat them as byte arrays, so the godefs + // inputs in package syscall say + // + // // +godefs map struct_in_addr [4]byte + // // +godefs map struct_in_addr6 [16]byte + // + for _, g := range f.Comments { + for _, c := range g.List { + i := strings.Index(c.Text, "+godefs map") + if i < 0 { + continue + } + s := strings.TrimSpace(c.Text[i+len("+godefs map"):]) + i = strings.Index(s, " ") + if i < 0 { + fmt.Fprintf(os.Stderr, "invalid +godefs map comment: %s\n", c.Text) + continue + } + override["_Ctype_"+strings.TrimSpace(s[:i])] = strings.TrimSpace(s[i:]) + } + } + for _, n := range f.Name { + if s := override[n.Go]; s != "" { + override[n.Mangle] = s + } + } + + // Otherwise, if the source file says type T C.whatever, + // use "T" as the mangling of C.whatever, + // except in the definition (handled at end of function). + refName := make(map[*ast.Expr]*Name) + for _, r := range f.Ref { + refName[r.Expr] = r.Name + } + for _, d := range f.AST.Decls { + d, ok := d.(*ast.GenDecl) + if !ok || d.Tok != token.TYPE { + continue + } + for _, s := range d.Specs { + s := s.(*ast.TypeSpec) + n := refName[&s.Type] + if n != nil && n.Mangle != "" { + override[n.Mangle] = s.Name.Name + } + } + } + + // Extend overrides using typedefs: + // If we know that C.xxx should format as T + // and xxx is a typedef for yyy, make C.yyy format as T. + for typ, def := range typedef { + if new := override[typ]; new != "" { + if id, ok := def.Go.(*ast.Ident); ok { + override[id.Name] = new + } + } + } + + // Apply overrides. + for old, new := range override { + if id := goIdent[old]; id != nil { + id.Name = new + } + } + + // Any names still using the _C syntax are not going to compile, + // although in general we don't know whether they all made it + // into the file, so we can't warn here. 
+ // + // The most common case is union types, which begin with + // _Ctype_union and for which typedef[name] is a Go byte + // array of the appropriate size (such as [4]byte). + // Substitute those union types with byte arrays. + for name, id := range goIdent { + if id.Name == name && strings.Contains(name, "_Ctype_union") { + if def := typedef[name]; def != nil { + id.Name = gofmt(def) + } + } + } + + conf.Fprint(&buf, fset, f.AST) + + return buf.String() +} + +var gofmtBuf strings.Builder + +// gofmt returns the gofmt-formatted string for an AST node. +func gofmt(n interface{}) string { + gofmtBuf.Reset() + err := printer.Fprint(&gofmtBuf, fset, n) + if err != nil { + return "<" + err.Error() + ">" + } + return gofmtBuf.String() +} + +// gofmtLineReplacer is used to put a gofmt-formatted string for an +// AST expression onto a single line. The lexer normally inserts a +// semicolon at each newline, so we can replace newline with semicolon. +// However, we can't do that in cases where the lexer would not insert +// a semicolon. We only have to worry about cases that can occur in an +// expression passed through gofmt, which means composite literals and +// (due to the printer possibly inserting newlines because of position +// information) operators. +var gofmtLineReplacer = strings.NewReplacer( + // Want to replace \n without ; after everything from + // https://golang.org/ref/spec#Operators_and_punctuation + // EXCEPT ++ -- ) ] } + "++\n", "++;", + "--\n", "--;", + + "+\n", "+ ", + "-\n", "- ", + "*\n", "* ", + "/\n", "/ ", + "%\n", "% ", + "&\n", "& ", + "|\n", "| ", + "^\n", "^ ", + "<\n", "< ", + ">\n", "> ", + "=\n", "= ", + "!\n", "! ", // not possible in gofmt today + "(\n", "(", + "[\n", "[", // not possible in gofmt today + "{\n", "{", + ",\n", ",", + ".\n", ". ", + ":\n", ": ", // not possible in gofmt today + + "\n", ";", +) + +// gofmtLine returns the gofmt-formatted string for an AST node, +// ensuring that it is on a single line. 
+func gofmtLine(n interface{}) string { + return gofmtLineReplacer.Replace(gofmt(n)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/cgotest/overlaydir.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/cgotest/overlaydir.go new file mode 100644 index 0000000000000000000000000000000000000000..c6b161545dfe067e95f006c6303a196eb8865f61 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/cgotest/overlaydir.go @@ -0,0 +1,75 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +import ( + "io" + "os" + "path/filepath" + "strings" +) + +// OverlayDir makes a minimal-overhead copy of srcRoot in which new files may be added. +func OverlayDir(dstRoot, srcRoot string) error { + dstRoot = filepath.Clean(dstRoot) + if err := os.MkdirAll(dstRoot, 0777); err != nil { + return err + } + + srcRoot, err := filepath.Abs(srcRoot) + if err != nil { + return err + } + + return filepath.Walk(srcRoot, func(srcPath string, info os.FileInfo, err error) error { + if err != nil || srcPath == srcRoot { + return err + } + + suffix := strings.TrimPrefix(srcPath, srcRoot) + for len(suffix) > 0 && suffix[0] == filepath.Separator { + suffix = suffix[1:] + } + dstPath := filepath.Join(dstRoot, suffix) + + perm := info.Mode() & os.ModePerm + if info.Mode()&os.ModeSymlink != 0 { + info, err = os.Stat(srcPath) + if err != nil { + return err + } + perm = info.Mode() & os.ModePerm + } + + // Always copy directories (don't symlink them). + // If we add a file in the overlay, we don't want to add it in the original. + if info.IsDir() { + return os.MkdirAll(dstPath, perm|0200) + } + + // If the OS supports symlinks, use them instead of copying bytes. + if err := os.Symlink(srcPath, dstPath); err == nil { + return nil + } + + // Otherwise, copy the bytes. 
+ src, err := os.Open(srcPath) + if err != nil { + return err + } + defer src.Close() + + dst, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm) + if err != nil { + return err + } + + _, err = io.Copy(dst, src) + if closeErr := dst.Close(); err == nil { + err = closeErr + } + return err + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/swig_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/swig_test.go new file mode 100644 index 0000000000000000000000000000000000000000..923378b2dd8fb0569a0af674bdc0d23e3db2a7b7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/swig_test.go @@ -0,0 +1,160 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package swig + +import ( + "cmd/internal/quoted" + "internal/testenv" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "testing" +) + +func TestStdio(t *testing.T) { + testenv.MustHaveCGO(t) + mustHaveSwig(t) + run(t, "testdata/stdio", false) +} + +func TestCall(t *testing.T) { + testenv.MustHaveCGO(t) + mustHaveSwig(t) + mustHaveCxx(t) + run(t, "testdata/callback", false, "Call") + t.Run("lto", func(t *testing.T) { run(t, "testdata/callback", true, "Call") }) +} + +func TestCallback(t *testing.T) { + testenv.MustHaveCGO(t) + mustHaveSwig(t) + mustHaveCxx(t) + run(t, "testdata/callback", false, "Callback") + t.Run("lto", func(t *testing.T) { run(t, "testdata/callback", true, "Callback") }) +} + +func run(t *testing.T, dir string, lto bool, args ...string) { + runArgs := append([]string{"run", "."}, args...) + cmd := exec.Command("go", runArgs...) + cmd.Dir = dir + if lto { + // On the builders we're using the default /usr/bin/ld, but + // that has problems when asking for LTO in particular. Force + // use of lld, which ships with our clang installation. 
+ extraLDFlags := "" + if strings.Contains(testenv.Builder(), "clang") { + extraLDFlags += " -fuse-ld=lld" + } + const cflags = "-flto -Wno-lto-type-mismatch -Wno-unknown-warning-option" + cmd.Env = append(cmd.Environ(), + "CGO_CFLAGS="+cflags, + "CGO_CXXFLAGS="+cflags, + "CGO_LDFLAGS="+cflags+extraLDFlags) + } + out, err := cmd.CombinedOutput() + if string(out) != "OK\n" { + t.Errorf("%s", string(out)) + } + if err != nil { + t.Errorf("%s", err) + } +} + +func mustHaveCxx(t *testing.T) { + // Ask the go tool for the CXX it's configured to use. + cxx, err := exec.Command("go", "env", "CXX").CombinedOutput() + if err != nil { + t.Fatalf("go env CXX failed: %s", err) + } + args, err := quoted.Split(string(cxx)) + if err != nil { + t.Skipf("could not parse 'go env CXX' output %q: %s", string(cxx), err) + } + if len(args) == 0 { + t.Skip("no C++ compiler") + } + testenv.MustHaveExecPath(t, string(args[0])) +} + +var ( + swigOnce sync.Once + haveSwig bool +) + +func mustHaveSwig(t *testing.T) { + swigOnce.Do(func() { + mustHaveSwigOnce(t) + haveSwig = true + }) + // The first call will skip t with a nice message. On later calls, we just skip. + if !haveSwig { + t.Skip("swig not found") + } +} + +func mustHaveSwigOnce(t *testing.T) { + swig, err := exec.LookPath("swig") + if err != nil { + t.Skipf("swig not in PATH: %s", err) + } + + // Check that swig was installed with Go support by checking + // that a go directory exists inside the swiglib directory. + // See https://golang.org/issue/23469. + output, err := exec.Command(swig, "-go", "-swiglib").Output() + if err != nil { + t.Skip("swig is missing Go support") + } + swigDir := strings.TrimSpace(string(output)) + + _, err = os.Stat(filepath.Join(swigDir, "go")) + if err != nil { + t.Skip("swig is missing Go support") + } + + // Check that swig has a new enough version. + // See https://golang.org/issue/22858. 
+ out, err := exec.Command(swig, "-version").CombinedOutput() + if err != nil { + t.Skipf("failed to get swig version:%s\n%s", err, string(out)) + } + + re := regexp.MustCompile(`[vV]ersion +(\d+)([.]\d+)?([.]\d+)?`) + matches := re.FindSubmatch(out) + if matches == nil { + // Can't find version number; hope for the best. + t.Logf("failed to find swig version, continuing") + return + } + + var parseError error + atoi := func(s string) int { + x, err := strconv.Atoi(s) + if err != nil && parseError == nil { + parseError = err + } + return x + } + var major, minor, patch int + major = atoi(string(matches[1])) + if len(matches[2]) > 0 { + minor = atoi(string(matches[2][1:])) + } + if len(matches[3]) > 0 { + patch = atoi(string(matches[3][1:])) + } + if parseError != nil { + t.Logf("error parsing swig version %q, continuing anyway: %s", string(matches[0]), parseError) + return + } + t.Logf("found swig version %d.%d.%d", major, minor, patch) + if major < 3 || (major == 3 && minor == 0 && patch < 6) { + t.Skip("test requires swig 3.0.6 or later") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/testdata/callback/main.cc b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/testdata/callback/main.cc new file mode 100644 index 0000000000000000000000000000000000000000..7de917cde45782430734543cd15d1bf60f716c7e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/testdata/callback/main.cc @@ -0,0 +1,15 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This .cc file will be automatically compiled by the go tool and +// included in the package. 
+ +#include +#include "main.h" + +std::string Caller::call() { + if (callback_ != 0) + return callback_->run(); + return ""; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/testdata/callback/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/testdata/callback/main.go new file mode 100644 index 0000000000000000000000000000000000000000..73034a0c7c25e920504a6b7162429a0cfe40a66c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/testdata/callback/main.go @@ -0,0 +1,60 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "os" +) + +func main() { + if len(os.Args) != 2 { + fatal("usage: callback testname") + } + switch os.Args[1] { + default: + fatal("unknown test %q", os.Args[1]) + case "Call": + testCall() + case "Callback": + testCallback() + } + println("OK") +} + +func fatal(f string, args ...any) { + fmt.Fprintln(os.Stderr, fmt.Sprintf(f, args...)) + os.Exit(1) +} + +type GoCallback struct{} + +func (p *GoCallback) Run() string { + return "GoCallback.Run" +} + +func testCall() { + c := NewCaller() + cb := NewCallback() + + c.SetCallback(cb) + s := c.Call() + if s != "Callback::run" { + fatal("unexpected string from Call: %q", s) + } + c.DelCallback() +} + +func testCallback() { + c := NewCaller() + cb := NewDirectorCallback(&GoCallback{}) + c.SetCallback(cb) + s := c.Call() + if s != "GoCallback.Run" { + fatal("unexpected string from Call with callback: %q", s) + } + c.DelCallback() + DeleteDirectorCallback(cb) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/testdata/callback/main.h b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/testdata/callback/main.h new file mode 100644 index 0000000000000000000000000000000000000000..4b661060d83ad14b83ef1f6f50a72e937139378d --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/testdata/callback/main.h @@ -0,0 +1,20 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +class Callback { +public: + virtual ~Callback() { } + virtual std::string run() { return "Callback::run"; } +}; + +class Caller { +private: + Callback *callback_; +public: + Caller(): callback_(0) { } + ~Caller() { delCallback(); } + void delCallback() { delete callback_; callback_ = 0; } + void setCallback(Callback *cb) { delCallback(); callback_ = cb; } + std::string call(); +}; diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/testdata/callback/main.swigcxx b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/testdata/callback/main.swigcxx new file mode 100644 index 0000000000000000000000000000000000000000..0fd73d63623126d995c62bb8a3bb13654b666081 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/testdata/callback/main.swigcxx @@ -0,0 +1,18 @@ +/* Copyright 2011 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. */ + +/* An example of writing a C++ virtual function in Go. */ + +%module(directors="1") callback + +%{ +#include +#include "main.h" +%} + +%include "std_string.i" + +%feature("director"); + +%include "main.h" diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/testdata/stdio/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/testdata/stdio/main.go new file mode 100644 index 0000000000000000000000000000000000000000..0296dd3224d7e415f5f6e425151abdb7bf7d15a1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/testdata/stdio/main.go @@ -0,0 +1,45 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is here just to cause problems. +// main.swig turns into a file also named main.go. +// Make sure cmd/go keeps them separate +// when both are passed to cgo. + +package main + +//int F(void) { return 1; } +import "C" +import ( + "fmt" + "os" +) + +func F() int { return int(C.F()) } + +func main() { + if x := int(C.F()); x != 1 { + fatal("x = %d, want 1", x) + } + + // Open this file itself and verify that the first few characters are + // as expected. + f := Fopen("main.go", "r") + if f.Swigcptr() == 0 { + fatal("fopen failed") + } + if Fgetc(f) != '/' || Fgetc(f) != '/' || Fgetc(f) != ' ' || Fgetc(f) != 'C' { + fatal("read unexpected characters") + } + if Fclose(f) != 0 { + fatal("fclose failed") + } + + println("OK") +} + +func fatal(f string, args ...any) { + fmt.Fprintln(os.Stderr, fmt.Sprintf(f, args...)) + os.Exit(1) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/testdata/stdio/main.swig b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/testdata/stdio/main.swig new file mode 100644 index 0000000000000000000000000000000000000000..b28ae0a6b7ab6c85811048c977ed07d7028dfd3d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/swig/testdata/stdio/main.swig @@ -0,0 +1,24 @@ +/* Copyright 2011 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. */ + +/* A trivial example of wrapping a C library using SWIG. 
*/ + +%{ +#include +#include +%} + +%typemap(gotype) const char * "string" +%typemap(in) const char * %{ + $1 = malloc($input.n + 1); + memcpy($1, $input.p, $input.n); + $1[$input.n] = '\0'; +%} +%typemap(freearg) const char * %{ + free($1); +%} + +FILE *fopen(const char *name, const char *mode); +int fclose(FILE *); +int fgetc(FILE *); diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/backdoor.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/backdoor.go new file mode 100644 index 0000000000000000000000000000000000000000..6fb33d66cb2cf43f72acff28e46bfbb343a25b67 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/backdoor.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +import _ "unsafe" + +//go:linkname lockedOSThread runtime.lockedOSThread +//extern runtime_lockedOSThread +func lockedOSThread() bool diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/buildid_linux.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/buildid_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..84d3edb664eb25534e998f54c1402dfd1cbc2418 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/buildid_linux.go @@ -0,0 +1,78 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +// Test that we have no more than one build ID. In the past we used +// to generate a separate build ID for each package using cgo, and the +// linker concatenated them all. We don't want that--we only want +// one. 
+ +import ( + "bytes" + "debug/elf" + "os" + "testing" +) + +func testBuildID(t *testing.T) { + f, err := elf.Open("/proc/self/exe") + if err != nil { + if os.IsNotExist(err) { + t.Skip("no /proc/self/exe") + } + t.Fatal("opening /proc/self/exe: ", err) + } + defer f.Close() + + c := 0 +sections: + for i, s := range f.Sections { + if s.Type != elf.SHT_NOTE { + continue + } + + d, err := s.Data() + if err != nil { + t.Logf("reading data of note section %d: %v", i, err) + continue + } + + for len(d) > 0 { + + // ELF standards differ as to the sizes in + // note sections. Both the GNU linker and + // gold always generate 32-bit sizes, so that + // is what we assume here. + + if len(d) < 12 { + t.Logf("note section %d too short (%d < 12)", i, len(d)) + continue sections + } + + namesz := f.ByteOrder.Uint32(d) + descsz := f.ByteOrder.Uint32(d[4:]) + typ := f.ByteOrder.Uint32(d[8:]) + + an := (namesz + 3) &^ 3 + ad := (descsz + 3) &^ 3 + + if int(12+an+ad) > len(d) { + t.Logf("note section %d too short for header (%d < 12 + align(%d,4) + align(%d,4))", i, len(d), namesz, descsz) + continue sections + } + + // 3 == NT_GNU_BUILD_ID + if typ == 3 && namesz == 4 && bytes.Equal(d[12:16], []byte("GNU\000")) { + c++ + } + + d = d[12+an+ad:] + } + } + + if c > 1 { + t.Errorf("found %d build ID notes", c) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/callback.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/callback.go new file mode 100644 index 0000000000000000000000000000000000000000..478bf8294af3a5d855b16fdb6f0e6fe5993044ae --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/callback.go @@ -0,0 +1,1782 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cgotest + +/* +void callback(void *f); +void callGoFoo(void); +void callGoStackCheck(void); +void callPanic(void); +int callGoReturnVal(void); +int returnAfterGrow(void); +int returnAfterGrowFromGo(void); +void callGoWithString(void); +*/ +import "C" + +import ( + "path" + "runtime" + "strings" + "sync" + "testing" + "unsafe" +) + +// Pass a func value from nestedCall to goCallback using an integer token. +var callbackMutex sync.Mutex +var callbackToken int +var callbackFuncs = make(map[int]func()) + +// nestedCall calls into C, back into Go, and finally to f. +func nestedCall(f func()) { + // callback(x) calls goCallback(x) + callbackMutex.Lock() + callbackToken++ + i := callbackToken + callbackFuncs[i] = f + callbackMutex.Unlock() + + // Pass the address of i because the C function was written to + // take a pointer. We could pass an int if we felt like + // rewriting the C code. + C.callback(unsafe.Pointer(&i)) + + callbackMutex.Lock() + delete(callbackFuncs, i) + callbackMutex.Unlock() +} + +//export goCallback +func goCallback(p unsafe.Pointer) { + i := *(*int)(p) + + callbackMutex.Lock() + f := callbackFuncs[i] + callbackMutex.Unlock() + + if f == nil { + panic("missing callback function") + } + f() +} + +func testCallback(t *testing.T) { + var x = false + nestedCall(func() { x = true }) + if !x { + t.Fatal("nestedCall did not call func") + } +} + +func testCallbackGC(t *testing.T) { + nestedCall(runtime.GC) +} + +func testCallbackPanic(t *testing.T) { + // Make sure panic during callback unwinds properly. 
+ if lockedOSThread() { + t.Fatal("locked OS thread on entry to TestCallbackPanic") + } + defer func() { + s := recover() + if s == nil { + t.Fatal("did not panic") + } + if s.(string) != "callback panic" { + t.Fatal("wrong panic:", s) + } + if lockedOSThread() { + t.Fatal("locked OS thread on exit from TestCallbackPanic") + } + }() + nestedCall(func() { panic("callback panic") }) + panic("nestedCall returned") +} + +func testCallbackPanicLoop(t *testing.T) { + // Make sure we don't blow out m->g0 stack. + for i := 0; i < 100000; i++ { + testCallbackPanic(t) + } +} + +func testCallbackPanicLocked(t *testing.T) { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + if !lockedOSThread() { + t.Fatal("runtime.LockOSThread didn't") + } + defer func() { + s := recover() + if s == nil { + t.Fatal("did not panic") + } + if s.(string) != "callback panic" { + t.Fatal("wrong panic:", s) + } + if !lockedOSThread() { + t.Fatal("lost lock on OS thread after panic") + } + }() + nestedCall(func() { panic("callback panic") }) + panic("nestedCall returned") +} + +// Callback with zero arguments used to make the stack misaligned, +// which broke the garbage collector and other things. +func testZeroArgCallback(t *testing.T) { + defer func() { + s := recover() + if s != nil { + t.Fatal("panic during callback:", s) + } + }() + C.callGoFoo() +} + +//export goFoo +func goFoo() { + x := 1 + for i := 0; i < 10000; i++ { + // variadic call mallocs + writes to + variadic(x, x, x) + if x != 1 { + panic("bad x") + } + } +} + +func variadic(x ...interface{}) {} + +func testBlocking(t *testing.T) { + c := make(chan int) + go func() { + for i := 0; i < 10; i++ { + c <- <-c + } + }() + nestedCall(func() { + for i := 0; i < 10; i++ { + c <- i + if j := <-c; j != i { + t.Errorf("out of sync %d != %d", j, i) + } + } + }) +} + +// Test that the stack can be unwound through a call out and call back +// into Go. 
+func testCallbackCallers(t *testing.T) { + if runtime.Compiler != "gc" { + // The exact function names are not going to be the same. + t.Skip("skipping for non-gc toolchain") + } + pc := make([]uintptr, 100) + n := 0 + name := []string{ + "runtime.cgocallbackg1", + "runtime.cgocallbackg", + "runtime.cgocallback", + "runtime.systemstack_switch", + "runtime.cgocall", + "test._Cfunc_callback", + "test.nestedCall.func1", + "test.nestedCall", + "test.testCallbackCallers", + "test.TestCallbackCallers", + "testing.tRunner", + "runtime.goexit", + } + nestedCall(func() { + n = runtime.Callers(4, pc) + }) + if n != len(name) { + t.Errorf("expected %d frames, got %d", len(name), n) + } + for i := 0; i < n; i++ { + f := runtime.FuncForPC(pc[i] - 1) // TODO: use runtime.CallersFrames + if f == nil { + t.Fatalf("expected non-nil Func for pc %d", pc[i]) + } + fname := f.Name() + // Remove the prepended pathname from automatically + // generated cgo function names. + if strings.HasPrefix(fname, "_") { + fname = path.Base(f.Name()[1:]) + } + // In module mode, this package has a fully-qualified import path. + // Remove it if present. + fname = strings.TrimPrefix(fname, "cmd/cgo/internal/") + + namei := "" + if i < len(name) { + namei = name[i] + } + if fname != namei { + t.Errorf("stk[%d] = %q, want %q", i, fname, namei) + } + } +} + +func testPanicFromC(t *testing.T) { + defer func() { + r := recover() + if r == nil { + t.Fatal("did not panic") + } + if r.(string) != "panic from C" { + t.Fatal("wrong panic:", r) + } + }() + C.callPanic() +} + +// Test that C code can return a value if it calls a Go function that +// causes a stack copy. +func testReturnAfterGrow(t *testing.T) { + // Use a new goroutine so that we get a small stack. 
+ c := make(chan int) + go func() { + c <- int(C.returnAfterGrow()) + }() + if got, want := <-c, 123456; got != want { + t.Errorf("got %d want %d", got, want) + } +} + +// Test that we can return a value from Go->C->Go if the Go code +// causes a stack copy. +func testReturnAfterGrowFromGo(t *testing.T) { + // Use a new goroutine so that we get a small stack. + c := make(chan int) + go func() { + c <- int(C.returnAfterGrowFromGo()) + }() + if got, want := <-c, 129*128/2; got != want { + t.Errorf("got %d want %d", got, want) + } +} + +//export goReturnVal +func goReturnVal() (r C.int) { + // Force a stack copy. + var f func(int) int + f = func(i int) int { + var buf [256]byte + use(buf[:]) + if i == 0 { + return 0 + } + return i + f(i-1) + } + r = C.int(f(128)) + return +} + +// Test that C can pass in a Go string from a string constant. +func testCallGoWithString(t *testing.T) { + C.callGoWithString() + want := "string passed from C to Go" + if stringFromGo != want { + t.Errorf("string passed through C is %s, want %s", stringFromGo, want) + } +} + +var stringFromGo string + +//export goWithString +func goWithString(s string) { + stringFromGo = s +} + +func testCallbackStack(t *testing.T) { + // Make cgo call and callback with different amount of stack available. + // We do not do any explicit checks, just ensure that it does not crash. 
+ for _, f := range splitTests { + f() + } +} + +//export goStackCheck +func goStackCheck() { + // use some stack memory to trigger split stack check + var buf [256]byte + use(buf[:]) +} + +var Used byte + +func use(buf []byte) { + for _, c := range buf { + Used += c + } +} + +var splitTests = []func(){ + // Edit .+1,/^}/-1|seq 4 4 5000 | sed 's/.*/ stack&,/' | fmt + stack4, stack8, stack12, stack16, stack20, stack24, stack28, + stack32, stack36, stack40, stack44, stack48, stack52, stack56, + stack60, stack64, stack68, stack72, stack76, stack80, stack84, + stack88, stack92, stack96, stack100, stack104, stack108, stack112, + stack116, stack120, stack124, stack128, stack132, stack136, + stack140, stack144, stack148, stack152, stack156, stack160, + stack164, stack168, stack172, stack176, stack180, stack184, + stack188, stack192, stack196, stack200, stack204, stack208, + stack212, stack216, stack220, stack224, stack228, stack232, + stack236, stack240, stack244, stack248, stack252, stack256, + stack260, stack264, stack268, stack272, stack276, stack280, + stack284, stack288, stack292, stack296, stack300, stack304, + stack308, stack312, stack316, stack320, stack324, stack328, + stack332, stack336, stack340, stack344, stack348, stack352, + stack356, stack360, stack364, stack368, stack372, stack376, + stack380, stack384, stack388, stack392, stack396, stack400, + stack404, stack408, stack412, stack416, stack420, stack424, + stack428, stack432, stack436, stack440, stack444, stack448, + stack452, stack456, stack460, stack464, stack468, stack472, + stack476, stack480, stack484, stack488, stack492, stack496, + stack500, stack504, stack508, stack512, stack516, stack520, + stack524, stack528, stack532, stack536, stack540, stack544, + stack548, stack552, stack556, stack560, stack564, stack568, + stack572, stack576, stack580, stack584, stack588, stack592, + stack596, stack600, stack604, stack608, stack612, stack616, + stack620, stack624, stack628, stack632, stack636, stack640, + 
stack644, stack648, stack652, stack656, stack660, stack664, + stack668, stack672, stack676, stack680, stack684, stack688, + stack692, stack696, stack700, stack704, stack708, stack712, + stack716, stack720, stack724, stack728, stack732, stack736, + stack740, stack744, stack748, stack752, stack756, stack760, + stack764, stack768, stack772, stack776, stack780, stack784, + stack788, stack792, stack796, stack800, stack804, stack808, + stack812, stack816, stack820, stack824, stack828, stack832, + stack836, stack840, stack844, stack848, stack852, stack856, + stack860, stack864, stack868, stack872, stack876, stack880, + stack884, stack888, stack892, stack896, stack900, stack904, + stack908, stack912, stack916, stack920, stack924, stack928, + stack932, stack936, stack940, stack944, stack948, stack952, + stack956, stack960, stack964, stack968, stack972, stack976, + stack980, stack984, stack988, stack992, stack996, stack1000, + stack1004, stack1008, stack1012, stack1016, stack1020, stack1024, + stack1028, stack1032, stack1036, stack1040, stack1044, stack1048, + stack1052, stack1056, stack1060, stack1064, stack1068, stack1072, + stack1076, stack1080, stack1084, stack1088, stack1092, stack1096, + stack1100, stack1104, stack1108, stack1112, stack1116, stack1120, + stack1124, stack1128, stack1132, stack1136, stack1140, stack1144, + stack1148, stack1152, stack1156, stack1160, stack1164, stack1168, + stack1172, stack1176, stack1180, stack1184, stack1188, stack1192, + stack1196, stack1200, stack1204, stack1208, stack1212, stack1216, + stack1220, stack1224, stack1228, stack1232, stack1236, stack1240, + stack1244, stack1248, stack1252, stack1256, stack1260, stack1264, + stack1268, stack1272, stack1276, stack1280, stack1284, stack1288, + stack1292, stack1296, stack1300, stack1304, stack1308, stack1312, + stack1316, stack1320, stack1324, stack1328, stack1332, stack1336, + stack1340, stack1344, stack1348, stack1352, stack1356, stack1360, + stack1364, stack1368, stack1372, stack1376, 
stack1380, stack1384, + stack1388, stack1392, stack1396, stack1400, stack1404, stack1408, + stack1412, stack1416, stack1420, stack1424, stack1428, stack1432, + stack1436, stack1440, stack1444, stack1448, stack1452, stack1456, + stack1460, stack1464, stack1468, stack1472, stack1476, stack1480, + stack1484, stack1488, stack1492, stack1496, stack1500, stack1504, + stack1508, stack1512, stack1516, stack1520, stack1524, stack1528, + stack1532, stack1536, stack1540, stack1544, stack1548, stack1552, + stack1556, stack1560, stack1564, stack1568, stack1572, stack1576, + stack1580, stack1584, stack1588, stack1592, stack1596, stack1600, + stack1604, stack1608, stack1612, stack1616, stack1620, stack1624, + stack1628, stack1632, stack1636, stack1640, stack1644, stack1648, + stack1652, stack1656, stack1660, stack1664, stack1668, stack1672, + stack1676, stack1680, stack1684, stack1688, stack1692, stack1696, + stack1700, stack1704, stack1708, stack1712, stack1716, stack1720, + stack1724, stack1728, stack1732, stack1736, stack1740, stack1744, + stack1748, stack1752, stack1756, stack1760, stack1764, stack1768, + stack1772, stack1776, stack1780, stack1784, stack1788, stack1792, + stack1796, stack1800, stack1804, stack1808, stack1812, stack1816, + stack1820, stack1824, stack1828, stack1832, stack1836, stack1840, + stack1844, stack1848, stack1852, stack1856, stack1860, stack1864, + stack1868, stack1872, stack1876, stack1880, stack1884, stack1888, + stack1892, stack1896, stack1900, stack1904, stack1908, stack1912, + stack1916, stack1920, stack1924, stack1928, stack1932, stack1936, + stack1940, stack1944, stack1948, stack1952, stack1956, stack1960, + stack1964, stack1968, stack1972, stack1976, stack1980, stack1984, + stack1988, stack1992, stack1996, stack2000, stack2004, stack2008, + stack2012, stack2016, stack2020, stack2024, stack2028, stack2032, + stack2036, stack2040, stack2044, stack2048, stack2052, stack2056, + stack2060, stack2064, stack2068, stack2072, stack2076, stack2080, + 
stack2084, stack2088, stack2092, stack2096, stack2100, stack2104, + stack2108, stack2112, stack2116, stack2120, stack2124, stack2128, + stack2132, stack2136, stack2140, stack2144, stack2148, stack2152, + stack2156, stack2160, stack2164, stack2168, stack2172, stack2176, + stack2180, stack2184, stack2188, stack2192, stack2196, stack2200, + stack2204, stack2208, stack2212, stack2216, stack2220, stack2224, + stack2228, stack2232, stack2236, stack2240, stack2244, stack2248, + stack2252, stack2256, stack2260, stack2264, stack2268, stack2272, + stack2276, stack2280, stack2284, stack2288, stack2292, stack2296, + stack2300, stack2304, stack2308, stack2312, stack2316, stack2320, + stack2324, stack2328, stack2332, stack2336, stack2340, stack2344, + stack2348, stack2352, stack2356, stack2360, stack2364, stack2368, + stack2372, stack2376, stack2380, stack2384, stack2388, stack2392, + stack2396, stack2400, stack2404, stack2408, stack2412, stack2416, + stack2420, stack2424, stack2428, stack2432, stack2436, stack2440, + stack2444, stack2448, stack2452, stack2456, stack2460, stack2464, + stack2468, stack2472, stack2476, stack2480, stack2484, stack2488, + stack2492, stack2496, stack2500, stack2504, stack2508, stack2512, + stack2516, stack2520, stack2524, stack2528, stack2532, stack2536, + stack2540, stack2544, stack2548, stack2552, stack2556, stack2560, + stack2564, stack2568, stack2572, stack2576, stack2580, stack2584, + stack2588, stack2592, stack2596, stack2600, stack2604, stack2608, + stack2612, stack2616, stack2620, stack2624, stack2628, stack2632, + stack2636, stack2640, stack2644, stack2648, stack2652, stack2656, + stack2660, stack2664, stack2668, stack2672, stack2676, stack2680, + stack2684, stack2688, stack2692, stack2696, stack2700, stack2704, + stack2708, stack2712, stack2716, stack2720, stack2724, stack2728, + stack2732, stack2736, stack2740, stack2744, stack2748, stack2752, + stack2756, stack2760, stack2764, stack2768, stack2772, stack2776, + stack2780, stack2784, 
stack2788, stack2792, stack2796, stack2800, + stack2804, stack2808, stack2812, stack2816, stack2820, stack2824, + stack2828, stack2832, stack2836, stack2840, stack2844, stack2848, + stack2852, stack2856, stack2860, stack2864, stack2868, stack2872, + stack2876, stack2880, stack2884, stack2888, stack2892, stack2896, + stack2900, stack2904, stack2908, stack2912, stack2916, stack2920, + stack2924, stack2928, stack2932, stack2936, stack2940, stack2944, + stack2948, stack2952, stack2956, stack2960, stack2964, stack2968, + stack2972, stack2976, stack2980, stack2984, stack2988, stack2992, + stack2996, stack3000, stack3004, stack3008, stack3012, stack3016, + stack3020, stack3024, stack3028, stack3032, stack3036, stack3040, + stack3044, stack3048, stack3052, stack3056, stack3060, stack3064, + stack3068, stack3072, stack3076, stack3080, stack3084, stack3088, + stack3092, stack3096, stack3100, stack3104, stack3108, stack3112, + stack3116, stack3120, stack3124, stack3128, stack3132, stack3136, + stack3140, stack3144, stack3148, stack3152, stack3156, stack3160, + stack3164, stack3168, stack3172, stack3176, stack3180, stack3184, + stack3188, stack3192, stack3196, stack3200, stack3204, stack3208, + stack3212, stack3216, stack3220, stack3224, stack3228, stack3232, + stack3236, stack3240, stack3244, stack3248, stack3252, stack3256, + stack3260, stack3264, stack3268, stack3272, stack3276, stack3280, + stack3284, stack3288, stack3292, stack3296, stack3300, stack3304, + stack3308, stack3312, stack3316, stack3320, stack3324, stack3328, + stack3332, stack3336, stack3340, stack3344, stack3348, stack3352, + stack3356, stack3360, stack3364, stack3368, stack3372, stack3376, + stack3380, stack3384, stack3388, stack3392, stack3396, stack3400, + stack3404, stack3408, stack3412, stack3416, stack3420, stack3424, + stack3428, stack3432, stack3436, stack3440, stack3444, stack3448, + stack3452, stack3456, stack3460, stack3464, stack3468, stack3472, + stack3476, stack3480, stack3484, stack3488, 
stack3492, stack3496, + stack3500, stack3504, stack3508, stack3512, stack3516, stack3520, + stack3524, stack3528, stack3532, stack3536, stack3540, stack3544, + stack3548, stack3552, stack3556, stack3560, stack3564, stack3568, + stack3572, stack3576, stack3580, stack3584, stack3588, stack3592, + stack3596, stack3600, stack3604, stack3608, stack3612, stack3616, + stack3620, stack3624, stack3628, stack3632, stack3636, stack3640, + stack3644, stack3648, stack3652, stack3656, stack3660, stack3664, + stack3668, stack3672, stack3676, stack3680, stack3684, stack3688, + stack3692, stack3696, stack3700, stack3704, stack3708, stack3712, + stack3716, stack3720, stack3724, stack3728, stack3732, stack3736, + stack3740, stack3744, stack3748, stack3752, stack3756, stack3760, + stack3764, stack3768, stack3772, stack3776, stack3780, stack3784, + stack3788, stack3792, stack3796, stack3800, stack3804, stack3808, + stack3812, stack3816, stack3820, stack3824, stack3828, stack3832, + stack3836, stack3840, stack3844, stack3848, stack3852, stack3856, + stack3860, stack3864, stack3868, stack3872, stack3876, stack3880, + stack3884, stack3888, stack3892, stack3896, stack3900, stack3904, + stack3908, stack3912, stack3916, stack3920, stack3924, stack3928, + stack3932, stack3936, stack3940, stack3944, stack3948, stack3952, + stack3956, stack3960, stack3964, stack3968, stack3972, stack3976, + stack3980, stack3984, stack3988, stack3992, stack3996, stack4000, + stack4004, stack4008, stack4012, stack4016, stack4020, stack4024, + stack4028, stack4032, stack4036, stack4040, stack4044, stack4048, + stack4052, stack4056, stack4060, stack4064, stack4068, stack4072, + stack4076, stack4080, stack4084, stack4088, stack4092, stack4096, + stack4100, stack4104, stack4108, stack4112, stack4116, stack4120, + stack4124, stack4128, stack4132, stack4136, stack4140, stack4144, + stack4148, stack4152, stack4156, stack4160, stack4164, stack4168, + stack4172, stack4176, stack4180, stack4184, stack4188, stack4192, + 
stack4196, stack4200, stack4204, stack4208, stack4212, stack4216, + stack4220, stack4224, stack4228, stack4232, stack4236, stack4240, + stack4244, stack4248, stack4252, stack4256, stack4260, stack4264, + stack4268, stack4272, stack4276, stack4280, stack4284, stack4288, + stack4292, stack4296, stack4300, stack4304, stack4308, stack4312, + stack4316, stack4320, stack4324, stack4328, stack4332, stack4336, + stack4340, stack4344, stack4348, stack4352, stack4356, stack4360, + stack4364, stack4368, stack4372, stack4376, stack4380, stack4384, + stack4388, stack4392, stack4396, stack4400, stack4404, stack4408, + stack4412, stack4416, stack4420, stack4424, stack4428, stack4432, + stack4436, stack4440, stack4444, stack4448, stack4452, stack4456, + stack4460, stack4464, stack4468, stack4472, stack4476, stack4480, + stack4484, stack4488, stack4492, stack4496, stack4500, stack4504, + stack4508, stack4512, stack4516, stack4520, stack4524, stack4528, + stack4532, stack4536, stack4540, stack4544, stack4548, stack4552, + stack4556, stack4560, stack4564, stack4568, stack4572, stack4576, + stack4580, stack4584, stack4588, stack4592, stack4596, stack4600, + stack4604, stack4608, stack4612, stack4616, stack4620, stack4624, + stack4628, stack4632, stack4636, stack4640, stack4644, stack4648, + stack4652, stack4656, stack4660, stack4664, stack4668, stack4672, + stack4676, stack4680, stack4684, stack4688, stack4692, stack4696, + stack4700, stack4704, stack4708, stack4712, stack4716, stack4720, + stack4724, stack4728, stack4732, stack4736, stack4740, stack4744, + stack4748, stack4752, stack4756, stack4760, stack4764, stack4768, + stack4772, stack4776, stack4780, stack4784, stack4788, stack4792, + stack4796, stack4800, stack4804, stack4808, stack4812, stack4816, + stack4820, stack4824, stack4828, stack4832, stack4836, stack4840, + stack4844, stack4848, stack4852, stack4856, stack4860, stack4864, + stack4868, stack4872, stack4876, stack4880, stack4884, stack4888, + stack4892, stack4896, 
stack4900, stack4904, stack4908, stack4912, + stack4916, stack4920, stack4924, stack4928, stack4932, stack4936, + stack4940, stack4944, stack4948, stack4952, stack4956, stack4960, + stack4964, stack4968, stack4972, stack4976, stack4980, stack4984, + stack4988, stack4992, stack4996, stack5000, +} + +// Edit .+1,$ | seq 4 4 5000 | sed 's/.*/func stack&() { var buf [&]byte; use(buf[:]); C.callGoStackCheck() }/' +func stack4() { var buf [4]byte; use(buf[:]); C.callGoStackCheck() } +func stack8() { var buf [8]byte; use(buf[:]); C.callGoStackCheck() } +func stack12() { var buf [12]byte; use(buf[:]); C.callGoStackCheck() } +func stack16() { var buf [16]byte; use(buf[:]); C.callGoStackCheck() } +func stack20() { var buf [20]byte; use(buf[:]); C.callGoStackCheck() } +func stack24() { var buf [24]byte; use(buf[:]); C.callGoStackCheck() } +func stack28() { var buf [28]byte; use(buf[:]); C.callGoStackCheck() } +func stack32() { var buf [32]byte; use(buf[:]); C.callGoStackCheck() } +func stack36() { var buf [36]byte; use(buf[:]); C.callGoStackCheck() } +func stack40() { var buf [40]byte; use(buf[:]); C.callGoStackCheck() } +func stack44() { var buf [44]byte; use(buf[:]); C.callGoStackCheck() } +func stack48() { var buf [48]byte; use(buf[:]); C.callGoStackCheck() } +func stack52() { var buf [52]byte; use(buf[:]); C.callGoStackCheck() } +func stack56() { var buf [56]byte; use(buf[:]); C.callGoStackCheck() } +func stack60() { var buf [60]byte; use(buf[:]); C.callGoStackCheck() } +func stack64() { var buf [64]byte; use(buf[:]); C.callGoStackCheck() } +func stack68() { var buf [68]byte; use(buf[:]); C.callGoStackCheck() } +func stack72() { var buf [72]byte; use(buf[:]); C.callGoStackCheck() } +func stack76() { var buf [76]byte; use(buf[:]); C.callGoStackCheck() } +func stack80() { var buf [80]byte; use(buf[:]); C.callGoStackCheck() } +func stack84() { var buf [84]byte; use(buf[:]); C.callGoStackCheck() } +func stack88() { var buf [88]byte; use(buf[:]); C.callGoStackCheck() } +func 
stack92() { var buf [92]byte; use(buf[:]); C.callGoStackCheck() } +func stack96() { var buf [96]byte; use(buf[:]); C.callGoStackCheck() } +func stack100() { var buf [100]byte; use(buf[:]); C.callGoStackCheck() } +func stack104() { var buf [104]byte; use(buf[:]); C.callGoStackCheck() } +func stack108() { var buf [108]byte; use(buf[:]); C.callGoStackCheck() } +func stack112() { var buf [112]byte; use(buf[:]); C.callGoStackCheck() } +func stack116() { var buf [116]byte; use(buf[:]); C.callGoStackCheck() } +func stack120() { var buf [120]byte; use(buf[:]); C.callGoStackCheck() } +func stack124() { var buf [124]byte; use(buf[:]); C.callGoStackCheck() } +func stack128() { var buf [128]byte; use(buf[:]); C.callGoStackCheck() } +func stack132() { var buf [132]byte; use(buf[:]); C.callGoStackCheck() } +func stack136() { var buf [136]byte; use(buf[:]); C.callGoStackCheck() } +func stack140() { var buf [140]byte; use(buf[:]); C.callGoStackCheck() } +func stack144() { var buf [144]byte; use(buf[:]); C.callGoStackCheck() } +func stack148() { var buf [148]byte; use(buf[:]); C.callGoStackCheck() } +func stack152() { var buf [152]byte; use(buf[:]); C.callGoStackCheck() } +func stack156() { var buf [156]byte; use(buf[:]); C.callGoStackCheck() } +func stack160() { var buf [160]byte; use(buf[:]); C.callGoStackCheck() } +func stack164() { var buf [164]byte; use(buf[:]); C.callGoStackCheck() } +func stack168() { var buf [168]byte; use(buf[:]); C.callGoStackCheck() } +func stack172() { var buf [172]byte; use(buf[:]); C.callGoStackCheck() } +func stack176() { var buf [176]byte; use(buf[:]); C.callGoStackCheck() } +func stack180() { var buf [180]byte; use(buf[:]); C.callGoStackCheck() } +func stack184() { var buf [184]byte; use(buf[:]); C.callGoStackCheck() } +func stack188() { var buf [188]byte; use(buf[:]); C.callGoStackCheck() } +func stack192() { var buf [192]byte; use(buf[:]); C.callGoStackCheck() } +func stack196() { var buf [196]byte; use(buf[:]); C.callGoStackCheck() } +func 
stack200() { var buf [200]byte; use(buf[:]); C.callGoStackCheck() } +func stack204() { var buf [204]byte; use(buf[:]); C.callGoStackCheck() } +func stack208() { var buf [208]byte; use(buf[:]); C.callGoStackCheck() } +func stack212() { var buf [212]byte; use(buf[:]); C.callGoStackCheck() } +func stack216() { var buf [216]byte; use(buf[:]); C.callGoStackCheck() } +func stack220() { var buf [220]byte; use(buf[:]); C.callGoStackCheck() } +func stack224() { var buf [224]byte; use(buf[:]); C.callGoStackCheck() } +func stack228() { var buf [228]byte; use(buf[:]); C.callGoStackCheck() } +func stack232() { var buf [232]byte; use(buf[:]); C.callGoStackCheck() } +func stack236() { var buf [236]byte; use(buf[:]); C.callGoStackCheck() } +func stack240() { var buf [240]byte; use(buf[:]); C.callGoStackCheck() } +func stack244() { var buf [244]byte; use(buf[:]); C.callGoStackCheck() } +func stack248() { var buf [248]byte; use(buf[:]); C.callGoStackCheck() } +func stack252() { var buf [252]byte; use(buf[:]); C.callGoStackCheck() } +func stack256() { var buf [256]byte; use(buf[:]); C.callGoStackCheck() } +func stack260() { var buf [260]byte; use(buf[:]); C.callGoStackCheck() } +func stack264() { var buf [264]byte; use(buf[:]); C.callGoStackCheck() } +func stack268() { var buf [268]byte; use(buf[:]); C.callGoStackCheck() } +func stack272() { var buf [272]byte; use(buf[:]); C.callGoStackCheck() } +func stack276() { var buf [276]byte; use(buf[:]); C.callGoStackCheck() } +func stack280() { var buf [280]byte; use(buf[:]); C.callGoStackCheck() } +func stack284() { var buf [284]byte; use(buf[:]); C.callGoStackCheck() } +func stack288() { var buf [288]byte; use(buf[:]); C.callGoStackCheck() } +func stack292() { var buf [292]byte; use(buf[:]); C.callGoStackCheck() } +func stack296() { var buf [296]byte; use(buf[:]); C.callGoStackCheck() } +func stack300() { var buf [300]byte; use(buf[:]); C.callGoStackCheck() } +func stack304() { var buf [304]byte; use(buf[:]); C.callGoStackCheck() } +func 
stack308() { var buf [308]byte; use(buf[:]); C.callGoStackCheck() } +func stack312() { var buf [312]byte; use(buf[:]); C.callGoStackCheck() } +func stack316() { var buf [316]byte; use(buf[:]); C.callGoStackCheck() } +func stack320() { var buf [320]byte; use(buf[:]); C.callGoStackCheck() } +func stack324() { var buf [324]byte; use(buf[:]); C.callGoStackCheck() } +func stack328() { var buf [328]byte; use(buf[:]); C.callGoStackCheck() } +func stack332() { var buf [332]byte; use(buf[:]); C.callGoStackCheck() } +func stack336() { var buf [336]byte; use(buf[:]); C.callGoStackCheck() } +func stack340() { var buf [340]byte; use(buf[:]); C.callGoStackCheck() } +func stack344() { var buf [344]byte; use(buf[:]); C.callGoStackCheck() } +func stack348() { var buf [348]byte; use(buf[:]); C.callGoStackCheck() } +func stack352() { var buf [352]byte; use(buf[:]); C.callGoStackCheck() } +func stack356() { var buf [356]byte; use(buf[:]); C.callGoStackCheck() } +func stack360() { var buf [360]byte; use(buf[:]); C.callGoStackCheck() } +func stack364() { var buf [364]byte; use(buf[:]); C.callGoStackCheck() } +func stack368() { var buf [368]byte; use(buf[:]); C.callGoStackCheck() } +func stack372() { var buf [372]byte; use(buf[:]); C.callGoStackCheck() } +func stack376() { var buf [376]byte; use(buf[:]); C.callGoStackCheck() } +func stack380() { var buf [380]byte; use(buf[:]); C.callGoStackCheck() } +func stack384() { var buf [384]byte; use(buf[:]); C.callGoStackCheck() } +func stack388() { var buf [388]byte; use(buf[:]); C.callGoStackCheck() } +func stack392() { var buf [392]byte; use(buf[:]); C.callGoStackCheck() } +func stack396() { var buf [396]byte; use(buf[:]); C.callGoStackCheck() } +func stack400() { var buf [400]byte; use(buf[:]); C.callGoStackCheck() } +func stack404() { var buf [404]byte; use(buf[:]); C.callGoStackCheck() } +func stack408() { var buf [408]byte; use(buf[:]); C.callGoStackCheck() } +func stack412() { var buf [412]byte; use(buf[:]); C.callGoStackCheck() } +func 
stack416() { var buf [416]byte; use(buf[:]); C.callGoStackCheck() } +func stack420() { var buf [420]byte; use(buf[:]); C.callGoStackCheck() } +func stack424() { var buf [424]byte; use(buf[:]); C.callGoStackCheck() } +func stack428() { var buf [428]byte; use(buf[:]); C.callGoStackCheck() } +func stack432() { var buf [432]byte; use(buf[:]); C.callGoStackCheck() } +func stack436() { var buf [436]byte; use(buf[:]); C.callGoStackCheck() } +func stack440() { var buf [440]byte; use(buf[:]); C.callGoStackCheck() } +func stack444() { var buf [444]byte; use(buf[:]); C.callGoStackCheck() } +func stack448() { var buf [448]byte; use(buf[:]); C.callGoStackCheck() } +func stack452() { var buf [452]byte; use(buf[:]); C.callGoStackCheck() } +func stack456() { var buf [456]byte; use(buf[:]); C.callGoStackCheck() } +func stack460() { var buf [460]byte; use(buf[:]); C.callGoStackCheck() } +func stack464() { var buf [464]byte; use(buf[:]); C.callGoStackCheck() } +func stack468() { var buf [468]byte; use(buf[:]); C.callGoStackCheck() } +func stack472() { var buf [472]byte; use(buf[:]); C.callGoStackCheck() } +func stack476() { var buf [476]byte; use(buf[:]); C.callGoStackCheck() } +func stack480() { var buf [480]byte; use(buf[:]); C.callGoStackCheck() } +func stack484() { var buf [484]byte; use(buf[:]); C.callGoStackCheck() } +func stack488() { var buf [488]byte; use(buf[:]); C.callGoStackCheck() } +func stack492() { var buf [492]byte; use(buf[:]); C.callGoStackCheck() } +func stack496() { var buf [496]byte; use(buf[:]); C.callGoStackCheck() } +func stack500() { var buf [500]byte; use(buf[:]); C.callGoStackCheck() } +func stack504() { var buf [504]byte; use(buf[:]); C.callGoStackCheck() } +func stack508() { var buf [508]byte; use(buf[:]); C.callGoStackCheck() } +func stack512() { var buf [512]byte; use(buf[:]); C.callGoStackCheck() } +func stack516() { var buf [516]byte; use(buf[:]); C.callGoStackCheck() } +func stack520() { var buf [520]byte; use(buf[:]); C.callGoStackCheck() } +func 
stack524() { var buf [524]byte; use(buf[:]); C.callGoStackCheck() } +func stack528() { var buf [528]byte; use(buf[:]); C.callGoStackCheck() } +func stack532() { var buf [532]byte; use(buf[:]); C.callGoStackCheck() } +func stack536() { var buf [536]byte; use(buf[:]); C.callGoStackCheck() } +func stack540() { var buf [540]byte; use(buf[:]); C.callGoStackCheck() } +func stack544() { var buf [544]byte; use(buf[:]); C.callGoStackCheck() } +func stack548() { var buf [548]byte; use(buf[:]); C.callGoStackCheck() } +func stack552() { var buf [552]byte; use(buf[:]); C.callGoStackCheck() } +func stack556() { var buf [556]byte; use(buf[:]); C.callGoStackCheck() } +func stack560() { var buf [560]byte; use(buf[:]); C.callGoStackCheck() } +func stack564() { var buf [564]byte; use(buf[:]); C.callGoStackCheck() } +func stack568() { var buf [568]byte; use(buf[:]); C.callGoStackCheck() } +func stack572() { var buf [572]byte; use(buf[:]); C.callGoStackCheck() } +func stack576() { var buf [576]byte; use(buf[:]); C.callGoStackCheck() } +func stack580() { var buf [580]byte; use(buf[:]); C.callGoStackCheck() } +func stack584() { var buf [584]byte; use(buf[:]); C.callGoStackCheck() } +func stack588() { var buf [588]byte; use(buf[:]); C.callGoStackCheck() } +func stack592() { var buf [592]byte; use(buf[:]); C.callGoStackCheck() } +func stack596() { var buf [596]byte; use(buf[:]); C.callGoStackCheck() } +func stack600() { var buf [600]byte; use(buf[:]); C.callGoStackCheck() } +func stack604() { var buf [604]byte; use(buf[:]); C.callGoStackCheck() } +func stack608() { var buf [608]byte; use(buf[:]); C.callGoStackCheck() } +func stack612() { var buf [612]byte; use(buf[:]); C.callGoStackCheck() } +func stack616() { var buf [616]byte; use(buf[:]); C.callGoStackCheck() } +func stack620() { var buf [620]byte; use(buf[:]); C.callGoStackCheck() } +func stack624() { var buf [624]byte; use(buf[:]); C.callGoStackCheck() } +func stack628() { var buf [628]byte; use(buf[:]); C.callGoStackCheck() } +func 
stack632() { var buf [632]byte; use(buf[:]); C.callGoStackCheck() } +func stack636() { var buf [636]byte; use(buf[:]); C.callGoStackCheck() } +func stack640() { var buf [640]byte; use(buf[:]); C.callGoStackCheck() } +func stack644() { var buf [644]byte; use(buf[:]); C.callGoStackCheck() } +func stack648() { var buf [648]byte; use(buf[:]); C.callGoStackCheck() } +func stack652() { var buf [652]byte; use(buf[:]); C.callGoStackCheck() } +func stack656() { var buf [656]byte; use(buf[:]); C.callGoStackCheck() } +func stack660() { var buf [660]byte; use(buf[:]); C.callGoStackCheck() } +func stack664() { var buf [664]byte; use(buf[:]); C.callGoStackCheck() } +func stack668() { var buf [668]byte; use(buf[:]); C.callGoStackCheck() } +func stack672() { var buf [672]byte; use(buf[:]); C.callGoStackCheck() } +func stack676() { var buf [676]byte; use(buf[:]); C.callGoStackCheck() } +func stack680() { var buf [680]byte; use(buf[:]); C.callGoStackCheck() } +func stack684() { var buf [684]byte; use(buf[:]); C.callGoStackCheck() } +func stack688() { var buf [688]byte; use(buf[:]); C.callGoStackCheck() } +func stack692() { var buf [692]byte; use(buf[:]); C.callGoStackCheck() } +func stack696() { var buf [696]byte; use(buf[:]); C.callGoStackCheck() } +func stack700() { var buf [700]byte; use(buf[:]); C.callGoStackCheck() } +func stack704() { var buf [704]byte; use(buf[:]); C.callGoStackCheck() } +func stack708() { var buf [708]byte; use(buf[:]); C.callGoStackCheck() } +func stack712() { var buf [712]byte; use(buf[:]); C.callGoStackCheck() } +func stack716() { var buf [716]byte; use(buf[:]); C.callGoStackCheck() } +func stack720() { var buf [720]byte; use(buf[:]); C.callGoStackCheck() } +func stack724() { var buf [724]byte; use(buf[:]); C.callGoStackCheck() } +func stack728() { var buf [728]byte; use(buf[:]); C.callGoStackCheck() } +func stack732() { var buf [732]byte; use(buf[:]); C.callGoStackCheck() } +func stack736() { var buf [736]byte; use(buf[:]); C.callGoStackCheck() } +func 
stack740() { var buf [740]byte; use(buf[:]); C.callGoStackCheck() } +func stack744() { var buf [744]byte; use(buf[:]); C.callGoStackCheck() } +func stack748() { var buf [748]byte; use(buf[:]); C.callGoStackCheck() } +func stack752() { var buf [752]byte; use(buf[:]); C.callGoStackCheck() } +func stack756() { var buf [756]byte; use(buf[:]); C.callGoStackCheck() } +func stack760() { var buf [760]byte; use(buf[:]); C.callGoStackCheck() } +func stack764() { var buf [764]byte; use(buf[:]); C.callGoStackCheck() } +func stack768() { var buf [768]byte; use(buf[:]); C.callGoStackCheck() } +func stack772() { var buf [772]byte; use(buf[:]); C.callGoStackCheck() } +func stack776() { var buf [776]byte; use(buf[:]); C.callGoStackCheck() } +func stack780() { var buf [780]byte; use(buf[:]); C.callGoStackCheck() } +func stack784() { var buf [784]byte; use(buf[:]); C.callGoStackCheck() } +func stack788() { var buf [788]byte; use(buf[:]); C.callGoStackCheck() } +func stack792() { var buf [792]byte; use(buf[:]); C.callGoStackCheck() } +func stack796() { var buf [796]byte; use(buf[:]); C.callGoStackCheck() } +func stack800() { var buf [800]byte; use(buf[:]); C.callGoStackCheck() } +func stack804() { var buf [804]byte; use(buf[:]); C.callGoStackCheck() } +func stack808() { var buf [808]byte; use(buf[:]); C.callGoStackCheck() } +func stack812() { var buf [812]byte; use(buf[:]); C.callGoStackCheck() } +func stack816() { var buf [816]byte; use(buf[:]); C.callGoStackCheck() } +func stack820() { var buf [820]byte; use(buf[:]); C.callGoStackCheck() } +func stack824() { var buf [824]byte; use(buf[:]); C.callGoStackCheck() } +func stack828() { var buf [828]byte; use(buf[:]); C.callGoStackCheck() } +func stack832() { var buf [832]byte; use(buf[:]); C.callGoStackCheck() } +func stack836() { var buf [836]byte; use(buf[:]); C.callGoStackCheck() } +func stack840() { var buf [840]byte; use(buf[:]); C.callGoStackCheck() } +func stack844() { var buf [844]byte; use(buf[:]); C.callGoStackCheck() } +func 
stack848() { var buf [848]byte; use(buf[:]); C.callGoStackCheck() } +func stack852() { var buf [852]byte; use(buf[:]); C.callGoStackCheck() } +func stack856() { var buf [856]byte; use(buf[:]); C.callGoStackCheck() } +func stack860() { var buf [860]byte; use(buf[:]); C.callGoStackCheck() } +func stack864() { var buf [864]byte; use(buf[:]); C.callGoStackCheck() } +func stack868() { var buf [868]byte; use(buf[:]); C.callGoStackCheck() } +func stack872() { var buf [872]byte; use(buf[:]); C.callGoStackCheck() } +func stack876() { var buf [876]byte; use(buf[:]); C.callGoStackCheck() } +func stack880() { var buf [880]byte; use(buf[:]); C.callGoStackCheck() } +func stack884() { var buf [884]byte; use(buf[:]); C.callGoStackCheck() } +func stack888() { var buf [888]byte; use(buf[:]); C.callGoStackCheck() } +func stack892() { var buf [892]byte; use(buf[:]); C.callGoStackCheck() } +func stack896() { var buf [896]byte; use(buf[:]); C.callGoStackCheck() } +func stack900() { var buf [900]byte; use(buf[:]); C.callGoStackCheck() } +func stack904() { var buf [904]byte; use(buf[:]); C.callGoStackCheck() } +func stack908() { var buf [908]byte; use(buf[:]); C.callGoStackCheck() } +func stack912() { var buf [912]byte; use(buf[:]); C.callGoStackCheck() } +func stack916() { var buf [916]byte; use(buf[:]); C.callGoStackCheck() } +func stack920() { var buf [920]byte; use(buf[:]); C.callGoStackCheck() } +func stack924() { var buf [924]byte; use(buf[:]); C.callGoStackCheck() } +func stack928() { var buf [928]byte; use(buf[:]); C.callGoStackCheck() } +func stack932() { var buf [932]byte; use(buf[:]); C.callGoStackCheck() } +func stack936() { var buf [936]byte; use(buf[:]); C.callGoStackCheck() } +func stack940() { var buf [940]byte; use(buf[:]); C.callGoStackCheck() } +func stack944() { var buf [944]byte; use(buf[:]); C.callGoStackCheck() } +func stack948() { var buf [948]byte; use(buf[:]); C.callGoStackCheck() } +func stack952() { var buf [952]byte; use(buf[:]); C.callGoStackCheck() } +func 
stack956() { var buf [956]byte; use(buf[:]); C.callGoStackCheck() } +func stack960() { var buf [960]byte; use(buf[:]); C.callGoStackCheck() } +func stack964() { var buf [964]byte; use(buf[:]); C.callGoStackCheck() } +func stack968() { var buf [968]byte; use(buf[:]); C.callGoStackCheck() } +func stack972() { var buf [972]byte; use(buf[:]); C.callGoStackCheck() } +func stack976() { var buf [976]byte; use(buf[:]); C.callGoStackCheck() } +func stack980() { var buf [980]byte; use(buf[:]); C.callGoStackCheck() } +func stack984() { var buf [984]byte; use(buf[:]); C.callGoStackCheck() } +func stack988() { var buf [988]byte; use(buf[:]); C.callGoStackCheck() } +func stack992() { var buf [992]byte; use(buf[:]); C.callGoStackCheck() } +func stack996() { var buf [996]byte; use(buf[:]); C.callGoStackCheck() } +func stack1000() { var buf [1000]byte; use(buf[:]); C.callGoStackCheck() } +func stack1004() { var buf [1004]byte; use(buf[:]); C.callGoStackCheck() } +func stack1008() { var buf [1008]byte; use(buf[:]); C.callGoStackCheck() } +func stack1012() { var buf [1012]byte; use(buf[:]); C.callGoStackCheck() } +func stack1016() { var buf [1016]byte; use(buf[:]); C.callGoStackCheck() } +func stack1020() { var buf [1020]byte; use(buf[:]); C.callGoStackCheck() } +func stack1024() { var buf [1024]byte; use(buf[:]); C.callGoStackCheck() } +func stack1028() { var buf [1028]byte; use(buf[:]); C.callGoStackCheck() } +func stack1032() { var buf [1032]byte; use(buf[:]); C.callGoStackCheck() } +func stack1036() { var buf [1036]byte; use(buf[:]); C.callGoStackCheck() } +func stack1040() { var buf [1040]byte; use(buf[:]); C.callGoStackCheck() } +func stack1044() { var buf [1044]byte; use(buf[:]); C.callGoStackCheck() } +func stack1048() { var buf [1048]byte; use(buf[:]); C.callGoStackCheck() } +func stack1052() { var buf [1052]byte; use(buf[:]); C.callGoStackCheck() } +func stack1056() { var buf [1056]byte; use(buf[:]); C.callGoStackCheck() } +func stack1060() { var buf [1060]byte; 
use(buf[:]); C.callGoStackCheck() } +func stack1064() { var buf [1064]byte; use(buf[:]); C.callGoStackCheck() } +func stack1068() { var buf [1068]byte; use(buf[:]); C.callGoStackCheck() } +func stack1072() { var buf [1072]byte; use(buf[:]); C.callGoStackCheck() } +func stack1076() { var buf [1076]byte; use(buf[:]); C.callGoStackCheck() } +func stack1080() { var buf [1080]byte; use(buf[:]); C.callGoStackCheck() } +func stack1084() { var buf [1084]byte; use(buf[:]); C.callGoStackCheck() } +func stack1088() { var buf [1088]byte; use(buf[:]); C.callGoStackCheck() } +func stack1092() { var buf [1092]byte; use(buf[:]); C.callGoStackCheck() } +func stack1096() { var buf [1096]byte; use(buf[:]); C.callGoStackCheck() } +func stack1100() { var buf [1100]byte; use(buf[:]); C.callGoStackCheck() } +func stack1104() { var buf [1104]byte; use(buf[:]); C.callGoStackCheck() } +func stack1108() { var buf [1108]byte; use(buf[:]); C.callGoStackCheck() } +func stack1112() { var buf [1112]byte; use(buf[:]); C.callGoStackCheck() } +func stack1116() { var buf [1116]byte; use(buf[:]); C.callGoStackCheck() } +func stack1120() { var buf [1120]byte; use(buf[:]); C.callGoStackCheck() } +func stack1124() { var buf [1124]byte; use(buf[:]); C.callGoStackCheck() } +func stack1128() { var buf [1128]byte; use(buf[:]); C.callGoStackCheck() } +func stack1132() { var buf [1132]byte; use(buf[:]); C.callGoStackCheck() } +func stack1136() { var buf [1136]byte; use(buf[:]); C.callGoStackCheck() } +func stack1140() { var buf [1140]byte; use(buf[:]); C.callGoStackCheck() } +func stack1144() { var buf [1144]byte; use(buf[:]); C.callGoStackCheck() } +func stack1148() { var buf [1148]byte; use(buf[:]); C.callGoStackCheck() } +func stack1152() { var buf [1152]byte; use(buf[:]); C.callGoStackCheck() } +func stack1156() { var buf [1156]byte; use(buf[:]); C.callGoStackCheck() } +func stack1160() { var buf [1160]byte; use(buf[:]); C.callGoStackCheck() } +func stack1164() { var buf [1164]byte; use(buf[:]); 
C.callGoStackCheck() } +func stack1168() { var buf [1168]byte; use(buf[:]); C.callGoStackCheck() } +func stack1172() { var buf [1172]byte; use(buf[:]); C.callGoStackCheck() } +func stack1176() { var buf [1176]byte; use(buf[:]); C.callGoStackCheck() } +func stack1180() { var buf [1180]byte; use(buf[:]); C.callGoStackCheck() } +func stack1184() { var buf [1184]byte; use(buf[:]); C.callGoStackCheck() } +func stack1188() { var buf [1188]byte; use(buf[:]); C.callGoStackCheck() } +func stack1192() { var buf [1192]byte; use(buf[:]); C.callGoStackCheck() } +func stack1196() { var buf [1196]byte; use(buf[:]); C.callGoStackCheck() } +func stack1200() { var buf [1200]byte; use(buf[:]); C.callGoStackCheck() } +func stack1204() { var buf [1204]byte; use(buf[:]); C.callGoStackCheck() } +func stack1208() { var buf [1208]byte; use(buf[:]); C.callGoStackCheck() } +func stack1212() { var buf [1212]byte; use(buf[:]); C.callGoStackCheck() } +func stack1216() { var buf [1216]byte; use(buf[:]); C.callGoStackCheck() } +func stack1220() { var buf [1220]byte; use(buf[:]); C.callGoStackCheck() } +func stack1224() { var buf [1224]byte; use(buf[:]); C.callGoStackCheck() } +func stack1228() { var buf [1228]byte; use(buf[:]); C.callGoStackCheck() } +func stack1232() { var buf [1232]byte; use(buf[:]); C.callGoStackCheck() } +func stack1236() { var buf [1236]byte; use(buf[:]); C.callGoStackCheck() } +func stack1240() { var buf [1240]byte; use(buf[:]); C.callGoStackCheck() } +func stack1244() { var buf [1244]byte; use(buf[:]); C.callGoStackCheck() } +func stack1248() { var buf [1248]byte; use(buf[:]); C.callGoStackCheck() } +func stack1252() { var buf [1252]byte; use(buf[:]); C.callGoStackCheck() } +func stack1256() { var buf [1256]byte; use(buf[:]); C.callGoStackCheck() } +func stack1260() { var buf [1260]byte; use(buf[:]); C.callGoStackCheck() } +func stack1264() { var buf [1264]byte; use(buf[:]); C.callGoStackCheck() } +func stack1268() { var buf [1268]byte; use(buf[:]); C.callGoStackCheck() } 
+func stack1272() { var buf [1272]byte; use(buf[:]); C.callGoStackCheck() } +func stack1276() { var buf [1276]byte; use(buf[:]); C.callGoStackCheck() } +func stack1280() { var buf [1280]byte; use(buf[:]); C.callGoStackCheck() } +func stack1284() { var buf [1284]byte; use(buf[:]); C.callGoStackCheck() } +func stack1288() { var buf [1288]byte; use(buf[:]); C.callGoStackCheck() } +func stack1292() { var buf [1292]byte; use(buf[:]); C.callGoStackCheck() } +func stack1296() { var buf [1296]byte; use(buf[:]); C.callGoStackCheck() } +func stack1300() { var buf [1300]byte; use(buf[:]); C.callGoStackCheck() } +func stack1304() { var buf [1304]byte; use(buf[:]); C.callGoStackCheck() } +func stack1308() { var buf [1308]byte; use(buf[:]); C.callGoStackCheck() } +func stack1312() { var buf [1312]byte; use(buf[:]); C.callGoStackCheck() } +func stack1316() { var buf [1316]byte; use(buf[:]); C.callGoStackCheck() } +func stack1320() { var buf [1320]byte; use(buf[:]); C.callGoStackCheck() } +func stack1324() { var buf [1324]byte; use(buf[:]); C.callGoStackCheck() } +func stack1328() { var buf [1328]byte; use(buf[:]); C.callGoStackCheck() } +func stack1332() { var buf [1332]byte; use(buf[:]); C.callGoStackCheck() } +func stack1336() { var buf [1336]byte; use(buf[:]); C.callGoStackCheck() } +func stack1340() { var buf [1340]byte; use(buf[:]); C.callGoStackCheck() } +func stack1344() { var buf [1344]byte; use(buf[:]); C.callGoStackCheck() } +func stack1348() { var buf [1348]byte; use(buf[:]); C.callGoStackCheck() } +func stack1352() { var buf [1352]byte; use(buf[:]); C.callGoStackCheck() } +func stack1356() { var buf [1356]byte; use(buf[:]); C.callGoStackCheck() } +func stack1360() { var buf [1360]byte; use(buf[:]); C.callGoStackCheck() } +func stack1364() { var buf [1364]byte; use(buf[:]); C.callGoStackCheck() } +func stack1368() { var buf [1368]byte; use(buf[:]); C.callGoStackCheck() } +func stack1372() { var buf [1372]byte; use(buf[:]); C.callGoStackCheck() } +func stack1376() { var 
buf [1376]byte; use(buf[:]); C.callGoStackCheck() } +func stack1380() { var buf [1380]byte; use(buf[:]); C.callGoStackCheck() } +func stack1384() { var buf [1384]byte; use(buf[:]); C.callGoStackCheck() } +func stack1388() { var buf [1388]byte; use(buf[:]); C.callGoStackCheck() } +func stack1392() { var buf [1392]byte; use(buf[:]); C.callGoStackCheck() } +func stack1396() { var buf [1396]byte; use(buf[:]); C.callGoStackCheck() } +func stack1400() { var buf [1400]byte; use(buf[:]); C.callGoStackCheck() } +func stack1404() { var buf [1404]byte; use(buf[:]); C.callGoStackCheck() } +func stack1408() { var buf [1408]byte; use(buf[:]); C.callGoStackCheck() } +func stack1412() { var buf [1412]byte; use(buf[:]); C.callGoStackCheck() } +func stack1416() { var buf [1416]byte; use(buf[:]); C.callGoStackCheck() } +func stack1420() { var buf [1420]byte; use(buf[:]); C.callGoStackCheck() } +func stack1424() { var buf [1424]byte; use(buf[:]); C.callGoStackCheck() } +func stack1428() { var buf [1428]byte; use(buf[:]); C.callGoStackCheck() } +func stack1432() { var buf [1432]byte; use(buf[:]); C.callGoStackCheck() } +func stack1436() { var buf [1436]byte; use(buf[:]); C.callGoStackCheck() } +func stack1440() { var buf [1440]byte; use(buf[:]); C.callGoStackCheck() } +func stack1444() { var buf [1444]byte; use(buf[:]); C.callGoStackCheck() } +func stack1448() { var buf [1448]byte; use(buf[:]); C.callGoStackCheck() } +func stack1452() { var buf [1452]byte; use(buf[:]); C.callGoStackCheck() } +func stack1456() { var buf [1456]byte; use(buf[:]); C.callGoStackCheck() } +func stack1460() { var buf [1460]byte; use(buf[:]); C.callGoStackCheck() } +func stack1464() { var buf [1464]byte; use(buf[:]); C.callGoStackCheck() } +func stack1468() { var buf [1468]byte; use(buf[:]); C.callGoStackCheck() } +func stack1472() { var buf [1472]byte; use(buf[:]); C.callGoStackCheck() } +func stack1476() { var buf [1476]byte; use(buf[:]); C.callGoStackCheck() } +func stack1480() { var buf [1480]byte; 
use(buf[:]); C.callGoStackCheck() } +func stack1484() { var buf [1484]byte; use(buf[:]); C.callGoStackCheck() } +func stack1488() { var buf [1488]byte; use(buf[:]); C.callGoStackCheck() } +func stack1492() { var buf [1492]byte; use(buf[:]); C.callGoStackCheck() } +func stack1496() { var buf [1496]byte; use(buf[:]); C.callGoStackCheck() } +func stack1500() { var buf [1500]byte; use(buf[:]); C.callGoStackCheck() } +func stack1504() { var buf [1504]byte; use(buf[:]); C.callGoStackCheck() } +func stack1508() { var buf [1508]byte; use(buf[:]); C.callGoStackCheck() } +func stack1512() { var buf [1512]byte; use(buf[:]); C.callGoStackCheck() } +func stack1516() { var buf [1516]byte; use(buf[:]); C.callGoStackCheck() } +func stack1520() { var buf [1520]byte; use(buf[:]); C.callGoStackCheck() } +func stack1524() { var buf [1524]byte; use(buf[:]); C.callGoStackCheck() } +func stack1528() { var buf [1528]byte; use(buf[:]); C.callGoStackCheck() } +func stack1532() { var buf [1532]byte; use(buf[:]); C.callGoStackCheck() } +func stack1536() { var buf [1536]byte; use(buf[:]); C.callGoStackCheck() } +func stack1540() { var buf [1540]byte; use(buf[:]); C.callGoStackCheck() } +func stack1544() { var buf [1544]byte; use(buf[:]); C.callGoStackCheck() } +func stack1548() { var buf [1548]byte; use(buf[:]); C.callGoStackCheck() } +func stack1552() { var buf [1552]byte; use(buf[:]); C.callGoStackCheck() } +func stack1556() { var buf [1556]byte; use(buf[:]); C.callGoStackCheck() } +func stack1560() { var buf [1560]byte; use(buf[:]); C.callGoStackCheck() } +func stack1564() { var buf [1564]byte; use(buf[:]); C.callGoStackCheck() } +func stack1568() { var buf [1568]byte; use(buf[:]); C.callGoStackCheck() } +func stack1572() { var buf [1572]byte; use(buf[:]); C.callGoStackCheck() } +func stack1576() { var buf [1576]byte; use(buf[:]); C.callGoStackCheck() } +func stack1580() { var buf [1580]byte; use(buf[:]); C.callGoStackCheck() } +func stack1584() { var buf [1584]byte; use(buf[:]); 
C.callGoStackCheck() } +func stack1588() { var buf [1588]byte; use(buf[:]); C.callGoStackCheck() } +func stack1592() { var buf [1592]byte; use(buf[:]); C.callGoStackCheck() } +func stack1596() { var buf [1596]byte; use(buf[:]); C.callGoStackCheck() } +func stack1600() { var buf [1600]byte; use(buf[:]); C.callGoStackCheck() } +func stack1604() { var buf [1604]byte; use(buf[:]); C.callGoStackCheck() } +func stack1608() { var buf [1608]byte; use(buf[:]); C.callGoStackCheck() } +func stack1612() { var buf [1612]byte; use(buf[:]); C.callGoStackCheck() } +func stack1616() { var buf [1616]byte; use(buf[:]); C.callGoStackCheck() } +func stack1620() { var buf [1620]byte; use(buf[:]); C.callGoStackCheck() } +func stack1624() { var buf [1624]byte; use(buf[:]); C.callGoStackCheck() } +func stack1628() { var buf [1628]byte; use(buf[:]); C.callGoStackCheck() } +func stack1632() { var buf [1632]byte; use(buf[:]); C.callGoStackCheck() } +func stack1636() { var buf [1636]byte; use(buf[:]); C.callGoStackCheck() } +func stack1640() { var buf [1640]byte; use(buf[:]); C.callGoStackCheck() } +func stack1644() { var buf [1644]byte; use(buf[:]); C.callGoStackCheck() } +func stack1648() { var buf [1648]byte; use(buf[:]); C.callGoStackCheck() } +func stack1652() { var buf [1652]byte; use(buf[:]); C.callGoStackCheck() } +func stack1656() { var buf [1656]byte; use(buf[:]); C.callGoStackCheck() } +func stack1660() { var buf [1660]byte; use(buf[:]); C.callGoStackCheck() } +func stack1664() { var buf [1664]byte; use(buf[:]); C.callGoStackCheck() } +func stack1668() { var buf [1668]byte; use(buf[:]); C.callGoStackCheck() } +func stack1672() { var buf [1672]byte; use(buf[:]); C.callGoStackCheck() } +func stack1676() { var buf [1676]byte; use(buf[:]); C.callGoStackCheck() } +func stack1680() { var buf [1680]byte; use(buf[:]); C.callGoStackCheck() } +func stack1684() { var buf [1684]byte; use(buf[:]); C.callGoStackCheck() } +func stack1688() { var buf [1688]byte; use(buf[:]); C.callGoStackCheck() } 
+func stack1692() { var buf [1692]byte; use(buf[:]); C.callGoStackCheck() } +func stack1696() { var buf [1696]byte; use(buf[:]); C.callGoStackCheck() } +func stack1700() { var buf [1700]byte; use(buf[:]); C.callGoStackCheck() } +func stack1704() { var buf [1704]byte; use(buf[:]); C.callGoStackCheck() } +func stack1708() { var buf [1708]byte; use(buf[:]); C.callGoStackCheck() } +func stack1712() { var buf [1712]byte; use(buf[:]); C.callGoStackCheck() } +func stack1716() { var buf [1716]byte; use(buf[:]); C.callGoStackCheck() } +func stack1720() { var buf [1720]byte; use(buf[:]); C.callGoStackCheck() } +func stack1724() { var buf [1724]byte; use(buf[:]); C.callGoStackCheck() } +func stack1728() { var buf [1728]byte; use(buf[:]); C.callGoStackCheck() } +func stack1732() { var buf [1732]byte; use(buf[:]); C.callGoStackCheck() } +func stack1736() { var buf [1736]byte; use(buf[:]); C.callGoStackCheck() } +func stack1740() { var buf [1740]byte; use(buf[:]); C.callGoStackCheck() } +func stack1744() { var buf [1744]byte; use(buf[:]); C.callGoStackCheck() } +func stack1748() { var buf [1748]byte; use(buf[:]); C.callGoStackCheck() } +func stack1752() { var buf [1752]byte; use(buf[:]); C.callGoStackCheck() } +func stack1756() { var buf [1756]byte; use(buf[:]); C.callGoStackCheck() } +func stack1760() { var buf [1760]byte; use(buf[:]); C.callGoStackCheck() } +func stack1764() { var buf [1764]byte; use(buf[:]); C.callGoStackCheck() } +func stack1768() { var buf [1768]byte; use(buf[:]); C.callGoStackCheck() } +func stack1772() { var buf [1772]byte; use(buf[:]); C.callGoStackCheck() } +func stack1776() { var buf [1776]byte; use(buf[:]); C.callGoStackCheck() } +func stack1780() { var buf [1780]byte; use(buf[:]); C.callGoStackCheck() } +func stack1784() { var buf [1784]byte; use(buf[:]); C.callGoStackCheck() } +func stack1788() { var buf [1788]byte; use(buf[:]); C.callGoStackCheck() } +func stack1792() { var buf [1792]byte; use(buf[:]); C.callGoStackCheck() } +func stack1796() { var 
buf [1796]byte; use(buf[:]); C.callGoStackCheck() } +func stack1800() { var buf [1800]byte; use(buf[:]); C.callGoStackCheck() } +func stack1804() { var buf [1804]byte; use(buf[:]); C.callGoStackCheck() } +func stack1808() { var buf [1808]byte; use(buf[:]); C.callGoStackCheck() } +func stack1812() { var buf [1812]byte; use(buf[:]); C.callGoStackCheck() } +func stack1816() { var buf [1816]byte; use(buf[:]); C.callGoStackCheck() } +func stack1820() { var buf [1820]byte; use(buf[:]); C.callGoStackCheck() } +func stack1824() { var buf [1824]byte; use(buf[:]); C.callGoStackCheck() } +func stack1828() { var buf [1828]byte; use(buf[:]); C.callGoStackCheck() } +func stack1832() { var buf [1832]byte; use(buf[:]); C.callGoStackCheck() } +func stack1836() { var buf [1836]byte; use(buf[:]); C.callGoStackCheck() } +func stack1840() { var buf [1840]byte; use(buf[:]); C.callGoStackCheck() } +func stack1844() { var buf [1844]byte; use(buf[:]); C.callGoStackCheck() } +func stack1848() { var buf [1848]byte; use(buf[:]); C.callGoStackCheck() } +func stack1852() { var buf [1852]byte; use(buf[:]); C.callGoStackCheck() } +func stack1856() { var buf [1856]byte; use(buf[:]); C.callGoStackCheck() } +func stack1860() { var buf [1860]byte; use(buf[:]); C.callGoStackCheck() } +func stack1864() { var buf [1864]byte; use(buf[:]); C.callGoStackCheck() } +func stack1868() { var buf [1868]byte; use(buf[:]); C.callGoStackCheck() } +func stack1872() { var buf [1872]byte; use(buf[:]); C.callGoStackCheck() } +func stack1876() { var buf [1876]byte; use(buf[:]); C.callGoStackCheck() } +func stack1880() { var buf [1880]byte; use(buf[:]); C.callGoStackCheck() } +func stack1884() { var buf [1884]byte; use(buf[:]); C.callGoStackCheck() } +func stack1888() { var buf [1888]byte; use(buf[:]); C.callGoStackCheck() } +func stack1892() { var buf [1892]byte; use(buf[:]); C.callGoStackCheck() } +func stack1896() { var buf [1896]byte; use(buf[:]); C.callGoStackCheck() } +func stack1900() { var buf [1900]byte; 
use(buf[:]); C.callGoStackCheck() } +func stack1904() { var buf [1904]byte; use(buf[:]); C.callGoStackCheck() } +func stack1908() { var buf [1908]byte; use(buf[:]); C.callGoStackCheck() } +func stack1912() { var buf [1912]byte; use(buf[:]); C.callGoStackCheck() } +func stack1916() { var buf [1916]byte; use(buf[:]); C.callGoStackCheck() } +func stack1920() { var buf [1920]byte; use(buf[:]); C.callGoStackCheck() } +func stack1924() { var buf [1924]byte; use(buf[:]); C.callGoStackCheck() } +func stack1928() { var buf [1928]byte; use(buf[:]); C.callGoStackCheck() } +func stack1932() { var buf [1932]byte; use(buf[:]); C.callGoStackCheck() } +func stack1936() { var buf [1936]byte; use(buf[:]); C.callGoStackCheck() } +func stack1940() { var buf [1940]byte; use(buf[:]); C.callGoStackCheck() } +func stack1944() { var buf [1944]byte; use(buf[:]); C.callGoStackCheck() } +func stack1948() { var buf [1948]byte; use(buf[:]); C.callGoStackCheck() } +func stack1952() { var buf [1952]byte; use(buf[:]); C.callGoStackCheck() } +func stack1956() { var buf [1956]byte; use(buf[:]); C.callGoStackCheck() } +func stack1960() { var buf [1960]byte; use(buf[:]); C.callGoStackCheck() } +func stack1964() { var buf [1964]byte; use(buf[:]); C.callGoStackCheck() } +func stack1968() { var buf [1968]byte; use(buf[:]); C.callGoStackCheck() } +func stack1972() { var buf [1972]byte; use(buf[:]); C.callGoStackCheck() } +func stack1976() { var buf [1976]byte; use(buf[:]); C.callGoStackCheck() } +func stack1980() { var buf [1980]byte; use(buf[:]); C.callGoStackCheck() } +func stack1984() { var buf [1984]byte; use(buf[:]); C.callGoStackCheck() } +func stack1988() { var buf [1988]byte; use(buf[:]); C.callGoStackCheck() } +func stack1992() { var buf [1992]byte; use(buf[:]); C.callGoStackCheck() } +func stack1996() { var buf [1996]byte; use(buf[:]); C.callGoStackCheck() } +func stack2000() { var buf [2000]byte; use(buf[:]); C.callGoStackCheck() } +func stack2004() { var buf [2004]byte; use(buf[:]); 
C.callGoStackCheck() } +func stack2008() { var buf [2008]byte; use(buf[:]); C.callGoStackCheck() } +func stack2012() { var buf [2012]byte; use(buf[:]); C.callGoStackCheck() } +func stack2016() { var buf [2016]byte; use(buf[:]); C.callGoStackCheck() } +func stack2020() { var buf [2020]byte; use(buf[:]); C.callGoStackCheck() } +func stack2024() { var buf [2024]byte; use(buf[:]); C.callGoStackCheck() } +func stack2028() { var buf [2028]byte; use(buf[:]); C.callGoStackCheck() } +func stack2032() { var buf [2032]byte; use(buf[:]); C.callGoStackCheck() } +func stack2036() { var buf [2036]byte; use(buf[:]); C.callGoStackCheck() } +func stack2040() { var buf [2040]byte; use(buf[:]); C.callGoStackCheck() } +func stack2044() { var buf [2044]byte; use(buf[:]); C.callGoStackCheck() } +func stack2048() { var buf [2048]byte; use(buf[:]); C.callGoStackCheck() } +func stack2052() { var buf [2052]byte; use(buf[:]); C.callGoStackCheck() } +func stack2056() { var buf [2056]byte; use(buf[:]); C.callGoStackCheck() } +func stack2060() { var buf [2060]byte; use(buf[:]); C.callGoStackCheck() } +func stack2064() { var buf [2064]byte; use(buf[:]); C.callGoStackCheck() } +func stack2068() { var buf [2068]byte; use(buf[:]); C.callGoStackCheck() } +func stack2072() { var buf [2072]byte; use(buf[:]); C.callGoStackCheck() } +func stack2076() { var buf [2076]byte; use(buf[:]); C.callGoStackCheck() } +func stack2080() { var buf [2080]byte; use(buf[:]); C.callGoStackCheck() } +func stack2084() { var buf [2084]byte; use(buf[:]); C.callGoStackCheck() } +func stack2088() { var buf [2088]byte; use(buf[:]); C.callGoStackCheck() } +func stack2092() { var buf [2092]byte; use(buf[:]); C.callGoStackCheck() } +func stack2096() { var buf [2096]byte; use(buf[:]); C.callGoStackCheck() } +func stack2100() { var buf [2100]byte; use(buf[:]); C.callGoStackCheck() } +func stack2104() { var buf [2104]byte; use(buf[:]); C.callGoStackCheck() } +func stack2108() { var buf [2108]byte; use(buf[:]); C.callGoStackCheck() } 
+func stack2112() { var buf [2112]byte; use(buf[:]); C.callGoStackCheck() } +func stack2116() { var buf [2116]byte; use(buf[:]); C.callGoStackCheck() } +func stack2120() { var buf [2120]byte; use(buf[:]); C.callGoStackCheck() } +func stack2124() { var buf [2124]byte; use(buf[:]); C.callGoStackCheck() } +func stack2128() { var buf [2128]byte; use(buf[:]); C.callGoStackCheck() } +func stack2132() { var buf [2132]byte; use(buf[:]); C.callGoStackCheck() } +func stack2136() { var buf [2136]byte; use(buf[:]); C.callGoStackCheck() } +func stack2140() { var buf [2140]byte; use(buf[:]); C.callGoStackCheck() } +func stack2144() { var buf [2144]byte; use(buf[:]); C.callGoStackCheck() } +func stack2148() { var buf [2148]byte; use(buf[:]); C.callGoStackCheck() } +func stack2152() { var buf [2152]byte; use(buf[:]); C.callGoStackCheck() } +func stack2156() { var buf [2156]byte; use(buf[:]); C.callGoStackCheck() } +func stack2160() { var buf [2160]byte; use(buf[:]); C.callGoStackCheck() } +func stack2164() { var buf [2164]byte; use(buf[:]); C.callGoStackCheck() } +func stack2168() { var buf [2168]byte; use(buf[:]); C.callGoStackCheck() } +func stack2172() { var buf [2172]byte; use(buf[:]); C.callGoStackCheck() } +func stack2176() { var buf [2176]byte; use(buf[:]); C.callGoStackCheck() } +func stack2180() { var buf [2180]byte; use(buf[:]); C.callGoStackCheck() } +func stack2184() { var buf [2184]byte; use(buf[:]); C.callGoStackCheck() } +func stack2188() { var buf [2188]byte; use(buf[:]); C.callGoStackCheck() } +func stack2192() { var buf [2192]byte; use(buf[:]); C.callGoStackCheck() } +func stack2196() { var buf [2196]byte; use(buf[:]); C.callGoStackCheck() } +func stack2200() { var buf [2200]byte; use(buf[:]); C.callGoStackCheck() } +func stack2204() { var buf [2204]byte; use(buf[:]); C.callGoStackCheck() } +func stack2208() { var buf [2208]byte; use(buf[:]); C.callGoStackCheck() } +func stack2212() { var buf [2212]byte; use(buf[:]); C.callGoStackCheck() } +func stack2216() { var 
buf [2216]byte; use(buf[:]); C.callGoStackCheck() } +func stack2220() { var buf [2220]byte; use(buf[:]); C.callGoStackCheck() } +func stack2224() { var buf [2224]byte; use(buf[:]); C.callGoStackCheck() } +func stack2228() { var buf [2228]byte; use(buf[:]); C.callGoStackCheck() } +func stack2232() { var buf [2232]byte; use(buf[:]); C.callGoStackCheck() } +func stack2236() { var buf [2236]byte; use(buf[:]); C.callGoStackCheck() } +func stack2240() { var buf [2240]byte; use(buf[:]); C.callGoStackCheck() } +func stack2244() { var buf [2244]byte; use(buf[:]); C.callGoStackCheck() } +func stack2248() { var buf [2248]byte; use(buf[:]); C.callGoStackCheck() } +func stack2252() { var buf [2252]byte; use(buf[:]); C.callGoStackCheck() } +func stack2256() { var buf [2256]byte; use(buf[:]); C.callGoStackCheck() } +func stack2260() { var buf [2260]byte; use(buf[:]); C.callGoStackCheck() } +func stack2264() { var buf [2264]byte; use(buf[:]); C.callGoStackCheck() } +func stack2268() { var buf [2268]byte; use(buf[:]); C.callGoStackCheck() } +func stack2272() { var buf [2272]byte; use(buf[:]); C.callGoStackCheck() } +func stack2276() { var buf [2276]byte; use(buf[:]); C.callGoStackCheck() } +func stack2280() { var buf [2280]byte; use(buf[:]); C.callGoStackCheck() } +func stack2284() { var buf [2284]byte; use(buf[:]); C.callGoStackCheck() } +func stack2288() { var buf [2288]byte; use(buf[:]); C.callGoStackCheck() } +func stack2292() { var buf [2292]byte; use(buf[:]); C.callGoStackCheck() } +func stack2296() { var buf [2296]byte; use(buf[:]); C.callGoStackCheck() } +func stack2300() { var buf [2300]byte; use(buf[:]); C.callGoStackCheck() } +func stack2304() { var buf [2304]byte; use(buf[:]); C.callGoStackCheck() } +func stack2308() { var buf [2308]byte; use(buf[:]); C.callGoStackCheck() } +func stack2312() { var buf [2312]byte; use(buf[:]); C.callGoStackCheck() } +func stack2316() { var buf [2316]byte; use(buf[:]); C.callGoStackCheck() } +func stack2320() { var buf [2320]byte; 
use(buf[:]); C.callGoStackCheck() } +func stack2324() { var buf [2324]byte; use(buf[:]); C.callGoStackCheck() } +func stack2328() { var buf [2328]byte; use(buf[:]); C.callGoStackCheck() } +func stack2332() { var buf [2332]byte; use(buf[:]); C.callGoStackCheck() } +func stack2336() { var buf [2336]byte; use(buf[:]); C.callGoStackCheck() } +func stack2340() { var buf [2340]byte; use(buf[:]); C.callGoStackCheck() } +func stack2344() { var buf [2344]byte; use(buf[:]); C.callGoStackCheck() } +func stack2348() { var buf [2348]byte; use(buf[:]); C.callGoStackCheck() } +func stack2352() { var buf [2352]byte; use(buf[:]); C.callGoStackCheck() } +func stack2356() { var buf [2356]byte; use(buf[:]); C.callGoStackCheck() } +func stack2360() { var buf [2360]byte; use(buf[:]); C.callGoStackCheck() } +func stack2364() { var buf [2364]byte; use(buf[:]); C.callGoStackCheck() } +func stack2368() { var buf [2368]byte; use(buf[:]); C.callGoStackCheck() } +func stack2372() { var buf [2372]byte; use(buf[:]); C.callGoStackCheck() } +func stack2376() { var buf [2376]byte; use(buf[:]); C.callGoStackCheck() } +func stack2380() { var buf [2380]byte; use(buf[:]); C.callGoStackCheck() } +func stack2384() { var buf [2384]byte; use(buf[:]); C.callGoStackCheck() } +func stack2388() { var buf [2388]byte; use(buf[:]); C.callGoStackCheck() } +func stack2392() { var buf [2392]byte; use(buf[:]); C.callGoStackCheck() } +func stack2396() { var buf [2396]byte; use(buf[:]); C.callGoStackCheck() } +func stack2400() { var buf [2400]byte; use(buf[:]); C.callGoStackCheck() } +func stack2404() { var buf [2404]byte; use(buf[:]); C.callGoStackCheck() } +func stack2408() { var buf [2408]byte; use(buf[:]); C.callGoStackCheck() } +func stack2412() { var buf [2412]byte; use(buf[:]); C.callGoStackCheck() } +func stack2416() { var buf [2416]byte; use(buf[:]); C.callGoStackCheck() } +func stack2420() { var buf [2420]byte; use(buf[:]); C.callGoStackCheck() } +func stack2424() { var buf [2424]byte; use(buf[:]); 
C.callGoStackCheck() } +func stack2428() { var buf [2428]byte; use(buf[:]); C.callGoStackCheck() } +func stack2432() { var buf [2432]byte; use(buf[:]); C.callGoStackCheck() } +func stack2436() { var buf [2436]byte; use(buf[:]); C.callGoStackCheck() } +func stack2440() { var buf [2440]byte; use(buf[:]); C.callGoStackCheck() } +func stack2444() { var buf [2444]byte; use(buf[:]); C.callGoStackCheck() } +func stack2448() { var buf [2448]byte; use(buf[:]); C.callGoStackCheck() } +func stack2452() { var buf [2452]byte; use(buf[:]); C.callGoStackCheck() } +func stack2456() { var buf [2456]byte; use(buf[:]); C.callGoStackCheck() } +func stack2460() { var buf [2460]byte; use(buf[:]); C.callGoStackCheck() } +func stack2464() { var buf [2464]byte; use(buf[:]); C.callGoStackCheck() } +func stack2468() { var buf [2468]byte; use(buf[:]); C.callGoStackCheck() } +func stack2472() { var buf [2472]byte; use(buf[:]); C.callGoStackCheck() } +func stack2476() { var buf [2476]byte; use(buf[:]); C.callGoStackCheck() } +func stack2480() { var buf [2480]byte; use(buf[:]); C.callGoStackCheck() } +func stack2484() { var buf [2484]byte; use(buf[:]); C.callGoStackCheck() } +func stack2488() { var buf [2488]byte; use(buf[:]); C.callGoStackCheck() } +func stack2492() { var buf [2492]byte; use(buf[:]); C.callGoStackCheck() } +func stack2496() { var buf [2496]byte; use(buf[:]); C.callGoStackCheck() } +func stack2500() { var buf [2500]byte; use(buf[:]); C.callGoStackCheck() } +func stack2504() { var buf [2504]byte; use(buf[:]); C.callGoStackCheck() } +func stack2508() { var buf [2508]byte; use(buf[:]); C.callGoStackCheck() } +func stack2512() { var buf [2512]byte; use(buf[:]); C.callGoStackCheck() } +func stack2516() { var buf [2516]byte; use(buf[:]); C.callGoStackCheck() } +func stack2520() { var buf [2520]byte; use(buf[:]); C.callGoStackCheck() } +func stack2524() { var buf [2524]byte; use(buf[:]); C.callGoStackCheck() } +func stack2528() { var buf [2528]byte; use(buf[:]); C.callGoStackCheck() } 
+func stack2532() { var buf [2532]byte; use(buf[:]); C.callGoStackCheck() } +func stack2536() { var buf [2536]byte; use(buf[:]); C.callGoStackCheck() } +func stack2540() { var buf [2540]byte; use(buf[:]); C.callGoStackCheck() } +func stack2544() { var buf [2544]byte; use(buf[:]); C.callGoStackCheck() } +func stack2548() { var buf [2548]byte; use(buf[:]); C.callGoStackCheck() } +func stack2552() { var buf [2552]byte; use(buf[:]); C.callGoStackCheck() } +func stack2556() { var buf [2556]byte; use(buf[:]); C.callGoStackCheck() } +func stack2560() { var buf [2560]byte; use(buf[:]); C.callGoStackCheck() } +func stack2564() { var buf [2564]byte; use(buf[:]); C.callGoStackCheck() } +func stack2568() { var buf [2568]byte; use(buf[:]); C.callGoStackCheck() } +func stack2572() { var buf [2572]byte; use(buf[:]); C.callGoStackCheck() } +func stack2576() { var buf [2576]byte; use(buf[:]); C.callGoStackCheck() } +func stack2580() { var buf [2580]byte; use(buf[:]); C.callGoStackCheck() } +func stack2584() { var buf [2584]byte; use(buf[:]); C.callGoStackCheck() } +func stack2588() { var buf [2588]byte; use(buf[:]); C.callGoStackCheck() } +func stack2592() { var buf [2592]byte; use(buf[:]); C.callGoStackCheck() } +func stack2596() { var buf [2596]byte; use(buf[:]); C.callGoStackCheck() } +func stack2600() { var buf [2600]byte; use(buf[:]); C.callGoStackCheck() } +func stack2604() { var buf [2604]byte; use(buf[:]); C.callGoStackCheck() } +func stack2608() { var buf [2608]byte; use(buf[:]); C.callGoStackCheck() } +func stack2612() { var buf [2612]byte; use(buf[:]); C.callGoStackCheck() } +func stack2616() { var buf [2616]byte; use(buf[:]); C.callGoStackCheck() } +func stack2620() { var buf [2620]byte; use(buf[:]); C.callGoStackCheck() } +func stack2624() { var buf [2624]byte; use(buf[:]); C.callGoStackCheck() } +func stack2628() { var buf [2628]byte; use(buf[:]); C.callGoStackCheck() } +func stack2632() { var buf [2632]byte; use(buf[:]); C.callGoStackCheck() } +func stack2636() { var 
buf [2636]byte; use(buf[:]); C.callGoStackCheck() } +func stack2640() { var buf [2640]byte; use(buf[:]); C.callGoStackCheck() } +func stack2644() { var buf [2644]byte; use(buf[:]); C.callGoStackCheck() } +func stack2648() { var buf [2648]byte; use(buf[:]); C.callGoStackCheck() } +func stack2652() { var buf [2652]byte; use(buf[:]); C.callGoStackCheck() } +func stack2656() { var buf [2656]byte; use(buf[:]); C.callGoStackCheck() } +func stack2660() { var buf [2660]byte; use(buf[:]); C.callGoStackCheck() } +func stack2664() { var buf [2664]byte; use(buf[:]); C.callGoStackCheck() } +func stack2668() { var buf [2668]byte; use(buf[:]); C.callGoStackCheck() } +func stack2672() { var buf [2672]byte; use(buf[:]); C.callGoStackCheck() } +func stack2676() { var buf [2676]byte; use(buf[:]); C.callGoStackCheck() } +func stack2680() { var buf [2680]byte; use(buf[:]); C.callGoStackCheck() } +func stack2684() { var buf [2684]byte; use(buf[:]); C.callGoStackCheck() } +func stack2688() { var buf [2688]byte; use(buf[:]); C.callGoStackCheck() } +func stack2692() { var buf [2692]byte; use(buf[:]); C.callGoStackCheck() } +func stack2696() { var buf [2696]byte; use(buf[:]); C.callGoStackCheck() } +func stack2700() { var buf [2700]byte; use(buf[:]); C.callGoStackCheck() } +func stack2704() { var buf [2704]byte; use(buf[:]); C.callGoStackCheck() } +func stack2708() { var buf [2708]byte; use(buf[:]); C.callGoStackCheck() } +func stack2712() { var buf [2712]byte; use(buf[:]); C.callGoStackCheck() } +func stack2716() { var buf [2716]byte; use(buf[:]); C.callGoStackCheck() } +func stack2720() { var buf [2720]byte; use(buf[:]); C.callGoStackCheck() } +func stack2724() { var buf [2724]byte; use(buf[:]); C.callGoStackCheck() } +func stack2728() { var buf [2728]byte; use(buf[:]); C.callGoStackCheck() } +func stack2732() { var buf [2732]byte; use(buf[:]); C.callGoStackCheck() } +func stack2736() { var buf [2736]byte; use(buf[:]); C.callGoStackCheck() } +func stack2740() { var buf [2740]byte; 
use(buf[:]); C.callGoStackCheck() } +func stack2744() { var buf [2744]byte; use(buf[:]); C.callGoStackCheck() } +func stack2748() { var buf [2748]byte; use(buf[:]); C.callGoStackCheck() } +func stack2752() { var buf [2752]byte; use(buf[:]); C.callGoStackCheck() } +func stack2756() { var buf [2756]byte; use(buf[:]); C.callGoStackCheck() } +func stack2760() { var buf [2760]byte; use(buf[:]); C.callGoStackCheck() } +func stack2764() { var buf [2764]byte; use(buf[:]); C.callGoStackCheck() } +func stack2768() { var buf [2768]byte; use(buf[:]); C.callGoStackCheck() } +func stack2772() { var buf [2772]byte; use(buf[:]); C.callGoStackCheck() } +func stack2776() { var buf [2776]byte; use(buf[:]); C.callGoStackCheck() } +func stack2780() { var buf [2780]byte; use(buf[:]); C.callGoStackCheck() } +func stack2784() { var buf [2784]byte; use(buf[:]); C.callGoStackCheck() } +func stack2788() { var buf [2788]byte; use(buf[:]); C.callGoStackCheck() } +func stack2792() { var buf [2792]byte; use(buf[:]); C.callGoStackCheck() } +func stack2796() { var buf [2796]byte; use(buf[:]); C.callGoStackCheck() } +func stack2800() { var buf [2800]byte; use(buf[:]); C.callGoStackCheck() } +func stack2804() { var buf [2804]byte; use(buf[:]); C.callGoStackCheck() } +func stack2808() { var buf [2808]byte; use(buf[:]); C.callGoStackCheck() } +func stack2812() { var buf [2812]byte; use(buf[:]); C.callGoStackCheck() } +func stack2816() { var buf [2816]byte; use(buf[:]); C.callGoStackCheck() } +func stack2820() { var buf [2820]byte; use(buf[:]); C.callGoStackCheck() } +func stack2824() { var buf [2824]byte; use(buf[:]); C.callGoStackCheck() } +func stack2828() { var buf [2828]byte; use(buf[:]); C.callGoStackCheck() } +func stack2832() { var buf [2832]byte; use(buf[:]); C.callGoStackCheck() } +func stack2836() { var buf [2836]byte; use(buf[:]); C.callGoStackCheck() } +func stack2840() { var buf [2840]byte; use(buf[:]); C.callGoStackCheck() } +func stack2844() { var buf [2844]byte; use(buf[:]); 
C.callGoStackCheck() } +func stack2848() { var buf [2848]byte; use(buf[:]); C.callGoStackCheck() } +func stack2852() { var buf [2852]byte; use(buf[:]); C.callGoStackCheck() } +func stack2856() { var buf [2856]byte; use(buf[:]); C.callGoStackCheck() } +func stack2860() { var buf [2860]byte; use(buf[:]); C.callGoStackCheck() } +func stack2864() { var buf [2864]byte; use(buf[:]); C.callGoStackCheck() } +func stack2868() { var buf [2868]byte; use(buf[:]); C.callGoStackCheck() } +func stack2872() { var buf [2872]byte; use(buf[:]); C.callGoStackCheck() } +func stack2876() { var buf [2876]byte; use(buf[:]); C.callGoStackCheck() } +func stack2880() { var buf [2880]byte; use(buf[:]); C.callGoStackCheck() } +func stack2884() { var buf [2884]byte; use(buf[:]); C.callGoStackCheck() } +func stack2888() { var buf [2888]byte; use(buf[:]); C.callGoStackCheck() } +func stack2892() { var buf [2892]byte; use(buf[:]); C.callGoStackCheck() } +func stack2896() { var buf [2896]byte; use(buf[:]); C.callGoStackCheck() } +func stack2900() { var buf [2900]byte; use(buf[:]); C.callGoStackCheck() } +func stack2904() { var buf [2904]byte; use(buf[:]); C.callGoStackCheck() } +func stack2908() { var buf [2908]byte; use(buf[:]); C.callGoStackCheck() } +func stack2912() { var buf [2912]byte; use(buf[:]); C.callGoStackCheck() } +func stack2916() { var buf [2916]byte; use(buf[:]); C.callGoStackCheck() } +func stack2920() { var buf [2920]byte; use(buf[:]); C.callGoStackCheck() } +func stack2924() { var buf [2924]byte; use(buf[:]); C.callGoStackCheck() } +func stack2928() { var buf [2928]byte; use(buf[:]); C.callGoStackCheck() } +func stack2932() { var buf [2932]byte; use(buf[:]); C.callGoStackCheck() } +func stack2936() { var buf [2936]byte; use(buf[:]); C.callGoStackCheck() } +func stack2940() { var buf [2940]byte; use(buf[:]); C.callGoStackCheck() } +func stack2944() { var buf [2944]byte; use(buf[:]); C.callGoStackCheck() } +func stack2948() { var buf [2948]byte; use(buf[:]); C.callGoStackCheck() } 
+func stack2952() { var buf [2952]byte; use(buf[:]); C.callGoStackCheck() } +func stack2956() { var buf [2956]byte; use(buf[:]); C.callGoStackCheck() } +func stack2960() { var buf [2960]byte; use(buf[:]); C.callGoStackCheck() } +func stack2964() { var buf [2964]byte; use(buf[:]); C.callGoStackCheck() } +func stack2968() { var buf [2968]byte; use(buf[:]); C.callGoStackCheck() } +func stack2972() { var buf [2972]byte; use(buf[:]); C.callGoStackCheck() } +func stack2976() { var buf [2976]byte; use(buf[:]); C.callGoStackCheck() } +func stack2980() { var buf [2980]byte; use(buf[:]); C.callGoStackCheck() } +func stack2984() { var buf [2984]byte; use(buf[:]); C.callGoStackCheck() } +func stack2988() { var buf [2988]byte; use(buf[:]); C.callGoStackCheck() } +func stack2992() { var buf [2992]byte; use(buf[:]); C.callGoStackCheck() } +func stack2996() { var buf [2996]byte; use(buf[:]); C.callGoStackCheck() } +func stack3000() { var buf [3000]byte; use(buf[:]); C.callGoStackCheck() } +func stack3004() { var buf [3004]byte; use(buf[:]); C.callGoStackCheck() } +func stack3008() { var buf [3008]byte; use(buf[:]); C.callGoStackCheck() } +func stack3012() { var buf [3012]byte; use(buf[:]); C.callGoStackCheck() } +func stack3016() { var buf [3016]byte; use(buf[:]); C.callGoStackCheck() } +func stack3020() { var buf [3020]byte; use(buf[:]); C.callGoStackCheck() } +func stack3024() { var buf [3024]byte; use(buf[:]); C.callGoStackCheck() } +func stack3028() { var buf [3028]byte; use(buf[:]); C.callGoStackCheck() } +func stack3032() { var buf [3032]byte; use(buf[:]); C.callGoStackCheck() } +func stack3036() { var buf [3036]byte; use(buf[:]); C.callGoStackCheck() } +func stack3040() { var buf [3040]byte; use(buf[:]); C.callGoStackCheck() } +func stack3044() { var buf [3044]byte; use(buf[:]); C.callGoStackCheck() } +func stack3048() { var buf [3048]byte; use(buf[:]); C.callGoStackCheck() } +func stack3052() { var buf [3052]byte; use(buf[:]); C.callGoStackCheck() } +func stack3056() { var 
buf [3056]byte; use(buf[:]); C.callGoStackCheck() } +func stack3060() { var buf [3060]byte; use(buf[:]); C.callGoStackCheck() } +func stack3064() { var buf [3064]byte; use(buf[:]); C.callGoStackCheck() } +func stack3068() { var buf [3068]byte; use(buf[:]); C.callGoStackCheck() } +func stack3072() { var buf [3072]byte; use(buf[:]); C.callGoStackCheck() } +func stack3076() { var buf [3076]byte; use(buf[:]); C.callGoStackCheck() } +func stack3080() { var buf [3080]byte; use(buf[:]); C.callGoStackCheck() } +func stack3084() { var buf [3084]byte; use(buf[:]); C.callGoStackCheck() } +func stack3088() { var buf [3088]byte; use(buf[:]); C.callGoStackCheck() } +func stack3092() { var buf [3092]byte; use(buf[:]); C.callGoStackCheck() } +func stack3096() { var buf [3096]byte; use(buf[:]); C.callGoStackCheck() } +func stack3100() { var buf [3100]byte; use(buf[:]); C.callGoStackCheck() } +func stack3104() { var buf [3104]byte; use(buf[:]); C.callGoStackCheck() } +func stack3108() { var buf [3108]byte; use(buf[:]); C.callGoStackCheck() } +func stack3112() { var buf [3112]byte; use(buf[:]); C.callGoStackCheck() } +func stack3116() { var buf [3116]byte; use(buf[:]); C.callGoStackCheck() } +func stack3120() { var buf [3120]byte; use(buf[:]); C.callGoStackCheck() } +func stack3124() { var buf [3124]byte; use(buf[:]); C.callGoStackCheck() } +func stack3128() { var buf [3128]byte; use(buf[:]); C.callGoStackCheck() } +func stack3132() { var buf [3132]byte; use(buf[:]); C.callGoStackCheck() } +func stack3136() { var buf [3136]byte; use(buf[:]); C.callGoStackCheck() } +func stack3140() { var buf [3140]byte; use(buf[:]); C.callGoStackCheck() } +func stack3144() { var buf [3144]byte; use(buf[:]); C.callGoStackCheck() } +func stack3148() { var buf [3148]byte; use(buf[:]); C.callGoStackCheck() } +func stack3152() { var buf [3152]byte; use(buf[:]); C.callGoStackCheck() } +func stack3156() { var buf [3156]byte; use(buf[:]); C.callGoStackCheck() } +func stack3160() { var buf [3160]byte; 
use(buf[:]); C.callGoStackCheck() } +func stack3164() { var buf [3164]byte; use(buf[:]); C.callGoStackCheck() } +func stack3168() { var buf [3168]byte; use(buf[:]); C.callGoStackCheck() } +func stack3172() { var buf [3172]byte; use(buf[:]); C.callGoStackCheck() } +func stack3176() { var buf [3176]byte; use(buf[:]); C.callGoStackCheck() } +func stack3180() { var buf [3180]byte; use(buf[:]); C.callGoStackCheck() } +func stack3184() { var buf [3184]byte; use(buf[:]); C.callGoStackCheck() } +func stack3188() { var buf [3188]byte; use(buf[:]); C.callGoStackCheck() } +func stack3192() { var buf [3192]byte; use(buf[:]); C.callGoStackCheck() } +func stack3196() { var buf [3196]byte; use(buf[:]); C.callGoStackCheck() } +func stack3200() { var buf [3200]byte; use(buf[:]); C.callGoStackCheck() } +func stack3204() { var buf [3204]byte; use(buf[:]); C.callGoStackCheck() } +func stack3208() { var buf [3208]byte; use(buf[:]); C.callGoStackCheck() } +func stack3212() { var buf [3212]byte; use(buf[:]); C.callGoStackCheck() } +func stack3216() { var buf [3216]byte; use(buf[:]); C.callGoStackCheck() } +func stack3220() { var buf [3220]byte; use(buf[:]); C.callGoStackCheck() } +func stack3224() { var buf [3224]byte; use(buf[:]); C.callGoStackCheck() } +func stack3228() { var buf [3228]byte; use(buf[:]); C.callGoStackCheck() } +func stack3232() { var buf [3232]byte; use(buf[:]); C.callGoStackCheck() } +func stack3236() { var buf [3236]byte; use(buf[:]); C.callGoStackCheck() } +func stack3240() { var buf [3240]byte; use(buf[:]); C.callGoStackCheck() } +func stack3244() { var buf [3244]byte; use(buf[:]); C.callGoStackCheck() } +func stack3248() { var buf [3248]byte; use(buf[:]); C.callGoStackCheck() } +func stack3252() { var buf [3252]byte; use(buf[:]); C.callGoStackCheck() } +func stack3256() { var buf [3256]byte; use(buf[:]); C.callGoStackCheck() } +func stack3260() { var buf [3260]byte; use(buf[:]); C.callGoStackCheck() } +func stack3264() { var buf [3264]byte; use(buf[:]); 
C.callGoStackCheck() } +func stack3268() { var buf [3268]byte; use(buf[:]); C.callGoStackCheck() } +func stack3272() { var buf [3272]byte; use(buf[:]); C.callGoStackCheck() } +func stack3276() { var buf [3276]byte; use(buf[:]); C.callGoStackCheck() } +func stack3280() { var buf [3280]byte; use(buf[:]); C.callGoStackCheck() } +func stack3284() { var buf [3284]byte; use(buf[:]); C.callGoStackCheck() } +func stack3288() { var buf [3288]byte; use(buf[:]); C.callGoStackCheck() } +func stack3292() { var buf [3292]byte; use(buf[:]); C.callGoStackCheck() } +func stack3296() { var buf [3296]byte; use(buf[:]); C.callGoStackCheck() } +func stack3300() { var buf [3300]byte; use(buf[:]); C.callGoStackCheck() } +func stack3304() { var buf [3304]byte; use(buf[:]); C.callGoStackCheck() } +func stack3308() { var buf [3308]byte; use(buf[:]); C.callGoStackCheck() } +func stack3312() { var buf [3312]byte; use(buf[:]); C.callGoStackCheck() } +func stack3316() { var buf [3316]byte; use(buf[:]); C.callGoStackCheck() } +func stack3320() { var buf [3320]byte; use(buf[:]); C.callGoStackCheck() } +func stack3324() { var buf [3324]byte; use(buf[:]); C.callGoStackCheck() } +func stack3328() { var buf [3328]byte; use(buf[:]); C.callGoStackCheck() } +func stack3332() { var buf [3332]byte; use(buf[:]); C.callGoStackCheck() } +func stack3336() { var buf [3336]byte; use(buf[:]); C.callGoStackCheck() } +func stack3340() { var buf [3340]byte; use(buf[:]); C.callGoStackCheck() } +func stack3344() { var buf [3344]byte; use(buf[:]); C.callGoStackCheck() } +func stack3348() { var buf [3348]byte; use(buf[:]); C.callGoStackCheck() } +func stack3352() { var buf [3352]byte; use(buf[:]); C.callGoStackCheck() } +func stack3356() { var buf [3356]byte; use(buf[:]); C.callGoStackCheck() } +func stack3360() { var buf [3360]byte; use(buf[:]); C.callGoStackCheck() } +func stack3364() { var buf [3364]byte; use(buf[:]); C.callGoStackCheck() } +func stack3368() { var buf [3368]byte; use(buf[:]); C.callGoStackCheck() } 
+func stack3372() { var buf [3372]byte; use(buf[:]); C.callGoStackCheck() } +func stack3376() { var buf [3376]byte; use(buf[:]); C.callGoStackCheck() } +func stack3380() { var buf [3380]byte; use(buf[:]); C.callGoStackCheck() } +func stack3384() { var buf [3384]byte; use(buf[:]); C.callGoStackCheck() } +func stack3388() { var buf [3388]byte; use(buf[:]); C.callGoStackCheck() } +func stack3392() { var buf [3392]byte; use(buf[:]); C.callGoStackCheck() } +func stack3396() { var buf [3396]byte; use(buf[:]); C.callGoStackCheck() } +func stack3400() { var buf [3400]byte; use(buf[:]); C.callGoStackCheck() } +func stack3404() { var buf [3404]byte; use(buf[:]); C.callGoStackCheck() } +func stack3408() { var buf [3408]byte; use(buf[:]); C.callGoStackCheck() } +func stack3412() { var buf [3412]byte; use(buf[:]); C.callGoStackCheck() } +func stack3416() { var buf [3416]byte; use(buf[:]); C.callGoStackCheck() } +func stack3420() { var buf [3420]byte; use(buf[:]); C.callGoStackCheck() } +func stack3424() { var buf [3424]byte; use(buf[:]); C.callGoStackCheck() } +func stack3428() { var buf [3428]byte; use(buf[:]); C.callGoStackCheck() } +func stack3432() { var buf [3432]byte; use(buf[:]); C.callGoStackCheck() } +func stack3436() { var buf [3436]byte; use(buf[:]); C.callGoStackCheck() } +func stack3440() { var buf [3440]byte; use(buf[:]); C.callGoStackCheck() } +func stack3444() { var buf [3444]byte; use(buf[:]); C.callGoStackCheck() } +func stack3448() { var buf [3448]byte; use(buf[:]); C.callGoStackCheck() } +func stack3452() { var buf [3452]byte; use(buf[:]); C.callGoStackCheck() } +func stack3456() { var buf [3456]byte; use(buf[:]); C.callGoStackCheck() } +func stack3460() { var buf [3460]byte; use(buf[:]); C.callGoStackCheck() } +func stack3464() { var buf [3464]byte; use(buf[:]); C.callGoStackCheck() } +func stack3468() { var buf [3468]byte; use(buf[:]); C.callGoStackCheck() } +func stack3472() { var buf [3472]byte; use(buf[:]); C.callGoStackCheck() } +func stack3476() { var 
buf [3476]byte; use(buf[:]); C.callGoStackCheck() } +func stack3480() { var buf [3480]byte; use(buf[:]); C.callGoStackCheck() } +func stack3484() { var buf [3484]byte; use(buf[:]); C.callGoStackCheck() } +func stack3488() { var buf [3488]byte; use(buf[:]); C.callGoStackCheck() } +func stack3492() { var buf [3492]byte; use(buf[:]); C.callGoStackCheck() } +func stack3496() { var buf [3496]byte; use(buf[:]); C.callGoStackCheck() } +func stack3500() { var buf [3500]byte; use(buf[:]); C.callGoStackCheck() } +func stack3504() { var buf [3504]byte; use(buf[:]); C.callGoStackCheck() } +func stack3508() { var buf [3508]byte; use(buf[:]); C.callGoStackCheck() } +func stack3512() { var buf [3512]byte; use(buf[:]); C.callGoStackCheck() } +func stack3516() { var buf [3516]byte; use(buf[:]); C.callGoStackCheck() } +func stack3520() { var buf [3520]byte; use(buf[:]); C.callGoStackCheck() } +func stack3524() { var buf [3524]byte; use(buf[:]); C.callGoStackCheck() } +func stack3528() { var buf [3528]byte; use(buf[:]); C.callGoStackCheck() } +func stack3532() { var buf [3532]byte; use(buf[:]); C.callGoStackCheck() } +func stack3536() { var buf [3536]byte; use(buf[:]); C.callGoStackCheck() } +func stack3540() { var buf [3540]byte; use(buf[:]); C.callGoStackCheck() } +func stack3544() { var buf [3544]byte; use(buf[:]); C.callGoStackCheck() } +func stack3548() { var buf [3548]byte; use(buf[:]); C.callGoStackCheck() } +func stack3552() { var buf [3552]byte; use(buf[:]); C.callGoStackCheck() } +func stack3556() { var buf [3556]byte; use(buf[:]); C.callGoStackCheck() } +func stack3560() { var buf [3560]byte; use(buf[:]); C.callGoStackCheck() } +func stack3564() { var buf [3564]byte; use(buf[:]); C.callGoStackCheck() } +func stack3568() { var buf [3568]byte; use(buf[:]); C.callGoStackCheck() } +func stack3572() { var buf [3572]byte; use(buf[:]); C.callGoStackCheck() } +func stack3576() { var buf [3576]byte; use(buf[:]); C.callGoStackCheck() } +func stack3580() { var buf [3580]byte; 
use(buf[:]); C.callGoStackCheck() } +func stack3584() { var buf [3584]byte; use(buf[:]); C.callGoStackCheck() } +func stack3588() { var buf [3588]byte; use(buf[:]); C.callGoStackCheck() } +func stack3592() { var buf [3592]byte; use(buf[:]); C.callGoStackCheck() } +func stack3596() { var buf [3596]byte; use(buf[:]); C.callGoStackCheck() } +func stack3600() { var buf [3600]byte; use(buf[:]); C.callGoStackCheck() } +func stack3604() { var buf [3604]byte; use(buf[:]); C.callGoStackCheck() } +func stack3608() { var buf [3608]byte; use(buf[:]); C.callGoStackCheck() } +func stack3612() { var buf [3612]byte; use(buf[:]); C.callGoStackCheck() } +func stack3616() { var buf [3616]byte; use(buf[:]); C.callGoStackCheck() } +func stack3620() { var buf [3620]byte; use(buf[:]); C.callGoStackCheck() } +func stack3624() { var buf [3624]byte; use(buf[:]); C.callGoStackCheck() } +func stack3628() { var buf [3628]byte; use(buf[:]); C.callGoStackCheck() } +func stack3632() { var buf [3632]byte; use(buf[:]); C.callGoStackCheck() } +func stack3636() { var buf [3636]byte; use(buf[:]); C.callGoStackCheck() } +func stack3640() { var buf [3640]byte; use(buf[:]); C.callGoStackCheck() } +func stack3644() { var buf [3644]byte; use(buf[:]); C.callGoStackCheck() } +func stack3648() { var buf [3648]byte; use(buf[:]); C.callGoStackCheck() } +func stack3652() { var buf [3652]byte; use(buf[:]); C.callGoStackCheck() } +func stack3656() { var buf [3656]byte; use(buf[:]); C.callGoStackCheck() } +func stack3660() { var buf [3660]byte; use(buf[:]); C.callGoStackCheck() } +func stack3664() { var buf [3664]byte; use(buf[:]); C.callGoStackCheck() } +func stack3668() { var buf [3668]byte; use(buf[:]); C.callGoStackCheck() } +func stack3672() { var buf [3672]byte; use(buf[:]); C.callGoStackCheck() } +func stack3676() { var buf [3676]byte; use(buf[:]); C.callGoStackCheck() } +func stack3680() { var buf [3680]byte; use(buf[:]); C.callGoStackCheck() } +func stack3684() { var buf [3684]byte; use(buf[:]); 
C.callGoStackCheck() } +func stack3688() { var buf [3688]byte; use(buf[:]); C.callGoStackCheck() } +func stack3692() { var buf [3692]byte; use(buf[:]); C.callGoStackCheck() } +func stack3696() { var buf [3696]byte; use(buf[:]); C.callGoStackCheck() } +func stack3700() { var buf [3700]byte; use(buf[:]); C.callGoStackCheck() } +func stack3704() { var buf [3704]byte; use(buf[:]); C.callGoStackCheck() } +func stack3708() { var buf [3708]byte; use(buf[:]); C.callGoStackCheck() } +func stack3712() { var buf [3712]byte; use(buf[:]); C.callGoStackCheck() } +func stack3716() { var buf [3716]byte; use(buf[:]); C.callGoStackCheck() } +func stack3720() { var buf [3720]byte; use(buf[:]); C.callGoStackCheck() } +func stack3724() { var buf [3724]byte; use(buf[:]); C.callGoStackCheck() } +func stack3728() { var buf [3728]byte; use(buf[:]); C.callGoStackCheck() } +func stack3732() { var buf [3732]byte; use(buf[:]); C.callGoStackCheck() } +func stack3736() { var buf [3736]byte; use(buf[:]); C.callGoStackCheck() } +func stack3740() { var buf [3740]byte; use(buf[:]); C.callGoStackCheck() } +func stack3744() { var buf [3744]byte; use(buf[:]); C.callGoStackCheck() } +func stack3748() { var buf [3748]byte; use(buf[:]); C.callGoStackCheck() } +func stack3752() { var buf [3752]byte; use(buf[:]); C.callGoStackCheck() } +func stack3756() { var buf [3756]byte; use(buf[:]); C.callGoStackCheck() } +func stack3760() { var buf [3760]byte; use(buf[:]); C.callGoStackCheck() } +func stack3764() { var buf [3764]byte; use(buf[:]); C.callGoStackCheck() } +func stack3768() { var buf [3768]byte; use(buf[:]); C.callGoStackCheck() } +func stack3772() { var buf [3772]byte; use(buf[:]); C.callGoStackCheck() } +func stack3776() { var buf [3776]byte; use(buf[:]); C.callGoStackCheck() } +func stack3780() { var buf [3780]byte; use(buf[:]); C.callGoStackCheck() } +func stack3784() { var buf [3784]byte; use(buf[:]); C.callGoStackCheck() } +func stack3788() { var buf [3788]byte; use(buf[:]); C.callGoStackCheck() } 
+func stack3792() { var buf [3792]byte; use(buf[:]); C.callGoStackCheck() } +func stack3796() { var buf [3796]byte; use(buf[:]); C.callGoStackCheck() } +func stack3800() { var buf [3800]byte; use(buf[:]); C.callGoStackCheck() } +func stack3804() { var buf [3804]byte; use(buf[:]); C.callGoStackCheck() } +func stack3808() { var buf [3808]byte; use(buf[:]); C.callGoStackCheck() } +func stack3812() { var buf [3812]byte; use(buf[:]); C.callGoStackCheck() } +func stack3816() { var buf [3816]byte; use(buf[:]); C.callGoStackCheck() } +func stack3820() { var buf [3820]byte; use(buf[:]); C.callGoStackCheck() } +func stack3824() { var buf [3824]byte; use(buf[:]); C.callGoStackCheck() } +func stack3828() { var buf [3828]byte; use(buf[:]); C.callGoStackCheck() } +func stack3832() { var buf [3832]byte; use(buf[:]); C.callGoStackCheck() } +func stack3836() { var buf [3836]byte; use(buf[:]); C.callGoStackCheck() } +func stack3840() { var buf [3840]byte; use(buf[:]); C.callGoStackCheck() } +func stack3844() { var buf [3844]byte; use(buf[:]); C.callGoStackCheck() } +func stack3848() { var buf [3848]byte; use(buf[:]); C.callGoStackCheck() } +func stack3852() { var buf [3852]byte; use(buf[:]); C.callGoStackCheck() } +func stack3856() { var buf [3856]byte; use(buf[:]); C.callGoStackCheck() } +func stack3860() { var buf [3860]byte; use(buf[:]); C.callGoStackCheck() } +func stack3864() { var buf [3864]byte; use(buf[:]); C.callGoStackCheck() } +func stack3868() { var buf [3868]byte; use(buf[:]); C.callGoStackCheck() } +func stack3872() { var buf [3872]byte; use(buf[:]); C.callGoStackCheck() } +func stack3876() { var buf [3876]byte; use(buf[:]); C.callGoStackCheck() } +func stack3880() { var buf [3880]byte; use(buf[:]); C.callGoStackCheck() } +func stack3884() { var buf [3884]byte; use(buf[:]); C.callGoStackCheck() } +func stack3888() { var buf [3888]byte; use(buf[:]); C.callGoStackCheck() } +func stack3892() { var buf [3892]byte; use(buf[:]); C.callGoStackCheck() } +func stack3896() { var 
buf [3896]byte; use(buf[:]); C.callGoStackCheck() } +func stack3900() { var buf [3900]byte; use(buf[:]); C.callGoStackCheck() } +func stack3904() { var buf [3904]byte; use(buf[:]); C.callGoStackCheck() } +func stack3908() { var buf [3908]byte; use(buf[:]); C.callGoStackCheck() } +func stack3912() { var buf [3912]byte; use(buf[:]); C.callGoStackCheck() } +func stack3916() { var buf [3916]byte; use(buf[:]); C.callGoStackCheck() } +func stack3920() { var buf [3920]byte; use(buf[:]); C.callGoStackCheck() } +func stack3924() { var buf [3924]byte; use(buf[:]); C.callGoStackCheck() } +func stack3928() { var buf [3928]byte; use(buf[:]); C.callGoStackCheck() } +func stack3932() { var buf [3932]byte; use(buf[:]); C.callGoStackCheck() } +func stack3936() { var buf [3936]byte; use(buf[:]); C.callGoStackCheck() } +func stack3940() { var buf [3940]byte; use(buf[:]); C.callGoStackCheck() } +func stack3944() { var buf [3944]byte; use(buf[:]); C.callGoStackCheck() } +func stack3948() { var buf [3948]byte; use(buf[:]); C.callGoStackCheck() } +func stack3952() { var buf [3952]byte; use(buf[:]); C.callGoStackCheck() } +func stack3956() { var buf [3956]byte; use(buf[:]); C.callGoStackCheck() } +func stack3960() { var buf [3960]byte; use(buf[:]); C.callGoStackCheck() } +func stack3964() { var buf [3964]byte; use(buf[:]); C.callGoStackCheck() } +func stack3968() { var buf [3968]byte; use(buf[:]); C.callGoStackCheck() } +func stack3972() { var buf [3972]byte; use(buf[:]); C.callGoStackCheck() } +func stack3976() { var buf [3976]byte; use(buf[:]); C.callGoStackCheck() } +func stack3980() { var buf [3980]byte; use(buf[:]); C.callGoStackCheck() } +func stack3984() { var buf [3984]byte; use(buf[:]); C.callGoStackCheck() } +func stack3988() { var buf [3988]byte; use(buf[:]); C.callGoStackCheck() } +func stack3992() { var buf [3992]byte; use(buf[:]); C.callGoStackCheck() } +func stack3996() { var buf [3996]byte; use(buf[:]); C.callGoStackCheck() } +func stack4000() { var buf [4000]byte; 
use(buf[:]); C.callGoStackCheck() } +func stack4004() { var buf [4004]byte; use(buf[:]); C.callGoStackCheck() } +func stack4008() { var buf [4008]byte; use(buf[:]); C.callGoStackCheck() } +func stack4012() { var buf [4012]byte; use(buf[:]); C.callGoStackCheck() } +func stack4016() { var buf [4016]byte; use(buf[:]); C.callGoStackCheck() } +func stack4020() { var buf [4020]byte; use(buf[:]); C.callGoStackCheck() } +func stack4024() { var buf [4024]byte; use(buf[:]); C.callGoStackCheck() } +func stack4028() { var buf [4028]byte; use(buf[:]); C.callGoStackCheck() } +func stack4032() { var buf [4032]byte; use(buf[:]); C.callGoStackCheck() } +func stack4036() { var buf [4036]byte; use(buf[:]); C.callGoStackCheck() } +func stack4040() { var buf [4040]byte; use(buf[:]); C.callGoStackCheck() } +func stack4044() { var buf [4044]byte; use(buf[:]); C.callGoStackCheck() } +func stack4048() { var buf [4048]byte; use(buf[:]); C.callGoStackCheck() } +func stack4052() { var buf [4052]byte; use(buf[:]); C.callGoStackCheck() } +func stack4056() { var buf [4056]byte; use(buf[:]); C.callGoStackCheck() } +func stack4060() { var buf [4060]byte; use(buf[:]); C.callGoStackCheck() } +func stack4064() { var buf [4064]byte; use(buf[:]); C.callGoStackCheck() } +func stack4068() { var buf [4068]byte; use(buf[:]); C.callGoStackCheck() } +func stack4072() { var buf [4072]byte; use(buf[:]); C.callGoStackCheck() } +func stack4076() { var buf [4076]byte; use(buf[:]); C.callGoStackCheck() } +func stack4080() { var buf [4080]byte; use(buf[:]); C.callGoStackCheck() } +func stack4084() { var buf [4084]byte; use(buf[:]); C.callGoStackCheck() } +func stack4088() { var buf [4088]byte; use(buf[:]); C.callGoStackCheck() } +func stack4092() { var buf [4092]byte; use(buf[:]); C.callGoStackCheck() } +func stack4096() { var buf [4096]byte; use(buf[:]); C.callGoStackCheck() } +func stack4100() { var buf [4100]byte; use(buf[:]); C.callGoStackCheck() } +func stack4104() { var buf [4104]byte; use(buf[:]); 
C.callGoStackCheck() } +func stack4108() { var buf [4108]byte; use(buf[:]); C.callGoStackCheck() } +func stack4112() { var buf [4112]byte; use(buf[:]); C.callGoStackCheck() } +func stack4116() { var buf [4116]byte; use(buf[:]); C.callGoStackCheck() } +func stack4120() { var buf [4120]byte; use(buf[:]); C.callGoStackCheck() } +func stack4124() { var buf [4124]byte; use(buf[:]); C.callGoStackCheck() } +func stack4128() { var buf [4128]byte; use(buf[:]); C.callGoStackCheck() } +func stack4132() { var buf [4132]byte; use(buf[:]); C.callGoStackCheck() } +func stack4136() { var buf [4136]byte; use(buf[:]); C.callGoStackCheck() } +func stack4140() { var buf [4140]byte; use(buf[:]); C.callGoStackCheck() } +func stack4144() { var buf [4144]byte; use(buf[:]); C.callGoStackCheck() } +func stack4148() { var buf [4148]byte; use(buf[:]); C.callGoStackCheck() } +func stack4152() { var buf [4152]byte; use(buf[:]); C.callGoStackCheck() } +func stack4156() { var buf [4156]byte; use(buf[:]); C.callGoStackCheck() } +func stack4160() { var buf [4160]byte; use(buf[:]); C.callGoStackCheck() } +func stack4164() { var buf [4164]byte; use(buf[:]); C.callGoStackCheck() } +func stack4168() { var buf [4168]byte; use(buf[:]); C.callGoStackCheck() } +func stack4172() { var buf [4172]byte; use(buf[:]); C.callGoStackCheck() } +func stack4176() { var buf [4176]byte; use(buf[:]); C.callGoStackCheck() } +func stack4180() { var buf [4180]byte; use(buf[:]); C.callGoStackCheck() } +func stack4184() { var buf [4184]byte; use(buf[:]); C.callGoStackCheck() } +func stack4188() { var buf [4188]byte; use(buf[:]); C.callGoStackCheck() } +func stack4192() { var buf [4192]byte; use(buf[:]); C.callGoStackCheck() } +func stack4196() { var buf [4196]byte; use(buf[:]); C.callGoStackCheck() } +func stack4200() { var buf [4200]byte; use(buf[:]); C.callGoStackCheck() } +func stack4204() { var buf [4204]byte; use(buf[:]); C.callGoStackCheck() } +func stack4208() { var buf [4208]byte; use(buf[:]); C.callGoStackCheck() } 
+func stack4212() { var buf [4212]byte; use(buf[:]); C.callGoStackCheck() } +func stack4216() { var buf [4216]byte; use(buf[:]); C.callGoStackCheck() } +func stack4220() { var buf [4220]byte; use(buf[:]); C.callGoStackCheck() } +func stack4224() { var buf [4224]byte; use(buf[:]); C.callGoStackCheck() } +func stack4228() { var buf [4228]byte; use(buf[:]); C.callGoStackCheck() } +func stack4232() { var buf [4232]byte; use(buf[:]); C.callGoStackCheck() } +func stack4236() { var buf [4236]byte; use(buf[:]); C.callGoStackCheck() } +func stack4240() { var buf [4240]byte; use(buf[:]); C.callGoStackCheck() } +func stack4244() { var buf [4244]byte; use(buf[:]); C.callGoStackCheck() } +func stack4248() { var buf [4248]byte; use(buf[:]); C.callGoStackCheck() } +func stack4252() { var buf [4252]byte; use(buf[:]); C.callGoStackCheck() } +func stack4256() { var buf [4256]byte; use(buf[:]); C.callGoStackCheck() } +func stack4260() { var buf [4260]byte; use(buf[:]); C.callGoStackCheck() } +func stack4264() { var buf [4264]byte; use(buf[:]); C.callGoStackCheck() } +func stack4268() { var buf [4268]byte; use(buf[:]); C.callGoStackCheck() } +func stack4272() { var buf [4272]byte; use(buf[:]); C.callGoStackCheck() } +func stack4276() { var buf [4276]byte; use(buf[:]); C.callGoStackCheck() } +func stack4280() { var buf [4280]byte; use(buf[:]); C.callGoStackCheck() } +func stack4284() { var buf [4284]byte; use(buf[:]); C.callGoStackCheck() } +func stack4288() { var buf [4288]byte; use(buf[:]); C.callGoStackCheck() } +func stack4292() { var buf [4292]byte; use(buf[:]); C.callGoStackCheck() } +func stack4296() { var buf [4296]byte; use(buf[:]); C.callGoStackCheck() } +func stack4300() { var buf [4300]byte; use(buf[:]); C.callGoStackCheck() } +func stack4304() { var buf [4304]byte; use(buf[:]); C.callGoStackCheck() } +func stack4308() { var buf [4308]byte; use(buf[:]); C.callGoStackCheck() } +func stack4312() { var buf [4312]byte; use(buf[:]); C.callGoStackCheck() } +func stack4316() { var 
buf [4316]byte; use(buf[:]); C.callGoStackCheck() } +func stack4320() { var buf [4320]byte; use(buf[:]); C.callGoStackCheck() } +func stack4324() { var buf [4324]byte; use(buf[:]); C.callGoStackCheck() } +func stack4328() { var buf [4328]byte; use(buf[:]); C.callGoStackCheck() } +func stack4332() { var buf [4332]byte; use(buf[:]); C.callGoStackCheck() } +func stack4336() { var buf [4336]byte; use(buf[:]); C.callGoStackCheck() } +func stack4340() { var buf [4340]byte; use(buf[:]); C.callGoStackCheck() } +func stack4344() { var buf [4344]byte; use(buf[:]); C.callGoStackCheck() } +func stack4348() { var buf [4348]byte; use(buf[:]); C.callGoStackCheck() } +func stack4352() { var buf [4352]byte; use(buf[:]); C.callGoStackCheck() } +func stack4356() { var buf [4356]byte; use(buf[:]); C.callGoStackCheck() } +func stack4360() { var buf [4360]byte; use(buf[:]); C.callGoStackCheck() } +func stack4364() { var buf [4364]byte; use(buf[:]); C.callGoStackCheck() } +func stack4368() { var buf [4368]byte; use(buf[:]); C.callGoStackCheck() } +func stack4372() { var buf [4372]byte; use(buf[:]); C.callGoStackCheck() } +func stack4376() { var buf [4376]byte; use(buf[:]); C.callGoStackCheck() } +func stack4380() { var buf [4380]byte; use(buf[:]); C.callGoStackCheck() } +func stack4384() { var buf [4384]byte; use(buf[:]); C.callGoStackCheck() } +func stack4388() { var buf [4388]byte; use(buf[:]); C.callGoStackCheck() } +func stack4392() { var buf [4392]byte; use(buf[:]); C.callGoStackCheck() } +func stack4396() { var buf [4396]byte; use(buf[:]); C.callGoStackCheck() } +func stack4400() { var buf [4400]byte; use(buf[:]); C.callGoStackCheck() } +func stack4404() { var buf [4404]byte; use(buf[:]); C.callGoStackCheck() } +func stack4408() { var buf [4408]byte; use(buf[:]); C.callGoStackCheck() } +func stack4412() { var buf [4412]byte; use(buf[:]); C.callGoStackCheck() } +func stack4416() { var buf [4416]byte; use(buf[:]); C.callGoStackCheck() } +func stack4420() { var buf [4420]byte; 
use(buf[:]); C.callGoStackCheck() } +func stack4424() { var buf [4424]byte; use(buf[:]); C.callGoStackCheck() } +func stack4428() { var buf [4428]byte; use(buf[:]); C.callGoStackCheck() } +func stack4432() { var buf [4432]byte; use(buf[:]); C.callGoStackCheck() } +func stack4436() { var buf [4436]byte; use(buf[:]); C.callGoStackCheck() } +func stack4440() { var buf [4440]byte; use(buf[:]); C.callGoStackCheck() } +func stack4444() { var buf [4444]byte; use(buf[:]); C.callGoStackCheck() } +func stack4448() { var buf [4448]byte; use(buf[:]); C.callGoStackCheck() } +func stack4452() { var buf [4452]byte; use(buf[:]); C.callGoStackCheck() } +func stack4456() { var buf [4456]byte; use(buf[:]); C.callGoStackCheck() } +func stack4460() { var buf [4460]byte; use(buf[:]); C.callGoStackCheck() } +func stack4464() { var buf [4464]byte; use(buf[:]); C.callGoStackCheck() } +func stack4468() { var buf [4468]byte; use(buf[:]); C.callGoStackCheck() } +func stack4472() { var buf [4472]byte; use(buf[:]); C.callGoStackCheck() } +func stack4476() { var buf [4476]byte; use(buf[:]); C.callGoStackCheck() } +func stack4480() { var buf [4480]byte; use(buf[:]); C.callGoStackCheck() } +func stack4484() { var buf [4484]byte; use(buf[:]); C.callGoStackCheck() } +func stack4488() { var buf [4488]byte; use(buf[:]); C.callGoStackCheck() } +func stack4492() { var buf [4492]byte; use(buf[:]); C.callGoStackCheck() } +func stack4496() { var buf [4496]byte; use(buf[:]); C.callGoStackCheck() } +func stack4500() { var buf [4500]byte; use(buf[:]); C.callGoStackCheck() } +func stack4504() { var buf [4504]byte; use(buf[:]); C.callGoStackCheck() } +func stack4508() { var buf [4508]byte; use(buf[:]); C.callGoStackCheck() } +func stack4512() { var buf [4512]byte; use(buf[:]); C.callGoStackCheck() } +func stack4516() { var buf [4516]byte; use(buf[:]); C.callGoStackCheck() } +func stack4520() { var buf [4520]byte; use(buf[:]); C.callGoStackCheck() } +func stack4524() { var buf [4524]byte; use(buf[:]); 
C.callGoStackCheck() } +func stack4528() { var buf [4528]byte; use(buf[:]); C.callGoStackCheck() } +func stack4532() { var buf [4532]byte; use(buf[:]); C.callGoStackCheck() } +func stack4536() { var buf [4536]byte; use(buf[:]); C.callGoStackCheck() } +func stack4540() { var buf [4540]byte; use(buf[:]); C.callGoStackCheck() } +func stack4544() { var buf [4544]byte; use(buf[:]); C.callGoStackCheck() } +func stack4548() { var buf [4548]byte; use(buf[:]); C.callGoStackCheck() } +func stack4552() { var buf [4552]byte; use(buf[:]); C.callGoStackCheck() } +func stack4556() { var buf [4556]byte; use(buf[:]); C.callGoStackCheck() } +func stack4560() { var buf [4560]byte; use(buf[:]); C.callGoStackCheck() } +func stack4564() { var buf [4564]byte; use(buf[:]); C.callGoStackCheck() } +func stack4568() { var buf [4568]byte; use(buf[:]); C.callGoStackCheck() } +func stack4572() { var buf [4572]byte; use(buf[:]); C.callGoStackCheck() } +func stack4576() { var buf [4576]byte; use(buf[:]); C.callGoStackCheck() } +func stack4580() { var buf [4580]byte; use(buf[:]); C.callGoStackCheck() } +func stack4584() { var buf [4584]byte; use(buf[:]); C.callGoStackCheck() } +func stack4588() { var buf [4588]byte; use(buf[:]); C.callGoStackCheck() } +func stack4592() { var buf [4592]byte; use(buf[:]); C.callGoStackCheck() } +func stack4596() { var buf [4596]byte; use(buf[:]); C.callGoStackCheck() } +func stack4600() { var buf [4600]byte; use(buf[:]); C.callGoStackCheck() } +func stack4604() { var buf [4604]byte; use(buf[:]); C.callGoStackCheck() } +func stack4608() { var buf [4608]byte; use(buf[:]); C.callGoStackCheck() } +func stack4612() { var buf [4612]byte; use(buf[:]); C.callGoStackCheck() } +func stack4616() { var buf [4616]byte; use(buf[:]); C.callGoStackCheck() } +func stack4620() { var buf [4620]byte; use(buf[:]); C.callGoStackCheck() } +func stack4624() { var buf [4624]byte; use(buf[:]); C.callGoStackCheck() } +func stack4628() { var buf [4628]byte; use(buf[:]); C.callGoStackCheck() } 
+func stack4632() { var buf [4632]byte; use(buf[:]); C.callGoStackCheck() } +func stack4636() { var buf [4636]byte; use(buf[:]); C.callGoStackCheck() } +func stack4640() { var buf [4640]byte; use(buf[:]); C.callGoStackCheck() } +func stack4644() { var buf [4644]byte; use(buf[:]); C.callGoStackCheck() } +func stack4648() { var buf [4648]byte; use(buf[:]); C.callGoStackCheck() } +func stack4652() { var buf [4652]byte; use(buf[:]); C.callGoStackCheck() } +func stack4656() { var buf [4656]byte; use(buf[:]); C.callGoStackCheck() } +func stack4660() { var buf [4660]byte; use(buf[:]); C.callGoStackCheck() } +func stack4664() { var buf [4664]byte; use(buf[:]); C.callGoStackCheck() } +func stack4668() { var buf [4668]byte; use(buf[:]); C.callGoStackCheck() } +func stack4672() { var buf [4672]byte; use(buf[:]); C.callGoStackCheck() } +func stack4676() { var buf [4676]byte; use(buf[:]); C.callGoStackCheck() } +func stack4680() { var buf [4680]byte; use(buf[:]); C.callGoStackCheck() } +func stack4684() { var buf [4684]byte; use(buf[:]); C.callGoStackCheck() } +func stack4688() { var buf [4688]byte; use(buf[:]); C.callGoStackCheck() } +func stack4692() { var buf [4692]byte; use(buf[:]); C.callGoStackCheck() } +func stack4696() { var buf [4696]byte; use(buf[:]); C.callGoStackCheck() } +func stack4700() { var buf [4700]byte; use(buf[:]); C.callGoStackCheck() } +func stack4704() { var buf [4704]byte; use(buf[:]); C.callGoStackCheck() } +func stack4708() { var buf [4708]byte; use(buf[:]); C.callGoStackCheck() } +func stack4712() { var buf [4712]byte; use(buf[:]); C.callGoStackCheck() } +func stack4716() { var buf [4716]byte; use(buf[:]); C.callGoStackCheck() } +func stack4720() { var buf [4720]byte; use(buf[:]); C.callGoStackCheck() } +func stack4724() { var buf [4724]byte; use(buf[:]); C.callGoStackCheck() } +func stack4728() { var buf [4728]byte; use(buf[:]); C.callGoStackCheck() } +func stack4732() { var buf [4732]byte; use(buf[:]); C.callGoStackCheck() } +func stack4736() { var 
buf [4736]byte; use(buf[:]); C.callGoStackCheck() } +func stack4740() { var buf [4740]byte; use(buf[:]); C.callGoStackCheck() } +func stack4744() { var buf [4744]byte; use(buf[:]); C.callGoStackCheck() } +func stack4748() { var buf [4748]byte; use(buf[:]); C.callGoStackCheck() } +func stack4752() { var buf [4752]byte; use(buf[:]); C.callGoStackCheck() } +func stack4756() { var buf [4756]byte; use(buf[:]); C.callGoStackCheck() } +func stack4760() { var buf [4760]byte; use(buf[:]); C.callGoStackCheck() } +func stack4764() { var buf [4764]byte; use(buf[:]); C.callGoStackCheck() } +func stack4768() { var buf [4768]byte; use(buf[:]); C.callGoStackCheck() } +func stack4772() { var buf [4772]byte; use(buf[:]); C.callGoStackCheck() } +func stack4776() { var buf [4776]byte; use(buf[:]); C.callGoStackCheck() } +func stack4780() { var buf [4780]byte; use(buf[:]); C.callGoStackCheck() } +func stack4784() { var buf [4784]byte; use(buf[:]); C.callGoStackCheck() } +func stack4788() { var buf [4788]byte; use(buf[:]); C.callGoStackCheck() } +func stack4792() { var buf [4792]byte; use(buf[:]); C.callGoStackCheck() } +func stack4796() { var buf [4796]byte; use(buf[:]); C.callGoStackCheck() } +func stack4800() { var buf [4800]byte; use(buf[:]); C.callGoStackCheck() } +func stack4804() { var buf [4804]byte; use(buf[:]); C.callGoStackCheck() } +func stack4808() { var buf [4808]byte; use(buf[:]); C.callGoStackCheck() } +func stack4812() { var buf [4812]byte; use(buf[:]); C.callGoStackCheck() } +func stack4816() { var buf [4816]byte; use(buf[:]); C.callGoStackCheck() } +func stack4820() { var buf [4820]byte; use(buf[:]); C.callGoStackCheck() } +func stack4824() { var buf [4824]byte; use(buf[:]); C.callGoStackCheck() } +func stack4828() { var buf [4828]byte; use(buf[:]); C.callGoStackCheck() } +func stack4832() { var buf [4832]byte; use(buf[:]); C.callGoStackCheck() } +func stack4836() { var buf [4836]byte; use(buf[:]); C.callGoStackCheck() } +func stack4840() { var buf [4840]byte; 
use(buf[:]); C.callGoStackCheck() } +func stack4844() { var buf [4844]byte; use(buf[:]); C.callGoStackCheck() } +func stack4848() { var buf [4848]byte; use(buf[:]); C.callGoStackCheck() } +func stack4852() { var buf [4852]byte; use(buf[:]); C.callGoStackCheck() } +func stack4856() { var buf [4856]byte; use(buf[:]); C.callGoStackCheck() } +func stack4860() { var buf [4860]byte; use(buf[:]); C.callGoStackCheck() } +func stack4864() { var buf [4864]byte; use(buf[:]); C.callGoStackCheck() } +func stack4868() { var buf [4868]byte; use(buf[:]); C.callGoStackCheck() } +func stack4872() { var buf [4872]byte; use(buf[:]); C.callGoStackCheck() } +func stack4876() { var buf [4876]byte; use(buf[:]); C.callGoStackCheck() } +func stack4880() { var buf [4880]byte; use(buf[:]); C.callGoStackCheck() } +func stack4884() { var buf [4884]byte; use(buf[:]); C.callGoStackCheck() } +func stack4888() { var buf [4888]byte; use(buf[:]); C.callGoStackCheck() } +func stack4892() { var buf [4892]byte; use(buf[:]); C.callGoStackCheck() } +func stack4896() { var buf [4896]byte; use(buf[:]); C.callGoStackCheck() } +func stack4900() { var buf [4900]byte; use(buf[:]); C.callGoStackCheck() } +func stack4904() { var buf [4904]byte; use(buf[:]); C.callGoStackCheck() } +func stack4908() { var buf [4908]byte; use(buf[:]); C.callGoStackCheck() } +func stack4912() { var buf [4912]byte; use(buf[:]); C.callGoStackCheck() } +func stack4916() { var buf [4916]byte; use(buf[:]); C.callGoStackCheck() } +func stack4920() { var buf [4920]byte; use(buf[:]); C.callGoStackCheck() } +func stack4924() { var buf [4924]byte; use(buf[:]); C.callGoStackCheck() } +func stack4928() { var buf [4928]byte; use(buf[:]); C.callGoStackCheck() } +func stack4932() { var buf [4932]byte; use(buf[:]); C.callGoStackCheck() } +func stack4936() { var buf [4936]byte; use(buf[:]); C.callGoStackCheck() } +func stack4940() { var buf [4940]byte; use(buf[:]); C.callGoStackCheck() } +func stack4944() { var buf [4944]byte; use(buf[:]); 
C.callGoStackCheck() } +func stack4948() { var buf [4948]byte; use(buf[:]); C.callGoStackCheck() } +func stack4952() { var buf [4952]byte; use(buf[:]); C.callGoStackCheck() } +func stack4956() { var buf [4956]byte; use(buf[:]); C.callGoStackCheck() } +func stack4960() { var buf [4960]byte; use(buf[:]); C.callGoStackCheck() } +func stack4964() { var buf [4964]byte; use(buf[:]); C.callGoStackCheck() } +func stack4968() { var buf [4968]byte; use(buf[:]); C.callGoStackCheck() } +func stack4972() { var buf [4972]byte; use(buf[:]); C.callGoStackCheck() } +func stack4976() { var buf [4976]byte; use(buf[:]); C.callGoStackCheck() } +func stack4980() { var buf [4980]byte; use(buf[:]); C.callGoStackCheck() } +func stack4984() { var buf [4984]byte; use(buf[:]); C.callGoStackCheck() } +func stack4988() { var buf [4988]byte; use(buf[:]); C.callGoStackCheck() } +func stack4992() { var buf [4992]byte; use(buf[:]); C.callGoStackCheck() } +func stack4996() { var buf [4996]byte; use(buf[:]); C.callGoStackCheck() } +func stack5000() { var buf [5000]byte; use(buf[:]); C.callGoStackCheck() } diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/callback_c.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/callback_c.c new file mode 100644 index 0000000000000000000000000000000000000000..8ecf70f2729c6ffe3383d16ca2bc95451499cf67 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/callback_c.c @@ -0,0 +1,67 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include + +#include "_cgo_export.h" + +void +callback(void *f) +{ + // use some stack space + volatile char data[64*1024]; + + data[0] = 0; + goCallback(f); + data[sizeof(data)-1] = 0; +} + +void +callGoFoo(void) +{ + extern void goFoo(void); + goFoo(); +} + +void +IntoC(void) +{ + BackIntoGo(); +} + +void +Issue1560InC(void) +{ + Issue1560FromC(); +} + +void +callGoStackCheck(void) +{ + extern void goStackCheck(void); + goStackCheck(); +} + +int +returnAfterGrow(void) +{ + extern int goReturnVal(void); + goReturnVal(); + return 123456; +} + +int +returnAfterGrowFromGo(void) +{ + extern int goReturnVal(void); + return goReturnVal(); +} + +void +callGoWithString(void) +{ + extern void goWithString(GoString); + const char *str = "string passed from C to Go"; + goWithString((GoString){str, strlen(str)}); +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/callback_c_gc.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/callback_c_gc.c new file mode 100644 index 0000000000000000000000000000000000000000..c6666c2b4e58ecae6b1d7f96ee1f4b1b0617f21a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/callback_c_gc.c @@ -0,0 +1,25 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "_cgo_export.h" +#include +#include +#include + +/* Test calling panic from C. This is what SWIG does. 
*/ + +extern void crosscall2(void (*fn)(void *, int), void *, int); +extern void _cgo_panic(void *, int); +extern void _cgo_allocate(void *, int); + +void +callPanic(void) +{ + struct { const char *p; } a; + a.p = "panic from C"; + crosscall2(_cgo_panic, &a, sizeof a); + *(int*)1 = 1; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/callback_c_gccgo.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/callback_c_gccgo.c new file mode 100644 index 0000000000000000000000000000000000000000..91d37f02d8f3b2ecfa370ebd992ccb7590f541d7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/callback_c_gccgo.c @@ -0,0 +1,21 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gccgo + +#include "_cgo_export.h" +#include +#include +#include + +/* Test calling panic from C. This is what SWIG does. */ + +extern void _cgo_panic(const char *); +extern void *_cgo_allocate(size_t); + +void +callPanic(void) +{ + _cgo_panic("panic from C"); +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/callback_windows.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/callback_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..77bdfa4dd371acb08d9bed408e917a1bd76f077e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/callback_windows.go @@ -0,0 +1,109 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cgotest + +/* +#include +USHORT backtrace(ULONG FramesToCapture, PVOID *BackTrace) { +#ifdef _AMD64_ + CONTEXT context; + RtlCaptureContext(&context); + ULONG64 ControlPc; + ControlPc = context.Rip; + int i; + for (i = 0; i < FramesToCapture; i++) { + PRUNTIME_FUNCTION FunctionEntry; + ULONG64 ImageBase; + VOID *HandlerData; + ULONG64 EstablisherFrame; + + FunctionEntry = RtlLookupFunctionEntry(ControlPc, &ImageBase, NULL); + + if (!FunctionEntry) { + // For simplicity, don't unwind leaf entries, which are not used in this test. + break; + } else { + RtlVirtualUnwind(0, ImageBase, ControlPc, FunctionEntry, &context, &HandlerData, &EstablisherFrame, NULL); + } + + ControlPc = context.Rip; + // Check if we left the user range. + if (ControlPc < 0x10000) { + break; + } + + BackTrace[i] = (PVOID)(ControlPc); + } + return i; +#else + return 0; +#endif +} +*/ +import "C" + +import ( + "internal/testenv" + "reflect" + "runtime" + "strings" + "testing" + "unsafe" +) + +// Test that the stack can be unwound through a call out and call back +// into Go. +func testCallbackCallersSEH(t *testing.T) { + testenv.SkipIfOptimizationOff(t) // This test requires inlining. + if runtime.Compiler != "gc" { + // The exact function names are not going to be the same. + t.Skip("skipping for non-gc toolchain") + } + if runtime.GOARCH != "amd64" { + // TODO: support SEH on other architectures. + t.Skip("skipping on non-amd64") + } + // Only frames in the test package are checked. 
+ want := []string{ + "test._Cfunc_backtrace", + "test.testCallbackCallersSEH.func1.1", + "test.testCallbackCallersSEH.func1", + "test.goCallback", + "test._Cfunc_callback", + "test.nestedCall.func1", + "test.nestedCall", + "test.testCallbackCallersSEH", + "test.TestCallbackCallersSEH", + } + pc := make([]uintptr, 100) + n := 0 + nestedCall(func() { + n = int(C.backtrace(C.DWORD(len(pc)), (*C.PVOID)(unsafe.Pointer(&pc[0])))) + }) + got := make([]string, 0, n) + for i := 0; i < n; i++ { + f := runtime.FuncForPC(pc[i] - 1) + if f == nil { + continue + } + fname := f.Name() + switch fname { + case "goCallback": + // TODO(qmuntal): investigate why this function doesn't appear + // when using the external linker. + continue + } + // In module mode, this package has a fully-qualified import path. + // Remove it if present. + fname = strings.TrimPrefix(fname, "cmd/cgo/internal/") + if !strings.HasPrefix(fname, "test.") { + continue + } + got = append(got, fname) + } + if !reflect.DeepEqual(want, got) { + t.Errorf("incorrect backtrace:\nwant:\t%v\ngot:\t%v", want, got) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/callstub_linux_ppc64le.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/callstub_linux_ppc64le.go new file mode 100644 index 0000000000000000000000000000000000000000..93c29e1c7cdd97778cc936c8578a1da56e06960c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/callstub_linux_ppc64le.go @@ -0,0 +1,20 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +// extern int notoc_func(void); +// int TestPPC64Stubs(void) { +// return notoc_func(); +// } +import "C" +import "testing" + +func testPPC64CallStubs(t *testing.T) { + // Verify the trampolines run on the testing machine. If they + // do not, or are missing, a crash is expected. 
+ if C.TestPPC64Stubs() != 0 { + t.Skipf("This test requires binutils 2.35 or newer.") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/cgo_linux_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/cgo_linux_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3defc32ffd8562bf8d0a76c7638e76daf314d900 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/cgo_linux_test.go @@ -0,0 +1,45 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo + +package cgotest + +import ( + "os" + "runtime" + "testing" +) + +func TestSetgid(t *testing.T) { + if runtime.GOOS == "android" { + t.Skip("unsupported on Android") + } + if _, err := os.Stat("/etc/alpine-release"); err == nil { + t.Skip("setgid is broken with musl libc - go.dev/issue/39857") + } + testSetgid(t) +} + +func TestSetgidStress(t *testing.T) { + if runtime.GOOS == "android" { + t.Skip("unsupported on Android") + } + if _, err := os.Stat("/etc/alpine-release"); err == nil { + t.Skip("setgid is broken with musl libc - go.dev/issue/39857") + } + testSetgidStress(t) +} + +func Test1435(t *testing.T) { test1435(t) } +func Test6997(t *testing.T) { test6997(t) } + +func Test9400(t *testing.T) { + if _, err := os.Stat("/etc/alpine-release"); err == nil { + t.Skip("setgid is broken with musl libc - go.dev/issue/39857") + } + test9400(t) +} + +func TestBuildID(t *testing.T) { testBuildID(t) } diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/cgo_stubs_android_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/cgo_stubs_android_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a1c2482ab8def9123c0138cfe0eb07492c6afc66 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/cgo_stubs_android_test.go @@ -0,0 +1,12 @@ +// Copyright 2012 The 
Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +import "testing" + +// Stubs for tests that fails to build on Android +func test6997(t *testing.T) {} +func test8694(t *testing.T) {} +func testSigaltstack(t *testing.T) {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/cgo_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/cgo_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5e02888b3dddd9cb185998ed61ca6ea2c28b445c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/cgo_test.go @@ -0,0 +1,112 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo + +package cgotest + +import "testing" + +// The actual test functions are in non-_test.go files +// so that they can use cgo (import "C"). +// These wrappers are here for gotest to find. 
+ +func Test1328(t *testing.T) { test1328(t) } +func Test1560(t *testing.T) { test1560(t) } +func Test1635(t *testing.T) { test1635(t) } +func Test3250(t *testing.T) { test3250(t) } +func Test3729(t *testing.T) { test3729(t) } +func Test3775(t *testing.T) { test3775(t) } +func Test4029(t *testing.T) { test4029(t) } +func Test4339(t *testing.T) { test4339(t) } +func Test5227(t *testing.T) { test5227(t) } +func Test5242(t *testing.T) { test5242(t) } +func Test5337(t *testing.T) { test5337(t) } +func Test5548(t *testing.T) { test5548(t) } +func Test5603(t *testing.T) { test5603(t) } +func Test5986(t *testing.T) { test5986(t) } +func Test6390(t *testing.T) { test6390(t) } +func Test6833(t *testing.T) { test6833(t) } +func Test6907(t *testing.T) { test6907(t) } +func Test6907Go(t *testing.T) { test6907Go(t) } +func Test7560(t *testing.T) { test7560(t) } +func Test7665(t *testing.T) { test7665(t) } +func Test7978(t *testing.T) { test7978(t) } +func Test8092(t *testing.T) { test8092(t) } +func Test8517(t *testing.T) { test8517(t) } +func Test8694(t *testing.T) { test8694(t) } +func Test8756(t *testing.T) { test8756(t) } +func Test8811(t *testing.T) { test8811(t) } +func Test9026(t *testing.T) { test9026(t) } +func Test9510(t *testing.T) { test9510(t) } +func Test9557(t *testing.T) { test9557(t) } +func Test10303(t *testing.T) { test10303(t, 10) } +func Test11925(t *testing.T) { test11925(t) } +func Test12030(t *testing.T) { test12030(t) } +func Test14838(t *testing.T) { test14838(t) } +func Test17065(t *testing.T) { test17065(t) } +func Test17537(t *testing.T) { test17537(t) } +func Test18126(t *testing.T) { test18126(t) } +func Test18720(t *testing.T) { test18720(t) } +func Test20129(t *testing.T) { test20129(t) } +func Test20266(t *testing.T) { test20266(t) } +func Test20369(t *testing.T) { test20369(t) } +func Test20910(t *testing.T) { test20910(t) } +func Test21708(t *testing.T) { test21708(t) } +func Test21809(t *testing.T) { test21809(t) } +func Test21897(t 
*testing.T) { test21897(t) } +func Test22906(t *testing.T) { test22906(t) } +func Test23356(t *testing.T) { test23356(t) } +func Test24206(t *testing.T) { test24206(t) } +func Test25143(t *testing.T) { test25143(t) } +func Test26066(t *testing.T) { test26066(t) } +func Test26213(t *testing.T) { test26213(t) } +func Test27660(t *testing.T) { test27660(t) } +func Test28896(t *testing.T) { test28896(t) } +func Test30065(t *testing.T) { test30065(t) } +func Test32579(t *testing.T) { test32579(t) } +func Test31891(t *testing.T) { test31891(t) } +func Test42018(t *testing.T) { test42018(t) } +func Test45451(t *testing.T) { test45451(t) } +func Test49633(t *testing.T) { test49633(t) } +func TestAlign(t *testing.T) { testAlign(t) } +func TestAtol(t *testing.T) { testAtol(t) } +func TestBlocking(t *testing.T) { testBlocking(t) } +func TestBoolAlign(t *testing.T) { testBoolAlign(t) } +func TestCallGoWithString(t *testing.T) { testCallGoWithString(t) } +func TestCallback(t *testing.T) { testCallback(t) } +func TestCallbackCallers(t *testing.T) { testCallbackCallers(t) } +func TestCallbackGC(t *testing.T) { testCallbackGC(t) } +func TestCallbackPanic(t *testing.T) { testCallbackPanic(t) } +func TestCallbackPanicLocked(t *testing.T) { testCallbackPanicLocked(t) } +func TestCallbackPanicLoop(t *testing.T) { testCallbackPanicLoop(t) } +func TestCallbackStack(t *testing.T) { testCallbackStack(t) } +func TestCflags(t *testing.T) { testCflags(t) } +func TestCheckConst(t *testing.T) { testCheckConst(t) } +func TestConst(t *testing.T) { testConst(t) } +func TestCthread(t *testing.T) { testCthread(t) } +func TestEnum(t *testing.T) { testEnum(t) } +func TestNamedEnum(t *testing.T) { testNamedEnum(t) } +func TestCastToEnum(t *testing.T) { testCastToEnum(t) } +func TestErrno(t *testing.T) { testErrno(t) } +func TestFpVar(t *testing.T) { testFpVar(t) } +func TestGCC68255(t *testing.T) { testGCC68255(t) } +func TestHandle(t *testing.T) { testHandle(t) } +func TestHelpers(t *testing.T) { 
testHelpers(t) } +func TestLibgcc(t *testing.T) { testLibgcc(t) } +func TestMultipleAssign(t *testing.T) { testMultipleAssign(t) } +func TestNaming(t *testing.T) { testNaming(t) } +func TestPanicFromC(t *testing.T) { testPanicFromC(t) } +func TestPrintf(t *testing.T) { testPrintf(t) } +func TestReturnAfterGrow(t *testing.T) { testReturnAfterGrow(t) } +func TestReturnAfterGrowFromGo(t *testing.T) { testReturnAfterGrowFromGo(t) } +func TestSetEnv(t *testing.T) { testSetEnv(t) } +func TestThreadLock(t *testing.T) { testThreadLockFunc(t) } +func TestUnsignedInt(t *testing.T) { testUnsignedInt(t) } +func TestZeroArgCallback(t *testing.T) { testZeroArgCallback(t) } + +func BenchmarkCgoCall(b *testing.B) { benchCgoCall(b) } +func BenchmarkGoString(b *testing.B) { benchGoString(b) } +func BenchmarkCGoCallback(b *testing.B) { benchCallback(b) } +func BenchmarkCGoInCThread(b *testing.B) { benchCGoInCthread(b) } diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/cgo_thread_lock.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/cgo_thread_lock.go new file mode 100644 index 0000000000000000000000000000000000000000..e8749384af14ba05aa192b2b7d47810156b08ad4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/cgo_thread_lock.go @@ -0,0 +1,57 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build linux + +package cgotest + +/* +#include +#include +#include +void Gosched(void); +static bool Ctid(void) { + long tid1 = syscall(SYS_gettid); + Gosched(); + return tid1 == syscall(SYS_gettid); +} +*/ +import "C" + +import ( + "runtime" + "testing" + "time" +) + +//export Gosched +func Gosched() { + runtime.Gosched() +} + +func init() { + testThreadLockFunc = testThreadLock +} + +func testThreadLock(t *testing.T) { + stop := make(chan int) + go func() { + // We need the G continue running, + // so the M has a chance to run this G. + for { + select { + case <-stop: + return + case <-time.After(time.Millisecond * 100): + } + } + }() + defer close(stop) + + for i := 0; i < 1000; i++ { + if !C.Ctid() { + t.Fatalf("cgo has not locked OS thread") + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/cgo_unix_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/cgo_unix_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5c1f9b7e4060f61d2a68024717daff61a6a9c739 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/cgo_unix_test.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo && !windows + +package cgotest + +import "testing" + +func TestSigaltstack(t *testing.T) { testSigaltstack(t) } +func TestSigprocmask(t *testing.T) { testSigprocmask(t) } +func Test18146(t *testing.T) { test18146(t) } diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/cthread_unix.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/cthread_unix.c new file mode 100644 index 0000000000000000000000000000000000000000..d0da643158e5f45776ec1296b8f76325942401e4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/cthread_unix.c @@ -0,0 +1,58 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris + +#include +#include "_cgo_export.h" + +static void* +addThread(void *p) +{ + int i, max; + + max = *(int*)p; + for(i=0; i MaxThread) + nthread = MaxThread; + for(i=0; i +#include +#include "_cgo_export.h" + +__stdcall +static unsigned int +addThread(void *p) +{ + int i, max; + + max = *(int*)p; + for(i=0; i MaxThread) + nthread = MaxThread; + for(i=0; i +// #include +// #include +// #include +// #include +// +// pthread_t *t = NULL; +// pthread_mutex_t mu; +// int nts = 0; +// int all_done = 0; +// +// static void *aFn(void *vargp) { +// int done = 0; +// while (!done) { +// usleep(100); +// pthread_mutex_lock(&mu); +// done = all_done; +// pthread_mutex_unlock(&mu); +// } +// return NULL; +// } +// +// void trial(int argc) { +// int i; +// nts = argc; +// t = calloc(nts, sizeof(pthread_t)); +// pthread_mutex_init(&mu, NULL); +// for (i = 0; i < nts; i++) { +// pthread_create(&t[i], NULL, aFn, NULL); +// } +// } +// +// void cleanup(void) { +// int i; +// pthread_mutex_lock(&mu); +// all_done = 1; +// pthread_mutex_unlock(&mu); +// for (i = 0; i < nts; i++) { +// pthread_join(t[i], NULL); +// } +// pthread_mutex_destroy(&mu); +// free(t); +// } +import "C" + +// compareStatus is used to confirm the contents of the thread +// specific status files match expectations. 
+func compareStatus(filter, expect string) error { + expected := filter + expect + pid := syscall.Getpid() + fs, err := os.ReadDir(fmt.Sprintf("/proc/%d/task", pid)) + if err != nil { + return fmt.Errorf("unable to find %d tasks: %v", pid, err) + } + expectedProc := fmt.Sprintf("Pid:\t%d", pid) + foundAThread := false + for _, f := range fs { + tf := fmt.Sprintf("/proc/%s/status", f.Name()) + d, err := os.ReadFile(tf) + if err != nil { + // There are a surprising number of ways this + // can error out on linux. We've seen all of + // the following, so treat any error here as + // equivalent to the "process is gone": + // os.IsNotExist(err), + // "... : no such process", + // "... : bad file descriptor. + continue + } + lines := strings.Split(string(d), "\n") + for _, line := range lines { + // Different kernel vintages pad differently. + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "Pid:\t") { + // On loaded systems, it is possible + // for a TID to be reused really + // quickly. As such, we need to + // validate that the thread status + // info we just read is a task of the + // same process PID as we are + // currently running, and not a + // recently terminated thread + // resurfaced in a different process. + if line != expectedProc { + break + } + // Fall through in the unlikely case + // that filter at some point is + // "Pid:\t". + } + if strings.HasPrefix(line, filter) { + if line == expected { + foundAThread = true + break + } + if filter == "Groups:" && strings.HasPrefix(line, "Groups:\t") { + // https://github.com/golang/go/issues/46145 + // Containers don't reliably output this line in sorted order so manually sort and compare that. 
+ a := strings.Split(line[8:], " ") + sort.Strings(a) + got := strings.Join(a, " ") + if got == expected[8:] { + foundAThread = true + break + } + + } + return fmt.Errorf("%q got:%q want:%q (bad) [pid=%d file:'%s' %v]\n", tf, line, expected, pid, string(d), expectedProc) + } + } + } + if !foundAThread { + return fmt.Errorf("found no thread /proc//status files for process %q", expectedProc) + } + return nil +} + +// test1435 test 9 glibc implemented setuid/gid syscall functions are +// mapped. This test is a slightly more expansive test than that of +// src/syscall/syscall_linux_test.go:TestSetuidEtc() insofar as it +// launches concurrent threads from C code via CGo and validates that +// they are subject to the system calls being tested. For the actual +// Go functionality being tested here, the syscall_linux_test version +// is considered authoritative, but non-trivial improvements to that +// should be mirrored here. +func test1435(t *testing.T) { + if syscall.Getuid() != 0 { + t.Skip("skipping root only test") + } + if testing.Short() && testenv.Builder() != "" && os.Getenv("USER") == "swarming" { + // The Go build system's swarming user is known not to be root. + // Unfortunately, it sometimes appears as root due the current + // implementation of a no-network check using 'unshare -n -r'. + // Since this test does need root to work, we need to skip it. + t.Skip("skipping root only test on a non-root builder") + } + if runtime.GOOS == "linux" { + if _, err := os.Stat("/etc/alpine-release"); err == nil { + t.Skip("skipping failing test on alpine - go.dev/issue/19938") + } + } + + // Launch some threads in C. 
+ const cts = 5 + C.trial(cts) + defer C.cleanup() + + vs := []struct { + call string + fn func() error + filter, expect string + }{ + {call: "Setegid(1)", fn: func() error { return syscall.Setegid(1) }, filter: "Gid:", expect: "\t0\t1\t0\t1"}, + {call: "Setegid(0)", fn: func() error { return syscall.Setegid(0) }, filter: "Gid:", expect: "\t0\t0\t0\t0"}, + + {call: "Seteuid(1)", fn: func() error { return syscall.Seteuid(1) }, filter: "Uid:", expect: "\t0\t1\t0\t1"}, + {call: "Setuid(0)", fn: func() error { return syscall.Setuid(0) }, filter: "Uid:", expect: "\t0\t0\t0\t0"}, + + {call: "Setgid(1)", fn: func() error { return syscall.Setgid(1) }, filter: "Gid:", expect: "\t1\t1\t1\t1"}, + {call: "Setgid(0)", fn: func() error { return syscall.Setgid(0) }, filter: "Gid:", expect: "\t0\t0\t0\t0"}, + + {call: "Setgroups([]int{0,1,2,3})", fn: func() error { return syscall.Setgroups([]int{0, 1, 2, 3}) }, filter: "Groups:", expect: "\t0 1 2 3"}, + {call: "Setgroups(nil)", fn: func() error { return syscall.Setgroups(nil) }, filter: "Groups:", expect: ""}, + {call: "Setgroups([]int{0})", fn: func() error { return syscall.Setgroups([]int{0}) }, filter: "Groups:", expect: "\t0"}, + + {call: "Setregid(101,0)", fn: func() error { return syscall.Setregid(101, 0) }, filter: "Gid:", expect: "\t101\t0\t0\t0"}, + {call: "Setregid(0,102)", fn: func() error { return syscall.Setregid(0, 102) }, filter: "Gid:", expect: "\t0\t102\t102\t102"}, + {call: "Setregid(0,0)", fn: func() error { return syscall.Setregid(0, 0) }, filter: "Gid:", expect: "\t0\t0\t0\t0"}, + + {call: "Setreuid(1,0)", fn: func() error { return syscall.Setreuid(1, 0) }, filter: "Uid:", expect: "\t1\t0\t0\t0"}, + {call: "Setreuid(0,2)", fn: func() error { return syscall.Setreuid(0, 2) }, filter: "Uid:", expect: "\t0\t2\t2\t2"}, + {call: "Setreuid(0,0)", fn: func() error { return syscall.Setreuid(0, 0) }, filter: "Uid:", expect: "\t0\t0\t0\t0"}, + + {call: "Setresgid(101,0,102)", fn: func() error { return 
syscall.Setresgid(101, 0, 102) }, filter: "Gid:", expect: "\t101\t0\t102\t0"}, + {call: "Setresgid(0,102,101)", fn: func() error { return syscall.Setresgid(0, 102, 101) }, filter: "Gid:", expect: "\t0\t102\t101\t102"}, + {call: "Setresgid(0,0,0)", fn: func() error { return syscall.Setresgid(0, 0, 0) }, filter: "Gid:", expect: "\t0\t0\t0\t0"}, + + {call: "Setresuid(1,0,2)", fn: func() error { return syscall.Setresuid(1, 0, 2) }, filter: "Uid:", expect: "\t1\t0\t2\t0"}, + {call: "Setresuid(0,2,1)", fn: func() error { return syscall.Setresuid(0, 2, 1) }, filter: "Uid:", expect: "\t0\t2\t1\t2"}, + {call: "Setresuid(0,0,0)", fn: func() error { return syscall.Setresuid(0, 0, 0) }, filter: "Uid:", expect: "\t0\t0\t0\t0"}, + } + + for i, v := range vs { + if err := v.fn(); err != nil { + t.Errorf("[%d] %q failed: %v", i, v.call, err) + continue + } + if err := compareStatus(v.filter, v.expect); err != nil { + t.Errorf("[%d] %q comparison: %v", i, v.call, err) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue18146.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue18146.go new file mode 100644 index 0000000000000000000000000000000000000000..112b7ee2e7373212c0113e2510933db4dd160724 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue18146.go @@ -0,0 +1,128 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo && !windows + +// Issue 18146: pthread_create failure during syscall.Exec. 
+ +package cgotest + +import ( + "bytes" + "crypto/md5" + "os" + "os/exec" + "runtime" + "syscall" + "testing" + "time" +) + +func test18146(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + + if runtime.GOOS == "darwin" || runtime.GOOS == "ios" { + t.Skipf("skipping flaky test on %s; see golang.org/issue/18202", runtime.GOOS) + } + + if runtime.GOARCH == "mips" || runtime.GOARCH == "mips64" { + t.Skipf("skipping on %s", runtime.GOARCH) + } + + attempts := 1000 + threads := 4 + + // Restrict the number of attempts based on RLIMIT_NPROC. + // Tediously, RLIMIT_NPROC was left out of the syscall package, + // probably because it is not in POSIX.1, so we define it here. + // It is not defined on Solaris. + var nproc int + setNproc := true + switch runtime.GOOS { + default: + setNproc = false + case "aix": + nproc = 9 + case "linux": + nproc = 6 + case "darwin", "dragonfly", "freebsd", "netbsd", "openbsd": + nproc = 7 + } + if setNproc { + var rlim syscall.Rlimit + if syscall.Getrlimit(nproc, &rlim) == nil { + max := int(rlim.Cur) / (threads + 5) + if attempts > max { + t.Logf("lowering attempts from %d to %d for RLIMIT_NPROC", attempts, max) + attempts = max + } + } + } + + if os.Getenv("test18146") == "exec" { + runtime.GOMAXPROCS(1) + for n := threads; n > 0; n-- { + go func() { + for { + _ = md5.Sum([]byte("Hello, !")) + } + }() + } + runtime.GOMAXPROCS(threads) + argv := append(os.Args, "-test.run=^$") + if err := syscall.Exec(os.Args[0], argv, os.Environ()); err != nil { + t.Fatal(err) + } + } + + var cmds []*exec.Cmd + defer func() { + for _, cmd := range cmds { + cmd.Process.Kill() + } + }() + + args := append(append([]string(nil), os.Args[1:]...), "-test.run=^Test18146$") + for n := attempts; n > 0; n-- { + cmd := exec.Command(os.Args[0], args...) 
+ cmd.Env = append(os.Environ(), "test18146=exec") + buf := bytes.NewBuffer(nil) + cmd.Stdout = buf + cmd.Stderr = buf + if err := cmd.Start(); err != nil { + // We are starting so many processes that on + // some systems (problem seen on Darwin, + // Dragonfly, OpenBSD) the fork call will fail + // with EAGAIN. + if pe, ok := err.(*os.PathError); ok { + err = pe.Err + } + if se, ok := err.(syscall.Errno); ok && (se == syscall.EAGAIN || se == syscall.EMFILE) { + time.Sleep(time.Millisecond) + continue + } + + t.Error(err) + return + } + cmds = append(cmds, cmd) + } + + failures := 0 + for _, cmd := range cmds { + err := cmd.Wait() + if err == nil { + continue + } + + t.Errorf("syscall.Exec failed: %v\n%s", err, cmd.Stdout) + failures++ + } + + if failures > 0 { + t.Logf("Failed %v of %v attempts.", failures, len(cmds)) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue20266.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue20266.go new file mode 100644 index 0000000000000000000000000000000000000000..9f95086cc7bdbc6837ad40bf17faac44ea139cf5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue20266.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 20266: use -I with a relative path. 
+ +package cgotest + +/* +#cgo CFLAGS: -I issue20266 -Iissue20266 -Ddef20266 +#include "issue20266.h" +*/ +import "C" + +import "testing" + +func test20266(t *testing.T) { + if got, want := C.issue20266, 20266; got != want { + t.Errorf("got %d, want %d", got, want) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue20266/issue20266.h b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue20266/issue20266.h new file mode 100644 index 0000000000000000000000000000000000000000..8d3258ec6b874584c0c93296f362d92b78a2be67 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue20266/issue20266.h @@ -0,0 +1,9 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#define issue20266 20266 + +#ifndef def20266 +#error "expected def20266 to be defined" +#endif diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue20910.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue20910.c new file mode 100644 index 0000000000000000000000000000000000000000..e8d623fc9837e839df14d66342e4037449e0857a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue20910.c @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include +#include +#include +#include "_cgo_export.h" + +/* Test calling a Go function with multiple return values. 
*/ + +void +callMulti(void) +{ + struct multi_return result = multi(); + assert(strcmp(result.r0, "multi") == 0); + assert(result.r1 == 0); + free(result.r0); +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue21897.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue21897.go new file mode 100644 index 0000000000000000000000000000000000000000..cd3600a0cf0f304089eb687354a4815eb4188aae --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue21897.go @@ -0,0 +1,56 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && cgo && !internal + +package cgotest + +/* +#cgo LDFLAGS: -framework CoreFoundation +#include +*/ +import "C" +import ( + "runtime/debug" + "testing" + "unsafe" +) + +func test21897(t *testing.T) { + // Please write barrier, kick in soon. + defer debug.SetGCPercent(debug.SetGCPercent(1)) + + for i := 0; i < 10000; i++ { + testCFNumberRef() + testCFDateRef() + testCFBooleanRef() + // Allocate some memory, so eventually the write barrier is enabled + // and it will see writes of bad pointers in the test* functions below. 
+ byteSliceSink = make([]byte, 1024) + } +} + +var byteSliceSink []byte + +func testCFNumberRef() { + var v int64 = 0 + xCFNumberRef = C.CFNumberCreate(C.kCFAllocatorSystemDefault, C.kCFNumberSInt64Type, unsafe.Pointer(&v)) + //fmt.Printf("CFNumberRef: %x\n", uintptr(unsafe.Pointer(xCFNumberRef))) +} + +var xCFNumberRef C.CFNumberRef + +func testCFDateRef() { + xCFDateRef = C.CFDateCreate(C.kCFAllocatorSystemDefault, 0) // 0 value is 1 Jan 2001 00:00:00 GMT + //fmt.Printf("CFDateRef: %x\n", uintptr(unsafe.Pointer(xCFDateRef))) +} + +var xCFDateRef C.CFDateRef + +func testCFBooleanRef() { + xCFBooleanRef = C.kCFBooleanFalse + //fmt.Printf("CFBooleanRef: %x\n", uintptr(unsafe.Pointer(xCFBooleanRef))) +} + +var xCFBooleanRef C.CFBooleanRef diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue21897b.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue21897b.go new file mode 100644 index 0000000000000000000000000000000000000000..e12564c2164804f85317a96afa63403ef3213def --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue21897b.go @@ -0,0 +1,13 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !darwin || !cgo || internal + +package cgotest + +import "testing" + +func test21897(t *testing.T) { + t.Skip("test runs only on darwin+cgo") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue23555.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue23555.go new file mode 100644 index 0000000000000000000000000000000000000000..12321488497366cb7246ec9c5df77f51b166950b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue23555.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build cgo + +// Test that we can have two identical cgo packages in a single binary. +// No runtime test; just make sure it compiles. + +package cgotest + +import ( + _ "cmd/cgo/internal/test/issue23555a" + _ "cmd/cgo/internal/test/issue23555b" +) diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue23555a/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue23555a/a.go new file mode 100644 index 0000000000000000000000000000000000000000..cb6626bb2b0120b0a2f6f130543f65ab22f68ec0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue23555a/a.go @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue23555 + +// #include +import "C" + +func X() { + C.free(C.malloc(10)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue23555b/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue23555b/a.go new file mode 100644 index 0000000000000000000000000000000000000000..cb6626bb2b0120b0a2f6f130543f65ab22f68ec0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue23555b/a.go @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue23555 + +// #include +import "C" + +func X() { + C.free(C.malloc(10)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161_darwin_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161_darwin_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9d087519dfce18a17bb0384474de2a417f78f11f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161_darwin_test.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo + +package cgotest + +import ( + "testing" + + "cmd/cgo/internal/test/issue24161arg" + "cmd/cgo/internal/test/issue24161e0" + "cmd/cgo/internal/test/issue24161e1" + "cmd/cgo/internal/test/issue24161e2" + "cmd/cgo/internal/test/issue24161res" +) + +func Test24161Arg(t *testing.T) { + issue24161arg.Test(t) +} +func Test24161Res(t *testing.T) { + issue24161res.Test(t) +} +func Test24161Example0(t *testing.T) { + issue24161e0.Test(t) +} +func Test24161Example1(t *testing.T) { + issue24161e1.Test(t) +} +func Test24161Example2(t *testing.T) { + issue24161e2.Test(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161arg/def.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161arg/def.go new file mode 100644 index 0000000000000000000000000000000000000000..acea3aeb34e1d500d8255d1f2296992dd8e71bcd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161arg/def.go @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin + +package issue24161arg + +/* +#cgo LDFLAGS: -framework CoreFoundation +#include +*/ +import "C" + +func test24161array() C.CFArrayRef { + return C.CFArrayCreate(0, nil, 0, nil) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161arg/use.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161arg/use.go new file mode 100644 index 0000000000000000000000000000000000000000..7987105efa6b529044d18da3b007693c050d31c2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161arg/use.go @@ -0,0 +1,19 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin + +package issue24161arg + +/* +#cgo LDFLAGS: -framework CoreFoundation +#include +*/ +import "C" +import "testing" + +func Test(t *testing.T) { + a := test24161array() + C.CFArrayCreateCopy(0, a) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161e0/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161e0/main.go new file mode 100644 index 0000000000000000000000000000000000000000..5912fe27cc23b64212cd7a7d93735fd737d93172 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161e0/main.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin + +package issue24161e0 + +/* +#cgo CFLAGS: -x objective-c +#cgo LDFLAGS: -framework CoreFoundation -framework Security +#include +#include +#include +#if TARGET_OS_IPHONE == 0 && __MAC_OS_X_VERSION_MAX_ALLOWED < 101200 + typedef CFStringRef SecKeyAlgorithm; + static CFDataRef SecKeyCreateSignature(SecKeyRef key, SecKeyAlgorithm algorithm, CFDataRef dataToSign, CFErrorRef *error){return NULL;} + #define kSecKeyAlgorithmECDSASignatureDigestX962SHA1 foo() + static SecKeyAlgorithm foo(void){return NULL;} +#endif +*/ +import "C" +import "testing" + +func f1() { + C.SecKeyCreateSignature(0, C.kSecKeyAlgorithmECDSASignatureDigestX962SHA1, 0, nil) +} + +func Test(t *testing.T) {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161e1/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161e1/main.go new file mode 100644 index 0000000000000000000000000000000000000000..8c2bc6ec07adb0fc54ee1c206c61027e5f1f9a53 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161e1/main.go @@ -0,0 +1,38 @@ +// Copyright 2018 
The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin + +package issue24161e1 + +/* +#cgo CFLAGS: -x objective-c +#cgo LDFLAGS: -framework CoreFoundation -framework Security +#include +#include +#include +#if TARGET_OS_IPHONE == 0 && __MAC_OS_X_VERSION_MAX_ALLOWED < 101200 + typedef CFStringRef SecKeyAlgorithm; + static CFDataRef SecKeyCreateSignature(SecKeyRef key, SecKeyAlgorithm algorithm, CFDataRef dataToSign, CFErrorRef *error){return NULL;} + #define kSecKeyAlgorithmECDSASignatureDigestX962SHA1 foo() + static SecKeyAlgorithm foo(void){return NULL;} +#endif +*/ +import "C" +import ( + "fmt" + "testing" +) + +func f1() { + C.SecKeyCreateSignature(0, C.kSecKeyAlgorithmECDSASignatureDigestX962SHA1, 0, nil) +} + +func f2(e C.CFErrorRef) { + if desc := C.CFErrorCopyDescription(e); desc != 0 { + fmt.Println(desc) + } +} + +func Test(t *testing.T) {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161e2/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161e2/main.go new file mode 100644 index 0000000000000000000000000000000000000000..159f4796fe6367e52f8a6a2d07c52008c36ff972 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161e2/main.go @@ -0,0 +1,40 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build darwin + +package issue24161e2 + +/* +#cgo CFLAGS: -x objective-c +#cgo LDFLAGS: -framework CoreFoundation -framework Security +#include +#include +#include +#if TARGET_OS_IPHONE == 0 && __MAC_OS_X_VERSION_MAX_ALLOWED < 101200 + typedef CFStringRef SecKeyAlgorithm; + static CFDataRef SecKeyCreateSignature(SecKeyRef key, SecKeyAlgorithm algorithm, CFDataRef dataToSign, CFErrorRef *error){return NULL;} + #define kSecKeyAlgorithmECDSASignatureDigestX962SHA1 foo() + static SecKeyAlgorithm foo(void){return NULL;} +#endif +*/ +import "C" +import ( + "fmt" + "testing" +) + +var _ C.CFStringRef + +func f1() { + C.SecKeyCreateSignature(0, C.kSecKeyAlgorithmECDSASignatureDigestX962SHA1, 0, nil) +} + +func f2(e C.CFErrorRef) { + if desc := C.CFErrorCopyDescription(e); desc != 0 { + fmt.Println(desc) + } +} + +func Test(t *testing.T) {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161res/restype.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161res/restype.go new file mode 100644 index 0000000000000000000000000000000000000000..07cb98dbcfefccbaa325a47b8fa9de6bd6bd4528 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue24161res/restype.go @@ -0,0 +1,23 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build darwin + +package issue24161res + +/* +#cgo LDFLAGS: -framework CoreFoundation +#include +*/ +import "C" +import ( + "reflect" + "testing" +) + +func Test(t *testing.T) { + if k := reflect.TypeOf(C.CFArrayCreate(0, nil, 0, nil)).Kind(); k != reflect.Uintptr { + t.Fatalf("bad kind %s\n", k) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26213/jni.h b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26213/jni.h new file mode 100644 index 0000000000000000000000000000000000000000..0c76979a5a0c4e3841692e453c6468dfd2dde5ca --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26213/jni.h @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// It's going to be hard to include a whole real JVM to test this. +// So we'll simulate a really easy JVM using just the parts we need. + +// This is the relevant part of jni.h. + +// On Android NDK16, jobject is defined like this in C and C++ +typedef void* jobject; + +typedef jobject jclass; +typedef jobject jthrowable; +typedef jobject jstring; +typedef jobject jarray; +typedef jarray jbooleanArray; +typedef jarray jbyteArray; +typedef jarray jcharArray; +typedef jarray jshortArray; +typedef jarray jintArray; +typedef jarray jlongArray; +typedef jarray jfloatArray; +typedef jarray jdoubleArray; +typedef jarray jobjectArray; + +typedef jobject jweak; + +// Note: jvalue is already a non-pointer type due to it being a C union. 
diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26213/test26213.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26213/test26213.go new file mode 100644 index 0000000000000000000000000000000000000000..5d1f637ff96288bec83e7cfe2d892c840456838f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26213/test26213.go @@ -0,0 +1,46 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue26213 + +/* +#include "jni.h" +*/ +import "C" +import ( + "testing" +) + +func Test26213(t *testing.T) { + var x1 C.jobject = 0 // Note: 0, not nil. That makes sure we use uintptr for these types. + _ = x1 + var x2 C.jclass = 0 + _ = x2 + var x3 C.jthrowable = 0 + _ = x3 + var x4 C.jstring = 0 + _ = x4 + var x5 C.jarray = 0 + _ = x5 + var x6 C.jbooleanArray = 0 + _ = x6 + var x7 C.jbyteArray = 0 + _ = x7 + var x8 C.jcharArray = 0 + _ = x8 + var x9 C.jshortArray = 0 + _ = x9 + var x10 C.jintArray = 0 + _ = x10 + var x11 C.jlongArray = 0 + _ = x11 + var x12 C.jfloatArray = 0 + _ = x12 + var x13 C.jdoubleArray = 0 + _ = x13 + var x14 C.jobjectArray = 0 + _ = x14 + var x15 C.jweak = 0 + _ = x15 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26430.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26430.go new file mode 100644 index 0000000000000000000000000000000000000000..837a745cb42d0fc45b585dd974436b1b0fddd0cd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26430.go @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo + +// Issue 26430: incomplete typedef leads to inconsistent typedefs error. +// No runtime test; just make sure it compiles. 
+ +package cgotest + +import _ "cmd/cgo/internal/test/issue26430" diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26430/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26430/a.go new file mode 100644 index 0000000000000000000000000000000000000000..fbaa46b1e8d1f6ff7448dae2a694b72de992ce9f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26430/a.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package a + +// typedef struct S ST; +// static ST* F() { return 0; } +import "C" + +func F1() { + C.F() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26430/b.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26430/b.go new file mode 100644 index 0000000000000000000000000000000000000000..a7c527cde3e28c415047c19373a74761b1e1f017 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26430/b.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package a + +// typedef struct S ST; +// struct S { int f; }; +import "C" + +func F2(p *C.ST) { + p.f = 1 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26743.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26743.go new file mode 100644 index 0000000000000000000000000000000000000000..b6e1ac58bc94168c25edf098f80cabde690a60d1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26743.go @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo + +// Issue 26743: typedef of uint leads to inconsistent typedefs error. 
+// No runtime test; just make sure it compiles. + +package cgotest + +import _ "cmd/cgo/internal/test/issue26743" diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26743/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26743/a.go new file mode 100644 index 0000000000000000000000000000000000000000..a3df1797b3b5133d63ecfef7d50c53365d02290a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26743/a.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue26743 + +// typedef unsigned int uint; +// int C1(uint x) { return x; } +import "C" + +var V1 = C.C1(0) diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26743/b.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26743/b.go new file mode 100644 index 0000000000000000000000000000000000000000..c5f1ae478ca080656b0a01ecb39965730ed06502 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue26743/b.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue26743 + +import "C" + +var V2 C.uint diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue27054/egl.h b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue27054/egl.h new file mode 100644 index 0000000000000000000000000000000000000000..30796273e80b96fa401ac5d6cf2f734813093db4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue27054/egl.h @@ -0,0 +1,8 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This is the relevant part of EGL/egl.h. 
+ +typedef void *EGLDisplay; +typedef void *EGLConfig; diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue27054/test27054.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue27054/test27054.go new file mode 100644 index 0000000000000000000000000000000000000000..01bf43a913b6e0f427b7eb406f2bae90289613f0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue27054/test27054.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue27054 + +/* +#include "egl.h" +*/ +import "C" +import ( + "testing" +) + +func Test27054(t *testing.T) { + var ( + // Note: 0, not nil. That makes sure we use uintptr for these types. + _ C.EGLDisplay = 0 + _ C.EGLConfig = 0 + ) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue27340.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue27340.go new file mode 100644 index 0000000000000000000000000000000000000000..a6de328a7f500aa26214631d2c613bd898d08bc5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue27340.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo + +// Failed to resolve typedefs consistently. +// No runtime test; just make sure it compiles. 
+ +package cgotest + +import "cmd/cgo/internal/test/issue27340" + +var issue27340Var = issue27340.Issue27340GoFunc diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue27340/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue27340/a.go new file mode 100644 index 0000000000000000000000000000000000000000..f5b120c1fd8249718afeb9167630a3ccdb4c9222 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue27340/a.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Failed to resolve typedefs consistently. +// No runtime test; just make sure it compiles. +// In separate directory to isolate #pragma GCC diagnostic. + +package issue27340 + +// We use the #pragma to avoid a compiler warning about incompatible +// pointer types, because we generate code passing a struct ptr rather +// than using the typedef. This warning is expected and does not break +// a normal build. +// We can only disable -Wincompatible-pointer-types starting with GCC 5. 
+ +// #if __GNU_MAJOR__ >= 5 +// +// #pragma GCC diagnostic ignored "-Wincompatible-pointer-types" +// +// typedef struct { +// int a; +// } issue27340Struct, *issue27340Ptr; +// +// static void issue27340CFunc(issue27340Ptr p) {} +// +// #else /* _GNU_MAJOR_ < 5 */ +// +// typedef struct { +// int a; +// } issue27340Struct; +// +// static issue27340Struct* issue27340Ptr(issue27340Struct* p) { return p; } +// +// static void issue27340CFunc(issue27340Struct *p) {} +// #endif /* _GNU_MAJOR_ < 5 */ +import "C" + +func Issue27340GoFunc() { + var s C.issue27340Struct + C.issue27340CFunc(C.issue27340Ptr(&s)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue29563.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue29563.go new file mode 100644 index 0000000000000000000000000000000000000000..f5077598ec21a5962a3ea791351d5bc06c5d6376 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue29563.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo && !windows + +// Issue 29563: internal linker fails on duplicate weak symbols. +// No runtime test; just make sure it compiles. + +package cgotest + +import _ "cmd/cgo/internal/test/issue29563" diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue29563/weak.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue29563/weak.go new file mode 100644 index 0000000000000000000000000000000000000000..21cf635ccae619b513195e74fd71e85a98c55b05 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue29563/weak.go @@ -0,0 +1,13 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package issue29563 + +//int foo1(); +//int foo2(); +import "C" + +func Bar() int { + return int(C.foo1()) + int(C.foo2()) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue29563/weak1.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue29563/weak1.c new file mode 100644 index 0000000000000000000000000000000000000000..86a22734adc78275de07ae24b8486047c0331209 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue29563/weak1.c @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +extern int weaksym __attribute__((__weak__)); +int weaksym = 42; + +int foo1() +{ + return weaksym; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue29563/weak2.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue29563/weak2.c new file mode 100644 index 0000000000000000000000000000000000000000..e01eae8b58fed2aa1e32068b29254b278b38bf2a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue29563/weak2.c @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +extern int weaksym __attribute__((__weak__)); +int weaksym = 42; + +int foo2() +{ + return weaksym; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue30527.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue30527.go new file mode 100644 index 0000000000000000000000000000000000000000..d3e57b69720859a3dd401b1f70719b0d747f7e61 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue30527.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build cgo + +// Issue 30527: function call rewriting casts untyped +// constants to int because of ":=" usage. + +package cgotest + +import "cmd/cgo/internal/test/issue30527" + +func issue30527G() { + issue30527.G(nil) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue30527/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue30527/a.go new file mode 100644 index 0000000000000000000000000000000000000000..eb50147b39fcf84013ef31c910e503fa8a247e57 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue30527/a.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue30527 + +import "math" + +/* +#include + +static void issue30527F(char **p, uint64_t mod, uint32_t unused) {} +*/ +import "C" + +func G(p **C.char) { + C.issue30527F(p, math.MaxUint64, 1) + C.issue30527F(p, 1<<64-1, Z) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue30527/b.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue30527/b.go new file mode 100644 index 0000000000000000000000000000000000000000..87e8255bd8d3a840cc06443a0044dd5f6884775d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue30527/b.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package issue30527 + +const ( + X = 1 << iota + Y + Z +) diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue31891.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue31891.c new file mode 100644 index 0000000000000000000000000000000000000000..67a0dda2d68442165176c8e833dd5aece67fe993 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue31891.c @@ -0,0 +1,13 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "_cgo_export.h" + +void callIssue31891() { + Issue31891A a; + useIssue31891A(&a); + + Issue31891B b; + useIssue31891B(&b); +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4029.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4029.c new file mode 100644 index 0000000000000000000000000000000000000000..7a8fdc11b48c770a75bae61d6c2bfc66df920552 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4029.c @@ -0,0 +1,29 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows && !static && !(darwin && internal) + +#include +#include + +// Write our own versions of dlopen/dlsym/dlclose so that we represent +// the opaque handle as a Go uintptr rather than a Go pointer to avoid +// garbage collector confusion. See issue 23663. 
+ +uintptr_t dlopen4029(char* name, int flags) { + return (uintptr_t)(dlopen(name, flags)); +} + +uintptr_t dlsym4029(uintptr_t handle, char* name) { + return (uintptr_t)(dlsym((void*)(handle), name)); +} + +int dlclose4029(uintptr_t handle) { + return dlclose((void*)(handle)); +} + +void call4029(void *arg) { + void (*fn)(void) = arg; + fn(); +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4029.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4029.go new file mode 100644 index 0000000000000000000000000000000000000000..506c999bdb4984627e752beaf87d1bd12bfd6594 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4029.go @@ -0,0 +1,76 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows && !static && !(darwin && internal) + +// Excluded in darwin internal linking PIE (which is the default) mode, +// as dynamic export is not supported. 
+ +package cgotest + +/* +#include +#include +#cgo linux LDFLAGS: -ldl + +extern uintptr_t dlopen4029(char*, int); +extern uintptr_t dlsym4029(uintptr_t, char*); +extern int dlclose4029(uintptr_t); + +extern void call4029(uintptr_t arg); +*/ +import "C" + +import ( + "testing" +) + +var callbacks int + +//export IMPIsOpaque +func IMPIsOpaque() { + callbacks++ +} + +//export IMPInitWithFrame +func IMPInitWithFrame() { + callbacks++ +} + +//export IMPDrawRect +func IMPDrawRect() { + callbacks++ +} + +//export IMPWindowResize +func IMPWindowResize() { + callbacks++ +} + +func test4029(t *testing.T) { + loadThySelf(t, "IMPWindowResize") + loadThySelf(t, "IMPDrawRect") + loadThySelf(t, "IMPInitWithFrame") + loadThySelf(t, "IMPIsOpaque") + if callbacks != 4 { + t.Errorf("got %d callbacks, expected 4", callbacks) + } +} + +func loadThySelf(t *testing.T, symbol string) { + this_process := C.dlopen4029(nil, C.RTLD_NOW) + if this_process == 0 { + t.Error("dlopen:", C.GoString(C.dlerror())) + return + } + defer C.dlclose4029(this_process) + + symbol_address := C.dlsym4029(this_process, C.CString(symbol)) + if symbol_address == 0 { + t.Error("dlsym:", C.GoString(C.dlerror())) + return + } + t.Log(symbol, symbol_address) + C.call4029(symbol_address) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4029w.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4029w.go new file mode 100644 index 0000000000000000000000000000000000000000..aa4c2f59bd9be0d5b5361875e01562edef901d9c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4029w.go @@ -0,0 +1,12 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build windows || static || (darwin && internal) + +package cgotest + +import "testing" + +func test4029(t *testing.T) { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue41761.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue41761.go new file mode 100644 index 0000000000000000000000000000000000000000..27d904760d8490afb8d6fd3fbfe8fc1d42dbfb7d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue41761.go @@ -0,0 +1,20 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +/* + typedef struct S41761 S41761; +*/ +import "C" + +import ( + "cmd/cgo/internal/test/issue41761a" + "testing" +) + +func test41761(t *testing.T) { + var x issue41761a.T + _ = (*C.struct_S41761)(x.X) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue41761a/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue41761a/a.go new file mode 100644 index 0000000000000000000000000000000000000000..1c52782e05e3d5be0d0e9be596528cb46e7e2bed --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue41761a/a.go @@ -0,0 +1,14 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue41761a + +/* + typedef struct S41761 S41761; +*/ +import "C" + +type T struct { + X *C.S41761 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue42018.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue42018.go new file mode 100644 index 0000000000000000000000000000000000000000..6b369bfab46b1bb5ffbbf12661fa9a63c5468da3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue42018.go @@ -0,0 +1,13 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows + +package cgotest + +import "testing" + +func test42018(t *testing.T) { + t.Skip("skipping Windows-only test") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue42018_windows.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue42018_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..8f4570ab2a5915cb3c4a015e5e8007901da6d14c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue42018_windows.go @@ -0,0 +1,46 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +/* +typedef void *HANDLE; + +struct HWND__{int unused;}; typedef struct HWND__ *HWND; +*/ +import "C" + +import ( + "testing" + "unsafe" +) + +func test42018(t *testing.T) { + // Test that Windows handles are marked go:notinheap, by growing the + // stack and checking for pointer adjustments. Trick from + // test/fixedbugs/issue40954.go. 
+ var i int + handle := C.HANDLE(unsafe.Pointer(uintptr(unsafe.Pointer(&i)))) + recurseHANDLE(100, handle, uintptr(unsafe.Pointer(&i))) + hwnd := C.HWND(unsafe.Pointer(uintptr(unsafe.Pointer(&i)))) + recurseHWND(400, hwnd, uintptr(unsafe.Pointer(&i))) +} + +func recurseHANDLE(n int, p C.HANDLE, v uintptr) { + if n > 0 { + recurseHANDLE(n-1, p, v) + } + if uintptr(unsafe.Pointer(p)) != v { + panic("adjusted notinheap pointer") + } +} + +func recurseHWND(n int, p C.HWND, v uintptr) { + if n > 0 { + recurseHWND(n-1, p, v) + } + if uintptr(unsafe.Pointer(p)) != v { + panic("adjusted notinheap pointer") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue42495.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue42495.go new file mode 100644 index 0000000000000000000000000000000000000000..509a67d9a3077dceb1fb20268772d40fae196560 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue42495.go @@ -0,0 +1,15 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +// typedef struct { } T42495A; +// typedef struct { int x[0]; } T42495B; +import "C" + +//export Issue42495A +func Issue42495A(C.T42495A) {} + +//export Issue42495B +func Issue42495B(C.T42495B) {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4273.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4273.c new file mode 100644 index 0000000000000000000000000000000000000000..cac98768dea9c8554fbe5a73c5a2aeed2726955d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4273.c @@ -0,0 +1,10 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#ifdef __ELF__ +__attribute__((weak)) +__attribute__((visibility("hidden"))) +void _compilerrt_abort_impl(const char *file, int line, const char *func) { +} +#endif diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4273b.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4273b.c new file mode 100644 index 0000000000000000000000000000000000000000..71e3f0d976ad4e1f5156d1b488a8a8c2844a0066 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4273b.c @@ -0,0 +1,11 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#ifdef __ELF__ +extern void _compilerrt_abort_impl(const char *file, int line, const char *func); + +void __my_abort(const char *file, int line, const char *func) { + _compilerrt_abort_impl(file, line, func); +} +#endif diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4339.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4339.c new file mode 100644 index 0000000000000000000000000000000000000000..d0e64878d121233c73d8e845bdec5c959e6b3ee3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4339.c @@ -0,0 +1,22 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include +#include "issue4339.h" + +static void +impl(void) +{ + //printf("impl\n"); +} + +Issue4339 exported4339 = {"bar", impl}; + +void +handle4339(Issue4339 *x) +{ + //printf("handle\n"); + x->bar(); + //printf("done\n"); +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4339.h b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4339.h new file mode 100644 index 0000000000000000000000000000000000000000..99a09960e24cb8bbc8ee47fe24e0f762c278e345 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue4339.h @@ -0,0 +1,13 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +typedef struct Issue4339 Issue4339; + +struct Issue4339 { + char *name; + void (*bar)(void); +}; + +extern Issue4339 exported4339; +void handle4339(Issue4339*); diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue43639.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue43639.go new file mode 100644 index 0000000000000000000000000000000000000000..c297bfe37f46e044931126ecfbe95b1e422a8ca9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue43639.go @@ -0,0 +1,11 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo + +package cgotest + +// Issue 43639: No runtime test needed, make sure package cmd/cgo/internal/test/issue43639 compiles well. 
+ +import _ "cmd/cgo/internal/test/issue43639" diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue43639/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue43639/a.go new file mode 100644 index 0000000000000000000000000000000000000000..fe37d5e4b0f00d2feb9b4ddaccea6cbfc91d9876 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue43639/a.go @@ -0,0 +1,8 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue43639 + +// #cgo CFLAGS: -W -Wall -Werror +import "C" diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue52611.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue52611.go new file mode 100644 index 0000000000000000000000000000000000000000..9082a538010a4f009738a0895d342b3ce022223b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue52611.go @@ -0,0 +1,15 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo + +// Issue 52611: inconsistent compiler behaviour when compiling a C.struct. +// No runtime test; just make sure it compiles. + +package cgotest + +import ( + _ "cmd/cgo/internal/test/issue52611a" + _ "cmd/cgo/internal/test/issue52611b" +) diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue52611a/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue52611a/a.go new file mode 100644 index 0000000000000000000000000000000000000000..0764688ec47e8ff14b3e7994372a4aa997821a73 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue52611a/a.go @@ -0,0 +1,16 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package issue52611a + +/* +typedef struct Foo { + int X; +} Foo; +*/ +import "C" + +func GetX1(foo *C.struct_Foo) int32 { + return int32(foo.X) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue52611a/b.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue52611a/b.go new file mode 100644 index 0000000000000000000000000000000000000000..74a50c5deac718b4e85db9da7b12effdefa09f7b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue52611a/b.go @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue52611a + +import "C" + +func GetX2(foo *C.struct_Foo) int32 { + return int32(foo.X) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue52611b/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue52611b/a.go new file mode 100644 index 0000000000000000000000000000000000000000..730b52f5e9fea25378823084589f810140ef7716 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue52611b/a.go @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue52611b + +import "C" + +func GetX1(bar *C.struct_Bar) int32 { + return int32(bar.X) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue52611b/b.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue52611b/b.go new file mode 100644 index 0000000000000000000000000000000000000000..d30417539512af1ed7718a21ba267f61c0c1d959 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue52611b/b.go @@ -0,0 +1,16 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package issue52611b + +/* +typedef struct Bar { + int X; +} Bar; +*/ +import "C" + +func GetX2(bar *C.struct_Bar) int32 { + return int32(bar.X) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue5548_c.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue5548_c.c new file mode 100644 index 0000000000000000000000000000000000000000..84115266ffd545a07ac2c81042295f1b304022b7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue5548_c.c @@ -0,0 +1,24 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "_cgo_export.h" + +static void clobber_stack() { + volatile char a[1024]; + int i; + for(i = 0; i < sizeof a; i++) + a[i] = 0xff; +} + +static int call_go() { + GoString s; + s.p = "test"; + s.n = 4; + return issue5548FromC(s, 42); +} + +int issue5548_in_c() { + clobber_stack(); + return call_go(); +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue5740a.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue5740a.c new file mode 100644 index 0000000000000000000000000000000000000000..a6a7d0c96015d998f385814368814c55747fb930 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue5740a.c @@ -0,0 +1,9 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +static int volatile val = 2; + +int test5740a() { + return val; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue5740b.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue5740b.c new file mode 100644 index 0000000000000000000000000000000000000000..c2ff5fbc4a545cfdd87fb52de3a30cc70d8ef838 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue5740b.c @@ -0,0 +1,9 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +static int volatile val = 3; + +int test5740b() { + return val; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue6833_c.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue6833_c.c new file mode 100644 index 0000000000000000000000000000000000000000..c94c2c6d4577a929c82d8d6a8f40e20917400cdc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue6833_c.c @@ -0,0 +1,10 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "_cgo_export.h" + +unsigned long long +issue6833Func(unsigned int aui, unsigned long long aull) { + return GoIssue6833Func(aui, aull); +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue6907export_c.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue6907export_c.c new file mode 100644 index 0000000000000000000000000000000000000000..9b1a4fc630b9be7471578b2cfe2998b27b6df052 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue6907export_c.c @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include + +#include "_cgo_export.h" + +int CheckIssue6907C(_GoString_ s) { + return CheckIssue6907Go(s); +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue6997_linux.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue6997_linux.c new file mode 100644 index 0000000000000000000000000000000000000000..c6d251bbe503fcbb3a88d2652fd05a079e006082 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue6997_linux.c @@ -0,0 +1,28 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !android + +#include +#include +#include + +static pthread_t thread; + +static void* threadfunc(void* dummy) { + while(1) { + sleep(1); + } +} + +int StartThread() { + return pthread_create(&thread, NULL, &threadfunc, NULL); +} + +int CancelThread() { + void *r; + pthread_cancel(thread); + pthread_join(thread, &r); + return (r == PTHREAD_CANCELED); +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue6997_linux.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue6997_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..1de5edda045d6b131ff14d37ffa7783c0fa65de1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue6997_linux.go @@ -0,0 +1,44 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !android + +// Test that pthread_cancel works as expected +// (NPTL uses SIGRTMIN to implement thread cancellation) +// See https://golang.org/issue/6997 +package cgotest + +/* +#cgo CFLAGS: -pthread +#cgo LDFLAGS: -pthread +extern int StartThread(); +extern int CancelThread(); +*/ +import "C" + +import ( + "testing" + "time" +) + +func test6997(t *testing.T) { + r := C.StartThread() + if r != 0 { + t.Error("pthread_create failed") + } + c := make(chan C.int) + go func() { + time.Sleep(500 * time.Millisecond) + c <- C.CancelThread() + }() + + select { + case r = <-c: + if r == 0 { + t.Error("pthread finished but wasn't canceled??") + } + case <-time.After(30 * time.Second): + t.Error("hung in pthread_cancel/pthread_join") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue7234_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue7234_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c191a1a66fe2086c3c5c476c1df0ce78482b87d7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue7234_test.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +import "testing" + +// This test actually doesn't have anything to do with cgo. It is a +// test of https://golang.org/issue/7234, a compiler/linker bug in +// handling string constants when using -linkmode=external. The test +// is in this directory because we routinely test -linkmode=external +// here. 
+ +var v7234 = [...]string{"runtime/cgo"} + +func Test7234(t *testing.T) { + if v7234[0] != "runtime/cgo" { + t.Errorf("bad string constant %q", v7234[0]) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8148.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8148.c new file mode 100644 index 0000000000000000000000000000000000000000..927b4346cbe03df4339c3a342629921bf1dc4b6c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8148.c @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "_cgo_export.h" + +int get8148(void) { + T t; + t.i = 42; + return issue8148Callback(&t); +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8148.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8148.go new file mode 100644 index 0000000000000000000000000000000000000000..aee9003d5075bf02751a477e9fc7ffb8da2d4009 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8148.go @@ -0,0 +1,24 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 8148. A typedef of an unnamed struct didn't work when used +// with an exported Go function. No runtime test; just make sure it +// compiles. 
+ +package cgotest + +/* +typedef struct { int i; } T; +int get8148(void); +*/ +import "C" + +//export issue8148Callback +func issue8148Callback(t *C.T) C.int { + return t.i +} + +func Issue8148() int { + return int(C.get8148()) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8331.h b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8331.h new file mode 100644 index 0000000000000000000000000000000000000000..8065be08904de3bcba1f732d0aefed4a67ce71b3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8331.h @@ -0,0 +1,7 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +typedef struct { + int i; +} issue8331; diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8517.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8517.go new file mode 100644 index 0000000000000000000000000000000000000000..226151302263a0f4d28152717bd1110400a89bff --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8517.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows + +package cgotest + +import "testing" + +func test8517(t *testing.T) { + t.Skip("skipping windows only test") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8517_windows.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8517_windows.c new file mode 100644 index 0000000000000000000000000000000000000000..a0b94c126f6330ed2fa5915b69e88a12d12d2872 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8517_windows.c @@ -0,0 +1,24 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "windows.h" + +extern void testHandleLeaksCallback(); + +DWORD WINAPI testHandleLeaksFunc(LPVOID lpThreadParameter) +{ + int i; + for(i = 0; i < 100; i++) { + testHandleLeaksCallback(); + } + return 0; +} + +void testHandleLeaks() +{ + HANDLE h; + h = CreateThread(NULL, 0, &testHandleLeaksFunc, 0, 0, NULL); + WaitForSingleObject(h, INFINITE); + CloseHandle(h); +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8517_windows.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8517_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..3782631e91b46488115fec94bdbcd4e8bc73e9f1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8517_windows.go @@ -0,0 +1,45 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cgotest + +//void testHandleLeaks(); +import "C" + +import ( + "syscall" + "testing" + "unsafe" +) + +var issue8517counter int + +var ( + kernel32 = syscall.MustLoadDLL("kernel32.dll") + getProcessHandleCount = kernel32.MustFindProc("GetProcessHandleCount") +) + +func processHandleCount(t *testing.T) int { + const current_process = ^uintptr(0) + var c uint32 + r, _, err := getProcessHandleCount.Call(current_process, uintptr(unsafe.Pointer(&c))) + if r == 0 { + t.Fatal(err) + } + return int(c) +} + +func test8517(t *testing.T) { + c1 := processHandleCount(t) + C.testHandleLeaks() + c2 := processHandleCount(t) + if c1+issue8517counter <= c2 { + t.Fatalf("too many handles leaked: issue8517counter=%v c1=%v c2=%v", issue8517counter, c1, c2) + } +} + +//export testHandleLeaksCallback +func testHandleLeaksCallback() { + issue8517counter++ +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8694.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8694.go new file mode 100644 index 0000000000000000000000000000000000000000..3b8f065d278d8edf067af2dfdc7fe0a8d0d2c064 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8694.go @@ -0,0 +1,40 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !android + +package cgotest + +/* +#include + +complex float complexFloatSquared(complex float a) { return a*a; } +complex double complexDoubleSquared(complex double a) { return a*a; } +*/ +import "C" + +import ( + "runtime" + "testing" +) + +func test8694(t *testing.T) { + if runtime.GOARCH == "arm" { + t.Skip("test8694 is disabled on ARM because 5l cannot handle thumb library.") + } + // Really just testing that this compiles, but check answer anyway. 
+ x := C.complexfloat(2 + 3i) + x2 := x * x + cx2 := C.complexFloatSquared(x) + if cx2 != x2 { + t.Errorf("C.complexFloatSquared(%v) = %v, want %v", x, cx2, x2) + } + + y := C.complexdouble(2 + 3i) + y2 := y * y + cy2 := C.complexDoubleSquared(y) + if cy2 != y2 { + t.Errorf("C.complexDoubleSquared(%v) = %v, want %v", y, cy2, y2) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8756.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8756.go new file mode 100644 index 0000000000000000000000000000000000000000..d8eadfde6ded13a00330f52def2e6347ada2f675 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8756.go @@ -0,0 +1,21 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +/* +#cgo !darwin LDFLAGS: -lm +#include +*/ +import "C" +import ( + "testing" + + "cmd/cgo/internal/test/issue8756" +) + +func test8756(t *testing.T) { + issue8756.Pow() + C.pow(1, 2) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8756/issue8756.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8756/issue8756.go new file mode 100644 index 0000000000000000000000000000000000000000..02a1424b9f1f50bb3a00d79ceb5f7a6bd4aa42d5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8756/issue8756.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package issue8756 + +/* +#cgo !darwin LDFLAGS: -lm +#include +*/ +import "C" + +func Pow() { + C.pow(1, 2) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8811.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8811.c new file mode 100644 index 0000000000000000000000000000000000000000..41b3c7c8ea3e62e9005f976273e3fed287a64fc2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8811.c @@ -0,0 +1,8 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +int issue8811Initialized = 0; + +void issue8811Init() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8828.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8828.go new file mode 100644 index 0000000000000000000000000000000000000000..9904a663d11ce7dd9d3f80061450cb206e8ae459 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8828.go @@ -0,0 +1,16 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo + +// Issue 8828: compiling a file with -compiler=gccgo fails if a .c file +// has the same name as compiled directory. + +package cgotest + +import "cmd/cgo/internal/test/issue8828" + +func p() { + issue8828.Bar() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8828/issue8828.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8828/issue8828.c new file mode 100644 index 0000000000000000000000000000000000000000..27ec23a26059f48a2774dec01a5dff8b61a95a0d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8828/issue8828.c @@ -0,0 +1,7 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +void foo() +{ +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8828/trivial.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8828/trivial.go new file mode 100644 index 0000000000000000000000000000000000000000..9f2619654f5ca9c489f79879ef4935c1ba30092d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue8828/trivial.go @@ -0,0 +1,12 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue8828 + +//void foo(); +import "C" + +func Bar() { + C.foo() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9026.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9026.go new file mode 100644 index 0000000000000000000000000000000000000000..bab06ba63db914ef5156c20769f54cb151e1860f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9026.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo + +package cgotest + +import ( + "testing" + + "cmd/cgo/internal/test/issue9026" +) + +func test9026(t *testing.T) { issue9026.Test(t) } diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9026/issue9026.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9026/issue9026.go new file mode 100644 index 0000000000000000000000000000000000000000..13bc180321baabc2bdc75b63126b974f702998c7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9026/issue9026.go @@ -0,0 +1,40 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue9026 + +// This file appears in its own package since the assertion tests the +// per-package counter used to create fresh identifiers. + +/* +typedef struct { int i; } git_merge_file_input; + +typedef struct { int j; } git_merge_file_options; + +void git_merge_file( + git_merge_file_input *in, + git_merge_file_options *opts) {} +*/ +import "C" +import ( + "fmt" + "testing" +) + +func Test(t *testing.T) { + var in C.git_merge_file_input + var opts *C.git_merge_file_options + C.git_merge_file(&in, opts) + + // Test that the generated type names are deterministic. + // (Previously this would fail about 10% of the time.) + // + // Brittle: the assertion may fail spuriously when the algorithm + // changes, but should remain stable otherwise. + got := fmt.Sprintf("%T %T", in, opts) + want := "issue9026._Ctype_struct___0 *issue9026._Ctype_struct___1" + if got != want { + t.Errorf("Non-deterministic type names: got %s, want %s", got, want) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_386.s b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_386.s new file mode 100644 index 0000000000000000000000000000000000000000..8a3830135fb0e6e5a2699625a9da57d43f7922b6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_386.s @@ -0,0 +1,27 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build gc + +#include "textflag.h" + +TEXT ·RewindAndSetgid(SB),NOSPLIT,$0-0 + MOVL $·Baton(SB), BX + // Rewind stack pointer so anything that happens on the stack + // will clobber the test pattern created by the caller + ADDL $(1024 * 8), SP + + // Ask signaller to setgid + MOVL $1, (BX) + + // Wait for setgid completion +loop: + PAUSE + MOVL (BX), AX + CMPL AX, $0 + JNE loop + + // Restore stack + SUBL $(1024 * 8), SP + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_amd64x.s b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_amd64x.s new file mode 100644 index 0000000000000000000000000000000000000000..07adaf745f4e56aa04cebeeb2df7db3f4be28529 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_amd64x.s @@ -0,0 +1,26 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (amd64 || amd64p32) && gc + +#include "textflag.h" + +TEXT ·RewindAndSetgid(SB),NOSPLIT,$0-0 + // Rewind stack pointer so anything that happens on the stack + // will clobber the test pattern created by the caller + ADDQ $(1024 * 8), SP + + // Ask signaller to setgid + MOVL $1, ·Baton(SB) + + // Wait for setgid completion +loop: + PAUSE + MOVL ·Baton(SB), AX + CMPL AX, $0 + JNE loop + + // Restore stack + SUBQ $(1024 * 8), SP + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_arm.s b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_arm.s new file mode 100644 index 0000000000000000000000000000000000000000..41261725ca48012048690e3a9c0073db90ec47bf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_arm.s @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +TEXT cas<>(SB),NOSPLIT,$0 + MOVW $0xffff0fc0, R15 // R15 is PC + +TEXT ·RewindAndSetgid(SB),NOSPLIT|NOFRAME,$0-0 + // Save link register + MOVW R14, R4 + + // Rewind stack pointer so anything that happens on the stack + // will clobber the test pattern created by the caller + ADD $(1024 * 8), R13 + + // Ask signaller to setgid + MOVW $·Baton(SB), R2 +storeloop: + MOVW 0(R2), R0 + MOVW $1, R1 + BL cas<>(SB) + BCC storeloop + + // Wait for setgid completion +loop: + MOVW $0, R0 + MOVW $0, R1 + BL cas<>(SB) + BCC loop + + // Restore stack + SUB $(1024 * 8), R13 + + MOVW R4, R14 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_arm64.s b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_arm64.s new file mode 100644 index 0000000000000000000000000000000000000000..affbd71e65d2db92e41e802d9df497ff28607c1a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_arm64.s @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build gc + +#include "textflag.h" + +TEXT ·RewindAndSetgid(SB),NOSPLIT|NOFRAME,$0-0 + // Save link register + MOVD R30, R9 + + // Rewind stack pointer so anything that happens on the stack + // will clobber the test pattern created by the caller + ADD $(1024 * 8), RSP + + // Ask signaller to setgid + MOVD $·Baton(SB), R0 + MOVD $1, R1 +storeloop: + LDAXRW (R0), R2 + STLXRW R1, (R0), R3 + CBNZ R3, storeloop + + // Wait for setgid completion + MOVW $0, R1 + MOVW $0, R2 +loop: + LDAXRW (R0), R3 + CMPW R1, R3 + BNE loop + STLXRW R2, (R0), R3 + CBNZ R3, loop + + // Restore stack + SUB $(1024 * 8), RSP + + MOVD R9, R30 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_loong64.s b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_loong64.s new file mode 100644 index 0000000000000000000000000000000000000000..c242fc6c623566e9d7eca3b457a5059310d9cbed --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_loong64.s @@ -0,0 +1,28 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "textflag.h" + +TEXT ·RewindAndSetgid(SB),NOSPLIT|NOFRAME,$0-0 + // Rewind stack pointer so anything that happens on the stack + // will clobber the test pattern created by the caller + ADDV $(1024*8), R3 + + // Ask signaller to setgid + MOVW $1, R12 + DBAR + MOVW R12, ·Baton(SB) + DBAR + + // Wait for setgid completion +loop: + DBAR + MOVW ·Baton(SB), R12 + OR R13, R13, R13 // hint that we're in a spin loop + BNE R12, loop + DBAR + + // Restore stack + ADDV $(-1024*8), R3 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_mips64x.s b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_mips64x.s new file mode 100644 index 0000000000000000000000000000000000000000..3edba3dd82a24278a763b7f9a5eed5424bc375f3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_mips64x.s @@ -0,0 +1,32 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build (mips64 || mips64le) && gc + +#include "textflag.h" + +#define SYNC WORD $0xf + +TEXT ·RewindAndSetgid(SB),NOSPLIT|NOFRAME,$0-0 + // Rewind stack pointer so anything that happens on the stack + // will clobber the test pattern created by the caller + ADDV $(1024*8), R29 + + // Ask signaller to setgid + MOVW $1, R1 + SYNC + MOVW R1, ·Baton(SB) + SYNC + + // Wait for setgid completion +loop: + SYNC + MOVW ·Baton(SB), R1 + OR R2, R2, R2 // hint that we're in a spin loop + BNE R1, loop + SYNC + + // Restore stack + ADDV $(-1024*8), R29 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_mipsx.s b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_mipsx.s new file mode 100644 index 0000000000000000000000000000000000000000..695273d90f6e5797b5b908af68c035c85f35d6f1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_mipsx.s @@ -0,0 +1,30 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build (mips || mipsle) && gc + +#include "textflag.h" + +TEXT ·RewindAndSetgid(SB),NOSPLIT|NOFRAME,$0-0 + // Rewind stack pointer so anything that happens on the stack + // will clobber the test pattern created by the caller + ADDU $(1024*8), R29 + + // Ask signaller to setgid + MOVW $1, R1 + SYNC + MOVW R1, ·Baton(SB) + SYNC + + // Wait for setgid completion +loop: + SYNC + MOVW ·Baton(SB), R1 + OR R2, R2, R2 // hint that we're in a spin loop + BNE R1, loop + SYNC + + // Restore stack + ADDU $(-1024*8), R29 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_ppc64x.s b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_ppc64x.s new file mode 100644 index 0000000000000000000000000000000000000000..5f13f1696d354680418763917084e59864ea3839 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_ppc64x.s @@ -0,0 +1,31 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build (ppc64 || ppc64le) && gc + +#include "textflag.h" + +TEXT ·RewindAndSetgid(SB),NOSPLIT|NOFRAME,$0-0 + // Rewind stack pointer so anything that happens on the stack + // will clobber the test pattern created by the caller + ADD $(1024 * 8), R1 + + // Ask signaller to setgid + MOVW $1, R3 + SYNC + MOVW R3, ·Baton(SB) + + // Wait for setgid completion +loop: + SYNC + MOVW ·Baton(SB), R3 + CMP R3, $0 + // Hint that we're in a spin loop + OR R1, R1, R1 + BNE loop + ISYNC + + // Restore stack + SUB $(1024 * 8), R1 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_riscv64.s b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_riscv64.s new file mode 100644 index 0000000000000000000000000000000000000000..0f10e3a3262024a60506b36386a2f4106a29ddb9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_riscv64.s @@ -0,0 +1,30 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build riscv64 && gc + +#include "textflag.h" + +TEXT ·RewindAndSetgid(SB),NOSPLIT|NOFRAME,$0-0 + // Rewind stack pointer so anything that happens on the stack + // will clobber the test pattern created by the caller + ADD $(1024*8), X2 + + // Ask signaller to setgid + MOV $1, X5 + FENCE + MOVW X5, ·Baton(SB) + FENCE + + // Wait for setgid completion +loop: + FENCE + MOVW ·Baton(SB), X5 + OR X6, X6, X6 // hint that we're in a spin loop + BNE ZERO, X5, loop + FENCE + + // Restore stack + ADD $(-1024*8), X2 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_s390x.s b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_s390x.s new file mode 100644 index 0000000000000000000000000000000000000000..2552fa7008f934c6e715afaabafbaa231d85db33 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/asm_s390x.s @@ -0,0 +1,26 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +TEXT ·RewindAndSetgid(SB),NOSPLIT,$0-0 + // Rewind stack pointer so anything that happens on the stack + // will clobber the test pattern created by the caller + ADD $(1024 * 8), R15 + + // Ask signaller to setgid + MOVD $·Baton(SB), R5 + MOVW $1, 0(R5) + + // Wait for setgid completion +loop: + SYNC + MOVW ·Baton(SB), R3 + CMPBNE R3, $0, loop + + // Restore stack + SUB $(1024 * 8), R15 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/gccgo.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/gccgo.go new file mode 100644 index 0000000000000000000000000000000000000000..4dd987bf7495ad904ec7eaaa517114a12b281872 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/gccgo.go @@ -0,0 +1,26 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gccgo + +package issue9400 + +import ( + "runtime" + "sync/atomic" +) + +// The test for the gc compiler resets the stack pointer so that the +// stack gets modified. We don't have a way to do that for gccgo +// without writing more assembly code, which we haven't bothered to +// do. So this is not much of a test. + +var Baton int32 + +func RewindAndSetgid() { + atomic.StoreInt32(&Baton, 1) + for atomic.LoadInt32(&Baton) != 0 { + runtime.Gosched() + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/stubs.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/stubs.go new file mode 100644 index 0000000000000000000000000000000000000000..c2b235abab1f1b9d531f900eb658613fe8fbcba8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400/stubs.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && gc + +package issue9400 + +var Baton int32 + +func RewindAndSetgid() diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400_linux.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..41b9ab9dc7eebef179eb67aea6f8b0c4a30de459 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9400_linux.go @@ -0,0 +1,67 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that SIGSETXID runs on signal stack, since it's likely to +// overflow if it runs on the Go stack. 
+ +package cgotest + +/* +#include +#include +*/ +import "C" + +import ( + "runtime" + "runtime/debug" + "sync/atomic" + "testing" + + "cmd/cgo/internal/test/issue9400" +) + +func test9400(t *testing.T) { + // We synchronize through a shared variable, so we need two procs + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) + + // Start signaller + atomic.StoreInt32(&issue9400.Baton, 0) + go func() { + // Wait for RewindAndSetgid + for atomic.LoadInt32(&issue9400.Baton) == 0 { + runtime.Gosched() + } + // Broadcast SIGSETXID + runtime.LockOSThread() + C.setgid(0) + // Indicate that signalling is done + atomic.StoreInt32(&issue9400.Baton, 0) + }() + + // Grow the stack and put down a test pattern + const pattern = 0x123456789abcdef + var big [1024]uint64 // len must match assembly + for i := range big { + big[i] = pattern + } + + // Disable GC for the duration of the test. + // This avoids a potential GC deadlock when spinning in uninterruptible ASM below #49695. + defer debug.SetGCPercent(debug.SetGCPercent(-1)) + // SetGCPercent waits until the mark phase is over, but the runtime + // also preempts at the start of the sweep phase, so make sure that's + // done too. See #49695. + runtime.GC() + + // Temporarily rewind the stack and trigger SIGSETXID + issue9400.RewindAndSetgid() + + // Check test pattern + for i := range big { + if big[i] != pattern { + t.Fatalf("entry %d of test pattern is wrong; %#x != %#x", i, big[i], uint64(pattern)) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9510.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9510.go new file mode 100644 index 0000000000000000000000000000000000000000..7f0aff4fe4c8f34621eff02e14807c807683c40d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9510.go @@ -0,0 +1,26 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo + +// Test that we can link together two different cgo packages that both +// use the same libgcc function. + +package cgotest + +import ( + "runtime" + "testing" + + "cmd/cgo/internal/test/issue9510a" + "cmd/cgo/internal/test/issue9510b" +) + +func test9510(t *testing.T) { + if runtime.GOARCH == "arm" { + t.Skip("skipping because libgcc may be a Thumb library") + } + issue9510a.F(1, 1) + issue9510b.F(1, 1) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9510a/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9510a/a.go new file mode 100644 index 0000000000000000000000000000000000000000..f0a0128d104e5ba507df897b391b024f7d199a40 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9510a/a.go @@ -0,0 +1,19 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue9510a + +/* +static double csquare(double a, double b) { + __complex__ double d; + __real__ d = a; + __imag__ d = b; + return __real__ (d * d); +} +*/ +import "C" + +func F(a, b float64) float64 { + return float64(C.csquare(C.double(a), C.double(b))) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9510b/b.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9510b/b.go new file mode 100644 index 0000000000000000000000000000000000000000..6e22508c32aef9d89ad40d7d8891c916a1f53675 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/issue9510b/b.go @@ -0,0 +1,19 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package issue9510b + +/* +static double csquare(double a, double b) { + __complex__ double d; + __real__ d = a; + __imag__ d = b; + return __real__ (d * d); +} +*/ +import "C" + +func F(a, b float64) float64 { + return float64(C.csquare(C.double(a), C.double(b))) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/linux_ppc64le_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/linux_ppc64le_test.go new file mode 100644 index 0000000000000000000000000000000000000000..67b6b161d695fc4046be2cf72f90207dd1831594 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/linux_ppc64le_test.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64le && linux && cgo + +package cgotest + +import "testing" + +func TestPPC64CallStubs(t *testing.T) { + testPPC64CallStubs(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/seh_internal_windows_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/seh_internal_windows_test.go new file mode 100644 index 0000000000000000000000000000000000000000..708ffdc6f60bda468cff4798da48ce73a709fabc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/seh_internal_windows_test.go @@ -0,0 +1,16 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build cgo && windows && internal + +package cgotest + +import ( + "internal/testenv" + "testing" +) + +func TestCallbackCallersSEH(t *testing.T) { + testenv.SkipFlaky(t, 65116) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/seh_windows_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/seh_windows_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4a8d5bbd4dcc02136fa4a3ed55dd72aed4067f85 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/seh_windows_test.go @@ -0,0 +1,11 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo && windows && !internal + +package cgotest + +import "testing" + +func TestCallbackCallersSEH(t *testing.T) { testCallbackCallersSEH(t) } diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/setgid2_linux.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/setgid2_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..438f5ae512d5fd94d2b73b7782f0cd0393945d8a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/setgid2_linux.go @@ -0,0 +1,35 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Stress test setgid and thread creation. A thread +// can get a SIGSETXID signal early on at thread +// initialization, causing crash. See issue 53374. 
+ +package cgotest + +/* +#include +#include +*/ +import "C" + +import ( + "runtime" + "testing" +) + +func testSetgidStress(t *testing.T) { + const N = 50 + ch := make(chan int, N) + for i := 0; i < N; i++ { + go func() { + C.setgid(0) + ch <- 1 + runtime.LockOSThread() // so every goroutine uses a new thread + }() + } + for i := 0; i < N; i++ { + <-ch + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/setgid_linux.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/setgid_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..7c64946cb34b68fa1770fbfdc9cbf1f3ab7d1092 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/setgid_linux.go @@ -0,0 +1,49 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that setgid does not hang on Linux. +// See https://golang.org/issue/3871 for details. + +package cgotest + +/* +#include +#include +*/ +import "C" + +import ( + "os" + "os/signal" + "syscall" + "testing" + "time" +) + +func runTestSetgid() bool { + c := make(chan bool) + go func() { + C.setgid(0) + c <- true + }() + select { + case <-c: + return true + case <-time.After(5 * time.Second): + return false + } + +} + +func testSetgid(t *testing.T) { + if !runTestSetgid() { + t.Error("setgid hung") + } + + // Now try it again after using signal.Notify. 
+ signal.Notify(make(chan os.Signal, 1), syscall.SIGINT) + if !runTestSetgid() { + t.Error("setgid hung after signal.Notify") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/sigaltstack.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/sigaltstack.go new file mode 100644 index 0000000000000000000000000000000000000000..d468cf82511cfe75b5534d61ef72707bea91aa98 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/sigaltstack.go @@ -0,0 +1,78 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows && !android + +// Test that the Go runtime still works if C code changes the signal stack. + +package cgotest + +/* +#include +#include +#include +#include + +#ifdef _AIX +// On AIX, SIGSTKSZ is too small to handle Go sighandler. +#define CSIGSTKSZ 0x4000 +#else +#define CSIGSTKSZ SIGSTKSZ +#endif + +static stack_t oss; +static char signalStack[CSIGSTKSZ]; + +static void changeSignalStack(void) { + stack_t ss; + memset(&ss, 0, sizeof ss); + ss.ss_sp = signalStack; + ss.ss_flags = 0; + ss.ss_size = CSIGSTKSZ; + if (sigaltstack(&ss, &oss) < 0) { + perror("sigaltstack"); + abort(); + } +} + +static void restoreSignalStack(void) { +#if (defined(__x86_64__) || defined(__i386__)) && defined(__APPLE__) + // The Darwin C library enforces a minimum that the kernel does not. + // This is OK since we allocated this much space in mpreinit, + // it was just removed from the buffer by stackalloc. 
+ oss.ss_size = MINSIGSTKSZ; +#endif + if (sigaltstack(&oss, NULL) < 0) { + perror("sigaltstack restore"); + abort(); + } +} + +static int zero(void) { + return 0; +} +*/ +import "C" + +import ( + "runtime" + "testing" +) + +func testSigaltstack(t *testing.T) { + switch { + case runtime.GOOS == "solaris", runtime.GOOS == "illumos", runtime.GOOS == "ios" && runtime.GOARCH == "arm64": + t.Skipf("switching signal stack not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) + } + + C.changeSignalStack() + defer C.restoreSignalStack() + defer func() { + if recover() == nil { + t.Error("did not see expected panic") + } + }() + v := 1 / int(C.zero()) + t.Errorf("unexpected success of division by zero == %d", v) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/sigprocmask.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/sigprocmask.c new file mode 100644 index 0000000000000000000000000000000000000000..43158332b9bc9eccc55afdcc4dee3b9faa9c99fb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/sigprocmask.c @@ -0,0 +1,51 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !windows + +#include +#include +#include +#include +#include +#include +#include + +extern void IntoGoAndBack(); + +int CheckBlocked() { + sigset_t mask; + sigprocmask(SIG_BLOCK, NULL, &mask); + return sigismember(&mask, SIGIO); +} + +static void* sigthreadfunc(void* unused) { + sigset_t mask; + sigemptyset(&mask); + sigaddset(&mask, SIGIO); + sigprocmask(SIG_BLOCK, &mask, NULL); + IntoGoAndBack(); + return NULL; +} + +int RunSigThread() { + int tries; + pthread_t thread; + int r; + struct timespec ts; + + for (tries = 0; tries < 20; tries++) { + r = pthread_create(&thread, NULL, &sigthreadfunc, NULL); + if (r == 0) { + return pthread_join(thread, NULL); + } + if (r != EAGAIN) { + return r; + } + ts.tv_sec = 0; + ts.tv_nsec = (tries + 1) * 1000 * 1000; // Milliseconds. + nanosleep(&ts, NULL); + } + return EAGAIN; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/sigprocmask.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/sigprocmask.go new file mode 100644 index 0000000000000000000000000000000000000000..6cc04d6855371dfe214b563f12d362ee5cd6dd08 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/sigprocmask.go @@ -0,0 +1,40 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows + +package cgotest + +/* +#cgo CFLAGS: -pthread +#cgo LDFLAGS: -pthread +extern int RunSigThread(); +extern int CheckBlocked(); +*/ +import "C" +import ( + "os" + "os/signal" + "syscall" + "testing" +) + +var blocked bool + +//export IntoGoAndBack +func IntoGoAndBack() { + // Verify that SIGIO stays blocked on the C thread + // even when unblocked for signal.Notify(). 
+ signal.Notify(make(chan os.Signal), syscall.SIGIO) + blocked = C.CheckBlocked() != 0 +} + +func testSigprocmask(t *testing.T) { + if r := C.RunSigThread(); r != 0 { + t.Errorf("pthread_create/pthread_join failed: %d", r) + } + if !blocked { + t.Error("Go runtime unblocked SIGIO") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/stubtest_linux_ppc64le.S b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/stubtest_linux_ppc64le.S new file mode 100644 index 0000000000000000000000000000000000000000..0c519705a508f20d1ae16642a118fb007ef733cc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/stubtest_linux_ppc64le.S @@ -0,0 +1,122 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// When linking C ELFv2 objects, the Go linker may need to insert calling stubs. +// A call stub is usually needed when the ELFv2 st_other attribute is different +// between caller and callee. +// +// The type of call stub inserted will vary depending on GOPPC64 and the +// buildmode (e.g pie builds shared code, default builds fixed-position code). +// CI is set up to run for P8 and P10 machines, and this test is run in both +// pie and default modes. +// +// Several functions are written with interesting st_other attributes, and +// call each other to test various calling combinations which require stubs. +// +// The call tree is as follows, starting from TestPPC64Stubs (A C function): +// TestPPC64Stubs (compiled PIC by default by Go) +// notoc_func [called TOC -> NOTOC (but R2 is preserved)] +// toc_func [called NOTOC -> TOC] +// notoc_nor2_func [called TOC -> NOTOC] +// random [dynamic TOC call] +// random [dynamic NOTOC call] +// +// Depending on the GOPPC64/buildmode used, and type of call, one of 7 stubs may need inserted: +// +// TOC -> NOTOC: Save R2, call global entry. 
(valid for any GOPPC64) +// TOC save slot is rewrittent to restore TOC. +// NOTOC -> TOC [P10]: A PIC call stub using P10 instructions to call the global entry +// NOTOC -> TOC [P8]: A PIC call stub using P8 instructions to call the global entry +// +// TOC -> dynamic: A PLT call stub is generated which saves R2. +// TOC save slot is rewritten to restore TOC. +// NOTOC -> dynamic [P10]: A stub using pcrel instructions is generated. +// NOTOC -> dynamic [P8/default]: A P8 compatible, non-PIC stub is generated +// NOTOC -> dynamic [P8/pie]: A P8 compatible, PIC stub is generated +// +// +// Some notes about other cases: +// TOC -> TOC, NOTOC -> NOTOC, NOTOC -> TOC local calls do not require require call stubs. +// TOC -> NOTOC (R2 is preserved, st_other==0): A special case where a call stub is not needed. + +// This test requires a binutils with power10 and ELFv2 1.5 support. This is earliest verified version. +.if .gasversion. >= 23500 + +// A function which does not guarantee R2 is preserved. +// R2 is clobbered here to ensure the stubs preserve it. + .globl notoc_nor2_func + .type notoc_nor2_func, @function +notoc_nor2_func: + .localentry notoc_nor2_func,1 + li 2,0 + blr + +// A function which expects R2 to hold TOC, and has a distinct local entry. + .globl toc_func + .type toc_func, @function +toc_func: + addis 2,12,.TOC.-toc_func@ha + addi 2,2,.TOC.-toc_func@l + .localentry toc_func, .-toc_func + mflr 0 + std 0,16(1) + stdu 1,-32(1) + + // Call a NOTOC function which clobbers R2. + bl notoc_nor2_func + nop + + // Call libc random. This should generate a TOC relative plt stub. + bl random + nop + + addi 1,1,32 + ld 0,16(1) + mtlr 0 + blr + +// An ELFv2 st_other==0 function. It preserves R2 (TOC), but does not use it. + .globl notoc_func + .type notoc_func, @function +notoc_func: + // Save R2 and LR and stack a frame. + mflr 0 + std 0,16(1) + stdu 1,-32(1) + + // Save R2 in TOC save slot. + std 2,24(1) + + // clobber R2 + li 2,0 + + // Call type2_func. 
A call stub from notoc to toc should be inserted. + bl toc_func@notoc + + // Call libc random. A notoc plt stub should be inserted. + bl random@notoc + + // Return 0 to indicate the test ran. + li 3,0 + + // Restore R2 + ld 2,24(1) + + // Restore LR and pop stack + addi 1,1,32 + ld 0,16(1) + mtlr 0 + blr + +.else + +// A stub for older binutils + .globl notoc_func + .type notoc_func, @function +notoc_func: + // Return 1 to indicate the test was skipped. + li 3,1 + blr + +.endif diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/test.go new file mode 100644 index 0000000000000000000000000000000000000000..9b3790eb11e2d85dde3c4f23cf06f42210969fa3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/test.go @@ -0,0 +1,2323 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test cases for cgo. +// Both the import "C" prologue and the main file are sorted by issue number. +// This file contains C definitions (not just declarations) +// and so it must NOT contain any //export directives on Go functions. +// See testx.go for exports. 
+ +package cgotest + +/* +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#cgo !darwin LDFLAGS: -lm + +#ifndef WIN32 +#include +#include +#endif + +// alignment tests + +typedef unsigned char Uint8; +typedef unsigned short Uint16; + +typedef enum { + MOD1 = 0x0000, + MODX = 0x8000 +} SDLMod; + +typedef enum { + A1 = 1, + B1 = 322, + SDLK_LAST +} SDLKey; + +typedef struct SDL_keysym { + Uint8 scancode; + SDLKey sym; + SDLMod mod; + Uint16 unicode; +} SDL_keysym; + +typedef struct SDL_KeyboardEvent { + Uint8 typ; + Uint8 which; + Uint8 state; + SDL_keysym keysym; +} SDL_KeyboardEvent; + +void makeEvent(SDL_KeyboardEvent *event) { + unsigned char *p; + int i; + + p = (unsigned char*)event; + for (i=0; ityp == typ && e->which == which && e->state == state && e->keysym.scancode == scan && e->keysym.sym == sym && e->keysym.mod == mod && e->keysym.unicode == uni; +} + +void cTest(SDL_KeyboardEvent *event) { + printf("C: %#x %#x %#x %#x %#x %#x %#x\n", event->typ, event->which, event->state, + event->keysym.scancode, event->keysym.sym, event->keysym.mod, event->keysym.unicode); + fflush(stdout); +} + +// api + +const char *greeting = "hello, world"; + +// basic test cases + +#define SHIFT(x, y) ((x)<<(y)) +#define KILO SHIFT(1, 10) +#define UINT32VAL 0xc008427bU + +enum E { + Enum1 = 1, + Enum2 = 2, +}; + +typedef unsigned char cgo_uuid_t[20]; + +void uuid_generate(cgo_uuid_t x) { + x[0] = 0; +} + +struct S { + int x; +}; + +const char *cstr = "abcefghijklmnopqrstuvwxyzABCEFGHIJKLMNOPQRSTUVWXYZ1234567890"; + +extern enum E myConstFunc(struct S* const ctx, int const id, struct S **const filter); + +enum E myConstFunc(struct S *const ctx, int const id, struct S **const filter) { return 0; } + +int add(int x, int y) { + return x+y; +}; + +// escape vs noescape + +// TODO(#56378): enable in Go 1.23: +// #cgo noescape handleGoStringPointerNoescape +void handleGoStringPointerNoescape(void *s) {} + +void 
handleGoStringPointerEscape(void *s) {} + +// Following mimics vulkan complex definitions for benchmarking cgocheck overhead. + +typedef uint32_t VkFlags; +typedef VkFlags VkDeviceQueueCreateFlags; +typedef uint32_t VkStructureType; + +typedef struct VkDeviceQueueCreateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceQueueCreateFlags flags; + uint32_t queueFamilyIndex; + uint32_t queueCount; + const float* pQueuePriorities; +} VkDeviceQueueCreateInfo; + +typedef struct VkPhysicalDeviceFeatures { + uint32_t bools[56]; +} VkPhysicalDeviceFeatures; + +typedef struct VkDeviceCreateInfo { + VkStructureType sType; + const void* pNext; + VkFlags flags; + uint32_t queueCreateInfoCount; + const VkDeviceQueueCreateInfo* pQueueCreateInfos; + uint32_t enabledLayerCount; + const char* const* ppEnabledLayerNames; + uint32_t enabledExtensionCount; + const char* const* ppEnabledExtensionNames; + const VkPhysicalDeviceFeatures* pEnabledFeatures; +} VkDeviceCreateInfo; + +void handleComplexPointer(VkDeviceCreateInfo *a0) {} +void handleComplexPointer8( + VkDeviceCreateInfo *a0, VkDeviceCreateInfo *a1, VkDeviceCreateInfo *a2, VkDeviceCreateInfo *a3, + VkDeviceCreateInfo *a4, VkDeviceCreateInfo *a5, VkDeviceCreateInfo *a6, VkDeviceCreateInfo *a7 +) {} + +// complex alignment + +struct { + float x; + _Complex float y; +} cplxAlign = { 3.14, 2.17 }; + +// constants and pointer checking + +#define CheckConstVal 0 + +typedef struct { + int *p; +} CheckConstStruct; + +static void CheckConstFunc(CheckConstStruct *p, int e) {} + +// duplicate symbol + +int base_symbol = 0; +#define alias_one base_symbol +#define alias_two base_symbol + +// function pointer variables + +typedef int (*intFunc) (); + +int +bridge_int_func(intFunc f) +{ + return f(); +} + +int fortytwo() +{ + return 42; +} + +// issue 1222 +typedef union { + long align; +} xxpthread_mutex_t; +struct ibv_async_event { + union { + int x; + } element; +}; +struct ibv_context { + xxpthread_mutex_t mutex; +}; + +// 
issue 1635 +// Mac OS X's gcc will generate scattered relocation 2/1 for +// this function on Darwin/386, and 8l couldn't handle it. +// this example is in issue 1635 +void scatter() { + void *p = scatter; + printf("scatter = %p\n", p); +} + +// Adding this explicit extern declaration makes this a test for +// https://gcc.gnu.org/PR68072 aka https://golang.org/issue/13344 . +// It used to cause a cgo error when building with GCC 6. +extern int hola; + +// this example is in issue 3253 +int hola = 0; +int testHola() { return hola; } + +// issue 3250 +#ifdef WIN32 +void testSendSIG() {} +#else +static void *thread(void *p) { + const int M = 100; + int i; + (void)p; + for (i = 0; i < M; i++) { + pthread_kill(pthread_self(), SIGCHLD); + usleep(rand() % 20 + 5); + } + return NULL; +} +void testSendSIG() { + const int N = 20; + int i; + pthread_t tid[N]; + for (i = 0; i < N; i++) { + usleep(rand() % 200 + 100); + pthread_create(&tid[i], 0, thread, NULL); + } + for (i = 0; i < N; i++) + pthread_join(tid[i], 0); +} +#endif + +// issue 3261 +// libgcc on ARM might be compiled as thumb code, but our 5l +// can't handle that, so we have to disable this test on arm. +#ifdef __ARMEL__ +int vabs(int x) { + puts("testLibgcc is disabled on ARM because 5l cannot handle thumb library."); + return (x < 0) ? -x : x; +} +#elif defined(__arm64__) && defined(__clang__) +int vabs(int x) { + puts("testLibgcc is disabled on ARM64 with clang due to lack of libgcc."); + return (x < 0) ? -x : x; +} +#else +int __absvsi2(int); // dummy prototype for libgcc function +// we shouldn't name the function abs, as gcc might use +// the builtin one. 
+int vabs(int x) { return __absvsi2(x); } +#endif + + +// issue 3729 +// access errno from void C function +const char _expA = 0x42; +const float _expB = 3.14159; +const short _expC = 0x55aa; +const int _expD = 0xdeadbeef; + +#ifdef WIN32 +void g(void) {} +void g2(int x, char a, float b, short c, int d) {} +#else + +void g(void) { + errno = E2BIG; +} + +// try to pass some non-trivial arguments to function g2 +void g2(int x, char a, float b, short c, int d) { + if (a == _expA && b == _expB && c == _expC && d == _expD) + errno = x; + else + errno = -1; +} +#endif + +// issue 3945 +// Test that cgo reserves enough stack space during cgo call. +// See https://golang.org/issue/3945 for details. +void say() { + printf("%s from C\n", "hello"); +} + +// issue 4054 part 1 - other half in testx.go + +typedef enum { + A = 0, + B, + C, + D, + E, + F, + G, + H, + II, + J, +} issue4054a; + +// issue 4339 +// We've historically permitted #include <>, so test it here. Issue 29333. +// Also see issue 41059. +#include + +// issue 4417 +// cmd/cgo: bool alignment/padding issue. +// bool alignment is wrong and causing wrong arguments when calling functions. +static int c_bool(bool a, bool b, int c, bool d, bool e) { + return c; +} + +// issue 4857 +#cgo CFLAGS: -Werror +const struct { int a; } *issue4857() { return (void *)0; } + +// issue 5224 +// Test that the #cgo CFLAGS directive works, +// with and without platform filters. +#cgo CFLAGS: -DCOMMON_VALUE=123 +#cgo windows CFLAGS: -DIS_WINDOWS=1 +#cgo !windows CFLAGS: -DIS_WINDOWS=0 +int common = COMMON_VALUE; +int is_windows = IS_WINDOWS; + +// issue 5227 +// linker incorrectly treats common symbols and +// leaves them undefined. 
+ +typedef struct { + int Count; +} Fontinfo; + +Fontinfo SansTypeface; + +extern void init(); + +Fontinfo loadfont() { + Fontinfo f = {0}; + return f; +} + +void init() { + SansTypeface = loadfont(); +} + +// issue 5242 +// Cgo incorrectly computed the alignment of structs +// with no Go accessible fields as 0, and then panicked on +// modulo-by-zero computations. + +// issue 50987 +// disable arm64 GCC warnings +#cgo CFLAGS: -Wno-psabi -Wno-unknown-warning-option + +typedef struct { +} foo; + +typedef struct { + int x : 1; +} bar; + +int issue5242(foo f, bar b) { + return 5242; +} + +// issue 5337 +// Verify that we can withstand SIGPROF received on foreign threads + +#ifdef WIN32 +void test5337() {} +#else +static void *thread1(void *p) { + (void)p; + pthread_kill(pthread_self(), SIGPROF); + return NULL; +} +void test5337() { + pthread_t tid; + pthread_create(&tid, 0, thread1, NULL); + pthread_join(tid, 0); +} +#endif + +// issue 5603 + +const long long issue5603exp = 0x12345678; +long long issue5603foo0() { return issue5603exp; } +long long issue5603foo1(void *p) { return issue5603exp; } +long long issue5603foo2(void *p, void *q) { return issue5603exp; } +long long issue5603foo3(void *p, void *q, void *r) { return issue5603exp; } +long long issue5603foo4(void *p, void *q, void *r, void *s) { return issue5603exp; } + +// issue 5740 + +int test5740a(void), test5740b(void); + +// issue 5986 +static void output5986() +{ + int current_row = 0, row_count = 0; + double sum_squares = 0; + double d; + do { + if (current_row == 10) { + current_row = 0; + } + ++row_count; + } + while (current_row++ != 1); + d = sqrt(sum_squares / row_count); + printf("sqrt is: %g\n", d); +} + +// issue 6128 +// Test handling of #defined names in clang. +// NOTE: Must use hex, or else a shortcut for decimals +// in cgo avoids trying to pass this to clang. 
+#define X 0x1 + +// issue 6472 +typedef struct +{ + struct + { + int x; + } y[16]; +} z; + +// issue 6612 +// Test new scheme for deciding whether C.name is an expression, type, constant. +// Clang silences some warnings when the name is a #defined macro, so test those too +// (even though we now use errors exclusively, not warnings). + +void myfunc(void) {} +int myvar = 5; +const char *mytext = "abcdef"; +typedef int mytype; +enum { + myenum = 1234, +}; + +#define myfunc_def myfunc +#define myvar_def myvar +#define mytext_def mytext +#define mytype_def mytype +#define myenum_def myenum +#define myint_def 12345 +#define myfloat_def 1.5 +#define mystring_def "hello" + +// issue 6907 +char* Issue6907CopyString(_GoString_ s) { + size_t n; + const char *p; + char *r; + + n = _GoStringLen(s); + p = _GoStringPtr(s); + r = malloc(n + 1); + memmove(r, p, n); + r[n] = '\0'; + return r; +} + +// issue 7560 +typedef struct { + char x; + long y; +} __attribute__((__packed__)) misaligned; + +int +offset7560(void) +{ + return (uintptr_t)&((misaligned*)0)->y; +} + +// issue 7786 +// No runtime test, just make sure that typedef and struct/union/class are interchangeable at compile time. + +struct test7786; +typedef struct test7786 typedef_test7786; +void f7786(struct test7786 *ctx) {} +void g7786(typedef_test7786 *ctx) {} + +typedef struct body7786 typedef_body7786; +struct body7786 { int x; }; +void b7786(struct body7786 *ctx) {} +void c7786(typedef_body7786 *ctx) {} + +typedef union union7786 typedef_union7786; +void u7786(union union7786 *ctx) {} +void v7786(typedef_union7786 *ctx) {} + +// issue 8092 +// Test that linker defined symbols (e.g., text, data) don't +// conflict with C symbols. +char text[] = "text"; +char data[] = "data"; +char *ctext(void) { return text; } +char *cdata(void) { return data; } + +// issue 8428 +// Cgo inconsistently translated zero size arrays. 
+ +struct issue8428one { + char b; + char rest[]; +}; + +struct issue8428two { + void *p; + char b; + char rest[0]; + char pad; +}; + +struct issue8428three { + char w[1][2][3][0]; + char x[2][3][0][1]; + char y[3][0][1][2]; + char z[0][1][2][3]; +}; + +// issue 8331 part 1 - part 2 in testx.go +// A typedef of an unnamed struct is the same struct when +// #include'd twice. No runtime test; just make sure it compiles. +#include "issue8331.h" + +// issue 8368 and 8441 +// Recursive struct definitions didn't work. +// No runtime test; just make sure it compiles. +typedef struct one one; +typedef struct two two; +struct one { + two *x; +}; +struct two { + one *x; +}; + +// issue 8811 + +extern int issue8811Initialized; +extern void issue8811Init(); + +void issue8811Execute() { + if(!issue8811Initialized) + issue8811Init(); +} + +// issue 8945 + +typedef void (*PFunc8945)(); +PFunc8945 func8945; + +// issue 9557 + +struct issue9557_t { + int a; +} test9557bar = { 42 }; +struct issue9557_t *issue9557foo = &test9557bar; + +// issue 10303 +// Pointers passed to C were not marked as escaping (bug in cgo). + +typedef int *intptr; + +void setintstar(int *x) { + *x = 1; +} + +void setintptr(intptr x) { + *x = 1; +} + +void setvoidptr(void *x) { + *(int*)x = 1; +} + +typedef struct Struct Struct; +struct Struct { + int *P; +}; + +void setstruct(Struct s) { + *s.P = 1; +} + +// issue 11925 +// Structs with zero-length trailing fields are now padded by the Go compiler. + +struct a11925 { + int i; + char a[0]; + char b[0]; +}; + +struct b11925 { + int i; + char a[0]; + char b[]; +}; + +// issue 12030 +void issue12030conv(char *buf, double x) { + sprintf(buf, "d=%g", x); +} + +// issue 14838 + +int check_cbytes(char *b, size_t l) { + int i; + for (i = 0; i < l; i++) { + if (b[i] != i) { + return 0; + } + } + return 1; +} + +// issue 17065 +// Test that C symbols larger than a page play nicely with the race detector. 
+int ii[65537]; + +// issue 17537 +// The void* cast introduced by cgo to avoid problems +// with const/volatile qualifiers breaks C preprocessor macros that +// emulate functions. + +typedef struct { + int i; +} S17537; + +int I17537(S17537 *p); + +#define I17537(p) ((p)->i) + +// Calling this function used to fail without the cast. +const int F17537(const char **p) { + return **p; +} + +// issue 17723 +// API compatibility checks + +typedef char *cstring_pointer; +static void cstring_pointer_fun(cstring_pointer dummy) { } +const char *api_hello = "hello!"; + +// Calling this function used to trigger an error from the C compiler +// (issue 18298). +void F18298(const void *const *p) { +} + +// Test that conversions between typedefs work as they used to. +typedef const void *T18298_1; +struct S18298 { int i; }; +typedef const struct S18298 *T18298_2; +void G18298(T18298_1 t) { +} + +// issue 18126 +// cgo check of void function returning errno. +void Issue18126C(void **p) {} + +// issue 18720 + +#define HELLO "hello" +#define WORLD "world" +#define HELLO_WORLD HELLO "\000" WORLD + +struct foo { char c; }; +#define SIZE_OF(x) sizeof(x) +#define SIZE_OF_FOO SIZE_OF(struct foo) +#define VAR1 VAR +#define VAR var +int var = 5; + +#define ADDR &var + +#define CALL fn() +int fn(void) { + return ++var; +} + +// issue 20129 + +int issue20129 = 0; +typedef void issue20129Void; +issue20129Void issue20129Foo() { + issue20129 = 1; +} +typedef issue20129Void issue20129Void2; +issue20129Void2 issue20129Bar() { + issue20129 = 2; +} + +// issue 20369 +#define XUINT64_MAX 18446744073709551615ULL + +// issue 21668 +// Fail to guess the kind of the constant "x". +// No runtime test; just make sure it compiles. +const int x21668 = 42; + +// issue 21708 +#define CAST_TO_INT64 (int64_t)(-1) + +// issue 21809 +// Compile C `typedef` to go type aliases. 
+ +typedef long MySigned_t; +// tests alias-to-alias +typedef MySigned_t MySigned2_t; +long takes_long(long x) { return x * x; } +MySigned_t takes_typedef(MySigned_t x) { return x * x; } + +// issue 22906 + +// It's going to be hard to include a whole real JVM to test this. +// So we'll simulate a really easy JVM using just the parts we need. +// This is the relevant part of jni.h. + +struct _jobject; + +typedef struct _jobject *jobject; +typedef jobject jclass; +typedef jobject jthrowable; +typedef jobject jstring; +typedef jobject jarray; +typedef jarray jbooleanArray; +typedef jarray jbyteArray; +typedef jarray jcharArray; +typedef jarray jshortArray; +typedef jarray jintArray; +typedef jarray jlongArray; +typedef jarray jfloatArray; +typedef jarray jdoubleArray; +typedef jarray jobjectArray; + +typedef jobject jweak; + +// Note: jvalue is already a non-pointer type due to it being a C union. + +// issue 22958 + +typedef struct { + unsigned long long f8 : 8; + unsigned long long f16 : 16; + unsigned long long f24 : 24; + unsigned long long f32 : 32; + unsigned long long f40 : 40; + unsigned long long f48 : 48; + unsigned long long f56 : 56; + unsigned long long f64 : 64; +} issue22958Type; + +// issue 23356 +int a(void) { return 5; }; +int r(void) { return 3; }; + +// issue 23720 +typedef int *issue23720A; +typedef const int *issue23720B; +void issue23720F(issue23720B a) {} + +// issue 24206 +#if defined(__linux__) && defined(__x86_64__) +#include +// Returns string with null byte at the last valid address +char* dangerousString1() { + int pageSize = 4096; + char *data = mmap(0, 2 * pageSize, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, 0, 0); + mprotect(data + pageSize,pageSize,PROT_NONE); + int start = pageSize - 123 - 1; // last 123 bytes of first page + 1 null byte + int i = start; + for (; i < pageSize; i++) { + data[i] = 'x'; + } + data[pageSize -1 ] = 0; + return data+start; +} + +char* dangerousString2() { + int pageSize = 4096; + char *data = mmap(0, 3 * 
pageSize, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, 0, 0); + mprotect(data + 2 * pageSize,pageSize,PROT_NONE); + int start = pageSize - 123 - 1; // last 123 bytes of first page + 1 null byte + int i = start; + for (; i < 2 * pageSize; i++) { + data[i] = 'x'; + } + data[2*pageSize -1 ] = 0; + return data+start; +} +#else +char *dangerousString1() { return NULL; } +char *dangerousString2() { return NULL; } +#endif + +// issue 26066 +const unsigned long long int issue26066 = (const unsigned long long) -1; + +// issue 26517 +// Introduce two pointer types which are distinct, but have the same +// base type. Make sure that both of those pointer types get resolved +// correctly. Before the fix for 26517 if one of these pointer types +// was resolved before the other one was processed, the second one +// would never be resolved. +// Before this issue was fixed this test failed on Windows, +// where va_list expands to a named char* type. +typedef va_list TypeOne; +typedef char *TypeTwo; + +// issue 28540 + +static void twoargs1(void *p, int n) {} +static void *twoargs2() { return 0; } +static int twoargs3(void * p) { return 0; } + +// issue 28545 +// Failed to add type conversion for negative constant. + +static void issue28545F(char **p, int n, complex double a) {} + +// issue 28772 part 1 - part 2 in testx.go +// Failed to add type conversion for Go constant set to C constant. +// No runtime test; just make sure it compiles. + +#define issue28772Constant 1 + +// issue 28896 +// cgo was incorrectly adding padding after a packed struct. 
+typedef struct { + void *f1; + uint32_t f2; +} __attribute__((__packed__)) innerPacked; + +typedef struct { + innerPacked g1; + uint64_t g2; +} outerPacked; + +typedef struct { + void *f1; + uint32_t f2; +} innerUnpacked; + +typedef struct { + innerUnpacked g1; + uint64_t g2; +} outerUnpacked; + +size_t offset(int x) { + switch (x) { + case 0: + return offsetof(innerPacked, f2); + case 1: + return offsetof(outerPacked, g2); + case 2: + return offsetof(innerUnpacked, f2); + case 3: + return offsetof(outerUnpacked, g2); + default: + abort(); + } +} + +// issue 29748 + +typedef struct { char **p; } S29748; +static int f29748(S29748 *p) { return 0; } + +// issue 29781 +// Error with newline inserted into constant expression. +// Compilation test only, nothing to run. + +static void issue29781F(char **p, int n) {} +#define ISSUE29781C 0 + +// issue 31093 +static uint16_t issue31093F(uint16_t v) { return v; } + +// issue 32579 +typedef struct S32579 { unsigned char data[1]; } S32579; + +// issue 37033, cgo.Handle +extern void GoFunc37033(uintptr_t handle); +void cFunc37033(uintptr_t handle) { GoFunc37033(handle); } + +// issue 38649 +// Test that #define'd type aliases work. +#define netbsd_gid unsigned int + +// issue 40494 +// Inconsistent handling of tagged enum and union types. +enum Enum40494 { X_40494 }; +union Union40494 { int x; }; +void issue40494(enum Enum40494 e, union Union40494* up) {} + +// Issue 45451, bad handling of go:notinheap types. +typedef struct issue45451Undefined issue45451; + +// Issue 49633, example of cgo.Handle with void*. 
+extern void GoFunc49633(void*); +void cfunc49633(void *context) { GoFunc49633(context); } + +*/ +import "C" + +import ( + "context" + "fmt" + "math" + "math/rand" + "os" + "os/signal" + "reflect" + "runtime" + "runtime/cgo" + "sync" + "syscall" + "testing" + "time" + "unsafe" +) + +// alignment + +func testAlign(t *testing.T) { + var evt C.SDL_KeyboardEvent + C.makeEvent(&evt) + if C.same(&evt, evt.typ, evt.which, evt.state, evt.keysym.scancode, evt.keysym.sym, evt.keysym.mod, evt.keysym.unicode) == 0 { + t.Error("*** bad alignment") + C.cTest(&evt) + t.Errorf("Go: %#x %#x %#x %#x %#x %#x %#x\n", + evt.typ, evt.which, evt.state, evt.keysym.scancode, + evt.keysym.sym, evt.keysym.mod, evt.keysym.unicode) + t.Error(evt) + } +} + +// api + +const greeting = "hello, world" + +type testPair struct { + Name string + Got, Want interface{} +} + +var testPairs = []testPair{ + {"GoString", C.GoString(C.greeting), greeting}, + {"GoStringN", C.GoStringN(C.greeting, 5), greeting[:5]}, + {"GoBytes", C.GoBytes(unsafe.Pointer(C.greeting), 5), []byte(greeting[:5])}, +} + +func testHelpers(t *testing.T) { + for _, pair := range testPairs { + if !reflect.DeepEqual(pair.Got, pair.Want) { + t.Errorf("%s: got %#v, want %#v", pair.Name, pair.Got, pair.Want) + } + } +} + +// basic test cases + +const EINVAL = C.EINVAL /* test #define */ + +var KILO = C.KILO + +func uuidgen() { + var uuid C.cgo_uuid_t + C.uuid_generate(&uuid[0]) +} + +func Strtol(s string, base int) (int, error) { + p := C.CString(s) + n, err := C.strtol(p, nil, C.int(base)) + C.free(unsafe.Pointer(p)) + return int(n), err +} + +func Atol(s string) int { + p := C.CString(s) + n := C.atol(p) + C.free(unsafe.Pointer(p)) + return int(n) +} + +func testConst(t *testing.T) { + C.myConstFunc(nil, 0, nil) +} + +func testEnum(t *testing.T) { + if C.Enum1 != 1 || C.Enum2 != 2 { + t.Error("bad enum", C.Enum1, C.Enum2) + } +} + +func testNamedEnum(t *testing.T) { + e := new(C.enum_E) + + *e = C.Enum1 + if *e != 1 { + t.Error("bad 
enum", C.Enum1) + } + + *e = C.Enum2 + if *e != 2 { + t.Error("bad enum", C.Enum2) + } +} + +func testCastToEnum(t *testing.T) { + e := C.enum_E(C.Enum1) + if e != 1 { + t.Error("bad enum", C.Enum1) + } + + e = C.enum_E(C.Enum2) + if e != 2 { + t.Error("bad enum", C.Enum2) + } +} + +func testAtol(t *testing.T) { + l := Atol("123") + if l != 123 { + t.Error("Atol 123: ", l) + } +} + +func testErrno(t *testing.T) { + p := C.CString("no-such-file") + m := C.CString("r") + f, err := C.fopen(p, m) + C.free(unsafe.Pointer(p)) + C.free(unsafe.Pointer(m)) + if err == nil { + C.fclose(f) + t.Fatalf("C.fopen: should fail") + } + if err != syscall.ENOENT { + t.Fatalf("C.fopen: unexpected error: %v", err) + } +} + +func testMultipleAssign(t *testing.T) { + p := C.CString("234") + n, m := C.strtol(p, nil, 345), C.strtol(p, nil, 10) + if runtime.GOOS == "openbsd" { + // Bug in OpenBSD strtol(3) - base > 36 succeeds. + if (n != 0 && n != 239089) || m != 234 { + t.Fatal("Strtol x2: ", n, m) + } + } else if n != 0 || m != 234 { + t.Fatal("Strtol x2: ", n, m) + } + C.free(unsafe.Pointer(p)) +} + +var ( + cuint = (C.uint)(0) + culong C.ulong + cchar C.char +) + +type Context struct { + ctx *C.struct_ibv_context +} + +func benchCgoCall(b *testing.B) { + b.Run("add-int", func(b *testing.B) { + const x = C.int(2) + const y = C.int(3) + + for i := 0; i < b.N; i++ { + C.add(x, y) + } + }) + + b.Run("one-pointer", func(b *testing.B) { + var a0 C.VkDeviceCreateInfo + for i := 0; i < b.N; i++ { + C.handleComplexPointer(&a0) + } + }) + b.Run("string-pointer-escape", func(b *testing.B) { + for i := 0; i < b.N; i++ { + var s string + C.handleGoStringPointerEscape(unsafe.Pointer(&s)) + } + }) + b.Run("string-pointer-noescape", func(b *testing.B) { + for i := 0; i < b.N; i++ { + var s string + C.handleGoStringPointerNoescape(unsafe.Pointer(&s)) + } + }) + b.Run("eight-pointers", func(b *testing.B) { + var a0, a1, a2, a3, a4, a5, a6, a7 C.VkDeviceCreateInfo + for i := 0; i < b.N; i++ { + 
C.handleComplexPointer8(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7) + } + }) + b.Run("eight-pointers-nil", func(b *testing.B) { + var a0, a1, a2, a3, a4, a5, a6, a7 *C.VkDeviceCreateInfo + for i := 0; i < b.N; i++ { + C.handleComplexPointer8(a0, a1, a2, a3, a4, a5, a6, a7) + } + }) + b.Run("eight-pointers-array", func(b *testing.B) { + var a [8]C.VkDeviceCreateInfo + for i := 0; i < b.N; i++ { + C.handleComplexPointer8(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]) + } + }) + b.Run("eight-pointers-slice", func(b *testing.B) { + a := make([]C.VkDeviceCreateInfo, 8) + for i := 0; i < b.N; i++ { + C.handleComplexPointer8(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]) + } + }) +} + +// Benchmark measuring overhead from Go to C and back to Go (via a callback) +func benchCallback(b *testing.B) { + var x = false + for i := 0; i < b.N; i++ { + nestedCall(func() { x = true }) + } + if !x { + b.Fatal("nestedCall was not invoked") + } +} + +var sinkString string + +func benchGoString(b *testing.B) { + for i := 0; i < b.N; i++ { + sinkString = C.GoString(C.cstr) + } + const want = "abcefghijklmnopqrstuvwxyzABCEFGHIJKLMNOPQRSTUVWXYZ1234567890" + if sinkString != want { + b.Fatalf("%q != %q", sinkString, want) + } +} + +// Static (build-time) test that syntax traversal visits all operands of s[i:j:k]. +func sliceOperands(array [2000]int) { + _ = array[C.KILO:C.KILO:C.KILO] // no type error +} + +// set in cgo_thread_lock.go init +var testThreadLockFunc = func(*testing.T) {} + +// complex alignment + +func TestComplexAlign(t *testing.T) { + if C.cplxAlign.x != 3.14 { + t.Errorf("got %v, expected 3.14", C.cplxAlign.x) + } + if C.cplxAlign.y != 2.17 { + t.Errorf("got %v, expected 2.17", C.cplxAlign.y) + } +} + +// constants and pointer checking + +func testCheckConst(t *testing.T) { + // The test is that this compiles successfully. 
+ p := C.malloc(C.size_t(unsafe.Sizeof(C.int(0)))) + defer C.free(p) + C.CheckConstFunc(&C.CheckConstStruct{(*C.int)(p)}, C.CheckConstVal) +} + +// duplicate symbol + +func duplicateSymbols() { + fmt.Printf("%v %v %v\n", C.base_symbol, C.alias_one, C.alias_two) +} + +// environment + +// This is really an os package test but here for convenience. +func testSetEnv(t *testing.T) { + if runtime.GOOS == "windows" { + // Go uses SetEnvironmentVariable on windows. However, + // C runtime takes a *copy* at process startup of the + // OS environment, and stores it in environ/envp. + // It is this copy that getenv/putenv manipulate. + t.Logf("skipping test") + return + } + const key = "CGO_OS_TEST_KEY" + const val = "CGO_OS_TEST_VALUE" + os.Setenv(key, val) + keyc := C.CString(key) + defer C.free(unsafe.Pointer(keyc)) + v := C.getenv(keyc) + if uintptr(unsafe.Pointer(v)) == 0 { + t.Fatal("getenv returned NULL") + } + vs := C.GoString(v) + if vs != val { + t.Fatalf("getenv() = %q; want %q", vs, val) + } +} + +// function pointer variables + +func callBridge(f C.intFunc) int { + return int(C.bridge_int_func(f)) +} + +func callCBridge(f C.intFunc) C.int { + return C.bridge_int_func(f) +} + +func testFpVar(t *testing.T) { + const expected = 42 + f := C.intFunc(C.fortytwo) + res1 := C.bridge_int_func(f) + if r1 := int(res1); r1 != expected { + t.Errorf("got %d, want %d", r1, expected) + } + res2 := callCBridge(f) + if r2 := int(res2); r2 != expected { + t.Errorf("got %d, want %d", r2, expected) + } + r3 := callBridge(f) + if r3 != expected { + t.Errorf("got %d, want %d", r3, expected) + } +} + +// issue 1222 +type AsyncEvent struct { + event C.struct_ibv_async_event +} + +// issue 1635 + +func test1635(t *testing.T) { + C.scatter() + if v := C.hola; v != 0 { + t.Fatalf("C.hola is %d, should be 0", v) + } + if v := C.testHola(); v != 0 { + t.Fatalf("C.testHola() is %d, should be 0", v) + } +} + +// issue 2470 + +func testUnsignedInt(t *testing.T) { + a := (int64)(C.UINT32VAL) + b 
:= (int64)(0xc008427b) + if a != b { + t.Errorf("Incorrect unsigned int - got %x, want %x", a, b) + } +} + +// issue 3250 + +func test3250(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("not applicable on windows") + } + + t.Skip("skipped, see golang.org/issue/5885") + var ( + thres = 1 + sig = syscall_dot_SIGCHLD + ) + type result struct { + n int + sig os.Signal + } + var ( + sigCh = make(chan os.Signal, 10) + waitStart = make(chan struct{}) + waitDone = make(chan result) + ) + + signal.Notify(sigCh, sig) + + go func() { + n := 0 + alarm := time.After(time.Second * 3) + for { + select { + case <-waitStart: + waitStart = nil + case v := <-sigCh: + n++ + if v != sig || n > thres { + waitDone <- result{n, v} + return + } + case <-alarm: + waitDone <- result{n, sig} + return + } + } + }() + + waitStart <- struct{}{} + C.testSendSIG() + r := <-waitDone + if r.sig != sig { + t.Fatalf("received signal %v, but want %v", r.sig, sig) + } + t.Logf("got %d signals\n", r.n) + if r.n <= thres { + t.Fatalf("expected more than %d", thres) + } +} + +// issue 3261 + +func testLibgcc(t *testing.T) { + var table = []struct { + in, out C.int + }{ + {0, 0}, + {1, 1}, + {-42, 42}, + {1000300, 1000300}, + {1 - 1<<31, 1<<31 - 1}, + } + for _, v := range table { + if o := C.vabs(v.in); o != v.out { + t.Fatalf("abs(%d) got %d, should be %d", v.in, o, v.out) + return + } + } +} + +// issue 3729 + +func test3729(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("skipping on windows") + } + + _, e := C.g() + if e != syscall.E2BIG { + t.Errorf("got %q, expect %q", e, syscall.E2BIG) + } + _, e = C.g2(C.EINVAL, C._expA, C._expB, C._expC, C._expD) + if e != syscall.EINVAL { + t.Errorf("got %q, expect %q", e, syscall.EINVAL) + } +} + +// issue 3945 + +func testPrintf(t *testing.T) { + C.say() +} + +// issue 4054 + +var issue4054a = []int{C.A, C.B, C.C, C.D, C.E, C.F, C.G, C.H, C.I, C.J} + +// issue 4339 + +func test4339(t *testing.T) { + C.handle4339(&C.exported4339) +} + +// 
issue 4417 + +func testBoolAlign(t *testing.T) { + b := C.c_bool(true, true, 10, true, false) + if b != 10 { + t.Fatalf("found %d expected 10\n", b) + } + b = C.c_bool(true, true, 5, true, true) + if b != 5 { + t.Fatalf("found %d expected 5\n", b) + } + b = C.c_bool(true, true, 3, true, false) + if b != 3 { + t.Fatalf("found %d expected 3\n", b) + } + b = C.c_bool(false, false, 1, true, false) + if b != 1 { + t.Fatalf("found %d expected 1\n", b) + } + b = C.c_bool(false, true, 200, true, false) + if b != 200 { + t.Fatalf("found %d expected 200\n", b) + } +} + +// issue 4857 + +func test4857() { + _ = C.issue4857() +} + +// issue 5224 + +func testCflags(t *testing.T) { + is_windows := C.is_windows == 1 + if is_windows != (runtime.GOOS == "windows") { + t.Errorf("is_windows: %v, runtime.GOOS: %s", is_windows, runtime.GOOS) + } + if C.common != 123 { + t.Errorf("common: %v (expected 123)", C.common) + } +} + +// issue 5227 + +func test5227(t *testing.T) { + C.init() +} + +func selectfont() C.Fontinfo { + return C.SansTypeface +} + +// issue 5242 + +func test5242(t *testing.T) { + if got := C.issue5242(C.foo{}, C.bar{}); got != 5242 { + t.Errorf("got %v", got) + } +} + +func test5603(t *testing.T) { + var x [5]int64 + exp := int64(C.issue5603exp) + x[0] = int64(C.issue5603foo0()) + x[1] = int64(C.issue5603foo1(nil)) + x[2] = int64(C.issue5603foo2(nil, nil)) + x[3] = int64(C.issue5603foo3(nil, nil, nil)) + x[4] = int64(C.issue5603foo4(nil, nil, nil, nil)) + for i, v := range x { + if v != exp { + t.Errorf("issue5603foo%d() returns %v, expected %v", i, v, exp) + } + } +} + +// issue 5337 + +func test5337(t *testing.T) { + C.test5337() +} + +// issue 5740 + +func test5740(t *testing.T) { + if v := C.test5740a() + C.test5740b(); v != 5 { + t.Errorf("expected 5, got %v", v) + } +} + +// issue 5986 + +func test5986(t *testing.T) { + C.output5986() +} + +// issue 6128 + +func test6128() { + // nothing to run, just make sure this compiles. 
+ _ = C.X +} + +// issue 6390 + +func test6390(t *testing.T) { + p1 := C.malloc(1024) + if p1 == nil { + t.Fatalf("C.malloc(1024) returned nil") + } + p2 := C.malloc(0) + if p2 == nil { + t.Fatalf("C.malloc(0) returned nil") + } + C.free(p1) + C.free(p2) +} + +func test6472() { + // nothing to run, just make sure this compiles + s := new(C.z) + println(s.y[0].x) +} + +// issue 6506 + +func test6506() { + // nothing to run, just make sure this compiles + var x C.size_t + + C.calloc(x, x) + C.malloc(x) + C.realloc(nil, x) + C.memcpy(nil, nil, x) + C.memcmp(nil, nil, x) + C.memmove(nil, nil, x) + C.strncpy(nil, nil, x) + C.strncmp(nil, nil, x) + C.strncat(nil, nil, x) + x = C.strxfrm(nil, nil, x) + C.memchr(nil, 0, x) + x = C.strcspn(nil, nil) + x = C.strspn(nil, nil) + C.memset(nil, 0, x) + x = C.strlen(nil) + _ = x +} + +// issue 6612 + +func testNaming(t *testing.T) { + C.myfunc() + C.myfunc_def() + if v := C.myvar; v != 5 { + t.Errorf("C.myvar = %d, want 5", v) + } + if v := C.myvar_def; v != 5 { + t.Errorf("C.myvar_def = %d, want 5", v) + } + if s := C.GoString(C.mytext); s != "abcdef" { + t.Errorf("C.mytext = %q, want %q", s, "abcdef") + } + if s := C.GoString(C.mytext_def); s != "abcdef" { + t.Errorf("C.mytext_def = %q, want %q", s, "abcdef") + } + if c := C.myenum; c != 1234 { + t.Errorf("C.myenum = %v, want 1234", c) + } + if c := C.myenum_def; c != 1234 { + t.Errorf("C.myenum_def = %v, want 1234", c) + } + { + const c = C.myenum + if c != 1234 { + t.Errorf("C.myenum as const = %v, want 1234", c) + } + } + { + const c = C.myenum_def + if c != 1234 { + t.Errorf("C.myenum as const = %v, want 1234", c) + } + } + if c := C.myint_def; c != 12345 { + t.Errorf("C.myint_def = %v, want 12345", c) + } + { + const c = C.myint_def + if c != 12345 { + t.Errorf("C.myint as const = %v, want 12345", c) + } + } + + if c := C.myfloat_def; c != 1.5 { + t.Errorf("C.myint_def = %v, want 1.5", c) + } + { + const c = C.myfloat_def + if c != 1.5 { + t.Errorf("C.myint as const = %v, 
want 1.5", c) + } + } + + if s := C.mystring_def; s != "hello" { + t.Errorf("C.mystring_def = %q, want %q", s, "hello") + } +} + +// issue 6907 + +func test6907(t *testing.T) { + want := "yarn" + if got := C.GoString(C.Issue6907CopyString(want)); got != want { + t.Errorf("C.GoString(C.Issue6907CopyString(%q)) == %q, want %q", want, got, want) + } +} + +// issue 7560 + +func test7560(t *testing.T) { + // some mingw don't implement __packed__ correctly. + if C.offset7560() != 1 { + t.Skip("C compiler did not pack struct") + } + + // C.misaligned should have x but then a padding field to get to the end of the struct. + // There should not be a field named 'y'. + var v C.misaligned + rt := reflect.TypeOf(&v).Elem() + if rt.NumField() != 2 || rt.Field(0).Name != "x" || rt.Field(1).Name != "_" { + t.Errorf("unexpected fields in C.misaligned:\n") + for i := 0; i < rt.NumField(); i++ { + t.Logf("%+v\n", rt.Field(i)) + } + } +} + +// issue 7786 + +func f() { + var x1 *C.typedef_test7786 + var x2 *C.struct_test7786 + x1 = x2 + x2 = x1 + C.f7786(x1) + C.f7786(x2) + C.g7786(x1) + C.g7786(x2) + + var b1 *C.typedef_body7786 + var b2 *C.struct_body7786 + b1 = b2 + b2 = b1 + C.b7786(b1) + C.b7786(b2) + C.c7786(b1) + C.c7786(b2) + + var u1 *C.typedef_union7786 + var u2 *C.union_union7786 + u1 = u2 + u2 = u1 + C.u7786(u1) + C.u7786(u2) + C.v7786(u1) + C.v7786(u2) +} + +// issue 8092 + +func test8092(t *testing.T) { + tests := []struct { + s string + a, b *C.char + }{ + {"text", &C.text[0], C.ctext()}, + {"data", &C.data[0], C.cdata()}, + } + for _, test := range tests { + if test.a != test.b { + t.Errorf("%s: pointer mismatch: %v != %v", test.s, test.a, test.b) + } + if got := C.GoString(test.a); got != test.s { + t.Errorf("%s: points at %#v, want %#v", test.s, got, test.s) + } + } +} + +// issues 8368 and 8441 + +func issue8368(one *C.struct_one, two *C.struct_two) { +} + +func issue8441(one *C.one, two *C.two) { + issue8441(two.x, one.x) +} + +// issue 8428 + +var _ = 
C.struct_issue8428one{ + b: C.char(0), + // The trailing rest field is not available in cgo. + // See issue 11925. + // rest: [0]C.char{}, +} + +var _ = C.struct_issue8428two{ + p: unsafe.Pointer(nil), + b: C.char(0), + rest: [0]C.char{}, +} + +var _ = C.struct_issue8428three{ + w: [1][2][3][0]C.char{}, + x: [2][3][0][1]C.char{}, + y: [3][0][1][2]C.char{}, + z: [0][1][2][3]C.char{}, +} + +// issue 8811 + +func test8811(t *testing.T) { + C.issue8811Execute() +} + +// issue 9557 + +func test9557(t *testing.T) { + // implicitly dereference a Go variable + foo := C.issue9557foo + if v := foo.a; v != 42 { + t.Fatalf("foo.a expected 42, but got %d", v) + } + + // explicitly dereference a C variable + if v := (*C.issue9557foo).a; v != 42 { + t.Fatalf("(*C.issue9557foo).a expected 42, but is %d", v) + } + + // implicitly dereference a C variable + if v := C.issue9557foo.a; v != 42 { + t.Fatalf("C.issue9557foo.a expected 42, but is %d", v) + } +} + +// issue 8331 part 1 + +func issue8331a() C.issue8331 { + return issue8331Var +} + +// issue 10303 + +func test10303(t *testing.T, n int) { + if runtime.Compiler == "gccgo" { + t.Skip("gccgo permits C pointers on the stack") + } + + // Run at a few different stack depths just to avoid an unlucky pass + // due to variables ending up on different pages. 
+ if n > 0 { + test10303(t, n-1) + } + if t.Failed() { + return + } + var x, y, z, v, si C.int + var s C.Struct + C.setintstar(&x) + C.setintptr(&y) + C.setvoidptr(unsafe.Pointer(&v)) + s.P = &si + C.setstruct(s) + + if uintptr(unsafe.Pointer(&x))&^0xfff == uintptr(unsafe.Pointer(&z))&^0xfff { + t.Error("C int* argument on stack") + } + if uintptr(unsafe.Pointer(&y))&^0xfff == uintptr(unsafe.Pointer(&z))&^0xfff { + t.Error("C intptr argument on stack") + } + if uintptr(unsafe.Pointer(&v))&^0xfff == uintptr(unsafe.Pointer(&z))&^0xfff { + t.Error("C void* argument on stack") + } + if uintptr(unsafe.Pointer(&si))&^0xfff == uintptr(unsafe.Pointer(&z))&^0xfff { + t.Error("C struct field pointer on stack") + } +} + +// issue 11925 + +func test11925(t *testing.T) { + if C.sizeof_struct_a11925 != unsafe.Sizeof(C.struct_a11925{}) { + t.Errorf("size of a changed: C %d, Go %d", C.sizeof_struct_a11925, unsafe.Sizeof(C.struct_a11925{})) + } + if C.sizeof_struct_b11925 != unsafe.Sizeof(C.struct_b11925{}) { + t.Errorf("size of b changed: C %d, Go %d", C.sizeof_struct_b11925, unsafe.Sizeof(C.struct_b11925{})) + } +} + +// issue 12030 + +func test12030(t *testing.T) { + buf := (*C.char)(C.malloc(256)) + defer C.free(unsafe.Pointer(buf)) + for _, f := range []float64{1.0, 2.0, 3.14} { + C.issue12030conv(buf, C.double(f)) + got := C.GoString(buf) + if want := fmt.Sprintf("d=%g", f); got != want { + t.Fatalf("C.sprintf failed for %g: %q != %q", f, got, want) + } + } +} + +// issue 13402 + +var _ C.complexfloat +var _ C.complexdouble + +// issue 13930 +// Test that cgo's multiple-value special form for +// C function calls works in variable declaration statements. 
+ +var _, _ = C.abs(0) + +// issue 14838 + +func test14838(t *testing.T) { + data := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} + cData := C.CBytes(data) + defer C.free(cData) + + if C.check_cbytes((*C.char)(cData), C.size_t(len(data))) == 0 { + t.Fatalf("mismatched data: expected %v, got %v", data, (*(*[10]byte)(unsafe.Pointer(cData)))[:]) + } +} + +// issue 17065 + +var sink C.int + +func test17065(t *testing.T) { + if runtime.GOOS == "darwin" || runtime.GOOS == "ios" { + t.Skip("broken on darwin; issue 17065") + } + for i := range C.ii { + sink = C.ii[i] + } +} + +// issue 17537 + +func test17537(t *testing.T) { + v := C.S17537{i: 17537} + if got, want := C.I17537(&v), C.int(17537); got != want { + t.Errorf("got %d, want %d", got, want) + } + + p := (*C.char)(C.malloc(1)) + *p = 17 + if got, want := C.F17537(&p), C.int(17); got != want { + t.Errorf("got %d, want %d", got, want) + } + + C.F18298(nil) + var v18298 C.T18298_2 + C.G18298(C.T18298_1(v18298)) +} + +// issue 17723 + +func testAPI() { + var cs *C.char + cs = C.CString("hello") + defer C.free(unsafe.Pointer(cs)) + var s string + s = C.GoString((*C.char)(C.api_hello)) + s = C.GoStringN((*C.char)(C.api_hello), C.int(6)) + var b []byte + b = C.GoBytes(unsafe.Pointer(C.api_hello), C.int(6)) + _, _ = s, b + C.cstring_pointer_fun(nil) +} + +// issue 18126 + +func test18126(t *testing.T) { + p := C.malloc(1) + _, err := C.Issue18126C(&p) + C.free(p) + _ = err +} + +// issue 18720 + +func test18720(t *testing.T) { + if got, want := C.HELLO_WORLD, "hello\000world"; got != want { + t.Errorf("C.HELLO_WORLD == %q, expected %q", got, want) + } + + if got, want := C.VAR1, C.int(5); got != want { + t.Errorf("C.VAR1 == %v, expected %v", got, want) + } + + if got, want := *C.ADDR, C.int(5); got != want { + t.Errorf("*C.ADDR == %v, expected %v", got, want) + } + + if got, want := C.CALL, C.int(6); got != want { + t.Errorf("C.CALL == %v, expected %v", got, want) + } + + if got, want := C.CALL, C.int(7); got != want { + 
t.Errorf("C.CALL == %v, expected %v", got, want) + } + + // Issue 20125. + if got, want := C.SIZE_OF_FOO, 1; got != want { + t.Errorf("C.SIZE_OF_FOO == %v, expected %v", got, want) + } +} + +// issue 20129 + +func test20129(t *testing.T) { + if C.issue20129 != 0 { + t.Fatal("test is broken") + } + C.issue20129Foo() + if C.issue20129 != 1 { + t.Errorf("got %v but expected %v", C.issue20129, 1) + } + C.issue20129Bar() + if C.issue20129 != 2 { + t.Errorf("got %v but expected %v", C.issue20129, 2) + } +} + +// issue 20369 + +func test20369(t *testing.T) { + if C.XUINT64_MAX != math.MaxUint64 { + t.Fatalf("got %v, want %v", uint64(C.XUINT64_MAX), uint64(math.MaxUint64)) + } +} + +// issue 21668 + +var issue21668_X = C.x21668 + +// issue 21708 + +func test21708(t *testing.T) { + if got, want := C.CAST_TO_INT64, -1; got != want { + t.Errorf("C.CAST_TO_INT64 == %v, expected %v", got, want) + } +} + +// issue 21809 + +func test21809(t *testing.T) { + longVar := C.long(3) + typedefVar := C.MySigned_t(4) + typedefTypedefVar := C.MySigned2_t(5) + + // all three should be considered identical to `long` + if ret := C.takes_long(longVar); ret != 9 { + t.Errorf("got %v but expected %v", ret, 9) + } + if ret := C.takes_long(typedefVar); ret != 16 { + t.Errorf("got %v but expected %v", ret, 16) + } + if ret := C.takes_long(typedefTypedefVar); ret != 25 { + t.Errorf("got %v but expected %v", ret, 25) + } + + // They should also be identical to the typedef'd type + if ret := C.takes_typedef(longVar); ret != 9 { + t.Errorf("got %v but expected %v", ret, 9) + } + if ret := C.takes_typedef(typedefVar); ret != 16 { + t.Errorf("got %v but expected %v", ret, 16) + } + if ret := C.takes_typedef(typedefTypedefVar); ret != 25 { + t.Errorf("got %v but expected %v", ret, 25) + } +} + +// issue 22906 + +func test22906(t *testing.T) { + var x1 C.jobject = 0 // Note: 0, not nil. That makes sure we use uintptr for these types. 
+ _ = x1 + var x2 C.jclass = 0 + _ = x2 + var x3 C.jthrowable = 0 + _ = x3 + var x4 C.jstring = 0 + _ = x4 + var x5 C.jarray = 0 + _ = x5 + var x6 C.jbooleanArray = 0 + _ = x6 + var x7 C.jbyteArray = 0 + _ = x7 + var x8 C.jcharArray = 0 + _ = x8 + var x9 C.jshortArray = 0 + _ = x9 + var x10 C.jintArray = 0 + _ = x10 + var x11 C.jlongArray = 0 + _ = x11 + var x12 C.jfloatArray = 0 + _ = x12 + var x13 C.jdoubleArray = 0 + _ = x13 + var x14 C.jobjectArray = 0 + _ = x14 + var x15 C.jweak = 0 + _ = x15 +} + +// issue 22958 +// Nothing to run, just make sure this compiles. +var Vissue22958 C.issue22958Type + +func test23356(t *testing.T) { + if got, want := C.a(), C.int(5); got != want { + t.Errorf("C.a() == %v, expected %v", got, want) + } + if got, want := C.r(), C.int(3); got != want { + t.Errorf("C.r() == %v, expected %v", got, want) + } +} + +// issue 23720 + +func Issue23720F() { + var x C.issue23720A + C.issue23720F(x) +} + +// issue 24206 + +func test24206(t *testing.T) { + if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" { + t.Skipf("skipping on %s/%s", runtime.GOOS, runtime.GOARCH) + } + + if l := len(C.GoString(C.dangerousString1())); l != 123 { + t.Errorf("Incorrect string length - got %d, want 123", l) + } + if l := len(C.GoString(C.dangerousString2())); l != 4096+123 { + t.Errorf("Incorrect string length - got %d, want %d", l, 4096+123) + } +} + +// issue 25143 + +func issue25143sum(ns ...C.int) C.int { + total := C.int(0) + for _, n := range ns { + total += n + } + return total +} + +func test25143(t *testing.T) { + if got, want := issue25143sum(1, 2, 3), C.int(6); got != want { + t.Errorf("issue25143sum(1, 2, 3) == %v, expected %v", got, want) + } +} + +// issue 26066 +// Wrong type of constant with GCC 8 and newer. 
+ +func test26066(t *testing.T) { + var i = int64(C.issue26066) + if i != -1 { + t.Errorf("got %d, want -1", i) + } +} + +// issue 26517 +var a C.TypeOne +var b C.TypeTwo + +// issue 27660 +// Stress the interaction between the race detector and cgo in an +// attempt to reproduce the memory corruption described in #27660. +// The bug was very timing sensitive; at the time of writing this +// test would only trigger the bug about once out of every five runs. + +func test27660(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ints := make([]int, 100) + locks := make([]sync.Mutex, 100) + // Slowly create threads so that ThreadSanitizer is forced to + // frequently resize its SyncClocks. + for i := 0; i < 100; i++ { + go func() { + for ctx.Err() == nil { + // Sleep in C for long enough that it is likely that the runtime + // will retake this goroutine's currently wired P. + C.usleep(1000 /* 1ms */) + runtime.Gosched() // avoid starvation (see #28701) + } + }() + go func() { + // Trigger lots of synchronization and memory reads/writes to + // increase the likelihood that the race described in #27660 + // results in corruption of ThreadSanitizer's internal state + // and thus an assertion failure or segfault. + i := 0 + for ctx.Err() == nil { + j := rand.Intn(100) + locks[j].Lock() + ints[j]++ + locks[j].Unlock() + // needed for gccgo, to avoid creation of an + // unpreemptible "fast path" in this loop. Choice + // of (1<<24) is somewhat arbitrary. 
+ if i%(1<<24) == 0 { + runtime.Gosched() + } + i++ + + } + }() + time.Sleep(time.Millisecond) + } +} + +// issue 28540 + +func twoargsF() { + var v struct{ p *byte } + C.twoargs1(C.twoargs2(), C.twoargs3(unsafe.Pointer(&v))) +} + +// issue 28545 + +func issue28545G(p **C.char) { + C.issue28545F(p, -1, (0)) + C.issue28545F(p, 2+3, complex(1, 1)) + C.issue28545F(p, issue28772Constant, issue28772Constant2) +} + +// issue 28772 part 1 - part 2 in testx.go + +const issue28772Constant = C.issue28772Constant + +// issue 28896 + +func offset(i int) uintptr { + var pi C.innerPacked + var po C.outerPacked + var ui C.innerUnpacked + var uo C.outerUnpacked + switch i { + case 0: + return unsafe.Offsetof(pi.f2) + case 1: + return unsafe.Offsetof(po.g2) + case 2: + return unsafe.Offsetof(ui.f2) + case 3: + return unsafe.Offsetof(uo.g2) + default: + panic("can't happen") + } +} + +func test28896(t *testing.T) { + for i := 0; i < 4; i++ { + c := uintptr(C.offset(C.int(i))) + g := offset(i) + if c != g { + t.Errorf("%d: C: %d != Go %d", i, c, g) + } + } +} + +// issue 29383 +// cgo's /*line*/ comments failed when inserted after '/', +// because the result looked like a "//" comment. +// No runtime test; just make sure it compiles. + +func Issue29383(n, size uint) int { + if ^C.size_t(0)/C.size_t(n) < C.size_t(size) { + return 0 + } + return 0 +} + +// issue 29748 +// Error handling a struct initializer that requires pointer checking. +// Compilation test only, nothing to run. + +var Vissue29748 = C.f29748(&C.S29748{ + nil, +}) + +func Fissue299748() { + C.f29748(&C.S29748{ + nil, + }) +} + +// issue 29781 + +var issue29781X struct{ X int } + +func issue29781F(...int) int { return 0 } + +func issue29781G() { + var p *C.char + C.issue29781F(&p, C.ISSUE29781C+1) + C.issue29781F(nil, (C.int)( + 0)) + C.issue29781F(&p, (C.int)(0)) + C.issue29781F(&p, (C.int)( + 0)) + C.issue29781F(&p, (C.int)(issue29781X. 
+ X)) +} + +// issue 30065 + +func test30065(t *testing.T) { + var a [256]byte + b := []byte("a") + C.memcpy(unsafe.Pointer(&a), unsafe.Pointer(&b[0]), 1) + if a[0] != 'a' { + t.Errorf("&a failed: got %c, want %c", a[0], 'a') + } + + b = []byte("b") + C.memcpy(unsafe.Pointer(&a[0]), unsafe.Pointer(&b[0]), 1) + if a[0] != 'b' { + t.Errorf("&a[0] failed: got %c, want %c", a[0], 'b') + } + + d := make([]byte, 256) + b = []byte("c") + C.memcpy(unsafe.Pointer(&d[0]), unsafe.Pointer(&b[0]), 1) + if d[0] != 'c' { + t.Errorf("&d[0] failed: got %c, want %c", d[0], 'c') + } +} + +// issue 31093 +// No runtime test; just make sure it compiles. + +func Issue31093() { + C.issue31093F(C.ushort(0)) +} + +// issue 32579 + +func test32579(t *testing.T) { + var s [1]C.struct_S32579 + C.memset(unsafe.Pointer(&s[0].data[0]), 1, 1) + if s[0].data[0] != 1 { + t.Errorf("&s[0].data[0] failed: got %d, want %d", s[0].data[0], 1) + } +} + +// issue 37033, check if cgo.Handle works properly + +func testHandle(t *testing.T) { + ch := make(chan int) + + for i := 0; i < 42; i++ { + h := cgo.NewHandle(ch) + go func() { + C.cFunc37033(C.uintptr_t(h)) + }() + if v := <-ch; issue37033 != v { + t.Fatalf("unexpected receiving value: got %d, want %d", v, issue37033) + } + h.Delete() + } +} + +// issue 38649 + +var issue38649 C.netbsd_gid = 42 + +// issue 39877 + +var issue39877 *C.void = nil + +// issue 40494 +// No runtime test; just make sure it compiles. + +func Issue40494() { + C.issue40494(C.enum_Enum40494(C.X_40494), (*C.union_Union40494)(nil)) +} + +// Issue 45451. +func test45451(t *testing.T) { + var u *C.issue45451 + typ := reflect.ValueOf(u).Type().Elem() + + // The type is undefined in C so allocating it should panic. 
+ defer func() { + if r := recover(); r == nil { + t.Error("expected panic") + } + }() + + _ = reflect.New(typ) + t.Errorf("reflect.New(%v) should have panicked", typ) +} + +// issue 52542 + +func func52542[T ~[]C.int]() {} + +type type52542[T ~*C.float] struct{} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/test26213.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/test26213.go new file mode 100644 index 0000000000000000000000000000000000000000..04f8e840175dfa7258d5ec716b5130ab406ceb0e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/test26213.go @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cgo + +package cgotest + +import ( + "testing" + + "cmd/cgo/internal/test/issue26213" +) + +func test26213(t *testing.T) { + issue26213.Test26213(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/test_unix.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/test_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..664c4850d387231a8403ae4184c362198ce52a24 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/test_unix.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build unix + +package cgotest + +import "syscall" + +var syscall_dot_SIGCHLD = syscall.SIGCHLD diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/test_windows.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/test_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..7bfb33a83c3787a319d9b2a448034a5ec89a3568 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/test_windows.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +import "syscall" + +var syscall_dot_SIGCHLD syscall.Signal diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/testx.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/testx.c new file mode 100644 index 0000000000000000000000000000000000000000..1258e326a41d1a8dda48d0462f66dd43c1b5f5a7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/testx.c @@ -0,0 +1,24 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "_cgo_export.h" + +void lockOSThreadC(void) { + lockOSThreadCallback(); +} + +void issue7978c(uint32_t *sync) { + while(__atomic_load_n(sync, __ATOMIC_SEQ_CST) != 0) + ; + __atomic_add_fetch(sync, 1, __ATOMIC_SEQ_CST); + while(__atomic_load_n(sync, __ATOMIC_SEQ_CST) != 2) + ; + issue7978cb(); + __atomic_add_fetch(sync, 1, __ATOMIC_SEQ_CST); + while(__atomic_load_n(sync, __ATOMIC_SEQ_CST) != 6) + ; +} + +void f7665(void) { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/testx.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/testx.go new file mode 100644 index 0000000000000000000000000000000000000000..0e2a51a52280ba3a43f2e80e167f1445cf6f39e8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/testx.go @@ -0,0 +1,597 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test cases for cgo. +// Both the import "C" prologue and the main file are sorted by issue number. +// This file contains //export directives on Go functions +// and so it must NOT contain C definitions (only declarations). +// See test.go for C definitions. + +package cgotest + +import ( + "runtime" + "runtime/cgo" + "runtime/debug" + "strings" + "sync" + "sync/atomic" + "testing" + "unsafe" +) + +/* +// threads +extern void doAdd(int, int); +extern int callGoInCThread(int); + +// issue 1328 +void IntoC(void); + +// issue 1560 +extern void Issue1560InC(void); + +// twoSleep returns the absolute start time of the first sleep +// in ms. 
+long long twoSleep(int); + +// issue 3775 +void lockOSThreadC(void); +int usleep(unsigned usec); + +// issue 4054 part 2 - part 1 in test.go +typedef enum { + A = 0, + B, + C, + D, + E, + F, + G, + H, + II, + J, +} issue4054b; + +// issue 5548 + +extern int issue5548_in_c(void); + +// issue 6833 + +extern unsigned long long issue6833Func(unsigned int, unsigned long long); + +// issue 6907 + +extern int CheckIssue6907C(_GoString_); + +// issue 7665 + +extern void f7665(void); + +// issue 7978 +// Stack tracing didn't work during cgo code after calling a Go +// callback. Make sure GC works and the stack trace is correct. + +#include + +// use ugly atomic variable sync since that doesn't require calling back into +// Go code or OS dependencies +void issue7978c(uint32_t *sync); + +// issue 8331 part 2 - part 1 in test.go +// A typedef of an unnamed struct is the same struct when +// #include'd twice. No runtime test; just make sure it compiles. +#include "issue8331.h" + +// issue 8945 + +typedef void (*PFunc8945)(); +extern PFunc8945 func8945; // definition is in test.go + +// issue 20910 +void callMulti(void); + +// issue 28772 part 2 - part 1 in issuex.go +#define issue28772Constant2 2 + + +// issue 31891 +typedef struct { + long obj; +} Issue31891A; + +typedef struct { + long obj; +} Issue31891B; + +void callIssue31891(void); + +typedef struct { + int i; +} Issue38408, *PIssue38408; + +extern void cfunc49633(void*); // definition is in test.go +*/ +import "C" + +// exports + +//export ReturnIntLong +func ReturnIntLong() (int, C.long) { + return 1, 2 +} + +//export gc +func gc() { + runtime.GC() +} + +// threads + +var sum struct { + sync.Mutex + i int +} + +//export Add +func Add(x int) { + defer func() { + recover() + }() + sum.Lock() + sum.i += x + sum.Unlock() + var p *int + *p = 2 +} + +//export goDummy +func goDummy() { +} + +func testCthread(t *testing.T) { + if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && runtime.GOARCH == "arm64" { + t.Skip("the 
iOS exec wrapper is unable to properly handle the panic from Add") + } + sum.i = 0 + C.doAdd(10, 6) + + want := 10 * (10 - 1) / 2 * 6 + if sum.i != want { + t.Fatalf("sum=%d, want %d", sum.i, want) + } +} + +// Benchmark measuring overhead from C to Go in a C thread. +// Create a new C thread and invoke Go function repeatedly in the new C thread. +func benchCGoInCthread(b *testing.B) { + n := C.callGoInCThread(C.int(b.N)) + if int(n) != b.N { + b.Fatal("unmatch loop times") + } +} + +// issue 1328 + +//export BackIntoGo +func BackIntoGo() { + x := 1 + + for i := 0; i < 10000; i++ { + xvariadic(x) + if x != 1 { + panic("x is not 1?") + } + } +} + +func xvariadic(x ...interface{}) { +} + +func test1328(t *testing.T) { + C.IntoC() +} + +// issue 1560 +// Test that C functions and Go functions run in parallel. + +var ( + issue1560 int32 + + issue1560Ch = make(chan bool, 2) +) + +//export Issue1560FromC +func Issue1560FromC() { + for atomic.LoadInt32(&issue1560) != 1 { + runtime.Gosched() + } + atomic.AddInt32(&issue1560, 1) + for atomic.LoadInt32(&issue1560) != 3 { + runtime.Gosched() + } + issue1560Ch <- true +} + +func Issue1560FromGo() { + atomic.AddInt32(&issue1560, 1) + for atomic.LoadInt32(&issue1560) != 2 { + runtime.Gosched() + } + atomic.AddInt32(&issue1560, 1) + issue1560Ch <- true +} + +func test1560(t *testing.T) { + go Issue1560FromGo() + go C.Issue1560InC() + <-issue1560Ch + <-issue1560Ch +} + +// issue 2462 + +//export exportbyte +func exportbyte() byte { + return 0 +} + +//export exportbool +func exportbool() bool { + return false +} + +//export exportrune +func exportrune() rune { + return 0 +} + +//export exporterror +func exporterror() error { + return nil +} + +//export exportint +func exportint() int { + return 0 +} + +//export exportuint +func exportuint() uint { + return 0 +} + +//export exportuintptr +func exportuintptr() uintptr { + return (uintptr)(0) +} + +//export exportint8 +func exportint8() int8 { + return 0 +} + +//export exportuint8 
+func exportuint8() uint8 { + return 0 +} + +//export exportint16 +func exportint16() int16 { + return 0 +} + +//export exportuint16 +func exportuint16() uint16 { + return 0 +} + +//export exportint32 +func exportint32() int32 { + return 0 +} + +//export exportuint32 +func exportuint32() uint32 { + return 0 +} + +//export exportint64 +func exportint64() int64 { + return 0 +} + +//export exportuint64 +func exportuint64() uint64 { + return 0 +} + +//export exportfloat32 +func exportfloat32() float32 { + return 0 +} + +//export exportfloat64 +func exportfloat64() float64 { + return 0 +} + +//export exportcomplex64 +func exportcomplex64() complex64 { + return 0 +} + +//export exportcomplex128 +func exportcomplex128() complex128 { + return 0 +} + +// issue 3741 + +//export exportSliceIn +func exportSliceIn(s []byte) bool { + return len(s) == cap(s) +} + +//export exportSliceOut +func exportSliceOut() []byte { + return []byte{1} +} + +//export exportSliceInOut +func exportSliceInOut(s []byte) []byte { + return s +} + +// issue 3775 + +func init() { + if runtime.GOOS == "android" { + return + } + // Same as test3775 but run during init so that + // there are two levels of internal runtime lock + // (1 for init, 1 for cgo). + // This would have been broken by CL 11663043. + C.lockOSThreadC() +} + +func test3775(t *testing.T) { + if runtime.GOOS == "android" { + return + } + // Used to panic because of the UnlockOSThread below. 
+ C.lockOSThreadC() +} + +//export lockOSThreadCallback +func lockOSThreadCallback() { + runtime.LockOSThread() + runtime.UnlockOSThread() + go C.usleep(10000) + runtime.Gosched() +} + +// issue 4054 part 2 - part 1 in test.go + +var issue4054b = []int{C.A, C.B, C.C, C.D, C.E, C.F, C.G, C.H, C.II, C.J} + +//export issue5548FromC +func issue5548FromC(s string, i int) int { + if len(s) == 4 && s == "test" && i == 42 { + return 12345 + } + println("got", len(s), i) + return 9876 +} + +func test5548(t *testing.T) { + if x := C.issue5548_in_c(); x != 12345 { + t.Errorf("issue5548_in_c = %d, want %d", x, 12345) + } +} + +// issue 6833 + +//export GoIssue6833Func +func GoIssue6833Func(aui uint, aui64 uint64) uint64 { + return aui64 + uint64(aui) +} + +func test6833(t *testing.T) { + ui := 7 + ull := uint64(0x4000300020001000) + v := uint64(C.issue6833Func(C.uint(ui), C.ulonglong(ull))) + exp := uint64(ui) + ull + if v != exp { + t.Errorf("issue6833Func() returns %x, expected %x", v, exp) + } +} + +// issue 6907 + +const CString = "C string" + +//export CheckIssue6907Go +func CheckIssue6907Go(s string) C.int { + if s == CString { + return 1 + } + return 0 +} + +func test6907Go(t *testing.T) { + if got := C.CheckIssue6907C(CString); got != 1 { + t.Errorf("C.CheckIssue6907C() == %d, want %d", got, 1) + } +} + +// issue 7665 + +var bad7665 unsafe.Pointer = C.f7665 +var good7665 uintptr = uintptr(C.f7665) + +func test7665(t *testing.T) { + if bad7665 == nil || uintptr(bad7665) != good7665 { + t.Errorf("ptrs = %p, %#x, want same non-nil pointer", bad7665, good7665) + } +} + +// issue 7978 + +var issue7978sync uint32 + +func issue7978check(t *testing.T, wantFunc string, badFunc string, depth int) { + runtime.GC() + buf := make([]byte, 65536) + trace := string(buf[:runtime.Stack(buf, true)]) + for _, goroutine := range strings.Split(trace, "\n\n") { + if strings.Contains(goroutine, "test.issue7978go") { + trace := strings.Split(goroutine, "\n") + // look for the expected function 
in the stack + for i := 0; i < depth; i++ { + if badFunc != "" && strings.Contains(trace[1+2*i], badFunc) { + t.Errorf("bad stack: found %s in the stack:\n%s", badFunc, goroutine) + return + } + if strings.Contains(trace[1+2*i], wantFunc) { + return + } + } + t.Errorf("bad stack: didn't find %s in the stack:\n%s", wantFunc, goroutine) + return + } + } + t.Errorf("bad stack: goroutine not found. Full stack dump:\n%s", trace) +} + +func issue7978wait(store uint32, wait uint32) { + if store != 0 { + atomic.StoreUint32(&issue7978sync, store) + } + for atomic.LoadUint32(&issue7978sync) != wait { + runtime.Gosched() + } +} + +//export issue7978cb +func issue7978cb() { + // Force a stack growth from the callback to put extra + // pressure on the runtime. See issue #17785. + growStack(64) + issue7978wait(3, 4) +} + +func growStack(n int) int { + var buf [128]int + if n == 0 { + return 0 + } + return buf[growStack(n-1)] +} + +func issue7978go() { + C.issue7978c((*C.uint32_t)(&issue7978sync)) + issue7978wait(7, 8) +} + +func test7978(t *testing.T) { + if runtime.Compiler == "gccgo" { + t.Skip("gccgo can not do stack traces of C code") + } + debug.SetTraceback("2") + issue7978sync = 0 + go issue7978go() + // test in c code, before callback + issue7978wait(0, 1) + issue7978check(t, "_Cfunc_issue7978c(", "", 1) + // test in go code, during callback + issue7978wait(2, 3) + issue7978check(t, "test.issue7978cb(", "test.issue7978go", 3) + // test in c code, after callback + issue7978wait(4, 5) + issue7978check(t, "_Cfunc_issue7978c(", "_cgoexpwrap", 1) + // test in go code, after return from cgo + issue7978wait(6, 7) + issue7978check(t, "test.issue7978go(", "", 3) + atomic.StoreUint32(&issue7978sync, 8) +} + +// issue 8331 part 2 + +var issue8331Var C.issue8331 + +// issue 8945 + +//export Test8945 +func Test8945() { + _ = C.func8945 +} + +// issue 20910 + +//export multi +func multi() (*C.char, C.int) { + return C.CString("multi"), 0 +} + +func test20910(t *testing.T) { + 
C.callMulti() +} + +// issue 28772 part 2 + +const issue28772Constant2 = C.issue28772Constant2 + +// issue 31891 + +//export useIssue31891A +func useIssue31891A(c *C.Issue31891A) {} + +//export useIssue31891B +func useIssue31891B(c *C.Issue31891B) {} + +func test31891(t *testing.T) { + C.callIssue31891() +} + +// issue 37033, check if cgo.Handle works properly + +var issue37033 = 42 + +//export GoFunc37033 +func GoFunc37033(handle C.uintptr_t) { + h := cgo.Handle(handle) + ch := h.Value().(chan int) + ch <- issue37033 +} + +// issue 38408 +// A typedef pointer can be used as the element type. +// No runtime test; just make sure it compiles. +var _ C.PIssue38408 = &C.Issue38408{i: 1} + +// issue 49633, example use of cgo.Handle with void* + +type data49633 struct { + msg string +} + +//export GoFunc49633 +func GoFunc49633(context unsafe.Pointer) { + h := *(*cgo.Handle)(context) + v := h.Value().(*data49633) + v.msg = "hello" +} + +func test49633(t *testing.T) { + v := &data49633{} + h := cgo.NewHandle(v) + defer h.Delete() + C.cfunc49633(unsafe.Pointer(&h)) + if v.msg != "hello" { + t.Errorf("msg = %q, want 'hello'", v.msg) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/typeparam.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/typeparam.go new file mode 100644 index 0000000000000000000000000000000000000000..5f766c2bcb93ab35acf093e8070d0193f664683d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/test/typeparam.go @@ -0,0 +1,17 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cgotest + +// #include +import "C" + +func generic[T, U any](t T, u U) {} + +func useGeneric() { + const zero C.size_t = 0 + + generic(zero, zero) + generic[C.size_t, C.size_t](0, 0) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/carchive_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/carchive_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b140a9c61378a76cc0b28101a4a6add84c7845d8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/carchive_test.go @@ -0,0 +1,1399 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This test uses various syscall.SIG* constants that are defined on Unix +// platforms and Windows. + +//go:build unix || windows + +package carchive_test + +import ( + "bufio" + "bytes" + "cmd/cgo/internal/cgotest" + "debug/elf" + "flag" + "fmt" + "internal/testenv" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + "testing" + "time" + "unicode" +) + +var globalSkip = func(t *testing.T) {} + +// Program to run. +var bin []string + +// C compiler with args (from $(go env CC) $(go env GOGCCFLAGS)). +var cc []string + +// ".exe" on Windows. +var exeSuffix string + +var GOOS, GOARCH, GOPATH string +var libgodir string + +var testWork bool // If true, preserve temporary directories. 
+ +func TestMain(m *testing.M) { + flag.BoolVar(&testWork, "testwork", false, "if true, log and preserve the test's temporary working directory") + flag.Parse() + + log.SetFlags(log.Lshortfile) + os.Exit(testMain(m)) +} + +func testMain(m *testing.M) int { + if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" { + globalSkip = func(t *testing.T) { t.Skip("short mode and $GO_BUILDER_NAME not set") } + return m.Run() + } + if runtime.GOOS == "linux" { + if _, err := os.Stat("/etc/alpine-release"); err == nil { + globalSkip = func(t *testing.T) { t.Skip("skipping failing test on alpine - go.dev/issue/19938") } + return m.Run() + } + } + + // We need a writable GOPATH in which to run the tests. + // Construct one in a temporary directory. + var err error + GOPATH, err = os.MkdirTemp("", "carchive_test") + if err != nil { + log.Panic(err) + } + if testWork { + log.Println(GOPATH) + } else { + defer os.RemoveAll(GOPATH) + } + os.Setenv("GOPATH", GOPATH) + + // Copy testdata into GOPATH/src/testarchive, along with a go.mod file + // declaring the same path. 
+ modRoot := filepath.Join(GOPATH, "src", "testcarchive") + if err := cgotest.OverlayDir(modRoot, "testdata"); err != nil { + log.Panic(err) + } + if err := os.Chdir(modRoot); err != nil { + log.Panic(err) + } + os.Setenv("PWD", modRoot) + if err := os.WriteFile("go.mod", []byte("module testcarchive\n"), 0666); err != nil { + log.Panic(err) + } + + GOOS = goEnv("GOOS") + GOARCH = goEnv("GOARCH") + bin = cmdToRun("./testp") + + ccOut := goEnv("CC") + cc = []string{string(ccOut)} + + out := goEnv("GOGCCFLAGS") + quote := '\000' + start := 0 + lastSpace := true + backslash := false + s := string(out) + for i, c := range s { + if quote == '\000' && unicode.IsSpace(c) { + if !lastSpace { + cc = append(cc, s[start:i]) + lastSpace = true + } + } else { + if lastSpace { + start = i + lastSpace = false + } + if quote == '\000' && !backslash && (c == '"' || c == '\'') { + quote = c + backslash = false + } else if !backslash && quote == c { + quote = '\000' + } else if (quote == '\000' || quote == '"') && !backslash && c == '\\' { + backslash = true + } else { + backslash = false + } + } + } + if !lastSpace { + cc = append(cc, s[start:]) + } + + if GOOS == "aix" { + // -Wl,-bnoobjreorder is mandatory to keep the same layout + // in .text section. + cc = append(cc, "-Wl,-bnoobjreorder") + } + if GOOS == "ios" { + // Linking runtime/cgo on ios requires the CoreFoundation framework because + // x_cgo_init uses CoreFoundation APIs to switch directory to the app root. + // + // TODO(#58225): This special case probably should not be needed. + // runtime/cgo is a very low-level package, and should not provide + // high-level behaviors like changing the current working directory at init. 
+ cc = append(cc, "-framework", "CoreFoundation") + } + libbase := GOOS + "_" + GOARCH + if runtime.Compiler == "gccgo" { + libbase = "gccgo_" + libgodir + "_fPIC" + } else { + switch GOOS { + case "darwin", "ios": + if GOARCH == "arm64" { + libbase += "_shared" + } + case "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris", "illumos": + libbase += "_shared" + } + } + libgodir = filepath.Join(GOPATH, "pkg", libbase, "testcarchive") + cc = append(cc, "-I", libgodir) + + // Force reallocation (and avoid aliasing bugs) for parallel tests that append to cc. + cc = cc[:len(cc):len(cc)] + + if GOOS == "windows" { + exeSuffix = ".exe" + } + + return m.Run() +} + +func goEnv(key string) string { + out, err := exec.Command("go", "env", key).Output() + if err != nil { + if ee, ok := err.(*exec.ExitError); ok { + fmt.Fprintf(os.Stderr, "%s", ee.Stderr) + } + log.Panicf("go env %s failed:\n%s\n", key, err) + } + return strings.TrimSpace(string(out)) +} + +func cmdToRun(name string) []string { + execScript := "go_" + goEnv("GOOS") + "_" + goEnv("GOARCH") + "_exec" + executor, err := exec.LookPath(execScript) + if err != nil { + return []string{name} + } + return []string{executor, name} +} + +// genHeader writes a C header file for the C-exported declarations found in .go +// source files in dir. +// +// TODO(golang.org/issue/35715): This should be simpler. +func genHeader(t *testing.T, header, dir string) { + t.Helper() + + // The 'cgo' command generates a number of additional artifacts, + // but we're only interested in the header. + // Shunt the rest of the outputs to a temporary directory. + objDir, err := os.MkdirTemp(GOPATH, "_obj") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(objDir) + + files, err := filepath.Glob(filepath.Join(dir, "*.go")) + if err != nil { + t.Fatal(err) + } + + cmd := exec.Command("go", "tool", "cgo", + "-objdir", objDir, + "-exportheader", header) + cmd.Args = append(cmd.Args, files...) 
+ t.Log(cmd.Args) + if out, err := cmd.CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } +} + +func testInstall(t *testing.T, exe, libgoa, libgoh string, buildcmd ...string) { + t.Helper() + cmd := exec.Command(buildcmd[0], buildcmd[1:]...) + cmd.Env = append(cmd.Environ(), "GO111MODULE=off") // 'go install' only works in GOPATH mode + t.Log(buildcmd) + if out, err := cmd.CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } + if !testWork { + defer func() { + os.Remove(libgoa) + os.Remove(libgoh) + }() + } + + ccArgs := append(cc, "-o", exe, "main.c") + if GOOS == "windows" { + ccArgs = append(ccArgs, "main_windows.c", libgoa, "-lntdll", "-lws2_32", "-lwinmm") + } else { + ccArgs = append(ccArgs, "main_unix.c", libgoa) + } + if runtime.Compiler == "gccgo" { + ccArgs = append(ccArgs, "-lgo") + } + t.Log(ccArgs) + if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } + if !testWork { + defer os.Remove(exe) + } + + binArgs := append(cmdToRun(exe), "arg1", "arg2") + cmd = exec.Command(binArgs[0], binArgs[1:]...) + if runtime.Compiler == "gccgo" { + cmd.Env = append(cmd.Environ(), "GCCGO=1") + } + if out, err := cmd.CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } + + checkLineComments(t, libgoh) +} + +var badLineRegexp = regexp.MustCompile(`(?m)^#line [0-9]+ "/.*$`) + +// checkLineComments checks that the export header generated by +// -buildmode=c-archive doesn't have any absolute paths in the #line +// comments. We don't want those paths because they are unhelpful for +// the user and make the files change based on details of the location +// of GOPATH. 
+func checkLineComments(t *testing.T, hdrname string) { + hdr, err := os.ReadFile(hdrname) + if err != nil { + if !os.IsNotExist(err) { + t.Error(err) + } + return + } + if line := badLineRegexp.Find(hdr); line != nil { + t.Errorf("bad #line directive with absolute path in %s: %q", hdrname, line) + } +} + +// checkArchive verifies that the created library looks OK. +// We just check a couple of things now, we can add more checks as needed. +func checkArchive(t *testing.T, arname string) { + t.Helper() + + switch GOOS { + case "aix", "darwin", "ios", "windows": + // We don't have any checks for non-ELF libraries yet. + if _, err := os.Stat(arname); err != nil { + t.Errorf("archive %s does not exist: %v", arname, err) + } + default: + checkELFArchive(t, arname) + } +} + +// checkELFArchive checks an ELF archive. +func checkELFArchive(t *testing.T, arname string) { + t.Helper() + + f, err := os.Open(arname) + if err != nil { + t.Errorf("archive %s does not exist: %v", arname, err) + return + } + defer f.Close() + + // TODO(iant): put these in a shared package? But where? 
+ const ( + magic = "!\n" + fmag = "`\n" + + namelen = 16 + datelen = 12 + uidlen = 6 + gidlen = 6 + modelen = 8 + sizelen = 10 + fmaglen = 2 + hdrlen = namelen + datelen + uidlen + gidlen + modelen + sizelen + fmaglen + ) + + type arhdr struct { + name string + date string + uid string + gid string + mode string + size string + fmag string + } + + var magbuf [len(magic)]byte + if _, err := io.ReadFull(f, magbuf[:]); err != nil { + t.Errorf("%s: archive too short", arname) + return + } + if string(magbuf[:]) != magic { + t.Errorf("%s: incorrect archive magic string %q", arname, magbuf) + } + + off := int64(len(magic)) + for { + if off&1 != 0 { + var b [1]byte + if _, err := f.Read(b[:]); err != nil { + if err == io.EOF { + break + } + t.Errorf("%s: error skipping alignment byte at %d: %v", arname, off, err) + } + off++ + } + + var hdrbuf [hdrlen]byte + if _, err := io.ReadFull(f, hdrbuf[:]); err != nil { + if err == io.EOF { + break + } + t.Errorf("%s: error reading archive header at %d: %v", arname, off, err) + return + } + + var hdr arhdr + hdrslice := hdrbuf[:] + set := func(len int, ps *string) { + *ps = string(bytes.TrimSpace(hdrslice[:len])) + hdrslice = hdrslice[len:] + } + set(namelen, &hdr.name) + set(datelen, &hdr.date) + set(uidlen, &hdr.uid) + set(gidlen, &hdr.gid) + set(modelen, &hdr.mode) + set(sizelen, &hdr.size) + hdr.fmag = string(hdrslice[:fmaglen]) + hdrslice = hdrslice[fmaglen:] + if len(hdrslice) != 0 { + t.Fatalf("internal error: len(hdrslice) == %d", len(hdrslice)) + } + + if hdr.fmag != fmag { + t.Errorf("%s: invalid fmagic value %q at %d", arname, hdr.fmag, off) + return + } + + size, err := strconv.ParseInt(hdr.size, 10, 64) + if err != nil { + t.Errorf("%s: error parsing size %q at %d: %v", arname, hdr.size, off, err) + return + } + + off += hdrlen + + switch hdr.name { + case "__.SYMDEF", "/", "/SYM64/": + // The archive symbol map. + case "//", "ARFILENAMES/": + // The extended name table. + default: + // This should be an ELF object. 
+ checkELFArchiveObject(t, arname, off, io.NewSectionReader(f, off, size)) + } + + off += size + if _, err := f.Seek(off, io.SeekStart); err != nil { + t.Errorf("%s: failed to seek to %d: %v", arname, off, err) + } + } +} + +// checkELFArchiveObject checks an object in an ELF archive. +func checkELFArchiveObject(t *testing.T, arname string, off int64, obj io.ReaderAt) { + t.Helper() + + ef, err := elf.NewFile(obj) + if err != nil { + t.Errorf("%s: failed to open ELF file at %d: %v", arname, off, err) + return + } + defer ef.Close() + + // Verify section types. + for _, sec := range ef.Sections { + want := elf.SHT_NULL + switch sec.Name { + case ".text", ".data": + want = elf.SHT_PROGBITS + case ".bss": + want = elf.SHT_NOBITS + case ".symtab": + want = elf.SHT_SYMTAB + case ".strtab": + want = elf.SHT_STRTAB + case ".init_array": + want = elf.SHT_INIT_ARRAY + case ".fini_array": + want = elf.SHT_FINI_ARRAY + case ".preinit_array": + want = elf.SHT_PREINIT_ARRAY + } + if want != elf.SHT_NULL && sec.Type != want { + t.Errorf("%s: incorrect section type in elf file at %d for section %q: got %v want %v", arname, off, sec.Name, sec.Type, want) + } + } +} + +func TestInstall(t *testing.T) { + globalSkip(t) + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-archive") + + if !testWork { + defer os.RemoveAll(filepath.Join(GOPATH, "pkg")) + } + + libgoa := "libgo.a" + if runtime.Compiler == "gccgo" { + libgoa = "liblibgo.a" + } + + // Generate the p.h header file. + // + // 'go install -i -buildmode=c-archive ./libgo' would do that too, but that + // would also attempt to install transitive standard-library dependencies to + // GOROOT, and we cannot assume that GOROOT is writable. (A non-root user may + // be running this test in a GOROOT owned by root.) 
+ genHeader(t, "p.h", "./p") + + testInstall(t, "./testp1"+exeSuffix, + filepath.Join(libgodir, libgoa), + filepath.Join(libgodir, "libgo.h"), + "go", "install", "-buildmode=c-archive", "./libgo") + + // Test building libgo other than installing it. + // Header files are now present. + testInstall(t, "./testp2"+exeSuffix, "libgo.a", "libgo.h", + "go", "build", "-buildmode=c-archive", filepath.Join(".", "libgo", "libgo.go")) + + testInstall(t, "./testp3"+exeSuffix, "libgo.a", "libgo.h", + "go", "build", "-buildmode=c-archive", "-o", "libgo.a", "./libgo") +} + +func TestEarlySignalHandler(t *testing.T) { + switch GOOS { + case "darwin", "ios": + switch GOARCH { + case "arm64": + t.Skipf("skipping on %s/%s; see https://golang.org/issue/13701", GOOS, GOARCH) + } + case "windows": + t.Skip("skipping signal test on Windows") + } + globalSkip(t) + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-archive") + + if !testWork { + defer func() { + os.Remove("libgo2.a") + os.Remove("libgo2.h") + os.Remove("testp" + exeSuffix) + os.RemoveAll(filepath.Join(GOPATH, "pkg")) + }() + } + + cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo2.a", "./libgo2") + if out, err := cmd.CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } + checkLineComments(t, "libgo2.h") + checkArchive(t, "libgo2.a") + + ccArgs := append(cc, "-o", "testp"+exeSuffix, "main2.c", "libgo2.a") + if runtime.Compiler == "gccgo" { + ccArgs = append(ccArgs, "-lgo") + } + if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } + + darwin := "0" + if runtime.GOOS == "darwin" { + darwin = "1" + } + cmd = exec.Command(bin[0], append(bin[1:], darwin)...) 
+ + if out, err := cmd.CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } +} + +func TestSignalForwarding(t *testing.T) { + globalSkip(t) + checkSignalForwardingTest(t) + buildSignalForwardingTest(t) + + cmd := exec.Command(bin[0], append(bin[1:], "1")...) + + out, err := cmd.CombinedOutput() + t.Logf("%v\n%s", cmd.Args, out) + expectSignal(t, err, syscall.SIGSEGV, 0) + + // SIGPIPE is never forwarded on darwin. See golang.org/issue/33384. + if runtime.GOOS != "darwin" && runtime.GOOS != "ios" { + // Test SIGPIPE forwarding + cmd = exec.Command(bin[0], append(bin[1:], "3")...) + + out, err = cmd.CombinedOutput() + if len(out) > 0 { + t.Logf("%s", out) + } + expectSignal(t, err, syscall.SIGPIPE, 0) + } +} + +func TestSignalForwardingExternal(t *testing.T) { + if GOOS == "freebsd" || GOOS == "aix" { + t.Skipf("skipping on %s/%s; signal always goes to the Go runtime", GOOS, GOARCH) + } else if GOOS == "darwin" && GOARCH == "amd64" { + t.Skipf("skipping on %s/%s: runtime does not permit SI_USER SIGSEGV", GOOS, GOARCH) + } + globalSkip(t) + checkSignalForwardingTest(t) + buildSignalForwardingTest(t) + + // We want to send the process a signal and see if it dies. + // Normally the signal goes to the C thread, the Go signal + // handler picks it up, sees that it is running in a C thread, + // and the program dies. Unfortunately, occasionally the + // signal is delivered to a Go thread, which winds up + // discarding it because it was sent by another program and + // there is no Go handler for it. To avoid this, run the + // program several times in the hopes that it will eventually + // fail. + const tries = 20 + for i := 0; i < tries; i++ { + err := runSignalForwardingTest(t, "2") + if err == nil { + continue + } + + // If the signal is delivered to a C thread, as expected, + // the Go signal handler will disable itself and re-raise + // the signal, causing the program to die with SIGSEGV. 
+ // + // It is also possible that the signal will be + // delivered to a Go thread, such as a GC thread. + // Currently when the Go runtime sees that a SIGSEGV was + // sent from a different program, it first tries to send + // the signal to the os/signal API. If nothing is looking + // for (or explicitly ignoring) SIGSEGV, then it crashes. + // Because the Go runtime is invoked via a c-archive, + // it treats this as GOTRACEBACK=crash, meaning that it + // dumps a stack trace for all goroutines, which it does + // by raising SIGQUIT. The effect is that we will see the + // program die with SIGQUIT in that case, not SIGSEGV. + if expectSignal(t, err, syscall.SIGSEGV, syscall.SIGQUIT) { + return + } + } + + t.Errorf("program succeeded unexpectedly %d times", tries) +} + +func TestSignalForwardingGo(t *testing.T) { + // This test fails on darwin-amd64 because of the special + // handling of user-generated SIGSEGV signals in fixsigcode in + // runtime/signal_darwin_amd64.go. + if runtime.GOOS == "darwin" && runtime.GOARCH == "amd64" { + t.Skip("not supported on darwin-amd64") + } + globalSkip(t) + + checkSignalForwardingTest(t) + buildSignalForwardingTest(t) + err := runSignalForwardingTest(t, "4") + + // Occasionally the signal will be delivered to a C thread, + // and the program will crash with SIGSEGV. + expectSignal(t, err, syscall.SIGQUIT, syscall.SIGSEGV) +} + +// checkSignalForwardingTest calls t.Skip if the SignalForwarding test +// doesn't work on this platform. +func checkSignalForwardingTest(t *testing.T) { + switch GOOS { + case "darwin", "ios": + switch GOARCH { + case "arm64": + t.Skipf("skipping on %s/%s; see https://golang.org/issue/13701", GOOS, GOARCH) + } + case "windows": + t.Skip("skipping signal test on Windows") + } + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-archive") +} + +// buildSignalForwardingTest builds the executable used by the various +// signal forwarding tests. 
+func buildSignalForwardingTest(t *testing.T) { + if !testWork { + t.Cleanup(func() { + os.Remove("libgo2.a") + os.Remove("libgo2.h") + os.Remove("testp" + exeSuffix) + os.RemoveAll(filepath.Join(GOPATH, "pkg")) + }) + } + + t.Log("go build -buildmode=c-archive -o libgo2.a ./libgo2") + cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo2.a", "./libgo2") + out, err := cmd.CombinedOutput() + if len(out) > 0 { + t.Logf("%s", out) + } + if err != nil { + t.Fatal(err) + } + + checkLineComments(t, "libgo2.h") + checkArchive(t, "libgo2.a") + + ccArgs := append(cc, "-o", "testp"+exeSuffix, "main5.c", "libgo2.a") + if runtime.Compiler == "gccgo" { + ccArgs = append(ccArgs, "-lgo") + } + t.Log(ccArgs) + out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput() + if len(out) > 0 { + t.Logf("%s", out) + } + if err != nil { + t.Fatal(err) + } +} + +func runSignalForwardingTest(t *testing.T, arg string) error { + t.Logf("%v %s", bin, arg) + cmd := exec.Command(bin[0], append(bin[1:], arg)...) + + var out strings.Builder + cmd.Stdout = &out + + stderr, err := cmd.StderrPipe() + if err != nil { + t.Fatal(err) + } + defer stderr.Close() + + r := bufio.NewReader(stderr) + + err = cmd.Start() + if err != nil { + t.Fatal(err) + } + + // Wait for trigger to ensure that process is started. + ok, err := r.ReadString('\n') + + // Verify trigger. + if err != nil || ok != "OK\n" { + t.Fatal("Did not receive OK signal") + } + + var wg sync.WaitGroup + wg.Add(1) + var errsb strings.Builder + go func() { + defer wg.Done() + io.Copy(&errsb, r) + }() + + // Give the program a chance to enter the function. + // If the program doesn't get there the test will still + // pass, although it doesn't quite test what we intended. + // This is fine as long as the program normally makes it. 
+ time.Sleep(time.Millisecond) + + cmd.Process.Signal(syscall.SIGSEGV) + + err = cmd.Wait() + + s := out.String() + if len(s) > 0 { + t.Log(s) + } + wg.Wait() + s = errsb.String() + if len(s) > 0 { + t.Log(s) + } + + return err +} + +// expectSignal checks that err, the exit status of a test program, +// shows a failure due to a specific signal or two. Returns whether we +// found an expected signal. +func expectSignal(t *testing.T, err error, sig1, sig2 syscall.Signal) bool { + t.Helper() + if err == nil { + t.Error("test program succeeded unexpectedly") + } else if ee, ok := err.(*exec.ExitError); !ok { + t.Errorf("error (%v) has type %T; expected exec.ExitError", err, err) + } else if ws, ok := ee.Sys().(syscall.WaitStatus); !ok { + t.Errorf("error.Sys (%v) has type %T; expected syscall.WaitStatus", ee.Sys(), ee.Sys()) + } else if !ws.Signaled() || (ws.Signal() != sig1 && ws.Signal() != sig2) { + if sig2 == 0 { + t.Errorf("got %q; expected signal %q", ee, sig1) + } else { + t.Errorf("got %q; expected signal %q or %q", ee, sig1, sig2) + } + } else { + return true + } + return false +} + +func TestOsSignal(t *testing.T) { + switch GOOS { + case "windows": + t.Skip("skipping signal test on Windows") + } + globalSkip(t) + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-archive") + + if !testWork { + defer func() { + os.Remove("libgo3.a") + os.Remove("libgo3.h") + os.Remove("testp" + exeSuffix) + os.RemoveAll(filepath.Join(GOPATH, "pkg")) + }() + } + + cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo3.a", "./libgo3") + if out, err := cmd.CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } + checkLineComments(t, "libgo3.h") + checkArchive(t, "libgo3.a") + + ccArgs := append(cc, "-o", "testp"+exeSuffix, "main3.c", "libgo3.a") + if runtime.Compiler == "gccgo" { + ccArgs = append(ccArgs, "-lgo") + } + if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil { + 
t.Logf("%s", out) + t.Fatal(err) + } + + if out, err := exec.Command(bin[0], bin[1:]...).CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } +} + +func TestSigaltstack(t *testing.T) { + switch GOOS { + case "windows": + t.Skip("skipping signal test on Windows") + } + globalSkip(t) + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-archive") + + if !testWork { + defer func() { + os.Remove("libgo4.a") + os.Remove("libgo4.h") + os.Remove("testp" + exeSuffix) + os.RemoveAll(filepath.Join(GOPATH, "pkg")) + }() + } + + cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo4.a", "./libgo4") + if out, err := cmd.CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } + checkLineComments(t, "libgo4.h") + checkArchive(t, "libgo4.a") + + ccArgs := append(cc, "-o", "testp"+exeSuffix, "main4.c", "libgo4.a") + if runtime.Compiler == "gccgo" { + ccArgs = append(ccArgs, "-lgo") + } + if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } + + if out, err := exec.Command(bin[0], bin[1:]...).CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } +} + +const testar = `#!/usr/bin/env bash +while [[ $1 == -* ]] >/dev/null; do + shift +done +echo "testar" > $1 +echo "testar" > PWD/testar.ran +` + +func TestExtar(t *testing.T) { + switch GOOS { + case "windows": + t.Skip("skipping signal test on Windows") + } + if runtime.Compiler == "gccgo" { + t.Skip("skipping -extar test when using gccgo") + } + globalSkip(t) + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-archive") + testenv.MustHaveExecPath(t, "bash") // This test uses a bash script + + if !testWork { + defer func() { + os.Remove("libgo4.a") + os.Remove("libgo4.h") + os.Remove("testar") + os.Remove("testar.ran") + os.RemoveAll(filepath.Join(GOPATH, "pkg")) + }() + } + + os.Remove("testar") + dir, err := os.Getwd() + if err != 
nil { + t.Fatal(err) + } + s := strings.Replace(testar, "PWD", dir, 1) + if err := os.WriteFile("testar", []byte(s), 0777); err != nil { + t.Fatal(err) + } + + cmd := exec.Command("go", "build", "-buildmode=c-archive", "-ldflags=-extar="+filepath.Join(dir, "testar"), "-o", "libgo4.a", "./libgo4") + if out, err := cmd.CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } + checkLineComments(t, "libgo4.h") + + if _, err := os.Stat("testar.ran"); err != nil { + if os.IsNotExist(err) { + t.Error("testar does not exist after go build") + } else { + t.Errorf("error checking testar: %v", err) + } + } +} + +func TestPIE(t *testing.T) { + switch GOOS { + case "windows", "darwin", "ios", "plan9": + t.Skipf("skipping PIE test on %s", GOOS) + } + globalSkip(t) + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-archive") + + libgoa := "libgo.a" + if runtime.Compiler == "gccgo" { + libgoa = "liblibgo.a" + } + + if !testWork { + defer func() { + os.Remove("testp" + exeSuffix) + os.Remove(libgoa) + os.RemoveAll(filepath.Join(GOPATH, "pkg")) + }() + } + + // Generate the p.h header file. + // + // 'go install -i -buildmode=c-archive ./libgo' would do that too, but that + // would also attempt to install transitive standard-library dependencies to + // GOROOT, and we cannot assume that GOROOT is writable. (A non-root user may + // be running this test in a GOROOT owned by root.) 
+ genHeader(t, "p.h", "./p") + + cmd := exec.Command("go", "build", "-buildmode=c-archive", "./libgo") + if out, err := cmd.CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } + + ccArgs := append(cc, "-fPIE", "-pie", "-o", "testp"+exeSuffix, "main.c", "main_unix.c", libgoa) + if runtime.Compiler == "gccgo" { + ccArgs = append(ccArgs, "-lgo") + } + if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } + + binArgs := append(bin, "arg1", "arg2") + cmd = exec.Command(binArgs[0], binArgs[1:]...) + if runtime.Compiler == "gccgo" { + cmd.Env = append(os.Environ(), "GCCGO=1") + } + if out, err := cmd.CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } + + if GOOS != "aix" { + f, err := elf.Open("testp" + exeSuffix) + if err != nil { + t.Fatal("elf.Open failed: ", err) + } + defer f.Close() + if hasDynTag(t, f, elf.DT_TEXTREL) { + t.Errorf("%s has DT_TEXTREL flag", "testp"+exeSuffix) + } + } +} + +func hasDynTag(t *testing.T, f *elf.File, tag elf.DynTag) bool { + ds := f.SectionByType(elf.SHT_DYNAMIC) + if ds == nil { + t.Error("no SHT_DYNAMIC section") + return false + } + d, err := ds.Data() + if err != nil { + t.Errorf("can't read SHT_DYNAMIC contents: %v", err) + return false + } + for len(d) > 0 { + var t elf.DynTag + switch f.Class { + case elf.ELFCLASS32: + t = elf.DynTag(f.ByteOrder.Uint32(d[:4])) + d = d[8:] + case elf.ELFCLASS64: + t = elf.DynTag(f.ByteOrder.Uint64(d[:8])) + d = d[16:] + } + if t == tag { + return true + } + } + return false +} + +func TestSIGPROF(t *testing.T) { + switch GOOS { + case "windows", "plan9": + t.Skipf("skipping SIGPROF test on %s", GOOS) + case "darwin", "ios": + t.Skipf("skipping SIGPROF test on %s; see https://golang.org/issue/19320", GOOS) + } + globalSkip(t) + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-archive") + + t.Parallel() + + if !testWork { + defer func() { + 
os.Remove("testp6" + exeSuffix) + os.Remove("libgo6.a") + os.Remove("libgo6.h") + }() + } + + cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo6.a", "./libgo6") + out, err := cmd.CombinedOutput() + t.Logf("%v\n%s", cmd.Args, out) + if err != nil { + t.Fatal(err) + } + checkLineComments(t, "libgo6.h") + checkArchive(t, "libgo6.a") + + ccArgs := append(cc, "-o", "testp6"+exeSuffix, "main6.c", "libgo6.a") + if runtime.Compiler == "gccgo" { + ccArgs = append(ccArgs, "-lgo") + } + out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput() + t.Logf("%v\n%s", ccArgs, out) + if err != nil { + t.Fatal(err) + } + + argv := cmdToRun("./testp6") + cmd = exec.Command(argv[0], argv[1:]...) + out, err = cmd.CombinedOutput() + t.Logf("%v\n%s", argv, out) + if err != nil { + t.Fatal(err) + } +} + +// TestCompileWithoutShared tests that if we compile code without the +// -shared option, we can put it into an archive. When we use the go +// tool with -buildmode=c-archive, it passes -shared to the compiler, +// so we override that. The go tool doesn't work this way, but Bazel +// will likely do it in the future. And it ought to work. This test +// was added because at one time it did not work on PPC Linux. +func TestCompileWithoutShared(t *testing.T) { + globalSkip(t) + // For simplicity, reuse the signal forwarding test. + checkSignalForwardingTest(t) + testenv.MustHaveGoBuild(t) + + if !testWork { + defer func() { + os.Remove("libgo2.a") + os.Remove("libgo2.h") + }() + } + + cmd := exec.Command("go", "build", "-buildmode=c-archive", "-gcflags=-shared=false", "-o", "libgo2.a", "./libgo2") + out, err := cmd.CombinedOutput() + t.Logf("%v\n%s", cmd.Args, out) + if err != nil { + t.Fatal(err) + } + checkLineComments(t, "libgo2.h") + checkArchive(t, "libgo2.a") + + exe := "./testnoshared" + exeSuffix + + // In some cases, -no-pie is needed here, but not accepted everywhere. First try + // if -no-pie is accepted. See #22126. 
+ ccArgs := append(cc, "-o", exe, "-no-pie", "main5.c", "libgo2.a") + if runtime.Compiler == "gccgo" { + ccArgs = append(ccArgs, "-lgo") + } + out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput() + t.Logf("%v\n%s", ccArgs, out) + + // If -no-pie unrecognized, try -nopie if this is possibly clang + if err != nil && bytes.Contains(out, []byte("unknown")) && !strings.Contains(cc[0], "gcc") { + ccArgs = append(cc, "-o", exe, "-nopie", "main5.c", "libgo2.a") + out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput() + t.Logf("%v\n%s", ccArgs, out) + } + + // Don't use either -no-pie or -nopie + if err != nil && bytes.Contains(out, []byte("unrecognized")) { + ccArgs = append(cc, "-o", exe, "main5.c", "libgo2.a") + out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput() + t.Logf("%v\n%s", ccArgs, out) + } + if err != nil { + t.Fatal(err) + } + if !testWork { + defer os.Remove(exe) + } + + binArgs := append(cmdToRun(exe), "1") + out, err = exec.Command(binArgs[0], binArgs[1:]...).CombinedOutput() + t.Logf("%v\n%s", binArgs, out) + expectSignal(t, err, syscall.SIGSEGV, 0) + + // SIGPIPE is never forwarded on darwin. See golang.org/issue/33384. + if runtime.GOOS != "darwin" && runtime.GOOS != "ios" { + binArgs := append(cmdToRun(exe), "3") + out, err = exec.Command(binArgs[0], binArgs[1:]...).CombinedOutput() + t.Logf("%v\n%s", binArgs, out) + expectSignal(t, err, syscall.SIGPIPE, 0) + } +} + +// Test that installing a second time recreates the header file. +func TestCachedInstall(t *testing.T) { + globalSkip(t) + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-archive") + + if !testWork { + defer os.RemoveAll(filepath.Join(GOPATH, "pkg")) + } + + h := filepath.Join(libgodir, "libgo.h") + + buildcmd := []string{"go", "install", "-buildmode=c-archive", "./libgo"} + + cmd := exec.Command(buildcmd[0], buildcmd[1:]...) 
+ cmd.Env = append(cmd.Environ(), "GO111MODULE=off") // 'go install' only works in GOPATH mode + t.Log(buildcmd) + if out, err := cmd.CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } + + if _, err := os.Stat(h); err != nil { + t.Errorf("libgo.h not installed: %v", err) + } + + if err := os.Remove(h); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(buildcmd[0], buildcmd[1:]...) + cmd.Env = append(cmd.Environ(), "GO111MODULE=off") + t.Log(buildcmd) + if out, err := cmd.CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } + + if _, err := os.Stat(h); err != nil { + t.Errorf("libgo.h not installed in second run: %v", err) + } +} + +// Issue 35294. +func TestManyCalls(t *testing.T) { + globalSkip(t) + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-archive") + + t.Parallel() + + if !testWork { + defer func() { + os.Remove("testp7" + exeSuffix) + os.Remove("libgo7.a") + os.Remove("libgo7.h") + }() + } + + cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo7.a", "./libgo7") + out, err := cmd.CombinedOutput() + t.Logf("%v\n%s", cmd.Args, out) + if err != nil { + t.Fatal(err) + } + checkLineComments(t, "libgo7.h") + checkArchive(t, "libgo7.a") + + ccArgs := append(cc, "-o", "testp7"+exeSuffix, "main7.c", "libgo7.a") + if runtime.Compiler == "gccgo" { + ccArgs = append(ccArgs, "-lgo") + } + out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput() + t.Logf("%v\n%s", ccArgs, out) + if err != nil { + t.Fatal(err) + } + + argv := cmdToRun("./testp7") + cmd = exec.Command(argv[0], argv[1:]...) + sb := new(strings.Builder) + cmd.Stdout = sb + cmd.Stderr = sb + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + timer := time.AfterFunc(time.Minute, + func() { + t.Error("test program timed out") + cmd.Process.Kill() + }, + ) + defer timer.Stop() + + err = cmd.Wait() + t.Logf("%v\n%s", cmd.Args, sb) + if err != nil { + t.Error(err) + } +} + +// Issue 49288. 
+func TestPreemption(t *testing.T) { + if runtime.Compiler == "gccgo" { + t.Skip("skipping asynchronous preemption test with gccgo") + } + globalSkip(t) + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-archive") + + t.Parallel() + + if !testWork { + defer func() { + os.Remove("testp8" + exeSuffix) + os.Remove("libgo8.a") + os.Remove("libgo8.h") + }() + } + + cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo8.a", "./libgo8") + out, err := cmd.CombinedOutput() + t.Logf("%v\n%s", cmd.Args, out) + if err != nil { + t.Fatal(err) + } + checkLineComments(t, "libgo8.h") + checkArchive(t, "libgo8.a") + + ccArgs := append(cc, "-o", "testp8"+exeSuffix, "main8.c", "libgo8.a") + out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput() + t.Logf("%v\n%s", ccArgs, out) + if err != nil { + t.Fatal(err) + } + + argv := cmdToRun("./testp8") + cmd = exec.Command(argv[0], argv[1:]...) + sb := new(strings.Builder) + cmd.Stdout = sb + cmd.Stderr = sb + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + timer := time.AfterFunc(time.Minute, + func() { + t.Error("test program timed out") + cmd.Process.Kill() + }, + ) + defer timer.Stop() + + err = cmd.Wait() + t.Logf("%v\n%s", cmd.Args, sb) + if err != nil { + t.Error(err) + } +} + +// Issue 59294. Test calling Go function from C after using some +// stack space. 
+func TestDeepStack(t *testing.T) { + globalSkip(t) + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-archive") + + t.Parallel() + + if !testWork { + defer func() { + os.Remove("testp9" + exeSuffix) + os.Remove("libgo9.a") + os.Remove("libgo9.h") + }() + } + + cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo9.a", "./libgo9") + out, err := cmd.CombinedOutput() + t.Logf("%v\n%s", cmd.Args, out) + if err != nil { + t.Fatal(err) + } + checkLineComments(t, "libgo9.h") + checkArchive(t, "libgo9.a") + + // build with -O0 so the C compiler won't optimize out the large stack frame + ccArgs := append(cc, "-O0", "-o", "testp9"+exeSuffix, "main9.c", "libgo9.a") + out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput() + t.Logf("%v\n%s", ccArgs, out) + if err != nil { + t.Fatal(err) + } + + argv := cmdToRun("./testp9") + cmd = exec.Command(argv[0], argv[1:]...) + sb := new(strings.Builder) + cmd.Stdout = sb + cmd.Stderr = sb + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + timer := time.AfterFunc(time.Minute, + func() { + t.Error("test program timed out") + cmd.Process.Kill() + }, + ) + defer timer.Stop() + + err = cmd.Wait() + t.Logf("%v\n%s", cmd.Args, sb) + if err != nil { + t.Error(err) + } +} + +func TestSharedObject(t *testing.T) { + // Test that we can put a Go c-archive into a C shared object. 
+ globalSkip(t) + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-archive") + + t.Parallel() + + if !testWork { + defer func() { + os.Remove("libgo_s.a") + os.Remove("libgo_s.h") + os.Remove("libgo_s.so") + }() + } + + cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo_s.a", "./libgo") + out, err := cmd.CombinedOutput() + t.Logf("%v\n%s", cmd.Args, out) + if err != nil { + t.Fatal(err) + } + + ccArgs := append(cc, "-shared", "-o", "libgo_s.so", "libgo_s.a") + out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput() + t.Logf("%v\n%s", ccArgs, out) + if err != nil { + t.Fatal(err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo/libgo.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo/libgo.go new file mode 100644 index 0000000000000000000000000000000000000000..37b30c14632dcacb3ea761cbc04664767ab91b47 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo/libgo.go @@ -0,0 +1,53 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "os" + "syscall" + "time" + + _ "testcarchive/p" +) + +import "C" + +var initCh = make(chan int, 1) +var ranMain bool + +func init() { + // emulate an exceedingly slow package initialization function + time.Sleep(100 * time.Millisecond) + initCh <- 42 +} + +func main() { ranMain = true } + +//export DidInitRun +func DidInitRun() bool { + select { + case x := <-initCh: + if x != 42 { + // Just in case initCh was not correctly made. 
+ println("want init value of 42, got: ", x) + syscall.Exit(2) + } + return true + default: + return false + } +} + +//export DidMainRun +func DidMainRun() bool { return ranMain } + +//export CheckArgs +func CheckArgs() { + if len(os.Args) != 3 || os.Args[1] != "arg1" || os.Args[2] != "arg2" { + fmt.Printf("CheckArgs: want [_, arg1, arg2], got: %v\n", os.Args) + os.Exit(2) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo2/libgo2.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo2/libgo2.go new file mode 100644 index 0000000000000000000000000000000000000000..b2e7731a45d77a5ab664d134407315597e7f0727 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo2/libgo2.go @@ -0,0 +1,91 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +#include +#include +#include +#include + +// Raise SIGPIPE. +static void CRaiseSIGPIPE() { + int fds[2]; + + if (pipe(fds) == -1) { + perror("pipe"); + exit(EXIT_FAILURE); + } + // Close the reader end + close(fds[0]); + // Write to the writer end to provoke a SIGPIPE + if (write(fds[1], "some data", 9) != -1) { + fprintf(stderr, "write to a closed pipe succeeded\n"); + exit(EXIT_FAILURE); + } + close(fds[1]); +} +*/ +import "C" + +import ( + "fmt" + "os" + "runtime" +) + +// RunGoroutines starts some goroutines that don't do anything. +// The idea is to get some threads going, so that a signal will be delivered +// to a thread started by Go. +// +//export RunGoroutines +func RunGoroutines() { + for i := 0; i < 4; i++ { + go func() { + runtime.LockOSThread() + select {} + }() + } +} + +// Block blocks the current thread while running Go code. 
+// +//export Block +func Block() { + select {} +} + +var P *byte + +// TestSEGV makes sure that an invalid address turns into a run-time Go panic. +// +//export TestSEGV +func TestSEGV() { + defer func() { + if recover() == nil { + fmt.Fprintln(os.Stderr, "no panic from segv") + os.Exit(1) + } + }() + *P = 0 + fmt.Fprintln(os.Stderr, "continued after segv") + os.Exit(1) +} + +// Noop ensures that the Go runtime is initialized. +// +//export Noop +func Noop() { +} + +// Raise SIGPIPE. +// +//export GoRaiseSIGPIPE +func GoRaiseSIGPIPE() { + C.CRaiseSIGPIPE() +} + +func main() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo3/libgo3.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo3/libgo3.go new file mode 100644 index 0000000000000000000000000000000000000000..136695b09be8e9dcde3bec1ad1b20a16b36ff48c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo3/libgo3.go @@ -0,0 +1,60 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "C" + +import ( + "os" + "os/signal" + "syscall" + "time" +) + +// The channel used to read SIGIO signals. +var sigioChan chan os.Signal + +// CatchSIGIO starts catching SIGIO signals. +// +//export CatchSIGIO +func CatchSIGIO() { + sigioChan = make(chan os.Signal, 1) + signal.Notify(sigioChan, syscall.SIGIO) +} + +// ResetSIGIO stops catching SIGIO signals. +// +//export ResetSIGIO +func ResetSIGIO() { + signal.Reset(syscall.SIGIO) +} + +// SawSIGIO reports whether we saw a SIGIO. +// +//export SawSIGIO +func SawSIGIO() C.int { + select { + case <-sigioChan: + return 1 + case <-time.After(5 * time.Second): + return 0 + } +} + +// ProvokeSIGPIPE provokes a kernel-initiated SIGPIPE. 
+// +//export ProvokeSIGPIPE +func ProvokeSIGPIPE() { + r, w, err := os.Pipe() + if err != nil { + panic(err) + } + r.Close() + defer w.Close() + w.Write([]byte("some data")) +} + +func main() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo4/libgo4.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo4/libgo4.go new file mode 100644 index 0000000000000000000000000000000000000000..c81d3af4ea3b97e3b81d9f5d84a1f7ecd642981d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo4/libgo4.go @@ -0,0 +1,55 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +#include +#include + +// Raise SIGIO. +static void CRaiseSIGIO(pthread_t* p) { + pthread_kill(*p, SIGIO); +} +*/ +import "C" + +import ( + "os" + "os/signal" + "sync/atomic" + "syscall" +) + +var sigioCount int32 + +// Catch SIGIO. +// +//export GoCatchSIGIO +func GoCatchSIGIO() { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGIO) + go func() { + for range c { + atomic.AddInt32(&sigioCount, 1) + } + }() +} + +// Raise SIGIO. +// +//export GoRaiseSIGIO +func GoRaiseSIGIO(p *C.pthread_t) { + C.CRaiseSIGIO(p) +} + +// Return the number of SIGIO signals seen. +// +//export SIGIOCount +func SIGIOCount() C.int { + return C.int(atomic.LoadInt32(&sigioCount)) +} + +func main() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo6/sigprof.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo6/sigprof.go new file mode 100644 index 0000000000000000000000000000000000000000..31527c59af1213aeee245b5daa545ee7194fa93d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo6/sigprof.go @@ -0,0 +1,25 @@ +// Copyright 2016 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "io" + "runtime/pprof" +) + +import "C" + +//export go_start_profile +func go_start_profile() { + pprof.StartCPUProfile(io.Discard) +} + +//export go_stop_profile +func go_stop_profile() { + pprof.StopCPUProfile() +} + +func main() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo7/sink.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo7/sink.go new file mode 100644 index 0000000000000000000000000000000000000000..d61638b38e8160449355447b7dd6e0c741061609 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo7/sink.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "C" + +var sink []byte + +//export GoFunction7 +func GoFunction7() { + sink = make([]byte, 4096) +} + +func main() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo8/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo8/a.go new file mode 100644 index 0000000000000000000000000000000000000000..718418ecb8765f297fdb7074ef6624295e2cb070 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo8/a.go @@ -0,0 +1,36 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "C" + +import ( + "os" + "runtime" + "sync/atomic" +) + +var started int32 + +// Start a goroutine that loops forever. 
+func init() { + runtime.GOMAXPROCS(1) + go func() { + for { + atomic.StoreInt32(&started, 1) + } + }() +} + +//export GoFunction8 +func GoFunction8() { + for atomic.LoadInt32(&started) == 0 { + runtime.Gosched() + } + os.Exit(0) +} + +func main() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go new file mode 100644 index 0000000000000000000000000000000000000000..acb08d90ecd5bfb63ae93a83f2d2568e777301ad --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/libgo9/a.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "runtime" + +import "C" + +func main() {} + +//export GoF +func GoF() { runtime.GC() } diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main.c new file mode 100644 index 0000000000000000000000000000000000000000..163b5398e5ecb4bb5d024ee55bfa68df7360b3ac --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main.c @@ -0,0 +1,48 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include +#include +#include + +#include "p.h" +#include "libgo.h" + +extern int install_handler(); +extern int check_handler(); + +int main(void) { + int32_t res; + + int r1 = install_handler(); + if (r1!=0) { + return r1; + } + + if (!DidInitRun()) { + fprintf(stderr, "ERROR: buildmode=c-archive init should run\n"); + return 2; + } + + if (DidMainRun()) { + fprintf(stderr, "ERROR: buildmode=c-archive should not run main\n"); + return 2; + } + + int r2 = check_handler(); + if (r2!=0) { + return r2; + } + + res = FromPkg(); + if (res != 1024) { + fprintf(stderr, "ERROR: FromPkg()=%d, want 1024\n", res); + return 2; + } + + CheckArgs(); + + fprintf(stderr, "PASS\n"); + return 0; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main2.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main2.c new file mode 100644 index 0000000000000000000000000000000000000000..e82294ded89e43f8a803ca2dead62a60608f6ab8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main2.c @@ -0,0 +1,239 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test installing a signal handler before the Go code starts. +// This is a lot like ../testcshared/main4.c. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "libgo2.h" + +static void die(const char* msg) { + perror(msg); + exit(EXIT_FAILURE); +} + +static volatile sig_atomic_t sigioSeen; +static volatile sig_atomic_t sigpipeSeen; + +// Use up some stack space. 
+static void recur(int i, char *p) { + char a[1024]; + + *p = '\0'; + if (i > 0) { + recur(i - 1, a); + } +} + +static void pipeHandler(int signo, siginfo_t* info, void* ctxt) { + sigpipeSeen = 1; +} + +// Signal handler that uses up more stack space than a goroutine will have. +static void ioHandler(int signo, siginfo_t* info, void* ctxt) { + char a[1024]; + + recur(4, a); + sigioSeen = 1; +} + +static jmp_buf jmp; +static char* nullPointer; + +// An arbitrary function which requires proper stack alignment; see +// http://golang.org/issue/17641. +static void callWithVarargs(void* dummy, ...) { + va_list args; + va_start(args, dummy); + va_end(args); +} + +// Signal handler for SIGSEGV on a C thread. +static void segvHandler(int signo, siginfo_t* info, void* ctxt) { + sigset_t mask; + int i; + + // Call an arbitrary function that requires the stack to be properly aligned. + callWithVarargs("dummy arg", 3.1415); + + if (sigemptyset(&mask) < 0) { + die("sigemptyset"); + } + if (sigaddset(&mask, SIGSEGV) < 0) { + die("sigaddset"); + } + i = sigprocmask(SIG_UNBLOCK, &mask, NULL); + if (i != 0) { + fprintf(stderr, "sigprocmask: %s\n", strerror(i)); + exit(EXIT_FAILURE); + } + + // Don't try this at home. + longjmp(jmp, signo); + + // We should never get here. + abort(); +} + +// Set up the signal handlers in a high priority constructor, +// so that they are installed before the Go code starts. 
+ +static void init(void) __attribute__ ((constructor (200))); + +static void init() { + struct sigaction sa; + + memset(&sa, 0, sizeof sa); + sa.sa_sigaction = ioHandler; + if (sigemptyset(&sa.sa_mask) < 0) { + die("sigemptyset"); + } + sa.sa_flags = SA_SIGINFO; + if (sigaction(SIGIO, &sa, NULL) < 0) { + die("sigaction"); + } + + sa.sa_sigaction = segvHandler; + if (sigaction(SIGSEGV, &sa, NULL) < 0 || sigaction(SIGBUS, &sa, NULL) < 0) { + die("sigaction"); + } + + sa.sa_sigaction = pipeHandler; + if (sigaction(SIGPIPE, &sa, NULL) < 0) { + die("sigaction"); + } +} + +int main(int argc, char** argv) { + int verbose; + sigset_t mask; + int i; + struct timespec ts; + int darwin; + + darwin = atoi(argv[1]); + + verbose = argc > 2; + + setvbuf(stdout, NULL, _IONBF, 0); + + // Call setsid so that we can use kill(0, SIGIO) below. + // Don't check the return value so that this works both from + // a job control shell and from a shell script. + setsid(); + + if (verbose) { + printf("calling RunGoroutines\n"); + } + + RunGoroutines(); + + // Block SIGIO in this thread to make it more likely that it + // will be delivered to a goroutine. + + if (verbose) { + printf("calling pthread_sigmask\n"); + } + + if (sigemptyset(&mask) < 0) { + die("sigemptyset"); + } + if (sigaddset(&mask, SIGIO) < 0) { + die("sigaddset"); + } + i = pthread_sigmask(SIG_BLOCK, &mask, NULL); + if (i != 0) { + fprintf(stderr, "pthread_sigmask: %s\n", strerror(i)); + exit(EXIT_FAILURE); + } + + if (verbose) { + printf("calling kill\n"); + } + + if (kill(0, SIGIO) < 0) { + die("kill"); + } + + if (verbose) { + printf("waiting for sigioSeen\n"); + } + + // Wait until the signal has been delivered. 
+ i = 0; + while (!sigioSeen) { + ts.tv_sec = 0; + ts.tv_nsec = 1000000; + nanosleep(&ts, NULL); + i++; + if (i > 5000) { + fprintf(stderr, "looping too long waiting for SIGIO\n"); + exit(EXIT_FAILURE); + } + } + + if (verbose) { + printf("provoking SIGPIPE\n"); + } + + // SIGPIPE is never forwarded on Darwin, see golang.org/issue/33384. + if (!darwin) { + GoRaiseSIGPIPE(); + + if (verbose) { + printf("waiting for sigpipeSeen\n"); + } + + // Wait until the signal has been delivered. + i = 0; + while (!sigpipeSeen) { + ts.tv_sec = 0; + ts.tv_nsec = 1000000; + nanosleep(&ts, NULL); + i++; + if (i > 5000) { + fprintf(stderr, "looping too long waiting for SIGPIPE\n"); + exit(EXIT_FAILURE); + } + } + } + + if (verbose) { + printf("calling setjmp\n"); + } + + // Test that a SIGSEGV on this thread is delivered to us. + if (setjmp(jmp) == 0) { + if (verbose) { + printf("triggering SIGSEGV\n"); + } + + *nullPointer = '\0'; + + fprintf(stderr, "continued after address error\n"); + exit(EXIT_FAILURE); + } + + if (verbose) { + printf("calling TestSEGV\n"); + } + + TestSEGV(); + + printf("PASS\n"); + return 0; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main3.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main3.c new file mode 100644 index 0000000000000000000000000000000000000000..983e1b61222d3c6ea325f62b5b6d5704e656e168 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main3.c @@ -0,0 +1,210 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test os/signal.Notify and os/signal.Reset. +// This is a lot like ../testcshared/main5.c. 
+ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "libgo3.h" + +static void die(const char* msg) { + perror(msg); + exit(EXIT_FAILURE); +} + +static volatile sig_atomic_t sigioSeen; + +static void ioHandler(int signo, siginfo_t* info, void* ctxt) { + sigioSeen = 1; +} + +// Set up the SIGPIPE signal handler in a high priority constructor, so +// that it is installed before the Go code starts. + +static void pipeHandler(int signo, siginfo_t* info, void* ctxt) { + const char *s = "unexpected SIGPIPE\n"; + write(2, s, strlen(s)); + exit(EXIT_FAILURE); +} + +static void init(void) __attribute__ ((constructor (200))); + +static void init() { + struct sigaction sa; + + memset(&sa, 0, sizeof sa); + sa.sa_sigaction = pipeHandler; + if (sigemptyset(&sa.sa_mask) < 0) { + die("sigemptyset"); + } + sa.sa_flags = SA_SIGINFO; + if (sigaction(SIGPIPE, &sa, NULL) < 0) { + die("sigaction"); + } +} + +static void *provokeSIGPIPE(void *arg) { + ProvokeSIGPIPE(); + return NULL; +} + +int main(int argc, char** argv) { + int verbose; + struct sigaction sa; + int i; + struct timespec ts; + int res; + pthread_t tid; + + verbose = argc > 2; + setvbuf(stdout, NULL, _IONBF, 0); + + if (verbose) { + printf("raising SIGPIPE\n"); + } + + // Test that the Go runtime handles SIGPIPE, even if we installed + // a non-default SIGPIPE handler before the runtime initializes. + ProvokeSIGPIPE(); + + // Test that SIGPIPE on a non-main thread is also handled by Go. 
+ res = pthread_create(&tid, NULL, provokeSIGPIPE, NULL); + if (res != 0) { + fprintf(stderr, "pthread_create: %s\n", strerror(res)); + exit(EXIT_FAILURE); + } + + res = pthread_join(tid, NULL); + if (res != 0) { + fprintf(stderr, "pthread_join: %s\n", strerror(res)); + exit(EXIT_FAILURE); + } + + if (verbose) { + printf("calling sigaction\n"); + } + + memset(&sa, 0, sizeof sa); + sa.sa_sigaction = ioHandler; + if (sigemptyset(&sa.sa_mask) < 0) { + die("sigemptyset"); + } + sa.sa_flags = SA_SIGINFO; + if (sigaction(SIGIO, &sa, NULL) < 0) { + die("sigaction"); + } + + // At this point there should not be a Go signal handler + // installed for SIGIO. + + if (verbose) { + printf("raising SIGIO\n"); + } + + if (raise(SIGIO) < 0) { + die("raise"); + } + + if (verbose) { + printf("waiting for sigioSeen\n"); + } + + // Wait until the signal has been delivered. + i = 0; + while (!sigioSeen) { + ts.tv_sec = 0; + ts.tv_nsec = 1000000; + nanosleep(&ts, NULL); + i++; + if (i > 5000) { + fprintf(stderr, "looping too long waiting for signal\n"); + exit(EXIT_FAILURE); + } + } + + sigioSeen = 0; + + // Tell the Go code to catch SIGIO. + + if (verbose) { + printf("calling CatchSIGIO\n"); + } + + CatchSIGIO(); + + if (verbose) { + printf("raising SIGIO\n"); + } + + if (raise(SIGIO) < 0) { + die("raise"); + } + + if (verbose) { + printf("calling SawSIGIO\n"); + } + + if (!SawSIGIO()) { + fprintf(stderr, "Go handler did not see SIGIO\n"); + exit(EXIT_FAILURE); + } + + if (sigioSeen != 0) { + fprintf(stderr, "C handler saw SIGIO when only Go handler should have\n"); + exit(EXIT_FAILURE); + } + + // Tell the Go code to stop catching SIGIO. 
+ + if (verbose) { + printf("calling ResetSIGIO\n"); + } + + ResetSIGIO(); + + if (verbose) { + printf("raising SIGIO\n"); + } + + if (raise(SIGIO) < 0) { + die("raise"); + } + + if (verbose) { + printf("calling SawSIGIO\n"); + } + + if (SawSIGIO()) { + fprintf(stderr, "Go handler saw SIGIO after Reset\n"); + exit(EXIT_FAILURE); + } + + if (verbose) { + printf("waiting for sigioSeen\n"); + } + + // Wait until the signal has been delivered. + i = 0; + while (!sigioSeen) { + ts.tv_sec = 0; + ts.tv_nsec = 1000000; + nanosleep(&ts, NULL); + i++; + if (i > 5000) { + fprintf(stderr, "looping too long waiting for signal\n"); + exit(EXIT_FAILURE); + } + } + + printf("PASS\n"); + return 0; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main4.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main4.c new file mode 100644 index 0000000000000000000000000000000000000000..04f774008f1101d003eb5ae489ea5a791f4639f2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main4.c @@ -0,0 +1,204 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test a C thread that calls sigaltstack and then calls Go code. + +#include +#include +#include +#include +#include +#include +#include + +#include "libgo4.h" + +#ifdef _AIX +// On AIX, CSIGSTKSZ is too small to handle Go sighandler. +#define CSIGSTKSZ 0x4000 +#else +#define CSIGSTKSZ SIGSTKSZ +#endif + +static void die(const char* msg) { + perror(msg); + exit(EXIT_FAILURE); +} + +static int ok = 1; + +static void ioHandler(int signo, siginfo_t* info, void* ctxt) { +} + +// Set up the SIGIO signal handler in a high priority constructor, so +// that it is installed before the Go code starts. 
+ +static void init(void) __attribute__ ((constructor (200))); + +static void init() { + struct sigaction sa; + + memset(&sa, 0, sizeof sa); + sa.sa_sigaction = ioHandler; + if (sigemptyset(&sa.sa_mask) < 0) { + die("sigemptyset"); + } + sa.sa_flags = SA_SIGINFO | SA_ONSTACK; + if (sigaction(SIGIO, &sa, NULL) < 0) { + die("sigaction"); + } +} + +// Test raising SIGIO on a C thread with an alternate signal stack +// when there is a Go signal handler for SIGIO. +static void* thread1(void* arg __attribute__ ((unused))) { + stack_t ss; + int i; + stack_t nss; + struct timespec ts; + + // Set up an alternate signal stack for this thread. + memset(&ss, 0, sizeof ss); + ss.ss_sp = malloc(CSIGSTKSZ); + if (ss.ss_sp == NULL) { + die("malloc"); + } + ss.ss_flags = 0; + ss.ss_size = CSIGSTKSZ; + if (sigaltstack(&ss, NULL) < 0) { + die("sigaltstack"); + } + + // Send ourselves a SIGIO. This will be caught by the Go + // signal handler which should forward to the C signal + // handler. + i = pthread_kill(pthread_self(), SIGIO); + if (i != 0) { + fprintf(stderr, "pthread_kill: %s\n", strerror(i)); + exit(EXIT_FAILURE); + } + + // Wait until the signal has been delivered. + i = 0; + while (SIGIOCount() == 0) { + ts.tv_sec = 0; + ts.tv_nsec = 1000000; + nanosleep(&ts, NULL); + i++; + if (i > 5000) { + fprintf(stderr, "looping too long waiting for signal\n"); + exit(EXIT_FAILURE); + } + } + + // We should still be on the same signal stack. + if (sigaltstack(NULL, &nss) < 0) { + die("sigaltstack check"); + } + if ((nss.ss_flags & SS_DISABLE) != 0) { + fprintf(stderr, "sigaltstack disabled on return from Go\n"); + ok = 0; + } else if (nss.ss_sp != ss.ss_sp) { + fprintf(stderr, "sigaltstack changed on return from Go\n"); + ok = 0; + } + + return NULL; +} + +// Test calling a Go function to raise SIGIO on a C thread with an +// alternate signal stack when there is a Go signal handler for SIGIO. 
+static void* thread2(void* arg __attribute__ ((unused))) { + stack_t ss; + int i; + int oldcount; + pthread_t tid; + struct timespec ts; + stack_t nss; + + // Set up an alternate signal stack for this thread. + memset(&ss, 0, sizeof ss); + ss.ss_sp = malloc(CSIGSTKSZ); + if (ss.ss_sp == NULL) { + die("malloc"); + } + ss.ss_flags = 0; + ss.ss_size = CSIGSTKSZ; + if (sigaltstack(&ss, NULL) < 0) { + die("sigaltstack"); + } + + oldcount = SIGIOCount(); + + // Call a Go function that will call a C function to send us a + // SIGIO. + tid = pthread_self(); + GoRaiseSIGIO(&tid); + + // Wait until the signal has been delivered. + i = 0; + while (SIGIOCount() == oldcount) { + ts.tv_sec = 0; + ts.tv_nsec = 1000000; + nanosleep(&ts, NULL); + i++; + if (i > 5000) { + fprintf(stderr, "looping too long waiting for signal\n"); + exit(EXIT_FAILURE); + } + } + + // We should still be on the same signal stack. + if (sigaltstack(NULL, &nss) < 0) { + die("sigaltstack check"); + } + if ((nss.ss_flags & SS_DISABLE) != 0) { + fprintf(stderr, "sigaltstack disabled on return from Go\n"); + ok = 0; + } else if (nss.ss_sp != ss.ss_sp) { + fprintf(stderr, "sigaltstack changed on return from Go\n"); + ok = 0; + } + + return NULL; +} + +int main(int argc, char **argv) { + pthread_t tid; + int i; + + // Tell the Go library to start looking for SIGIO. 
+ GoCatchSIGIO(); + + i = pthread_create(&tid, NULL, thread1, NULL); + if (i != 0) { + fprintf(stderr, "pthread_create: %s\n", strerror(i)); + exit(EXIT_FAILURE); + } + + i = pthread_join(tid, NULL); + if (i != 0) { + fprintf(stderr, "pthread_join: %s\n", strerror(i)); + exit(EXIT_FAILURE); + } + + i = pthread_create(&tid, NULL, thread2, NULL); + if (i != 0) { + fprintf(stderr, "pthread_create: %s\n", strerror(i)); + exit(EXIT_FAILURE); + } + + i = pthread_join(tid, NULL); + if (i != 0) { + fprintf(stderr, "pthread_join: %s\n", strerror(i)); + exit(EXIT_FAILURE); + } + + if (!ok) { + exit(EXIT_FAILURE); + } + + printf("PASS\n"); + return 0; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main5.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main5.c new file mode 100644 index 0000000000000000000000000000000000000000..c64c246fdea8e5dfe058d39563d65b85c04f65c5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main5.c @@ -0,0 +1,105 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test for verifying that the Go runtime properly forwards +// signals when non-Go signals are raised. + +#include +#include +#include +#include +#include +#include + +#include "libgo2.h" + +int *nilp; + +int main(int argc, char** argv) { + int verbose; + int test; + + if (argc < 2) { + printf("Missing argument\n"); + return 1; + } + + test = atoi(argv[1]); + + verbose = (argc > 2); + + Noop(); + + switch (test) { + case 1: { + if (verbose) { + printf("attempting segfault\n"); + } + + *nilp = 0; + break; + } + + case 2: { + struct timeval tv; + + if (verbose) { + printf("attempting external signal test\n"); + } + + fprintf(stderr, "OK\n"); + fflush(stderr); + + // The program should be interrupted before + // this sleep finishes. 
We use select rather + // than sleep because in older versions of + // glibc the sleep function does some signal + // fiddling to handle SIGCHLD. If this + // program is fiddling signals just when the + // test program sends the signal, the signal + // may be delivered to a Go thread which will + // break this test. + tv.tv_sec = 60; + tv.tv_usec = 0; + select(0, NULL, NULL, NULL, &tv); + + break; + } + case 3: { + if (verbose) { + printf("attempting SIGPIPE\n"); + } + + int fd[2]; + if (pipe(fd) != 0) { + printf("pipe(2) failed\n"); + return 0; + } + // Close the reading end. + close(fd[0]); + // Expect that write(2) fails (EPIPE) + if (write(fd[1], "some data", 9) != -1) { + printf("write(2) unexpectedly succeeded\n"); + return 0; + } + printf("did not receive SIGPIPE\n"); + return 0; + } + case 4: { + fprintf(stderr, "OK\n"); + fflush(stderr); + + if (verbose) { + printf("calling Block\n"); + } + Block(); + } + default: + printf("Unknown test: %d\n", test); + return 0; + } + + printf("FAIL\n"); + return 0; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main6.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main6.c new file mode 100644 index 0000000000000000000000000000000000000000..2745eb9dc5e7887963d1ad855c0f210af5b453a2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main6.c @@ -0,0 +1,34 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that using the Go profiler in a C program does not crash. + +#include +#include + +#include "libgo6.h" + +int main(int argc, char **argv) { + struct timeval tvstart, tvnow; + int diff; + + gettimeofday(&tvstart, NULL); + + go_start_profile(); + + // Busy wait so we have something to profile. + // If we just sleep the profiling signal will never fire. 
+ while (1) { + gettimeofday(&tvnow, NULL); + diff = (tvnow.tv_sec - tvstart.tv_sec) * 1000 * 1000 + (tvnow.tv_usec - tvstart.tv_usec); + + // Profile frequency is 100Hz so we should definitely + // get a signal in 50 milliseconds. + if (diff > 50 * 1000) + break; + } + + go_stop_profile(); + return 0; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main7.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main7.c new file mode 100644 index 0000000000000000000000000000000000000000..2c6d98daa833df5fa5b5d9047e913209ff826287 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main7.c @@ -0,0 +1,18 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that lots of calls don't deadlock. + +#include + +#include "libgo7.h" + +int main() { + int i; + + for (i = 0; i < 100000; i++) { + GoFunction7(); + } + return 0; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main8.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main8.c new file mode 100644 index 0000000000000000000000000000000000000000..95fb7a349e145086ddd0a411f70a2d858758d63d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main8.c @@ -0,0 +1,16 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test preemption. + +#include + +#include "libgo8.h" + +int main() { + GoFunction8(); + + // That should have exited the program. 
+ abort(); +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main9.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main9.c new file mode 100644 index 0000000000000000000000000000000000000000..95ad4dea49fb1a625755134141739cd2bb1c0550 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main9.c @@ -0,0 +1,24 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "libgo9.h" + +void use(int *x) { (*x)++; } + +void callGoFWithDeepStack() { + int x[10000]; + + use(&x[0]); + use(&x[9999]); + + GoF(); + + use(&x[0]); + use(&x[9999]); +} + +int main() { + GoF(); // call GoF without using much stack + callGoFWithDeepStack(); // call GoF with a deep stack +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main_unix.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main_unix.c new file mode 100644 index 0000000000000000000000000000000000000000..bd00f9d233995f771575c66109f11f92f1396dd6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main_unix.c @@ -0,0 +1,59 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include +#include +#include +#include +#include + +struct sigaction sa; +struct sigaction osa; + +static void (*oldHandler)(int, siginfo_t*, void*); + +static void handler(int signo, siginfo_t* info, void* ctxt) { + if (oldHandler) { + oldHandler(signo, info, ctxt); + } +} + +int install_handler() { + // Install our own signal handler. 
+ memset(&sa, 0, sizeof sa); + sa.sa_sigaction = handler; + sigemptyset(&sa.sa_mask); + sa.sa_flags = SA_ONSTACK | SA_SIGINFO; + memset(&osa, 0, sizeof osa); + sigemptyset(&osa.sa_mask); + if (sigaction(SIGSEGV, &sa, &osa) < 0) { + perror("sigaction"); + return 2; + } + if (osa.sa_handler == SIG_DFL) { + fprintf(stderr, "Go runtime did not install signal handler\n"); + return 2; + } + // gccgo does not set SA_ONSTACK for SIGSEGV. + if (getenv("GCCGO") == NULL && (osa.sa_flags&SA_ONSTACK) == 0) { + fprintf(stderr, "Go runtime did not install signal handler\n"); + return 2; + } + oldHandler = osa.sa_sigaction; + + return 0; +} + +int check_handler() { + if (sigaction(SIGSEGV, NULL, &sa) < 0) { + perror("sigaction check"); + return 2; + } + if (sa.sa_sigaction != handler) { + fprintf(stderr, "ERROR: wrong signal handler: %p != %p\n", sa.sa_sigaction, handler); + return 2; + } + return 0; +} + diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main_windows.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main_windows.c new file mode 100644 index 0000000000000000000000000000000000000000..eded8af1a2c9af04576a14828372c9c01c8710e9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/main_windows.c @@ -0,0 +1,17 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + * Dummy implementations for Windows, because Windows doesn't + * support Unix-style signal handling. 
+ */ + +int install_handler() { + return 0; +} + + +int check_handler() { + return 0; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/p/p.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/p/p.go new file mode 100644 index 0000000000000000000000000000000000000000..82b445c12109cb3a01f3ec59a2534d5d97768a3e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcarchive/testdata/p/p.go @@ -0,0 +1,10 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +import "C" + +//export FromPkg +func FromPkg() int32 { return 1024 } diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/cshared_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/cshared_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7e9a274d05628ee3114f3881f3da3259dce6a23a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/cshared_test.go @@ -0,0 +1,882 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cshared_test + +import ( + "bufio" + "bytes" + "cmd/cgo/internal/cgotest" + "debug/elf" + "debug/pe" + "encoding/binary" + "flag" + "fmt" + "internal/testenv" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "sync" + "testing" + "unicode" +) + +var globalSkip = func(t *testing.T) {} + +// C compiler with args (from $(go env CC) $(go env GOGCCFLAGS)). +var cc []string + +// ".exe" on Windows. 
+var exeSuffix string + +var GOOS, GOARCH, GOROOT string +var installdir string +var libgoname string + +func TestMain(m *testing.M) { + os.Exit(testMain(m)) +} + +func testMain(m *testing.M) int { + log.SetFlags(log.Lshortfile) + flag.Parse() + if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" { + globalSkip = func(t *testing.T) { t.Skip("short mode and $GO_BUILDER_NAME not set") } + return m.Run() + } + if runtime.GOOS == "linux" { + if _, err := os.Stat("/etc/alpine-release"); err == nil { + globalSkip = func(t *testing.T) { t.Skip("skipping failing test on alpine - go.dev/issue/19938") } + return m.Run() + } + } + if !testenv.HasGoBuild() { + // Checking for "go build" is a proxy for whether or not we can run "go env". + globalSkip = func(t *testing.T) { t.Skip("no go build") } + return m.Run() + } + + GOOS = goEnv("GOOS") + GOARCH = goEnv("GOARCH") + GOROOT = goEnv("GOROOT") + + if _, err := os.Stat(GOROOT); os.IsNotExist(err) { + log.Fatalf("Unable able to find GOROOT at '%s'", GOROOT) + } + + cc = []string{goEnv("CC")} + + out := goEnv("GOGCCFLAGS") + quote := '\000' + start := 0 + lastSpace := true + backslash := false + s := string(out) + for i, c := range s { + if quote == '\000' && unicode.IsSpace(c) { + if !lastSpace { + cc = append(cc, s[start:i]) + lastSpace = true + } + } else { + if lastSpace { + start = i + lastSpace = false + } + if quote == '\000' && !backslash && (c == '"' || c == '\'') { + quote = c + backslash = false + } else if !backslash && quote == c { + quote = '\000' + } else if (quote == '\000' || quote == '"') && !backslash && c == '\\' { + backslash = true + } else { + backslash = false + } + } + } + if !lastSpace { + cc = append(cc, s[start:]) + } + + switch GOOS { + case "darwin", "ios": + // For Darwin/ARM. + // TODO(crawshaw): can we do better? + cc = append(cc, []string{"-framework", "CoreFoundation", "-framework", "Foundation"}...) 
+ case "android": + cc = append(cc, "-pie") + } + libgodir := GOOS + "_" + GOARCH + switch GOOS { + case "darwin", "ios": + if GOARCH == "arm64" { + libgodir += "_shared" + } + case "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris", "illumos": + libgodir += "_shared" + } + cc = append(cc, "-I", filepath.Join("pkg", libgodir)) + + // Force reallocation (and avoid aliasing bugs) for parallel tests that append to cc. + cc = cc[:len(cc):len(cc)] + + if GOOS == "windows" { + exeSuffix = ".exe" + } + + // Copy testdata into GOPATH/src/testcshared, along with a go.mod file + // declaring the same path. + + GOPATH, err := os.MkdirTemp("", "cshared_test") + if err != nil { + log.Panic(err) + } + defer os.RemoveAll(GOPATH) + os.Setenv("GOPATH", GOPATH) + + modRoot := filepath.Join(GOPATH, "src", "testcshared") + if err := cgotest.OverlayDir(modRoot, "testdata"); err != nil { + log.Panic(err) + } + if err := os.Chdir(modRoot); err != nil { + log.Panic(err) + } + os.Setenv("PWD", modRoot) + if err := os.WriteFile("go.mod", []byte("module testcshared\n"), 0666); err != nil { + log.Panic(err) + } + + defer func() { + if installdir != "" { + err := os.RemoveAll(installdir) + if err != nil { + log.Panic(err) + } + } + }() + + return m.Run() +} + +func goEnv(key string) string { + out, err := exec.Command("go", "env", key).Output() + if err != nil { + log.Printf("go env %s failed:\n%s", key, err) + log.Panicf("%s", err.(*exec.ExitError).Stderr) + } + return strings.TrimSpace(string(out)) +} + +func cmdToRun(name string) string { + return "./" + name + exeSuffix +} + +func run(t *testing.T, extraEnv []string, args ...string) string { + t.Helper() + cmd := exec.Command(args[0], args[1:]...) + if len(extraEnv) > 0 { + cmd.Env = append(os.Environ(), extraEnv...) 
+ } + stderr := new(strings.Builder) + cmd.Stderr = stderr + + if GOOS != "windows" { + // TestUnexportedSymbols relies on file descriptor 30 + // being closed when the program starts, so enforce + // that in all cases. (The first three descriptors are + // stdin/stdout/stderr, so we just need to make sure + // that cmd.ExtraFiles[27] exists and is nil.) + cmd.ExtraFiles = make([]*os.File, 28) + } + + t.Logf("run: %v", args) + out, err := cmd.Output() + if stderr.Len() > 0 { + t.Logf("stderr:\n%s", stderr) + } + if err != nil { + t.Fatalf("command failed: %v\n%v\n%s\n", args, err, out) + } + return string(out) +} + +func runExe(t *testing.T, extraEnv []string, args ...string) string { + t.Helper() + return run(t, extraEnv, args...) +} + +func runCC(t *testing.T, args ...string) string { + t.Helper() + // This function is run in parallel, so append to a copy of cc + // rather than cc itself. + return run(t, nil, append(append([]string(nil), cc...), args...)...) +} + +func createHeaders() error { + // The 'cgo' command generates a number of additional artifacts, + // but we're only interested in the header. + // Shunt the rest of the outputs to a temporary directory. + objDir, err := os.MkdirTemp("", "testcshared_obj") + if err != nil { + return err + } + defer os.RemoveAll(objDir) + + // Generate a C header file for p, which is a non-main dependency + // of main package libgo. + // + // TODO(golang.org/issue/35715): This should be simpler. + args := []string{"go", "tool", "cgo", + "-objdir", objDir, + "-exportheader", "p.h", + filepath.Join(".", "p", "p.go")} + cmd := exec.Command(args[0], args[1:]...) + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("command failed: %v\n%v\n%s\n", args, err, out) + } + + // Generate a C header file for libgo itself. 
+ installdir, err = os.MkdirTemp("", "testcshared") + if err != nil { + return err + } + libgoname = "libgo.a" + + args = []string{"go", "build", "-buildmode=c-shared", "-o", filepath.Join(installdir, libgoname), "./libgo"} + cmd = exec.Command(args[0], args[1:]...) + out, err = cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("command failed: %v\n%v\n%s\n", args, err, out) + } + + args = []string{"go", "build", "-buildmode=c-shared", + "-installsuffix", "testcshared", + "-o", libgoname, + filepath.Join(".", "libgo", "libgo.go")} + if GOOS == "windows" && strings.HasSuffix(args[6], ".a") { + args[6] = strings.TrimSuffix(args[6], ".a") + ".dll" + } + cmd = exec.Command(args[0], args[1:]...) + out, err = cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("command failed: %v\n%v\n%s\n", args, err, out) + } + if GOOS == "windows" { + // We can't simply pass -Wl,--out-implib, because this relies on having imports from multiple packages, + // which results in the linkers output implib getting overwritten at each step. So instead build the + // import library the traditional way, using a def file. + err = os.WriteFile("libgo.def", + []byte("LIBRARY libgo.dll\nEXPORTS\n\tDidInitRun\n\tDidMainRun\n\tDivu\n\tFromPkg\n\t_cgo_dummy_export\n"), + 0644) + if err != nil { + return fmt.Errorf("unable to write def file: %v", err) + } + out, err = exec.Command(cc[0], append(cc[1:], "-print-prog-name=dlltool")...).CombinedOutput() + if err != nil { + return fmt.Errorf("unable to find dlltool path: %v\n%s\n", err, out) + } + dlltoolpath := strings.TrimSpace(string(out)) + if filepath.Ext(dlltoolpath) == "" { + // Some compilers report slash-separated paths without extensions + // instead of ordinary Windows paths. + // Try to find the canonical name for the path. 
+ if lp, err := exec.LookPath(dlltoolpath); err == nil { + dlltoolpath = lp + } + } + + args := []string{dlltoolpath, "-D", args[6], "-l", libgoname, "-d", "libgo.def"} + + if filepath.Ext(dlltoolpath) == "" { + // This is an unfortunate workaround for + // https://github.com/mstorsjo/llvm-mingw/issues/205 in which + // we basically reimplement the contents of the dlltool.sh + // wrapper: https://git.io/JZFlU. + // TODO(thanm): remove this workaround once we can upgrade + // the compilers on the windows-arm64 builder. + dlltoolContents, err := os.ReadFile(args[0]) + if err != nil { + return fmt.Errorf("unable to read dlltool: %v\n", err) + } + if bytes.HasPrefix(dlltoolContents, []byte("#!/bin/sh")) && bytes.Contains(dlltoolContents, []byte("llvm-dlltool")) { + base, name := filepath.Split(args[0]) + args[0] = filepath.Join(base, "llvm-dlltool") + var machine string + switch prefix, _, _ := strings.Cut(name, "-"); prefix { + case "i686": + machine = "i386" + case "x86_64": + machine = "i386:x86-64" + case "armv7": + machine = "arm" + case "aarch64": + machine = "arm64" + } + if len(machine) > 0 { + args = append(args, "-m", machine) + } + } + } + + out, err = exec.Command(args[0], args[1:]...).CombinedOutput() + if err != nil { + return fmt.Errorf("unable to run dlltool to create import library: %v\n%s\n", err, out) + } + } + + return nil +} + +var ( + headersOnce sync.Once + headersErr error +) + +func createHeadersOnce(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-shared") + + headersOnce.Do(func() { + headersErr = createHeaders() + }) + if headersErr != nil { + t.Helper() + t.Fatal(headersErr) + } +} + +// test0: exported symbols in shared lib are accessible. 
+func TestExportedSymbols(t *testing.T) { + globalSkip(t) + testenv.MustHaveCGO(t) + testenv.MustHaveExec(t) + + t.Parallel() + + cmd := "testp0" + bin := cmdToRun(cmd) + + createHeadersOnce(t) + + runCC(t, "-I", installdir, "-o", cmd, "main0.c", libgoname) + + defer os.Remove(bin) + + out := runExe(t, []string{"LD_LIBRARY_PATH=."}, bin) + if strings.TrimSpace(out) != "PASS" { + t.Error(out) + } +} + +func checkNumberOfExportedFunctionsWindows(t *testing.T, exportAllSymbols bool) { + const prog = ` +package main + +import "C" + +//export GoFunc +func GoFunc() { + println(42) +} + +//export GoFunc2 +func GoFunc2() { + println(24) +} + +func main() { +} +` + + tmpdir := t.TempDir() + + srcfile := filepath.Join(tmpdir, "test.go") + objfile := filepath.Join(tmpdir, "test.dll") + if err := os.WriteFile(srcfile, []byte(prog), 0666); err != nil { + t.Fatal(err) + } + argv := []string{"build", "-buildmode=c-shared"} + if exportAllSymbols { + argv = append(argv, "-ldflags", "-extldflags=-Wl,--export-all-symbols") + } + argv = append(argv, "-o", objfile, srcfile) + out, err := exec.Command("go", argv...).CombinedOutput() + if err != nil { + t.Fatalf("build failure: %s\n%s\n", err, string(out)) + } + + f, err := pe.Open(objfile) + if err != nil { + t.Fatalf("pe.Open failed: %v", err) + } + defer f.Close() + section := f.Section(".edata") + if section == nil { + t.Skip(".edata section is not present") + } + + // TODO: deduplicate this struct from cmd/link/internal/ld/pe.go + type IMAGE_EXPORT_DIRECTORY struct { + _ [2]uint32 + _ [2]uint16 + _ [2]uint32 + NumberOfFunctions uint32 + NumberOfNames uint32 + _ [3]uint32 + } + var e IMAGE_EXPORT_DIRECTORY + if err := binary.Read(section.Open(), binary.LittleEndian, &e); err != nil { + t.Fatalf("binary.Read failed: %v", err) + } + + // Only the two exported functions and _cgo_dummy_export should be exported + expectedNumber := uint32(3) + + if exportAllSymbols { + if e.NumberOfFunctions <= expectedNumber { + t.Fatalf("missing 
exported functions: %v", e.NumberOfFunctions) + } + if e.NumberOfNames <= expectedNumber { + t.Fatalf("missing exported names: %v", e.NumberOfNames) + } + } else { + if e.NumberOfFunctions != expectedNumber { + t.Fatalf("got %d exported functions; want %d", e.NumberOfFunctions, expectedNumber) + } + if e.NumberOfNames != expectedNumber { + t.Fatalf("got %d exported names; want %d", e.NumberOfNames, expectedNumber) + } + } +} + +func TestNumberOfExportedFunctions(t *testing.T) { + if GOOS != "windows" { + t.Skip("skipping windows only test") + } + globalSkip(t) + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-shared") + + t.Parallel() + + t.Run("OnlyExported", func(t *testing.T) { + checkNumberOfExportedFunctionsWindows(t, false) + }) + t.Run("All", func(t *testing.T) { + checkNumberOfExportedFunctionsWindows(t, true) + }) +} + +// test1: shared library can be dynamically loaded and exported symbols are accessible. +func TestExportedSymbolsWithDynamicLoad(t *testing.T) { + if GOOS == "windows" { + t.Skipf("Skipping on %s", GOOS) + } + globalSkip(t) + testenv.MustHaveCGO(t) + testenv.MustHaveExec(t) + + t.Parallel() + + cmd := "testp1" + bin := cmdToRun(cmd) + + createHeadersOnce(t) + + if GOOS != "freebsd" { + runCC(t, "-o", cmd, "main1.c", "-ldl") + } else { + runCC(t, "-o", cmd, "main1.c") + } + + defer os.Remove(bin) + + out := runExe(t, nil, bin, "./"+libgoname) + if strings.TrimSpace(out) != "PASS" { + t.Error(out) + } +} + +// test2: tests libgo2 which does not export any functions. 
+func TestUnexportedSymbols(t *testing.T) { + if GOOS == "windows" { + t.Skipf("Skipping on %s", GOOS) + } + globalSkip(t) + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-shared") + + t.Parallel() + + cmd := "testp2" + bin := cmdToRun(cmd) + libname := "libgo2.a" + + run(t, + nil, + "go", "build", + "-buildmode=c-shared", + "-installsuffix", "testcshared", + "-o", libname, "./libgo2", + ) + + linkFlags := "-Wl,--no-as-needed" + if GOOS == "darwin" || GOOS == "ios" { + linkFlags = "" + } + + runCC(t, "-o", cmd, "main2.c", linkFlags, libname) + + defer os.Remove(libname) + defer os.Remove(bin) + + out := runExe(t, []string{"LD_LIBRARY_PATH=."}, bin) + + if strings.TrimSpace(out) != "PASS" { + t.Error(out) + } +} + +// test3: tests main.main is exported on android. +func TestMainExportedOnAndroid(t *testing.T) { + globalSkip(t) + testenv.MustHaveCGO(t) + testenv.MustHaveExec(t) + + t.Parallel() + + switch GOOS { + case "android": + break + default: + t.Logf("Skipping on %s", GOOS) + return + } + + cmd := "testp3" + bin := cmdToRun(cmd) + + createHeadersOnce(t) + + runCC(t, "-o", cmd, "main3.c", "-ldl") + + defer os.Remove(bin) + + out := runExe(t, nil, bin, "./"+libgoname) + if strings.TrimSpace(out) != "PASS" { + t.Error(out) + } +} + +func testSignalHandlers(t *testing.T, pkgname, cfile, cmd string) { + if GOOS == "windows" { + t.Skipf("Skipping on %s", GOOS) + } + globalSkip(t) + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-shared") + + libname := pkgname + ".a" + run(t, + nil, + "go", "build", + "-buildmode=c-shared", + "-installsuffix", "testcshared", + "-o", libname, pkgname, + ) + if GOOS != "freebsd" { + runCC(t, "-pthread", "-o", cmd, cfile, "-ldl") + } else { + runCC(t, "-pthread", "-o", cmd, cfile) + } + + bin := cmdToRun(cmd) + + defer os.Remove(libname) + defer os.Remove(bin) + defer os.Remove(pkgname + ".h") + + args := []string{bin, "./" + libname} + if testing.Verbose() { 
+ args = append(args, "verbose") + } + out := runExe(t, nil, args...) + if strings.TrimSpace(out) != "PASS" { + t.Errorf("%v%s", args, out) + } +} + +// test4: test signal handlers +func TestSignalHandlers(t *testing.T) { + t.Parallel() + testSignalHandlers(t, "./libgo4", "main4.c", "testp4") +} + +// test5: test signal handlers with os/signal.Notify +func TestSignalHandlersWithNotify(t *testing.T) { + t.Parallel() + testSignalHandlers(t, "./libgo5", "main5.c", "testp5") +} + +func TestPIE(t *testing.T) { + switch GOOS { + case "linux", "android": + break + default: + t.Skipf("Skipping on %s", GOOS) + } + globalSkip(t) + + t.Parallel() + + createHeadersOnce(t) + + f, err := elf.Open(libgoname) + if err != nil { + t.Fatalf("elf.Open failed: %v", err) + } + defer f.Close() + + ds := f.SectionByType(elf.SHT_DYNAMIC) + if ds == nil { + t.Fatalf("no SHT_DYNAMIC section") + } + d, err := ds.Data() + if err != nil { + t.Fatalf("can't read SHT_DYNAMIC contents: %v", err) + } + for len(d) > 0 { + var tag elf.DynTag + switch f.Class { + case elf.ELFCLASS32: + tag = elf.DynTag(f.ByteOrder.Uint32(d[:4])) + d = d[8:] + case elf.ELFCLASS64: + tag = elf.DynTag(f.ByteOrder.Uint64(d[:8])) + d = d[16:] + } + if tag == elf.DT_TEXTREL { + t.Fatalf("%s has DT_TEXTREL flag", libgoname) + } + } +} + +// Test that installing a second time recreates the header file. 
+func TestCachedInstall(t *testing.T) { + globalSkip(t) + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-shared") + + tmpdir, err := os.MkdirTemp("", "cshared") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + copyFile(t, filepath.Join(tmpdir, "src", "testcshared", "go.mod"), "go.mod") + copyFile(t, filepath.Join(tmpdir, "src", "testcshared", "libgo", "libgo.go"), filepath.Join("libgo", "libgo.go")) + copyFile(t, filepath.Join(tmpdir, "src", "testcshared", "p", "p.go"), filepath.Join("p", "p.go")) + + buildcmd := []string{"go", "install", "-x", "-buildmode=c-shared", "-installsuffix", "testcshared", "./libgo"} + + cmd := exec.Command(buildcmd[0], buildcmd[1:]...) + cmd.Dir = filepath.Join(tmpdir, "src", "testcshared") + env := append(cmd.Environ(), + "GOPATH="+tmpdir, + "GOBIN="+filepath.Join(tmpdir, "bin"), + "GO111MODULE=off", // 'go install' only works in GOPATH mode + ) + cmd.Env = env + t.Log(buildcmd) + out, err := cmd.CombinedOutput() + t.Logf("%s", out) + if err != nil { + t.Fatal(err) + } + + var libgoh, ph string + + walker := func(path string, info os.FileInfo, err error) error { + if err != nil { + t.Fatal(err) + } + var ps *string + switch filepath.Base(path) { + case "libgo.h": + ps = &libgoh + case "p.h": + ps = &ph + } + if ps != nil { + if *ps != "" { + t.Fatalf("%s found again", *ps) + } + *ps = path + } + return nil + } + + if err := filepath.Walk(tmpdir, walker); err != nil { + t.Fatal(err) + } + + if libgoh == "" { + t.Fatal("libgo.h not installed") + } + + if err := os.Remove(libgoh); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(buildcmd[0], buildcmd[1:]...) 
+ cmd.Dir = filepath.Join(tmpdir, "src", "testcshared") + cmd.Env = env + t.Log(buildcmd) + out, err = cmd.CombinedOutput() + t.Logf("%s", out) + if err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(libgoh); err != nil { + t.Errorf("libgo.h not installed in second run: %v", err) + } +} + +// copyFile copies src to dst. +func copyFile(t *testing.T, dst, src string) { + t.Helper() + data, err := os.ReadFile(src) + if err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(filepath.Dir(dst), 0777); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(dst, data, 0666); err != nil { + t.Fatal(err) + } +} + +func TestGo2C2Go(t *testing.T) { + switch GOOS { + case "darwin", "ios", "windows": + // Non-ELF shared libraries don't support the multiple + // copies of the runtime package implied by this test. + t.Skipf("linking c-shared into Go programs not supported on %s; issue 29061, 49457", GOOS) + case "android": + t.Skip("test fails on android; issue 29087") + } + globalSkip(t) + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-shared") + + t.Parallel() + + tmpdir, err := os.MkdirTemp("", "cshared-TestGo2C2Go") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + lib := filepath.Join(tmpdir, "libtestgo2c2go.a") + var env []string + if GOOS == "windows" && strings.HasSuffix(lib, ".a") { + env = append(env, "CGO_LDFLAGS=-Wl,--out-implib,"+lib, "CGO_LDFLAGS_ALLOW=.*") + lib = strings.TrimSuffix(lib, ".a") + ".dll" + } + run(t, env, "go", "build", "-buildmode=c-shared", "-o", lib, "./go2c2go/go") + + cgoCflags := os.Getenv("CGO_CFLAGS") + if cgoCflags != "" { + cgoCflags += " " + } + cgoCflags += "-I" + tmpdir + + cgoLdflags := os.Getenv("CGO_LDFLAGS") + if cgoLdflags != "" { + cgoLdflags += " " + } + cgoLdflags += "-L" + tmpdir + " -ltestgo2c2go" + + goenv := []string{"CGO_CFLAGS=" + cgoCflags, "CGO_LDFLAGS=" + cgoLdflags} + + ldLibPath := os.Getenv("LD_LIBRARY_PATH") + if ldLibPath != "" { + ldLibPath += ":" + 
} + ldLibPath += tmpdir + + runenv := []string{"LD_LIBRARY_PATH=" + ldLibPath} + + bin := filepath.Join(tmpdir, "m1") + exeSuffix + run(t, goenv, "go", "build", "-o", bin, "./go2c2go/m1") + runExe(t, runenv, bin) + + bin = filepath.Join(tmpdir, "m2") + exeSuffix + run(t, goenv, "go", "build", "-o", bin, "./go2c2go/m2") + runExe(t, runenv, bin) +} + +func TestIssue36233(t *testing.T) { + globalSkip(t) + testenv.MustHaveCGO(t) + + t.Parallel() + + // Test that the export header uses GoComplex64 and GoComplex128 + // for complex types. + + tmpdir, err := os.MkdirTemp("", "cshared-TestIssue36233") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + const exportHeader = "issue36233.h" + + run(t, nil, "go", "tool", "cgo", "-exportheader", exportHeader, "-objdir", tmpdir, "./issue36233/issue36233.go") + data, err := os.ReadFile(exportHeader) + if err != nil { + t.Fatal(err) + } + + funcs := []struct{ name, signature string }{ + {"exportComplex64", "GoComplex64 exportComplex64(GoComplex64 v)"}, + {"exportComplex128", "GoComplex128 exportComplex128(GoComplex128 v)"}, + {"exportComplexfloat", "GoComplex64 exportComplexfloat(GoComplex64 v)"}, + {"exportComplexdouble", "GoComplex128 exportComplexdouble(GoComplex128 v)"}, + } + + scanner := bufio.NewScanner(bytes.NewReader(data)) + var found int + for scanner.Scan() { + b := scanner.Bytes() + for _, fn := range funcs { + if bytes.Contains(b, []byte(fn.name)) { + found++ + if !bytes.Contains(b, []byte(fn.signature)) { + t.Errorf("function signature mismatch; got %q, want %q", b, fn.signature) + } + } + } + } + if err = scanner.Err(); err != nil { + t.Errorf("scanner encountered error: %v", err) + } + if found != len(funcs) { + t.Error("missing functions") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/go2c2go/go/shlib.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/go2c2go/go/shlib.go new file mode 100644 index 
0000000000000000000000000000000000000000..76a5323ad2d5590c0907a373d1706e5ece07a68a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/go2c2go/go/shlib.go @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "C" + +//export GoFunc +func GoFunc() int { return 1 } + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/go2c2go/m1/c.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/go2c2go/m1/c.c new file mode 100644 index 0000000000000000000000000000000000000000..0e8fac4cf36f589a65465be33eedb1f12d3f06e9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/go2c2go/m1/c.c @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "libtestgo2c2go.h" + +int CFunc(void) { + return (GoFunc() << 8) + 2; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/go2c2go/m1/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/go2c2go/m1/main.go new file mode 100644 index 0000000000000000000000000000000000000000..17ba1eb0a72e565d557b1684ebe3c16935b8ed3b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/go2c2go/m1/main.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +// extern int CFunc(void); +import "C" + +import ( + "fmt" + "os" +) + +func main() { + got := C.CFunc() + const want = (1 << 8) | 2 + if got != want { + fmt.Printf("got %#x, want %#x\n", got, want) + os.Exit(1) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/go2c2go/m2/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/go2c2go/m2/main.go new file mode 100644 index 0000000000000000000000000000000000000000..91bf308057caa014a6a0f1dadf80559a2dd6e895 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/go2c2go/m2/main.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// #include "libtestgo2c2go.h" +import "C" + +import ( + "fmt" + "os" +) + +func main() { + got := C.GoFunc() + const want = 1 + if got != want { + fmt.Printf("got %#x, want %#x\n", got, want) + os.Exit(1) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/issue36233/issue36233.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/issue36233/issue36233.go new file mode 100644 index 0000000000000000000000000000000000000000..433bf5ce559811dada9d1921d9ee8acdadb22948 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/issue36233/issue36233.go @@ -0,0 +1,30 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +// #include +import "C" + +//export exportComplex64 +func exportComplex64(v complex64) complex64 { + return v +} + +//export exportComplex128 +func exportComplex128(v complex128) complex128 { + return v +} + +//export exportComplexfloat +func exportComplexfloat(v C.complexfloat) C.complexfloat { + return v +} + +//export exportComplexdouble +func exportComplexdouble(v C.complexdouble) C.complexdouble { + return v +} + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/libgo/libgo.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/libgo/libgo.go new file mode 100644 index 0000000000000000000000000000000000000000..063441766a1664333e91e6ec7dafd6adc68510cb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/libgo/libgo.go @@ -0,0 +1,46 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "syscall" + _ "testcshared/p" + "time" +) + +import "C" + +var initCh = make(chan int, 1) +var ranMain bool + +func init() { + // emulate an exceedingly slow package initialization function + time.Sleep(100 * time.Millisecond) + initCh <- 42 +} + +func main() { + ranMain = true +} + +//export DidInitRun +func DidInitRun() bool { + select { + case x := <-initCh: + if x != 42 { + // Just in case initCh was not correctly made. 
+ println("want init value of 42, got: ", x) + syscall.Exit(2) + } + return true + default: + return false + } +} + +//export DidMainRun +func DidMainRun() bool { + return ranMain +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/libgo2/dup2.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/libgo2/dup2.go new file mode 100644 index 0000000000000000000000000000000000000000..d50e0c42abef26536678f96b2b9253f30b5774dd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/libgo2/dup2.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || (linux && !arm64 && !loong64 && !riscv64) || netbsd || openbsd + +package main + +import "syscall" + +func dup2(oldfd, newfd int) error { + return syscall.Dup2(oldfd, newfd) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/libgo2/dup3.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/libgo2/dup3.go new file mode 100644 index 0000000000000000000000000000000000000000..ec4b5a73ec28b24999b3d89f7eac46e9956517db --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/libgo2/dup3.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build (linux && arm64) || (linux && loong64) || (linux && riscv64) + +package main + +import "syscall" + +func dup2(oldfd, newfd int) error { + return syscall.Dup3(oldfd, newfd, 0) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/libgo2/libgo2.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/libgo2/libgo2.go new file mode 100644 index 0000000000000000000000000000000000000000..5f6cfd00397af1356712f59d2c7ae8c0f07213d0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/libgo2/libgo2.go @@ -0,0 +1,52 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris + +package main + +// Test a shared library created by -buildmode=c-shared that does not +// export anything. + +import ( + "fmt" + "os" + "syscall" +) + +// To test this we want to communicate between the main program and +// the shared library without using any exported symbols. The init +// function creates a pipe and Dups the read end to a known number +// that the C code can also use. 
+ +const ( + fd = 30 +) + +func init() { + var p [2]int + if e := syscall.Pipe(p[0:]); e != nil { + fmt.Fprintf(os.Stderr, "pipe: %v\n", e) + os.Exit(2) + } + + if e := dup2(p[0], fd); e != nil { + fmt.Fprintf(os.Stderr, "dup2: %v\n", e) + os.Exit(2) + } + + const str = "PASS" + if n, e := syscall.Write(p[1], []byte(str)); e != nil || n != len(str) { + fmt.Fprintf(os.Stderr, "write: %d %v\n", n, e) + os.Exit(2) + } + + if e := syscall.Close(p[1]); e != nil { + fmt.Fprintf(os.Stderr, "close: %v\n", e) + os.Exit(2) + } +} + +func main() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/libgo4/libgo4.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/libgo4/libgo4.go new file mode 100644 index 0000000000000000000000000000000000000000..9c30c8585a33336a21710ae1b070b43dc43438f5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/libgo4/libgo4.go @@ -0,0 +1,47 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "C" + +import ( + "fmt" + "os" + "runtime" +) + +// RunGoroutines starts some goroutines that don't do anything. +// The idea is to get some threads going, so that a signal will be delivered +// to a thread started by Go. +// +//export RunGoroutines +func RunGoroutines() { + for i := 0; i < 4; i++ { + go func() { + runtime.LockOSThread() + select {} + }() + } +} + +var P *byte + +// TestSEGV makes sure that an invalid address turns into a run-time Go panic. 
+// +//export TestSEGV +func TestSEGV() { + defer func() { + if recover() == nil { + fmt.Fprintln(os.Stderr, "no panic from segv") + os.Exit(1) + } + }() + *P = 0 + fmt.Fprintln(os.Stderr, "continued after segv") + os.Exit(1) +} + +func main() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/libgo5/libgo5.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/libgo5/libgo5.go new file mode 100644 index 0000000000000000000000000000000000000000..c70dd681fa8eed902111f7f89275cfa2afa2a58f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/libgo5/libgo5.go @@ -0,0 +1,56 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "C" + +import ( + "os" + "os/signal" + "syscall" + "time" +) + +// The channel used to read SIGIO signals. +var sigioChan chan os.Signal + +// CatchSIGIO starts catching SIGIO signals. +// +//export CatchSIGIO +func CatchSIGIO() { + sigioChan = make(chan os.Signal, 1) + signal.Notify(sigioChan, syscall.SIGIO) +} + +// ResetSIGIO stops catching SIGIO signals. +// +//export ResetSIGIO +func ResetSIGIO() { + signal.Reset(syscall.SIGIO) +} + +// AwaitSIGIO blocks indefinitely until a SIGIO is reported. +// +//export AwaitSIGIO +func AwaitSIGIO() { + <-sigioChan +} + +// SawSIGIO reports whether we saw a SIGIO within a brief pause. 
+// +//export SawSIGIO +func SawSIGIO() bool { + timer := time.NewTimer(100 * time.Millisecond) + select { + case <-sigioChan: + timer.Stop() + return true + case <-timer.C: + return false + } +} + +func main() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/main0.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/main0.c new file mode 100644 index 0000000000000000000000000000000000000000..39ef7e30513f3ab5684eac58c279d67a9d9536c6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/main0.c @@ -0,0 +1,42 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include +#include + +#include "p.h" +#include "libgo.h" + +// Tests libgo.so to export the following functions. +// int8_t DidInitRun(); +// int8_t DidMainRun(); +// int32_t FromPkg(); +// uint32_t Divu(uint32_t, uint32_t); +int main(void) { + int8_t ran_init = DidInitRun(); + if (!ran_init) { + fprintf(stderr, "ERROR: DidInitRun returned unexpected results: %d\n", + ran_init); + return 1; + } + int8_t ran_main = DidMainRun(); + if (ran_main) { + fprintf(stderr, "ERROR: DidMainRun returned unexpected results: %d\n", + ran_main); + return 1; + } + int32_t from_pkg = FromPkg(); + if (from_pkg != 1024) { + fprintf(stderr, "ERROR: FromPkg=%d, want %d\n", from_pkg, 1024); + return 1; + } + uint32_t divu = Divu(2264, 31); + if (divu != 73) { + fprintf(stderr, "ERROR: Divu(2264, 31)=%d, want %d\n", divu, 73); + return 1; + } + // test.bash looks for "PASS" to ensure this program has reached the end. 
+ printf("PASS\n"); + return 0; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/main1.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/main1.c new file mode 100644 index 0000000000000000000000000000000000000000..420dd1ea97459aa7cf26dd2a1d56e892a6d279f6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/main1.c @@ -0,0 +1,69 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include +#include +#include + +int check_int8(void* handle, const char* fname, int8_t want) { + int8_t (*fn)(); + fn = (int8_t (*)())dlsym(handle, fname); + if (!fn) { + fprintf(stderr, "ERROR: missing %s: %s\n", fname, dlerror()); + return 1; + } + signed char ret = fn(); + if (ret != want) { + fprintf(stderr, "ERROR: %s=%d, want %d\n", fname, ret, want); + return 1; + } + return 0; +} + +int check_int32(void* handle, const char* fname, int32_t want) { + int32_t (*fn)(); + fn = (int32_t (*)())dlsym(handle, fname); + if (!fn) { + fprintf(stderr, "ERROR: missing %s: %s\n", fname, dlerror()); + return 1; + } + int32_t ret = fn(); + if (ret != want) { + fprintf(stderr, "ERROR: %s=%d, want %d\n", fname, ret, want); + return 1; + } + return 0; +} + +// Tests libgo.so to export the following functions. 
+// int8_t DidInitRun() // returns true +// int8_t DidMainRun() // returns true +// int32_t FromPkg() // returns 1024 +int main(int argc, char** argv) { + void* handle = dlopen(argv[1], RTLD_LAZY | RTLD_GLOBAL); + if (!handle) { + fprintf(stderr, "ERROR: failed to open the shared library: %s\n", + dlerror()); + return 2; + } + + int ret = 0; + ret = check_int8(handle, "DidInitRun", 1); + if (ret != 0) { + return ret; + } + + ret = check_int8(handle, "DidMainRun", 0); + if (ret != 0) { + return ret; + } + + ret = check_int32(handle, "FromPkg", 1024); + if (ret != 0) { + return ret; + } + // test.bash looks for "PASS" to ensure this program has reached the end. + printf("PASS\n"); + return 0; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/main2.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/main2.c new file mode 100644 index 0000000000000000000000000000000000000000..f89bcca474f55617ddc40f366f3e1e5228e1bc28 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/main2.c @@ -0,0 +1,56 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include +#include +#include +#include +#include +#include + +#define fd (30) + +// Tests libgo2.so, which does not export any functions. +// Read a string from the file descriptor and print it. +int main(void) { + int i; + ssize_t n; + char buf[20]; + struct timespec ts; + + // The descriptor will be initialized in a thread, so we have to + // give a chance to get opened. + for (i = 0; i < 200; i++) { + n = read(fd, buf, sizeof buf); + if (n >= 0) + break; + if (errno != EBADF && errno != EINVAL) { + fprintf(stderr, "BUG: read: %s\n", strerror(errno)); + return 2; + } + + // An EBADF error means that the shared library has not opened the + // descriptor yet. 
+ ts.tv_sec = 0; + ts.tv_nsec = 10000000; + nanosleep(&ts, NULL); + } + + if (n < 0) { + fprintf(stderr, "BUG: failed to read any data from pipe\n"); + return 2; + } + + if (n == 0) { + fprintf(stderr, "BUG: unexpected EOF\n"); + return 2; + } + + if (n == sizeof buf) { + n--; + } + buf[n] = '\0'; + printf("%s\n", buf); + return 0; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/main3.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/main3.c new file mode 100644 index 0000000000000000000000000000000000000000..49cc0558a01a328b59959ba25724b6bcf5962dc3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/main3.c @@ -0,0 +1,29 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include +#include +#include + +// Tests "main.main" is exported on android/arm, +// which golang.org/x/mobile/app depends on. +int main(int argc, char** argv) { + void* handle = dlopen(argv[1], RTLD_LAZY | RTLD_GLOBAL); + if (!handle) { + fprintf(stderr, "ERROR: failed to open the shared library: %s\n", + dlerror()); + return 2; + } + + uintptr_t main_fn = (uintptr_t)dlsym(handle, "main.main"); + if (!main_fn) { + fprintf(stderr, "ERROR: missing main.main: %s\n", dlerror()); + return 2; + } + + // TODO(hyangah): check that main.main can run. + + printf("PASS\n"); + return 0; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/main4.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/main4.c new file mode 100644 index 0000000000000000000000000000000000000000..467a611ae7f5a6174046e6f839df42c34c5c2652 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/main4.c @@ -0,0 +1,215 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that a signal handler that uses up stack space does not crash +// if the signal is delivered to a thread running a goroutine. +// This is a lot like ../testcarchive/main2.c. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void die(const char* msg) { + perror(msg); + exit(EXIT_FAILURE); +} + +static volatile sig_atomic_t sigioSeen; + +// Use up some stack space. +static void recur(int i, char *p) { + char a[1024]; + + *p = '\0'; + if (i > 0) { + recur(i - 1, a); + } +} + +// Signal handler that uses up more stack space than a goroutine will have. +static void ioHandler(int signo, siginfo_t* info, void* ctxt) { + char a[1024]; + + recur(4, a); + sigioSeen = 1; +} + +static jmp_buf jmp; +static char* nullPointer; + +// Signal handler for SIGSEGV on a C thread. +static void segvHandler(int signo, siginfo_t* info, void* ctxt) { + sigset_t mask; + int i; + + if (sigemptyset(&mask) < 0) { + die("sigemptyset"); + } + if (sigaddset(&mask, SIGSEGV) < 0) { + die("sigaddset"); + } + i = sigprocmask(SIG_UNBLOCK, &mask, NULL); + if (i != 0) { + fprintf(stderr, "sigprocmask: %s\n", strerror(i)); + exit(EXIT_FAILURE); + } + + // Don't try this at home. + longjmp(jmp, signo); + + // We should never get here. + abort(); +} + +int main(int argc, char** argv) { + int verbose; + struct sigaction sa; + void* handle; + void (*fn)(void); + sigset_t mask; + int i; + struct timespec ts; + + verbose = argc > 2; + setvbuf(stdout, NULL, _IONBF, 0); + + // Call setsid so that we can use kill(0, SIGIO) below. + // Don't check the return value so that this works both from + // a job control shell and from a shell script. 
+ setsid(); + + if (verbose) { + fprintf(stderr, "calling sigaction\n"); + } + + memset(&sa, 0, sizeof sa); + sa.sa_sigaction = ioHandler; + if (sigemptyset(&sa.sa_mask) < 0) { + die("sigemptyset"); + } + sa.sa_flags = SA_SIGINFO; + if (sigaction(SIGIO, &sa, NULL) < 0) { + die("sigaction"); + } + + sa.sa_sigaction = segvHandler; + if (sigaction(SIGSEGV, &sa, NULL) < 0 || sigaction(SIGBUS, &sa, NULL) < 0) { + die("sigaction"); + } + + if (verbose) { + fprintf(stderr, "calling dlopen\n"); + } + + handle = dlopen(argv[1], RTLD_NOW | RTLD_GLOBAL); + if (handle == NULL) { + fprintf(stderr, "%s\n", dlerror()); + exit(EXIT_FAILURE); + } + + if (verbose) { + fprintf(stderr, "calling dlsym\n"); + } + + // Start some goroutines. + fn = (void(*)(void))dlsym(handle, "RunGoroutines"); + if (fn == NULL) { + fprintf(stderr, "%s\n", dlerror()); + exit(EXIT_FAILURE); + } + + if (verbose) { + fprintf(stderr, "calling RunGoroutines\n"); + } + + fn(); + + // Block SIGIO in this thread to make it more likely that it + // will be delivered to a goroutine. + + if (verbose) { + fprintf(stderr, "calling pthread_sigmask\n"); + } + + if (sigemptyset(&mask) < 0) { + die("sigemptyset"); + } + if (sigaddset(&mask, SIGIO) < 0) { + die("sigaddset"); + } + i = pthread_sigmask(SIG_BLOCK, &mask, NULL); + if (i != 0) { + fprintf(stderr, "pthread_sigmask: %s\n", strerror(i)); + exit(EXIT_FAILURE); + } + + if (verbose) { + fprintf(stderr, "calling kill\n"); + } + + if (kill(0, SIGIO) < 0) { + die("kill"); + } + + if (verbose) { + fprintf(stderr, "waiting for sigioSeen\n"); + } + + // Wait until the signal has been delivered. + i = 0; + while (!sigioSeen) { + ts.tv_sec = 0; + ts.tv_nsec = 1000000; + nanosleep(&ts, NULL); + i++; + if (i > 5000) { + fprintf(stderr, "looping too long waiting for signal\n"); + exit(EXIT_FAILURE); + } + } + + if (verbose) { + fprintf(stderr, "calling setjmp\n"); + } + + // Test that a SIGSEGV on this thread is delivered to us. 
+ if (setjmp(jmp) == 0) { + if (verbose) { + fprintf(stderr, "triggering SIGSEGV\n"); + } + + *nullPointer = '\0'; + + fprintf(stderr, "continued after address error\n"); + exit(EXIT_FAILURE); + } + + if (verbose) { + fprintf(stderr, "calling dlsym\n"); + } + + // Make sure that a SIGSEGV in Go causes a run-time panic. + fn = (void (*)(void))dlsym(handle, "TestSEGV"); + if (fn == NULL) { + fprintf(stderr, "%s\n", dlerror()); + exit(EXIT_FAILURE); + } + + if (verbose) { + fprintf(stderr, "calling TestSEGV\n"); + } + + fn(); + + printf("PASS\n"); + return 0; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/main5.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/main5.c new file mode 100644 index 0000000000000000000000000000000000000000..563329e3311fe09fe009ec5a950c8ff485f69545 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/main5.c @@ -0,0 +1,205 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that a signal handler works in non-Go code when using +// os/signal.Notify. +// This is a lot like ../testcarchive/main3.c. 
+ +#include +#include +#include +#include +#include +#include +#include +#include + +static void die(const char* msg) { + perror(msg); + exit(EXIT_FAILURE); +} + +static volatile sig_atomic_t sigioSeen; + +static void ioHandler(int signo, siginfo_t* info, void* ctxt) { + sigioSeen = 1; +} + +int main(int argc, char** argv) { + int verbose; + struct sigaction sa; + void* handle; + void (*catchSIGIO)(void); + void (*resetSIGIO)(void); + void (*awaitSIGIO)(); + bool (*sawSIGIO)(); + int i; + struct timespec ts; + + verbose = argc > 2; + setvbuf(stdout, NULL, _IONBF, 0); + + if (verbose) { + fprintf(stderr, "calling sigaction\n"); + } + + memset(&sa, 0, sizeof sa); + sa.sa_sigaction = ioHandler; + if (sigemptyset(&sa.sa_mask) < 0) { + die("sigemptyset"); + } + sa.sa_flags = SA_SIGINFO; + if (sigaction(SIGIO, &sa, NULL) < 0) { + die("sigaction"); + } + + if (verbose) { + fprintf(stderr, "calling dlopen\n"); + } + + handle = dlopen(argv[1], RTLD_NOW | RTLD_GLOBAL); + if (handle == NULL) { + fprintf(stderr, "%s\n", dlerror()); + exit(EXIT_FAILURE); + } + + // At this point there should not be a Go signal handler + // installed for SIGIO. + + if (verbose) { + fprintf(stderr, "raising SIGIO\n"); + } + + if (raise(SIGIO) < 0) { + die("raise"); + } + + if (verbose) { + fprintf(stderr, "waiting for sigioSeen\n"); + } + + // Wait until the signal has been delivered. + i = 0; + while (!sigioSeen) { + ts.tv_sec = 0; + ts.tv_nsec = 1000000; + nanosleep(&ts, NULL); + i++; + if (i > 5000) { + fprintf(stderr, "looping too long waiting for signal\n"); + exit(EXIT_FAILURE); + } + } + + sigioSeen = 0; + + // Tell the Go code to catch SIGIO. 
+ + if (verbose) { + fprintf(stderr, "calling dlsym\n"); + } + + catchSIGIO = (void(*)(void))dlsym(handle, "CatchSIGIO"); + if (catchSIGIO == NULL) { + fprintf(stderr, "%s\n", dlerror()); + exit(EXIT_FAILURE); + } + + if (verbose) { + fprintf(stderr, "calling CatchSIGIO\n"); + } + + catchSIGIO(); + + if (verbose) { + fprintf(stderr, "raising SIGIO\n"); + } + + if (raise(SIGIO) < 0) { + die("raise"); + } + + if (verbose) { + fprintf(stderr, "calling dlsym\n"); + } + + // Check that the Go code saw SIGIO. + awaitSIGIO = (void (*)(void))dlsym(handle, "AwaitSIGIO"); + if (awaitSIGIO == NULL) { + fprintf(stderr, "%s\n", dlerror()); + exit(EXIT_FAILURE); + } + + if (verbose) { + fprintf(stderr, "calling AwaitSIGIO\n"); + } + + awaitSIGIO(); + + if (sigioSeen != 0) { + fprintf(stderr, "C handler saw SIGIO when only Go handler should have\n"); + exit(EXIT_FAILURE); + } + + // Tell the Go code to stop catching SIGIO. + + if (verbose) { + fprintf(stderr, "calling dlsym\n"); + } + + resetSIGIO = (void (*)(void))dlsym(handle, "ResetSIGIO"); + if (resetSIGIO == NULL) { + fprintf(stderr, "%s\n", dlerror()); + exit(EXIT_FAILURE); + } + + if (verbose) { + fprintf(stderr, "calling ResetSIGIO\n"); + } + + resetSIGIO(); + + sawSIGIO = (bool (*)(void))dlsym(handle, "SawSIGIO"); + if (sawSIGIO == NULL) { + fprintf(stderr, "%s\n", dlerror()); + exit(EXIT_FAILURE); + } + + if (verbose) { + fprintf(stderr, "raising SIGIO\n"); + } + + if (raise(SIGIO) < 0) { + die("raise"); + } + + if (verbose) { + fprintf(stderr, "calling SawSIGIO\n"); + } + + if (sawSIGIO()) { + fprintf(stderr, "Go handler saw SIGIO after Reset\n"); + exit(EXIT_FAILURE); + } + + if (verbose) { + fprintf(stderr, "waiting for sigioSeen\n"); + } + + // Wait until the signal has been delivered. 
+ i = 0; + while (!sigioSeen) { + ts.tv_sec = 0; + ts.tv_nsec = 1000000; + nanosleep(&ts, NULL); + i++; + if (i > 5000) { + fprintf(stderr, "looping too long waiting for signal\n"); + exit(EXIT_FAILURE); + } + } + + printf("PASS\n"); + return 0; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/p/p.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/p/p.go new file mode 100644 index 0000000000000000000000000000000000000000..0f02cf3ce6cf94c630542d8df953ad0db40cbff7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testcshared/testdata/p/p.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +import "C" + +//export FromPkg +func FromPkg() int32 { return 1024 } + +//export Divu +func Divu(a, b uint32) uint32 { return a / b } diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/argposition_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/argposition_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0876dc4caf35aac9704502be9540c8ca8617813b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/argposition_test.go @@ -0,0 +1,137 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Issue 42580: cmd/cgo: shifting identifier position in ast + +package errorstest + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "internal/testenv" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" +) + +type ShortPosition struct { + Line int + Column int + Visited bool +} + +type IdentPositionInfo map[string][]ShortPosition + +type Visitor struct { + identPosInfo IdentPositionInfo + fset *token.FileSet + t *testing.T +} + +func (v *Visitor) Visit(node ast.Node) ast.Visitor { + if ident, ok := node.(*ast.Ident); ok { + if expectedPositions, ok := v.identPosInfo[ident.Name]; ok { + gotMatch := false + var errorMessage strings.Builder + for caseIndex, expectedPos := range expectedPositions { + actualPosition := v.fset.PositionFor(ident.Pos(), true) + errorOccured := false + if expectedPos.Line != actualPosition.Line { + fmt.Fprintf(&errorMessage, "wrong line number for ident %s: expected: %d got: %d\n", ident.Name, expectedPos.Line, actualPosition.Line) + errorOccured = true + } + if expectedPos.Column != actualPosition.Column { + fmt.Fprintf(&errorMessage, "wrong column number for ident %s: expected: %d got: %d\n", ident.Name, expectedPos.Column, actualPosition.Column) + errorOccured = true + } + if errorOccured { + continue + } + gotMatch = true + expectedPositions[caseIndex].Visited = true + } + + if !gotMatch { + v.t.Errorf(errorMessage.String()) + } + } + } + return v +} + +func TestArgumentsPositions(t *testing.T) { + testenv.MustHaveCGO(t) + testenv.MustHaveExec(t) + + testdata, err := filepath.Abs("testdata") + if err != nil { + t.Fatal(err) + } + + tmpPath := t.TempDir() + + dir := filepath.Join(tmpPath, "src", "testpositions") + if err := os.MkdirAll(dir, 0755); err != nil { + t.Fatal(err) + } + + cmd := exec.Command("go", "tool", "cgo", + "-srcdir", testdata, + "-objdir", dir, + "issue42580.go") + cmd.Stderr = new(bytes.Buffer) + + err = cmd.Run() + if err != nil { + t.Fatalf("%s: %v\n%s", cmd, err, cmd.Stderr) + } + 
mainProcessed, err := os.ReadFile(filepath.Join(dir, "issue42580.cgo1.go")) + if err != nil { + t.Fatal(err) + } + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, "", mainProcessed, parser.AllErrors) + if err != nil { + fmt.Println(err) + return + } + + expectation := IdentPositionInfo{ + "checkedPointer": []ShortPosition{ + ShortPosition{ + Line: 32, + Column: 56, + }, + }, + "singleInnerPointerChecked": []ShortPosition{ + ShortPosition{ + Line: 37, + Column: 91, + }, + }, + "doublePointerChecked": []ShortPosition{ + ShortPosition{ + Line: 42, + Column: 91, + }, + }, + } + for _, decl := range f.Decls { + if fdecl, ok := decl.(*ast.FuncDecl); ok { + ast.Walk(&Visitor{expectation, fset, t}, fdecl.Body) + } + } + for ident, positions := range expectation { + for _, position := range positions { + if !position.Visited { + t.Errorf("Position %d:%d missed for %s ident", position.Line, position.Column, ident) + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/badsym_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/badsym_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6c87977bd1dcc1061704b107cfbdaa6bee5078b7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/badsym_test.go @@ -0,0 +1,231 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errorstest + +import ( + "bytes" + "cmd/internal/quoted" + "internal/testenv" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "unicode" +) + +// A manually modified object file could pass unexpected characters +// into the files generated by cgo. 
+ +const magicInput = "abcdefghijklmnopqrstuvwxyz0123" +const magicReplace = "\n//go:cgo_ldflag \"-badflag\"\n//" + +const cSymbol = "BadSymbol" + magicInput + "Name" +const cDefSource = "int " + cSymbol + " = 1;" +const cRefSource = "extern int " + cSymbol + "; int F() { return " + cSymbol + "; }" + +// goSource is the source code for the trivial Go file we use. +// We will replace TMPDIR with the temporary directory name. +const goSource = ` +package main + +// #cgo LDFLAGS: TMPDIR/cbad.o TMPDIR/cbad.so +// extern int F(); +import "C" + +func main() { + println(C.F()) +} +` + +func TestBadSymbol(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + + dir := t.TempDir() + + mkdir := func(base string) string { + ret := filepath.Join(dir, base) + if err := os.Mkdir(ret, 0755); err != nil { + t.Fatal(err) + } + return ret + } + + cdir := mkdir("c") + godir := mkdir("go") + + makeFile := func(mdir, base, source string) string { + ret := filepath.Join(mdir, base) + if err := os.WriteFile(ret, []byte(source), 0644); err != nil { + t.Fatal(err) + } + return ret + } + + cDefFile := makeFile(cdir, "cdef.c", cDefSource) + cRefFile := makeFile(cdir, "cref.c", cRefSource) + + ccCmd := cCompilerCmd(t) + + cCompile := func(arg, base, src string) string { + out := filepath.Join(cdir, base) + run := append(ccCmd, arg, "-o", out, src) + output, err := exec.Command(run[0], run[1:]...).CombinedOutput() + if err != nil { + t.Log(run) + t.Logf("%s", output) + t.Fatal(err) + } + if err := os.Remove(src); err != nil { + t.Fatal(err) + } + return out + } + + // Build a shared library that defines a symbol whose name + // contains magicInput. + + cShared := cCompile("-shared", "c.so", cDefFile) + + // Build an object file that refers to the symbol whose name + // contains magicInput. + + cObj := cCompile("-c", "c.o", cRefFile) + + // Rewrite the shared library and the object file, replacing + // magicInput with magicReplace. 
This will have the effect of + // introducing a symbol whose name looks like a cgo command. + // The cgo tool will use that name when it generates the + // _cgo_import.go file, thus smuggling a magic //go:cgo_ldflag + // pragma into a Go file. We used to not check the pragmas in + // _cgo_import.go. + + rewrite := func(from, to string) { + obj, err := os.ReadFile(from) + if err != nil { + t.Fatal(err) + } + + if bytes.Count(obj, []byte(magicInput)) == 0 { + t.Fatalf("%s: did not find magic string", from) + } + + if len(magicInput) != len(magicReplace) { + t.Fatalf("internal test error: different magic lengths: %d != %d", len(magicInput), len(magicReplace)) + } + + obj = bytes.ReplaceAll(obj, []byte(magicInput), []byte(magicReplace)) + + if err := os.WriteFile(to, obj, 0644); err != nil { + t.Fatal(err) + } + } + + cBadShared := filepath.Join(godir, "cbad.so") + rewrite(cShared, cBadShared) + + cBadObj := filepath.Join(godir, "cbad.o") + rewrite(cObj, cBadObj) + + goSourceBadObject := strings.ReplaceAll(goSource, "TMPDIR", godir) + makeFile(godir, "go.go", goSourceBadObject) + + makeFile(godir, "go.mod", "module badsym") + + // Try to build our little package. + cmd := exec.Command("go", "build", "-ldflags=-v") + cmd.Dir = godir + output, err := cmd.CombinedOutput() + + // The build should fail, but we want it to fail because we + // detected the error, not because we passed a bad flag to the + // C linker. + + if err == nil { + t.Errorf("go build succeeded unexpectedly") + } + + t.Logf("%s", output) + + for _, line := range bytes.Split(output, []byte("\n")) { + if bytes.Contains(line, []byte("dynamic symbol")) && bytes.Contains(line, []byte("contains unsupported character")) { + // This is the error from cgo. + continue + } + + // We passed -ldflags=-v to see the external linker invocation, + // which should not include -badflag. 
+ if bytes.Contains(line, []byte("-badflag")) { + t.Error("output should not mention -badflag") + } + + // Also check for compiler errors, just in case. + // GCC says "unrecognized command line option". + // clang says "unknown argument". + if bytes.Contains(line, []byte("unrecognized")) || bytes.Contains(output, []byte("unknown")) { + t.Error("problem should have been caught before invoking C linker") + } + } +} + +func cCompilerCmd(t *testing.T) []string { + cc, err := quoted.Split(goEnv(t, "CC")) + if err != nil { + t.Skipf("parsing go env CC: %s", err) + } + if len(cc) == 0 { + t.Skipf("no C compiler") + } + testenv.MustHaveExecPath(t, cc[0]) + + out := goEnv(t, "GOGCCFLAGS") + quote := '\000' + start := 0 + lastSpace := true + backslash := false + s := string(out) + for i, c := range s { + if quote == '\000' && unicode.IsSpace(c) { + if !lastSpace { + cc = append(cc, s[start:i]) + lastSpace = true + } + } else { + if lastSpace { + start = i + lastSpace = false + } + if quote == '\000' && !backslash && (c == '"' || c == '\'') { + quote = c + backslash = false + } else if !backslash && quote == c { + quote = '\000' + } else if (quote == '\000' || quote == '"') && !backslash && c == '\\' { + backslash = true + } else { + backslash = false + } + } + } + if !lastSpace { + cc = append(cc, s[start:]) + } + + // Force reallocation (and avoid aliasing bugs) for tests that append to cc. 
+ cc = cc[:len(cc):len(cc)] + + return cc +} + +func goEnv(t *testing.T, key string) string { + out, err := exec.Command("go", "env", key).CombinedOutput() + if err != nil { + t.Logf("go env %s\n", key) + t.Logf("%s", out) + t.Fatal(err) + } + return strings.TrimSpace(string(out)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/errors_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/errors_test.go new file mode 100644 index 0000000000000000000000000000000000000000..86236249ca96928d226dae21dc5b32b705ad7bcd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/errors_test.go @@ -0,0 +1,180 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errorstest + +import ( + "bytes" + "fmt" + "internal/testenv" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" + "testing" +) + +func path(file string) string { + return filepath.Join("testdata", file) +} + +func check(t *testing.T, file string) { + t.Run(file, func(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + t.Parallel() + + contents, err := os.ReadFile(path(file)) + if err != nil { + t.Fatal(err) + } + var errors []*regexp.Regexp + for i, line := range bytes.Split(contents, []byte("\n")) { + if bytes.HasSuffix(line, []byte("ERROR HERE")) { + re := regexp.MustCompile(regexp.QuoteMeta(fmt.Sprintf("%s:%d:", file, i+1))) + errors = append(errors, re) + continue + } + + if _, frag, ok := bytes.Cut(line, []byte("ERROR HERE: ")); ok { + re, err := regexp.Compile(fmt.Sprintf(":%d:.*%s", i+1, frag)) + if err != nil { + t.Errorf("Invalid regexp after `ERROR HERE: `: %#q", frag) + continue + } + errors = append(errors, re) + } + + if _, frag, ok := bytes.Cut(line, []byte("ERROR MESSAGE: ")); ok { + re, err := regexp.Compile(string(frag)) + if err != nil { + t.Errorf("Invalid regexp after 
`ERROR MESSAGE: `: %#q", frag) + continue + } + errors = append(errors, re) + } + } + if len(errors) == 0 { + t.Fatalf("cannot find ERROR HERE") + } + expect(t, file, errors) + }) +} + +func expect(t *testing.T, file string, errors []*regexp.Regexp) { + dir, err := os.MkdirTemp("", filepath.Base(t.Name())) + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + dst := filepath.Join(dir, strings.TrimSuffix(file, ".go")) + cmd := exec.Command("go", "build", "-gcflags=-L -e", "-o="+dst, path(file)) // TODO(gri) no need for -gcflags=-L if go tool is adjusted + out, err := cmd.CombinedOutput() + if err == nil { + t.Errorf("expected cgo to fail but it succeeded") + } + + lines := bytes.Split(out, []byte("\n")) + for _, re := range errors { + found := false + for _, line := range lines { + if re.Match(line) { + t.Logf("found match for %#q: %q", re, line) + found = true + break + } + } + if !found { + t.Errorf("expected error output to contain %#q", re) + } + } + + if t.Failed() { + t.Logf("actual output:\n%s", out) + } +} + +func sizeofLongDouble(t *testing.T) int { + testenv.MustHaveGoRun(t) + testenv.MustHaveCGO(t) + cmd := exec.Command("go", "run", path("long_double_size.go")) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("%#q: %v:\n%s", strings.Join(cmd.Args, " "), err, out) + } + + i, err := strconv.Atoi(strings.TrimSpace(string(out))) + if err != nil { + t.Fatalf("long_double_size.go printed invalid size: %s", out) + } + return i +} + +func TestReportsTypeErrors(t *testing.T) { + for _, file := range []string{ + "err1.go", + "err2.go", + "err5.go", + "issue11097a.go", + "issue11097b.go", + "issue18452.go", + "issue18889.go", + "issue28721.go", + "issue33061.go", + "issue50710.go", + } { + check(t, file) + } + + if sizeofLongDouble(t) > 8 { + for _, file := range []string{ + "err4.go", + "issue28069.go", + } { + check(t, file) + } + } +} + +func TestToleratesOptimizationFlag(t *testing.T) { + for _, cflags := range []string{ + "", + "-O", 
+ } { + cflags := cflags + t.Run(cflags, func(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + t.Parallel() + + cmd := exec.Command("go", "build", path("issue14669.go")) + cmd.Env = append(os.Environ(), "CGO_CFLAGS="+cflags) + out, err := cmd.CombinedOutput() + if err != nil { + t.Errorf("%#q: %v:\n%s", strings.Join(cmd.Args, " "), err, out) + } + }) + } +} + +func TestMallocCrashesOnNil(t *testing.T) { + testenv.MustHaveCGO(t) + testenv.MustHaveGoRun(t) + t.Parallel() + + cmd := exec.Command("go", "run", path("malloc.go")) + out, err := cmd.CombinedOutput() + if err == nil { + t.Logf("%#q:\n%s", strings.Join(cmd.Args, " "), out) + t.Fatalf("succeeded unexpectedly") + } +} + +func TestNotMatchedCFunction(t *testing.T) { + file := "notmatchedcfunction.go" + check(t, file) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/ptr_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/ptr_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8fff7615d3560c78e0d8ec7ed7c75ccb6f68c6b8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/ptr_test.go @@ -0,0 +1,707 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests that cgo detects invalid pointer passing at runtime. + +package errorstest + +import ( + "bytes" + "flag" + "fmt" + "internal/testenv" + "os" + "os/exec" + "path/filepath" + "slices" + "strings" + "sync/atomic" + "testing" +) + +var tmp = flag.String("tmp", "", "use `dir` for temporary files and do not clean up") + +// ptrTest is the tests without the boilerplate. 
+type ptrTest struct { + name string // for reporting + c string // the cgo comment + c1 string // cgo comment forced into non-export cgo file + imports []string // a list of imports + support string // supporting functions + body string // the body of the main function + extra []extra // extra files + fail bool // whether the test should fail + expensive bool // whether the test requires the expensive check +} + +type extra struct { + name string + contents string +} + +var ptrTests = []ptrTest{ + { + // Passing a pointer to a struct that contains a Go pointer. + name: "ptr1", + c: `typedef struct s1 { int *p; } s1; void f1(s1 *ps) {}`, + body: `C.f1(&C.s1{new(C.int)})`, + fail: true, + }, + { + // Passing a pointer to a struct that contains a Go pointer. + name: "ptr2", + c: `typedef struct s2 { int *p; } s2; void f2(s2 *ps) {}`, + body: `p := &C.s2{new(C.int)}; C.f2(p)`, + fail: true, + }, + { + // Passing a pointer to an int field of a Go struct + // that (irrelevantly) contains a Go pointer. + name: "ok1", + c: `struct s3 { int i; int *p; }; void f3(int *p) {}`, + body: `p := &C.struct_s3{i: 0, p: new(C.int)}; C.f3(&p.i)`, + fail: false, + }, + { + // Passing a pointer to a pointer field of a Go struct. + name: "ptrfield", + c: `struct s4 { int i; int *p; }; void f4(int **p) {}`, + body: `p := &C.struct_s4{i: 0, p: new(C.int)}; C.f4(&p.p)`, + fail: true, + }, + { + // Passing a pointer to a pointer field of a Go + // struct, where the field does not contain a Go + // pointer, but another field (irrelevantly) does. + name: "ptrfieldok", + c: `struct s5 { int *p1; int *p2; }; void f5(int **p) {}`, + body: `p := &C.struct_s5{p1: nil, p2: new(C.int)}; C.f5(&p.p1)`, + fail: false, + }, + { + // Passing the address of a slice with no Go pointers. + name: "sliceok1", + c: `void f6(void **p) {}`, + imports: []string{"unsafe"}, + body: `s := []unsafe.Pointer{nil}; C.f6(&s[0])`, + fail: false, + }, + { + // Passing the address of a slice with a Go pointer. 
+ name: "sliceptr1", + c: `void f7(void **p) {}`, + imports: []string{"unsafe"}, + body: `i := 0; s := []unsafe.Pointer{unsafe.Pointer(&i)}; C.f7(&s[0])`, + fail: true, + }, + { + // Passing the address of a slice with a Go pointer, + // where we are passing the address of an element that + // is not a Go pointer. + name: "sliceptr2", + c: `void f8(void **p) {}`, + imports: []string{"unsafe"}, + body: `i := 0; s := []unsafe.Pointer{nil, unsafe.Pointer(&i)}; C.f8(&s[0])`, + fail: true, + }, + { + // Passing the address of a slice that is an element + // in a struct only looks at the slice. + name: "sliceok2", + c: `void f9(void **p) {}`, + imports: []string{"unsafe"}, + support: `type S9 struct { p *int; s []unsafe.Pointer }`, + body: `i := 0; p := &S9{p:&i, s:[]unsafe.Pointer{nil}}; C.f9(&p.s[0])`, + fail: false, + }, + { + // Passing the address of a slice of an array that is + // an element in a struct, with a type conversion. + name: "sliceok3", + c: `void f10(void* p) {}`, + imports: []string{"unsafe"}, + support: `type S10 struct { p *int; a [4]byte }`, + body: `i := 0; p := &S10{p:&i}; s := p.a[:]; C.f10(unsafe.Pointer(&s[0]))`, + fail: false, + }, + { + // Passing the address of a slice of an array that is + // an element in a struct, with a type conversion. + name: "sliceok4", + c: `typedef void* PV11; void f11(PV11 p) {}`, + imports: []string{"unsafe"}, + support: `type S11 struct { p *int; a [4]byte }`, + body: `i := 0; p := &S11{p:&i}; C.f11(C.PV11(unsafe.Pointer(&p.a[0])))`, + fail: false, + }, + { + // Passing the address of a static variable with no + // pointers doesn't matter. + name: "varok", + c: `void f12(char** parg) {}`, + support: `var hello12 = [...]C.char{'h', 'e', 'l', 'l', 'o'}`, + body: `parg := [1]*C.char{&hello12[0]}; C.f12(&parg[0])`, + fail: false, + }, + { + // Passing the address of a static variable with + // pointers does matter. 
+ name: "var1", + c: `void f13(char*** parg) {}`, + support: `var hello13 = [...]*C.char{new(C.char)}`, + body: `parg := [1]**C.char{&hello13[0]}; C.f13(&parg[0])`, + fail: true, + }, + { + // Storing a Go pointer into C memory should fail. + name: "barrier", + c: `#include + char **f14a() { return malloc(sizeof(char*)); } + void f14b(char **p) {}`, + body: `p := C.f14a(); *p = new(C.char); C.f14b(p)`, + fail: true, + expensive: true, + }, + { + // Storing a pinned Go pointer into C memory should succeed. + name: "barrierpinnedok", + c: `#include + char **f14a2() { return malloc(sizeof(char*)); } + void f14b2(char **p) {}`, + imports: []string{"runtime"}, + body: `var pinr runtime.Pinner; p := C.f14a2(); x := new(C.char); pinr.Pin(x); *p = x; C.f14b2(p); pinr.Unpin()`, + fail: false, + expensive: true, + }, + { + // Storing a Go pointer into C memory by assigning a + // large value should fail. + name: "barrierstruct", + c: `#include + struct s15 { char *a[10]; }; + struct s15 *f15() { return malloc(sizeof(struct s15)); } + void f15b(struct s15 *p) {}`, + body: `p := C.f15(); p.a = [10]*C.char{new(C.char)}; C.f15b(p)`, + fail: true, + expensive: true, + }, + { + // Storing a Go pointer into C memory using a slice + // copy should fail. + name: "barrierslice", + c: `#include + struct s16 { char *a[10]; }; + struct s16 *f16() { return malloc(sizeof(struct s16)); } + void f16b(struct s16 *p) {}`, + body: `p := C.f16(); copy(p.a[:], []*C.char{new(C.char)}); C.f16b(p)`, + fail: true, + expensive: true, + }, + { + // A very large value uses a GC program, which is a + // different code path. + name: "barriergcprogarray", + c: `#include + struct s17 { char *a[32769]; }; + struct s17 *f17() { return malloc(sizeof(struct s17)); } + void f17b(struct s17 *p) {}`, + body: `p := C.f17(); p.a = [32769]*C.char{new(C.char)}; C.f17b(p)`, + fail: true, + expensive: true, + }, + { + // Similar case, with a source on the heap. 
+ name: "barriergcprogarrayheap", + c: `#include + struct s18 { char *a[32769]; }; + struct s18 *f18() { return malloc(sizeof(struct s18)); } + void f18b(struct s18 *p) {} + void f18c(void *p) {}`, + imports: []string{"unsafe"}, + body: `p := C.f18(); n := &[32769]*C.char{new(C.char)}; p.a = *n; C.f18b(p); n[0] = nil; C.f18c(unsafe.Pointer(n))`, + fail: true, + expensive: true, + }, + { + // A GC program with a struct. + name: "barriergcprogstruct", + c: `#include + struct s19a { char *a[32769]; }; + struct s19b { struct s19a f; }; + struct s19b *f19() { return malloc(sizeof(struct s19b)); } + void f19b(struct s19b *p) {}`, + body: `p := C.f19(); p.f = C.struct_s19a{[32769]*C.char{new(C.char)}}; C.f19b(p)`, + fail: true, + expensive: true, + }, + { + // Similar case, with a source on the heap. + name: "barriergcprogstructheap", + c: `#include + struct s20a { char *a[32769]; }; + struct s20b { struct s20a f; }; + struct s20b *f20() { return malloc(sizeof(struct s20b)); } + void f20b(struct s20b *p) {} + void f20c(void *p) {}`, + imports: []string{"unsafe"}, + body: `p := C.f20(); n := &C.struct_s20a{[32769]*C.char{new(C.char)}}; p.f = *n; C.f20b(p); n.a[0] = nil; C.f20c(unsafe.Pointer(n))`, + fail: true, + expensive: true, + }, + { + // Exported functions may not return Go pointers. + name: "export1", + c: `#ifdef _WIN32 + __declspec(dllexport) + #endif + extern unsigned char *GoFn21();`, + support: `//export GoFn21 + func GoFn21() *byte { return new(byte) }`, + body: `C.GoFn21()`, + fail: true, + }, + { + // Returning a C pointer is fine. + name: "exportok", + c: `#include + #ifdef _WIN32 + __declspec(dllexport) + #endif + extern unsigned char *GoFn22();`, + support: `//export GoFn22 + func GoFn22() *byte { return (*byte)(C.malloc(1)) }`, + body: `C.GoFn22()`, + }, + { + // Passing a Go string is fine. 
+ name: "passstring", + c: `#include + typedef struct { const char *p; ptrdiff_t n; } gostring23; + gostring23 f23(gostring23 s) { return s; }`, + imports: []string{"unsafe"}, + body: `s := "a"; r := C.f23(*(*C.gostring23)(unsafe.Pointer(&s))); if *(*string)(unsafe.Pointer(&r)) != s { panic(r) }`, + }, + { + // Passing a slice of Go strings fails. + name: "passstringslice", + c: `void f24(void *p) {}`, + imports: []string{"strings", "unsafe"}, + support: `type S24 struct { a [1]string }`, + body: `s := S24{a:[1]string{strings.Repeat("a", 2)}}; C.f24(unsafe.Pointer(&s.a[0]))`, + fail: true, + }, + { + // Exported functions may not return strings. + name: "retstring", + c: `extern void f25();`, + imports: []string{"strings"}, + support: `//export GoStr25 + func GoStr25() string { return strings.Repeat("a", 2) }`, + body: `C.f25()`, + c1: `#include + typedef struct { const char *p; ptrdiff_t n; } gostring25; + extern gostring25 GoStr25(); + void f25() { GoStr25(); }`, + fail: true, + }, + { + // Don't check non-pointer data. + // Uses unsafe code to get a pointer we shouldn't check. + // Although we use unsafe, the uintptr represents an integer + // that happens to have the same representation as a pointer; + // that is, we are testing something that is not unsafe. + name: "ptrdata1", + c: `#include + void f26(void* p) {}`, + imports: []string{"unsafe"}, + support: `type S26 struct { p *int; a [8*8]byte; u uintptr }`, + body: `i := 0; p := &S26{u:uintptr(unsafe.Pointer(&i))}; q := (*S26)(C.malloc(C.size_t(unsafe.Sizeof(*p)))); *q = *p; C.f26(unsafe.Pointer(q))`, + fail: false, + }, + { + // Like ptrdata1, but with a type that uses a GC program. 
+ name: "ptrdata2", + c: `#include + void f27(void* p) {}`, + imports: []string{"unsafe"}, + support: `type S27 struct { p *int; a [32769*8]byte; q *int; u uintptr }`, + body: `i := 0; p := S27{u:uintptr(unsafe.Pointer(&i))}; q := (*S27)(C.malloc(C.size_t(unsafe.Sizeof(p)))); *q = p; C.f27(unsafe.Pointer(q))`, + fail: false, + }, + { + // Check deferred pointers when they are used, not + // when the defer statement is run. + name: "defer1", + c: `typedef struct s28 { int *p; } s28; void f28(s28 *ps) {}`, + body: `p := &C.s28{}; defer C.f28(p); p.p = new(C.int)`, + fail: true, + }, + { + // Check a pointer to a union if the union has any + // pointer fields. + name: "union1", + c: `typedef union { char **p; unsigned long i; } u29; void f29(u29 *pu) {}`, + imports: []string{"unsafe"}, + body: `var b C.char; p := &b; C.f29((*C.u29)(unsafe.Pointer(&p)))`, + fail: true, + }, + { + // Don't check a pointer to a union if the union does + // not have any pointer fields. + // Like ptrdata1 above, the uintptr represents an + // integer that happens to have the same + // representation as a pointer. + name: "union2", + c: `typedef union { unsigned long i; } u39; void f39(u39 *pu) {}`, + imports: []string{"unsafe"}, + body: `var b C.char; p := &b; C.f39((*C.u39)(unsafe.Pointer(&p)))`, + fail: false, + }, + { + // Test preemption while entering a cgo call. Issue #21306. + name: "preemptduringcall", + c: `void f30() {}`, + imports: []string{"runtime", "sync"}, + body: `var wg sync.WaitGroup; wg.Add(100); for i := 0; i < 100; i++ { go func(i int) { for j := 0; j < 100; j++ { C.f30(); runtime.GOMAXPROCS(i) }; wg.Done() }(i) }; wg.Wait()`, + fail: false, + }, + { + // Test poller deadline with cgocheck=2. Issue #23435. + name: "deadline", + c: `#define US31 10`, + imports: []string{"os", "time"}, + body: `r, _, _ := os.Pipe(); r.SetDeadline(time.Now().Add(C.US31 * time.Microsecond))`, + fail: false, + }, + { + // Test for double evaluation of channel receive. 
+ name: "chanrecv", + c: `void f32(char** p) {}`, + imports: []string{"time"}, + body: `c := make(chan []*C.char, 2); c <- make([]*C.char, 1); go func() { time.Sleep(10 * time.Second); panic("received twice from chan") }(); C.f32(&(<-c)[0]);`, + fail: false, + }, + { + // Test that converting the address of a struct field + // to unsafe.Pointer still just checks that field. + // Issue #25941. + name: "structfield", + c: `void f33(void* p) {}`, + imports: []string{"unsafe"}, + support: `type S33 struct { p *int; a [8]byte; u uintptr }`, + body: `s := &S33{p: new(int)}; C.f33(unsafe.Pointer(&s.a))`, + fail: false, + }, + { + // Test that converting multiple struct field + // addresses to unsafe.Pointer still just checks those + // fields. Issue #25941. + name: "structfield2", + c: `void f34(void* p, int r, void* s) {}`, + imports: []string{"unsafe"}, + support: `type S34 struct { a [8]byte; p *int; b int64; }`, + body: `s := &S34{p: new(int)}; C.f34(unsafe.Pointer(&s.a), 32, unsafe.Pointer(&s.b))`, + fail: false, + }, + { + // Test that second argument to cgoCheckPointer is + // evaluated when a deferred function is deferred, not + // when it is run. + name: "defer2", + c: `void f35(char **pc) {}`, + support: `type S35a struct { s []*C.char }; type S35b struct { ps *S35a }`, + body: `p := &S35b{&S35a{[]*C.char{nil}}}; defer C.f35(&p.ps.s[0]); p.ps = nil`, + fail: false, + }, + { + // Test that indexing into a function call still + // examines only the slice being indexed. + name: "buffer", + c: `void f36(void *p) {}`, + imports: []string{"bytes", "unsafe"}, + body: `var b bytes.Buffer; b.WriteString("a"); C.f36(unsafe.Pointer(&b.Bytes()[0]))`, + fail: false, + }, + { + // Test that bgsweep releasing a finalizer is OK. 
+ name: "finalizer", + c: `// Nothing to declare.`, + imports: []string{"os"}, + support: `func open37() { os.Open(os.Args[0]) }; var G37 [][]byte`, + body: `for i := 0; i < 10000; i++ { G37 = append(G37, make([]byte, 4096)); if i % 100 == 0 { G37 = nil; open37() } }`, + fail: false, + }, + { + // Test that converting generated struct to interface is OK. + name: "structof", + c: `// Nothing to declare.`, + imports: []string{"reflect"}, + support: `type MyInt38 int; func (i MyInt38) Get() int { return int(i) }; type Getter38 interface { Get() int }`, + body: `t := reflect.StructOf([]reflect.StructField{{Name: "MyInt38", Type: reflect.TypeOf(MyInt38(0)), Anonymous: true}}); v := reflect.New(t).Elem(); v.Interface().(Getter38).Get()`, + fail: false, + }, + { + // Test that a converted address of a struct field results + // in a check for just that field and not the whole struct. + name: "structfieldcast", + c: `struct S40i { int i; int* p; }; void f40(struct S40i* p) {}`, + support: `type S40 struct { p *int; a C.struct_S40i }`, + body: `s := &S40{p: new(int)}; C.f40((*C.struct_S40i)(&s.a))`, + fail: false, + }, + { + // Test that we handle unsafe.StringData. 
+ name: "stringdata", + c: `void f41(void* p) {}`, + imports: []string{"unsafe"}, + body: `s := struct { a [4]byte; p *int }{p: new(int)}; str := unsafe.String(&s.a[0], 4); C.f41(unsafe.Pointer(unsafe.StringData(str)))`, + fail: false, + }, + { + name: "slicedata", + c: `void f42(void* p) {}`, + imports: []string{"unsafe"}, + body: `s := []*byte{nil, new(byte)}; C.f42(unsafe.Pointer(unsafe.SliceData(s)))`, + fail: true, + }, + { + name: "slicedata2", + c: `void f43(void* p) {}`, + imports: []string{"unsafe"}, + body: `s := struct { a [4]byte; p *int }{p: new(int)}; C.f43(unsafe.Pointer(unsafe.SliceData(s.a[:])))`, + fail: false, + }, +} + +func TestPointerChecks(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + + var gopath string + var dir string + if *tmp != "" { + gopath = *tmp + dir = "" + } else { + d, err := os.MkdirTemp("", filepath.Base(t.Name())) + if err != nil { + t.Fatal(err) + } + dir = d + gopath = d + } + + exe := buildPtrTests(t, gopath, false) + exe2 := buildPtrTests(t, gopath, true) + + // We (TestPointerChecks) return before the parallel subtest functions do, + // so we can't just defer os.RemoveAll(dir). Instead we have to wait for + // the parallel subtests to finish. This code looks racy but is not: + // the add +1 run in serial before testOne blocks. The -1 run in parallel + // after testOne finishes. 
+ var pending int32 + for _, pt := range ptrTests { + pt := pt + t.Run(pt.name, func(t *testing.T) { + atomic.AddInt32(&pending, +1) + defer func() { + if atomic.AddInt32(&pending, -1) == 0 { + os.RemoveAll(dir) + } + }() + testOne(t, pt, exe, exe2) + }) + } +} + +func buildPtrTests(t *testing.T, gopath string, cgocheck2 bool) (exe string) { + + src := filepath.Join(gopath, "src", "ptrtest") + if err := os.MkdirAll(src, 0777); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(src, "go.mod"), []byte("module ptrtest\ngo 1.20"), 0666); err != nil { + t.Fatal(err) + } + + // Prepare two cgo inputs: one for standard cgo and one for //export cgo. + // (The latter cannot have C definitions, only declarations.) + var cgo1, cgo2 bytes.Buffer + fmt.Fprintf(&cgo1, "package main\n\n/*\n") + fmt.Fprintf(&cgo2, "package main\n\n/*\n") + + // C code + for _, pt := range ptrTests { + cgo := &cgo1 + if strings.Contains(pt.support, "//export") { + cgo = &cgo2 + } + fmt.Fprintf(cgo, "%s\n", pt.c) + fmt.Fprintf(&cgo1, "%s\n", pt.c1) + } + fmt.Fprintf(&cgo1, "*/\nimport \"C\"\n\n") + fmt.Fprintf(&cgo2, "*/\nimport \"C\"\n\n") + + // Imports + did1 := make(map[string]bool) + did2 := make(map[string]bool) + did1["os"] = true // for ptrTestMain + fmt.Fprintf(&cgo1, "import \"os\"\n") + + for _, pt := range ptrTests { + did := did1 + cgo := &cgo1 + if strings.Contains(pt.support, "//export") { + did = did2 + cgo = &cgo2 + } + for _, imp := range pt.imports { + if !did[imp] { + did[imp] = true + fmt.Fprintf(cgo, "import %q\n", imp) + } + } + } + + // Func support and bodies. + for _, pt := range ptrTests { + cgo := &cgo1 + if strings.Contains(pt.support, "//export") { + cgo = &cgo2 + } + fmt.Fprintf(cgo, "%s\nfunc %s() {\n%s\n}\n", pt.support, pt.name, pt.body) + } + + // Func list and main dispatch. 
+ fmt.Fprintf(&cgo1, "var funcs = map[string]func() {\n") + for _, pt := range ptrTests { + fmt.Fprintf(&cgo1, "\t%q: %s,\n", pt.name, pt.name) + } + fmt.Fprintf(&cgo1, "}\n\n") + fmt.Fprintf(&cgo1, "%s\n", ptrTestMain) + + if err := os.WriteFile(filepath.Join(src, "cgo1.go"), cgo1.Bytes(), 0666); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(src, "cgo2.go"), cgo2.Bytes(), 0666); err != nil { + t.Fatal(err) + } + + exeName := "ptrtest.exe" + if cgocheck2 { + exeName = "ptrtest2.exe" + } + cmd := exec.Command("go", "build", "-o", exeName) + cmd.Dir = src + cmd.Env = append(os.Environ(), "GOPATH="+gopath) + + // Set or remove cgocheck2 from the environment. + goexperiment := strings.Split(os.Getenv("GOEXPERIMENT"), ",") + if len(goexperiment) == 1 && goexperiment[0] == "" { + goexperiment = nil + } + i := slices.Index(goexperiment, "cgocheck2") + changed := false + if cgocheck2 && i < 0 { + goexperiment = append(goexperiment, "cgocheck2") + changed = true + } else if !cgocheck2 && i >= 0 { + goexperiment = append(goexperiment[:i], goexperiment[i+1:]...) + changed = true + } + if changed { + cmd.Env = append(cmd.Env, "GOEXPERIMENT="+strings.Join(goexperiment, ",")) + } + + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("go build: %v\n%s", err, out) + } + + return filepath.Join(src, exeName) +} + +const ptrTestMain = ` +func main() { + for _, arg := range os.Args[1:] { + f := funcs[arg] + if f == nil { + panic("missing func "+arg) + } + f() + } +} +` + +var csem = make(chan bool, 16) + +func testOne(t *testing.T, pt ptrTest, exe, exe2 string) { + t.Parallel() + + // Run the tests in parallel, but don't run too many + // executions in parallel, to avoid overloading the system. 
+ runcmd := func(cgocheck string) ([]byte, error) { + csem <- true + defer func() { <-csem }() + x := exe + if cgocheck == "2" { + x = exe2 + cgocheck = "1" + } + cmd := exec.Command(x, pt.name) + cmd.Env = append(os.Environ(), "GODEBUG=cgocheck="+cgocheck) + return cmd.CombinedOutput() + } + + if pt.expensive { + buf, err := runcmd("1") + if err != nil { + t.Logf("%s", buf) + if pt.fail { + t.Fatalf("test marked expensive, but failed when not expensive: %v", err) + } else { + t.Errorf("failed unexpectedly with GODEBUG=cgocheck=1: %v", err) + } + } + + } + + cgocheck := "" + if pt.expensive { + cgocheck = "2" + } + + buf, err := runcmd(cgocheck) + if pt.fail { + if err == nil { + t.Logf("%s", buf) + t.Fatalf("did not fail as expected") + } else if !bytes.Contains(buf, []byte("Go pointer")) { + t.Logf("%s", buf) + t.Fatalf("did not print expected error (failed with %v)", err) + } + } else { + if err != nil { + t.Logf("%s", buf) + t.Fatalf("failed unexpectedly: %v", err) + } + + if !pt.expensive { + // Make sure it passes with the expensive checks. + buf, err := runcmd("2") + if err != nil { + t.Logf("%s", buf) + t.Fatalf("failed unexpectedly with expensive checks: %v", err) + } + } + } + + if pt.fail { + buf, err := runcmd("0") + if err != nil { + t.Logf("%s", buf) + t.Fatalf("failed unexpectedly with GODEBUG=cgocheck=0: %v", err) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/err1.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/err1.go new file mode 100644 index 0000000000000000000000000000000000000000..ced7443599b081522500b551744e3ec7d1cce95b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/err1.go @@ -0,0 +1,22 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +/* +#cgo LDFLAGS: -L/nonexist + +void test() { + xxx; // ERROR HERE +} + +// Issue 8442. Cgo output unhelpful error messages for +// invalid C preambles. +void issue8442foo(UNDEF*); // ERROR HERE +*/ +import "C" + +func main() { + C.test() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/err2.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/err2.go new file mode 100644 index 0000000000000000000000000000000000000000..aa941584c3c2cfcaa7491f00733c4aa0244d674f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/err2.go @@ -0,0 +1,110 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +#include + +typedef struct foo foo_t; +typedef struct bar bar_t; + +foo_t *foop; + +long double x = 0; + +static int transform(int x) { return x; } + +typedef void v; +void F(v** p) {} + +void fvi(void *p, int x) {} + +void fppi(int** p) {} + +int i; +void fi(int i) {} +*/ +import "C" +import ( + "unsafe" +) + +func main() { + s := "" + _ = s + C.malloc(s) // ERROR HERE + + x := (*C.bar_t)(nil) + C.foop = x // ERROR HERE + + // issue 13129: used to output error about C.unsignedshort with CC=clang + var x1 C.ushort + x1 = int(0) // ERROR HERE: C\.ushort + + // issue 13423 + _ = C.fopen() // ERROR HERE + + // issue 13467 + var x2 rune = '✈' + var _ rune = C.transform(x2) // ERROR HERE: C\.int + + // issue 13635: used to output error about C.unsignedchar. + // This test tests all such types. 
+ var ( + _ C.uchar = "uc" // ERROR HERE: C\.uchar + _ C.schar = "sc" // ERROR HERE: C\.schar + _ C.ushort = "us" // ERROR HERE: C\.ushort + _ C.uint = "ui" // ERROR HERE: C\.uint + _ C.ulong = "ul" // ERROR HERE: C\.ulong + _ C.longlong = "ll" // ERROR HERE: C\.longlong + _ C.ulonglong = "ull" // ERROR HERE: C\.ulonglong + _ C.complexfloat = "cf" // ERROR HERE: C\.complexfloat + _ C.complexdouble = "cd" // ERROR HERE: C\.complexdouble + ) + + // issue 13830 + // cgo converts C void* to Go unsafe.Pointer, so despite appearances C + // void** is Go *unsafe.Pointer. This test verifies that we detect the + // problem at build time. + { + type v [0]byte + + f := func(p **v) { + C.F((**C.v)(unsafe.Pointer(p))) // ERROR HERE + } + var p *v + f(&p) + } + + // issue 16116 + _ = C.fvi(1) // ERROR HERE + + // Issue 16591: Test that we detect an invalid call that was being + // hidden by a type conversion inserted by cgo checking. + { + type x *C.int + var p *x + C.fppi(p) // ERROR HERE + } + + // issue 26745 + _ = func(i int) int { + // typecheck reports at column 14 ('+'), but types2 reports at + // column 10 ('C'). + // TODO(mdempsky): Investigate why, and see if types2 can be + // updated to match typecheck behavior. + return C.i + 1 // ERROR HERE: \b(10|14)\b + } + _ = func(i int) { + // typecheck reports at column 7 ('('), but types2 reports at + // column 8 ('i'). The types2 position is more correct, but + // updating typecheck here is fundamentally challenging because of + // IR limitations. 
+ C.fi(i) // ERROR HERE: \b(7|8)\b + } + + C.fi = C.fi // ERROR HERE + +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/err4.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/err4.go new file mode 100644 index 0000000000000000000000000000000000000000..8e5f78e987b61d81cefc82b0a350c2613891179d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/err4.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +long double x = 0; +*/ +import "C" + +func main() { + _ = C.x // ERROR HERE + _ = C.x +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/err5.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/err5.go new file mode 100644 index 0000000000000000000000000000000000000000..c12a290d3890fde4d035d59eab24f1f63a410493 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/err5.go @@ -0,0 +1,11 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +//line /tmp/_cgo_.go:1 +//go:cgo_dynamic_linker "/elf/interp" +// ERROR MESSAGE: only allowed in cgo-generated code + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue11097a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue11097a.go new file mode 100644 index 0000000000000000000000000000000000000000..028d10ce5cb78b42928deee427a1bf3a81f88f48 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue11097a.go @@ -0,0 +1,15 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +//enum test { foo, bar }; +*/ +import "C" + +func main() { + var a = C.enum_test(1) // ERROR HERE + _ = a +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue11097b.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue11097b.go new file mode 100644 index 0000000000000000000000000000000000000000..b00f24fc103833842fc65ee0088bbee7cbd7bb78 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue11097b.go @@ -0,0 +1,15 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +//enum test { foo, bar }; +*/ +import "C" + +func main() { + p := new(C.enum_test) // ERROR HERE + _ = p +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue14669.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue14669.go new file mode 100644 index 0000000000000000000000000000000000000000..04d2bcb631d2495963cd123e066cd380b4fd53a5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue14669.go @@ -0,0 +1,23 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 14669: test that fails when build with CGO_CFLAGS selecting +// optimization. 
+ +package p + +/* +const int E = 1; + +typedef struct s { + int c; +} s; +*/ +import "C" + +func F() { + _ = C.s{ + c: C.E, + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue18452.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue18452.go new file mode 100644 index 0000000000000000000000000000000000000000..0386d768927a1c6d027ffb3a288facd90787944b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue18452.go @@ -0,0 +1,18 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 18452: show pos info in undefined name errors + +package p + +import ( + "C" + "fmt" +) + +func a() { + fmt.Println("Hello, world!") + C.function_that_does_not_exist() // ERROR HERE + C.pi // ERROR HERE +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue18889.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue18889.go new file mode 100644 index 0000000000000000000000000000000000000000..bba6b8f9bb1736f8208c3ab83621f7136cd849c9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue18889.go @@ -0,0 +1,7 @@ +package main + +import "C" + +func main() { + _ = C.malloc // ERROR HERE +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue28069.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue28069.go new file mode 100644 index 0000000000000000000000000000000000000000..e19a3b45bd58c34b53637b995ac12e029fbf0a2b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue28069.go @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that the error message for an unrepresentable typedef in a +// union appears on the right line. This test is only run if the size +// of long double is larger than 64. + +package main + +/* +typedef long double Float128; + +typedef struct SV { + union { + Float128 float128; + } value; +} SV; +*/ +import "C" + +type ts struct { + tv *C.SV // ERROR HERE +} + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue28721.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue28721.go new file mode 100644 index 0000000000000000000000000000000000000000..0eb2a9271c29d6083ced737fab3f597145643f94 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue28721.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// cgo should reject the use of mangled C names. + +package main + +/* +typedef struct a { + int i; +} a; +void fn(void) {} +*/ +import "C" + +type B _Ctype_struct_a // ERROR HERE + +var a _Ctype_struct_a // ERROR HERE + +type A struct { + a *_Ctype_struct_a // ERROR HERE +} + +var notExist _Ctype_NotExist // ERROR HERE + +func main() { + _Cfunc_fn() // ERROR HERE +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue33061.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue33061.go new file mode 100644 index 0000000000000000000000000000000000000000..77d5f7a7c9189ba02a5eacdba3deade4acb44315 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue33061.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// cgo shouldn't crash if there is an extra argument with a C reference. + +package main + +// void F(void* p) {}; +import "C" + +import "unsafe" + +func F() { + var i int + C.F(unsafe.Pointer(&i), C.int(0)) // ERROR HERE +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue42580.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue42580.go new file mode 100644 index 0000000000000000000000000000000000000000..aba80dfebadf56a3d4c261697f6ac1537c9ca4ad --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue42580.go @@ -0,0 +1,44 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 42580: cmd/cgo: shifting identifier position in ast + +package cgotest + +// typedef int (*intFunc) (); +// +// char* strarg = ""; +// +// int func_with_char(char* arg, void* dummy) +// {return 5;} +// +// int* get_arr(char* arg, void* dummy) +// {return NULL;} +import "C" +import "unsafe" + +// Test variables +var ( + checkedPointer = []byte{1} + doublePointerChecked = []byte{1} + singleInnerPointerChecked = []byte{1} +) + +// This test checks the positions of variable identifiers. +// Changing the positions of the test variables idents after this point will break the test. 
+ +func TestSingleArgumentCast() C.int { + retcode := C.func_with_char((*C.char)(unsafe.Pointer(&checkedPointer[0])), unsafe.Pointer(C.strarg)) + return retcode +} + +func TestSingleArgumentCastRecFuncAsSimpleArg() C.int { + retcode := C.func_with_char((*C.char)(unsafe.Pointer(C.get_arr((*C.char)(unsafe.Pointer(&singleInnerPointerChecked[0])), unsafe.Pointer(C.strarg)))), nil) + return retcode +} + +func TestSingleArgumentCastRecFunc() C.int { + retcode := C.func_with_char((*C.char)(unsafe.Pointer(C.get_arr((*C.char)(unsafe.Pointer(&doublePointerChecked[0])), unsafe.Pointer(C.strarg)))), unsafe.Pointer(C.strarg)) + return retcode +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue50710.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue50710.go new file mode 100644 index 0000000000000000000000000000000000000000..dffea229031d412c1a8e9322459326021ba7675e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/issue50710.go @@ -0,0 +1,14 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// size_t StrLen(_GoString_ s) { +// return _GoStringLen(s); +// } +import "C" + +func main() { + C.StrLen1() // ERROR HERE +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/long_double_size.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/long_double_size.go new file mode 100644 index 0000000000000000000000000000000000000000..8b797f886aed4002a8145c17000e09313644417c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/long_double_size.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +/* +const int sizeofLongDouble = sizeof(long double); +*/ +import "C" + +import "fmt" + +func main() { + fmt.Println(C.sizeofLongDouble) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/malloc.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/malloc.go new file mode 100644 index 0000000000000000000000000000000000000000..65da0208b9708f8115abcdc00733acff25358ddc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/malloc.go @@ -0,0 +1,34 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that C.malloc does not return nil. + +package main + +// #include +import "C" + +import ( + "fmt" + "runtime" +) + +func main() { + var size C.size_t + size-- + + // The Dragonfly libc succeeds when asked to allocate + // 0xffffffffffffffff bytes, so pass a different value that + // causes it to fail. + if runtime.GOOS == "dragonfly" { + size = C.size_t(0x7fffffff << (32 * (^uintptr(0) >> 63))) + } + + p := C.malloc(size) + if p == nil { + fmt.Println("malloc: C.malloc returned nil") + // Just exit normally--the test script expects this + // program to crash, so exiting normally indicates failure. + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/notmatchedcfunction.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/notmatchedcfunction.go new file mode 100644 index 0000000000000000000000000000000000000000..5ec9ec5d4a9790916b955a69e512bc047756b8b7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testerrors/testdata/notmatchedcfunction.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +/* +// TODO(#56378): change back to "#cgo noescape noMatchedCFunction: no matched C function" in Go 1.23 +// ERROR MESSAGE: #cgo noescape disabled until Go 1.23 +#cgo noescape noMatchedCFunction +*/ +import "C" + +func main() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testfortran/fortran_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testfortran/fortran_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0eae7c5f53d4aa1a78a6b834bd99fd078d4d89be --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testfortran/fortran_test.go @@ -0,0 +1,91 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fortran + +import ( + "internal/testenv" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" +) + +func TestFortran(t *testing.T) { + testenv.MustHaveGoRun(t) + testenv.MustHaveCGO(t) + + // Find the FORTRAN compiler. + fc := os.Getenv("FC") + if fc == "" { + fc, _ = exec.LookPath("gfortran") + } + if fc == "" { + t.Skip("fortran compiler not found (try setting $FC)") + } + + var fcExtra []string + if strings.Contains(fc, "gfortran") { + // TODO: This duplicates but also diverges from logic from cmd/go + // itself. For example, cmd/go merely adds -lgfortran without the extra + // library path work. If this is what's necessary to run gfortran, we + // should reconcile the logic here and in cmd/go.. Maybe this should + // become a cmd/go script test to share that logic. + + // Add -m32 if we're targeting 386, in case this is a cross-compile. + if runtime.GOARCH == "386" { + fcExtra = append(fcExtra, "-m32") + } + + // Find libgfortran. If the FORTRAN compiler isn't bundled + // with the C linker, this may be in a path the C linker can't + // find on its own. 
(See #14544) + libExt := "so" + switch runtime.GOOS { + case "darwin": + libExt = "dylib" + case "aix": + libExt = "a" + } + libPath, err := exec.Command(fc, append([]string{"-print-file-name=libgfortran." + libExt}, fcExtra...)...).CombinedOutput() + if err != nil { + t.Errorf("error invoking %s: %s", fc, err) + } + libDir := filepath.Dir(string(libPath)) + cgoLDFlags := os.Getenv("CGO_LDFLAGS") + cgoLDFlags += " -L " + libDir + if runtime.GOOS != "aix" { + cgoLDFlags += " -Wl,-rpath," + libDir + } + t.Logf("CGO_LDFLAGS=%s", cgoLDFlags) + os.Setenv("CGO_LDFLAGS", cgoLDFlags) + + } + + // Do a test build that doesn't involve Go FORTRAN support. + fcArgs := append([]string{"testdata/helloworld/helloworld.f90", "-o", "/dev/null"}, fcExtra...) + t.Logf("%s %s", fc, fcArgs) + if err := exec.Command(fc, fcArgs...).Run(); err != nil { + t.Skipf("skipping Fortran test: could not build helloworld.f90 with %s: %s", fc, err) + } + + // Finally, run the actual test. + t.Log("go", "run", "./testdata/testprog") + var stdout, stderr strings.Builder + cmd := exec.Command("go", "run", "./testdata/testprog") + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err := cmd.Run() + t.Logf("%v", cmd) + if stderr.Len() != 0 { + t.Logf("stderr:\n%s", stderr.String()) + } + if err != nil { + t.Errorf("%v\n%s", err, stdout.String()) + } else if stdout.String() != "ok\n" { + t.Errorf("stdout:\n%s\nwant \"ok\"", stdout.String()) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testfortran/testdata/helloworld/helloworld.f90 b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testfortran/testdata/helloworld/helloworld.f90 new file mode 100644 index 0000000000000000000000000000000000000000..cbc34c16ef733ec3cfd36e0c43de7958f3204242 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testfortran/testdata/helloworld/helloworld.f90 @@ -0,0 +1,3 @@ + program HelloWorldF90 + write(*,*) "Hello World!" 
+ end program HelloWorldF90 diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testfortran/testdata/testprog/answer.f90 b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testfortran/testdata/testprog/answer.f90 new file mode 100644 index 0000000000000000000000000000000000000000..b3717ee27a67cbf382215128bf4954e4d5193cc9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testfortran/testdata/testprog/answer.f90 @@ -0,0 +1,9 @@ +! Copyright 2016 The Go Authors. All rights reserved. +! Use of this source code is governed by a BSD-style +! license that can be found in the LICENSE file. + +function the_answer() result(j) bind(C) + use iso_c_binding, only: c_int + integer(c_int) :: j ! output + j = 42 +end function the_answer diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testfortran/testdata/testprog/fortran.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testfortran/testdata/testprog/fortran.go new file mode 100644 index 0000000000000000000000000000000000000000..e98d76c3e602a8ffd1555d36eb6931c0f437ed24 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testfortran/testdata/testprog/fortran.go @@ -0,0 +1,24 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// int the_answer(); +import "C" +import ( + "fmt" + "os" +) + +func TheAnswer() int { + return int(C.the_answer()) +} + +func main() { + if a := TheAnswer(); a != 42 { + fmt.Fprintln(os.Stderr, "Unexpected result for The Answer. 
Got:", a, " Want: 42") + os.Exit(1) + } + fmt.Fprintln(os.Stdout, "ok") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/anonunion.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/anonunion.go new file mode 100644 index 0000000000000000000000000000000000000000..2c86c5c29ea634473e447aa67bfb9aeb9aef499a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/anonunion.go @@ -0,0 +1,26 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +// This file tests that when cgo -godefs sees a struct with a field +// that is an anonymous union, the first field in the union is +// promoted to become a field of the struct. See issue 6677 for +// background. + +/* +typedef struct { + union { + long l; + int c; + }; +} t; +*/ +import "C" + +// Input for cgo -godefs. + +type T C.t diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/bitfields.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/bitfields.go new file mode 100644 index 0000000000000000000000000000000000000000..431ffc069685bba40dd3669cbd99cc81a9ca7d00 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/bitfields.go @@ -0,0 +1,31 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +// This file tests that we don't generate an incorrect field location +// for a bitfield that appears aligned. 
+ +/* +struct bitfields { + unsigned int B1 : 5; + unsigned int B2 : 1; + unsigned int B3 : 1; + unsigned int B4 : 1; + unsigned int Short1 : 16; // misaligned on 8 bit boundary + unsigned int B5 : 1; + unsigned int B6 : 1; + unsigned int B7 : 1; + unsigned int B8 : 1; + unsigned int B9 : 1; + unsigned int B10 : 3; + unsigned int Short2 : 16; // alignment is OK + unsigned int Short3 : 16; // alignment is OK +}; +*/ +import "C" + +type bitfields C.struct_bitfields diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/fieldtypedef.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/fieldtypedef.go new file mode 100644 index 0000000000000000000000000000000000000000..d3ab1902c12284dae13347af8d0f773db0cc2a59 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/fieldtypedef.go @@ -0,0 +1,18 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +/* +struct S1 { int f1; }; +struct S2 { struct S1 s1; }; +typedef struct S1 S1Type; +typedef struct S2 S2Type; +*/ +import "C" + +type S1 C.S1Type +type S2 C.S2Type diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/issue37479.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/issue37479.go new file mode 100644 index 0000000000000000000000000000000000000000..d54531045f1989599f9e39533fec3b70af7554f4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/issue37479.go @@ -0,0 +1,33 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build ignore + +package main + +/* +typedef struct A A; + +typedef struct { + struct A *next; + struct A **prev; +} N; + +struct A +{ + N n; +}; + +typedef struct B +{ + A* a; +} B; +*/ +import "C" + +type N C.N + +type A C.A + +type B C.B diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/issue37621.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/issue37621.go new file mode 100644 index 0000000000000000000000000000000000000000..655e8ae46583b5cca498b7958f5453e189e4b2eb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/issue37621.go @@ -0,0 +1,23 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +/* +struct tt { + long long a; + long long b; +}; + +struct s { + struct tt ts[3]; +}; +*/ +import "C" + +type TT C.struct_tt + +type S C.struct_s diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/issue38649.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/issue38649.go new file mode 100644 index 0000000000000000000000000000000000000000..78b5f78eddd86b0e5d4be1fd29ce4687de066083 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/issue38649.go @@ -0,0 +1,15 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build ignore + +package main + +/* +struct Issue38649 { int x; }; +#define issue38649 struct Issue38649 +*/ +import "C" + +type issue38649 C.issue38649 diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/issue39534.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/issue39534.go new file mode 100644 index 0000000000000000000000000000000000000000..af730e98d94457acc92ce6c7844c450d2e3786c2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/issue39534.go @@ -0,0 +1,12 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +// enum { ENUMVAL = 0x1 }; +import "C" + +const ENUMVAL = C.ENUMVAL diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/issue48396.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/issue48396.go new file mode 100644 index 0000000000000000000000000000000000000000..81dd2feb800fcbc4c7e6737eac2f3be6f66546e5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/issue48396.go @@ -0,0 +1,18 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build ignore + +package main + +/* +// from +struct issue48396 { + int fd; + int bpf_fd; +}; +*/ +import "C" + +type Issue48396 C.struct_issue48396 diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/issue8478.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/issue8478.go new file mode 100644 index 0000000000000000000000000000000000000000..f4ef164bda246d52d50b284fc1755ce342891199 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/issue8478.go @@ -0,0 +1,20 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +// Issue 8478. Test that void* is consistently mapped to *byte. + +/* +typedef struct { + void *p; + void **q; + void ***r; +} s; +*/ +import "C" + +type Issue8478 C.s diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/main.go new file mode 100644 index 0000000000000000000000000000000000000000..5c670f3d329c1acc2e2788c8024cd2233ec0aa62 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testdata/main.go @@ -0,0 +1,57 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "os" + "reflect" +) + +// Test that the struct field in anonunion.go was promoted. +var v1 T +var v2 = v1.L + +// Test that P, Q, and R all point to byte. 
+var v3 = Issue8478{P: (*byte)(nil), Q: (**byte)(nil), R: (***byte)(nil)} + +// Test that N, A and B are fully defined +var v4 = N{} +var v5 = A{} +var v6 = B{} + +// Test that S is fully defined +var v7 = S{} + +// Test that #define'd type is fully defined +var _ = issue38649{X: 0} + +// Test that prefixes do not cause duplicate field names. +var _ = Issue48396{Fd: 1, Bpf_fd: 2} + +func main() { + pass := true + + // The Go translation of bitfields should not have any of the + // bitfield types. The order in which bitfields are laid out + // in memory is implementation defined, so we can't easily + // know how a bitfield should correspond to a Go type, even if + // it appears to be aligned correctly. + bitfieldType := reflect.TypeOf(bitfields{}) + check := func(name string) { + _, ok := bitfieldType.FieldByName(name) + if ok { + fmt.Fprintf(os.Stderr, "found unexpected bitfields field %s\n", name) + pass = false + } + } + check("Short1") + check("Short2") + check("Short3") + + if !pass { + os.Exit(1) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testgodefs_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testgodefs_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8138b7fa3d5a5fe611fbe19f27e5d02a2baab594 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testgodefs/testgodefs_test.go @@ -0,0 +1,116 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testgodefs + +import ( + "bytes" + "internal/testenv" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" +) + +// We are testing cgo -godefs, which translates Go files that use +// import "C" into Go files with Go definitions of types defined in the +// import "C" block. Add more tests here. 
+var filePrefixes = []string{ + "anonunion", + "bitfields", + "issue8478", + "fieldtypedef", + "issue37479", + "issue37621", + "issue38649", + "issue39534", + "issue48396", +} + +func TestGoDefs(t *testing.T) { + testenv.MustHaveGoRun(t) + testenv.MustHaveCGO(t) + + testdata, err := filepath.Abs("testdata") + if err != nil { + t.Fatal(err) + } + + gopath, err := os.MkdirTemp("", "testgodefs-gopath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(gopath) + + dir := filepath.Join(gopath, "src", "testgodefs") + if err := os.MkdirAll(dir, 0755); err != nil { + t.Fatal(err) + } + + for _, fp := range filePrefixes { + cmd := exec.Command("go", "tool", "cgo", + "-godefs", + "-srcdir", testdata, + "-objdir", dir, + fp+".go") + cmd.Stderr = new(bytes.Buffer) + + out, err := cmd.Output() + if err != nil { + t.Fatalf("%s: %v\n%s", strings.Join(cmd.Args, " "), err, cmd.Stderr) + } + + fn := fp + "_defs.go" + if err := os.WriteFile(filepath.Join(dir, fn), out, 0644); err != nil { + t.Fatal(err) + } + + // Verify that command line arguments are not rewritten in the generated comment, + // see go.dev/issue/52063 + hasGeneratedByComment := false + for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") { + cgoExe := "cgo" + if runtime.GOOS == "windows" { + cgoExe = "cgo.exe" + } + if !strings.HasPrefix(line, "// "+cgoExe+" -godefs") { + continue + } + if want := "// " + cgoExe + " " + strings.Join(cmd.Args[3:], " "); line != want { + t.Errorf("%s: got generated comment %q, want %q", fn, line, want) + } + hasGeneratedByComment = true + break + } + + if !hasGeneratedByComment { + t.Errorf("%s: comment with generating cgo -godefs command not found", fn) + } + } + + main, err := os.ReadFile(filepath.Join("testdata", "main.go")) + if err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(dir, "main.go"), main, 0644); err != nil { + t.Fatal(err) + } + + if err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module testgodefs\ngo 
1.14\n"), 0644); err != nil { + t.Fatal(err) + } + + // Use 'go run' to build and run the resulting binary in a single step, + // instead of invoking 'go build' and the resulting binary separately, so that + // this test can pass on mobile builders, which do not copy artifacts back + // from remote invocations. + cmd := exec.Command("go", "run", ".") + cmd.Env = append(os.Environ(), "GOPATH="+gopath) + cmd.Dir = dir + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("%s [%s]: %v\n%s", strings.Join(cmd.Args, " "), dir, err, out) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testlife/life_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testlife/life_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e93d29c4d978b7658aedb985ace73cb96c84123e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testlife/life_test.go @@ -0,0 +1,65 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package life_test + +import ( + "bytes" + "cmd/cgo/internal/cgotest" + "internal/testenv" + "log" + "os" + "os/exec" + "path/filepath" + "testing" +) + +func TestMain(m *testing.M) { + log.SetFlags(log.Lshortfile) + os.Exit(testMain(m)) +} + +func testMain(m *testing.M) int { + GOPATH, err := os.MkdirTemp("", "cgolife") + if err != nil { + log.Panic(err) + } + defer os.RemoveAll(GOPATH) + os.Setenv("GOPATH", GOPATH) + + // Copy testdata into GOPATH/src/cgolife, along with a go.mod file + // declaring the same path. 
+ modRoot := filepath.Join(GOPATH, "src", "cgolife") + if err := cgotest.OverlayDir(modRoot, "testdata"); err != nil { + log.Panic(err) + } + if err := os.Chdir(modRoot); err != nil { + log.Panic(err) + } + os.Setenv("PWD", modRoot) + if err := os.WriteFile("go.mod", []byte("module cgolife\n"), 0666); err != nil { + log.Panic(err) + } + + return m.Run() +} + +// TestTestRun runs a test case for cgo //export. +func TestTestRun(t *testing.T) { + testenv.MustHaveGoRun(t) + testenv.MustHaveCGO(t) + + cmd := exec.Command("go", "run", "main.go") + got, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("%v: %s\n%s", cmd, err, got) + } + want, err := os.ReadFile("main.out") + if err != nil { + t.Fatal("reading golden output:", err) + } + if !bytes.Equal(got, want) { + t.Errorf("'%v' output does not match expected in main.out. Instead saw:\n%s", cmd, got) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testlife/testdata/c-life.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testlife/testdata/c-life.c new file mode 100644 index 0000000000000000000000000000000000000000..f853163e2f0c1ebdc067e166fc2ee2f420149a4b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testlife/testdata/c-life.c @@ -0,0 +1,56 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include +#include "life.h" +#include "_cgo_export.h" + +const int MYCONST = 0; + +// Do the actual manipulation of the life board in C. This could be +// done easily in Go, we are just using C for demonstration +// purposes. +void +Step(int x, int y, int *a, int *n) +{ + struct GoStart_return r; + + // Use Go to start 4 goroutines each of which handles 1/4 of the + // board. 
+ r = GoStart(0, x, y, 0, x / 2, 0, y / 2, a, n); + assert(r.r0 == 0 && r.r1 == 100); // test multiple returns + r = GoStart(1, x, y, x / 2, x, 0, y / 2, a, n); + assert(r.r0 == 1 && r.r1 == 101); // test multiple returns + GoStart(2, x, y, 0, x / 2, y / 2, y, a, n); + GoStart(3, x, y, x / 2, x, y / 2, y, a, n); + GoWait(0); + GoWait(1); + GoWait(2); + GoWait(3); +} + +// The actual computation. This is called in parallel. +void +DoStep(int xdim, int ydim, int xstart, int xend, int ystart, int yend, int *a, int *n) +{ + int x, y, c, i, j; + + for(x = xstart; x < xend; x++) { + for(y = ystart; y < yend; y++) { + c = 0; + for(i = -1; i <= 1; i++) { + for(j = -1; j <= 1; j++) { + if(x+i >= 0 && x+i < xdim && + y+j >= 0 && y+j < ydim && + (i != 0 || j != 0)) + c += a[(x+i)*xdim + (y+j)] != 0; + } + } + if(c == 3 || (c == 2 && a[x*xdim + y] != 0)) + n[x*xdim + y] = 1; + else + n[x*xdim + y] = 0; + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testlife/testdata/life.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testlife/testdata/life.go new file mode 100644 index 0000000000000000000000000000000000000000..72311404dfd62ac85475bd8ee6d76eb63dd42c6e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testlife/testdata/life.go @@ -0,0 +1,40 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgolife + +// #include "life.h" +import "C" + +import "unsafe" + +func Run(gen, x, y int, a []int32) { + n := make([]int32, x*y) + for i := 0; i < gen; i++ { + C.Step(C.int(x), C.int(y), (*C.int)(unsafe.Pointer(&a[0])), (*C.int)(unsafe.Pointer(&n[0]))) + copy(a, n) + } +} + +// Keep the channels visible from Go. +var chans [4]chan bool + +// Double return value is just for testing. 
+// +//export GoStart +func GoStart(i, xdim, ydim, xstart, xend, ystart, yend C.int, a *C.int, n *C.int) (int, int) { + c := make(chan bool, int(C.MYCONST)) + go func() { + C.DoStep(xdim, ydim, xstart, xend, ystart, yend, a, n) + c <- true + }() + chans[i] = c + return int(i), int(i + 100) +} + +//export GoWait +func GoWait(i C.int) { + <-chans[i] + chans[i] = nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testlife/testdata/life.h b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testlife/testdata/life.h new file mode 100644 index 0000000000000000000000000000000000000000..11d2b97226767bb0d4c86ad0dc7a8f991389e54a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testlife/testdata/life.h @@ -0,0 +1,7 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +extern void Step(int, int, int *, int *); +extern void DoStep(int, int, int, int, int, int, int *, int *); +extern const int MYCONST; diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testlife/testdata/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testlife/testdata/main.go new file mode 100644 index 0000000000000000000000000000000000000000..e9d19be48741919d5f9a4ca3924db78e04dda970 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testlife/testdata/main.go @@ -0,0 +1,47 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build test_run + +// Run the game of life in C using Go for parallelization. 
+ +package main + +import ( + "flag" + "fmt" + + "cgolife" +) + +const MAXDIM = 100 + +var dim = flag.Int("dim", 16, "board dimensions") +var gen = flag.Int("gen", 10, "generations") + +func main() { + flag.Parse() + + var a [MAXDIM * MAXDIM]int32 + for i := 2; i < *dim; i += 8 { + for j := 2; j < *dim-3; j += 8 { + for y := 0; y < 3; y++ { + a[i**dim+j+y] = 1 + } + } + } + + cgolife.Run(*gen, *dim, *dim, a[:]) + + for i := 0; i < *dim; i++ { + for j := 0; j < *dim; j++ { + if a[i**dim+j] == 0 { + fmt.Print(" ") + } else { + fmt.Print("X") + } + } + fmt.Print("\n") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testlife/testdata/main.out b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testlife/testdata/main.out new file mode 100644 index 0000000000000000000000000000000000000000..26fc9c6e3ff6d126734a1608560c8efccb95cc1f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testlife/testdata/main.out @@ -0,0 +1,16 @@ + + + XXX XXX + + + + + + + + XXX XXX + + + + + diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testnocgo/nocgo.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testnocgo/nocgo.go new file mode 100644 index 0000000000000000000000000000000000000000..00ae5e9c86282e2e370265d889203f634316313c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testnocgo/nocgo.go @@ -0,0 +1,22 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that -static works when not using cgo. This test is in +// misc/cgo to take advantage of the testing framework support for +// when -static is expected to work. + +package nocgo + +func NoCgo() int { + c := make(chan int) + + // The test is run with external linking, which means that + // goroutines will be created via the runtime/cgo package. + // Make sure that works. 
+ go func() { + c <- 42 + }() + + return <-c +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testnocgo/nocgo_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testnocgo/nocgo_test.go new file mode 100644 index 0000000000000000000000000000000000000000..45d247cf9545a644e54b09735779a33c62397850 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testnocgo/nocgo_test.go @@ -0,0 +1,14 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nocgo + +import "testing" + +func TestNop(t *testing.T) { + i := NoCgo() + if i != 42 { + t.Errorf("got %d, want %d", i, 42) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/altpath/testdata/common/common.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/altpath/testdata/common/common.go new file mode 100644 index 0000000000000000000000000000000000000000..505ba02b1f182b6c5d89de21b99cd76f66604d12 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/altpath/testdata/common/common.go @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package common + +var X int + +func init() { + X = 4 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/altpath/testdata/plugin-mismatch/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/altpath/testdata/plugin-mismatch/main.go new file mode 100644 index 0000000000000000000000000000000000000000..bfb4ba45aa29b8e54081aa34f1bd2af2944c28a3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/altpath/testdata/plugin-mismatch/main.go @@ -0,0 +1,17 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// // No C code required. +import "C" + +// The common package imported here does not match the common package +// imported by plugin1. A program that attempts to load plugin1 and +// plugin-mismatch should produce an error. +import "testplugin/common" + +func ReadCommonX() int { + return common.X +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/plugin_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/plugin_test.go new file mode 100644 index 0000000000000000000000000000000000000000..85dfd31123b5dedd27aa83544caf692bc3805395 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/plugin_test.go @@ -0,0 +1,424 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package plugin_test + +import ( + "bytes" + "cmd/cgo/internal/cgotest" + "context" + "flag" + "fmt" + "internal/platform" + "internal/testenv" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + "time" +) + +var globalSkip = func(t *testing.T) {} + +var gcflags string = os.Getenv("GO_GCFLAGS") +var goroot string + +func TestMain(m *testing.M) { + flag.Parse() + log.SetFlags(log.Lshortfile) + os.Exit(testMain(m)) +} + +// tmpDir is used to cleanup logged commands -- s/tmpDir/$TMPDIR/ +var tmpDir string + +// prettyPrintf prints lines with tmpDir sanitized. +func prettyPrintf(format string, args ...interface{}) { + s := fmt.Sprintf(format, args...) 
+ if tmpDir != "" { + s = strings.ReplaceAll(s, tmpDir, "$TMPDIR") + } + fmt.Print(s) +} + +func testMain(m *testing.M) int { + if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" { + globalSkip = func(t *testing.T) { t.Skip("short mode and $GO_BUILDER_NAME not set") } + return m.Run() + } + if !platform.BuildModeSupported(runtime.Compiler, "plugin", runtime.GOOS, runtime.GOARCH) { + globalSkip = func(t *testing.T) { t.Skip("plugin build mode not supported") } + return m.Run() + } + if !testenv.HasCGO() { + globalSkip = func(t *testing.T) { t.Skip("cgo not supported") } + return m.Run() + } + + cwd, err := os.Getwd() + if err != nil { + log.Fatal(err) + } + goroot = filepath.Join(cwd, "../../../../..") + + // Copy testdata into GOPATH/src/testplugin, along with a go.mod file + // declaring the same path. + + GOPATH, err := os.MkdirTemp("", "plugin_test") + if err != nil { + log.Panic(err) + } + defer os.RemoveAll(GOPATH) + tmpDir = GOPATH + fmt.Printf("TMPDIR=%s\n", tmpDir) + + modRoot := filepath.Join(GOPATH, "src", "testplugin") + altRoot := filepath.Join(GOPATH, "alt", "src", "testplugin") + for srcRoot, dstRoot := range map[string]string{ + "testdata": modRoot, + filepath.Join("altpath", "testdata"): altRoot, + } { + if err := cgotest.OverlayDir(dstRoot, srcRoot); err != nil { + log.Panic(err) + } + prettyPrintf("mkdir -p %s\n", dstRoot) + prettyPrintf("rsync -a %s/ %s\n", srcRoot, dstRoot) + + if err := os.WriteFile(filepath.Join(dstRoot, "go.mod"), []byte("module testplugin\n"), 0666); err != nil { + log.Panic(err) + } + prettyPrintf("echo 'module testplugin' > %s/go.mod\n", dstRoot) + } + + os.Setenv("GOPATH", filepath.Join(GOPATH, "alt")) + if err := os.Chdir(altRoot); err != nil { + log.Panic(err) + } else { + prettyPrintf("cd %s\n", altRoot) + } + os.Setenv("PWD", altRoot) + goCmd(nil, "build", "-buildmode=plugin", "-o", filepath.Join(modRoot, "plugin-mismatch.so"), "./plugin-mismatch") + + os.Setenv("GOPATH", GOPATH) + if err := os.Chdir(modRoot); 
err != nil { + log.Panic(err) + } else { + prettyPrintf("cd %s\n", modRoot) + } + os.Setenv("PWD", modRoot) + + os.Setenv("LD_LIBRARY_PATH", modRoot) + + goCmd(nil, "build", "-buildmode=plugin", "./plugin1") + goCmd(nil, "build", "-buildmode=plugin", "./plugin2") + so, err := os.ReadFile("plugin2.so") + if err != nil { + log.Panic(err) + } + if err := os.WriteFile("plugin2-dup.so", so, 0444); err != nil { + log.Panic(err) + } + prettyPrintf("cp plugin2.so plugin2-dup.so\n") + + goCmd(nil, "build", "-buildmode=plugin", "-o=sub/plugin1.so", "./sub/plugin1") + goCmd(nil, "build", "-buildmode=plugin", "-o=unnamed1.so", "./unnamed1/main.go") + goCmd(nil, "build", "-buildmode=plugin", "-o=unnamed2.so", "./unnamed2/main.go") + goCmd(nil, "build", "-o", "host.exe", "./host") + + return m.Run() +} + +func goCmd(t *testing.T, op string, args ...string) string { + if t != nil { + t.Helper() + } + var flags []string + if op != "tool" { + flags = []string{"-gcflags", gcflags} + } + return run(t, filepath.Join(goroot, "bin", "go"), append(append([]string{op}, flags...), args...)...) +} + +// escape converts a string to something suitable for a shell command line. +func escape(s string) string { + s = strings.Replace(s, "\\", "\\\\", -1) + s = strings.Replace(s, "'", "\\'", -1) + // Conservative guess at characters that will force quoting + if s == "" || strings.ContainsAny(s, "\\ ;#*&$~?!|[]()<>{}`") { + s = "'" + s + "'" + } + return s +} + +// asCommandLine renders cmd as something that could be copy-and-pasted into a command line +func asCommandLine(cwd string, cmd *exec.Cmd) string { + s := "(" + if cmd.Dir != "" && cmd.Dir != cwd { + s += "cd" + escape(cmd.Dir) + ";" + } + for _, e := range cmd.Env { + if !strings.HasPrefix(e, "PATH=") && + !strings.HasPrefix(e, "HOME=") && + !strings.HasPrefix(e, "USER=") && + !strings.HasPrefix(e, "SHELL=") { + s += " " + s += escape(e) + } + } + // These EVs are relevant to this test. 
+ for _, e := range os.Environ() { + if strings.HasPrefix(e, "PWD=") || + strings.HasPrefix(e, "GOPATH=") || + strings.HasPrefix(e, "LD_LIBRARY_PATH=") { + s += " " + s += escape(e) + } + } + for _, a := range cmd.Args { + s += " " + s += escape(a) + } + s += " )" + return s +} + +func run(t *testing.T, bin string, args ...string) string { + cmd := exec.Command(bin, args...) + cmdLine := asCommandLine(".", cmd) + prettyPrintf("%s\n", cmdLine) + cmd.Stderr = new(strings.Builder) + out, err := cmd.Output() + if err != nil { + if t == nil { + log.Panicf("%s: %v\n%s", strings.Join(cmd.Args, " "), err, cmd.Stderr) + } else { + t.Helper() + t.Fatalf("%s: %v\n%s", strings.Join(cmd.Args, " "), err, cmd.Stderr) + } + } + + return string(bytes.TrimSpace(out)) +} + +func TestDWARFSections(t *testing.T) { + // test that DWARF sections are emitted for plugins and programs importing "plugin" + globalSkip(t) + goCmd(t, "run", "./checkdwarf/main.go", "plugin2.so", "plugin2.UnexportedNameReuse") + goCmd(t, "run", "./checkdwarf/main.go", "./host.exe", "main.main") +} + +func TestBuildID(t *testing.T) { + // check that plugin has build ID. + globalSkip(t) + b := goCmd(t, "tool", "buildid", "plugin1.so") + if len(b) == 0 { + t.Errorf("build id not found") + } +} + +func TestRunHost(t *testing.T) { + globalSkip(t) + run(t, "./host.exe") +} + +func TestUniqueTypesAndItabs(t *testing.T) { + globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "./iface_a") + goCmd(t, "build", "-buildmode=plugin", "./iface_b") + goCmd(t, "build", "-o", "iface.exe", "./iface") + run(t, "./iface.exe") +} + +func TestIssue18676(t *testing.T) { + // make sure we don't add the same itab twice. + // The buggy code hangs forever, so use a timeout to check for that. 
+ globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-o", "plugin.so", "./issue18676/plugin.go") + goCmd(t, "build", "-o", "issue18676.exe", "./issue18676/main.go") + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + cmd := exec.CommandContext(ctx, "./issue18676.exe") + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("%s: %v\n%s", strings.Join(cmd.Args, " "), err, out) + } +} + +func TestIssue19534(t *testing.T) { + // Test that we can load a plugin built in a path with non-alpha characters. + globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-gcflags=-p=issue.19534", "-ldflags=-pluginpath=issue.19534", "-o", "plugin.so", "./issue19534/plugin.go") + goCmd(t, "build", "-o", "issue19534.exe", "./issue19534/main.go") + run(t, "./issue19534.exe") +} + +func TestIssue18584(t *testing.T) { + globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-o", "plugin.so", "./issue18584/plugin.go") + goCmd(t, "build", "-o", "issue18584.exe", "./issue18584/main.go") + run(t, "./issue18584.exe") +} + +func TestIssue19418(t *testing.T) { + globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-ldflags=-X main.Val=linkstr", "-o", "plugin.so", "./issue19418/plugin.go") + goCmd(t, "build", "-o", "issue19418.exe", "./issue19418/main.go") + run(t, "./issue19418.exe") +} + +func TestIssue19529(t *testing.T) { + globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-o", "plugin.so", "./issue19529/plugin.go") +} + +func TestIssue22175(t *testing.T) { + globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-o", "issue22175_plugin1.so", "./issue22175/plugin1.go") + goCmd(t, "build", "-buildmode=plugin", "-o", "issue22175_plugin2.so", "./issue22175/plugin2.go") + goCmd(t, "build", "-o", "issue22175.exe", "./issue22175/main.go") + run(t, "./issue22175.exe") +} + +func TestIssue22295(t *testing.T) { + globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-o", "issue.22295.so", "./issue22295.pkg") + goCmd(t, "build", 
"-o", "issue22295.exe", "./issue22295.pkg/main.go") + run(t, "./issue22295.exe") +} + +func TestIssue24351(t *testing.T) { + globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-o", "issue24351.so", "./issue24351/plugin.go") + goCmd(t, "build", "-o", "issue24351.exe", "./issue24351/main.go") + run(t, "./issue24351.exe") +} + +func TestIssue25756(t *testing.T) { + globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-o", "life.so", "./issue25756/plugin") + goCmd(t, "build", "-o", "issue25756.exe", "./issue25756/main.go") + // Fails intermittently, but 20 runs should cause the failure + for n := 20; n > 0; n-- { + t.Run(fmt.Sprint(n), func(t *testing.T) { + t.Parallel() + run(t, "./issue25756.exe") + }) + } +} + +// Test with main using -buildmode=pie with plugin for issue #43228 +func TestIssue25756pie(t *testing.T) { + globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-o", "life.so", "./issue25756/plugin") + goCmd(t, "build", "-buildmode=pie", "-o", "issue25756pie.exe", "./issue25756/main.go") + run(t, "./issue25756pie.exe") +} + +func TestMethod(t *testing.T) { + // Exported symbol's method must be live. 
+ globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-o", "plugin.so", "./method/plugin.go") + goCmd(t, "build", "-o", "method.exe", "./method/main.go") + run(t, "./method.exe") +} + +func TestMethod2(t *testing.T) { + globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-o", "method2.so", "./method2/plugin.go") + goCmd(t, "build", "-o", "method2.exe", "./method2/main.go") + run(t, "./method2.exe") +} + +func TestMethod3(t *testing.T) { + globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-o", "method3.so", "./method3/plugin.go") + goCmd(t, "build", "-o", "method3.exe", "./method3/main.go") + run(t, "./method3.exe") +} + +func TestIssue44956(t *testing.T) { + globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-o", "issue44956p1.so", "./issue44956/plugin1.go") + goCmd(t, "build", "-buildmode=plugin", "-o", "issue44956p2.so", "./issue44956/plugin2.go") + goCmd(t, "build", "-o", "issue44956.exe", "./issue44956/main.go") + run(t, "./issue44956.exe") +} + +func TestIssue52937(t *testing.T) { + globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-o", "issue52937.so", "./issue52937/main.go") +} + +func TestIssue53989(t *testing.T) { + globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-o", "issue53989.so", "./issue53989/plugin.go") + goCmd(t, "build", "-o", "issue53989.exe", "./issue53989/main.go") + run(t, "./issue53989.exe") +} + +func TestForkExec(t *testing.T) { + // Issue 38824: importing the plugin package causes it hang in forkExec on darwin. 
+ globalSkip(t) + + t.Parallel() + goCmd(t, "build", "-o", "forkexec.exe", "./forkexec/main.go") + + for i := 0; i < 100; i++ { + cmd := testenv.Command(t, "./forkexec.exe", "1") + err := cmd.Run() + if err != nil { + if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { + t.Logf("stderr:\n%s", ee.Stderr) + } + t.Errorf("running command failed: %v", err) + break + } + } +} + +func TestSymbolNameMangle(t *testing.T) { + // Issue 58800: generic function name may contain weird characters + // that confuse the external linker. + // Issue 62098: the name mangling code doesn't handle some string + // symbols correctly. + globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-o", "mangle.so", "./mangle/plugin.go") +} + +func TestIssue62430(t *testing.T) { + globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-o", "issue62430.so", "./issue62430/plugin.go") + goCmd(t, "build", "-o", "issue62430.exe", "./issue62430/main.go") + run(t, "./issue62430.exe") +} + +func TestTextSectionSplit(t *testing.T) { + globalSkip(t) + if runtime.GOOS != "darwin" || runtime.GOARCH != "arm64" { + t.Skipf("text section splitting is not done in %s/%s", runtime.GOOS, runtime.GOARCH) + } + + // Use -ldflags=-debugtextsize=262144 to let the linker split text section + // at a smaller size threshold, so it actually splits for the test binary. + goCmd(nil, "build", "-ldflags=-debugtextsize=262144", "-o", "host-split.exe", "./host") + run(t, "./host-split.exe") + + // Check that we did split text sections. + syms := goCmd(nil, "tool", "nm", "host-split.exe") + if !strings.Contains(syms, "runtime.text.1") { + t.Errorf("runtime.text.1 not found, text section not split?") + } +} + +func TestIssue67976(t *testing.T) { + // Issue 67976: build failure with loading a dynimport variable (the runtime/pprof + // package does this on darwin) in a plugin on darwin/amd64. + // The test program uses runtime/pprof in a plugin. 
+ globalSkip(t) + goCmd(t, "build", "-buildmode=plugin", "-o", "issue67976.so", "./issue67976/plugin.go") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/checkdwarf/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/checkdwarf/main.go new file mode 100644 index 0000000000000000000000000000000000000000..7886c834e7ca26359c5395916562d638eab8c456 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/checkdwarf/main.go @@ -0,0 +1,106 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Usage: +// +// checkdwarf +// +// Opens , which must be an executable or a library and checks that +// there is an entry in .debug_info whose name ends in + +package main + +import ( + "debug/dwarf" + "debug/elf" + "debug/macho" + "debug/pe" + "fmt" + "os" + "strings" +) + +func usage() { + fmt.Fprintf(os.Stderr, "checkdwarf executable-or-library DIE-suffix\n") +} + +type dwarfer interface { + DWARF() (*dwarf.Data, error) +} + +func openElf(path string) dwarfer { + exe, err := elf.Open(path) + if err != nil { + return nil + } + return exe +} + +func openMacho(path string) dwarfer { + exe, err := macho.Open(path) + if err != nil { + return nil + } + return exe +} + +func openPE(path string) dwarfer { + exe, err := pe.Open(path) + if err != nil { + return nil + } + return exe +} + +func main() { + if len(os.Args) != 3 { + usage() + } + + exePath := os.Args[1] + dieSuffix := os.Args[2] + + var exe dwarfer + + for _, openfn := range []func(string) dwarfer{openMacho, openPE, openElf} { + exe = openfn(exePath) + if exe != nil { + break + } + } + + if exe == nil { + fmt.Fprintf(os.Stderr, "could not open %s\n", exePath) + os.Exit(1) + } + + data, err := exe.DWARF() + if err != nil { + fmt.Fprintf(os.Stderr, "%s: error opening DWARF: %v\n", exePath, err) + os.Exit(1) + } + + 
rdr := data.Reader() + for { + e, err := rdr.Next() + if err != nil { + fmt.Fprintf(os.Stderr, "%s: error reading DWARF: %v\n", exePath, err) + os.Exit(1) + } + if e == nil { + break + } + name, hasname := e.Val(dwarf.AttrName).(string) + if !hasname { + continue + } + if strings.HasSuffix(name, dieSuffix) { + // found + os.Exit(0) + } + } + + fmt.Fprintf(os.Stderr, "%s: no entry with a name ending in %q was found\n", exePath, dieSuffix) + os.Exit(1) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/common/common.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/common/common.go new file mode 100644 index 0000000000000000000000000000000000000000..b064e6bccfe828194a6315ef0e3015e5e1c4e101 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/common/common.go @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package common + +var X int + +func init() { + X = 3 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/forkexec/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/forkexec/main.go new file mode 100644 index 0000000000000000000000000000000000000000..3169ff5f04d100cb61304466696cc50d3f7fcc46 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/forkexec/main.go @@ -0,0 +1,30 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "os" + "os/exec" + _ "plugin" + "sync" +) + +func main() { + if os.Args[1] != "1" { + return + } + + var wg sync.WaitGroup + for i := 0; i < 8; i++ { + wg.Add(1) + go func() { + defer wg.Done() + // does not matter what we exec, just exec itself + cmd := exec.Command("./forkexec.exe", "0") + cmd.Run() + }() + } + wg.Wait() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/host/host.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/host/host.go new file mode 100644 index 0000000000000000000000000000000000000000..a3799328cdc2b81f4ca1eea6670df6bf27e46896 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/host/host.go @@ -0,0 +1,176 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "log" + "path/filepath" + "plugin" + "strings" + + "testplugin/common" +) + +func init() { + common.X *= 5 +} + +// testUnnamed tests that two plugins built with .go files passed on +// the command line do not have overlapping symbols. That is, +// unnamed1.so/FuncInt and unnamed2.so/FuncInt should be distinct functions. 
+func testUnnamed() { + p, err := plugin.Open("unnamed1.so") + if err != nil { + log.Fatalf(`plugin.Open("unnamed1.so"): %v`, err) + } + fn, err := p.Lookup("FuncInt") + if err != nil { + log.Fatalf(`unnamed1.so: Lookup("FuncInt") failed: %v`, err) + } + if got, want := fn.(func() int)(), 1; got != want { + log.Fatalf("unnamed1.so: FuncInt()=%d, want %d", got, want) + } + + p, err = plugin.Open("unnamed2.so") + if err != nil { + log.Fatalf(`plugin.Open("unnamed2.so"): %v`, err) + } + fn, err = p.Lookup("FuncInt") + if err != nil { + log.Fatalf(`unnamed2.so: Lookup("FuncInt") failed: %v`, err) + } + if got, want := fn.(func() int)(), 2; got != want { + log.Fatalf("unnamed2.so: FuncInt()=%d, want %d", got, want) + } +} + +func main() { + if got, want := common.X, 3*5; got != want { + log.Fatalf("before plugin load common.X=%d, want %d", got, want) + } + + p, err := plugin.Open("plugin1.so") + if err != nil { + log.Fatalf("plugin.Open failed: %v", err) + } + + const wantX = 3 * 5 * 7 + if got := common.X; got != wantX { + log.Fatalf("after plugin load common.X=%d, want %d", got, wantX) + } + + seven, err := p.Lookup("Seven") + if err != nil { + log.Fatalf(`Lookup("Seven") failed: %v`, err) + } + if got, want := *seven.(*int), 7; got != want { + log.Fatalf("plugin1.Seven=%d, want %d", got, want) + } + + readFunc, err := p.Lookup("ReadCommonX") + if err != nil { + log.Fatalf(`plugin1.Lookup("ReadCommonX") failed: %v`, err) + } + if got := readFunc.(func() int)(); got != wantX { + log.Fatalf("plugin1.ReadCommonX()=%d, want %d", got, wantX) + } + + // sub/plugin1.so is a different plugin with the same name as + // the already loaded plugin. It also depends on common. Test + // that we can load the different plugin, it is actually + // different, and that it sees the same common package. 
+ subpPath, err := filepath.Abs("sub/plugin1.so") + if err != nil { + log.Fatalf("filepath.Abs(%q) failed: %v", subpPath, err) + } + subp, err := plugin.Open(subpPath) + if err != nil { + log.Fatalf("plugin.Open(%q) failed: %v", subpPath, err) + } + + funcVar, err := subp.Lookup("FuncVar") + if err != nil { + log.Fatalf(`sub/plugin1.Lookup("FuncVar") failed: %v`, err) + } + called := false + *funcVar.(*func()) = func() { + called = true + } + + readFunc, err = subp.Lookup("ReadCommonX") + if err != nil { + log.Fatalf(`sub/plugin1.Lookup("ReadCommonX") failed: %v`, err) + } + if got := readFunc.(func() int)(); got != wantX { + log.Fatalf("sub/plugin1.ReadCommonX()=%d, want %d", got, wantX) + } + if !called { + log.Fatal("calling ReadCommonX did not call FuncVar") + } + + subf, err := subp.Lookup("F") + if err != nil { + log.Fatalf(`sub/plugin1.Lookup("F") failed: %v`, err) + } + if gotf := subf.(func() int)(); gotf != 17 { + log.Fatalf(`sub/plugin1.F()=%d, want 17`, gotf) + } + f, err := p.Lookup("F") + if err != nil { + log.Fatalf(`plugin1.Lookup("F") failed: %v`, err) + } + if gotf := f.(func() int)(); gotf != 3 { + log.Fatalf(`plugin1.F()=%d, want 17`, gotf) + } + + p2, err := plugin.Open("plugin2.so") + if err != nil { + log.Fatalf("plugin.Open failed: %v", err) + } + // Check that plugin2's init function was called, and + // that it modifies the same global variable as the host. 
+ if got, want := common.X, 2; got != want { + log.Fatalf("after loading plugin2, common.X=%d, want %d", got, want) + } + + _, err = plugin.Open("plugin2-dup.so") + if err == nil { + log.Fatal(`plugin.Open("plugin2-dup.so"): duplicate open should have failed`) + } + if s := err.Error(); !strings.Contains(s, "already loaded") { + log.Fatal(`plugin.Open("plugin2.so"): error does not mention "already loaded"`) + } + + _, err = plugin.Open("plugin-mismatch.so") + if err == nil { + log.Fatal(`plugin.Open("plugin-mismatch.so"): should have failed`) + } + if s := err.Error(); !strings.Contains(s, "different version") { + log.Fatalf(`plugin.Open("plugin-mismatch.so"): error does not mention "different version": %v`, s) + } + + _, err = plugin.Open("plugin2-dup.so") + if err == nil { + log.Fatal(`plugin.Open("plugin2-dup.so"): duplicate open after bad plugin should have failed`) + } + _, err = plugin.Open("plugin2.so") + if err != nil { + log.Fatalf(`plugin.Open("plugin2.so"): second open with same name failed: %v`, err) + } + + // Test that unexported types with the same names in + // different plugins do not interfere with each other. + // + // See Issue #21386. + UnexportedNameReuse, _ := p.Lookup("UnexportedNameReuse") + UnexportedNameReuse.(func())() + UnexportedNameReuse, _ = p2.Lookup("UnexportedNameReuse") + UnexportedNameReuse.(func())() + + testUnnamed() + + fmt.Println("PASS") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/iface/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/iface/main.go new file mode 100644 index 0000000000000000000000000000000000000000..c04f28880f5d427c40f618cbb54d91feb52da091 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/iface/main.go @@ -0,0 +1,47 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "log" + "plugin" + + "testplugin/iface_i" +) + +func main() { + a, err := plugin.Open("iface_a.so") + if err != nil { + log.Fatalf(`plugin.Open("iface_a.so"): %v`, err) + } + b, err := plugin.Open("iface_b.so") + if err != nil { + log.Fatalf(`plugin.Open("iface_b.so"): %v`, err) + } + + af, err := a.Lookup("F") + if err != nil { + log.Fatalf(`a.Lookup("F") failed: %v`, err) + } + bf, err := b.Lookup("F") + if err != nil { + log.Fatalf(`b.Lookup("F") failed: %v`, err) + } + if af.(func() interface{})() != bf.(func() interface{})() { + panic("empty interfaces not equal") + } + + ag, err := a.Lookup("G") + if err != nil { + log.Fatalf(`a.Lookup("G") failed: %v`, err) + } + bg, err := b.Lookup("G") + if err != nil { + log.Fatalf(`b.Lookup("G") failed: %v`, err) + } + if ag.(func() iface_i.I)() != bg.(func() iface_i.I)() { + panic("nonempty interfaces not equal") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/iface_a/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/iface_a/a.go new file mode 100644 index 0000000000000000000000000000000000000000..357f7e827ed0227feb2b4942faba9a91ce3dfd06 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/iface_a/a.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "testplugin/iface_i" + +//go:noinline +func F() interface{} { + return (*iface_i.T)(nil) +} + +//go:noinline +func G() iface_i.I { + return (*iface_i.T)(nil) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/iface_b/b.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/iface_b/b.go new file mode 100644 index 0000000000000000000000000000000000000000..357f7e827ed0227feb2b4942faba9a91ce3dfd06 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/iface_b/b.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "testplugin/iface_i" + +//go:noinline +func F() interface{} { + return (*iface_i.T)(nil) +} + +//go:noinline +func G() iface_i.I { + return (*iface_i.T)(nil) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/iface_i/i.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/iface_i/i.go new file mode 100644 index 0000000000000000000000000000000000000000..31c80387c7e56c752e422fb73e81592d1a34c87b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/iface_i/i.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package iface_i + +type I interface { + M() +} + +type T struct { +} + +func (t *T) M() { +} + +// *T implements I diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue18584/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue18584/main.go new file mode 100644 index 0000000000000000000000000000000000000000..c280fd4620371432a356e9004a49f79a1dc5d9c6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue18584/main.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "plugin" + +func main() { + p, err := plugin.Open("plugin.so") + if err != nil { + panic(err) + } + + sym, err := p.Lookup("G") + if err != nil { + panic(err) + } + g := sym.(func() bool) + if !g() { + panic("expected types to match, Issue #18584") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue18584/plugin.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue18584/plugin.go new file mode 100644 index 0000000000000000000000000000000000000000..be0868d375298b97787ec86082bf0a05add06de4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue18584/plugin.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "reflect" + +type C struct { +} + +func F(c *C) *C { + return nil +} + +func G() bool { + var c *C + return reflect.TypeOf(F).Out(0) == reflect.TypeOf(c) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue18676/dynamodbstreamsevt/definition.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue18676/dynamodbstreamsevt/definition.go new file mode 100644 index 0000000000000000000000000000000000000000..70fd054d089ee2cde6447298741454f25c3a0b37 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue18676/dynamodbstreamsevt/definition.go @@ -0,0 +1,13 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dynamodbstreamsevt + +import "encoding/json" + +var foo json.RawMessage + +type Event struct{} + +func (e *Event) Dummy() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue18676/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue18676/main.go new file mode 100644 index 0000000000000000000000000000000000000000..471f3d969ce84e22142c2e6433b3b56ba8493fc4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue18676/main.go @@ -0,0 +1,32 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The bug happened like this: +// 1. The main binary adds an itab for *json.UnsupportedValueError / error +// (concrete type / interface type). This itab goes in hash bucket 0x111. +// 2. The plugin adds that same itab again. That makes a cycle in the itab +// chain rooted at hash bucket 0x111. +// 3. The main binary then asks for the itab for *dynamodbstreamsevt.Event / +// json.Unmarshaler. 
This itab happens to also live in bucket 0x111. +// The lookup code goes into an infinite loop searching for this itab. +// +// The code is carefully crafted so that the two itabs are both from the +// same bucket, and so that the second itab doesn't exist in +// the itab hashmap yet (so the entire linked list must be searched). +package main + +import ( + "encoding/json" + "plugin" + "testplugin/issue18676/dynamodbstreamsevt" +) + +func main() { + plugin.Open("plugin.so") + + var x interface{} = (*dynamodbstreamsevt.Event)(nil) + if _, ok := x.(json.Unmarshaler); !ok { + println("something") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue18676/plugin.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue18676/plugin.go new file mode 100644 index 0000000000000000000000000000000000000000..e7fc74f7774220d9d0c06c2f1985851c42b928d8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue18676/plugin.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "C" + +import "testplugin/issue18676/dynamodbstreamsevt" + +func F(evt *dynamodbstreamsevt.Event) {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue19418/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue19418/main.go new file mode 100644 index 0000000000000000000000000000000000000000..2ec9f9aaaa2f1556296606ffc143287dd68494e3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue19418/main.go @@ -0,0 +1,29 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" + "os" + "plugin" +) + +func main() { + p, err := plugin.Open("plugin.so") + if err != nil { + panic(err) + } + + val, err := p.Lookup("Val") + if err != nil { + panic(err) + } + got := *val.(*string) + const want = "linkstr" + if got != want { + fmt.Fprintf(os.Stderr, "issue19418 value is %q, want %q\n", got, want) + os.Exit(2) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue19418/plugin.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue19418/plugin.go new file mode 100644 index 0000000000000000000000000000000000000000..fe93b161431faeed62d876c236e95a8964760f19 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue19418/plugin.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +var Val = "val-unset" diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue19529/plugin.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue19529/plugin.go new file mode 100644 index 0000000000000000000000000000000000000000..ad2df6cc7c7e74f388da964af36c50321471e91c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue19529/plugin.go @@ -0,0 +1,15 @@ +package main + +import ( + "reflect" +) + +type Foo struct { + Bar string `json:"Bar@baz,omitempty"` +} + +func F() { + println(reflect.TypeOf(Foo{}).Field(0).Tag) +} + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue19534/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue19534/main.go new file mode 100644 index 0000000000000000000000000000000000000000..de263b6f0f2b159dc52ef4401dea2c0c863aa2b2 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue19534/main.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "plugin" + +func main() { + p, err := plugin.Open("plugin.so") + if err != nil { + panic(err) + } + + sym, err := p.Lookup("Foo") + if err != nil { + panic(err) + } + f := sym.(func() int) + if f() != 42 { + panic("expected f() == 42") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue19534/plugin.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue19534/plugin.go new file mode 100644 index 0000000000000000000000000000000000000000..582d33305c94932ac155fe87c92a68208541afe5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue19534/plugin.go @@ -0,0 +1,9 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func Foo() int { + return 42 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue22175/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue22175/main.go new file mode 100644 index 0000000000000000000000000000000000000000..9be9bab9dc35cfb2f9a8e56dc25ebb25096b5fd4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue22175/main.go @@ -0,0 +1,28 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" + "os" + "plugin" +) + +func main() { + p2, err := plugin.Open("issue22175_plugin1.so") + if err != nil { + panic(err) + } + f, err := p2.Lookup("F") + if err != nil { + panic(err) + } + got := f.(func() int)() + const want = 971 + if got != want { + fmt.Fprintf(os.Stderr, "issue22175: F()=%d, want %d", got, want) + os.Exit(1) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue22175/plugin1.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue22175/plugin1.go new file mode 100644 index 0000000000000000000000000000000000000000..5ae6cb631e786e7a7fcfa4f31344dfcf3aa94268 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue22175/plugin1.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "plugin" + +func F() int { + p2, err := plugin.Open("issue22175_plugin2.so") + if err != nil { + panic(err) + } + g, err := p2.Lookup("G") + if err != nil { + panic(err) + } + return g.(func() int)() +} + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue22175/plugin2.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue22175/plugin2.go new file mode 100644 index 0000000000000000000000000000000000000000..f387a192e678b0a37ce630cca19081b7e82b5075 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue22175/plugin2.go @@ -0,0 +1,9 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +func G() int { return 971 } + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue22295.pkg/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue22295.pkg/main.go new file mode 100644 index 0000000000000000000000000000000000000000..44b2a2140ea145fe05f3af6fb3f62db51befb70b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue22295.pkg/main.go @@ -0,0 +1,28 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +import ( + "log" + "plugin" +) + +func main() { + p, err := plugin.Open("issue.22295.so") + if err != nil { + log.Fatal(err) + } + f, err := p.Lookup("F") + if err != nil { + log.Fatal(err) + } + const want = 2503 + got := f.(func() int)() + if got != want { + log.Fatalf("got %d, want %d", got, want) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue22295.pkg/plugin.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue22295.pkg/plugin.go new file mode 100644 index 0000000000000000000000000000000000000000..46b08a405bce9ce98c54f1920c731aa11c90822f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue22295.pkg/plugin.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +var f *int + +func init() { + f = new(int) + *f = 2503 +} + +func F() int { return *f } + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue24351/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue24351/main.go new file mode 100644 index 0000000000000000000000000000000000000000..4107adff7b476cb69db6e58ef00c774bb91fe576 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue24351/main.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "plugin" + +func main() { + p, err := plugin.Open("issue24351.so") + if err != nil { + panic(err) + } + f, err := p.Lookup("B") + if err != nil { + panic(err) + } + c := make(chan bool) + f.(func(chan bool))(c) + <-c +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue24351/plugin.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue24351/plugin.go new file mode 100644 index 0000000000000000000000000000000000000000..db17e0a6097c29686d7f782054f3ad14e7fc10d4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue24351/plugin.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "fmt" + +func B(c chan bool) { + go func() { + fmt.Println(1.5) + c <- true + }() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue25756/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue25756/main.go new file mode 100644 index 0000000000000000000000000000000000000000..817daf42f68270ac0a13006add9bb22696f19c24 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue25756/main.go @@ -0,0 +1,52 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Run the game of life in C using Go for parallelization. + +package main + +import ( + "flag" + "fmt" + "plugin" +) + +const MAXDIM = 100 + +var dim = flag.Int("dim", 16, "board dimensions") +var gen = flag.Int("gen", 10, "generations") + +func main() { + flag.Parse() + + var a [MAXDIM * MAXDIM]int32 + for i := 2; i < *dim; i += 8 { + for j := 2; j < *dim-3; j += 8 { + for y := 0; y < 3; y++ { + a[i**dim+j+y] = 1 + } + } + } + + p, err := plugin.Open("life.so") + if err != nil { + panic(err) + } + f, err := p.Lookup("Run") + if err != nil { + panic(err) + } + f.(func(int, int, int, []int32))(*gen, *dim, *dim, a[:]) + + for i := 0; i < *dim; i++ { + for j := 0; j < *dim; j++ { + if a[i**dim+j] == 0 { + fmt.Print(" ") + } else { + fmt.Print("X") + } + } + fmt.Print("\n") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue25756/plugin/c-life.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue25756/plugin/c-life.c new file mode 100644 index 0000000000000000000000000000000000000000..f853163e2f0c1ebdc067e166fc2ee2f420149a4b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue25756/plugin/c-life.c @@ -0,0 +1,56 @@ +// Copyright 2010 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include +#include "life.h" +#include "_cgo_export.h" + +const int MYCONST = 0; + +// Do the actual manipulation of the life board in C. This could be +// done easily in Go, we are just using C for demonstration +// purposes. +void +Step(int x, int y, int *a, int *n) +{ + struct GoStart_return r; + + // Use Go to start 4 goroutines each of which handles 1/4 of the + // board. + r = GoStart(0, x, y, 0, x / 2, 0, y / 2, a, n); + assert(r.r0 == 0 && r.r1 == 100); // test multiple returns + r = GoStart(1, x, y, x / 2, x, 0, y / 2, a, n); + assert(r.r0 == 1 && r.r1 == 101); // test multiple returns + GoStart(2, x, y, 0, x / 2, y / 2, y, a, n); + GoStart(3, x, y, x / 2, x, y / 2, y, a, n); + GoWait(0); + GoWait(1); + GoWait(2); + GoWait(3); +} + +// The actual computation. This is called in parallel. +void +DoStep(int xdim, int ydim, int xstart, int xend, int ystart, int yend, int *a, int *n) +{ + int x, y, c, i, j; + + for(x = xstart; x < xend; x++) { + for(y = ystart; y < yend; y++) { + c = 0; + for(i = -1; i <= 1; i++) { + for(j = -1; j <= 1; j++) { + if(x+i >= 0 && x+i < xdim && + y+j >= 0 && y+j < ydim && + (i != 0 || j != 0)) + c += a[(x+i)*xdim + (y+j)] != 0; + } + } + if(c == 3 || (c == 2 && a[x*xdim + y] != 0)) + n[x*xdim + y] = 1; + else + n[x*xdim + y] = 0; + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue25756/plugin/life.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue25756/plugin/life.go new file mode 100644 index 0000000000000000000000000000000000000000..468bc6fab6d463989b63ee119f4e2c5b1dc91098 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue25756/plugin/life.go @@ -0,0 +1,40 @@ +// Copyright 2010 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// #include "life.h" +import "C" + +import "unsafe" + +func Run(gen, x, y int, a []int32) { + n := make([]int32, x*y) + for i := 0; i < gen; i++ { + C.Step(C.int(x), C.int(y), (*C.int)(unsafe.Pointer(&a[0])), (*C.int)(unsafe.Pointer(&n[0]))) + copy(a, n) + } +} + +// Keep the channels visible from Go. +var chans [4]chan bool + +// Double return value is just for testing. +// +//export GoStart +func GoStart(i, xdim, ydim, xstart, xend, ystart, yend C.int, a *C.int, n *C.int) (int, int) { + c := make(chan bool, int(C.MYCONST)) + go func() { + C.DoStep(xdim, ydim, xstart, xend, ystart, yend, a, n) + c <- true + }() + chans[i] = c + return int(i), int(i + 100) +} + +//export GoWait +func GoWait(i C.int) { + <-chans[i] + chans[i] = nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue25756/plugin/life.h b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue25756/plugin/life.h new file mode 100644 index 0000000000000000000000000000000000000000..11d2b97226767bb0d4c86ad0dc7a8f991389e54a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue25756/plugin/life.h @@ -0,0 +1,7 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +extern void Step(int, int, int *, int *); +extern void DoStep(int, int, int, int, int, int, int *, int *); +extern const int MYCONST; diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue44956/base/base.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue44956/base/base.go new file mode 100644 index 0000000000000000000000000000000000000000..609aa0dff4e812a92d227076fa97f528461bda2c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue44956/base/base.go @@ -0,0 +1,7 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package base + +var X = &map[int]int{123: 456} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue44956/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue44956/main.go new file mode 100644 index 0000000000000000000000000000000000000000..287a60585e0b03c2e3abad65547728f793d30fa0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue44956/main.go @@ -0,0 +1,47 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 44956: writable static temp is not exported correctly. +// In the test below, package base is +// +// X = &map{...} +// +// which compiles to +// +// X = &stmp // static +// stmp = makemap(...) // in init function +// +// plugin1 and plugin2 both import base. plugin1 doesn't use +// base.X, so that symbol is deadcoded in plugin1. +// +// plugin1 is loaded first. base.init runs at that point, which +// initialize base.stmp. +// +// plugin2 is then loaded. base.init already ran, so it doesn't run +// again. 
When base.stmp is not exported, plugin2's base.X points to +// its own private base.stmp, which is not initialized, fail. + +package main + +import "plugin" + +func main() { + _, err := plugin.Open("issue44956p1.so") + if err != nil { + panic("FAIL") + } + + p2, err := plugin.Open("issue44956p2.so") + if err != nil { + panic("FAIL") + } + f, err := p2.Lookup("F") + if err != nil { + panic("FAIL") + } + x := f.(func() *map[int]int)() + if x == nil || (*x)[123] != 456 { + panic("FAIL") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue44956/plugin1.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue44956/plugin1.go new file mode 100644 index 0000000000000000000000000000000000000000..499fa31abf8da03021063ee4cf57361954696741 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue44956/plugin1.go @@ -0,0 +1,9 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import _ "testplugin/issue44956/base" + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue44956/plugin2.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue44956/plugin2.go new file mode 100644 index 0000000000000000000000000000000000000000..a73542ca716a61bd4f05630c086ca329e1d33730 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue44956/plugin2.go @@ -0,0 +1,11 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "testplugin/issue44956/base" + +func F() *map[int]int { return base.X } + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue52937/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue52937/main.go new file mode 100644 index 0000000000000000000000000000000000000000..66f09effea28c3bea14063532a07542257cc9ddd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue52937/main.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func main() {} +func F[T any]() {} +func G[T any](T) {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue53989/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue53989/main.go new file mode 100644 index 0000000000000000000000000000000000000000..6907dfd858096ced64e6be526ab5d5681a75d0eb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue53989/main.go @@ -0,0 +1,32 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 53989: the use of jump table caused a function +// from the plugin jumps in the middle of the function +// to the function with the same name in the main +// executable. As these two functions may be compiled +// differently as plugin needs to be PIC, this causes +// crash. 
+ +package main + +import ( + "plugin" + + "testplugin/issue53989/p" +) + +func main() { + p.Square(7) // call the function in main executable + + p, err := plugin.Open("issue53989.so") + if err != nil { + panic(err) + } + f, err := p.Lookup("Square") + if err != nil { + panic(err) + } + f.(func(int))(7) // call the plugin one +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue53989/p/p.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue53989/p/p.go new file mode 100644 index 0000000000000000000000000000000000000000..02567c1cee07bb7ef57e4630c39f7af399f1349a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue53989/p/p.go @@ -0,0 +1,52 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +import ( + "fmt" + "runtime" +) + +var y int + +//go:noinline +func Square(x int) { + var pc0, pc1 [1]uintptr + runtime.Callers(1, pc0[:]) // get PC at entry + + // a switch using jump table + switch x { + case 1: + y = 1 + case 2: + y = 4 + case 3: + y = 9 + case 4: + y = 16 + case 5: + y = 25 + case 6: + y = 36 + case 7: + y = 49 + case 8: + y = 64 + default: + panic("too large") + } + + // check PC is in the same function + runtime.Callers(1, pc1[:]) + if pc1[0] < pc0[0] || pc1[0] > pc0[0]+1000000 { + fmt.Printf("jump across DSO boundary. 
pc0=%x, pc1=%x\n", pc0[0], pc1[0]) + panic("FAIL") + } + + if y != x*x { + fmt.Printf("x=%d y=%d!=%d\n", x, y, x*x) + panic("FAIL") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue53989/plugin.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue53989/plugin.go new file mode 100644 index 0000000000000000000000000000000000000000..a753ee4419d735d9267e0ce3f37b9679e4172562 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue53989/plugin.go @@ -0,0 +1,13 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "testplugin/issue53989/p" + +func Square(x int) { // export Square for plugin + p.Square(x) +} + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue62430/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue62430/main.go new file mode 100644 index 0000000000000000000000000000000000000000..80108407c27d155be0bda1f9d99e8d794c6cc035 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue62430/main.go @@ -0,0 +1,35 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 62430: a program that uses plugins may appear +// to have no references to an initialized global map variable defined +// in some stdlib package (ex: unicode), however there +// may be references to that map var from a plugin that +// gets loaded. 
+ +package main + +import ( + "fmt" + "plugin" + "unicode" +) + +func main() { + p, err := plugin.Open("issue62430.so") + if err != nil { + panic(err) + } + s, err := p.Lookup("F") + if err != nil { + panic(err) + } + + f := s.(func(string) *unicode.RangeTable) + if f("C") == nil { + panic("unicode.Categories not properly initialized") + } else { + fmt.Println("unicode.Categories properly initialized") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue62430/plugin.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue62430/plugin.go new file mode 100644 index 0000000000000000000000000000000000000000..e42cd8bb77c24ccd56a8c765d631d907d755d995 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue62430/plugin.go @@ -0,0 +1,11 @@ +package main + +import ( + "unicode" +) + +func F(s string) *unicode.RangeTable { + return unicode.Categories[s] +} + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue67976/plugin.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue67976/plugin.go new file mode 100644 index 0000000000000000000000000000000000000000..502ecc5c4750f4920759768a408eb197f3afc022 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/issue67976/plugin.go @@ -0,0 +1,16 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "io" + "runtime/pprof" +) + +func main() {} + +func Start() { + pprof.StartCPUProfile(io.Discard) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/mangle/plugin.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/mangle/plugin.go new file mode 100644 index 0000000000000000000000000000000000000000..e1ccb70672cd1506d1be6505ac8aacde73e27921 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/mangle/plugin.go @@ -0,0 +1,38 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test cases for symbol name mangling. + +package main + +import ( + "fmt" + "strings" +) + +// Issue 58800: +// Instantiated function name may contain weird characters +// that confuse the external linker, so it needs to be +// mangled. +type S struct { + X int `parser:"|@@)"` +} + +//go:noinline +func F[T any]() {} + +func P() { + F[S]() +} + +// Issue 62098: the name mangling code doesn't handle some string +// symbols correctly. +func G(id string) error { + if strings.ContainsAny(id, "&$@;/:+,?\\{^}%`]\">[~<#|") { + return fmt.Errorf("invalid") + } + return nil +} + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method/main.go new file mode 100644 index 0000000000000000000000000000000000000000..5e9189b450077f952fb95cd97f121bb0982e8f4f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method/main.go @@ -0,0 +1,26 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 42579: methods of symbols exported from plugin must be live. 
+ +package main + +import ( + "plugin" + "reflect" +) + +func main() { + p, err := plugin.Open("plugin.so") + if err != nil { + panic(err) + } + + x, err := p.Lookup("X") + if err != nil { + panic(err) + } + + reflect.ValueOf(x).Elem().MethodByName("M").Call(nil) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method/plugin.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method/plugin.go new file mode 100644 index 0000000000000000000000000000000000000000..240edd3bc4500c95dc51f21702506a2cab530f3c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method/plugin.go @@ -0,0 +1,13 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func main() {} + +type T int + +func (T) M() { println("M") } + +var X T diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method2/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method2/main.go new file mode 100644 index 0000000000000000000000000000000000000000..89afbda3d479af1e0c3f520bbd000c973d9af2dd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method2/main.go @@ -0,0 +1,32 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// A type can be passed to a plugin and converted to interface +// there. So its methods need to be live. 
+ +package main + +import ( + "plugin" + + "testplugin/method2/p" +) + +var t p.T + +type I interface{ M() } + +func main() { + pl, err := plugin.Open("method2.so") + if err != nil { + panic(err) + } + + f, err := pl.Lookup("F") + if err != nil { + panic(err) + } + + f.(func(p.T) interface{})(t).(I).M() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method2/p/p.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method2/p/p.go new file mode 100644 index 0000000000000000000000000000000000000000..acb526acec9a0f7b677f52ff1e024f0b3b6523a9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method2/p/p.go @@ -0,0 +1,9 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +type T int + +func (T) M() { println("M") } diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method2/plugin.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method2/plugin.go new file mode 100644 index 0000000000000000000000000000000000000000..6198e7648ee204227c6e05d57c70d4b2fc39f5d0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method2/plugin.go @@ -0,0 +1,11 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "testplugin/method2/p" + +func main() {} + +func F(t p.T) interface{} { return t } diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method3/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method3/main.go new file mode 100644 index 0000000000000000000000000000000000000000..a3a51711cda5ae2d35bf3d1c08a1dab1105e88ca --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method3/main.go @@ -0,0 +1,32 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// An unexported method can be reachable from the plugin via interface +// when a package is shared. So it need to be live. + +package main + +import ( + "plugin" + + "testplugin/method3/p" +) + +var i p.I + +func main() { + pl, err := plugin.Open("method3.so") + if err != nil { + panic(err) + } + + f, err := pl.Lookup("F") + if err != nil { + panic(err) + } + + f.(func())() + + i = p.T(123) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method3/p/p.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method3/p/p.go new file mode 100644 index 0000000000000000000000000000000000000000..f72f7c715cfab25312b5b097ff0176d78ba7cbd5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method3/p/p.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +type T int + +func (T) m() { println("m") } + +type I interface{ m() } + +func F() { + i.m() +} + +var i I = T(123) diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method3/plugin.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method3/plugin.go new file mode 100644 index 0000000000000000000000000000000000000000..bd25b31857e92d6e4524ac65915058600920bc00 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/method3/plugin.go @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "testplugin/method3/p" + +func main() {} + +func F() { p.F() } diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/plugin1/plugin1.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/plugin1/plugin1.go new file mode 100644 index 0000000000000000000000000000000000000000..d29d674ade072885c7ca314f0a29c693fcd5d1ff --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/plugin1/plugin1.go @@ -0,0 +1,57 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// // No C code required. +import "C" + +import ( + "reflect" + + "testplugin/common" +) + +func F() int { + _ = make([]byte, 1<<21) // trigger stack unwind, Issue #18190. 
+ return 3 +} + +func ReadCommonX() int { + return common.X +} + +var Seven int + +func call(fn func()) { + fn() +} + +func g() { + common.X *= Seven +} + +func init() { + Seven = 7 + call(g) +} + +type sameNameReusedInPlugins struct { + X string +} + +type sameNameHolder struct { + F *sameNameReusedInPlugins +} + +func UnexportedNameReuse() { + h := sameNameHolder{} + v := reflect.ValueOf(&h).Elem().Field(0) + newval := reflect.New(v.Type().Elem()) + v.Set(newval) +} + +func main() { + panic("plugin1.main called") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/plugin2/plugin2.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/plugin2/plugin2.go new file mode 100644 index 0000000000000000000000000000000000000000..31ed642ca5bb5fbe6f251be440f70308d131a5dd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/plugin2/plugin2.go @@ -0,0 +1,44 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +//#include +//#include +import "C" + +// #include +// void cfunc() {} // uses cgo_topofstack + +import ( + "reflect" + "strings" + + "testplugin/common" +) + +func init() { + _ = strings.NewReplacer() // trigger stack unwind, Issue #18190. 
+ C.strerror(C.EIO) // uses cgo_topofstack + common.X = 2 +} + +type sameNameReusedInPlugins struct { + X string +} + +type sameNameHolder struct { + F *sameNameReusedInPlugins +} + +func UnexportedNameReuse() { + h := sameNameHolder{} + v := reflect.ValueOf(&h).Elem().Field(0) + newval := reflect.New(v.Type().Elem()) + v.Set(newval) +} + +func main() { + panic("plugin1.main called") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/sub/plugin1/plugin1.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/sub/plugin1/plugin1.go new file mode 100644 index 0000000000000000000000000000000000000000..5f891b09a334bab8331c1363d22d91aaf1641486 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/sub/plugin1/plugin1.go @@ -0,0 +1,23 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// // No C code required. +import "C" + +import "testplugin/common" + +func F() int { return 17 } + +var FuncVar = func() {} + +func ReadCommonX() int { + FuncVar() + return common.X +} + +func main() { + panic("plugin1.main called") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/unnamed1/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/unnamed1/main.go new file mode 100644 index 0000000000000000000000000000000000000000..1620dc48ce06cf2aa33cf3f94d28c7b83513078d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/unnamed1/main.go @@ -0,0 +1,25 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +// // No C code required. 
+import "C" + +func FuncInt() int { return 1 } + +// Add a recursive type to check that type equality across plugins doesn't +// crash. See https://golang.org/issues/19258 +func FuncRecursive() X { return X{} } + +type Y struct { + X *X +} +type X struct { + Y Y +} + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/unnamed2/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/unnamed2/main.go new file mode 100644 index 0000000000000000000000000000000000000000..027ef6475ce94c20c7f660567cd574cbfa0ad66e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testplugin/testdata/unnamed2/main.go @@ -0,0 +1,23 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +// // No C code required. +import "C" + +func FuncInt() int { return 2 } + +func FuncRecursive() X { return X{} } + +type Y struct { + X *X +} +type X struct { + Y Y +} + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/asan_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/asan_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7db356244a85a8bd164468acca74dcd9f736e0ec --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/asan_test.go @@ -0,0 +1,149 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build linux || (freebsd && amd64) + +package sanitizers_test + +import ( + "fmt" + "internal/platform" + "internal/testenv" + "strings" + "testing" +) + +func TestASAN(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + goos, err := goEnv("GOOS") + if err != nil { + t.Fatal(err) + } + goarch, err := goEnv("GOARCH") + if err != nil { + t.Fatal(err) + } + // The asan tests require support for the -asan option. + if !platform.ASanSupported(goos, goarch) { + t.Skipf("skipping on %s/%s; -asan option is not supported.", goos, goarch) + } + // The current implementation is only compatible with the ASan library from version + // v7 to v9 (See the description in src/runtime/asan/asan.go). Therefore, using the + // -asan option must use a compatible version of ASan library, which requires that + // the gcc version is not less than 7 and the clang version is not less than 9, + // otherwise a segmentation fault will occur. + if !compilerRequiredAsanVersion(goos, goarch) { + t.Skipf("skipping on %s/%s: too old version of compiler", goos, goarch) + } + + t.Parallel() + requireOvercommit(t) + config := configure("address") + config.skipIfCSanitizerBroken(t) + + mustRun(t, config.goCmd("build", "std")) + + cases := []struct { + src string + memoryAccessError string + errorLocation string + experiments []string + }{ + {src: "asan1_fail.go", memoryAccessError: "heap-use-after-free", errorLocation: "asan1_fail.go:25"}, + {src: "asan2_fail.go", memoryAccessError: "heap-buffer-overflow", errorLocation: "asan2_fail.go:31"}, + {src: "asan3_fail.go", memoryAccessError: "use-after-poison", errorLocation: "asan3_fail.go:13"}, + {src: "asan4_fail.go", memoryAccessError: "use-after-poison", errorLocation: "asan4_fail.go:13"}, + {src: "asan5_fail.go", memoryAccessError: "use-after-poison", errorLocation: "asan5_fail.go:18"}, + {src: "asan_useAfterReturn.go"}, + {src: "asan_unsafe_fail1.go", memoryAccessError: "use-after-poison", errorLocation: 
"asan_unsafe_fail1.go:25"}, + {src: "asan_unsafe_fail2.go", memoryAccessError: "use-after-poison", errorLocation: "asan_unsafe_fail2.go:25"}, + {src: "asan_unsafe_fail3.go", memoryAccessError: "use-after-poison", errorLocation: "asan_unsafe_fail3.go:18"}, + {src: "asan_global1_fail.go", memoryAccessError: "global-buffer-overflow", errorLocation: "asan_global1_fail.go:12"}, + {src: "asan_global2_fail.go", memoryAccessError: "global-buffer-overflow", errorLocation: "asan_global2_fail.go:19"}, + {src: "asan_global3_fail.go", memoryAccessError: "global-buffer-overflow", errorLocation: "asan_global3_fail.go:13"}, + {src: "asan_global4_fail.go", memoryAccessError: "global-buffer-overflow", errorLocation: "asan_global4_fail.go:21"}, + {src: "asan_global5.go"}, + {src: "arena_fail.go", memoryAccessError: "use-after-poison", errorLocation: "arena_fail.go:26", experiments: []string{"arenas"}}, + } + for _, tc := range cases { + tc := tc + name := strings.TrimSuffix(tc.src, ".go") + t.Run(name, func(t *testing.T) { + t.Parallel() + + dir := newTempDir(t) + defer dir.RemoveAll(t) + + outPath := dir.Join(name) + mustRun(t, config.goCmdWithExperiments("build", []string{"-o", outPath, srcPath(tc.src)}, tc.experiments)) + + cmd := hangProneCmd(outPath) + if tc.memoryAccessError != "" { + outb, err := cmd.CombinedOutput() + out := string(outb) + if err != nil && strings.Contains(out, tc.memoryAccessError) { + // This string is output if the + // sanitizer library needs a + // symbolizer program and can't find it. + const noSymbolizer = "external symbolizer" + // Check if -asan option can correctly print where the error occurred. 
+ if tc.errorLocation != "" && + !strings.Contains(out, tc.errorLocation) && + !strings.Contains(out, noSymbolizer) && + compilerSupportsLocation() { + + t.Errorf("%#q exited without expected location of the error\n%s; got failure\n%s", strings.Join(cmd.Args, " "), tc.errorLocation, out) + } + return + } + t.Fatalf("%#q exited without expected memory access error\n%s; got failure\n%s", strings.Join(cmd.Args, " "), tc.memoryAccessError, out) + } + mustRun(t, cmd) + }) + } +} + +func TestASANLinkerX(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + // Test ASAN with linker's -X flag (see issue 56175). + goos, err := goEnv("GOOS") + if err != nil { + t.Fatal(err) + } + goarch, err := goEnv("GOARCH") + if err != nil { + t.Fatal(err) + } + // The asan tests require support for the -asan option. + if !platform.ASanSupported(goos, goarch) { + t.Skipf("skipping on %s/%s; -asan option is not supported.", goos, goarch) + } + if !compilerRequiredAsanVersion(goos, goarch) { + t.Skipf("skipping on %s/%s: too old version of compiler", goos, goarch) + } + + t.Parallel() + requireOvercommit(t) + config := configure("address") + config.skipIfCSanitizerBroken(t) + + dir := newTempDir(t) + defer dir.RemoveAll(t) + + var ldflags string + for i := 1; i <= 10; i++ { + ldflags += fmt.Sprintf("-X=main.S%d=%d -X=cmd/cgo/internal/testsanitizers/testdata/asan_linkerx/p.S%d=%d ", i, i, i, i) + } + + // build the binary + outPath := dir.Join("main.exe") + cmd := config.goCmd("build", "-ldflags="+ldflags, "-o", outPath) + cmd.Dir = srcPath("asan_linkerx") + mustRun(t, cmd) + + // run the binary + mustRun(t, hangProneCmd(outPath)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/cc_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/cc_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e650de835ab42ecfd348820b1c094dad885a7642 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/cc_test.go @@ -0,0 +1,588 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This test uses the Pdeathsig field of syscall.SysProcAttr, so it only works +// on platforms that support that. + +//go:build linux || (freebsd && amd64) + +// sanitizers_test checks the use of Go with sanitizers like msan, asan, etc. +// See https://github.com/google/sanitizers. +package sanitizers_test + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "internal/testenv" + "os" + "os/exec" + "os/user" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "syscall" + "testing" + "time" + "unicode" +) + +var overcommit struct { + sync.Once + value int + err error +} + +// requireOvercommit skips t if the kernel does not allow overcommit. +func requireOvercommit(t *testing.T) { + t.Helper() + + overcommit.Once.Do(func() { + var out []byte + out, overcommit.err = os.ReadFile("/proc/sys/vm/overcommit_memory") + if overcommit.err != nil { + return + } + overcommit.value, overcommit.err = strconv.Atoi(string(bytes.TrimSpace(out))) + }) + + if overcommit.err != nil { + t.Skipf("couldn't determine vm.overcommit_memory (%v); assuming no overcommit", overcommit.err) + } + if overcommit.value == 2 { + t.Skip("vm.overcommit_memory=2") + } +} + +var env struct { + sync.Once + m map[string]string + err error +} + +// goEnv returns the output of $(go env) as a map. 
+func goEnv(key string) (string, error) { + env.Once.Do(func() { + var out []byte + out, env.err = exec.Command("go", "env", "-json").Output() + if env.err != nil { + return + } + + env.m = make(map[string]string) + env.err = json.Unmarshal(out, &env.m) + }) + if env.err != nil { + return "", env.err + } + + v, ok := env.m[key] + if !ok { + return "", fmt.Errorf("`go env`: no entry for %v", key) + } + return v, nil +} + +// replaceEnv sets the key environment variable to value in cmd. +func replaceEnv(cmd *exec.Cmd, key, value string) { + if cmd.Env == nil { + cmd.Env = cmd.Environ() + } + cmd.Env = append(cmd.Env, key+"="+value) +} + +// appendExperimentEnv appends comma-separated experiments to GOEXPERIMENT. +func appendExperimentEnv(cmd *exec.Cmd, experiments []string) { + if cmd.Env == nil { + cmd.Env = cmd.Environ() + } + exps := strings.Join(experiments, ",") + for _, evar := range cmd.Env { + c := strings.SplitN(evar, "=", 2) + if c[0] == "GOEXPERIMENT" { + exps = c[1] + "," + exps + } + } + cmd.Env = append(cmd.Env, "GOEXPERIMENT="+exps) +} + +// mustRun executes t and fails cmd with a well-formatted message if it fails. +func mustRun(t *testing.T, cmd *exec.Cmd) { + t.Helper() + out := new(strings.Builder) + cmd.Stdout = out + cmd.Stderr = out + + err := cmd.Start() + if err != nil { + t.Fatalf("%v: %v", cmd, err) + } + + if deadline, ok := t.Deadline(); ok { + timeout := time.Until(deadline) + timeout -= timeout / 10 // Leave 10% headroom for logging and cleanup. + timer := time.AfterFunc(timeout, func() { + cmd.Process.Signal(syscall.SIGQUIT) + }) + defer timer.Stop() + } + + if err := cmd.Wait(); err != nil { + t.Fatalf("%v exited with %v\n%s", cmd, err, out) + } +} + +// cc returns a cmd that executes `$(go env CC) $(go env GOGCCFLAGS) $args`. 
+func cc(args ...string) (*exec.Cmd, error) { + CC, err := goEnv("CC") + if err != nil { + return nil, err + } + + GOGCCFLAGS, err := goEnv("GOGCCFLAGS") + if err != nil { + return nil, err + } + + // Split GOGCCFLAGS, respecting quoting. + // + // TODO(bcmills): This code also appears in + // cmd/cgo/internal/testcarchive/carchive_test.go, and perhaps ought to go in + // src/cmd/dist/test.go as well. Figure out where to put it so that it can be + // shared. + var flags []string + quote := '\000' + start := 0 + lastSpace := true + backslash := false + for i, c := range GOGCCFLAGS { + if quote == '\000' && unicode.IsSpace(c) { + if !lastSpace { + flags = append(flags, GOGCCFLAGS[start:i]) + lastSpace = true + } + } else { + if lastSpace { + start = i + lastSpace = false + } + if quote == '\000' && !backslash && (c == '"' || c == '\'') { + quote = c + backslash = false + } else if !backslash && quote == c { + quote = '\000' + } else if (quote == '\000' || quote == '"') && !backslash && c == '\\' { + backslash = true + } else { + backslash = false + } + } + } + if !lastSpace { + flags = append(flags, GOGCCFLAGS[start:]) + } + + cmd := exec.Command(CC, flags...) + cmd.Args = append(cmd.Args, args...) + return cmd, nil +} + +type version struct { + name string + major, minor int +} + +var compiler struct { + sync.Once + version + err error +} + +// compilerVersion detects the version of $(go env CC). +// +// It returns a non-nil error if the compiler matches a known version schema but +// the version could not be parsed, or if $(go env CC) could not be determined. +func compilerVersion() (version, error) { + compiler.Once.Do(func() { + compiler.err = func() error { + compiler.name = "unknown" + + cmd, err := cc("--version") + if err != nil { + return err + } + out, err := cmd.Output() + if err != nil { + // Compiler does not support "--version" flag: not Clang or GCC. 
+ return nil + } + + var match [][]byte + if bytes.HasPrefix(out, []byte("gcc")) { + compiler.name = "gcc" + cmd, err := cc("-dumpfullversion", "-dumpversion") + if err != nil { + return err + } + out, err := cmd.Output() + if err != nil { + // gcc, but does not support gcc's "-v" flag?! + return err + } + gccRE := regexp.MustCompile(`(\d+)\.(\d+)`) + match = gccRE.FindSubmatch(out) + } else { + clangRE := regexp.MustCompile(`clang version (\d+)\.(\d+)`) + if match = clangRE.FindSubmatch(out); len(match) > 0 { + compiler.name = "clang" + } + } + + if len(match) < 3 { + return nil // "unknown" + } + if compiler.major, err = strconv.Atoi(string(match[1])); err != nil { + return err + } + if compiler.minor, err = strconv.Atoi(string(match[2])); err != nil { + return err + } + return nil + }() + }) + return compiler.version, compiler.err +} + +// compilerSupportsLocation reports whether the compiler should be +// able to provide file/line information in backtraces. +func compilerSupportsLocation() bool { + compiler, err := compilerVersion() + if err != nil { + return false + } + switch compiler.name { + case "gcc": + return compiler.major >= 10 + case "clang": + // TODO(65606): The clang toolchain on the LUCI builders is not built against + // zlib, the ASAN runtime can't actually symbolize its own stack trace. Once + // this is resolved, one way or another, switch this back to 'true'. We still + // have coverage from the 'gcc' case above. + if inLUCIBuild() { + return false + } + return true + default: + return false + } +} + +// inLUCIBuild returns true if we're currently executing in a LUCI build. +func inLUCIBuild() bool { + u, err := user.Current() + if err != nil { + return false + } + return testenv.Builder() != "" && u.Username == "swarming" +} + +// compilerRequiredTsanVersion reports whether the compiler is the version required by Tsan. +// Only restrictions for ppc64le are known; otherwise return true. 
+func compilerRequiredTsanVersion(goos, goarch string) bool { + compiler, err := compilerVersion() + if err != nil { + return false + } + if compiler.name == "gcc" && goarch == "ppc64le" { + return compiler.major >= 9 + } + return true +} + +// compilerRequiredAsanVersion reports whether the compiler is the version required by Asan. +func compilerRequiredAsanVersion(goos, goarch string) bool { + compiler, err := compilerVersion() + if err != nil { + return false + } + switch compiler.name { + case "gcc": + if goarch == "loong64" { + return compiler.major >= 14 + } + if goarch == "ppc64le" { + return compiler.major >= 9 + } + return compiler.major >= 7 + case "clang": + if goarch == "loong64" { + return compiler.major >= 16 + } + return compiler.major >= 9 + default: + return false + } +} + +type compilerCheck struct { + once sync.Once + err error + skip bool // If true, skip with err instead of failing with it. +} + +type config struct { + sanitizer string + + cFlags, ldFlags, goFlags []string + + sanitizerCheck, runtimeCheck compilerCheck +} + +var configs struct { + sync.Mutex + m map[string]*config +} + +// configure returns the configuration for the given sanitizer. +func configure(sanitizer string) *config { + configs.Lock() + defer configs.Unlock() + if c, ok := configs.m[sanitizer]; ok { + return c + } + + c := &config{ + sanitizer: sanitizer, + cFlags: []string{"-fsanitize=" + sanitizer}, + ldFlags: []string{"-fsanitize=" + sanitizer}, + } + + if testing.Verbose() { + c.goFlags = append(c.goFlags, "-x") + } + + switch sanitizer { + case "memory": + c.goFlags = append(c.goFlags, "-msan") + + case "thread": + c.goFlags = append(c.goFlags, "--installsuffix=tsan") + compiler, _ := compilerVersion() + if compiler.name == "gcc" { + c.cFlags = append(c.cFlags, "-fPIC") + c.ldFlags = append(c.ldFlags, "-fPIC", "-static-libtsan") + } + + case "address": + c.goFlags = append(c.goFlags, "-asan") + // Set the debug mode to print the C stack trace. 
+ c.cFlags = append(c.cFlags, "-g") + + case "fuzzer": + c.goFlags = append(c.goFlags, "-tags=libfuzzer", "-gcflags=-d=libfuzzer") + + default: + panic(fmt.Sprintf("unrecognized sanitizer: %q", sanitizer)) + } + + if configs.m == nil { + configs.m = make(map[string]*config) + } + configs.m[sanitizer] = c + return c +} + +// goCmd returns a Cmd that executes "go $subcommand $args" with appropriate +// additional flags and environment. +func (c *config) goCmd(subcommand string, args ...string) *exec.Cmd { + return c.goCmdWithExperiments(subcommand, args, nil) +} + +// goCmdWithExperiments returns a Cmd that executes +// "GOEXPERIMENT=$experiments go $subcommand $args" with appropriate +// additional flags and CGO-related environment variables. +func (c *config) goCmdWithExperiments(subcommand string, args []string, experiments []string) *exec.Cmd { + cmd := exec.Command("go", subcommand) + cmd.Args = append(cmd.Args, c.goFlags...) + cmd.Args = append(cmd.Args, args...) + replaceEnv(cmd, "CGO_CFLAGS", strings.Join(c.cFlags, " ")) + replaceEnv(cmd, "CGO_LDFLAGS", strings.Join(c.ldFlags, " ")) + appendExperimentEnv(cmd, experiments) + return cmd +} + +// skipIfCSanitizerBroken skips t if the C compiler does not produce working +// binaries as configured. 
+func (c *config) skipIfCSanitizerBroken(t *testing.T) { + check := &c.sanitizerCheck + check.once.Do(func() { + check.skip, check.err = c.checkCSanitizer() + }) + if check.err != nil { + t.Helper() + if check.skip { + t.Skip(check.err) + } + t.Fatal(check.err) + } +} + +var cMain = []byte(` +int main() { + return 0; +} +`) + +var cLibFuzzerInput = []byte(` +#include +int LLVMFuzzerTestOneInput(char *data, size_t size) { + return 0; +} +`) + +func (c *config) checkCSanitizer() (skip bool, err error) { + dir, err := os.MkdirTemp("", c.sanitizer) + if err != nil { + return false, fmt.Errorf("failed to create temp directory: %v", err) + } + defer os.RemoveAll(dir) + + src := filepath.Join(dir, "return0.c") + cInput := cMain + if c.sanitizer == "fuzzer" { + // libFuzzer generates the main function itself, and uses a different input. + cInput = cLibFuzzerInput + } + if err := os.WriteFile(src, cInput, 0600); err != nil { + return false, fmt.Errorf("failed to write C source file: %v", err) + } + + dst := filepath.Join(dir, "return0") + cmd, err := cc(c.cFlags...) + if err != nil { + return false, err + } + cmd.Args = append(cmd.Args, c.ldFlags...) + cmd.Args = append(cmd.Args, "-o", dst, src) + out, err := cmd.CombinedOutput() + if err != nil { + if bytes.Contains(out, []byte("-fsanitize")) && + (bytes.Contains(out, []byte("unrecognized")) || + bytes.Contains(out, []byte("unsupported"))) { + return true, errors.New(string(out)) + } + return true, fmt.Errorf("%#q failed: %v\n%s", strings.Join(cmd.Args, " "), err, out) + } + + if c.sanitizer == "fuzzer" { + // For fuzzer, don't try running the test binary. It never finishes. 
+ return false, nil + } + + if out, err := exec.Command(dst).CombinedOutput(); err != nil { + if os.IsNotExist(err) { + return true, fmt.Errorf("%#q failed to produce executable: %v", strings.Join(cmd.Args, " "), err) + } + snippet, _, _ := bytes.Cut(out, []byte("\n")) + return true, fmt.Errorf("%#q generated broken executable: %v\n%s", strings.Join(cmd.Args, " "), err, snippet) + } + + return false, nil +} + +// skipIfRuntimeIncompatible skips t if the Go runtime is suspected not to work +// with cgo as configured. +func (c *config) skipIfRuntimeIncompatible(t *testing.T) { + check := &c.runtimeCheck + check.once.Do(func() { + check.skip, check.err = c.checkRuntime() + }) + if check.err != nil { + t.Helper() + if check.skip { + t.Skip(check.err) + } + t.Fatal(check.err) + } +} + +func (c *config) checkRuntime() (skip bool, err error) { + if c.sanitizer != "thread" { + return false, nil + } + + // libcgo.h sets CGO_TSAN if it detects TSAN support in the C compiler. + // Dump the preprocessor defines to check that works. + // (Sometimes it doesn't: see https://golang.org/issue/15983.) + cmd, err := cc(c.cFlags...) + if err != nil { + return false, err + } + cmd.Args = append(cmd.Args, "-dM", "-E", "../../../../runtime/cgo/libcgo.h") + cmdStr := strings.Join(cmd.Args, " ") + out, err := cmd.CombinedOutput() + if err != nil { + return false, fmt.Errorf("%#q exited with %v\n%s", cmdStr, err, out) + } + if !bytes.Contains(out, []byte("#define CGO_TSAN")) { + return true, fmt.Errorf("%#q did not define CGO_TSAN", cmdStr) + } + return false, nil +} + +// srcPath returns the path to the given file relative to this test's source tree. +func srcPath(path string) string { + return filepath.Join("testdata", path) +} + +// A tempDir manages a temporary directory within a test. 
+type tempDir struct { + base string +} + +func (d *tempDir) RemoveAll(t *testing.T) { + t.Helper() + if d.base == "" { + return + } + if err := os.RemoveAll(d.base); err != nil { + t.Fatalf("Failed to remove temp dir: %v", err) + } +} + +func (d *tempDir) Base() string { + return d.base +} + +func (d *tempDir) Join(name string) string { + return filepath.Join(d.base, name) +} + +func newTempDir(t *testing.T) *tempDir { + t.Helper() + dir, err := os.MkdirTemp("", filepath.Dir(t.Name())) + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + return &tempDir{base: dir} +} + +// hangProneCmd returns an exec.Cmd for a command that is likely to hang. +// +// If one of these tests hangs, the caller is likely to kill the test process +// using SIGINT, which will be sent to all of the processes in the test's group. +// Unfortunately, TSAN in particular is prone to dropping signals, so the SIGINT +// may terminate the test binary but leave the subprocess running. hangProneCmd +// configures subprocess to receive SIGKILL instead to ensure that it won't +// leak. +func hangProneCmd(name string, arg ...string) *exec.Cmd { + cmd := exec.Command(name, arg...) + cmd.SysProcAttr = &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGKILL, + } + return cmd +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/cshared_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/cshared_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f26c50a6219a5128183f014f04465da9acc459cf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/cshared_test.go @@ -0,0 +1,98 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build linux || (freebsd && amd64) + +package sanitizers_test + +import ( + "fmt" + "internal/platform" + "internal/testenv" + "os" + "strings" + "testing" +) + +func TestShared(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "c-shared") + + t.Parallel() + requireOvercommit(t) + + GOOS, err := goEnv("GOOS") + if err != nil { + t.Fatal(err) + } + + GOARCH, err := goEnv("GOARCH") + if err != nil { + t.Fatal(err) + } + + libExt := "so" + if GOOS == "darwin" { + libExt = "dylib" + } + + cases := []struct { + src string + sanitizer string + }{ + { + src: "msan_shared.go", + sanitizer: "memory", + }, + { + src: "tsan_shared.go", + sanitizer: "thread", + }, + } + + for _, tc := range cases { + tc := tc + name := strings.TrimSuffix(tc.src, ".go") + //The memory sanitizer tests require support for the -msan option. + if tc.sanitizer == "memory" && !platform.MSanSupported(GOOS, GOARCH) { + t.Logf("skipping %s test on %s/%s; -msan option is not supported.", name, GOOS, GOARCH) + continue + } + if tc.sanitizer == "thread" && !compilerRequiredTsanVersion(GOOS, GOARCH) { + t.Logf("skipping %s test on %s/%s; compiler version too old for -tsan.", name, GOOS, GOARCH) + continue + } + + t.Run(name, func(t *testing.T) { + t.Parallel() + config := configure(tc.sanitizer) + config.skipIfCSanitizerBroken(t) + + dir := newTempDir(t) + defer dir.RemoveAll(t) + + lib := dir.Join(fmt.Sprintf("lib%s.%s", name, libExt)) + mustRun(t, config.goCmd("build", "-buildmode=c-shared", "-o", lib, srcPath(tc.src))) + + cSrc := dir.Join("main.c") + if err := os.WriteFile(cSrc, cMain, 0600); err != nil { + t.Fatalf("failed to write C source file: %v", err) + } + + dstBin := dir.Join(name) + cmd, err := cc(config.cFlags...) + if err != nil { + t.Fatal(err) + } + cmd.Args = append(cmd.Args, config.ldFlags...) 
+ cmd.Args = append(cmd.Args, "-o", dstBin, cSrc, lib) + mustRun(t, cmd) + + cmd = hangProneCmd(dstBin) + replaceEnv(cmd, "LD_LIBRARY_PATH", ".") + mustRun(t, cmd) + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/empty_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/empty_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e7fed995511873f1e5b0a466444e530b62c46fec --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/empty_test.go @@ -0,0 +1,8 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// All of the actual test files have limited build constraints. This file +// ensures there's at least one test file on every platform. + +package sanitizers_test diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/libfuzzer_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/libfuzzer_test.go new file mode 100644 index 0000000000000000000000000000000000000000..85c8f7bbfbeddadb4a1d05d45efa4491be629af0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/libfuzzer_test.go @@ -0,0 +1,101 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux || (freebsd && amd64) + +package sanitizers_test + +import ( + "internal/testenv" + "strings" + "testing" +) + +func TestLibFuzzer(t *testing.T) { + // Skip tests in short mode. 
+ if testing.Short() { + t.Skip("libfuzzer tests can take upwards of minutes to run; skipping in short mode") + } + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + + goos, err := goEnv("GOOS") + if err != nil { + t.Fatal(err) + } + goarch, err := goEnv("GOARCH") + if err != nil { + t.Fatal(err) + } + if !libFuzzerSupported(goos, goarch) { + t.Skipf("skipping on %s/%s; libfuzzer option is not supported.", goos, goarch) + } + config := configure("fuzzer") + config.skipIfCSanitizerBroken(t) + + cases := []struct { + goSrc string + cSrc string + expectedError string + }{ + {goSrc: "libfuzzer1.go", expectedError: "panic: found it"}, + {goSrc: "libfuzzer2.go", cSrc: "libfuzzer2.c", expectedError: "panic: found it"}, + } + for _, tc := range cases { + tc := tc + name := strings.TrimSuffix(tc.goSrc, ".go") + t.Run(name, func(t *testing.T) { + t.Parallel() + + dir := newTempDir(t) + defer dir.RemoveAll(t) + + // build Go code in libfuzzer mode to a c-archive + outPath := dir.Join(name) + archivePath := dir.Join(name + ".a") + mustRun(t, config.goCmd("build", "-buildmode=c-archive", "-o", archivePath, srcPath(tc.goSrc))) + + // build C code (if any) and link with Go code + cmd, err := cc(config.cFlags...) + if err != nil { + t.Fatalf("error running cc: %v", err) + } + cmd.Args = append(cmd.Args, config.ldFlags...) 
+ cmd.Args = append(cmd.Args, "-o", outPath, "-I", dir.Base()) + if tc.cSrc != "" { + cmd.Args = append(cmd.Args, srcPath(tc.cSrc)) + } + cmd.Args = append(cmd.Args, archivePath) + mustRun(t, cmd) + + cmd = hangProneCmd(outPath) + cmd.Dir = dir.Base() + outb, err := cmd.CombinedOutput() + out := string(outb) + if err == nil { + t.Fatalf("fuzzing succeeded unexpectedly; output:\n%s", out) + } + if !strings.Contains(out, tc.expectedError) { + t.Errorf("exited without expected error %q; got\n%s", tc.expectedError, out) + } + }) + } +} + +// libFuzzerSupported is a copy of the function internal/platform.FuzzInstrumented, +// because the internal package can't be used here. +func libFuzzerSupported(goos, goarch string) bool { + switch goarch { + case "amd64", "arm64": + // TODO(#14565): support more architectures. + switch goos { + case "darwin", "freebsd", "linux", "windows": + return true + default: + return false + } + default: + return false + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/msan_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/msan_test.go new file mode 100644 index 0000000000000000000000000000000000000000..83d66f6660d7bfdfaf55c83c692bad006db8e753 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/msan_test.go @@ -0,0 +1,87 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux || (freebsd && amd64) + +package sanitizers_test + +import ( + "internal/platform" + "internal/testenv" + "strings" + "testing" +) + +func TestMSAN(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + goos, err := goEnv("GOOS") + if err != nil { + t.Fatal(err) + } + goarch, err := goEnv("GOARCH") + if err != nil { + t.Fatal(err) + } + // The msan tests require support for the -msan option. 
+ if !platform.MSanSupported(goos, goarch) { + t.Skipf("skipping on %s/%s; -msan option is not supported.", goos, goarch) + } + + t.Parallel() + // Overcommit is enabled by default on FreeBSD (vm.overcommit=0, see tuning(7)). + // Do not skip tests with stricter overcommit settings unless testing shows that FreeBSD has similar issues. + if goos == "linux" { + requireOvercommit(t) + } + config := configure("memory") + config.skipIfCSanitizerBroken(t) + + mustRun(t, config.goCmd("build", "std")) + + cases := []struct { + src string + wantErr bool + experiments []string + }{ + {src: "msan.go"}, + {src: "msan2.go"}, + {src: "msan2_cmsan.go"}, + {src: "msan3.go"}, + {src: "msan4.go"}, + {src: "msan5.go"}, + {src: "msan6.go"}, + {src: "msan7.go"}, + {src: "msan8.go"}, + {src: "msan_fail.go", wantErr: true}, + // This may not always fail specifically due to MSAN. It may sometimes + // fail because of a fault. However, we don't care what kind of error we + // get here, just that we get an error. This is an MSAN test because without + // MSAN it would not fail deterministically. 
+ {src: "arena_fail.go", wantErr: true, experiments: []string{"arenas"}}, + } + for _, tc := range cases { + tc := tc + name := strings.TrimSuffix(tc.src, ".go") + t.Run(name, func(t *testing.T) { + t.Parallel() + + dir := newTempDir(t) + defer dir.RemoveAll(t) + + outPath := dir.Join(name) + mustRun(t, config.goCmdWithExperiments("build", []string{"-o", outPath, srcPath(tc.src)}, tc.experiments)) + + cmd := hangProneCmd(outPath) + if tc.wantErr { + out, err := cmd.CombinedOutput() + if err != nil { + return + } + t.Fatalf("%#q exited without error; want MSAN failure\n%s", strings.Join(cmd.Args, " "), out) + } + mustRun(t, cmd) + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/arena_fail.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/arena_fail.go new file mode 100644 index 0000000000000000000000000000000000000000..5b6c52e4358cebf930d7e852ce3983088a37e276 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/arena_fail.go @@ -0,0 +1,27 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.arenas + +package main + +import "arena" + +func main() { + a := arena.NewArena() + x := arena.New[[200]byte](a) + x[0] = 9 + a.Free() + // Use after free. + // + // ASAN should detect this deterministically as Free + // should poison the arena memory. + // + // MSAN should detect that this access is to freed + // memory. This may crash with an "accessed freed arena + // memory" error before MSAN gets a chance, but if MSAN + // was not enabled there would be a chance that this + // could fail to crash on its own. 
+ println(x[0]) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan1_fail.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan1_fail.go new file mode 100644 index 0000000000000000000000000000000000000000..80289e5c30452f697a9c4a0968854584e62b2d57 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan1_fail.go @@ -0,0 +1,28 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +#include +#include + +int *p; +int* test() { + p = (int *)malloc(2 * sizeof(int)); + free(p); + return p; +} +*/ +import "C" +import "fmt" + +func main() { + // C passes Go an invalid pointer. + a := C.test() + // Use after free + *a = 2 // BOOM + // We shouldn't get here; asan should stop us first. + fmt.Println(*a) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan2_fail.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan2_fail.go new file mode 100644 index 0000000000000000000000000000000000000000..3ab060857107ae76fd4e834b85c2668c1743b83f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan2_fail.go @@ -0,0 +1,34 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +#include +#include + +int *p; +int* f() { + int i; + p = (int *)malloc(5*sizeof(int)); + for (i = 0; i < 5; i++) { + p[i] = i+10; + } + return p; +} +*/ +import "C" +import ( + "fmt" + "unsafe" +) + +func main() { + a := C.f() + q5 := (*C.int)(unsafe.Add(unsafe.Pointer(a), 4*5)) + // Access to C pointer out of bounds. + *q5 = 100 // BOOM + // We shouldn't get here; asan should stop us first. 
+ fmt.Printf("q5: %d, %x\n", *q5, q5) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan3_fail.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan3_fail.go new file mode 100644 index 0000000000000000000000000000000000000000..9f6d26dd89dbce12f1e9759749544520182b9a3a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan3_fail.go @@ -0,0 +1,23 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +#include +#include + +void test(int *a) { + // Access Go pointer out of bounds. + int c = a[5]; // BOOM + // We shouldn't get here; asan should stop us first. + printf("a[5]=%d\n", c); +} +*/ +import "C" + +func main() { + cIntSlice := []C.int{200, 201, 203, 203, 204} + C.test(&cIntSlice[0]) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan4_fail.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan4_fail.go new file mode 100644 index 0000000000000000000000000000000000000000..12098458ae91d27f6c9d06c316240136dd8c3edf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan4_fail.go @@ -0,0 +1,22 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +#include +#include + +void test(int* a) { + // Access Go pointer out of bounds. + a[3] = 300; // BOOM + // We shouldn't get here; asan should stop us first. + printf("a[3]=%d\n", a[3]); +}*/ +import "C" + +func main() { + var cIntArray [2]C.int + C.test(&cIntArray[0]) // cIntArray is moved to heap. 
+} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan5_fail.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan5_fail.go new file mode 100644 index 0000000000000000000000000000000000000000..d6853eab7333d95b79d2a35fbc37f671c89b9b3a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan5_fail.go @@ -0,0 +1,21 @@ +package main + +import ( + "fmt" + "runtime" + "unsafe" +) + +func main() { + p := new([1024 * 1000]int) + p[0] = 10 + r := bar(&p[1024*1000-1]) + fmt.Printf("r value is %d", r) +} + +func bar(a *int) int { + p := unsafe.Add(unsafe.Pointer(a), 2*unsafe.Sizeof(int(1))) + runtime.ASanWrite(p, 8) // BOOM + *((*int)(p)) = 10 + return *((*int)(p)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_global1_fail.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_global1_fail.go new file mode 100644 index 0000000000000000000000000000000000000000..6cfc0b713812ff3a6ca1a8ba860800cb79fd73cf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_global1_fail.go @@ -0,0 +1,25 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +/* +#include +#include + +int test(int *a) { + a[2] = 300; // BOOM + return a[2]; +} +*/ +import "C" + +import "fmt" + +var cIntArray [2]C.int + +func main() { + r := C.test(&cIntArray[0]) + fmt.Println("r value = ", r) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_global2_fail.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_global2_fail.go new file mode 100644 index 0000000000000000000000000000000000000000..19326333682f72a4be103119ff6c373228eda4b4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_global2_fail.go @@ -0,0 +1,31 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +#include +#include + +struct ss { + int *p; + int len; + int cap; +}; + +int test(struct ss *a) { + struct ss *t = a + 1; + t->len = 100; // BOOM + return t->len; +} +*/ +import "C" +import "fmt" + +var tt C.struct_ss + +func main() { + r := C.test(&tt) + fmt.Println("r value = ", r) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_global3_fail.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_global3_fail.go new file mode 100644 index 0000000000000000000000000000000000000000..9ab026c7fa147a362c3f1cf4a96afa6ce579ff61 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_global3_fail.go @@ -0,0 +1,28 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +/* +#include +#include + +int test(int *a) { + int* p = a+1; + *p = 10; // BOOM + return *p; +} +*/ +import "C" +import ( + "fmt" + "unsafe" +) + +var cIntV C.int + +func main() { + r := C.test((*C.int)(unsafe.Pointer(&cIntV))) + fmt.Printf("r value is %d", r) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_global4_fail.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_global4_fail.go new file mode 100644 index 0000000000000000000000000000000000000000..d593598d5b29ba7f563b9e0cb8bb0ebfb85f510a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_global4_fail.go @@ -0,0 +1,25 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "unsafe" +) + +var intGlo int + +func main() { + r := bar(&intGlo) + fmt.Printf("r value is %d", r) +} + +func bar(a *int) int { + p := (*int)(unsafe.Add(unsafe.Pointer(a), 1*unsafe.Sizeof(int(1)))) + if *p == 10 { // BOOM + fmt.Println("its value is 10") + } + return *p +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_global5.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_global5.go new file mode 100644 index 0000000000000000000000000000000000000000..0ed103da4f2e8d7e60480d666f441d563353f27b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_global5.go @@ -0,0 +1,22 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" +) + +type Any struct { + s string + b int64 +} + +var Sg = []interface{}{ + Any{"a", 10}, +} + +func main() { + fmt.Println(Sg[0]) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_linkerx/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_linkerx/main.go new file mode 100644 index 0000000000000000000000000000000000000000..290b5888d700b9961ac9d9c7555266ebe72c0697 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_linkerx/main.go @@ -0,0 +1,28 @@ +package main + +import "cmd/cgo/internal/testsanitizers/testdata/asan_linkerx/p" + +func pstring(s *string) { + println(*s) +} + +func main() { + all := []*string{ + &S1, &S2, &S3, &S4, &S5, &S6, &S7, &S8, &S9, &S10, + &p.S1, &p.S2, &p.S3, &p.S4, &p.S5, &p.S6, &p.S7, &p.S8, &p.S9, &p.S10, + } + for _, ps := range all { + pstring(ps) + } +} + +var S1 string +var S2 string +var S3 string +var S4 string +var S5 string +var S6 string +var S7 string +var S8 string +var S9 string +var S10 string diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_linkerx/p/p.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_linkerx/p/p.go new file mode 100644 index 0000000000000000000000000000000000000000..c31f00109d6d97cd344d220040b308d89c4f945e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_linkerx/p/p.go @@ -0,0 +1,12 @@ +package p + +var S1 string +var S2 string +var S3 string +var S4 string +var S5 string +var S6 string +var S7 string +var S8 string +var S9 string +var S10 string diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_unsafe_fail1.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_unsafe_fail1.go new file mode 100644 index 
0000000000000000000000000000000000000000..ec54a66880c0891e35dc2ae49cc90afca9d58be7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_unsafe_fail1.go @@ -0,0 +1,27 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "unsafe" +) + +func main() { + a := 1 + b := 2 + c := add(a, b) + d := a + b + fmt.Println(c, d) +} + +//go:noinline +func add(a1, b1 int) int { + // The arguments. + // When -asan is enabled, unsafe.Pointer(&a1) conversion is escaping. + var p *int = (*int)(unsafe.Add(unsafe.Pointer(&a1), 1*unsafe.Sizeof(int(1)))) + *p = 10 // BOOM + return a1 + b1 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_unsafe_fail2.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_unsafe_fail2.go new file mode 100644 index 0000000000000000000000000000000000000000..70f21275af58ee493b985a26aa3b9a4887e297bf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_unsafe_fail2.go @@ -0,0 +1,28 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "unsafe" +) + +func main() { + a := 1 + b := 2 + c := add(a, b) + d := a + b + fmt.Println(c, d) +} + +//go:noinline +func add(a1, b1 int) (ret int) { + // The return value + // When -asan is enabled, the unsafe.Pointer(&ret) conversion is escaping. 
+ var p *int = (*int)(unsafe.Add(unsafe.Pointer(&ret), 1*unsafe.Sizeof(int(1)))) + *p = 123 // BOOM + ret = a1 + b1 + return +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_unsafe_fail3.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_unsafe_fail3.go new file mode 100644 index 0000000000000000000000000000000000000000..47a8a072ef4d36de99032d1b30bcc4bdc610dbb9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_unsafe_fail3.go @@ -0,0 +1,21 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "unsafe" +) + +func main() { + a := 1 + b := 2 + // The local variables. + // When -asan is enabled, the unsafe.Pointer(&a) conversion is escaping. + var p *int = (*int)(unsafe.Add(unsafe.Pointer(&a), 1*unsafe.Sizeof(int(1)))) + *p = 20 // BOOM + d := a + b + fmt.Println(d) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_useAfterReturn.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_useAfterReturn.go new file mode 100644 index 0000000000000000000000000000000000000000..3d3d5a6ab1ad473cd4911d1a2f13933b7538d16b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/asan_useAfterReturn.go @@ -0,0 +1,26 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// The -fsanitize=address option of C compier can detect stack-use-after-return bugs. +// In the following program, the local variable 'local' was moved to heap by the Go +// compiler because foo() is returning the reference to 'local', and return stack of +// foo() will be invalid. 
Thus for main() to use the reference to 'local', the 'local' +// must be available even after foo() has finished. Therefore, Go has no such issue. + +import "fmt" + +var ptr *int + +func main() { + foo() + fmt.Printf("ptr=%x, %v", *ptr, ptr) +} + +func foo() { + var local int + local = 1 + ptr = &local // local is moved to heap. +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/libfuzzer1.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/libfuzzer1.go new file mode 100644 index 0000000000000000000000000000000000000000..d178fb1ca0a3059a64a1f3c7307a876bd43946b4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/libfuzzer1.go @@ -0,0 +1,16 @@ +package main + +import "C" + +import "unsafe" + +//export LLVMFuzzerTestOneInput +func LLVMFuzzerTestOneInput(p unsafe.Pointer, sz C.int) C.int { + b := C.GoBytes(p, sz) + if len(b) >= 6 && b[0] == 'F' && b[1] == 'u' && b[2] == 'z' && b[3] == 'z' && b[4] == 'M' && b[5] == 'e' { + panic("found it") + } + return 0 +} + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/libfuzzer2.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/libfuzzer2.c new file mode 100644 index 0000000000000000000000000000000000000000..567ff5a1cccac5796183601002336decf82847ee --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/libfuzzer2.c @@ -0,0 +1,11 @@ +#include + +#include "libfuzzer2.h" + +int LLVMFuzzerTestOneInput(char *data, size_t size) { + if (size > 0 && data[0] == 'H') + if (size > 1 && data[1] == 'I') + if (size > 2 && data[2] == '!') + FuzzMe(data, size); + return 0; +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/libfuzzer2.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/libfuzzer2.go new file mode 100644 index 
0000000000000000000000000000000000000000..c7a43259768574af6d00368632777d82703de13d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/libfuzzer2.go @@ -0,0 +1,16 @@ +package main + +import "C" + +import "unsafe" + +//export FuzzMe +func FuzzMe(p unsafe.Pointer, sz C.int) { + b := C.GoBytes(p, sz) + b = b[3:] + if len(b) >= 4 && b[0] == 'f' && b[1] == 'u' && b[2] == 'z' && b[3] == 'z' { + panic("found it") + } +} + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan.go new file mode 100644 index 0000000000000000000000000000000000000000..5d73c3807925da7412231d432919029eaee70ac5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan.go @@ -0,0 +1,35 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +#include + +void f(int32_t *p, int n) { + int i; + + for (i = 0; i < n; i++) { + p[i] = (int32_t)i; + } +} +*/ +import "C" + +import ( + "fmt" + "os" + "unsafe" +) + +func main() { + a := make([]int32, 10) + C.f((*C.int32_t)(unsafe.Pointer(&a[0])), C.int(len(a))) + for i, v := range a { + if i != int(v) { + fmt.Printf("bad %d: %v\n", i, a) + os.Exit(1) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan2.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan2.go new file mode 100644 index 0000000000000000000000000000000000000000..6690cb034fcf36015f37787e8119c1976804fb28 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan2.go @@ -0,0 +1,35 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +#include +#include +#include + +void f(int32_t *p, int n) { + int32_t * volatile q = (int32_t *)malloc(sizeof(int32_t) * n); + memcpy(p, q, n * sizeof(*p)); + free(q); +} + +void g(int32_t *p, int n) { + if (p[4] != 1) { + abort(); + } +} +*/ +import "C" + +import ( + "unsafe" +) + +func main() { + a := make([]int32, 10) + C.f((*C.int32_t)(unsafe.Pointer(&a[0])), C.int(len(a))) + a[4] = 1 + C.g((*C.int32_t)(unsafe.Pointer(&a[0])), C.int(len(a))) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan2_cmsan.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan2_cmsan.go new file mode 100644 index 0000000000000000000000000000000000000000..8fdaea90c979245db3eb6221be76d77ac9fdf499 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan2_cmsan.go @@ -0,0 +1,38 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +/* +#cgo LDFLAGS: -fsanitize=memory +#cgo CPPFLAGS: -fsanitize=memory + +#include +#include +#include + +void f(int32_t *p, int n) { + int32_t * volatile q = (int32_t *)malloc(sizeof(int32_t) * n); + memcpy(p, q, n * sizeof(*p)); + free(q); +} + +void g(int32_t *p, int n) { + if (p[4] != 1) { + abort(); + } +} +*/ +import "C" + +import ( + "unsafe" +) + +func main() { + a := make([]int32, 10) + C.f((*C.int32_t)(unsafe.Pointer(&a[0])), C.int(len(a))) + a[4] = 1 + C.g((*C.int32_t)(unsafe.Pointer(&a[0])), C.int(len(a))) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan3.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan3.go new file mode 100644 index 0000000000000000000000000000000000000000..61a9c29e1a93644d21de1c7ffaa4034524f40a19 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan3.go @@ -0,0 +1,33 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +extern int *GoFn(int *); + +// Yes, you can have definitions if you use //export, as long as they are weak. +int f(void) __attribute__ ((weak)); + +int f() { + int i; + int *p = GoFn(&i); + if (*p != 12345) + return 0; + return 1; +} +*/ +import "C" + +//export GoFn +func GoFn(p *C.int) *C.int { + *p = C.int(12345) + return p +} + +func main() { + if r := C.f(); r != 1 { + panic(r) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan4.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan4.go new file mode 100644 index 0000000000000000000000000000000000000000..6c91ff5f091b6227e10ea76f5c7e04346b4dcd96 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan4.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// The memory profiler can call copy from a slice on the system stack, +// which msan used to think meant a reference to uninitialized memory. + +/* +#include +#include + +extern void Nop(char*); + +// Use weak as a hack to permit defining a function even though we use export. +void poison() __attribute__ ((weak)); + +// Poison the stack. +void poison() { + char a[1024]; + Nop(&a[0]); +} + +*/ +import "C" + +import ( + "runtime" +) + +func main() { + runtime.MemProfileRate = 1 + start(100) +} + +func start(i int) { + if i == 0 { + return + } + C.poison() + // Tie up a thread. + // We won't actually wait for this sleep to complete. + go func() { C.sleep(1) }() + start(i - 1) +} + +//export Nop +func Nop(*C.char) { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan5.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan5.go new file mode 100644 index 0000000000000000000000000000000000000000..f1479eb8a005693fcea56d476d44454349ec4bb1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan5.go @@ -0,0 +1,57 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// Using reflect to set a value was not seen by msan. + +/* +#include + +extern void Go1(int*); +extern void Go2(char*); + +// Use weak as a hack to permit defining a function even though we use export. 
+void C1() __attribute__ ((weak)); +void C2() __attribute__ ((weak)); + +void C1() { + int i; + Go1(&i); + if (i != 42) { + abort(); + } +} + +void C2() { + char a[2]; + a[1] = 42; + Go2(a); + if (a[0] != 42) { + abort(); + } +} +*/ +import "C" + +import ( + "reflect" + "unsafe" +) + +//export Go1 +func Go1(p *C.int) { + reflect.ValueOf(p).Elem().Set(reflect.ValueOf(C.int(42))) +} + +//export Go2 +func Go2(p *C.char) { + a := (*[2]byte)(unsafe.Pointer(p)) + reflect.Copy(reflect.ValueOf(a[:1]), reflect.ValueOf(a[1:])) +} + +func main() { + C.C1() + C.C2() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan6.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan6.go new file mode 100644 index 0000000000000000000000000000000000000000..e96e8f9ead57f73a0e593324a1dba7d2f84ed624 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan6.go @@ -0,0 +1,75 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// A C function returning a value on the Go stack could leave the Go +// stack marked as uninitialized, potentially causing a later error +// when the stack is used for something else. Issue 26209. + +/* +#cgo LDFLAGS: -fsanitize=memory +#cgo CPPFLAGS: -fsanitize=memory + +#include +#include +#include + +typedef struct { + uintptr_t a[20]; +} S; + +S f() { + S *p; + + p = (S *)(malloc(sizeof(S))); + p->a[0] = 0; + return *p; +} +*/ +import "C" + +// allocateStack extends the stack so that stack copying doesn't +// confuse the msan data structures. +// +//go:noinline +func allocateStack(i int) int { + if i == 0 { + return i + } + return allocateStack(i - 1) +} + +// F1 marks a chunk of stack as uninitialized. +// C.f returns an uninitialized struct on the stack, so msan will mark +// the stack as uninitialized. 
+// +//go:noinline +func F1() uintptr { + s := C.f() + return uintptr(s.a[0]) +} + +// F2 allocates a struct on the stack and converts it to an empty interface, +// which will call msanread and see that the data appears uninitialized. +// +//go:noinline +func F2() interface{} { + return C.S{} +} + +func poisonStack(i int) int { + if i == 0 { + return int(F1()) + } + F1() + r := poisonStack(i - 1) + F2() + return r +} + +func main() { + allocateStack(16384) + poisonStack(128) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan7.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan7.go new file mode 100644 index 0000000000000000000000000000000000000000..2f29fd21b2ee329f87a48a9dfe5b5a26b5b97623 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan7.go @@ -0,0 +1,38 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// Test passing C struct to exported Go function. + +/* +#include +#include + +// T is a C struct with alignment padding after b. +// The padding bytes are not considered initialized by MSAN. +// It is big enough to be passed on stack in C ABI (and least +// on AMD64). +typedef struct { char b; uintptr_t x, y; } T; + +extern void F(T); + +// Use weak as a hack to permit defining a function even though we use export. 
+void CF(int x) __attribute__ ((weak)); +void CF(int x) { + T *t = malloc(sizeof(T)); + t->b = (char)x; + t->x = x; + t->y = x; + F(*t); +} +*/ +import "C" + +//export F +func F(t C.T) { println(t.b, t.x, t.y) } + +func main() { + C.CF(C.int(0)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan8.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan8.go new file mode 100644 index 0000000000000000000000000000000000000000..1cb5c5677fa758711ae44a6d04e3fdcccbe6c00e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan8.go @@ -0,0 +1,109 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +#include +#include +#include + +#include + +// cgoTracebackArg is the type of the argument passed to msanGoTraceback. +struct cgoTracebackArg { + uintptr_t context; + uintptr_t sigContext; + uintptr_t* buf; + uintptr_t max; +}; + +// msanGoTraceback is registered as the cgo traceback function. +// This will be called when a signal occurs. +void msanGoTraceback(void* parg) { + struct cgoTracebackArg* arg = (struct cgoTracebackArg*)(parg); + arg->buf[0] = 0; +} + +// msanGoWait will be called with all registers undefined as far as +// msan is concerned. It just waits for a signal. +// Because the registers are msan-undefined, the signal handler will +// be invoked with all registers msan-undefined. +__attribute__((noinline)) +void msanGoWait(unsigned long a1, unsigned long a2, unsigned long a3, unsigned long a4, unsigned long a5, unsigned long a6) { + sigset_t mask; + + sigemptyset(&mask); + sigsuspend(&mask); +} + +// msanGoSignalThread is the thread ID of the msanGoLoop thread. +static pthread_t msanGoSignalThread; + +// msanGoSignalThreadSet is used to record that msanGoSignalThread +// has been initialized. 
This is accessed atomically. +static int32_t msanGoSignalThreadSet; + +// uninit is explicitly poisoned, so that we can make all registers +// undefined by calling msanGoWait. +static unsigned long uninit; + +// msanGoLoop loops calling msanGoWait, with the arguments passed +// such that msan thinks that they are undefined. msan permits +// undefined values to be used as long as they are not used to +// for conditionals or for memory access. +void msanGoLoop() { + int i; + + msanGoSignalThread = pthread_self(); + __atomic_store_n(&msanGoSignalThreadSet, 1, __ATOMIC_SEQ_CST); + + // Force uninit to be undefined for msan. + __msan_poison(&uninit, sizeof uninit); + for (i = 0; i < 100; i++) { + msanGoWait(uninit, uninit, uninit, uninit, uninit, uninit); + } +} + +// msanGoReady returns whether msanGoSignalThread is set. +int msanGoReady() { + return __atomic_load_n(&msanGoSignalThreadSet, __ATOMIC_SEQ_CST) != 0; +} + +// msanGoSendSignal sends a signal to the msanGoLoop thread. +void msanGoSendSignal() { + pthread_kill(msanGoSignalThread, SIGWINCH); +} +*/ +import "C" + +import ( + "runtime" + "time" +) + +func main() { + runtime.SetCgoTraceback(0, C.msanGoTraceback, nil, nil) + + c := make(chan bool) + go func() { + defer func() { c <- true }() + C.msanGoLoop() + }() + + for C.msanGoReady() == 0 { + time.Sleep(time.Microsecond) + } + +loop: + for { + select { + case <-c: + break loop + default: + C.msanGoSendSignal() + time.Sleep(time.Microsecond) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan_fail.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan_fail.go new file mode 100644 index 0000000000000000000000000000000000000000..4c8dab34f6e225ed2fe791063683e97b58265deb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan_fail.go @@ -0,0 +1,36 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +#include +#include +#include + +void f(int32_t *p, int n) { + int32_t * volatile q = (int32_t *)malloc(sizeof(int32_t) * n); + memcpy(p, q, n * sizeof(*p)); + free(q); +} + +void g(int32_t *p, int n) { + if (p[4] != 1) { + // We shouldn't get here; msan should stop us first. + exit(0); + } +} +*/ +import "C" + +import ( + "unsafe" +) + +func main() { + a := make([]int32, 10) + C.f((*C.int32_t)(unsafe.Pointer(&a[0])), C.int(len(a))) + a[3] = 1 + C.g((*C.int32_t)(unsafe.Pointer(&a[0])), C.int(len(a))) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan_shared.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan_shared.go new file mode 100644 index 0000000000000000000000000000000000000000..966947cac359802072372ee044bd40323472b884 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/msan_shared.go @@ -0,0 +1,12 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This program segfaulted during libpreinit when built with -msan: +// http://golang.org/issue/18707 + +package main + +import "C" + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan.go new file mode 100644 index 0000000000000000000000000000000000000000..6c377a701fb965fc036fb4fa48c76c99369d09d2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan.go @@ -0,0 +1,44 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +// This program produced false race reports when run under the C/C++ +// ThreadSanitizer, as it did not understand the synchronization in +// the Go code. + +/* +#cgo CFLAGS: -fsanitize=thread +#cgo LDFLAGS: -fsanitize=thread + +int val; + +int getVal() { + return val; +} + +void setVal(int i) { + val = i; +} +*/ +import "C" + +import ( + "runtime" +) + +func main() { + runtime.LockOSThread() + C.setVal(1) + c := make(chan bool) + go func() { + runtime.LockOSThread() + C.setVal(2) + c <- true + }() + <-c + if v := C.getVal(); v != 2 { + panic(v) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan10.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan10.go new file mode 100644 index 0000000000000000000000000000000000000000..a40f2455537c162f9d830f198c0a7b9be6efdbeb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan10.go @@ -0,0 +1,31 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// This program hung when run under the C/C++ ThreadSanitizer. +// TSAN defers asynchronous signals until the signaled thread calls into libc. +// Since the Go runtime makes direct futex syscalls, Go runtime threads could +// run for an arbitrarily long time without triggering the libc interceptors. +// See https://golang.org/issue/18717. 
+ +import ( + "os" + "os/signal" + "syscall" +) + +/* +#cgo CFLAGS: -g -fsanitize=thread +#cgo LDFLAGS: -g -fsanitize=thread +*/ +import "C" + +func main() { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGUSR1) + defer signal.Stop(c) + syscall.Kill(syscall.Getpid(), syscall.SIGUSR1) + <-c +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan11.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan11.go new file mode 100644 index 0000000000000000000000000000000000000000..189e10f699797c33bbbe3b26d8b0d27a91dd9c98 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan11.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// This program hung when run under the C/C++ ThreadSanitizer. TSAN defers +// asynchronous signals until the signaled thread calls into libc. The runtime's +// sysmon goroutine idles itself using direct usleep syscalls, so it could +// run for an arbitrarily long time without triggering the libc interceptors. +// See https://golang.org/issue/18717. 
+ +import ( + "os" + "os/signal" + "syscall" +) + +/* +#cgo CFLAGS: -g -fsanitize=thread +#cgo LDFLAGS: -g -fsanitize=thread + +#include +#include +#include +#include + +static void raise_usr2(int signo) { + raise(SIGUSR2); +} + +static void register_handler(int signo) { + struct sigaction sa; + memset(&sa, 0, sizeof(sa)); + sigemptyset(&sa.sa_mask); + sa.sa_flags = SA_ONSTACK; + sa.sa_handler = raise_usr2; + + if (sigaction(SIGUSR1, &sa, NULL) != 0) { + perror("failed to register SIGUSR1 handler"); + exit(EXIT_FAILURE); + } +} +*/ +import "C" + +func main() { + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGUSR2) + + C.register_handler(C.int(syscall.SIGUSR1)) + syscall.Kill(syscall.Getpid(), syscall.SIGUSR1) + + <-ch +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan12.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan12.go new file mode 100644 index 0000000000000000000000000000000000000000..0ef545d09b6f59581b58705ddf7d97c84891dd40 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan12.go @@ -0,0 +1,35 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// This program hung when run under the C/C++ ThreadSanitizer. TSAN installs a +// libc interceptor that writes signal handlers to a global variable within the +// TSAN runtime instead of making a sigaction system call. A bug in +// syscall.runtime_AfterForkInChild corrupted TSAN's signal forwarding table +// during calls to (*os/exec.Cmd).Run, causing the parent process to fail to +// invoke signal handlers. 
+ +import ( + "fmt" + "os" + "os/exec" + "os/signal" + "syscall" +) + +import "C" + +func main() { + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGUSR1) + + if err := exec.Command("true").Run(); err != nil { + fmt.Fprintf(os.Stderr, "Unexpected error from `true`: %v", err) + os.Exit(1) + } + + syscall.Kill(syscall.Getpid(), syscall.SIGUSR1) + <-ch +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan13.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan13.go new file mode 100644 index 0000000000000000000000000000000000000000..ebdf63581b0a23f9591b1c7702492dc1998d0a65 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan13.go @@ -0,0 +1,90 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// This program failed when run under the C/C++ ThreadSanitizer. +// There was no TSAN synchronization for the call to the cgo +// traceback routine. + +/* +#cgo CFLAGS: -g -fsanitize=thread +#cgo LDFLAGS: -g -fsanitize=thread + +#include +#include +#include +#include +#include + +struct tracebackArg { + uintptr_t Context; + uintptr_t SigContext; + uintptr_t* Buf; + uintptr_t Max; +}; + +void tsanTraceback(struct tracebackArg *arg) { + arg->Buf[0] = 0; +} + +static void* spin(void *arg) { + size_t n; + struct timeval tvstart, tvnow; + int diff; + void *prev; + void *cur; + + prev = NULL; + gettimeofday(&tvstart, NULL); + for (n = 0; n < 1<<20; n++) { + cur = malloc(n); + free(prev); + prev = cur; + + gettimeofday(&tvnow, NULL); + diff = (tvnow.tv_sec - tvstart.tv_sec) * 1000 * 1000 + (tvnow.tv_usec - tvstart.tv_usec); + + // Profile frequency is 100Hz so we should definitely + // get some signals in 50 milliseconds. 
+ if (diff > 50 * 1000) { + break; + } + } + + free(prev); + + return NULL; +} + +static void runThreads(int n) { + pthread_t ids[64]; + int i; + + if (n > 64) { + n = 64; + } + for (i = 0; i < n; i++) { + pthread_create(&ids[i], NULL, spin, NULL); + } + for (i = 0; i < n; i++) { + pthread_join(ids[i], NULL); + } +} +*/ +import "C" + +import ( + "io" + "runtime" + "runtime/pprof" + "unsafe" +) + +func main() { + runtime.SetCgoTraceback(0, unsafe.Pointer(C.tsanTraceback), nil, nil) + pprof.StartCPUProfile(io.Discard) + C.runThreads(C.int(runtime.GOMAXPROCS(0))) + pprof.StopCPUProfile() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan14.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan14.go new file mode 100644 index 0000000000000000000000000000000000000000..d594ffb5c0fdfade252bc7e4a22859b020910ab3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan14.go @@ -0,0 +1,53 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// This program failed when run under the C/C++ ThreadSanitizer. +// +// cgocallback on a new thread calls into runtime.needm -> _cgo_getstackbound +// to update gp.stack.lo with the stack bounds. If the G itself is passed to +// _cgo_getstackbound, then writes to the same G can be seen on multiple +// threads (when the G is reused after thread exit). This would trigger TSAN. 
+ +/* +#include + +void go_callback(); + +static void *thr(void *arg) { + go_callback(); + return 0; +} + +static void foo() { + pthread_t th; + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_attr_setstacksize(&attr, 256 << 10); + pthread_create(&th, &attr, thr, 0); + pthread_join(th, 0); +} +*/ +import "C" + +import ( + "time" +) + +//export go_callback +func go_callback() { +} + +func main() { + for i := 0; i < 2; i++ { + go func() { + for { + C.foo() + } + }() + } + + time.Sleep(1000*time.Millisecond) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan2.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan2.go new file mode 100644 index 0000000000000000000000000000000000000000..5018a1987caca01ea8913906ef1d7f983f6a8eed --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan2.go @@ -0,0 +1,55 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// This program produced false race reports when run under the C/C++ +// ThreadSanitizer, as it did not understand the synchronization in +// the Go code. + +/* +#cgo CFLAGS: -fsanitize=thread +#cgo LDFLAGS: -fsanitize=thread + +extern void GoRun(void); + +// Yes, you can have definitions if you use //export, as long as they are weak. 
+ +int val __attribute__ ((weak)); + +int run(void) __attribute__ ((weak)); + +int run() { + val = 1; + GoRun(); + return val; +} + +void setVal(int) __attribute__ ((weak)); + +void setVal(int i) { + val = i; +} +*/ +import "C" + +import "runtime" + +//export GoRun +func GoRun() { + runtime.LockOSThread() + c := make(chan bool) + go func() { + runtime.LockOSThread() + C.setVal(2) + c <- true + }() + <-c +} + +func main() { + if v := C.run(); v != 2 { + panic(v) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan3.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan3.go new file mode 100644 index 0000000000000000000000000000000000000000..87f6c80f1b18d56f9135805cd6af5be600aed1c8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan3.go @@ -0,0 +1,40 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// The stubs for the C functions read and write the same slot on the +// g0 stack when copying arguments in and out. 
+ +/* +#cgo CFLAGS: -fsanitize=thread +#cgo LDFLAGS: -fsanitize=thread + +int Func1() { + return 0; +} + +void Func2(int x) { + (void)x; +} +*/ +import "C" + +func main() { + const N = 10000 + done := make(chan bool, N) + for i := 0; i < N; i++ { + go func() { + C.Func1() + done <- true + }() + go func() { + C.Func2(0) + done <- true + }() + } + for i := 0; i < 2*N; i++ { + <-done + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan4.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan4.go new file mode 100644 index 0000000000000000000000000000000000000000..f0c76d84116a6010977c09a531bbc96bf208e1cf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan4.go @@ -0,0 +1,34 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// Check that calls to C.malloc/C.free do not trigger TSAN false +// positive reports. + +// #cgo CFLAGS: -fsanitize=thread +// #cgo LDFLAGS: -fsanitize=thread +// #include +import "C" + +import ( + "runtime" + "sync" +) + +func main() { + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < 100; i++ { + p := C.malloc(C.size_t(i * 10)) + runtime.Gosched() + C.free(p) + } + }() + } + wg.Wait() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan5.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan5.go new file mode 100644 index 0000000000000000000000000000000000000000..1214a7743b67ba6f2543f39eecf4b2347b5b3fde --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan5.go @@ -0,0 +1,51 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// Check that calls to C.malloc/C.free do not collide with the calls +// made by the os/user package. + +// #cgo CFLAGS: -fsanitize=thread +// #cgo LDFLAGS: -fsanitize=thread +// #include +import "C" + +import ( + "fmt" + "os" + "os/user" + "runtime" + "sync" +) + +func main() { + u, err := user.Current() + if err != nil { + fmt.Fprintln(os.Stderr, err) + // Let the test pass. + os.Exit(0) + } + + var wg sync.WaitGroup + for i := 0; i < 20; i++ { + wg.Add(2) + go func() { + defer wg.Done() + for i := 0; i < 1000; i++ { + user.Lookup(u.Username) + runtime.Gosched() + } + }() + go func() { + defer wg.Done() + for i := 0; i < 1000; i++ { + p := C.malloc(C.size_t(len(u.Username) + 1)) + runtime.Gosched() + C.free(p) + } + }() + } + wg.Wait() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan6.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan6.go new file mode 100644 index 0000000000000000000000000000000000000000..c96f08d2f3752f2165a1ee2700d56c08d75539ba --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan6.go @@ -0,0 +1,49 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// Check that writes to Go allocated memory, with Go synchronization, +// do not look like a race. 
+ +/* +#cgo CFLAGS: -fsanitize=thread +#cgo LDFLAGS: -fsanitize=thread + +void f(char *p) { + *p = 1; +} +*/ +import "C" + +import ( + "runtime" + "sync" +) + +func main() { + var wg sync.WaitGroup + var mu sync.Mutex + c := make(chan []C.char, 100) + for i := 0; i < 10; i++ { + wg.Add(2) + go func() { + defer wg.Done() + for i := 0; i < 100; i++ { + c <- make([]C.char, 4096) + runtime.Gosched() + } + }() + go func() { + defer wg.Done() + for i := 0; i < 100; i++ { + p := &(<-c)[0] + mu.Lock() + C.f(p) + mu.Unlock() + } + }() + } + wg.Wait() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan7.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan7.go new file mode 100644 index 0000000000000000000000000000000000000000..2fb9e45ee2d6081da2de2a8df97875deeea8353e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan7.go @@ -0,0 +1,40 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// Setting an environment variable in a cgo program changes the C +// environment. Test that this does not confuse the race detector. 
+ +/* +#cgo CFLAGS: -fsanitize=thread +#cgo LDFLAGS: -fsanitize=thread +*/ +import "C" + +import ( + "fmt" + "os" + "sync" + "time" +) + +func main() { + var wg sync.WaitGroup + var mu sync.Mutex + f := func() { + defer wg.Done() + for i := 0; i < 100; i++ { + time.Sleep(time.Microsecond) + mu.Lock() + s := fmt.Sprint(i) + os.Setenv("TSAN_TEST"+s, s) + mu.Unlock() + } + } + wg.Add(2) + go f() + go f() + wg.Wait() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan8.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan8.go new file mode 100644 index 0000000000000000000000000000000000000000..88d82a6078926aa76bcccc91b90829749edb9f54 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan8.go @@ -0,0 +1,60 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// This program failed when run under the C/C++ ThreadSanitizer. The TSAN +// sigaction function interceptor returned SIG_DFL instead of the Go runtime's +// handler in registerSegvForwarder. 
+ +/* +#cgo CFLAGS: -fsanitize=thread +#cgo LDFLAGS: -fsanitize=thread + +#include +#include +#include +#include + +struct sigaction prev_sa; + +void forwardSignal(int signo, siginfo_t *info, void *context) { + // One of sa_sigaction and/or sa_handler + if ((prev_sa.sa_flags&SA_SIGINFO) != 0) { + prev_sa.sa_sigaction(signo, info, context); + return; + } + if (prev_sa.sa_handler != SIG_IGN && prev_sa.sa_handler != SIG_DFL) { + prev_sa.sa_handler(signo); + return; + } + + fprintf(stderr, "No Go handler to forward to!\n"); + abort(); +} + +void registerSegvFowarder() { + struct sigaction sa; + memset(&sa, 0, sizeof(sa)); + sigemptyset(&sa.sa_mask); + sa.sa_flags = SA_SIGINFO | SA_ONSTACK; + sa.sa_sigaction = forwardSignal; + + if (sigaction(SIGSEGV, &sa, &prev_sa) != 0) { + perror("failed to register SEGV forwarder"); + exit(EXIT_FAILURE); + } +} +*/ +import "C" + +func main() { + C.registerSegvFowarder() + + defer func() { + recover() + }() + var nilp *int + *nilp = 42 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan9.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan9.go new file mode 100644 index 0000000000000000000000000000000000000000..06304be751b5e217113a7d280c31ca61636117a2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan9.go @@ -0,0 +1,67 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// This program failed when run under the C/C++ ThreadSanitizer. The +// TSAN library was not keeping track of whether signals should be +// delivered on the alternate signal stack, and the Go signal handler +// was not preserving callee-saved registers from C callers. 
+ +/* +#cgo CFLAGS: -g -fsanitize=thread +#cgo LDFLAGS: -g -fsanitize=thread + +#include +#include + +void spin() { + size_t n; + struct timeval tvstart, tvnow; + int diff; + void *prev = NULL, *cur; + + gettimeofday(&tvstart, NULL); + for (n = 0; n < 1<<20; n++) { + cur = malloc(n); + free(prev); + prev = cur; + + gettimeofday(&tvnow, NULL); + diff = (tvnow.tv_sec - tvstart.tv_sec) * 1000 * 1000 + (tvnow.tv_usec - tvstart.tv_usec); + + // Profile frequency is 100Hz so we should definitely + // get a signal in 50 milliseconds. + if (diff > 50 * 1000) { + break; + } + } + + free(prev); +} +*/ +import "C" + +import ( + "io" + "runtime/pprof" + "time" +) + +func goSpin() { + start := time.Now() + for n := 0; n < 1<<20; n++ { + _ = make([]byte, n) + if time.Since(start) > 50*time.Millisecond { + break + } + } +} + +func main() { + pprof.StartCPUProfile(io.Discard) + go C.spin() + goSpin() + pprof.StopCPUProfile() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan_shared.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan_shared.go new file mode 100644 index 0000000000000000000000000000000000000000..55ff67ecbafc473795ad158b794a2f15675c59f5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/testdata/tsan_shared.go @@ -0,0 +1,63 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// This program failed with SIGSEGV when run under the C/C++ ThreadSanitizer. +// The Go runtime had re-registered the C handler with the wrong flags due to a +// typo, resulting in null pointers being passed for the info and context +// parameters to the handler. 
+ +/* +#cgo CFLAGS: -fsanitize=thread +#cgo LDFLAGS: -fsanitize=thread + +#include +#include +#include +#include +#include + +void check_params(int signo, siginfo_t *info, void *context) { + ucontext_t* uc = (ucontext_t*)(context); + + if (info->si_signo != signo) { + fprintf(stderr, "info->si_signo does not match signo.\n"); + abort(); + } + + if (uc->uc_stack.ss_size == 0) { + fprintf(stderr, "uc_stack has size 0.\n"); + abort(); + } +} + + +// Set up the signal handler in a high priority constructor, so +// that it is installed before the Go code starts. + +static void register_handler(void) __attribute__ ((constructor (200))); + +static void register_handler() { + struct sigaction sa; + memset(&sa, 0, sizeof(sa)); + sigemptyset(&sa.sa_mask); + sa.sa_flags = SA_SIGINFO; + sa.sa_sigaction = check_params; + + if (sigaction(SIGUSR1, &sa, NULL) != 0) { + perror("failed to register SIGUSR1 handler"); + exit(EXIT_FAILURE); + } +} +*/ +import "C" + +import "syscall" + +func init() { + C.raise(C.int(syscall.SIGUSR1)) +} + +func main() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/tsan_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/tsan_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8e758e6ea7c85819131ebb99aa3309a35e08c0df --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testsanitizers/tsan_test.go @@ -0,0 +1,80 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build linux || (freebsd && amd64) + +package sanitizers_test + +import ( + "internal/testenv" + "strings" + "testing" +) + +func TestTSAN(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + + goos, err := goEnv("GOOS") + if err != nil { + t.Fatal(err) + } + goarch, err := goEnv("GOARCH") + if err != nil { + t.Fatal(err) + } + // The msan tests require support for the -msan option. + if !compilerRequiredTsanVersion(goos, goarch) { + t.Skipf("skipping on %s/%s; compiler version for -tsan option is too old.", goos, goarch) + } + + t.Parallel() + requireOvercommit(t) + config := configure("thread") + config.skipIfCSanitizerBroken(t) + + mustRun(t, config.goCmd("build", "std")) + + cases := []struct { + src string + needsRuntime bool + }{ + {src: "tsan.go"}, + {src: "tsan2.go"}, + {src: "tsan3.go"}, + {src: "tsan4.go"}, + {src: "tsan5.go", needsRuntime: true}, + {src: "tsan6.go", needsRuntime: true}, + {src: "tsan7.go", needsRuntime: true}, + {src: "tsan8.go"}, + {src: "tsan9.go"}, + {src: "tsan10.go", needsRuntime: true}, + {src: "tsan11.go", needsRuntime: true}, + {src: "tsan12.go", needsRuntime: true}, + {src: "tsan13.go", needsRuntime: true}, + {src: "tsan14.go", needsRuntime: true}, + } + for _, tc := range cases { + tc := tc + name := strings.TrimSuffix(tc.src, ".go") + t.Run(name, func(t *testing.T) { + t.Parallel() + + dir := newTempDir(t) + defer dir.RemoveAll(t) + + outPath := dir.Join(name) + mustRun(t, config.goCmd("build", "-o", outPath, srcPath(tc.src))) + + cmd := hangProneCmd(outPath) + if tc.needsRuntime { + config.skipIfRuntimeIncompatible(t) + } + // If we don't see halt_on_error, the program + // will only exit non-zero if we call C.exit. 
+ cmd.Env = append(cmd.Environ(), "TSAN_OPTIONS=halt_on_error=1") + mustRun(t, cmd) + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/shared_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/shared_test.go new file mode 100644 index 0000000000000000000000000000000000000000..814b9994f824abaa76c25dab477e15f0cedd6504 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/shared_test.go @@ -0,0 +1,1184 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package shared_test + +import ( + "bufio" + "bytes" + "cmd/cgo/internal/cgotest" + "debug/elf" + "encoding/binary" + "flag" + "fmt" + "go/build" + "internal/platform" + "internal/testenv" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "testing" + "time" +) + +var globalSkip = func(t testing.TB) {} + +var gopathInstallDir, gorootInstallDir string +var oldGOROOT string + +// This is the smallest set of packages we can link into a shared +// library (runtime/cgo is built implicitly). +var minpkgs = []string{"runtime", "sync/atomic"} +var soname = "libruntime,sync-atomic.so" + +var testX = flag.Bool("testx", false, "if true, pass -x to 'go' subcommands invoked by the test") +var testWork = flag.Bool("testwork", false, "if true, log and do not delete the temporary working directory") + +// run runs a command and calls t.Errorf if it fails. +func run(t *testing.T, msg string, args ...string) { + runWithEnv(t, msg, nil, args...) +} + +// runWithEnv runs a command under the given environment and calls t.Errorf if it fails. +func runWithEnv(t *testing.T, msg string, env []string, args ...string) { + c := exec.Command(args[0], args[1:]...) + if len(env) != 0 { + c.Env = append(os.Environ(), env...) 
+ } + if output, err := c.CombinedOutput(); err != nil { + t.Errorf("executing %s (%s) failed %s:\n%s", strings.Join(args, " "), msg, err, output) + } +} + +// goCmd invokes the go tool with the installsuffix set up by TestMain. It calls +// t.Fatalf if the command fails. +func goCmd(t *testing.T, args ...string) string { + newargs := []string{args[0]} + if *testX && args[0] != "env" { + newargs = append(newargs, "-x", "-ldflags=-v") + } + newargs = append(newargs, args[1:]...) + c := exec.Command(filepath.Join(oldGOROOT, "bin", "go"), newargs...) + stderr := new(strings.Builder) + c.Stderr = stderr + + if testing.Verbose() && t == nil { + fmt.Fprintf(os.Stderr, "+ go %s\n", strings.Join(args, " ")) + c.Stderr = os.Stderr + } + output, err := c.Output() + + if err != nil { + if t != nil { + t.Helper() + t.Fatalf("executing %s failed %v:\n%s", strings.Join(c.Args, " "), err, stderr) + } else { + // Panic instead of using log.Fatalf so that deferred cleanup may run in testMain. + log.Panicf("executing %s failed %v:\n%s", strings.Join(c.Args, " "), err, stderr) + } + } + if testing.Verbose() && t != nil { + t.Logf("go %s", strings.Join(args, " ")) + if stderr.Len() > 0 { + t.Logf("%s", stderr) + } + } + return string(bytes.TrimSpace(output)) +} + +// TestMain calls testMain so that the latter can use defer (TestMain exits with os.Exit). 
+func testMain(m *testing.M) (int, error) { + if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" { + globalSkip = func(t testing.TB) { t.Skip("short mode and $GO_BUILDER_NAME not set") } + return m.Run(), nil + } + if !platform.BuildModeSupported(runtime.Compiler, "shared", runtime.GOOS, runtime.GOARCH) { + globalSkip = func(t testing.TB) { t.Skip("shared build mode not supported") } + return m.Run(), nil + } + if !testenv.HasCGO() { + globalSkip = testenv.MustHaveCGO + return m.Run(), nil + } + + cwd, err := os.Getwd() + if err != nil { + log.Fatal(err) + } + oldGOROOT = filepath.Join(cwd, "../../../../..") + + workDir, err := os.MkdirTemp("", "shared_test") + if err != nil { + return 0, err + } + if *testWork || testing.Verbose() { + fmt.Printf("+ mkdir -p %s\n", workDir) + } + if !*testWork { + defer os.RemoveAll(workDir) + } + + // -buildmode=shared fundamentally does not work in module mode. + // (It tries to share package dependencies across builds, but in module mode + // each module has its own distinct set of dependency versions.) + // We would like to eliminate it (see https://go.dev/issue/47788), + // but first need to figure out a replacement that covers the small subset + // of use-cases where -buildmode=shared still works today. + // For now, run the tests in GOPATH mode only. + os.Setenv("GO111MODULE", "off") + + // Some tests need to edit the source in GOPATH, so copy this directory to a + // temporary directory and chdir to that. + gopath := filepath.Join(workDir, "gopath") + modRoot, err := cloneTestdataModule(gopath) + if err != nil { + return 0, err + } + if testing.Verbose() { + fmt.Printf("+ export GOPATH=%s\n", gopath) + fmt.Printf("+ cd %s\n", modRoot) + } + os.Setenv("GOPATH", gopath) + // Explicitly override GOBIN as well, in case it was set through a GOENV file. 
+ os.Setenv("GOBIN", filepath.Join(gopath, "bin")) + os.Chdir(modRoot) + os.Setenv("PWD", modRoot) + + // The test also needs to install libraries into GOROOT/pkg, so copy the + // subset of GOROOT that we need. + // + // TODO(golang.org/issue/28553): Rework -buildmode=shared so that it does not + // need to write to GOROOT. + goroot := filepath.Join(workDir, "goroot") + if err := cloneGOROOTDeps(goroot); err != nil { + return 0, err + } + if testing.Verbose() { + fmt.Fprintf(os.Stderr, "+ export GOROOT=%s\n", goroot) + } + os.Setenv("GOROOT", goroot) + + myContext := build.Default + myContext.GOROOT = goroot + myContext.GOPATH = gopath + + // All tests depend on runtime being built into a shared library. Because + // that takes a few seconds, do it here and have all tests use the version + // built here. + goCmd(nil, append([]string{"install", "-buildmode=shared"}, minpkgs...)...) + + shlib := goCmd(nil, "list", "-linkshared", "-f={{.Shlib}}", "runtime") + if shlib != "" { + gorootInstallDir = filepath.Dir(shlib) + } + + myContext.InstallSuffix = "_dynlink" + depP, err := myContext.Import("./depBase", ".", build.ImportComment) + if err != nil { + return 0, fmt.Errorf("import failed: %v", err) + } + if depP.PkgTargetRoot == "" { + gopathInstallDir = filepath.Dir(goCmd(nil, "list", "-buildmode=shared", "-f", "{{.Target}}", "./depBase")) + } else { + gopathInstallDir = filepath.Join(depP.PkgTargetRoot, "testshared") + } + return m.Run(), nil +} + +func TestMain(m *testing.M) { + log.SetFlags(log.Lshortfile) + flag.Parse() + + exitCode, err := testMain(m) + if err != nil { + log.Fatal(err) + } + os.Exit(exitCode) +} + +// cloneTestdataModule clones the packages from src/testshared into gopath. +// It returns the directory within gopath at which the module root is located. 
+func cloneTestdataModule(gopath string) (string, error) { + modRoot := filepath.Join(gopath, "src", "testshared") + if err := cgotest.OverlayDir(modRoot, "testdata"); err != nil { + return "", err + } + if err := os.WriteFile(filepath.Join(modRoot, "go.mod"), []byte("module testshared\n"), 0644); err != nil { + return "", err + } + return modRoot, nil +} + +// cloneGOROOTDeps copies (or symlinks) the portions of GOROOT/src and +// GOROOT/pkg relevant to this test into the given directory. +// It must be run from within the testdata module. +func cloneGOROOTDeps(goroot string) error { + // Before we clone GOROOT, figure out which packages we need to copy over. + listArgs := []string{ + "list", + "-deps", + "-f", "{{if and .Standard (not .ForTest)}}{{.ImportPath}}{{end}}", + } + stdDeps := goCmd(nil, append(listArgs, minpkgs...)...) + testdataDeps := goCmd(nil, append(listArgs, "-test", "./...")...) + + pkgs := append(strings.Split(strings.TrimSpace(stdDeps), "\n"), + strings.Split(strings.TrimSpace(testdataDeps), "\n")...) + sort.Strings(pkgs) + var pkgRoots []string + for _, pkg := range pkgs { + parentFound := false + for _, prev := range pkgRoots { + if pkg == prev || strings.HasPrefix(pkg, prev+"/") { + // We will copy in the source for pkg when we copy in prev. + parentFound = true + break + } + } + if !parentFound { + pkgRoots = append(pkgRoots, pkg) + } + } + + gorootDirs := []string{ + "pkg/tool", + "pkg/include", + } + for _, pkg := range pkgRoots { + gorootDirs = append(gorootDirs, filepath.Join("src", pkg)) + } + + for _, dir := range gorootDirs { + if testing.Verbose() { + fmt.Fprintf(os.Stderr, "+ cp -r %s %s\n", filepath.Join(oldGOROOT, dir), filepath.Join(goroot, dir)) + } + if err := cgotest.OverlayDir(filepath.Join(goroot, dir), filepath.Join(oldGOROOT, dir)); err != nil { + return err + } + } + + return nil +} + +// The shared library was built at the expected location. 
+func TestSOBuilt(t *testing.T) { + globalSkip(t) + _, err := os.Stat(filepath.Join(gorootInstallDir, soname)) + if err != nil { + t.Error(err) + } +} + +func hasDynTag(f *elf.File, tag elf.DynTag) bool { + ds := f.SectionByType(elf.SHT_DYNAMIC) + if ds == nil { + return false + } + d, err := ds.Data() + if err != nil { + return false + } + for len(d) > 0 { + var t elf.DynTag + switch f.Class { + case elf.ELFCLASS32: + t = elf.DynTag(f.ByteOrder.Uint32(d[0:4])) + d = d[8:] + case elf.ELFCLASS64: + t = elf.DynTag(f.ByteOrder.Uint64(d[0:8])) + d = d[16:] + } + if t == tag { + return true + } + } + return false +} + +// The shared library does not have relocations against the text segment. +func TestNoTextrel(t *testing.T) { + globalSkip(t) + sopath := filepath.Join(gorootInstallDir, soname) + f, err := elf.Open(sopath) + if err != nil { + t.Fatal("elf.Open failed: ", err) + } + defer f.Close() + if hasDynTag(f, elf.DT_TEXTREL) { + t.Errorf("%s has DT_TEXTREL set", soname) + } +} + +// The shared library does not contain symbols called ".dup" +// (See golang.org/issue/14841.) +func TestNoDupSymbols(t *testing.T) { + globalSkip(t) + sopath := filepath.Join(gorootInstallDir, soname) + f, err := elf.Open(sopath) + if err != nil { + t.Fatal("elf.Open failed: ", err) + } + defer f.Close() + syms, err := f.Symbols() + if err != nil { + t.Errorf("error reading symbols %v", err) + return + } + for _, s := range syms { + if s.Name == ".dup" { + t.Fatalf("%s contains symbol called .dup", sopath) + } + } +} + +// The install command should have created a "shlibname" file for the +// listed packages (and runtime/cgo, and math on arm) indicating the +// name of the shared library containing it. +func TestShlibnameFiles(t *testing.T) { + globalSkip(t) + pkgs := append([]string{}, minpkgs...) 
+ pkgs = append(pkgs, "runtime/cgo") + if runtime.GOARCH == "arm" { + pkgs = append(pkgs, "math") + } + for _, pkg := range pkgs { + shlibnamefile := filepath.Join(gorootInstallDir, pkg+".shlibname") + contentsb, err := os.ReadFile(shlibnamefile) + if err != nil { + t.Errorf("error reading shlibnamefile for %s: %v", pkg, err) + continue + } + contents := strings.TrimSpace(string(contentsb)) + if contents != soname { + t.Errorf("shlibnamefile for %s has wrong contents: %q", pkg, contents) + } + } +} + +// Is a given offset into the file contained in a loaded segment? +func isOffsetLoaded(f *elf.File, offset uint64) bool { + for _, prog := range f.Progs { + if prog.Type == elf.PT_LOAD { + if prog.Off <= offset && offset < prog.Off+prog.Filesz { + return true + } + } + } + return false +} + +func rnd(v int32, r int32) int32 { + if r <= 0 { + return v + } + v += r - 1 + c := v % r + if c < 0 { + c += r + } + v -= c + return v +} + +func readwithpad(r io.Reader, sz int32) ([]byte, error) { + data := make([]byte, rnd(sz, 4)) + _, err := io.ReadFull(r, data) + if err != nil { + return nil, err + } + data = data[:sz] + return data, nil +} + +type note struct { + name string + tag int32 + desc string + section *elf.Section +} + +// Read all notes from f. As ELF section names are not supposed to be special, one +// looks for a particular note by scanning all SHT_NOTE sections looking for a note +// with a particular "name" and "tag". 
+func readNotes(f *elf.File) ([]*note, error) { + var notes []*note + for _, sect := range f.Sections { + if sect.Type != elf.SHT_NOTE { + continue + } + r := sect.Open() + for { + var namesize, descsize, tag int32 + err := binary.Read(r, f.ByteOrder, &namesize) + if err != nil { + if err == io.EOF { + break + } + return nil, fmt.Errorf("read namesize failed: %v", err) + } + err = binary.Read(r, f.ByteOrder, &descsize) + if err != nil { + return nil, fmt.Errorf("read descsize failed: %v", err) + } + err = binary.Read(r, f.ByteOrder, &tag) + if err != nil { + return nil, fmt.Errorf("read type failed: %v", err) + } + name, err := readwithpad(r, namesize) + if err != nil { + return nil, fmt.Errorf("read name failed: %v", err) + } + desc, err := readwithpad(r, descsize) + if err != nil { + return nil, fmt.Errorf("read desc failed: %v", err) + } + notes = append(notes, ¬e{name: string(name), tag: tag, desc: string(desc), section: sect}) + } + } + return notes, nil +} + +func dynStrings(t *testing.T, path string, flag elf.DynTag) []string { + t.Helper() + f, err := elf.Open(path) + if err != nil { + t.Fatalf("elf.Open(%q) failed: %v", path, err) + } + defer f.Close() + dynstrings, err := f.DynString(flag) + if err != nil { + t.Fatalf("DynString(%s) failed on %s: %v", flag, path, err) + } + return dynstrings +} + +func AssertIsLinkedToRegexp(t *testing.T, path string, re *regexp.Regexp) { + t.Helper() + for _, dynstring := range dynStrings(t, path, elf.DT_NEEDED) { + if re.MatchString(dynstring) { + return + } + } + t.Errorf("%s is not linked to anything matching %v", path, re) +} + +func AssertIsLinkedTo(t *testing.T, path, lib string) { + t.Helper() + AssertIsLinkedToRegexp(t, path, regexp.MustCompile(regexp.QuoteMeta(lib))) +} + +func AssertHasRPath(t *testing.T, path, dir string) { + t.Helper() + for _, tag := range []elf.DynTag{elf.DT_RPATH, elf.DT_RUNPATH} { + for _, dynstring := range dynStrings(t, path, tag) { + for _, rpath := range strings.Split(dynstring, ":") 
{ + if filepath.Clean(rpath) == filepath.Clean(dir) { + return + } + } + } + } + t.Errorf("%s does not have rpath %s", path, dir) +} + +// Build a trivial program that links against the shared runtime and check it runs. +func TestTrivialExecutable(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-linkshared", "./trivial") + run(t, "trivial executable", "../../bin/trivial") + AssertIsLinkedTo(t, "../../bin/trivial", soname) + AssertHasRPath(t, "../../bin/trivial", gorootInstallDir) + // It is 19K on linux/amd64, with separate-code in binutils ld and 64k being most common alignment + // 4*64k should be enough, but this might need revision eventually. + checkSize(t, "../../bin/trivial", 256000) +} + +// Build a trivial program in PIE mode that links against the shared runtime and check it runs. +func TestTrivialExecutablePIE(t *testing.T) { + globalSkip(t) + goCmd(t, "build", "-buildmode=pie", "-o", "trivial.pie", "-linkshared", "./trivial") + run(t, "trivial executable", "./trivial.pie") + AssertIsLinkedTo(t, "./trivial.pie", soname) + AssertHasRPath(t, "./trivial.pie", gorootInstallDir) + // It is 19K on linux/amd64, with separate-code in binutils ld and 64k being most common alignment + // 4*64k should be enough, but this might need revision eventually. + checkSize(t, "./trivial.pie", 256000) +} + +// Check that the file size does not exceed a limit. +func checkSize(t *testing.T, f string, limit int64) { + fi, err := os.Stat(f) + if err != nil { + t.Fatalf("stat failed: %v", err) + } + if sz := fi.Size(); sz > limit { + t.Errorf("file too large: got %d, want <= %d", sz, limit) + } +} + +// Build a division test program and check it runs. +func TestDivisionExecutable(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-linkshared", "./division") + run(t, "division executable", "../../bin/division") +} + +// Build an executable that uses cgo linked against the shared runtime and check it +// runs. 
+func TestCgoExecutable(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-linkshared", "./execgo") + run(t, "cgo executable", "../../bin/execgo") +} + +func checkPIE(t *testing.T, name string) { + f, err := elf.Open(name) + if err != nil { + t.Fatal("elf.Open failed: ", err) + } + defer f.Close() + if f.Type != elf.ET_DYN { + t.Errorf("%s has type %v, want ET_DYN", name, f.Type) + } + if hasDynTag(f, elf.DT_TEXTREL) { + t.Errorf("%s has DT_TEXTREL set", name) + } +} + +func TestTrivialPIE(t *testing.T) { + if strings.HasSuffix(os.Getenv("GO_BUILDER_NAME"), "-alpine") { + t.Skip("skipping on alpine until issue #54354 resolved") + } + globalSkip(t) + testenv.MustHaveBuildMode(t, "pie") + name := "trivial_pie" + goCmd(t, "build", "-buildmode=pie", "-o="+name, "./trivial") + defer os.Remove(name) + run(t, name, "./"+name) + checkPIE(t, name) +} + +func TestCgoPIE(t *testing.T) { + globalSkip(t) + testenv.MustHaveCGO(t) + testenv.MustHaveBuildMode(t, "pie") + name := "cgo_pie" + goCmd(t, "build", "-buildmode=pie", "-o="+name, "./execgo") + defer os.Remove(name) + run(t, name, "./"+name) + checkPIE(t, name) +} + +// Build a GOPATH package into a shared library that links against the goroot runtime +// and an executable that links against both. +func TestGopathShlib(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./depBase") + shlib := goCmd(t, "list", "-f", "{{.Shlib}}", "-buildmode=shared", "-linkshared", "./depBase") + AssertIsLinkedTo(t, shlib, soname) + goCmd(t, "install", "-linkshared", "./exe") + AssertIsLinkedTo(t, "../../bin/exe", soname) + AssertIsLinkedTo(t, "../../bin/exe", filepath.Base(shlib)) + AssertHasRPath(t, "../../bin/exe", gorootInstallDir) + AssertHasRPath(t, "../../bin/exe", filepath.Dir(gopathInstallDir)) + // And check it runs. 
+ run(t, "executable linked to GOPATH library", "../../bin/exe") +} + +// The shared library contains a note listing the packages it contains in a section +// that is not mapped into memory. +func testPkgListNote(t *testing.T, f *elf.File, note *note) { + if note.section.Flags != 0 { + t.Errorf("package list section has flags %v, want 0", note.section.Flags) + } + if isOffsetLoaded(f, note.section.Offset) { + t.Errorf("package list section contained in PT_LOAD segment") + } + if note.desc != "testshared/depBase\n" { + t.Errorf("incorrect package list %q, want %q", note.desc, "testshared/depBase\n") + } +} + +// The shared library contains a note containing the ABI hash that is mapped into +// memory and there is a local symbol called go.link.abihashbytes that points 16 +// bytes into it. +func testABIHashNote(t *testing.T, f *elf.File, note *note) { + if note.section.Flags != elf.SHF_ALLOC { + t.Errorf("abi hash section has flags %v, want SHF_ALLOC", note.section.Flags) + } + if !isOffsetLoaded(f, note.section.Offset) { + t.Errorf("abihash section not contained in PT_LOAD segment") + } + var hashbytes elf.Symbol + symbols, err := f.Symbols() + if err != nil { + t.Errorf("error reading symbols %v", err) + return + } + for _, sym := range symbols { + if sym.Name == "go:link.abihashbytes" { + hashbytes = sym + } + } + if hashbytes.Name == "" { + t.Errorf("no symbol called go:link.abihashbytes") + return + } + if elf.ST_BIND(hashbytes.Info) != elf.STB_LOCAL { + t.Errorf("%s has incorrect binding %v, want STB_LOCAL", hashbytes.Name, elf.ST_BIND(hashbytes.Info)) + } + if f.Sections[hashbytes.Section] != note.section { + t.Errorf("%s has incorrect section %v, want %s", hashbytes.Name, f.Sections[hashbytes.Section].Name, note.section.Name) + } + if hashbytes.Value-note.section.Addr != 16 { + t.Errorf("%s has incorrect offset into section %d, want 16", hashbytes.Name, hashbytes.Value-note.section.Addr) + } +} + +// A Go shared library contains a note indicating which other 
Go shared libraries it +// was linked against in an unmapped section. +func testDepsNote(t *testing.T, f *elf.File, note *note) { + if note.section.Flags != 0 { + t.Errorf("package list section has flags %v, want 0", note.section.Flags) + } + if isOffsetLoaded(f, note.section.Offset) { + t.Errorf("package list section contained in PT_LOAD segment") + } + // libdepBase.so just links against the lib containing the runtime. + if note.desc != soname { + t.Errorf("incorrect dependency list %q, want %q", note.desc, soname) + } +} + +// The shared library contains notes with defined contents; see above. +func TestNotes(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./depBase") + shlib := goCmd(t, "list", "-f", "{{.Shlib}}", "-buildmode=shared", "-linkshared", "./depBase") + f, err := elf.Open(shlib) + if err != nil { + t.Fatal(err) + } + defer f.Close() + notes, err := readNotes(f) + if err != nil { + t.Fatal(err) + } + pkgListNoteFound := false + abiHashNoteFound := false + depsNoteFound := false + for _, note := range notes { + if note.name != "Go\x00\x00" { + continue + } + switch note.tag { + case 1: // ELF_NOTE_GOPKGLIST_TAG + if pkgListNoteFound { + t.Error("multiple package list notes") + } + testPkgListNote(t, f, note) + pkgListNoteFound = true + case 2: // ELF_NOTE_GOABIHASH_TAG + if abiHashNoteFound { + t.Error("multiple abi hash notes") + } + testABIHashNote(t, f, note) + abiHashNoteFound = true + case 3: // ELF_NOTE_GODEPS_TAG + if depsNoteFound { + t.Error("multiple dependency list notes") + } + testDepsNote(t, f, note) + depsNoteFound = true + } + } + if !pkgListNoteFound { + t.Error("package list note not found") + } + if !abiHashNoteFound { + t.Error("abi hash note not found") + } + if !depsNoteFound { + t.Error("deps note not found") + } +} + +// Build a GOPATH package (depBase) into a shared library that links against the goroot +// runtime, another package (dep2) that links against the first, and an +// 
executable that links against dep2. +func TestTwoGopathShlibs(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./depBase") + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./dep2") + goCmd(t, "install", "-linkshared", "./exe2") + run(t, "executable linked to GOPATH library", "../../bin/exe2") +} + +func TestThreeGopathShlibs(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./depBase") + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./dep2") + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./dep3") + goCmd(t, "install", "-linkshared", "./exe3") + run(t, "executable linked to GOPATH library", "../../bin/exe3") +} + +// If gccgo is not available or not new enough, call t.Skip. +func requireGccgo(t *testing.T) { + t.Helper() + + if runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" { + t.Skip("gccgo test skipped on PPC64 until issue #60798 is resolved") + } + + gccgoName := os.Getenv("GCCGO") + if gccgoName == "" { + gccgoName = "gccgo" + } + gccgoPath, err := exec.LookPath(gccgoName) + if err != nil { + t.Skip("gccgo not found") + } + cmd := exec.Command(gccgoPath, "-dumpversion") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("%s -dumpversion failed: %v\n%s", gccgoPath, err, output) + } + dot := bytes.Index(output, []byte{'.'}) + if dot > 0 { + output = output[:dot] + } + major, err := strconv.Atoi(strings.TrimSpace(string(output))) + if err != nil { + t.Skipf("can't parse gccgo version number %s", output) + } + if major < 5 { + t.Skipf("gccgo too old (%s)", strings.TrimSpace(string(output))) + } + + gomod, err := exec.Command("go", "env", "GOMOD").Output() + if err != nil { + t.Fatalf("go env GOMOD: %v", err) + } + if len(bytes.TrimSpace(gomod)) > 0 { + t.Skipf("gccgo not supported in module mode; see golang.org/issue/30344") + } +} + +// Build a GOPATH package into a shared library with gccgo and an executable that +// links 
against it. +func TestGoPathShlibGccgo(t *testing.T) { + globalSkip(t) + requireGccgo(t) + + libgoRE := regexp.MustCompile("libgo.so.[0-9]+") + + goCmd(t, "install", "-compiler=gccgo", "-buildmode=shared", "-linkshared", "./depBase") + + // Run 'go list' after 'go install': with gccgo, we apparently don't know the + // shlib location until after we've installed it. + shlib := goCmd(t, "list", "-compiler=gccgo", "-buildmode=shared", "-linkshared", "-f", "{{.Shlib}}", "./depBase") + + AssertIsLinkedToRegexp(t, shlib, libgoRE) + goCmd(t, "install", "-compiler=gccgo", "-linkshared", "./exe") + AssertIsLinkedToRegexp(t, "../../bin/exe", libgoRE) + AssertIsLinkedTo(t, "../../bin/exe", filepath.Base(shlib)) + AssertHasRPath(t, "../../bin/exe", filepath.Dir(shlib)) + // And check it runs. + run(t, "gccgo-built", "../../bin/exe") +} + +// The gccgo version of TestTwoGopathShlibs: build a GOPATH package into a shared +// library with gccgo, another GOPATH package that depends on the first and an +// executable that links the second library. +func TestTwoGopathShlibsGccgo(t *testing.T) { + globalSkip(t) + requireGccgo(t) + + libgoRE := regexp.MustCompile("libgo.so.[0-9]+") + + goCmd(t, "install", "-compiler=gccgo", "-buildmode=shared", "-linkshared", "./depBase") + goCmd(t, "install", "-compiler=gccgo", "-buildmode=shared", "-linkshared", "./dep2") + goCmd(t, "install", "-compiler=gccgo", "-linkshared", "./exe2") + + // Run 'go list' after 'go install': with gccgo, we apparently don't know the + // shlib location until after we've installed it. 
+ dep2 := goCmd(t, "list", "-compiler=gccgo", "-buildmode=shared", "-linkshared", "-f", "{{.Shlib}}", "./dep2") + depBase := goCmd(t, "list", "-compiler=gccgo", "-buildmode=shared", "-linkshared", "-f", "{{.Shlib}}", "./depBase") + + AssertIsLinkedToRegexp(t, depBase, libgoRE) + AssertIsLinkedToRegexp(t, dep2, libgoRE) + AssertIsLinkedTo(t, dep2, filepath.Base(depBase)) + AssertIsLinkedToRegexp(t, "../../bin/exe2", libgoRE) + AssertIsLinkedTo(t, "../../bin/exe2", filepath.Base(dep2)) + AssertIsLinkedTo(t, "../../bin/exe2", filepath.Base(depBase)) + + // And check it runs. + run(t, "gccgo-built", "../../bin/exe2") +} + +// Testing rebuilding of shared libraries when they are stale is a bit more +// complicated that it seems like it should be. First, we make everything "old": but +// only a few seconds old, or it might be older than gc (or the runtime source) and +// everything will get rebuilt. Then define a timestamp slightly newer than this +// time, which is what we set the mtime to of a file to cause it to be seen as new, +// and finally another slightly even newer one that we can compare files against to +// see if they have been rebuilt. +var oldTime = time.Now().Add(-9 * time.Second) +var nearlyNew = time.Now().Add(-6 * time.Second) +var stampTime = time.Now().Add(-3 * time.Second) + +// resetFileStamps makes "everything" (bin, src, pkg from GOPATH and the +// test-specific parts of GOROOT) appear old. +func resetFileStamps() { + chtime := func(path string, info os.FileInfo, err error) error { + return os.Chtimes(path, oldTime, oldTime) + } + reset := func(path string) { + if err := filepath.Walk(path, chtime); err != nil { + log.Panicf("resetFileStamps failed: %v", err) + } + + } + reset("../../bin") + reset("../../pkg") + reset("../../src") + reset(gorootInstallDir) +} + +// touch changes path and returns a function that changes it back. +// It also sets the time of the file, so that we can see if it is rewritten. 
+func touch(t *testing.T, path string) (cleanup func()) { + t.Helper() + data, err := os.ReadFile(path) + if err != nil { + t.Fatal(err) + } + old := make([]byte, len(data)) + copy(old, data) + if bytes.HasPrefix(data, []byte("!\n")) { + // Change last digit of build ID. + // (Content ID in the new content-based build IDs.) + const marker = `build id "` + i := bytes.Index(data, []byte(marker)) + if i < 0 { + t.Fatal("cannot find build id in archive") + } + j := bytes.IndexByte(data[i+len(marker):], '"') + if j < 0 { + t.Fatal("cannot find build id in archive") + } + i += len(marker) + j - 1 + if data[i] == 'a' { + data[i] = 'b' + } else { + data[i] = 'a' + } + } else { + // assume it's a text file + data = append(data, '\n') + } + + // If the file is still a symlink from an overlay, delete it so that we will + // replace it with a regular file instead of overwriting the symlinked one. + fi, err := os.Lstat(path) + if err == nil && !fi.Mode().IsRegular() { + fi, err = os.Stat(path) + if err := os.Remove(path); err != nil { + t.Fatal(err) + } + } + if err != nil { + t.Fatal(err) + } + + // If we're replacing a symlink to a read-only file, make the new file + // user-writable. + perm := fi.Mode().Perm() | 0200 + + if err := os.WriteFile(path, data, perm); err != nil { + t.Fatal(err) + } + if err := os.Chtimes(path, nearlyNew, nearlyNew); err != nil { + t.Fatal(err) + } + return func() { + if err := os.WriteFile(path, old, perm); err != nil { + t.Fatal(err) + } + } +} + +// isNew returns if the path is newer than the time stamp used by touch. +func isNew(t *testing.T, path string) bool { + t.Helper() + fi, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + return fi.ModTime().After(stampTime) +} + +// Fail unless path has been rebuilt (i.e. 
is newer than the time stamp used by +// isNew) +func AssertRebuilt(t *testing.T, msg, path string) { + t.Helper() + if !isNew(t, path) { + t.Errorf("%s was not rebuilt (%s)", msg, path) + } +} + +// Fail if path has been rebuilt (i.e. is newer than the time stamp used by isNew) +func AssertNotRebuilt(t *testing.T, msg, path string) { + t.Helper() + if isNew(t, path) { + t.Errorf("%s was rebuilt (%s)", msg, path) + } +} + +func TestRebuilding(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./depBase") + goCmd(t, "install", "-linkshared", "./exe") + info := strings.Fields(goCmd(t, "list", "-buildmode=shared", "-linkshared", "-f", "{{.Target}} {{.Shlib}}", "./depBase")) + if len(info) != 2 { + t.Fatalf("go list failed to report Target and/or Shlib") + } + target := info[0] + shlib := info[1] + + // If the source is newer than both the .a file and the .so, both are rebuilt. + t.Run("newsource", func(t *testing.T) { + resetFileStamps() + cleanup := touch(t, "./depBase/dep.go") + defer func() { + cleanup() + goCmd(t, "install", "-linkshared", "./exe") + }() + goCmd(t, "install", "-linkshared", "./exe") + AssertRebuilt(t, "new source", target) + AssertRebuilt(t, "new source", shlib) + }) + + // If the .a file is newer than the .so, the .so is rebuilt (but not the .a) + t.Run("newarchive", func(t *testing.T) { + resetFileStamps() + AssertNotRebuilt(t, "new .a file before build", target) + goCmd(t, "list", "-linkshared", "-f={{.ImportPath}} {{.Stale}} {{.StaleReason}} {{.Target}}", "./depBase") + AssertNotRebuilt(t, "new .a file before build", target) + cleanup := touch(t, target) + defer func() { + cleanup() + goCmd(t, "install", "-v", "-linkshared", "./exe") + }() + goCmd(t, "install", "-v", "-linkshared", "./exe") + AssertNotRebuilt(t, "new .a file", target) + AssertRebuilt(t, "new .a file", shlib) + }) +} + +func appendFile(t *testing.T, path, content string) { + t.Helper() + f, err := os.OpenFile(path, 
os.O_WRONLY|os.O_APPEND, 0660) + if err != nil { + t.Fatalf("os.OpenFile failed: %v", err) + } + defer func() { + err := f.Close() + if err != nil { + t.Fatalf("f.Close failed: %v", err) + } + }() + _, err = f.WriteString(content) + if err != nil { + t.Fatalf("f.WriteString failed: %v", err) + } +} + +func createFile(t *testing.T, path, content string) { + t.Helper() + f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644) + if err != nil { + t.Fatalf("os.OpenFile failed: %v", err) + } + _, err = f.WriteString(content) + if closeErr := f.Close(); err == nil { + err = closeErr + } + if err != nil { + t.Fatalf("WriteString failed: %v", err) + } +} + +func TestABIChecking(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./depBase") + goCmd(t, "install", "-linkshared", "./exe") + + // If we make an ABI-breaking change to depBase and rebuild libp.so but not exe, + // exe will abort with a complaint on startup. + // This assumes adding an exported function breaks ABI, which is not true in + // some senses but suffices for the narrow definition of ABI compatibility the + // toolchain uses today. 
+ resetFileStamps() + + createFile(t, "./depBase/break.go", "package depBase\nfunc ABIBreak() {}\n") + defer os.Remove("./depBase/break.go") + + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./depBase") + c := exec.Command("../../bin/exe") + output, err := c.CombinedOutput() + if err == nil { + t.Fatal("executing exe did not fail after ABI break") + } + scanner := bufio.NewScanner(bytes.NewReader(output)) + foundMsg := false + const wantPrefix = "abi mismatch detected between the executable and lib" + for scanner.Scan() { + if strings.HasPrefix(scanner.Text(), wantPrefix) { + foundMsg = true + break + } + } + if err = scanner.Err(); err != nil { + t.Errorf("scanner encountered error: %v", err) + } + if !foundMsg { + t.Fatalf("exe failed, but without line %q; got output:\n%s", wantPrefix, output) + } + + // Rebuilding exe makes it work again. + goCmd(t, "install", "-linkshared", "./exe") + run(t, "rebuilt exe", "../../bin/exe") + + // If we make a change which does not break ABI (such as adding an unexported + // function) and rebuild libdepBase.so, exe still works, even if new function + // is in a file by itself. + resetFileStamps() + createFile(t, "./depBase/dep2.go", "package depBase\nfunc noABIBreak() {}\n") + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./depBase") + run(t, "after non-ABI breaking change", "../../bin/exe") +} + +// If a package 'explicit' imports a package 'implicit', building +// 'explicit' into a shared library implicitly includes implicit in +// the shared library. Building an executable that imports both +// explicit and implicit builds the code from implicit into the +// executable rather than fetching it from the shared library. The +// link still succeeds and the executable still runs though. 
+func TestImplicitInclusion(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./explicit") + goCmd(t, "install", "-linkshared", "./implicitcmd") + run(t, "running executable linked against library that contains same package as it", "../../bin/implicitcmd") +} + +// Tests to make sure that the type fields of empty interfaces and itab +// fields of nonempty interfaces are unique even across modules, +// so that interface equality works correctly. +func TestInterface(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./iface_a") + // Note: iface_i gets installed implicitly as a dependency of iface_a. + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./iface_b") + goCmd(t, "install", "-linkshared", "./iface") + run(t, "running type/itab uniqueness tester", "../../bin/iface") +} + +// Access a global variable from a library. +func TestGlobal(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./globallib") + goCmd(t, "install", "-linkshared", "./global") + run(t, "global executable", "../../bin/global") + AssertIsLinkedTo(t, "../../bin/global", soname) + AssertHasRPath(t, "../../bin/global", gorootInstallDir) +} + +// Run a test using -linkshared of an installed shared package. +// Issue 26400. +func TestTestInstalledShared(t *testing.T) { + globalSkip(t) + goCmd(t, "test", "-linkshared", "-test.short", "sync/atomic") +} + +// Test generated pointer method with -linkshared. +// Issue 25065. +func TestGeneratedMethod(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./issue25065") +} + +// Test use of shared library struct with generated hash function. +// Issue 30768. 
+func TestGeneratedHash(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./issue30768/issue30768lib") + goCmd(t, "test", "-linkshared", "./issue30768") +} + +// Test that packages can be added not in dependency order (here a depends on b, and a adds +// before b). This could happen with e.g. go build -buildmode=shared std. See issue 39777. +func TestPackageOrder(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./issue39777/a", "./issue39777/b") +} + +// Test that GC data are generated correctly by the linker when it needs a type defined in +// a shared library. See issue 39927. +func TestGCData(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./gcdata/p") + goCmd(t, "build", "-linkshared", "./gcdata/main") + runWithEnv(t, "running gcdata/main", []string{"GODEBUG=clobberfree=1"}, "./main") +} + +// Test that we don't decode type symbols from shared libraries (which has no data, +// causing panic). See issue 44031. +func TestIssue44031(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./issue44031/a") + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./issue44031/b") + goCmd(t, "run", "-linkshared", "./issue44031/main") +} + +// Test that we use a variable from shared libraries (which implement an +// interface in shared libraries.). A weak reference is used in the itab +// in main process. It can cause unreachable panic. See issue 47873. +func TestIssue47873(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./issue47837/a") + goCmd(t, "run", "-linkshared", "./issue47837/main") +} + +func TestIssue62277(t *testing.T) { + globalSkip(t) + goCmd(t, "install", "-buildmode=shared", "-linkshared", "./issue62277/p") + goCmd(t, "test", "-linkshared", "./issue62277") +} + +// Test that we can build std in shared mode. 
+func TestStd(t *testing.T) { + if testing.Short() { + t.Skip("skip in short mode") + } + globalSkip(t) + t.Parallel() + tmpDir := t.TempDir() + // Use a temporary pkgdir to not interfere with other tests, and not write to GOROOT. + // Cannot use goCmd as it runs with cloned GOROOT which is incomplete. + runWithEnv(t, "building std", []string{"GOROOT=" + oldGOROOT}, + filepath.Join(oldGOROOT, "bin", "go"), "install", "-buildmode=shared", "-pkgdir="+tmpDir, "std") + + // Issue #58966. + runWithEnv(t, "testing issue #58966", []string{"GOROOT=" + oldGOROOT}, + filepath.Join(oldGOROOT, "bin", "go"), "run", "-linkshared", "-pkgdir="+tmpDir, "./issue58966/main.go") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/dep2/dep2.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/dep2/dep2.go new file mode 100644 index 0000000000000000000000000000000000000000..18d774b5fcb07ccb8b9eae444191963a8c20a5e3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/dep2/dep2.go @@ -0,0 +1,21 @@ +package dep2 + +import "testshared/depBase" + +func init() { + if !depBase.Initialized { + panic("depBase not initialized") + } +} + +var W int = 1 + +var hasProg depBase.HasProg + +type Dep2 struct { + depBase.Dep +} + +func G() int { + return depBase.F() + 1 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/dep3/dep3.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/dep3/dep3.go new file mode 100644 index 0000000000000000000000000000000000000000..6b02ad2ee5b97dae33373421c6bd8c3b8f616623 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/dep3/dep3.go @@ -0,0 +1,22 @@ +package dep3 + +// The point of this test file is that it references a type from +// depBase that is also referenced in dep2, but dep2 is loaded by the +// linker before depBase (because it is earlier in the import list). 
+// There was a bug in the linker where it would not correctly read out +// the type data in this case and later crash. + +import ( + "testshared/dep2" + "testshared/depBase" +) + +type Dep3 struct { + dep depBase.Dep + dep2 dep2.Dep2 +} + +func D3() int { + var x Dep3 + return x.dep.X + x.dep2.X +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/depBase/asm.s b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/depBase/asm.s new file mode 100644 index 0000000000000000000000000000000000000000..51adca3a338d1877a863744c752242cf675e79cc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/depBase/asm.s @@ -0,0 +1,10 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +TEXT ·ImplementedInAsm(SB),NOSPLIT,$0-0 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/depBase/dep.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/depBase/dep.go new file mode 100644 index 0000000000000000000000000000000000000000..a143fe2ff1604599bf2568f4b1914647969ae50d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/depBase/dep.go @@ -0,0 +1,53 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package depBase + +import ( + "os" + "reflect" + + "testshared/depBaseInternal" +) + +// Issue 61973: indirect dependencies are not initialized. 
+func init() { + if !depBaseInternal.Initialized { + panic("depBaseInternal not initialized") + } + if os.Stdout == nil { + panic("os.Stdout is nil") + } + + Initialized = true +} + +var Initialized bool + +var SlicePtr interface{} = &[]int{} + +var V int = 1 + +var HasMask []string = []string{"hi"} + +type HasProg struct { + array [1024]*byte +} + +type Dep struct { + X int +} + +func (d *Dep) Method() int { + // This code below causes various go.itab.* symbols to be generated in + // the shared library. Similar code in ../exe/exe.go results in + // exercising https://golang.org/issues/17594 + reflect.TypeOf(os.Stdout).Elem() + return 10 +} + +func F() int { + defer func() {}() + return V +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/depBase/gccgo.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/depBase/gccgo.go new file mode 100644 index 0000000000000000000000000000000000000000..a59d0b8c3ff56baf9a8eb0a02df9a31fca43c2f0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/depBase/gccgo.go @@ -0,0 +1,9 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gccgo + +package depBase + +func ImplementedInAsm() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/depBase/stubs.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/depBase/stubs.go new file mode 100644 index 0000000000000000000000000000000000000000..c15e4e9a7445259ef638913dd5403ed9b2d4f59f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/depBase/stubs.go @@ -0,0 +1,9 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build gc + +package depBase + +func ImplementedInAsm() diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/depBaseInternal/dep.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/depBaseInternal/dep.go new file mode 100644 index 0000000000000000000000000000000000000000..906bff09c40c72730cacdf86cb5e8d5ecb8d8d05 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/depBaseInternal/dep.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// depBaseInternal is only imported by depBase. + +package depBaseInternal + +var Initialized bool + +func init() { + Initialized = true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/division/division.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/division/division.go new file mode 100644 index 0000000000000000000000000000000000000000..bb5fc984602f72f19110e44eea505b15ac3917b8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/division/division.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +//go:noinline +func div(x, y uint32) uint32 { + return x / y +} + +func main() { + a := div(97, 11) + if a != 8 { + panic("FAIL") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/exe/exe.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/exe/exe.go new file mode 100644 index 0000000000000000000000000000000000000000..ee95f97bc99edbe6b946b18043c6d45c734fcebb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/exe/exe.go @@ -0,0 +1,45 @@ +package main + +import ( + "os" + "reflect" + "runtime" + + "testshared/depBase" +) + +// Having a function declared in the main package triggered +// golang.org/issue/18250 +func DeclaredInMain() { +} + +type C struct { +} + +func F() *C { + return nil +} + +var slicePtr interface{} = &[]int{} + +func main() { + defer depBase.ImplementedInAsm() + // This code below causes various go.itab.* symbols to be generated in + // the executable. 
Similar code in ../depBase/dep.go results in + // exercising https://golang.org/issues/17594 + reflect.TypeOf(os.Stdout).Elem() + runtime.GC() + depBase.V = depBase.F() + 1 + + var c *C + if reflect.TypeOf(F).Out(0) != reflect.TypeOf(c) { + panic("bad reflection results, see golang.org/issue/18252") + } + + sp := reflect.New(reflect.TypeOf(slicePtr).Elem()) + s := sp.Interface() + + if reflect.TypeOf(s) != reflect.TypeOf(slicePtr) { + panic("bad reflection results, see golang.org/issue/18729") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/exe2/exe2.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/exe2/exe2.go new file mode 100644 index 0000000000000000000000000000000000000000..433f331e369de6e4cf09b7fec1685d09c054cc99 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/exe2/exe2.go @@ -0,0 +1,8 @@ +package main + +import "testshared/dep2" + +func main() { + d := &dep2.Dep2{} + dep2.W = dep2.G() + 1 + d.Method() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/exe3/exe3.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/exe3/exe3.go new file mode 100644 index 0000000000000000000000000000000000000000..533e3a9e3dd1c051659e681f8233b0885effce0d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/exe3/exe3.go @@ -0,0 +1,7 @@ +package main + +import "testshared/dep3" + +func main() { + dep3.D3() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/execgo/exe.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/execgo/exe.go new file mode 100644 index 0000000000000000000000000000000000000000..0427be8bdfdfcb18d00ffe0b0e69a674a091aa62 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/execgo/exe.go @@ -0,0 +1,8 @@ +package main + +/* + */ +import "C" + +func main() { +} 
diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/explicit/explicit.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/explicit/explicit.go new file mode 100644 index 0000000000000000000000000000000000000000..af969fcb23a8febb5827916cc2967add8ede7187 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/explicit/explicit.go @@ -0,0 +1,9 @@ +package explicit + +import ( + "testshared/implicit" +) + +func E() int { + return implicit.I() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/gcdata/main/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/gcdata/main/main.go new file mode 100644 index 0000000000000000000000000000000000000000..394862fd94cbf208858972f3f340d5db4c23bc40 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/gcdata/main/main.go @@ -0,0 +1,37 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that GC data is generated correctly for global +// variables with types defined in a shared library. +// See issue 39927. + +// This test run under GODEBUG=clobberfree=1. The check +// *x[i] == 12345 depends on this debug mode to clobber +// the value if the object is freed prematurely. 
+ +package main + +import ( + "fmt" + "runtime" + "testshared/gcdata/p" +) + +var x p.T + +func main() { + for i := range x { + x[i] = new(int) + *x[i] = 12345 + } + runtime.GC() + runtime.GC() + runtime.GC() + for i := range x { + if *x[i] != 12345 { + fmt.Printf("x[%d] == %d, want 12345\n", i, *x[i]) + panic("FAIL") + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/gcdata/p/p.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/gcdata/p/p.go new file mode 100644 index 0000000000000000000000000000000000000000..1fee75429efe9da9af62708bfec739698bed32a9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/gcdata/p/p.go @@ -0,0 +1,7 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +type T [10]*int diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/global/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/global/main.go new file mode 100644 index 0000000000000000000000000000000000000000..f43e7c3fb3559f59854ad02f75b33c1c2e1bb56d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/global/main.go @@ -0,0 +1,71 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "testshared/globallib" +) + +//go:noinline +func testLoop() { + for i, s := range globallib.Data { + if s != int64(i) { + panic("testLoop: mismatch") + } + } +} + +//go:noinline +func ptrData() *[1<<20 + 10]int64 { + return &globallib.Data +} + +//go:noinline +func testMediumOffset() { + for i, s := range globallib.Data[1<<16-2:] { + if s != int64(i)+1<<16-2 { + panic("testMediumOffset: index mismatch") + } + } + + x := globallib.Data[1<<16-1] + if x != 1<<16-1 { + panic("testMediumOffset: direct mismatch") + } + + y := &globallib.Data[1<<16-3] + if y != &ptrData()[1<<16-3] { + panic("testMediumOffset: address mismatch") + } +} + +//go:noinline +func testLargeOffset() { + for i, s := range globallib.Data[1<<20:] { + if s != int64(i)+1<<20 { + panic("testLargeOffset: index mismatch") + } + } + + x := globallib.Data[1<<20+1] + if x != 1<<20+1 { + panic("testLargeOffset: direct mismatch") + } + + y := &globallib.Data[1<<20+2] + if y != &ptrData()[1<<20+2] { + panic("testLargeOffset: address mismatch") + } +} + +func main() { + testLoop() + + // SSA rules commonly merge offsets into addresses. These + // tests access global data in different ways to try + // and exercise different SSA rules. + testMediumOffset() + testLargeOffset() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/globallib/global.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/globallib/global.go new file mode 100644 index 0000000000000000000000000000000000000000..b4372a2e9e29eb5c90ac4c8cb3549d43280109c1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/globallib/global.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package globallib + +// Data is large enough to that offsets into it do not fit into +// 16-bit or 20-bit immediates. 
Ideally we'd also try and overrun +// 32-bit immediates, but that requires the test machine to have +// too much memory. +var Data [1<<20 + 10]int64 + +func init() { + for i := range Data { + Data[i] = int64(i) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/iface/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/iface/main.go new file mode 100644 index 0000000000000000000000000000000000000000..d26ebbcc9cc1c466f1b4fb5918a4707480e25969 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/iface/main.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "testshared/iface_a" +import "testshared/iface_b" + +func main() { + if iface_a.F() != iface_b.F() { + panic("empty interfaces not equal") + } + if iface_a.G() != iface_b.G() { + panic("non-empty interfaces not equal") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/iface_a/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/iface_a/a.go new file mode 100644 index 0000000000000000000000000000000000000000..e2cef1ecda6313a827b0708f9be672d1f3db2347 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/iface_a/a.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package iface_a + +import "testshared/iface_i" + +//go:noinline +func F() interface{} { + return (*iface_i.T)(nil) +} + +//go:noinline +func G() iface_i.I { + return (*iface_i.T)(nil) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/iface_b/b.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/iface_b/b.go new file mode 100644 index 0000000000000000000000000000000000000000..dd3e027b37a3bd2037ade0c18bf5de9bfe04e0d0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/iface_b/b.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package iface_b + +import "testshared/iface_i" + +//go:noinline +func F() interface{} { + return (*iface_i.T)(nil) +} + +//go:noinline +func G() iface_i.I { + return (*iface_i.T)(nil) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/iface_i/i.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/iface_i/i.go new file mode 100644 index 0000000000000000000000000000000000000000..31c80387c7e56c752e422fb73e81592d1a34c87b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/iface_i/i.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package iface_i + +type I interface { + M() +} + +type T struct { +} + +func (t *T) M() { +} + +// *T implements I diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/implicit/implicit.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/implicit/implicit.go new file mode 100644 index 0000000000000000000000000000000000000000..5360188c562386fb1fae175817838fb27a045f5f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/implicit/implicit.go @@ -0,0 +1,5 @@ +package implicit + +func I() int { + return 42 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/implicitcmd/implicitcmd.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/implicitcmd/implicitcmd.go new file mode 100644 index 0000000000000000000000000000000000000000..4d4296738e3c018713d16c1bb570187e42d3e3fb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/implicitcmd/implicitcmd.go @@ -0,0 +1,10 @@ +package main + +import ( + "testshared/explicit" + "testshared/implicit" +) + +func main() { + println(implicit.I() + explicit.E()) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue25065/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue25065/a.go new file mode 100644 index 0000000000000000000000000000000000000000..646de4e52faf1901f7bb9c76a415d043d48ad990 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue25065/a.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package issue25065 has a type with a method that is +// 1. referenced in a method expression +// 2. not called +// 3. not converted to an interface +// 4. 
is a value method but the reference is to the pointer method +// +// These cases avoid the call to makefuncsym from typecheckfunc, but we +// still need to call makefuncsym somehow or the symbol will not be defined. +package issue25065 + +type T int + +func (t T) M() {} + +func F() func(*T) { + return (*T).M +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue30768/issue30768lib/lib.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue30768/issue30768lib/lib.go new file mode 100644 index 0000000000000000000000000000000000000000..9e45ebe683552153573a1c603d0e3cc981a0d036 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue30768/issue30768lib/lib.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue30768lib + +// S is a struct that requires a generated hash function. +type S struct { + A string + B int +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue30768/x_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue30768/x_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1bbd139d3ebcde4e52426b1ff061b8a3dedfa00a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue30768/x_test.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package issue30768_test + +import ( + "testing" + + "testshared/issue30768/issue30768lib" +) + +type s struct { + s issue30768lib.S +} + +func Test30768(t *testing.T) { + // Calling t.Log will convert S to an empty interface, + // which will force a reference to the generated hash function, + // defined in the shared library. + t.Log(s{}) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue39777/a/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue39777/a/a.go new file mode 100644 index 0000000000000000000000000000000000000000..c7bf8359514ae7c081492943484a365a2b84c916 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue39777/a/a.go @@ -0,0 +1,9 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package a + +import "testshared/issue39777/b" + +func F() { b.F() } diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue39777/b/b.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue39777/b/b.go new file mode 100644 index 0000000000000000000000000000000000000000..4e681965e68b15e8be6633fb598a56ad2504cf7b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue39777/b/b.go @@ -0,0 +1,7 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package b + +func F() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue44031/a/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue44031/a/a.go new file mode 100644 index 0000000000000000000000000000000000000000..48827e682fb60e29eb13a333daf9da9360eb6090 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue44031/a/a.go @@ -0,0 +1,9 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package a + +type ATypeWithALoooooongName interface { // a long name, so the type descriptor symbol name is mangled + M() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue44031/b/b.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue44031/b/b.go new file mode 100644 index 0000000000000000000000000000000000000000..ad3ebec2b988a2c8287cd35412be0e89d4cd7051 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue44031/b/b.go @@ -0,0 +1,17 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package b + +import "testshared/issue44031/a" + +type T int + +func (T) M() {} + +var i = a.ATypeWithALoooooongName(T(0)) + +func F() { + i.M() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue44031/main/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue44031/main/main.go new file mode 100644 index 0000000000000000000000000000000000000000..47f2e3a98e8f61ba1a09682b00f37def4e55302c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue44031/main/main.go @@ -0,0 +1,20 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "testshared/issue44031/b" + +type t int + +func (t) m() {} + +type i interface{ m() } // test that unexported method is correctly marked + +var v interface{} = t(0) + +func main() { + b.F() + v.(i).m() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue47837/a/a.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue47837/a/a.go new file mode 100644 index 0000000000000000000000000000000000000000..68588eda2fa0a90019274e3b448e354c39ceaf6f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue47837/a/a.go @@ -0,0 +1,19 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package a + +type A interface { + M() +} + +//go:noinline +func TheFuncWithArgA(a A) { + a.M() +} + +type ImplA struct{} + +//go:noinline +func (A *ImplA) M() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue47837/main/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue47837/main/main.go new file mode 100644 index 0000000000000000000000000000000000000000..77c6f3437938a48e2b6c22b4c69df99d084e1a61 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue47837/main/main.go @@ -0,0 +1,14 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "testshared/issue47837/a" +) + +func main() { + var vara a.ImplA + a.TheFuncWithArgA(&vara) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue58966/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue58966/main.go new file mode 100644 index 0000000000000000000000000000000000000000..2d923c36076d5f5ffa62fa21d8b40c69b9dd786c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue58966/main.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "crypto/elliptic" + +var curve elliptic.Curve + +func main() { + switch curve { + case elliptic.P224(): + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue62277/issue62277_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue62277/issue62277_test.go new file mode 100644 index 0000000000000000000000000000000000000000..89a0601c9b5e6c1821686a316c559a5173765321 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue62277/issue62277_test.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package issue62277_test + +import ( + "testing" + + "testshared/issue62277/p" +) + +func TestIssue62277(t *testing.T) { + t.Log(p.S) + t.Log(p.T) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue62277/p/p.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue62277/p/p.go new file mode 100644 index 0000000000000000000000000000000000000000..97bde0c10f67b7b867906dcf74c1477fe2929d93 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/issue62277/p/p.go @@ -0,0 +1,17 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +var S = func() []string { + return []string{"LD_LIBRARY_PATH"} +}() + +var T []string + +func init() { + T = func() []string { + return []string{"LD_LIBRARY_PATH"} + }() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/trivial/trivial.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/trivial/trivial.go new file mode 100644 index 0000000000000000000000000000000000000000..6ade47ce36fb6ff469a0c5bf05b283b2238f91d7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testshared/testdata/trivial/trivial.go @@ -0,0 +1,9 @@ +package main + +func main() { + // This is enough to make sure that the executable references + // a type descriptor, which was the cause of + // https://golang.org/issue/25970. + c := make(chan int) + _ = c +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/so_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/so_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e011167f388ed3656f909a435c53830e29f61d9b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/so_test.go @@ -0,0 +1,137 @@ +// Copyright 2019 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package so_test + +import ( + "cmd/cgo/internal/cgotest" + "internal/testenv" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" +) + +func TestSO(t *testing.T) { + testSO(t, "so") +} + +func TestSOVar(t *testing.T) { + testSO(t, "sovar") +} + +func testSO(t *testing.T, dir string) { + if runtime.GOOS == "ios" { + t.Skip("iOS disallows dynamic loading of user libraries") + } + testenv.MustHaveGoBuild(t) + testenv.MustHaveExec(t) + testenv.MustHaveCGO(t) + + GOPATH, err := os.MkdirTemp("", "cgosotest") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(GOPATH) + + modRoot := filepath.Join(GOPATH, "src", "cgosotest") + if err := cgotest.OverlayDir(modRoot, filepath.Join("testdata", dir)); err != nil { + log.Panic(err) + } + if err := os.WriteFile(filepath.Join(modRoot, "go.mod"), []byte("module cgosotest\n"), 0666); err != nil { + log.Panic(err) + } + + cmd := exec.Command("go", "env", "CC", "GOGCCFLAGS") + cmd.Dir = modRoot + cmd.Stderr = new(strings.Builder) + cmd.Env = append(os.Environ(), "GOPATH="+GOPATH) + out, err := cmd.Output() + if err != nil { + t.Fatalf("%s: %v\n%s", strings.Join(cmd.Args, " "), err, cmd.Stderr) + } + lines := strings.Split(string(out), "\n") + if len(lines) != 3 || lines[2] != "" { + t.Fatalf("Unexpected output from %s:\n%s", strings.Join(cmd.Args, " "), lines) + } + + cc := lines[0] + if cc == "" { + t.Fatal("CC environment variable (go env CC) cannot be empty") + } + gogccflags := strings.Split(lines[1], " ") + + // build shared object + ext := "so" + args := append(gogccflags, "-shared") + switch runtime.GOOS { + case "darwin", "ios": + ext = "dylib" + args = append(args, "-undefined", "suppress", "-flat_namespace") + case "windows": + ext = "dll" + args = append(args, "-DEXPORT_DLL") + // At least in mingw-clang it is not permitted to just name a .dll + // on the 
command line. You must name the corresponding import + // library instead, even though the dll is used when the executable is run. + args = append(args, "-Wl,-out-implib,libcgosotest.a") + case "aix": + ext = "so.1" + } + sofname := "libcgosotest." + ext + args = append(args, "-o", sofname, "cgoso_c.c") + + cmd = exec.Command(cc, args...) + cmd.Dir = modRoot + cmd.Env = append(os.Environ(), "GOPATH="+GOPATH) + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("%s: %s\n%s", strings.Join(cmd.Args, " "), err, out) + } + t.Logf("%s:\n%s", strings.Join(cmd.Args, " "), out) + + if runtime.GOOS == "aix" { + // Shared object must be wrapped by an archive + cmd = exec.Command("ar", "-X64", "-q", "libcgosotest.a", "libcgosotest.so.1") + cmd.Dir = modRoot + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("%s: %s\n%s", strings.Join(cmd.Args, " "), err, out) + } + } + + cmd = exec.Command("go", "build", "-o", "main.exe", "main.go") + cmd.Dir = modRoot + cmd.Env = append(os.Environ(), "GOPATH="+GOPATH) + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("%s: %s\n%s", strings.Join(cmd.Args, " "), err, out) + } + t.Logf("%s:\n%s", strings.Join(cmd.Args, " "), out) + + cmd = exec.Command("./main.exe") + cmd.Dir = modRoot + cmd.Env = append(os.Environ(), "GOPATH="+GOPATH) + if runtime.GOOS != "windows" { + s := "LD_LIBRARY_PATH" + if runtime.GOOS == "darwin" || runtime.GOOS == "ios" { + s = "DYLD_LIBRARY_PATH" + } + cmd.Env = append(os.Environ(), s+"=.") + + // On FreeBSD 64-bit architectures, the 32-bit linker looks for + // different environment variables. 
+ if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + cmd.Env = append(cmd.Env, "LD_32_LIBRARY_PATH=.") + } + } + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("%s: %s\n%s", strings.Join(cmd.Args, " "), err, out) + } + t.Logf("%s:\n%s", strings.Join(cmd.Args, " "), out) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/so/cgoso.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/so/cgoso.c new file mode 100644 index 0000000000000000000000000000000000000000..612e5d335a9809b68c90ac6ec8c54e6e9c6bee48 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/so/cgoso.c @@ -0,0 +1,14 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "_cgo_export.h" + +#if defined(WIN32) || defined(_AIX) +extern void setCallback(void *); +void init() { + setCallback(goCallback); +} +#else +void init() {} +#endif diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/so/cgoso.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/so/cgoso.go new file mode 100644 index 0000000000000000000000000000000000000000..b59b2a8e8b1541f6a2a351b8e716267a5441d13d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/so/cgoso.go @@ -0,0 +1,32 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgosotest + +/* +// intentionally write the same LDFLAGS differently +// to test correct handling of LDFLAGS. +#cgo linux LDFLAGS: -L. -lcgosotest +#cgo dragonfly LDFLAGS: -L. -l cgosotest +#cgo freebsd LDFLAGS: -L. -l cgosotest +#cgo openbsd LDFLAGS: -L. -l cgosotest +#cgo solaris LDFLAGS: -L. -lcgosotest +#cgo netbsd LDFLAGS: -L. libcgosotest.so +#cgo darwin LDFLAGS: -L. 
libcgosotest.dylib +#cgo windows LDFLAGS: -L. libcgosotest.a +#cgo aix LDFLAGS: -L. -l cgosotest + +void init(void); +void sofunc(void); +*/ +import "C" + +func Test() { + C.init() + C.sofunc() +} + +//export goCallback +func goCallback() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/so/cgoso_c.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/so/cgoso_c.c new file mode 100644 index 0000000000000000000000000000000000000000..d5fb559f830f694674290f17b78c5b91cb183f25 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/so/cgoso_c.c @@ -0,0 +1,39 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +#ifdef WIN32 +// A Windows DLL is unable to call an arbitrary function in +// the main executable. Work around that by making the main +// executable pass the callback function pointer to us. +void (*goCallback)(void); +__declspec(dllexport) void setCallback(void *f) +{ + goCallback = (void (*)())f; +} +__declspec(dllexport) void sofunc(void); +#elif defined(_AIX) +// AIX doesn't allow the creation of a shared object with an +// undefined symbol. It's possible to bypass this problem by +// using -Wl,-G and -Wl,-brtl option which allows run-time linking. +// However, that's not how most of AIX shared object works. +// Therefore, it's better to consider goCallback as a pointer and +// to set up during an init function. 
+void (*goCallback)(void); +void setCallback(void *f) { goCallback = f; } +#else +extern void goCallback(void); +void setCallback(void *f) { (void)f; } +#endif + +// OpenBSD and older Darwin lack TLS support +#if !defined(__OpenBSD__) && !defined(__APPLE__) +__thread int tlsvar = 12345; +#endif + +void sofunc(void) +{ + goCallback(); +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/so/cgoso_unix.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/so/cgoso_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..ea9cb0a903c6f715431da84d359acf8e1538fb1b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/so/cgoso_unix.go @@ -0,0 +1,20 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || dragonfly || freebsd || linux || netbsd || solaris + +package cgosotest + +/* +extern int __thread tlsvar; +int *getTLS() { return &tlsvar; } +*/ +import "C" + +func init() { + if v := *C.getTLS(); v != 12345 { + println("got", v) + panic("BAD TLS value") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/so/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/so/main.go new file mode 100644 index 0000000000000000000000000000000000000000..84382f739c00c9249011be97a11ae01764944e08 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/so/main.go @@ -0,0 +1,13 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build ignore + +package main + +import "cgosotest" + +func main() { + cgosotest.Test() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/sovar/cgoso.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/sovar/cgoso.go new file mode 100644 index 0000000000000000000000000000000000000000..d9deb556da87758e579d1996439ea9cfb70cf4ea --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/sovar/cgoso.go @@ -0,0 +1,44 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgosotest + +// This test verifies that Go can access C variables +// in shared object file via cgo. + +/* +// intentionally write the same LDFLAGS differently +// to test correct handling of LDFLAGS. +#cgo windows CFLAGS: -DIMPORT_DLL +#cgo linux LDFLAGS: -L. -lcgosotest +#cgo dragonfly LDFLAGS: -L. -l cgosotest +#cgo freebsd LDFLAGS: -L. -l cgosotest +#cgo openbsd LDFLAGS: -L. -l cgosotest +#cgo solaris LDFLAGS: -L. -lcgosotest +#cgo netbsd LDFLAGS: -L. libcgosotest.so +#cgo darwin LDFLAGS: -L. libcgosotest.dylib +#cgo windows LDFLAGS: -L. libcgosotest.a +#cgo aix LDFLAGS: -L. 
-l cgosotest + +#include "cgoso_c.h" + +const char* getVar() { + return exported_var; +} +*/ +import "C" + +import "fmt" + +func Test() { + const want = "Hello world" + got := C.GoString(C.getVar()) + if got != want { + panic(fmt.Sprintf("testExportedVar: got %q, but want %q", got, want)) + } + got = C.GoString(C.exported_var) + if got != want { + panic(fmt.Sprintf("testExportedVar: got %q, but want %q", got, want)) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/sovar/cgoso_c.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/sovar/cgoso_c.c new file mode 100644 index 0000000000000000000000000000000000000000..36f4d570eea5a645b518423cf98e8714b57fd5ab --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/sovar/cgoso_c.c @@ -0,0 +1,7 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +const char *exported_var = "Hello world"; diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/sovar/cgoso_c.h b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/sovar/cgoso_c.h new file mode 100644 index 0000000000000000000000000000000000000000..eccd8c0d0cd4a85ae14023fb18cb1757316182ef --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/sovar/cgoso_c.h @@ -0,0 +1,17 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build ignore + +#ifdef WIN32 +#if defined(EXPORT_DLL) +# define VAR __declspec(dllexport) +#elif defined(IMPORT_DLL) +# define VAR __declspec(dllimport) +#endif +#else +# define VAR extern +#endif + +VAR const char *exported_var; diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/sovar/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/sovar/main.go new file mode 100644 index 0000000000000000000000000000000000000000..018b835c6c689e55252d85edcb58949e44380255 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testso/testdata/sovar/main.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +import "cgosotest" + +func main() { + cgosotest.Test() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/stdio_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/stdio_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3883422d6f08ac77b8cba6ac01d1fb85e9ab0f32 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/stdio_test.go @@ -0,0 +1,77 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stdio_test + +import ( + "bytes" + "cmd/cgo/internal/cgotest" + "internal/testenv" + "log" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" +) + +func TestMain(m *testing.M) { + log.SetFlags(log.Lshortfile) + os.Exit(testMain(m)) +} + +func testMain(m *testing.M) int { + GOPATH, err := os.MkdirTemp("", "cgostdio") + if err != nil { + log.Panic(err) + } + defer os.RemoveAll(GOPATH) + os.Setenv("GOPATH", GOPATH) + + // Copy testdata into GOPATH/src/cgostdio, along with a go.mod file + // declaring the same path. 
+ modRoot := filepath.Join(GOPATH, "src", "cgostdio") + if err := cgotest.OverlayDir(modRoot, "testdata"); err != nil { + log.Panic(err) + } + if err := os.Chdir(modRoot); err != nil { + log.Panic(err) + } + os.Setenv("PWD", modRoot) + if err := os.WriteFile("go.mod", []byte("module cgostdio\n"), 0666); err != nil { + log.Panic(err) + } + + return m.Run() +} + +// TestTestRun runs a cgo test that doesn't depend on non-standard libraries. +func TestTestRun(t *testing.T) { + testenv.MustHaveGoRun(t) + testenv.MustHaveCGO(t) + + for _, file := range [...]string{ + "chain.go", + "fib.go", + "hello.go", + } { + file := file + wantFile := strings.Replace(file, ".go", ".out", 1) + t.Run(file, func(t *testing.T) { + cmd := exec.Command("go", "run", file) + got, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("%v: %s\n%s", cmd, err, got) + } + got = bytes.ReplaceAll(got, []byte("\r\n"), []byte("\n")) + want, err := os.ReadFile(wantFile) + if err != nil { + t.Fatal("reading golden output:", err) + } + if !bytes.Equal(got, want) { + t.Errorf("'%v' output does not match expected in %s. Instead saw:\n%s", cmd, wantFile, got) + } + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/chain.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/chain.go new file mode 100644 index 0000000000000000000000000000000000000000..c7163f5ae0bb356cb3e45cfcb53d7aba9459a418 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/chain.go @@ -0,0 +1,46 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build test_run + +// Pass numbers along a chain of threads. 
+ +package main + +import ( + "runtime" + "strconv" + + "cgostdio/stdio" +) + +const N = 10 +const R = 5 + +func link(left chan<- int, right <-chan int) { + // Keep the links in dedicated operating system + // threads, so that this program tests coordination + // between pthreads and not just goroutines. + runtime.LockOSThread() + for { + v := <-right + stdio.Stdout.WriteString(strconv.Itoa(v) + "\n") + left <- 1 + v + } +} + +func main() { + leftmost := make(chan int) + var left chan int + right := leftmost + for i := 0; i < N; i++ { + left, right = right, make(chan int) + go link(left, right) + } + for i := 0; i < R; i++ { + right <- 0 + x := <-leftmost + stdio.Stdout.WriteString(strconv.Itoa(x) + "\n") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/chain.out b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/chain.out new file mode 100644 index 0000000000000000000000000000000000000000..963cf9b6679b180a5f6423d847cbd8104bcd7a8c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/chain.out @@ -0,0 +1,55 @@ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/fib.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/fib.go new file mode 100644 index 0000000000000000000000000000000000000000..96173683353151458b698f6a0409aacfa8cd1177 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/fib.go @@ -0,0 +1,50 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build test_run + +// Compute Fibonacci numbers with two goroutines +// that pass integers back and forth. 
No actual +// concurrency, just threads and synchronization +// and foreign code on multiple pthreads. + +package main + +import ( + "runtime" + "strconv" + + "cgostdio/stdio" +) + +func fibber(c, out chan int64, i int64) { + // Keep the fibbers in dedicated operating system + // threads, so that this program tests coordination + // between pthreads and not just goroutines. + runtime.LockOSThread() + + if i == 0 { + c <- i + } + for { + j := <-c + stdio.Stdout.WriteString(strconv.FormatInt(j, 10) + "\n") + out <- j + <-out + i += j + c <- i + } +} + +func main() { + c := make(chan int64) + out := make(chan int64) + go fibber(c, out, 0) + go fibber(c, out, 1) + <-out + for i := 0; i < 90; i++ { + out <- 1 + <-out + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/fib.out b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/fib.out new file mode 100644 index 0000000000000000000000000000000000000000..17ff503356d8e40e12ed97ed51b5675299d4879c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/fib.out @@ -0,0 +1,91 @@ +0 +1 +1 +2 +3 +5 +8 +13 +21 +34 +55 +89 +144 +233 +377 +610 +987 +1597 +2584 +4181 +6765 +10946 +17711 +28657 +46368 +75025 +121393 +196418 +317811 +514229 +832040 +1346269 +2178309 +3524578 +5702887 +9227465 +14930352 +24157817 +39088169 +63245986 +102334155 +165580141 +267914296 +433494437 +701408733 +1134903170 +1836311903 +2971215073 +4807526976 +7778742049 +12586269025 +20365011074 +32951280099 +53316291173 +86267571272 +139583862445 +225851433717 +365435296162 +591286729879 +956722026041 +1548008755920 +2504730781961 +4052739537881 +6557470319842 +10610209857723 +17167680177565 +27777890035288 +44945570212853 +72723460248141 +117669030460994 +190392490709135 +308061521170129 +498454011879264 +806515533049393 +1304969544928657 +2111485077978050 +3416454622906707 +5527939700884757 +8944394323791464 +14472334024676221 +23416728348467685 +37889062373143906 
+61305790721611591 +99194853094755497 +160500643816367088 +259695496911122585 +420196140727489673 +679891637638612258 +1100087778366101931 +1779979416004714189 +2880067194370816120 diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/hello.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/hello.go new file mode 100644 index 0000000000000000000000000000000000000000..c0b52bf6c5a19758f86418a6fc3490235d80f480 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/hello.go @@ -0,0 +1,13 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build test_run + +package main + +import "cgostdio/stdio" + +func main() { + stdio.Stdout.WriteString(stdio.Greeting + "\n") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/hello.out b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/hello.out new file mode 100644 index 0000000000000000000000000000000000000000..4b5fa63702dd96796042e92787f464e28f09f17d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/hello.out @@ -0,0 +1 @@ +hello, world diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/stdio/file.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/stdio/file.go new file mode 100644 index 0000000000000000000000000000000000000000..d97ee4c3a172db930737bc23690ce8522a6fd5ed --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/stdio/file.go @@ -0,0 +1,42 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +A trivial example of wrapping a C library in Go. +For a more complex example and explanation, +see misc/cgo/gmp/gmp.go. 
+*/ + +package stdio + +/* +#include +#include +#include +#include + +char* greeting = "hello, world"; +*/ +import "C" +import "unsafe" + +type File C.FILE + +// Test reference to library symbol. +// Stdout and stderr are too special to be a reliable test. +//var = C.environ + +func (f *File) WriteString(s string) { + p := C.CString(s) + C.fputs(p, (*C.FILE)(f)) + C.free(unsafe.Pointer(p)) + f.Flush() +} + +func (f *File) Flush() { + C.fflush((*C.FILE)(f)) +} + +var Greeting = C.GoString(C.greeting) +var Gbytes = C.GoBytes(unsafe.Pointer(C.greeting), C.int(len(Greeting))) diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/stdio/stdio.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/stdio/stdio.go new file mode 100644 index 0000000000000000000000000000000000000000..08286d4898e01819f6c08aa5e257108ccd363b80 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/teststdio/testdata/stdio/stdio.go @@ -0,0 +1,20 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stdio + +/* +#include + +// on mingw, stderr and stdout are defined as &_iob[FILENO] +// on netbsd, they are defined as &__sF[FILENO] +// and cgo doesn't recognize them, so write a function to get them, +// instead of depending on internals of libc implementation. 
+FILE *getStdout(void) { return stdout; } +FILE *getStderr(void) { return stderr; } +*/ +import "C" + +var Stdout = (*File)(C.getStdout()) +var Stderr = (*File)(C.getStderr()) diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testtls/tls.c b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testtls/tls.c new file mode 100644 index 0000000000000000000000000000000000000000..8839cc86762c995cf3666f5663e46cc9f64e0d09 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testtls/tls.c @@ -0,0 +1,47 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include + +#if __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_THREADS__) + +// Mingw seems not to have threads.h, so we use the _Thread_local keyword rather +// than the thread_local macro. +static _Thread_local int tls; + +const char * +checkTLS() { + return NULL; +} + +void +setTLS(int v) +{ + tls = v; +} + +int +getTLS() +{ + return tls; +} + +#else + +const char * +checkTLS() { + return "_Thread_local requires C11 and not __STDC_NO_THREADS__"; +} + +void +setTLS(int v) { +} + +int +getTLS() +{ + return 0; +} + +#endif diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testtls/tls.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testtls/tls.go new file mode 100644 index 0000000000000000000000000000000000000000..78628f5caa2fce5de51a0a03b7714fd89ac2d889 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testtls/tls.go @@ -0,0 +1,34 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cgotlstest + +// extern const char *checkTLS(); +// extern void setTLS(int); +// extern int getTLS(); +import "C" + +import ( + "runtime" + "testing" +) + +func testTLS(t *testing.T) { + if skip := C.checkTLS(); skip != nil { + t.Skipf("%s", C.GoString(skip)) + } + + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + if val := C.getTLS(); val != 0 { + t.Fatalf("at start, C.getTLS() = %#x, want 0", val) + } + + const keyVal = 0x1234 + C.setTLS(keyVal) + if val := C.getTLS(); val != keyVal { + t.Fatalf("at end, C.getTLS() = %#x, want %#x", val, keyVal) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testtls/tls_none.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testtls/tls_none.go new file mode 100644 index 0000000000000000000000000000000000000000..b6033fb76d4335950e457a70c370f48ace76c3c1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testtls/tls_none.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package cgotlstest + +import "testing" + +func testTLS(t *testing.T) { + t.Skip("cgo not supported") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testtls/tls_test.go b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testtls/tls_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8e14add9886211296e759eed36982425c9bcb281 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/internal/testtls/tls_test.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cgotlstest + +import "testing" + +func TestTLS(t *testing.T) { + testTLS(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/main.go b/platform/dbops/binaries/go/go/src/cmd/cgo/main.go new file mode 100644 index 0000000000000000000000000000000000000000..fce2671c2c123b3b87887c4cd6552addd4b36509 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/main.go @@ -0,0 +1,533 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Cgo; see doc.go for an overview. + +// TODO(rsc): +// Emit correct line number annotations. +// Make gc understand the annotations. + +package main + +import ( + "flag" + "fmt" + "go/ast" + "go/printer" + "go/token" + "internal/buildcfg" + "io" + "os" + "path/filepath" + "reflect" + "runtime" + "sort" + "strings" + + "cmd/internal/edit" + "cmd/internal/notsha256" + "cmd/internal/objabi" +) + +// A Package collects information about the package we're going to write. +type Package struct { + PackageName string // name of package + PackagePath string + PtrSize int64 + IntSize int64 + GccOptions []string + GccIsClang bool + LdFlags []string // #cgo LDFLAGS + Written map[string]bool + Name map[string]*Name // accumulated Name from Files + ExpFunc []*ExpFunc // accumulated ExpFunc from Files + Decl []ast.Decl + GoFiles []string // list of Go files + GccFiles []string // list of gcc output files + Preamble string // collected preamble for _cgo_export.h + typedefs map[string]bool // type names that appear in the types of the objects we're interested in + typedefList []typedefInfo + noCallbacks map[string]bool // C function names with #cgo nocallback directive + noEscapes map[string]bool // C function names with #cgo noescape directive +} + +// A typedefInfo is an element on Package.typedefList: a typedef name +// and the position where it was required. 
+type typedefInfo struct { + typedef string + pos token.Pos +} + +// A File collects information about a single Go input file. +type File struct { + AST *ast.File // parsed AST + Comments []*ast.CommentGroup // comments from file + Package string // Package name + Preamble string // C preamble (doc comment on import "C") + Ref []*Ref // all references to C.xxx in AST + Calls []*Call // all calls to C.xxx in AST + ExpFunc []*ExpFunc // exported functions for this file + Name map[string]*Name // map from Go name to Name + NamePos map[*Name]token.Pos // map from Name to position of the first reference + NoCallbacks map[string]bool // C function names that with #cgo nocallback directive + NoEscapes map[string]bool // C function names that with #cgo noescape directive + Edit *edit.Buffer +} + +func (f *File) offset(p token.Pos) int { + return fset.Position(p).Offset +} + +func nameKeys(m map[string]*Name) []string { + var ks []string + for k := range m { + ks = append(ks, k) + } + sort.Strings(ks) + return ks +} + +// A Call refers to a call of a C.xxx function in the AST. +type Call struct { + Call *ast.CallExpr + Deferred bool + Done bool +} + +// A Ref refers to an expression of the form C.xxx in the AST. +type Ref struct { + Name *Name + Expr *ast.Expr + Context astContext + Done bool +} + +func (r *Ref) Pos() token.Pos { + return (*r.Expr).Pos() +} + +var nameKinds = []string{"iconst", "fconst", "sconst", "type", "var", "fpvar", "func", "macro", "not-type"} + +// A Name collects information about C.xxx. 
+type Name struct { + Go string // name used in Go referring to package C + Mangle string // name used in generated Go + C string // name used in C + Define string // #define expansion + Kind string // one of the nameKinds + Type *Type // the type of xxx + FuncType *FuncType + AddError bool + Const string // constant definition +} + +// IsVar reports whether Kind is either "var" or "fpvar" +func (n *Name) IsVar() bool { + return n.Kind == "var" || n.Kind == "fpvar" +} + +// IsConst reports whether Kind is either "iconst", "fconst" or "sconst" +func (n *Name) IsConst() bool { + return strings.HasSuffix(n.Kind, "const") +} + +// An ExpFunc is an exported function, callable from C. +// Such functions are identified in the Go input file +// by doc comments containing the line //export ExpName +type ExpFunc struct { + Func *ast.FuncDecl + ExpName string // name to use from C + Doc string +} + +// A TypeRepr contains the string representation of a type. +type TypeRepr struct { + Repr string + FormatArgs []interface{} +} + +// A Type collects information about a type in both the C and Go worlds. +type Type struct { + Size int64 + Align int64 + C *TypeRepr + Go ast.Expr + EnumValues map[string]int64 + Typedef string + BadPointer bool // this pointer type should be represented as a uintptr (deprecated) +} + +// A FuncType collects information about a function type in both the C and Go worlds. 
+type FuncType struct { + Params []*Type + Result *Type + Go *ast.FuncType +} + +func usage() { + fmt.Fprint(os.Stderr, "usage: cgo -- [compiler options] file.go ...\n") + flag.PrintDefaults() + os.Exit(2) +} + +var ptrSizeMap = map[string]int64{ + "386": 4, + "alpha": 8, + "amd64": 8, + "arm": 4, + "arm64": 8, + "loong64": 8, + "m68k": 4, + "mips": 4, + "mipsle": 4, + "mips64": 8, + "mips64le": 8, + "nios2": 4, + "ppc": 4, + "ppc64": 8, + "ppc64le": 8, + "riscv": 4, + "riscv64": 8, + "s390": 4, + "s390x": 8, + "sh": 4, + "shbe": 4, + "sparc": 4, + "sparc64": 8, +} + +var intSizeMap = map[string]int64{ + "386": 4, + "alpha": 8, + "amd64": 8, + "arm": 4, + "arm64": 8, + "loong64": 8, + "m68k": 4, + "mips": 4, + "mipsle": 4, + "mips64": 8, + "mips64le": 8, + "nios2": 4, + "ppc": 4, + "ppc64": 8, + "ppc64le": 8, + "riscv": 4, + "riscv64": 8, + "s390": 4, + "s390x": 8, + "sh": 4, + "shbe": 4, + "sparc": 4, + "sparc64": 8, +} + +var cPrefix string + +var fset = token.NewFileSet() + +var dynobj = flag.String("dynimport", "", "if non-empty, print dynamic import data for that file") +var dynout = flag.String("dynout", "", "write -dynimport output to this file") +var dynpackage = flag.String("dynpackage", "main", "set Go package for -dynimport output") +var dynlinker = flag.Bool("dynlinker", false, "record dynamic linker information in -dynimport mode") + +// This flag is for bootstrapping a new Go implementation, +// to generate Go types that match the data layout and +// constant values used in the host's C libraries and system calls. 
+var godefs = flag.Bool("godefs", false, "for bootstrap: write Go definitions for C file to standard output") + +var srcDir = flag.String("srcdir", "", "source directory") +var objDir = flag.String("objdir", "", "object directory") +var importPath = flag.String("importpath", "", "import path of package being built (for comments in generated files)") +var exportHeader = flag.String("exportheader", "", "where to write export header if any exported functions") + +var gccgo = flag.Bool("gccgo", false, "generate files for use with gccgo") +var gccgoprefix = flag.String("gccgoprefix", "", "-fgo-prefix option used with gccgo") +var gccgopkgpath = flag.String("gccgopkgpath", "", "-fgo-pkgpath option used with gccgo") +var gccgoMangler func(string) string +var gccgoDefineCgoIncomplete = flag.Bool("gccgo_define_cgoincomplete", false, "define cgo.Incomplete for older gccgo/GoLLVM") +var importRuntimeCgo = flag.Bool("import_runtime_cgo", true, "import runtime/cgo in generated code") +var importSyscall = flag.Bool("import_syscall", true, "import syscall in generated code") +var trimpath = flag.String("trimpath", "", "applies supplied rewrites or trims prefixes to recorded source file paths") + +var goarch, goos, gomips, gomips64 string +var gccBaseCmd []string + +func main() { + objabi.AddVersionFlag() // -V + objabi.Flagparse(usage) + + if *gccgoDefineCgoIncomplete { + if !*gccgo { + fmt.Fprintf(os.Stderr, "cgo: -gccgo_define_cgoincomplete without -gccgo\n") + os.Exit(2) + } + incomplete = "_cgopackage_Incomplete" + } + + if *dynobj != "" { + // cgo -dynimport is essentially a separate helper command + // built into the cgo binary. It scans a gcc-produced executable + // and dumps information about the imported symbols and the + // imported libraries. 
The 'go build' rules for cgo prepare an + // appropriate executable and then use its import information + // instead of needing to make the linkers duplicate all the + // specialized knowledge gcc has about where to look for imported + // symbols and which ones to use. + dynimport(*dynobj) + return + } + + if *godefs { + // Generating definitions pulled from header files, + // to be checked into Go repositories. + // Line numbers are just noise. + conf.Mode &^= printer.SourcePos + } + + args := flag.Args() + if len(args) < 1 { + usage() + } + + // Find first arg that looks like a go file and assume everything before + // that are options to pass to gcc. + var i int + for i = len(args); i > 0; i-- { + if !strings.HasSuffix(args[i-1], ".go") { + break + } + } + if i == len(args) { + usage() + } + + // Save original command line arguments for the godefs generated comment. Relative file + // paths in os.Args will be rewritten to absolute file paths in the loop below. + osArgs := make([]string, len(os.Args)) + copy(osArgs, os.Args[:]) + goFiles := args[i:] + + for _, arg := range args[:i] { + if arg == "-fsanitize=thread" { + tsanProlog = yesTsanProlog + } + if arg == "-fsanitize=memory" { + msanProlog = yesMsanProlog + } + } + + p := newPackage(args[:i]) + + // We need a C compiler to be available. Check this. + var err error + gccBaseCmd, err = checkGCCBaseCmd() + if err != nil { + fatalf("%v", err) + os.Exit(2) + } + + // Record CGO_LDFLAGS from the environment for external linking. + if ldflags := os.Getenv("CGO_LDFLAGS"); ldflags != "" { + args, err := splitQuoted(ldflags) + if err != nil { + fatalf("bad CGO_LDFLAGS: %q (%s)", ldflags, err) + } + p.addToFlag("LDFLAGS", args) + } + + // Need a unique prefix for the global C symbols that + // we use to coordinate between gcc and ourselves. + // We already put _cgo_ at the beginning, so the main + // concern is other cgo wrappers for the same functions. 
+ // Use the beginning of the notsha256 of the input to disambiguate. + h := notsha256.New() + io.WriteString(h, *importPath) + fs := make([]*File, len(goFiles)) + for i, input := range goFiles { + if *srcDir != "" { + input = filepath.Join(*srcDir, input) + } + + // Create absolute path for file, so that it will be used in error + // messages and recorded in debug line number information. + // This matches the rest of the toolchain. See golang.org/issue/5122. + if aname, err := filepath.Abs(input); err == nil { + input = aname + } + + b, err := os.ReadFile(input) + if err != nil { + fatalf("%s", err) + } + if _, err = h.Write(b); err != nil { + fatalf("%s", err) + } + + // Apply trimpath to the file path. The path won't be read from after this point. + input, _ = objabi.ApplyRewrites(input, *trimpath) + if strings.ContainsAny(input, "\r\n") { + // ParseGo, (*Package).writeOutput, and printer.Fprint in SourcePos mode + // all emit line directives, which don't permit newlines in the file path. + // Bail early if we see anything newline-like in the trimmed path. + fatalf("input path contains newline character: %q", input) + } + goFiles[i] = input + + f := new(File) + f.Edit = edit.NewBuffer(b) + f.ParseGo(input, b) + f.ProcessCgoDirectives() + fs[i] = f + } + + cPrefix = fmt.Sprintf("_%x", h.Sum(nil)[0:6]) + + if *objDir == "" { + // make sure that _obj directory exists, so that we can write + // all the output files there. 
+ os.Mkdir("_obj", 0777) + *objDir = "_obj" + } + *objDir += string(filepath.Separator) + + for i, input := range goFiles { + f := fs[i] + p.Translate(f) + for _, cref := range f.Ref { + switch cref.Context { + case ctxCall, ctxCall2: + if cref.Name.Kind != "type" { + break + } + old := *cref.Expr + *cref.Expr = cref.Name.Type.Go + f.Edit.Replace(f.offset(old.Pos()), f.offset(old.End()), gofmt(cref.Name.Type.Go)) + } + } + if nerrors > 0 { + os.Exit(2) + } + p.PackagePath = f.Package + p.Record(f) + if *godefs { + os.Stdout.WriteString(p.godefs(f, osArgs)) + } else { + p.writeOutput(f, input) + } + } + cFunctions := make(map[string]bool) + for _, key := range nameKeys(p.Name) { + n := p.Name[key] + if n.FuncType != nil { + cFunctions[n.C] = true + } + } + + for funcName := range p.noEscapes { + if _, found := cFunctions[funcName]; !found { + error_(token.NoPos, "#cgo noescape %s: no matched C function", funcName) + } + } + + for funcName := range p.noCallbacks { + if _, found := cFunctions[funcName]; !found { + error_(token.NoPos, "#cgo nocallback %s: no matched C function", funcName) + } + } + + if !*godefs { + p.writeDefs() + } + if nerrors > 0 { + os.Exit(2) + } +} + +// newPackage returns a new Package that will invoke +// gcc with the additional arguments specified in args. +func newPackage(args []string) *Package { + goarch = runtime.GOARCH + if s := os.Getenv("GOARCH"); s != "" { + goarch = s + } + goos = runtime.GOOS + if s := os.Getenv("GOOS"); s != "" { + goos = s + } + buildcfg.Check() + gomips = buildcfg.GOMIPS + gomips64 = buildcfg.GOMIPS64 + ptrSize := ptrSizeMap[goarch] + if ptrSize == 0 { + fatalf("unknown ptrSize for $GOARCH %q", goarch) + } + intSize := intSizeMap[goarch] + if intSize == 0 { + fatalf("unknown intSize for $GOARCH %q", goarch) + } + + // Reset locale variables so gcc emits English errors [sic]. 
+ os.Setenv("LANG", "en_US.UTF-8") + os.Setenv("LC_ALL", "C") + + p := &Package{ + PtrSize: ptrSize, + IntSize: intSize, + Written: make(map[string]bool), + noCallbacks: make(map[string]bool), + noEscapes: make(map[string]bool), + } + p.addToFlag("CFLAGS", args) + return p +} + +// Record what needs to be recorded about f. +func (p *Package) Record(f *File) { + if p.PackageName == "" { + p.PackageName = f.Package + } else if p.PackageName != f.Package { + error_(token.NoPos, "inconsistent package names: %s, %s", p.PackageName, f.Package) + } + + if p.Name == nil { + p.Name = f.Name + } else { + for k, v := range f.Name { + if p.Name[k] == nil { + p.Name[k] = v + } else if p.incompleteTypedef(p.Name[k].Type) { + p.Name[k] = v + } else if p.incompleteTypedef(v.Type) { + // Nothing to do. + } else if _, ok := nameToC[k]; ok { + // Names we predefine may appear inconsistent + // if some files typedef them and some don't. + // Issue 26743. + } else if !reflect.DeepEqual(p.Name[k], v) { + error_(token.NoPos, "inconsistent definitions for C.%s", fixGo(k)) + } + } + } + + // merge nocallback & noescape + for k, v := range f.NoCallbacks { + p.noCallbacks[k] = v + } + for k, v := range f.NoEscapes { + p.noEscapes[k] = v + } + + if f.ExpFunc != nil { + p.ExpFunc = append(p.ExpFunc, f.ExpFunc...) + p.Preamble += "\n" + f.Preamble + } + p.Decl = append(p.Decl, f.AST.Decls...) +} + +// incompleteTypedef reports whether t appears to be an incomplete +// typedef definition. +func (p *Package) incompleteTypedef(t *Type) bool { + return t == nil || (t.Size == 0 && t.Align == -1) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/out.go b/platform/dbops/binaries/go/go/src/cmd/cgo/out.go new file mode 100644 index 0000000000000000000000000000000000000000..2189ad5f41f2e8c6e96f3164249aa62ad32352f8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/out.go @@ -0,0 +1,2009 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "cmd/internal/pkgpath" + "debug/elf" + "debug/macho" + "debug/pe" + "fmt" + "go/ast" + "go/printer" + "go/token" + "internal/xcoff" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "sort" + "strings" + "unicode" +) + +var ( + conf = printer.Config{Mode: printer.SourcePos, Tabwidth: 8} + noSourceConf = printer.Config{Tabwidth: 8} +) + +// writeDefs creates output files to be compiled by gc and gcc. +func (p *Package) writeDefs() { + var fgo2, fc io.Writer + f := creat(*objDir + "_cgo_gotypes.go") + defer f.Close() + fgo2 = f + if *gccgo { + f := creat(*objDir + "_cgo_defun.c") + defer f.Close() + fc = f + } + fm := creat(*objDir + "_cgo_main.c") + + var gccgoInit strings.Builder + + if !*gccgo { + for _, arg := range p.LdFlags { + fmt.Fprintf(fgo2, "//go:cgo_ldflag %q\n", arg) + } + } else { + fflg := creat(*objDir + "_cgo_flags") + for _, arg := range p.LdFlags { + fmt.Fprintf(fflg, "_CGO_LDFLAGS=%s\n", arg) + } + fflg.Close() + } + + // Write C main file for using gcc to resolve imports. + fmt.Fprintf(fm, "#include \n") // For size_t below. + fmt.Fprintf(fm, "int main() { return 0; }\n") + if *importRuntimeCgo { + fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*) __attribute__((unused)), void *a __attribute__((unused)), int c __attribute__((unused)), size_t ctxt __attribute__((unused))) { }\n") + fmt.Fprintf(fm, "size_t _cgo_wait_runtime_init_done(void) { return 0; }\n") + fmt.Fprintf(fm, "void _cgo_release_context(size_t ctxt __attribute__((unused))) { }\n") + fmt.Fprintf(fm, "char* _cgo_topofstack(void) { return (char*)0; }\n") + } else { + // If we're not importing runtime/cgo, we *are* runtime/cgo, + // which provides these functions. We just need a prototype. 
+ fmt.Fprintf(fm, "void crosscall2(void(*fn)(void*), void *a, int c, size_t ctxt);\n") + fmt.Fprintf(fm, "size_t _cgo_wait_runtime_init_done(void);\n") + fmt.Fprintf(fm, "void _cgo_release_context(size_t);\n") + } + fmt.Fprintf(fm, "void _cgo_allocate(void *a __attribute__((unused)), int c __attribute__((unused))) { }\n") + fmt.Fprintf(fm, "void _cgo_panic(void *a __attribute__((unused)), int c __attribute__((unused))) { }\n") + fmt.Fprintf(fm, "void _cgo_reginit(void) { }\n") + + // Write second Go output: definitions of _C_xxx. + // In a separate file so that the import of "unsafe" does not + // pollute the original file. + fmt.Fprintf(fgo2, "// Code generated by cmd/cgo; DO NOT EDIT.\n\n") + fmt.Fprintf(fgo2, "package %s\n\n", p.PackageName) + fmt.Fprintf(fgo2, "import \"unsafe\"\n\n") + if *importSyscall { + fmt.Fprintf(fgo2, "import \"syscall\"\n\n") + } + if *importRuntimeCgo { + if !*gccgoDefineCgoIncomplete { + fmt.Fprintf(fgo2, "import _cgopackage \"runtime/cgo\"\n\n") + fmt.Fprintf(fgo2, "type _ _cgopackage.Incomplete\n") // prevent import-not-used error + } else { + fmt.Fprintf(fgo2, "//go:notinheap\n") + fmt.Fprintf(fgo2, "type _cgopackage_Incomplete struct{ _ struct{ _ struct{} } }\n") + } + } + if *importSyscall { + fmt.Fprintf(fgo2, "var _ syscall.Errno\n") + } + fmt.Fprintf(fgo2, "func _Cgo_ptr(ptr unsafe.Pointer) unsafe.Pointer { return ptr }\n\n") + + if !*gccgo { + fmt.Fprintf(fgo2, "//go:linkname _Cgo_always_false runtime.cgoAlwaysFalse\n") + fmt.Fprintf(fgo2, "var _Cgo_always_false bool\n") + fmt.Fprintf(fgo2, "//go:linkname _Cgo_use runtime.cgoUse\n") + fmt.Fprintf(fgo2, "func _Cgo_use(interface{})\n") + } + fmt.Fprintf(fgo2, "//go:linkname _Cgo_no_callback runtime.cgoNoCallback\n") + fmt.Fprintf(fgo2, "func _Cgo_no_callback(bool)\n") + + typedefNames := make([]string, 0, len(typedef)) + for name := range typedef { + if name == "_Ctype_void" { + // We provide an appropriate declaration for + // _Ctype_void below (#39877). 
+ continue + } + typedefNames = append(typedefNames, name) + } + sort.Strings(typedefNames) + for _, name := range typedefNames { + def := typedef[name] + fmt.Fprintf(fgo2, "type %s ", name) + // We don't have source info for these types, so write them out without source info. + // Otherwise types would look like: + // + // type _Ctype_struct_cb struct { + // //line :1 + // on_test *[0]byte + // //line :1 + // } + // + // Which is not useful. Moreover we never override source info, + // so subsequent source code uses the same source info. + // Moreover, empty file name makes compile emit no source debug info at all. + var buf bytes.Buffer + noSourceConf.Fprint(&buf, fset, def.Go) + if bytes.HasPrefix(buf.Bytes(), []byte("_Ctype_")) || + strings.HasPrefix(name, "_Ctype_enum_") || + strings.HasPrefix(name, "_Ctype_union_") { + // This typedef is of the form `typedef a b` and should be an alias. + fmt.Fprintf(fgo2, "= ") + } + fmt.Fprintf(fgo2, "%s", buf.Bytes()) + fmt.Fprintf(fgo2, "\n\n") + } + if *gccgo { + fmt.Fprintf(fgo2, "type _Ctype_void byte\n") + } else { + fmt.Fprintf(fgo2, "type _Ctype_void [0]byte\n") + } + + if *gccgo { + fmt.Fprint(fgo2, gccgoGoProlog) + fmt.Fprint(fc, p.cPrologGccgo()) + } else { + fmt.Fprint(fgo2, goProlog) + } + + if fc != nil { + fmt.Fprintf(fc, "#line 1 \"cgo-generated-wrappers\"\n") + } + if fm != nil { + fmt.Fprintf(fm, "#line 1 \"cgo-generated-wrappers\"\n") + } + + gccgoSymbolPrefix := p.gccgoSymbolPrefix() + + cVars := make(map[string]bool) + for _, key := range nameKeys(p.Name) { + n := p.Name[key] + if !n.IsVar() { + continue + } + + if !cVars[n.C] { + if *gccgo { + fmt.Fprintf(fc, "extern byte *%s;\n", n.C) + } else { + // Force a reference to all symbols so that + // the external linker will add DT_NEEDED + // entries as needed on ELF systems. + // Treat function variables differently + // to avoid type conflict errors from LTO + // (Link Time Optimization). 
+ if n.Kind == "fpvar" { + fmt.Fprintf(fm, "extern void %s();\n", n.C) + } else { + fmt.Fprintf(fm, "extern char %s[];\n", n.C) + fmt.Fprintf(fm, "void *_cgohack_%s = %s;\n\n", n.C, n.C) + } + fmt.Fprintf(fgo2, "//go:linkname __cgo_%s %s\n", n.C, n.C) + fmt.Fprintf(fgo2, "//go:cgo_import_static %s\n", n.C) + fmt.Fprintf(fgo2, "var __cgo_%s byte\n", n.C) + } + cVars[n.C] = true + } + + var node ast.Node + if n.Kind == "var" { + node = &ast.StarExpr{X: n.Type.Go} + } else if n.Kind == "fpvar" { + node = n.Type.Go + } else { + panic(fmt.Errorf("invalid var kind %q", n.Kind)) + } + if *gccgo { + fmt.Fprintf(fc, `extern void *%s __asm__("%s.%s");`, n.Mangle, gccgoSymbolPrefix, gccgoToSymbol(n.Mangle)) + fmt.Fprintf(&gccgoInit, "\t%s = &%s;\n", n.Mangle, n.C) + fmt.Fprintf(fc, "\n") + } + + fmt.Fprintf(fgo2, "var %s ", n.Mangle) + conf.Fprint(fgo2, fset, node) + if !*gccgo { + fmt.Fprintf(fgo2, " = (") + conf.Fprint(fgo2, fset, node) + fmt.Fprintf(fgo2, ")(unsafe.Pointer(&__cgo_%s))", n.C) + } + fmt.Fprintf(fgo2, "\n") + } + if *gccgo { + fmt.Fprintf(fc, "\n") + } + + for _, key := range nameKeys(p.Name) { + n := p.Name[key] + if n.Const != "" { + fmt.Fprintf(fgo2, "const %s = %s\n", n.Mangle, n.Const) + } + } + fmt.Fprintf(fgo2, "\n") + + callsMalloc := false + for _, key := range nameKeys(p.Name) { + n := p.Name[key] + if n.FuncType != nil { + p.writeDefsFunc(fgo2, n, &callsMalloc) + } + } + + fgcc := creat(*objDir + "_cgo_export.c") + fgcch := creat(*objDir + "_cgo_export.h") + if *gccgo { + p.writeGccgoExports(fgo2, fm, fgcc, fgcch) + } else { + p.writeExports(fgo2, fm, fgcc, fgcch) + } + + if callsMalloc && !*gccgo { + fmt.Fprint(fgo2, strings.Replace(cMallocDefGo, "PREFIX", cPrefix, -1)) + fmt.Fprint(fgcc, strings.Replace(strings.Replace(cMallocDefC, "PREFIX", cPrefix, -1), "PACKED", p.packedAttribute(), -1)) + } + + if err := fgcc.Close(); err != nil { + fatalf("%s", err) + } + if err := fgcch.Close(); err != nil { + fatalf("%s", err) + } + + if *exportHeader != 
"" && len(p.ExpFunc) > 0 { + fexp := creat(*exportHeader) + fgcch, err := os.Open(*objDir + "_cgo_export.h") + if err != nil { + fatalf("%s", err) + } + defer fgcch.Close() + _, err = io.Copy(fexp, fgcch) + if err != nil { + fatalf("%s", err) + } + if err = fexp.Close(); err != nil { + fatalf("%s", err) + } + } + + init := gccgoInit.String() + if init != "" { + // The init function does nothing but simple + // assignments, so it won't use much stack space, so + // it's OK to not split the stack. Splitting the stack + // can run into a bug in clang (as of 2018-11-09): + // this is a leaf function, and when clang sees a leaf + // function it won't emit the split stack prologue for + // the function. However, if this function refers to a + // non-split-stack function, which will happen if the + // cgo code refers to a C function not compiled with + // -fsplit-stack, then the linker will think that it + // needs to adjust the split stack prologue, but there + // won't be one. Marking the function explicitly + // no_split_stack works around this problem by telling + // the linker that it's OK if there is no split stack + // prologue. + fmt.Fprintln(fc, "static void init(void) __attribute__ ((constructor, no_split_stack));") + fmt.Fprintln(fc, "static void init(void) {") + fmt.Fprint(fc, init) + fmt.Fprintln(fc, "}") + } +} + +// elfImportedSymbols is like elf.File.ImportedSymbols, but it +// includes weak symbols. +// +// A bug in some versions of LLD (at least LLD 8) cause it to emit +// several pthreads symbols as weak, but we need to import those. See +// issue #31912 or https://bugs.llvm.org/show_bug.cgi?id=42442. +// +// When doing external linking, we hand everything off to the external +// linker, which will create its own dynamic symbol tables. For +// internal linking, this may turn weak imports into strong imports, +// which could cause dynamic linking to fail if a symbol really isn't +// defined. 
However, the standard library depends on everything it +// imports, and this is the primary use of dynamic symbol tables with +// internal linking. +func elfImportedSymbols(f *elf.File) []elf.ImportedSymbol { + syms, _ := f.DynamicSymbols() + var imports []elf.ImportedSymbol + for _, s := range syms { + if (elf.ST_BIND(s.Info) == elf.STB_GLOBAL || elf.ST_BIND(s.Info) == elf.STB_WEAK) && s.Section == elf.SHN_UNDEF { + imports = append(imports, elf.ImportedSymbol{ + Name: s.Name, + Library: s.Library, + Version: s.Version, + }) + } + } + return imports +} + +func dynimport(obj string) { + stdout := os.Stdout + if *dynout != "" { + f, err := os.Create(*dynout) + if err != nil { + fatalf("%s", err) + } + stdout = f + } + + fmt.Fprintf(stdout, "package %s\n", *dynpackage) + + if f, err := elf.Open(obj); err == nil { + if *dynlinker { + // Emit the cgo_dynamic_linker line. + if sec := f.Section(".interp"); sec != nil { + if data, err := sec.Data(); err == nil && len(data) > 1 { + // skip trailing \0 in data + fmt.Fprintf(stdout, "//go:cgo_dynamic_linker %q\n", string(data[:len(data)-1])) + } + } + } + sym := elfImportedSymbols(f) + for _, s := range sym { + targ := s.Name + if s.Version != "" { + targ += "#" + s.Version + } + checkImportSymName(s.Name) + checkImportSymName(targ) + fmt.Fprintf(stdout, "//go:cgo_import_dynamic %s %s %q\n", s.Name, targ, s.Library) + } + lib, _ := f.ImportedLibraries() + for _, l := range lib { + fmt.Fprintf(stdout, "//go:cgo_import_dynamic _ _ %q\n", l) + } + return + } + + if f, err := macho.Open(obj); err == nil { + sym, _ := f.ImportedSymbols() + for _, s := range sym { + if len(s) > 0 && s[0] == '_' { + s = s[1:] + } + checkImportSymName(s) + fmt.Fprintf(stdout, "//go:cgo_import_dynamic %s %s %q\n", s, s, "") + } + lib, _ := f.ImportedLibraries() + for _, l := range lib { + fmt.Fprintf(stdout, "//go:cgo_import_dynamic _ _ %q\n", l) + } + return + } + + if f, err := pe.Open(obj); err == nil { + sym, _ := f.ImportedSymbols() + for _, s 
:= range sym { + ss := strings.Split(s, ":") + name := strings.Split(ss[0], "@")[0] + checkImportSymName(name) + checkImportSymName(ss[0]) + fmt.Fprintf(stdout, "//go:cgo_import_dynamic %s %s %q\n", name, ss[0], strings.ToLower(ss[1])) + } + return + } + + if f, err := xcoff.Open(obj); err == nil { + sym, err := f.ImportedSymbols() + if err != nil { + fatalf("cannot load imported symbols from XCOFF file %s: %v", obj, err) + } + for _, s := range sym { + if s.Name == "runtime_rt0_go" || s.Name == "_rt0_ppc64_aix_lib" { + // These symbols are imported by runtime/cgo but + // must not be added to _cgo_import.go as there are + // Go symbols. + continue + } + checkImportSymName(s.Name) + fmt.Fprintf(stdout, "//go:cgo_import_dynamic %s %s %q\n", s.Name, s.Name, s.Library) + } + lib, err := f.ImportedLibraries() + if err != nil { + fatalf("cannot load imported libraries from XCOFF file %s: %v", obj, err) + } + for _, l := range lib { + fmt.Fprintf(stdout, "//go:cgo_import_dynamic _ _ %q\n", l) + } + return + } + + fatalf("cannot parse %s as ELF, Mach-O, PE or XCOFF", obj) +} + +// checkImportSymName checks a symbol name we are going to emit as part +// of a //go:cgo_import_dynamic pragma. These names come from object +// files, so they may be corrupt. We are going to emit them unquoted, +// so while they don't need to be valid symbol names (and in some cases, +// involving symbol versions, they won't be) they must contain only +// graphic characters and must not contain Go comments. +func checkImportSymName(s string) { + for _, c := range s { + if !unicode.IsGraphic(c) || unicode.IsSpace(c) { + fatalf("dynamic symbol %q contains unsupported character", s) + } + } + if strings.Contains(s, "//") || strings.Contains(s, "/*") { + fatalf("dynamic symbol %q contains Go comment") + } +} + +// Construct a gcc struct matching the gc argument frame. +// Assumes that in gcc, char is 1 byte, short 2 bytes, int 4 bytes, long long 8 bytes. 
// These assumptions are checked by the gccProlog.
// Also assumes that gc convention is to word-align the
// input and output parameters.
//
// structType returns the C struct-literal text for n's argument frame
// and its total size in bytes (including trailing padding).
func (p *Package) structType(n *Name) (string, int64) {
	var buf strings.Builder
	fmt.Fprint(&buf, "struct {\n")
	// off tracks the running byte offset within the frame; pad fields are
	// named by the offset at which they are inserted.
	off := int64(0)
	for i, t := range n.FuncType.Params {
		if off%t.Align != 0 {
			pad := t.Align - off%t.Align
			fmt.Fprintf(&buf, "\t\tchar __pad%d[%d];\n", off, pad)
			off += pad
		}
		c := t.Typedef
		if c == "" {
			c = t.C.String()
		}
		fmt.Fprintf(&buf, "\t\t%s p%d;\n", c, i)
		off += t.Size
	}
	// Word-align before the result (gc convention, per the comment above).
	if off%p.PtrSize != 0 {
		pad := p.PtrSize - off%p.PtrSize
		fmt.Fprintf(&buf, "\t\tchar __pad%d[%d];\n", off, pad)
		off += pad
	}
	if t := n.FuncType.Result; t != nil {
		if off%t.Align != 0 {
			pad := t.Align - off%t.Align
			fmt.Fprintf(&buf, "\t\tchar __pad%d[%d];\n", off, pad)
			off += pad
		}
		fmt.Fprintf(&buf, "\t\t%s r;\n", t.C)
		off += t.Size
	}
	// Word-align the whole frame as well.
	if off%p.PtrSize != 0 {
		pad := p.PtrSize - off%p.PtrSize
		fmt.Fprintf(&buf, "\t\tchar __pad%d[%d];\n", off, pad)
		off += pad
	}
	if off == 0 {
		fmt.Fprintf(&buf, "\t\tchar unused;\n") // avoid empty struct
	}
	fmt.Fprintf(&buf, "\t}")
	return buf.String(), off
}

// writeDefsFunc writes into fgo2 the Go-side definition for the C function
// described by n (both the gc and gccgo forms). *callsMalloc is set when the
// emitted code relies on the _cgo_cmalloc helper.
func (p *Package) writeDefsFunc(fgo2 io.Writer, n *Name, callsMalloc *bool) {
	name := n.Go
	gtype := n.FuncType.Go
	void := gtype.Results == nil || len(gtype.Results.List) == 0
	if n.AddError {
		// Add "error" to return type list.
		// Type list is known to be 0 or 1 element - it's a C function.
		err := &ast.Field{Type: ast.NewIdent("error")}
		l := gtype.Results.List
		if len(l) == 0 {
			l = []*ast.Field{err}
		} else {
			l = []*ast.Field{l[0], err}
		}
		t := new(ast.FuncType)
		*t = *gtype
		t.Results = &ast.FieldList{List: l}
		gtype = t
	}

	// Go func declaration.
	d := &ast.FuncDecl{
		Name: ast.NewIdent(n.Mangle),
		Type: gtype,
	}

	// Builtins defined in the C prolog.
	inProlog := builtinDefs[name] != ""
	cname := fmt.Sprintf("_cgo%s%s", cPrefix, n.Mangle)
	// Give every parameter a positional name p0, p1, ... so the generated
	// wrapper can refer to them.
	paramnames := []string(nil)
	if d.Type.Params != nil {
		for i, param := range d.Type.Params.List {
			paramName := fmt.Sprintf("p%d", i)
			param.Names = []*ast.Ident{ast.NewIdent(paramName)}
			paramnames = append(paramnames, paramName)
		}
	}

	if *gccgo {
		// Gccgo style hooks.
		fmt.Fprint(fgo2, "\n")
		conf.Fprint(fgo2, fset, d)
		fmt.Fprint(fgo2, " {\n")
		if !inProlog {
			fmt.Fprint(fgo2, "\tdefer syscall.CgocallDone()\n")
			fmt.Fprint(fgo2, "\tsyscall.Cgocall()\n")
		}
		if n.AddError {
			fmt.Fprint(fgo2, "\tsyscall.SetErrno(0)\n")
		}
		fmt.Fprint(fgo2, "\t")
		if !void {
			fmt.Fprint(fgo2, "r := ")
		}
		fmt.Fprintf(fgo2, "%s(%s)\n", cname, strings.Join(paramnames, ", "))

		if n.AddError {
			// Turn a non-zero errno into the second ("error") result.
			fmt.Fprint(fgo2, "\te := syscall.GetErrno()\n")
			fmt.Fprint(fgo2, "\tif e != 0 {\n")
			fmt.Fprint(fgo2, "\t\treturn ")
			if !void {
				fmt.Fprint(fgo2, "r, ")
			}
			fmt.Fprint(fgo2, "e\n")
			fmt.Fprint(fgo2, "\t}\n")
			fmt.Fprint(fgo2, "\treturn ")
			if !void {
				fmt.Fprint(fgo2, "r, ")
			}
			fmt.Fprint(fgo2, "nil\n")
		} else if !void {
			fmt.Fprint(fgo2, "\treturn r\n")
		}

		fmt.Fprint(fgo2, "}\n")

		// declare the C function.
		fmt.Fprintf(fgo2, "//extern %s\n", cname)
		d.Name = ast.NewIdent(cname)
		if n.AddError {
			// The C declaration does not include the synthesized error result.
			l := d.Type.Results.List
			d.Type.Results.List = l[:len(l)-1]
		}
		conf.Fprint(fgo2, fset, d)
		fmt.Fprint(fgo2, "\n")

		return
	}

	if inProlog {
		fmt.Fprint(fgo2, builtinDefs[name])
		if strings.Contains(builtinDefs[name], "_cgo_cmalloc") {
			*callsMalloc = true
		}
		return
	}

	// Wrapper calls into gcc, passing a pointer to the argument frame.
	fmt.Fprintf(fgo2, "//go:cgo_import_static %s\n", cname)
	fmt.Fprintf(fgo2, "//go:linkname __cgofn_%s %s\n", cname, cname)
	fmt.Fprintf(fgo2, "var __cgofn_%s byte\n", cname)
	fmt.Fprintf(fgo2, "var %s = unsafe.Pointer(&__cgofn_%s)\n", cname, cname)

	nret := 0
	if !void {
		d.Type.Results.List[0].Names = []*ast.Ident{ast.NewIdent("r1")}
		nret = 1
	}
	if n.AddError {
		d.Type.Results.List[nret].Names = []*ast.Ident{ast.NewIdent("r2")}
	}

	fmt.Fprint(fgo2, "\n")
	fmt.Fprint(fgo2, "//go:cgo_unsafe_args\n")
	conf.Fprint(fgo2, fset, d)
	fmt.Fprint(fgo2, " {\n")

	// NOTE: Using uintptr to hide from escape analysis.
	arg := "0"
	if len(paramnames) > 0 {
		arg = "uintptr(unsafe.Pointer(&p0))"
	} else if !void {
		arg = "uintptr(unsafe.Pointer(&r1))"
	}

	noCallback := p.noCallbacks[n.C]
	if noCallback {
		// disable cgocallback, will check it in runtime.
		fmt.Fprintf(fgo2, "\t_Cgo_no_callback(true)\n")
	}

	prefix := ""
	if n.AddError {
		prefix = "errno := "
	}
	fmt.Fprintf(fgo2, "\t%s_cgo_runtime_cgocall(%s, %s)\n", prefix, cname, arg)
	if n.AddError {
		fmt.Fprintf(fgo2, "\tif errno != 0 { r2 = syscall.Errno(errno) }\n")
	}
	if noCallback {
		fmt.Fprintf(fgo2, "\t_Cgo_no_callback(false)\n")
	}

	// skip _Cgo_use when noescape exist,
	// so that the compiler won't force to escape them to heap.
	if !p.noEscapes[n.C] {
		// Keep the arguments alive (and addressable) across the cgocall;
		// _Cgo_always_false is never true at run time.
		fmt.Fprintf(fgo2, "\tif _Cgo_always_false {\n")
		if d.Type.Params != nil {
			for i := range d.Type.Params.List {
				fmt.Fprintf(fgo2, "\t\t_Cgo_use(p%d)\n", i)
			}
		}
		fmt.Fprintf(fgo2, "\t}\n")
	}
	fmt.Fprintf(fgo2, "\treturn\n")
	fmt.Fprintf(fgo2, "}\n")
}

// writeOutput creates stubs for a specific source file to be compiled by gc
// (base.cgo1.go) and by gcc (base.cgo2.c), and records both in p.GoFiles /
// p.GccFiles.
func (p *Package) writeOutput(f *File, srcfile string) {
	base := srcfile
	base = strings.TrimSuffix(base, ".go")
	base = filepath.Base(base)
	fgo1 := creat(*objDir + base + ".cgo1.go")
	fgcc := creat(*objDir + base + ".cgo2.c")

	p.GoFiles = append(p.GoFiles, base+".cgo1.go")
	p.GccFiles = append(p.GccFiles, base+".cgo2.c")

	// Write Go output: Go input with rewrites of C.xxx to _C_xxx.
	fmt.Fprintf(fgo1, "// Code generated by cmd/cgo; DO NOT EDIT.\n\n")
	if strings.ContainsAny(srcfile, "\r\n") {
		// This should have been checked when the file path was first resolved,
		// but we double check here just to be sure.
		fatalf("internal error: writeOutput: srcfile contains unexpected newline character: %q", srcfile)
	}
	fmt.Fprintf(fgo1, "//line %s:1:1\n", srcfile)
	fgo1.Write(f.Edit.Bytes())

	// While we process the vars and funcs, also write gcc output.
	// Gcc output starts with the preamble.
	fmt.Fprintf(fgcc, "%s\n", builtinProlog)
	fmt.Fprintf(fgcc, "%s\n", f.Preamble)
	fmt.Fprintf(fgcc, "%s\n", gccProlog)
	fmt.Fprintf(fgcc, "%s\n", tsanProlog)
	fmt.Fprintf(fgcc, "%s\n", msanProlog)

	for _, key := range nameKeys(f.Name) {
		n := f.Name[key]
		if n.FuncType != nil {
			p.writeOutputFunc(fgcc, n)
		}
	}

	fgo1.Close()
	fgcc.Close()
}

// fixGo converts the internal Name.Go field into the name we should show
// to users in error messages. There's only one for now: on input we rewrite
// C.malloc into C._CMalloc, so change it back here.
// fixGo maps an internal Name.Go identifier back to the user-visible
// spelling for error messages. The only rewrite performed on input is
// C.malloc -> C._CMalloc, so that is the only case undone here.
func fixGo(name string) string {
	switch name {
	case "_CMalloc":
		return "malloc"
	default:
		return name
	}
}

// isBuiltin lists the mangled names whose definitions come from the C
// prolog rather than from generated wrappers.
var isBuiltin = map[string]bool{
	"_Cfunc_CString":   true,
	"_Cfunc_CBytes":    true,
	"_Cfunc_GoString":  true,
	"_Cfunc_GoStringN": true,
	"_Cfunc_GoBytes":   true,
	"_Cfunc__CMalloc":  true,
}
// writeOutputFunc writes the gcc-compiled C wrapper for the C function
// described by n into fgcc. The wrapper unpacks the gc argument frame,
// calls the real C function (or expands the macro), and stores the result
// back into the frame.
func (p *Package) writeOutputFunc(fgcc *os.File, n *Name) {
	name := n.Mangle
	if isBuiltin[name] || p.Written[name] {
		// The builtins are already defined in the C prolog, and we don't
		// want to duplicate function definitions we've already done.
		return
	}
	p.Written[name] = true

	if *gccgo {
		p.writeGccgoOutputFunc(fgcc, n)
		return
	}

	ctype, _ := p.structType(n)

	// Gcc wrapper unpacks the C argument struct
	// and calls the actual C function.
	fmt.Fprintf(fgcc, "CGO_NO_SANITIZE_THREAD\n")
	if n.AddError {
		// Wrappers that report errno return it as an int.
		fmt.Fprintf(fgcc, "int\n")
	} else {
		fmt.Fprintf(fgcc, "void\n")
	}
	fmt.Fprintf(fgcc, "_cgo%s%s(void *v)\n", cPrefix, n.Mangle)
	fmt.Fprintf(fgcc, "{\n")
	if n.AddError {
		fmt.Fprintf(fgcc, "\tint _cgo_errno;\n")
	}
	// We're trying to write a gcc struct that matches gc's layout.
	// Use packed attribute to force no padding in this struct in case
	// gcc has different packing requirements.
	fmt.Fprintf(fgcc, "\t%s %v *_cgo_a = v;\n", ctype, p.packedAttribute())
	if n.FuncType.Result != nil {
		// Save the stack top for use below.
		fmt.Fprintf(fgcc, "\tchar *_cgo_stktop = _cgo_topofstack();\n")
	}
	tr := n.FuncType.Result
	if tr != nil {
		fmt.Fprintf(fgcc, "\t__typeof__(_cgo_a->r) _cgo_r;\n")
	}
	fmt.Fprintf(fgcc, "\t_cgo_tsan_acquire();\n")
	if n.AddError {
		fmt.Fprintf(fgcc, "\terrno = 0;\n")
	}
	fmt.Fprintf(fgcc, "\t")
	if tr != nil {
		fmt.Fprintf(fgcc, "_cgo_r = ")
		// Cast to avoid warnings when the C function returns a pointer
		// with different qualifiers.
		if c := tr.C.String(); c[len(c)-1] == '*' {
			fmt.Fprint(fgcc, "(__typeof__(_cgo_a->r)) ")
		}
	}
	if n.Kind == "macro" {
		fmt.Fprintf(fgcc, "%s;\n", n.C)
	} else {
		fmt.Fprintf(fgcc, "%s(", n.C)
		for i := range n.FuncType.Params {
			if i > 0 {
				fmt.Fprintf(fgcc, ", ")
			}
			fmt.Fprintf(fgcc, "_cgo_a->p%d", i)
		}
		fmt.Fprintf(fgcc, ");\n")
	}
	if n.AddError {
		fmt.Fprintf(fgcc, "\t_cgo_errno = errno;\n")
	}
	fmt.Fprintf(fgcc, "\t_cgo_tsan_release();\n")
	if n.FuncType.Result != nil {
		// The cgo call may have caused a stack copy (via a callback).
		// Adjust the return value pointer appropriately.
		fmt.Fprintf(fgcc, "\t_cgo_a = (void*)((char*)_cgo_a + (_cgo_topofstack() - _cgo_stktop));\n")
		// Save the return value.
		fmt.Fprintf(fgcc, "\t_cgo_a->r = _cgo_r;\n")
		// The return value is on the Go stack. If we are using msan,
		// and if the C value is partially or completely uninitialized,
		// the assignment will mark the Go stack as uninitialized.
		// The Go compiler does not update msan for changes to the
		// stack. It is possible that the stack will remain
		// uninitialized, and then later be used in a way that is
		// visible to msan, possibly leading to a false positive.
		// Mark the stack space as written, to avoid this problem.
		// See issue 26209.
		fmt.Fprintf(fgcc, "\t_cgo_msan_write(&_cgo_a->r, sizeof(_cgo_a->r));\n")
	}
	if n.AddError {
		fmt.Fprintf(fgcc, "\treturn _cgo_errno;\n")
	}
	fmt.Fprintf(fgcc, "}\n")
	fmt.Fprintf(fgcc, "\n")
}

// Write out a wrapper for a function when using gccgo. This is a
// simple wrapper that just calls the real function.
// We only need a wrapper to support static functions in the
// prologue--without a wrapper, we can't refer to the function, since
// the reference is in a different file.
func (p *Package) writeGccgoOutputFunc(fgcc *os.File, n *Name) {
	fmt.Fprintf(fgcc, "CGO_NO_SANITIZE_THREAD\n")
	if t := n.FuncType.Result; t != nil {
		fmt.Fprintf(fgcc, "%s\n", t.C.String())
	} else {
		fmt.Fprintf(fgcc, "void\n")
	}
	fmt.Fprintf(fgcc, "_cgo%s%s(", cPrefix, n.Mangle)
	for i, t := range n.FuncType.Params {
		if i > 0 {
			fmt.Fprintf(fgcc, ", ")
		}
		// Prefer the typedef name when one exists.
		c := t.Typedef
		if c == "" {
			c = t.C.String()
		}
		fmt.Fprintf(fgcc, "%s p%d", c, i)
	}
	fmt.Fprintf(fgcc, ")\n")
	fmt.Fprintf(fgcc, "{\n")
	if t := n.FuncType.Result; t != nil {
		fmt.Fprintf(fgcc, "\t%s _cgo_r;\n", t.C.String())
	}
	fmt.Fprintf(fgcc, "\t_cgo_tsan_acquire();\n")
	fmt.Fprintf(fgcc, "\t")
	if t := n.FuncType.Result; t != nil {
		fmt.Fprintf(fgcc, "_cgo_r = ")
		// Cast to void* to avoid warnings due to omitted qualifiers.
		if c := t.C.String(); c[len(c)-1] == '*' {
			fmt.Fprintf(fgcc, "(void*)")
		}
	}
	if n.Kind == "macro" {
		fmt.Fprintf(fgcc, "%s;\n", n.C)
	} else {
		fmt.Fprintf(fgcc, "%s(", n.C)
		for i := range n.FuncType.Params {
			if i > 0 {
				fmt.Fprintf(fgcc, ", ")
			}
			fmt.Fprintf(fgcc, "p%d", i)
		}
		fmt.Fprintf(fgcc, ");\n")
	}
	fmt.Fprintf(fgcc, "\t_cgo_tsan_release();\n")
	if t := n.FuncType.Result; t != nil {
		fmt.Fprintf(fgcc, "\treturn ")
		// Cast to void* to avoid warnings due to omitted qualifiers
		// and explicit incompatible struct types.
		if c := t.C.String(); c[len(c)-1] == '*' {
			fmt.Fprintf(fgcc, "(void*)")
		}
		fmt.Fprintf(fgcc, "_cgo_r;\n")
	}
	fmt.Fprintf(fgcc, "}\n")
	fmt.Fprintf(fgcc, "\n")
}

// packedAttribute returns host compiler struct attribute that will be
// used to match gc's struct layout. For example, on 386 Windows,
// gcc wants to 8-align int64s, but gc does not.
+// Use __gcc_struct__ to work around https://gcc.gnu.org/PR52991 on x86, +// and https://golang.org/issue/5603. +func (p *Package) packedAttribute() string { + s := "__attribute__((__packed__" + if !p.GccIsClang && (goarch == "amd64" || goarch == "386") { + s += ", __gcc_struct__" + } + return s + "))" +} + +// exportParamName returns the value of param as it should be +// displayed in a c header file. If param contains any non-ASCII +// characters, this function will return the character p followed by +// the value of position; otherwise, this function will return the +// value of param. +func exportParamName(param string, position int) string { + if param == "" { + return fmt.Sprintf("p%d", position) + } + + pname := param + + for i := 0; i < len(param); i++ { + if param[i] > unicode.MaxASCII { + pname = fmt.Sprintf("p%d", position) + break + } + } + + return pname +} + +// Write out the various stubs we need to support functions exported +// from Go so that they are callable from C. +func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) { + p.writeExportHeader(fgcch) + + fmt.Fprintf(fgcc, "/* Code generated by cmd/cgo; DO NOT EDIT. */\n\n") + fmt.Fprintf(fgcc, "#include \n") + fmt.Fprintf(fgcc, "#include \"_cgo_export.h\"\n\n") + + // We use packed structs, but they are always aligned. + // The pragmas and address-of-packed-member are only recognized as + // warning groups in clang 4.0+, so ignore unknown pragmas first. 
	fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Wunknown-pragmas\"\n")
	fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Wpragmas\"\n")
	fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Waddress-of-packed-member\"\n")
	fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Wunknown-warning-option\"\n")
	fmt.Fprintf(fgcc, "#pragma GCC diagnostic ignored \"-Wunaligned-access\"\n")

	fmt.Fprintf(fgcc, "extern void crosscall2(void (*fn)(void *), void *, int, size_t);\n")
	fmt.Fprintf(fgcc, "extern size_t _cgo_wait_runtime_init_done(void);\n")
	fmt.Fprintf(fgcc, "extern void _cgo_release_context(size_t);\n\n")
	fmt.Fprintf(fgcc, "extern char* _cgo_topofstack(void);")
	fmt.Fprintf(fgcc, "%s\n", tsanProlog)
	fmt.Fprintf(fgcc, "%s\n", msanProlog)

	for _, exp := range p.ExpFunc {
		fn := exp.Func

		// Construct a struct that will be used to communicate
		// arguments from C to Go. The C and Go definitions
		// just have to agree. The gcc struct will be compiled
		// with __attribute__((packed)) so all padding must be
		// accounted for explicitly.
		ctype := "struct {\n"
		gotype := new(bytes.Buffer)
		fmt.Fprintf(gotype, "struct {\n")
		off := int64(0)
		npad := 0
		// argField appends one field (plus any needed padding) to both the
		// C and the Go views of the argument struct.
		argField := func(typ ast.Expr, namePat string, args ...interface{}) {
			name := fmt.Sprintf(namePat, args...)
			t := p.cgoType(typ)
			if off%t.Align != 0 {
				pad := t.Align - off%t.Align
				ctype += fmt.Sprintf("\t\tchar __pad%d[%d];\n", npad, pad)
				off += pad
				npad++
			}
			ctype += fmt.Sprintf("\t\t%s %s;\n", t.C, name)
			fmt.Fprintf(gotype, "\t\t%s ", name)
			noSourceConf.Fprint(gotype, fset, typ)
			fmt.Fprintf(gotype, "\n")
			off += t.Size
		}
		if fn.Recv != nil {
			argField(fn.Recv.List[0].Type, "recv")
		}
		fntype := fn.Type
		forFieldList(fntype.Params,
			func(i int, aname string, atype ast.Expr) {
				argField(atype, "p%d", i)
			})
		forFieldList(fntype.Results,
			func(i int, aname string, atype ast.Expr) {
				argField(atype, "r%d", i)
			})
		if ctype == "struct {\n" {
			ctype += "\t\tchar unused;\n" // avoid empty struct
		}
		ctype += "\t}"
		fmt.Fprintf(gotype, "\t}")

		// Get the return type of the wrapper function
		// compiled by gcc.
		gccResult := ""
		if fntype.Results == nil || len(fntype.Results.List) == 0 {
			gccResult = "void"
		} else if len(fntype.Results.List) == 1 && len(fntype.Results.List[0].Names) <= 1 {
			gccResult = p.cgoType(fntype.Results.List[0].Type).C.String()
		} else {
			// Multiple results: declare a struct ExpName_return in the header.
			fmt.Fprintf(fgcch, "\n/* Return type for %s */\n", exp.ExpName)
			fmt.Fprintf(fgcch, "struct %s_return {\n", exp.ExpName)
			forFieldList(fntype.Results,
				func(i int, aname string, atype ast.Expr) {
					fmt.Fprintf(fgcch, "\t%s r%d;", p.cgoType(atype).C, i)
					if len(aname) > 0 {
						fmt.Fprintf(fgcch, " /* %s */", aname)
					}
					fmt.Fprint(fgcch, "\n")
				})
			fmt.Fprintf(fgcch, "};\n")
			gccResult = "struct " + exp.ExpName + "_return"
		}

		// Build the wrapper function compiled by gcc.
		gccExport := ""
		if goos == "windows" {
			gccExport = "__declspec(dllexport) "
		}
		s := fmt.Sprintf("%s%s %s(", gccExport, gccResult, exp.ExpName)
		if fn.Recv != nil {
			s += p.cgoType(fn.Recv.List[0].Type).C.String()
			s += " recv"
		}
		forFieldList(fntype.Params,
			func(i int, aname string, atype ast.Expr) {
				if i > 0 || fn.Recv != nil {
					s += ", "
				}
				s += fmt.Sprintf("%s %s", p.cgoType(atype).C, exportParamName(aname, i))
			})
		s += ")"

		if len(exp.Doc) > 0 {
			fmt.Fprintf(fgcch, "\n%s", exp.Doc)
			if !strings.HasSuffix(exp.Doc, "\n") {
				fmt.Fprint(fgcch, "\n")
			}
		}
		fmt.Fprintf(fgcch, "extern %s;\n", s)

		fmt.Fprintf(fgcc, "extern void _cgoexp%s_%s(void *);\n", cPrefix, exp.ExpName)
		fmt.Fprintf(fgcc, "\nCGO_NO_SANITIZE_THREAD")
		fmt.Fprintf(fgcc, "\n%s\n", s)
		fmt.Fprintf(fgcc, "{\n")
		fmt.Fprintf(fgcc, "\tsize_t _cgo_ctxt = _cgo_wait_runtime_init_done();\n")
		// The results part of the argument structure must be
		// initialized to 0 so the write barriers generated by
		// the assignments to these fields in Go are safe.
		//
		// We use a local static variable to get the zeroed
		// value of the argument type. This avoids including
		// string.h for memset, and is also robust to C++
		// types with constructors. Both GCC and LLVM optimize
		// this into just zeroing _cgo_a.
		fmt.Fprintf(fgcc, "\ttypedef %s %v _cgo_argtype;\n", ctype, p.packedAttribute())
		fmt.Fprintf(fgcc, "\tstatic _cgo_argtype _cgo_zero;\n")
		fmt.Fprintf(fgcc, "\t_cgo_argtype _cgo_a = _cgo_zero;\n")
		if gccResult != "void" && (len(fntype.Results.List) > 1 || len(fntype.Results.List[0].Names) > 1) {
			fmt.Fprintf(fgcc, "\t%s r;\n", gccResult)
		}
		if fn.Recv != nil {
			fmt.Fprintf(fgcc, "\t_cgo_a.recv = recv;\n")
		}
		forFieldList(fntype.Params,
			func(i int, aname string, atype ast.Expr) {
				fmt.Fprintf(fgcc, "\t_cgo_a.p%d = %s;\n", i, exportParamName(aname, i))
			})
		fmt.Fprintf(fgcc, "\t_cgo_tsan_release();\n")
		fmt.Fprintf(fgcc, "\tcrosscall2(_cgoexp%s_%s, &_cgo_a, %d, _cgo_ctxt);\n", cPrefix, exp.ExpName, off)
		fmt.Fprintf(fgcc, "\t_cgo_tsan_acquire();\n")
		fmt.Fprintf(fgcc, "\t_cgo_release_context(_cgo_ctxt);\n")
		if gccResult != "void" {
			if len(fntype.Results.List) == 1 && len(fntype.Results.List[0].Names) <= 1 {
				fmt.Fprintf(fgcc, "\treturn _cgo_a.r0;\n")
			} else {
				forFieldList(fntype.Results,
					func(i int, aname string, atype ast.Expr) {
						fmt.Fprintf(fgcc, "\tr.r%d = _cgo_a.r%d;\n", i, i)
					})
				fmt.Fprintf(fgcc, "\treturn r;\n")
			}
		}
		fmt.Fprintf(fgcc, "}\n")

		// In internal linking mode, the Go linker sees both
		// the C wrapper written above and the Go wrapper it
		// references. Hence, export the C wrapper (e.g., for
		// if we're building a shared object). The Go linker
		// will resolve the C wrapper's reference to the Go
		// wrapper without a separate export.
		fmt.Fprintf(fgo2, "//go:cgo_export_dynamic %s\n", exp.ExpName)
		// cgo_export_static refers to a symbol by its linker
		// name, so set the linker name of the Go wrapper.
		fmt.Fprintf(fgo2, "//go:linkname _cgoexp%s_%s _cgoexp%s_%s\n", cPrefix, exp.ExpName, cPrefix, exp.ExpName)
		// In external linking mode, the Go linker sees the Go
		// wrapper, but not the C wrapper. For this case,
		// export the Go wrapper so the host linker can
		// resolve the reference from the C wrapper to the Go
		// wrapper.
		fmt.Fprintf(fgo2, "//go:cgo_export_static _cgoexp%s_%s\n", cPrefix, exp.ExpName)

		// Build the wrapper function compiled by cmd/compile.
		// This unpacks the argument struct above and calls the Go function.
		fmt.Fprintf(fgo2, "func _cgoexp%s_%s(a *%s) {\n", cPrefix, exp.ExpName, gotype)

		fmt.Fprintf(fm, "void _cgoexp%s_%s(void* p){}\n", cPrefix, exp.ExpName)

		fmt.Fprintf(fgo2, "\t")

		if gccResult != "void" {
			// Write results back to frame.
			forFieldList(fntype.Results,
				func(i int, aname string, atype ast.Expr) {
					if i > 0 {
						fmt.Fprintf(fgo2, ", ")
					}
					fmt.Fprintf(fgo2, "a.r%d", i)
				})
			fmt.Fprintf(fgo2, " = ")
		}
		if fn.Recv != nil {
			fmt.Fprintf(fgo2, "a.recv.")
		}
		fmt.Fprintf(fgo2, "%s(", exp.Func.Name)
		forFieldList(fntype.Params,
			func(i int, aname string, atype ast.Expr) {
				if i > 0 {
					fmt.Fprint(fgo2, ", ")
				}
				fmt.Fprintf(fgo2, "a.p%d", i)
			})
		fmt.Fprint(fgo2, ")\n")
		if gccResult != "void" {
			// Verify that any results don't contain any
			// Go pointers.
			forFieldList(fntype.Results,
				func(i int, aname string, atype ast.Expr) {
					if !p.hasPointer(nil, atype, false) {
						return
					}
					fmt.Fprintf(fgo2, "\t_cgoCheckResult(a.r%d)\n", i)
				})
		}
		fmt.Fprint(fgo2, "}\n")
	}

	fmt.Fprintf(fgcch, "%s", gccExportHeaderEpilog)
}

// Write out the C header allowing C code to call exported gccgo functions.
func (p *Package) writeGccgoExports(fgo2, fm, fgcc, fgcch io.Writer) {
	gccgoSymbolPrefix := p.gccgoSymbolPrefix()

	p.writeExportHeader(fgcch)

	fmt.Fprintf(fgcc, "/* Code generated by cmd/cgo; DO NOT EDIT. */\n\n")
	fmt.Fprintf(fgcc, "#include \"_cgo_export.h\"\n")

	fmt.Fprintf(fgcc, "%s\n", gccgoExportFileProlog)
	fmt.Fprintf(fgcc, "%s\n", tsanProlog)
	fmt.Fprintf(fgcc, "%s\n", msanProlog)

	for _, exp := range p.ExpFunc {
		fn := exp.Func
		fntype := fn.Type

		// Build the C return type text (cRet) for the exported function.
		cdeclBuf := new(strings.Builder)
		resultCount := 0
		forFieldList(fntype.Results,
			func(i int, aname string, atype ast.Expr) { resultCount++ })
		switch resultCount {
		case 0:
			fmt.Fprintf(cdeclBuf, "void")
		case 1:
			forFieldList(fntype.Results,
				func(i int, aname string, atype ast.Expr) {
					t := p.cgoType(atype)
					fmt.Fprintf(cdeclBuf, "%s", t.C)
				})
		default:
			// Declare a result struct.
			fmt.Fprintf(fgcch, "\n/* Return type for %s */\n", exp.ExpName)
			fmt.Fprintf(fgcch, "struct %s_return {\n", exp.ExpName)
			forFieldList(fntype.Results,
				func(i int, aname string, atype ast.Expr) {
					t := p.cgoType(atype)
					fmt.Fprintf(fgcch, "\t%s r%d;", t.C, i)
					if len(aname) > 0 {
						fmt.Fprintf(fgcch, " /* %s */", aname)
					}
					fmt.Fprint(fgcch, "\n")
				})
			fmt.Fprintf(fgcch, "};\n")
			fmt.Fprintf(cdeclBuf, "struct %s_return", exp.ExpName)
		}

		cRet := cdeclBuf.String()

		// Build the C parameter list text (cParams).
		cdeclBuf = new(strings.Builder)
		fmt.Fprintf(cdeclBuf, "(")
		if fn.Recv != nil {
			fmt.Fprintf(cdeclBuf, "%s recv", p.cgoType(fn.Recv.List[0].Type).C.String())
		}
		// Function parameters.
		forFieldList(fntype.Params,
			func(i int, aname string, atype ast.Expr) {
				if i > 0 || fn.Recv != nil {
					fmt.Fprintf(cdeclBuf, ", ")
				}
				t := p.cgoType(atype)
				fmt.Fprintf(cdeclBuf, "%s p%d", t.C, i)
			})
		fmt.Fprintf(cdeclBuf, ")")
		cParams := cdeclBuf.String()

		if len(exp.Doc) > 0 {
			fmt.Fprintf(fgcch, "\n%s", exp.Doc)
		}

		fmt.Fprintf(fgcch, "extern %s %s%s;\n", cRet, exp.ExpName, cParams)

		// We need to use a name that will be exported by the
		// Go code; otherwise gccgo will make it static and we
		// will not be able to link against it from the C
		// code.
		goName := "Cgoexp_" + exp.ExpName
		fmt.Fprintf(fgcc, `extern %s %s %s __asm__("%s.%s");`, cRet, goName, cParams, gccgoSymbolPrefix, gccgoToSymbol(goName))
		fmt.Fprint(fgcc, "\n")

		fmt.Fprint(fgcc, "\nCGO_NO_SANITIZE_THREAD\n")
		fmt.Fprintf(fgcc, "%s %s %s {\n", cRet, exp.ExpName, cParams)
		if resultCount > 0 {
			fmt.Fprintf(fgcc, "\t%s r;\n", cRet)
		}
		fmt.Fprintf(fgcc, "\tif(_cgo_wait_runtime_init_done)\n")
		fmt.Fprintf(fgcc, "\t\t_cgo_wait_runtime_init_done();\n")
		fmt.Fprintf(fgcc, "\t_cgo_tsan_release();\n")
		fmt.Fprint(fgcc, "\t")
		if resultCount > 0 {
			fmt.Fprint(fgcc, "r = ")
		}
		fmt.Fprintf(fgcc, "%s(", goName)
		if fn.Recv != nil {
			fmt.Fprint(fgcc, "recv")
		}
		forFieldList(fntype.Params,
			func(i int, aname string, atype ast.Expr) {
				if i > 0 || fn.Recv != nil {
					fmt.Fprintf(fgcc, ", ")
				}
				fmt.Fprintf(fgcc, "p%d", i)
			})
		fmt.Fprint(fgcc, ");\n")
		fmt.Fprintf(fgcc, "\t_cgo_tsan_acquire();\n")
		if resultCount > 0 {
			fmt.Fprint(fgcc, "\treturn r;\n")
		}
		fmt.Fprint(fgcc, "}\n")

		// Dummy declaration for _cgo_main.c
		fmt.Fprintf(fm, `char %s[1] __asm__("%s.%s");`, goName, gccgoSymbolPrefix, gccgoToSymbol(goName))
		fmt.Fprint(fm, "\n")

		// For gccgo we use a wrapper function in Go, in order
		// to call CgocallBack and CgocallBackDone.

		// This code uses printer.Fprint, not conf.Fprint,
		// because we don't want //line comments in the middle
		// of the function types.
+ fmt.Fprint(fgo2, "\n") + fmt.Fprintf(fgo2, "func %s(", goName) + if fn.Recv != nil { + fmt.Fprint(fgo2, "recv ") + printer.Fprint(fgo2, fset, fn.Recv.List[0].Type) + } + forFieldList(fntype.Params, + func(i int, aname string, atype ast.Expr) { + if i > 0 || fn.Recv != nil { + fmt.Fprintf(fgo2, ", ") + } + fmt.Fprintf(fgo2, "p%d ", i) + printer.Fprint(fgo2, fset, atype) + }) + fmt.Fprintf(fgo2, ")") + if resultCount > 0 { + fmt.Fprintf(fgo2, " (") + forFieldList(fntype.Results, + func(i int, aname string, atype ast.Expr) { + if i > 0 { + fmt.Fprint(fgo2, ", ") + } + printer.Fprint(fgo2, fset, atype) + }) + fmt.Fprint(fgo2, ")") + } + fmt.Fprint(fgo2, " {\n") + fmt.Fprint(fgo2, "\tsyscall.CgocallBack()\n") + fmt.Fprint(fgo2, "\tdefer syscall.CgocallBackDone()\n") + fmt.Fprint(fgo2, "\t") + if resultCount > 0 { + fmt.Fprint(fgo2, "return ") + } + if fn.Recv != nil { + fmt.Fprint(fgo2, "recv.") + } + fmt.Fprintf(fgo2, "%s(", exp.Func.Name) + forFieldList(fntype.Params, + func(i int, aname string, atype ast.Expr) { + if i > 0 { + fmt.Fprint(fgo2, ", ") + } + fmt.Fprintf(fgo2, "p%d", i) + }) + fmt.Fprint(fgo2, ")\n") + fmt.Fprint(fgo2, "}\n") + } + + fmt.Fprintf(fgcch, "%s", gccExportHeaderEpilog) +} + +// writeExportHeader writes out the start of the _cgo_export.h file. +func (p *Package) writeExportHeader(fgcch io.Writer) { + fmt.Fprintf(fgcch, "/* Code generated by cmd/cgo; DO NOT EDIT. */\n\n") + pkg := *importPath + if pkg == "" { + pkg = p.PackagePath + } + fmt.Fprintf(fgcch, "/* package %s */\n\n", pkg) + fmt.Fprintf(fgcch, "%s\n", builtinExportProlog) + + // Remove absolute paths from #line comments in the preamble. + // They aren't useful for people using the header file, + // and they mean that the header files change based on the + // exact location of GOPATH. + re := regexp.MustCompile(`(?m)^(#line\s+\d+\s+")[^"]*[/\\]([^"]*")`) + preamble := re.ReplaceAllString(p.Preamble, "$1$2") + + fmt.Fprintf(fgcch, "/* Start of preamble from import \"C\" comments. 
*/\n\n") + fmt.Fprintf(fgcch, "%s\n", preamble) + fmt.Fprintf(fgcch, "\n/* End of preamble from import \"C\" comments. */\n\n") + + fmt.Fprintf(fgcch, "%s\n", p.gccExportHeaderProlog()) +} + +// gccgoToSymbol converts a name to a mangled symbol for gccgo. +func gccgoToSymbol(ppath string) string { + if gccgoMangler == nil { + var err error + cmd := os.Getenv("GCCGO") + if cmd == "" { + cmd, err = exec.LookPath("gccgo") + if err != nil { + fatalf("unable to locate gccgo: %v", err) + } + } + gccgoMangler, err = pkgpath.ToSymbolFunc(cmd, *objDir) + if err != nil { + fatalf("%v", err) + } + } + return gccgoMangler(ppath) +} + +// Return the package prefix when using gccgo. +func (p *Package) gccgoSymbolPrefix() string { + if !*gccgo { + return "" + } + + if *gccgopkgpath != "" { + return gccgoToSymbol(*gccgopkgpath) + } + if *gccgoprefix == "" && p.PackageName == "main" { + return "main" + } + prefix := gccgoToSymbol(*gccgoprefix) + if prefix == "" { + prefix = "go" + } + return prefix + "." + p.PackageName +} + +// Call a function for each entry in an ast.FieldList, passing the +// index into the list, the name if any, and the type. +func forFieldList(fl *ast.FieldList, fn func(int, string, ast.Expr)) { + if fl == nil { + return + } + i := 0 + for _, r := range fl.List { + if r.Names == nil { + fn(i, "", r.Type) + i++ + } else { + for _, n := range r.Names { + fn(i, n.Name, r.Type) + i++ + } + } + } +} + +func c(repr string, args ...interface{}) *TypeRepr { + return &TypeRepr{repr, args} +} + +// Map predeclared Go types to Type. 
+var goTypes = map[string]*Type{ + "bool": {Size: 1, Align: 1, C: c("GoUint8")}, + "byte": {Size: 1, Align: 1, C: c("GoUint8")}, + "int": {Size: 0, Align: 0, C: c("GoInt")}, + "uint": {Size: 0, Align: 0, C: c("GoUint")}, + "rune": {Size: 4, Align: 4, C: c("GoInt32")}, + "int8": {Size: 1, Align: 1, C: c("GoInt8")}, + "uint8": {Size: 1, Align: 1, C: c("GoUint8")}, + "int16": {Size: 2, Align: 2, C: c("GoInt16")}, + "uint16": {Size: 2, Align: 2, C: c("GoUint16")}, + "int32": {Size: 4, Align: 4, C: c("GoInt32")}, + "uint32": {Size: 4, Align: 4, C: c("GoUint32")}, + "int64": {Size: 8, Align: 8, C: c("GoInt64")}, + "uint64": {Size: 8, Align: 8, C: c("GoUint64")}, + "float32": {Size: 4, Align: 4, C: c("GoFloat32")}, + "float64": {Size: 8, Align: 8, C: c("GoFloat64")}, + "complex64": {Size: 8, Align: 4, C: c("GoComplex64")}, + "complex128": {Size: 16, Align: 8, C: c("GoComplex128")}, +} + +// Map an ast type to a Type. +func (p *Package) cgoType(e ast.Expr) *Type { + switch t := e.(type) { + case *ast.StarExpr: + x := p.cgoType(t.X) + return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("%s*", x.C)} + case *ast.ArrayType: + if t.Len == nil { + // Slice: pointer, len, cap. + return &Type{Size: p.PtrSize * 3, Align: p.PtrSize, C: c("GoSlice")} + } + // Non-slice array types are not supported. + case *ast.StructType: + // Not supported. 
+ case *ast.FuncType: + return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("void*")} + case *ast.InterfaceType: + return &Type{Size: 2 * p.PtrSize, Align: p.PtrSize, C: c("GoInterface")} + case *ast.MapType: + return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("GoMap")} + case *ast.ChanType: + return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("GoChan")} + case *ast.Ident: + goTypesFixup := func(r *Type) *Type { + if r.Size == 0 { // int or uint + rr := new(Type) + *rr = *r + rr.Size = p.IntSize + rr.Align = p.IntSize + r = rr + } + if r.Align > p.PtrSize { + r.Align = p.PtrSize + } + return r + } + // Look up the type in the top level declarations. + // TODO: Handle types defined within a function. + for _, d := range p.Decl { + gd, ok := d.(*ast.GenDecl) + if !ok || gd.Tok != token.TYPE { + continue + } + for _, spec := range gd.Specs { + ts, ok := spec.(*ast.TypeSpec) + if !ok { + continue + } + if ts.Name.Name == t.Name { + return p.cgoType(ts.Type) + } + } + } + if def := typedef[t.Name]; def != nil { + if defgo, ok := def.Go.(*ast.Ident); ok { + switch defgo.Name { + case "complex64", "complex128": + // MSVC does not support the _Complex keyword + // nor the complex macro. + // Use GoComplex64 and GoComplex128 instead, + // which are typedef-ed to a compatible type. + // See go.dev/issues/36233. + return goTypesFixup(goTypes[defgo.Name]) + } + } + return def + } + if t.Name == "uintptr" { + return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("GoUintptr")} + } + if t.Name == "string" { + // The string data is 1 pointer + 1 (pointer-sized) int. 
+ return &Type{Size: 2 * p.PtrSize, Align: p.PtrSize, C: c("GoString")} + } + if t.Name == "error" { + return &Type{Size: 2 * p.PtrSize, Align: p.PtrSize, C: c("GoInterface")} + } + if r, ok := goTypes[t.Name]; ok { + return goTypesFixup(r) + } + error_(e.Pos(), "unrecognized Go type %s", t.Name) + return &Type{Size: 4, Align: 4, C: c("int")} + case *ast.SelectorExpr: + id, ok := t.X.(*ast.Ident) + if ok && id.Name == "unsafe" && t.Sel.Name == "Pointer" { + return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("void*")} + } + } + error_(e.Pos(), "Go type not supported in export: %s", gofmt(e)) + return &Type{Size: 4, Align: 4, C: c("int")} +} + +const gccProlog = ` +#line 1 "cgo-gcc-prolog" +/* + If x and y are not equal, the type will be invalid + (have a negative array count) and an inscrutable error will come + out of the compiler and hopefully mention "name". +*/ +#define __cgo_compile_assert_eq(x, y, name) typedef char name[(x-y)*(x-y)*-2UL+1UL]; + +/* Check at compile time that the sizes we use match our expectations. */ +#define __cgo_size_assert(t, n) __cgo_compile_assert_eq(sizeof(t), (size_t)n, _cgo_sizeof_##t##_is_not_##n) + +__cgo_size_assert(char, 1) +__cgo_size_assert(short, 2) +__cgo_size_assert(int, 4) +typedef long long __cgo_long_long; +__cgo_size_assert(__cgo_long_long, 8) +__cgo_size_assert(float, 4) +__cgo_size_assert(double, 8) + +extern char* _cgo_topofstack(void); + +/* + We use packed structs, but they are always aligned. + The pragmas and address-of-packed-member are only recognized as warning + groups in clang 4.0+, so ignore unknown pragmas first. +*/ +#pragma GCC diagnostic ignored "-Wunknown-pragmas" +#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Waddress-of-packed-member" +#pragma GCC diagnostic ignored "-Wunknown-warning-option" +#pragma GCC diagnostic ignored "-Wunaligned-access" + +#include +#include +` + +// Prologue defining TSAN functions in C. 
+const noTsanProlog = ` +#define CGO_NO_SANITIZE_THREAD +#define _cgo_tsan_acquire() +#define _cgo_tsan_release() +` + +// This must match the TSAN code in runtime/cgo/libcgo.h. +// This is used when the code is built with the C/C++ Thread SANitizer, +// which is not the same as the Go race detector. +// __tsan_acquire tells TSAN that we are acquiring a lock on a variable, +// in this case _cgo_sync. __tsan_release releases the lock. +// (There is no actual lock, we are just telling TSAN that there is.) +// +// When we call from Go to C we call _cgo_tsan_acquire. +// When the C function returns we call _cgo_tsan_release. +// Similarly, when C calls back into Go we call _cgo_tsan_release +// and then call _cgo_tsan_acquire when we return to C. +// These calls tell TSAN that there is a serialization point at the C call. +// +// This is necessary because TSAN, which is a C/C++ tool, can not see +// the synchronization in the Go code. Without these calls, when +// multiple goroutines call into C code, TSAN does not understand +// that the calls are properly synchronized on the Go side. +// +// To be clear, if the calls are not properly synchronized on the Go side, +// we will be hiding races. But when using TSAN on mixed Go C/C++ code +// it is more important to avoid false positives, which reduce confidence +// in the tool, than to avoid false negatives. +const yesTsanProlog = ` +#line 1 "cgo-tsan-prolog" +#define CGO_NO_SANITIZE_THREAD __attribute__ ((no_sanitize_thread)) + +long long _cgo_sync __attribute__ ((common)); + +extern void __tsan_acquire(void*); +extern void __tsan_release(void*); + +__attribute__ ((unused)) +static void _cgo_tsan_acquire() { + __tsan_acquire(&_cgo_sync); +} + +__attribute__ ((unused)) +static void _cgo_tsan_release() { + __tsan_release(&_cgo_sync); +} +` + +// Set to yesTsanProlog if we see -fsanitize=thread in the flags for gcc. +var tsanProlog = noTsanProlog + +// noMsanProlog is a prologue defining an MSAN function in C. 
+// This is used when not compiling with -fsanitize=memory. +const noMsanProlog = ` +#define _cgo_msan_write(addr, sz) +` + +// yesMsanProlog is a prologue defining an MSAN function in C. +// This is used when compiling with -fsanitize=memory. +// See the comment above where _cgo_msan_write is called. +const yesMsanProlog = ` +extern void __msan_unpoison(const volatile void *, size_t); + +#define _cgo_msan_write(addr, sz) __msan_unpoison((addr), (sz)) +` + +// msanProlog is set to yesMsanProlog if we see -fsanitize=memory in the flags +// for the C compiler. +var msanProlog = noMsanProlog + +const builtinProlog = ` +#line 1 "cgo-builtin-prolog" +#include + +/* Define intgo when compiling with GCC. */ +typedef ptrdiff_t intgo; + +#define GO_CGO_GOSTRING_TYPEDEF +typedef struct { const char *p; intgo n; } _GoString_; +typedef struct { char *p; intgo n; intgo c; } _GoBytes_; +_GoString_ GoString(char *p); +_GoString_ GoStringN(char *p, int l); +_GoBytes_ GoBytes(void *p, int n); +char *CString(_GoString_); +void *CBytes(_GoBytes_); +void *_CMalloc(size_t); + +__attribute__ ((unused)) +static size_t _GoStringLen(_GoString_ s) { return (size_t)s.n; } + +__attribute__ ((unused)) +static const char *_GoStringPtr(_GoString_ s) { return s.p; } +` + +const goProlog = ` +//go:linkname _cgo_runtime_cgocall runtime.cgocall +func _cgo_runtime_cgocall(unsafe.Pointer, uintptr) int32 + +//go:linkname _cgoCheckPointer runtime.cgoCheckPointer +//go:noescape +func _cgoCheckPointer(interface{}, interface{}) + +//go:linkname _cgoCheckResult runtime.cgoCheckResult +//go:noescape +func _cgoCheckResult(interface{}) +` + +const gccgoGoProlog = ` +func _cgoCheckPointer(interface{}, interface{}) + +func _cgoCheckResult(interface{}) +` + +const goStringDef = ` +//go:linkname _cgo_runtime_gostring runtime.gostring +func _cgo_runtime_gostring(*_Ctype_char) string + +// GoString converts the C string p into a Go string. 
+func _Cfunc_GoString(p *_Ctype_char) string { + return _cgo_runtime_gostring(p) +} +` + +const goStringNDef = ` +//go:linkname _cgo_runtime_gostringn runtime.gostringn +func _cgo_runtime_gostringn(*_Ctype_char, int) string + +// GoStringN converts the C data p with explicit length l to a Go string. +func _Cfunc_GoStringN(p *_Ctype_char, l _Ctype_int) string { + return _cgo_runtime_gostringn(p, int(l)) +} +` + +const goBytesDef = ` +//go:linkname _cgo_runtime_gobytes runtime.gobytes +func _cgo_runtime_gobytes(unsafe.Pointer, int) []byte + +// GoBytes converts the C data p with explicit length l to a Go []byte. +func _Cfunc_GoBytes(p unsafe.Pointer, l _Ctype_int) []byte { + return _cgo_runtime_gobytes(p, int(l)) +} +` + +const cStringDef = ` +// CString converts the Go string s to a C string. +// +// The C string is allocated in the C heap using malloc. +// It is the caller's responsibility to arrange for it to be +// freed, such as by calling C.free (be sure to include stdlib.h +// if C.free is needed). +func _Cfunc_CString(s string) *_Ctype_char { + if len(s)+1 <= 0 { + panic("string too large") + } + p := _cgo_cmalloc(uint64(len(s)+1)) + sliceHeader := struct { + p unsafe.Pointer + len int + cap int + }{p, len(s)+1, len(s)+1} + b := *(*[]byte)(unsafe.Pointer(&sliceHeader)) + copy(b, s) + b[len(s)] = 0 + return (*_Ctype_char)(p) +} +` + +const cBytesDef = ` +// CBytes converts the Go []byte slice b to a C array. +// +// The C array is allocated in the C heap using malloc. +// It is the caller's responsibility to arrange for it to be +// freed, such as by calling C.free (be sure to include stdlib.h +// if C.free is needed). 
+func _Cfunc_CBytes(b []byte) unsafe.Pointer { + p := _cgo_cmalloc(uint64(len(b))) + sliceHeader := struct { + p unsafe.Pointer + len int + cap int + }{p, len(b), len(b)} + s := *(*[]byte)(unsafe.Pointer(&sliceHeader)) + copy(s, b) + return p +} +` + +const cMallocDef = ` +func _Cfunc__CMalloc(n _Ctype_size_t) unsafe.Pointer { + return _cgo_cmalloc(uint64(n)) +} +` + +var builtinDefs = map[string]string{ + "GoString": goStringDef, + "GoStringN": goStringNDef, + "GoBytes": goBytesDef, + "CString": cStringDef, + "CBytes": cBytesDef, + "_CMalloc": cMallocDef, +} + +// Definitions for C.malloc in Go and in C. We define it ourselves +// since we call it from functions we define, such as C.CString. +// Also, we have historically ensured that C.malloc does not return +// nil even for an allocation of 0. + +const cMallocDefGo = ` +//go:cgo_import_static _cgoPREFIX_Cfunc__Cmalloc +//go:linkname __cgofn__cgoPREFIX_Cfunc__Cmalloc _cgoPREFIX_Cfunc__Cmalloc +var __cgofn__cgoPREFIX_Cfunc__Cmalloc byte +var _cgoPREFIX_Cfunc__Cmalloc = unsafe.Pointer(&__cgofn__cgoPREFIX_Cfunc__Cmalloc) + +//go:linkname runtime_throw runtime.throw +func runtime_throw(string) + +//go:cgo_unsafe_args +func _cgo_cmalloc(p0 uint64) (r1 unsafe.Pointer) { + _cgo_runtime_cgocall(_cgoPREFIX_Cfunc__Cmalloc, uintptr(unsafe.Pointer(&p0))) + if r1 == nil { + runtime_throw("runtime: C malloc failed") + } + return +} +` + +// cMallocDefC defines the C version of C.malloc for the gc compiler. +// It is defined here because C.CString and friends need a definition. +// We define it by hand, rather than simply inventing a reference to +// C.malloc, because may not have been included. +// This is approximately what writeOutputFunc would generate, but +// skips the cgo_topofstack code (which is only needed if the C code +// calls back into Go). This also avoids returning nil for an +// allocation of 0 bytes. 
+const cMallocDefC = ` +CGO_NO_SANITIZE_THREAD +void _cgoPREFIX_Cfunc__Cmalloc(void *v) { + struct { + unsigned long long p0; + void *r1; + } PACKED *a = v; + void *ret; + _cgo_tsan_acquire(); + ret = malloc(a->p0); + if (ret == 0 && a->p0 == 0) { + ret = malloc(1); + } + a->r1 = ret; + _cgo_tsan_release(); +} +` + +func (p *Package) cPrologGccgo() string { + r := strings.NewReplacer( + "PREFIX", cPrefix, + "GCCGOSYMBOLPREF", p.gccgoSymbolPrefix(), + "_cgoCheckPointer", gccgoToSymbol("_cgoCheckPointer"), + "_cgoCheckResult", gccgoToSymbol("_cgoCheckResult")) + return r.Replace(cPrologGccgo) +} + +const cPrologGccgo = ` +#line 1 "cgo-c-prolog-gccgo" +#include +#include +#include + +typedef unsigned char byte; +typedef intptr_t intgo; + +struct __go_string { + const unsigned char *__data; + intgo __length; +}; + +typedef struct __go_open_array { + void* __values; + intgo __count; + intgo __capacity; +} Slice; + +struct __go_string __go_byte_array_to_string(const void* p, intgo len); +struct __go_open_array __go_string_to_byte_array (struct __go_string str); + +extern void runtime_throw(const char *); + +const char *_cgoPREFIX_Cfunc_CString(struct __go_string s) { + char *p = malloc(s.__length+1); + if(p == NULL) + runtime_throw("runtime: C malloc failed"); + memmove(p, s.__data, s.__length); + p[s.__length] = 0; + return p; +} + +void *_cgoPREFIX_Cfunc_CBytes(struct __go_open_array b) { + char *p = malloc(b.__count); + if(p == NULL) + runtime_throw("runtime: C malloc failed"); + memmove(p, b.__values, b.__count); + return p; +} + +struct __go_string _cgoPREFIX_Cfunc_GoString(char *p) { + intgo len = (p != NULL) ? 
strlen(p) : 0; + return __go_byte_array_to_string(p, len); +} + +struct __go_string _cgoPREFIX_Cfunc_GoStringN(char *p, int32_t n) { + return __go_byte_array_to_string(p, n); +} + +Slice _cgoPREFIX_Cfunc_GoBytes(char *p, int32_t n) { + struct __go_string s = { (const unsigned char *)p, n }; + return __go_string_to_byte_array(s); +} + +void *_cgoPREFIX_Cfunc__CMalloc(size_t n) { + void *p = malloc(n); + if(p == NULL && n == 0) + p = malloc(1); + if(p == NULL) + runtime_throw("runtime: C malloc failed"); + return p; +} + +struct __go_type_descriptor; +typedef struct __go_empty_interface { + const struct __go_type_descriptor *__type_descriptor; + void *__object; +} Eface; + +extern void runtimeCgoCheckPointer(Eface, Eface) + __asm__("runtime.cgoCheckPointer") + __attribute__((weak)); + +extern void localCgoCheckPointer(Eface, Eface) + __asm__("GCCGOSYMBOLPREF._cgoCheckPointer"); + +void localCgoCheckPointer(Eface ptr, Eface arg) { + if(runtimeCgoCheckPointer) { + runtimeCgoCheckPointer(ptr, arg); + } +} + +extern void runtimeCgoCheckResult(Eface) + __asm__("runtime.cgoCheckResult") + __attribute__((weak)); + +extern void localCgoCheckResult(Eface) + __asm__("GCCGOSYMBOLPREF._cgoCheckResult"); + +void localCgoCheckResult(Eface val) { + if(runtimeCgoCheckResult) { + runtimeCgoCheckResult(val); + } +} +` + +// builtinExportProlog is a shorter version of builtinProlog, +// to be put into the _cgo_export.h file. +// For historical reasons we can't use builtinProlog in _cgo_export.h, +// because _cgo_export.h defines GoString as a struct while builtinProlog +// defines it as a function. We don't change this to avoid unnecessarily +// breaking existing code. +// The test of GO_CGO_GOSTRING_TYPEDEF avoids a duplicate definition +// error if a Go file with a cgo comment #include's the export header +// generated by a different package. 
+const builtinExportProlog = ` +#line 1 "cgo-builtin-export-prolog" + +#include + +#ifndef GO_CGO_EXPORT_PROLOGUE_H +#define GO_CGO_EXPORT_PROLOGUE_H + +#ifndef GO_CGO_GOSTRING_TYPEDEF +typedef struct { const char *p; ptrdiff_t n; } _GoString_; +#endif + +#endif +` + +func (p *Package) gccExportHeaderProlog() string { + return strings.Replace(gccExportHeaderProlog, "GOINTBITS", fmt.Sprint(8*p.IntSize), -1) +} + +// gccExportHeaderProlog is written to the exported header, after the +// import "C" comment preamble but before the generated declarations +// of exported functions. This permits the generated declarations to +// use the type names that appear in goTypes, above. +// +// The test of GO_CGO_GOSTRING_TYPEDEF avoids a duplicate definition +// error if a Go file with a cgo comment #include's the export header +// generated by a different package. Unfortunately GoString means two +// different things: in this prolog it means a C name for the Go type, +// while in the prolog written into the start of the C code generated +// from a cgo-using Go file it means the C.GoString function. There is +// no way to resolve this conflict, but it also doesn't make much +// difference, as Go code never wants to refer to the latter meaning. +const gccExportHeaderProlog = ` +/* Start of boilerplate cgo prologue. 
*/ +#line 1 "cgo-gcc-export-header-prolog" + +#ifndef GO_CGO_PROLOGUE_H +#define GO_CGO_PROLOGUE_H + +typedef signed char GoInt8; +typedef unsigned char GoUint8; +typedef short GoInt16; +typedef unsigned short GoUint16; +typedef int GoInt32; +typedef unsigned int GoUint32; +typedef long long GoInt64; +typedef unsigned long long GoUint64; +typedef GoIntGOINTBITS GoInt; +typedef GoUintGOINTBITS GoUint; +typedef size_t GoUintptr; +typedef float GoFloat32; +typedef double GoFloat64; +#ifdef _MSC_VER +#include +typedef _Fcomplex GoComplex64; +typedef _Dcomplex GoComplex128; +#else +typedef float _Complex GoComplex64; +typedef double _Complex GoComplex128; +#endif + +/* + static assertion to make sure the file is being used on architecture + at least with matching size of GoInt. +*/ +typedef char _check_for_GOINTBITS_bit_pointer_matching_GoInt[sizeof(void*)==GOINTBITS/8 ? 1:-1]; + +#ifndef GO_CGO_GOSTRING_TYPEDEF +typedef _GoString_ GoString; +#endif +typedef void *GoMap; +typedef void *GoChan; +typedef struct { void *t; void *v; } GoInterface; +typedef struct { void *data; GoInt len; GoInt cap; } GoSlice; + +#endif + +/* End of boilerplate cgo prologue. */ + +#ifdef __cplusplus +extern "C" { +#endif +` + +// gccExportHeaderEpilog goes at the end of the generated header file. +const gccExportHeaderEpilog = ` +#ifdef __cplusplus +} +#endif +` + +// gccgoExportFileProlog is written to the _cgo_export.c file when +// using gccgo. +// We use weak declarations, and test the addresses, so that this code +// works with older versions of gccgo. 
+const gccgoExportFileProlog = ` +#line 1 "cgo-gccgo-export-file-prolog" +extern _Bool runtime_iscgo __attribute__ ((weak)); + +static void GoInit(void) __attribute__ ((constructor)); +static void GoInit(void) { + if(&runtime_iscgo) + runtime_iscgo = 1; +} + +extern size_t _cgo_wait_runtime_init_done(void) __attribute__ ((weak)); +` diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/util.go b/platform/dbops/binaries/go/go/src/cmd/cgo/util.go new file mode 100644 index 0000000000000000000000000000000000000000..054cd6c5c729e9e9af7593b8dd4e0e42015cc2cc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/util.go @@ -0,0 +1,114 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "fmt" + "go/token" + "os" + "os/exec" +) + +// run runs the command argv, feeding in stdin on standard input. +// It returns the output to standard output and standard error. +// ok indicates whether the command exited successfully. +func run(stdin []byte, argv []string) (stdout, stderr []byte, ok bool) { + if i := find(argv, "-xc"); i >= 0 && argv[len(argv)-1] == "-" { + // Some compilers have trouble with standard input. + // Others have trouble with -xc. + // Avoid both problems by writing a file with a .c extension. + f, err := os.CreateTemp("", "cgo-gcc-input-") + if err != nil { + fatalf("%s", err) + } + name := f.Name() + f.Close() + if err := os.WriteFile(name+".c", stdin, 0666); err != nil { + os.Remove(name) + fatalf("%s", err) + } + defer os.Remove(name) + defer os.Remove(name + ".c") + + // Build new argument list without -xc and trailing -. + new := append(argv[:i:i], argv[i+1:len(argv)-1]...) + + // Since we are going to write the file to a temporary directory, + // we will need to add -I . 
explicitly to the command line: + // any #include "foo" before would have looked in the current + // directory as the directory "holding" standard input, but now + // the temporary directory holds the input. + // We've also run into compilers that reject "-I." but allow "-I", ".", + // so be sure to use two arguments. + // This matters mainly for people invoking cgo -godefs by hand. + new = append(new, "-I", ".") + + // Finish argument list with path to C file. + new = append(new, name+".c") + + argv = new + stdin = nil + } + + p := exec.Command(argv[0], argv[1:]...) + p.Stdin = bytes.NewReader(stdin) + var bout, berr bytes.Buffer + p.Stdout = &bout + p.Stderr = &berr + // Disable escape codes in clang error messages. + p.Env = append(os.Environ(), "TERM=dumb") + err := p.Run() + if _, ok := err.(*exec.ExitError); err != nil && !ok { + fatalf("exec %s: %s", argv[0], err) + } + ok = p.ProcessState.Success() + stdout, stderr = bout.Bytes(), berr.Bytes() + return +} + +func find(argv []string, target string) int { + for i, arg := range argv { + if arg == target { + return i + } + } + return -1 +} + +func lineno(pos token.Pos) string { + return fset.Position(pos).String() +} + +// Die with an error message. +func fatalf(msg string, args ...interface{}) { + // If we've already printed other errors, they might have + // caused the fatal condition. Assume they're enough. + if nerrors == 0 { + fmt.Fprintf(os.Stderr, "cgo: "+msg+"\n", args...) + } + os.Exit(2) +} + +var nerrors int + +func error_(pos token.Pos, msg string, args ...interface{}) { + nerrors++ + if pos.IsValid() { + fmt.Fprintf(os.Stderr, "%s: ", fset.Position(pos).String()) + } else { + fmt.Fprintf(os.Stderr, "cgo: ") + } + fmt.Fprintf(os.Stderr, msg, args...) 
+ fmt.Fprintf(os.Stderr, "\n") +} + +func creat(name string) *os.File { + f, err := os.Create(name) + if err != nil { + fatalf("%s", err) + } + return f +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cgo/zdefaultcc.go b/platform/dbops/binaries/go/go/src/cmd/cgo/zdefaultcc.go new file mode 100644 index 0000000000000000000000000000000000000000..cd59ad1bf5f6bfbf2c3ed0b32b20d58ec56b9709 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cgo/zdefaultcc.go @@ -0,0 +1,23 @@ +// Code generated by go tool dist; DO NOT EDIT. + +package main + +const defaultPkgConfig = `pkg-config` +func defaultCC(goos, goarch string) string { + switch goos+`/`+goarch { + } + switch goos { + case "darwin", "ios", "freebsd", "openbsd": + return "clang" + } + return "gcc" +} +func defaultCXX(goos, goarch string) string { + switch goos+`/`+goarch { + } + switch goos { + case "darwin", "ios", "freebsd", "openbsd": + return "clang++" + } + return "g++" +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/README.md b/platform/dbops/binaries/go/go/src/cmd/compile/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9b99a1b10535e7c560bce30aa589e5357f7c888e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/README.md @@ -0,0 +1,316 @@ + + +## Introduction to the Go compiler + +`cmd/compile` contains the main packages that form the Go compiler. The compiler +may be logically split in four phases, which we will briefly describe alongside +the list of packages that contain their code. + +You may sometimes hear the terms "front-end" and "back-end" when referring to +the compiler. Roughly speaking, these translate to the first two and last two +phases we are going to list here. A third term, "middle-end", often refers to +much of the work that happens in the second phase. + +Note that the `go/*` family of packages, such as `go/parser` and +`go/types`, are mostly unused by the compiler. 
Since the compiler was +initially written in C, the `go/*` packages were developed to enable +writing tools working with Go code, such as `gofmt` and `vet`. +However, over time the compiler's internal APIs have slowly evolved to +be more familiar to users of the `go/*` packages. + +It should be clarified that the name "gc" stands for "Go compiler", and has +little to do with uppercase "GC", which stands for garbage collection. + +### 1. Parsing + +* `cmd/compile/internal/syntax` (lexer, parser, syntax tree) + +In the first phase of compilation, source code is tokenized (lexical analysis), +parsed (syntax analysis), and a syntax tree is constructed for each source +file. + +Each syntax tree is an exact representation of the respective source file, with +nodes corresponding to the various elements of the source such as expressions, +declarations, and statements. The syntax tree also includes position information +which is used for error reporting and the creation of debugging information. + +### 2. Type checking + +* `cmd/compile/internal/types2` (type checking) + +The types2 package is a port of `go/types` to use the syntax package's +AST instead of `go/ast`. + +### 3. IR construction ("noding") + +* `cmd/compile/internal/types` (compiler types) +* `cmd/compile/internal/ir` (compiler AST) +* `cmd/compile/internal/noder` (create compiler AST) + +The compiler middle end uses its own AST definition and representation of Go +types carried over from when it was written in C. All of its code is written in +terms of these, so the next step after type checking is to convert the syntax +and types2 representations to ir and types. This process is referred to as +"noding." + +Noding using a process called Unified IR, which builds a node representation +using a serialized version of the typechecked code from step 2. +Unified IR is also involved in import/export of packages and inlining. + +### 4. 
Middle end + +* `cmd/compile/internal/deadcode` (dead code elimination) +* `cmd/compile/internal/inline` (function call inlining) +* `cmd/compile/internal/devirtualize` (devirtualization of known interface method calls) +* `cmd/compile/internal/escape` (escape analysis) + +Several optimization passes are performed on the IR representation: +dead code elimination, (early) devirtualization, function call +inlining, and escape analysis. + +### 5. Walk + +* `cmd/compile/internal/walk` (order of evaluation, desugaring) + +The final pass over the IR representation is "walk," which serves two purposes: + +1. It decomposes complex statements into individual, simpler statements, + introducing temporary variables and respecting order of evaluation. This step + is also referred to as "order." + +2. It desugars higher-level Go constructs into more primitive ones. For example, + `switch` statements are turned into binary search or jump tables, and + operations on maps and channels are replaced with runtime calls. + +### 6. Generic SSA + +* `cmd/compile/internal/ssa` (SSA passes and rules) +* `cmd/compile/internal/ssagen` (converting IR to SSA) + +In this phase, IR is converted into Static Single Assignment (SSA) form, a +lower-level intermediate representation with specific properties that make it +easier to implement optimizations and to eventually generate machine code from +it. + +During this conversion, function intrinsics are applied. These are special +functions that the compiler has been taught to replace with heavily optimized +code on a case-by-case basis. + +Certain nodes are also lowered into simpler components during the AST to SSA +conversion, so that the rest of the compiler can work with them. For instance, +the copy builtin is replaced by memory moves, and range loops are rewritten into +for loops. Some of these currently happen before the conversion to SSA due to +historical reasons, but the long-term plan is to move all of them here. 
+ +Then, a series of machine-independent passes and rules are applied. These do not +concern any single computer architecture, and thus run on all `GOARCH` variants. +These passes include dead code elimination, removal of +unneeded nil checks, and removal of unused branches. The generic rewrite rules +mainly concern expressions, such as replacing some expressions with constant +values, and optimizing multiplications and float operations. + +### 7. Generating machine code + +* `cmd/compile/internal/ssa` (SSA lowering and arch-specific passes) +* `cmd/internal/obj` (machine code generation) + +The machine-dependent phase of the compiler begins with the "lower" pass, which +rewrites generic values into their machine-specific variants. For example, on +amd64 memory operands are possible, so many load-store operations may be combined. + +Note that the lower pass runs all machine-specific rewrite rules, and thus it +currently applies lots of optimizations too. + +Once the SSA has been "lowered" and is more specific to the target architecture, +the final code optimization passes are run. This includes yet another dead code +elimination pass, moving values closer to their uses, the removal of local +variables that are never read from, and register allocation. + +Other important pieces of work done as part of this step include stack frame +layout, which assigns stack offsets to local variables, and pointer liveness +analysis, which computes which on-stack pointers are live at each GC safe point. + +At the end of the SSA generation phase, Go functions have been transformed into +a series of obj.Prog instructions. These are passed to the assembler +(`cmd/internal/obj`), which turns them into machine code and writes out the +final object file. The object file will also contain reflect data, export data, +and debugging information. + +### 8. 
Tips + +#### Getting Started + +* If you have never contributed to the compiler before, a simple way to begin + can be adding a log statement or `panic("here")` to get some + initial insight into whatever you are investigating. + +* The compiler itself provides logging, debugging and visualization capabilities, + such as: + ``` + $ go build -gcflags=-m=2 # print optimization info, including inlining, escape analysis + $ go build -gcflags=-d=ssa/check_bce/debug # print bounds check info + $ go build -gcflags=-W # print internal parse tree after type checking + $ GOSSAFUNC=Foo go build # generate ssa.html file for func Foo + $ go build -gcflags=-S # print assembly + $ go tool compile -bench=out.txt x.go # print timing of compiler phases + ``` + + Some flags alter the compiler behavior, such as: + ``` + $ go tool compile -h file.go # panic on first compile error encountered + $ go build -gcflags=-d=checkptr=2 # enable additional unsafe pointer checking + ``` + + There are many additional flags. Some descriptions are available via: + ``` + $ go tool compile -h # compiler flags, e.g., go build -gcflags='-m=1 -l' + $ go tool compile -d help # debug flags, e.g., go build -gcflags=-d=checkptr=2 + $ go tool compile -d ssa/help # ssa flags, e.g., go build -gcflags=-d=ssa/prove/debug=2 + ``` + + There are some additional details about `-gcflags` and the differences between `go build` + vs. `go tool compile` in a [section below](#-gcflags-and-go-build-vs-go-tool-compile). + +* In general, when investigating a problem in the compiler you usually want to + start with the simplest possible reproduction and understand exactly what is + happening with it. + +#### Testing your changes + +* Be sure to read the [Quickly testing your changes](https://go.dev/doc/contribute#quick_test) + section of the Go Contribution Guide. 
+ +* Some tests live within the cmd/compile packages and can be run by `go test ./...` or similar, + but many cmd/compile tests are in the top-level + [test](https://github.com/golang/go/tree/master/test) directory: + + ``` + $ go test cmd/internal/testdir # all tests in 'test' dir + $ go test cmd/internal/testdir -run='Test/escape.*.go' # test specific files in 'test' dir + ``` + For details, see the [testdir README](https://github.com/golang/go/tree/master/test#readme). + The `errorCheck` method in [testdir_test.go](https://github.com/golang/go/blob/master/src/cmd/internal/testdir/testdir_test.go) + is helpful for a description of the `ERROR` comments used in many of those tests. + + In addition, the `go/types` package from the standard library and `cmd/compile/internal/types2` + have shared tests in `src/internal/types/testdata`, and both type checkers + should be checked if anything changes there. + +* The new [application-based coverage profiling](https://go.dev/testing/coverage/) can be used + with the compiler, such as: + + ``` + $ go install -cover -coverpkg=cmd/compile/... cmd/compile # build compiler with coverage instrumentation + $ mkdir /tmp/coverdir # pick location for coverage data + $ GOCOVERDIR=/tmp/coverdir go test [...] # use compiler, saving coverage data + $ go tool covdata textfmt -i=/tmp/coverdir -o coverage.out # convert to traditional coverage format + $ go tool cover -html coverage.out # view coverage via traditional tools + ``` + +#### Juggling compiler versions + +* Many of the compiler tests use the version of the `go` command found in your PATH and + its corresponding `compile` binary. + +* If you are in a branch and your PATH includes `<go-repo>/bin`, + doing `go install cmd/compile` will build the compiler using the code from your + branch and install it to the proper location so that subsequent `go` commands + like `go build` or `go test ./...` will exercise your freshly built compiler. 
+ +* [toolstash](https://pkg.go.dev/golang.org/x/tools/cmd/toolstash) provides a way + to save, run, and restore a known good copy of the Go toolchain. For example, it can be + a good practice to initially build your branch, save that version of + the toolchain, then restore the known good version of the tools to compile + your work-in-progress version of the compiler. + + Sample set up steps: + ``` + $ go install golang.org/x/tools/cmd/toolstash@latest + $ git clone https://go.googlesource.com/go + $ cd go + $ git checkout -b mybranch + $ ./src/all.bash # build and confirm good starting point + $ export PATH=$PWD/bin:$PATH + $ toolstash save # save current tools + ``` + After that, your edit/compile/test cycle can be similar to: + ``` + <... make edits to cmd/compile source ...> + $ toolstash restore && go install cmd/compile # restore known good tools to build compiler + <... 'go build', 'go test', etc. ...> # use freshly built compiler + ``` + +* toolstash also allows comparing the installed vs. stashed copy of + the compiler, such as if you expect equivalent behavior after a refactor. + For example, to check that your changed compiler produces identical object files to + the stashed compiler while building the standard library: + ``` + $ toolstash restore && go install cmd/compile # build latest compiler + $ go build -toolexec "toolstash -cmp" -a -v std # compare latest vs. saved compiler + ``` + +* If versions appear to get out of sync (for example, with errors like + `linked object header mismatch` with version strings like + `devel go1.21-db3f952b1f`), you might need to do + `toolstash restore && go install cmd/...` to update all the tools under cmd. + +#### Additional helpful tools + +* [compilebench](https://pkg.go.dev/golang.org/x/tools/cmd/compilebench) benchmarks + the speed of the compiler. 
+ +* [benchstat](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) is the standard tool + for reporting performance changes resulting from compiler modifications, + including whether any improvements are statistically significant: + ``` + $ go test -bench=SomeBenchmarks -count=20 > new.txt # use new compiler + $ toolstash restore # restore old compiler + $ go test -bench=SomeBenchmarks -count=20 > old.txt # use old compiler + $ benchstat old.txt new.txt # compare old vs. new + ``` + +* [bent](https://pkg.go.dev/golang.org/x/benchmarks/cmd/bent) facilitates running a + large set of benchmarks from various community Go projects inside a Docker container. + +* [perflock](https://github.com/aclements/perflock) helps obtain more consistent + benchmark results, including by manipulating CPU frequency scaling settings on Linux. + +* [view-annotated-file](https://github.com/loov/view-annotated-file) (from the community) + overlays inlining, bounds check, and escape info back onto the source code. + +* [godbolt.org](https://go.godbolt.org) is widely used to examine + and share assembly output from many compilers, including the Go compiler. It can also + [compare](https://go.godbolt.org/z/5Gs1G4bKG) assembly for different versions of + a function or across Go compiler versions, which can be helpful for investigations and + bug reports. + +#### -gcflags and 'go build' vs. 'go tool compile' + +* `-gcflags` is a go command [build flag](https://pkg.go.dev/cmd/go#hdr-Compile_packages_and_dependencies). + `go build -gcflags=<args>` passes the supplied `<args>` to the underlying + `compile` invocation(s) while still doing everything that the `go build` command + normally does (e.g., handling the build cache, modules, and so on). In contrast, + `go tool compile <args>` asks the `go` command to invoke `compile <args>` a single time + without involving the standard `go build` machinery. 
In some cases, it can be helpful to have + fewer moving parts by doing `go tool compile <args>`, such as if you have a + small standalone source file that can be compiled without any assistance from `go build`. + In other cases, it is more convenient to pass `-gcflags` to a build command like + `go build`, `go test`, or `go install`. + +* `-gcflags` by default applies to the packages named on the command line, but can + use package patterns such as `-gcflags='all=-m=1 -l'`, or multiple package patterns such as + `-gcflags='all=-m=1' -gcflags='fmt=-m=2'`. For details, see the + [cmd/go documentation](https://pkg.go.dev/cmd/go#hdr-Compile_packages_and_dependencies). + +### Further reading + +To dig deeper into how the SSA package works, including its passes and rules, +head to [cmd/compile/internal/ssa/README.md](internal/ssa/README.md). + +Finally, if something in this README or the SSA README is unclear +or if you have an idea for an improvement, feel free to leave a comment in +[issue 30074](https://go.dev/issue/30074). diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/abi-internal.md b/platform/dbops/binaries/go/go/src/cmd/compile/abi-internal.md new file mode 100644 index 0000000000000000000000000000000000000000..eae230dc070d86964125ee2472f9850c091c4e4e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/abi-internal.md @@ -0,0 +1,973 @@ +# Go internal ABI specification + +Self-link: [go.dev/s/regabi](https://go.dev/s/regabi) + +This document describes Go’s internal application binary interface +(ABI), known as ABIInternal. +Go's ABI defines the layout of data in memory and the conventions for +calling between Go functions. +This ABI is *unstable* and will change between Go versions. +If you’re writing assembly code, please instead refer to Go’s +[assembly documentation](/doc/asm.html), which describes Go’s stable +ABI, known as ABI0. + +All functions defined in Go source follow ABIInternal. 
+However, ABIInternal and ABI0 functions are able to call each other +through transparent *ABI wrappers*, described in the [internal calling +convention proposal](https://golang.org/design/27539-internal-abi). + +Go uses a common ABI design across all architectures. +We first describe the common ABI, and then cover per-architecture +specifics. + +*Rationale*: For the reasoning behind using a common ABI across +architectures instead of the platform ABI, see the [register-based Go +calling convention proposal](https://golang.org/design/40724-register-calling). + +## Memory layout + +Go's built-in types have the following sizes and alignments. +Many, though not all, of these sizes are guaranteed by the [language +specification](/doc/go_spec.html#Size_and_alignment_guarantees). +Those that aren't guaranteed may change in future versions of Go (for +example, we've considered changing the alignment of int64 on 32-bit). + +| Type | 64-bit | | 32-bit | | +|-----------------------------|--------|-------|--------|-------| +| | Size | Align | Size | Align | +| bool, uint8, int8 | 1 | 1 | 1 | 1 | +| uint16, int16 | 2 | 2 | 2 | 2 | +| uint32, int32 | 4 | 4 | 4 | 4 | +| uint64, int64 | 8 | 8 | 8 | 4 | +| int, uint | 8 | 8 | 4 | 4 | +| float32 | 4 | 4 | 4 | 4 | +| float64 | 8 | 8 | 8 | 4 | +| complex64 | 8 | 4 | 8 | 4 | +| complex128 | 16 | 8 | 16 | 4 | +| uintptr, *T, unsafe.Pointer | 8 | 8 | 4 | 4 | + +The types `byte` and `rune` are aliases for `uint8` and `int32`, +respectively, and hence have the same size and alignment as these +types. + +The layout of `map`, `chan`, and `func` types is equivalent to *T. + +To describe the layout of the remaining composite types, we first +define the layout of a *sequence* S of N fields with types +t1, t2, ..., tN. 
+We define the byte offset at which each field begins relative to a +base address of 0, as well as the size and alignment of the sequence +as follows: + +``` +offset(S, i) = 0 if i = 1 + = align(offset(S, i-1) + sizeof(t_(i-1)), alignof(t_i)) +alignof(S) = 1 if N = 0 + = max(alignof(t_i) | 1 <= i <= N) +sizeof(S) = 0 if N = 0 + = align(offset(S, N) + sizeof(t_N), alignof(S)) +``` + +Where sizeof(T) and alignof(T) are the size and alignment of type T, +respectively, and align(x, y) rounds x up to a multiple of y. + +The `interface{}` type is a sequence of 1. a pointer to the runtime type +description for the interface's dynamic type and 2. an `unsafe.Pointer` +data field. +Any other interface type (besides the empty interface) is a sequence +of 1. a pointer to the runtime "itab" that gives the method pointers and +the type of the data field and 2. an `unsafe.Pointer` data field. +An interface can be "direct" or "indirect" depending on the dynamic +type: a direct interface stores the value directly in the data field, +and an indirect interface stores a pointer to the value in the data +field. +An interface can only be direct if the value consists of a single +pointer word. + +An array type `[N]T` is a sequence of N fields of type T. + +The slice type `[]T` is a sequence of a `*[cap]T` pointer to the slice +backing store, an `int` giving the `len` of the slice, and an `int` +giving the `cap` of the slice. + +The `string` type is a sequence of a `*[len]byte` pointer to the +string backing store, and an `int` giving the `len` of the string. + +A struct type `struct { f1 t1; ...; fM tM }` is laid out as the +sequence t1, ..., tM, tP, where tP is either: + +- Type `byte` if sizeof(t_M) = 0 and any of sizeof(t_i) ≠ 0. +- Empty (size 0 and align 1) otherwise. + +The padding byte prevents creating a past-the-end pointer by taking +the address of the final, empty fM field. 
+ +Note that user-written assembly code should generally not depend on Go +type layout and should instead use the constants defined in +[`go_asm.h`](/doc/asm.html#data-offsets). + +## Function call argument and result passing + +Function calls pass arguments and results using a combination of the +stack and machine registers. +Each argument or result is passed either entirely in registers or +entirely on the stack. +Because access to registers is generally faster than access to the +stack, arguments and results are preferentially passed in registers. +However, any argument or result that contains a non-trivial array or +does not fit entirely in the remaining available registers is passed +on the stack. + +Each architecture defines a sequence of integer registers and a +sequence of floating-point registers. +At a high level, arguments and results are recursively broken down +into values of base types and these base values are assigned to +registers from these sequences. + +Arguments and results can share the same registers, but do not share +the same stack space. +Beyond the arguments and results passed on the stack, the caller also +reserves spill space on the stack for all register-based arguments +(but does not populate this space). + +The receiver, arguments, and results of function or method F are +assigned to registers or the stack using the following algorithm: + +1. Let NI and NFP be the length of integer and floating-point register + sequences defined by the architecture. + Let I and FP be 0; these are the indexes of the next integer and + floating-point register. + Let S, the type sequence defining the stack frame, be empty. +1. If F is a method, assign F’s receiver. +1. For each argument A of F, assign A. +1. Add a pointer-alignment field to S. This has size 0 and the same + alignment as `uintptr`. +1. Reset I and FP to 0. +1. For each result R of F, assign R. +1. Add a pointer-alignment field to S. +1. 
For each register-assigned receiver and argument of F, let T be its + type and add T to the stack sequence S. + This is the argument's (or receiver's) spill space and will be + uninitialized at the call. +1. Add a pointer-alignment field to S. + +Assigning a receiver, argument, or result V of underlying type T works +as follows: + +1. Remember I and FP. +1. If T has zero size, add T to the stack sequence S and return. +1. Try to register-assign V. +1. If step 3 failed, reset I and FP to the values from step 1, add T + to the stack sequence S, and assign V to this field in S. + +Register-assignment of a value V of underlying type T works as follows: + +1. If T is a boolean or integral type that fits in an integer + register, assign V to register I and increment I. +1. If T is an integral type that fits in two integer registers, assign + the least significant and most significant halves of V to registers + I and I+1, respectively, and increment I by 2 +1. If T is a floating-point type and can be represented without loss + of precision in a floating-point register, assign V to register FP + and increment FP. +1. If T is a complex type, recursively register-assign its real and + imaginary parts. +1. If T is a pointer type, map type, channel type, or function type, + assign V to register I and increment I. +1. If T is a string type, interface type, or slice type, recursively + register-assign V’s components (2 for strings and interfaces, 3 for + slices). +1. If T is a struct type, recursively register-assign each field of V. +1. If T is an array type of length 0, do nothing. +1. If T is an array type of length 1, recursively register-assign its + one element. +1. If T is an array type of length > 1, fail. +1. If I > NI or FP > NFP, fail. +1. If any recursive assignment above fails, fail. + +The above algorithm produces an assignment of each receiver, argument, +and result to registers or to a field in the stack sequence. 
+The final stack sequence looks like: stack-assigned receiver, +stack-assigned arguments, pointer-alignment, stack-assigned results, +pointer-alignment, spill space for each register-assigned argument, +pointer-alignment. +The following diagram shows what this stack frame looks like on the +stack, using the typical convention where address 0 is at the bottom: + + +------------------------------+ + | . . . | + | 2nd reg argument spill space | + | 1st reg argument spill space | + | | + | . . . | + | 2nd stack-assigned result | + | 1st stack-assigned result | + | | + | . . . | + | 2nd stack-assigned argument | + | 1st stack-assigned argument | + | stack-assigned receiver | + +------------------------------+ ↓ lower addresses + +To perform a call, the caller reserves space starting at the lowest +address in its stack frame for the call stack frame, stores arguments +in the registers and argument stack fields determined by the above +algorithm, and performs the call. +At the time of a call, spill space, result stack fields, and result +registers are left uninitialized. +Upon return, the callee must have stored results to all result +registers and result stack fields determined by the above algorithm. + +There are no callee-save registers, so a call may overwrite any +register that doesn’t have a fixed meaning, including argument +registers. + +### Example + +Consider the function `func f(a1 uint8, a2 [2]uintptr, a3 uint8) (r1 +struct { x uintptr; y [2]uintptr }, r2 string)` on a 64-bit +architecture with hypothetical integer registers R0–R9. + +On entry, `a1` is assigned to `R0`, `a3` is assigned to `R1` and the +stack frame is laid out in the following sequence: + + a2 [2]uintptr + r1.x uintptr + r1.y [2]uintptr + a1Spill uint8 + a3Spill uint8 + _ [6]uint8 // alignment padding + +In the stack frame, only the `a2` field is initialized on entry; the +rest of the frame is left uninitialized. 
+ +On exit, `r2.base` is assigned to `R0`, `r2.len` is assigned to `R1`, +and `r1.x` and `r1.y` are initialized in the stack frame. + +There are several things to note in this example. +First, `a2` and `r1` are stack-assigned because they contain arrays. +The other arguments and results are register-assigned. +Result `r2` is decomposed into its components, which are individually +register-assigned. +On the stack, the stack-assigned arguments appear at lower addresses +than the stack-assigned results, which appear at lower addresses than +the argument spill area. +Only arguments, not results, are assigned a spill area on the stack. + +### Rationale + +Each base value is assigned to its own register to optimize +construction and access. +An alternative would be to pack multiple sub-word values into +registers, or to simply map an argument's in-memory layout to +registers (this is common in C ABIs), but this typically adds cost to +pack and unpack these values. +Modern architectures have more than enough registers to pass all +arguments and results this way for nearly all functions (see the +appendix), so there’s little downside to spreading base values across +registers. + +Arguments that can’t be fully assigned to registers are passed +entirely on the stack in case the callee takes the address of that +argument. +If an argument could be split across the stack and registers and the +callee took its address, it would need to be reconstructed in memory, +a process that would be proportional to the size of the argument. + +Non-trivial arrays are always passed on the stack because indexing +into an array typically requires a computed offset, which generally +isn’t possible with registers. +Arrays in general are rare in function signatures (only 0.7% of +functions in the Go 1.15 standard library and 0.2% in kubelet). 
+We considered allowing array fields to be passed on the stack while +the rest of an argument’s fields are passed in registers, but this +creates the same problems as other large structs if the callee takes +the address of an argument, and would benefit <0.1% of functions in +kubelet (and even these very little). + +We make exceptions for 0 and 1-element arrays because these don’t +require computed offsets, and 1-element arrays are already decomposed +in the compiler’s SSA representation. + +The ABI assignment algorithm above is equivalent to Go’s stack-based +ABI0 calling convention if there are zero architecture registers. +This is intended to ease the transition to the register-based internal +ABI and make it easy for the compiler to generate either calling +convention. +An architecture may still define register meanings that aren’t +compatible with ABI0, but these differences should be easy to account +for in the compiler. + +The assignment algorithm assigns zero-sized values to the stack +(assignment step 2) in order to support ABI0-equivalence. +While these values take no space themselves, they do result in +alignment padding on the stack in ABI0. +Without this step, the internal ABI would register-assign zero-sized +values even on architectures that provide no argument registers +because they don't consume any registers, and hence not add alignment +padding to the stack. + +The algorithm reserves spill space for arguments in the caller’s frame +so that the compiler can generate a stack growth path that spills into +this reserved space. +If the callee has to grow the stack, it may not be able to reserve +enough additional stack space in its own frame to spill these, which +is why it’s important that the caller do so. +These slots also act as the home location if these arguments need to +be spilled for any other reason, which simplifies traceback printing. + +There are several options for how to lay out the argument spill space. 
+We chose to lay out each argument according to its type's usual memory +layout but to separate the spill space from the regular argument +space. +Using the usual memory layout simplifies the compiler because it +already understands this layout. +Also, if a function takes the address of a register-assigned argument, +the compiler must spill that argument to memory in its usual memory +layout and it's more convenient to use the argument spill space for +this purpose. + +Alternatively, the spill space could be structured around argument +registers. +In this approach, the stack growth spill path would spill each +argument register to a register-sized stack word. +However, if the function takes the address of a register-assigned +argument, the compiler would have to reconstruct it in memory layout +elsewhere on the stack. + +The spill space could also be interleaved with the stack-assigned +arguments so the arguments appear in order whether they are register- +or stack-assigned. +This would be close to ABI0, except that register-assigned arguments +would be uninitialized on the stack and there's no need to reserve +stack space for register-assigned results. +We expect separating the spill space to perform better because of +memory locality. +Separating the space is also potentially simpler for `reflect` calls +because this allows `reflect` to summarize the spill space as a single +number. +Finally, the long-term intent is to remove reserved spill slots +entirely – allowing most functions to be called without any stack +setup and easing the introduction of callee-save registers – and +separating the spill space makes that transition easier. + +## Closures + +A func value (e.g., `var x func()`) is a pointer to a closure object. +A closure object begins with a pointer-sized program counter +representing the entry point of the function, followed by zero or more +bytes containing the closed-over environment. 
+ +Closure calls follow the same conventions as static function and +method calls, with one addition. Each architecture specifies a +*closure context pointer* register and calls to closures store the +address of the closure object in the closure context pointer register +prior to the call. + +## Software floating-point mode + +In "softfloat" mode, the ABI simply treats the hardware as having zero +floating-point registers. +As a result, any arguments containing floating-point values will be +passed on the stack. + +*Rationale*: Softfloat mode is about compatibility over performance +and is not commonly used. +Hence, we keep the ABI as simple as possible in this case, rather than +adding additional rules for passing floating-point values in integer +registers. + +## Architecture specifics + +This section describes per-architecture register mappings, as well as +other per-architecture special cases. + +### amd64 architecture + +The amd64 architecture uses the following sequence of 9 registers for +integer arguments and results: + + RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11 + +It uses X0 – X14 for floating-point arguments and results. + +*Rationale*: These sequences are chosen from the available registers +to be relatively easy to remember. + +Registers R12 and R13 are permanent scratch registers. +R15 is a scratch register except in dynamically linked binaries. + +*Rationale*: Some operations such as stack growth and reflection calls +need dedicated scratch registers in order to manipulate call frames +without corrupting arguments or results. 
+ +Special-purpose registers are as follows: + +| Register | Call meaning | Return meaning | Body meaning | +| --- | --- | --- | --- | +| RSP | Stack pointer | Same | Same | +| RBP | Frame pointer | Same | Same | +| RDX | Closure context pointer | Scratch | Scratch | +| R12 | Scratch | Scratch | Scratch | +| R13 | Scratch | Scratch | Scratch | +| R14 | Current goroutine | Same | Same | +| R15 | GOT reference temporary if dynlink | Same | Same | +| X15 | Zero value (*) | Same | Scratch | + +(*) Except on Plan 9, where X15 is a scratch register because SSE +registers cannot be used in note handlers (so the compiler avoids +using them except when absolutely necessary). + +*Rationale*: These register meanings are compatible with Go’s +stack-based calling convention except for R14 and X15, which will have +to be restored on transitions from ABI0 code to ABIInternal code. +In ABI0, these are undefined, so transitions from ABIInternal to ABI0 +can ignore these registers. + +*Rationale*: For the current goroutine pointer, we chose a register +that requires an additional REX byte. +While this adds one byte to every function prologue, it is hardly ever +accessed outside the function prologue and we expect making more +single-byte registers available to be a net win. + +*Rationale*: We could allow R14 (the current goroutine pointer) to be +a scratch register in function bodies because it can always be +restored from TLS on amd64. +However, we designate it as a fixed register for simplicity and for +consistency with other architectures that may not have a copy of the +current goroutine pointer in TLS. + +*Rationale*: We designate X15 as a fixed zero register because +functions often have to bulk zero their stack frames, and this is more +efficient with a designated zero register. + +*Implementation note*: Registers with fixed meaning at calls but not +in function bodies must be initialized by "injected" calls such as +signal-based panics. 
+ +#### Stack layout + +The stack pointer, RSP, grows down and is always aligned to 8 bytes. + +The amd64 architecture does not use a link register. + +A function's stack frame is laid out as follows: + + +------------------------------+ + | return PC | + | RBP on entry | + | ... locals ... | + | ... outgoing arguments ... | + +------------------------------+ ↓ lower addresses + +The "return PC" is pushed as part of the standard amd64 `CALL` +operation. +On entry, a function subtracts from RSP to open its stack frame and +saves the value of RBP directly below the return PC. +A leaf function that does not require any stack space may omit the +saved RBP. + +The Go ABI's use of RBP as a frame pointer register is compatible with +amd64 platform conventions so that Go can inter-operate with platform +debuggers and profilers. + +#### Flags + +The direction flag (D) is always cleared (set to the “forward” +direction) at a call. +The arithmetic status flags are treated like scratch registers and not +preserved across calls. +All other bits in RFLAGS are system flags. + +At function calls and returns, the CPU is in x87 mode (not MMX +technology mode). + +*Rationale*: Go on amd64 does not use either the x87 registers or MMX +registers. Hence, we follow the SysV platform conventions in order to +simplify transitions to and from the C ABI. + +At calls, the MXCSR control bits are always set as follows: + +| Flag | Bit | Value | Meaning | +| --- | --- | --- | --- | +| FZ | 15 | 0 | Do not flush to zero | +| RC | 14/13 | 0 (RN) | Round to nearest | +| PM | 12 | 1 | Precision masked | +| UM | 11 | 1 | Underflow masked | +| OM | 10 | 1 | Overflow masked | +| ZM | 9 | 1 | Divide-by-zero masked | +| DM | 8 | 1 | Denormal operations masked | +| IM | 7 | 1 | Invalid operations masked | +| DAZ | 6 | 0 | Do not zero de-normals | + +The MXCSR status bits are callee-save. 
+ +*Rationale*: Having a fixed MXCSR control configuration allows Go +functions to use SSE operations without modifying or saving the MXCSR. +Functions are allowed to modify it between calls (as long as they +restore it), but as of this writing Go code never does. +The above fixed configuration matches the process initialization +control bits specified by the ELF AMD64 ABI. + +The x87 floating-point control word is not used by Go on amd64. + +### arm64 architecture + +The arm64 architecture uses R0 – R15 for integer arguments and results. + +It uses F0 – F15 for floating-point arguments and results. + +*Rationale*: 16 integer registers and 16 floating-point registers are +more than enough for passing arguments and results for practically all +functions (see Appendix). While there are more registers available, +using more registers provides little benefit. Additionally, it will add +overhead on code paths where the number of arguments are not statically +known (e.g. reflect call), and will consume more stack space when there +is only limited stack space available to fit in the nosplit limit. + +Registers R16 and R17 are permanent scratch registers. They are also +used as scratch registers by the linker (Go linker and external +linker) in trampolines. + +Register R18 is reserved and never used. It is reserved for the OS +on some platforms (e.g. macOS). + +Registers R19 – R25 are permanent scratch registers. In addition, +R27 is a permanent scratch register used by the assembler when +expanding instructions. + +Floating-point registers F16 – F31 are also permanent scratch +registers. 
+ +Special-purpose registers are as follows: + +| Register | Call meaning | Return meaning | Body meaning | +| --- | --- | --- | --- | +| RSP | Stack pointer | Same | Same | +| R30 | Link register | Same | Scratch (non-leaf functions) | +| R29 | Frame pointer | Same | Same | +| R28 | Current goroutine | Same | Same | +| R27 | Scratch | Scratch | Scratch | +| R26 | Closure context pointer | Scratch | Scratch | +| R18 | Reserved (not used) | Same | Same | +| ZR | Zero value | Same | Same | + +*Rationale*: These register meanings are compatible with Go’s +stack-based calling convention. + +*Rationale*: The link register, R30, holds the function return +address at the function entry. For functions that have frames +(including most non-leaf functions), R30 is saved to stack in the +function prologue and restored in the epilogue. Within the function +body, R30 can be used as a scratch register. + +*Implementation note*: Registers with fixed meaning at calls but not +in function bodies must be initialized by "injected" calls such as +signal-based panics. + +#### Stack layout + +The stack pointer, RSP, grows down and is always aligned to 16 bytes. + +*Rationale*: The arm64 architecture requires the stack pointer to be +16-byte aligned. + +A function's stack frame, after the frame is created, is laid out as +follows: + + +------------------------------+ + | ... locals ... | + | ... outgoing arguments ... | + | return PC | ← RSP points to + | frame pointer on entry | + +------------------------------+ ↓ lower addresses + +The "return PC" is loaded to the link register, R30, as part of the +arm64 `CALL` operation. + +On entry, a function subtracts from RSP to open its stack frame, and +saves the values of R30 and R29 at the bottom of the frame. +Specifically, R30 is saved at 0(RSP) and R29 is saved at -8(RSP), +after RSP is updated. + +A leaf function that does not require any stack space may omit the +saved R30 and R29. 
+ +The Go ABI's use of R29 as a frame pointer register is compatible with +arm64 architecture requirement so that Go can inter-operate with platform +debuggers and profilers. + +This stack layout is used by both register-based (ABIInternal) and +stack-based (ABI0) calling conventions. + +#### Flags + +The arithmetic status flags (NZCV) are treated like scratch registers +and not preserved across calls. +All other bits in PSTATE are system flags and are not modified by Go. + +The floating-point status register (FPSR) is treated like scratch +registers and not preserved across calls. + +At calls, the floating-point control register (FPCR) bits are always +set as follows: + +| Flag | Bit | Value | Meaning | +| --- | --- | --- | --- | +| DN | 25 | 0 | Propagate NaN operands | +| FZ | 24 | 0 | Do not flush to zero | +| RC | 23/22 | 0 (RN) | Round to nearest, choose even if tied | +| IDE | 15 | 0 | Denormal operations trap disabled | +| IXE | 12 | 0 | Inexact trap disabled | +| UFE | 11 | 0 | Underflow trap disabled | +| OFE | 10 | 0 | Overflow trap disabled | +| DZE | 9 | 0 | Divide-by-zero trap disabled | +| IOE | 8 | 0 | Invalid operations trap disabled | +| NEP | 2 | 0 | Scalar operations do not affect higher elements in vector registers | +| AH | 1 | 0 | No alternate handling of de-normal inputs | +| FIZ | 0 | 0 | Do not zero de-normals | + +*Rationale*: Having a fixed FPCR control configuration allows Go +functions to use floating-point and vector (SIMD) operations without +modifying or saving the FPCR. +Functions are allowed to modify it between calls (as long as they +restore it), but as of this writing Go code never does. + +### loong64 architecture + +The loong64 architecture uses R4 – R19 for integer arguments and integer results. + +It uses F0 – F15 for floating-point arguments and results. + +Registers R20 - R21, R23 – R28, R30 - R31, F16 – F31 are permanent scratch registers. + +Register R2 is reserved and never used. 
+
+Registers R20 and R21 are used by runtime.duffcopy and runtime.duffzero.
+
+Special-purpose registers used within Go generated code and Go assembly code
+are as follows:
+
+| Register | Call meaning | Return meaning | Body meaning |
+| --- | --- | --- | --- |
+| R0 | Zero value | Same | Same |
+| R1 | Link register | Link register | Scratch |
+| R3 | Stack pointer | Same | Same |
+| R20,R21 | Scratch | Scratch | Used by duffcopy, duffzero |
+| R22 | Current goroutine | Same | Same |
+| R29 | Closure context pointer | Same | Same |
+| R30, R31 | Used by the assembler | Same | Same |
+
+*Rationale*: These register meanings are compatible with Go’s stack-based
+calling convention.
+
+#### Stack layout
+
+The stack pointer, R3, grows down and is aligned to 8 bytes.
+
+A function's stack frame, after the frame is created, is laid out as
+follows:
+
+ +------------------------------+
+ | ... locals ... |
+ | ... outgoing arguments ... |
+ | return PC | ← R3 points to
+ +------------------------------+ ↓ lower addresses
+
+This stack layout is used by both register-based (ABIInternal) and
+stack-based (ABI0) calling conventions.
+
+The "return PC" is loaded to the link register, R1, as part of the
+loong64 `JAL` operation.
+
+#### Flags
+All bits in CSR are system flags and are not modified by Go.
+
+### ppc64 architecture
+
+The ppc64 architecture uses R3 – R10 and R14 – R17 for integer arguments
+and results.
+
+It uses F1 – F12 for floating-point arguments and results.
+
+Register R31 is a permanent scratch register in Go.
+ +Special-purpose registers used within Go generated code and Go +assembly code are as follows: + +| Register | Call meaning | Return meaning | Body meaning | +| --- | --- | --- | --- | +| R0 | Zero value | Same | Same | +| R1 | Stack pointer | Same | Same | +| R2 | TOC register | Same | Same | +| R11 | Closure context pointer | Scratch | Scratch | +| R12 | Function address on indirect calls | Scratch | Scratch | +| R13 | TLS pointer | Same | Same | +| R20,R21 | Scratch | Scratch | Used by duffcopy, duffzero | +| R30 | Current goroutine | Same | Same | +| R31 | Scratch | Scratch | Scratch | +| LR | Link register | Link register | Scratch | +*Rationale*: These register meanings are compatible with Go’s +stack-based calling convention. + +The link register, LR, holds the function return +address at the function entry and is set to the correct return +address before exiting the function. It is also used +in some cases as the function address when doing an indirect call. + +The register R2 contains the address of the TOC (table of contents) which +contains data or code addresses used when generating position independent +code. Non-Go code generated when using cgo contains TOC-relative addresses +which depend on R2 holding a valid TOC. Go code compiled with -shared or +-dynlink initializes and maintains R2 and uses it in some cases for +function calls; Go code compiled without these options does not modify R2. + +When making a function call R12 contains the function address for use by the +code to generate R2 at the beginning of the function. R12 can be used for +other purposes within the body of the function, such as trampoline generation. + +R20 and R21 are used in duffcopy and duffzero which could be generated +before arguments are saved so should not be used for register arguments. + +The Count register CTR can be used as the call target for some branch instructions. +It holds the return address when preemption has occurred. 
+
+On PPC64 when a float32 is loaded it becomes a float64 in the register, which is
+different from other platforms and that needs to be recognized by the internal
+implementation of reflection so that float32 arguments are passed correctly.
+
+Registers R18 - R29 and F13 - F31 are considered scratch registers.
+
+#### Stack layout
+
+The stack pointer, R1, grows down and is aligned to 8 bytes in Go, but changed
+to 16 bytes when calling cgo.
+
+A function's stack frame, after the frame is created, is laid out as
+follows:
+
+ +------------------------------+
+ | ... locals ... |
+ | ... outgoing arguments ... |
+ | 24 TOC register R2 save | When compiled with -shared/-dynlink
+ | 16 Unused in Go | Not used in Go
+ | 8 CR save | nonvolatile CR fields
+ | 0 return PC | ← R1 points to
+ +------------------------------+ ↓ lower addresses
+
+The "return PC" is loaded to the link register, LR, as part of the
+ppc64 `BL` operations.
+
+On entry to a non-leaf function, the stack frame size is subtracted from R1 to
+create its stack frame, and the value of LR is saved at the bottom of the frame.
+
+A leaf function that does not require any stack space does not modify R1 and
+does not save LR.
+
+*NOTE*: We might need to save the frame pointer on the stack as
+in the PPC64 ELF v2 ABI so Go can inter-operate with platform debuggers
+and profilers.
+
+This stack layout is used by both register-based (ABIInternal) and
+stack-based (ABI0) calling conventions.
+
+#### Flags
+
+The condition register consists of 8 condition code register fields
+CR0-CR7. Go generated code only sets and uses CR0, commonly set by
+compare functions and used to determine the target of a conditional
+branch. The generated code does not set or use CR1-CR7.
+
+The floating point status and control register (FPSCR) is initialized
+to 0 by the kernel at startup of the Go program and not changed by
+the Go generated code.
+
+### riscv64 architecture
+
+The riscv64 architecture uses X10 – X17, X8, X9, X18 – X23 for integer arguments
+and results.
+
+It uses F10 – F17, F8, F9, F18 – F23 for floating-point arguments and results.
+
+Special-purpose registers used within Go generated code and Go
+assembly code are as follows:
+
+| Register | Call meaning | Return meaning | Body meaning |
+| --- | --- | --- | --- |
+| X0 | Zero value | Same | Same |
+| X1 | Link register | Link register | Scratch |
+| X2 | Stack pointer | Same | Same |
+| X3 | Global pointer | Same | Used by dynamic linker |
+| X4 | TLS (thread pointer) | TLS | Scratch |
+| X24,X25 | Scratch | Scratch | Used by duffcopy, duffzero |
+| X26 | Closure context pointer | Scratch | Scratch |
+| X27 | Current goroutine | Same | Same |
+| X31 | Scratch | Scratch | Scratch |
+
+*Rationale*: These register meanings are compatible with Go’s
+stack-based calling convention. The context register was changed from X20
+to X26, and the duffcopy/duffzero registers were changed to X24 and X25,
+before this register ABI was adopted.
+X10 – X17, X8, X9, X18 – X23 are in the same order as A0 – A7, S0 – S7 in the platform ABI.
+F10 – F17, F8, F9, F18 – F23 are in the same order as FA0 – FA7, FS0 – FS7 in the platform ABI.
+X8 – X23, F8 – F15 are used for compressed instructions (RVC), which will benefit code size in the future.
+
+#### Stack layout
+
+The stack pointer, X2, grows down and is aligned to 8 bytes.
+
+A function's stack frame, after the frame is created, is laid out as
+follows:
+
+ +------------------------------+
+ | ... locals ... |
+ | ... outgoing arguments ... |
+ | return PC | ← X2 points to
+ +------------------------------+ ↓ lower addresses
+
+The "return PC" is loaded to the link register, X1, as part of the
+riscv64 `CALL` operation.
+
+#### Flags
+
+The riscv64 architecture has the Zicsr extension for control and status
+registers (CSRs), which are treated as scratch registers.
+All bits in CSR are system flags and are not modified by Go.
+ +## Future directions + +### Spill path improvements + +The ABI currently reserves spill space for argument registers so the +compiler can statically generate an argument spill path before calling +into `runtime.morestack` to grow the stack. +This ensures there will be sufficient spill space even when the stack +is nearly exhausted and keeps stack growth and stack scanning +essentially unchanged from ABI0. + +However, this wastes stack space (the median wastage is 16 bytes per +call), resulting in larger stacks and increased cache footprint. +A better approach would be to reserve stack space only when spilling. +One way to ensure enough space is available to spill would be for +every function to ensure there is enough space for the function's own +frame *as well as* the spill space of all functions it calls. +For most functions, this would change the threshold for the prologue +stack growth check. +For `nosplit` functions, this would change the threshold used in the +linker's static stack size check. + +Allocating spill space in the callee rather than the caller may also +allow for faster reflection calls in the common case where a function +takes only register arguments, since it would allow reflection to make +these calls directly without allocating any frame. + +The statically-generated spill path also increases code size. +It is possible to instead have a generic spill path in the runtime, as +part of `morestack`. +However, this complicates reserving the spill space, since spilling +all possible register arguments would, in most cases, take +significantly more space than spilling only those used by a particular +function. +Some options are to spill to a temporary space and copy back only the +registers used by the function, or to grow the stack if necessary +before spilling to it (using a temporary space if necessary), or to +use a heap-allocated space if insufficient stack space is available. 
+These options all add enough complexity that we will have to make this +decision based on the actual code size growth caused by the static +spill paths. + +### Clobber sets + +As defined, the ABI does not use callee-save registers. +This significantly simplifies the garbage collector and the compiler's +register allocator, but at some performance cost. +A potentially better balance for Go code would be to use *clobber +sets*: for each function, the compiler records the set of registers it +clobbers (including those clobbered by functions it calls) and any +register not clobbered by function F can remain live across calls to +F. + +This is generally a good fit for Go because Go's package DAG allows +function metadata like the clobber set to flow up the call graph, even +across package boundaries. +Clobber sets would require relatively little change to the garbage +collector, unlike general callee-save registers. +One disadvantage of clobber sets over callee-save registers is that +they don't help with indirect function calls or interface method +calls, since static information isn't available in these cases. + +### Large aggregates + +Go encourages passing composite values by value, and this simplifies +reasoning about mutation and races. +However, this comes at a performance cost for large composite values. +It may be possible to instead transparently pass large composite +values by reference and delay copying until it is actually necessary. + +## Appendix: Register usage analysis + +In order to understand the impacts of the above design on register +usage, we +[analyzed](https://github.com/aclements/go-misc/tree/master/abi) the +impact of the above ABI on a large code base: cmd/kubelet from +[Kubernetes](https://github.com/kubernetes/kubernetes) at tag v1.18.8. 
+ +The following table shows the impact of different numbers of available +integer and floating-point registers on argument assignment: + +``` +| | | | stack args | spills | stack total | +| ints | floats | % fit | p50 | p95 | p99 | p50 | p95 | p99 | p50 | p95 | p99 | +| 0 | 0 | 6.3% | 32 | 152 | 256 | 0 | 0 | 0 | 32 | 152 | 256 | +| 0 | 8 | 6.4% | 32 | 152 | 256 | 0 | 0 | 0 | 32 | 152 | 256 | +| 1 | 8 | 21.3% | 24 | 144 | 248 | 8 | 8 | 8 | 32 | 152 | 256 | +| 2 | 8 | 38.9% | 16 | 128 | 224 | 8 | 16 | 16 | 24 | 136 | 240 | +| 3 | 8 | 57.0% | 0 | 120 | 224 | 16 | 24 | 24 | 24 | 136 | 240 | +| 4 | 8 | 73.0% | 0 | 120 | 216 | 16 | 32 | 32 | 24 | 136 | 232 | +| 5 | 8 | 83.3% | 0 | 112 | 216 | 16 | 40 | 40 | 24 | 136 | 232 | +| 6 | 8 | 87.5% | 0 | 112 | 208 | 16 | 48 | 48 | 24 | 136 | 232 | +| 7 | 8 | 89.8% | 0 | 112 | 208 | 16 | 48 | 56 | 24 | 136 | 232 | +| 8 | 8 | 91.3% | 0 | 112 | 200 | 16 | 56 | 64 | 24 | 136 | 232 | +| 9 | 8 | 92.1% | 0 | 112 | 192 | 16 | 56 | 72 | 24 | 136 | 232 | +| 10 | 8 | 92.6% | 0 | 104 | 192 | 16 | 56 | 72 | 24 | 136 | 232 | +| 11 | 8 | 93.1% | 0 | 104 | 184 | 16 | 56 | 80 | 24 | 128 | 232 | +| 12 | 8 | 93.4% | 0 | 104 | 176 | 16 | 56 | 88 | 24 | 128 | 232 | +| 13 | 8 | 94.0% | 0 | 88 | 176 | 16 | 56 | 96 | 24 | 128 | 232 | +| 14 | 8 | 94.4% | 0 | 80 | 152 | 16 | 64 | 104 | 24 | 128 | 232 | +| 15 | 8 | 94.6% | 0 | 80 | 152 | 16 | 64 | 112 | 24 | 128 | 232 | +| 16 | 8 | 94.9% | 0 | 16 | 152 | 16 | 64 | 112 | 24 | 128 | 232 | +| ∞ | 8 | 99.8% | 0 | 0 | 0 | 24 | 112 | 216 | 24 | 120 | 216 | +``` + +The first two columns show the number of available integer and +floating-point registers. +The first row shows the results for 0 integer and 0 floating-point +registers, which is equivalent to ABI0. +We found that any reasonable number of floating-point registers has +the same effect, so we fixed it at 8 for all other rows. 
+ +The “% fit” column gives the fraction of functions where all arguments +and results are register-assigned and no arguments are passed on the +stack. +The three “stack args” columns give the median, 95th and 99th +percentile number of bytes of stack arguments. +The “spills” columns likewise summarize the number of bytes in +on-stack spill space. +And “stack total” summarizes the sum of stack arguments and on-stack +spill slots. +Note that these are three different distributions; for example, +there’s no single function that takes 0 stack argument bytes, 16 spill +bytes, and 24 total stack bytes. + +From this, we can see that the fraction of functions that fit entirely +in registers grows very slowly once it reaches about 90%, though +curiously there is a small minority of functions that could benefit +from a huge number of registers. +Making 9 integer registers available on amd64 puts it in this realm. +We also see that the stack space required for most functions is fairly +small. +While the increasing space required for spills largely balances out +the decreasing space required for stack arguments as the number of +available registers increases, there is a general reduction in the +total stack space required with more available registers. +This does, however, suggest that eliminating spill slots in the future +would noticeably reduce stack requirements. 
diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/default.pgo b/platform/dbops/binaries/go/go/src/cmd/compile/default.pgo new file mode 100644 index 0000000000000000000000000000000000000000..2626b932a599c15ea94f9e07f430e1ffe2941d05 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/default.pgo @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d466218cdda3a9c80a0fe513731efb3fd5e69a77bbd40047572b27fed4e8c1b +size 286041 diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/doc.go b/platform/dbops/binaries/go/go/src/cmd/compile/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..507899e222eb1c45cb1142de4b2aa22092236000 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/doc.go @@ -0,0 +1,321 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Compile, typically invoked as ``go tool compile,'' compiles a single Go package +comprising the files named on the command line. It then writes a single +object file named for the basename of the first source file with a .o suffix. +The object file can then be combined with other objects into a package archive +or passed directly to the linker (``go tool link''). If invoked with -pack, the compiler +writes an archive directly, bypassing the intermediate object file. + +The generated files contain type information about the symbols exported by +the package and about types used by symbols imported by the package from +other packages. It is therefore not necessary when compiling client C of +package P to read the files of P's dependencies, only the compiled output of P. + +Command Line + +Usage: + + go tool compile [flags] file... + +The specified files must be Go source files and all part of the same package. +The same compiler is used for all target operating systems and architectures. 
+The GOOS and GOARCH environment variables set the desired target. + +Flags: + + -D path + Set relative path for local imports. + -I dir1 -I dir2 + Search for imported packages in dir1, dir2, etc, + after consulting $GOROOT/pkg/$GOOS_$GOARCH. + -L + Show complete file path in error messages. + -N + Disable optimizations. + -S + Print assembly listing to standard output (code only). + -S -S + Print assembly listing to standard output (code and data). + -V + Print compiler version and exit. + -asmhdr file + Write assembly header to file. + -asan + Insert calls to C/C++ address sanitizer. + -buildid id + Record id as the build id in the export metadata. + -blockprofile file + Write block profile for the compilation to file. + -c int + Concurrency during compilation. Set 1 for no concurrency (default is 1). + -complete + Assume package has no non-Go components. + -cpuprofile file + Write a CPU profile for the compilation to file. + -dynlink + Allow references to Go symbols in shared libraries (experimental). + -e + Remove the limit on the number of errors reported (default limit is 10). + -goversion string + Specify required go tool version of the runtime. + Exits when the runtime go version does not match goversion. + -h + Halt with a stack trace at the first error detected. + -importcfg file + Read import configuration from file. + In the file, set importmap, packagefile to specify import resolution. + -installsuffix suffix + Look for packages in $GOROOT/pkg/$GOOS_$GOARCH_suffix + instead of $GOROOT/pkg/$GOOS_$GOARCH. + -l + Disable inlining. + -lang version + Set language version to compile, as in -lang=go1.12. + Default is current version. + -linkobj file + Write linker-specific object to file and compiler-specific + object to usual output file (as specified by -o). + Without this flag, the -o output is a combination of both + linker and compiler input. + -m + Print optimization decisions. Higher values or repetition + produce more detail. 
+ -memprofile file + Write memory profile for the compilation to file. + -memprofilerate rate + Set runtime.MemProfileRate for the compilation to rate. + -msan + Insert calls to C/C++ memory sanitizer. + -mutexprofile file + Write mutex profile for the compilation to file. + -nolocalimports + Disallow local (relative) imports. + -o file + Write object to file (default file.o or, with -pack, file.a). + -p path + Set expected package import path for the code being compiled, + and diagnose imports that would cause a circular dependency. + -pack + Write a package (archive) file rather than an object file + -race + Compile with race detector enabled. + -s + Warn about composite literals that can be simplified. + -shared + Generate code that can be linked into a shared library. + -spectre list + Enable spectre mitigations in list (all, index, ret). + -traceprofile file + Write an execution trace to file. + -trimpath prefix + Remove prefix from recorded source file paths. + +Flags related to debugging information: + + -dwarf + Generate DWARF symbols. + -dwarflocationlists + Add location lists to DWARF in optimized mode. + -gendwarfinl int + Generate DWARF inline info records (default 2). + +Flags to debug the compiler itself: + + -E + Debug symbol export. + -K + Debug missing line numbers. + -d list + Print debug information about items in list. Try -d help for further information. + -live + Debug liveness analysis. + -v + Increase debug verbosity. + -% + Debug non-static initializers. + -W + Debug parse tree after type checking. + -f + Debug stack frames. + -i + Debug line number stack. + -j + Debug runtime-initialized variables. + -r + Debug generated wrappers. + -w + Debug type checking. + +Compiler Directives + +The compiler accepts directives in the form of comments. +To distinguish them from non-directive comments, directives +require no space between the comment opening and the name of the directive. 
However, since +they are comments, tools unaware of the directive convention or of a particular +directive can skip over a directive like any other comment. +*/ +// Line directives come in several forms: +// +// //line :line +// //line :line:col +// //line filename:line +// //line filename:line:col +// /*line :line*/ +// /*line :line:col*/ +// /*line filename:line*/ +// /*line filename:line:col*/ +// +// In order to be recognized as a line directive, the comment must start with +// //line or /*line followed by a space, and must contain at least one colon. +// The //line form must start at the beginning of a line. +// A line directive specifies the source position for the character immediately following +// the comment as having come from the specified file, line and column: +// For a //line comment, this is the first character of the next line, and +// for a /*line comment this is the character position immediately following the closing */. +// If no filename is given, the recorded filename is empty if there is also no column number; +// otherwise it is the most recently recorded filename (actual filename or filename specified +// by previous line directive). +// If a line directive doesn't specify a column number, the column is "unknown" until +// the next directive and the compiler does not report column numbers for that range. +// The line directive text is interpreted from the back: First the trailing :ddd is peeled +// off from the directive text if ddd is a valid number > 0. Then the second :ddd +// is peeled off the same way if it is valid. Anything before that is considered the filename +// (possibly including blanks and colons). Invalid line or column values are reported as errors. 
+// +// Examples: +// +// //line foo.go:10 the filename is foo.go, and the line number is 10 for the next line +// //line C:foo.go:10 colons are permitted in filenames, here the filename is C:foo.go, and the line is 10 +// //line a:100 :10 blanks are permitted in filenames, here the filename is " a:100 " (excluding quotes) +// /*line :10:20*/x the position of x is in the current file with line number 10 and column number 20 +// /*line foo: 10 */ this comment is recognized as invalid line directive (extra blanks around line number) +// +// Line directives typically appear in machine-generated code, so that compilers and debuggers +// will report positions in the original input to the generator. +/* +The line directive is a historical special case; all other directives are of the form +//go:name, indicating that they are defined by the Go toolchain. +Each directive must be placed its own line, with only leading spaces and tabs +allowed before the comment. +Each directive applies to the Go code that immediately follows it, +which typically must be a declaration. + + //go:noescape + +The //go:noescape directive must be followed by a function declaration without +a body (meaning that the function has an implementation not written in Go). +It specifies that the function does not allow any of the pointers passed as +arguments to escape into the heap or into the values returned from the function. +This information can be used during the compiler's escape analysis of Go code +calling the function. + + //go:uintptrescapes + +The //go:uintptrescapes directive must be followed by a function declaration. +It specifies that the function's uintptr arguments may be pointer values that +have been converted to uintptr and must be on the heap and kept alive for the +duration of the call, even though from the types alone it would appear that the +object is no longer needed during the call. 
The conversion from pointer to +uintptr must appear in the argument list of any call to this function. This +directive is necessary for some low-level system call implementations and +should be avoided otherwise. + + //go:noinline + +The //go:noinline directive must be followed by a function declaration. +It specifies that calls to the function should not be inlined, overriding +the compiler's usual optimization rules. This is typically only needed +for special runtime functions or when debugging the compiler. + + //go:norace + +The //go:norace directive must be followed by a function declaration. +It specifies that the function's memory accesses must be ignored by the +race detector. This is most commonly used in low-level code invoked +at times when it is unsafe to call into the race detector runtime. + + //go:nosplit + +The //go:nosplit directive must be followed by a function declaration. +It specifies that the function must omit its usual stack overflow check. +This is most commonly used by low-level runtime code invoked +at times when it is unsafe for the calling goroutine to be preempted. + + //go:linkname localname [importpath.name] + +The //go:linkname directive conventionally precedes the var or func +declaration named by ``localname``, though its position does not +change its effect. +This directive determines the object-file symbol used for a Go var or +func declaration, allowing two Go symbols to alias the same +object-file symbol, thereby enabling one package to access a symbol in +another package even when this would violate the usual encapsulation +of unexported declarations, or even type safety. +For that reason, it is only enabled in files that have imported "unsafe". + +It may be used in two scenarios. Let's assume that package upper +imports package lower, perhaps indirectly. In the first scenario, +package lower defines a symbol whose object file name belongs to +package upper. 
Both packages contain a linkname directive: package +lower uses the two-argument form and package upper uses the +one-argument form. In the example below, lower.f is an alias for the +function upper.g: + + package upper + import _ "unsafe" + //go:linkname g + func g() + + package lower + import _ "unsafe" + //go:linkname f upper.g + func f() { ... } + +The linkname directive in package upper suppresses the usual error for +a function that lacks a body. (That check may alternatively be +suppressed by including a .s file, even an empty one, in the package.) + +In the second scenario, package upper unilaterally creates an alias +for a symbol in package lower. In the example below, upper.g is an alias +for the function lower.f. + + package upper + import _ "unsafe" + //go:linkname g lower.f + func g() + + package lower + func f() { ... } + +The declaration of lower.f may also have a linkname directive with a +single argument, f. This is optional, but helps alert the reader that +the function is accessed from outside the package. + + //go:wasmimport importmodule importname + +The //go:wasmimport directive is wasm-only and must be followed by a +function declaration. +It specifies that the function is provided by a wasm module identified +by ``importmodule`` and ``importname``. + + //go:wasmimport a_module f + func g() + +The types of parameters and return values to the Go function are translated to +Wasm according to the following table: + + Go types Wasm types + int32, uint32 i32 + int64, uint64 i64 + float32 f32 + float64 f64 + unsafe.Pointer i32 + +Any other parameter types are disallowed by the compiler. 
+ +*/ +package main diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/abi/abiutils.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/abi/abiutils.go new file mode 100644 index 0000000000000000000000000000000000000000..607d462493e9f0b17d3f365babe2a62141f05046 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/abi/abiutils.go @@ -0,0 +1,683 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/src" + "fmt" + "math" + "sync" +) + +//...................................................................... +// +// Public/exported bits of the ABI utilities. +// + +// ABIParamResultInfo stores the results of processing a given +// function type to compute stack layout and register assignments. For +// each input and output parameter we capture whether the param was +// register-assigned (and to which register(s)) or the stack offset +// for the param if is not going to be passed in registers according +// to the rules in the Go internal ABI specification (1.17). +type ABIParamResultInfo struct { + inparams []ABIParamAssignment // Includes receiver for method calls. Does NOT include hidden closure pointer. 
+ outparams []ABIParamAssignment + offsetToSpillArea int64 + spillAreaSize int64 + inRegistersUsed int + outRegistersUsed int + config *ABIConfig // to enable String() method +} + +func (a *ABIParamResultInfo) Config() *ABIConfig { + return a.config +} + +func (a *ABIParamResultInfo) InParams() []ABIParamAssignment { + return a.inparams +} + +func (a *ABIParamResultInfo) OutParams() []ABIParamAssignment { + return a.outparams +} + +func (a *ABIParamResultInfo) InRegistersUsed() int { + return a.inRegistersUsed +} + +func (a *ABIParamResultInfo) OutRegistersUsed() int { + return a.outRegistersUsed +} + +func (a *ABIParamResultInfo) InParam(i int) *ABIParamAssignment { + return &a.inparams[i] +} + +func (a *ABIParamResultInfo) OutParam(i int) *ABIParamAssignment { + return &a.outparams[i] +} + +func (a *ABIParamResultInfo) SpillAreaOffset() int64 { + return a.offsetToSpillArea +} + +func (a *ABIParamResultInfo) SpillAreaSize() int64 { + return a.spillAreaSize +} + +// ArgWidth returns the amount of stack needed for all the inputs +// and outputs of a function or method, including ABI-defined parameter +// slots and ABI-defined spill slots for register-resident parameters. +// The name is inherited from (*Type).ArgWidth(), which it replaces. +func (a *ABIParamResultInfo) ArgWidth() int64 { + return a.spillAreaSize + a.offsetToSpillArea - a.config.LocalsOffset() +} + +// RegIndex stores the index into the set of machine registers used by +// the ABI on a specific architecture for parameter passing. RegIndex +// values 0 through N-1 (where N is the number of integer registers +// used for param passing according to the ABI rules) describe integer +// registers; values N through M (where M is the number of floating +// point registers used). Thus if the ABI says there are 5 integer +// registers and 7 floating point registers, then RegIndex value of 4 +// indicates the 5th integer register, and a RegIndex value of 11 +// indicates the 7th floating point register. 
+type RegIndex uint8 + +// ABIParamAssignment holds information about how a specific param or +// result will be passed: in registers (in which case 'Registers' is +// populated) or on the stack (in which case 'Offset' is set to a +// non-negative stack offset). The values in 'Registers' are indices +// (as described above), not architected registers. +type ABIParamAssignment struct { + Type *types.Type + Name *ir.Name + Registers []RegIndex + offset int32 +} + +// Offset returns the stack offset for addressing the parameter that "a" describes. +// This will panic if "a" describes a register-allocated parameter. +func (a *ABIParamAssignment) Offset() int32 { + if len(a.Registers) > 0 { + base.Fatalf("register allocated parameters have no offset") + } + return a.offset +} + +// RegisterTypes returns a slice of the types of the registers +// corresponding to a slice of parameters. The returned slice +// has capacity for one more, likely a memory type. +func RegisterTypes(apa []ABIParamAssignment) []*types.Type { + rcount := 0 + for _, pa := range apa { + rcount += len(pa.Registers) + } + if rcount == 0 { + // Note that this catches top-level struct{} and [0]Foo, which are stack allocated. 
+ return make([]*types.Type, 0, 1) + } + rts := make([]*types.Type, 0, rcount+1) + for _, pa := range apa { + if len(pa.Registers) == 0 { + continue + } + rts = appendParamTypes(rts, pa.Type) + } + return rts +} + +func (pa *ABIParamAssignment) RegisterTypesAndOffsets() ([]*types.Type, []int64) { + l := len(pa.Registers) + if l == 0 { + return nil, nil + } + typs := make([]*types.Type, 0, l) + offs := make([]int64, 0, l) + offs, _ = appendParamOffsets(offs, 0, pa.Type) + return appendParamTypes(typs, pa.Type), offs +} + +func appendParamTypes(rts []*types.Type, t *types.Type) []*types.Type { + w := t.Size() + if w == 0 { + return rts + } + if t.IsScalar() || t.IsPtrShaped() { + if t.IsComplex() { + c := types.FloatForComplex(t) + return append(rts, c, c) + } else { + if int(t.Size()) <= types.RegSize { + return append(rts, t) + } + // assume 64bit int on 32-bit machine + // TODO endianness? Should high-order (sign bits) word come first? + if t.IsSigned() { + rts = append(rts, types.Types[types.TINT32]) + } else { + rts = append(rts, types.Types[types.TUINT32]) + } + return append(rts, types.Types[types.TUINT32]) + } + } else { + typ := t.Kind() + switch typ { + case types.TARRAY: + for i := int64(0); i < t.NumElem(); i++ { // 0 gets no registers, plus future-proofing. + rts = appendParamTypes(rts, t.Elem()) + } + case types.TSTRUCT: + for _, f := range t.Fields() { + if f.Type.Size() > 0 { // embedded zero-width types receive no registers + rts = appendParamTypes(rts, f.Type) + } + } + case types.TSLICE: + return appendParamTypes(rts, synthSlice) + case types.TSTRING: + return appendParamTypes(rts, synthString) + case types.TINTER: + return appendParamTypes(rts, synthIface) + } + } + return rts +} + +// appendParamOffsets appends the offset(s) of type t, starting from "at", +// to input offsets, and returns the longer slice and the next unused offset. 
+func appendParamOffsets(offsets []int64, at int64, t *types.Type) ([]int64, int64) { + at = align(at, t) + w := t.Size() + if w == 0 { + return offsets, at + } + if t.IsScalar() || t.IsPtrShaped() { + if t.IsComplex() || int(t.Size()) > types.RegSize { // complex and *int64 on 32-bit + s := w / 2 + return append(offsets, at, at+s), at + w + } else { + return append(offsets, at), at + w + } + } else { + typ := t.Kind() + switch typ { + case types.TARRAY: + for i := int64(0); i < t.NumElem(); i++ { + offsets, at = appendParamOffsets(offsets, at, t.Elem()) + } + case types.TSTRUCT: + for i, f := range t.Fields() { + offsets, at = appendParamOffsets(offsets, at, f.Type) + if f.Type.Size() == 0 && i == t.NumFields()-1 { + at++ // last field has zero width + } + } + at = align(at, t) // type size is rounded up to its alignment + case types.TSLICE: + return appendParamOffsets(offsets, at, synthSlice) + case types.TSTRING: + return appendParamOffsets(offsets, at, synthString) + case types.TINTER: + return appendParamOffsets(offsets, at, synthIface) + } + } + return offsets, at +} + +// FrameOffset returns the frame-pointer-relative location that a function +// would spill its input or output parameter to, if such a spill slot exists. +// If there is none defined (e.g., register-allocated outputs) it panics. +// For register-allocated inputs that is their spill offset reserved for morestack; +// for stack-allocated inputs and outputs, that is their location on the stack. +// (In a future version of the ABI, register-resident inputs may lose their defined +// spill area to help reduce stack sizes.) 
+func (a *ABIParamAssignment) FrameOffset(i *ABIParamResultInfo) int64 { + if a.offset == -1 { + base.Fatalf("function parameter has no ABI-defined frame-pointer offset") + } + if len(a.Registers) == 0 { // passed on stack + return int64(a.offset) - i.config.LocalsOffset() + } + // spill area for registers + return int64(a.offset) + i.SpillAreaOffset() - i.config.LocalsOffset() +} + +// RegAmounts holds a specified number of integer/float registers. +type RegAmounts struct { + intRegs int + floatRegs int +} + +// ABIConfig captures the number of registers made available +// by the ABI rules for parameter passing and result returning. +type ABIConfig struct { + // Do we need anything more than this? + offsetForLocals int64 // e.g., obj.(*Link).Arch.FixedFrameSize -- extra linkage information on some architectures. + regAmounts RegAmounts + which obj.ABI +} + +// NewABIConfig returns a new ABI configuration for an architecture with +// iRegsCount integer/pointer registers and fRegsCount floating point registers. +func NewABIConfig(iRegsCount, fRegsCount int, offsetForLocals int64, which uint8) *ABIConfig { + return &ABIConfig{offsetForLocals: offsetForLocals, regAmounts: RegAmounts{iRegsCount, fRegsCount}, which: obj.ABI(which)} +} + +// Copy returns config. +// +// TODO(mdempsky): Remove. +func (config *ABIConfig) Copy() *ABIConfig { + return config +} + +// Which returns the ABI number +func (config *ABIConfig) Which() obj.ABI { + return config.which +} + +// LocalsOffset returns the architecture-dependent offset from SP for args and results. +// In theory this is only used for debugging; it ought to already be incorporated into +// results from the ABI-related methods +func (config *ABIConfig) LocalsOffset() int64 { + return config.offsetForLocals +} + +// FloatIndexFor translates r into an index in the floating point parameter +// registers. If the result is negative, the input index was actually for the +// integer parameter registers. 
+func (config *ABIConfig) FloatIndexFor(r RegIndex) int64 { + return int64(r) - int64(config.regAmounts.intRegs) +} + +// NumParamRegs returns the total number of registers used to +// represent a parameter of the given type, which must be register +// assignable. +func (config *ABIConfig) NumParamRegs(typ *types.Type) int { + intRegs, floatRegs := typ.Registers() + if intRegs == math.MaxUint8 && floatRegs == math.MaxUint8 { + base.Fatalf("cannot represent parameters of type %v in registers", typ) + } + return int(intRegs) + int(floatRegs) +} + +// ABIAnalyzeTypes takes slices of parameter and result types, and returns an ABIParamResultInfo, +// based on the given configuration. This is the same result computed by config.ABIAnalyze applied to the +// corresponding method/function type, except that all the embedded parameter names are nil. +// This is intended for use by ssagen/ssa.go:(*state).rtcall, for runtime functions that lack a parsed function type. +func (config *ABIConfig) ABIAnalyzeTypes(params, results []*types.Type) *ABIParamResultInfo { + setup() + s := assignState{ + stackOffset: config.offsetForLocals, + rTotal: config.regAmounts, + } + + assignParams := func(params []*types.Type, isResult bool) []ABIParamAssignment { + res := make([]ABIParamAssignment, len(params)) + for i, param := range params { + res[i] = s.assignParam(param, nil, isResult) + } + return res + } + + info := &ABIParamResultInfo{config: config} + + // Inputs + info.inparams = assignParams(params, false) + s.stackOffset = types.RoundUp(s.stackOffset, int64(types.RegSize)) + info.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs + + // Outputs + s.rUsed = RegAmounts{} + info.outparams = assignParams(results, true) + // The spill area is at a register-aligned offset and its size is rounded up to a register alignment. + // TODO in theory could align offset only to minimum required by spilled data types. 
+ info.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize) + info.spillAreaSize = alignTo(s.spillOffset, types.RegSize) + info.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs + + return info +} + +// ABIAnalyzeFuncType takes a function type 'ft' and an ABI rules description +// 'config' and analyzes the function to determine how its parameters +// and results will be passed (in registers or on the stack), returning +// an ABIParamResultInfo object that holds the results of the analysis. +func (config *ABIConfig) ABIAnalyzeFuncType(ft *types.Type) *ABIParamResultInfo { + setup() + s := assignState{ + stackOffset: config.offsetForLocals, + rTotal: config.regAmounts, + } + + assignParams := func(params []*types.Field, isResult bool) []ABIParamAssignment { + res := make([]ABIParamAssignment, len(params)) + for i, param := range params { + var name *ir.Name + if param.Nname != nil { + name = param.Nname.(*ir.Name) + } + res[i] = s.assignParam(param.Type, name, isResult) + } + return res + } + + info := &ABIParamResultInfo{config: config} + + // Inputs + info.inparams = assignParams(ft.RecvParams(), false) + s.stackOffset = types.RoundUp(s.stackOffset, int64(types.RegSize)) + info.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs + + // Outputs + s.rUsed = RegAmounts{} + info.outparams = assignParams(ft.Results(), true) + // The spill area is at a register-aligned offset and its size is rounded up to a register alignment. + // TODO in theory could align offset only to minimum required by spilled data types. + info.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize) + info.spillAreaSize = alignTo(s.spillOffset, types.RegSize) + info.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs + return info +} + +// ABIAnalyze returns the same result as ABIAnalyzeFuncType, but also +// updates the offsets of all the receiver, input, and output fields. 
+// If setNname is true, it also sets the FrameOffset of the Nname for +// the field(s); this is for use when compiling a function and figuring out +// spill locations. Doing this for callers can cause races for register +// outputs because their frame location transitions from BOGUS_FUNARG_OFFSET +// to zero to an as-if-AUTO offset that has no use for callers. +func (config *ABIConfig) ABIAnalyze(t *types.Type, setNname bool) *ABIParamResultInfo { + result := config.ABIAnalyzeFuncType(t) + + // Fill in the frame offsets for receiver, inputs, results + for i, f := range t.RecvParams() { + config.updateOffset(result, f, result.inparams[i], false, setNname) + } + for i, f := range t.Results() { + config.updateOffset(result, f, result.outparams[i], true, setNname) + } + return result +} + +func (config *ABIConfig) updateOffset(result *ABIParamResultInfo, f *types.Field, a ABIParamAssignment, isResult, setNname bool) { + if f.Offset != types.BADWIDTH { + base.Fatalf("field offset for %s at %s has been set to %d", f.Sym, base.FmtPos(f.Pos), f.Offset) + } + + // Everything except return values in registers has either a frame home (if not in a register) or a frame spill location. + if !isResult || len(a.Registers) == 0 { + // The type frame offset DOES NOT show effects of minimum frame size. + // Getting this wrong breaks stackmaps, see liveness/plive.go:WriteFuncMap and typebits/typebits.go:Set + off := a.FrameOffset(result) + if setNname && f.Nname != nil { + f.Nname.(*ir.Name).SetFrameOffset(off) + f.Nname.(*ir.Name).SetIsOutputParamInRegisters(false) + } + } else { + if setNname && f.Nname != nil { + fname := f.Nname.(*ir.Name) + fname.SetIsOutputParamInRegisters(true) + fname.SetFrameOffset(0) + } + } +} + +//...................................................................... +// +// Non-public portions. + +// regString produces a human-readable version of a RegIndex. 
+func (c *RegAmounts) regString(r RegIndex) string { + if int(r) < c.intRegs { + return fmt.Sprintf("I%d", int(r)) + } else if int(r) < c.intRegs+c.floatRegs { + return fmt.Sprintf("F%d", int(r)-c.intRegs) + } + return fmt.Sprintf("%d", r) +} + +// ToString method renders an ABIParamAssignment in human-readable +// form, suitable for debugging or unit testing. +func (ri *ABIParamAssignment) ToString(config *ABIConfig, extra bool) string { + regs := "R{" + offname := "spilloffset" // offset is for spill for register(s) + if len(ri.Registers) == 0 { + offname = "offset" // offset is for memory arg + } + for _, r := range ri.Registers { + regs += " " + config.regAmounts.regString(r) + if extra { + regs += fmt.Sprintf("(%d)", r) + } + } + if extra { + regs += fmt.Sprintf(" | #I=%d, #F=%d", config.regAmounts.intRegs, config.regAmounts.floatRegs) + } + return fmt.Sprintf("%s } %s: %d typ: %v", regs, offname, ri.offset, ri.Type) +} + +// String method renders an ABIParamResultInfo in human-readable +// form, suitable for debugging or unit testing. +func (ri *ABIParamResultInfo) String() string { + res := "" + for k, p := range ri.inparams { + res += fmt.Sprintf("IN %d: %s\n", k, p.ToString(ri.config, false)) + } + for k, r := range ri.outparams { + res += fmt.Sprintf("OUT %d: %s\n", k, r.ToString(ri.config, false)) + } + res += fmt.Sprintf("offsetToSpillArea: %d spillAreaSize: %d", + ri.offsetToSpillArea, ri.spillAreaSize) + return res +} + +// assignState holds intermediate state during the register assigning process +// for a given function signature. +type assignState struct { + rTotal RegAmounts // total reg amounts from ABI rules + rUsed RegAmounts // regs used by params completely assigned so far + stackOffset int64 // current stack offset + spillOffset int64 // current spill offset +} + +// align returns a rounded up to t's alignment. 
+func align(a int64, t *types.Type) int64 { + return alignTo(a, int(uint8(t.Alignment()))) +} + +// alignTo returns a rounded up to t, where t must be 0 or a power of 2. +func alignTo(a int64, t int) int64 { + if t == 0 { + return a + } + return types.RoundUp(a, int64(t)) +} + +// nextSlot allocates the next available slot for typ. +func nextSlot(offsetp *int64, typ *types.Type) int64 { + offset := align(*offsetp, typ) + *offsetp = offset + typ.Size() + return offset +} + +// allocateRegs returns an ordered list of register indices for a parameter or result +// that we've just determined to be register-assignable. The number of registers +// needed is assumed to be stored in state.pUsed. +func (state *assignState) allocateRegs(regs []RegIndex, t *types.Type) []RegIndex { + if t.Size() == 0 { + return regs + } + ri := state.rUsed.intRegs + rf := state.rUsed.floatRegs + if t.IsScalar() || t.IsPtrShaped() { + if t.IsComplex() { + regs = append(regs, RegIndex(rf+state.rTotal.intRegs), RegIndex(rf+1+state.rTotal.intRegs)) + rf += 2 + } else if t.IsFloat() { + regs = append(regs, RegIndex(rf+state.rTotal.intRegs)) + rf += 1 + } else { + n := (int(t.Size()) + types.RegSize - 1) / types.RegSize + for i := 0; i < n; i++ { // looking ahead to really big integers + regs = append(regs, RegIndex(ri)) + ri += 1 + } + } + state.rUsed.intRegs = ri + state.rUsed.floatRegs = rf + return regs + } else { + typ := t.Kind() + switch typ { + case types.TARRAY: + for i := int64(0); i < t.NumElem(); i++ { + regs = state.allocateRegs(regs, t.Elem()) + } + return regs + case types.TSTRUCT: + for _, f := range t.Fields() { + regs = state.allocateRegs(regs, f.Type) + } + return regs + case types.TSLICE: + return state.allocateRegs(regs, synthSlice) + case types.TSTRING: + return state.allocateRegs(regs, synthString) + case types.TINTER: + return state.allocateRegs(regs, synthIface) + } + } + base.Fatalf("was not expecting type %s", t) + panic("unreachable") +} + +// synthOnce ensures that we 
only create the synth* fake types once. +var synthOnce sync.Once + +// synthSlice, synthString, and syncIface are synthesized struct types +// meant to capture the underlying implementations of string/slice/interface. +var synthSlice *types.Type +var synthString *types.Type +var synthIface *types.Type + +// setup performs setup for the register assignment utilities, manufacturing +// a small set of synthesized types that we'll need along the way. +func setup() { + synthOnce.Do(func() { + fname := types.BuiltinPkg.Lookup + nxp := src.NoXPos + bp := types.NewPtr(types.Types[types.TUINT8]) + it := types.Types[types.TINT] + synthSlice = types.NewStruct([]*types.Field{ + types.NewField(nxp, fname("ptr"), bp), + types.NewField(nxp, fname("len"), it), + types.NewField(nxp, fname("cap"), it), + }) + types.CalcStructSize(synthSlice) + synthString = types.NewStruct([]*types.Field{ + types.NewField(nxp, fname("data"), bp), + types.NewField(nxp, fname("len"), it), + }) + types.CalcStructSize(synthString) + unsp := types.Types[types.TUNSAFEPTR] + synthIface = types.NewStruct([]*types.Field{ + types.NewField(nxp, fname("f1"), unsp), + types.NewField(nxp, fname("f2"), unsp), + }) + types.CalcStructSize(synthIface) + }) +} + +// assignParam processes a given receiver, param, or result +// of field f to determine whether it can be register assigned. +// The result of the analysis is recorded in the result +// ABIParamResultInfo held in 'state'. 
+func (state *assignState) assignParam(typ *types.Type, name *ir.Name, isResult bool) ABIParamAssignment { + registers := state.tryAllocRegs(typ) + + var offset int64 = -1 + if registers == nil { // stack allocated; needs stack slot + offset = nextSlot(&state.stackOffset, typ) + } else if !isResult { // register-allocated param; needs spill slot + offset = nextSlot(&state.spillOffset, typ) + } + + return ABIParamAssignment{ + Type: typ, + Name: name, + Registers: registers, + offset: int32(offset), + } +} + +// tryAllocRegs attempts to allocate registers to represent a +// parameter of the given type. If unsuccessful, it returns nil. +func (state *assignState) tryAllocRegs(typ *types.Type) []RegIndex { + if typ.Size() == 0 { + return nil // zero-size parameters are defined as being stack allocated + } + + intRegs, floatRegs := typ.Registers() + if int(intRegs) > state.rTotal.intRegs-state.rUsed.intRegs || int(floatRegs) > state.rTotal.floatRegs-state.rUsed.floatRegs { + return nil // too few available registers + } + + regs := make([]RegIndex, 0, int(intRegs)+int(floatRegs)) + return state.allocateRegs(regs, typ) +} + +// ComputePadding returns a list of "post element" padding values in +// the case where we have a structure being passed in registers. Given +// a param assignment corresponding to a struct, it returns a list +// containing padding values for each field, e.g. the Kth element in +// the list is the amount of padding between field K and the following +// field. For things that are not structs (or structs without padding) +// it returns a list of zeros. Example: +// +// type small struct { +// x uint16 +// y uint8 +// z int32 +// w int32 +// } +// +// For this struct we would return a list [0, 1, 0, 0], meaning that +// we have one byte of padding after the second field, and no bytes of +// padding after any of the other fields. 
Input parameter "storage" is +// a slice with enough capacity to accommodate padding elements for +// the architected register set in question. +func (pa *ABIParamAssignment) ComputePadding(storage []uint64) []uint64 { + nr := len(pa.Registers) + padding := storage[:nr] + for i := 0; i < nr; i++ { + padding[i] = 0 + } + if pa.Type.Kind() != types.TSTRUCT || nr == 0 { + return padding + } + types := make([]*types.Type, 0, nr) + types = appendParamTypes(types, pa.Type) + if len(types) != nr { + panic("internal error") + } + off := int64(0) + for idx, t := range types { + ts := t.Size() + off += int64(ts) + if idx < len(types)-1 { + noff := align(off, types[idx+1]) + if noff != off { + padding[idx] = uint64(noff - off) + } + } + } + return padding +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/abt/avlint32.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/abt/avlint32.go new file mode 100644 index 0000000000000000000000000000000000000000..28c1642c6ec48c0c3a679963e23e11f5c45fe488 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/abt/avlint32.go @@ -0,0 +1,832 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abt + +import ( + "fmt" + "strconv" + "strings" +) + +const ( + LEAF_HEIGHT = 1 + ZERO_HEIGHT = 0 + NOT_KEY32 = int32(-0x80000000) +) + +// T is the exported applicative balanced tree data type. +// A T can be used as a value; updates to one copy of the value +// do not change other copies. +type T struct { + root *node32 + size int +} + +// node32 is the internal tree node data type +type node32 struct { + // Standard conventions hold for left = smaller, right = larger + left, right *node32 + data interface{} + key int32 + height_ int8 +} + +func makeNode(key int32) *node32 { + return &node32{key: key, height_: LEAF_HEIGHT} +} + +// IsEmpty returns true iff t is empty. 
+func (t *T) IsEmpty() bool { + return t.root == nil +} + +// IsSingle returns true iff t is a singleton (leaf). +func (t *T) IsSingle() bool { + return t.root != nil && t.root.isLeaf() +} + +// VisitInOrder applies f to the key and data pairs in t, +// with keys ordered from smallest to largest. +func (t *T) VisitInOrder(f func(int32, interface{})) { + if t.root == nil { + return + } + t.root.visitInOrder(f) +} + +func (n *node32) nilOrData() interface{} { + if n == nil { + return nil + } + return n.data +} + +func (n *node32) nilOrKeyAndData() (k int32, d interface{}) { + if n == nil { + k = NOT_KEY32 + d = nil + } else { + k = n.key + d = n.data + } + return +} + +func (n *node32) height() int8 { + if n == nil { + return 0 + } + return n.height_ +} + +// Find returns the data associated with x in the tree, or +// nil if x is not in the tree. +func (t *T) Find(x int32) interface{} { + return t.root.find(x).nilOrData() +} + +// Insert either adds x to the tree if x was not previously +// a key in the tree, or updates the data for x in the tree if +// x was already a key in the tree. The previous data associated +// with x is returned, and is nil if x was not previously a +// key in the tree. 
+func (t *T) Insert(x int32, data interface{}) interface{} { + if x == NOT_KEY32 { + panic("Cannot use sentinel value -0x80000000 as key") + } + n := t.root + var newroot *node32 + var o *node32 + if n == nil { + n = makeNode(x) + newroot = n + } else { + newroot, n, o = n.aInsert(x) + } + var r interface{} + if o != nil { + r = o.data + } else { + t.size++ + } + n.data = data + t.root = newroot + return r +} + +func (t *T) Copy() *T { + u := *t + return &u +} + +func (t *T) Delete(x int32) interface{} { + n := t.root + if n == nil { + return nil + } + d, s := n.aDelete(x) + if d == nil { + return nil + } + t.root = s + t.size-- + return d.data +} + +func (t *T) DeleteMin() (int32, interface{}) { + n := t.root + if n == nil { + return NOT_KEY32, nil + } + d, s := n.aDeleteMin() + if d == nil { + return NOT_KEY32, nil + } + t.root = s + t.size-- + return d.key, d.data +} + +func (t *T) DeleteMax() (int32, interface{}) { + n := t.root + if n == nil { + return NOT_KEY32, nil + } + d, s := n.aDeleteMax() + if d == nil { + return NOT_KEY32, nil + } + t.root = s + t.size-- + return d.key, d.data +} + +func (t *T) Size() int { + return t.size +} + +// Intersection returns the intersection of t and u, where the result +// data for any common keys is given by f(t's data, u's data) -- f need +// not be symmetric. If f returns nil, then the key and data are not +// added to the result. If f itself is nil, then whatever value was +// already present in the smaller set is used. +func (t *T) Intersection(u *T, f func(x, y interface{}) interface{}) *T { + if t.Size() == 0 || u.Size() == 0 { + return &T{} + } + + // For faster execution and less allocation, prefer t smaller, iterate over t. 
+ if t.Size() <= u.Size() { + v := t.Copy() + for it := t.Iterator(); !it.Done(); { + k, d := it.Next() + e := u.Find(k) + if e == nil { + v.Delete(k) + continue + } + if f == nil { + continue + } + if c := f(d, e); c != d { + if c == nil { + v.Delete(k) + } else { + v.Insert(k, c) + } + } + } + return v + } + v := u.Copy() + for it := u.Iterator(); !it.Done(); { + k, e := it.Next() + d := t.Find(k) + if d == nil { + v.Delete(k) + continue + } + if f == nil { + continue + } + if c := f(d, e); c != d { + if c == nil { + v.Delete(k) + } else { + v.Insert(k, c) + } + } + } + + return v +} + +// Union returns the union of t and u, where the result data for any common keys +// is given by f(t's data, u's data) -- f need not be symmetric. If f returns nil, +// then the key and data are not added to the result. If f itself is nil, then +// whatever value was already present in the larger set is used. +func (t *T) Union(u *T, f func(x, y interface{}) interface{}) *T { + if t.Size() == 0 { + return u + } + if u.Size() == 0 { + return t + } + + if t.Size() >= u.Size() { + v := t.Copy() + for it := u.Iterator(); !it.Done(); { + k, e := it.Next() + d := t.Find(k) + if d == nil { + v.Insert(k, e) + continue + } + if f == nil { + continue + } + if c := f(d, e); c != d { + if c == nil { + v.Delete(k) + } else { + v.Insert(k, c) + } + } + } + return v + } + + v := u.Copy() + for it := t.Iterator(); !it.Done(); { + k, d := it.Next() + e := u.Find(k) + if e == nil { + v.Insert(k, d) + continue + } + if f == nil { + continue + } + if c := f(d, e); c != d { + if c == nil { + v.Delete(k) + } else { + v.Insert(k, c) + } + } + } + return v +} + +// Difference returns the difference of t and u, subject to the result +// of f applied to data corresponding to equal keys. If f returns nil +// (or if f is nil) then the key+data are excluded, as usual. If f +// returns not-nil, then that key+data pair is inserted. instead. 
+func (t *T) Difference(u *T, f func(x, y interface{}) interface{}) *T { + if t.Size() == 0 { + return &T{} + } + if u.Size() == 0 { + return t + } + v := t.Copy() + for it := t.Iterator(); !it.Done(); { + k, d := it.Next() + e := u.Find(k) + if e != nil { + if f == nil { + v.Delete(k) + continue + } + c := f(d, e) + if c == nil { + v.Delete(k) + continue + } + if c != d { + v.Insert(k, c) + } + } + } + return v +} + +func (t *T) Iterator() Iterator { + return Iterator{it: t.root.iterator()} +} + +func (t *T) Equals(u *T) bool { + if t == u { + return true + } + if t.Size() != u.Size() { + return false + } + return t.root.equals(u.root) +} + +func (t *T) String() string { + var b strings.Builder + first := true + for it := t.Iterator(); !it.Done(); { + k, v := it.Next() + if first { + first = false + } else { + b.WriteString("; ") + } + b.WriteString(strconv.FormatInt(int64(k), 10)) + b.WriteString(":") + fmt.Fprint(&b, v) + } + return b.String() +} + +func (t *node32) equals(u *node32) bool { + if t == u { + return true + } + it, iu := t.iterator(), u.iterator() + for !it.done() && !iu.done() { + nt := it.next() + nu := iu.next() + if nt == nu { + continue + } + if nt.key != nu.key { + return false + } + if nt.data != nu.data { + return false + } + } + return it.done() == iu.done() +} + +func (t *T) Equiv(u *T, eqv func(x, y interface{}) bool) bool { + if t == u { + return true + } + if t.Size() != u.Size() { + return false + } + return t.root.equiv(u.root, eqv) +} + +func (t *node32) equiv(u *node32, eqv func(x, y interface{}) bool) bool { + if t == u { + return true + } + it, iu := t.iterator(), u.iterator() + for !it.done() && !iu.done() { + nt := it.next() + nu := iu.next() + if nt == nu { + continue + } + if nt.key != nu.key { + return false + } + if !eqv(nt.data, nu.data) { + return false + } + } + return it.done() == iu.done() +} + +type iterator struct { + parents []*node32 +} + +type Iterator struct { + it iterator +} + +func (it *Iterator) Next() (int32, 
interface{}) { + x := it.it.next() + if x == nil { + return NOT_KEY32, nil + } + return x.key, x.data +} + +func (it *Iterator) Done() bool { + return len(it.it.parents) == 0 +} + +func (t *node32) iterator() iterator { + if t == nil { + return iterator{} + } + it := iterator{parents: make([]*node32, 0, int(t.height()))} + it.leftmost(t) + return it +} + +func (it *iterator) leftmost(t *node32) { + for t != nil { + it.parents = append(it.parents, t) + t = t.left + } +} + +func (it *iterator) done() bool { + return len(it.parents) == 0 +} + +func (it *iterator) next() *node32 { + l := len(it.parents) + if l == 0 { + return nil + } + x := it.parents[l-1] // return value + if x.right != nil { + it.leftmost(x.right) + return x + } + // discard visited top of parents + l-- + it.parents = it.parents[:l] + y := x // y is known visited/returned + for l > 0 && y == it.parents[l-1].right { + y = it.parents[l-1] + l-- + it.parents = it.parents[:l] + } + + return x +} + +// Min returns the minimum element of t. +// If t is empty, then (NOT_KEY32, nil) is returned. +func (t *T) Min() (k int32, d interface{}) { + return t.root.min().nilOrKeyAndData() +} + +// Max returns the maximum element of t. +// If t is empty, then (NOT_KEY32, nil) is returned. +func (t *T) Max() (k int32, d interface{}) { + return t.root.max().nilOrKeyAndData() +} + +// Glb returns the greatest-lower-bound-exclusive of x and the associated +// data. If x has no glb in the tree, then (NOT_KEY32, nil) is returned. +func (t *T) Glb(x int32) (k int32, d interface{}) { + return t.root.glb(x, false).nilOrKeyAndData() +} + +// GlbEq returns the greatest-lower-bound-inclusive of x and the associated +// data. If x has no glbEQ in the tree, then (NOT_KEY32, nil) is returned. +func (t *T) GlbEq(x int32) (k int32, d interface{}) { + return t.root.glb(x, true).nilOrKeyAndData() +} + +// Lub returns the least-upper-bound-exclusive of x and the associated +// data. 
If x has no lub in the tree, then (NOT_KEY32, nil) is returned. +func (t *T) Lub(x int32) (k int32, d interface{}) { + return t.root.lub(x, false).nilOrKeyAndData() +} + +// LubEq returns the least-upper-bound-inclusive of x and the associated +// data. If x has no lubEq in the tree, then (NOT_KEY32, nil) is returned. +func (t *T) LubEq(x int32) (k int32, d interface{}) { + return t.root.lub(x, true).nilOrKeyAndData() +} + +func (t *node32) isLeaf() bool { + return t.left == nil && t.right == nil && t.height_ == LEAF_HEIGHT +} + +func (t *node32) visitInOrder(f func(int32, interface{})) { + if t.left != nil { + t.left.visitInOrder(f) + } + f(t.key, t.data) + if t.right != nil { + t.right.visitInOrder(f) + } +} + +func (t *node32) find(key int32) *node32 { + for t != nil { + if key < t.key { + t = t.left + } else if key > t.key { + t = t.right + } else { + return t + } + } + return nil +} + +func (t *node32) min() *node32 { + if t == nil { + return t + } + for t.left != nil { + t = t.left + } + return t +} + +func (t *node32) max() *node32 { + if t == nil { + return t + } + for t.right != nil { + t = t.right + } + return t +} + +func (t *node32) glb(key int32, allow_eq bool) *node32 { + var best *node32 = nil + for t != nil { + if key <= t.key { + if allow_eq && key == t.key { + return t + } + // t is too big, glb is to left. + t = t.left + } else { + // t is a lower bound, record it and seek a better one. + best = t + t = t.right + } + } + return best +} + +func (t *node32) lub(key int32, allow_eq bool) *node32 { + var best *node32 = nil + for t != nil { + if key >= t.key { + if allow_eq && key == t.key { + return t + } + // t is too small, lub is to right. + t = t.right + } else { + // t is an upper bound, record it and seek a better one. + best = t + t = t.left + } + } + return best +} + +func (t *node32) aInsert(x int32) (newroot, newnode, oldnode *node32) { + // oldnode default of nil is good, others should be assigned. 
+ if x == t.key { + oldnode = t + newt := *t + newnode = &newt + newroot = newnode + return + } + if x < t.key { + if t.left == nil { + t = t.copy() + n := makeNode(x) + t.left = n + newnode = n + newroot = t + t.height_ = 2 // was balanced w/ 0, sibling is height 0 or 1 + return + } + var new_l *node32 + new_l, newnode, oldnode = t.left.aInsert(x) + t = t.copy() + t.left = new_l + if new_l.height() > 1+t.right.height() { + newroot = t.aLeftIsHigh(newnode) + } else { + t.height_ = 1 + max(t.left.height(), t.right.height()) + newroot = t + } + } else { // x > t.key + if t.right == nil { + t = t.copy() + n := makeNode(x) + t.right = n + newnode = n + newroot = t + t.height_ = 2 // was balanced w/ 0, sibling is height 0 or 1 + return + } + var new_r *node32 + new_r, newnode, oldnode = t.right.aInsert(x) + t = t.copy() + t.right = new_r + if new_r.height() > 1+t.left.height() { + newroot = t.aRightIsHigh(newnode) + } else { + t.height_ = 1 + max(t.left.height(), t.right.height()) + newroot = t + } + } + return +} + +func (t *node32) aDelete(key int32) (deleted, newSubTree *node32) { + if t == nil { + return nil, nil + } + + if key < t.key { + oh := t.left.height() + d, tleft := t.left.aDelete(key) + if tleft == t.left { + return d, t + } + return d, t.copy().aRebalanceAfterLeftDeletion(oh, tleft) + } else if key > t.key { + oh := t.right.height() + d, tright := t.right.aDelete(key) + if tright == t.right { + return d, t + } + return d, t.copy().aRebalanceAfterRightDeletion(oh, tright) + } + + if t.height() == LEAF_HEIGHT { + return t, nil + } + + // Interior delete by removing left.Max or right.Min, + // then swapping contents + if t.left.height() > t.right.height() { + oh := t.left.height() + d, tleft := t.left.aDeleteMax() + r := t + t = t.copy() + t.data, t.key = d.data, d.key + return r, t.aRebalanceAfterLeftDeletion(oh, tleft) + } + + oh := t.right.height() + d, tright := t.right.aDeleteMin() + r := t + t = t.copy() + t.data, t.key = d.data, d.key + return r, 
t.aRebalanceAfterRightDeletion(oh, tright) +} + +func (t *node32) aDeleteMin() (deleted, newSubTree *node32) { + if t == nil { + return nil, nil + } + if t.left == nil { // leaf or left-most + return t, t.right + } + oh := t.left.height() + d, tleft := t.left.aDeleteMin() + if tleft == t.left { + return d, t + } + return d, t.copy().aRebalanceAfterLeftDeletion(oh, tleft) +} + +func (t *node32) aDeleteMax() (deleted, newSubTree *node32) { + if t == nil { + return nil, nil + } + + if t.right == nil { // leaf or right-most + return t, t.left + } + + oh := t.right.height() + d, tright := t.right.aDeleteMax() + if tright == t.right { + return d, t + } + return d, t.copy().aRebalanceAfterRightDeletion(oh, tright) +} + +func (t *node32) aRebalanceAfterLeftDeletion(oldLeftHeight int8, tleft *node32) *node32 { + t.left = tleft + + if oldLeftHeight == tleft.height() || oldLeftHeight == t.right.height() { + // this node is still balanced and its height is unchanged + return t + } + + if oldLeftHeight > t.right.height() { + // left was larger + t.height_-- + return t + } + + // left height fell by 1 and it was already less than right height + t.right = t.right.copy() + return t.aRightIsHigh(nil) +} + +func (t *node32) aRebalanceAfterRightDeletion(oldRightHeight int8, tright *node32) *node32 { + t.right = tright + + if oldRightHeight == tright.height() || oldRightHeight == t.left.height() { + // this node is still balanced and its height is unchanged + return t + } + + if oldRightHeight > t.left.height() { + // left was larger + t.height_-- + return t + } + + // right height fell by 1 and it was already less than left height + t.left = t.left.copy() + return t.aLeftIsHigh(nil) +} + +// aRightIsHigh does rotations necessary to fix a high right child +// assume that t and t.right are already fresh copies. 
+func (t *node32) aRightIsHigh(newnode *node32) *node32 { + right := t.right + if right.right.height() < right.left.height() { + // double rotation + if newnode != right.left { + right.left = right.left.copy() + } + t.right = right.leftToRoot() + } + t = t.rightToRoot() + return t +} + +// aLeftIsHigh does rotations necessary to fix a high left child +// assume that t and t.left are already fresh copies. +func (t *node32) aLeftIsHigh(newnode *node32) *node32 { + left := t.left + if left.left.height() < left.right.height() { + // double rotation + if newnode != left.right { + left.right = left.right.copy() + } + t.left = left.rightToRoot() + } + t = t.leftToRoot() + return t +} + +// rightToRoot does that rotation, modifying t and t.right in the process. +func (t *node32) rightToRoot() *node32 { + // this + // left right + // rl rr + // + // becomes + // + // right + // this rr + // left rl + // + right := t.right + rl := right.left + right.left = t + // parent's child ptr fixed in caller + t.right = rl + t.height_ = 1 + max(rl.height(), t.left.height()) + right.height_ = 1 + max(t.height(), right.right.height()) + return right +} + +// leftToRoot does that rotation, modifying t and t.left in the process. 
+func (t *node32) leftToRoot() *node32 { + // this + // left right + // ll lr + // + // becomes + // + // left + // ll this + // lr right + // + left := t.left + lr := left.right + left.right = t + // parent's child ptr fixed in caller + t.left = lr + t.height_ = 1 + max(lr.height(), t.right.height()) + left.height_ = 1 + max(t.height(), left.left.height()) + return left +} + +func max(a, b int8) int8 { + if a > b { + return a + } + return b +} + +func (t *node32) copy() *node32 { + u := *t + return &u +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/abt/avlint32_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/abt/avlint32_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7fa9ed4fd68236d7dcb2198fc95c97f91fcd7626 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/abt/avlint32_test.go @@ -0,0 +1,700 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abt + +import ( + "fmt" + "strconv" + "testing" +) + +func makeTree(te *testing.T, x []int32, check bool) (t *T, k int, min, max int32) { + t = &T{} + k = 0 + min = int32(0x7fffffff) + max = int32(-0x80000000) + history := []*T{} + + for _, d := range x { + d = d + d // double everything for Glb/Lub testing. 
+ + if check { + history = append(history, t.Copy()) + } + + t.Insert(d, stringer(fmt.Sprintf("%v", d))) + + k++ + if d < min { + min = d + } + if d > max { + max = d + } + + if !check { + continue + } + + for j, old := range history { + s, i := old.wellFormed() + if s != "" { + te.Errorf("Old tree consistency problem %v at k=%d, j=%d, old=\n%v, t=\n%v", s, k, j, old.DebugString(), t.DebugString()) + return + } + if i != j { + te.Errorf("Wrong tree size %v, expected %v for old %v", i, j, old.DebugString()) + } + } + s, i := t.wellFormed() + if s != "" { + te.Errorf("Tree consistency problem at %v", s) + return + } + if i != k { + te.Errorf("Wrong tree size %v, expected %v for %v", i, k, t.DebugString()) + return + } + if t.Size() != k { + te.Errorf("Wrong t.Size() %v, expected %v for %v", t.Size(), k, t.DebugString()) + return + } + } + return +} + +func applicInsert(te *testing.T, x []int32) { + makeTree(te, x, true) +} + +func applicFind(te *testing.T, x []int32) { + t, _, _, _ := makeTree(te, x, false) + + for _, d := range x { + d = d + d // double everything for Glb/Lub testing. + s := fmt.Sprintf("%v", d) + f := t.Find(d) + + // data + if s != fmt.Sprint(f) { + te.Errorf("s(%v) != f(%v)", s, f) + } + } +} + +func applicBounds(te *testing.T, x []int32) { + t, _, min, max := makeTree(te, x, false) + for _, d := range x { + d = d + d // double everything for Glb/Lub testing. 
+ s := fmt.Sprintf("%v", d) + + kg, g := t.Glb(d + 1) + kge, ge := t.GlbEq(d) + kl, l := t.Lub(d - 1) + kle, le := t.LubEq(d) + + // keys + if d != kg { + te.Errorf("d(%v) != kg(%v)", d, kg) + } + if d != kl { + te.Errorf("d(%v) != kl(%v)", d, kl) + } + if d != kge { + te.Errorf("d(%v) != kge(%v)", d, kge) + } + if d != kle { + te.Errorf("d(%v) != kle(%v)", d, kle) + } + // data + if s != fmt.Sprint(g) { + te.Errorf("s(%v) != g(%v)", s, g) + } + if s != fmt.Sprint(l) { + te.Errorf("s(%v) != l(%v)", s, l) + } + if s != fmt.Sprint(ge) { + te.Errorf("s(%v) != ge(%v)", s, ge) + } + if s != fmt.Sprint(le) { + te.Errorf("s(%v) != le(%v)", s, le) + } + } + + for _, d := range x { + d = d + d // double everything for Glb/Lub testing. + s := fmt.Sprintf("%v", d) + kge, ge := t.GlbEq(d + 1) + kle, le := t.LubEq(d - 1) + if d != kge { + te.Errorf("d(%v) != kge(%v)", d, kge) + } + if d != kle { + te.Errorf("d(%v) != kle(%v)", d, kle) + } + if s != fmt.Sprint(ge) { + te.Errorf("s(%v) != ge(%v)", s, ge) + } + if s != fmt.Sprint(le) { + te.Errorf("s(%v) != le(%v)", s, le) + } + } + + kg, g := t.Glb(min) + kge, ge := t.GlbEq(min - 1) + kl, l := t.Lub(max) + kle, le := t.LubEq(max + 1) + fmin := t.Find(min - 1) + fmax := t.Find(max + 1) + + if kg != NOT_KEY32 || kge != NOT_KEY32 || kl != NOT_KEY32 || kle != NOT_KEY32 { + te.Errorf("Got non-error-key for missing query") + } + + if g != nil || ge != nil || l != nil || le != nil || fmin != nil || fmax != nil { + te.Errorf("Got non-error-data for missing query") + } +} + +func applicDeleteMin(te *testing.T, x []int32) { + t, _, _, _ := makeTree(te, x, false) + _, size := t.wellFormed() + history := []*T{} + for !t.IsEmpty() { + k, _ := t.Min() + history = append(history, t.Copy()) + kd, _ := t.DeleteMin() + if kd != k { + te.Errorf("Deleted minimum key %v not equal to minimum %v", kd, k) + } + for j, old := range history { + s, i := old.wellFormed() + if s != "" { + te.Errorf("Tree consistency problem %s at old after DeleteMin, 
old=\n%stree=\n%v", s, old.DebugString(), t.DebugString()) + return + } + if i != len(x)-j { + te.Errorf("Wrong old tree size %v, expected %v after DeleteMin, old=\n%vtree\n%v", i, len(x)-j, old.DebugString(), t.DebugString()) + return + } + } + size-- + s, i := t.wellFormed() + if s != "" { + te.Errorf("Tree consistency problem at %v after DeleteMin, tree=\n%v", s, t.DebugString()) + return + } + if i != size { + te.Errorf("Wrong tree size %v, expected %v after DeleteMin", i, size) + return + } + if t.Size() != size { + te.Errorf("Wrong t.Size() %v, expected %v for %v", t.Size(), i, t.DebugString()) + return + } + } +} + +func applicDeleteMax(te *testing.T, x []int32) { + t, _, _, _ := makeTree(te, x, false) + _, size := t.wellFormed() + history := []*T{} + + for !t.IsEmpty() { + k, _ := t.Max() + history = append(history, t.Copy()) + kd, _ := t.DeleteMax() + if kd != k { + te.Errorf("Deleted maximum key %v not equal to maximum %v", kd, k) + } + + for j, old := range history { + s, i := old.wellFormed() + if s != "" { + te.Errorf("Tree consistency problem %s at old after DeleteMin, old=\n%stree=\n%v", s, old.DebugString(), t.DebugString()) + return + } + if i != len(x)-j { + te.Errorf("Wrong old tree size %v, expected %v after DeleteMin, old=\n%vtree\n%v", i, len(x)-j, old.DebugString(), t.DebugString()) + return + } + } + + size-- + s, i := t.wellFormed() + if s != "" { + te.Errorf("Tree consistency problem at %v after DeleteMax, tree=\n%v", s, t.DebugString()) + return + } + if i != size { + te.Errorf("Wrong tree size %v, expected %v after DeleteMax", i, size) + return + } + if t.Size() != size { + te.Errorf("Wrong t.Size() %v, expected %v for %v", t.Size(), i, t.DebugString()) + return + } + } +} + +func applicDelete(te *testing.T, x []int32) { + t, _, _, _ := makeTree(te, x, false) + _, size := t.wellFormed() + history := []*T{} + + missing := t.Delete(11) + if missing != nil { + te.Errorf("Returned a value when there should have been none, %v", missing) + 
return + } + + s, i := t.wellFormed() + if s != "" { + te.Errorf("Tree consistency problem at %v after delete of missing value, tree=\n%v", s, t.DebugString()) + return + } + if size != i { + te.Errorf("Delete of missing data should not change tree size, expected %d, got %d", size, i) + return + } + + for _, d := range x { + d += d // double + vWant := fmt.Sprintf("%v", d) + history = append(history, t.Copy()) + v := t.Delete(d) + + for j, old := range history { + s, i := old.wellFormed() + if s != "" { + te.Errorf("Tree consistency problem %s at old after DeleteMin, old=\n%stree=\n%v", s, old.DebugString(), t.DebugString()) + return + } + if i != len(x)-j { + te.Errorf("Wrong old tree size %v, expected %v after DeleteMin, old=\n%vtree\n%v", i, len(x)-j, old.DebugString(), t.DebugString()) + return + } + } + + if v.(*sstring).s != vWant { + te.Errorf("Deleted %v expected %v but got %v", d, vWant, v) + return + } + size-- + s, i := t.wellFormed() + if s != "" { + te.Errorf("Tree consistency problem at %v after Delete %d, tree=\n%v", s, d, t.DebugString()) + return + } + if i != size { + te.Errorf("Wrong tree size %v, expected %v after Delete", i, size) + return + } + if t.Size() != size { + te.Errorf("Wrong t.Size() %v, expected %v for %v", t.Size(), i, t.DebugString()) + return + } + } + +} + +func applicIterator(te *testing.T, x []int32) { + t, _, _, _ := makeTree(te, x, false) + it := t.Iterator() + for !it.Done() { + k0, d0 := it.Next() + k1, d1 := t.DeleteMin() + if k0 != k1 || d0 != d1 { + te.Errorf("Iterator and deleteMin mismatch, k0, k1, d0, d1 = %v, %v, %v, %v", k0, k1, d0, d1) + return + } + } + if t.Size() != 0 { + te.Errorf("Iterator ended early, remaining tree = \n%s", t.DebugString()) + return + } +} + +func equiv(a, b interface{}) bool { + sa, sb := a.(*sstring), b.(*sstring) + return *sa == *sb +} + +func applicEquals(te *testing.T, x, y []int32) { + t, _, _, _ := makeTree(te, x, false) + u, _, _, _ := makeTree(te, y, false) + if !t.Equiv(t, equiv) 
{ + te.Errorf("Equiv failure, t == t, =\n%v", t.DebugString()) + return + } + if !t.Equiv(t.Copy(), equiv) { + te.Errorf("Equiv failure, t == t.Copy(), =\n%v", t.DebugString()) + return + } + if !t.Equiv(u, equiv) { + te.Errorf("Equiv failure, t == u, =\n%v", t.DebugString()) + return + } + v := t.Copy() + + v.DeleteMax() + if t.Equiv(v, equiv) { + te.Errorf("!Equiv failure, t != v, =\n%v\nand%v\n", t.DebugString(), v.DebugString()) + return + } + + if v.Equiv(u, equiv) { + te.Errorf("!Equiv failure, v != u, =\n%v\nand%v\n", v.DebugString(), u.DebugString()) + return + } + +} + +func tree(x []int32) *T { + t := &T{} + for _, d := range x { + t.Insert(d, stringer(fmt.Sprintf("%v", d))) + } + return t +} + +func treePlus1(x []int32) *T { + t := &T{} + for _, d := range x { + t.Insert(d, stringer(fmt.Sprintf("%v", d+1))) + } + return t +} +func TestApplicInsert(t *testing.T) { + applicInsert(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25}) + applicInsert(t, []int32{1, 2, 3, 4}) + applicInsert(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9}) + applicInsert(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25}) + applicInsert(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}) + applicInsert(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}) + applicInsert(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24}) + applicInsert(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2}) +} + +func TestApplicFind(t *testing.T) { + applicFind(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25}) + applicFind(t, []int32{1, 2, 3, 4}) + applicFind(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9}) + applicFind(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25}) + applicFind(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}) + applicFind(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}) + applicFind(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24}) + applicFind(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2}) +} + +func TestBounds(t *testing.T) { + applicBounds(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25}) + applicBounds(t, []int32{1, 2, 3, 4}) + applicBounds(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9}) + applicBounds(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25}) + applicBounds(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}) + applicBounds(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}) + applicBounds(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24}) + applicBounds(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2}) +} +func TestDeleteMin(t *testing.T) { + applicDeleteMin(t, []int32{1, 2, 3, 4}) + applicDeleteMin(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25}) + applicDeleteMin(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9}) + applicDeleteMin(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25}) + applicDeleteMin(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}) + applicDeleteMin(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}) + applicDeleteMin(t, 
[]int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24}) + applicDeleteMin(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2}) +} +func TestDeleteMax(t *testing.T) { + applicDeleteMax(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25}) + applicDeleteMax(t, []int32{1, 2, 3, 4}) + applicDeleteMax(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9}) + applicDeleteMax(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25}) + applicDeleteMax(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}) + applicDeleteMax(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}) + applicDeleteMax(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24}) + applicDeleteMax(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2}) +} +func TestDelete(t *testing.T) { + applicDelete(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25}) + applicDelete(t, []int32{1, 2, 3, 4}) + applicDelete(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9}) + applicDelete(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25}) + applicDelete(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}) + applicDelete(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}) + applicDelete(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24}) + applicDelete(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2}) +} +func TestIterator(t *testing.T) { + applicIterator(t, []int32{1, 2, 3, 
4}) + applicIterator(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9}) + applicIterator(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25}) + applicIterator(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25}) + applicIterator(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}) + applicIterator(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}) + applicIterator(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24}) + applicIterator(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2}) +} +func TestEquals(t *testing.T) { + applicEquals(t, []int32{1, 2, 3, 4}, []int32{4, 3, 2, 1}) + + applicEquals(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25}, + []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25}) + applicEquals(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}, + []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}) + applicEquals(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24}, + []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2}) +} + +func first(x, y interface{}) interface{} { + return x +} +func second(x, y interface{}) interface{} { + return y +} +func alwaysNil(x, y interface{}) interface{} { + return nil +} +func smaller(x, y interface{}) interface{} { + xi, _ := strconv.Atoi(fmt.Sprint(x)) + yi, _ := strconv.Atoi(fmt.Sprint(y)) + if xi < yi { + return x + } + return y +} +func assert(t *testing.T, expected, got *T, what string) { + s, _ := got.wellFormed() + if s != "" { + 
t.Errorf("Tree consistency problem %v for 'got' in assert for %s, tree=\n%v", s, what, got.DebugString()) + return + } + + if !expected.Equiv(got, equiv) { + t.Errorf("%s fail, expected\n%vgot\n%v\n", what, expected.DebugString(), got.DebugString()) + } +} + +func TestSetOps(t *testing.T) { + A := tree([]int32{1, 2, 3, 4}) + B := tree([]int32{3, 4, 5, 6, 7}) + + AIB := tree([]int32{3, 4}) + ADB := tree([]int32{1, 2}) + BDA := tree([]int32{5, 6, 7}) + AUB := tree([]int32{1, 2, 3, 4, 5, 6, 7}) + AXB := tree([]int32{1, 2, 5, 6, 7}) + + aib1 := A.Intersection(B, first) + assert(t, AIB, aib1, "aib1") + if A.Find(3) != aib1.Find(3) { + t.Errorf("Failed aliasing/reuse check, A/aib1") + } + aib2 := A.Intersection(B, second) + assert(t, AIB, aib2, "aib2") + if B.Find(3) != aib2.Find(3) { + t.Errorf("Failed aliasing/reuse check, B/aib2") + } + aib3 := B.Intersection(A, first) + assert(t, AIB, aib3, "aib3") + if A.Find(3) != aib3.Find(3) { + // A is smaller, intersection favors reuse from smaller when function is "first" + t.Errorf("Failed aliasing/reuse check, A/aib3") + } + aib4 := B.Intersection(A, second) + assert(t, AIB, aib4, "aib4") + if A.Find(3) != aib4.Find(3) { + t.Errorf("Failed aliasing/reuse check, A/aib4") + } + + aub1 := A.Union(B, first) + assert(t, AUB, aub1, "aub1") + if B.Find(3) != aub1.Find(3) { + // B is larger, union favors reuse from larger when function is "first" + t.Errorf("Failed aliasing/reuse check, A/aub1") + } + aub2 := A.Union(B, second) + assert(t, AUB, aub2, "aub2") + if B.Find(3) != aub2.Find(3) { + t.Errorf("Failed aliasing/reuse check, B/aub2") + } + aub3 := B.Union(A, first) + assert(t, AUB, aub3, "aub3") + if B.Find(3) != aub3.Find(3) { + t.Errorf("Failed aliasing/reuse check, B/aub3") + } + aub4 := B.Union(A, second) + assert(t, AUB, aub4, "aub4") + if A.Find(3) != aub4.Find(3) { + t.Errorf("Failed aliasing/reuse check, A/aub4") + } + + axb1 := A.Union(B, alwaysNil) + assert(t, AXB, axb1, "axb1") + axb2 := B.Union(A, alwaysNil) + 
assert(t, AXB, axb2, "axb2") + + adb := A.Difference(B, alwaysNil) + assert(t, ADB, adb, "adb") + bda := B.Difference(A, nil) + assert(t, BDA, bda, "bda") + + Ap1 := treePlus1([]int32{1, 2, 3, 4}) + + ada1_1 := A.Difference(Ap1, smaller) + assert(t, A, ada1_1, "ada1_1") + ada1_2 := Ap1.Difference(A, smaller) + assert(t, A, ada1_2, "ada1_2") + +} + +type sstring struct { + s string +} + +func (s *sstring) String() string { + return s.s +} + +func stringer(s string) interface{} { + return &sstring{s} +} + +// wellFormed ensures that a red-black tree meets +// all of its invariants and returns a string identifying +// the first problem encountered. If there is no problem +// then the returned string is empty. The size is also +// returned to allow comparison of calculated tree size +// with expected. +func (t *T) wellFormed() (s string, i int) { + if t.root == nil { + s = "" + i = 0 + return + } + return t.root.wellFormedSubtree(nil, -0x80000000, 0x7fffffff) +} + +// wellFormedSubtree ensures that a red-black subtree meets +// all of its invariants and returns a string identifying +// the first problem encountered. If there is no problem +// then the returned string is empty. The size is also +// returned to allow comparison of calculated tree size +// with expected. +func (t *node32) wellFormedSubtree(parent *node32, keyMin, keyMax int32) (s string, i int) { + i = -1 // initialize to a failing value + s = "" // s is the reason for failure; empty means okay. 
+ + if keyMin >= t.key { + s = " min >= t.key" + return + } + + if keyMax <= t.key { + s = " max <= t.key" + return + } + + l := t.left + r := t.right + + lh := l.height() + rh := r.height() + mh := max(lh, rh) + th := t.height() + dh := lh - rh + if dh < 0 { + dh = -dh + } + if dh > 1 { + s = fmt.Sprintf(" dh > 1, t=%d", t.key) + return + } + + if l == nil && r == nil { + if th != LEAF_HEIGHT { + s = " leaf height wrong" + return + } + } + + if th != mh+1 { + s = " th != mh + 1" + return + } + + if l != nil { + if th <= lh { + s = " t.height <= l.height" + } else if th > 2+lh { + s = " t.height > 2+l.height" + } else if t.key <= l.key { + s = " t.key <= l.key" + } + if s != "" { + return + } + + } + + if r != nil { + if th <= rh { + s = " t.height <= r.height" + } else if th > 2+rh { + s = " t.height > 2+r.height" + } else if t.key >= r.key { + s = " t.key >= r.key" + } + if s != "" { + return + } + } + + ii := 1 + if l != nil { + res, il := l.wellFormedSubtree(t, keyMin, t.key) + if res != "" { + s = ".L" + res + return + } + ii += il + } + if r != nil { + res, ir := r.wellFormedSubtree(t, t.key, keyMax) + if res != "" { + s = ".R" + res + return + } + ii += ir + } + i = ii + return +} + +func (t *T) DebugString() string { + if t.root == nil { + return "" + } + return t.root.DebugString(0) +} + +// DebugString prints the tree with nested information +// to allow an eyeball check on the tree balance. 
+func (t *node32) DebugString(indent int) string { + s := "" + if t.left != nil { + s = s + t.left.DebugString(indent+1) + } + for i := 0; i < indent; i++ { + s = s + " " + } + s = s + fmt.Sprintf("%v=%v:%d\n", t.key, t.data, t.height_) + if t.right != nil { + s = s + t.right.DebugString(indent+1) + } + return s +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/amd64/galign.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/amd64/galign.go new file mode 100644 index 0000000000000000000000000000000000000000..ca44263afc476c4eef177df64562920b13e7afdc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/amd64/galign.go @@ -0,0 +1,27 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package amd64 + +import ( + "cmd/compile/internal/ssagen" + "cmd/internal/obj/x86" +) + +var leaptr = x86.ALEAQ + +func Init(arch *ssagen.ArchInfo) { + arch.LinkArch = &x86.Linkamd64 + arch.REGSP = x86.REGSP + arch.MAXWIDTH = 1 << 50 + + arch.ZeroRange = zerorange + arch.Ginsnop = ginsnop + + arch.SSAMarkMoves = ssaMarkMoves + arch.SSAGenValue = ssaGenValue + arch.SSAGenBlock = ssaGenBlock + arch.LoadRegResult = loadRegResult + arch.SpillArgReg = spillArgReg +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/amd64/ggen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/amd64/ggen.go new file mode 100644 index 0000000000000000000000000000000000000000..db98a22a1e1927a237aa40f4a3b4311c86ca05f4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/amd64/ggen.go @@ -0,0 +1,135 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package amd64 + +import ( + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/x86" + "internal/buildcfg" +) + +// no floating point in note handlers on Plan 9 +var isPlan9 = buildcfg.GOOS == "plan9" + +// DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ, +// See runtime/mkduff.go. +const ( + dzBlocks = 16 // number of MOV/ADD blocks + dzBlockLen = 4 // number of clears per block + dzBlockSize = 23 // size of instructions in a single block + dzMovSize = 5 // size of single MOV instruction w/ offset + dzLeaqSize = 4 // size of single LEAQ instruction + dzClearStep = 16 // number of bytes cleared by each MOV instruction + + dzClearLen = dzClearStep * dzBlockLen // bytes cleared by one block + dzSize = dzBlocks * dzBlockSize +) + +// dzOff returns the offset for a jump into DUFFZERO. +// b is the number of bytes to zero. +func dzOff(b int64) int64 { + off := int64(dzSize) + off -= b / dzClearLen * dzBlockSize + tailLen := b % dzClearLen + if tailLen >= dzClearStep { + off -= dzLeaqSize + dzMovSize*(tailLen/dzClearStep) + } + return off +} + +// duffzeroDI returns the pre-adjustment to DI for a call to DUFFZERO. +// b is the number of bytes to zero. +func dzDI(b int64) int64 { + tailLen := b % dzClearLen + if tailLen < dzClearStep { + return 0 + } + tailSteps := tailLen / dzClearStep + return -dzClearStep * (dzBlockLen - tailSteps) +} + +func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog { + const ( + r13 = 1 << iota // if R13 is already zeroed. 
+ ) + + if cnt == 0 { + return p + } + + if cnt == 8 { + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off) + } else if !isPlan9 && cnt <= int64(8*types.RegSize) { + for i := int64(0); i < cnt/16; i++ { + p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16) + } + + if cnt%16 != 0 { + p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16)) + } + } else if !isPlan9 && (cnt <= int64(128*types.RegSize)) { + // Save DI to r12. With the amd64 Go register abi, DI can contain + // an incoming parameter, whereas R12 is always scratch. + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_DI, 0, obj.TYPE_REG, x86.REG_R12, 0) + // Emit duffzero call + p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0) + p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt)) + p.To.Sym = ir.Syms.Duffzero + if cnt%16 != 0 { + p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8)) + } + // Restore DI from r12 + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R12, 0, obj.TYPE_REG, x86.REG_DI, 0) + + } else { + // When the register ABI is in effect, at this point in the + // prolog we may have live values in all of RAX,RDI,RCX. Save + // them off to registers before the REPSTOSQ below, then + // restore. Note that R12 and R13 are always available as + // scratch regs; here we also use R15 (this is safe to do + // since there won't be any globals accessed in the prolog). + // See rewriteToUseGot() in obj6.go for more on r15 use. + + // Save rax/rdi/rcx + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_DI, 0, obj.TYPE_REG, x86.REG_R12, 0) + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_REG, x86.REG_R13, 0) + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_CX, 0, obj.TYPE_REG, x86.REG_R15, 0) + + // Set up the REPSTOSQ and kick it off. 
+ p = pp.Append(p, x86.AXORL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_REG, x86.REG_AX, 0) + p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0) + p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0) + p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0) + p = pp.Append(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0) + + // Restore rax/rdi/rcx + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R12, 0, obj.TYPE_REG, x86.REG_DI, 0) + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R13, 0, obj.TYPE_REG, x86.REG_AX, 0) + p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R15, 0, obj.TYPE_REG, x86.REG_CX, 0) + + // Record the fact that r13 is no longer zero. + *state &= ^uint32(r13) + } + + return p +} + +func ginsnop(pp *objw.Progs) *obj.Prog { + // This is a hardware nop (1-byte 0x90) instruction, + // even though we describe it as an explicit XCHGL here. + // Particularly, this does not zero the high 32 bits + // like typical *L opcodes. + // (gas assembles "xchg %eax,%eax" to 0x87 0xc0, which + // does zero the high 32 bits.) + p := pp.Prog(x86.AXCHGL) + p.From.Type = obj.TYPE_REG + p.From.Reg = x86.REG_AX + p.To.Type = obj.TYPE_REG + p.To.Reg = x86.REG_AX + return p +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/amd64/ssa.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/amd64/ssa.go new file mode 100644 index 0000000000000000000000000000000000000000..ab762c24f67034e5d43c17878affc9a055664ab8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/amd64/ssa.go @@ -0,0 +1,1444 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package amd64 + +import ( + "fmt" + "internal/buildcfg" + "math" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/logopt" + "cmd/compile/internal/objw" + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/x86" +) + +// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags. +func ssaMarkMoves(s *ssagen.State, b *ssa.Block) { + flive := b.FlagsLiveAtEnd + for _, c := range b.ControlValues() { + flive = c.Type.IsFlags() || flive + } + for i := len(b.Values) - 1; i >= 0; i-- { + v := b.Values[i] + if flive && (v.Op == ssa.OpAMD64MOVLconst || v.Op == ssa.OpAMD64MOVQconst) { + // The "mark" is any non-nil Aux value. + v.Aux = ssa.AuxMark + } + if v.Type.IsFlags() { + flive = false + } + for _, a := range v.Args { + if a.Type.IsFlags() { + flive = true + } + } + } +} + +// loadByType returns the load instruction of the given type. +func loadByType(t *types.Type) obj.As { + // Avoid partial register write + if !t.IsFloat() { + switch t.Size() { + case 1: + return x86.AMOVBLZX + case 2: + return x86.AMOVWLZX + } + } + // Otherwise, there's no difference between load and store opcodes. + return storeByType(t) +} + +// storeByType returns the store instruction of the given type. +func storeByType(t *types.Type) obj.As { + width := t.Size() + if t.IsFloat() { + switch width { + case 4: + return x86.AMOVSS + case 8: + return x86.AMOVSD + } + } else { + switch width { + case 1: + return x86.AMOVB + case 2: + return x86.AMOVW + case 4: + return x86.AMOVL + case 8: + return x86.AMOVQ + case 16: + return x86.AMOVUPS + } + } + panic(fmt.Sprintf("bad store type %v", t)) +} + +// moveByType returns the reg->reg move instruction of the given type. +func moveByType(t *types.Type) obj.As { + if t.IsFloat() { + // Moving the whole sse2 register is faster + // than moving just the correct low portion of it. 
+ // There is no xmm->xmm move with 1 byte opcode, + // so use movups, which has 2 byte opcode. + return x86.AMOVUPS + } else { + switch t.Size() { + case 1: + // Avoids partial register write + return x86.AMOVL + case 2: + return x86.AMOVL + case 4: + return x86.AMOVL + case 8: + return x86.AMOVQ + case 16: + return x86.AMOVUPS // int128s are in SSE registers + default: + panic(fmt.Sprintf("bad int register width %d:%v", t.Size(), t)) + } + } +} + +// opregreg emits instructions for +// +// dest := dest(To) op src(From) +// +// and also returns the created obj.Prog so it +// may be further adjusted (offset, scale, etc). +func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog { + p := s.Prog(op) + p.From.Type = obj.TYPE_REG + p.To.Type = obj.TYPE_REG + p.To.Reg = dest + p.From.Reg = src + return p +} + +// memIdx fills out a as an indexed memory reference for v. +// It assumes that the base register and the index register +// are v.Args[0].Reg() and v.Args[1].Reg(), respectively. +// The caller must still use gc.AddAux/gc.AddAux2 to handle v.Aux as necessary. +func memIdx(a *obj.Addr, v *ssa.Value) { + r, i := v.Args[0].Reg(), v.Args[1].Reg() + a.Type = obj.TYPE_MEM + a.Scale = v.Op.Scale() + if a.Scale == 1 && i == x86.REG_SP { + r, i = i, r + } + a.Reg = r + a.Index = i +} + +// DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ, +// See runtime/mkduff.go. +func duffStart(size int64) int64 { + x, _ := duff(size) + return x +} +func duffAdj(size int64) int64 { + _, x := duff(size) + return x +} + +// duff returns the offset (from duffzero, in bytes) and pointer adjust (in bytes) +// required to use the duffzero mechanism for a block of the given size. 
+func duff(size int64) (int64, int64) { + if size < 32 || size > 1024 || size%dzClearStep != 0 { + panic("bad duffzero size") + } + steps := size / dzClearStep + blocks := steps / dzBlockLen + steps %= dzBlockLen + off := dzBlockSize * (dzBlocks - blocks) + var adj int64 + if steps != 0 { + off -= dzLeaqSize + off -= dzMovSize * steps + adj -= dzClearStep * (dzBlockLen - steps) + } + return off, adj +} + +func getgFromTLS(s *ssagen.State, r int16) { + // See the comments in cmd/internal/obj/x86/obj6.go + // near CanUse1InsnTLS for a detailed explanation of these instructions. + if x86.CanUse1InsnTLS(base.Ctxt) { + // MOVQ (TLS), r + p := s.Prog(x86.AMOVQ) + p.From.Type = obj.TYPE_MEM + p.From.Reg = x86.REG_TLS + p.To.Type = obj.TYPE_REG + p.To.Reg = r + } else { + // MOVQ TLS, r + // MOVQ (r)(TLS*1), r + p := s.Prog(x86.AMOVQ) + p.From.Type = obj.TYPE_REG + p.From.Reg = x86.REG_TLS + p.To.Type = obj.TYPE_REG + p.To.Reg = r + q := s.Prog(x86.AMOVQ) + q.From.Type = obj.TYPE_MEM + q.From.Reg = r + q.From.Index = x86.REG_TLS + q.From.Scale = 1 + q.To.Type = obj.TYPE_REG + q.To.Reg = r + } +} + +func ssaGenValue(s *ssagen.State, v *ssa.Value) { + switch v.Op { + case ssa.OpAMD64VFMADD231SD: + p := s.Prog(v.Op.Asm()) + p.From = obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[2].Reg()} + p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()} + p.AddRestSourceReg(v.Args[1].Reg()) + case ssa.OpAMD64ADDQ, ssa.OpAMD64ADDL: + r := v.Reg() + r1 := v.Args[0].Reg() + r2 := v.Args[1].Reg() + switch { + case r == r1: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r2 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case r == r2: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + default: + var asm obj.As + if v.Op == ssa.OpAMD64ADDQ { + asm = x86.ALEAQ + } else { + asm = x86.ALEAL + } + p := s.Prog(asm) + p.From.Type = obj.TYPE_MEM + p.From.Reg = r1 + p.From.Scale = 1 + p.From.Index = r2 + p.To.Type = 
obj.TYPE_REG + p.To.Reg = r + } + // 2-address opcode arithmetic + case ssa.OpAMD64SUBQ, ssa.OpAMD64SUBL, + ssa.OpAMD64MULQ, ssa.OpAMD64MULL, + ssa.OpAMD64ANDQ, ssa.OpAMD64ANDL, + ssa.OpAMD64ORQ, ssa.OpAMD64ORL, + ssa.OpAMD64XORQ, ssa.OpAMD64XORL, + ssa.OpAMD64SHLQ, ssa.OpAMD64SHLL, + ssa.OpAMD64SHRQ, ssa.OpAMD64SHRL, ssa.OpAMD64SHRW, ssa.OpAMD64SHRB, + ssa.OpAMD64SARQ, ssa.OpAMD64SARL, ssa.OpAMD64SARW, ssa.OpAMD64SARB, + ssa.OpAMD64ROLQ, ssa.OpAMD64ROLL, ssa.OpAMD64ROLW, ssa.OpAMD64ROLB, + ssa.OpAMD64RORQ, ssa.OpAMD64RORL, ssa.OpAMD64RORW, ssa.OpAMD64RORB, + ssa.OpAMD64ADDSS, ssa.OpAMD64ADDSD, ssa.OpAMD64SUBSS, ssa.OpAMD64SUBSD, + ssa.OpAMD64MULSS, ssa.OpAMD64MULSD, ssa.OpAMD64DIVSS, ssa.OpAMD64DIVSD, + ssa.OpAMD64MINSS, ssa.OpAMD64MINSD, + ssa.OpAMD64POR, ssa.OpAMD64PXOR, + ssa.OpAMD64BTSL, ssa.OpAMD64BTSQ, + ssa.OpAMD64BTCL, ssa.OpAMD64BTCQ, + ssa.OpAMD64BTRL, ssa.OpAMD64BTRQ: + opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg()) + + case ssa.OpAMD64SHRDQ, ssa.OpAMD64SHLDQ: + p := s.Prog(v.Op.Asm()) + lo, hi, bits := v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg() + p.From.Type = obj.TYPE_REG + p.From.Reg = bits + p.To.Type = obj.TYPE_REG + p.To.Reg = lo + p.AddRestSourceReg(hi) + + case ssa.OpAMD64BLSIQ, ssa.OpAMD64BLSIL, + ssa.OpAMD64BLSMSKQ, ssa.OpAMD64BLSMSKL, + ssa.OpAMD64BLSRQ, ssa.OpAMD64BLSRL: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + switch v.Op { + case ssa.OpAMD64BLSRQ, ssa.OpAMD64BLSRL: + p.To.Reg = v.Reg0() + default: + p.To.Reg = v.Reg() + } + + case ssa.OpAMD64ANDNQ, ssa.OpAMD64ANDNL: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + p.AddRestSourceReg(v.Args[1].Reg()) + + case ssa.OpAMD64SARXL, ssa.OpAMD64SARXQ, + ssa.OpAMD64SHLXL, ssa.OpAMD64SHLXQ, + ssa.OpAMD64SHRXL, ssa.OpAMD64SHRXQ: + p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg()) + p.AddRestSourceReg(v.Args[0].Reg()) + + case 
ssa.OpAMD64SHLXLload, ssa.OpAMD64SHLXQload, + ssa.OpAMD64SHRXLload, ssa.OpAMD64SHRXQload, + ssa.OpAMD64SARXLload, ssa.OpAMD64SARXQload: + p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg()) + m := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[0].Reg()} + ssagen.AddAux(&m, v) + p.AddRestSource(m) + + case ssa.OpAMD64SHLXLloadidx1, ssa.OpAMD64SHLXLloadidx4, ssa.OpAMD64SHLXLloadidx8, + ssa.OpAMD64SHRXLloadidx1, ssa.OpAMD64SHRXLloadidx4, ssa.OpAMD64SHRXLloadidx8, + ssa.OpAMD64SARXLloadidx1, ssa.OpAMD64SARXLloadidx4, ssa.OpAMD64SARXLloadidx8, + ssa.OpAMD64SHLXQloadidx1, ssa.OpAMD64SHLXQloadidx8, + ssa.OpAMD64SHRXQloadidx1, ssa.OpAMD64SHRXQloadidx8, + ssa.OpAMD64SARXQloadidx1, ssa.OpAMD64SARXQloadidx8: + p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[2].Reg()) + m := obj.Addr{Type: obj.TYPE_MEM} + memIdx(&m, v) + ssagen.AddAux(&m, v) + p.AddRestSource(m) + + case ssa.OpAMD64DIVQU, ssa.OpAMD64DIVLU, ssa.OpAMD64DIVWU: + // Arg[0] (the dividend) is in AX. + // Arg[1] (the divisor) can be in any other register. + // Result[0] (the quotient) is in AX. + // Result[1] (the remainder) is in DX. + r := v.Args[1].Reg() + + // Zero extend dividend. + opregreg(s, x86.AXORL, x86.REG_DX, x86.REG_DX) + + // Issue divide. + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r + + case ssa.OpAMD64DIVQ, ssa.OpAMD64DIVL, ssa.OpAMD64DIVW: + // Arg[0] (the dividend) is in AX. + // Arg[1] (the divisor) can be in any other register. + // Result[0] (the quotient) is in AX. + // Result[1] (the remainder) is in DX. + r := v.Args[1].Reg() + + var opCMP, opNEG, opSXD obj.As + switch v.Op { + case ssa.OpAMD64DIVQ: + opCMP, opNEG, opSXD = x86.ACMPQ, x86.ANEGQ, x86.ACQO + case ssa.OpAMD64DIVL: + opCMP, opNEG, opSXD = x86.ACMPL, x86.ANEGL, x86.ACDQ + case ssa.OpAMD64DIVW: + opCMP, opNEG, opSXD = x86.ACMPW, x86.ANEGW, x86.ACWD + } + + // CPU faults upon signed overflow, which occurs when the most + // negative int is divided by -1. Handle divide by -1 as a special case. 
+ var j1, j2 *obj.Prog + if ssa.DivisionNeedsFixUp(v) { + c := s.Prog(opCMP) + c.From.Type = obj.TYPE_REG + c.From.Reg = r + c.To.Type = obj.TYPE_CONST + c.To.Offset = -1 + + // Divisor is not -1, proceed with normal division. + j1 = s.Prog(x86.AJNE) + j1.To.Type = obj.TYPE_BRANCH + + // Divisor is -1, manually compute quotient and remainder via fixup code. + // n / -1 = -n + n1 := s.Prog(opNEG) + n1.To.Type = obj.TYPE_REG + n1.To.Reg = x86.REG_AX + + // n % -1 == 0 + opregreg(s, x86.AXORL, x86.REG_DX, x86.REG_DX) + + // TODO(khr): issue only the -1 fixup code we need. + // For instance, if only the quotient is used, no point in zeroing the remainder. + + // Skip over normal division. + j2 = s.Prog(obj.AJMP) + j2.To.Type = obj.TYPE_BRANCH + } + + // Sign extend dividend and perform division. + p := s.Prog(opSXD) + if j1 != nil { + j1.To.SetTarget(p) + } + p = s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r + + if j2 != nil { + j2.To.SetTarget(s.Pc()) + } + + case ssa.OpAMD64HMULQ, ssa.OpAMD64HMULL, ssa.OpAMD64HMULQU, ssa.OpAMD64HMULLU: + // the frontend rewrites constant division by 8/16/32 bit integers into + // HMUL by a constant + // SSA rewrites generate the 64 bit versions + + // Arg[0] is already in AX as it's the only register we allow + // and DX is the only output we care about (the high bits) + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + + // IMULB puts the high portion in AH instead of DL, + // so move it to DL for consistency + if v.Type.Size() == 1 { + m := s.Prog(x86.AMOVB) + m.From.Type = obj.TYPE_REG + m.From.Reg = x86.REG_AH + m.To.Type = obj.TYPE_REG + m.To.Reg = x86.REG_DX + } + + case ssa.OpAMD64MULQU, ssa.OpAMD64MULLU: + // Arg[0] is already in AX as it's the only register we allow + // results lo in AX + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + + case ssa.OpAMD64MULQU2: + // Arg[0] is already in AX as it's the only register we allow + // 
results hi in DX, lo in AX + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + + case ssa.OpAMD64DIVQU2: + // Arg[0], Arg[1] are already in Dx, AX, as they're the only registers we allow + // results q in AX, r in DX + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[2].Reg() + + case ssa.OpAMD64AVGQU: + // compute (x+y)/2 unsigned. + // Do a 64-bit add, the overflow goes into the carry. + // Shift right once and pull the carry back into the 63rd bit. + p := s.Prog(x86.AADDQ) + p.From.Type = obj.TYPE_REG + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + p.From.Reg = v.Args[1].Reg() + p = s.Prog(x86.ARCRQ) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 1 + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpAMD64ADDQcarry, ssa.OpAMD64ADCQ: + r := v.Reg0() + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + switch r { + case r0: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case r1: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r0 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + default: + v.Fatalf("output not in same register as an input %s", v.LongString()) + } + + case ssa.OpAMD64SUBQborrow, ssa.OpAMD64SBBQ: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + + case ssa.OpAMD64ADDQconstcarry, ssa.OpAMD64ADCQconst, ssa.OpAMD64SUBQconstborrow, ssa.OpAMD64SBBQconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + + case ssa.OpAMD64ADDQconst, ssa.OpAMD64ADDLconst: + r := v.Reg() + a := v.Args[0].Reg() + if r == a { + switch v.AuxInt { + case 1: + var asm obj.As + // Software optimization manual recommends add $1,reg. + // But inc/dec is 1 byte smaller. ICC always uses inc + // Clang/GCC choose depending on flags, but prefer add. 
+ // Experiments show that inc/dec is both a little faster + // and make a binary a little smaller. + if v.Op == ssa.OpAMD64ADDQconst { + asm = x86.AINCQ + } else { + asm = x86.AINCL + } + p := s.Prog(asm) + p.To.Type = obj.TYPE_REG + p.To.Reg = r + return + case -1: + var asm obj.As + if v.Op == ssa.OpAMD64ADDQconst { + asm = x86.ADECQ + } else { + asm = x86.ADECL + } + p := s.Prog(asm) + p.To.Type = obj.TYPE_REG + p.To.Reg = r + return + case 0x80: + // 'SUBQ $-0x80, r' is shorter to encode than + // and functionally equivalent to 'ADDQ $0x80, r'. + asm := x86.ASUBL + if v.Op == ssa.OpAMD64ADDQconst { + asm = x86.ASUBQ + } + p := s.Prog(asm) + p.From.Type = obj.TYPE_CONST + p.From.Offset = -0x80 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + return + + } + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = r + return + } + var asm obj.As + if v.Op == ssa.OpAMD64ADDQconst { + asm = x86.ALEAQ + } else { + asm = x86.ALEAL + } + p := s.Prog(asm) + p.From.Type = obj.TYPE_MEM + p.From.Reg = a + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = r + + case ssa.OpAMD64CMOVQEQ, ssa.OpAMD64CMOVLEQ, ssa.OpAMD64CMOVWEQ, + ssa.OpAMD64CMOVQLT, ssa.OpAMD64CMOVLLT, ssa.OpAMD64CMOVWLT, + ssa.OpAMD64CMOVQNE, ssa.OpAMD64CMOVLNE, ssa.OpAMD64CMOVWNE, + ssa.OpAMD64CMOVQGT, ssa.OpAMD64CMOVLGT, ssa.OpAMD64CMOVWGT, + ssa.OpAMD64CMOVQLE, ssa.OpAMD64CMOVLLE, ssa.OpAMD64CMOVWLE, + ssa.OpAMD64CMOVQGE, ssa.OpAMD64CMOVLGE, ssa.OpAMD64CMOVWGE, + ssa.OpAMD64CMOVQHI, ssa.OpAMD64CMOVLHI, ssa.OpAMD64CMOVWHI, + ssa.OpAMD64CMOVQLS, ssa.OpAMD64CMOVLLS, ssa.OpAMD64CMOVWLS, + ssa.OpAMD64CMOVQCC, ssa.OpAMD64CMOVLCC, ssa.OpAMD64CMOVWCC, + ssa.OpAMD64CMOVQCS, ssa.OpAMD64CMOVLCS, ssa.OpAMD64CMOVWCS, + ssa.OpAMD64CMOVQGTF, ssa.OpAMD64CMOVLGTF, ssa.OpAMD64CMOVWGTF, + ssa.OpAMD64CMOVQGEF, ssa.OpAMD64CMOVLGEF, ssa.OpAMD64CMOVWGEF: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = 
obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpAMD64CMOVQNEF, ssa.OpAMD64CMOVLNEF, ssa.OpAMD64CMOVWNEF: + // Flag condition: ^ZERO || PARITY + // Generate: + // CMOV*NE SRC,DST + // CMOV*PS SRC,DST + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + var q *obj.Prog + if v.Op == ssa.OpAMD64CMOVQNEF { + q = s.Prog(x86.ACMOVQPS) + } else if v.Op == ssa.OpAMD64CMOVLNEF { + q = s.Prog(x86.ACMOVLPS) + } else { + q = s.Prog(x86.ACMOVWPS) + } + q.From.Type = obj.TYPE_REG + q.From.Reg = v.Args[1].Reg() + q.To.Type = obj.TYPE_REG + q.To.Reg = v.Reg() + + case ssa.OpAMD64CMOVQEQF, ssa.OpAMD64CMOVLEQF, ssa.OpAMD64CMOVWEQF: + // Flag condition: ZERO && !PARITY + // Generate: + // MOV SRC,TMP + // CMOV*NE DST,TMP + // CMOV*PC TMP,DST + // + // TODO(rasky): we could generate: + // CMOV*NE DST,SRC + // CMOV*PC SRC,DST + // But this requires a way for regalloc to know that SRC might be + // clobbered by this instruction. + t := v.RegTmp() + opregreg(s, moveByType(v.Type), t, v.Args[1].Reg()) + + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = t + var q *obj.Prog + if v.Op == ssa.OpAMD64CMOVQEQF { + q = s.Prog(x86.ACMOVQPC) + } else if v.Op == ssa.OpAMD64CMOVLEQF { + q = s.Prog(x86.ACMOVLPC) + } else { + q = s.Prog(x86.ACMOVWPC) + } + q.From.Type = obj.TYPE_REG + q.From.Reg = t + q.To.Type = obj.TYPE_REG + q.To.Reg = v.Reg() + + case ssa.OpAMD64MULQconst, ssa.OpAMD64MULLconst: + r := v.Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = r + p.AddRestSourceReg(v.Args[0].Reg()) + + case ssa.OpAMD64ANDQconst: + asm := v.Op.Asm() + // If the constant is positive and fits into 32 bits, use ANDL. + // This saves a few bytes of encoding. 
+ if 0 <= v.AuxInt && v.AuxInt <= (1<<32-1) { + asm = x86.AANDL + } + p := s.Prog(asm) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpAMD64SUBQconst, ssa.OpAMD64SUBLconst, + ssa.OpAMD64ANDLconst, + ssa.OpAMD64ORQconst, ssa.OpAMD64ORLconst, + ssa.OpAMD64XORQconst, ssa.OpAMD64XORLconst, + ssa.OpAMD64SHLQconst, ssa.OpAMD64SHLLconst, + ssa.OpAMD64SHRQconst, ssa.OpAMD64SHRLconst, ssa.OpAMD64SHRWconst, ssa.OpAMD64SHRBconst, + ssa.OpAMD64SARQconst, ssa.OpAMD64SARLconst, ssa.OpAMD64SARWconst, ssa.OpAMD64SARBconst, + ssa.OpAMD64ROLQconst, ssa.OpAMD64ROLLconst, ssa.OpAMD64ROLWconst, ssa.OpAMD64ROLBconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpAMD64SBBQcarrymask, ssa.OpAMD64SBBLcarrymask: + r := v.Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8, + ssa.OpAMD64LEAL1, ssa.OpAMD64LEAL2, ssa.OpAMD64LEAL4, ssa.OpAMD64LEAL8, + ssa.OpAMD64LEAW1, ssa.OpAMD64LEAW2, ssa.OpAMD64LEAW4, ssa.OpAMD64LEAW8: + p := s.Prog(v.Op.Asm()) + memIdx(&p.From, v) + o := v.Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = o + if v.AuxInt != 0 && v.Aux == nil { + // Emit an additional LEA to add the displacement instead of creating a slow 3 operand LEA. 
+ switch v.Op { + case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8: + p = s.Prog(x86.ALEAQ) + case ssa.OpAMD64LEAL1, ssa.OpAMD64LEAL2, ssa.OpAMD64LEAL4, ssa.OpAMD64LEAL8: + p = s.Prog(x86.ALEAL) + case ssa.OpAMD64LEAW1, ssa.OpAMD64LEAW2, ssa.OpAMD64LEAW4, ssa.OpAMD64LEAW8: + p = s.Prog(x86.ALEAW) + } + p.From.Type = obj.TYPE_MEM + p.From.Reg = o + p.To.Type = obj.TYPE_REG + p.To.Reg = o + } + ssagen.AddAux(&p.From, v) + case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL, ssa.OpAMD64LEAW: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB, + ssa.OpAMD64TESTQ, ssa.OpAMD64TESTL, ssa.OpAMD64TESTW, ssa.OpAMD64TESTB, + ssa.OpAMD64BTL, ssa.OpAMD64BTQ: + opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg()) + case ssa.OpAMD64UCOMISS, ssa.OpAMD64UCOMISD: + // Go assembler has swapped operands for UCOMISx relative to CMP, + // must account for that right here. 
+ opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg()) + case ssa.OpAMD64CMPQconst, ssa.OpAMD64CMPLconst, ssa.OpAMD64CMPWconst, ssa.OpAMD64CMPBconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_CONST + p.To.Offset = v.AuxInt + case ssa.OpAMD64BTLconst, ssa.OpAMD64BTQconst, + ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst, + ssa.OpAMD64BTSQconst, + ssa.OpAMD64BTCQconst, + ssa.OpAMD64BTRQconst: + op := v.Op + if op == ssa.OpAMD64BTQconst && v.AuxInt < 32 { + // Emit 32-bit version because it's shorter + op = ssa.OpAMD64BTLconst + } + p := s.Prog(op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Args[0].Reg() + case ssa.OpAMD64CMPQload, ssa.OpAMD64CMPLload, ssa.OpAMD64CMPWload, ssa.OpAMD64CMPBload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Args[1].Reg() + case ssa.OpAMD64CMPQconstload, ssa.OpAMD64CMPLconstload, ssa.OpAMD64CMPWconstload, ssa.OpAMD64CMPBconstload: + sc := v.AuxValAndOff() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux2(&p.From, v, sc.Off64()) + p.To.Type = obj.TYPE_CONST + p.To.Offset = sc.Val64() + case ssa.OpAMD64CMPQloadidx8, ssa.OpAMD64CMPQloadidx1, ssa.OpAMD64CMPLloadidx4, ssa.OpAMD64CMPLloadidx1, ssa.OpAMD64CMPWloadidx2, ssa.OpAMD64CMPWloadidx1, ssa.OpAMD64CMPBloadidx1: + p := s.Prog(v.Op.Asm()) + memIdx(&p.From, v) + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Args[2].Reg() + case ssa.OpAMD64CMPQconstloadidx8, ssa.OpAMD64CMPQconstloadidx1, ssa.OpAMD64CMPLconstloadidx4, ssa.OpAMD64CMPLconstloadidx1, ssa.OpAMD64CMPWconstloadidx2, ssa.OpAMD64CMPWconstloadidx1, ssa.OpAMD64CMPBconstloadidx1: + sc := v.AuxValAndOff() + p := s.Prog(v.Op.Asm()) + memIdx(&p.From, v) + ssagen.AddAux2(&p.From, v, 
sc.Off64()) + p.To.Type = obj.TYPE_CONST + p.To.Offset = sc.Val64() + case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst: + x := v.Reg() + + // If flags aren't live (indicated by v.Aux == nil), + // then we can rewrite MOV $0, AX into XOR AX, AX. + if v.AuxInt == 0 && v.Aux == nil { + opregreg(s, x86.AXORL, x, x) + break + } + + asm := v.Op.Asm() + // Use MOVL to move a small constant into a register + // when the constant is positive and fits into 32 bits. + if 0 <= v.AuxInt && v.AuxInt <= (1<<32-1) { + // The upper 32bit are zeroed automatically when using MOVL. + asm = x86.AMOVL + } + p := s.Prog(asm) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = x + case ssa.OpAMD64MOVSSconst, ssa.OpAMD64MOVSDconst: + x := v.Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_FCONST + p.From.Val = math.Float64frombits(uint64(v.AuxInt)) + p.To.Type = obj.TYPE_REG + p.To.Reg = x + case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVOload, + ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload, + ssa.OpAMD64MOVBEQload, ssa.OpAMD64MOVBELload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1, + ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2, + ssa.OpAMD64MOVBELloadidx1, ssa.OpAMD64MOVBELloadidx4, ssa.OpAMD64MOVBELloadidx8, ssa.OpAMD64MOVBEQloadidx1, ssa.OpAMD64MOVBEQloadidx8: + p := s.Prog(v.Op.Asm()) + memIdx(&p.From, v) + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, 
ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore, + ssa.OpAMD64ADDQmodify, ssa.OpAMD64SUBQmodify, ssa.OpAMD64ANDQmodify, ssa.OpAMD64ORQmodify, ssa.OpAMD64XORQmodify, + ssa.OpAMD64ADDLmodify, ssa.OpAMD64SUBLmodify, ssa.OpAMD64ANDLmodify, ssa.OpAMD64ORLmodify, ssa.OpAMD64XORLmodify, + ssa.OpAMD64MOVBEQstore, ssa.OpAMD64MOVBELstore, ssa.OpAMD64MOVBEWstore: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1, + ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8, ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4, ssa.OpAMD64MOVWstoreidx2, + ssa.OpAMD64ADDLmodifyidx1, ssa.OpAMD64ADDLmodifyidx4, ssa.OpAMD64ADDLmodifyidx8, ssa.OpAMD64ADDQmodifyidx1, ssa.OpAMD64ADDQmodifyidx8, + ssa.OpAMD64SUBLmodifyidx1, ssa.OpAMD64SUBLmodifyidx4, ssa.OpAMD64SUBLmodifyidx8, ssa.OpAMD64SUBQmodifyidx1, ssa.OpAMD64SUBQmodifyidx8, + ssa.OpAMD64ANDLmodifyidx1, ssa.OpAMD64ANDLmodifyidx4, ssa.OpAMD64ANDLmodifyidx8, ssa.OpAMD64ANDQmodifyidx1, ssa.OpAMD64ANDQmodifyidx8, + ssa.OpAMD64ORLmodifyidx1, ssa.OpAMD64ORLmodifyidx4, ssa.OpAMD64ORLmodifyidx8, ssa.OpAMD64ORQmodifyidx1, ssa.OpAMD64ORQmodifyidx8, + ssa.OpAMD64XORLmodifyidx1, ssa.OpAMD64XORLmodifyidx4, ssa.OpAMD64XORLmodifyidx8, ssa.OpAMD64XORQmodifyidx1, ssa.OpAMD64XORQmodifyidx8, + ssa.OpAMD64MOVBEWstoreidx1, ssa.OpAMD64MOVBEWstoreidx2, ssa.OpAMD64MOVBELstoreidx1, ssa.OpAMD64MOVBELstoreidx4, ssa.OpAMD64MOVBELstoreidx8, ssa.OpAMD64MOVBEQstoreidx1, ssa.OpAMD64MOVBEQstoreidx8: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[2].Reg() + memIdx(&p.To, v) + ssagen.AddAux(&p.To, v) + case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify: + sc := 
v.AuxValAndOff() + off := sc.Off64() + val := sc.Val() + if val == 1 || val == -1 { + var asm obj.As + if v.Op == ssa.OpAMD64ADDQconstmodify { + if val == 1 { + asm = x86.AINCQ + } else { + asm = x86.ADECQ + } + } else { + if val == 1 { + asm = x86.AINCL + } else { + asm = x86.ADECL + } + } + p := s.Prog(asm) + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux2(&p.To, v, off) + break + } + fallthrough + case ssa.OpAMD64ANDQconstmodify, ssa.OpAMD64ANDLconstmodify, ssa.OpAMD64ORQconstmodify, ssa.OpAMD64ORLconstmodify, + ssa.OpAMD64XORQconstmodify, ssa.OpAMD64XORLconstmodify, + ssa.OpAMD64BTSQconstmodify, ssa.OpAMD64BTRQconstmodify, ssa.OpAMD64BTCQconstmodify: + sc := v.AuxValAndOff() + off := sc.Off64() + val := sc.Val64() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = val + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux2(&p.To, v, off) + + case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + sc := v.AuxValAndOff() + p.From.Offset = sc.Val64() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux2(&p.To, v, sc.Off64()) + case ssa.OpAMD64MOVOstoreconst: + sc := v.AuxValAndOff() + if sc.Val() != 0 { + v.Fatalf("MOVO for non zero constants not implemented: %s", v.LongString()) + } + + if s.ABI != obj.ABIInternal { + // zero X15 manually + opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15) + } + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = x86.REG_X15 + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux2(&p.To, v, sc.Off64()) + + case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1, + ssa.OpAMD64ADDLconstmodifyidx1, ssa.OpAMD64ADDLconstmodifyidx4, 
ssa.OpAMD64ADDLconstmodifyidx8, ssa.OpAMD64ADDQconstmodifyidx1, ssa.OpAMD64ADDQconstmodifyidx8, + ssa.OpAMD64ANDLconstmodifyidx1, ssa.OpAMD64ANDLconstmodifyidx4, ssa.OpAMD64ANDLconstmodifyidx8, ssa.OpAMD64ANDQconstmodifyidx1, ssa.OpAMD64ANDQconstmodifyidx8, + ssa.OpAMD64ORLconstmodifyidx1, ssa.OpAMD64ORLconstmodifyidx4, ssa.OpAMD64ORLconstmodifyidx8, ssa.OpAMD64ORQconstmodifyidx1, ssa.OpAMD64ORQconstmodifyidx8, + ssa.OpAMD64XORLconstmodifyidx1, ssa.OpAMD64XORLconstmodifyidx4, ssa.OpAMD64XORLconstmodifyidx8, ssa.OpAMD64XORQconstmodifyidx1, ssa.OpAMD64XORQconstmodifyidx8: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + sc := v.AuxValAndOff() + p.From.Offset = sc.Val64() + switch { + case p.As == x86.AADDQ && p.From.Offset == 1: + p.As = x86.AINCQ + p.From.Type = obj.TYPE_NONE + case p.As == x86.AADDQ && p.From.Offset == -1: + p.As = x86.ADECQ + p.From.Type = obj.TYPE_NONE + case p.As == x86.AADDL && p.From.Offset == 1: + p.As = x86.AINCL + p.From.Type = obj.TYPE_NONE + case p.As == x86.AADDL && p.From.Offset == -1: + p.As = x86.ADECL + p.From.Type = obj.TYPE_NONE + } + memIdx(&p.To, v) + ssagen.AddAux2(&p.To, v, sc.Off64()) + case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX, + ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ, + ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS: + opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg()) + case ssa.OpAMD64CVTSL2SD, ssa.OpAMD64CVTSQ2SD, ssa.OpAMD64CVTSQ2SS, ssa.OpAMD64CVTSL2SS: + r := v.Reg() + // Break false dependency on destination register. 
+ opregreg(s, x86.AXORPS, r, r) + opregreg(s, v.Op.Asm(), r, v.Args[0].Reg()) + case ssa.OpAMD64MOVQi2f, ssa.OpAMD64MOVQf2i, ssa.OpAMD64MOVLi2f, ssa.OpAMD64MOVLf2i: + var p *obj.Prog + switch v.Op { + case ssa.OpAMD64MOVQi2f, ssa.OpAMD64MOVQf2i: + p = s.Prog(x86.AMOVQ) + case ssa.OpAMD64MOVLi2f, ssa.OpAMD64MOVLf2i: + p = s.Prog(x86.AMOVL) + } + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpAMD64ADDQload, ssa.OpAMD64ADDLload, ssa.OpAMD64SUBQload, ssa.OpAMD64SUBLload, + ssa.OpAMD64ANDQload, ssa.OpAMD64ANDLload, ssa.OpAMD64ORQload, ssa.OpAMD64ORLload, + ssa.OpAMD64XORQload, ssa.OpAMD64XORLload, ssa.OpAMD64ADDSDload, ssa.OpAMD64ADDSSload, + ssa.OpAMD64SUBSDload, ssa.OpAMD64SUBSSload, ssa.OpAMD64MULSDload, ssa.OpAMD64MULSSload, + ssa.OpAMD64DIVSDload, ssa.OpAMD64DIVSSload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[1].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpAMD64ADDLloadidx1, ssa.OpAMD64ADDLloadidx4, ssa.OpAMD64ADDLloadidx8, ssa.OpAMD64ADDQloadidx1, ssa.OpAMD64ADDQloadidx8, + ssa.OpAMD64SUBLloadidx1, ssa.OpAMD64SUBLloadidx4, ssa.OpAMD64SUBLloadidx8, ssa.OpAMD64SUBQloadidx1, ssa.OpAMD64SUBQloadidx8, + ssa.OpAMD64ANDLloadidx1, ssa.OpAMD64ANDLloadidx4, ssa.OpAMD64ANDLloadidx8, ssa.OpAMD64ANDQloadidx1, ssa.OpAMD64ANDQloadidx8, + ssa.OpAMD64ORLloadidx1, ssa.OpAMD64ORLloadidx4, ssa.OpAMD64ORLloadidx8, ssa.OpAMD64ORQloadidx1, ssa.OpAMD64ORQloadidx8, + ssa.OpAMD64XORLloadidx1, ssa.OpAMD64XORLloadidx4, ssa.OpAMD64XORLloadidx8, ssa.OpAMD64XORQloadidx1, ssa.OpAMD64XORQloadidx8, + ssa.OpAMD64ADDSSloadidx1, ssa.OpAMD64ADDSSloadidx4, ssa.OpAMD64ADDSDloadidx1, ssa.OpAMD64ADDSDloadidx8, + ssa.OpAMD64SUBSSloadidx1, ssa.OpAMD64SUBSSloadidx4, ssa.OpAMD64SUBSDloadidx1, ssa.OpAMD64SUBSDloadidx8, + ssa.OpAMD64MULSSloadidx1, ssa.OpAMD64MULSSloadidx4, ssa.OpAMD64MULSDloadidx1, ssa.OpAMD64MULSDloadidx8, + ssa.OpAMD64DIVSSloadidx1, 
ssa.OpAMD64DIVSSloadidx4, ssa.OpAMD64DIVSDloadidx1, ssa.OpAMD64DIVSDloadidx8: + p := s.Prog(v.Op.Asm()) + + r, i := v.Args[1].Reg(), v.Args[2].Reg() + p.From.Type = obj.TYPE_MEM + p.From.Scale = v.Op.Scale() + if p.From.Scale == 1 && i == x86.REG_SP { + r, i = i, r + } + p.From.Reg = r + p.From.Index = i + + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpAMD64DUFFZERO: + if s.ABI != obj.ABIInternal { + // zero X15 manually + opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15) + } + off := duffStart(v.AuxInt) + adj := duffAdj(v.AuxInt) + var p *obj.Prog + if adj != 0 { + p = s.Prog(x86.ALEAQ) + p.From.Type = obj.TYPE_MEM + p.From.Offset = adj + p.From.Reg = x86.REG_DI + p.To.Type = obj.TYPE_REG + p.To.Reg = x86.REG_DI + } + p = s.Prog(obj.ADUFFZERO) + p.To.Type = obj.TYPE_ADDR + p.To.Sym = ir.Syms.Duffzero + p.To.Offset = off + case ssa.OpAMD64DUFFCOPY: + p := s.Prog(obj.ADUFFCOPY) + p.To.Type = obj.TYPE_ADDR + p.To.Sym = ir.Syms.Duffcopy + if v.AuxInt%16 != 0 { + v.Fatalf("bad DUFFCOPY AuxInt %v", v.AuxInt) + } + p.To.Offset = 14 * (64 - v.AuxInt/16) + // 14 and 64 are magic constants. 14 is the number of bytes to encode: + // MOVUPS (SI), X0 + // ADDQ $16, SI + // MOVUPS X0, (DI) + // ADDQ $16, DI + // and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy. + + case ssa.OpCopy: // TODO: use MOVQreg for reg->reg copies instead of OpCopy? 
+ if v.Type.IsMemory() { + return + } + x := v.Args[0].Reg() + y := v.Reg() + if x != y { + opregreg(s, moveByType(v.Type), y, x) + } + case ssa.OpLoadReg: + if v.Type.IsFlags() { + v.Fatalf("load flags not implemented: %v", v.LongString()) + return + } + p := s.Prog(loadByType(v.Type)) + ssagen.AddrAuto(&p.From, v.Args[0]) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpStoreReg: + if v.Type.IsFlags() { + v.Fatalf("store flags not implemented: %v", v.LongString()) + return + } + p := s.Prog(storeByType(v.Type)) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + ssagen.AddrAuto(&p.To, v) + case ssa.OpAMD64LoweredHasCPUFeature: + p := s.Prog(x86.AMOVBLZX) + p.From.Type = obj.TYPE_MEM + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpArgIntReg, ssa.OpArgFloatReg: + // The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill + // The loop only runs once. + for _, ap := range v.Block.Func.RegArgs { + // Pass the spill/unspill information along to the assembler, offset by size of return PC pushed on stack. + addr := ssagen.SpillSlotAddr(ap, x86.REG_SP, v.Block.Func.Config.PtrSize) + s.FuncInfo().AddSpill( + obj.RegSpill{Reg: ap.Reg, Addr: addr, Unspill: loadByType(ap.Type), Spill: storeByType(ap.Type)}) + } + v.Block.Func.RegArgs = nil + ssagen.CheckArgReg(v) + case ssa.OpAMD64LoweredGetClosurePtr: + // Closure pointer is DX. 
+ ssagen.CheckLoweredGetClosurePtr(v) + case ssa.OpAMD64LoweredGetG: + if s.ABI == obj.ABIInternal { + v.Fatalf("LoweredGetG should not appear in ABIInternal") + } + r := v.Reg() + getgFromTLS(s, r) + case ssa.OpAMD64CALLstatic, ssa.OpAMD64CALLtail: + if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal { + // zeroing X15 when entering ABIInternal from ABI0 + if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9 + opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15) + } + // set G register from TLS + getgFromTLS(s, x86.REG_R14) + } + if v.Op == ssa.OpAMD64CALLtail { + s.TailCall(v) + break + } + s.Call(v) + if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 { + // zeroing X15 when entering ABIInternal from ABI0 + if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9 + opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15) + } + // set G register from TLS + getgFromTLS(s, x86.REG_R14) + } + case ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter: + s.Call(v) + + case ssa.OpAMD64LoweredGetCallerPC: + p := s.Prog(x86.AMOVQ) + p.From.Type = obj.TYPE_MEM + p.From.Offset = -8 // PC is stored 8 bytes below first parameter. + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpAMD64LoweredGetCallerSP: + // caller's SP is the address of the first arg + mov := x86.AMOVQ + if types.PtrSize == 4 { + mov = x86.AMOVL + } + p := s.Prog(mov) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = -base.Ctxt.Arch.FixedFrameSize // 0 on amd64, just to be consistent with other architectures + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpAMD64LoweredWB: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + // AuxInt encodes how many buffer entries we need. 
+ p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1] + + case ssa.OpAMD64LoweredPanicBoundsA, ssa.OpAMD64LoweredPanicBoundsB, ssa.OpAMD64LoweredPanicBoundsC: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] + s.UseArgs(int64(2 * types.PtrSize)) // space used in callee args area by assembly stubs + + case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL, + ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL, + ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL: + p := s.Prog(v.Op.Asm()) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpAMD64NEGLflags: + p := s.Prog(v.Op.Asm()) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + + case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ, ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD, ssa.OpAMD64SQRTSS: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + switch v.Op { + case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ: + p.To.Reg = v.Reg0() + case ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD, ssa.OpAMD64SQRTSS: + p.To.Reg = v.Reg() + } + case ssa.OpAMD64ROUNDSD: + p := s.Prog(v.Op.Asm()) + val := v.AuxInt + // 0 means math.RoundToEven, 1 Floor, 2 Ceil, 3 Trunc + if val < 0 || val > 3 { + v.Fatalf("Invalid rounding mode") + } + p.From.Offset = val + p.From.Type = obj.TYPE_CONST + p.AddRestSourceReg(v.Args[0].Reg()) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpAMD64POPCNTQ, ssa.OpAMD64POPCNTL, + ssa.OpAMD64TZCNTQ, ssa.OpAMD64TZCNTL, + ssa.OpAMD64LZCNTQ, ssa.OpAMD64LZCNTL: + if v.Args[0].Reg() != v.Reg() { + // POPCNT/TZCNT/LZCNT have a false dependency on the destination register on Intel cpus. + // TZCNT/LZCNT problem affects pre-Skylake models. See discussion at https://gcc.gnu.org/bugzilla/show_bug.cgi?id=62011#c7. + // Xor register with itself to break the dependency. 
+ opregreg(s, x86.AXORL, v.Reg(), v.Reg()) + } + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpAMD64SETEQ, ssa.OpAMD64SETNE, + ssa.OpAMD64SETL, ssa.OpAMD64SETLE, + ssa.OpAMD64SETG, ssa.OpAMD64SETGE, + ssa.OpAMD64SETGF, ssa.OpAMD64SETGEF, + ssa.OpAMD64SETB, ssa.OpAMD64SETBE, + ssa.OpAMD64SETORD, ssa.OpAMD64SETNAN, + ssa.OpAMD64SETA, ssa.OpAMD64SETAE, + ssa.OpAMD64SETO: + p := s.Prog(v.Op.Asm()) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpAMD64SETEQstore, ssa.OpAMD64SETNEstore, + ssa.OpAMD64SETLstore, ssa.OpAMD64SETLEstore, + ssa.OpAMD64SETGstore, ssa.OpAMD64SETGEstore, + ssa.OpAMD64SETBstore, ssa.OpAMD64SETBEstore, + ssa.OpAMD64SETAstore, ssa.OpAMD64SETAEstore: + p := s.Prog(v.Op.Asm()) + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + + case ssa.OpAMD64SETEQstoreidx1, ssa.OpAMD64SETNEstoreidx1, + ssa.OpAMD64SETLstoreidx1, ssa.OpAMD64SETLEstoreidx1, + ssa.OpAMD64SETGstoreidx1, ssa.OpAMD64SETGEstoreidx1, + ssa.OpAMD64SETBstoreidx1, ssa.OpAMD64SETBEstoreidx1, + ssa.OpAMD64SETAstoreidx1, ssa.OpAMD64SETAEstoreidx1: + p := s.Prog(v.Op.Asm()) + memIdx(&p.To, v) + ssagen.AddAux(&p.To, v) + + case ssa.OpAMD64SETNEF: + t := v.RegTmp() + p := s.Prog(v.Op.Asm()) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + q := s.Prog(x86.ASETPS) + q.To.Type = obj.TYPE_REG + q.To.Reg = t + // ORL avoids partial register write and is smaller than ORQ, used by old compiler + opregreg(s, x86.AORL, v.Reg(), t) + + case ssa.OpAMD64SETEQF: + t := v.RegTmp() + p := s.Prog(v.Op.Asm()) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + q := s.Prog(x86.ASETPC) + q.To.Type = obj.TYPE_REG + q.To.Reg = t + // ANDL avoids partial register write and is smaller than ANDQ, used by old compiler + opregreg(s, x86.AANDL, v.Reg(), t) + + case ssa.OpAMD64InvertFlags: + v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) + case 
ssa.OpAMD64FlagEQ, ssa.OpAMD64FlagLT_ULT, ssa.OpAMD64FlagLT_UGT, ssa.OpAMD64FlagGT_ULT, ssa.OpAMD64FlagGT_UGT: + v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString()) + case ssa.OpAMD64AddTupleFirst32, ssa.OpAMD64AddTupleFirst64: + v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString()) + case ssa.OpAMD64REPSTOSQ: + s.Prog(x86.AREP) + s.Prog(x86.ASTOSQ) + case ssa.OpAMD64REPMOVSQ: + s.Prog(x86.AREP) + s.Prog(x86.AMOVSQ) + case ssa.OpAMD64LoweredNilCheck: + // Issue a load which will fault if the input is nil. + // TODO: We currently use the 2-byte instruction TESTB AX, (reg). + // Should we use the 3-byte TESTB $0, (reg) instead? It is larger + // but it doesn't have false dependency on AX. + // Or maybe allocate an output register and use MOVL (reg),reg2 ? + // That trades clobbering flags for clobbering a register. + p := s.Prog(x86.ATESTB) + p.From.Type = obj.TYPE_REG + p.From.Reg = x86.REG_AX + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + if logopt.Enabled() { + logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) + } + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") + } + case ssa.OpAMD64MOVBatomicload, ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + case ssa.OpAMD64XCHGB, ssa.OpAMD64XCHGL, ssa.OpAMD64XCHGQ: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Reg0() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[1].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpAMD64XADDLlock, ssa.OpAMD64XADDQlock: + s.Prog(x86.ALOCK) + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Reg0() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[1].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpAMD64CMPXCHGLlock, ssa.OpAMD64CMPXCHGQlock: + 
if v.Args[1].Reg() != x86.REG_AX { + v.Fatalf("input[1] not in AX %s", v.LongString()) + } + s.Prog(x86.ALOCK) + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[2].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + p = s.Prog(x86.ASETEQ) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + case ssa.OpAMD64ANDBlock, ssa.OpAMD64ANDLlock, ssa.OpAMD64ORBlock, ssa.OpAMD64ORLlock: + s.Prog(x86.ALOCK) + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpAMD64PrefetchT0, ssa.OpAMD64PrefetchNTA: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + case ssa.OpClobber: + p := s.Prog(x86.AMOVL) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 0xdeaddead + p.To.Type = obj.TYPE_MEM + p.To.Reg = x86.REG_SP + ssagen.AddAux(&p.To, v) + p = s.Prog(x86.AMOVL) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 0xdeaddead + p.To.Type = obj.TYPE_MEM + p.To.Reg = x86.REG_SP + ssagen.AddAux(&p.To, v) + p.To.Offset += 4 + case ssa.OpClobberReg: + x := uint64(0xdeaddeaddeaddead) + p := s.Prog(x86.AMOVQ) + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(x) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + default: + v.Fatalf("genValue not implemented: %s", v.LongString()) + } +} + +var blockJump = [...]struct { + asm, invasm obj.As +}{ + ssa.BlockAMD64EQ: {x86.AJEQ, x86.AJNE}, + ssa.BlockAMD64NE: {x86.AJNE, x86.AJEQ}, + ssa.BlockAMD64LT: {x86.AJLT, x86.AJGE}, + ssa.BlockAMD64GE: {x86.AJGE, x86.AJLT}, + ssa.BlockAMD64LE: {x86.AJLE, x86.AJGT}, + ssa.BlockAMD64GT: {x86.AJGT, x86.AJLE}, + ssa.BlockAMD64OS: {x86.AJOS, x86.AJOC}, + ssa.BlockAMD64OC: {x86.AJOC, x86.AJOS}, + ssa.BlockAMD64ULT: {x86.AJCS, x86.AJCC}, + ssa.BlockAMD64UGE: {x86.AJCC, x86.AJCS}, + ssa.BlockAMD64UGT: {x86.AJHI, x86.AJLS}, + ssa.BlockAMD64ULE: {x86.AJLS, x86.AJHI}, + ssa.BlockAMD64ORD: {x86.AJPC, 
x86.AJPS}, + ssa.BlockAMD64NAN: {x86.AJPS, x86.AJPC}, +} + +var eqfJumps = [2][2]ssagen.IndexJump{ + {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0] + {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1] +} +var nefJumps = [2][2]ssagen.IndexJump{ + {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0] + {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1] +} + +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { + switch b.Kind { + case ssa.BlockPlain: + if b.Succs[0].Block() != next { + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) + } + case ssa.BlockDefer: + // defer returns in rax: + // 0 if we should continue executing + // 1 if we should jump to deferreturn call + p := s.Prog(x86.ATESTL) + p.From.Type = obj.TYPE_REG + p.From.Reg = x86.REG_AX + p.To.Type = obj.TYPE_REG + p.To.Reg = x86.REG_AX + p = s.Prog(x86.AJNE) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()}) + if b.Succs[0].Block() != next { + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) + } + case ssa.BlockExit, ssa.BlockRetJmp: + case ssa.BlockRet: + s.Prog(obj.ARET) + + case ssa.BlockAMD64EQF: + s.CombJump(b, next, &eqfJumps) + + case ssa.BlockAMD64NEF: + s.CombJump(b, next, &nefJumps) + + case ssa.BlockAMD64EQ, ssa.BlockAMD64NE, + ssa.BlockAMD64LT, ssa.BlockAMD64GE, + ssa.BlockAMD64LE, ssa.BlockAMD64GT, + ssa.BlockAMD64OS, ssa.BlockAMD64OC, + ssa.BlockAMD64ULT, ssa.BlockAMD64UGT, + ssa.BlockAMD64ULE, ssa.BlockAMD64UGE: + jmp := blockJump[b.Kind] + switch next { + case b.Succs[0].Block(): + s.Br(jmp.invasm, b.Succs[1].Block()) + case b.Succs[1].Block(): + s.Br(jmp.asm, b.Succs[0].Block()) + default: + if b.Likely != ssa.BranchUnlikely { + s.Br(jmp.asm, 
b.Succs[0].Block()) + s.Br(obj.AJMP, b.Succs[1].Block()) + } else { + s.Br(jmp.invasm, b.Succs[1].Block()) + s.Br(obj.AJMP, b.Succs[0].Block()) + } + } + + case ssa.BlockAMD64JUMPTABLE: + // JMP *(TABLE)(INDEX*8) + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_MEM + p.To.Reg = b.Controls[1].Reg() + p.To.Index = b.Controls[0].Reg() + p.To.Scale = 8 + // Save jump tables for later resolution of the target blocks. + s.JumpTables = append(s.JumpTables, b) + + default: + b.Fatalf("branch not implemented: %s", b.LongString()) + } +} + +func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { + p := s.Prog(loadByType(t)) + p.From.Type = obj.TYPE_MEM + p.From.Name = obj.NAME_AUTO + p.From.Sym = n.Linksym() + p.From.Offset = n.FrameOffset() + off + p.To.Type = obj.TYPE_REG + p.To.Reg = reg + return p +} + +func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { + p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off) + p.To.Name = obj.NAME_PARAM + p.To.Sym = n.Linksym() + p.Pos = p.Pos.WithNotStmt() + return p +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/amd64/versions_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/amd64/versions_test.go new file mode 100644 index 0000000000000000000000000000000000000000..fc0046aceebade784641f915884e4ca36d1c953b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/amd64/versions_test.go @@ -0,0 +1,433 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// When using GOEXPERIMENT=boringcrypto, the test program links in the boringcrypto syso, +// which does not respect GOAMD64, so we skip the test if boringcrypto is enabled. 
+//go:build !boringcrypto + +package amd64_test + +import ( + "bufio" + "debug/elf" + "debug/macho" + "errors" + "fmt" + "go/build" + "internal/testenv" + "io" + "math" + "math/bits" + "os" + "os/exec" + "regexp" + "runtime" + "strconv" + "strings" + "testing" +) + +// Test to make sure that when building for GOAMD64=v1, we don't +// use any >v1 instructions. +func TestGoAMD64v1(t *testing.T) { + if runtime.GOARCH != "amd64" { + t.Skip("amd64-only test") + } + if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { + t.Skip("test only works on elf or macho platforms") + } + for _, tag := range build.Default.ToolTags { + if tag == "amd64.v2" { + t.Skip("compiling for GOAMD64=v2 or higher") + } + } + if os.Getenv("TESTGOAMD64V1") != "" { + t.Skip("recursive call") + } + + // Make a binary which will be a modified version of the + // currently running binary. + dst, err := os.CreateTemp("", "TestGoAMD64v1") + if err != nil { + t.Fatalf("failed to create temp file: %v", err) + } + defer os.Remove(dst.Name()) + dst.Chmod(0500) // make executable + + // Clobber all the non-v1 opcodes. + opcodes := map[string]bool{} + var features []string + for feature, opcodeList := range featureToOpcodes { + if runtimeFeatures[feature] { + features = append(features, fmt.Sprintf("cpu.%s=off", feature)) + } + for _, op := range opcodeList { + opcodes[op] = true + } + } + clobber(t, os.Args[0], dst, opcodes) + if err = dst.Close(); err != nil { + t.Fatalf("can't close binary: %v", err) + } + + // Run the resulting binary. + cmd := testenv.Command(t, dst.Name()) + testenv.CleanCmdEnv(cmd) + cmd.Env = append(cmd.Env, "TESTGOAMD64V1=yes") + cmd.Env = append(cmd.Env, fmt.Sprintf("GODEBUG=%s", strings.Join(features, ","))) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("couldn't execute test: %s", err) + } + // Expect to see output of the form "PASS\n", unless the test binary + // was compiled for coverage (in which case there will be an extra line). 
+ success := false + lines := strings.Split(string(out), "\n") + if len(lines) == 2 { + success = lines[0] == "PASS" && lines[1] == "" + } else if len(lines) == 3 { + success = lines[0] == "PASS" && + strings.HasPrefix(lines[1], "coverage") && lines[2] == "" + } + if !success { + t.Fatalf("test reported error: %s lines=%+v", string(out), lines) + } +} + +// Clobber copies the binary src to dst, replacing all the instructions in opcodes with +// faulting instructions. +func clobber(t *testing.T, src string, dst *os.File, opcodes map[string]bool) { + // Run objdump to get disassembly. + var re *regexp.Regexp + var disasm io.Reader + if false { + // TODO: go tool objdump doesn't disassemble the bmi1 instructions + // in question correctly. See issue 48584. + cmd := testenv.Command(t, "go", "tool", "objdump", src) + var err error + disasm, err = cmd.StdoutPipe() + if err != nil { + t.Fatal(err) + } + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := cmd.Wait(); err != nil { + t.Error(err) + } + }) + re = regexp.MustCompile(`^[^:]*:[-\d]+\s+0x([\da-f]+)\s+([\da-f]+)\s+([A-Z]+)`) + } else { + // TODO: we're depending on platform-native objdump here. Hence the Skipf + // below if it doesn't run for some reason. + cmd := testenv.Command(t, "objdump", "-d", src) + var err error + disasm, err = cmd.StdoutPipe() + if err != nil { + t.Fatal(err) + } + if err := cmd.Start(); err != nil { + if errors.Is(err, exec.ErrNotFound) { + t.Skipf("can't run test due to missing objdump: %s", err) + } + t.Fatal(err) + } + t.Cleanup(func() { + if err := cmd.Wait(); err != nil { + t.Error(err) + } + }) + re = regexp.MustCompile(`^\s*([\da-f]+):\s*((?:[\da-f][\da-f] )+)\s*([a-z\d]+)`) + } + + // Find all the instruction addresses we need to edit. 
+ virtualEdits := map[uint64]bool{} + scanner := bufio.NewScanner(disasm) + for scanner.Scan() { + line := scanner.Text() + parts := re.FindStringSubmatch(line) + if len(parts) == 0 { + continue + } + addr, err := strconv.ParseUint(parts[1], 16, 64) + if err != nil { + continue // not a hex address + } + opcode := strings.ToLower(parts[3]) + if !opcodes[opcode] { + continue + } + t.Logf("clobbering instruction %s", line) + n := (len(parts[2]) - strings.Count(parts[2], " ")) / 2 // number of bytes in instruction encoding + for i := 0; i < n; i++ { + // Only really need to make the first byte faulting, but might + // as well make all the bytes faulting. + virtualEdits[addr+uint64(i)] = true + } + } + + // Figure out where in the binary the edits must be done. + physicalEdits := map[uint64]bool{} + if e, err := elf.Open(src); err == nil { + for _, sec := range e.Sections { + vaddr := sec.Addr + paddr := sec.Offset + size := sec.Size + for a := range virtualEdits { + if a >= vaddr && a < vaddr+size { + physicalEdits[paddr+(a-vaddr)] = true + } + } + } + } else if m, err2 := macho.Open(src); err2 == nil { + for _, sec := range m.Sections { + vaddr := sec.Addr + paddr := uint64(sec.Offset) + size := sec.Size + for a := range virtualEdits { + if a >= vaddr && a < vaddr+size { + physicalEdits[paddr+(a-vaddr)] = true + } + } + } + } else { + t.Log(err) + t.Log(err2) + t.Fatal("executable format not elf or macho") + } + if len(virtualEdits) != len(physicalEdits) { + t.Fatal("couldn't find an instruction in text sections") + } + + // Copy source to destination, making edits along the way. 
+ f, err := os.Open(src) + if err != nil { + t.Fatal(err) + } + r := bufio.NewReader(f) + w := bufio.NewWriter(dst) + a := uint64(0) + done := 0 + for { + b, err := r.ReadByte() + if err == io.EOF { + break + } + if err != nil { + t.Fatal("can't read") + } + if physicalEdits[a] { + b = 0xcc // INT3 opcode + done++ + } + err = w.WriteByte(b) + if err != nil { + t.Fatal("can't write") + } + a++ + } + if done != len(physicalEdits) { + t.Fatal("physical edits remaining") + } + w.Flush() + f.Close() +} + +func setOf(keys ...string) map[string]bool { + m := make(map[string]bool, len(keys)) + for _, key := range keys { + m[key] = true + } + return m +} + +var runtimeFeatures = setOf( + "adx", "aes", "avx", "avx2", "bmi1", "bmi2", "erms", "fma", + "pclmulqdq", "popcnt", "rdtscp", "sse3", "sse41", "sse42", "ssse3", +) + +var featureToOpcodes = map[string][]string{ + // Note: we include *q, *l, and plain opcodes here. + // go tool objdump doesn't include a [QL] on popcnt instructions, until CL 351889 + // native objdump doesn't include [QL] on linux. + "popcnt": {"popcntq", "popcntl", "popcnt"}, + "bmi1": { + "andnq", "andnl", "andn", + "blsiq", "blsil", "blsi", + "blsmskq", "blsmskl", "blsmsk", + "blsrq", "blsrl", "blsr", + "tzcntq", "tzcntl", "tzcnt", + }, + "bmi2": { + "sarxq", "sarxl", "sarx", + "shlxq", "shlxl", "shlx", + "shrxq", "shrxl", "shrx", + }, + "sse41": { + "roundsd", + "pinsrq", "pinsrl", "pinsrd", "pinsrb", "pinsr", + "pextrq", "pextrl", "pextrd", "pextrb", "pextr", + "pminsb", "pminsd", "pminuw", "pminud", // Note: ub and sw are ok. 
+ "pmaxsb", "pmaxsd", "pmaxuw", "pmaxud", + "pmovzxbw", "pmovzxbd", "pmovzxbq", "pmovzxwd", "pmovzxwq", "pmovzxdq", + "pmovsxbw", "pmovsxbd", "pmovsxbq", "pmovsxwd", "pmovsxwq", "pmovsxdq", + "pblendvb", + }, + "fma": {"vfmadd231sd"}, + "movbe": {"movbeqq", "movbeq", "movbell", "movbel", "movbe"}, + "lzcnt": {"lzcntq", "lzcntl", "lzcnt"}, +} + +// Test to use POPCNT instruction, if available +func TestPopCnt(t *testing.T) { + for _, tt := range []struct { + x uint64 + want int + }{ + {0b00001111, 4}, + {0b00001110, 3}, + {0b00001100, 2}, + {0b00000000, 0}, + } { + if got := bits.OnesCount64(tt.x); got != tt.want { + t.Errorf("OnesCount64(%#x) = %d, want %d", tt.x, got, tt.want) + } + if got := bits.OnesCount32(uint32(tt.x)); got != tt.want { + t.Errorf("OnesCount32(%#x) = %d, want %d", tt.x, got, tt.want) + } + } +} + +// Test to use ANDN, if available +func TestAndNot(t *testing.T) { + for _, tt := range []struct { + x, y, want uint64 + }{ + {0b00001111, 0b00000011, 0b1100}, + {0b00001111, 0b00001100, 0b0011}, + {0b00000000, 0b00000000, 0b0000}, + } { + if got := tt.x &^ tt.y; got != tt.want { + t.Errorf("%#x &^ %#x = %#x, want %#x", tt.x, tt.y, got, tt.want) + } + if got := uint32(tt.x) &^ uint32(tt.y); got != uint32(tt.want) { + t.Errorf("%#x &^ %#x = %#x, want %#x", tt.x, tt.y, got, tt.want) + } + } +} + +// Test to use BLSI, if available +func TestBLSI(t *testing.T) { + for _, tt := range []struct { + x, want uint64 + }{ + {0b00001111, 0b001}, + {0b00001110, 0b010}, + {0b00001100, 0b100}, + {0b11000110, 0b010}, + {0b00000000, 0b000}, + } { + if got := tt.x & -tt.x; got != tt.want { + t.Errorf("%#x & (-%#x) = %#x, want %#x", tt.x, tt.x, got, tt.want) + } + if got := uint32(tt.x) & -uint32(tt.x); got != uint32(tt.want) { + t.Errorf("%#x & (-%#x) = %#x, want %#x", tt.x, tt.x, got, tt.want) + } + } +} + +// Test to use BLSMSK, if available +func TestBLSMSK(t *testing.T) { + for _, tt := range []struct { + x, want uint64 + }{ + {0b00001111, 0b001}, + {0b00001110, 
0b011}, + {0b00001100, 0b111}, + {0b11000110, 0b011}, + {0b00000000, 1<<64 - 1}, + } { + if got := tt.x ^ (tt.x - 1); got != tt.want { + t.Errorf("%#x ^ (%#x-1) = %#x, want %#x", tt.x, tt.x, got, tt.want) + } + if got := uint32(tt.x) ^ (uint32(tt.x) - 1); got != uint32(tt.want) { + t.Errorf("%#x ^ (%#x-1) = %#x, want %#x", tt.x, tt.x, got, uint32(tt.want)) + } + } +} + +// Test to use BLSR, if available +func TestBLSR(t *testing.T) { + for _, tt := range []struct { + x, want uint64 + }{ + {0b00001111, 0b00001110}, + {0b00001110, 0b00001100}, + {0b00001100, 0b00001000}, + {0b11000110, 0b11000100}, + {0b00000000, 0b00000000}, + } { + if got := tt.x & (tt.x - 1); got != tt.want { + t.Errorf("%#x & (%#x-1) = %#x, want %#x", tt.x, tt.x, got, tt.want) + } + if got := uint32(tt.x) & (uint32(tt.x) - 1); got != uint32(tt.want) { + t.Errorf("%#x & (%#x-1) = %#x, want %#x", tt.x, tt.x, got, tt.want) + } + } +} + +func TestTrailingZeros(t *testing.T) { + for _, tt := range []struct { + x uint64 + want int + }{ + {0b00001111, 0}, + {0b00001110, 1}, + {0b00001100, 2}, + {0b00001000, 3}, + {0b00000000, 64}, + } { + if got := bits.TrailingZeros64(tt.x); got != tt.want { + t.Errorf("TrailingZeros64(%#x) = %d, want %d", tt.x, got, tt.want) + } + want := tt.want + if want == 64 { + want = 32 + } + if got := bits.TrailingZeros32(uint32(tt.x)); got != want { + t.Errorf("TrailingZeros64(%#x) = %d, want %d", tt.x, got, want) + } + } +} + +func TestRound(t *testing.T) { + for _, tt := range []struct { + x, want float64 + }{ + {1.4, 1}, + {1.5, 2}, + {1.6, 2}, + {2.4, 2}, + {2.5, 2}, + {2.6, 3}, + } { + if got := math.RoundToEven(tt.x); got != tt.want { + t.Errorf("RoundToEven(%f) = %f, want %f", tt.x, got, tt.want) + } + } +} + +func TestFMA(t *testing.T) { + for _, tt := range []struct { + x, y, z, want float64 + }{ + {2, 3, 4, 10}, + {3, 4, 5, 17}, + } { + if got := math.FMA(tt.x, tt.y, tt.z); got != tt.want { + t.Errorf("FMA(%f,%f,%f) = %f, want %f", tt.x, tt.y, tt.z, got, tt.want) + } 
+ } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/arm/galign.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/arm/galign.go new file mode 100644 index 0000000000000000000000000000000000000000..43d811832eb82c427508a99c887ce6e31d85d08f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/arm/galign.go @@ -0,0 +1,25 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package arm + +import ( + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/internal/obj/arm" + "internal/buildcfg" +) + +func Init(arch *ssagen.ArchInfo) { + arch.LinkArch = &arm.Linkarm + arch.REGSP = arm.REGSP + arch.MAXWIDTH = (1 << 32) - 1 + arch.SoftFloat = buildcfg.GOARM.SoftFloat + arch.ZeroRange = zerorange + arch.Ginsnop = ginsnop + + arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {} + arch.SSAGenValue = ssaGenValue + arch.SSAGenBlock = ssaGenBlock +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/arm/ggen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/arm/ggen.go new file mode 100644 index 0000000000000000000000000000000000000000..f2c676300a93a5531c2f160fb0dba7f121068683 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/arm/ggen.go @@ -0,0 +1,60 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package arm + +import ( + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/arm" +) + +func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog { + if cnt == 0 { + return p + } + if *r0 == 0 { + p = pp.Append(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0) + *r0 = 1 + } + + if cnt < int64(4*types.PtrSize) { + for i := int64(0); i < cnt; i += int64(types.PtrSize) { + p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i) + } + } else if cnt <= int64(128*types.PtrSize) { + p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0) + p.Reg = arm.REGSP + p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ir.Syms.Duffzero + p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize)) + } else { + p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0) + p.Reg = arm.REGSP + p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0) + p.Reg = arm.REG_R1 + p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4) + p1 := p + p.Scond |= arm.C_PBIT + p = pp.Append(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0) + p.Reg = arm.REG_R2 + p = pp.Append(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0) + p.To.SetTarget(p1) + } + + return p +} + +func ginsnop(pp *objw.Progs) *obj.Prog { + p := pp.Prog(arm.AAND) + p.From.Type = obj.TYPE_REG + p.From.Reg = arm.REG_R0 + p.To.Type = obj.TYPE_REG + p.To.Reg = arm.REG_R0 + p.Scond = arm.C_SCOND_EQ + return p +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/arm/ssa.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/arm/ssa.go new file mode 100644 index 0000000000000000000000000000000000000000..638ed3ed4ef3ba5eee9e7d4cb8db3bb654d06b60 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/arm/ssa.go @@ -0,0 +1,981 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package arm + +import ( + "fmt" + "internal/buildcfg" + "math" + "math/bits" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/logopt" + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/arm" +) + +// loadByType returns the load instruction of the given type. +func loadByType(t *types.Type) obj.As { + if t.IsFloat() { + switch t.Size() { + case 4: + return arm.AMOVF + case 8: + return arm.AMOVD + } + } else { + switch t.Size() { + case 1: + if t.IsSigned() { + return arm.AMOVB + } else { + return arm.AMOVBU + } + case 2: + if t.IsSigned() { + return arm.AMOVH + } else { + return arm.AMOVHU + } + case 4: + return arm.AMOVW + } + } + panic("bad load type") +} + +// storeByType returns the store instruction of the given type. +func storeByType(t *types.Type) obj.As { + if t.IsFloat() { + switch t.Size() { + case 4: + return arm.AMOVF + case 8: + return arm.AMOVD + } + } else { + switch t.Size() { + case 1: + return arm.AMOVB + case 2: + return arm.AMOVH + case 4: + return arm.AMOVW + } + } + panic("bad store type") +} + +// shift type is used as Offset in obj.TYPE_SHIFT operands to encode shifted register operands. +type shift int64 + +// copied from ../../../internal/obj/util.go:/TYPE_SHIFT +func (v shift) String() string { + op := "<<>>->@>"[((v>>5)&3)<<1:] + if v&(1<<4) != 0 { + // register shift + return fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15) + } else { + // constant shift + return fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31) + } +} + +// makeshift encodes a register shifted by a constant. 
+func makeshift(v *ssa.Value, reg int16, typ int64, s int64) shift { + if s < 0 || s >= 32 { + v.Fatalf("shift out of range: %d", s) + } + return shift(int64(reg&0xf) | typ | (s&31)<<7) +} + +// genshift generates a Prog for r = r0 op (r1 shifted by n). +func genshift(s *ssagen.State, v *ssa.Value, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog { + p := s.Prog(as) + p.From.Type = obj.TYPE_SHIFT + p.From.Offset = int64(makeshift(v, r1, typ, n)) + p.Reg = r0 + if r != 0 { + p.To.Type = obj.TYPE_REG + p.To.Reg = r + } + return p +} + +// makeregshift encodes a register shifted by a register. +func makeregshift(r1 int16, typ int64, r2 int16) shift { + return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4) +} + +// genregshift generates a Prog for r = r0 op (r1 shifted by r2). +func genregshift(s *ssagen.State, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog { + p := s.Prog(as) + p.From.Type = obj.TYPE_SHIFT + p.From.Offset = int64(makeregshift(r1, typ, r2)) + p.Reg = r0 + if r != 0 { + p.To.Type = obj.TYPE_REG + p.To.Reg = r + } + return p +} + +// find a (lsb, width) pair for BFC +// lsb must be in [0, 31], width must be in [1, 32 - lsb] +// return (0xffffffff, 0) if v is not a binary like 0...01...10...0 +func getBFC(v uint32) (uint32, uint32) { + var m, l uint32 + // BFC is not applicable with zero + if v == 0 { + return 0xffffffff, 0 + } + // find the lowest set bit, for example l=2 for 0x3ffffffc + l = uint32(bits.TrailingZeros32(v)) + // m-1 represents the highest set bit index, for example m=30 for 0x3ffffffc + m = 32 - uint32(bits.LeadingZeros32(v)) + // check if v is a binary like 0...01...10...0 + if (1< l for non-zero v + return l, m - l + } + // invalid + return 0xffffffff, 0 +} + +func ssaGenValue(s *ssagen.State, v *ssa.Value) { + switch v.Op { + case ssa.OpCopy, ssa.OpARMMOVWreg: + if v.Type.IsMemory() { + return + } + x := v.Args[0].Reg() + y := v.Reg() + if x == y { + return + } + as := arm.AMOVW + if v.Type.IsFloat() { + switch 
v.Type.Size() { + case 4: + as = arm.AMOVF + case 8: + as = arm.AMOVD + default: + panic("bad float size") + } + } + p := s.Prog(as) + p.From.Type = obj.TYPE_REG + p.From.Reg = x + p.To.Type = obj.TYPE_REG + p.To.Reg = y + case ssa.OpARMMOVWnop: + // nothing to do + case ssa.OpLoadReg: + if v.Type.IsFlags() { + v.Fatalf("load flags not implemented: %v", v.LongString()) + return + } + p := s.Prog(loadByType(v.Type)) + ssagen.AddrAuto(&p.From, v.Args[0]) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpStoreReg: + if v.Type.IsFlags() { + v.Fatalf("store flags not implemented: %v", v.LongString()) + return + } + p := s.Prog(storeByType(v.Type)) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + ssagen.AddrAuto(&p.To, v) + case ssa.OpARMADD, + ssa.OpARMADC, + ssa.OpARMSUB, + ssa.OpARMSBC, + ssa.OpARMRSB, + ssa.OpARMAND, + ssa.OpARMOR, + ssa.OpARMXOR, + ssa.OpARMBIC, + ssa.OpARMMUL, + ssa.OpARMADDF, + ssa.OpARMADDD, + ssa.OpARMSUBF, + ssa.OpARMSUBD, + ssa.OpARMSLL, + ssa.OpARMSRL, + ssa.OpARMSRA, + ssa.OpARMMULF, + ssa.OpARMMULD, + ssa.OpARMNMULF, + ssa.OpARMNMULD, + ssa.OpARMDIVF, + ssa.OpARMDIVD: + r := v.Reg() + r1 := v.Args[0].Reg() + r2 := v.Args[1].Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r2 + p.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpARMSRR: + genregshift(s, arm.AMOVW, 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR) + case ssa.OpARMMULAF, ssa.OpARMMULAD, ssa.OpARMMULSF, ssa.OpARMMULSD, ssa.OpARMFMULAD: + r := v.Reg() + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + r2 := v.Args[2].Reg() + if r != r0 { + v.Fatalf("result and addend are not in the same register: %v", v.LongString()) + } + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r2 + p.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpARMADDS, + ssa.OpARMSUBS: + r := v.Reg0() + r1 := v.Args[0].Reg() + r2 := v.Args[1].Reg() + p := s.Prog(v.Op.Asm()) + p.Scond = arm.C_SBIT + 
p.From.Type = obj.TYPE_REG + p.From.Reg = r2 + p.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpARMSRAcond: + // ARM shift instructions uses only the low-order byte of the shift amount + // generate conditional instructions to deal with large shifts + // flag is already set + // SRA.HS $31, Rarg0, Rdst // shift 31 bits to get the sign bit + // SRA.LO Rarg1, Rarg0, Rdst + r := v.Reg() + r1 := v.Args[0].Reg() + r2 := v.Args[1].Reg() + p := s.Prog(arm.ASRA) + p.Scond = arm.C_SCOND_HS + p.From.Type = obj.TYPE_CONST + p.From.Offset = 31 + p.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + p = s.Prog(arm.ASRA) + p.Scond = arm.C_SCOND_LO + p.From.Type = obj.TYPE_REG + p.From.Reg = r2 + p.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpARMBFX, ssa.OpARMBFXU: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt >> 8 + p.AddRestSourceConst(v.AuxInt & 0xff) + p.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARMANDconst, ssa.OpARMBICconst: + // try to optimize ANDconst and BICconst to BFC, which saves bytes and ticks + // BFC is only available on ARMv7, and its result and source are in the same register + if buildcfg.GOARM.Version == 7 && v.Reg() == v.Args[0].Reg() { + var val uint32 + if v.Op == ssa.OpARMANDconst { + val = ^uint32(v.AuxInt) + } else { // BICconst + val = uint32(v.AuxInt) + } + lsb, width := getBFC(val) + // omit BFC for ARM's imm12 + if 8 < width && width < 24 { + p := s.Prog(arm.ABFC) + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(width) + p.AddRestSourceConst(int64(lsb)) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + break + } + } + // fall back to ordinary form + fallthrough + case ssa.OpARMADDconst, + ssa.OpARMADCconst, + ssa.OpARMSUBconst, + ssa.OpARMSBCconst, + ssa.OpARMRSBconst, + ssa.OpARMRSCconst, + ssa.OpARMORconst, + ssa.OpARMXORconst, + ssa.OpARMSLLconst, + ssa.OpARMSRLconst, + ssa.OpARMSRAconst: + p := s.Prog(v.Op.Asm()) + 
p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARMADDSconst, + ssa.OpARMSUBSconst, + ssa.OpARMRSBSconst: + p := s.Prog(v.Op.Asm()) + p.Scond = arm.C_SBIT + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + case ssa.OpARMSRRconst: + genshift(s, v, arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt) + case ssa.OpARMADDshiftLL, + ssa.OpARMADCshiftLL, + ssa.OpARMSUBshiftLL, + ssa.OpARMSBCshiftLL, + ssa.OpARMRSBshiftLL, + ssa.OpARMRSCshiftLL, + ssa.OpARMANDshiftLL, + ssa.OpARMORshiftLL, + ssa.OpARMXORshiftLL, + ssa.OpARMBICshiftLL: + genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt) + case ssa.OpARMADDSshiftLL, + ssa.OpARMSUBSshiftLL, + ssa.OpARMRSBSshiftLL: + p := genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt) + p.Scond = arm.C_SBIT + case ssa.OpARMADDshiftRL, + ssa.OpARMADCshiftRL, + ssa.OpARMSUBshiftRL, + ssa.OpARMSBCshiftRL, + ssa.OpARMRSBshiftRL, + ssa.OpARMRSCshiftRL, + ssa.OpARMANDshiftRL, + ssa.OpARMORshiftRL, + ssa.OpARMXORshiftRL, + ssa.OpARMBICshiftRL: + genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt) + case ssa.OpARMADDSshiftRL, + ssa.OpARMSUBSshiftRL, + ssa.OpARMRSBSshiftRL: + p := genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt) + p.Scond = arm.C_SBIT + case ssa.OpARMADDshiftRA, + ssa.OpARMADCshiftRA, + ssa.OpARMSUBshiftRA, + ssa.OpARMSBCshiftRA, + ssa.OpARMRSBshiftRA, + ssa.OpARMRSCshiftRA, + ssa.OpARMANDshiftRA, + ssa.OpARMORshiftRA, + ssa.OpARMXORshiftRA, + ssa.OpARMBICshiftRA: + genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt) + case ssa.OpARMADDSshiftRA, + ssa.OpARMSUBSshiftRA, + ssa.OpARMRSBSshiftRA: + p := genshift(s, v, v.Op.Asm(), 
v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt) + p.Scond = arm.C_SBIT + case ssa.OpARMXORshiftRR: + genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt) + case ssa.OpARMMVNshiftLL: + genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt) + case ssa.OpARMMVNshiftRL: + genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt) + case ssa.OpARMMVNshiftRA: + genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt) + case ssa.OpARMMVNshiftLLreg: + genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL) + case ssa.OpARMMVNshiftRLreg: + genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR) + case ssa.OpARMMVNshiftRAreg: + genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR) + case ssa.OpARMADDshiftLLreg, + ssa.OpARMADCshiftLLreg, + ssa.OpARMSUBshiftLLreg, + ssa.OpARMSBCshiftLLreg, + ssa.OpARMRSBshiftLLreg, + ssa.OpARMRSCshiftLLreg, + ssa.OpARMANDshiftLLreg, + ssa.OpARMORshiftLLreg, + ssa.OpARMXORshiftLLreg, + ssa.OpARMBICshiftLLreg: + genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL) + case ssa.OpARMADDSshiftLLreg, + ssa.OpARMSUBSshiftLLreg, + ssa.OpARMRSBSshiftLLreg: + p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL) + p.Scond = arm.C_SBIT + case ssa.OpARMADDshiftRLreg, + ssa.OpARMADCshiftRLreg, + ssa.OpARMSUBshiftRLreg, + ssa.OpARMSBCshiftRLreg, + ssa.OpARMRSBshiftRLreg, + ssa.OpARMRSCshiftRLreg, + ssa.OpARMANDshiftRLreg, + ssa.OpARMORshiftRLreg, + ssa.OpARMXORshiftRLreg, + ssa.OpARMBICshiftRLreg: + genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR) + case ssa.OpARMADDSshiftRLreg, + ssa.OpARMSUBSshiftRLreg, + ssa.OpARMRSBSshiftRLreg: + p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), 
v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR) + p.Scond = arm.C_SBIT + case ssa.OpARMADDshiftRAreg, + ssa.OpARMADCshiftRAreg, + ssa.OpARMSUBshiftRAreg, + ssa.OpARMSBCshiftRAreg, + ssa.OpARMRSBshiftRAreg, + ssa.OpARMRSCshiftRAreg, + ssa.OpARMANDshiftRAreg, + ssa.OpARMORshiftRAreg, + ssa.OpARMXORshiftRAreg, + ssa.OpARMBICshiftRAreg: + genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR) + case ssa.OpARMADDSshiftRAreg, + ssa.OpARMSUBSshiftRAreg, + ssa.OpARMRSBSshiftRAreg: + p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR) + p.Scond = arm.C_SBIT + case ssa.OpARMHMUL, + ssa.OpARMHMULU: + // 32-bit high multiplication + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_REGREG + p.To.Reg = v.Reg() + p.To.Offset = arm.REGTMP // throw away low 32-bit into tmp register + case ssa.OpARMMULLU: + // 32-bit multiplication, results 64-bit, high 32-bit in out0, low 32-bit in out1 + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_REGREG + p.To.Reg = v.Reg0() // high 32-bit + p.To.Offset = int64(v.Reg1()) // low 32-bit + case ssa.OpARMMULA, ssa.OpARMMULS: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_REGREG2 + p.To.Reg = v.Reg() // result + p.To.Offset = int64(v.Args[2].Reg()) // addend + case ssa.OpARMMOVWconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARMMOVFconst, + ssa.OpARMMOVDconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_FCONST + p.From.Val = math.Float64frombits(uint64(v.AuxInt)) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARMCMP, + ssa.OpARMCMN, + ssa.OpARMTST, + ssa.OpARMTEQ, 
+ ssa.OpARMCMPF, + ssa.OpARMCMPD: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + // Special layout in ARM assembly + // Comparing to x86, the operands of ARM's CMP are reversed. + p.From.Reg = v.Args[1].Reg() + p.Reg = v.Args[0].Reg() + case ssa.OpARMCMPconst, + ssa.OpARMCMNconst, + ssa.OpARMTSTconst, + ssa.OpARMTEQconst: + // Special layout in ARM assembly + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.Reg = v.Args[0].Reg() + case ssa.OpARMCMPF0, + ssa.OpARMCMPD0: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + case ssa.OpARMCMPshiftLL, ssa.OpARMCMNshiftLL, ssa.OpARMTSTshiftLL, ssa.OpARMTEQshiftLL: + genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt) + case ssa.OpARMCMPshiftRL, ssa.OpARMCMNshiftRL, ssa.OpARMTSTshiftRL, ssa.OpARMTEQshiftRL: + genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt) + case ssa.OpARMCMPshiftRA, ssa.OpARMCMNshiftRA, ssa.OpARMTSTshiftRA, ssa.OpARMTEQshiftRA: + genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt) + case ssa.OpARMCMPshiftLLreg, ssa.OpARMCMNshiftLLreg, ssa.OpARMTSTshiftLLreg, ssa.OpARMTEQshiftLLreg: + genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL) + case ssa.OpARMCMPshiftRLreg, ssa.OpARMCMNshiftRLreg, ssa.OpARMTSTshiftRLreg, ssa.OpARMTEQshiftRLreg: + genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR) + case ssa.OpARMCMPshiftRAreg, ssa.OpARMCMNshiftRAreg, ssa.OpARMTSTshiftRAreg, ssa.OpARMTEQshiftRAreg: + genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR) + case ssa.OpARMMOVWaddr: + p := s.Prog(arm.AMOVW) + p.From.Type = obj.TYPE_ADDR + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + var wantreg string + // MOVW $sym+off(base), R + // the assembler expands it as the 
following: + // - base is SP: add constant offset to SP (R13) + // when constant is large, tmp register (R11) may be used + // - base is SB: load external address from constant pool (use relocation) + switch v.Aux.(type) { + default: + v.Fatalf("aux is of unknown type %T", v.Aux) + case *obj.LSym: + wantreg = "SB" + ssagen.AddAux(&p.From, v) + case *ir.Name: + wantreg = "SP" + ssagen.AddAux(&p.From, v) + case nil: + // No sym, just MOVW $off(SP), R + wantreg = "SP" + p.From.Offset = v.AuxInt + } + if reg := v.Args[0].RegName(); reg != wantreg { + v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg) + } + + case ssa.OpARMMOVBload, + ssa.OpARMMOVBUload, + ssa.OpARMMOVHload, + ssa.OpARMMOVHUload, + ssa.OpARMMOVWload, + ssa.OpARMMOVFload, + ssa.OpARMMOVDload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARMMOVBstore, + ssa.OpARMMOVHstore, + ssa.OpARMMOVWstore, + ssa.OpARMMOVFstore, + ssa.OpARMMOVDstore: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx: + // this is just shift 0 bits + fallthrough + case ssa.OpARMMOVWloadshiftLL: + p := genshift(s, v, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt) + p.From.Reg = v.Args[0].Reg() + case ssa.OpARMMOVWloadshiftRL: + p := genshift(s, v, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt) + p.From.Reg = v.Args[0].Reg() + case ssa.OpARMMOVWloadshiftRA: + p := genshift(s, v, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt) + p.From.Reg = v.Args[0].Reg() + case ssa.OpARMMOVWstoreidx, ssa.OpARMMOVBstoreidx, ssa.OpARMMOVHstoreidx: + // this is just shift 0 bits + fallthrough + case 
ssa.OpARMMOVWstoreshiftLL: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[2].Reg() + p.To.Type = obj.TYPE_SHIFT + p.To.Reg = v.Args[0].Reg() + p.To.Offset = int64(makeshift(v, v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt)) + case ssa.OpARMMOVWstoreshiftRL: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[2].Reg() + p.To.Type = obj.TYPE_SHIFT + p.To.Reg = v.Args[0].Reg() + p.To.Offset = int64(makeshift(v, v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt)) + case ssa.OpARMMOVWstoreshiftRA: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[2].Reg() + p.To.Type = obj.TYPE_SHIFT + p.To.Reg = v.Args[0].Reg() + p.To.Offset = int64(makeshift(v, v.Args[1].Reg(), arm.SHIFT_AR, v.AuxInt)) + case ssa.OpARMMOVBreg, + ssa.OpARMMOVBUreg, + ssa.OpARMMOVHreg, + ssa.OpARMMOVHUreg: + a := v.Args[0] + for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg || a.Op == ssa.OpARMMOVWnop { + a = a.Args[0] + } + if a.Op == ssa.OpLoadReg { + t := a.Type + switch { + case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(), + v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(), + v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(), + v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned(): + // arg is a proper-typed load, already zero/sign-extended, don't extend again + if v.Reg() == v.Args[0].Reg() { + return + } + p := s.Prog(arm.AMOVW) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + return + default: + } + } + if buildcfg.GOARM.Version >= 6 { + // generate more efficient "MOVB/MOVBU/MOVH/MOVHU Reg@>0, Reg" on ARMv6 & ARMv7 + genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, 0) + return + } + fallthrough + case ssa.OpARMMVN, + ssa.OpARMCLZ, + ssa.OpARMREV, + ssa.OpARMREV16, + ssa.OpARMRBIT, + ssa.OpARMSQRTF, + ssa.OpARMSQRTD, + ssa.OpARMNEGF, + ssa.OpARMNEGD, + ssa.OpARMABSD, + ssa.OpARMMOVWF, + ssa.OpARMMOVWD, + 
ssa.OpARMMOVFW, + ssa.OpARMMOVDW, + ssa.OpARMMOVFD, + ssa.OpARMMOVDF: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARMMOVWUF, + ssa.OpARMMOVWUD, + ssa.OpARMMOVFWU, + ssa.OpARMMOVDWU: + p := s.Prog(v.Op.Asm()) + p.Scond = arm.C_UBIT + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARMCMOVWHSconst: + p := s.Prog(arm.AMOVW) + p.Scond = arm.C_SCOND_HS + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARMCMOVWLSconst: + p := s.Prog(arm.AMOVW) + p.Scond = arm.C_SCOND_LS + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter: + s.Call(v) + case ssa.OpARMCALLtail: + s.TailCall(v) + case ssa.OpARMCALLudiv: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ir.Syms.Udiv + case ssa.OpARMLoweredWB: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + // AuxInt encodes how many buffer entries we need. 
+ p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1] + case ssa.OpARMLoweredPanicBoundsA, ssa.OpARMLoweredPanicBoundsB, ssa.OpARMLoweredPanicBoundsC: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] + s.UseArgs(8) // space used in callee args area by assembly stubs + case ssa.OpARMLoweredPanicExtendA, ssa.OpARMLoweredPanicExtendB, ssa.OpARMLoweredPanicExtendC: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt] + s.UseArgs(12) // space used in callee args area by assembly stubs + case ssa.OpARMDUFFZERO: + p := s.Prog(obj.ADUFFZERO) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ir.Syms.Duffzero + p.To.Offset = v.AuxInt + case ssa.OpARMDUFFCOPY: + p := s.Prog(obj.ADUFFCOPY) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ir.Syms.Duffcopy + p.To.Offset = v.AuxInt + case ssa.OpARMLoweredNilCheck: + // Issue a load which will fault if arg is nil. 
+ p := s.Prog(arm.AMOVB) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = arm.REGTMP + if logopt.Enabled() { + logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) + } + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") + } + case ssa.OpARMLoweredZero: + // MOVW.P Rarg2, 4(R1) + // CMP Rarg1, R1 + // BLE -2(PC) + // arg1 is the address of the last element to zero + // arg2 is known to be zero + // auxint is alignment + var sz int64 + var mov obj.As + switch { + case v.AuxInt%4 == 0: + sz = 4 + mov = arm.AMOVW + case v.AuxInt%2 == 0: + sz = 2 + mov = arm.AMOVH + default: + sz = 1 + mov = arm.AMOVB + } + p := s.Prog(mov) + p.Scond = arm.C_PBIT + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[2].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = arm.REG_R1 + p.To.Offset = sz + p2 := s.Prog(arm.ACMP) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = v.Args[1].Reg() + p2.Reg = arm.REG_R1 + p3 := s.Prog(arm.ABLE) + p3.To.Type = obj.TYPE_BRANCH + p3.To.SetTarget(p) + case ssa.OpARMLoweredMove: + // MOVW.P 4(R1), Rtmp + // MOVW.P Rtmp, 4(R2) + // CMP Rarg2, R1 + // BLE -3(PC) + // arg2 is the address of the last element of src + // auxint is alignment + var sz int64 + var mov obj.As + switch { + case v.AuxInt%4 == 0: + sz = 4 + mov = arm.AMOVW + case v.AuxInt%2 == 0: + sz = 2 + mov = arm.AMOVH + default: + sz = 1 + mov = arm.AMOVB + } + p := s.Prog(mov) + p.Scond = arm.C_PBIT + p.From.Type = obj.TYPE_MEM + p.From.Reg = arm.REG_R1 + p.From.Offset = sz + p.To.Type = obj.TYPE_REG + p.To.Reg = arm.REGTMP + p2 := s.Prog(mov) + p2.Scond = arm.C_PBIT + p2.From.Type = obj.TYPE_REG + p2.From.Reg = arm.REGTMP + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = arm.REG_R2 + p2.To.Offset = sz + p3 := s.Prog(arm.ACMP) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = v.Args[2].Reg() + p3.Reg = arm.REG_R1 + p4 := s.Prog(arm.ABLE) + 
p4.To.Type = obj.TYPE_BRANCH + p4.To.SetTarget(p) + case ssa.OpARMEqual, + ssa.OpARMNotEqual, + ssa.OpARMLessThan, + ssa.OpARMLessEqual, + ssa.OpARMGreaterThan, + ssa.OpARMGreaterEqual, + ssa.OpARMLessThanU, + ssa.OpARMLessEqualU, + ssa.OpARMGreaterThanU, + ssa.OpARMGreaterEqualU: + // generate boolean values + // use conditional move + p := s.Prog(arm.AMOVW) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 0 + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + p = s.Prog(arm.AMOVW) + p.Scond = condBits[v.Op] + p.From.Type = obj.TYPE_CONST + p.From.Offset = 1 + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARMLoweredGetClosurePtr: + // Closure pointer is R7 (arm.REGCTXT). + ssagen.CheckLoweredGetClosurePtr(v) + case ssa.OpARMLoweredGetCallerSP: + // caller's SP is FixedFrameSize below the address of the first arg + p := s.Prog(arm.AMOVW) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = -base.Ctxt.Arch.FixedFrameSize + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARMLoweredGetCallerPC: + p := s.Prog(obj.AGETCALLERPC) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARMFlagConstant: + v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString()) + case ssa.OpARMInvertFlags: + v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) + case ssa.OpClobber, ssa.OpClobberReg: + // TODO: implement for clobberdead experiment. Nop is ok for now. 
+ default: + v.Fatalf("genValue not implemented: %s", v.LongString()) + } +} + +var condBits = map[ssa.Op]uint8{ + ssa.OpARMEqual: arm.C_SCOND_EQ, + ssa.OpARMNotEqual: arm.C_SCOND_NE, + ssa.OpARMLessThan: arm.C_SCOND_LT, + ssa.OpARMLessThanU: arm.C_SCOND_LO, + ssa.OpARMLessEqual: arm.C_SCOND_LE, + ssa.OpARMLessEqualU: arm.C_SCOND_LS, + ssa.OpARMGreaterThan: arm.C_SCOND_GT, + ssa.OpARMGreaterThanU: arm.C_SCOND_HI, + ssa.OpARMGreaterEqual: arm.C_SCOND_GE, + ssa.OpARMGreaterEqualU: arm.C_SCOND_HS, +} + +var blockJump = map[ssa.BlockKind]struct { + asm, invasm obj.As +}{ + ssa.BlockARMEQ: {arm.ABEQ, arm.ABNE}, + ssa.BlockARMNE: {arm.ABNE, arm.ABEQ}, + ssa.BlockARMLT: {arm.ABLT, arm.ABGE}, + ssa.BlockARMGE: {arm.ABGE, arm.ABLT}, + ssa.BlockARMLE: {arm.ABLE, arm.ABGT}, + ssa.BlockARMGT: {arm.ABGT, arm.ABLE}, + ssa.BlockARMULT: {arm.ABLO, arm.ABHS}, + ssa.BlockARMUGE: {arm.ABHS, arm.ABLO}, + ssa.BlockARMUGT: {arm.ABHI, arm.ABLS}, + ssa.BlockARMULE: {arm.ABLS, arm.ABHI}, + ssa.BlockARMLTnoov: {arm.ABMI, arm.ABPL}, + ssa.BlockARMGEnoov: {arm.ABPL, arm.ABMI}, +} + +// To model a 'LEnoov' ('<=' without overflow checking) branching. +var leJumps = [2][2]ssagen.IndexJump{ + {{Jump: arm.ABEQ, Index: 0}, {Jump: arm.ABPL, Index: 1}}, // next == b.Succs[0] + {{Jump: arm.ABMI, Index: 0}, {Jump: arm.ABEQ, Index: 0}}, // next == b.Succs[1] +} + +// To model a 'GTnoov' ('>' without overflow checking) branching. 
+var gtJumps = [2][2]ssagen.IndexJump{ + {{Jump: arm.ABMI, Index: 1}, {Jump: arm.ABEQ, Index: 1}}, // next == b.Succs[0] + {{Jump: arm.ABEQ, Index: 1}, {Jump: arm.ABPL, Index: 0}}, // next == b.Succs[1] +} + +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { + switch b.Kind { + case ssa.BlockPlain: + if b.Succs[0].Block() != next { + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) + } + + case ssa.BlockDefer: + // defer returns in R0: + // 0 if we should continue executing + // 1 if we should jump to deferreturn call + p := s.Prog(arm.ACMP) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 0 + p.Reg = arm.REG_R0 + p = s.Prog(arm.ABNE) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()}) + if b.Succs[0].Block() != next { + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) + } + + case ssa.BlockExit, ssa.BlockRetJmp: + + case ssa.BlockRet: + s.Prog(obj.ARET) + + case ssa.BlockARMEQ, ssa.BlockARMNE, + ssa.BlockARMLT, ssa.BlockARMGE, + ssa.BlockARMLE, ssa.BlockARMGT, + ssa.BlockARMULT, ssa.BlockARMUGT, + ssa.BlockARMULE, ssa.BlockARMUGE, + ssa.BlockARMLTnoov, ssa.BlockARMGEnoov: + jmp := blockJump[b.Kind] + switch next { + case b.Succs[0].Block(): + s.Br(jmp.invasm, b.Succs[1].Block()) + case b.Succs[1].Block(): + s.Br(jmp.asm, b.Succs[0].Block()) + default: + if b.Likely != ssa.BranchUnlikely { + s.Br(jmp.asm, b.Succs[0].Block()) + s.Br(obj.AJMP, b.Succs[1].Block()) + } else { + s.Br(jmp.invasm, b.Succs[1].Block()) + s.Br(obj.AJMP, b.Succs[0].Block()) + } + } + + case ssa.BlockARMLEnoov: + s.CombJump(b, next, &leJumps) + + case ssa.BlockARMGTnoov: + s.CombJump(b, next, >Jumps) + + default: + b.Fatalf("branch not implemented: %s", b.LongString()) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/arm64/galign.go 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/arm64/galign.go new file mode 100644 index 0000000000000000000000000000000000000000..3ebd860de8f887c4c0b4dbc3934b7ed297995398 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/arm64/galign.go @@ -0,0 +1,27 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package arm64 + +import ( + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/internal/obj/arm64" +) + +func Init(arch *ssagen.ArchInfo) { + arch.LinkArch = &arm64.Linkarm64 + arch.REGSP = arm64.REGSP + arch.MAXWIDTH = 1 << 50 + + arch.PadFrame = padframe + arch.ZeroRange = zerorange + arch.Ginsnop = ginsnop + + arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {} + arch.SSAGenValue = ssaGenValue + arch.SSAGenBlock = ssaGenBlock + arch.LoadRegResult = loadRegResult + arch.SpillArgReg = spillArgReg +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/arm64/ggen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/arm64/ggen.go new file mode 100644 index 0000000000000000000000000000000000000000..a681adcb7fe28fb71797688981c90cf7e0077c1e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/arm64/ggen.go @@ -0,0 +1,73 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package arm64 + +import ( + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/arm64" +) + +func padframe(frame int64) int64 { + // arm64 requires that the frame size (not counting saved FP&LR) + // be 16 bytes aligned. If not, pad it. 
+ if frame%16 != 0 { + frame += 16 - (frame % 16) + } + return frame +} + +func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { + if cnt == 0 { + return p + } + if cnt < int64(4*types.PtrSize) { + for i := int64(0); i < cnt; i += int64(types.PtrSize) { + p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i) + } + } else if cnt <= int64(128*types.PtrSize) { + if cnt%(2*int64(types.PtrSize)) != 0 { + p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off) + off += int64(types.PtrSize) + cnt -= int64(types.PtrSize) + } + p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0) + p = pp.Append(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0) + p.Reg = arm64.REG_R20 + p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ir.Syms.Duffzero + p.To.Offset = 4 * (64 - cnt/(2*int64(types.PtrSize))) + } else { + // Not using REGTMP, so this is async preemptible (async preemption clobbers REGTMP). 
+ // We are at the function entry, where no register is live, so it is okay to clobber + // other registers + const rtmp = arm64.REG_R20 + p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0) + p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0) + p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0) + p.Reg = arm64.REGRT1 + p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0) + p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0) + p.Reg = arm64.REGRT1 + p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(types.PtrSize)) + p.Scond = arm64.C_XPRE + p1 := p + p = pp.Append(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0) + p.Reg = arm64.REGRT2 + p = pp.Append(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0) + p.To.SetTarget(p1) + } + + return p +} + +func ginsnop(pp *objw.Progs) *obj.Prog { + p := pp.Prog(arm64.AHINT) + p.From.Type = obj.TYPE_CONST + return p +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/arm64/ssa.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/arm64/ssa.go new file mode 100644 index 0000000000000000000000000000000000000000..27b4e881c020168e7b4d3162aea6e671a680a81f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/arm64/ssa.go @@ -0,0 +1,1371 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package arm64 + +import ( + "math" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/logopt" + "cmd/compile/internal/objw" + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/arm64" +) + +// loadByType returns the load instruction of the given type. 
+func loadByType(t *types.Type) obj.As { + if t.IsFloat() { + switch t.Size() { + case 4: + return arm64.AFMOVS + case 8: + return arm64.AFMOVD + } + } else { + switch t.Size() { + case 1: + if t.IsSigned() { + return arm64.AMOVB + } else { + return arm64.AMOVBU + } + case 2: + if t.IsSigned() { + return arm64.AMOVH + } else { + return arm64.AMOVHU + } + case 4: + if t.IsSigned() { + return arm64.AMOVW + } else { + return arm64.AMOVWU + } + case 8: + return arm64.AMOVD + } + } + panic("bad load type") +} + +// storeByType returns the store instruction of the given type. +func storeByType(t *types.Type) obj.As { + if t.IsFloat() { + switch t.Size() { + case 4: + return arm64.AFMOVS + case 8: + return arm64.AFMOVD + } + } else { + switch t.Size() { + case 1: + return arm64.AMOVB + case 2: + return arm64.AMOVH + case 4: + return arm64.AMOVW + case 8: + return arm64.AMOVD + } + } + panic("bad store type") +} + +// makeshift encodes a register shifted by a constant, used as an Offset in Prog. +func makeshift(v *ssa.Value, reg int16, typ int64, s int64) int64 { + if s < 0 || s >= 64 { + v.Fatalf("shift out of range: %d", s) + } + return int64(reg&31)<<16 | typ | (s&63)<<10 +} + +// genshift generates a Prog for r = r0 op (r1 shifted by n). +func genshift(s *ssagen.State, v *ssa.Value, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog { + p := s.Prog(as) + p.From.Type = obj.TYPE_SHIFT + p.From.Offset = makeshift(v, r1, typ, n) + p.Reg = r0 + if r != 0 { + p.To.Type = obj.TYPE_REG + p.To.Reg = r + } + return p +} + +// generate the memory operand for the indexed load/store instructions. +// base and idx are registers. 
+func genIndexedOperand(op ssa.Op, base, idx int16) obj.Addr { + // Reg: base register, Index: (shifted) index register + mop := obj.Addr{Type: obj.TYPE_MEM, Reg: base} + switch op { + case ssa.OpARM64MOVDloadidx8, ssa.OpARM64MOVDstoreidx8, ssa.OpARM64MOVDstorezeroidx8, + ssa.OpARM64FMOVDloadidx8, ssa.OpARM64FMOVDstoreidx8: + mop.Index = arm64.REG_LSL | 3<<5 | idx&31 + case ssa.OpARM64MOVWloadidx4, ssa.OpARM64MOVWUloadidx4, ssa.OpARM64MOVWstoreidx4, ssa.OpARM64MOVWstorezeroidx4, + ssa.OpARM64FMOVSloadidx4, ssa.OpARM64FMOVSstoreidx4: + mop.Index = arm64.REG_LSL | 2<<5 | idx&31 + case ssa.OpARM64MOVHloadidx2, ssa.OpARM64MOVHUloadidx2, ssa.OpARM64MOVHstoreidx2, ssa.OpARM64MOVHstorezeroidx2: + mop.Index = arm64.REG_LSL | 1<<5 | idx&31 + default: // not shifted + mop.Index = idx + } + return mop +} + +func ssaGenValue(s *ssagen.State, v *ssa.Value) { + switch v.Op { + case ssa.OpCopy, ssa.OpARM64MOVDreg: + if v.Type.IsMemory() { + return + } + x := v.Args[0].Reg() + y := v.Reg() + if x == y { + return + } + as := arm64.AMOVD + if v.Type.IsFloat() { + switch v.Type.Size() { + case 4: + as = arm64.AFMOVS + case 8: + as = arm64.AFMOVD + default: + panic("bad float size") + } + } + p := s.Prog(as) + p.From.Type = obj.TYPE_REG + p.From.Reg = x + p.To.Type = obj.TYPE_REG + p.To.Reg = y + case ssa.OpARM64MOVDnop: + // nothing to do + case ssa.OpLoadReg: + if v.Type.IsFlags() { + v.Fatalf("load flags not implemented: %v", v.LongString()) + return + } + p := s.Prog(loadByType(v.Type)) + ssagen.AddrAuto(&p.From, v.Args[0]) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpStoreReg: + if v.Type.IsFlags() { + v.Fatalf("store flags not implemented: %v", v.LongString()) + return + } + p := s.Prog(storeByType(v.Type)) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + ssagen.AddrAuto(&p.To, v) + case ssa.OpArgIntReg, ssa.OpArgFloatReg: + // The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill + // The loop only runs once. 
+ for _, a := range v.Block.Func.RegArgs { + // Pass the spill/unspill information along to the assembler, offset by size of + // the saved LR slot. + addr := ssagen.SpillSlotAddr(a, arm64.REGSP, base.Ctxt.Arch.FixedFrameSize) + s.FuncInfo().AddSpill( + obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)}) + } + v.Block.Func.RegArgs = nil + ssagen.CheckArgReg(v) + case ssa.OpARM64ADD, + ssa.OpARM64SUB, + ssa.OpARM64AND, + ssa.OpARM64OR, + ssa.OpARM64XOR, + ssa.OpARM64BIC, + ssa.OpARM64EON, + ssa.OpARM64ORN, + ssa.OpARM64MUL, + ssa.OpARM64MULW, + ssa.OpARM64MNEG, + ssa.OpARM64MNEGW, + ssa.OpARM64MULH, + ssa.OpARM64UMULH, + ssa.OpARM64MULL, + ssa.OpARM64UMULL, + ssa.OpARM64DIV, + ssa.OpARM64UDIV, + ssa.OpARM64DIVW, + ssa.OpARM64UDIVW, + ssa.OpARM64MOD, + ssa.OpARM64UMOD, + ssa.OpARM64MODW, + ssa.OpARM64UMODW, + ssa.OpARM64SLL, + ssa.OpARM64SRL, + ssa.OpARM64SRA, + ssa.OpARM64FADDS, + ssa.OpARM64FADDD, + ssa.OpARM64FSUBS, + ssa.OpARM64FSUBD, + ssa.OpARM64FMULS, + ssa.OpARM64FMULD, + ssa.OpARM64FNMULS, + ssa.OpARM64FNMULD, + ssa.OpARM64FDIVS, + ssa.OpARM64FDIVD, + ssa.OpARM64FMINS, + ssa.OpARM64FMIND, + ssa.OpARM64FMAXS, + ssa.OpARM64FMAXD, + ssa.OpARM64ROR, + ssa.OpARM64RORW: + r := v.Reg() + r1 := v.Args[0].Reg() + r2 := v.Args[1].Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r2 + p.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpARM64FMADDS, + ssa.OpARM64FMADDD, + ssa.OpARM64FNMADDS, + ssa.OpARM64FNMADDD, + ssa.OpARM64FMSUBS, + ssa.OpARM64FMSUBD, + ssa.OpARM64FNMSUBS, + ssa.OpARM64FNMSUBD, + ssa.OpARM64MADD, + ssa.OpARM64MADDW, + ssa.OpARM64MSUB, + ssa.OpARM64MSUBW: + rt := v.Reg() + ra := v.Args[0].Reg() + rm := v.Args[1].Reg() + rn := v.Args[2].Reg() + p := s.Prog(v.Op.Asm()) + p.Reg = ra + p.From.Type = obj.TYPE_REG + p.From.Reg = rm + p.AddRestSourceReg(rn) + p.To.Type = obj.TYPE_REG + p.To.Reg = rt + case ssa.OpARM64ADDconst, + ssa.OpARM64SUBconst, + ssa.OpARM64ANDconst, + 
ssa.OpARM64ORconst, + ssa.OpARM64XORconst, + ssa.OpARM64SLLconst, + ssa.OpARM64SRLconst, + ssa.OpARM64SRAconst, + ssa.OpARM64RORconst, + ssa.OpARM64RORWconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64ADDSconstflags: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + case ssa.OpARM64ADCzerocarry: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = arm64.REGZERO + p.Reg = arm64.REGZERO + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64ADCSflags, + ssa.OpARM64ADDSflags, + ssa.OpARM64SBCSflags, + ssa.OpARM64SUBSflags: + r := v.Reg0() + r1 := v.Args[0].Reg() + r2 := v.Args[1].Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r2 + p.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpARM64NEGSflags: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + case ssa.OpARM64NGCzerocarry: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = arm64.REGZERO + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64EXTRconst, + ssa.OpARM64EXTRWconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.AddRestSourceReg(v.Args[0].Reg()) + p.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64MVNshiftLL, ssa.OpARM64NEGshiftLL: + genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_LL, v.AuxInt) + case ssa.OpARM64MVNshiftRL, ssa.OpARM64NEGshiftRL: + genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt) + case ssa.OpARM64MVNshiftRA, ssa.OpARM64NEGshiftRA: + genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt) + case 
ssa.OpARM64MVNshiftRO: + genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_ROR, v.AuxInt) + case ssa.OpARM64ADDshiftLL, + ssa.OpARM64SUBshiftLL, + ssa.OpARM64ANDshiftLL, + ssa.OpARM64ORshiftLL, + ssa.OpARM64XORshiftLL, + ssa.OpARM64EONshiftLL, + ssa.OpARM64ORNshiftLL, + ssa.OpARM64BICshiftLL: + genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LL, v.AuxInt) + case ssa.OpARM64ADDshiftRL, + ssa.OpARM64SUBshiftRL, + ssa.OpARM64ANDshiftRL, + ssa.OpARM64ORshiftRL, + ssa.OpARM64XORshiftRL, + ssa.OpARM64EONshiftRL, + ssa.OpARM64ORNshiftRL, + ssa.OpARM64BICshiftRL: + genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt) + case ssa.OpARM64ADDshiftRA, + ssa.OpARM64SUBshiftRA, + ssa.OpARM64ANDshiftRA, + ssa.OpARM64ORshiftRA, + ssa.OpARM64XORshiftRA, + ssa.OpARM64EONshiftRA, + ssa.OpARM64ORNshiftRA, + ssa.OpARM64BICshiftRA: + genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt) + case ssa.OpARM64ANDshiftRO, + ssa.OpARM64ORshiftRO, + ssa.OpARM64XORshiftRO, + ssa.OpARM64EONshiftRO, + ssa.OpARM64ORNshiftRO, + ssa.OpARM64BICshiftRO: + genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_ROR, v.AuxInt) + case ssa.OpARM64MOVDconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64FMOVSconst, + ssa.OpARM64FMOVDconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_FCONST + p.From.Val = math.Float64frombits(uint64(v.AuxInt)) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64FCMPS0, + ssa.OpARM64FCMPD0: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_FCONST + p.From.Val = math.Float64frombits(0) + p.Reg = v.Args[0].Reg() + case ssa.OpARM64CMP, + ssa.OpARM64CMPW, + ssa.OpARM64CMN, + ssa.OpARM64CMNW, + ssa.OpARM64TST, + ssa.OpARM64TSTW, + ssa.OpARM64FCMPS, + ssa.OpARM64FCMPD: + p := s.Prog(v.Op.Asm()) + 
p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.Reg = v.Args[0].Reg() + case ssa.OpARM64CMPconst, + ssa.OpARM64CMPWconst, + ssa.OpARM64CMNconst, + ssa.OpARM64CMNWconst, + ssa.OpARM64TSTconst, + ssa.OpARM64TSTWconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.Reg = v.Args[0].Reg() + case ssa.OpARM64CMPshiftLL, ssa.OpARM64CMNshiftLL, ssa.OpARM64TSTshiftLL: + genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LL, v.AuxInt) + case ssa.OpARM64CMPshiftRL, ssa.OpARM64CMNshiftRL, ssa.OpARM64TSTshiftRL: + genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LR, v.AuxInt) + case ssa.OpARM64CMPshiftRA, ssa.OpARM64CMNshiftRA, ssa.OpARM64TSTshiftRA: + genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_AR, v.AuxInt) + case ssa.OpARM64TSTshiftRO: + genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_ROR, v.AuxInt) + case ssa.OpARM64MOVDaddr: + p := s.Prog(arm64.AMOVD) + p.From.Type = obj.TYPE_ADDR + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + var wantreg string + // MOVD $sym+off(base), R + // the assembler expands it as the following: + // - base is SP: add constant offset to SP (R13) + // when constant is large, tmp register (R11) may be used + // - base is SB: load external address from constant pool (use relocation) + switch v.Aux.(type) { + default: + v.Fatalf("aux is of unknown type %T", v.Aux) + case *obj.LSym: + wantreg = "SB" + ssagen.AddAux(&p.From, v) + case *ir.Name: + wantreg = "SP" + ssagen.AddAux(&p.From, v) + case nil: + // No sym, just MOVD $off(SP), R + wantreg = "SP" + p.From.Offset = v.AuxInt + } + if reg := v.Args[0].RegName(); reg != wantreg { + v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg) + } + case ssa.OpARM64MOVBload, + ssa.OpARM64MOVBUload, + ssa.OpARM64MOVHload, + ssa.OpARM64MOVHUload, + ssa.OpARM64MOVWload, + ssa.OpARM64MOVWUload, + 
ssa.OpARM64MOVDload, + ssa.OpARM64FMOVSload, + ssa.OpARM64FMOVDload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64LDP: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REGREG + p.To.Reg = v.Reg0() + p.To.Offset = int64(v.Reg1()) + case ssa.OpARM64MOVBloadidx, + ssa.OpARM64MOVBUloadidx, + ssa.OpARM64MOVHloadidx, + ssa.OpARM64MOVHUloadidx, + ssa.OpARM64MOVWloadidx, + ssa.OpARM64MOVWUloadidx, + ssa.OpARM64MOVDloadidx, + ssa.OpARM64FMOVSloadidx, + ssa.OpARM64FMOVDloadidx, + ssa.OpARM64MOVHloadidx2, + ssa.OpARM64MOVHUloadidx2, + ssa.OpARM64MOVWloadidx4, + ssa.OpARM64MOVWUloadidx4, + ssa.OpARM64MOVDloadidx8, + ssa.OpARM64FMOVDloadidx8, + ssa.OpARM64FMOVSloadidx4: + p := s.Prog(v.Op.Asm()) + p.From = genIndexedOperand(v.Op, v.Args[0].Reg(), v.Args[1].Reg()) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64LDAR, + ssa.OpARM64LDARB, + ssa.OpARM64LDARW: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + case ssa.OpARM64MOVBstore, + ssa.OpARM64MOVHstore, + ssa.OpARM64MOVWstore, + ssa.OpARM64MOVDstore, + ssa.OpARM64FMOVSstore, + ssa.OpARM64FMOVDstore, + ssa.OpARM64STLRB, + ssa.OpARM64STLR, + ssa.OpARM64STLRW: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpARM64MOVBstoreidx, + ssa.OpARM64MOVHstoreidx, + ssa.OpARM64MOVWstoreidx, + ssa.OpARM64MOVDstoreidx, + ssa.OpARM64FMOVSstoreidx, + ssa.OpARM64FMOVDstoreidx, + ssa.OpARM64MOVHstoreidx2, + ssa.OpARM64MOVWstoreidx4, + ssa.OpARM64FMOVSstoreidx4, + ssa.OpARM64MOVDstoreidx8, + ssa.OpARM64FMOVDstoreidx8: + p := s.Prog(v.Op.Asm()) + p.To = 
genIndexedOperand(v.Op, v.Args[0].Reg(), v.Args[1].Reg()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[2].Reg() + case ssa.OpARM64STP: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REGREG + p.From.Reg = v.Args[1].Reg() + p.From.Offset = int64(v.Args[2].Reg()) + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpARM64MOVBstorezero, + ssa.OpARM64MOVHstorezero, + ssa.OpARM64MOVWstorezero, + ssa.OpARM64MOVDstorezero: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = arm64.REGZERO + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpARM64MOVBstorezeroidx, + ssa.OpARM64MOVHstorezeroidx, + ssa.OpARM64MOVWstorezeroidx, + ssa.OpARM64MOVDstorezeroidx, + ssa.OpARM64MOVHstorezeroidx2, + ssa.OpARM64MOVWstorezeroidx4, + ssa.OpARM64MOVDstorezeroidx8: + p := s.Prog(v.Op.Asm()) + p.To = genIndexedOperand(v.Op, v.Args[0].Reg(), v.Args[1].Reg()) + p.From.Type = obj.TYPE_REG + p.From.Reg = arm64.REGZERO + case ssa.OpARM64MOVQstorezero: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REGREG + p.From.Reg = arm64.REGZERO + p.From.Offset = int64(arm64.REGZERO) + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpARM64BFI, + ssa.OpARM64BFXIL: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt >> 8 + p.AddRestSourceConst(v.AuxInt & 0xff) + p.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64SBFIZ, + ssa.OpARM64SBFX, + ssa.OpARM64UBFIZ, + ssa.OpARM64UBFX: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt >> 8 + p.AddRestSourceConst(v.AuxInt & 0xff) + p.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64LoweredAtomicExchange64, + ssa.OpARM64LoweredAtomicExchange32: + // LDAXR (Rarg0), Rout + // STLXR Rarg1, (Rarg0), Rtmp + // CBNZ Rtmp, -2(PC) + ld := arm64.ALDAXR + st := arm64.ASTLXR + if v.Op 
== ssa.OpARM64LoweredAtomicExchange32 { + ld = arm64.ALDAXRW + st = arm64.ASTLXRW + } + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + out := v.Reg0() + p := s.Prog(ld) + p.From.Type = obj.TYPE_MEM + p.From.Reg = r0 + p.To.Type = obj.TYPE_REG + p.To.Reg = out + p1 := s.Prog(st) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = r1 + p1.To.Type = obj.TYPE_MEM + p1.To.Reg = r0 + p1.RegTo2 = arm64.REGTMP + p2 := s.Prog(arm64.ACBNZ) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = arm64.REGTMP + p2.To.Type = obj.TYPE_BRANCH + p2.To.SetTarget(p) + case ssa.OpARM64LoweredAtomicExchange64Variant, + ssa.OpARM64LoweredAtomicExchange32Variant: + swap := arm64.ASWPALD + if v.Op == ssa.OpARM64LoweredAtomicExchange32Variant { + swap = arm64.ASWPALW + } + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + out := v.Reg0() + + // SWPALD Rarg1, (Rarg0), Rout + p := s.Prog(swap) + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + p.To.Type = obj.TYPE_MEM + p.To.Reg = r0 + p.RegTo2 = out + + case ssa.OpARM64LoweredAtomicAdd64, + ssa.OpARM64LoweredAtomicAdd32: + // LDAXR (Rarg0), Rout + // ADD Rarg1, Rout + // STLXR Rout, (Rarg0), Rtmp + // CBNZ Rtmp, -3(PC) + ld := arm64.ALDAXR + st := arm64.ASTLXR + if v.Op == ssa.OpARM64LoweredAtomicAdd32 { + ld = arm64.ALDAXRW + st = arm64.ASTLXRW + } + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + out := v.Reg0() + p := s.Prog(ld) + p.From.Type = obj.TYPE_MEM + p.From.Reg = r0 + p.To.Type = obj.TYPE_REG + p.To.Reg = out + p1 := s.Prog(arm64.AADD) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = r1 + p1.To.Type = obj.TYPE_REG + p1.To.Reg = out + p2 := s.Prog(st) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = out + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = r0 + p2.RegTo2 = arm64.REGTMP + p3 := s.Prog(arm64.ACBNZ) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = arm64.REGTMP + p3.To.Type = obj.TYPE_BRANCH + p3.To.SetTarget(p) + case ssa.OpARM64LoweredAtomicAdd64Variant, + ssa.OpARM64LoweredAtomicAdd32Variant: + // LDADDAL Rarg1, (Rarg0), Rout + // ADD Rarg1, Rout + op 
:= arm64.ALDADDALD + if v.Op == ssa.OpARM64LoweredAtomicAdd32Variant { + op = arm64.ALDADDALW + } + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + out := v.Reg0() + p := s.Prog(op) + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + p.To.Type = obj.TYPE_MEM + p.To.Reg = r0 + p.RegTo2 = out + p1 := s.Prog(arm64.AADD) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = r1 + p1.To.Type = obj.TYPE_REG + p1.To.Reg = out + case ssa.OpARM64LoweredAtomicCas64, + ssa.OpARM64LoweredAtomicCas32: + // LDAXR (Rarg0), Rtmp + // CMP Rarg1, Rtmp + // BNE 3(PC) + // STLXR Rarg2, (Rarg0), Rtmp + // CBNZ Rtmp, -4(PC) + // CSET EQ, Rout + ld := arm64.ALDAXR + st := arm64.ASTLXR + cmp := arm64.ACMP + if v.Op == ssa.OpARM64LoweredAtomicCas32 { + ld = arm64.ALDAXRW + st = arm64.ASTLXRW + cmp = arm64.ACMPW + } + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + r2 := v.Args[2].Reg() + out := v.Reg0() + p := s.Prog(ld) + p.From.Type = obj.TYPE_MEM + p.From.Reg = r0 + p.To.Type = obj.TYPE_REG + p.To.Reg = arm64.REGTMP + p1 := s.Prog(cmp) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = r1 + p1.Reg = arm64.REGTMP + p2 := s.Prog(arm64.ABNE) + p2.To.Type = obj.TYPE_BRANCH + p3 := s.Prog(st) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = r2 + p3.To.Type = obj.TYPE_MEM + p3.To.Reg = r0 + p3.RegTo2 = arm64.REGTMP + p4 := s.Prog(arm64.ACBNZ) + p4.From.Type = obj.TYPE_REG + p4.From.Reg = arm64.REGTMP + p4.To.Type = obj.TYPE_BRANCH + p4.To.SetTarget(p) + p5 := s.Prog(arm64.ACSET) + p5.From.Type = obj.TYPE_SPECIAL // assembler encodes conditional bits in Offset + p5.From.Offset = int64(arm64.SPOP_EQ) + p5.To.Type = obj.TYPE_REG + p5.To.Reg = out + p2.To.SetTarget(p5) + case ssa.OpARM64LoweredAtomicCas64Variant, + ssa.OpARM64LoweredAtomicCas32Variant: + // Rarg0: ptr + // Rarg1: old + // Rarg2: new + // MOV Rarg1, Rtmp + // CASAL Rtmp, (Rarg0), Rarg2 + // CMP Rarg1, Rtmp + // CSET EQ, Rout + cas := arm64.ACASALD + cmp := arm64.ACMP + mov := arm64.AMOVD + if v.Op == ssa.OpARM64LoweredAtomicCas32Variant { + cas = 
arm64.ACASALW + cmp = arm64.ACMPW + mov = arm64.AMOVW + } + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + r2 := v.Args[2].Reg() + out := v.Reg0() + + // MOV Rarg1, Rtmp + p := s.Prog(mov) + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = arm64.REGTMP + + // CASAL Rtmp, (Rarg0), Rarg2 + p1 := s.Prog(cas) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = arm64.REGTMP + p1.To.Type = obj.TYPE_MEM + p1.To.Reg = r0 + p1.RegTo2 = r2 + + // CMP Rarg1, Rtmp + p2 := s.Prog(cmp) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = r1 + p2.Reg = arm64.REGTMP + + // CSET EQ, Rout + p3 := s.Prog(arm64.ACSET) + p3.From.Type = obj.TYPE_SPECIAL // assembler encodes conditional bits in Offset + p3.From.Offset = int64(arm64.SPOP_EQ) + p3.To.Type = obj.TYPE_REG + p3.To.Reg = out + + case ssa.OpARM64LoweredAtomicAnd8, + ssa.OpARM64LoweredAtomicAnd32, + ssa.OpARM64LoweredAtomicOr8, + ssa.OpARM64LoweredAtomicOr32: + // LDAXRB/LDAXRW (Rarg0), Rout + // AND/OR Rarg1, Rout + // STLXRB/STLXRB Rout, (Rarg0), Rtmp + // CBNZ Rtmp, -3(PC) + ld := arm64.ALDAXRB + st := arm64.ASTLXRB + if v.Op == ssa.OpARM64LoweredAtomicAnd32 || v.Op == ssa.OpARM64LoweredAtomicOr32 { + ld = arm64.ALDAXRW + st = arm64.ASTLXRW + } + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + out := v.Reg0() + p := s.Prog(ld) + p.From.Type = obj.TYPE_MEM + p.From.Reg = r0 + p.To.Type = obj.TYPE_REG + p.To.Reg = out + p1 := s.Prog(v.Op.Asm()) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = r1 + p1.To.Type = obj.TYPE_REG + p1.To.Reg = out + p2 := s.Prog(st) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = out + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = r0 + p2.RegTo2 = arm64.REGTMP + p3 := s.Prog(arm64.ACBNZ) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = arm64.REGTMP + p3.To.Type = obj.TYPE_BRANCH + p3.To.SetTarget(p) + case ssa.OpARM64LoweredAtomicAnd8Variant, + ssa.OpARM64LoweredAtomicAnd32Variant: + atomic_clear := arm64.ALDCLRALW + if v.Op == ssa.OpARM64LoweredAtomicAnd8Variant { + atomic_clear = 
arm64.ALDCLRALB + } + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + out := v.Reg0() + + // MNV Rarg1 Rtemp + p := s.Prog(arm64.AMVN) + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = arm64.REGTMP + + // LDCLRALW Rtemp, (Rarg0), Rout + p1 := s.Prog(atomic_clear) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = arm64.REGTMP + p1.To.Type = obj.TYPE_MEM + p1.To.Reg = r0 + p1.RegTo2 = out + + // AND Rarg1, Rout + p2 := s.Prog(arm64.AAND) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = r1 + p2.To.Type = obj.TYPE_REG + p2.To.Reg = out + + case ssa.OpARM64LoweredAtomicOr8Variant, + ssa.OpARM64LoweredAtomicOr32Variant: + atomic_or := arm64.ALDORALW + if v.Op == ssa.OpARM64LoweredAtomicOr8Variant { + atomic_or = arm64.ALDORALB + } + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + out := v.Reg0() + + // LDORALW Rarg1, (Rarg0), Rout + p := s.Prog(atomic_or) + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + p.To.Type = obj.TYPE_MEM + p.To.Reg = r0 + p.RegTo2 = out + + // ORR Rarg1, Rout + p2 := s.Prog(arm64.AORR) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = r1 + p2.To.Type = obj.TYPE_REG + p2.To.Reg = out + + case ssa.OpARM64MOVBreg, + ssa.OpARM64MOVBUreg, + ssa.OpARM64MOVHreg, + ssa.OpARM64MOVHUreg, + ssa.OpARM64MOVWreg, + ssa.OpARM64MOVWUreg: + a := v.Args[0] + for a.Op == ssa.OpCopy || a.Op == ssa.OpARM64MOVDreg { + a = a.Args[0] + } + if a.Op == ssa.OpLoadReg { + t := a.Type + switch { + case v.Op == ssa.OpARM64MOVBreg && t.Size() == 1 && t.IsSigned(), + v.Op == ssa.OpARM64MOVBUreg && t.Size() == 1 && !t.IsSigned(), + v.Op == ssa.OpARM64MOVHreg && t.Size() == 2 && t.IsSigned(), + v.Op == ssa.OpARM64MOVHUreg && t.Size() == 2 && !t.IsSigned(), + v.Op == ssa.OpARM64MOVWreg && t.Size() == 4 && t.IsSigned(), + v.Op == ssa.OpARM64MOVWUreg && t.Size() == 4 && !t.IsSigned(): + // arg is a proper-typed load, already zero/sign-extended, don't extend again + if v.Reg() == v.Args[0].Reg() { + return + } + p := s.Prog(arm64.AMOVD) + p.From.Type = 
obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + return + default: + } + } + fallthrough + case ssa.OpARM64MVN, + ssa.OpARM64NEG, + ssa.OpARM64FABSD, + ssa.OpARM64FMOVDfpgp, + ssa.OpARM64FMOVDgpfp, + ssa.OpARM64FMOVSfpgp, + ssa.OpARM64FMOVSgpfp, + ssa.OpARM64FNEGS, + ssa.OpARM64FNEGD, + ssa.OpARM64FSQRTS, + ssa.OpARM64FSQRTD, + ssa.OpARM64FCVTZSSW, + ssa.OpARM64FCVTZSDW, + ssa.OpARM64FCVTZUSW, + ssa.OpARM64FCVTZUDW, + ssa.OpARM64FCVTZSS, + ssa.OpARM64FCVTZSD, + ssa.OpARM64FCVTZUS, + ssa.OpARM64FCVTZUD, + ssa.OpARM64SCVTFWS, + ssa.OpARM64SCVTFWD, + ssa.OpARM64SCVTFS, + ssa.OpARM64SCVTFD, + ssa.OpARM64UCVTFWS, + ssa.OpARM64UCVTFWD, + ssa.OpARM64UCVTFS, + ssa.OpARM64UCVTFD, + ssa.OpARM64FCVTSD, + ssa.OpARM64FCVTDS, + ssa.OpARM64REV, + ssa.OpARM64REVW, + ssa.OpARM64REV16, + ssa.OpARM64REV16W, + ssa.OpARM64RBIT, + ssa.OpARM64RBITW, + ssa.OpARM64CLZ, + ssa.OpARM64CLZW, + ssa.OpARM64FRINTAD, + ssa.OpARM64FRINTMD, + ssa.OpARM64FRINTND, + ssa.OpARM64FRINTPD, + ssa.OpARM64FRINTZD: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64LoweredRound32F, ssa.OpARM64LoweredRound64F: + // input is already rounded + case ssa.OpARM64VCNT: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = (v.Args[0].Reg()-arm64.REG_F0)&31 + arm64.REG_ARNG + ((arm64.ARNG_8B & 15) << 5) + p.To.Type = obj.TYPE_REG + p.To.Reg = (v.Reg()-arm64.REG_F0)&31 + arm64.REG_ARNG + ((arm64.ARNG_8B & 15) << 5) + case ssa.OpARM64VUADDLV: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = (v.Args[0].Reg()-arm64.REG_F0)&31 + arm64.REG_ARNG + ((arm64.ARNG_8B & 15) << 5) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() - arm64.REG_F0 + arm64.REG_V0 + case ssa.OpARM64CSEL, ssa.OpARM64CSEL0: + r1 := int16(arm64.REGZERO) + if v.Op != ssa.OpARM64CSEL0 { + r1 = v.Args[1].Reg() + } + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_SPECIAL // 
assembler encodes conditional bits in Offset + condCode := condBits[ssa.Op(v.AuxInt)] + p.From.Offset = int64(condCode) + p.Reg = v.Args[0].Reg() + p.AddRestSourceReg(r1) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64CSINC, ssa.OpARM64CSINV, ssa.OpARM64CSNEG: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_SPECIAL // assembler encodes conditional bits in Offset + condCode := condBits[ssa.Op(v.AuxInt)] + p.From.Offset = int64(condCode) + p.Reg = v.Args[0].Reg() + p.AddRestSourceReg(v.Args[1].Reg()) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64CSETM: + p := s.Prog(arm64.ACSETM) + p.From.Type = obj.TYPE_SPECIAL // assembler encodes conditional bits in Offset + condCode := condBits[ssa.Op(v.AuxInt)] + p.From.Offset = int64(condCode) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64DUFFZERO: + // runtime.duffzero expects start address in R20 + p := s.Prog(obj.ADUFFZERO) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ir.Syms.Duffzero + p.To.Offset = v.AuxInt + case ssa.OpARM64LoweredZero: + // STP.P (ZR,ZR), 16(R16) + // CMP Rarg1, R16 + // BLE -2(PC) + // arg1 is the address of the last 16-byte unit to zero + p := s.Prog(arm64.ASTP) + p.Scond = arm64.C_XPOST + p.From.Type = obj.TYPE_REGREG + p.From.Reg = arm64.REGZERO + p.From.Offset = int64(arm64.REGZERO) + p.To.Type = obj.TYPE_MEM + p.To.Reg = arm64.REG_R16 + p.To.Offset = 16 + p2 := s.Prog(arm64.ACMP) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = v.Args[1].Reg() + p2.Reg = arm64.REG_R16 + p3 := s.Prog(arm64.ABLE) + p3.To.Type = obj.TYPE_BRANCH + p3.To.SetTarget(p) + case ssa.OpARM64DUFFCOPY: + p := s.Prog(obj.ADUFFCOPY) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ir.Syms.Duffcopy + p.To.Offset = v.AuxInt + case ssa.OpARM64LoweredMove: + // LDP.P 16(R16), (R25, Rtmp) + // STP.P (R25, Rtmp), 16(R17) + // CMP Rarg2, R16 + // BLE -3(PC) + // arg2 is the address of the last element of src + p := s.Prog(arm64.ALDP) 
+ p.Scond = arm64.C_XPOST + p.From.Type = obj.TYPE_MEM + p.From.Reg = arm64.REG_R16 + p.From.Offset = 16 + p.To.Type = obj.TYPE_REGREG + p.To.Reg = arm64.REG_R25 + p.To.Offset = int64(arm64.REGTMP) + p2 := s.Prog(arm64.ASTP) + p2.Scond = arm64.C_XPOST + p2.From.Type = obj.TYPE_REGREG + p2.From.Reg = arm64.REG_R25 + p2.From.Offset = int64(arm64.REGTMP) + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = arm64.REG_R17 + p2.To.Offset = 16 + p3 := s.Prog(arm64.ACMP) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = v.Args[2].Reg() + p3.Reg = arm64.REG_R16 + p4 := s.Prog(arm64.ABLE) + p4.To.Type = obj.TYPE_BRANCH + p4.To.SetTarget(p) + case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter: + s.Call(v) + case ssa.OpARM64CALLtail: + s.TailCall(v) + case ssa.OpARM64LoweredWB: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + // AuxInt encodes how many buffer entries we need. + p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1] + + case ssa.OpARM64LoweredPanicBoundsA, ssa.OpARM64LoweredPanicBoundsB, ssa.OpARM64LoweredPanicBoundsC: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] + s.UseArgs(16) // space used in callee args area by assembly stubs + case ssa.OpARM64LoweredNilCheck: + // Issue a load which will fault if arg is nil. 
+ p := s.Prog(arm64.AMOVB) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = arm64.REGTMP + if logopt.Enabled() { + logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) + } + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") + } + case ssa.OpARM64Equal, + ssa.OpARM64NotEqual, + ssa.OpARM64LessThan, + ssa.OpARM64LessEqual, + ssa.OpARM64GreaterThan, + ssa.OpARM64GreaterEqual, + ssa.OpARM64LessThanU, + ssa.OpARM64LessEqualU, + ssa.OpARM64GreaterThanU, + ssa.OpARM64GreaterEqualU, + ssa.OpARM64LessThanF, + ssa.OpARM64LessEqualF, + ssa.OpARM64GreaterThanF, + ssa.OpARM64GreaterEqualF, + ssa.OpARM64NotLessThanF, + ssa.OpARM64NotLessEqualF, + ssa.OpARM64NotGreaterThanF, + ssa.OpARM64NotGreaterEqualF, + ssa.OpARM64LessThanNoov, + ssa.OpARM64GreaterEqualNoov: + // generate boolean values using CSET + p := s.Prog(arm64.ACSET) + p.From.Type = obj.TYPE_SPECIAL // assembler encodes conditional bits in Offset + condCode := condBits[v.Op] + p.From.Offset = int64(condCode) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64PRFM: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_CONST + p.To.Offset = v.AuxInt + case ssa.OpARM64LoweredGetClosurePtr: + // Closure pointer is R26 (arm64.REGCTXT). 
+ ssagen.CheckLoweredGetClosurePtr(v) + case ssa.OpARM64LoweredGetCallerSP: + // caller's SP is FixedFrameSize below the address of the first arg + p := s.Prog(arm64.AMOVD) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = -base.Ctxt.Arch.FixedFrameSize + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64LoweredGetCallerPC: + p := s.Prog(obj.AGETCALLERPC) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpARM64DMB: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + case ssa.OpARM64FlagConstant: + v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString()) + case ssa.OpARM64InvertFlags: + v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) + case ssa.OpClobber: + // MOVW $0xdeaddead, REGTMP + // MOVW REGTMP, (slot) + // MOVW REGTMP, 4(slot) + p := s.Prog(arm64.AMOVW) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 0xdeaddead + p.To.Type = obj.TYPE_REG + p.To.Reg = arm64.REGTMP + p = s.Prog(arm64.AMOVW) + p.From.Type = obj.TYPE_REG + p.From.Reg = arm64.REGTMP + p.To.Type = obj.TYPE_MEM + p.To.Reg = arm64.REGSP + ssagen.AddAux(&p.To, v) + p = s.Prog(arm64.AMOVW) + p.From.Type = obj.TYPE_REG + p.From.Reg = arm64.REGTMP + p.To.Type = obj.TYPE_MEM + p.To.Reg = arm64.REGSP + ssagen.AddAux2(&p.To, v, v.AuxInt+4) + case ssa.OpClobberReg: + x := uint64(0xdeaddeaddeaddead) + p := s.Prog(arm64.AMOVD) + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(x) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + default: + v.Fatalf("genValue not implemented: %s", v.LongString()) + } +} + +var condBits = map[ssa.Op]arm64.SpecialOperand{ + ssa.OpARM64Equal: arm64.SPOP_EQ, + ssa.OpARM64NotEqual: arm64.SPOP_NE, + ssa.OpARM64LessThan: arm64.SPOP_LT, + ssa.OpARM64LessThanU: arm64.SPOP_LO, + ssa.OpARM64LessEqual: arm64.SPOP_LE, + ssa.OpARM64LessEqualU: arm64.SPOP_LS, + ssa.OpARM64GreaterThan: arm64.SPOP_GT, + ssa.OpARM64GreaterThanU: arm64.SPOP_HI, + 
ssa.OpARM64GreaterEqual: arm64.SPOP_GE, + ssa.OpARM64GreaterEqualU: arm64.SPOP_HS, + ssa.OpARM64LessThanF: arm64.SPOP_MI, // Less than + ssa.OpARM64LessEqualF: arm64.SPOP_LS, // Less than or equal to + ssa.OpARM64GreaterThanF: arm64.SPOP_GT, // Greater than + ssa.OpARM64GreaterEqualF: arm64.SPOP_GE, // Greater than or equal to + + // The following condition codes have unordered to handle comparisons related to NaN. + ssa.OpARM64NotLessThanF: arm64.SPOP_PL, // Greater than, equal to, or unordered + ssa.OpARM64NotLessEqualF: arm64.SPOP_HI, // Greater than or unordered + ssa.OpARM64NotGreaterThanF: arm64.SPOP_LE, // Less than, equal to or unordered + ssa.OpARM64NotGreaterEqualF: arm64.SPOP_LT, // Less than or unordered + + ssa.OpARM64LessThanNoov: arm64.SPOP_MI, // Less than but without honoring overflow + ssa.OpARM64GreaterEqualNoov: arm64.SPOP_PL, // Greater than or equal to but without honoring overflow +} + +var blockJump = map[ssa.BlockKind]struct { + asm, invasm obj.As +}{ + ssa.BlockARM64EQ: {arm64.ABEQ, arm64.ABNE}, + ssa.BlockARM64NE: {arm64.ABNE, arm64.ABEQ}, + ssa.BlockARM64LT: {arm64.ABLT, arm64.ABGE}, + ssa.BlockARM64GE: {arm64.ABGE, arm64.ABLT}, + ssa.BlockARM64LE: {arm64.ABLE, arm64.ABGT}, + ssa.BlockARM64GT: {arm64.ABGT, arm64.ABLE}, + ssa.BlockARM64ULT: {arm64.ABLO, arm64.ABHS}, + ssa.BlockARM64UGE: {arm64.ABHS, arm64.ABLO}, + ssa.BlockARM64UGT: {arm64.ABHI, arm64.ABLS}, + ssa.BlockARM64ULE: {arm64.ABLS, arm64.ABHI}, + ssa.BlockARM64Z: {arm64.ACBZ, arm64.ACBNZ}, + ssa.BlockARM64NZ: {arm64.ACBNZ, arm64.ACBZ}, + ssa.BlockARM64ZW: {arm64.ACBZW, arm64.ACBNZW}, + ssa.BlockARM64NZW: {arm64.ACBNZW, arm64.ACBZW}, + ssa.BlockARM64TBZ: {arm64.ATBZ, arm64.ATBNZ}, + ssa.BlockARM64TBNZ: {arm64.ATBNZ, arm64.ATBZ}, + ssa.BlockARM64FLT: {arm64.ABMI, arm64.ABPL}, + ssa.BlockARM64FGE: {arm64.ABGE, arm64.ABLT}, + ssa.BlockARM64FLE: {arm64.ABLS, arm64.ABHI}, + ssa.BlockARM64FGT: {arm64.ABGT, arm64.ABLE}, + ssa.BlockARM64LTnoov: {arm64.ABMI, arm64.ABPL}, + 
ssa.BlockARM64GEnoov: {arm64.ABPL, arm64.ABMI}, +} + +// To model a 'LEnoov' ('<=' without overflow checking) branching. +var leJumps = [2][2]ssagen.IndexJump{ + {{Jump: arm64.ABEQ, Index: 0}, {Jump: arm64.ABPL, Index: 1}}, // next == b.Succs[0] + {{Jump: arm64.ABMI, Index: 0}, {Jump: arm64.ABEQ, Index: 0}}, // next == b.Succs[1] +} + +// To model a 'GTnoov' ('>' without overflow checking) branching. +var gtJumps = [2][2]ssagen.IndexJump{ + {{Jump: arm64.ABMI, Index: 1}, {Jump: arm64.ABEQ, Index: 1}}, // next == b.Succs[0] + {{Jump: arm64.ABEQ, Index: 1}, {Jump: arm64.ABPL, Index: 0}}, // next == b.Succs[1] +} + +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { + switch b.Kind { + case ssa.BlockPlain: + if b.Succs[0].Block() != next { + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) + } + + case ssa.BlockDefer: + // defer returns in R0: + // 0 if we should continue executing + // 1 if we should jump to deferreturn call + p := s.Prog(arm64.ACMP) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 0 + p.Reg = arm64.REG_R0 + p = s.Prog(arm64.ABNE) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()}) + if b.Succs[0].Block() != next { + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) + } + + case ssa.BlockExit, ssa.BlockRetJmp: + + case ssa.BlockRet: + s.Prog(obj.ARET) + + case ssa.BlockARM64EQ, ssa.BlockARM64NE, + ssa.BlockARM64LT, ssa.BlockARM64GE, + ssa.BlockARM64LE, ssa.BlockARM64GT, + ssa.BlockARM64ULT, ssa.BlockARM64UGT, + ssa.BlockARM64ULE, ssa.BlockARM64UGE, + ssa.BlockARM64Z, ssa.BlockARM64NZ, + ssa.BlockARM64ZW, ssa.BlockARM64NZW, + ssa.BlockARM64FLT, ssa.BlockARM64FGE, + ssa.BlockARM64FLE, ssa.BlockARM64FGT, + ssa.BlockARM64LTnoov, ssa.BlockARM64GEnoov: + jmp := blockJump[b.Kind] + var p *obj.Prog + switch next { + case b.Succs[0].Block(): 
+ p = s.Br(jmp.invasm, b.Succs[1].Block()) + case b.Succs[1].Block(): + p = s.Br(jmp.asm, b.Succs[0].Block()) + default: + if b.Likely != ssa.BranchUnlikely { + p = s.Br(jmp.asm, b.Succs[0].Block()) + s.Br(obj.AJMP, b.Succs[1].Block()) + } else { + p = s.Br(jmp.invasm, b.Succs[1].Block()) + s.Br(obj.AJMP, b.Succs[0].Block()) + } + } + if !b.Controls[0].Type.IsFlags() { + p.From.Type = obj.TYPE_REG + p.From.Reg = b.Controls[0].Reg() + } + case ssa.BlockARM64TBZ, ssa.BlockARM64TBNZ: + jmp := blockJump[b.Kind] + var p *obj.Prog + switch next { + case b.Succs[0].Block(): + p = s.Br(jmp.invasm, b.Succs[1].Block()) + case b.Succs[1].Block(): + p = s.Br(jmp.asm, b.Succs[0].Block()) + default: + if b.Likely != ssa.BranchUnlikely { + p = s.Br(jmp.asm, b.Succs[0].Block()) + s.Br(obj.AJMP, b.Succs[1].Block()) + } else { + p = s.Br(jmp.invasm, b.Succs[1].Block()) + s.Br(obj.AJMP, b.Succs[0].Block()) + } + } + p.From.Offset = b.AuxInt + p.From.Type = obj.TYPE_CONST + p.Reg = b.Controls[0].Reg() + + case ssa.BlockARM64LEnoov: + s.CombJump(b, next, &leJumps) + case ssa.BlockARM64GTnoov: + s.CombJump(b, next, >Jumps) + + case ssa.BlockARM64JUMPTABLE: + // MOVD (TABLE)(IDX<<3), Rtmp + // JMP (Rtmp) + p := s.Prog(arm64.AMOVD) + p.From = genIndexedOperand(ssa.OpARM64MOVDloadidx8, b.Controls[1].Reg(), b.Controls[0].Reg()) + p.To.Type = obj.TYPE_REG + p.To.Reg = arm64.REGTMP + p = s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_MEM + p.To.Reg = arm64.REGTMP + // Save jump tables for later resolution of the target blocks. 
+ s.JumpTables = append(s.JumpTables, b) + + default: + b.Fatalf("branch not implemented: %s", b.LongString()) + } +} + +func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { + p := s.Prog(loadByType(t)) + p.From.Type = obj.TYPE_MEM + p.From.Name = obj.NAME_AUTO + p.From.Sym = n.Linksym() + p.From.Offset = n.FrameOffset() + off + p.To.Type = obj.TYPE_REG + p.To.Reg = reg + return p +} + +func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { + p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off) + p.To.Name = obj.NAME_PARAM + p.To.Sym = n.Linksym() + p.Pos = p.Pos.WithNotStmt() + return p +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/base.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/base.go new file mode 100644 index 0000000000000000000000000000000000000000..ee3772c5ca2bfa44feae2978405fd61b4ae4ebd9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/base.go @@ -0,0 +1,221 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package base + +import ( + "fmt" + "os" + "runtime" + "runtime/debug" + "runtime/metrics" +) + +var atExitFuncs []func() + +func AtExit(f func()) { + atExitFuncs = append(atExitFuncs, f) +} + +func Exit(code int) { + for i := len(atExitFuncs) - 1; i >= 0; i-- { + f := atExitFuncs[i] + atExitFuncs = atExitFuncs[:i] + f() + } + os.Exit(code) +} + +// To enable tracing support (-t flag), set EnableTrace to true. +const EnableTrace = false + +// forEachGC calls fn each GC cycle until it returns false. 
+func forEachGC(fn func() bool) { + type T [32]byte // large enough to avoid runtime's tiny object allocator + + var finalizer func(*T) + finalizer = func(p *T) { + if fn() { + runtime.SetFinalizer(p, finalizer) + } + } + + finalizer(new(T)) +} + +// AdjustStartingHeap modifies GOGC so that GC should not occur until the heap +// grows to the requested size. This is intended but not promised, though it +// is true-mostly, depending on when the adjustment occurs and on the +// compiler's input and behavior. Once this size is approximately reached +// GOGC is reset to 100; subsequent GCs may reduce the heap below the requested +// size, but this function does not affect that. +// +// -d=gcadjust=1 enables logging of GOGC adjustment events. +// +// NOTE: If you think this code would help startup time in your own +// application and you decide to use it, please benchmark first to see if it +// actually works for you (it may not: the Go compiler is not typical), and +// whatever the outcome, please leave a comment on bug #56546. This code +// uses supported interfaces, but depends more than we like on +// current+observed behavior of the garbage collector, so if many people need +// this feature, we should consider/propose a better way to accomplish it. +func AdjustStartingHeap(requestedHeapGoal uint64) { + logHeapTweaks := Debug.GCAdjust == 1 + mp := runtime.GOMAXPROCS(0) + gcConcurrency := Flag.LowerC + + const ( + goal = "/gc/heap/goal:bytes" + count = "/gc/cycles/total:gc-cycles" + allocs = "/gc/heap/allocs:bytes" + frees = "/gc/heap/frees:bytes" + ) + + sample := []metrics.Sample{{Name: goal}, {Name: count}, {Name: allocs}, {Name: frees}} + const ( + GOAL = 0 + COUNT = 1 + ALLOCS = 2 + FREES = 3 + ) + + // Assumptions and observations of Go's garbage collector, as of Go 1.17-1.20: + + // - the initial heap goal is 4M, by fiat. It is possible for Go to start + // with a heap as small as 512k, so this may change in the future. 
+ + // - except for the first heap goal, heap goal is a function of + // observed-live at the previous GC and current GOGC. After the first + // GC, adjusting GOGC immediately updates GOGC; before the first GC, + // adjusting GOGC does not modify goal (but the change takes effect after + // the first GC). + + // - the before/after first GC behavior is not guaranteed anywhere, it's + // just behavior, and it's a bad idea to rely on it. + + // - we don't know exactly when GC will run, even after we adjust GOGC; the + // first GC may not have happened yet, may have already happened, or may + // be currently in progress, and GCs can start for several reasons. + + // - forEachGC above will run the provided function at some delay after each + // GC's mark phase terminates; finalizers are run after marking as the + // spans containing finalizable objects are swept, driven by GC + // background activity and allocation demand. + + // - "live at last GC" is not available through the current metrics + // interface. Instead, live is estimated by knowing the adjusted value of + // GOGC and the new heap goal following a GC (this requires knowing that + // at least one GC has occurred): + // estLive = 100 * newGoal / (100 + currentGogc) + // this new value of GOGC + // newGogc = 100*requestedHeapGoal/estLive - 100 + // will result in the desired goal. The logging code checks that the + // resulting goal is correct. + + // There's a small risk that the finalizer will be slow to run after a GC + // that expands the goal to a huge value, and that this will lead to + // out-of-memory. This doesn't seem to happen; in experiments on a variety + // of machines with a variety of extra loads to disrupt scheduling, the + // worst overshoot observed was 50% past requestedHeapGoal. + + metrics.Read(sample) + for _, s := range sample { + if s.Value.Kind() == metrics.KindBad { + // Just return, a slightly slower compilation is a tolerable outcome. 
+ if logHeapTweaks { + fmt.Fprintf(os.Stderr, "GCAdjust: Regret unexpected KindBad for metric %s\n", s.Name) + } + return + } + } + + // Tinker with GOGC to make the heap grow rapidly at first. + currentGoal := sample[GOAL].Value.Uint64() // Believe this will be 4MByte or less, perhaps 512k + myGogc := 100 * requestedHeapGoal / currentGoal + if myGogc <= 150 { + return + } + + if logHeapTweaks { + sample := append([]metrics.Sample(nil), sample...) // avoid races with GC callback + AtExit(func() { + metrics.Read(sample) + goal := sample[GOAL].Value.Uint64() + count := sample[COUNT].Value.Uint64() + oldGogc := debug.SetGCPercent(100) + if oldGogc == 100 { + fmt.Fprintf(os.Stderr, "GCAdjust: AtExit goal %d gogc %d count %d maxprocs %d gcConcurrency %d\n", + goal, oldGogc, count, mp, gcConcurrency) + } else { + inUse := sample[ALLOCS].Value.Uint64() - sample[FREES].Value.Uint64() + overPct := 100 * (int(inUse) - int(requestedHeapGoal)) / int(requestedHeapGoal) + fmt.Fprintf(os.Stderr, "GCAdjust: AtExit goal %d gogc %d count %d maxprocs %d gcConcurrency %d overPct %d\n", + goal, oldGogc, count, mp, gcConcurrency, overPct) + + } + }) + } + + debug.SetGCPercent(int(myGogc)) + + adjustFunc := func() bool { + + metrics.Read(sample) + goal := sample[GOAL].Value.Uint64() + count := sample[COUNT].Value.Uint64() + + if goal <= requestedHeapGoal { // Stay the course + if logHeapTweaks { + fmt.Fprintf(os.Stderr, "GCAdjust: Reuse GOGC adjust, current goal %d, count is %d, current gogc %d\n", + goal, count, myGogc) + } + return true + } + + // Believe goal has been adjusted upwards, else it would be less-than-or-equal than requestedHeapGoal + calcLive := 100 * goal / (100 + myGogc) + + if 2*calcLive < requestedHeapGoal { // calcLive can exceed requestedHeapGoal! + myGogc = 100*requestedHeapGoal/calcLive - 100 + + if myGogc > 125 { + // Not done growing the heap. 
+ oldGogc := debug.SetGCPercent(int(myGogc)) + + if logHeapTweaks { + // Check that the new goal looks right + inUse := sample[ALLOCS].Value.Uint64() - sample[FREES].Value.Uint64() + metrics.Read(sample) + newGoal := sample[GOAL].Value.Uint64() + pctOff := 100 * (int64(newGoal) - int64(requestedHeapGoal)) / int64(requestedHeapGoal) + // Check that the new goal is close to requested. 3% of make.bash fails this test. Why, TBD. + if pctOff < 2 { + fmt.Fprintf(os.Stderr, "GCAdjust: Retry GOGC adjust, current goal %d, count is %d, gogc was %d, is now %d, calcLive %d pctOff %d\n", + goal, count, oldGogc, myGogc, calcLive, pctOff) + } else { + // The GC is being annoying and not giving us the goal that we requested, say more to help understand when/why. + fmt.Fprintf(os.Stderr, "GCAdjust: Retry GOGC adjust, current goal %d, count is %d, gogc was %d, is now %d, calcLive %d pctOff %d inUse %d\n", + goal, count, oldGogc, myGogc, calcLive, pctOff, inUse) + } + } + return true + } + } + + // In this case we're done boosting GOGC, set it to 100 and don't set a new finalizer. + oldGogc := debug.SetGCPercent(100) + // inUse helps estimate how late the finalizer ran; at the instant the previous GC ended, + // it was (in theory) equal to the previous GC's heap goal. In a growing heap it is + // expected to grow to the new heap goal. 
+ inUse := sample[ALLOCS].Value.Uint64() - sample[FREES].Value.Uint64() + overPct := 100 * (int(inUse) - int(requestedHeapGoal)) / int(requestedHeapGoal) + if logHeapTweaks { + fmt.Fprintf(os.Stderr, "GCAdjust: Reset GOGC adjust, old goal %d, count is %d, gogc was %d, calcLive %d inUse %d overPct %d\n", + goal, count, oldGogc, calcLive, inUse, overPct) + } + return false + } + + forEachGC(adjustFunc) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/bootstrap_false.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/bootstrap_false.go new file mode 100644 index 0000000000000000000000000000000000000000..ea6da4348f53971936dcfbeb2055f137ca246011 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/bootstrap_false.go @@ -0,0 +1,11 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !compiler_bootstrap + +package base + +// CompilerBootstrap reports whether the current compiler binary was +// built with -tags=compiler_bootstrap. +const CompilerBootstrap = false diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/bootstrap_true.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/bootstrap_true.go new file mode 100644 index 0000000000000000000000000000000000000000..d0c6c88f56f94cc20f559e1bff38252853ca3354 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/bootstrap_true.go @@ -0,0 +1,11 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build compiler_bootstrap + +package base + +// CompilerBootstrap reports whether the current compiler binary was +// built with -tags=compiler_bootstrap. 
+const CompilerBootstrap = true diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/debug.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/debug.go new file mode 100644 index 0000000000000000000000000000000000000000..420ad1305e8c86b8c91498fdc1c90257f0847b2d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/debug.go @@ -0,0 +1,76 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Debug arguments, set by -d flag. + +package base + +// Debug holds the parsed debugging configuration values. +var Debug DebugFlags + +// DebugFlags defines the debugging configuration values (see var Debug). +// Each struct field is a different value, named for the lower-case of the field name. +// Each field must be an int or string and must have a `help` struct tag. +// +// The -d option takes a comma-separated list of settings. +// Each setting is name=value; for ints, name is short for name=1. +type DebugFlags struct { + Append int `help:"print information about append compilation"` + Checkptr int `help:"instrument unsafe pointer conversions\n0: instrumentation disabled\n1: conversions involving unsafe.Pointer are instrumented\n2: conversions to unsafe.Pointer force heap allocation" concurrent:"ok"` + Closure int `help:"print information about closure compilation"` + Defer int `help:"print information about defer compilation"` + DisableNil int `help:"disable nil checks" concurrent:"ok"` + DumpInlFuncProps string `help:"dump function properties from inl heuristics to specified file"` + DumpInlCallSiteScores int `help:"dump scored callsites during inlining"` + InlScoreAdj string `help:"set inliner score adjustments (ex: -d=inlscoreadj=panicPathAdj:10/passConstToNestedIfAdj:-90)"` + InlBudgetSlack int `help:"amount to expand the initial inline budget when new inliner enabled. 
Defaults to 80 if option not set." concurrent:"ok"` + DumpPtrs int `help:"show Node pointers values in dump output"` + DwarfInl int `help:"print information about DWARF inlined function creation"` + EscapeMutationsCalls int `help:"print extra escape analysis diagnostics about mutations and calls" concurrent:"ok"` + Export int `help:"print export data"` + Fmahash string `help:"hash value for use in debugging platform-dependent multiply-add use" concurrent:"ok"` + GCAdjust int `help:"log adjustments to GOGC" concurrent:"ok"` + GCCheck int `help:"check heap/gc use by compiler" concurrent:"ok"` + GCProg int `help:"print dump of GC programs"` + Gossahash string `help:"hash value for use in debugging the compiler"` + InlFuncsWithClosures int `help:"allow functions with closures to be inlined" concurrent:"ok"` + InlStaticInit int `help:"allow static initialization of inlined calls" concurrent:"ok"` + Libfuzzer int `help:"enable coverage instrumentation for libfuzzer"` + LoopVar int `help:"shared (0, default), 1 (private loop variables), 2, private + log"` + LoopVarHash string `help:"for debugging changes in loop behavior. 
Overrides experiment and loopvar flag."` + LocationLists int `help:"print information about DWARF location list creation"` + MaxShapeLen int `help:"hash shape names longer than this threshold (default 500)" concurrent:"ok"` + Nil int `help:"print information about nil checks"` + NoOpenDefer int `help:"disable open-coded defers" concurrent:"ok"` + NoRefName int `help:"do not include referenced symbol names in object file" concurrent:"ok"` + PCTab string `help:"print named pc-value table\nOne of: pctospadj, pctofile, pctoline, pctoinline, pctopcdata"` + Panic int `help:"show all compiler panics"` + Reshape int `help:"print information about expression reshaping"` + Shapify int `help:"print information about shaping recursive types"` + Slice int `help:"print information about slice compilation"` + SoftFloat int `help:"force compiler to emit soft-float code" concurrent:"ok"` + StaticCopy int `help:"print information about missed static copies" concurrent:"ok"` + SyncFrames int `help:"how many writer stack frames to include at sync points in unified export data"` + TypeAssert int `help:"print information about type assertion inlining"` + WB int `help:"print information about write barriers"` + ABIWrap int `help:"print information about ABI wrapper generation"` + MayMoreStack string `help:"call named function before all stack growth checks" concurrent:"ok"` + PGODebug int `help:"debug profile-guided optimizations"` + PGOHash string `help:"hash value for debugging profile-guided optimizations" concurrent:"ok"` + PGOInline int `help:"enable profile-guided inlining" concurrent:"ok"` + PGOInlineCDFThreshold string `help:"cumulative threshold percentage for determining call sites as hot candidates for inlining" concurrent:"ok"` + PGOInlineBudget int `help:"inline budget for hot functions" concurrent:"ok"` + PGODevirtualize int `help:"enable profile-guided devirtualization; 0 to disable, 1 to enable interface devirtualization, 2 to enable function devirtualization" 
concurrent:"ok"` + RangeFuncCheck int `help:"insert code to check behavior of range iterator functions" concurrent:"ok"` + WrapGlobalMapDbg int `help:"debug trace output for global map init wrapping"` + WrapGlobalMapCtl int `help:"global map init wrap control (0 => default, 1 => off, 2 => stress mode, no size cutoff)"` + ZeroCopy int `help:"enable zero-copy string->[]byte conversions" concurrent:"ok"` + + ConcurrentOk bool // true if only concurrentOk flags seen +} + +// DebugSSA is called to set a -d ssa/... option. +// If nil, those options are reported as invalid options. +// If DebugSSA returns a non-empty string, that text is reported as a compiler error. +var DebugSSA func(phase, flag string, val int, valString string) string diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/flag.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/flag.go new file mode 100644 index 0000000000000000000000000000000000000000..a3144f8fb4a6c2f4b6b77acb822786c3f8c44c64 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/flag.go @@ -0,0 +1,575 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package base + +import ( + "cmd/internal/cov/covcmd" + "encoding/json" + "flag" + "fmt" + "internal/buildcfg" + "internal/platform" + "log" + "os" + "reflect" + "runtime" + "strings" + + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/sys" +) + +func usage() { + fmt.Fprintf(os.Stderr, "usage: compile [options] file.go...\n") + objabi.Flagprint(os.Stderr) + Exit(2) +} + +// Flag holds the parsed command-line flags. +// See ParseFlag for non-zero defaults. +var Flag CmdFlags + +// A CountFlag is a counting integer flag. +// It accepts -name=value to set the value directly, +// but it also accepts -name with no =value to increment the count. 
+type CountFlag int + +// CmdFlags defines the command-line flags (see var Flag). +// Each struct field is a different flag, by default named for the lower-case of the field name. +// If the flag name is a single letter, the default flag name is left upper-case. +// If the flag name is "Lower" followed by a single letter, the default flag name is the lower-case of the last letter. +// +// If this default flag name can't be made right, the `flag` struct tag can be used to replace it, +// but this should be done only in exceptional circumstances: it helps everyone if the flag name +// is obvious from the field name when the flag is used elsewhere in the compiler sources. +// The `flag:"-"` struct tag makes a field invisible to the flag logic and should also be used sparingly. +// +// Each field must have a `help` struct tag giving the flag help message. +// +// The allowed field types are bool, int, string, pointers to those (for values stored elsewhere), +// CountFlag (for a counting flag), and func(string) (for a flag that uses special code for parsing). 
+type CmdFlags struct { + // Single letters + B CountFlag "help:\"disable bounds checking\"" + C CountFlag "help:\"disable printing of columns in error messages\"" + D string "help:\"set relative `path` for local imports\"" + E CountFlag "help:\"debug symbol export\"" + I func(string) "help:\"add `directory` to import search path\"" + K CountFlag "help:\"debug missing line numbers\"" + L CountFlag "help:\"also show actual source file names in error messages for positions affected by //line directives\"" + N CountFlag "help:\"disable optimizations\"" + S CountFlag "help:\"print assembly listing\"" + // V is added by objabi.AddVersionFlag + W CountFlag "help:\"debug parse tree after type checking\"" + + LowerC int "help:\"concurrency during compilation (1 means no concurrency)\"" + LowerD flag.Value "help:\"enable debugging settings; try -d help\"" + LowerE CountFlag "help:\"no limit on number of errors reported\"" + LowerH CountFlag "help:\"halt on error\"" + LowerJ CountFlag "help:\"debug runtime-initialized variables\"" + LowerL CountFlag "help:\"disable inlining\"" + LowerM CountFlag "help:\"print optimization decisions\"" + LowerO string "help:\"write output to `file`\"" + LowerP *string "help:\"set expected package import `path`\"" // &Ctxt.Pkgpath, set below + LowerR CountFlag "help:\"debug generated wrappers\"" + LowerT bool "help:\"enable tracing for debugging the compiler\"" + LowerW CountFlag "help:\"debug type checking\"" + LowerV *bool "help:\"increase debug verbosity\"" + + // Special characters + Percent CountFlag "flag:\"%\" help:\"debug non-static initializers\"" + CompilingRuntime bool "flag:\"+\" help:\"compiling runtime\"" + + // Longer names + AsmHdr string "help:\"write assembly header to `file`\"" + ASan bool "help:\"build code compatible with C/C++ address sanitizer\"" + Bench string "help:\"append benchmark times to `file`\"" + BlockProfile string "help:\"write block profile to `file`\"" + BuildID string "help:\"record `id` as the build id in 
the export metadata\"" + CPUProfile string "help:\"write cpu profile to `file`\"" + Complete bool "help:\"compiling complete package (no C or assembly)\"" + ClobberDead bool "help:\"clobber dead stack slots (for debugging)\"" + ClobberDeadReg bool "help:\"clobber dead registers (for debugging)\"" + Dwarf bool "help:\"generate DWARF symbols\"" + DwarfBASEntries *bool "help:\"use base address selection entries in DWARF\"" // &Ctxt.UseBASEntries, set below + DwarfLocationLists *bool "help:\"add location lists to DWARF in optimized mode\"" // &Ctxt.Flag_locationlists, set below + Dynlink *bool "help:\"support references to Go symbols defined in other shared libraries\"" // &Ctxt.Flag_dynlink, set below + EmbedCfg func(string) "help:\"read go:embed configuration from `file`\"" + Env func(string) "help:\"add `definition` of the form key=value to environment\"" + GenDwarfInl int "help:\"generate DWARF inline info records\"" // 0=disabled, 1=funcs, 2=funcs+formals/locals + GoVersion string "help:\"required version of the runtime\"" + ImportCfg func(string) "help:\"read import configuration from `file`\"" + InstallSuffix string "help:\"set pkg directory `suffix`\"" + JSON string "help:\"version,file for JSON compiler/optimizer detail output\"" + Lang string "help:\"Go language version source code expects\"" + LinkObj string "help:\"write linker-specific object to `file`\"" + LinkShared *bool "help:\"generate code that will be linked against Go shared libraries\"" // &Ctxt.Flag_linkshared, set below + Live CountFlag "help:\"debug liveness analysis\"" + MSan bool "help:\"build code compatible with C/C++ memory sanitizer\"" + MemProfile string "help:\"write memory profile to `file`\"" + MemProfileRate int "help:\"set runtime.MemProfileRate to `rate`\"" + MutexProfile string "help:\"write mutex profile to `file`\"" + NoLocalImports bool "help:\"reject local (relative) imports\"" + CoverageCfg func(string) "help:\"read coverage configuration from `file`\"" + Pack bool 
"help:\"write to file.a instead of file.o\"" + Race bool "help:\"enable race detector\"" + Shared *bool "help:\"generate code that can be linked into a shared library\"" // &Ctxt.Flag_shared, set below + SmallFrames bool "help:\"reduce the size limit for stack allocated objects\"" // small stacks, to diagnose GC latency; see golang.org/issue/27732 + Spectre string "help:\"enable spectre mitigations in `list` (all, index, ret)\"" + Std bool "help:\"compiling standard library\"" + SymABIs string "help:\"read symbol ABIs from `file`\"" + TraceProfile string "help:\"write an execution trace to `file`\"" + TrimPath string "help:\"remove `prefix` from recorded source file paths\"" + WB bool "help:\"enable write barrier\"" // TODO: remove + PgoProfile string "help:\"read profile from `file`\"" + ErrorURL bool "help:\"print explanatory URL with error message if applicable\"" + + // Configuration derived from flags; not a flag itself. + Cfg struct { + Embed struct { // set by -embedcfg + Patterns map[string][]string + Files map[string]string + } + ImportDirs []string // appended to by -I + ImportMap map[string]string // set by -importcfg + PackageFile map[string]string // set by -importcfg; nil means not in use + CoverageInfo *covcmd.CoverFixupConfig // set by -coveragecfg + SpectreIndex bool // set by -spectre=index or -spectre=all + // Whether we are adding any sort of code instrumentation, such as + // when the race detector is enabled. + Instrumenting bool + } +} + +func addEnv(s string) { + i := strings.Index(s, "=") + if i < 0 { + log.Fatal("-env argument must be of the form key=value") + } + os.Setenv(s[:i], s[i+1:]) +} + +// ParseFlags parses the command-line flags into Flag. 
+func ParseFlags() { + Flag.I = addImportDir + + Flag.LowerC = runtime.GOMAXPROCS(0) + Flag.LowerD = objabi.NewDebugFlag(&Debug, DebugSSA) + Flag.LowerP = &Ctxt.Pkgpath + Flag.LowerV = &Ctxt.Debugvlog + + Flag.Dwarf = buildcfg.GOARCH != "wasm" + Flag.DwarfBASEntries = &Ctxt.UseBASEntries + Flag.DwarfLocationLists = &Ctxt.Flag_locationlists + *Flag.DwarfLocationLists = true + Flag.Dynlink = &Ctxt.Flag_dynlink + Flag.EmbedCfg = readEmbedCfg + Flag.Env = addEnv + Flag.GenDwarfInl = 2 + Flag.ImportCfg = readImportCfg + Flag.CoverageCfg = readCoverageCfg + Flag.LinkShared = &Ctxt.Flag_linkshared + Flag.Shared = &Ctxt.Flag_shared + Flag.WB = true + + Debug.ConcurrentOk = true + Debug.MaxShapeLen = 500 + Debug.InlFuncsWithClosures = 1 + Debug.InlStaticInit = 1 + Debug.PGOInline = 1 + Debug.PGODevirtualize = 2 + Debug.SyncFrames = -1 // disable sync markers by default + Debug.ZeroCopy = 1 + Debug.RangeFuncCheck = 1 + + Debug.Checkptr = -1 // so we can tell whether it is set explicitly + + Flag.Cfg.ImportMap = make(map[string]string) + + objabi.AddVersionFlag() // -V + registerFlags() + objabi.Flagparse(usage) + + if gcd := os.Getenv("GOCOMPILEDEBUG"); gcd != "" { + // This will only override the flags set in gcd; + // any others set on the command line remain set. + Flag.LowerD.Set(gcd) + } + + if Debug.Gossahash != "" { + hashDebug = NewHashDebug("gossahash", Debug.Gossahash, nil) + } + + // Compute whether we're compiling the runtime from the package path. Test + // code can also use the flag to set this explicitly. + if Flag.Std && objabi.LookupPkgSpecial(Ctxt.Pkgpath).Runtime { + Flag.CompilingRuntime = true + } + + // Three inputs govern loop iteration variable rewriting, hash, experiment, flag. 
+ // The loop variable rewriting is: + // IF non-empty hash, then hash determines behavior (function+line match) (*) + // ELSE IF experiment and flag==0, then experiment (set flag=1) + // ELSE flag (note that build sets flag per-package), with behaviors: + // -1 => no change to behavior. + // 0 => no change to behavior (unless non-empty hash, see above) + // 1 => apply change to likely-iteration-variable-escaping loops + // 2 => apply change, log results + // 11 => apply change EVERYWHERE, do not log results (for debugging/benchmarking) + // 12 => apply change EVERYWHERE, log results (for debugging/benchmarking) + // + // The expected uses of the these inputs are, in believed most-likely to least likely: + // GOEXPERIMENT=loopvar -- apply change to entire application + // -gcflags=some_package=-d=loopvar=1 -- apply change to some_package (**) + // -gcflags=some_package=-d=loopvar=2 -- apply change to some_package, log it + // GOEXPERIMENT=loopvar -gcflags=some_package=-d=loopvar=-1 -- apply change to all but one package + // GOCOMPILEDEBUG=loopvarhash=... -- search for failure cause + // + // (*) For debugging purposes, providing loopvar flag >= 11 will expand the hash-eligible set of loops to all. + // (**) Loop semantics, changed or not, follow code from a package when it is inlined; that is, the behavior + // of an application compiled with partially modified loop semantics does not depend on inlining. + + if Debug.LoopVarHash != "" { + // This first little bit controls the inputs for debug-hash-matching. + mostInlineOnly := true + if strings.HasPrefix(Debug.LoopVarHash, "IL") { + // When hash-searching on a position that is an inline site, default is to use the + // most-inlined position only. This makes the hash faster, plus there's no point + // reporting a problem with all the inlining; there's only one copy of the source. + // However, if for some reason you wanted it per-site, you can get this. 
(The default + // hash-search behavior for compiler debugging is at an inline site.) + Debug.LoopVarHash = Debug.LoopVarHash[2:] + mostInlineOnly = false + } + // end of testing trickiness + LoopVarHash = NewHashDebug("loopvarhash", Debug.LoopVarHash, nil) + if Debug.LoopVar < 11 { // >= 11 means all loops are rewrite-eligible + Debug.LoopVar = 1 // 1 means those loops that syntactically escape their dcl vars are eligible. + } + LoopVarHash.SetInlineSuffixOnly(mostInlineOnly) + } else if buildcfg.Experiment.LoopVar && Debug.LoopVar == 0 { + Debug.LoopVar = 1 + } + + if Debug.Fmahash != "" { + FmaHash = NewHashDebug("fmahash", Debug.Fmahash, nil) + } + if Debug.PGOHash != "" { + PGOHash = NewHashDebug("pgohash", Debug.PGOHash, nil) + } + + if Flag.MSan && !platform.MSanSupported(buildcfg.GOOS, buildcfg.GOARCH) { + log.Fatalf("%s/%s does not support -msan", buildcfg.GOOS, buildcfg.GOARCH) + } + if Flag.ASan && !platform.ASanSupported(buildcfg.GOOS, buildcfg.GOARCH) { + log.Fatalf("%s/%s does not support -asan", buildcfg.GOOS, buildcfg.GOARCH) + } + if Flag.Race && !platform.RaceDetectorSupported(buildcfg.GOOS, buildcfg.GOARCH) { + log.Fatalf("%s/%s does not support -race", buildcfg.GOOS, buildcfg.GOARCH) + } + if (*Flag.Shared || *Flag.Dynlink || *Flag.LinkShared) && !Ctxt.Arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) { + log.Fatalf("%s/%s does not support -shared", buildcfg.GOOS, buildcfg.GOARCH) + } + parseSpectre(Flag.Spectre) // left as string for RecordFlags + + Ctxt.Flag_shared = Ctxt.Flag_dynlink || Ctxt.Flag_shared + Ctxt.Flag_optimize = Flag.N == 0 + Ctxt.Debugasm = int(Flag.S) + Ctxt.Flag_maymorestack = Debug.MayMoreStack + Ctxt.Flag_noRefName = Debug.NoRefName != 0 + + if flag.NArg() < 1 { + usage() + } + + if Flag.GoVersion != "" && Flag.GoVersion != runtime.Version() { + fmt.Printf("compile: version %q does not match go tool version %q\n", runtime.Version(), Flag.GoVersion) + Exit(2) + 
} + + if *Flag.LowerP == "" { + *Flag.LowerP = obj.UnlinkablePkg + } + + if Flag.LowerO == "" { + p := flag.Arg(0) + if i := strings.LastIndex(p, "/"); i >= 0 { + p = p[i+1:] + } + if runtime.GOOS == "windows" { + if i := strings.LastIndex(p, `\`); i >= 0 { + p = p[i+1:] + } + } + if i := strings.LastIndex(p, "."); i >= 0 { + p = p[:i] + } + suffix := ".o" + if Flag.Pack { + suffix = ".a" + } + Flag.LowerO = p + suffix + } + switch { + case Flag.Race && Flag.MSan: + log.Fatal("cannot use both -race and -msan") + case Flag.Race && Flag.ASan: + log.Fatal("cannot use both -race and -asan") + case Flag.MSan && Flag.ASan: + log.Fatal("cannot use both -msan and -asan") + } + if Flag.Race || Flag.MSan || Flag.ASan { + // -race, -msan and -asan imply -d=checkptr for now. + if Debug.Checkptr == -1 { // if not set explicitly + Debug.Checkptr = 1 + } + } + + if Flag.LowerC < 1 { + log.Fatalf("-c must be at least 1, got %d", Flag.LowerC) + } + if !concurrentBackendAllowed() { + Flag.LowerC = 1 + } + + if Flag.CompilingRuntime { + // It is not possible to build the runtime with no optimizations, + // because the compiler cannot eliminate enough write barriers. + Flag.N = 0 + Ctxt.Flag_optimize = true + + // Runtime can't use -d=checkptr, at least not yet. + Debug.Checkptr = 0 + + // Fuzzing the runtime isn't interesting either. + Debug.Libfuzzer = 0 + } + + if Debug.Checkptr == -1 { // if not set explicitly + Debug.Checkptr = 0 + } + + // set via a -d flag + Ctxt.Debugpcln = Debug.PCTab +} + +// registerFlags adds flag registrations for all the fields in Flag. +// See the comment on type CmdFlags for the rules. 
+func registerFlags() { + var ( + boolType = reflect.TypeOf(bool(false)) + intType = reflect.TypeOf(int(0)) + stringType = reflect.TypeOf(string("")) + ptrBoolType = reflect.TypeOf(new(bool)) + ptrIntType = reflect.TypeOf(new(int)) + ptrStringType = reflect.TypeOf(new(string)) + countType = reflect.TypeOf(CountFlag(0)) + funcType = reflect.TypeOf((func(string))(nil)) + ) + + v := reflect.ValueOf(&Flag).Elem() + t := v.Type() + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Name == "Cfg" { + continue + } + + var name string + if len(f.Name) == 1 { + name = f.Name + } else if len(f.Name) == 6 && f.Name[:5] == "Lower" && 'A' <= f.Name[5] && f.Name[5] <= 'Z' { + name = string(rune(f.Name[5] + 'a' - 'A')) + } else { + name = strings.ToLower(f.Name) + } + if tag := f.Tag.Get("flag"); tag != "" { + name = tag + } + + help := f.Tag.Get("help") + if help == "" { + panic(fmt.Sprintf("base.Flag.%s is missing help text", f.Name)) + } + + if k := f.Type.Kind(); (k == reflect.Ptr || k == reflect.Func) && v.Field(i).IsNil() { + panic(fmt.Sprintf("base.Flag.%s is uninitialized %v", f.Name, f.Type)) + } + + switch f.Type { + case boolType: + p := v.Field(i).Addr().Interface().(*bool) + flag.BoolVar(p, name, *p, help) + case intType: + p := v.Field(i).Addr().Interface().(*int) + flag.IntVar(p, name, *p, help) + case stringType: + p := v.Field(i).Addr().Interface().(*string) + flag.StringVar(p, name, *p, help) + case ptrBoolType: + p := v.Field(i).Interface().(*bool) + flag.BoolVar(p, name, *p, help) + case ptrIntType: + p := v.Field(i).Interface().(*int) + flag.IntVar(p, name, *p, help) + case ptrStringType: + p := v.Field(i).Interface().(*string) + flag.StringVar(p, name, *p, help) + case countType: + p := (*int)(v.Field(i).Addr().Interface().(*CountFlag)) + objabi.Flagcount(name, help, p) + case funcType: + f := v.Field(i).Interface().(func(string)) + objabi.Flagfn1(name, help, f) + default: + if val, ok := v.Field(i).Interface().(flag.Value); ok { + flag.Var(val, 
name, help) + } else { + panic(fmt.Sprintf("base.Flag.%s has unexpected type %s", f.Name, f.Type)) + } + } + } +} + +// concurrentFlagOk reports whether the current compiler flags +// are compatible with concurrent compilation. +func concurrentFlagOk() bool { + // TODO(rsc): Many of these are fine. Remove them. + return Flag.Percent == 0 && + Flag.E == 0 && + Flag.K == 0 && + Flag.L == 0 && + Flag.LowerH == 0 && + Flag.LowerJ == 0 && + Flag.LowerM == 0 && + Flag.LowerR == 0 +} + +func concurrentBackendAllowed() bool { + if !concurrentFlagOk() { + return false + } + + // Debug.S by itself is ok, because all printing occurs + // while writing the object file, and that is non-concurrent. + // Adding Debug_vlog, however, causes Debug.S to also print + // while flushing the plist, which happens concurrently. + if Ctxt.Debugvlog || !Debug.ConcurrentOk || Flag.Live > 0 { + return false + } + // TODO: Test and delete this condition. + if buildcfg.Experiment.FieldTrack { + return false + } + // TODO: fix races and enable the following flags + if Ctxt.Flag_dynlink || Flag.Race { + return false + } + return true +} + +func addImportDir(dir string) { + if dir != "" { + Flag.Cfg.ImportDirs = append(Flag.Cfg.ImportDirs, dir) + } +} + +func readImportCfg(file string) { + if Flag.Cfg.ImportMap == nil { + Flag.Cfg.ImportMap = make(map[string]string) + } + Flag.Cfg.PackageFile = map[string]string{} + data, err := os.ReadFile(file) + if err != nil { + log.Fatalf("-importcfg: %v", err) + } + + for lineNum, line := range strings.Split(string(data), "\n") { + lineNum++ // 1-based + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + + verb, args, found := strings.Cut(line, " ") + if found { + args = strings.TrimSpace(args) + } + before, after, hasEq := strings.Cut(args, "=") + + switch verb { + default: + log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb) + case "importmap": + if !hasEq || before == "" || after == "" { + 
log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum) + } + Flag.Cfg.ImportMap[before] = after + case "packagefile": + if !hasEq || before == "" || after == "" { + log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum) + } + Flag.Cfg.PackageFile[before] = after + } + } +} + +func readCoverageCfg(file string) { + var cfg covcmd.CoverFixupConfig + data, err := os.ReadFile(file) + if err != nil { + log.Fatalf("-coveragecfg: %v", err) + } + if err := json.Unmarshal(data, &cfg); err != nil { + log.Fatalf("error reading -coveragecfg file %q: %v", file, err) + } + Flag.Cfg.CoverageInfo = &cfg +} + +func readEmbedCfg(file string) { + data, err := os.ReadFile(file) + if err != nil { + log.Fatalf("-embedcfg: %v", err) + } + if err := json.Unmarshal(data, &Flag.Cfg.Embed); err != nil { + log.Fatalf("%s: %v", file, err) + } + if Flag.Cfg.Embed.Patterns == nil { + log.Fatalf("%s: invalid embedcfg: missing Patterns", file) + } + if Flag.Cfg.Embed.Files == nil { + log.Fatalf("%s: invalid embedcfg: missing Files", file) + } +} + +// parseSpectre parses the spectre configuration from the string s. 
+func parseSpectre(s string) { + for _, f := range strings.Split(s, ",") { + f = strings.TrimSpace(f) + switch f { + default: + log.Fatalf("unknown setting -spectre=%s", f) + case "": + // nothing + case "all": + Flag.Cfg.SpectreIndex = true + Ctxt.Retpoline = true + case "index": + Flag.Cfg.SpectreIndex = true + case "ret": + Ctxt.Retpoline = true + } + } + + if Flag.Cfg.SpectreIndex { + switch buildcfg.GOARCH { + case "amd64": + // ok + default: + log.Fatalf("GOARCH=%s does not support -spectre=index", buildcfg.GOARCH) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/hashdebug.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/hashdebug.go new file mode 100644 index 0000000000000000000000000000000000000000..8342a5b9d93d3012ca614a4c25c4321e3126008d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/hashdebug.go @@ -0,0 +1,417 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package base + +import ( + "bytes" + "cmd/internal/obj" + "cmd/internal/src" + "fmt" + "internal/bisect" + "io" + "os" + "path/filepath" + "strconv" + "strings" + "sync" +) + +type hashAndMask struct { + // a hash h matches if (h^hash)&mask == 0 + hash uint64 + mask uint64 + name string // base name, or base name + "0", "1", etc. +} + +type HashDebug struct { + mu sync.Mutex // for logfile, posTmp, bytesTmp + name string // base name of the flag/variable. + // what file (if any) receives the yes/no logging? + // default is os.Stdout + logfile io.Writer + posTmp []src.Pos + bytesTmp bytes.Buffer + matches []hashAndMask // A hash matches if one of these matches. + excludes []hashAndMask // explicitly excluded hash suffixes + bisect *bisect.Matcher + fileSuffixOnly bool // for Pos hashes, remove the directory prefix. + inlineSuffixOnly bool // for Pos hashes, remove all but the most inline position. 
+} + +// SetInlineSuffixOnly controls whether hashing and reporting use the entire +// inline position, or just the most-inline suffix. Compiler debugging tends +// to want the whole inlining, debugging user problems (loopvarhash, e.g.) +// typically does not need to see the entire inline tree, there is just one +// copy of the source code. +func (d *HashDebug) SetInlineSuffixOnly(b bool) *HashDebug { + d.inlineSuffixOnly = b + return d +} + +// The default compiler-debugging HashDebug, for "-d=gossahash=..." +var hashDebug *HashDebug + +var FmaHash *HashDebug // for debugging fused-multiply-add floating point changes +var LoopVarHash *HashDebug // for debugging shared/private loop variable changes +var PGOHash *HashDebug // for debugging PGO optimization decisions + +// DebugHashMatchPkgFunc reports whether debug variable Gossahash +// +// 1. is empty (returns true; this is a special more-quickly implemented case of 4 below) +// +// 2. is "y" or "Y" (returns true) +// +// 3. is "n" or "N" (returns false) +// +// 4. does not explicitly exclude the sha1 hash of pkgAndName (see step 6) +// +// 5. is a suffix of the sha1 hash of pkgAndName (returns true) +// +// 6. OR +// if the (non-empty) value is in the regular language +// "(-[01]+/)+?([01]+(/[01]+)+?" +// (exclude..)(....include...) +// test the [01]+ exclude substrings, if any suffix-match, return false (4 above) +// test the [01]+ include substrings, if any suffix-match, return true +// The include substrings AFTER the first slash are numbered 0,1, etc and +// are named fmt.Sprintf("%s%d", varname, number) +// As an extra-special case for multiple failure search, +// an excludes-only string ending in a slash (terminated, not separated) +// implicitly specifies the include string "0/1", that is, match everything. +// (Exclude strings are used for automated search for multiple failures.) +// Clause 6 is not really intended for human use and only +// matters for failures that require multiple triggers. 
+// +// Otherwise it returns false. +// +// Unless Flags.Gossahash is empty, when DebugHashMatchPkgFunc returns true the message +// +// "%s triggered %s\n", varname, pkgAndName +// +// is printed on the file named in environment variable GSHS_LOGFILE, +// or standard out if that is empty. "Varname" is either the name of +// the variable or the name of the substring, depending on which matched. +// +// Typical use: +// +// 1. you make a change to the compiler, say, adding a new phase +// +// 2. it is broken in some mystifying way, for example, make.bash builds a broken +// compiler that almost works, but crashes compiling a test in run.bash. +// +// 3. add this guard to the code, which by default leaves it broken, but does not +// run the broken new code if Flags.Gossahash is non-empty and non-matching: +// +// if !base.DebugHashMatch(ir.PkgFuncName(fn)) { +// return nil // early exit, do nothing +// } +// +// 4. rebuild w/o the bad code, +// GOCOMPILEDEBUG=gossahash=n ./all.bash +// to verify that you put the guard in the right place with the right sense of the test. +// +// 5. use github.com/dr2chase/gossahash to search for the error: +// +// go install github.com/dr2chase/gossahash@latest +// +// gossahash -- +// +// for example: GOMAXPROCS=1 gossahash -- ./all.bash +// +// 6. gossahash should return a single function whose miscompilation +// causes the problem, and you can focus on that. +func DebugHashMatchPkgFunc(pkg, fn string) bool { + return hashDebug.MatchPkgFunc(pkg, fn, nil) +} + +func DebugHashMatchPos(pos src.XPos) bool { + return hashDebug.MatchPos(pos, nil) +} + +// HasDebugHash returns true if Flags.Gossahash is non-empty, which +// results in hashDebug being not-nil. I.e., if !HasDebugHash(), +// there is no need to create the string for hashing and testing. +func HasDebugHash() bool { + return hashDebug != nil +} + +// TODO: Delete when we switch to bisect-only. 
+func toHashAndMask(s, varname string) hashAndMask { + l := len(s) + if l > 64 { + s = s[l-64:] + l = 64 + } + m := ^(^uint64(0) << l) + h, err := strconv.ParseUint(s, 2, 64) + if err != nil { + Fatalf("Could not parse %s (=%s) as a binary number", varname, s) + } + + return hashAndMask{name: varname, hash: h, mask: m} +} + +// NewHashDebug returns a new hash-debug tester for the +// environment variable ev. If ev is not set, it returns +// nil, allowing a lightweight check for normal-case behavior. +func NewHashDebug(ev, s string, file io.Writer) *HashDebug { + if s == "" { + return nil + } + + hd := &HashDebug{name: ev, logfile: file} + if !strings.Contains(s, "/") { + m, err := bisect.New(s) + if err != nil { + Fatalf("%s: %v", ev, err) + } + hd.bisect = m + return hd + } + + // TODO: Delete remainder of function when we switch to bisect-only. + ss := strings.Split(s, "/") + // first remove any leading exclusions; these are preceded with "-" + i := 0 + for len(ss) > 0 { + s := ss[0] + if len(s) == 0 || len(s) > 0 && s[0] != '-' { + break + } + ss = ss[1:] + hd.excludes = append(hd.excludes, toHashAndMask(s[1:], fmt.Sprintf("%s%d", "HASH_EXCLUDE", i))) + i++ + } + // hash searches may use additional EVs with 0, 1, 2, ... suffixes. + i = 0 + for _, s := range ss { + if s == "" { + if i != 0 || len(ss) > 1 && ss[1] != "" || len(ss) > 2 { + Fatalf("Empty hash match string for %s should be first (and only) one", ev) + } + // Special case of should match everything. + hd.matches = append(hd.matches, toHashAndMask("0", fmt.Sprintf("%s0", ev))) + hd.matches = append(hd.matches, toHashAndMask("1", fmt.Sprintf("%s1", ev))) + break + } + if i == 0 { + hd.matches = append(hd.matches, toHashAndMask(s, fmt.Sprintf("%s", ev))) + } else { + hd.matches = append(hd.matches, toHashAndMask(s, fmt.Sprintf("%s%d", ev, i-1))) + } + i++ + } + return hd +} + +// TODO: Delete when we switch to bisect-only. 
+func (d *HashDebug) excluded(hash uint64) bool { + for _, m := range d.excludes { + if (m.hash^hash)&m.mask == 0 { + return true + } + } + return false +} + +// TODO: Delete when we switch to bisect-only. +func hashString(hash uint64) string { + hstr := "" + if hash == 0 { + hstr = "0" + } else { + for ; hash != 0; hash = hash >> 1 { + hstr = string('0'+byte(hash&1)) + hstr + } + } + if len(hstr) > 24 { + hstr = hstr[len(hstr)-24:] + } + return hstr +} + +// TODO: Delete when we switch to bisect-only. +func (d *HashDebug) match(hash uint64) *hashAndMask { + for i, m := range d.matches { + if (m.hash^hash)&m.mask == 0 { + return &d.matches[i] + } + } + return nil +} + +// MatchPkgFunc returns true if either the variable used to create d is +// unset, or if its value is y, or if it is a suffix of the base-two +// representation of the hash of pkg and fn. If the variable is not nil, +// then a true result is accompanied by stylized output to d.logfile, which +// is used for automated bug search. +func (d *HashDebug) MatchPkgFunc(pkg, fn string, note func() string) bool { + if d == nil { + return true + } + // Written this way to make inlining likely. + return d.matchPkgFunc(pkg, fn, note) +} + +func (d *HashDebug) matchPkgFunc(pkg, fn string, note func() string) bool { + hash := bisect.Hash(pkg, fn) + return d.matchAndLog(hash, func() string { return pkg + "." + fn }, note) +} + +// MatchPos is similar to MatchPkgFunc, but for hash computation +// it uses the source position including all inlining information instead of +// package name and path. +// Note that the default answer for no environment variable (d == nil) +// is "yes", do the thing. +func (d *HashDebug) MatchPos(pos src.XPos, desc func() string) bool { + if d == nil { + return true + } + // Written this way to make inlining likely. 
+ return d.matchPos(Ctxt, pos, desc) +} + +func (d *HashDebug) matchPos(ctxt *obj.Link, pos src.XPos, note func() string) bool { + return d.matchPosWithInfo(ctxt, pos, nil, note) +} + +func (d *HashDebug) matchPosWithInfo(ctxt *obj.Link, pos src.XPos, info any, note func() string) bool { + hash := d.hashPos(ctxt, pos) + if info != nil { + hash = bisect.Hash(hash, info) + } + return d.matchAndLog(hash, + func() string { + r := d.fmtPos(ctxt, pos) + if info != nil { + r += fmt.Sprintf(" (%v)", info) + } + return r + }, + note) +} + +// MatchPosWithInfo is similar to MatchPos, but with additional information +// that is included for hash computation, so it can distinguish multiple +// matches on the same source location. +// Note that the default answer for no environment variable (d == nil) +// is "yes", do the thing. +func (d *HashDebug) MatchPosWithInfo(pos src.XPos, info any, desc func() string) bool { + if d == nil { + return true + } + // Written this way to make inlining likely. + return d.matchPosWithInfo(Ctxt, pos, info, desc) +} + +// matchAndLog is the core matcher. It reports whether the hash matches the pattern. +// If a report needs to be printed, match prints that report to the log file. +// The text func must be non-nil and should return a user-readable +// representation of what was hashed. The note func may be nil; if non-nil, +// it should return additional information to display to the user when this +// change is selected. 
+func (d *HashDebug) matchAndLog(hash uint64, text, note func() string) bool { + if d.bisect != nil { + enabled := d.bisect.ShouldEnable(hash) + if d.bisect.ShouldPrint(hash) { + disabled := "" + if !enabled { + disabled = " [DISABLED]" + } + var t string + if !d.bisect.MarkerOnly() { + t = text() + if note != nil { + if n := note(); n != "" { + t += ": " + n + disabled + disabled = "" + } + } + } + d.log(d.name, hash, strings.TrimSpace(t+disabled)) + } + return enabled + } + + // TODO: Delete rest of function body when we switch to bisect-only. + if d.excluded(hash) { + return false + } + if m := d.match(hash); m != nil { + d.log(m.name, hash, text()) + return true + } + return false +} + +// short returns the form of file name to use for d. +// The default is the full path, but fileSuffixOnly selects +// just the final path element. +func (d *HashDebug) short(name string) string { + if d.fileSuffixOnly { + return filepath.Base(name) + } + return name +} + +// hashPos returns a hash of the position pos, including its entire inline stack. +// If d.inlineSuffixOnly is true, hashPos only considers the innermost (leaf) position on the inline stack. +func (d *HashDebug) hashPos(ctxt *obj.Link, pos src.XPos) uint64 { + if d.inlineSuffixOnly { + p := ctxt.InnermostPos(pos) + return bisect.Hash(d.short(p.Filename()), p.Line(), p.Col()) + } + h := bisect.Hash() + ctxt.AllPos(pos, func(p src.Pos) { + h = bisect.Hash(h, d.short(p.Filename()), p.Line(), p.Col()) + }) + return h +} + +// fmtPos returns a textual formatting of the position pos, including its entire inline stack. +// If d.inlineSuffixOnly is true, fmtPos only considers the innermost (leaf) position on the inline stack. 
+func (d *HashDebug) fmtPos(ctxt *obj.Link, pos src.XPos) string { + format := func(p src.Pos) string { + return fmt.Sprintf("%s:%d:%d", d.short(p.Filename()), p.Line(), p.Col()) + } + if d.inlineSuffixOnly { + return format(ctxt.InnermostPos(pos)) + } + var stk []string + ctxt.AllPos(pos, func(p src.Pos) { + stk = append(stk, format(p)) + }) + return strings.Join(stk, "; ") +} + +// log prints a match with the given hash and textual formatting. +// TODO: Delete varname parameter when we switch to bisect-only. +func (d *HashDebug) log(varname string, hash uint64, text string) { + d.mu.Lock() + defer d.mu.Unlock() + + file := d.logfile + if file == nil { + if tmpfile := os.Getenv("GSHS_LOGFILE"); tmpfile != "" { + var err error + file, err = os.OpenFile(tmpfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) + if err != nil { + Fatalf("could not open hash-testing logfile %s", tmpfile) + return + } + } + if file == nil { + file = os.Stdout + } + d.logfile = file + } + + // Bisect output. + fmt.Fprintf(file, "%s %s\n", text, bisect.Marker(hash)) + + // Gossahash output. + // TODO: Delete rest of function when we switch to bisect-only. + fmt.Fprintf(file, "%s triggered %s %s\n", varname, text, hashString(hash)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/hashdebug_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/hashdebug_test.go new file mode 100644 index 0000000000000000000000000000000000000000..62ef2ed4939d64bd4f8de5fc307b477a173a2bab --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/hashdebug_test.go @@ -0,0 +1,140 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package base + +import ( + "bytes" + "internal/bisect" + "strings" + "testing" +) + +func TestHashDebugGossahashY(t *testing.T) { + hd := NewHashDebug("GOSSAHASH", "y", new(bytes.Buffer)) + if hd == nil { + t.Errorf("NewHashDebug should not return nil for GOSSASHASH=y") + } + if !hd.MatchPkgFunc("anything", "anyfunc", nil) { + t.Errorf("NewHashDebug should return yes for everything for GOSSASHASH=y") + } +} + +func TestHashDebugGossahashN(t *testing.T) { + hd := NewHashDebug("GOSSAHASH", "n", new(bytes.Buffer)) + if hd == nil { + t.Errorf("NewHashDebug should not return nil for GOSSASHASH=n") + } + if hd.MatchPkgFunc("anything", "anyfunc", nil) { + t.Errorf("NewHashDebug should return no for everything for GOSSASHASH=n") + } +} + +func TestHashDebugGossahashEmpty(t *testing.T) { + hd := NewHashDebug("GOSSAHASH", "", nil) + if hd != nil { + t.Errorf("NewHashDebug should return nil for GOSSASHASH=\"\"") + } +} + +func TestHashDebugMagic(t *testing.T) { + hd := NewHashDebug("FOOXYZZY", "y", nil) + hd0 := NewHashDebug("FOOXYZZY0", "n", nil) + if hd == nil { + t.Errorf("NewHashDebug should have succeeded for FOOXYZZY") + } + if hd0 == nil { + t.Errorf("NewHashDebug should have succeeded for FOOXYZZY0") + } +} + +func TestHash(t *testing.T) { + h0 := bisect.Hash("bar", "0") + h1 := bisect.Hash("bar", "1") + t.Logf(`These values are used in other tests: Hash("bar", "0")=%#64b, Hash("bar", "1")=%#64b`, h0, h1) + if h0 == h1 { + t.Errorf("Hashes 0x%x and 0x%x should differ", h0, h1) + } +} + +func TestHashMatch(t *testing.T) { + b := new(bytes.Buffer) + hd := NewHashDebug("GOSSAHASH", "v1110", b) + check := hd.MatchPkgFunc("bar", "0", func() string { return "note" }) + msg := b.String() + t.Logf("message was '%s'", msg) + if !check { + t.Errorf("GOSSAHASH=1110 should have matched for 'bar', '0'") + } + wantPrefix(t, msg, "bar.0: note [bisect-match ") + wantContains(t, msg, "\nGOSSAHASH triggered bar.0: note ") +} + +func TestYMatch(t *testing.T) { + b := 
new(bytes.Buffer) + hd := NewHashDebug("GOSSAHASH", "vy", b) + check := hd.MatchPkgFunc("bar", "0", nil) + msg := b.String() + t.Logf("message was '%s'", msg) + if !check { + t.Errorf("GOSSAHASH=y should have matched for 'bar', '0'") + } + wantPrefix(t, msg, "bar.0 [bisect-match ") + wantContains(t, msg, "\nGOSSAHASH triggered bar.0 010100100011100101011110") +} + +func TestNMatch(t *testing.T) { + b := new(bytes.Buffer) + hd := NewHashDebug("GOSSAHASH", "vn", b) + check := hd.MatchPkgFunc("bar", "0", nil) + msg := b.String() + t.Logf("message was '%s'", msg) + if check { + t.Errorf("GOSSAHASH=n should NOT have matched for 'bar', '0'") + } + wantPrefix(t, msg, "bar.0 [DISABLED] [bisect-match ") + wantContains(t, msg, "\nGOSSAHASH triggered bar.0 [DISABLED] 010100100011100101011110") +} + +func TestHashNoMatch(t *testing.T) { + b := new(bytes.Buffer) + hd := NewHashDebug("GOSSAHASH", "01110", b) + check := hd.MatchPkgFunc("bar", "0", nil) + msg := b.String() + t.Logf("message was '%s'", msg) + if check { + t.Errorf("GOSSAHASH=001100 should NOT have matched for 'bar', '0'") + } + if msg != "" { + t.Errorf("Message should have been empty, instead %s", msg) + } + +} + +func TestHashSecondMatch(t *testing.T) { + b := new(bytes.Buffer) + hd := NewHashDebug("GOSSAHASH", "01110/11110", b) + + check := hd.MatchPkgFunc("bar", "0", nil) + msg := b.String() + t.Logf("message was '%s'", msg) + if !check { + t.Errorf("GOSSAHASH=001100, GOSSAHASH0=0011 should have matched for 'bar', '0'") + } + wantContains(t, msg, "\nGOSSAHASH0 triggered bar") +} + +func wantPrefix(t *testing.T, got, want string) { + t.Helper() + if !strings.HasPrefix(got, want) { + t.Errorf("want prefix %q, got:\n%s", want, got) + } +} + +func wantContains(t *testing.T, got, want string) { + t.Helper() + if !strings.Contains(got, want) { + t.Errorf("want contains %q, got:\n%s", want, got) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/link.go 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/link.go new file mode 100644 index 0000000000000000000000000000000000000000..d8aa5a7dccd209c4e17f758442a1427e1dd3d8b6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/link.go @@ -0,0 +1,53 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package base + +import ( + "cmd/internal/obj" +) + +// ReservedImports are import paths used internally for generated +// symbols by the compiler. +// +// The linker uses the magic symbol prefixes "go:" and "type:". +// Avoid potential confusion between import paths and symbols +// by rejecting these reserved imports for now. Also, people +// "can do weird things in GOPATH and we'd prefer they didn't +// do _that_ weird thing" (per rsc). See also #4257. +var ReservedImports = map[string]bool{ + "go": true, + "type": true, +} + +var Ctxt *obj.Link + +// TODO(mdempsky): These should probably be obj.Link methods. + +// PkgLinksym returns the linker symbol for name within the given +// package prefix. For user packages, prefix should be the package +// path encoded with objabi.PathToPrefix. +func PkgLinksym(prefix, name string, abi obj.ABI) *obj.LSym { + if name == "_" { + // TODO(mdempsky): Cleanup callers and Fatalf instead. + return linksym(prefix, "_", abi) + } + sep := "." + if ReservedImports[prefix] { + sep = ":" + } + return linksym(prefix, prefix+sep+name, abi) +} + +// Linkname returns the linker symbol for the given name as it might +// appear within a //go:linkname directive. +func Linkname(name string, abi obj.ABI) *obj.LSym { + return linksym("_", name, abi) +} + +// linksym is an internal helper function for implementing the above +// exported APIs. 
+func linksym(pkg, name string, abi obj.ABI) *obj.LSym { + return Ctxt.LookupABIInit(name, abi, func(r *obj.LSym) { r.Pkg = pkg }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/mapfile_mmap.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/mapfile_mmap.go new file mode 100644 index 0000000000000000000000000000000000000000..b66c9eb260ffe5fe41419bfd213a445d0a1258ae --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/mapfile_mmap.go @@ -0,0 +1,45 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris + +package base + +import ( + "internal/unsafeheader" + "os" + "runtime" + "syscall" + "unsafe" +) + +// TODO(mdempsky): Is there a higher-level abstraction that still +// works well for iimport? + +// MapFile returns length bytes from the file starting at the +// specified offset as a string. +func MapFile(f *os.File, offset, length int64) (string, error) { + // POSIX mmap: "The implementation may require that off is a + // multiple of the page size." 
+ x := offset & int64(os.Getpagesize()-1) + offset -= x + length += x + + buf, err := syscall.Mmap(int(f.Fd()), offset, int(length), syscall.PROT_READ, syscall.MAP_SHARED) + runtime.KeepAlive(f) + if err != nil { + return "", err + } + + buf = buf[x:] + pSlice := (*unsafeheader.Slice)(unsafe.Pointer(&buf)) + + var res string + pString := (*unsafeheader.String)(unsafe.Pointer(&res)) + + pString.Data = pSlice.Data + pString.Len = pSlice.Len + + return res, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/mapfile_read.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/mapfile_read.go new file mode 100644 index 0000000000000000000000000000000000000000..783f8c460281008b3f33366de2a00b23a7fdf824 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/mapfile_read.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris + +package base + +import ( + "io" + "os" +) + +func MapFile(f *os.File, offset, length int64) (string, error) { + buf := make([]byte, length) + _, err := io.ReadFull(io.NewSectionReader(f, offset, length), buf) + if err != nil { + return "", err + } + return string(buf), nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/print.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/print.go new file mode 100644 index 0000000000000000000000000000000000000000..cc36acec4b65080a2bb2959110e88056cbc8c68a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/print.go @@ -0,0 +1,283 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package base + +import ( + "fmt" + "internal/buildcfg" + "internal/types/errors" + "os" + "runtime/debug" + "sort" + "strings" + + "cmd/internal/src" +) + +// An errorMsg is a queued error message, waiting to be printed. +type errorMsg struct { + pos src.XPos + msg string + code errors.Code +} + +// Pos is the current source position being processed, +// printed by Errorf, ErrorfLang, Fatalf, and Warnf. +var Pos src.XPos + +var ( + errorMsgs []errorMsg + numErrors int // number of entries in errorMsgs that are errors (as opposed to warnings) + numSyntaxErrors int +) + +// Errors returns the number of errors reported. +func Errors() int { + return numErrors +} + +// SyntaxErrors returns the number of syntax errors reported. +func SyntaxErrors() int { + return numSyntaxErrors +} + +// addErrorMsg adds a new errorMsg (which may be a warning) to errorMsgs. +func addErrorMsg(pos src.XPos, code errors.Code, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + // Only add the position if know the position. + // See issue golang.org/issue/11361. + if pos.IsKnown() { + msg = fmt.Sprintf("%v: %s", FmtPos(pos), msg) + } + errorMsgs = append(errorMsgs, errorMsg{ + pos: pos, + msg: msg + "\n", + code: code, + }) +} + +// FmtPos formats pos as a file:line string. +func FmtPos(pos src.XPos) string { + if Ctxt == nil { + return "???" + } + return Ctxt.OutermostPos(pos).Format(Flag.C == 0, Flag.L == 1) +} + +// byPos sorts errors by source position. +type byPos []errorMsg + +func (x byPos) Len() int { return len(x) } +func (x byPos) Less(i, j int) bool { return x[i].pos.Before(x[j].pos) } +func (x byPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +// FlushErrors sorts errors seen so far by line number, prints them to stdout, +// and empties the errors array. 
+func FlushErrors() { + if Ctxt != nil && Ctxt.Bso != nil { + Ctxt.Bso.Flush() + } + if len(errorMsgs) == 0 { + return + } + sort.Stable(byPos(errorMsgs)) + for i, err := range errorMsgs { + if i == 0 || err.msg != errorMsgs[i-1].msg { + fmt.Print(err.msg) + } + } + errorMsgs = errorMsgs[:0] +} + +// lasterror keeps track of the most recently issued error, +// to avoid printing multiple error messages on the same line. +var lasterror struct { + syntax src.XPos // source position of last syntax error + other src.XPos // source position of last non-syntax error + msg string // error message of last non-syntax error +} + +// sameline reports whether two positions a, b are on the same line. +func sameline(a, b src.XPos) bool { + p := Ctxt.PosTable.Pos(a) + q := Ctxt.PosTable.Pos(b) + return p.Base() == q.Base() && p.Line() == q.Line() +} + +// Errorf reports a formatted error at the current line. +func Errorf(format string, args ...interface{}) { + ErrorfAt(Pos, 0, format, args...) +} + +// ErrorfAt reports a formatted error message at pos. +func ErrorfAt(pos src.XPos, code errors.Code, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + + if strings.HasPrefix(msg, "syntax error") { + numSyntaxErrors++ + // only one syntax error per line, no matter what error + if sameline(lasterror.syntax, pos) { + return + } + lasterror.syntax = pos + } else { + // only one of multiple equal non-syntax errors per line + // (FlushErrors shows only one of them, so we filter them + // here as best as we can (they may not appear in order) + // so that we don't count them here and exit early, and + // then have nothing to show for.) 
+ if sameline(lasterror.other, pos) && lasterror.msg == msg { + return + } + lasterror.other = pos + lasterror.msg = msg + } + + addErrorMsg(pos, code, "%s", msg) + numErrors++ + + hcrash() + if numErrors >= 10 && Flag.LowerE == 0 { + FlushErrors() + fmt.Printf("%v: too many errors\n", FmtPos(pos)) + ErrorExit() + } +} + +// UpdateErrorDot is a clumsy hack that rewrites the last error, +// if it was "LINE: undefined: NAME", to be "LINE: undefined: NAME in EXPR". +// It is used to give better error messages for dot (selector) expressions. +func UpdateErrorDot(line string, name, expr string) { + if len(errorMsgs) == 0 { + return + } + e := &errorMsgs[len(errorMsgs)-1] + if strings.HasPrefix(e.msg, line) && e.msg == fmt.Sprintf("%v: undefined: %v\n", line, name) { + e.msg = fmt.Sprintf("%v: undefined: %v in %v\n", line, name, expr) + } +} + +// Warn reports a formatted warning at the current line. +// In general the Go compiler does NOT generate warnings, +// so this should be used only when the user has opted in +// to additional output by setting a particular flag. +func Warn(format string, args ...interface{}) { + WarnfAt(Pos, format, args...) +} + +// WarnfAt reports a formatted warning at pos. +// In general the Go compiler does NOT generate warnings, +// so this should be used only when the user has opted in +// to additional output by setting a particular flag. +func WarnfAt(pos src.XPos, format string, args ...interface{}) { + addErrorMsg(pos, 0, format, args...) + if Flag.LowerM != 0 { + FlushErrors() + } +} + +// Fatalf reports a fatal error - an internal problem - at the current line and exits. +// If other errors have already been printed, then Fatalf just quietly exits. +// (The internal problem may have been caused by incomplete information +// after the already-reported errors, so best to let users fix those and +// try again without being bothered about a spurious internal error.) 
+// +// But if no errors have been printed, or if -d panic has been specified, +// Fatalf prints the error as an "internal compiler error". In a released build, +// it prints an error asking to file a bug report. In development builds, it +// prints a stack trace. +// +// If -h has been specified, Fatalf panics to force the usual runtime info dump. +func Fatalf(format string, args ...interface{}) { + FatalfAt(Pos, format, args...) +} + +// FatalfAt reports a fatal error - an internal problem - at pos and exits. +// If other errors have already been printed, then FatalfAt just quietly exits. +// (The internal problem may have been caused by incomplete information +// after the already-reported errors, so best to let users fix those and +// try again without being bothered about a spurious internal error.) +// +// But if no errors have been printed, or if -d panic has been specified, +// FatalfAt prints the error as an "internal compiler error". In a released build, +// it prints an error asking to file a bug report. In development builds, it +// prints a stack trace. +// +// If -h has been specified, FatalfAt panics to force the usual runtime info dump. +func FatalfAt(pos src.XPos, format string, args ...interface{}) { + FlushErrors() + + if Debug.Panic != 0 || numErrors == 0 { + fmt.Printf("%v: internal compiler error: ", FmtPos(pos)) + fmt.Printf(format, args...) + fmt.Printf("\n") + + // If this is a released compiler version, ask for a bug report. + if Debug.Panic == 0 && strings.HasPrefix(buildcfg.Version, "go") { + fmt.Printf("\n") + fmt.Printf("Please file a bug report including a short program that triggers the error.\n") + fmt.Printf("https://go.dev/issue/new\n") + } else { + // Not a release; dump a stack trace, too. + fmt.Println() + os.Stdout.Write(debug.Stack()) + fmt.Println() + } + } + + hcrash() + ErrorExit() +} + +// Assert reports "assertion failed" with Fatalf, unless b is true. 
+func Assert(b bool) { + if !b { + Fatalf("assertion failed") + } +} + +// Assertf reports a fatal error with Fatalf, unless b is true. +func Assertf(b bool, format string, args ...interface{}) { + if !b { + Fatalf(format, args...) + } +} + +// AssertfAt reports a fatal error with FatalfAt, unless b is true. +func AssertfAt(b bool, pos src.XPos, format string, args ...interface{}) { + if !b { + FatalfAt(pos, format, args...) + } +} + +// hcrash crashes the compiler when -h is set, to find out where a message is generated. +func hcrash() { + if Flag.LowerH != 0 { + FlushErrors() + if Flag.LowerO != "" { + os.Remove(Flag.LowerO) + } + panic("-h") + } +} + +// ErrorExit handles an error-status exit. +// It flushes any pending errors, removes the output file, and exits. +func ErrorExit() { + FlushErrors() + if Flag.LowerO != "" { + os.Remove(Flag.LowerO) + } + os.Exit(2) +} + +// ExitIfErrors calls ErrorExit if any errors have been reported. +func ExitIfErrors() { + if Errors() > 0 { + ErrorExit() + } +} + +var AutogeneratedPos src.XPos diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/timings.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/timings.go new file mode 100644 index 0000000000000000000000000000000000000000..f48ac93699b816a3f43de3e527a45350f74bb2ba --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/base/timings.go @@ -0,0 +1,237 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package base + +import ( + "fmt" + "io" + "strings" + "time" +) + +var Timer Timings + +// Timings collects the execution times of labeled phases +// which are added through a sequence of Start/Stop calls. +// Events may be associated with each phase via AddEvent. 
+type Timings struct { + list []timestamp + events map[int][]*event // lazily allocated +} + +type timestamp struct { + time time.Time + label string + start bool +} + +type event struct { + size int64 // count or amount of data processed (allocations, data size, lines, funcs, ...) + unit string // unit of size measure (count, MB, lines, funcs, ...) +} + +func (t *Timings) append(labels []string, start bool) { + t.list = append(t.list, timestamp{time.Now(), strings.Join(labels, ":"), start}) +} + +// Start marks the beginning of a new phase and implicitly stops the previous phase. +// The phase name is the colon-separated concatenation of the labels. +func (t *Timings) Start(labels ...string) { + t.append(labels, true) +} + +// Stop marks the end of a phase and implicitly starts a new phase. +// The labels are added to the labels of the ended phase. +func (t *Timings) Stop(labels ...string) { + t.append(labels, false) +} + +// AddEvent associates an event, i.e., a count, or an amount of data, +// with the most recently started or stopped phase; or the very first +// phase if Start or Stop hasn't been called yet. The unit specifies +// the unit of measurement (e.g., MB, lines, no. of funcs, etc.). +func (t *Timings) AddEvent(size int64, unit string) { + m := t.events + if m == nil { + m = make(map[int][]*event) + t.events = m + } + i := len(t.list) + if i > 0 { + i-- + } + m[i] = append(m[i], &event{size, unit}) +} + +// Write prints the phase times to w. +// The prefix is printed at the start of each line. 
+func (t *Timings) Write(w io.Writer, prefix string) { + if len(t.list) > 0 { + var lines lines + + // group of phases with shared non-empty label prefix + var group struct { + label string // label prefix + tot time.Duration // accumulated phase time + size int // number of phases collected in group + } + + // accumulated time between Stop/Start timestamps + var unaccounted time.Duration + + // process Start/Stop timestamps + pt := &t.list[0] // previous timestamp + tot := t.list[len(t.list)-1].time.Sub(pt.time) + for i := 1; i < len(t.list); i++ { + qt := &t.list[i] // current timestamp + dt := qt.time.Sub(pt.time) + + var label string + var events []*event + if pt.start { + // previous phase started + label = pt.label + events = t.events[i-1] + if qt.start { + // start implicitly ended previous phase; nothing to do + } else { + // stop ended previous phase; append stop labels, if any + if qt.label != "" { + label += ":" + qt.label + } + // events associated with stop replace prior events + if e := t.events[i]; e != nil { + events = e + } + } + } else { + // previous phase stopped + if qt.start { + // between a stopped and started phase; unaccounted time + unaccounted += dt + } else { + // previous stop implicitly started current phase + label = qt.label + events = t.events[i] + } + } + if label != "" { + // add phase to existing group, or start a new group + l := commonPrefix(group.label, label) + if group.size == 1 && l != "" || group.size > 1 && l == group.label { + // add to existing group + group.label = l + group.tot += dt + group.size++ + } else { + // start a new group + if group.size > 1 { + lines.add(prefix+group.label+"subtotal", 1, group.tot, tot, nil) + } + group.label = label + group.tot = dt + group.size = 1 + } + + // write phase + lines.add(prefix+label, 1, dt, tot, events) + } + + pt = qt + } + + if group.size > 1 { + lines.add(prefix+group.label+"subtotal", 1, group.tot, tot, nil) + } + + if unaccounted != 0 { + lines.add(prefix+"unaccounted", 
1, unaccounted, tot, nil) + } + + lines.add(prefix+"total", 1, tot, tot, nil) + + lines.write(w) + } +} + +func commonPrefix(a, b string) string { + i := 0 + for i < len(a) && i < len(b) && a[i] == b[i] { + i++ + } + return a[:i] +} + +type lines [][]string + +func (lines *lines) add(label string, n int, dt, tot time.Duration, events []*event) { + var line []string + add := func(format string, args ...interface{}) { + line = append(line, fmt.Sprintf(format, args...)) + } + + add("%s", label) + add(" %d", n) + add(" %d ns/op", dt) + add(" %.2f %%", float64(dt)/float64(tot)*100) + + for _, e := range events { + add(" %d", e.size) + add(" %s", e.unit) + add(" %d", int64(float64(e.size)/dt.Seconds()+0.5)) + add(" %s/s", e.unit) + } + + *lines = append(*lines, line) +} + +func (lines lines) write(w io.Writer) { + // determine column widths and contents + var widths []int + var number []bool + for _, line := range lines { + for i, col := range line { + if i < len(widths) { + if len(col) > widths[i] { + widths[i] = len(col) + } + } else { + widths = append(widths, len(col)) + number = append(number, isnumber(col)) // first line determines column contents + } + } + } + + // make column widths a multiple of align for more stable output + const align = 1 // set to a value > 1 to enable + if align > 1 { + for i, w := range widths { + w += align - 1 + widths[i] = w - w%align + } + } + + // print lines taking column widths and contents into account + for _, line := range lines { + for i, col := range line { + format := "%-*s" + if number[i] { + format = "%*s" // numbers are right-aligned + } + fmt.Fprintf(w, format, widths[i], col) + } + fmt.Fprintln(w) + } +} + +func isnumber(s string) bool { + for _, ch := range s { + if ch <= ' ' { + continue // ignore leading whitespace + } + return '0' <= ch && ch <= '9' || ch == '.' 
|| ch == '-' || ch == '+' + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/bitvec/bv.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/bitvec/bv.go new file mode 100644 index 0000000000000000000000000000000000000000..ad7ed0a1965e9c064e2dddf0f19125dfd4fe7ed4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/bitvec/bv.go @@ -0,0 +1,201 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bitvec + +import ( + "math/bits" + + "cmd/compile/internal/base" +) + +const ( + wordBits = 32 + wordMask = wordBits - 1 + wordShift = 5 +) + +// A BitVec is a bit vector. +type BitVec struct { + N int32 // number of bits in vector + B []uint32 // words holding bits +} + +func New(n int32) BitVec { + nword := (n + wordBits - 1) / wordBits + return BitVec{n, make([]uint32, nword)} +} + +type Bulk struct { + words []uint32 + nbit int32 + nword int32 +} + +func NewBulk(nbit int32, count int32) Bulk { + nword := (nbit + wordBits - 1) / wordBits + size := int64(nword) * int64(count) + if int64(int32(size*4)) != size*4 { + base.Fatalf("NewBulk too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size) + } + return Bulk{ + words: make([]uint32, size), + nbit: nbit, + nword: nword, + } +} + +func (b *Bulk) Next() BitVec { + out := BitVec{b.nbit, b.words[:b.nword]} + b.words = b.words[b.nword:] + return out +} + +func (bv1 BitVec) Eq(bv2 BitVec) bool { + if bv1.N != bv2.N { + base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.N, bv2.N) + } + for i, x := range bv1.B { + if x != bv2.B[i] { + return false + } + } + return true +} + +func (dst BitVec) Copy(src BitVec) { + copy(dst.B, src.B) +} + +func (bv BitVec) Get(i int32) bool { + if i < 0 || i >= bv.N { + base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.N) + } + mask := uint32(1 << uint(i%wordBits)) + 
return bv.B[i>>wordShift]&mask != 0 +} + +func (bv BitVec) Set(i int32) { + if i < 0 || i >= bv.N { + base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.N) + } + mask := uint32(1 << uint(i%wordBits)) + bv.B[i/wordBits] |= mask +} + +func (bv BitVec) Unset(i int32) { + if i < 0 || i >= bv.N { + base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.N) + } + mask := uint32(1 << uint(i%wordBits)) + bv.B[i/wordBits] &^= mask +} + +// bvnext returns the smallest index >= i for which bvget(bv, i) == 1. +// If there is no such index, bvnext returns -1. +func (bv BitVec) Next(i int32) int32 { + if i >= bv.N { + return -1 + } + + // Jump i ahead to next word with bits. + if bv.B[i>>wordShift]>>uint(i&wordMask) == 0 { + i &^= wordMask + i += wordBits + for i < bv.N && bv.B[i>>wordShift] == 0 { + i += wordBits + } + } + + if i >= bv.N { + return -1 + } + + // Find 1 bit. + w := bv.B[i>>wordShift] >> uint(i&wordMask) + i += int32(bits.TrailingZeros32(w)) + + return i +} + +func (bv BitVec) IsEmpty() bool { + for _, x := range bv.B { + if x != 0 { + return false + } + } + return true +} + +func (bv BitVec) Count() int { + n := 0 + for _, x := range bv.B { + n += bits.OnesCount32(x) + } + return n +} + +func (bv BitVec) Not() { + for i, x := range bv.B { + bv.B[i] = ^x + } + if bv.N%wordBits != 0 { + bv.B[len(bv.B)-1] &= 1< 1 { + align := t.Alignment() + if off := t.Field(start).Offset; off&(align-1) != 0 { + // Offset is less aligned than the containing type. + // Use offset to determine alignment. + align = 1 << uint(bits.TrailingZeros64(uint64(off))) + } + size := t.Field(next).End() - t.Field(start).Offset + if size > align { + break + } + } + } + return t.Field(next-1).End() - t.Field(start).Offset, next +} + +// EqCanPanic reports whether == on type t could panic (has an interface somewhere). +// t must be comparable. 
+func EqCanPanic(t *types.Type) bool { + switch t.Kind() { + default: + return false + case types.TINTER: + return true + case types.TARRAY: + return EqCanPanic(t.Elem()) + case types.TSTRUCT: + for _, f := range t.Fields() { + if !f.Sym.IsBlank() && EqCanPanic(f.Type) { + return true + } + } + return false + } +} + +// EqStructCost returns the cost of an equality comparison of two structs. +// +// The cost is determined using an algorithm which takes into consideration +// the size of the registers in the current architecture and the size of the +// memory-only fields in the struct. +func EqStructCost(t *types.Type) int64 { + cost := int64(0) + + for i, fields := 0, t.Fields(); i < len(fields); { + f := fields[i] + + // Skip blank-named fields. + if f.Sym.IsBlank() { + i++ + continue + } + + n, _, next := eqStructFieldCost(t, i) + + cost += n + i = next + } + + return cost +} + +// eqStructFieldCost returns the cost of an equality comparison of two struct fields. +// t is the parent struct type, and i is the index of the field in the parent struct type. +// eqStructFieldCost may compute the cost of several adjacent fields at once. It returns +// the cost, the size of the set of fields it computed the cost for (in bytes), and the +// index of the first field not part of the set of fields for which the cost +// has already been calculated. +func eqStructFieldCost(t *types.Type, i int) (int64, int64, int) { + var ( + cost = int64(0) + regSize = int64(types.RegSize) + + size int64 + next int + ) + + if base.Ctxt.Arch.CanMergeLoads { + // If we can merge adjacent loads then we can calculate the cost of the + // comparison using the size of the memory run and the size of the registers. 
+ size, next = Memrun(t, i) + cost = size / regSize + if size%regSize != 0 { + cost++ + } + return cost, size, next + } + + // If we cannot merge adjacent loads then we have to use the size of the + // field and take into account the type to determine how many loads and compares + // are needed. + ft := t.Field(i).Type + size = ft.Size() + next = i + 1 + + return calculateCostForType(ft), size, next +} + +func calculateCostForType(t *types.Type) int64 { + var cost int64 + switch t.Kind() { + case types.TSTRUCT: + return EqStructCost(t) + case types.TSLICE: + // Slices are not comparable. + base.Fatalf("calculateCostForType: unexpected slice type") + case types.TARRAY: + elemCost := calculateCostForType(t.Elem()) + cost = t.NumElem() * elemCost + case types.TSTRING, types.TINTER, types.TCOMPLEX64, types.TCOMPLEX128: + cost = 2 + case types.TINT64, types.TUINT64: + cost = 8 / int64(types.RegSize) + default: + cost = 1 + } + return cost +} + +// EqStruct compares two structs np and nq for equality. +// It works by building a list of boolean conditions to satisfy. +// Conditions must be evaluated in the returned order and +// properly short-circuited by the caller. +// The first return value is the flattened list of conditions, +// the second value is a boolean indicating whether any of the +// comparisons could panic. +func EqStruct(t *types.Type, np, nq ir.Node) ([]ir.Node, bool) { + // The conditions are a list-of-lists. Conditions are reorderable + // within each inner list. The outer lists must be evaluated in order. + var conds [][]ir.Node + conds = append(conds, []ir.Node{}) + and := func(n ir.Node) { + i := len(conds) - 1 + conds[i] = append(conds[i], n) + } + + // Walk the struct using memequal for runs of AMEM + // and calling specific equality tests for the others. + for i, fields := 0, t.Fields(); i < len(fields); { + f := fields[i] + + // Skip blank-named fields. 
+ if f.Sym.IsBlank() { + i++ + continue + } + + typeCanPanic := EqCanPanic(f.Type) + + // Compare non-memory fields with field equality. + if !IsRegularMemory(f.Type) { + if typeCanPanic { + // Enforce ordering by starting a new set of reorderable conditions. + conds = append(conds, []ir.Node{}) + } + switch { + case f.Type.IsString(): + p := typecheck.DotField(base.Pos, typecheck.Expr(np), i) + q := typecheck.DotField(base.Pos, typecheck.Expr(nq), i) + eqlen, eqmem := EqString(p, q) + and(eqlen) + and(eqmem) + default: + and(eqfield(np, nq, i)) + } + if typeCanPanic { + // Also enforce ordering after something that can panic. + conds = append(conds, []ir.Node{}) + } + i++ + continue + } + + cost, size, next := eqStructFieldCost(t, i) + if cost <= 4 { + // Cost of 4 or less: use plain field equality. + for j := i; j < next; j++ { + and(eqfield(np, nq, j)) + } + } else { + // Higher cost: use memequal. + cc := eqmem(np, nq, i, size) + and(cc) + } + i = next + } + + // Sort conditions to put runtime calls last. + // Preserve the rest of the ordering. + var flatConds []ir.Node + for _, c := range conds { + isCall := func(n ir.Node) bool { + return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC + } + sort.SliceStable(c, func(i, j int) bool { + return !isCall(c[i]) && isCall(c[j]) + }) + flatConds = append(flatConds, c...) + } + return flatConds, len(conds) > 1 +} + +// EqString returns the nodes +// +// len(s) == len(t) +// +// and +// +// memequal(s.ptr, t.ptr, len(s)) +// +// which can be used to construct string equality comparison. +// eqlen must be evaluated before eqmem, and shortcircuiting is required. 
+func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) { + s = typecheck.Conv(s, types.Types[types.TSTRING]) + t = typecheck.Conv(t, types.Types[types.TSTRING]) + sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, s) + tptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, t) + slen := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, s), types.Types[types.TUINTPTR]) + tlen := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, t), types.Types[types.TUINTPTR]) + + // Pick the 3rd arg to memequal. Both slen and tlen are fine to use, because we short + // circuit the memequal call if they aren't the same. But if one is a constant some + // memequal optimizations are easier to apply. + probablyConstant := func(n ir.Node) bool { + if n.Op() == ir.OCONVNOP { + n = n.(*ir.ConvExpr).X + } + if n.Op() == ir.OLITERAL { + return true + } + if n.Op() != ir.ONAME { + return false + } + name := n.(*ir.Name) + if name.Class != ir.PAUTO { + return false + } + if def := name.Defn; def == nil { + // n starts out as the empty string + return true + } else if def.Op() == ir.OAS && (def.(*ir.AssignStmt).Y == nil || def.(*ir.AssignStmt).Y.Op() == ir.OLITERAL) { + // n starts out as a constant string + return true + } + return false + } + cmplen := slen + if probablyConstant(t) && !probablyConstant(s) { + cmplen = tlen + } + + fn := typecheck.LookupRuntime("memequal", types.Types[types.TUINT8], types.Types[types.TUINT8]) + call := typecheck.Call(base.Pos, fn, []ir.Node{sptr, tptr, ir.Copy(cmplen)}, false).(*ir.CallExpr) + + cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, slen, tlen) + cmp = typecheck.Expr(cmp).(*ir.BinaryExpr) + cmp.SetType(types.Types[types.TBOOL]) + return cmp, call +} + +// EqInterface returns the nodes +// +// s.tab == t.tab (or s.typ == t.typ, as appropriate) +// +// and +// +// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate) +// +// which can be used to construct interface equality comparison. 
+// eqtab must be evaluated before eqdata, and shortcircuiting is required. +func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) { + if !types.Identical(s.Type(), t.Type()) { + base.Fatalf("EqInterface %v %v", s.Type(), t.Type()) + } + // func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool) + // func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool) + var fn ir.Node + if s.Type().IsEmptyInterface() { + fn = typecheck.LookupRuntime("efaceeq") + } else { + fn = typecheck.LookupRuntime("ifaceeq") + } + + stab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s) + ttab := ir.NewUnaryExpr(base.Pos, ir.OITAB, t) + sdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, s) + tdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, t) + sdata.SetType(types.Types[types.TUNSAFEPTR]) + tdata.SetType(types.Types[types.TUNSAFEPTR]) + sdata.SetTypecheck(1) + tdata.SetTypecheck(1) + + call := typecheck.Call(base.Pos, fn, []ir.Node{stab, sdata, tdata}, false).(*ir.CallExpr) + + cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, stab, ttab) + cmp = typecheck.Expr(cmp).(*ir.BinaryExpr) + cmp.SetType(types.Types[types.TBOOL]) + return cmp, call +} + +// eqfield returns the node +// +// p.field == q.field +func eqfield(p, q ir.Node, field int) ir.Node { + nx := typecheck.DotField(base.Pos, typecheck.Expr(p), field) + ny := typecheck.DotField(base.Pos, typecheck.Expr(q), field) + return typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.OEQ, nx, ny)) +} + +// eqmem returns the node +// +// memequal(&p.field, &q.field, size) +func eqmem(p, q ir.Node, field int, size int64) ir.Node { + nx := typecheck.Expr(typecheck.NodAddr(typecheck.DotField(base.Pos, p, field))) + ny := typecheck.Expr(typecheck.NodAddr(typecheck.DotField(base.Pos, q, field))) + + fn, needsize := eqmemfunc(size, nx.Type().Elem()) + call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) + call.Args.Append(nx) + call.Args.Append(ny) + if needsize { + call.Args.Append(ir.NewInt(base.Pos, size)) + } + + return call +} + +func eqmemfunc(size 
int64, t *types.Type) (fn *ir.Name, needsize bool) { + if !base.Ctxt.Arch.CanMergeLoads && t.Alignment() < int64(base.Ctxt.Arch.Alignment) && t.Alignment() < t.Size() { + // We can't use larger comparisons if the value might not be aligned + // enough for the larger comparison. See issues 46283 and 67160. + size = 0 + } + switch size { + case 1, 2, 4, 8, 16: + buf := fmt.Sprintf("memequal%d", int(size)*8) + return typecheck.LookupRuntime(buf, t, t), false + } + + return typecheck.LookupRuntime("memequal", t, t), true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/compare/compare_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/compare/compare_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2f761655094ff4560c723a1e9c3de040692c2ff3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/compare/compare_test.go @@ -0,0 +1,101 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package compare + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/src" + "cmd/internal/sys" + "testing" +) + +type typefn func() *types.Type + +func init() { + // These are the few constants that need to be initialized in order to use + // the types package without using the typecheck package by calling + // typecheck.InitUniverse() (the normal way to initialize the types package). 
+ types.PtrSize = 8 + types.RegSize = 8 + types.MaxWidth = 1 << 50 + typecheck.InitUniverse() + base.Ctxt = &obj.Link{Arch: &obj.LinkArch{Arch: &sys.Arch{Alignment: 1, CanMergeLoads: true}}} +} + +func TestEqStructCost(t *testing.T) { + repeat := func(n int, typ *types.Type) []*types.Type { + typs := make([]*types.Type, n) + for i := range typs { + typs[i] = typ + } + return typs + } + + tt := []struct { + name string + cost int64 + nonMergeLoadCost int64 + fieldTypes []*types.Type + }{ + {"struct without fields", 0, 0, nil}, + {"struct with 1 byte field", 1, 1, repeat(1, types.ByteType)}, + {"struct with 8 byte fields", 1, 8, repeat(8, types.ByteType)}, + {"struct with 16 byte fields", 2, 16, repeat(16, types.ByteType)}, + {"struct with 32 byte fields", 4, 32, repeat(32, types.ByteType)}, + {"struct with 2 int32 fields", 1, 2, repeat(2, types.Types[types.TINT32])}, + {"struct with 2 int32 fields and 1 int64", 2, 3, + []*types.Type{ + types.Types[types.TINT32], + types.Types[types.TINT32], + types.Types[types.TINT64], + }, + }, + {"struct with 1 int field and 1 string", 3, 3, + []*types.Type{ + types.Types[types.TINT64], + types.Types[types.TSTRING], + }, + }, + {"struct with 2 strings", 4, 4, repeat(2, types.Types[types.TSTRING])}, + {"struct with 1 large byte array field", 26, 101, + []*types.Type{ + types.NewArray(types.Types[types.TUINT16], 101), + }, + }, + {"struct with string array field", 4, 4, + []*types.Type{ + types.NewArray(types.Types[types.TSTRING], 2), + }, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + fields := make([]*types.Field, len(tc.fieldTypes)) + for i, ftyp := range tc.fieldTypes { + fields[i] = types.NewField(src.NoXPos, typecheck.LookupNum("f", i), ftyp) + } + typ := types.NewStruct(fields) + types.CalcSize(typ) + + want := tc.cost + base.Ctxt.Arch.CanMergeLoads = true + actual := EqStructCost(typ) + if actual != want { + t.Errorf("CanMergeLoads=true EqStructCost(%v) = %d, want %d", typ, actual, want) + } + + 
base.Ctxt.Arch.CanMergeLoads = false + want = tc.nonMergeLoadCost + actual = EqStructCost(typ) + if actual != want { + t.Errorf("CanMergeLoads=false EqStructCost(%v) = %d, want %d", typ, actual, want) + } + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/coverage/cover.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/coverage/cover.go new file mode 100644 index 0000000000000000000000000000000000000000..5320f004da60bddeb417ea853befe36f5a8aeb4b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/coverage/cover.go @@ -0,0 +1,200 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package coverage + +// This package contains support routines for coverage "fixup" in the +// compiler, which happens when compiling a package whose source code +// has been run through "cmd/cover" to add instrumentation. The two +// important entry points are FixupVars (called prior to package init +// generation) and FixupInit (called following package init +// generation). + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/objabi" + "internal/coverage" + "strconv" + "strings" +) + +// names records state information collected in the first fixup +// phase so that it can be passed to the second fixup phase. +type names struct { + MetaVar *ir.Name + PkgIdVar *ir.Name + InitFn *ir.Func + CounterMode coverage.CounterMode + CounterGran coverage.CounterGranularity +} + +// Fixup adds calls to the pkg init function as appropriate to +// register coverage-related variables with the runtime. +// +// It also reclassifies selected variables (for example, tagging +// coverage counter variables with flags so that they can be handled +// properly downstream). 
+func Fixup() { + if base.Flag.Cfg.CoverageInfo == nil { + return // not using coverage + } + + metaVarName := base.Flag.Cfg.CoverageInfo.MetaVar + pkgIdVarName := base.Flag.Cfg.CoverageInfo.PkgIdVar + counterMode := base.Flag.Cfg.CoverageInfo.CounterMode + counterGran := base.Flag.Cfg.CoverageInfo.CounterGranularity + counterPrefix := base.Flag.Cfg.CoverageInfo.CounterPrefix + var metavar *ir.Name + var pkgidvar *ir.Name + + ckTypSanity := func(nm *ir.Name, tag string) { + if nm.Type() == nil || nm.Type().HasPointers() { + base.Fatalf("unsuitable %s %q mentioned in coveragecfg, improper type '%v'", tag, nm.Sym().Name, nm.Type()) + } + } + + for _, nm := range typecheck.Target.Externs { + s := nm.Sym() + switch s.Name { + case metaVarName: + metavar = nm + ckTypSanity(nm, "metavar") + nm.MarkReadonly() + continue + case pkgIdVarName: + pkgidvar = nm + ckTypSanity(nm, "pkgidvar") + nm.SetCoverageAuxVar(true) + s := nm.Linksym() + s.Type = objabi.SCOVERAGE_AUXVAR + continue + } + if strings.HasPrefix(s.Name, counterPrefix) { + ckTypSanity(nm, "countervar") + nm.SetCoverageCounter(true) + s := nm.Linksym() + s.Type = objabi.SCOVERAGE_COUNTER + } + } + cm := coverage.ParseCounterMode(counterMode) + if cm == coverage.CtrModeInvalid { + base.Fatalf("bad setting %q for covermode in coveragecfg:", + counterMode) + } + var cg coverage.CounterGranularity + switch counterGran { + case "perblock": + cg = coverage.CtrGranularityPerBlock + case "perfunc": + cg = coverage.CtrGranularityPerFunc + default: + base.Fatalf("bad setting %q for covergranularity in coveragecfg:", + counterGran) + } + + cnames := names{ + MetaVar: metavar, + PkgIdVar: pkgidvar, + CounterMode: cm, + CounterGran: cg, + } + + for _, fn := range typecheck.Target.Funcs { + if ir.FuncName(fn) == "init" { + cnames.InitFn = fn + break + } + } + if cnames.InitFn == nil { + panic("unexpected (no init func for -cover build)") + } + + hashv, len := metaHashAndLen() + if cnames.CounterMode != coverage.CtrModeTestMain 
{ + registerMeta(cnames, hashv, len) + } + if base.Ctxt.Pkgpath == "main" { + addInitHookCall(cnames.InitFn, cnames.CounterMode) + } +} + +func metaHashAndLen() ([16]byte, int) { + + // Read meta-data hash from config entry. + mhash := base.Flag.Cfg.CoverageInfo.MetaHash + if len(mhash) != 32 { + base.Fatalf("unexpected: got metahash length %d want 32", len(mhash)) + } + var hv [16]byte + for i := 0; i < 16; i++ { + nib := string(mhash[i*2 : i*2+2]) + x, err := strconv.ParseInt(nib, 16, 32) + if err != nil { + base.Fatalf("metahash bad byte %q", nib) + } + hv[i] = byte(x) + } + + // Return hash and meta-data len + return hv, base.Flag.Cfg.CoverageInfo.MetaLen +} + +func registerMeta(cnames names, hashv [16]byte, mdlen int) { + // Materialize expression for hash (an array literal) + pos := cnames.InitFn.Pos() + elist := make([]ir.Node, 0, 16) + for i := 0; i < 16; i++ { + elem := ir.NewInt(base.Pos, int64(hashv[i])) + elist = append(elist, elem) + } + ht := types.NewArray(types.Types[types.TUINT8], 16) + hashx := ir.NewCompLitExpr(pos, ir.OCOMPLIT, ht, elist) + + // Materalize expression corresponding to address of the meta-data symbol. + mdax := typecheck.NodAddr(cnames.MetaVar) + mdauspx := typecheck.ConvNop(mdax, types.Types[types.TUNSAFEPTR]) + + // Materialize expression for length. + lenx := ir.NewInt(base.Pos, int64(mdlen)) // untyped + + // Generate a call to runtime.addCovMeta, e.g. 
+ // + // pkgIdVar = runtime.addCovMeta(&sym, len, hash, pkgpath, pkid, cmode, cgran) + // + fn := typecheck.LookupRuntime("addCovMeta") + pkid := coverage.HardCodedPkgID(base.Ctxt.Pkgpath) + pkIdNode := ir.NewInt(base.Pos, int64(pkid)) + cmodeNode := ir.NewInt(base.Pos, int64(cnames.CounterMode)) + cgranNode := ir.NewInt(base.Pos, int64(cnames.CounterGran)) + pkPathNode := ir.NewString(base.Pos, base.Ctxt.Pkgpath) + callx := typecheck.Call(pos, fn, []ir.Node{mdauspx, lenx, hashx, + pkPathNode, pkIdNode, cmodeNode, cgranNode}, false) + assign := callx + if pkid == coverage.NotHardCoded { + assign = typecheck.Stmt(ir.NewAssignStmt(pos, cnames.PkgIdVar, callx)) + } + + // Tack the call onto the start of our init function. We do this + // early in the init since it's possible that instrumented function + // bodies (with counter updates) might be inlined into init. + cnames.InitFn.Body.Prepend(assign) +} + +// addInitHookCall generates a call to runtime/coverage.initHook() and +// inserts it into the package main init function, which will kick off +// the process for coverage data writing (emit meta data, and register +// an exit hook to emit counter data). +func addInitHookCall(initfn *ir.Func, cmode coverage.CounterMode) { + typecheck.InitCoverage() + pos := initfn.Pos() + istest := cmode == coverage.CtrModeTestMain + initf := typecheck.LookupCoverage("initHook") + istestNode := ir.NewBool(base.Pos, istest) + args := []ir.Node{istestNode} + callx := typecheck.Call(pos, initf, args, false) + initfn.Body.Append(callx) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/devirtualize/devirtualize.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/devirtualize/devirtualize.go new file mode 100644 index 0000000000000000000000000000000000000000..5d1b9526271cb7f0a8ef9ff4fa0fa58825804b83 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/devirtualize/devirtualize.go @@ -0,0 +1,140 @@ +// Copyright 2020 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package devirtualize implements two "devirtualization" optimization passes: +// +// - "Static" devirtualization which replaces interface method calls with +// direct concrete-type method calls where possible. +// - "Profile-guided" devirtualization which replaces indirect calls with a +// conditional direct call to the hottest concrete callee from a profile, as +// well as a fallback using the original indirect call. +package devirtualize + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" +) + +// StaticCall devirtualizes the given call if possible when the concrete callee +// is available statically. +func StaticCall(call *ir.CallExpr) { + // For promoted methods (including value-receiver methods promoted + // to pointer-receivers), the interface method wrapper may contain + // expressions that can panic (e.g., ODEREF, ODOTPTR, + // ODOTINTER). Devirtualization involves inlining these expressions + // (and possible panics) to the call site. This normally isn't a + // problem, but for go/defer statements it can move the panic from + // when/where the call executes to the go/defer statement itself, + // which is a visible change in semantics (e.g., #52072). To prevent + // this, we skip devirtualizing calls within go/defer statements + // altogether. + if call.GoDefer { + return + } + + if call.Op() != ir.OCALLINTER { + return + } + + sel := call.Fun.(*ir.SelectorExpr) + r := ir.StaticValue(sel.X) + if r.Op() != ir.OCONVIFACE { + return + } + recv := r.(*ir.ConvExpr) + + typ := recv.X.Type() + if typ.IsInterface() { + return + } + + // If typ is a shape type, then it was a type argument originally + // and we'd need an indirect call through the dictionary anyway. + // We're unable to devirtualize this call. 
+ if typ.IsShape() { + return + } + + // If typ *has* a shape type, then it's a shaped, instantiated + // type like T[go.shape.int], and its methods (may) have an extra + // dictionary parameter. We could devirtualize this call if we + // could derive an appropriate dictionary argument. + // + // TODO(mdempsky): If typ has has a promoted non-generic method, + // then that method won't require a dictionary argument. We could + // still devirtualize those calls. + // + // TODO(mdempsky): We have the *runtime.itab in recv.TypeWord. It + // should be possible to compute the represented type's runtime + // dictionary from this (e.g., by adding a pointer from T[int]'s + // *runtime._type to .dict.T[int]; or by recognizing static + // references to go:itab.T[int],iface and constructing a direct + // reference to .dict.T[int]). + if typ.HasShape() { + if base.Flag.LowerM != 0 { + base.WarnfAt(call.Pos(), "cannot devirtualize %v: shaped receiver %v", call, typ) + } + return + } + + // Further, if sel.X's type has a shape type, then it's a shaped + // interface type. In this case, the (non-dynamic) TypeAssertExpr + // we construct below would attempt to create an itab + // corresponding to this shaped interface type; but the actual + // itab pointer in the interface value will correspond to the + // original (non-shaped) interface type instead. These are + // functionally equivalent, but they have distinct pointer + // identities, which leads to the type assertion failing. + // + // TODO(mdempsky): We know the type assertion here is safe, so we + // could instead set a flag so that walk skips the itab check. For + // now, punting is easy and safe. 
+ if sel.X.Type().HasShape() { + if base.Flag.LowerM != 0 { + base.WarnfAt(call.Pos(), "cannot devirtualize %v: shaped interface %v", call, sel.X.Type()) + } + return + } + + dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil) + dt.SetType(typ) + x := typecheck.XDotMethod(sel.Pos(), dt, sel.Sel, true) + switch x.Op() { + case ir.ODOTMETH: + if base.Flag.LowerM != 0 { + base.WarnfAt(call.Pos(), "devirtualizing %v to %v", sel, typ) + } + call.SetOp(ir.OCALLMETH) + call.Fun = x + case ir.ODOTINTER: + // Promoted method from embedded interface-typed field (#42279). + if base.Flag.LowerM != 0 { + base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", sel, typ) + } + call.SetOp(ir.OCALLINTER) + call.Fun = x + default: + base.FatalfAt(call.Pos(), "failed to devirtualize %v (%v)", x, x.Op()) + } + + // Duplicated logic from typecheck for function call return + // value types. + // + // Receiver parameter size may have changed; need to update + // call.Type to get correct stack offsets for result + // parameters. + types.CheckSize(x.Type()) + switch ft := x.Type(); ft.NumResults() { + case 0: + case 1: + call.SetType(ft.Result(0).Type) + default: + call.SetType(ft.ResultsTuple()) + } + + // Desugar OCALLMETH, if we created one (#57309). + typecheck.FixMethodCall(call) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/devirtualize/pgo.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/devirtualize/pgo.go new file mode 100644 index 0000000000000000000000000000000000000000..170bf746739e7ba6b9cd792fa538f712fb654a3e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/devirtualize/pgo.go @@ -0,0 +1,820 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package devirtualize + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/inline" + "cmd/compile/internal/ir" + "cmd/compile/internal/logopt" + "cmd/compile/internal/pgo" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/src" + "encoding/json" + "fmt" + "os" + "strings" +) + +// CallStat summarizes a single call site. +// +// This is used only for debug logging. +type CallStat struct { + Pkg string // base.Ctxt.Pkgpath + Pos string // file:line:col of call. + + Caller string // Linker symbol name of calling function. + + // Direct or indirect call. + Direct bool + + // For indirect calls, interface call or other indirect function call. + Interface bool + + // Total edge weight from this call site. + Weight int64 + + // Hottest callee from this call site, regardless of type + // compatibility. + Hottest string + HottestWeight int64 + + // Devirtualized callee if != "". + // + // Note that this may be different than Hottest because we apply + // type-check restrictions, which helps distinguish multiple calls on + // the same line. + Devirtualized string + DevirtualizedWeight int64 +} + +// ProfileGuided performs call devirtualization of indirect calls based on +// profile information. +// +// Specifically, it performs conditional devirtualization of interface calls or +// function value calls for the hottest callee. 
+// +// That is, for interface calls it performs a transformation like: +// +// type Iface interface { +// Foo() +// } +// +// type Concrete struct{} +// +// func (Concrete) Foo() {} +// +// func foo(i Iface) { +// i.Foo() +// } +// +// to: +// +// func foo(i Iface) { +// if c, ok := i.(Concrete); ok { +// c.Foo() +// } else { +// i.Foo() +// } +// } +// +// For function value calls it performs a transformation like: +// +// func Concrete() {} +// +// func foo(fn func()) { +// fn() +// } +// +// to: +// +// func foo(fn func()) { +// if internal/abi.FuncPCABIInternal(fn) == internal/abi.FuncPCABIInternal(Concrete) { +// Concrete() +// } else { +// fn() +// } +// } +// +// The primary benefit of this transformation is enabling inlining of the +// direct call. +func ProfileGuided(fn *ir.Func, p *pgo.Profile) { + ir.CurFunc = fn + + name := ir.LinkFuncName(fn) + + var jsonW *json.Encoder + if base.Debug.PGODebug >= 3 { + jsonW = json.NewEncoder(os.Stdout) + } + + var edit func(n ir.Node) ir.Node + edit = func(n ir.Node) ir.Node { + if n == nil { + return n + } + + ir.EditChildren(n, edit) + + call, ok := n.(*ir.CallExpr) + if !ok { + return n + } + + var stat *CallStat + if base.Debug.PGODebug >= 3 { + // Statistics about every single call. Handy for external data analysis. + // + // TODO(prattmic): Log via logopt? 
+ stat = constructCallStat(p, fn, name, call) + if stat != nil { + defer func() { + jsonW.Encode(&stat) + }() + } + } + + op := call.Op() + if op != ir.OCALLFUNC && op != ir.OCALLINTER { + return n + } + + if base.Debug.PGODebug >= 2 { + fmt.Printf("%v: PGO devirtualize considering call %v\n", ir.Line(call), call) + } + + if call.GoDefer { + if base.Debug.PGODebug >= 2 { + fmt.Printf("%v: can't PGO devirtualize go/defer call %v\n", ir.Line(call), call) + } + return n + } + + var newNode ir.Node + var callee *ir.Func + var weight int64 + switch op { + case ir.OCALLFUNC: + newNode, callee, weight = maybeDevirtualizeFunctionCall(p, fn, call) + case ir.OCALLINTER: + newNode, callee, weight = maybeDevirtualizeInterfaceCall(p, fn, call) + default: + panic("unreachable") + } + + if newNode == nil { + return n + } + + if stat != nil { + stat.Devirtualized = ir.LinkFuncName(callee) + stat.DevirtualizedWeight = weight + } + + return newNode + } + + ir.EditChildren(fn, edit) +} + +// Devirtualize interface call if possible and eligible. Returns the new +// ir.Node if call was devirtualized, and if so also the callee and weight of +// the devirtualized edge. +func maybeDevirtualizeInterfaceCall(p *pgo.Profile, fn *ir.Func, call *ir.CallExpr) (ir.Node, *ir.Func, int64) { + if base.Debug.PGODevirtualize < 1 { + return nil, nil, 0 + } + + // Bail if we do not have a hot callee. + callee, weight := findHotConcreteInterfaceCallee(p, fn, call) + if callee == nil { + return nil, nil, 0 + } + // Bail if we do not have a Type node for the hot callee. + ctyp := methodRecvType(callee) + if ctyp == nil { + return nil, nil, 0 + } + // Bail if we know for sure it won't inline. + if !shouldPGODevirt(callee) { + return nil, nil, 0 + } + // Bail if de-selected by PGO Hash. 
+ if !base.PGOHash.MatchPosWithInfo(call.Pos(), "devirt", nil) { + return nil, nil, 0 + } + + return rewriteInterfaceCall(call, fn, callee, ctyp), callee, weight +} + +// Devirtualize an indirect function call if possible and eligible. Returns the new +// ir.Node if call was devirtualized, and if so also the callee and weight of +// the devirtualized edge. +func maybeDevirtualizeFunctionCall(p *pgo.Profile, fn *ir.Func, call *ir.CallExpr) (ir.Node, *ir.Func, int64) { + if base.Debug.PGODevirtualize < 2 { + return nil, nil, 0 + } + + // Bail if this is a direct call; no devirtualization necessary. + callee := pgo.DirectCallee(call.Fun) + if callee != nil { + return nil, nil, 0 + } + + // Bail if we do not have a hot callee. + callee, weight := findHotConcreteFunctionCallee(p, fn, call) + if callee == nil { + return nil, nil, 0 + } + + // TODO(go.dev/issue/61577): Closures need the closure context passed + // via the context register. That requires extra plumbing that we + // haven't done yet. + if callee.OClosure != nil { + if base.Debug.PGODebug >= 3 { + fmt.Printf("callee %s is a closure, skipping\n", ir.FuncName(callee)) + } + return nil, nil, 0 + } + // runtime.memhash_varlen does not look like a closure, but it uses + // runtime.getclosureptr to access data encoded by callers, which are + // are generated by cmd/compile/internal/reflectdata.genhash. + if callee.Sym().Pkg.Path == "runtime" && callee.Sym().Name == "memhash_varlen" { + if base.Debug.PGODebug >= 3 { + fmt.Printf("callee %s is a closure (runtime.memhash_varlen), skipping\n", ir.FuncName(callee)) + } + return nil, nil, 0 + } + // TODO(prattmic): We don't properly handle methods as callees in two + // different dimensions: + // + // 1. Method expressions. e.g., + // + // var fn func(*os.File, []byte) (int, error) = (*os.File).Read + // + // In this case, typ will report *os.File as the receiver while + // ctyp reports it as the first argument. 
types.Identical ignores + // receiver parameters, so it treats these as different, even though + // they are still call compatible. + // + // 2. Method values. e.g., + // + // var f *os.File + // var fn func([]byte) (int, error) = f.Read + // + // types.Identical will treat these as compatible (since receiver + // parameters are ignored). However, in this case, we do not call + // (*os.File).Read directly. Instead, f is stored in closure context + // and we call the wrapper (*os.File).Read-fm. However, runtime/pprof + // hides wrappers from profiles, making it appear that there is a call + // directly to the method. We could recognize this pattern return the + // wrapper rather than the method. + // + // N.B. perf profiles will report wrapper symbols directly, so + // ideally we should support direct wrapper references as well. + if callee.Type().Recv() != nil { + if base.Debug.PGODebug >= 3 { + fmt.Printf("callee %s is a method, skipping\n", ir.FuncName(callee)) + } + return nil, nil, 0 + } + + // Bail if we know for sure it won't inline. + if !shouldPGODevirt(callee) { + return nil, nil, 0 + } + // Bail if de-selected by PGO Hash. + if !base.PGOHash.MatchPosWithInfo(call.Pos(), "devirt", nil) { + return nil, nil, 0 + } + + return rewriteFunctionCall(call, fn, callee), callee, weight +} + +// shouldPGODevirt checks if we should perform PGO devirtualization to the +// target function. +// +// PGO devirtualization is most valuable when the callee is inlined, so if it +// won't inline we can skip devirtualizing. 
+func shouldPGODevirt(fn *ir.Func) bool { + var reason string + if base.Flag.LowerM > 1 || logopt.Enabled() { + defer func() { + if reason != "" { + if base.Flag.LowerM > 1 { + fmt.Printf("%v: should not PGO devirtualize %v: %s\n", ir.Line(fn), ir.FuncName(fn), reason) + } + if logopt.Enabled() { + logopt.LogOpt(fn.Pos(), ": should not PGO devirtualize function", "pgo-devirtualize", ir.FuncName(fn), reason) + } + } + }() + } + + reason = inline.InlineImpossible(fn) + if reason != "" { + return false + } + + // TODO(prattmic): checking only InlineImpossible is very conservative, + // primarily excluding only functions with pragmas. We probably want to + // move in either direction. Either: + // + // 1. Don't even bother to check InlineImpossible, as it affects so few + // functions. + // + // 2. Or consider the function body (notably cost) to better determine + // if the function will actually inline. + + return true +} + +// constructCallStat builds an initial CallStat describing this call, for +// logging. If the call is devirtualized, the devirtualization fields should be +// updated. +func constructCallStat(p *pgo.Profile, fn *ir.Func, name string, call *ir.CallExpr) *CallStat { + switch call.Op() { + case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH: + default: + // We don't care about logging builtin functions. + return nil + } + + stat := CallStat{ + Pkg: base.Ctxt.Pkgpath, + Pos: ir.Line(call), + Caller: name, + } + + offset := pgo.NodeLineOffset(call, fn) + + hotter := func(e *pgo.IREdge) bool { + if stat.Hottest == "" { + return true + } + if e.Weight != stat.HottestWeight { + return e.Weight > stat.HottestWeight + } + // If weight is the same, arbitrarily sort lexicographally, as + // findHotConcreteCallee does. + return e.Dst.Name() < stat.Hottest + } + + // Sum of all edges from this callsite, regardless of callee. 
+ // For direct calls, this should be the same as the single edge + // weight (except for multiple calls on one line, which we + // can't distinguish). + callerNode := p.WeightedCG.IRNodes[name] + for _, edge := range callerNode.OutEdges { + if edge.CallSiteOffset != offset { + continue + } + stat.Weight += edge.Weight + if hotter(edge) { + stat.HottestWeight = edge.Weight + stat.Hottest = edge.Dst.Name() + } + } + + switch call.Op() { + case ir.OCALLFUNC: + stat.Interface = false + + callee := pgo.DirectCallee(call.Fun) + if callee != nil { + stat.Direct = true + if stat.Hottest == "" { + stat.Hottest = ir.LinkFuncName(callee) + } + } else { + stat.Direct = false + } + case ir.OCALLINTER: + stat.Direct = false + stat.Interface = true + case ir.OCALLMETH: + base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck") + } + + return &stat +} + +// copyInputs copies the inputs to a call: the receiver (for interface calls) +// or function value (for function value calls) and the arguments. These +// expressions are evaluated once and assigned to temporaries. +// +// The assignment statement is added to init and the copied receiver/fn +// expression and copied arguments expressions are returned. +func copyInputs(curfn *ir.Func, pos src.XPos, recvOrFn ir.Node, args []ir.Node, init *ir.Nodes) (ir.Node, []ir.Node) { + // Evaluate receiver/fn and argument expressions. The receiver/fn is + // used twice but we don't want to cause side effects twice. The + // arguments are used in two different calls and we can't trivially + // copy them. + // + // recvOrFn must be first in the assignment list as its side effects + // must be ordered before argument side effects. 
+ var lhs, rhs []ir.Node + newRecvOrFn := typecheck.TempAt(pos, curfn, recvOrFn.Type()) + lhs = append(lhs, newRecvOrFn) + rhs = append(rhs, recvOrFn) + + for _, arg := range args { + argvar := typecheck.TempAt(pos, curfn, arg.Type()) + + lhs = append(lhs, argvar) + rhs = append(rhs, arg) + } + + asList := ir.NewAssignListStmt(pos, ir.OAS2, lhs, rhs) + init.Append(typecheck.Stmt(asList)) + + return newRecvOrFn, lhs[1:] +} + +// retTemps returns a slice of temporaries to be used for storing result values from call. +func retTemps(curfn *ir.Func, pos src.XPos, call *ir.CallExpr) []ir.Node { + sig := call.Fun.Type() + var retvars []ir.Node + for _, ret := range sig.Results() { + retvars = append(retvars, typecheck.TempAt(pos, curfn, ret.Type)) + } + return retvars +} + +// condCall returns an ir.InlinedCallExpr that performs a call to thenCall if +// cond is true and elseCall if cond is false. The return variables of the +// InlinedCallExpr evaluate to the return values from the call. +func condCall(curfn *ir.Func, pos src.XPos, cond ir.Node, thenCall, elseCall *ir.CallExpr, init ir.Nodes) *ir.InlinedCallExpr { + // Doesn't matter whether we use thenCall or elseCall, they must have + // the same return types. + retvars := retTemps(curfn, pos, thenCall) + + var thenBlock, elseBlock ir.Nodes + if len(retvars) == 0 { + thenBlock.Append(thenCall) + elseBlock.Append(elseCall) + } else { + // Copy slice so edits in one location don't affect another. + thenRet := append([]ir.Node(nil), retvars...) + thenAsList := ir.NewAssignListStmt(pos, ir.OAS2, thenRet, []ir.Node{thenCall}) + thenBlock.Append(typecheck.Stmt(thenAsList)) + + elseRet := append([]ir.Node(nil), retvars...) 
+ elseAsList := ir.NewAssignListStmt(pos, ir.OAS2, elseRet, []ir.Node{elseCall}) + elseBlock.Append(typecheck.Stmt(elseAsList)) + } + + nif := ir.NewIfStmt(pos, cond, thenBlock, elseBlock) + nif.SetInit(init) + nif.Likely = true + + body := []ir.Node{typecheck.Stmt(nif)} + + // This isn't really an inlined call of course, but InlinedCallExpr + // makes handling reassignment of return values easier. + res := ir.NewInlinedCallExpr(pos, body, retvars) + res.SetType(thenCall.Type()) + res.SetTypecheck(1) + return res +} + +// rewriteInterfaceCall devirtualizes the given interface call using a direct +// method call to concretetyp. +func rewriteInterfaceCall(call *ir.CallExpr, curfn, callee *ir.Func, concretetyp *types.Type) ir.Node { + if base.Flag.LowerM != 0 { + fmt.Printf("%v: PGO devirtualizing interface call %v to %v\n", ir.Line(call), call.Fun, callee) + } + + // We generate an OINCALL of: + // + // var recv Iface + // + // var arg1 A1 + // var argN AN + // + // var ret1 R1 + // var retN RN + // + // recv, arg1, argN = recv expr, arg1 expr, argN expr + // + // t, ok := recv.(Concrete) + // if ok { + // ret1, retN = t.Method(arg1, ... argN) + // } else { + // ret1, retN = recv.Method(arg1, ... argN) + // } + // + // OINCALL retvars: ret1, ... retN + // + // This isn't really an inlined call of course, but InlinedCallExpr + // makes handling reassignment of return values easier. + // + // TODO(prattmic): This increases the size of the AST in the caller, + // making it less like to inline. We may want to compensate for this + // somehow. + + sel := call.Fun.(*ir.SelectorExpr) + method := sel.Sel + pos := call.Pos() + init := ir.TakeInit(call) + + recv, args := copyInputs(curfn, pos, sel.X, call.Args.Take(), &init) + + // Copy slice so edits in one location don't affect another. + argvars := append([]ir.Node(nil), args...) 
+ call.Args = argvars + + tmpnode := typecheck.TempAt(base.Pos, curfn, concretetyp) + tmpok := typecheck.TempAt(base.Pos, curfn, types.Types[types.TBOOL]) + + assert := ir.NewTypeAssertExpr(pos, recv, concretetyp) + + assertAsList := ir.NewAssignListStmt(pos, ir.OAS2, []ir.Node{tmpnode, tmpok}, []ir.Node{typecheck.Expr(assert)}) + init.Append(typecheck.Stmt(assertAsList)) + + concreteCallee := typecheck.XDotMethod(pos, tmpnode, method, true) + // Copy slice so edits in one location don't affect another. + argvars = append([]ir.Node(nil), argvars...) + concreteCall := typecheck.Call(pos, concreteCallee, argvars, call.IsDDD).(*ir.CallExpr) + + res := condCall(curfn, pos, tmpok, concreteCall, call, init) + + if base.Debug.PGODebug >= 3 { + fmt.Printf("PGO devirtualizing interface call to %+v. After: %+v\n", concretetyp, res) + } + + return res +} + +// rewriteFunctionCall devirtualizes the given OCALLFUNC using a direct +// function call to callee. +func rewriteFunctionCall(call *ir.CallExpr, curfn, callee *ir.Func) ir.Node { + if base.Flag.LowerM != 0 { + fmt.Printf("%v: PGO devirtualizing function call %v to %v\n", ir.Line(call), call.Fun, callee) + } + + // We generate an OINCALL of: + // + // var fn FuncType + // + // var arg1 A1 + // var argN AN + // + // var ret1 R1 + // var retN RN + // + // fn, arg1, argN = fn expr, arg1 expr, argN expr + // + // fnPC := internal/abi.FuncPCABIInternal(fn) + // concretePC := internal/abi.FuncPCABIInternal(concrete) + // + // if fnPC == concretePC { + // ret1, retN = concrete(arg1, ... argN) // Same closure context passed (TODO) + // } else { + // ret1, retN = fn(arg1, ... argN) + // } + // + // OINCALL retvars: ret1, ... retN + // + // This isn't really an inlined call of course, but InlinedCallExpr + // makes handling reassignment of return values easier. 
+ + pos := call.Pos() + init := ir.TakeInit(call) + + fn, args := copyInputs(curfn, pos, call.Fun, call.Args.Take(), &init) + + // Copy slice so edits in one location don't affect another. + argvars := append([]ir.Node(nil), args...) + call.Args = argvars + + // FuncPCABIInternal takes an interface{}, emulate that. This is needed + // for to ensure we get the MAKEFACE we need for SSA. + fnIface := typecheck.Expr(ir.NewConvExpr(pos, ir.OCONV, types.Types[types.TINTER], fn)) + calleeIface := typecheck.Expr(ir.NewConvExpr(pos, ir.OCONV, types.Types[types.TINTER], callee.Nname)) + + fnPC := ir.FuncPC(pos, fnIface, obj.ABIInternal) + concretePC := ir.FuncPC(pos, calleeIface, obj.ABIInternal) + + pcEq := typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.OEQ, fnPC, concretePC)) + + // TODO(go.dev/issue/61577): Handle callees that a closures and need a + // copy of the closure context from call. For now, we skip callees that + // are closures in maybeDevirtualizeFunctionCall. + if callee.OClosure != nil { + base.Fatalf("Callee is a closure: %+v", callee) + } + + // Copy slice so edits in one location don't affect another. + argvars = append([]ir.Node(nil), argvars...) + concreteCall := typecheck.Call(pos, callee.Nname, argvars, call.IsDDD).(*ir.CallExpr) + + res := condCall(curfn, pos, pcEq, concreteCall, call, init) + + if base.Debug.PGODebug >= 3 { + fmt.Printf("PGO devirtualizing function call to %+v. After: %+v\n", ir.FuncName(callee), res) + } + + return res +} + +// methodRecvType returns the type containing method fn. Returns nil if fn +// is not a method. +func methodRecvType(fn *ir.Func) *types.Type { + recv := fn.Nname.Type().Recv() + if recv == nil { + return nil + } + return recv.Type +} + +// interfaceCallRecvTypeAndMethod returns the type and the method of the interface +// used in an interface call. 
+func interfaceCallRecvTypeAndMethod(call *ir.CallExpr) (*types.Type, *types.Sym) { + if call.Op() != ir.OCALLINTER { + base.Fatalf("Call isn't OCALLINTER: %+v", call) + } + + sel, ok := call.Fun.(*ir.SelectorExpr) + if !ok { + base.Fatalf("OCALLINTER doesn't contain SelectorExpr: %+v", call) + } + + return sel.X.Type(), sel.Sel +} + +// findHotConcreteCallee returns the *ir.Func of the hottest callee of a call, +// if available, and its edge weight. extraFn can perform additional +// applicability checks on each candidate edge. If extraFn returns false, +// candidate will not be considered a valid callee candidate. +func findHotConcreteCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr, extraFn func(callerName string, callOffset int, candidate *pgo.IREdge) bool) (*ir.Func, int64) { + callerName := ir.LinkFuncName(caller) + callerNode := p.WeightedCG.IRNodes[callerName] + callOffset := pgo.NodeLineOffset(call, caller) + + var hottest *pgo.IREdge + + // Returns true if e is hotter than hottest. + // + // Naively this is just e.Weight > hottest.Weight, but because OutEdges + // has arbitrary iteration order, we need to apply additional sort + // criteria when e.Weight == hottest.Weight to ensure we have stable + // selection. + hotter := func(e *pgo.IREdge) bool { + if hottest == nil { + return true + } + if e.Weight != hottest.Weight { + return e.Weight > hottest.Weight + } + + // Now e.Weight == hottest.Weight, we must select on other + // criteria. + + // If only one edge has IR, prefer that one. + if (hottest.Dst.AST == nil) != (e.Dst.AST == nil) { + if e.Dst.AST != nil { + return true + } + return false + } + + // Arbitrary, but the callee names will always differ. Select + // the lexicographically first callee. + return e.Dst.Name() < hottest.Dst.Name() + } + + for _, e := range callerNode.OutEdges { + if e.CallSiteOffset != callOffset { + continue + } + + if !hotter(e) { + // TODO(prattmic): consider total caller weight? 
i.e., + // if the hottest callee is only 10% of the weight, + // maybe don't devirtualize? Similarly, if this is call + // is globally very cold, there is not much value in + // devirtualizing. + if base.Debug.PGODebug >= 2 { + fmt.Printf("%v: edge %s:%d -> %s (weight %d): too cold (hottest %d)\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight, hottest.Weight) + } + continue + } + + if e.Dst.AST == nil { + // Destination isn't visible from this package + // compilation. + // + // We must assume it implements the interface. + // + // We still record this as the hottest callee so far + // because we only want to return the #1 hottest + // callee. If we skip this then we'd return the #2 + // hottest callee. + if base.Debug.PGODebug >= 2 { + fmt.Printf("%v: edge %s:%d -> %s (weight %d) (missing IR): hottest so far\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight) + } + hottest = e + continue + } + + if extraFn != nil && !extraFn(callerName, callOffset, e) { + continue + } + + if base.Debug.PGODebug >= 2 { + fmt.Printf("%v: edge %s:%d -> %s (weight %d): hottest so far\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight) + } + hottest = e + } + + if hottest == nil { + if base.Debug.PGODebug >= 2 { + fmt.Printf("%v: call %s:%d: no hot callee\n", ir.Line(call), callerName, callOffset) + } + return nil, 0 + } + + if base.Debug.PGODebug >= 2 { + fmt.Printf("%v call %s:%d: hottest callee %s (weight %d)\n", ir.Line(call), callerName, callOffset, hottest.Dst.Name(), hottest.Weight) + } + return hottest.Dst.AST, hottest.Weight +} + +// findHotConcreteInterfaceCallee returns the *ir.Func of the hottest callee of an +// interface call, if available, and its edge weight. 
+func findHotConcreteInterfaceCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr) (*ir.Func, int64) { + inter, method := interfaceCallRecvTypeAndMethod(call) + + return findHotConcreteCallee(p, caller, call, func(callerName string, callOffset int, e *pgo.IREdge) bool { + ctyp := methodRecvType(e.Dst.AST) + if ctyp == nil { + // Not a method. + // TODO(prattmic): Support non-interface indirect calls. + if base.Debug.PGODebug >= 2 { + fmt.Printf("%v: edge %s:%d -> %s (weight %d): callee not a method\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight) + } + return false + } + + // If ctyp doesn't implement inter it is most likely from a + // different call on the same line + if !typecheck.Implements(ctyp, inter) { + // TODO(prattmic): this is overly strict. Consider if + // ctyp is a partial implementation of an interface + // that gets embedded in types that complete the + // interface. It would still be OK to devirtualize a + // call to this method. + // + // What we'd need to do is check that the function + // pointer in the itab matches the method we want, + // rather than doing a full type assertion. + if base.Debug.PGODebug >= 2 { + why := typecheck.ImplementsExplain(ctyp, inter) + fmt.Printf("%v: edge %s:%d -> %s (weight %d): %v doesn't implement %v (%s)\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight, ctyp, inter, why) + } + return false + } + + // If the method name is different it is most likely from a + // different call on the same line + if !strings.HasSuffix(e.Dst.Name(), "."+method.Name) { + if base.Debug.PGODebug >= 2 { + fmt.Printf("%v: edge %s:%d -> %s (weight %d): callee is a different method\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight) + } + return false + } + + return true + }) +} + +// findHotConcreteFunctionCallee returns the *ir.Func of the hottest callee of an +// indirect function call, if available, and its edge weight. 
+func findHotConcreteFunctionCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr) (*ir.Func, int64) { + typ := call.Fun.Type().Underlying() + + return findHotConcreteCallee(p, caller, call, func(callerName string, callOffset int, e *pgo.IREdge) bool { + ctyp := e.Dst.AST.Type().Underlying() + + // If ctyp doesn't match typ it is most likely from a different + // call on the same line. + // + // Note that we are comparing underlying types, as different + // defined types are OK. e.g., a call to a value of type + // net/http.HandlerFunc can be devirtualized to a function with + // the same underlying type. + if !types.Identical(typ, ctyp) { + if base.Debug.PGODebug >= 2 { + fmt.Printf("%v: edge %s:%d -> %s (weight %d): %v doesn't match %v\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight, ctyp, typ) + } + return false + } + + return true + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/devirtualize/pgo_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/devirtualize/pgo_test.go new file mode 100644 index 0000000000000000000000000000000000000000..84c96df12211d6a2ac60f22fa6a951d699d22790 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/devirtualize/pgo_test.go @@ -0,0 +1,217 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package devirtualize + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/pgo" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/src" + "testing" +) + +func init() { + // These are the few constants that need to be initialized in order to use + // the types package without using the typecheck package by calling + // typecheck.InitUniverse() (the normal way to initialize the types package). 
+ types.PtrSize = 8 + types.RegSize = 8 + types.MaxWidth = 1 << 50 + typecheck.InitUniverse() + base.Ctxt = &obj.Link{} + base.Debug.PGODebug = 3 +} + +func makePos(b *src.PosBase, line, col uint) src.XPos { + return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col)) +} + +type profileBuilder struct { + p *pgo.Profile +} + +func newProfileBuilder() *profileBuilder { + // findHotConcreteCallee only uses pgo.Profile.WeightedCG, so we're + // going to take a shortcut and only construct that. + return &profileBuilder{ + p: &pgo.Profile{ + WeightedCG: &pgo.IRGraph{ + IRNodes: make(map[string]*pgo.IRNode), + }, + }, + } +} + +// Profile returns the constructed profile. +func (p *profileBuilder) Profile() *pgo.Profile { + return p.p +} + +// NewNode creates a new IRNode and adds it to the profile. +// +// fn may be nil, in which case the node will set LinkerSymbolName. +func (p *profileBuilder) NewNode(name string, fn *ir.Func) *pgo.IRNode { + n := &pgo.IRNode{ + OutEdges: make(map[pgo.NamedCallEdge]*pgo.IREdge), + } + if fn != nil { + n.AST = fn + } else { + n.LinkerSymbolName = name + } + p.p.WeightedCG.IRNodes[name] = n + return n +} + +// Add a new call edge from caller to callee. +func addEdge(caller, callee *pgo.IRNode, offset int, weight int64) { + namedEdge := pgo.NamedCallEdge{ + CallerName: caller.Name(), + CalleeName: callee.Name(), + CallSiteOffset: offset, + } + irEdge := &pgo.IREdge{ + Src: caller, + Dst: callee, + CallSiteOffset: offset, + Weight: weight, + } + caller.OutEdges[namedEdge] = irEdge +} + +// Create a new struct type named structName with a method named methName and +// return the method. 
+func makeStructWithMethod(pkg *types.Pkg, structName, methName string) *ir.Func { + // type structName struct{} + structType := types.NewStruct(nil) + + // func (structName) methodName() + recv := types.NewField(src.NoXPos, typecheck.Lookup(structName), structType) + sig := types.NewSignature(recv, nil, nil) + fn := ir.NewFunc(src.NoXPos, src.NoXPos, pkg.Lookup(structName+"."+methName), sig) + + // Add the method to the struct. + structType.SetMethods([]*types.Field{types.NewField(src.NoXPos, typecheck.Lookup(methName), sig)}) + + return fn +} + +func TestFindHotConcreteInterfaceCallee(t *testing.T) { + p := newProfileBuilder() + + pkgFoo := types.NewPkg("example.com/foo", "foo") + basePos := src.NewFileBase("foo.go", "/foo.go") + + const ( + // Caller start line. + callerStart = 42 + + // The line offset of the call we care about. + callOffset = 1 + + // The line offset of some other call we don't care about. + wrongCallOffset = 2 + ) + + // type IFace interface { + // Foo() + // } + fooSig := types.NewSignature(types.FakeRecv(), nil, nil) + method := types.NewField(src.NoXPos, typecheck.Lookup("Foo"), fooSig) + iface := types.NewInterface([]*types.Field{method}) + + callerFn := ir.NewFunc(makePos(basePos, callerStart, 1), src.NoXPos, pkgFoo.Lookup("Caller"), types.NewSignature(nil, nil, nil)) + + hotCalleeFn := makeStructWithMethod(pkgFoo, "HotCallee", "Foo") + coldCalleeFn := makeStructWithMethod(pkgFoo, "ColdCallee", "Foo") + wrongLineCalleeFn := makeStructWithMethod(pkgFoo, "WrongLineCallee", "Foo") + wrongMethodCalleeFn := makeStructWithMethod(pkgFoo, "WrongMethodCallee", "Bar") + + callerNode := p.NewNode("example.com/foo.Caller", callerFn) + hotCalleeNode := p.NewNode("example.com/foo.HotCallee.Foo", hotCalleeFn) + coldCalleeNode := p.NewNode("example.com/foo.ColdCallee.Foo", coldCalleeFn) + wrongLineCalleeNode := p.NewNode("example.com/foo.WrongCalleeLine.Foo", wrongLineCalleeFn) + wrongMethodCalleeNode := 
p.NewNode("example.com/foo.WrongCalleeMethod.Foo", wrongMethodCalleeFn) + + hotMissingCalleeNode := p.NewNode("example.com/bar.HotMissingCallee.Foo", nil) + + addEdge(callerNode, wrongLineCalleeNode, wrongCallOffset, 100) // Really hot, but wrong line. + addEdge(callerNode, wrongMethodCalleeNode, callOffset, 100) // Really hot, but wrong method type. + addEdge(callerNode, hotCalleeNode, callOffset, 10) + addEdge(callerNode, coldCalleeNode, callOffset, 1) + + // Equal weight, but IR missing. + // + // N.B. example.com/bar sorts lexicographically before example.com/foo, + // so if the IR availability of hotCalleeNode doesn't get precedence, + // this would be mistakenly selected. + addEdge(callerNode, hotMissingCalleeNode, callOffset, 10) + + // IFace.Foo() + sel := typecheck.NewMethodExpr(src.NoXPos, iface, typecheck.Lookup("Foo")) + call := ir.NewCallExpr(makePos(basePos, callerStart+callOffset, 1), ir.OCALLINTER, sel, nil) + + gotFn, gotWeight := findHotConcreteInterfaceCallee(p.Profile(), callerFn, call) + if gotFn != hotCalleeFn { + t.Errorf("findHotConcreteInterfaceCallee func got %v want %v", gotFn, hotCalleeFn) + } + if gotWeight != 10 { + t.Errorf("findHotConcreteInterfaceCallee weight got %v want 10", gotWeight) + } +} + +func TestFindHotConcreteFunctionCallee(t *testing.T) { + // TestFindHotConcreteInterfaceCallee already covered basic weight + // comparisons, which is shared logic. Here we just test type signature + // disambiguation. + + p := newProfileBuilder() + + pkgFoo := types.NewPkg("example.com/foo", "foo") + basePos := src.NewFileBase("foo.go", "/foo.go") + + const ( + // Caller start line. + callerStart = 42 + + // The line offset of the call we care about. 
+ callOffset = 1 + ) + + callerFn := ir.NewFunc(makePos(basePos, callerStart, 1), src.NoXPos, pkgFoo.Lookup("Caller"), types.NewSignature(nil, nil, nil)) + + // func HotCallee() + hotCalleeFn := ir.NewFunc(src.NoXPos, src.NoXPos, pkgFoo.Lookup("HotCallee"), types.NewSignature(nil, nil, nil)) + + // func WrongCallee() bool + wrongCalleeFn := ir.NewFunc(src.NoXPos, src.NoXPos, pkgFoo.Lookup("WrongCallee"), types.NewSignature(nil, nil, + []*types.Field{ + types.NewField(src.NoXPos, nil, types.Types[types.TBOOL]), + }, + )) + + callerNode := p.NewNode("example.com/foo.Caller", callerFn) + hotCalleeNode := p.NewNode("example.com/foo.HotCallee", hotCalleeFn) + wrongCalleeNode := p.NewNode("example.com/foo.WrongCallee", wrongCalleeFn) + + addEdge(callerNode, wrongCalleeNode, callOffset, 100) // Really hot, but wrong function type. + addEdge(callerNode, hotCalleeNode, callOffset, 10) + + // var fn func() + name := ir.NewNameAt(src.NoXPos, typecheck.Lookup("fn"), types.NewSignature(nil, nil, nil)) + // fn() + call := ir.NewCallExpr(makePos(basePos, callerStart+callOffset, 1), ir.OCALL, name, nil) + + gotFn, gotWeight := findHotConcreteFunctionCallee(p.Profile(), callerFn, call) + if gotFn != hotCalleeFn { + t.Errorf("findHotConcreteFunctionCallee func got %v want %v", gotFn, hotCalleeFn) + } + if gotWeight != 10 { + t.Errorf("findHotConcreteFunctionCallee weight got %v want 10", gotWeight) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/dwarfgen/dwarf.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/dwarfgen/dwarf.go new file mode 100644 index 0000000000000000000000000000000000000000..e9553d118535e9c68d7dc931db4dbeefad0dba11 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/dwarfgen/dwarf.go @@ -0,0 +1,594 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package dwarfgen + +import ( + "bytes" + "flag" + "fmt" + "internal/buildcfg" + "sort" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/types" + "cmd/internal/dwarf" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" +) + +func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn obj.Func) (scopes []dwarf.Scope, inlcalls dwarf.InlCalls) { + fn := curfn.(*ir.Func) + + if fn.Nname != nil { + expect := fn.Linksym() + if fnsym.ABI() == obj.ABI0 { + expect = fn.LinksymABI(obj.ABI0) + } + if fnsym != expect { + base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect) + } + } + + // Back when there were two different *Funcs for a function, this code + // was not consistent about whether a particular *Node being processed + // was an ODCLFUNC or ONAME node. Partly this is because inlined function + // bodies have no ODCLFUNC node, which was it's own inconsistency. + // In any event, the handling of the two different nodes for DWARF purposes + // was subtly different, likely in unintended ways. CL 272253 merged the + // two nodes' Func fields, so that code sees the same *Func whether it is + // holding the ODCLFUNC or the ONAME. This resulted in changes in the + // DWARF output. To preserve the existing DWARF output and leave an + // intentional change for a future CL, this code does the following when + // fn.Op == ONAME: + // + // 1. Disallow use of createComplexVars in createDwarfVars. + // It was not possible to reach that code for an ONAME before, + // because the DebugInfo was set only on the ODCLFUNC Func. + // Calling into it in the ONAME case causes an index out of bounds panic. + // + // 2. Do not populate apdecls. fn.Func.Dcl was in the ODCLFUNC Func, + // not the ONAME Func. 
Populating apdecls for the ONAME case results + // in selected being populated after createSimpleVars is called in + // createDwarfVars, and then that causes the loop to skip all the entries + // in dcl, meaning that the RecordAutoType calls don't happen. + // + // These two adjustments keep toolstash -cmp working for now. + // Deciding the right answer is, as they say, future work. + // + // We can tell the difference between the old ODCLFUNC and ONAME + // cases by looking at the infosym.Name. If it's empty, DebugInfo is + // being called from (*obj.Link).populateDWARF, which used to use + // the ODCLFUNC. If it's non-empty (the name will end in $abstract), + // DebugInfo is being called from (*obj.Link).DwarfAbstractFunc, + // which used to use the ONAME form. + isODCLFUNC := infosym.Name == "" + + var apdecls []*ir.Name + // Populate decls for fn. + if isODCLFUNC { + for _, n := range fn.Dcl { + if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL + continue + } + switch n.Class { + case ir.PAUTO: + if !n.Used() { + // Text == nil -> generating abstract function + if fnsym.Func().Text != nil { + base.Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)") + } + continue + } + case ir.PPARAM, ir.PPARAMOUT: + default: + continue + } + apdecls = append(apdecls, n) + if n.Type().Kind() == types.TSSA { + // Can happen for TypeInt128 types. This only happens for + // spill locations, so not a huge deal. + continue + } + fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type())) + } + } + + decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn, apdecls) + + // For each type referenced by the functions auto vars but not + // already referenced by a dwarf var, attach an R_USETYPE relocation to + // the function symbol to insure that the type included in DWARF + // processing during linking. 
+ typesyms := []*obj.LSym{} + for t := range fnsym.Func().Autot { + typesyms = append(typesyms, t) + } + sort.Sort(obj.BySymName(typesyms)) + for _, sym := range typesyms { + r := obj.Addrel(infosym) + r.Sym = sym + r.Type = objabi.R_USETYPE + } + fnsym.Func().Autot = nil + + var varScopes []ir.ScopeID + for _, decl := range decls { + pos := declPos(decl) + varScopes = append(varScopes, findScope(fn.Marks, pos)) + } + + scopes = assembleScopes(fnsym, fn, dwarfVars, varScopes) + if base.Flag.GenDwarfInl > 0 { + inlcalls = assembleInlines(fnsym, dwarfVars) + } + return scopes, inlcalls +} + +func declPos(decl *ir.Name) src.XPos { + return decl.Canonical().Pos() +} + +// createDwarfVars process fn, returning a list of DWARF variables and the +// Nodes they represent. +func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var) { + // Collect a raw list of DWARF vars. + var vars []*dwarf.Var + var decls []*ir.Name + var selected ir.NameSet + + if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK { + decls, vars, selected = createComplexVars(fnsym, fn) + } else if fn.ABI == obj.ABIInternal && base.Flag.N != 0 && complexOK { + decls, vars, selected = createABIVars(fnsym, fn, apDecls) + } else { + decls, vars, selected = createSimpleVars(fnsym, apDecls) + } + if fn.DebugInfo != nil { + // Recover zero sized variables eliminated by the stackframe pass + for _, n := range fn.DebugInfo.(*ssa.FuncDebug).OptDcl { + if n.Class != ir.PAUTO { + continue + } + types.CalcSize(n.Type()) + if n.Type().Size() == 0 { + decls = append(decls, n) + vars = append(vars, createSimpleVar(fnsym, n)) + vars[len(vars)-1].StackOffset = 0 + fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type())) + } + } + } + + dcl := apDecls + if fnsym.WasInlined() { + dcl = preInliningDcls(fnsym) + } else { + // The backend's stackframe pass prunes away entries from the + // fn's Dcl list, including PARAMOUT 
nodes that correspond to + // output params passed in registers. Add back in these + // entries here so that we can process them properly during + // DWARF-gen. See issue 48573 for more details. + debugInfo := fn.DebugInfo.(*ssa.FuncDebug) + for _, n := range debugInfo.RegOutputParams { + if n.Class != ir.PPARAMOUT || !n.IsOutputParamInRegisters() { + panic("invalid ir.Name on debugInfo.RegOutputParams list") + } + dcl = append(dcl, n) + } + } + + // If optimization is enabled, the list above will typically be + // missing some of the original pre-optimization variables in the + // function (they may have been promoted to registers, folded into + // constants, dead-coded away, etc). Input arguments not eligible + // for SSA optimization are also missing. Here we add back in entries + // for selected missing vars. Note that the recipe below creates a + // conservative location. The idea here is that we want to + // communicate to the user that "yes, there is a variable named X + // in this function, but no, I don't have enough information to + // reliably report its contents." + // For non-SSA-able arguments, however, the correct information + // is known -- they have a single home on the stack. + for _, n := range dcl { + if selected.Has(n) { + continue + } + c := n.Sym().Name[0] + if c == '.' || n.Type().IsUntyped() { + continue + } + if n.Class == ir.PPARAM && !ssa.CanSSA(n.Type()) { + // SSA-able args get location lists, and may move in and + // out of registers, so those are handled elsewhere. + // Autos and named output params seem to get handled + // with VARDEF, which creates location lists. + // Args not of SSA-able type are treated here; they + // are homed on the stack in a single place for the + // entire call. 
+ vars = append(vars, createSimpleVar(fnsym, n)) + decls = append(decls, n) + continue + } + typename := dwarf.InfoPrefix + types.TypeSymName(n.Type()) + decls = append(decls, n) + abbrev := dwarf.DW_ABRV_AUTO_LOCLIST + isReturnValue := (n.Class == ir.PPARAMOUT) + if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT { + abbrev = dwarf.DW_ABRV_PARAM_LOCLIST + } + if n.Esc() == ir.EscHeap { + // The variable in question has been promoted to the heap. + // Its address is in n.Heapaddr. + // TODO(thanm): generate a better location expression + } + inlIndex := 0 + if base.Flag.GenDwarfInl > 1 { + if n.InlFormal() || n.InlLocal() { + inlIndex = posInlIndex(n.Pos()) + 1 + if n.InlFormal() { + abbrev = dwarf.DW_ABRV_PARAM_LOCLIST + } + } + } + declpos := base.Ctxt.InnermostPos(n.Pos()) + vars = append(vars, &dwarf.Var{ + Name: n.Sym().Name, + IsReturnValue: isReturnValue, + Abbrev: abbrev, + StackOffset: int32(n.FrameOffset()), + Type: base.Ctxt.Lookup(typename), + DeclFile: declpos.RelFilename(), + DeclLine: declpos.RelLine(), + DeclCol: declpos.RelCol(), + InlIndex: int32(inlIndex), + ChildIndex: -1, + DictIndex: n.DictIndex, + }) + // Record go type of to insure that it gets emitted by the linker. + fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type())) + } + + // Sort decls and vars. + sortDeclsAndVars(fn, decls, vars) + + return decls, vars +} + +// sortDeclsAndVars sorts the decl and dwarf var lists according to +// parameter declaration order, so as to insure that when a subprogram +// DIE is emitted, its parameter children appear in declaration order. +// Prior to the advent of the register ABI, sorting by frame offset +// would achieve this; with the register we now need to go back to the +// original function signature. 
+func sortDeclsAndVars(fn *ir.Func, decls []*ir.Name, vars []*dwarf.Var) { + paramOrder := make(map[*ir.Name]int) + idx := 1 + for _, f := range fn.Type().RecvParamsResults() { + if n, ok := f.Nname.(*ir.Name); ok { + paramOrder[n] = idx + idx++ + } + } + sort.Stable(varsAndDecls{decls, vars, paramOrder}) +} + +type varsAndDecls struct { + decls []*ir.Name + vars []*dwarf.Var + paramOrder map[*ir.Name]int +} + +func (v varsAndDecls) Len() int { + return len(v.decls) +} + +func (v varsAndDecls) Less(i, j int) bool { + nameLT := func(ni, nj *ir.Name) bool { + oi, foundi := v.paramOrder[ni] + oj, foundj := v.paramOrder[nj] + if foundi { + if foundj { + return oi < oj + } else { + return true + } + } + return false + } + return nameLT(v.decls[i], v.decls[j]) +} + +func (v varsAndDecls) Swap(i, j int) { + v.vars[i], v.vars[j] = v.vars[j], v.vars[i] + v.decls[i], v.decls[j] = v.decls[j], v.decls[i] +} + +// Given a function that was inlined at some point during the +// compilation, return a sorted list of nodes corresponding to the +// autos/locals in that function prior to inlining. If this is a +// function that is not local to the package being compiled, then the +// names of the variables may have been "versioned" to avoid conflicts +// with local vars; disregard this versioning when sorting. +func preInliningDcls(fnsym *obj.LSym) []*ir.Name { + fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Func) + var rdcl []*ir.Name + for _, n := range fn.Inl.Dcl { + c := n.Sym().Name[0] + // Avoid reporting "_" parameters, since if there are more than + // one, it can result in a collision later on, as in #23179. + if n.Sym().Name == "_" || c == '.' || n.Type().IsUntyped() { + continue + } + rdcl = append(rdcl, n) + } + return rdcl +} + +// createSimpleVars creates a DWARF entry for every variable declared in the +// function, claiming that they are permanently on the stack. 
+func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, ir.NameSet) { + var vars []*dwarf.Var + var decls []*ir.Name + var selected ir.NameSet + for _, n := range apDecls { + if ir.IsAutoTmp(n) { + continue + } + + decls = append(decls, n) + vars = append(vars, createSimpleVar(fnsym, n)) + selected.Add(n) + } + return decls, vars, selected +} + +func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { + var abbrev int + var offs int64 + + localAutoOffset := func() int64 { + offs = n.FrameOffset() + if base.Ctxt.Arch.FixedFrameSize == 0 { + offs -= int64(types.PtrSize) + } + if buildcfg.FramePointerEnabled { + offs -= int64(types.PtrSize) + } + return offs + } + + switch n.Class { + case ir.PAUTO: + offs = localAutoOffset() + abbrev = dwarf.DW_ABRV_AUTO + case ir.PPARAM, ir.PPARAMOUT: + abbrev = dwarf.DW_ABRV_PARAM + if n.IsOutputParamInRegisters() { + offs = localAutoOffset() + } else { + offs = n.FrameOffset() + base.Ctxt.Arch.FixedFrameSize + } + + default: + base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class, n) + } + + typename := dwarf.InfoPrefix + types.TypeSymName(n.Type()) + delete(fnsym.Func().Autot, reflectdata.TypeLinksym(n.Type())) + inlIndex := 0 + if base.Flag.GenDwarfInl > 1 { + if n.InlFormal() || n.InlLocal() { + inlIndex = posInlIndex(n.Pos()) + 1 + if n.InlFormal() { + abbrev = dwarf.DW_ABRV_PARAM + } + } + } + declpos := base.Ctxt.InnermostPos(declPos(n)) + return &dwarf.Var{ + Name: n.Sym().Name, + IsReturnValue: n.Class == ir.PPARAMOUT, + IsInlFormal: n.InlFormal(), + Abbrev: abbrev, + StackOffset: int32(offs), + Type: base.Ctxt.Lookup(typename), + DeclFile: declpos.RelFilename(), + DeclLine: declpos.RelLine(), + DeclCol: declpos.RelCol(), + InlIndex: int32(inlIndex), + ChildIndex: -1, + DictIndex: n.DictIndex, + } +} + +// createABIVars creates DWARF variables for functions in which the +// register ABI is enabled but optimization is turned off. 
It uses a +// hybrid approach in which register-resident input params are +// captured with location lists, and all other vars use the "simple" +// strategy. +func createABIVars(fnsym *obj.LSym, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, ir.NameSet) { + + // Invoke createComplexVars to generate dwarf vars for input parameters + // that are register-allocated according to the ABI rules. + decls, vars, selected := createComplexVars(fnsym, fn) + + // Now fill in the remainder of the variables: input parameters + // that are not register-resident, output parameters, and local + // variables. + for _, n := range apDecls { + if ir.IsAutoTmp(n) { + continue + } + if _, ok := selected[n]; ok { + // already handled + continue + } + + decls = append(decls, n) + vars = append(vars, createSimpleVar(fnsym, n)) + selected.Add(n) + } + + return decls, vars, selected +} + +// createComplexVars creates recomposed DWARF vars with location lists, +// suitable for describing optimized code. +func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, ir.NameSet) { + debugInfo := fn.DebugInfo.(*ssa.FuncDebug) + + // Produce a DWARF variable entry for each user variable. + var decls []*ir.Name + var vars []*dwarf.Var + var ssaVars ir.NameSet + + for varID, dvar := range debugInfo.Vars { + n := dvar + ssaVars.Add(n) + for _, slot := range debugInfo.VarSlots[varID] { + ssaVars.Add(debugInfo.Slots[slot].N) + } + + if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil { + decls = append(decls, n) + vars = append(vars, dvar) + } + } + + return decls, vars, ssaVars +} + +// createComplexVar builds a single DWARF variable entry and location list. 
+func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var { + debug := fn.DebugInfo.(*ssa.FuncDebug) + n := debug.Vars[varID] + + var abbrev int + switch n.Class { + case ir.PAUTO: + abbrev = dwarf.DW_ABRV_AUTO_LOCLIST + case ir.PPARAM, ir.PPARAMOUT: + abbrev = dwarf.DW_ABRV_PARAM_LOCLIST + default: + return nil + } + + gotype := reflectdata.TypeLinksym(n.Type()) + delete(fnsym.Func().Autot, gotype) + typename := dwarf.InfoPrefix + gotype.Name[len("type:"):] + inlIndex := 0 + if base.Flag.GenDwarfInl > 1 { + if n.InlFormal() || n.InlLocal() { + inlIndex = posInlIndex(n.Pos()) + 1 + if n.InlFormal() { + abbrev = dwarf.DW_ABRV_PARAM_LOCLIST + } + } + } + declpos := base.Ctxt.InnermostPos(n.Pos()) + dvar := &dwarf.Var{ + Name: n.Sym().Name, + IsReturnValue: n.Class == ir.PPARAMOUT, + IsInlFormal: n.InlFormal(), + Abbrev: abbrev, + Type: base.Ctxt.Lookup(typename), + // The stack offset is used as a sorting key, so for decomposed + // variables just give it the first one. It's not used otherwise. + // This won't work well if the first slot hasn't been assigned a stack + // location, but it's not obvious how to do better. + StackOffset: ssagen.StackOffset(debug.Slots[debug.VarSlots[varID][0]]), + DeclFile: declpos.RelFilename(), + DeclLine: declpos.RelLine(), + DeclCol: declpos.RelCol(), + InlIndex: int32(inlIndex), + ChildIndex: -1, + DictIndex: n.DictIndex, + } + list := debug.LocationLists[varID] + if len(list) != 0 { + dvar.PutLocationList = func(listSym, startPC dwarf.Sym) { + debug.PutLocationList(list, base.Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym)) + } + } + return dvar +} + +// RecordFlags records the specified command-line flags to be placed +// in the DWARF info. 
+func RecordFlags(flags ...string) { + if base.Ctxt.Pkgpath == "" { + panic("missing pkgpath") + } + + type BoolFlag interface { + IsBoolFlag() bool + } + type CountFlag interface { + IsCountFlag() bool + } + var cmd bytes.Buffer + for _, name := range flags { + f := flag.Lookup(name) + if f == nil { + continue + } + getter := f.Value.(flag.Getter) + if getter.String() == f.DefValue { + // Flag has default value, so omit it. + continue + } + if bf, ok := f.Value.(BoolFlag); ok && bf.IsBoolFlag() { + val, ok := getter.Get().(bool) + if ok && val { + fmt.Fprintf(&cmd, " -%s", f.Name) + continue + } + } + if cf, ok := f.Value.(CountFlag); ok && cf.IsCountFlag() { + val, ok := getter.Get().(int) + if ok && val == 1 { + fmt.Fprintf(&cmd, " -%s", f.Name) + continue + } + } + fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get()) + } + + // Adds flag to producer string signaling whether regabi is turned on or + // off. + // Once regabi is turned on across the board and the relative GOEXPERIMENT + // knobs no longer exist this code should be removed. + if buildcfg.Experiment.RegabiArgs { + cmd.Write([]byte(" regabi")) + } + + if cmd.Len() == 0 { + return + } + s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + base.Ctxt.Pkgpath) + s.Type = objabi.SDWARFCUINFO + // Sometimes (for example when building tests) we can link + // together two package main archives. So allow dups. + s.Set(obj.AttrDuplicateOK, true) + base.Ctxt.Data = append(base.Ctxt.Data, s) + s.P = cmd.Bytes()[1:] +} + +// RecordPackageName records the name of the package being +// compiled, so that the linker can save it in the compile unit's DIE. +func RecordPackageName() { + s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + base.Ctxt.Pkgpath) + s.Type = objabi.SDWARFCUINFO + // Sometimes (for example when building tests) we can link + // together two package main archives. So allow dups. 
+ s.Set(obj.AttrDuplicateOK, true) + base.Ctxt.Data = append(base.Ctxt.Data, s) + s.P = []byte(types.LocalPkg.Name) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/dwarfgen/dwinl.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/dwarfgen/dwinl.go new file mode 100644 index 0000000000000000000000000000000000000000..655e7c66ac24f8da8efe7d81156e73006c8ad365 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/dwarfgen/dwinl.go @@ -0,0 +1,441 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dwarfgen + +import ( + "fmt" + "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/internal/dwarf" + "cmd/internal/obj" + "cmd/internal/src" +) + +// To identify variables by original source position. +type varPos struct { + DeclName string + DeclFile string + DeclLine uint + DeclCol uint +} + +// This is the main entry point for collection of raw material to +// drive generation of DWARF "inlined subroutine" DIEs. See proposal +// 22080 for more details and background info. +func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls { + var inlcalls dwarf.InlCalls + + if base.Debug.DwarfInl != 0 { + base.Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name) + } + + // This maps inline index (from Ctxt.InlTree) to index in inlcalls.Calls + imap := make(map[int]int) + + // Walk progs to build up the InlCalls data structure + var prevpos src.XPos + for p := fnsym.Func().Text; p != nil; p = p.Link { + if p.Pos == prevpos { + continue + } + ii := posInlIndex(p.Pos) + if ii >= 0 { + insertInlCall(&inlcalls, ii, imap) + } + prevpos = p.Pos + } + + // This is used to partition DWARF vars by inline index. Vars not + // produced by the inliner will wind up in the vmap[0] entry. 
+ vmap := make(map[int32][]*dwarf.Var) + + // Now walk the dwarf vars and partition them based on whether they + // were produced by the inliner (dwv.InlIndex > 0) or were original + // vars/params from the function (dwv.InlIndex == 0). + for _, dwv := range dwVars { + + vmap[dwv.InlIndex] = append(vmap[dwv.InlIndex], dwv) + + // Zero index => var was not produced by an inline + if dwv.InlIndex == 0 { + continue + } + + // Look up index in our map, then tack the var in question + // onto the vars list for the correct inlined call. + ii := int(dwv.InlIndex) - 1 + idx, ok := imap[ii] + if !ok { + // We can occasionally encounter a var produced by the + // inliner for which there is no remaining prog; add a new + // entry to the call list in this scenario. + idx = insertInlCall(&inlcalls, ii, imap) + } + inlcalls.Calls[idx].InlVars = + append(inlcalls.Calls[idx].InlVars, dwv) + } + + // Post process the map above to assign child indices to vars. + // + // A given variable is treated differently depending on whether it + // is part of the top-level function (ii == 0) or if it was + // produced as a result of an inline (ii != 0). + // + // If a variable was not produced by an inline and its containing + // function was not inlined, then we just assign an ordering of + // based on variable name. + // + // If a variable was not produced by an inline and its containing + // function was inlined, then we need to assign a child index + // based on the order of vars in the abstract function (in + // addition, those vars that don't appear in the abstract + // function, such as "~r1", are flagged as such). + // + // If a variable was produced by an inline, then we locate it in + // the pre-inlining decls for the target function and assign child + // index accordingly. 
+ for ii, sl := range vmap { + var m map[varPos]int + if ii == 0 { + if !fnsym.WasInlined() { + for j, v := range sl { + v.ChildIndex = int32(j) + } + continue + } + m = makePreinlineDclMap(fnsym) + } else { + ifnlsym := base.Ctxt.InlTree.InlinedFunction(int(ii - 1)) + m = makePreinlineDclMap(ifnlsym) + } + + // Here we assign child indices to variables based on + // pre-inlined decls, and set the "IsInAbstract" flag + // appropriately. In addition: parameter and local variable + // names are given "middle dot" version numbers as part of the + // writing them out to export data (see issue 4326). If DWARF + // inlined routine generation is turned on, we want to undo + // this versioning, since DWARF variables in question will be + // parented by the inlined routine and not the top-level + // caller. + synthCount := len(m) + for _, v := range sl { + vp := varPos{ + DeclName: v.Name, + DeclFile: v.DeclFile, + DeclLine: v.DeclLine, + DeclCol: v.DeclCol, + } + synthesized := strings.HasPrefix(v.Name, "~") || v.Name == "_" + if idx, found := m[vp]; found { + v.ChildIndex = int32(idx) + v.IsInAbstract = !synthesized + } else { + // Variable can't be found in the pre-inline dcl list. + // In the top-level case (ii=0) this can happen + // because a composite variable was split into pieces, + // and we're looking at a piece. We can also see + // return temps (~r%d) that were created during + // lowering, or unnamed params ("_"). + v.ChildIndex = int32(synthCount) + synthCount++ + } + } + } + + // Make a second pass through the progs to compute PC ranges for + // the various inlined calls. 
+ start := int64(-1) + curii := -1 + var prevp *obj.Prog + for p := fnsym.Func().Text; p != nil; prevp, p = p, p.Link { + if prevp != nil && p.Pos == prevp.Pos { + continue + } + ii := posInlIndex(p.Pos) + if ii == curii { + continue + } + // Close out the current range + if start != -1 { + addRange(inlcalls.Calls, start, p.Pc, curii, imap) + } + // Begin new range + start = p.Pc + curii = ii + } + if start != -1 { + addRange(inlcalls.Calls, start, fnsym.Size, curii, imap) + } + + // Issue 33188: if II foo is a child of II bar, then ensure that + // bar's ranges include the ranges of foo (the loop above will produce + // disjoint ranges). + for k, c := range inlcalls.Calls { + if c.Root { + unifyCallRanges(inlcalls, k) + } + } + + // Debugging + if base.Debug.DwarfInl != 0 { + dumpInlCalls(inlcalls) + dumpInlVars(dwVars) + } + + // Perform a consistency check on inlined routine PC ranges + // produced by unifyCallRanges above. In particular, complain in + // cases where you have A -> B -> C (e.g. C is inlined into B, and + // B is inlined into A) and the ranges for B are not enclosed + // within the ranges for A, or C within B. + for k, c := range inlcalls.Calls { + if c.Root { + checkInlCall(fnsym.Name, inlcalls, fnsym.Size, k, -1) + } + } + + return inlcalls +} + +// Secondary hook for DWARF inlined subroutine generation. This is called +// late in the compilation when it is determined that we need an +// abstract function DIE for an inlined routine imported from a +// previously compiled package. 
+func AbstractFunc(fn *obj.LSym) { + ifn := base.Ctxt.DwFixups.GetPrecursorFunc(fn) + if ifn == nil { + base.Ctxt.Diag("failed to locate precursor fn for %v", fn) + return + } + _ = ifn.(*ir.Func) + if base.Debug.DwarfInl != 0 { + base.Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name) + } + base.Ctxt.DwarfAbstractFunc(ifn, fn) +} + +// Given a function that was inlined as part of the compilation, dig +// up the pre-inlining DCL list for the function and create a map that +// supports lookup of pre-inline dcl index, based on variable +// position/name. NB: the recipe for computing variable pos/file/line +// needs to be kept in sync with the similar code in gc.createSimpleVars +// and related functions. +func makePreinlineDclMap(fnsym *obj.LSym) map[varPos]int { + dcl := preInliningDcls(fnsym) + m := make(map[varPos]int) + for i, n := range dcl { + pos := base.Ctxt.InnermostPos(n.Pos()) + vp := varPos{ + DeclName: n.Sym().Name, + DeclFile: pos.RelFilename(), + DeclLine: pos.RelLine(), + DeclCol: pos.RelCol(), + } + if _, found := m[vp]; found { + // We can see collisions (variables with the same name/file/line/col) in obfuscated or machine-generated code -- see issue 44378 for an example. Skip duplicates in such cases, since it is unlikely that a human will be debugging such code. + continue + } + m[vp] = i + } + return m +} + +func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int { + callIdx, found := imap[inlIdx] + if found { + return callIdx + } + + // Haven't seen this inline yet. Visit parent of inline if there + // is one. We do this first so that parents appear before their + // children in the resulting table. 
+ parCallIdx := -1 + parInlIdx := base.Ctxt.InlTree.Parent(inlIdx) + if parInlIdx >= 0 { + parCallIdx = insertInlCall(dwcalls, parInlIdx, imap) + } + + // Create new entry for this inline + inlinedFn := base.Ctxt.InlTree.InlinedFunction(inlIdx) + callXPos := base.Ctxt.InlTree.CallPos(inlIdx) + callPos := base.Ctxt.InnermostPos(callXPos) + absFnSym := base.Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn) + ic := dwarf.InlCall{ + InlIndex: inlIdx, + CallPos: callPos, + AbsFunSym: absFnSym, + Root: parCallIdx == -1, + } + dwcalls.Calls = append(dwcalls.Calls, ic) + callIdx = len(dwcalls.Calls) - 1 + imap[inlIdx] = callIdx + + if parCallIdx != -1 { + // Add this inline to parent's child list + dwcalls.Calls[parCallIdx].Children = append(dwcalls.Calls[parCallIdx].Children, callIdx) + } + + return callIdx +} + +// Given a src.XPos, return its associated inlining index if it +// corresponds to something created as a result of an inline, or -1 if +// there is no inline info. Note that the index returned will refer to +// the deepest call in the inlined stack, e.g. if you have "A calls B +// calls C calls D" and all three callees are inlined (B, C, and D), +// the index for a node from the inlined body of D will refer to the +// call to D from C. Whew. 
+func posInlIndex(xpos src.XPos) int { + pos := base.Ctxt.PosTable.Pos(xpos) + if b := pos.Base(); b != nil { + ii := b.InliningIndex() + if ii >= 0 { + return ii + } + } + return -1 +} + +func addRange(calls []dwarf.InlCall, start, end int64, ii int, imap map[int]int) { + if start == -1 { + panic("bad range start") + } + if end == -1 { + panic("bad range end") + } + if ii == -1 { + return + } + if start == end { + return + } + // Append range to correct inlined call + callIdx, found := imap[ii] + if !found { + base.Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start) + } + call := &calls[callIdx] + call.Ranges = append(call.Ranges, dwarf.Range{Start: start, End: end}) +} + +func dumpInlCall(inlcalls dwarf.InlCalls, idx, ilevel int) { + for i := 0; i < ilevel; i++ { + base.Ctxt.Logf(" ") + } + ic := inlcalls.Calls[idx] + callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex) + base.Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name) + for _, f := range ic.InlVars { + base.Ctxt.Logf(" %v", f.Name) + } + base.Ctxt.Logf(" ) C: (") + for _, k := range ic.Children { + base.Ctxt.Logf(" %v", k) + } + base.Ctxt.Logf(" ) R:") + for _, r := range ic.Ranges { + base.Ctxt.Logf(" [%d,%d)", r.Start, r.End) + } + base.Ctxt.Logf("\n") + for _, k := range ic.Children { + dumpInlCall(inlcalls, k, ilevel+1) + } + +} + +func dumpInlCalls(inlcalls dwarf.InlCalls) { + for k, c := range inlcalls.Calls { + if c.Root { + dumpInlCall(inlcalls, k, 0) + } + } +} + +func dumpInlVars(dwvars []*dwarf.Var) { + for i, dwv := range dwvars { + typ := "local" + if dwv.Abbrev == dwarf.DW_ABRV_PARAM_LOCLIST || dwv.Abbrev == dwarf.DW_ABRV_PARAM { + typ = "param" + } + ia := 0 + if dwv.IsInAbstract { + ia = 1 + } + base.Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ) + } +} + +func rangesContains(par []dwarf.Range, rng dwarf.Range) (bool, string) { + for _, r := range par { + if rng.Start >= r.Start && rng.End <= r.End { + 
return true, "" + } + } + msg := fmt.Sprintf("range [%d,%d) not contained in {", rng.Start, rng.End) + for _, r := range par { + msg += fmt.Sprintf(" [%d,%d)", r.Start, r.End) + } + msg += " }" + return false, msg +} + +func rangesContainsAll(parent, child []dwarf.Range) (bool, string) { + for _, r := range child { + c, m := rangesContains(parent, r) + if !c { + return false, m + } + } + return true, "" +} + +// checkInlCall verifies that the PC ranges for inline info 'idx' are +// enclosed/contained within the ranges of its parent inline (or if +// this is a root/toplevel inline, checks that the ranges fall within +// the extent of the top level function). A panic is issued if a +// malformed range is found. +func checkInlCall(funcName string, inlCalls dwarf.InlCalls, funcSize int64, idx, parentIdx int) { + + // Callee + ic := inlCalls.Calls[idx] + callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name + calleeRanges := ic.Ranges + + // Caller + caller := funcName + parentRanges := []dwarf.Range{dwarf.Range{Start: int64(0), End: funcSize}} + if parentIdx != -1 { + pic := inlCalls.Calls[parentIdx] + caller = base.Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name + parentRanges = pic.Ranges + } + + // Callee ranges contained in caller ranges? + c, m := rangesContainsAll(parentRanges, calleeRanges) + if !c { + base.Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m) + } + + // Now visit kids + for _, k := range ic.Children { + checkInlCall(funcName, inlCalls, funcSize, k, idx) + } +} + +// unifyCallRanges ensures that the ranges for a given inline +// transitively include all of the ranges for its child inlines. +func unifyCallRanges(inlcalls dwarf.InlCalls, idx int) { + ic := &inlcalls.Calls[idx] + for _, childIdx := range ic.Children { + // First make sure child ranges are unified. + unifyCallRanges(inlcalls, childIdx) + + // Then merge child ranges into ranges for this inline. 
+ cic := inlcalls.Calls[childIdx] + ic.Ranges = dwarf.MergeRanges(ic.Ranges, cic.Ranges) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/dwarfgen/marker.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/dwarfgen/marker.go new file mode 100644 index 0000000000000000000000000000000000000000..ec6ce45a900bc098b97773ce16f1c33a5217e23f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/dwarfgen/marker.go @@ -0,0 +1,94 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dwarfgen + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/internal/src" +) + +// A ScopeMarker tracks scope nesting and boundaries for later use +// during DWARF generation. +type ScopeMarker struct { + parents []ir.ScopeID + marks []ir.Mark +} + +// checkPos validates the given position and returns the current scope. +func (m *ScopeMarker) checkPos(pos src.XPos) ir.ScopeID { + if !pos.IsKnown() { + base.Fatalf("unknown scope position") + } + + if len(m.marks) == 0 { + return 0 + } + + last := &m.marks[len(m.marks)-1] + if xposBefore(pos, last.Pos) { + base.FatalfAt(pos, "non-monotonic scope positions\n\t%v: previous scope position", base.FmtPos(last.Pos)) + } + return last.Scope +} + +// Push records a transition to a new child scope of the current scope. +func (m *ScopeMarker) Push(pos src.XPos) { + current := m.checkPos(pos) + + m.parents = append(m.parents, current) + child := ir.ScopeID(len(m.parents)) + + m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: child}) +} + +// Pop records a transition back to the current scope's parent. +func (m *ScopeMarker) Pop(pos src.XPos) { + current := m.checkPos(pos) + + parent := m.parents[current-1] + + m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: parent}) +} + +// Unpush removes the current scope, which must be empty. 
+func (m *ScopeMarker) Unpush() { + i := len(m.marks) - 1 + current := m.marks[i].Scope + + if current != ir.ScopeID(len(m.parents)) { + base.FatalfAt(m.marks[i].Pos, "current scope is not empty") + } + + m.parents = m.parents[:current-1] + m.marks = m.marks[:i] +} + +// WriteTo writes the recorded scope marks to the given function, +// and resets the marker for reuse. +func (m *ScopeMarker) WriteTo(fn *ir.Func) { + m.compactMarks() + + fn.Parents = make([]ir.ScopeID, len(m.parents)) + copy(fn.Parents, m.parents) + m.parents = m.parents[:0] + + fn.Marks = make([]ir.Mark, len(m.marks)) + copy(fn.Marks, m.marks) + m.marks = m.marks[:0] +} + +func (m *ScopeMarker) compactMarks() { + n := 0 + for _, next := range m.marks { + if n > 0 && next.Pos == m.marks[n-1].Pos { + m.marks[n-1].Scope = next.Scope + continue + } + m.marks[n] = next + n++ + } + m.marks = m.marks[:n] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/dwarfgen/scope.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/dwarfgen/scope.go new file mode 100644 index 0000000000000000000000000000000000000000..b4ae69e96fa7c4f64adca5a488286a7838f1ce8e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/dwarfgen/scope.go @@ -0,0 +1,136 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dwarfgen + +import ( + "sort" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/internal/dwarf" + "cmd/internal/obj" + "cmd/internal/src" +) + +// See golang.org/issue/20390. 
+func xposBefore(p, q src.XPos) bool { + return base.Ctxt.PosTable.Pos(p).Before(base.Ctxt.PosTable.Pos(q)) +} + +func findScope(marks []ir.Mark, pos src.XPos) ir.ScopeID { + i := sort.Search(len(marks), func(i int) bool { + return xposBefore(pos, marks[i].Pos) + }) + if i == 0 { + return 0 + } + return marks[i-1].Scope +} + +func assembleScopes(fnsym *obj.LSym, fn *ir.Func, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope { + // Initialize the DWARF scope tree based on lexical scopes. + dwarfScopes := make([]dwarf.Scope, 1+len(fn.Parents)) + for i, parent := range fn.Parents { + dwarfScopes[i+1].Parent = int32(parent) + } + + scopeVariables(dwarfVars, varScopes, dwarfScopes, fnsym.ABI() != obj.ABI0) + if fnsym.Func().Text != nil { + scopePCs(fnsym, fn.Marks, dwarfScopes) + } + return compactScopes(dwarfScopes) +} + +// scopeVariables assigns DWARF variable records to their scopes. +func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ir.ScopeID, dwarfScopes []dwarf.Scope, regabi bool) { + if regabi { + sort.Stable(varsByScope{dwarfVars, varScopes}) + } else { + sort.Stable(varsByScopeAndOffset{dwarfVars, varScopes}) + } + + i0 := 0 + for i := range dwarfVars { + if varScopes[i] == varScopes[i0] { + continue + } + dwarfScopes[varScopes[i0]].Vars = dwarfVars[i0:i] + i0 = i + } + if i0 < len(dwarfVars) { + dwarfScopes[varScopes[i0]].Vars = dwarfVars[i0:] + } +} + +// scopePCs assigns PC ranges to their scopes. +func scopePCs(fnsym *obj.LSym, marks []ir.Mark, dwarfScopes []dwarf.Scope) { + // If there aren't any child scopes (in particular, when scope + // tracking is disabled), we can skip a whole lot of work. 
+ if len(marks) == 0 { + return + } + p0 := fnsym.Func().Text + scope := findScope(marks, p0.Pos) + for p := p0; p != nil; p = p.Link { + if p.Pos == p0.Pos { + continue + } + dwarfScopes[scope].AppendRange(dwarf.Range{Start: p0.Pc, End: p.Pc}) + p0 = p + scope = findScope(marks, p0.Pos) + } + if p0.Pc < fnsym.Size { + dwarfScopes[scope].AppendRange(dwarf.Range{Start: p0.Pc, End: fnsym.Size}) + } +} + +func compactScopes(dwarfScopes []dwarf.Scope) []dwarf.Scope { + // Reverse pass to propagate PC ranges to parent scopes. + for i := len(dwarfScopes) - 1; i > 0; i-- { + s := &dwarfScopes[i] + dwarfScopes[s.Parent].UnifyRanges(s) + } + + return dwarfScopes +} + +type varsByScopeAndOffset struct { + vars []*dwarf.Var + scopes []ir.ScopeID +} + +func (v varsByScopeAndOffset) Len() int { + return len(v.vars) +} + +func (v varsByScopeAndOffset) Less(i, j int) bool { + if v.scopes[i] != v.scopes[j] { + return v.scopes[i] < v.scopes[j] + } + return v.vars[i].StackOffset < v.vars[j].StackOffset +} + +func (v varsByScopeAndOffset) Swap(i, j int) { + v.vars[i], v.vars[j] = v.vars[j], v.vars[i] + v.scopes[i], v.scopes[j] = v.scopes[j], v.scopes[i] +} + +type varsByScope struct { + vars []*dwarf.Var + scopes []ir.ScopeID +} + +func (v varsByScope) Len() int { + return len(v.vars) +} + +func (v varsByScope) Less(i, j int) bool { + return v.scopes[i] < v.scopes[j] +} + +func (v varsByScope) Swap(i, j int) { + v.vars[i], v.vars[j] = v.vars[j], v.vars[i] + v.scopes[i], v.scopes[j] = v.scopes[j], v.scopes[i] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/dwarfgen/scope_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/dwarfgen/scope_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ee4170ef44fe1101a7ca58d3fc1de1a18395cc29 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/dwarfgen/scope_test.go @@ -0,0 +1,527 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dwarfgen + +import ( + "debug/dwarf" + "fmt" + "internal/platform" + "internal/testenv" + "os" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "testing" + + "cmd/internal/objfile" +) + +type testline struct { + // line is one line of go source + line string + + // scopes is a list of scope IDs of all the lexical scopes that this line + // of code belongs to. + // Scope IDs are assigned by traversing the tree of lexical blocks of a + // function in pre-order + // Scope IDs are function specific, i.e. scope 0 is always the root scope + // of the function that this line belongs to. Empty scopes are not assigned + // an ID (because they are not saved in debug_info). + // Scope 0 is always omitted from this list since all lines always belong + // to it. + scopes []int + + // vars is the list of variables that belong in scopes[len(scopes)-1]. + // Local variables are prefixed with "var ", formal parameters with "arg ". + // Must be ordered alphabetically. + // Set to nil to skip the check. + vars []string + + // decl is the list of variables declared at this line. + decl []string + + // declBefore is the list of variables declared at or before this line. 
+ declBefore []string +} + +var testfile = []testline{ + {line: "package main"}, + {line: "var sink any"}, + {line: "func f1(x int) { }"}, + {line: "func f2(x int) { }"}, + {line: "func f3(x int) { }"}, + {line: "func f4(x int) { }"}, + {line: "func f5(x int) { }"}, + {line: "func f6(x int) { }"}, + {line: "func leak(x interface{}) { sink = x }"}, + {line: "func gret1() int { return 2 }"}, + {line: "func gretbool() bool { return true }"}, + {line: "func gret3() (int, int, int) { return 0, 1, 2 }"}, + {line: "var v = []int{ 0, 1, 2 }"}, + {line: "var ch = make(chan int)"}, + {line: "var floatch = make(chan float64)"}, + {line: "var iface interface{}"}, + {line: "func TestNestedFor() {", vars: []string{"var a int"}}, + {line: " a := 0", decl: []string{"a"}}, + {line: " f1(a)"}, + {line: " for i := 0; i < 5; i++ {", scopes: []int{1}, vars: []string{"var i int"}, decl: []string{"i"}}, + {line: " f2(i)", scopes: []int{1}}, + {line: " for i := 0; i < 5; i++ {", scopes: []int{1, 2}, vars: []string{"var i int"}, decl: []string{"i"}}, + {line: " f3(i)", scopes: []int{1, 2}}, + {line: " }"}, + {line: " f4(i)", scopes: []int{1}}, + {line: " }"}, + {line: " f5(a)"}, + {line: "}"}, + {line: "func TestOas2() {", vars: []string{}}, + {line: " if a, b, c := gret3(); a != 1 {", scopes: []int{1}, vars: []string{"var a int", "var b int", "var c int"}}, + {line: " f1(a)", scopes: []int{1}}, + {line: " f1(b)", scopes: []int{1}}, + {line: " f1(c)", scopes: []int{1}}, + {line: " }"}, + {line: " for i, x := range v {", scopes: []int{2}, vars: []string{"var i int", "var x int"}}, + {line: " f1(i)", scopes: []int{2}}, + {line: " f1(x)", scopes: []int{2}}, + {line: " }"}, + {line: " if a, ok := <- ch; ok {", scopes: []int{3}, vars: []string{"var a int", "var ok bool"}}, + {line: " f1(a)", scopes: []int{3}}, + {line: " }"}, + {line: " if a, ok := iface.(int); ok {", scopes: []int{4}, vars: []string{"var a int", "var ok bool"}}, + {line: " f1(a)", scopes: []int{4}}, + {line: " }"}, + {line: 
"}"}, + {line: "func TestIfElse() {"}, + {line: " if x := gret1(); x != 0 {", scopes: []int{1}, vars: []string{"var x int"}}, + {line: " a := 0", scopes: []int{1, 2}, vars: []string{"var a int"}}, + {line: " f1(a); f1(x)", scopes: []int{1, 2}}, + {line: " } else {"}, + {line: " b := 1", scopes: []int{1, 3}, vars: []string{"var b int"}}, + {line: " f1(b); f1(x+1)", scopes: []int{1, 3}}, + {line: " }"}, + {line: "}"}, + {line: "func TestSwitch() {", vars: []string{}}, + {line: " switch x := gret1(); x {", scopes: []int{1}, vars: []string{"var x int"}}, + {line: " case 0:", scopes: []int{1, 2}}, + {line: " i := x + 5", scopes: []int{1, 2}, vars: []string{"var i int"}}, + {line: " f1(x); f1(i)", scopes: []int{1, 2}}, + {line: " case 1:", scopes: []int{1, 3}}, + {line: " j := x + 10", scopes: []int{1, 3}, vars: []string{"var j int"}}, + {line: " f1(x); f1(j)", scopes: []int{1, 3}}, + {line: " case 2:", scopes: []int{1, 4}}, + {line: " k := x + 2", scopes: []int{1, 4}, vars: []string{"var k int"}}, + {line: " f1(x); f1(k)", scopes: []int{1, 4}}, + {line: " }"}, + {line: "}"}, + {line: "func TestTypeSwitch() {", vars: []string{}}, + {line: " switch x := iface.(type) {"}, + {line: " case int:", scopes: []int{1}}, + {line: " f1(x)", scopes: []int{1}, vars: []string{"var x int"}}, + {line: " case uint8:", scopes: []int{2}}, + {line: " f1(int(x))", scopes: []int{2}, vars: []string{"var x uint8"}}, + {line: " case float64:", scopes: []int{3}}, + {line: " f1(int(x)+1)", scopes: []int{3}, vars: []string{"var x float64"}}, + {line: " }"}, + {line: "}"}, + {line: "func TestSelectScope() {"}, + {line: " select {"}, + {line: " case i := <- ch:", scopes: []int{1}}, + {line: " f1(i)", scopes: []int{1}, vars: []string{"var i int"}}, + {line: " case f := <- floatch:", scopes: []int{2}}, + {line: " f1(int(f))", scopes: []int{2}, vars: []string{"var f float64"}}, + {line: " }"}, + {line: "}"}, + {line: "func TestBlock() {", vars: []string{"var a int"}}, + {line: " a := 1"}, + {line: " 
{"}, + {line: " b := 2", scopes: []int{1}, vars: []string{"var b int"}}, + {line: " f1(b)", scopes: []int{1}}, + {line: " f1(a)", scopes: []int{1}}, + {line: " }"}, + {line: "}"}, + {line: "func TestDiscontiguousRanges() {", vars: []string{"var a int"}}, + {line: " a := 0"}, + {line: " f1(a)"}, + {line: " {"}, + {line: " b := 0", scopes: []int{1}, vars: []string{"var b int"}}, + {line: " f2(b)", scopes: []int{1}}, + {line: " if gretbool() {", scopes: []int{1}}, + {line: " c := 0", scopes: []int{1, 2}, vars: []string{"var c int"}}, + {line: " f3(c)", scopes: []int{1, 2}}, + {line: " } else {"}, + {line: " c := 1.1", scopes: []int{1, 3}, vars: []string{"var c float64"}}, + {line: " f4(int(c))", scopes: []int{1, 3}}, + {line: " }"}, + {line: " f5(b)", scopes: []int{1}}, + {line: " }"}, + {line: " f6(a)"}, + {line: "}"}, + {line: "func TestClosureScope() {", vars: []string{"var a int", "var b int", "var f func(int)"}}, + {line: " a := 1; b := 1"}, + {line: " f := func(c int) {", scopes: []int{0}, vars: []string{"arg c int", "var &b *int", "var a int", "var d int"}, declBefore: []string{"&b", "a"}}, + {line: " d := 3"}, + {line: " f1(c); f1(d)"}, + {line: " if e := 3; e != 0 {", scopes: []int{1}, vars: []string{"var e int"}}, + {line: " f1(e)", scopes: []int{1}}, + {line: " f1(a)", scopes: []int{1}}, + {line: " b = 2", scopes: []int{1}}, + {line: " }"}, + {line: " }"}, + {line: " f(3); f1(b)"}, + {line: "}"}, + {line: "func TestEscape() {"}, + {line: " a := 1", vars: []string{"var a int"}}, + {line: " {"}, + {line: " b := 2", scopes: []int{1}, vars: []string{"var &b *int", "var p *int"}}, + {line: " p := &b", scopes: []int{1}}, + {line: " f1(a)", scopes: []int{1}}, + {line: " leak(p)", scopes: []int{1}}, + {line: " }"}, + {line: "}"}, + {line: "var fglob func() int"}, + {line: "func TestCaptureVar(flag bool) {"}, + {line: " a := 1", vars: []string{"arg flag bool", "var a int"}}, // TODO(register args) restore "arg ~r1 func() int", + {line: " if flag {"}, + {line: " b := 
2", scopes: []int{1}, vars: []string{"var b int", "var f func() int"}}, + {line: " f := func() int {", scopes: []int{1, 0}}, + {line: " return b + 1"}, + {line: " }"}, + {line: " fglob = f", scopes: []int{1}}, + {line: " }"}, + {line: " f1(a)"}, + {line: "}"}, + {line: "func main() {"}, + {line: " TestNestedFor()"}, + {line: " TestOas2()"}, + {line: " TestIfElse()"}, + {line: " TestSwitch()"}, + {line: " TestTypeSwitch()"}, + {line: " TestSelectScope()"}, + {line: " TestBlock()"}, + {line: " TestDiscontiguousRanges()"}, + {line: " TestClosureScope()"}, + {line: " TestEscape()"}, + {line: " TestCaptureVar(true)"}, + {line: "}"}, +} + +const detailOutput = false + +// Compiles testfile checks that the description of lexical blocks emitted +// by the linker in debug_info, for each function in the main package, +// corresponds to what we expect it to be. +func TestScopeRanges(t *testing.T) { + testenv.MustHaveGoBuild(t) + t.Parallel() + + if !platform.ExecutableHasDWARF(runtime.GOOS, runtime.GOARCH) { + t.Skipf("skipping on %s/%s: no DWARF symbol table in executables", runtime.GOOS, runtime.GOARCH) + } + + src, f := gobuild(t, t.TempDir(), false, testfile) + defer f.Close() + + // the compiler uses forward slashes for paths even on windows + src = strings.Replace(src, "\\", "/", -1) + + pcln, err := f.PCLineTable() + if err != nil { + t.Fatal(err) + } + dwarfData, err := f.DWARF() + if err != nil { + t.Fatal(err) + } + dwarfReader := dwarfData.Reader() + + lines := make(map[line][]*lexblock) + + for { + entry, err := dwarfReader.Next() + if err != nil { + t.Fatal(err) + } + if entry == nil { + break + } + + if entry.Tag != dwarf.TagSubprogram { + continue + } + + name, ok := entry.Val(dwarf.AttrName).(string) + if !ok || !strings.HasPrefix(name, "main.Test") { + continue + } + + var scope lexblock + ctxt := scopexplainContext{ + dwarfData: dwarfData, + dwarfReader: dwarfReader, + scopegen: 1, + } + + readScope(&ctxt, &scope, entry) + + scope.markLines(pcln, lines) + } 
+ + anyerror := false + for i := range testfile { + tgt := testfile[i].scopes + out := lines[line{src, i + 1}] + + if detailOutput { + t.Logf("%s // %v", testfile[i].line, out) + } + + scopesok := checkScopes(tgt, out) + if !scopesok { + t.Logf("mismatch at line %d %q: expected: %v got: %v\n", i, testfile[i].line, tgt, scopesToString(out)) + } + + varsok := true + if testfile[i].vars != nil { + if len(out) > 0 { + varsok = checkVars(testfile[i].vars, out[len(out)-1].vars) + if !varsok { + t.Logf("variable mismatch at line %d %q for scope %d: expected: %v got: %v\n", i+1, testfile[i].line, out[len(out)-1].id, testfile[i].vars, out[len(out)-1].vars) + } + for j := range testfile[i].decl { + if line := declLineForVar(out[len(out)-1].vars, testfile[i].decl[j]); line != i+1 { + t.Errorf("wrong declaration line for variable %s, expected %d got: %d", testfile[i].decl[j], i+1, line) + } + } + + for j := range testfile[i].declBefore { + if line := declLineForVar(out[len(out)-1].vars, testfile[i].declBefore[j]); line > i+1 { + t.Errorf("wrong declaration line for variable %s, expected %d (or less) got: %d", testfile[i].declBefore[j], i+1, line) + } + } + } + } + + anyerror = anyerror || !scopesok || !varsok + } + + if anyerror { + t.Fatalf("mismatched output") + } +} + +func scopesToString(v []*lexblock) string { + r := make([]string, len(v)) + for i, s := range v { + r[i] = strconv.Itoa(s.id) + } + return "[ " + strings.Join(r, ", ") + " ]" +} + +func checkScopes(tgt []int, out []*lexblock) bool { + if len(out) > 0 { + // omit scope 0 + out = out[1:] + } + if len(tgt) != len(out) { + return false + } + for i := range tgt { + if tgt[i] != out[i].id { + return false + } + } + return true +} + +func checkVars(tgt []string, out []variable) bool { + if len(tgt) != len(out) { + return false + } + for i := range tgt { + if tgt[i] != out[i].expr { + return false + } + } + return true +} + +func declLineForVar(scope []variable, name string) int { + for i := range scope { + if 
scope[i].name() == name { + return scope[i].declLine + } + } + return -1 +} + +type lexblock struct { + id int + ranges [][2]uint64 + vars []variable + scopes []lexblock +} + +type variable struct { + expr string + declLine int +} + +func (v *variable) name() string { + return strings.Split(v.expr, " ")[1] +} + +type line struct { + file string + lineno int +} + +type scopexplainContext struct { + dwarfData *dwarf.Data + dwarfReader *dwarf.Reader + scopegen int +} + +// readScope reads the DW_TAG_lexical_block or the DW_TAG_subprogram in +// entry and writes a description in scope. +// Nested DW_TAG_lexical_block entries are read recursively. +func readScope(ctxt *scopexplainContext, scope *lexblock, entry *dwarf.Entry) { + var err error + scope.ranges, err = ctxt.dwarfData.Ranges(entry) + if err != nil { + panic(err) + } + for { + e, err := ctxt.dwarfReader.Next() + if err != nil { + panic(err) + } + switch e.Tag { + case 0: + sort.Slice(scope.vars, func(i, j int) bool { + return scope.vars[i].expr < scope.vars[j].expr + }) + return + case dwarf.TagFormalParameter: + typ, err := ctxt.dwarfData.Type(e.Val(dwarf.AttrType).(dwarf.Offset)) + if err != nil { + panic(err) + } + scope.vars = append(scope.vars, entryToVar(e, "arg", typ)) + case dwarf.TagVariable: + typ, err := ctxt.dwarfData.Type(e.Val(dwarf.AttrType).(dwarf.Offset)) + if err != nil { + panic(err) + } + scope.vars = append(scope.vars, entryToVar(e, "var", typ)) + case dwarf.TagLexDwarfBlock: + scope.scopes = append(scope.scopes, lexblock{id: ctxt.scopegen}) + ctxt.scopegen++ + readScope(ctxt, &scope.scopes[len(scope.scopes)-1], e) + } + } +} + +func entryToVar(e *dwarf.Entry, kind string, typ dwarf.Type) variable { + return variable{ + fmt.Sprintf("%s %s %s", kind, e.Val(dwarf.AttrName).(string), typ.String()), + int(e.Val(dwarf.AttrDeclLine).(int64)), + } +} + +// markLines marks all lines that belong to this scope with this scope +// Recursively calls markLines for all children scopes. 
+func (scope *lexblock) markLines(pcln objfile.Liner, lines map[line][]*lexblock) { + for _, r := range scope.ranges { + for pc := r[0]; pc < r[1]; pc++ { + file, lineno, _ := pcln.PCToLine(pc) + l := line{file, lineno} + if len(lines[l]) == 0 || lines[l][len(lines[l])-1] != scope { + lines[l] = append(lines[l], scope) + } + } + } + + for i := range scope.scopes { + scope.scopes[i].markLines(pcln, lines) + } +} + +func gobuild(t *testing.T, dir string, optimized bool, testfile []testline) (string, *objfile.File) { + src := filepath.Join(dir, "test.go") + dst := filepath.Join(dir, "out.o") + + f, err := os.Create(src) + if err != nil { + t.Fatal(err) + } + for i := range testfile { + f.Write([]byte(testfile[i].line)) + f.Write([]byte{'\n'}) + } + f.Close() + + args := []string{"build"} + if !optimized { + args = append(args, "-gcflags=-N -l") + } + args = append(args, "-o", dst, src) + + cmd := testenv.Command(t, testenv.GoToolPath(t), args...) + if b, err := cmd.CombinedOutput(); err != nil { + t.Logf("build: %s\n", string(b)) + t.Fatal(err) + } + + pkg, err := objfile.Open(dst) + if err != nil { + t.Fatal(err) + } + return src, pkg +} + +// TestEmptyDwarfRanges tests that no list entry in debug_ranges has start == end. +// See issue #23928. 
+func TestEmptyDwarfRanges(t *testing.T) { + testenv.MustHaveGoRun(t) + t.Parallel() + + if !platform.ExecutableHasDWARF(runtime.GOOS, runtime.GOARCH) { + t.Skipf("skipping on %s/%s: no DWARF symbol table in executables", runtime.GOOS, runtime.GOARCH) + } + + _, f := gobuild(t, t.TempDir(), true, []testline{{line: "package main"}, {line: "func main(){ println(\"hello\") }"}}) + defer f.Close() + + dwarfData, err := f.DWARF() + if err != nil { + t.Fatal(err) + } + dwarfReader := dwarfData.Reader() + + for { + entry, err := dwarfReader.Next() + if err != nil { + t.Fatal(err) + } + if entry == nil { + break + } + + ranges, err := dwarfData.Ranges(entry) + if err != nil { + t.Fatal(err) + } + if ranges == nil { + continue + } + + for _, rng := range ranges { + if rng[0] == rng[1] { + t.Errorf("range entry with start == end: %v", rng) + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/assign.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/assign.go new file mode 100644 index 0000000000000000000000000000000000000000..6af53886831a1ebaebe3a14ad79f2601e3b00d95 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/assign.go @@ -0,0 +1,128 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package escape + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" +) + +// addr evaluates an addressable expression n and returns a hole +// that represents storing into the represented location. +func (e *escape) addr(n ir.Node) hole { + if n == nil || ir.IsBlank(n) { + // Can happen in select case, range, maybe others. 
+ return e.discardHole() + } + + k := e.heapHole() + + switch n.Op() { + default: + base.Fatalf("unexpected addr: %v", n) + case ir.ONAME: + n := n.(*ir.Name) + if n.Class == ir.PEXTERN { + break + } + k = e.oldLoc(n).asHole() + case ir.OLINKSYMOFFSET: + break + case ir.ODOT: + n := n.(*ir.SelectorExpr) + k = e.addr(n.X) + case ir.OINDEX: + n := n.(*ir.IndexExpr) + e.discard(n.Index) + if n.X.Type().IsArray() { + k = e.addr(n.X) + } else { + e.mutate(n.X) + } + case ir.ODEREF: + n := n.(*ir.StarExpr) + e.mutate(n.X) + case ir.ODOTPTR: + n := n.(*ir.SelectorExpr) + e.mutate(n.X) + case ir.OINDEXMAP: + n := n.(*ir.IndexExpr) + e.discard(n.X) + e.assignHeap(n.Index, "key of map put", n) + } + + return k +} + +func (e *escape) mutate(n ir.Node) { + e.expr(e.mutatorHole(), n) +} + +func (e *escape) addrs(l ir.Nodes) []hole { + var ks []hole + for _, n := range l { + ks = append(ks, e.addr(n)) + } + return ks +} + +func (e *escape) assignHeap(src ir.Node, why string, where ir.Node) { + e.expr(e.heapHole().note(where, why), src) +} + +// assignList evaluates the assignment dsts... = srcs.... +func (e *escape) assignList(dsts, srcs []ir.Node, why string, where ir.Node) { + ks := e.addrs(dsts) + for i, k := range ks { + var src ir.Node + if i < len(srcs) { + src = srcs[i] + } + + if dst := dsts[i]; dst != nil { + // Detect implicit conversion of uintptr to unsafe.Pointer when + // storing into reflect.{Slice,String}Header. + if dst.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(dst) { + e.unsafeValue(e.heapHole().note(where, why), src) + continue + } + + // Filter out some no-op assignments for escape analysis. 
+ if src != nil && isSelfAssign(dst, src) { + if base.Flag.LowerM != 0 { + base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %v", e.curfn, where) + } + k = e.discardHole() + } + } + + e.expr(k.note(where, why), src) + } + + e.reassigned(ks, where) +} + +// reassigned marks the locations associated with the given holes as +// reassigned, unless the location represents a variable declared and +// assigned exactly once by where. +func (e *escape) reassigned(ks []hole, where ir.Node) { + if as, ok := where.(*ir.AssignStmt); ok && as.Op() == ir.OAS && as.Y == nil { + if dst, ok := as.X.(*ir.Name); ok && dst.Op() == ir.ONAME && dst.Defn == nil { + // Zero-value assignment for variable declared without an + // explicit initial value. Assume this is its initialization + // statement. + return + } + } + + for _, k := range ks { + loc := k.dst + // Variables declared by range statements are assigned on every iteration. + if n, ok := loc.n.(*ir.Name); ok && n.Defn == where && where.Op() != ir.ORANGE { + continue + } + loc.reassigned = true + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/call.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/call.go new file mode 100644 index 0000000000000000000000000000000000000000..4a3753ada9cc91226a498d904bed201229e20cc8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/call.go @@ -0,0 +1,361 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package escape + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +// call evaluates a call expressions, including builtin calls. ks +// should contain the holes representing where the function callee's +// results flows. 
+func (e *escape) call(ks []hole, call ir.Node) { + argument := func(k hole, arg ir.Node) { + // TODO(mdempsky): Should be "call argument". + e.expr(k.note(call, "call parameter"), arg) + } + + switch call.Op() { + default: + ir.Dump("esc", call) + base.Fatalf("unexpected call op: %v", call.Op()) + + case ir.OCALLFUNC, ir.OCALLINTER: + call := call.(*ir.CallExpr) + typecheck.AssertFixedCall(call) + + // Pick out the function callee, if statically known. + // + // TODO(mdempsky): Change fn from *ir.Name to *ir.Func, but some + // functions (e.g., runtime builtins, method wrappers, generated + // eq/hash functions) don't have it set. Investigate whether + // that's a concern. + var fn *ir.Name + switch call.Op() { + case ir.OCALLFUNC: + v := ir.StaticValue(call.Fun) + fn = ir.StaticCalleeName(v) + } + + fntype := call.Fun.Type() + if fn != nil { + fntype = fn.Type() + } + + if ks != nil && fn != nil && e.inMutualBatch(fn) { + for i, result := range fn.Type().Results() { + e.expr(ks[i], result.Nname.(*ir.Name)) + } + } + + var recvArg ir.Node + if call.Op() == ir.OCALLFUNC { + // Evaluate callee function expression. + calleeK := e.discardHole() + if fn == nil { // unknown callee + for _, k := range ks { + if k.dst != &e.blankLoc { + // The results flow somewhere, but we don't statically + // know the callee function. If a closure flows here, we + // need to conservatively assume its results might flow to + // the heap. + calleeK = e.calleeHole().note(call, "callee operand") + break + } + } + } + e.expr(calleeK, call.Fun) + } else { + recvArg = call.Fun.(*ir.SelectorExpr).X + } + + // argumentParam handles escape analysis of assigning a call + // argument to its corresponding parameter. + argumentParam := func(param *types.Field, arg ir.Node) { + e.rewriteArgument(arg, call, fn) + argument(e.tagHole(ks, fn, param), arg) + } + + args := call.Args + if recvParam := fntype.Recv(); recvParam != nil { + if recvArg == nil { + // Function call using method expression. 
Receiver argument is + // at the front of the regular arguments list. + recvArg, args = args[0], args[1:] + } + + argumentParam(recvParam, recvArg) + } + + for i, param := range fntype.Params() { + argumentParam(param, args[i]) + } + + case ir.OINLCALL: + call := call.(*ir.InlinedCallExpr) + e.stmts(call.Body) + for i, result := range call.ReturnVars { + k := e.discardHole() + if ks != nil { + k = ks[i] + } + e.expr(k, result) + } + + case ir.OAPPEND: + call := call.(*ir.CallExpr) + args := call.Args + + // Appendee slice may flow directly to the result, if + // it has enough capacity. Alternatively, a new heap + // slice might be allocated, and all slice elements + // might flow to heap. + appendeeK := e.teeHole(ks[0], e.mutatorHole()) + if args[0].Type().Elem().HasPointers() { + appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice")) + } + argument(appendeeK, args[0]) + + if call.IsDDD { + appendedK := e.discardHole() + if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() { + appendedK = e.heapHole().deref(call, "appended slice...") + } + argument(appendedK, args[1]) + } else { + for i := 1; i < len(args); i++ { + argument(e.heapHole(), args[i]) + } + } + e.discard(call.RType) + + case ir.OCOPY: + call := call.(*ir.BinaryExpr) + argument(e.mutatorHole(), call.X) + + copiedK := e.discardHole() + if call.Y.Type().IsSlice() && call.Y.Type().Elem().HasPointers() { + copiedK = e.heapHole().deref(call, "copied slice") + } + argument(copiedK, call.Y) + e.discard(call.RType) + + case ir.OPANIC: + call := call.(*ir.UnaryExpr) + argument(e.heapHole(), call.X) + + case ir.OCOMPLEX: + call := call.(*ir.BinaryExpr) + e.discard(call.X) + e.discard(call.Y) + + case ir.ODELETE, ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP: + call := call.(*ir.CallExpr) + for _, arg := range call.Args { + e.discard(arg) + } + e.discard(call.RType) + + case ir.OMIN, ir.OMAX: + call := call.(*ir.CallExpr) + for _, arg := range call.Args { + argument(ks[0], arg) + } + 
e.discard(call.RType) + + case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE: + call := call.(*ir.UnaryExpr) + e.discard(call.X) + + case ir.OCLEAR: + call := call.(*ir.UnaryExpr) + argument(e.mutatorHole(), call.X) + + case ir.OUNSAFESTRINGDATA, ir.OUNSAFESLICEDATA: + call := call.(*ir.UnaryExpr) + argument(ks[0], call.X) + + case ir.OUNSAFEADD, ir.OUNSAFESLICE, ir.OUNSAFESTRING: + call := call.(*ir.BinaryExpr) + argument(ks[0], call.X) + e.discard(call.Y) + e.discard(call.RType) + } +} + +// goDeferStmt analyzes a "go" or "defer" statement. +func (e *escape) goDeferStmt(n *ir.GoDeferStmt) { + k := e.heapHole() + if n.Op() == ir.ODEFER && e.loopDepth == 1 && n.DeferAt == nil { + // Top-level defer arguments don't escape to the heap, + // but they do need to last until they're invoked. + k = e.later(e.discardHole()) + + // force stack allocation of defer record, unless + // open-coded defers are used (see ssa.go) + n.SetEsc(ir.EscNever) + } + + // If the function is already a zero argument/result function call, + // just escape analyze it normally. + // + // Note that the runtime is aware of this optimization for + // "go" statements that start in reflect.makeFuncStub or + // reflect.methodValueCall. + + call, ok := n.Call.(*ir.CallExpr) + if !ok || call.Op() != ir.OCALLFUNC { + base.FatalfAt(n.Pos(), "expected function call: %v", n.Call) + } + if sig := call.Fun.Type(); sig.NumParams()+sig.NumResults() != 0 { + base.FatalfAt(n.Pos(), "expected signature without parameters or results: %v", sig) + } + + if clo, ok := call.Fun.(*ir.ClosureExpr); ok && n.Op() == ir.OGO { + clo.IsGoWrap = true + } + + e.expr(k, call.Fun) +} + +// rewriteArgument rewrites the argument arg of the given call expression. +// fn is the static callee function, if known. 
+func (e *escape) rewriteArgument(arg ir.Node, call *ir.CallExpr, fn *ir.Name) { + if fn == nil || fn.Func == nil { + return + } + pragma := fn.Func.Pragma + if pragma&(ir.UintptrKeepAlive|ir.UintptrEscapes) == 0 { + return + } + + // unsafeUintptr rewrites "uintptr(ptr)" arguments to syscall-like + // functions, so that ptr is kept alive and/or escaped as + // appropriate. unsafeUintptr also reports whether it modified arg0. + unsafeUintptr := func(arg ir.Node) { + // If the argument is really a pointer being converted to uintptr, + // arrange for the pointer to be kept alive until the call + // returns, by copying it into a temp and marking that temp still + // alive when we pop the temp stack. + conv, ok := arg.(*ir.ConvExpr) + if !ok || conv.Op() != ir.OCONVNOP { + return // not a conversion + } + if !conv.X.Type().IsUnsafePtr() || !conv.Type().IsUintptr() { + return // not an unsafe.Pointer->uintptr conversion + } + + // Create and declare a new pointer-typed temp variable. + // + // TODO(mdempsky): This potentially violates the Go spec's order + // of evaluations, by evaluating arg.X before any other + // operands. + tmp := e.copyExpr(conv.Pos(), conv.X, call.PtrInit()) + conv.X = tmp + + k := e.mutatorHole() + if pragma&ir.UintptrEscapes != 0 { + k = e.heapHole().note(conv, "//go:uintptrescapes") + } + e.flow(k, e.oldLoc(tmp)) + + if pragma&ir.UintptrKeepAlive != 0 { + tmp.SetAddrtaken(true) // ensure SSA keeps the tmp variable + call.KeepAlive = append(call.KeepAlive, tmp) + } + } + + // For variadic functions, the compiler has already rewritten: + // + // f(a, b, c) + // + // to: + // + // f([]T{a, b, c}...) + // + // So we need to look into slice elements to handle uintptr(ptr) + // arguments to variadic syscall-like functions correctly. 
+ if arg.Op() == ir.OSLICELIT { + list := arg.(*ir.CompLitExpr).List + for _, el := range list { + if el.Op() == ir.OKEY { + el = el.(*ir.KeyExpr).Value + } + unsafeUintptr(el) + } + } else { + unsafeUintptr(arg) + } +} + +// copyExpr creates and returns a new temporary variable within fn; +// appends statements to init to declare and initialize it to expr; +// and escape analyzes the data flow. +func (e *escape) copyExpr(pos src.XPos, expr ir.Node, init *ir.Nodes) *ir.Name { + if ir.HasUniquePos(expr) { + pos = expr.Pos() + } + + tmp := typecheck.TempAt(pos, e.curfn, expr.Type()) + + stmts := []ir.Node{ + ir.NewDecl(pos, ir.ODCL, tmp), + ir.NewAssignStmt(pos, tmp, expr), + } + typecheck.Stmts(stmts) + init.Append(stmts...) + + e.newLoc(tmp, true) + e.stmts(stmts) + + return tmp +} + +// tagHole returns a hole for evaluating an argument passed to param. +// ks should contain the holes representing where the function +// callee's results flows. fn is the statically-known callee function, +// if any. +func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole { + // If this is a dynamic call, we can't rely on param.Note. + if fn == nil { + return e.heapHole() + } + + if e.inMutualBatch(fn) { + if param.Nname == nil { + return e.discardHole() + } + return e.addr(param.Nname.(*ir.Name)) + } + + // Call to previously tagged function. + + var tagKs []hole + esc := parseLeaks(param.Note) + + if x := esc.Heap(); x >= 0 { + tagKs = append(tagKs, e.heapHole().shift(x)) + } + if x := esc.Mutator(); x >= 0 { + tagKs = append(tagKs, e.mutatorHole().shift(x)) + } + if x := esc.Callee(); x >= 0 { + tagKs = append(tagKs, e.calleeHole().shift(x)) + } + + if ks != nil { + for i := 0; i < numEscResults; i++ { + if x := esc.Result(i); x >= 0 { + tagKs = append(tagKs, ks[i].shift(x)) + } + } + } + + return e.teeHole(tagKs...) 
+} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/escape.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/escape.go new file mode 100644 index 0000000000000000000000000000000000000000..7df367caf7049aa8db85d1e21c969394a40ee54d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/escape.go @@ -0,0 +1,509 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package escape + +import ( + "fmt" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/logopt" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +// Escape analysis. +// +// Here we analyze functions to determine which Go variables +// (including implicit allocations such as calls to "new" or "make", +// composite literals, etc.) can be allocated on the stack. The two +// key invariants we have to ensure are: (1) pointers to stack objects +// cannot be stored in the heap, and (2) pointers to a stack object +// cannot outlive that object (e.g., because the declaring function +// returned and destroyed the object's stack frame, or its space is +// reused across loop iterations for logically distinct variables). +// +// We implement this with a static data-flow analysis of the AST. +// First, we construct a directed weighted graph where vertices +// (termed "locations") represent variables allocated by statements +// and expressions, and edges represent assignments between variables +// (with weights representing addressing/dereference counts). +// +// Next we walk the graph looking for assignment paths that might +// violate the invariants stated above. If a variable v's address is +// stored in the heap or elsewhere that may outlive it, then v is +// marked as requiring heap allocation. 
+// +// To support interprocedural analysis, we also record data-flow from +// each function's parameters to the heap and to its result +// parameters. This information is summarized as "parameter tags", +// which are used at static call sites to improve escape analysis of +// function arguments. + +// Constructing the location graph. +// +// Every allocating statement (e.g., variable declaration) or +// expression (e.g., "new" or "make") is first mapped to a unique +// "location." +// +// We also model every Go assignment as a directed edges between +// locations. The number of dereference operations minus the number of +// addressing operations is recorded as the edge's weight (termed +// "derefs"). For example: +// +// p = &q // -1 +// p = q // 0 +// p = *q // 1 +// p = **q // 2 +// +// p = **&**&q // 2 +// +// Note that the & operator can only be applied to addressable +// expressions, and the expression &x itself is not addressable, so +// derefs cannot go below -1. +// +// Every Go language construct is lowered into this representation, +// generally without sensitivity to flow, path, or context; and +// without distinguishing elements within a compound variable. For +// example: +// +// var x struct { f, g *int } +// var u []*int +// +// x.f = u[0] +// +// is modeled simply as +// +// x = *u +// +// That is, we don't distinguish x.f from x.g, or u[0] from u[1], +// u[2], etc. However, we do record the implicit dereference involved +// in indexing a slice. + +// A batch holds escape analysis state that's shared across an entire +// batch of functions being analyzed at once. +type batch struct { + allLocs []*location + closures []closure + + heapLoc location + mutatorLoc location + calleeLoc location + blankLoc location +} + +// A closure holds a closure expression and its spill hole (i.e., +// where the hole representing storing into its closure record). 
+type closure struct { + k hole + clo *ir.ClosureExpr +} + +// An escape holds state specific to a single function being analyzed +// within a batch. +type escape struct { + *batch + + curfn *ir.Func // function being analyzed + + labels map[*types.Sym]labelState // known labels + + // loopDepth counts the current loop nesting depth within + // curfn. It increments within each "for" loop and at each + // label with a corresponding backwards "goto" (i.e., + // unstructured loop). + loopDepth int +} + +func Funcs(all []*ir.Func) { + ir.VisitFuncsBottomUp(all, Batch) +} + +// Batch performs escape analysis on a minimal batch of +// functions. +func Batch(fns []*ir.Func, recursive bool) { + var b batch + b.heapLoc.attrs = attrEscapes | attrPersists | attrMutates | attrCalls + b.mutatorLoc.attrs = attrMutates + b.calleeLoc.attrs = attrCalls + + // Construct data-flow graph from syntax trees. + for _, fn := range fns { + if base.Flag.W > 1 { + s := fmt.Sprintf("\nbefore escape %v", fn) + ir.Dump(s, fn) + } + b.initFunc(fn) + } + for _, fn := range fns { + if !fn.IsHiddenClosure() { + b.walkFunc(fn) + } + } + + // We've walked the function bodies, so we've seen everywhere a + // variable might be reassigned or have it's address taken. Now we + // can decide whether closures should capture their free variables + // by value or reference. 
+ for _, closure := range b.closures { + b.flowClosure(closure.k, closure.clo) + } + b.closures = nil + + for _, loc := range b.allLocs { + if why := HeapAllocReason(loc.n); why != "" { + b.flow(b.heapHole().addr(loc.n, why), loc) + } + } + + b.walkAll() + b.finish(fns) +} + +func (b *batch) with(fn *ir.Func) *escape { + return &escape{ + batch: b, + curfn: fn, + loopDepth: 1, + } +} + +func (b *batch) initFunc(fn *ir.Func) { + e := b.with(fn) + if fn.Esc() != escFuncUnknown { + base.Fatalf("unexpected node: %v", fn) + } + fn.SetEsc(escFuncPlanned) + if base.Flag.LowerM > 3 { + ir.Dump("escAnalyze", fn) + } + + // Allocate locations for local variables. + for _, n := range fn.Dcl { + e.newLoc(n, true) + } + + // Also for hidden parameters (e.g., the ".this" parameter to a + // method value wrapper). + if fn.OClosure == nil { + for _, n := range fn.ClosureVars { + e.newLoc(n.Canonical(), true) + } + } + + // Initialize resultIndex for result parameters. + for i, f := range fn.Type().Results() { + e.oldLoc(f.Nname.(*ir.Name)).resultIndex = 1 + i + } +} + +func (b *batch) walkFunc(fn *ir.Func) { + e := b.with(fn) + fn.SetEsc(escFuncStarted) + + // Identify labels that mark the head of an unstructured loop. + ir.Visit(fn, func(n ir.Node) { + switch n.Op() { + case ir.OLABEL: + n := n.(*ir.LabelStmt) + if n.Label.IsBlank() { + break + } + if e.labels == nil { + e.labels = make(map[*types.Sym]labelState) + } + e.labels[n.Label] = nonlooping + + case ir.OGOTO: + // If we visited the label before the goto, + // then this is a looping label. 
+ n := n.(*ir.BranchStmt) + if e.labels[n.Label] == nonlooping { + e.labels[n.Label] = looping + } + } + }) + + e.block(fn.Body) + + if len(e.labels) != 0 { + base.FatalfAt(fn.Pos(), "leftover labels after walkFunc") + } +} + +func (b *batch) flowClosure(k hole, clo *ir.ClosureExpr) { + for _, cv := range clo.Func.ClosureVars { + n := cv.Canonical() + loc := b.oldLoc(cv) + if !loc.captured { + base.FatalfAt(cv.Pos(), "closure variable never captured: %v", cv) + } + + // Capture by value for variables <= 128 bytes that are never reassigned. + n.SetByval(!loc.addrtaken && !loc.reassigned && n.Type().Size() <= 128) + if !n.Byval() { + n.SetAddrtaken(true) + if n.Sym().Name == typecheck.LocalDictName { + base.FatalfAt(n.Pos(), "dictionary variable not captured by value") + } + } + + if base.Flag.LowerM > 1 { + how := "ref" + if n.Byval() { + how = "value" + } + base.WarnfAt(n.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", n.Curfn, how, n, loc.addrtaken, loc.reassigned, n.Type().Size()) + } + + // Flow captured variables to closure. + k := k + if !cv.Byval() { + k = k.addr(cv, "reference") + } + b.flow(k.note(cv, "captured by a closure"), loc) + } +} + +func (b *batch) finish(fns []*ir.Func) { + // Record parameter tags for package export data. + for _, fn := range fns { + fn.SetEsc(escFuncTagged) + + for i, param := range fn.Type().RecvParams() { + param.Note = b.paramTag(fn, 1+i, param) + } + } + + for _, loc := range b.allLocs { + n := loc.n + if n == nil { + continue + } + + if n.Op() == ir.ONAME { + n := n.(*ir.Name) + n.Opt = nil + } + + // Update n.Esc based on escape analysis results. + + // Omit escape diagnostics for go/defer wrappers, at least for now. + // Historically, we haven't printed them, and test cases don't expect them. + // TODO(mdempsky): Update tests to expect this. 
+ goDeferWrapper := n.Op() == ir.OCLOSURE && n.(*ir.ClosureExpr).Func.Wrapper() + + if loc.hasAttr(attrEscapes) { + if n.Op() == ir.ONAME { + if base.Flag.CompilingRuntime { + base.ErrorfAt(n.Pos(), 0, "%v escapes to heap, not allowed in runtime", n) + } + if base.Flag.LowerM != 0 { + base.WarnfAt(n.Pos(), "moved to heap: %v", n) + } + } else { + if base.Flag.LowerM != 0 && !goDeferWrapper { + base.WarnfAt(n.Pos(), "%v escapes to heap", n) + } + if logopt.Enabled() { + var e_curfn *ir.Func // TODO(mdempsky): Fix. + logopt.LogOpt(n.Pos(), "escape", "escape", ir.FuncName(e_curfn)) + } + } + n.SetEsc(ir.EscHeap) + } else { + if base.Flag.LowerM != 0 && n.Op() != ir.ONAME && !goDeferWrapper { + base.WarnfAt(n.Pos(), "%v does not escape", n) + } + n.SetEsc(ir.EscNone) + if !loc.hasAttr(attrPersists) { + switch n.Op() { + case ir.OCLOSURE: + n := n.(*ir.ClosureExpr) + n.SetTransient(true) + case ir.OMETHVALUE: + n := n.(*ir.SelectorExpr) + n.SetTransient(true) + case ir.OSLICELIT: + n := n.(*ir.CompLitExpr) + n.SetTransient(true) + } + } + } + + // If the result of a string->[]byte conversion is never mutated, + // then it can simply reuse the string's memory directly. + if base.Debug.ZeroCopy != 0 { + if n, ok := n.(*ir.ConvExpr); ok && n.Op() == ir.OSTR2BYTES && !loc.hasAttr(attrMutates) { + if base.Flag.LowerM >= 1 { + base.WarnfAt(n.Pos(), "zero-copy string->[]byte conversion") + } + n.SetOp(ir.OSTR2BYTESTMP) + } + } + } +} + +// inMutualBatch reports whether function fn is in the batch of +// mutually recursive functions being analyzed. When this is true, +// fn has not yet been analyzed, so its parameters and results +// should be incorporated directly into the flow graph instead of +// relying on its escape analysis tagging. 
+func (b *batch) inMutualBatch(fn *ir.Name) bool { + if fn.Defn != nil && fn.Defn.Esc() < escFuncTagged { + if fn.Defn.Esc() == escFuncUnknown { + base.FatalfAt(fn.Pos(), "graph inconsistency: %v", fn) + } + return true + } + return false +} + +const ( + escFuncUnknown = 0 + iota + escFuncPlanned + escFuncStarted + escFuncTagged +) + +// Mark labels that have no backjumps to them as not increasing e.loopdepth. +type labelState int + +const ( + looping labelState = 1 + iota + nonlooping +) + +func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string { + name := func() string { + if f.Nname != nil { + return f.Nname.Sym().Name + } + return fmt.Sprintf("arg#%d", narg) + } + + // Only report diagnostics for user code; + // not for wrappers generated around them. + // TODO(mdempsky): Generalize this. + diagnose := base.Flag.LowerM != 0 && !(fn.Wrapper() || fn.Dupok()) + + if len(fn.Body) == 0 { + // Assume that uintptr arguments must be held live across the call. + // This is most important for syscall.Syscall. + // See golang.org/issue/13372. + // This really doesn't have much to do with escape analysis per se, + // but we are reusing the ability to annotate an individual function + // argument and pass those annotations along to importing code. + fn.Pragma |= ir.UintptrKeepAlive + + if f.Type.IsUintptr() { + if diagnose { + base.WarnfAt(f.Pos, "assuming %v is unsafe uintptr", name()) + } + return "" + } + + if !f.Type.HasPointers() { // don't bother tagging for scalars + return "" + } + + var esc leaks + + // External functions are assumed unsafe, unless + // //go:noescape is given before the declaration. 
+ if fn.Pragma&ir.Noescape != 0 { + if diagnose && f.Sym != nil { + base.WarnfAt(f.Pos, "%v does not escape", name()) + } + esc.AddMutator(0) + esc.AddCallee(0) + } else { + if diagnose && f.Sym != nil { + base.WarnfAt(f.Pos, "leaking param: %v", name()) + } + esc.AddHeap(0) + } + + return esc.Encode() + } + + if fn.Pragma&ir.UintptrEscapes != 0 { + if f.Type.IsUintptr() { + if diagnose { + base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name()) + } + return "" + } + if f.IsDDD() && f.Type.Elem().IsUintptr() { + // final argument is ...uintptr. + if diagnose { + base.WarnfAt(f.Pos, "marking %v as escaping ...uintptr", name()) + } + return "" + } + } + + if !f.Type.HasPointers() { // don't bother tagging for scalars + return "" + } + + // Unnamed parameters are unused and therefore do not escape. + if f.Sym == nil || f.Sym.IsBlank() { + var esc leaks + return esc.Encode() + } + + n := f.Nname.(*ir.Name) + loc := b.oldLoc(n) + esc := loc.paramEsc + esc.Optimize() + + if diagnose && !loc.hasAttr(attrEscapes) { + b.reportLeaks(f.Pos, name(), esc, fn.Type()) + } + + return esc.Encode() +} + +func (b *batch) reportLeaks(pos src.XPos, name string, esc leaks, sig *types.Type) { + warned := false + if x := esc.Heap(); x >= 0 { + if x == 0 { + base.WarnfAt(pos, "leaking param: %v", name) + } else { + // TODO(mdempsky): Mention level=x like below? 
+ base.WarnfAt(pos, "leaking param content: %v", name) + } + warned = true + } + for i := 0; i < numEscResults; i++ { + if x := esc.Result(i); x >= 0 { + res := sig.Result(i).Nname.Sym().Name + base.WarnfAt(pos, "leaking param: %v to result %v level=%d", name, res, x) + warned = true + } + } + + if base.Debug.EscapeMutationsCalls <= 0 { + if !warned { + base.WarnfAt(pos, "%v does not escape", name) + } + return + } + + if x := esc.Mutator(); x >= 0 { + base.WarnfAt(pos, "mutates param: %v derefs=%v", name, x) + warned = true + } + if x := esc.Callee(); x >= 0 { + base.WarnfAt(pos, "calls param: %v derefs=%v", name, x) + warned = true + } + + if !warned { + base.WarnfAt(pos, "%v does not escape, mutate, or call", name) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/expr.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/expr.go new file mode 100644 index 0000000000000000000000000000000000000000..6aa5ad74136f4f369dc9d2e35ab26d2f4231b433 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/expr.go @@ -0,0 +1,341 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package escape + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" +) + +// expr models evaluating an expression n and flowing the result into +// hole k. 
+func (e *escape) expr(k hole, n ir.Node) { + if n == nil { + return + } + e.stmts(n.Init()) + e.exprSkipInit(k, n) +} + +func (e *escape) exprSkipInit(k hole, n ir.Node) { + if n == nil { + return + } + + lno := ir.SetPos(n) + defer func() { + base.Pos = lno + }() + + if k.derefs >= 0 && !n.Type().IsUntyped() && !n.Type().HasPointers() { + k.dst = &e.blankLoc + } + + switch n.Op() { + default: + base.Fatalf("unexpected expr: %s %v", n.Op().String(), n) + + case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OGETCALLERPC, ir.OGETCALLERSP, ir.OTYPE, ir.OMETHEXPR, ir.OLINKSYMOFFSET: + // nop + + case ir.ONAME: + n := n.(*ir.Name) + if n.Class == ir.PFUNC || n.Class == ir.PEXTERN { + return + } + e.flow(k, e.oldLoc(n)) + + case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT: + n := n.(*ir.UnaryExpr) + e.discard(n.X) + case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: + n := n.(*ir.BinaryExpr) + e.discard(n.X) + e.discard(n.Y) + case ir.OANDAND, ir.OOROR: + n := n.(*ir.LogicalExpr) + e.discard(n.X) + e.discard(n.Y) + case ir.OADDR: + n := n.(*ir.AddrExpr) + e.expr(k.addr(n, "address-of"), n.X) // "address-of" + case ir.ODEREF: + n := n.(*ir.StarExpr) + e.expr(k.deref(n, "indirection"), n.X) // "indirection" + case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER: + n := n.(*ir.SelectorExpr) + e.expr(k.note(n, "dot"), n.X) + case ir.ODOTPTR: + n := n.(*ir.SelectorExpr) + e.expr(k.deref(n, "dot of pointer"), n.X) // "dot of pointer" + case ir.ODOTTYPE, ir.ODOTTYPE2: + n := n.(*ir.TypeAssertExpr) + e.expr(k.dotType(n.Type(), n, "dot"), n.X) + case ir.ODYNAMICDOTTYPE, ir.ODYNAMICDOTTYPE2: + n := n.(*ir.DynamicTypeAssertExpr) + e.expr(k.dotType(n.Type(), n, "dot"), n.X) + // n.T doesn't need to be tracked; it always points to read-only storage. 
+ case ir.OINDEX: + n := n.(*ir.IndexExpr) + if n.X.Type().IsArray() { + e.expr(k.note(n, "fixed-array-index-of"), n.X) + } else { + // TODO(mdempsky): Fix why reason text. + e.expr(k.deref(n, "dot of pointer"), n.X) + } + e.discard(n.Index) + case ir.OINDEXMAP: + n := n.(*ir.IndexExpr) + e.discard(n.X) + e.discard(n.Index) + case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR: + n := n.(*ir.SliceExpr) + e.expr(k.note(n, "slice"), n.X) + e.discard(n.Low) + e.discard(n.High) + e.discard(n.Max) + + case ir.OCONV, ir.OCONVNOP: + n := n.(*ir.ConvExpr) + if (ir.ShouldCheckPtr(e.curfn, 2) || ir.ShouldAsanCheckPtr(e.curfn)) && n.Type().IsUnsafePtr() && n.X.Type().IsPtr() { + // When -d=checkptr=2 or -asan is enabled, + // treat conversions to unsafe.Pointer as an + // escaping operation. This allows better + // runtime instrumentation, since we can more + // easily detect object boundaries on the heap + // than the stack. + e.assignHeap(n.X, "conversion to unsafe.Pointer", n) + } else if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() { + e.unsafeValue(k, n.X) + } else { + e.expr(k, n.X) + } + case ir.OCONVIFACE: + n := n.(*ir.ConvExpr) + if !n.X.Type().IsInterface() && !types.IsDirectIface(n.X.Type()) { + k = e.spill(k, n) + } + e.expr(k.note(n, "interface-converted"), n.X) + case ir.OMAKEFACE: + n := n.(*ir.BinaryExpr) + // Note: n.X is not needed because it can never point to memory that might escape. + e.expr(k, n.Y) + case ir.OITAB, ir.OIDATA, ir.OSPTR: + n := n.(*ir.UnaryExpr) + e.expr(k, n.X) + case ir.OSLICE2ARR: + // Converting a slice to array is effectively a deref. 
+ n := n.(*ir.ConvExpr) + e.expr(k.deref(n, "slice-to-array"), n.X) + case ir.OSLICE2ARRPTR: + // the slice pointer flows directly to the result + n := n.(*ir.ConvExpr) + e.expr(k, n.X) + case ir.ORECV: + n := n.(*ir.UnaryExpr) + e.discard(n.X) + + case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OINLCALL, + ir.OLEN, ir.OCAP, ir.OMIN, ir.OMAX, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY, ir.ORECOVERFP, + ir.OUNSAFEADD, ir.OUNSAFESLICE, ir.OUNSAFESTRING, ir.OUNSAFESTRINGDATA, ir.OUNSAFESLICEDATA: + e.call([]hole{k}, n) + + case ir.ONEW: + n := n.(*ir.UnaryExpr) + e.spill(k, n) + + case ir.OMAKESLICE: + n := n.(*ir.MakeExpr) + e.spill(k, n) + e.discard(n.Len) + e.discard(n.Cap) + case ir.OMAKECHAN: + n := n.(*ir.MakeExpr) + e.discard(n.Len) + case ir.OMAKEMAP: + n := n.(*ir.MakeExpr) + e.spill(k, n) + e.discard(n.Len) + + case ir.OMETHVALUE: + // Flow the receiver argument to both the closure and + // to the receiver parameter. + + n := n.(*ir.SelectorExpr) + closureK := e.spill(k, n) + + m := n.Selection + + // We don't know how the method value will be called + // later, so conservatively assume the result + // parameters all flow to the heap. + // + // TODO(mdempsky): Change ks into a callback, so that + // we don't have to create this slice? 
+ var ks []hole + for i := m.Type.NumResults(); i > 0; i-- { + ks = append(ks, e.heapHole()) + } + name, _ := m.Nname.(*ir.Name) + paramK := e.tagHole(ks, name, m.Type.Recv()) + + e.expr(e.teeHole(paramK, closureK), n.X) + + case ir.OPTRLIT: + n := n.(*ir.AddrExpr) + e.expr(e.spill(k, n), n.X) + + case ir.OARRAYLIT: + n := n.(*ir.CompLitExpr) + for _, elt := range n.List { + if elt.Op() == ir.OKEY { + elt = elt.(*ir.KeyExpr).Value + } + e.expr(k.note(n, "array literal element"), elt) + } + + case ir.OSLICELIT: + n := n.(*ir.CompLitExpr) + k = e.spill(k, n) + + for _, elt := range n.List { + if elt.Op() == ir.OKEY { + elt = elt.(*ir.KeyExpr).Value + } + e.expr(k.note(n, "slice-literal-element"), elt) + } + + case ir.OSTRUCTLIT: + n := n.(*ir.CompLitExpr) + for _, elt := range n.List { + e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Value) + } + + case ir.OMAPLIT: + n := n.(*ir.CompLitExpr) + e.spill(k, n) + + // Map keys and values are always stored in the heap. + for _, elt := range n.List { + elt := elt.(*ir.KeyExpr) + e.assignHeap(elt.Key, "map literal key", n) + e.assignHeap(elt.Value, "map literal value", n) + } + + case ir.OCLOSURE: + n := n.(*ir.ClosureExpr) + k = e.spill(k, n) + e.closures = append(e.closures, closure{k, n}) + + if fn := n.Func; fn.IsHiddenClosure() { + for _, cv := range fn.ClosureVars { + if loc := e.oldLoc(cv); !loc.captured { + loc.captured = true + + // Ignore reassignments to the variable in straightline code + // preceding the first capture by a closure. 
+ if loc.loopDepth == e.loopDepth { + loc.reassigned = false + } + } + } + + for _, n := range fn.Dcl { + // Add locations for local variables of the + // closure, if needed, in case we're not including + // the closure func in the batch for escape + // analysis (happens for escape analysis called + // from reflectdata.methodWrapper) + if n.Op() == ir.ONAME && n.Opt == nil { + e.with(fn).newLoc(n, true) + } + } + e.walkFunc(fn) + } + + case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR: + n := n.(*ir.ConvExpr) + e.spill(k, n) + e.discard(n.X) + + case ir.OADDSTR: + n := n.(*ir.AddStringExpr) + e.spill(k, n) + + // Arguments of OADDSTR never escape; + // runtime.concatstrings makes sure of that. + e.discards(n.List) + + case ir.ODYNAMICTYPE: + // Nothing to do - argument is a *runtime._type (+ maybe a *runtime.itab) pointing to static data section + } +} + +// unsafeValue evaluates a uintptr-typed arithmetic expression looking +// for conversions from an unsafe.Pointer. +func (e *escape) unsafeValue(k hole, n ir.Node) { + if n.Type().Kind() != types.TUINTPTR { + base.Fatalf("unexpected type %v for %v", n.Type(), n) + } + if k.addrtaken { + base.Fatalf("unexpected addrtaken") + } + + e.stmts(n.Init()) + + switch n.Op() { + case ir.OCONV, ir.OCONVNOP: + n := n.(*ir.ConvExpr) + if n.X.Type().IsUnsafePtr() { + e.expr(k, n.X) + } else { + e.discard(n.X) + } + case ir.ODOTPTR: + n := n.(*ir.SelectorExpr) + if ir.IsReflectHeaderDataField(n) { + e.expr(k.deref(n, "reflect.Header.Data"), n.X) + } else { + e.discard(n.X) + } + case ir.OPLUS, ir.ONEG, ir.OBITNOT: + n := n.(*ir.UnaryExpr) + e.unsafeValue(k, n.X) + case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT: + n := n.(*ir.BinaryExpr) + e.unsafeValue(k, n.X) + e.unsafeValue(k, n.Y) + case ir.OLSH, ir.ORSH: + n := n.(*ir.BinaryExpr) + e.unsafeValue(k, n.X) + // RHS need not be uintptr-typed (#32959) and can't meaningfully + // flow pointers anyway. 
+ e.discard(n.Y) + default: + e.exprSkipInit(e.discardHole(), n) + } +} + +// discard evaluates an expression n for side-effects, but discards +// its value. +func (e *escape) discard(n ir.Node) { + e.expr(e.discardHole(), n) +} + +func (e *escape) discards(l ir.Nodes) { + for _, n := range l { + e.discard(n) + } +} + +// spill allocates a new location associated with expression n, flows +// its address to k, and returns a hole that flows values to it. It's +// intended for use with most expressions that allocate storage. +func (e *escape) spill(k hole, n ir.Node) hole { + loc := e.newLoc(n, false) + e.flow(k.addr(n, "spill"), loc) + return loc.asHole() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/graph.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/graph.go new file mode 100644 index 0000000000000000000000000000000000000000..75e2546a7b7a0604195e69e06cf057fe2c908b52 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/graph.go @@ -0,0 +1,376 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package escape + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/logopt" + "cmd/compile/internal/types" + "fmt" +) + +// Below we implement the methods for walking the AST and recording +// data flow edges. Note that because a sub-expression might have +// side-effects, it's important to always visit the entire AST. +// +// For example, write either: +// +// if x { +// e.discard(n.Left) +// } else { +// e.value(k, n.Left) +// } +// +// or +// +// if x { +// k = e.discardHole() +// } +// e.value(k, n.Left) +// +// Do NOT write: +// +// // BAD: possibly loses side-effects within n.Left +// if !x { +// e.value(k, n.Left) +// } + +// A location represents an abstract location that stores a Go +// variable. 
+type location struct { + n ir.Node // represented variable or expression, if any + curfn *ir.Func // enclosing function + edges []edge // incoming edges + loopDepth int // loopDepth at declaration + + // resultIndex records the tuple index (starting at 1) for + // PPARAMOUT variables within their function's result type. + // For non-PPARAMOUT variables it's 0. + resultIndex int + + // derefs and walkgen are used during walkOne to track the + // minimal dereferences from the walk root. + derefs int // >= -1 + walkgen uint32 + + // dst and dstEdgeindex track the next immediate assignment + // destination location during walkone, along with the index + // of the edge pointing back to this location. + dst *location + dstEdgeIdx int + + // queued is used by walkAll to track whether this location is + // in the walk queue. + queued bool + + // attrs is a bitset of location attributes. + attrs locAttr + + // paramEsc records the represented parameter's leak set. + paramEsc leaks + + captured bool // has a closure captured this variable? + reassigned bool // has this variable been reassigned? + addrtaken bool // has this variable's address been taken? +} + +type locAttr uint8 + +const ( + // attrEscapes indicates whether the represented variable's address + // escapes; that is, whether the variable must be heap allocated. + attrEscapes locAttr = 1 << iota + + // attrPersists indicates whether the represented expression's + // address outlives the statement; that is, whether its storage + // cannot be immediately reused. + attrPersists + + // attrMutates indicates whether pointers that are reachable from + // this location may have their addressed memory mutated. This is + // used to detect string->[]byte conversions that can be safely + // optimized away. + attrMutates + + // attrCalls indicates whether closures that are reachable from this + // location may be called without tracking their results. This is + // used to better optimize indirect closure calls. 
+ attrCalls +) + +func (l *location) hasAttr(attr locAttr) bool { return l.attrs&attr != 0 } + +// An edge represents an assignment edge between two Go variables. +type edge struct { + src *location + derefs int // >= -1 + notes *note +} + +func (l *location) asHole() hole { + return hole{dst: l} +} + +// leak records that parameter l leaks to sink. +func (l *location) leakTo(sink *location, derefs int) { + // If sink is a result parameter that doesn't escape (#44614) + // and we can fit return bits into the escape analysis tag, + // then record as a result leak. + if !sink.hasAttr(attrEscapes) && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn { + ri := sink.resultIndex - 1 + if ri < numEscResults { + // Leak to result parameter. + l.paramEsc.AddResult(ri, derefs) + return + } + } + + // Otherwise, record as heap leak. + l.paramEsc.AddHeap(derefs) +} + +// leakTo records that parameter l leaks to sink. +func (b *batch) leakTo(l, sink *location, derefs int) { + if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.hasAttr(attrEscapes) { + if base.Flag.LowerM >= 2 { + fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(sink), derefs) + } + explanation := b.explainPath(sink, l) + if logopt.Enabled() { + var e_curfn *ir.Func // TODO(mdempsky): Fix. + logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn), + fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(sink), derefs), explanation) + } + } + + // If sink is a result parameter that doesn't escape (#44614) + // and we can fit return bits into the escape analysis tag, + // then record as a result leak. + if !sink.hasAttr(attrEscapes) && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn { + if ri := sink.resultIndex - 1; ri < numEscResults { + // Leak to result parameter. + l.paramEsc.AddResult(ri, derefs) + return + } + } + + // Otherwise, record as heap leak. 
+ l.paramEsc.AddHeap(derefs) +} + +func (l *location) isName(c ir.Class) bool { + return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class == c +} + +// A hole represents a context for evaluation of a Go +// expression. E.g., when evaluating p in "x = **p", we'd have a hole +// with dst==x and derefs==2. +type hole struct { + dst *location + derefs int // >= -1 + notes *note + + // addrtaken indicates whether this context is taking the address of + // the expression, independent of whether the address will actually + // be stored into a variable. + addrtaken bool +} + +type note struct { + next *note + where ir.Node + why string +} + +func (k hole) note(where ir.Node, why string) hole { + if where == nil || why == "" { + base.Fatalf("note: missing where/why") + } + if base.Flag.LowerM >= 2 || logopt.Enabled() { + k.notes = ¬e{ + next: k.notes, + where: where, + why: why, + } + } + return k +} + +func (k hole) shift(delta int) hole { + k.derefs += delta + if k.derefs < -1 { + base.Fatalf("derefs underflow: %v", k.derefs) + } + k.addrtaken = delta < 0 + return k +} + +func (k hole) deref(where ir.Node, why string) hole { return k.shift(1).note(where, why) } +func (k hole) addr(where ir.Node, why string) hole { return k.shift(-1).note(where, why) } + +func (k hole) dotType(t *types.Type, where ir.Node, why string) hole { + if !t.IsInterface() && !types.IsDirectIface(t) { + k = k.shift(1) + } + return k.note(where, why) +} + +func (b *batch) flow(k hole, src *location) { + if k.addrtaken { + src.addrtaken = true + } + + dst := k.dst + if dst == &b.blankLoc { + return + } + if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ... 
+ return + } + if dst.hasAttr(attrEscapes) && k.derefs < 0 { // dst = &src + if base.Flag.LowerM >= 2 || logopt.Enabled() { + pos := base.FmtPos(src.n.Pos()) + if base.Flag.LowerM >= 2 { + fmt.Printf("%s: %v escapes to heap:\n", pos, src.n) + } + explanation := b.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{}) + if logopt.Enabled() { + var e_curfn *ir.Func // TODO(mdempsky): Fix. + logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation) + } + + } + src.attrs |= attrEscapes | attrPersists | attrMutates | attrCalls + return + } + + // TODO(mdempsky): Deduplicate edges? + dst.edges = append(dst.edges, edge{src: src, derefs: k.derefs, notes: k.notes}) +} + +func (b *batch) heapHole() hole { return b.heapLoc.asHole() } +func (b *batch) mutatorHole() hole { return b.mutatorLoc.asHole() } +func (b *batch) calleeHole() hole { return b.calleeLoc.asHole() } +func (b *batch) discardHole() hole { return b.blankLoc.asHole() } + +func (b *batch) oldLoc(n *ir.Name) *location { + if n.Canonical().Opt == nil { + base.FatalfAt(n.Pos(), "%v has no location", n) + } + return n.Canonical().Opt.(*location) +} + +func (e *escape) newLoc(n ir.Node, persists bool) *location { + if e.curfn == nil { + base.Fatalf("e.curfn isn't set") + } + if n != nil && n.Type() != nil && n.Type().NotInHeap() { + base.ErrorfAt(n.Pos(), 0, "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type()) + } + + if n != nil && n.Op() == ir.ONAME { + if canon := n.(*ir.Name).Canonical(); n != canon { + base.FatalfAt(n.Pos(), "newLoc on non-canonical %v (canonical is %v)", n, canon) + } + } + loc := &location{ + n: n, + curfn: e.curfn, + loopDepth: e.loopDepth, + } + if persists { + loc.attrs |= attrPersists + } + e.allLocs = append(e.allLocs, loc) + if n != nil { + if n.Op() == ir.ONAME { + n := n.(*ir.Name) + if n.Class == ir.PPARAM && n.Curfn == nil { + // ok; hidden parameter + } else if n.Curfn != e.curfn 
{ + base.FatalfAt(n.Pos(), "curfn mismatch: %v != %v for %v", n.Curfn, e.curfn, n) + } + + if n.Opt != nil { + base.FatalfAt(n.Pos(), "%v already has a location", n) + } + n.Opt = loc + } + } + return loc +} + +// teeHole returns a new hole that flows into each hole of ks, +// similar to the Unix tee(1) command. +func (e *escape) teeHole(ks ...hole) hole { + if len(ks) == 0 { + return e.discardHole() + } + if len(ks) == 1 { + return ks[0] + } + // TODO(mdempsky): Optimize if there's only one non-discard hole? + + // Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a + // new temporary location ltmp, wire it into place, and return + // a hole for "ltmp = _". + loc := e.newLoc(nil, false) + for _, k := range ks { + // N.B., "p = &q" and "p = &tmp; tmp = q" are not + // semantically equivalent. To combine holes like "l1 + // = _" and "l2 = &_", we'd need to wire them as "l1 = + // *ltmp" and "l2 = ltmp" and return "ltmp = &_" + // instead. + if k.derefs < 0 { + base.Fatalf("teeHole: negative derefs") + } + + e.flow(k, loc) + } + return loc.asHole() +} + +// later returns a new hole that flows into k, but some time later. +// Its main effect is to prevent immediate reuse of temporary +// variables introduced during Order. +func (e *escape) later(k hole) hole { + loc := e.newLoc(nil, true) + e.flow(k, loc) + return loc.asHole() +} + +// Fmt is called from node printing to print information about escape analysis results. 
+func Fmt(n ir.Node) string { + text := "" + switch n.Esc() { + case ir.EscUnknown: + break + + case ir.EscHeap: + text = "esc(h)" + + case ir.EscNone: + text = "esc(no)" + + case ir.EscNever: + text = "esc(N)" + + default: + text = fmt.Sprintf("esc(%d)", n.Esc()) + } + + if n.Op() == ir.ONAME { + n := n.(*ir.Name) + if loc, ok := n.Opt.(*location); ok && loc.loopDepth != 0 { + if text != "" { + text += " " + } + text += fmt.Sprintf("ld(%d)", loc.loopDepth) + } + } + + return text +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/leaks.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/leaks.go new file mode 100644 index 0000000000000000000000000000000000000000..942f87d2a225ff62d7bda73ab3252c67c680cd2b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/leaks.go @@ -0,0 +1,126 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package escape + +import ( + "cmd/compile/internal/base" + "math" + "strings" +) + +// A leaks represents a set of assignment flows from a parameter to +// the heap, mutator, callee, or to any of its function's (first +// numEscResults) result parameters. +type leaks [8]uint8 + +const ( + leakHeap = iota + leakMutator + leakCallee + leakResult0 +) + +const numEscResults = len(leaks{}) - leakResult0 + +// Heap returns the minimum deref count of any assignment flow from l +// to the heap. If no such flows exist, Heap returns -1. +func (l leaks) Heap() int { return l.get(leakHeap) } + +// Mutator returns the minimum deref count of any assignment flow from +// l to the pointer operand of an indirect assignment statement. If no +// such flows exist, Mutator returns -1. +func (l leaks) Mutator() int { return l.get(leakMutator) } + +// Callee returns the minimum deref count of any assignment flow from +// l to the callee operand of call expression. 
If no such flows exist, +// Callee returns -1. +func (l leaks) Callee() int { return l.get(leakCallee) } + +// Result returns the minimum deref count of any assignment flow from +// l to its function's i'th result parameter. If no such flows exist, +// Result returns -1. +func (l leaks) Result(i int) int { return l.get(leakResult0 + i) } + +// AddHeap adds an assignment flow from l to the heap. +func (l *leaks) AddHeap(derefs int) { l.add(leakHeap, derefs) } + +// AddMutator adds a flow from l to the mutator (i.e., a pointer +// operand of an indirect assignment statement). +func (l *leaks) AddMutator(derefs int) { l.add(leakMutator, derefs) } + +// AddCallee adds an assignment flow from l to the callee operand of a +// call expression. +func (l *leaks) AddCallee(derefs int) { l.add(leakCallee, derefs) } + +// AddResult adds an assignment flow from l to its function's i'th +// result parameter. +func (l *leaks) AddResult(i, derefs int) { l.add(leakResult0+i, derefs) } + +func (l leaks) get(i int) int { return int(l[i]) - 1 } + +func (l *leaks) add(i, derefs int) { + if old := l.get(i); old < 0 || derefs < old { + l.set(i, derefs) + } +} + +func (l *leaks) set(i, derefs int) { + v := derefs + 1 + if v < 0 { + base.Fatalf("invalid derefs count: %v", derefs) + } + if v > math.MaxUint8 { + v = math.MaxUint8 + } + + l[i] = uint8(v) +} + +// Optimize removes result flow paths that are equal in length or +// longer than the shortest heap flow path. +func (l *leaks) Optimize() { + // If we have a path to the heap, then there's no use in + // keeping equal or longer paths elsewhere. + if x := l.Heap(); x >= 0 { + for i := 1; i < len(*l); i++ { + if l.get(i) >= x { + l.set(i, -1) + } + } + } +} + +var leakTagCache = map[leaks]string{} + +// Encode converts l into a binary string for export data. +func (l leaks) Encode() string { + if l.Heap() == 0 { + // Space optimization: empty string encodes more + // efficiently in export data. 
+ return "" + } + if s, ok := leakTagCache[l]; ok { + return s + } + + n := len(l) + for n > 0 && l[n-1] == 0 { + n-- + } + s := "esc:" + string(l[:n]) + leakTagCache[l] = s + return s +} + +// parseLeaks parses a binary string representing a leaks. +func parseLeaks(s string) leaks { + var l leaks + if !strings.HasPrefix(s, "esc:") { + l.AddHeap(0) + return l + } + copy(l[:], s[4:]) + return l +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/solve.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/solve.go new file mode 100644 index 0000000000000000000000000000000000000000..2675a16a241fe32055b96ae403896f203c476aa6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/solve.go @@ -0,0 +1,326 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package escape + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/logopt" + "cmd/internal/src" + "fmt" + "strings" +) + +// walkAll computes the minimal dereferences between all pairs of +// locations. +func (b *batch) walkAll() { + // We use a work queue to keep track of locations that we need + // to visit, and repeatedly walk until we reach a fixed point. + // + // We walk once from each location (including the heap), and + // then re-enqueue each location on its transition from + // !persists->persists and !escapes->escapes, which can each + // happen at most once. So we take Θ(len(e.allLocs)) walks. + + // LIFO queue, has enough room for e.allLocs and e.heapLoc. 
+ todo := make([]*location, 0, len(b.allLocs)+1) + enqueue := func(loc *location) { + if !loc.queued { + todo = append(todo, loc) + loc.queued = true + } + } + + for _, loc := range b.allLocs { + enqueue(loc) + } + enqueue(&b.mutatorLoc) + enqueue(&b.calleeLoc) + enqueue(&b.heapLoc) + + var walkgen uint32 + for len(todo) > 0 { + root := todo[len(todo)-1] + todo = todo[:len(todo)-1] + root.queued = false + + walkgen++ + b.walkOne(root, walkgen, enqueue) + } +} + +// walkOne computes the minimal number of dereferences from root to +// all other locations. +func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location)) { + // The data flow graph has negative edges (from addressing + // operations), so we use the Bellman-Ford algorithm. However, + // we don't have to worry about infinite negative cycles since + // we bound intermediate dereference counts to 0. + + root.walkgen = walkgen + root.derefs = 0 + root.dst = nil + + if root.hasAttr(attrCalls) { + if clo, ok := root.n.(*ir.ClosureExpr); ok { + if fn := clo.Func; b.inMutualBatch(fn.Nname) && !fn.ClosureResultsLost() { + fn.SetClosureResultsLost(true) + + // Re-flow from the closure's results, now that we're aware + // we lost track of them. + for _, result := range fn.Type().Results() { + enqueue(b.oldLoc(result.Nname.(*ir.Name))) + } + } + } + } + + todo := []*location{root} // LIFO queue + for len(todo) > 0 { + l := todo[len(todo)-1] + todo = todo[:len(todo)-1] + + derefs := l.derefs + var newAttrs locAttr + + // If l.derefs < 0, then l's address flows to root. + addressOf := derefs < 0 + if addressOf { + // For a flow path like "root = &l; l = x", + // l's address flows to root, but x's does + // not. We recognize this by lower bounding + // derefs at 0. + derefs = 0 + + // If l's address flows somewhere that + // outlives it, then l needs to be heap + // allocated. 
+ if b.outlives(root, l) { + if !l.hasAttr(attrEscapes) && (logopt.Enabled() || base.Flag.LowerM >= 2) { + if base.Flag.LowerM >= 2 { + fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n) + } + explanation := b.explainPath(root, l) + if logopt.Enabled() { + var e_curfn *ir.Func // TODO(mdempsky): Fix. + logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation) + } + } + newAttrs |= attrEscapes | attrPersists | attrMutates | attrCalls + } else + // If l's address flows to a persistent location, then l needs + // to persist too. + if root.hasAttr(attrPersists) { + newAttrs |= attrPersists + } + } + + if derefs == 0 { + newAttrs |= root.attrs & (attrMutates | attrCalls) + } + + // l's value flows to root. If l is a function + // parameter and root is the heap or a + // corresponding result parameter, then record + // that value flow for tagging the function + // later. + if l.isName(ir.PPARAM) { + if b.outlives(root, l) { + if !l.hasAttr(attrEscapes) && (logopt.Enabled() || base.Flag.LowerM >= 2) { + if base.Flag.LowerM >= 2 { + fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), derefs) + } + explanation := b.explainPath(root, l) + if logopt.Enabled() { + var e_curfn *ir.Func // TODO(mdempsky): Fix. 
+ logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn), + fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation) + } + } + l.leakTo(root, derefs) + } + if root.hasAttr(attrMutates) { + l.paramEsc.AddMutator(derefs) + } + if root.hasAttr(attrCalls) { + l.paramEsc.AddCallee(derefs) + } + } + + if newAttrs&^l.attrs != 0 { + l.attrs |= newAttrs + enqueue(l) + if l.attrs&attrEscapes != 0 { + continue + } + } + + for i, edge := range l.edges { + if edge.src.hasAttr(attrEscapes) { + continue + } + d := derefs + edge.derefs + if edge.src.walkgen != walkgen || edge.src.derefs > d { + edge.src.walkgen = walkgen + edge.src.derefs = d + edge.src.dst = l + edge.src.dstEdgeIdx = i + todo = append(todo, edge.src) + } + } + } +} + +// explainPath prints an explanation of how src flows to the walk root. +func (b *batch) explainPath(root, src *location) []*logopt.LoggedOpt { + visited := make(map[*location]bool) + pos := base.FmtPos(src.n.Pos()) + var explanation []*logopt.LoggedOpt + for { + // Prevent infinite loop. 
+ if visited[src] { + if base.Flag.LowerM >= 2 { + fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos) + } + break + } + visited[src] = true + dst := src.dst + edge := &dst.edges[src.dstEdgeIdx] + if edge.src != src { + base.Fatalf("path inconsistency: %v != %v", edge.src, src) + } + + explanation = b.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation) + + if dst == root { + break + } + src = dst + } + + return explanation +} + +func (b *batch) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt { + ops := "&" + if derefs >= 0 { + ops = strings.Repeat("*", derefs) + } + print := base.Flag.LowerM >= 2 + + flow := fmt.Sprintf(" flow: %s = %s%v:", b.explainLoc(dst), ops, b.explainLoc(srcloc)) + if print { + fmt.Printf("%s:%s\n", pos, flow) + } + if logopt.Enabled() { + var epos src.XPos + if notes != nil { + epos = notes.where.Pos() + } else if srcloc != nil && srcloc.n != nil { + epos = srcloc.n.Pos() + } + var e_curfn *ir.Func // TODO(mdempsky): Fix. + explanation = append(explanation, logopt.NewLoggedOpt(epos, epos, "escflow", "escape", ir.FuncName(e_curfn), flow)) + } + + for note := notes; note != nil; note = note.next { + if print { + fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos())) + } + if logopt.Enabled() { + var e_curfn *ir.Func // TODO(mdempsky): Fix. + notePos := note.where.Pos() + explanation = append(explanation, logopt.NewLoggedOpt(notePos, notePos, "escflow", "escape", ir.FuncName(e_curfn), + fmt.Sprintf(" from %v (%v)", note.where, note.why))) + } + } + return explanation +} + +func (b *batch) explainLoc(l *location) string { + if l == &b.heapLoc { + return "{heap}" + } + if l.n == nil { + // TODO(mdempsky): Omit entirely. 
+ return "{temp}" + } + if l.n.Op() == ir.ONAME { + return fmt.Sprintf("%v", l.n) + } + return fmt.Sprintf("{storage for %v}", l.n) +} + +// outlives reports whether values stored in l may survive beyond +// other's lifetime if stack allocated. +func (b *batch) outlives(l, other *location) bool { + // The heap outlives everything. + if l.hasAttr(attrEscapes) { + return true + } + + // Pseudo-locations that don't really exist. + if l == &b.mutatorLoc || l == &b.calleeLoc { + return false + } + + // We don't know what callers do with returned values, so + // pessimistically we need to assume they flow to the heap and + // outlive everything too. + if l.isName(ir.PPARAMOUT) { + // Exception: Closures can return locations allocated outside of + // them without forcing them to the heap, if we can statically + // identify all call sites. For example: + // + // var u int // okay to stack allocate + // fn := func() *int { return &u }() + // *fn() = 42 + if containsClosure(other.curfn, l.curfn) && !l.curfn.ClosureResultsLost() { + return false + } + + return true + } + + // If l and other are within the same function, then l + // outlives other if it was declared outside other's loop + // scope. For example: + // + // var l *int + // for { + // l = new(int) // must heap allocate: outlives for loop + // } + if l.curfn == other.curfn && l.loopDepth < other.loopDepth { + return true + } + + // If other is declared within a child closure of where l is + // declared, then l outlives it. For example: + // + // var l *int + // func() { + // l = new(int) // must heap allocate: outlives call frame (if not inlined) + // }() + if containsClosure(l.curfn, other.curfn) { + return true + } + + return false +} + +// containsClosure reports whether c is a closure contained within f. +func containsClosure(f, c *ir.Func) bool { + // Common cases. + if f == c || c.OClosure == nil { + return false + } + + // Closures within function Foo are named like "Foo.funcN..." 
+ // TODO(mdempsky): Better way to recognize this. + fn := f.Sym().Name + cn := c.Sym().Name + return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.' +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/stmt.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/stmt.go new file mode 100644 index 0000000000000000000000000000000000000000..b766864a306f8297d7612555da3a29c6cda37da5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/stmt.go @@ -0,0 +1,218 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package escape + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "fmt" +) + +// stmt evaluates a single Go statement. +func (e *escape) stmt(n ir.Node) { + if n == nil { + return + } + + lno := ir.SetPos(n) + defer func() { + base.Pos = lno + }() + + if base.Flag.LowerM > 2 { + fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, e.curfn, n) + } + + e.stmts(n.Init()) + + switch n.Op() { + default: + base.Fatalf("unexpected stmt: %v", n) + + case ir.OFALL, ir.OINLMARK: + // nop + + case ir.OBREAK, ir.OCONTINUE, ir.OGOTO: + // TODO(mdempsky): Handle dead code? + + case ir.OBLOCK: + n := n.(*ir.BlockStmt) + e.stmts(n.List) + + case ir.ODCL: + // Record loop depth at declaration. 
+ n := n.(*ir.Decl) + if !ir.IsBlank(n.X) { + e.dcl(n.X) + } + + case ir.OLABEL: + n := n.(*ir.LabelStmt) + if n.Label.IsBlank() { + break + } + switch e.labels[n.Label] { + case nonlooping: + if base.Flag.LowerM > 2 { + fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n) + } + case looping: + if base.Flag.LowerM > 2 { + fmt.Printf("%v: %v looping label\n", base.FmtPos(base.Pos), n) + } + e.loopDepth++ + default: + base.Fatalf("label %v missing tag", n.Label) + } + delete(e.labels, n.Label) + + case ir.OIF: + n := n.(*ir.IfStmt) + e.discard(n.Cond) + e.block(n.Body) + e.block(n.Else) + + case ir.OCHECKNIL: + n := n.(*ir.UnaryExpr) + e.discard(n.X) + + case ir.OFOR: + n := n.(*ir.ForStmt) + base.Assert(!n.DistinctVars) // Should all be rewritten before escape analysis + e.loopDepth++ + e.discard(n.Cond) + e.stmt(n.Post) + e.block(n.Body) + e.loopDepth-- + + case ir.ORANGE: + // for Key, Value = range X { Body } + n := n.(*ir.RangeStmt) + base.Assert(!n.DistinctVars) // Should all be rewritten before escape analysis + + // X is evaluated outside the loop and persists until the loop + // terminates. + tmp := e.newLoc(nil, true) + e.expr(tmp.asHole(), n.X) + + e.loopDepth++ + ks := e.addrs([]ir.Node{n.Key, n.Value}) + if n.X.Type().IsArray() { + e.flow(ks[1].note(n, "range"), tmp) + } else { + e.flow(ks[1].deref(n, "range-deref"), tmp) + } + e.reassigned(ks, n) + + e.block(n.Body) + e.loopDepth-- + + case ir.OSWITCH: + n := n.(*ir.SwitchStmt) + + if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok { + var ks []hole + if guard.Tag != nil { + for _, cas := range n.Cases { + cv := cas.Var + k := e.dcl(cv) // type switch variables have no ODCL. 
+ if cv.Type().HasPointers() { + ks = append(ks, k.dotType(cv.Type(), cas, "switch case")) + } + } + } + e.expr(e.teeHole(ks...), n.Tag.(*ir.TypeSwitchGuard).X) + } else { + e.discard(n.Tag) + } + + for _, cas := range n.Cases { + e.discards(cas.List) + e.block(cas.Body) + } + + case ir.OSELECT: + n := n.(*ir.SelectStmt) + for _, cas := range n.Cases { + e.stmt(cas.Comm) + e.block(cas.Body) + } + case ir.ORECV: + // TODO(mdempsky): Consider e.discard(n.Left). + n := n.(*ir.UnaryExpr) + e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit + case ir.OSEND: + n := n.(*ir.SendStmt) + e.discard(n.Chan) + e.assignHeap(n.Value, "send", n) + + case ir.OAS: + n := n.(*ir.AssignStmt) + e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n) + case ir.OASOP: + n := n.(*ir.AssignOpStmt) + // TODO(mdempsky): Worry about OLSH/ORSH? + e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n) + case ir.OAS2: + n := n.(*ir.AssignListStmt) + e.assignList(n.Lhs, n.Rhs, "assign-pair", n) + + case ir.OAS2DOTTYPE: // v, ok = x.(type) + n := n.(*ir.AssignListStmt) + e.assignList(n.Lhs, n.Rhs, "assign-pair-dot-type", n) + case ir.OAS2MAPR: // v, ok = m[k] + n := n.(*ir.AssignListStmt) + e.assignList(n.Lhs, n.Rhs, "assign-pair-mapr", n) + case ir.OAS2RECV, ir.OSELRECV2: // v, ok = <-ch + n := n.(*ir.AssignListStmt) + e.assignList(n.Lhs, n.Rhs, "assign-pair-receive", n) + + case ir.OAS2FUNC: + n := n.(*ir.AssignListStmt) + e.stmts(n.Rhs[0].Init()) + ks := e.addrs(n.Lhs) + e.call(ks, n.Rhs[0]) + e.reassigned(ks, n) + case ir.ORETURN: + n := n.(*ir.ReturnStmt) + results := e.curfn.Type().Results() + dsts := make([]ir.Node, len(results)) + for i, res := range results { + dsts[i] = res.Nname.(*ir.Name) + } + e.assignList(dsts, n.Results, "return", n) + case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OINLCALL, ir.OCLEAR, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP: + e.call(nil, n) + case ir.OGO, ir.ODEFER: + n := n.(*ir.GoDeferStmt) + 
e.goDeferStmt(n) + + case ir.OTAILCALL: + n := n.(*ir.TailCallStmt) + e.call(nil, n.Call) + } +} + +func (e *escape) stmts(l ir.Nodes) { + for _, n := range l { + e.stmt(n) + } +} + +// block is like stmts, but preserves loopDepth. +func (e *escape) block(l ir.Nodes) { + old := e.loopDepth + e.stmts(l) + e.loopDepth = old +} + +func (e *escape) dcl(n *ir.Name) hole { + if n.Curfn != e.curfn || n.IsClosureVar() { + base.Fatalf("bad declaration of %v", n) + } + loc := e.oldLoc(n) + loc.loopDepth = e.loopDepth + return loc.asHole() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/utils.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/utils.go new file mode 100644 index 0000000000000000000000000000000000000000..bd1d2c22a2cfb8b85bc70f7be4e2ee0c6e9b089b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/escape/utils.go @@ -0,0 +1,222 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package escape + +import ( + "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" +) + +func isSliceSelfAssign(dst, src ir.Node) bool { + // Detect the following special case. + // + // func (b *Buffer) Foo() { + // n, m := ... + // b.buf = b.buf[n:m] + // } + // + // This assignment is a no-op for escape analysis, + // it does not store any new pointers into b that were not already there. + // However, without this special case b will escape, because we assign to OIND/ODOTPTR. + // Here we assume that the statement will not contain calls, + // that is, that order will move any calls to init. + // Otherwise base ONAME value could change between the moments + // when we evaluate it for dst and for src. + + // dst is ONAME dereference. 
+ var dstX ir.Node + switch dst.Op() { + default: + return false + case ir.ODEREF: + dst := dst.(*ir.StarExpr) + dstX = dst.X + case ir.ODOTPTR: + dst := dst.(*ir.SelectorExpr) + dstX = dst.X + } + if dstX.Op() != ir.ONAME { + return false + } + // src is a slice operation. + switch src.Op() { + case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR: + // OK. + case ir.OSLICEARR, ir.OSLICE3ARR: + // Since arrays are embedded into containing object, + // slice of non-pointer array will introduce a new pointer into b that was not already there + // (pointer to b itself). After such assignment, if b contents escape, + // b escapes as well. If we ignore such OSLICEARR, we will conclude + // that b does not escape when b contents do. + // + // Pointer to an array is OK since it's not stored inside b directly. + // For slicing an array (not pointer to array), there is an implicit OADDR. + // We check that to determine non-pointer array slicing. + src := src.(*ir.SliceExpr) + if src.X.Op() == ir.OADDR { + return false + } + default: + return false + } + // slice is applied to ONAME dereference. + var baseX ir.Node + switch base := src.(*ir.SliceExpr).X; base.Op() { + default: + return false + case ir.ODEREF: + base := base.(*ir.StarExpr) + baseX = base.X + case ir.ODOTPTR: + base := base.(*ir.SelectorExpr) + baseX = base.X + } + if baseX.Op() != ir.ONAME { + return false + } + // dst and src reference the same base ONAME. + return dstX.(*ir.Name) == baseX.(*ir.Name) +} + +// isSelfAssign reports whether assignment from src to dst can +// be ignored by the escape analysis as it's effectively a self-assignment. +func isSelfAssign(dst, src ir.Node) bool { + if isSliceSelfAssign(dst, src) { + return true + } + + // Detect trivial assignments that assign back to the same object. + // + // It covers these cases: + // val.x = val.y + // val.x[i] = val.y[j] + // val.x1.x2 = val.x1.y2 + // ... etc + // + // These assignments do not change assigned object lifetime. 
+ + if dst == nil || src == nil || dst.Op() != src.Op() { + return false + } + + // The expression prefix must be both "safe" and identical. + switch dst.Op() { + case ir.ODOT, ir.ODOTPTR: + // Safe trailing accessors that are permitted to differ. + dst := dst.(*ir.SelectorExpr) + src := src.(*ir.SelectorExpr) + return ir.SameSafeExpr(dst.X, src.X) + case ir.OINDEX: + dst := dst.(*ir.IndexExpr) + src := src.(*ir.IndexExpr) + if mayAffectMemory(dst.Index) || mayAffectMemory(src.Index) { + return false + } + return ir.SameSafeExpr(dst.X, src.X) + default: + return false + } +} + +// mayAffectMemory reports whether evaluation of n may affect the program's +// memory state. If the expression can't affect memory state, then it can be +// safely ignored by the escape analysis. +func mayAffectMemory(n ir.Node) bool { + // We may want to use a list of "memory safe" ops instead of generally + // "side-effect free", which would include all calls and other ops that can + // allocate or change global state. For now, it's safer to start with the latter. + // + // We're ignoring things like division by zero, index out of range, + // and nil pointer dereference here. + + // TODO(rsc): It seems like it should be possible to replace this with + // an ir.Any looking for any op that's not the ones in the case statement. + // But that produces changes in the compiled output detected by buildall. 
+ switch n.Op() { + case ir.ONAME, ir.OLITERAL, ir.ONIL: + return false + + case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD: + n := n.(*ir.BinaryExpr) + return mayAffectMemory(n.X) || mayAffectMemory(n.Y) + + case ir.OINDEX: + n := n.(*ir.IndexExpr) + return mayAffectMemory(n.X) || mayAffectMemory(n.Index) + + case ir.OCONVNOP, ir.OCONV: + n := n.(*ir.ConvExpr) + return mayAffectMemory(n.X) + + case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG: + n := n.(*ir.UnaryExpr) + return mayAffectMemory(n.X) + + case ir.ODOT, ir.ODOTPTR: + n := n.(*ir.SelectorExpr) + return mayAffectMemory(n.X) + + case ir.ODEREF: + n := n.(*ir.StarExpr) + return mayAffectMemory(n.X) + + default: + return true + } +} + +// HeapAllocReason returns the reason the given Node must be heap +// allocated, or the empty string if it doesn't. +func HeapAllocReason(n ir.Node) string { + if n == nil || n.Type() == nil { + return "" + } + + // Parameters are always passed via the stack. 
+ if n.Op() == ir.ONAME { + n := n.(*ir.Name) + if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT { + return "" + } + } + + if n.Type().Size() > ir.MaxStackVarSize { + return "too large for stack" + } + if n.Type().Alignment() > int64(types.PtrSize) { + return "too aligned for stack" + } + + if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Size() > ir.MaxImplicitStackVarSize { + return "too large for stack" + } + if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Alignment() > int64(types.PtrSize) { + return "too aligned for stack" + } + + if n.Op() == ir.OCLOSURE && typecheck.ClosureType(n.(*ir.ClosureExpr)).Size() > ir.MaxImplicitStackVarSize { + return "too large for stack" + } + if n.Op() == ir.OMETHVALUE && typecheck.MethodValueType(n.(*ir.SelectorExpr)).Size() > ir.MaxImplicitStackVarSize { + return "too large for stack" + } + + if n.Op() == ir.OMAKESLICE { + n := n.(*ir.MakeExpr) + r := n.Cap + if r == nil { + r = n.Len + } + if !ir.IsSmallIntConst(r) { + return "non-constant size" + } + if t := n.Type(); t.Elem().Size() != 0 && ir.Int64Val(r) > ir.MaxImplicitStackVarSize/t.Elem().Size() { + return "too large for stack" + } + } + + return "" +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/gc/compile.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/gc/compile.go new file mode 100644 index 0000000000000000000000000000000000000000..0f57f8ca8293d18b3c3dcce662050625ee761d7c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/gc/compile.go @@ -0,0 +1,198 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gc + +import ( + "internal/race" + "math/rand" + "sort" + "sync" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/liveness" + "cmd/compile/internal/objw" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/staticinit" + "cmd/compile/internal/types" + "cmd/compile/internal/walk" + "cmd/internal/obj" +) + +// "Portable" code generation. + +var ( + compilequeue []*ir.Func // functions waiting to be compiled +) + +func enqueueFunc(fn *ir.Func) { + if ir.CurFunc != nil { + base.FatalfAt(fn.Pos(), "enqueueFunc %v inside %v", fn, ir.CurFunc) + } + + if ir.FuncName(fn) == "_" { + // Skip compiling blank functions. + // Frontend already reported any spec-mandated errors (#29870). + return + } + + // Don't try compiling dead hidden closure. + if fn.IsDeadcodeClosure() { + return + } + + if clo := fn.OClosure; clo != nil && !ir.IsTrivialClosure(clo) { + return // we'll get this as part of its enclosing function + } + + if ssagen.CreateWasmImportWrapper(fn) { + return + } + + if len(fn.Body) == 0 { + // Initialize ABI wrappers if necessary. + ir.InitLSym(fn, false) + types.CalcSize(fn.Type()) + a := ssagen.AbiForBodylessFuncStackMap(fn) + abiInfo := a.ABIAnalyzeFuncType(fn.Type()) // abiInfo has spill/home locations for wrapper + liveness.WriteFuncMap(fn, abiInfo) + if fn.ABI == obj.ABI0 { + x := ssagen.EmitArgInfo(fn, abiInfo) + objw.Global(x, int32(len(x.P)), obj.RODATA|obj.LOCAL) + } + return + } + + errorsBefore := base.Errors() + + todo := []*ir.Func{fn} + for len(todo) > 0 { + next := todo[len(todo)-1] + todo = todo[:len(todo)-1] + + prepareFunc(next) + todo = append(todo, next.Closures...) + } + + if base.Errors() > errorsBefore { + return + } + + // Enqueue just fn itself. compileFunctions will handle + // scheduling compilation of its closures after it's done. + compilequeue = append(compilequeue, fn) +} + +// prepareFunc handles any remaining frontend compilation tasks that +// aren't yet safe to perform concurrently. 
+func prepareFunc(fn *ir.Func) { + // Set up the function's LSym early to avoid data races with the assemblers. + // Do this before walk, as walk needs the LSym to set attributes/relocations + // (e.g. in MarkTypeUsedInInterface). + ir.InitLSym(fn, true) + + // If this function is a compiler-generated outlined global map + // initializer function, register its LSym for later processing. + if staticinit.MapInitToVar != nil { + if _, ok := staticinit.MapInitToVar[fn]; ok { + ssagen.RegisterMapInitLsym(fn.Linksym()) + } + } + + // Calculate parameter offsets. + types.CalcSize(fn.Type()) + + ir.CurFunc = fn + walk.Walk(fn) + ir.CurFunc = nil // enforce no further uses of CurFunc +} + +// compileFunctions compiles all functions in compilequeue. +// It fans out nBackendWorkers to do the work +// and waits for them to complete. +func compileFunctions() { + if race.Enabled { + // Randomize compilation order to try to shake out races. + tmp := make([]*ir.Func, len(compilequeue)) + perm := rand.Perm(len(compilequeue)) + for i, v := range perm { + tmp[v] = compilequeue[i] + } + copy(compilequeue, tmp) + } else { + // Compile the longest functions first, + // since they're most likely to be the slowest. + // This helps avoid stragglers. + sort.Slice(compilequeue, func(i, j int) bool { + return len(compilequeue[i].Body) > len(compilequeue[j].Body) + }) + } + + // By default, we perform work right away on the current goroutine + // as the solo worker. + queue := func(work func(int)) { + work(0) + } + + if nWorkers := base.Flag.LowerC; nWorkers > 1 { + // For concurrent builds, we allow the work queue + // to grow arbitrarily large, but only nWorkers work items + // can be running concurrently. 
+ workq := make(chan func(int)) + done := make(chan int) + go func() { + ids := make([]int, nWorkers) + for i := range ids { + ids[i] = i + } + var pending []func(int) + for { + select { + case work := <-workq: + pending = append(pending, work) + case id := <-done: + ids = append(ids, id) + } + for len(pending) > 0 && len(ids) > 0 { + work := pending[len(pending)-1] + id := ids[len(ids)-1] + pending = pending[:len(pending)-1] + ids = ids[:len(ids)-1] + go func() { + work(id) + done <- id + }() + } + } + }() + queue = func(work func(int)) { + workq <- work + } + } + + var wg sync.WaitGroup + var compile func([]*ir.Func) + compile = func(fns []*ir.Func) { + wg.Add(len(fns)) + for _, fn := range fns { + fn := fn + queue(func(worker int) { + ssagen.Compile(fn, worker) + compile(fn.Closures) + wg.Done() + }) + } + } + + types.CalcSizeDisabled = true // not safe to calculate sizes concurrently + base.Ctxt.InParallel = true + + compile(compilequeue) + compilequeue = nil + wg.Wait() + + base.Ctxt.InParallel = false + types.CalcSizeDisabled = false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/gc/export.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/gc/export.go new file mode 100644 index 0000000000000000000000000000000000000000..c93f008ba2c26bf6e9277acf48b2c411daea7e58 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/gc/export.go @@ -0,0 +1,51 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gc + +import ( + "fmt" + "go/constant" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/bio" +) + +func dumpasmhdr() { + b, err := bio.Create(base.Flag.AsmHdr) + if err != nil { + base.Fatalf("%v", err) + } + fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", types.LocalPkg.Name) + for _, n := range typecheck.Target.AsmHdrDecls { + if n.Sym().IsBlank() { + continue + } + switch n.Op() { + case ir.OLITERAL: + t := n.Val().Kind() + if t == constant.Float || t == constant.Complex { + break + } + fmt.Fprintf(b, "#define const_%s %v\n", n.Sym().Name, n.Val().ExactString()) + + case ir.OTYPE: + t := n.Type() + if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() { + break + } + fmt.Fprintf(b, "#define %s__size %d\n", n.Sym().Name, int(t.Size())) + for _, f := range t.Fields() { + if !f.Sym.IsBlank() { + fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym().Name, f.Sym.Name, int(f.Offset)) + } + } + } + } + + b.Close() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/gc/main.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/gc/main.go new file mode 100644 index 0000000000000000000000000000000000000000..7e5069fcedc26af486c43bbb549ae2c1bd4ed3cb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/gc/main.go @@ -0,0 +1,391 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gc + +import ( + "bufio" + "bytes" + "cmd/compile/internal/base" + "cmd/compile/internal/coverage" + "cmd/compile/internal/dwarfgen" + "cmd/compile/internal/escape" + "cmd/compile/internal/inline" + "cmd/compile/internal/inline/interleaved" + "cmd/compile/internal/ir" + "cmd/compile/internal/logopt" + "cmd/compile/internal/loopvar" + "cmd/compile/internal/noder" + "cmd/compile/internal/pgo" + "cmd/compile/internal/pkginit" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/rttype" + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/staticinit" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/dwarf" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" + "flag" + "fmt" + "internal/buildcfg" + "log" + "os" + "runtime" +) + +// handlePanic ensures that we print out an "internal compiler error" for any panic +// or runtime exception during front-end compiler processing (unless there have +// already been some compiler errors). It may also be invoked from the explicit panic in +// hcrash(), in which case, we pass the panic on through. +func handlePanic() { + if err := recover(); err != nil { + if err == "-h" { + // Force real panic now with -h option (hcrash) - the error + // information will have already been printed. + panic(err) + } + base.Fatalf("panic: %v", err) + } +} + +// Main parses flags and Go source files specified in the command-line +// arguments, type-checks the parsed Go package, compiles functions to machine +// code, and finally writes the compiled package definition to disk. 
+func Main(archInit func(*ssagen.ArchInfo)) { + base.Timer.Start("fe", "init") + + defer handlePanic() + + archInit(&ssagen.Arch) + + base.Ctxt = obj.Linknew(ssagen.Arch.LinkArch) + base.Ctxt.DiagFunc = base.Errorf + base.Ctxt.DiagFlush = base.FlushErrors + base.Ctxt.Bso = bufio.NewWriter(os.Stdout) + + // UseBASEntries is preferred because it shaves about 2% off build time, but LLDB, dsymutil, and dwarfdump + // on Darwin don't support it properly, especially since macOS 10.14 (Mojave). This is exposed as a flag + // to allow testing with LLVM tools on Linux, and to help with reporting this bug to the LLVM project. + // See bugs 31188 and 21945 (CLs 170638, 98075, 72371). + base.Ctxt.UseBASEntries = base.Ctxt.Headtype != objabi.Hdarwin + + base.DebugSSA = ssa.PhaseOption + base.ParseFlags() + + if os.Getenv("GOGC") == "" { // GOGC set disables starting heap adjustment + // More processors will use more heap, but assume that more memory is available. + // So 1 processor -> 40MB, 4 -> 64MB, 12 -> 128MB + base.AdjustStartingHeap(uint64(32+8*base.Flag.LowerC) << 20) + } + + types.LocalPkg = types.NewPkg(base.Ctxt.Pkgpath, "") + + // pseudo-package, for scoping + types.BuiltinPkg = types.NewPkg("go.builtin", "") // TODO(gri) name this package go.builtin? + types.BuiltinPkg.Prefix = "go:builtin" + + // pseudo-package, accessed by import "unsafe" + types.UnsafePkg = types.NewPkg("unsafe", "unsafe") + + // Pseudo-package that contains the compiler's builtin + // declarations for package runtime. These are declared in a + // separate package to avoid conflicts with package runtime's + // actual declarations, which may differ intentionally but + // insignificantly. 
+ ir.Pkgs.Runtime = types.NewPkg("go.runtime", "runtime") + ir.Pkgs.Runtime.Prefix = "runtime" + + // pseudo-packages used in symbol tables + ir.Pkgs.Itab = types.NewPkg("go.itab", "go.itab") + ir.Pkgs.Itab.Prefix = "go:itab" + + // pseudo-package used for methods with anonymous receivers + ir.Pkgs.Go = types.NewPkg("go", "") + + // pseudo-package for use with code coverage instrumentation. + ir.Pkgs.Coverage = types.NewPkg("go.coverage", "runtime/coverage") + ir.Pkgs.Coverage.Prefix = "runtime/coverage" + + // Record flags that affect the build result. (And don't + // record flags that don't, since that would cause spurious + // changes in the binary.) + dwarfgen.RecordFlags("B", "N", "l", "msan", "race", "asan", "shared", "dynlink", "dwarf", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre") + + if !base.EnableTrace && base.Flag.LowerT { + log.Fatalf("compiler not built with support for -t") + } + + // Enable inlining (after RecordFlags, to avoid recording the rewritten -l). For now: + // default: inlining on. 
(Flag.LowerL == 1) + // -l: inlining off (Flag.LowerL == 0) + // -l=2, -l=3: inlining on again, with extra debugging (Flag.LowerL > 1) + if base.Flag.LowerL <= 1 { + base.Flag.LowerL = 1 - base.Flag.LowerL + } + + if base.Flag.SmallFrames { + ir.MaxStackVarSize = 128 * 1024 + ir.MaxImplicitStackVarSize = 16 * 1024 + } + + if base.Flag.Dwarf { + base.Ctxt.DebugInfo = dwarfgen.Info + base.Ctxt.GenAbstractFunc = dwarfgen.AbstractFunc + base.Ctxt.DwFixups = obj.NewDwarfFixupTable(base.Ctxt) + } else { + // turn off inline generation if no dwarf at all + base.Flag.GenDwarfInl = 0 + base.Ctxt.Flag_locationlists = false + } + if base.Ctxt.Flag_locationlists && len(base.Ctxt.Arch.DWARFRegisters) == 0 { + log.Fatalf("location lists requested but register mapping not available on %v", base.Ctxt.Arch.Name) + } + + types.ParseLangFlag() + + symABIs := ssagen.NewSymABIs() + if base.Flag.SymABIs != "" { + symABIs.ReadSymABIs(base.Flag.SymABIs) + } + + if objabi.LookupPkgSpecial(base.Ctxt.Pkgpath).NoInstrument { + base.Flag.Race = false + base.Flag.MSan = false + base.Flag.ASan = false + } + + ssagen.Arch.LinkArch.Init(base.Ctxt) + startProfile() + if base.Flag.Race || base.Flag.MSan || base.Flag.ASan { + base.Flag.Cfg.Instrumenting = true + } + if base.Flag.Dwarf { + dwarf.EnableLogging(base.Debug.DwarfInl != 0) + } + if base.Debug.SoftFloat != 0 { + ssagen.Arch.SoftFloat = true + } + + if base.Flag.JSON != "" { // parse version,destination from json logging optimization. 
+ logopt.LogJsonOption(base.Flag.JSON) + } + + ir.EscFmt = escape.Fmt + ir.IsIntrinsicCall = ssagen.IsIntrinsicCall + inline.SSADumpInline = ssagen.DumpInline + ssagen.InitEnv() + ssagen.InitTables() + + types.PtrSize = ssagen.Arch.LinkArch.PtrSize + types.RegSize = ssagen.Arch.LinkArch.RegSize + types.MaxWidth = ssagen.Arch.MAXWIDTH + + typecheck.Target = new(ir.Package) + + base.AutogeneratedPos = makePos(src.NewFileBase("", ""), 1, 0) + + typecheck.InitUniverse() + typecheck.InitRuntime() + rttype.Init() + + // Parse and typecheck input. + noder.LoadPackage(flag.Args()) + + // As a convenience to users (toolchain maintainers, in particular), + // when compiling a package named "main", we default the package + // path to "main" if the -p flag was not specified. + if base.Ctxt.Pkgpath == obj.UnlinkablePkg && types.LocalPkg.Name == "main" { + base.Ctxt.Pkgpath = "main" + types.LocalPkg.Path = "main" + types.LocalPkg.Prefix = "main" + } + + dwarfgen.RecordPackageName() + + // Prepare for backend processing. + ssagen.InitConfig() + + // Apply coverage fixups, if applicable. + coverage.Fixup() + + // Read profile file and build profile-graph and weighted-call-graph. + base.Timer.Start("fe", "pgo-load-profile") + var profile *pgo.Profile + if base.Flag.PgoProfile != "" { + var err error + profile, err = pgo.New(base.Flag.PgoProfile) + if err != nil { + log.Fatalf("%s: PGO error: %v", base.Flag.PgoProfile, err) + } + } + + // Interleaved devirtualization and inlining. + base.Timer.Start("fe", "devirtualize-and-inline") + interleaved.DevirtualizeAndInlinePackage(typecheck.Target, profile) + + noder.MakeWrappers(typecheck.Target) // must happen after inlining + + // Get variable capture right in for loops. + var transformed []loopvar.VarAndLoop + for _, fn := range typecheck.Target.Funcs { + transformed = append(transformed, loopvar.ForCapture(fn)...) + } + ir.CurFunc = nil + + // Build init task, if needed. + pkginit.MakeTask() + + // Generate ABI wrappers. 
Must happen before escape analysis + // and doesn't benefit from dead-coding or inlining. + symABIs.GenABIWrappers() + + // Escape analysis. + // Required for moving heap allocations onto stack, + // which in turn is required by the closure implementation, + // which stores the addresses of stack variables into the closure. + // If the closure does not escape, it needs to be on the stack + // or else the stack copier will not update it. + // Large values are also moved off stack in escape analysis; + // because large values may contain pointers, it must happen early. + base.Timer.Start("fe", "escapes") + escape.Funcs(typecheck.Target.Funcs) + + loopvar.LogTransformations(transformed) + + // Collect information for go:nowritebarrierrec + // checking. This must happen before transforming closures during Walk + // We'll do the final check after write barriers are + // inserted. + if base.Flag.CompilingRuntime { + ssagen.EnableNoWriteBarrierRecCheck() + } + + ir.CurFunc = nil + + reflectdata.WriteBasicTypes() + + // Compile top-level declarations. + // + // There are cyclic dependencies between all of these phases, so we + // need to iterate all of them until we reach a fixed point. + base.Timer.Start("be", "compilefuncs") + for nextFunc, nextExtern := 0, 0; ; { + reflectdata.WriteRuntimeTypes() + + if nextExtern < len(typecheck.Target.Externs) { + switch n := typecheck.Target.Externs[nextExtern]; n.Op() { + case ir.ONAME: + dumpGlobal(n) + case ir.OLITERAL: + dumpGlobalConst(n) + case ir.OTYPE: + reflectdata.NeedRuntimeType(n.Type()) + } + nextExtern++ + continue + } + + if nextFunc < len(typecheck.Target.Funcs) { + enqueueFunc(typecheck.Target.Funcs[nextFunc]) + nextFunc++ + continue + } + + // The SSA backend supports using multiple goroutines, so keep it + // as late as possible to maximize how much work we can batch and + // process concurrently. 
+ if len(compilequeue) != 0 { + compileFunctions() + continue + } + + // Finalize DWARF inline routine DIEs, then explicitly turn off + // further DWARF inlining generation to avoid problems with + // generated method wrappers. + // + // Note: The DWARF fixup code for inlined calls currently doesn't + // allow multiple invocations, so we intentionally run it just + // once after everything else. Worst case, some generated + // functions have slightly larger DWARF DIEs. + if base.Ctxt.DwFixups != nil { + base.Ctxt.DwFixups.Finalize(base.Ctxt.Pkgpath, base.Debug.DwarfInl != 0) + base.Ctxt.DwFixups = nil + base.Flag.GenDwarfInl = 0 + continue // may have called reflectdata.TypeLinksym (#62156) + } + + break + } + + base.Timer.AddEvent(int64(len(typecheck.Target.Funcs)), "funcs") + + if base.Flag.CompilingRuntime { + // Write barriers are now known. Check the call graph. + ssagen.NoWriteBarrierRecCheck() + } + + // Add keep relocations for global maps. + if base.Debug.WrapGlobalMapCtl != 1 { + staticinit.AddKeepRelocations() + } + + // Write object data to disk. 
+ base.Timer.Start("be", "dumpobj") + dumpdata() + base.Ctxt.NumberSyms() + dumpobj() + if base.Flag.AsmHdr != "" { + dumpasmhdr() + } + + ssagen.CheckLargeStacks() + typecheck.CheckFuncStack() + + if len(compilequeue) != 0 { + base.Fatalf("%d uncompiled functions", len(compilequeue)) + } + + logopt.FlushLoggedOpts(base.Ctxt, base.Ctxt.Pkgpath) + base.ExitIfErrors() + + base.FlushErrors() + base.Timer.Stop() + + if base.Flag.Bench != "" { + if err := writebench(base.Flag.Bench); err != nil { + log.Fatalf("cannot write benchmark data: %v", err) + } + } +} + +func writebench(filename string) error { + f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) + if err != nil { + return err + } + + var buf bytes.Buffer + fmt.Fprintln(&buf, "commit:", buildcfg.Version) + fmt.Fprintln(&buf, "goos:", runtime.GOOS) + fmt.Fprintln(&buf, "goarch:", runtime.GOARCH) + base.Timer.Write(&buf, "BenchmarkCompile:"+base.Ctxt.Pkgpath+":") + + n, err := f.Write(buf.Bytes()) + if err != nil { + return err + } + if n != buf.Len() { + panic("bad writer") + } + + return f.Close() +} + +func makePos(b *src.PosBase, line, col uint) src.XPos { + return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/gc/obj.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/gc/obj.go new file mode 100644 index 0000000000000000000000000000000000000000..e090cafb610f4353d618e73637376c66d5fbbb43 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/gc/obj.go @@ -0,0 +1,284 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gc + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/noder" + "cmd/compile/internal/objw" + "cmd/compile/internal/pkginit" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/staticdata" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/archive" + "cmd/internal/bio" + "cmd/internal/obj" + "cmd/internal/objabi" + "encoding/json" + "fmt" + "strings" +) + +// These modes say which kind of object file to generate. +// The default use of the toolchain is to set both bits, +// generating a combined compiler+linker object, one that +// serves to describe the package to both the compiler and the linker. +// In fact the compiler and linker read nearly disjoint sections of +// that file, though, so in a distributed build setting it can be more +// efficient to split the output into two files, supplying the compiler +// object only to future compilations and the linker object only to +// future links. +// +// By default a combined object is written, but if -linkobj is specified +// on the command line then the default -o output is a compiler object +// and the -linkobj output is a linker object. 
+const ( + modeCompilerObj = 1 << iota + modeLinkerObj +) + +func dumpobj() { + if base.Flag.LinkObj == "" { + dumpobj1(base.Flag.LowerO, modeCompilerObj|modeLinkerObj) + return + } + dumpobj1(base.Flag.LowerO, modeCompilerObj) + dumpobj1(base.Flag.LinkObj, modeLinkerObj) +} + +func dumpobj1(outfile string, mode int) { + bout, err := bio.Create(outfile) + if err != nil { + base.FlushErrors() + fmt.Printf("can't create %s: %v\n", outfile, err) + base.ErrorExit() + } + defer bout.Close() + bout.WriteString("!\n") + + if mode&modeCompilerObj != 0 { + start := startArchiveEntry(bout) + dumpCompilerObj(bout) + finishArchiveEntry(bout, start, "__.PKGDEF") + } + if mode&modeLinkerObj != 0 { + start := startArchiveEntry(bout) + dumpLinkerObj(bout) + finishArchiveEntry(bout, start, "_go_.o") + } +} + +func printObjHeader(bout *bio.Writer) { + bout.WriteString(objabi.HeaderString()) + if base.Flag.BuildID != "" { + fmt.Fprintf(bout, "build id %q\n", base.Flag.BuildID) + } + if types.LocalPkg.Name == "main" { + fmt.Fprintf(bout, "main\n") + } + fmt.Fprintf(bout, "\n") // header ends with blank line +} + +func startArchiveEntry(bout *bio.Writer) int64 { + var arhdr [archive.HeaderSize]byte + bout.Write(arhdr[:]) + return bout.Offset() +} + +func finishArchiveEntry(bout *bio.Writer, start int64, name string) { + bout.Flush() + size := bout.Offset() - start + if size&1 != 0 { + bout.WriteByte(0) + } + bout.MustSeek(start-archive.HeaderSize, 0) + + var arhdr [archive.HeaderSize]byte + archive.FormatHeader(arhdr[:], name, size) + bout.Write(arhdr[:]) + bout.Flush() + bout.MustSeek(start+size+(size&1), 0) +} + +func dumpCompilerObj(bout *bio.Writer) { + printObjHeader(bout) + noder.WriteExports(bout) +} + +func dumpdata() { + reflectdata.WriteGCSymbols() + reflectdata.WritePluginTable() + dumpembeds() + + if reflectdata.ZeroSize > 0 { + zero := base.PkgLinksym("go:map", "zero", obj.ABI0) + objw.Global(zero, int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA) + 
zero.Set(obj.AttrStatic, true) + } + + staticdata.WriteFuncSyms() + addGCLocals() +} + +func dumpLinkerObj(bout *bio.Writer) { + printObjHeader(bout) + + if len(typecheck.Target.CgoPragmas) != 0 { + // write empty export section; must be before cgo section + fmt.Fprintf(bout, "\n$$\n\n$$\n\n") + fmt.Fprintf(bout, "\n$$ // cgo\n") + if err := json.NewEncoder(bout).Encode(typecheck.Target.CgoPragmas); err != nil { + base.Fatalf("serializing pragcgobuf: %v", err) + } + fmt.Fprintf(bout, "\n$$\n\n") + } + + fmt.Fprintf(bout, "\n!\n") + + obj.WriteObjFile(base.Ctxt, bout) +} + +func dumpGlobal(n *ir.Name) { + if n.Type() == nil { + base.Fatalf("external %v nil type\n", n) + } + if n.Class == ir.PFUNC { + return + } + if n.Sym().Pkg != types.LocalPkg { + return + } + types.CalcSize(n.Type()) + ggloblnod(n) + if n.CoverageCounter() || n.CoverageAuxVar() || n.Linksym().Static() { + return + } + base.Ctxt.DwarfGlobal(types.TypeSymName(n.Type()), n.Linksym()) +} + +func dumpGlobalConst(n *ir.Name) { + // only export typed constants + t := n.Type() + if t == nil { + return + } + if n.Sym().Pkg != types.LocalPkg { + return + } + // only export integer constants for now + if !t.IsInteger() { + return + } + v := n.Val() + if t.IsUntyped() { + // Export untyped integers as int (if they fit). + t = types.Types[types.TINT] + if ir.ConstOverflow(v, t) { + return + } + } else { + // If the type of the constant is an instantiated generic, we need to emit + // that type so the linker knows about it. See issue 51245. + _ = reflectdata.TypeLinksym(t) + } + base.Ctxt.DwarfIntConst(n.Sym().Name, types.TypeSymName(t), ir.IntVal(t, v)) +} + +// addGCLocals adds gcargs, gclocals, gcregs, and stack object symbols to Ctxt.Data. +// +// This is done during the sequential phase after compilation, since +// global symbols can't be declared during parallel compilation. 
+func addGCLocals() { + for _, s := range base.Ctxt.Text { + fn := s.Func() + if fn == nil { + continue + } + for _, gcsym := range []*obj.LSym{fn.GCArgs, fn.GCLocals} { + if gcsym != nil && !gcsym.OnList() { + objw.Global(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK) + } + } + if x := fn.StackObjects; x != nil { + objw.Global(x, int32(len(x.P)), obj.RODATA) + x.Set(obj.AttrStatic, true) + } + if x := fn.OpenCodedDeferInfo; x != nil { + objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK) + } + if x := fn.ArgInfo; x != nil { + objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK) + x.Set(obj.AttrStatic, true) + } + if x := fn.ArgLiveInfo; x != nil { + objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK) + x.Set(obj.AttrStatic, true) + } + if x := fn.WrapInfo; x != nil && !x.OnList() { + objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK) + x.Set(obj.AttrStatic, true) + } + for _, jt := range fn.JumpTables { + objw.Global(jt.Sym, int32(len(jt.Targets)*base.Ctxt.Arch.PtrSize), obj.RODATA) + } + } +} + +func ggloblnod(nam *ir.Name) { + s := nam.Linksym() + + // main_inittask and runtime_inittask in package runtime (and in + // test/initempty.go) aren't real variable declarations, but + // linknamed variables pointing to the compiler's generated + // .inittask symbol. The real symbol was already written out in + // pkginit.Task, so we need to avoid writing them out a second time + // here, otherwise base.Ctxt.Globl will fail. + if strings.HasSuffix(s.Name, "..inittask") && s.OnList() { + return + } + + s.Gotype = reflectdata.TypeLinksym(nam.Type()) + flags := 0 + if nam.Readonly() { + flags = obj.RODATA + } + if nam.Type() != nil && !nam.Type().HasPointers() { + flags |= obj.NOPTR + } + size := nam.Type().Size() + linkname := nam.Sym().Linkname + name := nam.Sym().Name + + // We've skipped linkname'd globals's instrument, so we can skip them here as well. 
+ if base.Flag.ASan && linkname == "" && pkginit.InstrumentGlobalsMap[name] != nil { + // Write the new size of instrumented global variables that have + // trailing redzones into object file. + rzSize := pkginit.GetRedzoneSizeForGlobal(size) + sizeWithRZ := rzSize + size + base.Ctxt.Globl(s, sizeWithRZ, flags) + } else { + base.Ctxt.Globl(s, size, flags) + } + if nam.Libfuzzer8BitCounter() { + s.Type = objabi.SLIBFUZZER_8BIT_COUNTER + } + if nam.CoverageCounter() { + s.Type = objabi.SCOVERAGE_COUNTER + } + if nam.Sym().Linkname != "" { + // Make sure linkname'd symbol is non-package. When a symbol is + // both imported and linkname'd, s.Pkg may not set to "_" in + // types.Sym.Linksym because LSym already exists. Set it here. + s.Pkg = "_" + } +} + +func dumpembeds() { + for _, v := range typecheck.Target.Embeds { + staticdata.WriteEmbed(v) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/gc/util.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/gc/util.go new file mode 100644 index 0000000000000000000000000000000000000000..b82a983d9feaf211bbaa2073bc38177a79d2f44c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/gc/util.go @@ -0,0 +1,117 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gc + +import ( + "net/url" + "os" + "path/filepath" + "runtime" + "runtime/pprof" + tracepkg "runtime/trace" + "strings" + + "cmd/compile/internal/base" +) + +func profileName(fn, suffix string) string { + if strings.HasSuffix(fn, string(os.PathSeparator)) { + err := os.MkdirAll(fn, 0755) + if err != nil { + base.Fatalf("%v", err) + } + } + if fi, statErr := os.Stat(fn); statErr == nil && fi.IsDir() { + fn = filepath.Join(fn, url.PathEscape(base.Ctxt.Pkgpath)+suffix) + } + return fn +} + +func startProfile() { + if base.Flag.CPUProfile != "" { + fn := profileName(base.Flag.CPUProfile, ".cpuprof") + f, err := os.Create(fn) + if err != nil { + base.Fatalf("%v", err) + } + if err := pprof.StartCPUProfile(f); err != nil { + base.Fatalf("%v", err) + } + base.AtExit(pprof.StopCPUProfile) + } + if base.Flag.MemProfile != "" { + if base.Flag.MemProfileRate != 0 { + runtime.MemProfileRate = base.Flag.MemProfileRate + } + const ( + gzipFormat = 0 + textFormat = 1 + ) + // compilebench parses the memory profile to extract memstats, + // which are only written in the legacy (text) pprof format. + // See golang.org/issue/18641 and runtime/pprof/pprof.go:writeHeap. + // gzipFormat is what most people want, otherwise + var format = textFormat + fn := base.Flag.MemProfile + if strings.HasSuffix(fn, string(os.PathSeparator)) { + err := os.MkdirAll(fn, 0755) + if err != nil { + base.Fatalf("%v", err) + } + } + if fi, statErr := os.Stat(fn); statErr == nil && fi.IsDir() { + fn = filepath.Join(fn, url.PathEscape(base.Ctxt.Pkgpath)+".memprof") + format = gzipFormat + } + + f, err := os.Create(fn) + + if err != nil { + base.Fatalf("%v", err) + } + base.AtExit(func() { + // Profile all outstanding allocations. + runtime.GC() + if err := pprof.Lookup("heap").WriteTo(f, format); err != nil { + base.Fatalf("%v", err) + } + }) + } else { + // Not doing memory profiling; disable it entirely. 
+ runtime.MemProfileRate = 0 + } + if base.Flag.BlockProfile != "" { + f, err := os.Create(profileName(base.Flag.BlockProfile, ".blockprof")) + if err != nil { + base.Fatalf("%v", err) + } + runtime.SetBlockProfileRate(1) + base.AtExit(func() { + pprof.Lookup("block").WriteTo(f, 0) + f.Close() + }) + } + if base.Flag.MutexProfile != "" { + f, err := os.Create(profileName(base.Flag.MutexProfile, ".mutexprof")) + if err != nil { + base.Fatalf("%v", err) + } + runtime.SetMutexProfileFraction(1) + base.AtExit(func() { + pprof.Lookup("mutex").WriteTo(f, 0) + f.Close() + }) + } + if base.Flag.TraceProfile != "" { + f, err := os.Create(profileName(base.Flag.TraceProfile, ".trace")) + if err != nil { + base.Fatalf("%v", err) + } + if err := tracepkg.Start(f); err != nil { + base.Fatalf("%v", err) + } + base.AtExit(tracepkg.Stop) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/exportdata.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/exportdata.go new file mode 100644 index 0000000000000000000000000000000000000000..42fc5c9a573d04ce302cb7a6d9f71fdd13d86090 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/exportdata.go @@ -0,0 +1,95 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements FindExportData. + +package importer + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" +) + +func readGopackHeader(r *bufio.Reader) (name string, size int, err error) { + // See $GOROOT/include/ar.h. 
+ hdr := make([]byte, 16+12+6+6+8+10+2) + _, err = io.ReadFull(r, hdr) + if err != nil { + return + } + // leave for debugging + if false { + fmt.Printf("header: %s", hdr) + } + s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) + size, err = strconv.Atoi(s) + if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { + err = fmt.Errorf("invalid archive header") + return + } + name = strings.TrimSpace(string(hdr[:16])) + return +} + +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying GC-created object/archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. The hdr result +// is the string before the export data, either "$$" or "$$B". +// +// If size is non-negative, it's the number of bytes of export data +// still available to read from r. +func FindExportData(r *bufio.Reader) (hdr string, size int, err error) { + // Read first line to make sure this is an object file. + line, err := r.ReadSlice('\n') + if err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + + if string(line) == "!\n" { + // Archive file. Scan to __.PKGDEF. + var name string + if name, size, err = readGopackHeader(r); err != nil { + return + } + + // First entry should be __.PKGDEF. + if name != "__.PKGDEF" { + err = fmt.Errorf("go archive is missing __.PKGDEF") + return + } + + // Read first line of __.PKGDEF data, so that line + // is once again the first line of the input. + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + } + + // Now at __.PKGDEF in archive or still at beginning of file. + // Either way, line should begin with "go object ". + if !strings.HasPrefix(string(line), "go object ") { + err = fmt.Errorf("not a Go object file") + return + } + size -= len(line) + + // Skip over object header to export data. + // Begins after first line starting with $$. 
+ for line[0] != '$' { + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + size -= len(line) + } + hdr = string(line) + + return +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/gcimporter.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/gcimporter.go new file mode 100644 index 0000000000000000000000000000000000000000..1f7b49c8c3d53547b300083a9403a6137b9a5cd8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/gcimporter.go @@ -0,0 +1,253 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// package importer implements Import for gc-generated object files. +package importer + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "go/build" + "internal/pkgbits" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + + "cmd/compile/internal/types2" +) + +var exportMap sync.Map // package dir → func() (string, error) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) 
+func lookupGorootExport(pkgDir string) (string, error) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + err error + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) { + listOnce.Do(func() { + cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT) + var output []byte + output, err = cmd.Output() + if err != nil { + if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { + err = errors.New(string(ee.Stderr)) + } + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + err = fmt.Errorf("go list reported %d exports; expected 1", len(exports)) + return + } + + exportPath = exports[0] + }) + + return exportPath, err + }) + } + + return f.(func() (string, error))() +} + +var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +func FindPkg(path, srcDir string) (filename, id string, err error) { + if path == "" { + return "", "", errors.New("path is empty") + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. 
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + var bp *build.Package + bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + if bp.Goroot && bp.Dir != "" { + filename, err = lookupGorootExport(bp.Dir) + if err == nil { + _, err = os.Stat(filename) + } + if err == nil { + return filename, bp.ImportPath, nil + } + } + goto notfound + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + } + id = bp.ImportPath + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + f, statErr := os.Stat(filename) + if statErr == nil && !f.IsDir() { + return filename, id, nil + } + if err == nil { + err = statErr + } + } + +notfound: + if err == nil { + return "", path, fmt.Errorf("can't find import: %q", path) + } + return "", path, fmt.Errorf("can't find import: %q: %w", path, err) +} + +// Import imports a gc-generated package given its import path and srcDir, adds +// the corresponding package object to the packages map, and returns the object. +// The packages map must contain all packages already imported. +func Import(packages map[string]*types2.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types2.Package, err error) { + var rc io.ReadCloser + var id string + if lookup != nil { + // With custom lookup specified, assume that caller has + // converted path to a canonical import path for use in the map. 
+ if path == "unsafe" { + return types2.Unsafe, nil + } + id = path + + // No need to re-import if the package was imported completely before. + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + f, err := lookup(path) + if err != nil { + return nil, err + } + rc = f + } else { + var filename string + filename, id, err = FindPkg(path, srcDir) + if filename == "" { + if path == "unsafe" { + return types2.Unsafe, nil + } + return nil, err + } + + // no need to re-import if the package was imported completely before + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + // add file name to error + err = fmt.Errorf("%s: %v", filename, err) + } + }() + rc = f + } + defer rc.Close() + + buf := bufio.NewReader(rc) + hdr, size, err := FindExportData(buf) + if err != nil { + return + } + + switch hdr { + case "$$\n": + err = fmt.Errorf("import %q: old textual export format no longer supported (recompile library)", path) + + case "$$B\n": + var data []byte + var r io.Reader = buf + if size >= 0 { + r = io.LimitReader(r, int64(size)) + } + data, err = io.ReadAll(r) + if err != nil { + break + } + + if len(data) == 0 { + err = fmt.Errorf("import %q: missing export data", path) + break + } + exportFormat := data[0] + s := string(data[1:]) + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. 
+ switch exportFormat { + case 'u': + s = s[:strings.Index(s, "\n$$\n")] + input := pkgbits.NewPkgDecoder(id, s) + pkg = ReadPackage(nil, packages, input) + case 'i': + pkg, err = ImportData(packages, s, id) + default: + err = fmt.Errorf("import %q: old binary export format no longer supported (recompile library)", path) + } + + default: + err = fmt.Errorf("import %q: unknown export data header: %q", path, hdr) + } + + return +} + +type byPath []*types2.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/gcimporter_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/gcimporter_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7fe4445dad7638b8c06a1a63e1ac9241be80c1c2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/gcimporter_test.go @@ -0,0 +1,608 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package importer + +import ( + "bytes" + "cmd/compile/internal/types2" + "fmt" + "go/build" + "internal/testenv" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "strings" + "testing" + "time" +) + +func TestMain(m *testing.M) { + build.Default.GOROOT = testenv.GOROOT(nil) + os.Exit(m.Run()) +} + +// compile runs the compiler on filename, with dirname as the working directory, +// and writes the output file to outdirname. +// compile gives the resulting package a packagepath of testdata/. 
+func compile(t *testing.T, dirname, filename, outdirname string, packagefiles map[string]string) string { + // filename must end with ".go" + basename, ok := strings.CutSuffix(filepath.Base(filename), ".go") + if !ok { + t.Helper() + t.Fatalf("filename doesn't end in .go: %s", filename) + } + objname := basename + ".o" + outname := filepath.Join(outdirname, objname) + pkgpath := path.Join("testdata", basename) + + importcfgfile := os.DevNull + if len(packagefiles) > 0 { + importcfgfile = filepath.Join(outdirname, basename) + ".importcfg" + importcfg := new(bytes.Buffer) + for k, v := range packagefiles { + fmt.Fprintf(importcfg, "packagefile %s=%s\n", k, v) + } + if err := os.WriteFile(importcfgfile, importcfg.Bytes(), 0655); err != nil { + t.Fatal(err) + } + } + + cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "compile", "-p", pkgpath, "-D", "testdata", "-importcfg", importcfgfile, "-o", outname, filename) + cmd.Dir = dirname + out, err := cmd.CombinedOutput() + if err != nil { + t.Helper() + t.Logf("%s", out) + t.Fatalf("go tool compile %s failed: %s", filename, err) + } + return outname +} + +func testPath(t *testing.T, path, srcDir string) *types2.Package { + t0 := time.Now() + pkg, err := Import(make(map[string]*types2.Package), path, srcDir, nil) + if err != nil { + t.Errorf("testPath(%s): %s", path, err) + return nil + } + t.Logf("testPath(%s): %v", path, time.Since(t0)) + return pkg +} + +func mktmpdir(t *testing.T) string { + tmpdir := t.TempDir() + if err := os.Mkdir(filepath.Join(tmpdir, "testdata"), 0700); err != nil { + t.Fatal("mktmpdir:", err) + } + return tmpdir +} + +func TestImportTestdata(t *testing.T) { + // This package only handles gc export data. 
+ if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } + + testenv.MustHaveGoBuild(t) + + testfiles := map[string][]string{ + "exports.go": {"go/ast", "go/token"}, + "generics.go": nil, + } + if true /* was goexperiment.Unified */ { + // TODO(mdempsky): Fix test below to flatten the transitive + // Package.Imports graph. Unified IR is more precise about + // recreating the package import graph. + testfiles["exports.go"] = []string{"go/ast"} + } + + for testfile, wantImports := range testfiles { + tmpdir := mktmpdir(t) + + importMap := map[string]string{} + for _, pkg := range wantImports { + export, _, err := FindPkg(pkg, "testdata") + if export == "" { + t.Fatalf("no export data found for %s: %v", pkg, err) + } + importMap[pkg] = export + } + + compile(t, "testdata", testfile, filepath.Join(tmpdir, "testdata"), importMap) + path := "./testdata/" + strings.TrimSuffix(testfile, ".go") + + if pkg := testPath(t, path, tmpdir); pkg != nil { + // The package's Imports list must include all packages + // explicitly imported by testfile, plus all packages + // referenced indirectly via exported objects in testfile. + got := fmt.Sprint(pkg.Imports()) + for _, want := range wantImports { + if !strings.Contains(got, want) { + t.Errorf(`Package("exports").Imports() = %s, does not contain %s`, got, want) + } + } + } + } +} + +func TestVersionHandling(t *testing.T) { + testenv.MustHaveGoBuild(t) + + // This package only handles gc export data. 
+ if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } + + const dir = "./testdata/versions" + list, err := os.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + + tmpdir := mktmpdir(t) + corruptdir := filepath.Join(tmpdir, "testdata", "versions") + if err := os.Mkdir(corruptdir, 0700); err != nil { + t.Fatal(err) + } + + for _, f := range list { + name := f.Name() + if !strings.HasSuffix(name, ".a") { + continue // not a package file + } + if strings.Contains(name, "corrupted") { + continue // don't process a leftover corrupted file + } + pkgpath := "./" + name[:len(name)-2] + + if testing.Verbose() { + t.Logf("importing %s", name) + } + + // test that export data can be imported + _, err := Import(make(map[string]*types2.Package), pkgpath, dir, nil) + if err != nil { + // ok to fail if it fails with a no longer supported error for select files + if strings.Contains(err.Error(), "no longer supported") { + switch name { + case "test_go1.7_0.a", "test_go1.7_1.a", + "test_go1.8_4.a", "test_go1.8_5.a", + "test_go1.11_6b.a", "test_go1.11_999b.a": + continue + } + // fall through + } + // ok to fail if it fails with a newer version error for select files + if strings.Contains(err.Error(), "newer version") { + switch name { + case "test_go1.11_999i.a": + continue + } + // fall through + } + t.Errorf("import %q failed: %v", pkgpath, err) + continue + } + + // create file with corrupted export data + // 1) read file + data, err := os.ReadFile(filepath.Join(dir, name)) + if err != nil { + t.Fatal(err) + } + // 2) find export data + i := bytes.Index(data, []byte("\n$$B\n")) + 5 + j := bytes.Index(data[i:], []byte("\n$$\n")) + i + if i < 0 || j < 0 || i > j { + t.Fatalf("export data section not found (i = %d, j = %d)", i, j) + } + // 3) corrupt the data (increment every 7th byte) + for k := j - 13; k >= i; k -= 7 { + data[k]++ + } + // 4) write the file + pkgpath += "_corrupted" + filename := 
filepath.Join(corruptdir, pkgpath) + ".a" + os.WriteFile(filename, data, 0666) + + // test that importing the corrupted file results in an error + _, err = Import(make(map[string]*types2.Package), pkgpath, corruptdir, nil) + if err == nil { + t.Errorf("import corrupted %q succeeded", pkgpath) + } else if msg := err.Error(); !strings.Contains(msg, "version skew") { + t.Errorf("import %q error incorrect (%s)", pkgpath, msg) + } + } +} + +func TestImportStdLib(t *testing.T) { + if testing.Short() { + t.Skip("the imports can be expensive, and this test is especially slow when the build cache is empty") + } + testenv.MustHaveGoBuild(t) + + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } + + // Get list of packages in stdlib. Filter out test-only packages with {{if .GoFiles}} check. + var stderr bytes.Buffer + cmd := exec.Command("go", "list", "-f", "{{if .GoFiles}}{{.ImportPath}}{{end}}", "std") + cmd.Stderr = &stderr + out, err := cmd.Output() + if err != nil { + t.Fatalf("failed to run go list to determine stdlib packages: %v\nstderr:\n%v", err, stderr.String()) + } + pkgs := strings.Fields(string(out)) + + var nimports int + for _, pkg := range pkgs { + t.Run(pkg, func(t *testing.T) { + if testPath(t, pkg, filepath.Join(testenv.GOROOT(t), "src", path.Dir(pkg))) != nil { + nimports++ + } + }) + } + const minPkgs = 225 // 'GOOS=plan9 go1.18 list std | wc -l' reports 228; most other platforms have more. 
+ if len(pkgs) < minPkgs { + t.Fatalf("too few packages (%d) were imported", nimports) + } + + t.Logf("tested %d imports", nimports) +} + +var importedObjectTests = []struct { + name string + want string +}{ + // non-interfaces + {"crypto.Hash", "type Hash uint"}, + {"go/ast.ObjKind", "type ObjKind int"}, + {"go/types.Qualifier", "type Qualifier func(*Package) string"}, + {"go/types.Comparable", "func Comparable(T Type) bool"}, + {"math.Pi", "const Pi untyped float"}, + {"math.Sin", "func Sin(x float64) float64"}, + {"go/ast.NotNilFilter", "func NotNilFilter(_ string, v reflect.Value) bool"}, + {"go/internal/gcimporter.FindPkg", "func FindPkg(path string, srcDir string) (filename string, id string, err error)"}, + + // interfaces + {"context.Context", "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key any) any}"}, + {"crypto.Decrypter", "type Decrypter interface{Decrypt(rand io.Reader, msg []byte, opts DecrypterOpts) (plaintext []byte, err error); Public() PublicKey}"}, + {"encoding.BinaryMarshaler", "type BinaryMarshaler interface{MarshalBinary() (data []byte, err error)}"}, + {"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"}, + {"io.ReadWriter", "type ReadWriter interface{Reader; Writer}"}, + {"go/ast.Node", "type Node interface{End() go/token.Pos; Pos() go/token.Pos}"}, + {"go/types.Type", "type Type interface{String() string; Underlying() Type}"}, +} + +func TestImportedTypes(t *testing.T) { + testenv.MustHaveGoBuild(t) + + // This package only handles gc export data. 
+ if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } + + for _, test := range importedObjectTests { + s := strings.Split(test.name, ".") + if len(s) != 2 { + t.Fatal("inconsistent test data") + } + importPath := s[0] + objName := s[1] + + pkg, err := Import(make(map[string]*types2.Package), importPath, ".", nil) + if err != nil { + t.Error(err) + continue + } + + obj := pkg.Scope().Lookup(objName) + if obj == nil { + t.Errorf("%s: object not found", test.name) + continue + } + + got := types2.ObjectString(obj, types2.RelativeTo(pkg)) + if got != test.want { + t.Errorf("%s: got %q; want %q", test.name, got, test.want) + } + + if named, _ := obj.Type().(*types2.Named); named != nil { + verifyInterfaceMethodRecvs(t, named, 0) + } + } +} + +// verifyInterfaceMethodRecvs verifies that method receiver types +// are named if the methods belong to a named interface type. +func verifyInterfaceMethodRecvs(t *testing.T, named *types2.Named, level int) { + // avoid endless recursion in case of an embedding bug that lead to a cycle + if level > 10 { + t.Errorf("%s: embeds itself", named) + return + } + + iface, _ := named.Underlying().(*types2.Interface) + if iface == nil { + return // not an interface + } + + // The unified IR importer always sets interface method receiver + // parameters to point to the Interface type, rather than the Named. + // See #49906. + // + // TODO(mdempsky): This is only true for the types2 importer. For + // the go/types importer, we duplicate the Interface and rewrite its + // receiver methods to match historical behavior. 
+ var want types2.Type = named + if true /* was goexperiment.Unified */ { + want = iface + } + + // check explicitly declared methods + for i := 0; i < iface.NumExplicitMethods(); i++ { + m := iface.ExplicitMethod(i) + recv := m.Type().(*types2.Signature).Recv() + if recv == nil { + t.Errorf("%s: missing receiver type", m) + continue + } + if recv.Type() != want { + t.Errorf("%s: got recv type %s; want %s", m, recv.Type(), named) + } + } + + // check embedded interfaces (if they are named, too) + for i := 0; i < iface.NumEmbeddeds(); i++ { + // embedding of interfaces cannot have cycles; recursion will terminate + if etype, _ := iface.EmbeddedType(i).(*types2.Named); etype != nil { + verifyInterfaceMethodRecvs(t, etype, level+1) + } + } +} + +func TestIssue5815(t *testing.T) { + testenv.MustHaveGoBuild(t) + + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } + + pkg := importPkg(t, "strings", ".") + + scope := pkg.Scope() + for _, name := range scope.Names() { + obj := scope.Lookup(name) + if obj.Pkg() == nil { + t.Errorf("no pkg for %s", obj) + } + if tname, _ := obj.(*types2.TypeName); tname != nil { + named := tname.Type().(*types2.Named) + for i := 0; i < named.NumMethods(); i++ { + m := named.Method(i) + if m.Pkg() == nil { + t.Errorf("no pkg for %s", m) + } + } + } + } +} + +// Smoke test to ensure that imported methods get the correct package. +func TestCorrectMethodPackage(t *testing.T) { + testenv.MustHaveGoBuild(t) + + // This package only handles gc export data. 
+ if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } + + imports := make(map[string]*types2.Package) + _, err := Import(imports, "net/http", ".", nil) + if err != nil { + t.Fatal(err) + } + + mutex := imports["sync"].Scope().Lookup("Mutex").(*types2.TypeName).Type() + obj, _, _ := types2.LookupFieldOrMethod(types2.NewPointer(mutex), false, nil, "Lock") + lock := obj.(*types2.Func) + if got, want := lock.Pkg().Path(), "sync"; got != want { + t.Errorf("got package path %q; want %q", got, want) + } +} + +func TestIssue13566(t *testing.T) { + testenv.MustHaveGoBuild(t) + + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } + + tmpdir := mktmpdir(t) + testoutdir := filepath.Join(tmpdir, "testdata") + + // b.go needs to be compiled from the output directory so that the compiler can + // find the compiled package a. We pass the full path to compile() so that we + // don't have to copy the file to that directory. + bpath, err := filepath.Abs(filepath.Join("testdata", "b.go")) + if err != nil { + t.Fatal(err) + } + + jsonExport, _, err := FindPkg("encoding/json", "testdata") + if jsonExport == "" { + t.Fatalf("no export data found for encoding/json: %v", err) + } + + compile(t, "testdata", "a.go", testoutdir, map[string]string{"encoding/json": jsonExport}) + compile(t, testoutdir, bpath, testoutdir, map[string]string{"testdata/a": filepath.Join(testoutdir, "a.o")}) + + // import must succeed (test for issue at hand) + pkg := importPkg(t, "./testdata/b", tmpdir) + + // make sure all indirectly imported packages have names + for _, imp := range pkg.Imports() { + if imp.Name() == "" { + t.Errorf("no name for %s package", imp.Path()) + } + } +} + +func TestIssue13898(t *testing.T) { + testenv.MustHaveGoBuild(t) + + // This package only handles gc export data. 
+ if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } + + // import go/internal/gcimporter which imports go/types partially + imports := make(map[string]*types2.Package) + _, err := Import(imports, "go/internal/gcimporter", ".", nil) + if err != nil { + t.Fatal(err) + } + + // look for go/types package + var goTypesPkg *types2.Package + for path, pkg := range imports { + if path == "go/types" { + goTypesPkg = pkg + break + } + } + if goTypesPkg == nil { + t.Fatal("go/types not found") + } + + // look for go/types.Object type + obj := lookupObj(t, goTypesPkg.Scope(), "Object") + typ, ok := obj.Type().(*types2.Named) + if !ok { + t.Fatalf("go/types.Object type is %v; wanted named type", typ) + } + + // lookup go/types.Object.Pkg method + m, index, indirect := types2.LookupFieldOrMethod(typ, false, nil, "Pkg") + if m == nil { + t.Fatalf("go/types.Object.Pkg not found (index = %v, indirect = %v)", index, indirect) + } + + // the method must belong to go/types + if m.Pkg().Path() != "go/types" { + t.Fatalf("found %v; want go/types", m.Pkg()) + } +} + +func TestIssue15517(t *testing.T) { + testenv.MustHaveGoBuild(t) + + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } + + tmpdir := mktmpdir(t) + + compile(t, "testdata", "p.go", filepath.Join(tmpdir, "testdata"), nil) + + // Multiple imports of p must succeed without redeclaration errors. + // We use an import path that's not cleaned up so that the eventual + // file path for the package is different from the package path; this + // will expose the error if it is present. + // + // (Issue: Both the textual and the binary importer used the file path + // of the package to be imported as key into the shared packages map. 
+ // However, the binary importer then used the package path to identify + // the imported package to mark it as complete; effectively marking the + // wrong package as complete. By using an "unclean" package path, the + // file and package path are different, exposing the problem if present. + // The same issue occurs with vendoring.) + imports := make(map[string]*types2.Package) + for i := 0; i < 3; i++ { + if _, err := Import(imports, "./././testdata/p", tmpdir, nil); err != nil { + t.Fatal(err) + } + } +} + +func TestIssue15920(t *testing.T) { + testenv.MustHaveGoBuild(t) + + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } + + compileAndImportPkg(t, "issue15920") +} + +func TestIssue20046(t *testing.T) { + testenv.MustHaveGoBuild(t) + + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } + + // "./issue20046".V.M must exist + pkg := compileAndImportPkg(t, "issue20046") + obj := lookupObj(t, pkg.Scope(), "V") + if m, index, indirect := types2.LookupFieldOrMethod(obj.Type(), false, nil, "M"); m == nil { + t.Fatalf("V.M not found (index = %v, indirect = %v)", index, indirect) + } +} +func TestIssue25301(t *testing.T) { + testenv.MustHaveGoBuild(t) + + // This package only handles gc export data. + if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } + + compileAndImportPkg(t, "issue25301") +} + +func TestIssue25596(t *testing.T) { + testenv.MustHaveGoBuild(t) + + // This package only handles gc export data. 
+ if runtime.Compiler != "gc" { + t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) + } + + compileAndImportPkg(t, "issue25596") +} + +func importPkg(t *testing.T, path, srcDir string) *types2.Package { + pkg, err := Import(make(map[string]*types2.Package), path, srcDir, nil) + if err != nil { + t.Helper() + t.Fatal(err) + } + return pkg +} + +func compileAndImportPkg(t *testing.T, name string) *types2.Package { + t.Helper() + tmpdir := mktmpdir(t) + compile(t, "testdata", name+".go", filepath.Join(tmpdir, "testdata"), nil) + return importPkg(t, "./testdata/"+name, tmpdir) +} + +func lookupObj(t *testing.T, scope *types2.Scope, name string) types2.Object { + if obj := scope.Lookup(name); obj != nil { + return obj + } + t.Helper() + t.Fatalf("%s not found", name) + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/iimport.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/iimport.go new file mode 100644 index 0000000000000000000000000000000000000000..498134755d9ab6c1deff53fd3d71d770ed5969af --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/iimport.go @@ -0,0 +1,793 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed package import. +// See cmd/compile/internal/typecheck/iexport.go for the export data format. 
+ +package importer + +import ( + "cmd/compile/internal/syntax" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types2" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "io" + "math/big" + "sort" + "strings" +) + +type intReader struct { + *strings.Reader + path string +} + +func (r *intReader) int64() int64 { + i, err := binary.ReadVarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +func (r *intReader) uint64() uint64 { + i, err := binary.ReadUvarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +// Keep this in sync with constants in iexport.go. +const ( + iexportVersionGo1_11 = 0 + iexportVersionPosCol = 1 + iexportVersionGenerics = 2 + iexportVersionGo1_18 = 2 + + iexportVersionCurrent = 2 +) + +type ident struct { + pkg *types2.Package + name string +} + +const predeclReserved = 32 + +type itag uint64 + +const ( + // Types + definedType itag = iota + pointerType + sliceType + arrayType + chanType + mapType + signatureType + structType + interfaceType + typeParamType + instanceType + unionType +) + +// ImportData imports a package from the serialized package data +// and returns the number of bytes consumed and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. 
+func ImportData(imports map[string]*types2.Package, data, path string) (pkg *types2.Package, err error) { + const currentVersion = iexportVersionCurrent + version := int64(-1) + defer func() { + if e := recover(); e != nil { + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + r := &intReader{strings.NewReader(data), path} + + version = int64(r.uint64()) + switch version { + case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11: + default: + errorf("unknown iexport format version %d", version) + } + + sLen := int64(r.uint64()) + dLen := int64(r.uint64()) + + whence, _ := r.Seek(0, io.SeekCurrent) + stringData := data[whence : whence+sLen] + declData := data[whence+sLen : whence+sLen+dLen] + r.Seek(sLen+dLen, io.SeekCurrent) + + p := iimporter{ + exportVersion: version, + ipath: path, + version: int(version), + + stringData: stringData, + pkgCache: make(map[uint64]*types2.Package), + posBaseCache: make(map[uint64]*syntax.PosBase), + + declData: declData, + pkgIndex: make(map[*types2.Package]map[string]uint64), + typCache: make(map[uint64]types2.Type), + // Separate map for typeparams, keyed by their package and unique + // name (name with subscript). + tparamIndex: make(map[ident]*types2.TypeParam), + } + + for i, pt := range predeclared { + p.typCache[uint64(i)] = pt + } + + pkgList := make([]*types2.Package, r.uint64()) + for i := range pkgList { + pkgPathOff := r.uint64() + pkgPath := p.stringAt(pkgPathOff) + pkgName := p.stringAt(r.uint64()) + _ = int(r.uint64()) // was package height, but not necessary anymore. 
+ + if pkgPath == "" { + pkgPath = path + } + pkg := imports[pkgPath] + if pkg == nil { + pkg = types2.NewPackage(pkgPath, pkgName) + imports[pkgPath] = pkg + } else { + if pkg.Name() != pkgName { + errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) + } + } + + p.pkgCache[pkgPathOff] = pkg + + nameIndex := make(map[string]uint64) + for nSyms := r.uint64(); nSyms > 0; nSyms-- { + name := p.stringAt(r.uint64()) + nameIndex[name] = r.uint64() + } + + p.pkgIndex[pkg] = nameIndex + pkgList[i] = pkg + } + + localpkg := pkgList[0] + + names := make([]string, 0, len(p.pkgIndex[localpkg])) + for name := range p.pkgIndex[localpkg] { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + p.doDecl(localpkg, name) + } + + // SetConstraint can't be called if the constraint type is not yet complete. + // When type params are created in the 'P' case of (*importReader).obj(), + // the associated constraint type may not be complete due to recursion. + // Therefore, we defer calling SetConstraint there, and call it here instead + // after all types are complete. + for _, d := range p.later { + d.t.SetConstraint(d.constraint) + } + // record all referenced packages as imports + list := append(([]*types2.Package)(nil), pkgList[1:]...) 
+ sort.Sort(byPath(list)) + localpkg.SetImports(list) + + // package was imported completely and without errors + localpkg.MarkComplete() + + return localpkg, nil +} + +type setConstraintArgs struct { + t *types2.TypeParam + constraint types2.Type +} + +type iimporter struct { + exportVersion int64 + ipath string + version int + + stringData string + pkgCache map[uint64]*types2.Package + posBaseCache map[uint64]*syntax.PosBase + + declData string + pkgIndex map[*types2.Package]map[string]uint64 + typCache map[uint64]types2.Type + tparamIndex map[ident]*types2.TypeParam + + interfaceList []*types2.Interface + + // Arguments for calls to SetConstraint that are deferred due to recursive types + later []setConstraintArgs +} + +func (p *iimporter) doDecl(pkg *types2.Package, name string) { + // See if we've already imported this declaration. + if obj := pkg.Scope().Lookup(name); obj != nil { + return + } + + off, ok := p.pkgIndex[pkg][name] + if !ok { + errorf("%v.%v not in index", pkg, name) + } + + r := &importReader{p: p, currPkg: pkg} + r.declReader.Reset(p.declData[off:]) + + r.obj(name) +} + +func (p *iimporter) stringAt(off uint64) string { + var x [binary.MaxVarintLen64]byte + n := copy(x[:], p.stringData[off:]) + + slen, n := binary.Uvarint(x[:n]) + if n <= 0 { + errorf("varint failed") + } + spos := off + uint64(n) + return p.stringData[spos : spos+slen] +} + +func (p *iimporter) pkgAt(off uint64) *types2.Package { + if pkg, ok := p.pkgCache[off]; ok { + return pkg + } + path := p.stringAt(off) + errorf("missing package %q in %q", path, p.ipath) + return nil +} + +func (p *iimporter) posBaseAt(off uint64) *syntax.PosBase { + if posBase, ok := p.posBaseCache[off]; ok { + return posBase + } + filename := p.stringAt(off) + posBase := syntax.NewTrimmedFileBase(filename, true) + p.posBaseCache[off] = posBase + return posBase +} + +func (p *iimporter) typAt(off uint64, base *types2.Named) types2.Type { + if t, ok := p.typCache[off]; ok && canReuse(base, t) { + 
return t + } + + if off < predeclReserved { + errorf("predeclared type missing from cache: %v", off) + } + + r := &importReader{p: p} + r.declReader.Reset(p.declData[off-predeclReserved:]) + t := r.doType(base) + + if canReuse(base, t) { + p.typCache[off] = t + } + return t +} + +// canReuse reports whether the type rhs on the RHS of the declaration for def +// may be re-used. +// +// Specifically, if def is non-nil and rhs is an interface type with methods, it +// may not be re-used because we have a convention of setting the receiver type +// for interface methods to def. +func canReuse(def *types2.Named, rhs types2.Type) bool { + if def == nil { + return true + } + iface, _ := rhs.(*types2.Interface) + if iface == nil { + return true + } + // Don't use iface.Empty() here as iface may not be complete. + return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0 +} + +type importReader struct { + p *iimporter + declReader strings.Reader + currPkg *types2.Package + prevPosBase *syntax.PosBase + prevLine int64 + prevColumn int64 +} + +func (r *importReader) obj(name string) { + tag := r.byte() + pos := r.pos() + + switch tag { + case 'A': + typ := r.typ() + + r.declare(types2.NewTypeName(pos, r.currPkg, name, typ)) + + case 'C': + typ, val := r.value() + + r.declare(types2.NewConst(pos, r.currPkg, name, typ, val)) + + case 'F', 'G': + var tparams []*types2.TypeParam + if tag == 'G' { + tparams = r.tparamList() + } + sig := r.signature(nil, nil, tparams) + r.declare(types2.NewFunc(pos, r.currPkg, name, sig)) + + case 'T', 'U': + // Types can be recursive. We need to setup a stub + // declaration before recursing. + obj := types2.NewTypeName(pos, r.currPkg, name, nil) + named := types2.NewNamed(obj, nil, nil) + // Declare obj before calling r.tparamList, so the new type name is recognized + // if used in the constraint of one of its own typeparams (see #48280). 
+ r.declare(obj) + if tag == 'U' { + tparams := r.tparamList() + named.SetTypeParams(tparams) + } + + underlying := r.p.typAt(r.uint64(), named).Underlying() + named.SetUnderlying(underlying) + + if !isInterface(underlying) { + for n := r.uint64(); n > 0; n-- { + mpos := r.pos() + mname := r.ident() + recv := r.param() + + // If the receiver has any targs, set those as the + // rparams of the method (since those are the + // typeparams being used in the method sig/body). + targs := baseType(recv.Type()).TypeArgs() + var rparams []*types2.TypeParam + if targs.Len() > 0 { + rparams = make([]*types2.TypeParam, targs.Len()) + for i := range rparams { + rparams[i], _ = targs.At(i).(*types2.TypeParam) + } + } + msig := r.signature(recv, rparams, nil) + + named.AddMethod(types2.NewFunc(mpos, r.currPkg, mname, msig)) + } + } + + case 'P': + // We need to "declare" a typeparam in order to have a name that + // can be referenced recursively (if needed) in the type param's + // bound. + if r.p.exportVersion < iexportVersionGenerics { + errorf("unexpected type param type") + } + name0 := typecheck.TparamName(name) + if name0 == "" { + errorf("malformed type parameter export name %s: missing prefix", name) + } + + tn := types2.NewTypeName(pos, r.currPkg, name0, nil) + t := types2.NewTypeParam(tn, nil) + // To handle recursive references to the typeparam within its + // bound, save the partial type in tparamIndex before reading the bounds. + id := ident{r.currPkg, name} + r.p.tparamIndex[id] = t + + var implicit bool + if r.p.exportVersion >= iexportVersionGo1_18 { + implicit = r.bool() + } + constraint := r.typ() + if implicit { + iface, _ := constraint.(*types2.Interface) + if iface == nil { + errorf("non-interface constraint marked implicit") + } + iface.MarkImplicit() + } + // The constraint type may not be complete, if we + // are in the middle of a type recursion involving type + // constraints. 
So, we defer SetConstraint until we have + // completely set up all types in ImportData. + r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint}) + + case 'V': + typ := r.typ() + + r.declare(types2.NewVar(pos, r.currPkg, name, typ)) + + default: + errorf("unexpected tag: %v", tag) + } +} + +func (r *importReader) declare(obj types2.Object) { + obj.Pkg().Scope().Insert(obj) +} + +func (r *importReader) value() (typ types2.Type, val constant.Value) { + typ = r.typ() + if r.p.exportVersion >= iexportVersionGo1_18 { + // TODO: add support for using the kind + _ = constant.Kind(r.int64()) + } + + switch b := typ.Underlying().(*types2.Basic); b.Info() & types2.IsConstType { + case types2.IsBoolean: + val = constant.MakeBool(r.bool()) + + case types2.IsString: + val = constant.MakeString(r.string()) + + case types2.IsInteger: + var x big.Int + r.mpint(&x, b) + val = constant.Make(&x) + + case types2.IsFloat: + val = r.mpfloat(b) + + case types2.IsComplex: + re := r.mpfloat(b) + im := r.mpfloat(b) + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + default: + errorf("unexpected type %v", typ) // panics + panic("unreachable") + } + + return +} + +func intSize(b *types2.Basic) (signed bool, maxBytes uint) { + if (b.Info() & types2.IsUntyped) != 0 { + return true, 64 + } + + switch b.Kind() { + case types2.Float32, types2.Complex64: + return true, 3 + case types2.Float64, types2.Complex128: + return true, 7 + } + + signed = (b.Info() & types2.IsUnsigned) == 0 + switch b.Kind() { + case types2.Int8, types2.Uint8: + maxBytes = 1 + case types2.Int16, types2.Uint16: + maxBytes = 2 + case types2.Int32, types2.Uint32: + maxBytes = 4 + default: + maxBytes = 8 + } + + return +} + +func (r *importReader) mpint(x *big.Int, typ *types2.Basic) { + signed, maxBytes := intSize(typ) + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + n, _ := r.declReader.ReadByte() + if uint(n) < 
maxSmall { + v := int64(n) + if signed { + v >>= 1 + if n&1 != 0 { + v = ^v + } + } + x.SetInt64(v) + return + } + + v := -n + if signed { + v = -(n &^ 1) >> 1 + } + if v < 1 || uint(v) > maxBytes { + errorf("weird decoding: %v, %v => %v", n, signed, v) + } + b := make([]byte, v) + io.ReadFull(&r.declReader, b) + x.SetBytes(b) + if signed && n&1 != 0 { + x.Neg(x) + } +} + +func (r *importReader) mpfloat(typ *types2.Basic) constant.Value { + var mant big.Int + r.mpint(&mant, typ) + var f big.Float + f.SetInt(&mant) + if f.Sign() != 0 { + f.SetMantExp(&f, int(r.int64())) + } + return constant.Make(&f) +} + +func (r *importReader) ident() string { + return r.string() +} + +func (r *importReader) qualifiedIdent() (*types2.Package, string) { + name := r.string() + pkg := r.pkg() + return pkg, name +} + +func (r *importReader) pos() syntax.Pos { + if r.p.version >= 1 { + r.posv1() + } else { + r.posv0() + } + + if (r.prevPosBase == nil || r.prevPosBase.Filename() == "") && r.prevLine == 0 && r.prevColumn == 0 { + return syntax.Pos{} + } + + return syntax.MakePos(r.prevPosBase, uint(r.prevLine), uint(r.prevColumn)) +} + +func (r *importReader) posv0() { + delta := r.int64() + if delta != deltaNewFile { + r.prevLine += delta + } else if l := r.int64(); l == -1 { + r.prevLine += deltaNewFile + } else { + r.prevPosBase = r.posBase() + r.prevLine = l + } +} + +func (r *importReader) posv1() { + delta := r.int64() + r.prevColumn += delta >> 1 + if delta&1 != 0 { + delta = r.int64() + r.prevLine += delta >> 1 + if delta&1 != 0 { + r.prevPosBase = r.posBase() + } + } +} + +func (r *importReader) typ() types2.Type { + return r.p.typAt(r.uint64(), nil) +} + +func isInterface(t types2.Type) bool { + _, ok := t.(*types2.Interface) + return ok +} + +func (r *importReader) pkg() *types2.Package { return r.p.pkgAt(r.uint64()) } +func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } +func (r *importReader) posBase() *syntax.PosBase { return r.p.posBaseAt(r.uint64()) 
} + +func (r *importReader) doType(base *types2.Named) types2.Type { + switch k := r.kind(); k { + default: + errorf("unexpected kind tag in %q: %v", r.p.ipath, k) + return nil + + case definedType: + pkg, name := r.qualifiedIdent() + r.p.doDecl(pkg, name) + return pkg.Scope().Lookup(name).(*types2.TypeName).Type() + case pointerType: + return types2.NewPointer(r.typ()) + case sliceType: + return types2.NewSlice(r.typ()) + case arrayType: + n := r.uint64() + return types2.NewArray(r.typ(), int64(n)) + case chanType: + dir := chanDir(int(r.uint64())) + return types2.NewChan(dir, r.typ()) + case mapType: + return types2.NewMap(r.typ(), r.typ()) + case signatureType: + r.currPkg = r.pkg() + return r.signature(nil, nil, nil) + + case structType: + r.currPkg = r.pkg() + + fields := make([]*types2.Var, r.uint64()) + tags := make([]string, len(fields)) + for i := range fields { + fpos := r.pos() + fname := r.ident() + ftyp := r.typ() + emb := r.bool() + tag := r.string() + + fields[i] = types2.NewField(fpos, r.currPkg, fname, ftyp, emb) + tags[i] = tag + } + return types2.NewStruct(fields, tags) + + case interfaceType: + r.currPkg = r.pkg() + + embeddeds := make([]types2.Type, r.uint64()) + for i := range embeddeds { + _ = r.pos() + embeddeds[i] = r.typ() + } + + methods := make([]*types2.Func, r.uint64()) + for i := range methods { + mpos := r.pos() + mname := r.ident() + + // TODO(mdempsky): Matches bimport.go, but I + // don't agree with this. 
+ var recv *types2.Var + if base != nil { + recv = types2.NewVar(syntax.Pos{}, r.currPkg, "", base) + } + + msig := r.signature(recv, nil, nil) + methods[i] = types2.NewFunc(mpos, r.currPkg, mname, msig) + } + + typ := types2.NewInterfaceType(methods, embeddeds) + r.p.interfaceList = append(r.p.interfaceList, typ) + return typ + + case typeParamType: + if r.p.exportVersion < iexportVersionGenerics { + errorf("unexpected type param type") + } + pkg, name := r.qualifiedIdent() + id := ident{pkg, name} + if t, ok := r.p.tparamIndex[id]; ok { + // We're already in the process of importing this typeparam. + return t + } + // Otherwise, import the definition of the typeparam now. + r.p.doDecl(pkg, name) + return r.p.tparamIndex[id] + + case instanceType: + if r.p.exportVersion < iexportVersionGenerics { + errorf("unexpected instantiation type") + } + // pos does not matter for instances: they are positioned on the original + // type. + _ = r.pos() + len := r.uint64() + targs := make([]types2.Type, len) + for i := range targs { + targs[i] = r.typ() + } + baseType := r.typ() + // The imported instantiated type doesn't include any methods, so + // we must always use the methods of the base (orig) type. 
+ // TODO provide a non-nil *Context + t, _ := types2.Instantiate(nil, baseType, targs, false) + return t + + case unionType: + if r.p.exportVersion < iexportVersionGenerics { + errorf("unexpected instantiation type") + } + terms := make([]*types2.Term, r.uint64()) + for i := range terms { + terms[i] = types2.NewTerm(r.bool(), r.typ()) + } + return types2.NewUnion(terms) + } +} + +func (r *importReader) kind() itag { + return itag(r.uint64()) +} + +func (r *importReader) signature(recv *types2.Var, rparams, tparams []*types2.TypeParam) *types2.Signature { + params := r.paramList() + results := r.paramList() + variadic := params.Len() > 0 && r.bool() + return types2.NewSignatureType(recv, rparams, tparams, params, results, variadic) +} + +func (r *importReader) tparamList() []*types2.TypeParam { + n := r.uint64() + if n == 0 { + return nil + } + xs := make([]*types2.TypeParam, n) + for i := range xs { + xs[i] = r.typ().(*types2.TypeParam) + } + return xs +} + +func (r *importReader) paramList() *types2.Tuple { + xs := make([]*types2.Var, r.uint64()) + for i := range xs { + xs[i] = r.param() + } + return types2.NewTuple(xs...) 
+} + +func (r *importReader) param() *types2.Var { + pos := r.pos() + name := r.ident() + typ := r.typ() + return types2.NewParam(pos, r.currPkg, name, typ) +} + +func (r *importReader) bool() bool { + return r.uint64() != 0 +} + +func (r *importReader) int64() int64 { + n, err := binary.ReadVarint(&r.declReader) + if err != nil { + errorf("readVarint: %v", err) + } + return n +} + +func (r *importReader) uint64() uint64 { + n, err := binary.ReadUvarint(&r.declReader) + if err != nil { + errorf("readUvarint: %v", err) + } + return n +} + +func (r *importReader) byte() byte { + x, err := r.declReader.ReadByte() + if err != nil { + errorf("declReader.ReadByte: %v", err) + } + return x +} + +func baseType(typ types2.Type) *types2.Named { + // pointer receivers are never types2.Named types + if p, _ := typ.(*types2.Pointer); p != nil { + typ = p.Elem() + } + // receiver base types are always (possibly generic) types2.Named types + n, _ := typ.(*types2.Named) + return n +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/support.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/support.go new file mode 100644 index 0000000000000000000000000000000000000000..5810f5e172b8139767c27c67ffd5be4c5ba9306d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/support.go @@ -0,0 +1,152 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements support functionality for iimport.go. 
+ +package importer + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/types2" + "fmt" + "go/token" + "internal/pkgbits" + "sync" +) + +func assert(p bool) { + base.Assert(p) +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Sprintf(format, args...)) +} + +const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go + +// Synthesize a token.Pos +type fakeFileSet struct { + fset *token.FileSet + files map[string]*token.File +} + +func (s *fakeFileSet) pos(file string, line, column int) token.Pos { + // TODO(mdempsky): Make use of column. + + // Since we don't know the set of needed file positions, we + // reserve maxlines positions per file. + const maxlines = 64 * 1024 + f := s.files[file] + if f == nil { + f = s.fset.AddFile(file, -1, maxlines) + s.files[file] = f + // Allocate the fake linebreak indices on first use. + // TODO(adonovan): opt: save ~512KB using a more complex scheme? + fakeLinesOnce.Do(func() { + fakeLines = make([]int, maxlines) + for i := range fakeLines { + fakeLines[i] = i + } + }) + f.SetLines(fakeLines) + } + + if line > maxlines { + line = 1 + } + + // Treat the file as if it contained only newlines + // and column=1: use the line number as the offset. 
+ return f.Pos(line - 1) +} + +var ( + fakeLines []int + fakeLinesOnce sync.Once +) + +func chanDir(d int) types2.ChanDir { + // tag values must match the constants in cmd/compile/internal/gc/go.go + switch d { + case 1 /* Crecv */ : + return types2.RecvOnly + case 2 /* Csend */ : + return types2.SendOnly + case 3 /* Cboth */ : + return types2.SendRecv + default: + errorf("unexpected channel dir %d", d) + return 0 + } +} + +var predeclared = []types2.Type{ + // basic types + types2.Typ[types2.Bool], + types2.Typ[types2.Int], + types2.Typ[types2.Int8], + types2.Typ[types2.Int16], + types2.Typ[types2.Int32], + types2.Typ[types2.Int64], + types2.Typ[types2.Uint], + types2.Typ[types2.Uint8], + types2.Typ[types2.Uint16], + types2.Typ[types2.Uint32], + types2.Typ[types2.Uint64], + types2.Typ[types2.Uintptr], + types2.Typ[types2.Float32], + types2.Typ[types2.Float64], + types2.Typ[types2.Complex64], + types2.Typ[types2.Complex128], + types2.Typ[types2.String], + + // basic type aliases + types2.Universe.Lookup("byte").Type(), + types2.Universe.Lookup("rune").Type(), + + // error + types2.Universe.Lookup("error").Type(), + + // untyped types + types2.Typ[types2.UntypedBool], + types2.Typ[types2.UntypedInt], + types2.Typ[types2.UntypedRune], + types2.Typ[types2.UntypedFloat], + types2.Typ[types2.UntypedComplex], + types2.Typ[types2.UntypedString], + types2.Typ[types2.UntypedNil], + + // package unsafe + types2.Typ[types2.UnsafePointer], + + // invalid type + types2.Typ[types2.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + // not to be confused with the universe any + anyType{}, + + // comparable + types2.Universe.Lookup("comparable").Type(), + + // any + types2.Universe.Lookup("any").Type(), +} + +type anyType struct{} + +func (t anyType) Underlying() types2.Type { return t } +func (t anyType) String() string { return "any" } + +// See cmd/compile/internal/noder.derivedInfo. 
+type derivedInfo struct { + idx pkgbits.Index + needed bool +} + +// See cmd/compile/internal/noder.typeInfo. +type typeInfo struct { + idx pkgbits.Index + derived bool +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/ureader.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/ureader.go new file mode 100644 index 0000000000000000000000000000000000000000..f5c2f41069de8b40086ceadba60eae624814f494 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/ureader.go @@ -0,0 +1,535 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package importer + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/syntax" + "cmd/compile/internal/types2" + "cmd/internal/src" + "internal/pkgbits" +) + +type pkgReader struct { + pkgbits.PkgDecoder + + ctxt *types2.Context + imports map[string]*types2.Package + + posBases []*syntax.PosBase + pkgs []*types2.Package + typs []types2.Type +} + +func ReadPackage(ctxt *types2.Context, imports map[string]*types2.Package, input pkgbits.PkgDecoder) *types2.Package { + pr := pkgReader{ + PkgDecoder: input, + + ctxt: ctxt, + imports: imports, + + posBases: make([]*syntax.PosBase, input.NumElems(pkgbits.RelocPosBase)), + pkgs: make([]*types2.Package, input.NumElems(pkgbits.RelocPkg)), + typs: make([]types2.Type, input.NumElems(pkgbits.RelocType)), + } + + r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) + pkg := r.pkg() + r.Bool() // TODO(mdempsky): Remove; was "has init" + + for i, n := 0, r.Len(); i < n; i++ { + // As if r.obj(), but avoiding the Scope.Lookup call, + // to avoid eager loading of imports. 
+ r.Sync(pkgbits.SyncObject) + assert(!r.Bool()) + r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + assert(r.Len() == 0) + } + + r.Sync(pkgbits.SyncEOF) + + pkg.MarkComplete() + return pkg +} + +type reader struct { + pkgbits.Decoder + + p *pkgReader + + dict *readerDict +} + +type readerDict struct { + bounds []typeInfo + + tparams []*types2.TypeParam + + derived []derivedInfo + derivedTypes []types2.Type +} + +type readerTypeBound struct { + derived bool + boundIdx int +} + +func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.NewDecoder(k, idx, marker), + p: pr, + } +} + +func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.TempDecoder(k, idx, marker), + p: pr, + } +} + +func (pr *pkgReader) retireReader(r *reader) { + pr.RetireDecoder(&r.Decoder) +} + +// @@@ Positions + +func (r *reader) pos() syntax.Pos { + r.Sync(pkgbits.SyncPos) + if !r.Bool() { + return syntax.Pos{} + } + + // TODO(mdempsky): Delta encoding. 
+ posBase := r.posBase() + line := r.Uint() + col := r.Uint() + return syntax.MakePos(posBase, line, col) +} + +func (r *reader) posBase() *syntax.PosBase { + return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase)) +} + +func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) *syntax.PosBase { + if b := pr.posBases[idx]; b != nil { + return b + } + var b *syntax.PosBase + { + r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) + + filename := r.String() + + if r.Bool() { + b = syntax.NewTrimmedFileBase(filename, true) + } else { + pos := r.pos() + line := r.Uint() + col := r.Uint() + b = syntax.NewLineBase(pos, filename, true, line, col) + } + pr.retireReader(r) + } + + pr.posBases[idx] = b + return b +} + +// @@@ Packages + +func (r *reader) pkg() *types2.Package { + r.Sync(pkgbits.SyncPkg) + return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg)) +} + +func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types2.Package { + // TODO(mdempsky): Consider using some non-nil pointer to indicate + // the universe scope, so we don't need to keep re-reading it. + if pkg := pr.pkgs[idx]; pkg != nil { + return pkg + } + + pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg() + pr.pkgs[idx] = pkg + return pkg +} + +func (r *reader) doPkg() *types2.Package { + path := r.String() + switch path { + case "": + path = r.p.PkgPath() + case "builtin": + return nil // universe + case "unsafe": + return types2.Unsafe + } + + if pkg := r.p.imports[path]; pkg != nil { + return pkg + } + + name := r.String() + pkg := types2.NewPackage(path, name) + r.p.imports[path] = pkg + + // TODO(mdempsky): The list of imported packages is important for + // go/types, but we could probably skip populating it for types2. 
+ imports := make([]*types2.Package, r.Len()) + for i := range imports { + imports[i] = r.pkg() + } + pkg.SetImports(imports) + + return pkg +} + +// @@@ Types + +func (r *reader) typ() types2.Type { + return r.p.typIdx(r.typInfo(), r.dict) +} + +func (r *reader) typInfo() typeInfo { + r.Sync(pkgbits.SyncType) + if r.Bool() { + return typeInfo{idx: pkgbits.Index(r.Len()), derived: true} + } + return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false} +} + +func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types2.Type { + idx := info.idx + var where *types2.Type + if info.derived { + where = &dict.derivedTypes[idx] + idx = dict.derived[idx].idx + } else { + where = &pr.typs[idx] + } + + if typ := *where; typ != nil { + return typ + } + + var typ types2.Type + { + r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) + r.dict = dict + + typ = r.doTyp() + assert(typ != nil) + pr.retireReader(r) + } + + // See comment in pkgReader.typIdx explaining how this happens. 
+ if prev := *where; prev != nil { + return prev + } + + *where = typ + return typ +} + +func (r *reader) doTyp() (res types2.Type) { + switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag { + default: + base.FatalfAt(src.NoXPos, "unhandled type tag: %v", tag) + panic("unreachable") + + case pkgbits.TypeBasic: + return types2.Typ[r.Len()] + + case pkgbits.TypeNamed: + obj, targs := r.obj() + name := obj.(*types2.TypeName) + if len(targs) != 0 { + t, _ := types2.Instantiate(r.p.ctxt, name.Type(), targs, false) + return t + } + return name.Type() + + case pkgbits.TypeTypeParam: + return r.dict.tparams[r.Len()] + + case pkgbits.TypeArray: + len := int64(r.Uint64()) + return types2.NewArray(r.typ(), len) + case pkgbits.TypeChan: + dir := types2.ChanDir(r.Len()) + return types2.NewChan(dir, r.typ()) + case pkgbits.TypeMap: + return types2.NewMap(r.typ(), r.typ()) + case pkgbits.TypePointer: + return types2.NewPointer(r.typ()) + case pkgbits.TypeSignature: + return r.signature(nil, nil, nil) + case pkgbits.TypeSlice: + return types2.NewSlice(r.typ()) + case pkgbits.TypeStruct: + return r.structType() + case pkgbits.TypeInterface: + return r.interfaceType() + case pkgbits.TypeUnion: + return r.unionType() + } +} + +func (r *reader) structType() *types2.Struct { + fields := make([]*types2.Var, r.Len()) + var tags []string + for i := range fields { + pos := r.pos() + pkg, name := r.selector() + ftyp := r.typ() + tag := r.String() + embedded := r.Bool() + + fields[i] = types2.NewField(pos, pkg, name, ftyp, embedded) + if tag != "" { + for len(tags) < i { + tags = append(tags, "") + } + tags = append(tags, tag) + } + } + return types2.NewStruct(fields, tags) +} + +func (r *reader) unionType() *types2.Union { + terms := make([]*types2.Term, r.Len()) + for i := range terms { + terms[i] = types2.NewTerm(r.Bool(), r.typ()) + } + return types2.NewUnion(terms) +} + +func (r *reader) interfaceType() *types2.Interface { + methods := make([]*types2.Func, r.Len()) + embeddeds 
:= make([]types2.Type, r.Len()) + implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool() + + for i := range methods { + pos := r.pos() + pkg, name := r.selector() + mtyp := r.signature(nil, nil, nil) + methods[i] = types2.NewFunc(pos, pkg, name, mtyp) + } + + for i := range embeddeds { + embeddeds[i] = r.typ() + } + + iface := types2.NewInterfaceType(methods, embeddeds) + if implicit { + iface.MarkImplicit() + } + return iface +} + +func (r *reader) signature(recv *types2.Var, rtparams, tparams []*types2.TypeParam) *types2.Signature { + r.Sync(pkgbits.SyncSignature) + + params := r.params() + results := r.params() + variadic := r.Bool() + + return types2.NewSignatureType(recv, rtparams, tparams, params, results, variadic) +} + +func (r *reader) params() *types2.Tuple { + r.Sync(pkgbits.SyncParams) + params := make([]*types2.Var, r.Len()) + for i := range params { + params[i] = r.param() + } + return types2.NewTuple(params...) +} + +func (r *reader) param() *types2.Var { + r.Sync(pkgbits.SyncParam) + + pos := r.pos() + pkg, name := r.localIdent() + typ := r.typ() + + return types2.NewParam(pos, pkg, name, typ) +} + +// @@@ Objects + +func (r *reader) obj() (types2.Object, []types2.Type) { + r.Sync(pkgbits.SyncObject) + + assert(!r.Bool()) + + pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + obj := pkg.Scope().Lookup(name) + + targs := make([]types2.Type, r.Len()) + for i := range targs { + targs[i] = r.typ() + } + + return obj, targs +} + +func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types2.Package, string) { + var objPkg *types2.Package + var objName string + var tag pkgbits.CodeObj + { + rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) + + objPkg, objName = rname.qualifiedIdent() + assert(objName != "") + + tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) + pr.retireReader(rname) + } + + if tag == pkgbits.ObjStub { + base.Assertf(objPkg == nil || objPkg == types2.Unsafe, "unexpected stub package: %v", objPkg) + return 
objPkg, objName + } + + objPkg.Scope().InsertLazy(objName, func() types2.Object { + dict := pr.objDictIdx(idx) + + r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1) + r.dict = dict + + switch tag { + default: + panic("weird") + + case pkgbits.ObjAlias: + pos := r.pos() + typ := r.typ() + return types2.NewTypeName(pos, objPkg, objName, typ) + + case pkgbits.ObjConst: + pos := r.pos() + typ := r.typ() + val := r.Value() + return types2.NewConst(pos, objPkg, objName, typ, val) + + case pkgbits.ObjFunc: + pos := r.pos() + tparams := r.typeParamNames() + sig := r.signature(nil, nil, tparams) + return types2.NewFunc(pos, objPkg, objName, sig) + + case pkgbits.ObjType: + pos := r.pos() + + return types2.NewTypeNameLazy(pos, objPkg, objName, func(named *types2.Named) (tparams []*types2.TypeParam, underlying types2.Type, methods []*types2.Func) { + tparams = r.typeParamNames() + + // TODO(mdempsky): Rewrite receiver types to underlying is an + // Interface? The go/types importer does this (I think because + // unit tests expected that), but cmd/compile doesn't care + // about it, so maybe we can avoid worrying about that here. 
+ underlying = r.typ().Underlying() + + methods = make([]*types2.Func, r.Len()) + for i := range methods { + methods[i] = r.method() + } + + return + }) + + case pkgbits.ObjVar: + pos := r.pos() + typ := r.typ() + return types2.NewVar(pos, objPkg, objName, typ) + } + }) + + return objPkg, objName +} + +func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { + var dict readerDict + { + r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1) + + if implicits := r.Len(); implicits != 0 { + base.Fatalf("unexpected object with %v implicit type parameter(s)", implicits) + } + + dict.bounds = make([]typeInfo, r.Len()) + for i := range dict.bounds { + dict.bounds[i] = r.typInfo() + } + + dict.derived = make([]derivedInfo, r.Len()) + dict.derivedTypes = make([]types2.Type, len(dict.derived)) + for i := range dict.derived { + dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} + } + + pr.retireReader(r) + } + // function references follow, but reader doesn't need those + + return &dict +} + +func (r *reader) typeParamNames() []*types2.TypeParam { + r.Sync(pkgbits.SyncTypeParamNames) + + // Note: This code assumes it only processes objects without + // implement type parameters. This is currently fine, because + // reader is only used to read in exported declarations, which are + // always package scoped. + + if len(r.dict.bounds) == 0 { + return nil + } + + // Careful: Type parameter lists may have cycles. To allow for this, + // we construct the type parameter list in two passes: first we + // create all the TypeNames and TypeParams, then we construct and + // set the bound type. 
+ + r.dict.tparams = make([]*types2.TypeParam, len(r.dict.bounds)) + for i := range r.dict.bounds { + pos := r.pos() + pkg, name := r.localIdent() + + tname := types2.NewTypeName(pos, pkg, name, nil) + r.dict.tparams[i] = types2.NewTypeParam(tname, nil) + } + + for i, bound := range r.dict.bounds { + r.dict.tparams[i].SetConstraint(r.p.typIdx(bound, r.dict)) + } + + return r.dict.tparams +} + +func (r *reader) method() *types2.Func { + r.Sync(pkgbits.SyncMethod) + pos := r.pos() + pkg, name := r.selector() + + rtparams := r.typeParamNames() + sig := r.signature(r.param(), rtparams, nil) + + _ = r.pos() // TODO(mdempsky): Remove; this is a hacker for linker.go. + return types2.NewFunc(pos, pkg, name, sig) +} + +func (r *reader) qualifiedIdent() (*types2.Package, string) { return r.ident(pkgbits.SyncSym) } +func (r *reader) localIdent() (*types2.Package, string) { return r.ident(pkgbits.SyncLocalIdent) } +func (r *reader) selector() (*types2.Package, string) { return r.ident(pkgbits.SyncSelector) } + +func (r *reader) ident(marker pkgbits.SyncMarker) (*types2.Package, string) { + r.Sync(marker) + return r.pkg(), r.String() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inl.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inl.go new file mode 100644 index 0000000000000000000000000000000000000000..b365008c764c2d18eaab8939afa862587158b708 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inl.go @@ -0,0 +1,1217 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// The inlining facility makes 2 passes: first CanInline determines which +// functions are suitable for inlining, and for those that are it +// saves a copy of the body. Then InlineCalls walks each function body to +// expand calls to inlinable functions. +// +// The Debug.l flag controls the aggressiveness. 
Note that main() swaps level 0 and 1, +// making 1 the default and -l disable. Additional levels (beyond -l) may be buggy and +// are not supported. +// 0: disabled +// 1: 80-nodes leaf functions, oneliners, panic, lazy typechecking (default) +// 2: (unassigned) +// 3: (unassigned) +// 4: allow non-leaf functions +// +// At some point this may get another default and become switch-offable with -N. +// +// The -d typcheckinl flag enables early typechecking of all imported bodies, +// which is useful to flush out bugs. +// +// The Debug.m flag enables diagnostic output. a single -m is useful for verifying +// which calls get inlined or not, more is for debugging, and may go away at any point. + +package inline + +import ( + "fmt" + "go/constant" + "internal/buildcfg" + "strconv" + + "cmd/compile/internal/base" + "cmd/compile/internal/inline/inlheur" + "cmd/compile/internal/ir" + "cmd/compile/internal/logopt" + "cmd/compile/internal/pgo" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/obj" +) + +// Inlining budget parameters, gathered in one place +const ( + inlineMaxBudget = 80 + inlineExtraAppendCost = 0 + // default is to inline if there's at most one call. -l=4 overrides this by using 1 instead. + inlineExtraCallCost = 57 // 57 was benchmarked to provided most benefit with no bad surprises; see https://github.com/golang/go/issues/19348#issuecomment-439370742 + inlineExtraPanicCost = 1 // do not penalize inlining panics. + inlineExtraThrowCost = inlineMaxBudget // with current (2018-05/1.11) code, inlining runtime.throw does not help. + + inlineBigFunctionNodes = 5000 // Functions with this many nodes are considered "big". + inlineBigFunctionMaxCost = 20 // Max cost of inlinee when inlining into a "big" function. +) + +var ( + // List of all hot callee nodes. + // TODO(prattmic): Make this non-global. + candHotCalleeMap = make(map[*pgo.IRNode]struct{}) + + // List of all hot call sites. CallSiteInfo.Callee is always nil. 
+ // TODO(prattmic): Make this non-global. + candHotEdgeMap = make(map[pgo.CallSiteInfo]struct{}) + + // Threshold in percentage for hot callsite inlining. + inlineHotCallSiteThresholdPercent float64 + + // Threshold in CDF percentage for hot callsite inlining, + // that is, for a threshold of X the hottest callsites that + // make up the top X% of total edge weight will be + // considered hot for inlining candidates. + inlineCDFHotCallSiteThresholdPercent = float64(99) + + // Budget increased due to hotness. + inlineHotMaxBudget int32 = 2000 +) + +// PGOInlinePrologue records the hot callsites from ir-graph. +func PGOInlinePrologue(p *pgo.Profile, funcs []*ir.Func) { + if base.Debug.PGOInlineCDFThreshold != "" { + if s, err := strconv.ParseFloat(base.Debug.PGOInlineCDFThreshold, 64); err == nil && s >= 0 && s <= 100 { + inlineCDFHotCallSiteThresholdPercent = s + } else { + base.Fatalf("invalid PGOInlineCDFThreshold, must be between 0 and 100") + } + } + var hotCallsites []pgo.NamedCallEdge + inlineHotCallSiteThresholdPercent, hotCallsites = hotNodesFromCDF(p) + if base.Debug.PGODebug > 0 { + fmt.Printf("hot-callsite-thres-from-CDF=%v\n", inlineHotCallSiteThresholdPercent) + } + + if x := base.Debug.PGOInlineBudget; x != 0 { + inlineHotMaxBudget = int32(x) + } + + for _, n := range hotCallsites { + // mark inlineable callees from hot edges + if callee := p.WeightedCG.IRNodes[n.CalleeName]; callee != nil { + candHotCalleeMap[callee] = struct{}{} + } + // mark hot call sites + if caller := p.WeightedCG.IRNodes[n.CallerName]; caller != nil && caller.AST != nil { + csi := pgo.CallSiteInfo{LineOffset: n.CallSiteOffset, Caller: caller.AST} + candHotEdgeMap[csi] = struct{}{} + } + } + + if base.Debug.PGODebug >= 3 { + fmt.Printf("hot-cg before inline in dot format:") + p.PrintWeightedCallGraphDOT(inlineHotCallSiteThresholdPercent) + } +} + +// hotNodesFromCDF computes an edge weight threshold and the list of hot +// nodes that make up the given percentage of the CDF. 
The threshold, as +// a percent, is the lower bound of weight for nodes to be considered hot +// (currently only used in debug prints) (in case of equal weights, +// comparing with the threshold may not accurately reflect which nodes are +// considiered hot). +func hotNodesFromCDF(p *pgo.Profile) (float64, []pgo.NamedCallEdge) { + cum := int64(0) + for i, n := range p.NamedEdgeMap.ByWeight { + w := p.NamedEdgeMap.Weight[n] + cum += w + if pgo.WeightInPercentage(cum, p.TotalWeight) > inlineCDFHotCallSiteThresholdPercent { + // nodes[:i+1] to include the very last node that makes it to go over the threshold. + // (Say, if the CDF threshold is 50% and one hot node takes 60% of weight, we want to + // include that node instead of excluding it.) + return pgo.WeightInPercentage(w, p.TotalWeight), p.NamedEdgeMap.ByWeight[:i+1] + } + } + return 0, p.NamedEdgeMap.ByWeight +} + +// CanInlineFuncs computes whether a batch of functions are inlinable. +func CanInlineFuncs(funcs []*ir.Func, profile *pgo.Profile) { + if profile != nil { + PGOInlinePrologue(profile, funcs) + } + + ir.VisitFuncsBottomUp(funcs, func(list []*ir.Func, recursive bool) { + CanInlineSCC(list, recursive, profile) + }) +} + +// CanInlineSCC computes the inlinability of functions within an SCC +// (strongly connected component). +// +// CanInlineSCC is designed to be used by ir.VisitFuncsBottomUp +// callbacks. +func CanInlineSCC(funcs []*ir.Func, recursive bool, profile *pgo.Profile) { + if base.Flag.LowerL == 0 { + return + } + + numfns := numNonClosures(funcs) + + for _, fn := range funcs { + if !recursive || numfns > 1 { + // We allow inlining if there is no + // recursion, or the recursion cycle is + // across more than one function. 
+ CanInline(fn, profile) + } else { + if base.Flag.LowerM > 1 && fn.OClosure == nil { + fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(fn), fn.Nname) + } + } + if inlheur.Enabled() { + analyzeFuncProps(fn, profile) + } + } +} + +// GarbageCollectUnreferencedHiddenClosures makes a pass over all the +// top-level (non-hidden-closure) functions looking for nested closure +// functions that are reachable, then sweeps through the Target.Decls +// list and marks any non-reachable hidden closure function as dead. +// See issues #59404 and #59638 for more context. +func GarbageCollectUnreferencedHiddenClosures() { + + liveFuncs := make(map[*ir.Func]bool) + + var markLiveFuncs func(fn *ir.Func) + markLiveFuncs = func(fn *ir.Func) { + if liveFuncs[fn] { + return + } + liveFuncs[fn] = true + ir.Visit(fn, func(n ir.Node) { + if clo, ok := n.(*ir.ClosureExpr); ok { + markLiveFuncs(clo.Func) + } + }) + } + + for i := 0; i < len(typecheck.Target.Funcs); i++ { + fn := typecheck.Target.Funcs[i] + if fn.IsHiddenClosure() { + continue + } + markLiveFuncs(fn) + } + + for i := 0; i < len(typecheck.Target.Funcs); i++ { + fn := typecheck.Target.Funcs[i] + if !fn.IsHiddenClosure() { + continue + } + if fn.IsDeadcodeClosure() { + continue + } + if liveFuncs[fn] { + continue + } + fn.SetIsDeadcodeClosure(true) + if base.Flag.LowerM > 2 { + fmt.Printf("%v: unreferenced closure %v marked as dead\n", ir.Line(fn), fn) + } + if fn.Inl != nil && fn.LSym == nil { + ir.InitLSym(fn, true) + } + } +} + +// inlineBudget determines the max budget for function 'fn' prior to +// analyzing the hairyness of the body of 'fn'. We pass in the pgo +// profile if available (which can change the budget), also a +// 'relaxed' flag, which expands the budget slightly to allow for the +// possibility that a call to the function might have its score +// adjusted downwards. If 'verbose' is set, then print a remark where +// we boost the budget due to PGO. 
+func inlineBudget(fn *ir.Func, profile *pgo.Profile, relaxed bool, verbose bool) int32 { + // Update the budget for profile-guided inlining. + budget := int32(inlineMaxBudget) + if profile != nil { + if n, ok := profile.WeightedCG.IRNodes[ir.LinkFuncName(fn)]; ok { + if _, ok := candHotCalleeMap[n]; ok { + budget = int32(inlineHotMaxBudget) + if verbose { + fmt.Printf("hot-node enabled increased budget=%v for func=%v\n", budget, ir.PkgFuncName(fn)) + } + } + } + } + if relaxed { + budget += inlheur.BudgetExpansion(inlineMaxBudget) + } + return budget +} + +// CanInline determines whether fn is inlineable. +// If so, CanInline saves copies of fn.Body and fn.Dcl in fn.Inl. +// fn and fn.Body will already have been typechecked. +func CanInline(fn *ir.Func, profile *pgo.Profile) { + if fn.Nname == nil { + base.Fatalf("CanInline no nname %+v", fn) + } + + var reason string // reason, if any, that the function was not inlined + if base.Flag.LowerM > 1 || logopt.Enabled() { + defer func() { + if reason != "" { + if base.Flag.LowerM > 1 { + fmt.Printf("%v: cannot inline %v: %s\n", ir.Line(fn), fn.Nname, reason) + } + if logopt.Enabled() { + logopt.LogOpt(fn.Pos(), "cannotInlineFunction", "inline", ir.FuncName(fn), reason) + } + } + }() + } + + reason = InlineImpossible(fn) + if reason != "" { + return + } + if fn.Typecheck() == 0 { + base.Fatalf("CanInline on non-typechecked function %v", fn) + } + + n := fn.Nname + if n.Func.InlinabilityChecked() { + return + } + defer n.Func.SetInlinabilityChecked(true) + + cc := int32(inlineExtraCallCost) + if base.Flag.LowerL == 4 { + cc = 1 // this appears to yield better performance than 0. + } + + // Used a "relaxed" inline budget if the new inliner is enabled. + relaxed := inlheur.Enabled() + + // Compute the inline budget for this func. 
+ budget := inlineBudget(fn, profile, relaxed, base.Debug.PGODebug > 0) + + // At this point in the game the function we're looking at may + // have "stale" autos, vars that still appear in the Dcl list, but + // which no longer have any uses in the function body (due to + // elimination by deadcode). We'd like to exclude these dead vars + // when creating the "Inline.Dcl" field below; to accomplish this, + // the hairyVisitor below builds up a map of used/referenced + // locals, and we use this map to produce a pruned Inline.Dcl + // list. See issue 25459 for more context. + + visitor := hairyVisitor{ + curFunc: fn, + isBigFunc: IsBigFunc(fn), + budget: budget, + maxBudget: budget, + extraCallCost: cc, + profile: profile, + } + if visitor.tooHairy(fn) { + reason = visitor.reason + return + } + + n.Func.Inl = &ir.Inline{ + Cost: budget - visitor.budget, + Dcl: pruneUnusedAutos(n.Func.Dcl, &visitor), + HaveDcl: true, + + CanDelayResults: canDelayResults(fn), + } + if base.Flag.LowerM != 0 || logopt.Enabled() { + noteInlinableFunc(n, fn, budget-visitor.budget) + } +} + +// noteInlinableFunc issues a message to the user that the specified +// function is inlinable. +func noteInlinableFunc(n *ir.Name, fn *ir.Func, cost int32) { + if base.Flag.LowerM > 1 { + fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, cost, fn.Type(), ir.Nodes(fn.Body)) + } else if base.Flag.LowerM != 0 { + fmt.Printf("%v: can inline %v\n", ir.Line(fn), n) + } + // JSON optimization log output. + if logopt.Enabled() { + logopt.LogOpt(fn.Pos(), "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", cost)) + } +} + +// InlineImpossible returns a non-empty reason string if fn is impossible to +// inline regardless of cost or contents. +func InlineImpossible(fn *ir.Func) string { + var reason string // reason, if any, that the function can not be inlined. 
+ if fn.Nname == nil { + reason = "no name" + return reason + } + + // If marked "go:noinline", don't inline. + if fn.Pragma&ir.Noinline != 0 { + reason = "marked go:noinline" + return reason + } + + // If marked "go:norace" and -race compilation, don't inline. + if base.Flag.Race && fn.Pragma&ir.Norace != 0 { + reason = "marked go:norace with -race compilation" + return reason + } + + // If marked "go:nocheckptr" and -d checkptr compilation, don't inline. + if base.Debug.Checkptr != 0 && fn.Pragma&ir.NoCheckPtr != 0 { + reason = "marked go:nocheckptr" + return reason + } + + // If marked "go:cgo_unsafe_args", don't inline, since the function + // makes assumptions about its argument frame layout. + if fn.Pragma&ir.CgoUnsafeArgs != 0 { + reason = "marked go:cgo_unsafe_args" + return reason + } + + // If marked as "go:uintptrkeepalive", don't inline, since the keep + // alive information is lost during inlining. + // + // TODO(prattmic): This is handled on calls during escape analysis, + // which is after inlining. Move prior to inlining so the keep-alive is + // maintained after inlining. + if fn.Pragma&ir.UintptrKeepAlive != 0 { + reason = "marked as having a keep-alive uintptr argument" + return reason + } + + // If marked as "go:uintptrescapes", don't inline, since the escape + // information is lost during inlining. + if fn.Pragma&ir.UintptrEscapes != 0 { + reason = "marked as having an escaping uintptr argument" + return reason + } + + // The nowritebarrierrec checker currently works at function + // granularity, so inlining yeswritebarrierrec functions can confuse it + // (#22342). As a workaround, disallow inlining them for now. + if fn.Pragma&ir.Yeswritebarrierrec != 0 { + reason = "marked go:yeswritebarrierrec" + return reason + } + + // If a local function has no fn.Body (is defined outside of Go), cannot inline it. + // Imported functions don't have fn.Body but might have inline body in fn.Inl. 
+ if len(fn.Body) == 0 && !typecheck.HaveInlineBody(fn) { + reason = "no function body" + return reason + } + + return "" +} + +// canDelayResults reports whether inlined calls to fn can delay +// declaring the result parameter until the "return" statement. +func canDelayResults(fn *ir.Func) bool { + // We can delay declaring+initializing result parameters if: + // (1) there's exactly one "return" statement in the inlined function; + // (2) it's not an empty return statement (#44355); and + // (3) the result parameters aren't named. + + nreturns := 0 + ir.VisitList(fn.Body, func(n ir.Node) { + if n, ok := n.(*ir.ReturnStmt); ok { + nreturns++ + if len(n.Results) == 0 { + nreturns++ // empty return statement (case 2) + } + } + }) + + if nreturns != 1 { + return false // not exactly one return statement (case 1) + } + + // temporaries for return values. + for _, param := range fn.Type().Results() { + if sym := param.Sym; sym != nil && !sym.IsBlank() { + return false // found a named result parameter (case 3) + } + } + + return true +} + +// hairyVisitor visits a function body to determine its inlining +// hairiness and whether or not it can be inlined. +type hairyVisitor struct { + // This is needed to access the current caller in the doNode function. + curFunc *ir.Func + isBigFunc bool + budget int32 + maxBudget int32 + reason string + extraCallCost int32 + usedLocals ir.NameSet + do func(ir.Node) bool + profile *pgo.Profile +} + +func (v *hairyVisitor) tooHairy(fn *ir.Func) bool { + v.do = v.doNode // cache closure + if ir.DoChildren(fn, v.do) { + return true + } + if v.budget < 0 { + v.reason = fmt.Sprintf("function too complex: cost %d exceeds budget %d", v.maxBudget-v.budget, v.maxBudget) + return true + } + return false +} + +// doNode visits n and its children, updates the state in v, and returns true if +// n makes the current function too hairy for inlining. 
+func (v *hairyVisitor) doNode(n ir.Node) bool { + if n == nil { + return false + } +opSwitch: + switch n.Op() { + // Call is okay if inlinable and we have the budget for the body. + case ir.OCALLFUNC: + n := n.(*ir.CallExpr) + // Functions that call runtime.getcaller{pc,sp} can not be inlined + // because getcaller{pc,sp} expect a pointer to the caller's first argument. + // + // runtime.throw is a "cheap call" like panic in normal code. + var cheap bool + if n.Fun.Op() == ir.ONAME { + name := n.Fun.(*ir.Name) + if name.Class == ir.PFUNC { + switch fn := types.RuntimeSymName(name.Sym()); fn { + case "getcallerpc", "getcallersp": + v.reason = "call to " + fn + return true + case "throw": + v.budget -= inlineExtraThrowCost + break opSwitch + case "panicrangeexit": + cheap = true + } + // Special case for reflect.noescape. It does just type + // conversions to appease the escape analysis, and doesn't + // generate code. + if types.ReflectSymName(name.Sym()) == "noescape" { + cheap = true + } + } + // Special case for coverage counter updates; although + // these correspond to real operations, we treat them as + // zero cost for the moment. This is due to the existence + // of tests that are sensitive to inlining-- if the + // insertion of coverage instrumentation happens to tip a + // given function over the threshold and move it from + // "inlinable" to "not-inlinable", this can cause changes + // in allocation behavior, which can then result in test + // failures (a good example is the TestAllocations in + // crypto/ed25519). + if isAtomicCoverageCounterUpdate(n) { + return false + } + } + if n.Fun.Op() == ir.OMETHEXPR { + if meth := ir.MethodExprName(n.Fun); meth != nil { + if fn := meth.Func; fn != nil { + s := fn.Sym() + if types.RuntimeSymName(s) == "heapBits.nextArena" { + // Special case: explicitly allow mid-stack inlining of + // runtime.heapBits.next even though it calls slow-path + // runtime.heapBits.nextArena. 
+ cheap = true + } + // Special case: on architectures that can do unaligned loads, + // explicitly mark encoding/binary methods as cheap, + // because in practice they are, even though our inlining + // budgeting system does not see that. See issue 42958. + if base.Ctxt.Arch.CanMergeLoads && s.Pkg.Path == "encoding/binary" { + switch s.Name { + case "littleEndian.Uint64", "littleEndian.Uint32", "littleEndian.Uint16", + "bigEndian.Uint64", "bigEndian.Uint32", "bigEndian.Uint16", + "littleEndian.PutUint64", "littleEndian.PutUint32", "littleEndian.PutUint16", + "bigEndian.PutUint64", "bigEndian.PutUint32", "bigEndian.PutUint16", + "littleEndian.AppendUint64", "littleEndian.AppendUint32", "littleEndian.AppendUint16", + "bigEndian.AppendUint64", "bigEndian.AppendUint32", "bigEndian.AppendUint16": + cheap = true + } + } + } + } + } + if cheap { + break // treat like any other node, that is, cost of 1 + } + + if ir.IsIntrinsicCall(n) { + // Treat like any other node. + break + } + + if callee := inlCallee(v.curFunc, n.Fun, v.profile); callee != nil && typecheck.HaveInlineBody(callee) { + // Check whether we'd actually inline this call. Set + // log == false since we aren't actually doing inlining + // yet. + if ok, _ := canInlineCallExpr(v.curFunc, n, callee, v.isBigFunc, false); ok { + // mkinlcall would inline this call [1], so use + // the cost of the inline body as the cost of + // the call, as that is what will actually + // appear in the code. + // + // [1] This is almost a perfect match to the + // mkinlcall logic, except that + // canInlineCallExpr considers inlining cycles + // by looking at what has already been inlined. + // Since we haven't done any inlining yet we + // will miss those. + v.budget -= callee.Inl.Cost + break + } + } + + // Call cost for non-leaf inlining. 
+ v.budget -= v.extraCallCost + + case ir.OCALLMETH: + base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck") + + // Things that are too hairy, irrespective of the budget + case ir.OCALL, ir.OCALLINTER: + // Call cost for non-leaf inlining. + v.budget -= v.extraCallCost + + case ir.OPANIC: + n := n.(*ir.UnaryExpr) + if n.X.Op() == ir.OCONVIFACE && n.X.(*ir.ConvExpr).Implicit() { + // Hack to keep reflect.flag.mustBe inlinable for TestIntendedInlining. + // Before CL 284412, these conversions were introduced later in the + // compiler, so they didn't count against inlining budget. + v.budget++ + } + v.budget -= inlineExtraPanicCost + + case ir.ORECOVER: + base.FatalfAt(n.Pos(), "ORECOVER missed typecheck") + case ir.ORECOVERFP: + // recover matches the argument frame pointer to find + // the right panic value, so it needs an argument frame. + v.reason = "call to recover" + return true + + case ir.OCLOSURE: + if base.Debug.InlFuncsWithClosures == 0 { + v.reason = "not inlining functions with closures" + return true + } + + // TODO(danscales): Maybe make budget proportional to number of closure + // variables, e.g.: + //v.budget -= int32(len(n.(*ir.ClosureExpr).Func.ClosureVars) * 3) + // TODO(austin): However, if we're able to inline this closure into + // v.curFunc, then we actually pay nothing for the closure captures. We + // should try to account for that if we're going to account for captures. + v.budget -= 15 + + case ir.OGO, ir.ODEFER, ir.OTAILCALL: + v.reason = "unhandled op " + n.Op().String() + return true + + case ir.OAPPEND: + v.budget -= inlineExtraAppendCost + + case ir.OADDR: + n := n.(*ir.AddrExpr) + // Make "&s.f" cost 0 when f's offset is zero. 
+ if dot, ok := n.X.(*ir.SelectorExpr); ok && (dot.Op() == ir.ODOT || dot.Op() == ir.ODOTPTR) { + if _, ok := dot.X.(*ir.Name); ok && dot.Selection.Offset == 0 { + v.budget += 2 // undo ir.OADDR+ir.ODOT/ir.ODOTPTR + } + } + + case ir.ODEREF: + // *(*X)(unsafe.Pointer(&x)) is low-cost + n := n.(*ir.StarExpr) + + ptr := n.X + for ptr.Op() == ir.OCONVNOP { + ptr = ptr.(*ir.ConvExpr).X + } + if ptr.Op() == ir.OADDR { + v.budget += 1 // undo half of default cost of ir.ODEREF+ir.OADDR + } + + case ir.OCONVNOP: + // This doesn't produce code, but the children might. + v.budget++ // undo default cost + + case ir.OFALL, ir.OTYPE: + // These nodes don't produce code; omit from inlining budget. + return false + + case ir.OIF: + n := n.(*ir.IfStmt) + if ir.IsConst(n.Cond, constant.Bool) { + // This if and the condition cost nothing. + if doList(n.Init(), v.do) { + return true + } + if ir.BoolVal(n.Cond) { + return doList(n.Body, v.do) + } else { + return doList(n.Else, v.do) + } + } + + case ir.ONAME: + n := n.(*ir.Name) + if n.Class == ir.PAUTO { + v.usedLocals.Add(n) + } + + case ir.OBLOCK: + // The only OBLOCK we should see at this point is an empty one. + // In any event, let the visitList(n.List()) below take care of the statements, + // and don't charge for the OBLOCK itself. The ++ undoes the -- below. + v.budget++ + + case ir.OMETHVALUE, ir.OSLICELIT: + v.budget-- // Hack for toolstash -cmp. + + case ir.OMETHEXPR: + v.budget++ // Hack for toolstash -cmp. + + case ir.OAS2: + n := n.(*ir.AssignListStmt) + + // Unified IR unconditionally rewrites: + // + // a, b = f() + // + // into: + // + // DCL tmp1 + // DCL tmp2 + // tmp1, tmp2 = f() + // a, b = tmp1, tmp2 + // + // so that it can insert implicit conversions as necessary. To + // minimize impact to the existing inlining heuristics (in + // particular, to avoid breaking the existing inlinability regress + // tests), we need to compensate for this here. + // + // See also identical logic in IsBigFunc. 
+ if len(n.Rhs) > 0 { + if init := n.Rhs[0].Init(); len(init) == 1 { + if _, ok := init[0].(*ir.AssignListStmt); ok { + // 4 for each value, because each temporary variable now + // appears 3 times (DCL, LHS, RHS), plus an extra DCL node. + // + // 1 for the extra "tmp1, tmp2 = f()" assignment statement. + v.budget += 4*int32(len(n.Lhs)) + 1 + } + } + } + + case ir.OAS: + // Special case for coverage counter updates and coverage + // function registrations. Although these correspond to real + // operations, we treat them as zero cost for the moment. This + // is primarily due to the existence of tests that are + // sensitive to inlining-- if the insertion of coverage + // instrumentation happens to tip a given function over the + // threshold and move it from "inlinable" to "not-inlinable", + // this can cause changes in allocation behavior, which can + // then result in test failures (a good example is the + // TestAllocations in crypto/ed25519). + n := n.(*ir.AssignStmt) + if n.X.Op() == ir.OINDEX && isIndexingCoverageCounter(n.X) { + return false + } + } + + v.budget-- + + // When debugging, don't stop early, to get full cost of inlining this function + if v.budget < 0 && base.Flag.LowerM < 2 && !logopt.Enabled() { + v.reason = "too expensive" + return true + } + + return ir.DoChildren(n, v.do) +} + +// IsBigFunc reports whether fn is a "big" function. +// +// Note: The criteria for "big" is heuristic and subject to change. +func IsBigFunc(fn *ir.Func) bool { + budget := inlineBigFunctionNodes + return ir.Any(fn, func(n ir.Node) bool { + // See logic in hairyVisitor.doNode, explaining unified IR's + // handling of "a, b = f()" assignments. 
+ if n, ok := n.(*ir.AssignListStmt); ok && n.Op() == ir.OAS2 && len(n.Rhs) > 0 { + if init := n.Rhs[0].Init(); len(init) == 1 { + if _, ok := init[0].(*ir.AssignListStmt); ok { + budget += 4*len(n.Lhs) + 1 + } + } + } + + budget-- + return budget <= 0 + }) +} + +// TryInlineCall returns an inlined call expression for call, or nil +// if inlining is not possible. +func TryInlineCall(callerfn *ir.Func, call *ir.CallExpr, bigCaller bool, profile *pgo.Profile) *ir.InlinedCallExpr { + if base.Flag.LowerL == 0 { + return nil + } + if call.Op() != ir.OCALLFUNC { + return nil + } + if call.GoDefer || call.NoInline { + return nil + } + + // Prevent inlining some reflect.Value methods when using checkptr, + // even when package reflect was compiled without it (#35073). + if base.Debug.Checkptr != 0 && call.Fun.Op() == ir.OMETHEXPR { + if method := ir.MethodExprName(call.Fun); method != nil { + switch types.ReflectSymName(method.Sym()) { + case "Value.UnsafeAddr", "Value.Pointer": + return nil + } + } + } + + if base.Flag.LowerM > 3 { + fmt.Printf("%v:call to func %+v\n", ir.Line(call), call.Fun) + } + if ir.IsIntrinsicCall(call) { + return nil + } + if fn := inlCallee(callerfn, call.Fun, profile); fn != nil && typecheck.HaveInlineBody(fn) { + return mkinlcall(callerfn, call, fn, bigCaller) + } + return nil +} + +// inlCallee takes a function-typed expression and returns the underlying function ONAME +// that it refers to if statically known. Otherwise, it returns nil. +func inlCallee(caller *ir.Func, fn ir.Node, profile *pgo.Profile) (res *ir.Func) { + fn = ir.StaticValue(fn) + switch fn.Op() { + case ir.OMETHEXPR: + fn := fn.(*ir.SelectorExpr) + n := ir.MethodExprName(fn) + // Check that receiver type matches fn.X. + // TODO(mdempsky): Handle implicit dereference + // of pointer receiver argument? 
+ if n == nil || !types.Identical(n.Type().Recv().Type, fn.X.Type()) { + return nil + } + return n.Func + case ir.ONAME: + fn := fn.(*ir.Name) + if fn.Class == ir.PFUNC { + return fn.Func + } + case ir.OCLOSURE: + fn := fn.(*ir.ClosureExpr) + c := fn.Func + if len(c.ClosureVars) != 0 && c.ClosureVars[0].Outer.Curfn != caller { + return nil // inliner doesn't support inlining across closure frames + } + CanInline(c, profile) + return c + } + return nil +} + +var inlgen int + +// SSADumpInline gives the SSA back end a chance to dump the function +// when producing output for debugging the compiler itself. +var SSADumpInline = func(*ir.Func) {} + +// InlineCall allows the inliner implementation to be overridden. +// If it returns nil, the function will not be inlined. +var InlineCall = func(callerfn *ir.Func, call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr { + base.Fatalf("inline.InlineCall not overridden") + panic("unreachable") +} + +// inlineCostOK returns true if call n from caller to callee is cheap enough to +// inline. bigCaller indicates that caller is a big function. +// +// In addition to the "cost OK" boolean, it also returns the "max +// cost" limit used to make the decision (which may differ depending +// on func size), and the score assigned to this specific callsite. +func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool, int32, int32) { + maxCost := int32(inlineMaxBudget) + if bigCaller { + // We use this to restrict inlining into very big functions. + // See issue 26546 and 17566. + maxCost = inlineBigFunctionMaxCost + } + + metric := callee.Inl.Cost + if inlheur.Enabled() { + score, ok := inlheur.GetCallSiteScore(caller, n) + if ok { + metric = int32(score) + } + } + + if metric <= maxCost { + // Simple case. Function is already cheap enough. + return true, 0, metric + } + + // We'll also allow inlining of hot functions below inlineHotMaxBudget, + // but only in small functions. 
+ + lineOffset := pgo.NodeLineOffset(n, caller) + csi := pgo.CallSiteInfo{LineOffset: lineOffset, Caller: caller} + if _, ok := candHotEdgeMap[csi]; !ok { + // Cold + return false, maxCost, metric + } + + // Hot + + if bigCaller { + if base.Debug.PGODebug > 0 { + fmt.Printf("hot-big check disallows inlining for call %s (cost %d) at %v in big function %s\n", ir.PkgFuncName(callee), callee.Inl.Cost, ir.Line(n), ir.PkgFuncName(caller)) + } + return false, maxCost, metric + } + + if metric > inlineHotMaxBudget { + return false, inlineHotMaxBudget, metric + } + + if !base.PGOHash.MatchPosWithInfo(n.Pos(), "inline", nil) { + // De-selected by PGO Hash. + return false, maxCost, metric + } + + if base.Debug.PGODebug > 0 { + fmt.Printf("hot-budget check allows inlining for call %s (cost %d) at %v in function %s\n", ir.PkgFuncName(callee), callee.Inl.Cost, ir.Line(n), ir.PkgFuncName(caller)) + } + + return true, 0, metric +} + +// canInlineCallsite returns true if the call n from caller to callee +// can be inlined, plus the score computed for the call expr in +// question. bigCaller indicates that caller is a big function. log +// indicates that the 'cannot inline' reason should be logged. +// +// Preconditions: CanInline(callee) has already been called. +func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCaller bool, log bool) (bool, int32) { + if callee.Inl == nil { + // callee is never inlinable. + if log && logopt.Enabled() { + logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn), + fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(callee))) + } + return false, 0 + } + + ok, maxCost, callSiteScore := inlineCostOK(n, callerfn, callee, bigCaller) + if !ok { + // callee cost too high for this call site. 
+ if log && logopt.Enabled() { + logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn), + fmt.Sprintf("cost %d of %s exceeds max caller cost %d", callee.Inl.Cost, ir.PkgFuncName(callee), maxCost)) + } + return false, 0 + } + + if callee == callerfn { + // Can't recursively inline a function into itself. + if log && logopt.Enabled() { + logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(callerfn))) + } + return false, 0 + } + + if base.Flag.Cfg.Instrumenting && types.IsNoInstrumentPkg(callee.Sym().Pkg) { + // Runtime package must not be instrumented. + // Instrument skips runtime package. However, some runtime code can be + // inlined into other packages and instrumented there. To avoid this, + // we disable inlining of runtime functions when instrumenting. + // The example that we observed is inlining of LockOSThread, + // which lead to false race reports on m contents. + if log && logopt.Enabled() { + logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn), + fmt.Sprintf("call to runtime function %s in instrumented build", ir.PkgFuncName(callee))) + } + return false, 0 + } + + if base.Flag.Race && types.IsNoRacePkg(callee.Sym().Pkg) { + if log && logopt.Enabled() { + logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn), + fmt.Sprintf(`call to into "no-race" package function %s in race build`, ir.PkgFuncName(callee))) + } + return false, 0 + } + + // Check if we've already inlined this function at this particular + // call site, in order to stop inlining when we reach the beginning + // of a recursion cycle again. We don't inline immediately recursive + // functions, but allow inlining if there is a recursion cycle of + // many functions. Most likely, the inlining will stop before we + // even hit the beginning of the cycle again, but this catches the + // unusual case. 
+ parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex() + sym := callee.Linksym() + for inlIndex := parent; inlIndex >= 0; inlIndex = base.Ctxt.InlTree.Parent(inlIndex) { + if base.Ctxt.InlTree.InlinedFunction(inlIndex) == sym { + if log { + if base.Flag.LowerM > 1 { + fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", ir.Line(n), callee, ir.FuncName(callerfn)) + } + if logopt.Enabled() { + logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn), + fmt.Sprintf("repeated recursive cycle to %s", ir.PkgFuncName(callee))) + } + } + return false, 0 + } + } + + return true, callSiteScore +} + +// mkinlcall returns an OINLCALL node that can replace OCALLFUNC n, or +// nil if it cannot be inlined. callerfn is the function that contains +// n, and fn is the function being called. +// +// The result of mkinlcall MUST be assigned back to n, e.g. +// +// n.Left = mkinlcall(n.Left, fn, isddd) +func mkinlcall(callerfn *ir.Func, n *ir.CallExpr, fn *ir.Func, bigCaller bool) *ir.InlinedCallExpr { + ok, score := canInlineCallExpr(callerfn, n, fn, bigCaller, true) + if !ok { + return nil + } + typecheck.AssertFixedCall(n) + + parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex() + sym := fn.Linksym() + inlIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym, ir.FuncName(fn)) + + closureInitLSym := func(n *ir.CallExpr, fn *ir.Func) { + // The linker needs FuncInfo metadata for all inlined + // functions. This is typically handled by gc.enqueueFunc + // calling ir.InitLSym for all function declarations in + // typecheck.Target.Decls (ir.UseClosure adds all closures to + // Decls). + // + // However, non-trivial closures in Decls are ignored, and are + // insteaded enqueued when walk of the calling function + // discovers them. + // + // This presents a problem for direct calls to closures. 
+ // Inlining will replace the entire closure definition with its + // body, which hides the closure from walk and thus suppresses + // symbol creation. + // + // Explicitly create a symbol early in this edge case to ensure + // we keep this metadata. + // + // TODO: Refactor to keep a reference so this can all be done + // by enqueueFunc. + + if n.Op() != ir.OCALLFUNC { + // Not a standard call. + return + } + if n.Fun.Op() != ir.OCLOSURE { + // Not a direct closure call. + return + } + + clo := n.Fun.(*ir.ClosureExpr) + if ir.IsTrivialClosure(clo) { + // enqueueFunc will handle trivial closures anyways. + return + } + + ir.InitLSym(fn, true) + } + + closureInitLSym(n, fn) + + if base.Flag.GenDwarfInl > 0 { + if !sym.WasInlined() { + base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn) + sym.Set(obj.AttrWasInlined, true) + } + } + + if base.Flag.LowerM != 0 { + if buildcfg.Experiment.NewInliner { + fmt.Printf("%v: inlining call to %v with score %d\n", + ir.Line(n), fn, score) + } else { + fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn) + } + } + if base.Flag.LowerM > 2 { + fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n) + } + + res := InlineCall(callerfn, n, fn, inlIndex) + + if res == nil { + base.FatalfAt(n.Pos(), "inlining call to %v failed", fn) + } + + if base.Flag.LowerM > 2 { + fmt.Printf("%v: After inlining %+v\n\n", ir.Line(res), res) + } + + if inlheur.Enabled() { + inlheur.UpdateCallsiteTable(callerfn, n, res) + } + + return res +} + +// CalleeEffects appends any side effects from evaluating callee to init. +func CalleeEffects(init *ir.Nodes, callee ir.Node) { + for { + init.Append(ir.TakeInit(callee)...) + + switch callee.Op() { + case ir.ONAME, ir.OCLOSURE, ir.OMETHEXPR: + return // done + + case ir.OCONVNOP: + conv := callee.(*ir.ConvExpr) + callee = conv.X + + case ir.OINLCALL: + ic := callee.(*ir.InlinedCallExpr) + init.Append(ic.Body.Take()...) 
+ callee = ic.SingleResult() + + default: + base.FatalfAt(callee.Pos(), "unexpected callee expression: %v", callee) + } + } +} + +func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name { + s := make([]*ir.Name, 0, len(ll)) + for _, n := range ll { + if n.Class == ir.PAUTO { + if !vis.usedLocals.Has(n) { + // TODO(mdempsky): Simplify code after confident that this + // never happens anymore. + base.FatalfAt(n.Pos(), "unused auto: %v", n) + continue + } + } + s = append(s, n) + } + return s +} + +// numNonClosures returns the number of functions in list which are not closures. +func numNonClosures(list []*ir.Func) int { + count := 0 + for _, fn := range list { + if fn.OClosure == nil { + count++ + } + } + return count +} + +func doList(list []ir.Node, do func(ir.Node) bool) bool { + for _, x := range list { + if x != nil { + if do(x) { + return true + } + } + } + return false +} + +// isIndexingCoverageCounter returns true if the specified node 'n' is indexing +// into a coverage counter array. +func isIndexingCoverageCounter(n ir.Node) bool { + if n.Op() != ir.OINDEX { + return false + } + ixn := n.(*ir.IndexExpr) + if ixn.X.Op() != ir.ONAME || !ixn.X.Type().IsArray() { + return false + } + nn := ixn.X.(*ir.Name) + return nn.CoverageCounter() +} + +// isAtomicCoverageCounterUpdate examines the specified node to +// determine whether it represents a call to sync/atomic.AddUint32 to +// increment a coverage counter. 
+func isAtomicCoverageCounterUpdate(cn *ir.CallExpr) bool { + if cn.Fun.Op() != ir.ONAME { + return false + } + name := cn.Fun.(*ir.Name) + if name.Class != ir.PFUNC { + return false + } + fn := name.Sym().Name + if name.Sym().Pkg.Path != "sync/atomic" || + (fn != "AddUint32" && fn != "StoreUint32") { + return false + } + if len(cn.Args) != 2 || cn.Args[0].Op() != ir.OADDR { + return false + } + adn := cn.Args[0].(*ir.AddrExpr) + v := isIndexingCoverageCounter(adn.X) + return v +} + +func PostProcessCallSites(profile *pgo.Profile) { + if base.Debug.DumpInlCallSiteScores != 0 { + budgetCallback := func(fn *ir.Func, prof *pgo.Profile) (int32, bool) { + v := inlineBudget(fn, prof, false, false) + return v, v == inlineHotMaxBudget + } + inlheur.DumpInlCallSiteScores(profile, budgetCallback) + } +} + +func analyzeFuncProps(fn *ir.Func, p *pgo.Profile) { + canInline := func(fn *ir.Func) { CanInline(fn, p) } + budgetForFunc := func(fn *ir.Func) int32 { + return inlineBudget(fn, p, true, false) + } + inlheur.AnalyzeFunc(fn, canInline, budgetForFunc, inlineMaxBudget) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/abi.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/abi.go new file mode 100644 index 0000000000000000000000000000000000000000..ebe0fbfb2a731998df0bb34abf2f2185376a372c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/abi.go @@ -0,0 +1,78 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "cmd/compile/internal/base" + "cmd/internal/obj" +) + +// InitLSym defines f's obj.LSym and initializes it based on the +// properties of f. This includes setting the symbol flags and ABI and +// creating and initializing related DWARF symbols. 
+// +// InitLSym must be called exactly once per function and must be +// called for both functions with bodies and functions without bodies. +// For body-less functions, we only create the LSym; for functions +// with bodies call a helper to setup up / populate the LSym. +func InitLSym(f *Func, hasBody bool) { + if f.LSym != nil { + base.FatalfAt(f.Pos(), "InitLSym called twice on %v", f) + } + + if nam := f.Nname; !IsBlank(nam) { + f.LSym = nam.LinksymABI(f.ABI) + if f.Pragma&Systemstack != 0 { + f.LSym.Set(obj.AttrCFunc, true) + } + } + if hasBody { + setupTextLSym(f, 0) + } +} + +// setupTextLSym initializes the LSym for a with-body text symbol. +func setupTextLSym(f *Func, flag int) { + if f.Dupok() { + flag |= obj.DUPOK + } + if f.Wrapper() { + flag |= obj.WRAPPER + } + if f.ABIWrapper() { + flag |= obj.ABIWRAPPER + } + if f.Needctxt() { + flag |= obj.NEEDCTXT + } + if f.Pragma&Nosplit != 0 { + flag |= obj.NOSPLIT + } + if f.IsPackageInit() { + flag |= obj.PKGINIT + } + + // Clumsy but important. + // For functions that could be on the path of invoking a deferred + // function that can recover (runtime.reflectcall, reflect.callReflect, + // and reflect.callMethod), we want the panic+recover special handling. + // See test/recover.go for test cases and src/reflect/value.go + // for the actual functions being considered. + // + // runtime.reflectcall is an assembly function which tailcalls + // WRAPPER functions (runtime.callNN). Its ABI wrapper needs WRAPPER + // flag as well. 
+ fnname := f.Sym().Name + if base.Ctxt.Pkgpath == "runtime" && fnname == "reflectcall" { + flag |= obj.WRAPPER + } else if base.Ctxt.Pkgpath == "reflect" { + switch fnname { + case "callReflect", "callMethod": + flag |= obj.WRAPPER + } + } + + base.Ctxt.InitTextSym(f.LSym, flag, f.Pos()) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/bitset.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/bitset.go new file mode 100644 index 0000000000000000000000000000000000000000..bae400586695b78605643db4dd23e71811bb976f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/bitset.go @@ -0,0 +1,37 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +type bitset8 uint8 + +func (f *bitset8) set(mask uint8, b bool) { + if b { + *(*uint8)(f) |= mask + } else { + *(*uint8)(f) &^= mask + } +} + +func (f bitset8) get2(shift uint8) uint8 { + return uint8(f>>shift) & 3 +} + +// set2 sets two bits in f using the bottom two bits of b. +func (f *bitset8) set2(shift uint8, b uint8) { + // Clear old bits. + *(*uint8)(f) &^= 3 << shift + // Set new bits. + *(*uint8)(f) |= uint8(b&3) << shift +} + +type bitset16 uint16 + +func (f *bitset16) set(mask uint16, b bool) { + if b { + *(*uint16)(f) |= mask + } else { + *(*uint16)(f) &^= mask + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/cfg.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/cfg.go new file mode 100644 index 0000000000000000000000000000000000000000..49e1ed31cbf261959469abce5886af4616fc5c1d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/cfg.go @@ -0,0 +1,26 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ir + +var ( + // MaxStackVarSize is the maximum size variable which we will allocate on the stack. + // This limit is for explicit variable declarations like "var x T" or "x := ...". + // Note: the flag smallframes can update this value. + MaxStackVarSize = int64(10 * 1024 * 1024) + + // MaxImplicitStackVarSize is the maximum size of implicit variables that we will allocate on the stack. + // p := new(T) allocating T on the stack + // p := &T{} allocating T on the stack + // s := make([]T, n) allocating [n]T on the stack + // s := []byte("...") allocating [n]byte on the stack + // Note: the flag smallframes can update this value. + MaxImplicitStackVarSize = int64(64 * 1024) + + // MaxSmallArraySize is the maximum size of an array which is considered small. + // Small arrays will be initialized directly with a sequence of constant stores. + // Large arrays will be initialized by copying from a static temp. + // 256 bytes was chosen to minimize generated code + statictmp size. + MaxSmallArraySize = int64(256) +) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/check_reassign_no.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/check_reassign_no.go new file mode 100644 index 0000000000000000000000000000000000000000..8290a7da7e824496d70cc856938c8da8874cde20 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/check_reassign_no.go @@ -0,0 +1,9 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !checknewoldreassignment + +package ir + +const consistencyCheckEnabled = false diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/check_reassign_yes.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/check_reassign_yes.go new file mode 100644 index 0000000000000000000000000000000000000000..30876cca20f7afa8e206cd51242febbe901ccb49 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/check_reassign_yes.go @@ -0,0 +1,9 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build checknewoldreassignment + +package ir + +const consistencyCheckEnabled = true diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/class_string.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/class_string.go new file mode 100644 index 0000000000000000000000000000000000000000..11a94c004701ba4f6238217b145b2972abbea1a7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/class_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type=Class name.go"; DO NOT EDIT. + +package ir + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[Pxxx-0] + _ = x[PEXTERN-1] + _ = x[PAUTO-2] + _ = x[PAUTOHEAP-3] + _ = x[PPARAM-4] + _ = x[PPARAMOUT-5] + _ = x[PTYPEPARAM-6] + _ = x[PFUNC-7] +} + +const _Class_name = "PxxxPEXTERNPAUTOPAUTOHEAPPPARAMPPARAMOUTPTYPEPARAMPFUNC" + +var _Class_index = [...]uint8{0, 4, 11, 16, 25, 31, 40, 50, 55} + +func (i Class) String() string { + if i >= Class(len(_Class_index)-1) { + return "Class(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Class_name[_Class_index[i]:_Class_index[i+1]] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/const.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/const.go new file mode 100644 index 0000000000000000000000000000000000000000..0efd1137fe4b9aa3aca9df608d4e3143e33cbca5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/const.go @@ -0,0 +1,161 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "go/constant" + "math" + "math/big" + + "cmd/compile/internal/base" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +// NewBool returns an OLITERAL representing b as an untyped boolean. +func NewBool(pos src.XPos, b bool) Node { + return NewBasicLit(pos, types.UntypedBool, constant.MakeBool(b)) +} + +// NewInt returns an OLITERAL representing v as an untyped integer. +func NewInt(pos src.XPos, v int64) Node { + return NewBasicLit(pos, types.UntypedInt, constant.MakeInt64(v)) +} + +// NewString returns an OLITERAL representing s as an untyped string. +func NewString(pos src.XPos, s string) Node { + return NewBasicLit(pos, types.UntypedString, constant.MakeString(s)) +} + +// NewUintptr returns an OLITERAL representing v as a uintptr. 
+func NewUintptr(pos src.XPos, v int64) Node { + return NewBasicLit(pos, types.Types[types.TUINTPTR], constant.MakeInt64(v)) +} + +// NewZero returns a zero value of the given type. +func NewZero(pos src.XPos, typ *types.Type) Node { + switch { + case typ.HasNil(): + return NewNilExpr(pos, typ) + case typ.IsInteger(): + return NewBasicLit(pos, typ, intZero) + case typ.IsFloat(): + return NewBasicLit(pos, typ, floatZero) + case typ.IsComplex(): + return NewBasicLit(pos, typ, complexZero) + case typ.IsBoolean(): + return NewBasicLit(pos, typ, constant.MakeBool(false)) + case typ.IsString(): + return NewBasicLit(pos, typ, constant.MakeString("")) + case typ.IsArray() || typ.IsStruct(): + // TODO(mdempsky): Return a typechecked expression instead. + return NewCompLitExpr(pos, OCOMPLIT, typ, nil) + } + + base.FatalfAt(pos, "unexpected type: %v", typ) + panic("unreachable") +} + +var ( + intZero = constant.MakeInt64(0) + floatZero = constant.ToFloat(intZero) + complexZero = constant.ToComplex(intZero) +) + +// NewOne returns an OLITERAL representing 1 with the given type. +func NewOne(pos src.XPos, typ *types.Type) Node { + var val constant.Value + switch { + case typ.IsInteger(): + val = intOne + case typ.IsFloat(): + val = floatOne + case typ.IsComplex(): + val = complexOne + default: + base.FatalfAt(pos, "%v cannot represent 1", typ) + } + + return NewBasicLit(pos, typ, val) +} + +var ( + intOne = constant.MakeInt64(1) + floatOne = constant.ToFloat(intOne) + complexOne = constant.ToComplex(intOne) +) + +const ( + // Maximum size in bits for big.Ints before signaling + // overflow and also mantissa precision for big.Floats. 
+ ConstPrec = 512 +) + +func BigFloat(v constant.Value) *big.Float { + f := new(big.Float) + f.SetPrec(ConstPrec) + switch u := constant.Val(v).(type) { + case int64: + f.SetInt64(u) + case *big.Int: + f.SetInt(u) + case *big.Float: + f.Set(u) + case *big.Rat: + f.SetRat(u) + default: + base.Fatalf("unexpected: %v", u) + } + return f +} + +// ConstOverflow reports whether constant value v is too large +// to represent with type t. +func ConstOverflow(v constant.Value, t *types.Type) bool { + switch { + case t.IsInteger(): + bits := uint(8 * t.Size()) + if t.IsUnsigned() { + x, ok := constant.Uint64Val(v) + return !ok || x>>bits != 0 + } + x, ok := constant.Int64Val(v) + if x < 0 { + x = ^x + } + return !ok || x>>(bits-1) != 0 + case t.IsFloat(): + switch t.Size() { + case 4: + f, _ := constant.Float32Val(v) + return math.IsInf(float64(f), 0) + case 8: + f, _ := constant.Float64Val(v) + return math.IsInf(f, 0) + } + case t.IsComplex(): + ft := types.FloatForComplex(t) + return ConstOverflow(constant.Real(v), ft) || ConstOverflow(constant.Imag(v), ft) + } + base.Fatalf("ConstOverflow: %v, %v", v, t) + panic("unreachable") +} + +// IsConstNode reports whether n is a Go language constant (as opposed to a +// compile-time constant). +// +// Expressions derived from nil, like string([]byte(nil)), while they +// may be known at compile time, are not Go language constants. 
+func IsConstNode(n Node) bool { + return n.Op() == OLITERAL +} + +func IsSmallIntConst(n Node) bool { + if n.Op() == OLITERAL { + v, ok := constant.Int64Val(n.Val()) + return ok && int64(int32(v)) == v + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/copy.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/copy.go new file mode 100644 index 0000000000000000000000000000000000000000..d30f7bc688093834e271cd2caf3dd96b75ed62b9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/copy.go @@ -0,0 +1,43 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "cmd/internal/src" +) + +// Copy returns a shallow copy of n. +func Copy(n Node) Node { + return n.copy() +} + +// DeepCopy returns a “deep” copy of n, with its entire structure copied +// (except for shared nodes like ONAME, ONONAME, OLITERAL, and OTYPE). +// If pos.IsKnown(), it sets the source position of newly allocated Nodes to pos. +func DeepCopy(pos src.XPos, n Node) Node { + var edit func(Node) Node + edit = func(x Node) Node { + switch x.Op() { + case ONAME, ONONAME, OLITERAL, ONIL, OTYPE: + return x + } + x = Copy(x) + if pos.IsKnown() { + x.SetPos(pos) + } + EditChildren(x, edit) + return x + } + return edit(n) +} + +// DeepCopyList returns a list of deep copies (using DeepCopy) of the nodes in list. 
+func DeepCopyList(pos src.XPos, list []Node) []Node { + var out []Node + for _, n := range list { + out = append(out, DeepCopy(pos, n)) + } + return out +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/dump.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/dump.go new file mode 100644 index 0000000000000000000000000000000000000000..4c218682ea6c758d8d8c757826478a067b77fc64 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/dump.go @@ -0,0 +1,256 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements textual dumping of arbitrary data structures +// for debugging purposes. The code is customized for Node graphs +// and may be used for an alternative view of the node structure. + +package ir + +import ( + "fmt" + "io" + "os" + "reflect" + "regexp" + + "cmd/compile/internal/base" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +// DumpAny is like FDumpAny but prints to stderr. +func DumpAny(root interface{}, filter string, depth int) { + FDumpAny(os.Stderr, root, filter, depth) +} + +// FDumpAny prints the structure of a rooted data structure +// to w by depth-first traversal of the data structure. +// +// The filter parameter is a regular expression. If it is +// non-empty, only struct fields whose names match filter +// are printed. +// +// The depth parameter controls how deep traversal recurses +// before it returns (higher value means greater depth). +// If an empty field filter is given, a good depth default value +// is 4. A negative depth means no depth limit, which may be fine +// for small data structures or if there is a non-empty filter. 
+// +// In the output, Node structs are identified by their Op name +// rather than their type; struct fields with zero values or +// non-matching field names are omitted, and "…" means recursion +// depth has been reached or struct fields have been omitted. +func FDumpAny(w io.Writer, root interface{}, filter string, depth int) { + if root == nil { + fmt.Fprintln(w, "nil") + return + } + + if filter == "" { + filter = ".*" // default + } + + p := dumper{ + output: w, + fieldrx: regexp.MustCompile(filter), + ptrmap: make(map[uintptr]int), + last: '\n', // force printing of line number on first line + } + + p.dump(reflect.ValueOf(root), depth) + p.printf("\n") +} + +type dumper struct { + output io.Writer + fieldrx *regexp.Regexp // field name filter + ptrmap map[uintptr]int // ptr -> dump line number + lastadr string // last address string printed (for shortening) + + // output + indent int // current indentation level + last byte // last byte processed by Write + line int // current line number +} + +var indentBytes = []byte(". ") + +func (p *dumper) Write(data []byte) (n int, err error) { + var m int + for i, b := range data { + // invariant: data[0:n] has been written + if b == '\n' { + m, err = p.output.Write(data[n : i+1]) + n += m + if err != nil { + return + } + } else if p.last == '\n' { + p.line++ + _, err = fmt.Fprintf(p.output, "%6d ", p.line) + if err != nil { + return + } + for j := p.indent; j > 0; j-- { + _, err = p.output.Write(indentBytes) + if err != nil { + return + } + } + } + p.last = b + } + if len(data) > n { + m, err = p.output.Write(data[n:]) + n += m + } + return +} + +// printf is a convenience wrapper. +func (p *dumper) printf(format string, args ...interface{}) { + if _, err := fmt.Fprintf(p, format, args...); err != nil { + panic(err) + } +} + +// addr returns the (hexadecimal) address string of the object +// represented by x (or "?" 
if x is not addressable), with the +// common prefix between this and the prior address replaced by +// "0x…" to make it easier to visually match addresses. +func (p *dumper) addr(x reflect.Value) string { + if !x.CanAddr() { + return "?" + } + adr := fmt.Sprintf("%p", x.Addr().Interface()) + s := adr + if i := commonPrefixLen(p.lastadr, adr); i > 0 { + s = "0x…" + adr[i:] + } + p.lastadr = adr + return s +} + +// dump prints the contents of x. +func (p *dumper) dump(x reflect.Value, depth int) { + if depth == 0 { + p.printf("…") + return + } + + if pos, ok := x.Interface().(src.XPos); ok { + p.printf("%s", base.FmtPos(pos)) + return + } + + switch x.Kind() { + case reflect.String: + p.printf("%q", x.Interface()) // print strings in quotes + + case reflect.Interface: + if x.IsNil() { + p.printf("nil") + return + } + p.dump(x.Elem(), depth-1) + + case reflect.Ptr: + if x.IsNil() { + p.printf("nil") + return + } + + p.printf("*") + ptr := x.Pointer() + if line, exists := p.ptrmap[ptr]; exists { + p.printf("(@%d)", line) + return + } + p.ptrmap[ptr] = p.line + p.dump(x.Elem(), depth) // don't count pointer indirection towards depth + + case reflect.Slice: + if x.IsNil() { + p.printf("nil") + return + } + p.printf("%s (%d entries) {", x.Type(), x.Len()) + if x.Len() > 0 { + p.indent++ + p.printf("\n") + for i, n := 0, x.Len(); i < n; i++ { + p.printf("%d: ", i) + p.dump(x.Index(i), depth-1) + p.printf("\n") + } + p.indent-- + } + p.printf("}") + + case reflect.Struct: + typ := x.Type() + + isNode := false + if n, ok := x.Interface().(Node); ok { + isNode = true + p.printf("%s %s {", n.Op().String(), p.addr(x)) + } else { + p.printf("%s {", typ) + } + p.indent++ + + first := true + omitted := false + for i, n := 0, typ.NumField(); i < n; i++ { + // Exclude non-exported fields because their + // values cannot be accessed via reflection. 
+ if name := typ.Field(i).Name; types.IsExported(name) { + if !p.fieldrx.MatchString(name) { + omitted = true + continue // field name not selected by filter + } + + // special cases + if isNode && name == "Op" { + omitted = true + continue // Op field already printed for Nodes + } + x := x.Field(i) + if x.IsZero() { + omitted = true + continue // exclude zero-valued fields + } + if n, ok := x.Interface().(Nodes); ok && len(n) == 0 { + omitted = true + continue // exclude empty Nodes slices + } + + if first { + p.printf("\n") + first = false + } + p.printf("%s: ", name) + p.dump(x, depth-1) + p.printf("\n") + } + } + if omitted { + p.printf("…\n") + } + + p.indent-- + p.printf("}") + + default: + p.printf("%v", x.Interface()) + } +} + +func commonPrefixLen(a, b string) (i int) { + for i < len(a) && i < len(b) && a[i] == b[i] { + i++ + } + return +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/expr.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/expr.go new file mode 100644 index 0000000000000000000000000000000000000000..da5b437f99e6a45333217353e80a177ac717b4fa --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/expr.go @@ -0,0 +1,1256 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "bytes" + "cmd/compile/internal/base" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/src" + "fmt" + "go/constant" + "go/token" +) + +// An Expr is a Node that can appear as an expression. +type Expr interface { + Node + isExpr() +} + +// A miniExpr is a miniNode with extra fields common to expressions. +// TODO(rsc): Once we are sure about the contents, compact the bools +// into a bit field and leave extra bits available for implementations +// embedding miniExpr. Right now there are ~60 unused bits sitting here. 
+type miniExpr struct { + miniNode + typ *types.Type + init Nodes // TODO(rsc): Don't require every Node to have an init + flags bitset8 +} + +const ( + miniExprNonNil = 1 << iota + miniExprTransient + miniExprBounded + miniExprImplicit // for use by implementations; not supported by every Expr + miniExprCheckPtr +) + +func (*miniExpr) isExpr() {} + +func (n *miniExpr) Type() *types.Type { return n.typ } +func (n *miniExpr) SetType(x *types.Type) { n.typ = x } +func (n *miniExpr) NonNil() bool { return n.flags&miniExprNonNil != 0 } +func (n *miniExpr) MarkNonNil() { n.flags |= miniExprNonNil } +func (n *miniExpr) Transient() bool { return n.flags&miniExprTransient != 0 } +func (n *miniExpr) SetTransient(b bool) { n.flags.set(miniExprTransient, b) } +func (n *miniExpr) Bounded() bool { return n.flags&miniExprBounded != 0 } +func (n *miniExpr) SetBounded(b bool) { n.flags.set(miniExprBounded, b) } +func (n *miniExpr) Init() Nodes { return n.init } +func (n *miniExpr) PtrInit() *Nodes { return &n.init } +func (n *miniExpr) SetInit(x Nodes) { n.init = x } + +// An AddStringExpr is a string concatenation List[0] + List[1] + ... + List[len(List)-1]. +type AddStringExpr struct { + miniExpr + List Nodes + Prealloc *Name +} + +func NewAddStringExpr(pos src.XPos, list []Node) *AddStringExpr { + n := &AddStringExpr{} + n.pos = pos + n.op = OADDSTR + n.List = list + return n +} + +// An AddrExpr is an address-of expression &X. +// It may end up being a normal address-of or an allocation of a composite literal. 
+type AddrExpr struct { + miniExpr + X Node + Prealloc *Name // preallocated storage if any +} + +func NewAddrExpr(pos src.XPos, x Node) *AddrExpr { + if x == nil || x.Typecheck() != 1 { + base.FatalfAt(pos, "missed typecheck: %L", x) + } + n := &AddrExpr{X: x} + n.pos = pos + + switch x.Op() { + case OARRAYLIT, OMAPLIT, OSLICELIT, OSTRUCTLIT: + n.op = OPTRLIT + + default: + n.op = OADDR + if r, ok := OuterValue(x).(*Name); ok && r.Op() == ONAME { + r.SetAddrtaken(true) + + // If r is a closure variable, we need to mark its canonical + // variable as addrtaken too, so that closure conversion + // captures it by reference. + // + // Exception: if we've already marked the variable as + // capture-by-value, then that means this variable isn't + // logically modified, and we must be taking its address to pass + // to a runtime function that won't mutate it. In that case, we + // only need to make sure our own copy is addressable. + if r.IsClosureVar() && !r.Byval() { + r.Canonical().SetAddrtaken(true) + } + } + } + + n.SetType(types.NewPtr(x.Type())) + n.SetTypecheck(1) + + return n +} + +func (n *AddrExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } +func (n *AddrExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } + +func (n *AddrExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OADDR, OPTRLIT: + n.op = op + } +} + +// A BasicLit is a literal of basic type. +type BasicLit struct { + miniExpr + val constant.Value +} + +// NewBasicLit returns an OLITERAL representing val with the given type. 
+func NewBasicLit(pos src.XPos, typ *types.Type, val constant.Value) Node { + AssertValidTypeForConst(typ, val) + + n := &BasicLit{val: val} + n.op = OLITERAL + n.pos = pos + n.SetType(typ) + n.SetTypecheck(1) + return n +} + +func (n *BasicLit) Val() constant.Value { return n.val } +func (n *BasicLit) SetVal(val constant.Value) { n.val = val } + +// NewConstExpr returns an OLITERAL representing val, copying the +// position and type from orig. +func NewConstExpr(val constant.Value, orig Node) Node { + return NewBasicLit(orig.Pos(), orig.Type(), val) +} + +// A BinaryExpr is a binary expression X Op Y, +// or Op(X, Y) for builtin functions that do not become calls. +type BinaryExpr struct { + miniExpr + X Node + Y Node + RType Node `mknode:"-"` // see reflectdata/helpers.go +} + +func NewBinaryExpr(pos src.XPos, op Op, x, y Node) *BinaryExpr { + n := &BinaryExpr{X: x, Y: y} + n.pos = pos + n.SetOp(op) + return n +} + +func (n *BinaryExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OADD, OADDSTR, OAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, + OLSH, OLT, OMOD, OMUL, ONE, OOR, ORSH, OSUB, OXOR, + OCOPY, OCOMPLEX, OUNSAFEADD, OUNSAFESLICE, OUNSAFESTRING, + OMAKEFACE: + n.op = op + } +} + +// A CallExpr is a function call Fun(Args). 
+type CallExpr struct { + miniExpr + Fun Node + Args Nodes + DeferAt Node + RType Node `mknode:"-"` // see reflectdata/helpers.go + KeepAlive []*Name // vars to be kept alive until call returns + IsDDD bool + GoDefer bool // whether this call is part of a go or defer statement + NoInline bool // whether this call must not be inlined +} + +func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr { + n := &CallExpr{Fun: fun} + n.pos = pos + n.SetOp(op) + n.Args = args + return n +} + +func (*CallExpr) isStmt() {} + +func (n *CallExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OAPPEND, + OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, + ODELETE, + OGETG, OGETCALLERPC, OGETCALLERSP, + OMAKE, OMAX, OMIN, OPRINT, OPRINTLN, + ORECOVER, ORECOVERFP: + n.op = op + } +} + +// A ClosureExpr is a function literal expression. +type ClosureExpr struct { + miniExpr + Func *Func `mknode:"-"` + Prealloc *Name + IsGoWrap bool // whether this is wrapper closure of a go statement +} + +// A CompLitExpr is a composite literal Type{Vals}. +// Before type-checking, the type is Ntype. +type CompLitExpr struct { + miniExpr + List Nodes // initialized values + RType Node `mknode:"-"` // *runtime._type for OMAPLIT map types + Prealloc *Name + // For OSLICELIT, Len is the backing array length. + // For OMAPLIT, Len is the number of entries that we've removed from List and + // generated explicit mapassign calls for. This is used to inform the map alloc hint. 
+ Len int64 +} + +func NewCompLitExpr(pos src.XPos, op Op, typ *types.Type, list []Node) *CompLitExpr { + n := &CompLitExpr{List: list} + n.pos = pos + n.SetOp(op) + if typ != nil { + n.SetType(typ) + } + return n +} + +func (n *CompLitExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } +func (n *CompLitExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } + +func (n *CompLitExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OARRAYLIT, OCOMPLIT, OMAPLIT, OSTRUCTLIT, OSLICELIT: + n.op = op + } +} + +// A ConvExpr is a conversion Type(X). +// It may end up being a value or a type. +type ConvExpr struct { + miniExpr + X Node + + // For implementing OCONVIFACE expressions. + // + // TypeWord is an expression yielding a *runtime._type or + // *runtime.itab value to go in the type word of the iface/eface + // result. See reflectdata.ConvIfaceTypeWord for further details. + // + // SrcRType is an expression yielding a *runtime._type value for X, + // if it's not pointer-shaped and needs to be heap allocated. + TypeWord Node `mknode:"-"` + SrcRType Node `mknode:"-"` + + // For -d=checkptr instrumentation of conversions from + // unsafe.Pointer to *Elem or *[Len]Elem. + // + // TODO(mdempsky): We only ever need one of these, but currently we + // don't decide which one until walk. Longer term, it probably makes + // sense to have a dedicated IR op for `(*[Len]Elem)(ptr)[:n:m]` + // expressions. 
+ ElemRType Node `mknode:"-"` + ElemElemRType Node `mknode:"-"` +} + +func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr { + n := &ConvExpr{X: x} + n.pos = pos + n.typ = typ + n.SetOp(op) + return n +} + +func (n *ConvExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } +func (n *ConvExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } +func (n *ConvExpr) CheckPtr() bool { return n.flags&miniExprCheckPtr != 0 } +func (n *ConvExpr) SetCheckPtr(b bool) { n.flags.set(miniExprCheckPtr, b) } + +func (n *ConvExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, OBYTES2STRTMP, ORUNES2STR, OSTR2BYTES, OSTR2BYTESTMP, OSTR2RUNES, ORUNESTR, OSLICE2ARR, OSLICE2ARRPTR: + n.op = op + } +} + +// An IndexExpr is an index expression X[Index]. +type IndexExpr struct { + miniExpr + X Node + Index Node + RType Node `mknode:"-"` // see reflectdata/helpers.go + Assigned bool +} + +func NewIndexExpr(pos src.XPos, x, index Node) *IndexExpr { + n := &IndexExpr{X: x, Index: index} + n.pos = pos + n.op = OINDEX + return n +} + +func (n *IndexExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OINDEX, OINDEXMAP: + n.op = op + } +} + +// A KeyExpr is a Key: Value composite literal key. +type KeyExpr struct { + miniExpr + Key Node + Value Node +} + +func NewKeyExpr(pos src.XPos, key, value Node) *KeyExpr { + n := &KeyExpr{Key: key, Value: value} + n.pos = pos + n.op = OKEY + return n +} + +// A StructKeyExpr is a Field: Value composite literal key. +type StructKeyExpr struct { + miniExpr + Field *types.Field + Value Node +} + +func NewStructKeyExpr(pos src.XPos, field *types.Field, value Node) *StructKeyExpr { + n := &StructKeyExpr{Field: field, Value: value} + n.pos = pos + n.op = OSTRUCTKEY + return n +} + +func (n *StructKeyExpr) Sym() *types.Sym { return n.Field.Sym } + +// An InlinedCallExpr is an inlined function call. 
+type InlinedCallExpr struct { + miniExpr + Body Nodes + ReturnVars Nodes // must be side-effect free +} + +func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr { + n := &InlinedCallExpr{} + n.pos = pos + n.op = OINLCALL + n.Body = body + n.ReturnVars = retvars + return n +} + +func (n *InlinedCallExpr) SingleResult() Node { + if have := len(n.ReturnVars); have != 1 { + base.FatalfAt(n.Pos(), "inlined call has %v results, expected 1", have) + } + if !n.Type().HasShape() && n.ReturnVars[0].Type().HasShape() { + // If the type of the call is not a shape, but the type of the return value + // is a shape, we need to do an implicit conversion, so the real type + // of n is maintained. + r := NewConvExpr(n.Pos(), OCONVNOP, n.Type(), n.ReturnVars[0]) + r.SetTypecheck(1) + return r + } + return n.ReturnVars[0] +} + +// A LogicalExpr is an expression X Op Y where Op is && or ||. +// It is separate from BinaryExpr to make room for statements +// that must be executed before Y but after X. +type LogicalExpr struct { + miniExpr + X Node + Y Node +} + +func NewLogicalExpr(pos src.XPos, op Op, x, y Node) *LogicalExpr { + n := &LogicalExpr{X: x, Y: y} + n.pos = pos + n.SetOp(op) + return n +} + +func (n *LogicalExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OANDAND, OOROR: + n.op = op + } +} + +// A MakeExpr is a make expression: make(Type[, Len[, Cap]]). +// Op is OMAKECHAN, OMAKEMAP, OMAKESLICE, or OMAKESLICECOPY, +// but *not* OMAKE (that's a pre-typechecking CallExpr). 
+type MakeExpr struct { + miniExpr + RType Node `mknode:"-"` // see reflectdata/helpers.go + Len Node + Cap Node +} + +func NewMakeExpr(pos src.XPos, op Op, len, cap Node) *MakeExpr { + n := &MakeExpr{Len: len, Cap: cap} + n.pos = pos + n.SetOp(op) + return n +} + +func (n *MakeExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OMAKECHAN, OMAKEMAP, OMAKESLICE, OMAKESLICECOPY: + n.op = op + } +} + +// A NilExpr represents the predefined untyped constant nil. +type NilExpr struct { + miniExpr +} + +func NewNilExpr(pos src.XPos, typ *types.Type) *NilExpr { + if typ == nil { + base.FatalfAt(pos, "missing type") + } + n := &NilExpr{} + n.pos = pos + n.op = ONIL + n.SetType(typ) + n.SetTypecheck(1) + return n +} + +// A ParenExpr is a parenthesized expression (X). +// It may end up being a value or a type. +type ParenExpr struct { + miniExpr + X Node +} + +func NewParenExpr(pos src.XPos, x Node) *ParenExpr { + n := &ParenExpr{X: x} + n.op = OPAREN + n.pos = pos + return n +} + +func (n *ParenExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } +func (n *ParenExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } + +// A ResultExpr represents a direct access to a result. +type ResultExpr struct { + miniExpr + Index int64 // index of the result expr. +} + +func NewResultExpr(pos src.XPos, typ *types.Type, index int64) *ResultExpr { + n := &ResultExpr{Index: index} + n.pos = pos + n.op = ORESULT + n.typ = typ + return n +} + +// A LinksymOffsetExpr refers to an offset within a global variable. +// It is like a SelectorExpr but without the field name. 
+type LinksymOffsetExpr struct { + miniExpr + Linksym *obj.LSym + Offset_ int64 +} + +func NewLinksymOffsetExpr(pos src.XPos, lsym *obj.LSym, offset int64, typ *types.Type) *LinksymOffsetExpr { + if typ == nil { + base.FatalfAt(pos, "nil type") + } + n := &LinksymOffsetExpr{Linksym: lsym, Offset_: offset} + n.typ = typ + n.op = OLINKSYMOFFSET + n.SetTypecheck(1) + return n +} + +// NewLinksymExpr is NewLinksymOffsetExpr, but with offset fixed at 0. +func NewLinksymExpr(pos src.XPos, lsym *obj.LSym, typ *types.Type) *LinksymOffsetExpr { + return NewLinksymOffsetExpr(pos, lsym, 0, typ) +} + +// NewNameOffsetExpr is NewLinksymOffsetExpr, but taking a *Name +// representing a global variable instead of an *obj.LSym directly. +func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type) *LinksymOffsetExpr { + if name == nil || IsBlank(name) || !(name.Op() == ONAME && name.Class == PEXTERN) { + base.FatalfAt(pos, "cannot take offset of nil, blank name or non-global variable: %v", name) + } + return NewLinksymOffsetExpr(pos, name.Linksym(), offset, typ) +} + +// A SelectorExpr is a selector expression X.Sel. +type SelectorExpr struct { + miniExpr + X Node + // Sel is the name of the field or method being selected, without (in the + // case of methods) any preceding type specifier. If the field/method is + // exported, than the Sym uses the local package regardless of the package + // of the containing type. + Sel *types.Sym + // The actual selected field - may not be filled in until typechecking. 
+ Selection *types.Field + Prealloc *Name // preallocated storage for OMETHVALUE, if any +} + +func NewSelectorExpr(pos src.XPos, op Op, x Node, sel *types.Sym) *SelectorExpr { + n := &SelectorExpr{X: x, Sel: sel} + n.pos = pos + n.SetOp(op) + return n +} + +func (n *SelectorExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OXDOT, ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OMETHVALUE, OMETHEXPR: + n.op = op + } +} + +func (n *SelectorExpr) Sym() *types.Sym { return n.Sel } +func (n *SelectorExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } +func (n *SelectorExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } +func (n *SelectorExpr) Offset() int64 { return n.Selection.Offset } + +func (n *SelectorExpr) FuncName() *Name { + if n.Op() != OMETHEXPR { + panic(n.no("FuncName")) + } + fn := NewNameAt(n.Selection.Pos, MethodSym(n.X.Type(), n.Sel), n.Type()) + fn.Class = PFUNC + if n.Selection.Nname != nil { + // TODO(austin): Nname is nil for interface method + // expressions (I.M), so we can't attach a Func to + // those here. + fn.Func = n.Selection.Nname.(*Name).Func + } + return fn +} + +// A SliceExpr is a slice expression X[Low:High] or X[Low:High:Max]. +type SliceExpr struct { + miniExpr + X Node + Low Node + High Node + Max Node +} + +func NewSliceExpr(pos src.XPos, op Op, x, low, high, max Node) *SliceExpr { + n := &SliceExpr{X: x, Low: low, High: high, Max: max} + n.pos = pos + n.op = op + return n +} + +func (n *SliceExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR: + n.op = op + } +} + +// IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR). +// o must be a slicing op. 
+func (o Op) IsSlice3() bool { + switch o { + case OSLICE, OSLICEARR, OSLICESTR: + return false + case OSLICE3, OSLICE3ARR: + return true + } + base.Fatalf("IsSlice3 op %v", o) + return false +} + +// A SliceHeader expression constructs a slice header from its parts. +type SliceHeaderExpr struct { + miniExpr + Ptr Node + Len Node + Cap Node +} + +func NewSliceHeaderExpr(pos src.XPos, typ *types.Type, ptr, len, cap Node) *SliceHeaderExpr { + n := &SliceHeaderExpr{Ptr: ptr, Len: len, Cap: cap} + n.pos = pos + n.op = OSLICEHEADER + n.typ = typ + return n +} + +// A StringHeaderExpr expression constructs a string header from its parts. +type StringHeaderExpr struct { + miniExpr + Ptr Node + Len Node +} + +func NewStringHeaderExpr(pos src.XPos, ptr, len Node) *StringHeaderExpr { + n := &StringHeaderExpr{Ptr: ptr, Len: len} + n.pos = pos + n.op = OSTRINGHEADER + n.typ = types.Types[types.TSTRING] + return n +} + +// A StarExpr is a dereference expression *X. +// It may end up being a value or a type. +type StarExpr struct { + miniExpr + X Node +} + +func NewStarExpr(pos src.XPos, x Node) *StarExpr { + n := &StarExpr{X: x} + n.op = ODEREF + n.pos = pos + return n +} + +func (n *StarExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 } +func (n *StarExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) } + +// A TypeAssertionExpr is a selector expression X.(Type). +// Before type-checking, the type is Ntype. +type TypeAssertExpr struct { + miniExpr + X Node + + // Runtime type information provided by walkDotType for + // assertions from non-empty interface to concrete type. + ITab Node `mknode:"-"` // *runtime.itab for Type implementing X's type + + // An internal/abi.TypeAssert descriptor to pass to the runtime. 
+ Descriptor *obj.LSym +} + +func NewTypeAssertExpr(pos src.XPos, x Node, typ *types.Type) *TypeAssertExpr { + n := &TypeAssertExpr{X: x} + n.pos = pos + n.op = ODOTTYPE + if typ != nil { + n.SetType(typ) + } + return n +} + +func (n *TypeAssertExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case ODOTTYPE, ODOTTYPE2: + n.op = op + } +} + +// A DynamicTypeAssertExpr asserts that X is of dynamic type RType. +type DynamicTypeAssertExpr struct { + miniExpr + X Node + + // SrcRType is an expression that yields a *runtime._type value + // representing X's type. It's used in failed assertion panic + // messages. + SrcRType Node + + // RType is an expression that yields a *runtime._type value + // representing the asserted type. + // + // BUG(mdempsky): If ITab is non-nil, RType may be nil. + RType Node + + // ITab is an expression that yields a *runtime.itab value + // representing the asserted type within the assertee expression's + // original interface type. + // + // ITab is only used for assertions from non-empty interface type to + // a concrete (i.e., non-interface) type. For all other assertions, + // ITab is nil. + ITab Node +} + +func NewDynamicTypeAssertExpr(pos src.XPos, op Op, x, rtype Node) *DynamicTypeAssertExpr { + n := &DynamicTypeAssertExpr{X: x, RType: rtype} + n.pos = pos + n.op = op + return n +} + +func (n *DynamicTypeAssertExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case ODYNAMICDOTTYPE, ODYNAMICDOTTYPE2: + n.op = op + } +} + +// A UnaryExpr is a unary expression Op X, +// or Op(X) for a builtin function that does not end up being a call. 
+type UnaryExpr struct { + miniExpr + X Node +} + +func NewUnaryExpr(pos src.XPos, op Op, x Node) *UnaryExpr { + n := &UnaryExpr{X: x} + n.pos = pos + n.SetOp(op) + return n +} + +func (n *UnaryExpr) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OBITNOT, ONEG, ONOT, OPLUS, ORECV, + OCAP, OCLEAR, OCLOSE, OIMAG, OLEN, ONEW, OPANIC, OREAL, + OCHECKNIL, OCFUNC, OIDATA, OITAB, OSPTR, + OUNSAFESTRINGDATA, OUNSAFESLICEDATA: + n.op = op + } +} + +func IsZero(n Node) bool { + switch n.Op() { + case ONIL: + return true + + case OLITERAL: + switch u := n.Val(); u.Kind() { + case constant.String: + return constant.StringVal(u) == "" + case constant.Bool: + return !constant.BoolVal(u) + default: + return constant.Sign(u) == 0 + } + + case OARRAYLIT: + n := n.(*CompLitExpr) + for _, n1 := range n.List { + if n1.Op() == OKEY { + n1 = n1.(*KeyExpr).Value + } + if !IsZero(n1) { + return false + } + } + return true + + case OSTRUCTLIT: + n := n.(*CompLitExpr) + for _, n1 := range n.List { + n1 := n1.(*StructKeyExpr) + if !IsZero(n1.Value) { + return false + } + } + return true + } + + return false +} + +// lvalue etc +func IsAddressable(n Node) bool { + switch n.Op() { + case OINDEX: + n := n.(*IndexExpr) + if n.X.Type() != nil && n.X.Type().IsArray() { + return IsAddressable(n.X) + } + if n.X.Type() != nil && n.X.Type().IsString() { + return false + } + fallthrough + case ODEREF, ODOTPTR: + return true + + case ODOT: + n := n.(*SelectorExpr) + return IsAddressable(n.X) + + case ONAME: + n := n.(*Name) + if n.Class == PFUNC { + return false + } + return true + + case OLINKSYMOFFSET: + return true + } + + return false +} + +// StaticValue analyzes n to find the earliest expression that always +// evaluates to the same value as n, which might be from an enclosing +// function. 
+// +// For example, given: +// +// var x int = g() +// func() { +// y := x +// *p = int(y) +// } +// +// calling StaticValue on the "int(y)" expression returns the outer +// "g()" expression. +func StaticValue(n Node) Node { + for { + if n.Op() == OCONVNOP { + n = n.(*ConvExpr).X + continue + } + + if n.Op() == OINLCALL { + n = n.(*InlinedCallExpr).SingleResult() + continue + } + + n1 := staticValue1(n) + if n1 == nil { + return n + } + n = n1 + } +} + +func staticValue1(nn Node) Node { + if nn.Op() != ONAME { + return nil + } + n := nn.(*Name).Canonical() + if n.Class != PAUTO { + return nil + } + + defn := n.Defn + if defn == nil { + return nil + } + + var rhs Node +FindRHS: + switch defn.Op() { + case OAS: + defn := defn.(*AssignStmt) + rhs = defn.Y + case OAS2: + defn := defn.(*AssignListStmt) + for i, lhs := range defn.Lhs { + if lhs == n { + rhs = defn.Rhs[i] + break FindRHS + } + } + base.Fatalf("%v missing from LHS of %v", n, defn) + default: + return nil + } + if rhs == nil { + base.Fatalf("RHS is nil: %v", defn) + } + + if Reassigned(n) { + return nil + } + + return rhs +} + +// Reassigned takes an ONAME node, walks the function in which it is +// defined, and returns a boolean indicating whether the name has any +// assignments other than its declaration. +// NB: global variables are always considered to be re-assigned. +// TODO: handle initial declaration not including an assignment and +// followed by a single assignment? +// NOTE: any changes made here should also be made in the corresponding +// code in the ReassignOracle.Init method. +func Reassigned(name *Name) bool { + if name.Op() != ONAME { + base.Fatalf("reassigned %v", name) + } + // no way to reliably check for no-reassignment of globals, assume it can be + if name.Curfn == nil { + return true + } + + if name.Addrtaken() { + return true // conservatively assume it's reassigned indirectly + } + + // TODO(mdempsky): This is inefficient and becoming increasingly + // unwieldy. 
Figure out a way to generalize escape analysis's + // reassignment detection for use by inlining and devirtualization. + + // isName reports whether n is a reference to name. + isName := func(x Node) bool { + if x == nil { + return false + } + n, ok := OuterValue(x).(*Name) + return ok && n.Canonical() == name + } + + var do func(n Node) bool + do = func(n Node) bool { + switch n.Op() { + case OAS: + n := n.(*AssignStmt) + if isName(n.X) && n != name.Defn { + return true + } + case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE, OAS2RECV, OSELRECV2: + n := n.(*AssignListStmt) + for _, p := range n.Lhs { + if isName(p) && n != name.Defn { + return true + } + } + case OASOP: + n := n.(*AssignOpStmt) + if isName(n.X) { + return true + } + case OADDR: + n := n.(*AddrExpr) + if isName(n.X) { + base.FatalfAt(n.Pos(), "%v not marked addrtaken", name) + } + case ORANGE: + n := n.(*RangeStmt) + if isName(n.Key) || isName(n.Value) { + return true + } + case OCLOSURE: + n := n.(*ClosureExpr) + if Any(n.Func, do) { + return true + } + } + return false + } + return Any(name.Curfn, do) +} + +// StaticCalleeName returns the ONAME/PFUNC for n, if known. +func StaticCalleeName(n Node) *Name { + switch n.Op() { + case OMETHEXPR: + n := n.(*SelectorExpr) + return MethodExprName(n) + case ONAME: + n := n.(*Name) + if n.Class == PFUNC { + return n + } + case OCLOSURE: + return n.(*ClosureExpr).Func.Nname + } + return nil +} + +// IsIntrinsicCall reports whether the compiler back end will treat the call as an intrinsic operation. +var IsIntrinsicCall = func(*CallExpr) bool { return false } + +// SameSafeExpr checks whether it is safe to reuse one of l and r +// instead of computing both. SameSafeExpr assumes that l and r are +// used in the same statement or expression. In order for it to be +// safe to reuse l or r, they must: +// - be the same expression +// - not have side-effects (no function calls, no channel ops); +// however, panics are ok +// - not cause inappropriate aliasing; e.g. 
two string to []byte +// conversions, must result in two distinct slices +// +// The handling of OINDEXMAP is subtle. OINDEXMAP can occur both +// as an lvalue (map assignment) and an rvalue (map access). This is +// currently OK, since the only place SameSafeExpr gets used on an +// lvalue expression is for OSLICE and OAPPEND optimizations, and it +// is correct in those settings. +func SameSafeExpr(l Node, r Node) bool { + for l.Op() == OCONVNOP { + l = l.(*ConvExpr).X + } + for r.Op() == OCONVNOP { + r = r.(*ConvExpr).X + } + if l.Op() != r.Op() || !types.Identical(l.Type(), r.Type()) { + return false + } + + switch l.Op() { + case ONAME: + return l == r + + case ODOT, ODOTPTR: + l := l.(*SelectorExpr) + r := r.(*SelectorExpr) + return l.Sel != nil && r.Sel != nil && l.Sel == r.Sel && SameSafeExpr(l.X, r.X) + + case ODEREF: + l := l.(*StarExpr) + r := r.(*StarExpr) + return SameSafeExpr(l.X, r.X) + + case ONOT, OBITNOT, OPLUS, ONEG: + l := l.(*UnaryExpr) + r := r.(*UnaryExpr) + return SameSafeExpr(l.X, r.X) + + case OCONV: + l := l.(*ConvExpr) + r := r.(*ConvExpr) + // Some conversions can't be reused, such as []byte(str). + // Allow only numeric-ish types. This is a bit conservative. + return types.IsSimple[l.Type().Kind()] && SameSafeExpr(l.X, r.X) + + case OINDEX, OINDEXMAP: + l := l.(*IndexExpr) + r := r.(*IndexExpr) + return SameSafeExpr(l.X, r.X) && SameSafeExpr(l.Index, r.Index) + + case OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD: + l := l.(*BinaryExpr) + r := r.(*BinaryExpr) + return SameSafeExpr(l.X, r.X) && SameSafeExpr(l.Y, r.Y) + + case OLITERAL: + return constant.Compare(l.Val(), token.EQL, r.Val()) + + case ONIL: + return true + } + + return false +} + +// ShouldCheckPtr reports whether pointer checking should be enabled for +// function fn at a given level. See debugHelpFooter for defined +// levels. 
+func ShouldCheckPtr(fn *Func, level int) bool { + return base.Debug.Checkptr >= level && fn.Pragma&NoCheckPtr == 0 +} + +// ShouldAsanCheckPtr reports whether pointer checking should be enabled for +// function fn when -asan is enabled. +func ShouldAsanCheckPtr(fn *Func) bool { + return base.Flag.ASan && fn.Pragma&NoCheckPtr == 0 +} + +// IsReflectHeaderDataField reports whether l is an expression p.Data +// where p has type reflect.SliceHeader or reflect.StringHeader. +func IsReflectHeaderDataField(l Node) bool { + if l.Type() != types.Types[types.TUINTPTR] { + return false + } + + var tsym *types.Sym + switch l.Op() { + case ODOT: + l := l.(*SelectorExpr) + tsym = l.X.Type().Sym() + case ODOTPTR: + l := l.(*SelectorExpr) + tsym = l.X.Type().Elem().Sym() + default: + return false + } + + if tsym == nil || l.Sym().Name != "Data" || tsym.Pkg.Path != "reflect" { + return false + } + return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader" +} + +func ParamNames(ft *types.Type) []Node { + args := make([]Node, ft.NumParams()) + for i, f := range ft.Params() { + args[i] = f.Nname.(*Name) + } + return args +} + +// MethodSym returns the method symbol representing a method name +// associated with a specific receiver type. +// +// Method symbols can be used to distinguish the same method appearing +// in different method sets. For example, T.M and (*T).M have distinct +// method symbols. +// +// The returned symbol will be marked as a function. +func MethodSym(recv *types.Type, msym *types.Sym) *types.Sym { + sym := MethodSymSuffix(recv, msym, "") + sym.SetFunc(true) + return sym +} + +// MethodSymSuffix is like MethodSym, but allows attaching a +// distinguisher suffix. To avoid collisions, the suffix must not +// start with a letter, number, or period. 
+func MethodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sym { + if msym.IsBlank() { + base.Fatalf("blank method name") + } + + rsym := recv.Sym() + if recv.IsPtr() { + if rsym != nil { + base.Fatalf("declared pointer receiver type: %v", recv) + } + rsym = recv.Elem().Sym() + } + + // Find the package the receiver type appeared in. For + // anonymous receiver types (i.e., anonymous structs with + // embedded fields), use the "go" pseudo-package instead. + rpkg := Pkgs.Go + if rsym != nil { + rpkg = rsym.Pkg + } + + var b bytes.Buffer + if recv.IsPtr() { + // The parentheses aren't really necessary, but + // they're pretty traditional at this point. + fmt.Fprintf(&b, "(%-S)", recv) + } else { + fmt.Fprintf(&b, "%-S", recv) + } + + // A particular receiver type may have multiple non-exported + // methods with the same name. To disambiguate them, include a + // package qualifier for names that came from a different + // package than the receiver type. + if !types.IsExported(msym.Name) && msym.Pkg != rpkg { + b.WriteString(".") + b.WriteString(msym.Pkg.Prefix) + } + + b.WriteString(".") + b.WriteString(msym.Name) + b.WriteString(suffix) + return rpkg.LookupBytes(b.Bytes()) +} + +// LookupMethodSelector returns the types.Sym of the selector for a method +// named in local symbol name, as well as the types.Sym of the receiver. +// +// TODO(prattmic): this does not attempt to handle method suffixes (wrappers). +func LookupMethodSelector(pkg *types.Pkg, name string) (typ, meth *types.Sym, err error) { + typeName, methName := splitType(name) + if typeName == "" { + return nil, nil, fmt.Errorf("%s doesn't contain type split", name) + } + + if len(typeName) > 3 && typeName[:2] == "(*" && typeName[len(typeName)-1] == ')' { + // Symbol name is for a pointer receiver method. We just want + // the base type name. 
+ typeName = typeName[2 : len(typeName)-1] + } + + typ = pkg.Lookup(typeName) + meth = pkg.Selector(methName) + return typ, meth, nil +} + +// splitType splits a local symbol name into type and method (fn). If this a +// free function, typ == "". +// +// N.B. closures and methods can be ambiguous (e.g., bar.func1). These cases +// are returned as methods. +func splitType(name string) (typ, fn string) { + // Types are split on the first dot, ignoring everything inside + // brackets (instantiation of type parameter, usually including + // "go.shape"). + bracket := 0 + for i, r := range name { + if r == '.' && bracket == 0 { + return name[:i], name[i+1:] + } + if r == '[' { + bracket++ + } + if r == ']' { + bracket-- + } + } + return "", name +} + +// MethodExprName returns the ONAME representing the method +// referenced by expression n, which must be a method selector, +// method expression, or method value. +func MethodExprName(n Node) *Name { + name, _ := MethodExprFunc(n).Nname.(*Name) + return name +} + +// MethodExprFunc is like MethodExprName, but returns the types.Field instead. +func MethodExprFunc(n Node) *types.Field { + switch n.Op() { + case ODOTMETH, OMETHEXPR, OMETHVALUE: + return n.(*SelectorExpr).Selection + } + base.Fatalf("unexpected node: %v (%v)", n, n.Op()) + panic("unreachable") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/fmt.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/fmt.go new file mode 100644 index 0000000000000000000000000000000000000000..31c610348bcd80bbff89ced62734c3c213accd8f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/fmt.go @@ -0,0 +1,1208 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ir + +import ( + "bytes" + "fmt" + "go/constant" + "io" + "os" + "path/filepath" + "reflect" + "strings" + + "unicode/utf8" + + "cmd/compile/internal/base" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +// Op + +var OpNames = []string{ + OADDR: "&", + OADD: "+", + OADDSTR: "+", + OANDAND: "&&", + OANDNOT: "&^", + OAND: "&", + OAPPEND: "append", + OAS: "=", + OAS2: "=", + OBREAK: "break", + OCALL: "function call", // not actual syntax + OCAP: "cap", + OCASE: "case", + OCLEAR: "clear", + OCLOSE: "close", + OCOMPLEX: "complex", + OBITNOT: "^", + OCONTINUE: "continue", + OCOPY: "copy", + ODELETE: "delete", + ODEFER: "defer", + ODIV: "/", + OEQ: "==", + OFALL: "fallthrough", + OFOR: "for", + OGE: ">=", + OGOTO: "goto", + OGT: ">", + OIF: "if", + OIMAG: "imag", + OINLMARK: "inlmark", + ODEREF: "*", + OLEN: "len", + OLE: "<=", + OLSH: "<<", + OLT: "<", + OMAKE: "make", + ONEG: "-", + OMAX: "max", + OMIN: "min", + OMOD: "%", + OMUL: "*", + ONEW: "new", + ONE: "!=", + ONOT: "!", + OOROR: "||", + OOR: "|", + OPANIC: "panic", + OPLUS: "+", + OPRINTLN: "println", + OPRINT: "print", + ORANGE: "range", + OREAL: "real", + ORECV: "<-", + ORECOVER: "recover", + ORETURN: "return", + ORSH: ">>", + OSELECT: "select", + OSEND: "<-", + OSUB: "-", + OSWITCH: "switch", + OUNSAFEADD: "unsafe.Add", + OUNSAFESLICE: "unsafe.Slice", + OUNSAFESLICEDATA: "unsafe.SliceData", + OUNSAFESTRING: "unsafe.String", + OUNSAFESTRINGDATA: "unsafe.StringData", + OXOR: "^", +} + +// GoString returns the Go syntax for the Op, or else its name. +func (o Op) GoString() string { + if int(o) < len(OpNames) && OpNames[o] != "" { + return OpNames[o] + } + return o.String() +} + +// Format implements formatting for an Op. 
+// The valid formats are: +// +// %v Go syntax ("+", "<-", "print") +// %+v Debug syntax ("ADD", "RECV", "PRINT") +func (o Op) Format(s fmt.State, verb rune) { + switch verb { + default: + fmt.Fprintf(s, "%%!%c(Op=%d)", verb, int(o)) + case 'v': + if s.Flag('+') { + // %+v is OMUL instead of "*" + io.WriteString(s, o.String()) + return + } + io.WriteString(s, o.GoString()) + } +} + +// Node + +// fmtNode implements formatting for a Node n. +// Every Node implementation must define a Format method that calls fmtNode. +// The valid formats are: +// +// %v Go syntax +// %L Go syntax followed by " (type T)" if type is known. +// %+v Debug syntax, as in Dump. +func fmtNode(n Node, s fmt.State, verb rune) { + // %+v prints Dump. + // Otherwise we print Go syntax. + if s.Flag('+') && verb == 'v' { + dumpNode(s, n, 1) + return + } + + if verb != 'v' && verb != 'S' && verb != 'L' { + fmt.Fprintf(s, "%%!%c(*Node=%p)", verb, n) + return + } + + if n == nil { + fmt.Fprint(s, "") + return + } + + t := n.Type() + if verb == 'L' && t != nil { + if t.Kind() == types.TNIL { + fmt.Fprint(s, "nil") + } else if n.Op() == ONAME && n.Name().AutoTemp() { + fmt.Fprintf(s, "%v value", t) + } else { + fmt.Fprintf(s, "%v (type %v)", n, t) + } + return + } + + // TODO inlining produces expressions with ninits. we can't print these yet. 
+ + if OpPrec[n.Op()] < 0 { + stmtFmt(n, s) + return + } + + exprFmt(n, s, 0) +} + +var OpPrec = []int{ + OAPPEND: 8, + OBYTES2STR: 8, + OARRAYLIT: 8, + OSLICELIT: 8, + ORUNES2STR: 8, + OCALLFUNC: 8, + OCALLINTER: 8, + OCALLMETH: 8, + OCALL: 8, + OCAP: 8, + OCLEAR: 8, + OCLOSE: 8, + OCOMPLIT: 8, + OCONVIFACE: 8, + OCONVNOP: 8, + OCONV: 8, + OCOPY: 8, + ODELETE: 8, + OGETG: 8, + OLEN: 8, + OLITERAL: 8, + OMAKESLICE: 8, + OMAKESLICECOPY: 8, + OMAKE: 8, + OMAPLIT: 8, + OMAX: 8, + OMIN: 8, + ONAME: 8, + ONEW: 8, + ONIL: 8, + ONONAME: 8, + OPANIC: 8, + OPAREN: 8, + OPRINTLN: 8, + OPRINT: 8, + ORUNESTR: 8, + OSLICE2ARR: 8, + OSLICE2ARRPTR: 8, + OSTR2BYTES: 8, + OSTR2RUNES: 8, + OSTRUCTLIT: 8, + OTYPE: 8, + OUNSAFEADD: 8, + OUNSAFESLICE: 8, + OUNSAFESLICEDATA: 8, + OUNSAFESTRING: 8, + OUNSAFESTRINGDATA: 8, + OINDEXMAP: 8, + OINDEX: 8, + OSLICE: 8, + OSLICESTR: 8, + OSLICEARR: 8, + OSLICE3: 8, + OSLICE3ARR: 8, + OSLICEHEADER: 8, + OSTRINGHEADER: 8, + ODOTINTER: 8, + ODOTMETH: 8, + ODOTPTR: 8, + ODOTTYPE2: 8, + ODOTTYPE: 8, + ODOT: 8, + OXDOT: 8, + OMETHVALUE: 8, + OMETHEXPR: 8, + OPLUS: 7, + ONOT: 7, + OBITNOT: 7, + ONEG: 7, + OADDR: 7, + ODEREF: 7, + ORECV: 7, + OMUL: 6, + ODIV: 6, + OMOD: 6, + OLSH: 6, + ORSH: 6, + OAND: 6, + OANDNOT: 6, + OADD: 5, + OSUB: 5, + OOR: 5, + OXOR: 5, + OEQ: 4, + OLT: 4, + OLE: 4, + OGE: 4, + OGT: 4, + ONE: 4, + OSEND: 3, + OANDAND: 2, + OOROR: 1, + + // Statements handled by stmtfmt + OAS: -1, + OAS2: -1, + OAS2DOTTYPE: -1, + OAS2FUNC: -1, + OAS2MAPR: -1, + OAS2RECV: -1, + OASOP: -1, + OBLOCK: -1, + OBREAK: -1, + OCASE: -1, + OCONTINUE: -1, + ODCL: -1, + ODEFER: -1, + OFALL: -1, + OFOR: -1, + OGOTO: -1, + OIF: -1, + OLABEL: -1, + OGO: -1, + ORANGE: -1, + ORETURN: -1, + OSELECT: -1, + OSWITCH: -1, + + OEND: 0, +} + +// StmtWithInit reports whether op is a statement with an explicit init list. 
+func StmtWithInit(op Op) bool { + switch op { + case OIF, OFOR, OSWITCH: + return true + } + return false +} + +func stmtFmt(n Node, s fmt.State) { + // NOTE(rsc): This code used to support the text-based + // which was more aggressive about printing full Go syntax + // (for example, an actual loop instead of "for loop"). + // The code is preserved for now in case we want to expand + // any of those shortenings later. Or maybe we will delete + // the code. But for now, keep it. + const exportFormat = false + + // some statements allow for an init, but at most one, + // but we may have an arbitrary number added, eg by typecheck + // and inlining. If it doesn't fit the syntax, emit an enclosing + // block starting with the init statements. + + // if we can just say "for" n->ninit; ... then do so + simpleinit := len(n.Init()) == 1 && len(n.Init()[0].Init()) == 0 && StmtWithInit(n.Op()) + + // otherwise, print the inits as separate statements + complexinit := len(n.Init()) != 0 && !simpleinit && exportFormat + + // but if it was for if/for/switch, put in an extra surrounding block to limit the scope + extrablock := complexinit && StmtWithInit(n.Op()) + + if extrablock { + fmt.Fprint(s, "{") + } + + if complexinit { + fmt.Fprintf(s, " %v; ", n.Init()) + } + + switch n.Op() { + case ODCL: + n := n.(*Decl) + fmt.Fprintf(s, "var %v %v", n.X.Sym(), n.X.Type()) + + // Don't export "v = " initializing statements, hope they're always + // preceded by the DCL which will be re-parsed and typechecked to reproduce + // the "v = " again. 
+ case OAS: + n := n.(*AssignStmt) + if n.Def && !complexinit { + fmt.Fprintf(s, "%v := %v", n.X, n.Y) + } else { + fmt.Fprintf(s, "%v = %v", n.X, n.Y) + } + + case OASOP: + n := n.(*AssignOpStmt) + if n.IncDec { + if n.AsOp == OADD { + fmt.Fprintf(s, "%v++", n.X) + } else { + fmt.Fprintf(s, "%v--", n.X) + } + break + } + + fmt.Fprintf(s, "%v %v= %v", n.X, n.AsOp, n.Y) + + case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: + n := n.(*AssignListStmt) + if n.Def && !complexinit { + fmt.Fprintf(s, "%.v := %.v", n.Lhs, n.Rhs) + } else { + fmt.Fprintf(s, "%.v = %.v", n.Lhs, n.Rhs) + } + + case OBLOCK: + n := n.(*BlockStmt) + if len(n.List) != 0 { + fmt.Fprintf(s, "%v", n.List) + } + + case ORETURN: + n := n.(*ReturnStmt) + fmt.Fprintf(s, "return %.v", n.Results) + + case OTAILCALL: + n := n.(*TailCallStmt) + fmt.Fprintf(s, "tailcall %v", n.Call) + + case OINLMARK: + n := n.(*InlineMarkStmt) + fmt.Fprintf(s, "inlmark %d", n.Index) + + case OGO: + n := n.(*GoDeferStmt) + fmt.Fprintf(s, "go %v", n.Call) + + case ODEFER: + n := n.(*GoDeferStmt) + fmt.Fprintf(s, "defer %v", n.Call) + + case OIF: + n := n.(*IfStmt) + if simpleinit { + fmt.Fprintf(s, "if %v; %v { %v }", n.Init()[0], n.Cond, n.Body) + } else { + fmt.Fprintf(s, "if %v { %v }", n.Cond, n.Body) + } + if len(n.Else) != 0 { + fmt.Fprintf(s, " else { %v }", n.Else) + } + + case OFOR: + n := n.(*ForStmt) + if !exportFormat { // TODO maybe only if FmtShort, same below + fmt.Fprintf(s, "for loop") + break + } + + fmt.Fprint(s, "for") + if n.DistinctVars { + fmt.Fprint(s, " /* distinct */") + } + if simpleinit { + fmt.Fprintf(s, " %v;", n.Init()[0]) + } else if n.Post != nil { + fmt.Fprint(s, " ;") + } + + if n.Cond != nil { + fmt.Fprintf(s, " %v", n.Cond) + } + + if n.Post != nil { + fmt.Fprintf(s, "; %v", n.Post) + } else if simpleinit { + fmt.Fprint(s, ";") + } + + fmt.Fprintf(s, " { %v }", n.Body) + + case ORANGE: + n := n.(*RangeStmt) + if !exportFormat { + fmt.Fprint(s, "for loop") + break + } + + fmt.Fprint(s, 
"for") + if n.Key != nil { + fmt.Fprintf(s, " %v", n.Key) + if n.Value != nil { + fmt.Fprintf(s, ", %v", n.Value) + } + fmt.Fprint(s, " =") + } + fmt.Fprintf(s, " range %v { %v }", n.X, n.Body) + if n.DistinctVars { + fmt.Fprint(s, " /* distinct vars */") + } + + case OSELECT: + n := n.(*SelectStmt) + if !exportFormat { + fmt.Fprintf(s, "%v statement", n.Op()) + break + } + fmt.Fprintf(s, "select { %v }", n.Cases) + + case OSWITCH: + n := n.(*SwitchStmt) + if !exportFormat { + fmt.Fprintf(s, "%v statement", n.Op()) + break + } + fmt.Fprintf(s, "switch") + if simpleinit { + fmt.Fprintf(s, " %v;", n.Init()[0]) + } + if n.Tag != nil { + fmt.Fprintf(s, " %v ", n.Tag) + } + fmt.Fprintf(s, " { %v }", n.Cases) + + case OCASE: + n := n.(*CaseClause) + if len(n.List) != 0 { + fmt.Fprintf(s, "case %.v", n.List) + } else { + fmt.Fprint(s, "default") + } + fmt.Fprintf(s, ": %v", n.Body) + + case OBREAK, OCONTINUE, OGOTO, OFALL: + n := n.(*BranchStmt) + if n.Label != nil { + fmt.Fprintf(s, "%v %v", n.Op(), n.Label) + } else { + fmt.Fprintf(s, "%v", n.Op()) + } + + case OLABEL: + n := n.(*LabelStmt) + fmt.Fprintf(s, "%v: ", n.Label) + } + + if extrablock { + fmt.Fprint(s, "}") + } +} + +func exprFmt(n Node, s fmt.State, prec int) { + // NOTE(rsc): This code used to support the text-based + // which was more aggressive about printing full Go syntax + // (for example, an actual loop instead of "for loop"). + // The code is preserved for now in case we want to expand + // any of those shortenings later. Or maybe we will delete + // the code. But for now, keep it. + const exportFormat = false + + for { + if n == nil { + fmt.Fprint(s, "") + return + } + + // Skip implicit operations introduced during typechecking. 
+ switch nn := n; nn.Op() { + case OADDR: + nn := nn.(*AddrExpr) + if nn.Implicit() { + n = nn.X + continue + } + case ODEREF: + nn := nn.(*StarExpr) + if nn.Implicit() { + n = nn.X + continue + } + case OCONV, OCONVNOP, OCONVIFACE: + nn := nn.(*ConvExpr) + if nn.Implicit() { + n = nn.X + continue + } + } + + break + } + + nprec := OpPrec[n.Op()] + if n.Op() == OTYPE && n.Type() != nil && n.Type().IsPtr() { + nprec = OpPrec[ODEREF] + } + + if prec > nprec { + fmt.Fprintf(s, "(%v)", n) + return + } + + switch n.Op() { + case OPAREN: + n := n.(*ParenExpr) + fmt.Fprintf(s, "(%v)", n.X) + + case ONIL: + fmt.Fprint(s, "nil") + + case OLITERAL: + if n.Sym() != nil { + fmt.Fprint(s, n.Sym()) + return + } + + typ := n.Type() + val := n.Val() + + // Special case for rune constants. + if typ == types.RuneType || typ == types.UntypedRune { + if x, ok := constant.Uint64Val(val); ok && x <= utf8.MaxRune { + fmt.Fprintf(s, "%q", x) + return + } + } + + // Only include typ if it's neither the default nor untyped type + // for the constant value. + if k := val.Kind(); typ == types.Types[types.DefaultKinds[k]] || typ == types.UntypedTypes[k] { + fmt.Fprint(s, val) + } else { + fmt.Fprintf(s, "%v(%v)", typ, val) + } + + case ODCLFUNC: + n := n.(*Func) + if sym := n.Sym(); sym != nil { + fmt.Fprint(s, sym) + return + } + fmt.Fprintf(s, "") + + case ONAME: + n := n.(*Name) + // Special case: name used as local variable in export. 
+ // _ becomes ~b%d internally; print as _ for export + if !exportFormat && n.Sym() != nil && n.Sym().Name[0] == '~' && n.Sym().Name[1] == 'b' { + fmt.Fprint(s, "_") + return + } + fallthrough + case ONONAME: + fmt.Fprint(s, n.Sym()) + + case OLINKSYMOFFSET: + n := n.(*LinksymOffsetExpr) + fmt.Fprintf(s, "(%v)(%s@%d)", n.Type(), n.Linksym.Name, n.Offset_) + + case OTYPE: + if n.Type() == nil && n.Sym() != nil { + fmt.Fprint(s, n.Sym()) + return + } + fmt.Fprintf(s, "%v", n.Type()) + + case OCLOSURE: + n := n.(*ClosureExpr) + if !exportFormat { + fmt.Fprint(s, "func literal") + return + } + fmt.Fprintf(s, "%v { %v }", n.Type(), n.Func.Body) + + case OPTRLIT: + n := n.(*AddrExpr) + fmt.Fprintf(s, "&%v", n.X) + + case OCOMPLIT, OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT: + n := n.(*CompLitExpr) + if n.Implicit() { + fmt.Fprintf(s, "... argument") + return + } + fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(len(n.List) != 0)) + + case OKEY: + n := n.(*KeyExpr) + if n.Key != nil && n.Value != nil { + fmt.Fprintf(s, "%v:%v", n.Key, n.Value) + return + } + + if n.Key == nil && n.Value != nil { + fmt.Fprintf(s, ":%v", n.Value) + return + } + if n.Key != nil && n.Value == nil { + fmt.Fprintf(s, "%v:", n.Key) + return + } + fmt.Fprint(s, ":") + + case OSTRUCTKEY: + n := n.(*StructKeyExpr) + fmt.Fprintf(s, "%v:%v", n.Field, n.Value) + + case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH, OMETHVALUE, OMETHEXPR: + n := n.(*SelectorExpr) + exprFmt(n.X, s, nprec) + if n.Sel == nil { + fmt.Fprint(s, ".") + return + } + fmt.Fprintf(s, ".%s", n.Sel.Name) + + case ODOTTYPE, ODOTTYPE2: + n := n.(*TypeAssertExpr) + exprFmt(n.X, s, nprec) + fmt.Fprintf(s, ".(%v)", n.Type()) + + case OINDEX, OINDEXMAP: + n := n.(*IndexExpr) + exprFmt(n.X, s, nprec) + fmt.Fprintf(s, "[%v]", n.Index) + + case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR: + n := n.(*SliceExpr) + exprFmt(n.X, s, nprec) + fmt.Fprint(s, "[") + if n.Low != nil { + fmt.Fprint(s, n.Low) + } + fmt.Fprint(s, ":") + if n.High != nil 
{ + fmt.Fprint(s, n.High) + } + if n.Op().IsSlice3() { + fmt.Fprint(s, ":") + if n.Max != nil { + fmt.Fprint(s, n.Max) + } + } + fmt.Fprint(s, "]") + + case OSLICEHEADER: + n := n.(*SliceHeaderExpr) + fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Ptr, n.Len, n.Cap) + + case OCOMPLEX, OCOPY, OUNSAFEADD, OUNSAFESLICE: + n := n.(*BinaryExpr) + fmt.Fprintf(s, "%v(%v, %v)", n.Op(), n.X, n.Y) + + case OCONV, + OCONVIFACE, + OCONVNOP, + OBYTES2STR, + ORUNES2STR, + OSTR2BYTES, + OSTR2RUNES, + ORUNESTR, + OSLICE2ARR, + OSLICE2ARRPTR: + n := n.(*ConvExpr) + if n.Type() == nil || n.Type().Sym() == nil { + fmt.Fprintf(s, "(%v)", n.Type()) + } else { + fmt.Fprintf(s, "%v", n.Type()) + } + fmt.Fprintf(s, "(%v)", n.X) + + case OREAL, + OIMAG, + OCAP, + OCLEAR, + OCLOSE, + OLEN, + ONEW, + OPANIC: + n := n.(*UnaryExpr) + fmt.Fprintf(s, "%v(%v)", n.Op(), n.X) + + case OAPPEND, + ODELETE, + OMAKE, + OMAX, + OMIN, + ORECOVER, + OPRINT, + OPRINTLN: + n := n.(*CallExpr) + if n.IsDDD { + fmt.Fprintf(s, "%v(%.v...)", n.Op(), n.Args) + return + } + fmt.Fprintf(s, "%v(%.v)", n.Op(), n.Args) + + case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG: + n := n.(*CallExpr) + exprFmt(n.Fun, s, nprec) + if n.IsDDD { + fmt.Fprintf(s, "(%.v...)", n.Args) + return + } + fmt.Fprintf(s, "(%.v)", n.Args) + + case OINLCALL: + n := n.(*InlinedCallExpr) + // TODO(mdempsky): Print Init and/or Body? 
+ if len(n.ReturnVars) == 1 { + fmt.Fprintf(s, "%v", n.ReturnVars[0]) + return + } + fmt.Fprintf(s, "(.%v)", n.ReturnVars) + + case OMAKEMAP, OMAKECHAN, OMAKESLICE: + n := n.(*MakeExpr) + if n.Cap != nil { + fmt.Fprintf(s, "make(%v, %v, %v)", n.Type(), n.Len, n.Cap) + return + } + if n.Len != nil && (n.Op() == OMAKESLICE || !n.Len.Type().IsUntyped()) { + fmt.Fprintf(s, "make(%v, %v)", n.Type(), n.Len) + return + } + fmt.Fprintf(s, "make(%v)", n.Type()) + + case OMAKESLICECOPY: + n := n.(*MakeExpr) + fmt.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type(), n.Len, n.Cap) + + case OPLUS, ONEG, OBITNOT, ONOT, ORECV: + // Unary + n := n.(*UnaryExpr) + fmt.Fprintf(s, "%v", n.Op()) + if n.X != nil && n.X.Op() == n.Op() { + fmt.Fprint(s, " ") + } + exprFmt(n.X, s, nprec+1) + + case OADDR: + n := n.(*AddrExpr) + fmt.Fprintf(s, "%v", n.Op()) + if n.X != nil && n.X.Op() == n.Op() { + fmt.Fprint(s, " ") + } + exprFmt(n.X, s, nprec+1) + + case ODEREF: + n := n.(*StarExpr) + fmt.Fprintf(s, "%v", n.Op()) + exprFmt(n.X, s, nprec+1) + + // Binary + case OADD, + OAND, + OANDNOT, + ODIV, + OEQ, + OGE, + OGT, + OLE, + OLT, + OLSH, + OMOD, + OMUL, + ONE, + OOR, + ORSH, + OSUB, + OXOR: + n := n.(*BinaryExpr) + exprFmt(n.X, s, nprec) + fmt.Fprintf(s, " %v ", n.Op()) + exprFmt(n.Y, s, nprec+1) + + case OANDAND, + OOROR: + n := n.(*LogicalExpr) + exprFmt(n.X, s, nprec) + fmt.Fprintf(s, " %v ", n.Op()) + exprFmt(n.Y, s, nprec+1) + + case OSEND: + n := n.(*SendStmt) + exprFmt(n.Chan, s, nprec) + fmt.Fprintf(s, " <- ") + exprFmt(n.Value, s, nprec+1) + + case OADDSTR: + n := n.(*AddStringExpr) + for i, n1 := range n.List { + if i != 0 { + fmt.Fprint(s, " + ") + } + exprFmt(n1, s, nprec) + } + default: + fmt.Fprintf(s, "", n.Op()) + } +} + +func ellipsisIf(b bool) string { + if b { + return "..." + } + return "" +} + +// Nodes + +// Format implements formatting for a Nodes. 
+// The valid formats are: +// +// %v Go syntax, semicolon-separated +// %.v Go syntax, comma-separated +// %+v Debug syntax, as in DumpList. +func (l Nodes) Format(s fmt.State, verb rune) { + if s.Flag('+') && verb == 'v' { + // %+v is DumpList output + dumpNodes(s, l, 1) + return + } + + if verb != 'v' { + fmt.Fprintf(s, "%%!%c(Nodes)", verb) + return + } + + sep := "; " + if _, ok := s.Precision(); ok { // %.v is expr list + sep = ", " + } + + for i, n := range l { + fmt.Fprint(s, n) + if i+1 < len(l) { + fmt.Fprint(s, sep) + } + } +} + +// Dump + +// Dump prints the message s followed by a debug dump of n. +func Dump(s string, n Node) { + fmt.Printf("%s%+v\n", s, n) +} + +// DumpList prints the message s followed by a debug dump of each node in the list. +func DumpList(s string, list Nodes) { + var buf bytes.Buffer + FDumpList(&buf, s, list) + os.Stdout.Write(buf.Bytes()) +} + +// FDumpList prints to w the message s followed by a debug dump of each node in the list. +func FDumpList(w io.Writer, s string, list Nodes) { + io.WriteString(w, s) + dumpNodes(w, list, 1) + io.WriteString(w, "\n") +} + +// indent prints indentation to w. +func indent(w io.Writer, depth int) { + fmt.Fprint(w, "\n") + for i := 0; i < depth; i++ { + fmt.Fprint(w, ". ") + } +} + +// EscFmt is set by the escape analysis code to add escape analysis details to the node print. +var EscFmt func(n Node) string + +// dumpNodeHeader prints the debug-format node header line to w. 
+func dumpNodeHeader(w io.Writer, n Node) { + // Useful to see which nodes in an AST printout are actually identical + if base.Debug.DumpPtrs != 0 { + fmt.Fprintf(w, " p(%p)", n) + } + + if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Defn != nil { + // Useful to see where Defn is set and what node it points to + fmt.Fprintf(w, " defn(%p)", n.Name().Defn) + } + + if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Curfn != nil { + // Useful to see where Defn is set and what node it points to + fmt.Fprintf(w, " curfn(%p)", n.Name().Curfn) + } + if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Outer != nil { + // Useful to see where Defn is set and what node it points to + fmt.Fprintf(w, " outer(%p)", n.Name().Outer) + } + + if EscFmt != nil { + if esc := EscFmt(n); esc != "" { + fmt.Fprintf(w, " %s", esc) + } + } + + if n.Sym() != nil && n.Op() != ONAME && n.Op() != ONONAME && n.Op() != OTYPE { + fmt.Fprintf(w, " %+v", n.Sym()) + } + + // Print Node-specific fields of basic type in header line. + v := reflect.ValueOf(n).Elem() + t := v.Type() + nf := t.NumField() + for i := 0; i < nf; i++ { + tf := t.Field(i) + if tf.PkgPath != "" { + // skip unexported field - Interface will fail + continue + } + k := tf.Type.Kind() + if reflect.Bool <= k && k <= reflect.Complex128 { + name := strings.TrimSuffix(tf.Name, "_") + vf := v.Field(i) + vfi := vf.Interface() + if name == "Offset" && vfi == types.BADWIDTH || name != "Offset" && vf.IsZero() { + continue + } + if vfi == true { + fmt.Fprintf(w, " %s", name) + } else { + fmt.Fprintf(w, " %s:%+v", name, vf.Interface()) + } + } + } + + // Print Node-specific booleans by looking for methods. + // Different v, t from above - want *Struct not Struct, for methods. 
+ v = reflect.ValueOf(n) + t = v.Type() + nm := t.NumMethod() + for i := 0; i < nm; i++ { + tm := t.Method(i) + if tm.PkgPath != "" { + // skip unexported method - call will fail + continue + } + m := v.Method(i) + mt := m.Type() + if mt.NumIn() == 0 && mt.NumOut() == 1 && mt.Out(0).Kind() == reflect.Bool { + // TODO(rsc): Remove the func/defer/recover wrapping, + // which is guarding against panics in miniExpr, + // once we get down to the simpler state in which + // nodes have no getter methods that aren't allowed to be called. + func() { + defer func() { recover() }() + if m.Call(nil)[0].Bool() { + name := strings.TrimSuffix(tm.Name, "_") + fmt.Fprintf(w, " %s", name) + } + }() + } + } + + if n.Op() == OCLOSURE { + n := n.(*ClosureExpr) + if fn := n.Func; fn != nil && fn.Nname.Sym() != nil { + fmt.Fprintf(w, " fnName(%+v)", fn.Nname.Sym()) + } + } + + if n.Type() != nil { + if n.Op() == OTYPE { + fmt.Fprintf(w, " type") + } + fmt.Fprintf(w, " %+v", n.Type()) + } + if n.Typecheck() != 0 { + fmt.Fprintf(w, " tc(%d)", n.Typecheck()) + } + + if n.Pos().IsKnown() { + fmt.Fprint(w, " # ") + switch n.Pos().IsStmt() { + case src.PosNotStmt: + fmt.Fprint(w, "_") // "-" would be confusing + case src.PosIsStmt: + fmt.Fprint(w, "+") + } + sep := "" + base.Ctxt.AllPos(n.Pos(), func(pos src.Pos) { + fmt.Fprint(w, sep) + sep = " " + // TODO(mdempsky): Print line pragma details too. + file := filepath.Base(pos.Filename()) + // Note: this output will be parsed by ssa/html.go:(*HTMLWriter).WriteAST. Keep in sync. 
+ fmt.Fprintf(w, "%s:%d:%d", file, pos.Line(), pos.Col()) + }) + } +} + +func dumpNode(w io.Writer, n Node, depth int) { + indent(w, depth) + if depth > 40 { + fmt.Fprint(w, "...") + return + } + + if n == nil { + fmt.Fprint(w, "NilIrNode") + return + } + + if len(n.Init()) != 0 { + fmt.Fprintf(w, "%+v-init", n.Op()) + dumpNodes(w, n.Init(), depth+1) + indent(w, depth) + } + + switch n.Op() { + default: + fmt.Fprintf(w, "%+v", n.Op()) + dumpNodeHeader(w, n) + + case OLITERAL: + fmt.Fprintf(w, "%+v-%v", n.Op(), n.Val()) + dumpNodeHeader(w, n) + return + + case ONAME, ONONAME: + if n.Sym() != nil { + fmt.Fprintf(w, "%+v-%+v", n.Op(), n.Sym()) + } else { + fmt.Fprintf(w, "%+v", n.Op()) + } + dumpNodeHeader(w, n) + return + + case OLINKSYMOFFSET: + n := n.(*LinksymOffsetExpr) + fmt.Fprintf(w, "%+v-%v", n.Op(), n.Linksym) + // Offset is almost always 0, so only print when it's interesting. + if n.Offset_ != 0 { + fmt.Fprintf(w, "%+v", n.Offset_) + } + dumpNodeHeader(w, n) + + case OASOP: + n := n.(*AssignOpStmt) + fmt.Fprintf(w, "%+v-%+v", n.Op(), n.AsOp) + dumpNodeHeader(w, n) + + case OTYPE: + fmt.Fprintf(w, "%+v %+v", n.Op(), n.Sym()) + dumpNodeHeader(w, n) + return + + case OCLOSURE: + fmt.Fprintf(w, "%+v", n.Op()) + dumpNodeHeader(w, n) + + case ODCLFUNC: + // Func has many fields we don't want to print. + // Bypass reflection and just print what we want. 
+ n := n.(*Func) + fmt.Fprintf(w, "%+v", n.Op()) + dumpNodeHeader(w, n) + fn := n + if len(fn.Dcl) > 0 { + indent(w, depth) + fmt.Fprintf(w, "%+v-Dcl", n.Op()) + for _, dcl := range n.Dcl { + dumpNode(w, dcl, depth+1) + } + } + if len(fn.ClosureVars) > 0 { + indent(w, depth) + fmt.Fprintf(w, "%+v-ClosureVars", n.Op()) + for _, cv := range fn.ClosureVars { + dumpNode(w, cv, depth+1) + } + } + if len(fn.Body) > 0 { + indent(w, depth) + fmt.Fprintf(w, "%+v-body", n.Op()) + dumpNodes(w, fn.Body, depth+1) + } + return + } + + v := reflect.ValueOf(n).Elem() + t := reflect.TypeOf(n).Elem() + nf := t.NumField() + for i := 0; i < nf; i++ { + tf := t.Field(i) + vf := v.Field(i) + if tf.PkgPath != "" { + // skip unexported field - Interface will fail + continue + } + switch tf.Type.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Slice: + if vf.IsNil() { + continue + } + } + name := strings.TrimSuffix(tf.Name, "_") + // Do not bother with field name header lines for the + // most common positional arguments: unary, binary expr, + // index expr, send stmt, go and defer call expression. 
+ switch name { + case "X", "Y", "Index", "Chan", "Value", "Call": + name = "" + } + switch val := vf.Interface().(type) { + case Node: + if name != "" { + indent(w, depth) + fmt.Fprintf(w, "%+v-%s", n.Op(), name) + } + dumpNode(w, val, depth+1) + case Nodes: + if len(val) == 0 { + continue + } + if name != "" { + indent(w, depth) + fmt.Fprintf(w, "%+v-%s", n.Op(), name) + } + dumpNodes(w, val, depth+1) + default: + if vf.Kind() == reflect.Slice && vf.Type().Elem().Implements(nodeType) { + if vf.Len() == 0 { + continue + } + if name != "" { + indent(w, depth) + fmt.Fprintf(w, "%+v-%s", n.Op(), name) + } + for i, n := 0, vf.Len(); i < n; i++ { + dumpNode(w, vf.Index(i).Interface().(Node), depth+1) + } + } + } + } +} + +var nodeType = reflect.TypeOf((*Node)(nil)).Elem() + +func dumpNodes(w io.Writer, list Nodes, depth int) { + if len(list) == 0 { + fmt.Fprintf(w, " ") + return + } + + for _, n := range list { + dumpNode(w, n, depth) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/func.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/func.go new file mode 100644 index 0000000000000000000000000000000000000000..303c5e4fd0e53daea006dc7cfd60216cfc6675df --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/func.go @@ -0,0 +1,598 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" + "fmt" + "strings" + "unicode/utf8" +) + +// A Func corresponds to a single function in a Go program +// (and vice versa: each function is denoted by exactly one *Func). +// +// There are multiple nodes that represent a Func in the IR. +// +// The ONAME node (Func.Nname) is used for plain references to it. 
+// The ODCLFUNC node (the Func itself) is used for its declaration code. +// The OCLOSURE node (Func.OClosure) is used for a reference to a +// function literal. +// +// An imported function will have an ONAME node which points to a Func +// with an empty body. +// A declared function or method has an ODCLFUNC (the Func itself) and an ONAME. +// A function literal is represented directly by an OCLOSURE, but it also +// has an ODCLFUNC (and a matching ONAME) representing the compiled +// underlying form of the closure, which accesses the captured variables +// using a special data structure passed in a register. +// +// A method declaration is represented like functions, except f.Sym +// will be the qualified method name (e.g., "T.m"). +// +// A method expression (T.M) is represented as an OMETHEXPR node, +// in which n.Left and n.Right point to the type and method, respectively. +// Each distinct mention of a method expression in the source code +// constructs a fresh node. +// +// A method value (t.M) is represented by ODOTMETH/ODOTINTER +// when it is called directly and by OMETHVALUE otherwise. +// These are like method expressions, except that for ODOTMETH/ODOTINTER, +// the method name is stored in Sym instead of Right. +// Each OMETHVALUE ends up being implemented as a new +// function, a bit like a closure, with its own ODCLFUNC. +// The OMETHVALUE uses n.Func to record the linkage to +// the generated ODCLFUNC, but there is no +// pointer from the Func back to the OMETHVALUE. +type Func struct { + miniNode + Body Nodes + + Nname *Name // ONAME node + OClosure *ClosureExpr // OCLOSURE node + + // ONAME nodes for all params/locals for this func/closure, does NOT + // include closurevars until transforming closures during walk. + // Names must be listed PPARAMs, PPARAMOUTs, then PAUTOs, + // with PPARAMs and PPARAMOUTs in order corresponding to the function signature. 
+ // Anonymous and blank params are declared as ~pNN (for PPARAMs) and ~rNN (for PPARAMOUTs). + Dcl []*Name + + // ClosureVars lists the free variables that are used within a + // function literal, but formally declared in an enclosing + // function. The variables in this slice are the closure function's + // own copy of the variables, which are used within its function + // body. They will also each have IsClosureVar set, and will have + // Byval set if they're captured by value. + ClosureVars []*Name + + // Enclosed functions that need to be compiled. + // Populated during walk. + Closures []*Func + + // Parents records the parent scope of each scope within a + // function. The root scope (0) has no parent, so the i'th + // scope's parent is stored at Parents[i-1]. + Parents []ScopeID + + // Marks records scope boundary changes. + Marks []Mark + + FieldTrack map[*obj.LSym]struct{} + DebugInfo interface{} + LSym *obj.LSym // Linker object in this function's native ABI (Func.ABI) + + Inl *Inline + + // funcLitGen and goDeferGen track how many closures have been + // created in this function for function literals and go/defer + // wrappers, respectively. Used by closureName for creating unique + // function names. + // + // Tracking goDeferGen separately avoids wrappers throwing off + // function literal numbering (e.g., runtime/trace_test.TestTraceSymbolize.func11). + funcLitGen int32 + goDeferGen int32 + + Label int32 // largest auto-generated label in this function + + Endlineno src.XPos + WBPos src.XPos // position of first write barrier; see SetWBPos + + Pragma PragmaFlag // go:xxx function annotations + + flags bitset16 + + // ABI is a function's "definition" ABI. This is the ABI that + // this function's generated code is expecting to be called by. + // + // For most functions, this will be obj.ABIInternal. It may be + // a different ABI for functions defined in assembly or ABI wrappers. 
+ // + // This is included in the export data and tracked across packages. + ABI obj.ABI + // ABIRefs is the set of ABIs by which this function is referenced. + // For ABIs other than this function's definition ABI, the + // compiler generates ABI wrapper functions. This is only tracked + // within a package. + ABIRefs obj.ABISet + + NumDefers int32 // number of defer calls in the function + NumReturns int32 // number of explicit returns in the function + + // NWBRCalls records the LSyms of functions called by this + // function for go:nowritebarrierrec analysis. Only filled in + // if nowritebarrierrecCheck != nil. + NWBRCalls *[]SymAndPos + + // For wrapper functions, WrappedFunc point to the original Func. + // Currently only used for go/defer wrappers. + WrappedFunc *Func + + // WasmImport is used by the //go:wasmimport directive to store info about + // a WebAssembly function import. + WasmImport *WasmImport +} + +// WasmImport stores metadata associated with the //go:wasmimport pragma. +type WasmImport struct { + Module string + Name string +} + +// NewFunc returns a new Func with the given name and type. +// +// fpos is the position of the "func" token, and npos is the position +// of the name identifier. +// +// TODO(mdempsky): I suspect there's no need for separate fpos and +// npos. +func NewFunc(fpos, npos src.XPos, sym *types.Sym, typ *types.Type) *Func { + name := NewNameAt(npos, sym, typ) + name.Class = PFUNC + sym.SetFunc(true) + + fn := &Func{Nname: name} + fn.pos = fpos + fn.op = ODCLFUNC + // Most functions are ABIInternal. The importer or symabis + // pass may override this. 
+ fn.ABI = obj.ABIInternal + fn.SetTypecheck(1) + + name.Func = fn + + return fn +} + +func (f *Func) isStmt() {} + +func (n *Func) copy() Node { panic(n.no("copy")) } +func (n *Func) doChildren(do func(Node) bool) bool { return doNodes(n.Body, do) } +func (n *Func) editChildren(edit func(Node) Node) { editNodes(n.Body, edit) } +func (n *Func) editChildrenWithHidden(edit func(Node) Node) { editNodes(n.Body, edit) } + +func (f *Func) Type() *types.Type { return f.Nname.Type() } +func (f *Func) Sym() *types.Sym { return f.Nname.Sym() } +func (f *Func) Linksym() *obj.LSym { return f.Nname.Linksym() } +func (f *Func) LinksymABI(abi obj.ABI) *obj.LSym { return f.Nname.LinksymABI(abi) } + +// An Inline holds fields used for function bodies that can be inlined. +type Inline struct { + Cost int32 // heuristic cost of inlining this function + + // Copy of Func.Dcl for use during inlining. This copy is needed + // because the function's Dcl may change from later compiler + // transformations. This field is also populated when a function + // from another package is imported and inlined. + Dcl []*Name + HaveDcl bool // whether we've loaded Dcl + + // Function properties, encoded as a string (these are used for + // making inlining decisions). See cmd/compile/internal/inline/inlheur. + Properties string + + // CanDelayResults reports whether it's safe for the inliner to delay + // initializing the result parameters until immediately before the + // "return" statement. + CanDelayResults bool +} + +// A Mark represents a scope boundary. +type Mark struct { + // Pos is the position of the token that marks the scope + // change. + Pos src.XPos + + // Scope identifies the innermost scope to the right of Pos. + Scope ScopeID +} + +// A ScopeID represents a lexical scope within a function. 
+type ScopeID int32 + +const ( + funcDupok = 1 << iota // duplicate definitions ok + funcWrapper // hide frame from users (elide in tracebacks, don't count as a frame for recover()) + funcABIWrapper // is an ABI wrapper (also set flagWrapper) + funcNeedctxt // function uses context register (has closure variables) + // true if closure inside a function; false if a simple function or a + // closure in a global variable initialization + funcIsHiddenClosure + funcIsDeadcodeClosure // true if closure is deadcode + funcHasDefer // contains a defer statement + funcNilCheckDisabled // disable nil checks when compiling this function + funcInlinabilityChecked // inliner has already determined whether the function is inlinable + funcNeverReturns // function never returns (in most cases calls panic(), os.Exit(), or equivalent) + funcOpenCodedDeferDisallowed // can't do open-coded defers + funcClosureResultsLost // closure is called indirectly and we lost track of its results; used by escape analysis + funcPackageInit // compiler emitted .init func for package +) + +type SymAndPos struct { + Sym *obj.LSym // LSym of callee + Pos src.XPos // line of call +} + +func (f *Func) Dupok() bool { return f.flags&funcDupok != 0 } +func (f *Func) Wrapper() bool { return f.flags&funcWrapper != 0 } +func (f *Func) ABIWrapper() bool { return f.flags&funcABIWrapper != 0 } +func (f *Func) Needctxt() bool { return f.flags&funcNeedctxt != 0 } +func (f *Func) IsHiddenClosure() bool { return f.flags&funcIsHiddenClosure != 0 } +func (f *Func) IsDeadcodeClosure() bool { return f.flags&funcIsDeadcodeClosure != 0 } +func (f *Func) HasDefer() bool { return f.flags&funcHasDefer != 0 } +func (f *Func) NilCheckDisabled() bool { return f.flags&funcNilCheckDisabled != 0 } +func (f *Func) InlinabilityChecked() bool { return f.flags&funcInlinabilityChecked != 0 } +func (f *Func) NeverReturns() bool { return f.flags&funcNeverReturns != 0 } +func (f *Func) OpenCodedDeferDisallowed() bool { return 
f.flags&funcOpenCodedDeferDisallowed != 0 } +func (f *Func) ClosureResultsLost() bool { return f.flags&funcClosureResultsLost != 0 } +func (f *Func) IsPackageInit() bool { return f.flags&funcPackageInit != 0 } + +func (f *Func) SetDupok(b bool) { f.flags.set(funcDupok, b) } +func (f *Func) SetWrapper(b bool) { f.flags.set(funcWrapper, b) } +func (f *Func) SetABIWrapper(b bool) { f.flags.set(funcABIWrapper, b) } +func (f *Func) SetNeedctxt(b bool) { f.flags.set(funcNeedctxt, b) } +func (f *Func) SetIsHiddenClosure(b bool) { f.flags.set(funcIsHiddenClosure, b) } +func (f *Func) SetIsDeadcodeClosure(b bool) { f.flags.set(funcIsDeadcodeClosure, b) } +func (f *Func) SetHasDefer(b bool) { f.flags.set(funcHasDefer, b) } +func (f *Func) SetNilCheckDisabled(b bool) { f.flags.set(funcNilCheckDisabled, b) } +func (f *Func) SetInlinabilityChecked(b bool) { f.flags.set(funcInlinabilityChecked, b) } +func (f *Func) SetNeverReturns(b bool) { f.flags.set(funcNeverReturns, b) } +func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) } +func (f *Func) SetClosureResultsLost(b bool) { f.flags.set(funcClosureResultsLost, b) } +func (f *Func) SetIsPackageInit(b bool) { f.flags.set(funcPackageInit, b) } + +func (f *Func) SetWBPos(pos src.XPos) { + if base.Debug.WB != 0 { + base.WarnfAt(pos, "write barrier") + } + if !f.WBPos.IsKnown() { + f.WBPos = pos + } +} + +// FuncName returns the name (without the package) of the function f. +func FuncName(f *Func) string { + if f == nil || f.Nname == nil { + return "" + } + return f.Sym().Name +} + +// PkgFuncName returns the name of the function referenced by f, with package +// prepended. +// +// This differs from the compiler's internal convention where local functions +// lack a package. This is primarily useful when the ultimate consumer of this +// is a human looking at message. 
+func PkgFuncName(f *Func) string { + if f == nil || f.Nname == nil { + return "" + } + s := f.Sym() + pkg := s.Pkg + + return pkg.Path + "." + s.Name +} + +// LinkFuncName returns the name of the function f, as it will appear in the +// symbol table of the final linked binary. +func LinkFuncName(f *Func) string { + if f == nil || f.Nname == nil { + return "" + } + s := f.Sym() + pkg := s.Pkg + + return objabi.PathToPrefix(pkg.Path) + "." + s.Name +} + +// ParseLinkFuncName parsers a symbol name (as returned from LinkFuncName) back +// to the package path and local symbol name. +func ParseLinkFuncName(name string) (pkg, sym string, err error) { + pkg, sym = splitPkg(name) + if pkg == "" { + return "", "", fmt.Errorf("no package path in name") + } + + pkg, err = objabi.PrefixToPath(pkg) // unescape + if err != nil { + return "", "", fmt.Errorf("malformed package path: %v", err) + } + + return pkg, sym, nil +} + +// Borrowed from x/mod. +func modPathOK(r rune) bool { + if r < utf8.RuneSelf { + return r == '-' || r == '.' || r == '_' || r == '~' || + '0' <= r && r <= '9' || + 'A' <= r && r <= 'Z' || + 'a' <= r && r <= 'z' + } + return false +} + +func escapedImportPathOK(r rune) bool { + return modPathOK(r) || r == '+' || r == '/' || r == '%' +} + +// splitPkg splits the full linker symbol name into package and local symbol +// name. +func splitPkg(name string) (pkgpath, sym string) { + // package-sym split is at first dot after last the / that comes before + // any characters illegal in a package path. + + lastSlashIdx := 0 + for i, r := range name { + // Catches cases like: + // * example.foo[sync/atomic.Uint64]. + // * example%2ecom.foo[sync/atomic.Uint64]. + // + // Note that name is still escaped; unescape occurs after splitPkg. + if !escapedImportPathOK(r) { + break + } + if r == '/' { + lastSlashIdx = i + } + } + for i := lastSlashIdx; i < len(name); i++ { + r := name[i] + if r == '.' 
{ + return name[:i], name[i+1:] + } + } + + return "", name +} + +var CurFunc *Func + +// WithFunc invokes do with CurFunc and base.Pos set to curfn and +// curfn.Pos(), respectively, and then restores their previous values +// before returning. +func WithFunc(curfn *Func, do func()) { + oldfn, oldpos := CurFunc, base.Pos + defer func() { CurFunc, base.Pos = oldfn, oldpos }() + + CurFunc, base.Pos = curfn, curfn.Pos() + do() +} + +func FuncSymName(s *types.Sym) string { + return s.Name + "·f" +} + +// ClosureDebugRuntimeCheck applies boilerplate checks for debug flags +// and compiling runtime. +func ClosureDebugRuntimeCheck(clo *ClosureExpr) { + if base.Debug.Closure > 0 { + if clo.Esc() == EscHeap { + base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func.ClosureVars) + } else { + base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars) + } + } + if base.Flag.CompilingRuntime && clo.Esc() == EscHeap && !clo.IsGoWrap { + base.ErrorfAt(clo.Pos(), 0, "heap-allocated closure %s, not allowed in runtime", FuncName(clo.Func)) + } +} + +// IsTrivialClosure reports whether closure clo has an +// empty list of captured vars. +func IsTrivialClosure(clo *ClosureExpr) bool { + return len(clo.Func.ClosureVars) == 0 +} + +// globClosgen is like Func.Closgen, but for the global scope. +var globClosgen int32 + +// closureName generates a new unique name for a closure within outerfn at pos. +func closureName(outerfn *Func, pos src.XPos, why Op) *types.Sym { + pkg := types.LocalPkg + outer := "glob." + var prefix string + switch why { + default: + base.FatalfAt(pos, "closureName: bad Op: %v", why) + case OCLOSURE: + if outerfn == nil || outerfn.OClosure == nil { + prefix = "func" + } + case OGO: + prefix = "gowrap" + case ODEFER: + prefix = "deferwrap" + } + gen := &globClosgen + + // There may be multiple functions named "_". In those + // cases, we can't use their individual Closgens as it + // would lead to name clashes. 
+ if outerfn != nil && !IsBlank(outerfn.Nname) { + pkg = outerfn.Sym().Pkg + outer = FuncName(outerfn) + + if why == OCLOSURE { + gen = &outerfn.funcLitGen + } else { + gen = &outerfn.goDeferGen + } + } + + // If this closure was created due to inlining, then incorporate any + // inlined functions' names into the closure's linker symbol name + // too (#60324). + if inlIndex := base.Ctxt.InnermostPos(pos).Base().InliningIndex(); inlIndex >= 0 { + names := []string{outer} + base.Ctxt.InlTree.AllParents(inlIndex, func(call obj.InlinedCall) { + names = append(names, call.Name) + }) + outer = strings.Join(names, ".") + } + + *gen++ + return pkg.Lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen)) +} + +// NewClosureFunc creates a new Func to represent a function literal +// with the given type. +// +// fpos the position used for the underlying ODCLFUNC and ONAME, +// whereas cpos is the position used for the OCLOSURE. They're +// separate because in the presence of inlining, the OCLOSURE node +// should have an inline-adjusted position, whereas the ODCLFUNC and +// ONAME must not. +// +// outerfn is the enclosing function, if any. The returned function is +// appending to pkg.Funcs. +// +// why is the reason we're generating this Func. It can be OCLOSURE +// (for a normal function literal) or OGO or ODEFER (for wrapping a +// call expression that has parameters or results). +func NewClosureFunc(fpos, cpos src.XPos, why Op, typ *types.Type, outerfn *Func, pkg *Package) *Func { + fn := NewFunc(fpos, fpos, closureName(outerfn, cpos, why), typ) + fn.SetIsHiddenClosure(outerfn != nil) + + clo := &ClosureExpr{Func: fn} + clo.op = OCLOSURE + clo.pos = cpos + clo.SetType(typ) + clo.SetTypecheck(1) + fn.OClosure = clo + + fn.Nname.Defn = fn + pkg.Funcs = append(pkg.Funcs, fn) + + return fn +} + +// IsFuncPCIntrinsic returns whether n is a direct call of internal/abi.FuncPCABIxxx functions. 
+func IsFuncPCIntrinsic(n *CallExpr) bool { + if n.Op() != OCALLFUNC || n.Fun.Op() != ONAME { + return false + } + fn := n.Fun.(*Name).Sym() + return (fn.Name == "FuncPCABI0" || fn.Name == "FuncPCABIInternal") && + fn.Pkg.Path == "internal/abi" +} + +// IsIfaceOfFunc inspects whether n is an interface conversion from a direct +// reference of a func. If so, it returns referenced Func; otherwise nil. +// +// This is only usable before walk.walkConvertInterface, which converts to an +// OMAKEFACE. +func IsIfaceOfFunc(n Node) *Func { + if n, ok := n.(*ConvExpr); ok && n.Op() == OCONVIFACE { + if name, ok := n.X.(*Name); ok && name.Op() == ONAME && name.Class == PFUNC { + return name.Func + } + } + return nil +} + +// FuncPC returns a uintptr-typed expression that evaluates to the PC of a +// function as uintptr, as returned by internal/abi.FuncPC{ABI0,ABIInternal}. +// +// n should be a Node of an interface type, as is passed to +// internal/abi.FuncPC{ABI0,ABIInternal}. +// +// TODO(prattmic): Since n is simply an interface{} there is no assertion that +// it is actually a function at all. Perhaps we should emit a runtime type +// assertion? +func FuncPC(pos src.XPos, n Node, wantABI obj.ABI) Node { + if !n.Type().IsInterface() { + base.ErrorfAt(pos, 0, "internal/abi.FuncPC%s expects an interface value, got %v", wantABI, n.Type()) + } + + if fn := IsIfaceOfFunc(n); fn != nil { + name := fn.Nname + abi := fn.ABI + if abi != wantABI { + base.ErrorfAt(pos, 0, "internal/abi.FuncPC%s expects an %v function, %s is defined as %v", wantABI, wantABI, name.Sym().Name, abi) + } + var e Node = NewLinksymExpr(pos, name.Sym().LinksymABI(abi), types.Types[types.TUINTPTR]) + e = NewAddrExpr(pos, e) + e.SetType(types.Types[types.TUINTPTR].PtrTo()) + e = NewConvExpr(pos, OCONVNOP, types.Types[types.TUINTPTR], e) + e.SetTypecheck(1) + return e + } + // fn is not a defined function. It must be ABIInternal. + // Read the address from func value, i.e. *(*uintptr)(idata(fn)). 
+ if wantABI != obj.ABIInternal { + base.ErrorfAt(pos, 0, "internal/abi.FuncPC%s does not accept func expression, which is ABIInternal", wantABI) + } + var e Node = NewUnaryExpr(pos, OIDATA, n) + e.SetType(types.Types[types.TUINTPTR].PtrTo()) + e.SetTypecheck(1) + e = NewStarExpr(pos, e) + e.SetType(types.Types[types.TUINTPTR]) + e.SetTypecheck(1) + return e +} + +// DeclareParams creates Names for all of the parameters in fn's +// signature and adds them to fn.Dcl. +// +// If setNname is true, then it also sets types.Field.Nname for each +// parameter. +func (fn *Func) DeclareParams(setNname bool) { + if fn.Dcl != nil { + base.FatalfAt(fn.Pos(), "%v already has Dcl", fn) + } + + declareParams := func(params []*types.Field, ctxt Class, prefix string, offset int) { + for i, param := range params { + sym := param.Sym + if sym == nil || sym.IsBlank() { + sym = fn.Sym().Pkg.LookupNum(prefix, i) + } + + name := NewNameAt(param.Pos, sym, param.Type) + name.Class = ctxt + name.Curfn = fn + fn.Dcl[offset+i] = name + + if setNname { + param.Nname = name + } + } + } + + sig := fn.Type() + params := sig.RecvParams() + results := sig.Results() + + fn.Dcl = make([]*Name, len(params)+len(results)) + declareParams(params, PPARAM, "~p", 0) + declareParams(results, PPARAMOUT, "~r", len(params)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/func_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/func_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5b40c02dc4f2ad81fecf354380b008dc6f5949a0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/func_test.go @@ -0,0 +1,82 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ir + +import ( + "testing" +) + +func TestSplitPkg(t *testing.T) { + tests := []struct { + in string + pkg string + sym string + }{ + { + in: "foo.Bar", + pkg: "foo", + sym: "Bar", + }, + { + in: "foo/bar.Baz", + pkg: "foo/bar", + sym: "Baz", + }, + { + in: "memeqbody", + pkg: "", + sym: "memeqbody", + }, + { + in: `example%2ecom.Bar`, + pkg: `example%2ecom`, + sym: "Bar", + }, + { + // Not a real generated symbol name, but easier to catch the general parameter form. + in: `foo.Bar[sync/atomic.Uint64]`, + pkg: `foo`, + sym: "Bar[sync/atomic.Uint64]", + }, + { + in: `example%2ecom.Bar[sync/atomic.Uint64]`, + pkg: `example%2ecom`, + sym: "Bar[sync/atomic.Uint64]", + }, + { + in: `gopkg.in/yaml%2ev3.Bar[sync/atomic.Uint64]`, + pkg: `gopkg.in/yaml%2ev3`, + sym: "Bar[sync/atomic.Uint64]", + }, + { + // This one is a real symbol name. + in: `foo.Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]`, + pkg: `foo`, + sym: "Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]", + }, + { + in: `example%2ecom.Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]`, + pkg: `example%2ecom`, + sym: "Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]", + }, + { + in: `gopkg.in/yaml%2ev3.Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]`, + pkg: `gopkg.in/yaml%2ev3`, + sym: "Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]", + }, + } + + for _, tc := range tests { + t.Run(tc.in, func(t *testing.T) { + pkg, sym := splitPkg(tc.in) + if pkg != tc.pkg { + t.Errorf("splitPkg(%q) got pkg %q want %q", tc.in, pkg, tc.pkg) + } + if sym != tc.sym { + t.Errorf("splitPkg(%q) got sym %q want %q", tc.in, sym, 
tc.sym) + } + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/ir.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/ir.go new file mode 100644 index 0000000000000000000000000000000000000000..82224ca2ed8350660cb412acb7e483e27e073ff3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/ir.go @@ -0,0 +1,5 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/mini.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/mini.go new file mode 100644 index 0000000000000000000000000000000000000000..52c622df2306e6f29f46bdf650fc98f25abf8fe5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/mini.go @@ -0,0 +1,86 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run mknode.go + +package ir + +import ( + "cmd/compile/internal/types" + "cmd/internal/src" + "fmt" + "go/constant" +) + +// A miniNode is a minimal node implementation, +// meant to be embedded as the first field in a larger node implementation, +// at a cost of 8 bytes. +// +// A miniNode is NOT a valid Node by itself: the embedding struct +// must at the least provide: +// +// func (n *MyNode) String() string { return fmt.Sprint(n) } +// func (n *MyNode) rawCopy() Node { c := *n; return &c } +// func (n *MyNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) } +// +// The embedding struct should also fill in n.op in its constructor, +// for more useful panic messages when invalid methods are called, +// instead of implementing Op itself. +type miniNode struct { + pos src.XPos // uint32 + op Op // uint8 + bits bitset8 + esc uint16 +} + +// posOr returns pos if known, or else n.pos. 
+// For use in DeepCopy. +func (n *miniNode) posOr(pos src.XPos) src.XPos { + if pos.IsKnown() { + return pos + } + return n.pos +} + +// op can be read, but not written. +// An embedding implementation can provide a SetOp if desired. +// (The panicking SetOp is with the other panics below.) +func (n *miniNode) Op() Op { return n.op } +func (n *miniNode) Pos() src.XPos { return n.pos } +func (n *miniNode) SetPos(x src.XPos) { n.pos = x } +func (n *miniNode) Esc() uint16 { return n.esc } +func (n *miniNode) SetEsc(x uint16) { n.esc = x } + +const ( + miniTypecheckShift = 0 + miniWalked = 1 << 2 // to prevent/catch re-walking +) + +func (n *miniNode) Typecheck() uint8 { return n.bits.get2(miniTypecheckShift) } +func (n *miniNode) SetTypecheck(x uint8) { + if x > 2 { + panic(fmt.Sprintf("cannot SetTypecheck %d", x)) + } + n.bits.set2(miniTypecheckShift, x) +} + +func (n *miniNode) Walked() bool { return n.bits&miniWalked != 0 } +func (n *miniNode) SetWalked(x bool) { n.bits.set(miniWalked, x) } + +// Empty, immutable graph structure. + +func (n *miniNode) Init() Nodes { return Nodes{} } + +// Additional functionality unavailable. 
+ +func (n *miniNode) no(name string) string { return "cannot " + name + " on " + n.op.String() } + +func (n *miniNode) Type() *types.Type { return nil } +func (n *miniNode) SetType(*types.Type) { panic(n.no("SetType")) } +func (n *miniNode) Name() *Name { return nil } +func (n *miniNode) Sym() *types.Sym { return nil } +func (n *miniNode) Val() constant.Value { panic(n.no("Val")) } +func (n *miniNode) SetVal(v constant.Value) { panic(n.no("SetVal")) } +func (n *miniNode) NonNil() bool { return false } +func (n *miniNode) MarkNonNil() { panic(n.no("MarkNonNil")) } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/mknode.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/mknode.go new file mode 100644 index 0000000000000000000000000000000000000000..ca78a03d048ffab05bae48a02b306e1fa54a50fb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/mknode.go @@ -0,0 +1,366 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +// Note: this program must be run in this directory. +// go run mknode.go + +package main + +import ( + "bytes" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/token" + "io/fs" + "log" + "os" + "sort" + "strings" +) + +var fset = token.NewFileSet() + +var buf bytes.Buffer + +// concreteNodes contains all concrete types in the package that implement Node +// (except for the mini* types). +var concreteNodes []*ast.TypeSpec + +// interfaceNodes contains all interface types in the package that implement Node. +var interfaceNodes []*ast.TypeSpec + +// mini contains the embeddable mini types (miniNode, miniExpr, and miniStmt). +var mini = map[string]*ast.TypeSpec{} + +// implementsNode reports whether the type t is one which represents a Node +// in the AST. 
+func implementsNode(t ast.Expr) bool { + id, ok := t.(*ast.Ident) + if !ok { + return false // only named types + } + for _, ts := range interfaceNodes { + if ts.Name.Name == id.Name { + return true + } + } + for _, ts := range concreteNodes { + if ts.Name.Name == id.Name { + return true + } + } + return false +} + +func isMini(t ast.Expr) bool { + id, ok := t.(*ast.Ident) + return ok && mini[id.Name] != nil +} + +func isNamedType(t ast.Expr, name string) bool { + if id, ok := t.(*ast.Ident); ok { + if id.Name == name { + return true + } + } + return false +} + +func main() { + fmt.Fprintln(&buf, "// Code generated by mknode.go. DO NOT EDIT.") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, "package ir") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, `import "fmt"`) + + filter := func(file fs.FileInfo) bool { + return !strings.HasPrefix(file.Name(), "mknode") + } + pkgs, err := parser.ParseDir(fset, ".", filter, 0) + if err != nil { + panic(err) + } + pkg := pkgs["ir"] + + // Find all the mini types. These let us determine which + // concrete types implement Node, so we need to find them first. + for _, f := range pkg.Files { + for _, d := range f.Decls { + g, ok := d.(*ast.GenDecl) + if !ok { + continue + } + for _, s := range g.Specs { + t, ok := s.(*ast.TypeSpec) + if !ok { + continue + } + if strings.HasPrefix(t.Name.Name, "mini") { + mini[t.Name.Name] = t + // Double-check that it is or embeds miniNode. + if t.Name.Name != "miniNode" { + s := t.Type.(*ast.StructType) + if !isNamedType(s.Fields.List[0].Type, "miniNode") { + panic(fmt.Sprintf("can't find miniNode in %s", t.Name.Name)) + } + } + } + } + } + } + + // Find all the declarations of concrete types that implement Node. 
+ for _, f := range pkg.Files { + for _, d := range f.Decls { + g, ok := d.(*ast.GenDecl) + if !ok { + continue + } + for _, s := range g.Specs { + t, ok := s.(*ast.TypeSpec) + if !ok { + continue + } + if strings.HasPrefix(t.Name.Name, "mini") { + // We don't treat the mini types as + // concrete implementations of Node + // (even though they are) because + // we only use them by embedding them. + continue + } + if isConcreteNode(t) { + concreteNodes = append(concreteNodes, t) + } + if isInterfaceNode(t) { + interfaceNodes = append(interfaceNodes, t) + } + } + } + } + // Sort for deterministic output. + sort.Slice(concreteNodes, func(i, j int) bool { + return concreteNodes[i].Name.Name < concreteNodes[j].Name.Name + }) + // Generate code for each concrete type. + for _, t := range concreteNodes { + processType(t) + } + // Add some helpers. + generateHelpers() + + // Format and write output. + out, err := format.Source(buf.Bytes()) + if err != nil { + // write out mangled source so we can see the bug. + out = buf.Bytes() + } + err = os.WriteFile("node_gen.go", out, 0666) + if err != nil { + log.Fatal(err) + } +} + +// isConcreteNode reports whether the type t is a concrete type +// implementing Node. +func isConcreteNode(t *ast.TypeSpec) bool { + s, ok := t.Type.(*ast.StructType) + if !ok { + return false + } + for _, f := range s.Fields.List { + if isMini(f.Type) { + return true + } + } + return false +} + +// isInterfaceNode reports whether the type t is an interface type +// implementing Node (including Node itself). +func isInterfaceNode(t *ast.TypeSpec) bool { + s, ok := t.Type.(*ast.InterfaceType) + if !ok { + return false + } + if t.Name.Name == "Node" { + return true + } + if t.Name.Name == "OrigNode" || t.Name.Name == "InitNode" { + // These we exempt from consideration (fields of + // this type don't need to be walked or copied). + return false + } + + // Look for embedded Node type. 
+ // Note that this doesn't handle multi-level embedding, but + // we have none of that at the moment. + for _, f := range s.Methods.List { + if len(f.Names) != 0 { + continue + } + if isNamedType(f.Type, "Node") { + return true + } + } + return false +} + +func processType(t *ast.TypeSpec) { + name := t.Name.Name + fmt.Fprintf(&buf, "\n") + fmt.Fprintf(&buf, "func (n *%s) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }\n", name) + + switch name { + case "Name", "Func": + // Too specialized to automate. + return + } + + s := t.Type.(*ast.StructType) + fields := s.Fields.List + + // Expand any embedded fields. + for i := 0; i < len(fields); i++ { + f := fields[i] + if len(f.Names) != 0 { + continue // not embedded + } + if isMini(f.Type) { + // Insert the fields of the embedded type into the main type. + // (It would be easier just to append, but inserting in place + // matches the old mknode behavior.) + ss := mini[f.Type.(*ast.Ident).Name].Type.(*ast.StructType) + var f2 []*ast.Field + f2 = append(f2, fields[:i]...) + f2 = append(f2, ss.Fields.List...) + f2 = append(f2, fields[i+1:]...) + fields = f2 + i-- + continue + } else if isNamedType(f.Type, "origNode") { + // Ignore this field + copy(fields[i:], fields[i+1:]) + fields = fields[:len(fields)-1] + i-- + continue + } else { + panic("unknown embedded field " + fmt.Sprintf("%v", f.Type)) + } + } + // Process fields. 
+ var copyBody strings.Builder + var doChildrenBody strings.Builder + var editChildrenBody strings.Builder + var editChildrenWithHiddenBody strings.Builder + for _, f := range fields { + names := f.Names + ft := f.Type + hidden := false + if f.Tag != nil { + tag := f.Tag.Value[1 : len(f.Tag.Value)-1] + if strings.HasPrefix(tag, "mknode:") { + if tag[7:] == "\"-\"" { + if !isNamedType(ft, "Node") { + continue + } + hidden = true + } else { + panic(fmt.Sprintf("unexpected tag value: %s", tag)) + } + } + } + if isNamedType(ft, "Nodes") { + // Nodes == []Node + ft = &ast.ArrayType{Elt: &ast.Ident{Name: "Node"}} + } + isSlice := false + if a, ok := ft.(*ast.ArrayType); ok && a.Len == nil { + isSlice = true + ft = a.Elt + } + isPtr := false + if p, ok := ft.(*ast.StarExpr); ok { + isPtr = true + ft = p.X + } + if !implementsNode(ft) { + continue + } + for _, name := range names { + ptr := "" + if isPtr { + ptr = "*" + } + if isSlice { + fmt.Fprintf(&editChildrenWithHiddenBody, + "edit%ss(n.%s, edit)\n", ft, name) + } else { + fmt.Fprintf(&editChildrenWithHiddenBody, + "if n.%s != nil {\nn.%s = edit(n.%s).(%s%s)\n}\n", name, name, name, ptr, ft) + } + if hidden { + continue + } + if isSlice { + fmt.Fprintf(©Body, "c.%s = copy%ss(c.%s)\n", name, ft, name) + fmt.Fprintf(&doChildrenBody, + "if do%ss(n.%s, do) {\nreturn true\n}\n", ft, name) + fmt.Fprintf(&editChildrenBody, + "edit%ss(n.%s, edit)\n", ft, name) + } else { + fmt.Fprintf(&doChildrenBody, + "if n.%s != nil && do(n.%s) {\nreturn true\n}\n", name, name) + fmt.Fprintf(&editChildrenBody, + "if n.%s != nil {\nn.%s = edit(n.%s).(%s%s)\n}\n", name, name, name, ptr, ft) + } + } + } + fmt.Fprintf(&buf, "func (n *%s) copy() Node {\nc := *n\n", name) + buf.WriteString(copyBody.String()) + fmt.Fprintf(&buf, "return &c\n}\n") + fmt.Fprintf(&buf, "func (n *%s) doChildren(do func(Node) bool) bool {\n", name) + buf.WriteString(doChildrenBody.String()) + fmt.Fprintf(&buf, "return false\n}\n") + fmt.Fprintf(&buf, "func (n *%s) 
editChildren(edit func(Node) Node) {\n", name) + buf.WriteString(editChildrenBody.String()) + fmt.Fprintf(&buf, "}\n") + fmt.Fprintf(&buf, "func (n *%s) editChildrenWithHidden(edit func(Node) Node) {\n", name) + buf.WriteString(editChildrenWithHiddenBody.String()) + fmt.Fprintf(&buf, "}\n") +} + +func generateHelpers() { + for _, typ := range []string{"CaseClause", "CommClause", "Name", "Node"} { + ptr := "*" + if typ == "Node" { + ptr = "" // interfaces don't need * + } + fmt.Fprintf(&buf, "\n") + fmt.Fprintf(&buf, "func copy%ss(list []%s%s) []%s%s {\n", typ, ptr, typ, ptr, typ) + fmt.Fprintf(&buf, "if list == nil { return nil }\n") + fmt.Fprintf(&buf, "c := make([]%s%s, len(list))\n", ptr, typ) + fmt.Fprintf(&buf, "copy(c, list)\n") + fmt.Fprintf(&buf, "return c\n") + fmt.Fprintf(&buf, "}\n") + fmt.Fprintf(&buf, "func do%ss(list []%s%s, do func(Node) bool) bool {\n", typ, ptr, typ) + fmt.Fprintf(&buf, "for _, x := range list {\n") + fmt.Fprintf(&buf, "if x != nil && do(x) {\n") + fmt.Fprintf(&buf, "return true\n") + fmt.Fprintf(&buf, "}\n") + fmt.Fprintf(&buf, "}\n") + fmt.Fprintf(&buf, "return false\n") + fmt.Fprintf(&buf, "}\n") + fmt.Fprintf(&buf, "func edit%ss(list []%s%s, edit func(Node) Node) {\n", typ, ptr, typ) + fmt.Fprintf(&buf, "for i, x := range list {\n") + fmt.Fprintf(&buf, "if x != nil {\n") + fmt.Fprintf(&buf, "list[i] = edit(x).(%s%s)\n", ptr, typ) + fmt.Fprintf(&buf, "}\n") + fmt.Fprintf(&buf, "}\n") + fmt.Fprintf(&buf, "}\n") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/name.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/name.go new file mode 100644 index 0000000000000000000000000000000000000000..2844c0b8699bd9dd1113384bf8749c758f7eacc3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/name.go @@ -0,0 +1,399 @@ +// Copyright 2020 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" + "fmt" + + "go/constant" +) + +// An Ident is an identifier, possibly qualified. +type Ident struct { + miniExpr + sym *types.Sym +} + +func NewIdent(pos src.XPos, sym *types.Sym) *Ident { + n := new(Ident) + n.op = ONONAME + n.pos = pos + n.sym = sym + return n +} + +func (n *Ident) Sym() *types.Sym { return n.sym } + +// Name holds Node fields used only by named nodes (ONAME, OTYPE, some OLITERAL). +type Name struct { + miniExpr + BuiltinOp Op // uint8 + Class Class // uint8 + pragma PragmaFlag // int16 + flags bitset16 + DictIndex uint16 // index of the dictionary entry describing the type of this variable declaration plus 1 + sym *types.Sym + Func *Func // TODO(austin): nil for I.M + Offset_ int64 + val constant.Value + Opt interface{} // for use by escape analysis + Embed *[]Embed // list of embedded files, for ONAME var + + // For a local variable (not param) or extern, the initializing assignment (OAS or OAS2). + // For a closure var, the ONAME node of the original (outermost) captured variable. + // For the case-local variables of a type switch, the type switch guard (OTYPESW). + // For a range variable, the range statement (ORANGE) + // For a recv variable in a case of a select statement, the receive assignment (OSELRECV2) + // For the name of a function, points to corresponding Func node. + Defn Node + + // The function, method, or closure in which local variable or param is declared. + Curfn *Func + + Heapaddr *Name // temp holding heap address of param + + // Outer points to the immediately enclosing function's copy of this + // closure variable. If not a closure variable, then Outer is nil. 
+ Outer *Name +} + +func (n *Name) isExpr() {} + +func (n *Name) copy() Node { panic(n.no("copy")) } +func (n *Name) doChildren(do func(Node) bool) bool { return false } +func (n *Name) editChildren(edit func(Node) Node) {} +func (n *Name) editChildrenWithHidden(edit func(Node) Node) {} + +// RecordFrameOffset records the frame offset for the name. +// It is used by package types when laying out function arguments. +func (n *Name) RecordFrameOffset(offset int64) { + n.SetFrameOffset(offset) +} + +// NewNameAt returns a new ONAME Node associated with symbol s at position pos. +// The caller is responsible for setting Curfn. +func NewNameAt(pos src.XPos, sym *types.Sym, typ *types.Type) *Name { + if sym == nil { + base.Fatalf("NewNameAt nil") + } + n := newNameAt(pos, ONAME, sym) + if typ != nil { + n.SetType(typ) + n.SetTypecheck(1) + } + return n +} + +// NewBuiltin returns a new Name representing a builtin function, +// either predeclared or from package unsafe. +func NewBuiltin(sym *types.Sym, op Op) *Name { + n := newNameAt(src.NoXPos, ONAME, sym) + n.BuiltinOp = op + n.SetTypecheck(1) + sym.Def = n + return n +} + +// NewLocal returns a new function-local variable with the given name and type. +func (fn *Func) NewLocal(pos src.XPos, sym *types.Sym, typ *types.Type) *Name { + if fn.Dcl == nil { + base.FatalfAt(pos, "must call DeclParams on %v first", fn) + } + + n := NewNameAt(pos, sym, typ) + n.Class = PAUTO + n.Curfn = fn + fn.Dcl = append(fn.Dcl, n) + return n +} + +// NewDeclNameAt returns a new Name associated with symbol s at position pos. +// The caller is responsible for setting Curfn. +func NewDeclNameAt(pos src.XPos, op Op, sym *types.Sym) *Name { + if sym == nil { + base.Fatalf("NewDeclNameAt nil") + } + switch op { + case ONAME, OTYPE, OLITERAL: + // ok + default: + base.Fatalf("NewDeclNameAt op %v", op) + } + return newNameAt(pos, op, sym) +} + +// NewConstAt returns a new OLITERAL Node associated with symbol s at position pos. 
+func NewConstAt(pos src.XPos, sym *types.Sym, typ *types.Type, val constant.Value) *Name { + if sym == nil { + base.Fatalf("NewConstAt nil") + } + n := newNameAt(pos, OLITERAL, sym) + n.SetType(typ) + n.SetTypecheck(1) + n.SetVal(val) + return n +} + +// newNameAt is like NewNameAt but allows sym == nil. +func newNameAt(pos src.XPos, op Op, sym *types.Sym) *Name { + n := new(Name) + n.op = op + n.pos = pos + n.sym = sym + return n +} + +func (n *Name) Name() *Name { return n } +func (n *Name) Sym() *types.Sym { return n.sym } +func (n *Name) SetSym(x *types.Sym) { n.sym = x } +func (n *Name) SubOp() Op { return n.BuiltinOp } +func (n *Name) SetSubOp(x Op) { n.BuiltinOp = x } +func (n *Name) SetFunc(x *Func) { n.Func = x } +func (n *Name) FrameOffset() int64 { return n.Offset_ } +func (n *Name) SetFrameOffset(x int64) { n.Offset_ = x } + +func (n *Name) Linksym() *obj.LSym { return n.sym.Linksym() } +func (n *Name) LinksymABI(abi obj.ABI) *obj.LSym { return n.sym.LinksymABI(abi) } + +func (*Name) CanBeNtype() {} +func (*Name) CanBeAnSSASym() {} +func (*Name) CanBeAnSSAAux() {} + +// Pragma returns the PragmaFlag for p, which must be for an OTYPE. +func (n *Name) Pragma() PragmaFlag { return n.pragma } + +// SetPragma sets the PragmaFlag for p, which must be for an OTYPE. +func (n *Name) SetPragma(flag PragmaFlag) { n.pragma = flag } + +// Alias reports whether p, which must be for an OTYPE, is a type alias. +func (n *Name) Alias() bool { return n.flags&nameAlias != 0 } + +// SetAlias sets whether p, which must be for an OTYPE, is a type alias. +func (n *Name) SetAlias(alias bool) { n.flags.set(nameAlias, alias) } + +const ( + nameReadonly = 1 << iota + nameByval // is the variable captured by value or by reference + nameNeedzero // if it contains pointers, needs to be zeroed on function entry + nameAutoTemp // is the variable a temporary (implies no dwarf info. 
reset if escapes to heap) + nameUsed // for variable declared and not used error + nameIsClosureVar // PAUTOHEAP closure pseudo-variable; original (if any) at n.Defn + nameIsOutputParamHeapAddr // pointer to a result parameter's heap copy + nameIsOutputParamInRegisters // output parameter in registers spills as an auto + nameAddrtaken // address taken, even if not moved to heap + nameInlFormal // PAUTO created by inliner, derived from callee formal + nameInlLocal // PAUTO created by inliner, derived from callee local + nameOpenDeferSlot // if temporary var storing info for open-coded defers + nameLibfuzzer8BitCounter // if PEXTERN should be assigned to __sancov_cntrs section + nameCoverageCounter // instrumentation counter var for cmd/cover + nameCoverageAuxVar // instrumentation pkg ID variable cmd/cover + nameAlias // is type name an alias +) + +func (n *Name) Readonly() bool { return n.flags&nameReadonly != 0 } +func (n *Name) Needzero() bool { return n.flags&nameNeedzero != 0 } +func (n *Name) AutoTemp() bool { return n.flags&nameAutoTemp != 0 } +func (n *Name) Used() bool { return n.flags&nameUsed != 0 } +func (n *Name) IsClosureVar() bool { return n.flags&nameIsClosureVar != 0 } +func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 } +func (n *Name) IsOutputParamInRegisters() bool { return n.flags&nameIsOutputParamInRegisters != 0 } +func (n *Name) Addrtaken() bool { return n.flags&nameAddrtaken != 0 } +func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 } +func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 } +func (n *Name) OpenDeferSlot() bool { return n.flags&nameOpenDeferSlot != 0 } +func (n *Name) Libfuzzer8BitCounter() bool { return n.flags&nameLibfuzzer8BitCounter != 0 } +func (n *Name) CoverageCounter() bool { return n.flags&nameCoverageCounter != 0 } +func (n *Name) CoverageAuxVar() bool { return n.flags&nameCoverageAuxVar != 0 } + +func (n *Name) setReadonly(b bool) { 
n.flags.set(nameReadonly, b) } +func (n *Name) SetNeedzero(b bool) { n.flags.set(nameNeedzero, b) } +func (n *Name) SetAutoTemp(b bool) { n.flags.set(nameAutoTemp, b) } +func (n *Name) SetUsed(b bool) { n.flags.set(nameUsed, b) } +func (n *Name) SetIsClosureVar(b bool) { n.flags.set(nameIsClosureVar, b) } +func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) } +func (n *Name) SetIsOutputParamInRegisters(b bool) { n.flags.set(nameIsOutputParamInRegisters, b) } +func (n *Name) SetAddrtaken(b bool) { n.flags.set(nameAddrtaken, b) } +func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) } +func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) } +func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) } +func (n *Name) SetLibfuzzer8BitCounter(b bool) { n.flags.set(nameLibfuzzer8BitCounter, b) } +func (n *Name) SetCoverageCounter(b bool) { n.flags.set(nameCoverageCounter, b) } +func (n *Name) SetCoverageAuxVar(b bool) { n.flags.set(nameCoverageAuxVar, b) } + +// OnStack reports whether variable n may reside on the stack. +func (n *Name) OnStack() bool { + if n.Op() == ONAME { + switch n.Class { + case PPARAM, PPARAMOUT, PAUTO: + return n.Esc() != EscHeap + case PEXTERN, PAUTOHEAP: + return false + } + } + // Note: fmt.go:dumpNodeHeader calls all "func() bool"-typed + // methods, but it can only recover from panics, not Fatalf. + panic(fmt.Sprintf("%v: not a variable: %v", base.FmtPos(n.Pos()), n)) +} + +// MarkReadonly indicates that n is an ONAME with readonly contents. +func (n *Name) MarkReadonly() { + if n.Op() != ONAME { + base.Fatalf("Node.MarkReadonly %v", n.Op()) + } + n.setReadonly(true) + // Mark the linksym as readonly immediately + // so that the SSA backend can use this information. + // It will be overridden later during dumpglobls. + n.Linksym().Type = objabi.SRODATA +} + +// Val returns the constant.Value for the node. 
+func (n *Name) Val() constant.Value { + if n.val == nil { + return constant.MakeUnknown() + } + return n.val +} + +// SetVal sets the constant.Value for the node. +func (n *Name) SetVal(v constant.Value) { + if n.op != OLITERAL { + panic(n.no("SetVal")) + } + AssertValidTypeForConst(n.Type(), v) + n.val = v +} + +// Canonical returns the logical declaration that n represents. If n +// is a closure variable, then Canonical returns the original Name as +// it appears in the function that immediately contains the +// declaration. Otherwise, Canonical simply returns n itself. +func (n *Name) Canonical() *Name { + if n.IsClosureVar() && n.Defn != nil { + n = n.Defn.(*Name) + } + return n +} + +func (n *Name) SetByval(b bool) { + if n.Canonical() != n { + base.Fatalf("SetByval called on non-canonical variable: %v", n) + } + n.flags.set(nameByval, b) +} + +func (n *Name) Byval() bool { + // We require byval to be set on the canonical variable, but we + // allow it to be accessed from any instance. + return n.Canonical().flags&nameByval != 0 +} + +// NewClosureVar returns a new closure variable for fn to refer to +// outer variable n. +func NewClosureVar(pos src.XPos, fn *Func, n *Name) *Name { + switch n.Class { + case PAUTO, PPARAM, PPARAMOUT, PAUTOHEAP: + // ok + default: + // Prevent mistaken capture of global variables. + base.Fatalf("NewClosureVar: %+v", n) + } + + c := NewNameAt(pos, n.Sym(), n.Type()) + c.Curfn = fn + c.Class = PAUTOHEAP + c.SetIsClosureVar(true) + c.Defn = n.Canonical() + c.Outer = n + + fn.ClosureVars = append(fn.ClosureVars, c) + + return c +} + +// NewHiddenParam returns a new hidden parameter for fn with the given +// name and type. +func NewHiddenParam(pos src.XPos, fn *Func, sym *types.Sym, typ *types.Type) *Name { + if fn.OClosure != nil { + base.FatalfAt(fn.Pos(), "cannot add hidden parameters to closures") + } + + fn.SetNeedctxt(true) + + // Create a fake parameter, disassociated from any real function, to + // pretend to capture. 
+ fake := NewNameAt(pos, sym, typ) + fake.Class = PPARAM + fake.SetByval(true) + + return NewClosureVar(pos, fn, fake) +} + +// SameSource reports whether two nodes refer to the same source +// element. +// +// It exists to help incrementally migrate the compiler towards +// allowing the introduction of IdentExpr (#42990). Once we have +// IdentExpr, it will no longer be safe to directly compare Node +// values to tell if they refer to the same Name. Instead, code will +// need to explicitly get references to the underlying Name object(s), +// and compare those instead. +// +// It will still be safe to compare Nodes directly for checking if two +// nodes are syntactically the same. The SameSource function exists to +// indicate code that intentionally compares Nodes for syntactic +// equality as opposed to code that has yet to be updated in +// preparation for IdentExpr. +func SameSource(n1, n2 Node) bool { + return n1 == n2 +} + +// Uses reports whether expression x is a (direct) use of the given +// variable. +func Uses(x Node, v *Name) bool { + if v == nil || v.Op() != ONAME { + base.Fatalf("RefersTo bad Name: %v", v) + } + return x.Op() == ONAME && x.Name() == v +} + +// DeclaredBy reports whether expression x refers (directly) to a +// variable that was declared by the given statement. +func DeclaredBy(x, stmt Node) bool { + if stmt == nil { + base.Fatalf("DeclaredBy nil") + } + return x.Op() == ONAME && SameSource(x.Name().Defn, stmt) +} + +// The Class of a variable/function describes the "storage class" +// of a variable or function. During parsing, storage classes are +// called declaration contexts. 
+type Class uint8 + +//go:generate stringer -type=Class name.go +const ( + Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables + PEXTERN // global variables + PAUTO // local variables + PAUTOHEAP // local variables or parameters moved to heap + PPARAM // input arguments + PPARAMOUT // output results + PTYPEPARAM // type params + PFUNC // global functions + + // Careful: Class is stored in three bits in Node.flags. + _ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3) +) + +type Embed struct { + Pos src.XPos + Patterns []string +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/node.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/node.go new file mode 100644 index 0000000000000000000000000000000000000000..6513386f03f2f98f69f0448ac6df51edf29244b0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/node.go @@ -0,0 +1,586 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// “Abstract” syntax representation. + +package ir + +import ( + "fmt" + "go/constant" + "sort" + + "cmd/compile/internal/base" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +// A Node is the abstract interface to an IR node. +type Node interface { + // Formatting + Format(s fmt.State, verb rune) + + // Source position. + Pos() src.XPos + SetPos(x src.XPos) + + // For making copies. For Copy and SepCopy. + copy() Node + + doChildren(func(Node) bool) bool + editChildren(func(Node) Node) + editChildrenWithHidden(func(Node) Node) + + // Abstract graph structure, for generic traversals. + Op() Op + Init() Nodes + + // Fields specific to certain Ops only. + Type() *types.Type + SetType(t *types.Type) + Name() *Name + Sym() *types.Sym + Val() constant.Value + SetVal(v constant.Value) + + // Storage for analysis passes. 
+ Esc() uint16 + SetEsc(x uint16) + + // Typecheck values: + // 0 means the node is not typechecked + // 1 means the node is completely typechecked + // 2 means typechecking of the node is in progress + Typecheck() uint8 + SetTypecheck(x uint8) + NonNil() bool + MarkNonNil() +} + +// Line returns n's position as a string. If n has been inlined, +// it uses the outermost position where n has been inlined. +func Line(n Node) string { + return base.FmtPos(n.Pos()) +} + +func IsSynthetic(n Node) bool { + name := n.Sym().Name + return name[0] == '.' || name[0] == '~' +} + +// IsAutoTmp indicates if n was created by the compiler as a temporary, +// based on the setting of the .AutoTemp flag in n's Name. +func IsAutoTmp(n Node) bool { + if n == nil || n.Op() != ONAME { + return false + } + return n.Name().AutoTemp() +} + +// MayBeShared reports whether n may occur in multiple places in the AST. +// Extra care must be taken when mutating such a node. +func MayBeShared(n Node) bool { + switch n.Op() { + case ONAME, OLITERAL, ONIL, OTYPE: + return true + } + return false +} + +type InitNode interface { + Node + PtrInit() *Nodes + SetInit(x Nodes) +} + +func TakeInit(n Node) Nodes { + init := n.Init() + if len(init) != 0 { + n.(InitNode).SetInit(nil) + } + return init +} + +//go:generate stringer -type=Op -trimprefix=O node.go + +type Op uint8 + +// Node ops. +const ( + OXXX Op = iota + + // names + ONAME // var or func name + // Unnamed arg or return value: f(int, string) (int, error) { etc } + // Also used for a qualified package identifier that hasn't been resolved yet. 
+ ONONAME + OTYPE // type name + OLITERAL // literal + ONIL // nil + + // expressions + OADD // X + Y + OSUB // X - Y + OOR // X | Y + OXOR // X ^ Y + OADDSTR // +{List} (string addition, list elements are strings) + OADDR // &X + OANDAND // X && Y + OAPPEND // append(Args); after walk, X may contain elem type descriptor + OBYTES2STR // Type(X) (Type is string, X is a []byte) + OBYTES2STRTMP // Type(X) (Type is string, X is a []byte, ephemeral) + ORUNES2STR // Type(X) (Type is string, X is a []rune) + OSTR2BYTES // Type(X) (Type is []byte, X is a string) + OSTR2BYTESTMP // Type(X) (Type is []byte, X is a string, ephemeral) + OSTR2RUNES // Type(X) (Type is []rune, X is a string) + OSLICE2ARR // Type(X) (Type is [N]T, X is a []T) + OSLICE2ARRPTR // Type(X) (Type is *[N]T, X is a []T) + // X = Y or (if Def=true) X := Y + // If Def, then Init includes a DCL node for X. + OAS + // Lhs = Rhs (x, y, z = a, b, c) or (if Def=true) Lhs := Rhs + // If Def, then Init includes DCL nodes for Lhs + OAS2 + OAS2DOTTYPE // Lhs = Rhs (x, ok = I.(int)) + OAS2FUNC // Lhs = Rhs (x, y = f()) + OAS2MAPR // Lhs = Rhs (x, ok = m["foo"]) + OAS2RECV // Lhs = Rhs (x, ok = <-c) + OASOP // X AsOp= Y (x += y) + OCALL // X(Args) (function call, method call or type conversion) + + // OCALLFUNC, OCALLMETH, and OCALLINTER have the same structure. + // Prior to walk, they are: X(Args), where Args is all regular arguments. + // After walk, if any argument whose evaluation might requires temporary variable, + // that temporary variable will be pushed to Init, Args will contains an updated + // set of arguments. 
+ OCALLFUNC // X(Args) (function call f(args)) + OCALLMETH // X(Args) (direct method call x.Method(args)) + OCALLINTER // X(Args) (interface method call x.Method(args)) + OCAP // cap(X) + OCLEAR // clear(X) + OCLOSE // close(X) + OCLOSURE // func Type { Func.Closure.Body } (func literal) + OCOMPLIT // Type{List} (composite literal, not yet lowered to specific form) + OMAPLIT // Type{List} (composite literal, Type is map) + OSTRUCTLIT // Type{List} (composite literal, Type is struct) + OARRAYLIT // Type{List} (composite literal, Type is array) + OSLICELIT // Type{List} (composite literal, Type is slice), Len is slice length. + OPTRLIT // &X (X is composite literal) + OCONV // Type(X) (type conversion) + OCONVIFACE // Type(X) (type conversion, to interface) + OCONVNOP // Type(X) (type conversion, no effect) + OCOPY // copy(X, Y) + ODCL // var X (declares X of type X.Type) + + // Used during parsing but don't last. + ODCLFUNC // func f() or func (r) f() + + ODELETE // delete(Args) + ODOT // X.Sel (X is of struct type) + ODOTPTR // X.Sel (X is of pointer to struct type) + ODOTMETH // X.Sel (X is non-interface, Sel is method name) + ODOTINTER // X.Sel (X is interface, Sel is method name) + OXDOT // X.Sel (before rewrite to one of the preceding) + ODOTTYPE // X.Ntype or X.Type (.Ntype during parsing, .Type once resolved); after walk, Itab contains address of interface type descriptor and Itab.X contains address of concrete type descriptor + ODOTTYPE2 // X.Ntype or X.Type (.Ntype during parsing, .Type once resolved; on rhs of OAS2DOTTYPE); after walk, Itab contains address of interface type descriptor + OEQ // X == Y + ONE // X != Y + OLT // X < Y + OLE // X <= Y + OGE // X >= Y + OGT // X > Y + ODEREF // *X + OINDEX // X[Index] (index of array or slice) + OINDEXMAP // X[Index] (index of map) + OKEY // Key:Value (key:value in struct/array/map literal) + OSTRUCTKEY // Field:Value (key:value in struct literal, after type checking) + OLEN // len(X) + OMAKE // make(Args) 
(before type checking converts to one of the following) + OMAKECHAN // make(Type[, Len]) (type is chan) + OMAKEMAP // make(Type[, Len]) (type is map) + OMAKESLICE // make(Type[, Len[, Cap]]) (type is slice) + OMAKESLICECOPY // makeslicecopy(Type, Len, Cap) (type is slice; Len is length and Cap is the copied from slice) + // OMAKESLICECOPY is created by the order pass and corresponds to: + // s = make(Type, Len); copy(s, Cap) + // + // Bounded can be set on the node when Len == len(Cap) is known at compile time. + // + // This node is created so the walk pass can optimize this pattern which would + // otherwise be hard to detect after the order pass. + OMUL // X * Y + ODIV // X / Y + OMOD // X % Y + OLSH // X << Y + ORSH // X >> Y + OAND // X & Y + OANDNOT // X &^ Y + ONEW // new(X); corresponds to calls to new in source code + ONOT // !X + OBITNOT // ^X + OPLUS // +X + ONEG // -X + OOROR // X || Y + OPANIC // panic(X) + OPRINT // print(List) + OPRINTLN // println(List) + OPAREN // (X) + OSEND // Chan <- Value + OSLICE // X[Low : High] (X is untypechecked or slice) + OSLICEARR // X[Low : High] (X is pointer to array) + OSLICESTR // X[Low : High] (X is string) + OSLICE3 // X[Low : High : Max] (X is untypedchecked or slice) + OSLICE3ARR // X[Low : High : Max] (X is pointer to array) + OSLICEHEADER // sliceheader{Ptr, Len, Cap} (Ptr is unsafe.Pointer, Len is length, Cap is capacity) + OSTRINGHEADER // stringheader{Ptr, Len} (Ptr is unsafe.Pointer, Len is length) + ORECOVER // recover() + ORECOVERFP // recover(Args) w/ explicit FP argument + ORECV // <-X + ORUNESTR // Type(X) (Type is string, X is rune) + OSELRECV2 // like OAS2: Lhs = Rhs where len(Lhs)=2, len(Rhs)=1, Rhs[0].Op = ORECV (appears as .Var of OCASE) + OMIN // min(List) + OMAX // max(List) + OREAL // real(X) + OIMAG // imag(X) + OCOMPLEX // complex(X, Y) + OUNSAFEADD // unsafe.Add(X, Y) + OUNSAFESLICE // unsafe.Slice(X, Y) + OUNSAFESLICEDATA // unsafe.SliceData(X) + OUNSAFESTRING // unsafe.String(X, Y) + 
OUNSAFESTRINGDATA // unsafe.StringData(X) + OMETHEXPR // X(Args) (method expression T.Method(args), first argument is the method receiver) + OMETHVALUE // X.Sel (method expression t.Method, not called) + + // statements + OBLOCK // { List } (block of code) + OBREAK // break [Label] + // OCASE: case List: Body (List==nil means default) + // For OTYPESW, List is a OTYPE node for the specified type (or OLITERAL + // for nil) or an ODYNAMICTYPE indicating a runtime type for generics. + // If a type-switch variable is specified, Var is an + // ONAME for the version of the type-switch variable with the specified + // type. + OCASE + OCONTINUE // continue [Label] + ODEFER // defer Call + OFALL // fallthrough + OFOR // for Init; Cond; Post { Body } + OGOTO // goto Label + OIF // if Init; Cond { Then } else { Else } + OLABEL // Label: + OGO // go Call + ORANGE // for Key, Value = range X { Body } + ORETURN // return Results + OSELECT // select { Cases } + OSWITCH // switch Init; Expr { Cases } + // OTYPESW: X := Y.(type) (appears as .Tag of OSWITCH) + // X is nil if there is no type-switch variable + OTYPESW + + // misc + // intermediate representation of an inlined call. Uses Init (assignments + // for the captured variables, parameters, retvars, & INLMARK op), + // Body (body of the inlined function), and ReturnVars (list of + // return values) + OINLCALL // intermediary representation of an inlined call. + OMAKEFACE // construct an interface value from rtype/itab and data pointers + OITAB // rtype/itab pointer of an interface value + OIDATA // data pointer of an interface value + OSPTR // base pointer of a slice or string. Bounded==1 means known non-nil. + OCFUNC // reference to c function pointer (not go func value) + OCHECKNIL // emit code to ensure pointer/interface not nil + ORESULT // result of a function call; Xoffset is stack offset + OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree. 
+ OLINKSYMOFFSET // offset within a name + OJUMPTABLE // A jump table structure for implementing dense expression switches + OINTERFACESWITCH // A type switch with interface cases + + // opcodes for generics + ODYNAMICDOTTYPE // x = i.(T) where T is a type parameter (or derived from a type parameter) + ODYNAMICDOTTYPE2 // x, ok = i.(T) where T is a type parameter (or derived from a type parameter) + ODYNAMICTYPE // a type node for type switches (represents a dynamic target type for a type switch) + + // arch-specific opcodes + OTAILCALL // tail call to another function + OGETG // runtime.getg() (read g pointer) + OGETCALLERPC // runtime.getcallerpc() (continuation PC in caller frame) + OGETCALLERSP // runtime.getcallersp() (stack pointer in caller frame) + + OEND +) + +// IsCmp reports whether op is a comparison operation (==, !=, <, <=, +// >, or >=). +func (op Op) IsCmp() bool { + switch op { + case OEQ, ONE, OLT, OLE, OGT, OGE: + return true + } + return false +} + +// Nodes is a slice of Node. +type Nodes []Node + +// ToNodes returns s as a slice of Nodes. +func ToNodes[T Node](s []T) Nodes { + res := make(Nodes, len(s)) + for i, n := range s { + res[i] = n + } + return res +} + +// Append appends entries to Nodes. +func (n *Nodes) Append(a ...Node) { + if len(a) == 0 { + return + } + *n = append(*n, a...) +} + +// Prepend prepends entries to Nodes. +// If a slice is passed in, this will take ownership of it. +func (n *Nodes) Prepend(a ...Node) { + if len(a) == 0 { + return + } + *n = append(a, *n...) +} + +// Take clears n, returning its former contents. +func (n *Nodes) Take() []Node { + ret := *n + *n = nil + return ret +} + +// Copy returns a copy of the content of the slice. +func (n Nodes) Copy() Nodes { + if n == nil { + return nil + } + c := make(Nodes, len(n)) + copy(c, n) + return c +} + +// NameQueue is a FIFO queue of *Name. The zero value of NameQueue is +// a ready-to-use empty queue. 
+type NameQueue struct { + ring []*Name + head, tail int +} + +// Empty reports whether q contains no Names. +func (q *NameQueue) Empty() bool { + return q.head == q.tail +} + +// PushRight appends n to the right of the queue. +func (q *NameQueue) PushRight(n *Name) { + if len(q.ring) == 0 { + q.ring = make([]*Name, 16) + } else if q.head+len(q.ring) == q.tail { + // Grow the ring. + nring := make([]*Name, len(q.ring)*2) + // Copy the old elements. + part := q.ring[q.head%len(q.ring):] + if q.tail-q.head <= len(part) { + part = part[:q.tail-q.head] + copy(nring, part) + } else { + pos := copy(nring, part) + copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) + } + q.ring, q.head, q.tail = nring, 0, q.tail-q.head + } + + q.ring[q.tail%len(q.ring)] = n + q.tail++ +} + +// PopLeft pops a Name from the left of the queue. It panics if q is +// empty. +func (q *NameQueue) PopLeft() *Name { + if q.Empty() { + panic("dequeue empty") + } + n := q.ring[q.head%len(q.ring)] + q.head++ + return n +} + +// NameSet is a set of Names. +type NameSet map[*Name]struct{} + +// Has reports whether s contains n. +func (s NameSet) Has(n *Name) bool { + _, isPresent := s[n] + return isPresent +} + +// Add adds n to s. +func (s *NameSet) Add(n *Name) { + if *s == nil { + *s = make(map[*Name]struct{}) + } + (*s)[n] = struct{}{} +} + +// Sorted returns s sorted according to less. +func (s NameSet) Sorted(less func(*Name, *Name) bool) []*Name { + var res []*Name + for n := range s { + res = append(res, n) + } + sort.Slice(res, func(i, j int) bool { return less(res[i], res[j]) }) + return res +} + +type PragmaFlag uint16 + +const ( + // Func pragmas. 
+ Nointerface PragmaFlag = 1 << iota + Noescape // func parameters don't escape + Norace // func must not have race detector annotations + Nosplit // func should not execute on separate stack + Noinline // func should not be inlined + NoCheckPtr // func should not be instrumented by checkptr + CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all + UintptrKeepAlive // pointers converted to uintptr must be kept alive + UintptrEscapes // pointers converted to uintptr escape + + // Runtime-only func pragmas. + // See ../../../../runtime/HACKING.md for detailed descriptions. + Systemstack // func must run on system stack + Nowritebarrier // emit compiler error instead of write barrier + Nowritebarrierrec // error on write barrier in this or recursive callees + Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees + + // Go command pragmas + GoBuildPragma + + RegisterParams // TODO(register args) remove after register abi is working + +) + +var BlankNode *Name + +func IsConst(n Node, ct constant.Kind) bool { + return ConstType(n) == ct +} + +// IsNil reports whether n represents the universal untyped zero value "nil". +func IsNil(n Node) bool { + return n != nil && n.Op() == ONIL +} + +func IsBlank(n Node) bool { + if n == nil { + return false + } + return n.Sym().IsBlank() +} + +// IsMethod reports whether n is a method. +// n must be a function or a method. +func IsMethod(n Node) bool { + return n.Type().Recv() != nil +} + +// HasUniquePos reports whether n has a unique position that can be +// used for reporting error messages. +// +// It's primarily used to distinguish references to named objects, +// whose Pos will point back to their declaration position rather than +// their usage position. 
+func HasUniquePos(n Node) bool { + switch n.Op() { + case ONAME: + return false + case OLITERAL, ONIL, OTYPE: + if n.Sym() != nil { + return false + } + } + + if !n.Pos().IsKnown() { + if base.Flag.K != 0 { + base.Warn("setlineno: unknown position (line 0)") + } + return false + } + + return true +} + +func SetPos(n Node) src.XPos { + lno := base.Pos + if n != nil && HasUniquePos(n) { + base.Pos = n.Pos() + } + return lno +} + +// The result of InitExpr MUST be assigned back to n, e.g. +// +// n.X = InitExpr(init, n.X) +func InitExpr(init []Node, expr Node) Node { + if len(init) == 0 { + return expr + } + + n, ok := expr.(InitNode) + if !ok || MayBeShared(n) { + // Introduce OCONVNOP to hold init list. + n = NewConvExpr(base.Pos, OCONVNOP, nil, expr) + n.SetType(expr.Type()) + n.SetTypecheck(1) + } + + n.PtrInit().Prepend(init...) + return n +} + +// what's the outer value that a write to n affects? +// outer value means containing struct or array. +func OuterValue(n Node) Node { + for { + switch nn := n; nn.Op() { + case OXDOT: + base.FatalfAt(n.Pos(), "OXDOT in OuterValue: %v", n) + case ODOT: + nn := nn.(*SelectorExpr) + n = nn.X + continue + case OPAREN: + nn := nn.(*ParenExpr) + n = nn.X + continue + case OCONVNOP: + nn := nn.(*ConvExpr) + n = nn.X + continue + case OINDEX: + nn := nn.(*IndexExpr) + if nn.X.Type() == nil { + base.Fatalf("OuterValue needs type for %v", nn.X) + } + if nn.X.Type().IsArray() { + n = nn.X + continue + } + } + + return n + } +} + +const ( + EscUnknown = iota + EscNone // Does not escape to heap, result, or parameters. + EscHeap // Reachable from the heap + EscNever // By construction will not escape. 
+) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/node_gen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/node_gen.go new file mode 100644 index 0000000000000000000000000000000000000000..fc28067629d6a00c44075cd5e8a7590bb57e4250 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/node_gen.go @@ -0,0 +1,1809 @@ +// Code generated by mknode.go. DO NOT EDIT. + +package ir + +import "fmt" + +func (n *AddStringExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *AddStringExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + c.List = copyNodes(c.List) + return &c +} +func (n *AddStringExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if doNodes(n.List, do) { + return true + } + if n.Prealloc != nil && do(n.Prealloc) { + return true + } + return false +} +func (n *AddStringExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + editNodes(n.List, edit) + if n.Prealloc != nil { + n.Prealloc = edit(n.Prealloc).(*Name) + } +} +func (n *AddStringExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + editNodes(n.List, edit) + if n.Prealloc != nil { + n.Prealloc = edit(n.Prealloc).(*Name) + } +} + +func (n *AddrExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *AddrExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *AddrExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.X != nil && do(n.X) { + return true + } + if n.Prealloc != nil && do(n.Prealloc) { + return true + } + return false +} +func (n *AddrExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Prealloc != nil { + n.Prealloc = edit(n.Prealloc).(*Name) + } +} +func (n *AddrExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = 
edit(n.X).(Node) + } + if n.Prealloc != nil { + n.Prealloc = edit(n.Prealloc).(*Name) + } +} + +func (n *AssignListStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *AssignListStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + c.Lhs = copyNodes(c.Lhs) + c.Rhs = copyNodes(c.Rhs) + return &c +} +func (n *AssignListStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if doNodes(n.Lhs, do) { + return true + } + if doNodes(n.Rhs, do) { + return true + } + return false +} +func (n *AssignListStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + editNodes(n.Lhs, edit) + editNodes(n.Rhs, edit) +} +func (n *AssignListStmt) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + editNodes(n.Lhs, edit) + editNodes(n.Rhs, edit) +} + +func (n *AssignOpStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *AssignOpStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *AssignOpStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.X != nil && do(n.X) { + return true + } + if n.Y != nil && do(n.Y) { + return true + } + return false +} +func (n *AssignOpStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Y != nil { + n.Y = edit(n.Y).(Node) + } +} +func (n *AssignOpStmt) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Y != nil { + n.Y = edit(n.Y).(Node) + } +} + +func (n *AssignStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *AssignStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *AssignStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.X != nil && do(n.X) { + return true + } + if n.Y != nil && do(n.Y) { + return true + } + return false +} +func (n 
*AssignStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Y != nil { + n.Y = edit(n.Y).(Node) + } +} +func (n *AssignStmt) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Y != nil { + n.Y = edit(n.Y).(Node) + } +} + +func (n *BasicLit) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *BasicLit) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *BasicLit) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + return false +} +func (n *BasicLit) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) +} +func (n *BasicLit) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) +} + +func (n *BinaryExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *BinaryExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *BinaryExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.X != nil && do(n.X) { + return true + } + if n.Y != nil && do(n.Y) { + return true + } + return false +} +func (n *BinaryExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Y != nil { + n.Y = edit(n.Y).(Node) + } +} +func (n *BinaryExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Y != nil { + n.Y = edit(n.Y).(Node) + } + if n.RType != nil { + n.RType = edit(n.RType).(Node) + } +} + +func (n *BlockStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *BlockStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + c.List = copyNodes(c.List) + return &c +} +func (n *BlockStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if doNodes(n.List, do) { + return true + } + 
return false +} +func (n *BlockStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + editNodes(n.List, edit) +} +func (n *BlockStmt) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + editNodes(n.List, edit) +} + +func (n *BranchStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *BranchStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *BranchStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + return false +} +func (n *BranchStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) +} +func (n *BranchStmt) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) +} + +func (n *CallExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *CallExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + c.Args = copyNodes(c.Args) + c.KeepAlive = copyNames(c.KeepAlive) + return &c +} +func (n *CallExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Fun != nil && do(n.Fun) { + return true + } + if doNodes(n.Args, do) { + return true + } + if doNames(n.KeepAlive, do) { + return true + } + return false +} +func (n *CallExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Fun != nil { + n.Fun = edit(n.Fun).(Node) + } + editNodes(n.Args, edit) + editNames(n.KeepAlive, edit) +} +func (n *CallExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Fun != nil { + n.Fun = edit(n.Fun).(Node) + } + editNodes(n.Args, edit) + if n.RType != nil { + n.RType = edit(n.RType).(Node) + } + editNames(n.KeepAlive, edit) +} + +func (n *CaseClause) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *CaseClause) copy() Node { + c := *n + c.init = copyNodes(c.init) + c.List = copyNodes(c.List) + c.RTypes = copyNodes(c.RTypes) + c.Body = copyNodes(c.Body) + return &c +} +func (n *CaseClause) doChildren(do func(Node) 
bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Var != nil && do(n.Var) { + return true + } + if doNodes(n.List, do) { + return true + } + if doNodes(n.RTypes, do) { + return true + } + if doNodes(n.Body, do) { + return true + } + return false +} +func (n *CaseClause) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Var != nil { + n.Var = edit(n.Var).(*Name) + } + editNodes(n.List, edit) + editNodes(n.RTypes, edit) + editNodes(n.Body, edit) +} +func (n *CaseClause) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Var != nil { + n.Var = edit(n.Var).(*Name) + } + editNodes(n.List, edit) + editNodes(n.RTypes, edit) + editNodes(n.Body, edit) +} + +func (n *ClosureExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *ClosureExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *ClosureExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Prealloc != nil && do(n.Prealloc) { + return true + } + return false +} +func (n *ClosureExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Prealloc != nil { + n.Prealloc = edit(n.Prealloc).(*Name) + } +} +func (n *ClosureExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Prealloc != nil { + n.Prealloc = edit(n.Prealloc).(*Name) + } +} + +func (n *CommClause) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *CommClause) copy() Node { + c := *n + c.init = copyNodes(c.init) + c.Body = copyNodes(c.Body) + return &c +} +func (n *CommClause) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Comm != nil && do(n.Comm) { + return true + } + if doNodes(n.Body, do) { + return true + } + return false +} +func (n *CommClause) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Comm != nil { + n.Comm = edit(n.Comm).(Node) + } + editNodes(n.Body, edit) +} +func (n 
*CommClause) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Comm != nil { + n.Comm = edit(n.Comm).(Node) + } + editNodes(n.Body, edit) +} + +func (n *CompLitExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *CompLitExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + c.List = copyNodes(c.List) + return &c +} +func (n *CompLitExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if doNodes(n.List, do) { + return true + } + if n.Prealloc != nil && do(n.Prealloc) { + return true + } + return false +} +func (n *CompLitExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + editNodes(n.List, edit) + if n.Prealloc != nil { + n.Prealloc = edit(n.Prealloc).(*Name) + } +} +func (n *CompLitExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + editNodes(n.List, edit) + if n.RType != nil { + n.RType = edit(n.RType).(Node) + } + if n.Prealloc != nil { + n.Prealloc = edit(n.Prealloc).(*Name) + } +} + +func (n *ConvExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *ConvExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *ConvExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.X != nil && do(n.X) { + return true + } + return false +} +func (n *ConvExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } +} +func (n *ConvExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.TypeWord != nil { + n.TypeWord = edit(n.TypeWord).(Node) + } + if n.SrcRType != nil { + n.SrcRType = edit(n.SrcRType).(Node) + } + if n.ElemRType != nil { + n.ElemRType = edit(n.ElemRType).(Node) + } + if n.ElemElemRType != nil { + n.ElemElemRType = edit(n.ElemElemRType).(Node) + } +} + +func (n *Decl) Format(s fmt.State, verb rune) { fmtNode(n, 
s, verb) } +func (n *Decl) copy() Node { + c := *n + return &c +} +func (n *Decl) doChildren(do func(Node) bool) bool { + if n.X != nil && do(n.X) { + return true + } + return false +} +func (n *Decl) editChildren(edit func(Node) Node) { + if n.X != nil { + n.X = edit(n.X).(*Name) + } +} +func (n *Decl) editChildrenWithHidden(edit func(Node) Node) { + if n.X != nil { + n.X = edit(n.X).(*Name) + } +} + +func (n *DynamicType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *DynamicType) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *DynamicType) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.RType != nil && do(n.RType) { + return true + } + if n.ITab != nil && do(n.ITab) { + return true + } + return false +} +func (n *DynamicType) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.RType != nil { + n.RType = edit(n.RType).(Node) + } + if n.ITab != nil { + n.ITab = edit(n.ITab).(Node) + } +} +func (n *DynamicType) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.RType != nil { + n.RType = edit(n.RType).(Node) + } + if n.ITab != nil { + n.ITab = edit(n.ITab).(Node) + } +} + +func (n *DynamicTypeAssertExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *DynamicTypeAssertExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *DynamicTypeAssertExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.X != nil && do(n.X) { + return true + } + if n.SrcRType != nil && do(n.SrcRType) { + return true + } + if n.RType != nil && do(n.RType) { + return true + } + if n.ITab != nil && do(n.ITab) { + return true + } + return false +} +func (n *DynamicTypeAssertExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.SrcRType != nil { + n.SrcRType = edit(n.SrcRType).(Node) + } + if n.RType != nil { + 
n.RType = edit(n.RType).(Node) + } + if n.ITab != nil { + n.ITab = edit(n.ITab).(Node) + } +} +func (n *DynamicTypeAssertExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.SrcRType != nil { + n.SrcRType = edit(n.SrcRType).(Node) + } + if n.RType != nil { + n.RType = edit(n.RType).(Node) + } + if n.ITab != nil { + n.ITab = edit(n.ITab).(Node) + } +} + +func (n *ForStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *ForStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + c.Body = copyNodes(c.Body) + return &c +} +func (n *ForStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Cond != nil && do(n.Cond) { + return true + } + if n.Post != nil && do(n.Post) { + return true + } + if doNodes(n.Body, do) { + return true + } + return false +} +func (n *ForStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Cond != nil { + n.Cond = edit(n.Cond).(Node) + } + if n.Post != nil { + n.Post = edit(n.Post).(Node) + } + editNodes(n.Body, edit) +} +func (n *ForStmt) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Cond != nil { + n.Cond = edit(n.Cond).(Node) + } + if n.Post != nil { + n.Post = edit(n.Post).(Node) + } + editNodes(n.Body, edit) +} + +func (n *Func) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } + +func (n *GoDeferStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *GoDeferStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *GoDeferStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Call != nil && do(n.Call) { + return true + } + return false +} +func (n *GoDeferStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Call != nil { + n.Call = edit(n.Call).(Node) + } +} +func (n *GoDeferStmt) editChildrenWithHidden(edit func(Node) Node) { + 
editNodes(n.init, edit) + if n.Call != nil { + n.Call = edit(n.Call).(Node) + } +} + +func (n *Ident) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *Ident) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *Ident) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + return false +} +func (n *Ident) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) +} +func (n *Ident) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) +} + +func (n *IfStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *IfStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + c.Body = copyNodes(c.Body) + c.Else = copyNodes(c.Else) + return &c +} +func (n *IfStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Cond != nil && do(n.Cond) { + return true + } + if doNodes(n.Body, do) { + return true + } + if doNodes(n.Else, do) { + return true + } + return false +} +func (n *IfStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Cond != nil { + n.Cond = edit(n.Cond).(Node) + } + editNodes(n.Body, edit) + editNodes(n.Else, edit) +} +func (n *IfStmt) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Cond != nil { + n.Cond = edit(n.Cond).(Node) + } + editNodes(n.Body, edit) + editNodes(n.Else, edit) +} + +func (n *IndexExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *IndexExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *IndexExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.X != nil && do(n.X) { + return true + } + if n.Index != nil && do(n.Index) { + return true + } + return false +} +func (n *IndexExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Index != nil { + n.Index = edit(n.Index).(Node) + } +} 
+func (n *IndexExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Index != nil { + n.Index = edit(n.Index).(Node) + } + if n.RType != nil { + n.RType = edit(n.RType).(Node) + } +} + +func (n *InlineMarkStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *InlineMarkStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *InlineMarkStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + return false +} +func (n *InlineMarkStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) +} +func (n *InlineMarkStmt) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) +} + +func (n *InlinedCallExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *InlinedCallExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + c.Body = copyNodes(c.Body) + c.ReturnVars = copyNodes(c.ReturnVars) + return &c +} +func (n *InlinedCallExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if doNodes(n.Body, do) { + return true + } + if doNodes(n.ReturnVars, do) { + return true + } + return false +} +func (n *InlinedCallExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + editNodes(n.Body, edit) + editNodes(n.ReturnVars, edit) +} +func (n *InlinedCallExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + editNodes(n.Body, edit) + editNodes(n.ReturnVars, edit) +} + +func (n *InterfaceSwitchStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *InterfaceSwitchStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *InterfaceSwitchStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Case != nil && do(n.Case) { + return true + } + if n.Itab != nil && do(n.Itab) { + return true + } + if n.RuntimeType != nil && do(n.RuntimeType) 
{ + return true + } + return false +} +func (n *InterfaceSwitchStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Case != nil { + n.Case = edit(n.Case).(Node) + } + if n.Itab != nil { + n.Itab = edit(n.Itab).(Node) + } + if n.RuntimeType != nil { + n.RuntimeType = edit(n.RuntimeType).(Node) + } +} +func (n *InterfaceSwitchStmt) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Case != nil { + n.Case = edit(n.Case).(Node) + } + if n.Itab != nil { + n.Itab = edit(n.Itab).(Node) + } + if n.RuntimeType != nil { + n.RuntimeType = edit(n.RuntimeType).(Node) + } +} + +func (n *JumpTableStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *JumpTableStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *JumpTableStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Idx != nil && do(n.Idx) { + return true + } + return false +} +func (n *JumpTableStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Idx != nil { + n.Idx = edit(n.Idx).(Node) + } +} +func (n *JumpTableStmt) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Idx != nil { + n.Idx = edit(n.Idx).(Node) + } +} + +func (n *KeyExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *KeyExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *KeyExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Key != nil && do(n.Key) { + return true + } + if n.Value != nil && do(n.Value) { + return true + } + return false +} +func (n *KeyExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Key != nil { + n.Key = edit(n.Key).(Node) + } + if n.Value != nil { + n.Value = edit(n.Value).(Node) + } +} +func (n *KeyExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Key != nil { + n.Key = edit(n.Key).(Node) + 
} + if n.Value != nil { + n.Value = edit(n.Value).(Node) + } +} + +func (n *LabelStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *LabelStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *LabelStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + return false +} +func (n *LabelStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) +} +func (n *LabelStmt) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) +} + +func (n *LinksymOffsetExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *LinksymOffsetExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *LinksymOffsetExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + return false +} +func (n *LinksymOffsetExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) +} +func (n *LinksymOffsetExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) +} + +func (n *LogicalExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *LogicalExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *LogicalExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.X != nil && do(n.X) { + return true + } + if n.Y != nil && do(n.Y) { + return true + } + return false +} +func (n *LogicalExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Y != nil { + n.Y = edit(n.Y).(Node) + } +} +func (n *LogicalExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Y != nil { + n.Y = edit(n.Y).(Node) + } +} + +func (n *MakeExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *MakeExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *MakeExpr) 
doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Len != nil && do(n.Len) { + return true + } + if n.Cap != nil && do(n.Cap) { + return true + } + return false +} +func (n *MakeExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Len != nil { + n.Len = edit(n.Len).(Node) + } + if n.Cap != nil { + n.Cap = edit(n.Cap).(Node) + } +} +func (n *MakeExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.RType != nil { + n.RType = edit(n.RType).(Node) + } + if n.Len != nil { + n.Len = edit(n.Len).(Node) + } + if n.Cap != nil { + n.Cap = edit(n.Cap).(Node) + } +} + +func (n *Name) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } + +func (n *NilExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *NilExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *NilExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + return false +} +func (n *NilExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) +} +func (n *NilExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) +} + +func (n *ParenExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *ParenExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *ParenExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.X != nil && do(n.X) { + return true + } + return false +} +func (n *ParenExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } +} +func (n *ParenExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } +} + +func (n *RangeStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *RangeStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + c.Body = copyNodes(c.Body) + return &c 
+} +func (n *RangeStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.X != nil && do(n.X) { + return true + } + if n.Key != nil && do(n.Key) { + return true + } + if n.Value != nil && do(n.Value) { + return true + } + if doNodes(n.Body, do) { + return true + } + if n.Prealloc != nil && do(n.Prealloc) { + return true + } + return false +} +func (n *RangeStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Key != nil { + n.Key = edit(n.Key).(Node) + } + if n.Value != nil { + n.Value = edit(n.Value).(Node) + } + editNodes(n.Body, edit) + if n.Prealloc != nil { + n.Prealloc = edit(n.Prealloc).(*Name) + } +} +func (n *RangeStmt) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.RType != nil { + n.RType = edit(n.RType).(Node) + } + if n.Key != nil { + n.Key = edit(n.Key).(Node) + } + if n.Value != nil { + n.Value = edit(n.Value).(Node) + } + editNodes(n.Body, edit) + if n.Prealloc != nil { + n.Prealloc = edit(n.Prealloc).(*Name) + } + if n.KeyTypeWord != nil { + n.KeyTypeWord = edit(n.KeyTypeWord).(Node) + } + if n.KeySrcRType != nil { + n.KeySrcRType = edit(n.KeySrcRType).(Node) + } + if n.ValueTypeWord != nil { + n.ValueTypeWord = edit(n.ValueTypeWord).(Node) + } + if n.ValueSrcRType != nil { + n.ValueSrcRType = edit(n.ValueSrcRType).(Node) + } +} + +func (n *ResultExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *ResultExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *ResultExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + return false +} +func (n *ResultExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) +} +func (n *ResultExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) +} + +func (n *ReturnStmt) Format(s fmt.State, verb rune) { 
fmtNode(n, s, verb) } +func (n *ReturnStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + c.Results = copyNodes(c.Results) + return &c +} +func (n *ReturnStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if doNodes(n.Results, do) { + return true + } + return false +} +func (n *ReturnStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + editNodes(n.Results, edit) +} +func (n *ReturnStmt) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + editNodes(n.Results, edit) +} + +func (n *SelectStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *SelectStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + c.Cases = copyCommClauses(c.Cases) + c.Compiled = copyNodes(c.Compiled) + return &c +} +func (n *SelectStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if doCommClauses(n.Cases, do) { + return true + } + if doNodes(n.Compiled, do) { + return true + } + return false +} +func (n *SelectStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + editCommClauses(n.Cases, edit) + editNodes(n.Compiled, edit) +} +func (n *SelectStmt) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + editCommClauses(n.Cases, edit) + editNodes(n.Compiled, edit) +} + +func (n *SelectorExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *SelectorExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *SelectorExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.X != nil && do(n.X) { + return true + } + if n.Prealloc != nil && do(n.Prealloc) { + return true + } + return false +} +func (n *SelectorExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Prealloc != nil { + n.Prealloc = edit(n.Prealloc).(*Name) + } +} +func (n *SelectorExpr) 
editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Prealloc != nil { + n.Prealloc = edit(n.Prealloc).(*Name) + } +} + +func (n *SendStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *SendStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *SendStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Chan != nil && do(n.Chan) { + return true + } + if n.Value != nil && do(n.Value) { + return true + } + return false +} +func (n *SendStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Chan != nil { + n.Chan = edit(n.Chan).(Node) + } + if n.Value != nil { + n.Value = edit(n.Value).(Node) + } +} +func (n *SendStmt) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Chan != nil { + n.Chan = edit(n.Chan).(Node) + } + if n.Value != nil { + n.Value = edit(n.Value).(Node) + } +} + +func (n *SliceExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *SliceExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *SliceExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.X != nil && do(n.X) { + return true + } + if n.Low != nil && do(n.Low) { + return true + } + if n.High != nil && do(n.High) { + return true + } + if n.Max != nil && do(n.Max) { + return true + } + return false +} +func (n *SliceExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Low != nil { + n.Low = edit(n.Low).(Node) + } + if n.High != nil { + n.High = edit(n.High).(Node) + } + if n.Max != nil { + n.Max = edit(n.Max).(Node) + } +} +func (n *SliceExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.Low != nil { + n.Low = edit(n.Low).(Node) + } + if n.High != nil { 
+ n.High = edit(n.High).(Node) + } + if n.Max != nil { + n.Max = edit(n.Max).(Node) + } +} + +func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *SliceHeaderExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *SliceHeaderExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Ptr != nil && do(n.Ptr) { + return true + } + if n.Len != nil && do(n.Len) { + return true + } + if n.Cap != nil && do(n.Cap) { + return true + } + return false +} +func (n *SliceHeaderExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Ptr != nil { + n.Ptr = edit(n.Ptr).(Node) + } + if n.Len != nil { + n.Len = edit(n.Len).(Node) + } + if n.Cap != nil { + n.Cap = edit(n.Cap).(Node) + } +} +func (n *SliceHeaderExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Ptr != nil { + n.Ptr = edit(n.Ptr).(Node) + } + if n.Len != nil { + n.Len = edit(n.Len).(Node) + } + if n.Cap != nil { + n.Cap = edit(n.Cap).(Node) + } +} + +func (n *StarExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *StarExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *StarExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.X != nil && do(n.X) { + return true + } + return false +} +func (n *StarExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } +} +func (n *StarExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } +} + +func (n *StringHeaderExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *StringHeaderExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *StringHeaderExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Ptr != nil && do(n.Ptr) { 
+ return true + } + if n.Len != nil && do(n.Len) { + return true + } + return false +} +func (n *StringHeaderExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Ptr != nil { + n.Ptr = edit(n.Ptr).(Node) + } + if n.Len != nil { + n.Len = edit(n.Len).(Node) + } +} +func (n *StringHeaderExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Ptr != nil { + n.Ptr = edit(n.Ptr).(Node) + } + if n.Len != nil { + n.Len = edit(n.Len).(Node) + } +} + +func (n *StructKeyExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *StructKeyExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *StructKeyExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Value != nil && do(n.Value) { + return true + } + return false +} +func (n *StructKeyExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Value != nil { + n.Value = edit(n.Value).(Node) + } +} +func (n *StructKeyExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Value != nil { + n.Value = edit(n.Value).(Node) + } +} + +func (n *SwitchStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *SwitchStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + c.Cases = copyCaseClauses(c.Cases) + c.Compiled = copyNodes(c.Compiled) + return &c +} +func (n *SwitchStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Tag != nil && do(n.Tag) { + return true + } + if doCaseClauses(n.Cases, do) { + return true + } + if doNodes(n.Compiled, do) { + return true + } + return false +} +func (n *SwitchStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Tag != nil { + n.Tag = edit(n.Tag).(Node) + } + editCaseClauses(n.Cases, edit) + editNodes(n.Compiled, edit) +} +func (n *SwitchStmt) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Tag != nil { 
+ n.Tag = edit(n.Tag).(Node) + } + editCaseClauses(n.Cases, edit) + editNodes(n.Compiled, edit) +} + +func (n *TailCallStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *TailCallStmt) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *TailCallStmt) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Call != nil && do(n.Call) { + return true + } + return false +} +func (n *TailCallStmt) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Call != nil { + n.Call = edit(n.Call).(*CallExpr) + } +} +func (n *TailCallStmt) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Call != nil { + n.Call = edit(n.Call).(*CallExpr) + } +} + +func (n *TypeAssertExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *TypeAssertExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *TypeAssertExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.X != nil && do(n.X) { + return true + } + return false +} +func (n *TypeAssertExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } +} +func (n *TypeAssertExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } + if n.ITab != nil { + n.ITab = edit(n.ITab).(Node) + } +} + +func (n *TypeSwitchGuard) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *TypeSwitchGuard) copy() Node { + c := *n + return &c +} +func (n *TypeSwitchGuard) doChildren(do func(Node) bool) bool { + if n.Tag != nil && do(n.Tag) { + return true + } + if n.X != nil && do(n.X) { + return true + } + return false +} +func (n *TypeSwitchGuard) editChildren(edit func(Node) Node) { + if n.Tag != nil { + n.Tag = edit(n.Tag).(*Ident) + } + if n.X != nil { + n.X = edit(n.X).(Node) + } +} +func (n *TypeSwitchGuard) 
editChildrenWithHidden(edit func(Node) Node) { + if n.Tag != nil { + n.Tag = edit(n.Tag).(*Ident) + } + if n.X != nil { + n.X = edit(n.X).(Node) + } +} + +func (n *UnaryExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *UnaryExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *UnaryExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.X != nil && do(n.X) { + return true + } + return false +} +func (n *UnaryExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } +} +func (n *UnaryExpr) editChildrenWithHidden(edit func(Node) Node) { + editNodes(n.init, edit) + if n.X != nil { + n.X = edit(n.X).(Node) + } +} + +func (n *typeNode) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *typeNode) copy() Node { + c := *n + return &c +} +func (n *typeNode) doChildren(do func(Node) bool) bool { + return false +} +func (n *typeNode) editChildren(edit func(Node) Node) { +} +func (n *typeNode) editChildrenWithHidden(edit func(Node) Node) { +} + +func copyCaseClauses(list []*CaseClause) []*CaseClause { + if list == nil { + return nil + } + c := make([]*CaseClause, len(list)) + copy(c, list) + return c +} +func doCaseClauses(list []*CaseClause, do func(Node) bool) bool { + for _, x := range list { + if x != nil && do(x) { + return true + } + } + return false +} +func editCaseClauses(list []*CaseClause, edit func(Node) Node) { + for i, x := range list { + if x != nil { + list[i] = edit(x).(*CaseClause) + } + } +} + +func copyCommClauses(list []*CommClause) []*CommClause { + if list == nil { + return nil + } + c := make([]*CommClause, len(list)) + copy(c, list) + return c +} +func doCommClauses(list []*CommClause, do func(Node) bool) bool { + for _, x := range list { + if x != nil && do(x) { + return true + } + } + return false +} +func editCommClauses(list []*CommClause, edit func(Node) Node) { + for i, x := range list 
{ + if x != nil { + list[i] = edit(x).(*CommClause) + } + } +} + +func copyNames(list []*Name) []*Name { + if list == nil { + return nil + } + c := make([]*Name, len(list)) + copy(c, list) + return c +} +func doNames(list []*Name, do func(Node) bool) bool { + for _, x := range list { + if x != nil && do(x) { + return true + } + } + return false +} +func editNames(list []*Name, edit func(Node) Node) { + for i, x := range list { + if x != nil { + list[i] = edit(x).(*Name) + } + } +} + +func copyNodes(list []Node) []Node { + if list == nil { + return nil + } + c := make([]Node, len(list)) + copy(c, list) + return c +} +func doNodes(list []Node, do func(Node) bool) bool { + for _, x := range list { + if x != nil && do(x) { + return true + } + } + return false +} +func editNodes(list []Node, edit func(Node) Node) { + for i, x := range list { + if x != nil { + list[i] = edit(x).(Node) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/op_string.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/op_string.go new file mode 100644 index 0000000000000000000000000000000000000000..fb97ac68f459049a8dea166d5083eb7e3aeab39a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/op_string.go @@ -0,0 +1,174 @@ +// Code generated by "stringer -type=Op -trimprefix=O node.go"; DO NOT EDIT. + +package ir + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[OXXX-0] + _ = x[ONAME-1] + _ = x[ONONAME-2] + _ = x[OTYPE-3] + _ = x[OLITERAL-4] + _ = x[ONIL-5] + _ = x[OADD-6] + _ = x[OSUB-7] + _ = x[OOR-8] + _ = x[OXOR-9] + _ = x[OADDSTR-10] + _ = x[OADDR-11] + _ = x[OANDAND-12] + _ = x[OAPPEND-13] + _ = x[OBYTES2STR-14] + _ = x[OBYTES2STRTMP-15] + _ = x[ORUNES2STR-16] + _ = x[OSTR2BYTES-17] + _ = x[OSTR2BYTESTMP-18] + _ = x[OSTR2RUNES-19] + _ = x[OSLICE2ARR-20] + _ = x[OSLICE2ARRPTR-21] + _ = x[OAS-22] + _ = x[OAS2-23] + _ = x[OAS2DOTTYPE-24] + _ = x[OAS2FUNC-25] + _ = x[OAS2MAPR-26] + _ = x[OAS2RECV-27] + _ = x[OASOP-28] + _ = x[OCALL-29] + _ = x[OCALLFUNC-30] + _ = x[OCALLMETH-31] + _ = x[OCALLINTER-32] + _ = x[OCAP-33] + _ = x[OCLEAR-34] + _ = x[OCLOSE-35] + _ = x[OCLOSURE-36] + _ = x[OCOMPLIT-37] + _ = x[OMAPLIT-38] + _ = x[OSTRUCTLIT-39] + _ = x[OARRAYLIT-40] + _ = x[OSLICELIT-41] + _ = x[OPTRLIT-42] + _ = x[OCONV-43] + _ = x[OCONVIFACE-44] + _ = x[OCONVNOP-45] + _ = x[OCOPY-46] + _ = x[ODCL-47] + _ = x[ODCLFUNC-48] + _ = x[ODELETE-49] + _ = x[ODOT-50] + _ = x[ODOTPTR-51] + _ = x[ODOTMETH-52] + _ = x[ODOTINTER-53] + _ = x[OXDOT-54] + _ = x[ODOTTYPE-55] + _ = x[ODOTTYPE2-56] + _ = x[OEQ-57] + _ = x[ONE-58] + _ = x[OLT-59] + _ = x[OLE-60] + _ = x[OGE-61] + _ = x[OGT-62] + _ = x[ODEREF-63] + _ = x[OINDEX-64] + _ = x[OINDEXMAP-65] + _ = x[OKEY-66] + _ = x[OSTRUCTKEY-67] + _ = x[OLEN-68] + _ = x[OMAKE-69] + _ = x[OMAKECHAN-70] + _ = x[OMAKEMAP-71] + _ = x[OMAKESLICE-72] + _ = x[OMAKESLICECOPY-73] + _ = x[OMUL-74] + _ = x[ODIV-75] + _ = x[OMOD-76] + _ = x[OLSH-77] + _ = x[ORSH-78] + _ = x[OAND-79] + _ = x[OANDNOT-80] + _ = x[ONEW-81] + _ = x[ONOT-82] + _ = x[OBITNOT-83] + _ = x[OPLUS-84] + _ = x[ONEG-85] + _ = x[OOROR-86] + _ = x[OPANIC-87] + _ = x[OPRINT-88] + _ = x[OPRINTLN-89] + _ = x[OPAREN-90] + _ = x[OSEND-91] + _ = x[OSLICE-92] + _ = x[OSLICEARR-93] + _ = x[OSLICESTR-94] + _ = x[OSLICE3-95] + _ = x[OSLICE3ARR-96] + _ = x[OSLICEHEADER-97] + _ = x[OSTRINGHEADER-98] + _ = x[ORECOVER-99] + _ = 
x[ORECOVERFP-100] + _ = x[ORECV-101] + _ = x[ORUNESTR-102] + _ = x[OSELRECV2-103] + _ = x[OMIN-104] + _ = x[OMAX-105] + _ = x[OREAL-106] + _ = x[OIMAG-107] + _ = x[OCOMPLEX-108] + _ = x[OUNSAFEADD-109] + _ = x[OUNSAFESLICE-110] + _ = x[OUNSAFESLICEDATA-111] + _ = x[OUNSAFESTRING-112] + _ = x[OUNSAFESTRINGDATA-113] + _ = x[OMETHEXPR-114] + _ = x[OMETHVALUE-115] + _ = x[OBLOCK-116] + _ = x[OBREAK-117] + _ = x[OCASE-118] + _ = x[OCONTINUE-119] + _ = x[ODEFER-120] + _ = x[OFALL-121] + _ = x[OFOR-122] + _ = x[OGOTO-123] + _ = x[OIF-124] + _ = x[OLABEL-125] + _ = x[OGO-126] + _ = x[ORANGE-127] + _ = x[ORETURN-128] + _ = x[OSELECT-129] + _ = x[OSWITCH-130] + _ = x[OTYPESW-131] + _ = x[OINLCALL-132] + _ = x[OMAKEFACE-133] + _ = x[OITAB-134] + _ = x[OIDATA-135] + _ = x[OSPTR-136] + _ = x[OCFUNC-137] + _ = x[OCHECKNIL-138] + _ = x[ORESULT-139] + _ = x[OINLMARK-140] + _ = x[OLINKSYMOFFSET-141] + _ = x[OJUMPTABLE-142] + _ = x[OINTERFACESWITCH-143] + _ = x[ODYNAMICDOTTYPE-144] + _ = x[ODYNAMICDOTTYPE2-145] + _ = x[ODYNAMICTYPE-146] + _ = x[OTAILCALL-147] + _ = x[OGETG-148] + _ = x[OGETCALLERPC-149] + _ = x[OGETCALLERSP-150] + _ = x[OEND-151] +} + +const _Op_name = 
"XXXNAMENONAMETYPELITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLEARCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERSTRINGHEADERRECOVERRECOVERFPRECVRUNESTRSELRECV2MINMAXREALIMAGCOMPLEXUNSAFEADDUNSAFESLICEUNSAFESLICEDATAUNSAFESTRINGUNSAFESTRINGDATAMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWINLCALLMAKEFACEITABIDATASPTRCFUNCCHECKNILRESULTINLMARKLINKSYMOFFSETJUMPTABLEINTERFACESWITCHDYNAMICDOTTYPEDYNAMICDOTTYPE2DYNAMICTYPETAILCALLGETGGETCALLERPCGETCALLERSPEND" + +var _Op_index = [...]uint16{0, 3, 7, 13, 17, 24, 27, 30, 33, 35, 38, 44, 48, 54, 60, 69, 81, 90, 99, 111, 120, 129, 141, 143, 146, 156, 163, 170, 177, 181, 185, 193, 201, 210, 213, 218, 223, 230, 237, 243, 252, 260, 268, 274, 278, 287, 294, 298, 301, 308, 314, 317, 323, 330, 338, 342, 349, 357, 359, 361, 363, 365, 367, 369, 374, 379, 387, 390, 399, 402, 406, 414, 421, 430, 443, 446, 449, 452, 455, 458, 461, 467, 470, 473, 479, 483, 486, 490, 495, 500, 506, 511, 515, 520, 528, 536, 542, 551, 562, 574, 581, 590, 594, 601, 609, 612, 615, 619, 623, 630, 639, 650, 665, 677, 693, 701, 710, 715, 720, 724, 732, 737, 741, 744, 748, 750, 755, 757, 762, 768, 774, 780, 786, 793, 801, 805, 810, 814, 819, 827, 833, 840, 853, 862, 877, 891, 906, 917, 925, 929, 940, 951, 954} + +func (i Op) String() string { + if i >= Op(len(_Op_index)-1) { + return "Op(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Op_name[_Op_index[i]:_Op_index[i+1]] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/package.go 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/package.go new file mode 100644 index 0000000000000000000000000000000000000000..3b70a9281a34791a5edea4cbf897db60804b12fb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/package.go @@ -0,0 +1,42 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import "cmd/compile/internal/types" + +// A Package holds information about the package being compiled. +type Package struct { + // Imports, listed in source order. + // See golang.org/issue/31636. + Imports []*types.Pkg + + // Init functions, listed in source order. + Inits []*Func + + // Funcs contains all (instantiated) functions, methods, and + // function literals to be compiled. + Funcs []*Func + + // Externs holds constants, (non-generic) types, and variables + // declared at package scope. + Externs []*Name + + // AsmHdrDecls holds declared constants and struct types that should + // be included in -asmhdr output. It's only populated when -asmhdr + // is set. + AsmHdrDecls []*Name + + // Cgo directives. + CgoPragmas [][]string + + // Variables with //go:embed lines. + Embeds []*Name + + // PluginExports holds exported functions and variables that are + // accessible through the package plugin API. It's only populated + // for -buildmode=plugin (i.e., compiling package main and -dynlink + // is set). + PluginExports []*Name +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/reassign_consistency_check.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/reassign_consistency_check.go new file mode 100644 index 0000000000000000000000000000000000000000..e4d928d13263ed4d3bd6f3d53fbd688f119638bc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/reassign_consistency_check.go @@ -0,0 +1,46 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "cmd/compile/internal/base" + "cmd/internal/src" + "fmt" + "path/filepath" + "strings" +) + +// checkStaticValueResult compares the result from ReassignOracle.StaticValue +// with the corresponding result from ir.StaticValue to make sure they agree. +// This method is called only when turned on via build tag. +func checkStaticValueResult(n Node, newres Node) { + oldres := StaticValue(n) + if oldres != newres { + base.Fatalf("%s: new/old static value disagreement on %v:\nnew=%v\nold=%v", fmtFullPos(n.Pos()), n, newres, oldres) + } +} + +// checkStaticValueResult compares the result from ReassignOracle.Reassigned +// with the corresponding result from ir.Reassigned to make sure they agree. +// This method is called only when turned on via build tag. +func checkReassignedResult(n *Name, newres bool) { + origres := Reassigned(n) + if newres != origres { + base.Fatalf("%s: new/old reassigned disagreement on %v (class %s) newres=%v oldres=%v", fmtFullPos(n.Pos()), n, n.Class.String(), newres, origres) + } +} + +// fmtFullPos returns a verbose dump for pos p, including inlines. +func fmtFullPos(p src.XPos) string { + var sb strings.Builder + sep := "" + base.Ctxt.AllPos(p, func(pos src.Pos) { + fmt.Fprintf(&sb, sep) + sep = "|" + file := filepath.Base(pos.Filename()) + fmt.Fprintf(&sb, "%s:%d:%d", file, pos.Line(), pos.Col()) + }) + return sb.String() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/reassignment.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/reassignment.go new file mode 100644 index 0000000000000000000000000000000000000000..9974292471abda8780f1afeb5e74996fcf365843 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/reassignment.go @@ -0,0 +1,205 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "cmd/compile/internal/base" +) + +// A ReassignOracle efficiently answers queries about whether local +// variables are reassigned. This helper works by looking for function +// params and short variable declarations (e.g. +// https://go.dev/ref/spec#Short_variable_declarations) that are +// neither address taken nor subsequently re-assigned. It is intended +// to operate much like "ir.StaticValue" and "ir.Reassigned", but in a +// way that does just a single walk of the containing function (as +// opposed to a new walk on every call). +type ReassignOracle struct { + fn *Func + // maps candidate name to its defining assignment (or for + // for params, defining func). + singleDef map[*Name]Node +} + +// Init initializes the oracle based on the IR in function fn, laying +// the groundwork for future calls to the StaticValue and Reassigned +// methods. If the fn's IR is subsequently modified, Init must be +// called again. +func (ro *ReassignOracle) Init(fn *Func) { + ro.fn = fn + + // Collect candidate map. Start by adding function parameters + // explicitly. + ro.singleDef = make(map[*Name]Node) + sig := fn.Type() + numParams := sig.NumRecvs() + sig.NumParams() + for _, param := range fn.Dcl[:numParams] { + if IsBlank(param) { + continue + } + // For params, use func itself as defining node. + ro.singleDef[param] = fn + } + + // Walk the function body to discover any locals assigned + // via ":=" syntax (e.g. "a := "). 
+ var findLocals func(n Node) bool + findLocals = func(n Node) bool { + if nn, ok := n.(*Name); ok { + if nn.Defn != nil && !nn.Addrtaken() && nn.Class == PAUTO { + ro.singleDef[nn] = nn.Defn + } + } else if nn, ok := n.(*ClosureExpr); ok { + Any(nn.Func, findLocals) + } + return false + } + Any(fn, findLocals) + + outerName := func(x Node) *Name { + if x == nil { + return nil + } + n, ok := OuterValue(x).(*Name) + if ok { + return n.Canonical() + } + return nil + } + + // pruneIfNeeded examines node nn appearing on the left hand side + // of assignment statement asn to see if it contains a reassignment + // to any nodes in our candidate map ro.singleDef; if a reassignment + // is found, the corresponding name is deleted from singleDef. + pruneIfNeeded := func(nn Node, asn Node) { + oname := outerName(nn) + if oname == nil { + return + } + defn, ok := ro.singleDef[oname] + if !ok { + return + } + // any assignment to a param invalidates the entry. + paramAssigned := oname.Class == PPARAM + // assignment to local ok iff assignment is its orig def. + localAssigned := (oname.Class == PAUTO && asn != defn) + if paramAssigned || localAssigned { + // We found an assignment to name N that doesn't + // correspond to its original definition; remove + // from candidates. + delete(ro.singleDef, oname) + } + } + + // Prune away anything that looks assigned. This code modeled after + // similar code in ir.Reassigned; any changes there should be made + // here as well. 
+ var do func(n Node) bool + do = func(n Node) bool { + switch n.Op() { + case OAS: + asn := n.(*AssignStmt) + pruneIfNeeded(asn.X, n) + case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE, OAS2RECV, OSELRECV2: + asn := n.(*AssignListStmt) + for _, p := range asn.Lhs { + pruneIfNeeded(p, n) + } + case OASOP: + asn := n.(*AssignOpStmt) + pruneIfNeeded(asn.X, n) + case ORANGE: + rs := n.(*RangeStmt) + pruneIfNeeded(rs.Key, n) + pruneIfNeeded(rs.Value, n) + case OCLOSURE: + n := n.(*ClosureExpr) + Any(n.Func, do) + } + return false + } + Any(fn, do) +} + +// StaticValue method has the same semantics as the ir package function +// of the same name; see comments on [StaticValue]. +func (ro *ReassignOracle) StaticValue(n Node) Node { + arg := n + for { + if n.Op() == OCONVNOP { + n = n.(*ConvExpr).X + continue + } + + if n.Op() == OINLCALL { + n = n.(*InlinedCallExpr).SingleResult() + continue + } + + n1 := ro.staticValue1(n) + if n1 == nil { + if consistencyCheckEnabled { + checkStaticValueResult(arg, n) + } + return n + } + n = n1 + } +} + +func (ro *ReassignOracle) staticValue1(nn Node) Node { + if nn.Op() != ONAME { + return nil + } + n := nn.(*Name).Canonical() + if n.Class != PAUTO { + return nil + } + + defn := n.Defn + if defn == nil { + return nil + } + + var rhs Node +FindRHS: + switch defn.Op() { + case OAS: + defn := defn.(*AssignStmt) + rhs = defn.Y + case OAS2: + defn := defn.(*AssignListStmt) + for i, lhs := range defn.Lhs { + if lhs == n { + rhs = defn.Rhs[i] + break FindRHS + } + } + base.Fatalf("%v missing from LHS of %v", n, defn) + default: + return nil + } + if rhs == nil { + base.Fatalf("RHS is nil: %v", defn) + } + + if _, ok := ro.singleDef[n]; !ok { + return nil + } + + return rhs +} + +// Reassigned method has the same semantics as the ir package function +// of the same name; see comments on [Reassigned] for more info. 
+func (ro *ReassignOracle) Reassigned(n *Name) bool { + _, ok := ro.singleDef[n] + result := !ok + if consistencyCheckEnabled { + checkReassignedResult(n, result) + } + return result +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/scc.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/scc.go new file mode 100644 index 0000000000000000000000000000000000000000..a640f4fc16d32572c7ec029afbfbc2e174482740 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/scc.go @@ -0,0 +1,125 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// Strongly connected components. +// +// Run analysis on minimal sets of mutually recursive functions +// or single non-recursive functions, bottom up. +// +// Finding these sets is finding strongly connected components +// by reverse topological order in the static call graph. +// The algorithm (known as Tarjan's algorithm) for doing that is taken from +// Sedgewick, Algorithms, Second Edition, p. 482, with two adaptations. +// +// First, a hidden closure function (n.Func.IsHiddenClosure()) cannot be the +// root of a connected component. Refusing to use it as a root +// forces it into the component of the function in which it appears. +// This is more convenient for escape analysis. +// +// Second, each function becomes two virtual nodes in the graph, +// with numbers n and n+1. We record the function's node number as n +// but search from node n+1. If the search tells us that the component +// number (min) is n+1, we know that this is a trivial component: one function +// plus its closures. If the search tells us that the component number is +// n, then there was a path from node n+1 back to node n, meaning that +// the function set is mutually recursive. 
The escape analysis can be +// more precise when analyzing a single non-recursive function than +// when analyzing a set of mutually recursive functions. + +type bottomUpVisitor struct { + analyze func([]*Func, bool) + visitgen uint32 + nodeID map[*Func]uint32 + stack []*Func +} + +// VisitFuncsBottomUp invokes analyze on the ODCLFUNC nodes listed in list. +// It calls analyze with successive groups of functions, working from +// the bottom of the call graph upward. Each time analyze is called with +// a list of functions, every function on that list only calls other functions +// on the list or functions that have been passed in previous invocations of +// analyze. Closures appear in the same list as their outer functions. +// The lists are as short as possible while preserving those requirements. +// (In a typical program, many invocations of analyze will be passed just +// a single function.) The boolean argument 'recursive' passed to analyze +// specifies whether the functions on the list are mutually recursive. +// If recursive is false, the list consists of only a single function and its closures. +// If recursive is true, the list may still contain only a single function, +// if that function is itself recursive. 
+func VisitFuncsBottomUp(list []*Func, analyze func(list []*Func, recursive bool)) { + var v bottomUpVisitor + v.analyze = analyze + v.nodeID = make(map[*Func]uint32) + for _, n := range list { + if !n.IsHiddenClosure() { + v.visit(n) + } + } +} + +func (v *bottomUpVisitor) visit(n *Func) uint32 { + if id := v.nodeID[n]; id > 0 { + // already visited + return id + } + + v.visitgen++ + id := v.visitgen + v.nodeID[n] = id + v.visitgen++ + min := v.visitgen + v.stack = append(v.stack, n) + + do := func(defn Node) { + if defn != nil { + if m := v.visit(defn.(*Func)); m < min { + min = m + } + } + } + + Visit(n, func(n Node) { + switch n.Op() { + case ONAME: + if n := n.(*Name); n.Class == PFUNC { + do(n.Defn) + } + case ODOTMETH, OMETHVALUE, OMETHEXPR: + if fn := MethodExprName(n); fn != nil { + do(fn.Defn) + } + case OCLOSURE: + n := n.(*ClosureExpr) + do(n.Func) + } + }) + + if (min == id || min == id+1) && !n.IsHiddenClosure() { + // This node is the root of a strongly connected component. + + // The original min was id+1. If the bottomUpVisitor found its way + // back to id, then this block is a set of mutually recursive functions. + // Otherwise, it's just a lone function that does not recurse. + recursive := min == id + + // Remove connected component from stack and mark v.nodeID so that future + // visits return a large number, which will not affect the caller's min. + var i int + for i = len(v.stack) - 1; i >= 0; i-- { + x := v.stack[i] + v.nodeID[x] = ^uint32(0) + if x == n { + break + } + } + block := v.stack[i:] + // Call analyze on this set of functions. 
+ v.stack = v.stack[:i] + v.analyze(block, recursive) + } + + return min +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/sizeof_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/sizeof_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3b6823895cf3b54feda0aa577655c9f4d3e82fa4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/sizeof_test.go @@ -0,0 +1,37 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "reflect" + "testing" + "unsafe" +) + +// Assert that the size of important structures do not change unexpectedly. + +func TestSizeof(t *testing.T) { + const _64bit = unsafe.Sizeof(uintptr(0)) == 8 + + var tests = []struct { + val interface{} // type as a value + _32bit uintptr // size on 32bit platforms + _64bit uintptr // size on 64bit platforms + }{ + {Func{}, 168, 288}, + {Name{}, 96, 168}, + } + + for _, tt := range tests { + want := tt._32bit + if _64bit { + want = tt._64bit + } + got := reflect.TypeOf(tt.val).Size() + if want != got { + t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/stmt.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/stmt.go new file mode 100644 index 0000000000000000000000000000000000000000..0801ecdd9e8722f825fd7de0c55b5616114a9a58 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/stmt.go @@ -0,0 +1,505 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/src" + "go/constant" +) + +// A Decl is a declaration of a const, type, or var. 
(A declared func is a Func.) +type Decl struct { + miniNode + X *Name // the thing being declared +} + +func NewDecl(pos src.XPos, op Op, x *Name) *Decl { + n := &Decl{X: x} + n.pos = pos + switch op { + default: + panic("invalid Decl op " + op.String()) + case ODCL: + n.op = op + } + return n +} + +func (*Decl) isStmt() {} + +// A Stmt is a Node that can appear as a statement. +// This includes statement-like expressions such as f(). +// +// (It's possible it should include <-c, but that would require +// splitting ORECV out of UnaryExpr, which hasn't yet been +// necessary. Maybe instead we will introduce ExprStmt at +// some point.) +type Stmt interface { + Node + isStmt() +} + +// A miniStmt is a miniNode with extra fields common to statements. +type miniStmt struct { + miniNode + init Nodes +} + +func (*miniStmt) isStmt() {} + +func (n *miniStmt) Init() Nodes { return n.init } +func (n *miniStmt) SetInit(x Nodes) { n.init = x } +func (n *miniStmt) PtrInit() *Nodes { return &n.init } + +// An AssignListStmt is an assignment statement with +// more than one item on at least one side: Lhs = Rhs. +// If Def is true, the assignment is a :=. +type AssignListStmt struct { + miniStmt + Lhs Nodes + Def bool + Rhs Nodes +} + +func NewAssignListStmt(pos src.XPos, op Op, lhs, rhs []Node) *AssignListStmt { + n := &AssignListStmt{} + n.pos = pos + n.SetOp(op) + n.Lhs = lhs + n.Rhs = rhs + return n +} + +func (n *AssignListStmt) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV, OSELRECV2: + n.op = op + } +} + +// An AssignStmt is a simple assignment statement: X = Y. +// If Def is true, the assignment is a :=. 
+type AssignStmt struct { + miniStmt + X Node + Def bool + Y Node +} + +func NewAssignStmt(pos src.XPos, x, y Node) *AssignStmt { + n := &AssignStmt{X: x, Y: y} + n.pos = pos + n.op = OAS + return n +} + +func (n *AssignStmt) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OAS: + n.op = op + } +} + +// An AssignOpStmt is an AsOp= assignment statement: X AsOp= Y. +type AssignOpStmt struct { + miniStmt + X Node + AsOp Op // OADD etc + Y Node + IncDec bool // actually ++ or -- +} + +func NewAssignOpStmt(pos src.XPos, asOp Op, x, y Node) *AssignOpStmt { + n := &AssignOpStmt{AsOp: asOp, X: x, Y: y} + n.pos = pos + n.op = OASOP + return n +} + +// A BlockStmt is a block: { List }. +type BlockStmt struct { + miniStmt + List Nodes +} + +func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt { + n := &BlockStmt{} + n.pos = pos + if !pos.IsKnown() { + n.pos = base.Pos + if len(list) > 0 { + n.pos = list[0].Pos() + } + } + n.op = OBLOCK + n.List = list + return n +} + +// A BranchStmt is a break, continue, fallthrough, or goto statement. +type BranchStmt struct { + miniStmt + Label *types.Sym // label if present +} + +func NewBranchStmt(pos src.XPos, op Op, label *types.Sym) *BranchStmt { + switch op { + case OBREAK, OCONTINUE, OFALL, OGOTO: + // ok + default: + panic("NewBranch " + op.String()) + } + n := &BranchStmt{Label: label} + n.pos = pos + n.op = op + return n +} + +func (n *BranchStmt) SetOp(op Op) { + switch op { + default: + panic(n.no("SetOp " + op.String())) + case OBREAK, OCONTINUE, OFALL, OGOTO: + n.op = op + } +} + +func (n *BranchStmt) Sym() *types.Sym { return n.Label } + +// A CaseClause is a case statement in a switch or select: case List: Body. 
+type CaseClause struct { + miniStmt + Var *Name // declared variable for this case in type switch + List Nodes // list of expressions for switch, early select + + // RTypes is a list of RType expressions, which are copied to the + // corresponding OEQ nodes that are emitted when switch statements + // are desugared. RTypes[i] must be non-nil if the emitted + // comparison for List[i] will be a mixed interface/concrete + // comparison; see reflectdata.CompareRType for details. + // + // Because mixed interface/concrete switch cases are rare, we allow + // len(RTypes) < len(List). Missing entries are implicitly nil. + RTypes Nodes + + Body Nodes +} + +func NewCaseStmt(pos src.XPos, list, body []Node) *CaseClause { + n := &CaseClause{List: list, Body: body} + n.pos = pos + n.op = OCASE + return n +} + +type CommClause struct { + miniStmt + Comm Node // communication case + Body Nodes +} + +func NewCommStmt(pos src.XPos, comm Node, body []Node) *CommClause { + n := &CommClause{Comm: comm, Body: body} + n.pos = pos + n.op = OCASE + return n +} + +// A ForStmt is a non-range for loop: for Init; Cond; Post { Body } +type ForStmt struct { + miniStmt + Label *types.Sym + Cond Node + Post Node + Body Nodes + DistinctVars bool +} + +func NewForStmt(pos src.XPos, init Node, cond, post Node, body []Node, distinctVars bool) *ForStmt { + n := &ForStmt{Cond: cond, Post: post} + n.pos = pos + n.op = OFOR + if init != nil { + n.init = []Node{init} + } + n.Body = body + n.DistinctVars = distinctVars + return n +} + +// A GoDeferStmt is a go or defer statement: go Call / defer Call. +// +// The two opcodes use a single syntax because the implementations +// are very similar: both are concerned with saving Call and running it +// in a different context (a separate goroutine or a later time). 
+type GoDeferStmt struct { + miniStmt + Call Node + DeferAt Expr +} + +func NewGoDeferStmt(pos src.XPos, op Op, call Node) *GoDeferStmt { + n := &GoDeferStmt{Call: call} + n.pos = pos + switch op { + case ODEFER, OGO: + n.op = op + default: + panic("NewGoDeferStmt " + op.String()) + } + return n +} + +// An IfStmt is a return statement: if Init; Cond { Body } else { Else }. +type IfStmt struct { + miniStmt + Cond Node + Body Nodes + Else Nodes + Likely bool // code layout hint +} + +func NewIfStmt(pos src.XPos, cond Node, body, els []Node) *IfStmt { + n := &IfStmt{Cond: cond} + n.pos = pos + n.op = OIF + n.Body = body + n.Else = els + return n +} + +// A JumpTableStmt is used to implement switches. Its semantics are: +// +// tmp := jt.Idx +// if tmp == Cases[0] goto Targets[0] +// if tmp == Cases[1] goto Targets[1] +// ... +// if tmp == Cases[n] goto Targets[n] +// +// Note that a JumpTableStmt is more like a multiway-goto than +// a multiway-if. In particular, the case bodies are just +// labels to jump to, not full Nodes lists. +type JumpTableStmt struct { + miniStmt + + // Value used to index the jump table. + // We support only integer types that + // are at most the size of a uintptr. + Idx Node + + // If Idx is equal to Cases[i], jump to Targets[i]. + // Cases entries must be distinct and in increasing order. + // The length of Cases and Targets must be equal. + Cases []constant.Value + Targets []*types.Sym +} + +func NewJumpTableStmt(pos src.XPos, idx Node) *JumpTableStmt { + n := &JumpTableStmt{Idx: idx} + n.pos = pos + n.op = OJUMPTABLE + return n +} + +// An InterfaceSwitchStmt is used to implement type switches. +// Its semantics are: +// +// if RuntimeType implements Descriptor.Cases[0] { +// Case, Itab = 0, itab +// } else if RuntimeType implements Descriptor.Cases[1] { +// Case, Itab = 1, itab +// ... 
+// } else if RuntimeType implements Descriptor.Cases[N-1] { +// Case, Itab = N-1, itab +// } else { +// Case, Itab = len(cases), nil +// } +// +// RuntimeType must be a non-nil *runtime._type. +// Hash must be the hash field of RuntimeType (or its copy loaded from an itab). +// Descriptor must represent an abi.InterfaceSwitch global variable. +type InterfaceSwitchStmt struct { + miniStmt + + Case Node + Itab Node + RuntimeType Node + Hash Node + Descriptor *obj.LSym +} + +func NewInterfaceSwitchStmt(pos src.XPos, case_, itab, runtimeType, hash Node, descriptor *obj.LSym) *InterfaceSwitchStmt { + n := &InterfaceSwitchStmt{ + Case: case_, + Itab: itab, + RuntimeType: runtimeType, + Hash: hash, + Descriptor: descriptor, + } + n.pos = pos + n.op = OINTERFACESWITCH + return n +} + +// An InlineMarkStmt is a marker placed just before an inlined body. +type InlineMarkStmt struct { + miniStmt + Index int64 +} + +func NewInlineMarkStmt(pos src.XPos, index int64) *InlineMarkStmt { + n := &InlineMarkStmt{Index: index} + n.pos = pos + n.op = OINLMARK + return n +} + +func (n *InlineMarkStmt) Offset() int64 { return n.Index } +func (n *InlineMarkStmt) SetOffset(x int64) { n.Index = x } + +// A LabelStmt is a label statement (just the label, not including the statement it labels). +type LabelStmt struct { + miniStmt + Label *types.Sym // "Label:" +} + +func NewLabelStmt(pos src.XPos, label *types.Sym) *LabelStmt { + n := &LabelStmt{Label: label} + n.pos = pos + n.op = OLABEL + return n +} + +func (n *LabelStmt) Sym() *types.Sym { return n.Label } + +// A RangeStmt is a range loop: for Key, Value = range X { Body } +type RangeStmt struct { + miniStmt + Label *types.Sym + Def bool + X Node + RType Node `mknode:"-"` // see reflectdata/helpers.go + Key Node + Value Node + Body Nodes + DistinctVars bool + Prealloc *Name + + // When desugaring the RangeStmt during walk, the assignments to Key + // and Value may require OCONVIFACE operations. 
If so, these fields + // will be copied to their respective ConvExpr fields. + KeyTypeWord Node `mknode:"-"` + KeySrcRType Node `mknode:"-"` + ValueTypeWord Node `mknode:"-"` + ValueSrcRType Node `mknode:"-"` +} + +func NewRangeStmt(pos src.XPos, key, value, x Node, body []Node, distinctVars bool) *RangeStmt { + n := &RangeStmt{X: x, Key: key, Value: value} + n.pos = pos + n.op = ORANGE + n.Body = body + n.DistinctVars = distinctVars + return n +} + +// A ReturnStmt is a return statement. +type ReturnStmt struct { + miniStmt + Results Nodes // return list +} + +func NewReturnStmt(pos src.XPos, results []Node) *ReturnStmt { + n := &ReturnStmt{} + n.pos = pos + n.op = ORETURN + n.Results = results + return n +} + +// A SelectStmt is a block: { Cases }. +type SelectStmt struct { + miniStmt + Label *types.Sym + Cases []*CommClause + + // TODO(rsc): Instead of recording here, replace with a block? + Compiled Nodes // compiled form, after walkSelect +} + +func NewSelectStmt(pos src.XPos, cases []*CommClause) *SelectStmt { + n := &SelectStmt{Cases: cases} + n.pos = pos + n.op = OSELECT + return n +} + +// A SendStmt is a send statement: X <- Y. +type SendStmt struct { + miniStmt + Chan Node + Value Node +} + +func NewSendStmt(pos src.XPos, ch, value Node) *SendStmt { + n := &SendStmt{Chan: ch, Value: value} + n.pos = pos + n.op = OSEND + return n +} + +// A SwitchStmt is a switch statement: switch Init; Tag { Cases }. +type SwitchStmt struct { + miniStmt + Tag Node + Cases []*CaseClause + Label *types.Sym + + // TODO(rsc): Instead of recording here, replace with a block? + Compiled Nodes // compiled form, after walkSwitch +} + +func NewSwitchStmt(pos src.XPos, tag Node, cases []*CaseClause) *SwitchStmt { + n := &SwitchStmt{Tag: tag, Cases: cases} + n.pos = pos + n.op = OSWITCH + return n +} + +// A TailCallStmt is a tail call statement, which is used for back-end +// code generation to jump directly to another function entirely. 
+type TailCallStmt struct { + miniStmt + Call *CallExpr // the underlying call +} + +func NewTailCallStmt(pos src.XPos, call *CallExpr) *TailCallStmt { + n := &TailCallStmt{Call: call} + n.pos = pos + n.op = OTAILCALL + return n +} + +// A TypeSwitchGuard is the [Name :=] X.(type) in a type switch. +type TypeSwitchGuard struct { + miniNode + Tag *Ident + X Node + Used bool +} + +func NewTypeSwitchGuard(pos src.XPos, tag *Ident, x Node) *TypeSwitchGuard { + n := &TypeSwitchGuard{Tag: tag, X: x} + n.pos = pos + n.op = OTYPESW + return n +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/symtab.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/symtab.go new file mode 100644 index 0000000000000000000000000000000000000000..202c4942dea86f6f947772da56b0eeb0648ea396 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/symtab.go @@ -0,0 +1,82 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "cmd/compile/internal/types" + "cmd/internal/obj" +) + +// Syms holds known symbols. 
+var Syms symsStruct + +type symsStruct struct { + AssertE2I *obj.LSym + AssertE2I2 *obj.LSym + AssertI2I *obj.LSym + AssertI2I2 *obj.LSym + Asanread *obj.LSym + Asanwrite *obj.LSym + CgoCheckMemmove *obj.LSym + CgoCheckPtrWrite *obj.LSym + CheckPtrAlignment *obj.LSym + Deferproc *obj.LSym + Deferprocat *obj.LSym + DeferprocStack *obj.LSym + Deferreturn *obj.LSym + Duffcopy *obj.LSym + Duffzero *obj.LSym + GCWriteBarrier [8]*obj.LSym + Goschedguarded *obj.LSym + Growslice *obj.LSym + InterfaceSwitch *obj.LSym + Memmove *obj.LSym + Msanread *obj.LSym + Msanwrite *obj.LSym + Msanmove *obj.LSym + Newobject *obj.LSym + Newproc *obj.LSym + Panicdivide *obj.LSym + Panicshift *obj.LSym + PanicdottypeE *obj.LSym + PanicdottypeI *obj.LSym + Panicnildottype *obj.LSym + Panicoverflow *obj.LSym + Racefuncenter *obj.LSym + Racefuncexit *obj.LSym + Raceread *obj.LSym + Racereadrange *obj.LSym + Racewrite *obj.LSym + Racewriterange *obj.LSym + TypeAssert *obj.LSym + WBZero *obj.LSym + WBMove *obj.LSym + // Wasm + SigPanic *obj.LSym + Staticuint64s *obj.LSym + Typedmemmove *obj.LSym + Udiv *obj.LSym + WriteBarrier *obj.LSym + Zerobase *obj.LSym + ARM64HasATOMICS *obj.LSym + ARMHasVFPv4 *obj.LSym + X86HasFMA *obj.LSym + X86HasPOPCNT *obj.LSym + X86HasSSE41 *obj.LSym + // Wasm + WasmDiv *obj.LSym + // Wasm + WasmTruncS *obj.LSym + // Wasm + WasmTruncU *obj.LSym +} + +// Pkgs holds known packages. +var Pkgs struct { + Go *types.Pkg + Itab *types.Pkg + Runtime *types.Pkg + Coverage *types.Pkg +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/type.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/type.go new file mode 100644 index 0000000000000000000000000000000000000000..7db76c1427f4bdb6995525810eafed3d600eda29 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/type.go @@ -0,0 +1,69 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +// Calling TypeNode converts a *types.Type to a Node shell. + +// A typeNode is a Node wrapper for type t. +type typeNode struct { + miniNode + typ *types.Type +} + +func newTypeNode(typ *types.Type) *typeNode { + n := &typeNode{typ: typ} + n.pos = src.NoXPos + n.op = OTYPE + n.SetTypecheck(1) + return n +} + +func (n *typeNode) Type() *types.Type { return n.typ } +func (n *typeNode) Sym() *types.Sym { return n.typ.Sym() } + +// TypeNode returns the Node representing the type t. +func TypeNode(t *types.Type) Node { + if n := t.Obj(); n != nil { + if n.Type() != t { + base.Fatalf("type skew: %v has type %v, but expected %v", n, n.Type(), t) + } + return n.(*Name) + } + return newTypeNode(t) +} + +// A DynamicType represents a type expression whose exact type must be +// computed dynamically. +type DynamicType struct { + miniExpr + + // RType is an expression that yields a *runtime._type value + // representing the asserted type. + // + // BUG(mdempsky): If ITab is non-nil, RType may be nil. + RType Node + + // ITab is an expression that yields a *runtime.itab value + // representing the asserted type within the assertee expression's + // original interface type. + // + // ITab is only used for assertions (including type switches) from + // non-empty interface type to a concrete (i.e., non-interface) + // type. For all other assertions, ITab is nil. 
+ ITab Node +} + +func NewDynamicType(pos src.XPos, rtype Node) *DynamicType { + n := &DynamicType{RType: rtype} + n.pos = pos + n.op = ODYNAMICTYPE + return n +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/val.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/val.go new file mode 100644 index 0000000000000000000000000000000000000000..16c8a08ca0ddffd7a65ba1c97e07056ddd5c369f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/val.go @@ -0,0 +1,107 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "go/constant" + + "cmd/compile/internal/base" + "cmd/compile/internal/types" +) + +func ConstType(n Node) constant.Kind { + if n == nil || n.Op() != OLITERAL { + return constant.Unknown + } + return n.Val().Kind() +} + +// IntVal returns v converted to int64. +// Note: if t is uint64, very large values will be converted to negative int64. 
+func IntVal(t *types.Type, v constant.Value) int64 { + if t.IsUnsigned() { + if x, ok := constant.Uint64Val(v); ok { + return int64(x) + } + } else { + if x, ok := constant.Int64Val(v); ok { + return x + } + } + base.Fatalf("%v out of range for %v", v, t) + panic("unreachable") +} + +func AssertValidTypeForConst(t *types.Type, v constant.Value) { + if !ValidTypeForConst(t, v) { + base.Fatalf("%v (%v) does not represent %v (%v)", t, t.Kind(), v, v.Kind()) + } +} + +func ValidTypeForConst(t *types.Type, v constant.Value) bool { + switch v.Kind() { + case constant.Unknown: + return OKForConst[t.Kind()] + case constant.Bool: + return t.IsBoolean() + case constant.String: + return t.IsString() + case constant.Int: + return t.IsInteger() + case constant.Float: + return t.IsFloat() + case constant.Complex: + return t.IsComplex() + } + + base.Fatalf("unexpected constant kind: %v", v) + panic("unreachable") +} + +var OKForConst [types.NTYPE]bool + +// Int64Val returns n as an int64. +// n must be an integer or rune constant. +func Int64Val(n Node) int64 { + if !IsConst(n, constant.Int) { + base.Fatalf("Int64Val(%v)", n) + } + x, ok := constant.Int64Val(n.Val()) + if !ok { + base.Fatalf("Int64Val(%v)", n) + } + return x +} + +// Uint64Val returns n as a uint64. +// n must be an integer or rune constant. +func Uint64Val(n Node) uint64 { + if !IsConst(n, constant.Int) { + base.Fatalf("Uint64Val(%v)", n) + } + x, ok := constant.Uint64Val(n.Val()) + if !ok { + base.Fatalf("Uint64Val(%v)", n) + } + return x +} + +// BoolVal returns n as a bool. +// n must be a boolean constant. +func BoolVal(n Node) bool { + if !IsConst(n, constant.Bool) { + base.Fatalf("BoolVal(%v)", n) + } + return constant.BoolVal(n.Val()) +} + +// StringVal returns the value of a literal string Node as a string. +// n must be a string constant. 
+func StringVal(n Node) string { + if !IsConst(n, constant.String) { + base.Fatalf("StringVal(%v)", n) + } + return constant.StringVal(n.Val()) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/visit.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/visit.go new file mode 100644 index 0000000000000000000000000000000000000000..73ec1de544c08fa552b6c5b3fb89ed960e1e4cc6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ir/visit.go @@ -0,0 +1,209 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// IR visitors for walking the IR tree. +// +// The lowest level helpers are DoChildren and EditChildren, which +// nodes help implement and provide control over whether and when +// recursion happens during the walk of the IR. +// +// Although these are both useful directly, two simpler patterns +// are fairly common and also provided: Visit and Any. + +package ir + +// DoChildren calls do(x) on each of n's non-nil child nodes x. +// If any call returns true, DoChildren stops and returns true. +// Otherwise, DoChildren returns false. +// +// Note that DoChildren(n, do) only calls do(x) for n's immediate children. +// If x's children should be processed, then do(x) must call DoChildren(x, do). +// +// DoChildren allows constructing general traversals of the IR graph +// that can stop early if needed. The most general usage is: +// +// var do func(ir.Node) bool +// do = func(x ir.Node) bool { +// ... processing BEFORE visiting children ... +// if ... should visit children ... { +// ir.DoChildren(x, do) +// ... processing AFTER visiting children ... +// } +// if ... should stop parent DoChildren call from visiting siblings ... 
{ +// return true +// } +// return false +// } +// do(root) +// +// Since DoChildren does not return true itself, if the do function +// never wants to stop the traversal, it can assume that DoChildren +// itself will always return false, simplifying to: +// +// var do func(ir.Node) bool +// do = func(x ir.Node) bool { +// ... processing BEFORE visiting children ... +// if ... should visit children ... { +// ir.DoChildren(x, do) +// } +// ... processing AFTER visiting children ... +// return false +// } +// do(root) +// +// The Visit function illustrates a further simplification of the pattern, +// only processing before visiting children and never stopping: +// +// func Visit(n ir.Node, visit func(ir.Node)) { +// if n == nil { +// return +// } +// var do func(ir.Node) bool +// do = func(x ir.Node) bool { +// visit(x) +// return ir.DoChildren(x, do) +// } +// do(n) +// } +// +// The Any function illustrates a different simplification of the pattern, +// visiting each node and then its children, recursively, until finding +// a node x for which cond(x) returns true, at which point the entire +// traversal stops and returns true. +// +// func Any(n ir.Node, cond(ir.Node) bool) bool { +// if n == nil { +// return false +// } +// var do func(ir.Node) bool +// do = func(x ir.Node) bool { +// return cond(x) || ir.DoChildren(x, do) +// } +// return do(n) +// } +// +// Visit and Any are presented above as examples of how to use +// DoChildren effectively, but of course, usage that fits within the +// simplifications captured by Visit or Any will be best served +// by directly calling the ones provided by this package. +func DoChildren(n Node, do func(Node) bool) bool { + if n == nil { + return false + } + return n.doChildren(do) +} + +// Visit visits each non-nil node x in the IR tree rooted at n +// in a depth-first preorder traversal, calling visit on each node visited. 
+func Visit(n Node, visit func(Node)) { + if n == nil { + return + } + var do func(Node) bool + do = func(x Node) bool { + visit(x) + return DoChildren(x, do) + } + do(n) +} + +// VisitList calls Visit(x, visit) for each node x in the list. +func VisitList(list Nodes, visit func(Node)) { + for _, x := range list { + Visit(x, visit) + } +} + +// VisitFuncAndClosures calls visit on each non-nil node in fn.Body, +// including any nested closure bodies. +func VisitFuncAndClosures(fn *Func, visit func(n Node)) { + VisitList(fn.Body, func(n Node) { + visit(n) + if n, ok := n.(*ClosureExpr); ok && n.Op() == OCLOSURE { + VisitFuncAndClosures(n.Func, visit) + } + }) +} + +// Any looks for a non-nil node x in the IR tree rooted at n +// for which cond(x) returns true. +// Any considers nodes in a depth-first, preorder traversal. +// When Any finds a node x such that cond(x) is true, +// Any ends the traversal and returns true immediately. +// Otherwise Any returns false after completing the entire traversal. +func Any(n Node, cond func(Node) bool) bool { + if n == nil { + return false + } + var do func(Node) bool + do = func(x Node) bool { + return cond(x) || DoChildren(x, do) + } + return do(n) +} + +// AnyList calls Any(x, cond) for each node x in the list, in order. +// If any call returns true, AnyList stops and returns true. +// Otherwise, AnyList returns false after calling Any(x, cond) +// for every x in the list. +func AnyList(list Nodes, cond func(Node) bool) bool { + for _, x := range list { + if Any(x, cond) { + return true + } + } + return false +} + +// EditChildren edits the child nodes of n, replacing each child x with edit(x). +// +// Note that EditChildren(n, edit) only calls edit(x) for n's immediate children. +// If x's children should be processed, then edit(x) must call EditChildren(x, edit). +// +// EditChildren allows constructing general editing passes of the IR graph. 
+// The most general usage is: +// +// var edit func(ir.Node) ir.Node +// edit = func(x ir.Node) ir.Node { +// ... processing BEFORE editing children ... +// if ... should edit children ... { +// EditChildren(x, edit) +// ... processing AFTER editing children ... +// } +// ... return x ... +// } +// n = edit(n) +// +// EditChildren edits the node in place. To edit a copy, call Copy first. +// As an example, a simple deep copy implementation would be: +// +// func deepCopy(n ir.Node) ir.Node { +// var edit func(ir.Node) ir.Node +// edit = func(x ir.Node) ir.Node { +// x = ir.Copy(x) +// ir.EditChildren(x, edit) +// return x +// } +// return edit(n) +// } +// +// Of course, in this case it is better to call ir.DeepCopy than to build one anew. +func EditChildren(n Node, edit func(Node) Node) { + if n == nil { + return + } + n.editChildren(edit) +} + +// EditChildrenWithHidden is like EditChildren, but also edits +// Node-typed fields tagged with `mknode:"-"`. +// +// TODO(mdempsky): Remove the `mknode:"-"` tags so this function can +// go away. +func EditChildrenWithHidden(n Node, edit func(Node) Node) { + if n == nil { + return + } + n.editChildrenWithHidden(edit) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/liveness/arg.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/liveness/arg.go new file mode 100644 index 0000000000000000000000000000000000000000..e1269a10b73900c8bc2f1b236e6e594f94bde319 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/liveness/arg.go @@ -0,0 +1,339 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package liveness + +import ( + "fmt" + "internal/abi" + + "cmd/compile/internal/base" + "cmd/compile/internal/bitvec" + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/ssa" + "cmd/internal/obj" +) + +// Argument liveness tracking. 
+// +// For arguments passed in registers, this file tracks if their spill slots +// are live for runtime traceback. An argument spill slot is live at a PC +// if we know that an actual value has stored into it at or before this point. +// +// Stack args are always live and not tracked in this code. Stack args are +// laid out before register spill slots, so we emit the smallest offset that +// needs tracking. Slots before that offset are always live. That offset is +// usually the offset of the first spill slot. But if the first spill slot is +// always live (e.g. if it is address-taken), it will be the offset of a later +// one. +// +// The liveness information is emitted as a FUNCDATA and a PCDATA. +// +// FUNCDATA format: +// - start (smallest) offset that needs tracking (1 byte) +// - a list of bitmaps. +// In a bitmap bit i is set if the i-th spill slot is live. +// +// At a PC where the liveness info changes, a PCDATA indicates the +// byte offset of the liveness map in the FUNCDATA. PCDATA -1 is a +// special case indicating all slots are live (for binary size +// saving). + +const allLiveIdx = -1 + +// name and offset +type nameOff struct { + n *ir.Name + off int64 +} + +func (a nameOff) FrameOffset() int64 { return a.n.FrameOffset() + a.off } +func (a nameOff) String() string { return fmt.Sprintf("%v+%d", a.n, a.off) } + +type blockArgEffects struct { + livein bitvec.BitVec // variables live at block entry + liveout bitvec.BitVec // variables live at block exit +} + +type argLiveness struct { + fn *ir.Func + f *ssa.Func + args []nameOff // name and offset of spill slots + idx map[nameOff]int32 // index in args + + be []blockArgEffects // indexed by block ID + + bvset bvecSet // Set of liveness bitmaps, used for uniquifying. + + // Liveness map indices at each Value (where it changes) and Block entry. + // During the computation the indices are temporarily index to bvset. 
+ // At the end they will be index (offset) to the output funcdata (changed + // in (*argLiveness).emit). + blockIdx map[ssa.ID]int + valueIdx map[ssa.ID]int +} + +// ArgLiveness computes the liveness information of register argument spill slots. +// An argument's spill slot is "live" if we know it contains a meaningful value, +// that is, we have stored the register value to it. +// Returns the liveness map indices at each Block entry and at each Value (where +// it changes). +func ArgLiveness(fn *ir.Func, f *ssa.Func, pp *objw.Progs) (blockIdx, valueIdx map[ssa.ID]int) { + if f.OwnAux.ABIInfo().InRegistersUsed() == 0 || base.Flag.N != 0 { + // No register args. Nothing to emit. + // Or if -N is used we spill everything upfront so it is always live. + return nil, nil + } + + lv := &argLiveness{ + fn: fn, + f: f, + idx: make(map[nameOff]int32), + be: make([]blockArgEffects, f.NumBlocks()), + blockIdx: make(map[ssa.ID]int), + valueIdx: make(map[ssa.ID]int), + } + // Gather all register arg spill slots. + for _, a := range f.OwnAux.ABIInfo().InParams() { + n := a.Name + if n == nil || len(a.Registers) == 0 { + continue + } + _, offs := a.RegisterTypesAndOffsets() + for _, off := range offs { + if n.FrameOffset()+off > 0xff { + // We only print a limited number of args, with stack + // offsets no larger than 255. + continue + } + lv.args = append(lv.args, nameOff{n, off}) + } + } + if len(lv.args) > 10 { + lv.args = lv.args[:10] // We print no more than 10 args. + } + + // We spill address-taken or non-SSA-able value upfront, so they are always live. + alwaysLive := func(n *ir.Name) bool { return n.Addrtaken() || !ssa.CanSSA(n.Type()) } + + // We'll emit the smallest offset for the slots that need liveness info. + // No need to include a slot with a lower offset if it is always live. 
+ for len(lv.args) > 0 && alwaysLive(lv.args[0].n) { + lv.args = lv.args[1:] + } + if len(lv.args) == 0 { + return // everything is always live + } + + for i, a := range lv.args { + lv.idx[a] = int32(i) + } + + nargs := int32(len(lv.args)) + bulk := bitvec.NewBulk(nargs, int32(len(f.Blocks)*2)) + for _, b := range f.Blocks { + be := &lv.be[b.ID] + be.livein = bulk.Next() + be.liveout = bulk.Next() + + // initialize to all 1s, so we can AND them + be.livein.Not() + be.liveout.Not() + } + + entrybe := &lv.be[f.Entry.ID] + entrybe.livein.Clear() + for i, a := range lv.args { + if alwaysLive(a.n) { + entrybe.livein.Set(int32(i)) + } + } + + // Visit blocks in reverse-postorder, compute block effects. + po := f.Postorder() + for i := len(po) - 1; i >= 0; i-- { + b := po[i] + be := &lv.be[b.ID] + + // A slot is live at block entry if it is live in all predecessors. + for _, pred := range b.Preds { + pb := pred.Block() + be.livein.And(be.livein, lv.be[pb.ID].liveout) + } + + be.liveout.Copy(be.livein) + for _, v := range b.Values { + lv.valueEffect(v, be.liveout) + } + } + + // Coalesce identical live vectors. Compute liveness indices at each PC + // where it changes. + live := bitvec.New(nargs) + addToSet := func(bv bitvec.BitVec) (int, bool) { + if bv.Count() == int(nargs) { // special case for all live + return allLiveIdx, false + } + return lv.bvset.add(bv) + } + for _, b := range lv.f.Blocks { + be := &lv.be[b.ID] + lv.blockIdx[b.ID], _ = addToSet(be.livein) + + live.Copy(be.livein) + var lastv *ssa.Value + for i, v := range b.Values { + if lv.valueEffect(v, live) { + // Record that liveness changes but not emit a map now. + // For a sequence of StoreRegs we only need to emit one + // at last. + lastv = v + } + if lastv != nil && (mayFault(v) || i == len(b.Values)-1) { + // Emit the liveness map if it may fault or at the end of + // the block. We may need a traceback if the instruction + // may cause a panic. 
+ var added bool + lv.valueIdx[lastv.ID], added = addToSet(live) + if added { + // live is added to bvset and we cannot modify it now. + // Make a copy. + t := live + live = bitvec.New(nargs) + live.Copy(t) + } + lastv = nil + } + } + + // Sanity check. + if !live.Eq(be.liveout) { + panic("wrong arg liveness map at block end") + } + } + + // Emit funcdata symbol, update indices to offsets in the symbol data. + lsym := lv.emit() + fn.LSym.Func().ArgLiveInfo = lsym + + //lv.print() + + p := pp.Prog(obj.AFUNCDATA) + p.From.SetConst(abi.FUNCDATA_ArgLiveInfo) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = lsym + + return lv.blockIdx, lv.valueIdx +} + +// valueEffect applies the effect of v to live, return whether it is changed. +func (lv *argLiveness) valueEffect(v *ssa.Value, live bitvec.BitVec) bool { + if v.Op != ssa.OpStoreReg { // TODO: include other store instructions? + return false + } + n, off := ssa.AutoVar(v) + if n.Class != ir.PPARAM { + return false + } + i, ok := lv.idx[nameOff{n, off}] + if !ok || live.Get(i) { + return false + } + live.Set(i) + return true +} + +func mayFault(v *ssa.Value) bool { + switch v.Op { + case ssa.OpLoadReg, ssa.OpStoreReg, ssa.OpCopy, ssa.OpPhi, + ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive, + ssa.OpSelect0, ssa.OpSelect1, ssa.OpSelectN, ssa.OpMakeResult, + ssa.OpConvert, ssa.OpInlMark, ssa.OpGetG: + return false + } + if len(v.Args) == 0 { + return false // assume constant op cannot fault + } + return true // conservatively assume all other ops could fault +} + +func (lv *argLiveness) print() { + fmt.Println("argument liveness:", lv.f.Name) + live := bitvec.New(int32(len(lv.args))) + for _, b := range lv.f.Blocks { + be := &lv.be[b.ID] + + fmt.Printf("%v: live in: ", b) + lv.printLivenessVec(be.livein) + if idx, ok := lv.blockIdx[b.ID]; ok { + fmt.Printf(" #%d", idx) + } + fmt.Println() + + for _, v := range b.Values { + if lv.valueEffect(v, live) { + fmt.Printf(" %v: ", v) + lv.printLivenessVec(live) + 
if idx, ok := lv.valueIdx[v.ID]; ok { + fmt.Printf(" #%d", idx) + } + fmt.Println() + } + } + + fmt.Printf("%v: live out: ", b) + lv.printLivenessVec(be.liveout) + fmt.Println() + } + fmt.Println("liveness maps data:", lv.fn.LSym.Func().ArgLiveInfo.P) +} + +func (lv *argLiveness) printLivenessVec(bv bitvec.BitVec) { + for i, a := range lv.args { + if bv.Get(int32(i)) { + fmt.Printf("%v ", a) + } + } +} + +func (lv *argLiveness) emit() *obj.LSym { + livenessMaps := lv.bvset.extractUnique() + + // stack offsets of register arg spill slots + argOffsets := make([]uint8, len(lv.args)) + for i, a := range lv.args { + off := a.FrameOffset() + if off > 0xff { + panic("offset too large") + } + argOffsets[i] = uint8(off) + } + + idx2off := make([]int, len(livenessMaps)) + + lsym := base.Ctxt.Lookup(lv.fn.LSym.Name + ".argliveinfo") + lsym.Set(obj.AttrContentAddressable, true) + + off := objw.Uint8(lsym, 0, argOffsets[0]) // smallest offset that needs liveness info. + for idx, live := range livenessMaps { + idx2off[idx] = off + off = objw.BitVec(lsym, off, live) + } + + // Update liveness indices to offsets. + for i, x := range lv.blockIdx { + if x != allLiveIdx { + lv.blockIdx[i] = idx2off[x] + } + } + for i, x := range lv.valueIdx { + if x != allLiveIdx { + lv.valueIdx[i] = idx2off[x] + } + } + + return lsym +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/liveness/bvset.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/liveness/bvset.go new file mode 100644 index 0000000000000000000000000000000000000000..60b25938677c85c1ce7c07be102159230f506637 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/liveness/bvset.go @@ -0,0 +1,98 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package liveness + +import "cmd/compile/internal/bitvec" + +// FNV-1 hash function constants. 
+const ( + h0 = 2166136261 + hp = 16777619 +) + +// bvecSet is a set of bvecs, in initial insertion order. +type bvecSet struct { + index []int // hash -> uniq index. -1 indicates empty slot. + uniq []bitvec.BitVec // unique bvecs, in insertion order +} + +func (m *bvecSet) grow() { + // Allocate new index. + n := len(m.index) * 2 + if n == 0 { + n = 32 + } + newIndex := make([]int, n) + for i := range newIndex { + newIndex[i] = -1 + } + + // Rehash into newIndex. + for i, bv := range m.uniq { + h := hashbitmap(h0, bv) % uint32(len(newIndex)) + for { + j := newIndex[h] + if j < 0 { + newIndex[h] = i + break + } + h++ + if h == uint32(len(newIndex)) { + h = 0 + } + } + } + m.index = newIndex +} + +// add adds bv to the set and returns its index in m.extractUnique, +// and whether it is newly added. +// If it is newly added, the caller must not modify bv after this. +func (m *bvecSet) add(bv bitvec.BitVec) (int, bool) { + if len(m.uniq)*4 >= len(m.index) { + m.grow() + } + + index := m.index + h := hashbitmap(h0, bv) % uint32(len(index)) + for { + j := index[h] + if j < 0 { + // New bvec. + index[h] = len(m.uniq) + m.uniq = append(m.uniq, bv) + return len(m.uniq) - 1, true + } + jlive := m.uniq[j] + if bv.Eq(jlive) { + // Existing bvec. + return j, false + } + + h++ + if h == uint32(len(index)) { + h = 0 + } + } +} + +// extractUnique returns this slice of unique bit vectors in m, as +// indexed by the result of bvecSet.add. 
+func (m *bvecSet) extractUnique() []bitvec.BitVec { + return m.uniq +} + +func hashbitmap(h uint32, bv bitvec.BitVec) uint32 { + n := int((bv.N + 31) / 32) + for i := 0; i < n; i++ { + w := bv.B[i] + h = (h * hp) ^ (w & 0xff) + h = (h * hp) ^ ((w >> 8) & 0xff) + h = (h * hp) ^ ((w >> 16) & 0xff) + h = (h * hp) ^ ((w >> 24) & 0xff) + } + + return h +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/liveness/plive.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/liveness/plive.go new file mode 100644 index 0000000000000000000000000000000000000000..e4dbfa9fa31e783896be77371295ad5269de1fb4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/liveness/plive.go @@ -0,0 +1,1548 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Garbage collector liveness bitmap generation. + +// The command line flag -live causes this code to print debug information. +// The levels are: +// +// -live (aka -live=1): print liveness lists as code warnings at safe points +// -live=2: print an assembly listing with liveness annotations +// +// Each level includes the earlier output as well. + +package liveness + +import ( + "fmt" + "os" + "sort" + "strings" + + "cmd/compile/internal/abi" + "cmd/compile/internal/base" + "cmd/compile/internal/bitvec" + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/ssa" + "cmd/compile/internal/typebits" + "cmd/compile/internal/types" + "cmd/internal/notsha256" + "cmd/internal/obj" + "cmd/internal/src" + + rtabi "internal/abi" +) + +// OpVarDef is an annotation for the liveness analysis, marking a place +// where a complete initialization (definition) of a variable begins. 
+// Since the liveness analysis can see initialization of single-word +// variables quite easy, OpVarDef is only needed for multi-word +// variables satisfying isfat(n.Type). For simplicity though, buildssa +// emits OpVarDef regardless of variable width. +// +// An 'OpVarDef x' annotation in the instruction stream tells the liveness +// analysis to behave as though the variable x is being initialized at that +// point in the instruction stream. The OpVarDef must appear before the +// actual (multi-instruction) initialization, and it must also appear after +// any uses of the previous value, if any. For example, if compiling: +// +// x = x[1:] +// +// it is important to generate code like: +// +// base, len, cap = pieces of x[1:] +// OpVarDef x +// x = {base, len, cap} +// +// If instead the generated code looked like: +// +// OpVarDef x +// base, len, cap = pieces of x[1:] +// x = {base, len, cap} +// +// then the liveness analysis would decide the previous value of x was +// unnecessary even though it is about to be used by the x[1:] computation. +// Similarly, if the generated code looked like: +// +// base, len, cap = pieces of x[1:] +// x = {base, len, cap} +// OpVarDef x +// +// then the liveness analysis will not preserve the new value of x, because +// the OpVarDef appears to have "overwritten" it. +// +// OpVarDef is a bit of a kludge to work around the fact that the instruction +// stream is working on single-word values but the liveness analysis +// wants to work on individual variables, which might be multi-word +// aggregates. It might make sense at some point to look into letting +// the liveness analysis work on single-word values as well, although +// there are complications around interface values, slices, and strings, +// all of which cannot be treated as individual words. + +// blockEffects summarizes the liveness effects on an SSA block. 
+type blockEffects struct { + // Computed during Liveness.prologue using only the content of + // individual blocks: + // + // uevar: upward exposed variables (used before set in block) + // varkill: killed variables (set in block) + uevar bitvec.BitVec + varkill bitvec.BitVec + + // Computed during Liveness.solve using control flow information: + // + // livein: variables live at block entry + // liveout: variables live at block exit + livein bitvec.BitVec + liveout bitvec.BitVec +} + +// A collection of global state used by liveness analysis. +type liveness struct { + fn *ir.Func + f *ssa.Func + vars []*ir.Name + idx map[*ir.Name]int32 + stkptrsize int64 + + be []blockEffects + + // allUnsafe indicates that all points in this function are + // unsafe-points. + allUnsafe bool + // unsafePoints bit i is set if Value ID i is an unsafe-point + // (preemption is not allowed). Only valid if !allUnsafe. + unsafePoints bitvec.BitVec + // unsafeBlocks bit i is set if Block ID i is an unsafe-point + // (preemption is not allowed on any end-of-block + // instructions). Only valid if !allUnsafe. + unsafeBlocks bitvec.BitVec + + // An array with a bit vector for each safe point in the + // current Block during liveness.epilogue. Indexed in Value + // order for that block. Additionally, for the entry block + // livevars[0] is the entry bitmap. liveness.compact moves + // these to stackMaps. + livevars []bitvec.BitVec + + // livenessMap maps from safe points (i.e., CALLs) to their + // liveness map indexes. + livenessMap Map + stackMapSet bvecSet + stackMaps []bitvec.BitVec + + cache progeffectscache + + // partLiveArgs includes input arguments (PPARAM) that may + // be partially live. That is, it is considered live because + // a part of it is used, but we may not initialize all parts. + partLiveArgs map[*ir.Name]bool + + doClobber bool // Whether to clobber dead stack slots in this function. 
+ noClobberArgs bool // Do not clobber function arguments +} + +// Map maps from *ssa.Value to StackMapIndex. +// Also keeps track of unsafe ssa.Values and ssa.Blocks. +// (unsafe = can't be interrupted during GC.) +type Map struct { + Vals map[ssa.ID]objw.StackMapIndex + UnsafeVals map[ssa.ID]bool + UnsafeBlocks map[ssa.ID]bool + // The set of live, pointer-containing variables at the DeferReturn + // call (only set when open-coded defers are used). + DeferReturn objw.StackMapIndex +} + +func (m *Map) reset() { + if m.Vals == nil { + m.Vals = make(map[ssa.ID]objw.StackMapIndex) + m.UnsafeVals = make(map[ssa.ID]bool) + m.UnsafeBlocks = make(map[ssa.ID]bool) + } else { + for k := range m.Vals { + delete(m.Vals, k) + } + for k := range m.UnsafeVals { + delete(m.UnsafeVals, k) + } + for k := range m.UnsafeBlocks { + delete(m.UnsafeBlocks, k) + } + } + m.DeferReturn = objw.StackMapDontCare +} + +func (m *Map) set(v *ssa.Value, i objw.StackMapIndex) { + m.Vals[v.ID] = i +} +func (m *Map) setUnsafeVal(v *ssa.Value) { + m.UnsafeVals[v.ID] = true +} +func (m *Map) setUnsafeBlock(b *ssa.Block) { + m.UnsafeBlocks[b.ID] = true +} + +func (m Map) Get(v *ssa.Value) objw.StackMapIndex { + // If v isn't in the map, then it's a "don't care". + if idx, ok := m.Vals[v.ID]; ok { + return idx + } + return objw.StackMapDontCare +} +func (m Map) GetUnsafe(v *ssa.Value) bool { + // default is safe + return m.UnsafeVals[v.ID] +} +func (m Map) GetUnsafeBlock(b *ssa.Block) bool { + // default is safe + return m.UnsafeBlocks[b.ID] +} + +type progeffectscache struct { + retuevar []int32 + tailuevar []int32 + initialized bool +} + +// shouldTrack reports whether the liveness analysis +// should track the variable n. +// We don't care about variables that have no pointers, +// nor do we care about non-local variables, +// nor do we care about empty structs (handled by the pointer check), +// nor do we care about the fake PAUTOHEAP variables. 
+func shouldTrack(n *ir.Name) bool { + return (n.Class == ir.PAUTO && n.Esc() != ir.EscHeap || n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT) && n.Type().HasPointers() +} + +// getvariables returns the list of on-stack variables that we need to track +// and a map for looking up indices by *Node. +func getvariables(fn *ir.Func) ([]*ir.Name, map[*ir.Name]int32) { + var vars []*ir.Name + for _, n := range fn.Dcl { + if shouldTrack(n) { + vars = append(vars, n) + } + } + idx := make(map[*ir.Name]int32, len(vars)) + for i, n := range vars { + idx[n] = int32(i) + } + return vars, idx +} + +func (lv *liveness) initcache() { + if lv.cache.initialized { + base.Fatalf("liveness cache initialized twice") + return + } + lv.cache.initialized = true + + for i, node := range lv.vars { + switch node.Class { + case ir.PPARAM: + // A return instruction with a p.to is a tail return, which brings + // the stack pointer back up (if it ever went down) and then jumps + // to a new function entirely. That form of instruction must read + // all the parameters for correctness, and similarly it must not + // read the out arguments - they won't be set until the new + // function runs. + lv.cache.tailuevar = append(lv.cache.tailuevar, int32(i)) + + case ir.PPARAMOUT: + // All results are live at every return point. + // Note that this point is after escaping return values + // are copied back to the stack using their PAUTOHEAP references. + lv.cache.retuevar = append(lv.cache.retuevar, int32(i)) + } + } +} + +// A liveEffect is a set of flags that describe an instruction's +// liveness effects on a variable. +// +// The possible flags are: +// +// uevar - used by the instruction +// varkill - killed by the instruction (set) +// +// A kill happens after the use (for an instruction that updates a value, for example). 
+type liveEffect int + +const ( + uevar liveEffect = 1 << iota + varkill +) + +// valueEffects returns the index of a variable in lv.vars and the +// liveness effects v has on that variable. +// If v does not affect any tracked variables, it returns -1, 0. +func (lv *liveness) valueEffects(v *ssa.Value) (int32, liveEffect) { + n, e := affectedVar(v) + if e == 0 || n == nil { // cheapest checks first + return -1, 0 + } + // AllocFrame has dropped unused variables from + // lv.fn.Func.Dcl, but they might still be referenced by + // OpVarFoo pseudo-ops. Ignore them to prevent "lost track of + // variable" ICEs (issue 19632). + switch v.Op { + case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive: + if !n.Used() { + return -1, 0 + } + } + + if n.Class == ir.PPARAM && !n.Addrtaken() && n.Type().Size() > int64(types.PtrSize) { + // Only aggregate-typed arguments that are not address-taken can be + // partially live. + lv.partLiveArgs[n] = true + } + + var effect liveEffect + // Read is a read, obviously. + // + // Addr is a read also, as any subsequent holder of the pointer must be able + // to see all the values (including initialization) written so far. + // This also prevents a variable from "coming back from the dead" and presenting + // stale pointers to the garbage collector. See issue 28445. + if e&(ssa.SymRead|ssa.SymAddr) != 0 { + effect |= uevar + } + if e&ssa.SymWrite != 0 && (!isfat(n.Type()) || v.Op == ssa.OpVarDef) { + effect |= varkill + } + + if effect == 0 { + return -1, 0 + } + + if pos, ok := lv.idx[n]; ok { + return pos, effect + } + return -1, 0 +} + +// affectedVar returns the *ir.Name node affected by v. +func affectedVar(v *ssa.Value) (*ir.Name, ssa.SymEffect) { + // Special cases. + switch v.Op { + case ssa.OpLoadReg: + n, _ := ssa.AutoVar(v.Args[0]) + return n, ssa.SymRead + case ssa.OpStoreReg: + n, _ := ssa.AutoVar(v) + return n, ssa.SymWrite + + case ssa.OpArgIntReg: + // This forces the spill slot for the register to be live at function entry. 
+ // one of the following holds for a function F with pointer-valued register arg X: + // 0. No GC (so an uninitialized spill slot is okay) + // 1. GC at entry of F. GC is precise, but the spills around morestack initialize X's spill slot + // 2. Stack growth at entry of F. Same as GC. + // 3. GC occurs within F itself. This has to be from preemption, and thus GC is conservative. + // a. X is in a register -- then X is seen, and the spill slot is also scanned conservatively. + // b. X is spilled -- the spill slot is initialized, and scanned conservatively + // c. X is not live -- the spill slot is scanned conservatively, and it may contain X from an earlier spill. + // 4. GC within G, transitively called from F + // a. X is live at call site, therefore is spilled, to its spill slot (which is live because of subsequent LoadReg). + // b. X is not live at call site -- but neither is its spill slot. + n, _ := ssa.AutoVar(v) + return n, ssa.SymRead + + case ssa.OpVarLive: + return v.Aux.(*ir.Name), ssa.SymRead + case ssa.OpVarDef: + return v.Aux.(*ir.Name), ssa.SymWrite + case ssa.OpKeepAlive: + n, _ := ssa.AutoVar(v.Args[0]) + return n, ssa.SymRead + } + + e := v.Op.SymEffect() + if e == 0 { + return nil, 0 + } + + switch a := v.Aux.(type) { + case nil, *obj.LSym: + // ok, but no node + return nil, e + case *ir.Name: + return a, e + default: + base.Fatalf("weird aux: %s", v.LongString()) + return nil, e + } +} + +type livenessFuncCache struct { + be []blockEffects + livenessMap Map +} + +// Constructs a new liveness structure used to hold the global state of the +// liveness computation. The cfg argument is a slice of *BasicBlocks and the +// vars argument is a slice of *Nodes. +func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int32, stkptrsize int64) *liveness { + lv := &liveness{ + fn: fn, + f: f, + vars: vars, + idx: idx, + stkptrsize: stkptrsize, + } + + // Significant sources of allocation are kept in the ssa.Cache + // and reused. 
Surprisingly, the bit vectors themselves aren't + // a major source of allocation, but the liveness maps are. + if lc, _ := f.Cache.Liveness.(*livenessFuncCache); lc == nil { + // Prep the cache so liveness can fill it later. + f.Cache.Liveness = new(livenessFuncCache) + } else { + if cap(lc.be) >= f.NumBlocks() { + lv.be = lc.be[:f.NumBlocks()] + } + lv.livenessMap = Map{ + Vals: lc.livenessMap.Vals, + UnsafeVals: lc.livenessMap.UnsafeVals, + UnsafeBlocks: lc.livenessMap.UnsafeBlocks, + DeferReturn: objw.StackMapDontCare, + } + lc.livenessMap.Vals = nil + lc.livenessMap.UnsafeVals = nil + lc.livenessMap.UnsafeBlocks = nil + } + if lv.be == nil { + lv.be = make([]blockEffects, f.NumBlocks()) + } + + nblocks := int32(len(f.Blocks)) + nvars := int32(len(vars)) + bulk := bitvec.NewBulk(nvars, nblocks*7) + for _, b := range f.Blocks { + be := lv.blockEffects(b) + + be.uevar = bulk.Next() + be.varkill = bulk.Next() + be.livein = bulk.Next() + be.liveout = bulk.Next() + } + lv.livenessMap.reset() + + lv.markUnsafePoints() + + lv.partLiveArgs = make(map[*ir.Name]bool) + + lv.enableClobber() + + return lv +} + +func (lv *liveness) blockEffects(b *ssa.Block) *blockEffects { + return &lv.be[b.ID] +} + +// Generates live pointer value maps for arguments and local variables. The +// this argument and the in arguments are always assumed live. The vars +// argument is a slice of *Nodes. 
+func (lv *liveness) pointerMap(liveout bitvec.BitVec, vars []*ir.Name, args, locals bitvec.BitVec) { + for i := int32(0); ; i++ { + i = liveout.Next(i) + if i < 0 { + break + } + node := vars[i] + switch node.Class { + case ir.PPARAM, ir.PPARAMOUT: + if !node.IsOutputParamInRegisters() { + if node.FrameOffset() < 0 { + lv.f.Fatalf("Node %v has frameoffset %d\n", node.Sym().Name, node.FrameOffset()) + } + typebits.SetNoCheck(node.Type(), node.FrameOffset(), args) + break + } + fallthrough // PPARAMOUT in registers acts memory-allocates like an AUTO + case ir.PAUTO: + typebits.Set(node.Type(), node.FrameOffset()+lv.stkptrsize, locals) + } + } +} + +// IsUnsafe indicates that all points in this function are +// unsafe-points. +func IsUnsafe(f *ssa.Func) bool { + // The runtime assumes the only safe-points are function + // prologues (because that's how it used to be). We could and + // should improve that, but for now keep consider all points + // in the runtime unsafe. obj will add prologues and their + // safe-points. + // + // go:nosplit functions are similar. Since safe points used to + // be coupled with stack checks, go:nosplit often actually + // means "no safe points in this function". + return base.Flag.CompilingRuntime || f.NoSplit +} + +// markUnsafePoints finds unsafe points and computes lv.unsafePoints. +func (lv *liveness) markUnsafePoints() { + if IsUnsafe(lv.f) { + // No complex analysis necessary. + lv.allUnsafe = true + return + } + + lv.unsafePoints = bitvec.New(int32(lv.f.NumValues())) + lv.unsafeBlocks = bitvec.New(int32(lv.f.NumBlocks())) + + // Mark architecture-specific unsafe points. + for _, b := range lv.f.Blocks { + for _, v := range b.Values { + if v.Op.UnsafePoint() { + lv.unsafePoints.Set(int32(v.ID)) + } + } + } + + for _, b := range lv.f.Blocks { + for _, v := range b.Values { + if v.Op != ssa.OpWBend { + continue + } + // WBend appears at the start of a block, like this: + // ... + // if wbEnabled: goto C else D + // C: + // ... 
some write barrier enabled code ... + // goto B + // D: + // ... some write barrier disabled code ... + // goto B + // B: + // m1 = Phi mem_C mem_D + // m2 = store operation ... m1 + // m3 = store operation ... m2 + // m4 = WBend m3 + + // Find first memory op in the block, which should be a Phi. + m := v + for { + m = m.MemoryArg() + if m.Block != b { + lv.f.Fatalf("can't find Phi before write barrier end mark %v", v) + } + if m.Op == ssa.OpPhi { + break + } + } + // Find the two predecessor blocks (write barrier on and write barrier off) + if len(m.Args) != 2 { + lv.f.Fatalf("phi before write barrier end mark has %d args, want 2", len(m.Args)) + } + c := b.Preds[0].Block() + d := b.Preds[1].Block() + + // Find their common predecessor block (the one that branches based on wb on/off). + // It might be a diamond pattern, or one of the blocks in the diamond pattern might + // be missing. + var decisionBlock *ssa.Block + if len(c.Preds) == 1 && c.Preds[0].Block() == d { + decisionBlock = d + } else if len(d.Preds) == 1 && d.Preds[0].Block() == c { + decisionBlock = c + } else if len(c.Preds) == 1 && len(d.Preds) == 1 && c.Preds[0].Block() == d.Preds[0].Block() { + decisionBlock = c.Preds[0].Block() + } else { + lv.f.Fatalf("can't find write barrier pattern %v", v) + } + if len(decisionBlock.Succs) != 2 { + lv.f.Fatalf("common predecessor block the wrong type %s", decisionBlock.Kind) + } + + // Flow backwards from the control value to find the + // flag load. We don't know what lowered ops we're + // looking for, but all current arches produce a + // single op that does the memory load from the flag + // address, so we look for that. + var load *ssa.Value + v := decisionBlock.Controls[0] + for { + if v.MemoryArg() != nil { + // Single instruction to load (and maybe compare) the write barrier flag. 
+ if sym, ok := v.Aux.(*obj.LSym); ok && sym == ir.Syms.WriteBarrier { + load = v + break + } + // Some architectures have to materialize the address separate from + // the load. + if sym, ok := v.Args[0].Aux.(*obj.LSym); ok && sym == ir.Syms.WriteBarrier { + load = v + break + } + v.Fatalf("load of write barrier flag not from correct global: %s", v.LongString()) + } + // Common case: just flow backwards. + if len(v.Args) == 1 || len(v.Args) == 2 && v.Args[0] == v.Args[1] { + // Note: 386 lowers Neq32 to (TESTL cond cond), + v = v.Args[0] + continue + } + v.Fatalf("write barrier control value has more than one argument: %s", v.LongString()) + } + + // Mark everything after the load unsafe. + found := false + for _, v := range decisionBlock.Values { + if found { + lv.unsafePoints.Set(int32(v.ID)) + } + found = found || v == load + } + lv.unsafeBlocks.Set(int32(decisionBlock.ID)) + + // Mark the write barrier on/off blocks as unsafe. + for _, e := range decisionBlock.Succs { + x := e.Block() + if x == b { + continue + } + for _, v := range x.Values { + lv.unsafePoints.Set(int32(v.ID)) + } + lv.unsafeBlocks.Set(int32(x.ID)) + } + + // Mark from the join point up to the WBend as unsafe. + for _, v := range b.Values { + if v.Op == ssa.OpWBend { + break + } + lv.unsafePoints.Set(int32(v.ID)) + } + } + } +} + +// Returns true for instructions that must have a stack map. +// +// This does not necessarily mean the instruction is a safe-point. In +// particular, call Values can have a stack map in case the callee +// grows the stack, but not themselves be a safe-point. +func (lv *liveness) hasStackMap(v *ssa.Value) bool { + if !v.Op.IsCall() { + return false + } + // wbZero and wbCopy are write barriers and + // deeply non-preemptible. They are unsafe points and + // hence should not have liveness maps. 
+ if sym, ok := v.Aux.(*ssa.AuxCall); ok && (sym.Fn == ir.Syms.WBZero || sym.Fn == ir.Syms.WBMove) { + return false + } + return true +} + +// Initializes the sets for solving the live variables. Visits all the +// instructions in each basic block to summarizes the information at each basic +// block +func (lv *liveness) prologue() { + lv.initcache() + + for _, b := range lv.f.Blocks { + be := lv.blockEffects(b) + + // Walk the block instructions backward and update the block + // effects with the each prog effects. + for j := len(b.Values) - 1; j >= 0; j-- { + pos, e := lv.valueEffects(b.Values[j]) + if e&varkill != 0 { + be.varkill.Set(pos) + be.uevar.Unset(pos) + } + if e&uevar != 0 { + be.uevar.Set(pos) + } + } + } +} + +// Solve the liveness dataflow equations. +func (lv *liveness) solve() { + // These temporary bitvectors exist to avoid successive allocations and + // frees within the loop. + nvars := int32(len(lv.vars)) + newlivein := bitvec.New(nvars) + newliveout := bitvec.New(nvars) + + // Walk blocks in postorder ordering. This improves convergence. + po := lv.f.Postorder() + + // Iterate through the blocks in reverse round-robin fashion. A work + // queue might be slightly faster. As is, the number of iterations is + // so low that it hardly seems to be worth the complexity. + + for change := true; change; { + change = false + for _, b := range po { + be := lv.blockEffects(b) + + newliveout.Clear() + switch b.Kind { + case ssa.BlockRet: + for _, pos := range lv.cache.retuevar { + newliveout.Set(pos) + } + case ssa.BlockRetJmp: + for _, pos := range lv.cache.tailuevar { + newliveout.Set(pos) + } + case ssa.BlockExit: + // panic exit - nothing to do + default: + // A variable is live on output from this block + // if it is live on input to some successor. 
+ // + // out[b] = \bigcup_{s \in succ[b]} in[s] + newliveout.Copy(lv.blockEffects(b.Succs[0].Block()).livein) + for _, succ := range b.Succs[1:] { + newliveout.Or(newliveout, lv.blockEffects(succ.Block()).livein) + } + } + + if !be.liveout.Eq(newliveout) { + change = true + be.liveout.Copy(newliveout) + } + + // A variable is live on input to this block + // if it is used by this block, or live on output from this block and + // not set by the code in this block. + // + // in[b] = uevar[b] \cup (out[b] \setminus varkill[b]) + newlivein.AndNot(be.liveout, be.varkill) + be.livein.Or(newlivein, be.uevar) + } + } +} + +// Visits all instructions in a basic block and computes a bit vector of live +// variables at each safe point locations. +func (lv *liveness) epilogue() { + nvars := int32(len(lv.vars)) + liveout := bitvec.New(nvars) + livedefer := bitvec.New(nvars) // always-live variables + + // If there is a defer (that could recover), then all output + // parameters are live all the time. In addition, any locals + // that are pointers to heap-allocated output parameters are + // also always live (post-deferreturn code needs these + // pointers to copy values back to the stack). + // TODO: if the output parameter is heap-allocated, then we + // don't need to keep the stack copy live? + if lv.fn.HasDefer() { + for i, n := range lv.vars { + if n.Class == ir.PPARAMOUT { + if n.IsOutputParamHeapAddr() { + // Just to be paranoid. Heap addresses are PAUTOs. + base.Fatalf("variable %v both output param and heap output param", n) + } + if n.Heapaddr != nil { + // If this variable moved to the heap, then + // its stack copy is not live. + continue + } + // Note: zeroing is handled by zeroResults in walk.go. + livedefer.Set(int32(i)) + } + if n.IsOutputParamHeapAddr() { + // This variable will be overwritten early in the function + // prologue (from the result of a mallocgc) but we need to + // zero it in case that malloc causes a stack scan. 
+ n.SetNeedzero(true) + livedefer.Set(int32(i)) + } + if n.OpenDeferSlot() { + // Open-coded defer args slots must be live + // everywhere in a function, since a panic can + // occur (almost) anywhere. Because it is live + // everywhere, it must be zeroed on entry. + livedefer.Set(int32(i)) + // It was already marked as Needzero when created. + if !n.Needzero() { + base.Fatalf("all pointer-containing defer arg slots should have Needzero set") + } + } + } + } + + // We must analyze the entry block first. The runtime assumes + // the function entry map is index 0. Conveniently, layout + // already ensured that the entry block is first. + if lv.f.Entry != lv.f.Blocks[0] { + lv.f.Fatalf("entry block must be first") + } + + { + // Reserve an entry for function entry. + live := bitvec.New(nvars) + lv.livevars = append(lv.livevars, live) + } + + for _, b := range lv.f.Blocks { + be := lv.blockEffects(b) + + // Walk forward through the basic block instructions and + // allocate liveness maps for those instructions that need them. + for _, v := range b.Values { + if !lv.hasStackMap(v) { + continue + } + + live := bitvec.New(nvars) + lv.livevars = append(lv.livevars, live) + } + + // walk backward, construct maps at each safe point + index := int32(len(lv.livevars) - 1) + + liveout.Copy(be.liveout) + for i := len(b.Values) - 1; i >= 0; i-- { + v := b.Values[i] + + if lv.hasStackMap(v) { + // Found an interesting instruction, record the + // corresponding liveness information. + + live := &lv.livevars[index] + live.Or(*live, liveout) + live.Or(*live, livedefer) // only for non-entry safe points + index-- + } + + // Update liveness information. + pos, e := lv.valueEffects(v) + if e&varkill != 0 { + liveout.Unset(pos) + } + if e&uevar != 0 { + liveout.Set(pos) + } + } + + if b == lv.f.Entry { + if index != 0 { + base.Fatalf("bad index for entry point: %v", index) + } + + // Check to make sure only input variables are live. 
+ for i, n := range lv.vars { + if !liveout.Get(int32(i)) { + continue + } + if n.Class == ir.PPARAM { + continue // ok + } + base.FatalfAt(n.Pos(), "bad live variable at entry of %v: %L", lv.fn.Nname, n) + } + + // Record live variables. + live := &lv.livevars[index] + live.Or(*live, liveout) + } + + if lv.doClobber { + lv.clobber(b) + } + + // The liveness maps for this block are now complete. Compact them. + lv.compact(b) + } + + // If we have an open-coded deferreturn call, make a liveness map for it. + if lv.fn.OpenCodedDeferDisallowed() { + lv.livenessMap.DeferReturn = objw.StackMapDontCare + } else { + idx, _ := lv.stackMapSet.add(livedefer) + lv.livenessMap.DeferReturn = objw.StackMapIndex(idx) + } + + // Done compacting. Throw out the stack map set. + lv.stackMaps = lv.stackMapSet.extractUnique() + lv.stackMapSet = bvecSet{} + + // Useful sanity check: on entry to the function, + // the only things that can possibly be live are the + // input parameters. + for j, n := range lv.vars { + if n.Class != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) { + lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Nname, n) + } + } +} + +// Compact coalesces identical bitmaps from lv.livevars into the sets +// lv.stackMapSet. +// +// Compact clears lv.livevars. +// +// There are actually two lists of bitmaps, one list for the local variables and one +// list for the function arguments. Both lists are indexed by the same PCDATA +// index, so the corresponding pairs must be considered together when +// merging duplicates. The argument bitmaps change much less often during +// function execution than the local variable bitmaps, so it is possible that +// we could introduce a separate PCDATA index for arguments vs locals and +// then compact the set of argument bitmaps separately from the set of +// local variable bitmaps. 
As of 2014-04-02, doing this to the godoc binary +// is actually a net loss: we save about 50k of argument bitmaps but the new +// PCDATA tables cost about 100k. So for now we keep using a single index for +// both bitmap lists. +func (lv *liveness) compact(b *ssa.Block) { + pos := 0 + if b == lv.f.Entry { + // Handle entry stack map. + lv.stackMapSet.add(lv.livevars[0]) + pos++ + } + for _, v := range b.Values { + if lv.hasStackMap(v) { + idx, _ := lv.stackMapSet.add(lv.livevars[pos]) + pos++ + lv.livenessMap.set(v, objw.StackMapIndex(idx)) + } + if lv.allUnsafe || v.Op != ssa.OpClobber && lv.unsafePoints.Get(int32(v.ID)) { + lv.livenessMap.setUnsafeVal(v) + } + } + if lv.allUnsafe || lv.unsafeBlocks.Get(int32(b.ID)) { + lv.livenessMap.setUnsafeBlock(b) + } + + // Reset livevars. + lv.livevars = lv.livevars[:0] +} + +func (lv *liveness) enableClobber() { + // The clobberdead experiment inserts code to clobber pointer slots in all + // the dead variables (locals and args) at every synchronous safepoint. + if !base.Flag.ClobberDead { + return + } + if lv.fn.Pragma&ir.CgoUnsafeArgs != 0 { + // C or assembly code uses the exact frame layout. Don't clobber. + return + } + if len(lv.vars) > 10000 || len(lv.f.Blocks) > 10000 { + // Be careful to avoid doing too much work. + // Bail if >10000 variables or >10000 blocks. + // Otherwise, giant functions make this experiment generate too much code. + return + } + if lv.f.Name == "forkAndExecInChild" { + // forkAndExecInChild calls vfork on some platforms. + // The code we add here clobbers parts of the stack in the child. + // When the parent resumes, it is using the same stack frame. But the + // child has clobbered stack variables that the parent needs. Boom! + // In particular, the sys argument gets clobbered. + return + } + if lv.f.Name == "wbBufFlush" || + ((lv.f.Name == "callReflect" || lv.f.Name == "callMethod") && lv.fn.ABIWrapper()) { + // runtime.wbBufFlush must not modify its arguments. 
See the comments + // in runtime/mwbbuf.go:wbBufFlush. + // + // reflect.callReflect and reflect.callMethod are called from special + // functions makeFuncStub and methodValueCall. The runtime expects + // that it can find the first argument (ctxt) at 0(SP) in makeFuncStub + // and methodValueCall's frame (see runtime/traceback.go:getArgInfo). + // Normally callReflect and callMethod already do not modify the + // argument, and keep it alive. But the compiler-generated ABI wrappers + // don't do that. Special case the wrappers to not clobber its arguments. + lv.noClobberArgs = true + } + if h := os.Getenv("GOCLOBBERDEADHASH"); h != "" { + // Clobber only functions where the hash of the function name matches a pattern. + // Useful for binary searching for a miscompiled function. + hstr := "" + for _, b := range notsha256.Sum256([]byte(lv.f.Name)) { + hstr += fmt.Sprintf("%08b", b) + } + if !strings.HasSuffix(hstr, h) { + return + } + fmt.Printf("\t\t\tCLOBBERDEAD %s\n", lv.f.Name) + } + lv.doClobber = true +} + +// Inserts code to clobber pointer slots in all the dead variables (locals and args) +// at every synchronous safepoint in b. +func (lv *liveness) clobber(b *ssa.Block) { + // Copy block's values to a temporary. + oldSched := append([]*ssa.Value{}, b.Values...) + b.Values = b.Values[:0] + idx := 0 + + // Clobber pointer slots in all dead variables at entry. + if b == lv.f.Entry { + for len(oldSched) > 0 && len(oldSched[0].Args) == 0 { + // Skip argless ops. We need to skip at least + // the lowered ClosurePtr op, because it + // really wants to be first. This will also + // skip ops like InitMem and SP, which are ok. + b.Values = append(b.Values, oldSched[0]) + oldSched = oldSched[1:] + } + clobber(lv, b, lv.livevars[0]) + idx++ + } + + // Copy values into schedule, adding clobbering around safepoints. 
+ for _, v := range oldSched { + if !lv.hasStackMap(v) { + b.Values = append(b.Values, v) + continue + } + clobber(lv, b, lv.livevars[idx]) + b.Values = append(b.Values, v) + idx++ + } +} + +// clobber generates code to clobber pointer slots in all dead variables +// (those not marked in live). Clobbering instructions are added to the end +// of b.Values. +func clobber(lv *liveness, b *ssa.Block, live bitvec.BitVec) { + for i, n := range lv.vars { + if !live.Get(int32(i)) && !n.Addrtaken() && !n.OpenDeferSlot() && !n.IsOutputParamHeapAddr() { + // Don't clobber stack objects (address-taken). They are + // tracked dynamically. + // Also don't clobber slots that are live for defers (see + // the code setting livedefer in epilogue). + if lv.noClobberArgs && n.Class == ir.PPARAM { + continue + } + clobberVar(b, n) + } + } +} + +// clobberVar generates code to trash the pointers in v. +// Clobbering instructions are added to the end of b.Values. +func clobberVar(b *ssa.Block, v *ir.Name) { + clobberWalk(b, v, 0, v.Type()) +} + +// b = block to which we append instructions +// v = variable +// offset = offset of (sub-portion of) variable to clobber (in bytes) +// t = type of sub-portion of v. 
+func clobberWalk(b *ssa.Block, v *ir.Name, offset int64, t *types.Type) { + if !t.HasPointers() { + return + } + switch t.Kind() { + case types.TPTR, + types.TUNSAFEPTR, + types.TFUNC, + types.TCHAN, + types.TMAP: + clobberPtr(b, v, offset) + + case types.TSTRING: + // struct { byte *str; int len; } + clobberPtr(b, v, offset) + + case types.TINTER: + // struct { Itab *tab; void *data; } + // or, when isnilinter(t)==true: + // struct { Type *type; void *data; } + clobberPtr(b, v, offset) + clobberPtr(b, v, offset+int64(types.PtrSize)) + + case types.TSLICE: + // struct { byte *array; int len; int cap; } + clobberPtr(b, v, offset) + + case types.TARRAY: + for i := int64(0); i < t.NumElem(); i++ { + clobberWalk(b, v, offset+i*t.Elem().Size(), t.Elem()) + } + + case types.TSTRUCT: + for _, t1 := range t.Fields() { + clobberWalk(b, v, offset+t1.Offset, t1.Type) + } + + default: + base.Fatalf("clobberWalk: unexpected type, %v", t) + } +} + +// clobberPtr generates a clobber of the pointer at offset offset in v. +// The clobber instruction is added at the end of b. +func clobberPtr(b *ssa.Block, v *ir.Name, offset int64) { + b.NewValue0IA(src.NoXPos, ssa.OpClobber, types.TypeVoid, offset, v) +} + +func (lv *liveness) showlive(v *ssa.Value, live bitvec.BitVec) { + if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") { + return + } + if lv.fn.Wrapper() || lv.fn.Dupok() { + // Skip reporting liveness information for compiler-generated wrappers. + return + } + if !(v == nil || v.Op.IsCall()) { + // Historically we only printed this information at + // calls. Keep doing so. 
+ return + } + if live.IsEmpty() { + return + } + + pos := lv.fn.Nname.Pos() + if v != nil { + pos = v.Pos + } + + s := "live at " + if v == nil { + s += fmt.Sprintf("entry to %s:", ir.FuncName(lv.fn)) + } else if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil { + fn := sym.Fn.Name + if pos := strings.Index(fn, "."); pos >= 0 { + fn = fn[pos+1:] + } + s += fmt.Sprintf("call to %s:", fn) + } else { + s += "indirect call:" + } + + // Sort variable names for display. Variables aren't in any particular order, and + // the order can change by architecture, particularly with differences in regabi. + var names []string + for j, n := range lv.vars { + if live.Get(int32(j)) { + names = append(names, n.Sym().Name) + } + } + sort.Strings(names) + for _, v := range names { + s += " " + v + } + + base.WarnfAt(pos, s) +} + +func (lv *liveness) printbvec(printed bool, name string, live bitvec.BitVec) bool { + if live.IsEmpty() { + return printed + } + + if !printed { + fmt.Printf("\t") + } else { + fmt.Printf(" ") + } + fmt.Printf("%s=", name) + + comma := "" + for i, n := range lv.vars { + if !live.Get(int32(i)) { + continue + } + fmt.Printf("%s%s", comma, n.Sym().Name) + comma = "," + } + return true +} + +// printeffect is like printbvec, but for valueEffects. +func (lv *liveness) printeffect(printed bool, name string, pos int32, x bool) bool { + if !x { + return printed + } + if !printed { + fmt.Printf("\t") + } else { + fmt.Printf(" ") + } + fmt.Printf("%s=", name) + if x { + fmt.Printf("%s", lv.vars[pos].Sym().Name) + } + + return true +} + +// Prints the computed liveness information and inputs, for debugging. +// This format synthesizes the information used during the multiple passes +// into a single presentation. 
+func (lv *liveness) printDebug() { + fmt.Printf("liveness: %s\n", ir.FuncName(lv.fn)) + + for i, b := range lv.f.Blocks { + if i > 0 { + fmt.Printf("\n") + } + + // bb#0 pred=1,2 succ=3,4 + fmt.Printf("bb#%d pred=", b.ID) + for j, pred := range b.Preds { + if j > 0 { + fmt.Printf(",") + } + fmt.Printf("%d", pred.Block().ID) + } + fmt.Printf(" succ=") + for j, succ := range b.Succs { + if j > 0 { + fmt.Printf(",") + } + fmt.Printf("%d", succ.Block().ID) + } + fmt.Printf("\n") + + be := lv.blockEffects(b) + + // initial settings + printed := false + printed = lv.printbvec(printed, "uevar", be.uevar) + printed = lv.printbvec(printed, "livein", be.livein) + if printed { + fmt.Printf("\n") + } + + // program listing, with individual effects listed + + if b == lv.f.Entry { + live := lv.stackMaps[0] + fmt.Printf("(%s) function entry\n", base.FmtPos(lv.fn.Nname.Pos())) + fmt.Printf("\tlive=") + printed = false + for j, n := range lv.vars { + if !live.Get(int32(j)) { + continue + } + if printed { + fmt.Printf(",") + } + fmt.Printf("%v", n) + printed = true + } + fmt.Printf("\n") + } + + for _, v := range b.Values { + fmt.Printf("(%s) %v\n", base.FmtPos(v.Pos), v.LongString()) + + pcdata := lv.livenessMap.Get(v) + + pos, effect := lv.valueEffects(v) + printed = false + printed = lv.printeffect(printed, "uevar", pos, effect&uevar != 0) + printed = lv.printeffect(printed, "varkill", pos, effect&varkill != 0) + if printed { + fmt.Printf("\n") + } + + if pcdata.StackMapValid() { + fmt.Printf("\tlive=") + printed = false + if pcdata.StackMapValid() { + live := lv.stackMaps[pcdata] + for j, n := range lv.vars { + if !live.Get(int32(j)) { + continue + } + if printed { + fmt.Printf(",") + } + fmt.Printf("%v", n) + printed = true + } + } + fmt.Printf("\n") + } + + if lv.livenessMap.GetUnsafe(v) { + fmt.Printf("\tunsafe-point\n") + } + } + if lv.livenessMap.GetUnsafeBlock(b) { + fmt.Printf("\tunsafe-block\n") + } + + // bb bitsets + fmt.Printf("end\n") + printed = false + printed = 
lv.printbvec(printed, "varkill", be.varkill) + printed = lv.printbvec(printed, "liveout", be.liveout) + if printed { + fmt.Printf("\n") + } + } + + fmt.Printf("\n") +} + +// Dumps a slice of bitmaps to a symbol as a sequence of uint32 values. The +// first word dumped is the total number of bitmaps. The second word is the +// length of the bitmaps. All bitmaps are assumed to be of equal length. The +// remaining bytes are the raw bitmaps. +func (lv *liveness) emit() (argsSym, liveSym *obj.LSym) { + // Size args bitmaps to be just large enough to hold the largest pointer. + // First, find the largest Xoffset node we care about. + // (Nodes without pointers aren't in lv.vars; see ShouldTrack.) + var maxArgNode *ir.Name + for _, n := range lv.vars { + switch n.Class { + case ir.PPARAM, ir.PPARAMOUT: + if !n.IsOutputParamInRegisters() { + if maxArgNode == nil || n.FrameOffset() > maxArgNode.FrameOffset() { + maxArgNode = n + } + } + } + } + // Next, find the offset of the largest pointer in the largest node. + var maxArgs int64 + if maxArgNode != nil { + maxArgs = maxArgNode.FrameOffset() + types.PtrDataSize(maxArgNode.Type()) + } + + // Size locals bitmaps to be stkptrsize sized. + // We cannot shrink them to only hold the largest pointer, + // because their size is used to calculate the beginning + // of the local variables frame. + // Further discussion in https://golang.org/cl/104175. + // TODO: consider trimming leading zeros. + // This would require shifting all bitmaps. + maxLocals := lv.stkptrsize + + // Temporary symbols for encoding bitmaps. 
+ var argsSymTmp, liveSymTmp obj.LSym + + args := bitvec.New(int32(maxArgs / int64(types.PtrSize))) + aoff := objw.Uint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps + aoff = objw.Uint32(&argsSymTmp, aoff, uint32(args.N)) // number of bits in each bitmap + + locals := bitvec.New(int32(maxLocals / int64(types.PtrSize))) + loff := objw.Uint32(&liveSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps + loff = objw.Uint32(&liveSymTmp, loff, uint32(locals.N)) // number of bits in each bitmap + + for _, live := range lv.stackMaps { + args.Clear() + locals.Clear() + + lv.pointerMap(live, lv.vars, args, locals) + + aoff = objw.BitVec(&argsSymTmp, aoff, args) + loff = objw.BitVec(&liveSymTmp, loff, locals) + } + + // These symbols will be added to Ctxt.Data by addGCLocals + // after parallel compilation is done. + return base.Ctxt.GCLocalsSym(argsSymTmp.P), base.Ctxt.GCLocalsSym(liveSymTmp.P) +} + +// Entry pointer for Compute analysis. Solves for the Compute of +// pointer variables in the function and emits a runtime data +// structure read by the garbage collector. +// Returns a map from GC safe points to their corresponding stack map index, +// and a map that contains all input parameters that may be partially live. +func Compute(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) (Map, map[*ir.Name]bool) { + // Construct the global liveness state. + vars, idx := getvariables(curfn) + lv := newliveness(curfn, f, vars, idx, stkptrsize) + + // Run the dataflow framework. + lv.prologue() + lv.solve() + lv.epilogue() + if base.Flag.Live > 0 { + lv.showlive(nil, lv.stackMaps[0]) + for _, b := range f.Blocks { + for _, val := range b.Values { + if idx := lv.livenessMap.Get(val); idx.StackMapValid() { + lv.showlive(val, lv.stackMaps[idx]) + } + } + } + } + if base.Flag.Live >= 2 { + lv.printDebug() + } + + // Update the function cache. 
+ { + cache := f.Cache.Liveness.(*livenessFuncCache) + if cap(lv.be) < 2000 { // Threshold from ssa.Cache slices. + for i := range lv.be { + lv.be[i] = blockEffects{} + } + cache.be = lv.be + } + if len(lv.livenessMap.Vals) < 2000 { + cache.livenessMap = lv.livenessMap + } + } + + // Emit the live pointer map data structures + ls := curfn.LSym + fninfo := ls.Func() + fninfo.GCArgs, fninfo.GCLocals = lv.emit() + + p := pp.Prog(obj.AFUNCDATA) + p.From.SetConst(rtabi.FUNCDATA_ArgsPointerMaps) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = fninfo.GCArgs + + p = pp.Prog(obj.AFUNCDATA) + p.From.SetConst(rtabi.FUNCDATA_LocalsPointerMaps) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = fninfo.GCLocals + + if x := lv.emitStackObjects(); x != nil { + p := pp.Prog(obj.AFUNCDATA) + p.From.SetConst(rtabi.FUNCDATA_StackObjects) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = x + } + + return lv.livenessMap, lv.partLiveArgs +} + +func (lv *liveness) emitStackObjects() *obj.LSym { + var vars []*ir.Name + for _, n := range lv.fn.Dcl { + if shouldTrack(n) && n.Addrtaken() && n.Esc() != ir.EscHeap { + vars = append(vars, n) + } + } + if len(vars) == 0 { + return nil + } + + // Sort variables from lowest to highest address. + sort.Slice(vars, func(i, j int) bool { return vars[i].FrameOffset() < vars[j].FrameOffset() }) + + // Populate the stack object data. + // Format must match runtime/stack.go:stackObjectRecord. + x := base.Ctxt.Lookup(lv.fn.LSym.Name + ".stkobj") + x.Set(obj.AttrContentAddressable, true) + lv.fn.LSym.Func().StackObjects = x + off := 0 + off = objw.Uintptr(x, off, uint64(len(vars))) + for _, v := range vars { + // Note: arguments and return values have non-negative Xoffset, + // in which case the offset is relative to argp. + // Locals have a negative Xoffset, in which case the offset is relative to varp. 
+ // We already limit the frame size, so the offset and the object size + // should not be too big. + frameOffset := v.FrameOffset() + if frameOffset != int64(int32(frameOffset)) { + base.Fatalf("frame offset too big: %v %d", v, frameOffset) + } + off = objw.Uint32(x, off, uint32(frameOffset)) + + t := v.Type() + sz := t.Size() + if sz != int64(int32(sz)) { + base.Fatalf("stack object too big: %v of type %v, size %d", v, t, sz) + } + lsym, useGCProg, ptrdata := reflectdata.GCSym(t) + if useGCProg { + ptrdata = -ptrdata + } + off = objw.Uint32(x, off, uint32(sz)) + off = objw.Uint32(x, off, uint32(ptrdata)) + off = objw.SymPtrOff(x, off, lsym) + } + + if base.Flag.Live != 0 { + for _, v := range vars { + base.WarnfAt(v.Pos(), "stack object %v %v", v, v.Type()) + } + } + + return x +} + +// isfat reports whether a variable of type t needs multiple assignments to initialize. +// For example: +// +// type T struct { x, y int } +// x := T{x: 0, y: 1} +// +// Then we need: +// +// var t T +// t.x = 0 +// t.y = 1 +// +// to fully initialize t. +func isfat(t *types.Type) bool { + if t != nil { + switch t.Kind() { + case types.TSLICE, types.TSTRING, + types.TINTER: // maybe remove later + return true + case types.TARRAY: + // Array of 1 element, check if element is fat + if t.NumElem() == 1 { + return isfat(t.Elem()) + } + return true + case types.TSTRUCT: + // Struct with 1 field, check if field is fat + if t.NumFields() == 1 { + return isfat(t.Field(0).Type) + } + return true + } + } + + return false +} + +// WriteFuncMap writes the pointer bitmaps for bodyless function fn's +// inputs and outputs as the value of symbol .args_stackmap. +// If fn has outputs, two bitmaps are written, otherwise just one. 
+func WriteFuncMap(fn *ir.Func, abiInfo *abi.ABIParamResultInfo) { + if ir.FuncName(fn) == "_" || fn.Sym().Linkname != "" { + return + } + nptr := int(abiInfo.ArgWidth() / int64(types.PtrSize)) + bv := bitvec.New(int32(nptr)) + + for _, p := range abiInfo.InParams() { + typebits.SetNoCheck(p.Type, p.FrameOffset(abiInfo), bv) + } + + nbitmap := 1 + if fn.Type().NumResults() > 0 { + nbitmap = 2 + } + lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap") + off := objw.Uint32(lsym, 0, uint32(nbitmap)) + off = objw.Uint32(lsym, off, uint32(bv.N)) + off = objw.BitVec(lsym, off, bv) + + if fn.Type().NumResults() > 0 { + for _, p := range abiInfo.OutParams() { + if len(p.Registers) == 0 { + typebits.SetNoCheck(p.Type, p.FrameOffset(abiInfo), bv) + } + } + off = objw.BitVec(lsym, off, bv) + } + + objw.Global(lsym, int32(off), obj.RODATA|obj.LOCAL) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/logopt/log_opts.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/logopt/log_opts.go new file mode 100644 index 0000000000000000000000000000000000000000..b731e5593833267d0a05144d997a73214badf0ee --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/logopt/log_opts.go @@ -0,0 +1,540 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package logopt + +import ( + "cmd/internal/obj" + "cmd/internal/src" + "encoding/json" + "fmt" + "internal/buildcfg" + "io" + "log" + "net/url" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "unicode" +) + +// This implements (non)optimization logging for -json option to the Go compiler +// The option is -json 0,. +// +// 0 is the version number; to avoid the need for synchronized updates, if +// new versions of the logging appear, the compiler will support both, for a while, +// and clients will specify what they need. +// +// is a directory. 
+// Directories are specified with a leading / or os.PathSeparator, +// or more explicitly with file://directory. The second form is intended to +// deal with corner cases on Windows, and to allow specification of a relative +// directory path (which is normally a bad idea, because the local directory +// varies a lot in a build, especially with modules and/or vendoring, and may +// not be writeable). +// +// For each package pkg compiled, a url.PathEscape(pkg)-named subdirectory +// is created. For each source file.go in that package that generates +// diagnostics (no diagnostics means no file), +// a url.PathEscape(file)+".json"-named file is created and contains the +// logged diagnostics. +// +// For example, "cmd%2Finternal%2Fdwarf/%3Cautogenerated%3E.json" +// for "cmd/internal/dwarf" and (which is not really a file, but the compiler sees it) +// +// If the package string is empty, it is replaced internally with string(0) which encodes to %00. +// +// Each log file begins with a JSON record identifying version, +// platform, and other context, followed by optimization-relevant +// LSP Diagnostic records, one per line (LSP version 3.15, no difference from 3.14 on the subset used here +// see https://microsoft.github.io/language-server-protocol/specifications/specification-3-15/ ) +// +// The fields of a Diagnostic are used in the following way: +// Range: the outermost source position, for now begin and end are equal. +// Severity: (always) SeverityInformation (3) +// Source: (always) "go compiler" +// Code: a string describing the missed optimization, e.g., "nilcheck", "cannotInline", "isInBounds", "escape" +// Message: depending on code, additional information, e.g., the reason a function cannot be inlined. +// RelatedInformation: if the missed optimization actually occurred at a function inlined at Range, +// then the sequence of inlined locations appears here, from (second) outermost to innermost, +// each with message="inlineLoc". 
+// +// In the case of escape analysis explanations, after any outer inlining locations, +// the lines of the explanation appear, each potentially followed with its own inlining +// location if the escape flow occurred within an inlined function. +// +// For example /cmd%2Fcompile%2Finternal%2Fssa/prove.json +// might begin with the following line (wrapped for legibility): +// +// {"version":0,"package":"cmd/compile/internal/ssa","goos":"darwin","goarch":"amd64", +// "gc_version":"devel +e1b9a57852 Fri Nov 1 15:07:00 2019 -0400", +// "file":"/Users/drchase/work/go/src/cmd/compile/internal/ssa/prove.go"} +// +// and later contain (also wrapped for legibility): +// +// {"range":{"start":{"line":191,"character":24},"end":{"line":191,"character":24}}, +// "severity":3,"code":"nilcheck","source":"go compiler","message":"", +// "relatedInformation":[ +// {"location":{"uri":"file:///Users/drchase/work/go/src/cmd/compile/internal/ssa/func.go", +// "range":{"start":{"line":153,"character":16},"end":{"line":153,"character":16}}}, +// "message":"inlineLoc"}]} +// +// That is, at prove.go (implicit from context, provided in both filename and header line), +// line 191, column 24, a nilcheck occurred in the generated code. +// The relatedInformation indicates that this code actually came from +// an inlined call to func.go, line 153, character 16. 
+// +// prove.go:191: +// ft.orderS = f.newPoset() +// func.go:152 and 153: +// func (f *Func) newPoset() *poset { +// if len(f.Cache.scrPoset) > 0 { +// +// In the case that the package is empty, the string(0) package name is also used in the header record, for example +// +// go tool compile -json=0,file://logopt x.go # no -p option to set the package +// head -1 logopt/%00/x.json +// {"version":0,"package":"\u0000","goos":"darwin","goarch":"amd64","gc_version":"devel +86487adf6a Thu Nov 7 19:34:56 2019 -0500","file":"x.go"} + +type VersionHeader struct { + Version int `json:"version"` + Package string `json:"package"` + Goos string `json:"goos"` + Goarch string `json:"goarch"` + GcVersion string `json:"gc_version"` + File string `json:"file,omitempty"` // LSP requires an enclosing resource, i.e., a file +} + +// DocumentURI, Position, Range, Location, Diagnostic, DiagnosticRelatedInformation all reuse json definitions from gopls. +// See https://github.com/golang/tools/blob/22afafe3322a860fcd3d88448768f9db36f8bc5f/internal/lsp/protocol/tsprotocol.go + +type DocumentURI string + +type Position struct { + Line uint `json:"line"` // gopls uses float64, but json output is the same for integers + Character uint `json:"character"` // gopls uses float64, but json output is the same for integers +} + +// A Range in a text document expressed as (zero-based) start and end positions. +// A range is comparable to a selection in an editor. Therefore the end position is exclusive. +// If you want to specify a range that contains a line including the line ending character(s) +// then use an end position denoting the start of the next line. +type Range struct { + /*Start defined: + * The range's start position + */ + Start Position `json:"start"` + + /*End defined: + * The range's end position + */ + End Position `json:"end"` // exclusive +} + +// A Location represents a location inside a resource, such as a line inside a text file. 
+type Location struct { + // URI is + URI DocumentURI `json:"uri"` + + // Range is + Range Range `json:"range"` +} + +/* DiagnosticRelatedInformation defined: + * Represents a related message and source code location for a diagnostic. This should be + * used to point to code locations that cause or related to a diagnostics, e.g when duplicating + * a symbol in a scope. + */ +type DiagnosticRelatedInformation struct { + + /*Location defined: + * The location of this related diagnostic information. + */ + Location Location `json:"location"` + + /*Message defined: + * The message of this related diagnostic information. + */ + Message string `json:"message"` +} + +// DiagnosticSeverity defines constants +type DiagnosticSeverity uint + +const ( + /*SeverityInformation defined: + * Reports an information. + */ + SeverityInformation DiagnosticSeverity = 3 +) + +// DiagnosticTag defines constants +type DiagnosticTag uint + +/*Diagnostic defined: + * Represents a diagnostic, such as a compiler error or warning. Diagnostic objects + * are only valid in the scope of a resource. + */ +type Diagnostic struct { + + /*Range defined: + * The range at which the message applies + */ + Range Range `json:"range"` + + /*Severity defined: + * The diagnostic's severity. Can be omitted. If omitted it is up to the + * client to interpret diagnostics as error, warning, info or hint. + */ + Severity DiagnosticSeverity `json:"severity,omitempty"` // always SeverityInformation for optimizer logging. + + /*Code defined: + * The diagnostic's code, which usually appear in the user interface. + */ + Code string `json:"code,omitempty"` // LSP uses 'number | string' = gopls interface{}, but only string here, e.g. "boundsCheck", "nilcheck", etc. + + /*Source defined: + * A human-readable string describing the source of this + * diagnostic, e.g. 'typescript' or 'super lint'. It usually + * appears in the user interface. 
+ */ + Source string `json:"source,omitempty"` // "go compiler" + + /*Message defined: + * The diagnostic's message. It usually appears in the user interface + */ + Message string `json:"message"` // sometimes used, provides additional information. + + /*Tags defined: + * Additional metadata about the diagnostic. + */ + Tags []DiagnosticTag `json:"tags,omitempty"` // always empty for logging optimizations. + + /*RelatedInformation defined: + * An array of related diagnostic information, e.g. when symbol-names within + * a scope collide all definitions can be marked via this property. + */ + RelatedInformation []DiagnosticRelatedInformation `json:"relatedInformation,omitempty"` +} + +// A LoggedOpt is what the compiler produces and accumulates, +// to be converted to JSON for human or IDE consumption. +type LoggedOpt struct { + pos src.XPos // Source code position at which the event occurred. If it is inlined, outer and all inlined locations will appear in JSON. + lastPos src.XPos // Usually the same as pos; current exception is for reporting entire range of transformed loops + compilerPass string // Compiler pass. For human/adhoc consumption; does not appear in JSON (yet) + functionName string // Function name. For human/adhoc consumption; does not appear in JSON (yet) + what string // The (non) optimization; "nilcheck", "boundsCheck", "inline", "noInline" + target []interface{} // Optional target(s) or parameter(s) of "what" -- what was inlined, why it was not, size of copy, etc. 1st is most important/relevant. +} + +type logFormat uint8 + +const ( + None logFormat = iota + Json0 // version 0 for LSP 3.14, 3.15; future versions of LSP may change the format and the compiler may need to support both as clients are updated. +) + +var Format = None +var dest string + +// LogJsonOption parses and validates the version,directory value attached to the -json compiler flag. 
+func LogJsonOption(flagValue string) { + version, directory := parseLogFlag("json", flagValue) + if version != 0 { + log.Fatal("-json version must be 0") + } + dest = checkLogPath(directory) + Format = Json0 +} + +// parseLogFlag checks the flag passed to -json +// for version,destination format and returns the two parts. +func parseLogFlag(flag, value string) (version int, directory string) { + if Format != None { + log.Fatal("Cannot repeat -json flag") + } + commaAt := strings.Index(value, ",") + if commaAt <= 0 { + log.Fatalf("-%s option should be ',' where is a number", flag) + } + v, err := strconv.Atoi(value[:commaAt]) + if err != nil { + log.Fatalf("-%s option should be ',' where is a number: err=%v", flag, err) + } + version = v + directory = value[commaAt+1:] + return +} + +// isWindowsDriveURIPath returns true if the file URI is of the format used by +// Windows URIs. The url.Parse package does not specially handle Windows paths +// (see golang/go#6027), so we check if the URI path has a drive prefix (e.g. "/C:"). +// (copied from tools/internal/span/uri.go) +// this is less comprehensive that the processing in filepath.IsAbs on Windows. +func isWindowsDriveURIPath(uri string) bool { + if len(uri) < 4 { + return false + } + return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':' +} + +func parseLogPath(destination string) (string, string) { + if filepath.IsAbs(destination) { + return filepath.Clean(destination), "" + } + if strings.HasPrefix(destination, "file://") { // IKWIAD, or Windows C:\foo\bar\baz + uri, err := url.Parse(destination) + if err != nil { + return "", fmt.Sprintf("optimizer logging destination looked like file:// URI but failed to parse: err=%v", err) + } + destination = uri.Host + uri.Path + if isWindowsDriveURIPath(destination) { + // strip leading / from /C: + // unlike tools/internal/span/uri.go, do not uppercase the drive letter -- let filepath.Clean do what it does. 
+ destination = destination[1:] + } + return filepath.Clean(destination), "" + } + return "", fmt.Sprintf("optimizer logging destination %s was neither %s-prefixed directory nor file://-prefixed file URI", destination, string(filepath.Separator)) +} + +// checkLogPath does superficial early checking of the string specifying +// the directory to which optimizer logging is directed, and if +// it passes the test, stores the string in LO_dir. +func checkLogPath(destination string) string { + path, complaint := parseLogPath(destination) + if complaint != "" { + log.Fatalf(complaint) + } + err := os.MkdirAll(path, 0755) + if err != nil { + log.Fatalf("optimizer logging destination ',' but could not create : err=%v", err) + } + return path +} + +var loggedOpts []*LoggedOpt +var mu = sync.Mutex{} // mu protects loggedOpts. + +// NewLoggedOpt allocates a new LoggedOpt, to later be passed to either NewLoggedOpt or LogOpt as "args". +// Pos is the source position (including inlining), what is the message, pass is which pass created the message, +// funcName is the name of the function +// A typical use for this to accumulate an explanation for a missed optimization, for example, why did something escape? +func NewLoggedOpt(pos, lastPos src.XPos, what, pass, funcName string, args ...interface{}) *LoggedOpt { + pass = strings.Replace(pass, " ", "_", -1) + return &LoggedOpt{pos, lastPos, pass, funcName, what, args} +} + +// LogOpt logs information about a (usually missed) optimization performed by the compiler. +// Pos is the source position (including inlining), what is the message, pass is which pass created the message, +// funcName is the name of the function. +func LogOpt(pos src.XPos, what, pass, funcName string, args ...interface{}) { + if Format == None { + return + } + lo := NewLoggedOpt(pos, pos, what, pass, funcName, args...) 
+ mu.Lock() + defer mu.Unlock() + // Because of concurrent calls from back end, no telling what the order will be, but is stable-sorted by outer Pos before use. + loggedOpts = append(loggedOpts, lo) +} + +// LogOptRange is the same as LogOpt, but includes the ability to express a range of positions, +// not just a point. +func LogOptRange(pos, lastPos src.XPos, what, pass, funcName string, args ...interface{}) { + if Format == None { + return + } + lo := NewLoggedOpt(pos, lastPos, what, pass, funcName, args...) + mu.Lock() + defer mu.Unlock() + // Because of concurrent calls from back end, no telling what the order will be, but is stable-sorted by outer Pos before use. + loggedOpts = append(loggedOpts, lo) +} + +// Enabled returns whether optimization logging is enabled. +func Enabled() bool { + switch Format { + case None: + return false + case Json0: + return true + } + panic("Unexpected optimizer-logging level") +} + +// byPos sorts diagnostics by source position. +type byPos struct { + ctxt *obj.Link + a []*LoggedOpt +} + +func (x byPos) Len() int { return len(x.a) } +func (x byPos) Less(i, j int) bool { + return x.ctxt.OutermostPos(x.a[i].pos).Before(x.ctxt.OutermostPos(x.a[j].pos)) +} +func (x byPos) Swap(i, j int) { x.a[i], x.a[j] = x.a[j], x.a[i] } + +func writerForLSP(subdirpath, file string) io.WriteCloser { + basename := file + lastslash := strings.LastIndexAny(basename, "\\/") + if lastslash != -1 { + basename = basename[lastslash+1:] + } + lastdot := strings.LastIndex(basename, ".go") + if lastdot != -1 { + basename = basename[:lastdot] + } + basename = url.PathEscape(basename) + + // Assume a directory, make a file + p := filepath.Join(subdirpath, basename+".json") + w, err := os.Create(p) + if err != nil { + log.Fatalf("Could not create file %s for logging optimizer actions, %v", p, err) + } + return w +} + +func fixSlash(f string) string { + if os.PathSeparator == '/' { + return f + } + return strings.Replace(f, string(os.PathSeparator), "/", -1) +} 
+ +func uriIfy(f string) DocumentURI { + url := url.URL{ + Scheme: "file", + Path: fixSlash(f), + } + return DocumentURI(url.String()) +} + +// Return filename, replacing a first occurrence of $GOROOT with the +// actual value of the GOROOT (because LSP does not speak "$GOROOT"). +func uprootedPath(filename string) string { + if filename == "" { + return "__unnamed__" + } + if buildcfg.GOROOT == "" || !strings.HasPrefix(filename, "$GOROOT/") { + return filename + } + return buildcfg.GOROOT + filename[len("$GOROOT"):] +} + +// FlushLoggedOpts flushes all the accumulated optimization log entries. +func FlushLoggedOpts(ctxt *obj.Link, slashPkgPath string) { + if Format == None { + return + } + + sort.Stable(byPos{ctxt, loggedOpts}) // Stable is necessary to preserve the per-function order, which is repeatable. + switch Format { + + case Json0: // LSP 3.15 + var posTmp, lastTmp []src.Pos + var encoder *json.Encoder + var w io.WriteCloser + + if slashPkgPath == "" { + slashPkgPath = "\000" + } + subdirpath := filepath.Join(dest, url.PathEscape(slashPkgPath)) + err := os.MkdirAll(subdirpath, 0755) + if err != nil { + log.Fatalf("Could not create directory %s for logging optimizer actions, %v", subdirpath, err) + } + diagnostic := Diagnostic{Source: "go compiler", Severity: SeverityInformation} + + // For LSP, make a subdirectory for the package, and for each file foo.go, create foo.json in that subdirectory. + currentFile := "" + for _, x := range loggedOpts { + posTmp, p0 := parsePos(ctxt, x.pos, posTmp) + lastTmp, l0 := parsePos(ctxt, x.lastPos, lastTmp) // These match posTmp/p0 except for most-inline, and that often also matches. 
+ p0f := uprootedPath(p0.Filename()) + + if currentFile != p0f { + if w != nil { + w.Close() + } + currentFile = p0f + w = writerForLSP(subdirpath, currentFile) + encoder = json.NewEncoder(w) + encoder.Encode(VersionHeader{Version: 0, Package: slashPkgPath, Goos: buildcfg.GOOS, Goarch: buildcfg.GOARCH, GcVersion: buildcfg.Version, File: currentFile}) + } + + // The first "target" is the most important one. + var target string + if len(x.target) > 0 { + target = fmt.Sprint(x.target[0]) + } + + diagnostic.Code = x.what + diagnostic.Message = target + diagnostic.Range = newRange(p0, l0) + diagnostic.RelatedInformation = diagnostic.RelatedInformation[:0] + + appendInlinedPos(posTmp, lastTmp, &diagnostic) + + // Diagnostic explanation is stored in RelatedInformation after inlining info + if len(x.target) > 1 { + switch y := x.target[1].(type) { + case []*LoggedOpt: + for _, z := range y { + posTmp, p0 := parsePos(ctxt, z.pos, posTmp) + lastTmp, l0 := parsePos(ctxt, z.lastPos, lastTmp) + loc := newLocation(p0, l0) + msg := z.what + if len(z.target) > 0 { + msg = msg + ": " + fmt.Sprint(z.target[0]) + } + + diagnostic.RelatedInformation = append(diagnostic.RelatedInformation, DiagnosticRelatedInformation{Location: loc, Message: msg}) + appendInlinedPos(posTmp, lastTmp, &diagnostic) + } + } + } + + encoder.Encode(diagnostic) + } + if w != nil { + w.Close() + } + } +} + +// newRange returns a single-position Range for the compiler source location p. +func newRange(p, last src.Pos) Range { + return Range{Start: Position{p.Line(), p.Col()}, + End: Position{last.Line(), last.Col()}} +} + +// newLocation returns the Location for the compiler source location p. +func newLocation(p, last src.Pos) Location { + loc := Location{URI: uriIfy(uprootedPath(p.Filename())), Range: newRange(p, last)} + return loc +} + +// appendInlinedPos extracts inlining information from posTmp and append it to diagnostic. 
+func appendInlinedPos(posTmp, lastTmp []src.Pos, diagnostic *Diagnostic) { + for i := 1; i < len(posTmp); i++ { + loc := newLocation(posTmp[i], lastTmp[i]) + diagnostic.RelatedInformation = append(diagnostic.RelatedInformation, DiagnosticRelatedInformation{Location: loc, Message: "inlineLoc"}) + } +} + +// parsePos expands a src.XPos into a slice of src.Pos, with the outermost first. +// It returns the slice, and the outermost. +func parsePos(ctxt *obj.Link, pos src.XPos, posTmp []src.Pos) ([]src.Pos, src.Pos) { + posTmp = posTmp[:0] + ctxt.AllPos(pos, func(p src.Pos) { + posTmp = append(posTmp, p) + }) + return posTmp, posTmp[0] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/logopt/logopt_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/logopt/logopt_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c7debd9897d09b11c2248df775d875550d261134 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/logopt/logopt_test.go @@ -0,0 +1,250 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package logopt + +import ( + "internal/testenv" + "os" + "path/filepath" + "runtime" + "strings" + "testing" +) + +const srcCode = `package x +type pair struct {a,b int} +func bar(y *pair) *int { + return &y.b +} +var a []int +func foo(w, z *pair) *int { + if *bar(w) > 0 { + return bar(z) + } + if a[1] > 0 { + a = a[:2] + } + return &a[0] +} + +// address taking prevents closure inlining +func n() int { + foo := func() int { return 1 } + bar := &foo + x := (*bar)() + foo() + return x +} +` + +func want(t *testing.T, out string, desired string) { + // On Windows, Unicode escapes in the JSON output end up "normalized" elsewhere to /u...., + // so "normalize" what we're looking for to match that. 
+ s := strings.ReplaceAll(desired, string(os.PathSeparator), "/") + if !strings.Contains(out, s) { + t.Errorf("did not see phrase %s in \n%s", s, out) + } +} + +func wantN(t *testing.T, out string, desired string, n int) { + if strings.Count(out, desired) != n { + t.Errorf("expected exactly %d occurrences of %s in \n%s", n, desired, out) + } +} + +func TestPathStuff(t *testing.T) { + sep := string(filepath.Separator) + if path, whine := parseLogPath("file:///c:foo"); path != "c:foo" || whine != "" { // good path + t.Errorf("path='%s', whine='%s'", path, whine) + } + if path, whine := parseLogPath("file:///foo"); path != sep+"foo" || whine != "" { // good path + t.Errorf("path='%s', whine='%s'", path, whine) + } + if path, whine := parseLogPath("foo"); path != "" || whine == "" { // BAD path + t.Errorf("path='%s', whine='%s'", path, whine) + } + if sep == "\\" { // On WINDOWS ONLY + if path, whine := parseLogPath("C:/foo"); path != "C:\\foo" || whine != "" { // good path + t.Errorf("path='%s', whine='%s'", path, whine) + } + if path, whine := parseLogPath("c:foo"); path != "" || whine == "" { // BAD path + t.Errorf("path='%s', whine='%s'", path, whine) + } + if path, whine := parseLogPath("/foo"); path != "" || whine == "" { // BAD path + t.Errorf("path='%s', whine='%s'", path, whine) + } + } else { // ON UNIX ONLY + if path, whine := parseLogPath("/foo"); path != sep+"foo" || whine != "" { // good path + t.Errorf("path='%s', whine='%s'", path, whine) + } + } +} + +func TestLogOpt(t *testing.T) { + t.Parallel() + + testenv.MustHaveGoBuild(t) + + dir := fixSlash(t.TempDir()) // Normalize the directory name as much as possible, for Windows testing + src := filepath.Join(dir, "file.go") + if err := os.WriteFile(src, []byte(srcCode), 0644); err != nil { + t.Fatal(err) + } + + outfile := filepath.Join(dir, "file.o") + + t.Run("JSON_fails", func(t *testing.T) { + // Test malformed flag + out, err := testLogOpt(t, "-json=foo", src, outfile) + if err == nil { + 
t.Error("-json=foo succeeded unexpectedly") + } + want(t, out, "option should be") + want(t, out, "number") + + // Test a version number that is currently unsupported (and should remain unsupported for a while) + out, err = testLogOpt(t, "-json=9,foo", src, outfile) + if err == nil { + t.Error("-json=0,foo succeeded unexpectedly") + } + want(t, out, "version must be") + + }) + + // replace d (dir) with t ("tmpdir") and convert path separators to '/' + normalize := func(out []byte, d, t string) string { + s := string(out) + s = strings.ReplaceAll(s, d, t) + s = strings.ReplaceAll(s, string(os.PathSeparator), "/") + return s + } + + // Ensure that <128 byte copies are not reported and that 128-byte copies are. + // Check at both 1 and 8-byte alignments. + t.Run("Copy", func(t *testing.T) { + const copyCode = `package x +func s128a1(x *[128]int8) [128]int8 { + return *x +} +func s127a1(x *[127]int8) [127]int8 { + return *x +} +func s16a8(x *[16]int64) [16]int64 { + return *x +} +func s15a8(x *[15]int64) [15]int64 { + return *x +} +` + copy := filepath.Join(dir, "copy.go") + if err := os.WriteFile(copy, []byte(copyCode), 0644); err != nil { + t.Fatal(err) + } + outcopy := filepath.Join(dir, "copy.o") + + // On not-amd64, test the host architecture and os + arches := []string{runtime.GOARCH} + goos0 := runtime.GOOS + if runtime.GOARCH == "amd64" { // Test many things with "linux" (wasm will get "js") + arches = []string{"arm", "arm64", "386", "amd64", "mips", "mips64", "loong64", "ppc64le", "riscv64", "s390x", "wasm"} + goos0 = "linux" + } + + for _, arch := range arches { + t.Run(arch, func(t *testing.T) { + goos := goos0 + if arch == "wasm" { + goos = "js" + } + _, err := testCopy(t, dir, arch, goos, copy, outcopy) + if err != nil { + t.Error("-json=0,file://log/opt should have succeeded") + } + logged, err := os.ReadFile(filepath.Join(dir, "log", "opt", "x", "copy.json")) + if err != nil { + t.Error("-json=0,file://log/opt missing expected log file") + } + slogged := 
normalize(logged, string(uriIfy(dir)), string(uriIfy("tmpdir"))) + t.Logf("%s", slogged) + want(t, slogged, `{"range":{"start":{"line":3,"character":2},"end":{"line":3,"character":2}},"severity":3,"code":"copy","source":"go compiler","message":"128 bytes"}`) + want(t, slogged, `{"range":{"start":{"line":9,"character":2},"end":{"line":9,"character":2}},"severity":3,"code":"copy","source":"go compiler","message":"128 bytes"}`) + wantN(t, slogged, `"code":"copy"`, 2) + }) + } + }) + + // Some architectures don't fault on nil dereference, so nilchecks are eliminated differently. + // The N-way copy test also doesn't need to run N-ways N times. + if runtime.GOARCH != "amd64" { + return + } + + t.Run("Success", func(t *testing.T) { + // This test is supposed to succeed + + // Note 'file://' is the I-Know-What-I-Am-Doing way of specifying a file, also to deal with corner cases for Windows. + _, err := testLogOptDir(t, dir, "-json=0,file://log/opt", src, outfile) + if err != nil { + t.Error("-json=0,file://log/opt should have succeeded") + } + logged, err := os.ReadFile(filepath.Join(dir, "log", "opt", "x", "file.json")) + if err != nil { + t.Error("-json=0,file://log/opt missing expected log file") + } + // All this delicacy with uriIfy and filepath.Join is to get this test to work right on Windows. 
+ slogged := normalize(logged, string(uriIfy(dir)), string(uriIfy("tmpdir"))) + t.Logf("%s", slogged) + // below shows proper nilcheck + want(t, slogged, `{"range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}},"severity":3,"code":"nilcheck","source":"go compiler","message":"",`+ + `"relatedInformation":[{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"}]}`) + want(t, slogged, `{"range":{"start":{"line":11,"character":6},"end":{"line":11,"character":6}},"severity":3,"code":"isInBounds","source":"go compiler","message":""}`) + want(t, slogged, `{"range":{"start":{"line":7,"character":6},"end":{"line":7,"character":6}},"severity":3,"code":"canInlineFunction","source":"go compiler","message":"cost: 35"}`) + // escape analysis explanation + want(t, slogged, `{"range":{"start":{"line":7,"character":13},"end":{"line":7,"character":13}},"severity":3,"code":"leak","source":"go compiler","message":"parameter z leaks to ~r0 with derefs=0",`+ + `"relatedInformation":[`+ + `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: y = z:"},`+ + `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y := z (assign-pair)"},`+ + `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: ~r0 = y:"},`+ + `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+ + `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y.b (dot of pointer)"},`+ + 
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+ + `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from \u0026y.b (address-of)"},`+ + `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":9},"end":{"line":4,"character":9}}},"message":"inlineLoc"},`+ + `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~r0 = \u0026y.b (assign-pair)"},`+ + `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r0 = ~r0:"},`+ + `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return ~r0 (return)"}]}`) + }) +} + +func testLogOpt(t *testing.T, flag, src, outfile string) (string, error) { + run := []string{testenv.GoToolPath(t), "tool", "compile", "-p=p", flag, "-o", outfile, src} + t.Log(run) + cmd := testenv.Command(t, run[0], run[1:]...) + out, err := cmd.CombinedOutput() + t.Logf("%s", out) + return string(out), err +} + +func testLogOptDir(t *testing.T, dir, flag, src, outfile string) (string, error) { + // Notice the specified import path "x" + run := []string{testenv.GoToolPath(t), "tool", "compile", "-p=x", flag, "-o", outfile, src} + t.Log(run) + cmd := testenv.Command(t, run[0], run[1:]...) 
+ cmd.Dir = dir + out, err := cmd.CombinedOutput() + t.Logf("%s", out) + return string(out), err +} + +func testCopy(t *testing.T, dir, goarch, goos, src, outfile string) (string, error) { + // Notice the specified import path "x" + run := []string{testenv.GoToolPath(t), "tool", "compile", "-p=x", "-json=0,file://log/opt", "-o", outfile, src} + t.Log(run) + cmd := testenv.Command(t, run[0], run[1:]...) + cmd.Dir = dir + cmd.Env = append(os.Environ(), "GOARCH="+goarch, "GOOS="+goos) + out, err := cmd.CombinedOutput() + t.Logf("%s", out) + return string(out), err +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loong64/galign.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loong64/galign.go new file mode 100644 index 0000000000000000000000000000000000000000..a613165054429f995b569f049880fd5618611a09 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loong64/galign.go @@ -0,0 +1,25 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package loong64 + +import ( + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/internal/obj/loong64" +) + +func Init(arch *ssagen.ArchInfo) { + arch.LinkArch = &loong64.Linkloong64 + arch.REGSP = loong64.REGSP + arch.MAXWIDTH = 1 << 50 + arch.ZeroRange = zerorange + arch.Ginsnop = ginsnop + + arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {} + arch.SSAGenValue = ssaGenValue + arch.SSAGenBlock = ssaGenBlock + arch.LoadRegResult = loadRegResult + arch.SpillArgReg = spillArgReg +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loong64/ggen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loong64/ggen.go new file mode 100644 index 0000000000000000000000000000000000000000..27d318a8bb92bdc01663617b50f0a6132a6aa81d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loong64/ggen.go @@ -0,0 +1,60 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package loong64 + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/loong64" +) + +func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { + if cnt == 0 { + return p + } + + // Adjust the frame to account for LR. 
+ off += base.Ctxt.Arch.FixedFrameSize + + if cnt < int64(4*types.PtrSize) { + for i := int64(0); i < cnt; i += int64(types.PtrSize) { + p = pp.Append(p, loong64.AMOVV, obj.TYPE_REG, loong64.REGZERO, 0, obj.TYPE_MEM, loong64.REGSP, off+i) + } + } else if cnt <= int64(128*types.PtrSize) { + p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, off, obj.TYPE_REG, loong64.REGRT1, 0) + p.Reg = loong64.REGSP + p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ir.Syms.Duffzero + p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize)) + } else { + // ADDV $(off), SP, r1 + // ADDV $cnt, r1, r2 + // loop: + // MOVV R0, (r1) + // ADDV $Widthptr, r1 + // BNE r1, r2, loop + p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, off, obj.TYPE_REG, loong64.REGRT1, 0) + p.Reg = loong64.REGSP + p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, loong64.REGRT2, 0) + p.Reg = loong64.REGRT1 + p = pp.Append(p, loong64.AMOVV, obj.TYPE_REG, loong64.REGZERO, 0, obj.TYPE_MEM, loong64.REGRT1, 0) + loop := p + p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, loong64.REGRT1, 0) + p = pp.Append(p, loong64.ABNE, obj.TYPE_REG, loong64.REGRT1, 0, obj.TYPE_BRANCH, 0, 0) + p.Reg = loong64.REGRT2 + p.To.SetTarget(loop) + } + + return p +} + +func ginsnop(pp *objw.Progs) *obj.Prog { + p := pp.Prog(loong64.ANOOP) + return p +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loong64/ssa.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loong64/ssa.go new file mode 100644 index 0000000000000000000000000000000000000000..e7298bdb9fa57011a837b3bbf4e815c184563e6b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loong64/ssa.go @@ -0,0 +1,830 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package loong64 + +import ( + "math" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/logopt" + "cmd/compile/internal/objw" + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/loong64" +) + +// isFPreg reports whether r is an FP register. +func isFPreg(r int16) bool { + return loong64.REG_F0 <= r && r <= loong64.REG_F31 +} + +// loadByType returns the load instruction of the given type. +func loadByType(t *types.Type, r int16) obj.As { + if isFPreg(r) { + if t.Size() == 4 { + return loong64.AMOVF + } else { + return loong64.AMOVD + } + } else { + switch t.Size() { + case 1: + if t.IsSigned() { + return loong64.AMOVB + } else { + return loong64.AMOVBU + } + case 2: + if t.IsSigned() { + return loong64.AMOVH + } else { + return loong64.AMOVHU + } + case 4: + if t.IsSigned() { + return loong64.AMOVW + } else { + return loong64.AMOVWU + } + case 8: + return loong64.AMOVV + } + } + panic("bad load type") +} + +// storeByType returns the store instruction of the given type. +func storeByType(t *types.Type, r int16) obj.As { + if isFPreg(r) { + if t.Size() == 4 { + return loong64.AMOVF + } else { + return loong64.AMOVD + } + } else { + switch t.Size() { + case 1: + return loong64.AMOVB + case 2: + return loong64.AMOVH + case 4: + return loong64.AMOVW + case 8: + return loong64.AMOVV + } + } + panic("bad store type") +} + +// largestMove returns the largest move instruction possible and its size, +// given the alignment of the total size of the move. +// +// e.g., a 16-byte move may use MOVV, but an 11-byte move must use MOVB. +// +// Note that the moves may not be on naturally aligned addresses depending on +// the source and destination. +// +// This matches the calculation in ssa.moveSize. 
+func largestMove(alignment int64) (obj.As, int64) { + switch { + case alignment%8 == 0: + return loong64.AMOVV, 8 + case alignment%4 == 0: + return loong64.AMOVW, 4 + case alignment%2 == 0: + return loong64.AMOVH, 2 + default: + return loong64.AMOVB, 1 + } +} + +func ssaGenValue(s *ssagen.State, v *ssa.Value) { + switch v.Op { + case ssa.OpCopy, ssa.OpLOONG64MOVVreg: + if v.Type.IsMemory() { + return + } + x := v.Args[0].Reg() + y := v.Reg() + if x == y { + return + } + as := loong64.AMOVV + if isFPreg(x) && isFPreg(y) { + as = loong64.AMOVD + } + p := s.Prog(as) + p.From.Type = obj.TYPE_REG + p.From.Reg = x + p.To.Type = obj.TYPE_REG + p.To.Reg = y + case ssa.OpLOONG64MOVVnop: + // nothing to do + case ssa.OpLoadReg: + if v.Type.IsFlags() { + v.Fatalf("load flags not implemented: %v", v.LongString()) + return + } + r := v.Reg() + p := s.Prog(loadByType(v.Type, r)) + ssagen.AddrAuto(&p.From, v.Args[0]) + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpStoreReg: + if v.Type.IsFlags() { + v.Fatalf("store flags not implemented: %v", v.LongString()) + return + } + r := v.Args[0].Reg() + p := s.Prog(storeByType(v.Type, r)) + p.From.Type = obj.TYPE_REG + p.From.Reg = r + ssagen.AddrAuto(&p.To, v) + case ssa.OpArgIntReg, ssa.OpArgFloatReg: + // The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill + // The loop only runs once. + for _, a := range v.Block.Func.RegArgs { + // Pass the spill/unspill information along to the assembler, offset by size of + // the saved LR slot. 
+ addr := ssagen.SpillSlotAddr(a, loong64.REGSP, base.Ctxt.Arch.FixedFrameSize) + s.FuncInfo().AddSpill( + obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type, a.Reg), Spill: storeByType(a.Type, a.Reg)}) + } + v.Block.Func.RegArgs = nil + ssagen.CheckArgReg(v) + case ssa.OpLOONG64ADDV, + ssa.OpLOONG64SUBV, + ssa.OpLOONG64AND, + ssa.OpLOONG64OR, + ssa.OpLOONG64XOR, + ssa.OpLOONG64NOR, + ssa.OpLOONG64SLLV, + ssa.OpLOONG64SRLV, + ssa.OpLOONG64SRAV, + ssa.OpLOONG64ROTR, + ssa.OpLOONG64ROTRV, + ssa.OpLOONG64ADDF, + ssa.OpLOONG64ADDD, + ssa.OpLOONG64SUBF, + ssa.OpLOONG64SUBD, + ssa.OpLOONG64MULF, + ssa.OpLOONG64MULD, + ssa.OpLOONG64DIVF, + ssa.OpLOONG64DIVD, + ssa.OpLOONG64MULV, ssa.OpLOONG64MULHV, ssa.OpLOONG64MULHVU, + ssa.OpLOONG64DIVV, ssa.OpLOONG64REMV, ssa.OpLOONG64DIVVU, ssa.OpLOONG64REMVU: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpLOONG64SGT, + ssa.OpLOONG64SGTU: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpLOONG64ADDVconst, + ssa.OpLOONG64SUBVconst, + ssa.OpLOONG64ANDconst, + ssa.OpLOONG64ORconst, + ssa.OpLOONG64XORconst, + ssa.OpLOONG64NORconst, + ssa.OpLOONG64SLLVconst, + ssa.OpLOONG64SRLVconst, + ssa.OpLOONG64SRAVconst, + ssa.OpLOONG64ROTRconst, + ssa.OpLOONG64ROTRVconst, + ssa.OpLOONG64SGTconst, + ssa.OpLOONG64SGTUconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpLOONG64MOVVconst: + r := v.Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = r + if isFPreg(r) { + // cannot move into FP or special registers, use TMP as intermediate + p.To.Reg = loong64.REGTMP + p = s.Prog(loong64.AMOVV) + 
p.From.Type = obj.TYPE_REG + p.From.Reg = loong64.REGTMP + p.To.Type = obj.TYPE_REG + p.To.Reg = r + } + case ssa.OpLOONG64MOVFconst, + ssa.OpLOONG64MOVDconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_FCONST + p.From.Val = math.Float64frombits(uint64(v.AuxInt)) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpLOONG64CMPEQF, + ssa.OpLOONG64CMPEQD, + ssa.OpLOONG64CMPGEF, + ssa.OpLOONG64CMPGED, + ssa.OpLOONG64CMPGTF, + ssa.OpLOONG64CMPGTD: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.Reg = v.Args[1].Reg() + case ssa.OpLOONG64MOVVaddr: + p := s.Prog(loong64.AMOVV) + p.From.Type = obj.TYPE_ADDR + p.From.Reg = v.Args[0].Reg() + var wantreg string + // MOVV $sym+off(base), R + // the assembler expands it as the following: + // - base is SP: add constant offset to SP (R3) + // when constant is large, tmp register (R30) may be used + // - base is SB: load external address with relocation + switch v.Aux.(type) { + default: + v.Fatalf("aux is of unknown type %T", v.Aux) + case *obj.LSym: + wantreg = "SB" + ssagen.AddAux(&p.From, v) + case *ir.Name: + wantreg = "SP" + ssagen.AddAux(&p.From, v) + case nil: + // No sym, just MOVV $off(SP), R + wantreg = "SP" + p.From.Offset = v.AuxInt + } + if reg := v.Args[0].RegName(); reg != wantreg { + v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg) + } + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpLOONG64MOVBload, + ssa.OpLOONG64MOVBUload, + ssa.OpLOONG64MOVHload, + ssa.OpLOONG64MOVHUload, + ssa.OpLOONG64MOVWload, + ssa.OpLOONG64MOVWUload, + ssa.OpLOONG64MOVVload, + ssa.OpLOONG64MOVFload, + ssa.OpLOONG64MOVDload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpLOONG64MOVBstore, + ssa.OpLOONG64MOVHstore, + ssa.OpLOONG64MOVWstore, + ssa.OpLOONG64MOVVstore, + ssa.OpLOONG64MOVFstore, + ssa.OpLOONG64MOVDstore: + p := 
s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpLOONG64MOVBstorezero, + ssa.OpLOONG64MOVHstorezero, + ssa.OpLOONG64MOVWstorezero, + ssa.OpLOONG64MOVVstorezero: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = loong64.REGZERO + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpLOONG64MOVBreg, + ssa.OpLOONG64MOVBUreg, + ssa.OpLOONG64MOVHreg, + ssa.OpLOONG64MOVHUreg, + ssa.OpLOONG64MOVWreg, + ssa.OpLOONG64MOVWUreg: + a := v.Args[0] + for a.Op == ssa.OpCopy || a.Op == ssa.OpLOONG64MOVVreg { + a = a.Args[0] + } + if a.Op == ssa.OpLoadReg && loong64.REG_R0 <= a.Reg() && a.Reg() <= loong64.REG_R31 { + // LoadReg from a narrower type does an extension, except loading + // to a floating point register. So only eliminate the extension + // if it is loaded to an integer register. + + t := a.Type + switch { + case v.Op == ssa.OpLOONG64MOVBreg && t.Size() == 1 && t.IsSigned(), + v.Op == ssa.OpLOONG64MOVBUreg && t.Size() == 1 && !t.IsSigned(), + v.Op == ssa.OpLOONG64MOVHreg && t.Size() == 2 && t.IsSigned(), + v.Op == ssa.OpLOONG64MOVHUreg && t.Size() == 2 && !t.IsSigned(), + v.Op == ssa.OpLOONG64MOVWreg && t.Size() == 4 && t.IsSigned(), + v.Op == ssa.OpLOONG64MOVWUreg && t.Size() == 4 && !t.IsSigned(): + // arg is a proper-typed load, already zero/sign-extended, don't extend again + if v.Reg() == v.Args[0].Reg() { + return + } + p := s.Prog(loong64.AMOVV) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + return + default: + } + } + fallthrough + case ssa.OpLOONG64MOVWF, + ssa.OpLOONG64MOVWD, + ssa.OpLOONG64TRUNCFW, + ssa.OpLOONG64TRUNCDW, + ssa.OpLOONG64MOVVF, + ssa.OpLOONG64MOVVD, + ssa.OpLOONG64TRUNCFV, + ssa.OpLOONG64TRUNCDV, + ssa.OpLOONG64MOVFD, + ssa.OpLOONG64MOVDF, + ssa.OpLOONG64NEGF, + ssa.OpLOONG64NEGD, + 
ssa.OpLOONG64SQRTD, + ssa.OpLOONG64SQRTF: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpLOONG64NEGV: + // SUB from REGZERO + p := s.Prog(loong64.ASUBVU) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.Reg = loong64.REGZERO + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpLOONG64DUFFZERO: + // runtime.duffzero expects start address in R20 + p := s.Prog(obj.ADUFFZERO) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ir.Syms.Duffzero + p.To.Offset = v.AuxInt + case ssa.OpLOONG64LoweredZero: + // MOVx R0, (Rarg0) + // ADDV $sz, Rarg0 + // BGEU Rarg1, Rarg0, -2(PC) + mov, sz := largestMove(v.AuxInt) + p := s.Prog(mov) + p.From.Type = obj.TYPE_REG + p.From.Reg = loong64.REGZERO + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + + p2 := s.Prog(loong64.AADDVU) + p2.From.Type = obj.TYPE_CONST + p2.From.Offset = sz + p2.To.Type = obj.TYPE_REG + p2.To.Reg = v.Args[0].Reg() + + p3 := s.Prog(loong64.ABGEU) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = v.Args[1].Reg() + p3.Reg = v.Args[0].Reg() + p3.To.Type = obj.TYPE_BRANCH + p3.To.SetTarget(p) + + case ssa.OpLOONG64DUFFCOPY: + p := s.Prog(obj.ADUFFCOPY) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ir.Syms.Duffcopy + p.To.Offset = v.AuxInt + case ssa.OpLOONG64LoweredMove: + // MOVx (Rarg1), Rtmp + // MOVx Rtmp, (Rarg0) + // ADDV $sz, Rarg1 + // ADDV $sz, Rarg0 + // BGEU Rarg2, Rarg0, -4(PC) + mov, sz := largestMove(v.AuxInt) + p := s.Prog(mov) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = loong64.REGTMP + + p2 := s.Prog(mov) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = loong64.REGTMP + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = v.Args[0].Reg() + + p3 := s.Prog(loong64.AADDVU) + p3.From.Type = obj.TYPE_CONST + p3.From.Offset = sz + p3.To.Type = obj.TYPE_REG + p3.To.Reg = v.Args[1].Reg() + + p4 := 
s.Prog(loong64.AADDVU) + p4.From.Type = obj.TYPE_CONST + p4.From.Offset = sz + p4.To.Type = obj.TYPE_REG + p4.To.Reg = v.Args[0].Reg() + + p5 := s.Prog(loong64.ABGEU) + p5.From.Type = obj.TYPE_REG + p5.From.Reg = v.Args[2].Reg() + p5.Reg = v.Args[1].Reg() + p5.To.Type = obj.TYPE_BRANCH + p5.To.SetTarget(p) + + case ssa.OpLOONG64CALLstatic, ssa.OpLOONG64CALLclosure, ssa.OpLOONG64CALLinter: + s.Call(v) + case ssa.OpLOONG64CALLtail: + s.TailCall(v) + case ssa.OpLOONG64LoweredWB: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + // AuxInt encodes how many buffer entries we need. + p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1] + case ssa.OpLOONG64LoweredPanicBoundsA, ssa.OpLOONG64LoweredPanicBoundsB, ssa.OpLOONG64LoweredPanicBoundsC: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] + s.UseArgs(16) // space used in callee args area by assembly stubs + case ssa.OpLOONG64LoweredAtomicLoad8, ssa.OpLOONG64LoweredAtomicLoad32, ssa.OpLOONG64LoweredAtomicLoad64: + as := loong64.AMOVV + switch v.Op { + case ssa.OpLOONG64LoweredAtomicLoad8: + as = loong64.AMOVB + case ssa.OpLOONG64LoweredAtomicLoad32: + as = loong64.AMOVW + } + s.Prog(loong64.ADBAR) + p := s.Prog(as) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + s.Prog(loong64.ADBAR) + case ssa.OpLOONG64LoweredAtomicStore8, ssa.OpLOONG64LoweredAtomicStore32, ssa.OpLOONG64LoweredAtomicStore64: + as := loong64.AMOVV + switch v.Op { + case ssa.OpLOONG64LoweredAtomicStore8: + as = loong64.AMOVB + case ssa.OpLOONG64LoweredAtomicStore32: + as = loong64.AMOVW + } + s.Prog(loong64.ADBAR) + p := s.Prog(as) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + s.Prog(loong64.ADBAR) + case ssa.OpLOONG64LoweredAtomicStorezero32, ssa.OpLOONG64LoweredAtomicStorezero64: + as := loong64.AMOVV + if v.Op == 
ssa.OpLOONG64LoweredAtomicStorezero32 { + as = loong64.AMOVW + } + s.Prog(loong64.ADBAR) + p := s.Prog(as) + p.From.Type = obj.TYPE_REG + p.From.Reg = loong64.REGZERO + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + s.Prog(loong64.ADBAR) + case ssa.OpLOONG64LoweredAtomicExchange32, ssa.OpLOONG64LoweredAtomicExchange64: + // DBAR + // MOVV Rarg1, Rtmp + // LL (Rarg0), Rout + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // DBAR + ll := loong64.ALLV + sc := loong64.ASCV + if v.Op == ssa.OpLOONG64LoweredAtomicExchange32 { + ll = loong64.ALL + sc = loong64.ASC + } + s.Prog(loong64.ADBAR) + p := s.Prog(loong64.AMOVV) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = loong64.REGTMP + p1 := s.Prog(ll) + p1.From.Type = obj.TYPE_MEM + p1.From.Reg = v.Args[0].Reg() + p1.To.Type = obj.TYPE_REG + p1.To.Reg = v.Reg0() + p2 := s.Prog(sc) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = loong64.REGTMP + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = v.Args[0].Reg() + p3 := s.Prog(loong64.ABEQ) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = loong64.REGTMP + p3.To.Type = obj.TYPE_BRANCH + p3.To.SetTarget(p) + s.Prog(loong64.ADBAR) + case ssa.OpLOONG64LoweredAtomicAdd32, ssa.OpLOONG64LoweredAtomicAdd64: + // DBAR + // LL (Rarg0), Rout + // ADDV Rarg1, Rout, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // DBAR + // ADDV Rarg1, Rout + ll := loong64.ALLV + sc := loong64.ASCV + if v.Op == ssa.OpLOONG64LoweredAtomicAdd32 { + ll = loong64.ALL + sc = loong64.ASC + } + s.Prog(loong64.ADBAR) + p := s.Prog(ll) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + p1 := s.Prog(loong64.AADDVU) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = v.Args[1].Reg() + p1.Reg = v.Reg0() + p1.To.Type = obj.TYPE_REG + p1.To.Reg = loong64.REGTMP + p2 := s.Prog(sc) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = loong64.REGTMP + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = v.Args[0].Reg() + p3 := 
s.Prog(loong64.ABEQ) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = loong64.REGTMP + p3.To.Type = obj.TYPE_BRANCH + p3.To.SetTarget(p) + s.Prog(loong64.ADBAR) + p4 := s.Prog(loong64.AADDVU) + p4.From.Type = obj.TYPE_REG + p4.From.Reg = v.Args[1].Reg() + p4.Reg = v.Reg0() + p4.To.Type = obj.TYPE_REG + p4.To.Reg = v.Reg0() + case ssa.OpLOONG64LoweredAtomicAddconst32, ssa.OpLOONG64LoweredAtomicAddconst64: + // DBAR + // LL (Rarg0), Rout + // ADDV $auxint, Rout, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // DBAR + // ADDV $auxint, Rout + ll := loong64.ALLV + sc := loong64.ASCV + if v.Op == ssa.OpLOONG64LoweredAtomicAddconst32 { + ll = loong64.ALL + sc = loong64.ASC + } + s.Prog(loong64.ADBAR) + p := s.Prog(ll) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + p1 := s.Prog(loong64.AADDVU) + p1.From.Type = obj.TYPE_CONST + p1.From.Offset = v.AuxInt + p1.Reg = v.Reg0() + p1.To.Type = obj.TYPE_REG + p1.To.Reg = loong64.REGTMP + p2 := s.Prog(sc) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = loong64.REGTMP + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = v.Args[0].Reg() + p3 := s.Prog(loong64.ABEQ) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = loong64.REGTMP + p3.To.Type = obj.TYPE_BRANCH + p3.To.SetTarget(p) + s.Prog(loong64.ADBAR) + p4 := s.Prog(loong64.AADDVU) + p4.From.Type = obj.TYPE_CONST + p4.From.Offset = v.AuxInt + p4.Reg = v.Reg0() + p4.To.Type = obj.TYPE_REG + p4.To.Reg = v.Reg0() + case ssa.OpLOONG64LoweredAtomicCas32, ssa.OpLOONG64LoweredAtomicCas64: + // MOVV $0, Rout + // DBAR + // LL (Rarg0), Rtmp + // BNE Rtmp, Rarg1, 4(PC) + // MOVV Rarg2, Rout + // SC Rout, (Rarg0) + // BEQ Rout, -4(PC) + // DBAR + ll := loong64.ALLV + sc := loong64.ASCV + if v.Op == ssa.OpLOONG64LoweredAtomicCas32 { + ll = loong64.ALL + sc = loong64.ASC + } + p := s.Prog(loong64.AMOVV) + p.From.Type = obj.TYPE_REG + p.From.Reg = loong64.REGZERO + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + s.Prog(loong64.ADBAR) + p1 := 
s.Prog(ll) + p1.From.Type = obj.TYPE_MEM + p1.From.Reg = v.Args[0].Reg() + p1.To.Type = obj.TYPE_REG + p1.To.Reg = loong64.REGTMP + p2 := s.Prog(loong64.ABNE) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = v.Args[1].Reg() + p2.Reg = loong64.REGTMP + p2.To.Type = obj.TYPE_BRANCH + p3 := s.Prog(loong64.AMOVV) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = v.Args[2].Reg() + p3.To.Type = obj.TYPE_REG + p3.To.Reg = v.Reg0() + p4 := s.Prog(sc) + p4.From.Type = obj.TYPE_REG + p4.From.Reg = v.Reg0() + p4.To.Type = obj.TYPE_MEM + p4.To.Reg = v.Args[0].Reg() + p5 := s.Prog(loong64.ABEQ) + p5.From.Type = obj.TYPE_REG + p5.From.Reg = v.Reg0() + p5.To.Type = obj.TYPE_BRANCH + p5.To.SetTarget(p1) + p6 := s.Prog(loong64.ADBAR) + p2.To.SetTarget(p6) + case ssa.OpLOONG64LoweredNilCheck: + // Issue a load which will fault if arg is nil. + p := s.Prog(loong64.AMOVB) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = loong64.REGTMP + if logopt.Enabled() { + logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) + } + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") + } + case ssa.OpLOONG64FPFlagTrue, + ssa.OpLOONG64FPFlagFalse: + // MOVV $0, r + // BFPF 2(PC) + // MOVV $1, r + branch := loong64.ABFPF + if v.Op == ssa.OpLOONG64FPFlagFalse { + branch = loong64.ABFPT + } + p := s.Prog(loong64.AMOVV) + p.From.Type = obj.TYPE_REG + p.From.Reg = loong64.REGZERO + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + p2 := s.Prog(branch) + p2.To.Type = obj.TYPE_BRANCH + p3 := s.Prog(loong64.AMOVV) + p3.From.Type = obj.TYPE_CONST + p3.From.Offset = 1 + p3.To.Type = obj.TYPE_REG + p3.To.Reg = v.Reg() + p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land + p2.To.SetTarget(p4) + case ssa.OpLOONG64LoweredGetClosurePtr: + // Closure pointer is R22 (loong64.REGCTXT). 
+ ssagen.CheckLoweredGetClosurePtr(v) + case ssa.OpLOONG64LoweredGetCallerSP: + // caller's SP is FixedFrameSize below the address of the first arg + p := s.Prog(loong64.AMOVV) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = -base.Ctxt.Arch.FixedFrameSize + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpLOONG64LoweredGetCallerPC: + p := s.Prog(obj.AGETCALLERPC) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpLOONG64MASKEQZ, ssa.OpLOONG64MASKNEZ: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpClobber, ssa.OpClobberReg: + // TODO: implement for clobberdead experiment. Nop is ok for now. + default: + v.Fatalf("genValue not implemented: %s", v.LongString()) + } +} + +var blockJump = map[ssa.BlockKind]struct { + asm, invasm obj.As +}{ + ssa.BlockLOONG64EQ: {loong64.ABEQ, loong64.ABNE}, + ssa.BlockLOONG64NE: {loong64.ABNE, loong64.ABEQ}, + ssa.BlockLOONG64LTZ: {loong64.ABLTZ, loong64.ABGEZ}, + ssa.BlockLOONG64GEZ: {loong64.ABGEZ, loong64.ABLTZ}, + ssa.BlockLOONG64LEZ: {loong64.ABLEZ, loong64.ABGTZ}, + ssa.BlockLOONG64GTZ: {loong64.ABGTZ, loong64.ABLEZ}, + ssa.BlockLOONG64FPT: {loong64.ABFPT, loong64.ABFPF}, + ssa.BlockLOONG64FPF: {loong64.ABFPF, loong64.ABFPT}, +} + +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { + switch b.Kind { + case ssa.BlockPlain: + if b.Succs[0].Block() != next { + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) + } + case ssa.BlockDefer: + // defer returns in R19: + // 0 if we should continue executing + // 1 if we should jump to deferreturn call + p := s.Prog(loong64.ABNE) + p.From.Type = obj.TYPE_REG + p.From.Reg = loong64.REGZERO + p.Reg = loong64.REG_R19 + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()}) + if 
b.Succs[0].Block() != next { + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) + } + case ssa.BlockExit, ssa.BlockRetJmp: + case ssa.BlockRet: + s.Prog(obj.ARET) + case ssa.BlockLOONG64EQ, ssa.BlockLOONG64NE, + ssa.BlockLOONG64LTZ, ssa.BlockLOONG64GEZ, + ssa.BlockLOONG64LEZ, ssa.BlockLOONG64GTZ, + ssa.BlockLOONG64FPT, ssa.BlockLOONG64FPF: + jmp := blockJump[b.Kind] + var p *obj.Prog + switch next { + case b.Succs[0].Block(): + p = s.Br(jmp.invasm, b.Succs[1].Block()) + case b.Succs[1].Block(): + p = s.Br(jmp.asm, b.Succs[0].Block()) + default: + if b.Likely != ssa.BranchUnlikely { + p = s.Br(jmp.asm, b.Succs[0].Block()) + s.Br(obj.AJMP, b.Succs[1].Block()) + } else { + p = s.Br(jmp.invasm, b.Succs[1].Block()) + s.Br(obj.AJMP, b.Succs[0].Block()) + } + } + if !b.Controls[0].Type.IsFlags() { + p.From.Type = obj.TYPE_REG + p.From.Reg = b.Controls[0].Reg() + } + default: + b.Fatalf("branch not implemented: %s", b.LongString()) + } +} + +func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { + p := s.Prog(loadByType(t, reg)) + p.From.Type = obj.TYPE_MEM + p.From.Name = obj.NAME_AUTO + p.From.Sym = n.Linksym() + p.From.Offset = n.FrameOffset() + off + p.To.Type = obj.TYPE_REG + p.To.Reg = reg + return p +} + +func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { + p = pp.Append(p, storeByType(t, reg), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off) + p.To.Name = obj.NAME_PARAM + p.To.Sym = n.Linksym() + p.Pos = p.Pos.WithNotStmt() + return p +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/loopvar.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/loopvar.go new file mode 100644 index 0000000000000000000000000000000000000000..030fc04c1369d8da7c1e946dfcc64e709dca2a3d --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/loopvar.go @@ -0,0 +1,612 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package loopvar applies the proper variable capture, according +// to experiment, flags, language version, etc. +package loopvar + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/logopt" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/src" + "fmt" +) + +type VarAndLoop struct { + Name *ir.Name + Loop ir.Node // the *ir.RangeStmt or *ir.ForStmt. Used for identity and position + LastPos src.XPos // the last position observed within Loop +} + +// ForCapture transforms for and range loops that declare variables that might be +// captured by a closure or escaped to the heap, using a syntactic check that +// conservatively overestimates the loops where capture occurs, but still avoids +// transforming the (large) majority of loops. It returns the list of names +// subject to this change, that may (once transformed) be heap allocated in the +// process. (This allows checking after escape analysis to call out any such +// variables, in case it causes allocation/performance problems). +// +// The decision to transform loops is normally encoded in the For/Range loop node +// field DistinctVars but is also dependent on base.LoopVarHash, and some values +// of base.Debug.LoopVar (which is set per-package). Decisions encoded in DistinctVars +// are preserved across inlining, so if package a calls b.F and loops in b.F are +// transformed, then they are always transformed, whether b.F is inlined or not. +// +// Per-package, the debug flag settings that affect this transformer: +// +// base.LoopVarHash != nil => use hash setting to govern transformation. 
+// note that LoopVarHash != nil sets base.Debug.LoopVar to 1 (unless it is >= 11, for testing/debugging). +// +// base.Debug.LoopVar == 11 => transform ALL loops ignoring syntactic/potential escape. Do not log, can be in addition to GOEXPERIMENT. +// +// The effect of GOEXPERIMENT=loopvar is to change the default value (0) of base.Debug.LoopVar to 1 for all packages. +func ForCapture(fn *ir.Func) []VarAndLoop { + // if a loop variable is transformed it is appended to this slice for later logging + var transformed []VarAndLoop + + describe := func(n *ir.Name) string { + pos := n.Pos() + inner := base.Ctxt.InnermostPos(pos) + outer := base.Ctxt.OutermostPos(pos) + if inner == outer { + return fmt.Sprintf("loop variable %v now per-iteration", n) + } + return fmt.Sprintf("loop variable %v now per-iteration (loop inlined into %s:%d)", n, outer.Filename(), outer.Line()) + } + + forCapture := func() { + seq := 1 + + dclFixups := make(map[*ir.Name]ir.Stmt) + + // possibly leaked includes names of declared loop variables that may be leaked; + // the mapped value is true if the name is *syntactically* leaked, and those loops + // will be transformed. + possiblyLeaked := make(map[*ir.Name]bool) + + // these enable an optimization of "escape" under return statements + loopDepth := 0 + returnInLoopDepth := 0 + + // noteMayLeak is called for candidate variables in for range/3-clause, and + // adds them (mapped to false) to possiblyLeaked. + noteMayLeak := func(x ir.Node) { + if n, ok := x.(*ir.Name); ok { + if n.Type().Kind() == types.TBLANK { + return + } + // default is false (leak candidate, not yet known to leak), but flag can make all variables "leak" + possiblyLeaked[n] = base.Debug.LoopVar >= 11 + } + } + + // For reporting, keep track of the last position within any loop. + // Loops nest, also need to be sensitive to inlining. 
+ var lastPos src.XPos + + updateLastPos := func(p src.XPos) { + pl, ll := p.Line(), lastPos.Line() + if p.SameFile(lastPos) && + (pl > ll || pl == ll && p.Col() > lastPos.Col()) { + lastPos = p + } + } + + // maybeReplaceVar unshares an iteration variable for a range loop, + // if that variable was actually (syntactically) leaked, + // subject to hash-variable debugging. + maybeReplaceVar := func(k ir.Node, x *ir.RangeStmt) ir.Node { + if n, ok := k.(*ir.Name); ok && possiblyLeaked[n] { + desc := func() string { + return describe(n) + } + if base.LoopVarHash.MatchPos(n.Pos(), desc) { + // Rename the loop key, prefix body with assignment from loop key + transformed = append(transformed, VarAndLoop{n, x, lastPos}) + tk := typecheck.TempAt(base.Pos, fn, n.Type()) + tk.SetTypecheck(1) + as := ir.NewAssignStmt(x.Pos(), n, tk) + as.Def = true + as.SetTypecheck(1) + x.Body.Prepend(as) + dclFixups[n] = as + return tk + } + } + return k + } + + // scanChildrenThenTransform processes node x to: + // 1. if x is a for/range w/ DistinctVars, note declared iteration variables possiblyLeaked (PL) + // 2. search all of x's children for syntactically escaping references to v in PL, + // meaning either address-of-v or v-captured-by-a-closure + // 3. for all v in PL that had a syntactically escaping reference, transform the declaration + // and (in case of 3-clause loop) the loop to the unshared loop semantics. + // This is all much simpler for range loops; 3-clause loops can have an arbitrary number + // of iteration variables and the transformation is more involved, range loops have at most 2. + var scanChildrenThenTransform func(x ir.Node) bool + scanChildrenThenTransform = func(n ir.Node) bool { + + if loopDepth > 0 { + updateLastPos(n.Pos()) + } + + switch x := n.(type) { + case *ir.ClosureExpr: + if returnInLoopDepth >= loopDepth { + // This expression is a child of a return, which escapes all loops above + // the return, but not those between this expression and the return. 
+ break + } + for _, cv := range x.Func.ClosureVars { + v := cv.Canonical() + if _, ok := possiblyLeaked[v]; ok { + possiblyLeaked[v] = true + } + } + + case *ir.AddrExpr: + if returnInLoopDepth >= loopDepth { + // This expression is a child of a return, which escapes all loops above + // the return, but not those between this expression and the return. + break + } + // Explicitly note address-taken so that return-statements can be excluded + y := ir.OuterValue(x.X) + if y.Op() != ir.ONAME { + break + } + z, ok := y.(*ir.Name) + if !ok { + break + } + switch z.Class { + case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT, ir.PAUTOHEAP: + if _, ok := possiblyLeaked[z]; ok { + possiblyLeaked[z] = true + } + } + + case *ir.ReturnStmt: + savedRILD := returnInLoopDepth + returnInLoopDepth = loopDepth + defer func() { returnInLoopDepth = savedRILD }() + + case *ir.RangeStmt: + if !(x.Def && x.DistinctVars) { + // range loop must define its iteration variables AND have distinctVars. + x.DistinctVars = false + break + } + noteMayLeak(x.Key) + noteMayLeak(x.Value) + loopDepth++ + savedLastPos := lastPos + lastPos = x.Pos() // this sets the file. + ir.DoChildren(n, scanChildrenThenTransform) + loopDepth-- + x.Key = maybeReplaceVar(x.Key, x) + x.Value = maybeReplaceVar(x.Value, x) + thisLastPos := lastPos + lastPos = savedLastPos + updateLastPos(thisLastPos) // this will propagate lastPos if in the same file. + x.DistinctVars = false + return false + + case *ir.ForStmt: + if !x.DistinctVars { + break + } + forAllDefInInit(x, noteMayLeak) + loopDepth++ + savedLastPos := lastPos + lastPos = x.Pos() // this sets the file. + ir.DoChildren(n, scanChildrenThenTransform) + loopDepth-- + var leaked []*ir.Name + // Collect the leaking variables for the much-more-complex transformation. + forAllDefInInit(x, func(z ir.Node) { + if n, ok := z.(*ir.Name); ok && possiblyLeaked[n] { + desc := func() string { + return describe(n) + } + // Hash on n.Pos() for most precise failure location. 
+ if base.LoopVarHash.MatchPos(n.Pos(), desc) { + leaked = append(leaked, n) + } + } + }) + + if len(leaked) > 0 { + // need to transform the for loop just so. + + /* Contrived example, w/ numbered comments from the transformation: + BEFORE: + var escape []*int + for z := 0; z < n; z++ { + if reason() { + escape = append(escape, &z) + continue + } + z = z + z + stuff + } + AFTER: + for z', tmp_first := 0, true; ; { // (4) + // (5) body' follows: + z := z' // (1) + if tmp_first {tmp_first = false} else {z++} // (6) + if ! (z < n) { break } // (7) + // (3, 8) body_continue + if reason() { + escape = append(escape, &z) + goto next // rewritten continue + } + z = z + z + stuff + next: // (9) + z' = z // (2) + } + + In the case that the loop contains no increment (z++), + there is no need for step 6, + and thus no need to test, update, or declare tmp_first (part of step 4). + Similarly if the loop contains no exit test (z < n), + then there is no need for step 7. + */ + + // Expressed in terms of the input ForStmt + // + // type ForStmt struct { + // init Nodes + // Label *types.Sym + // Cond Node // empty if OFORUNTIL + // Post Node + // Body Nodes + // HasBreak bool + // } + + // OFOR: init; loop: if !Cond {break}; Body; Post; goto loop + + // (1) prebody = {z := z' for z in leaked} + // (2) postbody = {z' = z for z in leaked} + // (3) body_continue = {body : s/continue/goto next} + // (4) init' = (init : s/z/z' for z in leaked) + tmp_first := true + // (5) body' = prebody + // appears out of order below + // (6) if tmp_first {tmp_first = false} else {Post} + + // (7) if !cond {break} + + // (8) body_continue (3) + + // (9) next: postbody (2) + // (10) cond' = {} + // (11) post' = {} + + // minor optimizations: + // if Post is empty, tmp_first and step 6 can be skipped. + // if Cond is empty, that code can also be skipped. 
+ + var preBody, postBody ir.Nodes + + // Given original iteration variable z, what is the corresponding z' + // that carries the value from iteration to iteration? + zPrimeForZ := make(map[*ir.Name]*ir.Name) + + // (1,2) initialize preBody and postBody + for _, z := range leaked { + transformed = append(transformed, VarAndLoop{z, x, lastPos}) + + tz := typecheck.TempAt(base.Pos, fn, z.Type()) + tz.SetTypecheck(1) + zPrimeForZ[z] = tz + + as := ir.NewAssignStmt(x.Pos(), z, tz) + as.Def = true + as.SetTypecheck(1) + preBody.Append(as) + dclFixups[z] = as + + as = ir.NewAssignStmt(x.Pos(), tz, z) + as.SetTypecheck(1) + postBody.Append(as) + + } + + // (3) rewrite continues in body -- rewrite is inplace, so works for top level visit, too. + label := typecheck.Lookup(fmt.Sprintf(".3clNext_%d", seq)) + seq++ + labelStmt := ir.NewLabelStmt(x.Pos(), label) + labelStmt.SetTypecheck(1) + + loopLabel := x.Label + loopDepth := 0 + var editContinues func(x ir.Node) bool + editContinues = func(x ir.Node) bool { + + switch c := x.(type) { + case *ir.BranchStmt: + // If this is a continue targeting the loop currently being rewritten, transform it to an appropriate GOTO + if c.Op() == ir.OCONTINUE && (loopDepth == 0 && c.Label == nil || loopLabel != nil && c.Label == loopLabel) { + c.Label = label + c.SetOp(ir.OGOTO) + } + case *ir.RangeStmt, *ir.ForStmt: + loopDepth++ + ir.DoChildren(x, editContinues) + loopDepth-- + return false + } + ir.DoChildren(x, editContinues) + return false + } + for _, y := range x.Body { + editContinues(y) + } + bodyContinue := x.Body + + // (4) rewrite init + forAllDefInInitUpdate(x, func(z ir.Node, pz *ir.Node) { + // note tempFor[n] can be nil if hash searching. + if n, ok := z.(*ir.Name); ok && possiblyLeaked[n] && zPrimeForZ[n] != nil { + *pz = zPrimeForZ[n] + } + }) + + postNotNil := x.Post != nil + var tmpFirstDcl ir.Node + if postNotNil { + // body' = prebody + + // (6) if tmp_first {tmp_first = false} else {Post} + + // if !cond {break} + ... 
+ tmpFirst := typecheck.TempAt(base.Pos, fn, types.Types[types.TBOOL]) + tmpFirstDcl = typecheck.Stmt(ir.NewAssignStmt(x.Pos(), tmpFirst, ir.NewBool(base.Pos, true))) + tmpFirstSetFalse := typecheck.Stmt(ir.NewAssignStmt(x.Pos(), tmpFirst, ir.NewBool(base.Pos, false))) + ifTmpFirst := ir.NewIfStmt(x.Pos(), tmpFirst, ir.Nodes{tmpFirstSetFalse}, ir.Nodes{x.Post}) + ifTmpFirst.PtrInit().Append(typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, tmpFirst))) // declares tmpFirst + preBody.Append(typecheck.Stmt(ifTmpFirst)) + } + + // body' = prebody + + // if tmp_first {tmp_first = false} else {Post} + + // (7) if !cond {break} + ... + if x.Cond != nil { + notCond := ir.NewUnaryExpr(x.Cond.Pos(), ir.ONOT, x.Cond) + notCond.SetType(x.Cond.Type()) + notCond.SetTypecheck(1) + newBreak := ir.NewBranchStmt(x.Pos(), ir.OBREAK, nil) + newBreak.SetTypecheck(1) + ifNotCond := ir.NewIfStmt(x.Pos(), notCond, ir.Nodes{newBreak}, nil) + ifNotCond.SetTypecheck(1) + preBody.Append(ifNotCond) + } + + if postNotNil { + x.PtrInit().Append(tmpFirstDcl) + } + + // (8) + preBody.Append(bodyContinue...) + // (9) + preBody.Append(labelStmt) + preBody.Append(postBody...) + + // (5) body' = prebody + ... + x.Body = preBody + + // (10) cond' = {} + x.Cond = nil + + // (11) post' = {} + x.Post = nil + } + thisLastPos := lastPos + lastPos = savedLastPos + updateLastPos(thisLastPos) // this will propagate lastPos if in the same file. + x.DistinctVars = false + + return false + } + + ir.DoChildren(n, scanChildrenThenTransform) + + return false + } + scanChildrenThenTransform(fn) + if len(transformed) > 0 { + // editNodes scans a slice C of ir.Node, looking for declarations that + // appear in dclFixups. Any declaration D whose "fixup" is an assignmnt + // statement A is removed from the C and relocated to the Init + // of A. editNodes returns the modified slice of ir.Node. 
+ editNodes := func(c ir.Nodes) ir.Nodes { + j := 0 + for _, n := range c { + if d, ok := n.(*ir.Decl); ok { + if s := dclFixups[d.X]; s != nil { + switch a := s.(type) { + case *ir.AssignStmt: + a.PtrInit().Prepend(d) + delete(dclFixups, d.X) // can't be sure of visit order, wouldn't want to visit twice. + default: + base.Fatalf("not implemented yet for node type %v", s.Op()) + } + continue // do not copy this node, and do not increment j + } + } + c[j] = n + j++ + } + for k := j; k < len(c); k++ { + c[k] = nil + } + return c[:j] + } + // fixup all tagged declarations in all the statements lists in fn. + rewriteNodes(fn, editNodes) + } + } + ir.WithFunc(fn, forCapture) + return transformed +} + +// forAllDefInInitUpdate applies "do" to all the defining assignments in the Init clause of a ForStmt. +// This abstracts away some of the boilerplate from the already complex and verbose for-3-clause case. +func forAllDefInInitUpdate(x *ir.ForStmt, do func(z ir.Node, update *ir.Node)) { + for _, s := range x.Init() { + switch y := s.(type) { + case *ir.AssignListStmt: + if !y.Def { + continue + } + for i, z := range y.Lhs { + do(z, &y.Lhs[i]) + } + case *ir.AssignStmt: + if !y.Def { + continue + } + do(y.X, &y.X) + } + } +} + +// forAllDefInInit is forAllDefInInitUpdate without the update option. +func forAllDefInInit(x *ir.ForStmt, do func(z ir.Node)) { + forAllDefInInitUpdate(x, func(z ir.Node, _ *ir.Node) { do(z) }) +} + +// rewriteNodes applies editNodes to all statement lists in fn. 
+func rewriteNodes(fn *ir.Func, editNodes func(c ir.Nodes) ir.Nodes) { + var forNodes func(x ir.Node) bool + forNodes = func(n ir.Node) bool { + if stmt, ok := n.(ir.InitNode); ok { + // process init list + stmt.SetInit(editNodes(stmt.Init())) + } + switch x := n.(type) { + case *ir.Func: + x.Body = editNodes(x.Body) + case *ir.InlinedCallExpr: + x.Body = editNodes(x.Body) + + case *ir.CaseClause: + x.Body = editNodes(x.Body) + case *ir.CommClause: + x.Body = editNodes(x.Body) + + case *ir.BlockStmt: + x.List = editNodes(x.List) + + case *ir.ForStmt: + x.Body = editNodes(x.Body) + case *ir.RangeStmt: + x.Body = editNodes(x.Body) + case *ir.IfStmt: + x.Body = editNodes(x.Body) + x.Else = editNodes(x.Else) + case *ir.SelectStmt: + x.Compiled = editNodes(x.Compiled) + case *ir.SwitchStmt: + x.Compiled = editNodes(x.Compiled) + } + ir.DoChildren(n, forNodes) + return false + } + forNodes(fn) +} + +func LogTransformations(transformed []VarAndLoop) { + print := 2 <= base.Debug.LoopVar && base.Debug.LoopVar != 11 + + if print || logopt.Enabled() { // 11 is do them all, quietly, 12 includes debugging. + fileToPosBase := make(map[string]*src.PosBase) // used to remove inline context for innermost reporting. + + // trueInlinedPos rebases inner w/o inline context so that it prints correctly in WarnfAt; otherwise it prints as outer. 
+ trueInlinedPos := func(inner src.Pos) src.XPos { + afn := inner.AbsFilename() + pb, ok := fileToPosBase[afn] + if !ok { + pb = src.NewFileBase(inner.Filename(), afn) + fileToPosBase[afn] = pb + } + inner.SetBase(pb) + return base.Ctxt.PosTable.XPos(inner) + } + + type unit struct{} + loopsSeen := make(map[ir.Node]unit) + type loopPos struct { + loop ir.Node + last src.XPos + curfn *ir.Func + } + var loops []loopPos + for _, lv := range transformed { + n := lv.Name + if _, ok := loopsSeen[lv.Loop]; !ok { + l := lv.Loop + loopsSeen[l] = unit{} + loops = append(loops, loopPos{l, lv.LastPos, n.Curfn}) + } + pos := n.Pos() + + inner := base.Ctxt.InnermostPos(pos) + outer := base.Ctxt.OutermostPos(pos) + + if logopt.Enabled() { + // For automated checking of coverage of this transformation, include this in the JSON information. + var nString interface{} = n + if inner != outer { + nString = fmt.Sprintf("%v (from inline)", n) + } + if n.Esc() == ir.EscHeap { + logopt.LogOpt(pos, "iteration-variable-to-heap", "loopvar", ir.FuncName(n.Curfn), nString) + } else { + logopt.LogOpt(pos, "iteration-variable-to-stack", "loopvar", ir.FuncName(n.Curfn), nString) + } + } + if print { + if inner == outer { + if n.Esc() == ir.EscHeap { + base.WarnfAt(pos, "loop variable %v now per-iteration, heap-allocated", n) + } else { + base.WarnfAt(pos, "loop variable %v now per-iteration, stack-allocated", n) + } + } else { + innerXPos := trueInlinedPos(inner) + if n.Esc() == ir.EscHeap { + base.WarnfAt(innerXPos, "loop variable %v now per-iteration, heap-allocated (loop inlined into %s:%d)", n, outer.Filename(), outer.Line()) + } else { + base.WarnfAt(innerXPos, "loop variable %v now per-iteration, stack-allocated (loop inlined into %s:%d)", n, outer.Filename(), outer.Line()) + } + } + } + } + for _, l := range loops { + pos := l.loop.Pos() + last := l.last + loopKind := "range" + if _, ok := l.loop.(*ir.ForStmt); ok { + loopKind = "for" + } + if logopt.Enabled() { + // Intended to help with 
performance debugging, we record whole loop ranges + logopt.LogOptRange(pos, last, "loop-modified-"+loopKind, "loopvar", ir.FuncName(l.curfn)) + } + if print && 4 <= base.Debug.LoopVar { + // TODO decide if we want to keep this, or not. It was helpful for validating logopt, otherwise, eh. + inner := base.Ctxt.InnermostPos(pos) + outer := base.Ctxt.OutermostPos(pos) + + if inner == outer { + base.WarnfAt(pos, "%s loop ending at %d:%d was modified", loopKind, last.Line(), last.Col()) + } else { + pos = trueInlinedPos(inner) + last = trueInlinedPos(base.Ctxt.InnermostPos(last)) + base.WarnfAt(pos, "%s loop ending at %d:%d was modified (loop inlined into %s:%d)", loopKind, last.Line(), last.Col(), outer.Filename(), outer.Line()) + } + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/loopvar_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/loopvar_test.go new file mode 100644 index 0000000000000000000000000000000000000000..64cfdb77d99f2b844135ec73cca9ee75bb7734aa --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/loopvar_test.go @@ -0,0 +1,383 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package loopvar_test + +import ( + "internal/testenv" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strings" + "testing" +) + +type testcase struct { + lvFlag string // ==-2, -1, 0, 1, 2 + buildExpect string // message, if any + expectRC int + files []string +} + +var for_files = []string{ + "for_esc_address.go", // address of variable + "for_esc_closure.go", // closure of variable + "for_esc_minimal_closure.go", // simple closure of variable + "for_esc_method.go", // method value of variable + "for_complicated_esc_address.go", // modifies loop index in body +} + +var range_files = []string{ + "range_esc_address.go", // address of variable + "range_esc_closure.go", // closure of variable + "range_esc_minimal_closure.go", // simple closure of variable + "range_esc_method.go", // method value of variable +} + +var cases = []testcase{ + {"-1", "", 11, for_files[:1]}, + {"0", "", 0, for_files[:1]}, + {"1", "", 0, for_files[:1]}, + {"2", "loop variable i now per-iteration,", 0, for_files}, + + {"-1", "", 11, range_files[:1]}, + {"0", "", 0, range_files[:1]}, + {"1", "", 0, range_files[:1]}, + {"2", "loop variable i now per-iteration,", 0, range_files}, + + {"1", "", 0, []string{"for_nested.go"}}, +} + +// TestLoopVar checks that the GOEXPERIMENT and debug flags behave as expected. 
+func TestLoopVarGo1_21(t *testing.T) { + switch runtime.GOOS { + case "linux", "darwin": + default: + t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS) + } + switch runtime.GOARCH { + case "amd64", "arm64": + default: + t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH) + } + + testenv.MustHaveGoBuild(t) + gocmd := testenv.GoToolPath(t) + tmpdir := t.TempDir() + output := filepath.Join(tmpdir, "foo.exe") + + for i, tc := range cases { + for _, f := range tc.files { + source := f + cmd := testenv.Command(t, gocmd, "build", "-o", output, "-gcflags=-lang=go1.21 -d=loopvar="+tc.lvFlag, source) + cmd.Env = append(cmd.Env, "GOEXPERIMENT=loopvar", "HOME="+tmpdir) + cmd.Dir = "testdata" + t.Logf("File %s loopvar=%s expect '%s' exit code %d", f, tc.lvFlag, tc.buildExpect, tc.expectRC) + b, e := cmd.CombinedOutput() + if e != nil { + t.Error(e) + } + if tc.buildExpect != "" { + s := string(b) + if !strings.Contains(s, tc.buildExpect) { + t.Errorf("File %s test %d expected to match '%s' with \n-----\n%s\n-----", f, i, tc.buildExpect, s) + } + } + // run what we just built. 
+ cmd = testenv.Command(t, output) + b, e = cmd.CombinedOutput() + if tc.expectRC != 0 { + if e == nil { + t.Errorf("Missing expected error, file %s, case %d", f, i) + } else if ee, ok := (e).(*exec.ExitError); !ok || ee.ExitCode() != tc.expectRC { + t.Error(e) + } else { + // okay + } + } else if e != nil { + t.Error(e) + } + } + } +} + +func TestLoopVarInlinesGo1_21(t *testing.T) { + switch runtime.GOOS { + case "linux", "darwin": + default: + t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS) + } + switch runtime.GOARCH { + case "amd64", "arm64": + default: + t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH) + } + + testenv.MustHaveGoBuild(t) + gocmd := testenv.GoToolPath(t) + tmpdir := t.TempDir() + + root := "cmd/compile/internal/loopvar/testdata/inlines" + + f := func(pkg string) string { + // This disables the loopvar change, except for the specified package. + // The effect should follow the package, even though everything (except "c") + // is inlined. 
+ cmd := testenv.Command(t, gocmd, "run", "-gcflags="+root+"/...=-lang=go1.21", "-gcflags="+pkg+"=-d=loopvar=1", root) + cmd.Env = append(cmd.Env, "GOEXPERIMENT=noloopvar", "HOME="+tmpdir) + cmd.Dir = filepath.Join("testdata", "inlines") + + b, e := cmd.CombinedOutput() + if e != nil { + t.Error(e) + } + return string(b) + } + + a := f(root + "/a") + b := f(root + "/b") + c := f(root + "/c") + m := f(root) + + t.Logf(a) + t.Logf(b) + t.Logf(c) + t.Logf(m) + + if !strings.Contains(a, "f, af, bf, abf, cf sums = 100, 45, 100, 100, 100") { + t.Errorf("Did not see expected value of a") + } + if !strings.Contains(b, "f, af, bf, abf, cf sums = 100, 100, 45, 45, 100") { + t.Errorf("Did not see expected value of b") + } + if !strings.Contains(c, "f, af, bf, abf, cf sums = 100, 100, 100, 100, 45") { + t.Errorf("Did not see expected value of c") + } + if !strings.Contains(m, "f, af, bf, abf, cf sums = 45, 100, 100, 100, 100") { + t.Errorf("Did not see expected value of m") + } +} + +func countMatches(s, re string) int { + slice := regexp.MustCompile(re).FindAllString(s, -1) + return len(slice) +} + +func TestLoopVarHashes(t *testing.T) { + // This behavior does not depend on Go version (1.21 or greater) + switch runtime.GOOS { + case "linux", "darwin": + default: + t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS) + } + switch runtime.GOARCH { + case "amd64", "arm64": + default: + t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH) + } + + testenv.MustHaveGoBuild(t) + gocmd := testenv.GoToolPath(t) + tmpdir := t.TempDir() + + root := "cmd/compile/internal/loopvar/testdata/inlines" + + f := func(hash string) string { + // This disables the loopvar change, except for the specified hash pattern. + // -trimpath is necessary so we get the same answer no matter where the + // Go repository is checked out. This is not normally a concern since people + // do not normally rely on the meaning of specific hashes. 
+ cmd := testenv.Command(t, gocmd, "run", "-trimpath", root) + cmd.Env = append(cmd.Env, "GOCOMPILEDEBUG=loopvarhash="+hash, "HOME="+tmpdir) + cmd.Dir = filepath.Join("testdata", "inlines") + + b, _ := cmd.CombinedOutput() + // Ignore the error, sometimes it's supposed to fail, the output test will catch it. + return string(b) + } + + for _, arg := range []string{"v001100110110110010100100", "vx336ca4"} { + m := f(arg) + t.Logf(m) + + mCount := countMatches(m, "loopvarhash triggered cmd/compile/internal/loopvar/testdata/inlines/main.go:27:6: .* 001100110110110010100100") + otherCount := strings.Count(m, "loopvarhash") + if mCount < 1 { + t.Errorf("%s: did not see triggered main.go:27:6", arg) + } + if mCount != otherCount { + t.Errorf("%s: too many matches", arg) + } + mCount = countMatches(m, "cmd/compile/internal/loopvar/testdata/inlines/main.go:27:6: .* \\[bisect-match 0x7802e115b9336ca4\\]") + otherCount = strings.Count(m, "[bisect-match ") + if mCount < 1 { + t.Errorf("%s: did not see bisect-match for main.go:27:6", arg) + } + if mCount != otherCount { + t.Errorf("%s: too many matches", arg) + } + + // This next test carefully dodges a bug-to-be-fixed with inlined locations for ir.Names. + if !strings.Contains(m, ", 100, 100, 100, 100") { + t.Errorf("%s: did not see expected value of m run", arg) + } + } +} + +// TestLoopVarVersionEnableFlag checks for loopvar transformation enabled by command line flag (1.22). 
+func TestLoopVarVersionEnableFlag(t *testing.T) { + switch runtime.GOOS { + case "linux", "darwin": + default: + t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS) + } + switch runtime.GOARCH { + case "amd64", "arm64": + default: + t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH) + } + + testenv.MustHaveGoBuild(t) + gocmd := testenv.GoToolPath(t) + + // loopvar=3 logs info but does not change loopvarness + cmd := testenv.Command(t, gocmd, "run", "-gcflags=-lang=go1.22 -d=loopvar=3", "opt.go") + cmd.Dir = filepath.Join("testdata") + + b, err := cmd.CombinedOutput() + m := string(b) + + t.Logf(m) + + yCount := strings.Count(m, "opt.go:16:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt.go:29)") + nCount := strings.Count(m, "shared") + + if yCount != 1 { + t.Errorf("yCount=%d != 1", yCount) + } + if nCount > 0 { + t.Errorf("nCount=%d > 0", nCount) + } + if err != nil { + t.Errorf("err=%v != nil", err) + } +} + +// TestLoopVarVersionEnableGoBuild checks for loopvar transformation enabled by go:build version (1.22). 
+func TestLoopVarVersionEnableGoBuild(t *testing.T) { + switch runtime.GOOS { + case "linux", "darwin": + default: + t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS) + } + switch runtime.GOARCH { + case "amd64", "arm64": + default: + t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH) + } + + testenv.MustHaveGoBuild(t) + gocmd := testenv.GoToolPath(t) + + // loopvar=3 logs info but does not change loopvarness + cmd := testenv.Command(t, gocmd, "run", "-gcflags=-lang=go1.21 -d=loopvar=3", "opt-122.go") + cmd.Dir = filepath.Join("testdata") + + b, err := cmd.CombinedOutput() + m := string(b) + + t.Logf(m) + + yCount := strings.Count(m, "opt-122.go:18:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt-122.go:31)") + nCount := strings.Count(m, "shared") + + if yCount != 1 { + t.Errorf("yCount=%d != 1", yCount) + } + if nCount > 0 { + t.Errorf("nCount=%d > 0", nCount) + } + if err != nil { + t.Errorf("err=%v != nil", err) + } +} + +// TestLoopVarVersionDisableFlag checks for loopvar transformation DISABLED by command line version (1.21). 
+func TestLoopVarVersionDisableFlag(t *testing.T) { + switch runtime.GOOS { + case "linux", "darwin": + default: + t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS) + } + switch runtime.GOARCH { + case "amd64", "arm64": + default: + t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH) + } + + testenv.MustHaveGoBuild(t) + gocmd := testenv.GoToolPath(t) + + // loopvar=3 logs info but does not change loopvarness + cmd := testenv.Command(t, gocmd, "run", "-gcflags=-lang=go1.21 -d=loopvar=3", "opt.go") + cmd.Dir = filepath.Join("testdata") + + b, err := cmd.CombinedOutput() + m := string(b) + + t.Logf(m) // expect error + + yCount := strings.Count(m, "opt.go:16:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt.go:29)") + nCount := strings.Count(m, "shared") + + if yCount != 0 { + t.Errorf("yCount=%d != 0", yCount) + } + if nCount > 0 { + t.Errorf("nCount=%d > 0", nCount) + } + if err == nil { // expect error + t.Errorf("err=%v == nil", err) + } +} + +// TestLoopVarVersionDisableGoBuild checks for loopvar transformation DISABLED by go:build version (1.21). 
+func TestLoopVarVersionDisableGoBuild(t *testing.T) { + switch runtime.GOOS { + case "linux", "darwin": + default: + t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS) + } + switch runtime.GOARCH { + case "amd64", "arm64": + default: + t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH) + } + + testenv.MustHaveGoBuild(t) + gocmd := testenv.GoToolPath(t) + + // loopvar=3 logs info but does not change loopvarness + cmd := testenv.Command(t, gocmd, "run", "-gcflags=-lang=go1.22 -d=loopvar=3", "opt-121.go") + cmd.Dir = filepath.Join("testdata") + + b, err := cmd.CombinedOutput() + m := string(b) + + t.Logf(m) // expect error + + yCount := strings.Count(m, "opt-121.go:18:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt-121.go:31)") + nCount := strings.Count(m, "shared") + + if yCount != 0 { + t.Errorf("yCount=%d != 0", yCount) + } + if nCount > 0 { + t.Errorf("nCount=%d > 0", nCount) + } + if err == nil { // expect error + t.Errorf("err=%v == nil", err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/mips/galign.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/mips/galign.go new file mode 100644 index 0000000000000000000000000000000000000000..4e6897042ec04ce2e81fa58960a26541fa5aaeb0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/mips/galign.go @@ -0,0 +1,27 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mips + +import ( + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/internal/obj/mips" + "internal/buildcfg" +) + +func Init(arch *ssagen.ArchInfo) { + arch.LinkArch = &mips.Linkmips + if buildcfg.GOARCH == "mipsle" { + arch.LinkArch = &mips.Linkmipsle + } + arch.REGSP = mips.REGSP + arch.MAXWIDTH = (1 << 31) - 1 + arch.SoftFloat = (buildcfg.GOMIPS == "softfloat") + arch.ZeroRange = zerorange + arch.Ginsnop = ginsnop + arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {} + arch.SSAGenValue = ssaGenValue + arch.SSAGenBlock = ssaGenBlock +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/mips/ggen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/mips/ggen.go new file mode 100644 index 0000000000000000000000000000000000000000..e235ef95676f4794e529c6962425954c697a0ddb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/mips/ggen.go @@ -0,0 +1,51 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mips + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/objw" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/mips" +) + +// TODO(mips): implement DUFFZERO +func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { + + if cnt == 0 { + return p + } + if cnt < int64(4*types.PtrSize) { + for i := int64(0); i < cnt; i += int64(types.PtrSize) { + p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.Arch.FixedFrameSize+off+i) + } + } else { + //fmt.Printf("zerorange frame:%v, lo: %v, hi:%v \n", frame ,lo, hi) + // ADD $(FIXED_FRAME+frame+lo-4), SP, r1 + // ADD $cnt, r1, r2 + // loop: + // MOVW R0, (Widthptr)r1 + // ADD $Widthptr, r1 + // BNE r1, r2, loop + p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.Arch.FixedFrameSize+off-4, obj.TYPE_REG, mips.REGRT1, 0) + p.Reg = mips.REGSP + p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0) + p.Reg = mips.REGRT1 + p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize)) + p1 := p + p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0) + p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0) + p.Reg = mips.REGRT2 + p.To.SetTarget(p1) + } + + return p +} + +func ginsnop(pp *objw.Progs) *obj.Prog { + p := pp.Prog(mips.ANOOP) + return p +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/mips/ssa.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/mips/ssa.go new file mode 100644 index 0000000000000000000000000000000000000000..bfccafd8e5a52ee57ac8a398cd6a57eadde4ad04 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/mips/ssa.go @@ -0,0 +1,880 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mips + +import ( + "math" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/logopt" + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/mips" +) + +// isFPreg reports whether r is an FP register. +func isFPreg(r int16) bool { + return mips.REG_F0 <= r && r <= mips.REG_F31 +} + +// isHILO reports whether r is HI or LO register. +func isHILO(r int16) bool { + return r == mips.REG_HI || r == mips.REG_LO +} + +// loadByType returns the load instruction of the given type. +func loadByType(t *types.Type, r int16) obj.As { + if isFPreg(r) { + if t.Size() == 4 { // float32 or int32 + return mips.AMOVF + } else { // float64 or int64 + return mips.AMOVD + } + } else { + switch t.Size() { + case 1: + if t.IsSigned() { + return mips.AMOVB + } else { + return mips.AMOVBU + } + case 2: + if t.IsSigned() { + return mips.AMOVH + } else { + return mips.AMOVHU + } + case 4: + return mips.AMOVW + } + } + panic("bad load type") +} + +// storeByType returns the store instruction of the given type. 
// storeByType returns the store instruction of the given type.
func storeByType(t *types.Type, r int16) obj.As {
	if isFPreg(r) {
		if t.Size() == 4 { // float32 or int32
			return mips.AMOVF
		} else { // float64 or int64
			return mips.AMOVD
		}
	} else {
		switch t.Size() {
		case 1:
			return mips.AMOVB
		case 2:
			return mips.AMOVH
		case 4:
			return mips.AMOVW
		}
	}
	panic("bad store type")
}

// ssaGenValue emits the machine instructions for a single SSA value v.
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
	switch v.Op {
	case ssa.OpCopy, ssa.OpMIPSMOVWreg:
		t := v.Type
		if t.IsMemory() {
			return
		}
		x := v.Args[0].Reg()
		y := v.Reg()
		if x == y {
			return
		}
		as := mips.AMOVW
		if isFPreg(x) && isFPreg(y) {
			as = mips.AMOVF
			if t.Size() == 8 {
				as = mips.AMOVD
			}
		}

		p := s.Prog(as)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = x
		p.To.Type = obj.TYPE_REG
		p.To.Reg = y
		if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) {
			// cannot move between special registers, use TMP as intermediate
			p.To.Reg = mips.REGTMP
			p = s.Prog(mips.AMOVW)
			p.From.Type = obj.TYPE_REG
			p.From.Reg = mips.REGTMP
			p.To.Type = obj.TYPE_REG
			p.To.Reg = y
		}
	case ssa.OpMIPSMOVWnop:
		// nothing to do
	case ssa.OpLoadReg:
		if v.Type.IsFlags() {
			v.Fatalf("load flags not implemented: %v", v.LongString())
			return
		}
		r := v.Reg()
		p := s.Prog(loadByType(v.Type, r))
		ssagen.AddrAuto(&p.From, v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
		if isHILO(r) {
			// cannot directly load, load to TMP and move
			p.To.Reg = mips.REGTMP
			p = s.Prog(mips.AMOVW)
			p.From.Type = obj.TYPE_REG
			p.From.Reg = mips.REGTMP
			p.To.Type = obj.TYPE_REG
			p.To.Reg = r
		}
	case ssa.OpStoreReg:
		if v.Type.IsFlags() {
			v.Fatalf("store flags not implemented: %v", v.LongString())
			return
		}
		r := v.Args[0].Reg()
		if isHILO(r) {
			// cannot directly store, move to TMP and store
			p := s.Prog(mips.AMOVW)
			p.From.Type = obj.TYPE_REG
			p.From.Reg = r
			p.To.Type = obj.TYPE_REG
			p.To.Reg = mips.REGTMP
			r = mips.REGTMP
		}
		p := s.Prog(storeByType(v.Type, r))
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r
		ssagen.AddrAuto(&p.To, v)
	// Two-register (and FP) binary operations: result = arg0 op arg1.
	case ssa.OpMIPSADD,
		ssa.OpMIPSSUB,
		ssa.OpMIPSAND,
		ssa.OpMIPSOR,
		ssa.OpMIPSXOR,
		ssa.OpMIPSNOR,
		ssa.OpMIPSSLL,
		ssa.OpMIPSSRL,
		ssa.OpMIPSSRA,
		ssa.OpMIPSADDF,
		ssa.OpMIPSADDD,
		ssa.OpMIPSSUBF,
		ssa.OpMIPSSUBD,
		ssa.OpMIPSMULF,
		ssa.OpMIPSMULD,
		ssa.OpMIPSDIVF,
		ssa.OpMIPSDIVD,
		ssa.OpMIPSMUL:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	// Set-on-greater-than: note the operand order (arg0 in From, arg1 in Reg).
	case ssa.OpMIPSSGT,
		ssa.OpMIPSSGTU:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpMIPSSGTzero,
		ssa.OpMIPSSGTUzero:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = mips.REGZERO
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	// Register-immediate operations: result = arg0 op AuxInt.
	case ssa.OpMIPSADDconst,
		ssa.OpMIPSSUBconst,
		ssa.OpMIPSANDconst,
		ssa.OpMIPSORconst,
		ssa.OpMIPSXORconst,
		ssa.OpMIPSNORconst,
		ssa.OpMIPSSLLconst,
		ssa.OpMIPSSRLconst,
		ssa.OpMIPSSRAconst,
		ssa.OpMIPSSGTconst,
		ssa.OpMIPSSGTUconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpMIPSMULT,
		ssa.OpMIPSMULTU,
		ssa.OpMIPSDIV,
		ssa.OpMIPSDIVU:
		// result in hi,lo
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.Reg = v.Args[0].Reg()
	case ssa.OpMIPSMOVWconst:
		r := v.Reg()
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
		if isFPreg(r) || isHILO(r) {
			// cannot move into FP or special registers, use TMP as intermediate
			p.To.Reg = mips.REGTMP
			p = s.Prog(mips.AMOVW)
			p.From.Type = obj.TYPE_REG
			p.From.Reg = mips.REGTMP
			p.To.Type = obj.TYPE_REG
			p.To.Reg = r
		}

	case ssa.OpMIPSMOVFconst,
		ssa.OpMIPSMOVDconst:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_FCONST
		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpMIPSCMOVZ:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[2].Reg()
		p.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpMIPSCMOVZzero:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.Reg = mips.REGZERO
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	// FP comparisons set the FP condition flag; no destination register.
	case ssa.OpMIPSCMPEQF,
		ssa.OpMIPSCMPEQD,
		ssa.OpMIPSCMPGEF,
		ssa.OpMIPSCMPGED,
		ssa.OpMIPSCMPGTF,
		ssa.OpMIPSCMPGTD:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = v.Args[1].Reg()
	case ssa.OpMIPSMOVWaddr:
		p := s.Prog(mips.AMOVW)
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = v.Args[0].Reg()
		var wantreg string
		// MOVW $sym+off(base), R
		// the assembler expands it as the following:
		// - base is SP: add constant offset to SP (R29)
		// when constant is large, tmp register (R23) may be used
		// - base is SB: load external address with relocation
		switch v.Aux.(type) {
		default:
			v.Fatalf("aux is of unknown type %T", v.Aux)
		case *obj.LSym:
			wantreg = "SB"
			ssagen.AddAux(&p.From, v)
		case *ir.Name:
			wantreg = "SP"
			ssagen.AddAux(&p.From, v)
		case nil:
			// No sym, just MOVW $off(SP), R
			wantreg = "SP"
			p.From.Offset = v.AuxInt
		}
		if reg := v.Args[0].RegName(); reg != wantreg {
			v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
		}
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpMIPSMOVBload,
		ssa.OpMIPSMOVBUload,
		ssa.OpMIPSMOVHload,
		ssa.OpMIPSMOVHUload,
		ssa.OpMIPSMOVWload,
		ssa.OpMIPSMOVFload,
		ssa.OpMIPSMOVDload:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()

	case ssa.OpMIPSMOVBstore,
		ssa.OpMIPSMOVHstore,
		ssa.OpMIPSMOVWstore,
		ssa.OpMIPSMOVFstore,
		ssa.OpMIPSMOVDstore:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.To, v)
	case ssa.OpMIPSMOVBstorezero,
		ssa.OpMIPSMOVHstorezero,
		ssa.OpMIPSMOVWstorezero:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = mips.REGZERO
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.To, v)
	case ssa.OpMIPSMOVBreg,
		ssa.OpMIPSMOVBUreg,
		ssa.OpMIPSMOVHreg,
		ssa.OpMIPSMOVHUreg:
		// Sign/zero extensions: peek through register-to-register copies
		// to see whether the source is already a properly-extended load.
		a := v.Args[0]
		for a.Op == ssa.OpCopy || a.Op == ssa.OpMIPSMOVWreg || a.Op == ssa.OpMIPSMOVWnop {
			a = a.Args[0]
		}
		if a.Op == ssa.OpLoadReg {
			t := a.Type
			switch {
			case v.Op == ssa.OpMIPSMOVBreg && t.Size() == 1 && t.IsSigned(),
				v.Op == ssa.OpMIPSMOVBUreg && t.Size() == 1 && !t.IsSigned(),
				v.Op == ssa.OpMIPSMOVHreg && t.Size() == 2 && t.IsSigned(),
				v.Op == ssa.OpMIPSMOVHUreg && t.Size() == 2 && !t.IsSigned():
				// arg is a proper-typed load, already zero/sign-extended, don't extend again
				if v.Reg() == v.Args[0].Reg() {
					return
				}
				p := s.Prog(mips.AMOVW)
				p.From.Type = obj.TYPE_REG
				p.From.Reg = v.Args[0].Reg()
				p.To.Type = obj.TYPE_REG
				p.To.Reg = v.Reg()
				return
			default:
			}
		}
		fallthrough
	// Single-operand register-to-register operations.
	case ssa.OpMIPSMOVWF,
		ssa.OpMIPSMOVWD,
		ssa.OpMIPSTRUNCFW,
		ssa.OpMIPSTRUNCDW,
		ssa.OpMIPSMOVFD,
		ssa.OpMIPSMOVDF,
		ssa.OpMIPSMOVWfpgp,
		ssa.OpMIPSMOVWgpfp,
		ssa.OpMIPSNEGF,
		ssa.OpMIPSNEGD,
		ssa.OpMIPSABSD,
		ssa.OpMIPSSQRTF,
		ssa.OpMIPSSQRTD,
		ssa.OpMIPSCLZ:
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpMIPSNEG:
		// SUB from REGZERO
		p := s.Prog(mips.ASUBU)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[0].Reg()
		p.Reg = mips.REGZERO
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpMIPSLoweredZero:
		// SUBU	$4, R1
		// MOVW	R0, 4(R1)
		// ADDU	$4, R1
		// BNE	Rarg1, R1, -2(PC)
		// arg1 is the address of the last element to zero
		var sz int64
		var mov obj.As
		// Pick the widest store whose size divides the alignment hint.
		switch {
		case v.AuxInt%4 == 0:
			sz = 4
			mov = mips.AMOVW
		case v.AuxInt%2 == 0:
			sz = 2
			mov = mips.AMOVH
		default:
			sz = 1
			mov = mips.AMOVB
		}
		p := s.Prog(mips.ASUBU)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = sz
		p.To.Type = obj.TYPE_REG
		p.To.Reg = mips.REG_R1
		p2 := s.Prog(mov)
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = mips.REGZERO
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = mips.REG_R1
		p2.To.Offset = sz
		p3 := s.Prog(mips.AADDU)
		p3.From.Type = obj.TYPE_CONST
		p3.From.Offset = sz
		p3.To.Type = obj.TYPE_REG
		p3.To.Reg = mips.REG_R1
		p4 := s.Prog(mips.ABNE)
		p4.From.Type = obj.TYPE_REG
		p4.From.Reg = v.Args[1].Reg()
		p4.Reg = mips.REG_R1
		p4.To.Type = obj.TYPE_BRANCH
		p4.To.SetTarget(p2)
	case ssa.OpMIPSLoweredMove:
		// SUBU	$4, R1
		// MOVW	4(R1), Rtmp
		// MOVW	Rtmp, (R2)
		// ADDU	$4, R1
		// ADDU	$4, R2
		// BNE	Rarg2, R1, -4(PC)
		// arg2 is the address of the last element of src
		var sz int64
		var mov obj.As
		switch {
		case v.AuxInt%4 == 0:
			sz = 4
			mov = mips.AMOVW
		case v.AuxInt%2 == 0:
			sz = 2
			mov = mips.AMOVH
		default:
			sz = 1
			mov = mips.AMOVB
		}
		p := s.Prog(mips.ASUBU)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = sz
		p.To.Type = obj.TYPE_REG
		p.To.Reg = mips.REG_R1
		p2 := s.Prog(mov)
		p2.From.Type = obj.TYPE_MEM
		p2.From.Reg = mips.REG_R1
		p2.From.Offset = sz
		p2.To.Type = obj.TYPE_REG
		p2.To.Reg = mips.REGTMP
		p3 := s.Prog(mov)
		p3.From.Type = obj.TYPE_REG
		p3.From.Reg = mips.REGTMP
		p3.To.Type = obj.TYPE_MEM
		p3.To.Reg = mips.REG_R2
		p4 := s.Prog(mips.AADDU)
		p4.From.Type = obj.TYPE_CONST
		p4.From.Offset = sz
		p4.To.Type = obj.TYPE_REG
		p4.To.Reg = mips.REG_R1
		p5 := s.Prog(mips.AADDU)
		p5.From.Type = obj.TYPE_CONST
		p5.From.Offset = sz
		p5.To.Type = obj.TYPE_REG
		p5.To.Reg = mips.REG_R2
		p6 := s.Prog(mips.ABNE)
		p6.From.Type = obj.TYPE_REG
		p6.From.Reg = v.Args[2].Reg()
		p6.Reg = mips.REG_R1
		p6.To.Type = obj.TYPE_BRANCH
		p6.To.SetTarget(p2)
	case ssa.OpMIPSCALLstatic, ssa.OpMIPSCALLclosure, ssa.OpMIPSCALLinter:
		s.Call(v)
	case ssa.OpMIPSCALLtail:
		s.TailCall(v)
	case ssa.OpMIPSLoweredWB:
		p := s.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		// AuxInt encodes how many buffer entries we need.
		p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
	case ssa.OpMIPSLoweredPanicBoundsA, ssa.OpMIPSLoweredPanicBoundsB, ssa.OpMIPSLoweredPanicBoundsC:
		p := s.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
		s.UseArgs(8) // space used in callee args area by assembly stubs
	case ssa.OpMIPSLoweredPanicExtendA, ssa.OpMIPSLoweredPanicExtendB, ssa.OpMIPSLoweredPanicExtendC:
		p := s.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
		s.UseArgs(12) // space used in callee args area by assembly stubs
	case ssa.OpMIPSLoweredAtomicLoad8,
		ssa.OpMIPSLoweredAtomicLoad32:
		// SYNC barriers on both sides of the plain load.
		s.Prog(mips.ASYNC)

		var op obj.As
		switch v.Op {
		case ssa.OpMIPSLoweredAtomicLoad8:
			op = mips.AMOVB
		case ssa.OpMIPSLoweredAtomicLoad32:
			op = mips.AMOVW
		}
		p := s.Prog(op)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()

		s.Prog(mips.ASYNC)
	case ssa.OpMIPSLoweredAtomicStore8,
		ssa.OpMIPSLoweredAtomicStore32:
		s.Prog(mips.ASYNC)

		var op obj.As
		switch v.Op {
		case ssa.OpMIPSLoweredAtomicStore8:
			op = mips.AMOVB
		case ssa.OpMIPSLoweredAtomicStore32:
			op = mips.AMOVW
		}
		p := s.Prog(op)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()

		s.Prog(mips.ASYNC)
	case ssa.OpMIPSLoweredAtomicStorezero:
		s.Prog(mips.ASYNC)

		p := s.Prog(mips.AMOVW)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = mips.REGZERO
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = v.Args[0].Reg()

		s.Prog(mips.ASYNC)
	case ssa.OpMIPSLoweredAtomicExchange:
		// SYNC
		// MOVW Rarg1, Rtmp
		// LL	(Rarg0), Rout
		// SC	Rtmp, (Rarg0)
		// BEQ	Rtmp, -3(PC)
		// SYNC
		s.Prog(mips.ASYNC)

		p := s.Prog(mips.AMOVW)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = v.Args[1].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = mips.REGTMP

		p1 := s.Prog(mips.ALL)
		p1.From.Type = obj.TYPE_MEM
		p1.From.Reg = v.Args[0].Reg()
		p1.To.Type = obj.TYPE_REG
		p1.To.Reg = v.Reg0()

		p2 := s.Prog(mips.ASC)
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = mips.REGTMP
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = v.Args[0].Reg()

		// Retry from the MOVW (which reloads Rtmp, clobbered by SC).
		p3 := s.Prog(mips.ABEQ)
		p3.From.Type = obj.TYPE_REG
		p3.From.Reg = mips.REGTMP
		p3.To.Type = obj.TYPE_BRANCH
		p3.To.SetTarget(p)

		s.Prog(mips.ASYNC)
	case ssa.OpMIPSLoweredAtomicAdd:
		// SYNC
		// LL	(Rarg0), Rout
		// ADDU Rarg1, Rout, Rtmp
		// SC	Rtmp, (Rarg0)
		// BEQ	Rtmp, -3(PC)
		// SYNC
		// ADDU Rarg1, Rout
		s.Prog(mips.ASYNC)

		p := s.Prog(mips.ALL)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()

		p1 := s.Prog(mips.AADDU)
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = v.Args[1].Reg()
		p1.Reg = v.Reg0()
		p1.To.Type = obj.TYPE_REG
		p1.To.Reg = mips.REGTMP

		p2 := s.Prog(mips.ASC)
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = mips.REGTMP
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = v.Args[0].Reg()

		p3 := s.Prog(mips.ABEQ)
		p3.From.Type = obj.TYPE_REG
		p3.From.Reg = mips.REGTMP
		p3.To.Type = obj.TYPE_BRANCH
		p3.To.SetTarget(p)

		s.Prog(mips.ASYNC)

		// Rout holds the old value; produce old+delta as the result.
		p4 := s.Prog(mips.AADDU)
		p4.From.Type = obj.TYPE_REG
		p4.From.Reg = v.Args[1].Reg()
		p4.Reg = v.Reg0()
		p4.To.Type = obj.TYPE_REG
		p4.To.Reg = v.Reg0()

	case ssa.OpMIPSLoweredAtomicAddconst:
		// SYNC
		// LL	(Rarg0), Rout
		// ADDU $auxInt, Rout, Rtmp
		// SC	Rtmp, (Rarg0)
		// BEQ	Rtmp, -3(PC)
		// SYNC
		// ADDU $auxInt, Rout
		s.Prog(mips.ASYNC)

		p := s.Prog(mips.ALL)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()

		p1 := s.Prog(mips.AADDU)
		p1.From.Type = obj.TYPE_CONST
		p1.From.Offset = v.AuxInt
		p1.Reg = v.Reg0()
		p1.To.Type = obj.TYPE_REG
		p1.To.Reg = mips.REGTMP

		p2 := s.Prog(mips.ASC)
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = mips.REGTMP
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = v.Args[0].Reg()

		p3 := s.Prog(mips.ABEQ)
		p3.From.Type = obj.TYPE_REG
		p3.From.Reg = mips.REGTMP
		p3.To.Type = obj.TYPE_BRANCH
		p3.To.SetTarget(p)

		s.Prog(mips.ASYNC)

		p4 := s.Prog(mips.AADDU)
		p4.From.Type = obj.TYPE_CONST
		p4.From.Offset = v.AuxInt
		p4.Reg = v.Reg0()
		p4.To.Type = obj.TYPE_REG
		p4.To.Reg = v.Reg0()

	case ssa.OpMIPSLoweredAtomicAnd,
		ssa.OpMIPSLoweredAtomicOr:
		// SYNC
		// LL	(Rarg0), Rtmp
		// AND/OR	Rarg1, Rtmp
		// SC	Rtmp, (Rarg0)
		// BEQ	Rtmp, -3(PC)
		// SYNC
		s.Prog(mips.ASYNC)

		p := s.Prog(mips.ALL)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		p.To.Type = obj.TYPE_REG
		p.To.Reg = mips.REGTMP

		p1 := s.Prog(v.Op.Asm())
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = v.Args[1].Reg()
		p1.Reg = mips.REGTMP
		p1.To.Type = obj.TYPE_REG
		p1.To.Reg = mips.REGTMP

		p2 := s.Prog(mips.ASC)
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = mips.REGTMP
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = v.Args[0].Reg()

		p3 := s.Prog(mips.ABEQ)
		p3.From.Type = obj.TYPE_REG
		p3.From.Reg = mips.REGTMP
		p3.To.Type = obj.TYPE_BRANCH
		p3.To.SetTarget(p)

		s.Prog(mips.ASYNC)

	case ssa.OpMIPSLoweredAtomicCas:
		// MOVW $0, Rout
		// SYNC
		// LL	(Rarg0), Rtmp
		// BNE	Rtmp, Rarg1, 4(PC)
		// MOVW Rarg2, Rout
		// SC	Rout, (Rarg0)
		// BEQ	Rout, -4(PC)
		// SYNC
		p := s.Prog(mips.AMOVW)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = mips.REGZERO
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg0()

		s.Prog(mips.ASYNC)

		p1 := s.Prog(mips.ALL)
		p1.From.Type = obj.TYPE_MEM
		p1.From.Reg = v.Args[0].Reg()
		p1.To.Type = obj.TYPE_REG
		p1.To.Reg = mips.REGTMP

		// On mismatch, skip past the final SYNC (target fixed up below).
		p2 := s.Prog(mips.ABNE)
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = v.Args[1].Reg()
		p2.Reg = mips.REGTMP
		p2.To.Type = obj.TYPE_BRANCH

		p3 := s.Prog(mips.AMOVW)
		p3.From.Type = obj.TYPE_REG
		p3.From.Reg = v.Args[2].Reg()
		p3.To.Type = obj.TYPE_REG
		p3.To.Reg = v.Reg0()

		p4 := s.Prog(mips.ASC)
		p4.From.Type = obj.TYPE_REG
		p4.From.Reg = v.Reg0()
		p4.To.Type = obj.TYPE_MEM
		p4.To.Reg = v.Args[0].Reg()

		p5 := s.Prog(mips.ABEQ)
		p5.From.Type = obj.TYPE_REG
		p5.From.Reg = v.Reg0()
		p5.To.Type = obj.TYPE_BRANCH
		p5.To.SetTarget(p1)

		s.Prog(mips.ASYNC)

		p6 := s.Prog(obj.ANOP)
		p2.To.SetTarget(p6)

	case ssa.OpMIPSLoweredNilCheck:
		// Issue a load which will fault if arg is nil.
		p := s.Prog(mips.AMOVB)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = v.Args[0].Reg()
		ssagen.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = mips.REGTMP
		if logopt.Enabled() {
			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
		}
		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
			base.WarnfAt(v.Pos, "generated nil check")
		}
	case ssa.OpMIPSFPFlagTrue,
		ssa.OpMIPSFPFlagFalse:
		// MOVW		$1, r
		// CMOVF	R0, r
		cmov := mips.ACMOVF
		if v.Op == ssa.OpMIPSFPFlagFalse {
			cmov = mips.ACMOVT
		}
		p := s.Prog(mips.AMOVW)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
		p1 := s.Prog(cmov)
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = mips.REGZERO
		p1.To.Type = obj.TYPE_REG
		p1.To.Reg = v.Reg()

	case ssa.OpMIPSLoweredGetClosurePtr:
		// Closure pointer is R22 (mips.REGCTXT).
		ssagen.CheckLoweredGetClosurePtr(v)
	case ssa.OpMIPSLoweredGetCallerSP:
		// caller's SP is FixedFrameSize below the address of the first arg
		p := s.Prog(mips.AMOVW)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
		p.From.Name = obj.NAME_PARAM
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpMIPSLoweredGetCallerPC:
		p := s.Prog(obj.AGETCALLERPC)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = v.Reg()
	case ssa.OpClobber, ssa.OpClobberReg:
		// TODO: implement for clobberdead experiment. Nop is ok for now.
	default:
		v.Fatalf("genValue not implemented: %s", v.LongString())
	}
}

// blockJump maps a conditional block kind to its branch instruction and
// the inverted branch used when falling through to the taken successor.
var blockJump = map[ssa.BlockKind]struct {
	asm, invasm obj.As
}{
	ssa.BlockMIPSEQ:  {mips.ABEQ, mips.ABNE},
	ssa.BlockMIPSNE:  {mips.ABNE, mips.ABEQ},
	ssa.BlockMIPSLTZ: {mips.ABLTZ, mips.ABGEZ},
	ssa.BlockMIPSGEZ: {mips.ABGEZ, mips.ABLTZ},
	ssa.BlockMIPSLEZ: {mips.ABLEZ, mips.ABGTZ},
	ssa.BlockMIPSGTZ: {mips.ABGTZ, mips.ABLEZ},
	ssa.BlockMIPSFPT: {mips.ABFPT, mips.ABFPF},
	ssa.BlockMIPSFPF: {mips.ABFPF, mips.ABFPT},
}

// ssaGenBlock emits the control-flow instructions ending block b;
// next is the block laid out immediately after b (fallthrough target).
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
	switch b.Kind {
	case ssa.BlockPlain:
		if b.Succs[0].Block() != next {
			p := s.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
		}
	case ssa.BlockDefer:
		// defer returns in R1:
		// 0 if we should continue executing
		// 1 if we should jump to deferreturn call
		p := s.Prog(mips.ABNE)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = mips.REGZERO
		p.Reg = mips.REG_R1
		p.To.Type = obj.TYPE_BRANCH
		s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
		if b.Succs[0].Block() != next {
			p := s.Prog(obj.AJMP)
			p.To.Type = obj.TYPE_BRANCH
			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
		}
	case ssa.BlockExit, ssa.BlockRetJmp:
	case ssa.BlockRet:
		s.Prog(obj.ARET)
	case ssa.BlockMIPSEQ, ssa.BlockMIPSNE,
		ssa.BlockMIPSLTZ, ssa.BlockMIPSGEZ,
		ssa.BlockMIPSLEZ, ssa.BlockMIPSGTZ,
		ssa.BlockMIPSFPT, ssa.BlockMIPSFPF:
		jmp := blockJump[b.Kind]
		var p *obj.Prog
		switch next {
		case b.Succs[0].Block():
			p = s.Br(jmp.invasm, b.Succs[1].Block())
		case b.Succs[1].Block():
			p = s.Br(jmp.asm, b.Succs[0].Block())
		default:
			// Neither successor falls through; branch on the likely arc
			// and emit an unconditional jump for the other.
			if b.Likely != ssa.BranchUnlikely {
				p = s.Br(jmp.asm, b.Succs[0].Block())
				s.Br(obj.AJMP, b.Succs[1].Block())
			} else {
				p = s.Br(jmp.invasm, b.Succs[1].Block())
				s.Br(obj.AJMP, b.Succs[0].Block())
			}
		}
		if !b.Controls[0].Type.IsFlags() {
			p.From.Type = obj.TYPE_REG
			p.From.Reg = b.Controls[0].Reg()
		}
	default:
		b.Fatalf("branch not implemented: %s", b.LongString())
	}
}
+ +package mips64 + +import ( + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/internal/obj/mips" + "internal/buildcfg" +) + +func Init(arch *ssagen.ArchInfo) { + arch.LinkArch = &mips.Linkmips64 + if buildcfg.GOARCH == "mips64le" { + arch.LinkArch = &mips.Linkmips64le + } + arch.REGSP = mips.REGSP + arch.MAXWIDTH = 1 << 50 + arch.SoftFloat = buildcfg.GOMIPS64 == "softfloat" + arch.ZeroRange = zerorange + arch.Ginsnop = ginsnop + + arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {} + arch.SSAGenValue = ssaGenValue + arch.SSAGenBlock = ssaGenBlock +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/mips64/ggen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/mips64/ggen.go new file mode 100644 index 0000000000000000000000000000000000000000..5f3f3e64d9cc1a2bc4592d0a398987a4ccecf10b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/mips64/ggen.go @@ -0,0 +1,55 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
package mips64

import (
	"cmd/compile/internal/ir"
	"cmd/compile/internal/objw"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/obj/mips"
)

// zerorange emits instructions after p that zero cnt bytes of the stack
// frame starting at frame offset off, and returns the last instruction
// emitted.
// NOTE(review): the literal 8 below plays the role of the fixed frame size
// (the sibling mips package uses base.Ctxt.Arch.FixedFrameSize) — confirm
// they agree before changing either.
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
	if cnt == 0 {
		return p
	}
	if cnt < int64(4*types.PtrSize) {
		// Small range: one MOVV store of R0 per pointer-sized word.
		for i := int64(0); i < cnt; i += int64(types.PtrSize) {
			p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i)
		}
	} else if cnt <= int64(128*types.PtrSize) {
		// Medium range: call runtime.duffzero, entering the unrolled
		// store sequence at the offset that zeroes exactly cnt bytes.
		p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
		p.Reg = mips.REGSP
		p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = ir.Syms.Duffzero
		p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
	} else {
		// Large range: explicit store loop, REGRT1 cursor / REGRT2 limit.
		// ADDV	$(8+frame+lo-8), SP, r1
		// ADDV	$cnt, r1, r2
		// loop:
		//	MOVV	R0, (Widthptr)r1
		//	ADDV	$Widthptr, r1
		//	BNE		r1, r2, loop
		p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
		p.Reg = mips.REGSP
		p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
		p.Reg = mips.REGRT1
		p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
		p1 := p // loop head: the store is the backward-branch target
		p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
		p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
		p.Reg = mips.REGRT2
		p.To.SetTarget(p1)
	}

	return p
}

// ginsnop emits a NOOP pseudo-instruction and returns it.
func ginsnop(pp *objw.Progs) *obj.Prog {
	p := pp.Prog(mips.ANOOP)
	return p
}
@@ -0,0 +1,889 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mips64 + +import ( + "math" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/logopt" + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/mips" +) + +// isFPreg reports whether r is an FP register. +func isFPreg(r int16) bool { + return mips.REG_F0 <= r && r <= mips.REG_F31 +} + +// isHILO reports whether r is HI or LO register. +func isHILO(r int16) bool { + return r == mips.REG_HI || r == mips.REG_LO +} + +// loadByType returns the load instruction of the given type. +func loadByType(t *types.Type, r int16) obj.As { + if isFPreg(r) { + if t.Size() == 4 { // float32 or int32 + return mips.AMOVF + } else { // float64 or int64 + return mips.AMOVD + } + } else { + switch t.Size() { + case 1: + if t.IsSigned() { + return mips.AMOVB + } else { + return mips.AMOVBU + } + case 2: + if t.IsSigned() { + return mips.AMOVH + } else { + return mips.AMOVHU + } + case 4: + if t.IsSigned() { + return mips.AMOVW + } else { + return mips.AMOVWU + } + case 8: + return mips.AMOVV + } + } + panic("bad load type") +} + +// storeByType returns the store instruction of the given type. 
+func storeByType(t *types.Type, r int16) obj.As { + if isFPreg(r) { + if t.Size() == 4 { // float32 or int32 + return mips.AMOVF + } else { // float64 or int64 + return mips.AMOVD + } + } else { + switch t.Size() { + case 1: + return mips.AMOVB + case 2: + return mips.AMOVH + case 4: + return mips.AMOVW + case 8: + return mips.AMOVV + } + } + panic("bad store type") +} + +func ssaGenValue(s *ssagen.State, v *ssa.Value) { + switch v.Op { + case ssa.OpCopy, ssa.OpMIPS64MOVVreg: + if v.Type.IsMemory() { + return + } + x := v.Args[0].Reg() + y := v.Reg() + if x == y { + return + } + as := mips.AMOVV + if isFPreg(x) && isFPreg(y) { + as = mips.AMOVD + } + p := s.Prog(as) + p.From.Type = obj.TYPE_REG + p.From.Reg = x + p.To.Type = obj.TYPE_REG + p.To.Reg = y + if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) { + // cannot move between special registers, use TMP as intermediate + p.To.Reg = mips.REGTMP + p = s.Prog(mips.AMOVV) + p.From.Type = obj.TYPE_REG + p.From.Reg = mips.REGTMP + p.To.Type = obj.TYPE_REG + p.To.Reg = y + } + case ssa.OpMIPS64MOVVnop: + // nothing to do + case ssa.OpLoadReg: + if v.Type.IsFlags() { + v.Fatalf("load flags not implemented: %v", v.LongString()) + return + } + r := v.Reg() + p := s.Prog(loadByType(v.Type, r)) + ssagen.AddrAuto(&p.From, v.Args[0]) + p.To.Type = obj.TYPE_REG + p.To.Reg = r + if isHILO(r) { + // cannot directly load, load to TMP and move + p.To.Reg = mips.REGTMP + p = s.Prog(mips.AMOVV) + p.From.Type = obj.TYPE_REG + p.From.Reg = mips.REGTMP + p.To.Type = obj.TYPE_REG + p.To.Reg = r + } + case ssa.OpStoreReg: + if v.Type.IsFlags() { + v.Fatalf("store flags not implemented: %v", v.LongString()) + return + } + r := v.Args[0].Reg() + if isHILO(r) { + // cannot directly store, move to TMP and store + p := s.Prog(mips.AMOVV) + p.From.Type = obj.TYPE_REG + p.From.Reg = r + p.To.Type = obj.TYPE_REG + p.To.Reg = mips.REGTMP + r = mips.REGTMP + } + p := s.Prog(storeByType(v.Type, r)) + p.From.Type = 
obj.TYPE_REG + p.From.Reg = r + ssagen.AddrAuto(&p.To, v) + case ssa.OpMIPS64ADDV, + ssa.OpMIPS64SUBV, + ssa.OpMIPS64AND, + ssa.OpMIPS64OR, + ssa.OpMIPS64XOR, + ssa.OpMIPS64NOR, + ssa.OpMIPS64SLLV, + ssa.OpMIPS64SRLV, + ssa.OpMIPS64SRAV, + ssa.OpMIPS64ADDF, + ssa.OpMIPS64ADDD, + ssa.OpMIPS64SUBF, + ssa.OpMIPS64SUBD, + ssa.OpMIPS64MULF, + ssa.OpMIPS64MULD, + ssa.OpMIPS64DIVF, + ssa.OpMIPS64DIVD: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpMIPS64SGT, + ssa.OpMIPS64SGTU: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpMIPS64ADDVconst, + ssa.OpMIPS64SUBVconst, + ssa.OpMIPS64ANDconst, + ssa.OpMIPS64ORconst, + ssa.OpMIPS64XORconst, + ssa.OpMIPS64NORconst, + ssa.OpMIPS64SLLVconst, + ssa.OpMIPS64SRLVconst, + ssa.OpMIPS64SRAVconst, + ssa.OpMIPS64SGTconst, + ssa.OpMIPS64SGTUconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpMIPS64MULV, + ssa.OpMIPS64MULVU, + ssa.OpMIPS64DIVV, + ssa.OpMIPS64DIVVU: + // result in hi,lo + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.Reg = v.Args[0].Reg() + case ssa.OpMIPS64MOVVconst: + r := v.Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = r + if isFPreg(r) || isHILO(r) { + // cannot move into FP or special registers, use TMP as intermediate + p.To.Reg = mips.REGTMP + p = s.Prog(mips.AMOVV) + p.From.Type = obj.TYPE_REG + p.From.Reg = mips.REGTMP + p.To.Type = obj.TYPE_REG + p.To.Reg = r + } + case ssa.OpMIPS64MOVFconst, + ssa.OpMIPS64MOVDconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_FCONST + p.From.Val = 
math.Float64frombits(uint64(v.AuxInt)) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpMIPS64CMPEQF, + ssa.OpMIPS64CMPEQD, + ssa.OpMIPS64CMPGEF, + ssa.OpMIPS64CMPGED, + ssa.OpMIPS64CMPGTF, + ssa.OpMIPS64CMPGTD: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.Reg = v.Args[1].Reg() + case ssa.OpMIPS64MOVVaddr: + p := s.Prog(mips.AMOVV) + p.From.Type = obj.TYPE_ADDR + p.From.Reg = v.Args[0].Reg() + var wantreg string + // MOVV $sym+off(base), R + // the assembler expands it as the following: + // - base is SP: add constant offset to SP (R29) + // when constant is large, tmp register (R23) may be used + // - base is SB: load external address with relocation + switch v.Aux.(type) { + default: + v.Fatalf("aux is of unknown type %T", v.Aux) + case *obj.LSym: + wantreg = "SB" + ssagen.AddAux(&p.From, v) + case *ir.Name: + wantreg = "SP" + ssagen.AddAux(&p.From, v) + case nil: + // No sym, just MOVV $off(SP), R + wantreg = "SP" + p.From.Offset = v.AuxInt + } + if reg := v.Args[0].RegName(); reg != wantreg { + v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg) + } + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpMIPS64MOVBload, + ssa.OpMIPS64MOVBUload, + ssa.OpMIPS64MOVHload, + ssa.OpMIPS64MOVHUload, + ssa.OpMIPS64MOVWload, + ssa.OpMIPS64MOVWUload, + ssa.OpMIPS64MOVVload, + ssa.OpMIPS64MOVFload, + ssa.OpMIPS64MOVDload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpMIPS64MOVBstore, + ssa.OpMIPS64MOVHstore, + ssa.OpMIPS64MOVWstore, + ssa.OpMIPS64MOVVstore, + ssa.OpMIPS64MOVFstore, + ssa.OpMIPS64MOVDstore: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpMIPS64MOVBstorezero, + ssa.OpMIPS64MOVHstorezero, + ssa.OpMIPS64MOVWstorezero, + 
ssa.OpMIPS64MOVVstorezero: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = mips.REGZERO + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpMIPS64MOVBreg, + ssa.OpMIPS64MOVBUreg, + ssa.OpMIPS64MOVHreg, + ssa.OpMIPS64MOVHUreg, + ssa.OpMIPS64MOVWreg, + ssa.OpMIPS64MOVWUreg: + a := v.Args[0] + for a.Op == ssa.OpCopy || a.Op == ssa.OpMIPS64MOVVreg { + a = a.Args[0] + } + if a.Op == ssa.OpLoadReg && mips.REG_R0 <= a.Reg() && a.Reg() <= mips.REG_R31 { + // LoadReg from a narrower type does an extension, except loading + // to a floating point register. So only eliminate the extension + // if it is loaded to an integer register. + t := a.Type + switch { + case v.Op == ssa.OpMIPS64MOVBreg && t.Size() == 1 && t.IsSigned(), + v.Op == ssa.OpMIPS64MOVBUreg && t.Size() == 1 && !t.IsSigned(), + v.Op == ssa.OpMIPS64MOVHreg && t.Size() == 2 && t.IsSigned(), + v.Op == ssa.OpMIPS64MOVHUreg && t.Size() == 2 && !t.IsSigned(), + v.Op == ssa.OpMIPS64MOVWreg && t.Size() == 4 && t.IsSigned(), + v.Op == ssa.OpMIPS64MOVWUreg && t.Size() == 4 && !t.IsSigned(): + // arg is a proper-typed load, already zero/sign-extended, don't extend again + if v.Reg() == v.Args[0].Reg() { + return + } + p := s.Prog(mips.AMOVV) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + return + default: + } + } + fallthrough + case ssa.OpMIPS64MOVWF, + ssa.OpMIPS64MOVWD, + ssa.OpMIPS64TRUNCFW, + ssa.OpMIPS64TRUNCDW, + ssa.OpMIPS64MOVVF, + ssa.OpMIPS64MOVVD, + ssa.OpMIPS64TRUNCFV, + ssa.OpMIPS64TRUNCDV, + ssa.OpMIPS64MOVFD, + ssa.OpMIPS64MOVDF, + ssa.OpMIPS64MOVWfpgp, + ssa.OpMIPS64MOVWgpfp, + ssa.OpMIPS64MOVVfpgp, + ssa.OpMIPS64MOVVgpfp, + ssa.OpMIPS64NEGF, + ssa.OpMIPS64NEGD, + ssa.OpMIPS64ABSD, + ssa.OpMIPS64SQRTF, + ssa.OpMIPS64SQRTD: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpMIPS64NEGV: + 
// SUB from REGZERO + p := s.Prog(mips.ASUBVU) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.Reg = mips.REGZERO + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpMIPS64DUFFZERO: + // runtime.duffzero expects start address - 8 in R1 + p := s.Prog(mips.ASUBVU) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 8 + p.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = mips.REG_R1 + p = s.Prog(obj.ADUFFZERO) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ir.Syms.Duffzero + p.To.Offset = v.AuxInt + case ssa.OpMIPS64LoweredZero: + // SUBV $8, R1 + // MOVV R0, 8(R1) + // ADDV $8, R1 + // BNE Rarg1, R1, -2(PC) + // arg1 is the address of the last element to zero + var sz int64 + var mov obj.As + switch { + case v.AuxInt%8 == 0: + sz = 8 + mov = mips.AMOVV + case v.AuxInt%4 == 0: + sz = 4 + mov = mips.AMOVW + case v.AuxInt%2 == 0: + sz = 2 + mov = mips.AMOVH + default: + sz = 1 + mov = mips.AMOVB + } + p := s.Prog(mips.ASUBVU) + p.From.Type = obj.TYPE_CONST + p.From.Offset = sz + p.To.Type = obj.TYPE_REG + p.To.Reg = mips.REG_R1 + p2 := s.Prog(mov) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = mips.REGZERO + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = mips.REG_R1 + p2.To.Offset = sz + p3 := s.Prog(mips.AADDVU) + p3.From.Type = obj.TYPE_CONST + p3.From.Offset = sz + p3.To.Type = obj.TYPE_REG + p3.To.Reg = mips.REG_R1 + p4 := s.Prog(mips.ABNE) + p4.From.Type = obj.TYPE_REG + p4.From.Reg = v.Args[1].Reg() + p4.Reg = mips.REG_R1 + p4.To.Type = obj.TYPE_BRANCH + p4.To.SetTarget(p2) + case ssa.OpMIPS64DUFFCOPY: + p := s.Prog(obj.ADUFFCOPY) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ir.Syms.Duffcopy + p.To.Offset = v.AuxInt + case ssa.OpMIPS64LoweredMove: + // SUBV $8, R1 + // MOVV 8(R1), Rtmp + // MOVV Rtmp, (R2) + // ADDV $8, R1 + // ADDV $8, R2 + // BNE Rarg2, R1, -4(PC) + // arg2 is the address of the last element of src + var sz int64 + var mov obj.As + switch { + case v.AuxInt%8 == 0: + sz = 8 
+ mov = mips.AMOVV + case v.AuxInt%4 == 0: + sz = 4 + mov = mips.AMOVW + case v.AuxInt%2 == 0: + sz = 2 + mov = mips.AMOVH + default: + sz = 1 + mov = mips.AMOVB + } + p := s.Prog(mips.ASUBVU) + p.From.Type = obj.TYPE_CONST + p.From.Offset = sz + p.To.Type = obj.TYPE_REG + p.To.Reg = mips.REG_R1 + p2 := s.Prog(mov) + p2.From.Type = obj.TYPE_MEM + p2.From.Reg = mips.REG_R1 + p2.From.Offset = sz + p2.To.Type = obj.TYPE_REG + p2.To.Reg = mips.REGTMP + p3 := s.Prog(mov) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = mips.REGTMP + p3.To.Type = obj.TYPE_MEM + p3.To.Reg = mips.REG_R2 + p4 := s.Prog(mips.AADDVU) + p4.From.Type = obj.TYPE_CONST + p4.From.Offset = sz + p4.To.Type = obj.TYPE_REG + p4.To.Reg = mips.REG_R1 + p5 := s.Prog(mips.AADDVU) + p5.From.Type = obj.TYPE_CONST + p5.From.Offset = sz + p5.To.Type = obj.TYPE_REG + p5.To.Reg = mips.REG_R2 + p6 := s.Prog(mips.ABNE) + p6.From.Type = obj.TYPE_REG + p6.From.Reg = v.Args[2].Reg() + p6.Reg = mips.REG_R1 + p6.To.Type = obj.TYPE_BRANCH + p6.To.SetTarget(p2) + case ssa.OpMIPS64CALLstatic, ssa.OpMIPS64CALLclosure, ssa.OpMIPS64CALLinter: + s.Call(v) + case ssa.OpMIPS64CALLtail: + s.TailCall(v) + case ssa.OpMIPS64LoweredWB: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + // AuxInt encodes how many buffer entries we need. 
+ p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1] + case ssa.OpMIPS64LoweredPanicBoundsA, ssa.OpMIPS64LoweredPanicBoundsB, ssa.OpMIPS64LoweredPanicBoundsC: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] + s.UseArgs(16) // space used in callee args area by assembly stubs + case ssa.OpMIPS64LoweredAtomicLoad8, ssa.OpMIPS64LoweredAtomicLoad32, ssa.OpMIPS64LoweredAtomicLoad64: + as := mips.AMOVV + switch v.Op { + case ssa.OpMIPS64LoweredAtomicLoad8: + as = mips.AMOVB + case ssa.OpMIPS64LoweredAtomicLoad32: + as = mips.AMOVW + } + s.Prog(mips.ASYNC) + p := s.Prog(as) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + s.Prog(mips.ASYNC) + case ssa.OpMIPS64LoweredAtomicStore8, ssa.OpMIPS64LoweredAtomicStore32, ssa.OpMIPS64LoweredAtomicStore64: + as := mips.AMOVV + switch v.Op { + case ssa.OpMIPS64LoweredAtomicStore8: + as = mips.AMOVB + case ssa.OpMIPS64LoweredAtomicStore32: + as = mips.AMOVW + } + s.Prog(mips.ASYNC) + p := s.Prog(as) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + s.Prog(mips.ASYNC) + case ssa.OpMIPS64LoweredAtomicStorezero32, ssa.OpMIPS64LoweredAtomicStorezero64: + as := mips.AMOVV + if v.Op == ssa.OpMIPS64LoweredAtomicStorezero32 { + as = mips.AMOVW + } + s.Prog(mips.ASYNC) + p := s.Prog(as) + p.From.Type = obj.TYPE_REG + p.From.Reg = mips.REGZERO + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + s.Prog(mips.ASYNC) + case ssa.OpMIPS64LoweredAtomicExchange32, ssa.OpMIPS64LoweredAtomicExchange64: + // SYNC + // MOVV Rarg1, Rtmp + // LL (Rarg0), Rout + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // SYNC + ll := mips.ALLV + sc := mips.ASCV + if v.Op == ssa.OpMIPS64LoweredAtomicExchange32 { + ll = mips.ALL + sc = mips.ASC + } + s.Prog(mips.ASYNC) + p := s.Prog(mips.AMOVV) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = 
obj.TYPE_REG + p.To.Reg = mips.REGTMP + p1 := s.Prog(ll) + p1.From.Type = obj.TYPE_MEM + p1.From.Reg = v.Args[0].Reg() + p1.To.Type = obj.TYPE_REG + p1.To.Reg = v.Reg0() + p2 := s.Prog(sc) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = mips.REGTMP + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = v.Args[0].Reg() + p3 := s.Prog(mips.ABEQ) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = mips.REGTMP + p3.To.Type = obj.TYPE_BRANCH + p3.To.SetTarget(p) + s.Prog(mips.ASYNC) + case ssa.OpMIPS64LoweredAtomicAdd32, ssa.OpMIPS64LoweredAtomicAdd64: + // SYNC + // LL (Rarg0), Rout + // ADDV Rarg1, Rout, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // SYNC + // ADDV Rarg1, Rout + ll := mips.ALLV + sc := mips.ASCV + if v.Op == ssa.OpMIPS64LoweredAtomicAdd32 { + ll = mips.ALL + sc = mips.ASC + } + s.Prog(mips.ASYNC) + p := s.Prog(ll) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + p1 := s.Prog(mips.AADDVU) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = v.Args[1].Reg() + p1.Reg = v.Reg0() + p1.To.Type = obj.TYPE_REG + p1.To.Reg = mips.REGTMP + p2 := s.Prog(sc) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = mips.REGTMP + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = v.Args[0].Reg() + p3 := s.Prog(mips.ABEQ) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = mips.REGTMP + p3.To.Type = obj.TYPE_BRANCH + p3.To.SetTarget(p) + s.Prog(mips.ASYNC) + p4 := s.Prog(mips.AADDVU) + p4.From.Type = obj.TYPE_REG + p4.From.Reg = v.Args[1].Reg() + p4.Reg = v.Reg0() + p4.To.Type = obj.TYPE_REG + p4.To.Reg = v.Reg0() + case ssa.OpMIPS64LoweredAtomicAddconst32, ssa.OpMIPS64LoweredAtomicAddconst64: + // SYNC + // LL (Rarg0), Rout + // ADDV $auxint, Rout, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // SYNC + // ADDV $auxint, Rout + ll := mips.ALLV + sc := mips.ASCV + if v.Op == ssa.OpMIPS64LoweredAtomicAddconst32 { + ll = mips.ALL + sc = mips.ASC + } + s.Prog(mips.ASYNC) + p := s.Prog(ll) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + 
p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + p1 := s.Prog(mips.AADDVU) + p1.From.Type = obj.TYPE_CONST + p1.From.Offset = v.AuxInt + p1.Reg = v.Reg0() + p1.To.Type = obj.TYPE_REG + p1.To.Reg = mips.REGTMP + p2 := s.Prog(sc) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = mips.REGTMP + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = v.Args[0].Reg() + p3 := s.Prog(mips.ABEQ) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = mips.REGTMP + p3.To.Type = obj.TYPE_BRANCH + p3.To.SetTarget(p) + s.Prog(mips.ASYNC) + p4 := s.Prog(mips.AADDVU) + p4.From.Type = obj.TYPE_CONST + p4.From.Offset = v.AuxInt + p4.Reg = v.Reg0() + p4.To.Type = obj.TYPE_REG + p4.To.Reg = v.Reg0() + case ssa.OpMIPS64LoweredAtomicAnd32, + ssa.OpMIPS64LoweredAtomicOr32: + // SYNC + // LL (Rarg0), Rtmp + // AND/OR Rarg1, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // SYNC + s.Prog(mips.ASYNC) + + p := s.Prog(mips.ALL) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = mips.REGTMP + + p1 := s.Prog(v.Op.Asm()) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = v.Args[1].Reg() + p1.Reg = mips.REGTMP + p1.To.Type = obj.TYPE_REG + p1.To.Reg = mips.REGTMP + + p2 := s.Prog(mips.ASC) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = mips.REGTMP + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = v.Args[0].Reg() + + p3 := s.Prog(mips.ABEQ) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = mips.REGTMP + p3.To.Type = obj.TYPE_BRANCH + p3.To.SetTarget(p) + + s.Prog(mips.ASYNC) + + case ssa.OpMIPS64LoweredAtomicCas32, ssa.OpMIPS64LoweredAtomicCas64: + // MOVV $0, Rout + // SYNC + // LL (Rarg0), Rtmp + // BNE Rtmp, Rarg1, 4(PC) + // MOVV Rarg2, Rout + // SC Rout, (Rarg0) + // BEQ Rout, -4(PC) + // SYNC + ll := mips.ALLV + sc := mips.ASCV + if v.Op == ssa.OpMIPS64LoweredAtomicCas32 { + ll = mips.ALL + sc = mips.ASC + } + p := s.Prog(mips.AMOVV) + p.From.Type = obj.TYPE_REG + p.From.Reg = mips.REGZERO + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + s.Prog(mips.ASYNC) + p1 := s.Prog(ll) + 
p1.From.Type = obj.TYPE_MEM + p1.From.Reg = v.Args[0].Reg() + p1.To.Type = obj.TYPE_REG + p1.To.Reg = mips.REGTMP + p2 := s.Prog(mips.ABNE) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = v.Args[1].Reg() + p2.Reg = mips.REGTMP + p2.To.Type = obj.TYPE_BRANCH + p3 := s.Prog(mips.AMOVV) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = v.Args[2].Reg() + p3.To.Type = obj.TYPE_REG + p3.To.Reg = v.Reg0() + p4 := s.Prog(sc) + p4.From.Type = obj.TYPE_REG + p4.From.Reg = v.Reg0() + p4.To.Type = obj.TYPE_MEM + p4.To.Reg = v.Args[0].Reg() + p5 := s.Prog(mips.ABEQ) + p5.From.Type = obj.TYPE_REG + p5.From.Reg = v.Reg0() + p5.To.Type = obj.TYPE_BRANCH + p5.To.SetTarget(p1) + p6 := s.Prog(mips.ASYNC) + p2.To.SetTarget(p6) + case ssa.OpMIPS64LoweredNilCheck: + // Issue a load which will fault if arg is nil. + p := s.Prog(mips.AMOVB) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = mips.REGTMP + if logopt.Enabled() { + logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) + } + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") + } + case ssa.OpMIPS64FPFlagTrue, + ssa.OpMIPS64FPFlagFalse: + // MOVV $0, r + // BFPF 2(PC) + // MOVV $1, r + branch := mips.ABFPF + if v.Op == ssa.OpMIPS64FPFlagFalse { + branch = mips.ABFPT + } + p := s.Prog(mips.AMOVV) + p.From.Type = obj.TYPE_REG + p.From.Reg = mips.REGZERO + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + p2 := s.Prog(branch) + p2.To.Type = obj.TYPE_BRANCH + p3 := s.Prog(mips.AMOVV) + p3.From.Type = obj.TYPE_CONST + p3.From.Offset = 1 + p3.To.Type = obj.TYPE_REG + p3.To.Reg = v.Reg() + p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land + p2.To.SetTarget(p4) + case ssa.OpMIPS64LoweredGetClosurePtr: + // Closure pointer is R22 (mips.REGCTXT). 
+ ssagen.CheckLoweredGetClosurePtr(v) + case ssa.OpMIPS64LoweredGetCallerSP: + // caller's SP is FixedFrameSize below the address of the first arg + p := s.Prog(mips.AMOVV) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = -base.Ctxt.Arch.FixedFrameSize + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpMIPS64LoweredGetCallerPC: + p := s.Prog(obj.AGETCALLERPC) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpClobber, ssa.OpClobberReg: + // TODO: implement for clobberdead experiment. Nop is ok for now. + default: + v.Fatalf("genValue not implemented: %s", v.LongString()) + } +} + +var blockJump = map[ssa.BlockKind]struct { + asm, invasm obj.As +}{ + ssa.BlockMIPS64EQ: {mips.ABEQ, mips.ABNE}, + ssa.BlockMIPS64NE: {mips.ABNE, mips.ABEQ}, + ssa.BlockMIPS64LTZ: {mips.ABLTZ, mips.ABGEZ}, + ssa.BlockMIPS64GEZ: {mips.ABGEZ, mips.ABLTZ}, + ssa.BlockMIPS64LEZ: {mips.ABLEZ, mips.ABGTZ}, + ssa.BlockMIPS64GTZ: {mips.ABGTZ, mips.ABLEZ}, + ssa.BlockMIPS64FPT: {mips.ABFPT, mips.ABFPF}, + ssa.BlockMIPS64FPF: {mips.ABFPF, mips.ABFPT}, +} + +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { + switch b.Kind { + case ssa.BlockPlain: + if b.Succs[0].Block() != next { + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) + } + case ssa.BlockDefer: + // defer returns in R1: + // 0 if we should continue executing + // 1 if we should jump to deferreturn call + p := s.Prog(mips.ABNE) + p.From.Type = obj.TYPE_REG + p.From.Reg = mips.REGZERO + p.Reg = mips.REG_R1 + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()}) + if b.Succs[0].Block() != next { + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) + } + case ssa.BlockExit, ssa.BlockRetJmp: + case ssa.BlockRet: + s.Prog(obj.ARET) + case ssa.BlockMIPS64EQ, ssa.BlockMIPS64NE, + 
ssa.BlockMIPS64LTZ, ssa.BlockMIPS64GEZ, + ssa.BlockMIPS64LEZ, ssa.BlockMIPS64GTZ, + ssa.BlockMIPS64FPT, ssa.BlockMIPS64FPF: + jmp := blockJump[b.Kind] + var p *obj.Prog + switch next { + case b.Succs[0].Block(): + p = s.Br(jmp.invasm, b.Succs[1].Block()) + case b.Succs[1].Block(): + p = s.Br(jmp.asm, b.Succs[0].Block()) + default: + if b.Likely != ssa.BranchUnlikely { + p = s.Br(jmp.asm, b.Succs[0].Block()) + s.Br(obj.AJMP, b.Succs[1].Block()) + } else { + p = s.Br(jmp.invasm, b.Succs[1].Block()) + s.Br(obj.AJMP, b.Succs[0].Block()) + } + } + if !b.Controls[0].Type.IsFlags() { + p.From.Type = obj.TYPE_REG + p.From.Reg = b.Controls[0].Reg() + } + default: + b.Fatalf("branch not implemented: %s", b.LongString()) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/codes.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/codes.go new file mode 100644 index 0000000000000000000000000000000000000000..8bdbfc9a8800b8aa56a9abc3fc6936bdf55c8693 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/codes.go @@ -0,0 +1,91 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package noder + +import "internal/pkgbits" + +// A codeStmt distinguishes among statement encodings. +type codeStmt int + +func (c codeStmt) Marker() pkgbits.SyncMarker { return pkgbits.SyncStmt1 } +func (c codeStmt) Value() int { return int(c) } + +const ( + stmtEnd codeStmt = iota + stmtLabel + stmtBlock + stmtExpr + stmtSend + stmtAssign + stmtAssignOp + stmtIncDec + stmtBranch + stmtCall + stmtReturn + stmtIf + stmtFor + stmtSwitch + stmtSelect +) + +// A codeExpr distinguishes among expression encodings. +type codeExpr int + +func (c codeExpr) Marker() pkgbits.SyncMarker { return pkgbits.SyncExpr } +func (c codeExpr) Value() int { return int(c) } + +// TODO(mdempsky): Split expr into addr, for lvalues. 
+const ( + exprConst codeExpr = iota + exprLocal // local variable + exprGlobal // global variable or function + exprCompLit + exprFuncLit + exprFieldVal + exprMethodVal + exprMethodExpr + exprIndex + exprSlice + exprAssert + exprUnaryOp + exprBinaryOp + exprCall + exprConvert + exprNew + exprMake + exprSizeof + exprAlignof + exprOffsetof + exprZero + exprFuncInst + exprRecv + exprReshape + exprRuntimeBuiltin // a reference to a runtime function from transformed syntax. Followed by string name, e.g., "panicrangeexit" +) + +type codeAssign int + +func (c codeAssign) Marker() pkgbits.SyncMarker { return pkgbits.SyncAssign } +func (c codeAssign) Value() int { return int(c) } + +const ( + assignBlank codeAssign = iota + assignDef + assignExpr +) + +// A codeDecl distinguishes among declaration encodings. +type codeDecl int + +func (c codeDecl) Marker() pkgbits.SyncMarker { return pkgbits.SyncDecl } +func (c codeDecl) Value() int { return int(c) } + +const ( + declEnd codeDecl = iota + declFunc + declMethod + declVar + declOther +) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/export.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/export.go new file mode 100644 index 0000000000000000000000000000000000000000..e1f289b56f8c9c9174b4a71aa9c07a7c9fbb3a4a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/export.go @@ -0,0 +1,30 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package noder + +import ( + "bytes" + "fmt" + "io" + + "cmd/compile/internal/base" + "cmd/internal/bio" +) + +func WriteExports(out *bio.Writer) { + var data bytes.Buffer + + data.WriteByte('u') + writeUnifiedExport(&data) + + // The linker also looks for the $$ marker - use char after $$ to distinguish format. 
+ out.WriteString("\n$$B\n") // indicate binary export format + io.Copy(out, &data) + out.WriteString("\n$$\n") + + if base.Debug.Export != 0 { + fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, data.Len()) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/helpers.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/helpers.go new file mode 100644 index 0000000000000000000000000000000000000000..0bff71e65845efc0a158fceb3d35642b3de0341a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/helpers.go @@ -0,0 +1,140 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package noder + +import ( + "go/constant" + + "cmd/compile/internal/ir" + "cmd/compile/internal/syntax" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/compile/internal/types2" + "cmd/internal/src" +) + +// Helpers for constructing typed IR nodes. +// +// TODO(mdempsky): Move into their own package so they can be easily +// reused by iimport and frontend optimizations. + +type ImplicitNode interface { + ir.Node + SetImplicit(x bool) +} + +// Implicit returns n after marking it as Implicit. +func Implicit(n ImplicitNode) ImplicitNode { + n.SetImplicit(true) + return n +} + +// typed returns n after setting its type to typ. +func typed(typ *types.Type, n ir.Node) ir.Node { + n.SetType(typ) + n.SetTypecheck(1) + return n +} + +// Values + +// FixValue returns val after converting and truncating it as +// appropriate for typ. 
+func FixValue(typ *types.Type, val constant.Value) constant.Value { + assert(typ.Kind() != types.TFORW) + switch { + case typ.IsInteger(): + val = constant.ToInt(val) + case typ.IsFloat(): + val = constant.ToFloat(val) + case typ.IsComplex(): + val = constant.ToComplex(val) + } + if !typ.IsUntyped() { + val = typecheck.ConvertVal(val, typ, false) + } + ir.AssertValidTypeForConst(typ, val) + return val +} + +// Expressions + +func Addr(pos src.XPos, x ir.Node) *ir.AddrExpr { + n := typecheck.NodAddrAt(pos, x) + typed(types.NewPtr(x.Type()), n) + return n +} + +func Deref(pos src.XPos, typ *types.Type, x ir.Node) *ir.StarExpr { + n := ir.NewStarExpr(pos, x) + typed(typ, n) + return n +} + +// Statements + +func idealType(tv syntax.TypeAndValue) types2.Type { + // The gc backend expects all expressions to have a concrete type, and + // types2 mostly satisfies this expectation already. But there are a few + // cases where the Go spec doesn't require converting to concrete type, + // and so types2 leaves them untyped. So we need to fix those up here. + typ := types2.Unalias(tv.Type) + if basic, ok := typ.(*types2.Basic); ok && basic.Info()&types2.IsUntyped != 0 { + switch basic.Kind() { + case types2.UntypedNil: + // ok; can appear in type switch case clauses + // TODO(mdempsky): Handle as part of type switches instead? 
+ case types2.UntypedInt, types2.UntypedFloat, types2.UntypedComplex: + typ = types2.Typ[types2.Uint] + if tv.Value != nil { + s := constant.ToInt(tv.Value) + assert(s.Kind() == constant.Int) + if constant.Sign(s) < 0 { + typ = types2.Typ[types2.Int] + } + } + case types2.UntypedBool: + typ = types2.Typ[types2.Bool] // expression in "if" or "for" condition + case types2.UntypedString: + typ = types2.Typ[types2.String] // argument to "append" or "copy" calls + case types2.UntypedRune: + typ = types2.Typ[types2.Int32] // range over rune + default: + return nil + } + } + return typ +} + +func isTypeParam(t types2.Type) bool { + _, ok := types2.Unalias(t).(*types2.TypeParam) + return ok +} + +// isNotInHeap reports whether typ is or contains an element of type +// runtime/internal/sys.NotInHeap. +func isNotInHeap(typ types2.Type) bool { + typ = types2.Unalias(typ) + if named, ok := typ.(*types2.Named); ok { + if obj := named.Obj(); obj.Name() == "nih" && obj.Pkg().Path() == "runtime/internal/sys" { + return true + } + typ = named.Underlying() + } + + switch typ := typ.(type) { + case *types2.Array: + return isNotInHeap(typ.Elem()) + case *types2.Struct: + for i := 0; i < typ.NumFields(); i++ { + if isNotInHeap(typ.Field(i).Type()) { + return true + } + } + return false + default: + return false + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/import.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/import.go new file mode 100644 index 0000000000000000000000000000000000000000..e9bb1e313b65303641e6d544179c1715b1fb3d37 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/import.go @@ -0,0 +1,374 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package noder + +import ( + "errors" + "fmt" + "internal/buildcfg" + "internal/pkgbits" + "os" + pathpkg "path" + "runtime" + "strings" + "unicode" + "unicode/utf8" + + "cmd/compile/internal/base" + "cmd/compile/internal/importer" + "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/compile/internal/types2" + "cmd/internal/archive" + "cmd/internal/bio" + "cmd/internal/goobj" + "cmd/internal/objabi" +) + +type gcimports struct { + ctxt *types2.Context + packages map[string]*types2.Package +} + +func (m *gcimports) Import(path string) (*types2.Package, error) { + return m.ImportFrom(path, "" /* no vendoring */, 0) +} + +func (m *gcimports) ImportFrom(path, srcDir string, mode types2.ImportMode) (*types2.Package, error) { + if mode != 0 { + panic("mode must be 0") + } + + _, pkg, err := readImportFile(path, typecheck.Target, m.ctxt, m.packages) + return pkg, err +} + +func isDriveLetter(b byte) bool { + return 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' +} + +// is this path a local name? begins with ./ or ../ or / +func islocalname(name string) bool { + return strings.HasPrefix(name, "/") || + runtime.GOOS == "windows" && len(name) >= 3 && isDriveLetter(name[0]) && name[1] == ':' && name[2] == '/' || + strings.HasPrefix(name, "./") || name == "." || + strings.HasPrefix(name, "../") || name == ".." +} + +func openPackage(path string) (*os.File, error) { + if islocalname(path) { + if base.Flag.NoLocalImports { + return nil, errors.New("local imports disallowed") + } + + if base.Flag.Cfg.PackageFile != nil { + return os.Open(base.Flag.Cfg.PackageFile[path]) + } + + // try .a before .o. important for building libraries: + // if there is an array.o in the array.a library, + // want to find all of array.a, not just array.o. 
+ if file, err := os.Open(fmt.Sprintf("%s.a", path)); err == nil { + return file, nil + } + if file, err := os.Open(fmt.Sprintf("%s.o", path)); err == nil { + return file, nil + } + return nil, errors.New("file not found") + } + + // local imports should be canonicalized already. + // don't want to see "encoding/../encoding/base64" + // as different from "encoding/base64". + if q := pathpkg.Clean(path); q != path { + return nil, fmt.Errorf("non-canonical import path %q (should be %q)", path, q) + } + + if base.Flag.Cfg.PackageFile != nil { + return os.Open(base.Flag.Cfg.PackageFile[path]) + } + + for _, dir := range base.Flag.Cfg.ImportDirs { + if file, err := os.Open(fmt.Sprintf("%s/%s.a", dir, path)); err == nil { + return file, nil + } + if file, err := os.Open(fmt.Sprintf("%s/%s.o", dir, path)); err == nil { + return file, nil + } + } + + if buildcfg.GOROOT != "" { + suffix := "" + if base.Flag.InstallSuffix != "" { + suffix = "_" + base.Flag.InstallSuffix + } else if base.Flag.Race { + suffix = "_race" + } else if base.Flag.MSan { + suffix = "_msan" + } else if base.Flag.ASan { + suffix = "_asan" + } + + if file, err := os.Open(fmt.Sprintf("%s/pkg/%s_%s%s/%s.a", buildcfg.GOROOT, buildcfg.GOOS, buildcfg.GOARCH, suffix, path)); err == nil { + return file, nil + } + if file, err := os.Open(fmt.Sprintf("%s/pkg/%s_%s%s/%s.o", buildcfg.GOROOT, buildcfg.GOOS, buildcfg.GOARCH, suffix, path)); err == nil { + return file, nil + } + } + return nil, errors.New("file not found") +} + +// resolveImportPath resolves an import path as it appears in a Go +// source file to the package's full path. +func resolveImportPath(path string) (string, error) { + // The package name main is no longer reserved, + // but we reserve the import path "main" to identify + // the main package, just as we reserve the import + // path "math" to identify the standard math package. 
+ if path == "main" { + return "", errors.New("cannot import \"main\"") + } + + if base.Ctxt.Pkgpath == "" { + panic("missing pkgpath") + } + if path == base.Ctxt.Pkgpath { + return "", fmt.Errorf("import %q while compiling that package (import cycle)", path) + } + + if mapped, ok := base.Flag.Cfg.ImportMap[path]; ok { + path = mapped + } + + if islocalname(path) { + if path[0] == '/' { + return "", errors.New("import path cannot be absolute path") + } + + prefix := base.Flag.D + if prefix == "" { + // Questionable, but when -D isn't specified, historically we + // resolve local import paths relative to the directory the + // compiler's current directory, not the respective source + // file's directory. + prefix = base.Ctxt.Pathname + } + path = pathpkg.Join(prefix, path) + + if err := checkImportPath(path, true); err != nil { + return "", err + } + } + + return path, nil +} + +// readImportFile reads the import file for the given package path and +// returns its types.Pkg representation. If packages is non-nil, the +// types2.Package representation is also returned. +func readImportFile(path string, target *ir.Package, env *types2.Context, packages map[string]*types2.Package) (pkg1 *types.Pkg, pkg2 *types2.Package, err error) { + path, err = resolveImportPath(path) + if err != nil { + return + } + + if path == "unsafe" { + pkg1, pkg2 = types.UnsafePkg, types2.Unsafe + + // TODO(mdempsky): Investigate if this actually matters. Why would + // the linker or runtime care whether a package imported unsafe? 
+ if !pkg1.Direct { + pkg1.Direct = true + target.Imports = append(target.Imports, pkg1) + } + + return + } + + pkg1 = types.NewPkg(path, "") + if packages != nil { + pkg2 = packages[path] + assert(pkg1.Direct == (pkg2 != nil && pkg2.Complete())) + } + + if pkg1.Direct { + return + } + pkg1.Direct = true + target.Imports = append(target.Imports, pkg1) + + f, err := openPackage(path) + if err != nil { + return + } + defer f.Close() + + r, end, err := findExportData(f) + if err != nil { + return + } + + if base.Debug.Export != 0 { + fmt.Printf("importing %s (%s)\n", path, f.Name()) + } + + c, err := r.ReadByte() + if err != nil { + return + } + + pos := r.Offset() + + // Map export data section into memory as a single large + // string. This reduces heap fragmentation and allows returning + // individual substrings very efficiently. + var data string + data, err = base.MapFile(r.File(), pos, end-pos) + if err != nil { + return + } + + switch c { + case 'u': + // TODO(mdempsky): This seems a bit clunky. + data = strings.TrimSuffix(data, "\n$$\n") + + pr := pkgbits.NewPkgDecoder(pkg1.Path, data) + + // Read package descriptors for both types2 and compiler backend. + readPackage(newPkgReader(pr), pkg1, false) + pkg2 = importer.ReadPackage(env, packages, pr) + + default: + // Indexed format is distinguished by an 'i' byte, + // whereas previous export formats started with 'c', 'd', or 'v'. + err = fmt.Errorf("unexpected package format byte: %v", c) + return + } + + err = addFingerprint(path, f, end) + return +} + +// findExportData returns a *bio.Reader positioned at the start of the +// binary export data section, and a file offset for where to stop +// reading. 
+func findExportData(f *os.File) (r *bio.Reader, end int64, err error) { + r = bio.NewReader(f) + + // check object header + line, err := r.ReadString('\n') + if err != nil { + return + } + + if line == "!\n" { // package archive + // package export block should be first + sz := int64(archive.ReadHeader(r.Reader, "__.PKGDEF")) + if sz <= 0 { + err = errors.New("not a package file") + return + } + end = r.Offset() + sz + line, err = r.ReadString('\n') + if err != nil { + return + } + } else { + // Not an archive; provide end of file instead. + // TODO(mdempsky): I don't think this happens anymore. + var fi os.FileInfo + fi, err = f.Stat() + if err != nil { + return + } + end = fi.Size() + } + + if !strings.HasPrefix(line, "go object ") { + err = fmt.Errorf("not a go object file: %s", line) + return + } + if expect := objabi.HeaderString(); line != expect { + err = fmt.Errorf("object is [%s] expected [%s]", line, expect) + return + } + + // process header lines + for !strings.HasPrefix(line, "$$") { + line, err = r.ReadString('\n') + if err != nil { + return + } + } + + // Expect $$B\n to signal binary import format. + if line != "$$B\n" { + err = errors.New("old export format no longer supported (recompile library)") + return + } + + return +} + +// addFingerprint reads the linker fingerprint included at the end of +// the exportdata. +func addFingerprint(path string, f *os.File, end int64) error { + const eom = "\n$$\n" + var fingerprint goobj.FingerprintType + + var buf [len(fingerprint) + len(eom)]byte + if _, err := f.ReadAt(buf[:], end-int64(len(buf))); err != nil { + return err + } + + // Caller should have given us the end position of the export data, + // which should end with the "\n$$\n" marker. As a consistency check + // to make sure we're reading at the right offset, make sure we + // found the marker. 
+ if s := string(buf[len(fingerprint):]); s != eom { + return fmt.Errorf("expected $$ marker, but found %q", s) + } + + copy(fingerprint[:], buf[:]) + base.Ctxt.AddImport(path, fingerprint) + + return nil +} + +func checkImportPath(path string, allowSpace bool) error { + if path == "" { + return errors.New("import path is empty") + } + + if strings.Contains(path, "\x00") { + return errors.New("import path contains NUL") + } + + for ri := range base.ReservedImports { + if path == ri { + return fmt.Errorf("import path %q is reserved and cannot be used", path) + } + } + + for _, r := range path { + switch { + case r == utf8.RuneError: + return fmt.Errorf("import path contains invalid UTF-8 sequence: %q", path) + case r < 0x20 || r == 0x7f: + return fmt.Errorf("import path contains control character: %q", path) + case r == '\\': + return fmt.Errorf("import path contains backslash; use slash: %q", path) + case !allowSpace && unicode.IsSpace(r): + return fmt.Errorf("import path contains space character: %q", path) + case strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r): + return fmt.Errorf("import path contains invalid character '%c': %q", r, path) + } + } + + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/irgen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/irgen.go new file mode 100644 index 0000000000000000000000000000000000000000..145bcc8c3553c4ac5543845ea285004c09b09d66 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/irgen.go @@ -0,0 +1,252 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package noder + +import ( + "fmt" + "internal/buildcfg" + "internal/types/errors" + "regexp" + "sort" + + "cmd/compile/internal/base" + "cmd/compile/internal/rangefunc" + "cmd/compile/internal/syntax" + "cmd/compile/internal/types2" + "cmd/internal/src" +) + +var versionErrorRx = regexp.MustCompile(`requires go[0-9]+\.[0-9]+ or later`) + +// checkFiles configures and runs the types2 checker on the given +// parsed source files and then returns the result. +func checkFiles(m posMap, noders []*noder) (*types2.Package, *types2.Info) { + if base.SyntaxErrors() != 0 { + base.ErrorExit() + } + + // setup and syntax error reporting + files := make([]*syntax.File, len(noders)) + // posBaseMap maps all file pos bases back to *syntax.File + // for checking Go version mismatched. + posBaseMap := make(map[*syntax.PosBase]*syntax.File) + for i, p := range noders { + files[i] = p.file + // The file.Pos() is the position of the package clause. + // If there's a //line directive before that, file.Pos().Base() + // refers to that directive, not the file itself. + // Make sure to consistently map back to file base, here and + // when we look for a file in the conf.Error handler below, + // otherwise the file may not be found (was go.dev/issue/67141). 
+ posBaseMap[fileBase(p.file.Pos())] = p.file + } + + // typechecking + ctxt := types2.NewContext() + importer := gcimports{ + ctxt: ctxt, + packages: make(map[string]*types2.Package), + } + conf := types2.Config{ + Context: ctxt, + GoVersion: base.Flag.Lang, + IgnoreBranchErrors: true, // parser already checked via syntax.CheckBranches mode + Importer: &importer, + Sizes: types2.SizesFor("gc", buildcfg.GOARCH), + } + if base.Flag.ErrorURL { + conf.ErrorURL = " [go.dev/e/%s]" + } + info := &types2.Info{ + StoreTypesInSyntax: true, + Defs: make(map[*syntax.Name]types2.Object), + Uses: make(map[*syntax.Name]types2.Object), + Selections: make(map[*syntax.SelectorExpr]*types2.Selection), + Implicits: make(map[syntax.Node]types2.Object), + Scopes: make(map[syntax.Node]*types2.Scope), + Instances: make(map[*syntax.Name]types2.Instance), + FileVersions: make(map[*syntax.PosBase]string), + // expand as needed + } + conf.Error = func(err error) { + terr := err.(types2.Error) + msg := terr.Msg + if versionErrorRx.MatchString(msg) { + posBase := fileBase(terr.Pos) + fileVersion := info.FileVersions[posBase] + file := posBaseMap[posBase] + if file == nil { + // This should never happen, but be careful and don't crash. + } else if file.GoVersion == fileVersion { + // If we have a version error caused by //go:build, report it. + msg = fmt.Sprintf("%s (file declares //go:build %s)", msg, fileVersion) + } else { + // Otherwise, hint at the -lang setting. + msg = fmt.Sprintf("%s (-lang was set to %s; check go.mod)", msg, base.Flag.Lang) + } + } + base.ErrorfAt(m.makeXPos(terr.Pos), terr.Code, "%s", msg) + } + + pkg, err := conf.Check(base.Ctxt.Pkgpath, files, info) + base.ExitIfErrors() + if err != nil { + base.FatalfAt(src.NoXPos, "conf.Check error: %v", err) + } + + // Check for anonymous interface cycles (#56103). 
+ // TODO(gri) move this code into the type checkers (types2 and go/types) + var f cycleFinder + for _, file := range files { + syntax.Inspect(file, func(n syntax.Node) bool { + if n, ok := n.(*syntax.InterfaceType); ok { + if f.hasCycle(types2.Unalias(n.GetTypeInfo().Type).(*types2.Interface)) { + base.ErrorfAt(m.makeXPos(n.Pos()), errors.InvalidTypeCycle, "invalid recursive type: anonymous interface refers to itself (see https://go.dev/issue/56103)") + + for typ := range f.cyclic { + f.cyclic[typ] = false // suppress duplicate errors + } + } + return false + } + return true + }) + } + base.ExitIfErrors() + + // Implementation restriction: we don't allow not-in-heap types to + // be used as type arguments (#54765). + { + type nihTarg struct { + pos src.XPos + typ types2.Type + } + var nihTargs []nihTarg + + for name, inst := range info.Instances { + for i := 0; i < inst.TypeArgs.Len(); i++ { + if targ := inst.TypeArgs.At(i); isNotInHeap(targ) { + nihTargs = append(nihTargs, nihTarg{m.makeXPos(name.Pos()), targ}) + } + } + } + sort.Slice(nihTargs, func(i, j int) bool { + ti, tj := nihTargs[i], nihTargs[j] + return ti.pos.Before(tj.pos) + }) + for _, targ := range nihTargs { + base.ErrorfAt(targ.pos, 0, "cannot use incomplete (or unallocatable) type as a type argument: %v", targ.typ) + } + } + base.ExitIfErrors() + + // Rewrite range over function to explicit function calls + // with the loop bodies converted into new implicit closures. + // We do this now, before serialization to unified IR, so that if the + // implicit closures are inlined, we will have the unified IR form. + // If we do the rewrite in the back end, like between typecheck and walk, + // then the new implicit closure will not have a unified IR inline body, + // and bodyReaderFor will fail. + rangefunc.Rewrite(pkg, info, files) + + return pkg, info +} + +// fileBase returns a file's position base given a position in the file. 
+func fileBase(pos syntax.Pos) *syntax.PosBase { + base := pos.Base() + for !base.IsFileBase() { // line directive base + base = base.Pos().Base() + } + return base +} + +// A cycleFinder detects anonymous interface cycles (go.dev/issue/56103). +type cycleFinder struct { + cyclic map[*types2.Interface]bool +} + +// hasCycle reports whether typ is part of an anonymous interface cycle. +func (f *cycleFinder) hasCycle(typ *types2.Interface) bool { + // We use Method instead of ExplicitMethod to implicitly expand any + // embedded interfaces. Then we just need to walk any anonymous + // types, keeping track of *types2.Interface types we visit along + // the way. + for i := 0; i < typ.NumMethods(); i++ { + if f.visit(typ.Method(i).Type()) { + return true + } + } + return false +} + +// visit recursively walks typ0 to check any referenced interface types. +func (f *cycleFinder) visit(typ0 types2.Type) bool { + for { // loop for tail recursion + switch typ := types2.Unalias(typ0).(type) { + default: + base.Fatalf("unexpected type: %T", typ) + + case *types2.Basic, *types2.Named, *types2.TypeParam: + return false // named types cannot be part of an anonymous cycle + case *types2.Pointer: + typ0 = typ.Elem() + case *types2.Array: + typ0 = typ.Elem() + case *types2.Chan: + typ0 = typ.Elem() + case *types2.Map: + if f.visit(typ.Key()) { + return true + } + typ0 = typ.Elem() + case *types2.Slice: + typ0 = typ.Elem() + + case *types2.Struct: + for i := 0; i < typ.NumFields(); i++ { + if f.visit(typ.Field(i).Type()) { + return true + } + } + return false + + case *types2.Interface: + // The empty interface (e.g., "any") cannot be part of a cycle. + if typ.NumExplicitMethods() == 0 && typ.NumEmbeddeds() == 0 { + return false + } + + // As an optimization, we wait to allocate cyclic here, after + // we've found at least one other (non-empty) anonymous + // interface. This means when a cycle is present, we need to + // make an extra recursive call to actually detect it. 
But for + // most packages, it allows skipping the map allocation + // entirely. + if x, ok := f.cyclic[typ]; ok { + return x + } + if f.cyclic == nil { + f.cyclic = make(map[*types2.Interface]bool) + } + f.cyclic[typ] = true + if f.hasCycle(typ) { + return true + } + f.cyclic[typ] = false + return false + + case *types2.Signature: + return f.visit(typ.Params()) || f.visit(typ.Results()) + case *types2.Tuple: + for i := 0; i < typ.Len(); i++ { + if f.visit(typ.At(i).Type()) { + return true + } + } + return false + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/lex.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/lex.go new file mode 100644 index 0000000000000000000000000000000000000000..c964eca678416c8d440c14af47b3a8b4a9968865 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/lex.go @@ -0,0 +1,184 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package noder + +import ( + "fmt" + "internal/buildcfg" + "strings" + + "cmd/compile/internal/ir" + "cmd/compile/internal/syntax" +) + +func isSpace(c rune) bool { + return c == ' ' || c == '\t' || c == '\n' || c == '\r' +} + +func isQuoted(s string) bool { + return len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' +} + +const ( + funcPragmas = ir.Nointerface | + ir.Noescape | + ir.Norace | + ir.Nosplit | + ir.Noinline | + ir.NoCheckPtr | + ir.RegisterParams | // TODO(register args) remove after register abi is working + ir.CgoUnsafeArgs | + ir.UintptrKeepAlive | + ir.UintptrEscapes | + ir.Systemstack | + ir.Nowritebarrier | + ir.Nowritebarrierrec | + ir.Yeswritebarrierrec +) + +func pragmaFlag(verb string) ir.PragmaFlag { + switch verb { + case "go:build": + return ir.GoBuildPragma + case "go:nointerface": + if buildcfg.Experiment.FieldTrack { + return ir.Nointerface + } + case "go:noescape": + return ir.Noescape + case "go:norace": + return ir.Norace + case "go:nosplit": + return ir.Nosplit | ir.NoCheckPtr // implies NoCheckPtr (see #34972) + case "go:noinline": + return ir.Noinline + case "go:nocheckptr": + return ir.NoCheckPtr + case "go:systemstack": + return ir.Systemstack + case "go:nowritebarrier": + return ir.Nowritebarrier + case "go:nowritebarrierrec": + return ir.Nowritebarrierrec | ir.Nowritebarrier // implies Nowritebarrier + case "go:yeswritebarrierrec": + return ir.Yeswritebarrierrec + case "go:cgo_unsafe_args": + return ir.CgoUnsafeArgs | ir.NoCheckPtr // implies NoCheckPtr (see #34968) + case "go:uintptrkeepalive": + return ir.UintptrKeepAlive + case "go:uintptrescapes": + // This directive extends //go:uintptrkeepalive by forcing + // uintptr arguments to escape to the heap, which makes stack + // growth safe. 
+ return ir.UintptrEscapes | ir.UintptrKeepAlive // implies UintptrKeepAlive + case "go:registerparams": // TODO(register args) remove after register abi is working + return ir.RegisterParams + } + return 0 +} + +// pragcgo is called concurrently if files are parsed concurrently. +func (p *noder) pragcgo(pos syntax.Pos, text string) { + f := pragmaFields(text) + + verb := strings.TrimPrefix(f[0], "go:") + f[0] = verb + + switch verb { + case "cgo_export_static", "cgo_export_dynamic": + switch { + case len(f) == 2 && !isQuoted(f[1]): + case len(f) == 3 && !isQuoted(f[1]) && !isQuoted(f[2]): + default: + p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf(`usage: //go:%s local [remote]`, verb)}) + return + } + case "cgo_import_dynamic": + switch { + case len(f) == 2 && !isQuoted(f[1]): + case len(f) == 3 && !isQuoted(f[1]) && !isQuoted(f[2]): + case len(f) == 4 && !isQuoted(f[1]) && !isQuoted(f[2]) && isQuoted(f[3]): + f[3] = strings.Trim(f[3], `"`) + if buildcfg.GOOS == "aix" && f[3] != "" { + // On Aix, library pattern must be "lib.a/object.o" + // or "lib.a/libname.so.X" + n := strings.Split(f[3], "/") + if len(n) != 2 || !strings.HasSuffix(n[0], ".a") || (!strings.HasSuffix(n[1], ".o") && !strings.Contains(n[1], ".so.")) { + p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_import_dynamic local [remote ["lib.a/object.o"]]`}) + return + } + } + default: + p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_import_dynamic local [remote ["library"]]`}) + return + } + case "cgo_import_static": + switch { + case len(f) == 2 && !isQuoted(f[1]): + default: + p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_import_static local`}) + return + } + case "cgo_dynamic_linker": + switch { + case len(f) == 2 && isQuoted(f[1]): + f[1] = strings.Trim(f[1], `"`) + default: + p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_dynamic_linker "path"`}) + return + } + case "cgo_ldflag": + switch { + case len(f) == 2 && isQuoted(f[1]): + f[1] = strings.Trim(f[1], `"`) + default: + 
p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_ldflag "arg"`}) + return + } + default: + return + } + p.pragcgobuf = append(p.pragcgobuf, f) +} + +// pragmaFields is similar to strings.FieldsFunc(s, isSpace) +// but does not split when inside double quoted regions and always +// splits before the start and after the end of a double quoted region. +// pragmaFields does not recognize escaped quotes. If a quote in s is not +// closed the part after the opening quote will not be returned as a field. +func pragmaFields(s string) []string { + var a []string + inQuote := false + fieldStart := -1 // Set to -1 when looking for start of field. + for i, c := range s { + switch { + case c == '"': + if inQuote { + inQuote = false + a = append(a, s[fieldStart:i+1]) + fieldStart = -1 + } else { + inQuote = true + if fieldStart >= 0 { + a = append(a, s[fieldStart:i]) + } + fieldStart = i + } + case !inQuote && isSpace(c): + if fieldStart >= 0 { + a = append(a, s[fieldStart:i]) + fieldStart = -1 + } + default: + if fieldStart == -1 { + fieldStart = i + } + } + } + if !inQuote && fieldStart >= 0 { // Last field might end at the end of the string. + a = append(a, s[fieldStart:]) + } + return a +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/lex_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/lex_test.go new file mode 100644 index 0000000000000000000000000000000000000000..85a3f06759ad7b477b111db6b0c4ad1ab71b615d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/lex_test.go @@ -0,0 +1,122 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package noder + +import ( + "reflect" + "runtime" + "testing" + + "cmd/compile/internal/syntax" +) + +func eq(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} + +func TestPragmaFields(t *testing.T) { + var tests = []struct { + in string + want []string + }{ + {"", []string{}}, + {" \t ", []string{}}, + {`""""`, []string{`""`, `""`}}, + {" a'b'c ", []string{"a'b'c"}}, + {"1 2 3 4", []string{"1", "2", "3", "4"}}, + {"\n☺\t☹\n", []string{"☺", "☹"}}, + {`"1 2 " 3 " 4 5"`, []string{`"1 2 "`, `3`, `" 4 5"`}}, + {`"1""2 3""4"`, []string{`"1"`, `"2 3"`, `"4"`}}, + {`12"34"`, []string{`12`, `"34"`}}, + {`12"34 `, []string{`12`}}, + } + + for _, tt := range tests { + got := pragmaFields(tt.in) + if !eq(got, tt.want) { + t.Errorf("pragmaFields(%q) = %v; want %v", tt.in, got, tt.want) + continue + } + } +} + +func TestPragcgo(t *testing.T) { + type testStruct struct { + in string + want []string + } + + var tests = []testStruct{ + {`go:cgo_export_dynamic local`, []string{`cgo_export_dynamic`, `local`}}, + {`go:cgo_export_dynamic local remote`, []string{`cgo_export_dynamic`, `local`, `remote`}}, + {`go:cgo_export_dynamic local' remote'`, []string{`cgo_export_dynamic`, `local'`, `remote'`}}, + {`go:cgo_export_static local`, []string{`cgo_export_static`, `local`}}, + {`go:cgo_export_static local remote`, []string{`cgo_export_static`, `local`, `remote`}}, + {`go:cgo_export_static local' remote'`, []string{`cgo_export_static`, `local'`, `remote'`}}, + {`go:cgo_import_dynamic local`, []string{`cgo_import_dynamic`, `local`}}, + {`go:cgo_import_dynamic local remote`, []string{`cgo_import_dynamic`, `local`, `remote`}}, + {`go:cgo_import_static local`, []string{`cgo_import_static`, `local`}}, + {`go:cgo_import_static local'`, []string{`cgo_import_static`, `local'`}}, + {`go:cgo_dynamic_linker "/path/"`, []string{`cgo_dynamic_linker`, `/path/`}}, + {`go:cgo_dynamic_linker "/p 
ath/"`, []string{`cgo_dynamic_linker`, `/p ath/`}}, + {`go:cgo_ldflag "arg"`, []string{`cgo_ldflag`, `arg`}}, + {`go:cgo_ldflag "a rg"`, []string{`cgo_ldflag`, `a rg`}}, + } + + if runtime.GOOS != "aix" { + tests = append(tests, []testStruct{ + {`go:cgo_import_dynamic local remote "library"`, []string{`cgo_import_dynamic`, `local`, `remote`, `library`}}, + {`go:cgo_import_dynamic local' remote' "lib rary"`, []string{`cgo_import_dynamic`, `local'`, `remote'`, `lib rary`}}, + }...) + } else { + // cgo_import_dynamic with a library is slightly different on AIX + // as the library field must follow the pattern [libc.a/object.o]. + tests = append(tests, []testStruct{ + {`go:cgo_import_dynamic local remote "lib.a/obj.o"`, []string{`cgo_import_dynamic`, `local`, `remote`, `lib.a/obj.o`}}, + // This test must fail. + {`go:cgo_import_dynamic local' remote' "library"`, []string{`: usage: //go:cgo_import_dynamic local [remote ["lib.a/object.o"]]`}}, + }...) + + } + + var p noder + var nopos syntax.Pos + for _, tt := range tests { + + p.err = make(chan syntax.Error) + gotch := make(chan [][]string, 1) + go func() { + p.pragcgobuf = nil + p.pragcgo(nopos, tt.in) + if p.pragcgobuf != nil { + gotch <- p.pragcgobuf + } + }() + + select { + case e := <-p.err: + want := tt.want[0] + if e.Error() != want { + t.Errorf("pragcgo(%q) = %q; want %q", tt.in, e, want) + continue + } + case got := <-gotch: + want := [][]string{tt.want} + if !reflect.DeepEqual(got, want) { + t.Errorf("pragcgo(%q) = %q; want %q", tt.in, got, want) + continue + } + } + + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/linker.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/linker.go new file mode 100644 index 0000000000000000000000000000000000000000..f5667f57ab98f3f820399636d68c1fcd0ff03bb3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/linker.go @@ -0,0 +1,349 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package noder + +import ( + "internal/buildcfg" + "internal/pkgbits" + "io" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/types" + "cmd/internal/goobj" + "cmd/internal/obj" +) + +// This file implements the unified IR linker, which combines the +// local package's stub data with imported package data to produce a +// complete export data file. It also rewrites the compiler's +// extension data sections based on the results of compilation (e.g., +// the function inlining cost and linker symbol index assignments). +// +// TODO(mdempsky): Using the name "linker" here is confusing, because +// readers are likely to mistake references to it for cmd/link. But +// there's a shortage of good names for "something that combines +// multiple parts into a cohesive whole"... e.g., "assembler" and +// "compiler" are also already taken. + +// TODO(mdempsky): Should linker go into pkgbits? Probably the +// low-level linking details can be moved there, but the logic for +// handling extension data needs to stay in the compiler. + +// A linker combines a package's stub export data with any referenced +// elements from imported packages into a single, self-contained +// export data file. +type linker struct { + pw pkgbits.PkgEncoder + + pkgs map[string]pkgbits.Index + decls map[*types.Sym]pkgbits.Index + bodies map[*types.Sym]pkgbits.Index +} + +// relocAll ensures that all elements specified by pr and relocs are +// copied into the output export data file, and returns the +// corresponding indices in the output. 
+func (l *linker) relocAll(pr *pkgReader, relocs []pkgbits.RelocEnt) []pkgbits.RelocEnt { + res := make([]pkgbits.RelocEnt, len(relocs)) + for i, rent := range relocs { + rent.Idx = l.relocIdx(pr, rent.Kind, rent.Idx) + res[i] = rent + } + return res +} + +// relocIdx ensures a single element is copied into the output export +// data file, and returns the corresponding index in the output. +func (l *linker) relocIdx(pr *pkgReader, k pkgbits.RelocKind, idx pkgbits.Index) pkgbits.Index { + assert(pr != nil) + + absIdx := pr.AbsIdx(k, idx) + + if newidx := pr.newindex[absIdx]; newidx != 0 { + return ^newidx + } + + var newidx pkgbits.Index + switch k { + case pkgbits.RelocString: + newidx = l.relocString(pr, idx) + case pkgbits.RelocPkg: + newidx = l.relocPkg(pr, idx) + case pkgbits.RelocObj: + newidx = l.relocObj(pr, idx) + + default: + // Generic relocations. + // + // TODO(mdempsky): Deduplicate more sections? In fact, I think + // every section could be deduplicated. This would also be easier + // if we do external relocations. + + w := l.pw.NewEncoderRaw(k) + l.relocCommon(pr, &w, k, idx) + newidx = w.Idx + } + + pr.newindex[absIdx] = ^newidx + + return newidx +} + +// relocString copies the specified string from pr into the output +// export data file, deduplicating it against other strings. +func (l *linker) relocString(pr *pkgReader, idx pkgbits.Index) pkgbits.Index { + return l.pw.StringIdx(pr.StringIdx(idx)) +} + +// relocPkg copies the specified package from pr into the output +// export data file, rewriting its import path to match how it was +// imported. +// +// TODO(mdempsky): Since CL 391014, we already have the compilation +// unit's import path, so there should be no need to rewrite packages +// anymore. 
+func (l *linker) relocPkg(pr *pkgReader, idx pkgbits.Index) pkgbits.Index { + path := pr.PeekPkgPath(idx) + + if newidx, ok := l.pkgs[path]; ok { + return newidx + } + + r := pr.NewDecoder(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef) + w := l.pw.NewEncoder(pkgbits.RelocPkg, pkgbits.SyncPkgDef) + l.pkgs[path] = w.Idx + + // TODO(mdempsky): We end up leaving an empty string reference here + // from when the package was originally written as "". Probably not + // a big deal, but a little annoying. Maybe relocating + // cross-references in place is the way to go after all. + w.Relocs = l.relocAll(pr, r.Relocs) + + _ = r.String() // original path + w.String(path) + + io.Copy(&w.Data, &r.Data) + + return w.Flush() +} + +// relocObj copies the specified object from pr into the output export +// data file, rewriting its compiler-private extension data (e.g., +// adding inlining cost and escape analysis results for functions). +func (l *linker) relocObj(pr *pkgReader, idx pkgbits.Index) pkgbits.Index { + path, name, tag := pr.PeekObj(idx) + sym := types.NewPkg(path, "").Lookup(name) + + if newidx, ok := l.decls[sym]; ok { + return newidx + } + + if tag == pkgbits.ObjStub && path != "builtin" && path != "unsafe" { + pri, ok := objReader[sym] + if !ok { + base.Fatalf("missing reader for %q.%v", path, name) + } + assert(ok) + + pr = pri.pr + idx = pri.idx + + path2, name2, tag2 := pr.PeekObj(idx) + sym2 := types.NewPkg(path2, "").Lookup(name2) + assert(sym == sym2) + assert(tag2 != pkgbits.ObjStub) + } + + w := l.pw.NewEncoderRaw(pkgbits.RelocObj) + wext := l.pw.NewEncoderRaw(pkgbits.RelocObjExt) + wname := l.pw.NewEncoderRaw(pkgbits.RelocName) + wdict := l.pw.NewEncoderRaw(pkgbits.RelocObjDict) + + l.decls[sym] = w.Idx + assert(wext.Idx == w.Idx) + assert(wname.Idx == w.Idx) + assert(wdict.Idx == w.Idx) + + l.relocCommon(pr, &w, pkgbits.RelocObj, idx) + l.relocCommon(pr, &wname, pkgbits.RelocName, idx) + l.relocCommon(pr, &wdict, pkgbits.RelocObjDict, idx) + + // Generic types 
and functions won't have definitions, and imported + // objects may not either. + obj, _ := sym.Def.(*ir.Name) + local := sym.Pkg == types.LocalPkg + + if local && obj != nil { + wext.Sync(pkgbits.SyncObject1) + switch tag { + case pkgbits.ObjFunc: + l.relocFuncExt(&wext, obj) + case pkgbits.ObjType: + l.relocTypeExt(&wext, obj) + case pkgbits.ObjVar: + l.relocVarExt(&wext, obj) + } + wext.Flush() + } else { + l.relocCommon(pr, &wext, pkgbits.RelocObjExt, idx) + } + + // Check if we need to export the inline bodies for functions and + // methods. + if obj != nil { + if obj.Op() == ir.ONAME && obj.Class == ir.PFUNC { + l.exportBody(obj, local) + } + + if obj.Op() == ir.OTYPE && !obj.Alias() { + if typ := obj.Type(); !typ.IsInterface() { + for _, method := range typ.Methods() { + l.exportBody(method.Nname.(*ir.Name), local) + } + } + } + } + + return w.Idx +} + +// exportBody exports the given function or method's body, if +// appropriate. local indicates whether it's a local function or +// method available on a locally declared type. (Due to cross-package +// type aliases, a method may be imported, but still available on a +// locally declared type.) +func (l *linker) exportBody(obj *ir.Name, local bool) { + assert(obj.Op() == ir.ONAME && obj.Class == ir.PFUNC) + + fn := obj.Func + if fn.Inl == nil { + return // not inlinable anyway + } + + // As a simple heuristic, if the function was declared in this + // package or we inlined it somewhere in this package, then we'll + // (re)export the function body. This isn't perfect, but seems + // reasonable in practice. In particular, it has the nice property + // that in the worst case, adding a blank import ensures the + // function body is available for inlining. + // + // TODO(mdempsky): Reimplement the reachable method crawling logic + // from typecheck/crawler.go. 
+ exportBody := local || fn.Inl.HaveDcl + if !exportBody { + return + } + + sym := obj.Sym() + if _, ok := l.bodies[sym]; ok { + // Due to type aliases, we might visit methods multiple times. + base.AssertfAt(obj.Type().Recv() != nil, obj.Pos(), "expected method: %v", obj) + return + } + + pri, ok := bodyReaderFor(fn) + assert(ok) + l.bodies[sym] = l.relocIdx(pri.pr, pkgbits.RelocBody, pri.idx) +} + +// relocCommon copies the specified element from pr into w, +// recursively relocating any referenced elements as well. +func (l *linker) relocCommon(pr *pkgReader, w *pkgbits.Encoder, k pkgbits.RelocKind, idx pkgbits.Index) { + r := pr.NewDecoderRaw(k, idx) + w.Relocs = l.relocAll(pr, r.Relocs) + io.Copy(&w.Data, &r.Data) + w.Flush() +} + +func (l *linker) pragmaFlag(w *pkgbits.Encoder, pragma ir.PragmaFlag) { + w.Sync(pkgbits.SyncPragma) + w.Int(int(pragma)) +} + +func (l *linker) relocFuncExt(w *pkgbits.Encoder, name *ir.Name) { + w.Sync(pkgbits.SyncFuncExt) + + l.pragmaFlag(w, name.Func.Pragma) + l.linkname(w, name) + + if buildcfg.GOARCH == "wasm" { + if name.Func.WasmImport != nil { + w.String(name.Func.WasmImport.Module) + w.String(name.Func.WasmImport.Name) + } else { + w.String("") + w.String("") + } + } + + // Relocated extension data. + w.Bool(true) + + // Record definition ABI so cross-ABI calls can be direct. + // This is important for the performance of calling some + // common functions implemented in assembly (e.g., bytealg). + w.Uint64(uint64(name.Func.ABI)) + + // Escape analysis. 
+ for _, f := range name.Type().RecvParams() { + w.String(f.Note) + } + + if inl := name.Func.Inl; w.Bool(inl != nil) { + w.Len(int(inl.Cost)) + w.Bool(inl.CanDelayResults) + if buildcfg.Experiment.NewInliner { + w.String(inl.Properties) + } + } + + w.Sync(pkgbits.SyncEOF) +} + +func (l *linker) relocTypeExt(w *pkgbits.Encoder, name *ir.Name) { + w.Sync(pkgbits.SyncTypeExt) + + typ := name.Type() + + l.pragmaFlag(w, name.Pragma()) + + // For type T, export the index of type descriptor symbols of T and *T. + l.lsymIdx(w, "", reflectdata.TypeLinksym(typ)) + l.lsymIdx(w, "", reflectdata.TypeLinksym(typ.PtrTo())) + + if typ.Kind() != types.TINTER { + for _, method := range typ.Methods() { + l.relocFuncExt(w, method.Nname.(*ir.Name)) + } + } +} + +func (l *linker) relocVarExt(w *pkgbits.Encoder, name *ir.Name) { + w.Sync(pkgbits.SyncVarExt) + l.linkname(w, name) +} + +func (l *linker) linkname(w *pkgbits.Encoder, name *ir.Name) { + w.Sync(pkgbits.SyncLinkname) + + linkname := name.Sym().Linkname + if !l.lsymIdx(w, linkname, name.Linksym()) { + w.String(linkname) + } +} + +func (l *linker) lsymIdx(w *pkgbits.Encoder, linkname string, lsym *obj.LSym) bool { + if lsym.PkgIdx > goobj.PkgIdxSelf || (lsym.PkgIdx == goobj.PkgIdxInvalid && !lsym.Indexed()) || linkname != "" { + w.Int64(-1) + return false + } + + // For a defined symbol, export its index. + // For re-exporting an imported symbol, pass its index through. + w.Int64(int64(lsym.SymIdx)) + return true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/noder.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/noder.go new file mode 100644 index 0000000000000000000000000000000000000000..1652dc66187eb53261bbef6cbac082e38762f8f3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/noder.go @@ -0,0 +1,449 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package noder + +import ( + "errors" + "fmt" + "internal/buildcfg" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/syntax" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/objabi" +) + +func LoadPackage(filenames []string) { + base.Timer.Start("fe", "parse") + + // Limit the number of simultaneously open files. + sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10) + + noders := make([]*noder, len(filenames)) + for i := range noders { + p := noder{ + err: make(chan syntax.Error), + } + noders[i] = &p + } + + // Move the entire syntax processing logic into a separate goroutine to avoid blocking on the "sem". + go func() { + for i, filename := range filenames { + filename := filename + p := noders[i] + sem <- struct{}{} + go func() { + defer func() { <-sem }() + defer close(p.err) + fbase := syntax.NewFileBase(filename) + + f, err := os.Open(filename) + if err != nil { + p.error(syntax.Error{Msg: err.Error()}) + return + } + defer f.Close() + + p.file, _ = syntax.Parse(fbase, f, p.error, p.pragma, syntax.CheckBranches) // errors are tracked via p.error + }() + } + }() + + var lines uint + var m posMap + for _, p := range noders { + for e := range p.err { + base.ErrorfAt(m.makeXPos(e.Pos), 0, "%s", e.Msg) + } + if p.file == nil { + base.ErrorExit() + } + lines += p.file.EOF.Line() + } + base.Timer.AddEvent(int64(lines), "lines") + + unified(m, noders) +} + +// trimFilename returns the "trimmed" filename of b, which is the +// absolute filename after applying -trimpath processing. This +// filename form is suitable for use in object files and export data. 
+// +// If b's filename has already been trimmed (i.e., because it was read +// in from an imported package's export data), then the filename is +// returned unchanged. +func trimFilename(b *syntax.PosBase) string { + filename := b.Filename() + if !b.Trimmed() { + dir := "" + if b.IsFileBase() { + dir = base.Ctxt.Pathname + } + filename = objabi.AbsFile(dir, filename, base.Flag.TrimPath) + } + return filename +} + +// noder transforms package syntax's AST into a Node tree. +type noder struct { + file *syntax.File + linknames []linkname + pragcgobuf [][]string + err chan syntax.Error +} + +// linkname records a //go:linkname directive. +type linkname struct { + pos syntax.Pos + local string + remote string +} + +var unOps = [...]ir.Op{ + syntax.Recv: ir.ORECV, + syntax.Mul: ir.ODEREF, + syntax.And: ir.OADDR, + + syntax.Not: ir.ONOT, + syntax.Xor: ir.OBITNOT, + syntax.Add: ir.OPLUS, + syntax.Sub: ir.ONEG, +} + +var binOps = [...]ir.Op{ + syntax.OrOr: ir.OOROR, + syntax.AndAnd: ir.OANDAND, + + syntax.Eql: ir.OEQ, + syntax.Neq: ir.ONE, + syntax.Lss: ir.OLT, + syntax.Leq: ir.OLE, + syntax.Gtr: ir.OGT, + syntax.Geq: ir.OGE, + + syntax.Add: ir.OADD, + syntax.Sub: ir.OSUB, + syntax.Or: ir.OOR, + syntax.Xor: ir.OXOR, + + syntax.Mul: ir.OMUL, + syntax.Div: ir.ODIV, + syntax.Rem: ir.OMOD, + syntax.And: ir.OAND, + syntax.AndNot: ir.OANDNOT, + syntax.Shl: ir.OLSH, + syntax.Shr: ir.ORSH, +} + +// error is called concurrently if files are parsed concurrently. +func (p *noder) error(err error) { + p.err <- err.(syntax.Error) +} + +// pragmas that are allowed in the std lib, but don't have +// a syntax.Pragma value (see lex.go) associated with them. 
+var allowedStdPragmas = map[string]bool{ + "go:cgo_export_static": true, + "go:cgo_export_dynamic": true, + "go:cgo_import_static": true, + "go:cgo_import_dynamic": true, + "go:cgo_ldflag": true, + "go:cgo_dynamic_linker": true, + "go:embed": true, + "go:generate": true, +} + +// *pragmas is the value stored in a syntax.pragmas during parsing. +type pragmas struct { + Flag ir.PragmaFlag // collected bits + Pos []pragmaPos // position of each individual flag + Embeds []pragmaEmbed + WasmImport *WasmImport +} + +// WasmImport stores metadata associated with the //go:wasmimport pragma +type WasmImport struct { + Pos syntax.Pos + Module string + Name string +} + +type pragmaPos struct { + Flag ir.PragmaFlag + Pos syntax.Pos +} + +type pragmaEmbed struct { + Pos syntax.Pos + Patterns []string +} + +func (p *noder) checkUnusedDuringParse(pragma *pragmas) { + for _, pos := range pragma.Pos { + if pos.Flag&pragma.Flag != 0 { + p.error(syntax.Error{Pos: pos.Pos, Msg: "misplaced compiler directive"}) + } + } + if len(pragma.Embeds) > 0 { + for _, e := range pragma.Embeds { + p.error(syntax.Error{Pos: e.Pos, Msg: "misplaced go:embed directive"}) + } + } + if pragma.WasmImport != nil { + p.error(syntax.Error{Pos: pragma.WasmImport.Pos, Msg: "misplaced go:wasmimport directive"}) + } +} + +// pragma is called concurrently if files are parsed concurrently. +func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.Pragma) syntax.Pragma { + pragma, _ := old.(*pragmas) + if pragma == nil { + pragma = new(pragmas) + } + + if text == "" { + // unused pragma; only called with old != nil. 
+ p.checkUnusedDuringParse(pragma) + return nil + } + + if strings.HasPrefix(text, "line ") { + // line directives are handled by syntax package + panic("unreachable") + } + + if !blankLine { + // directive must be on line by itself + p.error(syntax.Error{Pos: pos, Msg: "misplaced compiler directive"}) + return pragma + } + + switch { + case strings.HasPrefix(text, "go:wasmimport "): + f := strings.Fields(text) + if len(f) != 3 { + p.error(syntax.Error{Pos: pos, Msg: "usage: //go:wasmimport importmodule importname"}) + break + } + + if buildcfg.GOARCH == "wasm" { + // Only actually use them if we're compiling to WASM though. + pragma.WasmImport = &WasmImport{ + Pos: pos, + Module: f[1], + Name: f[2], + } + } + case strings.HasPrefix(text, "go:linkname "): + f := strings.Fields(text) + if !(2 <= len(f) && len(f) <= 3) { + p.error(syntax.Error{Pos: pos, Msg: "usage: //go:linkname localname [linkname]"}) + break + } + // The second argument is optional. If omitted, we use + // the default object symbol name for this and + // linkname only serves to mark this symbol as + // something that may be referenced via the object + // symbol name from another package. + var target string + if len(f) == 3 { + target = f[2] + } else if base.Ctxt.Pkgpath != "" { + // Use the default object symbol name if the + // user didn't provide one. + target = objabi.PathToPrefix(base.Ctxt.Pkgpath) + "." 
+ f[1] + } else { + panic("missing pkgpath") + } + p.linknames = append(p.linknames, linkname{pos, f[1], target}) + + case text == "go:embed", strings.HasPrefix(text, "go:embed "): + args, err := parseGoEmbed(text[len("go:embed"):]) + if err != nil { + p.error(syntax.Error{Pos: pos, Msg: err.Error()}) + } + if len(args) == 0 { + p.error(syntax.Error{Pos: pos, Msg: "usage: //go:embed pattern..."}) + break + } + pragma.Embeds = append(pragma.Embeds, pragmaEmbed{pos, args}) + + case strings.HasPrefix(text, "go:cgo_import_dynamic "): + // This is permitted for general use because Solaris + // code relies on it in golang.org/x/sys/unix and others. + fields := pragmaFields(text) + if len(fields) >= 4 { + lib := strings.Trim(fields[3], `"`) + if lib != "" && !safeArg(lib) && !isCgoGeneratedFile(pos) { + p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("invalid library name %q in cgo_import_dynamic directive", lib)}) + } + p.pragcgo(pos, text) + pragma.Flag |= pragmaFlag("go:cgo_import_dynamic") + break + } + fallthrough + case strings.HasPrefix(text, "go:cgo_"): + // For security, we disallow //go:cgo_* directives other + // than cgo_import_dynamic outside cgo-generated files. + // Exception: they are allowed in the standard library, for runtime and syscall. 
+ if !isCgoGeneratedFile(pos) && !base.Flag.Std { + p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in cgo-generated code", text)}) + } + p.pragcgo(pos, text) + fallthrough // because of //go:cgo_unsafe_args + default: + verb := text + if i := strings.Index(text, " "); i >= 0 { + verb = verb[:i] + } + flag := pragmaFlag(verb) + const runtimePragmas = ir.Systemstack | ir.Nowritebarrier | ir.Nowritebarrierrec | ir.Yeswritebarrierrec + if !base.Flag.CompilingRuntime && flag&runtimePragmas != 0 { + p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in runtime", verb)}) + } + if flag == ir.UintptrKeepAlive && !base.Flag.Std { + p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s is only allowed in the standard library", verb)}) + } + if flag == 0 && !allowedStdPragmas[verb] && base.Flag.Std { + p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s is not allowed in the standard library", verb)}) + } + pragma.Flag |= flag + pragma.Pos = append(pragma.Pos, pragmaPos{flag, pos}) + } + + return pragma +} + +// isCgoGeneratedFile reports whether pos is in a file +// generated by cgo, which is to say a file with name +// beginning with "_cgo_". Such files are allowed to +// contain cgo directives, and for security reasons +// (primarily misuse of linker flags), other files are not. +// See golang.org/issue/23672. +// Note that cmd/go ignores files whose names start with underscore, +// so the only _cgo_ files we will see from cmd/go are generated by cgo. +// It's easy to bypass this check by calling the compiler directly; +// we only protect against uses by cmd/go. +func isCgoGeneratedFile(pos syntax.Pos) bool { + // We need the absolute file, independent of //line directives, + // so we call pos.Base().Pos(). 
+ return strings.HasPrefix(filepath.Base(trimFilename(pos.Base().Pos().Base())), "_cgo_") +} + +// safeArg reports whether arg is a "safe" command-line argument, +// meaning that when it appears in a command-line, it probably +// doesn't have some special meaning other than its own name. +// This is copied from SafeArg in cmd/go/internal/load/pkg.go. +func safeArg(name string) bool { + if name == "" { + return false + } + c := name[0] + return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '.' || c == '_' || c == '/' || c >= utf8.RuneSelf +} + +// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns. +// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings. +// go/build/read.go also processes these strings and contains similar logic. +func parseGoEmbed(args string) ([]string, error) { + var list []string + for args = strings.TrimSpace(args); args != ""; args = strings.TrimSpace(args) { + var path string + Switch: + switch args[0] { + default: + i := len(args) + for j, c := range args { + if unicode.IsSpace(c) { + i = j + break + } + } + path = args[:i] + args = args[i:] + + case '`': + i := strings.Index(args[1:], "`") + if i < 0 { + return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args) + } + path = args[1 : 1+i] + args = args[1+i+1:] + + case '"': + i := 1 + for ; i < len(args); i++ { + if args[i] == '\\' { + i++ + continue + } + if args[i] == '"' { + q, err := strconv.Unquote(args[:i+1]) + if err != nil { + return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args[:i+1]) + } + path = q + args = args[i+1:] + break Switch + } + } + if i >= len(args) { + return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args) + } + } + + if args != "" { + r, _ := utf8.DecodeRuneInString(args) + if !unicode.IsSpace(r) { + return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args) + } + } + list = append(list, path) + } + 
return list, nil +} + +// A function named init is a special case. +// It is called by the initialization before main is run. +// To make it unique within a package and also uncallable, +// the name, normally "pkg.init", is altered to "pkg.init.0". +var renameinitgen int + +func Renameinit() *types.Sym { + s := typecheck.LookupNum("init.", renameinitgen) + renameinitgen++ + return s +} + +func checkEmbed(decl *syntax.VarDecl, haveEmbed, withinFunc bool) error { + switch { + case !haveEmbed: + return errors.New("go:embed only allowed in Go files that import \"embed\"") + case len(decl.NameList) > 1: + return errors.New("go:embed cannot apply to multiple vars") + case decl.Values != nil: + return errors.New("go:embed cannot apply to var with initializer") + case decl.Type == nil: + // Should not happen, since Values == nil now. + return errors.New("go:embed cannot apply to var without type") + case withinFunc: + return errors.New("go:embed cannot apply to var inside func") + case !types.AllowsGoVersion(1, 16): + return fmt.Errorf("go:embed requires go1.16 or later (-lang was set to %s; check go.mod)", base.Flag.Lang) + + default: + return nil + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/posmap.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/posmap.go new file mode 100644 index 0000000000000000000000000000000000000000..439daf454e6fc3c620b1c3dc16952e7c4a3cedfd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/posmap.go @@ -0,0 +1,74 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package noder + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/syntax" + "cmd/internal/src" +) + +// A posMap handles mapping from syntax.Pos to src.XPos. 
+type posMap struct { + bases map[*syntax.PosBase]*src.PosBase + cache struct { + last *syntax.PosBase + base *src.PosBase + } +} + +type poser interface{ Pos() syntax.Pos } +type ender interface{ End() syntax.Pos } + +func (m *posMap) pos(p poser) src.XPos { return m.makeXPos(p.Pos()) } +func (m *posMap) end(p ender) src.XPos { return m.makeXPos(p.End()) } + +func (m *posMap) makeXPos(pos syntax.Pos) src.XPos { + // Predeclared objects (e.g., the result parameter for error.Error) + // do not have a position. + if !pos.IsKnown() { + return src.NoXPos + } + + posBase := m.makeSrcPosBase(pos.Base()) + return base.Ctxt.PosTable.XPos(src.MakePos(posBase, pos.Line(), pos.Col())) +} + +// makeSrcPosBase translates from a *syntax.PosBase to a *src.PosBase. +func (m *posMap) makeSrcPosBase(b0 *syntax.PosBase) *src.PosBase { + // fast path: most likely PosBase hasn't changed + if m.cache.last == b0 { + return m.cache.base + } + + b1, ok := m.bases[b0] + if !ok { + fn := b0.Filename() + absfn := trimFilename(b0) + + if b0.IsFileBase() { + b1 = src.NewFileBase(fn, absfn) + } else { + // line directive base + p0 := b0.Pos() + p0b := p0.Base() + if p0b == b0 { + panic("infinite recursion in makeSrcPosBase") + } + p1 := src.MakePos(m.makeSrcPosBase(p0b), p0.Line(), p0.Col()) + b1 = src.NewLinePragmaBase(p1, fn, absfn, b0.Line(), b0.Col()) + } + if m.bases == nil { + m.bases = make(map[*syntax.PosBase]*src.PosBase) + } + m.bases[b0] = b1 + } + + // update cache + m.cache.last = b0 + m.cache.base = b1 + + return b1 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/quirks.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/quirks.go new file mode 100644 index 0000000000000000000000000000000000000000..dd9cec9250e98bd931178d19517e62f46b71bf22 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/quirks.go @@ -0,0 +1,79 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package noder + +import ( + "fmt" + + "cmd/compile/internal/syntax" +) + +// typeExprEndPos returns the position that noder would leave base.Pos +// after parsing the given type expression. +// +// Deprecated: This function exists to emulate position semantics from +// Go 1.17, necessary for compatibility with the backend DWARF +// generation logic that assigns variables to their appropriate scope. +func typeExprEndPos(expr0 syntax.Expr) syntax.Pos { + for { + switch expr := expr0.(type) { + case *syntax.Name: + return expr.Pos() + case *syntax.SelectorExpr: + return expr.X.Pos() + + case *syntax.ParenExpr: + expr0 = expr.X + + case *syntax.Operation: + assert(expr.Op == syntax.Mul) + assert(expr.Y == nil) + expr0 = expr.X + + case *syntax.ArrayType: + expr0 = expr.Elem + case *syntax.ChanType: + expr0 = expr.Elem + case *syntax.DotsType: + expr0 = expr.Elem + case *syntax.MapType: + expr0 = expr.Value + case *syntax.SliceType: + expr0 = expr.Elem + + case *syntax.StructType: + return expr.Pos() + + case *syntax.InterfaceType: + expr0 = lastFieldType(expr.MethodList) + if expr0 == nil { + return expr.Pos() + } + + case *syntax.FuncType: + expr0 = lastFieldType(expr.ResultList) + if expr0 == nil { + expr0 = lastFieldType(expr.ParamList) + if expr0 == nil { + return expr.Pos() + } + } + + case *syntax.IndexExpr: // explicit type instantiation + targs := syntax.UnpackListExpr(expr.Index) + expr0 = targs[len(targs)-1] + + default: + panic(fmt.Sprintf("%s: unexpected type expression %v", expr.Pos(), syntax.String(expr))) + } + } +} + +func lastFieldType(fields []*syntax.Field) syntax.Expr { + if len(fields) == 0 { + return nil + } + return fields[len(fields)-1].Type +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/reader.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/reader.go new file mode 100644 index 
0000000000000000000000000000000000000000..2dddd201659024f9aa45d928e84df3d2a15996d8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/reader.go @@ -0,0 +1,3941 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package noder + +import ( + "encoding/hex" + "fmt" + "go/constant" + "internal/buildcfg" + "internal/pkgbits" + "path/filepath" + "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/dwarfgen" + "cmd/compile/internal/inline" + "cmd/compile/internal/inline/interleaved" + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/staticinit" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/notsha256" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" +) + +// This file implements cmd/compile backend's reader for the Unified +// IR export data. + +// A pkgReader reads Unified IR export data. +type pkgReader struct { + pkgbits.PkgDecoder + + // Indices for encoded things; lazily populated as needed. + // + // Note: Objects (i.e., ir.Names) are lazily instantiated by + // populating their types.Sym.Def; see objReader below. + + posBases []*src.PosBase + pkgs []*types.Pkg + typs []*types.Type + + // offset for rewriting the given (absolute!) index into the output, + // but bitwise inverted so we can detect if we're missing the entry + // or not. 
+ newindex []pkgbits.Index +} + +func newPkgReader(pr pkgbits.PkgDecoder) *pkgReader { + return &pkgReader{ + PkgDecoder: pr, + + posBases: make([]*src.PosBase, pr.NumElems(pkgbits.RelocPosBase)), + pkgs: make([]*types.Pkg, pr.NumElems(pkgbits.RelocPkg)), + typs: make([]*types.Type, pr.NumElems(pkgbits.RelocType)), + + newindex: make([]pkgbits.Index, pr.TotalElems()), + } +} + +// A pkgReaderIndex compactly identifies an index (and its +// corresponding dictionary) within a package's export data. +type pkgReaderIndex struct { + pr *pkgReader + idx pkgbits.Index + dict *readerDict + methodSym *types.Sym + + synthetic func(pos src.XPos, r *reader) +} + +func (pri pkgReaderIndex) asReader(k pkgbits.RelocKind, marker pkgbits.SyncMarker) *reader { + if pri.synthetic != nil { + return &reader{synthetic: pri.synthetic} + } + + r := pri.pr.newReader(k, pri.idx, marker) + r.dict = pri.dict + r.methodSym = pri.methodSym + return r +} + +func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.NewDecoder(k, idx, marker), + p: pr, + } +} + +// A reader provides APIs for reading an individual element. +type reader struct { + pkgbits.Decoder + + p *pkgReader + + dict *readerDict + + // TODO(mdempsky): The state below is all specific to reading + // function bodies. It probably makes sense to split it out + // separately so that it doesn't take up space in every reader + // instance. + + curfn *ir.Func + locals []*ir.Name + closureVars []*ir.Name + + // funarghack is used during inlining to suppress setting + // Field.Nname to the inlined copies of the parameters. This is + // necessary because we reuse the same types.Type as the original + // function, and most of the compiler still relies on field.Nname to + // find parameters/results. + funarghack bool + + // methodSym is the name of method's name, if reading a method. + // It's nil if reading a normal function or closure body. 
+ methodSym *types.Sym + + // dictParam is the .dict param, if any. + dictParam *ir.Name + + // synthetic is a callback function to construct a synthetic + // function body. It's used for creating the bodies of function + // literals used to curry arguments to shaped functions. + synthetic func(pos src.XPos, r *reader) + + // scopeVars is a stack tracking the number of variables declared in + // the current function at the moment each open scope was opened. + scopeVars []int + marker dwarfgen.ScopeMarker + lastCloseScopePos src.XPos + + // === details for handling inline body expansion === + + // If we're reading in a function body because of inlining, this is + // the call that we're inlining for. + inlCaller *ir.Func + inlCall *ir.CallExpr + inlFunc *ir.Func + inlTreeIndex int + inlPosBases map[*src.PosBase]*src.PosBase + + // suppressInlPos tracks whether position base rewriting for + // inlining should be suppressed. See funcLit. + suppressInlPos int + + delayResults bool + + // Label to return to. + retlabel *types.Sym +} + +// A readerDict represents an instantiated "compile-time dictionary," +// used for resolving any derived types needed for instantiating a +// generic object. +// +// A compile-time dictionary can either be "shaped" or "non-shaped." +// Shaped compile-time dictionaries are only used for instantiating +// shaped type definitions and function bodies, while non-shaped +// compile-time dictionaries are used for instantiating runtime +// dictionaries. +type readerDict struct { + shaped bool // whether this is a shaped dictionary + + // baseSym is the symbol for the object this dictionary belongs to. + // If the object is an instantiated function or defined type, then + // baseSym is the mangled symbol, including any type arguments. + baseSym *types.Sym + + // For non-shaped dictionaries, shapedObj is a reference to the + // corresponding shaped object (always a function or defined type). 
+ shapedObj *ir.Name + + // targs holds the implicit and explicit type arguments in use for + // reading the current object. For example: + // + // func F[T any]() { + // type X[U any] struct { t T; u U } + // var _ X[string] + // } + // + // var _ = F[int] + // + // While instantiating F[int], we need to in turn instantiate + // X[string]. [int] and [string] are explicit type arguments for F + // and X, respectively; but [int] is also the implicit type + // arguments for X. + // + // (As an analogy to function literals, explicits are the function + // literal's formal parameters, while implicits are variables + // captured by the function literal.) + targs []*types.Type + + // implicits counts how many of types within targs are implicit type + // arguments; the rest are explicit. + implicits int + + derived []derivedInfo // reloc index of the derived type's descriptor + derivedTypes []*types.Type // slice of previously computed derived types + + // These slices correspond to entries in the runtime dictionary. + typeParamMethodExprs []readerMethodExprInfo + subdicts []objInfo + rtypes []typeInfo + itabs []itabInfo +} + +type readerMethodExprInfo struct { + typeParamIdx int + method *types.Sym +} + +func setType(n ir.Node, typ *types.Type) { + n.SetType(typ) + n.SetTypecheck(1) +} + +func setValue(name *ir.Name, val constant.Value) { + name.SetVal(val) + name.Defn = nil +} + +// @@@ Positions + +// pos reads a position from the bitstream. +func (r *reader) pos() src.XPos { + return base.Ctxt.PosTable.XPos(r.pos0()) +} + +// origPos reads a position from the bitstream, and returns both the +// original raw position and an inlining-adjusted position. 
+func (r *reader) origPos() (origPos, inlPos src.XPos) { + r.suppressInlPos++ + origPos = r.pos() + r.suppressInlPos-- + inlPos = r.inlPos(origPos) + return +} + +func (r *reader) pos0() src.Pos { + r.Sync(pkgbits.SyncPos) + if !r.Bool() { + return src.NoPos + } + + posBase := r.posBase() + line := r.Uint() + col := r.Uint() + return src.MakePos(posBase, line, col) +} + +// posBase reads a position base from the bitstream. +func (r *reader) posBase() *src.PosBase { + return r.inlPosBase(r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase))) +} + +// posBaseIdx returns the specified position base, reading it first if +// needed. +func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) *src.PosBase { + if b := pr.posBases[idx]; b != nil { + return b + } + + r := pr.newReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) + var b *src.PosBase + + absFilename := r.String() + filename := absFilename + + // For build artifact stability, the export data format only + // contains the "absolute" filename as returned by objabi.AbsFile. + // However, some tests (e.g., test/run.go's asmcheck tests) expect + // to see the full, original filename printed out. Re-expanding + // "$GOROOT" to buildcfg.GOROOT is a close-enough approximation to + // satisfy this. + // + // The export data format only ever uses slash paths + // (for cross-operating-system reproducible builds), + // but error messages need to use native paths (backslash on Windows) + // as if they had been specified on the command line. + // (The go command always passes native paths to the compiler.) 
+ const dollarGOROOT = "$GOROOT" + if buildcfg.GOROOT != "" && strings.HasPrefix(filename, dollarGOROOT) { + filename = filepath.FromSlash(buildcfg.GOROOT + filename[len(dollarGOROOT):]) + } + + if r.Bool() { + b = src.NewFileBase(filename, absFilename) + } else { + pos := r.pos0() + line := r.Uint() + col := r.Uint() + b = src.NewLinePragmaBase(pos, filename, absFilename, line, col) + } + + pr.posBases[idx] = b + return b +} + +// inlPosBase returns the inlining-adjusted src.PosBase corresponding +// to oldBase, which must be a non-inlined position. When not +// inlining, this is just oldBase. +func (r *reader) inlPosBase(oldBase *src.PosBase) *src.PosBase { + if index := oldBase.InliningIndex(); index >= 0 { + base.Fatalf("oldBase %v already has inlining index %v", oldBase, index) + } + + if r.inlCall == nil || r.suppressInlPos != 0 { + return oldBase + } + + if newBase, ok := r.inlPosBases[oldBase]; ok { + return newBase + } + + newBase := src.NewInliningBase(oldBase, r.inlTreeIndex) + r.inlPosBases[oldBase] = newBase + return newBase +} + +// inlPos returns the inlining-adjusted src.XPos corresponding to +// xpos, which must be a non-inlined position. When not inlining, this +// is just xpos. +func (r *reader) inlPos(xpos src.XPos) src.XPos { + pos := base.Ctxt.PosTable.Pos(xpos) + pos.SetBase(r.inlPosBase(pos.Base())) + return base.Ctxt.PosTable.XPos(pos) +} + +// @@@ Packages + +// pkg reads a package reference from the bitstream. +func (r *reader) pkg() *types.Pkg { + r.Sync(pkgbits.SyncPkg) + return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg)) +} + +// pkgIdx returns the specified package from the export data, reading +// it first if needed. +func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Pkg { + if pkg := pr.pkgs[idx]; pkg != nil { + return pkg + } + + pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg() + pr.pkgs[idx] = pkg + return pkg +} + +// doPkg reads a package definition from the bitstream. 
+func (r *reader) doPkg() *types.Pkg { + path := r.String() + switch path { + case "": + path = r.p.PkgPath() + case "builtin": + return types.BuiltinPkg + case "unsafe": + return types.UnsafePkg + } + + name := r.String() + + pkg := types.NewPkg(path, "") + + if pkg.Name == "" { + pkg.Name = name + } else { + base.Assertf(pkg.Name == name, "package %q has name %q, but want %q", pkg.Path, pkg.Name, name) + } + + return pkg +} + +// @@@ Types + +func (r *reader) typ() *types.Type { + return r.typWrapped(true) +} + +// typWrapped is like typ, but allows suppressing generation of +// unnecessary wrappers as a compile-time optimization. +func (r *reader) typWrapped(wrapped bool) *types.Type { + return r.p.typIdx(r.typInfo(), r.dict, wrapped) +} + +func (r *reader) typInfo() typeInfo { + r.Sync(pkgbits.SyncType) + if r.Bool() { + return typeInfo{idx: pkgbits.Index(r.Len()), derived: true} + } + return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false} +} + +// typListIdx returns a list of the specified types, resolving derived +// types within the given dictionary. +func (pr *pkgReader) typListIdx(infos []typeInfo, dict *readerDict) []*types.Type { + typs := make([]*types.Type, len(infos)) + for i, info := range infos { + typs[i] = pr.typIdx(info, dict, true) + } + return typs +} + +// typIdx returns the specified type. If info specifies a derived +// type, it's resolved within the given dictionary. If wrapped is +// true, then method wrappers will be generated, if appropriate. 
+func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict, wrapped bool) *types.Type { + idx := info.idx + var where **types.Type + if info.derived { + where = &dict.derivedTypes[idx] + idx = dict.derived[idx].idx + } else { + where = &pr.typs[idx] + } + + if typ := *where; typ != nil { + return typ + } + + r := pr.newReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) + r.dict = dict + + typ := r.doTyp() + assert(typ != nil) + + // For recursive type declarations involving interfaces and aliases, + // above r.doTyp() call may have already set pr.typs[idx], so just + // double check and return the type. + // + // Example: + // + // type F = func(I) + // + // type I interface { + // m(F) + // } + // + // The writer writes data types in following index order: + // + // 0: func(I) + // 1: I + // 2: interface{m(func(I))} + // + // The reader resolves it in following index order: + // + // 0 -> 1 -> 2 -> 0 -> 1 + // + // and can divide in logically 2 steps: + // + // - 0 -> 1 : first time the reader reach type I, + // it creates new named type with symbol I. + // + // - 2 -> 0 -> 1: the reader ends up reaching symbol I again, + // now the symbol I was setup in above step, so + // the reader just return the named type. + // + // Now, the functions called return, the pr.typs looks like below: + // + // - 0 -> 1 -> 2 -> 0 : [ I ] + // - 0 -> 1 -> 2 : [func(I) I ] + // - 0 -> 1 : [func(I) I interface { "".m(func("".I)) }] + // + // The idx 1, corresponding with type I was resolved successfully + // after r.doTyp() call. + + if prev := *where; prev != nil { + return prev + } + + if wrapped { + // Only cache if we're adding wrappers, so that other callers that + // find a cached type know it was wrapped. 
+ *where = typ + + r.needWrapper(typ) + } + + if !typ.IsUntyped() { + types.CheckSize(typ) + } + + return typ +} + +func (r *reader) doTyp() *types.Type { + switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag { + default: + panic(fmt.Sprintf("unexpected type: %v", tag)) + + case pkgbits.TypeBasic: + return *basics[r.Len()] + + case pkgbits.TypeNamed: + obj := r.obj() + assert(obj.Op() == ir.OTYPE) + return obj.Type() + + case pkgbits.TypeTypeParam: + return r.dict.targs[r.Len()] + + case pkgbits.TypeArray: + len := int64(r.Uint64()) + return types.NewArray(r.typ(), len) + case pkgbits.TypeChan: + dir := dirs[r.Len()] + return types.NewChan(r.typ(), dir) + case pkgbits.TypeMap: + return types.NewMap(r.typ(), r.typ()) + case pkgbits.TypePointer: + return types.NewPtr(r.typ()) + case pkgbits.TypeSignature: + return r.signature(nil) + case pkgbits.TypeSlice: + return types.NewSlice(r.typ()) + case pkgbits.TypeStruct: + return r.structType() + case pkgbits.TypeInterface: + return r.interfaceType() + case pkgbits.TypeUnion: + return r.unionType() + } +} + +func (r *reader) unionType() *types.Type { + // In the types1 universe, we only need to handle value types. + // Impure interfaces (i.e., interfaces with non-trivial type sets + // like "int | string") can only appear as type parameter bounds, + // and this is enforced by the types2 type checker. + // + // However, type unions can still appear in pure interfaces if the + // type union is equivalent to "any". E.g., typeparam/issue52124.go + // declares variables with the type "interface { any | int }". + // + // To avoid needing to represent type unions in types1 (since we + // don't have any uses for that today anyway), we simply fold them + // to "any". + + // TODO(mdempsky): Restore consistency check to make sure folding to + // "any" is safe. This is unfortunately tricky, because a pure + // interface can reference impure interfaces too, including + // cyclically (#60117). 
+ if false { + pure := false + for i, n := 0, r.Len(); i < n; i++ { + _ = r.Bool() // tilde + term := r.typ() + if term.IsEmptyInterface() { + pure = true + } + } + if !pure { + base.Fatalf("impure type set used in value type") + } + } + + return types.Types[types.TINTER] +} + +func (r *reader) interfaceType() *types.Type { + nmethods, nembeddeds := r.Len(), r.Len() + implicit := nmethods == 0 && nembeddeds == 1 && r.Bool() + assert(!implicit) // implicit interfaces only appear in constraints + + fields := make([]*types.Field, nmethods+nembeddeds) + methods, embeddeds := fields[:nmethods], fields[nmethods:] + + for i := range methods { + methods[i] = types.NewField(r.pos(), r.selector(), r.signature(types.FakeRecv())) + } + for i := range embeddeds { + embeddeds[i] = types.NewField(src.NoXPos, nil, r.typ()) + } + + if len(fields) == 0 { + return types.Types[types.TINTER] // empty interface + } + return types.NewInterface(fields) +} + +func (r *reader) structType() *types.Type { + fields := make([]*types.Field, r.Len()) + for i := range fields { + field := types.NewField(r.pos(), r.selector(), r.typ()) + field.Note = r.String() + if r.Bool() { + field.Embedded = 1 + } + fields[i] = field + } + return types.NewStruct(fields) +} + +func (r *reader) signature(recv *types.Field) *types.Type { + r.Sync(pkgbits.SyncSignature) + + params := r.params() + results := r.params() + if r.Bool() { // variadic + params[len(params)-1].SetIsDDD(true) + } + + return types.NewSignature(recv, params, results) +} + +func (r *reader) params() []*types.Field { + r.Sync(pkgbits.SyncParams) + params := make([]*types.Field, r.Len()) + for i := range params { + params[i] = r.param() + } + return params +} + +func (r *reader) param() *types.Field { + r.Sync(pkgbits.SyncParam) + return types.NewField(r.pos(), r.localIdent(), r.typ()) +} + +// @@@ Objects + +// objReader maps qualified identifiers (represented as *types.Sym) to +// a pkgReader and corresponding index that can be used for reading 
+// that object's definition. +var objReader = map[*types.Sym]pkgReaderIndex{} + +// obj reads an instantiated object reference from the bitstream. +func (r *reader) obj() ir.Node { + return r.p.objInstIdx(r.objInfo(), r.dict, false) +} + +// objInfo reads an instantiated object reference from the bitstream +// and returns the encoded reference to it, without instantiating it. +func (r *reader) objInfo() objInfo { + r.Sync(pkgbits.SyncObject) + assert(!r.Bool()) // TODO(mdempsky): Remove; was derived func inst. + idx := r.Reloc(pkgbits.RelocObj) + + explicits := make([]typeInfo, r.Len()) + for i := range explicits { + explicits[i] = r.typInfo() + } + + return objInfo{idx, explicits} +} + +// objInstIdx returns the encoded, instantiated object. If shaped is +// true, then the shaped variant of the object is returned instead. +func (pr *pkgReader) objInstIdx(info objInfo, dict *readerDict, shaped bool) ir.Node { + explicits := pr.typListIdx(info.explicits, dict) + + var implicits []*types.Type + if dict != nil { + implicits = dict.targs + } + + return pr.objIdx(info.idx, implicits, explicits, shaped) +} + +// objIdx returns the specified object, instantiated with the given +// type arguments, if any. +// If shaped is true, then the shaped variant of the object is returned +// instead. +func (pr *pkgReader) objIdx(idx pkgbits.Index, implicits, explicits []*types.Type, shaped bool) ir.Node { + n, err := pr.objIdxMayFail(idx, implicits, explicits, shaped) + if err != nil { + base.Fatalf("%v", err) + } + return n +} + +// objIdxMayFail is equivalent to objIdx, but returns an error rather than +// failing the build if this object requires type arguments and the incorrect +// number of type arguments were passed. +// +// Other sources of internal failure (such as duplicate definitions) still fail +// the build. 
+func (pr *pkgReader) objIdxMayFail(idx pkgbits.Index, implicits, explicits []*types.Type, shaped bool) (ir.Node, error) { + rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) + _, sym := rname.qualifiedIdent() + tag := pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) + + if tag == pkgbits.ObjStub { + assert(!sym.IsBlank()) + switch sym.Pkg { + case types.BuiltinPkg, types.UnsafePkg: + return sym.Def.(ir.Node), nil + } + if pri, ok := objReader[sym]; ok { + return pri.pr.objIdxMayFail(pri.idx, nil, explicits, shaped) + } + if sym.Pkg.Path == "runtime" { + return typecheck.LookupRuntime(sym.Name), nil + } + base.Fatalf("unresolved stub: %v", sym) + } + + dict, err := pr.objDictIdx(sym, idx, implicits, explicits, shaped) + if err != nil { + return nil, err + } + + sym = dict.baseSym + if !sym.IsBlank() && sym.Def != nil { + return sym.Def.(*ir.Name), nil + } + + r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1) + rext := pr.newReader(pkgbits.RelocObjExt, idx, pkgbits.SyncObject1) + + r.dict = dict + rext.dict = dict + + do := func(op ir.Op, hasTParams bool) *ir.Name { + pos := r.pos() + setBasePos(pos) + if hasTParams { + r.typeParamNames() + } + + name := ir.NewDeclNameAt(pos, op, sym) + name.Class = ir.PEXTERN // may be overridden later + if !sym.IsBlank() { + if sym.Def != nil { + base.FatalfAt(name.Pos(), "already have a definition for %v", name) + } + assert(sym.Def == nil) + sym.Def = name + } + return name + } + + switch tag { + default: + panic("unexpected object") + + case pkgbits.ObjAlias: + name := do(ir.OTYPE, false) + setType(name, r.typ()) + name.SetAlias(true) + return name, nil + + case pkgbits.ObjConst: + name := do(ir.OLITERAL, false) + typ := r.typ() + val := FixValue(typ, r.Value()) + setType(name, typ) + setValue(name, val) + return name, nil + + case pkgbits.ObjFunc: + if sym.Name == "init" { + sym = Renameinit() + } + + npos := r.pos() + setBasePos(npos) + r.typeParamNames() + typ := r.signature(nil) + fpos := 
r.pos() + + fn := ir.NewFunc(fpos, npos, sym, typ) + name := fn.Nname + if !sym.IsBlank() { + if sym.Def != nil { + base.FatalfAt(name.Pos(), "already have a definition for %v", name) + } + assert(sym.Def == nil) + sym.Def = name + } + + if r.hasTypeParams() { + name.Func.SetDupok(true) + if r.dict.shaped { + setType(name, shapeSig(name.Func, r.dict)) + } else { + todoDicts = append(todoDicts, func() { + r.dict.shapedObj = pr.objIdx(idx, implicits, explicits, true).(*ir.Name) + }) + } + } + + rext.funcExt(name, nil) + return name, nil + + case pkgbits.ObjType: + name := do(ir.OTYPE, true) + typ := types.NewNamed(name) + setType(name, typ) + if r.hasTypeParams() && r.dict.shaped { + typ.SetHasShape(true) + } + + // Important: We need to do this before SetUnderlying. + rext.typeExt(name) + + // We need to defer CheckSize until we've called SetUnderlying to + // handle recursive types. + types.DeferCheckSize() + typ.SetUnderlying(r.typWrapped(false)) + types.ResumeCheckSize() + + if r.hasTypeParams() && !r.dict.shaped { + todoDicts = append(todoDicts, func() { + r.dict.shapedObj = pr.objIdx(idx, implicits, explicits, true).(*ir.Name) + }) + } + + methods := make([]*types.Field, r.Len()) + for i := range methods { + methods[i] = r.method(rext) + } + if len(methods) != 0 { + typ.SetMethods(methods) + } + + if !r.dict.shaped { + r.needWrapper(typ) + } + + return name, nil + + case pkgbits.ObjVar: + name := do(ir.ONAME, false) + setType(name, r.typ()) + rext.varExt(name) + return name, nil + } +} + +func (dict *readerDict) mangle(sym *types.Sym) *types.Sym { + if !dict.hasTypeParams() { + return sym + } + + // If sym is a locally defined generic type, we need the suffix to + // stay at the end after mangling so that types/fmt.go can strip it + // out again when writing the type's runtime descriptor (#54456). 
+ base, suffix := types.SplitVargenSuffix(sym.Name) + + var buf strings.Builder + buf.WriteString(base) + buf.WriteByte('[') + for i, targ := range dict.targs { + if i > 0 { + if i == dict.implicits { + buf.WriteByte(';') + } else { + buf.WriteByte(',') + } + } + buf.WriteString(targ.LinkString()) + } + buf.WriteByte(']') + buf.WriteString(suffix) + return sym.Pkg.Lookup(buf.String()) +} + +// shapify returns the shape type for targ. +// +// If basic is true, then the type argument is used to instantiate a +// type parameter whose constraint is a basic interface. +func shapify(targ *types.Type, basic bool) *types.Type { + if targ.Kind() == types.TFORW { + if targ.IsFullyInstantiated() { + // For recursive instantiated type argument, it may still be a TFORW + // when shapifying happens. If we don't have targ's underlying type, + // shapify won't work. The worst case is we end up not reusing code + // optimally in some tricky cases. + if base.Debug.Shapify != 0 { + base.Warn("skipping shaping of recursive type %v", targ) + } + if targ.HasShape() { + return targ + } + } else { + base.Fatalf("%v is missing its underlying type", targ) + } + } + + // When a pointer type is used to instantiate a type parameter + // constrained by a basic interface, we know the pointer's element + // type can't matter to the generated code. In this case, we can use + // an arbitrary pointer type as the shape type. (To match the + // non-unified frontend, we use `*byte`.) + // + // Otherwise, we simply use the type's underlying type as its shape. + // + // TODO(mdempsky): It should be possible to do much more aggressive + // shaping still; e.g., collapsing all pointer-shaped types into a + // common type, collapsing scalars of the same size/alignment into a + // common type, recursively shaping the element types of composite + // types, and discarding struct field names and tags. 
However, we'll + // need to start tracking how type parameters are actually used to + // implement some of these optimizations. + under := targ.Underlying() + if basic && targ.IsPtr() && !targ.Elem().NotInHeap() { + under = types.NewPtr(types.Types[types.TUINT8]) + } + + // Hash long type names to bound symbol name length seen by users, + // particularly for large protobuf structs (#65030). + uls := under.LinkString() + if base.Debug.MaxShapeLen != 0 && + len(uls) > base.Debug.MaxShapeLen { + h := notsha256.Sum256([]byte(uls)) + uls = hex.EncodeToString(h[:]) + } + + sym := types.ShapePkg.Lookup(uls) + if sym.Def == nil { + name := ir.NewDeclNameAt(under.Pos(), ir.OTYPE, sym) + typ := types.NewNamed(name) + typ.SetUnderlying(under) + sym.Def = typed(typ, name) + } + res := sym.Def.Type() + assert(res.IsShape()) + assert(res.HasShape()) + return res +} + +// objDictIdx reads and returns the specified object dictionary. +func (pr *pkgReader) objDictIdx(sym *types.Sym, idx pkgbits.Index, implicits, explicits []*types.Type, shaped bool) (*readerDict, error) { + r := pr.newReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1) + + dict := readerDict{ + shaped: shaped, + } + + nimplicits := r.Len() + nexplicits := r.Len() + + if nimplicits > len(implicits) || nexplicits != len(explicits) { + return nil, fmt.Errorf("%v has %v+%v params, but instantiated with %v+%v args", sym, nimplicits, nexplicits, len(implicits), len(explicits)) + } + + dict.targs = append(implicits[:nimplicits:nimplicits], explicits...) + dict.implicits = nimplicits + + // Within the compiler, we can just skip over the type parameters. + for range dict.targs[dict.implicits:] { + // Skip past bounds without actually evaluating them. 
+ r.typInfo() + } + + dict.derived = make([]derivedInfo, r.Len()) + dict.derivedTypes = make([]*types.Type, len(dict.derived)) + for i := range dict.derived { + dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} + } + + // Runtime dictionary information; private to the compiler. + + // If any type argument is already shaped, then we're constructing a + // shaped object, even if not explicitly requested (i.e., calling + // objIdx with shaped==true). This can happen with instantiating + // types that are referenced within a function body. + for _, targ := range dict.targs { + if targ.HasShape() { + dict.shaped = true + break + } + } + + // And if we're constructing a shaped object, then shapify all type + // arguments. + for i, targ := range dict.targs { + basic := r.Bool() + if dict.shaped { + dict.targs[i] = shapify(targ, basic) + } + } + + dict.baseSym = dict.mangle(sym) + + dict.typeParamMethodExprs = make([]readerMethodExprInfo, r.Len()) + for i := range dict.typeParamMethodExprs { + typeParamIdx := r.Len() + method := r.selector() + + dict.typeParamMethodExprs[i] = readerMethodExprInfo{typeParamIdx, method} + } + + dict.subdicts = make([]objInfo, r.Len()) + for i := range dict.subdicts { + dict.subdicts[i] = r.objInfo() + } + + dict.rtypes = make([]typeInfo, r.Len()) + for i := range dict.rtypes { + dict.rtypes[i] = r.typInfo() + } + + dict.itabs = make([]itabInfo, r.Len()) + for i := range dict.itabs { + dict.itabs[i] = itabInfo{typ: r.typInfo(), iface: r.typInfo()} + } + + return &dict, nil +} + +func (r *reader) typeParamNames() { + r.Sync(pkgbits.SyncTypeParamNames) + + for range r.dict.targs[r.dict.implicits:] { + r.pos() + r.localIdent() + } +} + +func (r *reader) method(rext *reader) *types.Field { + r.Sync(pkgbits.SyncMethod) + npos := r.pos() + sym := r.selector() + r.typeParamNames() + recv := r.param() + typ := r.signature(recv) + + fpos := r.pos() + fn := ir.NewFunc(fpos, npos, ir.MethodSym(recv.Type, sym), typ) + name := fn.Nname + 
+ if r.hasTypeParams() { + name.Func.SetDupok(true) + if r.dict.shaped { + typ = shapeSig(name.Func, r.dict) + setType(name, typ) + } + } + + rext.funcExt(name, sym) + + meth := types.NewField(name.Func.Pos(), sym, typ) + meth.Nname = name + meth.SetNointerface(name.Func.Pragma&ir.Nointerface != 0) + + return meth +} + +func (r *reader) qualifiedIdent() (pkg *types.Pkg, sym *types.Sym) { + r.Sync(pkgbits.SyncSym) + pkg = r.pkg() + if name := r.String(); name != "" { + sym = pkg.Lookup(name) + } + return +} + +func (r *reader) localIdent() *types.Sym { + r.Sync(pkgbits.SyncLocalIdent) + pkg := r.pkg() + if name := r.String(); name != "" { + return pkg.Lookup(name) + } + return nil +} + +func (r *reader) selector() *types.Sym { + r.Sync(pkgbits.SyncSelector) + pkg := r.pkg() + name := r.String() + if types.IsExported(name) { + pkg = types.LocalPkg + } + return pkg.Lookup(name) +} + +func (r *reader) hasTypeParams() bool { + return r.dict.hasTypeParams() +} + +func (dict *readerDict) hasTypeParams() bool { + return dict != nil && len(dict.targs) != 0 +} + +// @@@ Compiler extensions + +func (r *reader) funcExt(name *ir.Name, method *types.Sym) { + r.Sync(pkgbits.SyncFuncExt) + + fn := name.Func + + // XXX: Workaround because linker doesn't know how to copy Pos. + if !fn.Pos().IsKnown() { + fn.SetPos(name.Pos()) + } + + // Normally, we only compile local functions, which saves redundant compilation work. + // n.Defn is not nil for local functions, and is nil for imported function. But for + // generic functions, we might have an instantiation that no other package has seen before. + // So we need to be conservative and compile it again. + // + // That's why name.Defn is set here, so ir.VisitFuncsBottomUp can analyze function. + // TODO(mdempsky,cuonglm): find a cleaner way to handle this. 
+ if name.Sym().Pkg == types.LocalPkg || r.hasTypeParams() { + name.Defn = fn + } + + fn.Pragma = r.pragmaFlag() + r.linkname(name) + + if buildcfg.GOARCH == "wasm" { + xmod := r.String() + xname := r.String() + + if xmod != "" && xname != "" { + fn.WasmImport = &ir.WasmImport{ + Module: xmod, + Name: xname, + } + } + } + + if r.Bool() { + assert(name.Defn == nil) + + fn.ABI = obj.ABI(r.Uint64()) + + // Escape analysis. + for _, f := range name.Type().RecvParams() { + f.Note = r.String() + } + + if r.Bool() { + fn.Inl = &ir.Inline{ + Cost: int32(r.Len()), + CanDelayResults: r.Bool(), + } + if buildcfg.Experiment.NewInliner { + fn.Inl.Properties = r.String() + } + } + } else { + r.addBody(name.Func, method) + } + r.Sync(pkgbits.SyncEOF) +} + +func (r *reader) typeExt(name *ir.Name) { + r.Sync(pkgbits.SyncTypeExt) + + typ := name.Type() + + if r.hasTypeParams() { + // Set "RParams" (really type arguments here, not parameters) so + // this type is treated as "fully instantiated". This ensures the + // type descriptor is written out as DUPOK and method wrappers are + // generated even for imported types. + var targs []*types.Type + targs = append(targs, r.dict.targs...) + typ.SetRParams(targs) + } + + name.SetPragma(r.pragmaFlag()) + + typecheck.SetBaseTypeIndex(typ, r.Int64(), r.Int64()) +} + +func (r *reader) varExt(name *ir.Name) { + r.Sync(pkgbits.SyncVarExt) + r.linkname(name) +} + +func (r *reader) linkname(name *ir.Name) { + assert(name.Op() == ir.ONAME) + r.Sync(pkgbits.SyncLinkname) + + if idx := r.Int64(); idx >= 0 { + lsym := name.Linksym() + lsym.SymIdx = int32(idx) + lsym.Set(obj.AttrIndexed, true) + } else { + name.Sym().Linkname = r.String() + } +} + +func (r *reader) pragmaFlag() ir.PragmaFlag { + r.Sync(pkgbits.SyncPragma) + return ir.PragmaFlag(r.Int()) +} + +// @@@ Function bodies + +// bodyReader tracks where the serialized IR for a local or imported, +// generic function's body can be found. 
+var bodyReader = map[*ir.Func]pkgReaderIndex{} + +// importBodyReader tracks where the serialized IR for an imported, +// static (i.e., non-generic) function body can be read. +var importBodyReader = map[*types.Sym]pkgReaderIndex{} + +// bodyReaderFor returns the pkgReaderIndex for reading fn's +// serialized IR, and whether one was found. +func bodyReaderFor(fn *ir.Func) (pri pkgReaderIndex, ok bool) { + if fn.Nname.Defn != nil { + pri, ok = bodyReader[fn] + base.AssertfAt(ok, base.Pos, "must have bodyReader for %v", fn) // must always be available + } else { + pri, ok = importBodyReader[fn.Sym()] + } + return +} + +// todoDicts holds the list of dictionaries that still need their +// runtime dictionary objects constructed. +var todoDicts []func() + +// todoBodies holds the list of function bodies that still need to be +// constructed. +var todoBodies []*ir.Func + +// addBody reads a function body reference from the element bitstream, +// and associates it with fn. +func (r *reader) addBody(fn *ir.Func, method *types.Sym) { + // addBody should only be called for local functions or imported + // generic functions; see comment in funcExt. + assert(fn.Nname.Defn != nil) + + idx := r.Reloc(pkgbits.RelocBody) + + pri := pkgReaderIndex{r.p, idx, r.dict, method, nil} + bodyReader[fn] = pri + + if r.curfn == nil { + todoBodies = append(todoBodies, fn) + return + } + + pri.funcBody(fn) +} + +func (pri pkgReaderIndex) funcBody(fn *ir.Func) { + r := pri.asReader(pkgbits.RelocBody, pkgbits.SyncFuncBody) + r.funcBody(fn) +} + +// funcBody reads a function body definition from the element +// bitstream, and populates fn with it. 
+func (r *reader) funcBody(fn *ir.Func) { + r.curfn = fn + r.closureVars = fn.ClosureVars + if len(r.closureVars) != 0 && r.hasTypeParams() { + r.dictParam = r.closureVars[len(r.closureVars)-1] // dictParam is last; see reader.funcLit + } + + ir.WithFunc(fn, func() { + r.declareParams() + + if r.syntheticBody(fn.Pos()) { + return + } + + if !r.Bool() { + return + } + + body := r.stmts() + if body == nil { + body = []ir.Node{typecheck.Stmt(ir.NewBlockStmt(src.NoXPos, nil))} + } + fn.Body = body + fn.Endlineno = r.pos() + }) + + r.marker.WriteTo(fn) +} + +// syntheticBody adds a synthetic body to r.curfn if appropriate, and +// reports whether it did. +func (r *reader) syntheticBody(pos src.XPos) bool { + if r.synthetic != nil { + r.synthetic(pos, r) + return true + } + + // If this function has type parameters and isn't shaped, then we + // just tail call its corresponding shaped variant. + if r.hasTypeParams() && !r.dict.shaped { + r.callShaped(pos) + return true + } + + return false +} + +// callShaped emits a tail call to r.shapedFn, passing along the +// arguments to the current function. +func (r *reader) callShaped(pos src.XPos) { + shapedObj := r.dict.shapedObj + assert(shapedObj != nil) + + var shapedFn ir.Node + if r.methodSym == nil { + // Instantiating a generic function; shapedObj is the shaped + // function itself. + assert(shapedObj.Op() == ir.ONAME && shapedObj.Class == ir.PFUNC) + shapedFn = shapedObj + } else { + // Instantiating a generic type's method; shapedObj is the shaped + // type, so we need to select it's corresponding method. + shapedFn = shapedMethodExpr(pos, shapedObj, r.methodSym) + } + + params := r.syntheticArgs() + + // Construct the arguments list: receiver (if any), then runtime + // dictionary, and finally normal parameters. + // + // Note: For simplicity, shaped methods are added as normal methods + // on their shaped types. 
So existing code (e.g., packages ir and + // typecheck) expects the shaped type to appear as the receiver + // parameter (or first parameter, as a method expression). Hence + // putting the dictionary parameter after that is the least invasive + // solution at the moment. + var args ir.Nodes + if r.methodSym != nil { + args.Append(params[0]) + params = params[1:] + } + args.Append(typecheck.Expr(ir.NewAddrExpr(pos, r.p.dictNameOf(r.dict)))) + args.Append(params...) + + r.syntheticTailCall(pos, shapedFn, args) +} + +// syntheticArgs returns the recvs and params arguments passed to the +// current function. +func (r *reader) syntheticArgs() ir.Nodes { + sig := r.curfn.Nname.Type() + return ir.ToNodes(r.curfn.Dcl[:sig.NumRecvs()+sig.NumParams()]) +} + +// syntheticTailCall emits a tail call to fn, passing the given +// arguments list. +func (r *reader) syntheticTailCall(pos src.XPos, fn ir.Node, args ir.Nodes) { + // Mark the function as a wrapper so it doesn't show up in stack + // traces. + r.curfn.SetWrapper(true) + + call := typecheck.Call(pos, fn, args, fn.Type().IsVariadic()).(*ir.CallExpr) + + var stmt ir.Node + if fn.Type().NumResults() != 0 { + stmt = typecheck.Stmt(ir.NewReturnStmt(pos, []ir.Node{call})) + } else { + stmt = call + } + r.curfn.Body.Append(stmt) +} + +// dictNameOf returns the runtime dictionary corresponding to dict. +func (pr *pkgReader) dictNameOf(dict *readerDict) *ir.Name { + pos := base.AutogeneratedPos + + // Check that we only instantiate runtime dictionaries with real types. + base.AssertfAt(!dict.shaped, pos, "runtime dictionary of shaped object %v", dict.baseSym) + + sym := dict.baseSym.Pkg.Lookup(objabi.GlobalDictPrefix + "." 
+ dict.baseSym.Name) + if sym.Def != nil { + return sym.Def.(*ir.Name) + } + + name := ir.NewNameAt(pos, sym, dict.varType()) + name.Class = ir.PEXTERN + sym.Def = name // break cycles with mutual subdictionaries + + lsym := name.Linksym() + ot := 0 + + assertOffset := func(section string, offset int) { + base.AssertfAt(ot == offset*types.PtrSize, pos, "writing section %v at offset %v, but it should be at %v*%v", section, ot, offset, types.PtrSize) + } + + assertOffset("type param method exprs", dict.typeParamMethodExprsOffset()) + for _, info := range dict.typeParamMethodExprs { + typeParam := dict.targs[info.typeParamIdx] + method := typecheck.NewMethodExpr(pos, typeParam, info.method) + + rsym := method.FuncName().Linksym() + assert(rsym.ABI() == obj.ABIInternal) // must be ABIInternal; see ir.OCFUNC in ssagen/ssa.go + + ot = objw.SymPtr(lsym, ot, rsym, 0) + } + + assertOffset("subdictionaries", dict.subdictsOffset()) + for _, info := range dict.subdicts { + explicits := pr.typListIdx(info.explicits, dict) + + // Careful: Due to subdictionary cycles, name may not be fully + // initialized yet. + name := pr.objDictName(info.idx, dict.targs, explicits) + + ot = objw.SymPtr(lsym, ot, name.Linksym(), 0) + } + + assertOffset("rtypes", dict.rtypesOffset()) + for _, info := range dict.rtypes { + typ := pr.typIdx(info, dict, true) + ot = objw.SymPtr(lsym, ot, reflectdata.TypeLinksym(typ), 0) + + // TODO(mdempsky): Double check this. + reflectdata.MarkTypeUsedInInterface(typ, lsym) + } + + // For each (typ, iface) pair, we write the *runtime.itab pointer + // for the pair. For pairs that don't actually require an itab + // (i.e., typ is an interface, or iface is an empty interface), we + // write a nil pointer instead. This is wasteful, but rare in + // practice (e.g., instantiating a type parameter with an interface + // type). 
+ assertOffset("itabs", dict.itabsOffset()) + for _, info := range dict.itabs { + typ := pr.typIdx(info.typ, dict, true) + iface := pr.typIdx(info.iface, dict, true) + + if !typ.IsInterface() && iface.IsInterface() && !iface.IsEmptyInterface() { + ot = objw.SymPtr(lsym, ot, reflectdata.ITabLsym(typ, iface), 0) + } else { + ot += types.PtrSize + } + + // TODO(mdempsky): Double check this. + reflectdata.MarkTypeUsedInInterface(typ, lsym) + reflectdata.MarkTypeUsedInInterface(iface, lsym) + } + + objw.Global(lsym, int32(ot), obj.DUPOK|obj.RODATA) + + return name +} + +// typeParamMethodExprsOffset returns the offset of the runtime +// dictionary's type parameter method expressions section, in words. +func (dict *readerDict) typeParamMethodExprsOffset() int { + return 0 +} + +// subdictsOffset returns the offset of the runtime dictionary's +// subdictionary section, in words. +func (dict *readerDict) subdictsOffset() int { + return dict.typeParamMethodExprsOffset() + len(dict.typeParamMethodExprs) +} + +// rtypesOffset returns the offset of the runtime dictionary's rtypes +// section, in words. +func (dict *readerDict) rtypesOffset() int { + return dict.subdictsOffset() + len(dict.subdicts) +} + +// itabsOffset returns the offset of the runtime dictionary's itabs +// section, in words. +func (dict *readerDict) itabsOffset() int { + return dict.rtypesOffset() + len(dict.rtypes) +} + +// numWords returns the total number of words that comprise dict's +// runtime dictionary variable. +func (dict *readerDict) numWords() int64 { + return int64(dict.itabsOffset() + len(dict.itabs)) +} + +// varType returns the type of dict's runtime dictionary variable. 
+func (dict *readerDict) varType() *types.Type { + return types.NewArray(types.Types[types.TUINTPTR], dict.numWords()) +} + +func (r *reader) declareParams() { + r.curfn.DeclareParams(!r.funarghack) + + for _, name := range r.curfn.Dcl { + if name.Sym().Name == dictParamName { + r.dictParam = name + continue + } + + r.addLocal(name) + } +} + +func (r *reader) addLocal(name *ir.Name) { + if r.synthetic == nil { + r.Sync(pkgbits.SyncAddLocal) + if r.p.SyncMarkers() { + want := r.Int() + if have := len(r.locals); have != want { + base.FatalfAt(name.Pos(), "locals table has desynced") + } + } + r.varDictIndex(name) + } + + r.locals = append(r.locals, name) +} + +func (r *reader) useLocal() *ir.Name { + r.Sync(pkgbits.SyncUseObjLocal) + if r.Bool() { + return r.locals[r.Len()] + } + return r.closureVars[r.Len()] +} + +func (r *reader) openScope() { + r.Sync(pkgbits.SyncOpenScope) + pos := r.pos() + + if base.Flag.Dwarf { + r.scopeVars = append(r.scopeVars, len(r.curfn.Dcl)) + r.marker.Push(pos) + } +} + +func (r *reader) closeScope() { + r.Sync(pkgbits.SyncCloseScope) + r.lastCloseScopePos = r.pos() + + r.closeAnotherScope() +} + +// closeAnotherScope is like closeScope, but it reuses the same mark +// position as the last closeScope call. This is useful for "for" and +// "if" statements, as their implicit blocks always end at the same +// position as an explicit block. +func (r *reader) closeAnotherScope() { + r.Sync(pkgbits.SyncCloseAnotherScope) + + if base.Flag.Dwarf { + scopeVars := r.scopeVars[len(r.scopeVars)-1] + r.scopeVars = r.scopeVars[:len(r.scopeVars)-1] + + // Quirkish: noder decides which scopes to keep before + // typechecking, whereas incremental typechecking during IR + // construction can result in new autotemps being allocated. To + // produce identical output, we ignore autotemps here for the + // purpose of deciding whether to retract the scope. 
+ // + // This is important for net/http/fcgi, because it contains: + // + // var body io.ReadCloser + // if len(content) > 0 { + // body, req.pw = io.Pipe() + // } else { … } + // + // Notably, io.Pipe is inlinable, and inlining it introduces a ~R0 + // variable at the call site. + // + // Noder does not preserve the scope where the io.Pipe() call + // resides, because it doesn't contain any declared variables in + // source. So the ~R0 variable ends up being assigned to the + // enclosing scope instead. + // + // However, typechecking this assignment also introduces + // autotemps, because io.Pipe's results need conversion before + // they can be assigned to their respective destination variables. + // + // TODO(mdempsky): We should probably just keep all scopes, and + // let dwarfgen take care of pruning them instead. + retract := true + for _, n := range r.curfn.Dcl[scopeVars:] { + if !n.AutoTemp() { + retract = false + break + } + } + + if retract { + // no variables were declared in this scope, so we can retract it. 
+ r.marker.Unpush() + } else { + r.marker.Pop(r.lastCloseScopePos) + } + } +} + +// @@@ Statements + +func (r *reader) stmt() ir.Node { + return block(r.stmts()) +} + +func block(stmts []ir.Node) ir.Node { + switch len(stmts) { + case 0: + return nil + case 1: + return stmts[0] + default: + return ir.NewBlockStmt(stmts[0].Pos(), stmts) + } +} + +func (r *reader) stmts() ir.Nodes { + assert(ir.CurFunc == r.curfn) + var res ir.Nodes + + r.Sync(pkgbits.SyncStmts) + for { + tag := codeStmt(r.Code(pkgbits.SyncStmt1)) + if tag == stmtEnd { + r.Sync(pkgbits.SyncStmtsEnd) + return res + } + + if n := r.stmt1(tag, &res); n != nil { + res.Append(typecheck.Stmt(n)) + } + } +} + +func (r *reader) stmt1(tag codeStmt, out *ir.Nodes) ir.Node { + var label *types.Sym + if n := len(*out); n > 0 { + if ls, ok := (*out)[n-1].(*ir.LabelStmt); ok { + label = ls.Label + } + } + + switch tag { + default: + panic("unexpected statement") + + case stmtAssign: + pos := r.pos() + names, lhs := r.assignList() + rhs := r.multiExpr() + + if len(rhs) == 0 { + for _, name := range names { + as := ir.NewAssignStmt(pos, name, nil) + as.PtrInit().Append(ir.NewDecl(pos, ir.ODCL, name)) + out.Append(typecheck.Stmt(as)) + } + return nil + } + + if len(lhs) == 1 && len(rhs) == 1 { + n := ir.NewAssignStmt(pos, lhs[0], rhs[0]) + n.Def = r.initDefn(n, names) + return n + } + + n := ir.NewAssignListStmt(pos, ir.OAS2, lhs, rhs) + n.Def = r.initDefn(n, names) + return n + + case stmtAssignOp: + op := r.op() + lhs := r.expr() + pos := r.pos() + rhs := r.expr() + return ir.NewAssignOpStmt(pos, op, lhs, rhs) + + case stmtIncDec: + op := r.op() + lhs := r.expr() + pos := r.pos() + n := ir.NewAssignOpStmt(pos, op, lhs, ir.NewOne(pos, lhs.Type())) + n.IncDec = true + return n + + case stmtBlock: + out.Append(r.blockStmt()...) 
+ return nil + + case stmtBranch: + pos := r.pos() + op := r.op() + sym := r.optLabel() + return ir.NewBranchStmt(pos, op, sym) + + case stmtCall: + pos := r.pos() + op := r.op() + call := r.expr() + stmt := ir.NewGoDeferStmt(pos, op, call) + if op == ir.ODEFER { + x := r.optExpr() + if x != nil { + stmt.DeferAt = x.(ir.Expr) + } + } + return stmt + + case stmtExpr: + return r.expr() + + case stmtFor: + return r.forStmt(label) + + case stmtIf: + return r.ifStmt() + + case stmtLabel: + pos := r.pos() + sym := r.label() + return ir.NewLabelStmt(pos, sym) + + case stmtReturn: + pos := r.pos() + results := r.multiExpr() + return ir.NewReturnStmt(pos, results) + + case stmtSelect: + return r.selectStmt(label) + + case stmtSend: + pos := r.pos() + ch := r.expr() + value := r.expr() + return ir.NewSendStmt(pos, ch, value) + + case stmtSwitch: + return r.switchStmt(label) + } +} + +func (r *reader) assignList() ([]*ir.Name, []ir.Node) { + lhs := make([]ir.Node, r.Len()) + var names []*ir.Name + + for i := range lhs { + expr, def := r.assign() + lhs[i] = expr + if def { + names = append(names, expr.(*ir.Name)) + } + } + + return names, lhs +} + +// assign returns an assignee expression. It also reports whether the +// returned expression is a newly declared variable. 
+func (r *reader) assign() (ir.Node, bool) { + switch tag := codeAssign(r.Code(pkgbits.SyncAssign)); tag { + default: + panic("unhandled assignee expression") + + case assignBlank: + return typecheck.AssignExpr(ir.BlankNode), false + + case assignDef: + pos := r.pos() + setBasePos(pos) // test/fixedbugs/issue49767.go depends on base.Pos being set for the r.typ() call here, ugh + name := r.curfn.NewLocal(pos, r.localIdent(), r.typ()) + r.addLocal(name) + return name, true + + case assignExpr: + return r.expr(), false + } +} + +func (r *reader) blockStmt() []ir.Node { + r.Sync(pkgbits.SyncBlockStmt) + r.openScope() + stmts := r.stmts() + r.closeScope() + return stmts +} + +func (r *reader) forStmt(label *types.Sym) ir.Node { + r.Sync(pkgbits.SyncForStmt) + + r.openScope() + + if r.Bool() { + pos := r.pos() + rang := ir.NewRangeStmt(pos, nil, nil, nil, nil, false) + rang.Label = label + + names, lhs := r.assignList() + if len(lhs) >= 1 { + rang.Key = lhs[0] + if len(lhs) >= 2 { + rang.Value = lhs[1] + } + } + rang.Def = r.initDefn(rang, names) + + rang.X = r.expr() + if rang.X.Type().IsMap() { + rang.RType = r.rtype(pos) + } + if rang.Key != nil && !ir.IsBlank(rang.Key) { + rang.KeyTypeWord, rang.KeySrcRType = r.convRTTI(pos) + } + if rang.Value != nil && !ir.IsBlank(rang.Value) { + rang.ValueTypeWord, rang.ValueSrcRType = r.convRTTI(pos) + } + + rang.Body = r.blockStmt() + rang.DistinctVars = r.Bool() + r.closeAnotherScope() + + return rang + } + + pos := r.pos() + init := r.stmt() + cond := r.optExpr() + post := r.stmt() + body := r.blockStmt() + perLoopVars := r.Bool() + r.closeAnotherScope() + + if ir.IsConst(cond, constant.Bool) && !ir.BoolVal(cond) { + return init // simplify "for init; false; post { ... 
}" into "init" + } + + stmt := ir.NewForStmt(pos, init, cond, post, body, perLoopVars) + stmt.Label = label + return stmt +} + +func (r *reader) ifStmt() ir.Node { + r.Sync(pkgbits.SyncIfStmt) + r.openScope() + pos := r.pos() + init := r.stmts() + cond := r.expr() + staticCond := r.Int() + var then, els []ir.Node + if staticCond >= 0 { + then = r.blockStmt() + } else { + r.lastCloseScopePos = r.pos() + } + if staticCond <= 0 { + els = r.stmts() + } + r.closeAnotherScope() + + if staticCond != 0 { + // We may have removed a dead return statement, which can trip up + // later passes (#62211). To avoid confusion, we instead flatten + // the if statement into a block. + + if cond.Op() != ir.OLITERAL { + init.Append(typecheck.Stmt(ir.NewAssignStmt(pos, ir.BlankNode, cond))) // for side effects + } + init.Append(then...) + init.Append(els...) + return block(init) + } + + n := ir.NewIfStmt(pos, cond, then, els) + n.SetInit(init) + return n +} + +func (r *reader) selectStmt(label *types.Sym) ir.Node { + r.Sync(pkgbits.SyncSelectStmt) + + pos := r.pos() + clauses := make([]*ir.CommClause, r.Len()) + for i := range clauses { + if i > 0 { + r.closeScope() + } + r.openScope() + + pos := r.pos() + comm := r.stmt() + body := r.stmts() + + // "case i = <-c: ..." may require an implicit conversion (e.g., + // see fixedbugs/bug312.go). Currently, typecheck throws away the + // implicit conversion and relies on it being reinserted later, + // but that would lose any explicit RTTI operands too. To preserve + // RTTI, we rewrite this as "case tmp := <-c: i = tmp; ...". + if as, ok := comm.(*ir.AssignStmt); ok && as.Op() == ir.OAS && !as.Def { + if conv, ok := as.Y.(*ir.ConvExpr); ok && conv.Op() == ir.OCONVIFACE { + base.AssertfAt(conv.Implicit(), conv.Pos(), "expected implicit conversion: %v", conv) + + recv := conv.X + base.AssertfAt(recv.Op() == ir.ORECV, recv.Pos(), "expected receive expression: %v", recv) + + tmp := r.temp(pos, recv.Type()) + + // Replace comm with `tmp := <-c`. 
+ tmpAs := ir.NewAssignStmt(pos, tmp, recv) + tmpAs.Def = true + tmpAs.PtrInit().Append(ir.NewDecl(pos, ir.ODCL, tmp)) + comm = tmpAs + + // Change original assignment to `i = tmp`, and prepend to body. + conv.X = tmp + body = append([]ir.Node{as}, body...) + } + } + + // multiExpr will have desugared a comma-ok receive expression + // into a separate statement. However, the rest of the compiler + // expects comm to be the OAS2RECV statement itself, so we need to + // shuffle things around to fit that pattern. + if as2, ok := comm.(*ir.AssignListStmt); ok && as2.Op() == ir.OAS2 { + init := ir.TakeInit(as2.Rhs[0]) + base.AssertfAt(len(init) == 1 && init[0].Op() == ir.OAS2RECV, as2.Pos(), "unexpected assignment: %+v", as2) + + comm = init[0] + body = append([]ir.Node{as2}, body...) + } + + clauses[i] = ir.NewCommStmt(pos, comm, body) + } + if len(clauses) > 0 { + r.closeScope() + } + n := ir.NewSelectStmt(pos, clauses) + n.Label = label + return n +} + +func (r *reader) switchStmt(label *types.Sym) ir.Node { + r.Sync(pkgbits.SyncSwitchStmt) + + r.openScope() + pos := r.pos() + init := r.stmt() + + var tag ir.Node + var ident *ir.Ident + var iface *types.Type + if r.Bool() { + pos := r.pos() + if r.Bool() { + ident = ir.NewIdent(r.pos(), r.localIdent()) + } + x := r.expr() + iface = x.Type() + tag = ir.NewTypeSwitchGuard(pos, ident, x) + } else { + tag = r.optExpr() + } + + clauses := make([]*ir.CaseClause, r.Len()) + for i := range clauses { + if i > 0 { + r.closeScope() + } + r.openScope() + + pos := r.pos() + var cases, rtypes []ir.Node + if iface != nil { + cases = make([]ir.Node, r.Len()) + if len(cases) == 0 { + cases = nil // TODO(mdempsky): Unclear if this matters. 
+ } + for i := range cases { + if r.Bool() { // case nil + cases[i] = typecheck.Expr(types.BuiltinPkg.Lookup("nil").Def.(*ir.NilExpr)) + } else { + cases[i] = r.exprType() + } + } + } else { + cases = r.exprList() + + // For `switch { case any(true): }` (e.g., issue 3980 in + // test/switch.go), the backend still creates a mixed bool/any + // comparison, and we need to explicitly supply the RTTI for the + // comparison. + // + // TODO(mdempsky): Change writer.go to desugar "switch {" into + // "switch true {", which we already handle correctly. + if tag == nil { + for i, cas := range cases { + if cas.Type().IsEmptyInterface() { + for len(rtypes) < i { + rtypes = append(rtypes, nil) + } + rtypes = append(rtypes, reflectdata.TypePtrAt(cas.Pos(), types.Types[types.TBOOL])) + } + } + } + } + + clause := ir.NewCaseStmt(pos, cases, nil) + clause.RTypes = rtypes + + if ident != nil { + name := r.curfn.NewLocal(r.pos(), ident.Sym(), r.typ()) + r.addLocal(name) + clause.Var = name + name.Defn = tag + } + + clause.Body = r.stmts() + clauses[i] = clause + } + if len(clauses) > 0 { + r.closeScope() + } + r.closeScope() + + n := ir.NewSwitchStmt(pos, tag, clauses) + n.Label = label + if init != nil { + n.SetInit([]ir.Node{init}) + } + return n +} + +func (r *reader) label() *types.Sym { + r.Sync(pkgbits.SyncLabel) + name := r.String() + if r.inlCall != nil { + name = fmt.Sprintf("~%s·%d", name, inlgen) + } + return typecheck.Lookup(name) +} + +func (r *reader) optLabel() *types.Sym { + r.Sync(pkgbits.SyncOptLabel) + if r.Bool() { + return r.label() + } + return nil +} + +// initDefn marks the given names as declared by defn and populates +// its Init field with ODCL nodes. It then reports whether any names +// were so declared, which can be used to initialize defn.Def. 
+func (r *reader) initDefn(defn ir.InitNode, names []*ir.Name) bool { + if len(names) == 0 { + return false + } + + init := make([]ir.Node, len(names)) + for i, name := range names { + name.Defn = defn + init[i] = ir.NewDecl(name.Pos(), ir.ODCL, name) + } + defn.SetInit(init) + return true +} + +// @@@ Expressions + +// expr reads and returns a typechecked expression. +func (r *reader) expr() (res ir.Node) { + defer func() { + if res != nil && res.Typecheck() == 0 { + base.FatalfAt(res.Pos(), "%v missed typecheck", res) + } + }() + + switch tag := codeExpr(r.Code(pkgbits.SyncExpr)); tag { + default: + panic("unhandled expression") + + case exprLocal: + return typecheck.Expr(r.useLocal()) + + case exprGlobal: + // Callee instead of Expr allows builtins + // TODO(mdempsky): Handle builtins directly in exprCall, like method calls? + return typecheck.Callee(r.obj()) + + case exprFuncInst: + origPos, pos := r.origPos() + wrapperFn, baseFn, dictPtr := r.funcInst(pos) + if wrapperFn != nil { + return wrapperFn + } + return r.curry(origPos, false, baseFn, dictPtr, nil) + + case exprConst: + pos := r.pos() + typ := r.typ() + val := FixValue(typ, r.Value()) + return ir.NewBasicLit(pos, typ, val) + + case exprZero: + pos := r.pos() + typ := r.typ() + return ir.NewZero(pos, typ) + + case exprCompLit: + return r.compLit() + + case exprFuncLit: + return r.funcLit() + + case exprFieldVal: + x := r.expr() + pos := r.pos() + sym := r.selector() + + return typecheck.XDotField(pos, x, sym) + + case exprMethodVal: + recv := r.expr() + origPos, pos := r.origPos() + wrapperFn, baseFn, dictPtr := r.methodExpr() + + // For simple wrapperFn values, the existing machinery for creating + // and deduplicating wrapperFn value wrappers still works fine. + if wrapperFn, ok := wrapperFn.(*ir.SelectorExpr); ok && wrapperFn.Op() == ir.OMETHEXPR { + // The receiver expression we constructed may have a shape type. 
+ // For example, in fixedbugs/issue54343.go, `New[int]()` is + // constructed as `New[go.shape.int](&.dict.New[int])`, which + // has type `*T[go.shape.int]`, not `*T[int]`. + // + // However, the method we want to select here is `(*T[int]).M`, + // not `(*T[go.shape.int]).M`, so we need to manually convert + // the type back so that the OXDOT resolves correctly. + // + // TODO(mdempsky): Logically it might make more sense for + // exprCall to take responsibility for setting a non-shaped + // result type, but this is the only place where we care + // currently. And only because existing ir.OMETHVALUE backend + // code relies on n.X.Type() instead of n.Selection.Recv().Type + // (because the latter is types.FakeRecvType() in the case of + // interface method values). + // + if recv.Type().HasShape() { + typ := wrapperFn.Type().Param(0).Type + if !types.Identical(typ, recv.Type()) { + base.FatalfAt(wrapperFn.Pos(), "receiver %L does not match %L", recv, wrapperFn) + } + recv = typecheck.Expr(ir.NewConvExpr(recv.Pos(), ir.OCONVNOP, typ, recv)) + } + + n := typecheck.XDotMethod(pos, recv, wrapperFn.Sel, false) + + // As a consistency check here, we make sure "n" selected the + // same method (represented by a types.Field) that wrapperFn + // selected. However, for anonymous receiver types, there can be + // multiple such types.Field instances (#58563). So we may need + // to fallback to making sure Sym and Type (including the + // receiver parameter's type) match. 
+ if n.Selection != wrapperFn.Selection { + assert(n.Selection.Sym == wrapperFn.Selection.Sym) + assert(types.Identical(n.Selection.Type, wrapperFn.Selection.Type)) + assert(types.Identical(n.Selection.Type.Recv().Type, wrapperFn.Selection.Type.Recv().Type)) + } + + wrapper := methodValueWrapper{ + rcvr: n.X.Type(), + method: n.Selection, + } + + if r.importedDef() { + haveMethodValueWrappers = append(haveMethodValueWrappers, wrapper) + } else { + needMethodValueWrappers = append(needMethodValueWrappers, wrapper) + } + return n + } + + // For more complicated method expressions, we construct a + // function literal wrapper. + return r.curry(origPos, true, baseFn, recv, dictPtr) + + case exprMethodExpr: + recv := r.typ() + + implicits := make([]int, r.Len()) + for i := range implicits { + implicits[i] = r.Len() + } + var deref, addr bool + if r.Bool() { + deref = true + } else if r.Bool() { + addr = true + } + + origPos, pos := r.origPos() + wrapperFn, baseFn, dictPtr := r.methodExpr() + + // If we already have a wrapper and don't need to do anything with + // it, we can just return the wrapper directly. + // + // N.B., we use implicits/deref/addr here as the source of truth + // rather than types.Identical, because the latter can be confused + // by tricky promoted methods (e.g., typeparam/mdempsky/21.go). + if wrapperFn != nil && len(implicits) == 0 && !deref && !addr { + if !types.Identical(recv, wrapperFn.Type().Param(0).Type) { + base.FatalfAt(pos, "want receiver type %v, but have method %L", recv, wrapperFn) + } + return wrapperFn + } + + // Otherwise, if the wrapper function is a static method + // expression (OMETHEXPR) and the receiver type is unshaped, then + // we can rely on a statically generated wrapper being available. 
+ if method, ok := wrapperFn.(*ir.SelectorExpr); ok && method.Op() == ir.OMETHEXPR && !recv.HasShape() { + return typecheck.NewMethodExpr(pos, recv, method.Sel) + } + + return r.methodExprWrap(origPos, recv, implicits, deref, addr, baseFn, dictPtr) + + case exprIndex: + x := r.expr() + pos := r.pos() + index := r.expr() + n := typecheck.Expr(ir.NewIndexExpr(pos, x, index)) + switch n.Op() { + case ir.OINDEXMAP: + n := n.(*ir.IndexExpr) + n.RType = r.rtype(pos) + } + return n + + case exprSlice: + x := r.expr() + pos := r.pos() + var index [3]ir.Node + for i := range index { + index[i] = r.optExpr() + } + op := ir.OSLICE + if index[2] != nil { + op = ir.OSLICE3 + } + return typecheck.Expr(ir.NewSliceExpr(pos, op, x, index[0], index[1], index[2])) + + case exprAssert: + x := r.expr() + pos := r.pos() + typ := r.exprType() + srcRType := r.rtype(pos) + + // TODO(mdempsky): Always emit ODYNAMICDOTTYPE for uniformity? + if typ, ok := typ.(*ir.DynamicType); ok && typ.Op() == ir.ODYNAMICTYPE { + assert := ir.NewDynamicTypeAssertExpr(pos, ir.ODYNAMICDOTTYPE, x, typ.RType) + assert.SrcRType = srcRType + assert.ITab = typ.ITab + return typed(typ.Type(), assert) + } + return typecheck.Expr(ir.NewTypeAssertExpr(pos, x, typ.Type())) + + case exprUnaryOp: + op := r.op() + pos := r.pos() + x := r.expr() + + switch op { + case ir.OADDR: + return typecheck.Expr(typecheck.NodAddrAt(pos, x)) + case ir.ODEREF: + return typecheck.Expr(ir.NewStarExpr(pos, x)) + } + return typecheck.Expr(ir.NewUnaryExpr(pos, op, x)) + + case exprBinaryOp: + op := r.op() + x := r.expr() + pos := r.pos() + y := r.expr() + + switch op { + case ir.OANDAND, ir.OOROR: + return typecheck.Expr(ir.NewLogicalExpr(pos, op, x, y)) + case ir.OLSH, ir.ORSH: + // Untyped rhs of non-constant shift, e.g. x << 1.0. + // If we have a constant value, it must be an int >= 0. 
+ if ir.IsConstNode(y) { + val := constant.ToInt(y.Val()) + assert(val.Kind() == constant.Int && constant.Sign(val) >= 0) + } + } + return typecheck.Expr(ir.NewBinaryExpr(pos, op, x, y)) + + case exprRecv: + x := r.expr() + pos := r.pos() + for i, n := 0, r.Len(); i < n; i++ { + x = Implicit(typecheck.DotField(pos, x, r.Len())) + } + if r.Bool() { // needs deref + x = Implicit(Deref(pos, x.Type().Elem(), x)) + } else if r.Bool() { // needs addr + x = Implicit(Addr(pos, x)) + } + return x + + case exprCall: + var fun ir.Node + var args ir.Nodes + if r.Bool() { // method call + recv := r.expr() + _, method, dictPtr := r.methodExpr() + + if recv.Type().IsInterface() && method.Op() == ir.OMETHEXPR { + method := method.(*ir.SelectorExpr) + + // The compiler backend (e.g., devirtualization) handle + // OCALLINTER/ODOTINTER better than OCALLFUNC/OMETHEXPR for + // interface calls, so we prefer to continue constructing + // calls that way where possible. + // + // There are also corner cases where semantically it's perhaps + // significant; e.g., fixedbugs/issue15975.go, #38634, #52025. + + fun = typecheck.XDotMethod(method.Pos(), recv, method.Sel, true) + } else { + if recv.Type().IsInterface() { + // N.B., this happens currently for typeparam/issue51521.go + // and typeparam/typeswitch3.go. + if base.Flag.LowerM != 0 { + base.WarnfAt(method.Pos(), "imprecise interface call") + } + } + + fun = method + args.Append(recv) + } + if dictPtr != nil { + args.Append(dictPtr) + } + } else if r.Bool() { // call to instanced function + pos := r.pos() + _, shapedFn, dictPtr := r.funcInst(pos) + fun = shapedFn + args.Append(dictPtr) + } else { + fun = r.expr() + } + pos := r.pos() + args.Append(r.multiExpr()...) + dots := r.Bool() + n := typecheck.Call(pos, fun, args, dots) + switch n.Op() { + case ir.OAPPEND: + n := n.(*ir.CallExpr) + n.RType = r.rtype(pos) + // For append(a, b...), we don't need the implicit conversion. 
The typechecker already + // ensured that a and b are both slices with the same base type, or []byte and string. + if n.IsDDD { + if conv, ok := n.Args[1].(*ir.ConvExpr); ok && conv.Op() == ir.OCONVNOP && conv.Implicit() { + n.Args[1] = conv.X + } + } + case ir.OCOPY: + n := n.(*ir.BinaryExpr) + n.RType = r.rtype(pos) + case ir.ODELETE: + n := n.(*ir.CallExpr) + n.RType = r.rtype(pos) + case ir.OUNSAFESLICE: + n := n.(*ir.BinaryExpr) + n.RType = r.rtype(pos) + } + return n + + case exprMake: + pos := r.pos() + typ := r.exprType() + extra := r.exprs() + n := typecheck.Expr(ir.NewCallExpr(pos, ir.OMAKE, nil, append([]ir.Node{typ}, extra...))).(*ir.MakeExpr) + n.RType = r.rtype(pos) + return n + + case exprNew: + pos := r.pos() + typ := r.exprType() + return typecheck.Expr(ir.NewUnaryExpr(pos, ir.ONEW, typ)) + + case exprSizeof: + return ir.NewUintptr(r.pos(), r.typ().Size()) + + case exprAlignof: + return ir.NewUintptr(r.pos(), r.typ().Alignment()) + + case exprOffsetof: + pos := r.pos() + typ := r.typ() + types.CalcSize(typ) + + var offset int64 + for i := r.Len(); i >= 0; i-- { + field := typ.Field(r.Len()) + offset += field.Offset + typ = field.Type + } + + return ir.NewUintptr(pos, offset) + + case exprReshape: + typ := r.typ() + x := r.expr() + + if types.IdenticalStrict(x.Type(), typ) { + return x + } + + // Comparison expressions are constructed as "untyped bool" still. + // + // TODO(mdempsky): It should be safe to reshape them here too, but + // maybe it's better to construct them with the proper type + // instead. + if x.Type() == types.UntypedBool && typ.IsBoolean() { + return x + } + + base.AssertfAt(x.Type().HasShape() || typ.HasShape(), x.Pos(), "%L and %v are not shape types", x, typ) + base.AssertfAt(types.Identical(x.Type(), typ), x.Pos(), "%L is not shape-identical to %v", x, typ) + + // We use ir.HasUniquePos here as a check that x only appears once + // in the AST, so it's okay for us to call SetType without + // breaking any other uses of it. 
+ // + // Notably, any ONAMEs should already have the exactly right shape + // type and been caught by types.IdenticalStrict above. + base.AssertfAt(ir.HasUniquePos(x), x.Pos(), "cannot call SetType(%v) on %L", typ, x) + + if base.Debug.Reshape != 0 { + base.WarnfAt(x.Pos(), "reshaping %L to %v", x, typ) + } + + x.SetType(typ) + return x + + case exprConvert: + implicit := r.Bool() + typ := r.typ() + pos := r.pos() + typeWord, srcRType := r.convRTTI(pos) + dstTypeParam := r.Bool() + identical := r.Bool() + x := r.expr() + + // TODO(mdempsky): Stop constructing expressions of untyped type. + x = typecheck.DefaultLit(x, typ) + + ce := ir.NewConvExpr(pos, ir.OCONV, typ, x) + ce.TypeWord, ce.SrcRType = typeWord, srcRType + if implicit { + ce.SetImplicit(true) + } + n := typecheck.Expr(ce) + + // Conversions between non-identical, non-empty interfaces always + // requires a runtime call, even if they have identical underlying + // interfaces. This is because we create separate itab instances + // for each unique interface type, not merely each unique + // interface shape. + // + // However, due to shape types, typecheck.Expr might mistakenly + // think a conversion between two non-empty interfaces are + // identical and set ir.OCONVNOP, instead of ir.OCONVIFACE. To + // ensure we update the itab field appropriately, we force it to + // ir.OCONVIFACE instead when shape types are involved. + // + // TODO(mdempsky): Are there other places we might get this wrong? + // Should this be moved down into typecheck.{Assign,Convert}op? + // This would be a non-issue if itabs were unique for each + // *underlying* interface type instead. + if !identical { + if n, ok := n.(*ir.ConvExpr); ok && n.Op() == ir.OCONVNOP && n.Type().IsInterface() && !n.Type().IsEmptyInterface() && (n.Type().HasShape() || n.X.Type().HasShape()) { + n.SetOp(ir.OCONVIFACE) + } + } + + // spec: "If the type is a type parameter, the constant is converted + // into a non-constant value of the type parameter." 
+ if dstTypeParam && ir.IsConstNode(n) { + // Wrap in an OCONVNOP node to ensure result is non-constant. + n = Implicit(ir.NewConvExpr(pos, ir.OCONVNOP, n.Type(), n)) + n.SetTypecheck(1) + } + return n + + case exprRuntimeBuiltin: + builtin := typecheck.LookupRuntime(r.String()) + return builtin + } +} + +// funcInst reads an instantiated function reference, and returns +// three (possibly nil) expressions related to it: +// +// baseFn is always non-nil: it's either a function of the appropriate +// type already, or it has an extra dictionary parameter as the first +// parameter. +// +// If dictPtr is non-nil, then it's a dictionary argument that must be +// passed as the first argument to baseFn. +// +// If wrapperFn is non-nil, then it's either the same as baseFn (if +// dictPtr is nil), or it's semantically equivalent to currying baseFn +// to pass dictPtr. (wrapperFn is nil when dictPtr is an expression +// that needs to be computed dynamically.) +// +// For callers that are creating a call to the returned function, it's +// best to emit a call to baseFn, and include dictPtr in the arguments +// list as appropriate. +// +// For callers that want to return the function without invoking it, +// they may return wrapperFn if it's non-nil; but otherwise, they need +// to create their own wrapper. +func (r *reader) funcInst(pos src.XPos) (wrapperFn, baseFn, dictPtr ir.Node) { + // Like in methodExpr, I'm pretty sure this isn't needed. + var implicits []*types.Type + if r.dict != nil { + implicits = r.dict.targs + } + + if r.Bool() { // dynamic subdictionary + idx := r.Len() + info := r.dict.subdicts[idx] + explicits := r.p.typListIdx(info.explicits, r.dict) + + baseFn = r.p.objIdx(info.idx, implicits, explicits, true).(*ir.Name) + + // TODO(mdempsky): Is there a more robust way to get the + // dictionary pointer type here? 
+ dictPtrType := baseFn.Type().Param(0).Type + dictPtr = typecheck.Expr(ir.NewConvExpr(pos, ir.OCONVNOP, dictPtrType, r.dictWord(pos, r.dict.subdictsOffset()+idx))) + + return + } + + info := r.objInfo() + explicits := r.p.typListIdx(info.explicits, r.dict) + + wrapperFn = r.p.objIdx(info.idx, implicits, explicits, false).(*ir.Name) + baseFn = r.p.objIdx(info.idx, implicits, explicits, true).(*ir.Name) + + dictName := r.p.objDictName(info.idx, implicits, explicits) + dictPtr = typecheck.Expr(ir.NewAddrExpr(pos, dictName)) + + return +} + +func (pr *pkgReader) objDictName(idx pkgbits.Index, implicits, explicits []*types.Type) *ir.Name { + rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) + _, sym := rname.qualifiedIdent() + tag := pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) + + if tag == pkgbits.ObjStub { + assert(!sym.IsBlank()) + if pri, ok := objReader[sym]; ok { + return pri.pr.objDictName(pri.idx, nil, explicits) + } + base.Fatalf("unresolved stub: %v", sym) + } + + dict, err := pr.objDictIdx(sym, idx, implicits, explicits, false) + if err != nil { + base.Fatalf("%v", err) + } + + return pr.dictNameOf(dict) +} + +// curry returns a function literal that calls fun with arg0 and +// (optionally) arg1, accepting additional arguments to the function +// literal as necessary to satisfy fun's signature. +// +// If nilCheck is true and arg0 is an interface value, then it's +// checked to be non-nil as an initial step at the point of evaluating +// the function literal itself. 
+func (r *reader) curry(origPos src.XPos, ifaceHack bool, fun ir.Node, arg0, arg1 ir.Node) ir.Node { + var captured ir.Nodes + captured.Append(fun, arg0) + if arg1 != nil { + captured.Append(arg1) + } + + params, results := syntheticSig(fun.Type()) + params = params[len(captured)-1:] // skip curried parameters + typ := types.NewSignature(nil, params, results) + + addBody := func(pos src.XPos, r *reader, captured []ir.Node) { + fun := captured[0] + + var args ir.Nodes + args.Append(captured[1:]...) + args.Append(r.syntheticArgs()...) + + r.syntheticTailCall(pos, fun, args) + } + + return r.syntheticClosure(origPos, typ, ifaceHack, captured, addBody) +} + +// methodExprWrap returns a function literal that changes method's +// first parameter's type to recv, and uses implicits/deref/addr to +// select the appropriate receiver parameter to pass to method. +func (r *reader) methodExprWrap(origPos src.XPos, recv *types.Type, implicits []int, deref, addr bool, method, dictPtr ir.Node) ir.Node { + var captured ir.Nodes + captured.Append(method) + + params, results := syntheticSig(method.Type()) + + // Change first parameter to recv. + params[0].Type = recv + + // If we have a dictionary pointer argument to pass, then omit the + // underlying method expression's dictionary parameter from the + // returned signature too. + if dictPtr != nil { + captured.Append(dictPtr) + params = append(params[:1], params[2:]...) + } + + typ := types.NewSignature(nil, params, results) + + addBody := func(pos src.XPos, r *reader, captured []ir.Node) { + fn := captured[0] + args := r.syntheticArgs() + + // Rewrite first argument based on implicits/deref/addr. + { + arg := args[0] + for _, ix := range implicits { + arg = Implicit(typecheck.DotField(pos, arg, ix)) + } + if deref { + arg = Implicit(Deref(pos, arg.Type().Elem(), arg)) + } else if addr { + arg = Implicit(Addr(pos, arg)) + } + args[0] = arg + } + + // Insert dictionary argument, if provided. 
+ if dictPtr != nil { + newArgs := make([]ir.Node, len(args)+1) + newArgs[0] = args[0] + newArgs[1] = captured[1] + copy(newArgs[2:], args[1:]) + args = newArgs + } + + r.syntheticTailCall(pos, fn, args) + } + + return r.syntheticClosure(origPos, typ, false, captured, addBody) +} + +// syntheticClosure constructs a synthetic function literal for +// currying dictionary arguments. origPos is the position used for the +// closure, which must be a non-inlined position. typ is the function +// literal's signature type. +// +// captures is a list of expressions that need to be evaluated at the +// point of function literal evaluation and captured by the function +// literal. If ifaceHack is true and captures[1] is an interface type, +// it's checked to be non-nil after evaluation. +// +// addBody is a callback function to populate the function body. The +// list of captured values passed back has the captured variables for +// use within the function literal, corresponding to the expressions +// in captures. +func (r *reader) syntheticClosure(origPos src.XPos, typ *types.Type, ifaceHack bool, captures ir.Nodes, addBody func(pos src.XPos, r *reader, captured []ir.Node)) ir.Node { + // isSafe reports whether n is an expression that we can safely + // defer to evaluating inside the closure instead, to avoid storing + // them into the closure. + // + // In practice this is always (and only) the wrappee function. 
+ isSafe := func(n ir.Node) bool { + if n.Op() == ir.ONAME && n.(*ir.Name).Class == ir.PFUNC { + return true + } + if n.Op() == ir.OMETHEXPR { + return true + } + + return false + } + + fn := r.inlClosureFunc(origPos, typ) + fn.SetWrapper(true) + + clo := fn.OClosure + inlPos := clo.Pos() + + var init ir.Nodes + for i, n := range captures { + if isSafe(n) { + continue // skip capture; can reference directly + } + + tmp := r.tempCopy(inlPos, n, &init) + ir.NewClosureVar(origPos, fn, tmp) + + // We need to nil check interface receivers at the point of method + // value evaluation, ugh. + if ifaceHack && i == 1 && n.Type().IsInterface() { + check := ir.NewUnaryExpr(inlPos, ir.OCHECKNIL, ir.NewUnaryExpr(inlPos, ir.OITAB, tmp)) + init.Append(typecheck.Stmt(check)) + } + } + + pri := pkgReaderIndex{synthetic: func(pos src.XPos, r *reader) { + captured := make([]ir.Node, len(captures)) + next := 0 + for i, n := range captures { + if isSafe(n) { + captured[i] = n + } else { + captured[i] = r.closureVars[next] + next++ + } + } + assert(next == len(r.closureVars)) + + addBody(origPos, r, captured) + }} + bodyReader[fn] = pri + pri.funcBody(fn) + + return ir.InitExpr(init, clo) +} + +// syntheticSig duplicates and returns the params and results lists +// for sig, but renaming anonymous parameters so they can be assigned +// ir.Names. +func syntheticSig(sig *types.Type) (params, results []*types.Field) { + clone := func(params []*types.Field) []*types.Field { + res := make([]*types.Field, len(params)) + for i, param := range params { + // TODO(mdempsky): It would be nice to preserve the original + // parameter positions here instead, but at least + // typecheck.NewMethodType replaces them with base.Pos, making + // them useless. Worse, the positions copied from base.Pos may + // have inlining contexts, which we definitely don't want here + // (e.g., #54625). 
+ res[i] = types.NewField(base.AutogeneratedPos, param.Sym, param.Type) + res[i].SetIsDDD(param.IsDDD()) + } + return res + } + + return clone(sig.Params()), clone(sig.Results()) +} + +func (r *reader) optExpr() ir.Node { + if r.Bool() { + return r.expr() + } + return nil +} + +// methodExpr reads a method expression reference, and returns three +// (possibly nil) expressions related to it: +// +// baseFn is always non-nil: it's either a function of the appropriate +// type already, or it has an extra dictionary parameter as the second +// parameter (i.e., immediately after the promoted receiver +// parameter). +// +// If dictPtr is non-nil, then it's a dictionary argument that must be +// passed as the second argument to baseFn. +// +// If wrapperFn is non-nil, then it's either the same as baseFn (if +// dictPtr is nil), or it's semantically equivalent to currying baseFn +// to pass dictPtr. (wrapperFn is nil when dictPtr is an expression +// that needs to be computed dynamically.) +// +// For callers that are creating a call to the returned method, it's +// best to emit a call to baseFn, and include dictPtr in the arguments +// list as appropriate. +// +// For callers that want to return a method expression without +// invoking it, they may return wrapperFn if it's non-nil; but +// otherwise, they need to create their own wrapper. +func (r *reader) methodExpr() (wrapperFn, baseFn, dictPtr ir.Node) { + recv := r.typ() + sig0 := r.typ() + pos := r.pos() + sym := r.selector() + + // Signature type to return (i.e., recv prepended to the method's + // normal parameters list). + sig := typecheck.NewMethodType(sig0, recv) + + if r.Bool() { // type parameter method expression + idx := r.Len() + word := r.dictWord(pos, r.dict.typeParamMethodExprsOffset()+idx) + + // TODO(mdempsky): If the type parameter was instantiated with an + // interface type (i.e., embed.IsInterface()), then we could + // return the OMETHEXPR instead and save an indirection. 
+ + // We wrote the method expression's entry point PC into the + // dictionary, but for Go `func` values we need to return a + // closure (i.e., pointer to a structure with the PC as the first + // field). Because method expressions don't have any closure + // variables, we pun the dictionary entry as the closure struct. + fn := typecheck.Expr(ir.NewConvExpr(pos, ir.OCONVNOP, sig, ir.NewAddrExpr(pos, word))) + return fn, fn, nil + } + + // TODO(mdempsky): I'm pretty sure this isn't needed: implicits is + // only relevant to locally defined types, but they can't have + // (non-promoted) methods. + var implicits []*types.Type + if r.dict != nil { + implicits = r.dict.targs + } + + if r.Bool() { // dynamic subdictionary + idx := r.Len() + info := r.dict.subdicts[idx] + explicits := r.p.typListIdx(info.explicits, r.dict) + + shapedObj := r.p.objIdx(info.idx, implicits, explicits, true).(*ir.Name) + shapedFn := shapedMethodExpr(pos, shapedObj, sym) + + // TODO(mdempsky): Is there a more robust way to get the + // dictionary pointer type here? + dictPtrType := shapedFn.Type().Param(1).Type + dictPtr := typecheck.Expr(ir.NewConvExpr(pos, ir.OCONVNOP, dictPtrType, r.dictWord(pos, r.dict.subdictsOffset()+idx))) + + return nil, shapedFn, dictPtr + } + + if r.Bool() { // static dictionary + info := r.objInfo() + explicits := r.p.typListIdx(info.explicits, r.dict) + + shapedObj := r.p.objIdx(info.idx, implicits, explicits, true).(*ir.Name) + shapedFn := shapedMethodExpr(pos, shapedObj, sym) + + dict := r.p.objDictName(info.idx, implicits, explicits) + dictPtr := typecheck.Expr(ir.NewAddrExpr(pos, dict)) + + // Check that dictPtr matches shapedFn's dictionary parameter. + if !types.Identical(dictPtr.Type(), shapedFn.Type().Param(1).Type) { + base.FatalfAt(pos, "dict %L, but shaped method %L", dict, shapedFn) + } + + // For statically known instantiations, we can take advantage of + // the stenciled wrapper. 
+ base.AssertfAt(!recv.HasShape(), pos, "shaped receiver %v", recv) + wrapperFn := typecheck.NewMethodExpr(pos, recv, sym) + base.AssertfAt(types.Identical(sig, wrapperFn.Type()), pos, "wrapper %L does not have type %v", wrapperFn, sig) + + return wrapperFn, shapedFn, dictPtr + } + + // Simple method expression; no dictionary needed. + base.AssertfAt(!recv.HasShape() || recv.IsInterface(), pos, "shaped receiver %v", recv) + fn := typecheck.NewMethodExpr(pos, recv, sym) + return fn, fn, nil +} + +// shapedMethodExpr returns the specified method on the given shaped +// type. +func shapedMethodExpr(pos src.XPos, obj *ir.Name, sym *types.Sym) *ir.SelectorExpr { + assert(obj.Op() == ir.OTYPE) + + typ := obj.Type() + assert(typ.HasShape()) + + method := func() *types.Field { + for _, method := range typ.Methods() { + if method.Sym == sym { + return method + } + } + + base.FatalfAt(pos, "failed to find method %v in shaped type %v", sym, typ) + panic("unreachable") + }() + + // Construct an OMETHEXPR node. + recv := method.Type.Recv().Type + return typecheck.NewMethodExpr(pos, recv, sym) +} + +func (r *reader) multiExpr() []ir.Node { + r.Sync(pkgbits.SyncMultiExpr) + + if r.Bool() { // N:1 + pos := r.pos() + expr := r.expr() + + results := make([]ir.Node, r.Len()) + as := ir.NewAssignListStmt(pos, ir.OAS2, nil, []ir.Node{expr}) + as.Def = true + for i := range results { + tmp := r.temp(pos, r.typ()) + as.PtrInit().Append(ir.NewDecl(pos, ir.ODCL, tmp)) + as.Lhs.Append(tmp) + + res := ir.Node(tmp) + if r.Bool() { + n := ir.NewConvExpr(pos, ir.OCONV, r.typ(), res) + n.TypeWord, n.SrcRType = r.convRTTI(pos) + n.SetImplicit(true) + res = typecheck.Expr(n) + } + results[i] = res + } + + // TODO(mdempsky): Could use ir.InlinedCallExpr instead? 
+ results[0] = ir.InitExpr([]ir.Node{typecheck.Stmt(as)}, results[0]) + return results + } + + // N:N + exprs := make([]ir.Node, r.Len()) + if len(exprs) == 0 { + return nil + } + for i := range exprs { + exprs[i] = r.expr() + } + return exprs +} + +// temp returns a new autotemp of the specified type. +func (r *reader) temp(pos src.XPos, typ *types.Type) *ir.Name { + return typecheck.TempAt(pos, r.curfn, typ) +} + +// tempCopy declares and returns a new autotemp initialized to the +// value of expr. +func (r *reader) tempCopy(pos src.XPos, expr ir.Node, init *ir.Nodes) *ir.Name { + tmp := r.temp(pos, expr.Type()) + + init.Append(typecheck.Stmt(ir.NewDecl(pos, ir.ODCL, tmp))) + + assign := ir.NewAssignStmt(pos, tmp, expr) + assign.Def = true + init.Append(typecheck.Stmt(ir.NewAssignStmt(pos, tmp, expr))) + + tmp.Defn = assign + + return tmp +} + +func (r *reader) compLit() ir.Node { + r.Sync(pkgbits.SyncCompLit) + pos := r.pos() + typ0 := r.typ() + + typ := typ0 + if typ.IsPtr() { + typ = typ.Elem() + } + if typ.Kind() == types.TFORW { + base.FatalfAt(pos, "unresolved composite literal type: %v", typ) + } + var rtype ir.Node + if typ.IsMap() { + rtype = r.rtype(pos) + } + isStruct := typ.Kind() == types.TSTRUCT + + elems := make([]ir.Node, r.Len()) + for i := range elems { + elemp := &elems[i] + + if isStruct { + sk := ir.NewStructKeyExpr(r.pos(), typ.Field(r.Len()), nil) + *elemp, elemp = sk, &sk.Value + } else if r.Bool() { + kv := ir.NewKeyExpr(r.pos(), r.expr(), nil) + *elemp, elemp = kv, &kv.Value + } + + *elemp = wrapName(r.pos(), r.expr()) + } + + lit := typecheck.Expr(ir.NewCompLitExpr(pos, ir.OCOMPLIT, typ, elems)) + if rtype != nil { + lit := lit.(*ir.CompLitExpr) + lit.RType = rtype + } + if typ0.IsPtr() { + lit = typecheck.Expr(typecheck.NodAddrAt(pos, lit)) + lit.SetType(typ0) + } + return lit +} + +func wrapName(pos src.XPos, x ir.Node) ir.Node { + // These nodes do not carry line numbers. + // Introduce a wrapper node to give them the correct line. 
+ switch x.Op() { + case ir.OTYPE, ir.OLITERAL: + if x.Sym() == nil { + break + } + fallthrough + case ir.ONAME, ir.ONONAME, ir.ONIL: + p := ir.NewParenExpr(pos, x) + p.SetImplicit(true) + return p + } + return x +} + +func (r *reader) funcLit() ir.Node { + r.Sync(pkgbits.SyncFuncLit) + + // The underlying function declaration (including its parameters' + // positions, if any) need to remain the original, uninlined + // positions. This is because we track inlining-context on nodes so + // we can synthesize the extra implied stack frames dynamically when + // generating tracebacks, whereas those stack frames don't make + // sense *within* the function literal. (Any necessary inlining + // adjustments will have been applied to the call expression + // instead.) + // + // This is subtle, and getting it wrong leads to cycles in the + // inlining tree, which lead to infinite loops during stack + // unwinding (#46234, #54625). + // + // Note that we *do* want the inline-adjusted position for the + // OCLOSURE node, because that position represents where any heap + // allocation of the closure is credited (#49171). + r.suppressInlPos++ + origPos := r.pos() + sig := r.signature(nil) + r.suppressInlPos-- + + fn := r.inlClosureFunc(origPos, sig) + + fn.ClosureVars = make([]*ir.Name, 0, r.Len()) + for len(fn.ClosureVars) < cap(fn.ClosureVars) { + // TODO(mdempsky): I think these should be original positions too + // (i.e., not inline-adjusted). + ir.NewClosureVar(r.pos(), fn, r.useLocal()) + } + if param := r.dictParam; param != nil { + // If we have a dictionary parameter, capture it too. For + // simplicity, we capture it last and unconditionally. + ir.NewClosureVar(param.Pos(), fn, param) + } + + r.addBody(fn, nil) + + // un-hide closures belong to init function. 
+ if (r.curfn.IsPackageInit() || strings.HasPrefix(r.curfn.Sym().Name, "init.")) && ir.IsTrivialClosure(fn.OClosure) { + fn.SetIsHiddenClosure(false) + } + + return fn.OClosure +} + +// inlClosureFunc constructs a new closure function, but correctly +// handles inlining. +func (r *reader) inlClosureFunc(origPos src.XPos, sig *types.Type) *ir.Func { + curfn := r.inlCaller + if curfn == nil { + curfn = r.curfn + } + + // TODO(mdempsky): Remove hard-coding of typecheck.Target. + return ir.NewClosureFunc(origPos, r.inlPos(origPos), ir.OCLOSURE, sig, curfn, typecheck.Target) +} + +func (r *reader) exprList() []ir.Node { + r.Sync(pkgbits.SyncExprList) + return r.exprs() +} + +func (r *reader) exprs() []ir.Node { + r.Sync(pkgbits.SyncExprs) + nodes := make([]ir.Node, r.Len()) + if len(nodes) == 0 { + return nil // TODO(mdempsky): Unclear if this matters. + } + for i := range nodes { + nodes[i] = r.expr() + } + return nodes +} + +// dictWord returns an expression to return the specified +// uintptr-typed word from the dictionary parameter. +func (r *reader) dictWord(pos src.XPos, idx int) ir.Node { + base.AssertfAt(r.dictParam != nil, pos, "expected dictParam in %v", r.curfn) + return typecheck.Expr(ir.NewIndexExpr(pos, r.dictParam, ir.NewInt(pos, int64(idx)))) +} + +// rttiWord is like dictWord, but converts it to *byte (the type used +// internally to represent *runtime._type and *runtime.itab). +func (r *reader) rttiWord(pos src.XPos, idx int) ir.Node { + return typecheck.Expr(ir.NewConvExpr(pos, ir.OCONVNOP, types.NewPtr(types.Types[types.TUINT8]), r.dictWord(pos, idx))) +} + +// rtype reads a type reference from the element bitstream, and +// returns an expression of type *runtime._type representing that +// type. 
+func (r *reader) rtype(pos src.XPos) ir.Node { + _, rtype := r.rtype0(pos) + return rtype +} + +func (r *reader) rtype0(pos src.XPos) (typ *types.Type, rtype ir.Node) { + r.Sync(pkgbits.SyncRType) + if r.Bool() { // derived type + idx := r.Len() + info := r.dict.rtypes[idx] + typ = r.p.typIdx(info, r.dict, true) + rtype = r.rttiWord(pos, r.dict.rtypesOffset()+idx) + return + } + + typ = r.typ() + rtype = reflectdata.TypePtrAt(pos, typ) + return +} + +// varDictIndex populates name.DictIndex if name is a derived type. +func (r *reader) varDictIndex(name *ir.Name) { + if r.Bool() { + idx := 1 + r.dict.rtypesOffset() + r.Len() + if int(uint16(idx)) != idx { + base.FatalfAt(name.Pos(), "DictIndex overflow for %v: %v", name, idx) + } + name.DictIndex = uint16(idx) + } +} + +// itab returns a (typ, iface) pair of types. +// +// typRType and ifaceRType are expressions that evaluate to the +// *runtime._type for typ and iface, respectively. +// +// If typ is a concrete type and iface is a non-empty interface type, +// then itab is an expression that evaluates to the *runtime.itab for +// the pair. Otherwise, itab is nil. +func (r *reader) itab(pos src.XPos) (typ *types.Type, typRType ir.Node, iface *types.Type, ifaceRType ir.Node, itab ir.Node) { + typ, typRType = r.rtype0(pos) + iface, ifaceRType = r.rtype0(pos) + + idx := -1 + if r.Bool() { + idx = r.Len() + } + + if !typ.IsInterface() && iface.IsInterface() && !iface.IsEmptyInterface() { + if idx >= 0 { + itab = r.rttiWord(pos, r.dict.itabsOffset()+idx) + } else { + base.AssertfAt(!typ.HasShape(), pos, "%v is a shape type", typ) + base.AssertfAt(!iface.HasShape(), pos, "%v is a shape type", iface) + + lsym := reflectdata.ITabLsym(typ, iface) + itab = typecheck.LinksymAddr(pos, lsym, types.Types[types.TUINT8]) + } + } + + return +} + +// convRTTI returns expressions appropriate for populating an +// ir.ConvExpr's TypeWord and SrcRType fields, respectively. 
+func (r *reader) convRTTI(pos src.XPos) (typeWord, srcRType ir.Node) { + r.Sync(pkgbits.SyncConvRTTI) + src, srcRType0, dst, dstRType, itab := r.itab(pos) + if !dst.IsInterface() { + return + } + + // See reflectdata.ConvIfaceTypeWord. + switch { + case dst.IsEmptyInterface(): + if !src.IsInterface() { + typeWord = srcRType0 // direct eface construction + } + case !src.IsInterface(): + typeWord = itab // direct iface construction + default: + typeWord = dstRType // convI2I + } + + // See reflectdata.ConvIfaceSrcRType. + if !src.IsInterface() { + srcRType = srcRType0 + } + + return +} + +func (r *reader) exprType() ir.Node { + r.Sync(pkgbits.SyncExprType) + pos := r.pos() + + var typ *types.Type + var rtype, itab ir.Node + + if r.Bool() { + typ, rtype, _, _, itab = r.itab(pos) + if !typ.IsInterface() { + rtype = nil // TODO(mdempsky): Leave set? + } + } else { + typ, rtype = r.rtype0(pos) + + if !r.Bool() { // not derived + return ir.TypeNode(typ) + } + } + + dt := ir.NewDynamicType(pos, rtype) + dt.ITab = itab + return typed(typ, dt) +} + +func (r *reader) op() ir.Op { + r.Sync(pkgbits.SyncOp) + return ir.Op(r.Len()) +} + +// @@@ Package initialization + +func (r *reader) pkgInit(self *types.Pkg, target *ir.Package) { + cgoPragmas := make([][]string, r.Len()) + for i := range cgoPragmas { + cgoPragmas[i] = r.Strings() + } + target.CgoPragmas = cgoPragmas + + r.pkgInitOrder(target) + + r.pkgDecls(target) + + r.Sync(pkgbits.SyncEOF) +} + +// pkgInitOrder creates a synthetic init function to handle any +// package-scope initialization statements. +func (r *reader) pkgInitOrder(target *ir.Package) { + initOrder := make([]ir.Node, r.Len()) + if len(initOrder) == 0 { + return + } + + // Make a function that contains all the initialization statements. 
+ pos := base.AutogeneratedPos + base.Pos = pos + + fn := ir.NewFunc(pos, pos, typecheck.Lookup("init"), types.NewSignature(nil, nil, nil)) + fn.SetIsPackageInit(true) + fn.SetInlinabilityChecked(true) // suppress useless "can inline" diagnostics + + typecheck.DeclFunc(fn) + r.curfn = fn + + for i := range initOrder { + lhs := make([]ir.Node, r.Len()) + for j := range lhs { + lhs[j] = r.obj() + } + rhs := r.expr() + pos := lhs[0].Pos() + + var as ir.Node + if len(lhs) == 1 { + as = typecheck.Stmt(ir.NewAssignStmt(pos, lhs[0], rhs)) + } else { + as = typecheck.Stmt(ir.NewAssignListStmt(pos, ir.OAS2, lhs, []ir.Node{rhs})) + } + + for _, v := range lhs { + v.(*ir.Name).Defn = as + } + + initOrder[i] = as + } + + fn.Body = initOrder + + typecheck.FinishFuncBody() + r.curfn = nil + r.locals = nil + + // Outline (if legal/profitable) global map inits. + staticinit.OutlineMapInits(fn) + + target.Inits = append(target.Inits, fn) +} + +func (r *reader) pkgDecls(target *ir.Package) { + r.Sync(pkgbits.SyncDecls) + for { + switch code := codeDecl(r.Code(pkgbits.SyncDecl)); code { + default: + panic(fmt.Sprintf("unhandled decl: %v", code)) + + case declEnd: + return + + case declFunc: + names := r.pkgObjs(target) + assert(len(names) == 1) + target.Funcs = append(target.Funcs, names[0].Func) + + case declMethod: + typ := r.typ() + sym := r.selector() + + method := typecheck.Lookdot1(nil, sym, typ, typ.Methods(), 0) + target.Funcs = append(target.Funcs, method.Nname.(*ir.Name).Func) + + case declVar: + names := r.pkgObjs(target) + + if n := r.Len(); n > 0 { + assert(len(names) == 1) + embeds := make([]ir.Embed, n) + for i := range embeds { + embeds[i] = ir.Embed{Pos: r.pos(), Patterns: r.Strings()} + } + names[0].Embed = &embeds + target.Embeds = append(target.Embeds, names[0]) + } + + case declOther: + r.pkgObjs(target) + } + } +} + +func (r *reader) pkgObjs(target *ir.Package) []*ir.Name { + r.Sync(pkgbits.SyncDeclNames) + nodes := make([]*ir.Name, r.Len()) + for i := range 
nodes { + r.Sync(pkgbits.SyncDeclName) + + name := r.obj().(*ir.Name) + nodes[i] = name + + sym := name.Sym() + if sym.IsBlank() { + continue + } + + switch name.Class { + default: + base.FatalfAt(name.Pos(), "unexpected class: %v", name.Class) + + case ir.PEXTERN: + target.Externs = append(target.Externs, name) + + case ir.PFUNC: + assert(name.Type().Recv() == nil) + + // TODO(mdempsky): Cleaner way to recognize init? + if strings.HasPrefix(sym.Name, "init.") { + target.Inits = append(target.Inits, name.Func) + } + } + + if base.Ctxt.Flag_dynlink && types.LocalPkg.Name == "main" && types.IsExported(sym.Name) && name.Op() == ir.ONAME { + assert(!sym.OnExportList()) + target.PluginExports = append(target.PluginExports, name) + sym.SetOnExportList(true) + } + + if base.Flag.AsmHdr != "" && (name.Op() == ir.OLITERAL || name.Op() == ir.OTYPE) { + assert(!sym.Asm()) + target.AsmHdrDecls = append(target.AsmHdrDecls, name) + sym.SetAsm(true) + } + } + + return nodes +} + +// @@@ Inlining + +// unifiedHaveInlineBody reports whether we have the function body for +// fn, so we can inline it. +func unifiedHaveInlineBody(fn *ir.Func) bool { + if fn.Inl == nil { + return false + } + + _, ok := bodyReaderFor(fn) + return ok +} + +var inlgen = 0 + +// unifiedInlineCall implements inline.NewInline by re-reading the function +// body from its Unified IR export data. 
+func unifiedInlineCall(callerfn *ir.Func, call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr { + pri, ok := bodyReaderFor(fn) + if !ok { + base.FatalfAt(call.Pos(), "cannot inline call to %v: missing inline body", fn) + } + + if !fn.Inl.HaveDcl { + expandInline(fn, pri) + } + + r := pri.asReader(pkgbits.RelocBody, pkgbits.SyncFuncBody) + + tmpfn := ir.NewFunc(fn.Pos(), fn.Nname.Pos(), callerfn.Sym(), fn.Type()) + + r.curfn = tmpfn + + r.inlCaller = callerfn + r.inlCall = call + r.inlFunc = fn + r.inlTreeIndex = inlIndex + r.inlPosBases = make(map[*src.PosBase]*src.PosBase) + r.funarghack = true + + r.closureVars = make([]*ir.Name, len(r.inlFunc.ClosureVars)) + for i, cv := range r.inlFunc.ClosureVars { + // TODO(mdempsky): It should be possible to support this case, but + // for now we rely on the inliner avoiding it. + if cv.Outer.Curfn != callerfn { + base.FatalfAt(call.Pos(), "inlining closure call across frames") + } + r.closureVars[i] = cv.Outer + } + if len(r.closureVars) != 0 && r.hasTypeParams() { + r.dictParam = r.closureVars[len(r.closureVars)-1] // dictParam is last; see reader.funcLit + } + + r.declareParams() + + var inlvars, retvars []*ir.Name + { + sig := r.curfn.Type() + endParams := sig.NumRecvs() + sig.NumParams() + endResults := endParams + sig.NumResults() + + inlvars = r.curfn.Dcl[:endParams] + retvars = r.curfn.Dcl[endParams:endResults] + } + + r.delayResults = fn.Inl.CanDelayResults + + r.retlabel = typecheck.AutoLabel(".i") + inlgen++ + + init := ir.TakeInit(call) + + // For normal function calls, the function callee expression + // may contain side effects. Make sure to preserve these, + // if necessary (#42703). + if call.Op() == ir.OCALLFUNC { + inline.CalleeEffects(&init, call.Fun) + } + + var args ir.Nodes + if call.Op() == ir.OCALLMETH { + base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck") + } + args.Append(call.Args...) + + // Create assignment to declare and initialize inlvars. 
+ as2 := ir.NewAssignListStmt(call.Pos(), ir.OAS2, ir.ToNodes(inlvars), args) + as2.Def = true + var as2init ir.Nodes + for _, name := range inlvars { + if ir.IsBlank(name) { + continue + } + // TODO(mdempsky): Use inlined position of name.Pos() instead? + as2init.Append(ir.NewDecl(call.Pos(), ir.ODCL, name)) + name.Defn = as2 + } + as2.SetInit(as2init) + init.Append(typecheck.Stmt(as2)) + + if !r.delayResults { + // If not delaying retvars, declare and zero initialize the + // result variables now. + for _, name := range retvars { + // TODO(mdempsky): Use inlined position of name.Pos() instead? + init.Append(ir.NewDecl(call.Pos(), ir.ODCL, name)) + ras := ir.NewAssignStmt(call.Pos(), name, nil) + init.Append(typecheck.Stmt(ras)) + } + } + + // Add an inline mark just before the inlined body. + // This mark is inline in the code so that it's a reasonable spot + // to put a breakpoint. Not sure if that's really necessary or not + // (in which case it could go at the end of the function instead). + // Note issue 28603. + init.Append(ir.NewInlineMarkStmt(call.Pos().WithIsStmt(), int64(r.inlTreeIndex))) + + ir.WithFunc(r.curfn, func() { + if !r.syntheticBody(call.Pos()) { + assert(r.Bool()) // have body + + r.curfn.Body = r.stmts() + r.curfn.Endlineno = r.pos() + } + + // TODO(mdempsky): This shouldn't be necessary. Inlining might + // read in new function/method declarations, which could + // potentially be recursively inlined themselves; but we shouldn't + // need to read in the non-inlined bodies for the declarations + // themselves. But currently it's an easy fix to #50552. + readBodies(typecheck.Target, true) + + // Replace any "return" statements within the function body. 
+ var edit func(ir.Node) ir.Node + edit = func(n ir.Node) ir.Node { + if ret, ok := n.(*ir.ReturnStmt); ok { + n = typecheck.Stmt(r.inlReturn(ret, retvars)) + } + ir.EditChildren(n, edit) + return n + } + edit(r.curfn) + }) + + body := ir.Nodes(r.curfn.Body) + + // Reparent any declarations into the caller function. + for _, name := range r.curfn.Dcl { + name.Curfn = callerfn + + if name.Class != ir.PAUTO { + name.SetPos(r.inlPos(name.Pos())) + name.SetInlFormal(true) + name.Class = ir.PAUTO + } else { + name.SetInlLocal(true) + } + } + callerfn.Dcl = append(callerfn.Dcl, r.curfn.Dcl...) + + body.Append(ir.NewLabelStmt(call.Pos(), r.retlabel)) + + res := ir.NewInlinedCallExpr(call.Pos(), body, ir.ToNodes(retvars)) + res.SetInit(init) + res.SetType(call.Type()) + res.SetTypecheck(1) + + // Inlining shouldn't add any functions to todoBodies. + assert(len(todoBodies) == 0) + + return res +} + +// inlReturn returns a statement that can substitute for the given +// return statement when inlining. +func (r *reader) inlReturn(ret *ir.ReturnStmt, retvars []*ir.Name) *ir.BlockStmt { + pos := r.inlCall.Pos() + + block := ir.TakeInit(ret) + + if results := ret.Results; len(results) != 0 { + assert(len(retvars) == len(results)) + + as2 := ir.NewAssignListStmt(pos, ir.OAS2, ir.ToNodes(retvars), ret.Results) + + if r.delayResults { + for _, name := range retvars { + // TODO(mdempsky): Use inlined position of name.Pos() instead? + block.Append(ir.NewDecl(pos, ir.ODCL, name)) + name.Defn = as2 + } + } + + block.Append(as2) + } + + block.Append(ir.NewBranchStmt(pos, ir.OGOTO, r.retlabel)) + return ir.NewBlockStmt(pos, block) +} + +// expandInline reads in an extra copy of IR to populate +// fn.Inl.Dcl. +func expandInline(fn *ir.Func, pri pkgReaderIndex) { + // TODO(mdempsky): Remove this function. It's currently needed by + // dwarfgen/dwarf.go:preInliningDcls, which requires fn.Inl.Dcl to + // create abstract function DIEs. 
But we should be able to provide it + // with the same information some other way. + + fndcls := len(fn.Dcl) + topdcls := len(typecheck.Target.Funcs) + + tmpfn := ir.NewFunc(fn.Pos(), fn.Nname.Pos(), fn.Sym(), fn.Type()) + tmpfn.ClosureVars = fn.ClosureVars + + { + r := pri.asReader(pkgbits.RelocBody, pkgbits.SyncFuncBody) + + // Don't change parameter's Sym/Nname fields. + r.funarghack = true + + r.funcBody(tmpfn) + } + + // Move tmpfn's params to fn.Inl.Dcl, and reparent under fn. + for _, name := range tmpfn.Dcl { + name.Curfn = fn + } + fn.Inl.Dcl = tmpfn.Dcl + fn.Inl.HaveDcl = true + + // Double check that we didn't change fn.Dcl by accident. + assert(fndcls == len(fn.Dcl)) + + // typecheck.Stmts may have added function literals to + // typecheck.Target.Decls. Remove them again so we don't risk trying + // to compile them multiple times. + typecheck.Target.Funcs = typecheck.Target.Funcs[:topdcls] +} + +// usedLocals returns a set of local variables that are used within body. +func usedLocals(body []ir.Node) ir.NameSet { + var used ir.NameSet + ir.VisitList(body, func(n ir.Node) { + if n, ok := n.(*ir.Name); ok && n.Op() == ir.ONAME && n.Class == ir.PAUTO { + used.Add(n) + } + }) + return used +} + +// @@@ Method wrappers + +// needWrapperTypes lists types for which we may need to generate +// method wrappers. +var needWrapperTypes []*types.Type + +// haveWrapperTypes lists types for which we know we already have +// method wrappers, because we found the type in an imported package. +var haveWrapperTypes []*types.Type + +// needMethodValueWrappers lists methods for which we may need to +// generate method value wrappers. +var needMethodValueWrappers []methodValueWrapper + +// haveMethodValueWrappers lists methods for which we know we already +// have method value wrappers, because we found it in an imported +// package. 
+var haveMethodValueWrappers []methodValueWrapper + +type methodValueWrapper struct { + rcvr *types.Type + method *types.Field +} + +func (r *reader) needWrapper(typ *types.Type) { + if typ.IsPtr() { + return + } + + // If a type was found in an imported package, then we can assume + // that package (or one of its transitive dependencies) already + // generated method wrappers for it. + if r.importedDef() { + haveWrapperTypes = append(haveWrapperTypes, typ) + } else { + needWrapperTypes = append(needWrapperTypes, typ) + } +} + +// importedDef reports whether r is reading from an imported and +// non-generic element. +// +// If a type was found in an imported package, then we can assume that +// package (or one of its transitive dependencies) already generated +// method wrappers for it. +// +// Exception: If we're instantiating an imported generic type or +// function, we might be instantiating it with type arguments not +// previously seen before. +// +// TODO(mdempsky): Distinguish when a generic function or type was +// instantiated in an imported package so that we can add types to +// haveWrapperTypes instead. 
+func (r *reader) importedDef() bool { + return r.p != localPkgReader && !r.hasTypeParams() +} + +func MakeWrappers(target *ir.Package) { + // always generate a wrapper for error.Error (#29304) + needWrapperTypes = append(needWrapperTypes, types.ErrorType) + + seen := make(map[string]*types.Type) + + for _, typ := range haveWrapperTypes { + wrapType(typ, target, seen, false) + } + haveWrapperTypes = nil + + for _, typ := range needWrapperTypes { + wrapType(typ, target, seen, true) + } + needWrapperTypes = nil + + for _, wrapper := range haveMethodValueWrappers { + wrapMethodValue(wrapper.rcvr, wrapper.method, target, false) + } + haveMethodValueWrappers = nil + + for _, wrapper := range needMethodValueWrappers { + wrapMethodValue(wrapper.rcvr, wrapper.method, target, true) + } + needMethodValueWrappers = nil +} + +func wrapType(typ *types.Type, target *ir.Package, seen map[string]*types.Type, needed bool) { + key := typ.LinkString() + if prev := seen[key]; prev != nil { + if !types.Identical(typ, prev) { + base.Fatalf("collision: types %v and %v have link string %q", typ, prev, key) + } + return + } + seen[key] = typ + + if !needed { + // Only called to add to 'seen'. + return + } + + if !typ.IsInterface() { + typecheck.CalcMethods(typ) + } + for _, meth := range typ.AllMethods() { + if meth.Sym.IsBlank() || !meth.IsMethod() { + base.FatalfAt(meth.Pos, "invalid method: %v", meth) + } + + methodWrapper(0, typ, meth, target) + + // For non-interface types, we also want *T wrappers. + if !typ.IsInterface() { + methodWrapper(1, typ, meth, target) + + // For not-in-heap types, *T is a scalar, not pointer shaped, + // so the interface wrappers use **T. 
+ if typ.NotInHeap() { + methodWrapper(2, typ, meth, target) + } + } + } +} + +func methodWrapper(derefs int, tbase *types.Type, method *types.Field, target *ir.Package) { + wrapper := tbase + for i := 0; i < derefs; i++ { + wrapper = types.NewPtr(wrapper) + } + + sym := ir.MethodSym(wrapper, method.Sym) + base.Assertf(!sym.Siggen(), "already generated wrapper %v", sym) + sym.SetSiggen(true) + + wrappee := method.Type.Recv().Type + if types.Identical(wrapper, wrappee) || + !types.IsMethodApplicable(wrapper, method) || + !reflectdata.NeedEmit(tbase) { + return + } + + // TODO(mdempsky): Use method.Pos instead? + pos := base.AutogeneratedPos + + fn := newWrapperFunc(pos, sym, wrapper, method) + + var recv ir.Node = fn.Nname.Type().Recv().Nname.(*ir.Name) + + // For simple *T wrappers around T methods, panicwrap produces a + // nicer panic message. + if wrapper.IsPtr() && types.Identical(wrapper.Elem(), wrappee) { + cond := ir.NewBinaryExpr(pos, ir.OEQ, recv, types.BuiltinPkg.Lookup("nil").Def.(ir.Node)) + then := []ir.Node{ir.NewCallExpr(pos, ir.OCALL, typecheck.LookupRuntime("panicwrap"), nil)} + fn.Body.Append(ir.NewIfStmt(pos, cond, then, nil)) + } + + // typecheck will add one implicit deref, if necessary, + // but not-in-heap types require more for their **T wrappers. + for i := 1; i < derefs; i++ { + recv = Implicit(ir.NewStarExpr(pos, recv)) + } + + addTailCall(pos, fn, recv, method) + + finishWrapperFunc(fn, target) +} + +func wrapMethodValue(recvType *types.Type, method *types.Field, target *ir.Package, needed bool) { + sym := ir.MethodSymSuffix(recvType, method.Sym, "-fm") + if sym.Uniq() { + return + } + sym.SetUniq(true) + + // TODO(mdempsky): Use method.Pos instead? + pos := base.AutogeneratedPos + + fn := newWrapperFunc(pos, sym, nil, method) + sym.Def = fn.Nname + + // Declare and initialize variable holding receiver. 
+ recv := ir.NewHiddenParam(pos, fn, typecheck.Lookup(".this"), recvType) + + if !needed { + return + } + + addTailCall(pos, fn, recv, method) + + finishWrapperFunc(fn, target) +} + +func newWrapperFunc(pos src.XPos, sym *types.Sym, wrapper *types.Type, method *types.Field) *ir.Func { + sig := newWrapperType(wrapper, method) + + fn := ir.NewFunc(pos, pos, sym, sig) + fn.DeclareParams(true) + fn.SetDupok(true) // TODO(mdempsky): Leave unset for local, non-generic wrappers? + + return fn +} + +func finishWrapperFunc(fn *ir.Func, target *ir.Package) { + ir.WithFunc(fn, func() { + typecheck.Stmts(fn.Body) + }) + + // We generate wrappers after the global inlining pass, + // so we're responsible for applying inlining ourselves here. + // TODO(prattmic): plumb PGO. + interleaved.DevirtualizeAndInlineFunc(fn, nil) + + // The body of wrapper function after inlining may reveal new ir.OMETHVALUE node, + // we don't know whether wrapper function has been generated for it or not, so + // generate one immediately here. + // + // Further, after CL 492017, function that construct closures is allowed to be inlined, + // even though the closure itself can't be inline. So we also need to visit body of any + // closure that we see when visiting body of the wrapper function. + ir.VisitFuncAndClosures(fn, func(n ir.Node) { + if n, ok := n.(*ir.SelectorExpr); ok && n.Op() == ir.OMETHVALUE { + wrapMethodValue(n.X.Type(), n.Selection, target, true) + } + }) + + fn.Nname.Defn = fn + target.Funcs = append(target.Funcs, fn) +} + +// newWrapperType returns a copy of the given signature type, but with +// the receiver parameter type substituted with recvType. +// If recvType is nil, newWrapperType returns a signature +// without a receiver parameter. 
+func newWrapperType(recvType *types.Type, method *types.Field) *types.Type { + clone := func(params []*types.Field) []*types.Field { + res := make([]*types.Field, len(params)) + for i, param := range params { + res[i] = types.NewField(param.Pos, param.Sym, param.Type) + res[i].SetIsDDD(param.IsDDD()) + } + return res + } + + sig := method.Type + + var recv *types.Field + if recvType != nil { + recv = types.NewField(sig.Recv().Pos, sig.Recv().Sym, recvType) + } + params := clone(sig.Params()) + results := clone(sig.Results()) + + return types.NewSignature(recv, params, results) +} + +func addTailCall(pos src.XPos, fn *ir.Func, recv ir.Node, method *types.Field) { + sig := fn.Nname.Type() + args := make([]ir.Node, sig.NumParams()) + for i, param := range sig.Params() { + args[i] = param.Nname.(*ir.Name) + } + + // TODO(mdempsky): Support creating OTAILCALL, when possible. See reflectdata.methodWrapper. + // Not urgent though, because tail calls are currently incompatible with regabi anyway. + + fn.SetWrapper(true) // TODO(mdempsky): Leave unset for tail calls? + + dot := typecheck.XDotMethod(pos, recv, method.Sym, true) + call := typecheck.Call(pos, dot, args, method.Type.IsVariadic()).(*ir.CallExpr) + + if method.Type.NumResults() == 0 { + fn.Body.Append(call) + return + } + + ret := ir.NewReturnStmt(pos, nil) + ret.Results = []ir.Node{call} + fn.Body.Append(ret) +} + +func setBasePos(pos src.XPos) { + // Set the position for any error messages we might print (e.g. too large types). + base.Pos = pos +} + +// dictParamName is the name of the synthetic dictionary parameter +// added to shaped functions. +// +// N.B., this variable name is known to Delve: +// https://github.com/go-delve/delve/blob/cb91509630529e6055be845688fd21eb89ae8714/pkg/proc/eval.go#L28 +const dictParamName = typecheck.LocalDictName + +// shapeSig returns a copy of fn's signature, except adding a +// dictionary parameter and promoting the receiver parameter (if any) +// to a normal parameter. 
+// +// The parameter types.Fields are all copied too, so their Nname +// fields can be initialized for use by the shape function. +func shapeSig(fn *ir.Func, dict *readerDict) *types.Type { + sig := fn.Nname.Type() + oldRecv := sig.Recv() + + var recv *types.Field + if oldRecv != nil { + recv = types.NewField(oldRecv.Pos, oldRecv.Sym, oldRecv.Type) + } + + params := make([]*types.Field, 1+sig.NumParams()) + params[0] = types.NewField(fn.Pos(), fn.Sym().Pkg.Lookup(dictParamName), types.NewPtr(dict.varType())) + for i, param := range sig.Params() { + d := types.NewField(param.Pos, param.Sym, param.Type) + d.SetIsDDD(param.IsDDD()) + params[1+i] = d + } + + results := make([]*types.Field, sig.NumResults()) + for i, result := range sig.Results() { + results[i] = types.NewField(result.Pos, result.Sym, result.Type) + } + + return types.NewSignature(recv, params, results) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/stencil.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/stencil.go new file mode 100644 index 0000000000000000000000000000000000000000..43a39ab2265d799753b584060258c5ea32f519fd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/stencil.go @@ -0,0 +1,16 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file will evolve, since we plan to do a mix of stenciling and passing +// around dictionaries. 
+ +package noder + +import ( + "cmd/compile/internal/base" +) + +func assert(p bool) { + base.Assert(p) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/stmt.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/stmt.go new file mode 100644 index 0000000000000000000000000000000000000000..04f92d2cf52765acd890570a2fad085459c88db4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/stmt.go @@ -0,0 +1,24 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package noder + +import ( + "cmd/compile/internal/ir" + "cmd/compile/internal/syntax" +) + +// TODO(mdempsky): Investigate replacing with switch statements or dense arrays. + +var branchOps = [...]ir.Op{ + syntax.Break: ir.OBREAK, + syntax.Continue: ir.OCONTINUE, + syntax.Fallthrough: ir.OFALL, + syntax.Goto: ir.OGOTO, +} + +var callOps = [...]ir.Op{ + syntax.Defer: ir.ODEFER, + syntax.Go: ir.OGO, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/types.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/types.go new file mode 100644 index 0000000000000000000000000000000000000000..76c6d15dd83357bacade5c2182e6c1063e15343f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/types.go @@ -0,0 +1,53 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package noder + +import ( + "cmd/compile/internal/types" + "cmd/compile/internal/types2" +) + +var basics = [...]**types.Type{ + types2.Invalid: new(*types.Type), + types2.Bool: &types.Types[types.TBOOL], + types2.Int: &types.Types[types.TINT], + types2.Int8: &types.Types[types.TINT8], + types2.Int16: &types.Types[types.TINT16], + types2.Int32: &types.Types[types.TINT32], + types2.Int64: &types.Types[types.TINT64], + types2.Uint: &types.Types[types.TUINT], + types2.Uint8: &types.Types[types.TUINT8], + types2.Uint16: &types.Types[types.TUINT16], + types2.Uint32: &types.Types[types.TUINT32], + types2.Uint64: &types.Types[types.TUINT64], + types2.Uintptr: &types.Types[types.TUINTPTR], + types2.Float32: &types.Types[types.TFLOAT32], + types2.Float64: &types.Types[types.TFLOAT64], + types2.Complex64: &types.Types[types.TCOMPLEX64], + types2.Complex128: &types.Types[types.TCOMPLEX128], + types2.String: &types.Types[types.TSTRING], + types2.UnsafePointer: &types.Types[types.TUNSAFEPTR], + types2.UntypedBool: &types.UntypedBool, + types2.UntypedInt: &types.UntypedInt, + types2.UntypedRune: &types.UntypedRune, + types2.UntypedFloat: &types.UntypedFloat, + types2.UntypedComplex: &types.UntypedComplex, + types2.UntypedString: &types.UntypedString, + types2.UntypedNil: &types.Types[types.TNIL], +} + +var dirs = [...]types.ChanDir{ + types2.SendRecv: types.Cboth, + types2.SendOnly: types.Csend, + types2.RecvOnly: types.Crecv, +} + +// deref2 does a single deref of types2 type t, if it is a pointer type. 
+func deref2(t types2.Type) types2.Type { + if ptr := types2.AsPointer(t); ptr != nil { + t = ptr.Elem() + } + return t +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/unified.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/unified.go new file mode 100644 index 0000000000000000000000000000000000000000..562b2e63140db8dbc356d7b9ce61748b6dc4e991 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/unified.go @@ -0,0 +1,538 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package noder + +import ( + "fmt" + "internal/pkgbits" + "io" + "runtime" + "sort" + "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/inline" + "cmd/compile/internal/ir" + "cmd/compile/internal/pgo" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/compile/internal/types2" + "cmd/internal/src" +) + +// localPkgReader holds the package reader used for reading the local +// package. It exists so the unified IR linker can refer back to it +// later. +var localPkgReader *pkgReader + +// LookupMethodFunc returns the ir.Func for an arbitrary full symbol name if +// that function exists in the set of available export data. +// +// This allows lookup of arbitrary functions and methods that aren't otherwise +// referenced by the local package and thus haven't been read yet. +// +// TODO(prattmic): Does not handle instantiation of generic types. Currently +// profiles don't contain the original type arguments, so we won't be able to +// create the runtime dictionaries. +// +// TODO(prattmic): Hit rate of this function is usually fairly low, and errors +// are only used when debug logging is enabled. Consider constructing cheaper +// errors by default. 
+func LookupFunc(fullName string) (*ir.Func, error) { + pkgPath, symName, err := ir.ParseLinkFuncName(fullName) + if err != nil { + return nil, fmt.Errorf("error parsing symbol name %q: %v", fullName, err) + } + + pkg, ok := types.PkgMap()[pkgPath] + if !ok { + return nil, fmt.Errorf("pkg %s doesn't exist in %v", pkgPath, types.PkgMap()) + } + + // Symbol naming is ambiguous. We can't necessarily distinguish between + // a method and a closure. e.g., is foo.Bar.func1 a closure defined in + // function Bar, or a method on type Bar? Thus we must simply attempt + // to lookup both. + + fn, err := lookupFunction(pkg, symName) + if err == nil { + return fn, nil + } + + fn, mErr := lookupMethod(pkg, symName) + if mErr == nil { + return fn, nil + } + + return nil, fmt.Errorf("%s is not a function (%v) or method (%v)", fullName, err, mErr) +} + +func lookupFunction(pkg *types.Pkg, symName string) (*ir.Func, error) { + sym := pkg.Lookup(symName) + + // TODO(prattmic): Enclosed functions (e.g., foo.Bar.func1) are not + // present in objReader, only as OCLOSURE nodes in the enclosing + // function. + pri, ok := objReader[sym] + if !ok { + return nil, fmt.Errorf("func sym %v missing objReader", sym) + } + + node, err := pri.pr.objIdxMayFail(pri.idx, nil, nil, false) + if err != nil { + return nil, fmt.Errorf("func sym %v lookup error: %w", sym, err) + } + name := node.(*ir.Name) + if name.Op() != ir.ONAME || name.Class != ir.PFUNC { + return nil, fmt.Errorf("func sym %v refers to non-function name: %v", sym, name) + } + return name.Func, nil +} + +func lookupMethod(pkg *types.Pkg, symName string) (*ir.Func, error) { + // N.B. readPackage creates a Sym for every object in the package to + // initialize objReader and importBodyReader, even if the object isn't + // read. + // + // However, objReader is only initialized for top-level objects, so we + // must first lookup the type and use that to find the method rather + // than looking for the method directly. 
+ typ, meth, err := ir.LookupMethodSelector(pkg, symName) + if err != nil { + return nil, fmt.Errorf("error looking up method symbol %q: %v", symName, err) + } + + pri, ok := objReader[typ] + if !ok { + return nil, fmt.Errorf("type sym %v missing objReader", typ) + } + + node, err := pri.pr.objIdxMayFail(pri.idx, nil, nil, false) + if err != nil { + return nil, fmt.Errorf("func sym %v lookup error: %w", typ, err) + } + name := node.(*ir.Name) + if name.Op() != ir.OTYPE { + return nil, fmt.Errorf("type sym %v refers to non-type name: %v", typ, name) + } + if name.Alias() { + return nil, fmt.Errorf("type sym %v refers to alias", typ) + } + if name.Type().IsInterface() { + return nil, fmt.Errorf("type sym %v refers to interface type", typ) + } + + for _, m := range name.Type().Methods() { + if m.Sym == meth { + fn := m.Nname.(*ir.Name).Func + return fn, nil + } + } + + return nil, fmt.Errorf("method %s missing from method set of %v", symName, typ) +} + +// unified constructs the local package's Internal Representation (IR) +// from its syntax tree (AST). +// +// The pipeline contains 2 steps: +// +// 1. Generate the export data "stub". +// +// 2. Generate the IR from the export data above. +// +// The package data "stub" at step (1) contains everything from the local package, +// but nothing that has been imported. When we're actually writing out export data +// to the output files (see writeNewExport), we run the "linker", which: +// +// - Updates compiler extensions data (e.g. inlining cost, escape analysis results). +// +// - Handles re-exporting any transitive dependencies. +// +// - Prunes out any unnecessary details (e.g. non-inlineable functions, because any +// downstream importers only care about inlinable functions). +// +// The source files are typechecked twice: once before writing the export data +// using types2, and again after reading the export data using gc/typecheck. 
+// The duplication of work will go away once we only use the types2 type checker, +// removing the gc/typecheck step. For now, it is kept because: +// +// - It reduces the engineering costs in maintaining a fork of typecheck +// (e.g. no need to backport fixes like CL 327651). +// +// - It makes it easier to pass toolstash -cmp. +// +// - Historically, we would always re-run the typechecker after importing a package, +// even though we know the imported data is valid. It's not ideal, but it's +// not causing any problems either. +// +// - gc/typecheck is still in charge of some transformations, such as rewriting +// multi-valued function calls or transforming ir.OINDEX to ir.OINDEXMAP. +// +// Using the syntax tree with types2, which has a complete representation of generics, +// the unified IR has the full typed AST needed for introspection during step (1). +// In other words, we have all the necessary information to build the generic IR form +// (see writer.captureVars for an example). +func unified(m posMap, noders []*noder) { + inline.InlineCall = unifiedInlineCall + typecheck.HaveInlineBody = unifiedHaveInlineBody + pgo.LookupFunc = LookupFunc + + data := writePkgStub(m, noders) + + target := typecheck.Target + + localPkgReader = newPkgReader(pkgbits.NewPkgDecoder(types.LocalPkg.Path, data)) + readPackage(localPkgReader, types.LocalPkg, true) + + r := localPkgReader.newReader(pkgbits.RelocMeta, pkgbits.PrivateRootIdx, pkgbits.SyncPrivate) + r.pkgInit(types.LocalPkg, target) + + readBodies(target, false) + + // Check that nothing snuck past typechecking. + for _, fn := range target.Funcs { + if fn.Typecheck() == 0 { + base.FatalfAt(fn.Pos(), "missed typecheck: %v", fn) + } + + // For functions, check that at least their first statement (if + // any) was typechecked too. 
+ if len(fn.Body) != 0 { + if stmt := fn.Body[0]; stmt.Typecheck() == 0 { + base.FatalfAt(stmt.Pos(), "missed typecheck: %v", stmt) + } + } + } + + // For functions originally came from package runtime, + // mark as norace to prevent instrumenting, see issue #60439. + for _, fn := range target.Funcs { + if !base.Flag.CompilingRuntime && types.RuntimeSymName(fn.Sym()) != "" { + fn.Pragma |= ir.Norace + } + } + + base.ExitIfErrors() // just in case +} + +// readBodies iteratively expands all pending dictionaries and +// function bodies. +// +// If duringInlining is true, then the inline.InlineDecls is called as +// necessary on instantiations of imported generic functions, so their +// inlining costs can be computed. +func readBodies(target *ir.Package, duringInlining bool) { + var inlDecls []*ir.Func + + // Don't use range--bodyIdx can add closures to todoBodies. + for { + // The order we expand dictionaries and bodies doesn't matter, so + // pop from the end to reduce todoBodies reallocations if it grows + // further. + // + // However, we do at least need to flush any pending dictionaries + // before reading bodies, because bodies might reference the + // dictionaries. + + if len(todoDicts) > 0 { + fn := todoDicts[len(todoDicts)-1] + todoDicts = todoDicts[:len(todoDicts)-1] + fn() + continue + } + + if len(todoBodies) > 0 { + fn := todoBodies[len(todoBodies)-1] + todoBodies = todoBodies[:len(todoBodies)-1] + + pri, ok := bodyReader[fn] + assert(ok) + pri.funcBody(fn) + + // Instantiated generic function: add to Decls for typechecking + // and compilation. + if fn.OClosure == nil && len(pri.dict.targs) != 0 { + // cmd/link does not support a type symbol referencing a method symbol + // across DSO boundary, so force re-compiling methods on a generic type + // even it was seen from imported package in linkshared mode, see #58966. 
+ canSkipNonGenericMethod := !(base.Ctxt.Flag_linkshared && ir.IsMethod(fn)) + if duringInlining && canSkipNonGenericMethod { + inlDecls = append(inlDecls, fn) + } else { + target.Funcs = append(target.Funcs, fn) + } + } + + continue + } + + break + } + + todoDicts = nil + todoBodies = nil + + if len(inlDecls) != 0 { + // If we instantiated any generic functions during inlining, we need + // to call CanInline on them so they'll be transitively inlined + // correctly (#56280). + // + // We know these functions were already compiled in an imported + // package though, so we don't need to actually apply InlineCalls or + // save the function bodies any further than this. + // + // We can also lower the -m flag to 0, to suppress duplicate "can + // inline" diagnostics reported against the imported package. Again, + // we already reported those diagnostics in the original package, so + // it's pointless repeating them here. + + oldLowerM := base.Flag.LowerM + base.Flag.LowerM = 0 + inline.CanInlineFuncs(inlDecls, nil) + base.Flag.LowerM = oldLowerM + + for _, fn := range inlDecls { + fn.Body = nil // free memory + } + } +} + +// writePkgStub type checks the given parsed source files, +// writes an export data package stub representing them, +// and returns the result. 
+func writePkgStub(m posMap, noders []*noder) string { + pkg, info := checkFiles(m, noders) + + pw := newPkgWriter(m, pkg, info) + + pw.collectDecls(noders) + + publicRootWriter := pw.newWriter(pkgbits.RelocMeta, pkgbits.SyncPublic) + privateRootWriter := pw.newWriter(pkgbits.RelocMeta, pkgbits.SyncPrivate) + + assert(publicRootWriter.Idx == pkgbits.PublicRootIdx) + assert(privateRootWriter.Idx == pkgbits.PrivateRootIdx) + + { + w := publicRootWriter + w.pkg(pkg) + w.Bool(false) // TODO(mdempsky): Remove; was "has init" + + scope := pkg.Scope() + names := scope.Names() + w.Len(len(names)) + for _, name := range names { + w.obj(scope.Lookup(name), nil) + } + + w.Sync(pkgbits.SyncEOF) + w.Flush() + } + + { + w := privateRootWriter + w.pkgInit(noders) + w.Flush() + } + + var sb strings.Builder + pw.DumpTo(&sb) + + // At this point, we're done with types2. Make sure the package is + // garbage collected. + freePackage(pkg) + + return sb.String() +} + +// freePackage ensures the given package is garbage collected. +func freePackage(pkg *types2.Package) { + // The GC test below relies on a precise GC that runs finalizers as + // soon as objects are unreachable. Our implementation provides + // this, but other/older implementations may not (e.g., Go 1.4 does + // not because of #22350). To avoid imposing unnecessary + // restrictions on the GOROOT_BOOTSTRAP toolchain, we skip the test + // during bootstrapping. + if base.CompilerBootstrap || base.Debug.GCCheck == 0 { + *pkg = types2.Package{} + return + } + + // Set a finalizer on pkg so we can detect if/when it's collected. + done := make(chan struct{}) + runtime.SetFinalizer(pkg, func(*types2.Package) { close(done) }) + + // Important: objects involved in cycles are not finalized, so zero + // out pkg to break its cycles and allow the finalizer to run. + *pkg = types2.Package{} + + // It typically takes just 1 or 2 cycles to release pkg, but it + // doesn't hurt to try a few more times. 
+ for i := 0; i < 10; i++ { + select { + case <-done: + return + default: + runtime.GC() + } + } + + base.Fatalf("package never finalized") +} + +// readPackage reads package export data from pr to populate +// importpkg. +// +// localStub indicates whether pr is reading the stub export data for +// the local package, as opposed to relocated export data for an +// import. +func readPackage(pr *pkgReader, importpkg *types.Pkg, localStub bool) { + { + r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) + + pkg := r.pkg() + base.Assertf(pkg == importpkg, "have package %q (%p), want package %q (%p)", pkg.Path, pkg, importpkg.Path, importpkg) + + r.Bool() // TODO(mdempsky): Remove; was "has init" + + for i, n := 0, r.Len(); i < n; i++ { + r.Sync(pkgbits.SyncObject) + assert(!r.Bool()) + idx := r.Reloc(pkgbits.RelocObj) + assert(r.Len() == 0) + + path, name, code := r.p.PeekObj(idx) + if code != pkgbits.ObjStub { + objReader[types.NewPkg(path, "").Lookup(name)] = pkgReaderIndex{pr, idx, nil, nil, nil} + } + } + + r.Sync(pkgbits.SyncEOF) + } + + if !localStub { + r := pr.newReader(pkgbits.RelocMeta, pkgbits.PrivateRootIdx, pkgbits.SyncPrivate) + + if r.Bool() { + sym := importpkg.Lookup(".inittask") + task := ir.NewNameAt(src.NoXPos, sym, nil) + task.Class = ir.PEXTERN + sym.Def = task + } + + for i, n := 0, r.Len(); i < n; i++ { + path := r.String() + name := r.String() + idx := r.Reloc(pkgbits.RelocBody) + + sym := types.NewPkg(path, "").Lookup(name) + if _, ok := importBodyReader[sym]; !ok { + importBodyReader[sym] = pkgReaderIndex{pr, idx, nil, nil, nil} + } + } + + r.Sync(pkgbits.SyncEOF) + } +} + +// writeUnifiedExport writes to `out` the finalized, self-contained +// Unified IR export data file for the current compilation unit. 
+func writeUnifiedExport(out io.Writer) { + l := linker{ + pw: pkgbits.NewPkgEncoder(base.Debug.SyncFrames), + + pkgs: make(map[string]pkgbits.Index), + decls: make(map[*types.Sym]pkgbits.Index), + bodies: make(map[*types.Sym]pkgbits.Index), + } + + publicRootWriter := l.pw.NewEncoder(pkgbits.RelocMeta, pkgbits.SyncPublic) + privateRootWriter := l.pw.NewEncoder(pkgbits.RelocMeta, pkgbits.SyncPrivate) + assert(publicRootWriter.Idx == pkgbits.PublicRootIdx) + assert(privateRootWriter.Idx == pkgbits.PrivateRootIdx) + + var selfPkgIdx pkgbits.Index + + { + pr := localPkgReader + r := pr.NewDecoder(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) + + r.Sync(pkgbits.SyncPkg) + selfPkgIdx = l.relocIdx(pr, pkgbits.RelocPkg, r.Reloc(pkgbits.RelocPkg)) + + r.Bool() // TODO(mdempsky): Remove; was "has init" + + for i, n := 0, r.Len(); i < n; i++ { + r.Sync(pkgbits.SyncObject) + assert(!r.Bool()) + idx := r.Reloc(pkgbits.RelocObj) + assert(r.Len() == 0) + + xpath, xname, xtag := pr.PeekObj(idx) + assert(xpath == pr.PkgPath()) + assert(xtag != pkgbits.ObjStub) + + if types.IsExported(xname) { + l.relocIdx(pr, pkgbits.RelocObj, idx) + } + } + + r.Sync(pkgbits.SyncEOF) + } + + { + var idxs []pkgbits.Index + for _, idx := range l.decls { + idxs = append(idxs, idx) + } + sort.Slice(idxs, func(i, j int) bool { return idxs[i] < idxs[j] }) + + w := publicRootWriter + + w.Sync(pkgbits.SyncPkg) + w.Reloc(pkgbits.RelocPkg, selfPkgIdx) + w.Bool(false) // TODO(mdempsky): Remove; was "has init" + + w.Len(len(idxs)) + for _, idx := range idxs { + w.Sync(pkgbits.SyncObject) + w.Bool(false) + w.Reloc(pkgbits.RelocObj, idx) + w.Len(0) + } + + w.Sync(pkgbits.SyncEOF) + w.Flush() + } + + { + type symIdx struct { + sym *types.Sym + idx pkgbits.Index + } + var bodies []symIdx + for sym, idx := range l.bodies { + bodies = append(bodies, symIdx{sym, idx}) + } + sort.Slice(bodies, func(i, j int) bool { return bodies[i].idx < bodies[j].idx }) + + w := privateRootWriter + + 
w.Bool(typecheck.Lookup(".inittask").Def != nil) + + w.Len(len(bodies)) + for _, body := range bodies { + w.String(body.sym.Pkg.Path) + w.String(body.sym.Name) + w.Reloc(pkgbits.RelocBody, body.idx) + } + + w.Sync(pkgbits.SyncEOF) + w.Flush() + } + + base.Ctxt.Fingerprint = l.pw.DumpTo(out) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/writer.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/writer.go new file mode 100644 index 0000000000000000000000000000000000000000..c317f392c1d438c94798623a94077b97283be2ba --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/noder/writer.go @@ -0,0 +1,3010 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package noder + +import ( + "fmt" + "go/constant" + "go/token" + "go/version" + "internal/buildcfg" + "internal/pkgbits" + "os" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/syntax" + "cmd/compile/internal/types" + "cmd/compile/internal/types2" +) + +// This file implements the Unified IR package writer and defines the +// Unified IR export data format. +// +// Low-level coding details (e.g., byte-encoding of individual +// primitive values, or handling element bitstreams and +// cross-references) are handled by internal/pkgbits, so here we only +// concern ourselves with higher-level worries like mapping Go +// language constructs into elements. + +// There are two central types in the writing process: the "writer" +// type handles writing out individual elements, while the "pkgWriter" +// type keeps track of which elements have already been created. 
+// +// For each sort of "thing" (e.g., position, package, object, type) +// that can be written into the export data, there are generally +// several methods that work together: +// +// - writer.thing handles writing out a *use* of a thing, which often +// means writing a relocation to that thing's encoded index. +// +// - pkgWriter.thingIdx handles reserving an index for a thing, and +// writing out any elements needed for the thing. +// +// - writer.doThing handles writing out the *definition* of a thing, +// which in general is a mix of low-level coding primitives (e.g., +// ints and strings) or uses of other things. +// +// A design goal of Unified IR is to have a single, canonical writer +// implementation, but multiple reader implementations each tailored +// to their respective needs. For example, within cmd/compile's own +// backend, inlining is implemented largely by just re-running the +// function body reading code. + +// TODO(mdempsky): Add an importer for Unified IR to the x/tools repo, +// and better document the file format boundary between public and +// private data. + +// A pkgWriter constructs Unified IR export data from the results of +// running the types2 type checker on a Go compilation unit. +type pkgWriter struct { + pkgbits.PkgEncoder + + m posMap + curpkg *types2.Package + info *types2.Info + + // Indices for previously written syntax and types2 things. + + posBasesIdx map[*syntax.PosBase]pkgbits.Index + pkgsIdx map[*types2.Package]pkgbits.Index + typsIdx map[types2.Type]pkgbits.Index + objsIdx map[types2.Object]pkgbits.Index + + // Maps from types2.Objects back to their syntax.Decl. + + funDecls map[*types2.Func]*syntax.FuncDecl + typDecls map[*types2.TypeName]typeDeclGen + + // linknames maps package-scope objects to their linker symbol name, + // if specified by a //go:linkname directive. + linknames map[types2.Object]string + + // cgoPragmas accumulates any //go:cgo_* pragmas that need to be + // passed through to cmd/link. 
+ cgoPragmas [][]string +} + +// newPkgWriter returns an initialized pkgWriter for the specified +// package. +func newPkgWriter(m posMap, pkg *types2.Package, info *types2.Info) *pkgWriter { + return &pkgWriter{ + PkgEncoder: pkgbits.NewPkgEncoder(base.Debug.SyncFrames), + + m: m, + curpkg: pkg, + info: info, + + pkgsIdx: make(map[*types2.Package]pkgbits.Index), + objsIdx: make(map[types2.Object]pkgbits.Index), + typsIdx: make(map[types2.Type]pkgbits.Index), + + posBasesIdx: make(map[*syntax.PosBase]pkgbits.Index), + + funDecls: make(map[*types2.Func]*syntax.FuncDecl), + typDecls: make(map[*types2.TypeName]typeDeclGen), + + linknames: make(map[types2.Object]string), + } +} + +// errorf reports a user error about thing p. +func (pw *pkgWriter) errorf(p poser, msg string, args ...interface{}) { + base.ErrorfAt(pw.m.pos(p), 0, msg, args...) +} + +// fatalf reports an internal compiler error about thing p. +func (pw *pkgWriter) fatalf(p poser, msg string, args ...interface{}) { + base.FatalfAt(pw.m.pos(p), msg, args...) +} + +// unexpected reports a fatal error about a thing of unexpected +// dynamic type. +func (pw *pkgWriter) unexpected(what string, p poser) { + pw.fatalf(p, "unexpected %s: %v (%T)", what, p, p) +} + +func (pw *pkgWriter) typeAndValue(x syntax.Expr) syntax.TypeAndValue { + tv, ok := pw.maybeTypeAndValue(x) + if !ok { + pw.fatalf(x, "missing Types entry: %v", syntax.String(x)) + } + return tv +} + +func (pw *pkgWriter) maybeTypeAndValue(x syntax.Expr) (syntax.TypeAndValue, bool) { + tv := x.GetTypeInfo() + + // If x is a generic function whose type arguments are inferred + // from assignment context, then we need to find its inferred type + // in Info.Instances instead. + if name, ok := x.(*syntax.Name); ok { + if inst, ok := pw.info.Instances[name]; ok { + tv.Type = inst.Type + } + } + + return tv, tv.Type != nil +} + +// typeOf returns the Type of the given value expression. 
+func (pw *pkgWriter) typeOf(expr syntax.Expr) types2.Type { + tv := pw.typeAndValue(expr) + if !tv.IsValue() { + pw.fatalf(expr, "expected value: %v", syntax.String(expr)) + } + return tv.Type +} + +// A writer provides APIs for writing out an individual element. +type writer struct { + p *pkgWriter + + pkgbits.Encoder + + // sig holds the signature for the current function body, if any. + sig *types2.Signature + + // TODO(mdempsky): We should be able to prune localsIdx whenever a + // scope closes, and then maybe we can just use the same map for + // storing the TypeParams too (as their TypeName instead). + + // localsIdx tracks any local variables declared within this + // function body. It's unused for writing out non-body things. + localsIdx map[*types2.Var]int + + // closureVars tracks any free variables that are referenced by this + // function body. It's unused for writing out non-body things. + closureVars []posVar + closureVarsIdx map[*types2.Var]int // index of previously seen free variables + + dict *writerDict + + // derived tracks whether the type being written out references any + // type parameters. It's unused for writing non-type things. + derived bool +} + +// A writerDict tracks types and objects that are used by a declaration. +type writerDict struct { + implicits []*types2.TypeName + + // derived is a slice of type indices for computing derived types + // (i.e., types that depend on the declaration's type parameters). + derived []derivedInfo + + // derivedIdx maps a Type to its corresponding index within the + // derived slice, if present. + derivedIdx map[types2.Type]pkgbits.Index + + // These slices correspond to entries in the runtime dictionary. + typeParamMethodExprs []writerMethodExprInfo + subdicts []objInfo + rtypes []typeInfo + itabs []itabInfo +} + +type itabInfo struct { + typ typeInfo + iface typeInfo +} + +// typeParamIndex returns the index of the given type parameter within +// the dictionary. 
This may differ from typ.Index() when there are +// implicit type parameters due to defined types declared within a +// generic function or method. +func (dict *writerDict) typeParamIndex(typ *types2.TypeParam) int { + for idx, implicit := range dict.implicits { + if types2.Unalias(implicit.Type()).(*types2.TypeParam) == typ { + return idx + } + } + + return len(dict.implicits) + typ.Index() +} + +// A derivedInfo represents a reference to an encoded generic Go type. +type derivedInfo struct { + idx pkgbits.Index + needed bool // TODO(mdempsky): Remove. +} + +// A typeInfo represents a reference to an encoded Go type. +// +// If derived is true, then the typeInfo represents a generic Go type +// that contains type parameters. In this case, idx is an index into +// the readerDict.derived{,Types} arrays. +// +// Otherwise, the typeInfo represents a non-generic Go type, and idx +// is an index into the reader.typs array instead. +type typeInfo struct { + idx pkgbits.Index + derived bool +} + +// An objInfo represents a reference to an encoded, instantiated (if +// applicable) Go object. +type objInfo struct { + idx pkgbits.Index // index for the generic function declaration + explicits []typeInfo // info for the type arguments +} + +// A selectorInfo represents a reference to an encoded field or method +// name (i.e., objects that can only be accessed using selector +// expressions). +type selectorInfo struct { + pkgIdx pkgbits.Index + nameIdx pkgbits.Index +} + +// anyDerived reports whether any of info's explicit type arguments +// are derived types. +func (info objInfo) anyDerived() bool { + for _, explicit := range info.explicits { + if explicit.derived { + return true + } + } + return false +} + +// equals reports whether info and other represent the same Go object +// (i.e., same base object and identical type arguments, if any). 
+func (info objInfo) equals(other objInfo) bool { + if info.idx != other.idx { + return false + } + assert(len(info.explicits) == len(other.explicits)) + for i, targ := range info.explicits { + if targ != other.explicits[i] { + return false + } + } + return true +} + +type writerMethodExprInfo struct { + typeParamIdx int + methodInfo selectorInfo +} + +// typeParamMethodExprIdx returns the index where the given encoded +// method expression function pointer appears within this dictionary's +// type parameters method expressions section, adding it if necessary. +func (dict *writerDict) typeParamMethodExprIdx(typeParamIdx int, methodInfo selectorInfo) int { + newInfo := writerMethodExprInfo{typeParamIdx, methodInfo} + + for idx, oldInfo := range dict.typeParamMethodExprs { + if oldInfo == newInfo { + return idx + } + } + + idx := len(dict.typeParamMethodExprs) + dict.typeParamMethodExprs = append(dict.typeParamMethodExprs, newInfo) + return idx +} + +// subdictIdx returns the index where the given encoded object's +// runtime dictionary appears within this dictionary's subdictionary +// section, adding it if necessary. +func (dict *writerDict) subdictIdx(newInfo objInfo) int { + for idx, oldInfo := range dict.subdicts { + if oldInfo.equals(newInfo) { + return idx + } + } + + idx := len(dict.subdicts) + dict.subdicts = append(dict.subdicts, newInfo) + return idx +} + +// rtypeIdx returns the index where the given encoded type's +// *runtime._type value appears within this dictionary's rtypes +// section, adding it if necessary. +func (dict *writerDict) rtypeIdx(newInfo typeInfo) int { + for idx, oldInfo := range dict.rtypes { + if oldInfo == newInfo { + return idx + } + } + + idx := len(dict.rtypes) + dict.rtypes = append(dict.rtypes, newInfo) + return idx +} + +// itabIdx returns the index where the given encoded type pair's +// *runtime.itab value appears within this dictionary's itabs section, +// adding it if necessary. 
+func (dict *writerDict) itabIdx(typInfo, ifaceInfo typeInfo) int { + newInfo := itabInfo{typInfo, ifaceInfo} + + for idx, oldInfo := range dict.itabs { + if oldInfo == newInfo { + return idx + } + } + + idx := len(dict.itabs) + dict.itabs = append(dict.itabs, newInfo) + return idx +} + +func (pw *pkgWriter) newWriter(k pkgbits.RelocKind, marker pkgbits.SyncMarker) *writer { + return &writer{ + Encoder: pw.NewEncoder(k, marker), + p: pw, + } +} + +// @@@ Positions + +// pos writes the position of p into the element bitstream. +func (w *writer) pos(p poser) { + w.Sync(pkgbits.SyncPos) + pos := p.Pos() + + // TODO(mdempsky): Track down the remaining cases here and fix them. + if !w.Bool(pos.IsKnown()) { + return + } + + // TODO(mdempsky): Delta encoding. + w.posBase(pos.Base()) + w.Uint(pos.Line()) + w.Uint(pos.Col()) +} + +// posBase writes a reference to the given PosBase into the element +// bitstream. +func (w *writer) posBase(b *syntax.PosBase) { + w.Reloc(pkgbits.RelocPosBase, w.p.posBaseIdx(b)) +} + +// posBaseIdx returns the index for the given PosBase. +func (pw *pkgWriter) posBaseIdx(b *syntax.PosBase) pkgbits.Index { + if idx, ok := pw.posBasesIdx[b]; ok { + return idx + } + + w := pw.newWriter(pkgbits.RelocPosBase, pkgbits.SyncPosBase) + w.p.posBasesIdx[b] = w.Idx + + w.String(trimFilename(b)) + + if !w.Bool(b.IsFileBase()) { + w.pos(b) + w.Uint(b.Line()) + w.Uint(b.Col()) + } + + return w.Flush() +} + +// @@@ Packages + +// pkg writes a use of the given Package into the element bitstream. +func (w *writer) pkg(pkg *types2.Package) { + w.pkgRef(w.p.pkgIdx(pkg)) +} + +func (w *writer) pkgRef(idx pkgbits.Index) { + w.Sync(pkgbits.SyncPkg) + w.Reloc(pkgbits.RelocPkg, idx) +} + +// pkgIdx returns the index for the given package, adding it to the +// package export data if needed. 
+func (pw *pkgWriter) pkgIdx(pkg *types2.Package) pkgbits.Index { + if idx, ok := pw.pkgsIdx[pkg]; ok { + return idx + } + + w := pw.newWriter(pkgbits.RelocPkg, pkgbits.SyncPkgDef) + pw.pkgsIdx[pkg] = w.Idx + + // The universe and package unsafe need to be handled specially by + // importers anyway, so we serialize them using just their package + // path. This ensures that readers don't confuse them for + // user-defined packages. + switch pkg { + case nil: // universe + w.String("builtin") // same package path used by godoc + case types2.Unsafe: + w.String("unsafe") + default: + // TODO(mdempsky): Write out pkg.Path() for curpkg too. + var path string + if pkg != w.p.curpkg { + path = pkg.Path() + } + base.Assertf(path != "builtin" && path != "unsafe", "unexpected path for user-defined package: %q", path) + w.String(path) + w.String(pkg.Name()) + + w.Len(len(pkg.Imports())) + for _, imp := range pkg.Imports() { + w.pkg(imp) + } + } + + return w.Flush() +} + +// @@@ Types + +var ( + anyTypeName = types2.Universe.Lookup("any").(*types2.TypeName) + comparableTypeName = types2.Universe.Lookup("comparable").(*types2.TypeName) + runeTypeName = types2.Universe.Lookup("rune").(*types2.TypeName) +) + +// typ writes a use of the given type into the bitstream. +func (w *writer) typ(typ types2.Type) { + w.typInfo(w.p.typIdx(typ, w.dict)) +} + +// typInfo writes a use of the given type (specified as a typeInfo +// instead) into the bitstream. +func (w *writer) typInfo(info typeInfo) { + w.Sync(pkgbits.SyncType) + if w.Bool(info.derived) { + w.Len(int(info.idx)) + w.derived = true + } else { + w.Reloc(pkgbits.RelocType, info.idx) + } +} + +// typIdx returns the index where the export data description of type +// can be read back in. If no such index exists yet, it's created. +// +// typIdx also reports whether typ is a derived type; that is, whether +// its identity depends on type parameters. 
+func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) typeInfo { + if idx, ok := pw.typsIdx[typ]; ok { + return typeInfo{idx: idx, derived: false} + } + if dict != nil { + if idx, ok := dict.derivedIdx[typ]; ok { + return typeInfo{idx: idx, derived: true} + } + } + + w := pw.newWriter(pkgbits.RelocType, pkgbits.SyncTypeIdx) + w.dict = dict + + switch typ := types2.Unalias(typ).(type) { + default: + base.Fatalf("unexpected type: %v (%T)", typ, typ) + + case *types2.Basic: + switch kind := typ.Kind(); { + case kind == types2.Invalid: + base.Fatalf("unexpected types2.Invalid") + + case types2.Typ[kind] == typ: + w.Code(pkgbits.TypeBasic) + w.Len(int(kind)) + + default: + // Handle "byte" and "rune" as references to their TypeNames. + obj := types2.Universe.Lookup(typ.Name()) + assert(obj.Type() == typ) + + w.Code(pkgbits.TypeNamed) + w.obj(obj, nil) + } + + case *types2.Named: + obj, targs := splitNamed(typ) + + // Defined types that are declared within a generic function (and + // thus have implicit type parameters) are always derived types. 
+ if w.p.hasImplicitTypeParams(obj) { + w.derived = true + } + + w.Code(pkgbits.TypeNamed) + w.obj(obj, targs) + + case *types2.TypeParam: + w.derived = true + w.Code(pkgbits.TypeTypeParam) + w.Len(w.dict.typeParamIndex(typ)) + + case *types2.Array: + w.Code(pkgbits.TypeArray) + w.Uint64(uint64(typ.Len())) + w.typ(typ.Elem()) + + case *types2.Chan: + w.Code(pkgbits.TypeChan) + w.Len(int(typ.Dir())) + w.typ(typ.Elem()) + + case *types2.Map: + w.Code(pkgbits.TypeMap) + w.typ(typ.Key()) + w.typ(typ.Elem()) + + case *types2.Pointer: + w.Code(pkgbits.TypePointer) + w.typ(typ.Elem()) + + case *types2.Signature: + base.Assertf(typ.TypeParams() == nil, "unexpected type params: %v", typ) + w.Code(pkgbits.TypeSignature) + w.signature(typ) + + case *types2.Slice: + w.Code(pkgbits.TypeSlice) + w.typ(typ.Elem()) + + case *types2.Struct: + w.Code(pkgbits.TypeStruct) + w.structType(typ) + + case *types2.Interface: + // Handle "any" as reference to its TypeName. + if typ == anyTypeName.Type() { + w.Code(pkgbits.TypeNamed) + w.obj(anyTypeName, nil) + break + } + + w.Code(pkgbits.TypeInterface) + w.interfaceType(typ) + + case *types2.Union: + w.Code(pkgbits.TypeUnion) + w.unionType(typ) + } + + if w.derived { + idx := pkgbits.Index(len(dict.derived)) + dict.derived = append(dict.derived, derivedInfo{idx: w.Flush()}) + dict.derivedIdx[typ] = idx + return typeInfo{idx: idx, derived: true} + } + + pw.typsIdx[typ] = w.Idx + return typeInfo{idx: w.Flush(), derived: false} +} + +func (w *writer) structType(typ *types2.Struct) { + w.Len(typ.NumFields()) + for i := 0; i < typ.NumFields(); i++ { + f := typ.Field(i) + w.pos(f) + w.selector(f) + w.typ(f.Type()) + w.String(typ.Tag(i)) + w.Bool(f.Embedded()) + } +} + +func (w *writer) unionType(typ *types2.Union) { + w.Len(typ.Len()) + for i := 0; i < typ.Len(); i++ { + t := typ.Term(i) + w.Bool(t.Tilde()) + w.typ(t.Type()) + } +} + +func (w *writer) interfaceType(typ *types2.Interface) { + // If typ has no embedded types but it's not a basic 
interface, then + // the natural description we write out below will fail to + // reconstruct it. + if typ.NumEmbeddeds() == 0 && !typ.IsMethodSet() { + // Currently, this can only happen for the underlying Interface of + // "comparable", which is needed to handle type declarations like + // "type C comparable". + assert(typ == comparableTypeName.Type().(*types2.Named).Underlying()) + + // Export as "interface{ comparable }". + w.Len(0) // NumExplicitMethods + w.Len(1) // NumEmbeddeds + w.Bool(false) // IsImplicit + w.typ(comparableTypeName.Type()) // EmbeddedType(0) + return + } + + w.Len(typ.NumExplicitMethods()) + w.Len(typ.NumEmbeddeds()) + + if typ.NumExplicitMethods() == 0 && typ.NumEmbeddeds() == 1 { + w.Bool(typ.IsImplicit()) + } else { + // Implicit interfaces always have 0 explicit methods and 1 + // embedded type, so we skip writing out the implicit flag + // otherwise as a space optimization. + assert(!typ.IsImplicit()) + } + + for i := 0; i < typ.NumExplicitMethods(); i++ { + m := typ.ExplicitMethod(i) + sig := m.Type().(*types2.Signature) + assert(sig.TypeParams() == nil) + + w.pos(m) + w.selector(m) + w.signature(sig) + } + + for i := 0; i < typ.NumEmbeddeds(); i++ { + w.typ(typ.EmbeddedType(i)) + } +} + +func (w *writer) signature(sig *types2.Signature) { + w.Sync(pkgbits.SyncSignature) + w.params(sig.Params()) + w.params(sig.Results()) + w.Bool(sig.Variadic()) +} + +func (w *writer) params(typ *types2.Tuple) { + w.Sync(pkgbits.SyncParams) + w.Len(typ.Len()) + for i := 0; i < typ.Len(); i++ { + w.param(typ.At(i)) + } +} + +func (w *writer) param(param *types2.Var) { + w.Sync(pkgbits.SyncParam) + w.pos(param) + w.localIdent(param) + w.typ(param.Type()) +} + +// @@@ Objects + +// obj writes a use of the given object into the bitstream. +// +// If obj is a generic object, then explicits are the explicit type +// arguments used to instantiate it (i.e., used to substitute the +// object's own declared type parameters). 
+func (w *writer) obj(obj types2.Object, explicits *types2.TypeList) { + w.objInfo(w.p.objInstIdx(obj, explicits, w.dict)) +} + +// objInfo writes a use of the given encoded object into the +// bitstream. +func (w *writer) objInfo(info objInfo) { + w.Sync(pkgbits.SyncObject) + w.Bool(false) // TODO(mdempsky): Remove; was derived func inst. + w.Reloc(pkgbits.RelocObj, info.idx) + + w.Len(len(info.explicits)) + for _, info := range info.explicits { + w.typInfo(info) + } +} + +// objInstIdx returns the indices for an object and a corresponding +// list of type arguments used to instantiate it, adding them to the +// export data as needed. +func (pw *pkgWriter) objInstIdx(obj types2.Object, explicits *types2.TypeList, dict *writerDict) objInfo { + explicitInfos := make([]typeInfo, explicits.Len()) + for i := range explicitInfos { + explicitInfos[i] = pw.typIdx(explicits.At(i), dict) + } + return objInfo{idx: pw.objIdx(obj), explicits: explicitInfos} +} + +// objIdx returns the index for the given Object, adding it to the +// export data as needed. +func (pw *pkgWriter) objIdx(obj types2.Object) pkgbits.Index { + // TODO(mdempsky): Validate that obj is a global object (or a local + // defined type, which we hoist to global scope anyway). + + if idx, ok := pw.objsIdx[obj]; ok { + return idx + } + + dict := &writerDict{ + derivedIdx: make(map[types2.Type]pkgbits.Index), + } + + if isDefinedType(obj) && obj.Pkg() == pw.curpkg { + decl, ok := pw.typDecls[obj.(*types2.TypeName)] + assert(ok) + dict.implicits = decl.implicits + } + + // We encode objects into 4 elements across different sections, all + // sharing the same index: + // + // - RelocName has just the object's qualified name (i.e., + // Object.Pkg and Object.Name) and the CodeObj indicating what + // specific type of Object it is (Var, Func, etc). + // + // - RelocObj has the remaining public details about the object, + // relevant to go/types importers. 
+ // + // - RelocObjExt has additional private details about the object, + // which are only relevant to cmd/compile itself. This is + // separated from RelocObj so that go/types importers are + // unaffected by internal compiler changes. + // + // - RelocObjDict has public details about the object's type + // parameters and derived type's used by the object. This is + // separated to facilitate the eventual introduction of + // shape-based stenciling. + // + // TODO(mdempsky): Re-evaluate whether RelocName still makes sense + // to keep separate from RelocObj. + + w := pw.newWriter(pkgbits.RelocObj, pkgbits.SyncObject1) + wext := pw.newWriter(pkgbits.RelocObjExt, pkgbits.SyncObject1) + wname := pw.newWriter(pkgbits.RelocName, pkgbits.SyncObject1) + wdict := pw.newWriter(pkgbits.RelocObjDict, pkgbits.SyncObject1) + + pw.objsIdx[obj] = w.Idx // break cycles + assert(wext.Idx == w.Idx) + assert(wname.Idx == w.Idx) + assert(wdict.Idx == w.Idx) + + w.dict = dict + wext.dict = dict + + code := w.doObj(wext, obj) + w.Flush() + wext.Flush() + + wname.qualifiedIdent(obj) + wname.Code(code) + wname.Flush() + + wdict.objDict(obj, w.dict) + wdict.Flush() + + return w.Idx +} + +// doObj writes the RelocObj definition for obj to w, and the +// RelocObjExt definition to wext. 
+func (w *writer) doObj(wext *writer, obj types2.Object) pkgbits.CodeObj { + if obj.Pkg() != w.p.curpkg { + return pkgbits.ObjStub + } + + switch obj := obj.(type) { + default: + w.p.unexpected("object", obj) + panic("unreachable") + + case *types2.Const: + w.pos(obj) + w.typ(obj.Type()) + w.Value(obj.Val()) + return pkgbits.ObjConst + + case *types2.Func: + decl, ok := w.p.funDecls[obj] + assert(ok) + sig := obj.Type().(*types2.Signature) + + w.pos(obj) + w.typeParamNames(sig.TypeParams()) + w.signature(sig) + w.pos(decl) + wext.funcExt(obj) + return pkgbits.ObjFunc + + case *types2.TypeName: + if obj.IsAlias() { + w.pos(obj) + w.typ(obj.Type()) + return pkgbits.ObjAlias + } + + named := obj.Type().(*types2.Named) + assert(named.TypeArgs() == nil) + + w.pos(obj) + w.typeParamNames(named.TypeParams()) + wext.typeExt(obj) + w.typ(named.Underlying()) + + w.Len(named.NumMethods()) + for i := 0; i < named.NumMethods(); i++ { + w.method(wext, named.Method(i)) + } + + return pkgbits.ObjType + + case *types2.Var: + w.pos(obj) + w.typ(obj.Type()) + wext.varExt(obj) + return pkgbits.ObjVar + } +} + +// objDict writes the dictionary needed for reading the given object. +func (w *writer) objDict(obj types2.Object, dict *writerDict) { + // TODO(mdempsky): Split objDict into multiple entries? reader.go + // doesn't care about the type parameter bounds, and reader2.go + // doesn't care about referenced functions. + + w.dict = dict // TODO(mdempsky): This is a bit sketchy. + + w.Len(len(dict.implicits)) + + tparams := objTypeParams(obj) + ntparams := tparams.Len() + w.Len(ntparams) + for i := 0; i < ntparams; i++ { + w.typ(tparams.At(i).Constraint()) + } + + nderived := len(dict.derived) + w.Len(nderived) + for _, typ := range dict.derived { + w.Reloc(pkgbits.RelocType, typ.idx) + w.Bool(typ.needed) + } + + // Write runtime dictionary information. + // + // N.B., the go/types importer reads up to the section, but doesn't + // read any further, so it's safe to change. 
(See TODO above.) + + // For each type parameter, write out whether the constraint is a + // basic interface. This is used to determine how aggressively we + // can shape corresponding type arguments. + // + // This is somewhat redundant with writing out the full type + // parameter constraints above, but the compiler currently skips + // over those. Also, we don't care about the *declared* constraints, + // but how the type parameters are actually *used*. E.g., if a type + // parameter is constrained to `int | uint` but then never used in + // arithmetic/conversions/etc, we could shape those together. + for _, implicit := range dict.implicits { + tparam := types2.Unalias(implicit.Type()).(*types2.TypeParam) + w.Bool(tparam.Underlying().(*types2.Interface).IsMethodSet()) + } + for i := 0; i < ntparams; i++ { + tparam := tparams.At(i) + w.Bool(tparam.Underlying().(*types2.Interface).IsMethodSet()) + } + + w.Len(len(dict.typeParamMethodExprs)) + for _, info := range dict.typeParamMethodExprs { + w.Len(info.typeParamIdx) + w.selectorInfo(info.methodInfo) + } + + w.Len(len(dict.subdicts)) + for _, info := range dict.subdicts { + w.objInfo(info) + } + + w.Len(len(dict.rtypes)) + for _, info := range dict.rtypes { + w.typInfo(info) + } + + w.Len(len(dict.itabs)) + for _, info := range dict.itabs { + w.typInfo(info.typ) + w.typInfo(info.iface) + } + + assert(len(dict.derived) == nderived) +} + +func (w *writer) typeParamNames(tparams *types2.TypeParamList) { + w.Sync(pkgbits.SyncTypeParamNames) + + ntparams := tparams.Len() + for i := 0; i < ntparams; i++ { + tparam := tparams.At(i).Obj() + w.pos(tparam) + w.localIdent(tparam) + } +} + +func (w *writer) method(wext *writer, meth *types2.Func) { + decl, ok := w.p.funDecls[meth] + assert(ok) + sig := meth.Type().(*types2.Signature) + + w.Sync(pkgbits.SyncMethod) + w.pos(meth) + w.selector(meth) + w.typeParamNames(sig.RecvTypeParams()) + w.param(sig.Recv()) + w.signature(sig) + + w.pos(decl) // XXX: Hack to workaround linker 
limitations. + wext.funcExt(meth) +} + +// qualifiedIdent writes out the name of an object declared at package +// scope. (For now, it's also used to refer to local defined types.) +func (w *writer) qualifiedIdent(obj types2.Object) { + w.Sync(pkgbits.SyncSym) + + name := obj.Name() + if isDefinedType(obj) && obj.Pkg() == w.p.curpkg { + decl, ok := w.p.typDecls[obj.(*types2.TypeName)] + assert(ok) + if decl.gen != 0 { + // For local defined types, we embed a scope-disambiguation + // number directly into their name. types.SplitVargenSuffix then + // knows to look for this. + // + // TODO(mdempsky): Find a better solution; this is terrible. + name = fmt.Sprintf("%s·%v", name, decl.gen) + } + } + + w.pkg(obj.Pkg()) + w.String(name) +} + +// TODO(mdempsky): We should be able to omit pkg from both localIdent +// and selector, because they should always be known from context. +// However, past frustrations with this optimization in iexport make +// me a little nervous to try it again. + +// localIdent writes the name of a locally declared object (i.e., +// objects that can only be accessed by non-qualified name, within the +// context of a particular function). +func (w *writer) localIdent(obj types2.Object) { + assert(!isGlobal(obj)) + w.Sync(pkgbits.SyncLocalIdent) + w.pkg(obj.Pkg()) + w.String(obj.Name()) +} + +// selector writes the name of a field or method (i.e., objects that +// can only be accessed using selector expressions). 
+func (w *writer) selector(obj types2.Object) { + w.selectorInfo(w.p.selectorIdx(obj)) +} + +func (w *writer) selectorInfo(info selectorInfo) { + w.Sync(pkgbits.SyncSelector) + w.pkgRef(info.pkgIdx) + w.StringRef(info.nameIdx) +} + +func (pw *pkgWriter) selectorIdx(obj types2.Object) selectorInfo { + pkgIdx := pw.pkgIdx(obj.Pkg()) + nameIdx := pw.StringIdx(obj.Name()) + return selectorInfo{pkgIdx: pkgIdx, nameIdx: nameIdx} +} + +// @@@ Compiler extensions + +func (w *writer) funcExt(obj *types2.Func) { + decl, ok := w.p.funDecls[obj] + assert(ok) + + // TODO(mdempsky): Extend these pragma validation flags to account + // for generics. E.g., linkname probably doesn't make sense at + // least. + + pragma := asPragmaFlag(decl.Pragma) + if pragma&ir.Systemstack != 0 && pragma&ir.Nosplit != 0 { + w.p.errorf(decl, "go:nosplit and go:systemstack cannot be combined") + } + wi := asWasmImport(decl.Pragma) + + if decl.Body != nil { + if pragma&ir.Noescape != 0 { + w.p.errorf(decl, "can only use //go:noescape with external func implementations") + } + if wi != nil { + w.p.errorf(decl, "can only use //go:wasmimport with external func implementations") + } + if (pragma&ir.UintptrKeepAlive != 0 && pragma&ir.UintptrEscapes == 0) && pragma&ir.Nosplit == 0 { + // Stack growth can't handle uintptr arguments that may + // be pointers (as we don't know which are pointers + // when creating the stack map). Thus uintptrkeepalive + // functions (and all transitive callees) must be + // nosplit. + // + // N.B. uintptrescapes implies uintptrkeepalive but it + // is OK since the arguments must escape to the heap. + // + // TODO(prattmic): Add recursive nosplit check of callees. + // TODO(prattmic): Functions with no body (i.e., + // assembly) must also be nosplit, but we can't check + // that here. + w.p.errorf(decl, "go:uintptrkeepalive requires go:nosplit") + } + } else { + if base.Flag.Complete || decl.Name.Value == "init" { + // Linknamed functions are allowed to have no body. 
Hopefully + // the linkname target has a body. See issue 23311. + // Wasmimport functions are also allowed to have no body. + if _, ok := w.p.linknames[obj]; !ok && wi == nil { + w.p.errorf(decl, "missing function body") + } + } + } + + sig, block := obj.Type().(*types2.Signature), decl.Body + body, closureVars := w.p.bodyIdx(sig, block, w.dict) + if len(closureVars) > 0 { + fmt.Fprintln(os.Stderr, "CLOSURE", closureVars) + } + assert(len(closureVars) == 0) + + w.Sync(pkgbits.SyncFuncExt) + w.pragmaFlag(pragma) + w.linkname(obj) + + if buildcfg.GOARCH == "wasm" { + if wi != nil { + w.String(wi.Module) + w.String(wi.Name) + } else { + w.String("") + w.String("") + } + } + + w.Bool(false) // stub extension + w.Reloc(pkgbits.RelocBody, body) + w.Sync(pkgbits.SyncEOF) +} + +func (w *writer) typeExt(obj *types2.TypeName) { + decl, ok := w.p.typDecls[obj] + assert(ok) + + w.Sync(pkgbits.SyncTypeExt) + + w.pragmaFlag(asPragmaFlag(decl.Pragma)) + + // No LSym.SymIdx info yet. + w.Int64(-1) + w.Int64(-1) +} + +func (w *writer) varExt(obj *types2.Var) { + w.Sync(pkgbits.SyncVarExt) + w.linkname(obj) +} + +func (w *writer) linkname(obj types2.Object) { + w.Sync(pkgbits.SyncLinkname) + w.Int64(-1) + w.String(w.p.linknames[obj]) +} + +func (w *writer) pragmaFlag(p ir.PragmaFlag) { + w.Sync(pkgbits.SyncPragma) + w.Int(int(p)) +} + +// @@@ Function bodies + +// bodyIdx returns the index for the given function body (specified by +// block), adding it to the export data +func (pw *pkgWriter) bodyIdx(sig *types2.Signature, block *syntax.BlockStmt, dict *writerDict) (idx pkgbits.Index, closureVars []posVar) { + w := pw.newWriter(pkgbits.RelocBody, pkgbits.SyncFuncBody) + w.sig = sig + w.dict = dict + + w.declareParams(sig) + if w.Bool(block != nil) { + w.stmts(block.List) + w.pos(block.Rbrace) + } + + return w.Flush(), w.closureVars +} + +func (w *writer) declareParams(sig *types2.Signature) { + addLocals := func(params *types2.Tuple) { + for i := 0; i < params.Len(); i++ { + 
w.addLocal(params.At(i)) + } + } + + if recv := sig.Recv(); recv != nil { + w.addLocal(recv) + } + addLocals(sig.Params()) + addLocals(sig.Results()) +} + +// addLocal records the declaration of a new local variable. +func (w *writer) addLocal(obj *types2.Var) { + idx := len(w.localsIdx) + + w.Sync(pkgbits.SyncAddLocal) + if w.p.SyncMarkers() { + w.Int(idx) + } + w.varDictIndex(obj) + + if w.localsIdx == nil { + w.localsIdx = make(map[*types2.Var]int) + } + w.localsIdx[obj] = idx +} + +// useLocal writes a reference to the given local or free variable +// into the bitstream. +func (w *writer) useLocal(pos syntax.Pos, obj *types2.Var) { + w.Sync(pkgbits.SyncUseObjLocal) + + if idx, ok := w.localsIdx[obj]; w.Bool(ok) { + w.Len(idx) + return + } + + idx, ok := w.closureVarsIdx[obj] + if !ok { + if w.closureVarsIdx == nil { + w.closureVarsIdx = make(map[*types2.Var]int) + } + idx = len(w.closureVars) + w.closureVars = append(w.closureVars, posVar{pos, obj}) + w.closureVarsIdx[obj] = idx + } + w.Len(idx) +} + +func (w *writer) openScope(pos syntax.Pos) { + w.Sync(pkgbits.SyncOpenScope) + w.pos(pos) +} + +func (w *writer) closeScope(pos syntax.Pos) { + w.Sync(pkgbits.SyncCloseScope) + w.pos(pos) + w.closeAnotherScope() +} + +func (w *writer) closeAnotherScope() { + w.Sync(pkgbits.SyncCloseAnotherScope) +} + +// @@@ Statements + +// stmt writes the given statement into the function body bitstream. +func (w *writer) stmt(stmt syntax.Stmt) { + var stmts []syntax.Stmt + if stmt != nil { + stmts = []syntax.Stmt{stmt} + } + w.stmts(stmts) +} + +func (w *writer) stmts(stmts []syntax.Stmt) { + dead := false + w.Sync(pkgbits.SyncStmts) + var lastLabel = -1 + for i, stmt := range stmts { + if _, ok := stmt.(*syntax.LabeledStmt); ok { + lastLabel = i + } + } + for i, stmt := range stmts { + if dead && i > lastLabel { + // Any statements after a terminating and last label statement are safe to omit. 
+ // Otherwise, code after label statement may refer to dead stmts between terminating + // and label statement, see issue #65593. + if _, ok := stmt.(*syntax.LabeledStmt); !ok { + continue + } + } + w.stmt1(stmt) + dead = w.p.terminates(stmt) + } + w.Code(stmtEnd) + w.Sync(pkgbits.SyncStmtsEnd) +} + +func (w *writer) stmt1(stmt syntax.Stmt) { + switch stmt := stmt.(type) { + default: + w.p.unexpected("statement", stmt) + + case nil, *syntax.EmptyStmt: + return + + case *syntax.AssignStmt: + switch { + case stmt.Rhs == nil: + w.Code(stmtIncDec) + w.op(binOps[stmt.Op]) + w.expr(stmt.Lhs) + w.pos(stmt) + + case stmt.Op != 0 && stmt.Op != syntax.Def: + w.Code(stmtAssignOp) + w.op(binOps[stmt.Op]) + w.expr(stmt.Lhs) + w.pos(stmt) + + var typ types2.Type + if stmt.Op != syntax.Shl && stmt.Op != syntax.Shr { + typ = w.p.typeOf(stmt.Lhs) + } + w.implicitConvExpr(typ, stmt.Rhs) + + default: + w.assignStmt(stmt, stmt.Lhs, stmt.Rhs) + } + + case *syntax.BlockStmt: + w.Code(stmtBlock) + w.blockStmt(stmt) + + case *syntax.BranchStmt: + w.Code(stmtBranch) + w.pos(stmt) + w.op(branchOps[stmt.Tok]) + w.optLabel(stmt.Label) + + case *syntax.CallStmt: + w.Code(stmtCall) + w.pos(stmt) + w.op(callOps[stmt.Tok]) + w.expr(stmt.Call) + if stmt.Tok == syntax.Defer { + w.optExpr(stmt.DeferAt) + } + + case *syntax.DeclStmt: + for _, decl := range stmt.DeclList { + w.declStmt(decl) + } + + case *syntax.ExprStmt: + w.Code(stmtExpr) + w.expr(stmt.X) + + case *syntax.ForStmt: + w.Code(stmtFor) + w.forStmt(stmt) + + case *syntax.IfStmt: + w.Code(stmtIf) + w.ifStmt(stmt) + + case *syntax.LabeledStmt: + w.Code(stmtLabel) + w.pos(stmt) + w.label(stmt.Label) + w.stmt1(stmt.Stmt) + + case *syntax.ReturnStmt: + w.Code(stmtReturn) + w.pos(stmt) + + resultTypes := w.sig.Results() + dstType := func(i int) types2.Type { + return resultTypes.At(i).Type() + } + w.multiExpr(stmt, dstType, syntax.UnpackListExpr(stmt.Results)) + + case *syntax.SelectStmt: + w.Code(stmtSelect) + w.selectStmt(stmt) + + case 
*syntax.SendStmt: + chanType := types2.CoreType(w.p.typeOf(stmt.Chan)).(*types2.Chan) + + w.Code(stmtSend) + w.pos(stmt) + w.expr(stmt.Chan) + w.implicitConvExpr(chanType.Elem(), stmt.Value) + + case *syntax.SwitchStmt: + w.Code(stmtSwitch) + w.switchStmt(stmt) + } +} + +func (w *writer) assignList(expr syntax.Expr) { + exprs := syntax.UnpackListExpr(expr) + w.Len(len(exprs)) + + for _, expr := range exprs { + w.assign(expr) + } +} + +func (w *writer) assign(expr syntax.Expr) { + expr = syntax.Unparen(expr) + + if name, ok := expr.(*syntax.Name); ok { + if name.Value == "_" { + w.Code(assignBlank) + return + } + + if obj, ok := w.p.info.Defs[name]; ok { + obj := obj.(*types2.Var) + + w.Code(assignDef) + w.pos(obj) + w.localIdent(obj) + w.typ(obj.Type()) + + // TODO(mdempsky): Minimize locals index size by deferring + // this until the variables actually come into scope. + w.addLocal(obj) + return + } + } + + w.Code(assignExpr) + w.expr(expr) +} + +func (w *writer) declStmt(decl syntax.Decl) { + switch decl := decl.(type) { + default: + w.p.unexpected("declaration", decl) + + case *syntax.ConstDecl, *syntax.TypeDecl: + + case *syntax.VarDecl: + w.assignStmt(decl, namesAsExpr(decl.NameList), decl.Values) + } +} + +// assignStmt writes out an assignment for "lhs = rhs". +func (w *writer) assignStmt(pos poser, lhs0, rhs0 syntax.Expr) { + lhs := syntax.UnpackListExpr(lhs0) + rhs := syntax.UnpackListExpr(rhs0) + + w.Code(stmtAssign) + w.pos(pos) + + // As if w.assignList(lhs0). + w.Len(len(lhs)) + for _, expr := range lhs { + w.assign(expr) + } + + dstType := func(i int) types2.Type { + dst := lhs[i] + + // Finding dstType is somewhat involved, because for VarDecl + // statements, the Names are only added to the info.{Defs,Uses} + // maps, not to info.Types. 
+ if name, ok := syntax.Unparen(dst).(*syntax.Name); ok { + if name.Value == "_" { + return nil // ok: no implicit conversion + } else if def, ok := w.p.info.Defs[name].(*types2.Var); ok { + return def.Type() + } else if use, ok := w.p.info.Uses[name].(*types2.Var); ok { + return use.Type() + } else { + w.p.fatalf(dst, "cannot find type of destination object: %v", dst) + } + } + + return w.p.typeOf(dst) + } + + w.multiExpr(pos, dstType, rhs) +} + +func (w *writer) blockStmt(stmt *syntax.BlockStmt) { + w.Sync(pkgbits.SyncBlockStmt) + w.openScope(stmt.Pos()) + w.stmts(stmt.List) + w.closeScope(stmt.Rbrace) +} + +func (w *writer) forStmt(stmt *syntax.ForStmt) { + w.Sync(pkgbits.SyncForStmt) + w.openScope(stmt.Pos()) + + if rang, ok := stmt.Init.(*syntax.RangeClause); w.Bool(ok) { + w.pos(rang) + w.assignList(rang.Lhs) + w.expr(rang.X) + + xtyp := w.p.typeOf(rang.X) + if _, isMap := types2.CoreType(xtyp).(*types2.Map); isMap { + w.rtype(xtyp) + } + { + lhs := syntax.UnpackListExpr(rang.Lhs) + assign := func(i int, src types2.Type) { + if i >= len(lhs) { + return + } + dst := syntax.Unparen(lhs[i]) + if name, ok := dst.(*syntax.Name); ok && name.Value == "_" { + return + } + + var dstType types2.Type + if rang.Def { + // For `:=` assignments, the LHS names only appear in Defs, + // not Types (as used by typeOf). 
+ dstType = w.p.info.Defs[dst.(*syntax.Name)].(*types2.Var).Type() + } else { + dstType = w.p.typeOf(dst) + } + + w.convRTTI(src, dstType) + } + + keyType, valueType := types2.RangeKeyVal(w.p.typeOf(rang.X)) + assign(0, keyType) + assign(1, valueType) + } + + } else { + if stmt.Cond != nil && w.p.staticBool(&stmt.Cond) < 0 { // always false + stmt.Post = nil + stmt.Body.List = nil + } + + w.pos(stmt) + w.stmt(stmt.Init) + w.optExpr(stmt.Cond) + w.stmt(stmt.Post) + } + + w.blockStmt(stmt.Body) + w.Bool(w.distinctVars(stmt)) + w.closeAnotherScope() +} + +func (w *writer) distinctVars(stmt *syntax.ForStmt) bool { + lv := base.Debug.LoopVar + fileVersion := w.p.info.FileVersions[stmt.Pos().Base()] + is122 := fileVersion == "" || version.Compare(fileVersion, "go1.22") >= 0 + + // Turning off loopvar for 1.22 is only possible with loopvarhash=qn + // + // Debug.LoopVar values to be preserved for 1.21 compatibility are 1 and 2, + // which are also set (=1) by GOEXPERIMENT=loopvar. The knobs for turning on + // the new, unshared, loopvar behavior apply to versions less than 1.21 because + // (1) 1.21 also did that and (2) this is believed to be the likely use case; + // anyone checking to see if it affects their code will just run the GOEXPERIMENT + // but will not also update all their go.mod files to 1.21. + // + // -gcflags=-d=loopvar=3 enables logging for 1.22 but does not turn loopvar on for <= 1.21. 
+ + return is122 || lv > 0 && lv != 3 +} + +func (w *writer) ifStmt(stmt *syntax.IfStmt) { + cond := w.p.staticBool(&stmt.Cond) + + w.Sync(pkgbits.SyncIfStmt) + w.openScope(stmt.Pos()) + w.pos(stmt) + w.stmt(stmt.Init) + w.expr(stmt.Cond) + w.Int(cond) + if cond >= 0 { + w.blockStmt(stmt.Then) + } else { + w.pos(stmt.Then.Rbrace) + } + if cond <= 0 { + w.stmt(stmt.Else) + } + w.closeAnotherScope() +} + +func (w *writer) selectStmt(stmt *syntax.SelectStmt) { + w.Sync(pkgbits.SyncSelectStmt) + + w.pos(stmt) + w.Len(len(stmt.Body)) + for i, clause := range stmt.Body { + if i > 0 { + w.closeScope(clause.Pos()) + } + w.openScope(clause.Pos()) + + w.pos(clause) + w.stmt(clause.Comm) + w.stmts(clause.Body) + } + if len(stmt.Body) > 0 { + w.closeScope(stmt.Rbrace) + } +} + +func (w *writer) switchStmt(stmt *syntax.SwitchStmt) { + w.Sync(pkgbits.SyncSwitchStmt) + + w.openScope(stmt.Pos()) + w.pos(stmt) + w.stmt(stmt.Init) + + var iface, tagType types2.Type + if guard, ok := stmt.Tag.(*syntax.TypeSwitchGuard); w.Bool(ok) { + iface = w.p.typeOf(guard.X) + + w.pos(guard) + if tag := guard.Lhs; w.Bool(tag != nil) { + w.pos(tag) + + // Like w.localIdent, but we don't have a types2.Object. + w.Sync(pkgbits.SyncLocalIdent) + w.pkg(w.p.curpkg) + w.String(tag.Value) + } + w.expr(guard.X) + } else { + tag := stmt.Tag + + var tagValue constant.Value + if tag != nil { + tv := w.p.typeAndValue(tag) + tagType = tv.Type + tagValue = tv.Value + } else { + tagType = types2.Typ[types2.Bool] + tagValue = constant.MakeBool(true) + } + + if tagValue != nil { + // If the switch tag has a constant value, look for a case + // clause that we always branch to. 
+ func() { + var target *syntax.CaseClause + Outer: + for _, clause := range stmt.Body { + if clause.Cases == nil { + target = clause + } + for _, cas := range syntax.UnpackListExpr(clause.Cases) { + tv := w.p.typeAndValue(cas) + if tv.Value == nil { + return // non-constant case; give up + } + if constant.Compare(tagValue, token.EQL, tv.Value) { + target = clause + break Outer + } + } + } + // We've found the target clause, if any. + + if target != nil { + if hasFallthrough(target.Body) { + return // fallthrough is tricky; give up + } + + // Rewrite as single "default" case. + target.Cases = nil + stmt.Body = []*syntax.CaseClause{target} + } else { + stmt.Body = nil + } + + // Clear switch tag (i.e., replace with implicit "true"). + tag = nil + stmt.Tag = nil + tagType = types2.Typ[types2.Bool] + }() + } + + // Walk is going to emit comparisons between the tag value and + // each case expression, and we want these comparisons to always + // have the same type. If there are any case values that can't be + // converted to the tag value's type, then convert everything to + // `any` instead. + Outer: + for _, clause := range stmt.Body { + for _, cas := range syntax.UnpackListExpr(clause.Cases) { + if casType := w.p.typeOf(cas); !types2.AssignableTo(casType, tagType) { + tagType = types2.NewInterfaceType(nil, nil) + break Outer + } + } + } + + if w.Bool(tag != nil) { + w.implicitConvExpr(tagType, tag) + } + } + + w.Len(len(stmt.Body)) + for i, clause := range stmt.Body { + if i > 0 { + w.closeScope(clause.Pos()) + } + w.openScope(clause.Pos()) + + w.pos(clause) + + cases := syntax.UnpackListExpr(clause.Cases) + if iface != nil { + w.Len(len(cases)) + for _, cas := range cases { + if w.Bool(isNil(w.p, cas)) { + continue + } + w.exprType(iface, cas) + } + } else { + // As if w.exprList(clause.Cases), + // but with implicit conversions to tagType. 
+ + w.Sync(pkgbits.SyncExprList) + w.Sync(pkgbits.SyncExprs) + w.Len(len(cases)) + for _, cas := range cases { + w.implicitConvExpr(tagType, cas) + } + } + + if obj, ok := w.p.info.Implicits[clause]; ok { + // TODO(mdempsky): These pos details are quirkish, but also + // necessary so the variable's position is correct for DWARF + // scope assignment later. It would probably be better for us to + // instead just set the variable's DWARF scoping info earlier so + // we can give it the correct position information. + pos := clause.Pos() + if typs := syntax.UnpackListExpr(clause.Cases); len(typs) != 0 { + pos = typeExprEndPos(typs[len(typs)-1]) + } + w.pos(pos) + + obj := obj.(*types2.Var) + w.typ(obj.Type()) + w.addLocal(obj) + } + + w.stmts(clause.Body) + } + if len(stmt.Body) > 0 { + w.closeScope(stmt.Rbrace) + } + + w.closeScope(stmt.Rbrace) +} + +func (w *writer) label(label *syntax.Name) { + w.Sync(pkgbits.SyncLabel) + + // TODO(mdempsky): Replace label strings with dense indices. + w.String(label.Value) +} + +func (w *writer) optLabel(label *syntax.Name) { + w.Sync(pkgbits.SyncOptLabel) + if w.Bool(label != nil) { + w.label(label) + } +} + +// @@@ Expressions + +// expr writes the given expression into the function body bitstream. 
+func (w *writer) expr(expr syntax.Expr) { + base.Assertf(expr != nil, "missing expression") + + expr = syntax.Unparen(expr) // skip parens; unneeded after typecheck + + obj, inst := lookupObj(w.p, expr) + targs := inst.TypeArgs + + if tv, ok := w.p.maybeTypeAndValue(expr); ok { + if tv.IsRuntimeHelper() { + if pkg := obj.Pkg(); pkg != nil && pkg.Name() == "runtime" { + objName := obj.Name() + w.Code(exprRuntimeBuiltin) + w.String(objName) + return + } + } + + if tv.IsType() { + w.p.fatalf(expr, "unexpected type expression %v", syntax.String(expr)) + } + + if tv.Value != nil { + w.Code(exprConst) + w.pos(expr) + typ := idealType(tv) + assert(typ != nil) + w.typ(typ) + w.Value(tv.Value) + return + } + + if _, isNil := obj.(*types2.Nil); isNil { + w.Code(exprZero) + w.pos(expr) + w.typ(tv.Type) + return + } + + // With shape types (and particular pointer shaping), we may have + // an expression of type "go.shape.*uint8", but need to reshape it + // to another shape-identical type to allow use in field + // selection, indexing, etc. 
+ if typ := tv.Type; !tv.IsBuiltin() && !isTuple(typ) && !isUntyped(typ) { + w.Code(exprReshape) + w.typ(typ) + // fallthrough + } + } + + if obj != nil { + if targs.Len() != 0 { + obj := obj.(*types2.Func) + + w.Code(exprFuncInst) + w.pos(expr) + w.funcInst(obj, targs) + return + } + + if isGlobal(obj) { + w.Code(exprGlobal) + w.obj(obj, nil) + return + } + + obj := obj.(*types2.Var) + assert(!obj.IsField()) + + w.Code(exprLocal) + w.useLocal(expr.Pos(), obj) + return + } + + switch expr := expr.(type) { + default: + w.p.unexpected("expression", expr) + + case *syntax.CompositeLit: + w.Code(exprCompLit) + w.compLit(expr) + + case *syntax.FuncLit: + w.Code(exprFuncLit) + w.funcLit(expr) + + case *syntax.SelectorExpr: + sel, ok := w.p.info.Selections[expr] + assert(ok) + + switch sel.Kind() { + default: + w.p.fatalf(expr, "unexpected selection kind: %v", sel.Kind()) + + case types2.FieldVal: + w.Code(exprFieldVal) + w.expr(expr.X) + w.pos(expr) + w.selector(sel.Obj()) + + case types2.MethodVal: + w.Code(exprMethodVal) + typ := w.recvExpr(expr, sel) + w.pos(expr) + w.methodExpr(expr, typ, sel) + + case types2.MethodExpr: + w.Code(exprMethodExpr) + + tv := w.p.typeAndValue(expr.X) + assert(tv.IsType()) + + index := sel.Index() + implicits := index[:len(index)-1] + + typ := tv.Type + w.typ(typ) + + w.Len(len(implicits)) + for _, ix := range implicits { + w.Len(ix) + typ = deref2(typ).Underlying().(*types2.Struct).Field(ix).Type() + } + + recv := sel.Obj().(*types2.Func).Type().(*types2.Signature).Recv().Type() + if w.Bool(isPtrTo(typ, recv)) { // need deref + typ = recv + } else if w.Bool(isPtrTo(recv, typ)) { // need addr + typ = recv + } + + w.pos(expr) + w.methodExpr(expr, typ, sel) + } + + case *syntax.IndexExpr: + _ = w.p.typeOf(expr.Index) // ensure this is an index expression, not an instantiation + + xtyp := w.p.typeOf(expr.X) + + var keyType types2.Type + if mapType, ok := types2.CoreType(xtyp).(*types2.Map); ok { + keyType = mapType.Key() + } + + 
w.Code(exprIndex) + w.expr(expr.X) + w.pos(expr) + w.implicitConvExpr(keyType, expr.Index) + if keyType != nil { + w.rtype(xtyp) + } + + case *syntax.SliceExpr: + w.Code(exprSlice) + w.expr(expr.X) + w.pos(expr) + for _, n := range &expr.Index { + w.optExpr(n) + } + + case *syntax.AssertExpr: + iface := w.p.typeOf(expr.X) + + w.Code(exprAssert) + w.expr(expr.X) + w.pos(expr) + w.exprType(iface, expr.Type) + w.rtype(iface) + + case *syntax.Operation: + if expr.Y == nil { + w.Code(exprUnaryOp) + w.op(unOps[expr.Op]) + w.pos(expr) + w.expr(expr.X) + break + } + + var commonType types2.Type + switch expr.Op { + case syntax.Shl, syntax.Shr: + // ok: operands are allowed to have different types + default: + xtyp := w.p.typeOf(expr.X) + ytyp := w.p.typeOf(expr.Y) + switch { + case types2.AssignableTo(xtyp, ytyp): + commonType = ytyp + case types2.AssignableTo(ytyp, xtyp): + commonType = xtyp + default: + w.p.fatalf(expr, "failed to find common type between %v and %v", xtyp, ytyp) + } + } + + w.Code(exprBinaryOp) + w.op(binOps[expr.Op]) + w.implicitConvExpr(commonType, expr.X) + w.pos(expr) + w.implicitConvExpr(commonType, expr.Y) + + case *syntax.CallExpr: + tv := w.p.typeAndValue(expr.Fun) + if tv.IsType() { + assert(len(expr.ArgList) == 1) + assert(!expr.HasDots) + w.convertExpr(tv.Type, expr.ArgList[0], false) + break + } + + var rtype types2.Type + if tv.IsBuiltin() { + switch obj, _ := lookupObj(w.p, syntax.Unparen(expr.Fun)); obj.Name() { + case "make": + assert(len(expr.ArgList) >= 1) + assert(!expr.HasDots) + + w.Code(exprMake) + w.pos(expr) + w.exprType(nil, expr.ArgList[0]) + w.exprs(expr.ArgList[1:]) + + typ := w.p.typeOf(expr) + switch coreType := types2.CoreType(typ).(type) { + default: + w.p.fatalf(expr, "unexpected core type: %v", coreType) + case *types2.Chan: + w.rtype(typ) + case *types2.Map: + w.rtype(typ) + case *types2.Slice: + w.rtype(sliceElem(typ)) + } + + return + + case "new": + assert(len(expr.ArgList) == 1) + assert(!expr.HasDots) + + 
w.Code(exprNew) + w.pos(expr) + w.exprType(nil, expr.ArgList[0]) + return + + case "Sizeof": + assert(len(expr.ArgList) == 1) + assert(!expr.HasDots) + + w.Code(exprSizeof) + w.pos(expr) + w.typ(w.p.typeOf(expr.ArgList[0])) + return + + case "Alignof": + assert(len(expr.ArgList) == 1) + assert(!expr.HasDots) + + w.Code(exprAlignof) + w.pos(expr) + w.typ(w.p.typeOf(expr.ArgList[0])) + return + + case "Offsetof": + assert(len(expr.ArgList) == 1) + assert(!expr.HasDots) + selector := syntax.Unparen(expr.ArgList[0]).(*syntax.SelectorExpr) + index := w.p.info.Selections[selector].Index() + + w.Code(exprOffsetof) + w.pos(expr) + w.typ(deref2(w.p.typeOf(selector.X))) + w.Len(len(index) - 1) + for _, idx := range index { + w.Len(idx) + } + return + + case "append": + rtype = sliceElem(w.p.typeOf(expr)) + case "copy": + typ := w.p.typeOf(expr.ArgList[0]) + if tuple, ok := typ.(*types2.Tuple); ok { // "copy(g())" + typ = tuple.At(0).Type() + } + rtype = sliceElem(typ) + case "delete": + typ := w.p.typeOf(expr.ArgList[0]) + if tuple, ok := typ.(*types2.Tuple); ok { // "delete(g())" + typ = tuple.At(0).Type() + } + rtype = typ + case "Slice": + rtype = sliceElem(w.p.typeOf(expr)) + } + } + + writeFunExpr := func() { + fun := syntax.Unparen(expr.Fun) + + if selector, ok := fun.(*syntax.SelectorExpr); ok { + if sel, ok := w.p.info.Selections[selector]; ok && sel.Kind() == types2.MethodVal { + w.Bool(true) // method call + typ := w.recvExpr(selector, sel) + w.methodExpr(selector, typ, sel) + return + } + } + + w.Bool(false) // not a method call (i.e., normal function call) + + if obj, inst := lookupObj(w.p, fun); w.Bool(obj != nil && inst.TypeArgs.Len() != 0) { + obj := obj.(*types2.Func) + + w.pos(fun) + w.funcInst(obj, inst.TypeArgs) + return + } + + w.expr(fun) + } + + sigType := types2.CoreType(tv.Type).(*types2.Signature) + paramTypes := sigType.Params() + + w.Code(exprCall) + writeFunExpr() + w.pos(expr) + + paramType := func(i int) types2.Type { + if sigType.Variadic() && 
!expr.HasDots && i >= paramTypes.Len()-1 { + return paramTypes.At(paramTypes.Len() - 1).Type().(*types2.Slice).Elem() + } + return paramTypes.At(i).Type() + } + + w.multiExpr(expr, paramType, expr.ArgList) + w.Bool(expr.HasDots) + if rtype != nil { + w.rtype(rtype) + } + } +} + +func sliceElem(typ types2.Type) types2.Type { + return types2.CoreType(typ).(*types2.Slice).Elem() +} + +func (w *writer) optExpr(expr syntax.Expr) { + if w.Bool(expr != nil) { + w.expr(expr) + } +} + +// recvExpr writes out expr.X, but handles any implicit addressing, +// dereferencing, and field selections appropriate for the method +// selection. +func (w *writer) recvExpr(expr *syntax.SelectorExpr, sel *types2.Selection) types2.Type { + index := sel.Index() + implicits := index[:len(index)-1] + + w.Code(exprRecv) + w.expr(expr.X) + w.pos(expr) + w.Len(len(implicits)) + + typ := w.p.typeOf(expr.X) + for _, ix := range implicits { + typ = deref2(typ).Underlying().(*types2.Struct).Field(ix).Type() + w.Len(ix) + } + + recv := sel.Obj().(*types2.Func).Type().(*types2.Signature).Recv().Type() + if w.Bool(isPtrTo(typ, recv)) { // needs deref + typ = recv + } else if w.Bool(isPtrTo(recv, typ)) { // needs addr + typ = recv + } + + return typ +} + +// funcInst writes a reference to an instantiated function. +func (w *writer) funcInst(obj *types2.Func, targs *types2.TypeList) { + info := w.p.objInstIdx(obj, targs, w.dict) + + // Type arguments list contains derived types; we can emit a static + // call to the shaped function, but need to dynamically compute the + // runtime dictionary pointer. + if w.Bool(info.anyDerived()) { + w.Len(w.dict.subdictIdx(info)) + return + } + + // Type arguments list is statically known; we can emit a static + // call with a statically reference to the respective runtime + // dictionary. + w.objInfo(info) +} + +// methodExpr writes out a reference to the method selected by +// expr. 
sel should be the corresponding types2.Selection, and recv +// the type produced after any implicit addressing, dereferencing, and +// field selection. (Note: recv might differ from sel.Obj()'s receiver +// parameter in the case of interface types, and is needed for +// handling type parameter methods.) +func (w *writer) methodExpr(expr *syntax.SelectorExpr, recv types2.Type, sel *types2.Selection) { + fun := sel.Obj().(*types2.Func) + sig := fun.Type().(*types2.Signature) + + w.typ(recv) + w.typ(sig) + w.pos(expr) + w.selector(fun) + + // Method on a type parameter. These require an indirect call + // through the current function's runtime dictionary. + if typeParam, ok := types2.Unalias(recv).(*types2.TypeParam); w.Bool(ok) { + typeParamIdx := w.dict.typeParamIndex(typeParam) + methodInfo := w.p.selectorIdx(fun) + + w.Len(w.dict.typeParamMethodExprIdx(typeParamIdx, methodInfo)) + return + } + + if isInterface(recv) != isInterface(sig.Recv().Type()) { + w.p.fatalf(expr, "isInterface inconsistency: %v and %v", recv, sig.Recv().Type()) + } + + if !isInterface(recv) { + if named, ok := types2.Unalias(deref2(recv)).(*types2.Named); ok { + obj, targs := splitNamed(named) + info := w.p.objInstIdx(obj, targs, w.dict) + + // Method on a derived receiver type. These can be handled by a + // static call to the shaped method, but require dynamically + // looking up the appropriate dictionary argument in the current + // function's runtime dictionary. + if w.p.hasImplicitTypeParams(obj) || info.anyDerived() { + w.Bool(true) // dynamic subdictionary + w.Len(w.dict.subdictIdx(info)) + return + } + + // Method on a fully known receiver type. These can be handled + // by a static call to the shaped method, and with a static + // reference to the receiver type's dictionary. 
+ if targs.Len() != 0 { + w.Bool(false) // no dynamic subdictionary + w.Bool(true) // static dictionary + w.objInfo(info) + return + } + } + } + + w.Bool(false) // no dynamic subdictionary + w.Bool(false) // no static dictionary +} + +// multiExpr writes a sequence of expressions, where the i'th value is +// implicitly converted to dstType(i). It also handles when exprs is a +// single, multi-valued expression (e.g., the multi-valued argument in +// an f(g()) call, or the RHS operand in a comma-ok assignment). +func (w *writer) multiExpr(pos poser, dstType func(int) types2.Type, exprs []syntax.Expr) { + w.Sync(pkgbits.SyncMultiExpr) + + if len(exprs) == 1 { + expr := exprs[0] + if tuple, ok := w.p.typeOf(expr).(*types2.Tuple); ok { + assert(tuple.Len() > 1) + w.Bool(true) // N:1 assignment + w.pos(pos) + w.expr(expr) + + w.Len(tuple.Len()) + for i := 0; i < tuple.Len(); i++ { + src := tuple.At(i).Type() + // TODO(mdempsky): Investigate not writing src here. I think + // the reader should be able to infer it from expr anyway. + w.typ(src) + if dst := dstType(i); w.Bool(dst != nil && !types2.Identical(src, dst)) { + if src == nil || dst == nil { + w.p.fatalf(pos, "src is %v, dst is %v", src, dst) + } + if !types2.AssignableTo(src, dst) { + w.p.fatalf(pos, "%v is not assignable to %v", src, dst) + } + w.typ(dst) + w.convRTTI(src, dst) + } + } + return + } + } + + w.Bool(false) // N:N assignment + w.Len(len(exprs)) + for i, expr := range exprs { + w.implicitConvExpr(dstType(i), expr) + } +} + +// implicitConvExpr is like expr, but if dst is non-nil and different +// from expr's type, then an implicit conversion operation is inserted +// at expr's position. +func (w *writer) implicitConvExpr(dst types2.Type, expr syntax.Expr) { + w.convertExpr(dst, expr, true) +} + +func (w *writer) convertExpr(dst types2.Type, expr syntax.Expr, implicit bool) { + src := w.p.typeOf(expr) + + // Omit implicit no-op conversions. 
+ identical := dst == nil || types2.Identical(src, dst) + if implicit && identical { + w.expr(expr) + return + } + + if implicit && !types2.AssignableTo(src, dst) { + w.p.fatalf(expr, "%v is not assignable to %v", src, dst) + } + + w.Code(exprConvert) + w.Bool(implicit) + w.typ(dst) + w.pos(expr) + w.convRTTI(src, dst) + w.Bool(isTypeParam(dst)) + w.Bool(identical) + w.expr(expr) +} + +func (w *writer) compLit(lit *syntax.CompositeLit) { + typ := w.p.typeOf(lit) + + w.Sync(pkgbits.SyncCompLit) + w.pos(lit) + w.typ(typ) + + if ptr, ok := types2.CoreType(typ).(*types2.Pointer); ok { + typ = ptr.Elem() + } + var keyType, elemType types2.Type + var structType *types2.Struct + switch typ0 := typ; typ := types2.CoreType(typ).(type) { + default: + w.p.fatalf(lit, "unexpected composite literal type: %v", typ) + case *types2.Array: + elemType = typ.Elem() + case *types2.Map: + w.rtype(typ0) + keyType, elemType = typ.Key(), typ.Elem() + case *types2.Slice: + elemType = typ.Elem() + case *types2.Struct: + structType = typ + } + + w.Len(len(lit.ElemList)) + for i, elem := range lit.ElemList { + elemType := elemType + if structType != nil { + if kv, ok := elem.(*syntax.KeyValueExpr); ok { + // use position of expr.Key rather than of elem (which has position of ':') + w.pos(kv.Key) + i = fieldIndex(w.p.info, structType, kv.Key.(*syntax.Name)) + elem = kv.Value + } else { + w.pos(elem) + } + elemType = structType.Field(i).Type() + w.Len(i) + } else { + if kv, ok := elem.(*syntax.KeyValueExpr); w.Bool(ok) { + // use position of expr.Key rather than of elem (which has position of ':') + w.pos(kv.Key) + w.implicitConvExpr(keyType, kv.Key) + elem = kv.Value + } + } + w.pos(elem) + w.implicitConvExpr(elemType, elem) + } +} + +func (w *writer) funcLit(expr *syntax.FuncLit) { + sig := w.p.typeOf(expr).(*types2.Signature) + + body, closureVars := w.p.bodyIdx(sig, expr.Body, w.dict) + + w.Sync(pkgbits.SyncFuncLit) + w.pos(expr) + w.signature(sig) + + w.Len(len(closureVars)) + for _, cv := 
range closureVars { + w.pos(cv.pos) + w.useLocal(cv.pos, cv.var_) + } + + w.Reloc(pkgbits.RelocBody, body) +} + +type posVar struct { + pos syntax.Pos + var_ *types2.Var +} + +func (p posVar) String() string { + return p.pos.String() + ":" + p.var_.String() +} + +func (w *writer) exprList(expr syntax.Expr) { + w.Sync(pkgbits.SyncExprList) + w.exprs(syntax.UnpackListExpr(expr)) +} + +func (w *writer) exprs(exprs []syntax.Expr) { + w.Sync(pkgbits.SyncExprs) + w.Len(len(exprs)) + for _, expr := range exprs { + w.expr(expr) + } +} + +// rtype writes information so that the reader can construct an +// expression of type *runtime._type representing typ. +func (w *writer) rtype(typ types2.Type) { + typ = types2.Default(typ) + + info := w.p.typIdx(typ, w.dict) + w.rtypeInfo(info) +} + +func (w *writer) rtypeInfo(info typeInfo) { + w.Sync(pkgbits.SyncRType) + + if w.Bool(info.derived) { + w.Len(w.dict.rtypeIdx(info)) + } else { + w.typInfo(info) + } +} + +// varDictIndex writes out information for populating DictIndex for +// the ir.Name that will represent obj. +func (w *writer) varDictIndex(obj *types2.Var) { + info := w.p.typIdx(obj.Type(), w.dict) + if w.Bool(info.derived) { + w.Len(w.dict.rtypeIdx(info)) + } +} + +func isUntyped(typ types2.Type) bool { + basic, ok := types2.Unalias(typ).(*types2.Basic) + return ok && basic.Info()&types2.IsUntyped != 0 +} + +func isTuple(typ types2.Type) bool { + _, ok := typ.(*types2.Tuple) + return ok +} + +func (w *writer) itab(typ, iface types2.Type) { + typ = types2.Default(typ) + iface = types2.Default(iface) + + typInfo := w.p.typIdx(typ, w.dict) + ifaceInfo := w.p.typIdx(iface, w.dict) + + w.rtypeInfo(typInfo) + w.rtypeInfo(ifaceInfo) + if w.Bool(typInfo.derived || ifaceInfo.derived) { + w.Len(w.dict.itabIdx(typInfo, ifaceInfo)) + } +} + +// convRTTI writes information so that the reader can construct +// expressions for converting from src to dst. 
+func (w *writer) convRTTI(src, dst types2.Type) { + w.Sync(pkgbits.SyncConvRTTI) + w.itab(src, dst) +} + +func (w *writer) exprType(iface types2.Type, typ syntax.Expr) { + base.Assertf(iface == nil || isInterface(iface), "%v must be nil or an interface type", iface) + + tv := w.p.typeAndValue(typ) + assert(tv.IsType()) + + w.Sync(pkgbits.SyncExprType) + w.pos(typ) + + if w.Bool(iface != nil && !iface.Underlying().(*types2.Interface).Empty()) { + w.itab(tv.Type, iface) + } else { + w.rtype(tv.Type) + + info := w.p.typIdx(tv.Type, w.dict) + w.Bool(info.derived) + } +} + +// isInterface reports whether typ is known to be an interface type. +// If typ is a type parameter, then isInterface reports an internal +// compiler error instead. +func isInterface(typ types2.Type) bool { + if _, ok := types2.Unalias(typ).(*types2.TypeParam); ok { + // typ is a type parameter and may be instantiated as either a + // concrete or interface type, so the writer can't depend on + // knowing this. + base.Fatalf("%v is a type parameter", typ) + } + + _, ok := typ.Underlying().(*types2.Interface) + return ok +} + +// op writes an Op into the bitstream. +func (w *writer) op(op ir.Op) { + // TODO(mdempsky): Remove in favor of explicit codes? Would make + // export data more stable against internal refactorings, but low + // priority at the moment. + assert(op != 0) + w.Sync(pkgbits.SyncOp) + w.Len(int(op)) +} + +// @@@ Package initialization + +// Caution: This code is still clumsy, because toolstash -cmp is +// particularly sensitive to it. + +type typeDeclGen struct { + *syntax.TypeDecl + gen int + + // Implicit type parameters in scope at this type declaration. + implicits []*types2.TypeName +} + +type fileImports struct { + importedEmbed, importedUnsafe bool +} + +// declCollector is a visitor type that collects compiler-needed +// information about declarations that types2 doesn't track. 
+// +// Notably, it maps declared types and functions back to their +// declaration statement, keeps track of implicit type parameters, and +// assigns unique type "generation" numbers to local defined types. +type declCollector struct { + pw *pkgWriter + typegen *int + file *fileImports + withinFunc bool + implicits []*types2.TypeName +} + +func (c *declCollector) withTParams(obj types2.Object) *declCollector { + tparams := objTypeParams(obj) + n := tparams.Len() + if n == 0 { + return c + } + + copy := *c + copy.implicits = copy.implicits[:len(copy.implicits):len(copy.implicits)] + for i := 0; i < n; i++ { + copy.implicits = append(copy.implicits, tparams.At(i).Obj()) + } + return © +} + +func (c *declCollector) Visit(n syntax.Node) syntax.Visitor { + pw := c.pw + + switch n := n.(type) { + case *syntax.File: + pw.checkPragmas(n.Pragma, ir.GoBuildPragma, false) + + case *syntax.ImportDecl: + pw.checkPragmas(n.Pragma, 0, false) + + switch pw.info.PkgNameOf(n).Imported().Path() { + case "embed": + c.file.importedEmbed = true + case "unsafe": + c.file.importedUnsafe = true + } + + case *syntax.ConstDecl: + pw.checkPragmas(n.Pragma, 0, false) + + case *syntax.FuncDecl: + pw.checkPragmas(n.Pragma, funcPragmas, false) + + obj := pw.info.Defs[n.Name].(*types2.Func) + pw.funDecls[obj] = n + + return c.withTParams(obj) + + case *syntax.TypeDecl: + obj := pw.info.Defs[n.Name].(*types2.TypeName) + d := typeDeclGen{TypeDecl: n, implicits: c.implicits} + + if n.Alias { + pw.checkPragmas(n.Pragma, 0, false) + } else { + pw.checkPragmas(n.Pragma, 0, false) + + // Assign a unique ID to function-scoped defined types. + if c.withinFunc { + *c.typegen++ + d.gen = *c.typegen + } + } + + pw.typDecls[obj] = d + + // TODO(mdempsky): Omit? Not strictly necessary; only matters for + // type declarations within function literals within parameterized + // type declarations, but types2 the function literals will be + // constant folded away. 
+ return c.withTParams(obj) + + case *syntax.VarDecl: + pw.checkPragmas(n.Pragma, 0, true) + + if p, ok := n.Pragma.(*pragmas); ok && len(p.Embeds) > 0 { + if err := checkEmbed(n, c.file.importedEmbed, c.withinFunc); err != nil { + pw.errorf(p.Embeds[0].Pos, "%s", err) + } + } + + case *syntax.BlockStmt: + if !c.withinFunc { + copy := *c + copy.withinFunc = true + return © + } + } + + return c +} + +func (pw *pkgWriter) collectDecls(noders []*noder) { + var typegen int + for _, p := range noders { + var file fileImports + + syntax.Walk(p.file, &declCollector{ + pw: pw, + typegen: &typegen, + file: &file, + }) + + pw.cgoPragmas = append(pw.cgoPragmas, p.pragcgobuf...) + + for _, l := range p.linknames { + if !file.importedUnsafe { + pw.errorf(l.pos, "//go:linkname only allowed in Go files that import \"unsafe\"") + continue + } + + switch obj := pw.curpkg.Scope().Lookup(l.local).(type) { + case *types2.Func, *types2.Var: + if _, ok := pw.linknames[obj]; !ok { + pw.linknames[obj] = l.remote + } else { + pw.errorf(l.pos, "duplicate //go:linkname for %s", l.local) + } + + default: + if types.AllowsGoVersion(1, 18) { + pw.errorf(l.pos, "//go:linkname must refer to declared function or variable") + } + } + } + } +} + +func (pw *pkgWriter) checkPragmas(p syntax.Pragma, allowed ir.PragmaFlag, embedOK bool) { + if p == nil { + return + } + pragma := p.(*pragmas) + + for _, pos := range pragma.Pos { + if pos.Flag&^allowed != 0 { + pw.errorf(pos.Pos, "misplaced compiler directive") + } + } + + if !embedOK { + for _, e := range pragma.Embeds { + pw.errorf(e.Pos, "misplaced go:embed directive") + } + } +} + +func (w *writer) pkgInit(noders []*noder) { + w.Len(len(w.p.cgoPragmas)) + for _, cgoPragma := range w.p.cgoPragmas { + w.Strings(cgoPragma) + } + + w.pkgInitOrder() + + w.Sync(pkgbits.SyncDecls) + for _, p := range noders { + for _, decl := range p.file.DeclList { + w.pkgDecl(decl) + } + } + w.Code(declEnd) + + w.Sync(pkgbits.SyncEOF) +} + +func (w *writer) pkgInitOrder() 
{ + // TODO(mdempsky): Write as a function body instead? + w.Len(len(w.p.info.InitOrder)) + for _, init := range w.p.info.InitOrder { + w.Len(len(init.Lhs)) + for _, v := range init.Lhs { + w.obj(v, nil) + } + w.expr(init.Rhs) + } +} + +func (w *writer) pkgDecl(decl syntax.Decl) { + switch decl := decl.(type) { + default: + w.p.unexpected("declaration", decl) + + case *syntax.ImportDecl: + + case *syntax.ConstDecl: + w.Code(declOther) + w.pkgObjs(decl.NameList...) + + case *syntax.FuncDecl: + if decl.Name.Value == "_" { + break // skip blank functions + } + + obj := w.p.info.Defs[decl.Name].(*types2.Func) + sig := obj.Type().(*types2.Signature) + + if sig.RecvTypeParams() != nil || sig.TypeParams() != nil { + break // skip generic functions + } + + if recv := sig.Recv(); recv != nil { + w.Code(declMethod) + w.typ(recvBase(recv)) + w.selector(obj) + break + } + + w.Code(declFunc) + w.pkgObjs(decl.Name) + + case *syntax.TypeDecl: + if len(decl.TParamList) != 0 { + break // skip generic type decls + } + + if decl.Name.Value == "_" { + break // skip blank type decls + } + + name := w.p.info.Defs[decl.Name].(*types2.TypeName) + // Skip type declarations for interfaces that are only usable as + // type parameter bounds. + if iface, ok := name.Type().Underlying().(*types2.Interface); ok && !iface.IsMethodSet() { + break + } + + w.Code(declOther) + w.pkgObjs(decl.Name) + + case *syntax.VarDecl: + w.Code(declVar) + w.pkgObjs(decl.NameList...) 
+ + var embeds []pragmaEmbed + if p, ok := decl.Pragma.(*pragmas); ok { + embeds = p.Embeds + } + w.Len(len(embeds)) + for _, embed := range embeds { + w.pos(embed.Pos) + w.Strings(embed.Patterns) + } + } +} + +func (w *writer) pkgObjs(names ...*syntax.Name) { + w.Sync(pkgbits.SyncDeclNames) + w.Len(len(names)) + + for _, name := range names { + obj, ok := w.p.info.Defs[name] + assert(ok) + + w.Sync(pkgbits.SyncDeclName) + w.obj(obj, nil) + } +} + +// @@@ Helpers + +// staticBool analyzes a boolean expression and reports whether it's +// always true (positive result), always false (negative result), or +// unknown (zero). +// +// It also simplifies the expression while preserving semantics, if +// possible. +func (pw *pkgWriter) staticBool(ep *syntax.Expr) int { + if val := pw.typeAndValue(*ep).Value; val != nil { + if constant.BoolVal(val) { + return +1 + } else { + return -1 + } + } + + if e, ok := (*ep).(*syntax.Operation); ok { + switch e.Op { + case syntax.Not: + return pw.staticBool(&e.X) + + case syntax.AndAnd: + x := pw.staticBool(&e.X) + if x < 0 { + *ep = e.X + return x + } + + y := pw.staticBool(&e.Y) + if x > 0 || y < 0 { + if pw.typeAndValue(e.X).Value != nil { + *ep = e.Y + } + return y + } + + case syntax.OrOr: + x := pw.staticBool(&e.X) + if x > 0 { + *ep = e.X + return x + } + + y := pw.staticBool(&e.Y) + if x < 0 || y > 0 { + if pw.typeAndValue(e.X).Value != nil { + *ep = e.Y + } + return y + } + } + } + + return 0 +} + +// hasImplicitTypeParams reports whether obj is a defined type with +// implicit type parameters (e.g., declared within a generic function +// or method). +func (pw *pkgWriter) hasImplicitTypeParams(obj *types2.TypeName) bool { + if obj.Pkg() == pw.curpkg { + decl, ok := pw.typDecls[obj] + assert(ok) + if len(decl.implicits) != 0 { + return true + } + } + return false +} + +// isDefinedType reports whether obj is a defined type. 
+func isDefinedType(obj types2.Object) bool { + if obj, ok := obj.(*types2.TypeName); ok { + return !obj.IsAlias() + } + return false +} + +// isGlobal reports whether obj was declared at package scope. +// +// Caveat: blank objects are not declared. +func isGlobal(obj types2.Object) bool { + return obj.Parent() == obj.Pkg().Scope() +} + +// lookupObj returns the object that expr refers to, if any. If expr +// is an explicit instantiation of a generic object, then the instance +// object is returned as well. +func lookupObj(p *pkgWriter, expr syntax.Expr) (obj types2.Object, inst types2.Instance) { + if index, ok := expr.(*syntax.IndexExpr); ok { + args := syntax.UnpackListExpr(index.Index) + if len(args) == 1 { + tv := p.typeAndValue(args[0]) + if tv.IsValue() { + return // normal index expression + } + } + + expr = index.X + } + + // Strip package qualifier, if present. + if sel, ok := expr.(*syntax.SelectorExpr); ok { + if !isPkgQual(p.info, sel) { + return // normal selector expression + } + expr = sel.Sel + } + + if name, ok := expr.(*syntax.Name); ok { + obj = p.info.Uses[name] + inst = p.info.Instances[name] + } + return +} + +// isPkgQual reports whether the given selector expression is a +// package-qualified identifier. +func isPkgQual(info *types2.Info, sel *syntax.SelectorExpr) bool { + if name, ok := sel.X.(*syntax.Name); ok { + _, isPkgName := info.Uses[name].(*types2.PkgName) + return isPkgName + } + return false +} + +// isNil reports whether expr is a (possibly parenthesized) reference +// to the predeclared nil value. +func isNil(p *pkgWriter, expr syntax.Expr) bool { + tv := p.typeAndValue(expr) + return tv.IsNil() +} + +// isBuiltin reports whether expr is a (possibly parenthesized) +// referenced to the specified built-in function. 
+func (pw *pkgWriter) isBuiltin(expr syntax.Expr, builtin string) bool { + if name, ok := syntax.Unparen(expr).(*syntax.Name); ok && name.Value == builtin { + return pw.typeAndValue(name).IsBuiltin() + } + return false +} + +// recvBase returns the base type for the given receiver parameter. +func recvBase(recv *types2.Var) *types2.Named { + typ := types2.Unalias(recv.Type()) + if ptr, ok := typ.(*types2.Pointer); ok { + typ = ptr.Elem() + } + return typ.(*types2.Named) +} + +// namesAsExpr returns a list of names as a syntax.Expr. +func namesAsExpr(names []*syntax.Name) syntax.Expr { + if len(names) == 1 { + return names[0] + } + + exprs := make([]syntax.Expr, len(names)) + for i, name := range names { + exprs[i] = name + } + return &syntax.ListExpr{ElemList: exprs} +} + +// fieldIndex returns the index of the struct field named by key. +func fieldIndex(info *types2.Info, str *types2.Struct, key *syntax.Name) int { + field := info.Uses[key].(*types2.Var) + + for i := 0; i < str.NumFields(); i++ { + if str.Field(i) == field { + return i + } + } + + panic(fmt.Sprintf("%s: %v is not a field of %v", key.Pos(), field, str)) +} + +// objTypeParams returns the type parameters on the given object. +func objTypeParams(obj types2.Object) *types2.TypeParamList { + switch obj := obj.(type) { + case *types2.Func: + sig := obj.Type().(*types2.Signature) + if sig.Recv() != nil { + return sig.RecvTypeParams() + } + return sig.TypeParams() + case *types2.TypeName: + if !obj.IsAlias() { + return obj.Type().(*types2.Named).TypeParams() + } + } + return nil +} + +// splitNamed decomposes a use of a defined type into its original +// type definition and the type arguments used to instantiate it. 
+func splitNamed(typ *types2.Named) (*types2.TypeName, *types2.TypeList) { + base.Assertf(typ.TypeParams().Len() == typ.TypeArgs().Len(), "use of uninstantiated type: %v", typ) + + orig := typ.Origin() + base.Assertf(orig.TypeArgs() == nil, "origin %v of %v has type arguments", orig, typ) + base.Assertf(typ.Obj() == orig.Obj(), "%v has object %v, but %v has object %v", typ, typ.Obj(), orig, orig.Obj()) + + return typ.Obj(), typ.TypeArgs() +} + +func asPragmaFlag(p syntax.Pragma) ir.PragmaFlag { + if p == nil { + return 0 + } + return p.(*pragmas).Flag +} + +func asWasmImport(p syntax.Pragma) *WasmImport { + if p == nil { + return nil + } + return p.(*pragmas).WasmImport +} + +// isPtrTo reports whether from is the type *to. +func isPtrTo(from, to types2.Type) bool { + ptr, ok := types2.Unalias(from).(*types2.Pointer) + return ok && types2.Identical(ptr.Elem(), to) +} + +// hasFallthrough reports whether stmts ends in a fallthrough +// statement. +func hasFallthrough(stmts []syntax.Stmt) bool { + last, ok := lastNonEmptyStmt(stmts).(*syntax.BranchStmt) + return ok && last.Tok == syntax.Fallthrough +} + +// lastNonEmptyStmt returns the last non-empty statement in list, if +// any. +func lastNonEmptyStmt(stmts []syntax.Stmt) syntax.Stmt { + for i := len(stmts) - 1; i >= 0; i-- { + stmt := stmts[i] + if _, ok := stmt.(*syntax.EmptyStmt); !ok { + return stmt + } + } + return nil +} + +// terminates reports whether stmt terminates normal control flow +// (i.e., does not merely advance to the following statement). 
+func (pw *pkgWriter) terminates(stmt syntax.Stmt) bool { + switch stmt := stmt.(type) { + case *syntax.BranchStmt: + if stmt.Tok == syntax.Goto { + return true + } + case *syntax.ReturnStmt: + return true + case *syntax.ExprStmt: + if call, ok := syntax.Unparen(stmt.X).(*syntax.CallExpr); ok { + if pw.isBuiltin(call.Fun, "panic") { + return true + } + } + + // The handling of BlockStmt here is approximate, but it serves to + // allow dead-code elimination for: + // + // if true { + // return x + // } + // unreachable + case *syntax.IfStmt: + cond := pw.staticBool(&stmt.Cond) + return (cond < 0 || pw.terminates(stmt.Then)) && (cond > 0 || pw.terminates(stmt.Else)) + case *syntax.BlockStmt: + return pw.terminates(lastNonEmptyStmt(stmt.List)) + } + + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/objw/objw.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/objw/objw.go new file mode 100644 index 0000000000000000000000000000000000000000..77744672c197a980e42b990414f4ccc66db86047 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/objw/objw.go @@ -0,0 +1,102 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package objw + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/bitvec" + "cmd/compile/internal/types" + "cmd/internal/obj" + "encoding/binary" +) + +// Uint8 writes an unsigned byte v into s at offset off, +// and returns the next unused offset (i.e., off+1). 
+func Uint8(s *obj.LSym, off int, v uint8) int { + return UintN(s, off, uint64(v), 1) +} + +func Uint16(s *obj.LSym, off int, v uint16) int { + return UintN(s, off, uint64(v), 2) +} + +func Uint32(s *obj.LSym, off int, v uint32) int { + return UintN(s, off, uint64(v), 4) +} + +func Uintptr(s *obj.LSym, off int, v uint64) int { + return UintN(s, off, v, types.PtrSize) +} + +// Uvarint writes a varint v into s at offset off, +// and returns the next unused offset. +func Uvarint(s *obj.LSym, off int, v uint64) int { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], v) + return int(s.WriteBytes(base.Ctxt, int64(off), buf[:n])) +} + +func Bool(s *obj.LSym, off int, v bool) int { + w := 0 + if v { + w = 1 + } + return UintN(s, off, uint64(w), 1) +} + +// UintN writes an unsigned integer v of size wid bytes into s at offset off, +// and returns the next unused offset. +func UintN(s *obj.LSym, off int, v uint64, wid int) int { + if off&(wid-1) != 0 { + base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off) + } + s.WriteInt(base.Ctxt, int64(off), wid, int64(v)) + return off + wid +} + +func SymPtr(s *obj.LSym, off int, x *obj.LSym, xoff int) int { + off = int(types.RoundUp(int64(off), int64(types.PtrSize))) + s.WriteAddr(base.Ctxt, int64(off), types.PtrSize, x, int64(xoff)) + off += types.PtrSize + return off +} + +func SymPtrWeak(s *obj.LSym, off int, x *obj.LSym, xoff int) int { + off = int(types.RoundUp(int64(off), int64(types.PtrSize))) + s.WriteWeakAddr(base.Ctxt, int64(off), types.PtrSize, x, int64(xoff)) + off += types.PtrSize + return off +} + +func SymPtrOff(s *obj.LSym, off int, x *obj.LSym) int { + s.WriteOff(base.Ctxt, int64(off), x, 0) + off += 4 + return off +} + +func SymPtrWeakOff(s *obj.LSym, off int, x *obj.LSym) int { + s.WriteWeakOff(base.Ctxt, int64(off), x, 0) + off += 4 + return off +} + +func Global(s *obj.LSym, width int32, flags int16) { + if flags&obj.LOCAL != 0 { + s.Set(obj.AttrLocal, true) + flags &^= 
obj.LOCAL + } + base.Ctxt.Globl(s, int64(width), int(flags)) +} + +// BitVec writes the contents of bv into s as sequence of bytes +// in little-endian order, and returns the next unused offset. +func BitVec(s *obj.LSym, off int, bv bitvec.BitVec) int { + // Runtime reads the bitmaps as byte arrays. Oblige. + for j := 0; int32(j) < bv.N; j += 8 { + word := bv.B[j/32] + off = Uint8(s, off, uint8(word>>(uint(j)%32))) + } + return off +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/objw/prog.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/objw/prog.go new file mode 100644 index 0000000000000000000000000000000000000000..84fb9967235ea0a6f9ae8f466365a40bfacad847 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/objw/prog.go @@ -0,0 +1,214 @@ +// Derived from Inferno utils/6c/txt.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/txt.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package objw + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/internal/obj" + "cmd/internal/src" + "internal/abi" +) + +var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839 + +// NewProgs returns a new Progs for fn. +// worker indicates which of the backend workers will use the Progs. +func NewProgs(fn *ir.Func, worker int) *Progs { + pp := new(Progs) + if base.Ctxt.CanReuseProgs() { + sz := len(sharedProgArray) / base.Flag.LowerC + pp.Cache = sharedProgArray[sz*worker : sz*(worker+1)] + } + pp.CurFunc = fn + + // prime the pump + pp.Next = pp.NewProg() + pp.Clear(pp.Next) + + pp.Pos = fn.Pos() + pp.SetText(fn) + // PCDATA tables implicitly start with index -1. 
+ pp.PrevLive = -1 + pp.NextLive = pp.PrevLive + pp.NextUnsafe = pp.PrevUnsafe + return pp +} + +// Progs accumulates Progs for a function and converts them into machine code. +type Progs struct { + Text *obj.Prog // ATEXT Prog for this function + Next *obj.Prog // next Prog + PC int64 // virtual PC; count of Progs + Pos src.XPos // position to use for new Progs + CurFunc *ir.Func // fn these Progs are for + Cache []obj.Prog // local progcache + CacheIndex int // first free element of progcache + + NextLive StackMapIndex // liveness index for the next Prog + PrevLive StackMapIndex // last emitted liveness index + + NextUnsafe bool // unsafe mark for the next Prog + PrevUnsafe bool // last emitted unsafe mark +} + +type StackMapIndex int + +// StackMapDontCare indicates that the stack map index at a Value +// doesn't matter. +// +// This is a sentinel value that should never be emitted to the PCDATA +// stream. We use -1000 because that's obviously never a valid stack +// index (but -1 is). +const StackMapDontCare StackMapIndex = -1000 + +func (s StackMapIndex) StackMapValid() bool { + return s != StackMapDontCare +} + +func (pp *Progs) NewProg() *obj.Prog { + var p *obj.Prog + if pp.CacheIndex < len(pp.Cache) { + p = &pp.Cache[pp.CacheIndex] + pp.CacheIndex++ + } else { + p = new(obj.Prog) + } + p.Ctxt = base.Ctxt + return p +} + +// Flush converts from pp to machine code. +func (pp *Progs) Flush() { + plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.CurFunc} + obj.Flushplist(base.Ctxt, plist, pp.NewProg) +} + +// Free clears pp and any associated resources. +func (pp *Progs) Free() { + if base.Ctxt.CanReuseProgs() { + // Clear progs to enable GC and avoid abuse. + s := pp.Cache[:pp.CacheIndex] + for i := range s { + s[i] = obj.Prog{} + } + } + // Clear pp to avoid abuse. + *pp = Progs{} +} + +// Prog adds a Prog with instruction As to pp. 
+func (pp *Progs) Prog(as obj.As) *obj.Prog { + if pp.NextLive != StackMapDontCare && pp.NextLive != pp.PrevLive { + // Emit stack map index change. + idx := pp.NextLive + pp.PrevLive = idx + p := pp.Prog(obj.APCDATA) + p.From.SetConst(abi.PCDATA_StackMapIndex) + p.To.SetConst(int64(idx)) + } + if pp.NextUnsafe != pp.PrevUnsafe { + // Emit unsafe-point marker. + pp.PrevUnsafe = pp.NextUnsafe + p := pp.Prog(obj.APCDATA) + p.From.SetConst(abi.PCDATA_UnsafePoint) + if pp.NextUnsafe { + p.To.SetConst(abi.UnsafePointUnsafe) + } else { + p.To.SetConst(abi.UnsafePointSafe) + } + } + + p := pp.Next + pp.Next = pp.NewProg() + pp.Clear(pp.Next) + p.Link = pp.Next + + if !pp.Pos.IsKnown() && base.Flag.K != 0 { + base.Warn("prog: unknown position (line 0)") + } + + p.As = as + p.Pos = pp.Pos + if pp.Pos.IsStmt() == src.PosIsStmt { + // Clear IsStmt for later Progs at this pos provided that as can be marked as a stmt + if LosesStmtMark(as) { + return p + } + pp.Pos = pp.Pos.WithNotStmt() + } + return p +} + +func (pp *Progs) Clear(p *obj.Prog) { + obj.Nopout(p) + p.As = obj.AEND + p.Pc = pp.PC + pp.PC++ +} + +func (pp *Progs) Append(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog { + q := pp.NewProg() + pp.Clear(q) + q.As = as + q.Pos = p.Pos + q.From.Type = ftype + q.From.Reg = freg + q.From.Offset = foffset + q.To.Type = ttype + q.To.Reg = treg + q.To.Offset = toffset + q.Link = p.Link + p.Link = q + return q +} + +func (pp *Progs) SetText(fn *ir.Func) { + if pp.Text != nil { + base.Fatalf("Progs.SetText called twice") + } + ptxt := pp.Prog(obj.ATEXT) + pp.Text = ptxt + + fn.LSym.Func().Text = ptxt + ptxt.From.Type = obj.TYPE_MEM + ptxt.From.Name = obj.NAME_EXTERN + ptxt.From.Sym = fn.LSym +} + +// LosesStmtMark reports whether a prog with op as loses its statement mark on the way to DWARF. +// The attributes from some opcodes are lost in translation. 
+// TODO: this is an artifact of how funcpctab combines information for instructions at a single PC. +// Should try to fix it there. +func LosesStmtMark(as obj.As) bool { + // is_stmt does not work for these; it DOES for ANOP even though that generates no code. + return as == obj.APCDATA || as == obj.AFUNCDATA +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/pgo/irgraph.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/pgo/irgraph.go new file mode 100644 index 0000000000000000000000000000000000000000..96485e33ab455029c67d9c736dc8f6ac0804a267 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/pgo/irgraph.go @@ -0,0 +1,603 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// A note on line numbers: when working with line numbers, we always use the +// binary-visible relative line number. i.e., the line number as adjusted by +// //line directives (ctxt.InnermostPos(ir.Node.Pos()).RelLine()). Use +// NodeLineOffset to compute line offsets. +// +// If you are thinking, "wait, doesn't that just make things more complex than +// using the real line number?", then you are 100% correct. Unfortunately, +// pprof profiles generated by the runtime always contain line numbers as +// adjusted by //line directives (because that is what we put in pclntab). Thus +// for the best behavior when attempting to match the source with the profile +// it makes sense to use the same line number space. +// +// Some of the effects of this to keep in mind: +// +// - For files without //line directives there is no impact, as RelLine() == +// Line(). 
+// - For functions entirely covered by the same //line directive (i.e., a +// directive before the function definition and no directives within the +// function), there should also be no impact, as line offsets within the +// function should be the same as the real line offsets. +// - Functions containing //line directives may be impacted. As fake line +// numbers need not be monotonic, we may compute negative line offsets. We +// should accept these and attempt to use them for best-effort matching, as +// these offsets should still match if the source is unchanged, and may +// continue to match with changed source depending on the impact of the +// changes on fake line numbers. +// - Functions containing //line directives may also contain duplicate lines, +// making it ambiguous which call the profile is referencing. This is a +// similar problem to multiple calls on a single real line, as we don't +// currently track column numbers. +// +// Long term it would be best to extend pprof profiles to include real line +// numbers. Until then, we have to live with these complexities. Luckily, +// //line directives that change line numbers in strange ways should be rare, +// and failing PGO matching on these files is not too big of a loss. + +package pgo + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/pgo/internal/graph" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "errors" + "fmt" + "internal/profile" + "os" + "sort" +) + +// IRGraph is a call graph with nodes pointing to IRs of functions and edges +// carrying weights and callsite information. +// +// Nodes for indirect calls may have missing IR (IRNode.AST == nil) if the node +// is not visible from this package (e.g., not in the transitive deps). Keeping +// these nodes allows determining the hottest edge from a call even if that +// callee is not available. +// +// TODO(prattmic): Consider merging this data structure with Graph. 
This is +// effectively a copy of Graph aggregated to line number and pointing to IR. +type IRGraph struct { + // Nodes of the graph. Each node represents a function, keyed by linker + // symbol name. + IRNodes map[string]*IRNode +} + +// IRNode represents a node (function) in the IRGraph. +type IRNode struct { + // Pointer to the IR of the Function represented by this node. + AST *ir.Func + // Linker symbol name of the Function represented by this node. + // Populated only if AST == nil. + LinkerSymbolName string + + // Set of out-edges in the callgraph. The map uniquely identifies each + // edge based on the callsite and callee, for fast lookup. + OutEdges map[NamedCallEdge]*IREdge +} + +// Name returns the symbol name of this function. +func (i *IRNode) Name() string { + if i.AST != nil { + return ir.LinkFuncName(i.AST) + } + return i.LinkerSymbolName +} + +// IREdge represents a call edge in the IRGraph with source, destination, +// weight, callsite, and line number information. +type IREdge struct { + // Source and destination of the edge in IRNode. + Src, Dst *IRNode + Weight int64 + CallSiteOffset int // Line offset from function start line. +} + +// NamedCallEdge identifies a call edge by linker symbol names and call site +// offset. +type NamedCallEdge struct { + CallerName string + CalleeName string + CallSiteOffset int // Line offset from function start line. +} + +// NamedEdgeMap contains all unique call edges in the profile and their +// edge weight. +type NamedEdgeMap struct { + Weight map[NamedCallEdge]int64 + + // ByWeight lists all keys in Weight, sorted by edge weight. + ByWeight []NamedCallEdge +} + +// CallSiteInfo captures call-site information and its caller/callee. +type CallSiteInfo struct { + LineOffset int // Line offset from function start line. + Caller *ir.Func + Callee *ir.Func +} + +// Profile contains the processed PGO profile and weighted call graph used for +// PGO optimizations. 
+type Profile struct { + // Aggregated edge weights across the profile. This helps us determine + // the percentage threshold for hot/cold partitioning. + TotalWeight int64 + + // NamedEdgeMap contains all unique call edges in the profile and their + // edge weight. + NamedEdgeMap NamedEdgeMap + + // WeightedCG represents the IRGraph built from profile, which we will + // update as part of inlining. + WeightedCG *IRGraph +} + +// New generates a profile-graph from the profile. +func New(profileFile string) (*Profile, error) { + f, err := os.Open(profileFile) + if err != nil { + return nil, fmt.Errorf("error opening profile: %w", err) + } + defer f.Close() + p, err := profile.Parse(f) + if errors.Is(err, profile.ErrNoData) { + // Treat a completely empty file the same as a profile with no + // samples: nothing to do. + return nil, nil + } else if err != nil { + return nil, fmt.Errorf("error parsing profile: %w", err) + } + + if len(p.Sample) == 0 { + // We accept empty profiles, but there is nothing to do. + return nil, nil + } + + valueIndex := -1 + for i, s := range p.SampleType { + // Samples count is the raw data collected, and CPU nanoseconds is just + // a scaled version of it, so either one we can find is fine. + if (s.Type == "samples" && s.Unit == "count") || + (s.Type == "cpu" && s.Unit == "nanoseconds") { + valueIndex = i + break + } + } + + if valueIndex == -1 { + return nil, fmt.Errorf(`profile does not contain a sample index with value/type "samples/count" or cpu/nanoseconds"`) + } + + g := graph.NewGraph(p, &graph.Options{ + SampleValue: func(v []int64) int64 { return v[valueIndex] }, + }) + + namedEdgeMap, totalWeight, err := createNamedEdgeMap(g) + if err != nil { + return nil, err + } + + if totalWeight == 0 { + return nil, nil // accept but ignore profile with no samples. + } + + // Create package-level call graph with weights from profile and IR. 
+ wg := createIRGraph(namedEdgeMap) + + return &Profile{ + TotalWeight: totalWeight, + NamedEdgeMap: namedEdgeMap, + WeightedCG: wg, + }, nil +} + +// createNamedEdgeMap builds a map of callsite-callee edge weights from the +// profile-graph. +// +// Caller should ignore the profile if totalWeight == 0. +func createNamedEdgeMap(g *graph.Graph) (edgeMap NamedEdgeMap, totalWeight int64, err error) { + seenStartLine := false + + // Process graph and build various node and edge maps which will + // be consumed by AST walk. + weight := make(map[NamedCallEdge]int64) + for _, n := range g.Nodes { + seenStartLine = seenStartLine || n.Info.StartLine != 0 + + canonicalName := n.Info.Name + // Create the key to the nodeMapKey. + namedEdge := NamedCallEdge{ + CallerName: canonicalName, + CallSiteOffset: n.Info.Lineno - n.Info.StartLine, + } + + for _, e := range n.Out { + totalWeight += e.WeightValue() + namedEdge.CalleeName = e.Dest.Info.Name + // Create new entry or increment existing entry. + weight[namedEdge] += e.WeightValue() + } + } + + if totalWeight == 0 { + return NamedEdgeMap{}, 0, nil // accept but ignore profile with no samples. + } + + if !seenStartLine { + // TODO(prattmic): If Function.start_line is missing we could + // fall back to using absolute line numbers, which is better + // than nothing. + return NamedEdgeMap{}, 0, fmt.Errorf("profile missing Function.start_line data (Go version of profiled application too old? 
Go 1.20+ automatically adds this to profiles)") + } + + byWeight := make([]NamedCallEdge, 0, len(weight)) + for namedEdge := range weight { + byWeight = append(byWeight, namedEdge) + } + sort.Slice(byWeight, func(i, j int) bool { + ei, ej := byWeight[i], byWeight[j] + if wi, wj := weight[ei], weight[ej]; wi != wj { + return wi > wj // want larger weight first + } + // same weight, order by name/line number + if ei.CallerName != ej.CallerName { + return ei.CallerName < ej.CallerName + } + if ei.CalleeName != ej.CalleeName { + return ei.CalleeName < ej.CalleeName + } + return ei.CallSiteOffset < ej.CallSiteOffset + }) + + edgeMap = NamedEdgeMap{ + Weight: weight, + ByWeight: byWeight, + } + + return edgeMap, totalWeight, nil +} + +// initializeIRGraph builds the IRGraph by visiting all the ir.Func in decl list +// of a package. +func createIRGraph(namedEdgeMap NamedEdgeMap) *IRGraph { + g := &IRGraph{ + IRNodes: make(map[string]*IRNode), + } + + // Bottomup walk over the function to create IRGraph. + ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) { + for _, fn := range list { + visitIR(fn, namedEdgeMap, g) + } + }) + + // Add additional edges for indirect calls. This must be done second so + // that IRNodes is fully populated (see the dummy node TODO in + // addIndirectEdges). + // + // TODO(prattmic): visitIR above populates the graph via direct calls + // discovered via the IR. addIndirectEdges populates the graph via + // calls discovered via the profile. This combination of opposite + // approaches is a bit awkward, particularly because direct calls are + // discoverable via the profile as well. Unify these into a single + // approach. + addIndirectEdges(g, namedEdgeMap) + + return g +} + +// visitIR traverses the body of each ir.Func adds edges to g from ir.Func to +// any called function in the body. 
+func visitIR(fn *ir.Func, namedEdgeMap NamedEdgeMap, g *IRGraph) { + name := ir.LinkFuncName(fn) + node, ok := g.IRNodes[name] + if !ok { + node = &IRNode{ + AST: fn, + } + g.IRNodes[name] = node + } + + // Recursively walk over the body of the function to create IRGraph edges. + createIRGraphEdge(fn, node, name, namedEdgeMap, g) +} + +// createIRGraphEdge traverses the nodes in the body of ir.Func and adds edges +// between the callernode which points to the ir.Func and the nodes in the +// body. +func createIRGraphEdge(fn *ir.Func, callernode *IRNode, name string, namedEdgeMap NamedEdgeMap, g *IRGraph) { + ir.VisitList(fn.Body, func(n ir.Node) { + switch n.Op() { + case ir.OCALLFUNC: + call := n.(*ir.CallExpr) + // Find the callee function from the call site and add the edge. + callee := DirectCallee(call.Fun) + if callee != nil { + addIREdge(callernode, name, n, callee, namedEdgeMap, g) + } + case ir.OCALLMETH: + call := n.(*ir.CallExpr) + // Find the callee method from the call site and add the edge. + callee := ir.MethodExprName(call.Fun).Func + addIREdge(callernode, name, n, callee, namedEdgeMap, g) + } + }) +} + +// NodeLineOffset returns the line offset of n in fn. +func NodeLineOffset(n ir.Node, fn *ir.Func) int { + // See "A note on line numbers" at the top of the file. + line := int(base.Ctxt.InnermostPos(n.Pos()).RelLine()) + startLine := int(base.Ctxt.InnermostPos(fn.Pos()).RelLine()) + return line - startLine +} + +// addIREdge adds an edge between caller and new node that points to `callee` +// based on the profile-graph and NodeMap. 
+func addIREdge(callerNode *IRNode, callerName string, call ir.Node, callee *ir.Func, namedEdgeMap NamedEdgeMap, g *IRGraph) { + calleeName := ir.LinkFuncName(callee) + calleeNode, ok := g.IRNodes[calleeName] + if !ok { + calleeNode = &IRNode{ + AST: callee, + } + g.IRNodes[calleeName] = calleeNode + } + + namedEdge := NamedCallEdge{ + CallerName: callerName, + CalleeName: calleeName, + CallSiteOffset: NodeLineOffset(call, callerNode.AST), + } + + // Add edge in the IRGraph from caller to callee. + edge := &IREdge{ + Src: callerNode, + Dst: calleeNode, + Weight: namedEdgeMap.Weight[namedEdge], + CallSiteOffset: namedEdge.CallSiteOffset, + } + + if callerNode.OutEdges == nil { + callerNode.OutEdges = make(map[NamedCallEdge]*IREdge) + } + callerNode.OutEdges[namedEdge] = edge +} + +// LookupFunc looks up a function or method in export data. It is expected to +// be overridden by package noder, to break a dependency cycle. +var LookupFunc = func(fullName string) (*ir.Func, error) { + base.Fatalf("pgo.LookupMethodFunc not overridden") + panic("unreachable") +} + +// addIndirectEdges adds indirect call edges found in the profile to the graph, +// to be used for devirtualization. +// +// N.B. despite the name, addIndirectEdges will add any edges discovered via +// the profile. We don't know for sure that they are indirect, but assume they +// are since direct calls would already be added. (e.g., direct calls that have +// been deleted from source since the profile was taken would be added here). +// +// TODO(prattmic): Devirtualization runs before inlining, so we can't devirtualize +// calls inside inlined call bodies. If we did add that, we'd need edges from +// inlined bodies as well. +func addIndirectEdges(g *IRGraph, namedEdgeMap NamedEdgeMap) { + // g.IRNodes is populated with the set of functions in the local + // package build by VisitIR. We want to filter for local functions + // below, but we also add unknown callees to IRNodes as we go. 
So make + // an initial copy of IRNodes to recall just the local functions. + localNodes := make(map[string]*IRNode, len(g.IRNodes)) + for k, v := range g.IRNodes { + localNodes[k] = v + } + + // N.B. We must consider edges in a stable order because export data + // lookup order (LookupMethodFunc, below) can impact the export data of + // this package, which must be stable across different invocations for + // reproducibility. + // + // The weight ordering of ByWeight is irrelevant, it just happens to be + // an ordered list of edges that is already available. + for _, key := range namedEdgeMap.ByWeight { + weight := namedEdgeMap.Weight[key] + // All callers in the local package build were added to IRNodes + // in VisitIR. If a caller isn't in the local package build we + // can skip adding edges, since we won't be devirtualizing in + // them anyway. This keeps the graph smaller. + callerNode, ok := localNodes[key.CallerName] + if !ok { + continue + } + + // Already handled this edge? + if _, ok := callerNode.OutEdges[key]; ok { + continue + } + + calleeNode, ok := g.IRNodes[key.CalleeName] + if !ok { + // IR is missing for this callee. VisitIR populates + // IRNodes with all functions discovered via local + // package function declarations and calls. This + // function may still be available from export data of + // a transitive dependency. + // + // TODO(prattmic): Parameterized types/functions are + // not supported. + // + // TODO(prattmic): This eager lookup during graph load + // is simple, but wasteful. We are likely to load many + // functions that we never need. We could delay load + // until we actually need the method in + // devirtualization. Instantiation of generic functions + // will likely need to be done at the devirtualization + // site, if at all. 
+ fn, err := LookupFunc(key.CalleeName) + if err == nil { + if base.Debug.PGODebug >= 3 { + fmt.Printf("addIndirectEdges: %s found in export data\n", key.CalleeName) + } + calleeNode = &IRNode{AST: fn} + + // N.B. we could call createIRGraphEdge to add + // direct calls in this newly-imported + // function's body to the graph. Similarly, we + // could add to this function's queue to add + // indirect calls. However, those would be + // useless given the visit order of inlining, + // and the ordering of PGO devirtualization and + // inlining. This function can only be used as + // an inlined body. We will never do PGO + // devirtualization inside an inlined call. Nor + // will we perform inlining inside an inlined + // call. + } else { + // Still not found. Most likely this is because + // the callee isn't in the transitive deps of + // this package. + // + // Record this call anyway. If this is the hottest, + // then we want to skip devirtualization rather than + // devirtualizing to the second most common callee. + if base.Debug.PGODebug >= 3 { + fmt.Printf("addIndirectEdges: %s not found in export data: %v\n", key.CalleeName, err) + } + calleeNode = &IRNode{LinkerSymbolName: key.CalleeName} + } + + // Add dummy node back to IRNodes. We don't need this + // directly, but PrintWeightedCallGraphDOT uses these + // to print nodes. + g.IRNodes[key.CalleeName] = calleeNode + } + edge := &IREdge{ + Src: callerNode, + Dst: calleeNode, + Weight: weight, + CallSiteOffset: key.CallSiteOffset, + } + + if callerNode.OutEdges == nil { + callerNode.OutEdges = make(map[NamedCallEdge]*IREdge) + } + callerNode.OutEdges[key] = edge + } +} + +// WeightInPercentage converts profile weights to a percentage. +func WeightInPercentage(value int64, total int64) float64 { + return (float64(value) / float64(total)) * 100 +} + +// PrintWeightedCallGraphDOT prints IRGraph in DOT format. 
+func (p *Profile) PrintWeightedCallGraphDOT(edgeThreshold float64) { + fmt.Printf("\ndigraph G {\n") + fmt.Printf("forcelabels=true;\n") + + // List of functions in this package. + funcs := make(map[string]struct{}) + ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) { + for _, f := range list { + name := ir.LinkFuncName(f) + funcs[name] = struct{}{} + } + }) + + // Determine nodes of DOT. + // + // Note that ir.Func may be nil for functions not visible from this + // package. + nodes := make(map[string]*ir.Func) + for name := range funcs { + if n, ok := p.WeightedCG.IRNodes[name]; ok { + for _, e := range n.OutEdges { + if _, ok := nodes[e.Src.Name()]; !ok { + nodes[e.Src.Name()] = e.Src.AST + } + if _, ok := nodes[e.Dst.Name()]; !ok { + nodes[e.Dst.Name()] = e.Dst.AST + } + } + if _, ok := nodes[n.Name()]; !ok { + nodes[n.Name()] = n.AST + } + } + } + + // Print nodes. + for name, ast := range nodes { + if _, ok := p.WeightedCG.IRNodes[name]; ok { + style := "solid" + if ast == nil { + style = "dashed" + } + + if ast != nil && ast.Inl != nil { + fmt.Printf("\"%v\" [color=black, style=%s, label=\"%v,inl_cost=%d\"];\n", name, style, name, ast.Inl.Cost) + } else { + fmt.Printf("\"%v\" [color=black, style=%s, label=\"%v\"];\n", name, style, name) + } + } + } + // Print edges. 
+ ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) { + for _, f := range list { + name := ir.LinkFuncName(f) + if n, ok := p.WeightedCG.IRNodes[name]; ok { + for _, e := range n.OutEdges { + style := "solid" + if e.Dst.AST == nil { + style = "dashed" + } + color := "black" + edgepercent := WeightInPercentage(e.Weight, p.TotalWeight) + if edgepercent > edgeThreshold { + color = "red" + } + + fmt.Printf("edge [color=%s, style=%s];\n", color, style) + fmt.Printf("\"%v\" -> \"%v\" [label=\"%.2f\"];\n", n.Name(), e.Dst.Name(), edgepercent) + } + } + } + }) + fmt.Printf("}\n") +} + +// DirectCallee takes a function-typed expression and returns the underlying +// function that it refers to if statically known. Otherwise, it returns nil. +// +// Equivalent to inline.inlCallee without calling CanInline on closures. +func DirectCallee(fn ir.Node) *ir.Func { + fn = ir.StaticValue(fn) + switch fn.Op() { + case ir.OMETHEXPR: + fn := fn.(*ir.SelectorExpr) + n := ir.MethodExprName(fn) + // Check that receiver type matches fn.X. + // TODO(mdempsky): Handle implicit dereference + // of pointer receiver argument? + if n == nil || !types.Identical(n.Type().Recv().Type, fn.X.Type()) { + return nil + } + return n.Func + case ir.ONAME: + fn := fn.(*ir.Name) + if fn.Class == ir.PFUNC { + return fn.Func + } + case ir.OCLOSURE: + fn := fn.(*ir.ClosureExpr) + c := fn.Func + return c + } + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/pkginit/init.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/pkginit/init.go new file mode 100644 index 0000000000000000000000000000000000000000..9278890b632ef6d55cf1c896ab2dc8eb3ff65274 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/pkginit/init.go @@ -0,0 +1,148 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pkginit + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/noder" + "cmd/compile/internal/objw" + "cmd/compile/internal/staticinit" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" +) + +// MakeTask makes an initialization record for the package, if necessary. +// See runtime/proc.go:initTask for its layout. +// The 3 tasks for initialization are: +// 1. Initialize all of the packages the current package depends on. +// 2. Initialize all the variables that have initializers. +// 3. Run any init functions. +func MakeTask() { + var deps []*obj.LSym // initTask records for packages the current package depends on + var fns []*obj.LSym // functions to call for package initialization + + // Find imported packages with init tasks. + for _, pkg := range typecheck.Target.Imports { + n, ok := pkg.Lookup(".inittask").Def.(*ir.Name) + if !ok { + continue + } + if n.Op() != ir.ONAME || n.Class != ir.PEXTERN { + base.Fatalf("bad inittask: %v", n) + } + deps = append(deps, n.Linksym()) + } + if base.Flag.ASan { + // Make an initialization function to call runtime.asanregisterglobals to register an + // array of instrumented global variables when -asan is enabled. An instrumented global + // variable is described by a structure. + // See the _asan_global structure declared in src/runtime/asan/asan.go. + // + // func init { + // var globals []_asan_global {...} + // asanregisterglobals(&globals[0], len(globals)) + // } + for _, n := range typecheck.Target.Externs { + if canInstrumentGlobal(n) { + name := n.Sym().Name + InstrumentGlobalsMap[name] = n + InstrumentGlobalsSlice = append(InstrumentGlobalsSlice, n) + } + } + ni := len(InstrumentGlobalsMap) + if ni != 0 { + // Make an init._ function. 
+ pos := base.AutogeneratedPos + base.Pos = pos + + sym := noder.Renameinit() + fnInit := ir.NewFunc(pos, pos, sym, types.NewSignature(nil, nil, nil)) + typecheck.DeclFunc(fnInit) + + // Get an array of instrumented global variables. + globals := instrumentGlobals(fnInit) + + // Call runtime.asanregisterglobals function to poison redzones. + // runtime.asanregisterglobals(unsafe.Pointer(&globals[0]), ni) + asancall := ir.NewCallExpr(base.Pos, ir.OCALL, typecheck.LookupRuntime("asanregisterglobals"), nil) + asancall.Args.Append(typecheck.ConvNop(typecheck.NodAddr( + ir.NewIndexExpr(base.Pos, globals, ir.NewInt(base.Pos, 0))), types.Types[types.TUNSAFEPTR])) + asancall.Args.Append(typecheck.DefaultLit(ir.NewInt(base.Pos, int64(ni)), types.Types[types.TUINTPTR])) + + fnInit.Body.Append(asancall) + typecheck.FinishFuncBody() + ir.CurFunc = fnInit + typecheck.Stmts(fnInit.Body) + ir.CurFunc = nil + + typecheck.Target.Inits = append(typecheck.Target.Inits, fnInit) + } + } + + // Record user init functions. + for _, fn := range typecheck.Target.Inits { + if fn.Sym().Name == "init" { + // Synthetic init function for initialization of package-scope + // variables. We can use staticinit to optimize away static + // assignments. + s := staticinit.Schedule{ + Plans: make(map[ir.Node]*staticinit.Plan), + Temps: make(map[ir.Node]*ir.Name), + } + for _, n := range fn.Body { + s.StaticInit(n) + } + fn.Body = s.Out + ir.WithFunc(fn, func() { + typecheck.Stmts(fn.Body) + }) + + if len(fn.Body) == 0 { + fn.Body = []ir.Node{ir.NewBlockStmt(src.NoXPos, nil)} + } + } + + // Skip init functions with empty bodies. + if len(fn.Body) == 1 { + if stmt := fn.Body[0]; stmt.Op() == ir.OBLOCK && len(stmt.(*ir.BlockStmt).List) == 0 { + continue + } + } + fns = append(fns, fn.Nname.Linksym()) + } + + if len(deps) == 0 && len(fns) == 0 && types.LocalPkg.Path != "main" && types.LocalPkg.Path != "runtime" { + return // nothing to initialize + } + + // Make an .inittask structure. 
+ sym := typecheck.Lookup(".inittask") + task := ir.NewNameAt(base.Pos, sym, types.Types[types.TUINT8]) // fake type + task.Class = ir.PEXTERN + sym.Def = task + lsym := task.Linksym() + ot := 0 + ot = objw.Uint32(lsym, ot, 0) // state: not initialized yet + ot = objw.Uint32(lsym, ot, uint32(len(fns))) + for _, f := range fns { + ot = objw.SymPtr(lsym, ot, f, 0) + } + + // Add relocations which tell the linker all of the packages + // that this package depends on (and thus, all of the packages + // that need to be initialized before this one). + for _, d := range deps { + r := obj.Addrel(lsym) + r.Type = objabi.R_INITORDER + r.Sym = d + } + // An initTask has pointers, but none into the Go heap. + // It's not quite read only, the state field must be modifiable. + objw.Global(lsym, int32(ot), obj.NOPTR) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/pkginit/initAsanGlobals.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/pkginit/initAsanGlobals.go new file mode 100644 index 0000000000000000000000000000000000000000..42db0eaf1bbd2abdab11aec6849e749823fbd62c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/pkginit/initAsanGlobals.go @@ -0,0 +1,236 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkginit + +import ( + "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +// instrumentGlobals declares a global array of _asan_global structures and initializes it. +func instrumentGlobals(fn *ir.Func) *ir.Name { + asanGlobalStruct, asanLocationStruct, defStringstruct := createtypes() + lname := typecheck.Lookup + tconv := typecheck.ConvNop + // Make a global array of asanGlobalStruct type. 
+ // var asanglobals []asanGlobalStruct + arraytype := types.NewArray(asanGlobalStruct, int64(len(InstrumentGlobalsMap))) + symG := lname(".asanglobals") + globals := ir.NewNameAt(base.Pos, symG, arraytype) + globals.Class = ir.PEXTERN + symG.Def = globals + typecheck.Target.Externs = append(typecheck.Target.Externs, globals) + // Make a global array of asanLocationStruct type. + // var asanL []asanLocationStruct + arraytype = types.NewArray(asanLocationStruct, int64(len(InstrumentGlobalsMap))) + symL := lname(".asanL") + asanlocation := ir.NewNameAt(base.Pos, symL, arraytype) + asanlocation.Class = ir.PEXTERN + symL.Def = asanlocation + typecheck.Target.Externs = append(typecheck.Target.Externs, asanlocation) + // Make three global string variables to pass the global name and module name + // and the name of the source file that defines it. + // var asanName string + // var asanModulename string + // var asanFilename string + symL = lname(".asanName") + asanName := ir.NewNameAt(base.Pos, symL, types.Types[types.TSTRING]) + asanName.Class = ir.PEXTERN + symL.Def = asanName + typecheck.Target.Externs = append(typecheck.Target.Externs, asanName) + + symL = lname(".asanModulename") + asanModulename := ir.NewNameAt(base.Pos, symL, types.Types[types.TSTRING]) + asanModulename.Class = ir.PEXTERN + symL.Def = asanModulename + typecheck.Target.Externs = append(typecheck.Target.Externs, asanModulename) + + symL = lname(".asanFilename") + asanFilename := ir.NewNameAt(base.Pos, symL, types.Types[types.TSTRING]) + asanFilename.Class = ir.PEXTERN + symL.Def = asanFilename + typecheck.Target.Externs = append(typecheck.Target.Externs, asanFilename) + + var init ir.Nodes + var c ir.Node + // globals[i].odrIndicator = 0 is the default, no need to set it explicitly here. 
+ for i, n := range InstrumentGlobalsSlice { + setField := func(f string, val ir.Node, i int) { + r := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, + ir.NewIndexExpr(base.Pos, globals, ir.NewInt(base.Pos, int64(i))), lname(f)), val) + init.Append(typecheck.Stmt(r)) + } + // globals[i].beg = uintptr(unsafe.Pointer(&n)) + c = tconv(typecheck.NodAddr(n), types.Types[types.TUNSAFEPTR]) + c = tconv(c, types.Types[types.TUINTPTR]) + setField("beg", c, i) + // Assign globals[i].size. + g := n.(*ir.Name) + size := g.Type().Size() + c = typecheck.DefaultLit(ir.NewInt(base.Pos, size), types.Types[types.TUINTPTR]) + setField("size", c, i) + // Assign globals[i].sizeWithRedzone. + rzSize := GetRedzoneSizeForGlobal(size) + sizeWithRz := rzSize + size + c = typecheck.DefaultLit(ir.NewInt(base.Pos, sizeWithRz), types.Types[types.TUINTPTR]) + setField("sizeWithRedzone", c, i) + // The C string type is terminated by a null character "\0", Go should use three-digit + // octal "\000" or two-digit hexadecimal "\x00" to create null terminated string. + // asanName = symbol's linkname + "\000" + // globals[i].name = (*defString)(unsafe.Pointer(&asanName)).data + name := g.Linksym().Name + init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, asanName, ir.NewString(base.Pos, name+"\000")))) + c = tconv(typecheck.NodAddr(asanName), types.Types[types.TUNSAFEPTR]) + c = tconv(c, types.NewPtr(defStringstruct)) + c = ir.NewSelectorExpr(base.Pos, ir.ODOT, c, lname("data")) + setField("name", c, i) + + // Set the name of package being compiled as a unique identifier of a module. 
+ // asanModulename = pkgName + "\000" + init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, asanModulename, ir.NewString(base.Pos, types.LocalPkg.Name+"\000")))) + c = tconv(typecheck.NodAddr(asanModulename), types.Types[types.TUNSAFEPTR]) + c = tconv(c, types.NewPtr(defStringstruct)) + c = ir.NewSelectorExpr(base.Pos, ir.ODOT, c, lname("data")) + setField("moduleName", c, i) + // Assign asanL[i].filename, asanL[i].line, asanL[i].column + // and assign globals[i].location = uintptr(unsafe.Pointer(&asanL[i])) + asanLi := ir.NewIndexExpr(base.Pos, asanlocation, ir.NewInt(base.Pos, int64(i))) + filename := ir.NewString(base.Pos, base.Ctxt.PosTable.Pos(n.Pos()).Filename()+"\000") + init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, asanFilename, filename))) + c = tconv(typecheck.NodAddr(asanFilename), types.Types[types.TUNSAFEPTR]) + c = tconv(c, types.NewPtr(defStringstruct)) + c = ir.NewSelectorExpr(base.Pos, ir.ODOT, c, lname("data")) + init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, asanLi, lname("filename")), c))) + line := ir.NewInt(base.Pos, int64(n.Pos().Line())) + init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, asanLi, lname("line")), line))) + col := ir.NewInt(base.Pos, int64(n.Pos().Col())) + init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, asanLi, lname("column")), col))) + c = tconv(typecheck.NodAddr(asanLi), types.Types[types.TUNSAFEPTR]) + c = tconv(c, types.Types[types.TUINTPTR]) + setField("sourceLocation", c, i) + } + fn.Body.Append(init...) + return globals +} + +// createtypes creates the asanGlobal, asanLocation and defString struct type. +// Go compiler does not refer to the C types, we represent the struct field +// by a uintptr, then use type conversion to make copies of the data. +// E.g., (*defString)(asanGlobal.name).data to C string. +// +// Keep in sync with src/runtime/asan/asan.go. 
+// type asanGlobal struct { +// beg uintptr +// size uintptr +// size_with_redzone uintptr +// name uintptr +// moduleName uintptr +// hasDynamicInit uintptr +// sourceLocation uintptr +// odrIndicator uintptr +// } +// +// type asanLocation struct { +// filename uintptr +// line int32 +// column int32 +// } +// +// defString is synthesized struct type meant to capture the underlying +// implementations of string. +// type defString struct { +// data uintptr +// len uintptr +// } + +func createtypes() (*types.Type, *types.Type, *types.Type) { + up := types.Types[types.TUINTPTR] + i32 := types.Types[types.TINT32] + fname := typecheck.Lookup + nxp := src.NoXPos + nfield := types.NewField + asanGlobal := types.NewStruct([]*types.Field{ + nfield(nxp, fname("beg"), up), + nfield(nxp, fname("size"), up), + nfield(nxp, fname("sizeWithRedzone"), up), + nfield(nxp, fname("name"), up), + nfield(nxp, fname("moduleName"), up), + nfield(nxp, fname("hasDynamicInit"), up), + nfield(nxp, fname("sourceLocation"), up), + nfield(nxp, fname("odrIndicator"), up), + }) + types.CalcSize(asanGlobal) + + asanLocation := types.NewStruct([]*types.Field{ + nfield(nxp, fname("filename"), up), + nfield(nxp, fname("line"), i32), + nfield(nxp, fname("column"), i32), + }) + types.CalcSize(asanLocation) + + defString := types.NewStruct([]*types.Field{ + types.NewField(nxp, fname("data"), up), + types.NewField(nxp, fname("len"), up), + }) + types.CalcSize(defString) + + return asanGlobal, asanLocation, defString +} + +// Calculate redzone for globals. +func GetRedzoneSizeForGlobal(size int64) int64 { + maxRZ := int64(1 << 18) + minRZ := int64(32) + redZone := (size / minRZ / 4) * minRZ + switch { + case redZone > maxRZ: + redZone = maxRZ + case redZone < minRZ: + redZone = minRZ + } + // Round up to multiple of minRZ. 
+ if size%minRZ != 0 { + redZone += minRZ - (size % minRZ) + } + return redZone +} + +// InstrumentGlobalsMap contains only package-local (and unlinknamed from somewhere else) +// globals. +// And the key is the object name. For example, in package p, a global foo would be in this +// map as "foo". +// Consider range over maps is nondeterministic, make a slice to hold all the values in the +// InstrumentGlobalsMap and iterate over the InstrumentGlobalsSlice. +var InstrumentGlobalsMap = make(map[string]ir.Node) +var InstrumentGlobalsSlice = make([]ir.Node, 0, 0) + +func canInstrumentGlobal(g ir.Node) bool { + if g.Op() != ir.ONAME { + return false + } + n := g.(*ir.Name) + if n.Class == ir.PFUNC { + return false + } + if n.Sym().Pkg != types.LocalPkg { + return false + } + // Do not instrument any _cgo_ related global variables, because they are declared in C code. + if strings.Contains(n.Sym().Name, "cgo") { + return false + } + + // Do not instrument globals that are linknamed, because their home package will do the work. + if n.Sym().Linkname != "" { + return false + } + + return true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ppc64/galign.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ppc64/galign.go new file mode 100644 index 0000000000000000000000000000000000000000..20fd8cec54f397fb9d05f490798b0c4f3965ae17 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ppc64/galign.go @@ -0,0 +1,29 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ppc64 + +import ( + "cmd/compile/internal/ssagen" + "cmd/internal/obj/ppc64" + "internal/buildcfg" +) + +func Init(arch *ssagen.ArchInfo) { + arch.LinkArch = &ppc64.Linkppc64 + if buildcfg.GOARCH == "ppc64le" { + arch.LinkArch = &ppc64.Linkppc64le + } + arch.REGSP = ppc64.REGSP + arch.MAXWIDTH = 1 << 50 + + arch.ZeroRange = zerorange + arch.Ginsnop = ginsnop + + arch.SSAMarkMoves = ssaMarkMoves + arch.SSAGenValue = ssaGenValue + arch.SSAGenBlock = ssaGenBlock + arch.LoadRegResult = loadRegResult + arch.SpillArgReg = spillArgReg +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ppc64/ggen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ppc64/ggen.go new file mode 100644 index 0000000000000000000000000000000000000000..4c935cfc71f8d36b7c3b13b0041d9f624e930ecb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ppc64/ggen.go @@ -0,0 +1,54 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ppc64 + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/ppc64" +) + +func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { + if cnt == 0 { + return p + } + if cnt < int64(4*types.PtrSize) { + for i := int64(0); i < cnt; i += int64(types.PtrSize) { + p = pp.Append(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.Arch.FixedFrameSize+off+i) + } + } else if cnt <= int64(128*types.PtrSize) { + p = pp.Append(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.Arch.FixedFrameSize+off-8, obj.TYPE_REG, ppc64.REGRT1, 0) + p.Reg = ppc64.REGSP + p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ir.Syms.Duffzero + p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize)) + } else { + p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.Arch.FixedFrameSize+off-8, obj.TYPE_REG, ppc64.REGTMP, 0) + p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0) + p.Reg = ppc64.REGSP + p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0) + p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0) + p.Reg = ppc64.REGRT1 + p = pp.Append(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(types.PtrSize)) + p1 := p + p = pp.Append(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0) + p = pp.Append(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0) + p.To.SetTarget(p1) + } + + return p +} + +func ginsnop(pp *objw.Progs) *obj.Prog { + // Generate the preferred hardware nop: ori 0,0,0 + p := pp.Prog(ppc64.AOR) + p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: 0} + p.To = obj.Addr{Type: obj.TYPE_REG, Reg: ppc64.REG_R0} + return p +} diff --git 
a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ppc64/opt.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ppc64/opt.go new file mode 100644 index 0000000000000000000000000000000000000000..4f81aa9c1eb2913043047b5ff134bac4237aa424 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ppc64/opt.go @@ -0,0 +1,12 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ppc64 + +// Many Power ISA arithmetic and logical instructions come in four +// standard variants. These bits let us map between variants. +const ( + V_CC = 1 << 0 // xCC (affect CR field 0 flags) + V_V = 1 << 1 // xV (affect SO and OV flags) +) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ppc64/ssa.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ppc64/ssa.go new file mode 100644 index 0000000000000000000000000000000000000000..d20a31e38a292bb14d8e607957d8c307ed95be63 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ppc64/ssa.go @@ -0,0 +1,2078 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ppc64 + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/logopt" + "cmd/compile/internal/objw" + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/ppc64" + "internal/buildcfg" + "math" + "strings" +) + +// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags. 
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) { + // flive := b.FlagsLiveAtEnd + // if b.Control != nil && b.Control.Type.IsFlags() { + // flive = true + // } + // for i := len(b.Values) - 1; i >= 0; i-- { + // v := b.Values[i] + // if flive && (v.Op == v.Op == ssa.OpPPC64MOVDconst) { + // // The "mark" is any non-nil Aux value. + // v.Aux = v + // } + // if v.Type.IsFlags() { + // flive = false + // } + // for _, a := range v.Args { + // if a.Type.IsFlags() { + // flive = true + // } + // } + // } +} + +// loadByType returns the load instruction of the given type. +func loadByType(t *types.Type) obj.As { + if t.IsFloat() { + switch t.Size() { + case 4: + return ppc64.AFMOVS + case 8: + return ppc64.AFMOVD + } + } else { + switch t.Size() { + case 1: + if t.IsSigned() { + return ppc64.AMOVB + } else { + return ppc64.AMOVBZ + } + case 2: + if t.IsSigned() { + return ppc64.AMOVH + } else { + return ppc64.AMOVHZ + } + case 4: + if t.IsSigned() { + return ppc64.AMOVW + } else { + return ppc64.AMOVWZ + } + case 8: + return ppc64.AMOVD + } + } + panic("bad load type") +} + +// storeByType returns the store instruction of the given type. 
+func storeByType(t *types.Type) obj.As { + if t.IsFloat() { + switch t.Size() { + case 4: + return ppc64.AFMOVS + case 8: + return ppc64.AFMOVD + } + } else { + switch t.Size() { + case 1: + return ppc64.AMOVB + case 2: + return ppc64.AMOVH + case 4: + return ppc64.AMOVW + case 8: + return ppc64.AMOVD + } + } + panic("bad store type") +} + +func ssaGenValue(s *ssagen.State, v *ssa.Value) { + switch v.Op { + case ssa.OpCopy: + t := v.Type + if t.IsMemory() { + return + } + x := v.Args[0].Reg() + y := v.Reg() + if x != y { + rt := obj.TYPE_REG + op := ppc64.AMOVD + + if t.IsFloat() { + op = ppc64.AFMOVD + } + p := s.Prog(op) + p.From.Type = rt + p.From.Reg = x + p.To.Type = rt + p.To.Reg = y + } + + case ssa.OpPPC64LoweredAtomicAnd8, + ssa.OpPPC64LoweredAtomicAnd32, + ssa.OpPPC64LoweredAtomicOr8, + ssa.OpPPC64LoweredAtomicOr32: + // LWSYNC + // LBAR/LWAR (Rarg0), Rtmp + // AND/OR Rarg1, Rtmp + // STBCCC/STWCCC Rtmp, (Rarg0) + // BNE -3(PC) + ld := ppc64.ALBAR + st := ppc64.ASTBCCC + if v.Op == ssa.OpPPC64LoweredAtomicAnd32 || v.Op == ssa.OpPPC64LoweredAtomicOr32 { + ld = ppc64.ALWAR + st = ppc64.ASTWCCC + } + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + // LWSYNC - Assuming shared data not write-through-required nor + // caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b. 
+ plwsync := s.Prog(ppc64.ALWSYNC) + plwsync.To.Type = obj.TYPE_NONE + // LBAR or LWAR + p := s.Prog(ld) + p.From.Type = obj.TYPE_MEM + p.From.Reg = r0 + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REGTMP + // AND/OR reg1,out + p1 := s.Prog(v.Op.Asm()) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = r1 + p1.To.Type = obj.TYPE_REG + p1.To.Reg = ppc64.REGTMP + // STBCCC or STWCCC + p2 := s.Prog(st) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = ppc64.REGTMP + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = r0 + p2.RegTo2 = ppc64.REGTMP + // BNE retry + p3 := s.Prog(ppc64.ABNE) + p3.To.Type = obj.TYPE_BRANCH + p3.To.SetTarget(p) + + case ssa.OpPPC64LoweredAtomicAdd32, + ssa.OpPPC64LoweredAtomicAdd64: + // LWSYNC + // LDAR/LWAR (Rarg0), Rout + // ADD Rarg1, Rout + // STDCCC/STWCCC Rout, (Rarg0) + // BNE -3(PC) + // MOVW Rout,Rout (if Add32) + ld := ppc64.ALDAR + st := ppc64.ASTDCCC + if v.Op == ssa.OpPPC64LoweredAtomicAdd32 { + ld = ppc64.ALWAR + st = ppc64.ASTWCCC + } + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + out := v.Reg0() + // LWSYNC - Assuming shared data not write-through-required nor + // caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b. 
+ plwsync := s.Prog(ppc64.ALWSYNC) + plwsync.To.Type = obj.TYPE_NONE + // LDAR or LWAR + p := s.Prog(ld) + p.From.Type = obj.TYPE_MEM + p.From.Reg = r0 + p.To.Type = obj.TYPE_REG + p.To.Reg = out + // ADD reg1,out + p1 := s.Prog(ppc64.AADD) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = r1 + p1.To.Reg = out + p1.To.Type = obj.TYPE_REG + // STDCCC or STWCCC + p3 := s.Prog(st) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = out + p3.To.Type = obj.TYPE_MEM + p3.To.Reg = r0 + // BNE retry + p4 := s.Prog(ppc64.ABNE) + p4.To.Type = obj.TYPE_BRANCH + p4.To.SetTarget(p) + + // Ensure a 32 bit result + if v.Op == ssa.OpPPC64LoweredAtomicAdd32 { + p5 := s.Prog(ppc64.AMOVWZ) + p5.To.Type = obj.TYPE_REG + p5.To.Reg = out + p5.From.Type = obj.TYPE_REG + p5.From.Reg = out + } + + case ssa.OpPPC64LoweredAtomicExchange32, + ssa.OpPPC64LoweredAtomicExchange64: + // LWSYNC + // LDAR/LWAR (Rarg0), Rout + // STDCCC/STWCCC Rout, (Rarg0) + // BNE -2(PC) + // ISYNC + ld := ppc64.ALDAR + st := ppc64.ASTDCCC + if v.Op == ssa.OpPPC64LoweredAtomicExchange32 { + ld = ppc64.ALWAR + st = ppc64.ASTWCCC + } + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + out := v.Reg0() + // LWSYNC - Assuming shared data not write-through-required nor + // caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b. 
+ plwsync := s.Prog(ppc64.ALWSYNC) + plwsync.To.Type = obj.TYPE_NONE + // LDAR or LWAR + p := s.Prog(ld) + p.From.Type = obj.TYPE_MEM + p.From.Reg = r0 + p.To.Type = obj.TYPE_REG + p.To.Reg = out + // STDCCC or STWCCC + p1 := s.Prog(st) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = r1 + p1.To.Type = obj.TYPE_MEM + p1.To.Reg = r0 + // BNE retry + p2 := s.Prog(ppc64.ABNE) + p2.To.Type = obj.TYPE_BRANCH + p2.To.SetTarget(p) + // ISYNC + pisync := s.Prog(ppc64.AISYNC) + pisync.To.Type = obj.TYPE_NONE + + case ssa.OpPPC64LoweredAtomicLoad8, + ssa.OpPPC64LoweredAtomicLoad32, + ssa.OpPPC64LoweredAtomicLoad64, + ssa.OpPPC64LoweredAtomicLoadPtr: + // SYNC + // MOVB/MOVD/MOVW (Rarg0), Rout + // CMP Rout,Rout + // BNE 1(PC) + // ISYNC + ld := ppc64.AMOVD + cmp := ppc64.ACMP + switch v.Op { + case ssa.OpPPC64LoweredAtomicLoad8: + ld = ppc64.AMOVBZ + case ssa.OpPPC64LoweredAtomicLoad32: + ld = ppc64.AMOVWZ + cmp = ppc64.ACMPW + } + arg0 := v.Args[0].Reg() + out := v.Reg0() + // SYNC when AuxInt == 1; otherwise, load-acquire + if v.AuxInt == 1 { + psync := s.Prog(ppc64.ASYNC) + psync.To.Type = obj.TYPE_NONE + } + // Load + p := s.Prog(ld) + p.From.Type = obj.TYPE_MEM + p.From.Reg = arg0 + p.To.Type = obj.TYPE_REG + p.To.Reg = out + // CMP + p1 := s.Prog(cmp) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = out + p1.To.Type = obj.TYPE_REG + p1.To.Reg = out + // BNE + p2 := s.Prog(ppc64.ABNE) + p2.To.Type = obj.TYPE_BRANCH + // ISYNC + pisync := s.Prog(ppc64.AISYNC) + pisync.To.Type = obj.TYPE_NONE + p2.To.SetTarget(pisync) + + case ssa.OpPPC64LoweredAtomicStore8, + ssa.OpPPC64LoweredAtomicStore32, + ssa.OpPPC64LoweredAtomicStore64: + // SYNC or LWSYNC + // MOVB/MOVW/MOVD arg1,(arg0) + st := ppc64.AMOVD + switch v.Op { + case ssa.OpPPC64LoweredAtomicStore8: + st = ppc64.AMOVB + case ssa.OpPPC64LoweredAtomicStore32: + st = ppc64.AMOVW + } + arg0 := v.Args[0].Reg() + arg1 := v.Args[1].Reg() + // If AuxInt == 0, LWSYNC (Store-Release), else SYNC + // SYNC + syncOp := ppc64.ASYNC + if 
v.AuxInt == 0 { + syncOp = ppc64.ALWSYNC + } + psync := s.Prog(syncOp) + psync.To.Type = obj.TYPE_NONE + // Store + p := s.Prog(st) + p.To.Type = obj.TYPE_MEM + p.To.Reg = arg0 + p.From.Type = obj.TYPE_REG + p.From.Reg = arg1 + + case ssa.OpPPC64LoweredAtomicCas64, + ssa.OpPPC64LoweredAtomicCas32: + // MOVD $0, Rout + // LWSYNC + // loop: + // LDAR (Rarg0), MutexHint, Rtmp + // CMP Rarg1, Rtmp + // BNE end + // STDCCC Rarg2, (Rarg0) + // BNE loop + // MOVD $1, Rout + // end: + // LWSYNC // Only for sequential consistency; not required in CasRel. + ld := ppc64.ALDAR + st := ppc64.ASTDCCC + cmp := ppc64.ACMP + if v.Op == ssa.OpPPC64LoweredAtomicCas32 { + ld = ppc64.ALWAR + st = ppc64.ASTWCCC + cmp = ppc64.ACMPW + } + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + r2 := v.Args[2].Reg() + out := v.Reg0() + // Initialize return value to false + p := s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 0 + p.To.Type = obj.TYPE_REG + p.To.Reg = out + // LWSYNC - Assuming shared data not write-through-required nor + // caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b. + plwsync1 := s.Prog(ppc64.ALWSYNC) + plwsync1.To.Type = obj.TYPE_NONE + // LDAR or LWAR + p0 := s.Prog(ld) + p0.From.Type = obj.TYPE_MEM + p0.From.Reg = r0 + p0.To.Type = obj.TYPE_REG + p0.To.Reg = ppc64.REGTMP + // If it is a Compare-and-Swap-Release operation, set the EH field with + // the release hint. 
+ if v.AuxInt == 0 { + p0.AddRestSourceConst(0) + } + // CMP reg1,reg2 + p1 := s.Prog(cmp) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = r1 + p1.To.Reg = ppc64.REGTMP + p1.To.Type = obj.TYPE_REG + // BNE done with return value = false + p2 := s.Prog(ppc64.ABNE) + p2.To.Type = obj.TYPE_BRANCH + // STDCCC or STWCCC + p3 := s.Prog(st) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = r2 + p3.To.Type = obj.TYPE_MEM + p3.To.Reg = r0 + // BNE retry + p4 := s.Prog(ppc64.ABNE) + p4.To.Type = obj.TYPE_BRANCH + p4.To.SetTarget(p0) + // return value true + p5 := s.Prog(ppc64.AMOVD) + p5.From.Type = obj.TYPE_CONST + p5.From.Offset = 1 + p5.To.Type = obj.TYPE_REG + p5.To.Reg = out + // LWSYNC - Assuming shared data not write-through-required nor + // caching-inhibited. See Appendix B.2.1.1 in the ISA 2.07b. + // If the operation is a CAS-Release, then synchronization is not necessary. + if v.AuxInt != 0 { + plwsync2 := s.Prog(ppc64.ALWSYNC) + plwsync2.To.Type = obj.TYPE_NONE + p2.To.SetTarget(plwsync2) + } else { + // done (label) + p6 := s.Prog(obj.ANOP) + p2.To.SetTarget(p6) + } + + case ssa.OpPPC64LoweredPubBarrier: + // LWSYNC + s.Prog(v.Op.Asm()) + + case ssa.OpPPC64LoweredGetClosurePtr: + // Closure pointer is R11 (already) + ssagen.CheckLoweredGetClosurePtr(v) + + case ssa.OpPPC64LoweredGetCallerSP: + // caller's SP is FixedFrameSize below the address of the first arg + p := s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = -base.Ctxt.Arch.FixedFrameSize + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpPPC64LoweredGetCallerPC: + p := s.Prog(obj.AGETCALLERPC) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpPPC64LoweredRound32F, ssa.OpPPC64LoweredRound64F: + // input is already rounded + + case ssa.OpLoadReg: + loadOp := loadByType(v.Type) + p := s.Prog(loadOp) + ssagen.AddrAuto(&p.From, v.Args[0]) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpStoreReg: + storeOp := 
storeByType(v.Type) + p := s.Prog(storeOp) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + ssagen.AddrAuto(&p.To, v) + + case ssa.OpArgIntReg, ssa.OpArgFloatReg: + // The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill + // The loop only runs once. + for _, a := range v.Block.Func.RegArgs { + // Pass the spill/unspill information along to the assembler, offset by size of + // the saved LR slot. + addr := ssagen.SpillSlotAddr(a, ppc64.REGSP, base.Ctxt.Arch.FixedFrameSize) + s.FuncInfo().AddSpill( + obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)}) + } + v.Block.Func.RegArgs = nil + + ssagen.CheckArgReg(v) + + case ssa.OpPPC64DIVD: + // For now, + // + // cmp arg1, -1 + // be ahead + // v = arg0 / arg1 + // b over + // ahead: v = - arg0 + // over: nop + r := v.Reg() + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + + p := s.Prog(ppc64.ACMP) + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + p.To.Type = obj.TYPE_CONST + p.To.Offset = -1 + + pbahead := s.Prog(ppc64.ABEQ) + pbahead.To.Type = obj.TYPE_BRANCH + + p = s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + p.Reg = r0 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + + pbover := s.Prog(obj.AJMP) + pbover.To.Type = obj.TYPE_BRANCH + + p = s.Prog(ppc64.ANEG) + p.To.Type = obj.TYPE_REG + p.To.Reg = r + p.From.Type = obj.TYPE_REG + p.From.Reg = r0 + pbahead.To.SetTarget(p) + + p = s.Prog(obj.ANOP) + pbover.To.SetTarget(p) + + case ssa.OpPPC64DIVW: + // word-width version of above + r := v.Reg() + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + + p := s.Prog(ppc64.ACMPW) + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + p.To.Type = obj.TYPE_CONST + p.To.Offset = -1 + + pbahead := s.Prog(ppc64.ABEQ) + pbahead.To.Type = obj.TYPE_BRANCH + + p = s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + p.Reg = r0 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + + pbover := s.Prog(obj.AJMP) + pbover.To.Type = 
obj.TYPE_BRANCH + + p = s.Prog(ppc64.ANEG) + p.To.Type = obj.TYPE_REG + p.To.Reg = r + p.From.Type = obj.TYPE_REG + p.From.Reg = r0 + pbahead.To.SetTarget(p) + + p = s.Prog(obj.ANOP) + pbover.To.SetTarget(p) + + case ssa.OpPPC64CLRLSLWI: + r := v.Reg() + r1 := v.Args[0].Reg() + shifts := v.AuxInt + p := s.Prog(v.Op.Asm()) + // clrlslwi ra,rs,mb,sh will become rlwinm ra,rs,sh,mb-sh,31-sh as described in ISA + p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)} + p.AddRestSourceConst(ssa.GetPPC64Shiftsh(shifts)) + p.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + + case ssa.OpPPC64CLRLSLDI: + r := v.Reg() + r1 := v.Args[0].Reg() + shifts := v.AuxInt + p := s.Prog(v.Op.Asm()) + // clrlsldi ra,rs,mb,sh will become rldic ra,rs,sh,mb-sh + p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)} + p.AddRestSourceConst(ssa.GetPPC64Shiftsh(shifts)) + p.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + + case ssa.OpPPC64ADD, ssa.OpPPC64FADD, ssa.OpPPC64FADDS, ssa.OpPPC64SUB, ssa.OpPPC64FSUB, ssa.OpPPC64FSUBS, + ssa.OpPPC64MULLD, ssa.OpPPC64MULLW, ssa.OpPPC64DIVDU, ssa.OpPPC64DIVWU, + ssa.OpPPC64SRAD, ssa.OpPPC64SRAW, ssa.OpPPC64SRD, ssa.OpPPC64SRW, ssa.OpPPC64SLD, ssa.OpPPC64SLW, + ssa.OpPPC64ROTL, ssa.OpPPC64ROTLW, + ssa.OpPPC64MULHD, ssa.OpPPC64MULHW, ssa.OpPPC64MULHDU, ssa.OpPPC64MULHWU, + ssa.OpPPC64FMUL, ssa.OpPPC64FMULS, ssa.OpPPC64FDIV, ssa.OpPPC64FDIVS, ssa.OpPPC64FCPSGN, + ssa.OpPPC64AND, ssa.OpPPC64OR, ssa.OpPPC64ANDN, ssa.OpPPC64ORN, ssa.OpPPC64NOR, ssa.OpPPC64XOR, ssa.OpPPC64EQV, + ssa.OpPPC64MODUD, ssa.OpPPC64MODSD, ssa.OpPPC64MODUW, ssa.OpPPC64MODSW: + r := v.Reg() + r1 := v.Args[0].Reg() + r2 := v.Args[1].Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r2 + p.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + + case ssa.OpPPC64ADDCC, ssa.OpPPC64ANDCC, ssa.OpPPC64SUBCC, ssa.OpPPC64ORCC, ssa.OpPPC64XORCC, ssa.OpPPC64NORCC, + ssa.OpPPC64ANDNCC: + r1 := v.Args[0].Reg() + r2 := 
v.Args[1].Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r2 + p.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + + case ssa.OpPPC64NEGCC, ssa.OpPPC64CNTLZDCC: + p := s.Prog(v.Op.Asm()) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + + case ssa.OpPPC64ROTLconst, ssa.OpPPC64ROTLWconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + // Auxint holds encoded rotate + mask + case ssa.OpPPC64RLWINM, ssa.OpPPC64RLWMI: + sh, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt) + p := s.Prog(v.Op.Asm()) + p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()} + p.Reg = v.Args[0].Reg() + p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(sh)} + p.AddRestSourceArgs([]obj.Addr{{Type: obj.TYPE_CONST, Offset: mb}, {Type: obj.TYPE_CONST, Offset: me}}) + // Auxint holds mask + + case ssa.OpPPC64RLDICL, ssa.OpPPC64RLDICR: + sh, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt) + p := s.Prog(v.Op.Asm()) + p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: sh} + switch v.Op { + case ssa.OpPPC64RLDICL: + p.AddRestSourceConst(mb) + case ssa.OpPPC64RLDICR: + p.AddRestSourceConst(me) + } + p.Reg = v.Args[0].Reg() + p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()} + + case ssa.OpPPC64RLWNM: + _, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt) + p := s.Prog(v.Op.Asm()) + p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()} + p.Reg = v.Args[0].Reg() + p.From = obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[1].Reg()} + p.AddRestSourceArgs([]obj.Addr{{Type: obj.TYPE_CONST, Offset: mb}, {Type: obj.TYPE_CONST, Offset: me}}) + + case ssa.OpPPC64MADDLD: + r := v.Reg() + r1 := v.Args[0].Reg() + r2 := v.Args[1].Reg() + r3 := v.Args[2].Reg() + // r = r1*r2 ± r3 + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + p.Reg = r2 + p.AddRestSourceReg(r3) + p.To.Type = obj.TYPE_REG + 
p.To.Reg = r + + case ssa.OpPPC64FMADD, ssa.OpPPC64FMADDS, ssa.OpPPC64FMSUB, ssa.OpPPC64FMSUBS: + r := v.Reg() + r1 := v.Args[0].Reg() + r2 := v.Args[1].Reg() + r3 := v.Args[2].Reg() + // r = r1*r2 ± r3 + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + p.Reg = r3 + p.AddRestSourceReg(r2) + p.To.Type = obj.TYPE_REG + p.To.Reg = r + + case ssa.OpPPC64NEG, ssa.OpPPC64FNEG, ssa.OpPPC64FSQRT, ssa.OpPPC64FSQRTS, ssa.OpPPC64FFLOOR, ssa.OpPPC64FTRUNC, ssa.OpPPC64FCEIL, + ssa.OpPPC64FCTIDZ, ssa.OpPPC64FCTIWZ, ssa.OpPPC64FCFID, ssa.OpPPC64FCFIDS, ssa.OpPPC64FRSP, ssa.OpPPC64CNTLZD, ssa.OpPPC64CNTLZW, + ssa.OpPPC64POPCNTD, ssa.OpPPC64POPCNTW, ssa.OpPPC64POPCNTB, ssa.OpPPC64MFVSRD, ssa.OpPPC64MTVSRD, ssa.OpPPC64FABS, ssa.OpPPC64FNABS, + ssa.OpPPC64FROUND, ssa.OpPPC64CNTTZW, ssa.OpPPC64CNTTZD, ssa.OpPPC64BRH, ssa.OpPPC64BRW, ssa.OpPPC64BRD: + r := v.Reg() + p := s.Prog(v.Op.Asm()) + p.To.Type = obj.TYPE_REG + p.To.Reg = r + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + + case ssa.OpPPC64ADDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst, + ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst, + ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst, ssa.OpPPC64EXTSWSLconst, ssa.OpPPC64MULLWconst, ssa.OpPPC64MULLDconst: + p := s.Prog(v.Op.Asm()) + p.Reg = v.Args[0].Reg() + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpPPC64ADDC, ssa.OpPPC64ADDE, ssa.OpPPC64SUBC, ssa.OpPPC64SUBE: + r := v.Reg0() // CA is the first, implied argument. 
+ r1 := v.Args[0].Reg() + r2 := v.Args[1].Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r2 + p.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + + case ssa.OpPPC64ADDZEzero, ssa.OpPPC64SUBZEzero: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_R0 + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpPPC64ADDCconst: + p := s.Prog(v.Op.Asm()) + p.Reg = v.Args[0].Reg() + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + // Output is a pair, the second is the CA, which is implied. + p.To.Reg = v.Reg0() + + case ssa.OpPPC64SUBCconst: + p := s.Prog(v.Op.Asm()) + p.AddRestSourceConst(v.AuxInt) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + + case ssa.OpPPC64SUBFCconst: + p := s.Prog(v.Op.Asm()) + p.AddRestSourceConst(v.AuxInt) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpPPC64ADDCCconst, ssa.OpPPC64ANDCCconst: + p := s.Prog(v.Op.Asm()) + p.Reg = v.Args[0].Reg() + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + + case ssa.OpPPC64MOVDaddr: + switch v.Aux.(type) { + default: + v.Fatalf("aux in MOVDaddr is of unknown type %T", v.Aux) + case nil: + // If aux offset and aux int are both 0, and the same + // input and output regs are used, no instruction + // needs to be generated, since it would just be + // addi rx, rx, 0. 
+ if v.AuxInt != 0 || v.Args[0].Reg() != v.Reg() { + p := s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_ADDR + p.From.Reg = v.Args[0].Reg() + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + } + + case *obj.LSym, ir.Node: + p := s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_ADDR + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + ssagen.AddAux(&p.From, v) + + } + + case ssa.OpPPC64MOVDconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpPPC64FMOVDconst, ssa.OpPPC64FMOVSconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_FCONST + p.From.Val = math.Float64frombits(uint64(v.AuxInt)) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpPPC64FCMPU, ssa.OpPPC64CMP, ssa.OpPPC64CMPW, ssa.OpPPC64CMPU, ssa.OpPPC64CMPWU: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Args[1].Reg() + + case ssa.OpPPC64CMPconst, ssa.OpPPC64CMPUconst, ssa.OpPPC64CMPWconst, ssa.OpPPC64CMPWUconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_CONST + p.To.Offset = v.AuxInt + + case ssa.OpPPC64MOVBreg, ssa.OpPPC64MOVBZreg, ssa.OpPPC64MOVHreg, ssa.OpPPC64MOVHZreg, ssa.OpPPC64MOVWreg, ssa.OpPPC64MOVWZreg: + // Shift in register to required size + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Reg = v.Reg() + p.To.Type = obj.TYPE_REG + + case ssa.OpPPC64MOVDload, ssa.OpPPC64MOVWload: + + // MOVDload and MOVWload are DS form instructions that are restricted to + // offsets that are a multiple of 4. If the offset is not a multiple of 4, + // then the address of the symbol to be loaded is computed (base + offset) + // and used as the new base register and the offset field in the instruction + // can be set to zero. 
+ + // This same problem can happen with gostrings since the final offset is not + // known yet, but could be unaligned after the relocation is resolved. + // So gostrings are handled the same way. + + // This allows the MOVDload and MOVWload to be generated in more cases and + // eliminates some offset and alignment checking in the rules file. + + fromAddr := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[0].Reg()} + ssagen.AddAux(&fromAddr, v) + + genAddr := false + + switch fromAddr.Name { + case obj.NAME_EXTERN, obj.NAME_STATIC: + // Special case for a rule combines the bytes of gostring. + // The v alignment might seem OK, but we don't want to load it + // using an offset because relocation comes later. + genAddr = strings.HasPrefix(fromAddr.Sym.Name, "go:string") || v.Type.Alignment()%4 != 0 || fromAddr.Offset%4 != 0 + default: + genAddr = fromAddr.Offset%4 != 0 + } + if genAddr { + // Load full address into the temp register. + p := s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_ADDR + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + // Load target using temp as base register + // and offset zero. Setting NAME_NONE + // prevents any extra offsets from being + // added. + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REGTMP + fromAddr.Reg = ppc64.REGTMP + // Clear the offset field and other + // information that might be used + // by the assembler to add to the + // final offset value. 
+ fromAddr.Offset = 0 + fromAddr.Name = obj.NAME_NONE + fromAddr.Sym = nil + } + p := s.Prog(v.Op.Asm()) + p.From = fromAddr + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload, ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpPPC64MOVDBRload, ssa.OpPPC64MOVWBRload, ssa.OpPPC64MOVHBRload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpPPC64MOVDBRstore, ssa.OpPPC64MOVWBRstore, ssa.OpPPC64MOVHBRstore: + p := s.Prog(v.Op.Asm()) + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + + case ssa.OpPPC64MOVDloadidx, ssa.OpPPC64MOVWloadidx, ssa.OpPPC64MOVHloadidx, ssa.OpPPC64MOVWZloadidx, + ssa.OpPPC64MOVBZloadidx, ssa.OpPPC64MOVHZloadidx, ssa.OpPPC64FMOVDloadidx, ssa.OpPPC64FMOVSloadidx, + ssa.OpPPC64MOVDBRloadidx, ssa.OpPPC64MOVWBRloadidx, ssa.OpPPC64MOVHBRloadidx: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + p.From.Index = v.Args[1].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpPPC64DCBT: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_CONST + p.To.Offset = v.AuxInt + + case ssa.OpPPC64MOVWstorezero, ssa.OpPPC64MOVHstorezero, ssa.OpPPC64MOVBstorezero: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REGZERO + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + + case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVDstorezero: + + // MOVDstore and MOVDstorezero become DS form instructions that are restricted + // to offset values that are a multiple of 4. 
If the offset field is not a + // multiple of 4, then the full address of the store target is computed (base + + // offset) and used as the new base register and the offset in the instruction + // is set to 0. + + // This allows the MOVDstore and MOVDstorezero to be generated in more cases, + // and prevents checking of the offset value and alignment in the rules. + + toAddr := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[0].Reg()} + ssagen.AddAux(&toAddr, v) + + if toAddr.Offset%4 != 0 { + p := s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_ADDR + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REGTMP + toAddr.Reg = ppc64.REGTMP + // Clear the offset field and other + // information that might be used + // by the assembler to add to the + // final offset value. + toAddr.Offset = 0 + toAddr.Name = obj.NAME_NONE + toAddr.Sym = nil + } + p := s.Prog(v.Op.Asm()) + p.To = toAddr + p.From.Type = obj.TYPE_REG + if v.Op == ssa.OpPPC64MOVDstorezero { + p.From.Reg = ppc64.REGZERO + } else { + p.From.Reg = v.Args[1].Reg() + } + + case ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore, ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + + case ssa.OpPPC64MOVDstoreidx, ssa.OpPPC64MOVWstoreidx, ssa.OpPPC64MOVHstoreidx, ssa.OpPPC64MOVBstoreidx, + ssa.OpPPC64FMOVDstoreidx, ssa.OpPPC64FMOVSstoreidx, ssa.OpPPC64MOVDBRstoreidx, ssa.OpPPC64MOVWBRstoreidx, + ssa.OpPPC64MOVHBRstoreidx: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[2].Reg() + p.To.Index = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + + case ssa.OpPPC64ISEL, ssa.OpPPC64ISELZ: + // ISEL AuxInt ? arg0 : arg1 + // ISELZ is a special case of ISEL where arg1 is implicitly $0. 
+ // + // AuxInt value indicates conditions 0=LT 1=GT 2=EQ 3=SO 4=GE 5=LE 6=NE 7=NSO. + // ISEL accepts a CR bit argument, not a condition as expressed by AuxInt. + // Convert the condition to a CR bit argument by the following conversion: + // + // AuxInt&3 ? arg0 : arg1 for conditions LT, GT, EQ, SO + // AuxInt&3 ? arg1 : arg0 for conditions GE, LE, NE, NSO + p := s.Prog(v.Op.Asm()) + p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()} + p.Reg = v.Args[0].Reg() + if v.Op == ssa.OpPPC64ISEL { + p.AddRestSourceReg(v.Args[1].Reg()) + } else { + p.AddRestSourceReg(ppc64.REG_R0) + } + // AuxInt values 4,5,6 implemented with reverse operand order from 0,1,2 + if v.AuxInt > 3 { + p.Reg, p.GetFrom3().Reg = p.GetFrom3().Reg, p.Reg + } + p.From.SetConst(v.AuxInt & 3) + + case ssa.OpPPC64SETBC, ssa.OpPPC64SETBCR: + p := s.Prog(v.Op.Asm()) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + p.From.Type = obj.TYPE_REG + p.From.Reg = int16(ppc64.REG_CR0LT + v.AuxInt) + + case ssa.OpPPC64LoweredQuadZero, ssa.OpPPC64LoweredQuadZeroShort: + // The LoweredQuad code generation + // generates STXV instructions on + // power9. The Short variation is used + // if no loop is generated. + + // sizes >= 64 generate a loop as follows: + + // Set up loop counter in CTR, used by BC + // XXLXOR clears VS32 + // XXLXOR VS32,VS32,VS32 + // MOVD len/64,REG_TMP + // MOVD REG_TMP,CTR + // loop: + // STXV VS32,0(R20) + // STXV VS32,16(R20) + // STXV VS32,32(R20) + // STXV VS32,48(R20) + // ADD $64,R20 + // BC 16, 0, loop + + // Bytes per iteration + ctr := v.AuxInt / 64 + + // Remainder bytes + rem := v.AuxInt % 64 + + // Only generate a loop if there is more + // than 1 iteration. 
+ if ctr > 1 { + // Set up VS32 (V0) to hold 0s + p := s.Prog(ppc64.AXXLXOR) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_VS32 + p.Reg = ppc64.REG_VS32 + + // Set up CTR loop counter + p = s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_CONST + p.From.Offset = ctr + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REGTMP + + p = s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REGTMP + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_CTR + + // Don't generate padding for + // loops with few iterations. + if ctr > 3 { + p = s.Prog(obj.APCALIGN) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 16 + } + + // generate 4 STXVs to zero 64 bytes + var top *obj.Prog + + p = s.Prog(ppc64.ASTXV) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + + // Save the top of loop + if top == nil { + top = p + } + p = s.Prog(ppc64.ASTXV) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.To.Offset = 16 + + p = s.Prog(ppc64.ASTXV) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.To.Offset = 32 + + p = s.Prog(ppc64.ASTXV) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.To.Offset = 48 + + // Increment address for the + // 64 bytes just zeroed. 
+ p = s.Prog(ppc64.AADD) + p.Reg = v.Args[0].Reg() + p.From.Type = obj.TYPE_CONST + p.From.Offset = 64 + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Args[0].Reg() + + // Branch back to top of loop + // based on CTR + // BC with BO_BCTR generates bdnz + p = s.Prog(ppc64.ABC) + p.From.Type = obj.TYPE_CONST + p.From.Offset = ppc64.BO_BCTR + p.Reg = ppc64.REG_CR0LT + p.To.Type = obj.TYPE_BRANCH + p.To.SetTarget(top) + } + // When ctr == 1 the loop was not generated but + // there are at least 64 bytes to clear, so add + // that to the remainder to generate the code + // to clear those doublewords + if ctr == 1 { + rem += 64 + } + + // Clear the remainder starting at offset zero + offset := int64(0) + + if rem >= 16 && ctr <= 1 { + // If the XXLXOR hasn't already been + // generated, do it here to initialize + // VS32 (V0) to 0. + p := s.Prog(ppc64.AXXLXOR) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_VS32 + p.Reg = ppc64.REG_VS32 + } + // Generate STXV for 32 or 64 + // bytes. 
+ for rem >= 32 { + p := s.Prog(ppc64.ASTXV) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.To.Offset = offset + + p = s.Prog(ppc64.ASTXV) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.To.Offset = offset + 16 + offset += 32 + rem -= 32 + } + // Generate 16 bytes + if rem >= 16 { + p := s.Prog(ppc64.ASTXV) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.To.Offset = offset + offset += 16 + rem -= 16 + } + + // first clear as many doublewords as possible + // then clear remaining sizes as available + for rem > 0 { + op, size := ppc64.AMOVB, int64(1) + switch { + case rem >= 8: + op, size = ppc64.AMOVD, 8 + case rem >= 4: + op, size = ppc64.AMOVW, 4 + case rem >= 2: + op, size = ppc64.AMOVH, 2 + } + p := s.Prog(op) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_R0 + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.To.Offset = offset + rem -= size + offset += size + } + + case ssa.OpPPC64LoweredZero, ssa.OpPPC64LoweredZeroShort: + + // Unaligned data doesn't hurt performance + // for these instructions on power8. + + // For sizes >= 64 generate a loop as follows: + + // Set up loop counter in CTR, used by BC + // XXLXOR VS32,VS32,VS32 + // MOVD len/32,REG_TMP + // MOVD REG_TMP,CTR + // MOVD $16,REG_TMP + // loop: + // STXVD2X VS32,(R0)(R20) + // STXVD2X VS32,(R31)(R20) + // ADD $32,R20 + // BC 16, 0, loop + // + // any remainder is done as described below + + // for sizes < 64 bytes, first clear as many doublewords as possible, + // then handle the remainder + // MOVD R0,(R20) + // MOVD R0,8(R20) + // .... etc. 
+ // + // the remainder bytes are cleared using one or more + // of the following instructions with the appropriate + // offsets depending which instructions are needed + // + // MOVW R0,n1(R20) 4 bytes + // MOVH R0,n2(R20) 2 bytes + // MOVB R0,n3(R20) 1 byte + // + // 7 bytes: MOVW, MOVH, MOVB + // 6 bytes: MOVW, MOVH + // 5 bytes: MOVW, MOVB + // 3 bytes: MOVH, MOVB + + // each loop iteration does 32 bytes + ctr := v.AuxInt / 32 + + // remainder bytes + rem := v.AuxInt % 32 + + // only generate a loop if there is more + // than 1 iteration. + if ctr > 1 { + // Set up VS32 (V0) to hold 0s + p := s.Prog(ppc64.AXXLXOR) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_VS32 + p.Reg = ppc64.REG_VS32 + + // Set up CTR loop counter + p = s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_CONST + p.From.Offset = ctr + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REGTMP + + p = s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REGTMP + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_CTR + + // Set up R31 to hold index value 16 + p = s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 16 + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REGTMP + + // Don't add padding for alignment + // with few loop iterations. 
+ if ctr > 3 { + p = s.Prog(obj.APCALIGN) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 16 + } + + // generate 2 STXVD2Xs to store 16 bytes + // when this is a loop then the top must be saved + var top *obj.Prog + // This is the top of loop + + p = s.Prog(ppc64.ASTXVD2X) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.To.Index = ppc64.REGZERO + // Save the top of loop + if top == nil { + top = p + } + p = s.Prog(ppc64.ASTXVD2X) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.To.Index = ppc64.REGTMP + + // Increment address for the + // 4 doublewords just zeroed. + p = s.Prog(ppc64.AADD) + p.Reg = v.Args[0].Reg() + p.From.Type = obj.TYPE_CONST + p.From.Offset = 32 + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Args[0].Reg() + + // Branch back to top of loop + // based on CTR + // BC with BO_BCTR generates bdnz + p = s.Prog(ppc64.ABC) + p.From.Type = obj.TYPE_CONST + p.From.Offset = ppc64.BO_BCTR + p.Reg = ppc64.REG_CR0LT + p.To.Type = obj.TYPE_BRANCH + p.To.SetTarget(top) + } + + // when ctr == 1 the loop was not generated but + // there are at least 32 bytes to clear, so add + // that to the remainder to generate the code + // to clear those doublewords + if ctr == 1 { + rem += 32 + } + + // clear the remainder starting at offset zero + offset := int64(0) + + // first clear as many doublewords as possible + // then clear remaining sizes as available + for rem > 0 { + op, size := ppc64.AMOVB, int64(1) + switch { + case rem >= 8: + op, size = ppc64.AMOVD, 8 + case rem >= 4: + op, size = ppc64.AMOVW, 4 + case rem >= 2: + op, size = ppc64.AMOVH, 2 + } + p := s.Prog(op) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_R0 + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.To.Offset = offset + rem -= size + offset += size + } + + case ssa.OpPPC64LoweredMove, ssa.OpPPC64LoweredMoveShort: + + bytesPerLoop := int64(32) + // 
This will be used when moving more + // than 8 bytes. Moves start with + // as many 8 byte moves as possible, then + // 4, 2, or 1 byte(s) as remaining. This will + // work and be efficient for power8 or later. + // If there are 64 or more bytes, then a + // loop is generated to move 32 bytes and + // update the src and dst addresses on each + // iteration. When < 64 bytes, the appropriate + // number of moves are generated based on the + // size. + // When moving >= 64 bytes a loop is used + // MOVD len/32,REG_TMP + // MOVD REG_TMP,CTR + // MOVD $16,REG_TMP + // top: + // LXVD2X (R0)(R21),VS32 + // LXVD2X (R31)(R21),VS33 + // ADD $32,R21 + // STXVD2X VS32,(R0)(R20) + // STXVD2X VS33,(R31)(R20) + // ADD $32,R20 + // BC 16,0,top + // Bytes not moved by this loop are moved + // with a combination of the following instructions, + // starting with the largest sizes and generating as + // many as needed, using the appropriate offset value. + // MOVD n(R21),R31 + // MOVD R31,n(R20) + // MOVW n1(R21),R31 + // MOVW R31,n1(R20) + // MOVH n2(R21),R31 + // MOVH R31,n2(R20) + // MOVB n3(R21),R31 + // MOVB R31,n3(R20) + + // Each loop iteration moves 32 bytes + ctr := v.AuxInt / bytesPerLoop + + // Remainder after the loop + rem := v.AuxInt % bytesPerLoop + + dstReg := v.Args[0].Reg() + srcReg := v.Args[1].Reg() + + // The set of registers used here, must match the clobbered reg list + // in PPC64Ops.go. 
+ offset := int64(0) + + // top of the loop + var top *obj.Prog + // Only generate looping code when loop counter is > 1 for >= 64 bytes + if ctr > 1 { + // Set up the CTR + p := s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_CONST + p.From.Offset = ctr + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REGTMP + + p = s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REGTMP + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_CTR + + // Use REGTMP as index reg + p = s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 16 + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REGTMP + + // Don't adding padding for + // alignment with small iteration + // counts. + if ctr > 3 { + p = s.Prog(obj.APCALIGN) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 16 + } + + // Generate 16 byte loads and stores. + // Use temp register for index (16) + // on the second one. + + p = s.Prog(ppc64.ALXVD2X) + p.From.Type = obj.TYPE_MEM + p.From.Reg = srcReg + p.From.Index = ppc64.REGZERO + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_VS32 + if top == nil { + top = p + } + p = s.Prog(ppc64.ALXVD2X) + p.From.Type = obj.TYPE_MEM + p.From.Reg = srcReg + p.From.Index = ppc64.REGTMP + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_VS33 + + // increment the src reg for next iteration + p = s.Prog(ppc64.AADD) + p.Reg = srcReg + p.From.Type = obj.TYPE_CONST + p.From.Offset = bytesPerLoop + p.To.Type = obj.TYPE_REG + p.To.Reg = srcReg + + // generate 16 byte stores + p = s.Prog(ppc64.ASTXVD2X) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_MEM + p.To.Reg = dstReg + p.To.Index = ppc64.REGZERO + + p = s.Prog(ppc64.ASTXVD2X) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS33 + p.To.Type = obj.TYPE_MEM + p.To.Reg = dstReg + p.To.Index = ppc64.REGTMP + + // increment the dst reg for next iteration + p = s.Prog(ppc64.AADD) + p.Reg = dstReg + p.From.Type = obj.TYPE_CONST + p.From.Offset = bytesPerLoop + p.To.Type = obj.TYPE_REG + 
p.To.Reg = dstReg + + // BC with BO_BCTR generates bdnz to branch on nonzero CTR + // to loop top. + p = s.Prog(ppc64.ABC) + p.From.Type = obj.TYPE_CONST + p.From.Offset = ppc64.BO_BCTR + p.Reg = ppc64.REG_CR0LT + p.To.Type = obj.TYPE_BRANCH + p.To.SetTarget(top) + + // srcReg and dstReg were incremented in the loop, so + // later instructions start with offset 0. + offset = int64(0) + } + + // No loop was generated for one iteration, so + // add 32 bytes to the remainder to move those bytes. + if ctr == 1 { + rem += bytesPerLoop + } + + if rem >= 16 { + // Generate 16 byte loads and stores. + // Use temp register for index (value 16) + // on the second one. + p := s.Prog(ppc64.ALXVD2X) + p.From.Type = obj.TYPE_MEM + p.From.Reg = srcReg + p.From.Index = ppc64.REGZERO + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_VS32 + + p = s.Prog(ppc64.ASTXVD2X) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_MEM + p.To.Reg = dstReg + p.To.Index = ppc64.REGZERO + + offset = 16 + rem -= 16 + + if rem >= 16 { + // Use REGTMP as index reg + p := s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 16 + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REGTMP + + p = s.Prog(ppc64.ALXVD2X) + p.From.Type = obj.TYPE_MEM + p.From.Reg = srcReg + p.From.Index = ppc64.REGTMP + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_VS32 + + p = s.Prog(ppc64.ASTXVD2X) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_MEM + p.To.Reg = dstReg + p.To.Index = ppc64.REGTMP + + offset = 32 + rem -= 16 + } + } + + // Generate all the remaining load and store pairs, starting with + // as many 8 byte moves as possible, then 4, 2, 1. 
+ for rem > 0 { + op, size := ppc64.AMOVB, int64(1) + switch { + case rem >= 8: + op, size = ppc64.AMOVD, 8 + case rem >= 4: + op, size = ppc64.AMOVWZ, 4 + case rem >= 2: + op, size = ppc64.AMOVH, 2 + } + // Load + p := s.Prog(op) + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REGTMP + p.From.Type = obj.TYPE_MEM + p.From.Reg = srcReg + p.From.Offset = offset + + // Store + p = s.Prog(op) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REGTMP + p.To.Type = obj.TYPE_MEM + p.To.Reg = dstReg + p.To.Offset = offset + rem -= size + offset += size + } + + case ssa.OpPPC64LoweredQuadMove, ssa.OpPPC64LoweredQuadMoveShort: + bytesPerLoop := int64(64) + // This is used when moving more + // than 8 bytes on power9. Moves start with + // as many 8 byte moves as possible, then + // 4, 2, or 1 byte(s) as remaining. This will + // work and be efficient for power8 or later. + // If there are 64 or more bytes, then a + // loop is generated to move 32 bytes and + // update the src and dst addresses on each + // iteration. When < 64 bytes, the appropriate + // number of moves are generated based on the + // size. + // When moving >= 64 bytes a loop is used + // MOVD len/32,REG_TMP + // MOVD REG_TMP,CTR + // top: + // LXV 0(R21),VS32 + // LXV 16(R21),VS33 + // ADD $32,R21 + // STXV VS32,0(R20) + // STXV VS33,16(R20) + // ADD $32,R20 + // BC 16,0,top + // Bytes not moved by this loop are moved + // with a combination of the following instructions, + // starting with the largest sizes and generating as + // many as needed, using the appropriate offset value. 
+ // MOVD n(R21),R31 + // MOVD R31,n(R20) + // MOVW n1(R21),R31 + // MOVW R31,n1(R20) + // MOVH n2(R21),R31 + // MOVH R31,n2(R20) + // MOVB n3(R21),R31 + // MOVB R31,n3(R20) + + // Each loop iteration moves 32 bytes + ctr := v.AuxInt / bytesPerLoop + + // Remainder after the loop + rem := v.AuxInt % bytesPerLoop + + dstReg := v.Args[0].Reg() + srcReg := v.Args[1].Reg() + + offset := int64(0) + + // top of the loop + var top *obj.Prog + + // Only generate looping code when loop counter is > 1 for >= 64 bytes + if ctr > 1 { + // Set up the CTR + p := s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_CONST + p.From.Offset = ctr + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REGTMP + + p = s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REGTMP + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_CTR + + p = s.Prog(obj.APCALIGN) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 16 + + // Generate 16 byte loads and stores. + p = s.Prog(ppc64.ALXV) + p.From.Type = obj.TYPE_MEM + p.From.Reg = srcReg + p.From.Offset = offset + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_VS32 + if top == nil { + top = p + } + p = s.Prog(ppc64.ALXV) + p.From.Type = obj.TYPE_MEM + p.From.Reg = srcReg + p.From.Offset = offset + 16 + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_VS33 + + // generate 16 byte stores + p = s.Prog(ppc64.ASTXV) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_MEM + p.To.Reg = dstReg + p.To.Offset = offset + + p = s.Prog(ppc64.ASTXV) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS33 + p.To.Type = obj.TYPE_MEM + p.To.Reg = dstReg + p.To.Offset = offset + 16 + + // Generate 16 byte loads and stores. 
+ p = s.Prog(ppc64.ALXV) + p.From.Type = obj.TYPE_MEM + p.From.Reg = srcReg + p.From.Offset = offset + 32 + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_VS32 + + p = s.Prog(ppc64.ALXV) + p.From.Type = obj.TYPE_MEM + p.From.Reg = srcReg + p.From.Offset = offset + 48 + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_VS33 + + // generate 16 byte stores + p = s.Prog(ppc64.ASTXV) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_MEM + p.To.Reg = dstReg + p.To.Offset = offset + 32 + + p = s.Prog(ppc64.ASTXV) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS33 + p.To.Type = obj.TYPE_MEM + p.To.Reg = dstReg + p.To.Offset = offset + 48 + + // increment the src reg for next iteration + p = s.Prog(ppc64.AADD) + p.Reg = srcReg + p.From.Type = obj.TYPE_CONST + p.From.Offset = bytesPerLoop + p.To.Type = obj.TYPE_REG + p.To.Reg = srcReg + + // increment the dst reg for next iteration + p = s.Prog(ppc64.AADD) + p.Reg = dstReg + p.From.Type = obj.TYPE_CONST + p.From.Offset = bytesPerLoop + p.To.Type = obj.TYPE_REG + p.To.Reg = dstReg + + // BC with BO_BCTR generates bdnz to branch on nonzero CTR + // to loop top. + p = s.Prog(ppc64.ABC) + p.From.Type = obj.TYPE_CONST + p.From.Offset = ppc64.BO_BCTR + p.Reg = ppc64.REG_CR0LT + p.To.Type = obj.TYPE_BRANCH + p.To.SetTarget(top) + + // srcReg and dstReg were incremented in the loop, so + // later instructions start with offset 0. + offset = int64(0) + } + + // No loop was generated for one iteration, so + // add 32 bytes to the remainder to move those bytes. 
+ if ctr == 1 { + rem += bytesPerLoop + } + if rem >= 32 { + p := s.Prog(ppc64.ALXV) + p.From.Type = obj.TYPE_MEM + p.From.Reg = srcReg + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_VS32 + + p = s.Prog(ppc64.ALXV) + p.From.Type = obj.TYPE_MEM + p.From.Reg = srcReg + p.From.Offset = 16 + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_VS33 + + p = s.Prog(ppc64.ASTXV) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_MEM + p.To.Reg = dstReg + + p = s.Prog(ppc64.ASTXV) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS33 + p.To.Type = obj.TYPE_MEM + p.To.Reg = dstReg + p.To.Offset = 16 + + offset = 32 + rem -= 32 + } + + if rem >= 16 { + // Generate 16 byte loads and stores. + p := s.Prog(ppc64.ALXV) + p.From.Type = obj.TYPE_MEM + p.From.Reg = srcReg + p.From.Offset = offset + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_VS32 + + p = s.Prog(ppc64.ASTXV) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_MEM + p.To.Reg = dstReg + p.To.Offset = offset + + offset += 16 + rem -= 16 + + if rem >= 16 { + p := s.Prog(ppc64.ALXV) + p.From.Type = obj.TYPE_MEM + p.From.Reg = srcReg + p.From.Offset = offset + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_VS32 + + p = s.Prog(ppc64.ASTXV) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_VS32 + p.To.Type = obj.TYPE_MEM + p.To.Reg = dstReg + p.To.Offset = offset + + offset += 16 + rem -= 16 + } + } + // Generate all the remaining load and store pairs, starting with + // as many 8 byte moves as possible, then 4, 2, 1. 
+ for rem > 0 { + op, size := ppc64.AMOVB, int64(1) + switch { + case rem >= 8: + op, size = ppc64.AMOVD, 8 + case rem >= 4: + op, size = ppc64.AMOVWZ, 4 + case rem >= 2: + op, size = ppc64.AMOVH, 2 + } + // Load + p := s.Prog(op) + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REGTMP + p.From.Type = obj.TYPE_MEM + p.From.Reg = srcReg + p.From.Offset = offset + + // Store + p = s.Prog(op) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REGTMP + p.To.Type = obj.TYPE_MEM + p.To.Reg = dstReg + p.To.Offset = offset + rem -= size + offset += size + } + + case ssa.OpPPC64CALLstatic: + s.Call(v) + + case ssa.OpPPC64CALLtail: + s.TailCall(v) + + case ssa.OpPPC64CALLclosure, ssa.OpPPC64CALLinter: + p := s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_LR + + if v.Args[0].Reg() != ppc64.REG_R12 { + v.Fatalf("Function address for %v should be in R12 %d but is in %d", v.LongString(), ppc64.REG_R12, p.From.Reg) + } + + pp := s.Call(v) + + // Convert the call into a blrl with hint this is not a subroutine return. + // The full bclrl opcode must be specified when passing a hint. + pp.As = ppc64.ABCL + pp.From.Type = obj.TYPE_CONST + pp.From.Offset = ppc64.BO_ALWAYS + pp.Reg = ppc64.REG_CR0LT // The preferred value if BI is ignored. + pp.To.Reg = ppc64.REG_LR + pp.AddRestSourceConst(1) + + if ppc64.NeedTOCpointer(base.Ctxt) { + // When compiling Go into PIC, the function we just + // called via pointer might have been implemented in + // a separate module and so overwritten the TOC + // pointer in R2; reload it. + q := s.Prog(ppc64.AMOVD) + q.From.Type = obj.TYPE_MEM + q.From.Offset = 24 + q.From.Reg = ppc64.REGSP + q.To.Type = obj.TYPE_REG + q.To.Reg = ppc64.REG_R2 + } + + case ssa.OpPPC64LoweredWB: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + // AuxInt encodes how many buffer entries we need. 
+ p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1] + + case ssa.OpPPC64LoweredPanicBoundsA, ssa.OpPPC64LoweredPanicBoundsB, ssa.OpPPC64LoweredPanicBoundsC: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] + s.UseArgs(16) // space used in callee args area by assembly stubs + + case ssa.OpPPC64LoweredNilCheck: + if buildcfg.GOOS == "aix" { + // CMP Rarg0, R0 + // BNE 2(PC) + // STW R0, 0(R0) + // NOP (so the BNE has somewhere to land) + + // CMP Rarg0, R0 + p := s.Prog(ppc64.ACMP) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_R0 + + // BNE 2(PC) + p2 := s.Prog(ppc64.ABNE) + p2.To.Type = obj.TYPE_BRANCH + + // STW R0, 0(R0) + // Write at 0 is forbidden and will trigger a SIGSEGV + p = s.Prog(ppc64.AMOVW) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_R0 + p.To.Type = obj.TYPE_MEM + p.To.Reg = ppc64.REG_R0 + + // NOP (so the BNE has somewhere to land) + nop := s.Prog(obj.ANOP) + p2.To.SetTarget(nop) + + } else { + // Issue a load which will fault if arg is nil. + p := s.Prog(ppc64.AMOVBZ) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REGTMP + } + if logopt.Enabled() { + logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) + } + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") + } + + // These should be resolved by rules and not make it here. 
+ case ssa.OpPPC64Equal, ssa.OpPPC64NotEqual, ssa.OpPPC64LessThan, ssa.OpPPC64FLessThan, + ssa.OpPPC64LessEqual, ssa.OpPPC64GreaterThan, ssa.OpPPC64FGreaterThan, ssa.OpPPC64GreaterEqual, + ssa.OpPPC64FLessEqual, ssa.OpPPC64FGreaterEqual: + v.Fatalf("Pseudo-op should not make it to codegen: %s ###\n", v.LongString()) + case ssa.OpPPC64InvertFlags: + v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) + case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT: + v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString()) + case ssa.OpClobber, ssa.OpClobberReg: + // TODO: implement for clobberdead experiment. Nop is ok for now. + default: + v.Fatalf("genValue not implemented: %s", v.LongString()) + } +} + +var blockJump = [...]struct { + asm, invasm obj.As + asmeq, invasmun bool +}{ + ssa.BlockPPC64EQ: {ppc64.ABEQ, ppc64.ABNE, false, false}, + ssa.BlockPPC64NE: {ppc64.ABNE, ppc64.ABEQ, false, false}, + + ssa.BlockPPC64LT: {ppc64.ABLT, ppc64.ABGE, false, false}, + ssa.BlockPPC64GE: {ppc64.ABGE, ppc64.ABLT, false, false}, + ssa.BlockPPC64LE: {ppc64.ABLE, ppc64.ABGT, false, false}, + ssa.BlockPPC64GT: {ppc64.ABGT, ppc64.ABLE, false, false}, + + // TODO: need to work FP comparisons into block jumps + ssa.BlockPPC64FLT: {ppc64.ABLT, ppc64.ABGE, false, false}, + ssa.BlockPPC64FGE: {ppc64.ABGT, ppc64.ABLT, true, true}, // GE = GT or EQ; !GE = LT or UN + ssa.BlockPPC64FLE: {ppc64.ABLT, ppc64.ABGT, true, true}, // LE = LT or EQ; !LE = GT or UN + ssa.BlockPPC64FGT: {ppc64.ABGT, ppc64.ABLE, false, false}, +} + +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { + switch b.Kind { + case ssa.BlockDefer: + // defer returns in R3: + // 0 if we should continue executing + // 1 if we should jump to deferreturn call + p := s.Prog(ppc64.ACMP) + p.From.Type = obj.TYPE_REG + p.From.Reg = ppc64.REG_R3 + p.To.Type = obj.TYPE_REG + p.To.Reg = ppc64.REG_R0 + + p = s.Prog(ppc64.ABNE) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, 
ssagen.Branch{P: p, B: b.Succs[1].Block()}) + if b.Succs[0].Block() != next { + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) + } + + case ssa.BlockPlain: + if b.Succs[0].Block() != next { + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) + } + case ssa.BlockExit, ssa.BlockRetJmp: + case ssa.BlockRet: + s.Prog(obj.ARET) + + case ssa.BlockPPC64EQ, ssa.BlockPPC64NE, + ssa.BlockPPC64LT, ssa.BlockPPC64GE, + ssa.BlockPPC64LE, ssa.BlockPPC64GT, + ssa.BlockPPC64FLT, ssa.BlockPPC64FGE, + ssa.BlockPPC64FLE, ssa.BlockPPC64FGT: + jmp := blockJump[b.Kind] + switch next { + case b.Succs[0].Block(): + s.Br(jmp.invasm, b.Succs[1].Block()) + if jmp.invasmun { + // TODO: The second branch is probably predict-not-taken since it is for FP unordered + s.Br(ppc64.ABVS, b.Succs[1].Block()) + } + case b.Succs[1].Block(): + s.Br(jmp.asm, b.Succs[0].Block()) + if jmp.asmeq { + s.Br(ppc64.ABEQ, b.Succs[0].Block()) + } + default: + if b.Likely != ssa.BranchUnlikely { + s.Br(jmp.asm, b.Succs[0].Block()) + if jmp.asmeq { + s.Br(ppc64.ABEQ, b.Succs[0].Block()) + } + s.Br(obj.AJMP, b.Succs[1].Block()) + } else { + s.Br(jmp.invasm, b.Succs[1].Block()) + if jmp.invasmun { + // TODO: The second branch is probably predict-not-taken since it is for FP unordered + s.Br(ppc64.ABVS, b.Succs[1].Block()) + } + s.Br(obj.AJMP, b.Succs[0].Block()) + } + } + default: + b.Fatalf("branch not implemented: %s", b.LongString()) + } +} + +func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { + p := s.Prog(loadByType(t)) + p.From.Type = obj.TYPE_MEM + p.From.Name = obj.NAME_AUTO + p.From.Sym = n.Linksym() + p.From.Offset = n.FrameOffset() + off + p.To.Type = obj.TYPE_REG + p.To.Reg = reg + return p +} + +func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n 
*ir.Name, off int64) *obj.Prog { + p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off) + p.To.Name = obj.NAME_PARAM + p.To.Sym = n.Linksym() + p.Pos = p.Pos.WithNotStmt() + return p +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/rangefunc/rangefunc_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/rangefunc/rangefunc_test.go new file mode 100644 index 0000000000000000000000000000000000000000..16856c648c6f66e5df8467ff75370ec0c58d6df2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/rangefunc/rangefunc_test.go @@ -0,0 +1,1297 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.rangefunc + +package rangefunc_test + +import ( + "slices" + "testing" +) + +type Seq2[T1, T2 any] func(yield func(T1, T2) bool) + +// OfSliceIndex returns a Seq over the elements of s. It is equivalent +// to range s. +func OfSliceIndex[T any, S ~[]T](s S) Seq2[int, T] { + return func(yield func(int, T) bool) { + for i, v := range s { + if !yield(i, v) { + return + } + } + return + } +} + +// BadOfSliceIndex is "bad" because it ignores the return value from yield +// and just keeps on iterating. +func BadOfSliceIndex[T any, S ~[]T](s S) Seq2[int, T] { + return func(yield func(int, T) bool) { + for i, v := range s { + yield(i, v) + } + return + } +} + +// VeryBadOfSliceIndex is "very bad" because it ignores the return value from yield +// and just keeps on iterating, and also wraps that call in a defer-recover so it can +// keep on trying after the first panic. 
func VeryBadOfSliceIndex[T any, S ~[]T](s S) Seq2[int, T] {
	return func(yield func(int, T) bool) {
		for i, v := range s {
			func() {
				// Swallow any panic raised by yield so the loop
				// keeps iterating even after the consumer exited.
				defer func() {
					recover()
				}()
				yield(i, v)
			}()
		}
		return
	}
}

// CooperativeBadOfSliceIndex calls the loop body from a goroutine after
// a ping on a channel, and returns recover() on that same channel.
func CooperativeBadOfSliceIndex[T any, S ~[]T](s S, proceed chan any) Seq2[int, T] {
	return func(yield func(int, T) bool) {
		for i, v := range s {
			if !yield(i, v) {
				// if the body breaks, call yield just once in a goroutine
				go func() {
					<-proceed
					defer func() {
						// Ship whatever the late yield panicked with
						// (or nil) back to the test.
						proceed <- recover()
					}()
					yield(0, s[0])
				}()
				return
			}
		}
		return
	}
}

// TrickyIterator is a type intended to test whether an iterator that
// calls a yield function after loop exit must inevitably escape the
// closure; this might be relevant to future checking/optimization.
type TrickyIterator struct {
	yield func(int, int) bool
}

func (ti *TrickyIterator) iterAll(s []int) Seq2[int, int] {
	return func(yield func(int, int) bool) {
		ti.yield = yield // Save yield for future abuse
		for i, v := range s {
			if !yield(i, v) {
				return
			}
		}
		return
	}
}

func (ti *TrickyIterator) iterOne(s []int) Seq2[int, int] {
	return func(yield func(int, int) bool) {
		ti.yield = yield // Save yield for future abuse
		if len(s) > 0 { // Not in a loop might escape differently
			yield(0, s[0])
		}
		return
	}
}

func (ti *TrickyIterator) iterZero(s []int) Seq2[int, int] {
	return func(yield func(int, int) bool) {
		ti.yield = yield // Save yield for future abuse
		// Don't call it at all, maybe it won't escape
		return
	}
}

// fail invokes the saved yield after its iteration has ended; with the
// rangefunc checking in place this call is expected to panic.
func (ti *TrickyIterator) fail() {
	if ti.yield != nil {
		ti.yield(1, 1)
	}
}

// Check wraps the function body passed to iterator forall
// in code that ensures that it cannot (successfully) be called
// either after body return false (control flow out of loop) or
// forall itself returns (the iteration is now done).
//
// Note that this can catch errors before the inserted checks.
func Check[U, V any](forall Seq2[U, V]) Seq2[U, V] {
	return func(body func(U, V) bool) {
		ret := true
		forall(func(u U, v V) bool {
			if !ret {
				panic("Checked iterator access after exit")
			}
			ret = body(u, v)
			return ret
		})
		// Once forall returns, any later call into the wrapper
		// trips the panic above.
		ret = false
	}
}

func TestCheck(t *testing.T) {
	i := 0
	defer func() {
		if r := recover(); r != nil {
			t.Logf("Saw expected panic '%v'", r)
		} else {
			t.Error("Wanted to see a failure")
		}
	}()
	for _, x := range Check(BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})) {
		i += x
		if i > 4*9 {
			break
		}
	}
}

func TestCooperativeBadOfSliceIndex(t *testing.T) {
	i := 0
	proceed := make(chan any)
	for _, x := range CooperativeBadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, proceed) {
		i += x
		if i >= 36 {
			break
		}
	}
	// Trigger the deferred out-of-band yield and collect its recover().
	proceed <- true
	if r := <-proceed; r != nil {
		t.Logf("Saw expected panic '%v'", r)
	} else {
		t.Error("Wanted to see a failure")
	}
	if i != 36 {
		t.Errorf("Expected i == 36, saw %d instead", i)
	} else {
		t.Logf("i = %d", i)
	}
}

func TestCheckCooperativeBadOfSliceIndex(t *testing.T) {
	i := 0
	proceed := make(chan any)
	for _, x := range Check(CooperativeBadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, proceed)) {
		i += x
		if i >= 36 {
			break
		}
	}
	// Trigger the deferred out-of-band yield and collect its recover().
	proceed <- true
	if r := <-proceed; r != nil {
		t.Logf("Saw expected panic '%v'", r)
	} else {
		t.Error("Wanted to see a failure")
	}
	if i != 36 {
		t.Errorf("Expected i == 36, saw %d instead", i)
	} else {
		t.Logf("i = %d", i)
	}
}

func TestTrickyIterAll(t *testing.T) {
	trickItAll := TrickyIterator{}
	i := 0
	for _, x := range trickItAll.iterAll([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
		i += x
		if i >= 36 {
			break
		}
	}

	if i != 36 {
		t.Errorf("Expected i == 36, saw %d instead", i)
	} else {
		t.Logf("i = %d", i)
	}

	defer func() {
		if r := recover(); r != nil {
			t.Logf("Saw expected panic '%v'", r)
		} else {
			t.Error("Wanted to see a failure")
		}
	}()

	// Calling the stashed yield after the loop ended must panic.
	trickItAll.fail()
}

func TestTrickyIterOne(t *testing.T) {
	trickItOne := TrickyIterator{}
	i := 0
	for _, x := range trickItOne.iterOne([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
		i += x
		if i >= 36 {
			break
		}
	}

	// Don't care about value, ought to be 36 anyhow.
	t.Logf("i = %d", i)

	defer func() {
		if r := recover(); r != nil {
			t.Logf("Saw expected panic '%v'", r)
		} else {
			t.Error("Wanted to see a failure")
		}
	}()

	// Calling the stashed yield after the loop ended must panic.
	trickItOne.fail()
}

func TestTrickyIterZero(t *testing.T) {
	trickItZero := TrickyIterator{}
	i := 0
	for _, x := range trickItZero.iterZero([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
		i += x
		if i >= 36 {
			break
		}
	}

	// Don't care about value, ought to be 0 anyhow.
	t.Logf("i = %d", i)

	defer func() {
		if r := recover(); r != nil {
			t.Logf("Saw expected panic '%v'", r)
		} else {
			t.Error("Wanted to see a failure")
		}
	}()

	// Calling the stashed yield after the loop ended must panic.
	trickItZero.fail()
}

func TestCheckTrickyIterZero(t *testing.T) {
	trickItZero := TrickyIterator{}
	i := 0
	for _, x := range Check(trickItZero.iterZero([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})) {
		i += x
		if i >= 36 {
			break
		}
	}

	// Don't care about value, ought to be 0 anyhow.
	t.Logf("i = %d", i)

	defer func() {
		if r := recover(); r != nil {
			t.Logf("Saw expected panic '%v'", r)
		} else {
			t.Error("Wanted to see a failure")
		}
	}()

	// Calling the stashed yield after the loop ended must panic.
	trickItZero.fail()
}

// TestBreak1 should just work, with well-behaved iterators.
// (The misbehaving iterator detector should not trigger.)
+func TestBreak1(t *testing.T) { + var result []int + var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3} + for _, x := range OfSliceIndex([]int{-1, -2, -3, -4}) { + if x == -4 { + break + } + for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if y == 3 { + break + } + result = append(result, y) + } + result = append(result, x) + } + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } +} + +// TestBreak2 should just work, with well-behaved iterators. +// (The misbehaving iterator detector should not trigger.) +func TestBreak2(t *testing.T) { + var result []int + var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3} +outer: + for _, x := range OfSliceIndex([]int{-1, -2, -3, -4}) { + for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if y == 3 { + break + } + if x == -4 { + break outer + } + + result = append(result, y) + } + result = append(result, x) + } + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } +} + +// TestContinue should just work, with well-behaved iterators. +// (The misbehaving iterator detector should not trigger.) +func TestContinue(t *testing.T) { + var result []int + var expect = []int{-1, 1, 2, -2, 1, 2, -3, 1, 2, -4} +outer: + for _, x := range OfSliceIndex([]int{-1, -2, -3, -4}) { + result = append(result, x) + for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if y == 3 { + continue outer + } + if x == -4 { + break outer + } + + result = append(result, y) + } + result = append(result, x-10) + } + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } +} + +// TestBreak3 should just work, with well-behaved iterators. +// (The misbehaving iterator detector should not trigger.) 
+func TestBreak3(t *testing.T) { + var result []int + var expect = []int{100, 10, 2, 4, 200, 10, 2, 4, 20, 2, 4, 300, 10, 2, 4, 20, 2, 4, 30} +X: + for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) { + Y: + for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) { + if 10*y >= x { + break + } + result = append(result, y) + if y == 30 { + continue X + } + Z: + for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if z&1 == 1 { + continue Z + } + result = append(result, z) + if z >= 4 { + continue Y + } + } + result = append(result, -y) // should never be executed + } + result = append(result, x) + } + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } +} + +// TestBreak1BadA should end in a panic when the outer-loop's +// single-level break is ignore by BadOfSliceIndex +func TestBreak1BadA(t *testing.T) { + var result []int + var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3} + + defer func() { + if r := recover(); r != nil { + t.Logf("Saw expected panic '%v'", r) + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } + } else { + t.Error("Wanted to see a failure") + } + }() + + for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) { + if x == -4 { + break + } + for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if y == 3 { + break + } + result = append(result, y) + } + result = append(result, x) + } +} + +// TestBreak1BadB should end in a panic, sooner, when the inner-loop's +// (nested) single-level break is ignored by BadOfSliceIndex +func TestBreak1BadB(t *testing.T) { + var result []int + var expect = []int{1, 2} // inner breaks, panics, after before outer appends + + defer func() { + if r := recover(); r != nil { + t.Logf("Saw expected panic '%v'", r) + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } + } else { + t.Error("Wanted to see a failure") + } + }() + + for _, x := range 
OfSliceIndex([]int{-1, -2, -3, -4, -5}) { + if x == -4 { + break + } + for _, y := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if y == 3 { + break + } + result = append(result, y) + } + result = append(result, x) + } +} + +// TestMultiCont0 tests multilevel continue with no bad iterators +// (it should just work) +func TestMultiCont0(t *testing.T) { + var result []int + var expect = []int{1000, 10, 2, 4, 2000} + +W: + for _, w := range OfSliceIndex([]int{1000, 2000}) { + result = append(result, w) + if w == 2000 { + break + } + for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) { + for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) { + result = append(result, y) + for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if z&1 == 1 { + continue + } + result = append(result, z) + if z >= 4 { + continue W // modified to be multilevel + } + } + result = append(result, -y) // should never be executed + } + result = append(result, x) + } + } + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } +} + +// TestMultiCont1 tests multilevel continue with a bad iterator +// in the outermost loop exited by the continue. 
+func TestMultiCont1(t *testing.T) { + var result []int + var expect = []int{1000, 10, 2, 4} + defer func() { + if r := recover(); r != nil { + t.Logf("Saw expected panic '%v'", r) + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } + } else { + t.Errorf("Wanted to see a failure, result was %v", result) + } + }() + +W: + for _, w := range OfSliceIndex([]int{1000, 2000}) { + result = append(result, w) + if w == 2000 { + break + } + for _, x := range BadOfSliceIndex([]int{100, 200, 300, 400}) { + for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) { + result = append(result, y) + for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if z&1 == 1 { + continue + } + result = append(result, z) + if z >= 4 { + continue W + } + } + result = append(result, -y) // should never be executed + } + result = append(result, x) + } + } + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } +} + +// TestMultiCont2 tests multilevel continue with a bad iterator +// in a middle loop exited by the continue. 
+func TestMultiCont2(t *testing.T) { + var result []int + var expect = []int{1000, 10, 2, 4} + defer func() { + if r := recover(); r != nil { + t.Logf("Saw expected panic '%v'", r) + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } + } else { + t.Errorf("Wanted to see a failure, result was %v", result) + } + }() + +W: + for _, w := range OfSliceIndex([]int{1000, 2000}) { + result = append(result, w) + if w == 2000 { + break + } + for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) { + for _, y := range BadOfSliceIndex([]int{10, 20, 30, 40}) { + result = append(result, y) + for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if z&1 == 1 { + continue + } + result = append(result, z) + if z >= 4 { + continue W + } + } + result = append(result, -y) // should never be executed + } + result = append(result, x) + } + } + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } +} + +// TestMultiCont3 tests multilevel continue with a bad iterator +// in the innermost loop exited by the continue. 
+func TestMultiCont3(t *testing.T) { + var result []int + var expect = []int{1000, 10, 2, 4} + defer func() { + if r := recover(); r != nil { + t.Logf("Saw expected panic '%v'", r) + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } + } else { + t.Errorf("Wanted to see a failure, result was %v", result) + } + }() + +W: + for _, w := range OfSliceIndex([]int{1000, 2000}) { + result = append(result, w) + if w == 2000 { + break + } + for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) { + for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) { + result = append(result, y) + for _, z := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if z&1 == 1 { + continue + } + result = append(result, z) + if z >= 4 { + continue W + } + } + result = append(result, -y) // should never be executed + } + result = append(result, x) + } + } + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } +} + +// TestMultiBreak0 tests multilevel break with a bad iterator +// in the outermost loop exited by the break (the outermost loop). 
+func TestMultiBreak0(t *testing.T) { + var result []int + var expect = []int{1000, 10, 2, 4} + defer func() { + if r := recover(); r != nil { + t.Logf("Saw expected panic '%v'", r) + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } + } else { + t.Errorf("Wanted to see a failure, result was %v", result) + } + }() + +W: + for _, w := range BadOfSliceIndex([]int{1000, 2000}) { + result = append(result, w) + if w == 2000 { + break + } + for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) { + for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) { + result = append(result, y) + for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if z&1 == 1 { + continue + } + result = append(result, z) + if z >= 4 { + break W + } + } + result = append(result, -y) // should never be executed + } + result = append(result, x) + } + } + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } +} + +// TestMultiBreak1 tests multilevel break with a bad iterator +// in an intermediate loop exited by the break. 
+func TestMultiBreak1(t *testing.T) { + var result []int + var expect = []int{1000, 10, 2, 4} + defer func() { + if r := recover(); r != nil { + t.Logf("Saw expected panic '%v'", r) + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } + } else { + t.Errorf("Wanted to see a failure, result was %v", result) + } + }() + +W: + for _, w := range OfSliceIndex([]int{1000, 2000}) { + result = append(result, w) + if w == 2000 { + break + } + for _, x := range BadOfSliceIndex([]int{100, 200, 300, 400}) { + for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) { + result = append(result, y) + for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if z&1 == 1 { + continue + } + result = append(result, z) + if z >= 4 { + break W + } + } + result = append(result, -y) // should never be executed + } + result = append(result, x) + } + } + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } +} + +// TestMultiBreak2 tests multilevel break with two bad iterators +// in intermediate loops exited by the break. 
+func TestMultiBreak2(t *testing.T) { + var result []int + var expect = []int{1000, 10, 2, 4} + defer func() { + if r := recover(); r != nil { + t.Logf("Saw expected panic '%v'", r) + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } + } else { + t.Errorf("Wanted to see a failure, result was %v", result) + } + }() + +W: + for _, w := range OfSliceIndex([]int{1000, 2000}) { + result = append(result, w) + if w == 2000 { + break + } + for _, x := range BadOfSliceIndex([]int{100, 200, 300, 400}) { + for _, y := range BadOfSliceIndex([]int{10, 20, 30, 40}) { + result = append(result, y) + for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if z&1 == 1 { + continue + } + result = append(result, z) + if z >= 4 { + break W + } + } + result = append(result, -y) // should never be executed + } + result = append(result, x) + } + } + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } +} + +// TestMultiBreak3 tests multilevel break with the bad iterator +// in the innermost loop exited by the break. 
+func TestMultiBreak3(t *testing.T) { + var result []int + var expect = []int{1000, 10, 2, 4} + defer func() { + if r := recover(); r != nil { + t.Logf("Saw expected panic '%v'", r) + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } + } else { + t.Errorf("Wanted to see a failure, result was %v", result) + } + }() + +W: + for _, w := range OfSliceIndex([]int{1000, 2000}) { + result = append(result, w) + if w == 2000 { + break + } + for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) { + for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) { + result = append(result, y) + for _, z := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if z&1 == 1 { + continue + } + result = append(result, z) + if z >= 4 { + break W + } + } + result = append(result, -y) // should never be executed + } + result = append(result, x) + } + } + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } +} + +// veryBad tests that a loop nest behaves sensibly in the face of a +// "very bad" iterator. In this case, "sensibly" means that the +// break out of X still occurs after the very bad iterator finally +// quits running (the control flow bread crumbs remain.) +func veryBad(s []int) []int { + var result []int +X: + for _, x := range OfSliceIndex([]int{1, 2, 3}) { + + result = append(result, x) + + for _, y := range VeryBadOfSliceIndex(s) { + result = append(result, y) + break X + } + for _, z := range OfSliceIndex([]int{100, 200, 300}) { + result = append(result, z) + if z == 100 { + break + } + } + } + return result +} + +// checkVeryBad wraps a "very bad" iterator with Check, +// demonstrating that the very bad iterator also hides panics +// thrown by Check. 
+func checkVeryBad(s []int) []int { + var result []int +X: + for _, x := range OfSliceIndex([]int{1, 2, 3}) { + + result = append(result, x) + + for _, y := range Check(VeryBadOfSliceIndex(s)) { + result = append(result, y) + break X + } + for _, z := range OfSliceIndex([]int{100, 200, 300}) { + result = append(result, z) + if z == 100 { + break + } + } + } + return result +} + +// okay is the not-bad version of veryBad. +// They should behave the same. +func okay(s []int) []int { + var result []int +X: + for _, x := range OfSliceIndex([]int{1, 2, 3}) { + + result = append(result, x) + + for _, y := range OfSliceIndex(s) { + result = append(result, y) + break X + } + for _, z := range OfSliceIndex([]int{100, 200, 300}) { + result = append(result, z) + if z == 100 { + break + } + } + } + return result +} + +// TestVeryBad1 checks the behavior of an extremely poorly behaved iterator. +func TestVeryBad1(t *testing.T) { + result := veryBad([]int{10, 20, 30, 40, 50}) // odd length + expect := []int{1, 10} + + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } +} + +// TestVeryBad2 checks the behavior of an extremely poorly behaved iterator. +func TestVeryBad2(t *testing.T) { + result := veryBad([]int{10, 20, 30, 40}) // even length + expect := []int{1, 10} + + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } +} + +// TestCheckVeryBad checks the behavior of an extremely poorly behaved iterator, +// which also suppresses the exceptions from "Check" +func TestCheckVeryBad(t *testing.T) { + result := checkVeryBad([]int{10, 20, 30, 40}) // even length + expect := []int{1, 10} + + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } +} + +// TestOk is the nice version of the very bad iterator. 
+func TestOk(t *testing.T) { + result := okay([]int{10, 20, 30, 40, 50}) // odd length + expect := []int{1, 10} + + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } +} + +// testBreak1BadDefer checks that defer behaves properly even in +// the presence of loop bodies panicking out of bad iterators. +// (i.e., the instrumentation did not break defer in these loops) +func testBreak1BadDefer(t *testing.T) (result []int) { + var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3, -30, -20, -10} + + defer func() { + if r := recover(); r != nil { + t.Logf("Saw expected panic '%v'", r) + if !slices.Equal(expect, result) { + t.Errorf("(Inner) Expected %v, got %v", expect, result) + } + } else { + t.Error("Wanted to see a failure") + } + }() + + for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) { + if x == -4 { + break + } + defer func() { + result = append(result, x*10) + }() + for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if y == 3 { + break + } + result = append(result, y) + } + result = append(result, x) + } + return +} + +func TestBreak1BadDefer(t *testing.T) { + var result []int + var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3, -30, -20, -10} + result = testBreak1BadDefer(t) + if !slices.Equal(expect, result) { + t.Errorf("(Outer) Expected %v, got %v", expect, result) + } +} + +// testReturn1 has no bad iterators. 
+func testReturn1(t *testing.T) (result []int, err any) { + defer func() { + err = recover() + }() + for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) { + result = append(result, x) + if x == -4 { + break + } + defer func() { + result = append(result, x*10) + }() + for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if y == 3 { + return + } + result = append(result, y) + } + result = append(result, x) + } + return +} + +// testReturn2 has an outermost bad iterator +func testReturn2(t *testing.T) (result []int, err any) { + defer func() { + err = recover() + }() + for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) { + result = append(result, x) + if x == -4 { + break + } + defer func() { + result = append(result, x*10) + }() + for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if y == 3 { + return + } + result = append(result, y) + } + result = append(result, x) + } + return +} + +// testReturn3 has an innermost bad iterator +func testReturn3(t *testing.T) (result []int, err any) { + defer func() { + err = recover() + }() + for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) { + result = append(result, x) + if x == -4 { + break + } + defer func() { + result = append(result, x*10) + }() + for _, y := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if y == 3 { + return + } + result = append(result, y) + } + } + return +} + +// TestReturns checks that returns through bad iterators behave properly, +// for inner and outer bad iterators. 
+func TestReturns(t *testing.T) { + var result []int + var expect = []int{-1, 1, 2, -10} + var err any + + result, err = testReturn1(t) + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } + if err != nil { + t.Errorf("Unexpected error %v", err) + } + + result, err = testReturn2(t) + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } + if err == nil { + t.Errorf("Missing expected error") + } else { + t.Logf("Saw expected panic '%v'", err) + } + + result, err = testReturn3(t) + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } + if err == nil { + t.Errorf("Missing expected error") + } else { + t.Logf("Saw expected panic '%v'", err) + } + +} + +// testGotoA1 tests loop-nest-internal goto, no bad iterators. +func testGotoA1(t *testing.T) (result []int, err any) { + defer func() { + err = recover() + }() + for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) { + result = append(result, x) + if x == -4 { + break + } + defer func() { + result = append(result, x*10) + }() + for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if y == 3 { + goto A + } + result = append(result, y) + } + result = append(result, x) + A: + } + return +} + +// testGotoA2 tests loop-nest-internal goto, outer bad iterator. +func testGotoA2(t *testing.T) (result []int, err any) { + defer func() { + err = recover() + }() + for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) { + result = append(result, x) + if x == -4 { + break + } + defer func() { + result = append(result, x*10) + }() + for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if y == 3 { + goto A + } + result = append(result, y) + } + result = append(result, x) + A: + } + return +} + +// testGotoA3 tests loop-nest-internal goto, inner bad iterator. 
+func testGotoA3(t *testing.T) (result []int, err any) { + defer func() { + err = recover() + }() + for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) { + result = append(result, x) + if x == -4 { + break + } + defer func() { + result = append(result, x*10) + }() + for _, y := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if y == 3 { + goto A + } + result = append(result, y) + } + result = append(result, x) + A: + } + return +} + +func TestGotoA(t *testing.T) { + var result []int + var expect = []int{-1, 1, 2, -2, 1, 2, -3, 1, 2, -4, -30, -20, -10} + var expect3 = []int{-1, 1, 2, -10} // first goto becomes a panic + var err any + + result, err = testGotoA1(t) + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } + if err != nil { + t.Errorf("Unexpected error %v", err) + } + + result, err = testGotoA2(t) + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } + if err == nil { + t.Errorf("Missing expected error") + } else { + t.Logf("Saw expected panic '%v'", err) + } + + result, err = testGotoA3(t) + if !slices.Equal(expect3, result) { + t.Errorf("Expected %v, got %v", expect3, result) + } + if err == nil { + t.Errorf("Missing expected error") + } else { + t.Logf("Saw expected panic '%v'", err) + } +} + +// testGotoB1 tests loop-nest-exiting goto, no bad iterators. +func testGotoB1(t *testing.T) (result []int, err any) { + defer func() { + err = recover() + }() + for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) { + result = append(result, x) + if x == -4 { + break + } + defer func() { + result = append(result, x*10) + }() + for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if y == 3 { + goto B + } + result = append(result, y) + } + result = append(result, x) + } +B: + result = append(result, 999) + return +} + +// testGotoB2 tests loop-nest-exiting goto, outer bad iterator. 
+func testGotoB2(t *testing.T) (result []int, err any) { + defer func() { + err = recover() + }() + for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) { + result = append(result, x) + if x == -4 { + break + } + defer func() { + result = append(result, x*10) + }() + for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if y == 3 { + goto B + } + result = append(result, y) + } + result = append(result, x) + } +B: + result = append(result, 999) + return +} + +// testGotoB3 tests loop-nest-exiting goto, inner bad iterator. +func testGotoB3(t *testing.T) (result []int, err any) { + defer func() { + err = recover() + }() + for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) { + result = append(result, x) + if x == -4 { + break + } + defer func() { + result = append(result, x*10) + }() + for _, y := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) { + if y == 3 { + goto B + } + result = append(result, y) + } + result = append(result, x) + } +B: + result = append(result, 999) + return +} + +func TestGotoB(t *testing.T) { + var result []int + var expect = []int{-1, 1, 2, 999, -10} + var expectX = []int{-1, 1, 2, -10} + var err any + + result, err = testGotoB1(t) + if !slices.Equal(expect, result) { + t.Errorf("Expected %v, got %v", expect, result) + } + if err != nil { + t.Errorf("Unexpected error %v", err) + } + + result, err = testGotoB2(t) + if !slices.Equal(expectX, result) { + t.Errorf("Expected %v, got %v", expectX, result) + } + if err == nil { + t.Errorf("Missing expected error") + } else { + t.Logf("Saw expected panic '%v'", err) + } + + result, err = testGotoB3(t) + if !slices.Equal(expectX, result) { + t.Errorf("Expected %v, got %v", expectX, result) + } + if err == nil { + t.Errorf("Missing expected error") + } else { + t.Logf("Saw expected panic '%v'", err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/rangefunc/rewrite.go 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/rangefunc/rewrite.go new file mode 100644 index 0000000000000000000000000000000000000000..d439412ea869407bd67df86f59f4bd080fd102d0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/rangefunc/rewrite.go @@ -0,0 +1,1334 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package rangefunc rewrites range-over-func to code that doesn't use range-over-funcs. +Rewriting the construct in the front end, before noder, means the functions generated during +the rewrite are available in a noder-generated representation for inlining by the back end. + +# Theory of Operation + +The basic idea is to rewrite + + for x := range f { + ... + } + +into + + f(func(x T) bool { + ... + }) + +But it's not usually that easy. + +# Range variables + +For a range not using :=, the assigned variables cannot be function parameters +in the generated body function. Instead, we allocate fake parameters and +start the body with an assignment. For example: + + for expr1, expr2 = range f { + ... + } + +becomes + + f(func(#p1 T1, #p2 T2) bool { + expr1, expr2 = #p1, #p2 + ... + }) + +(All the generated variables have a # at the start to signal that they +are internal variables when looking at the generated code in a +debugger. Because variables have all been resolved to the specific +objects they represent, there is no danger of using plain "p1" and +colliding with a Go variable named "p1"; the # is just nice to have, +not for correctness.) + +It can also happen that there are fewer range variables than function +arguments, in which case we end up with something like + + f(func(x T1, _ T2) bool { + ... + }) + +or + + f(func(#p1 T1, #p2 T2, _ T3) bool { + expr1, expr2 = #p1, #p2 + ... + }) + +# Return + +If the body contains a "break", that break turns into "return false", +to tell f to stop. 
And if the body contains a "continue", that turns +into "return true", to tell f to proceed with the next value. +Those are the easy cases. + +If the body contains a return or a break/continue/goto L, then we need +to rewrite that into code that breaks out of the loop and then +triggers that control flow. In general we rewrite + + for x := range f { + ... + } + +into + + { + var #next int + f(func(x T1) bool { + ... + return true + }) + ... check #next ... + } + +The variable #next is an integer code that says what to do when f +returns. Each difficult statement sets #next and then returns false to +stop f. + +A plain "return" rewrites to {#next = -1; return false}. +The return false breaks the loop. Then when f returns, the "check +#next" section includes + + if #next == -1 { return } + +which causes the return we want. + +Return with arguments is more involved. We need somewhere to store the +arguments while we break out of f, so we add them to the var +declaration, like: + + { + var ( + #next int + #r1 type1 + #r2 type2 + ) + f(func(x T1) bool { + ... + { + // return a, b + #r1, #r2 = a, b + #next = -2 + return false + } + ... + return true + }) + if #next == -2 { return #r1, #r2 } + } + +TODO: What about: + + func f() (x bool) { + for range g(&x) { + return true + } + } + + func g(p *bool) func(func() bool) { + return func(yield func() bool) { + yield() + // Is *p true or false here? + } + } + +With this rewrite the "return true" is not visible after yield returns, +but maybe it should be? + +# Checking + +To permit checking that an iterator is well-behaved -- that is, that +it does not call the loop body again after it has returned false or +after the entire loop has exited (it might retain a copy of the body +function, or pass it to another goroutine) -- each generated loop has +its own #exitK flag that is checked before each iteration, and set both +at any early exit and after the iteration completes. + +For example: + + for x := range f { + ... + if ... 
{ break } + ... + } + +becomes + + { + var #exit1 bool + f(func(x T1) bool { + if #exit1 { runtime.panicrangeexit() } + ... + if ... { #exit1 = true ; return false } + ... + return true + }) + #exit1 = true + } + +# Nested Loops + +So far we've only considered a single loop. If a function contains a +sequence of loops, each can be translated individually. But loops can +be nested. It would work to translate the innermost loop and then +translate the loop around it, and so on, except that there'd be a lot +of rewriting of rewritten code and the overall traversals could end up +taking time quadratic in the depth of the nesting. To avoid all that, +we use a single rewriting pass that handles a top-most range-over-func +loop and all the range-over-func loops it contains at the same time. + +If we need to return from inside a doubly-nested loop, the rewrites +above stay the same, but the check after the inner loop only says + + if #next < 0 { return false } + +to stop the outer loop so it can do the actual return. That is, + + for range f { + for range g { + ... + return a, b + ... + } + } + +becomes + + { + var ( + #next int + #r1 type1 + #r2 type2 + ) + var #exit1 bool + f(func() { + if #exit1 { runtime.panicrangeexit() } + var #exit2 bool + g(func() { + if #exit2 { runtime.panicrangeexit() } + ... + { + // return a, b + #r1, #r2 = a, b + #next = -2 + #exit1, #exit2 = true, true + return false + } + ... + return true + }) + #exit2 = true + if #next < 0 { + return false + } + return true + }) + #exit1 = true + if #next == -2 { + return #r1, #r2 + } + } + +Note that the #next < 0 after the inner loop handles both kinds of +return with a single check. + +# Labeled break/continue of range-over-func loops + +For a labeled break or continue of an outer range-over-func, we +use positive #next values. Any such labeled break or continue +really means "do N breaks" or "do N breaks and 1 continue". +We encode that as perLoopStep*N or perLoopStep*N+1 respectively. 
+ +Loops that might need to propagate a labeled break or continue +add one or both of these to the #next checks: + + if #next >= 2 { + #next -= 2 + return false + } + + if #next == 1 { + #next = 0 + return true + } + +For example + + F: for range f { + for range g { + for range h { + ... + break F + ... + ... + continue F + ... + } + } + ... + } + +becomes + + { + var #next int + var #exit1 bool + f(func() { + if #exit1 { runtime.panicrangeexit() } + var #exit2 bool + g(func() { + if #exit2 { runtime.panicrangeexit() } + var #exit3 bool + h(func() { + if #exit3 { runtime.panicrangeexit() } + ... + { + // break F + #next = 4 + #exit1, #exit2, #exit3 = true, true, true + return false + } + ... + { + // continue F + #next = 3 + #exit2, #exit3 = true, true + return false + } + ... + return true + }) + #exit3 = true + if #next >= 2 { + #next -= 2 + return false + } + return true + }) + #exit2 = true + if #next >= 2 { + #next -= 2 + return false + } + if #next == 1 { + #next = 0 + return true + } + ... + return true + }) + #exit1 = true + } + +Note that the post-h checks only consider a break, +since no generated code tries to continue g. + +# Gotos and other labeled break/continue + +The final control flow translations are goto and break/continue of a +non-range-over-func statement. In both cases, we may need to break out +of one or more range-over-func loops before we can do the actual +control flow statement. Each such break/continue/goto L statement is +assigned a unique negative #next value (below -2, since -1 and -2 are +for the two kinds of return). Then the post-checks for a given loop +test for the specific codes that refer to labels directly targetable +from that block. Otherwise, the generic + + if #next < 0 { return false } + +check handles stopping the next loop to get one step closer to the label. + +For example + + Top: print("start\n") + for range f { + for range g { + ... + for range h { + ... + goto Top + ... 
+ } + } + } + +becomes + + Top: print("start\n") + { + var #next int + var #exit1 bool + f(func() { + if #exit1 { runtime.panicrangeexit() } + var #exit2 bool + g(func() { + if #exit2 { runtime.panicrangeexit() } + ... + var #exit3 bool + h(func() { + if #exit3 { runtime.panicrangeexit() } + ... + { + // goto Top + #next = -3 + #exit1, #exit2, #exit3 = true, true, true + return false + } + ... + return true + }) + #exit3 = true + if #next < 0 { + return false + } + return true + }) + #exit2 = true + if #next < 0 { + return false + } + return true + }) + #exit1 = true + if #next == -3 { + #next = 0 + goto Top + } + } + +Labeled break/continue to non-range-over-funcs are handled the same +way as goto. + +# Defers + +The last wrinkle is handling defer statements. If we have + + for range f { + defer print("A") + } + +we cannot rewrite that into + + f(func() { + defer print("A") + }) + +because the deferred code will run at the end of the iteration, not +the end of the containing function. To fix that, the runtime provides +a special hook that lets us obtain a defer "token" representing the +outer function and then use it in a later defer to attach the deferred +code to that outer function. + +Normally, + + defer print("A") + +compiles to + + runtime.deferproc(func() { print("A") }) + +This changes in a range-over-func. For example: + + for range f { + defer print("A") + } + +compiles to + + var #defers = runtime.deferrangefunc() + f(func() { + runtime.deferprocat(func() { print("A") }, #defers) + }) + +For this rewriting phase, we insert the explicit initialization of +#defers and then attach the #defers variable to the CallStmt +representing the defer. That variable will be propagated to the +backend and will cause the backend to compile the defer using +deferprocat instead of an ordinary deferproc. + +TODO: Could call runtime.deferrangefuncend after f. 
+*/ +package rangefunc + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/syntax" + "cmd/compile/internal/types2" + "fmt" + "go/constant" + "os" +) + +// nopos is the zero syntax.Pos. +var nopos syntax.Pos + +// A rewriter implements rewriting the range-over-funcs in a given function. +type rewriter struct { + pkg *types2.Package + info *types2.Info + outer *syntax.FuncType + body *syntax.BlockStmt + + // References to important types and values. + any types2.Object + bool types2.Object + int types2.Object + true types2.Object + false types2.Object + + // Branch numbering, computed as needed. + branchNext map[branch]int // branch -> #next value + labelLoop map[string]*syntax.ForStmt // label -> innermost rangefunc loop it is declared inside (nil for no loop) + + // Stack of nodes being visited. + stack []syntax.Node // all nodes + forStack []*forLoop // range-over-func loops + + rewritten map[*syntax.ForStmt]syntax.Stmt + + // Declared variables in generated code for outermost loop. + declStmt *syntax.DeclStmt + nextVar types2.Object + retVars []types2.Object + defers types2.Object + exitVarCount int // exitvars are referenced from their respective loops +} + +// A branch is a single labeled branch. +type branch struct { + tok syntax.Token + label string +} + +// A forLoop describes a single range-over-func loop being processed. +type forLoop struct { + nfor *syntax.ForStmt // actual syntax + exitFlag *types2.Var // #exit variable for this loop + exitFlagDecl *syntax.VarDecl + + checkRet bool // add check for "return" after loop + checkRetArgs bool // add check for "return args" after loop + checkBreak bool // add check for "break" after loop + checkContinue bool // add check for "continue" after loop + checkBranch []branch // add check for labeled branch after loop +} + +// Rewrite rewrites all the range-over-funcs in the files. 
+func Rewrite(pkg *types2.Package, info *types2.Info, files []*syntax.File) { + for _, file := range files { + syntax.Inspect(file, func(n syntax.Node) bool { + switch n := n.(type) { + case *syntax.FuncDecl: + rewriteFunc(pkg, info, n.Type, n.Body) + return false + case *syntax.FuncLit: + rewriteFunc(pkg, info, n.Type, n.Body) + return false + } + return true + }) + } +} + +// rewriteFunc rewrites all the range-over-funcs in a single function (a top-level func or a func literal). +// The typ and body are the function's type and body. +func rewriteFunc(pkg *types2.Package, info *types2.Info, typ *syntax.FuncType, body *syntax.BlockStmt) { + if body == nil { + return + } + r := &rewriter{ + pkg: pkg, + info: info, + outer: typ, + body: body, + } + syntax.Inspect(body, r.inspect) + if (base.Flag.W != 0) && r.forStack != nil { + syntax.Fdump(os.Stderr, body) + } +} + +// checkFuncMisuse reports whether to check for misuse of iterator callbacks functions. +func (r *rewriter) checkFuncMisuse() bool { + return base.Debug.RangeFuncCheck != 0 +} + +// inspect is a callback for syntax.Inspect that drives the actual rewriting. +// If it sees a func literal, it kicks off a separate rewrite for that literal. +// Otherwise, it maintains a stack of range-over-func loops and +// converts each in turn. +func (r *rewriter) inspect(n syntax.Node) bool { + switch n := n.(type) { + case *syntax.FuncLit: + rewriteFunc(r.pkg, r.info, n.Type, n.Body) + return false + + default: + // Push n onto stack. + r.stack = append(r.stack, n) + if nfor, ok := forRangeFunc(n); ok { + loop := &forLoop{nfor: nfor} + r.forStack = append(r.forStack, loop) + r.startLoop(loop) + } + + case nil: + // n == nil signals that we are done visiting + // the top-of-stack node's children. Find it. + n = r.stack[len(r.stack)-1] + + // If we are inside a range-over-func, + // take this moment to replace any break/continue/goto/return + // statements directly contained in this node. 
+ // Also replace any converted for statements + // with the rewritten block. + switch n := n.(type) { + case *syntax.BlockStmt: + for i, s := range n.List { + n.List[i] = r.editStmt(s) + } + case *syntax.CaseClause: + for i, s := range n.Body { + n.Body[i] = r.editStmt(s) + } + case *syntax.CommClause: + for i, s := range n.Body { + n.Body[i] = r.editStmt(s) + } + case *syntax.LabeledStmt: + n.Stmt = r.editStmt(n.Stmt) + } + + // Pop n. + if len(r.forStack) > 0 && r.stack[len(r.stack)-1] == r.forStack[len(r.forStack)-1].nfor { + r.endLoop(r.forStack[len(r.forStack)-1]) + r.forStack = r.forStack[:len(r.forStack)-1] + } + r.stack = r.stack[:len(r.stack)-1] + } + return true +} + +// startLoop sets up for converting a range-over-func loop. +func (r *rewriter) startLoop(loop *forLoop) { + // For first loop in function, allocate syntax for any, bool, int, true, and false. + if r.any == nil { + r.any = types2.Universe.Lookup("any") + r.bool = types2.Universe.Lookup("bool") + r.int = types2.Universe.Lookup("int") + r.true = types2.Universe.Lookup("true") + r.false = types2.Universe.Lookup("false") + r.rewritten = make(map[*syntax.ForStmt]syntax.Stmt) + } + if r.checkFuncMisuse() { + // declare the exit flag for this loop's body + loop.exitFlag, loop.exitFlagDecl = r.exitVar(loop.nfor.Pos()) + } +} + +// editStmt returns the replacement for the statement x, +// or x itself if it should be left alone. +// This includes the for loops we are converting, +// as left in x.rewritten by r.endLoop. +func (r *rewriter) editStmt(x syntax.Stmt) syntax.Stmt { + if x, ok := x.(*syntax.ForStmt); ok { + if s := r.rewritten[x]; s != nil { + return s + } + } + + if len(r.forStack) > 0 { + switch x := x.(type) { + case *syntax.BranchStmt: + return r.editBranch(x) + case *syntax.CallStmt: + if x.Tok == syntax.Defer { + return r.editDefer(x) + } + case *syntax.ReturnStmt: + return r.editReturn(x) + } + } + + return x +} + +// editDefer returns the replacement for the defer statement x. 
+// See the "Defers" section in the package doc comment above for more context. +func (r *rewriter) editDefer(x *syntax.CallStmt) syntax.Stmt { + if r.defers == nil { + // Declare and initialize the #defers token. + init := &syntax.CallExpr{ + Fun: runtimeSym(r.info, "deferrangefunc"), + } + tv := syntax.TypeAndValue{Type: r.any.Type()} + tv.SetIsValue() + init.SetTypeInfo(tv) + r.defers = r.declVar("#defers", r.any.Type(), init) + } + + // Attach the token as an "extra" argument to the defer. + x.DeferAt = r.useVar(r.defers) + setPos(x.DeferAt, x.Pos()) + return x +} + +func (r *rewriter) exitVar(pos syntax.Pos) (*types2.Var, *syntax.VarDecl) { + r.exitVarCount++ + + name := fmt.Sprintf("#exit%d", r.exitVarCount) + typ := r.bool.Type() + obj := types2.NewVar(pos, r.pkg, name, typ) + n := syntax.NewName(pos, name) + setValueType(n, typ) + r.info.Defs[n] = obj + + return obj, &syntax.VarDecl{NameList: []*syntax.Name{n}} +} + +// editReturn returns the replacement for the return statement x. +// See the "Return" section in the package doc comment above for more context. +func (r *rewriter) editReturn(x *syntax.ReturnStmt) syntax.Stmt { + // #next = -1 is return with no arguments; -2 is return with arguments. + var next int + if x.Results == nil { + next = -1 + r.forStack[0].checkRet = true + } else { + next = -2 + r.forStack[0].checkRetArgs = true + } + + // Tell the loops along the way to check for a return. + for _, loop := range r.forStack[1:] { + loop.checkRet = true + } + + // Assign results, set #next, and return false. 
+ bl := &syntax.BlockStmt{} + if x.Results != nil { + if r.retVars == nil { + for i, a := range r.outer.ResultList { + obj := r.declVar(fmt.Sprintf("#r%d", i+1), a.Type.GetTypeInfo().Type, nil) + r.retVars = append(r.retVars, obj) + } + } + bl.List = append(bl.List, &syntax.AssignStmt{Lhs: r.useList(r.retVars), Rhs: x.Results}) + } + bl.List = append(bl.List, &syntax.AssignStmt{Lhs: r.next(), Rhs: r.intConst(next)}) + if r.checkFuncMisuse() { + // mark all enclosing loop bodies as exited + for i := 0; i < len(r.forStack); i++ { + bl.List = append(bl.List, r.setExitedAt(i)) + } + } + bl.List = append(bl.List, &syntax.ReturnStmt{Results: r.useVar(r.false)}) + setPos(bl, x.Pos()) + return bl +} + +// perLoopStep is part of the encoding of loop-spanning control flow +// for function range iterators. Each multiple of two encodes a "return false" +// passing control to an enclosing iterator; a terminal value of 1 encodes +// "return true" (i.e., local continue) from the body function, and a terminal +// value of 0 encodes executing the remainder of the body function. +const perLoopStep = 2 + +// editBranch returns the replacement for the branch statement x, +// or x itself if it should be left alone. +// See the package doc comment above for more context. +func (r *rewriter) editBranch(x *syntax.BranchStmt) syntax.Stmt { + if x.Tok == syntax.Fallthrough { + // Fallthrough is unaffected by the rewrite. + return x + } + + // Find target of break/continue/goto in r.forStack. + // (The target may not be in r.forStack at all.) + targ := x.Target + i := len(r.forStack) - 1 + if x.Label == nil && r.forStack[i].nfor != targ { + // Unlabeled break or continue that's not nfor must be inside nfor. Leave alone. 
+ return x + } + for i >= 0 && r.forStack[i].nfor != targ { + i-- + } + // exitFrom is the index of the loop interior to the target of the control flow, + // if such a loop exists (it does not if i == len(r.forStack) - 1) + exitFrom := i + 1 + + // Compute the value to assign to #next and the specific return to use. + var next int + var ret *syntax.ReturnStmt + if x.Tok == syntax.Goto || i < 0 { + // goto Label + // or break/continue of labeled non-range-over-func loop. + // We may be able to leave it alone, or we may have to break + // out of one or more nested loops and then use #next to signal + // to complete the break/continue/goto. + // Figure out which range-over-func loop contains the label. + r.computeBranchNext() + nfor := r.forStack[len(r.forStack)-1].nfor + label := x.Label.Value + targ := r.labelLoop[label] + if nfor == targ { + // Label is in the innermost range-over-func loop; use it directly. + return x + } + + // Set #next to the code meaning break/continue/goto label. + next = r.branchNext[branch{x.Tok, label}] + + // Break out of nested loops up to targ. + i := len(r.forStack) - 1 + for i >= 0 && r.forStack[i].nfor != targ { + i-- + } + exitFrom = i + 1 + + // Mark loop we exit to get to targ to check for that branch. + // When i==-1 that's the outermost func body + top := r.forStack[i+1] + top.checkBranch = append(top.checkBranch, branch{x.Tok, label}) + + // Mark loops along the way to check for a plain return, so they break. + for j := i + 2; j < len(r.forStack); j++ { + r.forStack[j].checkRet = true + } + + // In the innermost loop, use a plain "return false". + ret = &syntax.ReturnStmt{Results: r.useVar(r.false)} + } else { + // break/continue of labeled range-over-func loop. + depth := len(r.forStack) - 1 - i + + // For continue of innermost loop, use "return true". + // Otherwise we are breaking the innermost loop, so "return false". 
+ + if depth == 0 && x.Tok == syntax.Continue { + ret = &syntax.ReturnStmt{Results: r.useVar(r.true)} + setPos(ret, x.Pos()) + return ret + } + ret = &syntax.ReturnStmt{Results: r.useVar(r.false)} + + // If this is a simple break, mark this loop as exited and return false. + // No adjustments to #next. + if depth == 0 { + var stmts []syntax.Stmt + if r.checkFuncMisuse() { + stmts = []syntax.Stmt{r.setExited(), ret} + } else { + stmts = []syntax.Stmt{ret} + } + bl := &syntax.BlockStmt{ + List: stmts, + } + setPos(bl, x.Pos()) + return bl + } + + // The loop inside the one we are break/continue-ing + // needs to make that happen when we break out of it. + if x.Tok == syntax.Continue { + r.forStack[exitFrom].checkContinue = true + } else { + exitFrom = i + r.forStack[exitFrom].checkBreak = true + } + + // The loops along the way just need to break. + for j := exitFrom + 1; j < len(r.forStack); j++ { + r.forStack[j].checkBreak = true + } + + // Set next to break the appropriate number of times; + // the final time may be a continue, not a break. + next = perLoopStep * depth + if x.Tok == syntax.Continue { + next-- + } + } + + // Assign #next = next and do the return. + as := &syntax.AssignStmt{Lhs: r.next(), Rhs: r.intConst(next)} + bl := &syntax.BlockStmt{ + List: []syntax.Stmt{as}, + } + + if r.checkFuncMisuse() { + // Set #exitK for this loop and those exited by the control flow. + for i := exitFrom; i < len(r.forStack); i++ { + bl.List = append(bl.List, r.setExitedAt(i)) + } + } + + bl.List = append(bl.List, ret) + setPos(bl, x.Pos()) + return bl +} + +// computeBranchNext computes the branchNext numbering +// and determines which labels end up inside which range-over-func loop bodies. 
+func (r *rewriter) computeBranchNext() { + if r.labelLoop != nil { + return + } + + r.labelLoop = make(map[string]*syntax.ForStmt) + r.branchNext = make(map[branch]int) + + var labels []string + var stack []syntax.Node + var forStack []*syntax.ForStmt + forStack = append(forStack, nil) + syntax.Inspect(r.body, func(n syntax.Node) bool { + if n != nil { + stack = append(stack, n) + if nfor, ok := forRangeFunc(n); ok { + forStack = append(forStack, nfor) + } + if n, ok := n.(*syntax.LabeledStmt); ok { + l := n.Label.Value + labels = append(labels, l) + f := forStack[len(forStack)-1] + r.labelLoop[l] = f + } + } else { + n := stack[len(stack)-1] + stack = stack[:len(stack)-1] + if n == forStack[len(forStack)-1] { + forStack = forStack[:len(forStack)-1] + } + } + return true + }) + + // Assign numbers to all the labels we observed. + used := -2 + for _, l := range labels { + used -= 3 + r.branchNext[branch{syntax.Break, l}] = used + r.branchNext[branch{syntax.Continue, l}] = used + 1 + r.branchNext[branch{syntax.Goto, l}] = used + 2 + } +} + +// endLoop finishes the conversion of a range-over-func loop. +// We have inspected and rewritten the body of the loop and can now +// construct the body function and rewrite the for loop into a call +// bracketed by any declarations and checks it requires. +func (r *rewriter) endLoop(loop *forLoop) { + // Pick apart for range X { ... } + nfor := loop.nfor + start, end := nfor.Pos(), nfor.Body.Rbrace // start, end position of for loop + rclause := nfor.Init.(*syntax.RangeClause) + rfunc := types2.CoreType(rclause.X.GetTypeInfo().Type).(*types2.Signature) // type of X - func(func(...)bool) + if rfunc.Params().Len() != 1 { + base.Fatalf("invalid typecheck of range func") + } + ftyp := types2.CoreType(rfunc.Params().At(0).Type()).(*types2.Signature) // func(...) 
bool + if ftyp.Results().Len() != 1 { + base.Fatalf("invalid typecheck of range func") + } + + // Build X(bodyFunc) + call := &syntax.ExprStmt{ + X: &syntax.CallExpr{ + Fun: rclause.X, + ArgList: []syntax.Expr{ + r.bodyFunc(nfor.Body.List, syntax.UnpackListExpr(rclause.Lhs), rclause.Def, ftyp, start, end), + }, + }, + } + setPos(call, start) + + // Build checks based on #next after X(bodyFunc) + checks := r.checks(loop, end) + + // Rewrite for vars := range X { ... } to + // + // { + // r.declStmt + // call + // checks + // } + // + // The r.declStmt can be added to by this loop or any inner loop + // during the creation of r.bodyFunc; it is only emitted in the outermost + // converted range loop. + block := &syntax.BlockStmt{Rbrace: end} + setPos(block, start) + if len(r.forStack) == 1 && r.declStmt != nil { + setPos(r.declStmt, start) + block.List = append(block.List, r.declStmt) + } + + // declare the exitFlag here so it has proper scope and zeroing + if r.checkFuncMisuse() { + exitFlagDecl := &syntax.DeclStmt{DeclList: []syntax.Decl{loop.exitFlagDecl}} + block.List = append(block.List, exitFlagDecl) + } + + // iteratorFunc(bodyFunc) + block.List = append(block.List, call) + + if r.checkFuncMisuse() { + // iteratorFunc has exited, mark the exit flag for the body + block.List = append(block.List, r.setExited()) + } + block.List = append(block.List, checks...) + + if len(r.forStack) == 1 { // ending an outermost loop + r.declStmt = nil + r.nextVar = nil + r.retVars = nil + r.defers = nil + } + + r.rewritten[nfor] = block +} + +func (r *rewriter) setExited() *syntax.AssignStmt { + return r.setExitedAt(len(r.forStack) - 1) +} + +func (r *rewriter) setExitedAt(index int) *syntax.AssignStmt { + loop := r.forStack[index] + return &syntax.AssignStmt{ + Lhs: r.useVar(loop.exitFlag), + Rhs: r.useVar(r.true), + } +} + +// bodyFunc converts the loop body (control flow has already been updated) +// to a func literal that can be passed to the range function. 
+// +// vars is the range variables from the range statement. +// def indicates whether this is a := range statement. +// ftyp is the type of the function we are creating +// start and end are the syntax positions to use for new nodes +// that should be at the start or end of the loop. +func (r *rewriter) bodyFunc(body []syntax.Stmt, lhs []syntax.Expr, def bool, ftyp *types2.Signature, start, end syntax.Pos) *syntax.FuncLit { + // Starting X(bodyFunc); build up bodyFunc first. + var params, results []*types2.Var + results = append(results, types2.NewVar(start, nil, "", r.bool.Type())) + bodyFunc := &syntax.FuncLit{ + // Note: Type is ignored but needs to be non-nil to avoid panic in syntax.Inspect. + Type: &syntax.FuncType{}, + Body: &syntax.BlockStmt{ + List: []syntax.Stmt{}, + Rbrace: end, + }, + } + setPos(bodyFunc, start) + + for i := 0; i < ftyp.Params().Len(); i++ { + typ := ftyp.Params().At(i).Type() + var paramVar *types2.Var + if i < len(lhs) && def { + // Reuse range variable as parameter. + x := lhs[i] + paramVar = r.info.Defs[x.(*syntax.Name)].(*types2.Var) + } else { + // Declare new parameter and assign it to range expression. + paramVar = types2.NewVar(start, r.pkg, fmt.Sprintf("#p%d", 1+i), typ) + if i < len(lhs) { + x := lhs[i] + as := &syntax.AssignStmt{Lhs: x, Rhs: r.useVar(paramVar)} + as.SetPos(x.Pos()) + setPos(as.Rhs, x.Pos()) + bodyFunc.Body.List = append(bodyFunc.Body.List, as) + } + } + params = append(params, paramVar) + } + + tv := syntax.TypeAndValue{ + Type: types2.NewSignatureType(nil, nil, nil, + types2.NewTuple(params...), + types2.NewTuple(results...), + false), + } + tv.SetIsValue() + bodyFunc.SetTypeInfo(tv) + + loop := r.forStack[len(r.forStack)-1] + + if r.checkFuncMisuse() { + bodyFunc.Body.List = append(bodyFunc.Body.List, r.assertNotExited(start, loop)) + } + + // Original loop body (already rewritten by editStmt during inspect). + bodyFunc.Body.List = append(bodyFunc.Body.List, body...) 
+ + // return true to continue at end of loop body + ret := &syntax.ReturnStmt{Results: r.useVar(r.true)} + ret.SetPos(end) + bodyFunc.Body.List = append(bodyFunc.Body.List, ret) + + return bodyFunc +} + +// checks returns the post-call checks that need to be done for the given loop. +func (r *rewriter) checks(loop *forLoop, pos syntax.Pos) []syntax.Stmt { + var list []syntax.Stmt + if len(loop.checkBranch) > 0 { + did := make(map[branch]bool) + for _, br := range loop.checkBranch { + if did[br] { + continue + } + did[br] = true + doBranch := &syntax.BranchStmt{Tok: br.tok, Label: &syntax.Name{Value: br.label}} + list = append(list, r.ifNext(syntax.Eql, r.branchNext[br], doBranch)) + } + } + if len(r.forStack) == 1 { + if loop.checkRetArgs { + list = append(list, r.ifNext(syntax.Eql, -2, retStmt(r.useList(r.retVars)))) + } + if loop.checkRet { + list = append(list, r.ifNext(syntax.Eql, -1, retStmt(nil))) + } + } else { + if loop.checkRetArgs || loop.checkRet { + // Note: next < 0 also handles gotos handled by outer loops. + // We set checkRet in that case to trigger this check. + list = append(list, r.ifNext(syntax.Lss, 0, retStmt(r.useVar(r.false)))) + } + if loop.checkBreak { + list = append(list, r.ifNext(syntax.Geq, perLoopStep, retStmt(r.useVar(r.false)))) + } + if loop.checkContinue { + list = append(list, r.ifNext(syntax.Eql, perLoopStep-1, retStmt(r.useVar(r.true)))) + } + } + + for _, j := range list { + setPos(j, pos) + } + return list +} + +// retStmt returns a return statement returning the given return values. +func retStmt(results syntax.Expr) *syntax.ReturnStmt { + return &syntax.ReturnStmt{Results: results} +} + +// ifNext returns the statement: +// +// if #next op c { adjust; then } +// +// When op is >=, adjust is #next -= c. +// When op is == and c is not -1 or -2, adjust is #next = 0. +// Otherwise adjust is omitted. 
+func (r *rewriter) ifNext(op syntax.Operator, c int, then syntax.Stmt) syntax.Stmt { + nif := &syntax.IfStmt{ + Cond: &syntax.Operation{Op: op, X: r.next(), Y: r.intConst(c)}, + Then: &syntax.BlockStmt{ + List: []syntax.Stmt{then}, + }, + } + tv := syntax.TypeAndValue{Type: r.bool.Type()} + tv.SetIsValue() + nif.Cond.SetTypeInfo(tv) + + if op == syntax.Geq { + sub := &syntax.AssignStmt{ + Op: syntax.Sub, + Lhs: r.next(), + Rhs: r.intConst(c), + } + nif.Then.List = []syntax.Stmt{sub, then} + } + if op == syntax.Eql && c != -1 && c != -2 { + clr := &syntax.AssignStmt{ + Lhs: r.next(), + Rhs: r.intConst(0), + } + nif.Then.List = []syntax.Stmt{clr, then} + } + + return nif +} + +// setValueType marks x as a value with type typ. +func setValueType(x syntax.Expr, typ syntax.Type) { + tv := syntax.TypeAndValue{Type: typ} + tv.SetIsValue() + x.SetTypeInfo(tv) +} + +// assertNotExited returns the statement: +// +// if #exitK { runtime.panicrangeexit() } +// +// where #exitK is the exit guard for loop. +func (r *rewriter) assertNotExited(start syntax.Pos, loop *forLoop) syntax.Stmt { + callPanicExpr := &syntax.CallExpr{ + Fun: runtimeSym(r.info, "panicrangeexit"), + } + setValueType(callPanicExpr, nil) // no result type + + callPanic := &syntax.ExprStmt{X: callPanicExpr} + + nif := &syntax.IfStmt{ + Cond: r.useVar(loop.exitFlag), + Then: &syntax.BlockStmt{ + List: []syntax.Stmt{callPanic}, + }, + } + setPos(nif, start) + return nif +} + +// next returns a reference to the #next variable. +func (r *rewriter) next() *syntax.Name { + if r.nextVar == nil { + r.nextVar = r.declVar("#next", r.int.Type(), nil) + } + return r.useVar(r.nextVar) +} + +// forRangeFunc checks whether n is a range-over-func. +// If so, it returns n.(*syntax.ForStmt), true. +// Otherwise it returns nil, false. 
+func forRangeFunc(n syntax.Node) (*syntax.ForStmt, bool) { + nfor, ok := n.(*syntax.ForStmt) + if !ok { + return nil, false + } + nrange, ok := nfor.Init.(*syntax.RangeClause) + if !ok { + return nil, false + } + _, ok = types2.CoreType(nrange.X.GetTypeInfo().Type).(*types2.Signature) + if !ok { + return nil, false + } + return nfor, true +} + +// intConst returns syntax for an integer literal with the given value. +func (r *rewriter) intConst(c int) *syntax.BasicLit { + lit := &syntax.BasicLit{ + Value: fmt.Sprint(c), + Kind: syntax.IntLit, + } + tv := syntax.TypeAndValue{Type: r.int.Type(), Value: constant.MakeInt64(int64(c))} + tv.SetIsValue() + lit.SetTypeInfo(tv) + return lit +} + +// useVar returns syntax for a reference to decl, which should be its declaration. +func (r *rewriter) useVar(obj types2.Object) *syntax.Name { + n := syntax.NewName(nopos, obj.Name()) + tv := syntax.TypeAndValue{Type: obj.Type()} + tv.SetIsValue() + n.SetTypeInfo(tv) + r.info.Uses[n] = obj + return n +} + +// useList is useVar for a list of decls. +func (r *rewriter) useList(vars []types2.Object) syntax.Expr { + var new []syntax.Expr + for _, obj := range vars { + new = append(new, r.useVar(obj)) + } + if len(new) == 1 { + return new[0] + } + return &syntax.ListExpr{ElemList: new} +} + +// declVar declares a variable with a given name type and initializer value. +func (r *rewriter) declVar(name string, typ types2.Type, init syntax.Expr) *types2.Var { + if r.declStmt == nil { + r.declStmt = &syntax.DeclStmt{} + } + stmt := r.declStmt + obj := types2.NewVar(stmt.Pos(), r.pkg, name, typ) + n := syntax.NewName(stmt.Pos(), name) + tv := syntax.TypeAndValue{Type: typ} + tv.SetIsValue() + n.SetTypeInfo(tv) + r.info.Defs[n] = obj + stmt.DeclList = append(stmt.DeclList, &syntax.VarDecl{ + NameList: []*syntax.Name{n}, + // Note: Type is ignored + Values: init, + }) + return obj +} + +// declType declares a type with the given name and type. 
+// This is more like "type name = typ" than "type name typ". +func declType(pos syntax.Pos, name string, typ types2.Type) *syntax.Name { + n := syntax.NewName(pos, name) + n.SetTypeInfo(syntax.TypeAndValue{Type: typ}) + return n +} + +// runtimePkg is a fake runtime package that contains what we need to refer to in package runtime. +var runtimePkg = func() *types2.Package { + var nopos syntax.Pos + pkg := types2.NewPackage("runtime", "runtime") + anyType := types2.Universe.Lookup("any").Type() + + // func deferrangefunc() unsafe.Pointer + obj := types2.NewFunc(nopos, pkg, "deferrangefunc", types2.NewSignatureType(nil, nil, nil, nil, types2.NewTuple(types2.NewParam(nopos, pkg, "extra", anyType)), false)) + pkg.Scope().Insert(obj) + + // func panicrangeexit() + obj = types2.NewFunc(nopos, pkg, "panicrangeexit", types2.NewSignatureType(nil, nil, nil, nil, nil, false)) + pkg.Scope().Insert(obj) + + return pkg +}() + +// runtimeSym returns a reference to a symbol in the fake runtime package. +func runtimeSym(info *types2.Info, name string) *syntax.Name { + obj := runtimePkg.Scope().Lookup(name) + n := syntax.NewName(nopos, "runtime."+name) + tv := syntax.TypeAndValue{Type: obj.Type()} + tv.SetIsValue() + tv.SetIsRuntimeHelper() + n.SetTypeInfo(tv) + info.Uses[n] = obj + return n +} + +// setPos walks the top structure of x that has no position assigned +// and assigns it all to have position pos. +// When setPos encounters a syntax node with a position assigned, +// setPos does not look inside that node. +// setPos only needs to handle syntax we create in this package; +// all other syntax should have positions assigned already. 
+func setPos(x syntax.Node, pos syntax.Pos) { + if x == nil { + return + } + syntax.Inspect(x, func(n syntax.Node) bool { + if n == nil || n.Pos() != nopos { + return false + } + n.SetPos(pos) + switch n := n.(type) { + case *syntax.BlockStmt: + if n.Rbrace == nopos { + n.Rbrace = pos + } + } + return true + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/reflectdata/alg.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/reflectdata/alg.go new file mode 100644 index 0000000000000000000000000000000000000000..a0f552215354eb4ddf98e2d056af74178432c119 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/reflectdata/alg.go @@ -0,0 +1,667 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package reflectdata + +import ( + "fmt" + + "cmd/compile/internal/base" + "cmd/compile/internal/compare" + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/src" +) + +// AlgType returns the fixed-width AMEMxx variants instead of the general +// AMEM kind when possible. +func AlgType(t *types.Type) types.AlgKind { + a, _ := types.AlgType(t) + if a == types.AMEM { + if t.Alignment() < int64(base.Ctxt.Arch.Alignment) && t.Alignment() < t.Size() { + // For example, we can't treat [2]int16 as an int32 if int32s require + // 4-byte alignment. See issue 46283. + return a + } + switch t.Size() { + case 0: + return types.AMEM0 + case 1: + return types.AMEM8 + case 2: + return types.AMEM16 + case 4: + return types.AMEM32 + case 8: + return types.AMEM64 + case 16: + return types.AMEM128 + } + } + + return a +} + +// genhash returns a symbol which is the closure used to compute +// the hash of a value of type t. +// Note: the generated function must match runtime.typehash exactly. 
+func genhash(t *types.Type) *obj.LSym { + switch AlgType(t) { + default: + // genhash is only called for types that have equality + base.Fatalf("genhash %v", t) + case types.AMEM0: + return sysClosure("memhash0") + case types.AMEM8: + return sysClosure("memhash8") + case types.AMEM16: + return sysClosure("memhash16") + case types.AMEM32: + return sysClosure("memhash32") + case types.AMEM64: + return sysClosure("memhash64") + case types.AMEM128: + return sysClosure("memhash128") + case types.ASTRING: + return sysClosure("strhash") + case types.AINTER: + return sysClosure("interhash") + case types.ANILINTER: + return sysClosure("nilinterhash") + case types.AFLOAT32: + return sysClosure("f32hash") + case types.AFLOAT64: + return sysClosure("f64hash") + case types.ACPLX64: + return sysClosure("c64hash") + case types.ACPLX128: + return sysClosure("c128hash") + case types.AMEM: + // For other sizes of plain memory, we build a closure + // that calls memhash_varlen. The size of the memory is + // encoded in the first slot of the closure. + closure := TypeLinksymLookup(fmt.Sprintf(".hashfunc%d", t.Size())) + if len(closure.P) > 0 { // already generated + return closure + } + if memhashvarlen == nil { + memhashvarlen = typecheck.LookupRuntimeFunc("memhash_varlen") + } + ot := 0 + ot = objw.SymPtr(closure, ot, memhashvarlen, 0) + ot = objw.Uintptr(closure, ot, uint64(t.Size())) // size encoded in closure + objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA) + return closure + case types.ASPECIAL: + break + } + + closure := TypeLinksymPrefix(".hashfunc", t) + if len(closure.P) > 0 { // already generated + return closure + } + + // Generate hash functions for subtypes. + // There are cases where we might not use these hashes, + // but in that case they will get dead-code eliminated. + // (And the closure generated by genhash will also get + // dead-code eliminated, as we call the subtype hashers + // directly.) 
+ switch t.Kind() { + case types.TARRAY: + genhash(t.Elem()) + case types.TSTRUCT: + for _, f := range t.Fields() { + genhash(f.Type) + } + } + + if base.Flag.LowerR != 0 { + fmt.Printf("genhash %v %v\n", closure, t) + } + + fn := hashFunc(t) + + // Build closure. It doesn't close over any variables, so + // it contains just the function pointer. + objw.SymPtr(closure, 0, fn.Linksym(), 0) + objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA) + + return closure +} + +func hashFunc(t *types.Type) *ir.Func { + sym := TypeSymPrefix(".hash", t) + if sym.Def != nil { + return sym.Def.(*ir.Name).Func + } + + pos := base.AutogeneratedPos // less confusing than end of input + base.Pos = pos + + // func sym(p *T, h uintptr) uintptr + fn := ir.NewFunc(pos, pos, sym, types.NewSignature(nil, + []*types.Field{ + types.NewField(pos, typecheck.Lookup("p"), types.NewPtr(t)), + types.NewField(pos, typecheck.Lookup("h"), types.Types[types.TUINTPTR]), + }, + []*types.Field{ + types.NewField(pos, nil, types.Types[types.TUINTPTR]), + }, + )) + sym.Def = fn.Nname + fn.Pragma |= ir.Noinline // TODO(mdempsky): We need to emit this during the unified frontend instead, to allow inlining. + + typecheck.DeclFunc(fn) + np := fn.Dcl[0] + nh := fn.Dcl[1] + + switch t.Kind() { + case types.TARRAY: + // An array of pure memory would be handled by the + // standard algorithm, so the element type must not be + // pure memory. 
+ hashel := hashfor(t.Elem()) + + // for i := 0; i < nelem; i++ + ni := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT]) + init := ir.NewAssignStmt(base.Pos, ni, ir.NewInt(base.Pos, 0)) + cond := ir.NewBinaryExpr(base.Pos, ir.OLT, ni, ir.NewInt(base.Pos, t.NumElem())) + post := ir.NewAssignStmt(base.Pos, ni, ir.NewBinaryExpr(base.Pos, ir.OADD, ni, ir.NewInt(base.Pos, 1))) + loop := ir.NewForStmt(base.Pos, nil, cond, post, nil, false) + loop.PtrInit().Append(init) + + // h = hashel(&p[i], h) + call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil) + + nx := ir.NewIndexExpr(base.Pos, np, ni) + nx.SetBounded(true) + na := typecheck.NodAddr(nx) + call.Args.Append(na) + call.Args.Append(nh) + loop.Body.Append(ir.NewAssignStmt(base.Pos, nh, call)) + + fn.Body.Append(loop) + + case types.TSTRUCT: + // Walk the struct using memhash for runs of AMEM + // and calling specific hash functions for the others. + for i, fields := 0, t.Fields(); i < len(fields); { + f := fields[i] + + // Skip blank fields. + if f.Sym.IsBlank() { + i++ + continue + } + + // Hash non-memory fields with appropriate hash function. + if !compare.IsRegularMemory(f.Type) { + hashel := hashfor(f.Type) + call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil) + na := typecheck.NodAddr(typecheck.DotField(base.Pos, np, i)) + call.Args.Append(na) + call.Args.Append(nh) + fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call)) + i++ + continue + } + + // Otherwise, hash a maximal length run of raw memory. 
+ size, next := compare.Memrun(t, i) + + // h = hashel(&p.first, size, h) + hashel := hashmem(f.Type) + call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil) + na := typecheck.NodAddr(typecheck.DotField(base.Pos, np, i)) + call.Args.Append(na) + call.Args.Append(nh) + call.Args.Append(ir.NewInt(base.Pos, size)) + fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call)) + + i = next + } + } + + r := ir.NewReturnStmt(base.Pos, nil) + r.Results.Append(nh) + fn.Body.Append(r) + + if base.Flag.LowerR != 0 { + ir.DumpList("genhash body", fn.Body) + } + + typecheck.FinishFuncBody() + + fn.SetDupok(true) + + ir.WithFunc(fn, func() { + typecheck.Stmts(fn.Body) + }) + + fn.SetNilCheckDisabled(true) + + return fn +} + +func runtimeHashFor(name string, t *types.Type) *ir.Name { + return typecheck.LookupRuntime(name, t) +} + +// hashfor returns the function to compute the hash of a value of type t. +func hashfor(t *types.Type) *ir.Name { + switch a, _ := types.AlgType(t); a { + case types.AMEM: + base.Fatalf("hashfor with AMEM type") + case types.AINTER: + return runtimeHashFor("interhash", t) + case types.ANILINTER: + return runtimeHashFor("nilinterhash", t) + case types.ASTRING: + return runtimeHashFor("strhash", t) + case types.AFLOAT32: + return runtimeHashFor("f32hash", t) + case types.AFLOAT64: + return runtimeHashFor("f64hash", t) + case types.ACPLX64: + return runtimeHashFor("c64hash", t) + case types.ACPLX128: + return runtimeHashFor("c128hash", t) + } + + fn := hashFunc(t) + return fn.Nname +} + +// sysClosure returns a closure which will call the +// given runtime function (with no closed-over variables). +func sysClosure(name string) *obj.LSym { + s := typecheck.LookupRuntimeVar(name + "·f") + if len(s.P) == 0 { + f := typecheck.LookupRuntimeFunc(name) + objw.SymPtr(s, 0, f, 0) + objw.Global(s, int32(types.PtrSize), obj.DUPOK|obj.RODATA) + } + return s +} + +// geneq returns a symbol which is the closure used to compute +// equality for two objects of type t. 
+func geneq(t *types.Type) *obj.LSym { + switch AlgType(t) { + case types.ANOEQ: + // The runtime will panic if it tries to compare + // a type with a nil equality function. + return nil + case types.AMEM0: + return sysClosure("memequal0") + case types.AMEM8: + return sysClosure("memequal8") + case types.AMEM16: + return sysClosure("memequal16") + case types.AMEM32: + return sysClosure("memequal32") + case types.AMEM64: + return sysClosure("memequal64") + case types.AMEM128: + return sysClosure("memequal128") + case types.ASTRING: + return sysClosure("strequal") + case types.AINTER: + return sysClosure("interequal") + case types.ANILINTER: + return sysClosure("nilinterequal") + case types.AFLOAT32: + return sysClosure("f32equal") + case types.AFLOAT64: + return sysClosure("f64equal") + case types.ACPLX64: + return sysClosure("c64equal") + case types.ACPLX128: + return sysClosure("c128equal") + case types.AMEM: + // make equality closure. The size of the type + // is encoded in the closure. + closure := TypeLinksymLookup(fmt.Sprintf(".eqfunc%d", t.Size())) + if len(closure.P) != 0 { + return closure + } + if memequalvarlen == nil { + memequalvarlen = typecheck.LookupRuntimeFunc("memequal_varlen") + } + ot := 0 + ot = objw.SymPtr(closure, ot, memequalvarlen, 0) + ot = objw.Uintptr(closure, ot, uint64(t.Size())) + objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA) + return closure + case types.ASPECIAL: + break + } + + closure := TypeLinksymPrefix(".eqfunc", t) + if len(closure.P) > 0 { // already generated + return closure + } + + if base.Flag.LowerR != 0 { + fmt.Printf("geneq %v\n", t) + } + + fn := eqFunc(t) + + // Generate a closure which points at the function we just generated. + objw.SymPtr(closure, 0, fn.Linksym(), 0) + objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA) + return closure +} + +func eqFunc(t *types.Type) *ir.Func { + // Autogenerate code for equality of structs and arrays. 
+ sym := TypeSymPrefix(".eq", t) + if sym.Def != nil { + return sym.Def.(*ir.Name).Func + } + + pos := base.AutogeneratedPos // less confusing than end of input + base.Pos = pos + + // func sym(p, q *T) bool + fn := ir.NewFunc(pos, pos, sym, types.NewSignature(nil, + []*types.Field{ + types.NewField(pos, typecheck.Lookup("p"), types.NewPtr(t)), + types.NewField(pos, typecheck.Lookup("q"), types.NewPtr(t)), + }, + []*types.Field{ + types.NewField(pos, typecheck.Lookup("r"), types.Types[types.TBOOL]), + }, + )) + sym.Def = fn.Nname + fn.Pragma |= ir.Noinline // TODO(mdempsky): We need to emit this during the unified frontend instead, to allow inlining. + + typecheck.DeclFunc(fn) + np := fn.Dcl[0] + nq := fn.Dcl[1] + nr := fn.Dcl[2] + + // Label to jump to if an equality test fails. + neq := typecheck.AutoLabel(".neq") + + // We reach here only for types that have equality but + // cannot be handled by the standard algorithms, + // so t must be either an array or a struct. + switch t.Kind() { + default: + base.Fatalf("geneq %v", t) + + case types.TARRAY: + nelem := t.NumElem() + + // checkAll generates code to check the equality of all array elements. + // If unroll is greater than nelem, checkAll generates: + // + // if eq(p[0], q[0]) && eq(p[1], q[1]) && ... { + // } else { + // goto neq + // } + // + // And so on. + // + // Otherwise it generates: + // + // iterateTo := nelem/unroll*unroll + // for i := 0; i < iterateTo; i += unroll { + // if eq(p[i+0], q[i+0]) && eq(p[i+1], q[i+1]) && ... && eq(p[i+unroll-1], q[i+unroll-1]) { + // } else { + // goto neq + // } + // } + // if eq(p[iterateTo+0], q[iterateTo+0]) && eq(p[iterateTo+1], q[iterateTo+1]) && ... { + // } else { + // goto neq + // } + // + checkAll := func(unroll int64, last bool, eq func(pi, qi ir.Node) ir.Node) { + // checkIdx generates a node to check for equality at index i. 
+ checkIdx := func(i ir.Node) ir.Node { + // pi := p[i] + pi := ir.NewIndexExpr(base.Pos, np, i) + pi.SetBounded(true) + pi.SetType(t.Elem()) + // qi := q[i] + qi := ir.NewIndexExpr(base.Pos, nq, i) + qi.SetBounded(true) + qi.SetType(t.Elem()) + return eq(pi, qi) + } + + iterations := nelem / unroll + iterateTo := iterations * unroll + // If a loop is iterated only once, there shouldn't be any loop at all. + if iterations == 1 { + iterateTo = 0 + } + + if iterateTo > 0 { + // Generate an unrolled for loop. + // for i := 0; i < nelem/unroll*unroll; i += unroll + i := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT]) + init := ir.NewAssignStmt(base.Pos, i, ir.NewInt(base.Pos, 0)) + cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(base.Pos, iterateTo)) + loop := ir.NewForStmt(base.Pos, nil, cond, nil, nil, false) + loop.PtrInit().Append(init) + + // if eq(p[i+0], q[i+0]) && eq(p[i+1], q[i+1]) && ... && eq(p[i+unroll-1], q[i+unroll-1]) { + // } else { + // goto neq + // } + for j := int64(0); j < unroll; j++ { + // if check {} else { goto neq } + nif := ir.NewIfStmt(base.Pos, checkIdx(i), nil, nil) + nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq)) + loop.Body.Append(nif) + post := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(base.Pos, 1))) + loop.Body.Append(post) + } + + fn.Body.Append(loop) + + if nelem == iterateTo { + if last { + fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(base.Pos, true))) + } + return + } + } + + // Generate remaining checks, if nelem is not a multiple of unroll. + if last { + // Do last comparison in a different manner. + nelem-- + } + // if eq(p[iterateTo+0], q[iterateTo+0]) && eq(p[iterateTo+1], q[iterateTo+1]) && ... 
{ + // } else { + // goto neq + // } + for j := iterateTo; j < nelem; j++ { + // if check {} else { goto neq } + nif := ir.NewIfStmt(base.Pos, checkIdx(ir.NewInt(base.Pos, j)), nil, nil) + nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq)) + fn.Body.Append(nif) + } + if last { + fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, checkIdx(ir.NewInt(base.Pos, nelem)))) + } + } + + switch t.Elem().Kind() { + case types.TSTRING: + // Do two loops. First, check that all the lengths match (cheap). + // Second, check that all the contents match (expensive). + checkAll(3, false, func(pi, qi ir.Node) ir.Node { + // Compare lengths. + eqlen, _ := compare.EqString(pi, qi) + return eqlen + }) + checkAll(1, true, func(pi, qi ir.Node) ir.Node { + // Compare contents. + _, eqmem := compare.EqString(pi, qi) + return eqmem + }) + case types.TFLOAT32, types.TFLOAT64: + checkAll(2, true, func(pi, qi ir.Node) ir.Node { + // p[i] == q[i] + return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi) + }) + case types.TSTRUCT: + isCall := func(n ir.Node) bool { + return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC + } + var expr ir.Node + var hasCallExprs bool + allCallExprs := true + and := func(cond ir.Node) { + if expr == nil { + expr = cond + } else { + expr = ir.NewLogicalExpr(base.Pos, ir.OANDAND, expr, cond) + } + } + + var tmpPos src.XPos + pi := ir.NewIndexExpr(tmpPos, np, ir.NewInt(tmpPos, 0)) + pi.SetBounded(true) + pi.SetType(t.Elem()) + qi := ir.NewIndexExpr(tmpPos, nq, ir.NewInt(tmpPos, 0)) + qi.SetBounded(true) + qi.SetType(t.Elem()) + flatConds, canPanic := compare.EqStruct(t.Elem(), pi, qi) + for _, c := range flatConds { + if isCall(c) { + hasCallExprs = true + } else { + allCallExprs = false + } + } + if !hasCallExprs || allCallExprs || canPanic { + checkAll(1, true, func(pi, qi ir.Node) ir.Node { + // p[i] == q[i] + return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi) + }) + } else { + checkAll(4, false, func(pi, qi ir.Node) ir.Node { + expr = nil + flatConds, _ := 
compare.EqStruct(t.Elem(), pi, qi) + if len(flatConds) == 0 { + return ir.NewBool(base.Pos, true) + } + for _, c := range flatConds { + if !isCall(c) { + and(c) + } + } + return expr + }) + checkAll(2, true, func(pi, qi ir.Node) ir.Node { + expr = nil + flatConds, _ := compare.EqStruct(t.Elem(), pi, qi) + for _, c := range flatConds { + if isCall(c) { + and(c) + } + } + return expr + }) + } + default: + checkAll(1, true, func(pi, qi ir.Node) ir.Node { + // p[i] == q[i] + return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi) + }) + } + + case types.TSTRUCT: + flatConds, _ := compare.EqStruct(t, np, nq) + if len(flatConds) == 0 { + fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(base.Pos, true))) + } else { + for _, c := range flatConds[:len(flatConds)-1] { + // if cond {} else { goto neq } + n := ir.NewIfStmt(base.Pos, c, nil, nil) + n.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq)) + fn.Body.Append(n) + } + fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, flatConds[len(flatConds)-1])) + } + } + + // ret: + // return + ret := typecheck.AutoLabel(".ret") + fn.Body.Append(ir.NewLabelStmt(base.Pos, ret)) + fn.Body.Append(ir.NewReturnStmt(base.Pos, nil)) + + // neq: + // r = false + // return (or goto ret) + fn.Body.Append(ir.NewLabelStmt(base.Pos, neq)) + fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(base.Pos, false))) + if compare.EqCanPanic(t) || anyCall(fn) { + // Epilogue is large, so share it with the equal case. + fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, ret)) + } else { + // Epilogue is small, so don't bother sharing. + fn.Body.Append(ir.NewReturnStmt(base.Pos, nil)) + } + // TODO(khr): the epilogue size detection condition above isn't perfect. + // We should really do a generic CL that shares epilogues across + // the board. See #24936. 
+ + if base.Flag.LowerR != 0 { + ir.DumpList("geneq body", fn.Body) + } + + typecheck.FinishFuncBody() + + fn.SetDupok(true) + + ir.WithFunc(fn, func() { + typecheck.Stmts(fn.Body) + }) + + // Disable checknils while compiling this code. + // We are comparing a struct or an array, + // neither of which can be nil, and our comparisons + // are shallow. + fn.SetNilCheckDisabled(true) + return fn +} + +// EqFor returns ONAME node represents type t's equal function, and a boolean +// to indicates whether a length needs to be passed when calling the function. +func EqFor(t *types.Type) (ir.Node, bool) { + switch a, _ := types.AlgType(t); a { + case types.AMEM: + return typecheck.LookupRuntime("memequal", t, t), true + case types.ASPECIAL: + fn := eqFunc(t) + return fn.Nname, false + } + base.Fatalf("EqFor %v", t) + return nil, false +} + +func anyCall(fn *ir.Func) bool { + return ir.Any(fn, func(n ir.Node) bool { + // TODO(rsc): No methods? + op := n.Op() + return op == ir.OCALL || op == ir.OCALLFUNC + }) +} + +func hashmem(t *types.Type) ir.Node { + return typecheck.LookupRuntime("memhash", t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/reflectdata/alg_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/reflectdata/alg_test.go new file mode 100644 index 0000000000000000000000000000000000000000..38fb974f6197ba39502a24a0806b14b83c0f7394 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/reflectdata/alg_test.go @@ -0,0 +1,147 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package reflectdata_test + +import ( + "testing" +) + +func BenchmarkEqArrayOfStrings5(b *testing.B) { + var a [5]string + var c [5]string + + for i := 0; i < 5; i++ { + a[i] = "aaaa" + c[i] = "cccc" + } + + for j := 0; j < b.N; j++ { + _ = a == c + } +} + +func BenchmarkEqArrayOfStrings64(b *testing.B) { + var a [64]string + var c [64]string + + for i := 0; i < 64; i++ { + a[i] = "aaaa" + c[i] = "cccc" + } + + for j := 0; j < b.N; j++ { + _ = a == c + } +} + +func BenchmarkEqArrayOfStrings1024(b *testing.B) { + var a [1024]string + var c [1024]string + + for i := 0; i < 1024; i++ { + a[i] = "aaaa" + c[i] = "cccc" + } + + for j := 0; j < b.N; j++ { + _ = a == c + } +} + +func BenchmarkEqArrayOfFloats5(b *testing.B) { + var a [5]float32 + var c [5]float32 + + for i := 0; i < b.N; i++ { + _ = a == c + } +} + +func BenchmarkEqArrayOfFloats64(b *testing.B) { + var a [64]float32 + var c [64]float32 + + for i := 0; i < b.N; i++ { + _ = a == c + } +} + +func BenchmarkEqArrayOfFloats1024(b *testing.B) { + var a [1024]float32 + var c [1024]float32 + + for i := 0; i < b.N; i++ { + _ = a == c + } +} + +func BenchmarkEqArrayOfStructsEq(b *testing.B) { + type T2 struct { + a string + b int + } + const size = 1024 + var ( + str1 = "foobar" + + a [size]T2 + c [size]T2 + ) + + for i := 0; i < size; i++ { + a[i].a = str1 + c[i].a = str1 + } + + b.ResetTimer() + for j := 0; j < b.N; j++ { + _ = a == c + } +} + +func BenchmarkEqArrayOfStructsNotEq(b *testing.B) { + type T2 struct { + a string + b int + } + const size = 1024 + var ( + str1 = "foobar" + str2 = "foobarz" + + a [size]T2 + c [size]T2 + ) + + for i := 0; i < size; i++ { + a[i].a = str1 + c[i].a = str1 + } + c[len(c)-1].a = str2 + + b.ResetTimer() + for j := 0; j < b.N; j++ { + _ = a == c + } +} + +const size = 16 + +type T1 struct { + a [size]byte +} + +func BenchmarkEqStruct(b *testing.B) { + x, y := T1{}, T1{} + x.a = [size]byte{1, 2, 3, 4, 5, 6, 7, 8} + y.a = [size]byte{2, 3, 4, 5, 6, 7, 8, 9} + + for i := 0; i < 
b.N; i++ { + f := x == y + if f { + println("hello") + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/reflectdata/helpers.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/reflectdata/helpers.go new file mode 100644 index 0000000000000000000000000000000000000000..9ba62d6a2967da53fd7e7a62e812980b7666b962 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/reflectdata/helpers.go @@ -0,0 +1,216 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package reflectdata + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +func hasRType(n, rtype ir.Node, fieldName string) bool { + if rtype != nil { + return true + } + + return false +} + +// assertOp asserts that n is an op. +func assertOp(n ir.Node, op ir.Op) { + base.AssertfAt(n.Op() == op, n.Pos(), "want %v, have %v", op, n) +} + +// assertOp2 asserts that n is an op1 or op2. +func assertOp2(n ir.Node, op1, op2 ir.Op) { + base.AssertfAt(n.Op() == op1 || n.Op() == op2, n.Pos(), "want %v or %v, have %v", op1, op2, n) +} + +// kindRType asserts that typ has the given kind, and returns an +// expression that yields the *runtime._type value representing typ. +func kindRType(pos src.XPos, typ *types.Type, k types.Kind) ir.Node { + base.AssertfAt(typ.Kind() == k, pos, "want %v type, have %v", k, typ) + return TypePtrAt(pos, typ) +} + +// mapRType asserts that typ is a map type, and returns an expression +// that yields the *runtime._type value representing typ. +func mapRType(pos src.XPos, typ *types.Type) ir.Node { + return kindRType(pos, typ, types.TMAP) +} + +// chanRType asserts that typ is a map type, and returns an expression +// that yields the *runtime._type value representing typ. 
+func chanRType(pos src.XPos, typ *types.Type) ir.Node { + return kindRType(pos, typ, types.TCHAN) +} + +// sliceElemRType asserts that typ is a slice type, and returns an +// expression that yields the *runtime._type value representing typ's +// element type. +func sliceElemRType(pos src.XPos, typ *types.Type) ir.Node { + base.AssertfAt(typ.IsSlice(), pos, "want slice type, have %v", typ) + return TypePtrAt(pos, typ.Elem()) +} + +// concreteRType asserts that typ is not an interface type, and +// returns an expression that yields the *runtime._type value +// representing typ. +func concreteRType(pos src.XPos, typ *types.Type) ir.Node { + base.AssertfAt(!typ.IsInterface(), pos, "want non-interface type, have %v", typ) + return TypePtrAt(pos, typ) +} + +// AppendElemRType asserts that n is an "append" operation, and +// returns an expression that yields the *runtime._type value +// representing the result slice type's element type. +func AppendElemRType(pos src.XPos, n *ir.CallExpr) ir.Node { + assertOp(n, ir.OAPPEND) + if hasRType(n, n.RType, "RType") { + return n.RType + } + return sliceElemRType(pos, n.Type()) +} + +// CompareRType asserts that n is a comparison (== or !=) operation +// between expressions of interface and non-interface type, and +// returns an expression that yields the *runtime._type value +// representing the non-interface type. +func CompareRType(pos src.XPos, n *ir.BinaryExpr) ir.Node { + assertOp2(n, ir.OEQ, ir.ONE) + base.AssertfAt(n.X.Type().IsInterface() != n.Y.Type().IsInterface(), n.Pos(), "expect mixed interface and non-interface, have %L and %L", n.X, n.Y) + if hasRType(n, n.RType, "RType") { + return n.RType + } + typ := n.X.Type() + if typ.IsInterface() { + typ = n.Y.Type() + } + return concreteRType(pos, typ) +} + +// ConvIfaceTypeWord asserts that n is conversion to interface type, +// and returns an expression that yields the *runtime._type or +// *runtime.itab value necessary for implementing the conversion. 
+// +// - *runtime._type for the destination type, for I2I conversions +// - *runtime.itab, for T2I conversions +// - *runtime._type for the source type, for T2E conversions +func ConvIfaceTypeWord(pos src.XPos, n *ir.ConvExpr) ir.Node { + assertOp(n, ir.OCONVIFACE) + src, dst := n.X.Type(), n.Type() + base.AssertfAt(dst.IsInterface(), n.Pos(), "want interface type, have %L", n) + if hasRType(n, n.TypeWord, "TypeWord") { + return n.TypeWord + } + if dst.IsEmptyInterface() { + return concreteRType(pos, src) // direct eface construction + } + if !src.IsInterface() { + return ITabAddrAt(pos, src, dst) // direct iface construction + } + return TypePtrAt(pos, dst) // convI2I +} + +// ConvIfaceSrcRType asserts that n is a conversion from +// non-interface type to interface type, and +// returns an expression that yields the *runtime._type for copying +// the convertee value to the heap. +func ConvIfaceSrcRType(pos src.XPos, n *ir.ConvExpr) ir.Node { + assertOp(n, ir.OCONVIFACE) + if hasRType(n, n.SrcRType, "SrcRType") { + return n.SrcRType + } + return concreteRType(pos, n.X.Type()) +} + +// CopyElemRType asserts that n is a "copy" operation, and returns an +// expression that yields the *runtime._type value representing the +// destination slice type's element type. +func CopyElemRType(pos src.XPos, n *ir.BinaryExpr) ir.Node { + assertOp(n, ir.OCOPY) + if hasRType(n, n.RType, "RType") { + return n.RType + } + return sliceElemRType(pos, n.X.Type()) +} + +// DeleteMapRType asserts that n is a "delete" operation, and returns +// an expression that yields the *runtime._type value representing the +// map type. +func DeleteMapRType(pos src.XPos, n *ir.CallExpr) ir.Node { + assertOp(n, ir.ODELETE) + if hasRType(n, n.RType, "RType") { + return n.RType + } + return mapRType(pos, n.Args[0].Type()) +} + +// IndexMapRType asserts that n is a map index operation, and returns +// an expression that yields the *runtime._type value representing the +// map type. 
+func IndexMapRType(pos src.XPos, n *ir.IndexExpr) ir.Node { + assertOp(n, ir.OINDEXMAP) + if hasRType(n, n.RType, "RType") { + return n.RType + } + return mapRType(pos, n.X.Type()) +} + +// MakeChanRType asserts that n is a "make" operation for a channel +// type, and returns an expression that yields the *runtime._type +// value representing that channel type. +func MakeChanRType(pos src.XPos, n *ir.MakeExpr) ir.Node { + assertOp(n, ir.OMAKECHAN) + if hasRType(n, n.RType, "RType") { + return n.RType + } + return chanRType(pos, n.Type()) +} + +// MakeMapRType asserts that n is a "make" operation for a map type, +// and returns an expression that yields the *runtime._type value +// representing that map type. +func MakeMapRType(pos src.XPos, n *ir.MakeExpr) ir.Node { + assertOp(n, ir.OMAKEMAP) + if hasRType(n, n.RType, "RType") { + return n.RType + } + return mapRType(pos, n.Type()) +} + +// MakeSliceElemRType asserts that n is a "make" operation for a slice +// type, and returns an expression that yields the *runtime._type +// value representing that slice type's element type. +func MakeSliceElemRType(pos src.XPos, n *ir.MakeExpr) ir.Node { + assertOp2(n, ir.OMAKESLICE, ir.OMAKESLICECOPY) + if hasRType(n, n.RType, "RType") { + return n.RType + } + return sliceElemRType(pos, n.Type()) +} + +// RangeMapRType asserts that n is a "range" loop over a map value, +// and returns an expression that yields the *runtime._type value +// representing that map type. +func RangeMapRType(pos src.XPos, n *ir.RangeStmt) ir.Node { + assertOp(n, ir.ORANGE) + if hasRType(n, n.RType, "RType") { + return n.RType + } + return mapRType(pos, n.X.Type()) +} + +// UnsafeSliceElemRType asserts that n is an "unsafe.Slice" operation, +// and returns an expression that yields the *runtime._type value +// representing the result slice type's element type. 
+func UnsafeSliceElemRType(pos src.XPos, n *ir.BinaryExpr) ir.Node { + assertOp(n, ir.OUNSAFESLICE) + if hasRType(n, n.RType, "RType") { + return n.RType + } + return sliceElemRType(pos, n.Type()) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/reflectdata/reflect.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/reflectdata/reflect.go new file mode 100644 index 0000000000000000000000000000000000000000..c2407af017e4b3a780454185a24a8475cfbb1179 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/reflectdata/reflect.go @@ -0,0 +1,1898 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package reflectdata + +import ( + "encoding/binary" + "fmt" + "internal/abi" + "os" + "sort" + "strings" + "sync" + + "cmd/compile/internal/base" + "cmd/compile/internal/bitvec" + "cmd/compile/internal/compare" + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/rttype" + "cmd/compile/internal/staticdata" + "cmd/compile/internal/typebits" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/gcprog" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" +) + +type ptabEntry struct { + s *types.Sym + t *types.Type +} + +// runtime interface and reflection data structures +var ( + // protects signatset and signatslice + signatmu sync.Mutex + // Tracking which types need runtime type descriptor + signatset = make(map[*types.Type]struct{}) + // Queue of types wait to be generated runtime type descriptor + signatslice []typeAndStr + + gcsymmu sync.Mutex // protects gcsymset and gcsymslice + gcsymset = make(map[*types.Type]struct{}) +) + +type typeSig struct { + name *types.Sym + isym *obj.LSym + tsym *obj.LSym + type_ *types.Type + mtype *types.Type +} + +// Builds a type representing a Bucket structure for +// the given map type. 
This type is not visible to users - +// we include only enough information to generate a correct GC +// program for it. +// Make sure this stays in sync with runtime/map.go. +// +// A "bucket" is a "struct" { +// tophash [BUCKETSIZE]uint8 +// keys [BUCKETSIZE]keyType +// elems [BUCKETSIZE]elemType +// overflow *bucket +// } +const ( + BUCKETSIZE = abi.MapBucketCount + MAXKEYSIZE = abi.MapMaxKeyBytes + MAXELEMSIZE = abi.MapMaxElemBytes +) + +func commonSize() int { return int(rttype.Type.Size()) } // Sizeof(runtime._type{}) + +func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{}) + if t.Sym() == nil && len(methods(t)) == 0 { + return 0 + } + return int(rttype.UncommonType.Size()) +} + +func makefield(name string, t *types.Type) *types.Field { + sym := (*types.Pkg)(nil).Lookup(name) + return types.NewField(src.NoXPos, sym, t) +} + +// MapBucketType makes the map bucket type given the type of the map. +func MapBucketType(t *types.Type) *types.Type { + if t.MapType().Bucket != nil { + return t.MapType().Bucket + } + + keytype := t.Key() + elemtype := t.Elem() + types.CalcSize(keytype) + types.CalcSize(elemtype) + if keytype.Size() > MAXKEYSIZE { + keytype = types.NewPtr(keytype) + } + if elemtype.Size() > MAXELEMSIZE { + elemtype = types.NewPtr(elemtype) + } + + field := make([]*types.Field, 0, 5) + + // The first field is: uint8 topbits[BUCKETSIZE]. + arr := types.NewArray(types.Types[types.TUINT8], BUCKETSIZE) + field = append(field, makefield("topbits", arr)) + + arr = types.NewArray(keytype, BUCKETSIZE) + arr.SetNoalg(true) + keys := makefield("keys", arr) + field = append(field, keys) + + arr = types.NewArray(elemtype, BUCKETSIZE) + arr.SetNoalg(true) + elems := makefield("elems", arr) + field = append(field, elems) + + // If keys and elems have no pointers, the map implementation + // can keep a list of overflow pointers on the side so that + // buckets can be marked as having no pointers. 
+ // Arrange for the bucket to have no pointers by changing + // the type of the overflow field to uintptr in this case. + // See comment on hmap.overflow in runtime/map.go. + otyp := types.Types[types.TUNSAFEPTR] + if !elemtype.HasPointers() && !keytype.HasPointers() { + otyp = types.Types[types.TUINTPTR] + } + overflow := makefield("overflow", otyp) + field = append(field, overflow) + + // link up fields + bucket := types.NewStruct(field[:]) + bucket.SetNoalg(true) + types.CalcSize(bucket) + + // Check invariants that map code depends on. + if !types.IsComparable(t.Key()) { + base.Fatalf("unsupported map key type for %v", t) + } + if BUCKETSIZE < 8 { + base.Fatalf("bucket size %d too small for proper alignment %d", BUCKETSIZE, 8) + } + if uint8(keytype.Alignment()) > BUCKETSIZE { + base.Fatalf("key align too big for %v", t) + } + if uint8(elemtype.Alignment()) > BUCKETSIZE { + base.Fatalf("elem align %d too big for %v, BUCKETSIZE=%d", elemtype.Alignment(), t, BUCKETSIZE) + } + if keytype.Size() > MAXKEYSIZE { + base.Fatalf("key size too large for %v", t) + } + if elemtype.Size() > MAXELEMSIZE { + base.Fatalf("elem size too large for %v", t) + } + if t.Key().Size() > MAXKEYSIZE && !keytype.IsPtr() { + base.Fatalf("key indirect incorrect for %v", t) + } + if t.Elem().Size() > MAXELEMSIZE && !elemtype.IsPtr() { + base.Fatalf("elem indirect incorrect for %v", t) + } + if keytype.Size()%keytype.Alignment() != 0 { + base.Fatalf("key size not a multiple of key align for %v", t) + } + if elemtype.Size()%elemtype.Alignment() != 0 { + base.Fatalf("elem size not a multiple of elem align for %v", t) + } + if uint8(bucket.Alignment())%uint8(keytype.Alignment()) != 0 { + base.Fatalf("bucket align not multiple of key align %v", t) + } + if uint8(bucket.Alignment())%uint8(elemtype.Alignment()) != 0 { + base.Fatalf("bucket align not multiple of elem align %v", t) + } + if keys.Offset%keytype.Alignment() != 0 { + base.Fatalf("bad alignment of keys in bmap for %v", t) + } + if 
elems.Offset%elemtype.Alignment() != 0 { + base.Fatalf("bad alignment of elems in bmap for %v", t) + } + + // Double-check that overflow field is final memory in struct, + // with no padding at end. + if overflow.Offset != bucket.Size()-int64(types.PtrSize) { + base.Fatalf("bad offset of overflow in bmap for %v, overflow.Offset=%d, bucket.Size()-int64(types.PtrSize)=%d", + t, overflow.Offset, bucket.Size()-int64(types.PtrSize)) + } + + t.MapType().Bucket = bucket + + bucket.StructType().Map = t + return bucket +} + +var hmapType *types.Type + +// MapType returns a type interchangeable with runtime.hmap. +// Make sure this stays in sync with runtime/map.go. +func MapType() *types.Type { + if hmapType != nil { + return hmapType + } + + // build a struct: + // type hmap struct { + // count int + // flags uint8 + // B uint8 + // noverflow uint16 + // hash0 uint32 + // buckets unsafe.Pointer + // oldbuckets unsafe.Pointer + // nevacuate uintptr + // extra unsafe.Pointer // *mapextra + // } + // must match runtime/map.go:hmap. + fields := []*types.Field{ + makefield("count", types.Types[types.TINT]), + makefield("flags", types.Types[types.TUINT8]), + makefield("B", types.Types[types.TUINT8]), + makefield("noverflow", types.Types[types.TUINT16]), + makefield("hash0", types.Types[types.TUINT32]), // Used in walk.go for OMAKEMAP. + makefield("buckets", types.Types[types.TUNSAFEPTR]), // Used in walk.go for OMAKEMAP. + makefield("oldbuckets", types.Types[types.TUNSAFEPTR]), + makefield("nevacuate", types.Types[types.TUINTPTR]), + makefield("extra", types.Types[types.TUNSAFEPTR]), + } + + n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hmap")) + hmap := types.NewNamed(n) + n.SetType(hmap) + n.SetTypecheck(1) + + hmap.SetUnderlying(types.NewStruct(fields)) + types.CalcSize(hmap) + + // The size of hmap should be 48 bytes on 64 bit + // and 28 bytes on 32 bit platforms. 
+ if size := int64(8 + 5*types.PtrSize); hmap.Size() != size { + base.Fatalf("hmap size not correct: got %d, want %d", hmap.Size(), size) + } + + hmapType = hmap + return hmap +} + +var hiterType *types.Type + +// MapIterType returns a type interchangeable with runtime.hiter. +// Make sure this stays in sync with runtime/map.go. +func MapIterType() *types.Type { + if hiterType != nil { + return hiterType + } + + hmap := MapType() + + // build a struct: + // type hiter struct { + // key unsafe.Pointer // *Key + // elem unsafe.Pointer // *Elem + // t unsafe.Pointer // *MapType + // h *hmap + // buckets unsafe.Pointer + // bptr unsafe.Pointer // *bmap + // overflow unsafe.Pointer // *[]*bmap + // oldoverflow unsafe.Pointer // *[]*bmap + // startBucket uintptr + // offset uint8 + // wrapped bool + // B uint8 + // i uint8 + // bucket uintptr + // checkBucket uintptr + // } + // must match runtime/map.go:hiter. + fields := []*types.Field{ + makefield("key", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP. + makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP. 
+ makefield("t", types.Types[types.TUNSAFEPTR]), + makefield("h", types.NewPtr(hmap)), + makefield("buckets", types.Types[types.TUNSAFEPTR]), + makefield("bptr", types.Types[types.TUNSAFEPTR]), + makefield("overflow", types.Types[types.TUNSAFEPTR]), + makefield("oldoverflow", types.Types[types.TUNSAFEPTR]), + makefield("startBucket", types.Types[types.TUINTPTR]), + makefield("offset", types.Types[types.TUINT8]), + makefield("wrapped", types.Types[types.TBOOL]), + makefield("B", types.Types[types.TUINT8]), + makefield("i", types.Types[types.TUINT8]), + makefield("bucket", types.Types[types.TUINTPTR]), + makefield("checkBucket", types.Types[types.TUINTPTR]), + } + + // build iterator struct holding the above fields + n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hiter")) + hiter := types.NewNamed(n) + n.SetType(hiter) + n.SetTypecheck(1) + + hiter.SetUnderlying(types.NewStruct(fields)) + types.CalcSize(hiter) + if hiter.Size() != int64(12*types.PtrSize) { + base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), 12*types.PtrSize) + } + + hiterType = hiter + return hiter +} + +// methods returns the methods of the non-interface type t, sorted by name. +// Generates stub functions as needed. +func methods(t *types.Type) []*typeSig { + if t.HasShape() { + // Shape types have no methods. + return nil + } + // method type + mt := types.ReceiverBaseType(t) + + if mt == nil { + return nil + } + typecheck.CalcMethods(mt) + + // make list of methods for t, + // generating code if necessary. + var ms []*typeSig + for _, f := range mt.AllMethods() { + if f.Sym == nil { + base.Fatalf("method with no sym on %v", mt) + } + if !f.IsMethod() { + base.Fatalf("non-method on %v method %v %v", mt, f.Sym, f) + } + if f.Type.Recv() == nil { + base.Fatalf("receiver with no type on %v method %v %v", mt, f.Sym, f) + } + if f.Nointerface() && !t.IsFullyInstantiated() { + // Skip creating method wrappers if f is nointerface. 
But, if + // t is an instantiated type, we still have to call + // methodWrapper, because methodWrapper generates the actual + // generic method on the type as well. + continue + } + + // get receiver type for this particular method. + // if pointer receiver but non-pointer t and + // this is not an embedded pointer inside a struct, + // method does not apply. + if !types.IsMethodApplicable(t, f) { + continue + } + + sig := &typeSig{ + name: f.Sym, + isym: methodWrapper(t, f, true), + tsym: methodWrapper(t, f, false), + type_: typecheck.NewMethodType(f.Type, t), + mtype: typecheck.NewMethodType(f.Type, nil), + } + if f.Nointerface() { + // In the case of a nointerface method on an instantiated + // type, don't actually append the typeSig. + continue + } + ms = append(ms, sig) + } + + return ms +} + +// imethods returns the methods of the interface type t, sorted by name. +func imethods(t *types.Type) []*typeSig { + var methods []*typeSig + for _, f := range t.AllMethods() { + if f.Type.Kind() != types.TFUNC || f.Sym == nil { + continue + } + if f.Sym.IsBlank() { + base.Fatalf("unexpected blank symbol in interface method set") + } + if n := len(methods); n > 0 { + last := methods[n-1] + if !last.name.Less(f.Sym) { + base.Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym) + } + } + + sig := &typeSig{ + name: f.Sym, + mtype: f.Type, + type_: typecheck.NewMethodType(f.Type, nil), + } + methods = append(methods, sig) + + // NOTE(rsc): Perhaps an oversight that + // IfaceType.Method is not in the reflect data. + // Generate the method body, so that compiled + // code can refer to it. + methodWrapper(t, f, false) + } + + return methods +} + +func dimportpath(p *types.Pkg) { + if p.Pathsym != nil { + return + } + + if p == types.LocalPkg && base.Ctxt.Pkgpath == "" { + panic("missing pkgpath") + } + + // If we are compiling the runtime package, there are two runtime packages around + // -- localpkg and Pkgs.Runtime. 
We don't want to produce import path symbols for + // both of them, so just produce one for localpkg. + if base.Ctxt.Pkgpath == "runtime" && p == ir.Pkgs.Runtime { + return + } + + s := base.Ctxt.Lookup("type:.importpath." + p.Prefix + ".") + ot := dnameData(s, 0, p.Path, "", nil, false, false) + objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA) + s.Set(obj.AttrContentAddressable, true) + p.Pathsym = s +} + +func dgopkgpath(c rttype.Cursor, pkg *types.Pkg) { + c = c.Field("Bytes") + if pkg == nil { + c.WritePtr(nil) + return + } + + dimportpath(pkg) + c.WritePtr(pkg.Pathsym) +} + +// dgopkgpathOff writes an offset relocation to the pkg path symbol to c. +func dgopkgpathOff(c rttype.Cursor, pkg *types.Pkg) { + if pkg == nil { + c.WriteInt32(0) + return + } + + dimportpath(pkg) + c.WriteSymPtrOff(pkg.Pathsym, false) +} + +// dnameField dumps a reflect.name for a struct field. +func dnameField(c rttype.Cursor, spkg *types.Pkg, ft *types.Field) { + if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg { + base.Fatalf("package mismatch for %v", ft.Sym) + } + nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name), ft.Embedded != 0) + c.Field("Bytes").WritePtr(nsym) +} + +// dnameData writes the contents of a reflect.name into s at offset ot. +func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported, embedded bool) int { + if len(name) >= 1<<29 { + base.Fatalf("name too long: %d %s...", len(name), name[:1024]) + } + if len(tag) >= 1<<29 { + base.Fatalf("tag too long: %d %s...", len(tag), tag[:1024]) + } + var nameLen [binary.MaxVarintLen64]byte + nameLenLen := binary.PutUvarint(nameLen[:], uint64(len(name))) + var tagLen [binary.MaxVarintLen64]byte + tagLenLen := binary.PutUvarint(tagLen[:], uint64(len(tag))) + + // Encode name and tag. See reflect/type.go for details. 
+ var bits byte + l := 1 + nameLenLen + len(name) + if exported { + bits |= 1 << 0 + } + if len(tag) > 0 { + l += tagLenLen + len(tag) + bits |= 1 << 1 + } + if pkg != nil { + bits |= 1 << 2 + } + if embedded { + bits |= 1 << 3 + } + b := make([]byte, l) + b[0] = bits + copy(b[1:], nameLen[:nameLenLen]) + copy(b[1+nameLenLen:], name) + if len(tag) > 0 { + tb := b[1+nameLenLen+len(name):] + copy(tb, tagLen[:tagLenLen]) + copy(tb[tagLenLen:], tag) + } + + ot = int(s.WriteBytes(base.Ctxt, int64(ot), b)) + + if pkg != nil { + c := rttype.NewCursor(s, int64(ot), types.Types[types.TUINT32]) + dgopkgpathOff(c, pkg) + ot += 4 + } + + return ot +} + +var dnameCount int + +// dname creates a reflect.name for a struct field or method. +func dname(name, tag string, pkg *types.Pkg, exported, embedded bool) *obj.LSym { + // Write out data as "type:." to signal two things to the + // linker, first that when dynamically linking, the symbol + // should be moved to a relro section, and second that the + // contents should not be decoded as a type. + sname := "type:.namedata." + if pkg == nil { + // In the common case, share data with other packages. + if name == "" { + if exported { + sname += "-noname-exported." + tag + } else { + sname += "-noname-unexported." + tag + } + } else { + if exported { + sname += name + "." + tag + } else { + sname += name + "-" + tag + } + } + } else { + // TODO(mdempsky): We should be able to share these too (except + // maybe when dynamic linking). + sname = fmt.Sprintf("%s%s.%d", sname, types.LocalPkg.Prefix, dnameCount) + dnameCount++ + } + if embedded { + sname += ".embedded" + } + s := base.Ctxt.Lookup(sname) + if len(s.P) > 0 { + return s + } + ot := dnameData(s, 0, name, tag, pkg, exported, embedded) + objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA) + s.Set(obj.AttrContentAddressable, true) + return s +} + +// dextratype dumps the fields of a runtime.uncommontype. 
+// dataAdd is the offset in bytes after the header where the +// backing array of the []method field should be written. +func dextratype(lsym *obj.LSym, off int64, t *types.Type, dataAdd int) { + m := methods(t) + if t.Sym() == nil && len(m) == 0 { + base.Fatalf("extra requested of type with no extra info %v", t) + } + noff := types.RoundUp(off, int64(types.PtrSize)) + if noff != off { + base.Fatalf("unexpected alignment in dextratype for %v", t) + } + + for _, a := range m { + writeType(a.type_) + } + + c := rttype.NewCursor(lsym, off, rttype.UncommonType) + dgopkgpathOff(c.Field("PkgPath"), typePkg(t)) + + dataAdd += uncommonSize(t) + mcount := len(m) + if mcount != int(uint16(mcount)) { + base.Fatalf("too many methods on %v: %d", t, mcount) + } + xcount := sort.Search(mcount, func(i int) bool { return !types.IsExported(m[i].name.Name) }) + if dataAdd != int(uint32(dataAdd)) { + base.Fatalf("methods are too far away on %v: %d", t, dataAdd) + } + + c.Field("Mcount").WriteUint16(uint16(mcount)) + c.Field("Xcount").WriteUint16(uint16(xcount)) + c.Field("Moff").WriteUint32(uint32(dataAdd)) + // Note: there is an unused uint32 field here. + + // Write the backing array for the []method field. 
+ array := rttype.NewArrayCursor(lsym, off+int64(dataAdd), rttype.Method, mcount) + for i, a := range m { + exported := types.IsExported(a.name.Name) + var pkg *types.Pkg + if !exported && a.name.Pkg != typePkg(t) { + pkg = a.name.Pkg + } + nsym := dname(a.name.Name, "", pkg, exported, false) + + e := array.Elem(i) + e.Field("Name").WriteSymPtrOff(nsym, false) + dmethodptrOff(e.Field("Mtyp"), writeType(a.mtype)) + dmethodptrOff(e.Field("Ifn"), a.isym) + dmethodptrOff(e.Field("Tfn"), a.tsym) + } +} + +func typePkg(t *types.Type) *types.Pkg { + tsym := t.Sym() + if tsym == nil { + switch t.Kind() { + case types.TARRAY, types.TSLICE, types.TPTR, types.TCHAN: + if t.Elem() != nil { + tsym = t.Elem().Sym() + } + } + } + if tsym != nil && tsym.Pkg != types.BuiltinPkg { + return tsym.Pkg + } + return nil +} + +func dmethodptrOff(c rttype.Cursor, x *obj.LSym) { + c.WriteInt32(0) + r := c.Reloc() + r.Sym = x + r.Type = objabi.R_METHODOFF +} + +var kinds = []int{ + types.TINT: objabi.KindInt, + types.TUINT: objabi.KindUint, + types.TINT8: objabi.KindInt8, + types.TUINT8: objabi.KindUint8, + types.TINT16: objabi.KindInt16, + types.TUINT16: objabi.KindUint16, + types.TINT32: objabi.KindInt32, + types.TUINT32: objabi.KindUint32, + types.TINT64: objabi.KindInt64, + types.TUINT64: objabi.KindUint64, + types.TUINTPTR: objabi.KindUintptr, + types.TFLOAT32: objabi.KindFloat32, + types.TFLOAT64: objabi.KindFloat64, + types.TBOOL: objabi.KindBool, + types.TSTRING: objabi.KindString, + types.TPTR: objabi.KindPtr, + types.TSTRUCT: objabi.KindStruct, + types.TINTER: objabi.KindInterface, + types.TCHAN: objabi.KindChan, + types.TMAP: objabi.KindMap, + types.TARRAY: objabi.KindArray, + types.TSLICE: objabi.KindSlice, + types.TFUNC: objabi.KindFunc, + types.TCOMPLEX64: objabi.KindComplex64, + types.TCOMPLEX128: objabi.KindComplex128, + types.TUNSAFEPTR: objabi.KindUnsafePointer, +} + +var ( + memhashvarlen *obj.LSym + memequalvarlen *obj.LSym +) + +// dcommontype dumps the contents of a 
reflect.rtype (runtime._type) to c. +func dcommontype(c rttype.Cursor, t *types.Type) { + types.CalcSize(t) + eqfunc := geneq(t) + + sptrWeak := true + var sptr *obj.LSym + if !t.IsPtr() || t.IsPtrElem() { + tptr := types.NewPtr(t) + if t.Sym() != nil || methods(tptr) != nil { + sptrWeak = false + } + sptr = writeType(tptr) + } + + gcsym, useGCProg, ptrdata := dgcsym(t, true) + delete(gcsymset, t) + + // ../../../../reflect/type.go:/^type.rtype + // actual type structure + // type rtype struct { + // size uintptr + // ptrdata uintptr + // hash uint32 + // tflag tflag + // align uint8 + // fieldAlign uint8 + // kind uint8 + // equal func(unsafe.Pointer, unsafe.Pointer) bool + // gcdata *byte + // str nameOff + // ptrToThis typeOff + // } + c.Field("Size_").WriteUintptr(uint64(t.Size())) + c.Field("PtrBytes").WriteUintptr(uint64(ptrdata)) + c.Field("Hash").WriteUint32(types.TypeHash(t)) + + var tflag abi.TFlag + if uncommonSize(t) != 0 { + tflag |= abi.TFlagUncommon + } + if t.Sym() != nil && t.Sym().Name != "" { + tflag |= abi.TFlagNamed + } + if compare.IsRegularMemory(t) { + tflag |= abi.TFlagRegularMemory + } + + exported := false + p := t.NameString() + // If we're writing out type T, + // we are very likely to write out type *T as well. + // Use the string "*T"[1:] for "T", so that the two + // share storage. This is a cheap way to reduce the + // amount of space taken up by reflect strings. + if !strings.HasPrefix(p, "*") { + p = "*" + p + tflag |= abi.TFlagExtraStar + if t.Sym() != nil { + exported = types.IsExported(t.Sym().Name) + } + } else { + if t.Elem() != nil && t.Elem().Sym() != nil { + exported = types.IsExported(t.Elem().Sym().Name) + } + } + + if tflag != abi.TFlag(uint8(tflag)) { + // this should optimize away completely + panic("Unexpected change in size of abi.TFlag") + } + c.Field("TFlag").WriteUint8(uint8(tflag)) + + // runtime (and common sense) expects alignment to be a power of two. 
+ i := int(uint8(t.Alignment())) + + if i == 0 { + i = 1 + } + if i&(i-1) != 0 { + base.Fatalf("invalid alignment %d for %v", uint8(t.Alignment()), t) + } + c.Field("Align_").WriteUint8(uint8(t.Alignment())) + c.Field("FieldAlign_").WriteUint8(uint8(t.Alignment())) + + i = kinds[t.Kind()] + if types.IsDirectIface(t) { + i |= objabi.KindDirectIface + } + if useGCProg { + i |= objabi.KindGCProg + } + c.Field("Kind_").WriteUint8(uint8(i)) + + c.Field("Equal").WritePtr(eqfunc) + c.Field("GCData").WritePtr(gcsym) + + nsym := dname(p, "", nil, exported, false) + c.Field("Str").WriteSymPtrOff(nsym, false) + c.Field("PtrToThis").WriteSymPtrOff(sptr, sptrWeak) +} + +// TrackSym returns the symbol for tracking use of field/method f, assumed +// to be a member of struct/interface type t. +func TrackSym(t *types.Type, f *types.Field) *obj.LSym { + return base.PkgLinksym("go:track", t.LinkString()+"."+f.Sym.Name, obj.ABI0) +} + +func TypeSymPrefix(prefix string, t *types.Type) *types.Sym { + p := prefix + "." + t.LinkString() + s := types.TypeSymLookup(p) + + // This function is for looking up type-related generated functions + // (e.g. eq and hash). Make sure they are indeed generated. 
+ signatmu.Lock() + NeedRuntimeType(t) + signatmu.Unlock() + + //print("algsym: %s -> %+S\n", p, s); + + return s +} + +func TypeSym(t *types.Type) *types.Sym { + if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() { + base.Fatalf("TypeSym %v", t) + } + if t.Kind() == types.TFUNC && t.Recv() != nil { + base.Fatalf("misuse of method type: %v", t) + } + s := types.TypeSym(t) + signatmu.Lock() + NeedRuntimeType(t) + signatmu.Unlock() + return s +} + +func TypeLinksymPrefix(prefix string, t *types.Type) *obj.LSym { + return TypeSymPrefix(prefix, t).Linksym() +} + +func TypeLinksymLookup(name string) *obj.LSym { + return types.TypeSymLookup(name).Linksym() +} + +func TypeLinksym(t *types.Type) *obj.LSym { + lsym := TypeSym(t).Linksym() + signatmu.Lock() + if lsym.Extra == nil { + ti := lsym.NewTypeInfo() + ti.Type = t + } + signatmu.Unlock() + return lsym +} + +// TypePtrAt returns an expression that evaluates to the +// *runtime._type value for t. +func TypePtrAt(pos src.XPos, t *types.Type) *ir.AddrExpr { + return typecheck.LinksymAddr(pos, TypeLinksym(t), types.Types[types.TUINT8]) +} + +// ITabLsym returns the LSym representing the itab for concrete type typ implementing +// interface iface. A dummy tab will be created in the unusual case where typ doesn't +// implement iface. Normally, this wouldn't happen, because the typechecker would +// have reported a compile-time error. This situation can only happen when the +// destination type of a type assert or a type in a type switch is parameterized, so +// it may sometimes, but not always, be a type that can't implement the specified +// interface. 
+func ITabLsym(typ, iface *types.Type) *obj.LSym { + s, existed := ir.Pkgs.Itab.LookupOK(typ.LinkString() + "," + iface.LinkString()) + lsym := s.Linksym() + + if !existed { + writeITab(lsym, typ, iface, true) + } + return lsym +} + +// ITabAddrAt returns an expression that evaluates to the +// *runtime.itab value for concrete type typ implementing interface +// iface. +func ITabAddrAt(pos src.XPos, typ, iface *types.Type) *ir.AddrExpr { + s, existed := ir.Pkgs.Itab.LookupOK(typ.LinkString() + "," + iface.LinkString()) + lsym := s.Linksym() + + if !existed { + writeITab(lsym, typ, iface, false) + } + + return typecheck.LinksymAddr(pos, lsym, types.Types[types.TUINT8]) +} + +// needkeyupdate reports whether map updates with t as a key +// need the key to be updated. +func needkeyupdate(t *types.Type) bool { + switch t.Kind() { + case types.TBOOL, types.TINT, types.TUINT, types.TINT8, types.TUINT8, types.TINT16, types.TUINT16, types.TINT32, types.TUINT32, + types.TINT64, types.TUINT64, types.TUINTPTR, types.TPTR, types.TUNSAFEPTR, types.TCHAN: + return false + + case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128, // floats and complex can be +0/-0 + types.TINTER, + types.TSTRING: // strings might have smaller backing stores + return true + + case types.TARRAY: + return needkeyupdate(t.Elem()) + + case types.TSTRUCT: + for _, t1 := range t.Fields() { + if needkeyupdate(t1.Type) { + return true + } + } + return false + + default: + base.Fatalf("bad type for map key: %v", t) + return true + } +} + +// hashMightPanic reports whether the hash of a map key of type t might panic. +func hashMightPanic(t *types.Type) bool { + switch t.Kind() { + case types.TINTER: + return true + + case types.TARRAY: + return hashMightPanic(t.Elem()) + + case types.TSTRUCT: + for _, t1 := range t.Fields() { + if hashMightPanic(t1.Type) { + return true + } + } + return false + + default: + return false + } +} + +// formalType replaces predeclared aliases with real types. 
+// They've been separate internally to make error messages +// better, but we have to merge them in the reflect tables. +func formalType(t *types.Type) *types.Type { + switch t { + case types.AnyType, types.ByteType, types.RuneType: + return types.Types[t.Kind()] + } + return t +} + +func writeType(t *types.Type) *obj.LSym { + t = formalType(t) + if t.IsUntyped() { + base.Fatalf("writeType %v", t) + } + + s := types.TypeSym(t) + lsym := s.Linksym() + + // special case (look for runtime below): + // when compiling package runtime, + // emit the type structures for int, float, etc. + tbase := t + if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil { + tbase = t.Elem() + } + if tbase.Kind() == types.TFORW { + base.Fatalf("unresolved defined type: %v", tbase) + } + + // This is a fake type we generated for our builtin pseudo-runtime + // package. We'll emit a description for the real type while + // compiling package runtime, so we don't need or want to emit one + // from this fake type. + if sym := tbase.Sym(); sym != nil && sym.Pkg == ir.Pkgs.Runtime { + return lsym + } + + if s.Siggen() { + return lsym + } + s.SetSiggen(true) + + if !NeedEmit(tbase) { + if i := typecheck.BaseTypeIndex(t); i >= 0 { + lsym.Pkg = tbase.Sym().Pkg.Prefix + lsym.SymIdx = int32(i) + lsym.Set(obj.AttrIndexed, true) + } + + // TODO(mdempsky): Investigate whether this still happens. + // If we know we don't need to emit code for a type, + // we should have a link-symbol index for it. + // See also TODO in NeedEmit. + return lsym + } + + // Type layout Written by Marker + // +--------------------------------+ - 0 + // | abi/internal.Type | dcommontype + // +--------------------------------+ - A + // | additional type-dependent | code in the switch below + // | fields, e.g. 
| + // | abi/internal.ArrayType.Len | + // +--------------------------------+ - B + // | internal/abi.UncommonType | dextratype + // | This section is optional, | + // | if type has a name or methods | + // +--------------------------------+ - C + // | variable-length data | code in the switch below + // | referenced by | + // | type-dependent fields, e.g. | + // | abi/internal.StructType.Fields | + // | dataAdd = size of this section | + // +--------------------------------+ - D + // | method list, if any | dextratype + // +--------------------------------+ - E + + // UncommonType section is included if we have a name or a method. + extra := t.Sym() != nil || len(methods(t)) != 0 + + // Decide the underlying type of the descriptor, and remember + // the size we need for variable-length data. + var rt *types.Type + dataAdd := 0 + switch t.Kind() { + default: + rt = rttype.Type + case types.TARRAY: + rt = rttype.ArrayType + case types.TSLICE: + rt = rttype.SliceType + case types.TCHAN: + rt = rttype.ChanType + case types.TFUNC: + rt = rttype.FuncType + dataAdd = (t.NumRecvs() + t.NumParams() + t.NumResults()) * types.PtrSize + case types.TINTER: + rt = rttype.InterfaceType + dataAdd = len(imethods(t)) * int(rttype.IMethod.Size()) + case types.TMAP: + rt = rttype.MapType + case types.TPTR: + rt = rttype.PtrType + // TODO: use rttype.Type for Elem() is ANY? + case types.TSTRUCT: + rt = rttype.StructType + dataAdd = t.NumFields() * int(rttype.StructField.Size()) + } + + // Compute offsets of each section. + B := rt.Size() + C := B + if extra { + C = B + rttype.UncommonType.Size() + } + D := C + int64(dataAdd) + E := D + int64(len(methods(t)))*rttype.Method.Size() + + // Write the runtime._type + c := rttype.NewCursor(lsym, 0, rt) + if rt == rttype.Type { + dcommontype(c, t) + } else { + dcommontype(c.Field("Type"), t) + } + + // Write additional type-specific data + // (Both the fixed size and variable-sized sections.) 
+ switch t.Kind() { + case types.TARRAY: + // internal/abi.ArrayType + s1 := writeType(t.Elem()) + t2 := types.NewSlice(t.Elem()) + s2 := writeType(t2) + c.Field("Elem").WritePtr(s1) + c.Field("Slice").WritePtr(s2) + c.Field("Len").WriteUintptr(uint64(t.NumElem())) + + case types.TSLICE: + // internal/abi.SliceType + s1 := writeType(t.Elem()) + c.Field("Elem").WritePtr(s1) + + case types.TCHAN: + // internal/abi.ChanType + s1 := writeType(t.Elem()) + c.Field("Elem").WritePtr(s1) + c.Field("Dir").WriteInt(int64(t.ChanDir())) + + case types.TFUNC: + // internal/abi.FuncType + for _, t1 := range t.RecvParamsResults() { + writeType(t1.Type) + } + inCount := t.NumRecvs() + t.NumParams() + outCount := t.NumResults() + if t.IsVariadic() { + outCount |= 1 << 15 + } + + c.Field("InCount").WriteUint16(uint16(inCount)) + c.Field("OutCount").WriteUint16(uint16(outCount)) + + // Array of rtype pointers follows funcType. + typs := t.RecvParamsResults() + array := rttype.NewArrayCursor(lsym, C, types.Types[types.TUNSAFEPTR], len(typs)) + for i, t1 := range typs { + array.Elem(i).WritePtr(writeType(t1.Type)) + } + + case types.TINTER: + // internal/abi.InterfaceType + m := imethods(t) + n := len(m) + for _, a := range m { + writeType(a.type_) + } + + var tpkg *types.Pkg + if t.Sym() != nil && t != types.Types[t.Kind()] && t != types.ErrorType { + tpkg = t.Sym().Pkg + } + dgopkgpath(c.Field("PkgPath"), tpkg) + c.Field("Methods").WriteSlice(lsym, C, int64(n), int64(n)) + + array := rttype.NewArrayCursor(lsym, C, rttype.IMethod, n) + for i, a := range m { + exported := types.IsExported(a.name.Name) + var pkg *types.Pkg + if !exported && a.name.Pkg != tpkg { + pkg = a.name.Pkg + } + nsym := dname(a.name.Name, "", pkg, exported, false) + + e := array.Elem(i) + e.Field("Name").WriteSymPtrOff(nsym, false) + e.Field("Typ").WriteSymPtrOff(writeType(a.type_), false) + } + + case types.TMAP: + // internal/abi.MapType + s1 := writeType(t.Key()) + s2 := writeType(t.Elem()) + s3 := 
writeType(MapBucketType(t)) + hasher := genhash(t.Key()) + + c.Field("Key").WritePtr(s1) + c.Field("Elem").WritePtr(s2) + c.Field("Bucket").WritePtr(s3) + c.Field("Hasher").WritePtr(hasher) + var flags uint32 + // Note: flags must match maptype accessors in ../../../../runtime/type.go + // and maptype builder in ../../../../reflect/type.go:MapOf. + if t.Key().Size() > MAXKEYSIZE { + c.Field("KeySize").WriteUint8(uint8(types.PtrSize)) + flags |= 1 // indirect key + } else { + c.Field("KeySize").WriteUint8(uint8(t.Key().Size())) + } + + if t.Elem().Size() > MAXELEMSIZE { + c.Field("ValueSize").WriteUint8(uint8(types.PtrSize)) + flags |= 2 // indirect value + } else { + c.Field("ValueSize").WriteUint8(uint8(t.Elem().Size())) + } + c.Field("BucketSize").WriteUint16(uint16(MapBucketType(t).Size())) + if types.IsReflexive(t.Key()) { + flags |= 4 // reflexive key + } + if needkeyupdate(t.Key()) { + flags |= 8 // need key update + } + if hashMightPanic(t.Key()) { + flags |= 16 // hash might panic + } + c.Field("Flags").WriteUint32(flags) + + if u := t.Underlying(); u != t { + // If t is a named map type, also keep the underlying map + // type live in the binary. This is important to make sure that + // a named map and that same map cast to its underlying type via + // reflection, use the same hash function. See issue 37716. + r := obj.Addrel(lsym) + r.Sym = writeType(u) + r.Type = objabi.R_KEEP + } + + case types.TPTR: + // internal/abi.PtrType + if t.Elem().Kind() == types.TANY { + base.Fatalf("bad pointer base type") + } + + s1 := writeType(t.Elem()) + c.Field("Elem").WritePtr(s1) + + case types.TSTRUCT: + // internal/abi.StructType + fields := t.Fields() + for _, t1 := range fields { + writeType(t1.Type) + } + + // All non-exported struct field names within a struct + // type must originate from a single package. By + // identifying and recording that package within the + // struct type descriptor, we can omit that + // information from the field descriptors. 
+ var spkg *types.Pkg + for _, f := range fields { + if !types.IsExported(f.Sym.Name) { + spkg = f.Sym.Pkg + break + } + } + + dgopkgpath(c.Field("PkgPath"), spkg) + c.Field("Fields").WriteSlice(lsym, C, int64(len(fields)), int64(len(fields))) + + array := rttype.NewArrayCursor(lsym, C, rttype.StructField, len(fields)) + for i, f := range fields { + e := array.Elem(i) + dnameField(e.Field("Name"), spkg, f) + e.Field("Typ").WritePtr(writeType(f.Type)) + e.Field("Offset").WriteUintptr(uint64(f.Offset)) + } + } + + // Write the extra info, if any. + if extra { + dextratype(lsym, B, t, dataAdd) + } + + // Note: DUPOK is required to ensure that we don't end up with more + // than one type descriptor for a given type, if the type descriptor + // can be defined in multiple packages, that is, unnamed types, + // instantiated types and shape types. + dupok := 0 + if tbase.Sym() == nil || tbase.IsFullyInstantiated() || tbase.HasShape() { + dupok = obj.DUPOK + } + + objw.Global(lsym, int32(E), int16(dupok|obj.RODATA)) + + // The linker will leave a table of all the typelinks for + // types in the binary, so the runtime can find them. + // + // When buildmode=shared, all types are in typelinks so the + // runtime can deduplicate type pointers. + keep := base.Ctxt.Flag_dynlink + if !keep && t.Sym() == nil { + // For an unnamed type, we only need the link if the type can + // be created at run time by reflect.PointerTo and similar + // functions. If the type exists in the program, those + // functions must return the existing type structure rather + // than creating a new one. + switch t.Kind() { + case types.TPTR, types.TARRAY, types.TCHAN, types.TFUNC, types.TMAP, types.TSLICE, types.TSTRUCT: + keep = true + } + } + // Do not put Noalg types in typelinks. See issue #22605. 
+ if types.TypeHasNoAlg(t) { + keep = false + } + lsym.Set(obj.AttrMakeTypelink, keep) + + return lsym +} + +// InterfaceMethodOffset returns the offset of the i-th method in the interface +// type descriptor, ityp. +func InterfaceMethodOffset(ityp *types.Type, i int64) int64 { + // interface type descriptor layout is struct { + // _type // commonSize + // pkgpath // 1 word + // []imethod // 3 words (pointing to [...]imethod below) + // uncommontype // uncommonSize + // [...]imethod + // } + // The size of imethod is 8. + return int64(commonSize()+4*types.PtrSize+uncommonSize(ityp)) + i*8 +} + +// NeedRuntimeType ensures that a runtime type descriptor is emitted for t. +func NeedRuntimeType(t *types.Type) { + if _, ok := signatset[t]; !ok { + signatset[t] = struct{}{} + signatslice = append(signatslice, typeAndStr{t: t, short: types.TypeSymName(t), regular: t.String()}) + } +} + +func WriteRuntimeTypes() { + // Process signatslice. Use a loop, as writeType adds + // entries to signatslice while it is being processed. + for len(signatslice) > 0 { + signats := signatslice + // Sort for reproducible builds. + sort.Sort(typesByString(signats)) + for _, ts := range signats { + t := ts.t + writeType(t) + if t.Sym() != nil { + writeType(types.NewPtr(t)) + } + } + signatslice = signatslice[len(signats):] + } +} + +func WriteGCSymbols() { + // Emit GC data symbols. + gcsyms := make([]typeAndStr, 0, len(gcsymset)) + for t := range gcsymset { + gcsyms = append(gcsyms, typeAndStr{t: t, short: types.TypeSymName(t), regular: t.String()}) + } + sort.Sort(typesByString(gcsyms)) + for _, ts := range gcsyms { + dgcsym(ts.t, true) + } +} + +// writeITab writes the itab for concrete type typ implementing interface iface. If +// allowNonImplement is true, allow the case where typ does not implement iface, and just +// create a dummy itab with zeroed-out method entries. 
+func writeITab(lsym *obj.LSym, typ, iface *types.Type, allowNonImplement bool) { + // TODO(mdempsky): Fix methodWrapper, geneq, and genhash (and maybe + // others) to stop clobbering these. + oldpos, oldfn := base.Pos, ir.CurFunc + defer func() { base.Pos, ir.CurFunc = oldpos, oldfn }() + + if typ == nil || (typ.IsPtr() && typ.Elem() == nil) || typ.IsUntyped() || iface == nil || !iface.IsInterface() || iface.IsEmptyInterface() { + base.Fatalf("writeITab(%v, %v)", typ, iface) + } + + sigs := iface.AllMethods() + entries := make([]*obj.LSym, 0, len(sigs)) + + // both sigs and methods are sorted by name, + // so we can find the intersection in a single pass + for _, m := range methods(typ) { + if m.name == sigs[0].Sym { + entries = append(entries, m.isym) + if m.isym == nil { + panic("NO ISYM") + } + sigs = sigs[1:] + if len(sigs) == 0 { + break + } + } + } + completeItab := len(sigs) == 0 + if !allowNonImplement && !completeItab { + base.Fatalf("incomplete itab") + } + + // dump empty itab symbol into i.sym + // type itab struct { + // inter *interfacetype + // _type *_type + // hash uint32 // copy of _type.hash. Used for type switches. + // _ [4]byte + // fun [1]uintptr // variable sized. fun[0]==0 means _type does not implement inter. + // } + o := objw.SymPtr(lsym, 0, writeType(iface), 0) + o = objw.SymPtr(lsym, o, writeType(typ), 0) + o = objw.Uint32(lsym, o, types.TypeHash(typ)) // copy of type hash + o += 4 // skip unused field + if !completeItab { + // If typ doesn't implement iface, make method entries be zero. + o = objw.Uintptr(lsym, o, 0) + entries = entries[:0] + } + for _, fn := range entries { + o = objw.SymPtrWeak(lsym, o, fn, 0) // method pointer for each method + } + // Nothing writes static itabs, so they are read only. 
+ objw.Global(lsym, int32(o), int16(obj.DUPOK|obj.RODATA)) + lsym.Set(obj.AttrContentAddressable, true) +} + +func WritePluginTable() { + ptabs := typecheck.Target.PluginExports + if len(ptabs) == 0 { + return + } + + lsym := base.Ctxt.Lookup("go:plugin.tabs") + ot := 0 + for _, p := range ptabs { + // Dump ptab symbol into go.pluginsym package. + // + // type ptab struct { + // name nameOff + // typ typeOff // pointer to symbol + // } + nsym := dname(p.Sym().Name, "", nil, true, false) + t := p.Type() + if p.Class != ir.PFUNC { + t = types.NewPtr(t) + } + tsym := writeType(t) + ot = objw.SymPtrOff(lsym, ot, nsym) + ot = objw.SymPtrOff(lsym, ot, tsym) + // Plugin exports symbols as interfaces. Mark their types + // as UsedInIface. + tsym.Set(obj.AttrUsedInIface, true) + } + objw.Global(lsym, int32(ot), int16(obj.RODATA)) + + lsym = base.Ctxt.Lookup("go:plugin.exports") + ot = 0 + for _, p := range ptabs { + ot = objw.SymPtr(lsym, ot, p.Linksym(), 0) + } + objw.Global(lsym, int32(ot), int16(obj.RODATA)) +} + +// writtenByWriteBasicTypes reports whether typ is written by WriteBasicTypes. +// WriteBasicTypes always writes pointer types; any pointer has been stripped off typ already. +func writtenByWriteBasicTypes(typ *types.Type) bool { + if typ.Sym() == nil && typ.Kind() == types.TFUNC { + // func(error) string + if typ.NumRecvs() == 0 && + typ.NumParams() == 1 && typ.NumResults() == 1 && + typ.Param(0).Type == types.ErrorType && + typ.Result(0).Type == types.Types[types.TSTRING] { + return true + } + } + + // Now we have left the basic types plus any and error, plus slices of them. + // Strip the slice. + if typ.Sym() == nil && typ.IsSlice() { + typ = typ.Elem() + } + + // Basic types. 
+ sym := typ.Sym() + if sym != nil && (sym.Pkg == types.BuiltinPkg || sym.Pkg == types.UnsafePkg) { + return true + } + // any or error + return (sym == nil && typ.IsEmptyInterface()) || typ == types.ErrorType +} + +func WriteBasicTypes() { + // do basic types if compiling package runtime. + // they have to be in at least one package, + // and runtime is always loaded implicitly, + // so this is as good as any. + // another possible choice would be package main, + // but using runtime means fewer copies in object files. + // The code here needs to be in sync with writtenByWriteBasicTypes above. + if base.Ctxt.Pkgpath != "runtime" { + return + } + + // Note: always write NewPtr(t) because NeedEmit's caller strips the pointer. + var list []*types.Type + for i := types.Kind(1); i <= types.TBOOL; i++ { + list = append(list, types.Types[i]) + } + list = append(list, + types.Types[types.TSTRING], + types.Types[types.TUNSAFEPTR], + types.AnyType, + types.ErrorType) + for _, t := range list { + writeType(types.NewPtr(t)) + writeType(types.NewPtr(types.NewSlice(t))) + } + + // emit type for func(error) string, + // which is the type of an auto-generated wrapper. 
+ writeType(types.NewPtr(types.NewSignature(nil, []*types.Field{ + types.NewField(base.Pos, nil, types.ErrorType), + }, []*types.Field{ + types.NewField(base.Pos, nil, types.Types[types.TSTRING]), + }))) +} + +type typeAndStr struct { + t *types.Type + short string // "short" here means TypeSymName + regular string +} + +type typesByString []typeAndStr + +func (a typesByString) Len() int { return len(a) } +func (a typesByString) Less(i, j int) bool { + // put named types before unnamed types + if a[i].t.Sym() != nil && a[j].t.Sym() == nil { + return true + } + if a[i].t.Sym() == nil && a[j].t.Sym() != nil { + return false + } + + if a[i].short != a[j].short { + return a[i].short < a[j].short + } + // When the only difference between the types is whether + // they refer to byte or uint8, such as **byte vs **uint8, + // the types' NameStrings can be identical. + // To preserve deterministic sort ordering, sort these by String(). + // + // TODO(mdempsky): This all seems suspect. Using LinkString would + // avoid naming collisions, and there shouldn't be a reason to care + // about "byte" vs "uint8": they share the same runtime type + // descriptor anyway. + if a[i].regular != a[j].regular { + return a[i].regular < a[j].regular + } + // Identical anonymous interfaces defined in different locations + // will be equal for the above checks, but different in DWARF output. + // Sort by source position to ensure deterministic order. + // See issues 27013 and 30202. + if a[i].t.Kind() == types.TINTER && len(a[i].t.AllMethods()) > 0 { + return a[i].t.AllMethods()[0].Pos.Before(a[j].t.AllMethods()[0].Pos) + } + return false +} +func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap, +// which holds 1-bit entries describing where pointers are in a given type. +// Above this length, the GC information is recorded as a GC program, +// which can express repetition compactly. 
In either form, the +// information is used by the runtime to initialize the heap bitmap, +// and for large types (like 128 or more words), they are roughly the +// same speed. GC programs are never much larger and often more +// compact. (If large arrays are involved, they can be arbitrarily +// more compact.) +// +// The cutoff must be large enough that any allocation large enough to +// use a GC program is large enough that it does not share heap bitmap +// bytes with any other objects, allowing the GC program execution to +// assume an aligned start and not use atomic operations. In the current +// runtime, this means all malloc size classes larger than the cutoff must +// be multiples of four words. On 32-bit systems that's 16 bytes, and +// all size classes >= 16 bytes are 16-byte aligned, so no real constraint. +// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed +// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated +// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes +// must be >= 4. +// +// We used to use 16 because the GC programs do have some constant overhead +// to get started, and processing 128 pointers seems to be enough to +// amortize that overhead well. +// +// To make sure that the runtime's chansend can call typeBitsBulkBarrier, +// we raised the limit to 2048, so that even 32-bit systems are guaranteed to +// use bitmaps for objects up to 64 kB in size. +// +// Also known to reflect/type.go. +const maxPtrmaskBytes = 2048 + +// GCSym returns a data symbol containing GC information for type t, along +// with a boolean reporting whether the UseGCProg bit should be set in the +// type kind, and the ptrdata field to record in the reflect type information. +// GCSym may be called in concurrent backend, so it does not emit the symbol +// content. +func GCSym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) { + // Record that we need to emit the GC symbol. 
+ gcsymmu.Lock() + if _, ok := gcsymset[t]; !ok { + gcsymset[t] = struct{}{} + } + gcsymmu.Unlock() + + return dgcsym(t, false) +} + +// dgcsym returns a data symbol containing GC information for type t, along +// with a boolean reporting whether the UseGCProg bit should be set in the +// type kind, and the ptrdata field to record in the reflect type information. +// When write is true, it writes the symbol data. +func dgcsym(t *types.Type, write bool) (lsym *obj.LSym, useGCProg bool, ptrdata int64) { + ptrdata = types.PtrDataSize(t) + if ptrdata/int64(types.PtrSize) <= maxPtrmaskBytes*8 { + lsym = dgcptrmask(t, write) + return + } + + useGCProg = true + lsym, ptrdata = dgcprog(t, write) + return +} + +// dgcptrmask emits and returns the symbol containing a pointer mask for type t. +func dgcptrmask(t *types.Type, write bool) *obj.LSym { + // Bytes we need for the ptrmask. + n := (types.PtrDataSize(t)/int64(types.PtrSize) + 7) / 8 + // Runtime wants ptrmasks padded to a multiple of uintptr in size. + n = (n + int64(types.PtrSize) - 1) &^ (int64(types.PtrSize) - 1) + ptrmask := make([]byte, n) + fillptrmask(t, ptrmask) + p := fmt.Sprintf("runtime.gcbits.%x", ptrmask) + + lsym := base.Ctxt.Lookup(p) + if write && !lsym.OnList() { + for i, x := range ptrmask { + objw.Uint8(lsym, i, x) + } + objw.Global(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL) + lsym.Set(obj.AttrContentAddressable, true) + } + return lsym +} + +// fillptrmask fills in ptrmask with 1s corresponding to the +// word offsets in t that hold pointers. +// ptrmask is assumed to fit at least types.PtrDataSize(t)/PtrSize bits. 
+func fillptrmask(t *types.Type, ptrmask []byte) { + for i := range ptrmask { + ptrmask[i] = 0 + } + if !t.HasPointers() { + return + } + + vec := bitvec.New(8 * int32(len(ptrmask))) + typebits.Set(t, 0, vec) + + nptr := types.PtrDataSize(t) / int64(types.PtrSize) + for i := int64(0); i < nptr; i++ { + if vec.Get(int32(i)) { + ptrmask[i/8] |= 1 << (uint(i) % 8) + } + } +} + +// dgcprog emits and returns the symbol containing a GC program for type t +// along with the size of the data described by the program (in the range +// [types.PtrDataSize(t), t.Width]). +// In practice, the size is types.PtrDataSize(t) except for non-trivial arrays. +// For non-trivial arrays, the program describes the full t.Width size. +func dgcprog(t *types.Type, write bool) (*obj.LSym, int64) { + types.CalcSize(t) + if t.Size() == types.BADWIDTH { + base.Fatalf("dgcprog: %v badwidth", t) + } + lsym := TypeLinksymPrefix(".gcprog", t) + var p gcProg + p.init(lsym, write) + p.emit(t, 0) + offset := p.w.BitIndex() * int64(types.PtrSize) + p.end() + if ptrdata := types.PtrDataSize(t); offset < ptrdata || offset > t.Size() { + base.Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Size()) + } + return lsym, offset +} + +type gcProg struct { + lsym *obj.LSym + symoff int + w gcprog.Writer + write bool +} + +func (p *gcProg) init(lsym *obj.LSym, write bool) { + p.lsym = lsym + p.write = write && !lsym.OnList() + p.symoff = 4 // first 4 bytes hold program length + if !write { + p.w.Init(func(byte) {}) + return + } + p.w.Init(p.writeByte) + if base.Debug.GCProg > 0 { + fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym) + p.w.Debug(os.Stderr) + } +} + +func (p *gcProg) writeByte(x byte) { + p.symoff = objw.Uint8(p.lsym, p.symoff, x) +} + +func (p *gcProg) end() { + p.w.End() + if !p.write { + return + } + objw.Uint32(p.lsym, 0, uint32(p.symoff-4)) + objw.Global(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL) + p.lsym.Set(obj.AttrContentAddressable, 
true) + if base.Debug.GCProg > 0 { + fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym) + } +} + +func (p *gcProg) emit(t *types.Type, offset int64) { + types.CalcSize(t) + if !t.HasPointers() { + return + } + if t.Size() == int64(types.PtrSize) { + p.w.Ptr(offset / int64(types.PtrSize)) + return + } + switch t.Kind() { + default: + base.Fatalf("gcProg.emit: unexpected type %v", t) + + case types.TSTRING: + p.w.Ptr(offset / int64(types.PtrSize)) + + case types.TINTER: + // Note: the first word isn't a pointer. See comment in typebits.Set + p.w.Ptr(offset/int64(types.PtrSize) + 1) + + case types.TSLICE: + p.w.Ptr(offset / int64(types.PtrSize)) + + case types.TARRAY: + if t.NumElem() == 0 { + // should have been handled by haspointers check above + base.Fatalf("gcProg.emit: empty array") + } + + // Flatten array-of-array-of-array to just a big array by multiplying counts. + count := t.NumElem() + elem := t.Elem() + for elem.IsArray() { + count *= elem.NumElem() + elem = elem.Elem() + } + + if !p.w.ShouldRepeat(elem.Size()/int64(types.PtrSize), count) { + // Cheaper to just emit the bits. + for i := int64(0); i < count; i++ { + p.emit(elem, offset+i*elem.Size()) + } + return + } + p.emit(elem, offset) + p.w.ZeroUntil((offset + elem.Size()) / int64(types.PtrSize)) + p.w.Repeat(elem.Size()/int64(types.PtrSize), count-1) + + case types.TSTRUCT: + for _, t1 := range t.Fields() { + p.emit(t1.Type, offset+t1.Offset) + } + } +} + +// ZeroAddr returns the address of a symbol with at least +// size bytes of zeros. +func ZeroAddr(size int64) ir.Node { + if size >= 1<<31 { + base.Fatalf("map elem too big %d", size) + } + if ZeroSize < size { + ZeroSize = size + } + lsym := base.PkgLinksym("go:map", "zero", obj.ABI0) + x := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8]) + return typecheck.Expr(typecheck.NodAddr(x)) +} + +// NeedEmit reports whether typ is a type that we need to emit code +// for (e.g., runtime type descriptors, method wrappers). 
+func NeedEmit(typ *types.Type) bool { + // TODO(mdempsky): Export data should keep track of which anonymous + // and instantiated types were emitted, so at least downstream + // packages can skip re-emitting them. + // + // Perhaps we can just generalize the linker-symbol indexing to + // track the index of arbitrary types, not just defined types, and + // use its presence to detect this. The same idea would work for + // instantiated generic functions too. + + switch sym := typ.Sym(); { + case writtenByWriteBasicTypes(typ): + return base.Ctxt.Pkgpath == "runtime" + + case sym == nil: + // Anonymous type; possibly never seen before or ever again. + // Need to emit to be safe (however, see TODO above). + return true + + case sym.Pkg == types.LocalPkg: + // Local defined type; our responsibility. + return true + + case typ.IsFullyInstantiated(): + // Instantiated type; possibly instantiated with unique type arguments. + // Need to emit to be safe (however, see TODO above). + return true + + case typ.HasShape(): + // Shape type; need to emit even though it lives in the .shape package. + // TODO: make sure the linker deduplicates them (see dupok in writeType above). + return true + + default: + // Should have been emitted by an imported package. + return false + } +} + +// Generate a wrapper function to convert from +// a receiver of type T to a receiver of type U. +// That is, +// +// func (t T) M() { +// ... +// } +// +// already exists; this function generates +// +// func (u U) M() { +// u.M() +// } +// +// where the types T and U are such that u.M() is valid +// and calls the T.M method. +// The resulting function is for use in method tables. +// +// rcvr - U +// method - M func (t T)(), a TFIELD type struct +// +// Also wraps methods on instantiated generic types for use in itab entries. 
+// For an instantiated generic type G[int], we generate wrappers like: +// G[int] pointer shaped: +// +// func (x G[int]) f(arg) { +// .inst.G[int].f(dictionary, x, arg) +// } +// +// G[int] not pointer shaped: +// +// func (x *G[int]) f(arg) { +// .inst.G[int].f(dictionary, *x, arg) +// } +// +// These wrappers are always fully stenciled. +func methodWrapper(rcvr *types.Type, method *types.Field, forItab bool) *obj.LSym { + if forItab && !types.IsDirectIface(rcvr) { + rcvr = rcvr.PtrTo() + } + + newnam := ir.MethodSym(rcvr, method.Sym) + lsym := newnam.Linksym() + + // Unified IR creates its own wrappers. + return lsym +} + +var ZeroSize int64 + +// MarkTypeUsedInInterface marks that type t is converted to an interface. +// This information is used in the linker in dead method elimination. +func MarkTypeUsedInInterface(t *types.Type, from *obj.LSym) { + if t.HasShape() { + // Shape types shouldn't be put in interfaces, so we shouldn't ever get here. + base.Fatalf("shape types have no methods %+v", t) + } + MarkTypeSymUsedInInterface(TypeLinksym(t), from) +} +func MarkTypeSymUsedInInterface(tsym *obj.LSym, from *obj.LSym) { + // Emit a marker relocation. The linker will know the type is converted + // to an interface if "from" is reachable. + r := obj.Addrel(from) + r.Sym = tsym + r.Type = objabi.R_USEIFACE +} + +// MarkUsedIfaceMethod marks that an interface method is used in the current +// function. n is OCALLINTER node. +func MarkUsedIfaceMethod(n *ir.CallExpr) { + // skip unnamed functions (func _()) + if ir.CurFunc.LSym == nil { + return + } + dot := n.Fun.(*ir.SelectorExpr) + ityp := dot.X.Type() + if ityp.HasShape() { + // Here we're calling a method on a generic interface. Something like: + // + // type I[T any] interface { foo() T } + // func f[T any](x I[T]) { + // ... = x.foo() + // } + // f[int](...) + // f[string](...) + // + // In this case, in f we're calling foo on a generic interface. + // Which method could that be? 
Normally we could match the method + // both by name and by type. But in this case we don't really know + // the type of the method we're calling. It could be func()int + // or func()string. So we match on just the function name, instead + // of both the name and the type used for the non-generic case below. + // TODO: instantiations at least know the shape of the instantiated + // type, and the linker could do more complicated matching using + // some sort of fuzzy shape matching. For now, only use the name + // of the method for matching. + r := obj.Addrel(ir.CurFunc.LSym) + r.Sym = staticdata.StringSymNoCommon(dot.Sel.Name) + r.Type = objabi.R_USENAMEDMETHOD + return + } + + tsym := TypeLinksym(ityp) + r := obj.Addrel(ir.CurFunc.LSym) + r.Sym = tsym + // dot.Offset() is the method index * PtrSize (the offset of code pointer + // in itab). + midx := dot.Offset() / int64(types.PtrSize) + r.Add = InterfaceMethodOffset(ityp, midx) + r.Type = objabi.R_USEIFACEMETHOD +} + +func deref(t *types.Type) *types.Type { + if t.IsPtr() { + return t.Elem() + } + return t +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/riscv64/galign.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/riscv64/galign.go new file mode 100644 index 0000000000000000000000000000000000000000..4244afba3e19689286c03633849890e449facb32 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/riscv64/galign.go @@ -0,0 +1,26 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package riscv64 + +import ( + "cmd/compile/internal/ssagen" + "cmd/internal/obj/riscv" +) + +func Init(arch *ssagen.ArchInfo) { + arch.LinkArch = &riscv.LinkRISCV64 + + arch.REGSP = riscv.REG_SP + arch.MAXWIDTH = 1 << 50 + + arch.Ginsnop = ginsnop + arch.ZeroRange = zeroRange + + arch.SSAMarkMoves = ssaMarkMoves + arch.SSAGenValue = ssaGenValue + arch.SSAGenBlock = ssaGenBlock + arch.LoadRegResult = loadRegResult + arch.SpillArgReg = spillArgReg +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/riscv64/ggen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/riscv64/ggen.go new file mode 100644 index 0000000000000000000000000000000000000000..44488e43276bc24a5dece2337f8f93ecdfc9ae25 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/riscv64/ggen.go @@ -0,0 +1,59 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package riscv64 + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/riscv" +) + +func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { + if cnt == 0 { + return p + } + + // Adjust the frame to account for LR. 
+ off += base.Ctxt.Arch.FixedFrameSize + + if cnt < int64(4*types.PtrSize) { + for i := int64(0); i < cnt; i += int64(types.PtrSize) { + p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i) + } + return p + } + + if cnt <= int64(128*types.PtrSize) { + p = pp.Append(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_X25, 0) + p.Reg = riscv.REG_SP + p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ir.Syms.Duffzero + p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize)) + return p + } + + // Loop, zeroing pointer width bytes at a time. + // ADD $(off), SP, T0 + // ADD $(cnt), T0, T1 + // loop: + // MOV ZERO, (T0) + // ADD $Widthptr, T0 + // BNE T0, T1, loop + p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0) + p.Reg = riscv.REG_SP + p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0) + p.Reg = riscv.REG_T0 + p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0) + loop := p + p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, riscv.REG_T0, 0) + p = pp.Append(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0) + p.Reg = riscv.REG_T1 + p.To.SetTarget(loop) + return p +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/riscv64/gsubr.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/riscv64/gsubr.go new file mode 100644 index 0000000000000000000000000000000000000000..74bccf8d42ab1b00cad26b7c06e79b8824bf294e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/riscv64/gsubr.go @@ -0,0 +1,20 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package riscv64 + +import ( + "cmd/compile/internal/objw" + "cmd/internal/obj" + "cmd/internal/obj/riscv" +) + +func ginsnop(pp *objw.Progs) *obj.Prog { + // Hardware nop is ADD $0, ZERO + p := pp.Prog(riscv.AADD) + p.From.Type = obj.TYPE_CONST + p.Reg = riscv.REG_ZERO + p.To = obj.Addr{Type: obj.TYPE_REG, Reg: riscv.REG_ZERO} + return p +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/riscv64/ssa.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/riscv64/ssa.go new file mode 100644 index 0000000000000000000000000000000000000000..22338188e5202fd8698adf396ed6875bb84d96a2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/riscv64/ssa.go @@ -0,0 +1,817 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package riscv64 + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/riscv" +) + +// ssaRegToReg maps ssa register numbers to obj register numbers. 
+var ssaRegToReg = []int16{ + riscv.REG_X0, + // X1 (LR): unused + riscv.REG_X2, + riscv.REG_X3, + riscv.REG_X4, + riscv.REG_X5, + riscv.REG_X6, + riscv.REG_X7, + riscv.REG_X8, + riscv.REG_X9, + riscv.REG_X10, + riscv.REG_X11, + riscv.REG_X12, + riscv.REG_X13, + riscv.REG_X14, + riscv.REG_X15, + riscv.REG_X16, + riscv.REG_X17, + riscv.REG_X18, + riscv.REG_X19, + riscv.REG_X20, + riscv.REG_X21, + riscv.REG_X22, + riscv.REG_X23, + riscv.REG_X24, + riscv.REG_X25, + riscv.REG_X26, + riscv.REG_X27, + riscv.REG_X28, + riscv.REG_X29, + riscv.REG_X30, + riscv.REG_X31, + riscv.REG_F0, + riscv.REG_F1, + riscv.REG_F2, + riscv.REG_F3, + riscv.REG_F4, + riscv.REG_F5, + riscv.REG_F6, + riscv.REG_F7, + riscv.REG_F8, + riscv.REG_F9, + riscv.REG_F10, + riscv.REG_F11, + riscv.REG_F12, + riscv.REG_F13, + riscv.REG_F14, + riscv.REG_F15, + riscv.REG_F16, + riscv.REG_F17, + riscv.REG_F18, + riscv.REG_F19, + riscv.REG_F20, + riscv.REG_F21, + riscv.REG_F22, + riscv.REG_F23, + riscv.REG_F24, + riscv.REG_F25, + riscv.REG_F26, + riscv.REG_F27, + riscv.REG_F28, + riscv.REG_F29, + riscv.REG_F30, + riscv.REG_F31, + 0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case. +} + +func loadByType(t *types.Type) obj.As { + width := t.Size() + + if t.IsFloat() { + switch width { + case 4: + return riscv.AMOVF + case 8: + return riscv.AMOVD + default: + base.Fatalf("unknown float width for load %d in type %v", width, t) + return 0 + } + } + + switch width { + case 1: + if t.IsSigned() { + return riscv.AMOVB + } else { + return riscv.AMOVBU + } + case 2: + if t.IsSigned() { + return riscv.AMOVH + } else { + return riscv.AMOVHU + } + case 4: + if t.IsSigned() { + return riscv.AMOVW + } else { + return riscv.AMOVWU + } + case 8: + return riscv.AMOV + default: + base.Fatalf("unknown width for load %d in type %v", width, t) + return 0 + } +} + +// storeByType returns the store instruction of the given type. 
+func storeByType(t *types.Type) obj.As { + width := t.Size() + + if t.IsFloat() { + switch width { + case 4: + return riscv.AMOVF + case 8: + return riscv.AMOVD + default: + base.Fatalf("unknown float width for store %d in type %v", width, t) + return 0 + } + } + + switch width { + case 1: + return riscv.AMOVB + case 2: + return riscv.AMOVH + case 4: + return riscv.AMOVW + case 8: + return riscv.AMOV + default: + base.Fatalf("unknown width for store %d in type %v", width, t) + return 0 + } +} + +// largestMove returns the largest move instruction possible and its size, +// given the alignment of the total size of the move. +// +// e.g., a 16-byte move may use MOV, but an 11-byte move must use MOVB. +// +// Note that the moves may not be on naturally aligned addresses depending on +// the source and destination. +// +// This matches the calculation in ssa.moveSize. +func largestMove(alignment int64) (obj.As, int64) { + switch { + case alignment%8 == 0: + return riscv.AMOV, 8 + case alignment%4 == 0: + return riscv.AMOVW, 4 + case alignment%2 == 0: + return riscv.AMOVH, 2 + default: + return riscv.AMOVB, 1 + } +} + +// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags. +// RISC-V has no flags, so this is a no-op. 
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {} + +func ssaGenValue(s *ssagen.State, v *ssa.Value) { + s.SetPos(v.Pos) + + switch v.Op { + case ssa.OpInitMem: + // memory arg needs no code + case ssa.OpArg: + // input args need no code + case ssa.OpPhi: + ssagen.CheckLoweredPhi(v) + case ssa.OpCopy, ssa.OpRISCV64MOVDreg: + if v.Type.IsMemory() { + return + } + rs := v.Args[0].Reg() + rd := v.Reg() + if rs == rd { + return + } + as := riscv.AMOV + if v.Type.IsFloat() { + as = riscv.AMOVD + } + p := s.Prog(as) + p.From.Type = obj.TYPE_REG + p.From.Reg = rs + p.To.Type = obj.TYPE_REG + p.To.Reg = rd + case ssa.OpRISCV64MOVDnop: + // nothing to do + case ssa.OpLoadReg: + if v.Type.IsFlags() { + v.Fatalf("load flags not implemented: %v", v.LongString()) + return + } + p := s.Prog(loadByType(v.Type)) + ssagen.AddrAuto(&p.From, v.Args[0]) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpStoreReg: + if v.Type.IsFlags() { + v.Fatalf("store flags not implemented: %v", v.LongString()) + return + } + p := s.Prog(storeByType(v.Type)) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + ssagen.AddrAuto(&p.To, v) + case ssa.OpArgIntReg, ssa.OpArgFloatReg: + // The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill + // The loop only runs once. + for _, a := range v.Block.Func.RegArgs { + // Pass the spill/unspill information along to the assembler, offset by size of + // the saved LR slot. 
+ addr := ssagen.SpillSlotAddr(a, riscv.REG_SP, base.Ctxt.Arch.FixedFrameSize) + s.FuncInfo().AddSpill( + obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)}) + } + v.Block.Func.RegArgs = nil + + ssagen.CheckArgReg(v) + case ssa.OpSP, ssa.OpSB, ssa.OpGetG: + // nothing to do + case ssa.OpRISCV64MOVBreg, ssa.OpRISCV64MOVHreg, ssa.OpRISCV64MOVWreg, + ssa.OpRISCV64MOVBUreg, ssa.OpRISCV64MOVHUreg, ssa.OpRISCV64MOVWUreg: + a := v.Args[0] + for a.Op == ssa.OpCopy || a.Op == ssa.OpRISCV64MOVDreg { + a = a.Args[0] + } + as := v.Op.Asm() + rs := v.Args[0].Reg() + rd := v.Reg() + if a.Op == ssa.OpLoadReg { + t := a.Type + switch { + case v.Op == ssa.OpRISCV64MOVBreg && t.Size() == 1 && t.IsSigned(), + v.Op == ssa.OpRISCV64MOVHreg && t.Size() == 2 && t.IsSigned(), + v.Op == ssa.OpRISCV64MOVWreg && t.Size() == 4 && t.IsSigned(), + v.Op == ssa.OpRISCV64MOVBUreg && t.Size() == 1 && !t.IsSigned(), + v.Op == ssa.OpRISCV64MOVHUreg && t.Size() == 2 && !t.IsSigned(), + v.Op == ssa.OpRISCV64MOVWUreg && t.Size() == 4 && !t.IsSigned(): + // arg is a proper-typed load and already sign/zero-extended + if rs == rd { + return + } + as = riscv.AMOV + default: + } + } + p := s.Prog(as) + p.From.Type = obj.TYPE_REG + p.From.Reg = rs + p.To.Type = obj.TYPE_REG + p.To.Reg = rd + case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64SUBW, ssa.OpRISCV64XOR, ssa.OpRISCV64OR, ssa.OpRISCV64AND, + ssa.OpRISCV64SLL, ssa.OpRISCV64SRA, ssa.OpRISCV64SRAW, ssa.OpRISCV64SRL, ssa.OpRISCV64SRLW, + ssa.OpRISCV64SLT, ssa.OpRISCV64SLTU, ssa.OpRISCV64MUL, ssa.OpRISCV64MULW, ssa.OpRISCV64MULH, + ssa.OpRISCV64MULHU, ssa.OpRISCV64DIV, ssa.OpRISCV64DIVU, ssa.OpRISCV64DIVW, + ssa.OpRISCV64DIVUW, ssa.OpRISCV64REM, ssa.OpRISCV64REMU, ssa.OpRISCV64REMW, + ssa.OpRISCV64REMUW, + ssa.OpRISCV64FADDS, ssa.OpRISCV64FSUBS, ssa.OpRISCV64FMULS, ssa.OpRISCV64FDIVS, + ssa.OpRISCV64FEQS, ssa.OpRISCV64FNES, ssa.OpRISCV64FLTS, ssa.OpRISCV64FLES, + ssa.OpRISCV64FADDD, ssa.OpRISCV64FSUBD, 
ssa.OpRISCV64FMULD, ssa.OpRISCV64FDIVD, + ssa.OpRISCV64FEQD, ssa.OpRISCV64FNED, ssa.OpRISCV64FLTD, ssa.OpRISCV64FLED, + ssa.OpRISCV64FSGNJD: + r := v.Reg() + r1 := v.Args[0].Reg() + r2 := v.Args[1].Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r2 + p.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpRISCV64LoweredMuluhilo: + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + p := s.Prog(riscv.AMULHU) + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + p.Reg = r0 + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + p1 := s.Prog(riscv.AMUL) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = r1 + p1.Reg = r0 + p1.To.Type = obj.TYPE_REG + p1.To.Reg = v.Reg1() + case ssa.OpRISCV64LoweredMuluover: + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + p := s.Prog(riscv.AMULHU) + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + p.Reg = r0 + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg1() + p1 := s.Prog(riscv.AMUL) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = r1 + p1.Reg = r0 + p1.To.Type = obj.TYPE_REG + p1.To.Reg = v.Reg0() + p2 := s.Prog(riscv.ASNEZ) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = v.Reg1() + p2.To.Type = obj.TYPE_REG + p2.To.Reg = v.Reg1() + case ssa.OpRISCV64FMADDD, ssa.OpRISCV64FMSUBD, ssa.OpRISCV64FNMADDD, ssa.OpRISCV64FNMSUBD, + ssa.OpRISCV64FMADDS, ssa.OpRISCV64FMSUBS, ssa.OpRISCV64FNMADDS, ssa.OpRISCV64FNMSUBS: + r := v.Reg() + r1 := v.Args[0].Reg() + r2 := v.Args[1].Reg() + r3 := v.Args[2].Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r2 + p.Reg = r1 + p.AddRestSource(obj.Addr{Type: obj.TYPE_REG, Reg: r3}) + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpRISCV64FSQRTS, ssa.OpRISCV64FNEGS, ssa.OpRISCV64FABSD, ssa.OpRISCV64FSQRTD, ssa.OpRISCV64FNEGD, + ssa.OpRISCV64FMVSX, ssa.OpRISCV64FMVDX, + ssa.OpRISCV64FCVTSW, ssa.OpRISCV64FCVTSL, ssa.OpRISCV64FCVTWS, ssa.OpRISCV64FCVTLS, + ssa.OpRISCV64FCVTDW, ssa.OpRISCV64FCVTDL, ssa.OpRISCV64FCVTWD, ssa.OpRISCV64FCVTLD, ssa.OpRISCV64FCVTDS, 
ssa.OpRISCV64FCVTSD, + ssa.OpRISCV64NOT, ssa.OpRISCV64NEG, ssa.OpRISCV64NEGW: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpRISCV64ADDI, ssa.OpRISCV64ADDIW, ssa.OpRISCV64XORI, ssa.OpRISCV64ORI, ssa.OpRISCV64ANDI, + ssa.OpRISCV64SLLI, ssa.OpRISCV64SRAI, ssa.OpRISCV64SRAIW, ssa.OpRISCV64SRLI, ssa.OpRISCV64SRLIW, ssa.OpRISCV64SLTI, + ssa.OpRISCV64SLTIU: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpRISCV64MOVDconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpRISCV64MOVaddr: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_ADDR + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + var wantreg string + // MOVW $sym+off(base), R + switch v.Aux.(type) { + default: + v.Fatalf("aux is of unknown type %T", v.Aux) + case *obj.LSym: + wantreg = "SB" + ssagen.AddAux(&p.From, v) + case *ir.Name: + wantreg = "SP" + ssagen.AddAux(&p.From, v) + case nil: + // No sym, just MOVW $off(SP), R + wantreg = "SP" + p.From.Reg = riscv.REG_SP + p.From.Offset = v.AuxInt + } + if reg := v.Args[0].RegName(); reg != wantreg { + v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg) + } + case ssa.OpRISCV64MOVBload, ssa.OpRISCV64MOVHload, ssa.OpRISCV64MOVWload, ssa.OpRISCV64MOVDload, + ssa.OpRISCV64MOVBUload, ssa.OpRISCV64MOVHUload, ssa.OpRISCV64MOVWUload, + ssa.OpRISCV64FMOVWload, ssa.OpRISCV64FMOVDload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpRISCV64MOVBstore, ssa.OpRISCV64MOVHstore, ssa.OpRISCV64MOVWstore, ssa.OpRISCV64MOVDstore, + ssa.OpRISCV64FMOVWstore, ssa.OpRISCV64FMOVDstore: + p := s.Prog(v.Op.Asm()) + p.From.Type = 
obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = riscv.REG_ZERO + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpRISCV64CALLstatic, ssa.OpRISCV64CALLclosure, ssa.OpRISCV64CALLinter: + s.Call(v) + case ssa.OpRISCV64CALLtail: + s.TailCall(v) + case ssa.OpRISCV64LoweredWB: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + // AuxInt encodes how many buffer entries we need. + p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1] + case ssa.OpRISCV64LoweredPanicBoundsA, ssa.OpRISCV64LoweredPanicBoundsB, ssa.OpRISCV64LoweredPanicBoundsC: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] + s.UseArgs(16) // space used in callee args area by assembly stubs + + case ssa.OpRISCV64LoweredAtomicLoad8: + s.Prog(riscv.AFENCE) + p := s.Prog(riscv.AMOVBU) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + s.Prog(riscv.AFENCE) + + case ssa.OpRISCV64LoweredAtomicLoad32, ssa.OpRISCV64LoweredAtomicLoad64: + as := riscv.ALRW + if v.Op == ssa.OpRISCV64LoweredAtomicLoad64 { + as = riscv.ALRD + } + p := s.Prog(as) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + + case ssa.OpRISCV64LoweredAtomicStore8: + s.Prog(riscv.AFENCE) + p := s.Prog(riscv.AMOVB) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + s.Prog(riscv.AFENCE) 
+ + case ssa.OpRISCV64LoweredAtomicStore32, ssa.OpRISCV64LoweredAtomicStore64: + as := riscv.AAMOSWAPW + if v.Op == ssa.OpRISCV64LoweredAtomicStore64 { + as = riscv.AAMOSWAPD + } + p := s.Prog(as) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.RegTo2 = riscv.REG_ZERO + + case ssa.OpRISCV64LoweredAtomicAdd32, ssa.OpRISCV64LoweredAtomicAdd64: + as := riscv.AAMOADDW + if v.Op == ssa.OpRISCV64LoweredAtomicAdd64 { + as = riscv.AAMOADDD + } + p := s.Prog(as) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.RegTo2 = riscv.REG_TMP + + p2 := s.Prog(riscv.AADD) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = riscv.REG_TMP + p2.Reg = v.Args[1].Reg() + p2.To.Type = obj.TYPE_REG + p2.To.Reg = v.Reg0() + + case ssa.OpRISCV64LoweredAtomicExchange32, ssa.OpRISCV64LoweredAtomicExchange64: + as := riscv.AAMOSWAPW + if v.Op == ssa.OpRISCV64LoweredAtomicExchange64 { + as = riscv.AAMOSWAPD + } + p := s.Prog(as) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.RegTo2 = v.Reg0() + + case ssa.OpRISCV64LoweredAtomicCas32, ssa.OpRISCV64LoweredAtomicCas64: + // MOV ZERO, Rout + // LR (Rarg0), Rtmp + // BNE Rtmp, Rarg1, 3(PC) + // SC Rarg2, (Rarg0), Rtmp + // BNE Rtmp, ZERO, -3(PC) + // MOV $1, Rout + + lr := riscv.ALRW + sc := riscv.ASCW + if v.Op == ssa.OpRISCV64LoweredAtomicCas64 { + lr = riscv.ALRD + sc = riscv.ASCD + } + + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + r2 := v.Args[2].Reg() + out := v.Reg0() + + p := s.Prog(riscv.AMOV) + p.From.Type = obj.TYPE_REG + p.From.Reg = riscv.REG_ZERO + p.To.Type = obj.TYPE_REG + p.To.Reg = out + + p1 := s.Prog(lr) + p1.From.Type = obj.TYPE_MEM + p1.From.Reg = r0 + p1.To.Type = obj.TYPE_REG + p1.To.Reg = riscv.REG_TMP + + p2 := s.Prog(riscv.ABNE) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = r1 + p2.Reg = riscv.REG_TMP + p2.To.Type = 
obj.TYPE_BRANCH + + p3 := s.Prog(sc) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = r2 + p3.To.Type = obj.TYPE_MEM + p3.To.Reg = r0 + p3.RegTo2 = riscv.REG_TMP + + p4 := s.Prog(riscv.ABNE) + p4.From.Type = obj.TYPE_REG + p4.From.Reg = riscv.REG_TMP + p4.Reg = riscv.REG_ZERO + p4.To.Type = obj.TYPE_BRANCH + p4.To.SetTarget(p1) + + p5 := s.Prog(riscv.AMOV) + p5.From.Type = obj.TYPE_CONST + p5.From.Offset = 1 + p5.To.Type = obj.TYPE_REG + p5.To.Reg = out + + p6 := s.Prog(obj.ANOP) + p2.To.SetTarget(p6) + + case ssa.OpRISCV64LoweredAtomicAnd32, ssa.OpRISCV64LoweredAtomicOr32: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.RegTo2 = riscv.REG_ZERO + + case ssa.OpRISCV64LoweredZero: + mov, sz := largestMove(v.AuxInt) + + // mov ZERO, (Rarg0) + // ADD $sz, Rarg0 + // BGEU Rarg1, Rarg0, -2(PC) + + p := s.Prog(mov) + p.From.Type = obj.TYPE_REG + p.From.Reg = riscv.REG_ZERO + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + + p2 := s.Prog(riscv.AADD) + p2.From.Type = obj.TYPE_CONST + p2.From.Offset = sz + p2.To.Type = obj.TYPE_REG + p2.To.Reg = v.Args[0].Reg() + + p3 := s.Prog(riscv.ABGEU) + p3.To.Type = obj.TYPE_BRANCH + p3.Reg = v.Args[0].Reg() + p3.From.Type = obj.TYPE_REG + p3.From.Reg = v.Args[1].Reg() + p3.To.SetTarget(p) + + case ssa.OpRISCV64LoweredMove: + mov, sz := largestMove(v.AuxInt) + + // mov (Rarg1), T2 + // mov T2, (Rarg0) + // ADD $sz, Rarg0 + // ADD $sz, Rarg1 + // BGEU Rarg2, Rarg0, -4(PC) + + p := s.Prog(mov) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = riscv.REG_T2 + + p2 := s.Prog(mov) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = riscv.REG_T2 + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = v.Args[0].Reg() + + p3 := s.Prog(riscv.AADD) + p3.From.Type = obj.TYPE_CONST + p3.From.Offset = sz + p3.To.Type = obj.TYPE_REG + p3.To.Reg = v.Args[0].Reg() + + p4 := s.Prog(riscv.AADD) + p4.From.Type = 
obj.TYPE_CONST + p4.From.Offset = sz + p4.To.Type = obj.TYPE_REG + p4.To.Reg = v.Args[1].Reg() + + p5 := s.Prog(riscv.ABGEU) + p5.To.Type = obj.TYPE_BRANCH + p5.Reg = v.Args[1].Reg() + p5.From.Type = obj.TYPE_REG + p5.From.Reg = v.Args[2].Reg() + p5.To.SetTarget(p) + + case ssa.OpRISCV64LoweredNilCheck: + // Issue a load which will fault if arg is nil. + // TODO: optimizations. See arm and amd64 LoweredNilCheck. + p := s.Prog(riscv.AMOVB) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = riscv.REG_ZERO + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") + } + + case ssa.OpRISCV64LoweredGetClosurePtr: + // Closure pointer is S10 (riscv.REG_CTXT). + ssagen.CheckLoweredGetClosurePtr(v) + + case ssa.OpRISCV64LoweredGetCallerSP: + // caller's SP is FixedFrameSize below the address of the first arg + p := s.Prog(riscv.AMOV) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = -base.Ctxt.Arch.FixedFrameSize + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpRISCV64LoweredGetCallerPC: + p := s.Prog(obj.AGETCALLERPC) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpRISCV64DUFFZERO: + p := s.Prog(obj.ADUFFZERO) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ir.Syms.Duffzero + p.To.Offset = v.AuxInt + + case ssa.OpRISCV64DUFFCOPY: + p := s.Prog(obj.ADUFFCOPY) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ir.Syms.Duffcopy + p.To.Offset = v.AuxInt + + case ssa.OpRISCV64LoweredPubBarrier: + // FENCE + s.Prog(v.Op.Asm()) + + case ssa.OpRISCV64LoweredRound32F, ssa.OpRISCV64LoweredRound64F: + // input is already rounded + + case ssa.OpClobber, ssa.OpClobberReg: + // TODO: implement for clobberdead experiment. Nop is ok for now. 
+ + default: + v.Fatalf("Unhandled op %v", v.Op) + } +} + +var blockBranch = [...]obj.As{ + ssa.BlockRISCV64BEQ: riscv.ABEQ, + ssa.BlockRISCV64BEQZ: riscv.ABEQZ, + ssa.BlockRISCV64BGE: riscv.ABGE, + ssa.BlockRISCV64BGEU: riscv.ABGEU, + ssa.BlockRISCV64BGEZ: riscv.ABGEZ, + ssa.BlockRISCV64BGTZ: riscv.ABGTZ, + ssa.BlockRISCV64BLEZ: riscv.ABLEZ, + ssa.BlockRISCV64BLT: riscv.ABLT, + ssa.BlockRISCV64BLTU: riscv.ABLTU, + ssa.BlockRISCV64BLTZ: riscv.ABLTZ, + ssa.BlockRISCV64BNE: riscv.ABNE, + ssa.BlockRISCV64BNEZ: riscv.ABNEZ, +} + +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { + s.SetPos(b.Pos) + + switch b.Kind { + case ssa.BlockDefer: + // defer returns in A0: + // 0 if we should continue executing + // 1 if we should jump to deferreturn call + p := s.Prog(riscv.ABNE) + p.To.Type = obj.TYPE_BRANCH + p.From.Type = obj.TYPE_REG + p.From.Reg = riscv.REG_ZERO + p.Reg = riscv.REG_A0 + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()}) + if b.Succs[0].Block() != next { + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) + } + case ssa.BlockPlain: + if b.Succs[0].Block() != next { + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) + } + case ssa.BlockExit, ssa.BlockRetJmp: + case ssa.BlockRet: + s.Prog(obj.ARET) + case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BNEZ, + ssa.BlockRISCV64BLT, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BGEZ, + ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU: + + as := blockBranch[b.Kind] + invAs := riscv.InvertBranch(as) + + var p *obj.Prog + switch next { + case b.Succs[0].Block(): + p = s.Br(invAs, b.Succs[1].Block()) + case b.Succs[1].Block(): + p = s.Br(as, b.Succs[0].Block()) + default: + if b.Likely != ssa.BranchUnlikely { + p = s.Br(as, b.Succs[0].Block()) 
+ s.Br(obj.AJMP, b.Succs[1].Block()) + } else { + p = s.Br(invAs, b.Succs[1].Block()) + s.Br(obj.AJMP, b.Succs[0].Block()) + } + } + + p.From.Type = obj.TYPE_REG + switch b.Kind { + case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BLT, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU: + if b.NumControls() != 2 { + b.Fatalf("Unexpected number of controls (%d != 2): %s", b.NumControls(), b.LongString()) + } + p.From.Reg = b.Controls[0].Reg() + p.Reg = b.Controls[1].Reg() + + case ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNEZ, ssa.BlockRISCV64BGEZ, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ: + if b.NumControls() != 1 { + b.Fatalf("Unexpected number of controls (%d != 1): %s", b.NumControls(), b.LongString()) + } + p.From.Reg = b.Controls[0].Reg() + } + + default: + b.Fatalf("Unhandled block: %s", b.LongString()) + } +} + +func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { + p := s.Prog(loadByType(t)) + p.From.Type = obj.TYPE_MEM + p.From.Name = obj.NAME_AUTO + p.From.Sym = n.Linksym() + p.From.Offset = n.FrameOffset() + off + p.To.Type = obj.TYPE_REG + p.To.Reg = reg + return p +} + +func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { + p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off) + p.To.Name = obj.NAME_PARAM + p.To.Sym = n.Linksym() + p.Pos = p.Pos.WithNotStmt() + return p +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/rttype/rttype.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/rttype/rttype.go new file mode 100644 index 0000000000000000000000000000000000000000..cdc399d9cfff45e87acd465abbc29e2229d32600 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/rttype/rttype.go @@ -0,0 +1,283 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rttype allows the compiler to share type information with +// the runtime. The shared type information is stored in +// internal/abi. This package translates those types from the host +// machine on which the compiler runs to the target machine on which +// the compiled program will run. In particular, this package handles +// layout differences between e.g. a 64 bit compiler and 32 bit +// target. +package rttype + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/objw" + "cmd/compile/internal/types" + "cmd/internal/obj" + "internal/abi" + "reflect" +) + +// The type structures shared with the runtime. +var Type *types.Type + +var ArrayType *types.Type +var ChanType *types.Type +var FuncType *types.Type +var InterfaceType *types.Type +var MapType *types.Type +var PtrType *types.Type +var SliceType *types.Type +var StructType *types.Type + +// Types that are parts of the types above. +var IMethod *types.Type +var Method *types.Type +var StructField *types.Type +var UncommonType *types.Type + +// Type switches and asserts +var InterfaceSwitch *types.Type +var TypeAssert *types.Type + +func Init() { + // Note: this has to be called explicitly instead of being + // an init function so it runs after the types package has + // been properly initialized. 
+ Type = fromReflect(reflect.TypeOf(abi.Type{})) + ArrayType = fromReflect(reflect.TypeOf(abi.ArrayType{})) + ChanType = fromReflect(reflect.TypeOf(abi.ChanType{})) + FuncType = fromReflect(reflect.TypeOf(abi.FuncType{})) + InterfaceType = fromReflect(reflect.TypeOf(abi.InterfaceType{})) + MapType = fromReflect(reflect.TypeOf(abi.MapType{})) + PtrType = fromReflect(reflect.TypeOf(abi.PtrType{})) + SliceType = fromReflect(reflect.TypeOf(abi.SliceType{})) + StructType = fromReflect(reflect.TypeOf(abi.StructType{})) + + IMethod = fromReflect(reflect.TypeOf(abi.Imethod{})) + Method = fromReflect(reflect.TypeOf(abi.Method{})) + StructField = fromReflect(reflect.TypeOf(abi.StructField{})) + UncommonType = fromReflect(reflect.TypeOf(abi.UncommonType{})) + + InterfaceSwitch = fromReflect(reflect.TypeOf(abi.InterfaceSwitch{})) + TypeAssert = fromReflect(reflect.TypeOf(abi.TypeAssert{})) + + // Make sure abi functions are correct. These functions are used + // by the linker which doesn't have the ability to do type layout, + // so we check the functions it uses here. + ptrSize := types.PtrSize + if got, want := int64(abi.CommonSize(ptrSize)), Type.Size(); got != want { + base.Fatalf("abi.CommonSize() == %d, want %d", got, want) + } + if got, want := int64(abi.StructFieldSize(ptrSize)), StructField.Size(); got != want { + base.Fatalf("abi.StructFieldSize() == %d, want %d", got, want) + } + if got, want := int64(abi.UncommonSize()), UncommonType.Size(); got != want { + base.Fatalf("abi.UncommonSize() == %d, want %d", got, want) + } + if got, want := int64(abi.TFlagOff(ptrSize)), Type.OffsetOf("TFlag"); got != want { + base.Fatalf("abi.TFlagOff() == %d, want %d", got, want) + } +} + +// fromReflect translates from a host type to the equivalent target type. 
+func fromReflect(rt reflect.Type) *types.Type { + t := reflectToType(rt) + types.CalcSize(t) + return t +} + +// reflectToType converts from a reflect.Type (which is a compiler +// host type) to a *types.Type, which is a target type. The result +// must be CalcSize'd before using. +func reflectToType(rt reflect.Type) *types.Type { + switch rt.Kind() { + case reflect.Bool: + return types.Types[types.TBOOL] + case reflect.Int: + return types.Types[types.TINT] + case reflect.Int32: + return types.Types[types.TINT32] + case reflect.Uint8: + return types.Types[types.TUINT8] + case reflect.Uint16: + return types.Types[types.TUINT16] + case reflect.Uint32: + return types.Types[types.TUINT32] + case reflect.Uintptr: + return types.Types[types.TUINTPTR] + case reflect.Ptr, reflect.Func, reflect.UnsafePointer: + // TODO: there's no mechanism to distinguish different pointer types, + // so we treat them all as unsafe.Pointer. + return types.Types[types.TUNSAFEPTR] + case reflect.Slice: + return types.NewSlice(reflectToType(rt.Elem())) + case reflect.Array: + return types.NewArray(reflectToType(rt.Elem()), int64(rt.Len())) + case reflect.Struct: + fields := make([]*types.Field, rt.NumField()) + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + ft := reflectToType(f.Type) + fields[i] = &types.Field{Sym: &types.Sym{Name: f.Name}, Type: ft} + } + return types.NewStruct(fields) + default: + base.Fatalf("unhandled kind %s", rt.Kind()) + return nil + } +} + +// A Cursor represents a typed location inside a static variable where we +// are going to write. +type Cursor struct { + lsym *obj.LSym + offset int64 + typ *types.Type +} + +// NewCursor returns a cursor starting at lsym+off and having type t. +func NewCursor(lsym *obj.LSym, off int64, t *types.Type) Cursor { + return Cursor{lsym: lsym, offset: off, typ: t} +} + +// WritePtr writes a pointer "target" to the component at the location specified by c. 
+func (c Cursor) WritePtr(target *obj.LSym) { + if c.typ.Kind() != types.TUNSAFEPTR { + base.Fatalf("can't write ptr, it has kind %s", c.typ.Kind()) + } + if target == nil { + objw.Uintptr(c.lsym, int(c.offset), 0) + } else { + objw.SymPtr(c.lsym, int(c.offset), target, 0) + } +} +func (c Cursor) WriteUintptr(val uint64) { + if c.typ.Kind() != types.TUINTPTR { + base.Fatalf("can't write uintptr, it has kind %s", c.typ.Kind()) + } + objw.Uintptr(c.lsym, int(c.offset), val) +} +func (c Cursor) WriteUint32(val uint32) { + if c.typ.Kind() != types.TUINT32 { + base.Fatalf("can't write uint32, it has kind %s", c.typ.Kind()) + } + objw.Uint32(c.lsym, int(c.offset), val) +} +func (c Cursor) WriteUint16(val uint16) { + if c.typ.Kind() != types.TUINT16 { + base.Fatalf("can't write uint16, it has kind %s", c.typ.Kind()) + } + objw.Uint16(c.lsym, int(c.offset), val) +} +func (c Cursor) WriteUint8(val uint8) { + if c.typ.Kind() != types.TUINT8 { + base.Fatalf("can't write uint8, it has kind %s", c.typ.Kind()) + } + objw.Uint8(c.lsym, int(c.offset), val) +} +func (c Cursor) WriteInt(val int64) { + if c.typ.Kind() != types.TINT { + base.Fatalf("can't write int, it has kind %s", c.typ.Kind()) + } + objw.Uintptr(c.lsym, int(c.offset), uint64(val)) +} +func (c Cursor) WriteInt32(val int32) { + if c.typ.Kind() != types.TINT32 { + base.Fatalf("can't write int32, it has kind %s", c.typ.Kind()) + } + objw.Uint32(c.lsym, int(c.offset), uint32(val)) +} +func (c Cursor) WriteBool(val bool) { + if c.typ.Kind() != types.TBOOL { + base.Fatalf("can't write bool, it has kind %s", c.typ.Kind()) + } + objw.Bool(c.lsym, int(c.offset), val) +} + +// WriteSymPtrOff writes a "pointer" to the given symbol. The symbol +// is encoded as a uint32 offset from the start of the section. 
+func (c Cursor) WriteSymPtrOff(target *obj.LSym, weak bool) { + if c.typ.Kind() != types.TINT32 && c.typ.Kind() != types.TUINT32 { + base.Fatalf("can't write SymPtr, it has kind %s", c.typ.Kind()) + } + if target == nil { + objw.Uint32(c.lsym, int(c.offset), 0) + } else if weak { + objw.SymPtrWeakOff(c.lsym, int(c.offset), target) + } else { + objw.SymPtrOff(c.lsym, int(c.offset), target) + } +} + +// WriteSlice writes a slice header to c. The pointer is target+off, the len and cap fields are given. +func (c Cursor) WriteSlice(target *obj.LSym, off, len, cap int64) { + if c.typ.Kind() != types.TSLICE { + base.Fatalf("can't write slice, it has kind %s", c.typ.Kind()) + } + objw.SymPtr(c.lsym, int(c.offset), target, int(off)) + objw.Uintptr(c.lsym, int(c.offset)+types.PtrSize, uint64(len)) + objw.Uintptr(c.lsym, int(c.offset)+2*types.PtrSize, uint64(cap)) + // TODO: ability to switch len&cap. Maybe not needed here, as every caller + // passes the same thing for both? + if len != cap { + base.Fatalf("len != cap (%d != %d)", len, cap) + } +} + +// Reloc adds a relocation from the current cursor position. +// Reloc fills in Off and Siz fields. Caller should fill in the rest (Type, others). +func (c Cursor) Reloc() *obj.Reloc { + r := obj.Addrel(c.lsym) + r.Off = int32(c.offset) + r.Siz = uint8(c.typ.Size()) + return r +} + +// Field selects the field with the given name from the struct pointed to by c. +func (c Cursor) Field(name string) Cursor { + if c.typ.Kind() != types.TSTRUCT { + base.Fatalf("can't call Field on non-struct %v", c.typ) + } + for _, f := range c.typ.Fields() { + if f.Sym.Name == name { + return Cursor{lsym: c.lsym, offset: c.offset + f.Offset, typ: f.Type} + } + } + base.Fatalf("couldn't find field %s in %v", name, c.typ) + return Cursor{} +} + +type ArrayCursor struct { + c Cursor // cursor pointing at first element + n int // number of elements +} + +// NewArrayCursor returns a cursor starting at lsym+off and having n copies of type t. 
+func NewArrayCursor(lsym *obj.LSym, off int64, t *types.Type, n int) ArrayCursor { + return ArrayCursor{ + c: NewCursor(lsym, off, t), + n: n, + } +} + +// Elem selects element i of the array pointed to by c. +func (a ArrayCursor) Elem(i int) Cursor { + if i < 0 || i >= a.n { + base.Fatalf("element index %d out of range [0:%d]", i, a.n) + } + return Cursor{lsym: a.c.lsym, offset: a.c.offset + int64(i)*a.c.typ.Size(), typ: a.c.typ} +} + +// ModifyArray converts a cursor pointing at a type [k]T to a cursor pointing +// at a type [n]T. +// Also returns the size delta, aka (n-k)*sizeof(T). +func (c Cursor) ModifyArray(n int) (ArrayCursor, int64) { + if c.typ.Kind() != types.TARRAY { + base.Fatalf("can't call ModifyArray on non-array %v", c.typ) + } + k := c.typ.NumElem() + return ArrayCursor{c: Cursor{lsym: c.lsym, offset: c.offset, typ: c.typ.Elem()}, n: n}, (int64(n) - k) * c.typ.Elem().Size() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/s390x/galign.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/s390x/galign.go new file mode 100644 index 0000000000000000000000000000000000000000..d880834c220d91c763667614a46b241fb23484bb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/s390x/galign.go @@ -0,0 +1,23 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package s390x + +import ( + "cmd/compile/internal/ssagen" + "cmd/internal/obj/s390x" +) + +func Init(arch *ssagen.ArchInfo) { + arch.LinkArch = &s390x.Links390x + arch.REGSP = s390x.REGSP + arch.MAXWIDTH = 1 << 50 + + arch.ZeroRange = zerorange + arch.Ginsnop = ginsnop + + arch.SSAMarkMoves = ssaMarkMoves + arch.SSAGenValue = ssaGenValue + arch.SSAGenBlock = ssaGenBlock +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/s390x/ggen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/s390x/ggen.go new file mode 100644 index 0000000000000000000000000000000000000000..70e403122481e27ac676ddac656ad564ae75a063 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/s390x/ggen.go @@ -0,0 +1,89 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s390x + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/objw" + "cmd/internal/obj" + "cmd/internal/obj/s390x" +) + +// clearLoopCutOff is the (somewhat arbitrary) value above which it is better +// to have a loop of clear instructions (e.g. XCs) rather than just generating +// multiple instructions (i.e. loop unrolling). +// Must be between 256 and 4096. +const clearLoopCutoff = 1024 + +// zerorange clears the stack in the given range. +func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { + if cnt == 0 { + return p + } + + // Adjust the frame to account for LR. + off += base.Ctxt.Arch.FixedFrameSize + reg := int16(s390x.REGSP) + + // If the off cannot fit in a 12-bit unsigned displacement then we + // need to create a copy of the stack pointer that we can adjust. + // We also need to do this if we are going to loop. 
+ if off < 0 || off > 4096-clearLoopCutoff || cnt > clearLoopCutoff { + p = pp.Append(p, s390x.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, s390x.REGRT1, 0) + p.Reg = int16(s390x.REGSP) + reg = s390x.REGRT1 + off = 0 + } + + // Generate a loop of large clears. + if cnt > clearLoopCutoff { + ireg := int16(s390x.REGRT2) // register holds number of remaining loop iterations + p = pp.Append(p, s390x.AMOVD, obj.TYPE_CONST, 0, cnt/256, obj.TYPE_REG, ireg, 0) + p = pp.Append(p, s390x.ACLEAR, obj.TYPE_CONST, 0, 256, obj.TYPE_MEM, reg, off) + pl := p + p = pp.Append(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0) + p = pp.Append(p, s390x.ABRCTG, obj.TYPE_REG, ireg, 0, obj.TYPE_BRANCH, 0, 0) + p.To.SetTarget(pl) + cnt = cnt % 256 + } + + // Generate remaining clear instructions without a loop. + for cnt > 0 { + n := cnt + + // Can clear at most 256 bytes per instruction. + if n > 256 { + n = 256 + } + + switch n { + // Handle very small clears with move instructions. + case 8, 4, 2, 1: + ins := s390x.AMOVB + switch n { + case 8: + ins = s390x.AMOVD + case 4: + ins = s390x.AMOVW + case 2: + ins = s390x.AMOVH + } + p = pp.Append(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, off) + + // Handle clears that would require multiple move instructions with CLEAR (assembled as XC). + default: + p = pp.Append(p, s390x.ACLEAR, obj.TYPE_CONST, 0, n, obj.TYPE_MEM, reg, off) + } + + cnt -= n + off += n + } + + return p +} + +func ginsnop(pp *objw.Progs) *obj.Prog { + return pp.Prog(s390x.ANOPH) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/s390x/ssa.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/s390x/ssa.go new file mode 100644 index 0000000000000000000000000000000000000000..a97c1569c11806ea37ec8bc70c7d444938e38676 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/s390x/ssa.go @@ -0,0 +1,959 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s390x + +import ( + "math" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/logopt" + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/s390x" +) + +// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags. +func ssaMarkMoves(s *ssagen.State, b *ssa.Block) { + flive := b.FlagsLiveAtEnd + for _, c := range b.ControlValues() { + flive = c.Type.IsFlags() || flive + } + for i := len(b.Values) - 1; i >= 0; i-- { + v := b.Values[i] + if flive && v.Op == ssa.OpS390XMOVDconst { + // The "mark" is any non-nil Aux value. + v.Aux = ssa.AuxMark + } + if v.Type.IsFlags() { + flive = false + } + for _, a := range v.Args { + if a.Type.IsFlags() { + flive = true + } + } + } +} + +// loadByType returns the load instruction of the given type. +func loadByType(t *types.Type) obj.As { + if t.IsFloat() { + switch t.Size() { + case 4: + return s390x.AFMOVS + case 8: + return s390x.AFMOVD + } + } else { + switch t.Size() { + case 1: + if t.IsSigned() { + return s390x.AMOVB + } else { + return s390x.AMOVBZ + } + case 2: + if t.IsSigned() { + return s390x.AMOVH + } else { + return s390x.AMOVHZ + } + case 4: + if t.IsSigned() { + return s390x.AMOVW + } else { + return s390x.AMOVWZ + } + case 8: + return s390x.AMOVD + } + } + panic("bad load type") +} + +// storeByType returns the store instruction of the given type. +func storeByType(t *types.Type) obj.As { + width := t.Size() + if t.IsFloat() { + switch width { + case 4: + return s390x.AFMOVS + case 8: + return s390x.AFMOVD + } + } else { + switch width { + case 1: + return s390x.AMOVB + case 2: + return s390x.AMOVH + case 4: + return s390x.AMOVW + case 8: + return s390x.AMOVD + } + } + panic("bad store type") +} + +// moveByType returns the reg->reg move instruction of the given type. 
+func moveByType(t *types.Type) obj.As { + if t.IsFloat() { + return s390x.AFMOVD + } else { + switch t.Size() { + case 1: + if t.IsSigned() { + return s390x.AMOVB + } else { + return s390x.AMOVBZ + } + case 2: + if t.IsSigned() { + return s390x.AMOVH + } else { + return s390x.AMOVHZ + } + case 4: + if t.IsSigned() { + return s390x.AMOVW + } else { + return s390x.AMOVWZ + } + case 8: + return s390x.AMOVD + } + } + panic("bad load type") +} + +// opregreg emits instructions for +// +// dest := dest(To) op src(From) +// +// and also returns the created obj.Prog so it +// may be further adjusted (offset, scale, etc). +func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog { + p := s.Prog(op) + p.From.Type = obj.TYPE_REG + p.To.Type = obj.TYPE_REG + p.To.Reg = dest + p.From.Reg = src + return p +} + +// opregregimm emits instructions for +// +// dest := src(From) op off +// +// and also returns the created obj.Prog so it +// may be further adjusted (offset, scale, etc). +func opregregimm(s *ssagen.State, op obj.As, dest, src int16, off int64) *obj.Prog { + p := s.Prog(op) + p.From.Type = obj.TYPE_CONST + p.From.Offset = off + p.Reg = src + p.To.Reg = dest + p.To.Type = obj.TYPE_REG + return p +} + +func ssaGenValue(s *ssagen.State, v *ssa.Value) { + switch v.Op { + case ssa.OpS390XSLD, ssa.OpS390XSLW, + ssa.OpS390XSRD, ssa.OpS390XSRW, + ssa.OpS390XSRAD, ssa.OpS390XSRAW, + ssa.OpS390XRLLG, ssa.OpS390XRLL: + r := v.Reg() + r1 := v.Args[0].Reg() + r2 := v.Args[1].Reg() + if r2 == s390x.REG_R0 { + v.Fatalf("cannot use R0 as shift value %s", v.LongString()) + } + p := opregreg(s, v.Op.Asm(), r, r2) + if r != r1 { + p.Reg = r1 + } + case ssa.OpS390XRXSBG: + r2 := v.Args[1].Reg() + i := v.Aux.(s390x.RotateParams) + p := s.Prog(v.Op.Asm()) + p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(i.Start)} + p.AddRestSourceArgs([]obj.Addr{ + {Type: obj.TYPE_CONST, Offset: int64(i.End)}, + {Type: obj.TYPE_CONST, Offset: int64(i.Amount)}, + {Type: obj.TYPE_REG, Reg: 
r2}, + }) + p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()} + case ssa.OpS390XRISBGZ: + r1 := v.Reg() + r2 := v.Args[0].Reg() + i := v.Aux.(s390x.RotateParams) + p := s.Prog(v.Op.Asm()) + p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(i.Start)} + p.AddRestSourceArgs([]obj.Addr{ + {Type: obj.TYPE_CONST, Offset: int64(i.End)}, + {Type: obj.TYPE_CONST, Offset: int64(i.Amount)}, + {Type: obj.TYPE_REG, Reg: r2}, + }) + p.To = obj.Addr{Type: obj.TYPE_REG, Reg: r1} + case ssa.OpS390XADD, ssa.OpS390XADDW, + ssa.OpS390XSUB, ssa.OpS390XSUBW, + ssa.OpS390XAND, ssa.OpS390XANDW, + ssa.OpS390XOR, ssa.OpS390XORW, + ssa.OpS390XXOR, ssa.OpS390XXORW: + r := v.Reg() + r1 := v.Args[0].Reg() + r2 := v.Args[1].Reg() + p := opregreg(s, v.Op.Asm(), r, r2) + if r != r1 { + p.Reg = r1 + } + case ssa.OpS390XADDC: + r1 := v.Reg0() + r2 := v.Args[0].Reg() + r3 := v.Args[1].Reg() + if r1 == r2 { + r2, r3 = r3, r2 + } + p := opregreg(s, v.Op.Asm(), r1, r2) + if r3 != r1 { + p.Reg = r3 + } + case ssa.OpS390XSUBC: + r1 := v.Reg0() + r2 := v.Args[0].Reg() + r3 := v.Args[1].Reg() + p := opregreg(s, v.Op.Asm(), r1, r3) + if r1 != r2 { + p.Reg = r2 + } + case ssa.OpS390XADDE, ssa.OpS390XSUBE: + r2 := v.Args[1].Reg() + opregreg(s, v.Op.Asm(), v.Reg0(), r2) + case ssa.OpS390XADDCconst: + r1 := v.Reg0() + r3 := v.Args[0].Reg() + i2 := int64(int16(v.AuxInt)) + opregregimm(s, v.Op.Asm(), r1, r3, i2) + // 2-address opcode arithmetic + case ssa.OpS390XMULLD, ssa.OpS390XMULLW, + ssa.OpS390XMULHD, ssa.OpS390XMULHDU, + ssa.OpS390XFMULS, ssa.OpS390XFMUL, ssa.OpS390XFDIVS, ssa.OpS390XFDIV: + opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg()) + case ssa.OpS390XFSUBS, ssa.OpS390XFSUB, + ssa.OpS390XFADDS, ssa.OpS390XFADD: + opregreg(s, v.Op.Asm(), v.Reg0(), v.Args[1].Reg()) + case ssa.OpS390XMLGR: + // MLGR Rx R3 -> R2:R3 + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + if r1 != s390x.REG_R3 { + v.Fatalf("We require the multiplcand to be stored in R3 for MLGR %s", v.LongString()) + } + p := 
s.Prog(s390x.AMLGR) + p.From.Type = obj.TYPE_REG + p.From.Reg = r0 + p.To.Reg = s390x.REG_R2 + p.To.Type = obj.TYPE_REG + case ssa.OpS390XFMADD, ssa.OpS390XFMADDS, + ssa.OpS390XFMSUB, ssa.OpS390XFMSUBS: + r1 := v.Args[1].Reg() + r2 := v.Args[2].Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + p.Reg = r2 + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpS390XFIDBR: + switch v.AuxInt { + case 0, 1, 3, 4, 5, 6, 7: + opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt) + default: + v.Fatalf("invalid FIDBR mask: %v", v.AuxInt) + } + case ssa.OpS390XCPSDR: + p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg()) + p.Reg = v.Args[0].Reg() + case ssa.OpS390XDIVD, ssa.OpS390XDIVW, + ssa.OpS390XDIVDU, ssa.OpS390XDIVWU, + ssa.OpS390XMODD, ssa.OpS390XMODW, + ssa.OpS390XMODDU, ssa.OpS390XMODWU: + + // TODO(mundaym): use the temp registers every time like x86 does with AX? + dividend := v.Args[0].Reg() + divisor := v.Args[1].Reg() + + // CPU faults upon signed overflow, which occurs when most + // negative int is divided by -1. 
+ var j *obj.Prog + if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW || + v.Op == ssa.OpS390XMODD || v.Op == ssa.OpS390XMODW { + + var c *obj.Prog + c = s.Prog(s390x.ACMP) + j = s.Prog(s390x.ABEQ) + + c.From.Type = obj.TYPE_REG + c.From.Reg = divisor + c.To.Type = obj.TYPE_CONST + c.To.Offset = -1 + + j.To.Type = obj.TYPE_BRANCH + + } + + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = divisor + p.Reg = 0 + p.To.Type = obj.TYPE_REG + p.To.Reg = dividend + + // signed division, rest of the check for -1 case + if j != nil { + j2 := s.Prog(s390x.ABR) + j2.To.Type = obj.TYPE_BRANCH + + var n *obj.Prog + if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW { + // n * -1 = -n + n = s.Prog(s390x.ANEG) + n.To.Type = obj.TYPE_REG + n.To.Reg = dividend + } else { + // n % -1 == 0 + n = s.Prog(s390x.AXOR) + n.From.Type = obj.TYPE_REG + n.From.Reg = dividend + n.To.Type = obj.TYPE_REG + n.To.Reg = dividend + } + + j.To.SetTarget(n) + j2.To.SetTarget(s.Pc()) + } + case ssa.OpS390XADDconst, ssa.OpS390XADDWconst: + opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt) + case ssa.OpS390XMULLDconst, ssa.OpS390XMULLWconst, + ssa.OpS390XSUBconst, ssa.OpS390XSUBWconst, + ssa.OpS390XANDconst, ssa.OpS390XANDWconst, + ssa.OpS390XORconst, ssa.OpS390XORWconst, + ssa.OpS390XXORconst, ssa.OpS390XXORWconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpS390XSLDconst, ssa.OpS390XSLWconst, + ssa.OpS390XSRDconst, ssa.OpS390XSRWconst, + ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst, + ssa.OpS390XRLLconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + r := v.Reg() + r1 := v.Args[0].Reg() + if r != r1 { + p.Reg = r1 + } + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpS390XMOVDaddridx: + r := v.Args[0].Reg() + i := v.Args[1].Reg() + p := s.Prog(s390x.AMOVD) + p.From.Scale = 1 + if i == s390x.REGSP { + r, i = i, r + } + 
p.From.Type = obj.TYPE_ADDR + p.From.Reg = r + p.From.Index = i + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpS390XMOVDaddr: + p := s.Prog(s390x.AMOVD) + p.From.Type = obj.TYPE_ADDR + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU: + opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg()) + case ssa.OpS390XFCMPS, ssa.OpS390XFCMP: + opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg()) + case ssa.OpS390XCMPconst, ssa.OpS390XCMPWconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_CONST + p.To.Offset = v.AuxInt + case ssa.OpS390XCMPUconst, ssa.OpS390XCMPWUconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_CONST + p.To.Offset = int64(uint32(v.AuxInt)) + case ssa.OpS390XMOVDconst: + x := v.Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = x + case ssa.OpS390XFMOVSconst, ssa.OpS390XFMOVDconst: + x := v.Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_FCONST + p.From.Val = math.Float64frombits(uint64(v.AuxInt)) + p.To.Type = obj.TYPE_REG + p.To.Reg = x + case ssa.OpS390XADDWload, ssa.OpS390XADDload, + ssa.OpS390XMULLWload, ssa.OpS390XMULLDload, + ssa.OpS390XSUBWload, ssa.OpS390XSUBload, + ssa.OpS390XANDWload, ssa.OpS390XANDload, + ssa.OpS390XORWload, ssa.OpS390XORload, + ssa.OpS390XXORWload, ssa.OpS390XXORload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[1].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpS390XMOVDload, + ssa.OpS390XMOVWZload, ssa.OpS390XMOVHZload, ssa.OpS390XMOVBZload, + ssa.OpS390XMOVDBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVHBRload, + ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, 
ssa.OpS390XMOVWload, + ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx, + ssa.OpS390XMOVBloadidx, ssa.OpS390XMOVHloadidx, ssa.OpS390XMOVWloadidx, ssa.OpS390XMOVDloadidx, + ssa.OpS390XMOVHBRloadidx, ssa.OpS390XMOVWBRloadidx, ssa.OpS390XMOVDBRloadidx, + ssa.OpS390XFMOVSloadidx, ssa.OpS390XFMOVDloadidx: + r := v.Args[0].Reg() + i := v.Args[1].Reg() + if i == s390x.REGSP { + r, i = i, r + } + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = r + p.From.Scale = 1 + p.From.Index = i + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore, + ssa.OpS390XMOVHBRstore, ssa.OpS390XMOVWBRstore, ssa.OpS390XMOVDBRstore, + ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpS390XMOVBstoreidx, ssa.OpS390XMOVHstoreidx, ssa.OpS390XMOVWstoreidx, ssa.OpS390XMOVDstoreidx, + ssa.OpS390XMOVHBRstoreidx, ssa.OpS390XMOVWBRstoreidx, ssa.OpS390XMOVDBRstoreidx, + ssa.OpS390XFMOVSstoreidx, ssa.OpS390XFMOVDstoreidx: + r := v.Args[0].Reg() + i := v.Args[1].Reg() + if i == s390x.REGSP { + r, i = i, r + } + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[2].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = r + p.To.Scale = 1 + p.To.Index = i + ssagen.AddAux(&p.To, v) + case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + sc := v.AuxValAndOff() + p.From.Offset = sc.Val64() + p.To.Type = obj.TYPE_MEM + p.To.Reg = 
v.Args[0].Reg() + ssagen.AddAux2(&p.To, v, sc.Off64()) + case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg, + ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg, + ssa.OpS390XLDGR, ssa.OpS390XLGDR, + ssa.OpS390XCEFBRA, ssa.OpS390XCDFBRA, ssa.OpS390XCEGBRA, ssa.OpS390XCDGBRA, + ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA, + ssa.OpS390XCELFBR, ssa.OpS390XCDLFBR, ssa.OpS390XCELGBR, ssa.OpS390XCDLGBR, + ssa.OpS390XCLFEBR, ssa.OpS390XCLFDBR, ssa.OpS390XCLGEBR, ssa.OpS390XCLGDBR, + ssa.OpS390XLDEBR, ssa.OpS390XLEDBR, + ssa.OpS390XFNEG, ssa.OpS390XFNEGS, + ssa.OpS390XLPDFR, ssa.OpS390XLNDFR: + opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg()) + case ssa.OpS390XCLEAR: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + sc := v.AuxValAndOff() + p.From.Offset = sc.Val64() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux2(&p.To, v, sc.Off64()) + case ssa.OpCopy: + if v.Type.IsMemory() { + return + } + x := v.Args[0].Reg() + y := v.Reg() + if x != y { + opregreg(s, moveByType(v.Type), y, x) + } + case ssa.OpLoadReg: + if v.Type.IsFlags() { + v.Fatalf("load flags not implemented: %v", v.LongString()) + return + } + p := s.Prog(loadByType(v.Type)) + ssagen.AddrAuto(&p.From, v.Args[0]) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpStoreReg: + if v.Type.IsFlags() { + v.Fatalf("store flags not implemented: %v", v.LongString()) + return + } + p := s.Prog(storeByType(v.Type)) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + ssagen.AddrAuto(&p.To, v) + case ssa.OpS390XLoweredGetClosurePtr: + // Closure pointer is R12 (already) + ssagen.CheckLoweredGetClosurePtr(v) + case ssa.OpS390XLoweredRound32F, ssa.OpS390XLoweredRound64F: + // input is already rounded + case ssa.OpS390XLoweredGetG: + r := v.Reg() + p := s.Prog(s390x.AMOVD) + p.From.Type = obj.TYPE_REG + p.From.Reg = s390x.REGG + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpS390XLoweredGetCallerSP: + // caller's 
SP is FixedFrameSize below the address of the first arg + p := s.Prog(s390x.AMOVD) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = -base.Ctxt.Arch.FixedFrameSize + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpS390XLoweredGetCallerPC: + p := s.Prog(obj.AGETCALLERPC) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpS390XCALLstatic, ssa.OpS390XCALLclosure, ssa.OpS390XCALLinter: + s.Call(v) + case ssa.OpS390XCALLtail: + s.TailCall(v) + case ssa.OpS390XLoweredWB: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + // AuxInt encodes how many buffer entries we need. + p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1] + case ssa.OpS390XLoweredPanicBoundsA, ssa.OpS390XLoweredPanicBoundsB, ssa.OpS390XLoweredPanicBoundsC: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] + s.UseArgs(16) // space used in callee args area by assembly stubs + case ssa.OpS390XFLOGR, ssa.OpS390XPOPCNT, + ssa.OpS390XNEG, ssa.OpS390XNEGW, + ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpS390XNOT, ssa.OpS390XNOTW: + v.Fatalf("NOT/NOTW generated %s", v.LongString()) + case ssa.OpS390XSumBytes2, ssa.OpS390XSumBytes4, ssa.OpS390XSumBytes8: + v.Fatalf("SumBytes generated %s", v.LongString()) + case ssa.OpS390XLOCGR: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(v.Aux.(s390x.CCMask)) + p.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpS390XFSQRTS, ssa.OpS390XFSQRT: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpS390XLTDBR, ssa.OpS390XLTEBR: + opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[0].Reg()) + case ssa.OpS390XInvertFlags: + 
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) + case ssa.OpS390XFlagEQ, ssa.OpS390XFlagLT, ssa.OpS390XFlagGT, ssa.OpS390XFlagOV: + v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString()) + case ssa.OpS390XAddTupleFirst32, ssa.OpS390XAddTupleFirst64: + v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString()) + case ssa.OpS390XLoweredNilCheck: + // Issue a load which will fault if the input is nil. + p := s.Prog(s390x.AMOVBZ) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = s390x.REGTMP + if logopt.Enabled() { + logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) + } + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") + } + case ssa.OpS390XMVC: + vo := v.AuxValAndOff() + p := s.Prog(s390x.AMVC) + p.From.Type = obj.TYPE_CONST + p.From.Offset = vo.Val64() + p.AddRestSource(obj.Addr{ + Type: obj.TYPE_MEM, + Reg: v.Args[1].Reg(), + Offset: vo.Off64(), + }) + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.To.Offset = vo.Off64() + case ssa.OpS390XSTMG2, ssa.OpS390XSTMG3, ssa.OpS390XSTMG4, + ssa.OpS390XSTM2, ssa.OpS390XSTM3, ssa.OpS390XSTM4: + for i := 2; i < len(v.Args)-1; i++ { + if v.Args[i].Reg() != v.Args[i-1].Reg()+1 { + v.Fatalf("invalid store multiple %s", v.LongString()) + } + } + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.Reg = v.Args[len(v.Args)-2].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpS390XLoweredMove: + // Inputs must be valid pointers to memory, + // so adjust arg0 and arg1 as part of the expansion. 
+ // arg2 should be src+size, + // + // mvc: MVC $256, 0(R2), 0(R1) + // MOVD $256(R1), R1 + // MOVD $256(R2), R2 + // CMP R2, Rarg2 + // BNE mvc + // MVC $rem, 0(R2), 0(R1) // if rem > 0 + // arg2 is the last address to move in the loop + 256 + mvc := s.Prog(s390x.AMVC) + mvc.From.Type = obj.TYPE_CONST + mvc.From.Offset = 256 + mvc.AddRestSource(obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()}) + mvc.To.Type = obj.TYPE_MEM + mvc.To.Reg = v.Args[0].Reg() + + for i := 0; i < 2; i++ { + movd := s.Prog(s390x.AMOVD) + movd.From.Type = obj.TYPE_ADDR + movd.From.Reg = v.Args[i].Reg() + movd.From.Offset = 256 + movd.To.Type = obj.TYPE_REG + movd.To.Reg = v.Args[i].Reg() + } + + cmpu := s.Prog(s390x.ACMPU) + cmpu.From.Reg = v.Args[1].Reg() + cmpu.From.Type = obj.TYPE_REG + cmpu.To.Reg = v.Args[2].Reg() + cmpu.To.Type = obj.TYPE_REG + + bne := s.Prog(s390x.ABLT) + bne.To.Type = obj.TYPE_BRANCH + bne.To.SetTarget(mvc) + + if v.AuxInt > 0 { + mvc := s.Prog(s390x.AMVC) + mvc.From.Type = obj.TYPE_CONST + mvc.From.Offset = v.AuxInt + mvc.AddRestSource(obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()}) + mvc.To.Type = obj.TYPE_MEM + mvc.To.Reg = v.Args[0].Reg() + } + case ssa.OpS390XLoweredZero: + // Input must be valid pointers to memory, + // so adjust arg0 as part of the expansion. 
+ // arg1 should be src+size, + // + // clear: CLEAR $256, 0(R1) + // MOVD $256(R1), R1 + // CMP R1, Rarg1 + // BNE clear + // CLEAR $rem, 0(R1) // if rem > 0 + // arg1 is the last address to zero in the loop + 256 + clear := s.Prog(s390x.ACLEAR) + clear.From.Type = obj.TYPE_CONST + clear.From.Offset = 256 + clear.To.Type = obj.TYPE_MEM + clear.To.Reg = v.Args[0].Reg() + + movd := s.Prog(s390x.AMOVD) + movd.From.Type = obj.TYPE_ADDR + movd.From.Reg = v.Args[0].Reg() + movd.From.Offset = 256 + movd.To.Type = obj.TYPE_REG + movd.To.Reg = v.Args[0].Reg() + + cmpu := s.Prog(s390x.ACMPU) + cmpu.From.Reg = v.Args[0].Reg() + cmpu.From.Type = obj.TYPE_REG + cmpu.To.Reg = v.Args[1].Reg() + cmpu.To.Type = obj.TYPE_REG + + bne := s.Prog(s390x.ABLT) + bne.To.Type = obj.TYPE_BRANCH + bne.To.SetTarget(clear) + + if v.AuxInt > 0 { + clear := s.Prog(s390x.ACLEAR) + clear.From.Type = obj.TYPE_CONST + clear.From.Offset = v.AuxInt + clear.To.Type = obj.TYPE_MEM + clear.To.Reg = v.Args[0].Reg() + } + case ssa.OpS390XMOVBZatomicload, ssa.OpS390XMOVWZatomicload, ssa.OpS390XMOVDatomicload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + case ssa.OpS390XMOVBatomicstore, ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpS390XLAN, ssa.OpS390XLAO: + // LA(N|O) Ry, TMP, 0(Rx) + op := s.Prog(v.Op.Asm()) + op.From.Type = obj.TYPE_REG + op.From.Reg = v.Args[1].Reg() + op.Reg = s390x.REGTMP + op.To.Type = obj.TYPE_MEM + op.To.Reg = v.Args[0].Reg() + case ssa.OpS390XLANfloor, ssa.OpS390XLAOfloor: + r := v.Args[0].Reg() // clobbered, assumed R1 in comments + + // Round ptr down to nearest multiple of 4. 
+ // ANDW $~3, R1 + ptr := s.Prog(s390x.AANDW) + ptr.From.Type = obj.TYPE_CONST + ptr.From.Offset = 0xfffffffc + ptr.To.Type = obj.TYPE_REG + ptr.To.Reg = r + + // Redirect output of LA(N|O) into R1 since it is clobbered anyway. + // LA(N|O) Rx, R1, 0(R1) + op := s.Prog(v.Op.Asm()) + op.From.Type = obj.TYPE_REG + op.From.Reg = v.Args[1].Reg() + op.Reg = r + op.To.Type = obj.TYPE_MEM + op.To.Reg = r + case ssa.OpS390XLAA, ssa.OpS390XLAAG: + p := s.Prog(v.Op.Asm()) + p.Reg = v.Reg0() + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + case ssa.OpS390XLoweredAtomicCas32, ssa.OpS390XLoweredAtomicCas64: + // Convert the flags output of CS{,G} into a bool. + // CS{,G} arg1, arg2, arg0 + // MOVD $0, ret + // BNE 2(PC) + // MOVD $1, ret + // NOP (so the BNE has somewhere to land) + + // CS{,G} arg1, arg2, arg0 + cs := s.Prog(v.Op.Asm()) + cs.From.Type = obj.TYPE_REG + cs.From.Reg = v.Args[1].Reg() // old + cs.Reg = v.Args[2].Reg() // new + cs.To.Type = obj.TYPE_MEM + cs.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&cs.To, v) + + // MOVD $0, ret + movd := s.Prog(s390x.AMOVD) + movd.From.Type = obj.TYPE_CONST + movd.From.Offset = 0 + movd.To.Type = obj.TYPE_REG + movd.To.Reg = v.Reg0() + + // BNE 2(PC) + bne := s.Prog(s390x.ABNE) + bne.To.Type = obj.TYPE_BRANCH + + // MOVD $1, ret + movd = s.Prog(s390x.AMOVD) + movd.From.Type = obj.TYPE_CONST + movd.From.Offset = 1 + movd.To.Type = obj.TYPE_REG + movd.To.Reg = v.Reg0() + + // NOP (so the BNE has somewhere to land) + nop := s.Prog(obj.ANOP) + bne.To.SetTarget(nop) + case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64: + // Loop until the CS{,G} succeeds. 
+ // MOV{WZ,D} arg0, ret + // cs: CS{,G} ret, arg1, arg0 + // BNE cs + + // MOV{WZ,D} arg0, ret + load := s.Prog(loadByType(v.Type.FieldType(0))) + load.From.Type = obj.TYPE_MEM + load.From.Reg = v.Args[0].Reg() + load.To.Type = obj.TYPE_REG + load.To.Reg = v.Reg0() + ssagen.AddAux(&load.From, v) + + // CS{,G} ret, arg1, arg0 + cs := s.Prog(v.Op.Asm()) + cs.From.Type = obj.TYPE_REG + cs.From.Reg = v.Reg0() // old + cs.Reg = v.Args[1].Reg() // new + cs.To.Type = obj.TYPE_MEM + cs.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&cs.To, v) + + // BNE cs + bne := s.Prog(s390x.ABNE) + bne.To.Type = obj.TYPE_BRANCH + bne.To.SetTarget(cs) + case ssa.OpS390XSYNC: + s.Prog(s390x.ASYNC) + case ssa.OpClobber, ssa.OpClobberReg: + // TODO: implement for clobberdead experiment. Nop is ok for now. + default: + v.Fatalf("genValue not implemented: %s", v.LongString()) + } +} + +func blockAsm(b *ssa.Block) obj.As { + switch b.Kind { + case ssa.BlockS390XBRC: + return s390x.ABRC + case ssa.BlockS390XCRJ: + return s390x.ACRJ + case ssa.BlockS390XCGRJ: + return s390x.ACGRJ + case ssa.BlockS390XCLRJ: + return s390x.ACLRJ + case ssa.BlockS390XCLGRJ: + return s390x.ACLGRJ + case ssa.BlockS390XCIJ: + return s390x.ACIJ + case ssa.BlockS390XCGIJ: + return s390x.ACGIJ + case ssa.BlockS390XCLIJ: + return s390x.ACLIJ + case ssa.BlockS390XCLGIJ: + return s390x.ACLGIJ + } + b.Fatalf("blockAsm not implemented: %s", b.LongString()) + panic("unreachable") +} + +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { + // Handle generic blocks first. 
+ switch b.Kind { + case ssa.BlockPlain: + if b.Succs[0].Block() != next { + p := s.Prog(s390x.ABR) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) + } + return + case ssa.BlockDefer: + // defer returns in R3: + // 0 if we should continue executing + // 1 if we should jump to deferreturn call + p := s.Br(s390x.ACIJ, b.Succs[1].Block()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(s390x.NotEqual & s390x.NotUnordered) // unordered is not possible + p.Reg = s390x.REG_R3 + p.AddRestSourceConst(0) + if b.Succs[0].Block() != next { + s.Br(s390x.ABR, b.Succs[0].Block()) + } + return + case ssa.BlockExit, ssa.BlockRetJmp: + return + case ssa.BlockRet: + s.Prog(obj.ARET) + return + } + + // Handle s390x-specific blocks. These blocks all have a + // condition code mask in the Aux value and 2 successors. + succs := [...]*ssa.Block{b.Succs[0].Block(), b.Succs[1].Block()} + mask := b.Aux.(s390x.CCMask) + + // TODO: take into account Likely property for forward/backward + // branches. We currently can't do this because we don't know + // whether a block has already been emitted. In general forward + // branches are assumed 'not taken' and backward branches are + // assumed 'taken'. 
+ if next == succs[0] { + succs[0], succs[1] = succs[1], succs[0] + mask = mask.Inverse() + } + + p := s.Br(blockAsm(b), succs[0]) + switch b.Kind { + case ssa.BlockS390XBRC: + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(mask) + case ssa.BlockS390XCGRJ, ssa.BlockS390XCRJ, + ssa.BlockS390XCLGRJ, ssa.BlockS390XCLRJ: + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible + p.Reg = b.Controls[0].Reg() + p.AddRestSourceReg(b.Controls[1].Reg()) + case ssa.BlockS390XCGIJ, ssa.BlockS390XCIJ: + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible + p.Reg = b.Controls[0].Reg() + p.AddRestSourceConst(int64(int8(b.AuxInt))) + case ssa.BlockS390XCLGIJ, ssa.BlockS390XCLIJ: + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible + p.Reg = b.Controls[0].Reg() + p.AddRestSourceConst(int64(uint8(b.AuxInt))) + default: + b.Fatalf("branch not implemented: %s", b.LongString()) + } + if next != succs[1] { + s.Br(s390x.ABR, succs[1]) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/README.md b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5dc4fbee6fe3ccb72e744d4a40501c80fc9be724 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/README.md @@ -0,0 +1,222 @@ + + +## Introduction to the Go compiler's SSA backend + +This package contains the compiler's Static Single Assignment form component. If +you're not familiar with SSA, its [Wikipedia +article](https://en.wikipedia.org/wiki/Static_single_assignment_form) is a good +starting point. + +It is recommended that you first read [cmd/compile/README.md](../../README.md) +if you are not familiar with the Go compiler already. That document gives an +overview of the compiler, and explains what is SSA's part and purpose in it. 
+ +### Key concepts + +The names described below may be loosely related to their Go counterparts, but +note that they are not equivalent. For example, a Go block statement has a +variable scope, yet SSA has no notion of variables nor variable scopes. + +It may also be surprising that values and blocks are named after their unique +sequential IDs. They rarely correspond to named entities in the original code, +such as variables or function parameters. The sequential IDs also allow the +compiler to avoid maps, and it is always possible to track back the values to Go +code using debug and position information. + +#### Values + +Values are the basic building blocks of SSA. Per SSA's very definition, a +value is defined exactly once, but it may be used any number of times. A value +mainly consists of a unique identifier, an operator, a type, and some arguments. + +An operator or `Op` describes the operation that computes the value. The +semantics of each operator can be found in `_gen/*Ops.go`. For example, `OpAdd8` +takes two value arguments holding 8-bit integers and results in their addition. +Here is a possible SSA representation of the addition of two `uint8` values: + + // var c uint8 = a + b + v4 = Add8 v2 v3 + +A value's type will usually be a Go type. For example, the value in the example +above has a `uint8` type, and a constant boolean value will have a `bool` type. +However, certain types don't come from Go and are special; below we will cover +`memory`, the most common of them. + +See [value.go](value.go) for more information. + +#### Memory types + +`memory` represents the global memory state. An `Op` that takes a memory +argument depends on that memory state, and an `Op` which has the memory type +impacts the state of memory. This ensures that memory operations are kept in the +right order. 
For example: + + // *a = 3 + // *b = *a + v10 = Store {int} v6 v8 v1 + v14 = Store {int} v7 v8 v10 + +Here, `Store` stores its second argument (of type `int`) into the first argument +(of type `*int`). The last argument is the memory state; since the second store +depends on the memory value defined by the first store, the two stores cannot be +reordered. + +See [cmd/compile/internal/types/type.go](../types/type.go) for more information. + +#### Blocks + +A block represents a basic block in the control flow graph of a function. It is, +essentially, a list of values that define the operation of this block. Besides +the list of values, blocks mainly consist of a unique identifier, a kind, and a +list of successor blocks. + +The simplest kind is a `plain` block; it simply hands the control flow to +another block, thus its successors list contains one block. + +Another common block kind is the `exit` block. These have a final value, called +control value, which must return a memory state. This is necessary for functions +to return some values, for example - the caller needs some memory state to +depend on, to ensure that it receives those return values correctly. + +The last important block kind we will mention is the `if` block. It has a single +control value that must be a boolean value, and it has exactly two successor +blocks. The control flow is handed to the first successor if the bool is true, +and to the second otherwise. + +Here is a sample if-else control flow represented with basic blocks: + + // func(b bool) int { + // if b { + // return 2 + // } + // return 3 + // } + b1: + v1 = InitMem + v2 = SP + v5 = Addr <*int> {~r1} v2 + v6 = Arg {b} + v8 = Const64 [2] + v12 = Const64 [3] + If v6 -> b2 b3 + b2: <- b1 + v10 = VarDef {~r1} v1 + v11 = Store {int} v5 v8 v10 + Ret v11 + b3: <- b1 + v14 = VarDef {~r1} v1 + v15 = Store {int} v5 v12 v14 + Ret v15 + + + +See [block.go](block.go) for more information. 
+ +#### Functions + +A function represents a function declaration along with its body. It mainly +consists of a name, a type (its signature), a list of blocks that form its body, +and the entry block within said list. + +When a function is called, the control flow is handed to its entry block. If the +function terminates, the control flow will eventually reach an exit block, thus +ending the function call. + +Note that a function may have zero or multiple exit blocks, just like a Go +function can have any number of return points, but it must have exactly one +entry point block. + +Also note that some SSA functions are autogenerated, such as the hash functions +for each type used as a map key. + +For example, this is what an empty function can look like in SSA, with a single +exit block that returns an uninteresting memory state: + + foo func() + b1: + v1 = InitMem + Ret v1 + +See [func.go](func.go) for more information. + +### Compiler passes + +Having a program in SSA form is not very useful on its own. Its advantage lies +in how easy it is to write optimizations that modify the program to make it +better. The way the Go compiler accomplishes this is via a list of passes. + +Each pass transforms a SSA function in some way. For example, a dead code +elimination pass will remove blocks and values that it can prove will never be +executed, and a nil check elimination pass will remove nil checks which it can +prove to be redundant. + +Compiler passes work on one function at a time, and by default run sequentially +and exactly once. + +The `lower` pass is special; it converts the SSA representation from being +machine-independent to being machine-dependent. That is, some abstract operators +are replaced with their non-generic counterparts, potentially reducing or +increasing the final number of values. + + + +See the `passes` list defined in [compile.go](compile.go) for more information. 
+ +### Playing with SSA + +A good way to see and get used to the compiler's SSA in action is via +`GOSSAFUNC`. For example, to see func `Foo`'s initial SSA form and final +generated assembly, one can run: + + GOSSAFUNC=Foo go build + +The generated `ssa.html` file will also contain the SSA func at each of the +compile passes, making it easy to see what each pass does to a particular +program. You can also click on values and blocks to highlight them, to help +follow the control flow and values. + +The value specified in GOSSAFUNC can also be a package-qualified function +name, e.g. + + GOSSAFUNC=blah.Foo go build + +This will match any function named "Foo" within a package whose final +suffix is "blah" (e.g. something/blah.Foo, anotherthing/extra/blah.Foo). + +If non-HTML dumps are needed, append a "+" to the GOSSAFUNC value +and dumps will be written to stdout: + + GOSSAFUNC=Bar+ go build + + + +### Hacking on SSA + +While most compiler passes are implemented directly in Go code, some others are +code generated. This is currently done via rewrite rules, which have their own +syntax and are maintained in `_gen/*.rules`. Simpler optimizations can be written +easily and quickly this way, but rewrite rules are not suitable for more complex +optimizations. + +To read more on rewrite rules, have a look at the top comments in +[_gen/generic.rules](_gen/generic.rules) and [_gen/rulegen.go](_gen/rulegen.go). + +Similarly, the code to manage operators is also code generated from +`_gen/*Ops.go`, as it is easier to maintain a few tables than a lot of code. +After changing the rules or operators, run `go generate cmd/compile/internal/ssa` +to generate the Go code again. 
+ + diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/TODO b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/TODO new file mode 100644 index 0000000000000000000000000000000000000000..f4e438258c1c4d90c6106ae97070fcd6d6787a57 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/TODO @@ -0,0 +1,24 @@ +This is a list of possible improvements to the SSA pass of the compiler. + +Optimizations (better compiled code) +------------------------------------ +- Reduce register pressure in scheduler +- Make dead store pass inter-block +- If there are a lot of MOVQ $0, ..., then load + 0 into a register and use the register as the source instead. +- Allow large structs to be SSAable (issue 24416) +- Allow arrays of length >1 to be SSAable +- If strings are being passed around without being interpreted (ptr + and len fields being accessed) pass them in xmm registers? + Same for interfaces? +- any pointer generated by unsafe arithmetic must be non-nil? + (Of course that may not be true in general, but it is for all uses + in the runtime, and we can play games with unsafe.) + +Optimizations (better compiler) +------------------------------- +- Handle signed division overflow and sign extension earlier + +Regalloc +-------- +- Make liveness analysis non-quadratic diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/addressingmodes.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/addressingmodes.go new file mode 100644 index 0000000000000000000000000000000000000000..4e3209e396b5af2266494b95727cca2ce77ab9f9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/addressingmodes.go @@ -0,0 +1,518 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +// addressingModes combines address calculations into memory operations +// that can perform complicated addressing modes. +func addressingModes(f *Func) { + isInImmediateRange := is32Bit + switch f.Config.arch { + default: + // Most architectures can't do this. + return + case "amd64", "386": + case "s390x": + isInImmediateRange = is20Bit + } + + var tmp []*Value + for _, b := range f.Blocks { + for _, v := range b.Values { + if !combineFirst[v.Op] { + continue + } + // All matched operations have the pointer in arg[0]. + // All results have the pointer in arg[0] and the index in arg[1]. + // *Except* for operations which update a register, + // which are marked with resultInArg0. Those have + // the pointer in arg[1], and the corresponding result op + // has the pointer in arg[1] and the index in arg[2]. + ptrIndex := 0 + if opcodeTable[v.Op].resultInArg0 { + ptrIndex = 1 + } + p := v.Args[ptrIndex] + c, ok := combine[[2]Op{v.Op, p.Op}] + if !ok { + continue + } + // See if we can combine the Aux/AuxInt values. 
+ switch [2]auxType{opcodeTable[v.Op].auxType, opcodeTable[p.Op].auxType} { + case [2]auxType{auxSymOff, auxInt32}: + // TODO: introduce auxSymOff32 + if !isInImmediateRange(v.AuxInt + p.AuxInt) { + continue + } + v.AuxInt += p.AuxInt + case [2]auxType{auxSymOff, auxSymOff}: + if v.Aux != nil && p.Aux != nil { + continue + } + if !isInImmediateRange(v.AuxInt + p.AuxInt) { + continue + } + if p.Aux != nil { + v.Aux = p.Aux + } + v.AuxInt += p.AuxInt + case [2]auxType{auxSymValAndOff, auxInt32}: + vo := ValAndOff(v.AuxInt) + if !vo.canAdd64(p.AuxInt) { + continue + } + v.AuxInt = int64(vo.addOffset64(p.AuxInt)) + case [2]auxType{auxSymValAndOff, auxSymOff}: + vo := ValAndOff(v.AuxInt) + if v.Aux != nil && p.Aux != nil { + continue + } + if !vo.canAdd64(p.AuxInt) { + continue + } + if p.Aux != nil { + v.Aux = p.Aux + } + v.AuxInt = int64(vo.addOffset64(p.AuxInt)) + case [2]auxType{auxSymOff, auxNone}: + // nothing to do + case [2]auxType{auxSymValAndOff, auxNone}: + // nothing to do + default: + f.Fatalf("unknown aux combining for %s and %s\n", v.Op, p.Op) + } + // Combine the operations. + tmp = append(tmp[:0], v.Args[:ptrIndex]...) + tmp = append(tmp, p.Args...) + tmp = append(tmp, v.Args[ptrIndex+1:]...) + v.resetArgs() + v.Op = c + v.AddArgs(tmp...) + if needSplit[c] { + // It turns out that some of the combined instructions have faster two-instruction equivalents, + // but not the two instructions that led to them being combined here. For example + // (CMPBconstload c (ADDQ x y)) -> (CMPBconstloadidx1 c x y) -> (CMPB c (MOVBloadidx1 x y)) + // The final pair of instructions turns out to be notably faster, at least in some benchmarks. + f.Config.splitLoad(v) + } + } + } +} + +// combineFirst contains ops which appear in combine as the +// first part of the key. 
+var combineFirst = map[Op]bool{} + +func init() { + for k := range combine { + combineFirst[k[0]] = true + } +} + +// needSplit contains instructions that should be postprocessed by splitLoad +// into a more-efficient two-instruction form. +var needSplit = map[Op]bool{ + OpAMD64CMPBloadidx1: true, + OpAMD64CMPWloadidx1: true, + OpAMD64CMPLloadidx1: true, + OpAMD64CMPQloadidx1: true, + OpAMD64CMPWloadidx2: true, + OpAMD64CMPLloadidx4: true, + OpAMD64CMPQloadidx8: true, + + OpAMD64CMPBconstloadidx1: true, + OpAMD64CMPWconstloadidx1: true, + OpAMD64CMPLconstloadidx1: true, + OpAMD64CMPQconstloadidx1: true, + OpAMD64CMPWconstloadidx2: true, + OpAMD64CMPLconstloadidx4: true, + OpAMD64CMPQconstloadidx8: true, +} + +// For each entry k, v in this map, if we have a value x with: +// +// x.Op == k[0] +// x.Args[0].Op == k[1] +// +// then we can set x.Op to v and set x.Args like this: +// +// x.Args[0].Args + x.Args[1:] +// +// Additionally, the Aux/AuxInt from x.Args[0] is merged into x. +var combine = map[[2]Op]Op{ + // amd64 + [2]Op{OpAMD64MOVBload, OpAMD64ADDQ}: OpAMD64MOVBloadidx1, + [2]Op{OpAMD64MOVWload, OpAMD64ADDQ}: OpAMD64MOVWloadidx1, + [2]Op{OpAMD64MOVLload, OpAMD64ADDQ}: OpAMD64MOVLloadidx1, + [2]Op{OpAMD64MOVQload, OpAMD64ADDQ}: OpAMD64MOVQloadidx1, + [2]Op{OpAMD64MOVSSload, OpAMD64ADDQ}: OpAMD64MOVSSloadidx1, + [2]Op{OpAMD64MOVSDload, OpAMD64ADDQ}: OpAMD64MOVSDloadidx1, + + [2]Op{OpAMD64MOVBstore, OpAMD64ADDQ}: OpAMD64MOVBstoreidx1, + [2]Op{OpAMD64MOVWstore, OpAMD64ADDQ}: OpAMD64MOVWstoreidx1, + [2]Op{OpAMD64MOVLstore, OpAMD64ADDQ}: OpAMD64MOVLstoreidx1, + [2]Op{OpAMD64MOVQstore, OpAMD64ADDQ}: OpAMD64MOVQstoreidx1, + [2]Op{OpAMD64MOVSSstore, OpAMD64ADDQ}: OpAMD64MOVSSstoreidx1, + [2]Op{OpAMD64MOVSDstore, OpAMD64ADDQ}: OpAMD64MOVSDstoreidx1, + + [2]Op{OpAMD64MOVBstoreconst, OpAMD64ADDQ}: OpAMD64MOVBstoreconstidx1, + [2]Op{OpAMD64MOVWstoreconst, OpAMD64ADDQ}: OpAMD64MOVWstoreconstidx1, + [2]Op{OpAMD64MOVLstoreconst, OpAMD64ADDQ}: OpAMD64MOVLstoreconstidx1, + 
[2]Op{OpAMD64MOVQstoreconst, OpAMD64ADDQ}: OpAMD64MOVQstoreconstidx1, + + [2]Op{OpAMD64MOVBload, OpAMD64LEAQ1}: OpAMD64MOVBloadidx1, + [2]Op{OpAMD64MOVWload, OpAMD64LEAQ1}: OpAMD64MOVWloadidx1, + [2]Op{OpAMD64MOVWload, OpAMD64LEAQ2}: OpAMD64MOVWloadidx2, + [2]Op{OpAMD64MOVLload, OpAMD64LEAQ1}: OpAMD64MOVLloadidx1, + [2]Op{OpAMD64MOVLload, OpAMD64LEAQ4}: OpAMD64MOVLloadidx4, + [2]Op{OpAMD64MOVLload, OpAMD64LEAQ8}: OpAMD64MOVLloadidx8, + [2]Op{OpAMD64MOVQload, OpAMD64LEAQ1}: OpAMD64MOVQloadidx1, + [2]Op{OpAMD64MOVQload, OpAMD64LEAQ8}: OpAMD64MOVQloadidx8, + [2]Op{OpAMD64MOVSSload, OpAMD64LEAQ1}: OpAMD64MOVSSloadidx1, + [2]Op{OpAMD64MOVSSload, OpAMD64LEAQ4}: OpAMD64MOVSSloadidx4, + [2]Op{OpAMD64MOVSDload, OpAMD64LEAQ1}: OpAMD64MOVSDloadidx1, + [2]Op{OpAMD64MOVSDload, OpAMD64LEAQ8}: OpAMD64MOVSDloadidx8, + + [2]Op{OpAMD64MOVBstore, OpAMD64LEAQ1}: OpAMD64MOVBstoreidx1, + [2]Op{OpAMD64MOVWstore, OpAMD64LEAQ1}: OpAMD64MOVWstoreidx1, + [2]Op{OpAMD64MOVWstore, OpAMD64LEAQ2}: OpAMD64MOVWstoreidx2, + [2]Op{OpAMD64MOVLstore, OpAMD64LEAQ1}: OpAMD64MOVLstoreidx1, + [2]Op{OpAMD64MOVLstore, OpAMD64LEAQ4}: OpAMD64MOVLstoreidx4, + [2]Op{OpAMD64MOVLstore, OpAMD64LEAQ8}: OpAMD64MOVLstoreidx8, + [2]Op{OpAMD64MOVQstore, OpAMD64LEAQ1}: OpAMD64MOVQstoreidx1, + [2]Op{OpAMD64MOVQstore, OpAMD64LEAQ8}: OpAMD64MOVQstoreidx8, + [2]Op{OpAMD64MOVSSstore, OpAMD64LEAQ1}: OpAMD64MOVSSstoreidx1, + [2]Op{OpAMD64MOVSSstore, OpAMD64LEAQ4}: OpAMD64MOVSSstoreidx4, + [2]Op{OpAMD64MOVSDstore, OpAMD64LEAQ1}: OpAMD64MOVSDstoreidx1, + [2]Op{OpAMD64MOVSDstore, OpAMD64LEAQ8}: OpAMD64MOVSDstoreidx8, + + [2]Op{OpAMD64MOVBstoreconst, OpAMD64LEAQ1}: OpAMD64MOVBstoreconstidx1, + [2]Op{OpAMD64MOVWstoreconst, OpAMD64LEAQ1}: OpAMD64MOVWstoreconstidx1, + [2]Op{OpAMD64MOVWstoreconst, OpAMD64LEAQ2}: OpAMD64MOVWstoreconstidx2, + [2]Op{OpAMD64MOVLstoreconst, OpAMD64LEAQ1}: OpAMD64MOVLstoreconstidx1, + [2]Op{OpAMD64MOVLstoreconst, OpAMD64LEAQ4}: OpAMD64MOVLstoreconstidx4, + [2]Op{OpAMD64MOVQstoreconst, OpAMD64LEAQ1}: 
OpAMD64MOVQstoreconstidx1, + [2]Op{OpAMD64MOVQstoreconst, OpAMD64LEAQ8}: OpAMD64MOVQstoreconstidx8, + + [2]Op{OpAMD64SETEQstore, OpAMD64LEAQ1}: OpAMD64SETEQstoreidx1, + [2]Op{OpAMD64SETNEstore, OpAMD64LEAQ1}: OpAMD64SETNEstoreidx1, + [2]Op{OpAMD64SETLstore, OpAMD64LEAQ1}: OpAMD64SETLstoreidx1, + [2]Op{OpAMD64SETLEstore, OpAMD64LEAQ1}: OpAMD64SETLEstoreidx1, + [2]Op{OpAMD64SETGstore, OpAMD64LEAQ1}: OpAMD64SETGstoreidx1, + [2]Op{OpAMD64SETGEstore, OpAMD64LEAQ1}: OpAMD64SETGEstoreidx1, + [2]Op{OpAMD64SETBstore, OpAMD64LEAQ1}: OpAMD64SETBstoreidx1, + [2]Op{OpAMD64SETBEstore, OpAMD64LEAQ1}: OpAMD64SETBEstoreidx1, + [2]Op{OpAMD64SETAstore, OpAMD64LEAQ1}: OpAMD64SETAstoreidx1, + [2]Op{OpAMD64SETAEstore, OpAMD64LEAQ1}: OpAMD64SETAEstoreidx1, + + // These instructions are re-split differently for performance, see needSplit above. + // TODO if 386 versions are created, also update needSplit and _gen/386splitload.rules + [2]Op{OpAMD64CMPBload, OpAMD64ADDQ}: OpAMD64CMPBloadidx1, + [2]Op{OpAMD64CMPWload, OpAMD64ADDQ}: OpAMD64CMPWloadidx1, + [2]Op{OpAMD64CMPLload, OpAMD64ADDQ}: OpAMD64CMPLloadidx1, + [2]Op{OpAMD64CMPQload, OpAMD64ADDQ}: OpAMD64CMPQloadidx1, + + [2]Op{OpAMD64CMPBload, OpAMD64LEAQ1}: OpAMD64CMPBloadidx1, + [2]Op{OpAMD64CMPWload, OpAMD64LEAQ1}: OpAMD64CMPWloadidx1, + [2]Op{OpAMD64CMPWload, OpAMD64LEAQ2}: OpAMD64CMPWloadidx2, + [2]Op{OpAMD64CMPLload, OpAMD64LEAQ1}: OpAMD64CMPLloadidx1, + [2]Op{OpAMD64CMPLload, OpAMD64LEAQ4}: OpAMD64CMPLloadidx4, + [2]Op{OpAMD64CMPQload, OpAMD64LEAQ1}: OpAMD64CMPQloadidx1, + [2]Op{OpAMD64CMPQload, OpAMD64LEAQ8}: OpAMD64CMPQloadidx8, + + [2]Op{OpAMD64CMPBconstload, OpAMD64ADDQ}: OpAMD64CMPBconstloadidx1, + [2]Op{OpAMD64CMPWconstload, OpAMD64ADDQ}: OpAMD64CMPWconstloadidx1, + [2]Op{OpAMD64CMPLconstload, OpAMD64ADDQ}: OpAMD64CMPLconstloadidx1, + [2]Op{OpAMD64CMPQconstload, OpAMD64ADDQ}: OpAMD64CMPQconstloadidx1, + + [2]Op{OpAMD64CMPBconstload, OpAMD64LEAQ1}: OpAMD64CMPBconstloadidx1, + [2]Op{OpAMD64CMPWconstload, OpAMD64LEAQ1}: 
OpAMD64CMPWconstloadidx1, + [2]Op{OpAMD64CMPWconstload, OpAMD64LEAQ2}: OpAMD64CMPWconstloadidx2, + [2]Op{OpAMD64CMPLconstload, OpAMD64LEAQ1}: OpAMD64CMPLconstloadidx1, + [2]Op{OpAMD64CMPLconstload, OpAMD64LEAQ4}: OpAMD64CMPLconstloadidx4, + [2]Op{OpAMD64CMPQconstload, OpAMD64LEAQ1}: OpAMD64CMPQconstloadidx1, + [2]Op{OpAMD64CMPQconstload, OpAMD64LEAQ8}: OpAMD64CMPQconstloadidx8, + + [2]Op{OpAMD64ADDLload, OpAMD64ADDQ}: OpAMD64ADDLloadidx1, + [2]Op{OpAMD64ADDQload, OpAMD64ADDQ}: OpAMD64ADDQloadidx1, + [2]Op{OpAMD64SUBLload, OpAMD64ADDQ}: OpAMD64SUBLloadidx1, + [2]Op{OpAMD64SUBQload, OpAMD64ADDQ}: OpAMD64SUBQloadidx1, + [2]Op{OpAMD64ANDLload, OpAMD64ADDQ}: OpAMD64ANDLloadidx1, + [2]Op{OpAMD64ANDQload, OpAMD64ADDQ}: OpAMD64ANDQloadidx1, + [2]Op{OpAMD64ORLload, OpAMD64ADDQ}: OpAMD64ORLloadidx1, + [2]Op{OpAMD64ORQload, OpAMD64ADDQ}: OpAMD64ORQloadidx1, + [2]Op{OpAMD64XORLload, OpAMD64ADDQ}: OpAMD64XORLloadidx1, + [2]Op{OpAMD64XORQload, OpAMD64ADDQ}: OpAMD64XORQloadidx1, + + [2]Op{OpAMD64ADDLload, OpAMD64LEAQ1}: OpAMD64ADDLloadidx1, + [2]Op{OpAMD64ADDLload, OpAMD64LEAQ4}: OpAMD64ADDLloadidx4, + [2]Op{OpAMD64ADDLload, OpAMD64LEAQ8}: OpAMD64ADDLloadidx8, + [2]Op{OpAMD64ADDQload, OpAMD64LEAQ1}: OpAMD64ADDQloadidx1, + [2]Op{OpAMD64ADDQload, OpAMD64LEAQ8}: OpAMD64ADDQloadidx8, + [2]Op{OpAMD64SUBLload, OpAMD64LEAQ1}: OpAMD64SUBLloadidx1, + [2]Op{OpAMD64SUBLload, OpAMD64LEAQ4}: OpAMD64SUBLloadidx4, + [2]Op{OpAMD64SUBLload, OpAMD64LEAQ8}: OpAMD64SUBLloadidx8, + [2]Op{OpAMD64SUBQload, OpAMD64LEAQ1}: OpAMD64SUBQloadidx1, + [2]Op{OpAMD64SUBQload, OpAMD64LEAQ8}: OpAMD64SUBQloadidx8, + [2]Op{OpAMD64ANDLload, OpAMD64LEAQ1}: OpAMD64ANDLloadidx1, + [2]Op{OpAMD64ANDLload, OpAMD64LEAQ4}: OpAMD64ANDLloadidx4, + [2]Op{OpAMD64ANDLload, OpAMD64LEAQ8}: OpAMD64ANDLloadidx8, + [2]Op{OpAMD64ANDQload, OpAMD64LEAQ1}: OpAMD64ANDQloadidx1, + [2]Op{OpAMD64ANDQload, OpAMD64LEAQ8}: OpAMD64ANDQloadidx8, + [2]Op{OpAMD64ORLload, OpAMD64LEAQ1}: OpAMD64ORLloadidx1, + [2]Op{OpAMD64ORLload, OpAMD64LEAQ4}: 
OpAMD64ORLloadidx4, + [2]Op{OpAMD64ORLload, OpAMD64LEAQ8}: OpAMD64ORLloadidx8, + [2]Op{OpAMD64ORQload, OpAMD64LEAQ1}: OpAMD64ORQloadidx1, + [2]Op{OpAMD64ORQload, OpAMD64LEAQ8}: OpAMD64ORQloadidx8, + [2]Op{OpAMD64XORLload, OpAMD64LEAQ1}: OpAMD64XORLloadidx1, + [2]Op{OpAMD64XORLload, OpAMD64LEAQ4}: OpAMD64XORLloadidx4, + [2]Op{OpAMD64XORLload, OpAMD64LEAQ8}: OpAMD64XORLloadidx8, + [2]Op{OpAMD64XORQload, OpAMD64LEAQ1}: OpAMD64XORQloadidx1, + [2]Op{OpAMD64XORQload, OpAMD64LEAQ8}: OpAMD64XORQloadidx8, + + [2]Op{OpAMD64ADDLmodify, OpAMD64ADDQ}: OpAMD64ADDLmodifyidx1, + [2]Op{OpAMD64ADDQmodify, OpAMD64ADDQ}: OpAMD64ADDQmodifyidx1, + [2]Op{OpAMD64SUBLmodify, OpAMD64ADDQ}: OpAMD64SUBLmodifyidx1, + [2]Op{OpAMD64SUBQmodify, OpAMD64ADDQ}: OpAMD64SUBQmodifyidx1, + [2]Op{OpAMD64ANDLmodify, OpAMD64ADDQ}: OpAMD64ANDLmodifyidx1, + [2]Op{OpAMD64ANDQmodify, OpAMD64ADDQ}: OpAMD64ANDQmodifyidx1, + [2]Op{OpAMD64ORLmodify, OpAMD64ADDQ}: OpAMD64ORLmodifyidx1, + [2]Op{OpAMD64ORQmodify, OpAMD64ADDQ}: OpAMD64ORQmodifyidx1, + [2]Op{OpAMD64XORLmodify, OpAMD64ADDQ}: OpAMD64XORLmodifyidx1, + [2]Op{OpAMD64XORQmodify, OpAMD64ADDQ}: OpAMD64XORQmodifyidx1, + + [2]Op{OpAMD64ADDLmodify, OpAMD64LEAQ1}: OpAMD64ADDLmodifyidx1, + [2]Op{OpAMD64ADDLmodify, OpAMD64LEAQ4}: OpAMD64ADDLmodifyidx4, + [2]Op{OpAMD64ADDLmodify, OpAMD64LEAQ8}: OpAMD64ADDLmodifyidx8, + [2]Op{OpAMD64ADDQmodify, OpAMD64LEAQ1}: OpAMD64ADDQmodifyidx1, + [2]Op{OpAMD64ADDQmodify, OpAMD64LEAQ8}: OpAMD64ADDQmodifyidx8, + [2]Op{OpAMD64SUBLmodify, OpAMD64LEAQ1}: OpAMD64SUBLmodifyidx1, + [2]Op{OpAMD64SUBLmodify, OpAMD64LEAQ4}: OpAMD64SUBLmodifyidx4, + [2]Op{OpAMD64SUBLmodify, OpAMD64LEAQ8}: OpAMD64SUBLmodifyidx8, + [2]Op{OpAMD64SUBQmodify, OpAMD64LEAQ1}: OpAMD64SUBQmodifyidx1, + [2]Op{OpAMD64SUBQmodify, OpAMD64LEAQ8}: OpAMD64SUBQmodifyidx8, + [2]Op{OpAMD64ANDLmodify, OpAMD64LEAQ1}: OpAMD64ANDLmodifyidx1, + [2]Op{OpAMD64ANDLmodify, OpAMD64LEAQ4}: OpAMD64ANDLmodifyidx4, + [2]Op{OpAMD64ANDLmodify, OpAMD64LEAQ8}: OpAMD64ANDLmodifyidx8, + 
[2]Op{OpAMD64ANDQmodify, OpAMD64LEAQ1}: OpAMD64ANDQmodifyidx1, + [2]Op{OpAMD64ANDQmodify, OpAMD64LEAQ8}: OpAMD64ANDQmodifyidx8, + [2]Op{OpAMD64ORLmodify, OpAMD64LEAQ1}: OpAMD64ORLmodifyidx1, + [2]Op{OpAMD64ORLmodify, OpAMD64LEAQ4}: OpAMD64ORLmodifyidx4, + [2]Op{OpAMD64ORLmodify, OpAMD64LEAQ8}: OpAMD64ORLmodifyidx8, + [2]Op{OpAMD64ORQmodify, OpAMD64LEAQ1}: OpAMD64ORQmodifyidx1, + [2]Op{OpAMD64ORQmodify, OpAMD64LEAQ8}: OpAMD64ORQmodifyidx8, + [2]Op{OpAMD64XORLmodify, OpAMD64LEAQ1}: OpAMD64XORLmodifyidx1, + [2]Op{OpAMD64XORLmodify, OpAMD64LEAQ4}: OpAMD64XORLmodifyidx4, + [2]Op{OpAMD64XORLmodify, OpAMD64LEAQ8}: OpAMD64XORLmodifyidx8, + [2]Op{OpAMD64XORQmodify, OpAMD64LEAQ1}: OpAMD64XORQmodifyidx1, + [2]Op{OpAMD64XORQmodify, OpAMD64LEAQ8}: OpAMD64XORQmodifyidx8, + + [2]Op{OpAMD64ADDLconstmodify, OpAMD64ADDQ}: OpAMD64ADDLconstmodifyidx1, + [2]Op{OpAMD64ADDQconstmodify, OpAMD64ADDQ}: OpAMD64ADDQconstmodifyidx1, + [2]Op{OpAMD64ANDLconstmodify, OpAMD64ADDQ}: OpAMD64ANDLconstmodifyidx1, + [2]Op{OpAMD64ANDQconstmodify, OpAMD64ADDQ}: OpAMD64ANDQconstmodifyidx1, + [2]Op{OpAMD64ORLconstmodify, OpAMD64ADDQ}: OpAMD64ORLconstmodifyidx1, + [2]Op{OpAMD64ORQconstmodify, OpAMD64ADDQ}: OpAMD64ORQconstmodifyidx1, + [2]Op{OpAMD64XORLconstmodify, OpAMD64ADDQ}: OpAMD64XORLconstmodifyidx1, + [2]Op{OpAMD64XORQconstmodify, OpAMD64ADDQ}: OpAMD64XORQconstmodifyidx1, + + [2]Op{OpAMD64ADDLconstmodify, OpAMD64LEAQ1}: OpAMD64ADDLconstmodifyidx1, + [2]Op{OpAMD64ADDLconstmodify, OpAMD64LEAQ4}: OpAMD64ADDLconstmodifyidx4, + [2]Op{OpAMD64ADDLconstmodify, OpAMD64LEAQ8}: OpAMD64ADDLconstmodifyidx8, + [2]Op{OpAMD64ADDQconstmodify, OpAMD64LEAQ1}: OpAMD64ADDQconstmodifyidx1, + [2]Op{OpAMD64ADDQconstmodify, OpAMD64LEAQ8}: OpAMD64ADDQconstmodifyidx8, + [2]Op{OpAMD64ANDLconstmodify, OpAMD64LEAQ1}: OpAMD64ANDLconstmodifyidx1, + [2]Op{OpAMD64ANDLconstmodify, OpAMD64LEAQ4}: OpAMD64ANDLconstmodifyidx4, + [2]Op{OpAMD64ANDLconstmodify, OpAMD64LEAQ8}: OpAMD64ANDLconstmodifyidx8, + [2]Op{OpAMD64ANDQconstmodify, 
OpAMD64LEAQ1}: OpAMD64ANDQconstmodifyidx1, + [2]Op{OpAMD64ANDQconstmodify, OpAMD64LEAQ8}: OpAMD64ANDQconstmodifyidx8, + [2]Op{OpAMD64ORLconstmodify, OpAMD64LEAQ1}: OpAMD64ORLconstmodifyidx1, + [2]Op{OpAMD64ORLconstmodify, OpAMD64LEAQ4}: OpAMD64ORLconstmodifyidx4, + [2]Op{OpAMD64ORLconstmodify, OpAMD64LEAQ8}: OpAMD64ORLconstmodifyidx8, + [2]Op{OpAMD64ORQconstmodify, OpAMD64LEAQ1}: OpAMD64ORQconstmodifyidx1, + [2]Op{OpAMD64ORQconstmodify, OpAMD64LEAQ8}: OpAMD64ORQconstmodifyidx8, + [2]Op{OpAMD64XORLconstmodify, OpAMD64LEAQ1}: OpAMD64XORLconstmodifyidx1, + [2]Op{OpAMD64XORLconstmodify, OpAMD64LEAQ4}: OpAMD64XORLconstmodifyidx4, + [2]Op{OpAMD64XORLconstmodify, OpAMD64LEAQ8}: OpAMD64XORLconstmodifyidx8, + [2]Op{OpAMD64XORQconstmodify, OpAMD64LEAQ1}: OpAMD64XORQconstmodifyidx1, + [2]Op{OpAMD64XORQconstmodify, OpAMD64LEAQ8}: OpAMD64XORQconstmodifyidx8, + + [2]Op{OpAMD64ADDSSload, OpAMD64LEAQ1}: OpAMD64ADDSSloadidx1, + [2]Op{OpAMD64ADDSSload, OpAMD64LEAQ4}: OpAMD64ADDSSloadidx4, + [2]Op{OpAMD64ADDSDload, OpAMD64LEAQ1}: OpAMD64ADDSDloadidx1, + [2]Op{OpAMD64ADDSDload, OpAMD64LEAQ8}: OpAMD64ADDSDloadidx8, + [2]Op{OpAMD64SUBSSload, OpAMD64LEAQ1}: OpAMD64SUBSSloadidx1, + [2]Op{OpAMD64SUBSSload, OpAMD64LEAQ4}: OpAMD64SUBSSloadidx4, + [2]Op{OpAMD64SUBSDload, OpAMD64LEAQ1}: OpAMD64SUBSDloadidx1, + [2]Op{OpAMD64SUBSDload, OpAMD64LEAQ8}: OpAMD64SUBSDloadidx8, + [2]Op{OpAMD64MULSSload, OpAMD64LEAQ1}: OpAMD64MULSSloadidx1, + [2]Op{OpAMD64MULSSload, OpAMD64LEAQ4}: OpAMD64MULSSloadidx4, + [2]Op{OpAMD64MULSDload, OpAMD64LEAQ1}: OpAMD64MULSDloadidx1, + [2]Op{OpAMD64MULSDload, OpAMD64LEAQ8}: OpAMD64MULSDloadidx8, + [2]Op{OpAMD64DIVSSload, OpAMD64LEAQ1}: OpAMD64DIVSSloadidx1, + [2]Op{OpAMD64DIVSSload, OpAMD64LEAQ4}: OpAMD64DIVSSloadidx4, + [2]Op{OpAMD64DIVSDload, OpAMD64LEAQ1}: OpAMD64DIVSDloadidx1, + [2]Op{OpAMD64DIVSDload, OpAMD64LEAQ8}: OpAMD64DIVSDloadidx8, + + [2]Op{OpAMD64SARXLload, OpAMD64ADDQ}: OpAMD64SARXLloadidx1, + [2]Op{OpAMD64SARXQload, OpAMD64ADDQ}: OpAMD64SARXQloadidx1, + 
[2]Op{OpAMD64SHLXLload, OpAMD64ADDQ}: OpAMD64SHLXLloadidx1, + [2]Op{OpAMD64SHLXQload, OpAMD64ADDQ}: OpAMD64SHLXQloadidx1, + [2]Op{OpAMD64SHRXLload, OpAMD64ADDQ}: OpAMD64SHRXLloadidx1, + [2]Op{OpAMD64SHRXQload, OpAMD64ADDQ}: OpAMD64SHRXQloadidx1, + + [2]Op{OpAMD64SARXLload, OpAMD64LEAQ1}: OpAMD64SARXLloadidx1, + [2]Op{OpAMD64SARXLload, OpAMD64LEAQ4}: OpAMD64SARXLloadidx4, + [2]Op{OpAMD64SARXLload, OpAMD64LEAQ8}: OpAMD64SARXLloadidx8, + [2]Op{OpAMD64SARXQload, OpAMD64LEAQ1}: OpAMD64SARXQloadidx1, + [2]Op{OpAMD64SARXQload, OpAMD64LEAQ8}: OpAMD64SARXQloadidx8, + [2]Op{OpAMD64SHLXLload, OpAMD64LEAQ1}: OpAMD64SHLXLloadidx1, + [2]Op{OpAMD64SHLXLload, OpAMD64LEAQ4}: OpAMD64SHLXLloadidx4, + [2]Op{OpAMD64SHLXLload, OpAMD64LEAQ8}: OpAMD64SHLXLloadidx8, + [2]Op{OpAMD64SHLXQload, OpAMD64LEAQ1}: OpAMD64SHLXQloadidx1, + [2]Op{OpAMD64SHLXQload, OpAMD64LEAQ8}: OpAMD64SHLXQloadidx8, + [2]Op{OpAMD64SHRXLload, OpAMD64LEAQ1}: OpAMD64SHRXLloadidx1, + [2]Op{OpAMD64SHRXLload, OpAMD64LEAQ4}: OpAMD64SHRXLloadidx4, + [2]Op{OpAMD64SHRXLload, OpAMD64LEAQ8}: OpAMD64SHRXLloadidx8, + [2]Op{OpAMD64SHRXQload, OpAMD64LEAQ1}: OpAMD64SHRXQloadidx1, + [2]Op{OpAMD64SHRXQload, OpAMD64LEAQ8}: OpAMD64SHRXQloadidx8, + + // amd64/v3 + [2]Op{OpAMD64MOVBELload, OpAMD64ADDQ}: OpAMD64MOVBELloadidx1, + [2]Op{OpAMD64MOVBEQload, OpAMD64ADDQ}: OpAMD64MOVBEQloadidx1, + [2]Op{OpAMD64MOVBELload, OpAMD64LEAQ1}: OpAMD64MOVBELloadidx1, + [2]Op{OpAMD64MOVBELload, OpAMD64LEAQ4}: OpAMD64MOVBELloadidx4, + [2]Op{OpAMD64MOVBELload, OpAMD64LEAQ8}: OpAMD64MOVBELloadidx8, + [2]Op{OpAMD64MOVBEQload, OpAMD64LEAQ1}: OpAMD64MOVBEQloadidx1, + [2]Op{OpAMD64MOVBEQload, OpAMD64LEAQ8}: OpAMD64MOVBEQloadidx8, + + [2]Op{OpAMD64MOVBEWstore, OpAMD64ADDQ}: OpAMD64MOVBEWstoreidx1, + [2]Op{OpAMD64MOVBELstore, OpAMD64ADDQ}: OpAMD64MOVBELstoreidx1, + [2]Op{OpAMD64MOVBEQstore, OpAMD64ADDQ}: OpAMD64MOVBEQstoreidx1, + [2]Op{OpAMD64MOVBEWstore, OpAMD64LEAQ1}: OpAMD64MOVBEWstoreidx1, + [2]Op{OpAMD64MOVBEWstore, OpAMD64LEAQ2}: OpAMD64MOVBEWstoreidx2, + 
[2]Op{OpAMD64MOVBELstore, OpAMD64LEAQ1}: OpAMD64MOVBELstoreidx1, + [2]Op{OpAMD64MOVBELstore, OpAMD64LEAQ4}: OpAMD64MOVBELstoreidx4, + [2]Op{OpAMD64MOVBELstore, OpAMD64LEAQ8}: OpAMD64MOVBELstoreidx8, + [2]Op{OpAMD64MOVBEQstore, OpAMD64LEAQ1}: OpAMD64MOVBEQstoreidx1, + [2]Op{OpAMD64MOVBEQstore, OpAMD64LEAQ8}: OpAMD64MOVBEQstoreidx8, + + // 386 + [2]Op{Op386MOVBload, Op386ADDL}: Op386MOVBloadidx1, + [2]Op{Op386MOVWload, Op386ADDL}: Op386MOVWloadidx1, + [2]Op{Op386MOVLload, Op386ADDL}: Op386MOVLloadidx1, + [2]Op{Op386MOVSSload, Op386ADDL}: Op386MOVSSloadidx1, + [2]Op{Op386MOVSDload, Op386ADDL}: Op386MOVSDloadidx1, + + [2]Op{Op386MOVBstore, Op386ADDL}: Op386MOVBstoreidx1, + [2]Op{Op386MOVWstore, Op386ADDL}: Op386MOVWstoreidx1, + [2]Op{Op386MOVLstore, Op386ADDL}: Op386MOVLstoreidx1, + [2]Op{Op386MOVSSstore, Op386ADDL}: Op386MOVSSstoreidx1, + [2]Op{Op386MOVSDstore, Op386ADDL}: Op386MOVSDstoreidx1, + + [2]Op{Op386MOVBstoreconst, Op386ADDL}: Op386MOVBstoreconstidx1, + [2]Op{Op386MOVWstoreconst, Op386ADDL}: Op386MOVWstoreconstidx1, + [2]Op{Op386MOVLstoreconst, Op386ADDL}: Op386MOVLstoreconstidx1, + + [2]Op{Op386MOVBload, Op386LEAL1}: Op386MOVBloadidx1, + [2]Op{Op386MOVWload, Op386LEAL1}: Op386MOVWloadidx1, + [2]Op{Op386MOVWload, Op386LEAL2}: Op386MOVWloadidx2, + [2]Op{Op386MOVLload, Op386LEAL1}: Op386MOVLloadidx1, + [2]Op{Op386MOVLload, Op386LEAL4}: Op386MOVLloadidx4, + [2]Op{Op386MOVSSload, Op386LEAL1}: Op386MOVSSloadidx1, + [2]Op{Op386MOVSSload, Op386LEAL4}: Op386MOVSSloadidx4, + [2]Op{Op386MOVSDload, Op386LEAL1}: Op386MOVSDloadidx1, + [2]Op{Op386MOVSDload, Op386LEAL8}: Op386MOVSDloadidx8, + + [2]Op{Op386MOVBstore, Op386LEAL1}: Op386MOVBstoreidx1, + [2]Op{Op386MOVWstore, Op386LEAL1}: Op386MOVWstoreidx1, + [2]Op{Op386MOVWstore, Op386LEAL2}: Op386MOVWstoreidx2, + [2]Op{Op386MOVLstore, Op386LEAL1}: Op386MOVLstoreidx1, + [2]Op{Op386MOVLstore, Op386LEAL4}: Op386MOVLstoreidx4, + [2]Op{Op386MOVSSstore, Op386LEAL1}: Op386MOVSSstoreidx1, + [2]Op{Op386MOVSSstore, Op386LEAL4}: 
Op386MOVSSstoreidx4, + [2]Op{Op386MOVSDstore, Op386LEAL1}: Op386MOVSDstoreidx1, + [2]Op{Op386MOVSDstore, Op386LEAL8}: Op386MOVSDstoreidx8, + + [2]Op{Op386MOVBstoreconst, Op386LEAL1}: Op386MOVBstoreconstidx1, + [2]Op{Op386MOVWstoreconst, Op386LEAL1}: Op386MOVWstoreconstidx1, + [2]Op{Op386MOVWstoreconst, Op386LEAL2}: Op386MOVWstoreconstidx2, + [2]Op{Op386MOVLstoreconst, Op386LEAL1}: Op386MOVLstoreconstidx1, + [2]Op{Op386MOVLstoreconst, Op386LEAL4}: Op386MOVLstoreconstidx4, + + [2]Op{Op386ADDLload, Op386LEAL4}: Op386ADDLloadidx4, + [2]Op{Op386SUBLload, Op386LEAL4}: Op386SUBLloadidx4, + [2]Op{Op386MULLload, Op386LEAL4}: Op386MULLloadidx4, + [2]Op{Op386ANDLload, Op386LEAL4}: Op386ANDLloadidx4, + [2]Op{Op386ORLload, Op386LEAL4}: Op386ORLloadidx4, + [2]Op{Op386XORLload, Op386LEAL4}: Op386XORLloadidx4, + + [2]Op{Op386ADDLmodify, Op386LEAL4}: Op386ADDLmodifyidx4, + [2]Op{Op386SUBLmodify, Op386LEAL4}: Op386SUBLmodifyidx4, + [2]Op{Op386ANDLmodify, Op386LEAL4}: Op386ANDLmodifyidx4, + [2]Op{Op386ORLmodify, Op386LEAL4}: Op386ORLmodifyidx4, + [2]Op{Op386XORLmodify, Op386LEAL4}: Op386XORLmodifyidx4, + + [2]Op{Op386ADDLconstmodify, Op386LEAL4}: Op386ADDLconstmodifyidx4, + [2]Op{Op386ANDLconstmodify, Op386LEAL4}: Op386ANDLconstmodifyidx4, + [2]Op{Op386ORLconstmodify, Op386LEAL4}: Op386ORLconstmodifyidx4, + [2]Op{Op386XORLconstmodify, Op386LEAL4}: Op386XORLconstmodifyidx4, + + // s390x + [2]Op{OpS390XMOVDload, OpS390XADD}: OpS390XMOVDloadidx, + [2]Op{OpS390XMOVWload, OpS390XADD}: OpS390XMOVWloadidx, + [2]Op{OpS390XMOVHload, OpS390XADD}: OpS390XMOVHloadidx, + [2]Op{OpS390XMOVBload, OpS390XADD}: OpS390XMOVBloadidx, + + [2]Op{OpS390XMOVWZload, OpS390XADD}: OpS390XMOVWZloadidx, + [2]Op{OpS390XMOVHZload, OpS390XADD}: OpS390XMOVHZloadidx, + [2]Op{OpS390XMOVBZload, OpS390XADD}: OpS390XMOVBZloadidx, + + [2]Op{OpS390XMOVDBRload, OpS390XADD}: OpS390XMOVDBRloadidx, + [2]Op{OpS390XMOVWBRload, OpS390XADD}: OpS390XMOVWBRloadidx, + [2]Op{OpS390XMOVHBRload, OpS390XADD}: OpS390XMOVHBRloadidx, + + 
[2]Op{OpS390XFMOVDload, OpS390XADD}: OpS390XFMOVDloadidx, + [2]Op{OpS390XFMOVSload, OpS390XADD}: OpS390XFMOVSloadidx, + + [2]Op{OpS390XMOVDstore, OpS390XADD}: OpS390XMOVDstoreidx, + [2]Op{OpS390XMOVWstore, OpS390XADD}: OpS390XMOVWstoreidx, + [2]Op{OpS390XMOVHstore, OpS390XADD}: OpS390XMOVHstoreidx, + [2]Op{OpS390XMOVBstore, OpS390XADD}: OpS390XMOVBstoreidx, + + [2]Op{OpS390XMOVDBRstore, OpS390XADD}: OpS390XMOVDBRstoreidx, + [2]Op{OpS390XMOVWBRstore, OpS390XADD}: OpS390XMOVWBRstoreidx, + [2]Op{OpS390XMOVHBRstore, OpS390XADD}: OpS390XMOVHBRstoreidx, + + [2]Op{OpS390XFMOVDstore, OpS390XADD}: OpS390XFMOVDstoreidx, + [2]Op{OpS390XFMOVSstore, OpS390XADD}: OpS390XFMOVSstoreidx, + + [2]Op{OpS390XMOVDload, OpS390XMOVDaddridx}: OpS390XMOVDloadidx, + [2]Op{OpS390XMOVWload, OpS390XMOVDaddridx}: OpS390XMOVWloadidx, + [2]Op{OpS390XMOVHload, OpS390XMOVDaddridx}: OpS390XMOVHloadidx, + [2]Op{OpS390XMOVBload, OpS390XMOVDaddridx}: OpS390XMOVBloadidx, + + [2]Op{OpS390XMOVWZload, OpS390XMOVDaddridx}: OpS390XMOVWZloadidx, + [2]Op{OpS390XMOVHZload, OpS390XMOVDaddridx}: OpS390XMOVHZloadidx, + [2]Op{OpS390XMOVBZload, OpS390XMOVDaddridx}: OpS390XMOVBZloadidx, + + [2]Op{OpS390XMOVDBRload, OpS390XMOVDaddridx}: OpS390XMOVDBRloadidx, + [2]Op{OpS390XMOVWBRload, OpS390XMOVDaddridx}: OpS390XMOVWBRloadidx, + [2]Op{OpS390XMOVHBRload, OpS390XMOVDaddridx}: OpS390XMOVHBRloadidx, + + [2]Op{OpS390XFMOVDload, OpS390XMOVDaddridx}: OpS390XFMOVDloadidx, + [2]Op{OpS390XFMOVSload, OpS390XMOVDaddridx}: OpS390XFMOVSloadidx, + + [2]Op{OpS390XMOVDstore, OpS390XMOVDaddridx}: OpS390XMOVDstoreidx, + [2]Op{OpS390XMOVWstore, OpS390XMOVDaddridx}: OpS390XMOVWstoreidx, + [2]Op{OpS390XMOVHstore, OpS390XMOVDaddridx}: OpS390XMOVHstoreidx, + [2]Op{OpS390XMOVBstore, OpS390XMOVDaddridx}: OpS390XMOVBstoreidx, + + [2]Op{OpS390XMOVDBRstore, OpS390XMOVDaddridx}: OpS390XMOVDBRstoreidx, + [2]Op{OpS390XMOVWBRstore, OpS390XMOVDaddridx}: OpS390XMOVWBRstoreidx, + [2]Op{OpS390XMOVHBRstore, OpS390XMOVDaddridx}: OpS390XMOVHBRstoreidx, + + 
[2]Op{OpS390XFMOVDstore, OpS390XMOVDaddridx}: OpS390XFMOVDstoreidx, + [2]Op{OpS390XFMOVSstore, OpS390XMOVDaddridx}: OpS390XFMOVSstoreidx, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/allocators.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/allocators.go new file mode 100644 index 0000000000000000000000000000000000000000..ff70795f82c994523616f9807e1a6a43b4c0d588 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/allocators.go @@ -0,0 +1,311 @@ +// Code generated from _gen/allocators.go using 'go generate'; DO NOT EDIT. + +package ssa + +import ( + "internal/unsafeheader" + "math/bits" + "sync" + "unsafe" +) + +var poolFreeValueSlice [27]sync.Pool + +func (c *Cache) allocValueSlice(n int) []*Value { + var s []*Value + n2 := n + if n2 < 32 { + n2 = 32 + } + b := bits.Len(uint(n2 - 1)) + v := poolFreeValueSlice[b-5].Get() + if v == nil { + s = make([]*Value, 1< 0 { + if b < 0 { + d = d + 1 + } + c = true + } + return c +} + +func BenchmarkPhioptPass(b *testing.B) { + for i := 0; i < b.N; i++ { + a := rand.Perm(i/10 + 10) + for i := 1; i < len(a)/2; i++ { + fn(a[i]-a[i-1], a[i+len(a)/2-2]-a[i+len(a)/2-1]) + } + } +} + +type Point struct { + X, Y int +} + +//go:noinline +func sign(p1, p2, p3 Point) bool { + return (p1.X-p3.X)*(p2.Y-p3.Y)-(p2.X-p3.X)*(p1.Y-p3.Y) < 0 +} + +func BenchmarkInvertLessThanNoov(b *testing.B) { + p1 := Point{1, 2} + p2 := Point{2, 3} + p3 := Point{3, 4} + for i := 0; i < b.N; i++ { + sign(p1, p2, p3) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/biasedsparsemap.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/biasedsparsemap.go new file mode 100644 index 0000000000000000000000000000000000000000..948aef9a9ba86060d6fb43f6155cbaabff9a23db --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/biasedsparsemap.go @@ -0,0 +1,111 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "math" +) + +// A biasedSparseMap is a sparseMap for integers between J and K inclusive, +// where J might be somewhat larger than zero (and K-J is probably much smaller than J). +// (The motivating use case is the line numbers of statements for a single function.) +// Not all features of a SparseMap are exported, and it is also easy to treat a +// biasedSparseMap like a SparseSet. +type biasedSparseMap struct { + s *sparseMap + first int +} + +// newBiasedSparseMap returns a new biasedSparseMap for values between first and last, inclusive. +func newBiasedSparseMap(first, last int) *biasedSparseMap { + if first > last { + return &biasedSparseMap{first: math.MaxInt32, s: nil} + } + return &biasedSparseMap{first: first, s: newSparseMap(1 + last - first)} +} + +// cap returns one more than the largest key valid for s +func (s *biasedSparseMap) cap() int { + if s == nil || s.s == nil { + return 0 + } + return s.s.cap() + int(s.first) +} + +// size returns the number of entries stored in s +func (s *biasedSparseMap) size() int { + if s == nil || s.s == nil { + return 0 + } + return s.s.size() +} + +// contains reports whether x is a key in s +func (s *biasedSparseMap) contains(x uint) bool { + if s == nil || s.s == nil { + return false + } + if int(x) < s.first { + return false + } + if int(x) >= s.cap() { + return false + } + return s.s.contains(ID(int(x) - s.first)) +} + +// get returns the value s maps for key x, or -1 if +// x is not mapped or is out of range for s. 
+func (s *biasedSparseMap) get(x uint) int32 { + if s == nil || s.s == nil { + return -1 + } + if int(x) < s.first { + return -1 + } + if int(x) >= s.cap() { + return -1 + } + return s.s.get(ID(int(x) - s.first)) +} + +// getEntry returns the i'th key and value stored in s, +// where 0 <= i < s.size() +func (s *biasedSparseMap) getEntry(i int) (x uint, v int32) { + e := s.s.contents()[i] + x = uint(int(e.key) + s.first) + v = e.val + return +} + +// add inserts x->0 into s, provided that x is in the range of keys stored in s. +func (s *biasedSparseMap) add(x uint) { + if int(x) < s.first || int(x) >= s.cap() { + return + } + s.s.set(ID(int(x)-s.first), 0) +} + +// add inserts x->v into s, provided that x is in the range of keys stored in s. +func (s *biasedSparseMap) set(x uint, v int32) { + if int(x) < s.first || int(x) >= s.cap() { + return + } + s.s.set(ID(int(x)-s.first), v) +} + +// remove removes key x from s. +func (s *biasedSparseMap) remove(x uint) { + if int(x) < s.first || int(x) >= s.cap() { + return + } + s.s.remove(ID(int(x) - s.first)) +} + +func (s *biasedSparseMap) clear() { + if s.s != nil { + s.s.clear() + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/block.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/block.go new file mode 100644 index 0000000000000000000000000000000000000000..26af10b59c4bf6c2ff6fd4ec4ac46e5f9106661a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/block.go @@ -0,0 +1,428 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/internal/src" + "fmt" +) + +// Block represents a basic block in the control flow graph of a function. +type Block struct { + // A unique identifier for the block. The system will attempt to allocate + // these IDs densely, but no guarantees. 
+ ID ID + + // Source position for block's control operation + Pos src.XPos + + // The kind of block this is. + Kind BlockKind + + // Likely direction for branches. + // If BranchLikely, Succs[0] is the most likely branch taken. + // If BranchUnlikely, Succs[1] is the most likely branch taken. + // Ignored if len(Succs) < 2. + // Fatal if not BranchUnknown and len(Succs) > 2. + Likely BranchPrediction + + // After flagalloc, records whether flags are live at the end of the block. + FlagsLiveAtEnd bool + + // Subsequent blocks, if any. The number and order depend on the block kind. + Succs []Edge + + // Inverse of successors. + // The order is significant to Phi nodes in the block. + // TODO: predecessors is a pain to maintain. Can we somehow order phi + // arguments by block id and have this field computed explicitly when needed? + Preds []Edge + + // A list of values that determine how the block is exited. The number + // and type of control values depends on the Kind of the block. For + // instance, a BlockIf has a single boolean control value and BlockExit + // has a single memory control value. + // + // The ControlValues() method may be used to get a slice with the non-nil + // control values that can be ranged over. + // + // Controls[1] must be nil if Controls[0] is nil. + Controls [2]*Value + + // Auxiliary info for the block. Its value depends on the Kind. + Aux Aux + AuxInt int64 + + // The unordered set of Values that define the operation of this block. + // After the scheduling pass, this list is ordered. + Values []*Value + + // The containing function + Func *Func + + // Storage for Succs, Preds and Values. + succstorage [2]Edge + predstorage [4]Edge + valstorage [9]*Value +} + +// Edge represents a CFG edge. +// Example edges for b branching to either c or d. +// (c and d have other predecessors.) +// +// b.Succs = [{c,3}, {d,1}] +// c.Preds = [?, ?, ?, {b,0}] +// d.Preds = [?, {b,1}, ?] 
+// +// These indexes allow us to edit the CFG in constant time. +// In addition, it informs phi ops in degenerate cases like: +// +// b: +// if k then c else c +// c: +// v = Phi(x, y) +// +// Then the indexes tell you whether x is chosen from +// the if or else branch from b. +// +// b.Succs = [{c,0},{c,1}] +// c.Preds = [{b,0},{b,1}] +// +// means x is chosen if k is true. +type Edge struct { + // block edge goes to (in a Succs list) or from (in a Preds list) + b *Block + // index of reverse edge. Invariant: + // e := x.Succs[idx] + // e.b.Preds[e.i] = Edge{x,idx} + // and similarly for predecessors. + i int +} + +func (e Edge) Block() *Block { + return e.b +} +func (e Edge) Index() int { + return e.i +} +func (e Edge) String() string { + return fmt.Sprintf("{%v,%d}", e.b, e.i) +} + +// BlockKind is the kind of SSA block. +type BlockKind int16 + +// short form print +func (b *Block) String() string { + return fmt.Sprintf("b%d", b.ID) +} + +// long form print +func (b *Block) LongString() string { + s := b.Kind.String() + if b.Aux != nil { + s += fmt.Sprintf(" {%s}", b.Aux) + } + if t := b.AuxIntString(); t != "" { + s += fmt.Sprintf(" [%s]", t) + } + for _, c := range b.ControlValues() { + s += fmt.Sprintf(" %s", c) + } + if len(b.Succs) > 0 { + s += " ->" + for _, c := range b.Succs { + s += " " + c.b.String() + } + } + switch b.Likely { + case BranchUnlikely: + s += " (unlikely)" + case BranchLikely: + s += " (likely)" + } + return s +} + +// NumControls returns the number of non-nil control values the +// block has. +func (b *Block) NumControls() int { + if b.Controls[0] == nil { + return 0 + } + if b.Controls[1] == nil { + return 1 + } + return 2 +} + +// ControlValues returns a slice containing the non-nil control +// values of the block. The index of each control value will be +// the same as it is in the Controls property and can be used +// in ReplaceControl calls. 
+func (b *Block) ControlValues() []*Value { + if b.Controls[0] == nil { + return b.Controls[:0] + } + if b.Controls[1] == nil { + return b.Controls[:1] + } + return b.Controls[:2] +} + +// SetControl removes all existing control values and then adds +// the control value provided. The number of control values after +// a call to SetControl will always be 1. +func (b *Block) SetControl(v *Value) { + b.ResetControls() + b.Controls[0] = v + v.Uses++ +} + +// ResetControls sets the number of controls for the block to 0. +func (b *Block) ResetControls() { + if b.Controls[0] != nil { + b.Controls[0].Uses-- + } + if b.Controls[1] != nil { + b.Controls[1].Uses-- + } + b.Controls = [2]*Value{} // reset both controls to nil +} + +// AddControl appends a control value to the existing list of control values. +func (b *Block) AddControl(v *Value) { + i := b.NumControls() + b.Controls[i] = v // panics if array is full + v.Uses++ +} + +// ReplaceControl exchanges the existing control value at the index provided +// for the new value. The index must refer to a valid control value. +func (b *Block) ReplaceControl(i int, v *Value) { + b.Controls[i].Uses-- + b.Controls[i] = v + v.Uses++ +} + +// CopyControls replaces the controls for this block with those from the +// provided block. The provided block is not modified. +func (b *Block) CopyControls(from *Block) { + if b == from { + return + } + b.ResetControls() + for _, c := range from.ControlValues() { + b.AddControl(c) + } +} + +// Reset sets the block to the provided kind and clears all the blocks control +// and auxiliary values. Other properties of the block, such as its successors, +// predecessors and values are left unmodified. +func (b *Block) Reset(kind BlockKind) { + b.Kind = kind + b.ResetControls() + b.Aux = nil + b.AuxInt = 0 +} + +// resetWithControl resets b and adds control v. +// It is equivalent to b.Reset(kind); b.AddControl(v), +// except that it is one call instead of two and avoids a bounds check. 
+// It is intended for use by rewrite rules, where this matters. +func (b *Block) resetWithControl(kind BlockKind, v *Value) { + b.Kind = kind + b.ResetControls() + b.Aux = nil + b.AuxInt = 0 + b.Controls[0] = v + v.Uses++ +} + +// resetWithControl2 resets b and adds controls v and w. +// It is equivalent to b.Reset(kind); b.AddControl(v); b.AddControl(w), +// except that it is one call instead of three and avoids two bounds checks. +// It is intended for use by rewrite rules, where this matters. +func (b *Block) resetWithControl2(kind BlockKind, v, w *Value) { + b.Kind = kind + b.ResetControls() + b.Aux = nil + b.AuxInt = 0 + b.Controls[0] = v + b.Controls[1] = w + v.Uses++ + w.Uses++ +} + +// truncateValues truncates b.Values at the ith element, zeroing subsequent elements. +// The values in b.Values after i must already have had their args reset, +// to maintain correct value uses counts. +func (b *Block) truncateValues(i int) { + tail := b.Values[i:] + for j := range tail { + tail[j] = nil + } + b.Values = b.Values[:i] +} + +// AddEdgeTo adds an edge from block b to block c. +func (b *Block) AddEdgeTo(c *Block) { + i := len(b.Succs) + j := len(c.Preds) + b.Succs = append(b.Succs, Edge{c, j}) + c.Preds = append(c.Preds, Edge{b, i}) + b.Func.invalidateCFG() +} + +// removePred removes the ith input edge from b. +// It is the responsibility of the caller to remove +// the corresponding successor edge, and adjust any +// phi values by calling b.removePhiArg(v, i). +func (b *Block) removePred(i int) { + n := len(b.Preds) - 1 + if i != n { + e := b.Preds[n] + b.Preds[i] = e + // Update the other end of the edge we moved. + e.b.Succs[e.i].i = i + } + b.Preds[n] = Edge{} + b.Preds = b.Preds[:n] + b.Func.invalidateCFG() +} + +// removeSucc removes the ith output edge from b. +// It is the responsibility of the caller to remove +// the corresponding predecessor edge. +// Note that this potentially reorders successors of b, so it +// must be used very carefully. 
+func (b *Block) removeSucc(i int) { + n := len(b.Succs) - 1 + if i != n { + e := b.Succs[n] + b.Succs[i] = e + // Update the other end of the edge we moved. + e.b.Preds[e.i].i = i + } + b.Succs[n] = Edge{} + b.Succs = b.Succs[:n] + b.Func.invalidateCFG() +} + +func (b *Block) swapSuccessors() { + if len(b.Succs) != 2 { + b.Fatalf("swapSuccessors with len(Succs)=%d", len(b.Succs)) + } + e0 := b.Succs[0] + e1 := b.Succs[1] + b.Succs[0] = e1 + b.Succs[1] = e0 + e0.b.Preds[e0.i].i = 1 + e1.b.Preds[e1.i].i = 0 + b.Likely *= -1 +} + +// Swaps b.Succs[x] and b.Succs[y]. +func (b *Block) swapSuccessorsByIdx(x, y int) { + if x == y { + return + } + ex := b.Succs[x] + ey := b.Succs[y] + b.Succs[x] = ey + b.Succs[y] = ex + ex.b.Preds[ex.i].i = y + ey.b.Preds[ey.i].i = x +} + +// removePhiArg removes the ith arg from phi. +// It must be called after calling b.removePred(i) to +// adjust the corresponding phi value of the block: +// +// b.removePred(i) +// for _, v := range b.Values { +// +// if v.Op != OpPhi { +// continue +// } +// b.removePhiArg(v, i) +// +// } +func (b *Block) removePhiArg(phi *Value, i int) { + n := len(b.Preds) + if numPhiArgs := len(phi.Args); numPhiArgs-1 != n { + b.Fatalf("inconsistent state for %v, num predecessors: %d, num phi args: %d", phi, n, numPhiArgs) + } + phi.Args[i].Uses-- + phi.Args[i] = phi.Args[n] + phi.Args[n] = nil + phi.Args = phi.Args[:n] + phielimValue(phi) +} + +// LackingPos indicates whether b is a block whose position should be inherited +// from its successors. This is true if all the values within it have unreliable positions +// and if it is "plain", meaning that there is no control flow that is also very likely +// to correspond to a well-understood source position. 
+func (b *Block) LackingPos() bool { + // Non-plain predecessors are If or Defer, which both (1) have two successors, + // which might have different line numbers and (2) correspond to statements + // in the source code that have positions, so this case ought not occur anyway. + if b.Kind != BlockPlain { + return false + } + if b.Pos != src.NoXPos { + return false + } + for _, v := range b.Values { + if v.LackingPos() { + continue + } + return false + } + return true +} + +func (b *Block) AuxIntString() string { + switch b.Kind.AuxIntType() { + case "int8": + return fmt.Sprintf("%v", int8(b.AuxInt)) + case "uint8": + return fmt.Sprintf("%v", uint8(b.AuxInt)) + case "": // no aux int type + return "" + default: // type specified but not implemented - print as int64 + return fmt.Sprintf("%v", b.AuxInt) + } +} + +// likelyBranch reports whether block b is the likely branch of all of its predecessors. +func (b *Block) likelyBranch() bool { + if len(b.Preds) == 0 { + return false + } + for _, e := range b.Preds { + p := e.b + if len(p.Succs) == 1 || len(p.Succs) == 2 && (p.Likely == BranchLikely && p.Succs[0].b == b || + p.Likely == BranchUnlikely && p.Succs[1].b == b) { + continue + } + return false + } + return true +} + +func (b *Block) Logf(msg string, args ...interface{}) { b.Func.Logf(msg, args...) } +func (b *Block) Log() bool { return b.Func.Log() } +func (b *Block) Fatalf(msg string, args ...interface{}) { b.Func.Fatalf(msg, args...) 
} + +type BranchPrediction int8 + +const ( + BranchUnlikely = BranchPrediction(-1) + BranchUnknown = BranchPrediction(0) + BranchLikely = BranchPrediction(+1) +) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/branchelim.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/branchelim.go new file mode 100644 index 0000000000000000000000000000000000000000..f16959dd572973e62fb46a8ab1a61522b173b1ec --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/branchelim.go @@ -0,0 +1,470 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import "cmd/internal/src" + +// branchelim tries to eliminate branches by +// generating CondSelect instructions. +// +// Search for basic blocks that look like +// +// bb0 bb0 +// | \ / \ +// | bb1 or bb1 bb2 <- trivial if/else blocks +// | / \ / +// bb2 bb3 +// +// where the intermediate blocks are mostly empty (with no side-effects); +// rewrite Phis in the postdominator as CondSelects. +func branchelim(f *Func) { + // FIXME: add support for lowering CondSelects on more architectures + switch f.Config.arch { + case "arm64", "ppc64le", "ppc64", "amd64", "wasm", "loong64": + // implemented + default: + return + } + + // Find all the values used in computing the address of any load. + // Typically these values have operations like AddPtr, Lsh64x64, etc. 
+ loadAddr := f.newSparseSet(f.NumValues()) + defer f.retSparseSet(loadAddr) + for _, b := range f.Blocks { + for _, v := range b.Values { + switch v.Op { + case OpLoad, OpAtomicLoad8, OpAtomicLoad32, OpAtomicLoad64, OpAtomicLoadPtr, OpAtomicLoadAcq32, OpAtomicLoadAcq64: + loadAddr.add(v.Args[0].ID) + case OpMove: + loadAddr.add(v.Args[1].ID) + } + } + } + po := f.postorder() + for { + n := loadAddr.size() + for _, b := range po { + for i := len(b.Values) - 1; i >= 0; i-- { + v := b.Values[i] + if !loadAddr.contains(v.ID) { + continue + } + for _, a := range v.Args { + if a.Type.IsInteger() || a.Type.IsPtr() || a.Type.IsUnsafePtr() { + loadAddr.add(a.ID) + } + } + } + } + if loadAddr.size() == n { + break + } + } + + change := true + for change { + change = false + for _, b := range f.Blocks { + change = elimIf(f, loadAddr, b) || elimIfElse(f, loadAddr, b) || change + } + } +} + +func canCondSelect(v *Value, arch string, loadAddr *sparseSet) bool { + if loadAddr.contains(v.ID) { + // The result of the soon-to-be conditional move is used to compute a load address. + // We want to avoid generating a conditional move in this case + // because the load address would now be data-dependent on the condition. + // Previously it would only be control-dependent on the condition, which is faster + // if the branch predicts well (or possibly even if it doesn't, if the load will + // be an expensive cache miss). + // See issue #26306. + return false + } + if arch == "loong64" { + // We should not generate conditional moves if neither of the arguments is constant zero, + // because it requires three instructions (OR, MASKEQZ, MASKNEZ) and will increase the + // register pressure. 
+ if !(v.Args[0].isGenericIntConst() && v.Args[0].AuxInt == 0) && + !(v.Args[1].isGenericIntConst() && v.Args[1].AuxInt == 0) { + return false + } + } + // For now, stick to simple scalars that fit in registers + switch { + case v.Type.Size() > v.Block.Func.Config.RegSize: + return false + case v.Type.IsPtrShaped(): + return true + case v.Type.IsInteger(): + if arch == "amd64" && v.Type.Size() < 2 { + // amd64 doesn't support CMOV with byte registers + return false + } + return true + default: + return false + } +} + +// elimIf converts the one-way branch starting at dom in f to a conditional move if possible. +// loadAddr is a set of values which are used to compute the address of a load. +// Those values are exempt from CMOV generation. +func elimIf(f *Func, loadAddr *sparseSet, dom *Block) bool { + // See if dom is an If with one arm that + // is trivial and succeeded by the other + // successor of dom. + if dom.Kind != BlockIf || dom.Likely != BranchUnknown { + return false + } + var simple, post *Block + for i := range dom.Succs { + bb, other := dom.Succs[i].Block(), dom.Succs[i^1].Block() + if isLeafPlain(bb) && bb.Succs[0].Block() == other { + simple = bb + post = other + break + } + } + if simple == nil || len(post.Preds) != 2 || post == dom { + return false + } + + // We've found our diamond CFG of blocks. + // Now decide if fusing 'simple' into dom+post + // looks profitable. + + // Check that there are Phis, and that all of them + // can be safely rewritten to CondSelect. + hasphis := false + for _, v := range post.Values { + if v.Op == OpPhi { + hasphis = true + if !canCondSelect(v, f.Config.arch, loadAddr) { + return false + } + } + } + if !hasphis { + return false + } + + // Pick some upper bound for the number of instructions + // we'd be willing to execute just to generate a dead + // argument to CondSelect. In the worst case, this is + // the number of useless instructions executed. 
+ const maxfuseinsts = 2 + + if len(simple.Values) > maxfuseinsts || !canSpeculativelyExecute(simple) { + return false + } + + // Replace Phi instructions in b with CondSelect instructions + swap := (post.Preds[0].Block() == dom) != (dom.Succs[0].Block() == post) + for _, v := range post.Values { + if v.Op != OpPhi { + continue + } + v.Op = OpCondSelect + if swap { + v.Args[0], v.Args[1] = v.Args[1], v.Args[0] + } + v.AddArg(dom.Controls[0]) + } + + // Put all of the instructions into 'dom' + // and update the CFG appropriately. + dom.Kind = post.Kind + dom.CopyControls(post) + dom.Aux = post.Aux + dom.Succs = append(dom.Succs[:0], post.Succs...) + for i := range dom.Succs { + e := dom.Succs[i] + e.b.Preds[e.i].b = dom + } + + // Try really hard to preserve statement marks attached to blocks. + simplePos := simple.Pos + postPos := post.Pos + simpleStmt := simplePos.IsStmt() == src.PosIsStmt + postStmt := postPos.IsStmt() == src.PosIsStmt + + for _, v := range simple.Values { + v.Block = dom + } + for _, v := range post.Values { + v.Block = dom + } + + // findBlockPos determines if b contains a stmt-marked value + // that has the same line number as the Pos for b itself. + // (i.e. is the position on b actually redundant?) + findBlockPos := func(b *Block) bool { + pos := b.Pos + for _, v := range b.Values { + // See if there is a stmt-marked value already that matches simple.Pos (and perhaps post.Pos) + if pos.SameFileAndLine(v.Pos) && v.Pos.IsStmt() == src.PosIsStmt { + return true + } + } + return false + } + if simpleStmt { + simpleStmt = !findBlockPos(simple) + if !simpleStmt && simplePos.SameFileAndLine(postPos) { + postStmt = false + } + + } + if postStmt { + postStmt = !findBlockPos(post) + } + + // If simpleStmt and/or postStmt are still true, then try harder + // to find the corresponding statement marks new homes. 
+ + // setBlockPos determines if b contains a can-be-statement value + // that has the same line number as the Pos for b itself, and + // puts a statement mark on it, and returns whether it succeeded + // in this operation. + setBlockPos := func(b *Block) bool { + pos := b.Pos + for _, v := range b.Values { + if pos.SameFileAndLine(v.Pos) && !isPoorStatementOp(v.Op) { + v.Pos = v.Pos.WithIsStmt() + return true + } + } + return false + } + // If necessary and possible, add a mark to a value in simple + if simpleStmt { + if setBlockPos(simple) && simplePos.SameFileAndLine(postPos) { + postStmt = false + } + } + // If necessary and possible, add a mark to a value in post + if postStmt { + postStmt = !setBlockPos(post) + } + + // Before giving up (this was added because it helps), try the end of "dom", and if that is not available, + // try the values in the successor block if it is uncomplicated. + if postStmt { + if dom.Pos.IsStmt() != src.PosIsStmt { + dom.Pos = postPos + } else { + // Try the successor block + if len(dom.Succs) == 1 && len(dom.Succs[0].Block().Preds) == 1 { + succ := dom.Succs[0].Block() + for _, v := range succ.Values { + if isPoorStatementOp(v.Op) { + continue + } + if postPos.SameFileAndLine(v.Pos) { + v.Pos = v.Pos.WithIsStmt() + } + postStmt = false + break + } + // If postStmt still true, tag the block itself if possible + if postStmt && succ.Pos.IsStmt() != src.PosIsStmt { + succ.Pos = postPos + } + } + } + } + + dom.Values = append(dom.Values, simple.Values...) + dom.Values = append(dom.Values, post.Values...) + + // Trash 'post' and 'simple' + clobberBlock(post) + clobberBlock(simple) + + f.invalidateCFG() + return true +} + +// is this a BlockPlain with one predecessor? 
+func isLeafPlain(b *Block) bool { + return b.Kind == BlockPlain && len(b.Preds) == 1 +} + +func clobberBlock(b *Block) { + b.Values = nil + b.Preds = nil + b.Succs = nil + b.Aux = nil + b.ResetControls() + b.Likely = BranchUnknown + b.Kind = BlockInvalid +} + +// elimIfElse converts the two-way branch starting at dom in f to a conditional move if possible. +// loadAddr is a set of values which are used to compute the address of a load. +// Those values are exempt from CMOV generation. +func elimIfElse(f *Func, loadAddr *sparseSet, b *Block) bool { + // See if 'b' ends in an if/else: it should + // have two successors, both of which are BlockPlain + // and succeeded by the same block. + if b.Kind != BlockIf || b.Likely != BranchUnknown { + return false + } + yes, no := b.Succs[0].Block(), b.Succs[1].Block() + if !isLeafPlain(yes) || len(yes.Values) > 1 || !canSpeculativelyExecute(yes) { + return false + } + if !isLeafPlain(no) || len(no.Values) > 1 || !canSpeculativelyExecute(no) { + return false + } + if b.Succs[0].Block().Succs[0].Block() != b.Succs[1].Block().Succs[0].Block() { + return false + } + // block that postdominates the if/else + post := b.Succs[0].Block().Succs[0].Block() + if len(post.Preds) != 2 || post == b { + return false + } + hasphis := false + for _, v := range post.Values { + if v.Op == OpPhi { + hasphis = true + if !canCondSelect(v, f.Config.arch, loadAddr) { + return false + } + } + } + if !hasphis { + return false + } + + // Don't generate CondSelects if branch is cheaper. 
+ if !shouldElimIfElse(no, yes, post, f.Config.arch) { + return false + } + + // now we're committed: rewrite each Phi as a CondSelect + swap := post.Preds[0].Block() != b.Succs[0].Block() + for _, v := range post.Values { + if v.Op != OpPhi { + continue + } + v.Op = OpCondSelect + if swap { + v.Args[0], v.Args[1] = v.Args[1], v.Args[0] + } + v.AddArg(b.Controls[0]) + } + + // Move the contents of all of these + // blocks into 'b' and update CFG edges accordingly + b.Kind = post.Kind + b.CopyControls(post) + b.Aux = post.Aux + b.Succs = append(b.Succs[:0], post.Succs...) + for i := range b.Succs { + e := b.Succs[i] + e.b.Preds[e.i].b = b + } + for i := range post.Values { + post.Values[i].Block = b + } + for i := range yes.Values { + yes.Values[i].Block = b + } + for i := range no.Values { + no.Values[i].Block = b + } + b.Values = append(b.Values, yes.Values...) + b.Values = append(b.Values, no.Values...) + b.Values = append(b.Values, post.Values...) + + // trash post, yes, and no + clobberBlock(yes) + clobberBlock(no) + clobberBlock(post) + + f.invalidateCFG() + return true +} + +// shouldElimIfElse reports whether estimated cost of eliminating branch +// is lower than threshold. +func shouldElimIfElse(no, yes, post *Block, arch string) bool { + switch arch { + default: + return true + case "amd64": + const maxcost = 2 + phi := 0 + other := 0 + for _, v := range post.Values { + if v.Op == OpPhi { + // Each phi results in CondSelect, which lowers into CMOV, + // CMOV has latency >1 on most CPUs. + phi++ + } + for _, x := range v.Args { + if x.Block == no || x.Block == yes { + other++ + } + } + } + cost := phi * 1 + if phi > 1 { + // If we have more than 1 phi and some values in post have args + // in yes or no blocks, we may have to recalculate condition, because + // those args may clobber flags. For now assume that all operations clobber flags. 
+ cost += other * 1 + } + return cost < maxcost + } +} + +// canSpeculativelyExecute reports whether every value in the block can +// be evaluated without causing any observable side effects (memory +// accesses, panics and so on) except for execution time changes. It +// also ensures that the block does not contain any phis which we can't +// speculatively execute. +// Warning: this function cannot currently detect values that represent +// instructions the execution of which need to be guarded with CPU +// hardware feature checks. See issue #34950. +func canSpeculativelyExecute(b *Block) bool { + // don't fuse memory ops, Phi ops, divides (can panic), + // or anything else with side-effects + for _, v := range b.Values { + if v.Op == OpPhi || isDivMod(v.Op) || isPtrArithmetic(v.Op) || v.Type.IsMemory() || + v.MemoryArg() != nil || opcodeTable[v.Op].hasSideEffects { + return false + } + } + return true +} + +func isDivMod(op Op) bool { + switch op { + case OpDiv8, OpDiv8u, OpDiv16, OpDiv16u, + OpDiv32, OpDiv32u, OpDiv64, OpDiv64u, OpDiv128u, + OpDiv32F, OpDiv64F, + OpMod8, OpMod8u, OpMod16, OpMod16u, + OpMod32, OpMod32u, OpMod64, OpMod64u: + return true + default: + return false + } +} + +func isPtrArithmetic(op Op) bool { + // Pointer arithmetic can't be speculatively executed because the result + // may be an invalid pointer (if, for example, the condition is that the + // base pointer is not nil). See issue 56990. + switch op { + case OpOffPtr, OpAddPtr, OpSubPtr: + return true + default: + return false + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/branchelim_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/branchelim_test.go new file mode 100644 index 0000000000000000000000000000000000000000..20fa84d63ae53ba252d8eee10fd58bc2847545a0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/branchelim_test.go @@ -0,0 +1,172 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/types" + "testing" +) + +// Test that a trivial 'if' is eliminated +func TestBranchElimIf(t *testing.T) { + var testData = []struct { + arch string + intType string + ok bool + }{ + {"arm64", "int32", true}, + {"amd64", "int32", true}, + {"amd64", "int8", false}, + } + + for _, data := range testData { + t.Run(data.arch+"/"+data.intType, func(t *testing.T) { + c := testConfigArch(t, data.arch) + boolType := c.config.Types.Bool + var intType *types.Type + switch data.intType { + case "int32": + intType = c.config.Types.Int32 + case "int8": + intType = c.config.Types.Int8 + default: + t.Fatal("invalid integer type:", data.intType) + } + fun := c.Fun("entry", + Bloc("entry", + Valu("start", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Valu("const1", OpConst32, intType, 1, nil), + Valu("const2", OpConst32, intType, 2, nil), + Valu("addr", OpAddr, boolType.PtrTo(), 0, nil, "sb"), + Valu("cond", OpLoad, boolType, 0, nil, "addr", "start"), + If("cond", "b2", "b3")), + Bloc("b2", + Goto("b3")), + Bloc("b3", + Valu("phi", OpPhi, intType, 0, nil, "const1", "const2"), + Valu("retstore", OpStore, types.TypeMem, 0, nil, "phi", "sb", "start"), + Exit("retstore"))) + + CheckFunc(fun.f) + branchelim(fun.f) + CheckFunc(fun.f) + Deadcode(fun.f) + CheckFunc(fun.f) + + if data.ok { + + if len(fun.f.Blocks) != 1 { + t.Fatalf("expected 1 block after branchelim and deadcode; found %d", len(fun.f.Blocks)) + } + if fun.values["phi"].Op != OpCondSelect { + t.Fatalf("expected phi op to be CondSelect; found op %s", fun.values["phi"].Op) + } + if fun.values["phi"].Args[2] != fun.values["cond"] { + t.Errorf("expected CondSelect condition to be %s; found %s", fun.values["cond"], fun.values["phi"].Args[2]) + } + if fun.blocks["entry"].Kind != BlockExit { + t.Errorf("expected entry to be BlockExit; 
found kind %s", fun.blocks["entry"].Kind.String()) + } + } else { + if len(fun.f.Blocks) != 3 { + t.Fatalf("expected 3 block after branchelim and deadcode; found %d", len(fun.f.Blocks)) + } + } + }) + } +} + +// Test that a trivial if/else is eliminated +func TestBranchElimIfElse(t *testing.T) { + for _, arch := range []string{"arm64", "amd64"} { + t.Run(arch, func(t *testing.T) { + c := testConfigArch(t, arch) + boolType := c.config.Types.Bool + intType := c.config.Types.Int32 + fun := c.Fun("entry", + Bloc("entry", + Valu("start", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Valu("const1", OpConst32, intType, 1, nil), + Valu("const2", OpConst32, intType, 2, nil), + Valu("addr", OpAddr, boolType.PtrTo(), 0, nil, "sb"), + Valu("cond", OpLoad, boolType, 0, nil, "addr", "start"), + If("cond", "b2", "b3")), + Bloc("b2", + Goto("b4")), + Bloc("b3", + Goto("b4")), + Bloc("b4", + Valu("phi", OpPhi, intType, 0, nil, "const1", "const2"), + Valu("retstore", OpStore, types.TypeMem, 0, nil, "phi", "sb", "start"), + Exit("retstore"))) + + CheckFunc(fun.f) + branchelim(fun.f) + CheckFunc(fun.f) + Deadcode(fun.f) + CheckFunc(fun.f) + + if len(fun.f.Blocks) != 1 { + t.Fatalf("expected 1 block after branchelim; found %d", len(fun.f.Blocks)) + } + if fun.values["phi"].Op != OpCondSelect { + t.Fatalf("expected phi op to be CondSelect; found op %s", fun.values["phi"].Op) + } + if fun.values["phi"].Args[2] != fun.values["cond"] { + t.Errorf("expected CondSelect condition to be %s; found %s", fun.values["cond"], fun.values["phi"].Args[2]) + } + if fun.blocks["entry"].Kind != BlockExit { + t.Errorf("expected entry to be BlockExit; found kind %s", fun.blocks["entry"].Kind.String()) + } + }) + } +} + +// Test that an if/else CFG that loops back +// into itself does *not* get eliminated. 
+func TestNoBranchElimLoop(t *testing.T) { + for _, arch := range []string{"arm64", "amd64"} { + t.Run(arch, func(t *testing.T) { + c := testConfigArch(t, arch) + boolType := c.config.Types.Bool + intType := c.config.Types.Int32 + + // The control flow here is totally bogus, + // but a dead cycle seems like the only plausible + // way to arrive at a diamond CFG that is also a loop. + fun := c.Fun("entry", + Bloc("entry", + Valu("start", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Valu("const2", OpConst32, intType, 2, nil), + Valu("const3", OpConst32, intType, 3, nil), + Goto("b5")), + Bloc("b2", + Valu("addr", OpAddr, boolType.PtrTo(), 0, nil, "sb"), + Valu("cond", OpLoad, boolType, 0, nil, "addr", "start"), + Valu("phi", OpPhi, intType, 0, nil, "const2", "const3"), + If("cond", "b3", "b4")), + Bloc("b3", + Goto("b2")), + Bloc("b4", + Goto("b2")), + Bloc("b5", + Exit("start"))) + + CheckFunc(fun.f) + branchelim(fun.f) + CheckFunc(fun.f) + + if len(fun.f.Blocks) != 5 { + t.Errorf("expected 5 block after branchelim; found %d", len(fun.f.Blocks)) + } + if fun.values["phi"].Op != OpPhi { + t.Errorf("expected phi op to be CondSelect; found op %s", fun.values["phi"].Op) + } + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/cache.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/cache.go new file mode 100644 index 0000000000000000000000000000000000000000..ba36edd3906a0fd40db386d35cd41733059c32ef --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/cache.go @@ -0,0 +1,62 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/internal/obj" + "sort" +) + +// A Cache holds reusable compiler state. +// It is intended to be re-used for multiple Func compilations. 
+type Cache struct { + // Storage for low-numbered values and blocks. + values [2000]Value + blocks [200]Block + locs [2000]Location + + // Reusable stackAllocState. + // See stackalloc.go's {new,put}StackAllocState. + stackAllocState *stackAllocState + + scrPoset []*poset // scratch poset to be reused + + // Reusable regalloc state. + regallocValues []valState + + ValueToProgAfter []*obj.Prog + debugState debugState + + Liveness interface{} // *gc.livenessFuncCache + + // Free "headers" for use by the allocators in allocators.go. + // Used to put slices in sync.Pools without allocation. + hdrValueSlice []*[]*Value + hdrInt64Slice []*[]int64 +} + +func (c *Cache) Reset() { + nv := sort.Search(len(c.values), func(i int) bool { return c.values[i].ID == 0 }) + xv := c.values[:nv] + for i := range xv { + xv[i] = Value{} + } + nb := sort.Search(len(c.blocks), func(i int) bool { return c.blocks[i].ID == 0 }) + xb := c.blocks[:nb] + for i := range xb { + xb[i] = Block{} + } + nl := sort.Search(len(c.locs), func(i int) bool { return c.locs[i] == nil }) + xl := c.locs[:nl] + for i := range xl { + xl[i] = nil + } + + // regalloc sets the length of c.regallocValues to whatever it may use, + // so clear according to length. + for i := range c.regallocValues { + c.regallocValues[i] = valState{} + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/check.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/check.go new file mode 100644 index 0000000000000000000000000000000000000000..bbfdaceaad90b0f966ce63587eaa0ff0b63dccc1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/check.go @@ -0,0 +1,630 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/ir" + "cmd/internal/obj/s390x" + "math" + "math/bits" +) + +// checkFunc checks invariants of f. 
+func checkFunc(f *Func) { + blockMark := make([]bool, f.NumBlocks()) + valueMark := make([]bool, f.NumValues()) + + for _, b := range f.Blocks { + if blockMark[b.ID] { + f.Fatalf("block %s appears twice in %s!", b, f.Name) + } + blockMark[b.ID] = true + if b.Func != f { + f.Fatalf("%s.Func=%s, want %s", b, b.Func.Name, f.Name) + } + + for i, e := range b.Preds { + if se := e.b.Succs[e.i]; se.b != b || se.i != i { + f.Fatalf("block pred/succ not crosslinked correctly %d:%s %d:%s", i, b, se.i, se.b) + } + } + for i, e := range b.Succs { + if pe := e.b.Preds[e.i]; pe.b != b || pe.i != i { + f.Fatalf("block succ/pred not crosslinked correctly %d:%s %d:%s", i, b, pe.i, pe.b) + } + } + + switch b.Kind { + case BlockExit: + if len(b.Succs) != 0 { + f.Fatalf("exit block %s has successors", b) + } + if b.NumControls() != 1 { + f.Fatalf("exit block %s has no control value", b) + } + if !b.Controls[0].Type.IsMemory() { + f.Fatalf("exit block %s has non-memory control value %s", b, b.Controls[0].LongString()) + } + case BlockRet: + if len(b.Succs) != 0 { + f.Fatalf("ret block %s has successors", b) + } + if b.NumControls() != 1 { + f.Fatalf("ret block %s has nil control", b) + } + if !b.Controls[0].Type.IsMemory() { + f.Fatalf("ret block %s has non-memory control value %s", b, b.Controls[0].LongString()) + } + case BlockRetJmp: + if len(b.Succs) != 0 { + f.Fatalf("retjmp block %s len(Succs)==%d, want 0", b, len(b.Succs)) + } + if b.NumControls() != 1 { + f.Fatalf("retjmp block %s has nil control", b) + } + if !b.Controls[0].Type.IsMemory() { + f.Fatalf("retjmp block %s has non-memory control value %s", b, b.Controls[0].LongString()) + } + case BlockPlain: + if len(b.Succs) != 1 { + f.Fatalf("plain block %s len(Succs)==%d, want 1", b, len(b.Succs)) + } + if b.NumControls() != 0 { + f.Fatalf("plain block %s has non-nil control %s", b, b.Controls[0].LongString()) + } + case BlockIf: + if len(b.Succs) != 2 { + f.Fatalf("if block %s len(Succs)==%d, want 2", b, len(b.Succs)) + } + 
if b.NumControls() != 1 { + f.Fatalf("if block %s has no control value", b) + } + if !b.Controls[0].Type.IsBoolean() { + f.Fatalf("if block %s has non-bool control value %s", b, b.Controls[0].LongString()) + } + case BlockDefer: + if len(b.Succs) != 2 { + f.Fatalf("defer block %s len(Succs)==%d, want 2", b, len(b.Succs)) + } + if b.NumControls() != 1 { + f.Fatalf("defer block %s has no control value", b) + } + if !b.Controls[0].Type.IsMemory() { + f.Fatalf("defer block %s has non-memory control value %s", b, b.Controls[0].LongString()) + } + case BlockFirst: + if len(b.Succs) != 2 { + f.Fatalf("plain/dead block %s len(Succs)==%d, want 2", b, len(b.Succs)) + } + if b.NumControls() != 0 { + f.Fatalf("plain/dead block %s has a control value", b) + } + case BlockJumpTable: + if b.NumControls() != 1 { + f.Fatalf("jumpTable block %s has no control value", b) + } + } + if len(b.Succs) != 2 && b.Likely != BranchUnknown { + f.Fatalf("likeliness prediction %d for block %s with %d successors", b.Likely, b, len(b.Succs)) + } + + for _, v := range b.Values { + // Check to make sure argument count makes sense (argLen of -1 indicates + // variable length args) + nArgs := opcodeTable[v.Op].argLen + if nArgs != -1 && int32(len(v.Args)) != nArgs { + f.Fatalf("value %s has %d args, expected %d", v.LongString(), + len(v.Args), nArgs) + } + + // Check to make sure aux values make sense. 
+ canHaveAux := false + canHaveAuxInt := false + // TODO: enforce types of Aux in this switch (like auxString does below) + switch opcodeTable[v.Op].auxType { + case auxNone: + case auxBool: + if v.AuxInt < 0 || v.AuxInt > 1 { + f.Fatalf("bad bool AuxInt value for %v", v) + } + canHaveAuxInt = true + case auxInt8: + if v.AuxInt != int64(int8(v.AuxInt)) { + f.Fatalf("bad int8 AuxInt value for %v", v) + } + canHaveAuxInt = true + case auxInt16: + if v.AuxInt != int64(int16(v.AuxInt)) { + f.Fatalf("bad int16 AuxInt value for %v", v) + } + canHaveAuxInt = true + case auxInt32: + if v.AuxInt != int64(int32(v.AuxInt)) { + f.Fatalf("bad int32 AuxInt value for %v", v) + } + canHaveAuxInt = true + case auxInt64, auxARM64BitField: + canHaveAuxInt = true + case auxInt128: + // AuxInt must be zero, so leave canHaveAuxInt set to false. + case auxUInt8: + if v.AuxInt != int64(uint8(v.AuxInt)) { + f.Fatalf("bad uint8 AuxInt value for %v", v) + } + canHaveAuxInt = true + case auxFloat32: + canHaveAuxInt = true + if math.IsNaN(v.AuxFloat()) { + f.Fatalf("value %v has an AuxInt that encodes a NaN", v) + } + if !isExactFloat32(v.AuxFloat()) { + f.Fatalf("value %v has an AuxInt value that is not an exact float32", v) + } + case auxFloat64: + canHaveAuxInt = true + if math.IsNaN(v.AuxFloat()) { + f.Fatalf("value %v has an AuxInt that encodes a NaN", v) + } + case auxString: + if _, ok := v.Aux.(stringAux); !ok { + f.Fatalf("value %v has Aux type %T, want string", v, v.Aux) + } + canHaveAux = true + case auxCallOff: + canHaveAuxInt = true + fallthrough + case auxCall: + if ac, ok := v.Aux.(*AuxCall); ok { + if v.Op == OpStaticCall && ac.Fn == nil { + f.Fatalf("value %v has *AuxCall with nil Fn", v) + } + } else { + f.Fatalf("value %v has Aux type %T, want *AuxCall", v, v.Aux) + } + canHaveAux = true + case auxNameOffsetInt8: + if _, ok := v.Aux.(*AuxNameOffset); !ok { + f.Fatalf("value %v has Aux type %T, want *AuxNameOffset", v, v.Aux) + } + canHaveAux = true + canHaveAuxInt = true + 
case auxSym, auxTyp: + canHaveAux = true + case auxSymOff, auxSymValAndOff, auxTypSize: + canHaveAuxInt = true + canHaveAux = true + case auxCCop: + if opcodeTable[Op(v.AuxInt)].name == "OpInvalid" { + f.Fatalf("value %v has an AuxInt value that is a valid opcode", v) + } + canHaveAuxInt = true + case auxS390XCCMask: + if _, ok := v.Aux.(s390x.CCMask); !ok { + f.Fatalf("bad type %T for S390XCCMask in %v", v.Aux, v) + } + canHaveAux = true + case auxS390XRotateParams: + if _, ok := v.Aux.(s390x.RotateParams); !ok { + f.Fatalf("bad type %T for S390XRotateParams in %v", v.Aux, v) + } + canHaveAux = true + case auxFlagConstant: + if v.AuxInt < 0 || v.AuxInt > 15 { + f.Fatalf("bad FlagConstant AuxInt value for %v", v) + } + canHaveAuxInt = true + default: + f.Fatalf("unknown aux type for %s", v.Op) + } + if !canHaveAux && v.Aux != nil { + f.Fatalf("value %s has an Aux value %v but shouldn't", v.LongString(), v.Aux) + } + if !canHaveAuxInt && v.AuxInt != 0 { + f.Fatalf("value %s has an AuxInt value %d but shouldn't", v.LongString(), v.AuxInt) + } + + for i, arg := range v.Args { + if arg == nil { + f.Fatalf("value %s has nil arg", v.LongString()) + } + if v.Op != OpPhi { + // For non-Phi ops, memory args must be last, if present + if arg.Type.IsMemory() && i != len(v.Args)-1 { + f.Fatalf("value %s has non-final memory arg (%d < %d)", v.LongString(), i, len(v.Args)-1) + } + } + } + + if valueMark[v.ID] { + f.Fatalf("value %s appears twice!", v.LongString()) + } + valueMark[v.ID] = true + + if v.Block != b { + f.Fatalf("%s.block != %s", v, b) + } + if v.Op == OpPhi && len(v.Args) != len(b.Preds) { + f.Fatalf("phi length %s does not match pred length %d for block %s", v.LongString(), len(b.Preds), b) + } + + if v.Op == OpAddr { + if len(v.Args) == 0 { + f.Fatalf("no args for OpAddr %s", v.LongString()) + } + if v.Args[0].Op != OpSB { + f.Fatalf("bad arg to OpAddr %v", v) + } + } + + if v.Op == OpLocalAddr { + if len(v.Args) != 2 { + f.Fatalf("wrong # of args for OpLocalAddr 
%s", v.LongString()) + } + if v.Args[0].Op != OpSP { + f.Fatalf("bad arg 0 to OpLocalAddr %v", v) + } + if !v.Args[1].Type.IsMemory() { + f.Fatalf("bad arg 1 to OpLocalAddr %v", v) + } + } + + if f.RegAlloc != nil && f.Config.SoftFloat && v.Type.IsFloat() { + f.Fatalf("unexpected floating-point type %v", v.LongString()) + } + + // Check types. + // TODO: more type checks? + switch c := f.Config; v.Op { + case OpSP, OpSB: + if v.Type != c.Types.Uintptr { + f.Fatalf("bad %s type: want uintptr, have %s", + v.Op, v.Type.String()) + } + case OpStringLen: + if v.Type != c.Types.Int { + f.Fatalf("bad %s type: want int, have %s", + v.Op, v.Type.String()) + } + case OpLoad: + if !v.Args[1].Type.IsMemory() { + f.Fatalf("bad arg 1 type to %s: want mem, have %s", + v.Op, v.Args[1].Type.String()) + } + case OpStore: + if !v.Type.IsMemory() { + f.Fatalf("bad %s type: want mem, have %s", + v.Op, v.Type.String()) + } + if !v.Args[2].Type.IsMemory() { + f.Fatalf("bad arg 2 type to %s: want mem, have %s", + v.Op, v.Args[2].Type.String()) + } + case OpCondSelect: + if !v.Args[2].Type.IsBoolean() { + f.Fatalf("bad arg 2 type to %s: want boolean, have %s", + v.Op, v.Args[2].Type.String()) + } + case OpAddPtr: + if !v.Args[0].Type.IsPtrShaped() && v.Args[0].Type != c.Types.Uintptr { + f.Fatalf("bad arg 0 type to %s: want ptr, have %s", v.Op, v.Args[0].LongString()) + } + if !v.Args[1].Type.IsInteger() { + f.Fatalf("bad arg 1 type to %s: want integer, have %s", v.Op, v.Args[1].LongString()) + } + case OpVarDef: + if !v.Aux.(*ir.Name).Type().HasPointers() { + f.Fatalf("vardef must have pointer type %s", v.Aux.(*ir.Name).Type().String()) + } + case OpNilCheck: + // nil checks have pointer type before scheduling, and + // void type after scheduling. 
+ if f.scheduled { + if v.Uses != 0 { + f.Fatalf("nilcheck must have 0 uses %s", v.Uses) + } + if !v.Type.IsVoid() { + f.Fatalf("nilcheck must have void type %s", v.Type.String()) + } + } else { + if !v.Type.IsPtrShaped() && !v.Type.IsUintptr() { + f.Fatalf("nilcheck must have pointer type %s", v.Type.String()) + } + } + if !v.Args[0].Type.IsPtrShaped() && !v.Args[0].Type.IsUintptr() { + f.Fatalf("nilcheck must have argument of pointer type %s", v.Args[0].Type.String()) + } + if !v.Args[1].Type.IsMemory() { + f.Fatalf("bad arg 1 type to %s: want mem, have %s", + v.Op, v.Args[1].Type.String()) + } + } + + // TODO: check for cycles in values + } + } + + // Check to make sure all Blocks referenced are in the function. + if !blockMark[f.Entry.ID] { + f.Fatalf("entry block %v is missing", f.Entry) + } + for _, b := range f.Blocks { + for _, c := range b.Preds { + if !blockMark[c.b.ID] { + f.Fatalf("predecessor block %v for %v is missing", c, b) + } + } + for _, c := range b.Succs { + if !blockMark[c.b.ID] { + f.Fatalf("successor block %v for %v is missing", c, b) + } + } + } + + if len(f.Entry.Preds) > 0 { + f.Fatalf("entry block %s of %s has predecessor(s) %v", f.Entry, f.Name, f.Entry.Preds) + } + + // Check to make sure all Values referenced are in the function. + for _, b := range f.Blocks { + for _, v := range b.Values { + for i, a := range v.Args { + if !valueMark[a.ID] { + f.Fatalf("%v, arg %d of %s, is missing", a, i, v.LongString()) + } + } + } + for _, c := range b.ControlValues() { + if !valueMark[c.ID] { + f.Fatalf("control value for %s is missing: %v", b, c) + } + } + } + for b := f.freeBlocks; b != nil; b = b.succstorage[0].b { + if blockMark[b.ID] { + f.Fatalf("used block b%d in free list", b.ID) + } + } + for v := f.freeValues; v != nil; v = v.argstorage[0] { + if valueMark[v.ID] { + f.Fatalf("used value v%d in free list", v.ID) + } + } + + // Check to make sure all args dominate uses. 
+ if f.RegAlloc == nil { + // Note: regalloc introduces non-dominating args. + // See TODO in regalloc.go. + sdom := f.Sdom() + for _, b := range f.Blocks { + for _, v := range b.Values { + for i, arg := range v.Args { + x := arg.Block + y := b + if v.Op == OpPhi { + y = b.Preds[i].b + } + if !domCheck(f, sdom, x, y) { + f.Fatalf("arg %d of value %s does not dominate, arg=%s", i, v.LongString(), arg.LongString()) + } + } + } + for _, c := range b.ControlValues() { + if !domCheck(f, sdom, c.Block, b) { + f.Fatalf("control value %s for %s doesn't dominate", c, b) + } + } + } + } + + // Check loop construction + if f.RegAlloc == nil && f.pass != nil { // non-nil pass allows better-targeted debug printing + ln := f.loopnest() + if !ln.hasIrreducible { + po := f.postorder() // use po to avoid unreachable blocks. + for _, b := range po { + for _, s := range b.Succs { + bb := s.Block() + if ln.b2l[b.ID] == nil && ln.b2l[bb.ID] != nil && bb != ln.b2l[bb.ID].header { + f.Fatalf("block %s not in loop branches to non-header block %s in loop", b.String(), bb.String()) + } + if ln.b2l[b.ID] != nil && ln.b2l[bb.ID] != nil && bb != ln.b2l[bb.ID].header && !ln.b2l[b.ID].isWithinOrEq(ln.b2l[bb.ID]) { + f.Fatalf("block %s in loop branches to non-header block %s in non-containing loop", b.String(), bb.String()) + } + } + } + } + } + + // Check use counts + uses := make([]int32, f.NumValues()) + for _, b := range f.Blocks { + for _, v := range b.Values { + for _, a := range v.Args { + uses[a.ID]++ + } + } + for _, c := range b.ControlValues() { + uses[c.ID]++ + } + } + for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Uses != uses[v.ID] { + f.Fatalf("%s has %d uses, but has Uses=%d", v, uses[v.ID], v.Uses) + } + } + } + + memCheck(f) +} + +func memCheck(f *Func) { + // Check that if a tuple has a memory type, it is second. 
+ for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Type.IsTuple() && v.Type.FieldType(0).IsMemory() { + f.Fatalf("memory is first in a tuple: %s\n", v.LongString()) + } + } + } + + // Single live memory checks. + // These checks only work if there are no memory copies. + // (Memory copies introduce ambiguity about which mem value is really live. + // probably fixable, but it's easier to avoid the problem.) + // For the same reason, disable this check if some memory ops are unused. + for _, b := range f.Blocks { + for _, v := range b.Values { + if (v.Op == OpCopy || v.Uses == 0) && v.Type.IsMemory() { + return + } + } + if b != f.Entry && len(b.Preds) == 0 { + return + } + } + + // Compute live memory at the end of each block. + lastmem := make([]*Value, f.NumBlocks()) + ss := newSparseSet(f.NumValues()) + for _, b := range f.Blocks { + // Mark overwritten memory values. Those are args of other + // ops that generate memory values. + ss.clear() + for _, v := range b.Values { + if v.Op == OpPhi || !v.Type.IsMemory() { + continue + } + if m := v.MemoryArg(); m != nil { + ss.add(m.ID) + } + } + // There should be at most one remaining unoverwritten memory value. + for _, v := range b.Values { + if !v.Type.IsMemory() { + continue + } + if ss.contains(v.ID) { + continue + } + if lastmem[b.ID] != nil { + f.Fatalf("two live memory values in %s: %s and %s", b, lastmem[b.ID], v) + } + lastmem[b.ID] = v + } + // If there is no remaining memory value, that means there was no memory update. + // Take any memory arg. + if lastmem[b.ID] == nil { + for _, v := range b.Values { + if v.Op == OpPhi { + continue + } + m := v.MemoryArg() + if m == nil { + continue + } + if lastmem[b.ID] != nil && lastmem[b.ID] != m { + f.Fatalf("two live memory values in %s: %s and %s", b, lastmem[b.ID], m) + } + lastmem[b.ID] = m + } + } + } + // Propagate last live memory through storeless blocks. 
+ for { + changed := false + for _, b := range f.Blocks { + if lastmem[b.ID] != nil { + continue + } + for _, e := range b.Preds { + p := e.b + if lastmem[p.ID] != nil { + lastmem[b.ID] = lastmem[p.ID] + changed = true + break + } + } + } + if !changed { + break + } + } + // Check merge points. + for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Op == OpPhi && v.Type.IsMemory() { + for i, a := range v.Args { + if a != lastmem[b.Preds[i].b.ID] { + f.Fatalf("inconsistent memory phi %s %d %s %s", v.LongString(), i, a, lastmem[b.Preds[i].b.ID]) + } + } + } + } + } + + // Check that only one memory is live at any point. + if f.scheduled { + for _, b := range f.Blocks { + var mem *Value // the current live memory in the block + for _, v := range b.Values { + if v.Op == OpPhi { + if v.Type.IsMemory() { + mem = v + } + continue + } + if mem == nil && len(b.Preds) > 0 { + // If no mem phi, take mem of any predecessor. + mem = lastmem[b.Preds[0].b.ID] + } + for _, a := range v.Args { + if a.Type.IsMemory() && a != mem { + f.Fatalf("two live mems @ %s: %s and %s", v, mem, a) + } + } + if v.Type.IsMemory() { + mem = v + } + } + } + } + + // Check that after scheduling, phis are always first in the block. + if f.scheduled { + for _, b := range f.Blocks { + seenNonPhi := false + for _, v := range b.Values { + switch v.Op { + case OpPhi: + if seenNonPhi { + f.Fatalf("phi after non-phi @ %s: %s", b, v) + } + default: + seenNonPhi = true + } + } + } + } +} + +// domCheck reports whether x dominates y (including x==y). +func domCheck(f *Func, sdom SparseTree, x, y *Block) bool { + if !sdom.IsAncestorEq(f.Entry, y) { + // unreachable - ignore + return true + } + return sdom.IsAncestorEq(x, y) +} + +// isExactFloat32 reports whether x can be exactly represented as a float32. +func isExactFloat32(x float64) bool { + // Check the mantissa is in range. + if bits.TrailingZeros64(math.Float64bits(x)) < 52-23 { + return false + } + // Check the exponent is in range. 
The mantissa check above is sufficient for NaN values. + return math.IsNaN(x) || x == float64(float32(x)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/checkbce.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/checkbce.go new file mode 100644 index 0000000000000000000000000000000000000000..6a9ce2be0aacab01ae33e81eef7bdf734a1d8446 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/checkbce.go @@ -0,0 +1,35 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import "cmd/compile/internal/logopt" + +// checkbce prints all bounds checks that are present in the function. +// Useful to find regressions. checkbce is only activated when with +// corresponding debug options, so it's off by default. +// See test/checkbce.go +func checkbce(f *Func) { + if f.pass.debug <= 0 && !logopt.Enabled() { + return + } + + for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Op == OpIsInBounds || v.Op == OpIsSliceInBounds { + if f.pass.debug > 0 { + f.Warnl(v.Pos, "Found %v", v.Op) + } + if logopt.Enabled() { + if v.Op == OpIsInBounds { + logopt.LogOpt(v.Pos, "isInBounds", "checkbce", f.Name) + } + if v.Op == OpIsSliceInBounds { + logopt.LogOpt(v.Pos, "isSliceInBounds", "checkbce", f.Name) + } + } + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/compile.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/compile.go new file mode 100644 index 0000000000000000000000000000000000000000..d125891f88c58f798e9cd490ba999b0b2ef88ba8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/compile.go @@ -0,0 +1,613 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import ( + "cmd/internal/src" + "fmt" + "hash/crc32" + "internal/buildcfg" + "io" + "log" + "math/rand" + "os" + "path/filepath" + "regexp" + "runtime" + "sort" + "strings" + "time" +) + +// Compile is the main entry point for this package. +// Compile modifies f so that on return: +// - all Values in f map to 0 or 1 assembly instructions of the target architecture +// - the order of f.Blocks is the order to emit the Blocks +// - the order of b.Values is the order to emit the Values in each Block +// - f has a non-nil regAlloc field +func Compile(f *Func) { + // TODO: debugging - set flags to control verbosity of compiler, + // which phases to dump IR before/after, etc. + if f.Log() { + f.Logf("compiling %s\n", f.Name) + } + + var rnd *rand.Rand + if checkEnabled { + seed := int64(crc32.ChecksumIEEE(([]byte)(f.Name))) ^ int64(checkRandSeed) + rnd = rand.New(rand.NewSource(seed)) + } + + // hook to print function & phase if panic happens + phaseName := "init" + defer func() { + if phaseName != "" { + err := recover() + stack := make([]byte, 16384) + n := runtime.Stack(stack, false) + stack = stack[:n] + if f.HTMLWriter != nil { + f.HTMLWriter.flushPhases() + } + f.Fatalf("panic during %s while compiling %s:\n\n%v\n\n%s\n", phaseName, f.Name, err, stack) + } + }() + + // Run all the passes + if f.Log() { + printFunc(f) + } + f.HTMLWriter.WritePhase("start", "start") + if BuildDump[f.Name] { + f.dumpFile("build") + } + if checkEnabled { + checkFunc(f) + } + const logMemStats = false + for _, p := range passes { + if !f.Config.optimize && !p.required || p.disabled { + continue + } + f.pass = &p + phaseName = p.name + if f.Log() { + f.Logf(" pass %s begin\n", p.name) + } + // TODO: capture logging during this pass, add it to the HTML + var mStart runtime.MemStats + if logMemStats || p.mem { + runtime.ReadMemStats(&mStart) + } + + if checkEnabled && !f.scheduled { + // Test that we don't depend on the value order, by randomizing + // the order of values 
in each block. See issue 18169. + for _, b := range f.Blocks { + for i := 0; i < len(b.Values)-1; i++ { + j := i + rnd.Intn(len(b.Values)-i) + b.Values[i], b.Values[j] = b.Values[j], b.Values[i] + } + } + } + + tStart := time.Now() + p.fn(f) + tEnd := time.Now() + + // Need something less crude than "Log the whole intermediate result". + if f.Log() || f.HTMLWriter != nil { + time := tEnd.Sub(tStart).Nanoseconds() + var stats string + if logMemStats { + var mEnd runtime.MemStats + runtime.ReadMemStats(&mEnd) + nBytes := mEnd.TotalAlloc - mStart.TotalAlloc + nAllocs := mEnd.Mallocs - mStart.Mallocs + stats = fmt.Sprintf("[%d ns %d allocs %d bytes]", time, nAllocs, nBytes) + } else { + stats = fmt.Sprintf("[%d ns]", time) + } + + if f.Log() { + f.Logf(" pass %s end %s\n", p.name, stats) + printFunc(f) + } + f.HTMLWriter.WritePhase(phaseName, fmt.Sprintf("%s %s", phaseName, stats)) + } + if p.time || p.mem { + // Surround timing information w/ enough context to allow comparisons. + time := tEnd.Sub(tStart).Nanoseconds() + if p.time { + f.LogStat("TIME(ns)", time) + } + if p.mem { + var mEnd runtime.MemStats + runtime.ReadMemStats(&mEnd) + nBytes := mEnd.TotalAlloc - mStart.TotalAlloc + nAllocs := mEnd.Mallocs - mStart.Mallocs + f.LogStat("TIME(ns):BYTES:ALLOCS", time, nBytes, nAllocs) + } + } + if p.dump != nil && p.dump[f.Name] { + // Dump function to appropriately named file + f.dumpFile(phaseName) + } + if checkEnabled { + checkFunc(f) + } + } + + if f.HTMLWriter != nil { + // Ensure we write any pending phases to the html + f.HTMLWriter.flushPhases() + } + + if f.ruleMatches != nil { + var keys []string + for key := range f.ruleMatches { + keys = append(keys, key) + } + sort.Strings(keys) + buf := new(strings.Builder) + fmt.Fprintf(buf, "%s: ", f.Name) + for _, key := range keys { + fmt.Fprintf(buf, "%s=%d ", key, f.ruleMatches[key]) + } + fmt.Fprint(buf, "\n") + fmt.Print(buf.String()) + } + + // Squash error printing defer + phaseName = "" +} + +// 
DumpFileForPhase creates a file from the function name and phase name, +// warning and returning nil if this is not possible. +func (f *Func) DumpFileForPhase(phaseName string) io.WriteCloser { + f.dumpFileSeq++ + fname := fmt.Sprintf("%s_%02d__%s.dump", f.Name, int(f.dumpFileSeq), phaseName) + fname = strings.Replace(fname, " ", "_", -1) + fname = strings.Replace(fname, "/", "_", -1) + fname = strings.Replace(fname, ":", "_", -1) + + if ssaDir := os.Getenv("GOSSADIR"); ssaDir != "" { + fname = filepath.Join(ssaDir, fname) + } + + fi, err := os.Create(fname) + if err != nil { + f.Warnl(src.NoXPos, "Unable to create after-phase dump file %s", fname) + return nil + } + return fi +} + +// dumpFile creates a file from the phase name and function name +// Dumping is done to files to avoid buffering huge strings before +// output. +func (f *Func) dumpFile(phaseName string) { + fi := f.DumpFileForPhase(phaseName) + if fi != nil { + p := stringFuncPrinter{w: fi} + fprintFunc(p, f) + fi.Close() + } +} + +type pass struct { + name string + fn func(*Func) + required bool + disabled bool + time bool // report time to run pass + mem bool // report mem stats to run pass + stats int // pass reports own "stats" (e.g., branches removed) + debug int // pass performs some debugging. =1 should be in error-testing-friendly Warnl format. 
+ test int // pass-specific ad-hoc option, perhaps useful in development + dump map[string]bool // dump if function name matches +} + +func (p *pass) addDump(s string) { + if p.dump == nil { + p.dump = make(map[string]bool) + } + p.dump[s] = true +} + +func (p *pass) String() string { + if p == nil { + return "nil pass" + } + return p.name +} + +// Run consistency checker between each phase +var ( + checkEnabled = false + checkRandSeed = 0 +) + +// Debug output +var IntrinsicsDebug int +var IntrinsicsDisable bool + +var BuildDebug int +var BuildTest int +var BuildStats int +var BuildDump map[string]bool = make(map[string]bool) // names of functions to dump after initial build of ssa + +var GenssaDump map[string]bool = make(map[string]bool) // names of functions to dump after ssa has been converted to asm + +// PhaseOption sets the specified flag in the specified ssa phase, +// returning empty string if this was successful or a string explaining +// the error if it was not. +// A version of the phase name with "_" replaced by " " is also checked for a match. +// If the phase name begins a '~' then the rest of the underscores-replaced-with-blanks +// version is used as a regular expression to match the phase name(s). +// +// Special cases that have turned out to be useful: +// - ssa/check/on enables checking after each phase +// - ssa/all/time enables time reporting for all phases +// +// See gc/lex.go for dissection of the option string. 
+// Example uses: +// +// GO_GCFLAGS=-d=ssa/generic_cse/time,ssa/generic_cse/stats,ssa/generic_cse/debug=3 ./make.bash +// +// BOOT_GO_GCFLAGS=-d='ssa/~^.*scc$/off' GO_GCFLAGS='-d=ssa/~^.*scc$/off' ./make.bash +func PhaseOption(phase, flag string, val int, valString string) string { + switch phase { + case "", "help": + lastcr := 0 + phasenames := " check, all, build, intrinsics, genssa" + for _, p := range passes { + pn := strings.Replace(p.name, " ", "_", -1) + if len(pn)+len(phasenames)-lastcr > 70 { + phasenames += "\n " + lastcr = len(phasenames) + phasenames += pn + } else { + phasenames += ", " + pn + } + } + return `PhaseOptions usage: + + go tool compile -d=ssa//[=|] + +where: + +- is one of: +` + phasenames + ` + +- is one of: + on, off, debug, mem, time, test, stats, dump, seed + +- defaults to 1 + +- is required for the "dump" flag, and specifies the + name of function to dump after + +Phase "all" supports flags "time", "mem", and "dump". +Phase "intrinsics" supports flags "on", "off", and "debug". +Phase "genssa" (assembly generation) supports the flag "dump". + +If the "dump" flag is specified, the output is written on a file named +___.dump; otherwise it is directed to stdout. + +Examples: + + -d=ssa/check/on +enables checking after each phase + + -d=ssa/check/seed=1234 +enables checking after each phase, using 1234 to seed the PRNG +used for value order randomization + + -d=ssa/all/time +enables time reporting for all phases + + -d=ssa/prove/debug=2 +sets debugging level to 2 in the prove pass + +Be aware that when "/debug=X" is applied to a pass, some passes +will emit debug output for all functions, and other passes will +only emit debug output for functions that match the current +GOSSAFUNC value. + +Multiple flags can be passed at once, by separating them with +commas. 
For example: + + -d=ssa/check/on,ssa/all/time +` + } + + if phase == "check" { + switch flag { + case "on": + checkEnabled = val != 0 + debugPoset = checkEnabled // also turn on advanced self-checking in prove's data structure + return "" + case "off": + checkEnabled = val == 0 + debugPoset = checkEnabled + return "" + case "seed": + checkEnabled = true + checkRandSeed = val + debugPoset = checkEnabled + return "" + } + } + + alltime := false + allmem := false + alldump := false + if phase == "all" { + switch flag { + case "time": + alltime = val != 0 + case "mem": + allmem = val != 0 + case "dump": + alldump = val != 0 + if alldump { + BuildDump[valString] = true + GenssaDump[valString] = true + } + default: + return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option (expected ssa/all/{time,mem,dump=function_name})", flag, phase) + } + } + + if phase == "intrinsics" { + switch flag { + case "on": + IntrinsicsDisable = val == 0 + case "off": + IntrinsicsDisable = val != 0 + case "debug": + IntrinsicsDebug = val + default: + return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option (expected ssa/intrinsics/{on,off,debug})", flag, phase) + } + return "" + } + if phase == "build" { + switch flag { + case "debug": + BuildDebug = val + case "test": + BuildTest = val + case "stats": + BuildStats = val + case "dump": + BuildDump[valString] = true + default: + return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option (expected ssa/build/{debug,test,stats,dump=function_name})", flag, phase) + } + return "" + } + if phase == "genssa" { + switch flag { + case "dump": + GenssaDump[valString] = true + default: + return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option (expected ssa/genssa/dump=function_name)", flag, phase) + } + return "" + } + + underphase := strings.Replace(phase, "_", " ", -1) + var re *regexp.Regexp + if phase[0] == '~' { + r, ok := regexp.Compile(underphase[1:]) + if ok != nil 
{ + return fmt.Sprintf("Error %s in regexp for phase %s, flag %s", ok.Error(), phase, flag) + } + re = r + } + matchedOne := false + for i, p := range passes { + if phase == "all" { + p.time = alltime + p.mem = allmem + if alldump { + p.addDump(valString) + } + passes[i] = p + matchedOne = true + } else if p.name == phase || p.name == underphase || re != nil && re.MatchString(p.name) { + switch flag { + case "on": + p.disabled = val == 0 + case "off": + p.disabled = val != 0 + case "time": + p.time = val != 0 + case "mem": + p.mem = val != 0 + case "debug": + p.debug = val + case "stats": + p.stats = val + case "test": + p.test = val + case "dump": + p.addDump(valString) + default: + return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase) + } + if p.disabled && p.required { + return fmt.Sprintf("Cannot disable required SSA phase %s using -d=ssa/%s debug option", phase, phase) + } + passes[i] = p + matchedOne = true + } + } + if matchedOne { + return "" + } + return fmt.Sprintf("Did not find a phase matching %s in -d=ssa/... debug option", phase) +} + +// list of passes for the compiler +var passes = [...]pass{ + // TODO: combine phielim and copyelim into a single pass? + {name: "number lines", fn: numberLines, required: true}, + {name: "early phielim", fn: phielim}, + {name: "early copyelim", fn: copyelim}, + {name: "early deadcode", fn: deadcode}, // remove generated dead code to avoid doing pointless work during opt + {name: "short circuit", fn: shortcircuit}, + {name: "decompose user", fn: decomposeUser, required: true}, + {name: "pre-opt deadcode", fn: deadcode}, + {name: "opt", fn: opt, required: true}, // NB: some generic rules know the name of the opt pass. 
TODO: split required rules and optimizing rules + {name: "zero arg cse", fn: zcse, required: true}, // required to merge OpSB values + {name: "opt deadcode", fn: deadcode, required: true}, // remove any blocks orphaned during opt + {name: "generic cse", fn: cse}, + {name: "phiopt", fn: phiopt}, + {name: "gcse deadcode", fn: deadcode, required: true}, // clean out after cse and phiopt + {name: "nilcheckelim", fn: nilcheckelim}, + {name: "prove", fn: prove}, + {name: "early fuse", fn: fuseEarly}, + {name: "expand calls", fn: expandCalls, required: true}, + {name: "decompose builtin", fn: postExpandCallsDecompose, required: true}, + {name: "softfloat", fn: softfloat, required: true}, + {name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules + {name: "dead auto elim", fn: elimDeadAutosGeneric}, + {name: "sccp", fn: sccp}, + {name: "generic deadcode", fn: deadcode, required: true}, // remove dead stores, which otherwise mess up store chain + {name: "check bce", fn: checkbce}, + {name: "branchelim", fn: branchelim}, + {name: "late fuse", fn: fuseLate}, + {name: "dse", fn: dse}, + {name: "memcombine", fn: memcombine}, + {name: "writebarrier", fn: writebarrier, required: true}, // expand write barrier ops + {name: "insert resched checks", fn: insertLoopReschedChecks, + disabled: !buildcfg.Experiment.PreemptibleLoops}, // insert resched checks in loops. 
+ {name: "lower", fn: lower, required: true}, + {name: "addressing modes", fn: addressingModes, required: false}, + {name: "late lower", fn: lateLower, required: true}, + {name: "lowered deadcode for cse", fn: deadcode}, // deadcode immediately before CSE avoids CSE making dead values live again + {name: "lowered cse", fn: cse}, + {name: "elim unread autos", fn: elimUnreadAutos}, + {name: "tighten tuple selectors", fn: tightenTupleSelectors, required: true}, + {name: "lowered deadcode", fn: deadcode, required: true}, + {name: "checkLower", fn: checkLower, required: true}, + {name: "late phielim", fn: phielim}, + {name: "late copyelim", fn: copyelim}, + {name: "tighten", fn: tighten, required: true}, // move values closer to their uses + {name: "late deadcode", fn: deadcode}, + {name: "critical", fn: critical, required: true}, // remove critical edges + {name: "phi tighten", fn: phiTighten}, // place rematerializable phi args near uses to reduce value lifetimes + {name: "likelyadjust", fn: likelyadjust}, + {name: "layout", fn: layout, required: true}, // schedule blocks + {name: "schedule", fn: schedule, required: true}, // schedule values + {name: "late nilcheck", fn: nilcheckelim2}, + {name: "flagalloc", fn: flagalloc, required: true}, // allocate flags register + {name: "regalloc", fn: regalloc, required: true}, // allocate int & float registers + stack slots + {name: "loop rotate", fn: loopRotate}, + {name: "trim", fn: trim}, // remove empty blocks +} + +// Double-check phase ordering constraints. +// This code is intended to document the ordering requirements +// between different phases. It does not override the passes +// list above. +type constraint struct { + a, b string // a must come before b +} + +var passOrder = [...]constraint{ + // "insert resched checks" uses mem, better to clean out stores first. 
+ {"dse", "insert resched checks"}, + // insert resched checks adds new blocks containing generic instructions + {"insert resched checks", "lower"}, + {"insert resched checks", "tighten"}, + + // prove relies on common-subexpression elimination for maximum benefits. + {"generic cse", "prove"}, + // deadcode after prove to eliminate all new dead blocks. + {"prove", "generic deadcode"}, + // common-subexpression before dead-store elim, so that we recognize + // when two address expressions are the same. + {"generic cse", "dse"}, + // cse substantially improves nilcheckelim efficacy + {"generic cse", "nilcheckelim"}, + // allow deadcode to clean up after nilcheckelim + {"nilcheckelim", "generic deadcode"}, + // nilcheckelim generates sequences of plain basic blocks + {"nilcheckelim", "late fuse"}, + // nilcheckelim relies on opt to rewrite user nil checks + {"opt", "nilcheckelim"}, + // tighten will be most effective when as many values have been removed as possible + {"generic deadcode", "tighten"}, + {"generic cse", "tighten"}, + // checkbce needs the values removed + {"generic deadcode", "check bce"}, + // decompose builtin now also cleans up after expand calls + {"expand calls", "decompose builtin"}, + // don't run optimization pass until we've decomposed builtin objects + {"decompose builtin", "late opt"}, + // decompose builtin is the last pass that may introduce new float ops, so run softfloat after it + {"decompose builtin", "softfloat"}, + // tuple selectors must be tightened to generators and de-duplicated before scheduling + {"tighten tuple selectors", "schedule"}, + // remove critical edges before phi tighten, so that phi args get better placement + {"critical", "phi tighten"}, + // don't layout blocks until critical edges have been removed + {"critical", "layout"}, + // regalloc requires the removal of all critical edges + {"critical", "regalloc"}, + // regalloc requires all the values in a block to be scheduled + {"schedule", "regalloc"}, + // the rules 
in late lower run after the general rules. + {"lower", "late lower"}, + // late lower may generate some values that need to be CSEed. + {"late lower", "lowered cse"}, + // checkLower must run after lowering & subsequent dead code elim + {"lower", "checkLower"}, + {"lowered deadcode", "checkLower"}, + {"late lower", "checkLower"}, + // late nilcheck needs instructions to be scheduled. + {"schedule", "late nilcheck"}, + // flagalloc needs instructions to be scheduled. + {"schedule", "flagalloc"}, + // regalloc needs flags to be allocated first. + {"flagalloc", "regalloc"}, + // loopRotate will confuse regalloc. + {"regalloc", "loop rotate"}, + // trim needs regalloc to be done first. + {"regalloc", "trim"}, + // memcombine works better if fuse happens first, to help merge stores. + {"late fuse", "memcombine"}, + // memcombine is a arch-independent pass. + {"memcombine", "lower"}, +} + +func init() { + for _, c := range passOrder { + a, b := c.a, c.b + i := -1 + j := -1 + for k, p := range passes { + if p.name == a { + i = k + } + if p.name == b { + j = k + } + } + if i < 0 { + log.Panicf("pass %s not found", a) + } + if j < 0 { + log.Panicf("pass %s not found", b) + } + if i >= j { + log.Panicf("passes %s and %s out of order", a, b) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/config.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/config.go new file mode 100644 index 0000000000000000000000000000000000000000..debcf1a0f465a1e207eb804ac65d20a07a9a4050 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/config.go @@ -0,0 +1,420 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import ( + "cmd/compile/internal/abi" + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/src" + "internal/buildcfg" +) + +// A Config holds readonly compilation information. +// It is created once, early during compilation, +// and shared across all compilations. +type Config struct { + arch string // "amd64", etc. + PtrSize int64 // 4 or 8; copy of cmd/internal/sys.Arch.PtrSize + RegSize int64 // 4 or 8; copy of cmd/internal/sys.Arch.RegSize + Types Types + lowerBlock blockRewriter // block lowering function, first round + lowerValue valueRewriter // value lowering function, first round + lateLowerBlock blockRewriter // block lowering function that needs to be run after the first round; only used on some architectures + lateLowerValue valueRewriter // value lowering function that needs to be run after the first round; only used on some architectures + splitLoad valueRewriter // function for splitting merged load ops; only used on some architectures + registers []Register // machine registers + gpRegMask regMask // general purpose integer register mask + fpRegMask regMask // floating point register mask + fp32RegMask regMask // floating point register mask + fp64RegMask regMask // floating point register mask + specialRegMask regMask // special register mask + intParamRegs []int8 // register numbers of integer param (in/out) registers + floatParamRegs []int8 // register numbers of floating param (in/out) registers + ABI1 *abi.ABIConfig // "ABIInternal" under development // TODO change comment when this becomes current + ABI0 *abi.ABIConfig + GCRegMap []*Register // garbage collector register map, by GC register index + FPReg int8 // register number of frame pointer, -1 if not used + LinkReg int8 // register number of link register if it is a general purpose register, -1 if not used + hasGReg bool // has hardware g register + ctxt *obj.Link // Generic arch information + 
optimize bool // Do optimization + noDuffDevice bool // Don't use Duff's device + useSSE bool // Use SSE for non-float operations + useAvg bool // Use optimizations that need Avg* operations + useHmul bool // Use optimizations that need Hmul* operations + SoftFloat bool // + Race bool // race detector enabled + BigEndian bool // + UseFMA bool // Use hardware FMA operation + unalignedOK bool // Unaligned loads/stores are ok + haveBswap64 bool // architecture implements Bswap64 + haveBswap32 bool // architecture implements Bswap32 + haveBswap16 bool // architecture implements Bswap16 +} + +type ( + blockRewriter func(*Block) bool + valueRewriter func(*Value) bool +) + +type Types struct { + Bool *types.Type + Int8 *types.Type + Int16 *types.Type + Int32 *types.Type + Int64 *types.Type + UInt8 *types.Type + UInt16 *types.Type + UInt32 *types.Type + UInt64 *types.Type + Int *types.Type + Float32 *types.Type + Float64 *types.Type + UInt *types.Type + Uintptr *types.Type + String *types.Type + BytePtr *types.Type // TODO: use unsafe.Pointer instead? + Int32Ptr *types.Type + UInt32Ptr *types.Type + IntPtr *types.Type + UintptrPtr *types.Type + Float32Ptr *types.Type + Float64Ptr *types.Type + BytePtrPtr *types.Type +} + +// NewTypes creates and populates a Types. +func NewTypes() *Types { + t := new(Types) + t.SetTypPtrs() + return t +} + +// SetTypPtrs populates t. 
+func (t *Types) SetTypPtrs() { + t.Bool = types.Types[types.TBOOL] + t.Int8 = types.Types[types.TINT8] + t.Int16 = types.Types[types.TINT16] + t.Int32 = types.Types[types.TINT32] + t.Int64 = types.Types[types.TINT64] + t.UInt8 = types.Types[types.TUINT8] + t.UInt16 = types.Types[types.TUINT16] + t.UInt32 = types.Types[types.TUINT32] + t.UInt64 = types.Types[types.TUINT64] + t.Int = types.Types[types.TINT] + t.Float32 = types.Types[types.TFLOAT32] + t.Float64 = types.Types[types.TFLOAT64] + t.UInt = types.Types[types.TUINT] + t.Uintptr = types.Types[types.TUINTPTR] + t.String = types.Types[types.TSTRING] + t.BytePtr = types.NewPtr(types.Types[types.TUINT8]) + t.Int32Ptr = types.NewPtr(types.Types[types.TINT32]) + t.UInt32Ptr = types.NewPtr(types.Types[types.TUINT32]) + t.IntPtr = types.NewPtr(types.Types[types.TINT]) + t.UintptrPtr = types.NewPtr(types.Types[types.TUINTPTR]) + t.Float32Ptr = types.NewPtr(types.Types[types.TFLOAT32]) + t.Float64Ptr = types.NewPtr(types.Types[types.TFLOAT64]) + t.BytePtrPtr = types.NewPtr(types.NewPtr(types.Types[types.TUINT8])) +} + +type Logger interface { + // Logf logs a message from the compiler. + Logf(string, ...interface{}) + + // Log reports whether logging is not a no-op + // some logging calls account for more than a few heap allocations. + Log() bool + + // Fatal reports a compiler error and exits. + Fatalf(pos src.XPos, msg string, args ...interface{}) + + // Warnl writes compiler messages in the form expected by "errorcheck" tests + Warnl(pos src.XPos, fmt_ string, args ...interface{}) + + // Forwards the Debug flags from gc + Debug_checknil() bool +} + +type Frontend interface { + Logger + + // StringData returns a symbol pointing to the given string's contents. + StringData(string) *obj.LSym + + // Given the name for a compound type, returns the name we should use + // for the parts of that compound type. 
+ SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot + + // Syslook returns a symbol of the runtime function/variable with the + // given name. + Syslook(string) *obj.LSym + + // UseWriteBarrier reports whether write barrier is enabled + UseWriteBarrier() bool + + // Func returns the ir.Func of the function being compiled. + Func() *ir.Func +} + +// NewConfig returns a new configuration object for the given architecture. +func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat bool) *Config { + c := &Config{arch: arch, Types: types} + c.useAvg = true + c.useHmul = true + switch arch { + case "amd64": + c.PtrSize = 8 + c.RegSize = 8 + c.lowerBlock = rewriteBlockAMD64 + c.lowerValue = rewriteValueAMD64 + c.lateLowerBlock = rewriteBlockAMD64latelower + c.lateLowerValue = rewriteValueAMD64latelower + c.splitLoad = rewriteValueAMD64splitload + c.registers = registersAMD64[:] + c.gpRegMask = gpRegMaskAMD64 + c.fpRegMask = fpRegMaskAMD64 + c.specialRegMask = specialRegMaskAMD64 + c.intParamRegs = paramIntRegAMD64 + c.floatParamRegs = paramFloatRegAMD64 + c.FPReg = framepointerRegAMD64 + c.LinkReg = linkRegAMD64 + c.hasGReg = true + c.unalignedOK = true + c.haveBswap64 = true + c.haveBswap32 = true + c.haveBswap16 = true + case "386": + c.PtrSize = 4 + c.RegSize = 4 + c.lowerBlock = rewriteBlock386 + c.lowerValue = rewriteValue386 + c.splitLoad = rewriteValue386splitload + c.registers = registers386[:] + c.gpRegMask = gpRegMask386 + c.fpRegMask = fpRegMask386 + c.FPReg = framepointerReg386 + c.LinkReg = linkReg386 + c.hasGReg = false + c.unalignedOK = true + c.haveBswap32 = true + c.haveBswap16 = true + case "arm": + c.PtrSize = 4 + c.RegSize = 4 + c.lowerBlock = rewriteBlockARM + c.lowerValue = rewriteValueARM + c.registers = registersARM[:] + c.gpRegMask = gpRegMaskARM + c.fpRegMask = fpRegMaskARM + c.FPReg = framepointerRegARM + c.LinkReg = linkRegARM + c.hasGReg = true + case "arm64": + c.PtrSize = 8 + c.RegSize = 8 
+ c.lowerBlock = rewriteBlockARM64 + c.lowerValue = rewriteValueARM64 + c.lateLowerBlock = rewriteBlockARM64latelower + c.lateLowerValue = rewriteValueARM64latelower + c.registers = registersARM64[:] + c.gpRegMask = gpRegMaskARM64 + c.fpRegMask = fpRegMaskARM64 + c.intParamRegs = paramIntRegARM64 + c.floatParamRegs = paramFloatRegARM64 + c.FPReg = framepointerRegARM64 + c.LinkReg = linkRegARM64 + c.hasGReg = true + c.unalignedOK = true + c.haveBswap64 = true + c.haveBswap32 = true + c.haveBswap16 = true + case "ppc64": + c.BigEndian = true + fallthrough + case "ppc64le": + c.PtrSize = 8 + c.RegSize = 8 + c.lowerBlock = rewriteBlockPPC64 + c.lowerValue = rewriteValuePPC64 + c.lateLowerBlock = rewriteBlockPPC64latelower + c.lateLowerValue = rewriteValuePPC64latelower + c.registers = registersPPC64[:] + c.gpRegMask = gpRegMaskPPC64 + c.fpRegMask = fpRegMaskPPC64 + c.specialRegMask = specialRegMaskPPC64 + c.intParamRegs = paramIntRegPPC64 + c.floatParamRegs = paramFloatRegPPC64 + c.FPReg = framepointerRegPPC64 + c.LinkReg = linkRegPPC64 + c.hasGReg = true + c.unalignedOK = true + // Note: ppc64 has register bswap ops only when GOPPC64>=10. + // But it has bswap+load and bswap+store ops for all ppc64 variants. + // That is the sense we're using them here - they are only used + // in contexts where they can be merged with a load or store. 
+ c.haveBswap64 = true + c.haveBswap32 = true + c.haveBswap16 = true + case "mips64": + c.BigEndian = true + fallthrough + case "mips64le": + c.PtrSize = 8 + c.RegSize = 8 + c.lowerBlock = rewriteBlockMIPS64 + c.lowerValue = rewriteValueMIPS64 + c.registers = registersMIPS64[:] + c.gpRegMask = gpRegMaskMIPS64 + c.fpRegMask = fpRegMaskMIPS64 + c.specialRegMask = specialRegMaskMIPS64 + c.FPReg = framepointerRegMIPS64 + c.LinkReg = linkRegMIPS64 + c.hasGReg = true + case "loong64": + c.PtrSize = 8 + c.RegSize = 8 + c.lowerBlock = rewriteBlockLOONG64 + c.lowerValue = rewriteValueLOONG64 + c.registers = registersLOONG64[:] + c.gpRegMask = gpRegMaskLOONG64 + c.fpRegMask = fpRegMaskLOONG64 + c.intParamRegs = paramIntRegLOONG64 + c.floatParamRegs = paramFloatRegLOONG64 + c.FPReg = framepointerRegLOONG64 + c.LinkReg = linkRegLOONG64 + c.hasGReg = true + case "s390x": + c.PtrSize = 8 + c.RegSize = 8 + c.lowerBlock = rewriteBlockS390X + c.lowerValue = rewriteValueS390X + c.registers = registersS390X[:] + c.gpRegMask = gpRegMaskS390X + c.fpRegMask = fpRegMaskS390X + c.FPReg = framepointerRegS390X + c.LinkReg = linkRegS390X + c.hasGReg = true + c.noDuffDevice = true + c.BigEndian = true + c.unalignedOK = true + c.haveBswap64 = true + c.haveBswap32 = true + c.haveBswap16 = true // only for loads&stores, see ppc64 comment + case "mips": + c.BigEndian = true + fallthrough + case "mipsle": + c.PtrSize = 4 + c.RegSize = 4 + c.lowerBlock = rewriteBlockMIPS + c.lowerValue = rewriteValueMIPS + c.registers = registersMIPS[:] + c.gpRegMask = gpRegMaskMIPS + c.fpRegMask = fpRegMaskMIPS + c.specialRegMask = specialRegMaskMIPS + c.FPReg = framepointerRegMIPS + c.LinkReg = linkRegMIPS + c.hasGReg = true + c.noDuffDevice = true + case "riscv64": + c.PtrSize = 8 + c.RegSize = 8 + c.lowerBlock = rewriteBlockRISCV64 + c.lowerValue = rewriteValueRISCV64 + c.lateLowerBlock = rewriteBlockRISCV64latelower + c.lateLowerValue = rewriteValueRISCV64latelower + c.registers = registersRISCV64[:] + 
c.gpRegMask = gpRegMaskRISCV64 + c.fpRegMask = fpRegMaskRISCV64 + c.intParamRegs = paramIntRegRISCV64 + c.floatParamRegs = paramFloatRegRISCV64 + c.FPReg = framepointerRegRISCV64 + c.hasGReg = true + case "wasm": + c.PtrSize = 8 + c.RegSize = 8 + c.lowerBlock = rewriteBlockWasm + c.lowerValue = rewriteValueWasm + c.registers = registersWasm[:] + c.gpRegMask = gpRegMaskWasm + c.fpRegMask = fpRegMaskWasm + c.fp32RegMask = fp32RegMaskWasm + c.fp64RegMask = fp64RegMaskWasm + c.FPReg = framepointerRegWasm + c.LinkReg = linkRegWasm + c.hasGReg = true + c.noDuffDevice = true + c.useAvg = false + c.useHmul = false + default: + ctxt.Diag("arch %s not implemented", arch) + } + c.ctxt = ctxt + c.optimize = optimize + c.useSSE = true + c.UseFMA = true + c.SoftFloat = softfloat + if softfloat { + c.floatParamRegs = nil // no FP registers in softfloat mode + } + + c.ABI0 = abi.NewABIConfig(0, 0, ctxt.Arch.FixedFrameSize, 0) + c.ABI1 = abi.NewABIConfig(len(c.intParamRegs), len(c.floatParamRegs), ctxt.Arch.FixedFrameSize, 1) + + // On Plan 9, floating point operations are not allowed in note handler. + if buildcfg.GOOS == "plan9" { + // Don't use FMA on Plan 9 + c.UseFMA = false + + // Don't use Duff's device and SSE on Plan 9 AMD64. + if arch == "amd64" { + c.noDuffDevice = true + c.useSSE = false + } + } + + if ctxt.Flag_shared { + // LoweredWB is secretly a CALL and CALLs on 386 in + // shared mode get rewritten by obj6.go to go through + // the GOT, which clobbers BX. + opcodeTable[Op386LoweredWB].reg.clobbers |= 1 << 3 // BX + } + + // Create the GC register map index. + // TODO: This is only used for debug printing. Maybe export config.registers? 
+ gcRegMapSize := int16(0) + for _, r := range c.registers { + if r.gcNum+1 > gcRegMapSize { + gcRegMapSize = r.gcNum + 1 + } + } + c.GCRegMap = make([]*Register, gcRegMapSize) + for i, r := range c.registers { + if r.gcNum != -1 { + c.GCRegMap[r.gcNum] = &c.registers[i] + } + } + + return c +} + +func (c *Config) Ctxt() *obj.Link { return c.ctxt } + +func (c *Config) haveByteSwap(size int64) bool { + switch size { + case 8: + return c.haveBswap64 + case 4: + return c.haveBswap32 + case 2: + return c.haveBswap16 + default: + base.Fatalf("bad size %d\n", size) + return false + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/copyelim.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/copyelim.go new file mode 100644 index 0000000000000000000000000000000000000000..17f65127ee0da8d105e7dedafce5098aa3269146 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/copyelim.go @@ -0,0 +1,84 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// copyelim removes all uses of OpCopy values from f. +// A subsequent deadcode pass is needed to actually remove the copies. +func copyelim(f *Func) { + // Modify all values so no arg (including args + // of OpCopy) is a copy. + for _, b := range f.Blocks { + for _, v := range b.Values { + copyelimValue(v) + } + } + + // Update block control values. + for _, b := range f.Blocks { + for i, v := range b.ControlValues() { + if v.Op == OpCopy { + b.ReplaceControl(i, v.Args[0]) + } + } + } + + // Update named values. + for _, name := range f.Names { + values := f.NamedValues[*name] + for i, v := range values { + if v.Op == OpCopy { + values[i] = v.Args[0] + } + } + } +} + +// copySource returns the (non-copy) op which is the +// ultimate source of v. v must be a copy op. 
+func copySource(v *Value) *Value { + w := v.Args[0] + + // This loop is just: + // for w.Op == OpCopy { + // w = w.Args[0] + // } + // but we take some extra care to make sure we + // don't get stuck in an infinite loop. + // Infinite copy loops may happen in unreachable code. + // (TODO: or can they? Needs a test.) + slow := w + var advance bool + for w.Op == OpCopy { + w = w.Args[0] + if w == slow { + w.reset(OpUnknown) + break + } + if advance { + slow = slow.Args[0] + } + advance = !advance + } + + // The answer is w. Update all the copies we saw + // to point directly to w. Doing this update makes + // sure that we don't end up doing O(n^2) work + // for a chain of n copies. + for v != w { + x := v.Args[0] + v.SetArg(0, w) + v = x + } + return w +} + +// copyelimValue ensures that no args of v are copies. +func copyelimValue(v *Value) { + for i, a := range v.Args { + if a.Op == OpCopy { + v.SetArg(i, copySource(a)) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/copyelim_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/copyelim_test.go new file mode 100644 index 0000000000000000000000000000000000000000..fe31b12191646d145742316e59d8d72e5039dbf4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/copyelim_test.go @@ -0,0 +1,41 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import ( + "cmd/compile/internal/types" + "fmt" + "testing" +) + +func BenchmarkCopyElim1(b *testing.B) { benchmarkCopyElim(b, 1) } +func BenchmarkCopyElim10(b *testing.B) { benchmarkCopyElim(b, 10) } +func BenchmarkCopyElim100(b *testing.B) { benchmarkCopyElim(b, 100) } +func BenchmarkCopyElim1000(b *testing.B) { benchmarkCopyElim(b, 1000) } +func BenchmarkCopyElim10000(b *testing.B) { benchmarkCopyElim(b, 10000) } +func BenchmarkCopyElim100000(b *testing.B) { benchmarkCopyElim(b, 100000) } + +func benchmarkCopyElim(b *testing.B, n int) { + c := testConfig(b) + + values := make([]interface{}, 0, n+2) + values = append(values, Valu("mem", OpInitMem, types.TypeMem, 0, nil)) + last := "mem" + for i := 0; i < n; i++ { + name := fmt.Sprintf("copy%d", i) + values = append(values, Valu(name, OpCopy, types.TypeMem, 0, nil, last)) + last = name + } + values = append(values, Exit(last)) + // Reverse values array to make it hard + for i := 0; i < len(values)/2; i++ { + values[i], values[len(values)-1-i] = values[len(values)-1-i], values[i] + } + + for i := 0; i < b.N; i++ { + fun := c.Fun("entry", Bloc("entry", values...)) + Copyelim(fun.f) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/critical.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/critical.go new file mode 100644 index 0000000000000000000000000000000000000000..f14bb93e6d324131b9b7a4c0c7e6d64614f8d8c7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/critical.go @@ -0,0 +1,111 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// critical splits critical edges (those that go from a block with +// more than one outedge to a block with more than one inedge). +// Regalloc wants a critical-edge-free CFG so it can implement phi values. 
+func critical(f *Func) { + // maps from phi arg ID to the new block created for that argument + blocks := f.Cache.allocBlockSlice(f.NumValues()) + defer f.Cache.freeBlockSlice(blocks) + // need to iterate over f.Blocks without range, as we might + // need to split critical edges on newly constructed blocks + for j := 0; j < len(f.Blocks); j++ { + b := f.Blocks[j] + if len(b.Preds) <= 1 { + continue + } + + var phi *Value + // determine if we've only got a single phi in this + // block, this is easier to handle than the general + // case of a block with multiple phi values. + for _, v := range b.Values { + if v.Op == OpPhi { + if phi != nil { + phi = nil + break + } + phi = v + } + } + + // reset our block map + if phi != nil { + for _, v := range phi.Args { + blocks[v.ID] = nil + } + } + + // split input edges coming from multi-output blocks. + for i := 0; i < len(b.Preds); { + e := b.Preds[i] + p := e.b + pi := e.i + if p.Kind == BlockPlain { + i++ + continue // only single output block + } + + var d *Block // new block used to remove critical edge + reusedBlock := false // if true, then this is not the first use of this block + if phi != nil { + argID := phi.Args[i].ID + // find or record the block that we used to split + // critical edges for this argument + if d = blocks[argID]; d == nil { + // splitting doesn't necessarily remove the critical edge, + // since we're iterating over len(f.Blocks) above, this forces + // the new blocks to be re-examined. 
+ d = f.NewBlock(BlockPlain) + d.Pos = p.Pos + blocks[argID] = d + if f.pass.debug > 0 { + f.Warnl(p.Pos, "split critical edge") + } + } else { + reusedBlock = true + } + } else { + // no existing block, so allocate a new block + // to place on the edge + d = f.NewBlock(BlockPlain) + d.Pos = p.Pos + if f.pass.debug > 0 { + f.Warnl(p.Pos, "split critical edge") + } + } + + // if this not the first argument for the + // block, then we need to remove the + // corresponding elements from the block + // predecessors and phi args + if reusedBlock { + // Add p->d edge + p.Succs[pi] = Edge{d, len(d.Preds)} + d.Preds = append(d.Preds, Edge{p, pi}) + + // Remove p as a predecessor from b. + b.removePred(i) + + // Update corresponding phi args + b.removePhiArg(phi, i) + + // splitting occasionally leads to a phi having + // a single argument (occurs with -N) + // Don't increment i in this case because we moved + // an unprocessed predecessor down into slot i. + } else { + // splice it in + p.Succs[pi] = Edge{d, 0} + b.Preds[i] = Edge{d, 0} + d.Preds = append(d.Preds, Edge{p, pi}) + d.Succs = append(d.Succs, Edge{b, i}) + i++ + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/cse.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/cse.go new file mode 100644 index 0000000000000000000000000000000000000000..d6497977c74cfc40e9cbe783704587d5cfd76799 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/cse.go @@ -0,0 +1,378 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/types" + "cmd/internal/src" + "fmt" + "sort" +) + +// cse does common-subexpression elimination on the Function. +// Values are just relinked, nothing is deleted. A subsequent deadcode +// pass is required to actually remove duplicate expressions. 
+func cse(f *Func) { + // Two values are equivalent if they satisfy the following definition: + // equivalent(v, w): + // v.op == w.op + // v.type == w.type + // v.aux == w.aux + // v.auxint == w.auxint + // len(v.args) == len(w.args) + // v.block == w.block if v.op == OpPhi + // equivalent(v.args[i], w.args[i]) for i in 0..len(v.args)-1 + + // The algorithm searches for a partition of f's values into + // equivalence classes using the above definition. + // It starts with a coarse partition and iteratively refines it + // until it reaches a fixed point. + + // Make initial coarse partitions by using a subset of the conditions above. + a := f.Cache.allocValueSlice(f.NumValues()) + defer func() { f.Cache.freeValueSlice(a) }() // inside closure to use final value of a + a = a[:0] + if f.auxmap == nil { + f.auxmap = auxmap{} + } + for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Type.IsMemory() { + continue // memory values can never cse + } + if f.auxmap[v.Aux] == 0 { + f.auxmap[v.Aux] = int32(len(f.auxmap)) + 1 + } + a = append(a, v) + } + } + partition := partitionValues(a, f.auxmap) + + // map from value id back to eqclass id + valueEqClass := f.Cache.allocIDSlice(f.NumValues()) + defer f.Cache.freeIDSlice(valueEqClass) + for _, b := range f.Blocks { + for _, v := range b.Values { + // Use negative equivalence class #s for unique values. + valueEqClass[v.ID] = -v.ID + } + } + var pNum ID = 1 + for _, e := range partition { + if f.pass.debug > 1 && len(e) > 500 { + fmt.Printf("CSE.large partition (%d): ", len(e)) + for j := 0; j < 3; j++ { + fmt.Printf("%s ", e[j].LongString()) + } + fmt.Println() + } + + for _, v := range e { + valueEqClass[v.ID] = pNum + } + if f.pass.debug > 2 && len(e) > 1 { + fmt.Printf("CSE.partition #%d:", pNum) + for _, v := range e { + fmt.Printf(" %s", v.String()) + } + fmt.Printf("\n") + } + pNum++ + } + + // Split equivalence classes at points where they have + // non-equivalent arguments. 
Repeat until we can't find any + // more splits. + var splitPoints []int + byArgClass := new(partitionByArgClass) // reusable partitionByArgClass to reduce allocations + for { + changed := false + + // partition can grow in the loop. By not using a range loop here, + // we process new additions as they arrive, avoiding O(n^2) behavior. + for i := 0; i < len(partition); i++ { + e := partition[i] + + if opcodeTable[e[0].Op].commutative { + // Order the first two args before comparison. + for _, v := range e { + if valueEqClass[v.Args[0].ID] > valueEqClass[v.Args[1].ID] { + v.Args[0], v.Args[1] = v.Args[1], v.Args[0] + } + } + } + + // Sort by eq class of arguments. + byArgClass.a = e + byArgClass.eqClass = valueEqClass + sort.Sort(byArgClass) + + // Find split points. + splitPoints = append(splitPoints[:0], 0) + for j := 1; j < len(e); j++ { + v, w := e[j-1], e[j] + // Note: commutative args already correctly ordered by byArgClass. + eqArgs := true + for k, a := range v.Args { + b := w.Args[k] + if valueEqClass[a.ID] != valueEqClass[b.ID] { + eqArgs = false + break + } + } + if !eqArgs { + splitPoints = append(splitPoints, j) + } + } + if len(splitPoints) == 1 { + continue // no splits, leave equivalence class alone. + } + + // Move another equivalence class down in place of e. + partition[i] = partition[len(partition)-1] + partition = partition[:len(partition)-1] + i-- + + // Add new equivalence classes for the parts of e we found. + splitPoints = append(splitPoints, len(e)) + for j := 0; j < len(splitPoints)-1; j++ { + f := e[splitPoints[j]:splitPoints[j+1]] + if len(f) == 1 { + // Don't add singletons. + valueEqClass[f[0].ID] = -f[0].ID + continue + } + for _, v := range f { + valueEqClass[v.ID] = pNum + } + pNum++ + partition = append(partition, f) + } + changed = true + } + + if !changed { + break + } + } + + sdom := f.Sdom() + + // Compute substitutions we would like to do. 
We substitute v for w + // if v and w are in the same equivalence class and v dominates w. + rewrite := f.Cache.allocValueSlice(f.NumValues()) + defer f.Cache.freeValueSlice(rewrite) + byDom := new(partitionByDom) // reusable partitionByDom to reduce allocs + for _, e := range partition { + byDom.a = e + byDom.sdom = sdom + sort.Sort(byDom) + for i := 0; i < len(e)-1; i++ { + // e is sorted by domorder, so a maximal dominant element is first in the slice + v := e[i] + if v == nil { + continue + } + + e[i] = nil + // Replace all elements of e which v dominates + for j := i + 1; j < len(e); j++ { + w := e[j] + if w == nil { + continue + } + if sdom.IsAncestorEq(v.Block, w.Block) { + rewrite[w.ID] = v + e[j] = nil + } else { + // e is sorted by domorder, so v.Block doesn't dominate any subsequent blocks in e + break + } + } + } + } + + rewrites := int64(0) + + // Apply substitutions + for _, b := range f.Blocks { + for _, v := range b.Values { + for i, w := range v.Args { + if x := rewrite[w.ID]; x != nil { + if w.Pos.IsStmt() == src.PosIsStmt { + // about to lose a statement marker, w + // w is an input to v; if they're in the same block + // and the same line, v is a good-enough new statement boundary. + if w.Block == v.Block && w.Pos.Line() == v.Pos.Line() { + v.Pos = v.Pos.WithIsStmt() + w.Pos = w.Pos.WithNotStmt() + } // TODO and if this fails? + } + v.SetArg(i, x) + rewrites++ + } + } + } + for i, v := range b.ControlValues() { + if x := rewrite[v.ID]; x != nil { + if v.Op == OpNilCheck { + // nilcheck pass will remove the nil checks and log + // them appropriately, so don't mess with them here. + continue + } + b.ReplaceControl(i, x) + } + } + } + + if f.pass.stats > 0 { + f.LogStat("CSE REWRITES", rewrites) + } +} + +// An eqclass approximates an equivalence class. During the +// algorithm it may represent the union of several of the +// final equivalence classes. 
+type eqclass []*Value + +// partitionValues partitions the values into equivalence classes +// based on having all the following features match: +// - opcode +// - type +// - auxint +// - aux +// - nargs +// - block # if a phi op +// - first two arg's opcodes and auxint +// - NOT first two arg's aux; that can break CSE. +// +// partitionValues returns a list of equivalence classes, each +// being a sorted by ID list of *Values. The eqclass slices are +// backed by the same storage as the input slice. +// Equivalence classes of size 1 are ignored. +func partitionValues(a []*Value, auxIDs auxmap) []eqclass { + sort.Sort(sortvalues{a, auxIDs}) + + var partition []eqclass + for len(a) > 0 { + v := a[0] + j := 1 + for ; j < len(a); j++ { + w := a[j] + if cmpVal(v, w, auxIDs) != types.CMPeq { + break + } + } + if j > 1 { + partition = append(partition, a[:j]) + } + a = a[j:] + } + + return partition +} +func lt2Cmp(isLt bool) types.Cmp { + if isLt { + return types.CMPlt + } + return types.CMPgt +} + +type auxmap map[Aux]int32 + +func cmpVal(v, w *Value, auxIDs auxmap) types.Cmp { + // Try to order these comparison by cost (cheaper first) + if v.Op != w.Op { + return lt2Cmp(v.Op < w.Op) + } + if v.AuxInt != w.AuxInt { + return lt2Cmp(v.AuxInt < w.AuxInt) + } + if len(v.Args) != len(w.Args) { + return lt2Cmp(len(v.Args) < len(w.Args)) + } + if v.Op == OpPhi && v.Block != w.Block { + return lt2Cmp(v.Block.ID < w.Block.ID) + } + if v.Type.IsMemory() { + // We will never be able to CSE two values + // that generate memory. + return lt2Cmp(v.ID < w.ID) + } + // OpSelect is a pseudo-op. We need to be more aggressive + // regarding CSE to keep multiple OpSelect's of the same + // argument from existing. 
+ if v.Op != OpSelect0 && v.Op != OpSelect1 && v.Op != OpSelectN { + if tc := v.Type.Compare(w.Type); tc != types.CMPeq { + return tc + } + } + + if v.Aux != w.Aux { + if v.Aux == nil { + return types.CMPlt + } + if w.Aux == nil { + return types.CMPgt + } + return lt2Cmp(auxIDs[v.Aux] < auxIDs[w.Aux]) + } + + return types.CMPeq +} + +// Sort values to make the initial partition. +type sortvalues struct { + a []*Value // array of values + auxIDs auxmap // aux -> aux ID map +} + +func (sv sortvalues) Len() int { return len(sv.a) } +func (sv sortvalues) Swap(i, j int) { sv.a[i], sv.a[j] = sv.a[j], sv.a[i] } +func (sv sortvalues) Less(i, j int) bool { + v := sv.a[i] + w := sv.a[j] + if cmp := cmpVal(v, w, sv.auxIDs); cmp != types.CMPeq { + return cmp == types.CMPlt + } + + // Sort by value ID last to keep the sort result deterministic. + return v.ID < w.ID +} + +type partitionByDom struct { + a []*Value // array of values + sdom SparseTree +} + +func (sv partitionByDom) Len() int { return len(sv.a) } +func (sv partitionByDom) Swap(i, j int) { sv.a[i], sv.a[j] = sv.a[j], sv.a[i] } +func (sv partitionByDom) Less(i, j int) bool { + v := sv.a[i] + w := sv.a[j] + return sv.sdom.domorder(v.Block) < sv.sdom.domorder(w.Block) +} + +type partitionByArgClass struct { + a []*Value // array of values + eqClass []ID // equivalence class IDs of values +} + +func (sv partitionByArgClass) Len() int { return len(sv.a) } +func (sv partitionByArgClass) Swap(i, j int) { sv.a[i], sv.a[j] = sv.a[j], sv.a[i] } +func (sv partitionByArgClass) Less(i, j int) bool { + v := sv.a[i] + w := sv.a[j] + for i, a := range v.Args { + b := w.Args[i] + if sv.eqClass[a.ID] < sv.eqClass[b.ID] { + return true + } + if sv.eqClass[a.ID] > sv.eqClass[b.ID] { + return false + } + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/cse_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/cse_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..7d3e44fbe06e4ca689e042ba0e8034ee1e40cc1c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/cse_test.go @@ -0,0 +1,130 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/types" + "testing" +) + +type tstAux struct { + s string +} + +func (*tstAux) CanBeAnSSAAux() {} + +// This tests for a bug found when partitioning, but not sorting by the Aux value. +func TestCSEAuxPartitionBug(t *testing.T) { + c := testConfig(t) + arg1Aux := &tstAux{"arg1-aux"} + arg2Aux := &tstAux{"arg2-aux"} + arg3Aux := &tstAux{"arg3-aux"} + a := c.Temp(c.config.Types.Int8.PtrTo()) + + // construct lots of values with args that have aux values and place + // them in an order that triggers the bug + fun := c.Fun("entry", + Bloc("entry", + Valu("start", OpInitMem, types.TypeMem, 0, nil), + Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil), + Valu("r7", OpAdd64, c.config.Types.Int64, 0, nil, "arg3", "arg1"), + Valu("r1", OpAdd64, c.config.Types.Int64, 0, nil, "arg1", "arg2"), + Valu("arg1", OpArg, c.config.Types.Int64, 0, arg1Aux), + Valu("arg2", OpArg, c.config.Types.Int64, 0, arg2Aux), + Valu("arg3", OpArg, c.config.Types.Int64, 0, arg3Aux), + Valu("r9", OpAdd64, c.config.Types.Int64, 0, nil, "r7", "r8"), + Valu("r4", OpAdd64, c.config.Types.Int64, 0, nil, "r1", "r2"), + Valu("r8", OpAdd64, c.config.Types.Int64, 0, nil, "arg3", "arg2"), + Valu("r2", OpAdd64, c.config.Types.Int64, 0, nil, "arg1", "arg2"), + Valu("raddr", OpLocalAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sp", "start"), + Valu("raddrdef", OpVarDef, types.TypeMem, 0, a, "start"), + Valu("r6", OpAdd64, c.config.Types.Int64, 0, nil, "r4", "r5"), + Valu("r3", OpAdd64, c.config.Types.Int64, 0, nil, "arg1", "arg2"), + Valu("r5", OpAdd64, c.config.Types.Int64, 0, nil, "r2", "r3"), + Valu("r10", OpAdd64, 
c.config.Types.Int64, 0, nil, "r6", "r9"), + Valu("rstore", OpStore, types.TypeMem, 0, c.config.Types.Int64, "raddr", "r10", "raddrdef"), + Goto("exit")), + Bloc("exit", + Exit("rstore"))) + + CheckFunc(fun.f) + cse(fun.f) + deadcode(fun.f) + CheckFunc(fun.f) + + s1Cnt := 2 + // r1 == r2 == r3, needs to remove two of this set + s2Cnt := 1 + // r4 == r5, needs to remove one of these + for k, v := range fun.values { + if v.Op == OpInvalid { + switch k { + case "r1": + fallthrough + case "r2": + fallthrough + case "r3": + if s1Cnt == 0 { + t.Errorf("cse removed all of r1,r2,r3") + } + s1Cnt-- + + case "r4": + fallthrough + case "r5": + if s2Cnt == 0 { + t.Errorf("cse removed all of r4,r5") + } + s2Cnt-- + default: + t.Errorf("cse removed %s, but shouldn't have", k) + } + } + } + + if s1Cnt != 0 || s2Cnt != 0 { + t.Errorf("%d values missed during cse", s1Cnt+s2Cnt) + } +} + +// TestZCSE tests the zero arg cse. +func TestZCSE(t *testing.T) { + c := testConfig(t) + a := c.Temp(c.config.Types.Int8.PtrTo()) + + fun := c.Fun("entry", + Bloc("entry", + Valu("start", OpInitMem, types.TypeMem, 0, nil), + Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil), + Valu("sb1", OpSB, c.config.Types.Uintptr, 0, nil), + Valu("sb2", OpSB, c.config.Types.Uintptr, 0, nil), + Valu("addr1", OpAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sb1"), + Valu("addr2", OpAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sb2"), + Valu("a1ld", OpLoad, c.config.Types.Int64, 0, nil, "addr1", "start"), + Valu("a2ld", OpLoad, c.config.Types.Int64, 0, nil, "addr2", "start"), + Valu("c1", OpConst64, c.config.Types.Int64, 1, nil), + Valu("r1", OpAdd64, c.config.Types.Int64, 0, nil, "a1ld", "c1"), + Valu("c2", OpConst64, c.config.Types.Int64, 1, nil), + Valu("r2", OpAdd64, c.config.Types.Int64, 0, nil, "a2ld", "c2"), + Valu("r3", OpAdd64, c.config.Types.Int64, 0, nil, "r1", "r2"), + Valu("raddr", OpLocalAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sp", "start"), + Valu("raddrdef", OpVarDef, types.TypeMem, 0, a, 
"start"), + Valu("rstore", OpStore, types.TypeMem, 0, c.config.Types.Int64, "raddr", "r3", "raddrdef"), + Goto("exit")), + Bloc("exit", + Exit("rstore"))) + + CheckFunc(fun.f) + zcse(fun.f) + deadcode(fun.f) + CheckFunc(fun.f) + + if fun.values["c1"].Op != OpInvalid && fun.values["c2"].Op != OpInvalid { + t.Errorf("zsce should have removed c1 or c2") + } + if fun.values["sb1"].Op != OpInvalid && fun.values["sb2"].Op != OpInvalid { + t.Errorf("zsce should have removed sb1 or sb2") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/deadcode.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/deadcode.go new file mode 100644 index 0000000000000000000000000000000000000000..3bd1737babbb252aaba47ea094ef3af40c28222c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/deadcode.go @@ -0,0 +1,366 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/internal/src" +) + +// findlive returns the reachable blocks and live values in f. +// The caller should call f.Cache.freeBoolSlice(live) when it is done with it. +func findlive(f *Func) (reachable []bool, live []bool) { + reachable = ReachableBlocks(f) + var order []*Value + live, order = liveValues(f, reachable) + f.Cache.freeValueSlice(order) + return +} + +// ReachableBlocks returns the reachable blocks in f. 
+func ReachableBlocks(f *Func) []bool { + reachable := make([]bool, f.NumBlocks()) + reachable[f.Entry.ID] = true + p := make([]*Block, 0, 64) // stack-like worklist + p = append(p, f.Entry) + for len(p) > 0 { + // Pop a reachable block + b := p[len(p)-1] + p = p[:len(p)-1] + // Mark successors as reachable + s := b.Succs + if b.Kind == BlockFirst { + s = s[:1] + } + for _, e := range s { + c := e.b + if int(c.ID) >= len(reachable) { + f.Fatalf("block %s >= f.NumBlocks()=%d?", c, len(reachable)) + } + if !reachable[c.ID] { + reachable[c.ID] = true + p = append(p, c) // push + } + } + } + return reachable +} + +// liveValues returns the live values in f and a list of values that are eligible +// to be statements in reversed data flow order. +// The second result is used to help conserve statement boundaries for debugging. +// reachable is a map from block ID to whether the block is reachable. +// The caller should call f.Cache.freeBoolSlice(live) and f.Cache.freeValueSlice(liveOrderStmts). +// when they are done with the return values. +func liveValues(f *Func, reachable []bool) (live []bool, liveOrderStmts []*Value) { + live = f.Cache.allocBoolSlice(f.NumValues()) + liveOrderStmts = f.Cache.allocValueSlice(f.NumValues())[:0] + + // After regalloc, consider all values to be live. + // See the comment at the top of regalloc.go and in deadcode for details. 
+ if f.RegAlloc != nil { + for i := range live { + live[i] = true + } + return + } + + // Record all the inline indexes we need + var liveInlIdx map[int]bool + pt := f.Config.ctxt.PosTable + for _, b := range f.Blocks { + for _, v := range b.Values { + i := pt.Pos(v.Pos).Base().InliningIndex() + if i < 0 { + continue + } + if liveInlIdx == nil { + liveInlIdx = map[int]bool{} + } + liveInlIdx[i] = true + } + i := pt.Pos(b.Pos).Base().InliningIndex() + if i < 0 { + continue + } + if liveInlIdx == nil { + liveInlIdx = map[int]bool{} + } + liveInlIdx[i] = true + } + + // Find all live values + q := f.Cache.allocValueSlice(f.NumValues())[:0] + defer f.Cache.freeValueSlice(q) + + // Starting set: all control values of reachable blocks are live. + // Calls are live (because callee can observe the memory state). + for _, b := range f.Blocks { + if !reachable[b.ID] { + continue + } + for _, v := range b.ControlValues() { + if !live[v.ID] { + live[v.ID] = true + q = append(q, v) + if v.Pos.IsStmt() != src.PosNotStmt { + liveOrderStmts = append(liveOrderStmts, v) + } + } + } + for _, v := range b.Values { + if (opcodeTable[v.Op].call || opcodeTable[v.Op].hasSideEffects || opcodeTable[v.Op].nilCheck) && !live[v.ID] { + live[v.ID] = true + q = append(q, v) + if v.Pos.IsStmt() != src.PosNotStmt { + liveOrderStmts = append(liveOrderStmts, v) + } + } + if v.Op == OpInlMark { + if !liveInlIdx[int(v.AuxInt)] { + // We don't need marks for bodies that + // have been completely optimized away. + // TODO: save marks only for bodies which + // have a faulting instruction or a call? + continue + } + live[v.ID] = true + q = append(q, v) + if v.Pos.IsStmt() != src.PosNotStmt { + liveOrderStmts = append(liveOrderStmts, v) + } + } + } + } + + // Compute transitive closure of live values. 
+ for len(q) > 0 { + // pop a reachable value + v := q[len(q)-1] + q[len(q)-1] = nil + q = q[:len(q)-1] + for i, x := range v.Args { + if v.Op == OpPhi && !reachable[v.Block.Preds[i].b.ID] { + continue + } + if !live[x.ID] { + live[x.ID] = true + q = append(q, x) // push + if x.Pos.IsStmt() != src.PosNotStmt { + liveOrderStmts = append(liveOrderStmts, x) + } + } + } + } + + return +} + +// deadcode removes dead code from f. +func deadcode(f *Func) { + // deadcode after regalloc is forbidden for now. Regalloc + // doesn't quite generate legal SSA which will lead to some + // required moves being eliminated. See the comment at the + // top of regalloc.go for details. + if f.RegAlloc != nil { + f.Fatalf("deadcode after regalloc") + } + + // Find reachable blocks. + reachable := ReachableBlocks(f) + + // Get rid of edges from dead to live code. + for _, b := range f.Blocks { + if reachable[b.ID] { + continue + } + for i := 0; i < len(b.Succs); { + e := b.Succs[i] + if reachable[e.b.ID] { + b.removeEdge(i) + } else { + i++ + } + } + } + + // Get rid of dead edges from live code. + for _, b := range f.Blocks { + if !reachable[b.ID] { + continue + } + if b.Kind != BlockFirst { + continue + } + b.removeEdge(1) + b.Kind = BlockPlain + b.Likely = BranchUnknown + } + + // Splice out any copies introduced during dead block removal. + copyelim(f) + + // Find live values. + live, order := liveValues(f, reachable) + defer func() { f.Cache.freeBoolSlice(live) }() + defer func() { f.Cache.freeValueSlice(order) }() + + // Remove dead & duplicate entries from namedValues map. 
+ s := f.newSparseSet(f.NumValues()) + defer f.retSparseSet(s) + i := 0 + for _, name := range f.Names { + j := 0 + s.clear() + values := f.NamedValues[*name] + for _, v := range values { + if live[v.ID] && !s.contains(v.ID) { + values[j] = v + j++ + s.add(v.ID) + } + } + if j == 0 { + delete(f.NamedValues, *name) + } else { + f.Names[i] = name + i++ + for k := len(values) - 1; k >= j; k-- { + values[k] = nil + } + f.NamedValues[*name] = values[:j] + } + } + clearNames := f.Names[i:] + for j := range clearNames { + clearNames[j] = nil + } + f.Names = f.Names[:i] + + pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block + pendingLines.clear() + + // Unlink values and conserve statement boundaries + for i, b := range f.Blocks { + if !reachable[b.ID] { + // TODO what if control is statement boundary? Too late here. + b.ResetControls() + } + for _, v := range b.Values { + if !live[v.ID] { + v.resetArgs() + if v.Pos.IsStmt() == src.PosIsStmt && reachable[b.ID] { + pendingLines.set(v.Pos, int32(i)) // TODO could be more than one pos for a line + } + } + } + } + + // Find new homes for lost lines -- require earliest in data flow with same line that is also in same block + for i := len(order) - 1; i >= 0; i-- { + w := order[i] + if j := pendingLines.get(w.Pos); j > -1 && f.Blocks[j] == w.Block { + w.Pos = w.Pos.WithIsStmt() + pendingLines.remove(w.Pos) + } + } + + // Any boundary that failed to match a live value can move to a block end + pendingLines.foreachEntry(func(j int32, l uint, bi int32) { + b := f.Blocks[bi] + if b.Pos.Line() == l && b.Pos.FileIndex() == j { + b.Pos = b.Pos.WithIsStmt() + } + }) + + // Remove dead values from blocks' value list. Return dead + // values to the allocator. + for _, b := range f.Blocks { + i := 0 + for _, v := range b.Values { + if live[v.ID] { + b.Values[i] = v + i++ + } else { + f.freeValue(v) + } + } + b.truncateValues(i) + } + + // Remove unreachable blocks. 
Return dead blocks to allocator. + i = 0 + for _, b := range f.Blocks { + if reachable[b.ID] { + f.Blocks[i] = b + i++ + } else { + if len(b.Values) > 0 { + b.Fatalf("live values in unreachable block %v: %v", b, b.Values) + } + f.freeBlock(b) + } + } + // zero remainder to help GC + tail := f.Blocks[i:] + for j := range tail { + tail[j] = nil + } + f.Blocks = f.Blocks[:i] +} + +// removeEdge removes the i'th outgoing edge from b (and +// the corresponding incoming edge from b.Succs[i].b). +// Note that this potentially reorders successors of b, so it +// must be used very carefully. +func (b *Block) removeEdge(i int) { + e := b.Succs[i] + c := e.b + j := e.i + + // Adjust b.Succs + b.removeSucc(i) + + // Adjust c.Preds + c.removePred(j) + + // Remove phi args from c's phis. + for _, v := range c.Values { + if v.Op != OpPhi { + continue + } + c.removePhiArg(v, j) + // Note: this is trickier than it looks. Replacing + // a Phi with a Copy can in general cause problems because + // Phi and Copy don't have exactly the same semantics. + // Phi arguments always come from a predecessor block, + // whereas copies don't. This matters in loops like: + // 1: x = (Phi y) + // y = (Add x 1) + // goto 1 + // If we replace Phi->Copy, we get + // 1: x = (Copy y) + // y = (Add x 1) + // goto 1 + // (Phi y) refers to the *previous* value of y, whereas + // (Copy y) refers to the *current* value of y. + // The modified code has a cycle and the scheduler + // will barf on it. + // + // Fortunately, this situation can only happen for dead + // code loops. We know the code we're working with is + // not dead, so we're ok. + // Proof: If we have a potential bad cycle, we have a + // situation like this: + // x = (Phi z) + // y = (op1 x ...) + // z = (op2 y ...) + // Where opX are not Phi ops. But such a situation + // implies a cycle in the dominator graph. 
In the + // example, x.Block dominates y.Block, y.Block dominates + // z.Block, and z.Block dominates x.Block (treating + // "dominates" as reflexive). Cycles in the dominator + // graph can only happen in an unreachable cycle. + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/deadcode_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/deadcode_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5777b841ef58bbf4da30251465f8d2d4b9d712a0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/deadcode_test.go @@ -0,0 +1,161 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/types" + "fmt" + "strconv" + "testing" +) + +func TestDeadLoop(t *testing.T) { + c := testConfig(t) + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Goto("exit")), + Bloc("exit", + Exit("mem")), + // dead loop + Bloc("deadblock", + // dead value in dead block + Valu("deadval", OpConstBool, c.config.Types.Bool, 1, nil), + If("deadval", "deadblock", "exit"))) + + CheckFunc(fun.f) + Deadcode(fun.f) + CheckFunc(fun.f) + + for _, b := range fun.f.Blocks { + if b == fun.blocks["deadblock"] { + t.Errorf("dead block not removed") + } + for _, v := range b.Values { + if v == fun.values["deadval"] { + t.Errorf("control value of dead block not removed") + } + } + } +} + +func TestDeadValue(t *testing.T) { + c := testConfig(t) + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("deadval", OpConst64, c.config.Types.Int64, 37, nil), + Goto("exit")), + Bloc("exit", + Exit("mem"))) + + CheckFunc(fun.f) + Deadcode(fun.f) + CheckFunc(fun.f) + + for _, b := range fun.f.Blocks { + for _, v := range b.Values { + if v == fun.values["deadval"] { + t.Errorf("dead value not 
removed") + } + } + } +} + +func TestNeverTaken(t *testing.T) { + c := testConfig(t) + fun := c.Fun("entry", + Bloc("entry", + Valu("cond", OpConstBool, c.config.Types.Bool, 0, nil), + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + If("cond", "then", "else")), + Bloc("then", + Goto("exit")), + Bloc("else", + Goto("exit")), + Bloc("exit", + Exit("mem"))) + + CheckFunc(fun.f) + Opt(fun.f) + Deadcode(fun.f) + CheckFunc(fun.f) + + if fun.blocks["entry"].Kind != BlockPlain { + t.Errorf("if(false) not simplified") + } + for _, b := range fun.f.Blocks { + if b == fun.blocks["then"] { + t.Errorf("then block still present") + } + for _, v := range b.Values { + if v == fun.values["cond"] { + t.Errorf("constant condition still present") + } + } + } + +} + +func TestNestedDeadBlocks(t *testing.T) { + c := testConfig(t) + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("cond", OpConstBool, c.config.Types.Bool, 0, nil), + If("cond", "b2", "b4")), + Bloc("b2", + If("cond", "b3", "b4")), + Bloc("b3", + If("cond", "b3", "b4")), + Bloc("b4", + If("cond", "b3", "exit")), + Bloc("exit", + Exit("mem"))) + + CheckFunc(fun.f) + Opt(fun.f) + CheckFunc(fun.f) + Deadcode(fun.f) + CheckFunc(fun.f) + if fun.blocks["entry"].Kind != BlockPlain { + t.Errorf("if(false) not simplified") + } + for _, b := range fun.f.Blocks { + if b == fun.blocks["b2"] { + t.Errorf("b2 block still present") + } + if b == fun.blocks["b3"] { + t.Errorf("b3 block still present") + } + for _, v := range b.Values { + if v == fun.values["cond"] { + t.Errorf("constant condition still present") + } + } + } +} + +func BenchmarkDeadCode(b *testing.B) { + for _, n := range [...]int{1, 10, 100, 1000, 10000, 100000, 200000} { + b.Run(strconv.Itoa(n), func(b *testing.B) { + c := testConfig(b) + blocks := make([]bloc, 0, n+2) + blocks = append(blocks, + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Goto("exit"))) + blocks = append(blocks, Bloc("exit", 
Exit("mem"))) + for i := 0; i < n; i++ { + blocks = append(blocks, Bloc(fmt.Sprintf("dead%d", i), Goto("exit"))) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + fun := c.Fun("entry", blocks...) + Deadcode(fun.f) + } + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/deadstore.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/deadstore.go new file mode 100644 index 0000000000000000000000000000000000000000..cb3427103c501bd818e053350de9c4cd18f7e3e5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/deadstore.go @@ -0,0 +1,397 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/ir" + "cmd/compile/internal/types" +) + +// dse does dead-store elimination on the Function. +// Dead stores are those which are unconditionally followed by +// another store to the same location, with no intervening load. +// This implementation only works within a basic block. TODO: use something more global. +func dse(f *Func) { + var stores []*Value + loadUse := f.newSparseSet(f.NumValues()) + defer f.retSparseSet(loadUse) + storeUse := f.newSparseSet(f.NumValues()) + defer f.retSparseSet(storeUse) + shadowed := f.newSparseMap(f.NumValues()) + defer f.retSparseMap(shadowed) + for _, b := range f.Blocks { + // Find all the stores in this block. Categorize their uses: + // loadUse contains stores which are used by a subsequent load. + // storeUse contains stores which are used by a subsequent store. 
+ loadUse.clear() + storeUse.clear() + stores = stores[:0] + for _, v := range b.Values { + if v.Op == OpPhi { + // Ignore phis - they will always be first and can't be eliminated + continue + } + if v.Type.IsMemory() { + stores = append(stores, v) + for _, a := range v.Args { + if a.Block == b && a.Type.IsMemory() { + storeUse.add(a.ID) + if v.Op != OpStore && v.Op != OpZero && v.Op != OpVarDef { + // CALL, DUFFCOPY, etc. are both + // reads and writes. + loadUse.add(a.ID) + } + } + } + } else { + for _, a := range v.Args { + if a.Block == b && a.Type.IsMemory() { + loadUse.add(a.ID) + } + } + } + } + if len(stores) == 0 { + continue + } + + // find last store in the block + var last *Value + for _, v := range stores { + if storeUse.contains(v.ID) { + continue + } + if last != nil { + b.Fatalf("two final stores - simultaneous live stores %s %s", last.LongString(), v.LongString()) + } + last = v + } + if last == nil { + b.Fatalf("no last store found - cycle?") + } + + // Walk backwards looking for dead stores. Keep track of shadowed addresses. + // A "shadowed address" is a pointer, offset, and size describing a memory region that + // is known to be written. We keep track of shadowed addresses in the shadowed map, + // mapping the ID of the address to a shadowRange where future writes will happen. + // Since we're walking backwards, writes to a shadowed region are useless, + // as they will be immediately overwritten. + shadowed.clear() + v := last + + walkloop: + if loadUse.contains(v.ID) { + // Someone might be reading this memory state. + // Clear all shadowed addresses. 
+ shadowed.clear() + } + if v.Op == OpStore || v.Op == OpZero { + ptr := v.Args[0] + var off int64 + for ptr.Op == OpOffPtr { // Walk to base pointer + off += ptr.AuxInt + ptr = ptr.Args[0] + } + var sz int64 + if v.Op == OpStore { + sz = v.Aux.(*types.Type).Size() + } else { // OpZero + sz = v.AuxInt + } + sr := shadowRange(shadowed.get(ptr.ID)) + if sr.contains(off, off+sz) { + // Modify the store/zero into a copy of the memory state, + // effectively eliding the store operation. + if v.Op == OpStore { + // store addr value mem + v.SetArgs1(v.Args[2]) + } else { + // zero addr mem + v.SetArgs1(v.Args[1]) + } + v.Aux = nil + v.AuxInt = 0 + v.Op = OpCopy + } else { + // Extend shadowed region. + shadowed.set(ptr.ID, int32(sr.merge(off, off+sz))) + } + } + // walk to previous store + if v.Op == OpPhi { + // At start of block. Move on to next block. + // The memory phi, if it exists, is always + // the first logical store in the block. + // (Even if it isn't the first in the current b.Values order.) + continue + } + for _, a := range v.Args { + if a.Block == b && a.Type.IsMemory() { + v = a + goto walkloop + } + } + } +} + +// A shadowRange encodes a set of byte offsets [lo():hi()] from +// a given pointer that will be written to later in the block. +// A zero shadowRange encodes an empty shadowed range (and so +// does a -1 shadowRange, which is what sparsemap.get returns +// on a failed lookup). +type shadowRange int32 + +func (sr shadowRange) lo() int64 { + return int64(sr & 0xffff) +} +func (sr shadowRange) hi() int64 { + return int64((sr >> 16) & 0xffff) +} + +// contains reports whether [lo:hi] is completely within sr. +func (sr shadowRange) contains(lo, hi int64) bool { + return lo >= sr.lo() && hi <= sr.hi() +} + +// merge returns the union of sr and [lo:hi]. +// merge is allowed to return something smaller than the union. +func (sr shadowRange) merge(lo, hi int64) shadowRange { + if lo < 0 || hi > 0xffff { + // Ignore offsets that are too large or small. 
+ return sr + } + if sr.lo() == sr.hi() { + // Old range is empty - use new one. + return shadowRange(lo + hi<<16) + } + if hi < sr.lo() || lo > sr.hi() { + // The two regions don't overlap or abut, so we would + // have to keep track of multiple disjoint ranges. + // Because we can only keep one, keep the larger one. + if sr.hi()-sr.lo() >= hi-lo { + return sr + } + return shadowRange(lo + hi<<16) + } + // Regions overlap or abut - compute the union. + return shadowRange(min(lo, sr.lo()) + max(hi, sr.hi())<<16) +} + +// elimDeadAutosGeneric deletes autos that are never accessed. To achieve this +// we track the operations that the address of each auto reaches and if it only +// reaches stores then we delete all the stores. The other operations will then +// be eliminated by the dead code elimination pass. +func elimDeadAutosGeneric(f *Func) { + addr := make(map[*Value]*ir.Name) // values that the address of the auto reaches + elim := make(map[*Value]*ir.Name) // values that could be eliminated if the auto is + var used ir.NameSet // used autos that must be kept + + // visit the value and report whether any of the maps are updated + visit := func(v *Value) (changed bool) { + args := v.Args + switch v.Op { + case OpAddr, OpLocalAddr: + // Propagate the address if it points to an auto. + n, ok := v.Aux.(*ir.Name) + if !ok || n.Class != ir.PAUTO { + return + } + if addr[v] == nil { + addr[v] = n + changed = true + } + return + case OpVarDef: + // v should be eliminated if we eliminate the auto. + n, ok := v.Aux.(*ir.Name) + if !ok || n.Class != ir.PAUTO { + return + } + if elim[v] == nil { + elim[v] = n + changed = true + } + return + case OpVarLive: + // Don't delete the auto if it needs to be kept alive. + + // We depend on this check to keep the autotmp stack slots + // for open-coded defers from being removed (since they + // may not be used by the inline code, but will be used by + // panic processing). 
+ n, ok := v.Aux.(*ir.Name) + if !ok || n.Class != ir.PAUTO { + return + } + if !used.Has(n) { + used.Add(n) + changed = true + } + return + case OpStore, OpMove, OpZero: + // v should be eliminated if we eliminate the auto. + n, ok := addr[args[0]] + if ok && elim[v] == nil { + elim[v] = n + changed = true + } + // Other args might hold pointers to autos. + args = args[1:] + } + + // The code below assumes that we have handled all the ops + // with sym effects already. Sanity check that here. + // Ignore Args since they can't be autos. + if v.Op.SymEffect() != SymNone && v.Op != OpArg { + panic("unhandled op with sym effect") + } + + if v.Uses == 0 && v.Op != OpNilCheck && !v.Op.IsCall() && !v.Op.HasSideEffects() || len(args) == 0 { + // We need to keep nil checks even if they have no use. + // Also keep calls and values that have side effects. + return + } + + // If the address of the auto reaches a memory or control + // operation not covered above then we probably need to keep it. + // We also need to keep autos if they reach Phis (issue #26153). + if v.Type.IsMemory() || v.Type.IsFlags() || v.Op == OpPhi || v.MemoryArg() != nil { + for _, a := range args { + if n, ok := addr[a]; ok { + if !used.Has(n) { + used.Add(n) + changed = true + } + } + } + return + } + + // Propagate any auto addresses through v. + var node *ir.Name + for _, a := range args { + if n, ok := addr[a]; ok && !used.Has(n) { + if node == nil { + node = n + } else if node != n { + // Most of the time we only see one pointer + // reaching an op, but some ops can take + // multiple pointers (e.g. NeqPtr, Phi etc.). + // This is rare, so just propagate the first + // value to keep things simple. + used.Add(n) + changed = true + } + } + } + if node == nil { + return + } + if addr[v] == nil { + // The address of an auto reaches this op. + addr[v] = node + changed = true + return + } + if addr[v] != node { + // This doesn't happen in practice, but catch it just in case. 
+ used.Add(node) + changed = true + } + return + } + + iterations := 0 + for { + if iterations == 4 { + // give up + return + } + iterations++ + changed := false + for _, b := range f.Blocks { + for _, v := range b.Values { + changed = visit(v) || changed + } + // keep the auto if its address reaches a control value + for _, c := range b.ControlValues() { + if n, ok := addr[c]; ok && !used.Has(n) { + used.Add(n) + changed = true + } + } + } + if !changed { + break + } + } + + // Eliminate stores to unread autos. + for v, n := range elim { + if used.Has(n) { + continue + } + // replace with OpCopy + v.SetArgs1(v.MemoryArg()) + v.Aux = nil + v.AuxInt = 0 + v.Op = OpCopy + } +} + +// elimUnreadAutos deletes stores (and associated bookkeeping ops VarDef and VarKill) +// to autos that are never read from. +func elimUnreadAutos(f *Func) { + // Loop over all ops that affect autos taking note of which + // autos we need and also stores that we might be able to + // eliminate. + var seen ir.NameSet + var stores []*Value + for _, b := range f.Blocks { + for _, v := range b.Values { + n, ok := v.Aux.(*ir.Name) + if !ok { + continue + } + if n.Class != ir.PAUTO { + continue + } + + effect := v.Op.SymEffect() + switch effect { + case SymNone, SymWrite: + // If we haven't seen the auto yet + // then this might be a store we can + // eliminate. + if !seen.Has(n) { + stores = append(stores, v) + } + default: + // Assume the auto is needed (loaded, + // has its address taken, etc.). + // Note we have to check the uses + // because dead loads haven't been + // eliminated yet. + if v.Uses > 0 { + seen.Add(n) + } + } + } + } + + // Eliminate stores to unread autos. 
+ for _, store := range stores { + n, _ := store.Aux.(*ir.Name) + if seen.Has(n) { + continue + } + + // replace store with OpCopy + store.SetArgs1(store.MemoryArg()) + store.Aux = nil + store.AuxInt = 0 + store.Op = OpCopy + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/deadstore_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/deadstore_test.go new file mode 100644 index 0000000000000000000000000000000000000000..33cb4b97553c0513b785a7b5cb73507901b7c402 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/deadstore_test.go @@ -0,0 +1,129 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/types" + "testing" +) + +func TestDeadStore(t *testing.T) { + c := testConfig(t) + ptrType := c.config.Types.BytePtr + t.Logf("PTRTYPE %v", ptrType) + fun := c.Fun("entry", + Bloc("entry", + Valu("start", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Valu("v", OpConstBool, c.config.Types.Bool, 1, nil), + Valu("addr1", OpAddr, ptrType, 0, nil, "sb"), + Valu("addr2", OpAddr, ptrType, 0, nil, "sb"), + Valu("addr3", OpAddr, ptrType, 0, nil, "sb"), + Valu("zero1", OpZero, types.TypeMem, 1, c.config.Types.Bool, "addr3", "start"), + Valu("store1", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "zero1"), + Valu("store2", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr2", "v", "store1"), + Valu("store3", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "store2"), + Valu("store4", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr3", "v", "store3"), + Goto("exit")), + Bloc("exit", + Exit("store3"))) + + CheckFunc(fun.f) + dse(fun.f) + CheckFunc(fun.f) + + v1 := fun.values["store1"] + if v1.Op != OpCopy { + t.Errorf("dead store not removed") + } + + v2 := fun.values["zero1"] 
+ if v2.Op != OpCopy { + t.Errorf("dead store (zero) not removed") + } +} +func TestDeadStorePhi(t *testing.T) { + // make sure we don't get into an infinite loop with phi values. + c := testConfig(t) + ptrType := c.config.Types.BytePtr + fun := c.Fun("entry", + Bloc("entry", + Valu("start", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Valu("v", OpConstBool, c.config.Types.Bool, 1, nil), + Valu("addr", OpAddr, ptrType, 0, nil, "sb"), + Goto("loop")), + Bloc("loop", + Valu("phi", OpPhi, types.TypeMem, 0, nil, "start", "store"), + Valu("store", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr", "v", "phi"), + If("v", "loop", "exit")), + Bloc("exit", + Exit("store"))) + + CheckFunc(fun.f) + dse(fun.f) + CheckFunc(fun.f) +} + +func TestDeadStoreTypes(t *testing.T) { + // Make sure a narrow store can't shadow a wider one. We test an even + // stronger restriction, that one store can't shadow another unless the + // types of the address fields are identical (where identicalness is + // decided by the CSE pass). + c := testConfig(t) + t1 := c.config.Types.UInt64.PtrTo() + t2 := c.config.Types.UInt32.PtrTo() + fun := c.Fun("entry", + Bloc("entry", + Valu("start", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Valu("v", OpConstBool, c.config.Types.Bool, 1, nil), + Valu("addr1", OpAddr, t1, 0, nil, "sb"), + Valu("addr2", OpAddr, t2, 0, nil, "sb"), + Valu("store1", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "start"), + Valu("store2", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr2", "v", "store1"), + Goto("exit")), + Bloc("exit", + Exit("store2"))) + + CheckFunc(fun.f) + cse(fun.f) + dse(fun.f) + CheckFunc(fun.f) + + v := fun.values["store1"] + if v.Op == OpCopy { + t.Errorf("store %s incorrectly removed", v) + } +} + +func TestDeadStoreUnsafe(t *testing.T) { + // Make sure a narrow store can't shadow a wider one. 
The test above + // covers the case of two different types, but unsafe pointer casting + // can get to a point where the size is changed but type unchanged. + c := testConfig(t) + ptrType := c.config.Types.UInt64.PtrTo() + fun := c.Fun("entry", + Bloc("entry", + Valu("start", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Valu("v", OpConstBool, c.config.Types.Bool, 1, nil), + Valu("addr1", OpAddr, ptrType, 0, nil, "sb"), + Valu("store1", OpStore, types.TypeMem, 0, c.config.Types.Int64, "addr1", "v", "start"), // store 8 bytes + Valu("store2", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "store1"), // store 1 byte + Goto("exit")), + Bloc("exit", + Exit("store2"))) + + CheckFunc(fun.f) + cse(fun.f) + dse(fun.f) + CheckFunc(fun.f) + + v := fun.values["store1"] + if v.Op == OpCopy { + t.Errorf("store %s incorrectly removed", v) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/debug.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/debug.go new file mode 100644 index 0000000000000000000000000000000000000000..05a72787f345fa01672f71c137714e580a786cde --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/debug.go @@ -0,0 +1,1886 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/abi" + "cmd/compile/internal/abt" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/dwarf" + "cmd/internal/obj" + "cmd/internal/src" + "encoding/hex" + "fmt" + "internal/buildcfg" + "math/bits" + "sort" + "strings" +) + +type SlotID int32 +type VarID int32 + +// A FuncDebug contains all the debug information for the variables in a +// function. Variables are identified by their LocalSlot, which may be +// the result of decomposing a larger variable. 
+type FuncDebug struct { + // Slots is all the slots used in the debug info, indexed by their SlotID. + Slots []LocalSlot + // The user variables, indexed by VarID. + Vars []*ir.Name + // The slots that make up each variable, indexed by VarID. + VarSlots [][]SlotID + // The location list data, indexed by VarID. Must be processed by PutLocationList. + LocationLists [][]byte + // Register-resident output parameters for the function. This is filled in at + // SSA generation time. + RegOutputParams []*ir.Name + // Variable declarations that were removed during optimization + OptDcl []*ir.Name + + // Filled in by the user. Translates Block and Value ID to PC. + // + // NOTE: block is only used if value is BlockStart.ID or BlockEnd.ID. + // Otherwise, it is ignored. + GetPC func(block, value ID) int64 +} + +type BlockDebug struct { + // State at the start and end of the block. These are initialized, + // and updated from new information that flows on back edges. + startState, endState abt.T + // Use these to avoid excess work in the merge. If none of the + // predecessors has changed since the last check, the old answer is + // still good. + lastCheckedTime, lastChangedTime int32 + // Whether the block had any changes to user variables at all. + relevant bool + // false until the block has been processed at least once. This + // affects how the merge is done; the goal is to maximize sharing + // and avoid allocation. + everProcessed bool +} + +// A liveSlot is a slot that's live in loc at entry/exit of a block. +type liveSlot struct { + VarLoc +} + +func (ls *liveSlot) String() string { + return fmt.Sprintf("0x%x.%d.%d", ls.Registers, ls.stackOffsetValue(), int32(ls.StackOffset)&1) +} + +func (ls liveSlot) absent() bool { + return ls.Registers == 0 && !ls.onStack() +} + +// StackOffset encodes whether a value is on the stack and if so, where. +// It is a 31-bit integer followed by a presence flag at the low-order +// bit. 
+type StackOffset int32 + +func (s StackOffset) onStack() bool { + return s != 0 +} + +func (s StackOffset) stackOffsetValue() int32 { + return int32(s) >> 1 +} + +// stateAtPC is the current state of all variables at some point. +type stateAtPC struct { + // The location of each known slot, indexed by SlotID. + slots []VarLoc + // The slots present in each register, indexed by register number. + registers [][]SlotID +} + +// reset fills state with the live variables from live. +func (state *stateAtPC) reset(live abt.T) { + slots, registers := state.slots, state.registers + for i := range slots { + slots[i] = VarLoc{} + } + for i := range registers { + registers[i] = registers[i][:0] + } + for it := live.Iterator(); !it.Done(); { + k, d := it.Next() + live := d.(*liveSlot) + slots[k] = live.VarLoc + if live.VarLoc.Registers == 0 { + continue + } + + mask := uint64(live.VarLoc.Registers) + for { + if mask == 0 { + break + } + reg := uint8(bits.TrailingZeros64(mask)) + mask &^= 1 << reg + + registers[reg] = append(registers[reg], SlotID(k)) + } + } + state.slots, state.registers = slots, registers +} + +func (s *debugState) LocString(loc VarLoc) string { + if loc.absent() { + return "" + } + + var storage []string + if loc.onStack() { + storage = append(storage, fmt.Sprintf("@%+d", loc.stackOffsetValue())) + } + + mask := uint64(loc.Registers) + for { + if mask == 0 { + break + } + reg := uint8(bits.TrailingZeros64(mask)) + mask &^= 1 << reg + + storage = append(storage, s.registers[reg].String()) + } + return strings.Join(storage, ",") +} + +// A VarLoc describes the storage for part of a user variable. +type VarLoc struct { + // The registers this variable is available in. There can be more than + // one in various situations, e.g. it's being moved between registers. 
+ Registers RegisterSet + + StackOffset +} + +func (loc VarLoc) absent() bool { + return loc.Registers == 0 && !loc.onStack() +} + +func (loc VarLoc) intersect(other VarLoc) VarLoc { + if !loc.onStack() || !other.onStack() || loc.StackOffset != other.StackOffset { + loc.StackOffset = 0 + } + loc.Registers &= other.Registers + return loc +} + +var BlockStart = &Value{ + ID: -10000, + Op: OpInvalid, + Aux: StringToAux("BlockStart"), +} + +var BlockEnd = &Value{ + ID: -20000, + Op: OpInvalid, + Aux: StringToAux("BlockEnd"), +} + +var FuncEnd = &Value{ + ID: -30000, + Op: OpInvalid, + Aux: StringToAux("FuncEnd"), +} + +// RegisterSet is a bitmap of registers, indexed by Register.num. +type RegisterSet uint64 + +// logf prints debug-specific logging to stdout (always stdout) if the +// current function is tagged by GOSSAFUNC (for ssa output directed +// either to stdout or html). +func (s *debugState) logf(msg string, args ...interface{}) { + if s.f.PrintOrHtmlSSA { + fmt.Printf(msg, args...) + } +} + +type debugState struct { + // See FuncDebug. + slots []LocalSlot + vars []*ir.Name + varSlots [][]SlotID + lists [][]byte + + // The user variable that each slot rolls up to, indexed by SlotID. + slotVars []VarID + + f *Func + loggingLevel int + convergeCount int // testing; iterate over block debug state this many times + registers []Register + stackOffset func(LocalSlot) int32 + ctxt *obj.Link + + // The names (slots) associated with each value, indexed by Value ID. + valueNames [][]SlotID + + // The current state of whatever analysis is running. + currentState stateAtPC + changedVars *sparseSet + changedSlots *sparseSet + + // The pending location list entry for each user variable, indexed by VarID. + pendingEntries []pendingEntry + + varParts map[*ir.Name][]SlotID + blockDebug []BlockDebug + pendingSlotLocs []VarLoc + partsByVarOffset sort.Interface +} + +func (state *debugState) initializeCache(f *Func, numVars, numSlots int) { + // One blockDebug per block. 
Initialized in allocBlock. + if cap(state.blockDebug) < f.NumBlocks() { + state.blockDebug = make([]BlockDebug, f.NumBlocks()) + } else { + // This local variable, and the ones like it below, enable compiler + // optimizations. Don't inline them. + b := state.blockDebug[:f.NumBlocks()] + for i := range b { + b[i] = BlockDebug{} + } + } + + // A list of slots per Value. Reuse the previous child slices. + if cap(state.valueNames) < f.NumValues() { + old := state.valueNames + state.valueNames = make([][]SlotID, f.NumValues()) + copy(state.valueNames, old) + } + vn := state.valueNames[:f.NumValues()] + for i := range vn { + vn[i] = vn[i][:0] + } + + // Slot and register contents for currentState. Cleared by reset(). + if cap(state.currentState.slots) < numSlots { + state.currentState.slots = make([]VarLoc, numSlots) + } else { + state.currentState.slots = state.currentState.slots[:numSlots] + } + if cap(state.currentState.registers) < len(state.registers) { + state.currentState.registers = make([][]SlotID, len(state.registers)) + } else { + state.currentState.registers = state.currentState.registers[:len(state.registers)] + } + + // A relatively small slice, but used many times as the return from processValue. + state.changedVars = newSparseSet(numVars) + state.changedSlots = newSparseSet(numSlots) + + // A pending entry per user variable, with space to track each of its pieces. 
+ numPieces := 0 + for i := range state.varSlots { + numPieces += len(state.varSlots[i]) + } + if cap(state.pendingSlotLocs) < numPieces { + state.pendingSlotLocs = make([]VarLoc, numPieces) + } else { + psl := state.pendingSlotLocs[:numPieces] + for i := range psl { + psl[i] = VarLoc{} + } + } + if cap(state.pendingEntries) < numVars { + state.pendingEntries = make([]pendingEntry, numVars) + } + pe := state.pendingEntries[:numVars] + freePieceIdx := 0 + for varID, slots := range state.varSlots { + pe[varID] = pendingEntry{ + pieces: state.pendingSlotLocs[freePieceIdx : freePieceIdx+len(slots)], + } + freePieceIdx += len(slots) + } + state.pendingEntries = pe + + if cap(state.lists) < numVars { + state.lists = make([][]byte, numVars) + } else { + state.lists = state.lists[:numVars] + for i := range state.lists { + state.lists[i] = nil + } + } +} + +func (state *debugState) allocBlock(b *Block) *BlockDebug { + return &state.blockDebug[b.ID] +} + +func (s *debugState) blockEndStateString(b *BlockDebug) string { + endState := stateAtPC{slots: make([]VarLoc, len(s.slots)), registers: make([][]SlotID, len(s.registers))} + endState.reset(b.endState) + return s.stateString(endState) +} + +func (s *debugState) stateString(state stateAtPC) string { + var strs []string + for slotID, loc := range state.slots { + if !loc.absent() { + strs = append(strs, fmt.Sprintf("\t%v = %v\n", s.slots[slotID], s.LocString(loc))) + } + } + + strs = append(strs, "\n") + for reg, slots := range state.registers { + if len(slots) != 0 { + var slotStrs []string + for _, slot := range slots { + slotStrs = append(slotStrs, s.slots[slot].String()) + } + strs = append(strs, fmt.Sprintf("\t%v = %v\n", &s.registers[reg], slotStrs)) + } + } + + if len(strs) == 1 { + return "(no vars)\n" + } + return strings.Join(strs, "") +} + +// slotCanonicalizer is a table used to lookup and canonicalize +// LocalSlot's in a type insensitive way (e.g. 
taking into account the +// base name, offset, and width of the slot, but ignoring the slot +// type). +type slotCanonicalizer struct { + slmap map[slotKey]SlKeyIdx + slkeys []LocalSlot +} + +func newSlotCanonicalizer() *slotCanonicalizer { + return &slotCanonicalizer{ + slmap: make(map[slotKey]SlKeyIdx), + slkeys: []LocalSlot{LocalSlot{N: nil}}, + } +} + +type SlKeyIdx uint32 + +const noSlot = SlKeyIdx(0) + +// slotKey is a type-insensitive encapsulation of a LocalSlot; it +// is used to key a map within slotCanonicalizer. +type slotKey struct { + name *ir.Name + offset int64 + width int64 + splitOf SlKeyIdx // idx in slkeys slice in slotCanonicalizer + splitOffset int64 +} + +// lookup looks up a LocalSlot in the slot canonicalizer "sc", returning +// a canonical index for the slot, and adding it to the table if need +// be. Return value is the canonical slot index, and a boolean indicating +// whether the slot was found in the table already (TRUE => found). +func (sc *slotCanonicalizer) lookup(ls LocalSlot) (SlKeyIdx, bool) { + split := noSlot + if ls.SplitOf != nil { + split, _ = sc.lookup(*ls.SplitOf) + } + k := slotKey{ + name: ls.N, offset: ls.Off, width: ls.Type.Size(), + splitOf: split, splitOffset: ls.SplitOffset, + } + if idx, ok := sc.slmap[k]; ok { + return idx, true + } + rv := SlKeyIdx(len(sc.slkeys)) + sc.slkeys = append(sc.slkeys, ls) + sc.slmap[k] = rv + return rv, false +} + +func (sc *slotCanonicalizer) canonSlot(idx SlKeyIdx) LocalSlot { + return sc.slkeys[idx] +} + +// PopulateABIInRegArgOps examines the entry block of the function +// and looks for incoming parameters that have missing or partial +// OpArg{Int,Float}Reg values, inserting additional values in +// cases where they are missing. 
Example: +// +// func foo(s string, used int, notused int) int { +// return len(s) + used +// } +// +// In the function above, the incoming parameter "used" is fully live, +// "notused" is not live, and "s" is partially live (only the length +// field of the string is used). At the point where debug value +// analysis runs, we might expect to see an entry block with: +// +// b1: +// v4 = ArgIntReg {s+8} [0] : BX +// v5 = ArgIntReg {used} [0] : CX +// +// While this is an accurate picture of the live incoming params, +// we also want to have debug locations for non-live params (or +// their non-live pieces), e.g. something like +// +// b1: +// v9 = ArgIntReg <*uint8> {s+0} [0] : AX +// v4 = ArgIntReg {s+8} [0] : BX +// v5 = ArgIntReg {used} [0] : CX +// v10 = ArgIntReg {unused} [0] : DI +// +// This function examines the live OpArg{Int,Float}Reg values and +// synthesizes new (dead) values for the non-live params or the +// non-live pieces of partially live params. +func PopulateABIInRegArgOps(f *Func) { + pri := f.ABISelf.ABIAnalyzeFuncType(f.Type) + + // When manufacturing new slots that correspond to splits of + // composite parameters, we want to avoid creating a new sub-slot + // that differs from some existing sub-slot only by type, since + // the debug location analysis will treat that slot as a separate + // entity. To achieve this, create a lookup table of existing + // slots that is type-insenstitive. + sc := newSlotCanonicalizer() + for _, sl := range f.Names { + sc.lookup(*sl) + } + + // Add slot -> value entry to f.NamedValues if not already present. + addToNV := func(v *Value, sl LocalSlot) { + values, ok := f.NamedValues[sl] + if !ok { + // Haven't seen this slot yet. 
+ sla := f.localSlotAddr(sl) + f.Names = append(f.Names, sla) + } else { + for _, ev := range values { + if v == ev { + return + } + } + } + values = append(values, v) + f.NamedValues[sl] = values + } + + newValues := []*Value{} + + abiRegIndexToRegister := func(reg abi.RegIndex) int8 { + i := f.ABISelf.FloatIndexFor(reg) + if i >= 0 { // float PR + return f.Config.floatParamRegs[i] + } else { + return f.Config.intParamRegs[reg] + } + } + + // Helper to construct a new OpArg{Float,Int}Reg op value. + var pos src.XPos + if len(f.Entry.Values) != 0 { + pos = f.Entry.Values[0].Pos + } + synthesizeOpIntFloatArg := func(n *ir.Name, t *types.Type, reg abi.RegIndex, sl LocalSlot) *Value { + aux := &AuxNameOffset{n, sl.Off} + op, auxInt := ArgOpAndRegisterFor(reg, f.ABISelf) + v := f.newValueNoBlock(op, t, pos) + v.AuxInt = auxInt + v.Aux = aux + v.Args = nil + v.Block = f.Entry + newValues = append(newValues, v) + addToNV(v, sl) + f.setHome(v, &f.Config.registers[abiRegIndexToRegister(reg)]) + return v + } + + // Make a pass through the entry block looking for + // OpArg{Int,Float}Reg ops. Record the slots they use in a table + // ("sc"). We use a type-insensitive lookup for the slot table, + // since the type we get from the ABI analyzer won't always match + // what the compiler uses when creating OpArg{Int,Float}Reg ops. + for _, v := range f.Entry.Values { + if v.Op == OpArgIntReg || v.Op == OpArgFloatReg { + aux := v.Aux.(*AuxNameOffset) + sl := LocalSlot{N: aux.Name, Type: v.Type, Off: aux.Offset} + // install slot in lookup table + idx, _ := sc.lookup(sl) + // add to f.NamedValues if not already present + addToNV(v, sc.canonSlot(idx)) + } else if v.Op.IsCall() { + // if we hit a call, we've gone too far. + break + } + } + + // Now make a pass through the ABI in-params, looking for params + // or pieces of params that we didn't encounter in the loop above. 
+ for _, inp := range pri.InParams() { + if !isNamedRegParam(inp) { + continue + } + n := inp.Name + + // Param is spread across one or more registers. Walk through + // each piece to see whether we've seen an arg reg op for it. + types, offsets := inp.RegisterTypesAndOffsets() + for k, t := range types { + // Note: this recipe for creating a LocalSlot is designed + // to be compatible with the one used in expand_calls.go + // as opposed to decompose.go. The expand calls code just + // takes the base name and creates an offset into it, + // without using the SplitOf/SplitOffset fields. The code + // in decompose.go does the opposite -- it creates a + // LocalSlot object with "Off" set to zero, but with + // SplitOf pointing to a parent slot, and SplitOffset + // holding the offset into the parent object. + pieceSlot := LocalSlot{N: n, Type: t, Off: offsets[k]} + + // Look up this piece to see if we've seen a reg op + // for it. If not, create one. + _, found := sc.lookup(pieceSlot) + if !found { + // This slot doesn't appear in the map, meaning it + // corresponds to an in-param that is not live, or + // a portion of an in-param that is not live/used. + // Add a new dummy OpArg{Int,Float}Reg for it. + synthesizeOpIntFloatArg(n, t, inp.Registers[k], + pieceSlot) + } + } + } + + // Insert the new values into the head of the block. + f.Entry.Values = append(newValues, f.Entry.Values...) +} + +// BuildFuncDebug debug information for f, placing the results +// in "rval". f must be fully processed, so that each Value is where it +// will be when machine code is emitted. +func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingLevel int, stackOffset func(LocalSlot) int32, rval *FuncDebug) { + if f.RegAlloc == nil { + f.Fatalf("BuildFuncDebug on func %v that has not been fully processed", f) + } + state := &f.Cache.debugState + state.loggingLevel = loggingLevel % 1000 + + // A specific number demands exactly that many iterations. 
Under + // particular circumstances it make require more than the total of + // 2 passes implied by a single run through liveness and a single + // run through location list generation. + state.convergeCount = loggingLevel / 1000 + state.f = f + state.registers = f.Config.registers + state.stackOffset = stackOffset + state.ctxt = ctxt + + if buildcfg.Experiment.RegabiArgs { + PopulateABIInRegArgOps(f) + } + + if state.loggingLevel > 0 { + state.logf("Generating location lists for function %q\n", f.Name) + } + + if state.varParts == nil { + state.varParts = make(map[*ir.Name][]SlotID) + } else { + for n := range state.varParts { + delete(state.varParts, n) + } + } + + // Recompose any decomposed variables, and establish the canonical + // IDs for each var and slot by filling out state.vars and state.slots. + + state.slots = state.slots[:0] + state.vars = state.vars[:0] + for i, slot := range f.Names { + state.slots = append(state.slots, *slot) + if ir.IsSynthetic(slot.N) { + continue + } + + topSlot := slot + for topSlot.SplitOf != nil { + topSlot = topSlot.SplitOf + } + if _, ok := state.varParts[topSlot.N]; !ok { + state.vars = append(state.vars, topSlot.N) + } + state.varParts[topSlot.N] = append(state.varParts[topSlot.N], SlotID(i)) + } + + // Recreate the LocalSlot for each stack-only variable. + // This would probably be better as an output from stackframe. + for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Op == OpVarDef { + n := v.Aux.(*ir.Name) + if ir.IsSynthetic(n) { + continue + } + + if _, ok := state.varParts[n]; !ok { + slot := LocalSlot{N: n, Type: v.Type, Off: 0} + state.slots = append(state.slots, slot) + state.varParts[n] = []SlotID{SlotID(len(state.slots) - 1)} + state.vars = append(state.vars, n) + } + } + } + } + + // Fill in the var<->slot mappings. 
+ if cap(state.varSlots) < len(state.vars) { + state.varSlots = make([][]SlotID, len(state.vars)) + } else { + state.varSlots = state.varSlots[:len(state.vars)] + for i := range state.varSlots { + state.varSlots[i] = state.varSlots[i][:0] + } + } + if cap(state.slotVars) < len(state.slots) { + state.slotVars = make([]VarID, len(state.slots)) + } else { + state.slotVars = state.slotVars[:len(state.slots)] + } + + if state.partsByVarOffset == nil { + state.partsByVarOffset = &partsByVarOffset{} + } + for varID, n := range state.vars { + parts := state.varParts[n] + state.varSlots[varID] = parts + for _, slotID := range parts { + state.slotVars[slotID] = VarID(varID) + } + *state.partsByVarOffset.(*partsByVarOffset) = partsByVarOffset{parts, state.slots} + sort.Sort(state.partsByVarOffset) + } + + state.initializeCache(f, len(state.varParts), len(state.slots)) + + for i, slot := range f.Names { + if ir.IsSynthetic(slot.N) { + continue + } + for _, value := range f.NamedValues[*slot] { + state.valueNames[value.ID] = append(state.valueNames[value.ID], SlotID(i)) + } + } + + blockLocs := state.liveness() + state.buildLocationLists(blockLocs) + + // Populate "rval" with what we've computed. + rval.Slots = state.slots + rval.VarSlots = state.varSlots + rval.Vars = state.vars + rval.LocationLists = state.lists +} + +// liveness walks the function in control flow order, calculating the start +// and end state of each block. +func (state *debugState) liveness() []*BlockDebug { + blockLocs := make([]*BlockDebug, state.f.NumBlocks()) + counterTime := int32(1) + + // Reverse postorder: visit a block after as many as possible of its + // predecessors have been visited. + po := state.f.Postorder() + converged := false + + // The iteration rule is that by default, run until converged, but + // if a particular iteration count is specified, run that many + // iterations, no more, no less. A count is specified as the + // thousands digit of the location lists debug flag, + // e.g. 
-d=locationlists=4000 + keepGoing := func(k int) bool { + if state.convergeCount == 0 { + return !converged + } + return k < state.convergeCount + } + for k := 0; keepGoing(k); k++ { + if state.loggingLevel > 0 { + state.logf("Liveness pass %d\n", k) + } + converged = true + for i := len(po) - 1; i >= 0; i-- { + b := po[i] + locs := blockLocs[b.ID] + if locs == nil { + locs = state.allocBlock(b) + blockLocs[b.ID] = locs + } + + // Build the starting state for the block from the final + // state of its predecessors. + startState, blockChanged := state.mergePredecessors(b, blockLocs, nil, false) + locs.lastCheckedTime = counterTime + counterTime++ + if state.loggingLevel > 1 { + state.logf("Processing %v, block changed %v, initial state:\n%v", b, blockChanged, state.stateString(state.currentState)) + } + + if blockChanged { + // If the start did not change, then the old endState is good + converged = false + changed := false + state.changedSlots.clear() + + // Update locs/registers with the effects of each Value. + for _, v := range b.Values { + slots := state.valueNames[v.ID] + + // Loads and stores inherit the names of their sources. + var source *Value + switch v.Op { + case OpStoreReg: + source = v.Args[0] + case OpLoadReg: + switch a := v.Args[0]; a.Op { + case OpArg, OpPhi: + source = a + case OpStoreReg: + source = a.Args[0] + default: + if state.loggingLevel > 1 { + state.logf("at %v: load with unexpected source op: %v (%v)\n", v, a.Op, a) + } + } + } + // Update valueNames with the source so that later steps + // don't need special handling. + if source != nil && k == 0 { + // limit to k == 0 otherwise there are duplicates. + slots = append(slots, state.valueNames[source.ID]...) 
+ state.valueNames[v.ID] = slots + } + + reg, _ := state.f.getHome(v.ID).(*Register) + c := state.processValue(v, slots, reg) + changed = changed || c + } + + if state.loggingLevel > 1 { + state.logf("Block %v done, locs:\n%v", b, state.stateString(state.currentState)) + } + + locs.relevant = locs.relevant || changed + if !changed { + locs.endState = startState + } else { + for _, id := range state.changedSlots.contents() { + slotID := SlotID(id) + slotLoc := state.currentState.slots[slotID] + if slotLoc.absent() { + startState.Delete(int32(slotID)) + continue + } + old := startState.Find(int32(slotID)) // do NOT replace existing values + if oldLS, ok := old.(*liveSlot); !ok || oldLS.VarLoc != slotLoc { + startState.Insert(int32(slotID), + &liveSlot{VarLoc: slotLoc}) + } + } + locs.endState = startState + } + locs.lastChangedTime = counterTime + } + counterTime++ + } + } + return blockLocs +} + +// mergePredecessors takes the end state of each of b's predecessors and +// intersects them to form the starting state for b. It puts that state +// in blockLocs[b.ID].startState, and fills state.currentState with it. +// It returns the start state and whether this is changed from the +// previously approximated value of startState for this block. After +// the first call, subsequent calls can only shrink startState. +// +// Passing forLocationLists=true enables additional side-effects that +// are necessary for building location lists but superfluous while still +// iterating to an answer. +// +// If previousBlock is non-nil, it registers changes vs. that block's +// end state in state.changedVars. Note that previousBlock will often +// not be a predecessor. +// +// Note that mergePredecessors behaves slightly differently between +// first and subsequent calls for a block. 
For the first call, the +// starting state is approximated by taking the state from the +// predecessor whose state is smallest, and removing any elements not +// in all the other predecessors; this makes the smallest number of +// changes and shares the most state. On subsequent calls the old +// value of startState is adjusted with new information; this is judged +// to do the least amount of extra work. +// +// To improve performance, each block's state information is marked with +// lastChanged and lastChecked "times" so unchanged predecessors can be +// skipped on after-the-first iterations. Doing this allows extra +// iterations by the caller to be almost free. +// +// It is important to know that the set representation used for +// startState, endState, and merges can share data for two sets where +// one is a small delta from the other. Doing this does require a +// little care in how sets are updated, both in mergePredecessors, and +// using its result. +func (state *debugState) mergePredecessors(b *Block, blockLocs []*BlockDebug, previousBlock *Block, forLocationLists bool) (abt.T, bool) { + // Filter out back branches. + var predsBuf [10]*Block + + preds := predsBuf[:0] + locs := blockLocs[b.ID] + + blockChanged := !locs.everProcessed // the first time it always changes. + updating := locs.everProcessed + + // For the first merge, exclude predecessors that have not been seen yet. + // I.e., backedges. + for _, pred := range b.Preds { + if bl := blockLocs[pred.b.ID]; bl != nil && bl.everProcessed { + // crucially, a self-edge has bl != nil, but bl.everProcessed is false the first time. + preds = append(preds, pred.b) + } + } + + locs.everProcessed = true + + if state.loggingLevel > 1 { + // The logf below would cause preds to be heap-allocated if + // it were passed directly. 
+ preds2 := make([]*Block, len(preds)) + copy(preds2, preds) + state.logf("Merging %v into %v (changed=%d, checked=%d)\n", preds2, b, locs.lastChangedTime, locs.lastCheckedTime) + } + + state.changedVars.clear() + + markChangedVars := func(slots, merged abt.T) { + if !forLocationLists { + return + } + // Fill changedVars with those that differ between the previous + // block (in the emit order, not necessarily a flow predecessor) + // and the start state for this block. + for it := slots.Iterator(); !it.Done(); { + k, v := it.Next() + m := merged.Find(k) + if m == nil || v.(*liveSlot).VarLoc != m.(*liveSlot).VarLoc { + state.changedVars.add(ID(state.slotVars[k])) + } + } + } + + reset := func(ourStartState abt.T) { + if !(forLocationLists || blockChanged) { + // there is no change and this is not for location lists, do + // not bother to reset currentState because it will not be + // examined. + return + } + state.currentState.reset(ourStartState) + } + + // Zero predecessors + if len(preds) == 0 { + if previousBlock != nil { + state.f.Fatalf("Function %v, block %s with no predecessors is not first block, has previous %s", state.f, b.String(), previousBlock.String()) + } + // startState is empty + reset(abt.T{}) + return abt.T{}, blockChanged + } + + // One predecessor + l0 := blockLocs[preds[0].ID] + p0 := l0.endState + if len(preds) == 1 { + if previousBlock != nil && preds[0].ID != previousBlock.ID { + // Change from previous block is its endState minus the predecessor's endState + markChangedVars(blockLocs[previousBlock.ID].endState, p0) + } + locs.startState = p0 + blockChanged = blockChanged || l0.lastChangedTime > locs.lastCheckedTime + reset(p0) + return p0, blockChanged + } + + // More than one predecessor + + if updating { + // After the first approximation, i.e., when updating, results + // can only get smaller, because initially backedge + // predecessors do not participate in the intersection. 
This + // means that for the update, given the prior approximation of + // startState, there is no need to re-intersect with unchanged + // blocks. Therefore remove unchanged blocks from the + // predecessor list. + for i := len(preds) - 1; i >= 0; i-- { + pred := preds[i] + if blockLocs[pred.ID].lastChangedTime > locs.lastCheckedTime { + continue // keep this predecessor + } + preds[i] = preds[len(preds)-1] + preds = preds[:len(preds)-1] + if state.loggingLevel > 2 { + state.logf("Pruned b%d, lastChanged was %d but b%d lastChecked is %d\n", pred.ID, blockLocs[pred.ID].lastChangedTime, b.ID, locs.lastCheckedTime) + } + } + // Check for an early out; this should always hit for the update + // if there are no cycles. + if len(preds) == 0 { + blockChanged = false + + reset(locs.startState) + if state.loggingLevel > 2 { + state.logf("Early out, no predecessors changed since last check\n") + } + if previousBlock != nil { + markChangedVars(blockLocs[previousBlock.ID].endState, locs.startState) + } + return locs.startState, blockChanged + } + } + + baseID := preds[0].ID + baseState := p0 + + // Choose the predecessor with the smallest endState for intersection work + for _, pred := range preds[1:] { + if blockLocs[pred.ID].endState.Size() < baseState.Size() { + baseState = blockLocs[pred.ID].endState + baseID = pred.ID + } + } + + if state.loggingLevel > 2 { + state.logf("Starting %v with state from b%v:\n%v", b, baseID, state.blockEndStateString(blockLocs[baseID])) + for _, pred := range preds { + if pred.ID == baseID { + continue + } + state.logf("Merging in state from %v:\n%v", pred, state.blockEndStateString(blockLocs[pred.ID])) + } + } + + state.currentState.reset(abt.T{}) + // The normal logic of "reset" is included in the intersection loop below. + + slotLocs := state.currentState.slots + + // If this is the first call, do updates on the "baseState"; if this + // is a subsequent call, tweak the startState instead. 
Note that + // these "set" values are values; there are no side effects to + // other values as these are modified. + newState := baseState + if updating { + newState = blockLocs[b.ID].startState + } + + for it := newState.Iterator(); !it.Done(); { + k, d := it.Next() + thisSlot := d.(*liveSlot) + x := thisSlot.VarLoc + x0 := x // initial value in newState + + // Intersect this slot with the slot in all the predecessors + for _, other := range preds { + if !updating && other.ID == baseID { + continue + } + otherSlot := blockLocs[other.ID].endState.Find(k) + if otherSlot == nil { + x = VarLoc{} + break + } + y := otherSlot.(*liveSlot).VarLoc + x = x.intersect(y) + if x.absent() { + x = VarLoc{} + break + } + } + + // Delete if necessary, but not otherwise (in order to maximize sharing). + if x.absent() { + if !x0.absent() { + blockChanged = true + newState.Delete(k) + } + slotLocs[k] = VarLoc{} + continue + } + if x != x0 { + blockChanged = true + newState.Insert(k, &liveSlot{VarLoc: x}) + } + + slotLocs[k] = x + mask := uint64(x.Registers) + for { + if mask == 0 { + break + } + reg := uint8(bits.TrailingZeros64(mask)) + mask &^= 1 << reg + state.currentState.registers[reg] = append(state.currentState.registers[reg], SlotID(k)) + } + } + + if previousBlock != nil { + markChangedVars(blockLocs[previousBlock.ID].endState, newState) + } + locs.startState = newState + return newState, blockChanged +} + +// processValue updates locs and state.registerContents to reflect v, a +// value with the names in vSlots and homed in vReg. "v" becomes +// visible after execution of the instructions evaluating it. It +// returns which VarIDs were modified by the Value's execution. 
+func (state *debugState) processValue(v *Value, vSlots []SlotID, vReg *Register) bool { + locs := state.currentState + changed := false + setSlot := func(slot SlotID, loc VarLoc) { + changed = true + state.changedVars.add(ID(state.slotVars[slot])) + state.changedSlots.add(ID(slot)) + state.currentState.slots[slot] = loc + } + + // Handle any register clobbering. Call operations, for example, + // clobber all registers even though they don't explicitly write to + // them. + clobbers := uint64(opcodeTable[v.Op].reg.clobbers) + for { + if clobbers == 0 { + break + } + reg := uint8(bits.TrailingZeros64(clobbers)) + clobbers &^= 1 << reg + + for _, slot := range locs.registers[reg] { + if state.loggingLevel > 1 { + state.logf("at %v: %v clobbered out of %v\n", v, state.slots[slot], &state.registers[reg]) + } + + last := locs.slots[slot] + if last.absent() { + state.f.Fatalf("at %v: slot %v in register %v with no location entry", v, state.slots[slot], &state.registers[reg]) + continue + } + regs := last.Registers &^ (1 << reg) + setSlot(slot, VarLoc{regs, last.StackOffset}) + } + + locs.registers[reg] = locs.registers[reg][:0] + } + + switch { + case v.Op == OpVarDef: + n := v.Aux.(*ir.Name) + if ir.IsSynthetic(n) { + break + } + + slotID := state.varParts[n][0] + var stackOffset StackOffset + if v.Op == OpVarDef { + stackOffset = StackOffset(state.stackOffset(state.slots[slotID])<<1 | 1) + } + setSlot(slotID, VarLoc{0, stackOffset}) + if state.loggingLevel > 1 { + if v.Op == OpVarDef { + state.logf("at %v: stack-only var %v now live\n", v, state.slots[slotID]) + } else { + state.logf("at %v: stack-only var %v now dead\n", v, state.slots[slotID]) + } + } + + case v.Op == OpArg: + home := state.f.getHome(v.ID).(LocalSlot) + stackOffset := state.stackOffset(home)<<1 | 1 + for _, slot := range vSlots { + if state.loggingLevel > 1 { + state.logf("at %v: arg %v now on stack in location %v\n", v, state.slots[slot], home) + if last := locs.slots[slot]; !last.absent() { + 
state.logf("at %v: unexpected arg op on already-live slot %v\n", v, state.slots[slot]) + } + } + + setSlot(slot, VarLoc{0, StackOffset(stackOffset)}) + } + + case v.Op == OpStoreReg: + home := state.f.getHome(v.ID).(LocalSlot) + stackOffset := state.stackOffset(home)<<1 | 1 + for _, slot := range vSlots { + last := locs.slots[slot] + if last.absent() { + if state.loggingLevel > 1 { + state.logf("at %v: unexpected spill of unnamed register %s\n", v, vReg) + } + break + } + + setSlot(slot, VarLoc{last.Registers, StackOffset(stackOffset)}) + if state.loggingLevel > 1 { + state.logf("at %v: %v spilled to stack location %v@%d\n", v, state.slots[slot], home, state.stackOffset(home)) + } + } + + case vReg != nil: + if state.loggingLevel > 1 { + newSlots := make([]bool, len(state.slots)) + for _, slot := range vSlots { + newSlots[slot] = true + } + + for _, slot := range locs.registers[vReg.num] { + if !newSlots[slot] { + state.logf("at %v: overwrote %v in register %v\n", v, state.slots[slot], vReg) + } + } + } + + for _, slot := range locs.registers[vReg.num] { + last := locs.slots[slot] + setSlot(slot, VarLoc{last.Registers &^ (1 << uint8(vReg.num)), last.StackOffset}) + } + locs.registers[vReg.num] = locs.registers[vReg.num][:0] + locs.registers[vReg.num] = append(locs.registers[vReg.num], vSlots...) + for _, slot := range vSlots { + if state.loggingLevel > 1 { + state.logf("at %v: %v now in %s\n", v, state.slots[slot], vReg) + } + + last := locs.slots[slot] + setSlot(slot, VarLoc{1< {foo+0} [0] : AX (foo) + // v34 = ArgIntReg {bar+0} [0] : BX (bar) + // ... + // v77 = StoreReg v67 : ctx+8[unsafe.Pointer] + // v78 = StoreReg v68 : ctx[unsafe.Pointer] + // v79 = Arg <*uint8> {args} : args[*uint8] (args[*uint8]) + // v80 = Arg {args} [8] : args+8[int] (args+8[int]) + // ... + // v1 = InitMem + // + // We can stop scanning the initial portion of the block when + // we either see the InitMem op (for entry blocks) or the + // first non-zero-width op (for other blocks). 
+ for idx := 0; idx < len(b.Values); idx++ { + v := b.Values[idx] + if blockPrologComplete(v) { + break + } + // Consider only "lifetime begins at block start" ops. + if !mustBeFirst(v) && v.Op != OpArg { + continue + } + slots := state.valueNames[v.ID] + reg, _ := state.f.getHome(v.ID).(*Register) + changed := state.processValue(v, slots, reg) // changed == added to state.changedVars + if changed { + for _, varID := range state.changedVars.contents() { + state.updateVar(VarID(varID), v.Block, BlockStart) + } + state.changedVars.clear() + } + } + + // Now examine the block again, handling things other than the + // "begins at block start" lifetimes. + zeroWidthPending := false + prologComplete := false + // expect to see values in pattern (apc)* (zerowidth|real)* + for _, v := range b.Values { + if blockPrologComplete(v) { + prologComplete = true + } + slots := state.valueNames[v.ID] + reg, _ := state.f.getHome(v.ID).(*Register) + changed := state.processValue(v, slots, reg) // changed == added to state.changedVars + + if opcodeTable[v.Op].zeroWidth { + if prologComplete && mustBeFirst(v) { + panic(fmt.Errorf("Unexpected placement of op '%s' appearing after non-pseudo-op at beginning of block %s in %s\n%s", v.LongString(), b, b.Func.Name, b.Func)) + } + if changed { + if mustBeFirst(v) || v.Op == OpArg { + // already taken care of above + continue + } + zeroWidthPending = true + } + continue + } + if !changed && !zeroWidthPending { + continue + } + + // Not zero-width; i.e., a "real" instruction. + zeroWidthPending = false + for _, varID := range state.changedVars.contents() { + state.updateVar(VarID(varID), v.Block, v) + } + state.changedVars.clear() + } + for _, varID := range state.changedVars.contents() { + state.updateVar(VarID(varID), b, BlockEnd) + } + + prevBlock = b + } + + if state.loggingLevel > 0 { + state.logf("location lists:\n") + } + + // Flush any leftover entries live at the end of the last block. 
+ for varID := range state.lists { + state.writePendingEntry(VarID(varID), -1, FuncEnd.ID) + list := state.lists[varID] + if state.loggingLevel > 0 { + if len(list) == 0 { + state.logf("\t%v : empty list\n", state.vars[varID]) + } else { + state.logf("\t%v : %q\n", state.vars[varID], hex.EncodeToString(state.lists[varID])) + } + } + } +} + +// updateVar updates the pending location list entry for varID to +// reflect the new locations in curLoc, beginning at v in block b. +// v may be one of the special values indicating block start or end. +func (state *debugState) updateVar(varID VarID, b *Block, v *Value) { + curLoc := state.currentState.slots + // Assemble the location list entry with whatever's live. + empty := true + for _, slotID := range state.varSlots[varID] { + if !curLoc[slotID].absent() { + empty = false + break + } + } + pending := &state.pendingEntries[varID] + if empty { + state.writePendingEntry(varID, b.ID, v.ID) + pending.clear() + return + } + + // Extend the previous entry if possible. + if pending.present { + merge := true + for i, slotID := range state.varSlots[varID] { + if !canMerge(pending.pieces[i], curLoc[slotID]) { + merge = false + break + } + } + if merge { + return + } + } + + state.writePendingEntry(varID, b.ID, v.ID) + pending.present = true + pending.startBlock = b.ID + pending.startValue = v.ID + for i, slot := range state.varSlots[varID] { + pending.pieces[i] = curLoc[slot] + } +} + +// writePendingEntry writes out the pending entry for varID, if any, +// terminated at endBlock/Value. +func (state *debugState) writePendingEntry(varID VarID, endBlock, endValue ID) { + pending := state.pendingEntries[varID] + if !pending.present { + return + } + + // Pack the start/end coordinates into the start/end addresses + // of the entry, for decoding by PutLocationList. 
+ start, startOK := encodeValue(state.ctxt, pending.startBlock, pending.startValue) + end, endOK := encodeValue(state.ctxt, endBlock, endValue) + if !startOK || !endOK { + // If someone writes a function that uses >65K values, + // they get incomplete debug info on 32-bit platforms. + return + } + if start == end { + if state.loggingLevel > 1 { + // Printf not logf so not gated by GOSSAFUNC; this should fire very rarely. + // TODO this fires a lot, need to figure out why. + state.logf("Skipping empty location list for %v in %s\n", state.vars[varID], state.f.Name) + } + return + } + + list := state.lists[varID] + list = appendPtr(state.ctxt, list, start) + list = appendPtr(state.ctxt, list, end) + // Where to write the length of the location description once + // we know how big it is. + sizeIdx := len(list) + list = list[:len(list)+2] + + if state.loggingLevel > 1 { + var partStrs []string + for i, slot := range state.varSlots[varID] { + partStrs = append(partStrs, fmt.Sprintf("%v@%v", state.slots[slot], state.LocString(pending.pieces[i]))) + } + state.logf("Add entry for %v: \tb%vv%v-b%vv%v = \t%v\n", state.vars[varID], pending.startBlock, pending.startValue, endBlock, endValue, strings.Join(partStrs, " ")) + } + + for i, slotID := range state.varSlots[varID] { + loc := pending.pieces[i] + slot := state.slots[slotID] + + if !loc.absent() { + if loc.onStack() { + if loc.stackOffsetValue() == 0 { + list = append(list, dwarf.DW_OP_call_frame_cfa) + } else { + list = append(list, dwarf.DW_OP_fbreg) + list = dwarf.AppendSleb128(list, int64(loc.stackOffsetValue())) + } + } else { + regnum := state.ctxt.Arch.DWARFRegisters[state.registers[firstReg(loc.Registers)].ObjNum()] + if regnum < 32 { + list = append(list, dwarf.DW_OP_reg0+byte(regnum)) + } else { + list = append(list, dwarf.DW_OP_regx) + list = dwarf.AppendUleb128(list, uint64(regnum)) + } + } + } + + if len(state.varSlots[varID]) > 1 { + list = append(list, dwarf.DW_OP_piece) + list = dwarf.AppendUleb128(list, 
uint64(slot.Type.Size())) + } + } + state.ctxt.Arch.ByteOrder.PutUint16(list[sizeIdx:], uint16(len(list)-sizeIdx-2)) + state.lists[varID] = list +} + +// PutLocationList adds list (a location list in its intermediate representation) to listSym. +func (debugInfo *FuncDebug) PutLocationList(list []byte, ctxt *obj.Link, listSym, startPC *obj.LSym) { + getPC := debugInfo.GetPC + + if ctxt.UseBASEntries { + listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, ^0) + listSym.WriteAddr(ctxt, listSym.Size, ctxt.Arch.PtrSize, startPC, 0) + } + + // Re-read list, translating its address from block/value ID to PC. + for i := 0; i < len(list); { + begin := getPC(decodeValue(ctxt, readPtr(ctxt, list[i:]))) + end := getPC(decodeValue(ctxt, readPtr(ctxt, list[i+ctxt.Arch.PtrSize:]))) + + // Horrible hack. If a range contains only zero-width + // instructions, e.g. an Arg, and it's at the beginning of the + // function, this would be indistinguishable from an + // end entry. Fudge it. + if begin == 0 && end == 0 { + end = 1 + } + + if ctxt.UseBASEntries { + listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, int64(begin)) + listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, int64(end)) + } else { + listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, int64(begin)) + listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, int64(end)) + } + + i += 2 * ctxt.Arch.PtrSize + datalen := 2 + int(ctxt.Arch.ByteOrder.Uint16(list[i:])) + listSym.WriteBytes(ctxt, listSym.Size, list[i:i+datalen]) // copy datalen and location encoding + i += datalen + } + + // Location list contents, now with real PCs. + // End entry. + listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, 0) + listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, 0) +} + +// Pack a value and block ID into an address-sized uint, returning +// encoded value and boolean indicating whether the encoding succeeded. 
+// For 32-bit architectures the process may fail for very large +// procedures(the theory being that it's ok to have degraded debug +// quality in this case). +func encodeValue(ctxt *obj.Link, b, v ID) (uint64, bool) { + if ctxt.Arch.PtrSize == 8 { + result := uint64(b)<<32 | uint64(uint32(v)) + //ctxt.Logf("b %#x (%d) v %#x (%d) -> %#x\n", b, b, v, v, result) + return result, true + } + if ctxt.Arch.PtrSize != 4 { + panic("unexpected pointer size") + } + if ID(int16(b)) != b || ID(int16(v)) != v { + return 0, false + } + return uint64(b)<<16 | uint64(uint16(v)), true +} + +// Unpack a value and block ID encoded by encodeValue. +func decodeValue(ctxt *obj.Link, word uint64) (ID, ID) { + if ctxt.Arch.PtrSize == 8 { + b, v := ID(word>>32), ID(word) + //ctxt.Logf("%#x -> b %#x (%d) v %#x (%d)\n", word, b, b, v, v) + return b, v + } + if ctxt.Arch.PtrSize != 4 { + panic("unexpected pointer size") + } + return ID(word >> 16), ID(int16(word)) +} + +// Append a pointer-sized uint to buf. +func appendPtr(ctxt *obj.Link, buf []byte, word uint64) []byte { + if cap(buf) < len(buf)+20 { + b := make([]byte, len(buf), 20+cap(buf)*2) + copy(b, buf) + buf = b + } + writeAt := len(buf) + buf = buf[0 : len(buf)+ctxt.Arch.PtrSize] + writePtr(ctxt, buf[writeAt:], word) + return buf +} + +// Write a pointer-sized uint to the beginning of buf. +func writePtr(ctxt *obj.Link, buf []byte, word uint64) { + switch ctxt.Arch.PtrSize { + case 4: + ctxt.Arch.ByteOrder.PutUint32(buf, uint32(word)) + case 8: + ctxt.Arch.ByteOrder.PutUint64(buf, word) + default: + panic("unexpected pointer size") + } + +} + +// Read a pointer-sized uint from the beginning of buf. +func readPtr(ctxt *obj.Link, buf []byte) uint64 { + switch ctxt.Arch.PtrSize { + case 4: + return uint64(ctxt.Arch.ByteOrder.Uint32(buf)) + case 8: + return ctxt.Arch.ByteOrder.Uint64(buf) + default: + panic("unexpected pointer size") + } + +} + +// setupLocList creates the initial portion of a location list for a +// user variable. 
It emits the encoded start/end of the range and a +// placeholder for the size. Return value is the new list plus the +// slot in the list holding the size (to be updated later). +func setupLocList(ctxt *obj.Link, f *Func, list []byte, st, en ID) ([]byte, int) { + start, startOK := encodeValue(ctxt, f.Entry.ID, st) + end, endOK := encodeValue(ctxt, f.Entry.ID, en) + if !startOK || !endOK { + // This could happen if someone writes a function that uses + // >65K values on a 32-bit platform. Hopefully a degraded debugging + // experience is ok in that case. + return nil, 0 + } + list = appendPtr(ctxt, list, start) + list = appendPtr(ctxt, list, end) + + // Where to write the length of the location description once + // we know how big it is. + sizeIdx := len(list) + list = list[:len(list)+2] + return list, sizeIdx +} + +// locatePrologEnd walks the entry block of a function with incoming +// register arguments and locates the last instruction in the prolog +// that spills a register arg. It returns the ID of that instruction +// Example: +// +// b1: +// v3 = ArgIntReg {p1+0} [0] : AX +// ... more arg regs .. +// v4 = ArgFloatReg {f1+0} [0] : X0 +// v52 = MOVQstore {p1} v2 v3 v1 +// ... more stores ... +// v68 = MOVSSstore {f4} v2 v67 v66 +// v38 = MOVQstoreconst {blob} [val=0,off=0] v2 v32 +// +// Important: locatePrologEnd is expected to work properly only with +// optimization turned off (e.g. "-N"). If optimization is enabled +// we can't be assured of finding all input arguments spilled in the +// entry block prolog. +func locatePrologEnd(f *Func) ID { + + // returns true if this instruction looks like it moves an ABI + // register to the stack, along with the value being stored. 
+ isRegMoveLike := func(v *Value) (bool, ID) { + n, ok := v.Aux.(*ir.Name) + var r ID + if !ok || n.Class != ir.PPARAM { + return false, r + } + regInputs, memInputs, spInputs := 0, 0, 0 + for _, a := range v.Args { + if a.Op == OpArgIntReg || a.Op == OpArgFloatReg { + regInputs++ + r = a.ID + } else if a.Type.IsMemory() { + memInputs++ + } else if a.Op == OpSP { + spInputs++ + } else { + return false, r + } + } + return v.Type.IsMemory() && memInputs == 1 && + regInputs == 1 && spInputs == 1, r + } + + // OpArg*Reg values we've seen so far on our forward walk, + // for which we have not yet seen a corresponding spill. + regArgs := make([]ID, 0, 32) + + // removeReg tries to remove a value from regArgs, returning true + // if found and removed, or false otherwise. + removeReg := func(r ID) bool { + for i := 0; i < len(regArgs); i++ { + if regArgs[i] == r { + regArgs = append(regArgs[:i], regArgs[i+1:]...) + return true + } + } + return false + } + + // Walk forwards through the block. When we see OpArg*Reg, record + // the value it produces in the regArgs list. When see a store that uses + // the value, remove the entry. When we hit the last store (use) + // then we've arrived at the end of the prolog. + for k, v := range f.Entry.Values { + if v.Op == OpArgIntReg || v.Op == OpArgFloatReg { + regArgs = append(regArgs, v.ID) + continue + } + if ok, r := isRegMoveLike(v); ok { + if removed := removeReg(r); removed { + if len(regArgs) == 0 { + // Found our last spill; return the value after + // it. Note that it is possible that this spill is + // the last instruction in the block. If so, then + // return the "end of block" sentinel. + if k < len(f.Entry.Values)-1 { + return f.Entry.Values[k+1].ID + } + return BlockEnd.ID + } + } + } + if v.Op.IsCall() { + // if we hit a call, we've gone too far. 
+ return v.ID + } + } + // nothing found + return ID(-1) +} + +// isNamedRegParam returns true if the param corresponding to "p" +// is a named, non-blank input parameter assigned to one or more +// registers. +func isNamedRegParam(p abi.ABIParamAssignment) bool { + if p.Name == nil { + return false + } + n := p.Name + if n.Sym() == nil || n.Sym().IsBlank() { + return false + } + if len(p.Registers) == 0 { + return false + } + return true +} + +// BuildFuncDebugNoOptimized populates a FuncDebug object "rval" with +// entries corresponding to the register-resident input parameters for +// the function "f"; it is used when we are compiling without +// optimization but the register ABI is enabled. For each reg param, +// it constructs a 2-element location list: the first element holds +// the input register, and the second element holds the stack location +// of the param (the assumption being that when optimization is off, +// each input param reg will be spilled in the prolog). +func BuildFuncDebugNoOptimized(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset func(LocalSlot) int32, rval *FuncDebug) { + + pri := f.ABISelf.ABIAnalyzeFuncType(f.Type) + + // Look to see if we have any named register-promoted parameters. + // If there are none, bail early and let the caller sort things + // out for the remainder of the params/locals. + numRegParams := 0 + for _, inp := range pri.InParams() { + if isNamedRegParam(inp) { + numRegParams++ + } + } + if numRegParams == 0 { + return + } + + state := debugState{f: f} + + if loggingEnabled { + state.logf("generating -N reg param loc lists for func %q\n", f.Name) + } + + // Allocate location lists. + rval.LocationLists = make([][]byte, numRegParams) + + // Locate the value corresponding to the last spill of + // an input register. + afterPrologVal := locatePrologEnd(f) + + // Walk the input params again and process the register-resident elements. 
+ pidx := 0 + for _, inp := range pri.InParams() { + if !isNamedRegParam(inp) { + // will be sorted out elsewhere + continue + } + + n := inp.Name + sl := LocalSlot{N: n, Type: inp.Type, Off: 0} + rval.Vars = append(rval.Vars, n) + rval.Slots = append(rval.Slots, sl) + slid := len(rval.VarSlots) + rval.VarSlots = append(rval.VarSlots, []SlotID{SlotID(slid)}) + + if afterPrologVal == ID(-1) { + // This can happen for degenerate functions with infinite + // loops such as that in issue 45948. In such cases, leave + // the var/slot set up for the param, but don't try to + // emit a location list. + if loggingEnabled { + state.logf("locatePrologEnd failed, skipping %v\n", n) + } + pidx++ + continue + } + + // Param is arriving in one or more registers. We need a 2-element + // location expression for it. First entry in location list + // will correspond to lifetime in input registers. + list, sizeIdx := setupLocList(ctxt, f, rval.LocationLists[pidx], + BlockStart.ID, afterPrologVal) + if list == nil { + pidx++ + continue + } + if loggingEnabled { + state.logf("param %v:\n [, %d]:\n", n, afterPrologVal) + } + rtypes, _ := inp.RegisterTypesAndOffsets() + padding := make([]uint64, 0, 32) + padding = inp.ComputePadding(padding) + for k, r := range inp.Registers { + reg := ObjRegForAbiReg(r, f.Config) + dwreg := ctxt.Arch.DWARFRegisters[reg] + if dwreg < 32 { + list = append(list, dwarf.DW_OP_reg0+byte(dwreg)) + } else { + list = append(list, dwarf.DW_OP_regx) + list = dwarf.AppendUleb128(list, uint64(dwreg)) + } + if loggingEnabled { + state.logf(" piece %d -> dwreg %d", k, dwreg) + } + if len(inp.Registers) > 1 { + list = append(list, dwarf.DW_OP_piece) + ts := rtypes[k].Size() + list = dwarf.AppendUleb128(list, uint64(ts)) + if padding[k] > 0 { + if loggingEnabled { + state.logf(" [pad %d bytes]", padding[k]) + } + list = append(list, dwarf.DW_OP_piece) + list = dwarf.AppendUleb128(list, padding[k]) + } + } + if loggingEnabled { + state.logf("\n") + } + } + // fill in 
length of location expression element + ctxt.Arch.ByteOrder.PutUint16(list[sizeIdx:], uint16(len(list)-sizeIdx-2)) + + // Second entry in the location list will be the stack home + // of the param, once it has been spilled. Emit that now. + list, sizeIdx = setupLocList(ctxt, f, list, + afterPrologVal, FuncEnd.ID) + if list == nil { + pidx++ + continue + } + soff := stackOffset(sl) + if soff == 0 { + list = append(list, dwarf.DW_OP_call_frame_cfa) + } else { + list = append(list, dwarf.DW_OP_fbreg) + list = dwarf.AppendSleb128(list, int64(soff)) + } + if loggingEnabled { + state.logf(" [%d, ): stackOffset=%d\n", afterPrologVal, soff) + } + + // fill in size + ctxt.Arch.ByteOrder.PutUint16(list[sizeIdx:], uint16(len(list)-sizeIdx-2)) + + rval.LocationLists[pidx] = list + pidx++ + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/debug_lines_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/debug_lines_test.go new file mode 100644 index 0000000000000000000000000000000000000000..af9e2a34cfc787897f42c0ec3bc5a32f59c5ca19 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/debug_lines_test.go @@ -0,0 +1,269 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa_test + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "internal/testenv" + "os" + "path/filepath" + "reflect" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "testing" +) + +// Matches lines in genssa output that are marked "isstmt", and the parenthesized plus-prefixed line number is a submatch +var asmLine *regexp.Regexp = regexp.MustCompile(`^\s[vb]\d+\s+\d+\s\(\+(\d+)\)`) + +// this matches e.g. ` v123456789 000007 (+9876654310) MOVUPS X15, ""..autotmp_2-32(SP)` + +// Matches lines in genssa output that describe an inlined file. +// Note it expects an unadventurous choice of basename. 
+var sepRE = regexp.QuoteMeta(string(filepath.Separator)) +var inlineLine *regexp.Regexp = regexp.MustCompile(`^#\s.*` + sepRE + `[-\w]+\.go:(\d+)`) + +// this matches e.g. # /pa/inline-dumpxxxx.go:6 + +var testGoArchFlag = flag.String("arch", "", "run test for specified architecture") + +func testGoArch() string { + if *testGoArchFlag == "" { + return runtime.GOARCH + } + return *testGoArchFlag +} + +func hasRegisterABI() bool { + switch testGoArch() { + case "amd64", "arm64", "loong64", "ppc64", "ppc64le", "riscv": + return true + } + return false +} + +func unixOnly(t *testing.T) { + if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { // in particular, it could be windows. + t.Skip("this test depends on creating a file with a wonky name, only works for sure on Linux and Darwin") + } +} + +// testDebugLinesDefault removes the first wanted statement on architectures that are not (yet) register ABI. +func testDebugLinesDefault(t *testing.T, gcflags, file, function string, wantStmts []int, ignoreRepeats bool) { + unixOnly(t) + if !hasRegisterABI() { + wantStmts = wantStmts[1:] + } + testDebugLines(t, gcflags, file, function, wantStmts, ignoreRepeats) +} + +func TestDebugLinesSayHi(t *testing.T) { + // This test is potentially fragile, the goal is that debugging should step properly through "sayhi" + // If the blocks are reordered in a way that changes the statement order but execution flows correctly, + // then rearrange the expected numbers. Register abi and not-register-abi also have different sequences, + // at least for now. 
+ + testDebugLinesDefault(t, "-N -l", "sayhi.go", "sayhi", []int{8, 9, 10, 11}, false) +} + +func TestDebugLinesPushback(t *testing.T) { + unixOnly(t) + + switch testGoArch() { + default: + t.Skip("skipped for many architectures") + + case "arm64", "amd64": // register ABI + fn := "(*List[go.shape.int_0]).PushBack" + if true /* was buildcfg.Experiment.Unified */ { + // Unified mangles differently + fn = "(*List[go.shape.int]).PushBack" + } + testDebugLines(t, "-N -l", "pushback.go", fn, []int{17, 18, 19, 20, 21, 22, 24}, true) + } +} + +func TestDebugLinesConvert(t *testing.T) { + unixOnly(t) + + switch testGoArch() { + default: + t.Skip("skipped for many architectures") + + case "arm64", "amd64": // register ABI + fn := "G[go.shape.int_0]" + if true /* was buildcfg.Experiment.Unified */ { + // Unified mangles differently + fn = "G[go.shape.int]" + } + testDebugLines(t, "-N -l", "convertline.go", fn, []int{9, 10, 11}, true) + } +} + +func TestInlineLines(t *testing.T) { + if runtime.GOARCH != "amd64" && *testGoArchFlag == "" { + // As of september 2021, works for everything except mips64, but still potentially fragile + t.Skip("only runs for amd64 unless -arch explicitly supplied") + } + + want := [][]int{{3}, {4, 10}, {4, 10, 16}, {4, 10}, {4, 11, 16}, {4, 11}, {4}, {5, 10}, {5, 10, 16}, {5, 10}, {5, 11, 16}, {5, 11}, {5}} + testInlineStack(t, "inline-dump.go", "f", want) +} + +func TestDebugLines_53456(t *testing.T) { + testDebugLinesDefault(t, "-N -l", "b53456.go", "(*T).Inc", []int{15, 16, 17, 18}, true) +} + +func compileAndDump(t *testing.T, file, function, moreGCFlags string) []byte { + testenv.MustHaveGoBuild(t) + + tmpdir, err := os.MkdirTemp("", "debug_lines_test") + if err != nil { + panic(fmt.Sprintf("Problem creating TempDir, error %v", err)) + } + if testing.Verbose() { + fmt.Printf("Preserving temporary directory %s\n", tmpdir) + } else { + defer os.RemoveAll(tmpdir) + } + + source, err := filepath.Abs(filepath.Join("testdata", file)) + if err != nil 
{ + panic(fmt.Sprintf("Could not get abspath of testdata directory and file, %v", err)) + } + + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", "foo.o", "-gcflags=-d=ssa/genssa/dump="+function+" "+moreGCFlags, source) + cmd.Dir = tmpdir + cmd.Env = replaceEnv(cmd.Env, "GOSSADIR", tmpdir) + testGoos := "linux" // default to linux + if testGoArch() == "wasm" { + testGoos = "js" + } + cmd.Env = replaceEnv(cmd.Env, "GOOS", testGoos) + cmd.Env = replaceEnv(cmd.Env, "GOARCH", testGoArch()) + + if testing.Verbose() { + fmt.Printf("About to run %s\n", asCommandLine("", cmd)) + } + + var stdout, stderr strings.Builder + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + t.Fatalf("error running cmd %s: %v\nstdout:\n%sstderr:\n%s\n", asCommandLine("", cmd), err, stdout.String(), stderr.String()) + } + + if s := stderr.String(); s != "" { + t.Fatalf("Wanted empty stderr, instead got:\n%s\n", s) + } + + dumpFile := filepath.Join(tmpdir, function+"_01__genssa.dump") + dumpBytes, err := os.ReadFile(dumpFile) + if err != nil { + t.Fatalf("Could not read dump file %s, err=%v", dumpFile, err) + } + return dumpBytes +} + +func sortInlineStacks(x [][]int) { + sort.Slice(x, func(i, j int) bool { + if len(x[i]) != len(x[j]) { + return len(x[i]) < len(x[j]) + } + for k := range x[i] { + if x[i][k] != x[j][k] { + return x[i][k] < x[j][k] + } + } + return false + }) +} + +// testInlineStack ensures that inlining is described properly in the comments in the dump file +func testInlineStack(t *testing.T, file, function string, wantStacks [][]int) { + // this is an inlining reporting test, not an optimization test. 
-N makes it less fragile + dumpBytes := compileAndDump(t, file, function, "-N") + dump := bufio.NewScanner(bytes.NewReader(dumpBytes)) + dumpLineNum := 0 + var gotStmts []int + var gotStacks [][]int + for dump.Scan() { + line := dump.Text() + dumpLineNum++ + matches := inlineLine.FindStringSubmatch(line) + if len(matches) == 2 { + stmt, err := strconv.ParseInt(matches[1], 10, 32) + if err != nil { + t.Fatalf("Expected to parse a line number but saw %s instead on dump line #%d, error %v", matches[1], dumpLineNum, err) + } + if testing.Verbose() { + fmt.Printf("Saw stmt# %d for submatch '%s' on dump line #%d = '%s'\n", stmt, matches[1], dumpLineNum, line) + } + gotStmts = append(gotStmts, int(stmt)) + } else if len(gotStmts) > 0 { + gotStacks = append(gotStacks, gotStmts) + gotStmts = nil + } + } + if len(gotStmts) > 0 { + gotStacks = append(gotStacks, gotStmts) + gotStmts = nil + } + sortInlineStacks(gotStacks) + sortInlineStacks(wantStacks) + if !reflect.DeepEqual(wantStacks, gotStacks) { + t.Errorf("wanted inlines %+v but got %+v\n%s", wantStacks, gotStacks, dumpBytes) + } + +} + +// testDebugLines compiles testdata/ with flags -N -l and -d=ssa/genssa/dump= +// then verifies that the statement-marked lines in that file are the same as those in wantStmts +// These files must all be short because this is super-fragile. 
+// "go build" is run in a temporary directory that is normally deleted, unless -test.v +func testDebugLines(t *testing.T, gcflags, file, function string, wantStmts []int, ignoreRepeats bool) { + dumpBytes := compileAndDump(t, file, function, gcflags) + dump := bufio.NewScanner(bytes.NewReader(dumpBytes)) + var gotStmts []int + dumpLineNum := 0 + for dump.Scan() { + line := dump.Text() + dumpLineNum++ + matches := asmLine.FindStringSubmatch(line) + if len(matches) == 2 { + stmt, err := strconv.ParseInt(matches[1], 10, 32) + if err != nil { + t.Fatalf("Expected to parse a line number but saw %s instead on dump line #%d, error %v", matches[1], dumpLineNum, err) + } + if testing.Verbose() { + fmt.Printf("Saw stmt# %d for submatch '%s' on dump line #%d = '%s'\n", stmt, matches[1], dumpLineNum, line) + } + gotStmts = append(gotStmts, int(stmt)) + } + } + if ignoreRepeats { // remove repeats from gotStmts + newGotStmts := []int{gotStmts[0]} + for _, x := range gotStmts { + if x != newGotStmts[len(newGotStmts)-1] { + newGotStmts = append(newGotStmts, x) + } + } + if !reflect.DeepEqual(wantStmts, newGotStmts) { + t.Errorf("wanted stmts %v but got %v (with repeats still in: %v)", wantStmts, newGotStmts, gotStmts) + } + + } else { + if !reflect.DeepEqual(wantStmts, gotStmts) { + t.Errorf("wanted stmts %v but got %v", wantStmts, gotStmts) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/debug_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/debug_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9ac414c8249b3fdc9f4a8cedf2a07a8ee21e8fb8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/debug_test.go @@ -0,0 +1,1016 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa_test + +import ( + "flag" + "fmt" + "internal/testenv" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "testing" + "time" +) + +var ( + update = flag.Bool("u", false, "update test reference files") + verbose = flag.Bool("v", false, "print debugger interactions (very verbose)") + dryrun = flag.Bool("n", false, "just print the command line and first debugging bits") + useGdb = flag.Bool("g", false, "use Gdb instead of Delve (dlv), use gdb reference files") + force = flag.Bool("f", false, "force run under not linux-amd64; also do not use tempdir") + repeats = flag.Bool("r", false, "detect repeats in debug steps and don't ignore them") + inlines = flag.Bool("i", false, "do inlining for gdb (makes testing flaky till inlining info is correct)") +) + +var ( + hexRe = regexp.MustCompile("0x[a-zA-Z0-9]+") + numRe = regexp.MustCompile("-?\\d+") + stringRe = regexp.MustCompile("\"([^\\\"]|(\\.))*\"") + leadingDollarNumberRe = regexp.MustCompile("^[$]\\d+") + optOutGdbRe = regexp.MustCompile("[<]optimized out[>]") + numberColonRe = regexp.MustCompile("^ *\\d+:") +) + +var gdb = "gdb" // Might be "ggdb" on Darwin, because gdb no longer part of XCode +var debugger = "dlv" // For naming files, etc. + +var gogcflags = os.Getenv("GO_GCFLAGS") + +// optimizedLibs usually means "not running in a noopt test builder". +var optimizedLibs = (!strings.Contains(gogcflags, "-N") && !strings.Contains(gogcflags, "-l")) + +// TestNexting go-builds a file, then uses a debugger (default delve, optionally gdb) +// to next through the generated executable, recording each line landed at, and +// then compares those lines with reference file(s). +// Flag -u updates the reference file(s). +// Flag -g changes the debugger to gdb (and uses gdb-specific reference files) +// Flag -v is ever-so-slightly verbose. +// Flag -n is for dry-run, and prints the shell and first debug commands. 
+// +// Because this test (combined with existing compiler deficiencies) is flaky, +// for gdb-based testing by default inlining is disabled +// (otherwise output depends on library internals) +// and for both gdb and dlv by default repeated lines in the next stream are ignored +// (because this appears to be timing-dependent in gdb, and the cleanest fix is in code common to gdb and dlv). +// +// Also by default, any source code outside of .../testdata/ is not mentioned +// in the debugging histories. This deals both with inlined library code once +// the compiler is generating clean inline records, and also deals with +// runtime code between return from main and process exit. This is hidden +// so that those files (in the runtime/library) can change without affecting +// this test. +// +// These choices can be reversed with -i (inlining on) and -r (repeats detected) which +// will also cause their own failures against the expected outputs. Note that if the compiler +// and debugger were behaving properly, the inlined code and repeated lines would not appear, +// so the expected output is closer to what we hope to see, though it also encodes all our +// current bugs. +// +// The file being tested may contain comments of the form +// //DBG-TAG=(v1,v2,v3) +// where DBG = {gdb,dlv} and TAG={dbg,opt} +// each variable may optionally be followed by a / and one or more of S,A,N,O +// to indicate normalization of Strings, (hex) addresses, and numbers. +// "O" is an explicit indication that we expect it to be optimized out. 
+// For example: +// +// if len(os.Args) > 1 { //gdb-dbg=(hist/A,cannedInput/A) //dlv-dbg=(hist/A,cannedInput/A) +// +// TODO: not implemented for Delve yet, but this is the plan +// +// After a compiler change that causes a difference in the debug behavior, check +// to see if it is sensible or not, and if it is, update the reference files with +// go test debug_test.go -args -u +// (for Delve) +// go test debug_test.go -args -u -d +func TestNexting(t *testing.T) { + testenv.SkipFlaky(t, 37404) + + skipReasons := "" // Many possible skip reasons, list all that apply + if testing.Short() { + skipReasons = "not run in short mode; " + } + testenv.MustHaveGoBuild(t) + + if *useGdb && !*force && !(runtime.GOOS == "linux" && runtime.GOARCH == "amd64") { + // Running gdb on OSX/darwin is very flaky. + // Sometimes it is called ggdb, depending on how it is installed. + // It also sometimes requires an admin password typed into a dialog box. + // Various architectures tend to differ slightly sometimes, and keeping them + // all in sync is a pain for people who don't have them all at hand, + // so limit testing to amd64 (for now) + skipReasons += "not run when testing gdb (-g) unless forced (-f) or linux-amd64; " + } + + if !*useGdb && !*force && testenv.Builder() == "linux-386-longtest" { + // The latest version of Delve does support linux/386. However, the version currently + // installed in the linux-386-longtest builder does not. See golang.org/issue/39309. + skipReasons += "not run when testing delve on linux-386-longtest builder unless forced (-f); " + } + + if *useGdb { + debugger = "gdb" + _, err := exec.LookPath(gdb) + if err != nil { + if runtime.GOOS != "darwin" { + skipReasons += "not run because gdb not on path; " + } else { + // On Darwin, MacPorts installs gdb as "ggdb". 
+ _, err = exec.LookPath("ggdb") + if err != nil { + skipReasons += "not run because gdb (and also ggdb) request by -g option not on path; " + } else { + gdb = "ggdb" + } + } + } + } else { // Delve + debugger = "dlv" + _, err := exec.LookPath("dlv") + if err != nil { + skipReasons += "not run because dlv not on path; " + } + } + + if skipReasons != "" { + t.Skip(skipReasons[:len(skipReasons)-2]) + } + + optFlags := "" // Whatever flags are needed to test debugging of optimized code. + dbgFlags := "-N -l" + if *useGdb && !*inlines { + // For gdb (default), disable inlining so that a compiler test does not depend on library code. + // TODO: Technically not necessary in 1.10 and later, but it causes a largish regression that needs investigation. + optFlags += " -l" + } + + moreargs := []string{} + if *useGdb && (runtime.GOOS == "darwin" || runtime.GOOS == "windows") { + // gdb and lldb on Darwin do not deal with compressed dwarf. + // also, Windows. + moreargs = append(moreargs, "-ldflags=-compressdwarf=false") + } + + subTest(t, debugger+"-dbg", "hist", dbgFlags, moreargs...) + subTest(t, debugger+"-dbg", "scopes", dbgFlags, moreargs...) + subTest(t, debugger+"-dbg", "i22558", dbgFlags, moreargs...) + + subTest(t, debugger+"-dbg-race", "i22600", dbgFlags, append(moreargs, "-race")...) + + optSubTest(t, debugger+"-opt", "hist", optFlags, 1000, moreargs...) + optSubTest(t, debugger+"-opt", "scopes", optFlags, 1000, moreargs...) + + // Was optSubtest, this test is observed flaky on Linux in Docker on (busy) macOS, probably because of timing + // glitches in this harness. + // TODO get rid of timing glitches in this harness. + skipSubTest(t, debugger+"-opt", "infloop", optFlags, 10, moreargs...) + +} + +// subTest creates a subtest that compiles basename.go with the specified gcflags and additional compiler arguments, +// then runs the debugger on the resulting binary, with any comment-specified actions matching tag triggered. 
+func subTest(t *testing.T, tag string, basename string, gcflags string, moreargs ...string) { + t.Run(tag+"-"+basename, func(t *testing.T) { + if t.Name() == "TestNexting/gdb-dbg-i22558" { + testenv.SkipFlaky(t, 31263) + } + testNexting(t, basename, tag, gcflags, 1000, moreargs...) + }) +} + +// skipSubTest is the same as subTest except that it skips the test if execution is not forced (-f) +func skipSubTest(t *testing.T, tag string, basename string, gcflags string, count int, moreargs ...string) { + t.Run(tag+"-"+basename, func(t *testing.T) { + if *force { + testNexting(t, basename, tag, gcflags, count, moreargs...) + } else { + t.Skip("skipping flaky test becaused not forced (-f)") + } + }) +} + +// optSubTest is the same as subTest except that it skips the test if the runtime and libraries +// were not compiled with optimization turned on. (The skip may not be necessary with Go 1.10 and later) +func optSubTest(t *testing.T, tag string, basename string, gcflags string, count int, moreargs ...string) { + // If optimized test is run with unoptimized libraries (compiled with -N -l), it is very likely to fail. + // This occurs in the noopt builders (for example). + t.Run(tag+"-"+basename, func(t *testing.T) { + if *force || optimizedLibs { + testNexting(t, basename, tag, gcflags, count, moreargs...) + } else { + t.Skip("skipping for unoptimized stdlib/runtime") + } + }) +} + +func testNexting(t *testing.T, base, tag, gcflags string, count int, moreArgs ...string) { + // (1) In testdata, build sample.go into test-sample. + // (2) Run debugger gathering a history + // (3) Read expected history from testdata/sample..nexts + // optionally, write out testdata/sample..nexts + + testbase := filepath.Join("testdata", base) + "." 
+ tag + tmpbase := filepath.Join("testdata", "test-"+base+"."+tag) + + // Use a temporary directory unless -f is specified + if !*force { + tmpdir := t.TempDir() + tmpbase = filepath.Join(tmpdir, "test-"+base+"."+tag) + if *verbose { + fmt.Printf("Tempdir is %s\n", tmpdir) + } + } + exe := tmpbase + + runGoArgs := []string{"build", "-o", exe, "-gcflags=all=" + gcflags} + runGoArgs = append(runGoArgs, moreArgs...) + runGoArgs = append(runGoArgs, filepath.Join("testdata", base+".go")) + + runGo(t, "", runGoArgs...) + + nextlog := testbase + ".nexts" + tmplog := tmpbase + ".nexts" + var dbg dbgr + if *useGdb { + dbg = newGdb(t, tag, exe) + } else { + dbg = newDelve(t, tag, exe) + } + h1 := runDbgr(dbg, count) + if *dryrun { + fmt.Printf("# Tag for above is %s\n", dbg.tag()) + return + } + if *update { + h1.write(nextlog) + } else { + h0 := &nextHist{} + h0.read(nextlog) + if !h0.equals(h1) { + // Be very noisy about exactly what's wrong to simplify debugging. + h1.write(tmplog) + cmd := testenv.Command(t, "diff", "-u", nextlog, tmplog) + line := asCommandLine("", cmd) + bytes, err := cmd.CombinedOutput() + if err != nil && len(bytes) == 0 { + t.Fatalf("step/next histories differ, diff command %s failed with error=%v", line, err) + } + t.Fatalf("step/next histories differ, diff=\n%s", string(bytes)) + } + } +} + +type dbgr interface { + start() + stepnext(s string) bool // step or next, possible with parameter, gets line etc. returns true for success, false for unsure response + quit() + hist() *nextHist + tag() string +} + +func runDbgr(dbg dbgr, maxNext int) *nextHist { + dbg.start() + if *dryrun { + return nil + } + for i := 0; i < maxNext; i++ { + if !dbg.stepnext("n") { + break + } + } + dbg.quit() + h := dbg.hist() + return h +} + +func runGo(t *testing.T, dir string, args ...string) string { + var stdout, stderr strings.Builder + cmd := testenv.Command(t, testenv.GoToolPath(t), args...) 
+ cmd.Dir = dir + if *dryrun { + fmt.Printf("%s\n", asCommandLine("", cmd)) + return "" + } + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + t.Fatalf("error running cmd (%s): %v\nstdout:\n%sstderr:\n%s\n", asCommandLine("", cmd), err, stdout.String(), stderr.String()) + } + + if s := stderr.String(); s != "" { + t.Fatalf("Stderr = %s\nWant empty", s) + } + + return stdout.String() +} + +// tstring provides two strings, o (stdout) and e (stderr) +type tstring struct { + o string + e string +} + +func (t tstring) String() string { + return t.o + t.e +} + +type pos struct { + line uint32 + file uint8 // Artifact of plans to implement differencing instead of calling out to diff. +} + +type nextHist struct { + f2i map[string]uint8 + fs []string + ps []pos + texts []string + vars [][]string +} + +func (h *nextHist) write(filename string) { + file, err := os.Create(filename) + if err != nil { + panic(fmt.Sprintf("Problem opening %s, error %v\n", filename, err)) + } + defer file.Close() + var lastfile uint8 + for i, x := range h.texts { + p := h.ps[i] + if lastfile != p.file { + fmt.Fprintf(file, " %s\n", h.fs[p.file-1]) + lastfile = p.file + } + fmt.Fprintf(file, "%d:%s\n", p.line, x) + // TODO, normalize between gdb and dlv into a common, comparable format. 
+ for _, y := range h.vars[i] { + y = strings.TrimSpace(y) + fmt.Fprintf(file, "%s\n", y) + } + } + file.Close() +} + +func (h *nextHist) read(filename string) { + h.f2i = make(map[string]uint8) + bytes, err := os.ReadFile(filename) + if err != nil { + panic(fmt.Sprintf("Problem reading %s, error %v\n", filename, err)) + } + var lastfile string + lines := strings.Split(string(bytes), "\n") + for i, l := range lines { + if len(l) > 0 && l[0] != '#' { + if l[0] == ' ' { + // file -- first two characters expected to be " " + lastfile = strings.TrimSpace(l) + } else if numberColonRe.MatchString(l) { + // line number -- : + colonPos := strings.Index(l, ":") + if colonPos == -1 { + panic(fmt.Sprintf("Line %d (%s) in file %s expected to contain ':' but does not.\n", i+1, l, filename)) + } + h.add(lastfile, l[0:colonPos], l[colonPos+1:]) + } else { + h.addVar(l) + } + } + } +} + +// add appends file (name), line (number) and text (string) to the history, +// provided that the file+line combo does not repeat the previous position, +// and provided that the file is within the testdata directory. The return +// value indicates whether the append occurred. 
func (h *nextHist) add(file, line, text string) bool {
	// Only record source code in testdata unless the inlines flag is set
	if !*inlines && !strings.Contains(file, "/testdata/") {
		return false
	}
	// Intern the file name; index 0 means "not yet seen" (indices are 1-based).
	fi := h.f2i[file]
	if fi == 0 {
		h.fs = append(h.fs, file)
		fi = uint8(len(h.fs))
		h.f2i[file] = fi
	}

	line = strings.TrimSpace(line)
	var li int
	var err error
	if line != "" {
		li, err = strconv.Atoi(line)
		if err != nil {
			panic(fmt.Sprintf("Non-numeric line: %s, error %v\n", line, err))
		}
	}
	l := len(h.ps)
	p := pos{line: uint32(li), file: fi}

	// Suppress an exact repeat of the previous position unless -r (repeats) is set.
	if l == 0 || *repeats || h.ps[l-1] != p {
		h.ps = append(h.ps, p)
		h.texts = append(h.texts, text)
		h.vars = append(h.vars, []string{})
		return true
	}
	return false
}

// addVar attaches a printed-variable line to the most recently added step.
func (h *nextHist) addVar(text string) {
	l := len(h.texts)
	h.vars[l-1] = append(h.vars[l-1], text)
}

// invertMapSU8 returns the inverse of a string->uint8 map (used to compare
// the file-interning tables of two histories).
func invertMapSU8(hf2i map[string]uint8) map[uint8]string {
	hi2f := make(map[uint8]string)
	for hs, i := range hf2i {
		hi2f[i] = hs
	}
	return hi2f
}

// equals reports whether h and k record the same files, positions, and
// printed variables, in the same order.
func (h *nextHist) equals(k *nextHist) bool {
	if len(h.f2i) != len(k.f2i) {
		return false
	}
	if len(h.ps) != len(k.ps) {
		return false
	}
	hi2f := invertMapSU8(h.f2i)
	ki2f := invertMapSU8(k.f2i)

	for i, hs := range hi2f {
		if hs != ki2f[i] {
			return false
		}
	}

	for i, x := range h.ps {
		if k.ps[i] != x {
			return false
		}
	}

	for i, hv := range h.vars {
		kv := k.vars[i]
		if len(hv) != len(kv) {
			return false
		}
		for j, hvt := range hv {
			if hvt != kv[j] {
				return false
			}
		}
	}

	return true
}

// canonFileName strips everything before "/src/" from a filename.
// This makes file names portable across different machines,
// home directories, and temporary directories.
func canonFileName(f string) string {
	i := strings.Index(f, "/src/")
	if i != -1 {
		f = f[i+1:]
	}
	return f
}

/* Delve */

// delveState drives a dlv subprocess and records stepping history.
type delveState struct {
	cmd  *exec.Cmd
	tagg string
	*ioState
	atLineRe         *regexp.Regexp // "\n =>"
	funcFileLinePCre *regexp.Regexp // "^> ([^ ]+) ([^:]+):([0-9]+) .*[(]PC: (0x[a-z0-9]+)"
	line             string
	file             string
	function         string
}

// newDelve constructs a dbgr that runs "dlv exec executable" (with optional
// program args after "--"); TERM=dumb suppresses dlv's color escape codes.
func newDelve(t testing.TB, tag, executable string, args ...string) dbgr {
	cmd := testenv.Command(t, "dlv", "exec", executable)
	cmd.Env = replaceEnv(cmd.Env, "TERM", "dumb")
	if len(args) > 0 {
		cmd.Args = append(cmd.Args, "--")
		cmd.Args = append(cmd.Args, args...)
	}
	s := &delveState{tagg: tag, cmd: cmd}
	// HAHA Delve has control characters embedded to change the color of the => and the line number
	// that would be '(\\x1b\\[[0-9;]+m)?' OR TERM=dumb
	s.atLineRe = regexp.MustCompile("\n=>[[:space:]]+[0-9]+:(.*)")
	s.funcFileLinePCre = regexp.MustCompile("> ([^ ]+) ([^:]+):([0-9]+) .*[(]PC: (0x[a-z0-9]+)[)]\n")
	s.ioState = newIoState(s.cmd)
	return s
}

func (s *delveState) tag() string {
	return s.tagg
}

// stepnext sends command ss to dlv, waits for the prompt, and parses the
// reported location and source excerpt into the history. Returns false if
// the output did not contain a recognizable location.
func (s *delveState) stepnext(ss string) bool {
	x := s.ioState.writeReadExpect(ss+"\n", "[(]dlv[)] ")
	excerpts := s.atLineRe.FindStringSubmatch(x.o)
	locations := s.funcFileLinePCre.FindStringSubmatch(x.o)
	excerpt := ""
	if len(excerpts) > 1 {
		excerpt = excerpts[1]
	}
	if len(locations) > 0 {
		fn := canonFileName(locations[2])
		if *verbose {
			if s.file != fn {
				fmt.Printf("%s\n", locations[2]) // don't canonicalize verbose logging
			}
			fmt.Printf(" %s\n", locations[3])
		}
		s.line = locations[3]
		s.file = fn
		s.function = locations[1]
		s.ioState.history.add(s.file, s.line, excerpt)
		// TODO: here is where variable processing will be added. See gdbState.stepnext as a guide.
		// Adding this may require some amount of normalization so that logs are comparable.
		return true
	}
	if *verbose {
		fmt.Printf("DID NOT MATCH EXPECTED NEXT OUTPUT\nO='%s'\nE='%s'\n", x.o, x.e)
	}
	return false
}

// start launches dlv, sets a breakpoint at main.test, and continues to it.
func (s *delveState) start() {
	if *dryrun {
		fmt.Printf("%s\n", asCommandLine("", s.cmd))
		fmt.Printf("b main.test\n")
		fmt.Printf("c\n")
		return
	}
	err := s.cmd.Start()
	if err != nil {
		line := asCommandLine("", s.cmd)
		panic(fmt.Sprintf("There was an error [start] running '%s', %v\n", line, err))
	}
	s.ioState.readExpecting(-1, 5000, "Type 'help' for list of commands.")
	s.ioState.writeReadExpect("b main.test\n", "[(]dlv[)] ")
	s.stepnext("c")
}

func (s *delveState) quit() {
	expect("", s.ioState.writeRead("q\n"))
}

/* Gdb */

// gdbState drives a gdb subprocess and records stepping history.
type gdbState struct {
	cmd  *exec.Cmd
	tagg string
	args []string
	*ioState
	atLineRe         *regexp.Regexp
	funcFileLinePCre *regexp.Regexp
	line             string
	file             string
	function         string
}

// newGdb constructs a dbgr that runs gdb on executable with shell startup
// disabled (necessary on Darwin) and dumb-terminal output.
func newGdb(t testing.TB, tag, executable string, args ...string) dbgr {
	// Turn off shell, necessary for Darwin apparently
	cmd := testenv.Command(t, gdb, "-nx",
		"-iex", fmt.Sprintf("add-auto-load-safe-path %s/src/runtime", runtime.GOROOT()),
		"-ex", "set startup-with-shell off", executable)
	cmd.Env = replaceEnv(cmd.Env, "TERM", "dumb")
	s := &gdbState{tagg: tag, cmd: cmd, args: args}
	s.atLineRe = regexp.MustCompile("(^|\n)([0-9]+)(.*)")
	s.funcFileLinePCre = regexp.MustCompile(
		"([^ ]+) [(][^)]*[)][ \\t\\n]+at ([^:]+):([0-9]+)")
	// runtime.main () at /Users/drchase/GoogleDrive/work/go/src/runtime/proc.go:201
	// function file line
	// Thread 2 hit Breakpoint 1, main.main () at /Users/drchase/GoogleDrive/work/debug/hist.go:18
	s.ioState = newIoState(s.cmd)
	return s
}

func (s *gdbState) tag() string {
	return s.tagg
}

// start waits for the gdb prompt, sets a breakpoint at main.test, and runs
// the program (with any recorded args appended, unquoted, to "run").
func (s *gdbState) start() {
	run := "run"
	for _, a := range s.args {
		run += " " + a // Can't quote args for gdb, it will pass them through including the quotes
	}
	if *dryrun {
		fmt.Printf("%s\n", asCommandLine("", s.cmd))
		fmt.Printf("tbreak main.test\n")
		fmt.Printf("%s\n", run)
		return
	}
	err := s.cmd.Start()
	if err != nil {
		line := asCommandLine("", s.cmd)
		panic(fmt.Sprintf("There was an error [start] running '%s', %v\n", line, err))
	}
	s.ioState.readSimpleExpecting("[(]gdb[)] ")
	x := s.ioState.writeReadExpect("b main.test\n", "[(]gdb[)] ")
	expect("Breakpoint [0-9]+ at", x)
	s.stepnext(run)
}

// stepnext sends command ss to gdb, parses the reported location and/or
// source excerpt into the history, and prints any comment-requested
// variables for lines that were newly added (i.e. not repeats).
func (s *gdbState) stepnext(ss string) bool {
	x := s.ioState.writeReadExpect(ss+"\n", "[(]gdb[)] ")
	excerpts := s.atLineRe.FindStringSubmatch(x.o)
	locations := s.funcFileLinePCre.FindStringSubmatch(x.o)
	excerpt := ""
	addedLine := false
	if len(excerpts) == 0 && len(locations) == 0 {
		if *verbose {
			fmt.Printf("DID NOT MATCH %s", x.o)
		}
		return false
	}
	if len(excerpts) > 0 {
		excerpt = excerpts[3]
	}
	if len(locations) > 0 {
		fn := canonFileName(locations[2])
		if *verbose {
			if s.file != fn {
				fmt.Printf("%s\n", locations[2])
			}
			fmt.Printf(" %s\n", locations[3])
		}
		s.line = locations[3]
		s.file = fn
		s.function = locations[1]
		addedLine = s.ioState.history.add(s.file, s.line, excerpt)
	}
	if len(excerpts) > 0 {
		if *verbose {
			fmt.Printf(" %s\n", excerpts[2])
		}
		s.line = excerpts[2]
		addedLine = s.ioState.history.add(s.file, s.line, excerpt)
	}

	if !addedLine {
		// True if this was a repeat line
		return true
	}
	// Look for //gdb-<tag>=(v1,v2,v3) and print v1, v2, v3
	vars := varsToPrint(excerpt, "//"+s.tag()+"=(")
	for _, v := range vars {
		response := printVariableAndNormalize(v, func(v string) string {
			return s.ioState.writeReadExpect("p "+v+"\n", "[(]gdb[)] ").String()
		})
		s.ioState.history.addVar(response)
	}
	return true
}

// printVariableAndNormalize extracts any slash-indicated normalizing requests from the variable
// name, then uses printer to get the value of the variable from the debugger, and then
// normalizes and returns the response.
+func printVariableAndNormalize(v string, printer func(v string) string) string { + slashIndex := strings.Index(v, "/") + substitutions := "" + if slashIndex != -1 { + substitutions = v[slashIndex:] + v = v[:slashIndex] + } + response := printer(v) + // expect something like "$1 = ..." + dollar := strings.Index(response, "$") + cr := strings.Index(response, "\n") + + if dollar == -1 { // some not entirely expected response, whine and carry on. + if cr == -1 { + response = strings.TrimSpace(response) // discards trailing newline + response = strings.Replace(response, "\n", "
", -1) + return "$ Malformed response " + response + } + response = strings.TrimSpace(response[:cr]) + return "$ " + response + } + if cr == -1 { + cr = len(response) + } + // Convert the leading $ into the variable name to enhance readability + // and reduce scope of diffs if an earlier print-variable is added. + response = strings.TrimSpace(response[dollar:cr]) + response = leadingDollarNumberRe.ReplaceAllString(response, v) + + // Normalize value as requested. + if strings.Contains(substitutions, "A") { + response = hexRe.ReplaceAllString(response, "") + } + if strings.Contains(substitutions, "N") { + response = numRe.ReplaceAllString(response, "") + } + if strings.Contains(substitutions, "S") { + response = stringRe.ReplaceAllString(response, "") + } + if strings.Contains(substitutions, "O") { + response = optOutGdbRe.ReplaceAllString(response, "") + } + return response +} + +// varsToPrint takes a source code line, and extracts the comma-separated variable names +// found between lookfor and the next ")". +// For example, if line includes "... //gdb-foo=(v1,v2,v3)" and +// lookfor="//gdb-foo=(", then varsToPrint returns ["v1", "v2", "v3"] +func varsToPrint(line, lookfor string) []string { + var vars []string + if strings.Contains(line, lookfor) { + x := line[strings.Index(line, lookfor)+len(lookfor):] + end := strings.Index(x, ")") + if end == -1 { + panic(fmt.Sprintf("Saw variable list begin %s in %s but no closing ')'", lookfor, line)) + } + vars = strings.Split(x[:end], ",") + for i, y := range vars { + vars[i] = strings.TrimSpace(y) + } + } + return vars +} + +func (s *gdbState) quit() { + response := s.ioState.writeRead("q\n") + if strings.Contains(response.o, "Quit anyway? (y or n)") { + defer func() { + if r := recover(); r != nil { + if s, ok := r.(string); !(ok && strings.Contains(s, "'Y\n'")) { + // Not the panic that was expected. 
+ fmt.Printf("Expected a broken pipe panic, but saw the following panic instead") + panic(r) + } + } + }() + s.ioState.writeRead("Y\n") + } +} + +type ioState struct { + stdout io.ReadCloser + stderr io.ReadCloser + stdin io.WriteCloser + outChan chan string + errChan chan string + last tstring // Output of previous step + history *nextHist +} + +func newIoState(cmd *exec.Cmd) *ioState { + var err error + s := &ioState{} + s.history = &nextHist{} + s.history.f2i = make(map[string]uint8) + s.stdout, err = cmd.StdoutPipe() + line := asCommandLine("", cmd) + if err != nil { + panic(fmt.Sprintf("There was an error [stdoutpipe] running '%s', %v\n", line, err)) + } + s.stderr, err = cmd.StderrPipe() + if err != nil { + panic(fmt.Sprintf("There was an error [stdouterr] running '%s', %v\n", line, err)) + } + s.stdin, err = cmd.StdinPipe() + if err != nil { + panic(fmt.Sprintf("There was an error [stdinpipe] running '%s', %v\n", line, err)) + } + + s.outChan = make(chan string, 1) + s.errChan = make(chan string, 1) + go func() { + buffer := make([]byte, 4096) + for { + n, err := s.stdout.Read(buffer) + if n > 0 { + s.outChan <- string(buffer[0:n]) + } + if err == io.EOF || n == 0 { + break + } + if err != nil { + fmt.Printf("Saw an error forwarding stdout") + break + } + } + close(s.outChan) + s.stdout.Close() + }() + + go func() { + buffer := make([]byte, 4096) + for { + n, err := s.stderr.Read(buffer) + if n > 0 { + s.errChan <- string(buffer[0:n]) + } + if err == io.EOF || n == 0 { + break + } + if err != nil { + fmt.Printf("Saw an error forwarding stderr") + break + } + } + close(s.errChan) + s.stderr.Close() + }() + return s +} + +func (s *ioState) hist() *nextHist { + return s.history +} + +// writeRead writes ss, then reads stdout and stderr, waiting 500ms to +// be sure all the output has appeared. 
+func (s *ioState) writeRead(ss string) tstring { + if *verbose { + fmt.Printf("=> %s", ss) + } + _, err := io.WriteString(s.stdin, ss) + if err != nil { + panic(fmt.Sprintf("There was an error writing '%s', %v\n", ss, err)) + } + return s.readExpecting(-1, 500, "") +} + +// writeReadExpect writes ss, then reads stdout and stderr until something +// that matches expectRE appears. expectRE should not be "" +func (s *ioState) writeReadExpect(ss, expectRE string) tstring { + if *verbose { + fmt.Printf("=> %s", ss) + } + if expectRE == "" { + panic("expectRE should not be empty; use .* instead") + } + _, err := io.WriteString(s.stdin, ss) + if err != nil { + panic(fmt.Sprintf("There was an error writing '%s', %v\n", ss, err)) + } + return s.readSimpleExpecting(expectRE) +} + +func (s *ioState) readExpecting(millis, interlineTimeout int, expectedRE string) tstring { + timeout := time.Millisecond * time.Duration(millis) + interline := time.Millisecond * time.Duration(interlineTimeout) + s.last = tstring{} + var re *regexp.Regexp + if expectedRE != "" { + re = regexp.MustCompile(expectedRE) + } +loop: + for { + var timer <-chan time.Time + if timeout > 0 { + timer = time.After(timeout) + } + select { + case x, ok := <-s.outChan: + if !ok { + s.outChan = nil + } + s.last.o += x + case x, ok := <-s.errChan: + if !ok { + s.errChan = nil + } + s.last.e += x + case <-timer: + break loop + } + if re != nil { + if re.MatchString(s.last.o) { + break + } + if re.MatchString(s.last.e) { + break + } + } + timeout = interline + } + if *verbose { + fmt.Printf("<= %s%s", s.last.o, s.last.e) + } + return s.last +} + +func (s *ioState) readSimpleExpecting(expectedRE string) tstring { + s.last = tstring{} + var re *regexp.Regexp + if expectedRE != "" { + re = regexp.MustCompile(expectedRE) + } + for { + select { + case x, ok := <-s.outChan: + if !ok { + s.outChan = nil + } + s.last.o += x + case x, ok := <-s.errChan: + if !ok { + s.errChan = nil + } + s.last.e += x + } + if re != nil { + 
if re.MatchString(s.last.o) { + break + } + if re.MatchString(s.last.e) { + break + } + } + } + if *verbose { + fmt.Printf("<= %s%s", s.last.o, s.last.e) + } + return s.last +} + +// replaceEnv returns a new environment derived from env +// by removing any existing definition of ev and adding ev=evv. +func replaceEnv(env []string, ev string, evv string) []string { + if env == nil { + env = os.Environ() + } + evplus := ev + "=" + var found bool + for i, v := range env { + if strings.HasPrefix(v, evplus) { + found = true + env[i] = evplus + evv + } + } + if !found { + env = append(env, evplus+evv) + } + return env +} + +// asCommandLine renders cmd as something that could be copy-and-pasted into a command line +// If cwd is not empty and different from the command's directory, prepend an appropriate "cd" +func asCommandLine(cwd string, cmd *exec.Cmd) string { + s := "(" + if cmd.Dir != "" && cmd.Dir != cwd { + s += "cd" + escape(cmd.Dir) + ";" + } + for _, e := range cmd.Env { + if !strings.HasPrefix(e, "PATH=") && + !strings.HasPrefix(e, "HOME=") && + !strings.HasPrefix(e, "USER=") && + !strings.HasPrefix(e, "SHELL=") { + s += escape(e) + } + } + for _, a := range cmd.Args { + s += escape(a) + } + s += " )" + return s +} + +// escape inserts escapes appropriate for use in a shell command line +func escape(s string) string { + s = strings.Replace(s, "\\", "\\\\", -1) + s = strings.Replace(s, "'", "\\'", -1) + // Conservative guess at characters that will force quoting + if strings.ContainsAny(s, "\\ ;#*&$~?!|[]()<>{}`") { + s = " '" + s + "'" + } else { + s = " " + s + } + return s +} + +func expect(want string, got tstring) { + if want != "" { + match, err := regexp.MatchString(want, got.o) + if err != nil { + panic(fmt.Sprintf("Error for regexp %s, %v\n", want, err)) + } + if match { + return + } + // Ignore error as we have already checked for it before + match, _ = regexp.MatchString(want, got.e) + if match { + return + } + fmt.Printf("EXPECTED '%s'\n GOT 
O='%s'\nAND E='%s'\n", want, got.o, got.e) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/decompose.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/decompose.go new file mode 100644 index 0000000000000000000000000000000000000000..2293fc01ce6c619784a444524c895d60f23c4e5d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/decompose.go @@ -0,0 +1,479 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/types" + "sort" +) + +// decompose converts phi ops on compound builtin types into phi +// ops on simple types, then invokes rewrite rules to decompose +// other ops on those types. +func decomposeBuiltIn(f *Func) { + // Decompose phis + for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Op != OpPhi { + continue + } + decomposeBuiltInPhi(v) + } + } + + // Decompose other values + // Note: Leave dead values because we need to keep the original + // values around so the name component resolution below can still work. + applyRewrite(f, rewriteBlockdec, rewriteValuedec, leaveDeadValues) + if f.Config.RegSize == 4 { + applyRewrite(f, rewriteBlockdec64, rewriteValuedec64, leaveDeadValues) + } + + // Split up named values into their components. + // accumulate old names for aggregates (that are decomposed) in toDelete for efficient bulk deletion, + // accumulate new LocalSlots in newNames for addition after the iteration. This decomposition is for + // builtin types with leaf components, and thus there is no need to reprocess the newly create LocalSlots. 
+ var toDelete []namedVal + var newNames []*LocalSlot + for i, name := range f.Names { + t := name.Type + switch { + case t.IsInteger() && t.Size() > f.Config.RegSize: + hiName, loName := f.SplitInt64(name) + newNames = maybeAppend2(f, newNames, hiName, loName) + for j, v := range f.NamedValues[*name] { + if v.Op != OpInt64Make { + continue + } + f.NamedValues[*hiName] = append(f.NamedValues[*hiName], v.Args[0]) + f.NamedValues[*loName] = append(f.NamedValues[*loName], v.Args[1]) + toDelete = append(toDelete, namedVal{i, j}) + } + case t.IsComplex(): + rName, iName := f.SplitComplex(name) + newNames = maybeAppend2(f, newNames, rName, iName) + for j, v := range f.NamedValues[*name] { + if v.Op != OpComplexMake { + continue + } + f.NamedValues[*rName] = append(f.NamedValues[*rName], v.Args[0]) + f.NamedValues[*iName] = append(f.NamedValues[*iName], v.Args[1]) + toDelete = append(toDelete, namedVal{i, j}) + } + case t.IsString(): + ptrName, lenName := f.SplitString(name) + newNames = maybeAppend2(f, newNames, ptrName, lenName) + for j, v := range f.NamedValues[*name] { + if v.Op != OpStringMake { + continue + } + f.NamedValues[*ptrName] = append(f.NamedValues[*ptrName], v.Args[0]) + f.NamedValues[*lenName] = append(f.NamedValues[*lenName], v.Args[1]) + toDelete = append(toDelete, namedVal{i, j}) + } + case t.IsSlice(): + ptrName, lenName, capName := f.SplitSlice(name) + newNames = maybeAppend2(f, newNames, ptrName, lenName) + newNames = maybeAppend(f, newNames, capName) + for j, v := range f.NamedValues[*name] { + if v.Op != OpSliceMake { + continue + } + f.NamedValues[*ptrName] = append(f.NamedValues[*ptrName], v.Args[0]) + f.NamedValues[*lenName] = append(f.NamedValues[*lenName], v.Args[1]) + f.NamedValues[*capName] = append(f.NamedValues[*capName], v.Args[2]) + toDelete = append(toDelete, namedVal{i, j}) + } + case t.IsInterface(): + typeName, dataName := f.SplitInterface(name) + newNames = maybeAppend2(f, newNames, typeName, dataName) + for j, v := range 
f.NamedValues[*name] { + if v.Op != OpIMake { + continue + } + f.NamedValues[*typeName] = append(f.NamedValues[*typeName], v.Args[0]) + f.NamedValues[*dataName] = append(f.NamedValues[*dataName], v.Args[1]) + toDelete = append(toDelete, namedVal{i, j}) + } + case t.IsFloat(): + // floats are never decomposed, even ones bigger than RegSize + case t.Size() > f.Config.RegSize: + f.Fatalf("undecomposed named type %s %v", name, t) + } + } + + deleteNamedVals(f, toDelete) + f.Names = append(f.Names, newNames...) +} + +func maybeAppend(f *Func, ss []*LocalSlot, s *LocalSlot) []*LocalSlot { + if _, ok := f.NamedValues[*s]; !ok { + f.NamedValues[*s] = nil + return append(ss, s) + } + return ss +} + +func maybeAppend2(f *Func, ss []*LocalSlot, s1, s2 *LocalSlot) []*LocalSlot { + return maybeAppend(f, maybeAppend(f, ss, s1), s2) +} + +func decomposeBuiltInPhi(v *Value) { + switch { + case v.Type.IsInteger() && v.Type.Size() > v.Block.Func.Config.RegSize: + decomposeInt64Phi(v) + case v.Type.IsComplex(): + decomposeComplexPhi(v) + case v.Type.IsString(): + decomposeStringPhi(v) + case v.Type.IsSlice(): + decomposeSlicePhi(v) + case v.Type.IsInterface(): + decomposeInterfacePhi(v) + case v.Type.IsFloat(): + // floats are never decomposed, even ones bigger than RegSize + case v.Type.Size() > v.Block.Func.Config.RegSize: + v.Fatalf("%v undecomposed type %v", v, v.Type) + } +} + +func decomposeStringPhi(v *Value) { + types := &v.Block.Func.Config.Types + ptrType := types.BytePtr + lenType := types.Int + + ptr := v.Block.NewValue0(v.Pos, OpPhi, ptrType) + len := v.Block.NewValue0(v.Pos, OpPhi, lenType) + for _, a := range v.Args { + ptr.AddArg(a.Block.NewValue1(v.Pos, OpStringPtr, ptrType, a)) + len.AddArg(a.Block.NewValue1(v.Pos, OpStringLen, lenType, a)) + } + v.reset(OpStringMake) + v.AddArg(ptr) + v.AddArg(len) +} + +func decomposeSlicePhi(v *Value) { + types := &v.Block.Func.Config.Types + ptrType := v.Type.Elem().PtrTo() + lenType := types.Int + + ptr := 
v.Block.NewValue0(v.Pos, OpPhi, ptrType) + len := v.Block.NewValue0(v.Pos, OpPhi, lenType) + cap := v.Block.NewValue0(v.Pos, OpPhi, lenType) + for _, a := range v.Args { + ptr.AddArg(a.Block.NewValue1(v.Pos, OpSlicePtr, ptrType, a)) + len.AddArg(a.Block.NewValue1(v.Pos, OpSliceLen, lenType, a)) + cap.AddArg(a.Block.NewValue1(v.Pos, OpSliceCap, lenType, a)) + } + v.reset(OpSliceMake) + v.AddArg(ptr) + v.AddArg(len) + v.AddArg(cap) +} + +func decomposeInt64Phi(v *Value) { + cfgtypes := &v.Block.Func.Config.Types + var partType *types.Type + if v.Type.IsSigned() { + partType = cfgtypes.Int32 + } else { + partType = cfgtypes.UInt32 + } + + hi := v.Block.NewValue0(v.Pos, OpPhi, partType) + lo := v.Block.NewValue0(v.Pos, OpPhi, cfgtypes.UInt32) + for _, a := range v.Args { + hi.AddArg(a.Block.NewValue1(v.Pos, OpInt64Hi, partType, a)) + lo.AddArg(a.Block.NewValue1(v.Pos, OpInt64Lo, cfgtypes.UInt32, a)) + } + v.reset(OpInt64Make) + v.AddArg(hi) + v.AddArg(lo) +} + +func decomposeComplexPhi(v *Value) { + cfgtypes := &v.Block.Func.Config.Types + var partType *types.Type + switch z := v.Type.Size(); z { + case 8: + partType = cfgtypes.Float32 + case 16: + partType = cfgtypes.Float64 + default: + v.Fatalf("decomposeComplexPhi: bad complex size %d", z) + } + + real := v.Block.NewValue0(v.Pos, OpPhi, partType) + imag := v.Block.NewValue0(v.Pos, OpPhi, partType) + for _, a := range v.Args { + real.AddArg(a.Block.NewValue1(v.Pos, OpComplexReal, partType, a)) + imag.AddArg(a.Block.NewValue1(v.Pos, OpComplexImag, partType, a)) + } + v.reset(OpComplexMake) + v.AddArg(real) + v.AddArg(imag) +} + +func decomposeInterfacePhi(v *Value) { + uintptrType := v.Block.Func.Config.Types.Uintptr + ptrType := v.Block.Func.Config.Types.BytePtr + + itab := v.Block.NewValue0(v.Pos, OpPhi, uintptrType) + data := v.Block.NewValue0(v.Pos, OpPhi, ptrType) + for _, a := range v.Args { + itab.AddArg(a.Block.NewValue1(v.Pos, OpITab, uintptrType, a)) + data.AddArg(a.Block.NewValue1(v.Pos, OpIData, ptrType, 
a)) + } + v.reset(OpIMake) + v.AddArg(itab) + v.AddArg(data) +} + +func decomposeUser(f *Func) { + for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Op != OpPhi { + continue + } + decomposeUserPhi(v) + } + } + // Split up named values into their components. + i := 0 + var newNames []*LocalSlot + for _, name := range f.Names { + t := name.Type + switch { + case t.IsStruct(): + newNames = decomposeUserStructInto(f, name, newNames) + case t.IsArray(): + newNames = decomposeUserArrayInto(f, name, newNames) + default: + f.Names[i] = name + i++ + } + } + f.Names = f.Names[:i] + f.Names = append(f.Names, newNames...) +} + +// decomposeUserArrayInto creates names for the element(s) of arrays referenced +// by name where possible, and appends those new names to slots, which is then +// returned. +func decomposeUserArrayInto(f *Func, name *LocalSlot, slots []*LocalSlot) []*LocalSlot { + t := name.Type + if t.NumElem() == 0 { + // TODO(khr): Not sure what to do here. Probably nothing. + // Names for empty arrays aren't important. + return slots + } + if t.NumElem() != 1 { + // shouldn't get here due to CanSSA + f.Fatalf("array not of size 1") + } + elemName := f.SplitArray(name) + var keep []*Value + for _, v := range f.NamedValues[*name] { + if v.Op != OpArrayMake1 { + keep = append(keep, v) + continue + } + f.NamedValues[*elemName] = append(f.NamedValues[*elemName], v.Args[0]) + } + if len(keep) == 0 { + // delete the name for the array as a whole + delete(f.NamedValues, *name) + } else { + f.NamedValues[*name] = keep + } + + if t.Elem().IsArray() { + return decomposeUserArrayInto(f, elemName, slots) + } else if t.Elem().IsStruct() { + return decomposeUserStructInto(f, elemName, slots) + } + + return append(slots, elemName) +} + +// decomposeUserStructInto creates names for the fields(s) of structs referenced +// by name where possible, and appends those new names to slots, which is then +// returned. 
+func decomposeUserStructInto(f *Func, name *LocalSlot, slots []*LocalSlot) []*LocalSlot { + fnames := []*LocalSlot{} // slots for struct in name + t := name.Type + n := t.NumFields() + + for i := 0; i < n; i++ { + fs := f.SplitStruct(name, i) + fnames = append(fnames, fs) + // arrays and structs will be decomposed further, so + // there's no need to record a name + if !fs.Type.IsArray() && !fs.Type.IsStruct() { + slots = maybeAppend(f, slots, fs) + } + } + + makeOp := StructMakeOp(n) + var keep []*Value + // create named values for each struct field + for _, v := range f.NamedValues[*name] { + if v.Op != makeOp { + keep = append(keep, v) + continue + } + for i := 0; i < len(fnames); i++ { + f.NamedValues[*fnames[i]] = append(f.NamedValues[*fnames[i]], v.Args[i]) + } + } + if len(keep) == 0 { + // delete the name for the struct as a whole + delete(f.NamedValues, *name) + } else { + f.NamedValues[*name] = keep + } + + // now that this f.NamedValues contains values for the struct + // fields, recurse into nested structs + for i := 0; i < n; i++ { + if name.Type.FieldType(i).IsStruct() { + slots = decomposeUserStructInto(f, fnames[i], slots) + delete(f.NamedValues, *fnames[i]) + } else if name.Type.FieldType(i).IsArray() { + slots = decomposeUserArrayInto(f, fnames[i], slots) + delete(f.NamedValues, *fnames[i]) + } + } + return slots +} +func decomposeUserPhi(v *Value) { + switch { + case v.Type.IsStruct(): + decomposeStructPhi(v) + case v.Type.IsArray(): + decomposeArrayPhi(v) + } +} + +// decomposeStructPhi replaces phi-of-struct with structmake(phi-for-each-field), +// and then recursively decomposes the phis for each field. 
+func decomposeStructPhi(v *Value) { + t := v.Type + n := t.NumFields() + var fields [MaxStruct]*Value + for i := 0; i < n; i++ { + fields[i] = v.Block.NewValue0(v.Pos, OpPhi, t.FieldType(i)) + } + for _, a := range v.Args { + for i := 0; i < n; i++ { + fields[i].AddArg(a.Block.NewValue1I(v.Pos, OpStructSelect, t.FieldType(i), int64(i), a)) + } + } + v.reset(StructMakeOp(n)) + v.AddArgs(fields[:n]...) + + // Recursively decompose phis for each field. + for _, f := range fields[:n] { + decomposeUserPhi(f) + } +} + +// decomposeArrayPhi replaces phi-of-array with arraymake(phi-of-array-element), +// and then recursively decomposes the element phi. +func decomposeArrayPhi(v *Value) { + t := v.Type + if t.NumElem() == 0 { + v.reset(OpArrayMake0) + return + } + if t.NumElem() != 1 { + v.Fatalf("SSAable array must have no more than 1 element") + } + elem := v.Block.NewValue0(v.Pos, OpPhi, t.Elem()) + for _, a := range v.Args { + elem.AddArg(a.Block.NewValue1I(v.Pos, OpArraySelect, t.Elem(), 0, a)) + } + v.reset(OpArrayMake1) + v.AddArg(elem) + + // Recursively decompose elem phi. + decomposeUserPhi(elem) +} + +// MaxStruct is the maximum number of fields a struct +// can have and still be SSAable. +const MaxStruct = 4 + +// StructMakeOp returns the opcode to construct a struct with the +// given number of fields. +func StructMakeOp(nf int) Op { + switch nf { + case 0: + return OpStructMake0 + case 1: + return OpStructMake1 + case 2: + return OpStructMake2 + case 3: + return OpStructMake3 + case 4: + return OpStructMake4 + } + panic("too many fields in an SSAable struct") +} + +type namedVal struct { + locIndex, valIndex int // f.NamedValues[f.Names[locIndex]][valIndex] = key +} + +// deleteNamedVals removes particular values with debugger names from f's naming data structures, +// removes all values with OpInvalid, and re-sorts the list of Names. 
+func deleteNamedVals(f *Func, toDelete []namedVal) { + // Arrange to delete from larger indices to smaller, to ensure swap-with-end deletion does not invalidate pending indices. + sort.Slice(toDelete, func(i, j int) bool { + if toDelete[i].locIndex != toDelete[j].locIndex { + return toDelete[i].locIndex > toDelete[j].locIndex + } + return toDelete[i].valIndex > toDelete[j].valIndex + + }) + + // Get rid of obsolete names + for _, d := range toDelete { + loc := f.Names[d.locIndex] + vals := f.NamedValues[*loc] + l := len(vals) - 1 + if l > 0 { + vals[d.valIndex] = vals[l] + } + vals[l] = nil + f.NamedValues[*loc] = vals[:l] + } + // Delete locations with no values attached. + end := len(f.Names) + for i := len(f.Names) - 1; i >= 0; i-- { + loc := f.Names[i] + vals := f.NamedValues[*loc] + last := len(vals) + for j := len(vals) - 1; j >= 0; j-- { + if vals[j].Op == OpInvalid { + last-- + vals[j] = vals[last] + vals[last] = nil + } + } + if last < len(vals) { + f.NamedValues[*loc] = vals[:last] + } + if len(vals) == 0 { + delete(f.NamedValues, *loc) + end-- + f.Names[i] = f.Names[end] + f.Names[end] = nil + } + } + f.Names = f.Names[:end] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/dom.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/dom.go new file mode 100644 index 0000000000000000000000000000000000000000..39ba4d1647b222534cfc6266cb1ebced62066b10 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/dom.go @@ -0,0 +1,275 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file contains code to compute the dominator tree +// of a control-flow graph. + +// postorder computes a postorder traversal ordering for the +// basic blocks in f. Unreachable blocks will not appear. 
+func postorder(f *Func) []*Block { + return postorderWithNumbering(f, nil) +} + +type blockAndIndex struct { + b *Block + index int // index is the number of successor edges of b that have already been explored. +} + +// postorderWithNumbering provides a DFS postordering. +// This seems to make loop-finding more robust. +func postorderWithNumbering(f *Func, ponums []int32) []*Block { + seen := f.Cache.allocBoolSlice(f.NumBlocks()) + defer f.Cache.freeBoolSlice(seen) + + // result ordering + order := make([]*Block, 0, len(f.Blocks)) + + // stack of blocks and next child to visit + // A constant bound allows this to be stack-allocated. 32 is + // enough to cover almost every postorderWithNumbering call. + s := make([]blockAndIndex, 0, 32) + s = append(s, blockAndIndex{b: f.Entry}) + seen[f.Entry.ID] = true + for len(s) > 0 { + tos := len(s) - 1 + x := s[tos] + b := x.b + if i := x.index; i < len(b.Succs) { + s[tos].index++ + bb := b.Succs[i].Block() + if !seen[bb.ID] { + seen[bb.ID] = true + s = append(s, blockAndIndex{b: bb}) + } + continue + } + s = s[:tos] + if ponums != nil { + ponums[b.ID] = int32(len(order)) + } + order = append(order, b) + } + return order +} + +type linkedBlocks func(*Block) []Edge + +func dominators(f *Func) []*Block { + preds := func(b *Block) []Edge { return b.Preds } + succs := func(b *Block) []Edge { return b.Succs } + + //TODO: benchmark and try to find criteria for swapping between + // dominatorsSimple and dominatorsLT + return f.dominatorsLTOrig(f.Entry, preds, succs) +} + +// dominatorsLTOrig runs Lengauer-Tarjan to compute a dominator tree starting at +// entry and using predFn/succFn to find predecessors/successors to allow +// computing both dominator and post-dominator trees. 
+func (f *Func) dominatorsLTOrig(entry *Block, predFn linkedBlocks, succFn linkedBlocks) []*Block { + // Adapted directly from the original TOPLAS article's "simple" algorithm + + maxBlockID := entry.Func.NumBlocks() + scratch := f.Cache.allocIDSlice(7 * maxBlockID) + defer f.Cache.freeIDSlice(scratch) + semi := scratch[0*maxBlockID : 1*maxBlockID] + vertex := scratch[1*maxBlockID : 2*maxBlockID] + label := scratch[2*maxBlockID : 3*maxBlockID] + parent := scratch[3*maxBlockID : 4*maxBlockID] + ancestor := scratch[4*maxBlockID : 5*maxBlockID] + bucketHead := scratch[5*maxBlockID : 6*maxBlockID] + bucketLink := scratch[6*maxBlockID : 7*maxBlockID] + + // This version uses integers for most of the computation, + // to make the work arrays smaller and pointer-free. + // fromID translates from ID to *Block where that is needed. + fromID := f.Cache.allocBlockSlice(maxBlockID) + defer f.Cache.freeBlockSlice(fromID) + for _, v := range f.Blocks { + fromID[v.ID] = v + } + idom := make([]*Block, maxBlockID) + + // Step 1. Carry out a depth first search of the problem graph. Number + // the vertices from 1 to n as they are reached during the search. + n := f.dfsOrig(entry, succFn, semi, vertex, label, parent) + + for i := n; i >= 2; i-- { + w := vertex[i] + + // step2 in TOPLAS paper + for _, e := range predFn(fromID[w]) { + v := e.b + if semi[v.ID] == 0 { + // skip unreachable predecessor + // not in original, but we're using existing pred instead of building one. + continue + } + u := evalOrig(v.ID, ancestor, semi, label) + if semi[u] < semi[w] { + semi[w] = semi[u] + } + } + + // add w to bucket[vertex[semi[w]]] + // implement bucket as a linked list implemented + // in a pair of arrays. 
+ vsw := vertex[semi[w]] + bucketLink[w] = bucketHead[vsw] + bucketHead[vsw] = w + + linkOrig(parent[w], w, ancestor) + + // step3 in TOPLAS paper + for v := bucketHead[parent[w]]; v != 0; v = bucketLink[v] { + u := evalOrig(v, ancestor, semi, label) + if semi[u] < semi[v] { + idom[v] = fromID[u] + } else { + idom[v] = fromID[parent[w]] + } + } + } + // step 4 in toplas paper + for i := ID(2); i <= n; i++ { + w := vertex[i] + if idom[w].ID != vertex[semi[w]] { + idom[w] = idom[idom[w].ID] + } + } + + return idom +} + +// dfsOrig performs a depth first search over the blocks starting at entry block +// (in arbitrary order). This is a de-recursed version of dfs from the +// original Tarjan-Lengauer TOPLAS article. It's important to return the +// same values for parent as the original algorithm. +func (f *Func) dfsOrig(entry *Block, succFn linkedBlocks, semi, vertex, label, parent []ID) ID { + n := ID(0) + s := make([]*Block, 0, 256) + s = append(s, entry) + + for len(s) > 0 { + v := s[len(s)-1] + s = s[:len(s)-1] + // recursing on v + + if semi[v.ID] != 0 { + continue // already visited + } + n++ + semi[v.ID] = n + vertex[n] = v.ID + label[v.ID] = v.ID + // ancestor[v] already zero + for _, e := range succFn(v) { + w := e.b + // if it has a dfnum, we've already visited it + if semi[w.ID] == 0 { + // yes, w can be pushed multiple times. + s = append(s, w) + parent[w.ID] = v.ID // keep overwriting this till it is visited. + } + } + } + return n +} + +// compressOrig is the "simple" compress function from LT paper. +func compressOrig(v ID, ancestor, semi, label []ID) { + if ancestor[ancestor[v]] != 0 { + compressOrig(ancestor[v], ancestor, semi, label) + if semi[label[ancestor[v]]] < semi[label[v]] { + label[v] = label[ancestor[v]] + } + ancestor[v] = ancestor[ancestor[v]] + } +} + +// evalOrig is the "simple" eval function from LT paper. 
+func evalOrig(v ID, ancestor, semi, label []ID) ID { + if ancestor[v] == 0 { + return v + } + compressOrig(v, ancestor, semi, label) + return label[v] +} + +func linkOrig(v, w ID, ancestor []ID) { + ancestor[w] = v +} + +// dominatorsSimple computes the dominator tree for f. It returns a slice +// which maps block ID to the immediate dominator of that block. +// Unreachable blocks map to nil. The entry block maps to nil. +func dominatorsSimple(f *Func) []*Block { + // A simple algorithm for now + // Cooper, Harvey, Kennedy + idom := make([]*Block, f.NumBlocks()) + + // Compute postorder walk + post := f.postorder() + + // Make map from block id to order index (for intersect call) + postnum := f.Cache.allocIntSlice(f.NumBlocks()) + defer f.Cache.freeIntSlice(postnum) + for i, b := range post { + postnum[b.ID] = i + } + + // Make the entry block a self-loop + idom[f.Entry.ID] = f.Entry + if postnum[f.Entry.ID] != len(post)-1 { + f.Fatalf("entry block %v not last in postorder", f.Entry) + } + + // Compute relaxation of idom entries + for { + changed := false + + for i := len(post) - 2; i >= 0; i-- { + b := post[i] + var d *Block + for _, e := range b.Preds { + p := e.b + if idom[p.ID] == nil { + continue + } + if d == nil { + d = p + continue + } + d = intersect(d, p, postnum, idom) + } + if d != idom[b.ID] { + idom[b.ID] = d + changed = true + } + } + if !changed { + break + } + } + // Set idom of entry block to nil instead of itself. + idom[f.Entry.ID] = nil + return idom +} + +// intersect finds the closest dominator of both b and c. +// It requires a postorder numbering of all the blocks. +func intersect(b, c *Block, postnum []int, idom []*Block) *Block { + // TODO: This loop is O(n^2). It used to be used in nilcheck, + // see BenchmarkNilCheckDeep*. 
+ for b != c { + if postnum[b.ID] < postnum[c.ID] { + b = idom[b.ID] + } else { + c = idom[c.ID] + } + } + return b +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/dom_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/dom_test.go new file mode 100644 index 0000000000000000000000000000000000000000..fa517183c2e5a43ff8b8fd64972a670b5bbd7379 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/dom_test.go @@ -0,0 +1,608 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/types" + "testing" +) + +func BenchmarkDominatorsLinear(b *testing.B) { benchmarkDominators(b, 10000, genLinear) } +func BenchmarkDominatorsFwdBack(b *testing.B) { benchmarkDominators(b, 10000, genFwdBack) } +func BenchmarkDominatorsManyPred(b *testing.B) { benchmarkDominators(b, 10000, genManyPred) } +func BenchmarkDominatorsMaxPred(b *testing.B) { benchmarkDominators(b, 10000, genMaxPred) } +func BenchmarkDominatorsMaxPredVal(b *testing.B) { benchmarkDominators(b, 10000, genMaxPredValue) } + +type blockGen func(size int) []bloc + +// genLinear creates an array of blocks that succeed one another +// b_n -> [b_n+1]. 
+func genLinear(size int) []bloc { + var blocs []bloc + blocs = append(blocs, + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Goto(blockn(0)), + ), + ) + for i := 0; i < size; i++ { + blocs = append(blocs, Bloc(blockn(i), + Goto(blockn(i+1)))) + } + + blocs = append(blocs, + Bloc(blockn(size), Goto("exit")), + Bloc("exit", Exit("mem")), + ) + + return blocs +} + +// genLinear creates an array of blocks that alternate between +// b_n -> [b_n+1], b_n -> [b_n+1, b_n-1] , b_n -> [b_n+1, b_n+2] +func genFwdBack(size int) []bloc { + var blocs []bloc + blocs = append(blocs, + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil), + Goto(blockn(0)), + ), + ) + for i := 0; i < size; i++ { + switch i % 2 { + case 0: + blocs = append(blocs, Bloc(blockn(i), + If("p", blockn(i+1), blockn(i+2)))) + case 1: + blocs = append(blocs, Bloc(blockn(i), + If("p", blockn(i+1), blockn(i-1)))) + } + } + + blocs = append(blocs, + Bloc(blockn(size), Goto("exit")), + Bloc("exit", Exit("mem")), + ) + + return blocs +} + +// genManyPred creates an array of blocks where 1/3rd have a successor of the +// first block, 1/3rd the last block, and the remaining third are plain. +func genManyPred(size int) []bloc { + var blocs []bloc + blocs = append(blocs, + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil), + Goto(blockn(0)), + ), + ) + + // We want predecessor lists to be long, so 2/3rds of the blocks have a + // successor of the first or last block. 
+ for i := 0; i < size; i++ { + switch i % 3 { + case 0: + blocs = append(blocs, Bloc(blockn(i), + Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil), + Goto(blockn(i+1)))) + case 1: + blocs = append(blocs, Bloc(blockn(i), + Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil), + If("p", blockn(i+1), blockn(0)))) + case 2: + blocs = append(blocs, Bloc(blockn(i), + Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil), + If("p", blockn(i+1), blockn(size)))) + } + } + + blocs = append(blocs, + Bloc(blockn(size), Goto("exit")), + Bloc("exit", Exit("mem")), + ) + + return blocs +} + +// genMaxPred maximizes the size of the 'exit' predecessor list. +func genMaxPred(size int) []bloc { + var blocs []bloc + blocs = append(blocs, + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil), + Goto(blockn(0)), + ), + ) + + for i := 0; i < size; i++ { + blocs = append(blocs, Bloc(blockn(i), + If("p", blockn(i+1), "exit"))) + } + + blocs = append(blocs, + Bloc(blockn(size), Goto("exit")), + Bloc("exit", Exit("mem")), + ) + + return blocs +} + +// genMaxPredValue is identical to genMaxPred but contains an +// additional value. +func genMaxPredValue(size int) []bloc { + var blocs []bloc + blocs = append(blocs, + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil), + Goto(blockn(0)), + ), + ) + + for i := 0; i < size; i++ { + blocs = append(blocs, Bloc(blockn(i), + Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil), + If("p", blockn(i+1), "exit"))) + } + + blocs = append(blocs, + Bloc(blockn(size), Goto("exit")), + Bloc("exit", Exit("mem")), + ) + + return blocs +} + +// sink for benchmark +var domBenchRes []*Block + +func benchmarkDominators(b *testing.B, size int, bg blockGen) { + c := testConfig(b) + fun := c.Fun("entry", bg(size)...) 
+ + CheckFunc(fun.f) + b.SetBytes(int64(size)) + b.ResetTimer() + for i := 0; i < b.N; i++ { + domBenchRes = dominators(fun.f) + } +} + +type domFunc func(f *Func) []*Block + +// verifyDominators verifies that the dominators of fut (function under test) +// as determined by domFn, match the map node->dominator +func verifyDominators(t *testing.T, fut fun, domFn domFunc, doms map[string]string) { + blockNames := map[*Block]string{} + for n, b := range fut.blocks { + blockNames[b] = n + } + + calcDom := domFn(fut.f) + + for n, d := range doms { + nblk, ok := fut.blocks[n] + if !ok { + t.Errorf("invalid block name %s", n) + } + dblk, ok := fut.blocks[d] + if !ok { + t.Errorf("invalid block name %s", d) + } + + domNode := calcDom[nblk.ID] + switch { + case calcDom[nblk.ID] == dblk: + calcDom[nblk.ID] = nil + continue + case calcDom[nblk.ID] != dblk: + t.Errorf("expected %s as dominator of %s, found %s", d, n, blockNames[domNode]) + default: + t.Fatal("unexpected dominator condition") + } + } + + for id, d := range calcDom { + // If nil, we've already verified it + if d == nil { + continue + } + for _, b := range fut.blocks { + if int(b.ID) == id { + t.Errorf("unexpected dominator of %s for %s", blockNames[d], blockNames[b]) + } + } + } + +} + +func TestDominatorsSingleBlock(t *testing.T) { + c := testConfig(t) + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Exit("mem"))) + + doms := map[string]string{} + + CheckFunc(fun.f) + verifyDominators(t, fun, dominators, doms) + verifyDominators(t, fun, dominatorsSimple, doms) + +} + +func TestDominatorsSimple(t *testing.T) { + c := testConfig(t) + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Goto("a")), + Bloc("a", + Goto("b")), + Bloc("b", + Goto("c")), + Bloc("c", + Goto("exit")), + Bloc("exit", + Exit("mem"))) + + doms := map[string]string{ + "a": "entry", + "b": "a", + "c": "b", + "exit": "c", + } + + CheckFunc(fun.f) + 
verifyDominators(t, fun, dominators, doms) + verifyDominators(t, fun, dominatorsSimple, doms) + +} + +func TestDominatorsMultPredFwd(t *testing.T) { + c := testConfig(t) + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil), + If("p", "a", "c")), + Bloc("a", + If("p", "b", "c")), + Bloc("b", + Goto("c")), + Bloc("c", + Goto("exit")), + Bloc("exit", + Exit("mem"))) + + doms := map[string]string{ + "a": "entry", + "b": "a", + "c": "entry", + "exit": "c", + } + + CheckFunc(fun.f) + verifyDominators(t, fun, dominators, doms) + verifyDominators(t, fun, dominatorsSimple, doms) +} + +func TestDominatorsDeadCode(t *testing.T) { + c := testConfig(t) + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("p", OpConstBool, types.Types[types.TBOOL], 0, nil), + If("p", "b3", "b5")), + Bloc("b2", Exit("mem")), + Bloc("b3", Goto("b2")), + Bloc("b4", Goto("b2")), + Bloc("b5", Goto("b2"))) + + doms := map[string]string{ + "b2": "entry", + "b3": "entry", + "b5": "entry", + } + + CheckFunc(fun.f) + verifyDominators(t, fun, dominators, doms) + verifyDominators(t, fun, dominatorsSimple, doms) +} + +func TestDominatorsMultPredRev(t *testing.T) { + c := testConfig(t) + fun := c.Fun("entry", + Bloc("entry", + Goto("first")), + Bloc("first", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil), + Goto("a")), + Bloc("a", + If("p", "b", "first")), + Bloc("b", + Goto("c")), + Bloc("c", + If("p", "exit", "b")), + Bloc("exit", + Exit("mem"))) + + doms := map[string]string{ + "first": "entry", + "a": "first", + "b": "a", + "c": "b", + "exit": "c", + } + + CheckFunc(fun.f) + verifyDominators(t, fun, dominators, doms) + verifyDominators(t, fun, dominatorsSimple, doms) +} + +func TestDominatorsMultPred(t *testing.T) { + c := testConfig(t) + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, 
types.TypeMem, 0, nil), + Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil), + If("p", "a", "c")), + Bloc("a", + If("p", "b", "c")), + Bloc("b", + Goto("c")), + Bloc("c", + If("p", "b", "exit")), + Bloc("exit", + Exit("mem"))) + + doms := map[string]string{ + "a": "entry", + "b": "entry", + "c": "entry", + "exit": "c", + } + + CheckFunc(fun.f) + verifyDominators(t, fun, dominators, doms) + verifyDominators(t, fun, dominatorsSimple, doms) +} + +func TestInfiniteLoop(t *testing.T) { + c := testConfig(t) + // note lack of an exit block + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil), + Goto("a")), + Bloc("a", + Goto("b")), + Bloc("b", + Goto("a"))) + + CheckFunc(fun.f) + doms := map[string]string{"a": "entry", + "b": "a"} + verifyDominators(t, fun, dominators, doms) +} + +func TestDomTricky(t *testing.T) { + doms := map[string]string{ + "4": "1", + "2": "4", + "5": "4", + "11": "4", + "15": "4", // the incorrect answer is "5" + "10": "15", + "19": "15", + } + + if4 := [2]string{"2", "5"} + if5 := [2]string{"15", "11"} + if15 := [2]string{"19", "10"} + + for i := 0; i < 8; i++ { + a := 1 & i + b := 1 & i >> 1 + c := 1 & i >> 2 + + cfg := testConfig(t) + fun := cfg.Fun("1", + Bloc("1", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil), + Goto("4")), + Bloc("2", + Goto("11")), + Bloc("4", + If("p", if4[a], if4[1-a])), // 2, 5 + Bloc("5", + If("p", if5[b], if5[1-b])), //15, 11 + Bloc("10", + Exit("mem")), + Bloc("11", + Goto("15")), + Bloc("15", + If("p", if15[c], if15[1-c])), //19, 10 + Bloc("19", + Goto("10"))) + CheckFunc(fun.f) + verifyDominators(t, fun, dominators, doms) + verifyDominators(t, fun, dominatorsSimple, doms) + } +} + +// generateDominatorMap uses dominatorsSimple to obtain a +// reference dominator tree for testing faster algorithms. 
+func generateDominatorMap(fut fun) map[string]string { + blockNames := map[*Block]string{} + for n, b := range fut.blocks { + blockNames[b] = n + } + referenceDom := dominatorsSimple(fut.f) + doms := make(map[string]string) + for _, b := range fut.f.Blocks { + if d := referenceDom[b.ID]; d != nil { + doms[blockNames[b]] = blockNames[d] + } + } + return doms +} + +func TestDominatorsPostTrickyA(t *testing.T) { + testDominatorsPostTricky(t, "b8", "b11", "b10", "b8", "b14", "b15") +} + +func TestDominatorsPostTrickyB(t *testing.T) { + testDominatorsPostTricky(t, "b11", "b8", "b10", "b8", "b14", "b15") +} + +func TestDominatorsPostTrickyC(t *testing.T) { + testDominatorsPostTricky(t, "b8", "b11", "b8", "b10", "b14", "b15") +} + +func TestDominatorsPostTrickyD(t *testing.T) { + testDominatorsPostTricky(t, "b11", "b8", "b8", "b10", "b14", "b15") +} + +func TestDominatorsPostTrickyE(t *testing.T) { + testDominatorsPostTricky(t, "b8", "b11", "b10", "b8", "b15", "b14") +} + +func TestDominatorsPostTrickyF(t *testing.T) { + testDominatorsPostTricky(t, "b11", "b8", "b10", "b8", "b15", "b14") +} + +func TestDominatorsPostTrickyG(t *testing.T) { + testDominatorsPostTricky(t, "b8", "b11", "b8", "b10", "b15", "b14") +} + +func TestDominatorsPostTrickyH(t *testing.T) { + testDominatorsPostTricky(t, "b11", "b8", "b8", "b10", "b15", "b14") +} + +func testDominatorsPostTricky(t *testing.T, b7then, b7else, b12then, b12else, b13then, b13else string) { + c := testConfig(t) + fun := c.Fun("b1", + Bloc("b1", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil), + If("p", "b3", "b2")), + Bloc("b3", + If("p", "b5", "b6")), + Bloc("b5", + Goto("b7")), + Bloc("b7", + If("p", b7then, b7else)), + Bloc("b8", + Goto("b13")), + Bloc("b13", + If("p", b13then, b13else)), + Bloc("b14", + Goto("b10")), + Bloc("b15", + Goto("b16")), + Bloc("b16", + Goto("b9")), + Bloc("b9", + Goto("b7")), + Bloc("b11", + Goto("b12")), + Bloc("b12", + If("p", 
b12then, b12else)), + Bloc("b10", + Goto("b6")), + Bloc("b6", + Goto("b17")), + Bloc("b17", + Goto("b18")), + Bloc("b18", + If("p", "b22", "b19")), + Bloc("b22", + Goto("b23")), + Bloc("b23", + If("p", "b21", "b19")), + Bloc("b19", + If("p", "b24", "b25")), + Bloc("b24", + Goto("b26")), + Bloc("b26", + Goto("b25")), + Bloc("b25", + If("p", "b27", "b29")), + Bloc("b27", + Goto("b30")), + Bloc("b30", + Goto("b28")), + Bloc("b29", + Goto("b31")), + Bloc("b31", + Goto("b28")), + Bloc("b28", + If("p", "b32", "b33")), + Bloc("b32", + Goto("b21")), + Bloc("b21", + Goto("b47")), + Bloc("b47", + If("p", "b45", "b46")), + Bloc("b45", + Goto("b48")), + Bloc("b48", + Goto("b49")), + Bloc("b49", + If("p", "b50", "b51")), + Bloc("b50", + Goto("b52")), + Bloc("b52", + Goto("b53")), + Bloc("b53", + Goto("b51")), + Bloc("b51", + Goto("b54")), + Bloc("b54", + Goto("b46")), + Bloc("b46", + Exit("mem")), + Bloc("b33", + Goto("b34")), + Bloc("b34", + Goto("b37")), + Bloc("b37", + If("p", "b35", "b36")), + Bloc("b35", + Goto("b38")), + Bloc("b38", + Goto("b39")), + Bloc("b39", + If("p", "b40", "b41")), + Bloc("b40", + Goto("b42")), + Bloc("b42", + Goto("b43")), + Bloc("b43", + Goto("b41")), + Bloc("b41", + Goto("b44")), + Bloc("b44", + Goto("b36")), + Bloc("b36", + Goto("b20")), + Bloc("b20", + Goto("b18")), + Bloc("b2", + Goto("b4")), + Bloc("b4", + Exit("mem"))) + CheckFunc(fun.f) + doms := generateDominatorMap(fun) + verifyDominators(t, fun, dominators, doms) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/expand_calls.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/expand_calls.go new file mode 100644 index 0000000000000000000000000000000000000000..b0788f1db4bba556b05f93d36705b90f2c975f09 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/expand_calls.go @@ -0,0 +1,1035 @@ +// Copyright 2020 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/abi" + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/src" + "fmt" +) + +func postExpandCallsDecompose(f *Func) { + decomposeUser(f) // redo user decompose to cleanup after expand calls + decomposeBuiltIn(f) // handles both regular decomposition and cleanup. +} + +func expandCalls(f *Func) { + // Convert each aggregate arg to a call into "dismantle aggregate, store/pass parts" + // Convert each aggregate result from a call into "assemble aggregate from parts" + // Convert each multivalue exit into "dismantle aggregate, store/return parts" + // Convert incoming aggregate arg into assembly of parts. + // Feed modified AST to decompose. + + sp, _ := f.spSb() + + x := &expandState{ + f: f, + debug: f.pass.debug, + regSize: f.Config.RegSize, + sp: sp, + typs: &f.Config.Types, + wideSelects: make(map[*Value]*Value), + commonArgs: make(map[selKey]*Value), + commonSelectors: make(map[selKey]*Value), + memForCall: make(map[ID]*Value), + } + + // For 32-bit, need to deal with decomposition of 64-bit integers, which depends on endianness. + if f.Config.BigEndian { + x.firstOp = OpInt64Hi + x.secondOp = OpInt64Lo + x.firstType = x.typs.Int32 + x.secondType = x.typs.UInt32 + } else { + x.firstOp = OpInt64Lo + x.secondOp = OpInt64Hi + x.firstType = x.typs.UInt32 + x.secondType = x.typs.Int32 + } + + // Defer select processing until after all calls and selects are seen. + var selects []*Value + var calls []*Value + var args []*Value + var exitBlocks []*Block + + var m0 *Value + + // Accumulate lists of calls, args, selects, and exit blocks to process, + // note "wide" selects consumed by stores, + // rewrite mem for each call, + // rewrite each OpSelectNAddr. 
+ for _, b := range f.Blocks { + for _, v := range b.Values { + switch v.Op { + case OpInitMem: + m0 = v + + case OpClosureLECall, OpInterLECall, OpStaticLECall, OpTailLECall: + calls = append(calls, v) + + case OpArg: + args = append(args, v) + + case OpStore: + if a := v.Args[1]; a.Op == OpSelectN && !CanSSA(a.Type) { + if a.Uses > 1 { + panic(fmt.Errorf("Saw double use of wide SelectN %s operand of Store %s", + a.LongString(), v.LongString())) + } + x.wideSelects[a] = v + } + + case OpSelectN: + if v.Type == types.TypeMem { + // rewrite the mem selector in place + call := v.Args[0] + aux := call.Aux.(*AuxCall) + mem := x.memForCall[call.ID] + if mem == nil { + v.AuxInt = int64(aux.abiInfo.OutRegistersUsed()) + x.memForCall[call.ID] = v + } else { + panic(fmt.Errorf("Saw two memories for call %v, %v and %v", call, mem, v)) + } + } else { + selects = append(selects, v) + } + + case OpSelectNAddr: + call := v.Args[0] + which := v.AuxInt + aux := call.Aux.(*AuxCall) + pt := v.Type + off := x.offsetFrom(x.f.Entry, x.sp, aux.OffsetOfResult(which), pt) + v.copyOf(off) + } + } + + // rewrite function results from an exit block + // values returned by function need to be split out into registers. 
+ if isBlockMultiValueExit(b) { + exitBlocks = append(exitBlocks, b) + } + } + + // Convert each aggregate arg into Make of its parts (and so on, to primitive types) + for _, v := range args { + var rc registerCursor + a := x.prAssignForArg(v) + aux := x.f.OwnAux + regs := a.Registers + var offset int64 + if len(regs) == 0 { + offset = a.FrameOffset(aux.abiInfo) + } + auxBase := x.offsetFrom(x.f.Entry, x.sp, offset, types.NewPtr(v.Type)) + rc.init(regs, aux.abiInfo, nil, auxBase, 0) + x.rewriteSelectOrArg(f.Entry.Pos, f.Entry, v, v, m0, v.Type, rc) + } + + // Rewrite selects of results (which may be aggregates) into make-aggregates of register/memory-targeted selects + for _, v := range selects { + if v.Op == OpInvalid { + continue + } + + call := v.Args[0] + aux := call.Aux.(*AuxCall) + mem := x.memForCall[call.ID] + if mem == nil { + mem = call.Block.NewValue1I(call.Pos, OpSelectN, types.TypeMem, int64(aux.abiInfo.OutRegistersUsed()), call) + x.memForCall[call.ID] = mem + } + + i := v.AuxInt + regs := aux.RegsOfResult(i) + + // If this select cannot fit into SSA and is stored, either disaggregate to register stores, or mem-mem move. + if store := x.wideSelects[v]; store != nil { + // Use the mem that comes from the store operation. + storeAddr := store.Args[0] + mem := store.Args[2] + if len(regs) > 0 { + // Cannot do a rewrite that builds up a result from pieces; instead, copy pieces to the store operation. + var rc registerCursor + rc.init(regs, aux.abiInfo, nil, storeAddr, 0) + mem = x.rewriteWideSelectToStores(call.Pos, call.Block, v, mem, v.Type, rc) + store.copyOf(mem) + } else { + // Move directly from AuxBase to store target; rewrite the store instruction. 
+ offset := aux.OffsetOfResult(i) + auxBase := x.offsetFrom(x.f.Entry, x.sp, offset, types.NewPtr(v.Type)) + // was Store dst, v, mem + // now Move dst, auxBase, mem + move := store.Block.NewValue3A(store.Pos, OpMove, types.TypeMem, v.Type, storeAddr, auxBase, mem) + move.AuxInt = v.Type.Size() + store.copyOf(move) + } + continue + } + + var auxBase *Value + if len(regs) == 0 { + offset := aux.OffsetOfResult(i) + auxBase = x.offsetFrom(x.f.Entry, x.sp, offset, types.NewPtr(v.Type)) + } + var rc registerCursor + rc.init(regs, aux.abiInfo, nil, auxBase, 0) + x.rewriteSelectOrArg(call.Pos, call.Block, v, v, mem, v.Type, rc) + } + + rewriteCall := func(v *Value, newOp Op, argStart int) { + // Break aggregate args passed to call into smaller pieces. + x.rewriteCallArgs(v, argStart) + v.Op = newOp + rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams()) + v.Type = types.NewResults(append(rts, types.TypeMem)) + } + + // Rewrite calls + for _, v := range calls { + switch v.Op { + case OpStaticLECall: + rewriteCall(v, OpStaticCall, 0) + case OpTailLECall: + rewriteCall(v, OpTailCall, 0) + case OpClosureLECall: + rewriteCall(v, OpClosureCall, 2) + case OpInterLECall: + rewriteCall(v, OpInterCall, 1) + } + } + + // Rewrite results from exit blocks + for _, b := range exitBlocks { + v := b.Controls[0] + x.rewriteFuncResults(v, b, f.OwnAux) + b.SetControl(v) + } + +} + +func (x *expandState) rewriteFuncResults(v *Value, b *Block, aux *AuxCall) { + // This is very similar to rewriteCallArgs + // differences: + // firstArg + preArgs + // sp vs auxBase + + m0 := v.MemoryArg() + mem := m0 + + allResults := []*Value{} + var oldArgs []*Value + argsWithoutMem := v.Args[:len(v.Args)-1] + + for j, a := range argsWithoutMem { + oldArgs = append(oldArgs, a) + i := int64(j) + auxType := aux.TypeOfResult(i) + auxBase := b.NewValue2A(v.Pos, OpLocalAddr, types.NewPtr(auxType), aux.NameOfResult(i), x.sp, mem) + auxOffset := int64(0) + aRegs := aux.RegsOfResult(int64(j)) + if a.Op == 
OpDereference { + a.Op = OpLoad + } + var rc registerCursor + var result *[]*Value + if len(aRegs) > 0 { + result = &allResults + } else { + if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr { + addr := a.Args[0] + if addr.MemoryArg() == a.MemoryArg() && addr.Aux == aux.NameOfResult(i) { + continue // Self move to output parameter + } + } + } + rc.init(aRegs, aux.abiInfo, result, auxBase, auxOffset) + mem = x.decomposeAsNecessary(v.Pos, b, a, mem, rc) + } + v.resetArgs() + v.AddArgs(allResults...) + v.AddArg(mem) + for _, a := range oldArgs { + if a.Uses == 0 { + if x.debug > 1 { + x.Printf("...marking %v unused\n", a.LongString()) + } + x.invalidateRecursively(a) + } + } + v.Type = types.NewResults(append(abi.RegisterTypes(aux.abiInfo.OutParams()), types.TypeMem)) + return +} + +func (x *expandState) rewriteCallArgs(v *Value, firstArg int) { + if x.debug > 1 { + x.indent(3) + defer x.indent(-3) + x.Printf("rewriteCallArgs(%s; %d)\n", v.LongString(), firstArg) + } + // Thread the stores on the memory arg + aux := v.Aux.(*AuxCall) + m0 := v.MemoryArg() + mem := m0 + allResults := []*Value{} + oldArgs := []*Value{} + argsWithoutMem := v.Args[firstArg : len(v.Args)-1] // Also strip closure/interface Op-specific args + + sp := x.sp + if v.Op == OpTailLECall { + // For tail call, we unwind the frame before the call so we'll use the caller's + // SP. + sp = x.f.Entry.NewValue1(src.NoXPos, OpGetCallerSP, x.typs.Uintptr, mem) + } + + for i, a := range argsWithoutMem { // skip leading non-parameter SSA Args and trailing mem SSA Arg. 
+ oldArgs = append(oldArgs, a) + auxI := int64(i) + aRegs := aux.RegsOfArg(auxI) + aType := aux.TypeOfArg(auxI) + + if a.Op == OpDereference { + a.Op = OpLoad + } + var rc registerCursor + var result *[]*Value + var aOffset int64 + if len(aRegs) > 0 { + result = &allResults + } else { + aOffset = aux.OffsetOfArg(auxI) + } + if v.Op == OpTailLECall && a.Op == OpArg && a.AuxInt == 0 { + // It's common for a tail call passing the same arguments (e.g. method wrapper), + // so this would be a self copy. Detect this and optimize it out. + n := a.Aux.(*ir.Name) + if n.Class == ir.PPARAM && n.FrameOffset()+x.f.Config.ctxt.Arch.FixedFrameSize == aOffset { + continue + } + } + if x.debug > 1 { + x.Printf("...storeArg %s, %v, %d\n", a.LongString(), aType, aOffset) + } + + rc.init(aRegs, aux.abiInfo, result, sp, aOffset) + mem = x.decomposeAsNecessary(v.Pos, v.Block, a, mem, rc) + } + var preArgStore [2]*Value + preArgs := append(preArgStore[:0], v.Args[0:firstArg]...) + v.resetArgs() + v.AddArgs(preArgs...) + v.AddArgs(allResults...) 
+ v.AddArg(mem) + for _, a := range oldArgs { + if a.Uses == 0 { + x.invalidateRecursively(a) + } + } + + return +} + +func (x *expandState) decomposePair(pos src.XPos, b *Block, a, mem *Value, t0, t1 *types.Type, o0, o1 Op, rc *registerCursor) *Value { + e := b.NewValue1(pos, o0, t0, a) + pos = pos.WithNotStmt() + mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(t0)) + e = b.NewValue1(pos, o1, t1, a) + mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(t1)) + return mem +} + +func (x *expandState) decomposeOne(pos src.XPos, b *Block, a, mem *Value, t0 *types.Type, o0 Op, rc *registerCursor) *Value { + e := b.NewValue1(pos, o0, t0, a) + pos = pos.WithNotStmt() + mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(t0)) + return mem +} + +// decomposeAsNecessary converts a value (perhaps an aggregate) passed to a call or returned by a function, +// into the appropriate sequence of stores and register assignments to transmit that value in a given ABI, and +// returns the current memory after this convert/rewrite (it may be the input memory, perhaps stores were needed.) +// 'pos' is the source position all this is tied to +// 'b' is the enclosing block +// 'a' is the value to decompose +// 'm0' is the input memory arg used for the first store (or returned if there are no stores) +// 'rc' is a registerCursor which identifies the register/memory destination for the value +func (x *expandState) decomposeAsNecessary(pos src.XPos, b *Block, a, m0 *Value, rc registerCursor) *Value { + if x.debug > 1 { + x.indent(3) + defer x.indent(-3) + } + at := a.Type + if at.Size() == 0 { + return m0 + } + if a.Op == OpDereference { + a.Op = OpLoad // For purposes of parameter passing expansion, a Dereference is a Load. 
+ } + + if !rc.hasRegs() && !CanSSA(at) { + dst := x.offsetFrom(b, rc.storeDest, rc.storeOffset, types.NewPtr(at)) + if x.debug > 1 { + x.Printf("...recur store %s at %s\n", a.LongString(), dst.LongString()) + } + if a.Op == OpLoad { + m0 = b.NewValue3A(pos, OpMove, types.TypeMem, at, dst, a.Args[0], m0) + m0.AuxInt = at.Size() + return m0 + } else { + panic(fmt.Errorf("Store of not a load")) + } + } + + mem := m0 + switch at.Kind() { + case types.TARRAY: + et := at.Elem() + for i := int64(0); i < at.NumElem(); i++ { + e := b.NewValue1I(pos, OpArraySelect, et, i, a) + pos = pos.WithNotStmt() + mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(et)) + } + return mem + + case types.TSTRUCT: + for i := 0; i < at.NumFields(); i++ { + et := at.Field(i).Type // might need to read offsets from the fields + e := b.NewValue1I(pos, OpStructSelect, et, int64(i), a) + pos = pos.WithNotStmt() + if x.debug > 1 { + x.Printf("...recur decompose %s, %v\n", e.LongString(), et) + } + mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(et)) + } + return mem + + case types.TSLICE: + mem = x.decomposeOne(pos, b, a, mem, at.Elem().PtrTo(), OpSlicePtr, &rc) + pos = pos.WithNotStmt() + mem = x.decomposeOne(pos, b, a, mem, x.typs.Int, OpSliceLen, &rc) + return x.decomposeOne(pos, b, a, mem, x.typs.Int, OpSliceCap, &rc) + + case types.TSTRING: + return x.decomposePair(pos, b, a, mem, x.typs.BytePtr, x.typs.Int, OpStringPtr, OpStringLen, &rc) + + case types.TINTER: + mem = x.decomposeOne(pos, b, a, mem, x.typs.Uintptr, OpITab, &rc) + pos = pos.WithNotStmt() + // Immediate interfaces cause so many headaches. 
+ if a.Op == OpIMake { + data := a.Args[1] + for data.Op == OpStructMake1 || data.Op == OpArrayMake1 { + data = data.Args[0] + } + return x.decomposeAsNecessary(pos, b, data, mem, rc.next(data.Type)) + } + return x.decomposeOne(pos, b, a, mem, x.typs.BytePtr, OpIData, &rc) + + case types.TCOMPLEX64: + return x.decomposePair(pos, b, a, mem, x.typs.Float32, x.typs.Float32, OpComplexReal, OpComplexImag, &rc) + + case types.TCOMPLEX128: + return x.decomposePair(pos, b, a, mem, x.typs.Float64, x.typs.Float64, OpComplexReal, OpComplexImag, &rc) + + case types.TINT64: + if at.Size() > x.regSize { + return x.decomposePair(pos, b, a, mem, x.firstType, x.secondType, x.firstOp, x.secondOp, &rc) + } + case types.TUINT64: + if at.Size() > x.regSize { + return x.decomposePair(pos, b, a, mem, x.typs.UInt32, x.typs.UInt32, x.firstOp, x.secondOp, &rc) + } + } + + // An atomic type, either record the register or store it and update the memory. + + if rc.hasRegs() { + if x.debug > 1 { + x.Printf("...recur addArg %s\n", a.LongString()) + } + rc.addArg(a) + } else { + dst := x.offsetFrom(b, rc.storeDest, rc.storeOffset, types.NewPtr(at)) + if x.debug > 1 { + x.Printf("...recur store %s at %s\n", a.LongString(), dst.LongString()) + } + mem = b.NewValue3A(pos, OpStore, types.TypeMem, at, dst, a, mem) + } + + return mem +} + +// Convert scalar OpArg into the proper OpWhateverArg instruction +// Convert scalar OpSelectN into perhaps-differently-indexed OpSelectN +// Convert aggregate OpArg into Make of its parts (which are eventually scalars) +// Convert aggregate OpSelectN into Make of its parts (which are eventually scalars) +// Returns the converted value. 
+// +// - "pos" the position for any generated instructions +// - "b" the block for any generated instructions +// - "container" the outermost OpArg/OpSelectN +// - "a" the instruction to overwrite, if any (only the outermost caller) +// - "m0" the memory arg for any loads that are necessary +// - "at" the type of the Arg/part +// - "rc" the register/memory cursor locating the various parts of the Arg. +func (x *expandState) rewriteSelectOrArg(pos src.XPos, b *Block, container, a, m0 *Value, at *types.Type, rc registerCursor) *Value { + + if at == types.TypeMem { + a.copyOf(m0) + return a + } + + makeOf := func(a *Value, op Op, args []*Value) *Value { + if a == nil { + a = b.NewValue0(pos, op, at) + a.AddArgs(args...) + } else { + a.resetArgs() + a.Aux, a.AuxInt = nil, 0 + a.Pos, a.Op, a.Type = pos, op, at + a.AddArgs(args...) + } + return a + } + + if at.Size() == 0 { + // For consistency, create these values even though they'll ultimately be unused + if at.IsArray() { + return makeOf(a, OpArrayMake0, nil) + } + if at.IsStruct() { + return makeOf(a, OpStructMake0, nil) + } + return a + } + + sk := selKey{from: container, size: 0, offsetOrIndex: rc.storeOffset, typ: at} + dupe := x.commonSelectors[sk] + if dupe != nil { + if a == nil { + return dupe + } + a.copyOf(dupe) + return a + } + + var argStore [10]*Value + args := argStore[:0] + + addArg := func(a0 *Value) { + if a0 == nil { + as := "" + if a != nil { + as = a.LongString() + } + panic(fmt.Errorf("a0 should not be nil, a=%v, container=%v, at=%v", as, container.LongString(), at)) + } + args = append(args, a0) + } + + switch at.Kind() { + case types.TARRAY: + et := at.Elem() + for i := int64(0); i < at.NumElem(); i++ { + e := x.rewriteSelectOrArg(pos, b, container, nil, m0, et, rc.next(et)) + addArg(e) + } + a = makeOf(a, OpArrayMake1, args) + x.commonSelectors[sk] = a + return a + + case types.TSTRUCT: + // Assume ssagen/ssa.go (in buildssa) spills large aggregates so they won't appear here. 
+ for i := 0; i < at.NumFields(); i++ { + et := at.Field(i).Type + e := x.rewriteSelectOrArg(pos, b, container, nil, m0, et, rc.next(et)) + if e == nil { + panic(fmt.Errorf("nil e, et=%v, et.Size()=%d, i=%d", et, et.Size(), i)) + } + addArg(e) + pos = pos.WithNotStmt() + } + if at.NumFields() > 4 { + panic(fmt.Errorf("Too many fields (%d, %d bytes), container=%s", at.NumFields(), at.Size(), container.LongString())) + } + a = makeOf(a, StructMakeOp(at.NumFields()), args) + x.commonSelectors[sk] = a + return a + + case types.TSLICE: + addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, at.Elem().PtrTo(), rc.next(x.typs.BytePtr))) + pos = pos.WithNotStmt() + addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Int, rc.next(x.typs.Int))) + addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Int, rc.next(x.typs.Int))) + a = makeOf(a, OpSliceMake, args) + x.commonSelectors[sk] = a + return a + + case types.TSTRING: + addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.BytePtr, rc.next(x.typs.BytePtr))) + pos = pos.WithNotStmt() + addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Int, rc.next(x.typs.Int))) + a = makeOf(a, OpStringMake, args) + x.commonSelectors[sk] = a + return a + + case types.TINTER: + addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Uintptr, rc.next(x.typs.Uintptr))) + pos = pos.WithNotStmt() + addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.BytePtr, rc.next(x.typs.BytePtr))) + a = makeOf(a, OpIMake, args) + x.commonSelectors[sk] = a + return a + + case types.TCOMPLEX64: + addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Float32, rc.next(x.typs.Float32))) + pos = pos.WithNotStmt() + addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Float32, rc.next(x.typs.Float32))) + a = makeOf(a, OpComplexMake, args) + x.commonSelectors[sk] = a + return a + + case types.TCOMPLEX128: + addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Float64, 
rc.next(x.typs.Float64))) + pos = pos.WithNotStmt() + addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Float64, rc.next(x.typs.Float64))) + a = makeOf(a, OpComplexMake, args) + x.commonSelectors[sk] = a + return a + + case types.TINT64: + if at.Size() > x.regSize { + addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.firstType, rc.next(x.firstType))) + pos = pos.WithNotStmt() + addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.secondType, rc.next(x.secondType))) + if !x.f.Config.BigEndian { + // Int64Make args are big, little + args[0], args[1] = args[1], args[0] + } + a = makeOf(a, OpInt64Make, args) + x.commonSelectors[sk] = a + return a + } + case types.TUINT64: + if at.Size() > x.regSize { + addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.UInt32, rc.next(x.typs.UInt32))) + pos = pos.WithNotStmt() + addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.UInt32, rc.next(x.typs.UInt32))) + if !x.f.Config.BigEndian { + // Int64Make args are big, little + args[0], args[1] = args[1], args[0] + } + a = makeOf(a, OpInt64Make, args) + x.commonSelectors[sk] = a + return a + } + } + + // An atomic type, either record the register or store it and update the memory. + + // Depending on the container Op, the leaves are either OpSelectN or OpArg{Int,Float}Reg + + if container.Op == OpArg { + if rc.hasRegs() { + op, i := rc.ArgOpAndRegisterFor() + name := container.Aux.(*ir.Name) + a = makeOf(a, op, nil) + a.AuxInt = i + a.Aux = &AuxNameOffset{name, rc.storeOffset} + } else { + key := selKey{container, rc.storeOffset, at.Size(), at} + w := x.commonArgs[key] + if w != nil && w.Uses != 0 { + if a == nil { + a = w + } else { + a.copyOf(w) + } + } else { + if a == nil { + aux := container.Aux + auxInt := container.AuxInt + rc.storeOffset + a = container.Block.NewValue0IA(container.Pos, OpArg, at, auxInt, aux) + } else { + // do nothing, the original should be okay. 
+ } + x.commonArgs[key] = a + } + } + } else if container.Op == OpSelectN { + call := container.Args[0] + aux := call.Aux.(*AuxCall) + which := container.AuxInt + + if at == types.TypeMem { + if a != m0 || a != x.memForCall[call.ID] { + panic(fmt.Errorf("Memories %s, %s, and %s should all be equal after %s", a.LongString(), m0.LongString(), x.memForCall[call.ID], call.LongString())) + } + } else if rc.hasRegs() { + firstReg := uint32(0) + for i := 0; i < int(which); i++ { + firstReg += uint32(len(aux.abiInfo.OutParam(i).Registers)) + } + reg := int64(rc.nextSlice + Abi1RO(firstReg)) + a = makeOf(a, OpSelectN, []*Value{call}) + a.AuxInt = reg + } else { + off := x.offsetFrom(x.f.Entry, x.sp, rc.storeOffset+aux.OffsetOfResult(which), types.NewPtr(at)) + a = makeOf(a, OpLoad, []*Value{off, m0}) + } + + } else { + panic(fmt.Errorf("Expected container OpArg or OpSelectN, saw %v instead", container.LongString())) + } + + x.commonSelectors[sk] = a + return a +} + +// rewriteWideSelectToStores handles the case of a SelectN'd result from a function call that is too large for SSA, +// but is transferred in registers. In this case the register cursor tracks both operands; the register sources and +// the memory destinations. +// This returns the memory flowing out of the last store +func (x *expandState) rewriteWideSelectToStores(pos src.XPos, b *Block, container, m0 *Value, at *types.Type, rc registerCursor) *Value { + + if at.Size() == 0 { + return m0 + } + + switch at.Kind() { + case types.TARRAY: + et := at.Elem() + for i := int64(0); i < at.NumElem(); i++ { + m0 = x.rewriteWideSelectToStores(pos, b, container, m0, et, rc.next(et)) + } + return m0 + + case types.TSTRUCT: + // Assume ssagen/ssa.go (in buildssa) spills large aggregates so they won't appear here. 
+ for i := 0; i < at.NumFields(); i++ { + et := at.Field(i).Type + m0 = x.rewriteWideSelectToStores(pos, b, container, m0, et, rc.next(et)) + pos = pos.WithNotStmt() + } + return m0 + + case types.TSLICE: + m0 = x.rewriteWideSelectToStores(pos, b, container, m0, at.Elem().PtrTo(), rc.next(x.typs.BytePtr)) + pos = pos.WithNotStmt() + m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Int, rc.next(x.typs.Int)) + m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Int, rc.next(x.typs.Int)) + return m0 + + case types.TSTRING: + m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.BytePtr, rc.next(x.typs.BytePtr)) + pos = pos.WithNotStmt() + m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Int, rc.next(x.typs.Int)) + return m0 + + case types.TINTER: + m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Uintptr, rc.next(x.typs.Uintptr)) + pos = pos.WithNotStmt() + m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.BytePtr, rc.next(x.typs.BytePtr)) + return m0 + + case types.TCOMPLEX64: + m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Float32, rc.next(x.typs.Float32)) + pos = pos.WithNotStmt() + m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Float32, rc.next(x.typs.Float32)) + return m0 + + case types.TCOMPLEX128: + m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Float64, rc.next(x.typs.Float64)) + pos = pos.WithNotStmt() + m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Float64, rc.next(x.typs.Float64)) + return m0 + + case types.TINT64: + if at.Size() > x.regSize { + m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.firstType, rc.next(x.firstType)) + pos = pos.WithNotStmt() + m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.secondType, rc.next(x.secondType)) + return m0 + } + case types.TUINT64: + if at.Size() > x.regSize { + m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.UInt32, rc.next(x.typs.UInt32)) + 
pos = pos.WithNotStmt() + m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.UInt32, rc.next(x.typs.UInt32)) + return m0 + } + } + + // TODO could change treatment of too-large OpArg, would deal with it here. + if container.Op == OpSelectN { + call := container.Args[0] + aux := call.Aux.(*AuxCall) + which := container.AuxInt + + if rc.hasRegs() { + firstReg := uint32(0) + for i := 0; i < int(which); i++ { + firstReg += uint32(len(aux.abiInfo.OutParam(i).Registers)) + } + reg := int64(rc.nextSlice + Abi1RO(firstReg)) + a := b.NewValue1I(pos, OpSelectN, at, reg, call) + dst := x.offsetFrom(b, rc.storeDest, rc.storeOffset, types.NewPtr(at)) + m0 = b.NewValue3A(pos, OpStore, types.TypeMem, at, dst, a, m0) + } else { + panic(fmt.Errorf("Expected rc to have registers")) + } + } else { + panic(fmt.Errorf("Expected container OpSelectN, saw %v instead", container.LongString())) + } + return m0 +} + +func isBlockMultiValueExit(b *Block) bool { + return (b.Kind == BlockRet || b.Kind == BlockRetJmp) && b.Controls[0] != nil && b.Controls[0].Op == OpMakeResult +} + +type Abi1RO uint8 // An offset within a parameter's slice of register indices, for abi1. + +// A registerCursor tracks which register is used for an Arg or regValues, or a piece of such. +type registerCursor struct { + storeDest *Value // if there are no register targets, then this is the base of the store. 
+ storeOffset int64 + regs []abi.RegIndex // the registers available for this Arg/result (which is all in registers or not at all) + nextSlice Abi1RO // the next register/register-slice offset + config *abi.ABIConfig + regValues *[]*Value // values assigned to registers accumulate here +} + +func (c *registerCursor) String() string { + dest := "" + if c.storeDest != nil { + dest = fmt.Sprintf("%s+%d", c.storeDest.String(), c.storeOffset) + } + regs := "" + if c.regValues != nil { + regs = "" + for i, x := range *c.regValues { + if i > 0 { + regs = regs + "; " + } + regs = regs + x.LongString() + } + } + + // not printing the config because that has not been useful + return fmt.Sprintf("RCSR{storeDest=%v, regsLen=%d, nextSlice=%d, regValues=[%s]}", dest, len(c.regs), c.nextSlice, regs) +} + +// next effectively post-increments the register cursor; the receiver is advanced, +// the (aligned) old value is returned. +func (c *registerCursor) next(t *types.Type) registerCursor { + c.storeOffset = types.RoundUp(c.storeOffset, t.Alignment()) + rc := *c + c.storeOffset = types.RoundUp(c.storeOffset+t.Size(), t.Alignment()) + if int(c.nextSlice) < len(c.regs) { + w := c.config.NumParamRegs(t) + c.nextSlice += Abi1RO(w) + } + return rc +} + +// plus returns a register cursor offset from the original, without modifying the original. +func (c *registerCursor) plus(regWidth Abi1RO) registerCursor { + rc := *c + rc.nextSlice += regWidth + return rc +} + +// at returns the register cursor for component i of t, where the first +// component is numbered 0. 
+// at returns the register cursor for component i of t, where the first
+// component is numbered 0. For arrays it advances by i times the per-element
+// register width; for structs it steps past fields 0..i-1 with next (which
+// also advances the store offset).
+func (c *registerCursor) at(t *types.Type, i int) registerCursor {
+	rc := *c
+	if i == 0 || len(c.regs) == 0 {
+		return rc
+	}
+	if t.IsArray() {
+		w := c.config.NumParamRegs(t.Elem())
+		rc.nextSlice += Abi1RO(i * w)
+		return rc
+	}
+	if t.IsStruct() {
+		for j := 0; j < i; j++ {
+			rc.next(t.FieldType(j))
+		}
+		return rc
+	}
+	panic("Haven't implemented this case yet, do I need to?")
+}
+
+// init (re)initializes the cursor for a fresh arg/result: its registers (may
+// be empty), the ABI info supplying the config, the accumulator for register
+// values (may be nil), and the memory destination base and offset.
+func (c *registerCursor) init(regs []abi.RegIndex, info *abi.ABIParamResultInfo, result *[]*Value, storeDest *Value, storeOffset int64) {
+	c.regs = regs
+	c.nextSlice = 0
+	c.storeOffset = storeOffset
+	c.storeDest = storeDest
+	c.config = info.Config()
+	c.regValues = result
+}
+
+// addArg records v as the next register-passed value.
+func (c *registerCursor) addArg(v *Value) {
+	*c.regValues = append(*c.regValues, v)
+}
+
+// hasRegs reports whether this arg/result is passed in registers at all.
+func (c *registerCursor) hasRegs() bool {
+	return len(c.regs) > 0
+}
+
+// ArgOpAndRegisterFor returns the Op and register index for the cursor's
+// current register slot.
+func (c *registerCursor) ArgOpAndRegisterFor() (Op, int64) {
+	r := c.regs[c.nextSlice]
+	return ArgOpAndRegisterFor(r, c.config)
+}
+
+// ArgOpAndRegisterFor converts an abi register index into an ssa Op and corresponding
+// arg register index.
+func ArgOpAndRegisterFor(r abi.RegIndex, abiConfig *abi.ABIConfig) (Op, int64) { + i := abiConfig.FloatIndexFor(r) + if i >= 0 { // float PR + return OpArgFloatReg, i + } + return OpArgIntReg, int64(r) +} + +type selKey struct { + from *Value // what is selected from + offsetOrIndex int64 // whatever is appropriate for the selector + size int64 + typ *types.Type +} + +type expandState struct { + f *Func + debug int // odd values log lost statement markers, so likely settings are 1 (stmts), 2 (expansion), and 3 (both) + regSize int64 + sp *Value + typs *Types + + firstOp Op // for 64-bit integers on 32-bit machines, first word in memory + secondOp Op // for 64-bit integers on 32-bit machines, second word in memory + firstType *types.Type // first half type, for Int64 + secondType *types.Type // second half type, for Int64 + + wideSelects map[*Value]*Value // Selects that are not SSA-able, mapped to consuming stores. + commonSelectors map[selKey]*Value // used to de-dupe selectors + commonArgs map[selKey]*Value // used to de-dupe OpArg/OpArgIntReg/OpArgFloatReg + memForCall map[ID]*Value // For a call, need to know the unique selector that gets the mem. + indentLevel int // Indentation for debugging recursion +} + +// intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target +// that has no 64-bit integer registers. +func (x *expandState) intPairTypes(et types.Kind) (tHi, tLo *types.Type) { + tHi = x.typs.UInt32 + if et == types.TINT64 { + tHi = x.typs.Int32 + } + tLo = x.typs.UInt32 + return +} + +// offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP +func (x *expandState) offsetFrom(b *Block, from *Value, offset int64, pt *types.Type) *Value { + ft := from.Type + if offset == 0 { + if ft == pt { + return from + } + // This captures common, (apparently) safe cases. 
The unsafe cases involve ft == uintptr + if (ft.IsPtr() || ft.IsUnsafePtr()) && pt.IsPtr() { + return from + } + } + // Simplify, canonicalize + for from.Op == OpOffPtr { + offset += from.AuxInt + from = from.Args[0] + } + if from == x.sp { + return x.f.ConstOffPtrSP(pt, offset, x.sp) + } + return b.NewValue1I(from.Pos.WithNotStmt(), OpOffPtr, pt, offset, from) +} + +func (x *expandState) regWidth(t *types.Type) Abi1RO { + return Abi1RO(x.f.ABI1.NumParamRegs(t)) +} + +// regOffset returns the register offset of the i'th element of type t +func (x *expandState) regOffset(t *types.Type, i int) Abi1RO { + // TODO maybe cache this in a map if profiling recommends. + if i == 0 { + return 0 + } + if t.IsArray() { + return Abi1RO(i) * x.regWidth(t.Elem()) + } + if t.IsStruct() { + k := Abi1RO(0) + for j := 0; j < i; j++ { + k += x.regWidth(t.FieldType(j)) + } + return k + } + panic("Haven't implemented this case yet, do I need to?") +} + +// prAssignForArg returns the ABIParamAssignment for v, assumed to be an OpArg. +func (x *expandState) prAssignForArg(v *Value) *abi.ABIParamAssignment { + if v.Op != OpArg { + panic(fmt.Errorf("Wanted OpArg, instead saw %s", v.LongString())) + } + return ParamAssignmentForArgName(x.f, v.Aux.(*ir.Name)) +} + +// ParamAssignmentForArgName returns the ABIParamAssignment for f's arg with matching name. +func ParamAssignmentForArgName(f *Func, name *ir.Name) *abi.ABIParamAssignment { + abiInfo := f.OwnAux.abiInfo + ip := abiInfo.InParams() + for i, a := range ip { + if a.Name == name { + return &ip[i] + } + } + panic(fmt.Errorf("Did not match param %v in prInfo %+v", name, abiInfo.InParams())) +} + +// indent increments (or decrements) the indentation. +func (x *expandState) indent(n int) { + x.indentLevel += n +} + +// Printf does an indented fmt.Printf on the format and args. 
+func (x *expandState) Printf(format string, a ...interface{}) (n int, err error) { + if x.indentLevel > 0 { + fmt.Printf("%[1]*s", x.indentLevel, "") + } + return fmt.Printf(format, a...) +} + +func (x *expandState) invalidateRecursively(a *Value) { + var s string + if x.debug > 0 { + plus := " " + if a.Pos.IsStmt() == src.PosIsStmt { + plus = " +" + } + s = a.String() + plus + a.Pos.LineNumber() + " " + a.LongString() + if x.debug > 1 { + x.Printf("...marking %v unused\n", s) + } + } + lost := a.invalidateRecursively() + if x.debug&1 != 0 && lost { // For odd values of x.debug, do this. + x.Printf("Lost statement marker in %s on former %s\n", base.Ctxt.Pkgpath+"."+x.f.Name, s) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/export_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/export_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b2c4b1997f2ba0bfd5d21a1ba1004f578a3d919e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/export_test.go @@ -0,0 +1,120 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package ssa
+
+import (
+	"testing"
+
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/typecheck"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+	"cmd/internal/obj/arm64"
+	"cmd/internal/obj/s390x"
+	"cmd/internal/obj/x86"
+	"cmd/internal/src"
+)
+
+// Re-export package-internal passes for tests.
+var CheckFunc = checkFunc
+var Opt = opt
+var Deadcode = deadcode
+var Copyelim = copyelim
+
+var testCtxts = map[string]*obj.Link{
+	"amd64": obj.Linknew(&x86.Linkamd64),
+	"s390x": obj.Linknew(&s390x.Links390x),
+	"arm64": obj.Linknew(&arm64.Linkarm64),
+}
+
+func testConfig(tb testing.TB) *Conf      { return testConfigArch(tb, "amd64") }
+func testConfigS390X(tb testing.TB) *Conf { return testConfigArch(tb, "s390x") }
+func testConfigARM64(tb testing.TB) *Conf { return testConfigArch(tb, "arm64") }
+
+// testConfigArch builds a test Config for the named 64-bit architecture.
+func testConfigArch(tb testing.TB, arch string) *Conf {
+	ctxt, ok := testCtxts[arch]
+	if !ok {
+		tb.Fatalf("unknown arch %s", arch)
+	}
+	if ctxt.Arch.PtrSize != 8 {
+		tb.Fatal("testTypes is 64-bit only")
+	}
+	c := &Conf{
+		config: NewConfig(arch, testTypes, ctxt, true, false),
+		tb:     tb,
+	}
+	return c
+}
+
+// Conf bundles a Config with the owning test and a lazily built Frontend.
+type Conf struct {
+	config *Config
+	tb     testing.TB
+	fe     Frontend
+}
+
+// Frontend returns (building on first use) a TestFrontend wired to a fake function.
+func (c *Conf) Frontend() Frontend {
+	if c.fe == nil {
+		pkg := types.NewPkg("my/import/path", "path")
+		fn := ir.NewFunc(src.NoXPos, src.NoXPos, pkg.Lookup("function"), types.NewSignature(nil, nil, nil))
+		fn.DeclareParams(true)
+		fn.LSym = &obj.LSym{Name: "my/import/path.function"}
+
+		c.fe = TestFrontend{
+			t:    c.tb,
+			ctxt: c.config.ctxt,
+			f:    fn,
+		}
+	}
+	return c.fe
+}
+
+// Temp returns a fresh fake PAUTO temporary of the given type.
+func (c *Conf) Temp(typ *types.Type) *ir.Name {
+	n := ir.NewNameAt(src.NoXPos, &types.Sym{Name: "aFakeAuto"}, typ)
+	n.Class = ir.PAUTO
+	return n
+}
+
+// TestFrontend is a test-only frontend.
+// It assumes 64 bit integers and pointers.
+type TestFrontend struct {
+	t    testing.TB
+	ctxt *obj.Link
+	f    *ir.Func
+}
+
+func (TestFrontend) StringData(s string) *obj.LSym {
+	return nil
+}
+func (d TestFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot {
+	return LocalSlot{N: parent.N, Type: t, Off: offset}
+}
+func (d TestFrontend) Syslook(s string) *obj.LSym {
+	return d.ctxt.Lookup(s)
+}
+func (TestFrontend) UseWriteBarrier() bool {
+	return true // only writebarrier_test cares
+}
+
+func (d TestFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, args...) }
+func (d TestFrontend) Log() bool                            { return true }
+
+func (d TestFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) }
+func (d TestFrontend) Warnl(_ src.XPos, msg string, args ...interface{})  { d.t.Logf(msg, args...) }
+func (d TestFrontend) Debug_checknil() bool                               { return false }
+
+func (d TestFrontend) Func() *ir.Func {
+	return d.f
+}
+
+var testTypes Types
+
+func init() {
+	// TODO(mdempsky): Push into types.InitUniverse or typecheck.InitUniverse.
+	types.PtrSize = 8
+	types.RegSize = 8
+	types.MaxWidth = 1 << 50
+
+	typecheck.InitUniverse()
+	testTypes.SetTypPtrs()
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/flagalloc.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/flagalloc.go
new file mode 100644
index 0000000000000000000000000000000000000000..cf2c9a0023f925a003c6bdac4ff7f56702204a89
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/flagalloc.go
@@ -0,0 +1,270 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// flagalloc allocates the flag register among all the flag-generating
+// instructions. Flag values are recomputed if they need to be
+// spilled/restored.
+func flagalloc(f *Func) { + // Compute the in-register flag value we want at the end of + // each block. This is basically a best-effort live variable + // analysis, so it can be much simpler than a full analysis. + end := f.Cache.allocValueSlice(f.NumBlocks()) + defer f.Cache.freeValueSlice(end) + po := f.postorder() + for n := 0; n < 2; n++ { + for _, b := range po { + // Walk values backwards to figure out what flag + // value we want in the flag register at the start + // of the block. + var flag *Value + for _, c := range b.ControlValues() { + if c.Type.IsFlags() { + if flag != nil { + panic("cannot have multiple controls using flags") + } + flag = c + } + } + if flag == nil { + flag = end[b.ID] + } + for j := len(b.Values) - 1; j >= 0; j-- { + v := b.Values[j] + if v == flag { + flag = nil + } + if v.clobbersFlags() { + flag = nil + } + for _, a := range v.Args { + if a.Type.IsFlags() { + flag = a + } + } + } + if flag != nil { + for _, e := range b.Preds { + p := e.b + end[p.ID] = flag + } + } + } + } + + // For blocks which have a flags control value, that's the only value + // we can leave in the flags register at the end of the block. (There + // is no place to put a flag regeneration instruction.) + for _, b := range f.Blocks { + if b.Kind == BlockDefer { + // Defer blocks internally use/clobber the flags value. + end[b.ID] = nil + continue + } + for _, v := range b.ControlValues() { + if v.Type.IsFlags() && end[b.ID] != v { + end[b.ID] = nil + } + } + } + + // Compute which flags values will need to be spilled. + spill := map[ID]bool{} + for _, b := range f.Blocks { + var flag *Value + if len(b.Preds) > 0 { + flag = end[b.Preds[0].b.ID] + } + for _, v := range b.Values { + for _, a := range v.Args { + if !a.Type.IsFlags() { + continue + } + if a == flag { + continue + } + // a will need to be restored here. 
+ spill[a.ID] = true + flag = a + } + if v.clobbersFlags() { + flag = nil + } + if v.Type.IsFlags() { + flag = v + } + } + for _, v := range b.ControlValues() { + if v != flag && v.Type.IsFlags() { + spill[v.ID] = true + } + } + if v := end[b.ID]; v != nil && v != flag { + spill[v.ID] = true + } + } + + // Add flag spill and recomputation where they are needed. + var remove []*Value // values that should be checked for possible removal + var oldSched []*Value + for _, b := range f.Blocks { + oldSched = append(oldSched[:0], b.Values...) + b.Values = b.Values[:0] + // The current live flag value (the pre-flagalloc copy). + var flag *Value + if len(b.Preds) > 0 { + flag = end[b.Preds[0].b.ID] + // Note: the following condition depends on the lack of critical edges. + for _, e := range b.Preds[1:] { + p := e.b + if end[p.ID] != flag { + f.Fatalf("live flag in %s's predecessors not consistent", b) + } + } + } + for _, v := range oldSched { + if v.Op == OpPhi && v.Type.IsFlags() { + f.Fatalf("phi of flags not supported: %s", v.LongString()) + } + + // If v will be spilled, and v uses memory, then we must split it + // into a load + a flag generator. + if spill[v.ID] && v.MemoryArg() != nil { + remove = append(remove, v) + if !f.Config.splitLoad(v) { + f.Fatalf("can't split flag generator: %s", v.LongString()) + } + } + + // Make sure any flag arg of v is in the flags register. + // If not, recompute it. + for i, a := range v.Args { + if !a.Type.IsFlags() { + continue + } + if a == flag { + continue + } + // Recalculate a + c := copyFlags(a, b) + // Update v. + v.SetArg(i, c) + // Remember the most-recently computed flag value. + flag = a + } + // Issue v. + b.Values = append(b.Values, v) + if v.clobbersFlags() { + flag = nil + } + if v.Type.IsFlags() { + flag = v + } + } + for i, v := range b.ControlValues() { + if v != flag && v.Type.IsFlags() { + // Recalculate control value. 
+ remove = append(remove, v) + c := copyFlags(v, b) + b.ReplaceControl(i, c) + flag = v + } + } + if v := end[b.ID]; v != nil && v != flag { + // Need to reissue flag generator for use by + // subsequent blocks. + remove = append(remove, v) + copyFlags(v, b) + // Note: this flag generator is not properly linked up + // with the flag users. This breaks the SSA representation. + // We could fix up the users with another pass, but for now + // we'll just leave it. (Regalloc has the same issue for + // standard regs, and it runs next.) + // For this reason, take care not to add this flag + // generator to the remove list. + } + } + + // Save live flag state for later. + for _, b := range f.Blocks { + b.FlagsLiveAtEnd = end[b.ID] != nil + } + + // Remove any now-dead values. + // The number of values to remove is likely small, + // and removing them requires processing all values in a block, + // so minimize the number of blocks that we touch. + + // Shrink remove to contain only dead values, and clobber those dead values. + for i := 0; i < len(remove); i++ { + v := remove[i] + if v.Uses == 0 { + v.reset(OpInvalid) + continue + } + // Remove v. + last := len(remove) - 1 + remove[i] = remove[last] + remove[last] = nil + remove = remove[:last] + i-- // reprocess value at i + } + + if len(remove) == 0 { + return + } + + removeBlocks := f.newSparseSet(f.NumBlocks()) + defer f.retSparseSet(removeBlocks) + for _, v := range remove { + removeBlocks.add(v.Block.ID) + } + + // Process affected blocks, preserving value order. 
+ for _, b := range f.Blocks { + if !removeBlocks.contains(b.ID) { + continue + } + i := 0 + for j := 0; j < len(b.Values); j++ { + v := b.Values[j] + if v.Op == OpInvalid { + continue + } + b.Values[i] = v + i++ + } + b.truncateValues(i) + } +} + +func (v *Value) clobbersFlags() bool { + if opcodeTable[v.Op].clobberFlags { + return true + } + if v.Type.IsTuple() && (v.Type.FieldType(0).IsFlags() || v.Type.FieldType(1).IsFlags()) { + // This case handles the possibility where a flag value is generated but never used. + // In that case, there's no corresponding Select to overwrite the flags value, + // so we must consider flags clobbered by the tuple-generating instruction. + return true + } + return false +} + +// copyFlags copies v (flag generator) into b, returns the copy. +// If v's arg is also flags, copy recursively. +func copyFlags(v *Value, b *Block) *Value { + flagsArgs := make(map[int]*Value) + for i, a := range v.Args { + if a.Type.IsFlags() || a.Type.IsTuple() { + flagsArgs[i] = copyFlags(a, b) + } + } + c := v.copyInto(b) + for i, a := range flagsArgs { + c.SetArg(i, a) + } + return c +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/flags_amd64_test.s b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/flags_amd64_test.s new file mode 100644 index 0000000000000000000000000000000000000000..7402f6badb1d8254bd879a4d7e92118860ca2b81 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/flags_amd64_test.s @@ -0,0 +1,29 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "textflag.h" + +TEXT ·asmAddFlags(SB),NOSPLIT,$0-24 + MOVQ x+0(FP), AX + ADDQ y+8(FP), AX + PUSHFQ + POPQ AX + MOVQ AX, ret+16(FP) + RET + +TEXT ·asmSubFlags(SB),NOSPLIT,$0-24 + MOVQ x+0(FP), AX + SUBQ y+8(FP), AX + PUSHFQ + POPQ AX + MOVQ AX, ret+16(FP) + RET + +TEXT ·asmAndFlags(SB),NOSPLIT,$0-24 + MOVQ x+0(FP), AX + ANDQ y+8(FP), AX + PUSHFQ + POPQ AX + MOVQ AX, ret+16(FP) + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/flags_arm64_test.s b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/flags_arm64_test.s new file mode 100644 index 0000000000000000000000000000000000000000..639d7e3aedc299ece9b9a0a27debc8b566eee922 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/flags_arm64_test.s @@ -0,0 +1,30 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·asmAddFlags(SB),NOSPLIT,$0-24 + MOVD x+0(FP), R0 + MOVD y+8(FP), R1 + CMN R0, R1 + WORD $0xd53b4200 // MOVD NZCV, R0 + MOVD R0, ret+16(FP) + RET + +TEXT ·asmSubFlags(SB),NOSPLIT,$0-24 + MOVD x+0(FP), R0 + MOVD y+8(FP), R1 + CMP R1, R0 + WORD $0xd53b4200 // MOVD NZCV, R0 + MOVD R0, ret+16(FP) + RET + +TEXT ·asmAndFlags(SB),NOSPLIT,$0-24 + MOVD x+0(FP), R0 + MOVD y+8(FP), R1 + TST R1, R0 + WORD $0xd53b4200 // MOVD NZCV, R0 + BIC $0x30000000, R0 // clear C, V bits, as TST does not change those flags + MOVD R0, ret+16(FP) + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/flags_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/flags_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d0079ac5e83823d8e22ad414f88f61af3aa56bf5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/flags_test.go @@ -0,0 +1,108 @@ +// Copyright 2020 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 || arm64 + +package ssa + +// This file tests the functions addFlags64 and subFlags64 by comparing their +// results to what the chip calculates. + +import ( + "runtime" + "testing" +) + +func TestAddFlagsNative(t *testing.T) { + var numbers = []int64{ + 1, 0, -1, + 2, -2, + 1<<63 - 1, -1 << 63, + } + coverage := map[flagConstant]bool{} + for _, x := range numbers { + for _, y := range numbers { + a := addFlags64(x, y) + b := flagRegister2flagConstant(asmAddFlags(x, y), false) + if a != b { + t.Errorf("asmAdd diff: x=%x y=%x got=%s want=%s\n", x, y, a, b) + } + coverage[a] = true + } + } + if len(coverage) != 9 { // TODO: can we cover all outputs? + t.Errorf("coverage too small, got %d want 9", len(coverage)) + } +} + +func TestSubFlagsNative(t *testing.T) { + var numbers = []int64{ + 1, 0, -1, + 2, -2, + 1<<63 - 1, -1 << 63, + } + coverage := map[flagConstant]bool{} + for _, x := range numbers { + for _, y := range numbers { + a := subFlags64(x, y) + b := flagRegister2flagConstant(asmSubFlags(x, y), true) + if a != b { + t.Errorf("asmSub diff: x=%x y=%x got=%s want=%s\n", x, y, a, b) + } + coverage[a] = true + } + } + if len(coverage) != 7 { // TODO: can we cover all outputs? 
+ t.Errorf("coverage too small, got %d want 7", len(coverage)) + } +} + +func TestAndFlagsNative(t *testing.T) { + var numbers = []int64{ + 1, 0, -1, + 2, -2, + 1<<63 - 1, -1 << 63, + } + coverage := map[flagConstant]bool{} + for _, x := range numbers { + for _, y := range numbers { + a := logicFlags64(x & y) + b := flagRegister2flagConstant(asmAndFlags(x, y), false) + if a != b { + t.Errorf("asmAnd diff: x=%x y=%x got=%s want=%s\n", x, y, a, b) + } + coverage[a] = true + } + } + if len(coverage) != 3 { + t.Errorf("coverage too small, got %d want 3", len(coverage)) + } +} + +func asmAddFlags(x, y int64) int +func asmSubFlags(x, y int64) int +func asmAndFlags(x, y int64) int + +func flagRegister2flagConstant(x int, sub bool) flagConstant { + var fcb flagConstantBuilder + switch runtime.GOARCH { + case "amd64": + fcb.Z = x>>6&1 != 0 + fcb.N = x>>7&1 != 0 + fcb.C = x>>0&1 != 0 + if sub { + // Convert from amd64-sense to arm-sense + fcb.C = !fcb.C + } + fcb.V = x>>11&1 != 0 + case "arm64": + fcb.Z = x>>30&1 != 0 + fcb.N = x>>31&1 != 0 + fcb.C = x>>29&1 != 0 + fcb.V = x>>28&1 != 0 + default: + panic("unsupported architecture: " + runtime.GOARCH) + } + return fcb.encode() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/fmahash_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/fmahash_test.go new file mode 100644 index 0000000000000000000000000000000000000000..dfa1aa1147ebf63d4419a5e594c2fbfa4e1b443b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/fmahash_test.go @@ -0,0 +1,52 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa_test + +import ( + "internal/testenv" + "path/filepath" + "regexp" + "runtime" + "testing" +) + +// TestFmaHash checks that the hash-test machinery works properly for a single case. 
+// It also runs ssa/check and gccheck to be sure that those are checked at least a +// little in each run.bash. It does not check or run the generated code. +// The test file is however a useful example of fused-vs-cascaded multiply-add. +func TestFmaHash(t *testing.T) { + switch runtime.GOOS { + case "linux", "darwin": + default: + t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS) + } + switch runtime.GOARCH { + case "amd64", "arm64": + default: + t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH) + } + + testenv.MustHaveGoBuild(t) + gocmd := testenv.GoToolPath(t) + tmpdir := t.TempDir() + source := filepath.Join("testdata", "fma.go") + output := filepath.Join(tmpdir, "fma.exe") + cmd := testenv.Command(t, gocmd, "build", "-o", output, source) + // The hash-dependence on file path name is dodged by specifying "all hashes ending in 1" plus "all hashes ending in 0" + // i.e., all hashes. This will print all the FMAs; this test is only interested in one of them (that should appear near the end). + cmd.Env = append(cmd.Env, "GOCOMPILEDEBUG=fmahash=1/0", "GOOS=linux", "GOARCH=arm64", "HOME="+tmpdir) + t.Logf("%v", cmd) + t.Logf("%v", cmd.Env) + b, e := cmd.CombinedOutput() + if e != nil { + t.Error(e) + } + s := string(b) // Looking for "GOFMAHASH triggered main.main:24" + re := "fmahash(0?) triggered .*fma.go:29:..;.*fma.go:18:.." + match := regexp.MustCompile(re) + if !match.MatchString(s) { + t.Errorf("Expected to match '%s' with \n-----\n%s-----", re, s) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/func.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/func.go new file mode 100644 index 0000000000000000000000000000000000000000..031d94f90cf1ffad38136b37d19135087ef0e5b2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/func.go @@ -0,0 +1,842 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/abi" + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/src" + "fmt" + "math" + "strings" +) + +// A Func represents a Go func declaration (or function literal) and its body. +// This package compiles each Func independently. +// Funcs are single-use; a new Func must be created for every compiled function. +type Func struct { + Config *Config // architecture information + Cache *Cache // re-usable cache + fe Frontend // frontend state associated with this Func, callbacks into compiler frontend + pass *pass // current pass information (name, options, etc.) + Name string // e.g. NewFunc or (*Func).NumBlocks (no package prefix) + Type *types.Type // type signature of the function. + Blocks []*Block // unordered set of all basic blocks (note: not indexable by ID) + Entry *Block // the entry basic block + + bid idAlloc // block ID allocator + vid idAlloc // value ID allocator + + HTMLWriter *HTMLWriter // html writer, for debugging + PrintOrHtmlSSA bool // true if GOSSAFUNC matches, true even if fe.Log() (spew phase results to stdout) is false. There's an odd dependence on this in debug.go for method logf. + ruleMatches map[string]int // number of times countRule was called during compilation for any given string + ABI0 *abi.ABIConfig // A copy, for no-sync access + ABI1 *abi.ABIConfig // A copy, for no-sync access + ABISelf *abi.ABIConfig // ABI for function being compiled + ABIDefault *abi.ABIConfig // ABI for rtcall and other no-parsed-signature/pragma functions. + + scheduled bool // Values in Blocks are in final order + laidout bool // Blocks are ordered + NoSplit bool // true if function is marked as nosplit. Used by schedule check pass. + dumpFileSeq uint8 // the sequence numbers of dump file. 
(%s_%02d__%s.dump", funcname, dumpFileSeq, phaseName) + + // when register allocation is done, maps value ids to locations + RegAlloc []Location + + // temporary registers allocated to rare instructions + tempRegs map[ID]*Register + + // map from LocalSlot to set of Values that we want to store in that slot. + NamedValues map[LocalSlot][]*Value + // Names is a copy of NamedValues.Keys. We keep a separate list + // of keys to make iteration order deterministic. + Names []*LocalSlot + // Canonicalize root/top-level local slots, and canonicalize their pieces. + // Because LocalSlot pieces refer to their parents with a pointer, this ensures that equivalent slots really are equal. + CanonicalLocalSlots map[LocalSlot]*LocalSlot + CanonicalLocalSplits map[LocalSlotSplitKey]*LocalSlot + + // RegArgs is a slice of register-memory pairs that must be spilled and unspilled in the uncommon path of function entry. + RegArgs []Spill + // OwnAux describes parameters and results for this function. + OwnAux *AuxCall + + freeValues *Value // free Values linked by argstorage[0]. All other fields except ID are 0/nil. + freeBlocks *Block // free Blocks linked by succstorage[0].b. All other fields except ID are 0/nil. + + cachedPostorder []*Block // cached postorder traversal + cachedIdom []*Block // cached immediate dominators + cachedSdom SparseTree // cached dominator tree + cachedLoopnest *loopnest // cached loop nest information + cachedLineStarts *xposmap // cached map/set of xpos to integers + + auxmap auxmap // map from aux values to opaque ids used by CSE + constants map[int64][]*Value // constants cache, keyed by constant value; users must check value's Op and Type +} + +type LocalSlotSplitKey struct { + parent *LocalSlot + Off int64 // offset of slot in N + Type *types.Type // type of slot +} + +// NewFunc returns a new, empty function object. +// Caller must reset cache before calling NewFunc. 
+func (c *Config) NewFunc(fe Frontend, cache *Cache) *Func { + return &Func{ + fe: fe, + Config: c, + Cache: cache, + + NamedValues: make(map[LocalSlot][]*Value), + CanonicalLocalSlots: make(map[LocalSlot]*LocalSlot), + CanonicalLocalSplits: make(map[LocalSlotSplitKey]*LocalSlot), + } +} + +// NumBlocks returns an integer larger than the id of any Block in the Func. +func (f *Func) NumBlocks() int { + return f.bid.num() +} + +// NumValues returns an integer larger than the id of any Value in the Func. +func (f *Func) NumValues() int { + return f.vid.num() +} + +// NameABI returns the function name followed by comma and the ABI number. +// This is intended for use with GOSSAFUNC and HTML dumps, and differs from +// the linker's "<1>" convention because "<" and ">" require shell quoting +// and are not legal file names (for use with GOSSADIR) on Windows. +func (f *Func) NameABI() string { + return FuncNameABI(f.Name, f.ABISelf.Which()) +} + +// FuncNameABI returns n followed by a comma and the value of a. +// This is a separate function to allow a single point encoding +// of the format, which is used in places where there's not a Func yet. +func FuncNameABI(n string, a obj.ABI) string { + return fmt.Sprintf("%s,%d", n, a) +} + +// newSparseSet returns a sparse set that can store at least up to n integers. +func (f *Func) newSparseSet(n int) *sparseSet { + return f.Cache.allocSparseSet(n) +} + +// retSparseSet returns a sparse set to the config's cache of sparse +// sets to be reused by f.newSparseSet. +func (f *Func) retSparseSet(ss *sparseSet) { + f.Cache.freeSparseSet(ss) +} + +// newSparseMap returns a sparse map that can store at least up to n integers. +func (f *Func) newSparseMap(n int) *sparseMap { + return f.Cache.allocSparseMap(n) +} + +// retSparseMap returns a sparse map to the config's cache of sparse +// sets to be reused by f.newSparseMap. 
+func (f *Func) retSparseMap(ss *sparseMap) { + f.Cache.freeSparseMap(ss) +} + +// newSparseMapPos returns a sparse map that can store at least up to n integers. +func (f *Func) newSparseMapPos(n int) *sparseMapPos { + return f.Cache.allocSparseMapPos(n) +} + +// retSparseMapPos returns a sparse map to the config's cache of sparse +// sets to be reused by f.newSparseMapPos. +func (f *Func) retSparseMapPos(ss *sparseMapPos) { + f.Cache.freeSparseMapPos(ss) +} + +// newPoset returns a new poset from the internal cache +func (f *Func) newPoset() *poset { + if len(f.Cache.scrPoset) > 0 { + po := f.Cache.scrPoset[len(f.Cache.scrPoset)-1] + f.Cache.scrPoset = f.Cache.scrPoset[:len(f.Cache.scrPoset)-1] + return po + } + return newPoset() +} + +// retPoset returns a poset to the internal cache +func (f *Func) retPoset(po *poset) { + f.Cache.scrPoset = append(f.Cache.scrPoset, po) +} + +func (f *Func) localSlotAddr(slot LocalSlot) *LocalSlot { + a, ok := f.CanonicalLocalSlots[slot] + if !ok { + a = new(LocalSlot) + *a = slot // don't escape slot + f.CanonicalLocalSlots[slot] = a + } + return a +} + +func (f *Func) SplitString(name *LocalSlot) (*LocalSlot, *LocalSlot) { + ptrType := types.NewPtr(types.Types[types.TUINT8]) + lenType := types.Types[types.TINT] + // Split this string up into two separate variables. + p := f.SplitSlot(name, ".ptr", 0, ptrType) + l := f.SplitSlot(name, ".len", ptrType.Size(), lenType) + return p, l +} + +func (f *Func) SplitInterface(name *LocalSlot) (*LocalSlot, *LocalSlot) { + n := name.N + u := types.Types[types.TUINTPTR] + t := types.NewPtr(types.Types[types.TUINT8]) + // Split this interface up into two separate variables. 
+ sfx := ".itab" + if n.Type().IsEmptyInterface() { + sfx = ".type" + } + c := f.SplitSlot(name, sfx, 0, u) // see comment in typebits.Set + d := f.SplitSlot(name, ".data", u.Size(), t) + return c, d +} + +func (f *Func) SplitSlice(name *LocalSlot) (*LocalSlot, *LocalSlot, *LocalSlot) { + ptrType := types.NewPtr(name.Type.Elem()) + lenType := types.Types[types.TINT] + p := f.SplitSlot(name, ".ptr", 0, ptrType) + l := f.SplitSlot(name, ".len", ptrType.Size(), lenType) + c := f.SplitSlot(name, ".cap", ptrType.Size()+lenType.Size(), lenType) + return p, l, c +} + +func (f *Func) SplitComplex(name *LocalSlot) (*LocalSlot, *LocalSlot) { + s := name.Type.Size() / 2 + var t *types.Type + if s == 8 { + t = types.Types[types.TFLOAT64] + } else { + t = types.Types[types.TFLOAT32] + } + r := f.SplitSlot(name, ".real", 0, t) + i := f.SplitSlot(name, ".imag", t.Size(), t) + return r, i +} + +func (f *Func) SplitInt64(name *LocalSlot) (*LocalSlot, *LocalSlot) { + var t *types.Type + if name.Type.IsSigned() { + t = types.Types[types.TINT32] + } else { + t = types.Types[types.TUINT32] + } + if f.Config.BigEndian { + return f.SplitSlot(name, ".hi", 0, t), f.SplitSlot(name, ".lo", t.Size(), types.Types[types.TUINT32]) + } + return f.SplitSlot(name, ".hi", t.Size(), t), f.SplitSlot(name, ".lo", 0, types.Types[types.TUINT32]) +} + +func (f *Func) SplitStruct(name *LocalSlot, i int) *LocalSlot { + st := name.Type + return f.SplitSlot(name, st.FieldName(i), st.FieldOff(i), st.FieldType(i)) +} +func (f *Func) SplitArray(name *LocalSlot) *LocalSlot { + n := name.N + at := name.Type + if at.NumElem() != 1 { + base.FatalfAt(n.Pos(), "bad array size") + } + et := at.Elem() + return f.SplitSlot(name, "[0]", 0, et) +} + +func (f *Func) SplitSlot(name *LocalSlot, sfx string, offset int64, t *types.Type) *LocalSlot { + lssk := LocalSlotSplitKey{name, offset, t} + if als, ok := f.CanonicalLocalSplits[lssk]; ok { + return als + } + // Note: the _ field may appear several times. 
But + // have no fear, identically-named but distinct Autos are + // ok, albeit maybe confusing for a debugger. + ls := f.fe.SplitSlot(name, sfx, offset, t) + f.CanonicalLocalSplits[lssk] = &ls + return &ls +} + +// newValue allocates a new Value with the given fields and places it at the end of b.Values. +func (f *Func) newValue(op Op, t *types.Type, b *Block, pos src.XPos) *Value { + var v *Value + if f.freeValues != nil { + v = f.freeValues + f.freeValues = v.argstorage[0] + v.argstorage[0] = nil + } else { + ID := f.vid.get() + if int(ID) < len(f.Cache.values) { + v = &f.Cache.values[ID] + v.ID = ID + } else { + v = &Value{ID: ID} + } + } + v.Op = op + v.Type = t + v.Block = b + if notStmtBoundary(op) { + pos = pos.WithNotStmt() + } + v.Pos = pos + b.Values = append(b.Values, v) + return v +} + +// newValueNoBlock allocates a new Value with the given fields. +// The returned value is not placed in any block. Once the caller +// decides on a block b, it must set b.Block and append +// the returned value to b.Values. +func (f *Func) newValueNoBlock(op Op, t *types.Type, pos src.XPos) *Value { + var v *Value + if f.freeValues != nil { + v = f.freeValues + f.freeValues = v.argstorage[0] + v.argstorage[0] = nil + } else { + ID := f.vid.get() + if int(ID) < len(f.Cache.values) { + v = &f.Cache.values[ID] + v.ID = ID + } else { + v = &Value{ID: ID} + } + } + v.Op = op + v.Type = t + v.Block = nil // caller must fix this. + if notStmtBoundary(op) { + pos = pos.WithNotStmt() + } + v.Pos = pos + return v +} + +// LogStat writes a string key and int value as a warning in a +// tab-separated format easily handled by spreadsheets or awk. +// file names, lines, and function names are included to provide enough (?) +// context to allow item-by-item comparisons across runs. 
+// For example: +// awk 'BEGIN {FS="\t"} $3~/TIME/{sum+=$4} END{print "t(ns)=",sum}' t.log +func (f *Func) LogStat(key string, args ...interface{}) { + value := "" + for _, a := range args { + value += fmt.Sprintf("\t%v", a) + } + n := "missing_pass" + if f.pass != nil { + n = strings.Replace(f.pass.name, " ", "_", -1) + } + f.Warnl(f.Entry.Pos, "\t%s\t%s%s\t%s", n, key, value, f.Name) +} + +// unCacheLine removes v from f's constant cache "line" for aux, +// resets v.InCache when it is found (and removed), +// and returns whether v was found in that line. +func (f *Func) unCacheLine(v *Value, aux int64) bool { + vv := f.constants[aux] + for i, cv := range vv { + if v == cv { + vv[i] = vv[len(vv)-1] + vv[len(vv)-1] = nil + f.constants[aux] = vv[0 : len(vv)-1] + v.InCache = false + return true + } + } + return false +} + +// unCache removes v from f's constant cache. +func (f *Func) unCache(v *Value) { + if v.InCache { + aux := v.AuxInt + if f.unCacheLine(v, aux) { + return + } + if aux == 0 { + switch v.Op { + case OpConstNil: + aux = constNilMagic + case OpConstSlice: + aux = constSliceMagic + case OpConstString: + aux = constEmptyStringMagic + case OpConstInterface: + aux = constInterfaceMagic + } + if aux != 0 && f.unCacheLine(v, aux) { + return + } + } + f.Fatalf("unCached value %s not found in cache, auxInt=0x%x, adjusted aux=0x%x", v.LongString(), v.AuxInt, aux) + } +} + +// freeValue frees a value. It must no longer be referenced or have any args. +func (f *Func) freeValue(v *Value) { + if v.Block == nil { + f.Fatalf("trying to free an already freed value") + } + if v.Uses != 0 { + f.Fatalf("value %s still has %d uses", v, v.Uses) + } + if len(v.Args) != 0 { + f.Fatalf("value %s still has %d args", v, len(v.Args)) + } + // Clear everything but ID (which we reuse). 
+ id := v.ID + if v.InCache { + f.unCache(v) + } + *v = Value{} + v.ID = id + v.argstorage[0] = f.freeValues + f.freeValues = v +} + +// NewBlock allocates a new Block of the given kind and places it at the end of f.Blocks. +func (f *Func) NewBlock(kind BlockKind) *Block { + var b *Block + if f.freeBlocks != nil { + b = f.freeBlocks + f.freeBlocks = b.succstorage[0].b + b.succstorage[0].b = nil + } else { + ID := f.bid.get() + if int(ID) < len(f.Cache.blocks) { + b = &f.Cache.blocks[ID] + b.ID = ID + } else { + b = &Block{ID: ID} + } + } + b.Kind = kind + b.Func = f + b.Preds = b.predstorage[:0] + b.Succs = b.succstorage[:0] + b.Values = b.valstorage[:0] + f.Blocks = append(f.Blocks, b) + f.invalidateCFG() + return b +} + +func (f *Func) freeBlock(b *Block) { + if b.Func == nil { + f.Fatalf("trying to free an already freed block") + } + // Clear everything but ID (which we reuse). + id := b.ID + *b = Block{} + b.ID = id + b.succstorage[0].b = f.freeBlocks + f.freeBlocks = b +} + +// NewValue0 returns a new value in the block with no arguments and zero aux values. +func (b *Block) NewValue0(pos src.XPos, op Op, t *types.Type) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = 0 + v.Args = v.argstorage[:0] + return v +} + +// NewValue0I returns a new value in the block with no arguments and an auxint value. +func (b *Block) NewValue0I(pos src.XPos, op Op, t *types.Type, auxint int64) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = auxint + v.Args = v.argstorage[:0] + return v +} + +// NewValue0A returns a new value in the block with no arguments and an aux value. +func (b *Block) NewValue0A(pos src.XPos, op Op, t *types.Type, aux Aux) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = 0 + v.Aux = aux + v.Args = v.argstorage[:0] + return v +} + +// NewValue0IA returns a new value in the block with no arguments and both an auxint and aux values. 
+func (b *Block) NewValue0IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux Aux) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = auxint + v.Aux = aux + v.Args = v.argstorage[:0] + return v +} + +// NewValue1 returns a new value in the block with one argument and zero aux values. +func (b *Block) NewValue1(pos src.XPos, op Op, t *types.Type, arg *Value) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = 0 + v.Args = v.argstorage[:1] + v.argstorage[0] = arg + arg.Uses++ + return v +} + +// NewValue1I returns a new value in the block with one argument and an auxint value. +func (b *Block) NewValue1I(pos src.XPos, op Op, t *types.Type, auxint int64, arg *Value) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = auxint + v.Args = v.argstorage[:1] + v.argstorage[0] = arg + arg.Uses++ + return v +} + +// NewValue1A returns a new value in the block with one argument and an aux value. +func (b *Block) NewValue1A(pos src.XPos, op Op, t *types.Type, aux Aux, arg *Value) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = 0 + v.Aux = aux + v.Args = v.argstorage[:1] + v.argstorage[0] = arg + arg.Uses++ + return v +} + +// NewValue1IA returns a new value in the block with one argument and both an auxint and aux values. +func (b *Block) NewValue1IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux Aux, arg *Value) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = auxint + v.Aux = aux + v.Args = v.argstorage[:1] + v.argstorage[0] = arg + arg.Uses++ + return v +} + +// NewValue2 returns a new value in the block with two arguments and zero aux values. +func (b *Block) NewValue2(pos src.XPos, op Op, t *types.Type, arg0, arg1 *Value) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = 0 + v.Args = v.argstorage[:2] + v.argstorage[0] = arg0 + v.argstorage[1] = arg1 + arg0.Uses++ + arg1.Uses++ + return v +} + +// NewValue2A returns a new value in the block with two arguments and one aux values. 
+func (b *Block) NewValue2A(pos src.XPos, op Op, t *types.Type, aux Aux, arg0, arg1 *Value) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = 0 + v.Aux = aux + v.Args = v.argstorage[:2] + v.argstorage[0] = arg0 + v.argstorage[1] = arg1 + arg0.Uses++ + arg1.Uses++ + return v +} + +// NewValue2I returns a new value in the block with two arguments and an auxint value. +func (b *Block) NewValue2I(pos src.XPos, op Op, t *types.Type, auxint int64, arg0, arg1 *Value) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = auxint + v.Args = v.argstorage[:2] + v.argstorage[0] = arg0 + v.argstorage[1] = arg1 + arg0.Uses++ + arg1.Uses++ + return v +} + +// NewValue2IA returns a new value in the block with two arguments and both an auxint and aux values. +func (b *Block) NewValue2IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux Aux, arg0, arg1 *Value) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = auxint + v.Aux = aux + v.Args = v.argstorage[:2] + v.argstorage[0] = arg0 + v.argstorage[1] = arg1 + arg0.Uses++ + arg1.Uses++ + return v +} + +// NewValue3 returns a new value in the block with three arguments and zero aux values. +func (b *Block) NewValue3(pos src.XPos, op Op, t *types.Type, arg0, arg1, arg2 *Value) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = 0 + v.Args = v.argstorage[:3] + v.argstorage[0] = arg0 + v.argstorage[1] = arg1 + v.argstorage[2] = arg2 + arg0.Uses++ + arg1.Uses++ + arg2.Uses++ + return v +} + +// NewValue3I returns a new value in the block with three arguments and an auxint value. +func (b *Block) NewValue3I(pos src.XPos, op Op, t *types.Type, auxint int64, arg0, arg1, arg2 *Value) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = auxint + v.Args = v.argstorage[:3] + v.argstorage[0] = arg0 + v.argstorage[1] = arg1 + v.argstorage[2] = arg2 + arg0.Uses++ + arg1.Uses++ + arg2.Uses++ + return v +} + +// NewValue3A returns a new value in the block with three argument and an aux value. 
+func (b *Block) NewValue3A(pos src.XPos, op Op, t *types.Type, aux Aux, arg0, arg1, arg2 *Value) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = 0 + v.Aux = aux + v.Args = v.argstorage[:3] + v.argstorage[0] = arg0 + v.argstorage[1] = arg1 + v.argstorage[2] = arg2 + arg0.Uses++ + arg1.Uses++ + arg2.Uses++ + return v +} + +// NewValue4 returns a new value in the block with four arguments and zero aux values. +func (b *Block) NewValue4(pos src.XPos, op Op, t *types.Type, arg0, arg1, arg2, arg3 *Value) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = 0 + v.Args = []*Value{arg0, arg1, arg2, arg3} + arg0.Uses++ + arg1.Uses++ + arg2.Uses++ + arg3.Uses++ + return v +} + +// NewValue4I returns a new value in the block with four arguments and auxint value. +func (b *Block) NewValue4I(pos src.XPos, op Op, t *types.Type, auxint int64, arg0, arg1, arg2, arg3 *Value) *Value { + v := b.Func.newValue(op, t, b, pos) + v.AuxInt = auxint + v.Args = []*Value{arg0, arg1, arg2, arg3} + arg0.Uses++ + arg1.Uses++ + arg2.Uses++ + arg3.Uses++ + return v +} + +// constVal returns a constant value for c. +func (f *Func) constVal(op Op, t *types.Type, c int64, setAuxInt bool) *Value { + if f.constants == nil { + f.constants = make(map[int64][]*Value) + } + vv := f.constants[c] + for _, v := range vv { + if v.Op == op && v.Type.Compare(t) == types.CMPeq { + if setAuxInt && v.AuxInt != c { + panic(fmt.Sprintf("cached const %s should have AuxInt of %d", v.LongString(), c)) + } + return v + } + } + var v *Value + if setAuxInt { + v = f.Entry.NewValue0I(src.NoXPos, op, t, c) + } else { + v = f.Entry.NewValue0(src.NoXPos, op, t) + } + f.constants[c] = append(vv, v) + v.InCache = true + return v +} + +// These magic auxint values let us easily cache non-numeric constants +// using the same constants map while making collisions unlikely. +// These values are unlikely to occur in regular code and +// are easy to grep for in case of bugs. 
+const ( + constSliceMagic = 1122334455 + constInterfaceMagic = 2233445566 + constNilMagic = 3344556677 + constEmptyStringMagic = 4455667788 +) + +// ConstBool returns an int constant representing its argument. +func (f *Func) ConstBool(t *types.Type, c bool) *Value { + i := int64(0) + if c { + i = 1 + } + return f.constVal(OpConstBool, t, i, true) +} +func (f *Func) ConstInt8(t *types.Type, c int8) *Value { + return f.constVal(OpConst8, t, int64(c), true) +} +func (f *Func) ConstInt16(t *types.Type, c int16) *Value { + return f.constVal(OpConst16, t, int64(c), true) +} +func (f *Func) ConstInt32(t *types.Type, c int32) *Value { + return f.constVal(OpConst32, t, int64(c), true) +} +func (f *Func) ConstInt64(t *types.Type, c int64) *Value { + return f.constVal(OpConst64, t, c, true) +} +func (f *Func) ConstFloat32(t *types.Type, c float64) *Value { + return f.constVal(OpConst32F, t, int64(math.Float64bits(float64(float32(c)))), true) +} +func (f *Func) ConstFloat64(t *types.Type, c float64) *Value { + return f.constVal(OpConst64F, t, int64(math.Float64bits(c)), true) +} + +func (f *Func) ConstSlice(t *types.Type) *Value { + return f.constVal(OpConstSlice, t, constSliceMagic, false) +} +func (f *Func) ConstInterface(t *types.Type) *Value { + return f.constVal(OpConstInterface, t, constInterfaceMagic, false) +} +func (f *Func) ConstNil(t *types.Type) *Value { + return f.constVal(OpConstNil, t, constNilMagic, false) +} +func (f *Func) ConstEmptyString(t *types.Type) *Value { + v := f.constVal(OpConstString, t, constEmptyStringMagic, false) + v.Aux = StringToAux("") + return v +} +func (f *Func) ConstOffPtrSP(t *types.Type, c int64, sp *Value) *Value { + v := f.constVal(OpOffPtr, t, c, true) + if len(v.Args) == 0 { + v.AddArg(sp) + } + return v +} + +func (f *Func) Frontend() Frontend { return f.fe } +func (f *Func) Warnl(pos src.XPos, msg string, args ...interface{}) { f.fe.Warnl(pos, msg, args...) 
} +func (f *Func) Logf(msg string, args ...interface{}) { f.fe.Logf(msg, args...) } +func (f *Func) Log() bool { return f.fe.Log() } + +func (f *Func) Fatalf(msg string, args ...interface{}) { + stats := "crashed" + if f.Log() { + f.Logf(" pass %s end %s\n", f.pass.name, stats) + printFunc(f) + } + if f.HTMLWriter != nil { + f.HTMLWriter.WritePhase(f.pass.name, fmt.Sprintf("%s %s", f.pass.name, stats)) + f.HTMLWriter.flushPhases() + } + f.fe.Fatalf(f.Entry.Pos, msg, args...) +} + +// postorder returns the reachable blocks in f in a postorder traversal. +func (f *Func) postorder() []*Block { + if f.cachedPostorder == nil { + f.cachedPostorder = postorder(f) + } + return f.cachedPostorder +} + +func (f *Func) Postorder() []*Block { + return f.postorder() +} + +// Idom returns a map from block ID to the immediate dominator of that block. +// f.Entry.ID maps to nil. Unreachable blocks map to nil as well. +func (f *Func) Idom() []*Block { + if f.cachedIdom == nil { + f.cachedIdom = dominators(f) + } + return f.cachedIdom +} + +// Sdom returns a sparse tree representing the dominator relationships +// among the blocks of f. +func (f *Func) Sdom() SparseTree { + if f.cachedSdom == nil { + f.cachedSdom = newSparseTree(f, f.Idom()) + } + return f.cachedSdom +} + +// loopnest returns the loop nest information for f. +func (f *Func) loopnest() *loopnest { + if f.cachedLoopnest == nil { + f.cachedLoopnest = loopnestfor(f) + } + return f.cachedLoopnest +} + +// invalidateCFG tells f that its CFG has changed. +func (f *Func) invalidateCFG() { + f.cachedPostorder = nil + f.cachedIdom = nil + f.cachedSdom = nil + f.cachedLoopnest = nil +} + +// DebugHashMatch returns +// +// base.DebugHashMatch(this function's package.name) +// +// for use in bug isolation. The return value is true unless +// environment variable GOSSAHASH is set, in which case "it depends". +// See [base.DebugHashMatch] for more information. 
+func (f *Func) DebugHashMatch() bool { + if !base.HasDebugHash() { + return true + } + sym := f.fe.Func().Sym() + return base.DebugHashMatchPkgFunc(sym.Pkg.Path, sym.Name) +} + +func (f *Func) spSb() (sp, sb *Value) { + initpos := src.NoXPos // These are originally created with no position in ssa.go; if they are optimized out then recreated, should be the same. + for _, v := range f.Entry.Values { + if v.Op == OpSB { + sb = v + } + if v.Op == OpSP { + sp = v + } + if sb != nil && sp != nil { + return + } + } + if sb == nil { + sb = f.Entry.NewValue0(initpos.WithNotStmt(), OpSB, f.Config.Types.Uintptr) + } + if sp == nil { + sp = f.Entry.NewValue0(initpos.WithNotStmt(), OpSP, f.Config.Types.Uintptr) + } + return +} + +// useFMA allows targeted debugging w/ GOFMAHASH +// If you have an architecture-dependent FP glitch, this will help you find it. +func (f *Func) useFMA(v *Value) bool { + if !f.Config.UseFMA { + return false + } + if base.FmaHash == nil { + return true + } + return base.FmaHash.MatchPos(v.Pos, nil) +} + +// NewLocal returns a new anonymous local variable of the given type. +func (f *Func) NewLocal(pos src.XPos, typ *types.Type) *ir.Name { + return typecheck.TempAt(pos, f.fe.Func(), typ) // Note: adds new auto to fn.Dcl list +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/func_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/func_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6923aaa58ecadd4745f95c1648aaee6805015189 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/func_test.go @@ -0,0 +1,482 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains some utility functions to help define Funcs for testing. 
+// As an example, the following func +// +// b1: +// v1 = InitMem +// Plain -> b2 +// b2: +// Exit v1 +// b3: +// v2 = Const [true] +// If v2 -> b3 b2 +// +// can be defined as +// +// fun := Fun("entry", +// Bloc("entry", +// Valu("mem", OpInitMem, types.TypeMem, 0, nil), +// Goto("exit")), +// Bloc("exit", +// Exit("mem")), +// Bloc("deadblock", +// Valu("deadval", OpConstBool, c.config.Types.Bool, 0, true), +// If("deadval", "deadblock", "exit"))) +// +// and the Blocks or Values used in the Func can be accessed +// like this: +// fun.blocks["entry"] or fun.values["deadval"] + +package ssa + +// TODO(matloob): Choose better names for Fun, Bloc, Goto, etc. +// TODO(matloob): Write a parser for the Func disassembly. Maybe +// the parser can be used instead of Fun. + +import ( + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/src" + "fmt" + "reflect" + "testing" +) + +// Compare two Funcs for equivalence. Their CFGs must be isomorphic, +// and their values must correspond. +// Requires that values and predecessors are in the same order, even +// though Funcs could be equivalent when they are not. +// TODO(matloob): Allow values and predecessors to be in different +// orders if the CFG are otherwise equivalent. +func Equiv(f, g *Func) bool { + valcor := make(map[*Value]*Value) + var checkVal func(fv, gv *Value) bool + checkVal = func(fv, gv *Value) bool { + if fv == nil && gv == nil { + return true + } + if valcor[fv] == nil && valcor[gv] == nil { + valcor[fv] = gv + valcor[gv] = fv + // Ignore ids. Ops and Types are compared for equality. + // TODO(matloob): Make sure types are canonical and can + // be compared for equality. + if fv.Op != gv.Op || fv.Type != gv.Type || fv.AuxInt != gv.AuxInt { + return false + } + if !reflect.DeepEqual(fv.Aux, gv.Aux) { + // This makes the assumption that aux values can be compared + // using DeepEqual. + // TODO(matloob): Aux values may be *gc.Sym pointers in the near + // future. Make sure they are canonical. 
+ return false + } + if len(fv.Args) != len(gv.Args) { + return false + } + for i := range fv.Args { + if !checkVal(fv.Args[i], gv.Args[i]) { + return false + } + } + } + return valcor[fv] == gv && valcor[gv] == fv + } + blkcor := make(map[*Block]*Block) + var checkBlk func(fb, gb *Block) bool + checkBlk = func(fb, gb *Block) bool { + if blkcor[fb] == nil && blkcor[gb] == nil { + blkcor[fb] = gb + blkcor[gb] = fb + // ignore ids + if fb.Kind != gb.Kind { + return false + } + if len(fb.Values) != len(gb.Values) { + return false + } + for i := range fb.Values { + if !checkVal(fb.Values[i], gb.Values[i]) { + return false + } + } + if len(fb.Succs) != len(gb.Succs) { + return false + } + for i := range fb.Succs { + if !checkBlk(fb.Succs[i].b, gb.Succs[i].b) { + return false + } + } + if len(fb.Preds) != len(gb.Preds) { + return false + } + for i := range fb.Preds { + if !checkBlk(fb.Preds[i].b, gb.Preds[i].b) { + return false + } + } + return true + + } + return blkcor[fb] == gb && blkcor[gb] == fb + } + + return checkBlk(f.Entry, g.Entry) +} + +// fun is the return type of Fun. It contains the created func +// itself as well as indexes from block and value names into the +// corresponding Blocks and Values. +type fun struct { + f *Func + blocks map[string]*Block + values map[string]*Value +} + +var emptyPass pass = pass{ + name: "empty pass", +} + +// AuxCallLSym returns an AuxCall initialized with an LSym that should pass "check" +// as the Aux of a static call. +func AuxCallLSym(name string) *AuxCall { + return &AuxCall{Fn: &obj.LSym{}} +} + +// Fun takes the name of an entry bloc and a series of Bloc calls, and +// returns a fun containing the composed Func. entry must be a name +// supplied to one of the Bloc functions. Each of the bloc names and +// valu names should be unique across the Fun. +func (c *Conf) Fun(entry string, blocs ...bloc) fun { + // TODO: Either mark some SSA tests as t.Parallel, + // or set up a shared Cache and Reset it between tests. 
+ // But not both. + f := c.config.NewFunc(c.Frontend(), new(Cache)) + f.pass = &emptyPass + f.cachedLineStarts = newXposmap(map[int]lineRange{0: {0, 100}, 1: {0, 100}, 2: {0, 100}, 3: {0, 100}, 4: {0, 100}}) + + blocks := make(map[string]*Block) + values := make(map[string]*Value) + // Create all the blocks and values. + for _, bloc := range blocs { + b := f.NewBlock(bloc.control.kind) + blocks[bloc.name] = b + for _, valu := range bloc.valus { + // args are filled in the second pass. + values[valu.name] = b.NewValue0IA(src.NoXPos, valu.op, valu.t, valu.auxint, valu.aux) + } + } + // Connect the blocks together and specify control values. + f.Entry = blocks[entry] + for _, bloc := range blocs { + b := blocks[bloc.name] + c := bloc.control + // Specify control values. + if c.control != "" { + cval, ok := values[c.control] + if !ok { + f.Fatalf("control value for block %s missing", bloc.name) + } + b.SetControl(cval) + } + // Fill in args. + for _, valu := range bloc.valus { + v := values[valu.name] + for _, arg := range valu.args { + a, ok := values[arg] + if !ok { + b.Fatalf("arg %s missing for value %s in block %s", + arg, valu.name, bloc.name) + } + v.AddArg(a) + } + } + // Connect to successors. + for _, succ := range c.succs { + b.AddEdgeTo(blocks[succ]) + } + } + return fun{f, blocks, values} +} + +// Bloc defines a block for Fun. The bloc name should be unique +// across the containing Fun. entries should consist of calls to valu, +// as well as one call to Goto, If, or Exit to specify the block kind. +func Bloc(name string, entries ...interface{}) bloc { + b := bloc{} + b.name = name + seenCtrl := false + for _, e := range entries { + switch v := e.(type) { + case ctrl: + // there should be exactly one Ctrl entry. 
+ if seenCtrl { + panic(fmt.Sprintf("already seen control for block %s", name)) + } + b.control = v + seenCtrl = true + case valu: + b.valus = append(b.valus, v) + } + } + if !seenCtrl { + panic(fmt.Sprintf("block %s doesn't have control", b.name)) + } + return b +} + +// Valu defines a value in a block. +func Valu(name string, op Op, t *types.Type, auxint int64, aux Aux, args ...string) valu { + return valu{name, op, t, auxint, aux, args} +} + +// Goto specifies that this is a BlockPlain and names the single successor. +// TODO(matloob): choose a better name. +func Goto(succ string) ctrl { + return ctrl{BlockPlain, "", []string{succ}} +} + +// If specifies a BlockIf. +func If(cond, sub, alt string) ctrl { + return ctrl{BlockIf, cond, []string{sub, alt}} +} + +// Exit specifies a BlockExit. +func Exit(arg string) ctrl { + return ctrl{BlockExit, arg, []string{}} +} + +// Eq specifies a BlockAMD64EQ. +func Eq(cond, sub, alt string) ctrl { + return ctrl{BlockAMD64EQ, cond, []string{sub, alt}} +} + +// bloc, ctrl, and valu are internal structures used by Bloc, Valu, Goto, +// If, and Exit to help define blocks. 
+ +type bloc struct { + name string + control ctrl + valus []valu +} + +type ctrl struct { + kind BlockKind + control string + succs []string +} + +type valu struct { + name string + op Op + t *types.Type + auxint int64 + aux Aux + args []string +} + +func TestArgs(t *testing.T) { + c := testConfig(t) + fun := c.Fun("entry", + Bloc("entry", + Valu("a", OpConst64, c.config.Types.Int64, 14, nil), + Valu("b", OpConst64, c.config.Types.Int64, 26, nil), + Valu("sum", OpAdd64, c.config.Types.Int64, 0, nil, "a", "b"), + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Goto("exit")), + Bloc("exit", + Exit("mem"))) + sum := fun.values["sum"] + for i, name := range []string{"a", "b"} { + if sum.Args[i] != fun.values[name] { + t.Errorf("arg %d for sum is incorrect: want %s, got %s", + i, sum.Args[i], fun.values[name]) + } + } +} + +func TestEquiv(t *testing.T) { + cfg := testConfig(t) + equivalentCases := []struct{ f, g fun }{ + // simple case + { + cfg.Fun("entry", + Bloc("entry", + Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil), + Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil), + Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"), + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Goto("exit")), + Bloc("exit", + Exit("mem"))), + cfg.Fun("entry", + Bloc("entry", + Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil), + Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil), + Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"), + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Goto("exit")), + Bloc("exit", + Exit("mem"))), + }, + // block order changed + { + cfg.Fun("entry", + Bloc("entry", + Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil), + Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil), + Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"), + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Goto("exit")), + Bloc("exit", + Exit("mem"))), + cfg.Fun("entry", + Bloc("exit", + Exit("mem")), + Bloc("entry", + 
Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil), + Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil), + Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"), + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Goto("exit"))), + }, + } + for _, c := range equivalentCases { + if !Equiv(c.f.f, c.g.f) { + t.Error("expected equivalence. Func definitions:") + t.Error(c.f.f) + t.Error(c.g.f) + } + } + + differentCases := []struct{ f, g fun }{ + // different shape + { + cfg.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Goto("exit")), + Bloc("exit", + Exit("mem"))), + cfg.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Exit("mem"))), + }, + // value order changed + { + cfg.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil), + Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil), + Exit("mem"))), + cfg.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil), + Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil), + Exit("mem"))), + }, + // value auxint different + { + cfg.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil), + Exit("mem"))), + cfg.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("a", OpConst64, cfg.config.Types.Int64, 26, nil), + Exit("mem"))), + }, + // value aux different + { + cfg.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("a", OpConstString, cfg.config.Types.String, 0, StringToAux("foo")), + Exit("mem"))), + cfg.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("a", OpConstString, cfg.config.Types.String, 0, StringToAux("bar")), + Exit("mem"))), + }, + // value args different + { + cfg.Fun("entry", + Bloc("entry", + 
Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil), + Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil), + Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"), + Exit("mem"))), + cfg.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("a", OpConst64, cfg.config.Types.Int64, 0, nil), + Valu("b", OpConst64, cfg.config.Types.Int64, 14, nil), + Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "b", "a"), + Exit("mem"))), + }, + } + for _, c := range differentCases { + if Equiv(c.f.f, c.g.f) { + t.Error("expected difference. Func definitions:") + t.Error(c.f.f) + t.Error(c.g.f) + } + } +} + +// TestConstCache ensures that the cache will not return +// reused free'd values with a non-matching AuxInt +func TestConstCache(t *testing.T) { + c := testConfig(t) + f := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Exit("mem"))) + v1 := f.f.ConstBool(c.config.Types.Bool, false) + v2 := f.f.ConstBool(c.config.Types.Bool, true) + f.f.freeValue(v1) + f.f.freeValue(v2) + v3 := f.f.ConstBool(c.config.Types.Bool, false) + v4 := f.f.ConstBool(c.config.Types.Bool, true) + if v3.AuxInt != 0 { + t.Errorf("expected %s to have auxint of 0\n", v3.LongString()) + } + if v4.AuxInt != 1 { + t.Errorf("expected %s to have auxint of 1\n", v4.LongString()) + } + +} + +// opcodeMap returns a map from opcode to the number of times that opcode +// appears in the function. +func opcodeMap(f *Func) map[Op]int { + m := map[Op]int{} + for _, b := range f.Blocks { + for _, v := range b.Values { + m[v.Op]++ + } + } + return m +} + +// opcodeCounts checks that the number of opcodes listed in m agree with the +// number of opcodes that appear in the function. 
+func checkOpcodeCounts(t *testing.T, f *Func, m map[Op]int) { + n := opcodeMap(f) + for op, cnt := range m { + if n[op] != cnt { + t.Errorf("%s appears %d times, want %d times", op, n[op], cnt) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/fuse.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/fuse.go new file mode 100644 index 0000000000000000000000000000000000000000..68defde7b4b9564dbc14e4f52f8db86c28a7efc1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/fuse.go @@ -0,0 +1,333 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/internal/src" + "fmt" +) + +// fuseEarly runs fuse(f, fuseTypePlain|fuseTypeIntInRange). +func fuseEarly(f *Func) { fuse(f, fuseTypePlain|fuseTypeIntInRange) } + +// fuseLate runs fuse(f, fuseTypePlain|fuseTypeIf|fuseTypeBranchRedirect). +func fuseLate(f *Func) { fuse(f, fuseTypePlain|fuseTypeIf|fuseTypeBranchRedirect) } + +type fuseType uint8 + +const ( + fuseTypePlain fuseType = 1 << iota + fuseTypeIf + fuseTypeIntInRange + fuseTypeBranchRedirect + fuseTypeShortCircuit +) + +// fuse simplifies control flow by joining basic blocks. +func fuse(f *Func, typ fuseType) { + for changed := true; changed; { + changed = false + // Be sure to avoid quadratic behavior in fuseBlockPlain. See issue 13554. + // Previously this was dealt with using backwards iteration, now fuseBlockPlain + // handles large runs of blocks. 
+ for i := len(f.Blocks) - 1; i >= 0; i-- { + b := f.Blocks[i] + if typ&fuseTypeIf != 0 { + changed = fuseBlockIf(b) || changed + } + if typ&fuseTypeIntInRange != 0 { + changed = fuseIntegerComparisons(b) || changed + } + if typ&fuseTypePlain != 0 { + changed = fuseBlockPlain(b) || changed + } + if typ&fuseTypeShortCircuit != 0 { + changed = shortcircuitBlock(b) || changed + } + } + + if typ&fuseTypeBranchRedirect != 0 { + changed = fuseBranchRedirect(f) || changed + } + if changed { + f.invalidateCFG() + } + } +} + +// fuseBlockIf handles the following cases where s0 and s1 are empty blocks. +// +// b b b b +// \ / \ / | \ / \ / | | | +// s0 s1 | s1 s0 | | | +// \ / | / \ | | | +// ss ss ss ss +// +// If all Phi ops in ss have identical variables for slots corresponding to +// s0, s1 and b then the branch can be dropped. +// This optimization often comes up in switch statements with multiple +// expressions in a case clause: +// +// switch n { +// case 1,2,3: return 4 +// } +// +// TODO: If ss doesn't contain any OpPhis, are s0 and s1 dead code anyway. +func fuseBlockIf(b *Block) bool { + if b.Kind != BlockIf { + return false + } + // It doesn't matter how much Preds does s0 or s1 have. + var ss0, ss1 *Block + s0 := b.Succs[0].b + i0 := b.Succs[0].i + if s0.Kind != BlockPlain || !isEmpty(s0) { + s0, ss0 = b, s0 + } else { + ss0 = s0.Succs[0].b + i0 = s0.Succs[0].i + } + s1 := b.Succs[1].b + i1 := b.Succs[1].i + if s1.Kind != BlockPlain || !isEmpty(s1) { + s1, ss1 = b, s1 + } else { + ss1 = s1.Succs[0].b + i1 = s1.Succs[0].i + } + if ss0 != ss1 { + if s0.Kind == BlockPlain && isEmpty(s0) && s1.Kind == BlockPlain && isEmpty(s1) { + // Two special cases where both s0, s1 and ss are empty blocks. + if s0 == ss1 { + s0, ss0 = b, ss1 + } else if ss0 == s1 { + s1, ss1 = b, ss0 + } else { + return false + } + } else { + return false + } + } + ss := ss0 + + // s0 and s1 are equal with b if the corresponding block is missing + // (2nd, 3rd and 4th case in the figure). 
+ + for _, v := range ss.Values { + if v.Op == OpPhi && v.Uses > 0 && v.Args[i0] != v.Args[i1] { + return false + } + } + + // We do not need to redirect the Preds of s0 and s1 to ss, + // the following optimization will do this. + b.removeEdge(0) + if s0 != b && len(s0.Preds) == 0 { + s0.removeEdge(0) + // Move any (dead) values in s0 to b, + // where they will be eliminated by the next deadcode pass. + for _, v := range s0.Values { + v.Block = b + } + b.Values = append(b.Values, s0.Values...) + // Clear s0. + s0.Kind = BlockInvalid + s0.Values = nil + s0.Succs = nil + s0.Preds = nil + } + + b.Kind = BlockPlain + b.Likely = BranchUnknown + b.ResetControls() + // The values in b may be dead codes, and clearing them in time may + // obtain new optimization opportunities. + // First put dead values that can be deleted into a slice walkValues. + // Then put their arguments in walkValues before resetting the dead values + // in walkValues, because the arguments may also become dead values. + walkValues := []*Value{} + for _, v := range b.Values { + if v.Uses == 0 && v.removeable() { + walkValues = append(walkValues, v) + } + } + for len(walkValues) != 0 { + v := walkValues[len(walkValues)-1] + walkValues = walkValues[:len(walkValues)-1] + if v.Uses == 0 && v.removeable() { + walkValues = append(walkValues, v.Args...) + v.reset(OpInvalid) + } + } + return true +} + +// isEmpty reports whether b contains any live values. +// There may be false positives. +func isEmpty(b *Block) bool { + for _, v := range b.Values { + if v.Uses > 0 || v.Op.IsCall() || v.Op.HasSideEffects() || v.Type.IsVoid() || opcodeTable[v.Op].nilCheck { + return false + } + } + return true +} + +// fuseBlockPlain handles a run of blocks with length >= 2, +// whose interior has single predecessors and successors, +// b must be BlockPlain, allowing it to be any node except the +// last (multiple successors means not BlockPlain). +// Cycles are handled and merged into b's successor. 
+func fuseBlockPlain(b *Block) bool { + if b.Kind != BlockPlain { + return false + } + + c := b.Succs[0].b + if len(c.Preds) != 1 || c == b { // At least 2 distinct blocks. + return false + } + + // find earliest block in run. Avoid simple cycles. + for len(b.Preds) == 1 && b.Preds[0].b != c && b.Preds[0].b.Kind == BlockPlain { + b = b.Preds[0].b + } + + // find latest block in run. Still beware of simple cycles. + for { + if c.Kind != BlockPlain { + break + } // Has exactly 1 successor + cNext := c.Succs[0].b + if cNext == b { + break + } // not a cycle + if len(cNext.Preds) != 1 { + break + } // no other incoming edge + c = cNext + } + + // Try to preserve any statement marks on the ends of blocks; move values to C + var b_next *Block + for bx := b; bx != c; bx = b_next { + // For each bx with an end-of-block statement marker, + // try to move it to a value in the next block, + // or to the next block's end, if possible. + b_next = bx.Succs[0].b + if bx.Pos.IsStmt() == src.PosIsStmt { + l := bx.Pos.Line() // looking for another place to mark for line l + outOfOrder := false + for _, v := range b_next.Values { + if v.Pos.IsStmt() == src.PosNotStmt { + continue + } + if l == v.Pos.Line() { // Found a Value with same line, therefore done. + v.Pos = v.Pos.WithIsStmt() + l = 0 + break + } + if l < v.Pos.Line() { + // The order of values in a block is not specified so OOO in a block is not interesting, + // but they do all come before the end of the block, so this disqualifies attaching to end of b_next. + outOfOrder = true + } + } + if l != 0 && !outOfOrder && (b_next.Pos.Line() == l || b_next.Pos.IsStmt() != src.PosIsStmt) { + b_next.Pos = bx.Pos.WithIsStmt() + } + } + // move all of bx's values to c (note containing loop excludes c) + for _, v := range bx.Values { + v.Block = c + } + } + + // Compute the total number of values and find the largest value slice in the run, to maximize chance of storage reuse. 
+ total := 0 + totalBeforeMax := 0 // number of elements preceding the maximum block (i.e. its position in the result). + max_b := b // block with maximum capacity + + for bx := b; ; bx = bx.Succs[0].b { + if cap(bx.Values) > cap(max_b.Values) { + totalBeforeMax = total + max_b = bx + } + total += len(bx.Values) + if bx == c { + break + } + } + + // Use c's storage if fused blocks will fit, else use the max if that will fit, else allocate new storage. + + // Take care to avoid c.Values pointing to b.valstorage. + // See golang.org/issue/18602. + + // It's important to keep the elements in the same order; maintenance of + // debugging information depends on the order of *Values in Blocks. + // This can also cause changes in the order (which may affect other + // optimizations and possibly compiler output) for 32-vs-64 bit compilation + // platforms (word size affects allocation bucket size affects slice capacity). + + // figure out what slice will hold the values, + // preposition the destination elements if not allocating new storage + var t []*Value + if total <= len(c.valstorage) { + t = c.valstorage[:total] + max_b = c + totalBeforeMax = total - len(c.Values) + copy(t[totalBeforeMax:], c.Values) + } else if total <= cap(max_b.Values) { // in place, somewhere + t = max_b.Values[0:total] + copy(t[totalBeforeMax:], max_b.Values) + } else { + t = make([]*Value, total) + max_b = nil + } + + // copy the values + copyTo := 0 + for bx := b; ; bx = bx.Succs[0].b { + if bx != max_b { + copy(t[copyTo:], bx.Values) + } else if copyTo != totalBeforeMax { // trust but verify. + panic(fmt.Errorf("totalBeforeMax (%d) != copyTo (%d), max_b=%v, b=%v, c=%v", totalBeforeMax, copyTo, max_b, b, c)) + } + if bx == c { + break + } + copyTo += len(bx.Values) + } + c.Values = t + + // replace b->c edge with preds(b) -> c + c.predstorage[0] = Edge{} + if len(b.Preds) > len(b.predstorage) { + c.Preds = b.Preds + } else { + c.Preds = append(c.predstorage[:0], b.Preds...) 
+ } + for i, e := range c.Preds { + p := e.b + p.Succs[e.i] = Edge{c, i} + } + f := b.Func + if f.Entry == b { + f.Entry = c + } + + // trash b's fields, just in case + for bx := b; bx != c; bx = b_next { + b_next = bx.Succs[0].b + + bx.Kind = BlockInvalid + bx.Values = nil + bx.Preds = nil + bx.Succs = nil + } + return true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/fuse_branchredirect.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/fuse_branchredirect.go new file mode 100644 index 0000000000000000000000000000000000000000..153c2a56b716b21eaaac2f37c50bb0fb249b2902 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/fuse_branchredirect.go @@ -0,0 +1,112 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// fuseBranchRedirect checks for a CFG in which the outbound branch +// of an If block can be derived from its predecessor If block, in +// some such cases, we can redirect the predecessor If block to the +// corresponding successor block directly. For example: +// +// p: +// v11 = Less64 v10 v8 +// If v11 goto b else u +// b: <- p ... +// v17 = Leq64 v10 v8 +// If v17 goto s else o +// +// We can redirect p to s directly. +// +// The implementation here borrows the framework of the prove pass. +// +// 1, Traverse all blocks of function f to find If blocks. +// 2, For any If block b, traverse all its predecessors to find If blocks. +// 3, For any If block predecessor p, update relationship p->b. +// 4, Traverse all successors of b. +// 5, For any successor s of b, try to update relationship b->s, if a +// contradiction is found then redirect p to another successor of b. 
+func fuseBranchRedirect(f *Func) bool { + ft := newFactsTable(f) + ft.checkpoint() + + changed := false + for i := len(f.Blocks) - 1; i >= 0; i-- { + b := f.Blocks[i] + if b.Kind != BlockIf { + continue + } + // b is either empty or only contains the control value. + // TODO: if b contains only OpCopy or OpNot related to b.Controls, + // such as Copy(Not(Copy(Less64(v1, v2)))), perhaps it can be optimized. + bCtl := b.Controls[0] + if bCtl.Block != b && len(b.Values) != 0 || (len(b.Values) != 1 || bCtl.Uses != 1) && bCtl.Block == b { + continue + } + + for k := 0; k < len(b.Preds); k++ { + pk := b.Preds[k] + p := pk.b + if p.Kind != BlockIf || p == b { + continue + } + pbranch := positive + if pk.i == 1 { + pbranch = negative + } + ft.checkpoint() + // Assume branch p->b is taken. + addBranchRestrictions(ft, p, pbranch) + // Check if any outgoing branch is unreachable based on the above condition. + parent := b + for j, bbranch := range [...]branch{positive, negative} { + ft.checkpoint() + // Try to update relationship b->child, and check if the contradiction occurs. + addBranchRestrictions(ft, parent, bbranch) + unsat := ft.unsat + ft.restore() + if !unsat { + continue + } + // This branch is impossible,so redirect p directly to another branch. + out := 1 ^ j + child := parent.Succs[out].b + if child == b { + continue + } + b.removePred(k) + p.Succs[pk.i] = Edge{child, len(child.Preds)} + // Fix up Phi value in b to have one less argument. + for _, v := range b.Values { + if v.Op != OpPhi { + continue + } + b.removePhiArg(v, k) + } + // Fix up child to have one more predecessor. 
+ child.Preds = append(child.Preds, Edge{p, pk.i}) + ai := b.Succs[out].i + for _, v := range child.Values { + if v.Op != OpPhi { + continue + } + v.AddArg(v.Args[ai]) + } + if b.Func.pass.debug > 0 { + b.Func.Warnl(b.Controls[0].Pos, "Redirect %s based on %s", b.Controls[0].Op, p.Controls[0].Op) + } + changed = true + k-- + break + } + ft.restore() + } + if len(b.Preds) == 0 && b != f.Entry { + // Block is now dead. + b.Kind = BlockInvalid + } + } + ft.restore() + ft.cleanup(f) + return changed +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/fuse_comparisons.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/fuse_comparisons.go new file mode 100644 index 0000000000000000000000000000000000000000..f5fb84b0d735320d99956a11888ca4d956a61180 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/fuse_comparisons.go @@ -0,0 +1,157 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// fuseIntegerComparisons optimizes inequalities such as '1 <= x && x < 5', +// which can be optimized to 'unsigned(x-1) < 4'. +// +// Look for branch structure like: +// +// p +// |\ +// | b +// |/ \ +// s0 s1 +// +// In our example, p has control '1 <= x', b has control 'x < 5', +// and s0 and s1 are the if and else results of the comparison. +// +// This will be optimized into: +// +// p +// \ +// b +// / \ +// s0 s1 +// +// where b has the combined control value 'unsigned(x-1) < 4'. +// Later passes will then fuse p and b. +func fuseIntegerComparisons(b *Block) bool { + if len(b.Preds) != 1 { + return false + } + p := b.Preds[0].Block() + if b.Kind != BlockIf || p.Kind != BlockIf { + return false + } + + // Don't merge control values if b is likely to be bypassed anyway. 
+ if p.Likely == BranchLikely && p.Succs[0].Block() != b { + return false + } + if p.Likely == BranchUnlikely && p.Succs[1].Block() != b { + return false + } + + // Check if the control values combine to make an integer inequality that + // can be further optimized later. + bc := b.Controls[0] + pc := p.Controls[0] + if !areMergeableInequalities(bc, pc) { + return false + } + + // If the first (true) successors match then we have a disjunction (||). + // If the second (false) successors match then we have a conjunction (&&). + for i, op := range [2]Op{OpOrB, OpAndB} { + if p.Succs[i].Block() != b.Succs[i].Block() { + continue + } + + // TODO(mundaym): should we also check the cost of executing b? + // Currently we might speculatively execute b even if b contains + // a lot of instructions. We could just check that len(b.Values) + // is lower than a fixed amount. Bear in mind however that the + // other optimization passes might yet reduce the cost of b + // significantly so we shouldn't be overly conservative. + if !canSpeculativelyExecute(b) { + return false + } + + // Logically combine the control values for p and b. + v := b.NewValue0(bc.Pos, op, bc.Type) + v.AddArg(pc) + v.AddArg(bc) + + // Set the combined control value as the control value for b. + b.SetControl(v) + + // Modify p so that it jumps directly to b. + p.removeEdge(i) + p.Kind = BlockPlain + p.Likely = BranchUnknown + p.ResetControls() + + return true + } + + // TODO: could negate condition(s) to merge controls. + return false +} + +// getConstIntArgIndex returns the index of the first argument that is a +// constant integer or -1 if no such argument exists. +func getConstIntArgIndex(v *Value) int { + for i, a := range v.Args { + switch a.Op { + case OpConst8, OpConst16, OpConst32, OpConst64: + return i + } + } + return -1 +} + +// isSignedInequality reports whether op represents the inequality < or ≤ +// in the signed domain. 
+func isSignedInequality(v *Value) bool { + switch v.Op { + case OpLess64, OpLess32, OpLess16, OpLess8, + OpLeq64, OpLeq32, OpLeq16, OpLeq8: + return true + } + return false +} + +// isUnsignedInequality reports whether op represents the inequality < or ≤ +// in the unsigned domain. +func isUnsignedInequality(v *Value) bool { + switch v.Op { + case OpLess64U, OpLess32U, OpLess16U, OpLess8U, + OpLeq64U, OpLeq32U, OpLeq16U, OpLeq8U: + return true + } + return false +} + +func areMergeableInequalities(x, y *Value) bool { + // We need both inequalities to be either in the signed or unsigned domain. + // TODO(mundaym): it would also be good to merge when we have an Eq op that + // could be transformed into a Less/Leq. For example in the unsigned + // domain 'x == 0 || 3 < x' is equivalent to 'x <= 0 || 3 < x' + inequalityChecks := [...]func(*Value) bool{ + isSignedInequality, + isUnsignedInequality, + } + for _, f := range inequalityChecks { + if !f(x) || !f(y) { + continue + } + + // Check that both inequalities are comparisons with constants. + xi := getConstIntArgIndex(x) + if xi < 0 { + return false + } + yi := getConstIntArgIndex(y) + if yi < 0 { + return false + } + + // Check that the non-constant arguments to the inequalities + // are the same. + return x.Args[xi^1] == y.Args[yi^1] + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/fuse_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/fuse_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2f89938d1d92332955de60be0fbb3eefedd19b4d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/fuse_test.go @@ -0,0 +1,305 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import ( + "cmd/compile/internal/types" + "fmt" + "strconv" + "testing" +) + +func TestFuseEliminatesOneBranch(t *testing.T) { + c := testConfig(t) + ptrType := c.config.Types.BytePtr + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Goto("checkPtr")), + Bloc("checkPtr", + Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"), + Valu("nilptr", OpConstNil, ptrType, 0, nil), + Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"), + If("bool1", "then", "exit")), + Bloc("then", + Goto("exit")), + Bloc("exit", + Exit("mem"))) + + CheckFunc(fun.f) + fuseLate(fun.f) + + for _, b := range fun.f.Blocks { + if b == fun.blocks["then"] && b.Kind != BlockInvalid { + t.Errorf("then was not eliminated, but should have") + } + } +} + +func TestFuseEliminatesBothBranches(t *testing.T) { + c := testConfig(t) + ptrType := c.config.Types.BytePtr + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Goto("checkPtr")), + Bloc("checkPtr", + Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"), + Valu("nilptr", OpConstNil, ptrType, 0, nil), + Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"), + If("bool1", "then", "else")), + Bloc("then", + Goto("exit")), + Bloc("else", + Goto("exit")), + Bloc("exit", + Exit("mem"))) + + CheckFunc(fun.f) + fuseLate(fun.f) + + for _, b := range fun.f.Blocks { + if b == fun.blocks["then"] && b.Kind != BlockInvalid { + t.Errorf("then was not eliminated, but should have") + } + if b == fun.blocks["else"] && b.Kind != BlockInvalid { + t.Errorf("else was not eliminated, but should have") + } + } +} + +func TestFuseHandlesPhis(t *testing.T) { + c := testConfig(t) + ptrType := c.config.Types.BytePtr + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, 
nil), + Goto("checkPtr")), + Bloc("checkPtr", + Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"), + Valu("nilptr", OpConstNil, ptrType, 0, nil), + Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"), + If("bool1", "then", "else")), + Bloc("then", + Goto("exit")), + Bloc("else", + Goto("exit")), + Bloc("exit", + Valu("phi", OpPhi, ptrType, 0, nil, "ptr1", "ptr1"), + Exit("mem"))) + + CheckFunc(fun.f) + fuseLate(fun.f) + + for _, b := range fun.f.Blocks { + if b == fun.blocks["then"] && b.Kind != BlockInvalid { + t.Errorf("then was not eliminated, but should have") + } + if b == fun.blocks["else"] && b.Kind != BlockInvalid { + t.Errorf("else was not eliminated, but should have") + } + } +} + +func TestFuseEliminatesEmptyBlocks(t *testing.T) { + c := testConfig(t) + // Case 1, plain type empty blocks z0 ~ z3 will be eliminated. + // entry + // | + // z0 + // | + // z1 + // | + // z2 + // | + // z3 + // | + // exit + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Goto("z0")), + Bloc("z1", + Goto("z2")), + Bloc("z3", + Goto("exit")), + Bloc("z2", + Goto("z3")), + Bloc("z0", + Goto("z1")), + Bloc("exit", + Exit("mem"), + )) + + CheckFunc(fun.f) + fuseLate(fun.f) + + for k, b := range fun.blocks { + if k[:1] == "z" && b.Kind != BlockInvalid { + t.Errorf("case1 %s was not eliminated, but should have", k) + } + } + + // Case 2, empty blocks with If branch, z0 and z1 will be eliminated. 
+ // entry + // / \ + // z0 z1 + // \ / + // exit + fun = c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("c", OpArg, c.config.Types.Bool, 0, nil), + If("c", "z0", "z1")), + Bloc("z0", + Goto("exit")), + Bloc("z1", + Goto("exit")), + Bloc("exit", + Exit("mem"), + )) + + CheckFunc(fun.f) + fuseLate(fun.f) + + for k, b := range fun.blocks { + if k[:1] == "z" && b.Kind != BlockInvalid { + t.Errorf("case2 %s was not eliminated, but should have", k) + } + } + + // Case 3, empty blocks with multiple predecessors, z0 and z1 will be eliminated. + // entry + // | \ + // | b0 + // | / \ + // z0 z1 + // \ / + // exit + fun = c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("c1", OpArg, c.config.Types.Bool, 0, nil), + If("c1", "b0", "z0")), + Bloc("b0", + Valu("c2", OpArg, c.config.Types.Bool, 0, nil), + If("c2", "z1", "z0")), + Bloc("z0", + Goto("exit")), + Bloc("z1", + Goto("exit")), + Bloc("exit", + Exit("mem"), + )) + + CheckFunc(fun.f) + fuseLate(fun.f) + + for k, b := range fun.blocks { + if k[:1] == "z" && b.Kind != BlockInvalid { + t.Errorf("case3 %s was not eliminated, but should have", k) + } + } +} + +func TestFuseSideEffects(t *testing.T) { + c := testConfig(t) + // Case1, test that we don't fuse branches that have side effects but + // have no use (e.g. followed by infinite loop). + // See issue #36005. 
+ fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("b", OpArg, c.config.Types.Bool, 0, nil), + If("b", "then", "else")), + Bloc("then", + Valu("call1", OpStaticCall, types.TypeMem, 0, AuxCallLSym("_"), "mem"), + Goto("empty")), + Bloc("else", + Valu("call2", OpStaticCall, types.TypeMem, 0, AuxCallLSym("_"), "mem"), + Goto("empty")), + Bloc("empty", + Goto("loop")), + Bloc("loop", + Goto("loop"))) + + CheckFunc(fun.f) + fuseLate(fun.f) + + for _, b := range fun.f.Blocks { + if b == fun.blocks["then"] && b.Kind == BlockInvalid { + t.Errorf("then is eliminated, but should not") + } + if b == fun.blocks["else"] && b.Kind == BlockInvalid { + t.Errorf("else is eliminated, but should not") + } + } + + // Case2, z0 contains a value that has side effect, z0 shouldn't be eliminated. + // entry + // | \ + // | z0 + // | / + // exit + fun = c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("c1", OpArg, c.config.Types.Bool, 0, nil), + Valu("p", OpArg, c.config.Types.IntPtr, 0, nil), + If("c1", "z0", "exit")), + Bloc("z0", + Valu("nilcheck", OpNilCheck, c.config.Types.IntPtr, 0, nil, "p", "mem"), + Goto("exit")), + Bloc("exit", + Exit("mem"), + )) + CheckFunc(fun.f) + fuseLate(fun.f) + z0, ok := fun.blocks["z0"] + if !ok || z0.Kind == BlockInvalid { + t.Errorf("case2 z0 is eliminated, but should not") + } +} + +func BenchmarkFuse(b *testing.B) { + for _, n := range [...]int{1, 10, 100, 1000, 10000} { + b.Run(strconv.Itoa(n), func(b *testing.B) { + c := testConfig(b) + + blocks := make([]bloc, 0, 2*n+3) + blocks = append(blocks, + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("cond", OpArg, c.config.Types.Bool, 0, nil), + Valu("x", OpArg, c.config.Types.Int64, 0, nil), + Goto("exit"))) + + phiArgs := make([]string, 0, 2*n) + for i := 0; i < n; i++ { + cname := fmt.Sprintf("c%d", i) + blocks = append(blocks, + Bloc(fmt.Sprintf("b%d", i), If("cond", cname, "merge")), + 
Bloc(cname, Goto("merge"))) + phiArgs = append(phiArgs, "x", "x") + } + blocks = append(blocks, + Bloc("merge", + Valu("phi", OpPhi, types.TypeMem, 0, nil, phiArgs...), + Goto("exit")), + Bloc("exit", + Exit("mem"))) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + fun := c.Fun("entry", blocks...) + fuseLate(fun.f) + } + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/generate.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/generate.go new file mode 100644 index 0000000000000000000000000000000000000000..74c5b318291f03ffa1d982114a5ffa7eb81bdefa --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/generate.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build generate + +package ssa + +//go:generate go run -C=_gen . diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/html.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/html.go new file mode 100644 index 0000000000000000000000000000000000000000..ea170fbcdba6892c6f52a61e0de2fabb72ab0268 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/html.go @@ -0,0 +1,1319 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import ( + "bytes" + "cmd/internal/src" + "fmt" + "html" + "io" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" +) + +type HTMLWriter struct { + w io.WriteCloser + Func *Func + path string + dot *dotWriter + prevHash []byte + pendingPhases []string + pendingTitles []string +} + +func NewHTMLWriter(path string, f *Func, cfgMask string) *HTMLWriter { + path = strings.Replace(path, "/", string(filepath.Separator), -1) + out, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + f.Fatalf("%v", err) + } + reportPath := path + if !filepath.IsAbs(reportPath) { + pwd, err := os.Getwd() + if err != nil { + f.Fatalf("%v", err) + } + reportPath = filepath.Join(pwd, path) + } + html := HTMLWriter{ + w: out, + Func: f, + path: reportPath, + dot: newDotWriter(cfgMask), + } + html.start() + return &html +} + +// Fatalf reports an error and exits. +func (w *HTMLWriter) Fatalf(msg string, args ...interface{}) { + fe := w.Func.Frontend() + fe.Fatalf(src.NoXPos, msg, args...) +} + +// Logf calls the (w *HTMLWriter).Func's Logf method passing along a msg and args. +func (w *HTMLWriter) Logf(msg string, args ...interface{}) { + w.Func.Logf(msg, args...) +} + +func (w *HTMLWriter) start() { + if w == nil { + return + } + w.WriteString("") + w.WriteString(` + + + + + +`) + w.WriteString("") + w.WriteString("

") + w.WriteString(html.EscapeString(w.Func.NameABI())) + w.WriteString("

") + w.WriteString(` +
help +
+ +

+Click on a value or block to toggle highlighting of that value/block +and its uses. (Values and blocks are highlighted by ID, and IDs of +dead items may be reused, so not all highlights necessarily correspond +to the clicked item.) +

+ +

+Faded out values and blocks are dead code that has not been eliminated. +

+ +

+Values printed in italics have a dependency cycle. +

+ +

+CFG: Dashed edge is for unlikely branches. Blue color is for backward edges. +Edge with a dot means that this edge follows the order in which blocks were laidout. +

+ +
+ + +`) + w.WriteString("") + w.WriteString("") +} + +func (w *HTMLWriter) Close() { + if w == nil { + return + } + io.WriteString(w.w, "") + io.WriteString(w.w, "
") + io.WriteString(w.w, "") + io.WriteString(w.w, "") + w.w.Close() + fmt.Printf("dumped SSA for %s to %v\n", w.Func.NameABI(), w.path) +} + +// WritePhase writes f in a column headed by title. +// phase is used for collapsing columns and should be unique across the table. +func (w *HTMLWriter) WritePhase(phase, title string) { + if w == nil { + return // avoid generating HTML just to discard it + } + hash := hashFunc(w.Func) + w.pendingPhases = append(w.pendingPhases, phase) + w.pendingTitles = append(w.pendingTitles, title) + if !bytes.Equal(hash, w.prevHash) { + w.flushPhases() + } + w.prevHash = hash +} + +// flushPhases collects any pending phases and titles, writes them to the html, and resets the pending slices. +func (w *HTMLWriter) flushPhases() { + phaseLen := len(w.pendingPhases) + if phaseLen == 0 { + return + } + phases := strings.Join(w.pendingPhases, " + ") + w.WriteMultiTitleColumn( + phases, + w.pendingTitles, + fmt.Sprintf("hash-%x", w.prevHash), + w.Func.HTML(w.pendingPhases[phaseLen-1], w.dot), + ) + w.pendingPhases = w.pendingPhases[:0] + w.pendingTitles = w.pendingTitles[:0] +} + +// FuncLines contains source code for a function to be displayed +// in sources column. +type FuncLines struct { + Filename string + StartLineno uint + Lines []string +} + +// ByTopo sorts topologically: target function is on top, +// followed by inlined functions sorted by filename and line numbers. +type ByTopo []*FuncLines + +func (x ByTopo) Len() int { return len(x) } +func (x ByTopo) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x ByTopo) Less(i, j int) bool { + a := x[i] + b := x[j] + if a.Filename == b.Filename { + return a.StartLineno < b.StartLineno + } + return a.Filename < b.Filename +} + +// WriteSources writes lines as source code in a column headed by title. +// phase is used for collapsing columns and should be unique across the table. 
+func (w *HTMLWriter) WriteSources(phase string, all []*FuncLines) { + if w == nil { + return // avoid generating HTML just to discard it + } + var buf strings.Builder + fmt.Fprint(&buf, "
") + filename := "" + for _, fl := range all { + fmt.Fprint(&buf, "
 
") + if filename != fl.Filename { + fmt.Fprint(&buf, "
 
") + filename = fl.Filename + } + for i := range fl.Lines { + ln := int(fl.StartLineno) + i + fmt.Fprintf(&buf, "
%v
", ln, ln) + } + } + fmt.Fprint(&buf, "
")
+	filename = ""
+	for _, fl := range all {
+		fmt.Fprint(&buf, "
 
") + if filename != fl.Filename { + fmt.Fprintf(&buf, "
%v
", fl.Filename) + filename = fl.Filename + } + for i, line := range fl.Lines { + ln := int(fl.StartLineno) + i + var escaped string + if strings.TrimSpace(line) == "" { + escaped = " " + } else { + escaped = html.EscapeString(line) + } + fmt.Fprintf(&buf, "
%v
", ln, escaped) + } + } + fmt.Fprint(&buf, "
") + w.WriteColumn(phase, phase, "allow-x-scroll", buf.String()) +} + +func (w *HTMLWriter) WriteAST(phase string, buf *bytes.Buffer) { + if w == nil { + return // avoid generating HTML just to discard it + } + lines := strings.Split(buf.String(), "\n") + var out strings.Builder + + fmt.Fprint(&out, "
") + for _, l := range lines { + l = strings.TrimSpace(l) + var escaped string + var lineNo string + if l == "" { + escaped = " " + } else { + if strings.HasPrefix(l, "buildssa") { + escaped = fmt.Sprintf("%v", l) + } else { + // Parse the line number from the format file:line:col. + // See the implementation in ir/fmt.go:dumpNodeHeader. + sl := strings.Split(l, ":") + if len(sl) >= 3 { + if _, err := strconv.Atoi(sl[len(sl)-2]); err == nil { + lineNo = sl[len(sl)-2] + } + } + escaped = html.EscapeString(l) + } + } + if lineNo != "" { + fmt.Fprintf(&out, "
%v
", lineNo, escaped) + } else { + fmt.Fprintf(&out, "
%v
", escaped) + } + } + fmt.Fprint(&out, "
") + w.WriteColumn(phase, phase, "allow-x-scroll", out.String()) +} + +// WriteColumn writes raw HTML in a column headed by title. +// It is intended for pre- and post-compilation log output. +func (w *HTMLWriter) WriteColumn(phase, title, class, html string) { + w.WriteMultiTitleColumn(phase, []string{title}, class, html) +} + +func (w *HTMLWriter) WriteMultiTitleColumn(phase string, titles []string, class, html string) { + if w == nil { + return + } + id := strings.Replace(phase, " ", "-", -1) + // collapsed column + w.Printf("
%v
", id, phase) + + if class == "" { + w.Printf("", id) + } else { + w.Printf("", id, class) + } + for _, title := range titles { + w.WriteString("

" + title + "

") + } + w.WriteString(html) + w.WriteString("\n") +} + +func (w *HTMLWriter) Printf(msg string, v ...interface{}) { + if _, err := fmt.Fprintf(w.w, msg, v...); err != nil { + w.Fatalf("%v", err) + } +} + +func (w *HTMLWriter) WriteString(s string) { + if _, err := io.WriteString(w.w, s); err != nil { + w.Fatalf("%v", err) + } +} + +func (v *Value) HTML() string { + // TODO: Using the value ID as the class ignores the fact + // that value IDs get recycled and that some values + // are transmuted into other values. + s := v.String() + return fmt.Sprintf("%s", s, s) +} + +func (v *Value) LongHTML() string { + // TODO: Any intra-value formatting? + // I'm wary of adding too much visual noise, + // but a little bit might be valuable. + // We already have visual noise in the form of punctuation + // maybe we could replace some of that with formatting. + s := fmt.Sprintf("", v.String()) + + linenumber := "(?)" + if v.Pos.IsKnown() { + linenumber = fmt.Sprintf("(%s)", v.Pos.LineNumber(), v.Pos.LineNumberHTML()) + } + + s += fmt.Sprintf("%s %s = %s", v.HTML(), linenumber, v.Op.String()) + + s += " <" + html.EscapeString(v.Type.String()) + ">" + s += html.EscapeString(v.auxString()) + for _, a := range v.Args { + s += fmt.Sprintf(" %s", a.HTML()) + } + r := v.Block.Func.RegAlloc + if int(v.ID) < len(r) && r[v.ID] != nil { + s += " : " + html.EscapeString(r[v.ID].String()) + } + if reg := v.Block.Func.tempRegs[v.ID]; reg != nil { + s += " tmp=" + reg.String() + } + var names []string + for name, values := range v.Block.Func.NamedValues { + for _, value := range values { + if value == v { + names = append(names, name.String()) + break // drop duplicates. + } + } + } + if len(names) != 0 { + s += " (" + strings.Join(names, ", ") + ")" + } + + s += "" + return s +} + +func (b *Block) HTML() string { + // TODO: Using the value ID as the class ignores the fact + // that value IDs get recycled and that some values + // are transmuted into other values. 
+ s := html.EscapeString(b.String()) + return fmt.Sprintf("%s", s, s) +} + +func (b *Block) LongHTML() string { + // TODO: improve this for HTML? + s := fmt.Sprintf("%s", html.EscapeString(b.String()), html.EscapeString(b.Kind.String())) + if b.Aux != nil { + s += html.EscapeString(fmt.Sprintf(" {%v}", b.Aux)) + } + if t := b.AuxIntString(); t != "" { + s += html.EscapeString(fmt.Sprintf(" [%v]", t)) + } + for _, c := range b.ControlValues() { + s += fmt.Sprintf(" %s", c.HTML()) + } + if len(b.Succs) > 0 { + s += " →" // right arrow + for _, e := range b.Succs { + c := e.b + s += " " + c.HTML() + } + } + switch b.Likely { + case BranchUnlikely: + s += " (unlikely)" + case BranchLikely: + s += " (likely)" + } + if b.Pos.IsKnown() { + // TODO does not begin to deal with the full complexity of line numbers. + // Maybe we want a string/slice instead, of outer-inner when inlining. + s += fmt.Sprintf(" (%s)", b.Pos.LineNumber(), b.Pos.LineNumberHTML()) + } + return s +} + +func (f *Func) HTML(phase string, dot *dotWriter) string { + buf := new(strings.Builder) + if dot != nil { + dot.writeFuncSVG(buf, phase, f) + } + fmt.Fprint(buf, "") + p := htmlFuncPrinter{w: buf} + fprintFunc(p, f) + + // fprintFunc(&buf, f) // TODO: HTML, not text,
for line breaks, etc. + fmt.Fprint(buf, "
") + return buf.String() +} + +func (d *dotWriter) writeFuncSVG(w io.Writer, phase string, f *Func) { + if d.broken { + return + } + if _, ok := d.phases[phase]; !ok { + return + } + cmd := exec.Command(d.path, "-Tsvg") + pipe, err := cmd.StdinPipe() + if err != nil { + d.broken = true + fmt.Println(err) + return + } + buf := new(bytes.Buffer) + cmd.Stdout = buf + bufErr := new(strings.Builder) + cmd.Stderr = bufErr + err = cmd.Start() + if err != nil { + d.broken = true + fmt.Println(err) + return + } + fmt.Fprint(pipe, `digraph "" { margin=0; ranksep=.2; `) + id := strings.Replace(phase, " ", "-", -1) + fmt.Fprintf(pipe, `id="g_graph_%s";`, id) + fmt.Fprintf(pipe, `node [style=filled,fillcolor=white,fontsize=16,fontname="Menlo,Times,serif",margin="0.01,0.03"];`) + fmt.Fprintf(pipe, `edge [fontsize=16,fontname="Menlo,Times,serif"];`) + for i, b := range f.Blocks { + if b.Kind == BlockInvalid { + continue + } + layout := "" + if f.laidout { + layout = fmt.Sprintf(" #%d", i) + } + fmt.Fprintf(pipe, `%v [label="%v%s\n%v",id="graph_node_%v_%v",tooltip="%v"];`, b, b, layout, b.Kind.String(), id, b, b.LongString()) + } + indexOf := make([]int, f.NumBlocks()) + for i, b := range f.Blocks { + indexOf[b.ID] = i + } + layoutDrawn := make([]bool, f.NumBlocks()) + + ponums := make([]int32, f.NumBlocks()) + _ = postorderWithNumbering(f, ponums) + isBackEdge := func(from, to ID) bool { + return ponums[from] <= ponums[to] + } + + for _, b := range f.Blocks { + for i, s := range b.Succs { + style := "solid" + color := "black" + arrow := "vee" + if b.unlikelyIndex() == i { + style = "dashed" + } + if f.laidout && indexOf[s.b.ID] == indexOf[b.ID]+1 { + // Red color means ordered edge. It overrides other colors. 
+ arrow = "dotvee" + layoutDrawn[s.b.ID] = true + } else if isBackEdge(b.ID, s.b.ID) { + color = "#2893ff" + } + fmt.Fprintf(pipe, `%v -> %v [label=" %d ",style="%s",color="%s",arrowhead="%s"];`, b, s.b, i, style, color, arrow) + } + } + if f.laidout { + fmt.Fprintln(pipe, `edge[constraint=false,color=gray,style=solid,arrowhead=dot];`) + colors := [...]string{"#eea24f", "#f38385", "#f4d164", "#ca89fc", "gray"} + ci := 0 + for i := 1; i < len(f.Blocks); i++ { + if layoutDrawn[f.Blocks[i].ID] { + continue + } + fmt.Fprintf(pipe, `%s -> %s [color="%s"];`, f.Blocks[i-1], f.Blocks[i], colors[ci]) + ci = (ci + 1) % len(colors) + } + } + fmt.Fprint(pipe, "}") + pipe.Close() + err = cmd.Wait() + if err != nil { + d.broken = true + fmt.Printf("dot: %v\n%v\n", err, bufErr.String()) + return + } + + svgID := "svg_graph_" + id + fmt.Fprintf(w, `
`, svgID, svgID) + // For now, an awful hack: edit the html as it passes through + // our fingers, finding '", b, dead) + fmt.Fprintf(p.w, "
  • %s:", b.HTML()) + if len(b.Preds) > 0 { + io.WriteString(p.w, " ←") // left arrow + for _, e := range b.Preds { + pred := e.b + fmt.Fprintf(p.w, " %s", pred.HTML()) + } + } + if len(b.Values) > 0 { + io.WriteString(p.w, ``) + } + io.WriteString(p.w, "
  • ") + if len(b.Values) > 0 { // start list of values + io.WriteString(p.w, "
  • ") + io.WriteString(p.w, "
      ") + } +} + +func (p htmlFuncPrinter) endBlock(b *Block, reachable bool) { + if len(b.Values) > 0 { // end list of values + io.WriteString(p.w, "
    ") + io.WriteString(p.w, "
  • ") + } + io.WriteString(p.w, "
  • ") + fmt.Fprint(p.w, b.LongHTML()) + io.WriteString(p.w, "
  • ") + io.WriteString(p.w, "") +} + +func (p htmlFuncPrinter) value(v *Value, live bool) { + var dead string + if !live { + dead = "dead-value" + } + fmt.Fprintf(p.w, "
  • ", dead) + fmt.Fprint(p.w, v.LongHTML()) + io.WriteString(p.w, "
  • ") +} + +func (p htmlFuncPrinter) startDepCycle() { + fmt.Fprintln(p.w, "") +} + +func (p htmlFuncPrinter) endDepCycle() { + fmt.Fprintln(p.w, "") +} + +func (p htmlFuncPrinter) named(n LocalSlot, vals []*Value) { + fmt.Fprintf(p.w, "
  • name %s: ", n) + for _, val := range vals { + fmt.Fprintf(p.w, "%s ", val.HTML()) + } + fmt.Fprintf(p.w, "
  • ") +} + +type dotWriter struct { + path string + broken bool + phases map[string]bool // keys specify phases with CFGs +} + +// newDotWriter returns non-nil value when mask is valid. +// dotWriter will generate SVGs only for the phases specified in the mask. +// mask can contain following patterns and combinations of them: +// * - all of them; +// x-y - x through y, inclusive; +// x,y - x and y, but not the passes between. +func newDotWriter(mask string) *dotWriter { + if mask == "" { + return nil + } + // User can specify phase name with _ instead of spaces. + mask = strings.Replace(mask, "_", " ", -1) + ph := make(map[string]bool) + ranges := strings.Split(mask, ",") + for _, r := range ranges { + spl := strings.Split(r, "-") + if len(spl) > 2 { + fmt.Printf("range is not valid: %v\n", mask) + return nil + } + var first, last int + if mask == "*" { + first = 0 + last = len(passes) - 1 + } else { + first = passIdxByName(spl[0]) + last = passIdxByName(spl[len(spl)-1]) + } + if first < 0 || last < 0 || first > last { + fmt.Printf("range is not valid: %v\n", r) + return nil + } + for p := first; p <= last; p++ { + ph[passes[p].name] = true + } + } + + path, err := exec.LookPath("dot") + if err != nil { + fmt.Println(err) + return nil + } + return &dotWriter{path: path, phases: ph} +} + +func passIdxByName(name string) int { + for i, p := range passes { + if p.name == name { + return i + } + } + return -1 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/id.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/id.go new file mode 100644 index 0000000000000000000000000000000000000000..725279e9fd8d7d1e8e7cd14b5b3332a1eabf18c8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/id.go @@ -0,0 +1,28 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +type ID int32 + +// idAlloc provides an allocator for unique integers. +type idAlloc struct { + last ID +} + +// get allocates an ID and returns it. IDs are always > 0. +func (a *idAlloc) get() ID { + x := a.last + x++ + if x == 1<<31-1 { + panic("too many ids for this function") + } + a.last = x + return x +} + +// num returns the maximum ID ever returned + 1. +func (a *idAlloc) num() int { + return int(a.last + 1) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/layout.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/layout.go new file mode 100644 index 0000000000000000000000000000000000000000..e4a8c6ffbf0dde5d4958c177642547216c1f2cdf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/layout.go @@ -0,0 +1,185 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// layout orders basic blocks in f with the goal of minimizing control flow instructions. +// After this phase returns, the order of f.Blocks matters and is the order +// in which those blocks will appear in the assembly output. +func layout(f *Func) { + f.Blocks = layoutOrder(f) +} + +// Register allocation may use a different order which has constraints +// imposed by the linear-scan algorithm. +func layoutRegallocOrder(f *Func) []*Block { + // remnant of an experiment; perhaps there will be another. 
+ return layoutOrder(f) +} + +func layoutOrder(f *Func) []*Block { + order := make([]*Block, 0, f.NumBlocks()) + scheduled := f.Cache.allocBoolSlice(f.NumBlocks()) + defer f.Cache.freeBoolSlice(scheduled) + idToBlock := f.Cache.allocBlockSlice(f.NumBlocks()) + defer f.Cache.freeBlockSlice(idToBlock) + indegree := f.Cache.allocIntSlice(f.NumBlocks()) + defer f.Cache.freeIntSlice(indegree) + posdegree := f.newSparseSet(f.NumBlocks()) // blocks with positive remaining degree + defer f.retSparseSet(posdegree) + // blocks with zero remaining degree. Use slice to simulate a LIFO queue to implement + // the depth-first topology sorting algorithm. + var zerodegree []ID + // LIFO queue. Track the successor blocks of the scheduled block so that when we + // encounter loops, we choose to schedule the successor block of the most recently + // scheduled block. + var succs []ID + exit := f.newSparseSet(f.NumBlocks()) // exit blocks + defer f.retSparseSet(exit) + + // Populate idToBlock and find exit blocks. + for _, b := range f.Blocks { + idToBlock[b.ID] = b + if b.Kind == BlockExit { + exit.add(b.ID) + } + } + + // Expand exit to include blocks post-dominated by exit blocks. + for { + changed := false + for _, id := range exit.contents() { + b := idToBlock[id] + NextPred: + for _, pe := range b.Preds { + p := pe.b + if exit.contains(p.ID) { + continue + } + for _, s := range p.Succs { + if !exit.contains(s.b.ID) { + continue NextPred + } + } + // All Succs are in exit; add p. + exit.add(p.ID) + changed = true + } + } + if !changed { + break + } + } + + // Initialize indegree of each block + for _, b := range f.Blocks { + if exit.contains(b.ID) { + // exit blocks are always scheduled last + continue + } + indegree[b.ID] = len(b.Preds) + if len(b.Preds) == 0 { + // Push an element to the tail of the queue. 
+ zerodegree = append(zerodegree, b.ID) + } else { + posdegree.add(b.ID) + } + } + + bid := f.Entry.ID +blockloop: + for { + // add block to schedule + b := idToBlock[bid] + order = append(order, b) + scheduled[bid] = true + if len(order) == len(f.Blocks) { + break + } + + // Here, the order of traversing the b.Succs affects the direction in which the topological + // sort advances in depth. Take the following cfg as an example, regardless of other factors. + // b1 + // 0/ \1 + // b2 b3 + // Traverse b.Succs in order, the right child node b3 will be scheduled immediately after + // b1, traverse b.Succs in reverse order, the left child node b2 will be scheduled + // immediately after b1. The test results show that reverse traversal performs a little + // better. + // Note: You need to consider both layout and register allocation when testing performance. + for i := len(b.Succs) - 1; i >= 0; i-- { + c := b.Succs[i].b + indegree[c.ID]-- + if indegree[c.ID] == 0 { + posdegree.remove(c.ID) + zerodegree = append(zerodegree, c.ID) + } else { + succs = append(succs, c.ID) + } + } + + // Pick the next block to schedule + // Pick among the successor blocks that have not been scheduled yet. + + // Use likely direction if we have it. + var likely *Block + switch b.Likely { + case BranchLikely: + likely = b.Succs[0].b + case BranchUnlikely: + likely = b.Succs[1].b + } + if likely != nil && !scheduled[likely.ID] { + bid = likely.ID + continue + } + + // Use degree for now. + bid = 0 + // TODO: improve this part + // No successor of the previously scheduled block works. + // Pick a zero-degree block if we can. + for len(zerodegree) > 0 { + // Pop an element from the tail of the queue. + cid := zerodegree[len(zerodegree)-1] + zerodegree = zerodegree[:len(zerodegree)-1] + if !scheduled[cid] { + bid = cid + continue blockloop + } + } + + // Still nothing, pick the unscheduled successor block encountered most recently. 
+ for len(succs) > 0 { + // Pop an element from the tail of the queue. + cid := succs[len(succs)-1] + succs = succs[:len(succs)-1] + if !scheduled[cid] { + bid = cid + continue blockloop + } + } + + // Still nothing, pick any non-exit block. + for posdegree.size() > 0 { + cid := posdegree.pop() + if !scheduled[cid] { + bid = cid + continue blockloop + } + } + // Pick any exit block. + // TODO: Order these to minimize jump distances? + for { + cid := exit.pop() + if !scheduled[cid] { + bid = cid + continue blockloop + } + } + } + f.laidout = true + return order + //f.Blocks = order +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/lca.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/lca.go new file mode 100644 index 0000000000000000000000000000000000000000..6e7ad96d29d629162882ef5cfe7a5b4484f268c1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/lca.go @@ -0,0 +1,127 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "math/bits" +) + +// Code to compute lowest common ancestors in the dominator tree. +// https://en.wikipedia.org/wiki/Lowest_common_ancestor +// https://en.wikipedia.org/wiki/Range_minimum_query#Solution_using_constant_time_and_linearithmic_space + +// lcaRange is a data structure that can compute lowest common ancestor queries +// in O(n lg n) precomputed space and O(1) time per query. +type lcaRange struct { + // Additional information about each block (indexed by block ID). + blocks []lcaRangeBlock + + // Data structure for range minimum queries. + // rangeMin[k][i] contains the ID of the minimum depth block + // in the Euler tour from positions i to i+1< 0 { + n := len(q) - 1 + bid := q[n].bid + cid := q[n].cid + q = q[:n] + + // Add block to tour. 
+ blocks[bid].pos = int32(len(tour)) + tour = append(tour, bid) + + // Proceed down next child edge (if any). + if cid == 0 { + // This is our first visit to b. Set its depth. + blocks[bid].depth = blocks[blocks[bid].parent].depth + 1 + // Then explore its first child. + cid = blocks[bid].firstChild + } else { + // We've seen b before. Explore the next child. + cid = blocks[cid].sibling + } + if cid != 0 { + q = append(q, queueEntry{bid, cid}, queueEntry{cid, 0}) + } + } + + // Compute fast range-minimum query data structure + rangeMin := make([][]ID, 0, bits.Len64(uint64(len(tour)))) + rangeMin = append(rangeMin, tour) // 1-size windows are just the tour itself. + for logS, s := 1, 2; s < len(tour); logS, s = logS+1, s*2 { + r := make([]ID, len(tour)-s+1) + for i := 0; i < len(tour)-s+1; i++ { + bid := rangeMin[logS-1][i] + bid2 := rangeMin[logS-1][i+s/2] + if blocks[bid2].depth < blocks[bid].depth { + bid = bid2 + } + r[i] = bid + } + rangeMin = append(rangeMin, r) + } + + return &lcaRange{blocks: blocks, rangeMin: rangeMin} +} + +// find returns the lowest common ancestor of a and b. +func (lca *lcaRange) find(a, b *Block) *Block { + if a == b { + return a + } + // Find the positions of a and b in the Euler tour. + p1 := lca.blocks[a.ID].pos + p2 := lca.blocks[b.ID].pos + if p1 > p2 { + p1, p2 = p2, p1 + } + + // The lowest common ancestor is the minimum depth block + // on the tour from p1 to p2. We've precomputed minimum + // depth blocks for powers-of-two subsequences of the tour. + // Combine the right two precomputed values to get the answer. 
+ logS := uint(log64(int64(p2 - p1))) + bid1 := lca.rangeMin[logS][p1] + bid2 := lca.rangeMin[logS][p2-1< db { + da-- + a = lca.parent[a.ID] + } + for da < db { + db-- + b = lca.parent[b.ID] + } + for a != b { + a = lca.parent[a.ID] + b = lca.parent[b.ID] + } + return a +} + +func (lca *lcaEasy) depth(b *Block) int { + n := 0 + for b != nil { + b = lca.parent[b.ID] + n++ + } + return n +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/likelyadjust.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/likelyadjust.go new file mode 100644 index 0000000000000000000000000000000000000000..1d0e53cf5b60862a69829b65120717bb5e21a8dd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/likelyadjust.go @@ -0,0 +1,580 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "fmt" +) + +type loop struct { + header *Block // The header node of this (reducible) loop + outer *loop // loop containing this loop + + // By default, children, exits, and depth are not initialized. + children []*loop // loops nested directly within this loop. Initialized by assembleChildren(). + exits []*Block // exits records blocks reached by exits from this loop. Initialized by findExits(). + + // Next three fields used by regalloc and/or + // aid in computation of inner-ness and list of blocks. + nBlocks int32 // Number of blocks in this loop but not within inner loops + depth int16 // Nesting depth of the loop; 1 is outermost. Initialized by calculateDepths(). + isInner bool // True if never discovered to contain a loop + + // register allocation uses this. 
+ containsUnavoidableCall bool // True if all paths through the loop have a call +} + +// outerinner records that outer contains inner +func (sdom SparseTree) outerinner(outer, inner *loop) { + // There could be other outer loops found in some random order, + // locate the new outer loop appropriately among them. + + // Outer loop headers dominate inner loop headers. + // Use this to put the "new" "outer" loop in the right place. + oldouter := inner.outer + for oldouter != nil && sdom.isAncestor(outer.header, oldouter.header) { + inner = oldouter + oldouter = inner.outer + } + if outer == oldouter { + return + } + if oldouter != nil { + sdom.outerinner(oldouter, outer) + } + + inner.outer = outer + outer.isInner = false +} + +func checkContainsCall(bb *Block) bool { + if bb.Kind == BlockDefer { + return true + } + for _, v := range bb.Values { + if opcodeTable[v.Op].call { + return true + } + } + return false +} + +type loopnest struct { + f *Func + b2l []*loop + po []*Block + sdom SparseTree + loops []*loop + hasIrreducible bool // TODO current treatment of irreducible loops is very flaky, if accurate loops are needed, must punt at function level. + + // Record which of the lazily initialized fields have actually been initialized. 
+ initializedChildren, initializedDepth, initializedExits bool +} + +func min8(a, b int8) int8 { + if a < b { + return a + } + return b +} + +func max8(a, b int8) int8 { + if a > b { + return a + } + return b +} + +const ( + blDEFAULT = 0 + blMin = blDEFAULT + blCALL = 1 + blRET = 2 + blEXIT = 3 +) + +var bllikelies = [4]string{"default", "call", "ret", "exit"} + +func describePredictionAgrees(b *Block, prediction BranchPrediction) string { + s := "" + if prediction == b.Likely { + s = " (agrees with previous)" + } else if b.Likely != BranchUnknown { + s = " (disagrees with previous, ignored)" + } + return s +} + +func describeBranchPrediction(f *Func, b *Block, likely, not int8, prediction BranchPrediction) { + f.Warnl(b.Pos, "Branch prediction rule %s < %s%s", + bllikelies[likely-blMin], bllikelies[not-blMin], describePredictionAgrees(b, prediction)) +} + +func likelyadjust(f *Func) { + // The values assigned to certain and local only matter + // in their rank order. 0 is default, more positive + // is less likely. It's possible to assign a negative + // unlikeliness (though not currently the case). + certain := f.Cache.allocInt8Slice(f.NumBlocks()) // In the long run, all outcomes are at least this bad. Mainly for Exit + defer f.Cache.freeInt8Slice(certain) + local := f.Cache.allocInt8Slice(f.NumBlocks()) // for our immediate predecessors. + defer f.Cache.freeInt8Slice(local) + + po := f.postorder() + nest := f.loopnest() + b2l := nest.b2l + + for _, b := range po { + switch b.Kind { + case BlockExit: + // Very unlikely. + local[b.ID] = blEXIT + certain[b.ID] = blEXIT + + // Ret, it depends. + case BlockRet, BlockRetJmp: + local[b.ID] = blRET + certain[b.ID] = blRET + + // Calls. TODO not all calls are equal, names give useful clues. + // Any name-based heuristics are only relative to other calls, + // and less influential than inferences from loop structure. 
+ case BlockDefer: + local[b.ID] = blCALL + certain[b.ID] = max8(blCALL, certain[b.Succs[0].b.ID]) + + default: + if len(b.Succs) == 1 { + certain[b.ID] = certain[b.Succs[0].b.ID] + } else if len(b.Succs) == 2 { + // If successor is an unvisited backedge, it's in loop and we don't care. + // Its default unlikely is also zero which is consistent with favoring loop edges. + // Notice that this can act like a "reset" on unlikeliness at loops; the + // default "everything returns" unlikeliness is erased by min with the + // backedge likeliness; however a loop with calls on every path will be + // tagged with call cost. Net effect is that loop entry is favored. + b0 := b.Succs[0].b.ID + b1 := b.Succs[1].b.ID + certain[b.ID] = min8(certain[b0], certain[b1]) + + l := b2l[b.ID] + l0 := b2l[b0] + l1 := b2l[b1] + + prediction := b.Likely + // Weak loop heuristic -- both source and at least one dest are in loops, + // and there is a difference in the destinations. + // TODO what is best arrangement for nested loops? + if l != nil && l0 != l1 { + noprediction := false + switch { + // prefer not to exit loops + case l1 == nil: + prediction = BranchLikely + case l0 == nil: + prediction = BranchUnlikely + + // prefer to stay in loop, not exit to outer. + case l == l0: + prediction = BranchLikely + case l == l1: + prediction = BranchUnlikely + default: + noprediction = true + } + if f.pass.debug > 0 && !noprediction { + f.Warnl(b.Pos, "Branch prediction rule stay in loop%s", + describePredictionAgrees(b, prediction)) + } + + } else { + // Lacking loop structure, fall back on heuristics. 
+ if certain[b1] > certain[b0] { + prediction = BranchLikely + if f.pass.debug > 0 { + describeBranchPrediction(f, b, certain[b0], certain[b1], prediction) + } + } else if certain[b0] > certain[b1] { + prediction = BranchUnlikely + if f.pass.debug > 0 { + describeBranchPrediction(f, b, certain[b1], certain[b0], prediction) + } + } else if local[b1] > local[b0] { + prediction = BranchLikely + if f.pass.debug > 0 { + describeBranchPrediction(f, b, local[b0], local[b1], prediction) + } + } else if local[b0] > local[b1] { + prediction = BranchUnlikely + if f.pass.debug > 0 { + describeBranchPrediction(f, b, local[b1], local[b0], prediction) + } + } + } + if b.Likely != prediction { + if b.Likely == BranchUnknown { + b.Likely = prediction + } + } + } + // Look for calls in the block. If there is one, make this block unlikely. + for _, v := range b.Values { + if opcodeTable[v.Op].call { + local[b.ID] = blCALL + certain[b.ID] = max8(blCALL, certain[b.Succs[0].b.ID]) + break + } + } + } + if f.pass.debug > 2 { + f.Warnl(b.Pos, "BP: Block %s, local=%s, certain=%s", b, bllikelies[local[b.ID]-blMin], bllikelies[certain[b.ID]-blMin]) + } + + } +} + +func (l *loop) String() string { + return fmt.Sprintf("hdr:%s", l.header) +} + +func (l *loop) LongString() string { + i := "" + o := "" + if l.isInner { + i = ", INNER" + } + if l.outer != nil { + o = ", o=" + l.outer.header.String() + } + return fmt.Sprintf("hdr:%s%s%s", l.header, i, o) +} + +func (l *loop) isWithinOrEq(ll *loop) bool { + if ll == nil { // nil means whole program + return true + } + for ; l != nil; l = l.outer { + if l == ll { + return true + } + } + return false +} + +// nearestOuterLoop returns the outer loop of loop most nearly +// containing block b; the header must dominate b. loop itself +// is assumed to not be that loop. For acceptable performance, +// we're relying on loop nests to not be terribly deep. 
+func (l *loop) nearestOuterLoop(sdom SparseTree, b *Block) *loop { + var o *loop + for o = l.outer; o != nil && !sdom.IsAncestorEq(o.header, b); o = o.outer { + } + return o +} + +func loopnestfor(f *Func) *loopnest { + po := f.postorder() + sdom := f.Sdom() + b2l := make([]*loop, f.NumBlocks()) + loops := make([]*loop, 0) + visited := f.Cache.allocBoolSlice(f.NumBlocks()) + defer f.Cache.freeBoolSlice(visited) + sawIrred := false + + if f.pass.debug > 2 { + fmt.Printf("loop finding in %s\n", f.Name) + } + + // Reducible-loop-nest-finding. + for _, b := range po { + if f.pass != nil && f.pass.debug > 3 { + fmt.Printf("loop finding at %s\n", b) + } + + var innermost *loop // innermost header reachable from this block + + // IF any successor s of b is in a loop headed by h + // AND h dominates b + // THEN b is in the loop headed by h. + // + // Choose the first/innermost such h. + // + // IF s itself dominates b, then s is a loop header; + // and there may be more than one such s. + // Since there's at most 2 successors, the inner/outer ordering + // between them can be established with simple comparisons. + for _, e := range b.Succs { + bb := e.b + l := b2l[bb.ID] + + if sdom.IsAncestorEq(bb, b) { // Found a loop header + if f.pass != nil && f.pass.debug > 4 { + fmt.Printf("loop finding succ %s of %s is header\n", bb.String(), b.String()) + } + if l == nil { + l = &loop{header: bb, isInner: true} + loops = append(loops, l) + b2l[bb.ID] = l + } + } else if !visited[bb.ID] { // Found an irreducible loop + sawIrred = true + if f.pass != nil && f.pass.debug > 4 { + fmt.Printf("loop finding succ %s of %s is IRRED, in %s\n", bb.String(), b.String(), f.Name) + } + } else if l != nil { + // TODO handle case where l is irreducible. + // Perhaps a loop header is inherited. + // is there any loop containing our successor whose + // header dominates b? 
+ if !sdom.IsAncestorEq(l.header, b) { + l = l.nearestOuterLoop(sdom, b) + } + if f.pass != nil && f.pass.debug > 4 { + if l == nil { + fmt.Printf("loop finding succ %s of %s has no loop\n", bb.String(), b.String()) + } else { + fmt.Printf("loop finding succ %s of %s provides loop with header %s\n", bb.String(), b.String(), l.header.String()) + } + } + } else { // No loop + if f.pass != nil && f.pass.debug > 4 { + fmt.Printf("loop finding succ %s of %s has no loop\n", bb.String(), b.String()) + } + + } + + if l == nil || innermost == l { + continue + } + + if innermost == nil { + innermost = l + continue + } + + if sdom.isAncestor(innermost.header, l.header) { + sdom.outerinner(innermost, l) + innermost = l + } else if sdom.isAncestor(l.header, innermost.header) { + sdom.outerinner(l, innermost) + } + } + + if innermost != nil { + b2l[b.ID] = innermost + innermost.nBlocks++ + } + visited[b.ID] = true + } + + ln := &loopnest{f: f, b2l: b2l, po: po, sdom: sdom, loops: loops, hasIrreducible: sawIrred} + + // Calculate containsUnavoidableCall for regalloc + dominatedByCall := f.Cache.allocBoolSlice(f.NumBlocks()) + defer f.Cache.freeBoolSlice(dominatedByCall) + for _, b := range po { + if checkContainsCall(b) { + dominatedByCall[b.ID] = true + } + } + // Run dfs to find path through the loop that avoids all calls. + // Such path either escapes loop or return back to header. + // It isn't enough to have exit not dominated by any call, for example: + // ... some loop + // call1 call2 + // \ / + // exit + // ... + // exit is not dominated by any call, but we don't have call-free path to it. + for _, l := range loops { + // Header contains call. + if dominatedByCall[l.header.ID] { + l.containsUnavoidableCall = true + continue + } + callfreepath := false + tovisit := make([]*Block, 0, len(l.header.Succs)) + // Push all non-loop non-exit successors of header onto toVisit. 
+ for _, s := range l.header.Succs { + nb := s.Block() + // This corresponds to loop with zero iterations. + if !l.iterationEnd(nb, b2l) { + tovisit = append(tovisit, nb) + } + } + for len(tovisit) > 0 { + cur := tovisit[len(tovisit)-1] + tovisit = tovisit[:len(tovisit)-1] + if dominatedByCall[cur.ID] { + continue + } + // Record visited in dominatedByCall. + dominatedByCall[cur.ID] = true + for _, s := range cur.Succs { + nb := s.Block() + if l.iterationEnd(nb, b2l) { + callfreepath = true + } + if !dominatedByCall[nb.ID] { + tovisit = append(tovisit, nb) + } + + } + if callfreepath { + break + } + } + if !callfreepath { + l.containsUnavoidableCall = true + } + } + + // Curious about the loopiness? "-d=ssa/likelyadjust/stats" + if f.pass != nil && f.pass.stats > 0 && len(loops) > 0 { + ln.assembleChildren() + ln.calculateDepths() + ln.findExits() + + // Note stats for non-innermost loops are slightly flawed because + // they don't account for inner loop exits that span multiple levels. + + for _, l := range loops { + x := len(l.exits) + cf := 0 + if !l.containsUnavoidableCall { + cf = 1 + } + inner := 0 + if l.isInner { + inner++ + } + + f.LogStat("loopstats:", + l.depth, "depth", x, "exits", + inner, "is_inner", cf, "always_calls", l.nBlocks, "n_blocks") + } + } + + if f.pass != nil && f.pass.debug > 1 && len(loops) > 0 { + fmt.Printf("Loops in %s:\n", f.Name) + for _, l := range loops { + fmt.Printf("%s, b=", l.LongString()) + for _, b := range f.Blocks { + if b2l[b.ID] == l { + fmt.Printf(" %s", b) + } + } + fmt.Print("\n") + } + fmt.Printf("Nonloop blocks in %s:", f.Name) + for _, b := range f.Blocks { + if b2l[b.ID] == nil { + fmt.Printf(" %s", b) + } + } + fmt.Print("\n") + } + return ln +} + +// assembleChildren initializes the children field of each +// loop in the nest. 
Loop A is a child of loop B if A is +// directly nested within B (based on the reducible-loops +// detection above) +func (ln *loopnest) assembleChildren() { + if ln.initializedChildren { + return + } + for _, l := range ln.loops { + if l.outer != nil { + l.outer.children = append(l.outer.children, l) + } + } + ln.initializedChildren = true +} + +// calculateDepths uses the children field of loops +// to determine the nesting depth (outer=1) of each +// loop. This is helpful for finding exit edges. +func (ln *loopnest) calculateDepths() { + if ln.initializedDepth { + return + } + ln.assembleChildren() + for _, l := range ln.loops { + if l.outer == nil { + l.setDepth(1) + } + } + ln.initializedDepth = true +} + +// findExits uses loop depth information to find the +// exits from a loop. +func (ln *loopnest) findExits() { + if ln.initializedExits { + return + } + ln.calculateDepths() + b2l := ln.b2l + for _, b := range ln.po { + l := b2l[b.ID] + if l != nil && len(b.Succs) == 2 { + sl := b2l[b.Succs[0].b.ID] + if recordIfExit(l, sl, b.Succs[0].b) { + continue + } + sl = b2l[b.Succs[1].b.ID] + if recordIfExit(l, sl, b.Succs[1].b) { + continue + } + } + } + ln.initializedExits = true +} + +// depth returns the loop nesting level of block b. +func (ln *loopnest) depth(b ID) int16 { + if l := ln.b2l[b]; l != nil { + return l.depth + } + return 0 +} + +// recordIfExit checks sl (the loop containing b) to see if it +// is outside of loop l, and if so, records b as an exit block +// from l and returns true. +func recordIfExit(l, sl *loop, b *Block) bool { + if sl != l { + if sl == nil || sl.depth <= l.depth { + l.exits = append(l.exits, b) + return true + } + // sl is not nil, and is deeper than l + // it's possible for this to be a goto into an irreducible loop made from gotos. 
+ for sl.depth > l.depth { + sl = sl.outer + } + if sl != l { + l.exits = append(l.exits, b) + return true + } + } + return false +} + +func (l *loop) setDepth(d int16) { + l.depth = d + for _, c := range l.children { + c.setDepth(d + 1) + } +} + +// iterationEnd checks if block b ends iteration of loop l. +// Ending iteration means either escaping to outer loop/code or +// going back to header +func (l *loop) iterationEnd(b *Block, b2l []*loop) bool { + return b == l.header || b2l[b.ID] == nil || (b2l[b.ID] != l && b2l[b.ID].depth <= l.depth) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/location.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/location.go new file mode 100644 index 0000000000000000000000000000000000000000..00aea879363d6bda91faee38f188dada7d712a2e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/location.go @@ -0,0 +1,109 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "fmt" +) + +// A place that an ssa variable can reside. +type Location interface { + String() string // name to use in assembly templates: AX, 16(SP), ... +} + +// A Register is a machine register, like AX. +// They are numbered densely from 0 (for each architecture). +type Register struct { + num int32 // dense numbering + objNum int16 // register number from cmd/internal/obj/$ARCH + gcNum int16 // GC register map number (dense numbering of registers that can contain pointers) + name string +} + +func (r *Register) String() string { + return r.name +} + +// ObjNum returns the register number from cmd/internal/obj/$ARCH that +// corresponds to this register. 
+func (r *Register) ObjNum() int16 { + return r.objNum +} + +// GCNum returns the runtime GC register index of r, or -1 if this +// register can't contain pointers. +func (r *Register) GCNum() int16 { + return r.gcNum +} + +// A LocalSlot is a location in the stack frame, which identifies and stores +// part or all of a PPARAM, PPARAMOUT, or PAUTO ONAME node. +// It can represent a whole variable, part of a larger stack slot, or part of a +// variable that has been decomposed into multiple stack slots. +// As an example, a string could have the following configurations: +// +// stack layout LocalSlots +// +// Optimizations are disabled. s is on the stack and represented in its entirety. +// [ ------- s string ---- ] { N: s, Type: string, Off: 0 } +// +// s was not decomposed, but the SSA operates on its parts individually, so +// there is a LocalSlot for each of its fields that points into the single stack slot. +// [ ------- s string ---- ] { N: s, Type: *uint8, Off: 0 }, {N: s, Type: int, Off: 8} +// +// s was decomposed. Each of its fields is in its own stack slot and has its own LocalSLot. +// [ ptr *uint8 ] [ len int] { N: ptr, Type: *uint8, Off: 0, SplitOf: parent, SplitOffset: 0}, +// { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8} +// parent = &{N: s, Type: string} +type LocalSlot struct { + N *ir.Name // an ONAME *ir.Name representing a stack location. + Type *types.Type // type of slot + Off int64 // offset of slot in N + + SplitOf *LocalSlot // slot is a decomposition of SplitOf + SplitOffset int64 // .. at this offset. 
+} + +func (s LocalSlot) String() string { + if s.Off == 0 { + return fmt.Sprintf("%v[%v]", s.N, s.Type) + } + return fmt.Sprintf("%v+%d[%v]", s.N, s.Off, s.Type) +} + +type LocPair [2]Location + +func (t LocPair) String() string { + n0, n1 := "nil", "nil" + if t[0] != nil { + n0 = t[0].String() + } + if t[1] != nil { + n1 = t[1].String() + } + return fmt.Sprintf("<%s,%s>", n0, n1) +} + +type LocResults []Location + +func (t LocResults) String() string { + s := "" + a := "<" + for _, r := range t { + a += s + s = "," + a += r.String() + } + a += ">" + return a +} + +type Spill struct { + Type *types.Type + Offset int64 + Reg int16 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/loopbce.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/loopbce.go new file mode 100644 index 0000000000000000000000000000000000000000..dd1f39dbef74398f7cf9e338d26d6f9b1a8f4ca7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/loopbce.go @@ -0,0 +1,437 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/types" + "fmt" +) + +type indVarFlags uint8 + +const ( + indVarMinExc indVarFlags = 1 << iota // minimum value is exclusive (default: inclusive) + indVarMaxInc // maximum value is inclusive (default: exclusive) + indVarCountDown // if set the iteration starts at max and count towards min (default: min towards max) +) + +type indVar struct { + ind *Value // induction variable + nxt *Value // the incremented variable + min *Value // minimum value, inclusive/exclusive depends on flags + max *Value // maximum value, inclusive/exclusive depends on flags + entry *Block // entry block in the loop. 
+ flags indVarFlags + // Invariant: for all blocks strictly dominated by entry: + // min <= ind < max [if flags == 0] + // min < ind < max [if flags == indVarMinExc] + // min <= ind <= max [if flags == indVarMaxInc] + // min < ind <= max [if flags == indVarMinExc|indVarMaxInc] +} + +// parseIndVar checks whether the SSA value passed as argument is a valid induction +// variable, and, if so, extracts: +// - the minimum bound +// - the increment value +// - the "next" value (SSA value that is Phi'd into the induction variable every loop) +// +// Currently, we detect induction variables that match (Phi min nxt), +// with nxt being (Add inc ind). +// If it can't parse the induction variable correctly, it returns (nil, nil, nil). +func parseIndVar(ind *Value) (min, inc, nxt *Value) { + if ind.Op != OpPhi { + return + } + + if n := ind.Args[0]; (n.Op == OpAdd64 || n.Op == OpAdd32 || n.Op == OpAdd16 || n.Op == OpAdd8) && (n.Args[0] == ind || n.Args[1] == ind) { + min, nxt = ind.Args[1], n + } else if n := ind.Args[1]; (n.Op == OpAdd64 || n.Op == OpAdd32 || n.Op == OpAdd16 || n.Op == OpAdd8) && (n.Args[0] == ind || n.Args[1] == ind) { + min, nxt = ind.Args[0], n + } else { + // Not a recognized induction variable. + return + } + + if nxt.Args[0] == ind { // nxt = ind + inc + inc = nxt.Args[1] + } else if nxt.Args[1] == ind { // nxt = inc + ind + inc = nxt.Args[0] + } else { + panic("unreachable") // one of the cases must be true from the above. + } + + return +} + +// findIndVar finds induction variables in a function. 
+// +// Look for variables and blocks that satisfy the following +// +// loop: +// ind = (Phi min nxt), +// if ind < max +// then goto enter_loop +// else goto exit_loop +// +// enter_loop: +// do something +// nxt = inc + ind +// goto loop +// +// exit_loop: +func findIndVar(f *Func) []indVar { + var iv []indVar + sdom := f.Sdom() + + for _, b := range f.Blocks { + if b.Kind != BlockIf || len(b.Preds) != 2 { + continue + } + + var ind *Value // induction variable + var init *Value // starting value + var limit *Value // ending value + + // Check that the control if it either ind = 0; i-- + init, inc, nxt = parseIndVar(limit) + if init == nil { + // No recognized induction variable on either operand + continue + } + + // Ok, the arguments were reversed. Swap them, and remember that we're + // looking at an ind >/>= loop (so the induction must be decrementing). + ind, limit = limit, ind + less = false + } + + if ind.Block != b { + // TODO: Could be extended to include disjointed loop headers. + // I don't think this is causing missed optimizations in real world code often. + // See https://go.dev/issue/63955 + continue + } + + // Expect the increment to be a nonzero constant. + if !inc.isGenericIntConst() { + continue + } + step := inc.AuxInt + if step == 0 { + continue + } + + // Increment sign must match comparison direction. + // When incrementing, the termination comparison must be ind />= limit. + // See issue 26116. + if step > 0 && !less { + continue + } + if step < 0 && less { + continue + } + + // Up to now we extracted the induction variable (ind), + // the increment delta (inc), the temporary sum (nxt), + // the initial value (init) and the limiting value (limit). + // + // We also know that ind has the form (Phi init nxt) where + // nxt is (Add inc nxt) which means: 1) inc dominates nxt + // and 2) there is a loop starting at inc and containing nxt. 
+ // + // We need to prove that the induction variable is incremented + // only when it's smaller than the limiting value. + // Two conditions must happen listed below to accept ind + // as an induction variable. + + // First condition: loop entry has a single predecessor, which + // is the header block. This implies that b.Succs[0] is + // reached iff ind < limit. + if len(b.Succs[0].b.Preds) != 1 { + // b.Succs[1] must exit the loop. + continue + } + + // Second condition: b.Succs[0] dominates nxt so that + // nxt is computed when inc < limit. + if !sdom.IsAncestorEq(b.Succs[0].b, nxt.Block) { + // inc+ind can only be reached through the branch that enters the loop. + continue + } + + // Check for overflow/underflow. We need to make sure that inc never causes + // the induction variable to wrap around. + // We use a function wrapper here for easy return true / return false / keep going logic. + // This function returns true if the increment will never overflow/underflow. + ok := func() bool { + if step > 0 { + if limit.isGenericIntConst() { + // Figure out the actual largest value. + v := limit.AuxInt + if !inclusive { + if v == minSignedValue(limit.Type) { + return false // < minint is never satisfiable. + } + v-- + } + if init.isGenericIntConst() { + // Use stride to compute a better lower limit. + if init.AuxInt > v { + return false + } + v = addU(init.AuxInt, diff(v, init.AuxInt)/uint64(step)*uint64(step)) + } + if addWillOverflow(v, step) { + return false + } + if inclusive && v != limit.AuxInt || !inclusive && v+1 != limit.AuxInt { + // We know a better limit than the programmer did. Use our limit instead. + limit = f.constVal(limit.Op, limit.Type, v, true) + inclusive = true + } + return true + } + if step == 1 && !inclusive { + // Can't overflow because maxint is never a possible value. + return true + } + // If the limit is not a constant, check to see if it is a + // negative offset from a known non-negative value. 
+ knn, k := findKNN(limit) + if knn == nil || k < 0 { + return false + } + // limit == (something nonnegative) - k. That subtraction can't underflow, so + // we can trust it. + if inclusive { + // ind <= knn - k cannot overflow if step is at most k + return step <= k + } + // ind < knn - k cannot overflow if step is at most k+1 + return step <= k+1 && k != maxSignedValue(limit.Type) + } else { // step < 0 + if limit.Op == OpConst64 { + // Figure out the actual smallest value. + v := limit.AuxInt + if !inclusive { + if v == maxSignedValue(limit.Type) { + return false // > maxint is never satisfiable. + } + v++ + } + if init.isGenericIntConst() { + // Use stride to compute a better lower limit. + if init.AuxInt < v { + return false + } + v = subU(init.AuxInt, diff(init.AuxInt, v)/uint64(-step)*uint64(-step)) + } + if subWillUnderflow(v, -step) { + return false + } + if inclusive && v != limit.AuxInt || !inclusive && v-1 != limit.AuxInt { + // We know a better limit than the programmer did. Use our limit instead. + limit = f.constVal(limit.Op, limit.Type, v, true) + inclusive = true + } + return true + } + if step == -1 && !inclusive { + // Can't underflow because minint is never a possible value. 
+ return true + } + } + return false + + } + + if ok() { + flags := indVarFlags(0) + var min, max *Value + if step > 0 { + min = init + max = limit + if inclusive { + flags |= indVarMaxInc + } + } else { + min = limit + max = init + flags |= indVarMaxInc + if !inclusive { + flags |= indVarMinExc + } + flags |= indVarCountDown + step = -step + } + if f.pass.debug >= 1 { + printIndVar(b, ind, min, max, step, flags) + } + + iv = append(iv, indVar{ + ind: ind, + nxt: nxt, + min: min, + max: max, + entry: b.Succs[0].b, + flags: flags, + }) + b.Logf("found induction variable %v (inc = %v, min = %v, max = %v)\n", ind, inc, min, max) + } + + // TODO: other unrolling idioms + // for i := 0; i < KNN - KNN % k ; i += k + // for i := 0; i < KNN&^(k-1) ; i += k // k a power of 2 + // for i := 0; i < KNN&(-k) ; i += k // k a power of 2 + } + + return iv +} + +// addWillOverflow reports whether x+y would result in a value more than maxint. +func addWillOverflow(x, y int64) bool { + return x+y < x +} + +// subWillUnderflow reports whether x-y would result in a value less than minint. +func subWillUnderflow(x, y int64) bool { + return x-y > x +} + +// diff returns x-y as a uint64. Requires x>=y. +func diff(x, y int64) uint64 { + if x < y { + base.Fatalf("diff %d - %d underflowed", x, y) + } + return uint64(x - y) +} + +// addU returns x+y. Requires that x+y does not overflow an int64. +func addU(x int64, y uint64) int64 { + if y >= 1<<63 { + if x >= 0 { + base.Fatalf("addU overflowed %d + %d", x, y) + } + x += 1<<63 - 1 + x += 1 + y -= 1 << 63 + } + if addWillOverflow(x, int64(y)) { + base.Fatalf("addU overflowed %d + %d", x, y) + } + return x + int64(y) +} + +// subU returns x-y. Requires that x-y does not underflow an int64. 
+func subU(x int64, y uint64) int64 { + if y >= 1<<63 { + if x < 0 { + base.Fatalf("subU underflowed %d - %d", x, y) + } + x -= 1<<63 - 1 + x -= 1 + y -= 1 << 63 + } + if subWillUnderflow(x, int64(y)) { + base.Fatalf("subU underflowed %d - %d", x, y) + } + return x - int64(y) +} + +// if v is known to be x - c, where x is known to be nonnegative and c is a +// constant, return x, c. Otherwise return nil, 0. +func findKNN(v *Value) (*Value, int64) { + var x, y *Value + x = v + switch v.Op { + case OpSub64, OpSub32, OpSub16, OpSub8: + x = v.Args[0] + y = v.Args[1] + + case OpAdd64, OpAdd32, OpAdd16, OpAdd8: + x = v.Args[0] + y = v.Args[1] + if x.isGenericIntConst() { + x, y = y, x + } + } + switch x.Op { + case OpSliceLen, OpStringLen, OpSliceCap: + default: + return nil, 0 + } + if y == nil { + return x, 0 + } + if !y.isGenericIntConst() { + return nil, 0 + } + if v.Op == OpAdd64 || v.Op == OpAdd32 || v.Op == OpAdd16 || v.Op == OpAdd8 { + return x, -y.AuxInt + } + return x, y.AuxInt +} + +func printIndVar(b *Block, i, min, max *Value, inc int64, flags indVarFlags) { + mb1, mb2 := "[", "]" + if flags&indVarMinExc != 0 { + mb1 = "(" + } + if flags&indVarMaxInc == 0 { + mb2 = ")" + } + + mlim1, mlim2 := fmt.Sprint(min.AuxInt), fmt.Sprint(max.AuxInt) + if !min.isGenericIntConst() { + if b.Func.pass.debug >= 2 { + mlim1 = fmt.Sprint(min) + } else { + mlim1 = "?" + } + } + if !max.isGenericIntConst() { + if b.Func.pass.debug >= 2 { + mlim2 = fmt.Sprint(max) + } else { + mlim2 = "?" 
+ } + } + extra := "" + if b.Func.pass.debug >= 2 { + extra = fmt.Sprintf(" (%s)", i) + } + b.Func.Warnl(b.Pos, "Induction variable: limits %v%v,%v%v, increment %d%s", mb1, mlim1, mlim2, mb2, inc, extra) +} + +func minSignedValue(t *types.Type) int64 { + return -1 << (t.Size()*8 - 1) +} + +func maxSignedValue(t *types.Type) int64 { + return 1<<((t.Size()*8)-1) - 1 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/loopreschedchecks.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/loopreschedchecks.go new file mode 100644 index 0000000000000000000000000000000000000000..0ac473d229474dbc7a6040edfb645df60f82175b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/loopreschedchecks.go @@ -0,0 +1,512 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/types" + "fmt" +) + +// an edgeMem records a backedge, together with the memory +// phi functions at the target of the backedge that must +// be updated when a rescheduling check replaces the backedge. +type edgeMem struct { + e Edge + m *Value // phi for memory at dest of e +} + +// a rewriteTarget is a value-argindex pair indicating +// where a rewrite is applied. Note that this is for values, +// not for block controls, because block controls are not targets +// for the rewrites performed in inserting rescheduling checks. +type rewriteTarget struct { + v *Value + i int +} + +type rewrite struct { + before, after *Value // before is the expected value before rewrite, after is the new value installed. + rewrites []rewriteTarget // all the targets for this rewrite. 
+} + +func (r *rewrite) String() string { + s := "\n\tbefore=" + r.before.String() + ", after=" + r.after.String() + for _, rw := range r.rewrites { + s += ", (i=" + fmt.Sprint(rw.i) + ", v=" + rw.v.LongString() + ")" + } + s += "\n" + return s +} + +// insertLoopReschedChecks inserts rescheduling checks on loop backedges. +func insertLoopReschedChecks(f *Func) { + // TODO: when split information is recorded in export data, insert checks only on backedges that can be reached on a split-call-free path. + + // Loop reschedule checks compare the stack pointer with + // the per-g stack bound. If the pointer appears invalid, + // that means a reschedule check is needed. + // + // Steps: + // 1. locate backedges. + // 2. Record memory definitions at block end so that + // the SSA graph for mem can be properly modified. + // 3. Ensure that phi functions that will-be-needed for mem + // are present in the graph, initially with trivial inputs. + // 4. Record all to-be-modified uses of mem; + // apply modifications (split into two steps to simplify and + // avoided nagging order-dependencies). + // 5. Rewrite backedges to include reschedule check, + // and modify destination phi function appropriately with new + // definitions for mem. + + if f.NoSplit { // nosplit functions don't reschedule. + return + } + + backedges := backedges(f) + if len(backedges) == 0 { // no backedges means no rescheduling checks. + return + } + + lastMems := findLastMems(f) + + idom := f.Idom() + po := f.postorder() + // The ordering in the dominator tree matters; it's important that + // the walk of the dominator tree also be a preorder (i.e., a node is + // visited only after all its non-backedge predecessors have been visited). 
+ sdom := newSparseOrderedTree(f, idom, po) + + if f.pass.debug > 1 { + fmt.Printf("before %s = %s\n", f.Name, sdom.treestructure(f.Entry)) + } + + tofixBackedges := []edgeMem{} + + for _, e := range backedges { // TODO: could filter here by calls in loops, if declared and inferred nosplit are recorded in export data. + tofixBackedges = append(tofixBackedges, edgeMem{e, nil}) + } + + // It's possible that there is no memory state (no global/pointer loads/stores or calls) + if lastMems[f.Entry.ID] == nil { + lastMems[f.Entry.ID] = f.Entry.NewValue0(f.Entry.Pos, OpInitMem, types.TypeMem) + } + + memDefsAtBlockEnds := f.Cache.allocValueSlice(f.NumBlocks()) // For each block, the mem def seen at its bottom. Could be from earlier block. + defer f.Cache.freeValueSlice(memDefsAtBlockEnds) + + // Propagate last mem definitions forward through successor blocks. + for i := len(po) - 1; i >= 0; i-- { + b := po[i] + mem := lastMems[b.ID] + for j := 0; mem == nil; j++ { // if there's no def, then there's no phi, so the visible mem is identical in all predecessors. + // loop because there might be backedges that haven't been visited yet. + mem = memDefsAtBlockEnds[b.Preds[j].b.ID] + } + memDefsAtBlockEnds[b.ID] = mem + if f.pass.debug > 2 { + fmt.Printf("memDefsAtBlockEnds[%s] = %s\n", b, mem) + } + } + + // Maps from block to newly-inserted phi function in block. + newmemphis := make(map[*Block]rewrite) + + // Insert phi functions as necessary for future changes to flow graph. + for i, emc := range tofixBackedges { + e := emc.e + h := e.b + + // find the phi function for the memory input at "h", if there is one. 
+ var headerMemPhi *Value // look for header mem phi + + for _, v := range h.Values { + if v.Op == OpPhi && v.Type.IsMemory() { + headerMemPhi = v + } + } + + if headerMemPhi == nil { + // if the header is nil, make a trivial phi from the dominator + mem0 := memDefsAtBlockEnds[idom[h.ID].ID] + headerMemPhi = newPhiFor(h, mem0) + newmemphis[h] = rewrite{before: mem0, after: headerMemPhi} + addDFphis(mem0, h, h, f, memDefsAtBlockEnds, newmemphis, sdom) + + } + tofixBackedges[i].m = headerMemPhi + + } + if f.pass.debug > 0 { + for b, r := range newmemphis { + fmt.Printf("before b=%s, rewrite=%s\n", b, r.String()) + } + } + + // dfPhiTargets notes inputs to phis in dominance frontiers that should not + // be rewritten as part of the dominated children of some outer rewrite. + dfPhiTargets := make(map[rewriteTarget]bool) + + rewriteNewPhis(f.Entry, f.Entry, f, memDefsAtBlockEnds, newmemphis, dfPhiTargets, sdom) + + if f.pass.debug > 0 { + for b, r := range newmemphis { + fmt.Printf("after b=%s, rewrite=%s\n", b, r.String()) + } + } + + // Apply collected rewrites. + for _, r := range newmemphis { + for _, rw := range r.rewrites { + rw.v.SetArg(rw.i, r.after) + } + } + + // Rewrite backedges to include reschedule checks. + for _, emc := range tofixBackedges { + e := emc.e + headerMemPhi := emc.m + h := e.b + i := e.i + p := h.Preds[i] + bb := p.b + mem0 := headerMemPhi.Args[i] + // bb e->p h, + // Because we're going to insert a rare-call, make sure the + // looping edge still looks likely. + likely := BranchLikely + if p.i != 0 { + likely = BranchUnlikely + } + if bb.Kind != BlockPlain { // backedges can be unconditional. 
e.g., if x { something; continue } + bb.Likely = likely + } + + // rewrite edge to include reschedule check + // existing edges: + // + // bb.Succs[p.i] == Edge{h, i} + // h.Preds[i] == p == Edge{bb,p.i} + // + // new block(s): + // test: + // if sp < g.limit { goto sched } + // goto join + // sched: + // mem1 := call resched (mem0) + // goto join + // join: + // mem2 := phi(mem0, mem1) + // goto h + // + // and correct arg i of headerMemPhi and headerCtrPhi + // + // EXCEPT: join block containing only phi functions is bad + // for the register allocator. Therefore, there is no + // join, and branches targeting join must instead target + // the header, and the other phi functions within header are + // adjusted for the additional input. + + test := f.NewBlock(BlockIf) + sched := f.NewBlock(BlockPlain) + + test.Pos = bb.Pos + sched.Pos = bb.Pos + + // if sp < g.limit { goto sched } + // goto header + + cfgtypes := &f.Config.Types + pt := cfgtypes.Uintptr + g := test.NewValue1(bb.Pos, OpGetG, pt, mem0) + sp := test.NewValue0(bb.Pos, OpSP, pt) + cmpOp := OpLess64U + if pt.Size() == 4 { + cmpOp = OpLess32U + } + limaddr := test.NewValue1I(bb.Pos, OpOffPtr, pt, 2*pt.Size(), g) + lim := test.NewValue2(bb.Pos, OpLoad, pt, limaddr, mem0) + cmp := test.NewValue2(bb.Pos, cmpOp, cfgtypes.Bool, sp, lim) + test.SetControl(cmp) + + // if true, goto sched + test.AddEdgeTo(sched) + + // if false, rewrite edge to header. + // do NOT remove+add, because that will perturb all the other phi functions + // as well as messing up other edges to the header. 
+ test.Succs = append(test.Succs, Edge{h, i}) + h.Preds[i] = Edge{test, 1} + headerMemPhi.SetArg(i, mem0) + + test.Likely = BranchUnlikely + + // sched: + // mem1 := call resched (mem0) + // goto header + resched := f.fe.Syslook("goschedguarded") + call := sched.NewValue1A(bb.Pos, OpStaticCall, types.TypeResultMem, StaticAuxCall(resched, bb.Func.ABIDefault.ABIAnalyzeTypes(nil, nil)), mem0) + mem1 := sched.NewValue1I(bb.Pos, OpSelectN, types.TypeMem, 0, call) + sched.AddEdgeTo(h) + headerMemPhi.AddArg(mem1) + + bb.Succs[p.i] = Edge{test, 0} + test.Preds = append(test.Preds, Edge{bb, p.i}) + + // Must correct all the other phi functions in the header for new incoming edge. + // Except for mem phis, it will be the same value seen on the original + // backedge at index i. + for _, v := range h.Values { + if v.Op == OpPhi && v != headerMemPhi { + v.AddArg(v.Args[i]) + } + } + } + + f.invalidateCFG() + + if f.pass.debug > 1 { + sdom = newSparseTree(f, f.Idom()) + fmt.Printf("after %s = %s\n", f.Name, sdom.treestructure(f.Entry)) + } +} + +// newPhiFor inserts a new Phi function into b, +// with all inputs set to v. +func newPhiFor(b *Block, v *Value) *Value { + phiV := b.NewValue0(b.Pos, OpPhi, v.Type) + + for range b.Preds { + phiV.AddArg(v) + } + return phiV +} + +// rewriteNewPhis updates newphis[h] to record all places where the new phi function inserted +// in block h will replace a previous definition. Block b is the block currently being processed; +// if b has its own phi definition then it takes the place of h. +// defsForUses provides information about other definitions of the variable that are present +// (and if nil, indicates that the variable is no longer live) +// sdom must yield a preorder of the flow graph if recursively walked, root-to-children. +// The result of newSparseOrderedTree with order supplied by a dfs-postorder satisfies this +// requirement. 
+func rewriteNewPhis(h, b *Block, f *Func, defsForUses []*Value, newphis map[*Block]rewrite, dfPhiTargets map[rewriteTarget]bool, sdom SparseTree) { + // If b is a block with a new phi, then a new rewrite applies below it in the dominator tree. + if _, ok := newphis[b]; ok { + h = b + } + change := newphis[h] + x := change.before + y := change.after + + // Apply rewrites to this block + if x != nil { // don't waste time on the common case of no definition. + p := &change.rewrites + for _, v := range b.Values { + if v == y { // don't rewrite self -- phi inputs are handled below. + continue + } + for i, w := range v.Args { + if w != x { + continue + } + tgt := rewriteTarget{v, i} + + // It's possible dominated control flow will rewrite this instead. + // Visiting in preorder (a property of how sdom was constructed) + // ensures that these are seen in the proper order. + if dfPhiTargets[tgt] { + continue + } + *p = append(*p, tgt) + if f.pass.debug > 1 { + fmt.Printf("added block target for h=%v, b=%v, x=%v, y=%v, tgt.v=%s, tgt.i=%d\n", + h, b, x, y, v, i) + } + } + } + + // Rewrite appropriate inputs of phis reached in successors + // in dominance frontier, self, and dominated. + // If the variable def reaching uses in b is itself defined in b, then the new phi function + // does not reach the successors of b. (This assumes a bit about the structure of the + // phi use-def graph, but it's true for memory.) 
+ if dfu := defsForUses[b.ID]; dfu != nil && dfu.Block != b { + for _, e := range b.Succs { + s := e.b + + for _, v := range s.Values { + if v.Op == OpPhi && v.Args[e.i] == x { + tgt := rewriteTarget{v, e.i} + *p = append(*p, tgt) + dfPhiTargets[tgt] = true + if f.pass.debug > 1 { + fmt.Printf("added phi target for h=%v, b=%v, s=%v, x=%v, y=%v, tgt.v=%s, tgt.i=%d\n", + h, b, s, x, y, v.LongString(), e.i) + } + break + } + } + } + } + newphis[h] = change + } + + for c := sdom[b.ID].child; c != nil; c = sdom[c.ID].sibling { + rewriteNewPhis(h, c, f, defsForUses, newphis, dfPhiTargets, sdom) // TODO: convert to explicit stack from recursion. + } +} + +// addDFphis creates new trivial phis that are necessary to correctly reflect (within SSA) +// a new definition for variable "x" inserted at h (usually but not necessarily a phi). +// These new phis can only occur at the dominance frontier of h; block s is in the dominance +// frontier of h if h does not strictly dominate s and if s is a successor of a block b where +// either b = h or h strictly dominates b. +// These newly created phis are themselves new definitions that may require addition of their +// own trivial phi functions in their own dominance frontier, and this is handled recursively. +func addDFphis(x *Value, h, b *Block, f *Func, defForUses []*Value, newphis map[*Block]rewrite, sdom SparseTree) { + oldv := defForUses[b.ID] + if oldv != x { // either a new definition replacing x, or nil if it is proven that there are no uses reachable from b + return + } + idom := f.Idom() +outer: + for _, e := range b.Succs { + s := e.b + // check phi functions in the dominance frontier + if sdom.isAncestor(h, s) { + continue // h dominates s, successor of b, therefore s is not in the frontier. + } + if _, ok := newphis[s]; ok { + continue // successor s of b already has a new phi function, so there is no need to add another. 
+ } + if x != nil { + for _, v := range s.Values { + if v.Op == OpPhi && v.Args[e.i] == x { + continue outer // successor s of b has an old phi function, so there is no need to add another. + } + } + } + + old := defForUses[idom[s.ID].ID] // new phi function is correct-but-redundant, combining value "old" on all inputs. + headerPhi := newPhiFor(s, old) + // the new phi will replace "old" in block s and all blocks dominated by s. + newphis[s] = rewrite{before: old, after: headerPhi} // record new phi, to have inputs labeled "old" rewritten to "headerPhi" + addDFphis(old, s, s, f, defForUses, newphis, sdom) // the new definition may also create new phi functions. + } + for c := sdom[b.ID].child; c != nil; c = sdom[c.ID].sibling { + addDFphis(x, h, c, f, defForUses, newphis, sdom) // TODO: convert to explicit stack from recursion. + } +} + +// findLastMems maps block ids to last memory-output op in a block, if any. +func findLastMems(f *Func) []*Value { + + var stores []*Value + lastMems := f.Cache.allocValueSlice(f.NumBlocks()) + defer f.Cache.freeValueSlice(lastMems) + storeUse := f.newSparseSet(f.NumValues()) + defer f.retSparseSet(storeUse) + for _, b := range f.Blocks { + // Find all the stores in this block. Categorize their uses: + // storeUse contains stores which are used by a subsequent store. 
+ storeUse.clear() + stores = stores[:0] + var memPhi *Value + for _, v := range b.Values { + if v.Op == OpPhi { + if v.Type.IsMemory() { + memPhi = v + } + continue + } + if v.Type.IsMemory() { + stores = append(stores, v) + for _, a := range v.Args { + if a.Block == b && a.Type.IsMemory() { + storeUse.add(a.ID) + } + } + } + } + if len(stores) == 0 { + lastMems[b.ID] = memPhi + continue + } + + // find last store in the block + var last *Value + for _, v := range stores { + if storeUse.contains(v.ID) { + continue + } + if last != nil { + b.Fatalf("two final stores - simultaneous live stores %s %s", last, v) + } + last = v + } + if last == nil { + b.Fatalf("no last store found - cycle?") + } + + // If this is a tuple containing a mem, select just + // the mem. This will generate ops we don't need, but + // it's the easiest thing to do. + if last.Type.IsTuple() { + last = b.NewValue1(last.Pos, OpSelect1, types.TypeMem, last) + } else if last.Type.IsResults() { + last = b.NewValue1I(last.Pos, OpSelectN, types.TypeMem, int64(last.Type.NumFields()-1), last) + } + + lastMems[b.ID] = last + } + return lastMems +} + +// mark values +type markKind uint8 + +const ( + notFound markKind = iota // block has not been discovered yet + notExplored // discovered and in queue, outedges not processed yet + explored // discovered and in queue, outedges processed + done // all done, in output ordering +) + +type backedgesState struct { + b *Block + i int +} + +// backedges returns a slice of successor edges that are back +// edges. For reducible loops, edge.b is the header. 
+func backedges(f *Func) []Edge { + edges := []Edge{} + mark := make([]markKind, f.NumBlocks()) + stack := []backedgesState{} + + mark[f.Entry.ID] = notExplored + stack = append(stack, backedgesState{f.Entry, 0}) + + for len(stack) > 0 { + l := len(stack) + x := stack[l-1] + if x.i < len(x.b.Succs) { + e := x.b.Succs[x.i] + stack[l-1].i++ + s := e.b + if mark[s.ID] == notFound { + mark[s.ID] = notExplored + stack = append(stack, backedgesState{s, 0}) + } else if mark[s.ID] == notExplored { + edges = append(edges, e) + } + } else { + mark[x.b.ID] = done + stack = stack[0 : l-1] + } + } + return edges +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/looprotate.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/looprotate.go new file mode 100644 index 0000000000000000000000000000000000000000..844a8f712447c933336f743ba4f6482fef12a200 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/looprotate.go @@ -0,0 +1,113 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// loopRotate converts loops with a check-loop-condition-at-beginning +// to loops with a check-loop-condition-at-end. +// This helps loops avoid extra unnecessary jumps. +// +// loop: +// CMPQ ... +// JGE exit +// ... +// JMP loop +// exit: +// +// JMP entry +// loop: +// ... +// entry: +// CMPQ ... +// JLT loop +func loopRotate(f *Func) { + loopnest := f.loopnest() + if loopnest.hasIrreducible { + return + } + if len(loopnest.loops) == 0 { + return + } + + idToIdx := f.Cache.allocIntSlice(f.NumBlocks()) + defer f.Cache.freeIntSlice(idToIdx) + for i, b := range f.Blocks { + idToIdx[b.ID] = i + } + + // Set of blocks we're moving, by ID. + move := map[ID]struct{}{} + + // Map from block ID to the moving blocks that should + // come right after it. 
+ after := map[ID][]*Block{} + + // Check each loop header and decide if we want to move it. + for _, loop := range loopnest.loops { + b := loop.header + var p *Block // b's in-loop predecessor + for _, e := range b.Preds { + if e.b.Kind != BlockPlain { + continue + } + if loopnest.b2l[e.b.ID] != loop { + continue + } + p = e.b + } + if p == nil || p == b { + continue + } + after[p.ID] = []*Block{b} + for { + nextIdx := idToIdx[b.ID] + 1 + if nextIdx >= len(f.Blocks) { // reached end of function (maybe impossible?) + break + } + nextb := f.Blocks[nextIdx] + if nextb == p { // original loop predecessor is next + break + } + if loopnest.b2l[nextb.ID] == loop { + after[p.ID] = append(after[p.ID], nextb) + } + b = nextb + } + // Swap b and p so that we'll handle p before b when moving blocks. + f.Blocks[idToIdx[loop.header.ID]] = p + f.Blocks[idToIdx[p.ID]] = loop.header + idToIdx[loop.header.ID], idToIdx[p.ID] = idToIdx[p.ID], idToIdx[loop.header.ID] + + // Place b after p. + for _, b := range after[p.ID] { + move[b.ID] = struct{}{} + } + } + + // Move blocks to their destinations in a single pass. + // We rely here on the fact that loop headers must come + // before the rest of the loop. And that relies on the + // fact that we only identify reducible loops. + j := 0 + // Some blocks that are not part of a loop may be placed + // between loop blocks. In order to avoid these blocks from + // being overwritten, use a temporary slice. 
+ oldOrder := f.Cache.allocBlockSlice(len(f.Blocks)) + defer f.Cache.freeBlockSlice(oldOrder) + copy(oldOrder, f.Blocks) + for _, b := range oldOrder { + if _, ok := move[b.ID]; ok { + continue + } + f.Blocks[j] = b + j++ + for _, a := range after[b.ID] { + f.Blocks[j] = a + j++ + } + } + if j != len(oldOrder) { + f.Fatalf("bad reordering in looprotate") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/lower.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/lower.go new file mode 100644 index 0000000000000000000000000000000000000000..e4aac47cee116913163515c38e0c6e47b9832b3a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/lower.go @@ -0,0 +1,52 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// convert to machine-dependent ops. +func lower(f *Func) { + // repeat rewrites until we find no more rewrites + applyRewrite(f, f.Config.lowerBlock, f.Config.lowerValue, removeDeadValues) +} + +// lateLower applies those rules that need to be run after the general lower rules. +func lateLower(f *Func) { + // repeat rewrites until we find no more rewrites + if f.Config.lateLowerValue != nil { + applyRewrite(f, f.Config.lateLowerBlock, f.Config.lateLowerValue, removeDeadValues) + } +} + +// checkLower checks for unlowered opcodes and fails if we find one. +func checkLower(f *Func) { + // Needs to be a separate phase because it must run after both + // lowering and a subsequent dead code elimination (because lowering + // rules may leave dead generic ops behind). 
+ for _, b := range f.Blocks { + for _, v := range b.Values { + if !opcodeTable[v.Op].generic { + continue // lowered + } + switch v.Op { + case OpSP, OpSPanchored, OpSB, OpInitMem, OpArg, OpArgIntReg, OpArgFloatReg, OpPhi, OpVarDef, OpVarLive, OpKeepAlive, OpSelect0, OpSelect1, OpSelectN, OpConvert, OpInlMark, OpWBend: + continue // ok not to lower + case OpMakeResult: + if b.Controls[0] == v { + continue + } + case OpGetG: + if f.Config.hasGReg { + // has hardware g register, regalloc takes care of it + continue // ok not to lower + } + } + s := "not lowered: " + v.String() + ", " + v.Op.String() + " " + v.Type.SimpleString() + + for _, a := range v.Args { + s += " " + a.Type.SimpleString() + } + f.Fatalf("%s", s) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/magic.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/magic.go new file mode 100644 index 0000000000000000000000000000000000000000..235b0e5e5c6aa302a8b7da225c17f8c827c6bd38 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/magic.go @@ -0,0 +1,426 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "math/big" + "math/bits" +) + +// So you want to compute x / c for some constant c? +// Machine division instructions are slow, so we try to +// compute this division with a multiplication + a few +// other cheap instructions instead. +// (We assume here that c != 0, +/- 1, or +/- 2^i. Those +// cases are easy to handle in different ways). + +// Technique from https://gmplib.org/~tege/divcnst-pldi94.pdf + +// First consider unsigned division. +// Our strategy is to precompute 1/c then do +// ⎣x / c⎦ = ⎣x * (1/c)⎦. +// 1/c is less than 1, so we can't compute it directly in +// integer arithmetic. Let's instead compute 2^e/c +// for a value of e TBD (^ = exponentiation). 
Then +// ⎣x / c⎦ = ⎣x * (2^e/c) / 2^e⎦. +// Dividing by 2^e is easy. 2^e/c isn't an integer, unfortunately. +// So we must approximate it. Let's call its approximation m. +// We'll then compute +// ⎣x * m / 2^e⎦ +// Which we want to be equal to ⎣x / c⎦ for 0 <= x < 2^n-1 +// where n is the word size. +// Setting x = c gives us c * m >= 2^e. +// We'll chose m = ⎡2^e/c⎤ to satisfy that equation. +// What remains is to choose e. +// Let m = 2^e/c + delta, 0 <= delta < 1 +// ⎣x * (2^e/c + delta) / 2^e⎦ +// ⎣x / c + x * delta / 2^e⎦ +// We must have x * delta / 2^e < 1/c so that this +// additional term never rounds differently than ⎣x / c⎦ does. +// Rearranging, +// 2^e > x * delta * c +// x can be at most 2^n-1 and delta can be at most 1. +// So it is sufficient to have 2^e >= 2^n*c. +// So we'll choose e = n + s, with s = ⎡log2(c)⎤. +// +// An additional complication arises because m has n+1 bits in it. +// Hardware restricts us to n bit by n bit multiplies. +// We divide into 3 cases: +// +// Case 1: m is even. +// ⎣x / c⎦ = ⎣x * m / 2^(n+s)⎦ +// ⎣x / c⎦ = ⎣x * (m/2) / 2^(n+s-1)⎦ +// ⎣x / c⎦ = ⎣x * (m/2) / 2^n / 2^(s-1)⎦ +// ⎣x / c⎦ = ⎣⎣x * (m/2) / 2^n⎦ / 2^(s-1)⎦ +// multiply + shift +// +// Case 2: c is even. +// ⎣x / c⎦ = ⎣(x/2) / (c/2)⎦ +// ⎣x / c⎦ = ⎣⎣x/2⎦ / (c/2)⎦ +// This is just the original problem, with x' = ⎣x/2⎦, c' = c/2, n' = n-1. +// s' = s-1 +// m' = ⎡2^(n'+s')/c'⎤ +// = ⎡2^(n+s-1)/c⎤ +// = ⎡m/2⎤ +// ⎣x / c⎦ = ⎣x' * m' / 2^(n'+s')⎦ +// ⎣x / c⎦ = ⎣⎣x/2⎦ * ⎡m/2⎤ / 2^(n+s-2)⎦ +// ⎣x / c⎦ = ⎣⎣⎣x/2⎦ * ⎡m/2⎤ / 2^n⎦ / 2^(s-2)⎦ +// shift + multiply + shift +// +// Case 3: everything else +// let k = m - 2^n. k fits in n bits. 
+// ⎣x / c⎦ = ⎣x * m / 2^(n+s)⎦ +// ⎣x / c⎦ = ⎣x * (2^n + k) / 2^(n+s)⎦ +// ⎣x / c⎦ = ⎣(x + x * k / 2^n) / 2^s⎦ +// ⎣x / c⎦ = ⎣(x + ⎣x * k / 2^n⎦) / 2^s⎦ +// ⎣x / c⎦ = ⎣(x + ⎣x * k / 2^n⎦) / 2^s⎦ +// ⎣x / c⎦ = ⎣⎣(x + ⎣x * k / 2^n⎦) / 2⎦ / 2^(s-1)⎦ +// multiply + avg + shift +// +// These can be implemented in hardware using: +// ⎣a * b / 2^n⎦ - aka high n bits of an n-bit by n-bit multiply. +// ⎣(a+b) / 2⎦ - aka "average" of two n-bit numbers. +// (Not just a regular add & shift because the intermediate result +// a+b has n+1 bits in it. Nevertheless, can be done +// in 2 instructions on x86.) + +// umagicOK reports whether we should strength reduce a n-bit divide by c. +func umagicOK(n uint, c int64) bool { + // Convert from ConstX auxint values to the real uint64 constant they represent. + d := uint64(c) << (64 - n) >> (64 - n) + + // Doesn't work for 0. + // Don't use for powers of 2. + return d&(d-1) != 0 +} + +// umagicOKn reports whether we should strength reduce an unsigned n-bit divide by c. +// We can strength reduce when c != 0 and c is not a power of two. +func umagicOK8(c int8) bool { return c&(c-1) != 0 } +func umagicOK16(c int16) bool { return c&(c-1) != 0 } +func umagicOK32(c int32) bool { return c&(c-1) != 0 } +func umagicOK64(c int64) bool { return c&(c-1) != 0 } + +type umagicData struct { + s int64 // ⎡log2(c)⎤ + m uint64 // ⎡2^(n+s)/c⎤ - 2^n +} + +// umagic computes the constants needed to strength reduce unsigned n-bit divides by the constant uint64(c). +// The return values satisfy for all 0 <= x < 2^n +// +// floor(x / uint64(c)) = x * (m + 2^n) >> (n+s) +func umagic(n uint, c int64) umagicData { + // Convert from ConstX auxint values to the real uint64 constant they represent. 
+ d := uint64(c) << (64 - n) >> (64 - n) + + C := new(big.Int).SetUint64(d) + s := C.BitLen() + M := big.NewInt(1) + M.Lsh(M, n+uint(s)) // 2^(n+s) + M.Add(M, C) // 2^(n+s)+c + M.Sub(M, big.NewInt(1)) // 2^(n+s)+c-1 + M.Div(M, C) // ⎡2^(n+s)/c⎤ + if M.Bit(int(n)) != 1 { + panic("n+1st bit isn't set") + } + M.SetBit(M, int(n), 0) + m := M.Uint64() + return umagicData{s: int64(s), m: m} +} + +func umagic8(c int8) umagicData { return umagic(8, int64(c)) } +func umagic16(c int16) umagicData { return umagic(16, int64(c)) } +func umagic32(c int32) umagicData { return umagic(32, int64(c)) } +func umagic64(c int64) umagicData { return umagic(64, c) } + +// For signed division, we use a similar strategy. +// First, we enforce a positive c. +// x / c = -(x / (-c)) +// This will require an additional Neg op for c<0. +// +// If x is positive we're in a very similar state +// to the unsigned case above. We define: +// s = ⎡log2(c)⎤-1 +// m = ⎡2^(n+s)/c⎤ +// Then +// ⎣x / c⎦ = ⎣x * m / 2^(n+s)⎦ +// If x is negative we have +// ⎡x / c⎤ = ⎣x * m / 2^(n+s)⎦ + 1 +// (TODO: derivation?) +// +// The multiply is a bit odd, as it is a signed n-bit value +// times an unsigned n-bit value. For n smaller than the +// word size, we can extend x and m appropriately and use the +// signed multiply instruction. For n == word size, +// we must use the signed multiply high and correct +// the result by adding x*2^n. +// +// Adding 1 if x<0 is done by subtracting x>>(n-1). + +func smagicOK(n uint, c int64) bool { + if c < 0 { + // Doesn't work for negative c. + return false + } + // Doesn't work for 0. + // Don't use it for powers of 2. + return c&(c-1) != 0 +} + +// smagicOKn reports whether we should strength reduce a signed n-bit divide by c. 
+func smagicOK8(c int8) bool { return smagicOK(8, int64(c)) } +func smagicOK16(c int16) bool { return smagicOK(16, int64(c)) } +func smagicOK32(c int32) bool { return smagicOK(32, int64(c)) } +func smagicOK64(c int64) bool { return smagicOK(64, c) } + +type smagicData struct { + s int64 // ⎡log2(c)⎤-1 + m uint64 // ⎡2^(n+s)/c⎤ +} + +// smagic computes the constants needed to strength reduce signed n-bit divides by the constant c. +// Must have c>0. +// The return values satisfy for all -2^(n-1) <= x < 2^(n-1) +// +// trunc(x / c) = x * m >> (n+s) + (x < 0 ? 1 : 0) +func smagic(n uint, c int64) smagicData { + C := new(big.Int).SetInt64(c) + s := C.BitLen() - 1 + M := big.NewInt(1) + M.Lsh(M, n+uint(s)) // 2^(n+s) + M.Add(M, C) // 2^(n+s)+c + M.Sub(M, big.NewInt(1)) // 2^(n+s)+c-1 + M.Div(M, C) // ⎡2^(n+s)/c⎤ + if M.Bit(int(n)) != 0 { + panic("n+1st bit is set") + } + if M.Bit(int(n-1)) == 0 { + panic("nth bit is not set") + } + m := M.Uint64() + return smagicData{s: int64(s), m: m} +} + +func smagic8(c int8) smagicData { return smagic(8, int64(c)) } +func smagic16(c int16) smagicData { return smagic(16, int64(c)) } +func smagic32(c int32) smagicData { return smagic(32, int64(c)) } +func smagic64(c int64) smagicData { return smagic(64, c) } + +// Divisibility x%c == 0 can be checked more efficiently than directly computing +// the modulus x%c and comparing against 0. +// +// The same "Division by invariant integers using multiplication" paper +// by Granlund and Montgomery referenced above briefly mentions this method +// and it is further elaborated in "Hacker's Delight" by Warren Section 10-17 +// +// The first thing to note is that for odd integers, exact division can be computed +// by using the modular inverse with respect to the word size 2^n. +// +// Given c, compute m such that (c * m) mod 2^n == 1 +// Then if c divides x (x%c ==0), the quotient is given by q = x/c == x*m mod 2^n +// +// x can range from 0, c, 2c, 3c, ... 
⎣(2^n - 1)/c⎦ * c the maximum multiple +// Thus, x*m mod 2^n is 0, 1, 2, 3, ... ⎣(2^n - 1)/c⎦ +// i.e. the quotient takes all values from zero up to max = ⎣(2^n - 1)/c⎦ +// +// If x is not divisible by c, then x*m mod 2^n must take some larger value than max. +// +// This gives x*m mod 2^n <= ⎣(2^n - 1)/c⎦ as a test for divisibility +// involving one multiplication and compare. +// +// To extend this to even integers, consider c = d0 * 2^k where d0 is odd. +// We can test whether x is divisible by both d0 and 2^k. +// For d0, the test is the same as above. Let m be such that m*d0 mod 2^n == 1 +// Then x*m mod 2^n <= ⎣(2^n - 1)/d0⎦ is the first test. +// The test for divisibility by 2^k is a check for k trailing zeroes. +// Note that since d0 is odd, m is odd and thus x*m will have the same number of +// trailing zeroes as x. So the two tests are, +// +// x*m mod 2^n <= ⎣(2^n - 1)/d0⎦ +// and x*m ends in k zero bits +// +// These can be combined into a single comparison by the following +// (theorem ZRU in Hacker's Delight) for unsigned integers. +// +// x <= a and x ends in k zero bits if and only if RotRight(x ,k) <= ⎣a/(2^k)⎦ +// Where RotRight(x ,k) is right rotation of x by k bits. +// +// To prove the first direction, x <= a -> ⎣x/(2^k)⎦ <= ⎣a/(2^k)⎦ +// But since x ends in k zeroes all the rotated bits would be zero too. +// So RotRight(x, k) == ⎣x/(2^k)⎦ <= ⎣a/(2^k)⎦ +// +// If x does not end in k zero bits, then RotRight(x, k) +// has some non-zero bits in the k highest bits. +// ⎣x/(2^k)⎦ has all zeroes in the k highest bits, +// so RotRight(x, k) > ⎣x/(2^k)⎦ +// +// Finally, if x > a and has k trailing zero bits, then RotRight(x, k) == ⎣x/(2^k)⎦ +// and ⎣x/(2^k)⎦ must be greater than ⎣a/(2^k)⎦, that is the top n-k bits of x must +// be greater than the top n-k bits of a because the rest of x bits are zero. 
+// +// So the two conditions about can be replaced with the single test +// +// RotRight(x*m mod 2^n, k) <= ⎣(2^n - 1)/c⎦ +// +// Where d0*2^k was replaced by c on the right hand side. + +// udivisibleOK reports whether we should strength reduce an unsigned n-bit divisibilty check by c. +func udivisibleOK(n uint, c int64) bool { + // Convert from ConstX auxint values to the real uint64 constant they represent. + d := uint64(c) << (64 - n) >> (64 - n) + + // Doesn't work for 0. + // Don't use for powers of 2. + return d&(d-1) != 0 +} + +func udivisibleOK8(c int8) bool { return udivisibleOK(8, int64(c)) } +func udivisibleOK16(c int16) bool { return udivisibleOK(16, int64(c)) } +func udivisibleOK32(c int32) bool { return udivisibleOK(32, int64(c)) } +func udivisibleOK64(c int64) bool { return udivisibleOK(64, c) } + +type udivisibleData struct { + k int64 // trailingZeros(c) + m uint64 // m * (c>>k) mod 2^n == 1 multiplicative inverse of odd portion modulo 2^n + max uint64 // ⎣(2^n - 1)/ c⎦ max value to for divisibility +} + +func udivisible(n uint, c int64) udivisibleData { + // Convert from ConstX auxint values to the real uint64 constant they represent. + d := uint64(c) << (64 - n) >> (64 - n) + + k := bits.TrailingZeros64(d) + d0 := d >> uint(k) // the odd portion of the divisor + + mask := ^uint64(0) >> (64 - n) + + // Calculate the multiplicative inverse via Newton's method. + // Quadratic convergence doubles the number of correct bits per iteration. 
+ m := d0 // initial guess correct to 3-bits d0*d0 mod 8 == 1 + m = m * (2 - m*d0) // 6-bits + m = m * (2 - m*d0) // 12-bits + m = m * (2 - m*d0) // 24-bits + m = m * (2 - m*d0) // 48-bits + m = m * (2 - m*d0) // 96-bits >= 64-bits + m = m & mask + + max := mask / d + + return udivisibleData{ + k: int64(k), + m: m, + max: max, + } +} + +func udivisible8(c int8) udivisibleData { return udivisible(8, int64(c)) } +func udivisible16(c int16) udivisibleData { return udivisible(16, int64(c)) } +func udivisible32(c int32) udivisibleData { return udivisible(32, int64(c)) } +func udivisible64(c int64) udivisibleData { return udivisible(64, c) } + +// For signed integers, a similar method follows. +// +// Given c > 1 and odd, compute m such that (c * m) mod 2^n == 1 +// Then if c divides x (x%c ==0), the quotient is given by q = x/c == x*m mod 2^n +// +// x can range from ⎡-2^(n-1)/c⎤ * c, ... -c, 0, c, ... ⎣(2^(n-1) - 1)/c⎦ * c +// Thus, x*m mod 2^n is ⎡-2^(n-1)/c⎤, ... -2, -1, 0, 1, 2, ... ⎣(2^(n-1) - 1)/c⎦ +// +// So, x is a multiple of c if and only if: +// ⎡-2^(n-1)/c⎤ <= x*m mod 2^n <= ⎣(2^(n-1) - 1)/c⎦ +// +// Since c > 1 and odd, this can be simplified by +// ⎡-2^(n-1)/c⎤ == ⎡(-2^(n-1) + 1)/c⎤ == -⎣(2^(n-1) - 1)/c⎦ +// +// -⎣(2^(n-1) - 1)/c⎦ <= x*m mod 2^n <= ⎣(2^(n-1) - 1)/c⎦ +// +// To extend this to even integers, consider c = d0 * 2^k where d0 is odd. +// We can test whether x is divisible by both d0 and 2^k. +// +// Let m be such that (d0 * m) mod 2^n == 1. +// Let q = x*m mod 2^n. Then c divides x if: +// +// -⎣(2^(n-1) - 1)/d0⎦ <= q <= ⎣(2^(n-1) - 1)/d0⎦ and q ends in at least k 0-bits +// +// To transform this to a single comparison, we use the following theorem (ZRS in Hacker's Delight). 
+// +// For a >= 0 the following conditions are equivalent: +// 1) -a <= x <= a and x ends in at least k 0-bits +// 2) RotRight(x+a', k) <= ⎣2a'/2^k⎦ +// +// Where a' = a & -2^k (a with its right k bits set to zero) +// +// To see that 1 & 2 are equivalent, note that -a <= x <= a is equivalent to +// -a' <= x <= a' if and only if x ends in at least k 0-bits. Adding -a' to each side gives, +// 0 <= x + a' <= 2a' and x + a' ends in at least k 0-bits if and only if x does since a' has +// k 0-bits by definition. We can use theorem ZRU above with x -> x + a' and a -> 2a' giving 1) == 2). +// +// Let m be such that (d0 * m) mod 2^n == 1. +// Let q = x*m mod 2^n. +// Let a' = ⎣(2^(n-1) - 1)/d0⎦ & -2^k +// +// Then the divisibility test is: +// +// RotRight(q+a', k) <= ⎣2a'/2^k⎦ +// +// Note that the calculation is performed using unsigned integers. +// Since a' can have n-1 bits, 2a' may have n bits and there is no risk of overflow. + +// sdivisibleOK reports whether we should strength reduce a signed n-bit divisibilty check by c. +func sdivisibleOK(n uint, c int64) bool { + if c < 0 { + // Doesn't work for negative c. + return false + } + // Doesn't work for 0. + // Don't use it for powers of 2. + return c&(c-1) != 0 +} + +func sdivisibleOK8(c int8) bool { return sdivisibleOK(8, int64(c)) } +func sdivisibleOK16(c int16) bool { return sdivisibleOK(16, int64(c)) } +func sdivisibleOK32(c int32) bool { return sdivisibleOK(32, int64(c)) } +func sdivisibleOK64(c int64) bool { return sdivisibleOK(64, c) } + +type sdivisibleData struct { + k int64 // trailingZeros(c) + m uint64 // m * (c>>k) mod 2^n == 1 multiplicative inverse of odd portion modulo 2^n + a uint64 // ⎣(2^(n-1) - 1)/ (c>>k)⎦ & -(1<> uint(k) // the odd portion of the divisor + + mask := ^uint64(0) >> (64 - n) + + // Calculate the multiplicative inverse via Newton's method. + // Quadratic convergence doubles the number of correct bits per iteration. 
+ m := d0 // initial guess correct to 3-bits d0*d0 mod 8 == 1 + m = m * (2 - m*d0) // 6-bits + m = m * (2 - m*d0) // 12-bits + m = m * (2 - m*d0) // 24-bits + m = m * (2 - m*d0) // 48-bits + m = m * (2 - m*d0) // 96-bits >= 64-bits + m = m & mask + + a := ((mask >> 1) / d0) & -(1 << uint(k)) + max := (2 * a) >> uint(k) + + return sdivisibleData{ + k: int64(k), + m: m, + a: a, + max: max, + } +} + +func sdivisible8(c int8) sdivisibleData { return sdivisible(8, int64(c)) } +func sdivisible16(c int16) sdivisibleData { return sdivisible(16, int64(c)) } +func sdivisible32(c int32) sdivisibleData { return sdivisible(32, int64(c)) } +func sdivisible64(c int64) sdivisibleData { return sdivisible(64, c) } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/magic_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/magic_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7c6009dea6c83b9e0daeedf30b13a89b36d84844 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/magic_test.go @@ -0,0 +1,410 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import ( + "math/big" + "testing" +) + +func TestMagicExhaustive8(t *testing.T) { + testMagicExhaustive(t, 8) +} +func TestMagicExhaustive8U(t *testing.T) { + testMagicExhaustiveU(t, 8) +} +func TestMagicExhaustive16(t *testing.T) { + if testing.Short() { + t.Skip("slow test; skipping") + } + testMagicExhaustive(t, 16) +} +func TestMagicExhaustive16U(t *testing.T) { + if testing.Short() { + t.Skip("slow test; skipping") + } + testMagicExhaustiveU(t, 16) +} + +// exhaustive test of magic for n bits +func testMagicExhaustive(t *testing.T, n uint) { + min := -int64(1) << (n - 1) + max := int64(1) << (n - 1) + for c := int64(1); c < max; c++ { + if !smagicOK(n, int64(c)) { + continue + } + m := int64(smagic(n, c).m) + s := smagic(n, c).s + for i := min; i < max; i++ { + want := i / c + got := (i * m) >> (n + uint(s)) + if i < 0 { + got++ + } + if want != got { + t.Errorf("signed magic wrong for %d / %d: got %d, want %d (m=%d,s=%d)\n", i, c, got, want, m, s) + } + } + } +} +func testMagicExhaustiveU(t *testing.T, n uint) { + max := uint64(1) << n + for c := uint64(1); c < max; c++ { + if !umagicOK(n, int64(c)) { + continue + } + m := umagic(n, int64(c)).m + s := umagic(n, int64(c)).s + for i := uint64(0); i < max; i++ { + want := i / c + got := (i * (max + m)) >> (n + uint(s)) + if want != got { + t.Errorf("unsigned magic wrong for %d / %d: got %d, want %d (m=%d,s=%d)\n", i, c, got, want, m, s) + } + } + } +} + +func TestMagicUnsigned(t *testing.T) { + One := new(big.Int).SetUint64(1) + for _, n := range [...]uint{8, 16, 32, 64} { + TwoN := new(big.Int).Lsh(One, n) + Max := new(big.Int).Sub(TwoN, One) + for _, c := range [...]uint64{ + 3, + 5, + 6, + 7, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 17, + 1<<8 - 1, + 1<<8 + 1, + 1<<16 - 1, + 1<<16 + 1, + 1<<32 - 1, + 1<<32 + 1, + 1<<64 - 1, + } { + if c>>n != 0 { + continue // not appropriate for the given n. 
+ } + if !umagicOK(n, int64(c)) { + t.Errorf("expected n=%d c=%d to pass\n", n, c) + } + m := umagic(n, int64(c)).m + s := umagic(n, int64(c)).s + + C := new(big.Int).SetUint64(c) + M := new(big.Int).SetUint64(m) + M.Add(M, TwoN) + + // Find largest multiple of c. + Mul := new(big.Int).Div(Max, C) + Mul.Mul(Mul, C) + mul := Mul.Uint64() + + // Try some input values, mostly around multiples of c. + for _, x := range [...]uint64{0, 1, + c - 1, c, c + 1, + 2*c - 1, 2 * c, 2*c + 1, + mul - 1, mul, mul + 1, + uint64(1)< 0 { + continue + } + Want := new(big.Int).Quo(X, C) + Got := new(big.Int).Mul(X, M) + Got.Rsh(Got, n+uint(s)) + if Want.Cmp(Got) != 0 { + t.Errorf("umagic for %d/%d n=%d doesn't work, got=%s, want %s\n", x, c, n, Got, Want) + } + } + } + } +} + +func TestMagicSigned(t *testing.T) { + One := new(big.Int).SetInt64(1) + for _, n := range [...]uint{8, 16, 32, 64} { + TwoNMinusOne := new(big.Int).Lsh(One, n-1) + Max := new(big.Int).Sub(TwoNMinusOne, One) + Min := new(big.Int).Neg(TwoNMinusOne) + for _, c := range [...]int64{ + 3, + 5, + 6, + 7, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 17, + 1<<7 - 1, + 1<<7 + 1, + 1<<15 - 1, + 1<<15 + 1, + 1<<31 - 1, + 1<<31 + 1, + 1<<63 - 1, + } { + if c>>(n-1) != 0 { + continue // not appropriate for the given n. + } + if !smagicOK(n, int64(c)) { + t.Errorf("expected n=%d c=%d to pass\n", n, c) + } + m := smagic(n, int64(c)).m + s := smagic(n, int64(c)).s + + C := new(big.Int).SetInt64(c) + M := new(big.Int).SetUint64(m) + + // Find largest multiple of c. + Mul := new(big.Int).Div(Max, C) + Mul.Mul(Mul, C) + mul := Mul.Int64() + + // Try some input values, mostly around multiples of c. 
+ for _, x := range [...]int64{ + -1, 1, + -c - 1, -c, -c + 1, c - 1, c, c + 1, + -2*c - 1, -2 * c, -2*c + 1, 2*c - 1, 2 * c, 2*c + 1, + -mul - 1, -mul, -mul + 1, mul - 1, mul, mul + 1, + int64(1)<<(n-1) - 1, -int64(1) << (n - 1), + } { + X := new(big.Int).SetInt64(x) + if X.Cmp(Min) < 0 || X.Cmp(Max) > 0 { + continue + } + Want := new(big.Int).Quo(X, C) + Got := new(big.Int).Mul(X, M) + Got.Rsh(Got, n+uint(s)) + if x < 0 { + Got.Add(Got, One) + } + if Want.Cmp(Got) != 0 { + t.Errorf("smagic for %d/%d n=%d doesn't work, got=%s, want %s\n", x, c, n, Got, Want) + } + } + } + } +} + +func testDivisibleExhaustiveU(t *testing.T, n uint) { + maxU := uint64(1) << n + for c := uint64(1); c < maxU; c++ { + if !udivisibleOK(n, int64(c)) { + continue + } + k := udivisible(n, int64(c)).k + m := udivisible(n, int64(c)).m + max := udivisible(n, int64(c)).max + mask := ^uint64(0) >> (64 - n) + for i := uint64(0); i < maxU; i++ { + want := i%c == 0 + mul := (i * m) & mask + rot := (mul>>uint(k) | mul<<(n-uint(k))) & mask + got := rot <= max + if want != got { + t.Errorf("unsigned divisible wrong for %d %% %d == 0: got %v, want %v (k=%d,m=%d,max=%d)\n", i, c, got, want, k, m, max) + } + } + } +} + +func TestDivisibleExhaustive8U(t *testing.T) { + testDivisibleExhaustiveU(t, 8) +} + +func TestDivisibleExhaustive16U(t *testing.T) { + if testing.Short() { + t.Skip("slow test; skipping") + } + testDivisibleExhaustiveU(t, 16) +} + +func TestDivisibleUnsigned(t *testing.T) { + One := new(big.Int).SetUint64(1) + for _, n := range [...]uint{8, 16, 32, 64} { + TwoN := new(big.Int).Lsh(One, n) + Max := new(big.Int).Sub(TwoN, One) + for _, c := range [...]uint64{ + 3, + 5, + 6, + 7, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 17, + 1<<8 - 1, + 1<<8 + 1, + 1<<16 - 1, + 1<<16 + 1, + 1<<32 - 1, + 1<<32 + 1, + 1<<64 - 1, + } { + if c>>n != 0 { + continue // c too large for the given n. 
+ } + if !udivisibleOK(n, int64(c)) { + t.Errorf("expected n=%d c=%d to pass\n", n, c) + } + k := udivisible(n, int64(c)).k + m := udivisible(n, int64(c)).m + max := udivisible(n, int64(c)).max + mask := ^uint64(0) >> (64 - n) + + C := new(big.Int).SetUint64(c) + + // Find largest multiple of c. + Mul := new(big.Int).Div(Max, C) + Mul.Mul(Mul, C) + mul := Mul.Uint64() + + // Try some input values, mostly around multiples of c. + for _, x := range [...]uint64{0, 1, + c - 1, c, c + 1, + 2*c - 1, 2 * c, 2*c + 1, + mul - 1, mul, mul + 1, + uint64(1)< 0 { + continue + } + want := x%c == 0 + mul := (x * m) & mask + rot := (mul>>uint(k) | mul<<(n-uint(k))) & mask + got := rot <= max + if want != got { + t.Errorf("unsigned divisible wrong for %d %% %d == 0: got %v, want %v (k=%d,m=%d,max=%d)\n", x, c, got, want, k, m, max) + } + } + } + } +} + +func testDivisibleExhaustive(t *testing.T, n uint) { + minI := -int64(1) << (n - 1) + maxI := int64(1) << (n - 1) + for c := int64(1); c < maxI; c++ { + if !sdivisibleOK(n, int64(c)) { + continue + } + k := sdivisible(n, int64(c)).k + m := sdivisible(n, int64(c)).m + a := sdivisible(n, int64(c)).a + max := sdivisible(n, int64(c)).max + mask := ^uint64(0) >> (64 - n) + for i := minI; i < maxI; i++ { + want := i%c == 0 + mul := (uint64(i)*m + a) & mask + rot := (mul>>uint(k) | mul<<(n-uint(k))) & mask + got := rot <= max + if want != got { + t.Errorf("signed divisible wrong for %d %% %d == 0: got %v, want %v (k=%d,m=%d,a=%d,max=%d)\n", i, c, got, want, k, m, a, max) + } + } + } +} + +func TestDivisibleExhaustive8(t *testing.T) { + testDivisibleExhaustive(t, 8) +} + +func TestDivisibleExhaustive16(t *testing.T) { + if testing.Short() { + t.Skip("slow test; skipping") + } + testDivisibleExhaustive(t, 16) +} + +func TestDivisibleSigned(t *testing.T) { + One := new(big.Int).SetInt64(1) + for _, n := range [...]uint{8, 16, 32, 64} { + TwoNMinusOne := new(big.Int).Lsh(One, n-1) + Max := new(big.Int).Sub(TwoNMinusOne, One) + Min := 
new(big.Int).Neg(TwoNMinusOne) + for _, c := range [...]int64{ + 3, + 5, + 6, + 7, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 17, + 1<<7 - 1, + 1<<7 + 1, + 1<<15 - 1, + 1<<15 + 1, + 1<<31 - 1, + 1<<31 + 1, + 1<<63 - 1, + } { + if c>>(n-1) != 0 { + continue // not appropriate for the given n. + } + if !sdivisibleOK(n, int64(c)) { + t.Errorf("expected n=%d c=%d to pass\n", n, c) + } + k := sdivisible(n, int64(c)).k + m := sdivisible(n, int64(c)).m + a := sdivisible(n, int64(c)).a + max := sdivisible(n, int64(c)).max + mask := ^uint64(0) >> (64 - n) + + C := new(big.Int).SetInt64(c) + + // Find largest multiple of c. + Mul := new(big.Int).Div(Max, C) + Mul.Mul(Mul, C) + mul := Mul.Int64() + + // Try some input values, mostly around multiples of c. + for _, x := range [...]int64{ + -1, 1, + -c - 1, -c, -c + 1, c - 1, c, c + 1, + -2*c - 1, -2 * c, -2*c + 1, 2*c - 1, 2 * c, 2*c + 1, + -mul - 1, -mul, -mul + 1, mul - 1, mul, mul + 1, + int64(1)<<(n-1) - 1, -int64(1) << (n - 1), + } { + X := new(big.Int).SetInt64(x) + if X.Cmp(Min) < 0 || X.Cmp(Max) > 0 { + continue + } + want := x%c == 0 + mul := (uint64(x)*m + a) & mask + rot := (mul>>uint(k) | mul<<(n-uint(k))) & mask + got := rot <= max + if want != got { + t.Errorf("signed divisible wrong for %d %% %d == 0: got %v, want %v (k=%d,m=%d,a=%d,max=%d)\n", x, c, got, want, k, m, a, max) + } + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/memcombine.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/memcombine.go new file mode 100644 index 0000000000000000000000000000000000000000..b1a47510be2513d1901a2bd17250ded5e68865e7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/memcombine.go @@ -0,0 +1,806 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/types" + "cmd/internal/src" + "sort" +) + +// memcombine combines smaller loads and stores into larger ones. +// We ensure this generates good code for encoding/binary operations. +// It may help other cases also. +func memcombine(f *Func) { + // This optimization requires that the architecture has + // unaligned loads and unaligned stores. + if !f.Config.unalignedOK { + return + } + + memcombineLoads(f) + memcombineStores(f) +} + +func memcombineLoads(f *Func) { + // Find "OR trees" to start with. + mark := f.newSparseSet(f.NumValues()) + defer f.retSparseSet(mark) + var order []*Value + + // Mark all values that are the argument of an OR. + for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Op == OpOr16 || v.Op == OpOr32 || v.Op == OpOr64 { + mark.add(v.Args[0].ID) + mark.add(v.Args[1].ID) + } + } + } + for _, b := range f.Blocks { + order = order[:0] + for _, v := range b.Values { + if v.Op != OpOr16 && v.Op != OpOr32 && v.Op != OpOr64 { + continue + } + if mark.contains(v.ID) { + // marked - means it is not the root of an OR tree + continue + } + // Add the OR tree rooted at v to the order. + // We use BFS here, but any walk that puts roots before leaves would work. + i := len(order) + order = append(order, v) + for ; i < len(order); i++ { + x := order[i] + for j := 0; j < 2; j++ { + a := x.Args[j] + if a.Op == OpOr16 || a.Op == OpOr32 || a.Op == OpOr64 { + order = append(order, a) + } + } + } + } + for _, v := range order { + max := f.Config.RegSize + switch v.Op { + case OpOr64: + case OpOr32: + max = 4 + case OpOr16: + max = 2 + default: + continue + } + for n := max; n > 1; n /= 2 { + if combineLoads(v, n) { + break + } + } + } + } +} + +// A BaseAddress represents the address ptr+idx, where +// ptr is a pointer type and idx is an integer type. +// idx may be nil, in which case it is treated as 0. 
+type BaseAddress struct { + ptr *Value + idx *Value +} + +// splitPtr returns the base address of ptr and any +// constant offset from that base. +// BaseAddress{ptr,nil},0 is always a valid result, but splitPtr +// tries to peel away as many constants into off as possible. +func splitPtr(ptr *Value) (BaseAddress, int64) { + var idx *Value + var off int64 + for { + if ptr.Op == OpOffPtr { + off += ptr.AuxInt + ptr = ptr.Args[0] + } else if ptr.Op == OpAddPtr { + if idx != nil { + // We have two or more indexing values. + // Pick the first one we found. + return BaseAddress{ptr: ptr, idx: idx}, off + } + idx = ptr.Args[1] + if idx.Op == OpAdd32 || idx.Op == OpAdd64 { + if idx.Args[0].Op == OpConst32 || idx.Args[0].Op == OpConst64 { + off += idx.Args[0].AuxInt + idx = idx.Args[1] + } else if idx.Args[1].Op == OpConst32 || idx.Args[1].Op == OpConst64 { + off += idx.Args[1].AuxInt + idx = idx.Args[0] + } + } + ptr = ptr.Args[0] + } else { + return BaseAddress{ptr: ptr, idx: idx}, off + } + } +} + +func combineLoads(root *Value, n int64) bool { + orOp := root.Op + var shiftOp Op + switch orOp { + case OpOr64: + shiftOp = OpLsh64x64 + case OpOr32: + shiftOp = OpLsh32x64 + case OpOr16: + shiftOp = OpLsh16x64 + default: + return false + } + + // Find n values that are ORed together with the above op. + a := make([]*Value, 0, 8) + a = append(a, root) + for i := 0; i < len(a) && int64(len(a)) < n; i++ { + v := a[i] + if v.Uses != 1 && v != root { + // Something in this subtree is used somewhere else. + return false + } + if v.Op == orOp { + a[i] = v.Args[0] + a = append(a, v.Args[1]) + i-- + } + } + if int64(len(a)) != n { + return false + } + + // Check that the first entry to see what ops we're looking for. + // All the entries should be of the form shift(extend(load)), maybe with no shift. 
+ v := a[0] + if v.Op == shiftOp { + v = v.Args[0] + } + var extOp Op + if orOp == OpOr64 && (v.Op == OpZeroExt8to64 || v.Op == OpZeroExt16to64 || v.Op == OpZeroExt32to64) || + orOp == OpOr32 && (v.Op == OpZeroExt8to32 || v.Op == OpZeroExt16to32) || + orOp == OpOr16 && v.Op == OpZeroExt8to16 { + extOp = v.Op + v = v.Args[0] + } else { + return false + } + if v.Op != OpLoad { + return false + } + base, _ := splitPtr(v.Args[0]) + mem := v.Args[1] + size := v.Type.Size() + + if root.Block.Func.Config.arch == "S390X" { + // s390x can't handle unaligned accesses to global variables. + if base.ptr.Op == OpAddr { + return false + } + } + + // Check all the entries, extract useful info. + type LoadRecord struct { + load *Value + offset int64 // offset of load address from base + shift int64 + } + r := make([]LoadRecord, n, 8) + for i := int64(0); i < n; i++ { + v := a[i] + if v.Uses != 1 { + return false + } + shift := int64(0) + if v.Op == shiftOp { + if v.Args[1].Op != OpConst64 { + return false + } + shift = v.Args[1].AuxInt + v = v.Args[0] + if v.Uses != 1 { + return false + } + } + if v.Op != extOp { + return false + } + load := v.Args[0] + if load.Op != OpLoad { + return false + } + if load.Uses != 1 { + return false + } + if load.Args[1] != mem { + return false + } + p, off := splitPtr(load.Args[0]) + if p != base { + return false + } + r[i] = LoadRecord{load: load, offset: off, shift: shift} + } + + // Sort in memory address order. + sort.Slice(r, func(i, j int) bool { + return r[i].offset < r[j].offset + }) + + // Check that we have contiguous offsets. + for i := int64(0); i < n; i++ { + if r[i].offset != r[0].offset+i*size { + return false + } + } + + // Check for reads in little-endian or big-endian order. 
+ shift0 := r[0].shift + isLittleEndian := true + for i := int64(0); i < n; i++ { + if r[i].shift != shift0+i*size*8 { + isLittleEndian = false + break + } + } + isBigEndian := true + for i := int64(0); i < n; i++ { + if r[i].shift != shift0-i*size*8 { + isBigEndian = false + break + } + } + if !isLittleEndian && !isBigEndian { + return false + } + + // Find a place to put the new load. + // This is tricky, because it has to be at a point where + // its memory argument is live. We can't just put it in root.Block. + // We use the block of the latest load. + loads := make([]*Value, n, 8) + for i := int64(0); i < n; i++ { + loads[i] = r[i].load + } + loadBlock := mergePoint(root.Block, loads...) + if loadBlock == nil { + return false + } + // Find a source position to use. + pos := src.NoXPos + for _, load := range loads { + if load.Block == loadBlock { + pos = load.Pos + break + } + } + if pos == src.NoXPos { + return false + } + + // Check to see if we need byte swap before storing. + needSwap := isLittleEndian && root.Block.Func.Config.BigEndian || + isBigEndian && !root.Block.Func.Config.BigEndian + if needSwap && (size != 1 || !root.Block.Func.Config.haveByteSwap(n)) { + return false + } + + // This is the commit point. + + // First, issue load at lowest address. + v = loadBlock.NewValue2(pos, OpLoad, sizeType(n*size), r[0].load.Args[0], mem) + + // Byte swap if needed, + if needSwap { + v = byteSwap(loadBlock, pos, v) + } + + // Extend if needed. + if n*size < root.Type.Size() { + v = zeroExtend(loadBlock, pos, v, n*size, root.Type.Size()) + } + + // Shift if needed. + if isLittleEndian && shift0 != 0 { + v = leftShift(loadBlock, pos, v, shift0) + } + if isBigEndian && shift0-(n-1)*size*8 != 0 { + v = leftShift(loadBlock, pos, v, shift0-(n-1)*size*8) + } + + // Install with (Copy v). + root.reset(OpCopy) + root.AddArg(v) + + // Clobber the loads, just to prevent additional work being done on + // subtrees (which are now unreachable). 
+ for i := int64(0); i < n; i++ { + clobber(r[i].load) + } + return true +} + +func memcombineStores(f *Func) { + mark := f.newSparseSet(f.NumValues()) + defer f.retSparseSet(mark) + var order []*Value + + for _, b := range f.Blocks { + // Mark all stores which are not last in a store sequence. + mark.clear() + for _, v := range b.Values { + if v.Op == OpStore { + mark.add(v.MemoryArg().ID) + } + } + + // pick an order for visiting stores such that + // later stores come earlier in the ordering. + order = order[:0] + for _, v := range b.Values { + if v.Op != OpStore { + continue + } + if mark.contains(v.ID) { + continue // not last in a chain of stores + } + for { + order = append(order, v) + v = v.Args[2] + if v.Block != b || v.Op != OpStore { + break + } + } + } + + // Look for combining opportunities at each store in queue order. + for _, v := range order { + if v.Op != OpStore { // already rewritten + continue + } + + size := v.Aux.(*types.Type).Size() + if size >= f.Config.RegSize || size == 0 { + continue + } + + for n := f.Config.RegSize / size; n > 1; n /= 2 { + if combineStores(v, n) { + continue + } + } + } + } +} + +// Try to combine the n stores ending in root. +// Returns true if successful. +func combineStores(root *Value, n int64) bool { + // Helper functions. 
+ type StoreRecord struct { + store *Value + offset int64 + } + getShiftBase := func(a []StoreRecord) *Value { + x := a[0].store.Args[1] + y := a[1].store.Args[1] + switch x.Op { + case OpTrunc64to8, OpTrunc64to16, OpTrunc64to32, OpTrunc32to8, OpTrunc32to16, OpTrunc16to8: + x = x.Args[0] + default: + return nil + } + switch y.Op { + case OpTrunc64to8, OpTrunc64to16, OpTrunc64to32, OpTrunc32to8, OpTrunc32to16, OpTrunc16to8: + y = y.Args[0] + default: + return nil + } + var x2 *Value + switch x.Op { + case OpRsh64Ux64, OpRsh32Ux64, OpRsh16Ux64: + x2 = x.Args[0] + default: + } + var y2 *Value + switch y.Op { + case OpRsh64Ux64, OpRsh32Ux64, OpRsh16Ux64: + y2 = y.Args[0] + default: + } + if y2 == x { + // a shift of x and x itself. + return x + } + if x2 == y { + // a shift of y and y itself. + return y + } + if x2 == y2 { + // 2 shifts both of the same argument. + return x2 + } + return nil + } + isShiftBase := func(v, base *Value) bool { + val := v.Args[1] + switch val.Op { + case OpTrunc64to8, OpTrunc64to16, OpTrunc64to32, OpTrunc32to8, OpTrunc32to16, OpTrunc16to8: + val = val.Args[0] + default: + return false + } + if val == base { + return true + } + switch val.Op { + case OpRsh64Ux64, OpRsh32Ux64, OpRsh16Ux64: + val = val.Args[0] + default: + return false + } + return val == base + } + shift := func(v, base *Value) int64 { + val := v.Args[1] + switch val.Op { + case OpTrunc64to8, OpTrunc64to16, OpTrunc64to32, OpTrunc32to8, OpTrunc32to16, OpTrunc16to8: + val = val.Args[0] + default: + return -1 + } + if val == base { + return 0 + } + switch val.Op { + case OpRsh64Ux64, OpRsh32Ux64, OpRsh16Ux64: + val = val.Args[1] + default: + return -1 + } + if val.Op != OpConst64 { + return -1 + } + return val.AuxInt + } + + // Element size of the individual stores. + size := root.Aux.(*types.Type).Size() + if size*n > root.Block.Func.Config.RegSize { + return false + } + + // Gather n stores to look at. Check easy conditions we require. 
+ a := make([]StoreRecord, 0, 8) + rbase, roff := splitPtr(root.Args[0]) + if root.Block.Func.Config.arch == "S390X" { + // s390x can't handle unaligned accesses to global variables. + if rbase.ptr.Op == OpAddr { + return false + } + } + a = append(a, StoreRecord{root, roff}) + for i, x := int64(1), root.Args[2]; i < n; i, x = i+1, x.Args[2] { + if x.Op != OpStore { + return false + } + if x.Block != root.Block { + return false + } + if x.Uses != 1 { // Note: root can have more than one use. + return false + } + if x.Aux.(*types.Type).Size() != size { + // TODO: the constant source and consecutive load source cases + // do not need all the stores to be the same size. + return false + } + base, off := splitPtr(x.Args[0]) + if base != rbase { + return false + } + a = append(a, StoreRecord{x, off}) + } + // Before we sort, grab the memory arg the result should have. + mem := a[n-1].store.Args[2] + // Also grab position of first store (last in array = first in memory order). + pos := a[n-1].store.Pos + + // Sort stores in increasing address order. + sort.Slice(a, func(i, j int) bool { + return a[i].offset < a[j].offset + }) + + // Check that everything is written to sequential locations. + for i := int64(0); i < n; i++ { + if a[i].offset != a[0].offset+i*size { + return false + } + } + + // Memory location we're going to write at (the lowest one). + ptr := a[0].store.Args[0] + + // Check for constant stores + isConst := true + for i := int64(0); i < n; i++ { + switch a[i].store.Args[1].Op { + case OpConst32, OpConst16, OpConst8: + default: + isConst = false + break + } + } + if isConst { + // Modify root to do all the stores. 
+ var c int64 + mask := int64(1)<<(8*size) - 1 + for i := int64(0); i < n; i++ { + s := 8 * size * int64(i) + if root.Block.Func.Config.BigEndian { + s = 8*size*(n-1) - s + } + c |= (a[i].store.Args[1].AuxInt & mask) << s + } + var cv *Value + switch size * n { + case 2: + cv = root.Block.Func.ConstInt16(types.Types[types.TUINT16], int16(c)) + case 4: + cv = root.Block.Func.ConstInt32(types.Types[types.TUINT32], int32(c)) + case 8: + cv = root.Block.Func.ConstInt64(types.Types[types.TUINT64], c) + } + + // Move all the stores to the root. + for i := int64(0); i < n; i++ { + v := a[i].store + if v == root { + v.Aux = cv.Type // widen store type + v.Pos = pos + v.SetArg(0, ptr) + v.SetArg(1, cv) + v.SetArg(2, mem) + } else { + clobber(v) + v.Type = types.Types[types.TBOOL] // erase memory type + } + } + return true + } + + // Check for consecutive loads as the source of the stores. + var loadMem *Value + var loadBase BaseAddress + var loadIdx int64 + for i := int64(0); i < n; i++ { + load := a[i].store.Args[1] + if load.Op != OpLoad { + loadMem = nil + break + } + if load.Uses != 1 { + loadMem = nil + break + } + if load.Type.IsPtr() { + // Don't combine stores containing a pointer, as we need + // a write barrier for those. This can't currently happen, + // but might in the future if we ever have another + // 8-byte-reg/4-byte-ptr architecture like amd64p32. + loadMem = nil + break + } + mem := load.Args[1] + base, idx := splitPtr(load.Args[0]) + if loadMem == nil { + // First one we found + loadMem = mem + loadBase = base + loadIdx = idx + continue + } + if base != loadBase || mem != loadMem { + loadMem = nil + break + } + if idx != loadIdx+(a[i].offset-a[0].offset) { + loadMem = nil + break + } + } + if loadMem != nil { + // Modify the first load to do a larger load instead. 
+ load := a[0].store.Args[1] + switch size * n { + case 2: + load.Type = types.Types[types.TUINT16] + case 4: + load.Type = types.Types[types.TUINT32] + case 8: + load.Type = types.Types[types.TUINT64] + } + + // Modify root to do the store. + for i := int64(0); i < n; i++ { + v := a[i].store + if v == root { + v.Aux = load.Type // widen store type + v.Pos = pos + v.SetArg(0, ptr) + v.SetArg(1, load) + v.SetArg(2, mem) + } else { + clobber(v) + v.Type = types.Types[types.TBOOL] // erase memory type + } + } + return true + } + + // Check that all the shift/trunc are of the same base value. + shiftBase := getShiftBase(a) + if shiftBase == nil { + return false + } + for i := int64(0); i < n; i++ { + if !isShiftBase(a[i].store, shiftBase) { + return false + } + } + + // Check for writes in little-endian or big-endian order. + isLittleEndian := true + shift0 := shift(a[0].store, shiftBase) + for i := int64(1); i < n; i++ { + if shift(a[i].store, shiftBase) != shift0+i*size*8 { + isLittleEndian = false + break + } + } + isBigEndian := true + for i := int64(1); i < n; i++ { + if shift(a[i].store, shiftBase) != shift0-i*size*8 { + isBigEndian = false + break + } + } + if !isLittleEndian && !isBigEndian { + return false + } + + // Check to see if we need byte swap before storing. + needSwap := isLittleEndian && root.Block.Func.Config.BigEndian || + isBigEndian && !root.Block.Func.Config.BigEndian + if needSwap && (size != 1 || !root.Block.Func.Config.haveByteSwap(n)) { + return false + } + + // This is the commit point. + + // Modify root to do all the stores. 
+ sv := shiftBase + if isLittleEndian && shift0 != 0 { + sv = rightShift(root.Block, root.Pos, sv, shift0) + } + if isBigEndian && shift0-(n-1)*size*8 != 0 { + sv = rightShift(root.Block, root.Pos, sv, shift0-(n-1)*size*8) + } + if sv.Type.Size() > size*n { + sv = truncate(root.Block, root.Pos, sv, sv.Type.Size(), size*n) + } + if needSwap { + sv = byteSwap(root.Block, root.Pos, sv) + } + + // Move all the stores to the root. + for i := int64(0); i < n; i++ { + v := a[i].store + if v == root { + v.Aux = sv.Type // widen store type + v.Pos = pos + v.SetArg(0, ptr) + v.SetArg(1, sv) + v.SetArg(2, mem) + } else { + clobber(v) + v.Type = types.Types[types.TBOOL] // erase memory type + } + } + return true +} + +func sizeType(size int64) *types.Type { + switch size { + case 8: + return types.Types[types.TUINT64] + case 4: + return types.Types[types.TUINT32] + case 2: + return types.Types[types.TUINT16] + default: + base.Fatalf("bad size %d\n", size) + return nil + } +} + +func truncate(b *Block, pos src.XPos, v *Value, from, to int64) *Value { + switch from*10 + to { + case 82: + return b.NewValue1(pos, OpTrunc64to16, types.Types[types.TUINT16], v) + case 84: + return b.NewValue1(pos, OpTrunc64to32, types.Types[types.TUINT32], v) + case 42: + return b.NewValue1(pos, OpTrunc32to16, types.Types[types.TUINT16], v) + default: + base.Fatalf("bad sizes %d %d\n", from, to) + return nil + } +} +func zeroExtend(b *Block, pos src.XPos, v *Value, from, to int64) *Value { + switch from*10 + to { + case 24: + return b.NewValue1(pos, OpZeroExt16to32, types.Types[types.TUINT32], v) + case 28: + return b.NewValue1(pos, OpZeroExt16to64, types.Types[types.TUINT64], v) + case 48: + return b.NewValue1(pos, OpZeroExt32to64, types.Types[types.TUINT64], v) + default: + base.Fatalf("bad sizes %d %d\n", from, to) + return nil + } +} + +func leftShift(b *Block, pos src.XPos, v *Value, shift int64) *Value { + s := b.Func.ConstInt64(types.Types[types.TUINT64], shift) + size := v.Type.Size() + 
switch size { + case 8: + return b.NewValue2(pos, OpLsh64x64, v.Type, v, s) + case 4: + return b.NewValue2(pos, OpLsh32x64, v.Type, v, s) + case 2: + return b.NewValue2(pos, OpLsh16x64, v.Type, v, s) + default: + base.Fatalf("bad size %d\n", size) + return nil + } +} +func rightShift(b *Block, pos src.XPos, v *Value, shift int64) *Value { + s := b.Func.ConstInt64(types.Types[types.TUINT64], shift) + size := v.Type.Size() + switch size { + case 8: + return b.NewValue2(pos, OpRsh64Ux64, v.Type, v, s) + case 4: + return b.NewValue2(pos, OpRsh32Ux64, v.Type, v, s) + case 2: + return b.NewValue2(pos, OpRsh16Ux64, v.Type, v, s) + default: + base.Fatalf("bad size %d\n", size) + return nil + } +} +func byteSwap(b *Block, pos src.XPos, v *Value) *Value { + switch v.Type.Size() { + case 8: + return b.NewValue1(pos, OpBswap64, v.Type, v) + case 4: + return b.NewValue1(pos, OpBswap32, v.Type, v) + case 2: + return b.NewValue1(pos, OpBswap16, v.Type, v) + + default: + v.Fatalf("bad size %d\n", v.Type.Size()) + return nil + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/nilcheck.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/nilcheck.go new file mode 100644 index 0000000000000000000000000000000000000000..c69cd8c32ed39405f6562ce96542349582f0fc6a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/nilcheck.go @@ -0,0 +1,337 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/ir" + "cmd/internal/src" + "internal/buildcfg" +) + +// nilcheckelim eliminates unnecessary nil checks. +// runs on machine-independent code. +func nilcheckelim(f *Func) { + // A nil check is redundant if the same nil check was successful in a + // dominating block. The efficacy of this pass depends heavily on the + // efficacy of the cse pass. 
+ sdom := f.Sdom() + + // TODO: Eliminate more nil checks. + // We can recursively remove any chain of fixed offset calculations, + // i.e. struct fields and array elements, even with non-constant + // indices: x is non-nil iff x.a.b[i].c is. + + type walkState int + const ( + Work walkState = iota // process nil checks and traverse to dominees + ClearPtr // forget the fact that ptr is nil + ) + + type bp struct { + block *Block // block, or nil in ClearPtr state + ptr *Value // if non-nil, ptr that is to be cleared in ClearPtr state + op walkState + } + + work := make([]bp, 0, 256) + work = append(work, bp{block: f.Entry}) + + // map from value ID to known non-nil version of that value ID + // (in the current dominator path being walked). This slice is updated by + // walkStates to maintain the known non-nil values. + // If there is extrinsic information about non-nil-ness, this map + // points a value to itself. If a value is known non-nil because we + // already did a nil check on it, it points to the nil check operation. + nonNilValues := f.Cache.allocValueSlice(f.NumValues()) + defer f.Cache.freeValueSlice(nonNilValues) + + // make an initial pass identifying any non-nil values + for _, b := range f.Blocks { + for _, v := range b.Values { + // a value resulting from taking the address of a + // value, or a value constructed from an offset of a + // non-nil ptr (OpAddPtr) implies it is non-nil + // We also assume unsafe pointer arithmetic generates non-nil pointers. See #27180. + // We assume that SlicePtr is non-nil because we do a bounds check + // before the slice access (and all cap>0 slices have a non-nil ptr). See #30366. 
+ if v.Op == OpAddr || v.Op == OpLocalAddr || v.Op == OpAddPtr || v.Op == OpOffPtr || v.Op == OpAdd32 || v.Op == OpAdd64 || v.Op == OpSub32 || v.Op == OpSub64 || v.Op == OpSlicePtr { + nonNilValues[v.ID] = v + } + } + } + + for changed := true; changed; { + changed = false + for _, b := range f.Blocks { + for _, v := range b.Values { + // phis whose arguments are all non-nil + // are non-nil + if v.Op == OpPhi { + argsNonNil := true + for _, a := range v.Args { + if nonNilValues[a.ID] == nil { + argsNonNil = false + break + } + } + if argsNonNil { + if nonNilValues[v.ID] == nil { + changed = true + } + nonNilValues[v.ID] = v + } + } + } + } + } + + // allocate auxiliary date structures for computing store order + sset := f.newSparseSet(f.NumValues()) + defer f.retSparseSet(sset) + storeNumber := f.Cache.allocInt32Slice(f.NumValues()) + defer f.Cache.freeInt32Slice(storeNumber) + + // perform a depth first walk of the dominee tree + for len(work) > 0 { + node := work[len(work)-1] + work = work[:len(work)-1] + + switch node.op { + case Work: + b := node.block + + // First, see if we're dominated by an explicit nil check. + if len(b.Preds) == 1 { + p := b.Preds[0].b + if p.Kind == BlockIf && p.Controls[0].Op == OpIsNonNil && p.Succs[0].b == b { + if ptr := p.Controls[0].Args[0]; nonNilValues[ptr.ID] == nil { + nonNilValues[ptr.ID] = ptr + work = append(work, bp{op: ClearPtr, ptr: ptr}) + } + } + } + + // Next, order values in the current block w.r.t. stores. + b.Values = storeOrder(b.Values, sset, storeNumber) + + pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block + pendingLines.clear() + + // Next, process values in the block. + for _, v := range b.Values { + switch v.Op { + case OpIsNonNil: + ptr := v.Args[0] + if nonNilValues[ptr.ID] != nil { + if v.Pos.IsStmt() == src.PosIsStmt { // Boolean true is a terrible statement boundary. 
+ pendingLines.add(v.Pos) + v.Pos = v.Pos.WithNotStmt() + } + // This is a redundant explicit nil check. + v.reset(OpConstBool) + v.AuxInt = 1 // true + } + case OpNilCheck: + ptr := v.Args[0] + if nilCheck := nonNilValues[ptr.ID]; nilCheck != nil { + // This is a redundant implicit nil check. + // Logging in the style of the former compiler -- and omit line 1, + // which is usually in generated code. + if f.fe.Debug_checknil() && v.Pos.Line() > 1 { + f.Warnl(v.Pos, "removed nil check") + } + if v.Pos.IsStmt() == src.PosIsStmt { // About to lose a statement boundary + pendingLines.add(v.Pos) + } + v.Op = OpCopy + v.SetArgs1(nilCheck) + continue + } + // Record the fact that we know ptr is non nil, and remember to + // undo that information when this dominator subtree is done. + nonNilValues[ptr.ID] = v + work = append(work, bp{op: ClearPtr, ptr: ptr}) + fallthrough // a non-eliminated nil check might be a good place for a statement boundary. + default: + if v.Pos.IsStmt() != src.PosNotStmt && !isPoorStatementOp(v.Op) && pendingLines.contains(v.Pos) { + v.Pos = v.Pos.WithIsStmt() + pendingLines.remove(v.Pos) + } + } + } + // This reduces the lost statement count in "go" by 5 (out of 500 total). + for j := range b.Values { // is this an ordering problem? + v := b.Values[j] + if v.Pos.IsStmt() != src.PosNotStmt && !isPoorStatementOp(v.Op) && pendingLines.contains(v.Pos) { + v.Pos = v.Pos.WithIsStmt() + pendingLines.remove(v.Pos) + } + } + if pendingLines.contains(b.Pos) { + b.Pos = b.Pos.WithIsStmt() + pendingLines.remove(b.Pos) + } + + // Add all dominated blocks to the work list. + for w := sdom[node.block.ID].child; w != nil; w = sdom[w.ID].sibling { + work = append(work, bp{op: Work, block: w}) + } + + case ClearPtr: + nonNilValues[node.ptr.ID] = nil + continue + } + } +} + +// All platforms are guaranteed to fault if we load/store to anything smaller than this address. +// +// This should agree with minLegalPointer in the runtime. 
+const minZeroPage = 4096 + +// faultOnLoad is true if a load to an address below minZeroPage will trigger a SIGSEGV. +var faultOnLoad = buildcfg.GOOS != "aix" + +// nilcheckelim2 eliminates unnecessary nil checks. +// Runs after lowering and scheduling. +func nilcheckelim2(f *Func) { + unnecessary := f.newSparseMap(f.NumValues()) // map from pointer that will be dereferenced to index of dereferencing value in b.Values[] + defer f.retSparseMap(unnecessary) + + pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block + + for _, b := range f.Blocks { + // Walk the block backwards. Find instructions that will fault if their + // input pointer is nil. Remove nil checks on those pointers, as the + // faulting instruction effectively does the nil check for free. + unnecessary.clear() + pendingLines.clear() + // Optimization: keep track of removed nilcheck with smallest index + firstToRemove := len(b.Values) + for i := len(b.Values) - 1; i >= 0; i-- { + v := b.Values[i] + if opcodeTable[v.Op].nilCheck && unnecessary.contains(v.Args[0].ID) { + if f.fe.Debug_checknil() && v.Pos.Line() > 1 { + f.Warnl(v.Pos, "removed nil check") + } + // For bug 33724, policy is that we might choose to bump an existing position + // off the faulting load/store in favor of the one from the nil check. + + // Iteration order means that first nilcheck in the chain wins, others + // are bumped into the ordinary statement preservation algorithm. 
+ u := b.Values[unnecessary.get(v.Args[0].ID)] + if !u.Pos.SameFileAndLine(v.Pos) { + if u.Pos.IsStmt() == src.PosIsStmt { + pendingLines.add(u.Pos) + } + u.Pos = v.Pos + } else if v.Pos.IsStmt() == src.PosIsStmt { + pendingLines.add(v.Pos) + } + + v.reset(OpUnknown) + firstToRemove = i + continue + } + if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() { + if v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(*ir.Name).Type().HasPointers()) { + // These ops don't really change memory. + continue + // Note: OpVarDef requires that the defined variable not have pointers. + // We need to make sure that there's no possible faulting + // instruction between a VarDef and that variable being + // fully initialized. If there was, then anything scanning + // the stack during the handling of that fault will see + // a live but uninitialized pointer variable on the stack. + // + // If we have: + // + // NilCheck p + // VarDef x + // x = *p + // + // We can't rewrite that to + // + // VarDef x + // NilCheck p + // x = *p + // + // Particularly, even though *p faults on p==nil, we still + // have to do the explicit nil check before the VarDef. + // See issue #32288. + } + // This op changes memory. Any faulting instruction after v that + // we've recorded in the unnecessary map is now obsolete. + unnecessary.clear() + } + + // Find any pointers that this op is guaranteed to fault on if nil. + var ptrstore [2]*Value + ptrs := ptrstore[:0] + if opcodeTable[v.Op].faultOnNilArg0 && (faultOnLoad || v.Type.IsMemory()) { + // On AIX, only writing will fault. + ptrs = append(ptrs, v.Args[0]) + } + if opcodeTable[v.Op].faultOnNilArg1 && (faultOnLoad || (v.Type.IsMemory() && v.Op != OpPPC64LoweredMove)) { + // On AIX, only writing will fault. + // LoweredMove is a special case because it's considered as a "mem" as it stores on arg0 but arg1 is accessed as a load and should be checked. 
+ ptrs = append(ptrs, v.Args[1]) + } + + for _, ptr := range ptrs { + // Check to make sure the offset is small. + switch opcodeTable[v.Op].auxType { + case auxSym: + if v.Aux != nil { + continue + } + case auxSymOff: + if v.Aux != nil || v.AuxInt < 0 || v.AuxInt >= minZeroPage { + continue + } + case auxSymValAndOff: + off := ValAndOff(v.AuxInt).Off() + if v.Aux != nil || off < 0 || off >= minZeroPage { + continue + } + case auxInt32: + // Mips uses this auxType for atomic add constant. It does not affect the effective address. + case auxInt64: + // ARM uses this auxType for duffcopy/duffzero/alignment info. + // It does not affect the effective address. + case auxNone: + // offset is zero. + default: + v.Fatalf("can't handle aux %s (type %d) yet\n", v.auxString(), int(opcodeTable[v.Op].auxType)) + } + // This instruction is guaranteed to fault if ptr is nil. + // Any previous nil check op is unnecessary. + unnecessary.set(ptr.ID, int32(i)) + } + } + // Remove values we've clobbered with OpUnknown. + i := firstToRemove + for j := i; j < len(b.Values); j++ { + v := b.Values[j] + if v.Op != OpUnknown { + if !notStmtBoundary(v.Op) && pendingLines.contains(v.Pos) { // Late in compilation, so any remaining NotStmt values are probably okay now. + v.Pos = v.Pos.WithIsStmt() + pendingLines.remove(v.Pos) + } + b.Values[i] = v + i++ + } + } + + if pendingLines.contains(b.Pos) { + b.Pos = b.Pos.WithIsStmt() + } + + b.truncateValues(i) + + // TODO: if b.Kind == BlockPlain, start the analysis in the subsequent block to find + // more unnecessary nil checks. Would fix test/nilptr3.go:159. 
+ } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/nilcheck_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/nilcheck_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6c89b1e18569f8fcbf6d2957d7f369063bd1a0f6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/nilcheck_test.go @@ -0,0 +1,438 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/types" + "strconv" + "testing" +) + +func BenchmarkNilCheckDeep1(b *testing.B) { benchmarkNilCheckDeep(b, 1) } +func BenchmarkNilCheckDeep10(b *testing.B) { benchmarkNilCheckDeep(b, 10) } +func BenchmarkNilCheckDeep100(b *testing.B) { benchmarkNilCheckDeep(b, 100) } +func BenchmarkNilCheckDeep1000(b *testing.B) { benchmarkNilCheckDeep(b, 1000) } +func BenchmarkNilCheckDeep10000(b *testing.B) { benchmarkNilCheckDeep(b, 10000) } + +// benchmarkNilCheckDeep is a stress test of nilcheckelim. +// It uses the worst possible input: A linear string of +// nil checks, none of which can be eliminated. +// Run with multiple depths to observe big-O behavior. +func benchmarkNilCheckDeep(b *testing.B, depth int) { + c := testConfig(b) + ptrType := c.config.Types.BytePtr + + var blocs []bloc + blocs = append(blocs, + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Goto(blockn(0)), + ), + ) + for i := 0; i < depth; i++ { + blocs = append(blocs, + Bloc(blockn(i), + Valu(ptrn(i), OpAddr, ptrType, 0, nil, "sb"), + Valu(booln(i), OpIsNonNil, c.config.Types.Bool, 0, nil, ptrn(i)), + If(booln(i), blockn(i+1), "exit"), + ), + ) + } + blocs = append(blocs, + Bloc(blockn(depth), Goto("exit")), + Bloc("exit", Exit("mem")), + ) + + fun := c.Fun("entry", blocs...) 
+ + CheckFunc(fun.f) + b.SetBytes(int64(depth)) // helps for eyeballing linearity + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + nilcheckelim(fun.f) + } +} + +func blockn(n int) string { return "b" + strconv.Itoa(n) } +func ptrn(n int) string { return "p" + strconv.Itoa(n) } +func booln(n int) string { return "c" + strconv.Itoa(n) } + +func isNilCheck(b *Block) bool { + return b.Kind == BlockIf && b.Controls[0].Op == OpIsNonNil +} + +// TestNilcheckSimple verifies that a second repeated nilcheck is removed. +func TestNilcheckSimple(t *testing.T) { + c := testConfig(t) + ptrType := c.config.Types.BytePtr + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Goto("checkPtr")), + Bloc("checkPtr", + Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"), + Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"), + If("bool1", "secondCheck", "exit")), + Bloc("secondCheck", + Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"), + If("bool2", "extra", "exit")), + Bloc("extra", + Goto("exit")), + Bloc("exit", + Exit("mem"))) + + CheckFunc(fun.f) + nilcheckelim(fun.f) + + // clean up the removed nil check + fuse(fun.f, fuseTypePlain) + deadcode(fun.f) + + CheckFunc(fun.f) + for _, b := range fun.f.Blocks { + if b == fun.blocks["secondCheck"] && isNilCheck(b) { + t.Errorf("secondCheck was not eliminated") + } + } +} + +// TestNilcheckDomOrder ensures that the nil check elimination isn't dependent +// on the order of the dominees. 
+func TestNilcheckDomOrder(t *testing.T) { + c := testConfig(t) + ptrType := c.config.Types.BytePtr + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Goto("checkPtr")), + Bloc("checkPtr", + Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"), + Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"), + If("bool1", "secondCheck", "exit")), + Bloc("exit", + Exit("mem")), + Bloc("secondCheck", + Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"), + If("bool2", "extra", "exit")), + Bloc("extra", + Goto("exit"))) + + CheckFunc(fun.f) + nilcheckelim(fun.f) + + // clean up the removed nil check + fuse(fun.f, fuseTypePlain) + deadcode(fun.f) + + CheckFunc(fun.f) + for _, b := range fun.f.Blocks { + if b == fun.blocks["secondCheck"] && isNilCheck(b) { + t.Errorf("secondCheck was not eliminated") + } + } +} + +// TestNilcheckAddr verifies that nilchecks of OpAddr constructed values are removed. +func TestNilcheckAddr(t *testing.T) { + c := testConfig(t) + ptrType := c.config.Types.BytePtr + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Goto("checkPtr")), + Bloc("checkPtr", + Valu("ptr1", OpAddr, ptrType, 0, nil, "sb"), + Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"), + If("bool1", "extra", "exit")), + Bloc("extra", + Goto("exit")), + Bloc("exit", + Exit("mem"))) + + CheckFunc(fun.f) + nilcheckelim(fun.f) + + // clean up the removed nil check + fuse(fun.f, fuseTypePlain) + deadcode(fun.f) + + CheckFunc(fun.f) + for _, b := range fun.f.Blocks { + if b == fun.blocks["checkPtr"] && isNilCheck(b) { + t.Errorf("checkPtr was not eliminated") + } + } +} + +// TestNilcheckAddPtr verifies that nilchecks of OpAddPtr constructed values are removed. 
+func TestNilcheckAddPtr(t *testing.T) { + c := testConfig(t) + ptrType := c.config.Types.BytePtr + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Goto("checkPtr")), + Bloc("checkPtr", + Valu("off", OpConst64, c.config.Types.Int64, 20, nil), + Valu("ptr1", OpAddPtr, ptrType, 0, nil, "sb", "off"), + Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"), + If("bool1", "extra", "exit")), + Bloc("extra", + Goto("exit")), + Bloc("exit", + Exit("mem"))) + + CheckFunc(fun.f) + nilcheckelim(fun.f) + + // clean up the removed nil check + fuse(fun.f, fuseTypePlain) + deadcode(fun.f) + + CheckFunc(fun.f) + for _, b := range fun.f.Blocks { + if b == fun.blocks["checkPtr"] && isNilCheck(b) { + t.Errorf("checkPtr was not eliminated") + } + } +} + +// TestNilcheckPhi tests that nil checks of phis, for which all values are known to be +// non-nil are removed. +func TestNilcheckPhi(t *testing.T) { + c := testConfig(t) + ptrType := c.config.Types.BytePtr + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil), + Valu("baddr", OpLocalAddr, c.config.Types.Bool, 0, StringToAux("b"), "sp", "mem"), + Valu("bool1", OpLoad, c.config.Types.Bool, 0, nil, "baddr", "mem"), + If("bool1", "b1", "b2")), + Bloc("b1", + Valu("ptr1", OpAddr, ptrType, 0, nil, "sb"), + Goto("checkPtr")), + Bloc("b2", + Valu("ptr2", OpAddr, ptrType, 0, nil, "sb"), + Goto("checkPtr")), + // both ptr1 and ptr2 are guaranteed non-nil here + Bloc("checkPtr", + Valu("phi", OpPhi, ptrType, 0, nil, "ptr1", "ptr2"), + Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "phi"), + If("bool2", "extra", "exit")), + Bloc("extra", + Goto("exit")), + Bloc("exit", + Exit("mem"))) + + CheckFunc(fun.f) + nilcheckelim(fun.f) + + // clean up the removed nil check + fuse(fun.f, fuseTypePlain) + 
deadcode(fun.f) + + CheckFunc(fun.f) + for _, b := range fun.f.Blocks { + if b == fun.blocks["checkPtr"] && isNilCheck(b) { + t.Errorf("checkPtr was not eliminated") + } + } +} + +// TestNilcheckKeepRemove verifies that duplicate checks of the same pointer +// are removed, but checks of different pointers are not. +func TestNilcheckKeepRemove(t *testing.T) { + c := testConfig(t) + ptrType := c.config.Types.BytePtr + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Goto("checkPtr")), + Bloc("checkPtr", + Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"), + Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"), + If("bool1", "differentCheck", "exit")), + Bloc("differentCheck", + Valu("ptr2", OpLoad, ptrType, 0, nil, "sb", "mem"), + Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr2"), + If("bool2", "secondCheck", "exit")), + Bloc("secondCheck", + Valu("bool3", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"), + If("bool3", "extra", "exit")), + Bloc("extra", + Goto("exit")), + Bloc("exit", + Exit("mem"))) + + CheckFunc(fun.f) + nilcheckelim(fun.f) + + // clean up the removed nil check + fuse(fun.f, fuseTypePlain) + deadcode(fun.f) + + CheckFunc(fun.f) + foundDifferentCheck := false + for _, b := range fun.f.Blocks { + if b == fun.blocks["secondCheck"] && isNilCheck(b) { + t.Errorf("secondCheck was not eliminated") + } + if b == fun.blocks["differentCheck"] && isNilCheck(b) { + foundDifferentCheck = true + } + } + if !foundDifferentCheck { + t.Errorf("removed differentCheck, but shouldn't have") + } +} + +// TestNilcheckInFalseBranch tests that nil checks in the false branch of a nilcheck +// block are *not* removed. 
+func TestNilcheckInFalseBranch(t *testing.T) { + c := testConfig(t) + ptrType := c.config.Types.BytePtr + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Goto("checkPtr")), + Bloc("checkPtr", + Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"), + Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"), + If("bool1", "extra", "secondCheck")), + Bloc("secondCheck", + Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"), + If("bool2", "extra", "thirdCheck")), + Bloc("thirdCheck", + Valu("bool3", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"), + If("bool3", "extra", "exit")), + Bloc("extra", + Goto("exit")), + Bloc("exit", + Exit("mem"))) + + CheckFunc(fun.f) + nilcheckelim(fun.f) + + // clean up the removed nil check + fuse(fun.f, fuseTypePlain) + deadcode(fun.f) + + CheckFunc(fun.f) + foundSecondCheck := false + foundThirdCheck := false + for _, b := range fun.f.Blocks { + if b == fun.blocks["secondCheck"] && isNilCheck(b) { + foundSecondCheck = true + } + if b == fun.blocks["thirdCheck"] && isNilCheck(b) { + foundThirdCheck = true + } + } + if !foundSecondCheck { + t.Errorf("removed secondCheck, but shouldn't have [false branch]") + } + if !foundThirdCheck { + t.Errorf("removed thirdCheck, but shouldn't have [false branch]") + } +} + +// TestNilcheckUser verifies that a user nil check that dominates a generated nil check +// wil remove the generated nil check. 
+func TestNilcheckUser(t *testing.T) { + c := testConfig(t) + ptrType := c.config.Types.BytePtr + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Goto("checkPtr")), + Bloc("checkPtr", + Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"), + Valu("nilptr", OpConstNil, ptrType, 0, nil), + Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"), + If("bool1", "secondCheck", "exit")), + Bloc("secondCheck", + Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"), + If("bool2", "extra", "exit")), + Bloc("extra", + Goto("exit")), + Bloc("exit", + Exit("mem"))) + + CheckFunc(fun.f) + // we need the opt here to rewrite the user nilcheck + opt(fun.f) + nilcheckelim(fun.f) + + // clean up the removed nil check + fuse(fun.f, fuseTypePlain) + deadcode(fun.f) + + CheckFunc(fun.f) + for _, b := range fun.f.Blocks { + if b == fun.blocks["secondCheck"] && isNilCheck(b) { + t.Errorf("secondCheck was not eliminated") + } + } +} + +// TestNilcheckBug reproduces a bug in nilcheckelim found by compiling math/big +func TestNilcheckBug(t *testing.T) { + c := testConfig(t) + ptrType := c.config.Types.BytePtr + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Goto("checkPtr")), + Bloc("checkPtr", + Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"), + Valu("nilptr", OpConstNil, ptrType, 0, nil), + Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"), + If("bool1", "secondCheck", "couldBeNil")), + Bloc("couldBeNil", + Goto("secondCheck")), + Bloc("secondCheck", + Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"), + If("bool2", "extra", "exit")), + Bloc("extra", + // prevent fuse from eliminating this block + Valu("store", OpStore, types.TypeMem, 0, ptrType, "ptr1", "nilptr", "mem"), + Goto("exit")), + Bloc("exit", + Valu("phi", OpPhi, types.TypeMem, 0, nil, 
"mem", "store"), + Exit("phi"))) + + CheckFunc(fun.f) + // we need the opt here to rewrite the user nilcheck + opt(fun.f) + nilcheckelim(fun.f) + + // clean up the removed nil check + fuse(fun.f, fuseTypePlain) + deadcode(fun.f) + + CheckFunc(fun.f) + foundSecondCheck := false + for _, b := range fun.f.Blocks { + if b == fun.blocks["secondCheck"] && isNilCheck(b) { + foundSecondCheck = true + } + } + if !foundSecondCheck { + t.Errorf("secondCheck was eliminated, but shouldn't have") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/numberlines.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/numberlines.go new file mode 100644 index 0000000000000000000000000000000000000000..b4eca324d5200590b89c13796f085db9b4ce4439 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/numberlines.go @@ -0,0 +1,262 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/internal/src" + "fmt" + "sort" +) + +func isPoorStatementOp(op Op) bool { + switch op { + // Note that Nilcheck often vanishes, but when it doesn't, you'd love to start the statement there + // so that a debugger-user sees the stop before the panic, and can examine the value. + case OpAddr, OpLocalAddr, OpOffPtr, OpStructSelect, OpPhi, OpITab, OpIData, + OpIMake, OpStringMake, OpSliceMake, OpStructMake0, OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4, + OpConstBool, OpConst8, OpConst16, OpConst32, OpConst64, OpConst32F, OpConst64F, OpSB, OpSP, + OpArgIntReg, OpArgFloatReg: + return true + } + return false +} + +// nextGoodStatementIndex returns an index at i or later that is believed +// to be a good place to start the statement for b. This decision is +// based on v's Op, the possibility of a better later operation, and +// whether the values following i are the same line as v. 
+// If a better statement index isn't found, then i is returned. +func nextGoodStatementIndex(v *Value, i int, b *Block) int { + // If the value is the last one in the block, too bad, it will have to do + // (this assumes that the value ordering vaguely corresponds to the source + // program execution order, which tends to be true directly after ssa is + // first built). + if i >= len(b.Values)-1 { + return i + } + // Skip the likely-ephemeral/fragile opcodes expected to vanish in a rewrite. + if !isPoorStatementOp(v.Op) { + return i + } + // Look ahead to see what the line number is on the next thing that could be a boundary. + for j := i + 1; j < len(b.Values); j++ { + u := b.Values[j] + if u.Pos.IsStmt() == src.PosNotStmt { // ignore non-statements + continue + } + if u.Pos.SameFileAndLine(v.Pos) { + if isPoorStatementOp(u.Op) { + continue // Keep looking, this is also not a good statement op + } + return j + } + return i + } + return i +} + +// notStmtBoundary reports whether a value with opcode op can never be a statement +// boundary. Such values don't correspond to a user's understanding of a +// statement boundary. 
+func notStmtBoundary(op Op) bool { + switch op { + case OpCopy, OpPhi, OpVarDef, OpVarLive, OpUnknown, OpFwdRef, OpArg, OpArgIntReg, OpArgFloatReg: + return true + } + return false +} + +func (b *Block) FirstPossibleStmtValue() *Value { + for _, v := range b.Values { + if notStmtBoundary(v.Op) { + continue + } + return v + } + return nil +} + +func flc(p src.XPos) string { + if p == src.NoXPos { + return "none" + } + return fmt.Sprintf("(%d):%d:%d", p.FileIndex(), p.Line(), p.Col()) +} + +type fileAndPair struct { + f int32 + lp lineRange +} + +type fileAndPairs []fileAndPair + +func (fap fileAndPairs) Len() int { + return len(fap) +} +func (fap fileAndPairs) Less(i, j int) bool { + return fap[i].f < fap[j].f +} +func (fap fileAndPairs) Swap(i, j int) { + fap[i], fap[j] = fap[j], fap[i] +} + +// -d=ssa/number_lines/stats=1 (that bit) for line and file distribution statistics +// -d=ssa/number_lines/debug for information about why particular values are marked as statements. +func numberLines(f *Func) { + po := f.Postorder() + endlines := make(map[ID]src.XPos) + ranges := make(map[int]lineRange) + note := func(p src.XPos) { + line := uint32(p.Line()) + i := int(p.FileIndex()) + lp, found := ranges[i] + change := false + if line < lp.first || !found { + lp.first = line + change = true + } + if line > lp.last { + lp.last = line + change = true + } + if change { + ranges[i] = lp + } + } + + // Visit in reverse post order so that all non-loop predecessors come first. 
+ for j := len(po) - 1; j >= 0; j-- { + b := po[j] + // Find the first interesting position and check to see if it differs from any predecessor + firstPos := src.NoXPos + firstPosIndex := -1 + if b.Pos.IsStmt() != src.PosNotStmt { + note(b.Pos) + } + for i := 0; i < len(b.Values); i++ { + v := b.Values[i] + if v.Pos.IsStmt() != src.PosNotStmt { + note(v.Pos) + // skip ahead to better instruction for this line if possible + i = nextGoodStatementIndex(v, i, b) + v = b.Values[i] + firstPosIndex = i + firstPos = v.Pos + v.Pos = firstPos.WithDefaultStmt() // default to default + break + } + } + + if firstPosIndex == -1 { // Effectively empty block, check block's own Pos, consider preds. + line := src.NoXPos + for _, p := range b.Preds { + pbi := p.Block().ID + if !endlines[pbi].SameFileAndLine(line) { + if line == src.NoXPos { + line = endlines[pbi] + continue + } else { + line = src.NoXPos + break + } + + } + } + // If the block has no statement itself and is effectively empty, tag it w/ predecessor(s) but not as a statement + if b.Pos.IsStmt() == src.PosNotStmt { + b.Pos = line + endlines[b.ID] = line + continue + } + // If the block differs from its predecessors, mark it as a statement + if line == src.NoXPos || !line.SameFileAndLine(b.Pos) { + b.Pos = b.Pos.WithIsStmt() + if f.pass.debug > 0 { + fmt.Printf("Mark stmt effectively-empty-block %s %s %s\n", f.Name, b, flc(b.Pos)) + } + } + endlines[b.ID] = b.Pos + continue + } + // check predecessors for any difference; if firstPos differs, then it is a boundary. 
+ if len(b.Preds) == 0 { // Don't forget the entry block + b.Values[firstPosIndex].Pos = firstPos.WithIsStmt() + if f.pass.debug > 0 { + fmt.Printf("Mark stmt entry-block %s %s %s %s\n", f.Name, b, b.Values[firstPosIndex], flc(firstPos)) + } + } else { // differing pred + for _, p := range b.Preds { + pbi := p.Block().ID + if !endlines[pbi].SameFileAndLine(firstPos) { + b.Values[firstPosIndex].Pos = firstPos.WithIsStmt() + if f.pass.debug > 0 { + fmt.Printf("Mark stmt differing-pred %s %s %s %s, different=%s ending %s\n", + f.Name, b, b.Values[firstPosIndex], flc(firstPos), p.Block(), flc(endlines[pbi])) + } + break + } + } + } + // iterate forward setting each new (interesting) position as a statement boundary. + for i := firstPosIndex + 1; i < len(b.Values); i++ { + v := b.Values[i] + if v.Pos.IsStmt() == src.PosNotStmt { + continue + } + note(v.Pos) + // skip ahead if possible + i = nextGoodStatementIndex(v, i, b) + v = b.Values[i] + if !v.Pos.SameFileAndLine(firstPos) { + if f.pass.debug > 0 { + fmt.Printf("Mark stmt new line %s %s %s %s prev pos = %s\n", f.Name, b, v, flc(v.Pos), flc(firstPos)) + } + firstPos = v.Pos + v.Pos = v.Pos.WithIsStmt() + } else { + v.Pos = v.Pos.WithDefaultStmt() + } + } + if b.Pos.IsStmt() != src.PosNotStmt && !b.Pos.SameFileAndLine(firstPos) { + if f.pass.debug > 0 { + fmt.Printf("Mark stmt end of block differs %s %s %s prev pos = %s\n", f.Name, b, flc(b.Pos), flc(firstPos)) + } + b.Pos = b.Pos.WithIsStmt() + firstPos = b.Pos + } + endlines[b.ID] = firstPos + } + if f.pass.stats&1 != 0 { + // Report summary statistics on the shape of the sparse map about to be constructed + // TODO use this information to make sparse maps faster. 
+ var entries fileAndPairs + for k, v := range ranges { + entries = append(entries, fileAndPair{int32(k), v}) + } + sort.Sort(entries) + total := uint64(0) // sum over files of maxline(file) - minline(file) + maxfile := int32(0) // max(file indices) + minline := uint32(0xffffffff) // min over files of minline(file) + maxline := uint32(0) // max over files of maxline(file) + for _, v := range entries { + if f.pass.stats > 1 { + f.LogStat("file", v.f, "low", v.lp.first, "high", v.lp.last) + } + total += uint64(v.lp.last - v.lp.first) + if maxfile < v.f { + maxfile = v.f + } + if minline > v.lp.first { + minline = v.lp.first + } + if maxline < v.lp.last { + maxline = v.lp.last + } + } + f.LogStat("SUM_LINE_RANGE", total, "MAXMIN_LINE_RANGE", maxline-minline, "MAXFILE", maxfile, "NFILES", len(entries)) + } + // cachedLineStarts is an empty sparse map for values that are included within ranges. + f.cachedLineStarts = newXposmap(ranges) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/op.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/op.go new file mode 100644 index 0000000000000000000000000000000000000000..cb151b2f6c6ad6f1ec95b5af4115d010254356b6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/op.go @@ -0,0 +1,529 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/abi" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/obj" + "fmt" + "strings" +) + +// An Op encodes the specific operation that a Value performs. +// Opcodes' semantics can be modified by the type and aux fields of the Value. +// For instance, OpAdd can be 32 or 64 bit, signed or unsigned, float or complex, depending on Value.Type. +// Semantics of each op are described in the opcode files in _gen/*Ops.go. 
+// There is one file for generic (architecture-independent) ops and one file +// for each architecture. +type Op int32 + +type opInfo struct { + name string + reg regInfo + auxType auxType + argLen int32 // the number of arguments, -1 if variable length + asm obj.As + generic bool // this is a generic (arch-independent) opcode + rematerializeable bool // this op is rematerializeable + commutative bool // this operation is commutative (e.g. addition) + resultInArg0 bool // (first, if a tuple) output of v and v.Args[0] must be allocated to the same register + resultNotInArgs bool // outputs must not be allocated to the same registers as inputs + clobberFlags bool // this op clobbers flags register + needIntTemp bool // need a temporary free integer register + call bool // is a function call + tailCall bool // is a tail call + nilCheck bool // this op is a nil check on arg0 + faultOnNilArg0 bool // this op will fault if arg0 is nil (and aux encodes a small offset) + faultOnNilArg1 bool // this op will fault if arg1 is nil (and aux encodes a small offset) + usesScratch bool // this op requires scratch memory space + hasSideEffects bool // for "reasons", not to be eliminated. E.g., atomic store, #19182. + zeroWidth bool // op never translates into any machine code. example: copy, which may sometimes translate to machine code, is not zero-width. + unsafePoint bool // this op is an unsafe point, i.e. not safe for async preemption + symEffect SymEffect // effect this op has on symbol in aux + scale uint8 // amd64/386 indexed load scale +} + +type inputInfo struct { + idx int // index in Args array + regs regMask // allowed input registers +} + +type outputInfo struct { + idx int // index in output tuple + regs regMask // allowed output registers +} + +type regInfo struct { + // inputs encodes the register restrictions for an instruction's inputs. + // Each entry specifies an allowed register set for a particular input. 
+ // They are listed in the order in which regalloc should pick a register + // from the register set (most constrained first). + // Inputs which do not need registers are not listed. + inputs []inputInfo + // clobbers encodes the set of registers that are overwritten by + // the instruction (other than the output registers). + clobbers regMask + // outputs is the same as inputs, but for the outputs of the instruction. + outputs []outputInfo +} + +func (r *regInfo) String() string { + s := "" + s += "INS:\n" + for _, i := range r.inputs { + mask := fmt.Sprintf("%64b", i.regs) + mask = strings.Replace(mask, "0", ".", -1) + s += fmt.Sprintf("%2d |%s|\n", i.idx, mask) + } + s += "OUTS:\n" + for _, i := range r.outputs { + mask := fmt.Sprintf("%64b", i.regs) + mask = strings.Replace(mask, "0", ".", -1) + s += fmt.Sprintf("%2d |%s|\n", i.idx, mask) + } + s += "CLOBBERS:\n" + mask := fmt.Sprintf("%64b", r.clobbers) + mask = strings.Replace(mask, "0", ".", -1) + s += fmt.Sprintf(" |%s|\n", mask) + return s +} + +type auxType int8 + +type AuxNameOffset struct { + Name *ir.Name + Offset int64 +} + +func (a *AuxNameOffset) CanBeAnSSAAux() {} +func (a *AuxNameOffset) String() string { + return fmt.Sprintf("%s+%d", a.Name.Sym().Name, a.Offset) +} + +func (a *AuxNameOffset) FrameOffset() int64 { + return a.Name.FrameOffset() + a.Offset +} + +type AuxCall struct { + Fn *obj.LSym + reg *regInfo // regInfo for this call + abiInfo *abi.ABIParamResultInfo +} + +// Reg returns the regInfo for a given call, combining the derived in/out register masks +// with the machine-specific register information in the input i. (The machine-specific +// regInfo is much handier at the call site than it is when the AuxCall is being constructed, +// therefore do this lazily). 
+// +// TODO: there is a Clever Hack that allows pre-generation of a small-ish number of the slices +// of inputInfo and outputInfo used here, provided that we are willing to reorder the inputs +// and outputs from calls, so that all integer registers come first, then all floating registers. +// At this point (active development of register ABI) that is very premature, +// but if this turns out to be a cost, we could do it. +func (a *AuxCall) Reg(i *regInfo, c *Config) *regInfo { + if a.reg.clobbers != 0 { + // Already updated + return a.reg + } + if a.abiInfo.InRegistersUsed()+a.abiInfo.OutRegistersUsed() == 0 { + // Shortcut for zero case, also handles old ABI. + a.reg = i + return a.reg + } + + k := len(i.inputs) + for _, p := range a.abiInfo.InParams() { + for _, r := range p.Registers { + m := archRegForAbiReg(r, c) + a.reg.inputs = append(a.reg.inputs, inputInfo{idx: k, regs: (1 << m)}) + k++ + } + } + a.reg.inputs = append(a.reg.inputs, i.inputs...) // These are less constrained, thus should come last + k = len(i.outputs) + for _, p := range a.abiInfo.OutParams() { + for _, r := range p.Registers { + m := archRegForAbiReg(r, c) + a.reg.outputs = append(a.reg.outputs, outputInfo{idx: k, regs: (1 << m)}) + k++ + } + } + a.reg.outputs = append(a.reg.outputs, i.outputs...) + a.reg.clobbers = i.clobbers + return a.reg +} +func (a *AuxCall) ABI() *abi.ABIConfig { + return a.abiInfo.Config() +} +func (a *AuxCall) ABIInfo() *abi.ABIParamResultInfo { + return a.abiInfo +} +func (a *AuxCall) ResultReg(c *Config) *regInfo { + if a.abiInfo.OutRegistersUsed() == 0 { + return a.reg + } + if len(a.reg.inputs) > 0 { + return a.reg + } + k := 0 + for _, p := range a.abiInfo.OutParams() { + for _, r := range p.Registers { + m := archRegForAbiReg(r, c) + a.reg.inputs = append(a.reg.inputs, inputInfo{idx: k, regs: (1 << m)}) + k++ + } + } + return a.reg +} + +// For ABI register index r, returns the (dense) register number used in +// SSA backend. 
+func archRegForAbiReg(r abi.RegIndex, c *Config) uint8 { + var m int8 + if int(r) < len(c.intParamRegs) { + m = c.intParamRegs[r] + } else { + m = c.floatParamRegs[int(r)-len(c.intParamRegs)] + } + return uint8(m) +} + +// For ABI register index r, returns the register number used in the obj +// package (assembler). +func ObjRegForAbiReg(r abi.RegIndex, c *Config) int16 { + m := archRegForAbiReg(r, c) + return c.registers[m].objNum +} + +// ArgWidth returns the amount of stack needed for all the inputs +// and outputs of a function or method, including ABI-defined parameter +// slots and ABI-defined spill slots for register-resident parameters. +// +// The name is taken from the types package's ArgWidth(), +// which predated changes to the ABI; this version handles those changes. +func (a *AuxCall) ArgWidth() int64 { + return a.abiInfo.ArgWidth() +} + +// ParamAssignmentForResult returns the ABI Parameter assignment for result which (indexed 0, 1, etc). +func (a *AuxCall) ParamAssignmentForResult(which int64) *abi.ABIParamAssignment { + return a.abiInfo.OutParam(int(which)) +} + +// OffsetOfResult returns the SP offset of result which (indexed 0, 1, etc). +func (a *AuxCall) OffsetOfResult(which int64) int64 { + n := int64(a.abiInfo.OutParam(int(which)).Offset()) + return n +} + +// OffsetOfArg returns the SP offset of argument which (indexed 0, 1, etc). +// If the call is to a method, the receiver is the first argument (i.e., index 0) +func (a *AuxCall) OffsetOfArg(which int64) int64 { + n := int64(a.abiInfo.InParam(int(which)).Offset()) + return n +} + +// RegsOfResult returns the register(s) used for result which (indexed 0, 1, etc). +func (a *AuxCall) RegsOfResult(which int64) []abi.RegIndex { + return a.abiInfo.OutParam(int(which)).Registers +} + +// RegsOfArg returns the register(s) used for argument which (indexed 0, 1, etc). 
+// If the call is to a method, the receiver is the first argument (i.e., index 0) +func (a *AuxCall) RegsOfArg(which int64) []abi.RegIndex { + return a.abiInfo.InParam(int(which)).Registers +} + +// NameOfResult returns the ir.Name of result which (indexed 0, 1, etc). +func (a *AuxCall) NameOfResult(which int64) *ir.Name { + return a.abiInfo.OutParam(int(which)).Name +} + +// TypeOfResult returns the type of result which (indexed 0, 1, etc). +func (a *AuxCall) TypeOfResult(which int64) *types.Type { + return a.abiInfo.OutParam(int(which)).Type +} + +// TypeOfArg returns the type of argument which (indexed 0, 1, etc). +// If the call is to a method, the receiver is the first argument (i.e., index 0) +func (a *AuxCall) TypeOfArg(which int64) *types.Type { + return a.abiInfo.InParam(int(which)).Type +} + +// SizeOfResult returns the size of result which (indexed 0, 1, etc). +func (a *AuxCall) SizeOfResult(which int64) int64 { + return a.TypeOfResult(which).Size() +} + +// SizeOfArg returns the size of argument which (indexed 0, 1, etc). +// If the call is to a method, the receiver is the first argument (i.e., index 0) +func (a *AuxCall) SizeOfArg(which int64) int64 { + return a.TypeOfArg(which).Size() +} + +// NResults returns the number of results. +func (a *AuxCall) NResults() int64 { + return int64(len(a.abiInfo.OutParams())) +} + +// LateExpansionResultType returns the result type (including trailing mem) +// for a call that will be expanded later in the SSA phase. +func (a *AuxCall) LateExpansionResultType() *types.Type { + var tys []*types.Type + for i := int64(0); i < a.NResults(); i++ { + tys = append(tys, a.TypeOfResult(i)) + } + tys = append(tys, types.TypeMem) + return types.NewResults(tys) +} + +// NArgs returns the number of arguments (including receiver, if there is one). 
+func (a *AuxCall) NArgs() int64 { + return int64(len(a.abiInfo.InParams())) +} + +// String returns "AuxCall{}" +func (a *AuxCall) String() string { + var fn string + if a.Fn == nil { + fn = "AuxCall{nil" // could be interface/closure etc. + } else { + fn = fmt.Sprintf("AuxCall{%v", a.Fn) + } + // TODO how much of the ABI should be printed? + + return fn + "}" +} + +// StaticAuxCall returns an AuxCall for a static call. +func StaticAuxCall(sym *obj.LSym, paramResultInfo *abi.ABIParamResultInfo) *AuxCall { + if paramResultInfo == nil { + panic(fmt.Errorf("Nil paramResultInfo, sym=%v", sym)) + } + var reg *regInfo + if paramResultInfo.InRegistersUsed()+paramResultInfo.OutRegistersUsed() > 0 { + reg = ®Info{} + } + return &AuxCall{Fn: sym, abiInfo: paramResultInfo, reg: reg} +} + +// InterfaceAuxCall returns an AuxCall for an interface call. +func InterfaceAuxCall(paramResultInfo *abi.ABIParamResultInfo) *AuxCall { + var reg *regInfo + if paramResultInfo.InRegistersUsed()+paramResultInfo.OutRegistersUsed() > 0 { + reg = ®Info{} + } + return &AuxCall{Fn: nil, abiInfo: paramResultInfo, reg: reg} +} + +// ClosureAuxCall returns an AuxCall for a closure call. +func ClosureAuxCall(paramResultInfo *abi.ABIParamResultInfo) *AuxCall { + var reg *regInfo + if paramResultInfo.InRegistersUsed()+paramResultInfo.OutRegistersUsed() > 0 { + reg = ®Info{} + } + return &AuxCall{Fn: nil, abiInfo: paramResultInfo, reg: reg} +} + +func (*AuxCall) CanBeAnSSAAux() {} + +// OwnAuxCall returns a function's own AuxCall. +func OwnAuxCall(fn *obj.LSym, paramResultInfo *abi.ABIParamResultInfo) *AuxCall { + // TODO if this remains identical to ClosureAuxCall above after new ABI is done, should deduplicate. 
+ var reg *regInfo + if paramResultInfo.InRegistersUsed()+paramResultInfo.OutRegistersUsed() > 0 { + reg = ®Info{} + } + return &AuxCall{Fn: fn, abiInfo: paramResultInfo, reg: reg} +} + +const ( + auxNone auxType = iota + auxBool // auxInt is 0/1 for false/true + auxInt8 // auxInt is an 8-bit integer + auxInt16 // auxInt is a 16-bit integer + auxInt32 // auxInt is a 32-bit integer + auxInt64 // auxInt is a 64-bit integer + auxInt128 // auxInt represents a 128-bit integer. Always 0. + auxUInt8 // auxInt is an 8-bit unsigned integer + auxFloat32 // auxInt is a float32 (encoded with math.Float64bits) + auxFloat64 // auxInt is a float64 (encoded with math.Float64bits) + auxFlagConstant // auxInt is a flagConstant + auxNameOffsetInt8 // aux is a &struct{Name ir.Name, Offset int64}; auxInt is index in parameter registers array + auxString // aux is a string + auxSym // aux is a symbol (a *gc.Node for locals, an *obj.LSym for globals, or nil for none) + auxSymOff // aux is a symbol, auxInt is an offset + auxSymValAndOff // aux is a symbol, auxInt is a ValAndOff + auxTyp // aux is a type + auxTypSize // aux is a type, auxInt is a size, must have Aux.(Type).Size() == AuxInt + auxCCop // aux is a ssa.Op that represents a flags-to-bool conversion (e.g. LessThan) + auxCall // aux is a *ssa.AuxCall + auxCallOff // aux is a *ssa.AuxCall, AuxInt is int64 param (in+out) size + + // architecture specific aux types + auxARM64BitField // aux is an arm64 bitfield lsb and width packed into auxInt + auxS390XRotateParams // aux is a s390x rotate parameters object encoding start bit, end bit and rotate amount + auxS390XCCMask // aux is a s390x 4-bit condition code mask + auxS390XCCMaskInt8 // aux is a s390x 4-bit condition code mask, auxInt is an int8 immediate + auxS390XCCMaskUint8 // aux is a s390x 4-bit condition code mask, auxInt is a uint8 immediate +) + +// A SymEffect describes the effect that an SSA Value has on the variable +// identified by the symbol in its Aux field. 
+type SymEffect int8 + +const ( + SymRead SymEffect = 1 << iota + SymWrite + SymAddr + + SymRdWr = SymRead | SymWrite + + SymNone SymEffect = 0 +) + +// A Sym represents a symbolic offset from a base register. +// Currently a Sym can be one of 3 things: +// - a *gc.Node, for an offset from SP (the stack pointer) +// - a *obj.LSym, for an offset from SB (the global pointer) +// - nil, for no offset +type Sym interface { + CanBeAnSSASym() + CanBeAnSSAAux() +} + +// A ValAndOff is used by the several opcodes. It holds +// both a value and a pointer offset. +// A ValAndOff is intended to be encoded into an AuxInt field. +// The zero ValAndOff encodes a value of 0 and an offset of 0. +// The high 32 bits hold a value. +// The low 32 bits hold a pointer offset. +type ValAndOff int64 + +func (x ValAndOff) Val() int32 { return int32(int64(x) >> 32) } +func (x ValAndOff) Val64() int64 { return int64(x) >> 32 } +func (x ValAndOff) Val16() int16 { return int16(int64(x) >> 32) } +func (x ValAndOff) Val8() int8 { return int8(int64(x) >> 32) } + +func (x ValAndOff) Off64() int64 { return int64(int32(x)) } +func (x ValAndOff) Off() int32 { return int32(x) } + +func (x ValAndOff) String() string { + return fmt.Sprintf("val=%d,off=%d", x.Val(), x.Off()) +} + +// validVal reports whether the value can be used +// as an argument to makeValAndOff. 
+func validVal(val int64) bool { + return val == int64(int32(val)) +} + +func makeValAndOff(val, off int32) ValAndOff { + return ValAndOff(int64(val)<<32 + int64(uint32(off))) +} + +func (x ValAndOff) canAdd32(off int32) bool { + newoff := x.Off64() + int64(off) + return newoff == int64(int32(newoff)) +} +func (x ValAndOff) canAdd64(off int64) bool { + newoff := x.Off64() + off + return newoff == int64(int32(newoff)) +} + +func (x ValAndOff) addOffset32(off int32) ValAndOff { + if !x.canAdd32(off) { + panic("invalid ValAndOff.addOffset32") + } + return makeValAndOff(x.Val(), x.Off()+off) +} +func (x ValAndOff) addOffset64(off int64) ValAndOff { + if !x.canAdd64(off) { + panic("invalid ValAndOff.addOffset64") + } + return makeValAndOff(x.Val(), x.Off()+int32(off)) +} + +// int128 is a type that stores a 128-bit constant. +// The only allowed constant right now is 0, so we can cheat quite a bit. +type int128 int64 + +type BoundsKind uint8 + +const ( + BoundsIndex BoundsKind = iota // indexing operation, 0 <= idx < len failed + BoundsIndexU // ... with unsigned idx + BoundsSliceAlen // 2-arg slicing operation, 0 <= high <= len failed + BoundsSliceAlenU // ... with unsigned high + BoundsSliceAcap // 2-arg slicing operation, 0 <= high <= cap failed + BoundsSliceAcapU // ... with unsigned high + BoundsSliceB // 2-arg slicing operation, 0 <= low <= high failed + BoundsSliceBU // ... with unsigned low + BoundsSlice3Alen // 3-arg slicing operation, 0 <= max <= len failed + BoundsSlice3AlenU // ... with unsigned max + BoundsSlice3Acap // 3-arg slicing operation, 0 <= max <= cap failed + BoundsSlice3AcapU // ... with unsigned max + BoundsSlice3B // 3-arg slicing operation, 0 <= high <= max failed + BoundsSlice3BU // ... with unsigned high + BoundsSlice3C // 3-arg slicing operation, 0 <= low <= high failed + BoundsSlice3CU // ... 
with unsigned low + BoundsConvert // conversion to array pointer failed + BoundsKindCount +) + +// boundsABI determines which register arguments a bounds check call should use. For an [a:b:c] slice, we do: +// +// CMPQ c, cap +// JA fail1 +// CMPQ b, c +// JA fail2 +// CMPQ a, b +// JA fail3 +// +// fail1: CALL panicSlice3Acap (c, cap) +// fail2: CALL panicSlice3B (b, c) +// fail3: CALL panicSlice3C (a, b) +// +// When we register allocate that code, we want the same register to be used for +// the first arg of panicSlice3Acap and the second arg to panicSlice3B. That way, +// initializing that register once will satisfy both calls. +// That desire ends up dividing the set of bounds check calls into 3 sets. This function +// determines which set to use for a given panic call. +// The first arg for set 0 should be the second arg for set 1. +// The first arg for set 1 should be the second arg for set 2. +func boundsABI(b int64) int { + switch BoundsKind(b) { + case BoundsSlice3Alen, + BoundsSlice3AlenU, + BoundsSlice3Acap, + BoundsSlice3AcapU, + BoundsConvert: + return 0 + case BoundsSliceAlen, + BoundsSliceAlenU, + BoundsSliceAcap, + BoundsSliceAcapU, + BoundsSlice3B, + BoundsSlice3BU: + return 1 + case BoundsIndex, + BoundsIndexU, + BoundsSliceB, + BoundsSliceBU, + BoundsSlice3C, + BoundsSlice3CU: + return 2 + default: + panic("bad BoundsKind") + } +} + +// arm64BitField is the GO type of ARM64BitField auxInt. +// if x is an ARM64BitField, then width=x&0xff, lsb=(x>>8)&0xff, and +// width+lsb<64 for 64-bit variant, width+lsb<32 for 32-bit variant. +// the meaning of width and lsb are instruction-dependent. 
+type arm64BitField int16 diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/opGen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/opGen.go new file mode 100644 index 0000000000000000000000000000000000000000..c552832520ec436a82b1669f24591dd98879bbd5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/opGen.go @@ -0,0 +1,41139 @@ +// Code generated from _gen/*Ops.go using 'go generate'; DO NOT EDIT. + +package ssa + +import ( + "cmd/internal/obj" + "cmd/internal/obj/arm" + "cmd/internal/obj/arm64" + "cmd/internal/obj/loong64" + "cmd/internal/obj/mips" + "cmd/internal/obj/ppc64" + "cmd/internal/obj/riscv" + "cmd/internal/obj/s390x" + "cmd/internal/obj/wasm" + "cmd/internal/obj/x86" +) + +const ( + BlockInvalid BlockKind = iota + + Block386EQ + Block386NE + Block386LT + Block386LE + Block386GT + Block386GE + Block386OS + Block386OC + Block386ULT + Block386ULE + Block386UGT + Block386UGE + Block386EQF + Block386NEF + Block386ORD + Block386NAN + + BlockAMD64EQ + BlockAMD64NE + BlockAMD64LT + BlockAMD64LE + BlockAMD64GT + BlockAMD64GE + BlockAMD64OS + BlockAMD64OC + BlockAMD64ULT + BlockAMD64ULE + BlockAMD64UGT + BlockAMD64UGE + BlockAMD64EQF + BlockAMD64NEF + BlockAMD64ORD + BlockAMD64NAN + BlockAMD64JUMPTABLE + + BlockARMEQ + BlockARMNE + BlockARMLT + BlockARMLE + BlockARMGT + BlockARMGE + BlockARMULT + BlockARMULE + BlockARMUGT + BlockARMUGE + BlockARMLTnoov + BlockARMLEnoov + BlockARMGTnoov + BlockARMGEnoov + + BlockARM64EQ + BlockARM64NE + BlockARM64LT + BlockARM64LE + BlockARM64GT + BlockARM64GE + BlockARM64ULT + BlockARM64ULE + BlockARM64UGT + BlockARM64UGE + BlockARM64Z + BlockARM64NZ + BlockARM64ZW + BlockARM64NZW + BlockARM64TBZ + BlockARM64TBNZ + BlockARM64FLT + BlockARM64FLE + BlockARM64FGT + BlockARM64FGE + BlockARM64LTnoov + BlockARM64LEnoov + BlockARM64GTnoov + BlockARM64GEnoov + BlockARM64JUMPTABLE + + BlockLOONG64EQ + BlockLOONG64NE + BlockLOONG64LTZ + BlockLOONG64LEZ + BlockLOONG64GTZ + 
BlockLOONG64GEZ + BlockLOONG64FPT + BlockLOONG64FPF + + BlockMIPSEQ + BlockMIPSNE + BlockMIPSLTZ + BlockMIPSLEZ + BlockMIPSGTZ + BlockMIPSGEZ + BlockMIPSFPT + BlockMIPSFPF + + BlockMIPS64EQ + BlockMIPS64NE + BlockMIPS64LTZ + BlockMIPS64LEZ + BlockMIPS64GTZ + BlockMIPS64GEZ + BlockMIPS64FPT + BlockMIPS64FPF + + BlockPPC64EQ + BlockPPC64NE + BlockPPC64LT + BlockPPC64LE + BlockPPC64GT + BlockPPC64GE + BlockPPC64FLT + BlockPPC64FLE + BlockPPC64FGT + BlockPPC64FGE + + BlockRISCV64BEQ + BlockRISCV64BNE + BlockRISCV64BLT + BlockRISCV64BGE + BlockRISCV64BLTU + BlockRISCV64BGEU + BlockRISCV64BEQZ + BlockRISCV64BNEZ + BlockRISCV64BLEZ + BlockRISCV64BGEZ + BlockRISCV64BLTZ + BlockRISCV64BGTZ + + BlockS390XBRC + BlockS390XCRJ + BlockS390XCGRJ + BlockS390XCLRJ + BlockS390XCLGRJ + BlockS390XCIJ + BlockS390XCGIJ + BlockS390XCLIJ + BlockS390XCLGIJ + + BlockPlain + BlockIf + BlockDefer + BlockRet + BlockRetJmp + BlockExit + BlockJumpTable + BlockFirst +) + +var blockString = [...]string{ + BlockInvalid: "BlockInvalid", + + Block386EQ: "EQ", + Block386NE: "NE", + Block386LT: "LT", + Block386LE: "LE", + Block386GT: "GT", + Block386GE: "GE", + Block386OS: "OS", + Block386OC: "OC", + Block386ULT: "ULT", + Block386ULE: "ULE", + Block386UGT: "UGT", + Block386UGE: "UGE", + Block386EQF: "EQF", + Block386NEF: "NEF", + Block386ORD: "ORD", + Block386NAN: "NAN", + + BlockAMD64EQ: "EQ", + BlockAMD64NE: "NE", + BlockAMD64LT: "LT", + BlockAMD64LE: "LE", + BlockAMD64GT: "GT", + BlockAMD64GE: "GE", + BlockAMD64OS: "OS", + BlockAMD64OC: "OC", + BlockAMD64ULT: "ULT", + BlockAMD64ULE: "ULE", + BlockAMD64UGT: "UGT", + BlockAMD64UGE: "UGE", + BlockAMD64EQF: "EQF", + BlockAMD64NEF: "NEF", + BlockAMD64ORD: "ORD", + BlockAMD64NAN: "NAN", + BlockAMD64JUMPTABLE: "JUMPTABLE", + + BlockARMEQ: "EQ", + BlockARMNE: "NE", + BlockARMLT: "LT", + BlockARMLE: "LE", + BlockARMGT: "GT", + BlockARMGE: "GE", + BlockARMULT: "ULT", + BlockARMULE: "ULE", + BlockARMUGT: "UGT", + BlockARMUGE: "UGE", + BlockARMLTnoov: "LTnoov", 
+ BlockARMLEnoov: "LEnoov", + BlockARMGTnoov: "GTnoov", + BlockARMGEnoov: "GEnoov", + + BlockARM64EQ: "EQ", + BlockARM64NE: "NE", + BlockARM64LT: "LT", + BlockARM64LE: "LE", + BlockARM64GT: "GT", + BlockARM64GE: "GE", + BlockARM64ULT: "ULT", + BlockARM64ULE: "ULE", + BlockARM64UGT: "UGT", + BlockARM64UGE: "UGE", + BlockARM64Z: "Z", + BlockARM64NZ: "NZ", + BlockARM64ZW: "ZW", + BlockARM64NZW: "NZW", + BlockARM64TBZ: "TBZ", + BlockARM64TBNZ: "TBNZ", + BlockARM64FLT: "FLT", + BlockARM64FLE: "FLE", + BlockARM64FGT: "FGT", + BlockARM64FGE: "FGE", + BlockARM64LTnoov: "LTnoov", + BlockARM64LEnoov: "LEnoov", + BlockARM64GTnoov: "GTnoov", + BlockARM64GEnoov: "GEnoov", + BlockARM64JUMPTABLE: "JUMPTABLE", + + BlockLOONG64EQ: "EQ", + BlockLOONG64NE: "NE", + BlockLOONG64LTZ: "LTZ", + BlockLOONG64LEZ: "LEZ", + BlockLOONG64GTZ: "GTZ", + BlockLOONG64GEZ: "GEZ", + BlockLOONG64FPT: "FPT", + BlockLOONG64FPF: "FPF", + + BlockMIPSEQ: "EQ", + BlockMIPSNE: "NE", + BlockMIPSLTZ: "LTZ", + BlockMIPSLEZ: "LEZ", + BlockMIPSGTZ: "GTZ", + BlockMIPSGEZ: "GEZ", + BlockMIPSFPT: "FPT", + BlockMIPSFPF: "FPF", + + BlockMIPS64EQ: "EQ", + BlockMIPS64NE: "NE", + BlockMIPS64LTZ: "LTZ", + BlockMIPS64LEZ: "LEZ", + BlockMIPS64GTZ: "GTZ", + BlockMIPS64GEZ: "GEZ", + BlockMIPS64FPT: "FPT", + BlockMIPS64FPF: "FPF", + + BlockPPC64EQ: "EQ", + BlockPPC64NE: "NE", + BlockPPC64LT: "LT", + BlockPPC64LE: "LE", + BlockPPC64GT: "GT", + BlockPPC64GE: "GE", + BlockPPC64FLT: "FLT", + BlockPPC64FLE: "FLE", + BlockPPC64FGT: "FGT", + BlockPPC64FGE: "FGE", + + BlockRISCV64BEQ: "BEQ", + BlockRISCV64BNE: "BNE", + BlockRISCV64BLT: "BLT", + BlockRISCV64BGE: "BGE", + BlockRISCV64BLTU: "BLTU", + BlockRISCV64BGEU: "BGEU", + BlockRISCV64BEQZ: "BEQZ", + BlockRISCV64BNEZ: "BNEZ", + BlockRISCV64BLEZ: "BLEZ", + BlockRISCV64BGEZ: "BGEZ", + BlockRISCV64BLTZ: "BLTZ", + BlockRISCV64BGTZ: "BGTZ", + + BlockS390XBRC: "BRC", + BlockS390XCRJ: "CRJ", + BlockS390XCGRJ: "CGRJ", + BlockS390XCLRJ: "CLRJ", + BlockS390XCLGRJ: "CLGRJ", + BlockS390XCIJ: 
"CIJ", + BlockS390XCGIJ: "CGIJ", + BlockS390XCLIJ: "CLIJ", + BlockS390XCLGIJ: "CLGIJ", + + BlockPlain: "Plain", + BlockIf: "If", + BlockDefer: "Defer", + BlockRet: "Ret", + BlockRetJmp: "RetJmp", + BlockExit: "Exit", + BlockJumpTable: "JumpTable", + BlockFirst: "First", +} + +func (k BlockKind) String() string { return blockString[k] } +func (k BlockKind) AuxIntType() string { + switch k { + case BlockARM64TBZ: + return "int64" + case BlockARM64TBNZ: + return "int64" + case BlockS390XCIJ: + return "int8" + case BlockS390XCGIJ: + return "int8" + case BlockS390XCLIJ: + return "uint8" + case BlockS390XCLGIJ: + return "uint8" + } + return "" +} + +const ( + OpInvalid Op = iota + + Op386ADDSS + Op386ADDSD + Op386SUBSS + Op386SUBSD + Op386MULSS + Op386MULSD + Op386DIVSS + Op386DIVSD + Op386MOVSSload + Op386MOVSDload + Op386MOVSSconst + Op386MOVSDconst + Op386MOVSSloadidx1 + Op386MOVSSloadidx4 + Op386MOVSDloadidx1 + Op386MOVSDloadidx8 + Op386MOVSSstore + Op386MOVSDstore + Op386MOVSSstoreidx1 + Op386MOVSSstoreidx4 + Op386MOVSDstoreidx1 + Op386MOVSDstoreidx8 + Op386ADDSSload + Op386ADDSDload + Op386SUBSSload + Op386SUBSDload + Op386MULSSload + Op386MULSDload + Op386DIVSSload + Op386DIVSDload + Op386ADDL + Op386ADDLconst + Op386ADDLcarry + Op386ADDLconstcarry + Op386ADCL + Op386ADCLconst + Op386SUBL + Op386SUBLconst + Op386SUBLcarry + Op386SUBLconstcarry + Op386SBBL + Op386SBBLconst + Op386MULL + Op386MULLconst + Op386MULLU + Op386HMULL + Op386HMULLU + Op386MULLQU + Op386AVGLU + Op386DIVL + Op386DIVW + Op386DIVLU + Op386DIVWU + Op386MODL + Op386MODW + Op386MODLU + Op386MODWU + Op386ANDL + Op386ANDLconst + Op386ORL + Op386ORLconst + Op386XORL + Op386XORLconst + Op386CMPL + Op386CMPW + Op386CMPB + Op386CMPLconst + Op386CMPWconst + Op386CMPBconst + Op386CMPLload + Op386CMPWload + Op386CMPBload + Op386CMPLconstload + Op386CMPWconstload + Op386CMPBconstload + Op386UCOMISS + Op386UCOMISD + Op386TESTL + Op386TESTW + Op386TESTB + Op386TESTLconst + Op386TESTWconst + Op386TESTBconst + 
Op386SHLL + Op386SHLLconst + Op386SHRL + Op386SHRW + Op386SHRB + Op386SHRLconst + Op386SHRWconst + Op386SHRBconst + Op386SARL + Op386SARW + Op386SARB + Op386SARLconst + Op386SARWconst + Op386SARBconst + Op386ROLL + Op386ROLW + Op386ROLB + Op386ROLLconst + Op386ROLWconst + Op386ROLBconst + Op386ADDLload + Op386SUBLload + Op386MULLload + Op386ANDLload + Op386ORLload + Op386XORLload + Op386ADDLloadidx4 + Op386SUBLloadidx4 + Op386MULLloadidx4 + Op386ANDLloadidx4 + Op386ORLloadidx4 + Op386XORLloadidx4 + Op386NEGL + Op386NOTL + Op386BSFL + Op386BSFW + Op386LoweredCtz32 + Op386BSRL + Op386BSRW + Op386BSWAPL + Op386SQRTSD + Op386SQRTSS + Op386SBBLcarrymask + Op386SETEQ + Op386SETNE + Op386SETL + Op386SETLE + Op386SETG + Op386SETGE + Op386SETB + Op386SETBE + Op386SETA + Op386SETAE + Op386SETO + Op386SETEQF + Op386SETNEF + Op386SETORD + Op386SETNAN + Op386SETGF + Op386SETGEF + Op386MOVBLSX + Op386MOVBLZX + Op386MOVWLSX + Op386MOVWLZX + Op386MOVLconst + Op386CVTTSD2SL + Op386CVTTSS2SL + Op386CVTSL2SS + Op386CVTSL2SD + Op386CVTSD2SS + Op386CVTSS2SD + Op386PXOR + Op386LEAL + Op386LEAL1 + Op386LEAL2 + Op386LEAL4 + Op386LEAL8 + Op386MOVBload + Op386MOVBLSXload + Op386MOVWload + Op386MOVWLSXload + Op386MOVLload + Op386MOVBstore + Op386MOVWstore + Op386MOVLstore + Op386ADDLmodify + Op386SUBLmodify + Op386ANDLmodify + Op386ORLmodify + Op386XORLmodify + Op386ADDLmodifyidx4 + Op386SUBLmodifyidx4 + Op386ANDLmodifyidx4 + Op386ORLmodifyidx4 + Op386XORLmodifyidx4 + Op386ADDLconstmodify + Op386ANDLconstmodify + Op386ORLconstmodify + Op386XORLconstmodify + Op386ADDLconstmodifyidx4 + Op386ANDLconstmodifyidx4 + Op386ORLconstmodifyidx4 + Op386XORLconstmodifyidx4 + Op386MOVBloadidx1 + Op386MOVWloadidx1 + Op386MOVWloadidx2 + Op386MOVLloadidx1 + Op386MOVLloadidx4 + Op386MOVBstoreidx1 + Op386MOVWstoreidx1 + Op386MOVWstoreidx2 + Op386MOVLstoreidx1 + Op386MOVLstoreidx4 + Op386MOVBstoreconst + Op386MOVWstoreconst + Op386MOVLstoreconst + Op386MOVBstoreconstidx1 + Op386MOVWstoreconstidx1 + 
Op386MOVWstoreconstidx2 + Op386MOVLstoreconstidx1 + Op386MOVLstoreconstidx4 + Op386DUFFZERO + Op386REPSTOSL + Op386CALLstatic + Op386CALLtail + Op386CALLclosure + Op386CALLinter + Op386DUFFCOPY + Op386REPMOVSL + Op386InvertFlags + Op386LoweredGetG + Op386LoweredGetClosurePtr + Op386LoweredGetCallerPC + Op386LoweredGetCallerSP + Op386LoweredNilCheck + Op386LoweredWB + Op386LoweredPanicBoundsA + Op386LoweredPanicBoundsB + Op386LoweredPanicBoundsC + Op386LoweredPanicExtendA + Op386LoweredPanicExtendB + Op386LoweredPanicExtendC + Op386FlagEQ + Op386FlagLT_ULT + Op386FlagLT_UGT + Op386FlagGT_UGT + Op386FlagGT_ULT + Op386MOVSSconst1 + Op386MOVSDconst1 + Op386MOVSSconst2 + Op386MOVSDconst2 + + OpAMD64ADDSS + OpAMD64ADDSD + OpAMD64SUBSS + OpAMD64SUBSD + OpAMD64MULSS + OpAMD64MULSD + OpAMD64DIVSS + OpAMD64DIVSD + OpAMD64MOVSSload + OpAMD64MOVSDload + OpAMD64MOVSSconst + OpAMD64MOVSDconst + OpAMD64MOVSSloadidx1 + OpAMD64MOVSSloadidx4 + OpAMD64MOVSDloadidx1 + OpAMD64MOVSDloadidx8 + OpAMD64MOVSSstore + OpAMD64MOVSDstore + OpAMD64MOVSSstoreidx1 + OpAMD64MOVSSstoreidx4 + OpAMD64MOVSDstoreidx1 + OpAMD64MOVSDstoreidx8 + OpAMD64ADDSSload + OpAMD64ADDSDload + OpAMD64SUBSSload + OpAMD64SUBSDload + OpAMD64MULSSload + OpAMD64MULSDload + OpAMD64DIVSSload + OpAMD64DIVSDload + OpAMD64ADDSSloadidx1 + OpAMD64ADDSSloadidx4 + OpAMD64ADDSDloadidx1 + OpAMD64ADDSDloadidx8 + OpAMD64SUBSSloadidx1 + OpAMD64SUBSSloadidx4 + OpAMD64SUBSDloadidx1 + OpAMD64SUBSDloadidx8 + OpAMD64MULSSloadidx1 + OpAMD64MULSSloadidx4 + OpAMD64MULSDloadidx1 + OpAMD64MULSDloadidx8 + OpAMD64DIVSSloadidx1 + OpAMD64DIVSSloadidx4 + OpAMD64DIVSDloadidx1 + OpAMD64DIVSDloadidx8 + OpAMD64ADDQ + OpAMD64ADDL + OpAMD64ADDQconst + OpAMD64ADDLconst + OpAMD64ADDQconstmodify + OpAMD64ADDLconstmodify + OpAMD64SUBQ + OpAMD64SUBL + OpAMD64SUBQconst + OpAMD64SUBLconst + OpAMD64MULQ + OpAMD64MULL + OpAMD64MULQconst + OpAMD64MULLconst + OpAMD64MULLU + OpAMD64MULQU + OpAMD64HMULQ + OpAMD64HMULL + OpAMD64HMULQU + OpAMD64HMULLU + OpAMD64AVGQU + 
OpAMD64DIVQ + OpAMD64DIVL + OpAMD64DIVW + OpAMD64DIVQU + OpAMD64DIVLU + OpAMD64DIVWU + OpAMD64NEGLflags + OpAMD64ADDQcarry + OpAMD64ADCQ + OpAMD64ADDQconstcarry + OpAMD64ADCQconst + OpAMD64SUBQborrow + OpAMD64SBBQ + OpAMD64SUBQconstborrow + OpAMD64SBBQconst + OpAMD64MULQU2 + OpAMD64DIVQU2 + OpAMD64ANDQ + OpAMD64ANDL + OpAMD64ANDQconst + OpAMD64ANDLconst + OpAMD64ANDQconstmodify + OpAMD64ANDLconstmodify + OpAMD64ORQ + OpAMD64ORL + OpAMD64ORQconst + OpAMD64ORLconst + OpAMD64ORQconstmodify + OpAMD64ORLconstmodify + OpAMD64XORQ + OpAMD64XORL + OpAMD64XORQconst + OpAMD64XORLconst + OpAMD64XORQconstmodify + OpAMD64XORLconstmodify + OpAMD64CMPQ + OpAMD64CMPL + OpAMD64CMPW + OpAMD64CMPB + OpAMD64CMPQconst + OpAMD64CMPLconst + OpAMD64CMPWconst + OpAMD64CMPBconst + OpAMD64CMPQload + OpAMD64CMPLload + OpAMD64CMPWload + OpAMD64CMPBload + OpAMD64CMPQconstload + OpAMD64CMPLconstload + OpAMD64CMPWconstload + OpAMD64CMPBconstload + OpAMD64CMPQloadidx8 + OpAMD64CMPQloadidx1 + OpAMD64CMPLloadidx4 + OpAMD64CMPLloadidx1 + OpAMD64CMPWloadidx2 + OpAMD64CMPWloadidx1 + OpAMD64CMPBloadidx1 + OpAMD64CMPQconstloadidx8 + OpAMD64CMPQconstloadidx1 + OpAMD64CMPLconstloadidx4 + OpAMD64CMPLconstloadidx1 + OpAMD64CMPWconstloadidx2 + OpAMD64CMPWconstloadidx1 + OpAMD64CMPBconstloadidx1 + OpAMD64UCOMISS + OpAMD64UCOMISD + OpAMD64BTL + OpAMD64BTQ + OpAMD64BTCL + OpAMD64BTCQ + OpAMD64BTRL + OpAMD64BTRQ + OpAMD64BTSL + OpAMD64BTSQ + OpAMD64BTLconst + OpAMD64BTQconst + OpAMD64BTCQconst + OpAMD64BTRQconst + OpAMD64BTSQconst + OpAMD64BTSQconstmodify + OpAMD64BTRQconstmodify + OpAMD64BTCQconstmodify + OpAMD64TESTQ + OpAMD64TESTL + OpAMD64TESTW + OpAMD64TESTB + OpAMD64TESTQconst + OpAMD64TESTLconst + OpAMD64TESTWconst + OpAMD64TESTBconst + OpAMD64SHLQ + OpAMD64SHLL + OpAMD64SHLQconst + OpAMD64SHLLconst + OpAMD64SHRQ + OpAMD64SHRL + OpAMD64SHRW + OpAMD64SHRB + OpAMD64SHRQconst + OpAMD64SHRLconst + OpAMD64SHRWconst + OpAMD64SHRBconst + OpAMD64SARQ + OpAMD64SARL + OpAMD64SARW + OpAMD64SARB + OpAMD64SARQconst + 
OpAMD64SARLconst + OpAMD64SARWconst + OpAMD64SARBconst + OpAMD64SHRDQ + OpAMD64SHLDQ + OpAMD64ROLQ + OpAMD64ROLL + OpAMD64ROLW + OpAMD64ROLB + OpAMD64RORQ + OpAMD64RORL + OpAMD64RORW + OpAMD64RORB + OpAMD64ROLQconst + OpAMD64ROLLconst + OpAMD64ROLWconst + OpAMD64ROLBconst + OpAMD64ADDLload + OpAMD64ADDQload + OpAMD64SUBQload + OpAMD64SUBLload + OpAMD64ANDLload + OpAMD64ANDQload + OpAMD64ORQload + OpAMD64ORLload + OpAMD64XORQload + OpAMD64XORLload + OpAMD64ADDLloadidx1 + OpAMD64ADDLloadidx4 + OpAMD64ADDLloadidx8 + OpAMD64ADDQloadidx1 + OpAMD64ADDQloadidx8 + OpAMD64SUBLloadidx1 + OpAMD64SUBLloadidx4 + OpAMD64SUBLloadidx8 + OpAMD64SUBQloadidx1 + OpAMD64SUBQloadidx8 + OpAMD64ANDLloadidx1 + OpAMD64ANDLloadidx4 + OpAMD64ANDLloadidx8 + OpAMD64ANDQloadidx1 + OpAMD64ANDQloadidx8 + OpAMD64ORLloadidx1 + OpAMD64ORLloadidx4 + OpAMD64ORLloadidx8 + OpAMD64ORQloadidx1 + OpAMD64ORQloadidx8 + OpAMD64XORLloadidx1 + OpAMD64XORLloadidx4 + OpAMD64XORLloadidx8 + OpAMD64XORQloadidx1 + OpAMD64XORQloadidx8 + OpAMD64ADDQmodify + OpAMD64SUBQmodify + OpAMD64ANDQmodify + OpAMD64ORQmodify + OpAMD64XORQmodify + OpAMD64ADDLmodify + OpAMD64SUBLmodify + OpAMD64ANDLmodify + OpAMD64ORLmodify + OpAMD64XORLmodify + OpAMD64ADDQmodifyidx1 + OpAMD64ADDQmodifyidx8 + OpAMD64SUBQmodifyidx1 + OpAMD64SUBQmodifyidx8 + OpAMD64ANDQmodifyidx1 + OpAMD64ANDQmodifyidx8 + OpAMD64ORQmodifyidx1 + OpAMD64ORQmodifyidx8 + OpAMD64XORQmodifyidx1 + OpAMD64XORQmodifyidx8 + OpAMD64ADDLmodifyidx1 + OpAMD64ADDLmodifyidx4 + OpAMD64ADDLmodifyidx8 + OpAMD64SUBLmodifyidx1 + OpAMD64SUBLmodifyidx4 + OpAMD64SUBLmodifyidx8 + OpAMD64ANDLmodifyidx1 + OpAMD64ANDLmodifyidx4 + OpAMD64ANDLmodifyidx8 + OpAMD64ORLmodifyidx1 + OpAMD64ORLmodifyidx4 + OpAMD64ORLmodifyidx8 + OpAMD64XORLmodifyidx1 + OpAMD64XORLmodifyidx4 + OpAMD64XORLmodifyidx8 + OpAMD64ADDQconstmodifyidx1 + OpAMD64ADDQconstmodifyidx8 + OpAMD64ANDQconstmodifyidx1 + OpAMD64ANDQconstmodifyidx8 + OpAMD64ORQconstmodifyidx1 + OpAMD64ORQconstmodifyidx8 + OpAMD64XORQconstmodifyidx1 + 
OpAMD64XORQconstmodifyidx8 + OpAMD64ADDLconstmodifyidx1 + OpAMD64ADDLconstmodifyidx4 + OpAMD64ADDLconstmodifyidx8 + OpAMD64ANDLconstmodifyidx1 + OpAMD64ANDLconstmodifyidx4 + OpAMD64ANDLconstmodifyidx8 + OpAMD64ORLconstmodifyidx1 + OpAMD64ORLconstmodifyidx4 + OpAMD64ORLconstmodifyidx8 + OpAMD64XORLconstmodifyidx1 + OpAMD64XORLconstmodifyidx4 + OpAMD64XORLconstmodifyidx8 + OpAMD64NEGQ + OpAMD64NEGL + OpAMD64NOTQ + OpAMD64NOTL + OpAMD64BSFQ + OpAMD64BSFL + OpAMD64BSRQ + OpAMD64BSRL + OpAMD64CMOVQEQ + OpAMD64CMOVQNE + OpAMD64CMOVQLT + OpAMD64CMOVQGT + OpAMD64CMOVQLE + OpAMD64CMOVQGE + OpAMD64CMOVQLS + OpAMD64CMOVQHI + OpAMD64CMOVQCC + OpAMD64CMOVQCS + OpAMD64CMOVLEQ + OpAMD64CMOVLNE + OpAMD64CMOVLLT + OpAMD64CMOVLGT + OpAMD64CMOVLLE + OpAMD64CMOVLGE + OpAMD64CMOVLLS + OpAMD64CMOVLHI + OpAMD64CMOVLCC + OpAMD64CMOVLCS + OpAMD64CMOVWEQ + OpAMD64CMOVWNE + OpAMD64CMOVWLT + OpAMD64CMOVWGT + OpAMD64CMOVWLE + OpAMD64CMOVWGE + OpAMD64CMOVWLS + OpAMD64CMOVWHI + OpAMD64CMOVWCC + OpAMD64CMOVWCS + OpAMD64CMOVQEQF + OpAMD64CMOVQNEF + OpAMD64CMOVQGTF + OpAMD64CMOVQGEF + OpAMD64CMOVLEQF + OpAMD64CMOVLNEF + OpAMD64CMOVLGTF + OpAMD64CMOVLGEF + OpAMD64CMOVWEQF + OpAMD64CMOVWNEF + OpAMD64CMOVWGTF + OpAMD64CMOVWGEF + OpAMD64BSWAPQ + OpAMD64BSWAPL + OpAMD64POPCNTQ + OpAMD64POPCNTL + OpAMD64SQRTSD + OpAMD64SQRTSS + OpAMD64ROUNDSD + OpAMD64VFMADD231SD + OpAMD64MINSD + OpAMD64MINSS + OpAMD64SBBQcarrymask + OpAMD64SBBLcarrymask + OpAMD64SETEQ + OpAMD64SETNE + OpAMD64SETL + OpAMD64SETLE + OpAMD64SETG + OpAMD64SETGE + OpAMD64SETB + OpAMD64SETBE + OpAMD64SETA + OpAMD64SETAE + OpAMD64SETO + OpAMD64SETEQstore + OpAMD64SETNEstore + OpAMD64SETLstore + OpAMD64SETLEstore + OpAMD64SETGstore + OpAMD64SETGEstore + OpAMD64SETBstore + OpAMD64SETBEstore + OpAMD64SETAstore + OpAMD64SETAEstore + OpAMD64SETEQstoreidx1 + OpAMD64SETNEstoreidx1 + OpAMD64SETLstoreidx1 + OpAMD64SETLEstoreidx1 + OpAMD64SETGstoreidx1 + OpAMD64SETGEstoreidx1 + OpAMD64SETBstoreidx1 + OpAMD64SETBEstoreidx1 + OpAMD64SETAstoreidx1 + 
OpAMD64SETAEstoreidx1 + OpAMD64SETEQF + OpAMD64SETNEF + OpAMD64SETORD + OpAMD64SETNAN + OpAMD64SETGF + OpAMD64SETGEF + OpAMD64MOVBQSX + OpAMD64MOVBQZX + OpAMD64MOVWQSX + OpAMD64MOVWQZX + OpAMD64MOVLQSX + OpAMD64MOVLQZX + OpAMD64MOVLconst + OpAMD64MOVQconst + OpAMD64CVTTSD2SL + OpAMD64CVTTSD2SQ + OpAMD64CVTTSS2SL + OpAMD64CVTTSS2SQ + OpAMD64CVTSL2SS + OpAMD64CVTSL2SD + OpAMD64CVTSQ2SS + OpAMD64CVTSQ2SD + OpAMD64CVTSD2SS + OpAMD64CVTSS2SD + OpAMD64MOVQi2f + OpAMD64MOVQf2i + OpAMD64MOVLi2f + OpAMD64MOVLf2i + OpAMD64PXOR + OpAMD64POR + OpAMD64LEAQ + OpAMD64LEAL + OpAMD64LEAW + OpAMD64LEAQ1 + OpAMD64LEAL1 + OpAMD64LEAW1 + OpAMD64LEAQ2 + OpAMD64LEAL2 + OpAMD64LEAW2 + OpAMD64LEAQ4 + OpAMD64LEAL4 + OpAMD64LEAW4 + OpAMD64LEAQ8 + OpAMD64LEAL8 + OpAMD64LEAW8 + OpAMD64MOVBload + OpAMD64MOVBQSXload + OpAMD64MOVWload + OpAMD64MOVWQSXload + OpAMD64MOVLload + OpAMD64MOVLQSXload + OpAMD64MOVQload + OpAMD64MOVBstore + OpAMD64MOVWstore + OpAMD64MOVLstore + OpAMD64MOVQstore + OpAMD64MOVOload + OpAMD64MOVOstore + OpAMD64MOVBloadidx1 + OpAMD64MOVWloadidx1 + OpAMD64MOVWloadidx2 + OpAMD64MOVLloadidx1 + OpAMD64MOVLloadidx4 + OpAMD64MOVLloadidx8 + OpAMD64MOVQloadidx1 + OpAMD64MOVQloadidx8 + OpAMD64MOVBstoreidx1 + OpAMD64MOVWstoreidx1 + OpAMD64MOVWstoreidx2 + OpAMD64MOVLstoreidx1 + OpAMD64MOVLstoreidx4 + OpAMD64MOVLstoreidx8 + OpAMD64MOVQstoreidx1 + OpAMD64MOVQstoreidx8 + OpAMD64MOVBstoreconst + OpAMD64MOVWstoreconst + OpAMD64MOVLstoreconst + OpAMD64MOVQstoreconst + OpAMD64MOVOstoreconst + OpAMD64MOVBstoreconstidx1 + OpAMD64MOVWstoreconstidx1 + OpAMD64MOVWstoreconstidx2 + OpAMD64MOVLstoreconstidx1 + OpAMD64MOVLstoreconstidx4 + OpAMD64MOVQstoreconstidx1 + OpAMD64MOVQstoreconstidx8 + OpAMD64DUFFZERO + OpAMD64REPSTOSQ + OpAMD64CALLstatic + OpAMD64CALLtail + OpAMD64CALLclosure + OpAMD64CALLinter + OpAMD64DUFFCOPY + OpAMD64REPMOVSQ + OpAMD64InvertFlags + OpAMD64LoweredGetG + OpAMD64LoweredGetClosurePtr + OpAMD64LoweredGetCallerPC + OpAMD64LoweredGetCallerSP + OpAMD64LoweredNilCheck + 
OpAMD64LoweredWB + OpAMD64LoweredHasCPUFeature + OpAMD64LoweredPanicBoundsA + OpAMD64LoweredPanicBoundsB + OpAMD64LoweredPanicBoundsC + OpAMD64FlagEQ + OpAMD64FlagLT_ULT + OpAMD64FlagLT_UGT + OpAMD64FlagGT_UGT + OpAMD64FlagGT_ULT + OpAMD64MOVBatomicload + OpAMD64MOVLatomicload + OpAMD64MOVQatomicload + OpAMD64XCHGB + OpAMD64XCHGL + OpAMD64XCHGQ + OpAMD64XADDLlock + OpAMD64XADDQlock + OpAMD64AddTupleFirst32 + OpAMD64AddTupleFirst64 + OpAMD64CMPXCHGLlock + OpAMD64CMPXCHGQlock + OpAMD64ANDBlock + OpAMD64ANDLlock + OpAMD64ORBlock + OpAMD64ORLlock + OpAMD64PrefetchT0 + OpAMD64PrefetchNTA + OpAMD64ANDNQ + OpAMD64ANDNL + OpAMD64BLSIQ + OpAMD64BLSIL + OpAMD64BLSMSKQ + OpAMD64BLSMSKL + OpAMD64BLSRQ + OpAMD64BLSRL + OpAMD64TZCNTQ + OpAMD64TZCNTL + OpAMD64LZCNTQ + OpAMD64LZCNTL + OpAMD64MOVBEWstore + OpAMD64MOVBELload + OpAMD64MOVBELstore + OpAMD64MOVBEQload + OpAMD64MOVBEQstore + OpAMD64MOVBELloadidx1 + OpAMD64MOVBELloadidx4 + OpAMD64MOVBELloadidx8 + OpAMD64MOVBEQloadidx1 + OpAMD64MOVBEQloadidx8 + OpAMD64MOVBEWstoreidx1 + OpAMD64MOVBEWstoreidx2 + OpAMD64MOVBELstoreidx1 + OpAMD64MOVBELstoreidx4 + OpAMD64MOVBELstoreidx8 + OpAMD64MOVBEQstoreidx1 + OpAMD64MOVBEQstoreidx8 + OpAMD64SARXQ + OpAMD64SARXL + OpAMD64SHLXQ + OpAMD64SHLXL + OpAMD64SHRXQ + OpAMD64SHRXL + OpAMD64SARXLload + OpAMD64SARXQload + OpAMD64SHLXLload + OpAMD64SHLXQload + OpAMD64SHRXLload + OpAMD64SHRXQload + OpAMD64SARXLloadidx1 + OpAMD64SARXLloadidx4 + OpAMD64SARXLloadidx8 + OpAMD64SARXQloadidx1 + OpAMD64SARXQloadidx8 + OpAMD64SHLXLloadidx1 + OpAMD64SHLXLloadidx4 + OpAMD64SHLXLloadidx8 + OpAMD64SHLXQloadidx1 + OpAMD64SHLXQloadidx8 + OpAMD64SHRXLloadidx1 + OpAMD64SHRXLloadidx4 + OpAMD64SHRXLloadidx8 + OpAMD64SHRXQloadidx1 + OpAMD64SHRXQloadidx8 + + OpARMADD + OpARMADDconst + OpARMSUB + OpARMSUBconst + OpARMRSB + OpARMRSBconst + OpARMMUL + OpARMHMUL + OpARMHMULU + OpARMCALLudiv + OpARMADDS + OpARMADDSconst + OpARMADC + OpARMADCconst + OpARMSUBS + OpARMSUBSconst + OpARMRSBSconst + OpARMSBC + OpARMSBCconst + 
OpARMRSCconst + OpARMMULLU + OpARMMULA + OpARMMULS + OpARMADDF + OpARMADDD + OpARMSUBF + OpARMSUBD + OpARMMULF + OpARMMULD + OpARMNMULF + OpARMNMULD + OpARMDIVF + OpARMDIVD + OpARMMULAF + OpARMMULAD + OpARMMULSF + OpARMMULSD + OpARMFMULAD + OpARMAND + OpARMANDconst + OpARMOR + OpARMORconst + OpARMXOR + OpARMXORconst + OpARMBIC + OpARMBICconst + OpARMBFX + OpARMBFXU + OpARMMVN + OpARMNEGF + OpARMNEGD + OpARMSQRTD + OpARMSQRTF + OpARMABSD + OpARMCLZ + OpARMREV + OpARMREV16 + OpARMRBIT + OpARMSLL + OpARMSLLconst + OpARMSRL + OpARMSRLconst + OpARMSRA + OpARMSRAconst + OpARMSRR + OpARMSRRconst + OpARMADDshiftLL + OpARMADDshiftRL + OpARMADDshiftRA + OpARMSUBshiftLL + OpARMSUBshiftRL + OpARMSUBshiftRA + OpARMRSBshiftLL + OpARMRSBshiftRL + OpARMRSBshiftRA + OpARMANDshiftLL + OpARMANDshiftRL + OpARMANDshiftRA + OpARMORshiftLL + OpARMORshiftRL + OpARMORshiftRA + OpARMXORshiftLL + OpARMXORshiftRL + OpARMXORshiftRA + OpARMXORshiftRR + OpARMBICshiftLL + OpARMBICshiftRL + OpARMBICshiftRA + OpARMMVNshiftLL + OpARMMVNshiftRL + OpARMMVNshiftRA + OpARMADCshiftLL + OpARMADCshiftRL + OpARMADCshiftRA + OpARMSBCshiftLL + OpARMSBCshiftRL + OpARMSBCshiftRA + OpARMRSCshiftLL + OpARMRSCshiftRL + OpARMRSCshiftRA + OpARMADDSshiftLL + OpARMADDSshiftRL + OpARMADDSshiftRA + OpARMSUBSshiftLL + OpARMSUBSshiftRL + OpARMSUBSshiftRA + OpARMRSBSshiftLL + OpARMRSBSshiftRL + OpARMRSBSshiftRA + OpARMADDshiftLLreg + OpARMADDshiftRLreg + OpARMADDshiftRAreg + OpARMSUBshiftLLreg + OpARMSUBshiftRLreg + OpARMSUBshiftRAreg + OpARMRSBshiftLLreg + OpARMRSBshiftRLreg + OpARMRSBshiftRAreg + OpARMANDshiftLLreg + OpARMANDshiftRLreg + OpARMANDshiftRAreg + OpARMORshiftLLreg + OpARMORshiftRLreg + OpARMORshiftRAreg + OpARMXORshiftLLreg + OpARMXORshiftRLreg + OpARMXORshiftRAreg + OpARMBICshiftLLreg + OpARMBICshiftRLreg + OpARMBICshiftRAreg + OpARMMVNshiftLLreg + OpARMMVNshiftRLreg + OpARMMVNshiftRAreg + OpARMADCshiftLLreg + OpARMADCshiftRLreg + OpARMADCshiftRAreg + OpARMSBCshiftLLreg + OpARMSBCshiftRLreg + 
OpARMSBCshiftRAreg + OpARMRSCshiftLLreg + OpARMRSCshiftRLreg + OpARMRSCshiftRAreg + OpARMADDSshiftLLreg + OpARMADDSshiftRLreg + OpARMADDSshiftRAreg + OpARMSUBSshiftLLreg + OpARMSUBSshiftRLreg + OpARMSUBSshiftRAreg + OpARMRSBSshiftLLreg + OpARMRSBSshiftRLreg + OpARMRSBSshiftRAreg + OpARMCMP + OpARMCMPconst + OpARMCMN + OpARMCMNconst + OpARMTST + OpARMTSTconst + OpARMTEQ + OpARMTEQconst + OpARMCMPF + OpARMCMPD + OpARMCMPshiftLL + OpARMCMPshiftRL + OpARMCMPshiftRA + OpARMCMNshiftLL + OpARMCMNshiftRL + OpARMCMNshiftRA + OpARMTSTshiftLL + OpARMTSTshiftRL + OpARMTSTshiftRA + OpARMTEQshiftLL + OpARMTEQshiftRL + OpARMTEQshiftRA + OpARMCMPshiftLLreg + OpARMCMPshiftRLreg + OpARMCMPshiftRAreg + OpARMCMNshiftLLreg + OpARMCMNshiftRLreg + OpARMCMNshiftRAreg + OpARMTSTshiftLLreg + OpARMTSTshiftRLreg + OpARMTSTshiftRAreg + OpARMTEQshiftLLreg + OpARMTEQshiftRLreg + OpARMTEQshiftRAreg + OpARMCMPF0 + OpARMCMPD0 + OpARMMOVWconst + OpARMMOVFconst + OpARMMOVDconst + OpARMMOVWaddr + OpARMMOVBload + OpARMMOVBUload + OpARMMOVHload + OpARMMOVHUload + OpARMMOVWload + OpARMMOVFload + OpARMMOVDload + OpARMMOVBstore + OpARMMOVHstore + OpARMMOVWstore + OpARMMOVFstore + OpARMMOVDstore + OpARMMOVWloadidx + OpARMMOVWloadshiftLL + OpARMMOVWloadshiftRL + OpARMMOVWloadshiftRA + OpARMMOVBUloadidx + OpARMMOVBloadidx + OpARMMOVHUloadidx + OpARMMOVHloadidx + OpARMMOVWstoreidx + OpARMMOVWstoreshiftLL + OpARMMOVWstoreshiftRL + OpARMMOVWstoreshiftRA + OpARMMOVBstoreidx + OpARMMOVHstoreidx + OpARMMOVBreg + OpARMMOVBUreg + OpARMMOVHreg + OpARMMOVHUreg + OpARMMOVWreg + OpARMMOVWnop + OpARMMOVWF + OpARMMOVWD + OpARMMOVWUF + OpARMMOVWUD + OpARMMOVFW + OpARMMOVDW + OpARMMOVFWU + OpARMMOVDWU + OpARMMOVFD + OpARMMOVDF + OpARMCMOVWHSconst + OpARMCMOVWLSconst + OpARMSRAcond + OpARMCALLstatic + OpARMCALLtail + OpARMCALLclosure + OpARMCALLinter + OpARMLoweredNilCheck + OpARMEqual + OpARMNotEqual + OpARMLessThan + OpARMLessEqual + OpARMGreaterThan + OpARMGreaterEqual + OpARMLessThanU + OpARMLessEqualU + OpARMGreaterThanU 
+ OpARMGreaterEqualU + OpARMDUFFZERO + OpARMDUFFCOPY + OpARMLoweredZero + OpARMLoweredMove + OpARMLoweredGetClosurePtr + OpARMLoweredGetCallerSP + OpARMLoweredGetCallerPC + OpARMLoweredPanicBoundsA + OpARMLoweredPanicBoundsB + OpARMLoweredPanicBoundsC + OpARMLoweredPanicExtendA + OpARMLoweredPanicExtendB + OpARMLoweredPanicExtendC + OpARMFlagConstant + OpARMInvertFlags + OpARMLoweredWB + + OpARM64ADCSflags + OpARM64ADCzerocarry + OpARM64ADD + OpARM64ADDconst + OpARM64ADDSconstflags + OpARM64ADDSflags + OpARM64SUB + OpARM64SUBconst + OpARM64SBCSflags + OpARM64SUBSflags + OpARM64MUL + OpARM64MULW + OpARM64MNEG + OpARM64MNEGW + OpARM64MULH + OpARM64UMULH + OpARM64MULL + OpARM64UMULL + OpARM64DIV + OpARM64UDIV + OpARM64DIVW + OpARM64UDIVW + OpARM64MOD + OpARM64UMOD + OpARM64MODW + OpARM64UMODW + OpARM64FADDS + OpARM64FADDD + OpARM64FSUBS + OpARM64FSUBD + OpARM64FMULS + OpARM64FMULD + OpARM64FNMULS + OpARM64FNMULD + OpARM64FDIVS + OpARM64FDIVD + OpARM64AND + OpARM64ANDconst + OpARM64OR + OpARM64ORconst + OpARM64XOR + OpARM64XORconst + OpARM64BIC + OpARM64EON + OpARM64ORN + OpARM64MVN + OpARM64NEG + OpARM64NEGSflags + OpARM64NGCzerocarry + OpARM64FABSD + OpARM64FNEGS + OpARM64FNEGD + OpARM64FSQRTD + OpARM64FSQRTS + OpARM64FMIND + OpARM64FMINS + OpARM64FMAXD + OpARM64FMAXS + OpARM64REV + OpARM64REVW + OpARM64REV16 + OpARM64REV16W + OpARM64RBIT + OpARM64RBITW + OpARM64CLZ + OpARM64CLZW + OpARM64VCNT + OpARM64VUADDLV + OpARM64LoweredRound32F + OpARM64LoweredRound64F + OpARM64FMADDS + OpARM64FMADDD + OpARM64FNMADDS + OpARM64FNMADDD + OpARM64FMSUBS + OpARM64FMSUBD + OpARM64FNMSUBS + OpARM64FNMSUBD + OpARM64MADD + OpARM64MADDW + OpARM64MSUB + OpARM64MSUBW + OpARM64SLL + OpARM64SLLconst + OpARM64SRL + OpARM64SRLconst + OpARM64SRA + OpARM64SRAconst + OpARM64ROR + OpARM64RORW + OpARM64RORconst + OpARM64RORWconst + OpARM64EXTRconst + OpARM64EXTRWconst + OpARM64CMP + OpARM64CMPconst + OpARM64CMPW + OpARM64CMPWconst + OpARM64CMN + OpARM64CMNconst + OpARM64CMNW + OpARM64CMNWconst + 
OpARM64TST + OpARM64TSTconst + OpARM64TSTW + OpARM64TSTWconst + OpARM64FCMPS + OpARM64FCMPD + OpARM64FCMPS0 + OpARM64FCMPD0 + OpARM64MVNshiftLL + OpARM64MVNshiftRL + OpARM64MVNshiftRA + OpARM64MVNshiftRO + OpARM64NEGshiftLL + OpARM64NEGshiftRL + OpARM64NEGshiftRA + OpARM64ADDshiftLL + OpARM64ADDshiftRL + OpARM64ADDshiftRA + OpARM64SUBshiftLL + OpARM64SUBshiftRL + OpARM64SUBshiftRA + OpARM64ANDshiftLL + OpARM64ANDshiftRL + OpARM64ANDshiftRA + OpARM64ANDshiftRO + OpARM64ORshiftLL + OpARM64ORshiftRL + OpARM64ORshiftRA + OpARM64ORshiftRO + OpARM64XORshiftLL + OpARM64XORshiftRL + OpARM64XORshiftRA + OpARM64XORshiftRO + OpARM64BICshiftLL + OpARM64BICshiftRL + OpARM64BICshiftRA + OpARM64BICshiftRO + OpARM64EONshiftLL + OpARM64EONshiftRL + OpARM64EONshiftRA + OpARM64EONshiftRO + OpARM64ORNshiftLL + OpARM64ORNshiftRL + OpARM64ORNshiftRA + OpARM64ORNshiftRO + OpARM64CMPshiftLL + OpARM64CMPshiftRL + OpARM64CMPshiftRA + OpARM64CMNshiftLL + OpARM64CMNshiftRL + OpARM64CMNshiftRA + OpARM64TSTshiftLL + OpARM64TSTshiftRL + OpARM64TSTshiftRA + OpARM64TSTshiftRO + OpARM64BFI + OpARM64BFXIL + OpARM64SBFIZ + OpARM64SBFX + OpARM64UBFIZ + OpARM64UBFX + OpARM64MOVDconst + OpARM64FMOVSconst + OpARM64FMOVDconst + OpARM64MOVDaddr + OpARM64MOVBload + OpARM64MOVBUload + OpARM64MOVHload + OpARM64MOVHUload + OpARM64MOVWload + OpARM64MOVWUload + OpARM64MOVDload + OpARM64LDP + OpARM64FMOVSload + OpARM64FMOVDload + OpARM64MOVDloadidx + OpARM64MOVWloadidx + OpARM64MOVWUloadidx + OpARM64MOVHloadidx + OpARM64MOVHUloadidx + OpARM64MOVBloadidx + OpARM64MOVBUloadidx + OpARM64FMOVSloadidx + OpARM64FMOVDloadidx + OpARM64MOVHloadidx2 + OpARM64MOVHUloadidx2 + OpARM64MOVWloadidx4 + OpARM64MOVWUloadidx4 + OpARM64MOVDloadidx8 + OpARM64FMOVSloadidx4 + OpARM64FMOVDloadidx8 + OpARM64MOVBstore + OpARM64MOVHstore + OpARM64MOVWstore + OpARM64MOVDstore + OpARM64STP + OpARM64FMOVSstore + OpARM64FMOVDstore + OpARM64MOVBstoreidx + OpARM64MOVHstoreidx + OpARM64MOVWstoreidx + OpARM64MOVDstoreidx + OpARM64FMOVSstoreidx + 
OpARM64FMOVDstoreidx + OpARM64MOVHstoreidx2 + OpARM64MOVWstoreidx4 + OpARM64MOVDstoreidx8 + OpARM64FMOVSstoreidx4 + OpARM64FMOVDstoreidx8 + OpARM64MOVBstorezero + OpARM64MOVHstorezero + OpARM64MOVWstorezero + OpARM64MOVDstorezero + OpARM64MOVQstorezero + OpARM64MOVBstorezeroidx + OpARM64MOVHstorezeroidx + OpARM64MOVWstorezeroidx + OpARM64MOVDstorezeroidx + OpARM64MOVHstorezeroidx2 + OpARM64MOVWstorezeroidx4 + OpARM64MOVDstorezeroidx8 + OpARM64FMOVDgpfp + OpARM64FMOVDfpgp + OpARM64FMOVSgpfp + OpARM64FMOVSfpgp + OpARM64MOVBreg + OpARM64MOVBUreg + OpARM64MOVHreg + OpARM64MOVHUreg + OpARM64MOVWreg + OpARM64MOVWUreg + OpARM64MOVDreg + OpARM64MOVDnop + OpARM64SCVTFWS + OpARM64SCVTFWD + OpARM64UCVTFWS + OpARM64UCVTFWD + OpARM64SCVTFS + OpARM64SCVTFD + OpARM64UCVTFS + OpARM64UCVTFD + OpARM64FCVTZSSW + OpARM64FCVTZSDW + OpARM64FCVTZUSW + OpARM64FCVTZUDW + OpARM64FCVTZSS + OpARM64FCVTZSD + OpARM64FCVTZUS + OpARM64FCVTZUD + OpARM64FCVTSD + OpARM64FCVTDS + OpARM64FRINTAD + OpARM64FRINTMD + OpARM64FRINTND + OpARM64FRINTPD + OpARM64FRINTZD + OpARM64CSEL + OpARM64CSEL0 + OpARM64CSINC + OpARM64CSINV + OpARM64CSNEG + OpARM64CSETM + OpARM64CALLstatic + OpARM64CALLtail + OpARM64CALLclosure + OpARM64CALLinter + OpARM64LoweredNilCheck + OpARM64Equal + OpARM64NotEqual + OpARM64LessThan + OpARM64LessEqual + OpARM64GreaterThan + OpARM64GreaterEqual + OpARM64LessThanU + OpARM64LessEqualU + OpARM64GreaterThanU + OpARM64GreaterEqualU + OpARM64LessThanF + OpARM64LessEqualF + OpARM64GreaterThanF + OpARM64GreaterEqualF + OpARM64NotLessThanF + OpARM64NotLessEqualF + OpARM64NotGreaterThanF + OpARM64NotGreaterEqualF + OpARM64LessThanNoov + OpARM64GreaterEqualNoov + OpARM64DUFFZERO + OpARM64LoweredZero + OpARM64DUFFCOPY + OpARM64LoweredMove + OpARM64LoweredGetClosurePtr + OpARM64LoweredGetCallerSP + OpARM64LoweredGetCallerPC + OpARM64FlagConstant + OpARM64InvertFlags + OpARM64LDAR + OpARM64LDARB + OpARM64LDARW + OpARM64STLRB + OpARM64STLR + OpARM64STLRW + OpARM64LoweredAtomicExchange64 + 
OpARM64LoweredAtomicExchange32 + OpARM64LoweredAtomicExchange64Variant + OpARM64LoweredAtomicExchange32Variant + OpARM64LoweredAtomicAdd64 + OpARM64LoweredAtomicAdd32 + OpARM64LoweredAtomicAdd64Variant + OpARM64LoweredAtomicAdd32Variant + OpARM64LoweredAtomicCas64 + OpARM64LoweredAtomicCas32 + OpARM64LoweredAtomicCas64Variant + OpARM64LoweredAtomicCas32Variant + OpARM64LoweredAtomicAnd8 + OpARM64LoweredAtomicAnd32 + OpARM64LoweredAtomicOr8 + OpARM64LoweredAtomicOr32 + OpARM64LoweredAtomicAnd8Variant + OpARM64LoweredAtomicAnd32Variant + OpARM64LoweredAtomicOr8Variant + OpARM64LoweredAtomicOr32Variant + OpARM64LoweredWB + OpARM64LoweredPanicBoundsA + OpARM64LoweredPanicBoundsB + OpARM64LoweredPanicBoundsC + OpARM64PRFM + OpARM64DMB + + OpLOONG64ADDV + OpLOONG64ADDVconst + OpLOONG64SUBV + OpLOONG64SUBVconst + OpLOONG64MULV + OpLOONG64MULHV + OpLOONG64MULHVU + OpLOONG64DIVV + OpLOONG64DIVVU + OpLOONG64REMV + OpLOONG64REMVU + OpLOONG64ADDF + OpLOONG64ADDD + OpLOONG64SUBF + OpLOONG64SUBD + OpLOONG64MULF + OpLOONG64MULD + OpLOONG64DIVF + OpLOONG64DIVD + OpLOONG64AND + OpLOONG64ANDconst + OpLOONG64OR + OpLOONG64ORconst + OpLOONG64XOR + OpLOONG64XORconst + OpLOONG64NOR + OpLOONG64NORconst + OpLOONG64NEGV + OpLOONG64NEGF + OpLOONG64NEGD + OpLOONG64SQRTD + OpLOONG64SQRTF + OpLOONG64MASKEQZ + OpLOONG64MASKNEZ + OpLOONG64SLLV + OpLOONG64SLLVconst + OpLOONG64SRLV + OpLOONG64SRLVconst + OpLOONG64SRAV + OpLOONG64SRAVconst + OpLOONG64ROTR + OpLOONG64ROTRV + OpLOONG64ROTRconst + OpLOONG64ROTRVconst + OpLOONG64SGT + OpLOONG64SGTconst + OpLOONG64SGTU + OpLOONG64SGTUconst + OpLOONG64CMPEQF + OpLOONG64CMPEQD + OpLOONG64CMPGEF + OpLOONG64CMPGED + OpLOONG64CMPGTF + OpLOONG64CMPGTD + OpLOONG64MOVVconst + OpLOONG64MOVFconst + OpLOONG64MOVDconst + OpLOONG64MOVVaddr + OpLOONG64MOVBload + OpLOONG64MOVBUload + OpLOONG64MOVHload + OpLOONG64MOVHUload + OpLOONG64MOVWload + OpLOONG64MOVWUload + OpLOONG64MOVVload + OpLOONG64MOVFload + OpLOONG64MOVDload + OpLOONG64MOVBstore + OpLOONG64MOVHstore + 
OpLOONG64MOVWstore + OpLOONG64MOVVstore + OpLOONG64MOVFstore + OpLOONG64MOVDstore + OpLOONG64MOVBstorezero + OpLOONG64MOVHstorezero + OpLOONG64MOVWstorezero + OpLOONG64MOVVstorezero + OpLOONG64MOVBreg + OpLOONG64MOVBUreg + OpLOONG64MOVHreg + OpLOONG64MOVHUreg + OpLOONG64MOVWreg + OpLOONG64MOVWUreg + OpLOONG64MOVVreg + OpLOONG64MOVVnop + OpLOONG64MOVWF + OpLOONG64MOVWD + OpLOONG64MOVVF + OpLOONG64MOVVD + OpLOONG64TRUNCFW + OpLOONG64TRUNCDW + OpLOONG64TRUNCFV + OpLOONG64TRUNCDV + OpLOONG64MOVFD + OpLOONG64MOVDF + OpLOONG64CALLstatic + OpLOONG64CALLtail + OpLOONG64CALLclosure + OpLOONG64CALLinter + OpLOONG64DUFFZERO + OpLOONG64DUFFCOPY + OpLOONG64LoweredZero + OpLOONG64LoweredMove + OpLOONG64LoweredAtomicLoad8 + OpLOONG64LoweredAtomicLoad32 + OpLOONG64LoweredAtomicLoad64 + OpLOONG64LoweredAtomicStore8 + OpLOONG64LoweredAtomicStore32 + OpLOONG64LoweredAtomicStore64 + OpLOONG64LoweredAtomicStorezero32 + OpLOONG64LoweredAtomicStorezero64 + OpLOONG64LoweredAtomicExchange32 + OpLOONG64LoweredAtomicExchange64 + OpLOONG64LoweredAtomicAdd32 + OpLOONG64LoweredAtomicAdd64 + OpLOONG64LoweredAtomicAddconst32 + OpLOONG64LoweredAtomicAddconst64 + OpLOONG64LoweredAtomicCas32 + OpLOONG64LoweredAtomicCas64 + OpLOONG64LoweredNilCheck + OpLOONG64FPFlagTrue + OpLOONG64FPFlagFalse + OpLOONG64LoweredGetClosurePtr + OpLOONG64LoweredGetCallerSP + OpLOONG64LoweredGetCallerPC + OpLOONG64LoweredWB + OpLOONG64LoweredPanicBoundsA + OpLOONG64LoweredPanicBoundsB + OpLOONG64LoweredPanicBoundsC + + OpMIPSADD + OpMIPSADDconst + OpMIPSSUB + OpMIPSSUBconst + OpMIPSMUL + OpMIPSMULT + OpMIPSMULTU + OpMIPSDIV + OpMIPSDIVU + OpMIPSADDF + OpMIPSADDD + OpMIPSSUBF + OpMIPSSUBD + OpMIPSMULF + OpMIPSMULD + OpMIPSDIVF + OpMIPSDIVD + OpMIPSAND + OpMIPSANDconst + OpMIPSOR + OpMIPSORconst + OpMIPSXOR + OpMIPSXORconst + OpMIPSNOR + OpMIPSNORconst + OpMIPSNEG + OpMIPSNEGF + OpMIPSNEGD + OpMIPSABSD + OpMIPSSQRTD + OpMIPSSQRTF + OpMIPSSLL + OpMIPSSLLconst + OpMIPSSRL + OpMIPSSRLconst + OpMIPSSRA + OpMIPSSRAconst + 
OpMIPSCLZ + OpMIPSSGT + OpMIPSSGTconst + OpMIPSSGTzero + OpMIPSSGTU + OpMIPSSGTUconst + OpMIPSSGTUzero + OpMIPSCMPEQF + OpMIPSCMPEQD + OpMIPSCMPGEF + OpMIPSCMPGED + OpMIPSCMPGTF + OpMIPSCMPGTD + OpMIPSMOVWconst + OpMIPSMOVFconst + OpMIPSMOVDconst + OpMIPSMOVWaddr + OpMIPSMOVBload + OpMIPSMOVBUload + OpMIPSMOVHload + OpMIPSMOVHUload + OpMIPSMOVWload + OpMIPSMOVFload + OpMIPSMOVDload + OpMIPSMOVBstore + OpMIPSMOVHstore + OpMIPSMOVWstore + OpMIPSMOVFstore + OpMIPSMOVDstore + OpMIPSMOVBstorezero + OpMIPSMOVHstorezero + OpMIPSMOVWstorezero + OpMIPSMOVWfpgp + OpMIPSMOVWgpfp + OpMIPSMOVBreg + OpMIPSMOVBUreg + OpMIPSMOVHreg + OpMIPSMOVHUreg + OpMIPSMOVWreg + OpMIPSMOVWnop + OpMIPSCMOVZ + OpMIPSCMOVZzero + OpMIPSMOVWF + OpMIPSMOVWD + OpMIPSTRUNCFW + OpMIPSTRUNCDW + OpMIPSMOVFD + OpMIPSMOVDF + OpMIPSCALLstatic + OpMIPSCALLtail + OpMIPSCALLclosure + OpMIPSCALLinter + OpMIPSLoweredAtomicLoad8 + OpMIPSLoweredAtomicLoad32 + OpMIPSLoweredAtomicStore8 + OpMIPSLoweredAtomicStore32 + OpMIPSLoweredAtomicStorezero + OpMIPSLoweredAtomicExchange + OpMIPSLoweredAtomicAdd + OpMIPSLoweredAtomicAddconst + OpMIPSLoweredAtomicCas + OpMIPSLoweredAtomicAnd + OpMIPSLoweredAtomicOr + OpMIPSLoweredZero + OpMIPSLoweredMove + OpMIPSLoweredNilCheck + OpMIPSFPFlagTrue + OpMIPSFPFlagFalse + OpMIPSLoweredGetClosurePtr + OpMIPSLoweredGetCallerSP + OpMIPSLoweredGetCallerPC + OpMIPSLoweredWB + OpMIPSLoweredPanicBoundsA + OpMIPSLoweredPanicBoundsB + OpMIPSLoweredPanicBoundsC + OpMIPSLoweredPanicExtendA + OpMIPSLoweredPanicExtendB + OpMIPSLoweredPanicExtendC + + OpMIPS64ADDV + OpMIPS64ADDVconst + OpMIPS64SUBV + OpMIPS64SUBVconst + OpMIPS64MULV + OpMIPS64MULVU + OpMIPS64DIVV + OpMIPS64DIVVU + OpMIPS64ADDF + OpMIPS64ADDD + OpMIPS64SUBF + OpMIPS64SUBD + OpMIPS64MULF + OpMIPS64MULD + OpMIPS64DIVF + OpMIPS64DIVD + OpMIPS64AND + OpMIPS64ANDconst + OpMIPS64OR + OpMIPS64ORconst + OpMIPS64XOR + OpMIPS64XORconst + OpMIPS64NOR + OpMIPS64NORconst + OpMIPS64NEGV + OpMIPS64NEGF + OpMIPS64NEGD + OpMIPS64ABSD + 
OpMIPS64SQRTD + OpMIPS64SQRTF + OpMIPS64SLLV + OpMIPS64SLLVconst + OpMIPS64SRLV + OpMIPS64SRLVconst + OpMIPS64SRAV + OpMIPS64SRAVconst + OpMIPS64SGT + OpMIPS64SGTconst + OpMIPS64SGTU + OpMIPS64SGTUconst + OpMIPS64CMPEQF + OpMIPS64CMPEQD + OpMIPS64CMPGEF + OpMIPS64CMPGED + OpMIPS64CMPGTF + OpMIPS64CMPGTD + OpMIPS64MOVVconst + OpMIPS64MOVFconst + OpMIPS64MOVDconst + OpMIPS64MOVVaddr + OpMIPS64MOVBload + OpMIPS64MOVBUload + OpMIPS64MOVHload + OpMIPS64MOVHUload + OpMIPS64MOVWload + OpMIPS64MOVWUload + OpMIPS64MOVVload + OpMIPS64MOVFload + OpMIPS64MOVDload + OpMIPS64MOVBstore + OpMIPS64MOVHstore + OpMIPS64MOVWstore + OpMIPS64MOVVstore + OpMIPS64MOVFstore + OpMIPS64MOVDstore + OpMIPS64MOVBstorezero + OpMIPS64MOVHstorezero + OpMIPS64MOVWstorezero + OpMIPS64MOVVstorezero + OpMIPS64MOVWfpgp + OpMIPS64MOVWgpfp + OpMIPS64MOVVfpgp + OpMIPS64MOVVgpfp + OpMIPS64MOVBreg + OpMIPS64MOVBUreg + OpMIPS64MOVHreg + OpMIPS64MOVHUreg + OpMIPS64MOVWreg + OpMIPS64MOVWUreg + OpMIPS64MOVVreg + OpMIPS64MOVVnop + OpMIPS64MOVWF + OpMIPS64MOVWD + OpMIPS64MOVVF + OpMIPS64MOVVD + OpMIPS64TRUNCFW + OpMIPS64TRUNCDW + OpMIPS64TRUNCFV + OpMIPS64TRUNCDV + OpMIPS64MOVFD + OpMIPS64MOVDF + OpMIPS64CALLstatic + OpMIPS64CALLtail + OpMIPS64CALLclosure + OpMIPS64CALLinter + OpMIPS64DUFFZERO + OpMIPS64DUFFCOPY + OpMIPS64LoweredZero + OpMIPS64LoweredMove + OpMIPS64LoweredAtomicAnd32 + OpMIPS64LoweredAtomicOr32 + OpMIPS64LoweredAtomicLoad8 + OpMIPS64LoweredAtomicLoad32 + OpMIPS64LoweredAtomicLoad64 + OpMIPS64LoweredAtomicStore8 + OpMIPS64LoweredAtomicStore32 + OpMIPS64LoweredAtomicStore64 + OpMIPS64LoweredAtomicStorezero32 + OpMIPS64LoweredAtomicStorezero64 + OpMIPS64LoweredAtomicExchange32 + OpMIPS64LoweredAtomicExchange64 + OpMIPS64LoweredAtomicAdd32 + OpMIPS64LoweredAtomicAdd64 + OpMIPS64LoweredAtomicAddconst32 + OpMIPS64LoweredAtomicAddconst64 + OpMIPS64LoweredAtomicCas32 + OpMIPS64LoweredAtomicCas64 + OpMIPS64LoweredNilCheck + OpMIPS64FPFlagTrue + OpMIPS64FPFlagFalse + OpMIPS64LoweredGetClosurePtr + 
OpMIPS64LoweredGetCallerSP + OpMIPS64LoweredGetCallerPC + OpMIPS64LoweredWB + OpMIPS64LoweredPanicBoundsA + OpMIPS64LoweredPanicBoundsB + OpMIPS64LoweredPanicBoundsC + + OpPPC64ADD + OpPPC64ADDCC + OpPPC64ADDconst + OpPPC64ADDCCconst + OpPPC64FADD + OpPPC64FADDS + OpPPC64SUB + OpPPC64SUBCC + OpPPC64SUBFCconst + OpPPC64FSUB + OpPPC64FSUBS + OpPPC64MULLD + OpPPC64MULLW + OpPPC64MULLDconst + OpPPC64MULLWconst + OpPPC64MADDLD + OpPPC64MULHD + OpPPC64MULHW + OpPPC64MULHDU + OpPPC64MULHWU + OpPPC64FMUL + OpPPC64FMULS + OpPPC64FMADD + OpPPC64FMADDS + OpPPC64FMSUB + OpPPC64FMSUBS + OpPPC64SRAD + OpPPC64SRAW + OpPPC64SRD + OpPPC64SRW + OpPPC64SLD + OpPPC64SLW + OpPPC64ROTL + OpPPC64ROTLW + OpPPC64CLRLSLWI + OpPPC64CLRLSLDI + OpPPC64ADDC + OpPPC64SUBC + OpPPC64ADDCconst + OpPPC64SUBCconst + OpPPC64ADDE + OpPPC64SUBE + OpPPC64ADDZEzero + OpPPC64SUBZEzero + OpPPC64SRADconst + OpPPC64SRAWconst + OpPPC64SRDconst + OpPPC64SRWconst + OpPPC64SLDconst + OpPPC64SLWconst + OpPPC64ROTLconst + OpPPC64ROTLWconst + OpPPC64EXTSWSLconst + OpPPC64RLWINM + OpPPC64RLWNM + OpPPC64RLWMI + OpPPC64RLDICL + OpPPC64RLDICR + OpPPC64CNTLZD + OpPPC64CNTLZDCC + OpPPC64CNTLZW + OpPPC64CNTTZD + OpPPC64CNTTZW + OpPPC64POPCNTD + OpPPC64POPCNTW + OpPPC64POPCNTB + OpPPC64FDIV + OpPPC64FDIVS + OpPPC64DIVD + OpPPC64DIVW + OpPPC64DIVDU + OpPPC64DIVWU + OpPPC64MODUD + OpPPC64MODSD + OpPPC64MODUW + OpPPC64MODSW + OpPPC64FCTIDZ + OpPPC64FCTIWZ + OpPPC64FCFID + OpPPC64FCFIDS + OpPPC64FRSP + OpPPC64MFVSRD + OpPPC64MTVSRD + OpPPC64AND + OpPPC64ANDN + OpPPC64ANDNCC + OpPPC64ANDCC + OpPPC64OR + OpPPC64ORN + OpPPC64ORCC + OpPPC64NOR + OpPPC64NORCC + OpPPC64XOR + OpPPC64XORCC + OpPPC64EQV + OpPPC64NEG + OpPPC64NEGCC + OpPPC64BRD + OpPPC64BRW + OpPPC64BRH + OpPPC64FNEG + OpPPC64FSQRT + OpPPC64FSQRTS + OpPPC64FFLOOR + OpPPC64FCEIL + OpPPC64FTRUNC + OpPPC64FROUND + OpPPC64FABS + OpPPC64FNABS + OpPPC64FCPSGN + OpPPC64ORconst + OpPPC64XORconst + OpPPC64ANDCCconst + OpPPC64MOVBreg + OpPPC64MOVBZreg + OpPPC64MOVHreg + 
OpPPC64MOVHZreg + OpPPC64MOVWreg + OpPPC64MOVWZreg + OpPPC64MOVBZload + OpPPC64MOVHload + OpPPC64MOVHZload + OpPPC64MOVWload + OpPPC64MOVWZload + OpPPC64MOVDload + OpPPC64MOVDBRload + OpPPC64MOVWBRload + OpPPC64MOVHBRload + OpPPC64MOVBZloadidx + OpPPC64MOVHloadidx + OpPPC64MOVHZloadidx + OpPPC64MOVWloadidx + OpPPC64MOVWZloadidx + OpPPC64MOVDloadidx + OpPPC64MOVHBRloadidx + OpPPC64MOVWBRloadidx + OpPPC64MOVDBRloadidx + OpPPC64FMOVDloadidx + OpPPC64FMOVSloadidx + OpPPC64DCBT + OpPPC64MOVDBRstore + OpPPC64MOVWBRstore + OpPPC64MOVHBRstore + OpPPC64FMOVDload + OpPPC64FMOVSload + OpPPC64MOVBstore + OpPPC64MOVHstore + OpPPC64MOVWstore + OpPPC64MOVDstore + OpPPC64FMOVDstore + OpPPC64FMOVSstore + OpPPC64MOVBstoreidx + OpPPC64MOVHstoreidx + OpPPC64MOVWstoreidx + OpPPC64MOVDstoreidx + OpPPC64FMOVDstoreidx + OpPPC64FMOVSstoreidx + OpPPC64MOVHBRstoreidx + OpPPC64MOVWBRstoreidx + OpPPC64MOVDBRstoreidx + OpPPC64MOVBstorezero + OpPPC64MOVHstorezero + OpPPC64MOVWstorezero + OpPPC64MOVDstorezero + OpPPC64MOVDaddr + OpPPC64MOVDconst + OpPPC64FMOVDconst + OpPPC64FMOVSconst + OpPPC64FCMPU + OpPPC64CMP + OpPPC64CMPU + OpPPC64CMPW + OpPPC64CMPWU + OpPPC64CMPconst + OpPPC64CMPUconst + OpPPC64CMPWconst + OpPPC64CMPWUconst + OpPPC64ISEL + OpPPC64ISELZ + OpPPC64SETBC + OpPPC64SETBCR + OpPPC64Equal + OpPPC64NotEqual + OpPPC64LessThan + OpPPC64FLessThan + OpPPC64LessEqual + OpPPC64FLessEqual + OpPPC64GreaterThan + OpPPC64FGreaterThan + OpPPC64GreaterEqual + OpPPC64FGreaterEqual + OpPPC64LoweredGetClosurePtr + OpPPC64LoweredGetCallerSP + OpPPC64LoweredGetCallerPC + OpPPC64LoweredNilCheck + OpPPC64LoweredRound32F + OpPPC64LoweredRound64F + OpPPC64CALLstatic + OpPPC64CALLtail + OpPPC64CALLclosure + OpPPC64CALLinter + OpPPC64LoweredZero + OpPPC64LoweredZeroShort + OpPPC64LoweredQuadZeroShort + OpPPC64LoweredQuadZero + OpPPC64LoweredMove + OpPPC64LoweredMoveShort + OpPPC64LoweredQuadMove + OpPPC64LoweredQuadMoveShort + OpPPC64LoweredAtomicStore8 + OpPPC64LoweredAtomicStore32 + 
OpPPC64LoweredAtomicStore64 + OpPPC64LoweredAtomicLoad8 + OpPPC64LoweredAtomicLoad32 + OpPPC64LoweredAtomicLoad64 + OpPPC64LoweredAtomicLoadPtr + OpPPC64LoweredAtomicAdd32 + OpPPC64LoweredAtomicAdd64 + OpPPC64LoweredAtomicExchange32 + OpPPC64LoweredAtomicExchange64 + OpPPC64LoweredAtomicCas64 + OpPPC64LoweredAtomicCas32 + OpPPC64LoweredAtomicAnd8 + OpPPC64LoweredAtomicAnd32 + OpPPC64LoweredAtomicOr8 + OpPPC64LoweredAtomicOr32 + OpPPC64LoweredWB + OpPPC64LoweredPubBarrier + OpPPC64LoweredPanicBoundsA + OpPPC64LoweredPanicBoundsB + OpPPC64LoweredPanicBoundsC + OpPPC64InvertFlags + OpPPC64FlagEQ + OpPPC64FlagLT + OpPPC64FlagGT + + OpRISCV64ADD + OpRISCV64ADDI + OpRISCV64ADDIW + OpRISCV64NEG + OpRISCV64NEGW + OpRISCV64SUB + OpRISCV64SUBW + OpRISCV64MUL + OpRISCV64MULW + OpRISCV64MULH + OpRISCV64MULHU + OpRISCV64LoweredMuluhilo + OpRISCV64LoweredMuluover + OpRISCV64DIV + OpRISCV64DIVU + OpRISCV64DIVW + OpRISCV64DIVUW + OpRISCV64REM + OpRISCV64REMU + OpRISCV64REMW + OpRISCV64REMUW + OpRISCV64MOVaddr + OpRISCV64MOVDconst + OpRISCV64MOVBload + OpRISCV64MOVHload + OpRISCV64MOVWload + OpRISCV64MOVDload + OpRISCV64MOVBUload + OpRISCV64MOVHUload + OpRISCV64MOVWUload + OpRISCV64MOVBstore + OpRISCV64MOVHstore + OpRISCV64MOVWstore + OpRISCV64MOVDstore + OpRISCV64MOVBstorezero + OpRISCV64MOVHstorezero + OpRISCV64MOVWstorezero + OpRISCV64MOVDstorezero + OpRISCV64MOVBreg + OpRISCV64MOVHreg + OpRISCV64MOVWreg + OpRISCV64MOVDreg + OpRISCV64MOVBUreg + OpRISCV64MOVHUreg + OpRISCV64MOVWUreg + OpRISCV64MOVDnop + OpRISCV64SLL + OpRISCV64SRA + OpRISCV64SRAW + OpRISCV64SRL + OpRISCV64SRLW + OpRISCV64SLLI + OpRISCV64SRAI + OpRISCV64SRAIW + OpRISCV64SRLI + OpRISCV64SRLIW + OpRISCV64XOR + OpRISCV64XORI + OpRISCV64OR + OpRISCV64ORI + OpRISCV64AND + OpRISCV64ANDI + OpRISCV64NOT + OpRISCV64SEQZ + OpRISCV64SNEZ + OpRISCV64SLT + OpRISCV64SLTI + OpRISCV64SLTU + OpRISCV64SLTIU + OpRISCV64LoweredRound32F + OpRISCV64LoweredRound64F + OpRISCV64CALLstatic + OpRISCV64CALLtail + OpRISCV64CALLclosure + 
OpRISCV64CALLinter + OpRISCV64DUFFZERO + OpRISCV64DUFFCOPY + OpRISCV64LoweredZero + OpRISCV64LoweredMove + OpRISCV64LoweredAtomicLoad8 + OpRISCV64LoweredAtomicLoad32 + OpRISCV64LoweredAtomicLoad64 + OpRISCV64LoweredAtomicStore8 + OpRISCV64LoweredAtomicStore32 + OpRISCV64LoweredAtomicStore64 + OpRISCV64LoweredAtomicExchange32 + OpRISCV64LoweredAtomicExchange64 + OpRISCV64LoweredAtomicAdd32 + OpRISCV64LoweredAtomicAdd64 + OpRISCV64LoweredAtomicCas32 + OpRISCV64LoweredAtomicCas64 + OpRISCV64LoweredAtomicAnd32 + OpRISCV64LoweredAtomicOr32 + OpRISCV64LoweredNilCheck + OpRISCV64LoweredGetClosurePtr + OpRISCV64LoweredGetCallerSP + OpRISCV64LoweredGetCallerPC + OpRISCV64LoweredWB + OpRISCV64LoweredPubBarrier + OpRISCV64LoweredPanicBoundsA + OpRISCV64LoweredPanicBoundsB + OpRISCV64LoweredPanicBoundsC + OpRISCV64FADDS + OpRISCV64FSUBS + OpRISCV64FMULS + OpRISCV64FDIVS + OpRISCV64FMADDS + OpRISCV64FMSUBS + OpRISCV64FNMADDS + OpRISCV64FNMSUBS + OpRISCV64FSQRTS + OpRISCV64FNEGS + OpRISCV64FMVSX + OpRISCV64FCVTSW + OpRISCV64FCVTSL + OpRISCV64FCVTWS + OpRISCV64FCVTLS + OpRISCV64FMOVWload + OpRISCV64FMOVWstore + OpRISCV64FEQS + OpRISCV64FNES + OpRISCV64FLTS + OpRISCV64FLES + OpRISCV64FADDD + OpRISCV64FSUBD + OpRISCV64FMULD + OpRISCV64FDIVD + OpRISCV64FMADDD + OpRISCV64FMSUBD + OpRISCV64FNMADDD + OpRISCV64FNMSUBD + OpRISCV64FSQRTD + OpRISCV64FNEGD + OpRISCV64FABSD + OpRISCV64FSGNJD + OpRISCV64FMVDX + OpRISCV64FCVTDW + OpRISCV64FCVTDL + OpRISCV64FCVTWD + OpRISCV64FCVTLD + OpRISCV64FCVTDS + OpRISCV64FCVTSD + OpRISCV64FMOVDload + OpRISCV64FMOVDstore + OpRISCV64FEQD + OpRISCV64FNED + OpRISCV64FLTD + OpRISCV64FLED + + OpS390XFADDS + OpS390XFADD + OpS390XFSUBS + OpS390XFSUB + OpS390XFMULS + OpS390XFMUL + OpS390XFDIVS + OpS390XFDIV + OpS390XFNEGS + OpS390XFNEG + OpS390XFMADDS + OpS390XFMADD + OpS390XFMSUBS + OpS390XFMSUB + OpS390XLPDFR + OpS390XLNDFR + OpS390XCPSDR + OpS390XFIDBR + OpS390XFMOVSload + OpS390XFMOVDload + OpS390XFMOVSconst + OpS390XFMOVDconst + OpS390XFMOVSloadidx + 
OpS390XFMOVDloadidx + OpS390XFMOVSstore + OpS390XFMOVDstore + OpS390XFMOVSstoreidx + OpS390XFMOVDstoreidx + OpS390XADD + OpS390XADDW + OpS390XADDconst + OpS390XADDWconst + OpS390XADDload + OpS390XADDWload + OpS390XSUB + OpS390XSUBW + OpS390XSUBconst + OpS390XSUBWconst + OpS390XSUBload + OpS390XSUBWload + OpS390XMULLD + OpS390XMULLW + OpS390XMULLDconst + OpS390XMULLWconst + OpS390XMULLDload + OpS390XMULLWload + OpS390XMULHD + OpS390XMULHDU + OpS390XDIVD + OpS390XDIVW + OpS390XDIVDU + OpS390XDIVWU + OpS390XMODD + OpS390XMODW + OpS390XMODDU + OpS390XMODWU + OpS390XAND + OpS390XANDW + OpS390XANDconst + OpS390XANDWconst + OpS390XANDload + OpS390XANDWload + OpS390XOR + OpS390XORW + OpS390XORconst + OpS390XORWconst + OpS390XORload + OpS390XORWload + OpS390XXOR + OpS390XXORW + OpS390XXORconst + OpS390XXORWconst + OpS390XXORload + OpS390XXORWload + OpS390XADDC + OpS390XADDCconst + OpS390XADDE + OpS390XSUBC + OpS390XSUBE + OpS390XCMP + OpS390XCMPW + OpS390XCMPU + OpS390XCMPWU + OpS390XCMPconst + OpS390XCMPWconst + OpS390XCMPUconst + OpS390XCMPWUconst + OpS390XFCMPS + OpS390XFCMP + OpS390XLTDBR + OpS390XLTEBR + OpS390XSLD + OpS390XSLW + OpS390XSLDconst + OpS390XSLWconst + OpS390XSRD + OpS390XSRW + OpS390XSRDconst + OpS390XSRWconst + OpS390XSRAD + OpS390XSRAW + OpS390XSRADconst + OpS390XSRAWconst + OpS390XRLLG + OpS390XRLL + OpS390XRLLconst + OpS390XRXSBG + OpS390XRISBGZ + OpS390XNEG + OpS390XNEGW + OpS390XNOT + OpS390XNOTW + OpS390XFSQRT + OpS390XFSQRTS + OpS390XLOCGR + OpS390XMOVBreg + OpS390XMOVBZreg + OpS390XMOVHreg + OpS390XMOVHZreg + OpS390XMOVWreg + OpS390XMOVWZreg + OpS390XMOVDconst + OpS390XLDGR + OpS390XLGDR + OpS390XCFDBRA + OpS390XCGDBRA + OpS390XCFEBRA + OpS390XCGEBRA + OpS390XCEFBRA + OpS390XCDFBRA + OpS390XCEGBRA + OpS390XCDGBRA + OpS390XCLFEBR + OpS390XCLFDBR + OpS390XCLGEBR + OpS390XCLGDBR + OpS390XCELFBR + OpS390XCDLFBR + OpS390XCELGBR + OpS390XCDLGBR + OpS390XLEDBR + OpS390XLDEBR + OpS390XMOVDaddr + OpS390XMOVDaddridx + OpS390XMOVBZload + OpS390XMOVBload + 
OpS390XMOVHZload + OpS390XMOVHload + OpS390XMOVWZload + OpS390XMOVWload + OpS390XMOVDload + OpS390XMOVWBR + OpS390XMOVDBR + OpS390XMOVHBRload + OpS390XMOVWBRload + OpS390XMOVDBRload + OpS390XMOVBstore + OpS390XMOVHstore + OpS390XMOVWstore + OpS390XMOVDstore + OpS390XMOVHBRstore + OpS390XMOVWBRstore + OpS390XMOVDBRstore + OpS390XMVC + OpS390XMOVBZloadidx + OpS390XMOVBloadidx + OpS390XMOVHZloadidx + OpS390XMOVHloadidx + OpS390XMOVWZloadidx + OpS390XMOVWloadidx + OpS390XMOVDloadidx + OpS390XMOVHBRloadidx + OpS390XMOVWBRloadidx + OpS390XMOVDBRloadidx + OpS390XMOVBstoreidx + OpS390XMOVHstoreidx + OpS390XMOVWstoreidx + OpS390XMOVDstoreidx + OpS390XMOVHBRstoreidx + OpS390XMOVWBRstoreidx + OpS390XMOVDBRstoreidx + OpS390XMOVBstoreconst + OpS390XMOVHstoreconst + OpS390XMOVWstoreconst + OpS390XMOVDstoreconst + OpS390XCLEAR + OpS390XCALLstatic + OpS390XCALLtail + OpS390XCALLclosure + OpS390XCALLinter + OpS390XInvertFlags + OpS390XLoweredGetG + OpS390XLoweredGetClosurePtr + OpS390XLoweredGetCallerSP + OpS390XLoweredGetCallerPC + OpS390XLoweredNilCheck + OpS390XLoweredRound32F + OpS390XLoweredRound64F + OpS390XLoweredWB + OpS390XLoweredPanicBoundsA + OpS390XLoweredPanicBoundsB + OpS390XLoweredPanicBoundsC + OpS390XFlagEQ + OpS390XFlagLT + OpS390XFlagGT + OpS390XFlagOV + OpS390XSYNC + OpS390XMOVBZatomicload + OpS390XMOVWZatomicload + OpS390XMOVDatomicload + OpS390XMOVBatomicstore + OpS390XMOVWatomicstore + OpS390XMOVDatomicstore + OpS390XLAA + OpS390XLAAG + OpS390XAddTupleFirst32 + OpS390XAddTupleFirst64 + OpS390XLAN + OpS390XLANfloor + OpS390XLAO + OpS390XLAOfloor + OpS390XLoweredAtomicCas32 + OpS390XLoweredAtomicCas64 + OpS390XLoweredAtomicExchange32 + OpS390XLoweredAtomicExchange64 + OpS390XFLOGR + OpS390XPOPCNT + OpS390XMLGR + OpS390XSumBytes2 + OpS390XSumBytes4 + OpS390XSumBytes8 + OpS390XSTMG2 + OpS390XSTMG3 + OpS390XSTMG4 + OpS390XSTM2 + OpS390XSTM3 + OpS390XSTM4 + OpS390XLoweredMove + OpS390XLoweredZero + + OpWasmLoweredStaticCall + OpWasmLoweredTailCall + 
OpWasmLoweredClosureCall + OpWasmLoweredInterCall + OpWasmLoweredAddr + OpWasmLoweredMove + OpWasmLoweredZero + OpWasmLoweredGetClosurePtr + OpWasmLoweredGetCallerPC + OpWasmLoweredGetCallerSP + OpWasmLoweredNilCheck + OpWasmLoweredWB + OpWasmLoweredConvert + OpWasmSelect + OpWasmI64Load8U + OpWasmI64Load8S + OpWasmI64Load16U + OpWasmI64Load16S + OpWasmI64Load32U + OpWasmI64Load32S + OpWasmI64Load + OpWasmI64Store8 + OpWasmI64Store16 + OpWasmI64Store32 + OpWasmI64Store + OpWasmF32Load + OpWasmF64Load + OpWasmF32Store + OpWasmF64Store + OpWasmI64Const + OpWasmF32Const + OpWasmF64Const + OpWasmI64Eqz + OpWasmI64Eq + OpWasmI64Ne + OpWasmI64LtS + OpWasmI64LtU + OpWasmI64GtS + OpWasmI64GtU + OpWasmI64LeS + OpWasmI64LeU + OpWasmI64GeS + OpWasmI64GeU + OpWasmF32Eq + OpWasmF32Ne + OpWasmF32Lt + OpWasmF32Gt + OpWasmF32Le + OpWasmF32Ge + OpWasmF64Eq + OpWasmF64Ne + OpWasmF64Lt + OpWasmF64Gt + OpWasmF64Le + OpWasmF64Ge + OpWasmI64Add + OpWasmI64AddConst + OpWasmI64Sub + OpWasmI64Mul + OpWasmI64DivS + OpWasmI64DivU + OpWasmI64RemS + OpWasmI64RemU + OpWasmI64And + OpWasmI64Or + OpWasmI64Xor + OpWasmI64Shl + OpWasmI64ShrS + OpWasmI64ShrU + OpWasmF32Neg + OpWasmF32Add + OpWasmF32Sub + OpWasmF32Mul + OpWasmF32Div + OpWasmF64Neg + OpWasmF64Add + OpWasmF64Sub + OpWasmF64Mul + OpWasmF64Div + OpWasmI64TruncSatF64S + OpWasmI64TruncSatF64U + OpWasmI64TruncSatF32S + OpWasmI64TruncSatF32U + OpWasmF32ConvertI64S + OpWasmF32ConvertI64U + OpWasmF64ConvertI64S + OpWasmF64ConvertI64U + OpWasmF32DemoteF64 + OpWasmF64PromoteF32 + OpWasmI64Extend8S + OpWasmI64Extend16S + OpWasmI64Extend32S + OpWasmF32Sqrt + OpWasmF32Trunc + OpWasmF32Ceil + OpWasmF32Floor + OpWasmF32Nearest + OpWasmF32Abs + OpWasmF32Copysign + OpWasmF64Sqrt + OpWasmF64Trunc + OpWasmF64Ceil + OpWasmF64Floor + OpWasmF64Nearest + OpWasmF64Abs + OpWasmF64Copysign + OpWasmI64Ctz + OpWasmI64Clz + OpWasmI32Rotl + OpWasmI64Rotl + OpWasmI64Popcnt + + OpAdd8 + OpAdd16 + OpAdd32 + OpAdd64 + OpAddPtr + OpAdd32F + OpAdd64F + OpSub8 + OpSub16 + 
OpSub32 + OpSub64 + OpSubPtr + OpSub32F + OpSub64F + OpMul8 + OpMul16 + OpMul32 + OpMul64 + OpMul32F + OpMul64F + OpDiv32F + OpDiv64F + OpHmul32 + OpHmul32u + OpHmul64 + OpHmul64u + OpMul32uhilo + OpMul64uhilo + OpMul32uover + OpMul64uover + OpAvg32u + OpAvg64u + OpDiv8 + OpDiv8u + OpDiv16 + OpDiv16u + OpDiv32 + OpDiv32u + OpDiv64 + OpDiv64u + OpDiv128u + OpMod8 + OpMod8u + OpMod16 + OpMod16u + OpMod32 + OpMod32u + OpMod64 + OpMod64u + OpAnd8 + OpAnd16 + OpAnd32 + OpAnd64 + OpOr8 + OpOr16 + OpOr32 + OpOr64 + OpXor8 + OpXor16 + OpXor32 + OpXor64 + OpLsh8x8 + OpLsh8x16 + OpLsh8x32 + OpLsh8x64 + OpLsh16x8 + OpLsh16x16 + OpLsh16x32 + OpLsh16x64 + OpLsh32x8 + OpLsh32x16 + OpLsh32x32 + OpLsh32x64 + OpLsh64x8 + OpLsh64x16 + OpLsh64x32 + OpLsh64x64 + OpRsh8x8 + OpRsh8x16 + OpRsh8x32 + OpRsh8x64 + OpRsh16x8 + OpRsh16x16 + OpRsh16x32 + OpRsh16x64 + OpRsh32x8 + OpRsh32x16 + OpRsh32x32 + OpRsh32x64 + OpRsh64x8 + OpRsh64x16 + OpRsh64x32 + OpRsh64x64 + OpRsh8Ux8 + OpRsh8Ux16 + OpRsh8Ux32 + OpRsh8Ux64 + OpRsh16Ux8 + OpRsh16Ux16 + OpRsh16Ux32 + OpRsh16Ux64 + OpRsh32Ux8 + OpRsh32Ux16 + OpRsh32Ux32 + OpRsh32Ux64 + OpRsh64Ux8 + OpRsh64Ux16 + OpRsh64Ux32 + OpRsh64Ux64 + OpEq8 + OpEq16 + OpEq32 + OpEq64 + OpEqPtr + OpEqInter + OpEqSlice + OpEq32F + OpEq64F + OpNeq8 + OpNeq16 + OpNeq32 + OpNeq64 + OpNeqPtr + OpNeqInter + OpNeqSlice + OpNeq32F + OpNeq64F + OpLess8 + OpLess8U + OpLess16 + OpLess16U + OpLess32 + OpLess32U + OpLess64 + OpLess64U + OpLess32F + OpLess64F + OpLeq8 + OpLeq8U + OpLeq16 + OpLeq16U + OpLeq32 + OpLeq32U + OpLeq64 + OpLeq64U + OpLeq32F + OpLeq64F + OpCondSelect + OpAndB + OpOrB + OpEqB + OpNeqB + OpNot + OpNeg8 + OpNeg16 + OpNeg32 + OpNeg64 + OpNeg32F + OpNeg64F + OpCom8 + OpCom16 + OpCom32 + OpCom64 + OpCtz8 + OpCtz16 + OpCtz32 + OpCtz64 + OpCtz8NonZero + OpCtz16NonZero + OpCtz32NonZero + OpCtz64NonZero + OpBitLen8 + OpBitLen16 + OpBitLen32 + OpBitLen64 + OpBswap16 + OpBswap32 + OpBswap64 + OpBitRev8 + OpBitRev16 + OpBitRev32 + OpBitRev64 + OpPopCount8 + 
OpPopCount16 + OpPopCount32 + OpPopCount64 + OpRotateLeft64 + OpRotateLeft32 + OpRotateLeft16 + OpRotateLeft8 + OpSqrt + OpSqrt32 + OpFloor + OpCeil + OpTrunc + OpRound + OpRoundToEven + OpAbs + OpCopysign + OpMin64F + OpMin32F + OpMax64F + OpMax32F + OpFMA + OpPhi + OpCopy + OpConvert + OpConstBool + OpConstString + OpConstNil + OpConst8 + OpConst16 + OpConst32 + OpConst64 + OpConst32F + OpConst64F + OpConstInterface + OpConstSlice + OpInitMem + OpArg + OpArgIntReg + OpArgFloatReg + OpAddr + OpLocalAddr + OpSP + OpSB + OpSPanchored + OpLoad + OpDereference + OpStore + OpMove + OpZero + OpStoreWB + OpMoveWB + OpZeroWB + OpWBend + OpWB + OpHasCPUFeature + OpPanicBounds + OpPanicExtend + OpClosureCall + OpStaticCall + OpInterCall + OpTailCall + OpClosureLECall + OpStaticLECall + OpInterLECall + OpTailLECall + OpSignExt8to16 + OpSignExt8to32 + OpSignExt8to64 + OpSignExt16to32 + OpSignExt16to64 + OpSignExt32to64 + OpZeroExt8to16 + OpZeroExt8to32 + OpZeroExt8to64 + OpZeroExt16to32 + OpZeroExt16to64 + OpZeroExt32to64 + OpTrunc16to8 + OpTrunc32to8 + OpTrunc32to16 + OpTrunc64to8 + OpTrunc64to16 + OpTrunc64to32 + OpCvt32to32F + OpCvt32to64F + OpCvt64to32F + OpCvt64to64F + OpCvt32Fto32 + OpCvt32Fto64 + OpCvt64Fto32 + OpCvt64Fto64 + OpCvt32Fto64F + OpCvt64Fto32F + OpCvtBoolToUint8 + OpRound32F + OpRound64F + OpIsNonNil + OpIsInBounds + OpIsSliceInBounds + OpNilCheck + OpGetG + OpGetClosurePtr + OpGetCallerPC + OpGetCallerSP + OpPtrIndex + OpOffPtr + OpSliceMake + OpSlicePtr + OpSliceLen + OpSliceCap + OpSlicePtrUnchecked + OpComplexMake + OpComplexReal + OpComplexImag + OpStringMake + OpStringPtr + OpStringLen + OpIMake + OpITab + OpIData + OpStructMake0 + OpStructMake1 + OpStructMake2 + OpStructMake3 + OpStructMake4 + OpStructSelect + OpArrayMake0 + OpArrayMake1 + OpArraySelect + OpStoreReg + OpLoadReg + OpFwdRef + OpUnknown + OpVarDef + OpVarLive + OpKeepAlive + OpInlMark + OpInt64Make + OpInt64Hi + OpInt64Lo + OpAdd32carry + OpAdd32withcarry + OpSub32carry + 
OpSub32withcarry + OpAdd64carry + OpSub64borrow + OpSignmask + OpZeromask + OpSlicemask + OpSpectreIndex + OpSpectreSliceIndex + OpCvt32Uto32F + OpCvt32Uto64F + OpCvt32Fto32U + OpCvt64Fto32U + OpCvt64Uto32F + OpCvt64Uto64F + OpCvt32Fto64U + OpCvt64Fto64U + OpSelect0 + OpSelect1 + OpSelectN + OpSelectNAddr + OpMakeResult + OpAtomicLoad8 + OpAtomicLoad32 + OpAtomicLoad64 + OpAtomicLoadPtr + OpAtomicLoadAcq32 + OpAtomicLoadAcq64 + OpAtomicStore8 + OpAtomicStore32 + OpAtomicStore64 + OpAtomicStorePtrNoWB + OpAtomicStoreRel32 + OpAtomicStoreRel64 + OpAtomicExchange32 + OpAtomicExchange64 + OpAtomicAdd32 + OpAtomicAdd64 + OpAtomicCompareAndSwap32 + OpAtomicCompareAndSwap64 + OpAtomicCompareAndSwapRel32 + OpAtomicAnd8 + OpAtomicAnd32 + OpAtomicOr8 + OpAtomicOr32 + OpAtomicAdd32Variant + OpAtomicAdd64Variant + OpAtomicExchange32Variant + OpAtomicExchange64Variant + OpAtomicCompareAndSwap32Variant + OpAtomicCompareAndSwap64Variant + OpAtomicAnd8Variant + OpAtomicAnd32Variant + OpAtomicOr8Variant + OpAtomicOr32Variant + OpPubBarrier + OpClobber + OpClobberReg + OpPrefetchCache + OpPrefetchCacheStreamed +) + +var opcodeTable = [...]opInfo{ + {name: "OpInvalid"}, + + { + name: "ADDSS", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: x86.AADDSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "ADDSD", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: x86.AADDSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "SUBSS", + argLen: 2, + resultInArg0: true, + asm: x86.ASUBSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + outputs: 
[]outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "SUBSD", + argLen: 2, + resultInArg0: true, + asm: x86.ASUBSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "MULSS", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: x86.AMULSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "MULSD", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: x86.AMULSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "DIVSS", + argLen: 2, + resultInArg0: true, + asm: x86.ADIVSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "DIVSD", + argLen: 2, + resultInArg0: true, + asm: x86.ADIVSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "MOVSSload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "MOVSDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX 
CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "MOVSSconst", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + asm: x86.AMOVSS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "MOVSDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: x86.AMOVSD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "MOVSSloadidx1", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVSS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "MOVSSloadidx4", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVSS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "MOVSDloadidx1", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "MOVSDloadidx8", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "MOVSSstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVSS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {0, 65791}, // AX 
CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVSDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVSSstoreidx1", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVSS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVSSstoreidx4", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVSS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVSDstoreidx1", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVSDstoreidx8", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "ADDSSload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AADDSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "ADDSDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AADDSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "SUBSSload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.ASUBSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "SUBSDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.ASUBSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "MULSSload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AMULSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "MULSDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AMULSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "DIVSSload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.ADIVSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "DIVSDload", + auxType: auxSymOff, + 
argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.ADIVSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "ADDL", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: x86.AADDL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 239}, // AX CX DX BX BP SI DI + {0, 255}, // AX CX DX BX SP BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ADDLconst", + auxType: auxInt32, + argLen: 1, + clobberFlags: true, + asm: x86.AADDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 255}, // AX CX DX BX SP BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ADDLcarry", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: x86.AADDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {1, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {1, 0}, + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ADDLconstcarry", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + asm: x86.AADDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {1, 0}, + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ADCL", + argLen: 3, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: x86.AADCL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {1, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ADCLconst", + auxType: auxInt32, + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.AADCL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + 
}, + }, + }, + { + name: "SUBL", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASUBL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {1, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SUBLconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASUBL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SUBLcarry", + argLen: 2, + resultInArg0: true, + asm: x86.ASUBL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {1, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {1, 0}, + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SUBLconstcarry", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + asm: x86.ASUBL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {1, 0}, + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SBBL", + argLen: 3, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASBBL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {1, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SBBLconst", + auxType: auxInt32, + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASBBL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MULL", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: x86.AIMULL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {1, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, 
+ { + name: "MULLconst", + auxType: auxInt32, + argLen: 1, + clobberFlags: true, + asm: x86.AIMUL3L, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MULLU", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: x86.AMULL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 255}, // AX CX DX BX SP BP SI DI + }, + clobbers: 4, // DX + outputs: []outputInfo{ + {1, 0}, + {0, 1}, // AX + }, + }, + }, + { + name: "HMULL", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: x86.AIMULL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 255}, // AX CX DX BX SP BP SI DI + }, + clobbers: 1, // AX + outputs: []outputInfo{ + {0, 4}, // DX + }, + }, + }, + { + name: "HMULLU", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: x86.AMULL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 255}, // AX CX DX BX SP BP SI DI + }, + clobbers: 1, // AX + outputs: []outputInfo{ + {0, 4}, // DX + }, + }, + }, + { + name: "MULLQU", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: x86.AMULL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 255}, // AX CX DX BX SP BP SI DI + }, + outputs: []outputInfo{ + {0, 4}, // DX + {1, 1}, // AX + }, + }, + }, + { + name: "AVGLU", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {1, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "DIVL", + auxType: auxBool, + argLen: 2, + clobberFlags: true, + asm: x86.AIDIVL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 251}, // AX CX BX SP BP SI DI + }, + clobbers: 4, // DX + outputs: []outputInfo{ + {0, 1}, // AX + }, + }, + }, + { + name: "DIVW", + auxType: auxBool, + argLen: 2, + clobberFlags: true, + asm: 
x86.AIDIVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 251}, // AX CX BX SP BP SI DI + }, + clobbers: 4, // DX + outputs: []outputInfo{ + {0, 1}, // AX + }, + }, + }, + { + name: "DIVLU", + argLen: 2, + clobberFlags: true, + asm: x86.ADIVL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 251}, // AX CX BX SP BP SI DI + }, + clobbers: 4, // DX + outputs: []outputInfo{ + {0, 1}, // AX + }, + }, + }, + { + name: "DIVWU", + argLen: 2, + clobberFlags: true, + asm: x86.ADIVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 251}, // AX CX BX SP BP SI DI + }, + clobbers: 4, // DX + outputs: []outputInfo{ + {0, 1}, // AX + }, + }, + }, + { + name: "MODL", + auxType: auxBool, + argLen: 2, + clobberFlags: true, + asm: x86.AIDIVL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 251}, // AX CX BX SP BP SI DI + }, + clobbers: 1, // AX + outputs: []outputInfo{ + {0, 4}, // DX + }, + }, + }, + { + name: "MODW", + auxType: auxBool, + argLen: 2, + clobberFlags: true, + asm: x86.AIDIVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 251}, // AX CX BX SP BP SI DI + }, + clobbers: 1, // AX + outputs: []outputInfo{ + {0, 4}, // DX + }, + }, + }, + { + name: "MODLU", + argLen: 2, + clobberFlags: true, + asm: x86.ADIVL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 251}, // AX CX BX SP BP SI DI + }, + clobbers: 1, // AX + outputs: []outputInfo{ + {0, 4}, // DX + }, + }, + }, + { + name: "MODWU", + argLen: 2, + clobberFlags: true, + asm: x86.ADIVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 251}, // AX CX BX SP BP SI DI + }, + clobbers: 1, // AX + outputs: []outputInfo{ + {0, 4}, // DX + }, + }, + }, + { + name: "ANDL", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: x86.AANDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {1, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI 
+ }, + }, + }, + { + name: "ANDLconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.AANDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ORL", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: x86.AORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {1, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ORLconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.AORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "XORL", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: x86.AXORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {1, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "XORLconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.AXORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "CMPL", + argLen: 2, + asm: x86.ACMPL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 255}, // AX CX DX BX SP BP SI DI + {1, 255}, // AX CX DX BX SP BP SI DI + }, + }, + }, + { + name: "CMPW", + argLen: 2, + asm: x86.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 255}, // AX CX DX BX SP BP SI DI + {1, 255}, // AX CX DX BX SP BP SI DI + }, + }, + }, + { + name: "CMPB", + argLen: 2, + asm: x86.ACMPB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 255}, // AX CX DX BX SP BP SI DI + {1, 255}, // AX CX DX BX SP BP SI DI + }, + 
}, + }, + { + name: "CMPLconst", + auxType: auxInt32, + argLen: 1, + asm: x86.ACMPL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 255}, // AX CX DX BX SP BP SI DI + }, + }, + }, + { + name: "CMPWconst", + auxType: auxInt16, + argLen: 1, + asm: x86.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 255}, // AX CX DX BX SP BP SI DI + }, + }, + }, + { + name: "CMPBconst", + auxType: auxInt8, + argLen: 1, + asm: x86.ACMPB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 255}, // AX CX DX BX SP BP SI DI + }, + }, + }, + { + name: "CMPLload", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "CMPWload", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "CMPBload", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "CMPLconstload", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "CMPWconstload", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "CMPBconstload", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + 
name: "UCOMISS", + argLen: 2, + asm: x86.AUCOMISS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "UCOMISD", + argLen: 2, + asm: x86.AUCOMISD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "TESTL", + argLen: 2, + commutative: true, + asm: x86.ATESTL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 255}, // AX CX DX BX SP BP SI DI + {1, 255}, // AX CX DX BX SP BP SI DI + }, + }, + }, + { + name: "TESTW", + argLen: 2, + commutative: true, + asm: x86.ATESTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 255}, // AX CX DX BX SP BP SI DI + {1, 255}, // AX CX DX BX SP BP SI DI + }, + }, + }, + { + name: "TESTB", + argLen: 2, + commutative: true, + asm: x86.ATESTB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 255}, // AX CX DX BX SP BP SI DI + {1, 255}, // AX CX DX BX SP BP SI DI + }, + }, + }, + { + name: "TESTLconst", + auxType: auxInt32, + argLen: 1, + asm: x86.ATESTL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 255}, // AX CX DX BX SP BP SI DI + }, + }, + }, + { + name: "TESTWconst", + auxType: auxInt16, + argLen: 1, + asm: x86.ATESTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 255}, // AX CX DX BX SP BP SI DI + }, + }, + }, + { + name: "TESTBconst", + auxType: auxInt8, + argLen: 1, + asm: x86.ATESTB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 255}, // AX CX DX BX SP BP SI DI + }, + }, + }, + { + name: "SHLL", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHLL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SHLLconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHLL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: 
[]outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SHRL", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHRL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SHRW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHRW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SHRB", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHRB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SHRLconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHRL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SHRWconst", + auxType: auxInt16, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHRW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SHRBconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHRB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SARL", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASARL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SARW", + argLen: 2, + 
resultInArg0: true, + clobberFlags: true, + asm: x86.ASARW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SARB", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASARB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SARLconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASARL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SARWconst", + auxType: auxInt16, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASARW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SARBconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASARB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ROLL", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.AROLL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ROLW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.AROLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ROLB", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.AROLB, + reg: regInfo{ + inputs: []inputInfo{ + 
{1, 2}, // CX + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ROLLconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.AROLL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ROLWconst", + auxType: auxInt16, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.AROLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ROLBconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.AROLB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ADDLload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AADDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SUBLload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.ASUBL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MULLload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AIMULL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: 
[]outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ANDLload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AANDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ORLload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "XORLload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AXORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ADDLloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AADDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SUBLloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.ASUBL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MULLloadidx4", + auxType: auxSymOff, + argLen: 4, 
+ resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AIMULL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ANDLloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AANDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ORLloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "XORLloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AXORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "NEGL", + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ANEGL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "NOTL", + argLen: 1, + resultInArg0: true, + asm: x86.ANOTL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "BSFL", + argLen: 1, + 
clobberFlags: true, + asm: x86.ABSFL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "BSFW", + argLen: 1, + clobberFlags: true, + asm: x86.ABSFW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "LoweredCtz32", + argLen: 1, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "BSRL", + argLen: 1, + clobberFlags: true, + asm: x86.ABSRL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "BSRW", + argLen: 1, + clobberFlags: true, + asm: x86.ABSRW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "BSWAPL", + argLen: 1, + resultInArg0: true, + asm: x86.ABSWAPL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SQRTSD", + argLen: 1, + asm: x86.ASQRTSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "SQRTSS", + argLen: 1, + asm: x86.ASQRTSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "SBBLcarrymask", + argLen: 1, + asm: x86.ASBBL, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SETEQ", + argLen: 1, + asm: x86.ASETEQ, + reg: regInfo{ + 
outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SETNE", + argLen: 1, + asm: x86.ASETNE, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SETL", + argLen: 1, + asm: x86.ASETLT, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SETLE", + argLen: 1, + asm: x86.ASETLE, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SETG", + argLen: 1, + asm: x86.ASETGT, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SETGE", + argLen: 1, + asm: x86.ASETGE, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SETB", + argLen: 1, + asm: x86.ASETCS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SETBE", + argLen: 1, + asm: x86.ASETLS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SETA", + argLen: 1, + asm: x86.ASETHI, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SETAE", + argLen: 1, + asm: x86.ASETCC, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SETO", + argLen: 1, + asm: x86.ASETOS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SETEQF", + argLen: 1, + clobberFlags: true, + asm: x86.ASETEQ, + reg: regInfo{ + clobbers: 1, // AX + outputs: []outputInfo{ + {0, 238}, // CX DX BX BP SI DI + }, + }, + }, + { + name: "SETNEF", + argLen: 1, + clobberFlags: true, + asm: x86.ASETNE, + reg: regInfo{ + clobbers: 1, // AX + outputs: []outputInfo{ + {0, 238}, // CX DX BX BP SI DI + }, + }, + }, + { + name: "SETORD", + argLen: 1, + asm: x86.ASETPC, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, 
// AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SETNAN", + argLen: 1, + asm: x86.ASETPS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SETGF", + argLen: 1, + asm: x86.ASETHI, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SETGEF", + argLen: 1, + asm: x86.ASETCC, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVBLSX", + argLen: 1, + asm: x86.AMOVBLSX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVBLZX", + argLen: 1, + asm: x86.AMOVBLZX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVWLSX", + argLen: 1, + asm: x86.AMOVWLSX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVWLZX", + argLen: 1, + asm: x86.AMOVWLZX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVLconst", + auxType: auxInt32, + argLen: 0, + rematerializeable: true, + asm: x86.AMOVL, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "CVTTSD2SL", + argLen: 1, + asm: x86.ACVTTSD2SL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "CVTTSS2SL", + argLen: 1, + asm: x86.ACVTTSS2SL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "CVTSL2SS", + 
argLen: 1, + asm: x86.ACVTSL2SS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "CVTSL2SD", + argLen: 1, + asm: x86.ACVTSL2SD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "CVTSD2SS", + argLen: 1, + asm: x86.ACVTSD2SS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "CVTSS2SD", + argLen: 1, + asm: x86.ACVTSS2SD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "PXOR", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: x86.APXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "LEAL", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "LEAL1", + auxType: auxSymOff, + argLen: 2, + commutative: true, + symEffect: SymAddr, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "LEAL2", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI 
DI + }, + }, + }, + { + name: "LEAL4", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "LEAL8", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVBLZX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVBLSXload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVBLSX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVWLZX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVWLSXload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVWLSX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVLload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX 
CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVLstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "ADDLmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "SUBLmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ASUBL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "ANDLmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "ORLmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AORL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + 
{ + name: "XORLmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "ADDLmodifyidx4", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "SUBLmodifyidx4", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.ASUBL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "ANDLmodifyidx4", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "ORLmodifyidx4", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AORL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "XORLmodifyidx4", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "ADDLconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead 
| SymWrite, + asm: x86.AADDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "ANDLconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "ORLconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "XORLconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "ADDLconstmodifyidx4", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "ANDLconstmodifyidx4", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "ORLconstmodifyidx4", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AORL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "XORLconstmodifyidx4", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX 
SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVBloadidx1", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: x86.AMOVBLZX, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVWloadidx1", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: x86.AMOVWLZX, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVWloadidx2", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVWLZX, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVLloadidx1", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVLloadidx4", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVBstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: 
"MOVWstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVWstoreidx2", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVLstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVLstoreidx4", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVBstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVWstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVLstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVBstoreconstidx1", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymWrite, + asm: x86.AMOVB, + reg: regInfo{ + inputs: 
[]inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVWstoreconstidx1", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymWrite, + asm: x86.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVWstoreconstidx2", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymWrite, + asm: x86.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVLstoreconstidx1", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymWrite, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVLstoreconstidx4", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymWrite, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "DUFFZERO", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 128}, // DI + {1, 1}, // AX + }, + clobbers: 130, // CX DI + }, + }, + { + name: "REPSTOSL", + argLen: 4, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 128}, // DI + {1, 2}, // CX + {2, 1}, // AX + }, + clobbers: 130, // CX DI + }, + }, + { + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + 
call: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 4}, // DX + {0, 255}, // AX CX DX BX SP BP SI DI + }, + clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + { + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + { + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 128}, // DI + {1, 64}, // SI + }, + clobbers: 194, // CX SI DI + }, + }, + { + name: "REPMOVSL", + argLen: 4, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 128}, // DI + {1, 64}, // SI + {2, 2}, // CX + }, + clobbers: 194, // CX SI DI + }, + }, + { + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "LoweredGetG", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4}, // DX + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + clobberFlags: true, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 255}, // AX CX DX BX SP BP SI DI + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + reg: regInfo{ + clobbers: 65280, // X0 X1 X2 X3 X4 X5 X6 X7 + outputs: []outputInfo{ + {0, 128}, // DI + }, + }, + }, + { + name: 
"LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4}, // DX + {1, 8}, // BX + }, + }, + }, + { + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // CX + {1, 4}, // DX + }, + }, + }, + { + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 2}, // CX + }, + }, + }, + { + name: "LoweredPanicExtendA", + auxType: auxInt64, + argLen: 4, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 64}, // SI + {1, 4}, // DX + {2, 8}, // BX + }, + }, + }, + { + name: "LoweredPanicExtendB", + auxType: auxInt64, + argLen: 4, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 64}, // SI + {1, 2}, // CX + {2, 4}, // DX + }, + }, + }, + { + name: "LoweredPanicExtendC", + auxType: auxInt64, + argLen: 4, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 64}, // SI + {1, 1}, // AX + {2, 2}, // CX + }, + }, + }, + { + name: "FlagEQ", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagLT_ULT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagLT_UGT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagGT_UGT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagGT_ULT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "MOVSSconst1", + auxType: auxFloat32, + argLen: 0, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVSDconst1", + auxType: auxFloat64, + argLen: 0, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVSSconst2", + argLen: 1, + asm: x86.AMOVSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "MOVSDconst2", + argLen: 1, + asm: x86.AMOVSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, 
// AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + + { + name: "ADDSS", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: x86.AADDSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "ADDSD", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: x86.AADDSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "SUBSS", + argLen: 2, + resultInArg0: true, + asm: x86.ASUBSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "SUBSD", + argLen: 2, + resultInArg0: true, + asm: x86.ASUBSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MULSS", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: x86.AMULSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 
X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MULSD", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: x86.AMULSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "DIVSS", + argLen: 2, + resultInArg0: true, + asm: x86.ADIVSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "DIVSD", + argLen: 2, + resultInArg0: true, + asm: x86.ADIVSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MOVSSload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MOVSDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MOVSSconst", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + 
asm: x86.AMOVSS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MOVSDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: x86.AMOVSD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MOVSSloadidx1", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVSS, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MOVSSloadidx4", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVSS, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MOVSDloadidx1", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVSD, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MOVSDloadidx8", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVSD, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 
X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MOVSSstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVSS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "MOVSDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVSD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "MOVSSstoreidx1", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVSS, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "MOVSSstoreidx4", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVSS, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "MOVSDstoreidx1", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVSD, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "MOVSDstoreidx8", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVSD, + scale: 8, + reg: regInfo{ + 
inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "ADDSSload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AADDSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "ADDSDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AADDSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "SUBSSload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.ASUBSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "SUBSDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.ASUBSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MULSSload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AMULSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MULSDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AMULSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "DIVSSload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.ADIVSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "DIVSDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.ADIVSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "ADDSSloadidx1", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AADDSS, + 
scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "ADDSSloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AADDSS, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "ADDSDloadidx1", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AADDSD, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "ADDSDloadidx8", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AADDSD, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "SUBSSloadidx1", + auxType: auxSymOff, + 
argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.ASUBSS, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "SUBSSloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.ASUBSS, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "SUBSDloadidx1", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.ASUBSD, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "SUBSDloadidx8", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.ASUBSD, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + }, + }, + }, + { + name: "MULSSloadidx1", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AMULSS, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MULSSloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AMULSS, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MULSDloadidx1", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AMULSD, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MULSDloadidx8", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.AMULSD, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: 
[]outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "DIVSSloadidx1", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.ADIVSS, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "DIVSSloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.ADIVSS, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "DIVSDloadidx1", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.ADIVSD, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "DIVSDloadidx8", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + symEffect: SymRead, + asm: x86.ADIVSD, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + {1, 
4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "ADDQ", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: x86.AADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ADDL", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: x86.AADDL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ADDQconst", + auxType: auxInt32, + argLen: 1, + clobberFlags: true, + asm: x86.AADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ADDLconst", + auxType: auxInt32, + argLen: 1, + clobberFlags: true, + asm: x86.AADDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ADDQconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ADDLconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: 
SymRead | SymWrite, + asm: x86.AADDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SUBQ", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SUBL", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASUBL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SUBQconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SUBLconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASUBL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MULQ", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: x86.AIMULQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MULL", + argLen: 
2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: x86.AIMULL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MULQconst", + auxType: auxInt32, + argLen: 1, + clobberFlags: true, + asm: x86.AIMUL3Q, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MULLconst", + auxType: auxInt32, + argLen: 1, + clobberFlags: true, + asm: x86.AIMUL3L, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MULLU", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: x86.AMULL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + clobbers: 4, // DX + outputs: []outputInfo{ + {1, 0}, + {0, 1}, // AX + }, + }, + }, + { + name: "MULQU", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: x86.AMULQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + clobbers: 4, // DX + outputs: []outputInfo{ + {1, 0}, + {0, 1}, // AX + }, + }, + }, + { + name: "HMULQ", + argLen: 2, + clobberFlags: true, + asm: x86.AIMULQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + clobbers: 1, // AX + outputs: []outputInfo{ + {0, 4}, // DX + }, + }, + }, + { + name: "HMULL", + argLen: 2, + clobberFlags: true, + asm: x86.AIMULL, + reg: regInfo{ + inputs: []inputInfo{ 
+ {0, 1}, // AX + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + clobbers: 1, // AX + outputs: []outputInfo{ + {0, 4}, // DX + }, + }, + }, + { + name: "HMULQU", + argLen: 2, + clobberFlags: true, + asm: x86.AMULQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + clobbers: 1, // AX + outputs: []outputInfo{ + {0, 4}, // DX + }, + }, + }, + { + name: "HMULLU", + argLen: 2, + clobberFlags: true, + asm: x86.AMULL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + clobbers: 1, // AX + outputs: []outputInfo{ + {0, 4}, // DX + }, + }, + }, + { + name: "AVGQU", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "DIVQ", + auxType: auxBool, + argLen: 2, + clobberFlags: true, + asm: x86.AIDIVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 1}, // AX + {1, 4}, // DX + }, + }, + }, + { + name: "DIVL", + auxType: auxBool, + argLen: 2, + clobberFlags: true, + asm: x86.AIDIVL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 1}, // AX + {1, 4}, // DX + }, + }, + }, + { + name: "DIVW", + auxType: auxBool, + argLen: 2, + clobberFlags: true, + asm: x86.AIDIVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 1}, // AX + {1, 4}, // DX + }, + }, + }, + { + name: "DIVQU", + argLen: 
2, + clobberFlags: true, + asm: x86.ADIVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 1}, // AX + {1, 4}, // DX + }, + }, + }, + { + name: "DIVLU", + argLen: 2, + clobberFlags: true, + asm: x86.ADIVL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 1}, // AX + {1, 4}, // DX + }, + }, + }, + { + name: "DIVWU", + argLen: 2, + clobberFlags: true, + asm: x86.ADIVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 1}, // AX + {1, 4}, // DX + }, + }, + }, + { + name: "NEGLflags", + argLen: 1, + resultInArg0: true, + asm: x86.ANEGL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ADDQcarry", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: x86.AADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ADCQ", + argLen: 3, + commutative: true, + resultInArg0: true, + asm: x86.AADCQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ADDQconstcarry", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + asm: x86.AADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ADCQconst", + auxType: auxInt32, + argLen: 2, + resultInArg0: true, + asm: x86.AADCQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SUBQborrow", + argLen: 2, + resultInArg0: true, + asm: x86.ASUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SBBQ", + argLen: 3, + resultInArg0: true, + asm: x86.ASBBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SUBQconstborrow", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + asm: x86.ASUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SBBQconst", + auxType: auxInt32, + argLen: 2, + resultInArg0: true, + asm: x86.ASBBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MULQU2", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: x86.AMULQ, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 1}, // AX + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 4}, // DX + {1, 1}, // AX + }, + }, + }, + { + name: "DIVQU2", + argLen: 3, + clobberFlags: true, + asm: x86.ADIVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4}, // DX + {1, 1}, // AX + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 1}, // AX + {1, 4}, // DX + }, + }, + }, + { + name: "ANDQ", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: x86.AANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ANDL", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: x86.AANDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ANDQconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.AANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ANDLconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.AANDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ANDQconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + 
clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ANDLconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ORQ", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: x86.AORQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ORL", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: x86.AORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ORQconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.AORQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ORLconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.AORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ORQconstmodify", + 
auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AORQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ORLconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "XORQ", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: x86.AXORQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "XORL", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: x86.AXORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "XORQconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.AXORQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "XORLconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.AXORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 
+ }, + }, + }, + { + name: "XORQconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "XORLconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPQ", + argLen: 2, + asm: x86.ACMPQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMPL", + argLen: 2, + asm: x86.ACMPL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMPW", + argLen: 2, + asm: x86.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMPB", + argLen: 2, + asm: x86.ACMPB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMPQconst", + auxType: auxInt32, + argLen: 1, + asm: x86.ACMPQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMPLconst", + auxType: auxInt32, + argLen: 1, + asm: x86.ACMPL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMPWconst", + 
auxType: auxInt16, + argLen: 1, + asm: x86.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMPBconst", + auxType: auxInt8, + argLen: 1, + asm: x86.ACMPB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMPQload", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPLload", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPWload", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPBload", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPQconstload", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPLconstload", + auxType: auxSymValAndOff, + argLen: 2, + 
faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPWconstload", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPBconstload", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPQloadidx8", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.ACMPQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPQloadidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymRead, + asm: x86.ACMPQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPLloadidx4", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.ACMPL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPLloadidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymRead, + asm: 
x86.ACMPL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPWloadidx2", + auxType: auxSymOff, + argLen: 4, + symEffect: SymRead, + asm: x86.ACMPW, + scale: 2, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPWloadidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymRead, + asm: x86.ACMPW, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPBloadidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymRead, + asm: x86.ACMPB, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPQconstloadidx8", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.ACMPQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPQconstloadidx1", + auxType: auxSymValAndOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: x86.ACMPQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX 
CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPLconstloadidx4", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.ACMPL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPLconstloadidx1", + auxType: auxSymValAndOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: x86.ACMPL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPWconstloadidx2", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymRead, + asm: x86.ACMPW, + scale: 2, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPWconstloadidx1", + auxType: auxSymValAndOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: x86.ACMPW, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "CMPBconstloadidx1", + auxType: auxSymValAndOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: x86.ACMPB, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "UCOMISS", + argLen: 2, + asm: x86.AUCOMISS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "UCOMISD", + argLen: 2, + asm: x86.AUCOMISD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "BTL", + argLen: 2, + asm: x86.ABTL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BTQ", + argLen: 2, + asm: x86.ABTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BTCL", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ABTCL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BTCQ", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ABTCQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BTRL", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ABTRL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BTRQ", + argLen: 2, + resultInArg0: true, + clobberFlags: true, 
+ asm: x86.ABTRQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BTSL", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ABTSL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BTSQ", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ABTSQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BTLconst", + auxType: auxInt8, + argLen: 1, + asm: x86.ABTL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BTQconst", + auxType: auxInt8, + argLen: 1, + asm: x86.ABTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BTCQconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ABTCQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BTRQconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ABTRQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: 
[]outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BTSQconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ABTSQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BTSQconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ABTSQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "BTRQconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ABTRQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "BTCQconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ABTCQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "TESTQ", + argLen: 2, + commutative: true, + asm: x86.ATESTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "TESTL", + argLen: 2, + commutative: true, + asm: x86.ATESTL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "TESTW", + argLen: 2, + commutative: true, + asm: x86.ATESTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI 
DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "TESTB", + argLen: 2, + commutative: true, + asm: x86.ATESTB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "TESTQconst", + auxType: auxInt32, + argLen: 1, + asm: x86.ATESTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "TESTLconst", + auxType: auxInt32, + argLen: 1, + asm: x86.ATESTL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "TESTWconst", + auxType: auxInt16, + argLen: 1, + asm: x86.ATESTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "TESTBconst", + auxType: auxInt8, + argLen: 1, + asm: x86.ATESTB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHLQ", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHLL", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHLL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHLQconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX 
DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHLLconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHLL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHRQ", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHRQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHRL", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHRL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHRW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHRW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHRB", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHRB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHRQconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHRQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + 
}, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHRLconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHRL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHRWconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHRW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHRBconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHRB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SARQ", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASARQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SARL", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASARL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SARW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASARW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 
49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SARB", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASARB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SARQconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASARQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SARLconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASARL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SARWconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASARW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SARBconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASARB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHRDQ", + argLen: 3, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHRQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 
R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHLDQ", + argLen: 3, + resultInArg0: true, + clobberFlags: true, + asm: x86.ASHLQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ROLQ", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.AROLQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ROLL", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.AROLL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ROLW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.AROLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ROLB", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.AROLB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "RORQ", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ARORQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 
R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "RORL", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ARORL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "RORW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ARORW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "RORB", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.ARORB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ROLQconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.AROLQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ROLLconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.AROLL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ROLWconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.AROLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: 
[]outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ROLBconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.AROLB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ADDLload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AADDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ADDQload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SUBQload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.ASUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SUBLload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.ASUBL, + reg: regInfo{ + inputs: []inputInfo{ 
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ANDLload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AANDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ANDQload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ORQload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AORQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ORLload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 
49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "XORQload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AXORQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "XORLload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AXORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ADDLloadidx1", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AADDL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ADDLloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AADDL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 
+ }, + }, + }, + { + name: "ADDLloadidx8", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AADDL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ADDQloadidx1", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AADDQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ADDQloadidx8", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AADDQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SUBLloadidx1", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.ASUBL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + 
{0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SUBLloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.ASUBL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SUBLloadidx8", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.ASUBL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SUBQloadidx1", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.ASUBQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SUBQloadidx8", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.ASUBQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 
R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ANDLloadidx1", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AANDL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ANDLloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AANDL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ANDLloadidx8", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AANDL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ANDQloadidx1", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AANDQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 
R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ANDQloadidx8", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AANDQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ORLloadidx1", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AORL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ORLloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AORL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ORLloadidx8", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AORL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ORQloadidx1", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AORQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ORQloadidx8", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AORQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "XORLloadidx1", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AXORL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "XORLloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AXORL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "XORLloadidx8", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AXORL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "XORQloadidx1", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AXORQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "XORQloadidx8", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + symEffect: SymRead, + asm: x86.AXORQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ADDQmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + 
asm: x86.AADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SUBQmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ASUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ANDQmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ORQmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AORQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "XORQmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ADDLmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + 
name: "SUBLmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ASUBL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ANDLmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ORLmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AORL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "XORLmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ADDQmodifyidx1", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ADDQmodifyidx8", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDQ, + scale: 8, + reg: regInfo{ + 
inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SUBQmodifyidx1", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.ASUBQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SUBQmodifyidx8", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.ASUBQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ANDQmodifyidx1", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ANDQmodifyidx8", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ORQmodifyidx1", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + 
symEffect: SymRead | SymWrite, + asm: x86.AORQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ORQmodifyidx8", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AORQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "XORQmodifyidx1", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "XORQmodifyidx8", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ADDLmodifyidx1", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: 
"ADDLmodifyidx4", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ADDLmodifyidx8", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SUBLmodifyidx1", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.ASUBL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SUBLmodifyidx4", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.ASUBL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SUBLmodifyidx8", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.ASUBL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX 
DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ANDLmodifyidx1", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ANDLmodifyidx4", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ANDLmodifyidx8", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ORLmodifyidx1", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AORL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ORLmodifyidx4", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AORL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX 
DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ORLmodifyidx8", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AORL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "XORLmodifyidx1", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "XORLmodifyidx4", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "XORLmodifyidx8", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ADDQconstmodifyidx1", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 
49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ADDQconstmodifyidx8", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ANDQconstmodifyidx1", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ANDQconstmodifyidx8", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ORQconstmodifyidx1", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AORQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ORQconstmodifyidx8", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AORQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "XORQconstmodifyidx1", + auxType: 
auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "XORQconstmodifyidx8", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ADDLconstmodifyidx1", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ADDLconstmodifyidx4", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ADDLconstmodifyidx8", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ANDLconstmodifyidx1", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 
R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ANDLconstmodifyidx4", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ANDLconstmodifyidx8", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ORLconstmodifyidx1", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AORL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ORLconstmodifyidx4", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AORL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ORLconstmodifyidx8", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AORL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "XORLconstmodifyidx1", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + 
symEffect: SymRead | SymWrite, + asm: x86.AXORL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "XORLconstmodifyidx4", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "XORLconstmodifyidx8", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "NEGQ", + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ANEGQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "NEGL", + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ANEGL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "NOTQ", + argLen: 1, + resultInArg0: true, + asm: x86.ANOTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "NOTL", + argLen: 1, + resultInArg0: true, + asm: x86.ANOTL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BSFQ", + argLen: 1, + asm: x86.ABSFQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BSFL", + argLen: 1, + clobberFlags: true, + asm: x86.ABSFL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BSRQ", + argLen: 1, + asm: x86.ABSRQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BSRL", + argLen: 1, + clobberFlags: true, + asm: x86.ABSRL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVQEQ", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVQEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVQNE", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVQNE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + 
name: "CMOVQLT", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVQLT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVQGT", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVQGT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVQLE", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVQLE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVQGE", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVQGE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVQLS", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVQLS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVQHI", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVQHI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 
R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVQCC", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVQCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVQCS", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVQCS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVLEQ", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVLEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVLNE", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVLNE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVLLT", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVLLT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVLGT", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVLGT, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVLLE", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVLLE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVLGE", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVLGE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVLLS", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVLLS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVLHI", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVLHI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVLCC", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVLCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 
R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVLCS", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVLCS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVWEQ", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVWEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVWNE", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVWNE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVWLT", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVWLT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVWGT", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVWGT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVWLE", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVWLE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 
49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVWGE", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVWGE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVWLS", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVWLS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVWHI", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVWHI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVWCC", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVWCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVWCS", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVWCS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVQEQF", + argLen: 3, + resultInArg0: 
true, + needIntTemp: true, + asm: x86.ACMOVQNE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVQNEF", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVQNE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVQGTF", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVQHI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVQGEF", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVQCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVLEQF", + argLen: 3, + resultInArg0: true, + needIntTemp: true, + asm: x86.ACMOVLNE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVLNEF", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVLNE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 
R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVLGTF", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVLHI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVLGEF", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVLCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVWEQF", + argLen: 3, + resultInArg0: true, + needIntTemp: true, + asm: x86.ACMOVWNE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVWNEF", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVWNE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVWGTF", + argLen: 3, + resultInArg0: true, + asm: x86.ACMOVWHI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMOVWGEF", + argLen: 3, + resultInArg0: true, + asm: 
x86.ACMOVWCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BSWAPQ", + argLen: 1, + resultInArg0: true, + asm: x86.ABSWAPQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BSWAPL", + argLen: 1, + resultInArg0: true, + asm: x86.ABSWAPL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "POPCNTQ", + argLen: 1, + clobberFlags: true, + asm: x86.APOPCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "POPCNTL", + argLen: 1, + clobberFlags: true, + asm: x86.APOPCNTL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SQRTSD", + argLen: 1, + asm: x86.ASQRTSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "SQRTSS", + argLen: 1, + asm: x86.ASQRTSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + }, + }, + }, + { + name: "ROUNDSD", + auxType: auxInt8, + argLen: 1, + asm: x86.AROUNDSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VFMADD231SD", + argLen: 3, + resultInArg0: true, + asm: x86.AVFMADD231SD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MINSD", + argLen: 2, + resultInArg0: true, + asm: x86.AMINSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MINSS", + argLen: 2, + resultInArg0: true, + asm: x86.AMINSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "SBBQcarrymask", + argLen: 1, + asm: x86.ASBBQ, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SBBLcarrymask", + argLen: 1, + asm: x86.ASBBL, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SETEQ", + argLen: 1, + asm: x86.ASETEQ, + reg: regInfo{ + outputs: []outputInfo{ + {0, 
49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SETNE", + argLen: 1, + asm: x86.ASETNE, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SETL", + argLen: 1, + asm: x86.ASETLT, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SETLE", + argLen: 1, + asm: x86.ASETLE, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SETG", + argLen: 1, + asm: x86.ASETGT, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SETGE", + argLen: 1, + asm: x86.ASETGE, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SETB", + argLen: 1, + asm: x86.ASETCS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SETBE", + argLen: 1, + asm: x86.ASETLS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SETA", + argLen: 1, + asm: x86.ASETHI, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SETAE", + argLen: 1, + asm: x86.ASETCC, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SETO", + argLen: 1, + asm: x86.ASETOS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SETEQstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX 
SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETNEstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETNE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETLstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETLT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETLEstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETLE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETGstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETGT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETGEstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETGE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETCS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETBEstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETLS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETAstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETHI, + reg: 
regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETAEstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETEQstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.ASETEQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETNEstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.ASETNE, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETLstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.ASETLT, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETLEstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.ASETLE, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETGstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.ASETGT, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // 
AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETGEstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.ASETGE, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETBstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.ASETCS, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETBEstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.ASETLS, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETAstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.ASETHI, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETAEstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.ASETCC, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SETEQF", + argLen: 1, + clobberFlags: true, + needIntTemp: true, + asm: x86.ASETEQ, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SETNEF", + 
argLen: 1, + clobberFlags: true, + needIntTemp: true, + asm: x86.ASETNE, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SETORD", + argLen: 1, + asm: x86.ASETPC, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SETNAN", + argLen: 1, + asm: x86.ASETPS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SETGF", + argLen: 1, + asm: x86.ASETHI, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SETGEF", + argLen: 1, + asm: x86.ASETCC, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVBQSX", + argLen: 1, + asm: x86.AMOVBQSX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVBQZX", + argLen: 1, + asm: x86.AMOVBLZX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVWQSX", + argLen: 1, + asm: x86.AMOVWQSX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVWQZX", + argLen: 1, + asm: x86.AMOVWLZX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVLQSX", + argLen: 1, 
+ asm: x86.AMOVLQSX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVLQZX", + argLen: 1, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVLconst", + auxType: auxInt32, + argLen: 0, + rematerializeable: true, + asm: x86.AMOVL, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVQconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: x86.AMOVQ, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CVTTSD2SL", + argLen: 1, + asm: x86.ACVTTSD2SL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CVTTSD2SQ", + argLen: 1, + asm: x86.ACVTTSD2SQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CVTTSS2SL", + argLen: 1, + asm: x86.ACVTTSS2SL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CVTTSS2SQ", + argLen: 1, + asm: x86.ACVTTSS2SQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 
49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CVTSL2SS", + argLen: 1, + asm: x86.ACVTSL2SS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "CVTSL2SD", + argLen: 1, + asm: x86.ACVTSL2SD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "CVTSQ2SS", + argLen: 1, + asm: x86.ACVTSQ2SS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "CVTSQ2SD", + argLen: 1, + asm: x86.ACVTSQ2SD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "CVTSD2SS", + argLen: 1, + asm: x86.ACVTSD2SS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "CVTSS2SD", + argLen: 1, + asm: x86.ACVTSS2SD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MOVQi2f", + argLen: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 + }, + }, + }, + { + name: "MOVQf2i", + argLen: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVLi2f", + argLen: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MOVLf2i", + argLen: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "PXOR", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: x86.APXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "POR", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: x86.APOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "LEAQ", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: x86.ALEAQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LEAL", + auxType: auxSymOff, + argLen: 1, + rematerializeable: 
true, + symEffect: SymAddr, + asm: x86.ALEAL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LEAW", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: x86.ALEAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LEAQ1", + auxType: auxSymOff, + argLen: 2, + commutative: true, + symEffect: SymAddr, + asm: x86.ALEAQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LEAL1", + auxType: auxSymOff, + argLen: 2, + commutative: true, + symEffect: SymAddr, + asm: x86.ALEAL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LEAW1", + auxType: auxSymOff, + argLen: 2, + commutative: true, + symEffect: SymAddr, + asm: x86.ALEAW, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LEAQ2", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, + asm: x86.ALEAQ, + scale: 2, + reg: 
regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LEAL2", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, + asm: x86.ALEAL, + scale: 2, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LEAW2", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, + asm: x86.ALEAW, + scale: 2, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LEAQ4", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, + asm: x86.ALEAQ, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LEAL4", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, + asm: x86.ALEAL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LEAW4", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, + asm: x86.ALEAW, + scale: 4, + reg: 
regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LEAQ8", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, + asm: x86.ALEAQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LEAL8", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, + asm: x86.ALEAL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LEAW8", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, + asm: x86.ALEAW, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVBLZX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVBQSXload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVBQSX, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVWLZX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVWQSXload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVWQSX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVLload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVLQSXload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVLQSX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVQload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVBstore", + auxType: 
auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVLstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVQstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVOload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVUPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "MOVOstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVUPS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB + }, + }, + }, + { + name: "MOVBloadidx1", + auxType: auxSymOff, + argLen: 
3, + commutative: true, + symEffect: SymRead, + asm: x86.AMOVBLZX, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVWloadidx1", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: x86.AMOVWLZX, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVWloadidx2", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVWLZX, + scale: 2, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVLloadidx1", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: x86.AMOVL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVLloadidx4", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP 
SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVLloadidx8", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVQloadidx1", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: x86.AMOVQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVQloadidx8", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVBstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.AMOVB, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVWstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.AMOVW, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 
R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVWstoreidx2", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVW, + scale: 2, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVLstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.AMOVL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVLstoreidx4", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVLstoreidx8", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVQstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.AMOVQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI 
DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVQstoreidx8", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVBstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVWstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVLstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVQstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVOstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVUPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVBstoreconstidx1", + auxType: auxSymValAndOff, + argLen: 3, + commutative: true, + symEffect: SymWrite, + asm: x86.AMOVB, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 
R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVWstoreconstidx1", + auxType: auxSymValAndOff, + argLen: 3, + commutative: true, + symEffect: SymWrite, + asm: x86.AMOVW, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVWstoreconstidx2", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymWrite, + asm: x86.AMOVW, + scale: 2, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVLstoreconstidx1", + auxType: auxSymValAndOff, + argLen: 3, + commutative: true, + symEffect: SymWrite, + asm: x86.AMOVL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVLstoreconstidx4", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymWrite, + asm: x86.AMOVL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVQstoreconstidx1", + auxType: auxSymValAndOff, + argLen: 3, + commutative: true, + symEffect: SymWrite, + asm: x86.AMOVQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVQstoreconstidx8", + auxType: auxSymValAndOff, + argLen: 3, + symEffect: SymWrite, + asm: x86.AMOVQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX 
DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 128}, // DI + }, + clobbers: 128, // DI + }, + }, + { + name: "REPSTOSQ", + argLen: 4, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 128}, // DI + {1, 2}, // CX + {2, 1}, // AX + }, + clobbers: 130, // CX DI + }, + }, + { + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 4}, // DX + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + { + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + { + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 128}, // DI + {1, 64}, // SI + }, + clobbers: 65728, // 
SI DI X0 + }, + }, + { + name: "REPMOVSQ", + argLen: 4, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 128}, // DI + {1, 64}, // SI + {2, 2}, // CX + }, + clobbers: 194, // CX SI DI + }, + }, + { + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "LoweredGetG", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4}, // DX + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + clobberFlags: true, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + reg: regInfo{ + clobbers: 2147418112, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + outputs: []outputInfo{ + {0, 2048}, // R11 + }, + }, + }, + { + name: "LoweredHasCPUFeature", + auxType: auxSym, + argLen: 0, + rematerializeable: true, + symEffect: SymNone, + reg: regInfo{ + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4}, // DX + {1, 8}, // BX + }, + }, + }, + { + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // CX + 
{1, 4}, // DX + }, + }, + }, + { + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 2}, // CX + }, + }, + }, + { + name: "FlagEQ", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagLT_ULT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagLT_UGT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagGT_UGT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagGT_ULT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "MOVBatomicload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVLatomicload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVQatomicload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "XCHGB", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: x86.AXCHGB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 
R13 R15 + }, + }, + }, + { + name: "XCHGL", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: x86.AXCHGL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "XCHGQ", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: x86.AXCHGQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "XADDLlock", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: x86.AXADDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "XADDQlock", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: x86.AXADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "AddTupleFirst32", + argLen: 2, + reg: regInfo{}, + }, + { + name: "AddTupleFirst64", + 
argLen: 2, + reg: regInfo{}, + }, + { + name: "CMPXCHGLlock", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: x86.ACMPXCHGL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1}, // AX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + clobbers: 1, // AX + outputs: []outputInfo{ + {1, 0}, + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "CMPXCHGQlock", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: x86.ACMPXCHGQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1}, // AX + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + clobbers: 1, // AX + outputs: []outputInfo{ + {1, 0}, + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ANDBlock", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: x86.AANDB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ANDLlock", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: x86.AANDL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ORBlock", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: x86.AORB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX 
BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ORLlock", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: x86.AORL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "PrefetchT0", + argLen: 2, + hasSideEffects: true, + asm: x86.APREFETCHT0, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "PrefetchNTA", + argLen: 2, + hasSideEffects: true, + asm: x86.APREFETCHNTA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "ANDNQ", + argLen: 2, + clobberFlags: true, + asm: x86.AANDNQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "ANDNL", + argLen: 2, + clobberFlags: true, + asm: x86.AANDNL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BLSIQ", + argLen: 1, + clobberFlags: true, + asm: x86.ABLSIQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BLSIL", + argLen: 1, + clobberFlags: true, + asm: x86.ABLSIL, 
+ reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BLSMSKQ", + argLen: 1, + clobberFlags: true, + asm: x86.ABLSMSKQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BLSMSKL", + argLen: 1, + clobberFlags: true, + asm: x86.ABLSMSKL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BLSRQ", + argLen: 1, + asm: x86.ABLSRQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "BLSRL", + argLen: 1, + asm: x86.ABLSRL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "TZCNTQ", + argLen: 1, + clobberFlags: true, + asm: x86.ATZCNTQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "TZCNTL", + argLen: 1, + clobberFlags: true, + asm: x86.ATZCNTL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LZCNTQ", + argLen: 1, + clobberFlags: true, + asm: x86.ALZCNTQ, 
+ reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "LZCNTL", + argLen: 1, + clobberFlags: true, + asm: x86.ALZCNTL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVBEWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVBEW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVBELload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVBEL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVBELstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVBEL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVBEQload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.AMOVBEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVBEQstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVBEQ, + reg: 
regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVBELloadidx1", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: x86.AMOVBEL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVBELloadidx4", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVBEL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVBELloadidx8", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVBEL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVBEQloadidx1", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: x86.AMOVBEQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVBEQloadidx8", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVBEQ, + scale: 8, + 
reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "MOVBEWstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.AMOVBEW, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVBEWstoreidx2", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVBEW, + scale: 2, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVBELstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.AMOVBEL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVBELstoreidx4", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVBEL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVBELstoreidx8", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVBEL, + scale: 8, + 
reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVBEQstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.AMOVBEQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "MOVBEQstoreidx8", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVBEQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + }, + }, + { + name: "SARXQ", + argLen: 2, + asm: x86.ASARXQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SARXL", + argLen: 2, + asm: x86.ASARXL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHLXQ", + argLen: 2, + asm: x86.ASHLXQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI 
DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHLXL", + argLen: 2, + asm: x86.ASHLXL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHRXQ", + argLen: 2, + asm: x86.ASHRXQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHRXL", + argLen: 2, + asm: x86.ASHRXL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SARXLload", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASARXL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SARXQload", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASARXQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHLXLload", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASHLXL, + reg: regInfo{ 
+ inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHLXQload", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASHLXQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHRXLload", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASHRXL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHRXQload", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASHRXQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SARXLloadidx1", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASARXL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + 
}, + }, + }, + { + name: "SARXLloadidx4", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASARXL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SARXLloadidx8", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASARXL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SARXQloadidx1", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASARXQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SARXQloadidx8", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASARXQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + 
}, + { + name: "SHLXLloadidx1", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASHLXL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHLXLloadidx4", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASHLXL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHLXLloadidx8", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASHLXL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHLXQloadidx1", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASHLXQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + 
name: "SHLXQloadidx8", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASHLXQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHRXLloadidx1", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASHRXL, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHRXLloadidx4", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASHRXL, + scale: 4, + reg: regInfo{ + inputs: []inputInfo{ + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHRXLloadidx8", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASHRXL, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: 
"SHRXQloadidx1", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASHRXQ, + scale: 1, + reg: regInfo{ + inputs: []inputInfo{ + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + { + name: "SHRXQloadidx8", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ASHRXQ, + scale: 8, + reg: regInfo{ + inputs: []inputInfo{ + {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB + }, + outputs: []outputInfo{ + {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + }, + }, + }, + + { + name: "ADD", + argLen: 2, + commutative: true, + asm: arm.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADDconst", + auxType: auxInt32, + argLen: 1, + asm: arm.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 30719}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SUB", + argLen: 2, + asm: arm.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SUBconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + 
{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSB", + argLen: 2, + asm: arm.ARSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSBconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ARSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MUL", + argLen: 2, + commutative: true, + asm: arm.AMUL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "HMUL", + argLen: 2, + commutative: true, + asm: arm.AMULL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "HMULU", + argLen: 2, + commutative: true, + asm: arm.AMULLU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "CALLudiv", + argLen: 2, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 1}, // R0 + }, + clobbers: 20492, // R2 R3 R12 R14 + outputs: []outputInfo{ + {0, 1}, // R0 + {1, 2}, // R1 + }, + }, + }, + { + name: "ADDS", + argLen: 2, + commutative: true, 
+ asm: arm.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADDSconst", + auxType: auxInt32, + argLen: 1, + asm: arm.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADC", + argLen: 3, + commutative: true, + asm: arm.AADC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADCconst", + auxType: auxInt32, + argLen: 2, + asm: arm.AADC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SUBS", + argLen: 2, + asm: arm.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SUBSconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSBSconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ARSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 
R8 R9 R12 R14 + }, + }, + }, + { + name: "SBC", + argLen: 3, + asm: arm.ASBC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SBCconst", + auxType: auxInt32, + argLen: 2, + asm: arm.ASBC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSCconst", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MULLU", + argLen: 2, + commutative: true, + asm: arm.AMULLU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MULA", + argLen: 3, + asm: arm.AMULA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MULS", + argLen: 3, + asm: arm.AMULS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADDF", + argLen: 2, + 
commutative: true, + asm: arm.AADDF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "ADDD", + argLen: 2, + commutative: true, + asm: arm.AADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "SUBF", + argLen: 2, + asm: arm.ASUBF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "SUBD", + argLen: 2, + asm: arm.ASUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MULF", + argLen: 2, + commutative: true, + asm: arm.AMULF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MULD", + argLen: 2, + commutative: true, + asm: arm.AMULD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 
F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "NMULF", + argLen: 2, + commutative: true, + asm: arm.ANMULF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "NMULD", + argLen: 2, + commutative: true, + asm: arm.ANMULD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "DIVF", + argLen: 2, + asm: arm.ADIVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "DIVD", + argLen: 2, + asm: arm.ADIVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MULAF", + argLen: 3, + resultInArg0: true, + asm: arm.AMULAF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MULAD", + argLen: 3, + resultInArg0: true, + asm: arm.AMULAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MULSF", + argLen: 3, + resultInArg0: true, + asm: arm.AMULSF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MULSD", + argLen: 3, + resultInArg0: true, + asm: arm.AMULSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMULAD", + argLen: 3, + resultInArg0: true, + asm: arm.AFMULAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 
F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "AND", + argLen: 2, + commutative: true, + asm: arm.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ANDconst", + auxType: auxInt32, + argLen: 1, + asm: arm.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "OR", + argLen: 2, + commutative: true, + asm: arm.AORR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ORconst", + auxType: auxInt32, + argLen: 1, + asm: arm.AORR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "XOR", + argLen: 2, + commutative: true, + asm: arm.AEOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "XORconst", + auxType: auxInt32, + argLen: 1, + asm: arm.AEOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "BIC", + argLen: 2, + asm: arm.ABIC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 
R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "BICconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ABIC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "BFX", + auxType: auxInt32, + argLen: 1, + asm: arm.ABFX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "BFXU", + auxType: auxInt32, + argLen: 1, + asm: arm.ABFXU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MVN", + argLen: 1, + asm: arm.AMVN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "NEGF", + argLen: 1, + asm: arm.ANEGF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "NEGD", + argLen: 1, + asm: arm.ANEGD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "SQRTD", + argLen: 1, + asm: arm.ASQRTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 
F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "SQRTF", + argLen: 1, + asm: arm.ASQRTF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "ABSD", + argLen: 1, + asm: arm.AABSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CLZ", + argLen: 1, + asm: arm.ACLZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "REV", + argLen: 1, + asm: arm.AREV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "REV16", + argLen: 1, + asm: arm.AREV16, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RBIT", + argLen: 1, + asm: arm.ARBIT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SLL", + argLen: 2, + asm: arm.ASLL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SLLconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASLL, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SRL", + argLen: 2, + asm: arm.ASRL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SRLconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASRL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SRA", + argLen: 2, + asm: arm.ASRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SRAconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SRR", + argLen: 2, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SRRconst", + auxType: auxInt32, + argLen: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADDshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADDshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADDshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SUBshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SUBshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SUBshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSBshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 
R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSBshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSBshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ANDshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ANDshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ANDshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ORshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AORR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + 
{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ORshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AORR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ORshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AORR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "XORshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "XORshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "XORshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "XORshiftRR", + auxType: auxInt32, + argLen: 2, + asm: arm.AEOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 
R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "BICshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ABIC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "BICshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ABIC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "BICshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ABIC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MVNshiftLL", + auxType: auxInt32, + argLen: 1, + asm: arm.AMVN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MVNshiftRL", + auxType: auxInt32, + argLen: 1, + asm: arm.AMVN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MVNshiftRA", + auxType: auxInt32, + argLen: 1, + asm: arm.AMVN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADCshiftLL", + auxType: auxInt32, + argLen: 
3, + asm: arm.AADC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADCshiftRL", + auxType: auxInt32, + argLen: 3, + asm: arm.AADC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADCshiftRA", + auxType: auxInt32, + argLen: 3, + asm: arm.AADC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SBCshiftLL", + auxType: auxInt32, + argLen: 3, + asm: arm.ASBC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SBCshiftRL", + auxType: auxInt32, + argLen: 3, + asm: arm.ASBC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SBCshiftRA", + auxType: auxInt32, + argLen: 3, + asm: arm.ASBC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSCshiftLL", + auxType: auxInt32, + argLen: 3, + asm: arm.ARSC, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSCshiftRL", + auxType: auxInt32, + argLen: 3, + asm: arm.ARSC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSCshiftRA", + auxType: auxInt32, + argLen: 3, + asm: arm.ARSC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADDSshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADDSshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADDSshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SUBSshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, + reg: regInfo{ 
+ inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SUBSshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SUBSshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSBSshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSBSshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSBSshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADDshiftLLreg", + argLen: 3, + 
asm: arm.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADDshiftRLreg", + argLen: 3, + asm: arm.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADDshiftRAreg", + argLen: 3, + asm: arm.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SUBshiftLLreg", + argLen: 3, + asm: arm.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SUBshiftRLreg", + argLen: 3, + asm: arm.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SUBshiftRAreg", + argLen: 3, + asm: arm.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSBshiftLLreg", + argLen: 3, + asm: arm.ARSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSBshiftRLreg", + argLen: 3, + asm: arm.ARSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSBshiftRAreg", + argLen: 3, + asm: arm.ARSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ANDshiftLLreg", + argLen: 3, + asm: arm.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ANDshiftRLreg", + argLen: 3, + asm: arm.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ANDshiftRAreg", + argLen: 3, + asm: 
arm.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ORshiftLLreg", + argLen: 3, + asm: arm.AORR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ORshiftRLreg", + argLen: 3, + asm: arm.AORR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ORshiftRAreg", + argLen: 3, + asm: arm.AORR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "XORshiftLLreg", + argLen: 3, + asm: arm.AEOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "XORshiftRLreg", + argLen: 3, + asm: arm.AEOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 
R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "XORshiftRAreg", + argLen: 3, + asm: arm.AEOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "BICshiftLLreg", + argLen: 3, + asm: arm.ABIC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "BICshiftRLreg", + argLen: 3, + asm: arm.ABIC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "BICshiftRAreg", + argLen: 3, + asm: arm.ABIC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MVNshiftLLreg", + argLen: 2, + asm: arm.AMVN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MVNshiftRLreg", + argLen: 2, + asm: arm.AMVN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MVNshiftRAreg", + argLen: 2, + asm: arm.AMVN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADCshiftLLreg", + argLen: 4, + asm: arm.AADC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADCshiftRLreg", + argLen: 4, + asm: arm.AADC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADCshiftRAreg", + argLen: 4, + asm: arm.AADC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SBCshiftLLreg", + argLen: 4, + asm: arm.ASBC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: 
"SBCshiftRLreg", + argLen: 4, + asm: arm.ASBC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SBCshiftRAreg", + argLen: 4, + asm: arm.ASBC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSCshiftLLreg", + argLen: 4, + asm: arm.ARSC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSCshiftRLreg", + argLen: 4, + asm: arm.ARSC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSCshiftRAreg", + argLen: 4, + asm: arm.ARSC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADDSshiftLLreg", + argLen: 3, + asm: arm.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 
R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADDSshiftRLreg", + argLen: 3, + asm: arm.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "ADDSshiftRAreg", + argLen: 3, + asm: arm.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SUBSshiftLLreg", + argLen: 3, + asm: arm.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SUBSshiftRLreg", + argLen: 3, + asm: arm.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SUBSshiftRAreg", + argLen: 3, + asm: arm.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSBSshiftLLreg", + argLen: 3, + asm: arm.ARSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSBSshiftRLreg", + argLen: 3, + asm: arm.ARSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "RSBSshiftRAreg", + argLen: 3, + asm: arm.ARSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "CMP", + argLen: 2, + asm: arm.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "CMPconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "CMN", + argLen: 2, + commutative: true, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "CMNconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, 
+ { + name: "TST", + argLen: 2, + commutative: true, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TSTconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TEQ", + argLen: 2, + commutative: true, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TEQconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "CMPF", + argLen: 2, + asm: arm.ACMPF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CMPD", + argLen: 2, + asm: arm.ACMPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CMPshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "CMPshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "CMPshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 
R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "CMNshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "CMNshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "CMNshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TSTshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TSTshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TSTshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TEQshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TEQshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, 
// R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TEQshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "CMPshiftLLreg", + argLen: 3, + asm: arm.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "CMPshiftRLreg", + argLen: 3, + asm: arm.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "CMPshiftRAreg", + argLen: 3, + asm: arm.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "CMNshiftLLreg", + argLen: 3, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "CMNshiftRLreg", + argLen: 3, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "CMNshiftRAreg", + argLen: 3, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "TSTshiftLLreg", 
+ argLen: 3, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "TSTshiftRLreg", + argLen: 3, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "TSTshiftRAreg", + argLen: 3, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "TEQshiftLLreg", + argLen: 3, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "TEQshiftRLreg", + argLen: 3, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "TEQshiftRAreg", + argLen: 3, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "CMPF0", + argLen: 1, + asm: arm.ACMPF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CMPD0", + argLen: 1, + asm: arm.ACMPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + 
}, + { + name: "MOVWconst", + auxType: auxInt32, + argLen: 0, + rematerializeable: true, + asm: arm.AMOVW, + reg: regInfo{ + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVFconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: arm.AMOVF, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: arm.AMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MOVWaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: arm.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294975488}, // SP SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: 
"MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVFload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + }, + }, + { + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + }, + }, + { + name: "MOVWstore", + auxType: 
auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + }, + }, + { + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MOVWloadidx", + argLen: 3, + asm: arm.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVWloadshiftLL", + auxType: auxInt32, + argLen: 3, + asm: arm.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVWloadshiftRL", + auxType: auxInt32, + argLen: 3, + asm: arm.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVWloadshiftRA", + auxType: auxInt32, + argLen: 3, + 
asm: arm.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVBUloadidx", + argLen: 3, + asm: arm.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVBloadidx", + argLen: 3, + asm: arm.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVHUloadidx", + argLen: 3, + asm: arm.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVHloadidx", + argLen: 3, + asm: arm.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVWstoreidx", + argLen: 4, + asm: arm.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + }, + }, + { + name: "MOVWstoreshiftLL", + auxType: auxInt32, + argLen: 4, + asm: arm.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 
22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + }, + }, + { + name: "MOVWstoreshiftRL", + auxType: auxInt32, + argLen: 4, + asm: arm.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + }, + }, + { + name: "MOVWstoreshiftRA", + auxType: auxInt32, + argLen: 4, + asm: arm.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + }, + }, + { + name: "MOVBstoreidx", + argLen: 4, + asm: arm.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + }, + }, + { + name: "MOVHstoreidx", + argLen: 4, + asm: arm.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + }, + }, + { + name: "MOVBreg", + argLen: 1, + asm: arm.AMOVBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVBUreg", + argLen: 1, + asm: arm.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVHreg", + argLen: 1, + asm: arm.AMOVHS, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVHUreg", + argLen: 1, + asm: arm.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVWreg", + argLen: 1, + asm: arm.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVWnop", + argLen: 1, + resultInArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVWF", + argLen: 1, + asm: arm.AMOVWF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2147483648, // F15 + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MOVWD", + argLen: 1, + asm: arm.AMOVWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2147483648, // F15 + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MOVWUF", + argLen: 1, + asm: arm.AMOVWF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2147483648, // F15 + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MOVWUD", + argLen: 1, + asm: arm.AMOVWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 
2147483648, // F15 + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MOVFW", + argLen: 1, + asm: arm.AMOVFW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + clobbers: 2147483648, // F15 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVDW", + argLen: 1, + asm: arm.AMOVDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + clobbers: 2147483648, // F15 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVFWU", + argLen: 1, + asm: arm.AMOVFW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + clobbers: 2147483648, // F15 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVDWU", + argLen: 1, + asm: arm.AMOVDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + clobbers: 2147483648, // F15 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVFD", + argLen: 1, + asm: arm.AMOVFD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MOVDF", + argLen: 1, + asm: arm.AMOVDF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CMOVWHSconst", + auxType: auxInt32, + argLen: 2, + resultInArg0: true, + 
asm: arm.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "CMOVWLSconst", + auxType: auxInt32, + argLen: 2, + resultInArg0: true, + asm: arm.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "SRAcond", + argLen: 3, + asm: arm.ASRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 128}, // R7 + {0, 29695}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP R14 + }, + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + 
name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "Equal", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "NotEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "LessThan", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "LessEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "GreaterThan", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "GreaterEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "LessThanU", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "LessEqualU", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "GreaterThanU", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "GreaterEqualU", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "DUFFZERO", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 1}, // R0 + }, + clobbers: 20482, // R1 R12 R14 + }, + }, + { + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, + 
reg: regInfo{ + inputs: []inputInfo{ + {0, 4}, // R2 + {1, 2}, // R1 + }, + clobbers: 20487, // R0 R1 R2 R12 R14 + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2, // R1 + }, + }, + { + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4}, // R2 + {1, 2}, // R1 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 6, // R1 R2 + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 128}, // R7 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4}, // R2 + {1, 8}, // R3 + }, + }, + }, + { + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 4}, // R2 + }, + }, + }, + { + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // R0 + {1, 2}, // R1 + }, + }, + }, + { + name: "LoweredPanicExtendA", + auxType: auxInt64, + argLen: 4, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // R4 + {1, 4}, // R2 + {2, 8}, // R3 + }, + }, + }, + { + name: "LoweredPanicExtendB", + auxType: auxInt64, + argLen: 
4, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // R4 + {1, 2}, // R1 + {2, 4}, // R2 + }, + }, + }, + { + name: "LoweredPanicExtendC", + auxType: auxInt64, + argLen: 4, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // R4 + {1, 1}, // R0 + {2, 2}, // R1 + }, + }, + }, + { + name: "FlagConstant", + auxType: auxFlagConstant, + argLen: 0, + reg: regInfo{}, + }, + { + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + reg: regInfo{ + clobbers: 4294922240, // R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + outputs: []outputInfo{ + {0, 256}, // R8 + }, + }, + }, + + { + name: "ADCSflags", + argLen: 3, + commutative: true, + asm: arm64.AADCS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ADCzerocarry", + argLen: 1, + asm: arm64.AADC, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ADD", + argLen: 2, + commutative: true, + asm: arm64.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + 
name: "ADDconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1878786047}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ADDSconstflags", + auxType: auxInt64, + argLen: 1, + asm: arm64.AADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ADDSflags", + argLen: 2, + commutative: true, + asm: arm64.AADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "SUB", + argLen: 2, + asm: arm64.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "SUBconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "SBCSflags", + argLen: 3, + asm: arm64.ASBCS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "SUBSflags", + argLen: 2, + asm: arm64.ASUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MUL", + argLen: 2, + commutative: true, + asm: arm64.AMUL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MULW", + argLen: 2, + commutative: true, + asm: arm64.AMULW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 
R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MNEG", + argLen: 2, + commutative: true, + asm: arm64.AMNEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MNEGW", + argLen: 2, + commutative: true, + asm: arm64.AMNEGW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MULH", + argLen: 2, + commutative: true, + asm: arm64.ASMULH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "UMULH", + argLen: 2, + commutative: true, + asm: arm64.AUMULH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // 
R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MULL", + argLen: 2, + commutative: true, + asm: arm64.ASMULL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "UMULL", + argLen: 2, + commutative: true, + asm: arm64.AUMULL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "DIV", + argLen: 2, + asm: arm64.ASDIV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "UDIV", + argLen: 2, + asm: arm64.AUDIV, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "DIVW", + argLen: 2, + asm: arm64.ASDIVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "UDIVW", + argLen: 2, + asm: arm64.AUDIVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOD", + argLen: 2, + asm: arm64.AREM, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "UMOD", + argLen: 2, + asm: arm64.AUREM, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MODW", + argLen: 2, + asm: arm64.AREMW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "UMODW", + argLen: 2, + asm: arm64.AUREMW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "FADDS", + argLen: 2, + commutative: true, + asm: arm64.AFADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 
F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FADDD", + argLen: 2, + commutative: true, + asm: arm64.AFADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSUBS", + argLen: 2, + asm: arm64.AFSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSUBD", + argLen: 2, + asm: arm64.AFSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMULS", + argLen: 2, + commutative: true, + asm: arm64.AFMULS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 
F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMULD", + argLen: 2, + commutative: true, + asm: arm64.AFMULD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNMULS", + argLen: 2, + commutative: true, + asm: arm64.AFNMULS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNMULD", + argLen: 2, + commutative: true, + asm: arm64.AFNMULD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 
F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FDIVS", + argLen: 2, + asm: arm64.AFDIVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FDIVD", + argLen: 2, + asm: arm64.AFDIVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "AND", + argLen: 2, + commutative: true, + asm: arm64.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + 
asm: arm64.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "OR", + argLen: 2, + commutative: true, + asm: arm64.AORR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ORconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AORR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "XOR", + argLen: 2, + commutative: true, + asm: arm64.AEOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "XORconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AEOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 
R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "BIC", + argLen: 2, + asm: arm64.ABIC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "EON", + argLen: 2, + asm: arm64.AEON, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ORN", + argLen: 2, + asm: arm64.AORN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MVN", + argLen: 1, + asm: arm64.AMVN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 
R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "NEG", + argLen: 1, + asm: arm64.ANEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "NEGSflags", + argLen: 1, + asm: arm64.ANEGS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "NGCzerocarry", + argLen: 1, + asm: arm64.ANGC, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "FABSD", + argLen: 1, + asm: arm64.AFABSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNEGS", + argLen: 1, + asm: arm64.AFNEGS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNEGD", + argLen: 1, + asm: arm64.AFNEGD, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSQRTD", + argLen: 1, + asm: arm64.AFSQRTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSQRTS", + argLen: 1, + asm: arm64.AFSQRTS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMIND", + argLen: 2, + asm: arm64.AFMIND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMINS", + argLen: 2, + asm: arm64.AFMINS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 
F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMAXD", + argLen: 2, + asm: arm64.AFMAXD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMAXS", + argLen: 2, + asm: arm64.AFMAXS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "REV", + argLen: 1, + asm: arm64.AREV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "REVW", + argLen: 1, + asm: 
arm64.AREVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "REV16", + argLen: 1, + asm: arm64.AREV16, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "REV16W", + argLen: 1, + asm: arm64.AREV16W, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "RBIT", + argLen: 1, + asm: arm64.ARBIT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "RBITW", + argLen: 1, + asm: arm64.ARBITW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "CLZ", + argLen: 1, + asm: arm64.ACLZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "CLZW", + argLen: 1, + asm: arm64.ACLZW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "VCNT", + argLen: 1, + asm: arm64.AVCNT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "VUADDLV", + argLen: 1, + asm: arm64.AVUADDLV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "LoweredRound64F", + argLen: 1, + 
resultInArg0: true, + zeroWidth: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMADDS", + argLen: 3, + asm: arm64.AFMADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMADDD", + argLen: 3, + asm: arm64.AFMADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNMADDS", + argLen: 3, + asm: arm64.AFNMADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNMADDD", + argLen: 3, + asm: arm64.AFNMADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMSUBS", + argLen: 3, + asm: arm64.AFMSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 
F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMSUBD", + argLen: 3, + asm: arm64.AFMSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNMSUBS", + argLen: 3, + asm: arm64.AFNMSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNMSUBD", + argLen: 3, + asm: arm64.AFNMSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 
9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MADD", + argLen: 3, + asm: arm64.AMADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MADDW", + argLen: 3, + asm: arm64.AMADDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MSUB", + argLen: 3, + asm: arm64.AMSUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 
R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MSUBW", + argLen: 3, + asm: arm64.AMSUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "SLL", + argLen: 2, + asm: arm64.ALSL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "SLLconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ALSL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "SRL", + argLen: 2, + asm: arm64.ALSR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "SRLconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ALSR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "SRA", + argLen: 2, + asm: arm64.AASR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "SRAconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AASR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ROR", + argLen: 2, + asm: arm64.AROR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + 
}, + }, + { + name: "RORW", + argLen: 2, + asm: arm64.ARORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "RORconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.AROR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "RORWconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ARORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "EXTRconst", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEXTR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "EXTRWconst", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEXTRW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 
R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "CMP", + argLen: 2, + asm: arm64.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "CMPconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "CMPW", + argLen: 2, + asm: arm64.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "CMPWconst", + auxType: auxInt32, + argLen: 1, + asm: arm64.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "CMN", + argLen: 2, + commutative: true, + asm: arm64.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "CMNconst", + auxType: auxInt64, + argLen: 1, + asm: 
arm64.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "CMNW", + argLen: 2, + commutative: true, + asm: arm64.ACMNW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "CMNWconst", + auxType: auxInt32, + argLen: 1, + asm: arm64.ACMNW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "TST", + argLen: 2, + commutative: true, + asm: arm64.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "TSTconst", + auxType: auxInt64, + argLen: 1, + asm: arm64.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "TSTW", + argLen: 2, + commutative: true, + asm: arm64.ATSTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "TSTWconst", + auxType: auxInt32, + argLen: 1, + asm: arm64.ATSTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 
R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "FCMPS", + argLen: 2, + asm: arm64.AFCMPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCMPD", + argLen: 2, + asm: arm64.AFCMPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCMPS0", + argLen: 1, + asm: arm64.AFCMPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCMPD0", + argLen: 1, + asm: arm64.AFCMPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MVNshiftLL", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MVNshiftRL", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 
R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MVNshiftRA", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MVNshiftRO", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "NEGshiftLL", + auxType: auxInt64, + argLen: 1, + asm: arm64.ANEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "NEGshiftRL", + auxType: auxInt64, + argLen: 1, + asm: arm64.ANEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "NEGshiftRA", + auxType: auxInt64, + argLen: 1, + asm: arm64.ANEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ADDshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ADDshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ADDshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "SUBshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 
R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "SUBshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "SUBshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ANDshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ANDshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ANDshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ANDshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ORshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + 
name: "ORshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ORshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ORshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "XORshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 
R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "XORshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "XORshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "XORshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "BICshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ABIC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 
R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "BICshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ABIC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "BICshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ABIC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "BICshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.ABIC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "EONshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + 
{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "EONshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "EONshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "EONshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AEON, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ORNshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 
R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ORNshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ORNshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "ORNshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.AORN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "CMPshiftLL", + auxType: auxInt64, + 
argLen: 2, + asm: arm64.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "CMPshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "CMPshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "CMNshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "CMNshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "CMNshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "TSTshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "TSTshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "TSTshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "TSTshiftRO", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "BFI", + auxType: auxARM64BitField, + argLen: 2, + resultInArg0: true, + asm: arm64.ABFI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 670826495}, // R0 R1 R2 R3 R4 
R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "BFXIL", + auxType: auxARM64BitField, + argLen: 2, + resultInArg0: true, + asm: arm64.ABFXIL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "SBFIZ", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.ASBFIZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "SBFX", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.ASBFX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "UBFIZ", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.AUBFIZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 
R30 + }, + }, + }, + { + name: "UBFX", + auxType: auxARM64BitField, + argLen: 1, + asm: arm64.AUBFX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: arm64.AMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "FMOVSconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: arm64.AFMOVS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: arm64.AFMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVDaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: arm64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517632}, // SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 
SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 
R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVWUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LDP", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.ALDP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "FMOVSload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 
F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: arm64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVDloadidx", + argLen: 3, + asm: arm64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVWloadidx", + argLen: 3, + asm: arm64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVWUloadidx", + argLen: 3, + asm: arm64.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVHloadidx", + argLen: 3, + asm: arm64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVHUloadidx", + argLen: 3, + asm: arm64.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVBloadidx", + argLen: 3, + asm: arm64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVBUloadidx", + argLen: 3, + asm: arm64.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 
R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "FMOVSloadidx", + argLen: 3, + asm: arm64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDloadidx", + argLen: 3, + asm: arm64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVHloadidx2", + argLen: 3, + asm: arm64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVHUloadidx2", + argLen: 3, + asm: arm64.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVWloadidx4", + argLen: 3, + asm: arm64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVWUloadidx4", + argLen: 3, + asm: arm64.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVDloadidx8", + argLen: 3, + asm: arm64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "FMOVSloadidx4", + argLen: 3, + asm: arm64.AFMOVS, + reg: 
regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDloadidx8", + argLen: 3, + asm: arm64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, 
+ symEffect: SymWrite, + asm: arm64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "STP", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.ASTP, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "FMOVSstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVBstoreidx", + argLen: 4, + asm: arm64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVHstoreidx", + argLen: 4, + asm: arm64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVWstoreidx", + argLen: 4, + asm: arm64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVDstoreidx", + argLen: 4, + asm: arm64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + 
{2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "FMOVSstoreidx", + argLen: 4, + asm: arm64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDstoreidx", + argLen: 4, + asm: arm64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVHstoreidx2", + argLen: 4, + asm: arm64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVWstoreidx4", + argLen: 4, + asm: arm64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 
R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVDstoreidx8", + argLen: 4, + asm: arm64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "FMOVSstoreidx4", + argLen: 4, + asm: arm64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDstoreidx8", + argLen: 4, + asm: arm64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVB, + reg: 
regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVDstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVQstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.ASTP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVBstorezeroidx", + argLen: 3, + asm: arm64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVHstorezeroidx", + argLen: 3, + asm: arm64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 
R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVWstorezeroidx", + argLen: 3, + asm: arm64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVDstorezeroidx", + argLen: 3, + asm: arm64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVHstorezeroidx2", + argLen: 3, + asm: arm64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVWstorezeroidx4", + argLen: 3, + asm: arm64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "MOVDstorezeroidx8", + argLen: 3, + asm: arm64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 
R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "FMOVDgpfp", + argLen: 1, + asm: arm64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDfpgp", + argLen: 1, + asm: arm64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "FMOVSgpfp", + argLen: 1, + asm: arm64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVSfpgp", + argLen: 1, + asm: arm64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVBreg", + argLen: 1, + asm: arm64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: 
[]outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVBUreg", + argLen: 1, + asm: arm64.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVHreg", + argLen: 1, + asm: arm64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVHUreg", + argLen: 1, + asm: arm64.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVWreg", + argLen: 1, + asm: arm64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVWUreg", + argLen: 1, + asm: arm64.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 
R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVDreg", + argLen: 1, + asm: arm64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MOVDnop", + argLen: 1, + resultInArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "SCVTFWS", + argLen: 1, + asm: arm64.ASCVTFWS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SCVTFWD", + argLen: 1, + asm: arm64.ASCVTFWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "UCVTFWS", + argLen: 1, + asm: arm64.AUCVTFWS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 
F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "UCVTFWD", + argLen: 1, + asm: arm64.AUCVTFWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SCVTFS", + argLen: 1, + asm: arm64.ASCVTFS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SCVTFD", + argLen: 1, + asm: arm64.ASCVTFD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "UCVTFS", + argLen: 1, + asm: arm64.AUCVTFS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "UCVTFD", + argLen: 1, + asm: arm64.AUCVTFD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 
F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTZSSW", + argLen: 1, + asm: arm64.AFCVTZSSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "FCVTZSDW", + argLen: 1, + asm: arm64.AFCVTZSDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "FCVTZUSW", + argLen: 1, + asm: arm64.AFCVTZUSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "FCVTZUDW", + argLen: 1, + asm: arm64.AFCVTZUDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "FCVTZSS", + argLen: 1, + asm: arm64.AFCVTZSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + 
}, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "FCVTZSD", + argLen: 1, + asm: arm64.AFCVTZSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "FCVTZUS", + argLen: 1, + asm: arm64.AFCVTZUS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "FCVTZUD", + argLen: 1, + asm: arm64.AFCVTZUD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "FCVTSD", + argLen: 1, + asm: arm64.AFCVTSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTDS", + argLen: 1, + asm: arm64.AFCVTDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 
F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FRINTAD", + argLen: 1, + asm: arm64.AFRINTAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FRINTMD", + argLen: 1, + asm: arm64.AFRINTMD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FRINTND", + argLen: 1, + asm: arm64.AFRINTND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FRINTPD", + argLen: 1, + asm: arm64.AFRINTPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 
F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FRINTZD", + argLen: 1, + asm: arm64.AFRINTZD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CSEL", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSEL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "CSEL0", + auxType: auxCCop, + argLen: 2, + asm: arm64.ACSEL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "CSINC", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSINC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "CSINV", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSINV, + reg: regInfo{ 
+ inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "CSNEG", + auxType: auxCCop, + argLen: 3, + asm: arm64.ACSNEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "CSETM", + auxType: auxCCop, + argLen: 1, + asm: arm64.ACSETM, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLclosure", + 
auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 67108864}, // R26 + {0, 1744568319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 SP + }, + clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "Equal", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "NotEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LessThan", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LessEqual", + argLen: 1, + reg: regInfo{ 
+ outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "GreaterThan", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "GreaterEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LessThanU", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LessEqualU", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "GreaterThanU", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "GreaterEqualU", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LessThanF", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LessEqualF", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "GreaterThanF", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "GreaterEqualF", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "NotLessThanF", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "NotLessEqualF", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "NotGreaterThanF", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "NotGreaterEqualF", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LessThanNoov", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "GreaterEqualNoov", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1048576}, // R20 + }, + clobbers: 538116096, // R16 R17 R20 R30 + }, + }, + { + name: "LoweredZero", + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65536}, // R16 + {1, 
670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + clobbers: 65536, // R16 + }, + }, + { + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2097152}, // R21 + {1, 1048576}, // R20 + }, + clobbers: 607322112, // R16 R17 R20 R21 R26 R30 + }, + }, + { + name: "LoweredMove", + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 131072}, // R17 + {1, 65536}, // R16 + {2, 637272063}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R26 R30 + }, + clobbers: 33751040, // R16 R17 R25 + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 67108864}, // R26 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "FlagConstant", + auxType: auxFlagConstant, + argLen: 0, + reg: regInfo{}, + }, + { + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "LDAR", + argLen: 2, + faultOnNilArg0: true, + asm: arm64.ALDAR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + 
}, + }, + { + name: "LDARB", + argLen: 2, + faultOnNilArg0: true, + asm: arm64.ALDARB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LDARW", + argLen: 2, + faultOnNilArg0: true, + asm: arm64.ALDARW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "STLRB", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: arm64.ASTLRB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "STLR", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: arm64.ASTLR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "STLRW", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: arm64.ASTLRW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicExchange64Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicExchange32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: 
true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicAdd64Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 
9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicAdd32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 
R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicCas64Variant", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicCas32Variant", + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicAnd8", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + 
unsafePoint: true, + asm: arm64.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicAnd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicOr8", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AORR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicOr32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: arm64.AORR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicAnd8Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicAnd32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicOr8Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + 
}, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredAtomicOr32Variant", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + reg: regInfo{ + clobbers: 9223372035244359680, // R16 R17 R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + outputs: []outputInfo{ + {0, 33554432}, // R25 + }, + }, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4}, // R2 + {1, 8}, // R3 + }, + }, + }, + { + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 4}, // R2 + }, + }, + }, + { + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // R0 + {1, 2}, // R1 + }, + }, + }, + { + name: "PRFM", + auxType: auxInt64, + argLen: 2, + hasSideEffects: true, + asm: arm64.APRFM, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "DMB", + auxType: auxInt64, + argLen: 1, + hasSideEffects: true, + asm: arm64.ADMB, + reg: regInfo{}, + }, + + { + 
name: "ADDV", + argLen: 2, + commutative: true, + asm: loong64.AADDVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "ADDVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AADDVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SUBV", + argLen: 2, + asm: loong64.ASUBVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SUBVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASUBVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MULV", + argLen: 2, + commutative: true, + asm: loong64.AMULV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MULHV", + argLen: 2, + commutative: true, + asm: loong64.AMULHV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MULHVU", + argLen: 2, + commutative: true, + asm: loong64.AMULHVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "DIVV", + argLen: 2, + asm: loong64.ADIVV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "DIVVU", + argLen: 2, + asm: loong64.ADIVVU, + reg: regInfo{ 
+ inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "REMV", + argLen: 2, + asm: loong64.AREMV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "REMVU", + argLen: 2, + asm: loong64.AREMVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "ADDF", + argLen: 2, + commutative: true, + asm: loong64.AADDF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 
F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "ADDD", + argLen: 2, + commutative: true, + asm: loong64.AADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SUBF", + argLen: 2, + asm: loong64.ASUBF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SUBD", + argLen: 2, + asm: loong64.ASUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MULF", + argLen: 2, + commutative: true, + asm: loong64.AMULF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 
F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MULD", + argLen: 2, + commutative: true, + asm: loong64.AMULD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "DIVF", + argLen: 2, + asm: loong64.ADIVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "DIVD", + argLen: 2, + asm: loong64.ADIVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 
F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "AND", + argLen: 2, + commutative: true, + asm: loong64.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "OR", + argLen: 2, + commutative: true, + asm: loong64.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "ORconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 
R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "XOR", + argLen: 2, + commutative: true, + asm: loong64.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "XORconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "NOR", + argLen: 2, + commutative: true, + asm: loong64.ANOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "NORconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ANOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "NEGV", + argLen: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "NEGF", + argLen: 1, + asm: loong64.ANEGF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "NEGD", + argLen: 1, + asm: loong64.ANEGD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SQRTD", + argLen: 1, + asm: loong64.ASQRTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SQRTF", + argLen: 1, + asm: loong64.ASQRTF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 
F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MASKEQZ", + argLen: 2, + asm: loong64.AMASKEQZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MASKNEZ", + argLen: 2, + asm: loong64.AMASKNEZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SLLV", + argLen: 2, + asm: loong64.ASLLV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SLLVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASLLV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SRLV", + argLen: 2, + 
asm: loong64.ASRLV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SRLVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASRLV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SRAV", + argLen: 2, + asm: loong64.ASRAV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SRAVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASRAV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "ROTR", + argLen: 2, + asm: loong64.AROTR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 
R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "ROTRV", + argLen: 2, + asm: loong64.AROTRV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "ROTRconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AROTR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "ROTRVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AROTRV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SGT", + argLen: 2, + asm: loong64.ASGT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // 
R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SGTconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASGT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SGTU", + argLen: 2, + asm: loong64.ASGTU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SGTUconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASGTU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "CMPEQF", + argLen: 2, + asm: loong64.ACMPEQF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPEQD", + argLen: 2, + asm: loong64.ACMPEQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 
F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGEF", + argLen: 2, + asm: loong64.ACMPGEF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGED", + argLen: 2, + asm: loong64.ACMPGED, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGTF", + argLen: 2, + asm: loong64.ACMPGTF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGTD", + argLen: 2, + asm: loong64.ACMPGTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVVconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: 
loong64.AMOVV, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVFconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: loong64.AMOVF, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: loong64.AMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVVaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: loong64.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018427387908}, // SP SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + 
{0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVWUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, 
+ }, + }, + { + name: "MOVVload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVFload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + 
symEffect: SymWrite, + asm: loong64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "MOVVstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 
R28 R29 R31 SB + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "MOVVstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "MOVBreg", + argLen: 1, + asm: loong64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVBUreg", + argLen: 1, + asm: loong64.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVHreg", + argLen: 1, + asm: loong64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVHUreg", + argLen: 1, + asm: loong64.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVWreg", + argLen: 1, + asm: loong64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVWUreg", + argLen: 1, + asm: loong64.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVVreg", + argLen: 1, + asm: loong64.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 
R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVVnop", + argLen: 1, + resultInArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVWF", + argLen: 1, + asm: loong64.AMOVWF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVWD", + argLen: 1, + asm: loong64.AMOVWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVVF", + argLen: 1, + asm: loong64.AMOVVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVVD", + argLen: 1, + asm: loong64.AMOVVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // 
F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "TRUNCFW", + argLen: 1, + asm: loong64.ATRUNCFW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "TRUNCDW", + argLen: 1, + asm: loong64.ATRUNCDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "TRUNCFV", + argLen: 1, + asm: loong64.ATRUNCFV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "TRUNCDV", + argLen: 1, + asm: loong64.ATRUNCDV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 
F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVFD", + argLen: 1, + asm: loong64.AMOVFD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVDF", + argLen: 1, + asm: loong64.AMOVDF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 268435456}, // R29 + {0, 1071644668}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 
R24 R25 R26 R27 R28 R29 R31 + }, + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 524288}, // R20 + }, + clobbers: 524290, // R1 R20 + }, + }, + { + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1048576}, // R21 + {1, 524288}, // R20 + }, + clobbers: 1572866, // R1 R20 R21 + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 524288}, // R20 + {1, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + clobbers: 524288, // R20 + }, + }, + { + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1048576}, // R21 + {1, 524288}, // R20 + {2, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + clobbers: 1572864, // R20 R21 + }, + }, + { + name: "LoweredAtomicLoad8", + argLen: 2, + faultOnNilArg0: true, + reg: 
regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicLoad64", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: 
"LoweredAtomicStore64", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicStorezero32", + argLen: 2, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicStorezero64", + argLen: 2, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g 
R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicAddconst32", + auxType: auxInt32, + argLen: 2, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicAddconst64", + auxType: auxInt64, + argLen: 2, + 
resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "FPFlagTrue", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "FPFlagFalse", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 268435456}, // R29 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + reg: regInfo{ + clobbers: 4611686017353646082, // R1 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + outputs: []outputInfo{ + {0, 268435456}, // R29 + }, + }, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4194304}, // R23 + {1, 8388608}, // R24 + }, + }, + }, + { + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1048576}, // R21 + {1, 4194304}, // R23 + }, + }, + }, + { + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, + 
reg: regInfo{ + inputs: []inputInfo{ + {0, 524288}, // R20 + {1, 1048576}, // R21 + }, + }, + }, + + { + name: "ADD", + argLen: 2, + commutative: true, + asm: mips.AADDU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "ADDconst", + auxType: auxInt32, + argLen: 1, + asm: mips.AADDU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 536870910}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "SUB", + argLen: 2, + asm: mips.ASUBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "SUBconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ASUBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "MUL", + argLen: 2, + commutative: true, + asm: mips.AMUL, + reg: regInfo{ + inputs: []inputInfo{ + 
{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + clobbers: 105553116266496, // HI LO + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "MULT", + argLen: 2, + commutative: true, + asm: mips.AMUL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO + }, + }, + }, + { + name: "MULTU", + argLen: 2, + commutative: true, + asm: mips.AMULU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO + }, + }, + }, + { + name: "DIV", + argLen: 2, + asm: mips.ADIV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO + }, + }, + }, + { + name: "DIVU", + argLen: 2, + asm: mips.ADIVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 
R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 35184372088832}, // HI + {1, 70368744177664}, // LO + }, + }, + }, + { + name: "ADDF", + argLen: 2, + commutative: true, + asm: mips.AADDF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "ADDD", + argLen: 2, + commutative: true, + asm: mips.AADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "SUBF", + argLen: 2, + asm: mips.ASUBF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "SUBD", + argLen: 2, + asm: mips.ASUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "MULF", + argLen: 2, + commutative: true, + asm: mips.AMULF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + 
{1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "MULD", + argLen: 2, + commutative: true, + asm: mips.AMULD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "DIVF", + argLen: 2, + asm: mips.ADIVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "DIVD", + argLen: 2, + asm: mips.ADIVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "AND", + argLen: 2, + commutative: true, + asm: mips.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "ANDconst", + auxType: auxInt32, + argLen: 1, + asm: mips.AAND, + reg: 
regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "OR", + argLen: 2, + commutative: true, + asm: mips.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "ORconst", + auxType: auxInt32, + argLen: 1, + asm: mips.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "XOR", + argLen: 2, + commutative: true, + asm: mips.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "XORconst", + auxType: auxInt32, + argLen: 1, + asm: mips.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "NOR", + argLen: 2, + commutative: true, + asm: mips.ANOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "NORconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ANOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "NEG", + argLen: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "NEGF", + argLen: 1, + asm: mips.ANEGF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "NEGD", + argLen: 1, + asm: mips.ANEGD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "ABSD", + 
argLen: 1, + asm: mips.AABSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "SQRTD", + argLen: 1, + asm: mips.ASQRTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "SQRTF", + argLen: 1, + asm: mips.ASQRTF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "SLL", + argLen: 2, + asm: mips.ASLL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "SLLconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ASLL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "SRL", + argLen: 2, + asm: mips.ASRL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 
469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "SRLconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ASRL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "SRA", + argLen: 2, + asm: mips.ASRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "SRAconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ASRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "CLZ", + argLen: 1, + asm: mips.ACLZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "SGT", + argLen: 2, + asm: mips.ASGT, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "SGTconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ASGT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "SGTzero", + argLen: 1, + asm: mips.ASGT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "SGTU", + argLen: 2, + asm: mips.ASGTU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "SGTUconst", + auxType: auxInt32, + argLen: 1, + asm: mips.ASGTU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "SGTUzero", + argLen: 1, + asm: mips.ASGTU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "CMPEQF", + argLen: 2, + asm: mips.ACMPEQF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "CMPEQD", + argLen: 2, + asm: mips.ACMPEQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "CMPGEF", + argLen: 2, + asm: mips.ACMPGEF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "CMPGED", + argLen: 2, + asm: mips.ACMPGED, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "CMPGTF", + argLen: 2, + asm: mips.ACMPGTF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "CMPGTD", + argLen: 2, + asm: mips.ACMPGTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 
F22 F24 F26 F28 F30 + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "MOVWconst", + auxType: auxInt32, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVW, + reg: regInfo{ + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "MOVFconst", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVF, + reg: regInfo{ + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "MOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "MOVWaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: mips.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 140737555464192}, // SP SB + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 
SB + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "MOVFload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "MOVDload", + auxType: auxSymOff, + 
argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + }, + }, + { + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + }, + }, + { + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + }, + }, + { + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 
R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + }, + }, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + }, + }, + { + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + }, + }, + { + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + }, + }, + { + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + }, + }, + { + name: "MOVWfpgp", + argLen: 1, + asm: mips.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "MOVWgpfp", + argLen: 1, + asm: mips.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // 
F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "MOVBreg", + argLen: 1, + asm: mips.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "MOVBUreg", + argLen: 1, + asm: mips.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "MOVHreg", + argLen: 1, + asm: mips.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "MOVHUreg", + argLen: 1, + asm: mips.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "MOVWreg", + argLen: 1, + asm: mips.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "MOVWnop", + argLen: 1, + resultInArg0: 
true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "CMOVZ", + argLen: 3, + resultInArg0: true, + asm: mips.ACMOVZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "CMOVZzero", + argLen: 2, + resultInArg0: true, + asm: mips.ACMOVZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "MOVWF", + argLen: 1, + asm: mips.AMOVWF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "MOVWD", + argLen: 1, + asm: mips.AMOVWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, 
// F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "TRUNCFW", + argLen: 1, + asm: mips.ATRUNCFW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "TRUNCDW", + argLen: 1, + asm: mips.ATRUNCDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "MOVFD", + argLen: 1, + asm: mips.AMOVFD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "MOVDF", + argLen: 1, + asm: mips.AMOVDF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + outputs: []outputInfo{ + {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 + }, + }, + }, + { + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + }, + }, + { + name: "CALLclosure", + 
auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 4194304}, // R22 + {0, 402653182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP R31 + }, + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + }, + }, + { + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + }, + }, + { + name: "LoweredAtomicLoad8", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 
140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + }, + }, + { + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + }, + }, + { + name: "LoweredAtomicStorezero", + argLen: 2, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + }, + }, + { + name: "LoweredAtomicExchange", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "LoweredAtomicAdd", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, 
+ { + name: "LoweredAtomicAddconst", + auxType: auxInt32, + argLen: 2, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "LoweredAtomicCas", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {2, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "LoweredAtomicAnd", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + }, + }, + { + name: "LoweredAtomicOr", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB + }, + }, + }, + { + name: "LoweredZero", + auxType: auxInt32, + argLen: 3, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + clobbers: 2, // R1 + }, + }, + { + name: "LoweredMove", + auxType: auxInt32, + argLen: 4, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4}, // R2 + {1, 2}, // R1 + {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + clobbers: 6, // R1 R2 + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 + }, + }, + }, + { + name: "FPFlagTrue", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "FPFlagFalse", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4194304}, // R22 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 
+ }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + reg: regInfo{ + clobbers: 140737219919872, // R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO + outputs: []outputInfo{ + {0, 16777216}, // R25 + }, + }, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 8}, // R3 + {1, 16}, // R4 + }, + }, + }, + { + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4}, // R2 + {1, 8}, // R3 + }, + }, + }, + { + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 4}, // R2 + }, + }, + }, + { + name: "LoweredPanicExtendA", + auxType: auxInt64, + argLen: 4, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 32}, // R5 + {1, 8}, // R3 + {2, 16}, // R4 + }, + }, + }, + { + name: "LoweredPanicExtendB", + auxType: auxInt64, + argLen: 4, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 32}, // R5 + {1, 4}, // R2 + {2, 8}, // R3 + }, + }, + }, + { + name: "LoweredPanicExtendC", + auxType: auxInt64, + argLen: 4, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 32}, // R5 + {1, 2}, // R1 + {2, 4}, // R2 + }, + }, + }, + + { + name: "ADDV", + argLen: 2, + commutative: true, + asm: mips.AADDVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "ADDVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.AADDVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 268435454}, // R1 R2 R3 R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "SUBV", + argLen: 2, + asm: mips.ASUBVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "SUBVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASUBVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MULV", + argLen: 2, + commutative: true, + asm: mips.AMULV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO + }, + }, + }, + { + name: "MULVU", + argLen: 2, + commutative: true, + asm: mips.AMULVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO + 
}, + }, + }, + { + name: "DIVV", + argLen: 2, + asm: mips.ADIVV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO + }, + }, + }, + { + name: "DIVVU", + argLen: 2, + asm: mips.ADIVVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 1152921504606846976}, // HI + {1, 2305843009213693952}, // LO + }, + }, + }, + { + name: "ADDF", + argLen: 2, + commutative: true, + asm: mips.AADDF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "ADDD", + argLen: 2, + commutative: true, + asm: mips.AADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 
F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SUBF", + argLen: 2, + asm: mips.ASUBF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SUBD", + argLen: 2, + asm: mips.ASUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MULF", + argLen: 2, + commutative: true, + asm: mips.AMULF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MULD", + argLen: 2, + commutative: true, + asm: mips.AMULD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // 
F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "DIVF", + argLen: 2, + asm: mips.ADIVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "DIVD", + argLen: 2, + asm: mips.ADIVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "AND", + argLen: 2, + commutative: true, + asm: mips.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + 
{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + asm: mips.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "OR", + argLen: 2, + commutative: true, + asm: mips.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "ORconst", + auxType: auxInt64, + argLen: 1, + asm: mips.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "XOR", + argLen: 2, + commutative: true, + asm: mips.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "XORconst", + auxType: auxInt64, + argLen: 1, + asm: mips.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + 
{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "NOR", + argLen: 2, + commutative: true, + asm: mips.ANOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "NORconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ANOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "NEGV", + argLen: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "NEGF", + argLen: 1, + asm: mips.ANEGF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "NEGD", + argLen: 1, + asm: mips.ANEGD, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "ABSD", + argLen: 1, + asm: mips.AABSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SQRTD", + argLen: 1, + asm: mips.ASQRTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SQRTF", + argLen: 1, + asm: mips.ASQRTF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SLLV", + argLen: 2, + asm: mips.ASLLV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + 
outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "SLLVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASLLV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "SRLV", + argLen: 2, + asm: mips.ASRLV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "SRLVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASRLV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "SRAV", + argLen: 2, + asm: mips.ASRAV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "SRAVconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASRAV, + reg: regInfo{ + inputs: []inputInfo{ + 
{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "SGT", + argLen: 2, + asm: mips.ASGT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "SGTconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASGT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "SGTU", + argLen: 2, + asm: mips.ASGTU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "SGTUconst", + auxType: auxInt64, + argLen: 1, + asm: mips.ASGTU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + 
name: "CMPEQF", + argLen: 2, + asm: mips.ACMPEQF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPEQD", + argLen: 2, + asm: mips.ACMPEQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGEF", + argLen: 2, + asm: mips.ACMPGEF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGED", + argLen: 2, + asm: mips.ACMPGED, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGTF", + argLen: 2, + asm: mips.ACMPGTF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 
F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGTD", + argLen: 2, + asm: mips.ACMPGTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVVconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVV, + reg: regInfo{ + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVFconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVF, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: mips.AMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVVaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: mips.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018460942336}, // SP SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + 
outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVWUload", + auxType: 
auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVVload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVFload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: mips.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVB, + reg: regInfo{ + inputs: 
[]inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "MOVVstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + 
}, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "MOVVstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: mips.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "MOVWfpgp", + argLen: 1, + asm: mips.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 
167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVWgpfp", + argLen: 1, + asm: mips.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVVfpgp", + argLen: 1, + asm: mips.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVVgpfp", + argLen: 1, + asm: mips.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVBreg", + argLen: 1, + asm: mips.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVBUreg", + argLen: 1, + asm: mips.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVHreg", + argLen: 1, + asm: mips.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVHUreg", + argLen: 1, + asm: mips.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVWreg", + argLen: 1, + asm: mips.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVWUreg", + argLen: 1, + asm: mips.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVVreg", + argLen: 1, + asm: mips.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVVnop", + argLen: 1, + resultInArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "MOVWF", + argLen: 1, + asm: mips.AMOVWF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVWD", + argLen: 1, + asm: mips.AMOVWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVVF", + argLen: 1, + asm: mips.AMOVVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVVD", + argLen: 1, + asm: mips.AMOVVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + 
}, + }, + { + name: "TRUNCFW", + argLen: 1, + asm: mips.ATRUNCFW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "TRUNCDW", + argLen: 1, + asm: mips.ATRUNCDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "TRUNCFV", + argLen: 1, + asm: mips.ATRUNCFV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "TRUNCDV", + argLen: 1, + asm: mips.ATRUNCDV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVFD", + argLen: 1, + asm: mips.AMOVFD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 
F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVDF", + argLen: 1, + asm: mips.AMOVDF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 4194304}, // R22 + {0, 201326590}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP R31 + }, + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + }, + }, + { + name: "CALLinter", + auxType: 
auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + }, + }, + { + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + clobbers: 134217730, // R1 R31 + }, + }, + { + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4}, // R2 + {1, 2}, // R1 + }, + clobbers: 134217734, // R1 R2 R31 + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + clobbers: 2, // R1 + }, + }, + { + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4}, // R2 + {1, 2}, // R1 + {2, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + clobbers: 6, // R1 R2 + }, + }, + { + name: "LoweredAtomicAnd32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "LoweredAtomicOr32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + asm: mips.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "LoweredAtomicLoad8", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicLoad64", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 
R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "LoweredAtomicStore64", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "LoweredAtomicStorezero32", + argLen: 2, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "LoweredAtomicStorezero64", + argLen: 2, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 
R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicAddconst32", + auxType: auxInt32, + argLen: 2, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicAddconst64", + auxType: auxInt64, + argLen: 2, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 
R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + }, + }, + }, + { + name: "FPFlagTrue", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "FPFlagFalse", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4194304}, // R22 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + reg: regInfo{ + clobbers: 4611686018293170176, // R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO + outputs: []outputInfo{ + {0, 16777216}, // R25 + }, + }, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 8}, // R3 + {1, 16}, // R4 + }, + }, + }, + { + name: 
"LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4}, // R2 + {1, 8}, // R3 + }, + }, + }, + { + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 4}, // R2 + }, + }, + }, + + { + name: "ADD", + argLen: 2, + commutative: true, + asm: ppc64.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ADDCC", + argLen: 2, + commutative: true, + asm: ppc64.AADDCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ADDconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ADDCCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AADDCCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 
R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FADD", + argLen: 2, + commutative: true, + asm: ppc64.AFADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FADDS", + argLen: 2, + commutative: true, + asm: ppc64.AFADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "SUB", + argLen: 2, + asm: ppc64.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SUBCC", + argLen: 2, + asm: ppc64.ASUBCC, + reg: 
regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SUBFCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASUBC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FSUB", + argLen: 2, + asm: ppc64.AFSUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FSUBS", + argLen: 2, + asm: ppc64.AFSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 
F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "MULLD", + argLen: 2, + commutative: true, + asm: ppc64.AMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MULLW", + argLen: 2, + commutative: true, + asm: ppc64.AMULLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MULLDconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.AMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MULLWconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.AMULLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MADDLD", + argLen: 3, + asm: ppc64.AMADDLD, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MULHD", + argLen: 2, + commutative: true, + asm: ppc64.AMULHD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MULHW", + argLen: 2, + commutative: true, + asm: ppc64.AMULHW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MULHDU", + argLen: 2, + commutative: true, + asm: ppc64.AMULHDU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + 
outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MULHWU", + argLen: 2, + commutative: true, + asm: ppc64.AMULHWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FMUL", + argLen: 2, + commutative: true, + asm: ppc64.AFMUL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FMULS", + argLen: 2, + commutative: true, + asm: ppc64.AFMULS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FMADD", + argLen: 3, + asm: ppc64.AFMADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, 
// F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FMADDS", + argLen: 3, + asm: ppc64.AFMADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FMSUB", + argLen: 3, + asm: ppc64.AFMSUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 
F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FMSUBS", + argLen: 3, + asm: ppc64.AFMSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "SRAD", + argLen: 2, + asm: ppc64.ASRAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SRAW", + argLen: 2, + asm: ppc64.ASRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SRD", + argLen: 2, + asm: ppc64.ASRD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SRW", + argLen: 2, + asm: ppc64.ASRW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SLD", + argLen: 2, + asm: ppc64.ASLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SLW", + argLen: 2, + asm: ppc64.ASLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ROTL", + argLen: 2, + asm: ppc64.AROTL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 
R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ROTLW", + argLen: 2, + asm: ppc64.AROTLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CLRLSLWI", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACLRLSLWI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CLRLSLDI", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACLRLSLDI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ADDC", + argLen: 2, + commutative: true, + asm: ppc64.AADDC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SUBC", + argLen: 2, + asm: ppc64.ASUBC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ADDCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AADDC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SUBCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASUBC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ADDE", + argLen: 3, + commutative: true, + asm: ppc64.AADDE, + reg: regInfo{ + inputs: []inputInfo{ + {2, 9223372036854775808}, // XER + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 
R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SUBE", + argLen: 3, + asm: ppc64.ASUBE, + reg: regInfo{ + inputs: []inputInfo{ + {2, 9223372036854775808}, // XER + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {1, 9223372036854775808}, // XER + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ADDZEzero", + argLen: 1, + asm: ppc64.AADDZE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372036854775808}, // XER + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SUBZEzero", + argLen: 1, + asm: ppc64.ASUBZE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372036854775808}, // XER + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SRADconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASRAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + 
outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SRAWconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 9223372036854775808, // XER + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SRDconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASRD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SRWconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASRW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SLDconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SLWconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ASLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 
R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ROTLconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AROTL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ROTLWconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AROTLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "EXTSWSLconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AEXTSWSLI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "RLWINM", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ARLWNM, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "RLWNM", + auxType: auxInt64, + argLen: 2, + asm: ppc64.ARLWNM, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP 
SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "RLWMI", + auxType: auxInt64, + argLen: 2, + resultInArg0: true, + asm: ppc64.ARLWMI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "RLDICL", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ARLDICL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "RLDICR", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ARLDICR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CNTLZD", + argLen: 1, + asm: ppc64.ACNTLZD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + 
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CNTLZDCC", + argLen: 1, + asm: ppc64.ACNTLZDCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CNTLZW", + argLen: 1, + asm: ppc64.ACNTLZW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CNTTZD", + argLen: 1, + asm: ppc64.ACNTTZD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CNTTZW", + argLen: 1, + asm: ppc64.ACNTTZW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "POPCNTD", + argLen: 1, + asm: ppc64.APOPCNTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 
R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "POPCNTW", + argLen: 1, + asm: ppc64.APOPCNTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "POPCNTB", + argLen: 1, + asm: ppc64.APOPCNTB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FDIV", + argLen: 2, + asm: ppc64.AFDIV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FDIVS", + argLen: 2, + asm: ppc64.AFDIVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: 
"DIVD", + argLen: 2, + asm: ppc64.ADIVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "DIVW", + argLen: 2, + asm: ppc64.ADIVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "DIVDU", + argLen: 2, + asm: ppc64.ADIVDU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "DIVWU", + argLen: 2, + asm: ppc64.ADIVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 
R29 + }, + }, + }, + { + name: "MODUD", + argLen: 2, + asm: ppc64.AMODUD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MODSD", + argLen: 2, + asm: ppc64.AMODSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MODUW", + argLen: 2, + asm: ppc64.AMODUW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MODSW", + argLen: 2, + asm: ppc64.AMODSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 
R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FCTIDZ", + argLen: 1, + asm: ppc64.AFCTIDZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FCTIWZ", + argLen: 1, + asm: ppc64.AFCTIWZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FCFID", + argLen: 1, + asm: ppc64.AFCFID, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FCFIDS", + argLen: 1, + asm: ppc64.AFCFIDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FRSP", + argLen: 1, + asm: ppc64.AFRSP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 
F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "MFVSRD", + argLen: 1, + asm: ppc64.AMFVSRD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MTVSRD", + argLen: 1, + asm: ppc64.AMTVSRD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "AND", + argLen: 2, + commutative: true, + asm: ppc64.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ANDN", + argLen: 2, + asm: ppc64.AANDN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 
R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ANDNCC", + argLen: 2, + asm: ppc64.AANDNCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ANDCC", + argLen: 2, + commutative: true, + asm: ppc64.AANDCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "OR", + argLen: 2, + commutative: true, + asm: ppc64.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ORN", + argLen: 2, + asm: ppc64.AORN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 
R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ORCC", + argLen: 2, + commutative: true, + asm: ppc64.AORCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "NOR", + argLen: 2, + commutative: true, + asm: ppc64.ANOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "NORCC", + argLen: 2, + commutative: true, + asm: ppc64.ANORCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "XOR", + argLen: 2, + commutative: true, + asm: ppc64.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 
R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "XORCC", + argLen: 2, + commutative: true, + asm: ppc64.AXORCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "EQV", + argLen: 2, + commutative: true, + asm: ppc64.AEQV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "NEG", + argLen: 1, + asm: ppc64.ANEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "NEGCC", + argLen: 1, + asm: ppc64.ANEGCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 
R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "BRD", + argLen: 1, + asm: ppc64.ABRD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "BRW", + argLen: 1, + asm: ppc64.ABRW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "BRH", + argLen: 1, + asm: ppc64.ABRH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FNEG", + argLen: 1, + asm: ppc64.AFNEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FSQRT", + argLen: 1, + asm: ppc64.AFSQRT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 
F27 F28 F29 F30 + }, + }, + }, + { + name: "FSQRTS", + argLen: 1, + asm: ppc64.AFSQRTS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FFLOOR", + argLen: 1, + asm: ppc64.AFRIM, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FCEIL", + argLen: 1, + asm: ppc64.AFRIP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FTRUNC", + argLen: 1, + asm: ppc64.AFRIZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FROUND", + argLen: 1, + asm: ppc64.AFRIN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + 
outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FABS", + argLen: 1, + asm: ppc64.AFABS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FNABS", + argLen: 1, + asm: ppc64.AFNABS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FCPSGN", + argLen: 2, + asm: ppc64.AFCPSGN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "ORconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 
R29 + }, + }, + }, + { + name: "XORconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ANDCCconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AANDCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVBreg", + argLen: 1, + asm: ppc64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVBZreg", + argLen: 1, + asm: ppc64.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHreg", + argLen: 1, + asm: ppc64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: 
"MOVHZreg", + argLen: 1, + asm: ppc64.AMOVHZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWreg", + argLen: 1, + asm: ppc64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWZreg", + argLen: 1, + asm: ppc64.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVBZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 
R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVHZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDBRload", + argLen: 2, + faultOnNilArg0: true, + asm: ppc64.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + 
{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWBRload", + argLen: 2, + faultOnNilArg0: true, + asm: ppc64.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHBRload", + argLen: 2, + faultOnNilArg0: true, + asm: ppc64.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVBZloadidx", + argLen: 3, + asm: ppc64.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHloadidx", + argLen: 3, + asm: ppc64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + 
{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHZloadidx", + argLen: 3, + asm: ppc64.AMOVHZ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWloadidx", + argLen: 3, + asm: ppc64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWZloadidx", + argLen: 3, + asm: ppc64.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDloadidx", + argLen: 3, + asm: ppc64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 
+ }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHBRloadidx", + argLen: 3, + asm: ppc64.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWBRloadidx", + argLen: 3, + asm: ppc64.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDBRloadidx", + argLen: 3, + asm: ppc64.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FMOVDloadidx", + argLen: 3, + asm: ppc64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 
R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FMOVSloadidx", + argLen: 3, + asm: ppc64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "DCBT", + auxType: auxInt64, + argLen: 2, + hasSideEffects: true, + asm: ppc64.ADCBT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDBRstore", + argLen: 3, + faultOnNilArg0: true, + asm: ppc64.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWBRstore", + argLen: 3, + faultOnNilArg0: true, + asm: ppc64.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHBRstore", + argLen: 3, + faultOnNilArg0: true, + asm: ppc64.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 
R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FMOVSload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 
R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FMOVSstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "MOVBstoreidx", + argLen: 4, + asm: ppc64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 
R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHstoreidx", + argLen: 4, + asm: ppc64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWstoreidx", + argLen: 4, + asm: ppc64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDstoreidx", + argLen: 4, + asm: ppc64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FMOVDstoreidx", + argLen: 4, + asm: ppc64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 
R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FMOVSstoreidx", + argLen: 4, + asm: ppc64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "MOVHBRstoreidx", + argLen: 4, + asm: ppc64.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWBRstoreidx", + argLen: 4, + asm: ppc64.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDBRstoreidx", + argLen: 4, + asm: ppc64.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 
R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: ppc64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + 
name: "MOVDconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: ppc64.AMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FMOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: ppc64.AFMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FMOVSconst", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + asm: ppc64.AFMOVS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "FCMPU", + argLen: 2, + asm: ppc64.AFCMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "CMP", + argLen: 2, + asm: ppc64.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPU", + argLen: 2, + asm: ppc64.ACMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + 
}, + }, + { + name: "CMPW", + argLen: 2, + asm: ppc64.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPWU", + argLen: 2, + asm: ppc64.ACMPWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPUconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.ACMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPWconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CMPWUconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACMPWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ISEL", + auxType: auxInt32, + argLen: 3, + asm: ppc64.AISEL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 
1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ISELZ", + auxType: auxInt32, + argLen: 2, + asm: ppc64.AISEL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SETBC", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ASETBC, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "SETBCR", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ASETBCR, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "Equal", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "NotEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LessThan", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FLessThan", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 
R27 R28 R29 + }, + }, + }, + { + name: "LessEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FLessEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "GreaterThan", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FGreaterThan", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "GreaterEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FGreaterEqual", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 2048}, // R11 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredNilCheck", + 
argLen: 2, + clobberFlags: true, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + clobbers: 2147483648, // R31 + }, + }, + { + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "LoweredRound64F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + outputs: []outputInfo{ + {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 + }, + }, + }, + { + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + }, + }, + { + name: 
"CALLclosure", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4096}, // R12 + {1, 2048}, // R11 + }, + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + }, + }, + { + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4096}, // R12 + }, + clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1048576}, // R20 + }, + clobbers: 1048576, // R20 + }, + }, + { + name: "LoweredZeroShort", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredQuadZeroShort", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredQuadZero", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1048576}, // R20 + }, + clobbers: 1048576, // R20 + }, + }, + { + name: "LoweredMove", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + 
faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1048576}, // R20 + {1, 2097152}, // R21 + }, + clobbers: 3145728, // R20 R21 + }, + }, + { + name: "LoweredMoveShort", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredQuadMove", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1048576}, // R20 + {1, 2097152}, // R21 + }, + clobbers: 3145728, // R20 R21 + }, + }, + { + name: "LoweredQuadMoveShort", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicStore8", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicStore32", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 
R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicStore64", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicLoad8", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicLoad32", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicLoad64", + auxType: auxInt64, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicLoadPtr", + auxType: auxInt64, + 
argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + 
outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicCas64", + auxType: auxInt64, + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicCas32", + auxType: auxInt64, + argLen: 4, + resultNotInArgs: true, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 
R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicAnd8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: ppc64.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicAnd32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: ppc64.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicOr8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: ppc64.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredAtomicOr32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: ppc64.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + reg: regInfo{ + clobbers: 
18446744072632408064, // R11 R12 R18 R19 R22 R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER + outputs: []outputInfo{ + {0, 536870912}, // R29 + }, + }, + }, + { + name: "LoweredPubBarrier", + argLen: 1, + hasSideEffects: true, + asm: ppc64.ALWSYNC, + reg: regInfo{}, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 32}, // R5 + {1, 64}, // R6 + }, + }, + }, + { + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // R4 + {1, 32}, // R5 + }, + }, + }, + { + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 8}, // R3 + {1, 16}, // R4 + }, + }, + }, + { + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "FlagEQ", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagLT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagGT", + argLen: 0, + reg: regInfo{}, + }, + + { + name: "ADD", + argLen: 2, + commutative: true, + asm: riscv.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ADDI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AADDI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ADDIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.AADDIW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "NEG", + argLen: 1, + asm: riscv.ANEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "NEGW", + argLen: 1, + asm: riscv.ANEGW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SUB", + argLen: 2, + asm: riscv.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SUBW", + argLen: 2, + asm: riscv.ASUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + 
outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MUL", + argLen: 2, + commutative: true, + asm: riscv.AMUL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MULW", + argLen: 2, + commutative: true, + asm: riscv.AMULW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MULH", + argLen: 2, + commutative: true, + asm: riscv.AMULH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MULHU", + argLen: 2, + commutative: true, + asm: riscv.AMULHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 
X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredMuluhilo", + argLen: 2, + resultNotInArgs: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredMuluover", + argLen: 2, + resultNotInArgs: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "DIV", + argLen: 2, + asm: riscv.ADIV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "DIVU", + argLen: 2, + asm: riscv.ADIVU, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "DIVW", + argLen: 2, + asm: riscv.ADIVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "DIVUW", + argLen: 2, + asm: riscv.ADIVUW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "REM", + argLen: 2, + asm: riscv.AREM, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "REMU", + argLen: 2, + asm: riscv.AREMU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "REMW", + argLen: 2, + asm: riscv.AREMW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "REMUW", + argLen: 2, + asm: riscv.AREMUW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: riscv.AMOV, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 
X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: 
true, + symEffect: SymRead, + asm: riscv.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVWUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 
X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: 
"MOVDstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVBreg", + argLen: 1, + asm: riscv.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVHreg", + argLen: 1, + asm: riscv.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVWreg", + argLen: 1, + asm: riscv.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVDreg", + argLen: 1, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVBUreg", + argLen: 1, + asm: riscv.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 
X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVHUreg", + argLen: 1, + asm: riscv.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVWUreg", + argLen: 1, + asm: riscv.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "MOVDnop", + argLen: 1, + resultInArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLL", + argLen: 2, + asm: riscv.ASLL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRA", + argLen: 2, + asm: riscv.ASRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRAW", + argLen: 2, + asm: riscv.ASRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRL", + argLen: 2, + asm: riscv.ASRL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRLW", + argLen: 2, + asm: riscv.ASRLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLLI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLLI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: 
[]outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRAI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRAI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRAIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRAIW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRLI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRLI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SRLIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRLIW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "XOR", + argLen: 2, + commutative: true, + asm: riscv.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 
X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "XORI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AXORI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "OR", + argLen: 2, + commutative: true, + asm: riscv.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "ORI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AORI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "AND", + argLen: 2, + commutative: true, + asm: riscv.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 
X29 X30 + }, + }, + }, + { + name: "ANDI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AANDI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "NOT", + argLen: 1, + asm: riscv.ANOT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SEQZ", + argLen: 1, + asm: riscv.ASEQZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SNEZ", + argLen: 1, + asm: riscv.ASNEZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLT", + argLen: 2, + asm: riscv.ASLT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + 
}, + }, + }, + { + name: "SLTI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLTI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLTU", + argLen: 2, + asm: riscv.ASLTU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "SLTIU", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLTIU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "LoweredRound64F", + argLen: 1, + resultInArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + 
outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CALLstatic", + auxType: auxCallOff, + argLen: -1, + call: true, + reg: regInfo{ + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: -1, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: -1, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 33554432}, // X26 + {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLinter", + auxType: auxCallOff, + argLen: -1, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + 
faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16777216}, // X25 + }, + clobbers: 16777216, // X25 + }, + }, + { + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16777216}, // X25 + {1, 8388608}, // X24 + }, + clobbers: 25165824, // X24 X25 + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // X5 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + clobbers: 16, // X5 + }, + }, + { + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // X5 + {1, 32}, // X6 + {2, 1006632880}, // X5 X6 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + clobbers: 112, // X5 X6 X7 + }, + }, + { + name: "LoweredAtomicLoad8", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicLoad64", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 
X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "LoweredAtomicStore64", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + }, + }, + { + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + 
}, + }, + { + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 
X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredAtomicAnd32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: riscv.AAMOANDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + }, + }, + { + name: "LoweredAtomicOr32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + asm: riscv.AAMOORW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 + {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 
X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + reg: regInfo{ + outputs: []outputInfo{ + {0, 33554432}, // X26 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + reg: regInfo{ + clobbers: 9223372034707292160, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + outputs: []outputInfo{ + {0, 8388608}, // X24 + }, + }, + }, + { + name: "LoweredPubBarrier", + argLen: 1, + hasSideEffects: true, + asm: riscv.AFENCE, + reg: regInfo{}, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 64}, // X7 + {1, 134217728}, // X28 + }, + }, + }, + { + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 32}, // X6 + {1, 64}, // X7 + }, + }, + }, + { + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // X5 + {1, 32}, // X6 + }, + }, + }, + { + name: "FADDS", + argLen: 2, + commutative: true, + asm: riscv.AFADDS, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSUBS", + argLen: 2, + asm: riscv.AFSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMULS", + argLen: 2, + commutative: true, + asm: riscv.AFMULS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FDIVS", + argLen: 2, + asm: riscv.AFDIVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 
F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMADDS", + argLen: 3, + commutative: true, + asm: riscv.AFMADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMSUBS", + argLen: 3, + commutative: true, + asm: riscv.AFMSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNMADDS", + argLen: 3, + commutative: true, + asm: riscv.AFNMADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 
F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNMSUBS", + argLen: 3, + commutative: true, + asm: riscv.AFNMSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSQRTS", + argLen: 1, + asm: riscv.AFSQRTS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNEGS", + argLen: 1, + asm: riscv.AFNEGS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 
F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMVSX", + argLen: 1, + asm: riscv.AFMVSX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTSW", + argLen: 1, + asm: riscv.AFCVTSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTSL", + argLen: 1, + asm: riscv.AFCVTSL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTWS", + argLen: 1, + asm: riscv.AFCVTWS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FCVTLS", + argLen: 1, + asm: riscv.AFCVTLS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 
F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FMOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FEQS", + argLen: 2, + commutative: true, + asm: riscv.AFEQS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FNES", + argLen: 2, + commutative: true, + asm: riscv.AFNES, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 
F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FLTS", + argLen: 2, + asm: riscv.AFLTS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FLES", + argLen: 2, + asm: riscv.AFLES, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FADDD", + argLen: 2, + commutative: true, + asm: riscv.AFADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 
F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSUBD", + argLen: 2, + asm: riscv.AFSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMULD", + argLen: 2, + commutative: true, + asm: riscv.AFMULD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FDIVD", + argLen: 2, + asm: riscv.AFDIVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMADDD", + argLen: 3, + commutative: true, + asm: riscv.AFMADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 
F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMSUBD", + argLen: 3, + commutative: true, + asm: riscv.AFMSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNMADDD", + argLen: 3, + commutative: true, + asm: riscv.AFNMADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 
F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNMSUBD", + argLen: 3, + commutative: true, + asm: riscv.AFNMSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSQRTD", + argLen: 1, + asm: riscv.AFSQRTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNEGD", + argLen: 1, + asm: riscv.AFNEGD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FABSD", + argLen: 1, + asm: riscv.AFABSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + 
outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSGNJD", + argLen: 2, + asm: riscv.AFSGNJD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMVDX", + argLen: 1, + asm: riscv.AFMVDX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTDW", + argLen: 1, + asm: riscv.AFCVTDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTDL", + argLen: 1, + asm: riscv.AFCVTDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + 
name: "FCVTWD", + argLen: 1, + asm: riscv.AFCVTWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FCVTLD", + argLen: 1, + asm: riscv.AFCVTLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FCVTDS", + argLen: 1, + asm: riscv.AFCVTDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTSD", + argLen: 1, + asm: riscv.AFCVTSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + }, + outputs: 
[]outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FEQD", + argLen: 2, + commutative: true, + asm: riscv.AFEQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FNED", + argLen: 2, + commutative: true, + asm: riscv.AFNED, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FLTD", + argLen: 2, + asm: riscv.AFLTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 
F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + { + name: "FLED", + argLen: 2, + asm: riscv.AFLED, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, + + { + name: "FADDS", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FADD", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FSUBS", + argLen: 2, + resultInArg0: true, + asm: s390x.AFSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: 
[]outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FSUB", + argLen: 2, + resultInArg0: true, + asm: s390x.AFSUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMULS", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFMULS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMUL", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: s390x.AFMUL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FDIVS", + argLen: 2, + resultInArg0: true, + asm: s390x.AFDIVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FDIV", + argLen: 2, + resultInArg0: true, + asm: s390x.AFDIV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + 
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FNEGS", + argLen: 1, + clobberFlags: true, + asm: s390x.AFNEGS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FNEG", + argLen: 1, + clobberFlags: true, + asm: s390x.AFNEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMADDS", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMADD", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMSUBS", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + 
outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMSUB", + argLen: 3, + resultInArg0: true, + asm: s390x.AFMSUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LPDFR", + argLen: 1, + asm: s390x.ALPDFR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LNDFR", + argLen: 1, + asm: s390x.ALNDFR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CPSDR", + argLen: 2, + asm: s390x.ACPSDR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FIDBR", + auxType: auxInt8, + argLen: 1, + asm: s390x.AFIDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVSload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AFMOVS, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVSconst", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + asm: s390x.AFMOVS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: s390x.AFMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVSloadidx", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: s390x.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVDloadidx", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVSstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 
R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVSstoreidx", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: s390x.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FMOVDstoreidx", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: s390x.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "ADD", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AADDW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDconst", + auxType: auxInt32, + argLen: 1, + clobberFlags: true, + asm: s390x.AADD, + reg: 
regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDWconst", + auxType: auxInt32, + argLen: 1, + clobberFlags: true, + asm: s390x.AADDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AADDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUB", + argLen: 2, + clobberFlags: true, + asm: s390x.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBW", + argLen: 2, + clobberFlags: true, + asm: s390x.ASUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 
R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ASUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.ASUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULLD", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULLW", + argLen: 2, + commutative: true, + resultInArg0: true, + 
clobberFlags: true, + asm: s390x.AMULLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULLDconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULLWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULLDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULLWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AMULLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MULHD", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULHD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MULHDU", + argLen: 2, + commutative: true, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMULHDU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "DIVD", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "DIVW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "DIVDU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVDU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "DIVWU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ADIVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // 
R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MODD", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MODW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MODDU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODDU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MODWU", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AMODWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + clobbers: 2048, // R11 + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "AND", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ANDW", + argLen: 2, + commutative: true, + 
clobberFlags: true, + asm: s390x.AANDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ANDWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AANDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ANDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ANDWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AANDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "OR", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + 
{1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ORW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ORconst", + auxType: auxInt64, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ORWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ORload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ORWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 
+ }, + }, + }, + { + name: "XOR", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "XORW", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: s390x.AXORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "XORconst", + auxType: auxInt64, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "XORWconst", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: s390x.AXORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "XORload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "XORWload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: s390x.AXORW, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDC", + argLen: 2, + commutative: true, + asm: s390x.AADDC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDCconst", + auxType: auxInt16, + argLen: 1, + asm: s390x.AADDC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "ADDE", + argLen: 3, + commutative: true, + resultInArg0: true, + asm: s390x.AADDE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBC", + argLen: 2, + asm: s390x.ASUBC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SUBE", + argLen: 3, + resultInArg0: true, + asm: s390x.ASUBE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CMP", + argLen: 2, + asm: s390x.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 
R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPW", + argLen: 2, + asm: s390x.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPU", + argLen: 2, + asm: s390x.ACMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPWU", + argLen: 2, + asm: s390x.ACMPWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPWconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPUconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "CMPWUconst", + auxType: auxInt32, + argLen: 1, + asm: s390x.ACMPWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "FCMPS", + argLen: 2, + asm: s390x.ACEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FCMP", + argLen: 2, + asm: s390x.AFCMPU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 
4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LTDBR", + argLen: 1, + asm: s390x.ALTDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LTEBR", + argLen: 1, + asm: s390x.ALTEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "SLD", + argLen: 2, + asm: s390x.ASLD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SLW", + argLen: 2, + asm: s390x.ASLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SLDconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SLWconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRD", + argLen: 2, + asm: s390x.ASRD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRW", + argLen: 2, + asm: s390x.ASRW, + reg: 
regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRDconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASRD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRWconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ASRW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRAD", + argLen: 2, + clobberFlags: true, + asm: s390x.ASRAD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRAW", + argLen: 2, + clobberFlags: true, + asm: s390x.ASRAW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRADconst", + auxType: auxUInt8, + argLen: 1, + clobberFlags: true, + asm: s390x.ASRAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SRAWconst", + auxType: auxUInt8, + argLen: 1, + clobberFlags: true, + asm: s390x.ASRAW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: 
[]outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "RLLG", + argLen: 2, + asm: s390x.ARLLG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "RLL", + argLen: 2, + asm: s390x.ARLL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "RLLconst", + auxType: auxUInt8, + argLen: 1, + asm: s390x.ARLL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "RXSBG", + auxType: auxS390XRotateParams, + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: s390x.ARXSBG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "RISBGZ", + auxType: auxS390XRotateParams, + argLen: 1, + clobberFlags: true, + asm: s390x.ARISBGZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "NEG", + argLen: 1, + clobberFlags: true, + asm: s390x.ANEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "NEGW", + argLen: 
1, + clobberFlags: true, + asm: s390x.ANEGW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "NOT", + argLen: 1, + resultInArg0: true, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "NOTW", + argLen: 1, + resultInArg0: true, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "FSQRT", + argLen: 1, + asm: s390x.AFSQRT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FSQRTS", + argLen: 1, + asm: s390x.AFSQRTS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LOCGR", + auxType: auxS390XCCMask, + argLen: 3, + resultInArg0: true, + asm: s390x.ALOCGR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBreg", + argLen: 1, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 
+ }, + }, + }, + { + name: "MOVBZreg", + argLen: 1, + asm: s390x.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHreg", + argLen: 1, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHZreg", + argLen: 1, + asm: s390x.AMOVHZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWreg", + argLen: 1, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWZreg", + argLen: 1, + asm: s390x.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: s390x.AMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LDGR", + argLen: 1, + asm: s390x.ALDGR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LGDR", + argLen: 1, + asm: s390x.ALGDR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + 
outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CFDBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACFDBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CGDBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACGDBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CFEBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACFEBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CGEBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACGEBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CEFBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACEFBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CDFBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDFBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CEGBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACEGBRA, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CDGBRA", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDGBRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CLFEBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLFEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CLFDBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLFDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CLGEBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLGEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CLGDBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACLGDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CELFBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACELFBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: 
"CDLFBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDLFBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CELGBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACELGBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CDLGBR", + argLen: 1, + clobberFlags: true, + asm: s390x.ACDLGBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LEDBR", + argLen: 1, + asm: s390x.ALEDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LDEBR", + argLen: 1, + asm: s390x.ALDEBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MOVDaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295000064}, // SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDaddridx", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295000064}, // SP SB + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // 
R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVHZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWZload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ 
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWBR", + argLen: 1, + asm: s390x.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDBR", + argLen: 1, + asm: s390x.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHBRload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWBRload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDBRload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBstore", + auxType: auxSymOff, + 
argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVHBRstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVWBRstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVDBRstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 
R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MVC", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + symEffect: SymNone, + asm: s390x.AMVC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVBZloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHZloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVHZ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 
R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWZloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVHBRloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWBRloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + 
}, + }, + }, + { + name: "MOVDBRloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: s390x.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVHstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVWstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVDstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVHBRstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVHBR, + reg: 
regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVWBRstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVDBRstoreidx", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: s390x.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVBstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + }, + }, + { + name: "MOVHstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + }, + }, + { + name: "MOVWstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + }, + }, + { + name: "MOVDstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + }, + }, + { + 
name: "CLEAR", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ACLEAR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 4096}, // R12 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + { + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, + { + name: "LoweredGetG", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4096}, // R12 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, 
+ }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + clobberFlags: true, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "LoweredRound32F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LoweredRound64F", + argLen: 1, + resultInArg0: true, + zeroWidth: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + clobberFlags: true, + reg: regInfo{ + clobbers: 4294918146, // R1 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + outputs: []outputInfo{ + {0, 512}, // R9 + }, + }, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4}, // R2 + {1, 8}, // R3 + }, + }, + }, + { + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 4}, // R2 + }, + }, + }, + { + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // R0 + {1, 2}, // R1 + }, + }, + }, + { + name: "FlagEQ", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagLT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagGT", + argLen: 0, + reg: regInfo{}, + }, + { + name: 
"FlagOV", + argLen: 0, + reg: regInfo{}, + }, + { + name: "SYNC", + argLen: 1, + asm: s390x.ASYNC, + reg: regInfo{}, + }, + { + name: "MOVBZatomicload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVWZatomicload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVDatomicload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MOVBatomicstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymWrite, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVWatomicstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymWrite, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "MOVDatomicstore", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + 
hasSideEffects: true, + symEffect: SymWrite, + asm: s390x.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "LAA", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ALAA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LAAG", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ALAAG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "AddTupleFirst32", + argLen: 2, + reg: regInfo{}, + }, + { + name: "AddTupleFirst64", + argLen: 2, + reg: regInfo{}, + }, + { + name: "LAN", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "LANfloor", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 2, // R1 + }, + }, + { + name: "LAO", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAO, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB + 
{1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "LAOfloor", + argLen: 3, + clobberFlags: true, + hasSideEffects: true, + asm: s390x.ALAO, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 2, // R1 + }, + }, + { + name: "LoweredAtomicCas32", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ACS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1}, // R0 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 1, // R0 + outputs: []outputInfo{ + {1, 0}, + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LoweredAtomicCas64", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ACSG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1}, // R0 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 1, // R0 + outputs: []outputInfo{ + {1, 0}, + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "LoweredAtomicExchange32", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ACS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {1, 0}, + {0, 1}, // R0 + }, + }, + }, + { + name: "LoweredAtomicExchange64", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + hasSideEffects: true, + symEffect: SymRdWr, + asm: s390x.ACSG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 
R14 SP + {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + outputs: []outputInfo{ + {1, 0}, + {0, 1}, // R0 + }, + }, + }, + { + name: "FLOGR", + argLen: 1, + clobberFlags: true, + asm: s390x.AFLOGR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + clobbers: 2, // R1 + outputs: []outputInfo{ + {0, 1}, // R0 + }, + }, + }, + { + name: "POPCNT", + argLen: 1, + clobberFlags: true, + asm: s390x.APOPCNT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "MLGR", + argLen: 2, + asm: s390x.AMLGR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 8}, // R3 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 4}, // R2 + {1, 8}, // R3 + }, + }, + }, + { + name: "SumBytes2", + argLen: 1, + reg: regInfo{}, + }, + { + name: "SumBytes4", + argLen: 1, + reg: regInfo{}, + }, + { + name: "SumBytes8", + argLen: 1, + reg: regInfo{}, + }, + { + name: "STMG2", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "STMG3", + auxType: auxSymOff, + argLen: 5, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "STMG4", + auxType: auxSymOff, + argLen: 6, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {4, 16}, // R4 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 
R14 SP + }, + }, + }, + { + name: "STM2", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMY, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "STM3", + auxType: auxSymOff, + argLen: 5, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMY, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "STM4", + auxType: auxSymOff, + argLen: 6, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: s390x.ASTMY, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // R1 + {2, 4}, // R2 + {3, 8}, // R3 + {4, 16}, // R4 + {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + }, + }, + { + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 4}, // R2 + {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 6, // R1 R2 + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2}, // R1 + {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP + }, + clobbers: 2, // R1 + }, + }, + + { + name: "LoweredStaticCall", + auxType: auxCallOff, + argLen: 1, + call: true, + reg: regInfo{ + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, + }, + { + name: "LoweredTailCall", + auxType: auxCallOff, + argLen: 1, + call: true, + tailCall: true, + reg: regInfo{ + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 
R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, + }, + { + name: "LoweredClosureCall", + auxType: auxCallOff, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, + }, + { + name: "LoweredInterCall", + auxType: auxCallOff, + argLen: 2, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + }, + }, + { + name: "LoweredAddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredMove", + auxType: auxInt64, + argLen: 3, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 2, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 1, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxInt64, + argLen: 1, + reg: regInfo{ + clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredConvert", + argLen: 2, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "Select", + argLen: 3, + asm: wasm.ASelect, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {2, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load8U", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load8U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + 
{0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load8S", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load8S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load16U", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load16U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load16S", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load16S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load32U", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load32U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load32S", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load32S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Load", + auxType: auxInt64, + argLen: 2, + asm: wasm.AI64Load, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 
R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Store8", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store8, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Store16", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store16, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Store32", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store32, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Store", + auxType: auxInt64, + argLen: 3, + asm: wasm.AI64Store, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "F32Load", + auxType: auxInt64, + argLen: 2, + asm: wasm.AF32Load, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Load", + auxType: auxInt64, + argLen: 2, + asm: wasm.AF64Load, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F32Store", + auxType: auxInt64, + 
argLen: 3, + asm: wasm.AF32Store, + reg: regInfo{ + inputs: []inputInfo{ + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "F64Store", + auxType: auxInt64, + argLen: 3, + asm: wasm.AF64Store, + reg: regInfo{ + inputs: []inputInfo{ + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB + }, + }, + }, + { + name: "I64Const", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Const", + auxType: auxFloat32, + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Const", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "I64Eqz", + argLen: 1, + asm: wasm.AI64Eqz, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Eq", + argLen: 2, + asm: wasm.AI64Eq, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Ne", + argLen: 2, + asm: wasm.AI64Ne, + reg: regInfo{ + inputs: []inputInfo{ + 
{0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LtS", + argLen: 2, + asm: wasm.AI64LtS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LtU", + argLen: 2, + asm: wasm.AI64LtU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GtS", + argLen: 2, + asm: wasm.AI64GtS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GtU", + argLen: 2, + asm: wasm.AI64GtU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LeS", + argLen: 2, + asm: wasm.AI64LeS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 
281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64LeU", + argLen: 2, + asm: wasm.AI64LeU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GeS", + argLen: 2, + asm: wasm.AI64GeS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64GeU", + argLen: 2, + asm: wasm.AI64GeU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Eq", + argLen: 2, + asm: wasm.AF32Eq, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Ne", + argLen: 2, + asm: wasm.AF32Ne, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 
R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Lt", + argLen: 2, + asm: wasm.AF32Lt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Gt", + argLen: 2, + asm: wasm.AF32Gt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Le", + argLen: 2, + asm: wasm.AF32Le, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Ge", + argLen: 2, + asm: wasm.AF32Ge, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Eq", + argLen: 2, + asm: wasm.AF64Eq, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Ne", + argLen: 2, + asm: wasm.AF64Ne, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Lt", + argLen: 2, + asm: wasm.AF64Lt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Gt", + argLen: 2, + asm: wasm.AF64Gt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Le", + argLen: 2, + asm: wasm.AF64Le, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F64Ge", + argLen: 2, + asm: wasm.AF64Ge, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Add", + argLen: 2, + asm: wasm.AI64Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 
R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64AddConst", + auxType: auxInt64, + argLen: 1, + asm: wasm.AI64Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Sub", + argLen: 2, + asm: wasm.AI64Sub, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Mul", + argLen: 2, + asm: wasm.AI64Mul, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64DivS", + argLen: 2, + asm: wasm.AI64DivS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64DivU", + argLen: 2, + asm: wasm.AI64DivU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 
65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64RemS", + argLen: 2, + asm: wasm.AI64RemS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64RemU", + argLen: 2, + asm: wasm.AI64RemU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64And", + argLen: 2, + asm: wasm.AI64And, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Or", + argLen: 2, + asm: wasm.AI64Or, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Xor", + argLen: 2, + asm: wasm.AI64Xor, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Shl", 
+ argLen: 2, + asm: wasm.AI64Shl, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64ShrS", + argLen: 2, + asm: wasm.AI64ShrS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64ShrU", + argLen: 2, + asm: wasm.AI64ShrU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Neg", + argLen: 1, + asm: wasm.AF32Neg, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Add", + argLen: 2, + asm: wasm.AF32Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Sub", + argLen: 2, + asm: wasm.AF32Sub, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 
F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Mul", + argLen: 2, + asm: wasm.AF32Mul, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Div", + argLen: 2, + asm: wasm.AF32Div, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Neg", + argLen: 1, + asm: wasm.AF64Neg, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Add", + argLen: 2, + asm: wasm.AF64Add, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Sub", + argLen: 2, + asm: wasm.AF64Sub, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 
F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Mul", + argLen: 2, + asm: wasm.AF64Mul, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Div", + argLen: 2, + asm: wasm.AF64Div, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "I64TruncSatF64S", + argLen: 1, + asm: wasm.AI64TruncSatF64S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64TruncSatF64U", + argLen: 1, + asm: wasm.AI64TruncSatF64U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64TruncSatF32S", + argLen: 1, + asm: wasm.AI64TruncSatF32S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64TruncSatF32U", + argLen: 1, + asm: wasm.AI64TruncSatF32U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, 
+ outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32ConvertI64S", + argLen: 1, + asm: wasm.AF32ConvertI64S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32ConvertI64U", + argLen: 1, + asm: wasm.AF32ConvertI64U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64ConvertI64S", + argLen: 1, + asm: wasm.AF64ConvertI64S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64ConvertI64U", + argLen: 1, + asm: wasm.AF64ConvertI64U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F32DemoteF64", + argLen: 1, + asm: wasm.AF32DemoteF64, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64PromoteF32", + argLen: 1, + asm: wasm.AF64PromoteF32, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + 
name: "I64Extend8S", + argLen: 1, + asm: wasm.AI64Extend8S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Extend16S", + argLen: 1, + asm: wasm.AI64Extend16S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Extend32S", + argLen: 1, + asm: wasm.AI64Extend32S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "F32Sqrt", + argLen: 1, + asm: wasm.AF32Sqrt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Trunc", + argLen: 1, + asm: wasm.AF32Trunc, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Ceil", + argLen: 1, + asm: wasm.AF32Ceil, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Floor", + argLen: 1, + asm: wasm.AF32Floor, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: 
[]outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Nearest", + argLen: 1, + asm: wasm.AF32Nearest, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Abs", + argLen: 1, + asm: wasm.AF32Abs, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F32Copysign", + argLen: 2, + asm: wasm.AF32Copysign, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "F64Sqrt", + argLen: 1, + asm: wasm.AF64Sqrt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Trunc", + argLen: 1, + asm: wasm.AF64Trunc, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Ceil", + argLen: 1, + asm: wasm.AF64Ceil, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 
F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Floor", + argLen: 1, + asm: wasm.AF64Floor, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Nearest", + argLen: 1, + asm: wasm.AF64Nearest, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Abs", + argLen: 1, + asm: wasm.AF64Abs, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "F64Copysign", + argLen: 2, + asm: wasm.AF64Copysign, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "I64Ctz", + argLen: 1, + asm: wasm.AI64Ctz, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Clz", + argLen: 1, + asm: wasm.AI64Clz, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 
R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I32Rotl", + argLen: 2, + asm: wasm.AI32Rotl, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Rotl", + argLen: 2, + asm: wasm.AI64Rotl, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I64Popcnt", + argLen: 1, + asm: wasm.AI64Popcnt, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + + { + name: "Add8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "AddPtr", + argLen: 2, + generic: true, + }, + { + name: "Add32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Sub8", + argLen: 2, + generic: true, + }, + { + name: "Sub16", + argLen: 2, + generic: true, + }, + { + name: "Sub32", + argLen: 2, + generic: true, + }, + { + name: "Sub64", + argLen: 2, + generic: true, + }, + { + name: "SubPtr", + argLen: 2, + generic: true, + }, + { + name: "Sub32F", + argLen: 2, + generic: true, + }, + { + name: "Sub64F", + argLen: 2, 
+ generic: true, + }, + { + name: "Mul8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Div32F", + argLen: 2, + generic: true, + }, + { + name: "Div64F", + argLen: 2, + generic: true, + }, + { + name: "Hmul32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Hmul32u", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Hmul64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Hmul64u", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32uhilo", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64uhilo", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul32uover", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64uover", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Avg32u", + argLen: 2, + generic: true, + }, + { + name: "Avg64u", + argLen: 2, + generic: true, + }, + { + name: "Div8", + argLen: 2, + generic: true, + }, + { + name: "Div8u", + argLen: 2, + generic: true, + }, + { + name: "Div16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Div16u", + argLen: 2, + generic: true, + }, + { + name: "Div32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Div32u", + argLen: 2, + generic: true, + }, + { + name: "Div64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Div64u", + argLen: 2, + generic: true, + }, + { + name: "Div128u", + argLen: 3, + generic: true, + }, + { + name: "Mod8", + argLen: 2, + generic: true, + }, + { + name: "Mod8u", 
+ argLen: 2, + generic: true, + }, + { + name: "Mod16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Mod16u", + argLen: 2, + generic: true, + }, + { + name: "Mod32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Mod32u", + argLen: 2, + generic: true, + }, + { + name: "Mod64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Mod64u", + argLen: 2, + generic: true, + }, + { + name: "And8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "And16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "And32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "And64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Or64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Xor64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Lsh8x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh8x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh8x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh8x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh16x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x8", 
+ auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh32x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Lsh64x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64x64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8Ux16", + auxType: auxBool, + argLen: 2, + 
generic: true, + }, + { + name: "Rsh8Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh8Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh16Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh32Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux8", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux16", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux32", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Rsh64Ux64", + auxType: auxBool, + argLen: 2, + generic: true, + }, + { + name: "Eq8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq32", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqPtr", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqInter", + argLen: 2, + generic: true, + }, + { + name: "EqSlice", + argLen: 2, + generic: true, + }, + { + name: "Eq32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Eq64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq8", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq16", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq32", + argLen: 2, + commutative: true, + 
generic: true, + }, + { + name: "Neq64", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NeqPtr", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NeqInter", + argLen: 2, + generic: true, + }, + { + name: "NeqSlice", + argLen: 2, + generic: true, + }, + { + name: "Neq32F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Neq64F", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Less8", + argLen: 2, + generic: true, + }, + { + name: "Less8U", + argLen: 2, + generic: true, + }, + { + name: "Less16", + argLen: 2, + generic: true, + }, + { + name: "Less16U", + argLen: 2, + generic: true, + }, + { + name: "Less32", + argLen: 2, + generic: true, + }, + { + name: "Less32U", + argLen: 2, + generic: true, + }, + { + name: "Less64", + argLen: 2, + generic: true, + }, + { + name: "Less64U", + argLen: 2, + generic: true, + }, + { + name: "Less32F", + argLen: 2, + generic: true, + }, + { + name: "Less64F", + argLen: 2, + generic: true, + }, + { + name: "Leq8", + argLen: 2, + generic: true, + }, + { + name: "Leq8U", + argLen: 2, + generic: true, + }, + { + name: "Leq16", + argLen: 2, + generic: true, + }, + { + name: "Leq16U", + argLen: 2, + generic: true, + }, + { + name: "Leq32", + argLen: 2, + generic: true, + }, + { + name: "Leq32U", + argLen: 2, + generic: true, + }, + { + name: "Leq64", + argLen: 2, + generic: true, + }, + { + name: "Leq64U", + argLen: 2, + generic: true, + }, + { + name: "Leq32F", + argLen: 2, + generic: true, + }, + { + name: "Leq64F", + argLen: 2, + generic: true, + }, + { + name: "CondSelect", + argLen: 3, + generic: true, + }, + { + name: "AndB", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "OrB", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "EqB", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "NeqB", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Not", + argLen: 1, + generic: true, 
+ }, + { + name: "Neg8", + argLen: 1, + generic: true, + }, + { + name: "Neg16", + argLen: 1, + generic: true, + }, + { + name: "Neg32", + argLen: 1, + generic: true, + }, + { + name: "Neg64", + argLen: 1, + generic: true, + }, + { + name: "Neg32F", + argLen: 1, + generic: true, + }, + { + name: "Neg64F", + argLen: 1, + generic: true, + }, + { + name: "Com8", + argLen: 1, + generic: true, + }, + { + name: "Com16", + argLen: 1, + generic: true, + }, + { + name: "Com32", + argLen: 1, + generic: true, + }, + { + name: "Com64", + argLen: 1, + generic: true, + }, + { + name: "Ctz8", + argLen: 1, + generic: true, + }, + { + name: "Ctz16", + argLen: 1, + generic: true, + }, + { + name: "Ctz32", + argLen: 1, + generic: true, + }, + { + name: "Ctz64", + argLen: 1, + generic: true, + }, + { + name: "Ctz8NonZero", + argLen: 1, + generic: true, + }, + { + name: "Ctz16NonZero", + argLen: 1, + generic: true, + }, + { + name: "Ctz32NonZero", + argLen: 1, + generic: true, + }, + { + name: "Ctz64NonZero", + argLen: 1, + generic: true, + }, + { + name: "BitLen8", + argLen: 1, + generic: true, + }, + { + name: "BitLen16", + argLen: 1, + generic: true, + }, + { + name: "BitLen32", + argLen: 1, + generic: true, + }, + { + name: "BitLen64", + argLen: 1, + generic: true, + }, + { + name: "Bswap16", + argLen: 1, + generic: true, + }, + { + name: "Bswap32", + argLen: 1, + generic: true, + }, + { + name: "Bswap64", + argLen: 1, + generic: true, + }, + { + name: "BitRev8", + argLen: 1, + generic: true, + }, + { + name: "BitRev16", + argLen: 1, + generic: true, + }, + { + name: "BitRev32", + argLen: 1, + generic: true, + }, + { + name: "BitRev64", + argLen: 1, + generic: true, + }, + { + name: "PopCount8", + argLen: 1, + generic: true, + }, + { + name: "PopCount16", + argLen: 1, + generic: true, + }, + { + name: "PopCount32", + argLen: 1, + generic: true, + }, + { + name: "PopCount64", + argLen: 1, + generic: true, + }, + { + name: "RotateLeft64", + argLen: 2, + generic: true, + }, + { + 
name: "RotateLeft32", + argLen: 2, + generic: true, + }, + { + name: "RotateLeft16", + argLen: 2, + generic: true, + }, + { + name: "RotateLeft8", + argLen: 2, + generic: true, + }, + { + name: "Sqrt", + argLen: 1, + generic: true, + }, + { + name: "Sqrt32", + argLen: 1, + generic: true, + }, + { + name: "Floor", + argLen: 1, + generic: true, + }, + { + name: "Ceil", + argLen: 1, + generic: true, + }, + { + name: "Trunc", + argLen: 1, + generic: true, + }, + { + name: "Round", + argLen: 1, + generic: true, + }, + { + name: "RoundToEven", + argLen: 1, + generic: true, + }, + { + name: "Abs", + argLen: 1, + generic: true, + }, + { + name: "Copysign", + argLen: 2, + generic: true, + }, + { + name: "Min64F", + argLen: 2, + generic: true, + }, + { + name: "Min32F", + argLen: 2, + generic: true, + }, + { + name: "Max64F", + argLen: 2, + generic: true, + }, + { + name: "Max32F", + argLen: 2, + generic: true, + }, + { + name: "FMA", + argLen: 3, + generic: true, + }, + { + name: "Phi", + argLen: -1, + zeroWidth: true, + generic: true, + }, + { + name: "Copy", + argLen: 1, + generic: true, + }, + { + name: "Convert", + argLen: 2, + resultInArg0: true, + zeroWidth: true, + generic: true, + }, + { + name: "ConstBool", + auxType: auxBool, + argLen: 0, + generic: true, + }, + { + name: "ConstString", + auxType: auxString, + argLen: 0, + generic: true, + }, + { + name: "ConstNil", + argLen: 0, + generic: true, + }, + { + name: "Const8", + auxType: auxInt8, + argLen: 0, + generic: true, + }, + { + name: "Const16", + auxType: auxInt16, + argLen: 0, + generic: true, + }, + { + name: "Const32", + auxType: auxInt32, + argLen: 0, + generic: true, + }, + { + name: "Const64", + auxType: auxInt64, + argLen: 0, + generic: true, + }, + { + name: "Const32F", + auxType: auxFloat32, + argLen: 0, + generic: true, + }, + { + name: "Const64F", + auxType: auxFloat64, + argLen: 0, + generic: true, + }, + { + name: "ConstInterface", + argLen: 0, + generic: true, + }, + { + name: "ConstSlice", + 
argLen: 0, + generic: true, + }, + { + name: "InitMem", + argLen: 0, + zeroWidth: true, + generic: true, + }, + { + name: "Arg", + auxType: auxSymOff, + argLen: 0, + zeroWidth: true, + symEffect: SymRead, + generic: true, + }, + { + name: "ArgIntReg", + auxType: auxNameOffsetInt8, + argLen: 0, + zeroWidth: true, + generic: true, + }, + { + name: "ArgFloatReg", + auxType: auxNameOffsetInt8, + argLen: 0, + zeroWidth: true, + generic: true, + }, + { + name: "Addr", + auxType: auxSym, + argLen: 1, + symEffect: SymAddr, + generic: true, + }, + { + name: "LocalAddr", + auxType: auxSym, + argLen: 2, + symEffect: SymAddr, + generic: true, + }, + { + name: "SP", + argLen: 0, + zeroWidth: true, + generic: true, + }, + { + name: "SB", + argLen: 0, + zeroWidth: true, + generic: true, + }, + { + name: "SPanchored", + argLen: 2, + zeroWidth: true, + generic: true, + }, + { + name: "Load", + argLen: 2, + generic: true, + }, + { + name: "Dereference", + argLen: 2, + generic: true, + }, + { + name: "Store", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "Move", + auxType: auxTypSize, + argLen: 3, + generic: true, + }, + { + name: "Zero", + auxType: auxTypSize, + argLen: 2, + generic: true, + }, + { + name: "StoreWB", + auxType: auxTyp, + argLen: 3, + generic: true, + }, + { + name: "MoveWB", + auxType: auxTypSize, + argLen: 3, + generic: true, + }, + { + name: "ZeroWB", + auxType: auxTypSize, + argLen: 2, + generic: true, + }, + { + name: "WBend", + argLen: 1, + generic: true, + }, + { + name: "WB", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "HasCPUFeature", + auxType: auxSym, + argLen: 0, + symEffect: SymNone, + generic: true, + }, + { + name: "PanicBounds", + auxType: auxInt64, + argLen: 3, + call: true, + generic: true, + }, + { + name: "PanicExtend", + auxType: auxInt64, + argLen: 4, + call: true, + generic: true, + }, + { + name: "ClosureCall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: 
"StaticCall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "InterCall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "TailCall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "ClosureLECall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "StaticLECall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "InterLECall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "TailLECall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, + { + name: "SignExt8to16", + argLen: 1, + generic: true, + }, + { + name: "SignExt8to32", + argLen: 1, + generic: true, + }, + { + name: "SignExt8to64", + argLen: 1, + generic: true, + }, + { + name: "SignExt16to32", + argLen: 1, + generic: true, + }, + { + name: "SignExt16to64", + argLen: 1, + generic: true, + }, + { + name: "SignExt32to64", + argLen: 1, + generic: true, + }, + { + name: "ZeroExt8to16", + argLen: 1, + generic: true, + }, + { + name: "ZeroExt8to32", + argLen: 1, + generic: true, + }, + { + name: "ZeroExt8to64", + argLen: 1, + generic: true, + }, + { + name: "ZeroExt16to32", + argLen: 1, + generic: true, + }, + { + name: "ZeroExt16to64", + argLen: 1, + generic: true, + }, + { + name: "ZeroExt32to64", + argLen: 1, + generic: true, + }, + { + name: "Trunc16to8", + argLen: 1, + generic: true, + }, + { + name: "Trunc32to8", + argLen: 1, + generic: true, + }, + { + name: "Trunc32to16", + argLen: 1, + generic: true, + }, + { + name: "Trunc64to8", + argLen: 1, + generic: true, + }, + { + name: "Trunc64to16", + argLen: 1, + generic: true, + }, + { + name: "Trunc64to32", + argLen: 1, + generic: true, + }, + { + name: "Cvt32to32F", + argLen: 1, + generic: true, + }, + { + name: "Cvt32to64F", + argLen: 1, + generic: true, + }, + { + name: "Cvt64to32F", + argLen: 1, + generic: true, + }, + 
{ + name: "Cvt64to64F", + argLen: 1, + generic: true, + }, + { + name: "Cvt32Fto32", + argLen: 1, + generic: true, + }, + { + name: "Cvt32Fto64", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Fto32", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Fto64", + argLen: 1, + generic: true, + }, + { + name: "Cvt32Fto64F", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Fto32F", + argLen: 1, + generic: true, + }, + { + name: "CvtBoolToUint8", + argLen: 1, + generic: true, + }, + { + name: "Round32F", + argLen: 1, + generic: true, + }, + { + name: "Round64F", + argLen: 1, + generic: true, + }, + { + name: "IsNonNil", + argLen: 1, + generic: true, + }, + { + name: "IsInBounds", + argLen: 2, + generic: true, + }, + { + name: "IsSliceInBounds", + argLen: 2, + generic: true, + }, + { + name: "NilCheck", + argLen: 2, + nilCheck: true, + generic: true, + }, + { + name: "GetG", + argLen: 1, + zeroWidth: true, + generic: true, + }, + { + name: "GetClosurePtr", + argLen: 0, + generic: true, + }, + { + name: "GetCallerPC", + argLen: 0, + generic: true, + }, + { + name: "GetCallerSP", + argLen: 1, + generic: true, + }, + { + name: "PtrIndex", + argLen: 2, + generic: true, + }, + { + name: "OffPtr", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "SliceMake", + argLen: 3, + generic: true, + }, + { + name: "SlicePtr", + argLen: 1, + generic: true, + }, + { + name: "SliceLen", + argLen: 1, + generic: true, + }, + { + name: "SliceCap", + argLen: 1, + generic: true, + }, + { + name: "SlicePtrUnchecked", + argLen: 1, + generic: true, + }, + { + name: "ComplexMake", + argLen: 2, + generic: true, + }, + { + name: "ComplexReal", + argLen: 1, + generic: true, + }, + { + name: "ComplexImag", + argLen: 1, + generic: true, + }, + { + name: "StringMake", + argLen: 2, + generic: true, + }, + { + name: "StringPtr", + argLen: 1, + generic: true, + }, + { + name: "StringLen", + argLen: 1, + generic: true, + }, + { + name: "IMake", + argLen: 2, + generic: true, + }, 
+ { + name: "ITab", + argLen: 1, + generic: true, + }, + { + name: "IData", + argLen: 1, + generic: true, + }, + { + name: "StructMake0", + argLen: 0, + generic: true, + }, + { + name: "StructMake1", + argLen: 1, + generic: true, + }, + { + name: "StructMake2", + argLen: 2, + generic: true, + }, + { + name: "StructMake3", + argLen: 3, + generic: true, + }, + { + name: "StructMake4", + argLen: 4, + generic: true, + }, + { + name: "StructSelect", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "ArrayMake0", + argLen: 0, + generic: true, + }, + { + name: "ArrayMake1", + argLen: 1, + generic: true, + }, + { + name: "ArraySelect", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "StoreReg", + argLen: 1, + generic: true, + }, + { + name: "LoadReg", + argLen: 1, + generic: true, + }, + { + name: "FwdRef", + auxType: auxSym, + argLen: 0, + symEffect: SymNone, + generic: true, + }, + { + name: "Unknown", + argLen: 0, + generic: true, + }, + { + name: "VarDef", + auxType: auxSym, + argLen: 1, + zeroWidth: true, + symEffect: SymNone, + generic: true, + }, + { + name: "VarLive", + auxType: auxSym, + argLen: 1, + zeroWidth: true, + symEffect: SymRead, + generic: true, + }, + { + name: "KeepAlive", + argLen: 2, + zeroWidth: true, + generic: true, + }, + { + name: "InlMark", + auxType: auxInt32, + argLen: 1, + generic: true, + }, + { + name: "Int64Make", + argLen: 2, + generic: true, + }, + { + name: "Int64Hi", + argLen: 1, + generic: true, + }, + { + name: "Int64Lo", + argLen: 1, + generic: true, + }, + { + name: "Add32carry", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Add32withcarry", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "Sub32carry", + argLen: 2, + generic: true, + }, + { + name: "Sub32withcarry", + argLen: 3, + generic: true, + }, + { + name: "Add64carry", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "Sub64borrow", + argLen: 3, + generic: true, + }, + { + 
name: "Signmask", + argLen: 1, + generic: true, + }, + { + name: "Zeromask", + argLen: 1, + generic: true, + }, + { + name: "Slicemask", + argLen: 1, + generic: true, + }, + { + name: "SpectreIndex", + argLen: 2, + generic: true, + }, + { + name: "SpectreSliceIndex", + argLen: 2, + generic: true, + }, + { + name: "Cvt32Uto32F", + argLen: 1, + generic: true, + }, + { + name: "Cvt32Uto64F", + argLen: 1, + generic: true, + }, + { + name: "Cvt32Fto32U", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Fto32U", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Uto32F", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Uto64F", + argLen: 1, + generic: true, + }, + { + name: "Cvt32Fto64U", + argLen: 1, + generic: true, + }, + { + name: "Cvt64Fto64U", + argLen: 1, + generic: true, + }, + { + name: "Select0", + argLen: 1, + zeroWidth: true, + generic: true, + }, + { + name: "Select1", + argLen: 1, + zeroWidth: true, + generic: true, + }, + { + name: "SelectN", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "SelectNAddr", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "MakeResult", + argLen: -1, + generic: true, + }, + { + name: "AtomicLoad8", + argLen: 2, + generic: true, + }, + { + name: "AtomicLoad32", + argLen: 2, + generic: true, + }, + { + name: "AtomicLoad64", + argLen: 2, + generic: true, + }, + { + name: "AtomicLoadPtr", + argLen: 2, + generic: true, + }, + { + name: "AtomicLoadAcq32", + argLen: 2, + generic: true, + }, + { + name: "AtomicLoadAcq64", + argLen: 2, + generic: true, + }, + { + name: "AtomicStore8", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStore32", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStore64", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStorePtrNoWB", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicStoreRel32", + argLen: 3, + hasSideEffects: true, + generic: 
true, + }, + { + name: "AtomicStoreRel64", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange32", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange64", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAdd32", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAdd64", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwap32", + argLen: 4, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwap64", + argLen: 4, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwapRel32", + argLen: 4, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd8", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd32", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr8", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr32", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAdd32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAdd64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicExchange64Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwap32Variant", + argLen: 4, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicCompareAndSwap64Variant", + argLen: 4, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd8Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicAnd32Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr8Variant", + argLen: 3, + hasSideEffects: true, + generic: true, + }, + { + name: "AtomicOr32Variant", + argLen: 
3, + hasSideEffects: true, + generic: true, + }, + { + name: "PubBarrier", + argLen: 1, + hasSideEffects: true, + generic: true, + }, + { + name: "Clobber", + auxType: auxSymOff, + argLen: 0, + symEffect: SymNone, + generic: true, + }, + { + name: "ClobberReg", + argLen: 0, + generic: true, + }, + { + name: "PrefetchCache", + argLen: 2, + hasSideEffects: true, + generic: true, + }, + { + name: "PrefetchCacheStreamed", + argLen: 2, + hasSideEffects: true, + generic: true, + }, +} + +func (o Op) Asm() obj.As { return opcodeTable[o].asm } +func (o Op) Scale() int16 { return int16(opcodeTable[o].scale) } +func (o Op) String() string { return opcodeTable[o].name } +func (o Op) SymEffect() SymEffect { return opcodeTable[o].symEffect } +func (o Op) IsCall() bool { return opcodeTable[o].call } +func (o Op) IsTailCall() bool { return opcodeTable[o].tailCall } +func (o Op) HasSideEffects() bool { return opcodeTable[o].hasSideEffects } +func (o Op) UnsafePoint() bool { return opcodeTable[o].unsafePoint } +func (o Op) ResultInArg0() bool { return opcodeTable[o].resultInArg0 } + +var registers386 = [...]Register{ + {0, x86.REG_AX, 0, "AX"}, + {1, x86.REG_CX, 1, "CX"}, + {2, x86.REG_DX, 2, "DX"}, + {3, x86.REG_BX, 3, "BX"}, + {4, x86.REGSP, -1, "SP"}, + {5, x86.REG_BP, 4, "BP"}, + {6, x86.REG_SI, 5, "SI"}, + {7, x86.REG_DI, 6, "DI"}, + {8, x86.REG_X0, -1, "X0"}, + {9, x86.REG_X1, -1, "X1"}, + {10, x86.REG_X2, -1, "X2"}, + {11, x86.REG_X3, -1, "X3"}, + {12, x86.REG_X4, -1, "X4"}, + {13, x86.REG_X5, -1, "X5"}, + {14, x86.REG_X6, -1, "X6"}, + {15, x86.REG_X7, -1, "X7"}, + {16, 0, -1, "SB"}, +} +var paramIntReg386 = []int8(nil) +var paramFloatReg386 = []int8(nil) +var gpRegMask386 = regMask(239) +var fpRegMask386 = regMask(65280) +var specialRegMask386 = regMask(0) +var framepointerReg386 = int8(5) +var linkReg386 = int8(-1) +var registersAMD64 = [...]Register{ + {0, x86.REG_AX, 0, "AX"}, + {1, x86.REG_CX, 1, "CX"}, + {2, x86.REG_DX, 2, "DX"}, + {3, x86.REG_BX, 3, "BX"}, + {4, 
x86.REGSP, -1, "SP"}, + {5, x86.REG_BP, 4, "BP"}, + {6, x86.REG_SI, 5, "SI"}, + {7, x86.REG_DI, 6, "DI"}, + {8, x86.REG_R8, 7, "R8"}, + {9, x86.REG_R9, 8, "R9"}, + {10, x86.REG_R10, 9, "R10"}, + {11, x86.REG_R11, 10, "R11"}, + {12, x86.REG_R12, 11, "R12"}, + {13, x86.REG_R13, 12, "R13"}, + {14, x86.REGG, -1, "g"}, + {15, x86.REG_R15, 13, "R15"}, + {16, x86.REG_X0, -1, "X0"}, + {17, x86.REG_X1, -1, "X1"}, + {18, x86.REG_X2, -1, "X2"}, + {19, x86.REG_X3, -1, "X3"}, + {20, x86.REG_X4, -1, "X4"}, + {21, x86.REG_X5, -1, "X5"}, + {22, x86.REG_X6, -1, "X6"}, + {23, x86.REG_X7, -1, "X7"}, + {24, x86.REG_X8, -1, "X8"}, + {25, x86.REG_X9, -1, "X9"}, + {26, x86.REG_X10, -1, "X10"}, + {27, x86.REG_X11, -1, "X11"}, + {28, x86.REG_X12, -1, "X12"}, + {29, x86.REG_X13, -1, "X13"}, + {30, x86.REG_X14, -1, "X14"}, + {31, x86.REG_X15, -1, "X15"}, + {32, 0, -1, "SB"}, +} +var paramIntRegAMD64 = []int8{0, 3, 1, 7, 6, 8, 9, 10, 11} +var paramFloatRegAMD64 = []int8{16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30} +var gpRegMaskAMD64 = regMask(49135) +var fpRegMaskAMD64 = regMask(2147418112) +var specialRegMaskAMD64 = regMask(2147483648) +var framepointerRegAMD64 = int8(5) +var linkRegAMD64 = int8(-1) +var registersARM = [...]Register{ + {0, arm.REG_R0, 0, "R0"}, + {1, arm.REG_R1, 1, "R1"}, + {2, arm.REG_R2, 2, "R2"}, + {3, arm.REG_R3, 3, "R3"}, + {4, arm.REG_R4, 4, "R4"}, + {5, arm.REG_R5, 5, "R5"}, + {6, arm.REG_R6, 6, "R6"}, + {7, arm.REG_R7, 7, "R7"}, + {8, arm.REG_R8, 8, "R8"}, + {9, arm.REG_R9, 9, "R9"}, + {10, arm.REGG, -1, "g"}, + {11, arm.REG_R11, -1, "R11"}, + {12, arm.REG_R12, 10, "R12"}, + {13, arm.REGSP, -1, "SP"}, + {14, arm.REG_R14, 11, "R14"}, + {15, arm.REG_R15, -1, "R15"}, + {16, arm.REG_F0, -1, "F0"}, + {17, arm.REG_F1, -1, "F1"}, + {18, arm.REG_F2, -1, "F2"}, + {19, arm.REG_F3, -1, "F3"}, + {20, arm.REG_F4, -1, "F4"}, + {21, arm.REG_F5, -1, "F5"}, + {22, arm.REG_F6, -1, "F6"}, + {23, arm.REG_F7, -1, "F7"}, + {24, arm.REG_F8, -1, "F8"}, + {25, arm.REG_F9, -1, 
"F9"}, + {26, arm.REG_F10, -1, "F10"}, + {27, arm.REG_F11, -1, "F11"}, + {28, arm.REG_F12, -1, "F12"}, + {29, arm.REG_F13, -1, "F13"}, + {30, arm.REG_F14, -1, "F14"}, + {31, arm.REG_F15, -1, "F15"}, + {32, 0, -1, "SB"}, +} +var paramIntRegARM = []int8(nil) +var paramFloatRegARM = []int8(nil) +var gpRegMaskARM = regMask(21503) +var fpRegMaskARM = regMask(4294901760) +var specialRegMaskARM = regMask(0) +var framepointerRegARM = int8(-1) +var linkRegARM = int8(14) +var registersARM64 = [...]Register{ + {0, arm64.REG_R0, 0, "R0"}, + {1, arm64.REG_R1, 1, "R1"}, + {2, arm64.REG_R2, 2, "R2"}, + {3, arm64.REG_R3, 3, "R3"}, + {4, arm64.REG_R4, 4, "R4"}, + {5, arm64.REG_R5, 5, "R5"}, + {6, arm64.REG_R6, 6, "R6"}, + {7, arm64.REG_R7, 7, "R7"}, + {8, arm64.REG_R8, 8, "R8"}, + {9, arm64.REG_R9, 9, "R9"}, + {10, arm64.REG_R10, 10, "R10"}, + {11, arm64.REG_R11, 11, "R11"}, + {12, arm64.REG_R12, 12, "R12"}, + {13, arm64.REG_R13, 13, "R13"}, + {14, arm64.REG_R14, 14, "R14"}, + {15, arm64.REG_R15, 15, "R15"}, + {16, arm64.REG_R16, 16, "R16"}, + {17, arm64.REG_R17, 17, "R17"}, + {18, arm64.REG_R18, -1, "R18"}, + {19, arm64.REG_R19, 18, "R19"}, + {20, arm64.REG_R20, 19, "R20"}, + {21, arm64.REG_R21, 20, "R21"}, + {22, arm64.REG_R22, 21, "R22"}, + {23, arm64.REG_R23, 22, "R23"}, + {24, arm64.REG_R24, 23, "R24"}, + {25, arm64.REG_R25, 24, "R25"}, + {26, arm64.REG_R26, 25, "R26"}, + {27, arm64.REGG, -1, "g"}, + {28, arm64.REG_R29, -1, "R29"}, + {29, arm64.REG_R30, 26, "R30"}, + {30, arm64.REGSP, -1, "SP"}, + {31, arm64.REG_F0, -1, "F0"}, + {32, arm64.REG_F1, -1, "F1"}, + {33, arm64.REG_F2, -1, "F2"}, + {34, arm64.REG_F3, -1, "F3"}, + {35, arm64.REG_F4, -1, "F4"}, + {36, arm64.REG_F5, -1, "F5"}, + {37, arm64.REG_F6, -1, "F6"}, + {38, arm64.REG_F7, -1, "F7"}, + {39, arm64.REG_F8, -1, "F8"}, + {40, arm64.REG_F9, -1, "F9"}, + {41, arm64.REG_F10, -1, "F10"}, + {42, arm64.REG_F11, -1, "F11"}, + {43, arm64.REG_F12, -1, "F12"}, + {44, arm64.REG_F13, -1, "F13"}, + {45, arm64.REG_F14, -1, "F14"}, 
+ {46, arm64.REG_F15, -1, "F15"}, + {47, arm64.REG_F16, -1, "F16"}, + {48, arm64.REG_F17, -1, "F17"}, + {49, arm64.REG_F18, -1, "F18"}, + {50, arm64.REG_F19, -1, "F19"}, + {51, arm64.REG_F20, -1, "F20"}, + {52, arm64.REG_F21, -1, "F21"}, + {53, arm64.REG_F22, -1, "F22"}, + {54, arm64.REG_F23, -1, "F23"}, + {55, arm64.REG_F24, -1, "F24"}, + {56, arm64.REG_F25, -1, "F25"}, + {57, arm64.REG_F26, -1, "F26"}, + {58, arm64.REG_F27, -1, "F27"}, + {59, arm64.REG_F28, -1, "F28"}, + {60, arm64.REG_F29, -1, "F29"}, + {61, arm64.REG_F30, -1, "F30"}, + {62, arm64.REG_F31, -1, "F31"}, + {63, 0, -1, "SB"}, +} +var paramIntRegARM64 = []int8{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} +var paramFloatRegARM64 = []int8{31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46} +var gpRegMaskARM64 = regMask(670826495) +var fpRegMaskARM64 = regMask(9223372034707292160) +var specialRegMaskARM64 = regMask(0) +var framepointerRegARM64 = int8(-1) +var linkRegARM64 = int8(29) +var registersLOONG64 = [...]Register{ + {0, loong64.REG_R0, -1, "R0"}, + {1, loong64.REG_R1, -1, "R1"}, + {2, loong64.REGSP, -1, "SP"}, + {3, loong64.REG_R4, 0, "R4"}, + {4, loong64.REG_R5, 1, "R5"}, + {5, loong64.REG_R6, 2, "R6"}, + {6, loong64.REG_R7, 3, "R7"}, + {7, loong64.REG_R8, 4, "R8"}, + {8, loong64.REG_R9, 5, "R9"}, + {9, loong64.REG_R10, 6, "R10"}, + {10, loong64.REG_R11, 7, "R11"}, + {11, loong64.REG_R12, 8, "R12"}, + {12, loong64.REG_R13, 9, "R13"}, + {13, loong64.REG_R14, 10, "R14"}, + {14, loong64.REG_R15, 11, "R15"}, + {15, loong64.REG_R16, 12, "R16"}, + {16, loong64.REG_R17, 13, "R17"}, + {17, loong64.REG_R18, 14, "R18"}, + {18, loong64.REG_R19, 15, "R19"}, + {19, loong64.REG_R20, 16, "R20"}, + {20, loong64.REG_R21, 17, "R21"}, + {21, loong64.REGG, -1, "g"}, + {22, loong64.REG_R23, 18, "R23"}, + {23, loong64.REG_R24, 19, "R24"}, + {24, loong64.REG_R25, 20, "R25"}, + {25, loong64.REG_R26, 21, "R26"}, + {26, loong64.REG_R27, 22, "R27"}, + {27, loong64.REG_R28, 23, "R28"}, + {28, 
loong64.REG_R29, 24, "R29"}, + {29, loong64.REG_R31, 25, "R31"}, + {30, loong64.REG_F0, -1, "F0"}, + {31, loong64.REG_F1, -1, "F1"}, + {32, loong64.REG_F2, -1, "F2"}, + {33, loong64.REG_F3, -1, "F3"}, + {34, loong64.REG_F4, -1, "F4"}, + {35, loong64.REG_F5, -1, "F5"}, + {36, loong64.REG_F6, -1, "F6"}, + {37, loong64.REG_F7, -1, "F7"}, + {38, loong64.REG_F8, -1, "F8"}, + {39, loong64.REG_F9, -1, "F9"}, + {40, loong64.REG_F10, -1, "F10"}, + {41, loong64.REG_F11, -1, "F11"}, + {42, loong64.REG_F12, -1, "F12"}, + {43, loong64.REG_F13, -1, "F13"}, + {44, loong64.REG_F14, -1, "F14"}, + {45, loong64.REG_F15, -1, "F15"}, + {46, loong64.REG_F16, -1, "F16"}, + {47, loong64.REG_F17, -1, "F17"}, + {48, loong64.REG_F18, -1, "F18"}, + {49, loong64.REG_F19, -1, "F19"}, + {50, loong64.REG_F20, -1, "F20"}, + {51, loong64.REG_F21, -1, "F21"}, + {52, loong64.REG_F22, -1, "F22"}, + {53, loong64.REG_F23, -1, "F23"}, + {54, loong64.REG_F24, -1, "F24"}, + {55, loong64.REG_F25, -1, "F25"}, + {56, loong64.REG_F26, -1, "F26"}, + {57, loong64.REG_F27, -1, "F27"}, + {58, loong64.REG_F28, -1, "F28"}, + {59, loong64.REG_F29, -1, "F29"}, + {60, loong64.REG_F30, -1, "F30"}, + {61, loong64.REG_F31, -1, "F31"}, + {62, 0, -1, "SB"}, +} +var paramIntRegLOONG64 = []int8{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18} +var paramFloatRegLOONG64 = []int8{30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45} +var gpRegMaskLOONG64 = regMask(1071644664) +var fpRegMaskLOONG64 = regMask(4611686017353646080) +var specialRegMaskLOONG64 = regMask(0) +var framepointerRegLOONG64 = int8(-1) +var linkRegLOONG64 = int8(1) +var registersMIPS = [...]Register{ + {0, mips.REG_R0, -1, "R0"}, + {1, mips.REG_R1, 0, "R1"}, + {2, mips.REG_R2, 1, "R2"}, + {3, mips.REG_R3, 2, "R3"}, + {4, mips.REG_R4, 3, "R4"}, + {5, mips.REG_R5, 4, "R5"}, + {6, mips.REG_R6, 5, "R6"}, + {7, mips.REG_R7, 6, "R7"}, + {8, mips.REG_R8, 7, "R8"}, + {9, mips.REG_R9, 8, "R9"}, + {10, mips.REG_R10, 9, "R10"}, + {11, mips.REG_R11, 10, 
"R11"}, + {12, mips.REG_R12, 11, "R12"}, + {13, mips.REG_R13, 12, "R13"}, + {14, mips.REG_R14, 13, "R14"}, + {15, mips.REG_R15, 14, "R15"}, + {16, mips.REG_R16, 15, "R16"}, + {17, mips.REG_R17, 16, "R17"}, + {18, mips.REG_R18, 17, "R18"}, + {19, mips.REG_R19, 18, "R19"}, + {20, mips.REG_R20, 19, "R20"}, + {21, mips.REG_R21, 20, "R21"}, + {22, mips.REG_R22, 21, "R22"}, + {23, mips.REG_R24, 22, "R24"}, + {24, mips.REG_R25, 23, "R25"}, + {25, mips.REG_R28, 24, "R28"}, + {26, mips.REGSP, -1, "SP"}, + {27, mips.REGG, -1, "g"}, + {28, mips.REG_R31, 25, "R31"}, + {29, mips.REG_F0, -1, "F0"}, + {30, mips.REG_F2, -1, "F2"}, + {31, mips.REG_F4, -1, "F4"}, + {32, mips.REG_F6, -1, "F6"}, + {33, mips.REG_F8, -1, "F8"}, + {34, mips.REG_F10, -1, "F10"}, + {35, mips.REG_F12, -1, "F12"}, + {36, mips.REG_F14, -1, "F14"}, + {37, mips.REG_F16, -1, "F16"}, + {38, mips.REG_F18, -1, "F18"}, + {39, mips.REG_F20, -1, "F20"}, + {40, mips.REG_F22, -1, "F22"}, + {41, mips.REG_F24, -1, "F24"}, + {42, mips.REG_F26, -1, "F26"}, + {43, mips.REG_F28, -1, "F28"}, + {44, mips.REG_F30, -1, "F30"}, + {45, mips.REG_HI, -1, "HI"}, + {46, mips.REG_LO, -1, "LO"}, + {47, 0, -1, "SB"}, +} +var paramIntRegMIPS = []int8(nil) +var paramFloatRegMIPS = []int8(nil) +var gpRegMaskMIPS = regMask(335544318) +var fpRegMaskMIPS = regMask(35183835217920) +var specialRegMaskMIPS = regMask(105553116266496) +var framepointerRegMIPS = int8(-1) +var linkRegMIPS = int8(28) +var registersMIPS64 = [...]Register{ + {0, mips.REG_R0, -1, "R0"}, + {1, mips.REG_R1, 0, "R1"}, + {2, mips.REG_R2, 1, "R2"}, + {3, mips.REG_R3, 2, "R3"}, + {4, mips.REG_R4, 3, "R4"}, + {5, mips.REG_R5, 4, "R5"}, + {6, mips.REG_R6, 5, "R6"}, + {7, mips.REG_R7, 6, "R7"}, + {8, mips.REG_R8, 7, "R8"}, + {9, mips.REG_R9, 8, "R9"}, + {10, mips.REG_R10, 9, "R10"}, + {11, mips.REG_R11, 10, "R11"}, + {12, mips.REG_R12, 11, "R12"}, + {13, mips.REG_R13, 12, "R13"}, + {14, mips.REG_R14, 13, "R14"}, + {15, mips.REG_R15, 14, "R15"}, + {16, mips.REG_R16, 15, "R16"}, + 
{17, mips.REG_R17, 16, "R17"}, + {18, mips.REG_R18, 17, "R18"}, + {19, mips.REG_R19, 18, "R19"}, + {20, mips.REG_R20, 19, "R20"}, + {21, mips.REG_R21, 20, "R21"}, + {22, mips.REG_R22, 21, "R22"}, + {23, mips.REG_R24, 22, "R24"}, + {24, mips.REG_R25, 23, "R25"}, + {25, mips.REGSP, -1, "SP"}, + {26, mips.REGG, -1, "g"}, + {27, mips.REG_R31, 24, "R31"}, + {28, mips.REG_F0, -1, "F0"}, + {29, mips.REG_F1, -1, "F1"}, + {30, mips.REG_F2, -1, "F2"}, + {31, mips.REG_F3, -1, "F3"}, + {32, mips.REG_F4, -1, "F4"}, + {33, mips.REG_F5, -1, "F5"}, + {34, mips.REG_F6, -1, "F6"}, + {35, mips.REG_F7, -1, "F7"}, + {36, mips.REG_F8, -1, "F8"}, + {37, mips.REG_F9, -1, "F9"}, + {38, mips.REG_F10, -1, "F10"}, + {39, mips.REG_F11, -1, "F11"}, + {40, mips.REG_F12, -1, "F12"}, + {41, mips.REG_F13, -1, "F13"}, + {42, mips.REG_F14, -1, "F14"}, + {43, mips.REG_F15, -1, "F15"}, + {44, mips.REG_F16, -1, "F16"}, + {45, mips.REG_F17, -1, "F17"}, + {46, mips.REG_F18, -1, "F18"}, + {47, mips.REG_F19, -1, "F19"}, + {48, mips.REG_F20, -1, "F20"}, + {49, mips.REG_F21, -1, "F21"}, + {50, mips.REG_F22, -1, "F22"}, + {51, mips.REG_F23, -1, "F23"}, + {52, mips.REG_F24, -1, "F24"}, + {53, mips.REG_F25, -1, "F25"}, + {54, mips.REG_F26, -1, "F26"}, + {55, mips.REG_F27, -1, "F27"}, + {56, mips.REG_F28, -1, "F28"}, + {57, mips.REG_F29, -1, "F29"}, + {58, mips.REG_F30, -1, "F30"}, + {59, mips.REG_F31, -1, "F31"}, + {60, mips.REG_HI, -1, "HI"}, + {61, mips.REG_LO, -1, "LO"}, + {62, 0, -1, "SB"}, +} +var paramIntRegMIPS64 = []int8(nil) +var paramFloatRegMIPS64 = []int8(nil) +var gpRegMaskMIPS64 = regMask(167772158) +var fpRegMaskMIPS64 = regMask(1152921504338411520) +var specialRegMaskMIPS64 = regMask(3458764513820540928) +var framepointerRegMIPS64 = int8(-1) +var linkRegMIPS64 = int8(27) +var registersPPC64 = [...]Register{ + {0, ppc64.REG_R0, -1, "R0"}, + {1, ppc64.REGSP, -1, "SP"}, + {2, 0, -1, "SB"}, + {3, ppc64.REG_R3, 0, "R3"}, + {4, ppc64.REG_R4, 1, "R4"}, + {5, ppc64.REG_R5, 2, "R5"}, + {6, ppc64.REG_R6, 
3, "R6"}, + {7, ppc64.REG_R7, 4, "R7"}, + {8, ppc64.REG_R8, 5, "R8"}, + {9, ppc64.REG_R9, 6, "R9"}, + {10, ppc64.REG_R10, 7, "R10"}, + {11, ppc64.REG_R11, 8, "R11"}, + {12, ppc64.REG_R12, 9, "R12"}, + {13, ppc64.REG_R13, -1, "R13"}, + {14, ppc64.REG_R14, 10, "R14"}, + {15, ppc64.REG_R15, 11, "R15"}, + {16, ppc64.REG_R16, 12, "R16"}, + {17, ppc64.REG_R17, 13, "R17"}, + {18, ppc64.REG_R18, 14, "R18"}, + {19, ppc64.REG_R19, 15, "R19"}, + {20, ppc64.REG_R20, 16, "R20"}, + {21, ppc64.REG_R21, 17, "R21"}, + {22, ppc64.REG_R22, 18, "R22"}, + {23, ppc64.REG_R23, 19, "R23"}, + {24, ppc64.REG_R24, 20, "R24"}, + {25, ppc64.REG_R25, 21, "R25"}, + {26, ppc64.REG_R26, 22, "R26"}, + {27, ppc64.REG_R27, 23, "R27"}, + {28, ppc64.REG_R28, 24, "R28"}, + {29, ppc64.REG_R29, 25, "R29"}, + {30, ppc64.REGG, -1, "g"}, + {31, ppc64.REG_R31, -1, "R31"}, + {32, ppc64.REG_F0, -1, "F0"}, + {33, ppc64.REG_F1, -1, "F1"}, + {34, ppc64.REG_F2, -1, "F2"}, + {35, ppc64.REG_F3, -1, "F3"}, + {36, ppc64.REG_F4, -1, "F4"}, + {37, ppc64.REG_F5, -1, "F5"}, + {38, ppc64.REG_F6, -1, "F6"}, + {39, ppc64.REG_F7, -1, "F7"}, + {40, ppc64.REG_F8, -1, "F8"}, + {41, ppc64.REG_F9, -1, "F9"}, + {42, ppc64.REG_F10, -1, "F10"}, + {43, ppc64.REG_F11, -1, "F11"}, + {44, ppc64.REG_F12, -1, "F12"}, + {45, ppc64.REG_F13, -1, "F13"}, + {46, ppc64.REG_F14, -1, "F14"}, + {47, ppc64.REG_F15, -1, "F15"}, + {48, ppc64.REG_F16, -1, "F16"}, + {49, ppc64.REG_F17, -1, "F17"}, + {50, ppc64.REG_F18, -1, "F18"}, + {51, ppc64.REG_F19, -1, "F19"}, + {52, ppc64.REG_F20, -1, "F20"}, + {53, ppc64.REG_F21, -1, "F21"}, + {54, ppc64.REG_F22, -1, "F22"}, + {55, ppc64.REG_F23, -1, "F23"}, + {56, ppc64.REG_F24, -1, "F24"}, + {57, ppc64.REG_F25, -1, "F25"}, + {58, ppc64.REG_F26, -1, "F26"}, + {59, ppc64.REG_F27, -1, "F27"}, + {60, ppc64.REG_F28, -1, "F28"}, + {61, ppc64.REG_F29, -1, "F29"}, + {62, ppc64.REG_F30, -1, "F30"}, + {63, ppc64.REG_XER, -1, "XER"}, +} +var paramIntRegPPC64 = []int8{3, 4, 5, 6, 7, 8, 9, 10, 14, 15, 16, 17} +var 
paramFloatRegPPC64 = []int8{33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44} +var gpRegMaskPPC64 = regMask(1073733624) +var fpRegMaskPPC64 = regMask(9223372032559808512) +var specialRegMaskPPC64 = regMask(9223372036854775808) +var framepointerRegPPC64 = int8(-1) +var linkRegPPC64 = int8(-1) +var registersRISCV64 = [...]Register{ + {0, riscv.REG_X0, -1, "X0"}, + {1, riscv.REGSP, -1, "SP"}, + {2, riscv.REG_X3, -1, "X3"}, + {3, riscv.REG_X4, -1, "X4"}, + {4, riscv.REG_X5, 0, "X5"}, + {5, riscv.REG_X6, 1, "X6"}, + {6, riscv.REG_X7, 2, "X7"}, + {7, riscv.REG_X8, 3, "X8"}, + {8, riscv.REG_X9, 4, "X9"}, + {9, riscv.REG_X10, 5, "X10"}, + {10, riscv.REG_X11, 6, "X11"}, + {11, riscv.REG_X12, 7, "X12"}, + {12, riscv.REG_X13, 8, "X13"}, + {13, riscv.REG_X14, 9, "X14"}, + {14, riscv.REG_X15, 10, "X15"}, + {15, riscv.REG_X16, 11, "X16"}, + {16, riscv.REG_X17, 12, "X17"}, + {17, riscv.REG_X18, 13, "X18"}, + {18, riscv.REG_X19, 14, "X19"}, + {19, riscv.REG_X20, 15, "X20"}, + {20, riscv.REG_X21, 16, "X21"}, + {21, riscv.REG_X22, 17, "X22"}, + {22, riscv.REG_X23, 18, "X23"}, + {23, riscv.REG_X24, 19, "X24"}, + {24, riscv.REG_X25, 20, "X25"}, + {25, riscv.REG_X26, 21, "X26"}, + {26, riscv.REGG, -1, "g"}, + {27, riscv.REG_X28, 22, "X28"}, + {28, riscv.REG_X29, 23, "X29"}, + {29, riscv.REG_X30, 24, "X30"}, + {30, riscv.REG_X31, -1, "X31"}, + {31, riscv.REG_F0, -1, "F0"}, + {32, riscv.REG_F1, -1, "F1"}, + {33, riscv.REG_F2, -1, "F2"}, + {34, riscv.REG_F3, -1, "F3"}, + {35, riscv.REG_F4, -1, "F4"}, + {36, riscv.REG_F5, -1, "F5"}, + {37, riscv.REG_F6, -1, "F6"}, + {38, riscv.REG_F7, -1, "F7"}, + {39, riscv.REG_F8, -1, "F8"}, + {40, riscv.REG_F9, -1, "F9"}, + {41, riscv.REG_F10, -1, "F10"}, + {42, riscv.REG_F11, -1, "F11"}, + {43, riscv.REG_F12, -1, "F12"}, + {44, riscv.REG_F13, -1, "F13"}, + {45, riscv.REG_F14, -1, "F14"}, + {46, riscv.REG_F15, -1, "F15"}, + {47, riscv.REG_F16, -1, "F16"}, + {48, riscv.REG_F17, -1, "F17"}, + {49, riscv.REG_F18, -1, "F18"}, + {50, riscv.REG_F19, -1, "F19"}, 
+ {51, riscv.REG_F20, -1, "F20"}, + {52, riscv.REG_F21, -1, "F21"}, + {53, riscv.REG_F22, -1, "F22"}, + {54, riscv.REG_F23, -1, "F23"}, + {55, riscv.REG_F24, -1, "F24"}, + {56, riscv.REG_F25, -1, "F25"}, + {57, riscv.REG_F26, -1, "F26"}, + {58, riscv.REG_F27, -1, "F27"}, + {59, riscv.REG_F28, -1, "F28"}, + {60, riscv.REG_F29, -1, "F29"}, + {61, riscv.REG_F30, -1, "F30"}, + {62, riscv.REG_F31, -1, "F31"}, + {63, 0, -1, "SB"}, +} +var paramIntRegRISCV64 = []int8{9, 10, 11, 12, 13, 14, 15, 16, 7, 8, 17, 18, 19, 20, 21, 22} +var paramFloatRegRISCV64 = []int8{41, 42, 43, 44, 45, 46, 47, 48, 39, 40, 49, 50, 51, 52, 53, 54} +var gpRegMaskRISCV64 = regMask(1006632944) +var fpRegMaskRISCV64 = regMask(9223372034707292160) +var specialRegMaskRISCV64 = regMask(0) +var framepointerRegRISCV64 = int8(-1) +var linkRegRISCV64 = int8(0) +var registersS390X = [...]Register{ + {0, s390x.REG_R0, 0, "R0"}, + {1, s390x.REG_R1, 1, "R1"}, + {2, s390x.REG_R2, 2, "R2"}, + {3, s390x.REG_R3, 3, "R3"}, + {4, s390x.REG_R4, 4, "R4"}, + {5, s390x.REG_R5, 5, "R5"}, + {6, s390x.REG_R6, 6, "R6"}, + {7, s390x.REG_R7, 7, "R7"}, + {8, s390x.REG_R8, 8, "R8"}, + {9, s390x.REG_R9, 9, "R9"}, + {10, s390x.REG_R10, -1, "R10"}, + {11, s390x.REG_R11, 10, "R11"}, + {12, s390x.REG_R12, 11, "R12"}, + {13, s390x.REGG, -1, "g"}, + {14, s390x.REG_R14, 12, "R14"}, + {15, s390x.REGSP, -1, "SP"}, + {16, s390x.REG_F0, -1, "F0"}, + {17, s390x.REG_F1, -1, "F1"}, + {18, s390x.REG_F2, -1, "F2"}, + {19, s390x.REG_F3, -1, "F3"}, + {20, s390x.REG_F4, -1, "F4"}, + {21, s390x.REG_F5, -1, "F5"}, + {22, s390x.REG_F6, -1, "F6"}, + {23, s390x.REG_F7, -1, "F7"}, + {24, s390x.REG_F8, -1, "F8"}, + {25, s390x.REG_F9, -1, "F9"}, + {26, s390x.REG_F10, -1, "F10"}, + {27, s390x.REG_F11, -1, "F11"}, + {28, s390x.REG_F12, -1, "F12"}, + {29, s390x.REG_F13, -1, "F13"}, + {30, s390x.REG_F14, -1, "F14"}, + {31, s390x.REG_F15, -1, "F15"}, + {32, 0, -1, "SB"}, +} +var paramIntRegS390X = []int8(nil) +var paramFloatRegS390X = []int8(nil) +var 
gpRegMaskS390X = regMask(23551) +var fpRegMaskS390X = regMask(4294901760) +var specialRegMaskS390X = regMask(0) +var framepointerRegS390X = int8(-1) +var linkRegS390X = int8(14) +var registersWasm = [...]Register{ + {0, wasm.REG_R0, 0, "R0"}, + {1, wasm.REG_R1, 1, "R1"}, + {2, wasm.REG_R2, 2, "R2"}, + {3, wasm.REG_R3, 3, "R3"}, + {4, wasm.REG_R4, 4, "R4"}, + {5, wasm.REG_R5, 5, "R5"}, + {6, wasm.REG_R6, 6, "R6"}, + {7, wasm.REG_R7, 7, "R7"}, + {8, wasm.REG_R8, 8, "R8"}, + {9, wasm.REG_R9, 9, "R9"}, + {10, wasm.REG_R10, 10, "R10"}, + {11, wasm.REG_R11, 11, "R11"}, + {12, wasm.REG_R12, 12, "R12"}, + {13, wasm.REG_R13, 13, "R13"}, + {14, wasm.REG_R14, 14, "R14"}, + {15, wasm.REG_R15, 15, "R15"}, + {16, wasm.REG_F0, -1, "F0"}, + {17, wasm.REG_F1, -1, "F1"}, + {18, wasm.REG_F2, -1, "F2"}, + {19, wasm.REG_F3, -1, "F3"}, + {20, wasm.REG_F4, -1, "F4"}, + {21, wasm.REG_F5, -1, "F5"}, + {22, wasm.REG_F6, -1, "F6"}, + {23, wasm.REG_F7, -1, "F7"}, + {24, wasm.REG_F8, -1, "F8"}, + {25, wasm.REG_F9, -1, "F9"}, + {26, wasm.REG_F10, -1, "F10"}, + {27, wasm.REG_F11, -1, "F11"}, + {28, wasm.REG_F12, -1, "F12"}, + {29, wasm.REG_F13, -1, "F13"}, + {30, wasm.REG_F14, -1, "F14"}, + {31, wasm.REG_F15, -1, "F15"}, + {32, wasm.REG_F16, -1, "F16"}, + {33, wasm.REG_F17, -1, "F17"}, + {34, wasm.REG_F18, -1, "F18"}, + {35, wasm.REG_F19, -1, "F19"}, + {36, wasm.REG_F20, -1, "F20"}, + {37, wasm.REG_F21, -1, "F21"}, + {38, wasm.REG_F22, -1, "F22"}, + {39, wasm.REG_F23, -1, "F23"}, + {40, wasm.REG_F24, -1, "F24"}, + {41, wasm.REG_F25, -1, "F25"}, + {42, wasm.REG_F26, -1, "F26"}, + {43, wasm.REG_F27, -1, "F27"}, + {44, wasm.REG_F28, -1, "F28"}, + {45, wasm.REG_F29, -1, "F29"}, + {46, wasm.REG_F30, -1, "F30"}, + {47, wasm.REG_F31, -1, "F31"}, + {48, wasm.REGSP, -1, "SP"}, + {49, wasm.REGG, -1, "g"}, + {50, 0, -1, "SB"}, +} +var paramIntRegWasm = []int8(nil) +var paramFloatRegWasm = []int8(nil) +var gpRegMaskWasm = regMask(65535) +var fpRegMaskWasm = regMask(281474976645120) +var fp32RegMaskWasm = 
regMask(4294901760) +var fp64RegMaskWasm = regMask(281470681743360) +var specialRegMaskWasm = regMask(0) +var framepointerRegWasm = int8(-1) +var linkRegWasm = int8(-1) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/opt.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/opt.go new file mode 100644 index 0000000000000000000000000000000000000000..0f15c3db4a7ddb4124df2b40ad7f5ef176a13505 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/opt.go @@ -0,0 +1,10 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// machine-independent optimization. +func opt(f *Func) { + applyRewrite(f, rewriteBlockgeneric, rewriteValuegeneric, removeDeadValues) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/passbm_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/passbm_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3fd3eb579bc0954fd1beeacecadd8b55c3e686b7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/passbm_test.go @@ -0,0 +1,101 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import ( + "cmd/compile/internal/types" + "fmt" + "testing" +) + +const ( + blockCount = 1000 + passCount = 15000 +) + +type passFunc func(*Func) + +func BenchmarkDSEPass(b *testing.B) { benchFnPass(b, dse, blockCount, genFunction) } +func BenchmarkDSEPassBlock(b *testing.B) { benchFnBlock(b, dse, genFunction) } +func BenchmarkCSEPass(b *testing.B) { benchFnPass(b, cse, blockCount, genFunction) } +func BenchmarkCSEPassBlock(b *testing.B) { benchFnBlock(b, cse, genFunction) } +func BenchmarkDeadcodePass(b *testing.B) { benchFnPass(b, deadcode, blockCount, genFunction) } +func BenchmarkDeadcodePassBlock(b *testing.B) { benchFnBlock(b, deadcode, genFunction) } + +func multi(f *Func) { + cse(f) + dse(f) + deadcode(f) +} +func BenchmarkMultiPass(b *testing.B) { benchFnPass(b, multi, blockCount, genFunction) } +func BenchmarkMultiPassBlock(b *testing.B) { benchFnBlock(b, multi, genFunction) } + +// benchFnPass runs passFunc b.N times across a single function. +func benchFnPass(b *testing.B, fn passFunc, size int, bg blockGen) { + b.ReportAllocs() + c := testConfig(b) + fun := c.Fun("entry", bg(size)...) + CheckFunc(fun.f) + b.ResetTimer() + for i := 0; i < b.N; i++ { + fn(fun.f) + b.StopTimer() + CheckFunc(fun.f) + b.StartTimer() + } +} + +// benchFnPass runs passFunc across a function with b.N blocks. +func benchFnBlock(b *testing.B, fn passFunc, bg blockGen) { + b.ReportAllocs() + c := testConfig(b) + fun := c.Fun("entry", bg(b.N)...) 
+ CheckFunc(fun.f) + b.ResetTimer() + for i := 0; i < passCount; i++ { + fn(fun.f) + } + b.StopTimer() +} + +func genFunction(size int) []bloc { + var blocs []bloc + elemType := types.Types[types.TINT64] + ptrType := elemType.PtrTo() + + valn := func(s string, m, n int) string { return fmt.Sprintf("%s%d-%d", s, m, n) } + blocs = append(blocs, + Bloc("entry", + Valu(valn("store", 0, 4), OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, types.Types[types.TUINTPTR], 0, nil), + Goto(blockn(1)), + ), + ) + for i := 1; i < size+1; i++ { + blocs = append(blocs, Bloc(blockn(i), + Valu(valn("v", i, 0), OpConstBool, types.Types[types.TBOOL], 1, nil), + Valu(valn("addr", i, 1), OpAddr, ptrType, 0, nil, "sb"), + Valu(valn("addr", i, 2), OpAddr, ptrType, 0, nil, "sb"), + Valu(valn("addr", i, 3), OpAddr, ptrType, 0, nil, "sb"), + Valu(valn("zero", i, 1), OpZero, types.TypeMem, 8, elemType, valn("addr", i, 3), + valn("store", i-1, 4)), + Valu(valn("store", i, 1), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 1), + valn("v", i, 0), valn("zero", i, 1)), + Valu(valn("store", i, 2), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 2), + valn("v", i, 0), valn("store", i, 1)), + Valu(valn("store", i, 3), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 1), + valn("v", i, 0), valn("store", i, 2)), + Valu(valn("store", i, 4), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 3), + valn("v", i, 0), valn("store", i, 3)), + Goto(blockn(i+1)))) + } + + blocs = append(blocs, + Bloc(blockn(size+1), Goto("exit")), + Bloc("exit", Exit("store0-4")), + ) + + return blocs +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/phielim.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/phielim.go new file mode 100644 index 0000000000000000000000000000000000000000..4fc942375fdef36f75d2c6b07e1035142adbb306 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/phielim.go @@ -0,0 +1,75 @@ +// Copyright 2015 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// phielim eliminates redundant phi values from f. +// A phi is redundant if its arguments are all equal. For +// purposes of counting, ignore the phi itself. Both of +// these phis are redundant: +// +// v = phi(x,x,x) +// v = phi(x,v,x,v) +// +// We repeat this process to also catch situations like: +// +// v = phi(x, phi(x, x), phi(x, v)) +// +// TODO: Can we also simplify cases like: +// +// v = phi(v, w, x) +// w = phi(v, w, x) +// +// and would that be useful? +func phielim(f *Func) { + for { + change := false + for _, b := range f.Blocks { + for _, v := range b.Values { + copyelimValue(v) + change = phielimValue(v) || change + } + } + if !change { + break + } + } +} + +// phielimValue tries to convert the phi v to a copy. +func phielimValue(v *Value) bool { + if v.Op != OpPhi { + return false + } + + // If there are two distinct args of v which + // are not v itself, then the phi must remain. + // Otherwise, we can replace it with a copy. + var w *Value + for _, x := range v.Args { + if x == v { + continue + } + if x == w { + continue + } + if w != nil { + return false + } + w = x + } + + if w == nil { + // v references only itself. It must be in + // a dead code loop. Don't bother modifying it. + return false + } + v.Op = OpCopy + v.SetArgs1(w) + f := v.Block.Func + if f.pass.debug > 0 { + f.Warnl(v.Pos, "eliminated phi") + } + return true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/phiopt.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/phiopt.go new file mode 100644 index 0000000000000000000000000000000000000000..037845eacf2db6afeb1fe683ce253f2ee4526e0a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/phiopt.go @@ -0,0 +1,325 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// phiopt eliminates boolean Phis based on the previous if. +// +// Main use case is to transform: +// +// x := false +// if b { +// x = true +// } +// +// into x = b. +// +// In SSA code this appears as +// +// b0 +// If b -> b1 b2 +// b1 +// Plain -> b2 +// b2 +// x = (OpPhi (ConstBool [true]) (ConstBool [false])) +// +// In this case we can replace x with a copy of b. +func phiopt(f *Func) { + sdom := f.Sdom() + for _, b := range f.Blocks { + if len(b.Preds) != 2 || len(b.Values) == 0 { + // TODO: handle more than 2 predecessors, e.g. a || b || c. + continue + } + + pb0, b0 := b, b.Preds[0].b + for len(b0.Succs) == 1 && len(b0.Preds) == 1 { + pb0, b0 = b0, b0.Preds[0].b + } + if b0.Kind != BlockIf { + continue + } + pb1, b1 := b, b.Preds[1].b + for len(b1.Succs) == 1 && len(b1.Preds) == 1 { + pb1, b1 = b1, b1.Preds[0].b + } + if b1 != b0 { + continue + } + // b0 is the if block giving the boolean value. + // reverse is the predecessor from which the truth value comes. + var reverse int + if b0.Succs[0].b == pb0 && b0.Succs[1].b == pb1 { + reverse = 0 + } else if b0.Succs[0].b == pb1 && b0.Succs[1].b == pb0 { + reverse = 1 + } else { + b.Fatalf("invalid predecessors\n") + } + + for _, v := range b.Values { + if v.Op != OpPhi { + continue + } + + // Look for conversions from bool to 0/1. 
+ if v.Type.IsInteger() { + phioptint(v, b0, reverse) + } + + if !v.Type.IsBoolean() { + continue + } + + // Replaces + // if a { x = true } else { x = false } with x = a + // and + // if a { x = false } else { x = true } with x = !a + if v.Args[0].Op == OpConstBool && v.Args[1].Op == OpConstBool { + if v.Args[reverse].AuxInt != v.Args[1-reverse].AuxInt { + ops := [2]Op{OpNot, OpCopy} + v.reset(ops[v.Args[reverse].AuxInt]) + v.AddArg(b0.Controls[0]) + if f.pass.debug > 0 { + f.Warnl(b.Pos, "converted OpPhi to %v", v.Op) + } + continue + } + } + + // Replaces + // if a { x = true } else { x = value } with x = a || value. + // Requires that value dominates x, meaning that regardless of a, + // value is always computed. This guarantees that the side effects + // of value are not seen if a is false. + if v.Args[reverse].Op == OpConstBool && v.Args[reverse].AuxInt == 1 { + if tmp := v.Args[1-reverse]; sdom.IsAncestorEq(tmp.Block, b) { + v.reset(OpOrB) + v.SetArgs2(b0.Controls[0], tmp) + if f.pass.debug > 0 { + f.Warnl(b.Pos, "converted OpPhi to %v", v.Op) + } + continue + } + } + + // Replaces + // if a { x = value } else { x = false } with x = a && value. + // Requires that value dominates x, meaning that regardless of a, + // value is always computed. This guarantees that the side effects + // of value are not seen if a is false. + if v.Args[1-reverse].Op == OpConstBool && v.Args[1-reverse].AuxInt == 0 { + if tmp := v.Args[reverse]; sdom.IsAncestorEq(tmp.Block, b) { + v.reset(OpAndB) + v.SetArgs2(b0.Controls[0], tmp) + if f.pass.debug > 0 { + f.Warnl(b.Pos, "converted OpPhi to %v", v.Op) + } + continue + } + } + } + } + // strengthen phi optimization. + // Main use case is to transform: + // x := false + // if c { + // x = true + // ... + // } + // into + // x := c + // if x { ... } + // + // For example, in SSA code a case appears as + // b0 + // If c -> b, sb0 + // sb0 + // If d -> sd0, sd1 + // sd1 + // ... 
+ // sd0 + // Plain -> b + // b + // x = (OpPhi (ConstBool [true]) (ConstBool [false])) + // + // In this case we can also replace x with a copy of c. + // + // The optimization idea: + // 1. block b has a phi value x, x = OpPhi (ConstBool [true]) (ConstBool [false]), + // and len(b.Preds) is equal to 2. + // 2. find the common dominator(b0) of the predecessors(pb0, pb1) of block b, and the + // dominator(b0) is a If block. + // Special case: one of the predecessors(pb0 or pb1) is the dominator(b0). + // 3. the successors(sb0, sb1) of the dominator need to dominate the predecessors(pb0, pb1) + // of block b respectively. + // 4. replace this boolean Phi based on dominator block. + // + // b0(pb0) b0(pb1) b0 + // | \ / | / \ + // | sb1 sb0 | sb0 sb1 + // | ... ... | ... ... + // | pb1 pb0 | pb0 pb1 + // | / \ | \ / + // b b b + // + var lca *lcaRange + for _, b := range f.Blocks { + if len(b.Preds) != 2 || len(b.Values) == 0 { + // TODO: handle more than 2 predecessors, e.g. a || b || c. + continue + } + + for _, v := range b.Values { + // find a phi value v = OpPhi (ConstBool [true]) (ConstBool [false]). + // TODO: v = OpPhi (ConstBool [true]) (Arg {value}) + if v.Op != OpPhi { + continue + } + if v.Args[0].Op != OpConstBool || v.Args[1].Op != OpConstBool { + continue + } + if v.Args[0].AuxInt == v.Args[1].AuxInt { + continue + } + + pb0 := b.Preds[0].b + pb1 := b.Preds[1].b + if pb0.Kind == BlockIf && pb0 == sdom.Parent(b) { + // special case: pb0 is the dominator block b0. + // b0(pb0) + // | \ + // | sb1 + // | ... + // | pb1 + // | / + // b + // if another successor sb1 of b0(pb0) dominates pb1, do replace. + ei := b.Preds[0].i + sb1 := pb0.Succs[1-ei].b + if sdom.IsAncestorEq(sb1, pb1) { + convertPhi(pb0, v, ei) + break + } + } else if pb1.Kind == BlockIf && pb1 == sdom.Parent(b) { + // special case: pb1 is the dominator block b0. + // b0(pb1) + // / | + // sb0 | + // ... 
| + // pb0 | + // \ | + // b + // if another successor sb0 of b0(pb0) dominates pb0, do replace. + ei := b.Preds[1].i + sb0 := pb1.Succs[1-ei].b + if sdom.IsAncestorEq(sb0, pb0) { + convertPhi(pb1, v, 1-ei) + break + } + } else { + // b0 + // / \ + // sb0 sb1 + // ... ... + // pb0 pb1 + // \ / + // b + // + // Build data structure for fast least-common-ancestor queries. + if lca == nil { + lca = makeLCArange(f) + } + b0 := lca.find(pb0, pb1) + if b0.Kind != BlockIf { + break + } + sb0 := b0.Succs[0].b + sb1 := b0.Succs[1].b + var reverse int + if sdom.IsAncestorEq(sb0, pb0) && sdom.IsAncestorEq(sb1, pb1) { + reverse = 0 + } else if sdom.IsAncestorEq(sb1, pb0) && sdom.IsAncestorEq(sb0, pb1) { + reverse = 1 + } else { + break + } + if len(sb0.Preds) != 1 || len(sb1.Preds) != 1 { + // we can not replace phi value x in the following case. + // if gp == nil || sp < lo { x = true} + // if a || b { x = true } + // so the if statement can only have one condition. + break + } + convertPhi(b0, v, reverse) + } + } + } +} + +func phioptint(v *Value, b0 *Block, reverse int) { + a0 := v.Args[0] + a1 := v.Args[1] + if a0.Op != a1.Op { + return + } + + switch a0.Op { + case OpConst8, OpConst16, OpConst32, OpConst64: + default: + return + } + + negate := false + switch { + case a0.AuxInt == 0 && a1.AuxInt == 1: + negate = true + case a0.AuxInt == 1 && a1.AuxInt == 0: + default: + return + } + + if reverse == 1 { + negate = !negate + } + + a := b0.Controls[0] + if negate { + a = v.Block.NewValue1(v.Pos, OpNot, a.Type, a) + } + v.AddArg(a) + + cvt := v.Block.NewValue1(v.Pos, OpCvtBoolToUint8, v.Block.Func.Config.Types.UInt8, a) + switch v.Type.Size() { + case 1: + v.reset(OpCopy) + case 2: + v.reset(OpZeroExt8to16) + case 4: + v.reset(OpZeroExt8to32) + case 8: + v.reset(OpZeroExt8to64) + default: + v.Fatalf("bad int size %d", v.Type.Size()) + } + v.AddArg(cvt) + + f := b0.Func + if f.pass.debug > 0 { + f.Warnl(v.Block.Pos, "converted OpPhi bool -> int%d", v.Type.Size()*8) + } +} + 
+// b is the If block giving the boolean value. +// v is the phi value v = (OpPhi (ConstBool [true]) (ConstBool [false])). +// reverse is the predecessor from which the truth value comes. +func convertPhi(b *Block, v *Value, reverse int) { + f := b.Func + ops := [2]Op{OpNot, OpCopy} + v.reset(ops[v.Args[reverse].AuxInt]) + v.AddArg(b.Controls[0]) + if f.pass.debug > 0 { + f.Warnl(b.Pos, "converted OpPhi to %v", v.Op) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/poset.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/poset.go new file mode 100644 index 0000000000000000000000000000000000000000..7b64843fe964d2fe38921e101b41aa49c69c68c9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/poset.go @@ -0,0 +1,1358 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "fmt" + "os" +) + +// If true, check poset integrity after every mutation +var debugPoset = false + +const uintSize = 32 << (^uint(0) >> 63) // 32 or 64 + +// bitset is a bit array for dense indexes. 
+type bitset []uint + +func newBitset(n int) bitset { + return make(bitset, (n+uintSize-1)/uintSize) +} + +func (bs bitset) Reset() { + for i := range bs { + bs[i] = 0 + } +} + +func (bs bitset) Set(idx uint32) { + bs[idx/uintSize] |= 1 << (idx % uintSize) +} + +func (bs bitset) Clear(idx uint32) { + bs[idx/uintSize] &^= 1 << (idx % uintSize) +} + +func (bs bitset) Test(idx uint32) bool { + return bs[idx/uintSize]&(1<<(idx%uintSize)) != 0 +} + +type undoType uint8 + +const ( + undoInvalid undoType = iota + undoCheckpoint // a checkpoint to group undo passes + undoSetChl // change back left child of undo.idx to undo.edge + undoSetChr // change back right child of undo.idx to undo.edge + undoNonEqual // forget that SSA value undo.ID is non-equal to undo.idx (another ID) + undoNewNode // remove new node created for SSA value undo.ID + undoNewConstant // remove the constant node idx from the constants map + undoAliasNode // unalias SSA value undo.ID so that it points back to node index undo.idx + undoNewRoot // remove node undo.idx from root list + undoChangeRoot // remove node undo.idx from root list, and put back undo.edge.Target instead + undoMergeRoot // remove node undo.idx from root list, and put back its children instead +) + +// posetUndo represents an undo pass to be performed. +// It's a union of fields that can be used to store information, +// and typ is the discriminant, that specifies which kind +// of operation must be performed. Not all fields are always used. +type posetUndo struct { + typ undoType + idx uint32 + ID ID + edge posetEdge +} + +const ( + // Make poset handle constants as unsigned numbers. + posetFlagUnsigned = 1 << iota +) + +// A poset edge. The zero value is the null/empty edge. +// Packs target node index (31 bits) and strict flag (1 bit). 
+type posetEdge uint32 + +func newedge(t uint32, strict bool) posetEdge { + s := uint32(0) + if strict { + s = 1 + } + return posetEdge(t<<1 | s) +} +func (e posetEdge) Target() uint32 { return uint32(e) >> 1 } +func (e posetEdge) Strict() bool { return uint32(e)&1 != 0 } +func (e posetEdge) String() string { + s := fmt.Sprint(e.Target()) + if e.Strict() { + s += "*" + } + return s +} + +// posetNode is a node of a DAG within the poset. +type posetNode struct { + l, r posetEdge +} + +// poset is a union-find data structure that can represent a partially ordered set +// of SSA values. Given a binary relation that creates a partial order (eg: '<'), +// clients can record relations between SSA values using SetOrder, and later +// check relations (in the transitive closure) with Ordered. For instance, +// if SetOrder is called to record that A lower) { + lower = val2 + lowerptr = ptr + } else if val2 > val1 && (higherptr == 0 || val2 < higher) { + higher = val2 + higherptr = ptr + } + } + } else { + var lower, higher int64 + val1 := n.AuxInt + for val2, ptr := range po.constants { + if val1 == val2 { + panic("unreachable") + } + if val2 < val1 && (lowerptr == 0 || val2 > lower) { + lower = val2 + lowerptr = ptr + } else if val2 > val1 && (higherptr == 0 || val2 < higher) { + higher = val2 + higherptr = ptr + } + } + } + + if lowerptr == 0 && higherptr == 0 { + // This should not happen, as at least one + // other constant must exist if we get here. + panic("no constant found") + } + + // Create the new node and connect it to the bounds, so that + // lower < n < higher. We could have found both bounds or only one + // of them, depending on what other constants are present in the poset. + // Notice that we always link constants together, so they + // are always part of the same DAG. + switch { + case lowerptr != 0 && higherptr != 0: + // Both bounds are present, record lower < n < higher. 
+ po.addchild(lowerptr, i, true) + po.addchild(i, higherptr, true) + + case lowerptr != 0: + // Lower bound only, record lower < n. + po.addchild(lowerptr, i, true) + + case higherptr != 0: + // Higher bound only. To record n < higher, we need + // an extra root: + // + // extra + // / \ + // root \ + // / n + // .... / + // \ / + // higher + // + i2 := higherptr + r2 := po.findroot(i2) + if r2 != po.roots[0] { // all constants should be in root #0 + panic("constant not in root #0") + } + extra := po.newnode(nil) + po.changeroot(r2, extra) + po.upush(undoChangeRoot, extra, newedge(r2, false)) + po.addchild(extra, r2, false) + po.addchild(extra, i, false) + po.addchild(i, i2, true) + } + + po.constants[val] = i + po.upushconst(i, 0) +} + +// aliasnewnode records that a single node n2 (not in the poset yet) is an alias +// of the master node n1. +func (po *poset) aliasnewnode(n1, n2 *Value) { + i1, i2 := po.values[n1.ID], po.values[n2.ID] + if i1 == 0 || i2 != 0 { + panic("aliasnewnode invalid arguments") + } + + po.values[n2.ID] = i1 + po.upushalias(n2.ID, 0) +} + +// aliasnodes records that all the nodes i2s are aliases of a single master node n1. +// aliasnodes takes care of rearranging the DAG, changing references of parent/children +// of nodes in i2s, so that they point to n1 instead. +// Complexity is O(n) (with n being the total number of nodes in the poset, not just +// the number of nodes being aliased). 
+func (po *poset) aliasnodes(n1 *Value, i2s bitset) { + i1 := po.values[n1.ID] + if i1 == 0 { + panic("aliasnode for non-existing node") + } + if i2s.Test(i1) { + panic("aliasnode i2s contains n1 node") + } + + // Go through all the nodes to adjust parent/chidlren of nodes in i2s + for idx, n := range po.nodes { + // Do not touch i1 itself, otherwise we can create useless self-loops + if uint32(idx) == i1 { + continue + } + l, r := n.l, n.r + + // Rename all references to i2s into i1 + if i2s.Test(l.Target()) { + po.setchl(uint32(idx), newedge(i1, l.Strict())) + po.upush(undoSetChl, uint32(idx), l) + } + if i2s.Test(r.Target()) { + po.setchr(uint32(idx), newedge(i1, r.Strict())) + po.upush(undoSetChr, uint32(idx), r) + } + + // Connect all children of i2s to i1 (unless those children + // are in i2s as well, in which case it would be useless) + if i2s.Test(uint32(idx)) { + if l != 0 && !i2s.Test(l.Target()) { + po.addchild(i1, l.Target(), l.Strict()) + } + if r != 0 && !i2s.Test(r.Target()) { + po.addchild(i1, r.Target(), r.Strict()) + } + po.setchl(uint32(idx), 0) + po.setchr(uint32(idx), 0) + po.upush(undoSetChl, uint32(idx), l) + po.upush(undoSetChr, uint32(idx), r) + } + } + + // Reassign all existing IDs that point to i2 to i1. + // This includes n2.ID. + for k, v := range po.values { + if i2s.Test(v) { + po.values[k] = i1 + po.upushalias(k, v) + } + } + + // If one of the aliased nodes is a constant, then make sure + // po.constants is updated to point to the master node. 
+ for val, idx := range po.constants { + if i2s.Test(idx) { + po.constants[val] = i1 + po.upushconst(i1, idx) + } + } +} + +func (po *poset) isroot(r uint32) bool { + for i := range po.roots { + if po.roots[i] == r { + return true + } + } + return false +} + +func (po *poset) changeroot(oldr, newr uint32) { + for i := range po.roots { + if po.roots[i] == oldr { + po.roots[i] = newr + return + } + } + panic("changeroot on non-root") +} + +func (po *poset) removeroot(r uint32) { + for i := range po.roots { + if po.roots[i] == r { + po.roots = append(po.roots[:i], po.roots[i+1:]...) + return + } + } + panic("removeroot on non-root") +} + +// dfs performs a depth-first search within the DAG whose root is r. +// f is the visit function called for each node; if it returns true, +// the search is aborted and true is returned. The root node is +// visited too. +// If strict, ignore edges across a path until at least one +// strict edge is found. For instance, for a chain A<=B<=C 0 { + i := open[len(open)-1] + open = open[:len(open)-1] + + // Don't visit the same node twice. Notice that all nodes + // across non-strict paths are still visited at least once, so + // a non-strict path can never obscure a strict path to the + // same node. + if !closed.Test(i) { + closed.Set(i) + + l, r := po.children(i) + if l != 0 { + if l.Strict() { + next = append(next, l.Target()) + } else { + open = append(open, l.Target()) + } + } + if r != 0 { + if r.Strict() { + next = append(next, r.Target()) + } else { + open = append(open, r.Target()) + } + } + } + } + open = next + closed.Reset() + } + + for len(open) > 0 { + i := open[len(open)-1] + open = open[:len(open)-1] + + if !closed.Test(i) { + if f(i) { + return true + } + closed.Set(i) + l, r := po.children(i) + if l != 0 { + open = append(open, l.Target()) + } + if r != 0 { + open = append(open, r.Target()) + } + } + } + return false +} + +// Returns true if there is a path from i1 to i2. 
+// If strict == true: if the function returns true, then i1 < i2. +// If strict == false: if the function returns true, then i1 <= i2. +// If the function returns false, no relation is known. +func (po *poset) reaches(i1, i2 uint32, strict bool) bool { + return po.dfs(i1, strict, func(n uint32) bool { + return n == i2 + }) +} + +// findroot finds i's root, that is which DAG contains i. +// Returns the root; if i is itself a root, it is returned. +// Panic if i is not in any DAG. +func (po *poset) findroot(i uint32) uint32 { + // TODO(rasky): if needed, a way to speed up this search is + // storing a bitset for each root using it as a mini bloom filter + // of nodes present under that root. + for _, r := range po.roots { + if po.reaches(r, i, false) { + return r + } + } + panic("findroot didn't find any root") +} + +// mergeroot merges two DAGs into one DAG by creating a new extra root +func (po *poset) mergeroot(r1, r2 uint32) uint32 { + // Root #0 is special as it contains all constants. Since mergeroot + // discards r2 as root and keeps r1, make sure that r2 is not root #0, + // otherwise constants would move to a different root. + if r2 == po.roots[0] { + r1, r2 = r2, r1 + } + r := po.newnode(nil) + po.setchl(r, newedge(r1, false)) + po.setchr(r, newedge(r2, false)) + po.changeroot(r1, r) + po.removeroot(r2) + po.upush(undoMergeRoot, r, 0) + return r +} + +// collapsepath marks n1 and n2 as equal and collapses as equal all +// nodes across all paths between n1 and n2. If a strict edge is +// found, the function does not modify the DAG and returns false. +// Complexity is O(n). 
+func (po *poset) collapsepath(n1, n2 *Value) bool { + i1, i2 := po.values[n1.ID], po.values[n2.ID] + if po.reaches(i1, i2, true) { + return false + } + + // Find all the paths from i1 to i2 + paths := po.findpaths(i1, i2) + // Mark all nodes in all the paths as aliases of n1 + // (excluding n1 itself) + paths.Clear(i1) + po.aliasnodes(n1, paths) + return true +} + +// findpaths is a recursive function that calculates all paths from cur to dst +// and return them as a bitset (the index of a node is set in the bitset if +// that node is on at least one path from cur to dst). +// We do a DFS from cur (stopping going deep any time we reach dst, if ever), +// and mark as part of the paths any node that has a children which is already +// part of the path (or is dst itself). +func (po *poset) findpaths(cur, dst uint32) bitset { + seen := newBitset(int(po.lastidx + 1)) + path := newBitset(int(po.lastidx + 1)) + path.Set(dst) + po.findpaths1(cur, dst, seen, path) + return path +} + +func (po *poset) findpaths1(cur, dst uint32, seen bitset, path bitset) { + if cur == dst { + return + } + seen.Set(cur) + l, r := po.chl(cur), po.chr(cur) + if !seen.Test(l) { + po.findpaths1(l, dst, seen, path) + } + if !seen.Test(r) { + po.findpaths1(r, dst, seen, path) + } + if path.Test(l) || path.Test(r) { + path.Set(cur) + } +} + +// Check whether it is recorded that i1!=i2 +func (po *poset) isnoneq(i1, i2 uint32) bool { + if i1 == i2 { + return false + } + if i1 < i2 { + i1, i2 = i2, i1 + } + + // Check if we recorded a non-equal relation before + if bs, ok := po.noneq[i1]; ok && bs.Test(i2) { + return true + } + return false +} + +// Record that i1!=i2 +func (po *poset) setnoneq(n1, n2 *Value) { + i1, f1 := po.lookup(n1) + i2, f2 := po.lookup(n2) + + // If any of the nodes do not exist in the poset, allocate them. Since + // we don't know any relation (in the partial order) about them, they must + // become independent roots. 
+ if !f1 { + i1 = po.newnode(n1) + po.roots = append(po.roots, i1) + po.upush(undoNewRoot, i1, 0) + } + if !f2 { + i2 = po.newnode(n2) + po.roots = append(po.roots, i2) + po.upush(undoNewRoot, i2, 0) + } + + if i1 == i2 { + panic("setnoneq on same node") + } + if i1 < i2 { + i1, i2 = i2, i1 + } + bs := po.noneq[i1] + if bs == nil { + // Given that we record non-equality relations using the + // higher index as a key, the bitsize will never change size. + // TODO(rasky): if memory is a problem, consider allocating + // a small bitset and lazily grow it when higher indices arrive. + bs = newBitset(int(i1)) + po.noneq[i1] = bs + } else if bs.Test(i2) { + // Already recorded + return + } + bs.Set(i2) + po.upushneq(i1, i2) +} + +// CheckIntegrity verifies internal integrity of a poset. It is intended +// for debugging purposes. +func (po *poset) CheckIntegrity() { + // Record which index is a constant + constants := newBitset(int(po.lastidx + 1)) + for _, c := range po.constants { + constants.Set(c) + } + + // Verify that each node appears in a single DAG, and that + // all constants are within the first DAG + seen := newBitset(int(po.lastidx + 1)) + for ridx, r := range po.roots { + if r == 0 { + panic("empty root") + } + + po.dfs(r, false, func(i uint32) bool { + if seen.Test(i) { + panic("duplicate node") + } + seen.Set(i) + if constants.Test(i) { + if ridx != 0 { + panic("constants not in the first DAG") + } + } + return false + }) + } + + // Verify that values contain the minimum set + for id, idx := range po.values { + if !seen.Test(idx) { + panic(fmt.Errorf("spurious value [%d]=%d", id, idx)) + } + } + + // Verify that only existing nodes have non-zero children + for i, n := range po.nodes { + if n.l|n.r != 0 { + if !seen.Test(uint32(i)) { + panic(fmt.Errorf("children of unknown node %d->%v", i, n)) + } + if n.l.Target() == uint32(i) || n.r.Target() == uint32(i) { + panic(fmt.Errorf("self-loop on node %d", i)) + } + } + } +} + +// CheckEmpty checks that a poset 
is completely empty. +// It can be used for debugging purposes, as a poset is supposed to +// be empty after it's fully rolled back through Undo. +func (po *poset) CheckEmpty() error { + if len(po.nodes) != 1 { + return fmt.Errorf("non-empty nodes list: %v", po.nodes) + } + if len(po.values) != 0 { + return fmt.Errorf("non-empty value map: %v", po.values) + } + if len(po.roots) != 0 { + return fmt.Errorf("non-empty root list: %v", po.roots) + } + if len(po.constants) != 0 { + return fmt.Errorf("non-empty constants: %v", po.constants) + } + if len(po.undo) != 0 { + return fmt.Errorf("non-empty undo list: %v", po.undo) + } + if po.lastidx != 0 { + return fmt.Errorf("lastidx index is not zero: %v", po.lastidx) + } + for _, bs := range po.noneq { + for _, x := range bs { + if x != 0 { + return fmt.Errorf("non-empty noneq map") + } + } + } + return nil +} + +// DotDump dumps the poset in graphviz format to file fn, with the specified title. +func (po *poset) DotDump(fn string, title string) error { + f, err := os.Create(fn) + if err != nil { + return err + } + defer f.Close() + + // Create reverse index mapping (taking aliases into account) + names := make(map[uint32]string) + for id, i := range po.values { + s := names[i] + if s == "" { + s = fmt.Sprintf("v%d", id) + } else { + s += fmt.Sprintf(", v%d", id) + } + names[i] = s + } + + // Create reverse constant mapping + consts := make(map[uint32]int64) + for val, idx := range po.constants { + consts[idx] = val + } + + fmt.Fprintf(f, "digraph poset {\n") + fmt.Fprintf(f, "\tedge [ fontsize=10 ]\n") + for ridx, r := range po.roots { + fmt.Fprintf(f, "\tsubgraph root%d {\n", ridx) + po.dfs(r, false, func(i uint32) bool { + if val, ok := consts[i]; ok { + // Constant + var vals string + if po.flags&posetFlagUnsigned != 0 { + vals = fmt.Sprint(uint64(val)) + } else { + vals = fmt.Sprint(int64(val)) + } + fmt.Fprintf(f, "\t\tnode%d [shape=box style=filled fillcolor=cadetblue1 label=<%s %s [%d]>]\n", + i, vals, names[i], i) + 
} else { + // Normal SSA value + fmt.Fprintf(f, "\t\tnode%d [label=<%s [%d]>]\n", i, names[i], i) + } + chl, chr := po.children(i) + for _, ch := range []posetEdge{chl, chr} { + if ch != 0 { + if ch.Strict() { + fmt.Fprintf(f, "\t\tnode%d -> node%d [label=\" <\" color=\"red\"]\n", i, ch.Target()) + } else { + fmt.Fprintf(f, "\t\tnode%d -> node%d [label=\" <=\" color=\"green\"]\n", i, ch.Target()) + } + } + } + return false + }) + fmt.Fprintf(f, "\t}\n") + } + fmt.Fprintf(f, "\tlabelloc=\"t\"\n") + fmt.Fprintf(f, "\tlabeldistance=\"3.0\"\n") + fmt.Fprintf(f, "\tlabel=%q\n", title) + fmt.Fprintf(f, "}\n") + return nil +} + +// Ordered reports whether n1 r i1 + // i2 \ / + // i2 + // + extra := po.newnode(nil) + po.changeroot(r, extra) + po.upush(undoChangeRoot, extra, newedge(r, false)) + po.addchild(extra, r, false) + po.addchild(extra, i1, false) + po.addchild(i1, i2, strict) + + case f1 && f2: + // If the nodes are aliased, fail only if we're setting a strict order + // (that is, we cannot set n1 0 { + pass := po.undo[len(po.undo)-1] + po.undo = po.undo[:len(po.undo)-1] + + switch pass.typ { + case undoCheckpoint: + return + + case undoSetChl: + po.setchl(pass.idx, pass.edge) + + case undoSetChr: + po.setchr(pass.idx, pass.edge) + + case undoNonEqual: + po.noneq[uint32(pass.ID)].Clear(pass.idx) + + case undoNewNode: + if pass.idx != po.lastidx { + panic("invalid newnode index") + } + if pass.ID != 0 { + if po.values[pass.ID] != pass.idx { + panic("invalid newnode undo pass") + } + delete(po.values, pass.ID) + } + po.setchl(pass.idx, 0) + po.setchr(pass.idx, 0) + po.nodes = po.nodes[:pass.idx] + po.lastidx-- + + case undoNewConstant: + // FIXME: remove this O(n) loop + var val int64 + var i uint32 + for val, i = range po.constants { + if i == pass.idx { + break + } + } + if i != pass.idx { + panic("constant not found in undo pass") + } + if pass.ID == 0 { + delete(po.constants, val) + } else { + // Restore previous index as constant node + // (also restoring the 
invariant on correct bounds) + oldidx := uint32(pass.ID) + po.constants[val] = oldidx + } + + case undoAliasNode: + ID, prev := pass.ID, pass.idx + cur := po.values[ID] + if prev == 0 { + // Born as an alias, die as an alias + delete(po.values, ID) + } else { + if cur == prev { + panic("invalid aliasnode undo pass") + } + // Give it back previous value + po.values[ID] = prev + } + + case undoNewRoot: + i := pass.idx + l, r := po.children(i) + if l|r != 0 { + panic("non-empty root in undo newroot") + } + po.removeroot(i) + + case undoChangeRoot: + i := pass.idx + l, r := po.children(i) + if l|r != 0 { + panic("non-empty root in undo changeroot") + } + po.changeroot(i, pass.edge.Target()) + + case undoMergeRoot: + i := pass.idx + l, r := po.children(i) + po.changeroot(i, l.Target()) + po.roots = append(po.roots, r.Target()) + + default: + panic(pass.typ) + } + } + + if debugPoset && po.CheckEmpty() != nil { + panic("poset not empty at the end of undo") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/poset_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/poset_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a6db1d1c24a8cfc69e98aec56e75b132469ed7a7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/poset_test.go @@ -0,0 +1,800 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import ( + "fmt" + "testing" +) + +const ( + SetOrder = "SetOrder" + SetOrder_Fail = "SetOrder_Fail" + SetOrderOrEqual = "SetOrderOrEqual" + SetOrderOrEqual_Fail = "SetOrderOrEqual_Fail" + Ordered = "Ordered" + Ordered_Fail = "Ordered_Fail" + OrderedOrEqual = "OrderedOrEqual" + OrderedOrEqual_Fail = "OrderedOrEqual_Fail" + SetEqual = "SetEqual" + SetEqual_Fail = "SetEqual_Fail" + Equal = "Equal" + Equal_Fail = "Equal_Fail" + SetNonEqual = "SetNonEqual" + SetNonEqual_Fail = "SetNonEqual_Fail" + NonEqual = "NonEqual" + NonEqual_Fail = "NonEqual_Fail" + Checkpoint = "Checkpoint" + Undo = "Undo" +) + +type posetTestOp struct { + typ string + a, b int +} + +func vconst(i int) int { + if i < -128 || i >= 128 { + panic("invalid const") + } + return 1000 + 128 + i +} + +func vconst2(i int) int { + if i < -128 || i >= 128 { + panic("invalid const") + } + return 1000 + 256 + i +} + +func testPosetOps(t *testing.T, unsigned bool, ops []posetTestOp) { + var v [1512]*Value + for i := range v { + v[i] = new(Value) + v[i].ID = ID(i) + if i >= 1000 && i < 1256 { + v[i].Op = OpConst64 + v[i].AuxInt = int64(i - 1000 - 128) + } + if i >= 1256 && i < 1512 { + v[i].Op = OpConst64 + v[i].AuxInt = int64(i - 1000 - 256) + } + } + + po := newPoset() + po.SetUnsigned(unsigned) + for idx, op := range ops { + t.Logf("op%d%v", idx, op) + switch op.typ { + case SetOrder: + if !po.SetOrder(v[op.a], v[op.b]) { + t.Errorf("FAILED: op%d%v failed", idx, op) + } + case SetOrder_Fail: + if po.SetOrder(v[op.a], v[op.b]) { + t.Errorf("FAILED: op%d%v passed", idx, op) + } + case SetOrderOrEqual: + if !po.SetOrderOrEqual(v[op.a], v[op.b]) { + t.Errorf("FAILED: op%d%v failed", idx, op) + } + case SetOrderOrEqual_Fail: + if po.SetOrderOrEqual(v[op.a], v[op.b]) { + t.Errorf("FAILED: op%d%v passed", idx, op) + } + case Ordered: + if !po.Ordered(v[op.a], v[op.b]) { + t.Errorf("FAILED: op%d%v failed", idx, op) + } + case Ordered_Fail: + if po.Ordered(v[op.a], v[op.b]) { + t.Errorf("FAILED: 
op%d%v passed", idx, op) + } + case OrderedOrEqual: + if !po.OrderedOrEqual(v[op.a], v[op.b]) { + t.Errorf("FAILED: op%d%v failed", idx, op) + } + case OrderedOrEqual_Fail: + if po.OrderedOrEqual(v[op.a], v[op.b]) { + t.Errorf("FAILED: op%d%v passed", idx, op) + } + case SetEqual: + if !po.SetEqual(v[op.a], v[op.b]) { + t.Errorf("FAILED: op%d%v failed", idx, op) + } + case SetEqual_Fail: + if po.SetEqual(v[op.a], v[op.b]) { + t.Errorf("FAILED: op%d%v passed", idx, op) + } + case Equal: + if !po.Equal(v[op.a], v[op.b]) { + t.Errorf("FAILED: op%d%v failed", idx, op) + } + case Equal_Fail: + if po.Equal(v[op.a], v[op.b]) { + t.Errorf("FAILED: op%d%v passed", idx, op) + } + case SetNonEqual: + if !po.SetNonEqual(v[op.a], v[op.b]) { + t.Errorf("FAILED: op%d%v failed", idx, op) + } + case SetNonEqual_Fail: + if po.SetNonEqual(v[op.a], v[op.b]) { + t.Errorf("FAILED: op%d%v passed", idx, op) + } + case NonEqual: + if !po.NonEqual(v[op.a], v[op.b]) { + t.Errorf("FAILED: op%d%v failed", idx, op) + } + case NonEqual_Fail: + if po.NonEqual(v[op.a], v[op.b]) { + t.Errorf("FAILED: op%d%v passed", idx, op) + } + case Checkpoint: + po.Checkpoint() + case Undo: + t.Log("Undo stack", po.undo) + po.Undo() + default: + panic("unimplemented") + } + + if false { + po.DotDump(fmt.Sprintf("op%d.dot", idx), fmt.Sprintf("Last op: %v", op)) + } + + po.CheckIntegrity() + } + + // Check that the poset is completely empty + if err := po.CheckEmpty(); err != nil { + t.Error(err) + } +} + +func TestPoset(t *testing.T) { + testPosetOps(t, false, []posetTestOp{ + {Ordered_Fail, 123, 124}, + + // Dag #0: 100<101 + {Checkpoint, 0, 0}, + {SetOrder, 100, 101}, + {Ordered, 100, 101}, + {Ordered_Fail, 101, 100}, + {SetOrder_Fail, 101, 100}, + {SetOrder, 100, 101}, // repeat + {NonEqual, 100, 101}, + {NonEqual, 101, 100}, + {SetEqual_Fail, 100, 101}, + + // Dag #1: 4<=7<12 + {Checkpoint, 0, 0}, + {SetOrderOrEqual, 4, 7}, + {OrderedOrEqual, 4, 7}, + {SetOrder, 7, 12}, + {Ordered, 7, 12}, + {Ordered, 4, 
12}, + {Ordered_Fail, 12, 4}, + {NonEqual, 4, 12}, + {NonEqual, 12, 4}, + {NonEqual_Fail, 4, 100}, + {OrderedOrEqual, 4, 12}, + {OrderedOrEqual_Fail, 12, 4}, + {OrderedOrEqual, 4, 7}, + {OrderedOrEqual_Fail, 7, 4}, + + // Dag #1: 1<4<=7<12 + {Checkpoint, 0, 0}, + {SetOrder, 1, 4}, + {Ordered, 1, 4}, + {Ordered, 1, 12}, + {Ordered_Fail, 12, 1}, + + // Dag #1: 1<4<=7<12, 6<7 + {Checkpoint, 0, 0}, + {SetOrder, 6, 7}, + {Ordered, 6, 7}, + {Ordered, 6, 12}, + {SetOrder_Fail, 7, 4}, + {SetOrder_Fail, 7, 6}, + {SetOrder_Fail, 7, 1}, + + // Dag #1: 1<4<=7<12, 1<6<7 + {Checkpoint, 0, 0}, + {Ordered_Fail, 1, 6}, + {SetOrder, 1, 6}, + {Ordered, 1, 6}, + {SetOrder_Fail, 6, 1}, + + // Dag #1: 1<4<=7<12, 1<4<6<7 + {Checkpoint, 0, 0}, + {Ordered_Fail, 4, 6}, + {Ordered_Fail, 4, 7}, + {SetOrder, 4, 6}, + {Ordered, 4, 6}, + {OrderedOrEqual, 4, 6}, + {Ordered, 4, 7}, + {OrderedOrEqual, 4, 7}, + {SetOrder_Fail, 6, 4}, + {Ordered_Fail, 7, 6}, + {Ordered_Fail, 7, 4}, + {OrderedOrEqual_Fail, 7, 6}, + {OrderedOrEqual_Fail, 7, 4}, + + // Merge: 1<4<6, 4<=7<12, 6<101 + {Checkpoint, 0, 0}, + {Ordered_Fail, 6, 101}, + {SetOrder, 6, 101}, + {Ordered, 6, 101}, + {Ordered, 1, 101}, + + // Merge: 1<4<6, 4<=7<12, 6<100<101 + {Checkpoint, 0, 0}, + {Ordered_Fail, 6, 100}, + {SetOrder, 6, 100}, + {Ordered, 1, 100}, + + // Undo: 1<4<6<7<12, 6<101 + {Ordered, 100, 101}, + {Undo, 0, 0}, + {Ordered, 100, 101}, + {Ordered_Fail, 6, 100}, + {Ordered, 6, 101}, + {Ordered, 1, 101}, + + // Undo: 1<4<6<7<12, 100<101 + {Undo, 0, 0}, + {Ordered_Fail, 1, 100}, + {Ordered_Fail, 1, 101}, + {Ordered_Fail, 6, 100}, + {Ordered_Fail, 6, 101}, + + // Merge: 1<4<6<7<12, 6<100<101 + {Checkpoint, 0, 0}, + {Ordered, 100, 101}, + {SetOrder, 6, 100}, + {Ordered, 6, 100}, + {Ordered, 6, 101}, + {Ordered, 1, 101}, + + // Undo 2 times: 1<4<7<12, 1<6<7 + {Undo, 0, 0}, + {Undo, 0, 0}, + {Ordered, 1, 6}, + {Ordered, 4, 12}, + {Ordered_Fail, 4, 6}, + {SetOrder_Fail, 6, 1}, + + // Undo 2 times: 1<4<7<12 + {Undo, 0, 0}, + {Undo, 0, 
0}, + {Ordered, 1, 12}, + {Ordered, 7, 12}, + {Ordered_Fail, 1, 6}, + {Ordered_Fail, 6, 7}, + {Ordered, 100, 101}, + {Ordered_Fail, 1, 101}, + + // Undo: 4<7<12 + {Undo, 0, 0}, + {Ordered_Fail, 1, 12}, + {Ordered_Fail, 1, 4}, + {Ordered, 4, 12}, + {Ordered, 100, 101}, + + // Undo: 100<101 + {Undo, 0, 0}, + {Ordered_Fail, 4, 7}, + {Ordered_Fail, 7, 12}, + {Ordered, 100, 101}, + + // Recreated DAG #1 from scratch, reusing same nodes. + // This also stresses that Undo has done its job correctly. + // DAG: 1<2<(5|6), 101<102<(105|106<107) + {Checkpoint, 0, 0}, + {SetOrder, 101, 102}, + {SetOrder, 102, 105}, + {SetOrder, 102, 106}, + {SetOrder, 106, 107}, + {SetOrder, 1, 2}, + {SetOrder, 2, 5}, + {SetOrder, 2, 6}, + {SetEqual_Fail, 1, 6}, + {SetEqual_Fail, 107, 102}, + + // Now Set 2 == 102 + // New DAG: (1|101)<2==102<(5|6|105|106<107) + {Checkpoint, 0, 0}, + {SetEqual, 2, 102}, + {Equal, 2, 102}, + {SetEqual, 2, 102}, // trivially pass + {SetNonEqual_Fail, 2, 102}, // trivially fail + {Ordered, 1, 107}, + {Ordered, 101, 6}, + {Ordered, 101, 105}, + {Ordered, 2, 106}, + {Ordered, 102, 6}, + + // Undo SetEqual + {Undo, 0, 0}, + {Equal_Fail, 2, 102}, + {Ordered_Fail, 2, 102}, + {Ordered_Fail, 1, 107}, + {Ordered_Fail, 101, 6}, + {Checkpoint, 0, 0}, + {SetEqual, 2, 100}, + {Ordered, 1, 107}, + {Ordered, 100, 6}, + + // SetEqual with new node + {Undo, 0, 0}, + {Checkpoint, 0, 0}, + {SetEqual, 2, 400}, + {SetEqual, 401, 2}, + {Equal, 400, 401}, + {Ordered, 1, 400}, + {Ordered, 400, 6}, + {Ordered, 1, 401}, + {Ordered, 401, 6}, + {Ordered_Fail, 2, 401}, + + // SetEqual unseen nodes and then connect + {Checkpoint, 0, 0}, + {SetEqual, 500, 501}, + {SetEqual, 102, 501}, + {Equal, 500, 102}, + {Ordered, 501, 106}, + {Ordered, 100, 500}, + {SetEqual, 500, 501}, + {Ordered_Fail, 500, 501}, + {Ordered_Fail, 102, 501}, + + // SetNonEqual relations + {Undo, 0, 0}, + {Checkpoint, 0, 0}, + {SetNonEqual, 600, 601}, + {NonEqual, 600, 601}, + {SetNonEqual, 601, 602}, + {NonEqual, 601, 
602}, + {NonEqual_Fail, 600, 602}, // non-transitive + {SetEqual_Fail, 601, 602}, + + // Undo back to beginning, leave the poset empty + {Undo, 0, 0}, + {Undo, 0, 0}, + {Undo, 0, 0}, + {Undo, 0, 0}, + }) +} + +func TestPosetStrict(t *testing.T) { + + testPosetOps(t, false, []posetTestOp{ + {Checkpoint, 0, 0}, + // Build: 20!=30, 10<20<=30<40. The 20<=30 will become 20<30. + {SetNonEqual, 20, 30}, + {SetOrder, 10, 20}, + {SetOrderOrEqual, 20, 30}, // this is affected by 20!=30 + {SetOrder, 30, 40}, + + {Ordered, 10, 30}, + {Ordered, 20, 30}, + {Ordered, 10, 40}, + {OrderedOrEqual, 10, 30}, + {OrderedOrEqual, 20, 30}, + {OrderedOrEqual, 10, 40}, + + {Undo, 0, 0}, + + // Now do the opposite: first build the DAG and then learn non-equality + {Checkpoint, 0, 0}, + {SetOrder, 10, 20}, + {SetOrderOrEqual, 20, 30}, // this is affected by 20!=30 + {SetOrder, 30, 40}, + + {Ordered, 10, 30}, + {Ordered_Fail, 20, 30}, + {Ordered, 10, 40}, + {OrderedOrEqual, 10, 30}, + {OrderedOrEqual, 20, 30}, + {OrderedOrEqual, 10, 40}, + + {Checkpoint, 0, 0}, + {SetNonEqual, 20, 30}, + {Ordered, 10, 30}, + {Ordered, 20, 30}, + {Ordered, 10, 40}, + {OrderedOrEqual, 10, 30}, + {OrderedOrEqual, 20, 30}, + {OrderedOrEqual, 10, 40}, + {Undo, 0, 0}, + + {Checkpoint, 0, 0}, + {SetOrderOrEqual, 30, 35}, + {OrderedOrEqual, 20, 35}, + {Ordered_Fail, 20, 35}, + {SetNonEqual, 20, 35}, + {Ordered, 20, 35}, + {Undo, 0, 0}, + + // Learn <= and >= + {Checkpoint, 0, 0}, + {SetOrderOrEqual, 50, 60}, + {SetOrderOrEqual, 60, 50}, + {OrderedOrEqual, 50, 60}, + {OrderedOrEqual, 60, 50}, + {Ordered_Fail, 50, 60}, + {Ordered_Fail, 60, 50}, + {Equal, 50, 60}, + {Equal, 60, 50}, + {NonEqual_Fail, 50, 60}, + {NonEqual_Fail, 60, 50}, + {Undo, 0, 0}, + + {Undo, 0, 0}, + }) +} + +func TestPosetCollapse(t *testing.T) { + testPosetOps(t, false, []posetTestOp{ + {Checkpoint, 0, 0}, + // Create a complex graph of <= relations among nodes between 10 and 25. 
+ {SetOrderOrEqual, 10, 15}, + {SetOrderOrEqual, 15, 20}, + {SetOrderOrEqual, 20, vconst(20)}, + {SetOrderOrEqual, vconst(20), 25}, + {SetOrderOrEqual, 10, 12}, + {SetOrderOrEqual, 12, 16}, + {SetOrderOrEqual, 16, vconst(20)}, + {SetOrderOrEqual, 10, 17}, + {SetOrderOrEqual, 17, 25}, + {SetOrderOrEqual, 15, 18}, + {SetOrderOrEqual, 18, vconst(20)}, + {SetOrderOrEqual, 15, 19}, + {SetOrderOrEqual, 19, 25}, + + // These are other paths not part of the main collapsing path + {SetOrderOrEqual, 10, 11}, + {SetOrderOrEqual, 11, 26}, + {SetOrderOrEqual, 13, 25}, + {SetOrderOrEqual, 100, 25}, + {SetOrderOrEqual, 101, 15}, + {SetOrderOrEqual, 102, 10}, + {SetOrderOrEqual, 25, 103}, + {SetOrderOrEqual, 20, 104}, + + {Checkpoint, 0, 0}, + // Collapse everything by setting 10 >= 25: this should make everything equal + {SetOrderOrEqual, 25, 10}, + + // Check that all nodes are pairwise equal now + {Equal, 10, 12}, + {Equal, 10, 15}, + {Equal, 10, 16}, + {Equal, 10, 17}, + {Equal, 10, 18}, + {Equal, 10, 19}, + {Equal, 10, vconst(20)}, + {Equal, 10, vconst2(20)}, + {Equal, 10, 25}, + + {Equal, 12, 15}, + {Equal, 12, 16}, + {Equal, 12, 17}, + {Equal, 12, 18}, + {Equal, 12, 19}, + {Equal, 12, vconst(20)}, + {Equal, 12, vconst2(20)}, + {Equal, 12, 25}, + + {Equal, 15, 16}, + {Equal, 15, 17}, + {Equal, 15, 18}, + {Equal, 15, 19}, + {Equal, 15, vconst(20)}, + {Equal, 15, vconst2(20)}, + {Equal, 15, 25}, + + {Equal, 16, 17}, + {Equal, 16, 18}, + {Equal, 16, 19}, + {Equal, 16, vconst(20)}, + {Equal, 16, vconst2(20)}, + {Equal, 16, 25}, + + {Equal, 17, 18}, + {Equal, 17, 19}, + {Equal, 17, vconst(20)}, + {Equal, 17, vconst2(20)}, + {Equal, 17, 25}, + + {Equal, 18, 19}, + {Equal, 18, vconst(20)}, + {Equal, 18, vconst2(20)}, + {Equal, 18, 25}, + + {Equal, 19, vconst(20)}, + {Equal, 19, vconst2(20)}, + {Equal, 19, 25}, + + {Equal, vconst(20), vconst2(20)}, + {Equal, vconst(20), 25}, + + {Equal, vconst2(20), 25}, + + // ... 
but not 11/26/100/101/102, which were on a different path + {Equal_Fail, 10, 11}, + {Equal_Fail, 10, 26}, + {Equal_Fail, 10, 100}, + {Equal_Fail, 10, 101}, + {Equal_Fail, 10, 102}, + {OrderedOrEqual, 10, 26}, + {OrderedOrEqual, 25, 26}, + {OrderedOrEqual, 13, 25}, + {OrderedOrEqual, 13, 10}, + + {Undo, 0, 0}, + {OrderedOrEqual, 10, 25}, + {Equal_Fail, 10, 12}, + {Equal_Fail, 10, 15}, + {Equal_Fail, 10, 25}, + + {Undo, 0, 0}, + }) + + testPosetOps(t, false, []posetTestOp{ + {Checkpoint, 0, 0}, + {SetOrderOrEqual, 10, 15}, + {SetOrderOrEqual, 15, 20}, + {SetOrderOrEqual, 20, 25}, + {SetOrder, 10, 16}, + {SetOrderOrEqual, 16, 20}, + // Check that we cannot collapse here because of the strict relation 10<16 + {SetOrderOrEqual_Fail, 20, 10}, + {Undo, 0, 0}, + }) +} + +func TestPosetSetEqual(t *testing.T) { + testPosetOps(t, false, []posetTestOp{ + // 10<=20<=30<40, 20<=100<110 + {Checkpoint, 0, 0}, + {SetOrderOrEqual, 10, 20}, + {SetOrderOrEqual, 20, 30}, + {SetOrder, 30, 40}, + {SetOrderOrEqual, 20, 100}, + {SetOrder, 100, 110}, + {OrderedOrEqual, 10, 30}, + {OrderedOrEqual_Fail, 30, 10}, + {Ordered_Fail, 10, 30}, + {Ordered_Fail, 30, 10}, + {Ordered, 10, 40}, + {Ordered_Fail, 40, 10}, + + // Try learning 10==20. + {Checkpoint, 0, 0}, + {SetEqual, 10, 20}, + {OrderedOrEqual, 10, 20}, + {Ordered_Fail, 10, 20}, + {Equal, 10, 20}, + {SetOrderOrEqual, 10, 20}, + {SetOrderOrEqual, 20, 10}, + {SetOrder_Fail, 10, 20}, + {SetOrder_Fail, 20, 10}, + {Undo, 0, 0}, + + // Try learning 20==10. + {Checkpoint, 0, 0}, + {SetEqual, 20, 10}, + {OrderedOrEqual, 10, 20}, + {Ordered_Fail, 10, 20}, + {Equal, 10, 20}, + {Undo, 0, 0}, + + // Try learning 10==40 or 30==40 or 10==110. 
+ {Checkpoint, 0, 0}, + {SetEqual_Fail, 10, 40}, + {SetEqual_Fail, 40, 10}, + {SetEqual_Fail, 30, 40}, + {SetEqual_Fail, 40, 30}, + {SetEqual_Fail, 10, 110}, + {SetEqual_Fail, 110, 10}, + {Undo, 0, 0}, + + // Try learning 40==110, and then 10==40 or 10=110 + {Checkpoint, 0, 0}, + {SetEqual, 40, 110}, + {SetEqual_Fail, 10, 40}, + {SetEqual_Fail, 40, 10}, + {SetEqual_Fail, 10, 110}, + {SetEqual_Fail, 110, 10}, + {Undo, 0, 0}, + + // Try learning 40<20 or 30<20 or 110<10 + {Checkpoint, 0, 0}, + {SetOrder_Fail, 40, 20}, + {SetOrder_Fail, 30, 20}, + {SetOrder_Fail, 110, 10}, + {Undo, 0, 0}, + + // Try learning 30<=20 + {Checkpoint, 0, 0}, + {SetOrderOrEqual, 30, 20}, + {Equal, 30, 20}, + {OrderedOrEqual, 30, 100}, + {Ordered, 30, 110}, + {Undo, 0, 0}, + + {Undo, 0, 0}, + }) +} + +func TestPosetConst(t *testing.T) { + testPosetOps(t, false, []posetTestOp{ + {Checkpoint, 0, 0}, + {SetOrder, 1, vconst(15)}, + {SetOrderOrEqual, 100, vconst(120)}, + {Ordered, 1, vconst(15)}, + {Ordered, 1, vconst(120)}, + {OrderedOrEqual, 1, vconst(120)}, + {OrderedOrEqual, 100, vconst(120)}, + {Ordered_Fail, 100, vconst(15)}, + {Ordered_Fail, vconst(15), 100}, + + {Checkpoint, 0, 0}, + {SetOrderOrEqual, 1, 5}, + {SetOrderOrEqual, 5, 25}, + {SetEqual, 20, vconst(20)}, + {SetEqual, 25, vconst(25)}, + {Ordered, 1, 20}, + {Ordered, 1, vconst(30)}, + {Undo, 0, 0}, + + {Checkpoint, 0, 0}, + {SetOrderOrEqual, 1, 5}, + {SetOrderOrEqual, 5, 25}, + {SetEqual, vconst(-20), 5}, + {SetEqual, vconst(-25), 1}, + {Ordered, 1, 5}, + {Ordered, vconst(-30), 1}, + {Undo, 0, 0}, + + {Checkpoint, 0, 0}, + {SetNonEqual, 1, vconst(4)}, + {SetNonEqual, 1, vconst(6)}, + {NonEqual, 1, vconst(4)}, + {NonEqual_Fail, 1, vconst(5)}, + {NonEqual, 1, vconst(6)}, + {Equal_Fail, 1, vconst(4)}, + {Equal_Fail, 1, vconst(5)}, + {Equal_Fail, 1, vconst(6)}, + {Equal_Fail, 1, vconst(7)}, + {Undo, 0, 0}, + + {Undo, 0, 0}, + }) + + testPosetOps(t, true, []posetTestOp{ + {Checkpoint, 0, 0}, + {SetOrder, 1, vconst(15)}, + 
{SetOrderOrEqual, 100, vconst(-5)}, // -5 is a very big number in unsigned + {Ordered, 1, vconst(15)}, + {Ordered, 1, vconst(-5)}, + {OrderedOrEqual, 1, vconst(-5)}, + {OrderedOrEqual, 100, vconst(-5)}, + {Ordered_Fail, 100, vconst(15)}, + {Ordered_Fail, vconst(15), 100}, + + {Undo, 0, 0}, + }) + + testPosetOps(t, false, []posetTestOp{ + {Checkpoint, 0, 0}, + {SetOrderOrEqual, 1, vconst(3)}, + {SetNonEqual, 1, vconst(0)}, + {Ordered_Fail, 1, vconst(0)}, + {Undo, 0, 0}, + }) + + testPosetOps(t, false, []posetTestOp{ + // Check relations of a constant with itself + {Checkpoint, 0, 0}, + {SetOrderOrEqual, vconst(3), vconst2(3)}, + {Undo, 0, 0}, + {Checkpoint, 0, 0}, + {SetEqual, vconst(3), vconst2(3)}, + {Undo, 0, 0}, + {Checkpoint, 0, 0}, + {SetNonEqual_Fail, vconst(3), vconst2(3)}, + {Undo, 0, 0}, + {Checkpoint, 0, 0}, + {SetOrder_Fail, vconst(3), vconst2(3)}, + {Undo, 0, 0}, + + // Check relations of two constants among them, using + // different instances of the same constant + {Checkpoint, 0, 0}, + {SetOrderOrEqual, vconst(3), vconst(4)}, + {OrderedOrEqual, vconst(3), vconst2(4)}, + {Undo, 0, 0}, + {Checkpoint, 0, 0}, + {SetOrder, vconst(3), vconst(4)}, + {Ordered, vconst(3), vconst2(4)}, + {Undo, 0, 0}, + {Checkpoint, 0, 0}, + {SetEqual_Fail, vconst(3), vconst(4)}, + {SetEqual_Fail, vconst(3), vconst2(4)}, + {Undo, 0, 0}, + {Checkpoint, 0, 0}, + {NonEqual, vconst(3), vconst(4)}, + {NonEqual, vconst(3), vconst2(4)}, + {Undo, 0, 0}, + {Checkpoint, 0, 0}, + {Equal_Fail, vconst(3), vconst(4)}, + {Equal_Fail, vconst(3), vconst2(4)}, + {Undo, 0, 0}, + {Checkpoint, 0, 0}, + {SetNonEqual, vconst(3), vconst(4)}, + {SetNonEqual, vconst(3), vconst2(4)}, + {Undo, 0, 0}, + }) +} + +func TestPosetNonEqual(t *testing.T) { + testPosetOps(t, false, []posetTestOp{ + {Equal_Fail, 10, 20}, + {NonEqual_Fail, 10, 20}, + + // Learn 10!=20 + {Checkpoint, 0, 0}, + {SetNonEqual, 10, 20}, + {Equal_Fail, 10, 20}, + {NonEqual, 10, 20}, + {SetEqual_Fail, 10, 20}, + + // Learn again 10!=20 + 
{Checkpoint, 0, 0}, + {SetNonEqual, 10, 20}, + {Equal_Fail, 10, 20}, + {NonEqual, 10, 20}, + + // Undo. We still know 10!=20 + {Undo, 0, 0}, + {Equal_Fail, 10, 20}, + {NonEqual, 10, 20}, + {SetEqual_Fail, 10, 20}, + + // Undo again. Now we know nothing + {Undo, 0, 0}, + {Equal_Fail, 10, 20}, + {NonEqual_Fail, 10, 20}, + + // Learn 10==20 + {Checkpoint, 0, 0}, + {SetEqual, 10, 20}, + {Equal, 10, 20}, + {NonEqual_Fail, 10, 20}, + {SetNonEqual_Fail, 10, 20}, + + // Learn again 10==20 + {Checkpoint, 0, 0}, + {SetEqual, 10, 20}, + {Equal, 10, 20}, + {NonEqual_Fail, 10, 20}, + {SetNonEqual_Fail, 10, 20}, + + // Undo. We still know 10==20 + {Undo, 0, 0}, + {Equal, 10, 20}, + {NonEqual_Fail, 10, 20}, + {SetNonEqual_Fail, 10, 20}, + + // Undo. We know nothing + {Undo, 0, 0}, + {Equal_Fail, 10, 20}, + {NonEqual_Fail, 10, 20}, + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/print.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/print.go new file mode 100644 index 0000000000000000000000000000000000000000..0d3b5d9e34d1ff5afd1eb31b8a128e0f98363911 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/print.go @@ -0,0 +1,192 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "fmt" + "io" + "strings" + + "cmd/internal/notsha256" + "cmd/internal/src" +) + +func printFunc(f *Func) { + f.Logf("%s", f) +} + +func hashFunc(f *Func) []byte { + h := notsha256.New() + p := stringFuncPrinter{w: h, printDead: true} + fprintFunc(p, f) + return h.Sum(nil) +} + +func (f *Func) String() string { + var buf strings.Builder + p := stringFuncPrinter{w: &buf, printDead: true} + fprintFunc(p, f) + return buf.String() +} + +// rewriteHash returns a hash of f suitable for detecting rewrite cycles. 
+func (f *Func) rewriteHash() string { + h := notsha256.New() + p := stringFuncPrinter{w: h, printDead: false} + fprintFunc(p, f) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +type funcPrinter interface { + header(f *Func) + startBlock(b *Block, reachable bool) + endBlock(b *Block, reachable bool) + value(v *Value, live bool) + startDepCycle() + endDepCycle() + named(n LocalSlot, vals []*Value) +} + +type stringFuncPrinter struct { + w io.Writer + printDead bool +} + +func (p stringFuncPrinter) header(f *Func) { + fmt.Fprint(p.w, f.Name) + fmt.Fprint(p.w, " ") + fmt.Fprintln(p.w, f.Type) +} + +func (p stringFuncPrinter) startBlock(b *Block, reachable bool) { + if !p.printDead && !reachable { + return + } + fmt.Fprintf(p.w, " b%d:", b.ID) + if len(b.Preds) > 0 { + io.WriteString(p.w, " <-") + for _, e := range b.Preds { + pred := e.b + fmt.Fprintf(p.w, " b%d", pred.ID) + } + } + if !reachable { + fmt.Fprint(p.w, " DEAD") + } + io.WriteString(p.w, "\n") +} + +func (p stringFuncPrinter) endBlock(b *Block, reachable bool) { + if !p.printDead && !reachable { + return + } + fmt.Fprintln(p.w, " "+b.LongString()) +} + +func StmtString(p src.XPos) string { + linenumber := "(?) 
" + if p.IsKnown() { + pfx := "" + if p.IsStmt() == src.PosIsStmt { + pfx = "+" + } + if p.IsStmt() == src.PosNotStmt { + pfx = "-" + } + linenumber = fmt.Sprintf("(%s%d) ", pfx, p.Line()) + } + return linenumber +} + +func (p stringFuncPrinter) value(v *Value, live bool) { + if !p.printDead && !live { + return + } + fmt.Fprintf(p.w, " %s", StmtString(v.Pos)) + fmt.Fprint(p.w, v.LongString()) + if !live { + fmt.Fprint(p.w, " DEAD") + } + fmt.Fprintln(p.w) +} + +func (p stringFuncPrinter) startDepCycle() { + fmt.Fprintln(p.w, "dependency cycle!") +} + +func (p stringFuncPrinter) endDepCycle() {} + +func (p stringFuncPrinter) named(n LocalSlot, vals []*Value) { + fmt.Fprintf(p.w, "name %s: %v\n", n, vals) +} + +func fprintFunc(p funcPrinter, f *Func) { + reachable, live := findlive(f) + defer f.Cache.freeBoolSlice(live) + p.header(f) + printed := make([]bool, f.NumValues()) + for _, b := range f.Blocks { + p.startBlock(b, reachable[b.ID]) + + if f.scheduled { + // Order of Values has been decided - print in that order. + for _, v := range b.Values { + p.value(v, live[v.ID]) + printed[v.ID] = true + } + p.endBlock(b, reachable[b.ID]) + continue + } + + // print phis first since all value cycles contain a phi + n := 0 + for _, v := range b.Values { + if v.Op != OpPhi { + continue + } + p.value(v, live[v.ID]) + printed[v.ID] = true + n++ + } + + // print rest of values in dependency order + for n < len(b.Values) { + m := n + outer: + for _, v := range b.Values { + if printed[v.ID] { + continue + } + for _, w := range v.Args { + // w == nil shouldn't happen, but if it does, + // don't panic; we'll get a better diagnosis later. 
+ if w != nil && w.Block == b && !printed[w.ID] { + continue outer + } + } + p.value(v, live[v.ID]) + printed[v.ID] = true + n++ + } + if m == n { + p.startDepCycle() + for _, v := range b.Values { + if printed[v.ID] { + continue + } + p.value(v, live[v.ID]) + printed[v.ID] = true + n++ + } + p.endDepCycle() + } + } + + p.endBlock(b, reachable[b.ID]) + } + for _, name := range f.Names { + p.named(*name, f.NamedValues[*name]) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/prove.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/prove.go new file mode 100644 index 0000000000000000000000000000000000000000..842719fb4c9820f57a1a8bbe24c53fc8c9be0a0c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/prove.go @@ -0,0 +1,1813 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/internal/src" + "fmt" + "math" +) + +type branch int + +const ( + unknown branch = iota + positive + negative + // The outedges from a jump table are jumpTable0, + // jumpTable0+1, jumpTable0+2, etc. There could be an + // arbitrary number so we can't list them all here. + jumpTable0 +) + +// relation represents the set of possible relations between +// pairs of variables (v, w). Without a priori knowledge the +// mask is lt | eq | gt meaning v can be less than, equal to or +// greater than w. When the execution path branches on the condition +// `v op w` the set of relations is updated to exclude any +// relation not possible due to `v op w` being true (or false). +// +// E.g. +// +// r := relation(...) 
+// +// if v < w { +// newR := r & lt +// } +// if v >= w { +// newR := r & (eq|gt) +// } +// if v != w { +// newR := r & (lt|gt) +// } +type relation uint + +const ( + lt relation = 1 << iota + eq + gt +) + +var relationStrings = [...]string{ + 0: "none", lt: "<", eq: "==", lt | eq: "<=", + gt: ">", gt | lt: "!=", gt | eq: ">=", gt | eq | lt: "any", +} + +func (r relation) String() string { + if r < relation(len(relationStrings)) { + return relationStrings[r] + } + return fmt.Sprintf("relation(%d)", uint(r)) +} + +// domain represents the domain of a variable pair in which a set +// of relations is known. For example, relations learned for unsigned +// pairs cannot be transferred to signed pairs because the same bit +// representation can mean something else. +type domain uint + +const ( + signed domain = 1 << iota + unsigned + pointer + boolean +) + +var domainStrings = [...]string{ + "signed", "unsigned", "pointer", "boolean", +} + +func (d domain) String() string { + s := "" + for i, ds := range domainStrings { + if d&(1< l2.max { + l.max = l2.max + } + if l.umax > l2.umax { + l.umax = l2.umax + } + return l +} + +var noLimit = limit{math.MinInt64, math.MaxInt64, 0, math.MaxUint64} + +// a limitFact is a limit known for a particular value. +type limitFact struct { + vid ID + limit limit +} + +// factsTable keeps track of relations between pairs of values. +// +// The fact table logic is sound, but incomplete. Outside of a few +// special cases, it performs no deduction or arithmetic. While there +// are known decision procedures for this, the ad hoc approach taken +// by the facts table is effective for real code while remaining very +// efficient. +type factsTable struct { + // unsat is true if facts contains a contradiction. + // + // Note that the factsTable logic is incomplete, so if unsat + // is false, the assertions in factsTable could be satisfiable + // *or* unsatisfiable. 
+ unsat bool // true if facts contains a contradiction + unsatDepth int // number of unsat checkpoints + + facts map[pair]relation // current known set of relation + stack []fact // previous sets of relations + + // order* is a couple of partial order sets that record information + // about relations between SSA values in the signed and unsigned + // domain. + orderS *poset + orderU *poset + + // known lower and upper bounds on individual values. + limits map[ID]limit + limitStack []limitFact // previous entries + + // For each slice s, a map from s to a len(s)/cap(s) value (if any) + // TODO: check if there are cases that matter where we have + // more than one len(s) for a slice. We could keep a list if necessary. + lens map[ID]*Value + caps map[ID]*Value + + // zero is a zero-valued constant + zero *Value +} + +// checkpointFact is an invalid value used for checkpointing +// and restoring factsTable. +var checkpointFact = fact{} +var checkpointBound = limitFact{} + +func newFactsTable(f *Func) *factsTable { + ft := &factsTable{} + ft.orderS = f.newPoset() + ft.orderU = f.newPoset() + ft.orderS.SetUnsigned(false) + ft.orderU.SetUnsigned(true) + ft.facts = make(map[pair]relation) + ft.stack = make([]fact, 4) + ft.limits = make(map[ID]limit) + ft.limitStack = make([]limitFact, 4) + ft.zero = f.ConstInt64(f.Config.Types.Int64, 0) + return ft +} + +// update updates the set of relations between v and w in domain d +// restricting it to r. +func (ft *factsTable) update(parent *Block, v, w *Value, d domain, r relation) { + if parent.Func.pass.debug > 2 { + parent.Func.Warnl(parent.Pos, "parent=%s, update %s %s %s", parent, v, w, r) + } + // No need to do anything else if we already found unsat. + if ft.unsat { + return + } + + // Self-fact. 
It's wasteful to register it into the facts + // table, so just note whether it's satisfiable + if v == w { + if r&eq == 0 { + ft.unsat = true + } + return + } + + if d == signed || d == unsigned { + var ok bool + order := ft.orderS + if d == unsigned { + order = ft.orderU + } + switch r { + case lt: + ok = order.SetOrder(v, w) + case gt: + ok = order.SetOrder(w, v) + case lt | eq: + ok = order.SetOrderOrEqual(v, w) + case gt | eq: + ok = order.SetOrderOrEqual(w, v) + case eq: + ok = order.SetEqual(v, w) + case lt | gt: + ok = order.SetNonEqual(v, w) + default: + panic("unknown relation") + } + if !ok { + if parent.Func.pass.debug > 2 { + parent.Func.Warnl(parent.Pos, "unsat %s %s %s", v, w, r) + } + ft.unsat = true + return + } + } else { + if lessByID(w, v) { + v, w = w, v + r = reverseBits[r] + } + + p := pair{v, w, d} + oldR, ok := ft.facts[p] + if !ok { + if v == w { + oldR = eq + } else { + oldR = lt | eq | gt + } + } + // No changes compared to information already in facts table. + if oldR == r { + return + } + ft.stack = append(ft.stack, fact{p, oldR}) + ft.facts[p] = oldR & r + // If this relation is not satisfiable, mark it and exit right away + if oldR&r == 0 { + if parent.Func.pass.debug > 2 { + parent.Func.Warnl(parent.Pos, "unsat %s %s %s", v, w, r) + } + ft.unsat = true + return + } + } + + // Extract bounds when comparing against constants + if v.isGenericIntConst() { + v, w = w, v + r = reverseBits[r] + } + if v != nil && w.isGenericIntConst() { + // Note: all the +1/-1 below could overflow/underflow. Either will + // still generate correct results, it will just lead to imprecision. + // In fact if there is overflow/underflow, the corresponding + // code is unreachable because the known range is outside the range + // of the value's type. 
+ old, ok := ft.limits[v.ID] + if !ok { + old = noLimit + if v.isGenericIntConst() { + switch d { + case signed: + old.min, old.max = v.AuxInt, v.AuxInt + if v.AuxInt >= 0 { + old.umin, old.umax = uint64(v.AuxInt), uint64(v.AuxInt) + } + case unsigned: + old.umin = v.AuxUnsigned() + old.umax = old.umin + if int64(old.umin) >= 0 { + old.min, old.max = int64(old.umin), int64(old.umin) + } + } + } + } + lim := noLimit + switch d { + case signed: + c := w.AuxInt + switch r { + case lt: + lim.max = c - 1 + case lt | eq: + lim.max = c + case gt | eq: + lim.min = c + case gt: + lim.min = c + 1 + case lt | gt: + lim = old + if c == lim.min { + lim.min++ + } + if c == lim.max { + lim.max-- + } + case eq: + lim.min = c + lim.max = c + } + if lim.min >= 0 { + // int(x) >= 0 && int(x) >= N ⇒ uint(x) >= N + lim.umin = uint64(lim.min) + } + if lim.max != noLimit.max && old.min >= 0 && lim.max >= 0 { + // 0 <= int(x) <= N ⇒ 0 <= uint(x) <= N + // This is for a max update, so the lower bound + // comes from what we already know (old). + lim.umax = uint64(lim.max) + } + case unsigned: + uc := w.AuxUnsigned() + switch r { + case lt: + lim.umax = uc - 1 + case lt | eq: + lim.umax = uc + case gt | eq: + lim.umin = uc + case gt: + lim.umin = uc + 1 + case lt | gt: + lim = old + if uc == lim.umin { + lim.umin++ + } + if uc == lim.umax { + lim.umax-- + } + case eq: + lim.umin = uc + lim.umax = uc + } + // We could use the contrapositives of the + // signed implications to derive signed facts, + // but it turns out not to matter. + } + ft.limitStack = append(ft.limitStack, limitFact{v.ID, old}) + lim = old.intersect(lim) + ft.limits[v.ID] = lim + if v.Block.Func.pass.debug > 2 { + v.Block.Func.Warnl(parent.Pos, "parent=%s, new limits %s %s %s %s", parent, v, w, r, lim.String()) + } + if lim.min > lim.max || lim.umin > lim.umax { + ft.unsat = true + return + } + } + + // Derived facts below here are only about numbers. 
+ if d != signed && d != unsigned { + return + } + + // Additional facts we know given the relationship between len and cap. + // + // TODO: Since prove now derives transitive relations, it + // should be sufficient to learn that len(w) <= cap(w) at the + // beginning of prove where we look for all len/cap ops. + if v.Op == OpSliceLen && r< == 0 && ft.caps[v.Args[0].ID] != nil { + // len(s) > w implies cap(s) > w + // len(s) >= w implies cap(s) >= w + // len(s) == w implies cap(s) >= w + ft.update(parent, ft.caps[v.Args[0].ID], w, d, r|gt) + } + if w.Op == OpSliceLen && r> == 0 && ft.caps[w.Args[0].ID] != nil { + // same, length on the RHS. + ft.update(parent, v, ft.caps[w.Args[0].ID], d, r|lt) + } + if v.Op == OpSliceCap && r> == 0 && ft.lens[v.Args[0].ID] != nil { + // cap(s) < w implies len(s) < w + // cap(s) <= w implies len(s) <= w + // cap(s) == w implies len(s) <= w + ft.update(parent, ft.lens[v.Args[0].ID], w, d, r|lt) + } + if w.Op == OpSliceCap && r< == 0 && ft.lens[w.Args[0].ID] != nil { + // same, capacity on the RHS. + ft.update(parent, v, ft.lens[w.Args[0].ID], d, r|gt) + } + + // Process fence-post implications. + // + // First, make the condition > or >=. + if r == lt || r == lt|eq { + v, w = w, v + r = reverseBits[r] + } + switch r { + case gt: + if x, delta := isConstDelta(v); x != nil && delta == 1 { + // x+1 > w ⇒ x >= w + // + // This is useful for eliminating the + // growslice branch of append. + ft.update(parent, x, w, d, gt|eq) + } else if x, delta := isConstDelta(w); x != nil && delta == -1 { + // v > x-1 ⇒ v >= x + ft.update(parent, v, x, d, gt|eq) + } + case gt | eq: + if x, delta := isConstDelta(v); x != nil && delta == -1 { + // x-1 >= w && x > min ⇒ x > w + // + // Useful for i > 0; s[i-1]. 
+ lim, ok := ft.limits[x.ID] + if ok && ((d == signed && lim.min > opMin[v.Op]) || (d == unsigned && lim.umin > 0)) { + ft.update(parent, x, w, d, gt) + } + } else if x, delta := isConstDelta(w); x != nil && delta == 1 { + // v >= x+1 && x < max ⇒ v > x + lim, ok := ft.limits[x.ID] + if ok && ((d == signed && lim.max < opMax[w.Op]) || (d == unsigned && lim.umax < opUMax[w.Op])) { + ft.update(parent, v, x, d, gt) + } + } + } + + // Process: x+delta > w (with delta constant) + // Only signed domain for now (useful for accesses to slices in loops). + if r == gt || r == gt|eq { + if x, delta := isConstDelta(v); x != nil && d == signed { + if parent.Func.pass.debug > 1 { + parent.Func.Warnl(parent.Pos, "x+d %s w; x:%v %v delta:%v w:%v d:%v", r, x, parent.String(), delta, w.AuxInt, d) + } + underflow := true + if l, has := ft.limits[x.ID]; has && delta < 0 { + if (x.Type.Size() == 8 && l.min >= math.MinInt64-delta) || + (x.Type.Size() == 4 && l.min >= math.MinInt32-delta) { + underflow = false + } + } + if delta < 0 && !underflow { + // If delta < 0 and x+delta cannot underflow then x > x+delta (that is, x > v) + ft.update(parent, x, v, signed, gt) + } + if !w.isGenericIntConst() { + // If we know that x+delta > w but w is not constant, we can derive: + // if delta < 0 and x+delta cannot underflow, then x > w + // This is useful for loops with bounds "len(slice)-K" (delta = -K) + if delta < 0 && !underflow { + ft.update(parent, x, w, signed, r) + } + } else { + // With w,delta constants, we want to derive: x+delta > w ⇒ x > w-delta + // + // We compute (using integers of the correct size): + // min = w - delta + // max = MaxInt - delta + // + // And we prove that: + // if minmax: min < x OR x <= max + // + // This is always correct, even in case of overflow. + // + // If the initial fact is x+delta >= w instead, the derived conditions are: + // if minmax: min <= x OR x <= max + // + // Notice the conditions for max are still <=, as they handle overflows. 
+ var min, max int64 + var vmin, vmax *Value + switch x.Type.Size() { + case 8: + min = w.AuxInt - delta + max = int64(^uint64(0)>>1) - delta + + vmin = parent.NewValue0I(parent.Pos, OpConst64, parent.Func.Config.Types.Int64, min) + vmax = parent.NewValue0I(parent.Pos, OpConst64, parent.Func.Config.Types.Int64, max) + + case 4: + min = int64(int32(w.AuxInt) - int32(delta)) + max = int64(int32(^uint32(0)>>1) - int32(delta)) + + vmin = parent.NewValue0I(parent.Pos, OpConst32, parent.Func.Config.Types.Int32, min) + vmax = parent.NewValue0I(parent.Pos, OpConst32, parent.Func.Config.Types.Int32, max) + + case 2: + min = int64(int16(w.AuxInt) - int16(delta)) + max = int64(int16(^uint16(0)>>1) - int16(delta)) + + vmin = parent.NewValue0I(parent.Pos, OpConst16, parent.Func.Config.Types.Int16, min) + vmax = parent.NewValue0I(parent.Pos, OpConst16, parent.Func.Config.Types.Int16, max) + + case 1: + min = int64(int8(w.AuxInt) - int8(delta)) + max = int64(int8(^uint8(0)>>1) - int8(delta)) + + vmin = parent.NewValue0I(parent.Pos, OpConst8, parent.Func.Config.Types.Int8, min) + vmax = parent.NewValue0I(parent.Pos, OpConst8, parent.Func.Config.Types.Int8, max) + + default: + panic("unimplemented") + } + + if min < max { + // Record that x > min and max >= x + ft.update(parent, x, vmin, d, r) + ft.update(parent, vmax, x, d, r|eq) + } else { + // We know that either x>min OR x<=max. factsTable cannot record OR conditions, + // so let's see if we can already prove that one of them is false, in which case + // the other must be true + if l, has := ft.limits[x.ID]; has { + if l.max <= min { + if r&eq == 0 || l.max < min { + // x>min (x>=min) is impossible, so it must be x<=max + ft.update(parent, vmax, x, d, r|eq) + } + } else if l.min > max { + // x<=max is impossible, so it must be x>min + ft.update(parent, x, vmin, d, r) + } + } + } + } + } + } + + // Look through value-preserving extensions. 
+ // If the domain is appropriate for the pre-extension Type, + // repeat the update with the pre-extension Value. + if isCleanExt(v) { + switch { + case d == signed && v.Args[0].Type.IsSigned(): + fallthrough + case d == unsigned && !v.Args[0].Type.IsSigned(): + ft.update(parent, v.Args[0], w, d, r) + } + } + if isCleanExt(w) { + switch { + case d == signed && w.Args[0].Type.IsSigned(): + fallthrough + case d == unsigned && !w.Args[0].Type.IsSigned(): + ft.update(parent, v, w.Args[0], d, r) + } + } +} + +var opMin = map[Op]int64{ + OpAdd64: math.MinInt64, OpSub64: math.MinInt64, + OpAdd32: math.MinInt32, OpSub32: math.MinInt32, +} + +var opMax = map[Op]int64{ + OpAdd64: math.MaxInt64, OpSub64: math.MaxInt64, + OpAdd32: math.MaxInt32, OpSub32: math.MaxInt32, +} + +var opUMax = map[Op]uint64{ + OpAdd64: math.MaxUint64, OpSub64: math.MaxUint64, + OpAdd32: math.MaxUint32, OpSub32: math.MaxUint32, +} + +// isNonNegative reports whether v is known to be non-negative. +func (ft *factsTable) isNonNegative(v *Value) bool { + if isNonNegative(v) { + return true + } + + var max int64 + switch v.Type.Size() { + case 1: + max = math.MaxInt8 + case 2: + max = math.MaxInt16 + case 4: + max = math.MaxInt32 + case 8: + max = math.MaxInt64 + default: + panic("unexpected integer size") + } + + // Check if the recorded limits can prove that the value is positive + + if l, has := ft.limits[v.ID]; has && (l.min >= 0 || l.umax <= uint64(max)) { + return true + } + + // Check if v = x+delta, and we can use x's limits to prove that it's positive + if x, delta := isConstDelta(v); x != nil { + if l, has := ft.limits[x.ID]; has { + if delta > 0 && l.min >= -delta && l.max <= max-delta { + return true + } + if delta < 0 && l.min >= -delta { + return true + } + } + } + + // Check if v is a value-preserving extension of a non-negative value. 
+ if isCleanExt(v) && ft.isNonNegative(v.Args[0]) { + return true + } + + // Check if the signed poset can prove that the value is >= 0 + return ft.orderS.OrderedOrEqual(ft.zero, v) +} + +// checkpoint saves the current state of known relations. +// Called when descending on a branch. +func (ft *factsTable) checkpoint() { + if ft.unsat { + ft.unsatDepth++ + } + ft.stack = append(ft.stack, checkpointFact) + ft.limitStack = append(ft.limitStack, checkpointBound) + ft.orderS.Checkpoint() + ft.orderU.Checkpoint() +} + +// restore restores known relation to the state just +// before the previous checkpoint. +// Called when backing up on a branch. +func (ft *factsTable) restore() { + if ft.unsatDepth > 0 { + ft.unsatDepth-- + } else { + ft.unsat = false + } + for { + old := ft.stack[len(ft.stack)-1] + ft.stack = ft.stack[:len(ft.stack)-1] + if old == checkpointFact { + break + } + if old.r == lt|eq|gt { + delete(ft.facts, old.p) + } else { + ft.facts[old.p] = old.r + } + } + for { + old := ft.limitStack[len(ft.limitStack)-1] + ft.limitStack = ft.limitStack[:len(ft.limitStack)-1] + if old.vid == 0 { // checkpointBound + break + } + if old.limit == noLimit { + delete(ft.limits, old.vid) + } else { + ft.limits[old.vid] = old.limit + } + } + ft.orderS.Undo() + ft.orderU.Undo() +} + +func lessByID(v, w *Value) bool { + if v == nil && w == nil { + // Should not happen, but just in case. + return false + } + if v == nil { + return true + } + return w != nil && v.ID < w.ID +} + +var ( + reverseBits = [...]relation{0, 4, 2, 6, 1, 5, 3, 7} + + // maps what we learn when the positive branch is taken. + // For example: + // OpLess8: {signed, lt}, + // v1 = (OpLess8 v2 v3). + // If v1 branch is taken then we learn that the rangeMask + // can be at most lt. 
+ domainRelationTable = map[Op]struct { + d domain + r relation + }{ + OpEq8: {signed | unsigned, eq}, + OpEq16: {signed | unsigned, eq}, + OpEq32: {signed | unsigned, eq}, + OpEq64: {signed | unsigned, eq}, + OpEqPtr: {pointer, eq}, + + OpNeq8: {signed | unsigned, lt | gt}, + OpNeq16: {signed | unsigned, lt | gt}, + OpNeq32: {signed | unsigned, lt | gt}, + OpNeq64: {signed | unsigned, lt | gt}, + OpNeqPtr: {pointer, lt | gt}, + + OpLess8: {signed, lt}, + OpLess8U: {unsigned, lt}, + OpLess16: {signed, lt}, + OpLess16U: {unsigned, lt}, + OpLess32: {signed, lt}, + OpLess32U: {unsigned, lt}, + OpLess64: {signed, lt}, + OpLess64U: {unsigned, lt}, + + OpLeq8: {signed, lt | eq}, + OpLeq8U: {unsigned, lt | eq}, + OpLeq16: {signed, lt | eq}, + OpLeq16U: {unsigned, lt | eq}, + OpLeq32: {signed, lt | eq}, + OpLeq32U: {unsigned, lt | eq}, + OpLeq64: {signed, lt | eq}, + OpLeq64U: {unsigned, lt | eq}, + + // For these ops, the negative branch is different: we can only + // prove signed/GE (signed/GT) if we can prove that arg0 is non-negative. + // See the special case in addBranchRestrictions. + OpIsInBounds: {signed | unsigned, lt}, // 0 <= arg0 < arg1 + OpIsSliceInBounds: {signed | unsigned, lt | eq}, // 0 <= arg0 <= arg1 + } +) + +// cleanup returns the posets to the free list +func (ft *factsTable) cleanup(f *Func) { + for _, po := range []*poset{ft.orderS, ft.orderU} { + // Make sure it's empty as it should be. A non-empty poset + // might cause errors and miscompilations if reused. + if checkEnabled { + if err := po.CheckEmpty(); err != nil { + f.Fatalf("poset not empty after function %s: %v", f.Name, err) + } + } + f.retPoset(po) + } +} + +// prove removes redundant BlockIf branches that can be inferred +// from previous dominating comparisons. +// +// By far, the most common redundant pair are generated by bounds checking. 
+// For example for the code: +// +// a[i] = 4 +// foo(a[i]) +// +// The compiler will generate the following code: +// +// if i >= len(a) { +// panic("not in bounds") +// } +// a[i] = 4 +// if i >= len(a) { +// panic("not in bounds") +// } +// foo(a[i]) +// +// The second comparison i >= len(a) is clearly redundant because if the +// else branch of the first comparison is executed, we already know that i < len(a). +// The code for the second panic can be removed. +// +// prove works by finding contradictions and trimming branches whose +// conditions are unsatisfiable given the branches leading up to them. +// It tracks a "fact table" of branch conditions. For each branching +// block, it asserts the branch conditions that uniquely dominate that +// block, and then separately asserts the block's branch condition and +// its negation. If either leads to a contradiction, it can trim that +// successor. +func prove(f *Func) { + // Find induction variables. Currently, findIndVars + // is limited to one induction variable per block. + var indVars map[*Block]indVar + for _, v := range findIndVar(f) { + ind := v.ind + if len(ind.Args) != 2 { + // the rewrite code assumes there is only ever two parents to loops + panic("unexpected induction with too many parents") + } + + nxt := v.nxt + if !(ind.Uses == 2 && // 2 used by comparison and next + nxt.Uses == 1) { // 1 used by induction + // ind or nxt is used inside the loop, add it for the facts table + if indVars == nil { + indVars = make(map[*Block]indVar) + } + indVars[v.entry] = v + continue + } else { + // Since this induction variable is not used for anything but counting the iterations, + // no point in putting it into the facts table. + } + + // try to rewrite to a downward counting loop checking against start if the + // loop body does not depends on ind or nxt and end is known before the loop. + // This reduce pressure on the register allocator because this do not need + // to use end on each iteration anymore. 
We compare against the start constant instead. + // That means this code: + // + // loop: + // ind = (Phi (Const [x]) nxt), + // if ind < end + // then goto enter_loop + // else goto exit_loop + // + // enter_loop: + // do something without using ind nor nxt + // nxt = inc + ind + // goto loop + // + // exit_loop: + // + // is rewritten to: + // + // loop: + // ind = (Phi end nxt) + // if (Const [x]) < ind + // then goto enter_loop + // else goto exit_loop + // + // enter_loop: + // do something without using ind nor nxt + // nxt = ind - inc + // goto loop + // + // exit_loop: + // + // this is better because it only require to keep ind then nxt alive while looping, + // while the original form keeps ind then nxt and end alive + start, end := v.min, v.max + if v.flags&indVarCountDown != 0 { + start, end = end, start + } + + if !(start.Op == OpConst8 || start.Op == OpConst16 || start.Op == OpConst32 || start.Op == OpConst64) { + // if start is not a constant we would be winning nothing from inverting the loop + continue + } + if end.Op == OpConst8 || end.Op == OpConst16 || end.Op == OpConst32 || end.Op == OpConst64 { + // TODO: if both start and end are constants we should rewrite such that the comparison + // is against zero and nxt is ++ or -- operation + // That means: + // for i := 2; i < 11; i += 2 { + // should be rewritten to: + // for i := 5; 0 < i; i-- { + continue + } + + header := ind.Block + check := header.Controls[0] + if check == nil { + // we don't know how to rewrite a loop that not simple comparison + continue + } + switch check.Op { + case OpLeq64, OpLeq32, OpLeq16, OpLeq8, + OpLess64, OpLess32, OpLess16, OpLess8: + default: + // we don't know how to rewrite a loop that not simple comparison + continue + } + if !((check.Args[0] == ind && check.Args[1] == end) || + (check.Args[1] == ind && check.Args[0] == end)) { + // we don't know how to rewrite a loop that not simple comparison + continue + } + if end.Block == ind.Block { + // we can't rewrite 
loops where the condition depends on the loop body + // this simple check is forced to work because if this is true a Phi in ind.Block must exists + continue + } + + // invert the check + check.Args[0], check.Args[1] = check.Args[1], check.Args[0] + + // invert start and end in the loop + for i, v := range check.Args { + if v != end { + continue + } + + check.SetArg(i, start) + goto replacedEnd + } + panic(fmt.Sprintf("unreachable, ind: %v, start: %v, end: %v", ind, start, end)) + replacedEnd: + + for i, v := range ind.Args { + if v != start { + continue + } + + ind.SetArg(i, end) + goto replacedStart + } + panic(fmt.Sprintf("unreachable, ind: %v, start: %v, end: %v", ind, start, end)) + replacedStart: + + if nxt.Args[0] != ind { + // unlike additions subtractions are not commutative so be sure we get it right + nxt.Args[0], nxt.Args[1] = nxt.Args[1], nxt.Args[0] + } + + switch nxt.Op { + case OpAdd8: + nxt.Op = OpSub8 + case OpAdd16: + nxt.Op = OpSub16 + case OpAdd32: + nxt.Op = OpSub32 + case OpAdd64: + nxt.Op = OpSub64 + case OpSub8: + nxt.Op = OpAdd8 + case OpSub16: + nxt.Op = OpAdd16 + case OpSub32: + nxt.Op = OpAdd32 + case OpSub64: + nxt.Op = OpAdd64 + default: + panic("unreachable") + } + + if f.pass.debug > 0 { + f.Warnl(ind.Pos, "Inverted loop iteration") + } + } + + ft := newFactsTable(f) + ft.checkpoint() + + var lensVars map[*Block][]*Value + var logicVars map[*Block][]*Value + + // Find length and capacity ops. + for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Uses == 0 { + // We don't care about dead values. + // (There can be some that are CSEd but not removed yet.) + continue + } + switch v.Op { + case OpStringLen: + ft.update(b, v, ft.zero, signed, gt|eq) + case OpSliceLen: + if ft.lens == nil { + ft.lens = map[ID]*Value{} + } + // Set all len Values for the same slice as equal in the poset. + // The poset handles transitive relations, so Values related to + // any OpSliceLen for this slice will be correctly related to others. 
+ if l, ok := ft.lens[v.Args[0].ID]; ok { + ft.update(b, v, l, signed, eq) + } else { + ft.lens[v.Args[0].ID] = v + } + ft.update(b, v, ft.zero, signed, gt|eq) + if v.Args[0].Op == OpSliceMake { + if lensVars == nil { + lensVars = make(map[*Block][]*Value) + } + lensVars[b] = append(lensVars[b], v) + } + case OpSliceCap: + if ft.caps == nil { + ft.caps = map[ID]*Value{} + } + // Same as case OpSliceLen above, but for slice cap. + if c, ok := ft.caps[v.Args[0].ID]; ok { + ft.update(b, v, c, signed, eq) + } else { + ft.caps[v.Args[0].ID] = v + } + ft.update(b, v, ft.zero, signed, gt|eq) + if v.Args[0].Op == OpSliceMake { + if lensVars == nil { + lensVars = make(map[*Block][]*Value) + } + lensVars[b] = append(lensVars[b], v) + } + case OpCtz64, OpCtz32, OpCtz16, OpCtz8, OpBitLen64, OpBitLen32, OpBitLen16, OpBitLen8: + ft.update(b, v, ft.zero, signed, gt|eq) + // TODO: we could also do <= 64/32/16/8, if that helped. + case OpAnd64, OpAnd32, OpAnd16, OpAnd8: + ft.update(b, v, v.Args[1], unsigned, lt|eq) + ft.update(b, v, v.Args[0], unsigned, lt|eq) + for i := 0; i < 2; i++ { + if isNonNegative(v.Args[i]) { + ft.update(b, v, v.Args[i], signed, lt|eq) + ft.update(b, v, ft.zero, signed, gt|eq) + } + } + if logicVars == nil { + logicVars = make(map[*Block][]*Value) + } + logicVars[b] = append(logicVars[b], v) + case OpOr64, OpOr32, OpOr16, OpOr8: + // TODO: investigate how to always add facts without much slowdown, see issue #57959. 
+ if v.Args[0].isGenericIntConst() { + ft.update(b, v, v.Args[0], unsigned, gt|eq) + } + if v.Args[1].isGenericIntConst() { + ft.update(b, v, v.Args[1], unsigned, gt|eq) + } + case OpDiv64u, OpDiv32u, OpDiv16u, OpDiv8u, + OpRsh8Ux64, OpRsh8Ux32, OpRsh8Ux16, OpRsh8Ux8, + OpRsh16Ux64, OpRsh16Ux32, OpRsh16Ux16, OpRsh16Ux8, + OpRsh32Ux64, OpRsh32Ux32, OpRsh32Ux16, OpRsh32Ux8, + OpRsh64Ux64, OpRsh64Ux32, OpRsh64Ux16, OpRsh64Ux8: + ft.update(b, v, v.Args[0], unsigned, lt|eq) + case OpMod64u, OpMod32u, OpMod16u, OpMod8u: + ft.update(b, v, v.Args[0], unsigned, lt|eq) + ft.update(b, v, v.Args[1], unsigned, lt) + case OpPhi: + // Determine the min and max value of OpPhi composed entirely of integer constants. + // + // For example, for an OpPhi: + // + // v1 = OpConst64 [13] + // v2 = OpConst64 [7] + // v3 = OpConst64 [42] + // + // v4 = OpPhi(v1, v2, v3) + // + // We can prove: + // + // v4 >= 7 && v4 <= 42 + // + // TODO(jake-ciolek): Handle nested constant OpPhi's + sameConstOp := true + min := 0 + max := 0 + + if !v.Args[min].isGenericIntConst() { + break + } + + for k := range v.Args { + if v.Args[k].Op != v.Args[min].Op { + sameConstOp = false + break + } + if v.Args[k].AuxInt < v.Args[min].AuxInt { + min = k + } + if v.Args[k].AuxInt > v.Args[max].AuxInt { + max = k + } + } + + if sameConstOp { + ft.update(b, v, v.Args[min], signed, gt|eq) + ft.update(b, v, v.Args[max], signed, lt|eq) + } + // One might be tempted to create a v >= ft.zero relation for + // all OpPhi's composed of only provably-positive values + // but that bloats up the facts table for a very negligible gain. + // In Go itself, very few functions get improved (< 5) at a cost of 5-7% total increase + // of compile time. + } + } + } + + // current node state + type walkState int + const ( + descend walkState = iota + simplify + ) + // work maintains the DFS stack. 
+ type bp struct { + block *Block // current handled block + state walkState // what's to do + } + work := make([]bp, 0, 256) + work = append(work, bp{ + block: f.Entry, + state: descend, + }) + + idom := f.Idom() + sdom := f.Sdom() + + // DFS on the dominator tree. + // + // For efficiency, we consider only the dominator tree rather + // than the entire flow graph. On the way down, we consider + // incoming branches and accumulate conditions that uniquely + // dominate the current block. If we discover a contradiction, + // we can eliminate the entire block and all of its children. + // On the way back up, we consider outgoing branches that + // haven't already been considered. This way we consider each + // branch condition only once. + for len(work) > 0 { + node := work[len(work)-1] + work = work[:len(work)-1] + parent := idom[node.block.ID] + branch := getBranch(sdom, parent, node.block) + + switch node.state { + case descend: + ft.checkpoint() + + // Entering the block, add the block-depending facts that we collected + // at the beginning: induction variables and lens/caps of slices. 
+ if iv, ok := indVars[node.block]; ok { + addIndVarRestrictions(ft, parent, iv) + } + if lens, ok := lensVars[node.block]; ok { + for _, v := range lens { + switch v.Op { + case OpSliceLen: + ft.update(node.block, v, v.Args[0].Args[1], signed, eq) + case OpSliceCap: + ft.update(node.block, v, v.Args[0].Args[2], signed, eq) + } + } + } + + if branch != unknown { + addBranchRestrictions(ft, parent, branch) + // After we add the branch restriction, re-check the logic operations in the parent block, + // it may give us more info to omit some branches + if logic, ok := logicVars[parent]; ok { + for _, v := range logic { + // we only have OpAnd for now + ft.update(parent, v, v.Args[1], unsigned, lt|eq) + ft.update(parent, v, v.Args[0], unsigned, lt|eq) + for i := 0; i < 2; i++ { + if isNonNegative(v.Args[i]) { + ft.update(parent, v, v.Args[i], signed, lt|eq) + ft.update(parent, v, ft.zero, signed, gt|eq) + } + } + } + } + if ft.unsat { + // node.block is unreachable. + // Remove it and don't visit + // its children. + removeBranch(parent, branch) + ft.restore() + break + } + // Otherwise, we can now commit to + // taking this branch. We'll restore + // ft when we unwind. + } + + // Add inductive facts for phis in this block. + addLocalInductiveFacts(ft, node.block) + + work = append(work, bp{ + block: node.block, + state: simplify, + }) + for s := sdom.Child(node.block); s != nil; s = sdom.Sibling(s) { + work = append(work, bp{ + block: s, + state: descend, + }) + } + + case simplify: + simplifyBlock(sdom, ft, node.block) + ft.restore() + } + } + + ft.restore() + + ft.cleanup(f) +} + +// getBranch returns the range restrictions added by p +// when reaching b. p is the immediate dominator of b. +func getBranch(sdom SparseTree, p *Block, b *Block) branch { + if p == nil { + return unknown + } + switch p.Kind { + case BlockIf: + // If p and p.Succs[0] are dominators it means that every path + // from entry to b passes through p and p.Succs[0]. 
We care that + // no path from entry to b passes through p.Succs[1]. If p.Succs[0] + // has one predecessor then (apart from the degenerate case), + // there is no path from entry that can reach b through p.Succs[1]. + // TODO: how about p->yes->b->yes, i.e. a loop in yes. + if sdom.IsAncestorEq(p.Succs[0].b, b) && len(p.Succs[0].b.Preds) == 1 { + return positive + } + if sdom.IsAncestorEq(p.Succs[1].b, b) && len(p.Succs[1].b.Preds) == 1 { + return negative + } + case BlockJumpTable: + // TODO: this loop can lead to quadratic behavior, as + // getBranch can be called len(p.Succs) times. + for i, e := range p.Succs { + if sdom.IsAncestorEq(e.b, b) && len(e.b.Preds) == 1 { + return jumpTable0 + branch(i) + } + } + } + return unknown +} + +// addIndVarRestrictions updates the factsTables ft with the facts +// learned from the induction variable indVar which drives the loop +// starting in Block b. +func addIndVarRestrictions(ft *factsTable, b *Block, iv indVar) { + d := signed + if ft.isNonNegative(iv.min) && ft.isNonNegative(iv.max) { + d |= unsigned + } + + if iv.flags&indVarMinExc == 0 { + addRestrictions(b, ft, d, iv.min, iv.ind, lt|eq) + } else { + addRestrictions(b, ft, d, iv.min, iv.ind, lt) + } + + if iv.flags&indVarMaxInc == 0 { + addRestrictions(b, ft, d, iv.ind, iv.max, lt) + } else { + addRestrictions(b, ft, d, iv.ind, iv.max, lt|eq) + } +} + +// addBranchRestrictions updates the factsTables ft with the facts learned when +// branching from Block b in direction br. +func addBranchRestrictions(ft *factsTable, b *Block, br branch) { + c := b.Controls[0] + switch { + case br == negative: + addRestrictions(b, ft, boolean, nil, c, eq) + case br == positive: + addRestrictions(b, ft, boolean, nil, c, lt|gt) + case br >= jumpTable0: + idx := br - jumpTable0 + val := int64(idx) + if v, off := isConstDelta(c); v != nil { + // Establish the bound on the underlying value we're switching on, + // not on the offset-ed value used as the jump table index. 
+ c = v + val -= off + } + old, ok := ft.limits[c.ID] + if !ok { + old = noLimit + } + ft.limitStack = append(ft.limitStack, limitFact{c.ID, old}) + if val < old.min || val > old.max || uint64(val) < old.umin || uint64(val) > old.umax { + ft.unsat = true + if b.Func.pass.debug > 2 { + b.Func.Warnl(b.Pos, "block=%s outedge=%d %s=%d unsat", b, idx, c, val) + } + } else { + ft.limits[c.ID] = limit{val, val, uint64(val), uint64(val)} + if b.Func.pass.debug > 2 { + b.Func.Warnl(b.Pos, "block=%s outedge=%d %s=%d", b, idx, c, val) + } + } + default: + panic("unknown branch") + } + if tr, has := domainRelationTable[c.Op]; has { + // When we branched from parent we learned a new set of + // restrictions. Update the factsTable accordingly. + d := tr.d + if d == signed && ft.isNonNegative(c.Args[0]) && ft.isNonNegative(c.Args[1]) { + d |= unsigned + } + switch c.Op { + case OpIsInBounds, OpIsSliceInBounds: + // 0 <= a0 < a1 (or 0 <= a0 <= a1) + // + // On the positive branch, we learn: + // signed: 0 <= a0 < a1 (or 0 <= a0 <= a1) + // unsigned: a0 < a1 (or a0 <= a1) + // + // On the negative branch, we learn (0 > a0 || + // a0 >= a1). In the unsigned domain, this is + // simply a0 >= a1 (which is the reverse of the + // positive branch, so nothing surprising). + // But in the signed domain, we can't express the || + // condition, so check if a0 is non-negative instead, + // to be able to learn something. 
+ switch br { + case negative: + d = unsigned + if ft.isNonNegative(c.Args[0]) { + d |= signed + } + addRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r^(lt|gt|eq)) + case positive: + addRestrictions(b, ft, signed, ft.zero, c.Args[0], lt|eq) + addRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r) + } + default: + switch br { + case negative: + addRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r^(lt|gt|eq)) + case positive: + addRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r) + } + } + + } +} + +// addRestrictions updates restrictions from the immediate +// dominating block (p) using r. +func addRestrictions(parent *Block, ft *factsTable, t domain, v, w *Value, r relation) { + if t == 0 { + // Trivial case: nothing to do. + // Should not happen, but just in case. + return + } + for i := domain(1); i <= t; i <<= 1 { + if t&i == 0 { + continue + } + ft.update(parent, v, w, i, r) + } +} + +// addLocalInductiveFacts adds inductive facts when visiting b, where +// b is a join point in a loop. In contrast with findIndVar, this +// depends on facts established for b, which is why it happens when +// visiting b. +// +// TODO: It would be nice to combine this with findIndVar. +func addLocalInductiveFacts(ft *factsTable, b *Block) { + // This looks for a specific pattern of induction: + // + // 1. i1 = OpPhi(min, i2) in b + // 2. i2 = i1 + 1 + // 3. i2 < max at exit from b.Preds[1] + // 4. min < max + // + // If all of these conditions are true, then i1 < max and i1 >= min. + + // To ensure this is a loop header node. + if len(b.Preds) != 2 { + return + } + + for _, i1 := range b.Values { + if i1.Op != OpPhi { + continue + } + + // Check for conditions 1 and 2. This is easy to do + // and will throw out most phis. + min, i2 := i1.Args[0], i1.Args[1] + if i1q, delta := isConstDelta(i2); i1q != i1 || delta != 1 { + continue + } + + // Try to prove condition 3. 
We can't just query the + // fact table for this because we don't know what the + // facts of b.Preds[1] are (in general, b.Preds[1] is + // a loop-back edge, so we haven't even been there + // yet). As a conservative approximation, we look for + // this condition in the predecessor chain until we + // hit a join point. + uniquePred := func(b *Block) *Block { + if len(b.Preds) == 1 { + return b.Preds[0].b + } + return nil + } + pred, child := b.Preds[1].b, b + for ; pred != nil; pred, child = uniquePred(pred), pred { + if pred.Kind != BlockIf { + continue + } + control := pred.Controls[0] + + br := unknown + if pred.Succs[0].b == child { + br = positive + } + if pred.Succs[1].b == child { + if br != unknown { + continue + } + br = negative + } + if br == unknown { + continue + } + + tr, has := domainRelationTable[control.Op] + if !has { + continue + } + r := tr.r + if br == negative { + // Negative branch taken to reach b. + // Complement the relations. + r = (lt | eq | gt) ^ r + } + + // Check for i2 < max or max > i2. + var max *Value + if r == lt && control.Args[0] == i2 { + max = control.Args[1] + } else if r == gt && control.Args[1] == i2 { + max = control.Args[0] + } else { + continue + } + + // Check condition 4 now that we have a + // candidate max. For this we can query the + // fact table. We "prove" min < max by showing + // that min >= max is unsat. (This may simply + // compare two constants; that's fine.) + ft.checkpoint() + ft.update(b, min, max, tr.d, gt|eq) + proved := ft.unsat + ft.restore() + + if proved { + // We know that min <= i1 < max. 
+ if b.Func.pass.debug > 0 { + printIndVar(b, i1, min, max, 1, 0) + } + ft.update(b, min, i1, tr.d, lt|eq) + ft.update(b, i1, max, tr.d, lt) + } + } + } +} + +var ctzNonZeroOp = map[Op]Op{OpCtz8: OpCtz8NonZero, OpCtz16: OpCtz16NonZero, OpCtz32: OpCtz32NonZero, OpCtz64: OpCtz64NonZero} +var mostNegativeDividend = map[Op]int64{ + OpDiv16: -1 << 15, + OpMod16: -1 << 15, + OpDiv32: -1 << 31, + OpMod32: -1 << 31, + OpDiv64: -1 << 63, + OpMod64: -1 << 63} + +// simplifyBlock simplifies some constant values in b and evaluates +// branches to non-uniquely dominated successors of b. +func simplifyBlock(sdom SparseTree, ft *factsTable, b *Block) { + for _, v := range b.Values { + switch v.Op { + case OpSlicemask: + // Replace OpSlicemask operations in b with constants where possible. + x, delta := isConstDelta(v.Args[0]) + if x == nil { + break + } + // slicemask(x + y) + // if x is larger than -y (y is negative), then slicemask is -1. + lim, ok := ft.limits[x.ID] + if !ok { + break + } + if lim.umin > uint64(-delta) { + if v.Args[0].Op == OpAdd64 { + v.reset(OpConst64) + } else { + v.reset(OpConst32) + } + if b.Func.pass.debug > 0 { + b.Func.Warnl(v.Pos, "Proved slicemask not needed") + } + v.AuxInt = -1 + } + case OpCtz8, OpCtz16, OpCtz32, OpCtz64: + // On some architectures, notably amd64, we can generate much better + // code for CtzNN if we know that the argument is non-zero. + // Capture that information here for use in arch-specific optimizations. 
+ x := v.Args[0] + lim, ok := ft.limits[x.ID] + if !ok { + break + } + if lim.umin > 0 || lim.min > 0 || lim.max < 0 { + if b.Func.pass.debug > 0 { + b.Func.Warnl(v.Pos, "Proved %v non-zero", v.Op) + } + v.Op = ctzNonZeroOp[v.Op] + } + case OpRsh8x8, OpRsh8x16, OpRsh8x32, OpRsh8x64, + OpRsh16x8, OpRsh16x16, OpRsh16x32, OpRsh16x64, + OpRsh32x8, OpRsh32x16, OpRsh32x32, OpRsh32x64, + OpRsh64x8, OpRsh64x16, OpRsh64x32, OpRsh64x64: + // Check whether, for a >> b, we know that a is non-negative + // and b is all of a's bits except the MSB. If so, a is shifted to zero. + bits := 8 * v.Type.Size() + if v.Args[1].isGenericIntConst() && v.Args[1].AuxInt >= bits-1 && ft.isNonNegative(v.Args[0]) { + if b.Func.pass.debug > 0 { + b.Func.Warnl(v.Pos, "Proved %v shifts to zero", v.Op) + } + switch bits { + case 64: + v.reset(OpConst64) + case 32: + v.reset(OpConst32) + case 16: + v.reset(OpConst16) + case 8: + v.reset(OpConst8) + default: + panic("unexpected integer size") + } + v.AuxInt = 0 + break // Be sure not to fallthrough - this is no longer OpRsh. + } + // If the Rsh hasn't been replaced with 0, still check if it is bounded. + fallthrough + case OpLsh8x8, OpLsh8x16, OpLsh8x32, OpLsh8x64, + OpLsh16x8, OpLsh16x16, OpLsh16x32, OpLsh16x64, + OpLsh32x8, OpLsh32x16, OpLsh32x32, OpLsh32x64, + OpLsh64x8, OpLsh64x16, OpLsh64x32, OpLsh64x64, + OpRsh8Ux8, OpRsh8Ux16, OpRsh8Ux32, OpRsh8Ux64, + OpRsh16Ux8, OpRsh16Ux16, OpRsh16Ux32, OpRsh16Ux64, + OpRsh32Ux8, OpRsh32Ux16, OpRsh32Ux32, OpRsh32Ux64, + OpRsh64Ux8, OpRsh64Ux16, OpRsh64Ux32, OpRsh64Ux64: + // Check whether, for a << b, we know that b + // is strictly less than the number of bits in a. 
+ by := v.Args[1] + lim, ok := ft.limits[by.ID] + if !ok { + break + } + bits := 8 * v.Args[0].Type.Size() + if lim.umax < uint64(bits) || (lim.max < bits && ft.isNonNegative(by)) { + v.AuxInt = 1 // see shiftIsBounded + if b.Func.pass.debug > 0 { + b.Func.Warnl(v.Pos, "Proved %v bounded", v.Op) + } + } + case OpDiv16, OpDiv32, OpDiv64, OpMod16, OpMod32, OpMod64: + // On amd64 and 386 fix-up code can be avoided if we know + // the divisor is not -1 or the dividend > MinIntNN. + // Don't modify AuxInt on other architectures, + // as that can interfere with CSE. + // TODO: add other architectures? + if b.Func.Config.arch != "386" && b.Func.Config.arch != "amd64" { + break + } + divr := v.Args[1] + divrLim, divrLimok := ft.limits[divr.ID] + divd := v.Args[0] + divdLim, divdLimok := ft.limits[divd.ID] + if (divrLimok && (divrLim.max < -1 || divrLim.min > -1)) || + (divdLimok && divdLim.min > mostNegativeDividend[v.Op]) { + // See DivisionNeedsFixUp in rewrite.go. + // v.AuxInt = 1 means we have proved both that the divisor is not -1 + // and that the dividend is not the most negative integer, + // so we do not need to add fix-up code. + v.AuxInt = 1 + if b.Func.pass.debug > 0 { + b.Func.Warnl(v.Pos, "Proved %v does not need fix-up", v.Op) + } + } + } + // Fold provable constant results. + // Helps in cases where we reuse a value after branching on its equality. 
+ for i, arg := range v.Args { + switch arg.Op { + case OpConst64, OpConst32, OpConst16, OpConst8: + continue + } + lim, ok := ft.limits[arg.ID] + if !ok { + continue + } + + var constValue int64 + typ := arg.Type + bits := 8 * typ.Size() + switch { + case lim.min == lim.max: + constValue = lim.min + case lim.umin == lim.umax: + // truncate then sign extand + switch bits { + case 64: + constValue = int64(lim.umin) + case 32: + constValue = int64(int32(lim.umin)) + case 16: + constValue = int64(int16(lim.umin)) + case 8: + constValue = int64(int8(lim.umin)) + default: + panic("unexpected integer size") + } + default: + continue + } + var c *Value + f := b.Func + switch bits { + case 64: + c = f.ConstInt64(typ, constValue) + case 32: + c = f.ConstInt32(typ, int32(constValue)) + case 16: + c = f.ConstInt16(typ, int16(constValue)) + case 8: + c = f.ConstInt8(typ, int8(constValue)) + default: + panic("unexpected integer size") + } + v.SetArg(i, c) + if b.Func.pass.debug > 1 { + b.Func.Warnl(v.Pos, "Proved %v's arg %d (%v) is constant %d", v, i, arg, constValue) + } + } + } + + if b.Kind != BlockIf { + return + } + + // Consider outgoing edges from this block. + parent := b + for i, branch := range [...]branch{positive, negative} { + child := parent.Succs[i].b + if getBranch(sdom, parent, child) != unknown { + // For edges to uniquely dominated blocks, we + // already did this when we visited the child. + continue + } + // For edges to other blocks, this can trim a branch + // even if we couldn't get rid of the child itself. + ft.checkpoint() + addBranchRestrictions(ft, parent, branch) + unsat := ft.unsat + ft.restore() + if unsat { + // This branch is impossible, so remove it + // from the block. + removeBranch(parent, branch) + // No point in considering the other branch. + // (It *is* possible for both to be + // unsatisfiable since the fact table is + // incomplete. We could turn this into a + // BlockExit, but it doesn't seem worth it.) 
+ break + } + } +} + +func removeBranch(b *Block, branch branch) { + c := b.Controls[0] + if b.Func.pass.debug > 0 { + verb := "Proved" + if branch == positive { + verb = "Disproved" + } + if b.Func.pass.debug > 1 { + b.Func.Warnl(b.Pos, "%s %s (%s)", verb, c.Op, c) + } else { + b.Func.Warnl(b.Pos, "%s %s", verb, c.Op) + } + } + if c != nil && c.Pos.IsStmt() == src.PosIsStmt && c.Pos.SameFileAndLine(b.Pos) { + // attempt to preserve statement marker. + b.Pos = b.Pos.WithIsStmt() + } + if branch == positive || branch == negative { + b.Kind = BlockFirst + b.ResetControls() + if branch == positive { + b.swapSuccessors() + } + } else { + // TODO: figure out how to remove an entry from a jump table + } +} + +// isNonNegative reports whether v is known to be greater or equal to zero. +func isNonNegative(v *Value) bool { + if !v.Type.IsInteger() { + v.Fatalf("isNonNegative bad type: %v", v.Type) + } + // TODO: return true if !v.Type.IsSigned() + // SSA isn't type-safe enough to do that now (issue 37753). + // The checks below depend only on the pattern of bits. 
+ + switch v.Op { + case OpConst64: + return v.AuxInt >= 0 + + case OpConst32: + return int32(v.AuxInt) >= 0 + + case OpConst16: + return int16(v.AuxInt) >= 0 + + case OpConst8: + return int8(v.AuxInt) >= 0 + + case OpStringLen, OpSliceLen, OpSliceCap, + OpZeroExt8to64, OpZeroExt16to64, OpZeroExt32to64, + OpZeroExt8to32, OpZeroExt16to32, OpZeroExt8to16, + OpCtz64, OpCtz32, OpCtz16, OpCtz8, + OpCtz64NonZero, OpCtz32NonZero, OpCtz16NonZero, OpCtz8NonZero, + OpBitLen64, OpBitLen32, OpBitLen16, OpBitLen8: + return true + + case OpRsh64Ux64, OpRsh32Ux64: + by := v.Args[1] + return by.Op == OpConst64 && by.AuxInt > 0 + + case OpRsh64x64, OpRsh32x64, OpRsh8x64, OpRsh16x64, OpRsh32x32, OpRsh64x32, + OpSignExt32to64, OpSignExt16to64, OpSignExt8to64, OpSignExt16to32, OpSignExt8to32: + return isNonNegative(v.Args[0]) + + case OpAnd64, OpAnd32, OpAnd16, OpAnd8: + return isNonNegative(v.Args[0]) || isNonNegative(v.Args[1]) + + case OpMod64, OpMod32, OpMod16, OpMod8, + OpDiv64, OpDiv32, OpDiv16, OpDiv8, + OpOr64, OpOr32, OpOr16, OpOr8, + OpXor64, OpXor32, OpXor16, OpXor8: + return isNonNegative(v.Args[0]) && isNonNegative(v.Args[1]) + + // We could handle OpPhi here, but the improvements from doing + // so are very minor, and it is neither simple nor cheap. + } + return false +} + +// isConstDelta returns non-nil if v is equivalent to w+delta (signed). 
+func isConstDelta(v *Value) (w *Value, delta int64) { + cop := OpConst64 + switch v.Op { + case OpAdd32, OpSub32: + cop = OpConst32 + case OpAdd16, OpSub16: + cop = OpConst16 + case OpAdd8, OpSub8: + cop = OpConst8 + } + switch v.Op { + case OpAdd64, OpAdd32, OpAdd16, OpAdd8: + if v.Args[0].Op == cop { + return v.Args[1], v.Args[0].AuxInt + } + if v.Args[1].Op == cop { + return v.Args[0], v.Args[1].AuxInt + } + case OpSub64, OpSub32, OpSub16, OpSub8: + if v.Args[1].Op == cop { + aux := v.Args[1].AuxInt + if aux != -aux { // Overflow; too bad + return v.Args[0], -aux + } + } + } + return nil, 0 +} + +// isCleanExt reports whether v is the result of a value-preserving +// sign or zero extension. +func isCleanExt(v *Value) bool { + switch v.Op { + case OpSignExt8to16, OpSignExt8to32, OpSignExt8to64, + OpSignExt16to32, OpSignExt16to64, OpSignExt32to64: + // signed -> signed is the only value-preserving sign extension + return v.Args[0].Type.IsSigned() && v.Type.IsSigned() + + case OpZeroExt8to16, OpZeroExt8to32, OpZeroExt8to64, + OpZeroExt16to32, OpZeroExt16to64, OpZeroExt32to64: + // unsigned -> signed/unsigned are value-preserving zero extensions + return !v.Args[0].Type.IsSigned() + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/regalloc.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/regalloc.go new file mode 100644 index 0000000000000000000000000000000000000000..2325b9ee458412571decd28537f6165782b47494 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/regalloc.go @@ -0,0 +1,2947 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Register allocation. +// +// We use a version of a linear scan register allocator. We treat the +// whole function as a single long basic block and run through +// it using a greedy register allocator. 
Then all merge edges +// (those targeting a block with len(Preds)>1) are processed to +// shuffle data into the place that the target of the edge expects. +// +// The greedy allocator moves values into registers just before they +// are used, spills registers only when necessary, and spills the +// value whose next use is farthest in the future. +// +// The register allocator requires that a block is not scheduled until +// at least one of its predecessors have been scheduled. The most recent +// such predecessor provides the starting register state for a block. +// +// It also requires that there are no critical edges (critical = +// comes from a block with >1 successor and goes to a block with >1 +// predecessor). This makes it easy to add fixup code on merge edges - +// the source of a merge edge has only one successor, so we can add +// fixup code to the end of that block. + +// Spilling +// +// During the normal course of the allocator, we might throw a still-live +// value out of all registers. When that value is subsequently used, we must +// load it from a slot on the stack. We must also issue an instruction to +// initialize that stack location with a copy of v. +// +// pre-regalloc: +// (1) v = Op ... +// (2) x = Op ... +// (3) ... = Op v ... +// +// post-regalloc: +// (1) v = Op ... : AX // computes v, store result in AX +// s = StoreReg v // spill v to a stack slot +// (2) x = Op ... : AX // some other op uses AX +// c = LoadReg s : CX // restore v from stack slot +// (3) ... = Op c ... // use the restored value +// +// Allocation occurs normally until we reach (3) and we realize we have +// a use of v and it isn't in any register. At that point, we allocate +// a spill (a StoreReg) for v. We can't determine the correct place for +// the spill at this point, so we allocate the spill as blockless initially. +// The restore is then generated to load v back into a register so it can +// be used. Subsequent uses of v will use the restored value c instead. 
+// +// What remains is the question of where to schedule the spill. +// During allocation, we keep track of the dominator of all restores of v. +// The spill of v must dominate that block. The spill must also be issued at +// a point where v is still in a register. +// +// To find the right place, start at b, the block which dominates all restores. +// - If b is v.Block, then issue the spill right after v. +// It is known to be in a register at that point, and dominates any restores. +// - Otherwise, if v is in a register at the start of b, +// put the spill of v at the start of b. +// - Otherwise, set b = immediate dominator of b, and repeat. +// +// Phi values are special, as always. We define two kinds of phis, those +// where the merge happens in a register (a "register" phi) and those where +// the merge happens in a stack location (a "stack" phi). +// +// A register phi must have the phi and all of its inputs allocated to the +// same register. Register phis are spilled similarly to regular ops. +// +// A stack phi must have the phi and all of its inputs allocated to the same +// stack location. Stack phis start out life already spilled - each phi +// input must be a store (using StoreReg) at the end of the corresponding +// predecessor block. +// b1: y = ... : AX b2: z = ... : BX +// y2 = StoreReg y z2 = StoreReg z +// goto b3 goto b3 +// b3: x = phi(y2, z2) +// The stack allocator knows that StoreReg args of stack-allocated phis +// must be allocated to the same stack slot as the phi that uses them. +// x is now a spilled value and a restore must appear before its first use. + +// TODO + +// Use an affinity graph to mark two values which should use the +// same register. This affinity graph will be used to prefer certain +// registers for allocation. This affinity helps eliminate moves that +// are required for phi implementations and helps generate allocations +// for 2-register architectures. + +// Note: regalloc generates a not-quite-SSA output. 
// If we have:
//
//	b1: x = ... : AX
//	    x2 = StoreReg x
//	    ... AX gets reused for something else ...
//	    if ... goto b3 else b4
//
//	b3: x3 = LoadReg x2 : BX        b4: x4 = LoadReg x2 : CX
//	    ... use x3 ...                  ... use x4 ...
//
//	b2: ... use x3 ...
//
// If b3 is the primary predecessor of b2, then we use x3 in b2 and
// add a x4:CX->BX copy at the end of b4.
// But the definition of x3 doesn't dominate b2. We should really
// insert an extra phi at the start of b2 (x5=phi(x3,x4):BX) to keep
// SSA form. For now, we ignore this problem as remaining in strict
// SSA form isn't needed after regalloc. We'll just leave the use
// of x3 not dominated by the definition of x3, and the CX->BX copy
// will have no use (so don't run deadcode after regalloc!).
// TODO: maybe we should introduce these extra phis?

package ssa

import (
	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/types"
	"cmd/internal/src"
	"cmd/internal/sys"
	"fmt"
	"internal/buildcfg"
	"math/bits"
	"unsafe"
)

// Debug verbosity thresholds, compared against s.f.pass.debug;
// a larger pass.debug value enables more detailed tracing.
const (
	moveSpills = iota
	logSpills  // spill loads are logged via Warnl above this level
	regDebug   // per-register assignment decisions are printed above this level
	stackDebug
)

// distance is a measure of how far into the future values are used.
// distance is measured in units of instructions.
const (
	likelyDistance   = 1
	normalDistance   = 10
	unlikelyDistance = 100
)

// regalloc performs register allocation on f. It sets f.RegAlloc
// to the resulting allocation.
func regalloc(f *Func) {
	var s regAllocState
	s.init(f)
	s.regalloc(f)
	s.close()
}

// A register is an index into the registers slice of the regAllocState.
type register uint8

// noRegister is the sentinel "no register assigned" value.
const noRegister register = 255

// For bulk initializing
var noRegisters [32]register = [32]register{
	noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister,
	noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister,
	noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister,
	noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister,
}

// A regMask encodes a set of machine registers.
// TODO: regMask -> regSet?
type regMask uint64

// String renders the mask as space-separated generic register
// numbers, e.g. "r0 r3".
func (m regMask) String() string {
	s := ""
	for r := register(0); m != 0; r++ {
		if m>>r&1 == 0 {
			continue
		}
		m &^= regMask(1) << r
		if s != "" {
			s += " "
		}
		s += fmt.Sprintf("r%d", r)
	}
	return s
}

// RegMaskString renders the mask using the machine's register names.
func (s *regAllocState) RegMaskString(m regMask) string {
	str := ""
	for r := register(0); m != 0; r++ {
		if m>>r&1 == 0 {
			continue
		}
		m &^= regMask(1) << r
		if str != "" {
			str += " "
		}
		str += s.registers[r].String()
	}
	return str
}

// countRegs returns the number of set bits in the register mask.
func countRegs(r regMask) int {
	return bits.OnesCount64(uint64(r))
}

// pickReg picks an arbitrary register from the register mask.
func pickReg(r regMask) register {
	if r == 0 {
		panic("can't pick a register from an empty set")
	}
	// pick the lowest one
	return register(bits.TrailingZeros64(uint64(r)))
}

// use is one node of a per-value linked list of upcoming uses.
type use struct {
	dist int32    // distance from start of the block to a use of a value
	pos  src.XPos // source position of the use
	next *use     // linked list of uses of a value in nondecreasing dist order
}

// A valState records the register allocation state for a (pre-regalloc) value.
type valState struct {
	regs              regMask // the set of registers holding a Value (usually just one)
	uses              *use    // list of uses in this block
	spill             *Value  // spilled copy of the Value (if any)
	restoreMin        int32   // minimum of all restores' blocks' sdom.entry
	restoreMax        int32   // maximum of all restores' blocks' sdom.exit
	needReg           bool    // cached value of !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags()
	rematerializeable bool    // cached value of v.rematerializeable()
}

type regState struct {
	v *Value // Original (preregalloc) Value stored in this register.
	c *Value // A Value equal to v which is currently in a register. Might be v or a copy of it.
	// If a register is unused, v==c==nil
}

// regAllocState holds all working state of the register allocator
// for a single function.
type regAllocState struct {
	f *Func

	sdom        SparseTree
	registers   []Register
	numRegs     register
	SPReg       register
	SBReg       register
	GReg        register
	allocatable regMask

	// live values at the end of each block. live[b.ID] is a list of value IDs
	// which are live at the end of b, together with a count of how many instructions
	// forward to the next use.
	live [][]liveInfo
	// desired register assignments at the end of each block.
	// Note that this is a static map computed before allocation occurs. Dynamic
	// register desires (from partially completed allocations) will trump
	// this information.
	desired []desiredState

	// current state of each (preregalloc) Value
	values []valState

	// ID of SP, SB values
	sp, sb ID

	// For each Value, map from its value ID back to the
	// preregalloc Value it was derived from.
	orig []*Value

	// current state of each register
	regs []regState

	// registers that contain values which can't be kicked out
	nospill regMask

	// mask of registers currently in use
	used regMask

	// mask of registers used since the start of the current block
	usedSinceBlockStart regMask

	// mask of registers used in the current instruction
	tmpused regMask

	// current block we're working on
	curBlock *Block

	// cache of use records
	freeUseRecords *use

	// endRegs[blockid] is the register state at the end of each block.
	// encoded as a set of endReg records.
	endRegs [][]endReg

	// startRegs[blockid] is the register state at the start of merge blocks.
	// saved state does not include the state of phi ops in the block.
	startRegs [][]startReg

	// startRegsMask is a mask of the registers in startRegs[curBlock.ID].
	// Registers dropped from startRegsMask are later synchronized back to
	// startRegs by dropping from there as well.
	startRegsMask regMask

	// spillLive[blockid] is the set of live spills at the end of each block
	spillLive [][]ID

	// a set of copies we generated to move things around, and
	// whether it is used in shuffle. Unused copies will be deleted.
	copies map[*Value]bool

	loopnest *loopnest

	// choose a good order in which to visit blocks for allocation purposes.
	visitOrder []*Block

	// blockOrder[b.ID] corresponds to the index of block b in visitOrder.
	blockOrder []int32

	// whether to insert instructions that clobber dead registers at call sites
	doClobber bool
}

// endReg records that register r held value v (via cached copy c) at
// the end of a block.
type endReg struct {
	r register
	v *Value // pre-regalloc value held in this register (TODO: can we use ID here?)
	c *Value // cached version of the value
}

// startReg records that register r is expected to hold value v (via
// cached copy c) at the start of a merge block.
type startReg struct {
	r   register
	v   *Value   // pre-regalloc value needed in this register
	c   *Value   // cached version of the value
	pos src.XPos // source position of use of this register
}

// freeReg frees up register r.
Any current user of r is kicked out. +func (s *regAllocState) freeReg(r register) { + v := s.regs[r].v + if v == nil { + s.f.Fatalf("tried to free an already free register %d\n", r) + } + + // Mark r as unused. + if s.f.pass.debug > regDebug { + fmt.Printf("freeReg %s (dump %s/%s)\n", &s.registers[r], v, s.regs[r].c) + } + s.regs[r] = regState{} + s.values[v.ID].regs &^= regMask(1) << r + s.used &^= regMask(1) << r +} + +// freeRegs frees up all registers listed in m. +func (s *regAllocState) freeRegs(m regMask) { + for m&s.used != 0 { + s.freeReg(pickReg(m & s.used)) + } +} + +// clobberRegs inserts instructions that clobber registers listed in m. +func (s *regAllocState) clobberRegs(m regMask) { + m &= s.allocatable & s.f.Config.gpRegMask // only integer register can contain pointers, only clobber them + for m != 0 { + r := pickReg(m) + m &^= 1 << r + x := s.curBlock.NewValue0(src.NoXPos, OpClobberReg, types.TypeVoid) + s.f.setHome(x, &s.registers[r]) + } +} + +// setOrig records that c's original value is the same as +// v's original value. +func (s *regAllocState) setOrig(c *Value, v *Value) { + if int(c.ID) >= cap(s.orig) { + x := s.f.Cache.allocValueSlice(int(c.ID) + 1) + copy(x, s.orig) + s.f.Cache.freeValueSlice(s.orig) + s.orig = x + } + for int(c.ID) >= len(s.orig) { + s.orig = append(s.orig, nil) + } + if s.orig[c.ID] != nil { + s.f.Fatalf("orig value set twice %s %s", c, v) + } + s.orig[c.ID] = s.orig[v.ID] +} + +// assignReg assigns register r to hold c, a copy of v. +// r must be unused. +func (s *regAllocState) assignReg(r register, v *Value, c *Value) { + if s.f.pass.debug > regDebug { + fmt.Printf("assignReg %s %s/%s\n", &s.registers[r], v, c) + } + if s.regs[r].v != nil { + s.f.Fatalf("tried to assign register %d to %s/%s but it is already used by %s", r, v, c, s.regs[r].v) + } + + // Update state. 
+ s.regs[r] = regState{v, c} + s.values[v.ID].regs |= regMask(1) << r + s.used |= regMask(1) << r + s.f.setHome(c, &s.registers[r]) +} + +// allocReg chooses a register from the set of registers in mask. +// If there is no unused register, a Value will be kicked out of +// a register to make room. +func (s *regAllocState) allocReg(mask regMask, v *Value) register { + if v.OnWasmStack { + return noRegister + } + + mask &= s.allocatable + mask &^= s.nospill + if mask == 0 { + s.f.Fatalf("no register available for %s", v.LongString()) + } + + // Pick an unused register if one is available. + if mask&^s.used != 0 { + r := pickReg(mask &^ s.used) + s.usedSinceBlockStart |= regMask(1) << r + return r + } + + // Pick a value to spill. Spill the value with the + // farthest-in-the-future use. + // TODO: Prefer registers with already spilled Values? + // TODO: Modify preference using affinity graph. + // TODO: if a single value is in multiple registers, spill one of them + // before spilling a value in just a single register. + + // Find a register to spill. We spill the register containing the value + // whose next use is as far in the future as possible. + // https://en.wikipedia.org/wiki/Page_replacement_algorithm#The_theoretically_optimal_page_replacement_algorithm + var r register + maxuse := int32(-1) + for t := register(0); t < s.numRegs; t++ { + if mask>>t&1 == 0 { + continue + } + v := s.regs[t].v + if n := s.values[v.ID].uses.dist; n > maxuse { + // v's next use is farther in the future than any value + // we've seen so far. A new best spill candidate. + r = t + maxuse = n + } + } + if maxuse == -1 { + s.f.Fatalf("couldn't find register to spill") + } + + if s.f.Config.ctxt.Arch.Arch == sys.ArchWasm { + // TODO(neelance): In theory this should never happen, because all wasm registers are equal. + // So if there is still a free register, the allocation should have picked that one in the first place instead of + // trying to kick some other value out. 
In practice, this case does happen and it breaks the stack optimization. + s.freeReg(r) + return r + } + + // Try to move it around before kicking out, if there is a free register. + // We generate a Copy and record it. It will be deleted if never used. + v2 := s.regs[r].v + m := s.compatRegs(v2.Type) &^ s.used &^ s.tmpused &^ (regMask(1) << r) + if m != 0 && !s.values[v2.ID].rematerializeable && countRegs(s.values[v2.ID].regs) == 1 { + s.usedSinceBlockStart |= regMask(1) << r + r2 := pickReg(m) + c := s.curBlock.NewValue1(v2.Pos, OpCopy, v2.Type, s.regs[r].c) + s.copies[c] = false + if s.f.pass.debug > regDebug { + fmt.Printf("copy %s to %s : %s\n", v2, c, &s.registers[r2]) + } + s.setOrig(c, v2) + s.assignReg(r2, v2, c) + } + + // If the evicted register isn't used between the start of the block + // and now then there is no reason to even request it on entry. We can + // drop from startRegs in that case. + if s.usedSinceBlockStart&(regMask(1)< regDebug { + fmt.Printf("dropped from startRegs: %s\n", &s.registers[r]) + } + s.startRegsMask &^= regMask(1) << r + } + } + + s.freeReg(r) + s.usedSinceBlockStart |= regMask(1) << r + return r +} + +// makeSpill returns a Value which represents the spilled value of v. +// b is the block in which the spill is used. +func (s *regAllocState) makeSpill(v *Value, b *Block) *Value { + vi := &s.values[v.ID] + if vi.spill != nil { + // Final block not known - keep track of subtree where restores reside. + vi.restoreMin = min32(vi.restoreMin, s.sdom[b.ID].entry) + vi.restoreMax = max32(vi.restoreMax, s.sdom[b.ID].exit) + return vi.spill + } + // Make a spill for v. We don't know where we want + // to put it yet, so we leave it blockless for now. + spill := s.f.newValueNoBlock(OpStoreReg, v.Type, v.Pos) + // We also don't know what the spill's arg will be. + // Leave it argless for now. 
+ s.setOrig(spill, v) + vi.spill = spill + vi.restoreMin = s.sdom[b.ID].entry + vi.restoreMax = s.sdom[b.ID].exit + return spill +} + +// allocValToReg allocates v to a register selected from regMask and +// returns the register copy of v. Any previous user is kicked out and spilled +// (if necessary). Load code is added at the current pc. If nospill is set the +// allocated register is marked nospill so the assignment cannot be +// undone until the caller allows it by clearing nospill. Returns a +// *Value which is either v or a copy of v allocated to the chosen register. +func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, pos src.XPos) *Value { + if s.f.Config.ctxt.Arch.Arch == sys.ArchWasm && v.rematerializeable() { + c := v.copyIntoWithXPos(s.curBlock, pos) + c.OnWasmStack = true + s.setOrig(c, v) + return c + } + if v.OnWasmStack { + return v + } + + vi := &s.values[v.ID] + pos = pos.WithNotStmt() + // Check if v is already in a requested register. + if mask&vi.regs != 0 { + r := pickReg(mask & vi.regs) + if s.regs[r].v != v || s.regs[r].c == nil { + panic("bad register state") + } + if nospill { + s.nospill |= regMask(1) << r + } + s.usedSinceBlockStart |= regMask(1) << r + return s.regs[r].c + } + + var r register + // If nospill is set, the value is used immediately, so it can live on the WebAssembly stack. + onWasmStack := nospill && s.f.Config.ctxt.Arch.Arch == sys.ArchWasm + if !onWasmStack { + // Allocate a register. + r = s.allocReg(mask, v) + } + + // Allocate v to the new register. + var c *Value + if vi.regs != 0 { + // Copy from a register that v is already in. + r2 := pickReg(vi.regs) + if s.regs[r2].v != v { + panic("bad register state") + } + s.usedSinceBlockStart |= regMask(1) << r2 + c = s.curBlock.NewValue1(pos, OpCopy, v.Type, s.regs[r2].c) + } else if v.rematerializeable() { + // Rematerialize instead of loading from the spill location. 
+ c = v.copyIntoWithXPos(s.curBlock, pos) + } else { + // Load v from its spill location. + spill := s.makeSpill(v, s.curBlock) + if s.f.pass.debug > logSpills { + s.f.Warnl(vi.spill.Pos, "load spill for %v from %v", v, spill) + } + c = s.curBlock.NewValue1(pos, OpLoadReg, v.Type, spill) + } + + s.setOrig(c, v) + + if onWasmStack { + c.OnWasmStack = true + return c + } + + s.assignReg(r, v, c) + if c.Op == OpLoadReg && s.isGReg(r) { + s.f.Fatalf("allocValToReg.OpLoadReg targeting g: " + c.LongString()) + } + if nospill { + s.nospill |= regMask(1) << r + } + return c +} + +// isLeaf reports whether f performs any calls. +func isLeaf(f *Func) bool { + for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Op.IsCall() && !v.Op.IsTailCall() { + // tail call is not counted as it does not save the return PC or need a frame + return false + } + } + } + return true +} + +// needRegister reports whether v needs a register. +func (v *Value) needRegister() bool { + return !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && !v.Type.IsTuple() +} + +func (s *regAllocState) init(f *Func) { + s.f = f + s.f.RegAlloc = s.f.Cache.locs[:0] + s.registers = f.Config.registers + if nr := len(s.registers); nr == 0 || nr > int(noRegister) || nr > int(unsafe.Sizeof(regMask(0))*8) { + s.f.Fatalf("bad number of registers: %d", nr) + } else { + s.numRegs = register(nr) + } + // Locate SP, SB, and g registers. + s.SPReg = noRegister + s.SBReg = noRegister + s.GReg = noRegister + for r := register(0); r < s.numRegs; r++ { + switch s.registers[r].String() { + case "SP": + s.SPReg = r + case "SB": + s.SBReg = r + case "g": + s.GReg = r + } + } + // Make sure we found all required registers. + switch noRegister { + case s.SPReg: + s.f.Fatalf("no SP register found") + case s.SBReg: + s.f.Fatalf("no SB register found") + case s.GReg: + if f.Config.hasGReg { + s.f.Fatalf("no g register found") + } + } + + // Figure out which registers we're allowed to use. 
+ s.allocatable = s.f.Config.gpRegMask | s.f.Config.fpRegMask | s.f.Config.specialRegMask + s.allocatable &^= 1 << s.SPReg + s.allocatable &^= 1 << s.SBReg + if s.f.Config.hasGReg { + s.allocatable &^= 1 << s.GReg + } + if buildcfg.FramePointerEnabled && s.f.Config.FPReg >= 0 { + s.allocatable &^= 1 << uint(s.f.Config.FPReg) + } + if s.f.Config.LinkReg != -1 { + if isLeaf(f) { + // Leaf functions don't save/restore the link register. + s.allocatable &^= 1 << uint(s.f.Config.LinkReg) + } + } + if s.f.Config.ctxt.Flag_dynlink { + switch s.f.Config.arch { + case "386": + // nothing to do. + // Note that for Flag_shared (position independent code) + // we do need to be careful, but that carefulness is hidden + // in the rewrite rules so we always have a free register + // available for global load/stores. See _gen/386.rules (search for Flag_shared). + case "amd64": + s.allocatable &^= 1 << 15 // R15 + case "arm": + s.allocatable &^= 1 << 9 // R9 + case "arm64": + // nothing to do + case "loong64": // R2 (aka TP) already reserved. + // nothing to do + case "ppc64le": // R2 already reserved. + // nothing to do + case "riscv64": // X3 (aka GP) and X4 (aka TP) already reserved. + // nothing to do + case "s390x": + s.allocatable &^= 1 << 11 // R11 + default: + s.f.fe.Fatalf(src.NoXPos, "arch %s not implemented", s.f.Config.arch) + } + } + + // Linear scan register allocation can be influenced by the order in which blocks appear. + // Decouple the register allocation order from the generated block order. + // This also creates an opportunity for experiments to find a better order. + s.visitOrder = layoutRegallocOrder(f) + + // Compute block order. This array allows us to distinguish forward edges + // from backward edges and compute how far they go. 
+ s.blockOrder = make([]int32, f.NumBlocks()) + for i, b := range s.visitOrder { + s.blockOrder[b.ID] = int32(i) + } + + s.regs = make([]regState, s.numRegs) + nv := f.NumValues() + if cap(s.f.Cache.regallocValues) >= nv { + s.f.Cache.regallocValues = s.f.Cache.regallocValues[:nv] + } else { + s.f.Cache.regallocValues = make([]valState, nv) + } + s.values = s.f.Cache.regallocValues + s.orig = s.f.Cache.allocValueSlice(nv) + s.copies = make(map[*Value]bool) + for _, b := range s.visitOrder { + for _, v := range b.Values { + if v.needRegister() { + s.values[v.ID].needReg = true + s.values[v.ID].rematerializeable = v.rematerializeable() + s.orig[v.ID] = v + } + // Note: needReg is false for values returning Tuple types. + // Instead, we mark the corresponding Selects as needReg. + } + } + s.computeLive() + + s.endRegs = make([][]endReg, f.NumBlocks()) + s.startRegs = make([][]startReg, f.NumBlocks()) + s.spillLive = make([][]ID, f.NumBlocks()) + s.sdom = f.Sdom() + + // wasm: Mark instructions that can be optimized to have their values only on the WebAssembly stack. + if f.Config.ctxt.Arch.Arch == sys.ArchWasm { + canLiveOnStack := f.newSparseSet(f.NumValues()) + defer f.retSparseSet(canLiveOnStack) + for _, b := range f.Blocks { + // New block. Clear candidate set. + canLiveOnStack.clear() + for _, c := range b.ControlValues() { + if c.Uses == 1 && !opcodeTable[c.Op].generic { + canLiveOnStack.add(c.ID) + } + } + // Walking backwards. + for i := len(b.Values) - 1; i >= 0; i-- { + v := b.Values[i] + if canLiveOnStack.contains(v.ID) { + v.OnWasmStack = true + } else { + // Value can not live on stack. Values are not allowed to be reordered, so clear candidate set. 
+ canLiveOnStack.clear() + } + for _, arg := range v.Args { + // Value can live on the stack if: + // - it is only used once + // - it is used in the same basic block + // - it is not a "mem" value + // - it is a WebAssembly op + if arg.Uses == 1 && arg.Block == v.Block && !arg.Type.IsMemory() && !opcodeTable[arg.Op].generic { + canLiveOnStack.add(arg.ID) + } + } + } + } + } + + // The clobberdeadreg experiment inserts code to clobber dead registers + // at call sites. + // Ignore huge functions to avoid doing too much work. + if base.Flag.ClobberDeadReg && len(s.f.Blocks) <= 10000 { + // TODO: honor GOCLOBBERDEADHASH, or maybe GOSSAHASH. + s.doClobber = true + } +} + +func (s *regAllocState) close() { + s.f.Cache.freeValueSlice(s.orig) +} + +// Adds a use record for id at distance dist from the start of the block. +// All calls to addUse must happen with nonincreasing dist. +func (s *regAllocState) addUse(id ID, dist int32, pos src.XPos) { + r := s.freeUseRecords + if r != nil { + s.freeUseRecords = r.next + } else { + r = &use{} + } + r.dist = dist + r.pos = pos + r.next = s.values[id].uses + s.values[id].uses = r + if r.next != nil && dist > r.next.dist { + s.f.Fatalf("uses added in wrong order") + } +} + +// advanceUses advances the uses of v's args from the state before v to the state after v. +// Any values which have no more uses are deallocated from registers. +func (s *regAllocState) advanceUses(v *Value) { + for _, a := range v.Args { + if !s.values[a.ID].needReg { + continue + } + ai := &s.values[a.ID] + r := ai.uses + ai.uses = r.next + if r.next == nil { + // Value is dead, free all registers that hold it. + s.freeRegs(ai.regs) + } + r.next = s.freeUseRecords + s.freeUseRecords = r + } +} + +// liveAfterCurrentInstruction reports whether v is live after +// the current instruction is completed. v must be used by the +// current instruction. 
func (s *regAllocState) liveAfterCurrentInstruction(v *Value) bool {
	u := s.values[v.ID].uses
	if u == nil {
		panic(fmt.Errorf("u is nil, v = %s, s.values[v.ID] = %v", v.LongString(), s.values[v.ID]))
	}
	d := u.dist
	// Skip past all uses at the current distance (the current instruction);
	// any remaining use at a greater distance means v is still live.
	for u != nil && u.dist == d {
		u = u.next
	}
	return u != nil && u.dist > d
}

// Sets the state of the registers to that encoded in regs.
func (s *regAllocState) setState(regs []endReg) {
	s.freeRegs(s.used)
	for _, x := range regs {
		s.assignReg(x.r, x.v, x.c)
	}
}

// compatRegs returns the set of registers which can store a type t.
func (s *regAllocState) compatRegs(t *types.Type) regMask {
	var m regMask
	if t.IsTuple() || t.IsFlags() {
		// Tuples and flags never live in allocatable registers.
		return 0
	}
	if t.IsFloat() || t == types.TypeInt128 {
		// Prefer the width-specific float mask when the config provides one.
		if t.Kind() == types.TFLOAT32 && s.f.Config.fp32RegMask != 0 {
			m = s.f.Config.fp32RegMask
		} else if t.Kind() == types.TFLOAT64 && s.f.Config.fp64RegMask != 0 {
			m = s.f.Config.fp64RegMask
		} else {
			m = s.f.Config.fpRegMask
		}
	} else {
		m = s.f.Config.gpRegMask
	}
	return m & s.allocatable
}

// regspec returns the regInfo for operation op.
func (s *regAllocState) regspec(v *Value) regInfo {
	op := v.Op
	if op == OpConvert {
		// OpConvert is a generic op, so it doesn't have a
		// register set in the static table. It can use any
		// allocatable integer register.
+ m := s.allocatable & s.f.Config.gpRegMask + return regInfo{inputs: []inputInfo{{regs: m}}, outputs: []outputInfo{{regs: m}}} + } + if op == OpArgIntReg { + reg := v.Block.Func.Config.intParamRegs[v.AuxInt8()] + return regInfo{outputs: []outputInfo{{regs: 1 << uint(reg)}}} + } + if op == OpArgFloatReg { + reg := v.Block.Func.Config.floatParamRegs[v.AuxInt8()] + return regInfo{outputs: []outputInfo{{regs: 1 << uint(reg)}}} + } + if op.IsCall() { + if ac, ok := v.Aux.(*AuxCall); ok && ac.reg != nil { + return *ac.Reg(&opcodeTable[op].reg, s.f.Config) + } + } + if op == OpMakeResult && s.f.OwnAux.reg != nil { + return *s.f.OwnAux.ResultReg(s.f.Config) + } + return opcodeTable[op].reg +} + +func (s *regAllocState) isGReg(r register) bool { + return s.f.Config.hasGReg && s.GReg == r +} + +// Dummy value used to represent the value being held in a temporary register. +var tmpVal Value + +func (s *regAllocState) regalloc(f *Func) { + regValLiveSet := f.newSparseSet(f.NumValues()) // set of values that may be live in register + defer f.retSparseSet(regValLiveSet) + var oldSched []*Value + var phis []*Value + var phiRegs []register + var args []*Value + + // Data structure used for computing desired registers. + var desired desiredState + + // Desired registers for inputs & outputs for each instruction in the block. + type dentry struct { + out [4]register // desired output registers + in [3][4]register // desired input registers (for inputs 0,1, and 2) + } + var dinfo []dentry + + if f.Entry != f.Blocks[0] { + f.Fatalf("entry block must be first") + } + + for _, b := range s.visitOrder { + if s.f.pass.debug > regDebug { + fmt.Printf("Begin processing block %v\n", b) + } + s.curBlock = b + s.startRegsMask = 0 + s.usedSinceBlockStart = 0 + + // Initialize regValLiveSet and uses fields for this block. + // Walk backwards through the block doing liveness analysis. 
+ regValLiveSet.clear() + for _, e := range s.live[b.ID] { + s.addUse(e.ID, int32(len(b.Values))+e.dist, e.pos) // pseudo-uses from beyond end of block + regValLiveSet.add(e.ID) + } + for _, v := range b.ControlValues() { + if s.values[v.ID].needReg { + s.addUse(v.ID, int32(len(b.Values)), b.Pos) // pseudo-use by control values + regValLiveSet.add(v.ID) + } + } + for i := len(b.Values) - 1; i >= 0; i-- { + v := b.Values[i] + regValLiveSet.remove(v.ID) + if v.Op == OpPhi { + // Remove v from the live set, but don't add + // any inputs. This is the state the len(b.Preds)>1 + // case below desires; it wants to process phis specially. + continue + } + if opcodeTable[v.Op].call { + // Function call clobbers all the registers but SP and SB. + regValLiveSet.clear() + if s.sp != 0 && s.values[s.sp].uses != nil { + regValLiveSet.add(s.sp) + } + if s.sb != 0 && s.values[s.sb].uses != nil { + regValLiveSet.add(s.sb) + } + } + for _, a := range v.Args { + if !s.values[a.ID].needReg { + continue + } + s.addUse(a.ID, int32(i), v.Pos) + regValLiveSet.add(a.ID) + } + } + if s.f.pass.debug > regDebug { + fmt.Printf("use distances for %s\n", b) + for i := range s.values { + vi := &s.values[i] + u := vi.uses + if u == nil { + continue + } + fmt.Printf(" v%d:", i) + for u != nil { + fmt.Printf(" %d", u.dist) + u = u.next + } + fmt.Println() + } + } + + // Make a copy of the block schedule so we can generate a new one in place. + // We make a separate copy for phis and regular values. + nphi := 0 + for _, v := range b.Values { + if v.Op != OpPhi { + break + } + nphi++ + } + phis = append(phis[:0], b.Values[:nphi]...) + oldSched = append(oldSched[:0], b.Values[nphi:]...) + b.Values = b.Values[:0] + + // Initialize start state of block. + if b == f.Entry { + // Regalloc state is empty to start. + if nphi > 0 { + f.Fatalf("phis in entry block") + } + } else if len(b.Preds) == 1 { + // Start regalloc state with the end state of the previous block. 
+ s.setState(s.endRegs[b.Preds[0].b.ID]) + if nphi > 0 { + f.Fatalf("phis in single-predecessor block") + } + // Drop any values which are no longer live. + // This may happen because at the end of p, a value may be + // live but only used by some other successor of p. + for r := register(0); r < s.numRegs; r++ { + v := s.regs[r].v + if v != nil && !regValLiveSet.contains(v.ID) { + s.freeReg(r) + } + } + } else { + // This is the complicated case. We have more than one predecessor, + // which means we may have Phi ops. + + // Start with the final register state of the predecessor with least spill values. + // This is based on the following points: + // 1, The less spill value indicates that the register pressure of this path is smaller, + // so the values of this block are more likely to be allocated to registers. + // 2, Avoid the predecessor that contains the function call, because the predecessor that + // contains the function call usually generates a lot of spills and lose the previous + // allocation state. + // TODO: Improve this part. At least the size of endRegs of the predecessor also has + // an impact on the code size and compiler speed. But it is not easy to find a simple + // and efficient method that combines multiple factors. + idx := -1 + for i, p := range b.Preds { + // If the predecessor has not been visited yet, skip it because its end state + // (redRegs and spillLive) has not been computed yet. + pb := p.b + if s.blockOrder[pb.ID] >= s.blockOrder[b.ID] { + continue + } + if idx == -1 { + idx = i + continue + } + pSel := b.Preds[idx].b + if len(s.spillLive[pb.ID]) < len(s.spillLive[pSel.ID]) { + idx = i + } else if len(s.spillLive[pb.ID]) == len(s.spillLive[pSel.ID]) { + // Use a bit of likely information. After critical pass, pb and pSel must + // be plain blocks, so check edge pb->pb.Preds instead of edge pb->b. + // TODO: improve the prediction of the likely predecessor. The following + // method is only suitable for the simplest cases. 
For complex cases, + // the prediction may be inaccurate, but this does not affect the + // correctness of the program. + // According to the layout algorithm, the predecessor with the + // smaller blockOrder is the true branch, and the test results show + // that it is better to choose the predecessor with a smaller + // blockOrder than no choice. + if pb.likelyBranch() && !pSel.likelyBranch() || s.blockOrder[pb.ID] < s.blockOrder[pSel.ID] { + idx = i + } + } + } + if idx < 0 { + f.Fatalf("bad visitOrder, no predecessor of %s has been visited before it", b) + } + p := b.Preds[idx].b + s.setState(s.endRegs[p.ID]) + + if s.f.pass.debug > regDebug { + fmt.Printf("starting merge block %s with end state of %s:\n", b, p) + for _, x := range s.endRegs[p.ID] { + fmt.Printf(" %s: orig:%s cache:%s\n", &s.registers[x.r], x.v, x.c) + } + } + + // Decide on registers for phi ops. Use the registers determined + // by the primary predecessor if we can. + // TODO: pick best of (already processed) predecessors? + // Majority vote? Deepest nesting level? + phiRegs = phiRegs[:0] + var phiUsed regMask + + for _, v := range phis { + if !s.values[v.ID].needReg { + phiRegs = append(phiRegs, noRegister) + continue + } + a := v.Args[idx] + // Some instructions target not-allocatable registers. + // They're not suitable for further (phi-function) allocation. + m := s.values[a.ID].regs &^ phiUsed & s.allocatable + if m != 0 { + r := pickReg(m) + phiUsed |= regMask(1) << r + phiRegs = append(phiRegs, r) + } else { + phiRegs = append(phiRegs, noRegister) + } + } + + // Second pass - deallocate all in-register phi inputs. + for i, v := range phis { + if !s.values[v.ID].needReg { + continue + } + a := v.Args[idx] + r := phiRegs[i] + if r == noRegister { + continue + } + if regValLiveSet.contains(a.ID) { + // Input value is still live (it is used by something other than Phi). + // Try to move it around before kicking out, if there is a free register. 
+ // We generate a Copy in the predecessor block and record it. It will be + // deleted later if never used. + // + // Pick a free register. At this point some registers used in the predecessor + // block may have been deallocated. Those are the ones used for Phis. Exclude + // them (and they are not going to be helpful anyway). + m := s.compatRegs(a.Type) &^ s.used &^ phiUsed + if m != 0 && !s.values[a.ID].rematerializeable && countRegs(s.values[a.ID].regs) == 1 { + r2 := pickReg(m) + c := p.NewValue1(a.Pos, OpCopy, a.Type, s.regs[r].c) + s.copies[c] = false + if s.f.pass.debug > regDebug { + fmt.Printf("copy %s to %s : %s\n", a, c, &s.registers[r2]) + } + s.setOrig(c, a) + s.assignReg(r2, a, c) + s.endRegs[p.ID] = append(s.endRegs[p.ID], endReg{r2, a, c}) + } + } + s.freeReg(r) + } + + // Copy phi ops into new schedule. + b.Values = append(b.Values, phis...) + + // Third pass - pick registers for phis whose input + // was not in a register in the primary predecessor. + for i, v := range phis { + if !s.values[v.ID].needReg { + continue + } + if phiRegs[i] != noRegister { + continue + } + m := s.compatRegs(v.Type) &^ phiUsed &^ s.used + // If one of the other inputs of v is in a register, and the register is available, + // select this register, which can save some unnecessary copies. + for i, pe := range b.Preds { + if i == idx { + continue + } + ri := noRegister + for _, er := range s.endRegs[pe.b.ID] { + if er.v == s.orig[v.Args[i].ID] { + ri = er.r + break + } + } + if ri != noRegister && m>>ri&1 != 0 { + m = regMask(1) << ri + break + } + } + if m != 0 { + r := pickReg(m) + phiRegs[i] = r + phiUsed |= regMask(1) << r + } + } + + // Set registers for phis. Add phi spill code. + for i, v := range phis { + if !s.values[v.ID].needReg { + continue + } + r := phiRegs[i] + if r == noRegister { + // stack-based phi + // Spills will be inserted in all the predecessors below. 
+ s.values[v.ID].spill = v // v starts life spilled + continue + } + // register-based phi + s.assignReg(r, v, v) + } + + // Deallocate any values which are no longer live. Phis are excluded. + for r := register(0); r < s.numRegs; r++ { + if phiUsed>>r&1 != 0 { + continue + } + v := s.regs[r].v + if v != nil && !regValLiveSet.contains(v.ID) { + s.freeReg(r) + } + } + + // Save the starting state for use by merge edges. + // We append to a stack allocated variable that we'll + // later copy into s.startRegs in one fell swoop, to save + // on allocations. + regList := make([]startReg, 0, 32) + for r := register(0); r < s.numRegs; r++ { + v := s.regs[r].v + if v == nil { + continue + } + if phiUsed>>r&1 != 0 { + // Skip registers that phis used, we'll handle those + // specially during merge edge processing. + continue + } + regList = append(regList, startReg{r, v, s.regs[r].c, s.values[v.ID].uses.pos}) + s.startRegsMask |= regMask(1) << r + } + s.startRegs[b.ID] = make([]startReg, len(regList)) + copy(s.startRegs[b.ID], regList) + + if s.f.pass.debug > regDebug { + fmt.Printf("after phis\n") + for _, x := range s.startRegs[b.ID] { + fmt.Printf(" %s: v%d\n", &s.registers[x.r], x.v.ID) + } + } + } + + // Allocate space to record the desired registers for each value. + if l := len(oldSched); cap(dinfo) < l { + dinfo = make([]dentry, l) + } else { + dinfo = dinfo[:l] + for i := range dinfo { + dinfo[i] = dentry{} + } + } + + // Load static desired register info at the end of the block. + desired.copy(&s.desired[b.ID]) + + // Check actual assigned registers at the start of the next block(s). + // Dynamically assigned registers will trump the static + // desired registers computed during liveness analysis. + // Note that we do this phase after startRegs is set above, so that + // we get the right behavior for a block which branches to itself. + for _, e := range b.Succs { + succ := e.b + // TODO: prioritize likely successor? 
+ for _, x := range s.startRegs[succ.ID] { + desired.add(x.v.ID, x.r) + } + // Process phi ops in succ. + pidx := e.i + for _, v := range succ.Values { + if v.Op != OpPhi { + break + } + if !s.values[v.ID].needReg { + continue + } + rp, ok := s.f.getHome(v.ID).(*Register) + if !ok { + // If v is not assigned a register, pick a register assigned to one of v's inputs. + // Hopefully v will get assigned that register later. + // If the inputs have allocated register information, add it to desired, + // which may reduce spill or copy operations when the register is available. + for _, a := range v.Args { + rp, ok = s.f.getHome(a.ID).(*Register) + if ok { + break + } + } + if !ok { + continue + } + } + desired.add(v.Args[pidx].ID, register(rp.num)) + } + } + // Walk values backwards computing desired register info. + // See computeLive for more comments. + for i := len(oldSched) - 1; i >= 0; i-- { + v := oldSched[i] + prefs := desired.remove(v.ID) + regspec := s.regspec(v) + desired.clobber(regspec.clobbers) + for _, j := range regspec.inputs { + if countRegs(j.regs) != 1 { + continue + } + desired.clobber(j.regs) + desired.add(v.Args[j.idx].ID, pickReg(j.regs)) + } + if opcodeTable[v.Op].resultInArg0 || v.Op == OpAMD64ADDQconst || v.Op == OpAMD64ADDLconst || v.Op == OpSelect0 { + if opcodeTable[v.Op].commutative { + desired.addList(v.Args[1].ID, prefs) + } + desired.addList(v.Args[0].ID, prefs) + } + // Save desired registers for this value. + dinfo[i].out = prefs + for j, a := range v.Args { + if j >= len(dinfo[i].in) { + break + } + dinfo[i].in[j] = desired.get(a.ID) + } + } + + // Process all the non-phi values. 
+ for idx, v := range oldSched { + tmpReg := noRegister + if s.f.pass.debug > regDebug { + fmt.Printf(" processing %s\n", v.LongString()) + } + regspec := s.regspec(v) + if v.Op == OpPhi { + f.Fatalf("phi %s not at start of block", v) + } + if v.Op == OpSP { + s.assignReg(s.SPReg, v, v) + b.Values = append(b.Values, v) + s.advanceUses(v) + s.sp = v.ID + continue + } + if v.Op == OpSB { + s.assignReg(s.SBReg, v, v) + b.Values = append(b.Values, v) + s.advanceUses(v) + s.sb = v.ID + continue + } + if v.Op == OpSelect0 || v.Op == OpSelect1 || v.Op == OpSelectN { + if s.values[v.ID].needReg { + if v.Op == OpSelectN { + s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocResults)[int(v.AuxInt)].(*Register).num), v, v) + } else { + var i = 0 + if v.Op == OpSelect1 { + i = 1 + } + s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocPair)[i].(*Register).num), v, v) + } + } + b.Values = append(b.Values, v) + s.advanceUses(v) + continue + } + if v.Op == OpGetG && s.f.Config.hasGReg { + // use hardware g register + if s.regs[s.GReg].v != nil { + s.freeReg(s.GReg) // kick out the old value + } + s.assignReg(s.GReg, v, v) + b.Values = append(b.Values, v) + s.advanceUses(v) + continue + } + if v.Op == OpArg { + // Args are "pre-spilled" values. We don't allocate + // any register here. We just set up the spill pointer to + // point at itself and any later user will restore it to use it. + s.values[v.ID].spill = v + b.Values = append(b.Values, v) + s.advanceUses(v) + continue + } + if v.Op == OpKeepAlive { + // Make sure the argument to v is still live here. + s.advanceUses(v) + a := v.Args[0] + vi := &s.values[a.ID] + if vi.regs == 0 && !vi.rematerializeable { + // Use the spill location. + // This forces later liveness analysis to make the + // value live at this point. + v.SetArg(0, s.makeSpill(a, b)) + } else if _, ok := a.Aux.(*ir.Name); ok && vi.rematerializeable { + // Rematerializeable value with a gc.Node. This is the address of + // a stack object (e.g. an LEAQ). 
Keep the object live. + // Change it to VarLive, which is what plive expects for locals. + v.Op = OpVarLive + v.SetArgs1(v.Args[1]) + v.Aux = a.Aux + } else { + // In-register and rematerializeable values are already live. + // These are typically rematerializeable constants like nil, + // or values of a variable that were modified since the last call. + v.Op = OpCopy + v.SetArgs1(v.Args[1]) + } + b.Values = append(b.Values, v) + continue + } + if len(regspec.inputs) == 0 && len(regspec.outputs) == 0 { + // No register allocation required (or none specified yet) + if s.doClobber && v.Op.IsCall() { + s.clobberRegs(regspec.clobbers) + } + s.freeRegs(regspec.clobbers) + b.Values = append(b.Values, v) + s.advanceUses(v) + continue + } + + if s.values[v.ID].rematerializeable { + // Value is rematerializeable, don't issue it here. + // It will get issued just before each use (see + // allocValueToReg). + for _, a := range v.Args { + a.Uses-- + } + s.advanceUses(v) + continue + } + + if s.f.pass.debug > regDebug { + fmt.Printf("value %s\n", v.LongString()) + fmt.Printf(" out:") + for _, r := range dinfo[idx].out { + if r != noRegister { + fmt.Printf(" %s", &s.registers[r]) + } + } + fmt.Println() + for i := 0; i < len(v.Args) && i < 3; i++ { + fmt.Printf(" in%d:", i) + for _, r := range dinfo[idx].in[i] { + if r != noRegister { + fmt.Printf(" %s", &s.registers[r]) + } + } + fmt.Println() + } + } + + // Move arguments to registers. + // First, if an arg must be in a specific register and it is already + // in place, keep it. + args = append(args[:0], make([]*Value, len(v.Args))...) + for i, a := range v.Args { + if !s.values[a.ID].needReg { + args[i] = a + } + } + for _, i := range regspec.inputs { + mask := i.regs + if countRegs(mask) == 1 && mask&s.values[v.Args[i.idx].ID].regs != 0 { + args[i.idx] = s.allocValToReg(v.Args[i.idx], mask, true, v.Pos) + } + } + // Then, if an arg must be in a specific register and that + // register is free, allocate that one. 
Otherwise when processing + // another input we may kick a value into the free register, which + // then will be kicked out again. + // This is a common case for passing-in-register arguments for + // function calls. + for { + freed := false + for _, i := range regspec.inputs { + if args[i.idx] != nil { + continue // already allocated + } + mask := i.regs + if countRegs(mask) == 1 && mask&^s.used != 0 { + args[i.idx] = s.allocValToReg(v.Args[i.idx], mask, true, v.Pos) + // If the input is in other registers that will be clobbered by v, + // or the input is dead, free the registers. This may make room + // for other inputs. + oldregs := s.values[v.Args[i.idx].ID].regs + if oldregs&^regspec.clobbers == 0 || !s.liveAfterCurrentInstruction(v.Args[i.idx]) { + s.freeRegs(oldregs &^ mask &^ s.nospill) + freed = true + } + } + } + if !freed { + break + } + } + // Last, allocate remaining ones, in an ordering defined + // by the register specification (most constrained first). + for _, i := range regspec.inputs { + if args[i.idx] != nil { + continue // already allocated + } + mask := i.regs + if mask&s.values[v.Args[i.idx].ID].regs == 0 { + // Need a new register for the input. + mask &= s.allocatable + mask &^= s.nospill + // Used desired register if available. + if i.idx < 3 { + for _, r := range dinfo[idx].in[i.idx] { + if r != noRegister && (mask&^s.used)>>r&1 != 0 { + // Desired register is allowed and unused. + mask = regMask(1) << r + break + } + } + } + // Avoid registers we're saving for other values. + if mask&^desired.avoid != 0 { + mask &^= desired.avoid + } + } + args[i.idx] = s.allocValToReg(v.Args[i.idx], mask, true, v.Pos) + } + + // If the output clobbers the input register, make sure we have + // at least two copies of the input register so we don't + // have to reload the value from the spill location. + if opcodeTable[v.Op].resultInArg0 { + var m regMask + if !s.liveAfterCurrentInstruction(v.Args[0]) { + // arg0 is dead. We can clobber its register. 
+ goto ok + } + if opcodeTable[v.Op].commutative && !s.liveAfterCurrentInstruction(v.Args[1]) { + args[0], args[1] = args[1], args[0] + goto ok + } + if s.values[v.Args[0].ID].rematerializeable { + // We can rematerialize the input, don't worry about clobbering it. + goto ok + } + if opcodeTable[v.Op].commutative && s.values[v.Args[1].ID].rematerializeable { + args[0], args[1] = args[1], args[0] + goto ok + } + if countRegs(s.values[v.Args[0].ID].regs) >= 2 { + // we have at least 2 copies of arg0. We can afford to clobber one. + goto ok + } + if opcodeTable[v.Op].commutative && countRegs(s.values[v.Args[1].ID].regs) >= 2 { + args[0], args[1] = args[1], args[0] + goto ok + } + + // We can't overwrite arg0 (or arg1, if commutative). So we + // need to make a copy of an input so we have a register we can modify. + + // Possible new registers to copy into. + m = s.compatRegs(v.Args[0].Type) &^ s.used + if m == 0 { + // No free registers. In this case we'll just clobber + // an input and future uses of that input must use a restore. + // TODO(khr): We should really do this like allocReg does it, + // spilling the value with the most distant next use. + goto ok + } + + // Try to move an input to the desired output, if allowed. + for _, r := range dinfo[idx].out { + if r != noRegister && (m®spec.outputs[0].regs)>>r&1 != 0 { + m = regMask(1) << r + args[0] = s.allocValToReg(v.Args[0], m, true, v.Pos) + // Note: we update args[0] so the instruction will + // use the register copy we just made. + goto ok + } + } + // Try to copy input to its desired location & use its old + // location as the result register. + for _, r := range dinfo[idx].in[0] { + if r != noRegister && m>>r&1 != 0 { + m = regMask(1) << r + c := s.allocValToReg(v.Args[0], m, true, v.Pos) + s.copies[c] = false + // Note: no update to args[0] so the instruction will + // use the original copy. 
+ goto ok + } + } + if opcodeTable[v.Op].commutative { + for _, r := range dinfo[idx].in[1] { + if r != noRegister && m>>r&1 != 0 { + m = regMask(1) << r + c := s.allocValToReg(v.Args[1], m, true, v.Pos) + s.copies[c] = false + args[0], args[1] = args[1], args[0] + goto ok + } + } + } + + // Avoid future fixed uses if we can. + if m&^desired.avoid != 0 { + m &^= desired.avoid + } + // Save input 0 to a new register so we can clobber it. + c := s.allocValToReg(v.Args[0], m, true, v.Pos) + s.copies[c] = false + + // Normally we use the register of the old copy of input 0 as the target. + // However, if input 0 is already in its desired register then we use + // the register of the new copy instead. + if regspec.outputs[0].regs>>s.f.getHome(c.ID).(*Register).num&1 != 0 { + if rp, ok := s.f.getHome(args[0].ID).(*Register); ok { + r := register(rp.num) + for _, r2 := range dinfo[idx].in[0] { + if r == r2 { + args[0] = c + break + } + } + } + } + } + + ok: + // Pick a temporary register if needed. + // It should be distinct from all the input registers, so we + // allocate it after all the input registers, but before + // the input registers are freed via advanceUses below. + // (Not all instructions need that distinct part, but it is conservative.) + if opcodeTable[v.Op].needIntTemp { + m := s.allocatable & s.f.Config.gpRegMask + if m&^desired.avoid&^s.nospill != 0 { + m &^= desired.avoid + } + tmpReg = s.allocReg(m, &tmpVal) + s.nospill |= regMask(1) << tmpReg + } + + // Now that all args are in regs, we're ready to issue the value itself. + // Before we pick a register for the output value, allow input registers + // to be deallocated. We do this here so that the output can use the + // same register as a dying input. 
+ if !opcodeTable[v.Op].resultNotInArgs { + s.tmpused = s.nospill + s.nospill = 0 + s.advanceUses(v) // frees any registers holding args that are no longer live + } + + // Dump any registers which will be clobbered + if s.doClobber && v.Op.IsCall() { + // clobber registers that are marked as clobber in regmask, but + // don't clobber inputs. + s.clobberRegs(regspec.clobbers &^ s.tmpused &^ s.nospill) + } + s.freeRegs(regspec.clobbers) + s.tmpused |= regspec.clobbers + + // Pick registers for outputs. + { + outRegs := noRegisters // TODO if this is costly, hoist and clear incrementally below. + maxOutIdx := -1 + var used regMask + if tmpReg != noRegister { + // Ensure output registers are distinct from the temporary register. + // (Not all instructions need that distinct part, but it is conservative.) + used |= regMask(1) << tmpReg + } + for _, out := range regspec.outputs { + mask := out.regs & s.allocatable &^ used + if mask == 0 { + continue + } + if opcodeTable[v.Op].resultInArg0 && out.idx == 0 { + if !opcodeTable[v.Op].commutative { + // Output must use the same register as input 0. + r := register(s.f.getHome(args[0].ID).(*Register).num) + if mask>>r&1 == 0 { + s.f.Fatalf("resultInArg0 value's input %v cannot be an output of %s", s.f.getHome(args[0].ID).(*Register), v.LongString()) + } + mask = regMask(1) << r + } else { + // Output must use the same register as input 0 or 1. + r0 := register(s.f.getHome(args[0].ID).(*Register).num) + r1 := register(s.f.getHome(args[1].ID).(*Register).num) + // Check r0 and r1 for desired output register. + found := false + for _, r := range dinfo[idx].out { + if (r == r0 || r == r1) && (mask&^s.used)>>r&1 != 0 { + mask = regMask(1) << r + found = true + if r == r1 { + args[0], args[1] = args[1], args[0] + } + break + } + } + if !found { + // Neither are desired, pick r0. 
+ mask = regMask(1) << r0 + } + } + } + if out.idx == 0 { // desired registers only apply to the first element of a tuple result + for _, r := range dinfo[idx].out { + if r != noRegister && (mask&^s.used)>>r&1 != 0 { + // Desired register is allowed and unused. + mask = regMask(1) << r + break + } + } + } + // Avoid registers we're saving for other values. + if mask&^desired.avoid&^s.nospill&^s.used != 0 { + mask &^= desired.avoid + } + r := s.allocReg(mask, v) + if out.idx > maxOutIdx { + maxOutIdx = out.idx + } + outRegs[out.idx] = r + used |= regMask(1) << r + s.tmpused |= regMask(1) << r + } + // Record register choices + if v.Type.IsTuple() { + var outLocs LocPair + if r := outRegs[0]; r != noRegister { + outLocs[0] = &s.registers[r] + } + if r := outRegs[1]; r != noRegister { + outLocs[1] = &s.registers[r] + } + s.f.setHome(v, outLocs) + // Note that subsequent SelectX instructions will do the assignReg calls. + } else if v.Type.IsResults() { + // preallocate outLocs to the right size, which is maxOutIdx+1 + outLocs := make(LocResults, maxOutIdx+1, maxOutIdx+1) + for i := 0; i <= maxOutIdx; i++ { + if r := outRegs[i]; r != noRegister { + outLocs[i] = &s.registers[r] + } + } + s.f.setHome(v, outLocs) + } else { + if r := outRegs[0]; r != noRegister { + s.assignReg(r, v, v) + } + } + if tmpReg != noRegister { + // Remember the temp register allocation, if any. + if s.f.tempRegs == nil { + s.f.tempRegs = map[ID]*Register{} + } + s.f.tempRegs[v.ID] = &s.registers[tmpReg] + } + } + + // deallocate dead args, if we have not done so + if opcodeTable[v.Op].resultNotInArgs { + s.nospill = 0 + s.advanceUses(v) // frees any registers holding args that are no longer live + } + s.tmpused = 0 + + // Issue the Value itself. + for i, a := range args { + v.SetArg(i, a) // use register version of arguments + } + b.Values = append(b.Values, v) + } + + // Copy the control values - we need this so we can reduce the + // uses property of these values later. 
+ controls := append(make([]*Value, 0, 2), b.ControlValues()...) + + // Load control values into registers. + for i, v := range b.ControlValues() { + if !s.values[v.ID].needReg { + continue + } + if s.f.pass.debug > regDebug { + fmt.Printf(" processing control %s\n", v.LongString()) + } + // We assume that a control input can be passed in any + // type-compatible register. If this turns out not to be true, + // we'll need to introduce a regspec for a block's control value. + b.ReplaceControl(i, s.allocValToReg(v, s.compatRegs(v.Type), false, b.Pos)) + } + + // Reduce the uses of the control values once registers have been loaded. + // This loop is equivalent to the advanceUses method. + for _, v := range controls { + vi := &s.values[v.ID] + if !vi.needReg { + continue + } + // Remove this use from the uses list. + u := vi.uses + vi.uses = u.next + if u.next == nil { + s.freeRegs(vi.regs) // value is dead + } + u.next = s.freeUseRecords + s.freeUseRecords = u + } + + // If we are approaching a merge point and we are the primary + // predecessor of it, find live values that we use soon after + // the merge point and promote them to registers now. + if len(b.Succs) == 1 { + if s.f.Config.hasGReg && s.regs[s.GReg].v != nil { + s.freeReg(s.GReg) // Spill value in G register before any merge. + } + // For this to be worthwhile, the loop must have no calls in it. + top := b.Succs[0].b + loop := s.loopnest.b2l[top.ID] + if loop == nil || loop.header != top || loop.containsUnavoidableCall { + goto badloop + } + + // TODO: sort by distance, pick the closest ones? + for _, live := range s.live[b.ID] { + if live.dist >= unlikelyDistance { + // Don't preload anything live after the loop. + continue + } + vid := live.ID + vi := &s.values[vid] + if vi.regs != 0 { + continue + } + if vi.rematerializeable { + continue + } + v := s.orig[vid] + m := s.compatRegs(v.Type) &^ s.used + // Used desired register if available. 
+ outerloop: + for _, e := range desired.entries { + if e.ID != v.ID { + continue + } + for _, r := range e.regs { + if r != noRegister && m>>r&1 != 0 { + m = regMask(1) << r + break outerloop + } + } + } + if m&^desired.avoid != 0 { + m &^= desired.avoid + } + if m != 0 { + s.allocValToReg(v, m, false, b.Pos) + } + } + } + badloop: + ; + + // Save end-of-block register state. + // First count how many, this cuts allocations in half. + k := 0 + for r := register(0); r < s.numRegs; r++ { + v := s.regs[r].v + if v == nil { + continue + } + k++ + } + regList := make([]endReg, 0, k) + for r := register(0); r < s.numRegs; r++ { + v := s.regs[r].v + if v == nil { + continue + } + regList = append(regList, endReg{r, v, s.regs[r].c}) + } + s.endRegs[b.ID] = regList + + if checkEnabled { + regValLiveSet.clear() + for _, x := range s.live[b.ID] { + regValLiveSet.add(x.ID) + } + for r := register(0); r < s.numRegs; r++ { + v := s.regs[r].v + if v == nil { + continue + } + if !regValLiveSet.contains(v.ID) { + s.f.Fatalf("val %s is in reg but not live at end of %s", v, b) + } + } + } + + // If a value is live at the end of the block and + // isn't in a register, generate a use for the spill location. + // We need to remember this information so that + // the liveness analysis in stackalloc is correct. + for _, e := range s.live[b.ID] { + vi := &s.values[e.ID] + if vi.regs != 0 { + // in a register, we'll use that source for the merge. + continue + } + if vi.rematerializeable { + // we'll rematerialize during the merge. + continue + } + if s.f.pass.debug > regDebug { + fmt.Printf("live-at-end spill for %s at %s\n", s.orig[e.ID], b) + } + spill := s.makeSpill(s.orig[e.ID], b) + s.spillLive[b.ID] = append(s.spillLive[b.ID], spill.ID) + } + + // Clear any final uses. + // All that is left should be the pseudo-uses added for values which + // are live at the end of b. 
+ for _, e := range s.live[b.ID] { + u := s.values[e.ID].uses + if u == nil { + f.Fatalf("live at end, no uses v%d", e.ID) + } + if u.next != nil { + f.Fatalf("live at end, too many uses v%d", e.ID) + } + s.values[e.ID].uses = nil + u.next = s.freeUseRecords + s.freeUseRecords = u + } + + // allocReg may have dropped registers from startRegsMask that + // aren't actually needed in startRegs. Synchronize back to + // startRegs. + // + // This must be done before placing spills, which will look at + // startRegs to decide if a block is a valid block for a spill. + if c := countRegs(s.startRegsMask); c != len(s.startRegs[b.ID]) { + regs := make([]startReg, 0, c) + for _, sr := range s.startRegs[b.ID] { + if s.startRegsMask&(regMask(1)< regDebug { + fmt.Printf("delete copied value %s\n", c.LongString()) + } + c.resetArgs() + f.freeValue(c) + delete(s.copies, c) + progress = true + } + } + if !progress { + break + } + } + + for _, b := range s.visitOrder { + i := 0 + for _, v := range b.Values { + if v.Op == OpInvalid { + continue + } + b.Values[i] = v + i++ + } + b.Values = b.Values[:i] + } +} + +func (s *regAllocState) placeSpills() { + mustBeFirst := func(op Op) bool { + return op.isLoweredGetClosurePtr() || op == OpPhi || op == OpArgIntReg || op == OpArgFloatReg + } + + // Start maps block IDs to the list of spills + // that go at the start of the block (but after any phis). + start := map[ID][]*Value{} + // After maps value IDs to the list of spills + // that go immediately after that value ID. + after := map[ID][]*Value{} + + for i := range s.values { + vi := s.values[i] + spill := vi.spill + if spill == nil { + continue + } + if spill.Block != nil { + // Some spills are already fully set up, + // like OpArgs and stack-based phis. + continue + } + v := s.orig[i] + + // Walk down the dominator tree looking for a good place to + // put the spill of v. At the start "best" is the best place + // we have found so far. 
+ // TODO: find a way to make this O(1) without arbitrary cutoffs. + if v == nil { + panic(fmt.Errorf("nil v, s.orig[%d], vi = %v, spill = %s", i, vi, spill.LongString())) + } + best := v.Block + bestArg := v + var bestDepth int16 + if l := s.loopnest.b2l[best.ID]; l != nil { + bestDepth = l.depth + } + b := best + const maxSpillSearch = 100 + for i := 0; i < maxSpillSearch; i++ { + // Find the child of b in the dominator tree which + // dominates all restores. + p := b + b = nil + for c := s.sdom.Child(p); c != nil && i < maxSpillSearch; c, i = s.sdom.Sibling(c), i+1 { + if s.sdom[c.ID].entry <= vi.restoreMin && s.sdom[c.ID].exit >= vi.restoreMax { + // c also dominates all restores. Walk down into c. + b = c + break + } + } + if b == nil { + // Ran out of blocks which dominate all restores. + break + } + + var depth int16 + if l := s.loopnest.b2l[b.ID]; l != nil { + depth = l.depth + } + if depth > bestDepth { + // Don't push the spill into a deeper loop. + continue + } + + // If v is in a register at the start of b, we can + // place the spill here (after the phis). + if len(b.Preds) == 1 { + for _, e := range s.endRegs[b.Preds[0].b.ID] { + if e.v == v { + // Found a better spot for the spill. + best = b + bestArg = e.c + bestDepth = depth + break + } + } + } else { + for _, e := range s.startRegs[b.ID] { + if e.v == v { + // Found a better spot for the spill. + best = b + bestArg = e.c + bestDepth = depth + break + } + } + } + } + + // Put the spill in the best block we found. + spill.Block = best + spill.AddArg(bestArg) + if best == v.Block && !mustBeFirst(v.Op) { + // Place immediately after v. + after[v.ID] = append(after[v.ID], spill) + } else { + // Place at the start of best block. + start[best.ID] = append(start[best.ID], spill) + } + } + + // Insert spill instructions into the block schedules. 
+ var oldSched []*Value + for _, b := range s.visitOrder { + nfirst := 0 + for _, v := range b.Values { + if !mustBeFirst(v.Op) { + break + } + nfirst++ + } + oldSched = append(oldSched[:0], b.Values[nfirst:]...) + b.Values = b.Values[:nfirst] + b.Values = append(b.Values, start[b.ID]...) + for _, v := range oldSched { + b.Values = append(b.Values, v) + b.Values = append(b.Values, after[v.ID]...) + } + } +} + +// shuffle fixes up all the merge edges (those going into blocks of indegree > 1). +func (s *regAllocState) shuffle(stacklive [][]ID) { + var e edgeState + e.s = s + e.cache = map[ID][]*Value{} + e.contents = map[Location]contentRecord{} + if s.f.pass.debug > regDebug { + fmt.Printf("shuffle %s\n", s.f.Name) + fmt.Println(s.f.String()) + } + + for _, b := range s.visitOrder { + if len(b.Preds) <= 1 { + continue + } + e.b = b + for i, edge := range b.Preds { + p := edge.b + e.p = p + e.setup(i, s.endRegs[p.ID], s.startRegs[b.ID], stacklive[p.ID]) + e.process() + } + } + + if s.f.pass.debug > regDebug { + fmt.Printf("post shuffle %s\n", s.f.Name) + fmt.Println(s.f.String()) + } +} + +type edgeState struct { + s *regAllocState + p, b *Block // edge goes from p->b. 
+ + // for each pre-regalloc value, a list of equivalent cached values + cache map[ID][]*Value + cachedVals []ID // (superset of) keys of the above map, for deterministic iteration + + // map from location to the value it contains + contents map[Location]contentRecord + + // desired destination locations + destinations []dstRecord + extra []dstRecord + + usedRegs regMask // registers currently holding something + uniqueRegs regMask // registers holding the only copy of a value + finalRegs regMask // registers holding final target + rematerializeableRegs regMask // registers that hold rematerializeable values +} + +type contentRecord struct { + vid ID // pre-regalloc value + c *Value // cached value + final bool // this is a satisfied destination + pos src.XPos // source position of use of the value +} + +type dstRecord struct { + loc Location // register or stack slot + vid ID // pre-regalloc value it should contain + splice **Value // place to store reference to the generating instruction + pos src.XPos // source position of use of this location +} + +// setup initializes the edge state for shuffling. +func (e *edgeState) setup(idx int, srcReg []endReg, dstReg []startReg, stacklive []ID) { + if e.s.f.pass.debug > regDebug { + fmt.Printf("edge %s->%s\n", e.p, e.b) + } + + // Clear state. + for _, vid := range e.cachedVals { + delete(e.cache, vid) + } + e.cachedVals = e.cachedVals[:0] + for k := range e.contents { + delete(e.contents, k) + } + e.usedRegs = 0 + e.uniqueRegs = 0 + e.finalRegs = 0 + e.rematerializeableRegs = 0 + + // Live registers can be sources. + for _, x := range srcReg { + e.set(&e.s.registers[x.r], x.v.ID, x.c, false, src.NoXPos) // don't care the position of the source + } + // So can all of the spill locations. 
+ for _, spillID := range stacklive { + v := e.s.orig[spillID] + spill := e.s.values[v.ID].spill + if !e.s.sdom.IsAncestorEq(spill.Block, e.p) { + // Spills were placed that only dominate the uses found + // during the first regalloc pass. The edge fixup code + // can't use a spill location if the spill doesn't dominate + // the edge. + // We are guaranteed that if the spill doesn't dominate this edge, + // then the value is available in a register (because we called + // makeSpill for every value not in a register at the start + // of an edge). + continue + } + e.set(e.s.f.getHome(spillID), v.ID, spill, false, src.NoXPos) // don't care the position of the source + } + + // Figure out all the destinations we need. + dsts := e.destinations[:0] + for _, x := range dstReg { + dsts = append(dsts, dstRecord{&e.s.registers[x.r], x.v.ID, nil, x.pos}) + } + // Phis need their args to end up in a specific location. + for _, v := range e.b.Values { + if v.Op != OpPhi { + break + } + loc := e.s.f.getHome(v.ID) + if loc == nil { + continue + } + dsts = append(dsts, dstRecord{loc, v.Args[idx].ID, &v.Args[idx], v.Pos}) + } + e.destinations = dsts + + if e.s.f.pass.debug > regDebug { + for _, vid := range e.cachedVals { + a := e.cache[vid] + for _, c := range a { + fmt.Printf("src %s: v%d cache=%s\n", e.s.f.getHome(c.ID), vid, c) + } + } + for _, d := range e.destinations { + fmt.Printf("dst %s: v%d\n", d.loc, d.vid) + } + } +} + +// process generates code to move all the values to the right destination locations. +func (e *edgeState) process() { + dsts := e.destinations + + // Process the destinations until they are all satisfied. + for len(dsts) > 0 { + i := 0 + for _, d := range dsts { + if !e.processDest(d.loc, d.vid, d.splice, d.pos) { + // Failed - save for next iteration. + dsts[i] = d + i++ + } + } + if i < len(dsts) { + // Made some progress. Go around again. + dsts = dsts[:i] + + // Append any extras destinations we generated. + dsts = append(dsts, e.extra...) 
+ e.extra = e.extra[:0] + continue + } + + // We made no progress. That means that any + // remaining unsatisfied moves are in simple cycles. + // For example, A -> B -> C -> D -> A. + // A ----> B + // ^ | + // | | + // | v + // D <---- C + + // To break the cycle, we pick an unused register, say R, + // and put a copy of B there. + // A ----> B + // ^ | + // | | + // | v + // D <---- C <---- R=copyofB + // When we resume the outer loop, the A->B move can now proceed, + // and eventually the whole cycle completes. + + // Copy any cycle location to a temp register. This duplicates + // one of the cycle entries, allowing the just duplicated value + // to be overwritten and the cycle to proceed. + d := dsts[0] + loc := d.loc + vid := e.contents[loc].vid + c := e.contents[loc].c + r := e.findRegFor(c.Type) + if e.s.f.pass.debug > regDebug { + fmt.Printf("breaking cycle with v%d in %s:%s\n", vid, loc, c) + } + e.erase(r) + pos := d.pos.WithNotStmt() + if _, isReg := loc.(*Register); isReg { + c = e.p.NewValue1(pos, OpCopy, c.Type, c) + } else { + c = e.p.NewValue1(pos, OpLoadReg, c.Type, c) + } + e.set(r, vid, c, false, pos) + if c.Op == OpLoadReg && e.s.isGReg(register(r.(*Register).num)) { + e.s.f.Fatalf("process.OpLoadReg targeting g: " + c.LongString()) + } + } +} + +// processDest generates code to put value vid into location loc. Returns true +// if progress was made. +func (e *edgeState) processDest(loc Location, vid ID, splice **Value, pos src.XPos) bool { + pos = pos.WithNotStmt() + occupant := e.contents[loc] + if occupant.vid == vid { + // Value is already in the correct place. + e.contents[loc] = contentRecord{vid, occupant.c, true, pos} + if splice != nil { + (*splice).Uses-- + *splice = occupant.c + occupant.c.Uses++ + } + // Note: if splice==nil then c will appear dead. This is + // non-SSA formed code, so be careful after this pass not to run + // deadcode elimination. 
+ if _, ok := e.s.copies[occupant.c]; ok { + // The copy at occupant.c was used to avoid spill. + e.s.copies[occupant.c] = true + } + return true + } + + // Check if we're allowed to clobber the destination location. + if len(e.cache[occupant.vid]) == 1 && !e.s.values[occupant.vid].rematerializeable { + // We can't overwrite the last copy + // of a value that needs to survive. + return false + } + + // Copy from a source of v, register preferred. + v := e.s.orig[vid] + var c *Value + var src Location + if e.s.f.pass.debug > regDebug { + fmt.Printf("moving v%d to %s\n", vid, loc) + fmt.Printf("sources of v%d:", vid) + } + for _, w := range e.cache[vid] { + h := e.s.f.getHome(w.ID) + if e.s.f.pass.debug > regDebug { + fmt.Printf(" %s:%s", h, w) + } + _, isreg := h.(*Register) + if src == nil || isreg { + c = w + src = h + } + } + if e.s.f.pass.debug > regDebug { + if src != nil { + fmt.Printf(" [use %s]\n", src) + } else { + fmt.Printf(" [no source]\n") + } + } + _, dstReg := loc.(*Register) + + // Pre-clobber destination. This avoids the + // following situation: + // - v is currently held in R0 and stacktmp0. + // - We want to copy stacktmp1 to stacktmp0. + // - We choose R0 as the temporary register. + // During the copy, both R0 and stacktmp0 are + // clobbered, losing both copies of v. Oops! + // Erasing the destination early means R0 will not + // be chosen as the temp register, as it will then + // be the last copy of v. + e.erase(loc) + var x *Value + if c == nil || e.s.values[vid].rematerializeable { + if !e.s.values[vid].rematerializeable { + e.s.f.Fatalf("can't find source for %s->%s: %s\n", e.p, e.b, v.LongString()) + } + if dstReg { + x = v.copyInto(e.p) + } else { + // Rematerialize into stack slot. Need a free + // register to accomplish this. 
+ r := e.findRegFor(v.Type)
+ e.erase(r)
+ x = v.copyIntoWithXPos(e.p, pos)
+ e.set(r, vid, x, false, pos)
+ // Make sure we spill with the size of the slot, not the
+ // size of x (which might be wider due to our dropping
+ // of narrowing conversions).
+ x = e.p.NewValue1(pos, OpStoreReg, loc.(LocalSlot).Type, x)
+ }
+ } else {
+ // Emit move from src to dst.
+ _, srcReg := src.(*Register)
+ if srcReg {
+ if dstReg {
+ x = e.p.NewValue1(pos, OpCopy, c.Type, c)
+ } else {
+ x = e.p.NewValue1(pos, OpStoreReg, loc.(LocalSlot).Type, c)
+ }
+ } else {
+ if dstReg {
+ x = e.p.NewValue1(pos, OpLoadReg, c.Type, c)
+ } else {
+ // mem->mem. Use temp register.
+ r := e.findRegFor(c.Type)
+ e.erase(r)
+ t := e.p.NewValue1(pos, OpLoadReg, c.Type, c)
+ e.set(r, vid, t, false, pos)
+ x = e.p.NewValue1(pos, OpStoreReg, loc.(LocalSlot).Type, t)
+ }
+ }
+ }
+ e.set(loc, vid, x, true, pos)
+ if x.Op == OpLoadReg && e.s.isGReg(register(loc.(*Register).num)) {
+ e.s.f.Fatalf("processDest.OpLoadReg targeting g: " + x.LongString())
+ }
+ if splice != nil {
+ (*splice).Uses--
+ *splice = x
+ x.Uses++
+ }
+ return true
+}
+
+// set changes the contents of location loc to hold the given value and its cached representative.
+func (e *edgeState) set(loc Location, vid ID, c *Value, final bool, pos src.XPos) {
+ e.s.f.setHome(c, loc)
+ e.contents[loc] = contentRecord{vid, c, final, pos}
+ a := e.cache[vid]
+ if len(a) == 0 {
+ e.cachedVals = append(e.cachedVals, vid)
+ }
+ a = append(a, c)
+ e.cache[vid] = a
+ if r, ok := loc.(*Register); ok {
+ if e.usedRegs&(regMask(1)<<uint(r.num)) != 0 {
+ e.s.f.Fatalf("%v is already set (v%d/%v)", r, vid, c)
+ }
+ e.usedRegs |= regMask(1) << uint(r.num)
+ if final {
+ e.finalRegs |= regMask(1) << uint(r.num)
+ }
+ if len(a) == 1 {
+ e.uniqueRegs |= regMask(1) << uint(r.num)
+ }
+ if len(a) == 2 {
+ if t, ok := e.s.f.getHome(a[0].ID).(*Register); ok {
+ e.uniqueRegs &^= regMask(1) << uint(t.num)
+ }
+ }
+ if e.s.values[vid].rematerializeable {
+ e.rematerializeableRegs |= regMask(1) << uint(r.num)
+ }
+ }
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf("%s\n", c.LongString())
+ fmt.Printf("v%d now available in %s:%s\n", vid, loc, c)
+ }
+}
+
+// erase removes any user of loc.
+func (e *edgeState) erase(loc Location) {
+ cr := e.contents[loc]
+ if cr.c == nil {
+ return
+ }
+ vid := cr.vid
+
+ if cr.final {
+ // Add a destination to move this value back into place. 
+ // Make sure it gets added to the tail of the destination queue + // so we make progress on other moves first. + e.extra = append(e.extra, dstRecord{loc, cr.vid, nil, cr.pos}) + } + + // Remove c from the list of cached values. + a := e.cache[vid] + for i, c := range a { + if e.s.f.getHome(c.ID) == loc { + if e.s.f.pass.debug > regDebug { + fmt.Printf("v%d no longer available in %s:%s\n", vid, loc, c) + } + a[i], a = a[len(a)-1], a[:len(a)-1] + break + } + } + e.cache[vid] = a + + // Update register masks. + if r, ok := loc.(*Register); ok { + e.usedRegs &^= regMask(1) << uint(r.num) + if cr.final { + e.finalRegs &^= regMask(1) << uint(r.num) + } + e.rematerializeableRegs &^= regMask(1) << uint(r.num) + } + if len(a) == 1 { + if r, ok := e.s.f.getHome(a[0].ID).(*Register); ok { + e.uniqueRegs |= regMask(1) << uint(r.num) + } + } +} + +// findRegFor finds a register we can use to make a temp copy of type typ. +func (e *edgeState) findRegFor(typ *types.Type) Location { + // Which registers are possibilities. + types := &e.s.f.Config.Types + m := e.s.compatRegs(typ) + + // Pick a register. In priority order: + // 1) an unused register + // 2) a non-unique register not holding a final value + // 3) a non-unique register + // 4) a register holding a rematerializeable value + x := m &^ e.usedRegs + if x != 0 { + return &e.s.registers[pickReg(x)] + } + x = m &^ e.uniqueRegs &^ e.finalRegs + if x != 0 { + return &e.s.registers[pickReg(x)] + } + x = m &^ e.uniqueRegs + if x != 0 { + return &e.s.registers[pickReg(x)] + } + x = m & e.rematerializeableRegs + if x != 0 { + return &e.s.registers[pickReg(x)] + } + + // No register is available. + // Pick a register to spill. + for _, vid := range e.cachedVals { + a := e.cache[vid] + for _, c := range a { + if r, ok := e.s.f.getHome(c.ID).(*Register); ok && m>>uint(r.num)&1 != 0 { + if !c.rematerializeable() { + x := e.p.NewValue1(c.Pos, OpStoreReg, c.Type, c) + // Allocate a temp location to spill a register to. 
+ // The type of the slot is immaterial - it will not be live across + // any safepoint. Just use a type big enough to hold any register. + t := LocalSlot{N: e.s.f.NewLocal(c.Pos, types.Int64), Type: types.Int64} + // TODO: reuse these slots. They'll need to be erased first. + e.set(t, vid, x, false, c.Pos) + if e.s.f.pass.debug > regDebug { + fmt.Printf(" SPILL %s->%s %s\n", r, t, x.LongString()) + } + } + // r will now be overwritten by the caller. At some point + // later, the newly saved value will be moved back to its + // final destination in processDest. + return r + } + } + } + + fmt.Printf("m:%d unique:%d final:%d rematerializable:%d\n", m, e.uniqueRegs, e.finalRegs, e.rematerializeableRegs) + for _, vid := range e.cachedVals { + a := e.cache[vid] + for _, c := range a { + fmt.Printf("v%d: %s %s\n", vid, c, e.s.f.getHome(c.ID)) + } + } + e.s.f.Fatalf("can't find empty register on edge %s->%s", e.p, e.b) + return nil +} + +// rematerializeable reports whether the register allocator should recompute +// a value instead of spilling/restoring it. +func (v *Value) rematerializeable() bool { + if !opcodeTable[v.Op].rematerializeable { + return false + } + for _, a := range v.Args { + // SP and SB (generated by OpSP and OpSB) are always available. + if a.Op != OpSP && a.Op != OpSB { + return false + } + } + return true +} + +type liveInfo struct { + ID ID // ID of value + dist int32 // # of instructions before next use + pos src.XPos // source position of next use +} + +// computeLive computes a map from block ID to a list of value IDs live at the end +// of that block. Together with the value ID is a count of how many instructions +// to the next use of that value. The resulting map is stored in s.live. +// computeLive also computes the desired register information at the end of each block. +// This desired register information is stored in s.desired. +// TODO: this could be quadratic if lots of variables are live across lots of +// basic blocks. 
Figure out a way to make this function (or, more precisely, the user +// of this function) require only linear size & time. +func (s *regAllocState) computeLive() { + f := s.f + s.live = make([][]liveInfo, f.NumBlocks()) + s.desired = make([]desiredState, f.NumBlocks()) + var phis []*Value + + live := f.newSparseMapPos(f.NumValues()) + defer f.retSparseMapPos(live) + t := f.newSparseMapPos(f.NumValues()) + defer f.retSparseMapPos(t) + + // Keep track of which value we want in each register. + var desired desiredState + + // Instead of iterating over f.Blocks, iterate over their postordering. + // Liveness information flows backward, so starting at the end + // increases the probability that we will stabilize quickly. + // TODO: Do a better job yet. Here's one possibility: + // Calculate the dominator tree and locate all strongly connected components. + // If a value is live in one block of an SCC, it is live in all. + // Walk the dominator tree from end to beginning, just once, treating SCC + // components as single blocks, duplicated calculated liveness information + // out to all of them. + po := f.postorder() + s.loopnest = f.loopnest() + s.loopnest.calculateDepths() + for { + changed := false + + for _, b := range po { + // Start with known live values at the end of the block. + // Add len(b.Values) to adjust from end-of-block distance + // to beginning-of-block distance. + live.clear() + for _, e := range s.live[b.ID] { + live.set(e.ID, e.dist+int32(len(b.Values)), e.pos) + } + + // Mark control values as live + for _, c := range b.ControlValues() { + if s.values[c.ID].needReg { + live.set(c.ID, int32(len(b.Values)), b.Pos) + } + } + + // Propagate backwards to the start of the block + // Assumes Values have been scheduled. 
+ phis = phis[:0] + for i := len(b.Values) - 1; i >= 0; i-- { + v := b.Values[i] + live.remove(v.ID) + if v.Op == OpPhi { + // save phi ops for later + phis = append(phis, v) + continue + } + if opcodeTable[v.Op].call { + c := live.contents() + for i := range c { + c[i].val += unlikelyDistance + } + } + for _, a := range v.Args { + if s.values[a.ID].needReg { + live.set(a.ID, int32(i), v.Pos) + } + } + } + // Propagate desired registers backwards. + desired.copy(&s.desired[b.ID]) + for i := len(b.Values) - 1; i >= 0; i-- { + v := b.Values[i] + prefs := desired.remove(v.ID) + if v.Op == OpPhi { + // TODO: if v is a phi, save desired register for phi inputs. + // For now, we just drop it and don't propagate + // desired registers back though phi nodes. + continue + } + regspec := s.regspec(v) + // Cancel desired registers if they get clobbered. + desired.clobber(regspec.clobbers) + // Update desired registers if there are any fixed register inputs. + for _, j := range regspec.inputs { + if countRegs(j.regs) != 1 { + continue + } + desired.clobber(j.regs) + desired.add(v.Args[j.idx].ID, pickReg(j.regs)) + } + // Set desired register of input 0 if this is a 2-operand instruction. + if opcodeTable[v.Op].resultInArg0 || v.Op == OpAMD64ADDQconst || v.Op == OpAMD64ADDLconst || v.Op == OpSelect0 { + // ADDQconst is added here because we want to treat it as resultInArg0 for + // the purposes of desired registers, even though it is not an absolute requirement. + // This is because we'd rather implement it as ADDQ instead of LEAQ. + // Same for ADDLconst + // Select0 is added here to propagate the desired register to the tuple-generating instruction. + if opcodeTable[v.Op].commutative { + desired.addList(v.Args[1].ID, prefs) + } + desired.addList(v.Args[0].ID, prefs) + } + } + + // For each predecessor of b, expand its list of live-at-end values. 
+ // invariant: live contains the values live at the start of b (excluding phi inputs) + for i, e := range b.Preds { + p := e.b + // Compute additional distance for the edge. + // Note: delta must be at least 1 to distinguish the control + // value use from the first user in a successor block. + delta := int32(normalDistance) + if len(p.Succs) == 2 { + if p.Succs[0].b == b && p.Likely == BranchLikely || + p.Succs[1].b == b && p.Likely == BranchUnlikely { + delta = likelyDistance + } + if p.Succs[0].b == b && p.Likely == BranchUnlikely || + p.Succs[1].b == b && p.Likely == BranchLikely { + delta = unlikelyDistance + } + } + + // Update any desired registers at the end of p. + s.desired[p.ID].merge(&desired) + + // Start t off with the previously known live values at the end of p. + t.clear() + for _, e := range s.live[p.ID] { + t.set(e.ID, e.dist, e.pos) + } + update := false + + // Add new live values from scanning this block. + for _, e := range live.contents() { + d := e.val + delta + if !t.contains(e.key) || d < t.get(e.key) { + update = true + t.set(e.key, d, e.pos) + } + } + // Also add the correct arg from the saved phi values. + // All phis are at distance delta (we consider them + // simultaneously happening at the start of the block). + for _, v := range phis { + id := v.Args[i].ID + if s.values[id].needReg && (!t.contains(id) || delta < t.get(id)) { + update = true + t.set(id, delta, v.Pos) + } + } + + if !update { + continue + } + // The live set has changed, update it. 
+ l := s.live[p.ID][:0] + if cap(l) < t.size() { + l = make([]liveInfo, 0, t.size()) + } + for _, e := range t.contents() { + l = append(l, liveInfo{e.key, e.val, e.pos}) + } + s.live[p.ID] = l + changed = true + } + } + + if !changed { + break + } + } + if f.pass.debug > regDebug { + fmt.Println("live values at end of each block") + for _, b := range f.Blocks { + fmt.Printf(" %s:", b) + for _, x := range s.live[b.ID] { + fmt.Printf(" v%d(%d)", x.ID, x.dist) + for _, e := range s.desired[b.ID].entries { + if e.ID != x.ID { + continue + } + fmt.Printf("[") + first := true + for _, r := range e.regs { + if r == noRegister { + continue + } + if !first { + fmt.Printf(",") + } + fmt.Print(&s.registers[r]) + first = false + } + fmt.Printf("]") + } + } + if avoid := s.desired[b.ID].avoid; avoid != 0 { + fmt.Printf(" avoid=%v", s.RegMaskString(avoid)) + } + fmt.Println() + } + } +} + +// A desiredState represents desired register assignments. +type desiredState struct { + // Desired assignments will be small, so we just use a list + // of valueID+registers entries. + entries []desiredStateEntry + // Registers that other values want to be in. This value will + // contain at least the union of the regs fields of entries, but + // may contain additional entries for values that were once in + // this data structure but are no longer. + avoid regMask +} +type desiredStateEntry struct { + // (pre-regalloc) value + ID ID + // Registers it would like to be in, in priority order. + // Unused slots are filled with noRegister. + // For opcodes that return tuples, we track desired registers only + // for the first element of the tuple. + regs [4]register +} + +func (d *desiredState) clear() { + d.entries = d.entries[:0] + d.avoid = 0 +} + +// get returns a list of desired registers for value vid. 
+func (d *desiredState) get(vid ID) [4]register { + for _, e := range d.entries { + if e.ID == vid { + return e.regs + } + } + return [4]register{noRegister, noRegister, noRegister, noRegister} +} + +// add records that we'd like value vid to be in register r. +func (d *desiredState) add(vid ID, r register) { + d.avoid |= regMask(1) << r + for i := range d.entries { + e := &d.entries[i] + if e.ID != vid { + continue + } + if e.regs[0] == r { + // Already known and highest priority + return + } + for j := 1; j < len(e.regs); j++ { + if e.regs[j] == r { + // Move from lower priority to top priority + copy(e.regs[1:], e.regs[:j]) + e.regs[0] = r + return + } + } + copy(e.regs[1:], e.regs[:]) + e.regs[0] = r + return + } + d.entries = append(d.entries, desiredStateEntry{vid, [4]register{r, noRegister, noRegister, noRegister}}) +} + +func (d *desiredState) addList(vid ID, regs [4]register) { + // regs is in priority order, so iterate in reverse order. + for i := len(regs) - 1; i >= 0; i-- { + r := regs[i] + if r != noRegister { + d.add(vid, r) + } + } +} + +// clobber erases any desired registers in the set m. +func (d *desiredState) clobber(m regMask) { + for i := 0; i < len(d.entries); { + e := &d.entries[i] + j := 0 + for _, r := range e.regs { + if r != noRegister && m>>r&1 == 0 { + e.regs[j] = r + j++ + } + } + if j == 0 { + // No more desired registers for this value. + d.entries[i] = d.entries[len(d.entries)-1] + d.entries = d.entries[:len(d.entries)-1] + continue + } + for ; j < len(e.regs); j++ { + e.regs[j] = noRegister + } + i++ + } + d.avoid &^= m +} + +// copy copies a desired state from another desiredState x. +func (d *desiredState) copy(x *desiredState) { + d.entries = append(d.entries[:0], x.entries...) + d.avoid = x.avoid +} + +// remove removes the desired registers for vid and returns them. 
+func (d *desiredState) remove(vid ID) [4]register { + for i := range d.entries { + if d.entries[i].ID == vid { + regs := d.entries[i].regs + d.entries[i] = d.entries[len(d.entries)-1] + d.entries = d.entries[:len(d.entries)-1] + return regs + } + } + return [4]register{noRegister, noRegister, noRegister, noRegister} +} + +// merge merges another desired state x into d. +func (d *desiredState) merge(x *desiredState) { + d.avoid |= x.avoid + // There should only be a few desired registers, so + // linear insert is ok. + for _, e := range x.entries { + d.addList(e.ID, e.regs) + } +} + +func min32(x, y int32) int32 { + if x < y { + return x + } + return y +} +func max32(x, y int32) int32 { + if x > y { + return x + } + return y +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/regalloc_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/regalloc_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7d804a0d30be33674b7eae36ca238586451e994c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/regalloc_test.go @@ -0,0 +1,229 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import ( + "cmd/compile/internal/types" + "testing" +) + +func TestLiveControlOps(t *testing.T) { + c := testConfig(t) + f := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("x", OpAMD64MOVLconst, c.config.Types.Int8, 1, nil), + Valu("y", OpAMD64MOVLconst, c.config.Types.Int8, 2, nil), + Valu("a", OpAMD64TESTB, types.TypeFlags, 0, nil, "x", "y"), + Valu("b", OpAMD64TESTB, types.TypeFlags, 0, nil, "y", "x"), + Eq("a", "if", "exit"), + ), + Bloc("if", + Eq("b", "plain", "exit"), + ), + Bloc("plain", + Goto("exit"), + ), + Bloc("exit", + Exit("mem"), + ), + ) + flagalloc(f.f) + regalloc(f.f) + checkFunc(f.f) +} + +// Test to make sure G register is never reloaded from spill (spill of G is okay) +// See #25504 +func TestNoGetgLoadReg(t *testing.T) { + /* + Original: + func fff3(i int) *g { + gee := getg() + if i == 0 { + fff() + } + return gee // here + } + */ + c := testConfigARM64(t) + f := c.Fun("b1", + Bloc("b1", + Valu("v1", OpInitMem, types.TypeMem, 0, nil), + Valu("v6", OpArg, c.config.Types.Int64, 0, c.Temp(c.config.Types.Int64)), + Valu("v8", OpGetG, c.config.Types.Int64.PtrTo(), 0, nil, "v1"), + Valu("v11", OpARM64CMPconst, types.TypeFlags, 0, nil, "v6"), + Eq("v11", "b2", "b4"), + ), + Bloc("b4", + Goto("b3"), + ), + Bloc("b3", + Valu("v14", OpPhi, types.TypeMem, 0, nil, "v1", "v12"), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Valu("v16", OpARM64MOVDstore, types.TypeMem, 0, nil, "v8", "sb", "v14"), + Exit("v16"), + ), + Bloc("b2", + Valu("v12", OpARM64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "v1"), + Goto("b3"), + ), + ) + regalloc(f.f) + checkFunc(f.f) + // Double-check that we never restore to the G register. Regalloc should catch it, but check again anyway. 
+ r := f.f.RegAlloc + for _, b := range f.blocks { + for _, v := range b.Values { + if v.Op == OpLoadReg && r[v.ID].String() == "g" { + t.Errorf("Saw OpLoadReg targeting g register: %s", v.LongString()) + } + } + } +} + +// Test to make sure we don't push spills into loops. +// See issue #19595. +func TestSpillWithLoop(t *testing.T) { + c := testConfig(t) + f := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("ptr", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64)), + Valu("cond", OpArg, c.config.Types.Bool, 0, c.Temp(c.config.Types.Bool)), + Valu("ld", OpAMD64MOVQload, c.config.Types.Int64, 0, nil, "ptr", "mem"), // this value needs a spill + Goto("loop"), + ), + Bloc("loop", + Valu("memphi", OpPhi, types.TypeMem, 0, nil, "mem", "call"), + Valu("call", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "memphi"), + Valu("test", OpAMD64CMPBconst, types.TypeFlags, 0, nil, "cond"), + Eq("test", "next", "exit"), + ), + Bloc("next", + Goto("loop"), + ), + Bloc("exit", + Valu("store", OpAMD64MOVQstore, types.TypeMem, 0, nil, "ptr", "ld", "call"), + Exit("store"), + ), + ) + regalloc(f.f) + checkFunc(f.f) + for _, v := range f.blocks["loop"].Values { + if v.Op == OpStoreReg { + t.Errorf("spill inside loop %s", v.LongString()) + } + } +} + +func TestSpillMove1(t *testing.T) { + c := testConfig(t) + f := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("x", OpArg, c.config.Types.Int64, 0, c.Temp(c.config.Types.Int64)), + Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo())), + Valu("a", OpAMD64TESTQ, types.TypeFlags, 0, nil, "x", "x"), + Goto("loop1"), + ), + Bloc("loop1", + Valu("y", OpAMD64MULQ, c.config.Types.Int64, 0, nil, "x", "x"), + Eq("a", "loop2", "exit1"), + ), + Bloc("loop2", + Eq("a", "loop1", "exit2"), + ), + Bloc("exit1", + // store before call, y is available in a register + Valu("mem2", OpAMD64MOVQstore, types.TypeMem, 0, 
nil, "p", "y", "mem"), + Valu("mem3", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem2"), + Exit("mem3"), + ), + Bloc("exit2", + // store after call, y must be loaded from a spill location + Valu("mem4", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"), + Valu("mem5", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem4"), + Exit("mem5"), + ), + ) + flagalloc(f.f) + regalloc(f.f) + checkFunc(f.f) + // Spill should be moved to exit2. + if numSpills(f.blocks["loop1"]) != 0 { + t.Errorf("spill present from loop1") + } + if numSpills(f.blocks["loop2"]) != 0 { + t.Errorf("spill present in loop2") + } + if numSpills(f.blocks["exit1"]) != 0 { + t.Errorf("spill present in exit1") + } + if numSpills(f.blocks["exit2"]) != 1 { + t.Errorf("spill missing in exit2") + } + +} + +func TestSpillMove2(t *testing.T) { + c := testConfig(t) + f := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("x", OpArg, c.config.Types.Int64, 0, c.Temp(c.config.Types.Int64)), + Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo())), + Valu("a", OpAMD64TESTQ, types.TypeFlags, 0, nil, "x", "x"), + Goto("loop1"), + ), + Bloc("loop1", + Valu("y", OpAMD64MULQ, c.config.Types.Int64, 0, nil, "x", "x"), + Eq("a", "loop2", "exit1"), + ), + Bloc("loop2", + Eq("a", "loop1", "exit2"), + ), + Bloc("exit1", + // store after call, y must be loaded from a spill location + Valu("mem2", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"), + Valu("mem3", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem2"), + Exit("mem3"), + ), + Bloc("exit2", + // store after call, y must be loaded from a spill location + Valu("mem4", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"), + Valu("mem5", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem4"), + Exit("mem5"), + ), + ) + flagalloc(f.f) + regalloc(f.f) + checkFunc(f.f) + // There should be a spill in loop1, and nowhere else. 
+ // TODO: resurrect moving spills out of loops? We could put spills at the start of both exit1 and exit2. + if numSpills(f.blocks["loop1"]) != 1 { + t.Errorf("spill missing from loop1") + } + if numSpills(f.blocks["loop2"]) != 0 { + t.Errorf("spill present in loop2") + } + if numSpills(f.blocks["exit1"]) != 0 { + t.Errorf("spill present in exit1") + } + if numSpills(f.blocks["exit2"]) != 0 { + t.Errorf("spill present in exit2") + } + +} + +func numSpills(b *Block) int { + n := 0 + for _, v := range b.Values { + if v.Op == OpStoreReg { + n++ + } + } + return n +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewrite.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewrite.go new file mode 100644 index 0000000000000000000000000000000000000000..5ed7331703bf09f438b1fd8076cf700070b360bb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewrite.go @@ -0,0 +1,2213 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/logopt" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/s390x" + "cmd/internal/objabi" + "cmd/internal/src" + "encoding/binary" + "fmt" + "internal/buildcfg" + "io" + "math" + "math/bits" + "os" + "path/filepath" + "strings" +) + +type deadValueChoice bool + +const ( + leaveDeadValues deadValueChoice = false + removeDeadValues = true +) + +// deadcode indicates whether rewrite should try to remove any values that become dead. 
+func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter, deadcode deadValueChoice) { + // repeat rewrites until we find no more rewrites + pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block + pendingLines.clear() + debug := f.pass.debug + if debug > 1 { + fmt.Printf("%s: rewriting for %s\n", f.pass.name, f.Name) + } + var iters int + var states map[string]bool + for { + change := false + deadChange := false + for _, b := range f.Blocks { + var b0 *Block + if debug > 1 { + b0 = new(Block) + *b0 = *b + b0.Succs = append([]Edge{}, b.Succs...) // make a new copy, not aliasing + } + for i, c := range b.ControlValues() { + for c.Op == OpCopy { + c = c.Args[0] + b.ReplaceControl(i, c) + } + } + if rb(b) { + change = true + if debug > 1 { + fmt.Printf("rewriting %s -> %s\n", b0.LongString(), b.LongString()) + } + } + for j, v := range b.Values { + var v0 *Value + if debug > 1 { + v0 = new(Value) + *v0 = *v + v0.Args = append([]*Value{}, v.Args...) // make a new copy, not aliasing + } + if v.Uses == 0 && v.removeable() { + if v.Op != OpInvalid && deadcode == removeDeadValues { + // Reset any values that are now unused, so that we decrement + // the use count of all of its arguments. + // Not quite a deadcode pass, because it does not handle cycles. + // But it should help Uses==1 rules to fire. + v.reset(OpInvalid) + deadChange = true + } + // No point rewriting values which aren't used. + continue + } + + vchange := phielimValue(v) + if vchange && debug > 1 { + fmt.Printf("rewriting %s -> %s\n", v0.LongString(), v.LongString()) + } + + // Eliminate copy inputs. + // If any copy input becomes unused, mark it + // as invalid and discard its argument. Repeat + // recursively on the discarded argument. + // This phase helps remove phantom "dead copy" uses + // of a value so that a x.Uses==1 rule condition + // fires reliably. 
+ for i, a := range v.Args { + if a.Op != OpCopy { + continue + } + aa := copySource(a) + v.SetArg(i, aa) + // If a, a copy, has a line boundary indicator, attempt to find a new value + // to hold it. The first candidate is the value that will replace a (aa), + // if it shares the same block and line and is eligible. + // The second option is v, which has a as an input. Because aa is earlier in + // the data flow, it is the better choice. + if a.Pos.IsStmt() == src.PosIsStmt { + if aa.Block == a.Block && aa.Pos.Line() == a.Pos.Line() && aa.Pos.IsStmt() != src.PosNotStmt { + aa.Pos = aa.Pos.WithIsStmt() + } else if v.Block == a.Block && v.Pos.Line() == a.Pos.Line() && v.Pos.IsStmt() != src.PosNotStmt { + v.Pos = v.Pos.WithIsStmt() + } else { + // Record the lost line and look for a new home after all rewrites are complete. + // TODO: it's possible (in FOR loops, in particular) for statement boundaries for the same + // line to appear in more than one block, but only one block is stored, so if both end + // up here, then one will be lost. + pendingLines.set(a.Pos, int32(a.Block.ID)) + } + a.Pos = a.Pos.WithNotStmt() + } + vchange = true + for a.Uses == 0 { + b := a.Args[0] + a.reset(OpInvalid) + a = b + } + } + if vchange && debug > 1 { + fmt.Printf("rewriting %s -> %s\n", v0.LongString(), v.LongString()) + } + + // apply rewrite function + if rv(v) { + vchange = true + // If value changed to a poor choice for a statement boundary, move the boundary + if v.Pos.IsStmt() == src.PosIsStmt { + if k := nextGoodStatementIndex(v, j, b); k != j { + v.Pos = v.Pos.WithNotStmt() + b.Values[k].Pos = b.Values[k].Pos.WithIsStmt() + } + } + } + + change = change || vchange + if vchange && debug > 1 { + fmt.Printf("rewriting %s -> %s\n", v0.LongString(), v.LongString()) + } + } + } + if !change && !deadChange { + break + } + iters++ + if (iters > 1000 || debug >= 2) && change { + // We've done a suspiciously large number of rewrites (or we're in debug mode). 
+ // As of Sep 2021, 90% of rewrites complete in 4 iterations or fewer + // and the maximum value encountered during make.bash is 12. + // Start checking for cycles. (This is too expensive to do routinely.) + // Note: we avoid this path for deadChange-only iterations, to fix #51639. + if states == nil { + states = make(map[string]bool) + } + h := f.rewriteHash() + if _, ok := states[h]; ok { + // We've found a cycle. + // To diagnose it, set debug to 2 and start again, + // so that we'll print all rules applied until we complete another cycle. + // If debug is already >= 2, we've already done that, so it's time to crash. + if debug < 2 { + debug = 2 + states = make(map[string]bool) + } else { + f.Fatalf("rewrite cycle detected") + } + } + states[h] = true + } + } + // remove clobbered values + for _, b := range f.Blocks { + j := 0 + for i, v := range b.Values { + vl := v.Pos + if v.Op == OpInvalid { + if v.Pos.IsStmt() == src.PosIsStmt { + pendingLines.set(vl, int32(b.ID)) + } + f.freeValue(v) + continue + } + if v.Pos.IsStmt() != src.PosNotStmt && !notStmtBoundary(v.Op) && pendingLines.get(vl) == int32(b.ID) { + pendingLines.remove(vl) + v.Pos = v.Pos.WithIsStmt() + } + if i != j { + b.Values[j] = v + } + j++ + } + if pendingLines.get(b.Pos) == int32(b.ID) { + b.Pos = b.Pos.WithIsStmt() + pendingLines.remove(b.Pos) + } + b.truncateValues(j) + } +} + +// Common functions called from rewriting rules + +func is64BitFloat(t *types.Type) bool { + return t.Size() == 8 && t.IsFloat() +} + +func is32BitFloat(t *types.Type) bool { + return t.Size() == 4 && t.IsFloat() +} + +func is64BitInt(t *types.Type) bool { + return t.Size() == 8 && t.IsInteger() +} + +func is32BitInt(t *types.Type) bool { + return t.Size() == 4 && t.IsInteger() +} + +func is16BitInt(t *types.Type) bool { + return t.Size() == 2 && t.IsInteger() +} + +func is8BitInt(t *types.Type) bool { + return t.Size() == 1 && t.IsInteger() +} + +func isPtr(t *types.Type) bool { + return t.IsPtrShaped() +} + +// 
// mergeSym merges two symbolic offsets. There is no real merging of
// offsets, we just pick the non-nil one.
func mergeSym(x, y Sym) Sym {
	if x == nil {
		return y
	}
	if y == nil {
		return x
	}
	panic(fmt.Sprintf("mergeSym with two non-nil syms %v %v", x, y))
}

// canMergeSym reports whether mergeSym can combine x and y,
// i.e. at least one of them is nil.
func canMergeSym(x, y Sym) bool {
	return x == nil || y == nil
}

// canMergeLoadClobber reports whether the load can be merged into target without
// invalidating the schedule.
// It also checks that the other non-load argument x is something we
// are ok with clobbering.
func canMergeLoadClobber(target, load, x *Value) bool {
	// The register containing x is going to get clobbered.
	// Don't merge if we still need the value of x.
	// We don't have liveness information here, but we can
	// approximate x dying with:
	//  1) target is x's only use.
	//  2) target is not in a deeper loop than x.
	if x.Uses != 1 {
		return false
	}
	loopnest := x.Block.Func.loopnest()
	loopnest.calculateDepths()
	if loopnest.depth(target.Block.ID) > loopnest.depth(x.Block.ID) {
		return false
	}
	return canMergeLoad(target, load)
}

// canMergeLoad reports whether the load can be merged into target without
// invalidating the schedule.
func canMergeLoad(target, load *Value) bool {
	if target.Block.ID != load.Block.ID {
		// If the load is in a different block do not merge it.
		return false
	}

	// We can't merge the load into the target if the load
	// has more than one use.
	if load.Uses != 1 {
		return false
	}

	mem := load.MemoryArg()

	// We need the load's memory arg to still be alive at target. That
	// can't be the case if one of target's args depends on a memory
	// state that is a successor of load's memory arg.
	//
	// For example, it would be invalid to merge load into target in
	// the following situation because newmem has killed oldmem
	// before target is reached:
	//     load = read ... oldmem
	//   newmem = write ... oldmem
	//     arg0 = read ... newmem
	//   target = add arg0 load
	//
	// If the argument comes from a different block then we can exclude
	// it immediately because it must dominate load (which is in the
	// same block as target).
	var args []*Value
	for _, a := range target.Args {
		if a != load && a.Block.ID == target.Block.ID {
			args = append(args, a)
		}
	}

	// memPreds contains memory states known to be predecessors of load's
	// memory state. It is lazily initialized.
	var memPreds map[*Value]bool
	// Worklist walk backwards from target's arguments, looking for a
	// memory state that would invalidate the merge.
	for i := 0; len(args) > 0; i++ {
		const limit = 100
		if i >= limit {
			// Give up if we have done a lot of iterations.
			return false
		}
		v := args[len(args)-1]
		args = args[:len(args)-1]
		if target.Block.ID != v.Block.ID {
			// Since target and load are in the same block
			// we can stop searching when we leave the block.
			continue
		}
		if v.Op == OpPhi {
			// A Phi implies we have reached the top of the block.
			// The memory phi, if it exists, is always
			// the first logical store in the block.
			continue
		}
		if v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
			// We could handle this situation however it is likely
			// to be very rare.
			return false
		}
		if v.Op.SymEffect()&SymAddr != 0 {
			// This case prevents an operation that calculates the
			// address of a local variable from being forced to schedule
			// before its corresponding VarDef.
			// See issue 28445.
			//   v1 = LOAD ...
			//   v2 = VARDEF
			//   v3 = LEAQ
			//   v4 = CMPQ v1 v3
			// We don't want to combine the CMPQ with the load, because
			// that would force the CMPQ to schedule before the VARDEF, which
			// in turn requires the LEAQ to schedule before the VARDEF.
			return false
		}
		if v.Type.IsMemory() {
			if memPreds == nil {
				// Initialise a map containing memory states
				// known to be predecessors of load's memory
				// state.
				memPreds = make(map[*Value]bool)
				m := mem
				const limit = 50
				for i := 0; i < limit; i++ {
					if m.Op == OpPhi {
						// The memory phi, if it exists, is always
						// the first logical store in the block.
						break
					}
					if m.Block.ID != target.Block.ID {
						break
					}
					if !m.Type.IsMemory() {
						break
					}
					memPreds[m] = true
					if len(m.Args) == 0 {
						break
					}
					m = m.MemoryArg()
				}
			}

			// We can merge if v is a predecessor of mem.
			//
			// For example, we can merge load into target in the
			// following scenario:
			//      x = read ... v
			//    mem = write ... v
			//   load = read ... mem
			// target = add x load
			if memPreds[v] {
				continue
			}
			return false
		}
		if len(v.Args) > 0 && v.Args[len(v.Args)-1] == mem {
			// If v takes mem as an input then we know mem
			// is valid at this point.
			continue
		}
		for _, a := range v.Args {
			if target.Block.ID == a.Block.ID {
				args = append(args, a)
			}
		}
	}

	return true
}

// isSameCall reports whether sym is the same as the given named symbol.
func isSameCall(sym interface{}, name string) bool {
	fn := sym.(*AuxCall).Fn
	return fn != nil && fn.String() == name
}

// canLoadUnaligned reports if the architecture supports unaligned load operations.
func canLoadUnaligned(c *Config) bool {
	return c.ctxt.Arch.Alignment == 1
}

// nlzX returns the number of leading zeros.
func nlz64(x int64) int { return bits.LeadingZeros64(uint64(x)) }
func nlz32(x int32) int { return bits.LeadingZeros32(uint32(x)) }
func nlz16(x int16) int { return bits.LeadingZeros16(uint16(x)) }
func nlz8(x int8) int   { return bits.LeadingZeros8(uint8(x)) }

// ntzX returns the number of trailing zeros.
+func ntz64(x int64) int { return bits.TrailingZeros64(uint64(x)) } +func ntz32(x int32) int { return bits.TrailingZeros32(uint32(x)) } +func ntz16(x int16) int { return bits.TrailingZeros16(uint16(x)) } +func ntz8(x int8) int { return bits.TrailingZeros8(uint8(x)) } + +func oneBit(x int64) bool { return x&(x-1) == 0 && x != 0 } +func oneBit8(x int8) bool { return x&(x-1) == 0 && x != 0 } +func oneBit16(x int16) bool { return x&(x-1) == 0 && x != 0 } +func oneBit32(x int32) bool { return x&(x-1) == 0 && x != 0 } +func oneBit64(x int64) bool { return x&(x-1) == 0 && x != 0 } + +// nto returns the number of trailing ones. +func nto(x int64) int64 { + return int64(ntz64(^x)) +} + +// logX returns logarithm of n base 2. +// n must be a positive power of 2 (isPowerOfTwoX returns true). +func log8(n int8) int64 { + return int64(bits.Len8(uint8(n))) - 1 +} +func log16(n int16) int64 { + return int64(bits.Len16(uint16(n))) - 1 +} +func log32(n int32) int64 { + return int64(bits.Len32(uint32(n))) - 1 +} +func log64(n int64) int64 { + return int64(bits.Len64(uint64(n))) - 1 +} + +// log2uint32 returns logarithm in base 2 of uint32(n), with log2(0) = -1. +// Rounds down. +func log2uint32(n int64) int64 { + return int64(bits.Len32(uint32(n))) - 1 +} + +// isPowerOfTwoX functions report whether n is a power of 2. +func isPowerOfTwo8(n int8) bool { + return n > 0 && n&(n-1) == 0 +} +func isPowerOfTwo16(n int16) bool { + return n > 0 && n&(n-1) == 0 +} +func isPowerOfTwo32(n int32) bool { + return n > 0 && n&(n-1) == 0 +} +func isPowerOfTwo64(n int64) bool { + return n > 0 && n&(n-1) == 0 +} + +// isUint64PowerOfTwo reports whether uint64(n) is a power of 2. +func isUint64PowerOfTwo(in int64) bool { + n := uint64(in) + return n > 0 && n&(n-1) == 0 +} + +// isUint32PowerOfTwo reports whether uint32(n) is a power of 2. 
+func isUint32PowerOfTwo(in int64) bool { + n := uint64(uint32(in)) + return n > 0 && n&(n-1) == 0 +} + +// is32Bit reports whether n can be represented as a signed 32 bit integer. +func is32Bit(n int64) bool { + return n == int64(int32(n)) +} + +// is16Bit reports whether n can be represented as a signed 16 bit integer. +func is16Bit(n int64) bool { + return n == int64(int16(n)) +} + +// is8Bit reports whether n can be represented as a signed 8 bit integer. +func is8Bit(n int64) bool { + return n == int64(int8(n)) +} + +// isU8Bit reports whether n can be represented as an unsigned 8 bit integer. +func isU8Bit(n int64) bool { + return n == int64(uint8(n)) +} + +// isU12Bit reports whether n can be represented as an unsigned 12 bit integer. +func isU12Bit(n int64) bool { + return 0 <= n && n < (1<<12) +} + +// isU16Bit reports whether n can be represented as an unsigned 16 bit integer. +func isU16Bit(n int64) bool { + return n == int64(uint16(n)) +} + +// isU32Bit reports whether n can be represented as an unsigned 32 bit integer. +func isU32Bit(n int64) bool { + return n == int64(uint32(n)) +} + +// is20Bit reports whether n can be represented as a signed 20 bit integer. +func is20Bit(n int64) bool { + return -(1<<19) <= n && n < (1<<19) +} + +// b2i translates a boolean value to 0 or 1 for assigning to auxInt. +func b2i(b bool) int64 { + if b { + return 1 + } + return 0 +} + +// b2i32 translates a boolean value to 0 or 1. +func b2i32(b bool) int32 { + if b { + return 1 + } + return 0 +} + +// shiftIsBounded reports whether (left/right) shift Value v is known to be bounded. +// A shift is bounded if it is shifting by less than the width of the shifted value. +func shiftIsBounded(v *Value) bool { + return v.AuxInt != 0 +} + +// canonLessThan returns whether x is "ordered" less than y, for purposes of normalizing +// generated code as much as possible. 
+func canonLessThan(x, y *Value) bool { + if x.Op != y.Op { + return x.Op < y.Op + } + if !x.Pos.SameFileAndLine(y.Pos) { + return x.Pos.Before(y.Pos) + } + return x.ID < y.ID +} + +// truncate64Fto32F converts a float64 value to a float32 preserving the bit pattern +// of the mantissa. It will panic if the truncation results in lost information. +func truncate64Fto32F(f float64) float32 { + if !isExactFloat32(f) { + panic("truncate64Fto32F: truncation is not exact") + } + if !math.IsNaN(f) { + return float32(f) + } + // NaN bit patterns aren't necessarily preserved across conversion + // instructions so we need to do the conversion manually. + b := math.Float64bits(f) + m := b & ((1 << 52) - 1) // mantissa (a.k.a. significand) + // | sign | exponent | mantissa | + r := uint32(((b >> 32) & (1 << 31)) | 0x7f800000 | (m >> (52 - 23))) + return math.Float32frombits(r) +} + +// extend32Fto64F converts a float32 value to a float64 value preserving the bit +// pattern of the mantissa. +func extend32Fto64F(f float32) float64 { + if !math.IsNaN(float64(f)) { + return float64(f) + } + // NaN bit patterns aren't necessarily preserved across conversion + // instructions so we need to do the conversion manually. + b := uint64(math.Float32bits(f)) + // | sign | exponent | mantissa | + r := ((b << 32) & (1 << 63)) | (0x7ff << 52) | ((b & 0x7fffff) << (52 - 23)) + return math.Float64frombits(r) +} + +// DivisionNeedsFixUp reports whether the division needs fix-up code. +func DivisionNeedsFixUp(v *Value) bool { + return v.AuxInt == 0 +} + +// auxFrom64F encodes a float64 value so it can be stored in an AuxInt. +func auxFrom64F(f float64) int64 { + if f != f { + panic("can't encode a NaN in AuxInt field") + } + return int64(math.Float64bits(f)) +} + +// auxFrom32F encodes a float32 value so it can be stored in an AuxInt. 
+func auxFrom32F(f float32) int64 { + if f != f { + panic("can't encode a NaN in AuxInt field") + } + return int64(math.Float64bits(extend32Fto64F(f))) +} + +// auxTo32F decodes a float32 from the AuxInt value provided. +func auxTo32F(i int64) float32 { + return truncate64Fto32F(math.Float64frombits(uint64(i))) +} + +// auxTo64F decodes a float64 from the AuxInt value provided. +func auxTo64F(i int64) float64 { + return math.Float64frombits(uint64(i)) +} + +func auxIntToBool(i int64) bool { + if i == 0 { + return false + } + return true +} +func auxIntToInt8(i int64) int8 { + return int8(i) +} +func auxIntToInt16(i int64) int16 { + return int16(i) +} +func auxIntToInt32(i int64) int32 { + return int32(i) +} +func auxIntToInt64(i int64) int64 { + return i +} +func auxIntToUint8(i int64) uint8 { + return uint8(i) +} +func auxIntToFloat32(i int64) float32 { + return float32(math.Float64frombits(uint64(i))) +} +func auxIntToFloat64(i int64) float64 { + return math.Float64frombits(uint64(i)) +} +func auxIntToValAndOff(i int64) ValAndOff { + return ValAndOff(i) +} +func auxIntToArm64BitField(i int64) arm64BitField { + return arm64BitField(i) +} +func auxIntToInt128(x int64) int128 { + if x != 0 { + panic("nonzero int128 not allowed") + } + return 0 +} +func auxIntToFlagConstant(x int64) flagConstant { + return flagConstant(x) +} + +func auxIntToOp(cc int64) Op { + return Op(cc) +} + +func boolToAuxInt(b bool) int64 { + if b { + return 1 + } + return 0 +} +func int8ToAuxInt(i int8) int64 { + return int64(i) +} +func int16ToAuxInt(i int16) int64 { + return int64(i) +} +func int32ToAuxInt(i int32) int64 { + return int64(i) +} +func int64ToAuxInt(i int64) int64 { + return int64(i) +} +func uint8ToAuxInt(i uint8) int64 { + return int64(int8(i)) +} +func float32ToAuxInt(f float32) int64 { + return int64(math.Float64bits(float64(f))) +} +func float64ToAuxInt(f float64) int64 { + return int64(math.Float64bits(f)) +} +func valAndOffToAuxInt(v ValAndOff) int64 { + return int64(v) 
+} +func arm64BitFieldToAuxInt(v arm64BitField) int64 { + return int64(v) +} +func int128ToAuxInt(x int128) int64 { + if x != 0 { + panic("nonzero int128 not allowed") + } + return 0 +} +func flagConstantToAuxInt(x flagConstant) int64 { + return int64(x) +} + +func opToAuxInt(o Op) int64 { + return int64(o) +} + +// Aux is an interface to hold miscellaneous data in Blocks and Values. +type Aux interface { + CanBeAnSSAAux() +} + +// for now only used to mark moves that need to avoid clobbering flags +type auxMark bool + +func (auxMark) CanBeAnSSAAux() {} + +var AuxMark auxMark + +// stringAux wraps string values for use in Aux. +type stringAux string + +func (stringAux) CanBeAnSSAAux() {} + +func auxToString(i Aux) string { + return string(i.(stringAux)) +} +func auxToSym(i Aux) Sym { + // TODO: kind of a hack - allows nil interface through + s, _ := i.(Sym) + return s +} +func auxToType(i Aux) *types.Type { + return i.(*types.Type) +} +func auxToCall(i Aux) *AuxCall { + return i.(*AuxCall) +} +func auxToS390xCCMask(i Aux) s390x.CCMask { + return i.(s390x.CCMask) +} +func auxToS390xRotateParams(i Aux) s390x.RotateParams { + return i.(s390x.RotateParams) +} + +func StringToAux(s string) Aux { + return stringAux(s) +} +func symToAux(s Sym) Aux { + return s +} +func callToAux(s *AuxCall) Aux { + return s +} +func typeToAux(t *types.Type) Aux { + return t +} +func s390xCCMaskToAux(c s390x.CCMask) Aux { + return c +} +func s390xRotateParamsToAux(r s390x.RotateParams) Aux { + return r +} + +// uaddOvf reports whether unsigned a+b would overflow. +func uaddOvf(a, b int64) bool { + return uint64(a)+uint64(b) < uint64(a) +} + +// loadLSymOffset simulates reading a word at an offset into a +// read-only symbol's runtime memory. If it would read a pointer to +// another symbol, that symbol is returned. Otherwise, it returns nil. 
+func loadLSymOffset(lsym *obj.LSym, offset int64) *obj.LSym { + if lsym.Type != objabi.SRODATA { + return nil + } + + for _, r := range lsym.R { + if int64(r.Off) == offset && r.Type&^objabi.R_WEAK == objabi.R_ADDR && r.Add == 0 { + return r.Sym + } + } + + return nil +} + +func devirtLECall(v *Value, sym *obj.LSym) *Value { + v.Op = OpStaticLECall + auxcall := v.Aux.(*AuxCall) + auxcall.Fn = sym + // Remove first arg + v.Args[0].Uses-- + copy(v.Args[0:], v.Args[1:]) + v.Args[len(v.Args)-1] = nil // aid GC + v.Args = v.Args[:len(v.Args)-1] + if f := v.Block.Func; f.pass.debug > 0 { + f.Warnl(v.Pos, "de-virtualizing call") + } + return v +} + +// isSamePtr reports whether p1 and p2 point to the same address. +func isSamePtr(p1, p2 *Value) bool { + if p1 == p2 { + return true + } + if p1.Op != p2.Op { + return false + } + switch p1.Op { + case OpOffPtr: + return p1.AuxInt == p2.AuxInt && isSamePtr(p1.Args[0], p2.Args[0]) + case OpAddr, OpLocalAddr: + return p1.Aux == p2.Aux + case OpAddPtr: + return p1.Args[1] == p2.Args[1] && isSamePtr(p1.Args[0], p2.Args[0]) + } + return false +} + +func isStackPtr(v *Value) bool { + for v.Op == OpOffPtr || v.Op == OpAddPtr { + v = v.Args[0] + } + return v.Op == OpSP || v.Op == OpLocalAddr +} + +// disjoint reports whether the memory region specified by [p1:p1+n1) +// does not overlap with [p2:p2+n2). +// A return value of false does not imply the regions overlap. 
func disjoint(p1 *Value, n1 int64, p2 *Value, n2 int64) bool {
	// Empty regions never overlap.
	if n1 == 0 || n2 == 0 {
		return true
	}
	if p1 == p2 {
		return false
	}
	// baseAndOffset strips OpOffPtr chains (and a nil-check wrapper)
	// to find the underlying base pointer and the accumulated offset.
	baseAndOffset := func(ptr *Value) (base *Value, offset int64) {
		base, offset = ptr, 0
		for base.Op == OpOffPtr {
			offset += base.AuxInt
			base = base.Args[0]
		}
		if opcodeTable[base.Op].nilCheck {
			base = base.Args[0]
		}
		return base, offset
	}
	p1, off1 := baseAndOffset(p1)
	p2, off2 := baseAndOffset(p2)
	if isSamePtr(p1, p2) {
		return !overlap(off1, n1, off2, n2)
	}
	// p1 and p2 are not the same, so if they are both OpAddrs then
	// they point to different variables.
	// If one pointer is on the stack and the other is an argument
	// then they can't overlap.
	switch p1.Op {
	case OpAddr, OpLocalAddr:
		if p2.Op == OpAddr || p2.Op == OpLocalAddr || p2.Op == OpSP {
			return true
		}
		return (p2.Op == OpArg || p2.Op == OpArgIntReg) && p1.Args[0].Op == OpSP
	case OpArg, OpArgIntReg:
		if p2.Op == OpSP || p2.Op == OpLocalAddr {
			return true
		}
	case OpSP:
		return p2.Op == OpAddr || p2.Op == OpLocalAddr || p2.Op == OpArg || p2.Op == OpArgIntReg || p2.Op == OpSP
	}
	return false
}

// moveSize returns the number of bytes an aligned MOV instruction moves.
func moveSize(align int64, c *Config) int64 {
	switch {
	case align%8 == 0 && c.PtrSize == 8:
		return 8
	case align%4 == 0:
		return 4
	case align%2 == 0:
		return 2
	}
	return 1
}

// mergePoint finds a block among a's blocks which dominates b and is itself
// dominated by all of a's blocks. Returns nil if it can't find one.
// Might return nil even if one does exist.
func mergePoint(b *Block, a ...*Value) *Block {
	// Walk backward from b looking for one of the a's blocks.

	// Max distance
	d := 100

	for d > 0 {
		for _, x := range a {
			if b == x.Block {
				goto found
			}
		}
		if len(b.Preds) > 1 {
			// Don't know which way to go back. Abort.
			return nil
		}
		b = b.Preds[0].b
		d--
	}
	return nil // too far away
found:
	// At this point, r is the block of the first value in a that we
	// found by walking backwards. If we return anything, r will be it.
	r := b

	// Keep going, counting the other a's that we find. They must all dominate r.
	na := 0
	for d > 0 {
		for _, x := range a {
			if b == x.Block {
				na++
			}
		}
		if na == len(a) {
			// Found all of a in a backwards walk. We can return r.
			return r
		}
		if len(b.Preds) > 1 {
			return nil
		}
		b = b.Preds[0].b
		d--
	}
	return nil // too far away
}

// clobber invalidates values. Returns true.
// clobber is used by rewrite rules to:
//
//	A) make sure the values are really dead and never used again.
//	B) decrement use counts of the values' args.
func clobber(vv ...*Value) bool {
	for _, v := range vv {
		v.reset(OpInvalid)
		// Note: leave v.Block intact. The Block field is used after clobber.
	}
	return true
}

// clobberIfDead resets v when use count is 1. Returns true.
// clobberIfDead is used by rewrite rules to decrement
// use counts of v's args when v is dead and never used.
func clobberIfDead(v *Value) bool {
	if v.Uses == 1 {
		v.reset(OpInvalid)
	}
	// Note: leave v.Block intact. The Block field is used after clobberIfDead.
	return true
}

// noteRule is an easy way to track if a rule is matched when writing
// new ones. Make the rule of interest also conditional on
//
//	noteRule("note to self: rule of interest matched")
//
// and that message will print when the rule matches.
func noteRule(s string) bool {
	fmt.Println(s)
	return true
}

// countRule increments Func.ruleMatches[key].
// If Func.ruleMatches is non-nil at the end
// of compilation, it will be printed to stdout.
// This is intended to make it easier to find functions
// that contain lots of rule matches when developing new rules.
func countRule(v *Value, key string) bool {
	f := v.Block.Func
	// Lazily allocate the per-function counter map.
	if f.ruleMatches == nil {
		f.ruleMatches = make(map[string]int)
	}
	f.ruleMatches[key]++
	return true
}

// warnRule generates compiler debug output with string s when
// v is not in autogenerated code, cond is true and the rule has fired.
func warnRule(cond bool, v *Value, s string) bool {
	// Line 1 is used for autogenerated code, so skip it.
	if pos := v.Pos; pos.Line() > 1 && cond {
		v.Block.Func.Warnl(pos, s)
	}
	return true
}

// flagArg extracts x from a pseudo-op like (LessThan x).
// It returns nil unless v has exactly one argument and that
// argument has flags type.
func flagArg(v *Value) *Value {
	if len(v.Args) != 1 || !v.Args[0].Type.IsFlags() {
		return nil
	}
	return v.Args[0]
}

// arm64Negate finds the complement to an ARM64 condition code,
// for example !Equal -> NotEqual or !LessThan -> GreaterEqual
//
// For floating point, it's more subtle because NaN is unordered. We do
// !LessThanF -> NotLessThanF, the latter takes care of NaNs.
func arm64Negate(op Op) Op {
	switch op {
	case OpARM64LessThan:
		return OpARM64GreaterEqual
	case OpARM64LessThanU:
		return OpARM64GreaterEqualU
	case OpARM64GreaterThan:
		return OpARM64LessEqual
	case OpARM64GreaterThanU:
		return OpARM64LessEqualU
	case OpARM64LessEqual:
		return OpARM64GreaterThan
	case OpAR64LessEqualU:
		return OpARM64GreaterThanU
	case OpARM64GreaterEqual:
		return OpARM64LessThan
	case OpARM64GreaterEqualU:
		return OpARM64LessThanU
	case OpARM64Equal:
		return OpARM64NotEqual
	case OpARM64NotEqual:
		return OpARM64Equal
	case OpARM64LessThanF:
		return OpARM64NotLessThanF
	case OpARM64NotLessThanF:
		return OpARM64LessThanF
	case OpARM64LessEqualF:
		return OpARM64NotLessEqualF
	case OpARM64NotLessEqualF:
		return OpARM64LessEqualF
	case OpARM64GreaterThanF:
		return OpARM64NotGreaterThanF
	case OpARM64NotGreaterThanF:
		return OpARM64GreaterThanF
	case OpARM64GreaterEqualF:
		return OpARM64NotGreaterEqualF
	case OpARM64NotGreaterEqualF:
		return OpARM64GreaterEqualF
	default:
		panic("unreachable")
	}
}

// arm64Invert evaluates (InvertFlags op), which
// is the same as altering the condition codes such
// that the same result would be produced if the arguments
// to the flag-generating instruction were reversed, e.g.
// (InvertFlags (CMP x y)) -> (CMP y x)
func arm64Invert(op Op) Op {
	switch op {
	case OpARM64LessThan:
		return OpARM64GreaterThan
	case OpARM64LessThanU:
		return OpARM64GreaterThanU
	case OpARM64GreaterThan:
		return OpARM64LessThan
	case OpARM64GreaterThanU:
		return OpARM64LessThanU
	case OpARM64LessEqual:
		return OpARM64GreaterEqual
	case OpARM64LessEqualU:
		return OpARM64GreaterEqualU
	case OpARM64GreaterEqual:
		return OpARM64LessEqual
	case OpARM64GreaterEqualU:
		return OpARM64LessEqualU
	case OpARM64Equal, OpARM64NotEqual:
		// Equality is symmetric in the operands.
		return op
	case OpARM64LessThanF:
		return OpARM64GreaterThanF
	case OpARM64GreaterThanF:
		return OpARM64LessThanF
	case OpARM64LessEqualF:
		return OpARM64GreaterEqualF
	case OpARM64GreaterEqualF:
		return OpARM64LessEqualF
	case OpARM64NotLessThanF:
		return OpARM64NotGreaterThanF
	case OpARM64NotGreaterThanF:
		return OpARM64NotLessThanF
	case OpARM64NotLessEqualF:
		return OpARM64NotGreaterEqualF
	case OpARM64NotGreaterEqualF:
		return OpARM64NotLessEqualF
	default:
		panic("unreachable")
	}
}

// evaluate an ARM64 op against a flags value
// that is potentially constant; return 1 for true,
// -1 for false, and 0 for not constant.
func ccARM64Eval(op Op, flags *Value) int {
	fop := flags.Op
	if fop == OpARM64InvertFlags {
		// Evaluate against the underlying flags and negate the
		// tri-state result (1 <-> -1, 0 stays 0).
		return -ccARM64Eval(op, flags.Args[0])
	}
	if fop != OpARM64FlagConstant {
		// Flags are not a compile-time constant.
		return 0
	}
	fc := flagConstant(flags.AuxInt)
	// b2i maps a known boolean to the 1/-1 tri-state encoding.
	b2i := func(b bool) int {
		if b {
			return 1
		}
		return -1
	}
	switch op {
	case OpARM64Equal:
		return b2i(fc.eq())
	case OpARM64NotEqual:
		return b2i(fc.ne())
	case OpARM64LessThan:
		return b2i(fc.lt())
	case OpARM64LessThanU:
		return b2i(fc.ult())
	case OpARM64GreaterThan:
		return b2i(fc.gt())
	case OpARM64GreaterThanU:
		return b2i(fc.ugt())
	case OpARM64LessEqual:
		return b2i(fc.le())
	case OpARM64LessEqualU:
		return b2i(fc.ule())
	case OpARM64GreaterEqual:
		return b2i(fc.ge())
	case OpARM64GreaterEqualU:
		return b2i(fc.uge())
	}
	return 0
}

// logRule logs the use of the rule s. This will only be enabled if
// rewrite rules were generated with the -log option, see _gen/rulegen.go.
func logRule(s string) {
	if ruleFile == nil {
		// Open a log file to write log to. We open in append
		// mode because all.bash runs the compiler lots of times,
		// and we want the concatenation of all of those logs.
		// This means, of course, that users need to rm the old log
		// to get fresh data.
		// TODO: all.bash runs compilers in parallel. Need to synchronize logging somehow?
		w, err := os.OpenFile(filepath.Join(os.Getenv("GOROOT"), "src", "rulelog"),
			os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
		if err != nil {
			panic(err)
		}
		ruleFile = w
	}
	_, err := fmt.Fprintln(ruleFile, s)
	if err != nil {
		panic(err)
	}
}

// ruleFile is the lazily opened destination for logRule output.
var ruleFile io.Writer

// min returns the smaller of x and y.
func min(x, y int64) int64 {
	if x < y {
		return x
	}
	return y
}

// max returns the larger of x and y.
func max(x, y int64) int64 {
	if x > y {
		return x
	}
	return y
}

// isConstZero reports whether v is a compile-time constant with value zero.
func isConstZero(v *Value) bool {
	switch v.Op {
	case OpConstNil:
		return true
	case OpConst64, OpConst32, OpConst16, OpConst8, OpConstBool, OpConst32F, OpConst64F:
		return v.AuxInt == 0
	}
	return false
}

// reciprocalExact64 reports whether 1/c is exactly representable.
func reciprocalExact64(c float64) bool {
	b := math.Float64bits(c)
	man := b & (1<<52 - 1)
	if man != 0 {
		return false // not a power of 2, denormal, or NaN
	}
	exp := b >> 52 & (1<<11 - 1)
	// exponent bias is 0x3ff. So taking the reciprocal of a number
	// changes the exponent to 0x7fe-exp.
	switch exp {
	case 0:
		return false // ±0
	case 0x7ff:
		return false // ±inf
	case 0x7fe:
		return false // exponent is not representable
	default:
		return true
	}
}

// reciprocalExact32 reports whether 1/c is exactly representable.
func reciprocalExact32(c float32) bool {
	b := math.Float32bits(c)
	man := b & (1<<23 - 1)
	if man != 0 {
		return false // not a power of 2, denormal, or NaN
	}
	exp := b >> 23 & (1<<8 - 1)
	// exponent bias is 0x7f. So taking the reciprocal of a number
	// changes the exponent to 0xfe-exp.
	switch exp {
	case 0:
		return false // ±0
	case 0xff:
		return false // ±inf
	case 0xfe:
		return false // exponent is not representable
	default:
		return true
	}
}

// check if an immediate can be directly encoded into an ARM's instruction.
+func isARMImmRot(v uint32) bool { + for i := 0; i < 16; i++ { + if v&^0xff == 0 { + return true + } + v = v<<2 | v>>30 + } + + return false +} + +// overlap reports whether the ranges given by the given offset and +// size pairs overlap. +func overlap(offset1, size1, offset2, size2 int64) bool { + if offset1 >= offset2 && offset2+size2 > offset1 { + return true + } + if offset2 >= offset1 && offset1+size1 > offset2 { + return true + } + return false +} + +func areAdjacentOffsets(off1, off2, size int64) bool { + return off1+size == off2 || off1 == off2+size +} + +// check if value zeroes out upper 32-bit of 64-bit register. +// depth limits recursion depth. In AMD64.rules 3 is used as limit, +// because it catches same amount of cases as 4. +func zeroUpper32Bits(x *Value, depth int) bool { + switch x.Op { + case OpAMD64MOVLconst, OpAMD64MOVLload, OpAMD64MOVLQZX, OpAMD64MOVLloadidx1, + OpAMD64MOVWload, OpAMD64MOVWloadidx1, OpAMD64MOVBload, OpAMD64MOVBloadidx1, + OpAMD64MOVLloadidx4, OpAMD64ADDLload, OpAMD64SUBLload, OpAMD64ANDLload, + OpAMD64ORLload, OpAMD64XORLload, OpAMD64CVTTSD2SL, + OpAMD64ADDL, OpAMD64ADDLconst, OpAMD64SUBL, OpAMD64SUBLconst, + OpAMD64ANDL, OpAMD64ANDLconst, OpAMD64ORL, OpAMD64ORLconst, + OpAMD64XORL, OpAMD64XORLconst, OpAMD64NEGL, OpAMD64NOTL, + OpAMD64SHRL, OpAMD64SHRLconst, OpAMD64SARL, OpAMD64SARLconst, + OpAMD64SHLL, OpAMD64SHLLconst: + return true + case OpARM64REV16W, OpARM64REVW, OpARM64RBITW, OpARM64CLZW, OpARM64EXTRWconst, + OpARM64MULW, OpARM64MNEGW, OpARM64UDIVW, OpARM64DIVW, OpARM64UMODW, + OpARM64MADDW, OpARM64MSUBW, OpARM64RORW, OpARM64RORWconst: + return true + case OpArg: // note: but not ArgIntReg + // amd64 always loads args from the stack unsigned. + // most other architectures load them sign/zero extended based on the type. 
+ return x.Type.Size() == 4 && (x.Type.IsUnsigned() || x.Block.Func.Config.arch == "amd64") + case OpPhi, OpSelect0, OpSelect1: + // Phis can use each-other as an arguments, instead of tracking visited values, + // just limit recursion depth. + if depth <= 0 { + return false + } + for i := range x.Args { + if !zeroUpper32Bits(x.Args[i], depth-1) { + return false + } + } + return true + + } + return false +} + +// zeroUpper48Bits is similar to zeroUpper32Bits, but for upper 48 bits. +func zeroUpper48Bits(x *Value, depth int) bool { + switch x.Op { + case OpAMD64MOVWQZX, OpAMD64MOVWload, OpAMD64MOVWloadidx1, OpAMD64MOVWloadidx2: + return true + case OpArg: // note: but not ArgIntReg + return x.Type.Size() == 2 && (x.Type.IsUnsigned() || x.Block.Func.Config.arch == "amd64") + case OpPhi, OpSelect0, OpSelect1: + // Phis can use each-other as an arguments, instead of tracking visited values, + // just limit recursion depth. + if depth <= 0 { + return false + } + for i := range x.Args { + if !zeroUpper48Bits(x.Args[i], depth-1) { + return false + } + } + return true + + } + return false +} + +// zeroUpper56Bits is similar to zeroUpper32Bits, but for upper 56 bits. +func zeroUpper56Bits(x *Value, depth int) bool { + switch x.Op { + case OpAMD64MOVBQZX, OpAMD64MOVBload, OpAMD64MOVBloadidx1: + return true + case OpArg: // note: but not ArgIntReg + return x.Type.Size() == 1 && (x.Type.IsUnsigned() || x.Block.Func.Config.arch == "amd64") + case OpPhi, OpSelect0, OpSelect1: + // Phis can use each-other as an arguments, instead of tracking visited values, + // just limit recursion depth. 
+ if depth <= 0 { + return false + } + for i := range x.Args { + if !zeroUpper56Bits(x.Args[i], depth-1) { + return false + } + } + return true + + } + return false +} + +func isInlinableMemclr(c *Config, sz int64) bool { + if sz < 0 { + return false + } + // TODO: expand this check to allow other architectures + // see CL 454255 and issue 56997 + switch c.arch { + case "amd64", "arm64": + return true + case "ppc64le", "ppc64": + return sz < 512 + } + return false +} + +// isInlinableMemmove reports whether the given arch performs a Move of the given size +// faster than memmove. It will only return true if replacing the memmove with a Move is +// safe, either because Move will do all of its loads before any of its stores, or +// because the arguments are known to be disjoint. +// This is used as a check for replacing memmove with Move ops. +func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool { + // It is always safe to convert memmove into Move when its arguments are disjoint. + // Move ops may or may not be faster for large sizes depending on how the platform + // lowers them, so we only perform this optimization on platforms that we know to + // have fast Move ops. + switch c.arch { + case "amd64": + return sz <= 16 || (sz < 1024 && disjoint(dst, sz, src, sz)) + case "386", "arm64": + return sz <= 8 + case "s390x", "ppc64", "ppc64le": + return sz <= 8 || disjoint(dst, sz, src, sz) + case "arm", "loong64", "mips", "mips64", "mipsle", "mips64le": + return sz <= 4 + } + return false +} +func IsInlinableMemmove(dst, src *Value, sz int64, c *Config) bool { + return isInlinableMemmove(dst, src, sz, c) +} + +// logLargeCopy logs the occurrence of a large copy. +// The best place to do this is in the rewrite rules where the size of the move is easy to find. +// "Large" is arbitrarily chosen to be 128 bytes; this may change. 
+func logLargeCopy(v *Value, s int64) bool { + if s < 128 { + return true + } + if logopt.Enabled() { + logopt.LogOpt(v.Pos, "copy", "lower", v.Block.Func.Name, fmt.Sprintf("%d bytes", s)) + } + return true +} +func LogLargeCopy(funcName string, pos src.XPos, s int64) { + if s < 128 { + return + } + if logopt.Enabled() { + logopt.LogOpt(pos, "copy", "lower", funcName, fmt.Sprintf("%d bytes", s)) + } +} + +// hasSmallRotate reports whether the architecture has rotate instructions +// for sizes < 32-bit. This is used to decide whether to promote some rotations. +func hasSmallRotate(c *Config) bool { + switch c.arch { + case "amd64", "386": + return true + default: + return false + } +} + +func supportsPPC64PCRel() bool { + // PCRel is currently supported for >= power10, linux only + // Internal and external linking supports this on ppc64le; internal linking on ppc64. + return buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux" +} + +func newPPC64ShiftAuxInt(sh, mb, me, sz int64) int32 { + if sh < 0 || sh >= sz { + panic("PPC64 shift arg sh out of range") + } + if mb < 0 || mb >= sz { + panic("PPC64 shift arg mb out of range") + } + if me < 0 || me >= sz { + panic("PPC64 shift arg me out of range") + } + return int32(sh<<16 | mb<<8 | me) +} + +func GetPPC64Shiftsh(auxint int64) int64 { + return int64(int8(auxint >> 16)) +} + +func GetPPC64Shiftmb(auxint int64) int64 { + return int64(int8(auxint >> 8)) +} + +func GetPPC64Shiftme(auxint int64) int64 { + return int64(int8(auxint)) +} + +// Test if this value can encoded as a mask for a rlwinm like +// operation. Masks can also extend from the msb and wrap to +// the lsb too. That is, the valid masks are 32 bit strings +// of the form: 0..01..10..0 or 1..10..01..1 or 1...1 +func isPPC64WordRotateMask(v64 int64) bool { + // Isolate rightmost 1 (if none 0) and add. + v := uint32(v64) + vp := (v & -v) + v + // Likewise, for the wrapping case. 
+ vn := ^v + vpn := (vn & -vn) + vn + return (v&vp == 0 || vn&vpn == 0) && v != 0 +} + +// Compress mask and shift into single value of the form +// me | mb<<8 | rotate<<16 | nbits<<24 where me and mb can +// be used to regenerate the input mask. +func encodePPC64RotateMask(rotate, mask, nbits int64) int64 { + var mb, me, mbn, men int + + // Determine boundaries and then decode them + if mask == 0 || ^mask == 0 || rotate >= nbits { + panic(fmt.Sprintf("invalid PPC64 rotate mask: %x %d %d", uint64(mask), rotate, nbits)) + } else if nbits == 32 { + mb = bits.LeadingZeros32(uint32(mask)) + me = 32 - bits.TrailingZeros32(uint32(mask)) + mbn = bits.LeadingZeros32(^uint32(mask)) + men = 32 - bits.TrailingZeros32(^uint32(mask)) + } else { + mb = bits.LeadingZeros64(uint64(mask)) + me = 64 - bits.TrailingZeros64(uint64(mask)) + mbn = bits.LeadingZeros64(^uint64(mask)) + men = 64 - bits.TrailingZeros64(^uint64(mask)) + } + // Check for a wrapping mask (e.g bits at 0 and 63) + if mb == 0 && me == int(nbits) { + // swap the inverted values + mb, me = men, mbn + } + + return int64(me) | int64(mb<<8) | int64(rotate<<16) | int64(nbits<<24) +} + +// Merge (RLDICL [encoded] (SRDconst [s] x)) into (RLDICL [new_encoded] x) +// SRDconst on PPC64 is an extended mnemonic of RLDICL. If the input to an +// RLDICL is an SRDconst, and the RLDICL does not rotate its value, the two +// operations can be combined. This functions assumes the two opcodes can +// be merged, and returns an encoded rotate+mask value of the combined RLDICL. +func mergePPC64RLDICLandSRDconst(encoded, s int64) int64 { + mb := s + r := 64 - s + // A larger mb is a smaller mask. + if (encoded>>8)&0xFF < mb { + encoded = (encoded &^ 0xFF00) | mb<<8 + } + // The rotate is expected to be 0. + if (encoded & 0xFF0000) != 0 { + panic("non-zero rotate") + } + return encoded | r<<16 +} + +// DecodePPC64RotateMask is the inverse operation of encodePPC64RotateMask. 
The values returned as +// mb and me satisfy the POWER ISA definition of MASK(x,y) where MASK(mb,me) = mask. +func DecodePPC64RotateMask(sauxint int64) (rotate, mb, me int64, mask uint64) { + auxint := uint64(sauxint) + rotate = int64((auxint >> 16) & 0xFF) + mb = int64((auxint >> 8) & 0xFF) + me = int64((auxint >> 0) & 0xFF) + nbits := int64((auxint >> 24) & 0xFF) + mask = ((1 << uint(nbits-mb)) - 1) ^ ((1 << uint(nbits-me)) - 1) + if mb > me { + mask = ^mask + } + if nbits == 32 { + mask = uint64(uint32(mask)) + } + + // Fixup ME to match ISA definition. The second argument to MASK(..,me) + // is inclusive. + me = (me - 1) & (nbits - 1) + return +} + +// This verifies that the mask is a set of +// consecutive bits including the least +// significant bit. +func isPPC64ValidShiftMask(v int64) bool { + if (v != 0) && ((v+1)&v) == 0 { + return true + } + return false +} + +func getPPC64ShiftMaskLength(v int64) int64 { + return int64(bits.Len64(uint64(v))) +} + +// Decompose a shift right into an equivalent rotate/mask, +// and return mask & m. +func mergePPC64RShiftMask(m, s, nbits int64) int64 { + smask := uint64((1<> uint(s) + return m & int64(smask) +} + +// Combine (ANDconst [m] (SRWconst [s])) into (RLWINM [y]) or return 0 +func mergePPC64AndSrwi(m, s int64) int64 { + mask := mergePPC64RShiftMask(m, s, 32) + if !isPPC64WordRotateMask(mask) { + return 0 + } + return encodePPC64RotateMask((32-s)&31, mask, 32) +} + +// Test if a shift right feeding into a CLRLSLDI can be merged into RLWINM. +// Return the encoded RLWINM constant, or 0 if they cannot be merged. +func mergePPC64ClrlsldiSrw(sld, srw int64) int64 { + mask_1 := uint64(0xFFFFFFFF >> uint(srw)) + // for CLRLSLDI, it's more convenient to think of it as a mask left bits then rotate left. + mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld))) + + // Rewrite mask to apply after the final left shift. 
+ mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(sld)) + + r_1 := 32 - srw + r_2 := GetPPC64Shiftsh(sld) + r_3 := (r_1 + r_2) & 31 // This can wrap. + + if uint64(uint32(mask_3)) != mask_3 || mask_3 == 0 { + return 0 + } + return encodePPC64RotateMask(int64(r_3), int64(mask_3), 32) +} + +// Test if a RLWINM feeding into a CLRLSLDI can be merged into RLWINM. Return +// the encoded RLWINM constant, or 0 if they cannot be merged. +func mergePPC64ClrlsldiRlwinm(sld int32, rlw int64) int64 { + r_1, _, _, mask_1 := DecodePPC64RotateMask(rlw) + // for CLRLSLDI, it's more convenient to think of it as a mask left bits then rotate left. + mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld))) + + // combine the masks, and adjust for the final left shift. + mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(int64(sld))) + r_2 := GetPPC64Shiftsh(int64(sld)) + r_3 := (r_1 + r_2) & 31 // This can wrap. + + // Verify the result is still a valid bitmask of <= 32 bits. + if !isPPC64WordRotateMask(int64(mask_3)) || uint64(uint32(mask_3)) != mask_3 { + return 0 + } + return encodePPC64RotateMask(r_3, int64(mask_3), 32) +} + +// Compute the encoded RLWINM constant from combining (SLDconst [sld] (SRWconst [srw] x)), +// or return 0 if they cannot be combined. +func mergePPC64SldiSrw(sld, srw int64) int64 { + if sld > srw || srw >= 32 { + return 0 + } + mask_r := uint32(0xFFFFFFFF) >> uint(srw) + mask_l := uint32(0xFFFFFFFF) >> uint(sld) + mask := (mask_r & mask_l) << uint(sld) + return encodePPC64RotateMask((32-srw+sld)&31, int64(mask), 32) +} + +// Convert a PPC64 opcode from the Op to OpCC form. This converts (op x y) +// to (Select0 (opCC x y)) without having to explicitly fixup every user +// of op. 
+// +// E.g consider the case: +// a = (ADD x y) +// b = (CMPconst [0] a) +// c = (OR a z) +// +// A rule like (CMPconst [0] (ADD x y)) => (CMPconst [0] (Select0 (ADDCC x y))) +// would produce: +// a = (ADD x y) +// a' = (ADDCC x y) +// a” = (Select0 a') +// b = (CMPconst [0] a”) +// c = (OR a z) +// +// which makes it impossible to rewrite the second user. Instead the result +// of this conversion is: +// a' = (ADDCC x y) +// a = (Select0 a') +// b = (CMPconst [0] a) +// c = (OR a z) +// +// Which makes it trivial to rewrite b using a lowering rule. +func convertPPC64OpToOpCC(op *Value) *Value { + ccOpMap := map[Op]Op{ + OpPPC64ADD: OpPPC64ADDCC, + OpPPC64ADDconst: OpPPC64ADDCCconst, + OpPPC64AND: OpPPC64ANDCC, + OpPPC64ANDN: OpPPC64ANDNCC, + OpPPC64CNTLZD: OpPPC64CNTLZDCC, + OpPPC64OR: OpPPC64ORCC, + OpPPC64SUB: OpPPC64SUBCC, + OpPPC64NEG: OpPPC64NEGCC, + OpPPC64NOR: OpPPC64NORCC, + OpPPC64XOR: OpPPC64XORCC, + } + b := op.Block + opCC := b.NewValue0I(op.Pos, ccOpMap[op.Op], types.NewTuple(op.Type, types.TypeFlags), op.AuxInt) + opCC.AddArgs(op.Args...) + op.reset(OpSelect0) + op.AddArgs(opCC) + return op +} + +// Convenience function to rotate a 32 bit constant value by another constant. +func rotateLeft32(v, rotate int64) int64 { + return int64(bits.RotateLeft32(uint32(v), int(rotate))) +} + +func rotateRight64(v, rotate int64) int64 { + return int64(bits.RotateLeft64(uint64(v), int(-rotate))) +} + +// encodes the lsb and width for arm(64) bitfield ops into the expected auxInt format. +func armBFAuxInt(lsb, width int64) arm64BitField { + if lsb < 0 || lsb > 63 { + panic("ARM(64) bit field lsb constant out of range") + } + if width < 1 || lsb+width > 64 { + panic("ARM(64) bit field width constant out of range") + } + return arm64BitField(width | lsb<<8) +} + +// returns the lsb part of the auxInt field of arm64 bitfield ops. 
+func (bfc arm64BitField) getARM64BFlsb() int64 { + return int64(uint64(bfc) >> 8) +} + +// returns the width part of the auxInt field of arm64 bitfield ops. +func (bfc arm64BitField) getARM64BFwidth() int64 { + return int64(bfc) & 0xff +} + +// checks if mask >> rshift applied at lsb is a valid arm64 bitfield op mask. +func isARM64BFMask(lsb, mask, rshift int64) bool { + shiftedMask := int64(uint64(mask) >> uint64(rshift)) + return shiftedMask != 0 && isPowerOfTwo64(shiftedMask+1) && nto(shiftedMask)+lsb < 64 +} + +// returns the bitfield width of mask >> rshift for arm64 bitfield ops. +func arm64BFWidth(mask, rshift int64) int64 { + shiftedMask := int64(uint64(mask) >> uint64(rshift)) + if shiftedMask == 0 { + panic("ARM64 BF mask is zero") + } + return nto(shiftedMask) +} + +// sizeof returns the size of t in bytes. +// It will panic if t is not a *types.Type. +func sizeof(t interface{}) int64 { + return t.(*types.Type).Size() +} + +// registerizable reports whether t is a primitive type that fits in +// a register. It assumes float64 values will always fit into registers +// even if that isn't strictly true. +func registerizable(b *Block, typ *types.Type) bool { + if typ.IsPtrShaped() || typ.IsFloat() || typ.IsBoolean() { + return true + } + if typ.IsInteger() { + return typ.Size() <= b.Func.Config.RegSize + } + return false +} + +// needRaceCleanup reports whether this call to racefuncenter/exit isn't needed. +func needRaceCleanup(sym *AuxCall, v *Value) bool { + f := v.Block.Func + if !f.Config.Race { + return false + } + if !isSameCall(sym, "runtime.racefuncenter") && !isSameCall(sym, "runtime.racefuncexit") { + return false + } + for _, b := range f.Blocks { + for _, v := range b.Values { + switch v.Op { + case OpStaticCall, OpStaticLECall: + // Check for racefuncenter will encounter racefuncexit and vice versa. 
+ // Allow calls to panic* + s := v.Aux.(*AuxCall).Fn.String() + switch s { + case "runtime.racefuncenter", "runtime.racefuncexit", + "runtime.panicdivide", "runtime.panicwrap", + "runtime.panicshift": + continue + } + // If we encountered any call, we need to keep racefunc*, + // for accurate stacktraces. + return false + case OpPanicBounds, OpPanicExtend: + // Note: these are panic generators that are ok (like the static calls above). + case OpClosureCall, OpInterCall, OpClosureLECall, OpInterLECall: + // We must keep the race functions if there are any other call types. + return false + } + } + } + if isSameCall(sym, "runtime.racefuncenter") { + // TODO REGISTER ABI this needs to be cleaned up. + // If we're removing racefuncenter, remove its argument as well. + if v.Args[0].Op != OpStore { + if v.Op == OpStaticLECall { + // there is no store, yet. + return true + } + return false + } + mem := v.Args[0].Args[2] + v.Args[0].reset(OpCopy) + v.Args[0].AddArg(mem) + } + return true +} + +// symIsRO reports whether sym is a read-only global. +func symIsRO(sym interface{}) bool { + lsym := sym.(*obj.LSym) + return lsym.Type == objabi.SRODATA && len(lsym.R) == 0 +} + +// symIsROZero reports whether sym is a read-only global whose data contains all zeros. +func symIsROZero(sym Sym) bool { + lsym := sym.(*obj.LSym) + if lsym.Type != objabi.SRODATA || len(lsym.R) != 0 { + return false + } + for _, b := range lsym.P { + if b != 0 { + return false + } + } + return true +} + +// isFixed32 returns true if the int32 at offset off in symbol sym +// is known and constant. +func isFixed32(c *Config, sym Sym, off int64) bool { + return isFixed(c, sym, off, 4) +} + +// isFixed returns true if the range [off,off+size] of the symbol sym +// is known and constant. 
+func isFixed(c *Config, sym Sym, off, size int64) bool { + lsym := sym.(*obj.LSym) + if lsym.Extra == nil { + return false + } + if _, ok := (*lsym.Extra).(*obj.TypeInfo); ok { + if off == 2*c.PtrSize && size == 4 { + return true // type hash field + } + } + return false +} +func fixed32(c *Config, sym Sym, off int64) int32 { + lsym := sym.(*obj.LSym) + if ti, ok := (*lsym.Extra).(*obj.TypeInfo); ok { + if off == 2*c.PtrSize { + return int32(types.TypeHash(ti.Type.(*types.Type))) + } + } + base.Fatalf("fixed32 data not known for %s:%d", sym, off) + return 0 +} + +// isFixedSym returns true if the contents of sym at the given offset +// is known and is the constant address of another symbol. +func isFixedSym(sym Sym, off int64) bool { + lsym := sym.(*obj.LSym) + switch { + case lsym.Type == objabi.SRODATA: + // itabs, dictionaries + default: + return false + } + for _, r := range lsym.R { + if (r.Type == objabi.R_ADDR || r.Type == objabi.R_WEAKADDR) && int64(r.Off) == off && r.Add == 0 { + return true + } + } + return false +} +func fixedSym(f *Func, sym Sym, off int64) Sym { + lsym := sym.(*obj.LSym) + for _, r := range lsym.R { + if (r.Type == objabi.R_ADDR || r.Type == objabi.R_WEAKADDR) && int64(r.Off) == off { + if strings.HasPrefix(r.Sym.Name, "type:") { + // In case we're loading a type out of a dictionary, we need to record + // that the containing function might put that type in an interface. + // That information is currently recorded in relocations in the dictionary, + // but if we perform this load at compile time then the dictionary + // might be dead. + reflectdata.MarkTypeSymUsedInInterface(r.Sym, f.fe.Func().Linksym()) + } else if strings.HasPrefix(r.Sym.Name, "go:itab") { + // Same, but if we're using an itab we need to record that the + // itab._type might be put in an interface. 
+ reflectdata.MarkTypeSymUsedInInterface(r.Sym, f.fe.Func().Linksym()) + } + return r.Sym + } + } + base.Fatalf("fixedSym data not known for %s:%d", sym, off) + return nil +} + +// read8 reads one byte from the read-only global sym at offset off. +func read8(sym interface{}, off int64) uint8 { + lsym := sym.(*obj.LSym) + if off >= int64(len(lsym.P)) || off < 0 { + // Invalid index into the global sym. + // This can happen in dead code, so we don't want to panic. + // Just return any value, it will eventually get ignored. + // See issue 29215. + return 0 + } + return lsym.P[off] +} + +// read16 reads two bytes from the read-only global sym at offset off. +func read16(sym interface{}, off int64, byteorder binary.ByteOrder) uint16 { + lsym := sym.(*obj.LSym) + // lsym.P is written lazily. + // Bytes requested after the end of lsym.P are 0. + var src []byte + if 0 <= off && off < int64(len(lsym.P)) { + src = lsym.P[off:] + } + buf := make([]byte, 2) + copy(buf, src) + return byteorder.Uint16(buf) +} + +// read32 reads four bytes from the read-only global sym at offset off. +func read32(sym interface{}, off int64, byteorder binary.ByteOrder) uint32 { + lsym := sym.(*obj.LSym) + var src []byte + if 0 <= off && off < int64(len(lsym.P)) { + src = lsym.P[off:] + } + buf := make([]byte, 4) + copy(buf, src) + return byteorder.Uint32(buf) +} + +// read64 reads eight bytes from the read-only global sym at offset off. 
+func read64(sym interface{}, off int64, byteorder binary.ByteOrder) uint64 { + lsym := sym.(*obj.LSym) + var src []byte + if 0 <= off && off < int64(len(lsym.P)) { + src = lsym.P[off:] + } + buf := make([]byte, 8) + copy(buf, src) + return byteorder.Uint64(buf) +} + +// sequentialAddresses reports true if it can prove that x + n == y +func sequentialAddresses(x, y *Value, n int64) bool { + if x == y && n == 0 { + return true + } + if x.Op == Op386ADDL && y.Op == Op386LEAL1 && y.AuxInt == n && y.Aux == nil && + (x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] || + x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) { + return true + } + if x.Op == Op386LEAL1 && y.Op == Op386LEAL1 && y.AuxInt == x.AuxInt+n && x.Aux == y.Aux && + (x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] || + x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) { + return true + } + if x.Op == OpAMD64ADDQ && y.Op == OpAMD64LEAQ1 && y.AuxInt == n && y.Aux == nil && + (x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] || + x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) { + return true + } + if x.Op == OpAMD64LEAQ1 && y.Op == OpAMD64LEAQ1 && y.AuxInt == x.AuxInt+n && x.Aux == y.Aux && + (x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] || + x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) { + return true + } + return false +} + +// flagConstant represents the result of a compile-time comparison. +// The sense of these flags does not necessarily represent the hardware's notion +// of a flags register - these are just a compile-time construct. +// We happen to match the semantics to those of arm/arm64. +// Note that these semantics differ from x86: the carry flag has the opposite +// sense on a subtraction! +// +// On amd64, C=1 represents a borrow, e.g. SBB on amd64 does x - y - C. +// On arm64, C=0 represents a borrow, e.g. SBC on arm64 does x - y - ^C. +// (because it does x + ^y + C). 
+// +// See https://en.wikipedia.org/wiki/Carry_flag#Vs._borrow_flag +type flagConstant uint8 + +// N reports whether the result of an operation is negative (high bit set). +func (fc flagConstant) N() bool { + return fc&1 != 0 +} + +// Z reports whether the result of an operation is 0. +func (fc flagConstant) Z() bool { + return fc&2 != 0 +} + +// C reports whether an unsigned add overflowed (carry), or an +// unsigned subtract did not underflow (borrow). +func (fc flagConstant) C() bool { + return fc&4 != 0 +} + +// V reports whether a signed operation overflowed or underflowed. +func (fc flagConstant) V() bool { + return fc&8 != 0 +} + +func (fc flagConstant) eq() bool { + return fc.Z() +} +func (fc flagConstant) ne() bool { + return !fc.Z() +} +func (fc flagConstant) lt() bool { + return fc.N() != fc.V() +} +func (fc flagConstant) le() bool { + return fc.Z() || fc.lt() +} +func (fc flagConstant) gt() bool { + return !fc.Z() && fc.ge() +} +func (fc flagConstant) ge() bool { + return fc.N() == fc.V() +} +func (fc flagConstant) ult() bool { + return !fc.C() +} +func (fc flagConstant) ule() bool { + return fc.Z() || fc.ult() +} +func (fc flagConstant) ugt() bool { + return !fc.Z() && fc.uge() +} +func (fc flagConstant) uge() bool { + return fc.C() +} + +func (fc flagConstant) ltNoov() bool { + return fc.lt() && !fc.V() +} +func (fc flagConstant) leNoov() bool { + return fc.le() && !fc.V() +} +func (fc flagConstant) gtNoov() bool { + return fc.gt() && !fc.V() +} +func (fc flagConstant) geNoov() bool { + return fc.ge() && !fc.V() +} + +func (fc flagConstant) String() string { + return fmt.Sprintf("N=%v,Z=%v,C=%v,V=%v", fc.N(), fc.Z(), fc.C(), fc.V()) +} + +type flagConstantBuilder struct { + N bool + Z bool + C bool + V bool +} + +func (fcs flagConstantBuilder) encode() flagConstant { + var fc flagConstant + if fcs.N { + fc |= 1 + } + if fcs.Z { + fc |= 2 + } + if fcs.C { + fc |= 4 + } + if fcs.V { + fc |= 8 + } + return fc +} + +// Note: addFlags(x,y) != 
subFlags(x,-y) in some situations: +// - the results of the C flag are different +// - the results of the V flag when y==minint are different + +// addFlags64 returns the flags that would be set from computing x+y. +func addFlags64(x, y int64) flagConstant { + var fcb flagConstantBuilder + fcb.Z = x+y == 0 + fcb.N = x+y < 0 + fcb.C = uint64(x+y) < uint64(x) + fcb.V = x >= 0 && y >= 0 && x+y < 0 || x < 0 && y < 0 && x+y >= 0 + return fcb.encode() +} + +// subFlags64 returns the flags that would be set from computing x-y. +func subFlags64(x, y int64) flagConstant { + var fcb flagConstantBuilder + fcb.Z = x-y == 0 + fcb.N = x-y < 0 + fcb.C = uint64(y) <= uint64(x) // This code follows the arm carry flag model. + fcb.V = x >= 0 && y < 0 && x-y < 0 || x < 0 && y >= 0 && x-y >= 0 + return fcb.encode() +} + +// addFlags32 returns the flags that would be set from computing x+y. +func addFlags32(x, y int32) flagConstant { + var fcb flagConstantBuilder + fcb.Z = x+y == 0 + fcb.N = x+y < 0 + fcb.C = uint32(x+y) < uint32(x) + fcb.V = x >= 0 && y >= 0 && x+y < 0 || x < 0 && y < 0 && x+y >= 0 + return fcb.encode() +} + +// subFlags32 returns the flags that would be set from computing x-y. +func subFlags32(x, y int32) flagConstant { + var fcb flagConstantBuilder + fcb.Z = x-y == 0 + fcb.N = x-y < 0 + fcb.C = uint32(y) <= uint32(x) // This code follows the arm carry flag model. + fcb.V = x >= 0 && y < 0 && x-y < 0 || x < 0 && y >= 0 && x-y >= 0 + return fcb.encode() +} + +// logicFlags64 returns flags set to the sign/zeroness of x. +// C and V are set to false. +func logicFlags64(x int64) flagConstant { + var fcb flagConstantBuilder + fcb.Z = x == 0 + fcb.N = x < 0 + return fcb.encode() +} + +// logicFlags32 returns flags set to the sign/zeroness of x. +// C and V are set to false. 
+func logicFlags32(x int32) flagConstant { + var fcb flagConstantBuilder + fcb.Z = x == 0 + fcb.N = x < 0 + return fcb.encode() +} + +func makeJumpTableSym(b *Block) *obj.LSym { + s := base.Ctxt.Lookup(fmt.Sprintf("%s.jump%d", b.Func.fe.Func().LSym.Name, b.ID)) + // The jump table symbol is accessed only from the function symbol. + s.Set(obj.AttrStatic, true) + return s +} + +// canRotate reports whether the architecture supports +// rotates of integer registers with the given number of bits. +func canRotate(c *Config, bits int64) bool { + if bits > c.PtrSize*8 { + // Don't rewrite to rotates bigger than the machine word. + return false + } + switch c.arch { + case "386", "amd64", "arm64": + return true + case "arm", "s390x", "ppc64", "ppc64le", "wasm", "loong64": + return bits >= 32 + default: + return false + } +} + +// isARM64bitcon reports whether a constant can be encoded into a logical instruction. +func isARM64bitcon(x uint64) bool { + if x == 1<<64-1 || x == 0 { + return false + } + // determine the period and sign-extend a unit to 64 bits + switch { + case x != x>>32|x<<32: + // period is 64 + // nothing to do + case x != x>>16|x<<48: + // period is 32 + x = uint64(int64(int32(x))) + case x != x>>8|x<<56: + // period is 16 + x = uint64(int64(int16(x))) + case x != x>>4|x<<60: + // period is 8 + x = uint64(int64(int8(x))) + default: + // period is 4 or 2, always true + // 0001, 0010, 0100, 1000 -- 0001 rotate + // 0011, 0110, 1100, 1001 -- 0011 rotate + // 0111, 1011, 1101, 1110 -- 0111 rotate + // 0101, 1010 -- 01 rotate, repeat + return true + } + return sequenceOfOnes(x) || sequenceOfOnes(^x) +} + +// sequenceOfOnes tests whether a constant is a sequence of ones in binary, with leading and trailing zeros. +func sequenceOfOnes(x uint64) bool { + y := x & -x // lowest set bit of x. x is good iff x+y is a power of 2 + y += x + return (y-1)&y == 0 +} + +// isARM64addcon reports whether x can be encoded as the immediate value in an ADD or SUB instruction. 
+func isARM64addcon(v int64) bool { + /* uimm12 or uimm24? */ + if v < 0 { + return false + } + if (v & 0xFFF) == 0 { + v >>= 12 + } + return v <= 0xFFF +} + +// setPos sets the position of v to pos, then returns true. +// Useful for setting the result of a rewrite's position to +// something other than the default. +func setPos(v *Value, pos src.XPos) bool { + v.Pos = pos + return true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewrite386.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewrite386.go new file mode 100644 index 0000000000000000000000000000000000000000..b0512676c9e8c5e7f434ba57a2707a3ad53ddf09 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewrite386.go @@ -0,0 +1,11602 @@ +// Code generated from _gen/386.rules using 'go generate'; DO NOT EDIT. + +package ssa + +import "math" +import "cmd/compile/internal/types" + +func rewriteValue386(v *Value) bool { + switch v.Op { + case Op386ADCL: + return rewriteValue386_Op386ADCL(v) + case Op386ADDL: + return rewriteValue386_Op386ADDL(v) + case Op386ADDLcarry: + return rewriteValue386_Op386ADDLcarry(v) + case Op386ADDLconst: + return rewriteValue386_Op386ADDLconst(v) + case Op386ADDLconstmodify: + return rewriteValue386_Op386ADDLconstmodify(v) + case Op386ADDLload: + return rewriteValue386_Op386ADDLload(v) + case Op386ADDLmodify: + return rewriteValue386_Op386ADDLmodify(v) + case Op386ADDSD: + return rewriteValue386_Op386ADDSD(v) + case Op386ADDSDload: + return rewriteValue386_Op386ADDSDload(v) + case Op386ADDSS: + return rewriteValue386_Op386ADDSS(v) + case Op386ADDSSload: + return rewriteValue386_Op386ADDSSload(v) + case Op386ANDL: + return rewriteValue386_Op386ANDL(v) + case Op386ANDLconst: + return rewriteValue386_Op386ANDLconst(v) + case Op386ANDLconstmodify: + return rewriteValue386_Op386ANDLconstmodify(v) + case Op386ANDLload: + return rewriteValue386_Op386ANDLload(v) + case Op386ANDLmodify: + return 
rewriteValue386_Op386ANDLmodify(v) + case Op386CMPB: + return rewriteValue386_Op386CMPB(v) + case Op386CMPBconst: + return rewriteValue386_Op386CMPBconst(v) + case Op386CMPBload: + return rewriteValue386_Op386CMPBload(v) + case Op386CMPL: + return rewriteValue386_Op386CMPL(v) + case Op386CMPLconst: + return rewriteValue386_Op386CMPLconst(v) + case Op386CMPLload: + return rewriteValue386_Op386CMPLload(v) + case Op386CMPW: + return rewriteValue386_Op386CMPW(v) + case Op386CMPWconst: + return rewriteValue386_Op386CMPWconst(v) + case Op386CMPWload: + return rewriteValue386_Op386CMPWload(v) + case Op386DIVSD: + return rewriteValue386_Op386DIVSD(v) + case Op386DIVSDload: + return rewriteValue386_Op386DIVSDload(v) + case Op386DIVSS: + return rewriteValue386_Op386DIVSS(v) + case Op386DIVSSload: + return rewriteValue386_Op386DIVSSload(v) + case Op386LEAL: + return rewriteValue386_Op386LEAL(v) + case Op386LEAL1: + return rewriteValue386_Op386LEAL1(v) + case Op386LEAL2: + return rewriteValue386_Op386LEAL2(v) + case Op386LEAL4: + return rewriteValue386_Op386LEAL4(v) + case Op386LEAL8: + return rewriteValue386_Op386LEAL8(v) + case Op386MOVBLSX: + return rewriteValue386_Op386MOVBLSX(v) + case Op386MOVBLSXload: + return rewriteValue386_Op386MOVBLSXload(v) + case Op386MOVBLZX: + return rewriteValue386_Op386MOVBLZX(v) + case Op386MOVBload: + return rewriteValue386_Op386MOVBload(v) + case Op386MOVBstore: + return rewriteValue386_Op386MOVBstore(v) + case Op386MOVBstoreconst: + return rewriteValue386_Op386MOVBstoreconst(v) + case Op386MOVLload: + return rewriteValue386_Op386MOVLload(v) + case Op386MOVLstore: + return rewriteValue386_Op386MOVLstore(v) + case Op386MOVLstoreconst: + return rewriteValue386_Op386MOVLstoreconst(v) + case Op386MOVSDconst: + return rewriteValue386_Op386MOVSDconst(v) + case Op386MOVSDload: + return rewriteValue386_Op386MOVSDload(v) + case Op386MOVSDstore: + return rewriteValue386_Op386MOVSDstore(v) + case Op386MOVSSconst: + return 
rewriteValue386_Op386MOVSSconst(v) + case Op386MOVSSload: + return rewriteValue386_Op386MOVSSload(v) + case Op386MOVSSstore: + return rewriteValue386_Op386MOVSSstore(v) + case Op386MOVWLSX: + return rewriteValue386_Op386MOVWLSX(v) + case Op386MOVWLSXload: + return rewriteValue386_Op386MOVWLSXload(v) + case Op386MOVWLZX: + return rewriteValue386_Op386MOVWLZX(v) + case Op386MOVWload: + return rewriteValue386_Op386MOVWload(v) + case Op386MOVWstore: + return rewriteValue386_Op386MOVWstore(v) + case Op386MOVWstoreconst: + return rewriteValue386_Op386MOVWstoreconst(v) + case Op386MULL: + return rewriteValue386_Op386MULL(v) + case Op386MULLconst: + return rewriteValue386_Op386MULLconst(v) + case Op386MULLload: + return rewriteValue386_Op386MULLload(v) + case Op386MULSD: + return rewriteValue386_Op386MULSD(v) + case Op386MULSDload: + return rewriteValue386_Op386MULSDload(v) + case Op386MULSS: + return rewriteValue386_Op386MULSS(v) + case Op386MULSSload: + return rewriteValue386_Op386MULSSload(v) + case Op386NEGL: + return rewriteValue386_Op386NEGL(v) + case Op386NOTL: + return rewriteValue386_Op386NOTL(v) + case Op386ORL: + return rewriteValue386_Op386ORL(v) + case Op386ORLconst: + return rewriteValue386_Op386ORLconst(v) + case Op386ORLconstmodify: + return rewriteValue386_Op386ORLconstmodify(v) + case Op386ORLload: + return rewriteValue386_Op386ORLload(v) + case Op386ORLmodify: + return rewriteValue386_Op386ORLmodify(v) + case Op386ROLB: + return rewriteValue386_Op386ROLB(v) + case Op386ROLBconst: + return rewriteValue386_Op386ROLBconst(v) + case Op386ROLL: + return rewriteValue386_Op386ROLL(v) + case Op386ROLLconst: + return rewriteValue386_Op386ROLLconst(v) + case Op386ROLW: + return rewriteValue386_Op386ROLW(v) + case Op386ROLWconst: + return rewriteValue386_Op386ROLWconst(v) + case Op386SARB: + return rewriteValue386_Op386SARB(v) + case Op386SARBconst: + return rewriteValue386_Op386SARBconst(v) + case Op386SARL: + return rewriteValue386_Op386SARL(v) + case 
Op386SARLconst: + return rewriteValue386_Op386SARLconst(v) + case Op386SARW: + return rewriteValue386_Op386SARW(v) + case Op386SARWconst: + return rewriteValue386_Op386SARWconst(v) + case Op386SBBL: + return rewriteValue386_Op386SBBL(v) + case Op386SBBLcarrymask: + return rewriteValue386_Op386SBBLcarrymask(v) + case Op386SETA: + return rewriteValue386_Op386SETA(v) + case Op386SETAE: + return rewriteValue386_Op386SETAE(v) + case Op386SETB: + return rewriteValue386_Op386SETB(v) + case Op386SETBE: + return rewriteValue386_Op386SETBE(v) + case Op386SETEQ: + return rewriteValue386_Op386SETEQ(v) + case Op386SETG: + return rewriteValue386_Op386SETG(v) + case Op386SETGE: + return rewriteValue386_Op386SETGE(v) + case Op386SETL: + return rewriteValue386_Op386SETL(v) + case Op386SETLE: + return rewriteValue386_Op386SETLE(v) + case Op386SETNE: + return rewriteValue386_Op386SETNE(v) + case Op386SHLL: + return rewriteValue386_Op386SHLL(v) + case Op386SHLLconst: + return rewriteValue386_Op386SHLLconst(v) + case Op386SHRB: + return rewriteValue386_Op386SHRB(v) + case Op386SHRBconst: + return rewriteValue386_Op386SHRBconst(v) + case Op386SHRL: + return rewriteValue386_Op386SHRL(v) + case Op386SHRLconst: + return rewriteValue386_Op386SHRLconst(v) + case Op386SHRW: + return rewriteValue386_Op386SHRW(v) + case Op386SHRWconst: + return rewriteValue386_Op386SHRWconst(v) + case Op386SUBL: + return rewriteValue386_Op386SUBL(v) + case Op386SUBLcarry: + return rewriteValue386_Op386SUBLcarry(v) + case Op386SUBLconst: + return rewriteValue386_Op386SUBLconst(v) + case Op386SUBLload: + return rewriteValue386_Op386SUBLload(v) + case Op386SUBLmodify: + return rewriteValue386_Op386SUBLmodify(v) + case Op386SUBSD: + return rewriteValue386_Op386SUBSD(v) + case Op386SUBSDload: + return rewriteValue386_Op386SUBSDload(v) + case Op386SUBSS: + return rewriteValue386_Op386SUBSS(v) + case Op386SUBSSload: + return rewriteValue386_Op386SUBSSload(v) + case Op386XORL: + return rewriteValue386_Op386XORL(v) + 
case Op386XORLconst: + return rewriteValue386_Op386XORLconst(v) + case Op386XORLconstmodify: + return rewriteValue386_Op386XORLconstmodify(v) + case Op386XORLload: + return rewriteValue386_Op386XORLload(v) + case Op386XORLmodify: + return rewriteValue386_Op386XORLmodify(v) + case OpAdd16: + v.Op = Op386ADDL + return true + case OpAdd32: + v.Op = Op386ADDL + return true + case OpAdd32F: + v.Op = Op386ADDSS + return true + case OpAdd32carry: + v.Op = Op386ADDLcarry + return true + case OpAdd32withcarry: + v.Op = Op386ADCL + return true + case OpAdd64F: + v.Op = Op386ADDSD + return true + case OpAdd8: + v.Op = Op386ADDL + return true + case OpAddPtr: + v.Op = Op386ADDL + return true + case OpAddr: + return rewriteValue386_OpAddr(v) + case OpAnd16: + v.Op = Op386ANDL + return true + case OpAnd32: + v.Op = Op386ANDL + return true + case OpAnd8: + v.Op = Op386ANDL + return true + case OpAndB: + v.Op = Op386ANDL + return true + case OpAvg32u: + v.Op = Op386AVGLU + return true + case OpBswap16: + return rewriteValue386_OpBswap16(v) + case OpBswap32: + v.Op = Op386BSWAPL + return true + case OpClosureCall: + v.Op = Op386CALLclosure + return true + case OpCom16: + v.Op = Op386NOTL + return true + case OpCom32: + v.Op = Op386NOTL + return true + case OpCom8: + v.Op = Op386NOTL + return true + case OpConst16: + return rewriteValue386_OpConst16(v) + case OpConst32: + v.Op = Op386MOVLconst + return true + case OpConst32F: + v.Op = Op386MOVSSconst + return true + case OpConst64F: + v.Op = Op386MOVSDconst + return true + case OpConst8: + return rewriteValue386_OpConst8(v) + case OpConstBool: + return rewriteValue386_OpConstBool(v) + case OpConstNil: + return rewriteValue386_OpConstNil(v) + case OpCtz16: + return rewriteValue386_OpCtz16(v) + case OpCtz16NonZero: + v.Op = Op386BSFL + return true + case OpCtz32: + v.Op = Op386LoweredCtz32 + return true + case OpCtz32NonZero: + v.Op = Op386BSFL + return true + case OpCtz8: + return rewriteValue386_OpCtz8(v) + case OpCtz8NonZero: + 
v.Op = Op386BSFL + return true + case OpCvt32Fto32: + v.Op = Op386CVTTSS2SL + return true + case OpCvt32Fto64F: + v.Op = Op386CVTSS2SD + return true + case OpCvt32to32F: + v.Op = Op386CVTSL2SS + return true + case OpCvt32to64F: + v.Op = Op386CVTSL2SD + return true + case OpCvt64Fto32: + v.Op = Op386CVTTSD2SL + return true + case OpCvt64Fto32F: + v.Op = Op386CVTSD2SS + return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true + case OpDiv16: + v.Op = Op386DIVW + return true + case OpDiv16u: + v.Op = Op386DIVWU + return true + case OpDiv32: + v.Op = Op386DIVL + return true + case OpDiv32F: + v.Op = Op386DIVSS + return true + case OpDiv32u: + v.Op = Op386DIVLU + return true + case OpDiv64F: + v.Op = Op386DIVSD + return true + case OpDiv8: + return rewriteValue386_OpDiv8(v) + case OpDiv8u: + return rewriteValue386_OpDiv8u(v) + case OpEq16: + return rewriteValue386_OpEq16(v) + case OpEq32: + return rewriteValue386_OpEq32(v) + case OpEq32F: + return rewriteValue386_OpEq32F(v) + case OpEq64F: + return rewriteValue386_OpEq64F(v) + case OpEq8: + return rewriteValue386_OpEq8(v) + case OpEqB: + return rewriteValue386_OpEqB(v) + case OpEqPtr: + return rewriteValue386_OpEqPtr(v) + case OpGetCallerPC: + v.Op = Op386LoweredGetCallerPC + return true + case OpGetCallerSP: + v.Op = Op386LoweredGetCallerSP + return true + case OpGetClosurePtr: + v.Op = Op386LoweredGetClosurePtr + return true + case OpGetG: + v.Op = Op386LoweredGetG + return true + case OpHmul32: + v.Op = Op386HMULL + return true + case OpHmul32u: + v.Op = Op386HMULLU + return true + case OpInterCall: + v.Op = Op386CALLinter + return true + case OpIsInBounds: + return rewriteValue386_OpIsInBounds(v) + case OpIsNonNil: + return rewriteValue386_OpIsNonNil(v) + case OpIsSliceInBounds: + return rewriteValue386_OpIsSliceInBounds(v) + case OpLeq16: + return rewriteValue386_OpLeq16(v) + case OpLeq16U: + return rewriteValue386_OpLeq16U(v) + case OpLeq32: + return rewriteValue386_OpLeq32(v) + case OpLeq32F: + return 
rewriteValue386_OpLeq32F(v) + case OpLeq32U: + return rewriteValue386_OpLeq32U(v) + case OpLeq64F: + return rewriteValue386_OpLeq64F(v) + case OpLeq8: + return rewriteValue386_OpLeq8(v) + case OpLeq8U: + return rewriteValue386_OpLeq8U(v) + case OpLess16: + return rewriteValue386_OpLess16(v) + case OpLess16U: + return rewriteValue386_OpLess16U(v) + case OpLess32: + return rewriteValue386_OpLess32(v) + case OpLess32F: + return rewriteValue386_OpLess32F(v) + case OpLess32U: + return rewriteValue386_OpLess32U(v) + case OpLess64F: + return rewriteValue386_OpLess64F(v) + case OpLess8: + return rewriteValue386_OpLess8(v) + case OpLess8U: + return rewriteValue386_OpLess8U(v) + case OpLoad: + return rewriteValue386_OpLoad(v) + case OpLocalAddr: + return rewriteValue386_OpLocalAddr(v) + case OpLsh16x16: + return rewriteValue386_OpLsh16x16(v) + case OpLsh16x32: + return rewriteValue386_OpLsh16x32(v) + case OpLsh16x64: + return rewriteValue386_OpLsh16x64(v) + case OpLsh16x8: + return rewriteValue386_OpLsh16x8(v) + case OpLsh32x16: + return rewriteValue386_OpLsh32x16(v) + case OpLsh32x32: + return rewriteValue386_OpLsh32x32(v) + case OpLsh32x64: + return rewriteValue386_OpLsh32x64(v) + case OpLsh32x8: + return rewriteValue386_OpLsh32x8(v) + case OpLsh8x16: + return rewriteValue386_OpLsh8x16(v) + case OpLsh8x32: + return rewriteValue386_OpLsh8x32(v) + case OpLsh8x64: + return rewriteValue386_OpLsh8x64(v) + case OpLsh8x8: + return rewriteValue386_OpLsh8x8(v) + case OpMod16: + v.Op = Op386MODW + return true + case OpMod16u: + v.Op = Op386MODWU + return true + case OpMod32: + v.Op = Op386MODL + return true + case OpMod32u: + v.Op = Op386MODLU + return true + case OpMod8: + return rewriteValue386_OpMod8(v) + case OpMod8u: + return rewriteValue386_OpMod8u(v) + case OpMove: + return rewriteValue386_OpMove(v) + case OpMul16: + v.Op = Op386MULL + return true + case OpMul32: + v.Op = Op386MULL + return true + case OpMul32F: + v.Op = Op386MULSS + return true + case OpMul32uhilo: + v.Op = 
Op386MULLQU + return true + case OpMul64F: + v.Op = Op386MULSD + return true + case OpMul8: + v.Op = Op386MULL + return true + case OpNeg16: + v.Op = Op386NEGL + return true + case OpNeg32: + v.Op = Op386NEGL + return true + case OpNeg32F: + return rewriteValue386_OpNeg32F(v) + case OpNeg64F: + return rewriteValue386_OpNeg64F(v) + case OpNeg8: + v.Op = Op386NEGL + return true + case OpNeq16: + return rewriteValue386_OpNeq16(v) + case OpNeq32: + return rewriteValue386_OpNeq32(v) + case OpNeq32F: + return rewriteValue386_OpNeq32F(v) + case OpNeq64F: + return rewriteValue386_OpNeq64F(v) + case OpNeq8: + return rewriteValue386_OpNeq8(v) + case OpNeqB: + return rewriteValue386_OpNeqB(v) + case OpNeqPtr: + return rewriteValue386_OpNeqPtr(v) + case OpNilCheck: + v.Op = Op386LoweredNilCheck + return true + case OpNot: + return rewriteValue386_OpNot(v) + case OpOffPtr: + return rewriteValue386_OpOffPtr(v) + case OpOr16: + v.Op = Op386ORL + return true + case OpOr32: + v.Op = Op386ORL + return true + case OpOr8: + v.Op = Op386ORL + return true + case OpOrB: + v.Op = Op386ORL + return true + case OpPanicBounds: + return rewriteValue386_OpPanicBounds(v) + case OpPanicExtend: + return rewriteValue386_OpPanicExtend(v) + case OpRotateLeft16: + v.Op = Op386ROLW + return true + case OpRotateLeft32: + v.Op = Op386ROLL + return true + case OpRotateLeft8: + v.Op = Op386ROLB + return true + case OpRound32F: + v.Op = OpCopy + return true + case OpRound64F: + v.Op = OpCopy + return true + case OpRsh16Ux16: + return rewriteValue386_OpRsh16Ux16(v) + case OpRsh16Ux32: + return rewriteValue386_OpRsh16Ux32(v) + case OpRsh16Ux64: + return rewriteValue386_OpRsh16Ux64(v) + case OpRsh16Ux8: + return rewriteValue386_OpRsh16Ux8(v) + case OpRsh16x16: + return rewriteValue386_OpRsh16x16(v) + case OpRsh16x32: + return rewriteValue386_OpRsh16x32(v) + case OpRsh16x64: + return rewriteValue386_OpRsh16x64(v) + case OpRsh16x8: + return rewriteValue386_OpRsh16x8(v) + case OpRsh32Ux16: + return 
rewriteValue386_OpRsh32Ux16(v) + case OpRsh32Ux32: + return rewriteValue386_OpRsh32Ux32(v) + case OpRsh32Ux64: + return rewriteValue386_OpRsh32Ux64(v) + case OpRsh32Ux8: + return rewriteValue386_OpRsh32Ux8(v) + case OpRsh32x16: + return rewriteValue386_OpRsh32x16(v) + case OpRsh32x32: + return rewriteValue386_OpRsh32x32(v) + case OpRsh32x64: + return rewriteValue386_OpRsh32x64(v) + case OpRsh32x8: + return rewriteValue386_OpRsh32x8(v) + case OpRsh8Ux16: + return rewriteValue386_OpRsh8Ux16(v) + case OpRsh8Ux32: + return rewriteValue386_OpRsh8Ux32(v) + case OpRsh8Ux64: + return rewriteValue386_OpRsh8Ux64(v) + case OpRsh8Ux8: + return rewriteValue386_OpRsh8Ux8(v) + case OpRsh8x16: + return rewriteValue386_OpRsh8x16(v) + case OpRsh8x32: + return rewriteValue386_OpRsh8x32(v) + case OpRsh8x64: + return rewriteValue386_OpRsh8x64(v) + case OpRsh8x8: + return rewriteValue386_OpRsh8x8(v) + case OpSelect0: + return rewriteValue386_OpSelect0(v) + case OpSelect1: + return rewriteValue386_OpSelect1(v) + case OpSignExt16to32: + v.Op = Op386MOVWLSX + return true + case OpSignExt8to16: + v.Op = Op386MOVBLSX + return true + case OpSignExt8to32: + v.Op = Op386MOVBLSX + return true + case OpSignmask: + return rewriteValue386_OpSignmask(v) + case OpSlicemask: + return rewriteValue386_OpSlicemask(v) + case OpSqrt: + v.Op = Op386SQRTSD + return true + case OpSqrt32: + v.Op = Op386SQRTSS + return true + case OpStaticCall: + v.Op = Op386CALLstatic + return true + case OpStore: + return rewriteValue386_OpStore(v) + case OpSub16: + v.Op = Op386SUBL + return true + case OpSub32: + v.Op = Op386SUBL + return true + case OpSub32F: + v.Op = Op386SUBSS + return true + case OpSub32carry: + v.Op = Op386SUBLcarry + return true + case OpSub32withcarry: + v.Op = Op386SBBL + return true + case OpSub64F: + v.Op = Op386SUBSD + return true + case OpSub8: + v.Op = Op386SUBL + return true + case OpSubPtr: + v.Op = Op386SUBL + return true + case OpTailCall: + v.Op = Op386CALLtail + return true + case 
OpTrunc16to8: + v.Op = OpCopy + return true + case OpTrunc32to16: + v.Op = OpCopy + return true + case OpTrunc32to8: + v.Op = OpCopy + return true + case OpWB: + v.Op = Op386LoweredWB + return true + case OpXor16: + v.Op = Op386XORL + return true + case OpXor32: + v.Op = Op386XORL + return true + case OpXor8: + v.Op = Op386XORL + return true + case OpZero: + return rewriteValue386_OpZero(v) + case OpZeroExt16to32: + v.Op = Op386MOVWLZX + return true + case OpZeroExt8to16: + v.Op = Op386MOVBLZX + return true + case OpZeroExt8to32: + v.Op = Op386MOVBLZX + return true + case OpZeromask: + return rewriteValue386_OpZeromask(v) + } + return false +} +func rewriteValue386_Op386ADCL(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADCL x (MOVLconst [c]) f) + // result: (ADCLconst [c] x f) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386MOVLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + f := v_2 + v.reset(Op386ADCLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, f) + return true + } + break + } + return false +} +func rewriteValue386_Op386ADDL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDL x (MOVLconst [c])) + // cond: !t.IsPtr() + // result: (ADDLconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386MOVLconst { + continue + } + t := v_1.Type + c := auxIntToInt32(v_1.AuxInt) + if !(!t.IsPtr()) { + continue + } + v.reset(Op386ADDLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (ADDL x (SHLLconst [3] y)) + // result: (LEAL8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 3 { + continue + } + y := v_1.Args[0] + v.reset(Op386LEAL8) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDL x (SHLLconst [2] y)) + // result: (LEAL4 x y) + for { + 
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 2 { + continue + } + y := v_1.Args[0] + v.reset(Op386LEAL4) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDL x (SHLLconst [1] y)) + // result: (LEAL2 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 { + continue + } + y := v_1.Args[0] + v.reset(Op386LEAL2) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDL x (ADDL y y)) + // result: (LEAL2 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386ADDL { + continue + } + y := v_1.Args[1] + if y != v_1.Args[0] { + continue + } + v.reset(Op386LEAL2) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDL x (ADDL x y)) + // result: (LEAL2 y x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386ADDL { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.reset(Op386LEAL2) + v.AddArg2(y, x) + return true + } + } + break + } + // match: (ADDL (ADDLconst [c] x) y) + // result: (LEAL1 [c] x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != Op386ADDLconst { + continue + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + y := v_1 + v.reset(Op386LEAL1) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDL x (LEAL [c] {s} y)) + // cond: x.Op != OpSB && y.Op != OpSB + // result: (LEAL1 [c] {s} x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386LEAL { + continue + } + c := auxIntToInt32(v_1.AuxInt) + s := auxToSym(v_1.Aux) + y := v_1.Args[0] + if !(x.Op != OpSB && y.Op != OpSB) { + continue + } + 
v.reset(Op386LEAL1) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDL x l:(MOVLload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ADDLload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != Op386MOVLload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386ADDLload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + // match: (ADDL x (NEGL y)) + // result: (SUBL x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386NEGL { + continue + } + y := v_1.Args[0] + v.reset(Op386SUBL) + v.AddArg2(x, y) + return true + } + break + } + return false +} +func rewriteValue386_Op386ADDLcarry(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDLcarry x (MOVLconst [c])) + // result: (ADDLconstcarry [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386MOVLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386ADDLconstcarry) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValue386_Op386ADDLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ADDLconst [c] (ADDL x y)) + // result: (LEAL1 [c] x y) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != Op386ADDL { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(Op386LEAL1) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (ADDLconst [c] (LEAL [d] {s} x)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (LEAL [c+d] {s} x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != Op386LEAL { + break + } + d := 
auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(Op386LEAL) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg(x) + return true + } + // match: (ADDLconst [c] x:(SP)) + // result: (LEAL [c] x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if x.Op != OpSP { + break + } + v.reset(Op386LEAL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (ADDLconst [c] (LEAL1 [d] {s} x y)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (LEAL1 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != Op386LEAL1 { + break + } + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(Op386LEAL1) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (ADDLconst [c] (LEAL2 [d] {s} x y)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (LEAL2 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != Op386LEAL2 { + break + } + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(Op386LEAL2) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (ADDLconst [c] (LEAL4 [d] {s} x y)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (LEAL4 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != Op386LEAL4 { + break + } + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(Op386LEAL4) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (ADDLconst [c] (LEAL8 [d] {s} x y)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (LEAL8 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) 
+ if v_0.Op != Op386LEAL8 { + break + } + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(Op386LEAL8) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (ADDLconst [c] x) + // cond: c==0 + // result: x + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c == 0) { + break + } + v.copyOf(x) + return true + } + // match: (ADDLconst [c] (MOVLconst [d])) + // result: (MOVLconst [c+d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != Op386MOVLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(c + d) + return true + } + // match: (ADDLconst [c] (ADDLconst [d] x)) + // result: (ADDLconst [c+d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != Op386ADDLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(Op386ADDLconst) + v.AuxInt = int32ToAuxInt(c + d) + v.AddArg(x) + return true + } + return false +} +func rewriteValue386_Op386ADDLconstmodify(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (ADDLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) + // cond: valoff1.canAdd32(off2) + // result: (ADDLconstmodify [valoff1.addOffset32(off2)] {sym} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(valoff1.canAdd32(off2)) { + break + } + v.reset(Op386ADDLconstmodify) + v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + // match: (ADDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ADDLconstmodify 
[valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ADDLconstmodify) + v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValue386_Op386ADDLload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (ADDLload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ADDLload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386ADDLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (ADDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ADDLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, 
sym2)) + v.AddArg3(val, base, mem) + return true + } + return false +} +func rewriteValue386_Op386ADDLmodify(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (ADDLmodify [off1] {sym} (ADDLconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ADDLmodify [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386ADDLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (ADDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ADDLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValue386_Op386ADDSD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ADDSDload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != Op386MOVSDload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := 
auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386ADDSDload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValue386_Op386ADDSDload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (ADDSDload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ADDSDload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386ADDSDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (ADDSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ADDSDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + return false +} +func rewriteValue386_Op386ADDSS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ADDSSload x 
[off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != Op386MOVSSload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386ADDSSload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValue386_Op386ADDSSload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (ADDSSload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ADDSSload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386ADDSSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (ADDSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ADDSSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + return false +} +func 
rewriteValue386_Op386ANDL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ANDL x (MOVLconst [c])) + // result: (ANDLconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386MOVLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386ANDLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (ANDL x l:(MOVLload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ANDLload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != Op386MOVLload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386ANDLload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + // match: (ANDL x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValue386_Op386ANDLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ANDLconst [c] (ANDLconst [d] x)) + // result: (ANDLconst [c & d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != Op386ANDLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(Op386ANDLconst) + v.AuxInt = int32ToAuxInt(c & d) + v.AddArg(x) + return true + } + // match: (ANDLconst [c] _) + // cond: c==0 + // result: (MOVLconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if !(c == 0) { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (ANDLconst [c] x) + // cond: c==-1 + // result: x + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c == -1) { + break + } + v.copyOf(x) + return true + } + // match: (ANDLconst [c] (MOVLconst [d])) + // result: (MOVLconst [c&d]) + for { + c := 
auxIntToInt32(v.AuxInt) + if v_0.Op != Op386MOVLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(c & d) + return true + } + return false +} +func rewriteValue386_Op386ANDLconstmodify(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (ANDLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) + // cond: valoff1.canAdd32(off2) + // result: (ANDLconstmodify [valoff1.addOffset32(off2)] {sym} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(valoff1.canAdd32(off2)) { + break + } + v.reset(Op386ANDLconstmodify) + v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + // match: (ANDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ANDLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ANDLconstmodify) + v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValue386_Op386ANDLload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (ANDLload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ANDLload 
[off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386ANDLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (ANDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ANDLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + return false +} +func rewriteValue386_Op386ANDLmodify(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (ANDLmodify [off1] {sym} (ADDLconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ANDLmodify [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386ANDLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (ANDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: 
is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ANDLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValue386_Op386CMPB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMPB x (MOVLconst [c])) + // result: (CMPBconst x [int8(c)]) + for { + x := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386CMPBconst) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (CMPB (MOVLconst [c]) x) + // result: (InvertFlags (CMPBconst x [int8(c)])) + for { + if v_0.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(Op386InvertFlags) + v0 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(c)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMPB x y) + // cond: canonLessThan(x,y) + // result: (InvertFlags (CMPB y x)) + for { + x := v_0 + y := v_1 + if !(canonLessThan(x, y)) { + break + } + v.reset(Op386InvertFlags) + v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (CMPBload {sym} [off] ptr x mem) + for { + l := v_0 + if l.Op != Op386MOVBload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := 
l.Args[1] + ptr := l.Args[0] + x := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(Op386CMPBload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (CMPB x l:(MOVBload {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (InvertFlags (CMPBload {sym} [off] ptr x mem)) + for { + x := v_0 + l := v_1 + if l.Op != Op386MOVBload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(Op386InvertFlags) + v0 := b.NewValue0(l.Pos, Op386CMPBload, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, x, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValue386_Op386CMPBconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CMPBconst (MOVLconst [x]) [y]) + // cond: int8(x)==y + // result: (FlagEQ) + for { + y := auxIntToInt8(v.AuxInt) + if v_0.Op != Op386MOVLconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + if !(int8(x) == y) { + break + } + v.reset(Op386FlagEQ) + return true + } + // match: (CMPBconst (MOVLconst [x]) [y]) + // cond: int8(x)uint8(y) + // result: (FlagLT_UGT) + for { + y := auxIntToInt8(v.AuxInt) + if v_0.Op != Op386MOVLconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + if !(int8(x) < y && uint8(x) > uint8(y)) { + break + } + v.reset(Op386FlagLT_UGT) + return true + } + // match: (CMPBconst (MOVLconst [x]) [y]) + // cond: int8(x)>y && uint8(x) y && uint8(x) < uint8(y)) { + break + } + v.reset(Op386FlagGT_ULT) + return true + } + // match: (CMPBconst (MOVLconst [x]) [y]) + // cond: int8(x)>y && uint8(x)>uint8(y) + // result: (FlagGT_UGT) + for { + y := auxIntToInt8(v.AuxInt) + if v_0.Op != Op386MOVLconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + if !(int8(x) > y && uint8(x) > uint8(y)) { + break + } + v.reset(Op386FlagGT_UGT) + return true + } + // 
match: (CMPBconst (ANDLconst _ [m]) [n]) + // cond: 0 <= int8(m) && int8(m) < n + // result: (FlagLT_ULT) + for { + n := auxIntToInt8(v.AuxInt) + if v_0.Op != Op386ANDLconst { + break + } + m := auxIntToInt32(v_0.AuxInt) + if !(0 <= int8(m) && int8(m) < n) { + break + } + v.reset(Op386FlagLT_ULT) + return true + } + // match: (CMPBconst l:(ANDL x y) [0]) + // cond: l.Uses==1 + // result: (TESTB x y) + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + l := v_0 + if l.Op != Op386ANDL { + break + } + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v.reset(Op386TESTB) + v.AddArg2(x, y) + return true + } + // match: (CMPBconst l:(ANDLconst [c] x) [0]) + // cond: l.Uses==1 + // result: (TESTBconst [int8(c)] x) + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + l := v_0 + if l.Op != Op386ANDLconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v.reset(Op386TESTBconst) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (CMPBconst x [0]) + // result: (TESTB x x) + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + x := v_0 + v.reset(Op386TESTB) + v.AddArg2(x, x) + return true + } + // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c]) + // cond: l.Uses == 1 && clobber(l) + // result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem) + for { + c := auxIntToInt8(v.AuxInt) + l := v_0 + if l.Op != Op386MOVBload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(l.Uses == 1 && clobber(l)) { + break + } + b = l.Block + v0 := b.NewValue0(l.Pos, Op386CMPBconstload, types.TypeFlags) + v.copyOf(v0) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValue386_Op386CMPBload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMPBload {sym} [off] 
ptr (MOVLconst [c]) mem) + // result: (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(Op386CMPBconstload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValue386_Op386CMPL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMPL x (MOVLconst [c])) + // result: (CMPLconst x [c]) + for { + x := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386CMPLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (CMPL (MOVLconst [c]) x) + // result: (InvertFlags (CMPLconst x [c])) + for { + if v_0.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(Op386InvertFlags) + v0 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMPL x y) + // cond: canonLessThan(x,y) + // result: (InvertFlags (CMPL y x)) + for { + x := v_0 + y := v_1 + if !(canonLessThan(x, y)) { + break + } + v.reset(Op386InvertFlags) + v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + // match: (CMPL l:(MOVLload {sym} [off] ptr mem) x) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (CMPLload {sym} [off] ptr x mem) + for { + l := v_0 + if l.Op != Op386MOVLload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + x := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(Op386CMPLload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (CMPL x l:(MOVLload {sym} [off] ptr mem)) + // 
cond: canMergeLoad(v, l) && clobber(l) + // result: (InvertFlags (CMPLload {sym} [off] ptr x mem)) + for { + x := v_0 + l := v_1 + if l.Op != Op386MOVLload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(Op386InvertFlags) + v0 := b.NewValue0(l.Pos, Op386CMPLload, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, x, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValue386_Op386CMPLconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CMPLconst (MOVLconst [x]) [y]) + // cond: x==y + // result: (FlagEQ) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != Op386MOVLconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + if !(x == y) { + break + } + v.reset(Op386FlagEQ) + return true + } + // match: (CMPLconst (MOVLconst [x]) [y]) + // cond: xuint32(y) + // result: (FlagLT_UGT) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != Op386MOVLconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + if !(x < y && uint32(x) > uint32(y)) { + break + } + v.reset(Op386FlagLT_UGT) + return true + } + // match: (CMPLconst (MOVLconst [x]) [y]) + // cond: x>y && uint32(x) y && uint32(x) < uint32(y)) { + break + } + v.reset(Op386FlagGT_ULT) + return true + } + // match: (CMPLconst (MOVLconst [x]) [y]) + // cond: x>y && uint32(x)>uint32(y) + // result: (FlagGT_UGT) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != Op386MOVLconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + if !(x > y && uint32(x) > uint32(y)) { + break + } + v.reset(Op386FlagGT_UGT) + return true + } + // match: (CMPLconst (SHRLconst _ [c]) [n]) + // cond: 0 <= n && 0 < c && c <= 32 && (1<uint16(y) + // result: (FlagLT_UGT) + for { + y := auxIntToInt16(v.AuxInt) + if v_0.Op != Op386MOVLconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + if !(int16(x) < y && uint16(x) > uint16(y)) { + break + } + 
v.reset(Op386FlagLT_UGT) + return true + } + // match: (CMPWconst (MOVLconst [x]) [y]) + // cond: int16(x)>y && uint16(x) y && uint16(x) < uint16(y)) { + break + } + v.reset(Op386FlagGT_ULT) + return true + } + // match: (CMPWconst (MOVLconst [x]) [y]) + // cond: int16(x)>y && uint16(x)>uint16(y) + // result: (FlagGT_UGT) + for { + y := auxIntToInt16(v.AuxInt) + if v_0.Op != Op386MOVLconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + if !(int16(x) > y && uint16(x) > uint16(y)) { + break + } + v.reset(Op386FlagGT_UGT) + return true + } + // match: (CMPWconst (ANDLconst _ [m]) [n]) + // cond: 0 <= int16(m) && int16(m) < n + // result: (FlagLT_ULT) + for { + n := auxIntToInt16(v.AuxInt) + if v_0.Op != Op386ANDLconst { + break + } + m := auxIntToInt32(v_0.AuxInt) + if !(0 <= int16(m) && int16(m) < n) { + break + } + v.reset(Op386FlagLT_ULT) + return true + } + // match: (CMPWconst l:(ANDL x y) [0]) + // cond: l.Uses==1 + // result: (TESTW x y) + for { + if auxIntToInt16(v.AuxInt) != 0 { + break + } + l := v_0 + if l.Op != Op386ANDL { + break + } + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v.reset(Op386TESTW) + v.AddArg2(x, y) + return true + } + // match: (CMPWconst l:(ANDLconst [c] x) [0]) + // cond: l.Uses==1 + // result: (TESTWconst [int16(c)] x) + for { + if auxIntToInt16(v.AuxInt) != 0 { + break + } + l := v_0 + if l.Op != Op386ANDLconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v.reset(Op386TESTWconst) + v.AuxInt = int16ToAuxInt(int16(c)) + v.AddArg(x) + return true + } + // match: (CMPWconst x [0]) + // result: (TESTW x x) + for { + if auxIntToInt16(v.AuxInt) != 0 { + break + } + x := v_0 + v.reset(Op386TESTW) + v.AddArg2(x, x) + return true + } + // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c]) + // cond: l.Uses == 1 && clobber(l) + // result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem) + for { + c := auxIntToInt16(v.AuxInt) + l := v_0 + if 
l.Op != Op386MOVWload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(l.Uses == 1 && clobber(l)) { + break + } + b = l.Block + v0 := b.NewValue0(l.Pos, Op386CMPWconstload, types.TypeFlags) + v.copyOf(v0) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValue386_Op386CMPWload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem) + // result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(Op386CMPWconstload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValue386_Op386DIVSD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (DIVSDload x [off] {sym} ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != Op386MOVSDload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386DIVSDload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValue386_Op386DIVSDload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (DIVSDload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (DIVSDload [off1+off2] {sym} val base mem) + for { + off1 := 
auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386DIVSDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (DIVSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386DIVSDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + return false +} +func rewriteValue386_Op386DIVSS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (DIVSSload x [off] {sym} ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != Op386MOVSSload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386DIVSSload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValue386_Op386DIVSSload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (DIVSSload [off1] {sym} val (ADDLconst [off2] base) mem) + 
// cond: is32Bit(int64(off1)+int64(off2)) + // result: (DIVSSload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386DIVSSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (DIVSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386DIVSSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + return false +} +func rewriteValue386_Op386LEAL(v *Value) bool { + v_0 := v.Args[0] + // match: (LEAL [c] {s} (ADDLconst [d] x)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (LEAL [c+d] {s} x) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(Op386LEAL) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg(x) + return true + } + // match: (LEAL [c] {s} (ADDL x y)) + // cond: x.Op != OpSB && y.Op != OpSB + // result: (LEAL1 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != Op386ADDL { + break + } + _ = 
v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if !(x.Op != OpSB && y.Op != OpSB) { + continue + } + v.reset(Op386LEAL1) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + break + } + // match: (LEAL [off1] {sym1} (LEAL [off2] {sym2} x)) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (LEAL [off1+off2] {mergeSym(sym1,sym2)} x) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + x := v_0.Args[0] + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(Op386LEAL) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg(x) + return true + } + // match: (LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y)) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL1 { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(Op386LEAL1) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + // match: (LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y)) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL2 { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, 
sym2)) { + break + } + v.reset(Op386LEAL2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + // match: (LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y)) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL4 { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(Op386LEAL4) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + // match: (LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y)) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL8 { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(Op386LEAL8) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_Op386LEAL1(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LEAL1 [c] {s} (ADDLconst [d] x) y) + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB + // result: (LEAL1 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != Op386ADDLconst { + continue + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { + continue + } + v.reset(Op386LEAL1) + v.AuxInt = int32ToAuxInt(c + d) 
+ v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + break + } + // match: (LEAL1 [c] {s} x (SHLLconst [1] y)) + // result: (LEAL2 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 { + continue + } + y := v_1.Args[0] + v.reset(Op386LEAL2) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + break + } + // match: (LEAL1 [c] {s} x (SHLLconst [2] y)) + // result: (LEAL4 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 2 { + continue + } + y := v_1.Args[0] + v.reset(Op386LEAL4) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + break + } + // match: (LEAL1 [c] {s} x (SHLLconst [3] y)) + // result: (LEAL8 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 3 { + continue + } + y := v_1.Args[0] + v.reset(Op386LEAL8) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + break + } + // match: (LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != Op386LEAL { + continue + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + continue + } + v.reset(Op386LEAL1) + v.AuxInt = 
int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + break + } + // match: (LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} y y)) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386LEAL1 { + continue + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + y := v_1.Args[1] + if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + continue + } + v.reset(Op386LEAL2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + break + } + // match: (LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} x y)) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} y x) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386LEAL1 { + continue + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + continue + } + v.reset(Op386LEAL2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(y, x) + return true + } + } + break + } + // match: (LEAL1 [0] {nil} x y) + // result: (ADDL x y) + for { + if auxIntToInt32(v.AuxInt) != 0 || auxToSym(v.Aux) != nil { + break + } + x := v_0 + y := v_1 + v.reset(Op386ADDL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_Op386LEAL2(v *Value) bool { + v_1 := v.Args[1] + v_0 
:= v.Args[0] + // match: (LEAL2 [c] {s} (ADDLconst [d] x) y) + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB + // result: (LEAL2 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { + break + } + v.reset(Op386LEAL2) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAL2 [c] {s} x (ADDLconst [d] y)) + // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB + // result: (LEAL2 [c+2*d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + x := v_0 + if v_1.Op != Op386ADDLconst { + break + } + d := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) { + break + } + v.reset(Op386LEAL2) + v.AuxInt = int32ToAuxInt(c + 2*d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAL2 [c] {s} x (SHLLconst [1] y)) + // result: (LEAL4 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + x := v_0 + if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 { + break + } + y := v_1.Args[0] + v.reset(Op386LEAL4) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAL2 [c] {s} x (SHLLconst [2] y)) + // result: (LEAL8 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + x := v_0 + if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 2 { + break + } + y := v_1.Args[0] + v.reset(Op386LEAL8) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op 
!= Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + break + } + v.reset(Op386LEAL2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + // match: (LEAL2 [off1] {sym} x (LEAL1 [off2] {nil} y y)) + // cond: is32Bit(int64(off1)+2*int64(off2)) + // result: (LEAL4 [off1+2*off2] {sym} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != Op386LEAL1 { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + if auxToSym(v_1.Aux) != nil { + break + } + y := v_1.Args[1] + if y != v_1.Args[0] || !(is32Bit(int64(off1) + 2*int64(off2))) { + break + } + v.reset(Op386LEAL4) + v.AuxInt = int32ToAuxInt(off1 + 2*off2) + v.Aux = symToAux(sym) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_Op386LEAL4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LEAL4 [c] {s} (ADDLconst [d] x) y) + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB + // result: (LEAL4 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { + break + } + v.reset(Op386LEAL4) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAL4 [c] {s} x (ADDLconst [d] y)) + // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB + // result: (LEAL4 [c+4*d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + x := v_0 + if v_1.Op != Op386ADDLconst { + break + } + d := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) { + break + } + v.reset(Op386LEAL4) + v.AuxInt = int32ToAuxInt(c + 4*d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + 
return true + } + // match: (LEAL4 [c] {s} x (SHLLconst [1] y)) + // result: (LEAL8 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + x := v_0 + if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 { + break + } + y := v_1.Args[0] + v.reset(Op386LEAL8) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + break + } + v.reset(Op386LEAL4) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + // match: (LEAL4 [off1] {sym} x (LEAL1 [off2] {nil} y y)) + // cond: is32Bit(int64(off1)+4*int64(off2)) + // result: (LEAL8 [off1+4*off2] {sym} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != Op386LEAL1 { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + if auxToSym(v_1.Aux) != nil { + break + } + y := v_1.Args[1] + if y != v_1.Args[0] || !(is32Bit(int64(off1) + 4*int64(off2))) { + break + } + v.reset(Op386LEAL8) + v.AuxInt = int32ToAuxInt(off1 + 4*off2) + v.Aux = symToAux(sym) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_Op386LEAL8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LEAL8 [c] {s} (ADDLconst [d] x) y) + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB + // result: (LEAL8 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + y := 
v_1 + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { + break + } + v.reset(Op386LEAL8) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAL8 [c] {s} x (ADDLconst [d] y)) + // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB + // result: (LEAL8 [c+8*d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + x := v_0 + if v_1.Op != Op386ADDLconst { + break + } + d := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) { + break + } + v.reset(Op386LEAL8) + v.AuxInt = int32ToAuxInt(c + 8*d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + break + } + v.reset(Op386LEAL8) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_Op386MOVBLSX(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVBLSX x:(MOVBload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBLSXload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != Op386MOVBload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, Op386MOVBLSXload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVBLSX (ANDLconst [c] x)) + 
// cond: c & 0x80 == 0 + // result: (ANDLconst [c & 0x7f] x) + for { + if v_0.Op != Op386ANDLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(c&0x80 == 0) { + break + } + v.reset(Op386ANDLconst) + v.AuxInt = int32ToAuxInt(c & 0x7f) + v.AddArg(x) + return true + } + return false +} +func rewriteValue386_Op386MOVBLSXload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVBLSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVBLSX x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != Op386MOVBstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(Op386MOVBLSX) + v.AddArg(x) + return true + } + // match: (MOVBLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MOVBLSXload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValue386_Op386MOVBLZX(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVBLZX x:(MOVBload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != 
Op386MOVBload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, Op386MOVBload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVBLZX (ANDLconst [c] x)) + // result: (ANDLconst [c & 0xff] x) + for { + if v_0.Op != Op386ANDLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(Op386ANDLconst) + v.AuxInt = int32ToAuxInt(c & 0xff) + v.AddArg(x) + return true + } + return false +} +func rewriteValue386_Op386MOVBload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVBLZX x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != Op386MOVBstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(Op386MOVBLZX) + v.AddArg(x) + return true + } + // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVBload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386MOVBload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || 
!config.ctxt.Flag_shared) + // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MOVBload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + // match: (MOVBload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVLconst [int32(read8(sym, int64(off)))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off)))) + return true + } + return false +} +func rewriteValue386_Op386MOVBstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVBstore [off] {sym} ptr (MOVBLSX x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != Op386MOVBLSX { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(Op386MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBLZX x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != Op386MOVBLZX { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(Op386MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVBstore 
[off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386MOVBstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) + // result: (MOVBstoreconst [makeValAndOff(c,off)] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(Op386MOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MOVBstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValue386_Op386MOVBstoreconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) + // cond: sc.canAdd32(off) + // result: (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + s := 
auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(sc.canAdd32(off)) { + break + } + v.reset(Op386MOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(s) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValue386_Op386MOVLload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != Op386MOVLstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.copyOf(x) + return true + } + // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVLload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := 
v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386MOVLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MOVLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + // match: (MOVLload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))) + return true + } + return false +} +func rewriteValue386_Op386MOVLstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVLstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386MOVLstore) + v.AuxInt = int32ToAuxInt(off1 + 
off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) + // result: (MOVLstoreconst [makeValAndOff(c,off)] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(Op386MOVLstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MOVLstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ADDLmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != Op386ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { + break + } + mem := y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(Op386ADDLmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + 
// result: (ANDLmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != Op386ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { + break + } + mem := y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(Op386ANDLmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ORLmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != Op386ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { + break + } + mem := y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(Op386ORLmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (XORLmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != Op386XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { + break + } + mem := y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(Op386XORLmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (ADDLmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != Op386ADDL { + break + } + _ = 
y.Args[1] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 + if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + continue + } + mem := l.Args[1] + if ptr != l.Args[0] { + continue + } + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { + continue + } + v.reset(Op386ADDLmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + break + } + // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (SUBLmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != Op386SUBL { + break + } + x := y.Args[1] + l := y.Args[0] + if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + break + } + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { + break + } + v.reset(Op386SUBLmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (ANDLmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != Op386ANDL { + break + } + _ = y.Args[1] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 + if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + continue + } + mem := l.Args[1] + if ptr != l.Args[0] { + continue + } + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { + continue + } + v.reset(Op386ANDLmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + 
v.AddArg3(ptr, x, mem) + return true + } + break + } + // match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (ORLmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != Op386ORL { + break + } + _ = y.Args[1] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 + if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + continue + } + mem := l.Args[1] + if ptr != l.Args[0] { + continue + } + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { + continue + } + v.reset(Op386ORLmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + break + } + // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (XORLmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != Op386XORL { + break + } + _ = y.Args[1] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 + if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + continue + } + mem := l.Args[1] + if ptr != l.Args[0] { + continue + } + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { + continue + } + v.reset(Op386XORLmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + break + } + // match: (MOVLstore {sym} [off] ptr y:(ADDLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (ADDLconstmodify [makeValAndOff(c,off)] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + 
ptr := v_0 + y := v_1 + if y.Op != Op386ADDLconst { + break + } + c := auxIntToInt32(y.AuxInt) + l := y.Args[0] + if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + break + } + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { + break + } + v.reset(Op386ADDLconstmodify) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ANDLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (ANDLconstmodify [makeValAndOff(c,off)] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != Op386ANDLconst { + break + } + c := auxIntToInt32(y.AuxInt) + l := y.Args[0] + if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + break + } + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { + break + } + v.reset(Op386ANDLconstmodify) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (ORLconstmodify [makeValAndOff(c,off)] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != Op386ORLconst { + break + } + c := auxIntToInt32(y.AuxInt) + l := y.Args[0] + if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + break + } + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { + break + } + v.reset(Op386ORLconstmodify) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return 
true + } + // match: (MOVLstore {sym} [off] ptr y:(XORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (XORLconstmodify [makeValAndOff(c,off)] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != Op386XORLconst { + break + } + c := auxIntToInt32(y.AuxInt) + l := y.Args[0] + if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + break + } + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { + break + } + v.reset(Op386XORLconstmodify) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValue386_Op386MOVLstoreconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) + // cond: sc.canAdd32(off) + // result: (MOVLstoreconst [sc.addOffset32(off)] {s} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(sc.canAdd32(off)) { + break + } + v.reset(Op386MOVLstoreconst) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(s) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVLstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || 
!config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MOVLstoreconst) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValue386_Op386MOVSDconst(v *Value) bool { + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (MOVSDconst [c]) + // cond: config.ctxt.Flag_shared + // result: (MOVSDconst2 (MOVSDconst1 [c])) + for { + c := auxIntToFloat64(v.AuxInt) + if !(config.ctxt.Flag_shared) { + break + } + v.reset(Op386MOVSDconst2) + v0 := b.NewValue0(v.Pos, Op386MOVSDconst1, typ.UInt32) + v0.AuxInt = float64ToAuxInt(c) + v.AddArg(v0) + return true + } + return false +} +func rewriteValue386_Op386MOVSDload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVSDload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386MOVSDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MOVSDload) + 
v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValue386_Op386MOVSDstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVSDstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386MOVSDstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MOVSDstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValue386_Op386MOVSSconst(v *Value) bool { + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (MOVSSconst [c]) + // cond: config.ctxt.Flag_shared + // result: (MOVSSconst2 (MOVSSconst1 [c])) + for { + c := auxIntToFloat32(v.AuxInt) + if !(config.ctxt.Flag_shared) { + break + } + v.reset(Op386MOVSSconst2) + v0 := 
b.NewValue0(v.Pos, Op386MOVSSconst1, typ.UInt32) + v0.AuxInt = float32ToAuxInt(c) + v.AddArg(v0) + return true + } + return false +} +func rewriteValue386_Op386MOVSSload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVSSload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386MOVSSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MOVSSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValue386_Op386MOVSSstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVSSstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := 
auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386MOVSSstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MOVSSstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValue386_Op386MOVWLSX(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVWLSX x:(MOVWload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWLSXload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != Op386MOVWload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, Op386MOVWLSXload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVWLSX (ANDLconst [c] x)) + // cond: c & 0x8000 == 0 + // result: (ANDLconst [c & 0x7fff] x) + for { + if v_0.Op != Op386ANDLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(c&0x8000 == 0) { + break + } + v.reset(Op386ANDLconst) + v.AuxInt = int32ToAuxInt(c & 0x7fff) + v.AddArg(x) 
+ return true + } + return false +} +func rewriteValue386_Op386MOVWLSXload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVWLSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVWLSX x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != Op386MOVWstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(Op386MOVWLSX) + v.AddArg(x) + return true + } + // match: (MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MOVWLSXload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValue386_Op386MOVWLZX(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVWLZX x:(MOVWload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != Op386MOVWload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, Op386MOVWload, v.Type) + v.copyOf(v0) + v0.AuxInt = 
int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVWLZX (ANDLconst [c] x)) + // result: (ANDLconst [c & 0xffff] x) + for { + if v_0.Op != Op386ANDLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(Op386ANDLconst) + v.AuxInt = int32ToAuxInt(c & 0xffff) + v.AddArg(x) + return true + } + return false +} +func rewriteValue386_Op386MOVWload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVWLZX x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != Op386MOVWstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(Op386MOVWLZX) + v.AddArg(x) + return true + } + // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVWload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386MOVWload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := 
v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MOVWload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + // match: (MOVWload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))) + return true + } + return false +} +func rewriteValue386_Op386MOVWstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVWstore [off] {sym} ptr (MOVWLSX x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != Op386MOVWLSX { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(Op386MOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWLZX x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != Op386MOVWLZX { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(Op386MOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVWstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + 
if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386MOVWstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) + // result: (MOVWstoreconst [makeValAndOff(c,off)] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(Op386MOVWstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MOVWstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) + // cond: sc.canAdd32(off) + // result: (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(sc.canAdd32(off)) { + break + } + v.reset(Op386MOVWstoreconst) + v.AuxInt = 
valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(s) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MOVWstoreconst) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValue386_Op386MULL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MULL x (MOVLconst [c])) + // result: (MULLconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386MOVLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386MULLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (MULL x l:(MOVLload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (MULLload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != Op386MOVLload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386MULLload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValue386_Op386MULLconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MULLconst [c] 
(MULLconst [d] x)) + // result: (MULLconst [c * d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != Op386MULLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(Op386MULLconst) + v.AuxInt = int32ToAuxInt(c * d) + v.AddArg(x) + return true + } + // match: (MULLconst [-9] x) + // result: (NEGL (LEAL8 x x)) + for { + if auxIntToInt32(v.AuxInt) != -9 { + break + } + x := v_0 + v.reset(Op386NEGL) + v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + // match: (MULLconst [-5] x) + // result: (NEGL (LEAL4 x x)) + for { + if auxIntToInt32(v.AuxInt) != -5 { + break + } + x := v_0 + v.reset(Op386NEGL) + v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + // match: (MULLconst [-3] x) + // result: (NEGL (LEAL2 x x)) + for { + if auxIntToInt32(v.AuxInt) != -3 { + break + } + x := v_0 + v.reset(Op386NEGL) + v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + // match: (MULLconst [-1] x) + // result: (NEGL x) + for { + if auxIntToInt32(v.AuxInt) != -1 { + break + } + x := v_0 + v.reset(Op386NEGL) + v.AddArg(x) + return true + } + // match: (MULLconst [0] _) + // result: (MOVLconst [0]) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (MULLconst [1] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 1 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (MULLconst [3] x) + // result: (LEAL2 x x) + for { + if auxIntToInt32(v.AuxInt) != 3 { + break + } + x := v_0 + v.reset(Op386LEAL2) + v.AddArg2(x, x) + return true + } + // match: (MULLconst [5] x) + // result: (LEAL4 x x) + for { + if auxIntToInt32(v.AuxInt) != 5 { + break + } + x := v_0 + v.reset(Op386LEAL4) + v.AddArg2(x, x) + return true + } + // match: (MULLconst [7] x) + // result: (LEAL2 x (LEAL2 x x)) + for { + if 
auxIntToInt32(v.AuxInt) != 7 { + break + } + x := v_0 + v.reset(Op386LEAL2) + v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULLconst [9] x) + // result: (LEAL8 x x) + for { + if auxIntToInt32(v.AuxInt) != 9 { + break + } + x := v_0 + v.reset(Op386LEAL8) + v.AddArg2(x, x) + return true + } + // match: (MULLconst [11] x) + // result: (LEAL2 x (LEAL4 x x)) + for { + if auxIntToInt32(v.AuxInt) != 11 { + break + } + x := v_0 + v.reset(Op386LEAL2) + v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULLconst [13] x) + // result: (LEAL4 x (LEAL2 x x)) + for { + if auxIntToInt32(v.AuxInt) != 13 { + break + } + x := v_0 + v.reset(Op386LEAL4) + v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULLconst [19] x) + // result: (LEAL2 x (LEAL8 x x)) + for { + if auxIntToInt32(v.AuxInt) != 19 { + break + } + x := v_0 + v.reset(Op386LEAL2) + v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULLconst [21] x) + // result: (LEAL4 x (LEAL4 x x)) + for { + if auxIntToInt32(v.AuxInt) != 21 { + break + } + x := v_0 + v.reset(Op386LEAL4) + v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULLconst [25] x) + // result: (LEAL8 x (LEAL2 x x)) + for { + if auxIntToInt32(v.AuxInt) != 25 { + break + } + x := v_0 + v.reset(Op386LEAL8) + v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULLconst [27] x) + // result: (LEAL8 (LEAL2 x x) (LEAL2 x x)) + for { + if auxIntToInt32(v.AuxInt) != 27 { + break + } + x := v_0 + v.reset(Op386LEAL8) + v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) + v0.AddArg2(x, x) + v.AddArg2(v0, v0) + return true + } + // match: (MULLconst [37] x) + // result: (LEAL4 x (LEAL8 x x)) + for { + 
if auxIntToInt32(v.AuxInt) != 37 { + break + } + x := v_0 + v.reset(Op386LEAL4) + v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULLconst [41] x) + // result: (LEAL8 x (LEAL4 x x)) + for { + if auxIntToInt32(v.AuxInt) != 41 { + break + } + x := v_0 + v.reset(Op386LEAL8) + v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULLconst [45] x) + // result: (LEAL8 (LEAL4 x x) (LEAL4 x x)) + for { + if auxIntToInt32(v.AuxInt) != 45 { + break + } + x := v_0 + v.reset(Op386LEAL8) + v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) + v0.AddArg2(x, x) + v.AddArg2(v0, v0) + return true + } + // match: (MULLconst [73] x) + // result: (LEAL8 x (LEAL8 x x)) + for { + if auxIntToInt32(v.AuxInt) != 73 { + break + } + x := v_0 + v.reset(Op386LEAL8) + v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULLconst [81] x) + // result: (LEAL8 (LEAL8 x x) (LEAL8 x x)) + for { + if auxIntToInt32(v.AuxInt) != 81 { + break + } + x := v_0 + v.reset(Op386LEAL8) + v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) + v0.AddArg2(x, x) + v.AddArg2(v0, v0) + return true + } + // match: (MULLconst [c] x) + // cond: isPowerOfTwo32(c+1) && c >= 15 + // result: (SUBL (SHLLconst [int32(log32(c+1))] x) x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(c+1) && c >= 15) { + break + } + v.reset(Op386SUBL) + v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c + 1))) + v0.AddArg(x) + v.AddArg2(v0, x) + return true + } + // match: (MULLconst [c] x) + // cond: isPowerOfTwo32(c-1) && c >= 17 + // result: (LEAL1 (SHLLconst [int32(log32(c-1))] x) x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(c-1) && c >= 17) { + break + } + v.reset(Op386LEAL1) + v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c - 1))) 
+ v0.AddArg(x) + v.AddArg2(v0, x) + return true + } + // match: (MULLconst [c] x) + // cond: isPowerOfTwo32(c-2) && c >= 34 + // result: (LEAL2 (SHLLconst [int32(log32(c-2))] x) x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(c-2) && c >= 34) { + break + } + v.reset(Op386LEAL2) + v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c - 2))) + v0.AddArg(x) + v.AddArg2(v0, x) + return true + } + // match: (MULLconst [c] x) + // cond: isPowerOfTwo32(c-4) && c >= 68 + // result: (LEAL4 (SHLLconst [int32(log32(c-4))] x) x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(c-4) && c >= 68) { + break + } + v.reset(Op386LEAL4) + v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c - 4))) + v0.AddArg(x) + v.AddArg2(v0, x) + return true + } + // match: (MULLconst [c] x) + // cond: isPowerOfTwo32(c-8) && c >= 136 + // result: (LEAL8 (SHLLconst [int32(log32(c-8))] x) x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(c-8) && c >= 136) { + break + } + v.reset(Op386LEAL8) + v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c - 8))) + v0.AddArg(x) + v.AddArg2(v0, x) + return true + } + // match: (MULLconst [c] x) + // cond: c%3 == 0 && isPowerOfTwo32(c/3) + // result: (SHLLconst [int32(log32(c/3))] (LEAL2 x x)) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c%3 == 0 && isPowerOfTwo32(c/3)) { + break + } + v.reset(Op386SHLLconst) + v.AuxInt = int32ToAuxInt(int32(log32(c / 3))) + v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + // match: (MULLconst [c] x) + // cond: c%5 == 0 && isPowerOfTwo32(c/5) + // result: (SHLLconst [int32(log32(c/5))] (LEAL4 x x)) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c%5 == 0 && isPowerOfTwo32(c/5)) { + break + } + v.reset(Op386SHLLconst) + v.AuxInt = int32ToAuxInt(int32(log32(c / 5))) + v0 := 
b.NewValue0(v.Pos, Op386LEAL4, v.Type) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + // match: (MULLconst [c] x) + // cond: c%9 == 0 && isPowerOfTwo32(c/9) + // result: (SHLLconst [int32(log32(c/9))] (LEAL8 x x)) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c%9 == 0 && isPowerOfTwo32(c/9)) { + break + } + v.reset(Op386SHLLconst) + v.AuxInt = int32ToAuxInt(int32(log32(c / 9))) + v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + // match: (MULLconst [c] (MOVLconst [d])) + // result: (MOVLconst [c*d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != Op386MOVLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(c * d) + return true + } + return false +} +func rewriteValue386_Op386MULLload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MULLload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MULLload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386MULLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (MULLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MULLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if 
!(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MULLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + return false +} +func rewriteValue386_Op386MULSD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (MULSDload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != Op386MOVSDload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386MULSDload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValue386_Op386MULSDload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MULSDload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MULSDload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386MULSDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (MULSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := 
auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MULSDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + return false +} +func rewriteValue386_Op386MULSS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (MULSSload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != Op386MOVSSload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386MULSSload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValue386_Op386MULSSload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MULSSload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MULSSload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386MULSSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (MULSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: 
is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MULSSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + return false +} +func rewriteValue386_Op386NEGL(v *Value) bool { + v_0 := v.Args[0] + // match: (NEGL (MOVLconst [c])) + // result: (MOVLconst [-c]) + for { + if v_0.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(-c) + return true + } + return false +} +func rewriteValue386_Op386NOTL(v *Value) bool { + v_0 := v.Args[0] + // match: (NOTL (MOVLconst [c])) + // result: (MOVLconst [^c]) + for { + if v_0.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(^c) + return true + } + return false +} +func rewriteValue386_Op386ORL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ORL x (MOVLconst [c])) + // result: (ORLconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386MOVLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386ORLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (ORL x l:(MOVLload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ORLload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != 
Op386MOVLload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386ORLload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + // match: (ORL x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValue386_Op386ORLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ORLconst [c] x) + // cond: c==0 + // result: x + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c == 0) { + break + } + v.copyOf(x) + return true + } + // match: (ORLconst [c] _) + // cond: c==-1 + // result: (MOVLconst [-1]) + for { + c := auxIntToInt32(v.AuxInt) + if !(c == -1) { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(-1) + return true + } + // match: (ORLconst [c] (MOVLconst [d])) + // result: (MOVLconst [c|d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != Op386MOVLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(c | d) + return true + } + return false +} +func rewriteValue386_Op386ORLconstmodify(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (ORLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) + // cond: valoff1.canAdd32(off2) + // result: (ORLconstmodify [valoff1.addOffset32(off2)] {sym} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(valoff1.canAdd32(off2)) { + break + } + v.reset(Op386ORLconstmodify) + v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + // match: (ORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem) + // 
cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ORLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ORLconstmodify) + v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValue386_Op386ORLload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (ORLload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ORLload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386ORLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (ORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || 
!config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ORLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + return false +} +func rewriteValue386_Op386ORLmodify(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (ORLmodify [off1] {sym} (ADDLconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ORLmodify [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386ORLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (ORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ORLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValue386_Op386ROLB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ROLB x (MOVLconst [c])) + // result: (ROLBconst [int8(c&7)] x) + for { + x := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386ROLBconst) + v.AuxInt 
= int8ToAuxInt(int8(c & 7)) + v.AddArg(x) + return true + } + return false +} +func rewriteValue386_Op386ROLBconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ROLBconst [0] x) + // result: x + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValue386_Op386ROLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ROLL x (MOVLconst [c])) + // result: (ROLLconst [c&31] x) + for { + x := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386ROLLconst) + v.AuxInt = int32ToAuxInt(c & 31) + v.AddArg(x) + return true + } + return false +} +func rewriteValue386_Op386ROLLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ROLLconst [0] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValue386_Op386ROLW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ROLW x (MOVLconst [c])) + // result: (ROLWconst [int16(c&15)] x) + for { + x := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386ROLWconst) + v.AuxInt = int16ToAuxInt(int16(c & 15)) + v.AddArg(x) + return true + } + return false +} +func rewriteValue386_Op386ROLWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ROLWconst [0] x) + // result: x + for { + if auxIntToInt16(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValue386_Op386SARB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SARB x (MOVLconst [c])) + // result: (SARBconst [int8(min(int64(c&31),7))] x) + for { + x := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386SARBconst) + v.AuxInt = int8ToAuxInt(int8(min(int64(c&31), 7))) + v.AddArg(x) + return true + } + return false +} +func rewriteValue386_Op386SARBconst(v *Value) bool { + v_0 := 
v.Args[0] + // match: (SARBconst x [0]) + // result: x + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SARBconst [c] (MOVLconst [d])) + // result: (MOVLconst [d>>uint64(c)]) + for { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != Op386MOVLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(d >> uint64(c)) + return true + } + return false +} +func rewriteValue386_Op386SARL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SARL x (MOVLconst [c])) + // result: (SARLconst [c&31] x) + for { + x := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386SARLconst) + v.AuxInt = int32ToAuxInt(c & 31) + v.AddArg(x) + return true + } + // match: (SARL x (ANDLconst [31] y)) + // result: (SARL x y) + for { + x := v_0 + if v_1.Op != Op386ANDLconst || auxIntToInt32(v_1.AuxInt) != 31 { + break + } + y := v_1.Args[0] + v.reset(Op386SARL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_Op386SARLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SARLconst x [0]) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SARLconst [c] (MOVLconst [d])) + // result: (MOVLconst [d>>uint64(c)]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != Op386MOVLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(d >> uint64(c)) + return true + } + return false +} +func rewriteValue386_Op386SARW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SARW x (MOVLconst [c])) + // result: (SARWconst [int16(min(int64(c&31),15))] x) + for { + x := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386SARWconst) + v.AuxInt = int16ToAuxInt(int16(min(int64(c&31), 15))) + v.AddArg(x) + return true + } + return false +} +func 
rewriteValue386_Op386SARWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SARWconst x [0]) + // result: x + for { + if auxIntToInt16(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SARWconst [c] (MOVLconst [d])) + // result: (MOVLconst [d>>uint64(c)]) + for { + c := auxIntToInt16(v.AuxInt) + if v_0.Op != Op386MOVLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(d >> uint64(c)) + return true + } + return false +} +func rewriteValue386_Op386SBBL(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SBBL x (MOVLconst [c]) f) + // result: (SBBLconst [c] x f) + for { + x := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + f := v_2 + v.reset(Op386SBBLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, f) + return true + } + return false +} +func rewriteValue386_Op386SBBLcarrymask(v *Value) bool { + v_0 := v.Args[0] + // match: (SBBLcarrymask (FlagEQ)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagEQ { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SBBLcarrymask (FlagLT_ULT)) + // result: (MOVLconst [-1]) + for { + if v_0.Op != Op386FlagLT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(-1) + return true + } + // match: (SBBLcarrymask (FlagLT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagLT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SBBLcarrymask (FlagGT_ULT)) + // result: (MOVLconst [-1]) + for { + if v_0.Op != Op386FlagGT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(-1) + return true + } + // match: (SBBLcarrymask (FlagGT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagGT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func 
rewriteValue386_Op386SETA(v *Value) bool { + v_0 := v.Args[0] + // match: (SETA (InvertFlags x)) + // result: (SETB x) + for { + if v_0.Op != Op386InvertFlags { + break + } + x := v_0.Args[0] + v.reset(Op386SETB) + v.AddArg(x) + return true + } + // match: (SETA (FlagEQ)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagEQ { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETA (FlagLT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagLT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETA (FlagLT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagLT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETA (FlagGT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagGT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETA (FlagGT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagGT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + return false +} +func rewriteValue386_Op386SETAE(v *Value) bool { + v_0 := v.Args[0] + // match: (SETAE (InvertFlags x)) + // result: (SETBE x) + for { + if v_0.Op != Op386InvertFlags { + break + } + x := v_0.Args[0] + v.reset(Op386SETBE) + v.AddArg(x) + return true + } + // match: (SETAE (FlagEQ)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagEQ { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETAE (FlagLT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagLT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETAE (FlagLT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagLT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return 
true + } + // match: (SETAE (FlagGT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagGT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETAE (FlagGT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagGT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + return false +} +func rewriteValue386_Op386SETB(v *Value) bool { + v_0 := v.Args[0] + // match: (SETB (InvertFlags x)) + // result: (SETA x) + for { + if v_0.Op != Op386InvertFlags { + break + } + x := v_0.Args[0] + v.reset(Op386SETA) + v.AddArg(x) + return true + } + // match: (SETB (FlagEQ)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagEQ { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETB (FlagLT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagLT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETB (FlagLT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagLT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETB (FlagGT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagGT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETB (FlagGT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagGT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValue386_Op386SETBE(v *Value) bool { + v_0 := v.Args[0] + // match: (SETBE (InvertFlags x)) + // result: (SETAE x) + for { + if v_0.Op != Op386InvertFlags { + break + } + x := v_0.Args[0] + v.reset(Op386SETAE) + v.AddArg(x) + return true + } + // match: (SETBE (FlagEQ)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagEQ { + break + } + v.reset(Op386MOVLconst) + 
v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETBE (FlagLT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagLT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETBE (FlagLT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagLT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETBE (FlagGT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagGT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETBE (FlagGT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagGT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValue386_Op386SETEQ(v *Value) bool { + v_0 := v.Args[0] + // match: (SETEQ (InvertFlags x)) + // result: (SETEQ x) + for { + if v_0.Op != Op386InvertFlags { + break + } + x := v_0.Args[0] + v.reset(Op386SETEQ) + v.AddArg(x) + return true + } + // match: (SETEQ (FlagEQ)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagEQ { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETEQ (FlagLT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagLT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETEQ (FlagLT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagLT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETEQ (FlagGT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagGT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETEQ (FlagGT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagGT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + 
return true + } + return false +} +func rewriteValue386_Op386SETG(v *Value) bool { + v_0 := v.Args[0] + // match: (SETG (InvertFlags x)) + // result: (SETL x) + for { + if v_0.Op != Op386InvertFlags { + break + } + x := v_0.Args[0] + v.reset(Op386SETL) + v.AddArg(x) + return true + } + // match: (SETG (FlagEQ)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagEQ { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETG (FlagLT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagLT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETG (FlagLT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagLT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETG (FlagGT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagGT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETG (FlagGT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagGT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + return false +} +func rewriteValue386_Op386SETGE(v *Value) bool { + v_0 := v.Args[0] + // match: (SETGE (InvertFlags x)) + // result: (SETLE x) + for { + if v_0.Op != Op386InvertFlags { + break + } + x := v_0.Args[0] + v.reset(Op386SETLE) + v.AddArg(x) + return true + } + // match: (SETGE (FlagEQ)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagEQ { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETGE (FlagLT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagLT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETGE (FlagLT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagLT_UGT { + break + } + 
v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETGE (FlagGT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagGT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETGE (FlagGT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagGT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + return false +} +func rewriteValue386_Op386SETL(v *Value) bool { + v_0 := v.Args[0] + // match: (SETL (InvertFlags x)) + // result: (SETG x) + for { + if v_0.Op != Op386InvertFlags { + break + } + x := v_0.Args[0] + v.reset(Op386SETG) + v.AddArg(x) + return true + } + // match: (SETL (FlagEQ)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagEQ { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETL (FlagLT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagLT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETL (FlagLT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagLT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETL (FlagGT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagGT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETL (FlagGT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagGT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValue386_Op386SETLE(v *Value) bool { + v_0 := v.Args[0] + // match: (SETLE (InvertFlags x)) + // result: (SETGE x) + for { + if v_0.Op != Op386InvertFlags { + break + } + x := v_0.Args[0] + v.reset(Op386SETGE) + v.AddArg(x) + return true + } + // match: (SETLE (FlagEQ)) + // result: (MOVLconst [1]) + for { + if 
v_0.Op != Op386FlagEQ { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETLE (FlagLT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagLT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETLE (FlagLT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagLT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETLE (FlagGT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagGT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETLE (FlagGT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagGT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValue386_Op386SETNE(v *Value) bool { + v_0 := v.Args[0] + // match: (SETNE (InvertFlags x)) + // result: (SETNE x) + for { + if v_0.Op != Op386InvertFlags { + break + } + x := v_0.Args[0] + v.reset(Op386SETNE) + v.AddArg(x) + return true + } + // match: (SETNE (FlagEQ)) + // result: (MOVLconst [0]) + for { + if v_0.Op != Op386FlagEQ { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETNE (FlagLT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagLT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETNE (FlagLT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagLT_UGT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETNE (FlagGT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagGT_ULT { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETNE (FlagGT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != Op386FlagGT_UGT { + break + 
} + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + return false +} +func rewriteValue386_Op386SHLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SHLL x (MOVLconst [c])) + // result: (SHLLconst [c&31] x) + for { + x := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386SHLLconst) + v.AuxInt = int32ToAuxInt(c & 31) + v.AddArg(x) + return true + } + // match: (SHLL x (ANDLconst [31] y)) + // result: (SHLL x y) + for { + x := v_0 + if v_1.Op != Op386ANDLconst || auxIntToInt32(v_1.AuxInt) != 31 { + break + } + y := v_1.Args[0] + v.reset(Op386SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_Op386SHLLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SHLLconst x [0]) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValue386_Op386SHRB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SHRB x (MOVLconst [c])) + // cond: c&31 < 8 + // result: (SHRBconst [int8(c&31)] x) + for { + x := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(c&31 < 8) { + break + } + v.reset(Op386SHRBconst) + v.AuxInt = int8ToAuxInt(int8(c & 31)) + v.AddArg(x) + return true + } + // match: (SHRB _ (MOVLconst [c])) + // cond: c&31 >= 8 + // result: (MOVLconst [0]) + for { + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(c&31 >= 8) { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValue386_Op386SHRBconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SHRBconst x [0]) + // result: x + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValue386_Op386SHRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SHRL x (MOVLconst [c])) + // 
result: (SHRLconst [c&31] x) + for { + x := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386SHRLconst) + v.AuxInt = int32ToAuxInt(c & 31) + v.AddArg(x) + return true + } + // match: (SHRL x (ANDLconst [31] y)) + // result: (SHRL x y) + for { + x := v_0 + if v_1.Op != Op386ANDLconst || auxIntToInt32(v_1.AuxInt) != 31 { + break + } + y := v_1.Args[0] + v.reset(Op386SHRL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_Op386SHRLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SHRLconst x [0]) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValue386_Op386SHRW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SHRW x (MOVLconst [c])) + // cond: c&31 < 16 + // result: (SHRWconst [int16(c&31)] x) + for { + x := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(c&31 < 16) { + break + } + v.reset(Op386SHRWconst) + v.AuxInt = int16ToAuxInt(int16(c & 31)) + v.AddArg(x) + return true + } + // match: (SHRW _ (MOVLconst [c])) + // cond: c&31 >= 16 + // result: (MOVLconst [0]) + for { + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(c&31 >= 16) { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValue386_Op386SHRWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SHRWconst x [0]) + // result: x + for { + if auxIntToInt16(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValue386_Op386SUBL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SUBL x (MOVLconst [c])) + // result: (SUBLconst x [c]) + for { + x := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386SUBLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + 
return true + } + // match: (SUBL (MOVLconst [c]) x) + // result: (NEGL (SUBLconst x [c])) + for { + if v_0.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(Op386NEGL) + v0 := b.NewValue0(v.Pos, Op386SUBLconst, v.Type) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SUBL x l:(MOVLload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (SUBLload x [off] {sym} ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != Op386MOVLload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386SUBLload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + // match: (SUBL x x) + // result: (MOVLconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValue386_Op386SUBLcarry(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBLcarry x (MOVLconst [c])) + // result: (SUBLconstcarry [c] x) + for { + x := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386SUBLconstcarry) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValue386_Op386SUBLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SUBLconst [c] x) + // cond: c==0 + // result: x + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c == 0) { + break + } + v.copyOf(x) + return true + } + // match: (SUBLconst [c] x) + // result: (ADDLconst [-c] x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + v.reset(Op386ADDLconst) + v.AuxInt = int32ToAuxInt(-c) + v.AddArg(x) + return true + } +} +func rewriteValue386_Op386SUBLload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config 
:= b.Func.Config + // match: (SUBLload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SUBLload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386SUBLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (SUBLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386SUBLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + return false +} +func rewriteValue386_Op386SUBLmodify(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (SUBLmodify [off1] {sym} (ADDLconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SUBLmodify [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386SUBLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + 
v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (SUBLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386SUBLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValue386_Op386SUBSD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (SUBSDload x [off] {sym} ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != Op386MOVSDload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386SUBSDload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValue386_Op386SUBSDload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (SUBSDload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SUBSDload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if 
!(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386SUBSDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (SUBSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386SUBSDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + return false +} +func rewriteValue386_Op386SUBSS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (SUBSSload x [off] {sym} ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != Op386MOVSSload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386SUBSSload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValue386_Op386SUBSSload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (SUBSSload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SUBSSload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if 
v_1.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386SUBSSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (SUBSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386SUBSSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + return false +} +func rewriteValue386_Op386XORL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XORL x (MOVLconst [c])) + // result: (XORLconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != Op386MOVLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386XORLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (XORLload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != Op386MOVLload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(Op386XORLload) + v.AuxInt = 
int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + // match: (XORL x x) + // result: (MOVLconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValue386_Op386XORLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (XORLconst [c] (XORLconst [d] x)) + // result: (XORLconst [c ^ d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != Op386XORLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(Op386XORLconst) + v.AuxInt = int32ToAuxInt(c ^ d) + v.AddArg(x) + return true + } + // match: (XORLconst [c] x) + // cond: c==0 + // result: x + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c == 0) { + break + } + v.copyOf(x) + return true + } + // match: (XORLconst [c] (MOVLconst [d])) + // result: (MOVLconst [c^d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != Op386MOVLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(c ^ d) + return true + } + return false +} +func rewriteValue386_Op386XORLconstmodify(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (XORLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) + // cond: valoff1.canAdd32(off2) + // result: (XORLconstmodify [valoff1.addOffset32(off2)] {sym} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(valoff1.canAdd32(off2)) { + break + } + v.reset(Op386XORLconstmodify) + v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + // match: (XORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || 
!config.ctxt.Flag_shared) + // result: (XORLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386XORLconstmodify) + v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValue386_Op386XORLload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (XORLload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (XORLload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386XORLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (XORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386XORLload) + v.AuxInt = 
int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + return false +} +func rewriteValue386_Op386XORLmodify(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (XORLmodify [off1] {sym} (ADDLconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (XORLmodify [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != Op386ADDLconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(Op386XORLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (XORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != Op386LEAL { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386XORLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValue386_OpAddr(v *Value) bool { + v_0 := v.Args[0] + // match: (Addr {sym} base) + // result: (LEAL {sym} base) + for { + sym := auxToSym(v.Aux) + base := v_0 + v.reset(Op386LEAL) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } +} +func rewriteValue386_OpBswap16(v *Value) bool { + v_0 := v.Args[0] + // match: (Bswap16 x) + // result: 
(ROLWconst [8] x) + for { + x := v_0 + v.reset(Op386ROLWconst) + v.AuxInt = int16ToAuxInt(8) + v.AddArg(x) + return true + } +} +func rewriteValue386_OpConst16(v *Value) bool { + // match: (Const16 [c]) + // result: (MOVLconst [int32(c)]) + for { + c := auxIntToInt16(v.AuxInt) + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + return true + } +} +func rewriteValue386_OpConst8(v *Value) bool { + // match: (Const8 [c]) + // result: (MOVLconst [int32(c)]) + for { + c := auxIntToInt8(v.AuxInt) + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + return true + } +} +func rewriteValue386_OpConstBool(v *Value) bool { + // match: (ConstBool [c]) + // result: (MOVLconst [b2i32(c)]) + for { + c := auxIntToBool(v.AuxInt) + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(b2i32(c)) + return true + } +} +func rewriteValue386_OpConstNil(v *Value) bool { + // match: (ConstNil) + // result: (MOVLconst [0]) + for { + v.reset(Op386MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } +} +func rewriteValue386_OpCtz16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz16 x) + // result: (BSFL (ORLconst [0x10000] x)) + for { + x := v_0 + v.reset(Op386BSFL) + v0 := b.NewValue0(v.Pos, Op386ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0x10000) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpCtz8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz8 x) + // result: (BSFL (ORLconst [0x100] x)) + for { + x := v_0 + v.reset(Op386BSFL) + v0 := b.NewValue0(v.Pos, Op386ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0x100) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpDiv8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (DIVW (SignExt8to16 x) (SignExt8to16 y)) + for { + x := v_0 + y := v_1 + v.reset(Op386DIVW) + v0 := 
b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValue386_OpDiv8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)) + for { + x := v_0 + y := v_1 + v.reset(Op386DIVWU) + v0 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValue386_OpEq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq16 x y) + // result: (SETEQ (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETEQ) + v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpEq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32 x y) + // result: (SETEQ (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETEQ) + v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpEq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32F x y) + // result: (SETEQF (UCOMISS x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETEQF) + v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpEq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq64F x y) + // result: (SETEQF (UCOMISD x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETEQF) + v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpEq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // 
match: (Eq8 x y) + // result: (SETEQ (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETEQ) + v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpEqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (EqB x y) + // result: (SETEQ (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETEQ) + v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpEqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (EqPtr x y) + // result: (SETEQ (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETEQ) + v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpIsInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (IsInBounds idx len) + // result: (SETB (CMPL idx len)) + for { + idx := v_0 + len := v_1 + v.reset(Op386SETB) + v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpIsNonNil(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (IsNonNil p) + // result: (SETNE (TESTL p p)) + for { + p := v_0 + v.reset(Op386SETNE) + v0 := b.NewValue0(v.Pos, Op386TESTL, types.TypeFlags) + v0.AddArg2(p, p) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpIsSliceInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (IsSliceInBounds idx len) + // result: (SETBE (CMPL idx len)) + for { + idx := v_0 + len := v_1 + v.reset(Op386SETBE) + v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpLeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq16 x y) + // result: (SETLE (CMPW x y)) + for { + 
x := v_0 + y := v_1 + v.reset(Op386SETLE) + v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpLeq16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq16U x y) + // result: (SETBE (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETBE) + v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpLeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32 x y) + // result: (SETLE (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETLE) + v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpLeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32F x y) + // result: (SETGEF (UCOMISS y x)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETGEF) + v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpLeq32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32U x y) + // result: (SETBE (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETBE) + v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpLeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64F x y) + // result: (SETGEF (UCOMISD y x)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETGEF) + v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpLeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq8 x y) + // result: (SETLE (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETLE) + v0 := b.NewValue0(v.Pos, 
Op386CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpLeq8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq8U x y) + // result: (SETBE (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETBE) + v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpLess16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less16 x y) + // result: (SETL (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETL) + v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpLess16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less16U x y) + // result: (SETB (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETB) + v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpLess32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32 x y) + // result: (SETL (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETL) + v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpLess32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32F x y) + // result: (SETGF (UCOMISS y x)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETGF) + v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpLess32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32U x y) + // result: (SETB (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETB) + v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + 
} +} +func rewriteValue386_OpLess64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64F x y) + // result: (SETGF (UCOMISD y x)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETGF) + v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpLess8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less8 x y) + // result: (SETL (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETL) + v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpLess8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less8U x y) + // result: (SETB (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETB) + v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Load ptr mem) + // cond: (is32BitInt(t) || isPtr(t)) + // result: (MOVLload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t) || isPtr(t)) { + break + } + v.reset(Op386MOVLload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is16BitInt(t) + // result: (MOVWload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t)) { + break + } + v.reset(Op386MOVWload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (t.IsBoolean() || is8BitInt(t)) + // result: (MOVBload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean() || is8BitInt(t)) { + break + } + v.reset(Op386MOVBload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (MOVSSload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + 
v.reset(Op386MOVSSload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (MOVSDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(Op386MOVSDload) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValue386_OpLocalAddr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (LEAL {sym} (SPanchored base mem)) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(Op386LEAL) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) + v.AddArg(v0) + return true + } + // match: (LocalAddr {sym} base _) + // cond: !t.Elem().HasPointers() + // result: (LEAL {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(Op386LEAL) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + return false +} +func rewriteValue386_OpLsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386ANDL) + v0 := b.NewValue0(v.Pos, Op386SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SHLL) + v.Type = t + v.AddArg2(x, y) + return true + } + 
return false +} +func rewriteValue386_OpLsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386ANDL) + v0 := b.NewValue0(v.Pos, Op386SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SHLL) + v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpLsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Lsh16x64 x (Const64 [c])) + // cond: uint64(c) < 16 + // result: (SHLLconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 16) { + break + } + v.reset(Op386SHLLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (Lsh16x64 _ (Const64 [c])) + // cond: uint64(c) >= 16 + // result: (Const16 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 16) { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + return false +} +func rewriteValue386_OpLsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386ANDL) + v0 := b.NewValue0(v.Pos, Op386SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, 
Op386SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SHLL) + v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpLsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386ANDL) + v0 := b.NewValue0(v.Pos, Op386SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SHLL) + v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpLsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386ANDL) + v0 := b.NewValue0(v.Pos, Op386SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + t := v.Type + x 
:= v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SHLL) + v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpLsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Lsh32x64 x (Const64 [c])) + // cond: uint64(c) < 32 + // result: (SHLLconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 32) { + break + } + v.reset(Op386SHLLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (Lsh32x64 _ (Const64 [c])) + // cond: uint64(c) >= 32 + // result: (Const32 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 32) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValue386_OpLsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386ANDL) + v0 := b.NewValue0(v.Pos, Op386SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SHLL) + v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpLsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if 
!(!shiftIsBounded(v)) { + break + } + v.reset(Op386ANDL) + v0 := b.NewValue0(v.Pos, Op386SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SHLL) + v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpLsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386ANDL) + v0 := b.NewValue0(v.Pos, Op386SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SHLL) + v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpLsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Lsh8x64 x (Const64 [c])) + // cond: uint64(c) < 8 + // result: (SHLLconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 8) { + break + } + v.reset(Op386SHLLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (Lsh8x64 _ (Const64 [c])) + // cond: uint64(c) >= 8 + // result: (Const8 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := 
auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 8) { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + return false +} +func rewriteValue386_OpLsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386ANDL) + v0 := b.NewValue0(v.Pos, Op386SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SHLL) + v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpMod8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8 x y) + // result: (MODW (SignExt8to16 x) (SignExt8to16 y)) + for { + x := v_0 + y := v_1 + v.reset(Op386MODW) + v0 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValue386_OpMod8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8u x y) + // result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y)) + for { + x := v_0 + y := v_1 + v.reset(Op386MODWU) + v0 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValue386_OpMove(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + 
config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Move [0] _ _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(Op386MOVBstore) + v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] dst src mem) + // result: (MOVWstore dst (MOVWload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(Op386MOVWstore) + v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [4] dst src mem) + // result: (MOVLstore dst (MOVLload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(Op386MOVLstore) + v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [3] dst src mem) + // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(Op386MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, Op386MOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [5] dst src mem) + // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 5 { + break + } + dst := v_0 + src := v_1 + 
mem := v_2 + v.reset(Op386MOVBstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [6] dst src mem) + // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(Op386MOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [7] dst src mem) + // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 7 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(Op386MOVLstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [8] dst src mem) + // result: (MOVLstore [4] dst (MOVLload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(Op386MOVLstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, Op386MOVLstore, 
types.TypeMem) + v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 8 && s%4 != 0 + // result: (Move [s-s%4] (ADDLconst dst [int32(s%4)]) (ADDLconst src [int32(s%4)]) (MOVLstore dst (MOVLload src mem) mem)) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 8 && s%4 != 0) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%4) + v0 := b.NewValue0(v.Pos, Op386ADDLconst, dst.Type) + v0.AuxInt = int32ToAuxInt(int32(s % 4)) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, Op386ADDLconst, src.Type) + v1.AuxInt = int32ToAuxInt(int32(s % 4)) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 8 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s) + // result: (DUFFCOPY [10*(128-s/4)] dst src mem) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 8 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) { + break + } + v.reset(Op386DUFFCOPY) + v.AuxInt = int64ToAuxInt(10 * (128 - s/4)) + v.AddArg3(dst, src, mem) + return true + } + // match: (Move [s] dst src mem) + // cond: (s > 4*128 || config.noDuffDevice) && s%4 == 0 && logLargeCopy(v, s) + // result: (REPMOVSL dst src (MOVLconst [int32(s/4)]) mem) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !((s > 4*128 || config.noDuffDevice) && s%4 == 0 && logLargeCopy(v, s)) { + break + } + v.reset(Op386REPMOVSL) + v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(int32(s / 4)) + v.AddArg4(dst, src, v0, mem) + return true + } + return false +} +func rewriteValue386_OpNeg32F(v *Value) bool 
{ + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg32F x) + // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) + for { + x := v_0 + v.reset(Op386PXOR) + v0 := b.NewValue0(v.Pos, Op386MOVSSconst, typ.Float32) + v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) + v.AddArg2(x, v0) + return true + } +} +func rewriteValue386_OpNeg64F(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg64F x) + // result: (PXOR x (MOVSDconst [math.Copysign(0, -1)])) + for { + x := v_0 + v.reset(Op386PXOR) + v0 := b.NewValue0(v.Pos, Op386MOVSDconst, typ.Float64) + v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) + v.AddArg2(x, v0) + return true + } +} +func rewriteValue386_OpNeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq16 x y) + // result: (SETNE (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETNE) + v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpNeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq32 x y) + // result: (SETNE (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETNE) + v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpNeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq32F x y) + // result: (SETNEF (UCOMISS x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETNEF) + v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpNeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq64F x y) + // result: (SETNEF (UCOMISD x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETNEF) + v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags) + v0.AddArg2(x, y) + 
v.AddArg(v0) + return true + } +} +func rewriteValue386_OpNeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq8 x y) + // result: (SETNE (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETNE) + v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpNeqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (NeqB x y) + // result: (SETNE (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETNE) + v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpNeqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (NeqPtr x y) + // result: (SETNE (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(Op386SETNE) + v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpNot(v *Value) bool { + v_0 := v.Args[0] + // match: (Not x) + // result: (XORLconst [1] x) + for { + x := v_0 + v.reset(Op386XORLconst) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValue386_OpOffPtr(v *Value) bool { + v_0 := v.Args[0] + // match: (OffPtr [off] ptr) + // result: (ADDLconst [int32(off)] ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(Op386ADDLconst) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) + return true + } +} +func rewriteValue386_OpPanicBounds(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 0 + // result: (LoweredPanicBoundsA [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 0) { + break + } + v.reset(Op386LoweredPanicBoundsA) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds 
[kind] x y mem) + // cond: boundsABI(kind) == 1 + // result: (LoweredPanicBoundsB [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 1) { + break + } + v.reset(Op386LoweredPanicBoundsB) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 2 + // result: (LoweredPanicBoundsC [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 2) { + break + } + v.reset(Op386LoweredPanicBoundsC) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + return false +} +func rewriteValue386_OpPanicExtend(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PanicExtend [kind] hi lo y mem) + // cond: boundsABI(kind) == 0 + // result: (LoweredPanicExtendA [kind] hi lo y mem) + for { + kind := auxIntToInt64(v.AuxInt) + hi := v_0 + lo := v_1 + y := v_2 + mem := v_3 + if !(boundsABI(kind) == 0) { + break + } + v.reset(Op386LoweredPanicExtendA) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg4(hi, lo, y, mem) + return true + } + // match: (PanicExtend [kind] hi lo y mem) + // cond: boundsABI(kind) == 1 + // result: (LoweredPanicExtendB [kind] hi lo y mem) + for { + kind := auxIntToInt64(v.AuxInt) + hi := v_0 + lo := v_1 + y := v_2 + mem := v_3 + if !(boundsABI(kind) == 1) { + break + } + v.reset(Op386LoweredPanicExtendB) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg4(hi, lo, y, mem) + return true + } + // match: (PanicExtend [kind] hi lo y mem) + // cond: boundsABI(kind) == 2 + // result: (LoweredPanicExtendC [kind] hi lo y mem) + for { + kind := auxIntToInt64(v.AuxInt) + hi := v_0 + lo := v_1 + y := v_2 + mem := v_3 + if !(boundsABI(kind) == 2) { + break + } + v.reset(Op386LoweredPanicExtendC) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg4(hi, lo, y, mem) + return true + } + return false +} +func 
rewriteValue386_OpRsh16Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPWconst y [16]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386ANDL) + v0 := b.NewValue0(v.Pos, Op386SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(16) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh16Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRW x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SHRW) + v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpRsh16Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPLconst y [16]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386ANDL) + v0 := b.NewValue0(v.Pos, Op386SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(16) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh16Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SHRW x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SHRW) + v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpRsh16Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Rsh16Ux64 x (Const64 [c])) + // cond: uint64(c) < 16 + // result: (SHRWconst x [int16(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if 
!(uint64(c) < 16) { + break + } + v.reset(Op386SHRWconst) + v.AuxInt = int16ToAuxInt(int16(c)) + v.AddArg(x) + return true + } + // match: (Rsh16Ux64 _ (Const64 [c])) + // cond: uint64(c) >= 16 + // result: (Const16 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 16) { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + return false +} +func rewriteValue386_OpRsh16Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPBconst y [16]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386ANDL) + v0 := b.NewValue0(v.Pos, Op386SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(16) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh16Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRW x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SHRW) + v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpRsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16x16 x y) + // cond: !shiftIsBounded(v) + // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [16]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386SARW) + v.Type = t + v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) + v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) + v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16x16 x y) 
+ // cond: shiftIsBounded(v) + // result: (SARW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SARW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpRsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [16]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386SARW) + v.Type = t + v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) + v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) + v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SARW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SARW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpRsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Rsh16x64 x (Const64 [c])) + // cond: uint64(c) < 16 + // result: (SARWconst x [int16(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 16) { + break + } + v.reset(Op386SARWconst) + v.AuxInt = int16ToAuxInt(int16(c)) + v.AddArg(x) + return true + } + // match: (Rsh16x64 x (Const64 [c])) + // cond: uint64(c) >= 16 + // result: (SARWconst x [15]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 16) { + break + } + v.reset(Op386SARWconst) + v.AuxInt = int16ToAuxInt(15) + v.AddArg(x) + return true + } + return false +} +func rewriteValue386_OpRsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16x8 x y) + // 
cond: !shiftIsBounded(v) + // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [16]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386SARW) + v.Type = t + v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) + v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) + v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SARW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SARW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpRsh32Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPWconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386ANDL) + v0 := b.NewValue0(v.Pos, Op386SHRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh32Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SHRL) + v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpRsh32Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPLconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386ANDL) + v0 := b.NewValue0(v.Pos, 
Op386SHRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh32Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SHRL) + v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpRsh32Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Rsh32Ux64 x (Const64 [c])) + // cond: uint64(c) < 32 + // result: (SHRLconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 32) { + break + } + v.reset(Op386SHRLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (Rsh32Ux64 _ (Const64 [c])) + // cond: uint64(c) >= 32 + // result: (Const32 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 32) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValue386_OpRsh32Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPBconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386ANDL) + v0 := b.NewValue0(v.Pos, Op386SHRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh32Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SHRL) 
+ v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpRsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [32]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) + v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) + v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SARL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpRsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [32]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) + v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) + v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SARL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpRsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // 
match: (Rsh32x64 x (Const64 [c])) + // cond: uint64(c) < 32 + // result: (SARLconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 32) { + break + } + v.reset(Op386SARLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (Rsh32x64 x (Const64 [c])) + // cond: uint64(c) >= 32 + // result: (SARLconst x [31]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 32) { + break + } + v.reset(Op386SARLconst) + v.AuxInt = int32ToAuxInt(31) + v.AddArg(x) + return true + } + return false +} +func rewriteValue386_OpRsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [32]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) + v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) + v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SARL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpRsh8Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPWconst y [8]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386ANDL) + v0 := b.NewValue0(v.Pos, Op386SHRB, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, 
Op386SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh8Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SHRB) + v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpRsh8Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPLconst y [8]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386ANDL) + v0 := b.NewValue0(v.Pos, Op386SHRB, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh8Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SHRB) + v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpRsh8Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Rsh8Ux64 x (Const64 [c])) + // cond: uint64(c) < 8 + // result: (SHRBconst x [int8(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 8) { + break + } + v.reset(Op386SHRBconst) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (Rsh8Ux64 _ (Const64 [c])) + // cond: uint64(c) >= 8 + // result: (Const8 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 8) { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + return false +} +func 
rewriteValue386_OpRsh8Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPBconst y [8]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386ANDL) + v0 := b.NewValue0(v.Pos, Op386SHRB, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh8Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SHRB) + v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpRsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [8]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386SARB) + v.Type = t + v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) + v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) + v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(8) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SARB) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpRsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [8]))))) + for { + t := 
v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386SARB) + v.Type = t + v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) + v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) + v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(8) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SARB) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpRsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Rsh8x64 x (Const64 [c])) + // cond: uint64(c) < 8 + // result: (SARBconst x [int8(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 8) { + break + } + v.reset(Op386SARBconst) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (Rsh8x64 x (Const64 [c])) + // cond: uint64(c) >= 8 + // result: (SARBconst x [7]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 8) { + break + } + v.reset(Op386SARBconst) + v.AuxInt = int8ToAuxInt(7) + v.AddArg(x) + return true + } + return false +} +func rewriteValue386_OpRsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8x8 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [8]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(Op386SARB) + v.Type = t + v0 := b.NewValue0(v.Pos, Op386ORL, y.Type) + v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type) + v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(8) + 
v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(Op386SARB) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValue386_OpSelect0(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select0 (Mul32uover x y)) + // result: (Select0 (MULLU x y)) + for { + if v_0.Op != OpMul32uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpSelect0) + v.Type = typ.UInt32 + v0 := b.NewValue0(v.Pos, Op386MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValue386_OpSelect1(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select1 (Mul32uover x y)) + // result: (SETO (Select1 (MULLU x y))) + for { + if v_0.Op != OpMul32uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(Op386SETO) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, Op386MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValue386_OpSignmask(v *Value) bool { + v_0 := v.Args[0] + // match: (Signmask x) + // result: (SARLconst x [31]) + for { + x := v_0 + v.reset(Op386SARLconst) + v.AuxInt = int32ToAuxInt(31) + v.AddArg(x) + return true + } +} +func rewriteValue386_OpSlicemask(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Slicemask x) + // result: (SARLconst (NEGL x) [31]) + for { + t := v.Type + x := v_0 + v.reset(Op386SARLconst) + v.AuxInt = int32ToAuxInt(31) + v0 := b.NewValue0(v.Pos, Op386NEGL, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValue386_OpStore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + 
// match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && t.IsFloat() + // result: (MOVSDstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && t.IsFloat()) { + break + } + v.reset(Op386MOVSDstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && t.IsFloat() + // result: (MOVSSstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && t.IsFloat()) { + break + } + v.reset(Op386MOVSSstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && !t.IsFloat() + // result: (MOVLstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && !t.IsFloat()) { + break + } + v.reset(Op386MOVLstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 2 + // result: (MOVWstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 2) { + break + } + v.reset(Op386MOVWstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 1 + // result: (MOVBstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 1) { + break + } + v.reset(Op386MOVBstore) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValue386_OpZero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Zero [0] _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_1 + v.copyOf(mem) + return true + } + // match: (Zero [1] destptr mem) + // result: (MOVBstoreconst [0] destptr mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + destptr := v_0 + mem := v_1 + v.reset(Op386MOVBstoreconst) + 
v.AuxInt = valAndOffToAuxInt(0) + v.AddArg2(destptr, mem) + return true + } + // match: (Zero [2] destptr mem) + // result: (MOVWstoreconst [0] destptr mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + destptr := v_0 + mem := v_1 + v.reset(Op386MOVWstoreconst) + v.AuxInt = valAndOffToAuxInt(0) + v.AddArg2(destptr, mem) + return true + } + // match: (Zero [4] destptr mem) + // result: (MOVLstoreconst [0] destptr mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + destptr := v_0 + mem := v_1 + v.reset(Op386MOVLstoreconst) + v.AuxInt = valAndOffToAuxInt(0) + v.AddArg2(destptr, mem) + return true + } + // match: (Zero [3] destptr mem) + // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + destptr := v_0 + mem := v_1 + v.reset(Op386MOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2)) + v0 := b.NewValue0(v.Pos, Op386MOVWstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [5] destptr mem) + // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 5 { + break + } + destptr := v_0 + mem := v_1 + v.reset(Op386MOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4)) + v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [6] destptr mem) + // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + destptr := v_0 + mem := v_1 + v.reset(Op386MOVWstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4)) + v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, 
types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [7] destptr mem) + // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 7 { + break + } + destptr := v_0 + mem := v_1 + v.reset(Op386MOVLstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3)) + v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [s] destptr mem) + // cond: s%4 != 0 && s > 4 + // result: (Zero [s-s%4] (ADDLconst destptr [int32(s%4)]) (MOVLstoreconst [0] destptr mem)) + for { + s := auxIntToInt64(v.AuxInt) + destptr := v_0 + mem := v_1 + if !(s%4 != 0 && s > 4) { + break + } + v.reset(OpZero) + v.AuxInt = int64ToAuxInt(s - s%4) + v0 := b.NewValue0(v.Pos, Op386ADDLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(int32(s % 4)) + v0.AddArg(destptr) + v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) + v1.AuxInt = valAndOffToAuxInt(0) + v1.AddArg2(destptr, mem) + v.AddArg2(v0, v1) + return true + } + // match: (Zero [8] destptr mem) + // result: (MOVLstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + destptr := v_0 + mem := v_1 + v.reset(Op386MOVLstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4)) + v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [12] destptr mem) + // result: (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVLstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))) + for { + if auxIntToInt64(v.AuxInt) != 12 { + break + } + destptr := 
v_0 + mem := v_1 + v.reset(Op386MOVLstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8)) + v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4)) + v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) + v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v1.AddArg2(destptr, mem) + v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [16] destptr mem) + // result: (MOVLstoreconst [makeValAndOff(0,12)] destptr (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVLstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)))) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + destptr := v_0 + mem := v_1 + v.reset(Op386MOVLstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 12)) + v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8)) + v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) + v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4)) + v2 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem) + v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v2.AddArg2(destptr, mem) + v1.AddArg2(destptr, v2) + v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [s] destptr mem) + // cond: s > 16 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice + // result: (DUFFZERO [1*(128-s/4)] destptr (MOVLconst [0]) mem) + for { + s := auxIntToInt64(v.AuxInt) + destptr := v_0 + mem := v_1 + if !(s > 16 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice) { + break + } + v.reset(Op386DUFFZERO) + v.AuxInt = int64ToAuxInt(1 * (128 - s/4)) + v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(destptr, v0, mem) + return true + } + // match: (Zero [s] destptr mem) + // cond: (s > 4*128 || (config.noDuffDevice && s > 16)) && s%4 == 0 + // result: (REPSTOSL destptr (MOVLconst [int32(s/4)]) (MOVLconst 
[0]) mem) + for { + s := auxIntToInt64(v.AuxInt) + destptr := v_0 + mem := v_1 + if !((s > 4*128 || (config.noDuffDevice && s > 16)) && s%4 == 0) { + break + } + v.reset(Op386REPSTOSL) + v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(int32(s / 4)) + v1 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(0) + v.AddArg4(destptr, v0, v1, mem) + return true + } + return false +} +func rewriteValue386_OpZeromask(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Zeromask x) + // result: (XORLconst [-1] (SBBLcarrymask (CMPLconst x [1]))) + for { + t := v.Type + x := v_0 + v.reset(Op386XORLconst) + v.AuxInt = int32ToAuxInt(-1) + v0 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t) + v1 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) + v1.AuxInt = int32ToAuxInt(1) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteBlock386(b *Block) bool { + switch b.Kind { + case Block386EQ: + // match: (EQ (InvertFlags cmp) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == Op386InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386EQ, cmp) + return true + } + // match: (EQ (FlagEQ) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagEQ { + b.Reset(BlockFirst) + return true + } + // match: (EQ (FlagLT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagLT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (EQ (FlagLT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagLT_UGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (EQ (FlagGT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagGT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (EQ (FlagGT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagGT_UGT { + b.Reset(BlockFirst) + 
b.swapSuccessors() + return true + } + case Block386GE: + // match: (GE (InvertFlags cmp) yes no) + // result: (LE cmp yes no) + for b.Controls[0].Op == Op386InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386LE, cmp) + return true + } + // match: (GE (FlagEQ) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagEQ { + b.Reset(BlockFirst) + return true + } + // match: (GE (FlagLT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagLT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GE (FlagLT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagLT_UGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GE (FlagGT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagGT_ULT { + b.Reset(BlockFirst) + return true + } + // match: (GE (FlagGT_UGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagGT_UGT { + b.Reset(BlockFirst) + return true + } + case Block386GT: + // match: (GT (InvertFlags cmp) yes no) + // result: (LT cmp yes no) + for b.Controls[0].Op == Op386InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386LT, cmp) + return true + } + // match: (GT (FlagEQ) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagEQ { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GT (FlagLT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagLT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GT (FlagLT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagLT_UGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GT (FlagGT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagGT_ULT { + b.Reset(BlockFirst) + return true + } + // match: (GT (FlagGT_UGT) yes no) + // result: (First yes 
no) + for b.Controls[0].Op == Op386FlagGT_UGT { + b.Reset(BlockFirst) + return true + } + case BlockIf: + // match: (If (SETL cmp) yes no) + // result: (LT cmp yes no) + for b.Controls[0].Op == Op386SETL { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386LT, cmp) + return true + } + // match: (If (SETLE cmp) yes no) + // result: (LE cmp yes no) + for b.Controls[0].Op == Op386SETLE { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386LE, cmp) + return true + } + // match: (If (SETG cmp) yes no) + // result: (GT cmp yes no) + for b.Controls[0].Op == Op386SETG { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386GT, cmp) + return true + } + // match: (If (SETGE cmp) yes no) + // result: (GE cmp yes no) + for b.Controls[0].Op == Op386SETGE { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386GE, cmp) + return true + } + // match: (If (SETEQ cmp) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == Op386SETEQ { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386EQ, cmp) + return true + } + // match: (If (SETNE cmp) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == Op386SETNE { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386NE, cmp) + return true + } + // match: (If (SETB cmp) yes no) + // result: (ULT cmp yes no) + for b.Controls[0].Op == Op386SETB { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386ULT, cmp) + return true + } + // match: (If (SETBE cmp) yes no) + // result: (ULE cmp yes no) + for b.Controls[0].Op == Op386SETBE { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386ULE, cmp) + return true + } + // match: (If (SETA cmp) yes no) + // result: (UGT cmp yes no) + for b.Controls[0].Op == Op386SETA { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386UGT, cmp) + return true + } + // match: (If (SETAE cmp) yes no) + // result: (UGE cmp yes 
no) + for b.Controls[0].Op == Op386SETAE { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386UGE, cmp) + return true + } + // match: (If (SETO cmp) yes no) + // result: (OS cmp yes no) + for b.Controls[0].Op == Op386SETO { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386OS, cmp) + return true + } + // match: (If (SETGF cmp) yes no) + // result: (UGT cmp yes no) + for b.Controls[0].Op == Op386SETGF { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386UGT, cmp) + return true + } + // match: (If (SETGEF cmp) yes no) + // result: (UGE cmp yes no) + for b.Controls[0].Op == Op386SETGEF { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386UGE, cmp) + return true + } + // match: (If (SETEQF cmp) yes no) + // result: (EQF cmp yes no) + for b.Controls[0].Op == Op386SETEQF { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386EQF, cmp) + return true + } + // match: (If (SETNEF cmp) yes no) + // result: (NEF cmp yes no) + for b.Controls[0].Op == Op386SETNEF { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386NEF, cmp) + return true + } + // match: (If cond yes no) + // result: (NE (TESTB cond cond) yes no) + for { + cond := b.Controls[0] + v0 := b.NewValue0(cond.Pos, Op386TESTB, types.TypeFlags) + v0.AddArg2(cond, cond) + b.resetWithControl(Block386NE, v0) + return true + } + case Block386LE: + // match: (LE (InvertFlags cmp) yes no) + // result: (GE cmp yes no) + for b.Controls[0].Op == Op386InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386GE, cmp) + return true + } + // match: (LE (FlagEQ) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagEQ { + b.Reset(BlockFirst) + return true + } + // match: (LE (FlagLT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagLT_ULT { + b.Reset(BlockFirst) + return true + } + // match: (LE (FlagLT_UGT) yes no) + // result: 
(First yes no) + for b.Controls[0].Op == Op386FlagLT_UGT { + b.Reset(BlockFirst) + return true + } + // match: (LE (FlagGT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagGT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (LE (FlagGT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagGT_UGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case Block386LT: + // match: (LT (InvertFlags cmp) yes no) + // result: (GT cmp yes no) + for b.Controls[0].Op == Op386InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386GT, cmp) + return true + } + // match: (LT (FlagEQ) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagEQ { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (LT (FlagLT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagLT_ULT { + b.Reset(BlockFirst) + return true + } + // match: (LT (FlagLT_UGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagLT_UGT { + b.Reset(BlockFirst) + return true + } + // match: (LT (FlagGT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagGT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (LT (FlagGT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagGT_UGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case Block386NE: + // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) + // result: (LT cmp yes no) + for b.Controls[0].Op == Op386TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != Op386SETL { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != Op386SETL || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(Block386LT, cmp) + return true + } + // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) + // result: (LE cmp yes no) + for b.Controls[0].Op == 
Op386TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != Op386SETLE { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != Op386SETLE || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(Block386LE, cmp) + return true + } + // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) + // result: (GT cmp yes no) + for b.Controls[0].Op == Op386TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != Op386SETG { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != Op386SETG || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(Block386GT, cmp) + return true + } + // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) + // result: (GE cmp yes no) + for b.Controls[0].Op == Op386TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != Op386SETGE { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != Op386SETGE || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(Block386GE, cmp) + return true + } + // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == Op386TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != Op386SETEQ { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != Op386SETEQ || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(Block386EQ, cmp) + return true + } + // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == Op386TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != Op386SETNE { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != Op386SETNE || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(Block386NE, cmp) + return true + } + // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) + // result: (ULT cmp yes no) + for b.Controls[0].Op == Op386TESTB { + 
v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != Op386SETB { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != Op386SETB || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(Block386ULT, cmp) + return true + } + // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) + // result: (ULE cmp yes no) + for b.Controls[0].Op == Op386TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != Op386SETBE { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != Op386SETBE || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(Block386ULE, cmp) + return true + } + // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) + // result: (UGT cmp yes no) + for b.Controls[0].Op == Op386TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != Op386SETA { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != Op386SETA || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(Block386UGT, cmp) + return true + } + // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) + // result: (UGE cmp yes no) + for b.Controls[0].Op == Op386TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != Op386SETAE { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != Op386SETAE || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(Block386UGE, cmp) + return true + } + // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no) + // result: (OS cmp yes no) + for b.Controls[0].Op == Op386TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != Op386SETO { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != Op386SETO || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(Block386OS, cmp) + return true + } + // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) + // result: (UGT cmp yes no) + for b.Controls[0].Op == Op386TESTB { + v_0 := 
b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != Op386SETGF { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != Op386SETGF || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(Block386UGT, cmp) + return true + } + // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) + // result: (UGE cmp yes no) + for b.Controls[0].Op == Op386TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != Op386SETGEF { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != Op386SETGEF || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(Block386UGE, cmp) + return true + } + // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) + // result: (EQF cmp yes no) + for b.Controls[0].Op == Op386TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != Op386SETEQF { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != Op386SETEQF || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(Block386EQF, cmp) + return true + } + // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) + // result: (NEF cmp yes no) + for b.Controls[0].Op == Op386TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != Op386SETNEF { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != Op386SETNEF || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(Block386NEF, cmp) + return true + } + // match: (NE (InvertFlags cmp) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == Op386InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386NE, cmp) + return true + } + // match: (NE (FlagEQ) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagEQ { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (NE (FlagLT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagLT_ULT { + b.Reset(BlockFirst) + return 
true + } + // match: (NE (FlagLT_UGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagLT_UGT { + b.Reset(BlockFirst) + return true + } + // match: (NE (FlagGT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagGT_ULT { + b.Reset(BlockFirst) + return true + } + // match: (NE (FlagGT_UGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagGT_UGT { + b.Reset(BlockFirst) + return true + } + case Block386UGE: + // match: (UGE (InvertFlags cmp) yes no) + // result: (ULE cmp yes no) + for b.Controls[0].Op == Op386InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386ULE, cmp) + return true + } + // match: (UGE (FlagEQ) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagEQ { + b.Reset(BlockFirst) + return true + } + // match: (UGE (FlagLT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagLT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (UGE (FlagLT_UGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagLT_UGT { + b.Reset(BlockFirst) + return true + } + // match: (UGE (FlagGT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagGT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (UGE (FlagGT_UGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagGT_UGT { + b.Reset(BlockFirst) + return true + } + case Block386UGT: + // match: (UGT (InvertFlags cmp) yes no) + // result: (ULT cmp yes no) + for b.Controls[0].Op == Op386InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386ULT, cmp) + return true + } + // match: (UGT (FlagEQ) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagEQ { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (UGT (FlagLT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagLT_ULT { + 
b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (UGT (FlagLT_UGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagLT_UGT { + b.Reset(BlockFirst) + return true + } + // match: (UGT (FlagGT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagGT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (UGT (FlagGT_UGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagGT_UGT { + b.Reset(BlockFirst) + return true + } + case Block386ULE: + // match: (ULE (InvertFlags cmp) yes no) + // result: (UGE cmp yes no) + for b.Controls[0].Op == Op386InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386UGE, cmp) + return true + } + // match: (ULE (FlagEQ) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagEQ { + b.Reset(BlockFirst) + return true + } + // match: (ULE (FlagLT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagLT_ULT { + b.Reset(BlockFirst) + return true + } + // match: (ULE (FlagLT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagLT_UGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (ULE (FlagGT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagGT_ULT { + b.Reset(BlockFirst) + return true + } + // match: (ULE (FlagGT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagGT_UGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case Block386ULT: + // match: (ULT (InvertFlags cmp) yes no) + // result: (UGT cmp yes no) + for b.Controls[0].Op == Op386InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(Block386UGT, cmp) + return true + } + // match: (ULT (FlagEQ) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagEQ { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (ULT (FlagLT_ULT) yes no) + // 
result: (First yes no) + for b.Controls[0].Op == Op386FlagLT_ULT { + b.Reset(BlockFirst) + return true + } + // match: (ULT (FlagLT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagLT_UGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (ULT (FlagGT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == Op386FlagGT_ULT { + b.Reset(BlockFirst) + return true + } + // match: (ULT (FlagGT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == Op386FlagGT_UGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewrite386splitload.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewrite386splitload.go new file mode 100644 index 0000000000000000000000000000000000000000..a8bd6aaff443e3e94a9c0e155377856321f28c23 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewrite386splitload.go @@ -0,0 +1,159 @@ +// Code generated from _gen/386splitload.rules using 'go generate'; DO NOT EDIT. 
+ +package ssa + +func rewriteValue386splitload(v *Value) bool { + switch v.Op { + case Op386CMPBconstload: + return rewriteValue386splitload_Op386CMPBconstload(v) + case Op386CMPBload: + return rewriteValue386splitload_Op386CMPBload(v) + case Op386CMPLconstload: + return rewriteValue386splitload_Op386CMPLconstload(v) + case Op386CMPLload: + return rewriteValue386splitload_Op386CMPLload(v) + case Op386CMPWconstload: + return rewriteValue386splitload_Op386CMPWconstload(v) + case Op386CMPWload: + return rewriteValue386splitload_Op386CMPWload(v) + } + return false +} +func rewriteValue386splitload_Op386CMPBconstload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPBconstload {sym} [vo] ptr mem) + // result: (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()]) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + mem := v_1 + v.reset(Op386CMPBconst) + v.AuxInt = int8ToAuxInt(vo.Val8()) + v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(vo.Off()) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValue386splitload_Op386CMPBload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPBload {sym} [off] ptr x mem) + // result: (CMPB (MOVBload {sym} [off] ptr mem) x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + x := v_1 + mem := v_2 + v.reset(Op386CMPB) + v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) + return true + } +} +func rewriteValue386splitload_Op386CMPLconstload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPLconstload {sym} [vo] ptr mem) + // result: (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) 
[vo.Val()]) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + mem := v_1 + v.reset(Op386CMPLconst) + v.AuxInt = int32ToAuxInt(vo.Val()) + v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(vo.Off()) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValue386splitload_Op386CMPLload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPLload {sym} [off] ptr x mem) + // result: (CMPL (MOVLload {sym} [off] ptr mem) x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + x := v_1 + mem := v_2 + v.reset(Op386CMPL) + v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) + return true + } +} +func rewriteValue386splitload_Op386CMPWconstload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPWconstload {sym} [vo] ptr mem) + // result: (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()]) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + mem := v_1 + v.reset(Op386CMPWconst) + v.AuxInt = int16ToAuxInt(vo.Val16()) + v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(vo.Off()) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValue386splitload_Op386CMPWload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPWload {sym} [off] ptr x mem) + // result: (CMPW (MOVWload {sym} [off] ptr mem) x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + x := v_1 + mem := v_2 + v.reset(Op386CMPW) + v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = 
symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) + return true + } +} +func rewriteBlock386splitload(b *Block) bool { + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteAMD64.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteAMD64.go new file mode 100644 index 0000000000000000000000000000000000000000..ba71189703ddffa6d432eb991ff63dbba7bd1cb0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -0,0 +1,31752 @@ +// Code generated from _gen/AMD64.rules using 'go generate'; DO NOT EDIT. + +package ssa + +import "internal/buildcfg" +import "math" +import "cmd/internal/obj" +import "cmd/compile/internal/types" + +func rewriteValueAMD64(v *Value) bool { + switch v.Op { + case OpAMD64ADCQ: + return rewriteValueAMD64_OpAMD64ADCQ(v) + case OpAMD64ADCQconst: + return rewriteValueAMD64_OpAMD64ADCQconst(v) + case OpAMD64ADDL: + return rewriteValueAMD64_OpAMD64ADDL(v) + case OpAMD64ADDLconst: + return rewriteValueAMD64_OpAMD64ADDLconst(v) + case OpAMD64ADDLconstmodify: + return rewriteValueAMD64_OpAMD64ADDLconstmodify(v) + case OpAMD64ADDLload: + return rewriteValueAMD64_OpAMD64ADDLload(v) + case OpAMD64ADDLmodify: + return rewriteValueAMD64_OpAMD64ADDLmodify(v) + case OpAMD64ADDQ: + return rewriteValueAMD64_OpAMD64ADDQ(v) + case OpAMD64ADDQcarry: + return rewriteValueAMD64_OpAMD64ADDQcarry(v) + case OpAMD64ADDQconst: + return rewriteValueAMD64_OpAMD64ADDQconst(v) + case OpAMD64ADDQconstmodify: + return rewriteValueAMD64_OpAMD64ADDQconstmodify(v) + case OpAMD64ADDQload: + return rewriteValueAMD64_OpAMD64ADDQload(v) + case OpAMD64ADDQmodify: + return rewriteValueAMD64_OpAMD64ADDQmodify(v) + case OpAMD64ADDSD: + return rewriteValueAMD64_OpAMD64ADDSD(v) + case OpAMD64ADDSDload: + return rewriteValueAMD64_OpAMD64ADDSDload(v) + case OpAMD64ADDSS: + return rewriteValueAMD64_OpAMD64ADDSS(v) + case OpAMD64ADDSSload: + return rewriteValueAMD64_OpAMD64ADDSSload(v) + case 
OpAMD64ANDL: + return rewriteValueAMD64_OpAMD64ANDL(v) + case OpAMD64ANDLconst: + return rewriteValueAMD64_OpAMD64ANDLconst(v) + case OpAMD64ANDLconstmodify: + return rewriteValueAMD64_OpAMD64ANDLconstmodify(v) + case OpAMD64ANDLload: + return rewriteValueAMD64_OpAMD64ANDLload(v) + case OpAMD64ANDLmodify: + return rewriteValueAMD64_OpAMD64ANDLmodify(v) + case OpAMD64ANDNL: + return rewriteValueAMD64_OpAMD64ANDNL(v) + case OpAMD64ANDNQ: + return rewriteValueAMD64_OpAMD64ANDNQ(v) + case OpAMD64ANDQ: + return rewriteValueAMD64_OpAMD64ANDQ(v) + case OpAMD64ANDQconst: + return rewriteValueAMD64_OpAMD64ANDQconst(v) + case OpAMD64ANDQconstmodify: + return rewriteValueAMD64_OpAMD64ANDQconstmodify(v) + case OpAMD64ANDQload: + return rewriteValueAMD64_OpAMD64ANDQload(v) + case OpAMD64ANDQmodify: + return rewriteValueAMD64_OpAMD64ANDQmodify(v) + case OpAMD64BSFQ: + return rewriteValueAMD64_OpAMD64BSFQ(v) + case OpAMD64BSWAPL: + return rewriteValueAMD64_OpAMD64BSWAPL(v) + case OpAMD64BSWAPQ: + return rewriteValueAMD64_OpAMD64BSWAPQ(v) + case OpAMD64BTCQconst: + return rewriteValueAMD64_OpAMD64BTCQconst(v) + case OpAMD64BTLconst: + return rewriteValueAMD64_OpAMD64BTLconst(v) + case OpAMD64BTQconst: + return rewriteValueAMD64_OpAMD64BTQconst(v) + case OpAMD64BTRQconst: + return rewriteValueAMD64_OpAMD64BTRQconst(v) + case OpAMD64BTSQconst: + return rewriteValueAMD64_OpAMD64BTSQconst(v) + case OpAMD64CMOVLCC: + return rewriteValueAMD64_OpAMD64CMOVLCC(v) + case OpAMD64CMOVLCS: + return rewriteValueAMD64_OpAMD64CMOVLCS(v) + case OpAMD64CMOVLEQ: + return rewriteValueAMD64_OpAMD64CMOVLEQ(v) + case OpAMD64CMOVLGE: + return rewriteValueAMD64_OpAMD64CMOVLGE(v) + case OpAMD64CMOVLGT: + return rewriteValueAMD64_OpAMD64CMOVLGT(v) + case OpAMD64CMOVLHI: + return rewriteValueAMD64_OpAMD64CMOVLHI(v) + case OpAMD64CMOVLLE: + return rewriteValueAMD64_OpAMD64CMOVLLE(v) + case OpAMD64CMOVLLS: + return rewriteValueAMD64_OpAMD64CMOVLLS(v) + case OpAMD64CMOVLLT: + return 
rewriteValueAMD64_OpAMD64CMOVLLT(v) + case OpAMD64CMOVLNE: + return rewriteValueAMD64_OpAMD64CMOVLNE(v) + case OpAMD64CMOVQCC: + return rewriteValueAMD64_OpAMD64CMOVQCC(v) + case OpAMD64CMOVQCS: + return rewriteValueAMD64_OpAMD64CMOVQCS(v) + case OpAMD64CMOVQEQ: + return rewriteValueAMD64_OpAMD64CMOVQEQ(v) + case OpAMD64CMOVQGE: + return rewriteValueAMD64_OpAMD64CMOVQGE(v) + case OpAMD64CMOVQGT: + return rewriteValueAMD64_OpAMD64CMOVQGT(v) + case OpAMD64CMOVQHI: + return rewriteValueAMD64_OpAMD64CMOVQHI(v) + case OpAMD64CMOVQLE: + return rewriteValueAMD64_OpAMD64CMOVQLE(v) + case OpAMD64CMOVQLS: + return rewriteValueAMD64_OpAMD64CMOVQLS(v) + case OpAMD64CMOVQLT: + return rewriteValueAMD64_OpAMD64CMOVQLT(v) + case OpAMD64CMOVQNE: + return rewriteValueAMD64_OpAMD64CMOVQNE(v) + case OpAMD64CMOVWCC: + return rewriteValueAMD64_OpAMD64CMOVWCC(v) + case OpAMD64CMOVWCS: + return rewriteValueAMD64_OpAMD64CMOVWCS(v) + case OpAMD64CMOVWEQ: + return rewriteValueAMD64_OpAMD64CMOVWEQ(v) + case OpAMD64CMOVWGE: + return rewriteValueAMD64_OpAMD64CMOVWGE(v) + case OpAMD64CMOVWGT: + return rewriteValueAMD64_OpAMD64CMOVWGT(v) + case OpAMD64CMOVWHI: + return rewriteValueAMD64_OpAMD64CMOVWHI(v) + case OpAMD64CMOVWLE: + return rewriteValueAMD64_OpAMD64CMOVWLE(v) + case OpAMD64CMOVWLS: + return rewriteValueAMD64_OpAMD64CMOVWLS(v) + case OpAMD64CMOVWLT: + return rewriteValueAMD64_OpAMD64CMOVWLT(v) + case OpAMD64CMOVWNE: + return rewriteValueAMD64_OpAMD64CMOVWNE(v) + case OpAMD64CMPB: + return rewriteValueAMD64_OpAMD64CMPB(v) + case OpAMD64CMPBconst: + return rewriteValueAMD64_OpAMD64CMPBconst(v) + case OpAMD64CMPBconstload: + return rewriteValueAMD64_OpAMD64CMPBconstload(v) + case OpAMD64CMPBload: + return rewriteValueAMD64_OpAMD64CMPBload(v) + case OpAMD64CMPL: + return rewriteValueAMD64_OpAMD64CMPL(v) + case OpAMD64CMPLconst: + return rewriteValueAMD64_OpAMD64CMPLconst(v) + case OpAMD64CMPLconstload: + return rewriteValueAMD64_OpAMD64CMPLconstload(v) + case OpAMD64CMPLload: + return 
rewriteValueAMD64_OpAMD64CMPLload(v) + case OpAMD64CMPQ: + return rewriteValueAMD64_OpAMD64CMPQ(v) + case OpAMD64CMPQconst: + return rewriteValueAMD64_OpAMD64CMPQconst(v) + case OpAMD64CMPQconstload: + return rewriteValueAMD64_OpAMD64CMPQconstload(v) + case OpAMD64CMPQload: + return rewriteValueAMD64_OpAMD64CMPQload(v) + case OpAMD64CMPW: + return rewriteValueAMD64_OpAMD64CMPW(v) + case OpAMD64CMPWconst: + return rewriteValueAMD64_OpAMD64CMPWconst(v) + case OpAMD64CMPWconstload: + return rewriteValueAMD64_OpAMD64CMPWconstload(v) + case OpAMD64CMPWload: + return rewriteValueAMD64_OpAMD64CMPWload(v) + case OpAMD64CMPXCHGLlock: + return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v) + case OpAMD64CMPXCHGQlock: + return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v) + case OpAMD64DIVSD: + return rewriteValueAMD64_OpAMD64DIVSD(v) + case OpAMD64DIVSDload: + return rewriteValueAMD64_OpAMD64DIVSDload(v) + case OpAMD64DIVSS: + return rewriteValueAMD64_OpAMD64DIVSS(v) + case OpAMD64DIVSSload: + return rewriteValueAMD64_OpAMD64DIVSSload(v) + case OpAMD64HMULL: + return rewriteValueAMD64_OpAMD64HMULL(v) + case OpAMD64HMULLU: + return rewriteValueAMD64_OpAMD64HMULLU(v) + case OpAMD64HMULQ: + return rewriteValueAMD64_OpAMD64HMULQ(v) + case OpAMD64HMULQU: + return rewriteValueAMD64_OpAMD64HMULQU(v) + case OpAMD64LEAL: + return rewriteValueAMD64_OpAMD64LEAL(v) + case OpAMD64LEAL1: + return rewriteValueAMD64_OpAMD64LEAL1(v) + case OpAMD64LEAL2: + return rewriteValueAMD64_OpAMD64LEAL2(v) + case OpAMD64LEAL4: + return rewriteValueAMD64_OpAMD64LEAL4(v) + case OpAMD64LEAL8: + return rewriteValueAMD64_OpAMD64LEAL8(v) + case OpAMD64LEAQ: + return rewriteValueAMD64_OpAMD64LEAQ(v) + case OpAMD64LEAQ1: + return rewriteValueAMD64_OpAMD64LEAQ1(v) + case OpAMD64LEAQ2: + return rewriteValueAMD64_OpAMD64LEAQ2(v) + case OpAMD64LEAQ4: + return rewriteValueAMD64_OpAMD64LEAQ4(v) + case OpAMD64LEAQ8: + return rewriteValueAMD64_OpAMD64LEAQ8(v) + case OpAMD64MOVBELstore: + return 
rewriteValueAMD64_OpAMD64MOVBELstore(v) + case OpAMD64MOVBEQstore: + return rewriteValueAMD64_OpAMD64MOVBEQstore(v) + case OpAMD64MOVBEWstore: + return rewriteValueAMD64_OpAMD64MOVBEWstore(v) + case OpAMD64MOVBQSX: + return rewriteValueAMD64_OpAMD64MOVBQSX(v) + case OpAMD64MOVBQSXload: + return rewriteValueAMD64_OpAMD64MOVBQSXload(v) + case OpAMD64MOVBQZX: + return rewriteValueAMD64_OpAMD64MOVBQZX(v) + case OpAMD64MOVBatomicload: + return rewriteValueAMD64_OpAMD64MOVBatomicload(v) + case OpAMD64MOVBload: + return rewriteValueAMD64_OpAMD64MOVBload(v) + case OpAMD64MOVBstore: + return rewriteValueAMD64_OpAMD64MOVBstore(v) + case OpAMD64MOVBstoreconst: + return rewriteValueAMD64_OpAMD64MOVBstoreconst(v) + case OpAMD64MOVLQSX: + return rewriteValueAMD64_OpAMD64MOVLQSX(v) + case OpAMD64MOVLQSXload: + return rewriteValueAMD64_OpAMD64MOVLQSXload(v) + case OpAMD64MOVLQZX: + return rewriteValueAMD64_OpAMD64MOVLQZX(v) + case OpAMD64MOVLatomicload: + return rewriteValueAMD64_OpAMD64MOVLatomicload(v) + case OpAMD64MOVLf2i: + return rewriteValueAMD64_OpAMD64MOVLf2i(v) + case OpAMD64MOVLi2f: + return rewriteValueAMD64_OpAMD64MOVLi2f(v) + case OpAMD64MOVLload: + return rewriteValueAMD64_OpAMD64MOVLload(v) + case OpAMD64MOVLstore: + return rewriteValueAMD64_OpAMD64MOVLstore(v) + case OpAMD64MOVLstoreconst: + return rewriteValueAMD64_OpAMD64MOVLstoreconst(v) + case OpAMD64MOVOload: + return rewriteValueAMD64_OpAMD64MOVOload(v) + case OpAMD64MOVOstore: + return rewriteValueAMD64_OpAMD64MOVOstore(v) + case OpAMD64MOVOstoreconst: + return rewriteValueAMD64_OpAMD64MOVOstoreconst(v) + case OpAMD64MOVQatomicload: + return rewriteValueAMD64_OpAMD64MOVQatomicload(v) + case OpAMD64MOVQf2i: + return rewriteValueAMD64_OpAMD64MOVQf2i(v) + case OpAMD64MOVQi2f: + return rewriteValueAMD64_OpAMD64MOVQi2f(v) + case OpAMD64MOVQload: + return rewriteValueAMD64_OpAMD64MOVQload(v) + case OpAMD64MOVQstore: + return rewriteValueAMD64_OpAMD64MOVQstore(v) + case OpAMD64MOVQstoreconst: + return 
rewriteValueAMD64_OpAMD64MOVQstoreconst(v) + case OpAMD64MOVSDload: + return rewriteValueAMD64_OpAMD64MOVSDload(v) + case OpAMD64MOVSDstore: + return rewriteValueAMD64_OpAMD64MOVSDstore(v) + case OpAMD64MOVSSload: + return rewriteValueAMD64_OpAMD64MOVSSload(v) + case OpAMD64MOVSSstore: + return rewriteValueAMD64_OpAMD64MOVSSstore(v) + case OpAMD64MOVWQSX: + return rewriteValueAMD64_OpAMD64MOVWQSX(v) + case OpAMD64MOVWQSXload: + return rewriteValueAMD64_OpAMD64MOVWQSXload(v) + case OpAMD64MOVWQZX: + return rewriteValueAMD64_OpAMD64MOVWQZX(v) + case OpAMD64MOVWload: + return rewriteValueAMD64_OpAMD64MOVWload(v) + case OpAMD64MOVWstore: + return rewriteValueAMD64_OpAMD64MOVWstore(v) + case OpAMD64MOVWstoreconst: + return rewriteValueAMD64_OpAMD64MOVWstoreconst(v) + case OpAMD64MULL: + return rewriteValueAMD64_OpAMD64MULL(v) + case OpAMD64MULLconst: + return rewriteValueAMD64_OpAMD64MULLconst(v) + case OpAMD64MULQ: + return rewriteValueAMD64_OpAMD64MULQ(v) + case OpAMD64MULQconst: + return rewriteValueAMD64_OpAMD64MULQconst(v) + case OpAMD64MULSD: + return rewriteValueAMD64_OpAMD64MULSD(v) + case OpAMD64MULSDload: + return rewriteValueAMD64_OpAMD64MULSDload(v) + case OpAMD64MULSS: + return rewriteValueAMD64_OpAMD64MULSS(v) + case OpAMD64MULSSload: + return rewriteValueAMD64_OpAMD64MULSSload(v) + case OpAMD64NEGL: + return rewriteValueAMD64_OpAMD64NEGL(v) + case OpAMD64NEGQ: + return rewriteValueAMD64_OpAMD64NEGQ(v) + case OpAMD64NOTL: + return rewriteValueAMD64_OpAMD64NOTL(v) + case OpAMD64NOTQ: + return rewriteValueAMD64_OpAMD64NOTQ(v) + case OpAMD64ORL: + return rewriteValueAMD64_OpAMD64ORL(v) + case OpAMD64ORLconst: + return rewriteValueAMD64_OpAMD64ORLconst(v) + case OpAMD64ORLconstmodify: + return rewriteValueAMD64_OpAMD64ORLconstmodify(v) + case OpAMD64ORLload: + return rewriteValueAMD64_OpAMD64ORLload(v) + case OpAMD64ORLmodify: + return rewriteValueAMD64_OpAMD64ORLmodify(v) + case OpAMD64ORQ: + return rewriteValueAMD64_OpAMD64ORQ(v) + case OpAMD64ORQconst: + 
return rewriteValueAMD64_OpAMD64ORQconst(v) + case OpAMD64ORQconstmodify: + return rewriteValueAMD64_OpAMD64ORQconstmodify(v) + case OpAMD64ORQload: + return rewriteValueAMD64_OpAMD64ORQload(v) + case OpAMD64ORQmodify: + return rewriteValueAMD64_OpAMD64ORQmodify(v) + case OpAMD64ROLB: + return rewriteValueAMD64_OpAMD64ROLB(v) + case OpAMD64ROLBconst: + return rewriteValueAMD64_OpAMD64ROLBconst(v) + case OpAMD64ROLL: + return rewriteValueAMD64_OpAMD64ROLL(v) + case OpAMD64ROLLconst: + return rewriteValueAMD64_OpAMD64ROLLconst(v) + case OpAMD64ROLQ: + return rewriteValueAMD64_OpAMD64ROLQ(v) + case OpAMD64ROLQconst: + return rewriteValueAMD64_OpAMD64ROLQconst(v) + case OpAMD64ROLW: + return rewriteValueAMD64_OpAMD64ROLW(v) + case OpAMD64ROLWconst: + return rewriteValueAMD64_OpAMD64ROLWconst(v) + case OpAMD64RORB: + return rewriteValueAMD64_OpAMD64RORB(v) + case OpAMD64RORL: + return rewriteValueAMD64_OpAMD64RORL(v) + case OpAMD64RORQ: + return rewriteValueAMD64_OpAMD64RORQ(v) + case OpAMD64RORW: + return rewriteValueAMD64_OpAMD64RORW(v) + case OpAMD64SARB: + return rewriteValueAMD64_OpAMD64SARB(v) + case OpAMD64SARBconst: + return rewriteValueAMD64_OpAMD64SARBconst(v) + case OpAMD64SARL: + return rewriteValueAMD64_OpAMD64SARL(v) + case OpAMD64SARLconst: + return rewriteValueAMD64_OpAMD64SARLconst(v) + case OpAMD64SARQ: + return rewriteValueAMD64_OpAMD64SARQ(v) + case OpAMD64SARQconst: + return rewriteValueAMD64_OpAMD64SARQconst(v) + case OpAMD64SARW: + return rewriteValueAMD64_OpAMD64SARW(v) + case OpAMD64SARWconst: + return rewriteValueAMD64_OpAMD64SARWconst(v) + case OpAMD64SARXLload: + return rewriteValueAMD64_OpAMD64SARXLload(v) + case OpAMD64SARXQload: + return rewriteValueAMD64_OpAMD64SARXQload(v) + case OpAMD64SBBLcarrymask: + return rewriteValueAMD64_OpAMD64SBBLcarrymask(v) + case OpAMD64SBBQ: + return rewriteValueAMD64_OpAMD64SBBQ(v) + case OpAMD64SBBQcarrymask: + return rewriteValueAMD64_OpAMD64SBBQcarrymask(v) + case OpAMD64SBBQconst: + return 
rewriteValueAMD64_OpAMD64SBBQconst(v) + case OpAMD64SETA: + return rewriteValueAMD64_OpAMD64SETA(v) + case OpAMD64SETAE: + return rewriteValueAMD64_OpAMD64SETAE(v) + case OpAMD64SETAEstore: + return rewriteValueAMD64_OpAMD64SETAEstore(v) + case OpAMD64SETAstore: + return rewriteValueAMD64_OpAMD64SETAstore(v) + case OpAMD64SETB: + return rewriteValueAMD64_OpAMD64SETB(v) + case OpAMD64SETBE: + return rewriteValueAMD64_OpAMD64SETBE(v) + case OpAMD64SETBEstore: + return rewriteValueAMD64_OpAMD64SETBEstore(v) + case OpAMD64SETBstore: + return rewriteValueAMD64_OpAMD64SETBstore(v) + case OpAMD64SETEQ: + return rewriteValueAMD64_OpAMD64SETEQ(v) + case OpAMD64SETEQstore: + return rewriteValueAMD64_OpAMD64SETEQstore(v) + case OpAMD64SETG: + return rewriteValueAMD64_OpAMD64SETG(v) + case OpAMD64SETGE: + return rewriteValueAMD64_OpAMD64SETGE(v) + case OpAMD64SETGEstore: + return rewriteValueAMD64_OpAMD64SETGEstore(v) + case OpAMD64SETGstore: + return rewriteValueAMD64_OpAMD64SETGstore(v) + case OpAMD64SETL: + return rewriteValueAMD64_OpAMD64SETL(v) + case OpAMD64SETLE: + return rewriteValueAMD64_OpAMD64SETLE(v) + case OpAMD64SETLEstore: + return rewriteValueAMD64_OpAMD64SETLEstore(v) + case OpAMD64SETLstore: + return rewriteValueAMD64_OpAMD64SETLstore(v) + case OpAMD64SETNE: + return rewriteValueAMD64_OpAMD64SETNE(v) + case OpAMD64SETNEstore: + return rewriteValueAMD64_OpAMD64SETNEstore(v) + case OpAMD64SHLL: + return rewriteValueAMD64_OpAMD64SHLL(v) + case OpAMD64SHLLconst: + return rewriteValueAMD64_OpAMD64SHLLconst(v) + case OpAMD64SHLQ: + return rewriteValueAMD64_OpAMD64SHLQ(v) + case OpAMD64SHLQconst: + return rewriteValueAMD64_OpAMD64SHLQconst(v) + case OpAMD64SHLXLload: + return rewriteValueAMD64_OpAMD64SHLXLload(v) + case OpAMD64SHLXQload: + return rewriteValueAMD64_OpAMD64SHLXQload(v) + case OpAMD64SHRB: + return rewriteValueAMD64_OpAMD64SHRB(v) + case OpAMD64SHRBconst: + return rewriteValueAMD64_OpAMD64SHRBconst(v) + case OpAMD64SHRL: + return 
rewriteValueAMD64_OpAMD64SHRL(v) + case OpAMD64SHRLconst: + return rewriteValueAMD64_OpAMD64SHRLconst(v) + case OpAMD64SHRQ: + return rewriteValueAMD64_OpAMD64SHRQ(v) + case OpAMD64SHRQconst: + return rewriteValueAMD64_OpAMD64SHRQconst(v) + case OpAMD64SHRW: + return rewriteValueAMD64_OpAMD64SHRW(v) + case OpAMD64SHRWconst: + return rewriteValueAMD64_OpAMD64SHRWconst(v) + case OpAMD64SHRXLload: + return rewriteValueAMD64_OpAMD64SHRXLload(v) + case OpAMD64SHRXQload: + return rewriteValueAMD64_OpAMD64SHRXQload(v) + case OpAMD64SUBL: + return rewriteValueAMD64_OpAMD64SUBL(v) + case OpAMD64SUBLconst: + return rewriteValueAMD64_OpAMD64SUBLconst(v) + case OpAMD64SUBLload: + return rewriteValueAMD64_OpAMD64SUBLload(v) + case OpAMD64SUBLmodify: + return rewriteValueAMD64_OpAMD64SUBLmodify(v) + case OpAMD64SUBQ: + return rewriteValueAMD64_OpAMD64SUBQ(v) + case OpAMD64SUBQborrow: + return rewriteValueAMD64_OpAMD64SUBQborrow(v) + case OpAMD64SUBQconst: + return rewriteValueAMD64_OpAMD64SUBQconst(v) + case OpAMD64SUBQload: + return rewriteValueAMD64_OpAMD64SUBQload(v) + case OpAMD64SUBQmodify: + return rewriteValueAMD64_OpAMD64SUBQmodify(v) + case OpAMD64SUBSD: + return rewriteValueAMD64_OpAMD64SUBSD(v) + case OpAMD64SUBSDload: + return rewriteValueAMD64_OpAMD64SUBSDload(v) + case OpAMD64SUBSS: + return rewriteValueAMD64_OpAMD64SUBSS(v) + case OpAMD64SUBSSload: + return rewriteValueAMD64_OpAMD64SUBSSload(v) + case OpAMD64TESTB: + return rewriteValueAMD64_OpAMD64TESTB(v) + case OpAMD64TESTBconst: + return rewriteValueAMD64_OpAMD64TESTBconst(v) + case OpAMD64TESTL: + return rewriteValueAMD64_OpAMD64TESTL(v) + case OpAMD64TESTLconst: + return rewriteValueAMD64_OpAMD64TESTLconst(v) + case OpAMD64TESTQ: + return rewriteValueAMD64_OpAMD64TESTQ(v) + case OpAMD64TESTQconst: + return rewriteValueAMD64_OpAMD64TESTQconst(v) + case OpAMD64TESTW: + return rewriteValueAMD64_OpAMD64TESTW(v) + case OpAMD64TESTWconst: + return rewriteValueAMD64_OpAMD64TESTWconst(v) + case OpAMD64XADDLlock: + 
return rewriteValueAMD64_OpAMD64XADDLlock(v) + case OpAMD64XADDQlock: + return rewriteValueAMD64_OpAMD64XADDQlock(v) + case OpAMD64XCHGL: + return rewriteValueAMD64_OpAMD64XCHGL(v) + case OpAMD64XCHGQ: + return rewriteValueAMD64_OpAMD64XCHGQ(v) + case OpAMD64XORL: + return rewriteValueAMD64_OpAMD64XORL(v) + case OpAMD64XORLconst: + return rewriteValueAMD64_OpAMD64XORLconst(v) + case OpAMD64XORLconstmodify: + return rewriteValueAMD64_OpAMD64XORLconstmodify(v) + case OpAMD64XORLload: + return rewriteValueAMD64_OpAMD64XORLload(v) + case OpAMD64XORLmodify: + return rewriteValueAMD64_OpAMD64XORLmodify(v) + case OpAMD64XORQ: + return rewriteValueAMD64_OpAMD64XORQ(v) + case OpAMD64XORQconst: + return rewriteValueAMD64_OpAMD64XORQconst(v) + case OpAMD64XORQconstmodify: + return rewriteValueAMD64_OpAMD64XORQconstmodify(v) + case OpAMD64XORQload: + return rewriteValueAMD64_OpAMD64XORQload(v) + case OpAMD64XORQmodify: + return rewriteValueAMD64_OpAMD64XORQmodify(v) + case OpAdd16: + v.Op = OpAMD64ADDL + return true + case OpAdd32: + v.Op = OpAMD64ADDL + return true + case OpAdd32F: + v.Op = OpAMD64ADDSS + return true + case OpAdd64: + v.Op = OpAMD64ADDQ + return true + case OpAdd64F: + v.Op = OpAMD64ADDSD + return true + case OpAdd8: + v.Op = OpAMD64ADDL + return true + case OpAddPtr: + v.Op = OpAMD64ADDQ + return true + case OpAddr: + return rewriteValueAMD64_OpAddr(v) + case OpAnd16: + v.Op = OpAMD64ANDL + return true + case OpAnd32: + v.Op = OpAMD64ANDL + return true + case OpAnd64: + v.Op = OpAMD64ANDQ + return true + case OpAnd8: + v.Op = OpAMD64ANDL + return true + case OpAndB: + v.Op = OpAMD64ANDL + return true + case OpAtomicAdd32: + return rewriteValueAMD64_OpAtomicAdd32(v) + case OpAtomicAdd64: + return rewriteValueAMD64_OpAtomicAdd64(v) + case OpAtomicAnd32: + return rewriteValueAMD64_OpAtomicAnd32(v) + case OpAtomicAnd8: + return rewriteValueAMD64_OpAtomicAnd8(v) + case OpAtomicCompareAndSwap32: + return rewriteValueAMD64_OpAtomicCompareAndSwap32(v) + case 
OpAtomicCompareAndSwap64: + return rewriteValueAMD64_OpAtomicCompareAndSwap64(v) + case OpAtomicExchange32: + return rewriteValueAMD64_OpAtomicExchange32(v) + case OpAtomicExchange64: + return rewriteValueAMD64_OpAtomicExchange64(v) + case OpAtomicLoad32: + return rewriteValueAMD64_OpAtomicLoad32(v) + case OpAtomicLoad64: + return rewriteValueAMD64_OpAtomicLoad64(v) + case OpAtomicLoad8: + return rewriteValueAMD64_OpAtomicLoad8(v) + case OpAtomicLoadPtr: + return rewriteValueAMD64_OpAtomicLoadPtr(v) + case OpAtomicOr32: + return rewriteValueAMD64_OpAtomicOr32(v) + case OpAtomicOr8: + return rewriteValueAMD64_OpAtomicOr8(v) + case OpAtomicStore32: + return rewriteValueAMD64_OpAtomicStore32(v) + case OpAtomicStore64: + return rewriteValueAMD64_OpAtomicStore64(v) + case OpAtomicStore8: + return rewriteValueAMD64_OpAtomicStore8(v) + case OpAtomicStorePtrNoWB: + return rewriteValueAMD64_OpAtomicStorePtrNoWB(v) + case OpAvg64u: + v.Op = OpAMD64AVGQU + return true + case OpBitLen16: + return rewriteValueAMD64_OpBitLen16(v) + case OpBitLen32: + return rewriteValueAMD64_OpBitLen32(v) + case OpBitLen64: + return rewriteValueAMD64_OpBitLen64(v) + case OpBitLen8: + return rewriteValueAMD64_OpBitLen8(v) + case OpBswap16: + return rewriteValueAMD64_OpBswap16(v) + case OpBswap32: + v.Op = OpAMD64BSWAPL + return true + case OpBswap64: + v.Op = OpAMD64BSWAPQ + return true + case OpCeil: + return rewriteValueAMD64_OpCeil(v) + case OpClosureCall: + v.Op = OpAMD64CALLclosure + return true + case OpCom16: + v.Op = OpAMD64NOTL + return true + case OpCom32: + v.Op = OpAMD64NOTL + return true + case OpCom64: + v.Op = OpAMD64NOTQ + return true + case OpCom8: + v.Op = OpAMD64NOTL + return true + case OpCondSelect: + return rewriteValueAMD64_OpCondSelect(v) + case OpConst16: + return rewriteValueAMD64_OpConst16(v) + case OpConst32: + v.Op = OpAMD64MOVLconst + return true + case OpConst32F: + v.Op = OpAMD64MOVSSconst + return true + case OpConst64: + v.Op = OpAMD64MOVQconst + return true + 
case OpConst64F: + v.Op = OpAMD64MOVSDconst + return true + case OpConst8: + return rewriteValueAMD64_OpConst8(v) + case OpConstBool: + return rewriteValueAMD64_OpConstBool(v) + case OpConstNil: + return rewriteValueAMD64_OpConstNil(v) + case OpCtz16: + return rewriteValueAMD64_OpCtz16(v) + case OpCtz16NonZero: + return rewriteValueAMD64_OpCtz16NonZero(v) + case OpCtz32: + return rewriteValueAMD64_OpCtz32(v) + case OpCtz32NonZero: + return rewriteValueAMD64_OpCtz32NonZero(v) + case OpCtz64: + return rewriteValueAMD64_OpCtz64(v) + case OpCtz64NonZero: + return rewriteValueAMD64_OpCtz64NonZero(v) + case OpCtz8: + return rewriteValueAMD64_OpCtz8(v) + case OpCtz8NonZero: + return rewriteValueAMD64_OpCtz8NonZero(v) + case OpCvt32Fto32: + v.Op = OpAMD64CVTTSS2SL + return true + case OpCvt32Fto64: + v.Op = OpAMD64CVTTSS2SQ + return true + case OpCvt32Fto64F: + v.Op = OpAMD64CVTSS2SD + return true + case OpCvt32to32F: + v.Op = OpAMD64CVTSL2SS + return true + case OpCvt32to64F: + v.Op = OpAMD64CVTSL2SD + return true + case OpCvt64Fto32: + v.Op = OpAMD64CVTTSD2SL + return true + case OpCvt64Fto32F: + v.Op = OpAMD64CVTSD2SS + return true + case OpCvt64Fto64: + v.Op = OpAMD64CVTTSD2SQ + return true + case OpCvt64to32F: + v.Op = OpAMD64CVTSQ2SS + return true + case OpCvt64to64F: + v.Op = OpAMD64CVTSQ2SD + return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true + case OpDiv128u: + v.Op = OpAMD64DIVQU2 + return true + case OpDiv16: + return rewriteValueAMD64_OpDiv16(v) + case OpDiv16u: + return rewriteValueAMD64_OpDiv16u(v) + case OpDiv32: + return rewriteValueAMD64_OpDiv32(v) + case OpDiv32F: + v.Op = OpAMD64DIVSS + return true + case OpDiv32u: + return rewriteValueAMD64_OpDiv32u(v) + case OpDiv64: + return rewriteValueAMD64_OpDiv64(v) + case OpDiv64F: + v.Op = OpAMD64DIVSD + return true + case OpDiv64u: + return rewriteValueAMD64_OpDiv64u(v) + case OpDiv8: + return rewriteValueAMD64_OpDiv8(v) + case OpDiv8u: + return rewriteValueAMD64_OpDiv8u(v) + case OpEq16: + 
return rewriteValueAMD64_OpEq16(v) + case OpEq32: + return rewriteValueAMD64_OpEq32(v) + case OpEq32F: + return rewriteValueAMD64_OpEq32F(v) + case OpEq64: + return rewriteValueAMD64_OpEq64(v) + case OpEq64F: + return rewriteValueAMD64_OpEq64F(v) + case OpEq8: + return rewriteValueAMD64_OpEq8(v) + case OpEqB: + return rewriteValueAMD64_OpEqB(v) + case OpEqPtr: + return rewriteValueAMD64_OpEqPtr(v) + case OpFMA: + return rewriteValueAMD64_OpFMA(v) + case OpFloor: + return rewriteValueAMD64_OpFloor(v) + case OpGetCallerPC: + v.Op = OpAMD64LoweredGetCallerPC + return true + case OpGetCallerSP: + v.Op = OpAMD64LoweredGetCallerSP + return true + case OpGetClosurePtr: + v.Op = OpAMD64LoweredGetClosurePtr + return true + case OpGetG: + return rewriteValueAMD64_OpGetG(v) + case OpHasCPUFeature: + return rewriteValueAMD64_OpHasCPUFeature(v) + case OpHmul32: + v.Op = OpAMD64HMULL + return true + case OpHmul32u: + v.Op = OpAMD64HMULLU + return true + case OpHmul64: + v.Op = OpAMD64HMULQ + return true + case OpHmul64u: + v.Op = OpAMD64HMULQU + return true + case OpInterCall: + v.Op = OpAMD64CALLinter + return true + case OpIsInBounds: + return rewriteValueAMD64_OpIsInBounds(v) + case OpIsNonNil: + return rewriteValueAMD64_OpIsNonNil(v) + case OpIsSliceInBounds: + return rewriteValueAMD64_OpIsSliceInBounds(v) + case OpLeq16: + return rewriteValueAMD64_OpLeq16(v) + case OpLeq16U: + return rewriteValueAMD64_OpLeq16U(v) + case OpLeq32: + return rewriteValueAMD64_OpLeq32(v) + case OpLeq32F: + return rewriteValueAMD64_OpLeq32F(v) + case OpLeq32U: + return rewriteValueAMD64_OpLeq32U(v) + case OpLeq64: + return rewriteValueAMD64_OpLeq64(v) + case OpLeq64F: + return rewriteValueAMD64_OpLeq64F(v) + case OpLeq64U: + return rewriteValueAMD64_OpLeq64U(v) + case OpLeq8: + return rewriteValueAMD64_OpLeq8(v) + case OpLeq8U: + return rewriteValueAMD64_OpLeq8U(v) + case OpLess16: + return rewriteValueAMD64_OpLess16(v) + case OpLess16U: + return rewriteValueAMD64_OpLess16U(v) + case OpLess32: + 
return rewriteValueAMD64_OpLess32(v) + case OpLess32F: + return rewriteValueAMD64_OpLess32F(v) + case OpLess32U: + return rewriteValueAMD64_OpLess32U(v) + case OpLess64: + return rewriteValueAMD64_OpLess64(v) + case OpLess64F: + return rewriteValueAMD64_OpLess64F(v) + case OpLess64U: + return rewriteValueAMD64_OpLess64U(v) + case OpLess8: + return rewriteValueAMD64_OpLess8(v) + case OpLess8U: + return rewriteValueAMD64_OpLess8U(v) + case OpLoad: + return rewriteValueAMD64_OpLoad(v) + case OpLocalAddr: + return rewriteValueAMD64_OpLocalAddr(v) + case OpLsh16x16: + return rewriteValueAMD64_OpLsh16x16(v) + case OpLsh16x32: + return rewriteValueAMD64_OpLsh16x32(v) + case OpLsh16x64: + return rewriteValueAMD64_OpLsh16x64(v) + case OpLsh16x8: + return rewriteValueAMD64_OpLsh16x8(v) + case OpLsh32x16: + return rewriteValueAMD64_OpLsh32x16(v) + case OpLsh32x32: + return rewriteValueAMD64_OpLsh32x32(v) + case OpLsh32x64: + return rewriteValueAMD64_OpLsh32x64(v) + case OpLsh32x8: + return rewriteValueAMD64_OpLsh32x8(v) + case OpLsh64x16: + return rewriteValueAMD64_OpLsh64x16(v) + case OpLsh64x32: + return rewriteValueAMD64_OpLsh64x32(v) + case OpLsh64x64: + return rewriteValueAMD64_OpLsh64x64(v) + case OpLsh64x8: + return rewriteValueAMD64_OpLsh64x8(v) + case OpLsh8x16: + return rewriteValueAMD64_OpLsh8x16(v) + case OpLsh8x32: + return rewriteValueAMD64_OpLsh8x32(v) + case OpLsh8x64: + return rewriteValueAMD64_OpLsh8x64(v) + case OpLsh8x8: + return rewriteValueAMD64_OpLsh8x8(v) + case OpMax32F: + return rewriteValueAMD64_OpMax32F(v) + case OpMax64F: + return rewriteValueAMD64_OpMax64F(v) + case OpMin32F: + return rewriteValueAMD64_OpMin32F(v) + case OpMin64F: + return rewriteValueAMD64_OpMin64F(v) + case OpMod16: + return rewriteValueAMD64_OpMod16(v) + case OpMod16u: + return rewriteValueAMD64_OpMod16u(v) + case OpMod32: + return rewriteValueAMD64_OpMod32(v) + case OpMod32u: + return rewriteValueAMD64_OpMod32u(v) + case OpMod64: + return rewriteValueAMD64_OpMod64(v) + case 
OpMod64u: + return rewriteValueAMD64_OpMod64u(v) + case OpMod8: + return rewriteValueAMD64_OpMod8(v) + case OpMod8u: + return rewriteValueAMD64_OpMod8u(v) + case OpMove: + return rewriteValueAMD64_OpMove(v) + case OpMul16: + v.Op = OpAMD64MULL + return true + case OpMul32: + v.Op = OpAMD64MULL + return true + case OpMul32F: + v.Op = OpAMD64MULSS + return true + case OpMul64: + v.Op = OpAMD64MULQ + return true + case OpMul64F: + v.Op = OpAMD64MULSD + return true + case OpMul64uhilo: + v.Op = OpAMD64MULQU2 + return true + case OpMul8: + v.Op = OpAMD64MULL + return true + case OpNeg16: + v.Op = OpAMD64NEGL + return true + case OpNeg32: + v.Op = OpAMD64NEGL + return true + case OpNeg32F: + return rewriteValueAMD64_OpNeg32F(v) + case OpNeg64: + v.Op = OpAMD64NEGQ + return true + case OpNeg64F: + return rewriteValueAMD64_OpNeg64F(v) + case OpNeg8: + v.Op = OpAMD64NEGL + return true + case OpNeq16: + return rewriteValueAMD64_OpNeq16(v) + case OpNeq32: + return rewriteValueAMD64_OpNeq32(v) + case OpNeq32F: + return rewriteValueAMD64_OpNeq32F(v) + case OpNeq64: + return rewriteValueAMD64_OpNeq64(v) + case OpNeq64F: + return rewriteValueAMD64_OpNeq64F(v) + case OpNeq8: + return rewriteValueAMD64_OpNeq8(v) + case OpNeqB: + return rewriteValueAMD64_OpNeqB(v) + case OpNeqPtr: + return rewriteValueAMD64_OpNeqPtr(v) + case OpNilCheck: + v.Op = OpAMD64LoweredNilCheck + return true + case OpNot: + return rewriteValueAMD64_OpNot(v) + case OpOffPtr: + return rewriteValueAMD64_OpOffPtr(v) + case OpOr16: + v.Op = OpAMD64ORL + return true + case OpOr32: + v.Op = OpAMD64ORL + return true + case OpOr64: + v.Op = OpAMD64ORQ + return true + case OpOr8: + v.Op = OpAMD64ORL + return true + case OpOrB: + v.Op = OpAMD64ORL + return true + case OpPanicBounds: + return rewriteValueAMD64_OpPanicBounds(v) + case OpPopCount16: + return rewriteValueAMD64_OpPopCount16(v) + case OpPopCount32: + v.Op = OpAMD64POPCNTL + return true + case OpPopCount64: + v.Op = OpAMD64POPCNTQ + return true + case 
OpPopCount8: + return rewriteValueAMD64_OpPopCount8(v) + case OpPrefetchCache: + v.Op = OpAMD64PrefetchT0 + return true + case OpPrefetchCacheStreamed: + v.Op = OpAMD64PrefetchNTA + return true + case OpRotateLeft16: + v.Op = OpAMD64ROLW + return true + case OpRotateLeft32: + v.Op = OpAMD64ROLL + return true + case OpRotateLeft64: + v.Op = OpAMD64ROLQ + return true + case OpRotateLeft8: + v.Op = OpAMD64ROLB + return true + case OpRound32F: + v.Op = OpCopy + return true + case OpRound64F: + v.Op = OpCopy + return true + case OpRoundToEven: + return rewriteValueAMD64_OpRoundToEven(v) + case OpRsh16Ux16: + return rewriteValueAMD64_OpRsh16Ux16(v) + case OpRsh16Ux32: + return rewriteValueAMD64_OpRsh16Ux32(v) + case OpRsh16Ux64: + return rewriteValueAMD64_OpRsh16Ux64(v) + case OpRsh16Ux8: + return rewriteValueAMD64_OpRsh16Ux8(v) + case OpRsh16x16: + return rewriteValueAMD64_OpRsh16x16(v) + case OpRsh16x32: + return rewriteValueAMD64_OpRsh16x32(v) + case OpRsh16x64: + return rewriteValueAMD64_OpRsh16x64(v) + case OpRsh16x8: + return rewriteValueAMD64_OpRsh16x8(v) + case OpRsh32Ux16: + return rewriteValueAMD64_OpRsh32Ux16(v) + case OpRsh32Ux32: + return rewriteValueAMD64_OpRsh32Ux32(v) + case OpRsh32Ux64: + return rewriteValueAMD64_OpRsh32Ux64(v) + case OpRsh32Ux8: + return rewriteValueAMD64_OpRsh32Ux8(v) + case OpRsh32x16: + return rewriteValueAMD64_OpRsh32x16(v) + case OpRsh32x32: + return rewriteValueAMD64_OpRsh32x32(v) + case OpRsh32x64: + return rewriteValueAMD64_OpRsh32x64(v) + case OpRsh32x8: + return rewriteValueAMD64_OpRsh32x8(v) + case OpRsh64Ux16: + return rewriteValueAMD64_OpRsh64Ux16(v) + case OpRsh64Ux32: + return rewriteValueAMD64_OpRsh64Ux32(v) + case OpRsh64Ux64: + return rewriteValueAMD64_OpRsh64Ux64(v) + case OpRsh64Ux8: + return rewriteValueAMD64_OpRsh64Ux8(v) + case OpRsh64x16: + return rewriteValueAMD64_OpRsh64x16(v) + case OpRsh64x32: + return rewriteValueAMD64_OpRsh64x32(v) + case OpRsh64x64: + return rewriteValueAMD64_OpRsh64x64(v) + case 
OpRsh64x8: + return rewriteValueAMD64_OpRsh64x8(v) + case OpRsh8Ux16: + return rewriteValueAMD64_OpRsh8Ux16(v) + case OpRsh8Ux32: + return rewriteValueAMD64_OpRsh8Ux32(v) + case OpRsh8Ux64: + return rewriteValueAMD64_OpRsh8Ux64(v) + case OpRsh8Ux8: + return rewriteValueAMD64_OpRsh8Ux8(v) + case OpRsh8x16: + return rewriteValueAMD64_OpRsh8x16(v) + case OpRsh8x32: + return rewriteValueAMD64_OpRsh8x32(v) + case OpRsh8x64: + return rewriteValueAMD64_OpRsh8x64(v) + case OpRsh8x8: + return rewriteValueAMD64_OpRsh8x8(v) + case OpSelect0: + return rewriteValueAMD64_OpSelect0(v) + case OpSelect1: + return rewriteValueAMD64_OpSelect1(v) + case OpSelectN: + return rewriteValueAMD64_OpSelectN(v) + case OpSignExt16to32: + v.Op = OpAMD64MOVWQSX + return true + case OpSignExt16to64: + v.Op = OpAMD64MOVWQSX + return true + case OpSignExt32to64: + v.Op = OpAMD64MOVLQSX + return true + case OpSignExt8to16: + v.Op = OpAMD64MOVBQSX + return true + case OpSignExt8to32: + v.Op = OpAMD64MOVBQSX + return true + case OpSignExt8to64: + v.Op = OpAMD64MOVBQSX + return true + case OpSlicemask: + return rewriteValueAMD64_OpSlicemask(v) + case OpSpectreIndex: + return rewriteValueAMD64_OpSpectreIndex(v) + case OpSpectreSliceIndex: + return rewriteValueAMD64_OpSpectreSliceIndex(v) + case OpSqrt: + v.Op = OpAMD64SQRTSD + return true + case OpSqrt32: + v.Op = OpAMD64SQRTSS + return true + case OpStaticCall: + v.Op = OpAMD64CALLstatic + return true + case OpStore: + return rewriteValueAMD64_OpStore(v) + case OpSub16: + v.Op = OpAMD64SUBL + return true + case OpSub32: + v.Op = OpAMD64SUBL + return true + case OpSub32F: + v.Op = OpAMD64SUBSS + return true + case OpSub64: + v.Op = OpAMD64SUBQ + return true + case OpSub64F: + v.Op = OpAMD64SUBSD + return true + case OpSub8: + v.Op = OpAMD64SUBL + return true + case OpSubPtr: + v.Op = OpAMD64SUBQ + return true + case OpTailCall: + v.Op = OpAMD64CALLtail + return true + case OpTrunc: + return rewriteValueAMD64_OpTrunc(v) + case OpTrunc16to8: + v.Op = 
OpCopy + return true + case OpTrunc32to16: + v.Op = OpCopy + return true + case OpTrunc32to8: + v.Op = OpCopy + return true + case OpTrunc64to16: + v.Op = OpCopy + return true + case OpTrunc64to32: + v.Op = OpCopy + return true + case OpTrunc64to8: + v.Op = OpCopy + return true + case OpWB: + v.Op = OpAMD64LoweredWB + return true + case OpXor16: + v.Op = OpAMD64XORL + return true + case OpXor32: + v.Op = OpAMD64XORL + return true + case OpXor64: + v.Op = OpAMD64XORQ + return true + case OpXor8: + v.Op = OpAMD64XORL + return true + case OpZero: + return rewriteValueAMD64_OpZero(v) + case OpZeroExt16to32: + v.Op = OpAMD64MOVWQZX + return true + case OpZeroExt16to64: + v.Op = OpAMD64MOVWQZX + return true + case OpZeroExt32to64: + v.Op = OpAMD64MOVLQZX + return true + case OpZeroExt8to16: + v.Op = OpAMD64MOVBQZX + return true + case OpZeroExt8to32: + v.Op = OpAMD64MOVBQZX + return true + case OpZeroExt8to64: + v.Op = OpAMD64MOVBQZX + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADCQ x (MOVQconst [c]) carry) + // cond: is32Bit(c) + // result: (ADCQconst x [int32(c)] carry) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + carry := v_2 + if !(is32Bit(c)) { + continue + } + v.reset(OpAMD64ADCQconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(x, carry) + return true + } + break + } + // match: (ADCQ x y (FlagEQ)) + // result: (ADDQcarry x y) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64ADDQcarry) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADCQconst x [c] (FlagEQ)) + // result: (ADDQconstcarry x [c]) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpAMD64FlagEQ { + 
break + } + v.reset(OpAMD64ADDQconstcarry) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDL x (MOVLconst [c])) + // result: (ADDLconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64ADDLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (ADDL x (SHLLconst [3] y)) + // result: (LEAL8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAL8) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDL x (SHLLconst [2] y)) + // result: (LEAL4 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAL4) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDL x (SHLLconst [1] y)) + // result: (LEAL2 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAL2) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDL x (ADDL y y)) + // result: (LEAL2 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64ADDL { + continue + } + y := v_1.Args[1] + if y != v_1.Args[0] { + continue + } + v.reset(OpAMD64LEAL2) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDL x (ADDL x y)) + // result: (LEAL2 y x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64ADDL { + continue + } + _ = 
v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpAMD64LEAL2) + v.AddArg2(y, x) + return true + } + } + break + } + // match: (ADDL (ADDLconst [c] x) y) + // result: (LEAL1 [c] x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64ADDLconst { + continue + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + y := v_1 + v.reset(OpAMD64LEAL1) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDL x (LEAL [c] {s} y)) + // cond: x.Op != OpSB && y.Op != OpSB + // result: (LEAL1 [c] {s} x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64LEAL { + continue + } + c := auxIntToInt32(v_1.AuxInt) + s := auxToSym(v_1.Aux) + y := v_1.Args[0] + if !(x.Op != OpSB && y.Op != OpSB) { + continue + } + v.reset(OpAMD64LEAL1) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDL x (NEGL y)) + // result: (SUBL x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64NEGL { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64SUBL) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDL x l:(MOVLload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ADDLload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVLload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64ADDLload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func 
rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ADDLconst [c] (ADDL x y)) + // result: (LEAL1 [c] x y) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64ADDL { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpAMD64LEAL1) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (ADDLconst [c] (SHLLconst [1] x)) + // result: (LEAL1 [c] x x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 { + break + } + x := v_0.Args[0] + v.reset(OpAMD64LEAL1) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, x) + return true + } + // match: (ADDLconst [c] (LEAL [d] {s} x)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (LEAL [c+d] {s} x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64LEAL { + break + } + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(OpAMD64LEAL) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg(x) + return true + } + // match: (ADDLconst [c] (LEAL1 [d] {s} x y)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (LEAL1 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64LEAL1 { + break + } + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(OpAMD64LEAL1) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (ADDLconst [c] (LEAL2 [d] {s} x y)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (LEAL2 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64LEAL2 { + break + } + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(OpAMD64LEAL2) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + 
return true + } + // match: (ADDLconst [c] (LEAL4 [d] {s} x y)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (LEAL4 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64LEAL4 { + break + } + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(OpAMD64LEAL4) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (ADDLconst [c] (LEAL8 [d] {s} x y)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (LEAL8 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64LEAL8 { + break + } + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(OpAMD64LEAL8) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (ADDLconst [c] x) + // cond: c==0 + // result: x + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c == 0) { + break + } + v.copyOf(x) + return true + } + // match: (ADDLconst [c] (MOVLconst [d])) + // result: (MOVLconst [c+d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(c + d) + return true + } + // match: (ADDLconst [c] (ADDLconst [d] x)) + // result: (ADDLconst [c+d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64ADDLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpAMD64ADDLconst) + v.AuxInt = int32ToAuxInt(c + d) + v.AddArg(x) + return true + } + // match: (ADDLconst [off] x:(SP)) + // result: (LEAL [off] x) + for { + off := auxIntToInt32(v.AuxInt) + x := v_0 + if x.Op != OpSP { + break + } + v.reset(OpAMD64LEAL) + v.AuxInt = int32ToAuxInt(off) + v.AddArg(x) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(ValAndOff(valoff1).canAdd32(off2)) { + break + } + v.reset(OpAMD64ADDLconstmodify) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + // match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) + // result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ADDLconstmodify) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ADDLload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if 
!(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64ADDLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ADDLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + // match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) + // result: (ADDL x (MOVLf2i y)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr := v_1 + if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + if ptr != v_2.Args[0] { + break + } + v.reset(OpAMD64ADDL) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ADDLmodify [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64ADDLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = 
symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ADDLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDQ x (MOVQconst [c])) + // cond: is32Bit(c) && !t.IsPtr() + // result: (ADDQconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + continue + } + t := v_1.Type + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c) && !t.IsPtr()) { + continue + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + // match: (ADDQ x (MOVLconst [c])) + // result: (ADDQconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (ADDQ x (SHLQconst [3] y)) + // result: (LEAQ8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAQ8) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDQ x (SHLQconst [2] y)) + // result: (LEAQ4 x 
y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAQ4) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDQ x (SHLQconst [1] y)) + // result: (LEAQ2 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAQ2) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDQ x (ADDQ y y)) + // result: (LEAQ2 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64ADDQ { + continue + } + y := v_1.Args[1] + if y != v_1.Args[0] { + continue + } + v.reset(OpAMD64LEAQ2) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDQ x (ADDQ x y)) + // result: (LEAQ2 y x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64ADDQ { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpAMD64LEAQ2) + v.AddArg2(y, x) + return true + } + } + break + } + // match: (ADDQ (ADDQconst [c] x) y) + // result: (LEAQ1 [c] x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64ADDQconst { + continue + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + y := v_1 + v.reset(OpAMD64LEAQ1) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDQ x (LEAQ [c] {s} y)) + // cond: x.Op != OpSB && y.Op != OpSB + // result: (LEAQ1 [c] {s} x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64LEAQ { + continue + } + c := auxIntToInt32(v_1.AuxInt) + s := auxToSym(v_1.Aux) + y := v_1.Args[0] + if !(x.Op != OpSB && 
y.Op != OpSB) { + continue + } + v.reset(OpAMD64LEAQ1) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDQ x (NEGQ y)) + // result: (SUBQ x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64SUBQ) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDQ x l:(MOVQload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ADDQload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVQload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64ADDQload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDQcarry x (MOVQconst [c])) + // cond: is32Bit(c) + // result: (ADDQconstcarry x [int32(c)]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpAMD64ADDQconstcarry) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ADDQconst [c] (ADDQ x y)) + // result: (LEAQ1 [c] x y) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64ADDQ { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpAMD64LEAQ1) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (ADDQconst [c] (SHLQconst [1] x)) + // result: (LEAQ1 [c] x x) + for { 
+ c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 { + break + } + x := v_0.Args[0] + v.reset(OpAMD64LEAQ1) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, x) + return true + } + // match: (ADDQconst [c] (LEAQ [d] {s} x)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (LEAQ [c+d] {s} x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64LEAQ { + break + } + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(OpAMD64LEAQ) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg(x) + return true + } + // match: (ADDQconst [c] (LEAQ1 [d] {s} x y)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (LEAQ1 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64LEAQ1 { + break + } + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(OpAMD64LEAQ1) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (ADDQconst [c] (LEAQ2 [d] {s} x y)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (LEAQ2 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64LEAQ2 { + break + } + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(OpAMD64LEAQ2) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (ADDQconst [c] (LEAQ4 [d] {s} x y)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (LEAQ4 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64LEAQ4 { + break + } + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(OpAMD64LEAQ4) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = 
symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (ADDQconst [c] (LEAQ8 [d] {s} x y)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (LEAQ8 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64LEAQ8 { + break + } + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(OpAMD64LEAQ8) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (ADDQconst [0] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ADDQconst [c] (MOVQconst [d])) + // result: (MOVQconst [int64(c)+d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(int64(c) + d) + return true + } + // match: (ADDQconst [c] (ADDQconst [d] x)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (ADDQconst [c+d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64ADDQconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(c + d) + v.AddArg(x) + return true + } + // match: (ADDQconst [off] x:(SP)) + // result: (LEAQ [off] x) + for { + off := auxIntToInt32(v.AuxInt) + x := v_0 + if x.Op != OpSP { + break + } + v.reset(OpAMD64LEAQ) + v.AuxInt = int32ToAuxInt(off) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != 
OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(ValAndOff(valoff1).canAdd32(off2)) { + break + } + v.reset(OpAMD64ADDQconstmodify) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + // match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) + // result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ADDQconstmodify) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ADDQload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64ADDQload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val 
base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ADDQload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + // match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) + // result: (ADDQ x (MOVQf2i y)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr := v_1 + if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + if ptr != v_2.Args[0] { + break + } + v.reset(OpAMD64ADDQ) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ADDQmodify [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64ADDQmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := 
auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ADDQmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ADDSDload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVSDload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64ADDSDload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ADDSDload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64ADDSDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base 
mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ADDSDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + // match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) + // result: (ADDSD x (MOVQi2f y)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr := v_1 + if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + if ptr != v_2.Args[0] { + break + } + v.reset(OpAMD64ADDSD) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ADDSSload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVSSload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64ADDSSload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ADDSSload [off1+off2] 
{sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64ADDSSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ADDSSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + // match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) + // result: (ADDSS x (MOVLi2f y)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr := v_1 + if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + if ptr != v_2.Args[0] { + break + } + v.reset(OpAMD64ADDSS) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x) + // result: (BTRL x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64NOTL { + continue + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SHLL { + continue + } + 
y := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 { + continue + } + x := v_1 + v.reset(OpAMD64BTRL) + v.AddArg2(x, y) + return true + } + break + } + // match: (ANDL x (MOVLconst [c])) + // result: (ANDLconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64ANDLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (ANDL x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (ANDL x l:(MOVLload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ANDLload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVLload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64ANDLload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + // match: (ANDL x (NOTL y)) + // cond: buildcfg.GOAMD64 >= 3 + // result: (ANDNL x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64NOTL { + continue + } + y := v_1.Args[0] + if !(buildcfg.GOAMD64 >= 3) { + continue + } + v.reset(OpAMD64ANDNL) + v.AddArg2(x, y) + return true + } + break + } + // match: (ANDL x (NEGL x)) + // cond: buildcfg.GOAMD64 >= 3 + // result: (BLSIL x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64NEGL || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) { + continue + } + v.reset(OpAMD64BLSIL) + v.AddArg(x) + return true + } + break + } + // match: (ANDL x (ADDLconst [-1] x)) + // cond: 
buildcfg.GOAMD64 >= 3 + // result: (Select0 (BLSRL x)) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) { + continue + } + v.reset(OpSelect0) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64BLSRL, types.NewTuple(typ.UInt32, types.TypeFlags)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ANDLconst [c] (ANDLconst [d] x)) + // result: (ANDLconst [c & d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64ANDLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpAMD64ANDLconst) + v.AuxInt = int32ToAuxInt(c & d) + v.AddArg(x) + return true + } + // match: (ANDLconst [ 0xFF] x) + // result: (MOVBQZX x) + for { + if auxIntToInt32(v.AuxInt) != 0xFF { + break + } + x := v_0 + v.reset(OpAMD64MOVBQZX) + v.AddArg(x) + return true + } + // match: (ANDLconst [0xFFFF] x) + // result: (MOVWQZX x) + for { + if auxIntToInt32(v.AuxInt) != 0xFFFF { + break + } + x := v_0 + v.reset(OpAMD64MOVWQZX) + v.AddArg(x) + return true + } + // match: (ANDLconst [c] _) + // cond: c==0 + // result: (MOVLconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if !(c == 0) { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (ANDLconst [c] x) + // cond: c==-1 + // result: x + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c == -1) { + break + } + v.copyOf(x) + return true + } + // match: (ANDLconst [c] (MOVLconst [d])) + // result: (MOVLconst [c&d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(c & d) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool { + v_1 
:= v.Args[1] + v_0 := v.Args[0] + // match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(ValAndOff(valoff1).canAdd32(off2)) { + break + } + v.reset(OpAMD64ANDLconstmodify) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + // match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) + // result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ANDLconstmodify) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ANDLload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + 
v.reset(OpAMD64ANDLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ANDLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + // match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) + // result: (ANDL x (MOVLf2i y)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr := v_1 + if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + if ptr != v_2.Args[0] { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ANDLmodify [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64ANDLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + 
} + // match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ANDLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ANDNL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ANDNL x (SHLL (MOVLconst [1]) y)) + // result: (BTRL x y) + for { + x := v_0 + if v_1.Op != OpAMD64SHLL { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0.AuxInt) != 1 { + break + } + v.reset(OpAMD64BTRL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ANDNQ x (SHLQ (MOVQconst [1]) y)) + // result: (BTRQ x y) + for { + x := v_0 + if v_1.Op != OpAMD64SHLQ { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0.AuxInt) != 1 { + break + } + v.reset(OpAMD64BTRQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x) + // result: (BTRQ x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64NOTQ { + continue + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SHLQ { + continue + } + y := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if 
v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { + continue + } + x := v_1 + v.reset(OpAMD64BTRQ) + v.AddArg2(x, y) + return true + } + break + } + // match: (ANDQ (MOVQconst [c]) x) + // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31 + // result: (BTRQconst [int8(log64(^c))] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64MOVQconst { + continue + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31) { + continue + } + v.reset(OpAMD64BTRQconst) + v.AuxInt = int8ToAuxInt(int8(log64(^c))) + v.AddArg(x) + return true + } + break + } + // match: (ANDQ x (MOVQconst [c])) + // cond: is32Bit(c) + // result: (ANDQconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpAMD64ANDQconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + // match: (ANDQ x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (ANDQ x l:(MOVQload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ANDQload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVQload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64ANDQload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + // match: (ANDQ x (NOTQ y)) + // cond: buildcfg.GOAMD64 >= 3 + // result: (ANDNQ x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64NOTQ { + continue + } + y := v_1.Args[0] + 
if !(buildcfg.GOAMD64 >= 3) { + continue + } + v.reset(OpAMD64ANDNQ) + v.AddArg2(x, y) + return true + } + break + } + // match: (ANDQ x (NEGQ x)) + // cond: buildcfg.GOAMD64 >= 3 + // result: (BLSIQ x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64NEGQ || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) { + continue + } + v.reset(OpAMD64BLSIQ) + v.AddArg(x) + return true + } + break + } + // match: (ANDQ x (ADDQconst [-1] x)) + // cond: buildcfg.GOAMD64 >= 3 + // result: (Select0 (BLSRQ x)) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) { + continue + } + v.reset(OpSelect0) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64BLSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ANDQconst [c] (ANDQconst [d] x)) + // result: (ANDQconst [c & d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64ANDQconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpAMD64ANDQconst) + v.AuxInt = int32ToAuxInt(c & d) + v.AddArg(x) + return true + } + // match: (ANDQconst [ 0xFF] x) + // result: (MOVBQZX x) + for { + if auxIntToInt32(v.AuxInt) != 0xFF { + break + } + x := v_0 + v.reset(OpAMD64MOVBQZX) + v.AddArg(x) + return true + } + // match: (ANDQconst [0xFFFF] x) + // result: (MOVWQZX x) + for { + if auxIntToInt32(v.AuxInt) != 0xFFFF { + break + } + x := v_0 + v.reset(OpAMD64MOVWQZX) + v.AddArg(x) + return true + } + // match: (ANDQconst [0] _) + // result: (MOVQconst [0]) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (ANDQconst [-1] x) + // result: x + for { + if 
auxIntToInt32(v.AuxInt) != -1 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ANDQconst [c] (MOVQconst [d])) + // result: (MOVQconst [int64(c)&d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(int64(c) & d) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(ValAndOff(valoff1).canAdd32(off2)) { + break + } + v.reset(OpAMD64ANDQconstmodify) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + // match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) + // result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ANDQconstmodify) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ 
:= &b.Func.Config.Types + // match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ANDQload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64ANDQload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ANDQload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + // match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) + // result: (ANDQ x (MOVQf2i y)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr := v_1 + if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + if ptr != v_2.Args[0] { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + 
// result: (ANDQmodify [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64ANDQmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ANDQmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (BSFQ (ORQconst [1<<8] (MOVBQZX x))) + // result: (BSFQ (ORQconst [1<<8] x)) + for { + if v_0.Op != OpAMD64ORQconst { + break + } + t := v_0.Type + if auxIntToInt32(v_0.AuxInt) != 1<<8 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVBQZX { + break + } + x := v_0_0.Args[0] + v.reset(OpAMD64BSFQ) + v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t) + v0.AuxInt = int32ToAuxInt(1 << 8) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (BSFQ (ORQconst [1<<16] (MOVWQZX x))) + // result: (BSFQ (ORQconst [1<<16] x)) + for { + if v_0.Op != OpAMD64ORQconst { + break + } + t := v_0.Type + if auxIntToInt32(v_0.AuxInt) != 1<<16 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVWQZX { + break + } + x := v_0_0.Args[0] + 
v.reset(OpAMD64BSFQ) + v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t) + v0.AuxInt = int32ToAuxInt(1 << 16) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64BSWAPL(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BSWAPL (BSWAPL p)) + // result: p + for { + if v_0.Op != OpAMD64BSWAPL { + break + } + p := v_0.Args[0] + v.copyOf(p) + return true + } + // match: (BSWAPL x:(MOVLload [i] {s} p mem)) + // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3 + // result: @x.Block (MOVBELload [i] {s} p mem) + for { + x := v_0 + if x.Op != OpAMD64MOVLload { + break + } + i := auxIntToInt32(x.AuxInt) + s := auxToSym(x.Aux) + mem := x.Args[1] + p := x.Args[0] + if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVBELload, typ.UInt32) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(i) + v0.Aux = symToAux(s) + v0.AddArg2(p, mem) + return true + } + // match: (BSWAPL x:(MOVBELload [i] {s} p mem)) + // cond: x.Uses == 1 + // result: @x.Block (MOVLload [i] {s} p mem) + for { + x := v_0 + if x.Op != OpAMD64MOVBELload { + break + } + i := auxIntToInt32(x.AuxInt) + s := auxToSym(x.Aux) + mem := x.Args[1] + p := x.Args[0] + if !(x.Uses == 1) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, typ.UInt32) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(i) + v0.Aux = symToAux(s) + v0.AddArg2(p, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64BSWAPQ(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BSWAPQ (BSWAPQ p)) + // result: p + for { + if v_0.Op != OpAMD64BSWAPQ { + break + } + p := v_0.Args[0] + v.copyOf(p) + return true + } + // match: (BSWAPQ x:(MOVQload [i] {s} p mem)) + // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3 + // result: @x.Block (MOVBEQload [i] {s} p mem) + for { + x := v_0 + if x.Op != OpAMD64MOVQload { + break + } + i := auxIntToInt32(x.AuxInt) + s := 
auxToSym(x.Aux) + mem := x.Args[1] + p := x.Args[0] + if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVBEQload, typ.UInt64) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(i) + v0.Aux = symToAux(s) + v0.AddArg2(p, mem) + return true + } + // match: (BSWAPQ x:(MOVBEQload [i] {s} p mem)) + // cond: x.Uses == 1 + // result: @x.Block (MOVQload [i] {s} p mem) + for { + x := v_0 + if x.Op != OpAMD64MOVBEQload { + break + } + i := auxIntToInt32(x.AuxInt) + s := auxToSym(x.Aux) + mem := x.Args[1] + p := x.Args[0] + if !(x.Uses == 1) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVQload, typ.UInt64) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(i) + v0.Aux = symToAux(s) + v0.AddArg2(p, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool { + v_0 := v.Args[0] + // match: (BTCQconst [c] (MOVQconst [d])) + // result: (MOVQconst [d^(1<d + // result: (BTLconst [c-d] x) + for { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64SHLQconst { + break + } + d := auxIntToInt8(v_0.AuxInt) + x := v_0.Args[0] + if !(c > d) { + break + } + v.reset(OpAMD64BTLconst) + v.AuxInt = int8ToAuxInt(c - d) + v.AddArg(x) + return true + } + // match: (BTLconst [0] s:(SHRQ x y)) + // result: (BTQ y x) + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + s := v_0 + if s.Op != OpAMD64SHRQ { + break + } + y := s.Args[1] + x := s.Args[0] + v.reset(OpAMD64BTQ) + v.AddArg2(y, x) + return true + } + // match: (BTLconst [c] (SHRLconst [d] x)) + // cond: (c+d)<32 + // result: (BTLconst [c+d] x) + for { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64SHRLconst { + break + } + d := auxIntToInt8(v_0.AuxInt) + x := v_0.Args[0] + if !((c + d) < 32) { + break + } + v.reset(OpAMD64BTLconst) + v.AuxInt = int8ToAuxInt(c + d) + v.AddArg(x) + return true + } + // match: (BTLconst [c] (SHLLconst [d] x)) + // cond: c>d + // result: (BTLconst [c-d] x) + for { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != 
OpAMD64SHLLconst { + break + } + d := auxIntToInt8(v_0.AuxInt) + x := v_0.Args[0] + if !(c > d) { + break + } + v.reset(OpAMD64BTLconst) + v.AuxInt = int8ToAuxInt(c - d) + v.AddArg(x) + return true + } + // match: (BTLconst [0] s:(SHRL x y)) + // result: (BTL y x) + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + s := v_0 + if s.Op != OpAMD64SHRL { + break + } + y := s.Args[1] + x := s.Args[0] + v.reset(OpAMD64BTL) + v.AddArg2(y, x) + return true + } + // match: (BTLconst [0] s:(SHRXL x y)) + // result: (BTL y x) + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + s := v_0 + if s.Op != OpAMD64SHRXL { + break + } + y := s.Args[1] + x := s.Args[0] + v.reset(OpAMD64BTL) + v.AddArg2(y, x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool { + v_0 := v.Args[0] + // match: (BTQconst [c] (SHRQconst [d] x)) + // cond: (c+d)<64 + // result: (BTQconst [c+d] x) + for { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64SHRQconst { + break + } + d := auxIntToInt8(v_0.AuxInt) + x := v_0.Args[0] + if !((c + d) < 64) { + break + } + v.reset(OpAMD64BTQconst) + v.AuxInt = int8ToAuxInt(c + d) + v.AddArg(x) + return true + } + // match: (BTQconst [c] (SHLQconst [d] x)) + // cond: c>d + // result: (BTQconst [c-d] x) + for { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64SHLQconst { + break + } + d := auxIntToInt8(v_0.AuxInt) + x := v_0.Args[0] + if !(c > d) { + break + } + v.reset(OpAMD64BTQconst) + v.AuxInt = int8ToAuxInt(c - d) + v.AddArg(x) + return true + } + // match: (BTQconst [0] s:(SHRQ x y)) + // result: (BTQ y x) + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + s := v_0 + if s.Op != OpAMD64SHRQ { + break + } + y := s.Args[1] + x := s.Args[0] + v.reset(OpAMD64BTQ) + v.AddArg2(y, x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool { + v_0 := v.Args[0] + // match: (BTRQconst [c] (BTSQconst [c] x)) + // result: (BTRQconst [c] x) + for { + c := auxIntToInt8(v.AuxInt) + if 
v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + v.reset(OpAMD64BTRQconst) + v.AuxInt = int8ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (BTRQconst [c] (BTCQconst [c] x)) + // result: (BTRQconst [c] x) + for { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + v.reset(OpAMD64BTRQconst) + v.AuxInt = int8ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (BTRQconst [c] (MOVQconst [d])) + // result: (MOVQconst [d&^(1< blsr)) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64TESTQ { + break + } + _ = v_2.Args[1] + v_2_0 := v_2.Args[0] + v_2_1 := v_2.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 { + s := v_2_0 + if s.Op != OpSelect0 { + continue + } + blsr := s.Args[0] + if blsr.Op != OpAMD64BLSRQ || s != v_2_1 { + continue + } + v.reset(OpAMD64CMOVLEQ) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(blsr) + v.AddArg3(x, y, v0) + return true + } + break + } + // match: (CMOVLEQ x y (TESTL s:(Select0 blsr:(BLSRL _)) s)) + // result: (CMOVLEQ x y (Select1 blsr)) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64TESTL { + break + } + _ = v_2.Args[1] + v_2_0 := v_2.Args[0] + v_2_1 := v_2.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 { + s := v_2_0 + if s.Op != OpSelect0 { + continue + } + blsr := s.Args[0] + if blsr.Op != OpAMD64BLSRL || s != v_2_1 { + continue + } + v.reset(OpAMD64CMOVLEQ) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(blsr) + v.AddArg3(x, y, v0) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVLGE x y (InvertFlags cond)) + // result: (CMOVLLE x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVLLE) + v.AddArg3(x, y, 
cond) + return true + } + // match: (CMOVLGE _ x (FlagEQ)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(x) + return true + } + // match: (CMOVLGE _ x (FlagGT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVLGE _ x (FlagGT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVLGE y _ (FlagLT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVLGE y _ (FlagLT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVLGT x y (InvertFlags cond)) + // result: (CMOVLLT x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVLLT) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVLGT y _ (FlagEQ)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(y) + return true + } + // match: (CMOVLGT _ x (FlagGT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVLGT _ x (FlagGT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVLGT y _ (FlagLT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVLGT y _ (FlagLT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool { + v_2 := v.Args[2] + 
v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVLHI x y (InvertFlags cond)) + // result: (CMOVLCS x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVLCS) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVLHI y _ (FlagEQ)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(y) + return true + } + // match: (CMOVLHI _ x (FlagGT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVLHI y _ (FlagGT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVLHI y _ (FlagLT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVLHI _ x (FlagLT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVLLE x y (InvertFlags cond)) + // result: (CMOVLGE x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVLGE) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVLLE _ x (FlagEQ)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(x) + return true + } + // match: (CMOVLLE y _ (FlagGT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVLLE y _ (FlagGT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVLLE _ x (FlagLT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(x) + return 
true + } + // match: (CMOVLLE _ x (FlagLT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVLLS x y (InvertFlags cond)) + // result: (CMOVLCC x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVLCC) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVLLS _ x (FlagEQ)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(x) + return true + } + // match: (CMOVLLS y _ (FlagGT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVLLS _ x (FlagGT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVLLS _ x (FlagLT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVLLS y _ (FlagLT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVLLT x y (InvertFlags cond)) + // result: (CMOVLGT x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVLGT) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVLLT y _ (FlagEQ)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(y) + return true + } + // match: (CMOVLLT y _ (FlagGT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVLLT y _ (FlagGT_ULT)) 
+ // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVLLT _ x (FlagLT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVLLT _ x (FlagLT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMOVLNE x y (InvertFlags cond)) + // result: (CMOVLNE x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVLNE) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVLNE y _ (FlagEQ)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(y) + return true + } + // match: (CMOVLNE _ x (FlagGT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVLNE _ x (FlagGT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVLNE _ x (FlagLT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVLNE _ x (FlagLT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVLNE x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s)) + // result: (CMOVLNE x y (Select1 blsr)) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64TESTQ { + break + } + _ = v_2.Args[1] + v_2_0 := v_2.Args[0] + v_2_1 := v_2.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 { + s := v_2_0 + if s.Op != OpSelect0 { + continue + } + blsr := s.Args[0] + if blsr.Op != OpAMD64BLSRQ || s != v_2_1 { + continue + 
} + v.reset(OpAMD64CMOVLNE) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(blsr) + v.AddArg3(x, y, v0) + return true + } + break + } + // match: (CMOVLNE x y (TESTL s:(Select0 blsr:(BLSRL _)) s)) + // result: (CMOVLNE x y (Select1 blsr)) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64TESTL { + break + } + _ = v_2.Args[1] + v_2_0 := v_2.Args[0] + v_2_1 := v_2.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 { + s := v_2_0 + if s.Op != OpSelect0 { + continue + } + blsr := s.Args[0] + if blsr.Op != OpAMD64BLSRL || s != v_2_1 { + continue + } + v.reset(OpAMD64CMOVLNE) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(blsr) + v.AddArg3(x, y, v0) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVQCC x y (InvertFlags cond)) + // result: (CMOVQLS x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVQLS) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVQCC _ x (FlagEQ)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQCC _ x (FlagGT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQCC y _ (FlagGT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQCC y _ (FlagLT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQCC _ x (FlagLT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := 
v.Args[0] + // match: (CMOVQCS x y (InvertFlags cond)) + // result: (CMOVQHI x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVQHI) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVQCS y _ (FlagEQ)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQCS y _ (FlagGT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQCS _ x (FlagGT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQCS _ x (FlagLT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQCS y _ (FlagLT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMOVQEQ x y (InvertFlags cond)) + // result: (CMOVQEQ x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVQEQ) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVQEQ _ x (FlagEQ)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQEQ y _ (FlagGT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQEQ y _ (FlagGT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQEQ y _ (FlagLT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(y) + return true + } + 
// match: (CMOVQEQ y _ (FlagLT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) + // cond: c != 0 + // result: x + for { + x := v_0 + if v_2.Op != OpSelect1 { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpAMD64BSFQ { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpAMD64ORQconst { + break + } + c := auxIntToInt32(v_2_0_0.AuxInt) + if !(c != 0) { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQEQ x _ (Select1 (BSRQ (ORQconst [c] _)))) + // cond: c != 0 + // result: x + for { + x := v_0 + if v_2.Op != OpSelect1 { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpAMD64BSRQ { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpAMD64ORQconst { + break + } + c := auxIntToInt32(v_2_0_0.AuxInt) + if !(c != 0) { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQEQ x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s)) + // result: (CMOVQEQ x y (Select1 blsr)) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64TESTQ { + break + } + _ = v_2.Args[1] + v_2_0 := v_2.Args[0] + v_2_1 := v_2.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 { + s := v_2_0 + if s.Op != OpSelect0 { + continue + } + blsr := s.Args[0] + if blsr.Op != OpAMD64BLSRQ || s != v_2_1 { + continue + } + v.reset(OpAMD64CMOVQEQ) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(blsr) + v.AddArg3(x, y, v0) + return true + } + break + } + // match: (CMOVQEQ x y (TESTL s:(Select0 blsr:(BLSRL _)) s)) + // result: (CMOVQEQ x y (Select1 blsr)) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64TESTL { + break + } + _ = v_2.Args[1] + v_2_0 := v_2.Args[0] + v_2_1 := v_2.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 { + s := v_2_0 + if s.Op != OpSelect0 { + continue + } + blsr := s.Args[0] + if blsr.Op != OpAMD64BLSRL || s != v_2_1 { + continue + } + v.reset(OpAMD64CMOVQEQ) + v0 
:= b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(blsr) + v.AddArg3(x, y, v0) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVQGE x y (InvertFlags cond)) + // result: (CMOVQLE x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVQLE) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVQGE _ x (FlagEQ)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQGE _ x (FlagGT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQGE _ x (FlagGT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQGE y _ (FlagLT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQGE y _ (FlagLT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVQGT x y (InvertFlags cond)) + // result: (CMOVQLT x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVQLT) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVQGT y _ (FlagEQ)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQGT _ x (FlagGT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQGT _ x (FlagGT_ULT)) + // result: x + for { + x := v_1 + if 
v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQGT y _ (FlagLT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQGT y _ (FlagLT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVQHI x y (InvertFlags cond)) + // result: (CMOVQCS x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVQCS) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVQHI y _ (FlagEQ)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQHI _ x (FlagGT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQHI y _ (FlagGT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQHI y _ (FlagLT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQHI _ x (FlagLT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVQLE x y (InvertFlags cond)) + // result: (CMOVQGE x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVQGE) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVQLE _ x (FlagEQ)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagEQ { + break + } + 
v.copyOf(x) + return true + } + // match: (CMOVQLE y _ (FlagGT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQLE y _ (FlagGT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQLE _ x (FlagLT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQLE _ x (FlagLT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVQLS x y (InvertFlags cond)) + // result: (CMOVQCC x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVQCC) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVQLS _ x (FlagEQ)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQLS y _ (FlagGT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQLS _ x (FlagGT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQLS _ x (FlagLT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQLS y _ (FlagLT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVQLT x y (InvertFlags cond)) + // result: (CMOVQGT x y cond) + for { + x := v_0 + y := 
v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVQGT) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVQLT y _ (FlagEQ)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQLT y _ (FlagGT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQLT y _ (FlagGT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQLT _ x (FlagLT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQLT _ x (FlagLT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMOVQNE x y (InvertFlags cond)) + // result: (CMOVQNE x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVQNE) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVQNE y _ (FlagEQ)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(y) + return true + } + // match: (CMOVQNE _ x (FlagGT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQNE _ x (FlagGT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQNE _ x (FlagLT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVQNE _ x (FlagLT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_UGT { + 
break + } + v.copyOf(x) + return true + } + // match: (CMOVQNE x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s)) + // result: (CMOVQNE x y (Select1 blsr)) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64TESTQ { + break + } + _ = v_2.Args[1] + v_2_0 := v_2.Args[0] + v_2_1 := v_2.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 { + s := v_2_0 + if s.Op != OpSelect0 { + continue + } + blsr := s.Args[0] + if blsr.Op != OpAMD64BLSRQ || s != v_2_1 { + continue + } + v.reset(OpAMD64CMOVQNE) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(blsr) + v.AddArg3(x, y, v0) + return true + } + break + } + // match: (CMOVQNE x y (TESTL s:(Select0 blsr:(BLSRL _)) s)) + // result: (CMOVQNE x y (Select1 blsr)) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64TESTL { + break + } + _ = v_2.Args[1] + v_2_0 := v_2.Args[0] + v_2_1 := v_2.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 { + s := v_2_0 + if s.Op != OpSelect0 { + continue + } + blsr := s.Args[0] + if blsr.Op != OpAMD64BLSRL || s != v_2_1 { + continue + } + v.reset(OpAMD64CMOVQNE) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(blsr) + v.AddArg3(x, y, v0) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVWCC x y (InvertFlags cond)) + // result: (CMOVWLS x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVWLS) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVWCC _ x (FlagEQ)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWCC _ x (FlagGT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWCC y _ (FlagGT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != 
OpAMD64FlagGT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWCC y _ (FlagLT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWCC _ x (FlagLT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVWCS x y (InvertFlags cond)) + // result: (CMOVWHI x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVWHI) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVWCS y _ (FlagEQ)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWCS y _ (FlagGT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWCS _ x (FlagGT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWCS _ x (FlagLT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWCS y _ (FlagLT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVWEQ x y (InvertFlags cond)) + // result: (CMOVWEQ x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVWEQ) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVWEQ _ x (FlagEQ)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(x) + 
return true + } + // match: (CMOVWEQ y _ (FlagGT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWEQ y _ (FlagGT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWEQ y _ (FlagLT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWEQ y _ (FlagLT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVWGE x y (InvertFlags cond)) + // result: (CMOVWLE x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVWLE) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVWGE _ x (FlagEQ)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWGE _ x (FlagGT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWGE _ x (FlagGT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWGE y _ (FlagLT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWGE y _ (FlagLT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVWGT x y (InvertFlags cond)) + // result: (CMOVWLT x y cond) + for { + x := v_0 + y := v_1 + if 
v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVWLT) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVWGT y _ (FlagEQ)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWGT _ x (FlagGT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWGT _ x (FlagGT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWGT y _ (FlagLT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWGT y _ (FlagLT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVWHI x y (InvertFlags cond)) + // result: (CMOVWCS x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVWCS) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVWHI y _ (FlagEQ)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWHI _ x (FlagGT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWHI y _ (FlagGT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWHI y _ (FlagLT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWHI _ x (FlagLT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(x) + 
return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVWLE x y (InvertFlags cond)) + // result: (CMOVWGE x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVWGE) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVWLE _ x (FlagEQ)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWLE y _ (FlagGT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWLE y _ (FlagGT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWLE _ x (FlagLT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWLE _ x (FlagLT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVWLS x y (InvertFlags cond)) + // result: (CMOVWCC x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVWCC) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVWLS _ x (FlagEQ)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWLS y _ (FlagGT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWLS _ x (FlagGT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWLS _ x 
(FlagLT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWLS y _ (FlagLT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVWLT x y (InvertFlags cond)) + // result: (CMOVWGT x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVWGT) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVWLT y _ (FlagEQ)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWLT y _ (FlagGT_UGT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWLT y _ (FlagGT_ULT)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWLT _ x (FlagLT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWLT _ x (FlagLT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVWNE x y (InvertFlags cond)) + // result: (CMOVWNE x y cond) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64InvertFlags { + break + } + cond := v_2.Args[0] + v.reset(OpAMD64CMOVWNE) + v.AddArg3(x, y, cond) + return true + } + // match: (CMOVWNE y _ (FlagEQ)) + // result: y + for { + y := v_0 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.copyOf(y) + return true + } + // match: (CMOVWNE _ x (FlagGT_UGT)) + // result: x + for { + x := v_1 
+ if v_2.Op != OpAMD64FlagGT_UGT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWNE _ x (FlagGT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagGT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWNE _ x (FlagLT_ULT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_ULT { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWNE _ x (FlagLT_UGT)) + // result: x + for { + x := v_1 + if v_2.Op != OpAMD64FlagLT_UGT { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMPB x (MOVLconst [c])) + // result: (CMPBconst x [int8(c)]) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64CMPBconst) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (CMPB (MOVLconst [c]) x) + // result: (InvertFlags (CMPBconst x [int8(c)])) + for { + if v_0.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpAMD64InvertFlags) + v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(c)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMPB x y) + // cond: canonLessThan(x,y) + // result: (InvertFlags (CMPB y x)) + for { + x := v_0 + y := v_1 + if !(canonLessThan(x, y)) { + break + } + v.reset(OpAMD64InvertFlags) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (CMPBload {sym} [off] ptr x mem) + for { + l := v_0 + if l.Op != OpAMD64MOVBload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + x := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64CMPBload) + v.AuxInt = 
int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (CMPB x l:(MOVBload {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (InvertFlags (CMPBload {sym} [off] ptr x mem)) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVBload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64InvertFlags) + v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, x, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CMPBconst (MOVLconst [x]) [y]) + // cond: int8(x)==y + // result: (FlagEQ) + for { + y := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + if !(int8(x) == y) { + break + } + v.reset(OpAMD64FlagEQ) + return true + } + // match: (CMPBconst (MOVLconst [x]) [y]) + // cond: int8(x)uint8(y) + // result: (FlagLT_UGT) + for { + y := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + if !(int8(x) < y && uint8(x) > uint8(y)) { + break + } + v.reset(OpAMD64FlagLT_UGT) + return true + } + // match: (CMPBconst (MOVLconst [x]) [y]) + // cond: int8(x)>y && uint8(x) y && uint8(x) < uint8(y)) { + break + } + v.reset(OpAMD64FlagGT_ULT) + return true + } + // match: (CMPBconst (MOVLconst [x]) [y]) + // cond: int8(x)>y && uint8(x)>uint8(y) + // result: (FlagGT_UGT) + for { + y := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + if !(int8(x) > y && uint8(x) > uint8(y)) { + break + } + v.reset(OpAMD64FlagGT_UGT) + return true + } + // match: (CMPBconst (ANDLconst _ [m]) [n]) + // cond: 0 <= int8(m) && int8(m) < n + // result: (FlagLT_ULT) + 
for { + n := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64ANDLconst { + break + } + m := auxIntToInt32(v_0.AuxInt) + if !(0 <= int8(m) && int8(m) < n) { + break + } + v.reset(OpAMD64FlagLT_ULT) + return true + } + // match: (CMPBconst a:(ANDL x y) [0]) + // cond: a.Uses == 1 + // result: (TESTB x y) + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + a := v_0 + if a.Op != OpAMD64ANDL { + break + } + y := a.Args[1] + x := a.Args[0] + if !(a.Uses == 1) { + break + } + v.reset(OpAMD64TESTB) + v.AddArg2(x, y) + return true + } + // match: (CMPBconst a:(ANDLconst [c] x) [0]) + // cond: a.Uses == 1 + // result: (TESTBconst [int8(c)] x) + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + a := v_0 + if a.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(a.AuxInt) + x := a.Args[0] + if !(a.Uses == 1) { + break + } + v.reset(OpAMD64TESTBconst) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + // match: (CMPBconst x [0]) + // result: (TESTB x x) + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + x := v_0 + v.reset(OpAMD64TESTB) + v.AddArg2(x, x) + return true + } + // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c]) + // cond: l.Uses == 1 && clobber(l) + // result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem) + for { + c := auxIntToInt8(v.AuxInt) + l := v_0 + if l.Op != OpAMD64MOVBload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(l.Uses == 1 && clobber(l)) { + break + } + b = l.Block + v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags) + v.copyOf(v0) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: 
(CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(ValAndOff(valoff1).canAdd32(off2)) { + break + } + v.reset(OpAMD64CMPBconstload) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + // match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) + // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64CMPBconstload) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (CMPBload [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64CMPBload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: 
is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64CMPBload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem) + // result: (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(OpAMD64CMPBconstload) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMPL x (MOVLconst [c])) + // result: (CMPLconst x [c]) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64CMPLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (CMPL (MOVLconst [c]) x) + // result: (InvertFlags (CMPLconst x [c])) + for { + if v_0.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpAMD64InvertFlags) + v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMPL x y) + // cond: canonLessThan(x,y) + // result: (InvertFlags (CMPL y x)) + for { + x := v_0 + y := v_1 + if !(canonLessThan(x, y)) { + break + } + v.reset(OpAMD64InvertFlags) + v0 := 
b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + // match: (CMPL l:(MOVLload {sym} [off] ptr mem) x) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (CMPLload {sym} [off] ptr x mem) + for { + l := v_0 + if l.Op != OpAMD64MOVLload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + x := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64CMPLload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (CMPL x l:(MOVLload {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (InvertFlags (CMPLload {sym} [off] ptr x mem)) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVLload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64InvertFlags) + v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, x, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CMPLconst (MOVLconst [x]) [y]) + // cond: x==y + // result: (FlagEQ) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + if !(x == y) { + break + } + v.reset(OpAMD64FlagEQ) + return true + } + // match: (CMPLconst (MOVLconst [x]) [y]) + // cond: xuint32(y) + // result: (FlagLT_UGT) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + if !(x < y && uint32(x) > uint32(y)) { + break + } + v.reset(OpAMD64FlagLT_UGT) + return true + } + // match: (CMPLconst (MOVLconst [x]) [y]) + // cond: x>y && uint32(x) y && uint32(x) < uint32(y)) { + break + } + 
v.reset(OpAMD64FlagGT_ULT) + return true + } + // match: (CMPLconst (MOVLconst [x]) [y]) + // cond: x>y && uint32(x)>uint32(y) + // result: (FlagGT_UGT) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + if !(x > y && uint32(x) > uint32(y)) { + break + } + v.reset(OpAMD64FlagGT_UGT) + return true + } + // match: (CMPLconst (SHRLconst _ [c]) [n]) + // cond: 0 <= n && 0 < c && c <= 32 && (1<uint64(y) + // result: (FlagLT_UGT) + for { + if v_0.Op != OpAMD64MOVQconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpAMD64MOVQconst { + break + } + y := auxIntToInt64(v_1.AuxInt) + if !(x < y && uint64(x) > uint64(y)) { + break + } + v.reset(OpAMD64FlagLT_UGT) + return true + } + // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) + // cond: x>y && uint64(x) y && uint64(x) < uint64(y)) { + break + } + v.reset(OpAMD64FlagGT_ULT) + return true + } + // match: (CMPQ (MOVQconst [x]) (MOVQconst [y])) + // cond: x>y && uint64(x)>uint64(y) + // result: (FlagGT_UGT) + for { + if v_0.Op != OpAMD64MOVQconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpAMD64MOVQconst { + break + } + y := auxIntToInt64(v_1.AuxInt) + if !(x > y && uint64(x) > uint64(y)) { + break + } + v.reset(OpAMD64FlagGT_UGT) + return true + } + // match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (CMPQload {sym} [off] ptr x mem) + for { + l := v_0 + if l.Op != OpAMD64MOVQload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + x := v_1 + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64CMPQload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (CMPQ x l:(MOVQload {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (InvertFlags (CMPQload {sym} [off] ptr x mem)) + for { + x := v_0 + l := v_1 + if 
l.Op != OpAMD64MOVQload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64InvertFlags) + v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, x, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (CMPQconst (MOVQconst [x]) [y]) + // cond: x==int64(y) + // result: (FlagEQ) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if !(x == int64(y)) { + break + } + v.reset(OpAMD64FlagEQ) + return true + } + // match: (CMPQconst (MOVQconst [x]) [y]) + // cond: xuint64(int64(y)) + // result: (FlagLT_UGT) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if !(x < int64(y) && uint64(x) > uint64(int64(y))) { + break + } + v.reset(OpAMD64FlagLT_UGT) + return true + } + // match: (CMPQconst (MOVQconst [x]) [y]) + // cond: x>int64(y) && uint64(x) int64(y) && uint64(x) < uint64(int64(y))) { + break + } + v.reset(OpAMD64FlagGT_ULT) + return true + } + // match: (CMPQconst (MOVQconst [x]) [y]) + // cond: x>int64(y) && uint64(x)>uint64(int64(y)) + // result: (FlagGT_UGT) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if !(x > int64(y) && uint64(x) > uint64(int64(y))) { + break + } + v.reset(OpAMD64FlagGT_UGT) + return true + } + // match: (CMPQconst (MOVBQZX _) [c]) + // cond: 0xFF < c + // result: (FlagLT_ULT) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) { + break + } + v.reset(OpAMD64FlagLT_ULT) + return true + } + // match: (CMPQconst (MOVWQZX _) [c]) + // cond: 0xFFFF < c + // result: (FlagLT_ULT) + for { + c := 
auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) { + break + } + v.reset(OpAMD64FlagLT_ULT) + return true + } + // match: (CMPQconst (SHRQconst _ [c]) [n]) + // cond: 0 <= n && 0 < c && c <= 64 && (1<uint16(y) + // result: (FlagLT_UGT) + for { + y := auxIntToInt16(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + if !(int16(x) < y && uint16(x) > uint16(y)) { + break + } + v.reset(OpAMD64FlagLT_UGT) + return true + } + // match: (CMPWconst (MOVLconst [x]) [y]) + // cond: int16(x)>y && uint16(x) y && uint16(x) < uint16(y)) { + break + } + v.reset(OpAMD64FlagGT_ULT) + return true + } + // match: (CMPWconst (MOVLconst [x]) [y]) + // cond: int16(x)>y && uint16(x)>uint16(y) + // result: (FlagGT_UGT) + for { + y := auxIntToInt16(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + if !(int16(x) > y && uint16(x) > uint16(y)) { + break + } + v.reset(OpAMD64FlagGT_UGT) + return true + } + // match: (CMPWconst (ANDLconst _ [m]) [n]) + // cond: 0 <= int16(m) && int16(m) < n + // result: (FlagLT_ULT) + for { + n := auxIntToInt16(v.AuxInt) + if v_0.Op != OpAMD64ANDLconst { + break + } + m := auxIntToInt32(v_0.AuxInt) + if !(0 <= int16(m) && int16(m) < n) { + break + } + v.reset(OpAMD64FlagLT_ULT) + return true + } + // match: (CMPWconst a:(ANDL x y) [0]) + // cond: a.Uses == 1 + // result: (TESTW x y) + for { + if auxIntToInt16(v.AuxInt) != 0 { + break + } + a := v_0 + if a.Op != OpAMD64ANDL { + break + } + y := a.Args[1] + x := a.Args[0] + if !(a.Uses == 1) { + break + } + v.reset(OpAMD64TESTW) + v.AddArg2(x, y) + return true + } + // match: (CMPWconst a:(ANDLconst [c] x) [0]) + // cond: a.Uses == 1 + // result: (TESTWconst [int16(c)] x) + for { + if auxIntToInt16(v.AuxInt) != 0 { + break + } + a := v_0 + if a.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(a.AuxInt) + x := a.Args[0] + if !(a.Uses == 1) { + break + } + v.reset(OpAMD64TESTWconst) + v.AuxInt = 
int16ToAuxInt(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst x [0])
	// result: (TESTW x x)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		c := auxIntToInt16(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		// The fused compare-with-memory is emitted into the load's
		// block (the @l.Block in the rule), not v's block.
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64CMPWconstload folds address arithmetic on the
// pointer operand of a CMPWconstload into its ValAndOff aux: a small
// constant add (ADDQconst) or a LEAQ base+symbol, provided the combined
// offset still fits (canAdd32) and any symbols can be merged.
// It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64CMPWload folds address arithmetic (ADDQconst
// offsets, LEAQ base+symbol) into a CMPWload's aux, and converts a
// compare against a constant operand into a CMPWconstload.
// It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPWload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		// Offsets are summed in 64 bits first so the fit check cannot
		// itself overflow.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
	// result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c :=
auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64CMPWconstload)
		// Truncate the constant to 16 bits (the width of the word
		// compare) before packing it with the offset.
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64CMPXCHGLlock folds an ADDQconst on the pointer
// operand of a locked 32-bit compare-and-exchange into the instruction's
// offset, provided the combined offset fits in 32 bits.
// It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		old := v_1
		new_ := v_2
		mem := v_3
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPXCHGLlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64CMPXCHGQlock is the 64-bit counterpart of the
// CMPXCHGLlock rule: it folds an ADDQconst on the pointer operand into
// the instruction offset. It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		old := v_1
		new_ := v_2
		mem := v_3
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPXCHGQlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64DIVSD merges a MOVSDload feeding a DIVSD into
// a single DIVSDload. Division is not commutative, so unlike the compare
// rules only the second (divisor) operand is merged.
// It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (DIVSDload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64DIVSDload folds address arithmetic (ADDQconst
// offsets, LEAQ base+symbol) into the addressing mode of a DIVSDload.
// It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (DIVSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64DIVSS is the single-precision analogue of the
// DIVSD rule: it merges a MOVSSload divisor into a DIVSSload.
// It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (DIVSSload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
if l.Op != OpAMD64MOVSSload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64DIVSSload folds address arithmetic (ADDQconst
// offsets, LEAQ base+symbol) into the addressing mode of a DIVSSload.
// It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (DIVSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64HMULL canonicalizes operand order of a signed
// 32-bit high multiply: when only the second operand is
// rematerializeable, the operands are swapped so the rematerializeable
// value comes first. It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (HMULL x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULL y x)
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULL)
		v.AddArg2(y, x)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64HMULLU applies the same operand-order
// canonicalization as HMULL for the unsigned 32-bit high multiply.
// It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (HMULLU x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULLU y x)
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULLU)
		v.AddArg2(y, x)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64HMULQ applies the same operand-order
// canonicalization as HMULL for the signed 64-bit high multiply.
// It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (HMULQ x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULQ y x)
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULQ)
		v.AddArg2(y, x)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64HMULQU applies the same operand-order
// canonicalization as HMULL for the unsigned 64-bit high multiply.
// It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (HMULQU x y)
	// cond: !x.rematerializeable() && y.rematerializeable()
	// result: (HMULQU y x)
	for {
		x := v_0
		y := v_1
		if !(!x.rematerializeable() && y.rematerializeable()) {
			break
		}
		v.reset(OpAMD64HMULQU)
		v.AddArg2(y, x)
		return true
	}
	return false
}

// rewriteValueAMD64_OpAMD64LEAL simplifies a 32-bit LEA: a constant add
// on its operand is folded into the displacement, and an ADDL operand
// (when neither summand is the static base pointer SB) upgrades the
// LEAL to a two-register LEAL1. It reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
	v_0 := v.Args[0]
	// match: (LEAL [c] {s} (ADDLconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (LEAL [c] {s} (ADDL x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		_ = v_0.Args[1]
v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if !(x.Op != OpSB && y.Op != OpSB) { + continue + } + v.reset(OpAMD64LEAL1) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LEAL1 [c] {s} (ADDLconst [d] x) y) + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB + // result: (LEAL1 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64ADDLconst { + continue + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { + continue + } + v.reset(OpAMD64LEAL1) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + break + } + // match: (LEAL1 [c] {s} x (SHLLconst [1] y)) + // result: (LEAL2 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAL2) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + break + } + // match: (LEAL1 [c] {s} x (SHLLconst [2] y)) + // result: (LEAL4 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAL4) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + break + } + // match: (LEAL1 [c] {s} x (SHLLconst [3] y)) + // result: (LEAL8 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := 
auxToSym(v.Aux) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAL8) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LEAL2 [c] {s} (ADDLconst [d] x) y) + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB + // result: (LEAL2 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { + break + } + v.reset(OpAMD64LEAL2) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAL2 [c] {s} x (ADDLconst [d] y)) + // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB + // result: (LEAL2 [c+2*d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64ADDLconst { + break + } + d := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) { + break + } + v.reset(OpAMD64LEAL2) + v.AuxInt = int32ToAuxInt(c + 2*d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAL2 [c] {s} x (SHLLconst [1] y)) + // result: (LEAL4 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 { + break + } + y := v_1.Args[0] + v.reset(OpAMD64LEAL4) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAL2 [c] {s} x (SHLLconst [2] y)) + // result: (LEAL8 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 { + 
break + } + y := v_1.Args[0] + v.reset(OpAMD64LEAL8) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LEAL4 [c] {s} (ADDLconst [d] x) y) + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB + // result: (LEAL4 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { + break + } + v.reset(OpAMD64LEAL4) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAL4 [c] {s} x (ADDLconst [d] y)) + // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB + // result: (LEAL4 [c+4*d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64ADDLconst { + break + } + d := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) { + break + } + v.reset(OpAMD64LEAL4) + v.AuxInt = int32ToAuxInt(c + 4*d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAL4 [c] {s} x (SHLLconst [1] y)) + // result: (LEAL8 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 { + break + } + y := v_1.Args[0] + v.reset(OpAMD64LEAL8) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LEAL8 [c] {s} (ADDLconst [d] x) y) + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB + // result: (LEAL8 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := 
v_0.Args[0] + y := v_1 + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { + break + } + v.reset(OpAMD64LEAL8) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAL8 [c] {s} x (ADDLconst [d] y)) + // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB + // result: (LEAL8 [c+8*d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64ADDLconst { + break + } + d := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) { + break + } + v.reset(OpAMD64LEAL8) + v.AuxInt = int32ToAuxInt(c + 8*d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool { + v_0 := v.Args[0] + // match: (LEAQ [c] {s} (ADDQconst [d] x)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (LEAQ [c+d] {s} x) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(OpAMD64LEAQ) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg(x) + return true + } + // match: (LEAQ [c] {s} (ADDQ x y)) + // cond: x.Op != OpSB && y.Op != OpSB + // result: (LEAQ1 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQ { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if !(x.Op != OpSB && y.Op != OpSB) { + continue + } + v.reset(OpAMD64LEAQ1) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + break + } + // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x) + for { + off1 := 
auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + x := v_0.Args[0] + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64LEAQ) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg(x) + return true + } + // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ1 { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64LEAQ1) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ2 { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64LEAQ2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ4 { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := 
auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64LEAQ4) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ8 { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64LEAQ8) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y) + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB + // result: (LEAQ1 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64ADDQconst { + continue + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { + continue + } + v.reset(OpAMD64LEAQ1) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + break + } + // match: (LEAQ1 [c] {s} x (SHLQconst [1] y)) + // result: (LEAQ2 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAQ2) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return 
true + } + break + } + // match: (LEAQ1 [c] {s} x (SHLQconst [2] y)) + // result: (LEAQ4 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAQ4) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + break + } + // match: (LEAQ1 [c] {s} x (SHLQconst [3] y)) + // result: (LEAQ8 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 { + continue + } + y := v_1.Args[0] + v.reset(OpAMD64LEAQ8) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + break + } + // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64LEAQ { + continue + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + continue + } + v.reset(OpAMD64LEAQ1) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + break + } + // match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64LEAQ1 { + continue + 
} + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + y := v_1.Args[1] + if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + continue + } + v.reset(OpAMD64LEAQ2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + break + } + // match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y)) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64LEAQ1 { + continue + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + continue + } + v.reset(OpAMD64LEAQ2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(y, x) + return true + } + } + break + } + // match: (LEAQ1 [0] x y) + // cond: v.Aux == nil + // result: (ADDQ x y) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + y := v_1 + if !(v.Aux == nil) { + break + } + v.reset(OpAMD64ADDQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y) + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB + // result: (LEAQ2 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { + break + } + v.reset(OpAMD64LEAQ2) + v.AuxInt = int32ToAuxInt(c + d) + 
v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAQ2 [c] {s} x (ADDQconst [d] y)) + // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB + // result: (LEAQ2 [c+2*d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + d := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) { + break + } + v.reset(OpAMD64LEAQ2) + v.AuxInt = int32ToAuxInt(c + 2*d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAQ2 [c] {s} x (SHLQconst [1] y)) + // result: (LEAQ4 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 { + break + } + y := v_1.Args[0] + v.reset(OpAMD64LEAQ4) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAQ2 [c] {s} x (SHLQconst [2] y)) + // result: (LEAQ8 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 { + break + } + y := v_1.Args[0] + v.reset(OpAMD64LEAQ8) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + break + } + v.reset(OpAMD64LEAQ2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + // match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) + // 
cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil + // result: (LEAQ4 [off1+2*off2] {sym1} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64LEAQ1 { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + y := v_1.Args[1] + if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) { + break + } + v.reset(OpAMD64LEAQ4) + v.AuxInt = int32ToAuxInt(off1 + 2*off2) + v.Aux = symToAux(sym1) + v.AddArg2(x, y) + return true + } + // match: (LEAQ2 [off] {sym} x (MOVQconst [scale])) + // cond: is32Bit(int64(off)+int64(scale)*2) + // result: (LEAQ [off+int32(scale)*2] {sym} x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + scale := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(int64(off) + int64(scale)*2)) { + break + } + v.reset(OpAMD64LEAQ) + v.AuxInt = int32ToAuxInt(off + int32(scale)*2) + v.Aux = symToAux(sym) + v.AddArg(x) + return true + } + // match: (LEAQ2 [off] {sym} x (MOVLconst [scale])) + // cond: is32Bit(int64(off)+int64(scale)*2) + // result: (LEAQ [off+int32(scale)*2] {sym} x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + scale := auxIntToInt32(v_1.AuxInt) + if !(is32Bit(int64(off) + int64(scale)*2)) { + break + } + v.reset(OpAMD64LEAQ) + v.AuxInt = int32ToAuxInt(off + int32(scale)*2) + v.Aux = symToAux(sym) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y) + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB + // result: (LEAQ4 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { + break + } 
+ v.reset(OpAMD64LEAQ4) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAQ4 [c] {s} x (ADDQconst [d] y)) + // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB + // result: (LEAQ4 [c+4*d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + d := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) { + break + } + v.reset(OpAMD64LEAQ4) + v.AuxInt = int32ToAuxInt(c + 4*d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAQ4 [c] {s} x (SHLQconst [1] y)) + // result: (LEAQ8 [c] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 { + break + } + y := v_1.Args[0] + v.reset(OpAMD64LEAQ8) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + break + } + v.reset(OpAMD64LEAQ4) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + // match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) + // cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil + // result: (LEAQ8 [off1+4*off2] {sym1} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64LEAQ1 { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + y := v_1.Args[1] + 
if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) { + break + } + v.reset(OpAMD64LEAQ8) + v.AuxInt = int32ToAuxInt(off1 + 4*off2) + v.Aux = symToAux(sym1) + v.AddArg2(x, y) + return true + } + // match: (LEAQ4 [off] {sym} x (MOVQconst [scale])) + // cond: is32Bit(int64(off)+int64(scale)*4) + // result: (LEAQ [off+int32(scale)*4] {sym} x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + scale := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(int64(off) + int64(scale)*4)) { + break + } + v.reset(OpAMD64LEAQ) + v.AuxInt = int32ToAuxInt(off + int32(scale)*4) + v.Aux = symToAux(sym) + v.AddArg(x) + return true + } + // match: (LEAQ4 [off] {sym} x (MOVLconst [scale])) + // cond: is32Bit(int64(off)+int64(scale)*4) + // result: (LEAQ [off+int32(scale)*4] {sym} x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + scale := auxIntToInt32(v_1.AuxInt) + if !(is32Bit(int64(off) + int64(scale)*4)) { + break + } + v.reset(OpAMD64LEAQ) + v.AuxInt = int32ToAuxInt(off + int32(scale)*4) + v.Aux = symToAux(sym) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y) + // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB + // result: (LEAQ8 [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) { + break + } + v.reset(OpAMD64LEAQ8) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAQ8 [c] {s} x (ADDQconst [d] y)) + // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB + // result: (LEAQ8 [c+8*d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := 
auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + d := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) { + break + } + v.reset(OpAMD64LEAQ8) + v.AuxInt = int32ToAuxInt(c + 8*d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + break + } + v.reset(OpAMD64LEAQ8) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + // match: (LEAQ8 [off] {sym} x (MOVQconst [scale])) + // cond: is32Bit(int64(off)+int64(scale)*8) + // result: (LEAQ [off+int32(scale)*8] {sym} x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + scale := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(int64(off) + int64(scale)*8)) { + break + } + v.reset(OpAMD64LEAQ) + v.AuxInt = int32ToAuxInt(off + int32(scale)*8) + v.Aux = symToAux(sym) + v.AddArg(x) + return true + } + // match: (LEAQ8 [off] {sym} x (MOVLconst [scale])) + // cond: is32Bit(int64(off)+int64(scale)*8) + // result: (LEAQ [off+int32(scale)*8] {sym} x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + scale := auxIntToInt32(v_1.AuxInt) + if !(is32Bit(int64(off) + int64(scale)*8)) { + break + } + v.reset(OpAMD64LEAQ) + v.AuxInt = int32ToAuxInt(off + int32(scale)*8) + v.Aux = symToAux(sym) + v.AddArg(x) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64MOVBELstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBELstore [i] {s} p x:(BSWAPL w) mem) + // cond: x.Uses == 1 + // result: (MOVLstore [i] {s} p w mem) + for { + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + p := v_0 + x := v_1 + if x.Op != OpAMD64BSWAPL { + break + } + w := x.Args[0] + mem := v_2 + if !(x.Uses == 1) { + break + } + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) + v.AddArg3(p, w, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVBEQstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBEQstore [i] {s} p x:(BSWAPQ w) mem) + // cond: x.Uses == 1 + // result: (MOVQstore [i] {s} p w mem) + for { + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + p := v_0 + x := v_1 + if x.Op != OpAMD64BSWAPQ { + break + } + w := x.Args[0] + mem := v_2 + if !(x.Uses == 1) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) + v.AddArg3(p, w, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVBEWstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBEWstore [i] {s} p x:(ROLWconst [8] w) mem) + // cond: x.Uses == 1 + // result: (MOVWstore [i] {s} p w mem) + for { + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + p := v_0 + x := v_1 + if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 { + break + } + w := x.Args[0] + mem := v_2 + if !(x.Uses == 1) { + break + } + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) + v.AddArg3(p, w, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBQSXload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != 
OpAMD64MOVBload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBQSXload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVWload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBQSXload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVLload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBQSXload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVQload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVBQSX (ANDLconst 
[c] x)) + // cond: c & 0x80 == 0 + // result: (ANDLconst [c & 0x7f] x) + for { + if v_0.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(c&0x80 == 0) { + break + } + v.reset(OpAMD64ANDLconst) + v.AuxInt = int32ToAuxInt(c & 0x7f) + v.AddArg(x) + return true + } + // match: (MOVBQSX (MOVBQSX x)) + // result: (MOVBQSX x) + for { + if v_0.Op != OpAMD64MOVBQSX { + break + } + x := v_0.Args[0] + v.reset(OpAMD64MOVBQSX) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVBQSX x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVBstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpAMD64MOVBQSX) + v.AddArg(x) + return true + } + // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVBQSXload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // 
result: @x.Block (MOVBload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVBload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVWload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVLload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVQload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + 
v0.AddArg2(ptr, mem) + return true + } + // match: (MOVBQZX (ANDLconst [c] x)) + // result: (ANDLconst [c & 0xff] x) + for { + if v_0.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpAMD64ANDLconst) + v.AuxInt = int32ToAuxInt(c & 0xff) + v.AddArg(x) + return true + } + // match: (MOVBQZX (MOVBQZX x)) + // result: (MOVBQZX x) + for { + if v_0.Op != OpAMD64MOVBQZX { + break + } + x := v_0.Args[0] + v.reset(OpAMD64MOVBQZX) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVBatomicload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MOVBatomicload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVBatomicload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBload [off] {sym} ptr (MOVBstore 
[off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVBQZX x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVBstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpAMD64MOVBQZX) + v.AddArg(x) + return true + } + // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVBload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MOVBload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVBload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + // match: (MOVBload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVLconst [int32(read8(sym, int64(off)))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off)))) + return true + } + return 
false +} +func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBstore [off] {sym} ptr y:(SETL x) mem) + // cond: y.Uses == 1 + // result: (SETLstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64SETL { + break + } + x := y.Args[0] + mem := v_2 + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETLstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem) + // cond: y.Uses == 1 + // result: (SETLEstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64SETLE { + break + } + x := y.Args[0] + mem := v_2 + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETLEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETG x) mem) + // cond: y.Uses == 1 + // result: (SETGstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64SETG { + break + } + x := y.Args[0] + mem := v_2 + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETGstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem) + // cond: y.Uses == 1 + // result: (SETGEstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64SETGE { + break + } + x := y.Args[0] + mem := v_2 + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETGEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem) + // cond: y.Uses == 1 + // 
result: (SETEQstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64SETEQ { + break + } + x := y.Args[0] + mem := v_2 + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETEQstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem) + // cond: y.Uses == 1 + // result: (SETNEstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64SETNE { + break + } + x := y.Args[0] + mem := v_2 + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETNEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETB x) mem) + // cond: y.Uses == 1 + // result: (SETBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64SETB { + break + } + x := y.Args[0] + mem := v_2 + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem) + // cond: y.Uses == 1 + // result: (SETBEstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64SETBE { + break + } + x := y.Args[0] + mem := v_2 + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETBEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETA x) mem) + // cond: y.Uses == 1 + // result: (SETAstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64SETA { + break + } + x := y.Args[0] + mem := v_2 + if 
!(y.Uses == 1) { + break + } + v.reset(OpAMD64SETAstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem) + // cond: y.Uses == 1 + // result: (SETAEstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64SETAE { + break + } + x := y.Args[0] + mem := v_2 + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVBQSX { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVBQZX { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVBstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) + // 
result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(OpAMD64MOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem) + // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + v.reset(OpAMD64MOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd32(off) + // result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off := 
auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(ValAndOff(sc).canAdd32(off)) { + break + } + v.reset(OpAMD64MOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) + v.Aux = symToAux(s) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) + // result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) { + break + } + v.reset(OpAMD64MOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVLQSXload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVLload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVLQSXload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVQload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, 
OpAMD64MOVLQSXload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVLQSX (ANDLconst [c] x)) + // cond: uint32(c) & 0x80000000 == 0 + // result: (ANDLconst [c & 0x7fffffff] x) + for { + if v_0.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(uint32(c)&0x80000000 == 0) { + break + } + v.reset(OpAMD64ANDLconst) + v.AuxInt = int32ToAuxInt(c & 0x7fffffff) + v.AddArg(x) + return true + } + // match: (MOVLQSX (MOVLQSX x)) + // result: (MOVLQSX x) + for { + if v_0.Op != OpAMD64MOVLQSX { + break + } + x := v_0.Args[0] + v.reset(OpAMD64MOVLQSX) + v.AddArg(x) + return true + } + // match: (MOVLQSX (MOVWQSX x)) + // result: (MOVWQSX x) + for { + if v_0.Op != OpAMD64MOVWQSX { + break + } + x := v_0.Args[0] + v.reset(OpAMD64MOVWQSX) + v.AddArg(x) + return true + } + // match: (MOVLQSX (MOVBQSX x)) + // result: (MOVBQSX x) + for { + if v_0.Op != OpAMD64MOVBQSX { + break + } + x := v_0.Args[0] + v.reset(OpAMD64MOVBQSX) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVLQSX x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVLstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpAMD64MOVLQSX) + v.AddArg(x) + return true + } + // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := 
auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVLQSXload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVLload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVLload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVLload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVQload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVLQZX (ANDLconst [c] x)) + // result: (ANDLconst [c] x) + for { + if v_0.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpAMD64ANDLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVLQZX (MOVLQZX x)) + // result: (MOVLQZX x) + for { + if v_0.Op != OpAMD64MOVLQZX { + break + } + x := v_0.Args[0] + v.reset(OpAMD64MOVLQZX) + v.AddArg(x) + 
return true + } + // match: (MOVLQZX (MOVWQZX x)) + // result: (MOVWQZX x) + for { + if v_0.Op != OpAMD64MOVWQZX { + break + } + x := v_0.Args[0] + v.reset(OpAMD64MOVWQZX) + v.AddArg(x) + return true + } + // match: (MOVLQZX (MOVBQZX x)) + // result: (MOVBQZX x) + for { + if v_0.Op != OpAMD64MOVBQZX { + break + } + x := v_0.Args[0] + v.reset(OpAMD64MOVBQZX) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVLatomicload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MOVLatomicload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVLatomicload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVLf2i (Arg [off] {sym})) + // cond: t.Size() == u.Size() + // result: @b.Func.Entry (Arg [off] {sym}) + for { + t := v.Type + if v_0.Op != OpArg { + break + } + 
u := v_0.Type + off := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + if !(t.Size() == u.Size()) { + break + } + b = b.Func.Entry + v0 := b.NewValue0(v.Pos, OpArg, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVLi2f (Arg [off] {sym})) + // cond: t.Size() == u.Size() + // result: @b.Func.Entry (Arg [off] {sym}) + for { + t := v.Type + if v_0.Op != OpArg { + break + } + u := v_0.Type + off := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + if !(t.Size() == u.Size()) { + break + } + b = b.Func.Entry + v0 := b.NewValue0(v.Pos, OpArg, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVLQZX x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVLstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpAMD64MOVLQZX) + v.AddArg(x) + return true + } + // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVLload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MOVLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, 
mem) + return true + } + // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) + // result: (MOVLf2i val) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + val := v_1.Args[1] + if ptr != v_1.Args[0] { + break + } + v.reset(OpAMD64MOVLf2i) + v.AddArg(val) + return true + } + // match: (MOVLload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) + // result: (MOVLstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVLQSX { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } 
+ // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) + // result: (MOVLstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVLQZX { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVLstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) + // result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem) + // result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && 
canMergeSym(sym1, sym2) + // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ADDLmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { + break + } + mem := y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(OpAMD64ADDLmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ANDLmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { + break + } + mem := y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(OpAMD64ANDLmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ORLmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y 
:= v_1 + if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { + break + } + mem := y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(OpAMD64ORLmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (XORLmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { + break + } + mem := y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(OpAMD64XORLmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (ADDLmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64ADDL { + break + } + _ = y.Args[1] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + continue + } + mem := l.Args[1] + if ptr != l.Args[0] { + continue + } + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { + continue + } + v.reset(OpAMD64ADDLmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + break + } + // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (SUBLmodify [off] {sym} ptr x mem) + for { + off := 
auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64SUBL { + break + } + x := y.Args[1] + l := y.Args[0] + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + break + } + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { + break + } + v.reset(OpAMD64SUBLmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (ANDLmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64ANDL { + break + } + _ = y.Args[1] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + continue + } + mem := l.Args[1] + if ptr != l.Args[0] { + continue + } + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { + continue + } + v.reset(OpAMD64ANDLmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + break + } + // match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (ORLmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64ORL { + break + } + _ = y.Args[1] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + continue + } + mem := l.Args[1] + if ptr != l.Args[0] { + continue + } + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && 
clobber(y, l)) { + continue + } + v.reset(OpAMD64ORLmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + break + } + // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (XORLmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64XORL { + break + } + _ = y.Args[1] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + continue + } + mem := l.Args[1] + if ptr != l.Args[0] { + continue + } + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { + continue + } + v.reset(OpAMD64XORLmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + break + } + // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) + // result: (ADDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + a := v_1 + if a.Op != OpAMD64ADDLconst { + break + } + c := auxIntToInt32(a.AuxInt) + l := a.Args[0] + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + break + } + mem := l.Args[1] + ptr2 := l.Args[0] + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { + break + } + v.reset(OpAMD64ADDLconstmodify) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses 
== 1 && clobber(l, a) + // result: (ANDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + a := v_1 + if a.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(a.AuxInt) + l := a.Args[0] + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + break + } + mem := l.Args[1] + ptr2 := l.Args[0] + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { + break + } + v.reset(OpAMD64ANDLconstmodify) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) + // result: (ORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + a := v_1 + if a.Op != OpAMD64ORLconst { + break + } + c := auxIntToInt32(a.AuxInt) + l := a.Args[0] + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + break + } + mem := l.Args[1] + ptr2 := l.Args[0] + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { + break + } + v.reset(OpAMD64ORLconstmodify) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) + // result: (XORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + a := v_1 + if a.Op != OpAMD64XORLconst { + break + } + c := auxIntToInt32(a.AuxInt) + l := a.Args[0] + if l.Op != OpAMD64MOVLload || 
auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + break + } + mem := l.Args[1] + ptr2 := l.Args[0] + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { + break + } + v.reset(OpAMD64XORLconstmodify) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem) + // result: (MOVSSstore [off] {sym} ptr val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVLf2i { + break + } + val := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64MOVSSstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVLstore [i] {s} p x:(BSWAPL w) mem) + // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3 + // result: (MOVBELstore [i] {s} p w mem) + for { + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + p := v_0 + x := v_1 + if x.Op != OpAMD64BSWAPL { + break + } + w := x.Args[0] + mem := v_2 + if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64MOVBELstore) + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) + v.AddArg3(p, w, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd32(off) + // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(ValAndOff(sc).canAdd32(off)) { + break + } + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) + v.Aux = symToAux(s) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) 
mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) + // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) { + break + } + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVOload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MOVOload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVOload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool { + v_2 := 
v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVOstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MOVOstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVOstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) + // cond: symIsRO(srcSym) + // result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem)) + for { + dstOff := auxIntToInt32(v.AuxInt) + dstSym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVOload { + break + } + srcOff := auxIntToInt32(v_1.AuxInt) + srcSym := auxToSym(v_1.Aux) + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpSB { + break + } + mem := v_2 + if 
!(symIsRO(srcSym)) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = int32ToAuxInt(dstOff + 8) + v.Aux = symToAux(dstSym) + v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))) + v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(dstOff) + v1.Aux = symToAux(dstSym) + v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))) + v1.AddArg3(ptr, v2, mem) + v.AddArg3(ptr, v0, v1) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVOstoreconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVOstoreconst [sc] {s} (ADDQconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd32(off) + // result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(ValAndOff(sc).canAdd32(off)) { + break + } + v.reset(OpAMD64MOVOstoreconst) + v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) + v.Aux = symToAux(s) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVOstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) + // result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) { + break + } + v.reset(OpAMD64MOVOstoreconst) + v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return 
true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVQatomicload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MOVQatomicload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVQatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVQatomicload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVQf2i (Arg [off] {sym})) + // cond: t.Size() == u.Size() + // result: @b.Func.Entry (Arg [off] {sym}) + for { + t := v.Type + if v_0.Op != OpArg { + break + } + u := v_0.Type + off := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + if !(t.Size() == u.Size()) { + break + } + b = b.Func.Entry + v0 := b.NewValue0(v.Pos, OpArg, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // 
match: (MOVQi2f (Arg [off] {sym})) + // cond: t.Size() == u.Size() + // result: @b.Func.Entry (Arg [off] {sym}) + for { + t := v.Type + if v_0.Op != OpArg { + break + } + u := v_0.Type + off := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + if !(t.Size() == u.Size()) { + break + } + b = b.Func.Entry + v0 := b.NewValue0(v.Pos, OpArg, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVQstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.copyOf(x) + return true + } + // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVQload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MOVQload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := 
v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVQload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + // match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) + // result: (MOVQf2i val) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + val := v_1.Args[1] + if ptr != v_1.Args[0] { + break + } + v.reset(OpAMD64MOVQf2i) + v.AddArg(val) + return true + } + // match: (MOVQload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVQstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) + // cond: validVal(c) + // result: (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + 
if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(validVal(c)) { + break + } + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ADDQmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { + break + } + mem := y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(OpAMD64ADDQmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ANDQmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64ANDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { + break + } + mem := y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 
&& clobber(y)) { + break + } + v.reset(OpAMD64ANDQmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ORQmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64ORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { + break + } + mem := y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(OpAMD64ORQmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (XORQmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64XORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { + break + } + mem := y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(OpAMD64XORQmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (ADDQmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64ADDQ { + break + } + _ = y.Args[1] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + continue + } + mem := l.Args[1] + if ptr != l.Args[0] { + continue + } + x := y_1 + if mem != v_2 || !(y.Uses 
== 1 && l.Uses == 1 && clobber(y, l)) { + continue + } + v.reset(OpAMD64ADDQmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + break + } + // match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (SUBQmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64SUBQ { + break + } + x := y.Args[1] + l := y.Args[0] + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + break + } + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { + break + } + v.reset(OpAMD64SUBQmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (ANDQmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64ANDQ { + break + } + _ = y.Args[1] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + continue + } + mem := l.Args[1] + if ptr != l.Args[0] { + continue + } + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { + continue + } + v.reset(OpAMD64ANDQmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + break + } + // match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (ORQmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + 
y := v_1 + if y.Op != OpAMD64ORQ { + break + } + _ = y.Args[1] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + continue + } + mem := l.Args[1] + if ptr != l.Args[0] { + continue + } + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { + continue + } + v.reset(OpAMD64ORQmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + break + } + // match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) + // result: (XORQmodify [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + y := v_1 + if y.Op != OpAMD64XORQ { + break + } + _ = y.Args[1] + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + l := y_0 + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + continue + } + mem := l.Args[1] + if ptr != l.Args[0] { + continue + } + x := y_1 + if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) { + continue + } + v.reset(OpAMD64XORQmodify) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + break + } + // match: (MOVQstore {sym} [off] ptr x:(BTSQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem) + // cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l) + // result: (BTSQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + x := v_1 + if x.Op != OpAMD64BTSQconst { + break + } + c := auxIntToInt8(x.AuxInt) + l := x.Args[0] + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + break + } + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses 
== 1 && clobber(x, l)) { + break + } + v.reset(OpAMD64BTSQconstmodify) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVQstore {sym} [off] ptr x:(BTRQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem) + // cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l) + // result: (BTRQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + x := v_1 + if x.Op != OpAMD64BTRQconst { + break + } + c := auxIntToInt8(x.AuxInt) + l := x.Args[0] + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + break + } + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) { + break + } + v.reset(OpAMD64BTRQconstmodify) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVQstore {sym} [off] ptr x:(BTCQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem) + // cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l) + // result: (BTCQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + x := v_1 + if x.Op != OpAMD64BTCQconst { + break + } + c := auxIntToInt8(x.AuxInt) + l := x.Args[0] + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + break + } + mem := l.Args[1] + if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) { + break + } + v.reset(OpAMD64BTCQconstmodify) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) + // result: (ADDQconstmodify {sym} 
[makeValAndOff(int32(c),off)] ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + a := v_1 + if a.Op != OpAMD64ADDQconst { + break + } + c := auxIntToInt32(a.AuxInt) + l := a.Args[0] + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + break + } + mem := l.Args[1] + ptr2 := l.Args[0] + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { + break + } + v.reset(OpAMD64ADDQconstmodify) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) + // result: (ANDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + a := v_1 + if a.Op != OpAMD64ANDQconst { + break + } + c := auxIntToInt32(a.AuxInt) + l := a.Args[0] + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + break + } + mem := l.Args[1] + ptr2 := l.Args[0] + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { + break + } + v.reset(OpAMD64ANDQconstmodify) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) + // result: (ORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + a := v_1 + if a.Op != OpAMD64ORQconst { + break + } + c := auxIntToInt32(a.AuxInt) + l := a.Args[0] + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + break + } + 
mem := l.Args[1] + ptr2 := l.Args[0] + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { + break + } + v.reset(OpAMD64ORQconstmodify) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) + // result: (XORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + a := v_1 + if a.Op != OpAMD64XORQconst { + break + } + c := auxIntToInt32(a.AuxInt) + l := a.Args[0] + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { + break + } + mem := l.Args[1] + ptr2 := l.Args[0] + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) { + break + } + v.reset(OpAMD64XORQconstmodify) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem) + // result: (MOVSDstore [off] {sym} ptr val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVQf2i { + break + } + val := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64MOVSDstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVQstore [i] {s} p x:(BSWAPQ w) mem) + // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3 + // result: (MOVBEQstore [i] {s} p w mem) + for { + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + p := v_0 + x := v_1 + if x.Op != OpAMD64BSWAPQ { + break + } + w := x.Args[0] + mem := v_2 + if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64MOVBEQstore) + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) + v.AddArg3(p, w, mem) + 
return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd32(off) + // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(ValAndOff(sc).canAdd32(off)) { + break + } + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) + v.Aux = symToAux(s) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) + // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) { + break + } + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVQstoreconst [c] {s} p1 x:(MOVQstoreconst [a] {s} p0 mem)) + // cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x) + // result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem) + for { + c := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) + p1 := v_0 + x := v_1 + if x.Op != OpAMD64MOVQstoreconst { + break + } + a := auxIntToValAndOff(x.AuxInt) + if auxToSym(x.Aux) != s { + break + } + mem := x.Args[1] + p0 := x.Args[0] + if 
!(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) { + break + } + v.reset(OpAMD64MOVOstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off())) + v.Aux = symToAux(s) + v.AddArg2(p0, mem) + return true + } + // match: (MOVQstoreconst [a] {s} p0 x:(MOVQstoreconst [c] {s} p1 mem)) + // cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x) + // result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem) + for { + a := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) + p0 := v_0 + x := v_1 + if x.Op != OpAMD64MOVQstoreconst { + break + } + c := auxIntToValAndOff(x.AuxInt) + if auxToSym(x.Aux) != s { + break + } + mem := x.Args[1] + p1 := x.Args[0] + if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) { + break + } + v.reset(OpAMD64MOVOstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off())) + v.Aux = symToAux(s) + v.AddArg2(p0, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVSDload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MOVSDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: 
(MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVSDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + // match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) + // result: (MOVQi2f val) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVQstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + val := v_1.Args[1] + if ptr != v_1.Args[0] { + break + } + v.reset(OpAMD64MOVQi2f) + v.AddArg(val) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVSDstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MOVSDstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := 
v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVSDstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) + // result: (MOVQstore [off] {sym} ptr val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVQi2f { + break + } + val := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64MOVQstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVSSload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MOVSSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVSSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + // match: (MOVSSload [off] {sym} 
ptr (MOVLstore [off] {sym} ptr val _)) + // result: (MOVLi2f val) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVLstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + val := v_1.Args[1] + if ptr != v_1.Args[0] { + break + } + v.reset(OpAMD64MOVLi2f) + v.AddArg(val) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVSSstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MOVSSstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVSSstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) + // result: (MOVLstore [off] {sym} ptr val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVLi2f { + break + } + val := v_1.Args[0] + 
mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWQSXload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVWload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWQSXload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVLload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWQSXload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVQload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVWQSX (ANDLconst [c] x)) + // cond: c & 0x8000 == 0 + // result: (ANDLconst [c & 0x7fff] x) + for { + if v_0.Op != 
OpAMD64ANDLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(c&0x8000 == 0) { + break + } + v.reset(OpAMD64ANDLconst) + v.AuxInt = int32ToAuxInt(c & 0x7fff) + v.AddArg(x) + return true + } + // match: (MOVWQSX (MOVWQSX x)) + // result: (MOVWQSX x) + for { + if v_0.Op != OpAMD64MOVWQSX { + break + } + x := v_0.Args[0] + v.reset(OpAMD64MOVWQSX) + v.AddArg(x) + return true + } + // match: (MOVWQSX (MOVBQSX x)) + // result: (MOVBQSX x) + for { + if v_0.Op != OpAMD64MOVBQSX { + break + } + x := v_0.Args[0] + v.reset(OpAMD64MOVBQSX) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVWQSX x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVWstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpAMD64MOVWQSX) + v.AddArg(x) + return true + } + // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVWQSXload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + 
// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVWload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVLload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpAMD64MOVQload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVWQZX (ANDLconst [c] x)) + // result: (ANDLconst [c & 0xffff] x) + for { + if v_0.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpAMD64ANDLconst) + v.AuxInt = int32ToAuxInt(c & 0xffff) + v.AddArg(x) + return true + } + // match: (MOVWQZX (MOVWQZX x)) + // result: (MOVWQZX x) + for { + if v_0.Op != OpAMD64MOVWQZX { + break + } + x := 
v_0.Args[0] + v.reset(OpAMD64MOVWQZX) + v.AddArg(x) + return true + } + // match: (MOVWQZX (MOVBQZX x)) + // result: (MOVBQZX x) + for { + if v_0.Op != OpAMD64MOVBQZX { + break + } + x := v_0.Args[0] + v.reset(OpAMD64MOVBQZX) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVWQZX x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVWstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpAMD64MOVWQZX) + v.AddArg(x) + return true + } + // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVWload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MOVWload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVWload) + v.AuxInt = 
int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + // match: (MOVWload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVWQSX { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVWQZX { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MOVWstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore 
[off] {sym} ptr (MOVLconst [c]) mem) + // result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem) + // result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVWstore [i] {s} p x:(ROLWconst [8] w) mem) + // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3 + // result: (MOVBEWstore [i] {s} p w mem) + for { + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + p := v_0 + x := v_1 + if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 { + break + } + w := x.Args[0] + mem := v_2 + if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) { + break + 
} + v.reset(OpAMD64MOVBEWstore) + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) + v.AddArg3(p, w, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd32(off) + // result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(ValAndOff(sc).canAdd32(off)) { + break + } + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) + v.Aux = symToAux(s) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) + // result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) { + break + } + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MULL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MULL x (MOVLconst [c])) + // result: (MULLconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64MULLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + return false +} +func 
rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MULLconst [c] (MULLconst [d] x)) + // result: (MULLconst [c * d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MULLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpAMD64MULLconst) + v.AuxInt = int32ToAuxInt(c * d) + v.AddArg(x) + return true + } + // match: (MULLconst [-9] x) + // result: (NEGL (LEAL8 x x)) + for { + if auxIntToInt32(v.AuxInt) != -9 { + break + } + x := v_0 + v.reset(OpAMD64NEGL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + // match: (MULLconst [-5] x) + // result: (NEGL (LEAL4 x x)) + for { + if auxIntToInt32(v.AuxInt) != -5 { + break + } + x := v_0 + v.reset(OpAMD64NEGL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + // match: (MULLconst [-3] x) + // result: (NEGL (LEAL2 x x)) + for { + if auxIntToInt32(v.AuxInt) != -3 { + break + } + x := v_0 + v.reset(OpAMD64NEGL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + // match: (MULLconst [-1] x) + // result: (NEGL x) + for { + if auxIntToInt32(v.AuxInt) != -1 { + break + } + x := v_0 + v.reset(OpAMD64NEGL) + v.AddArg(x) + return true + } + // match: (MULLconst [ 0] _) + // result: (MOVLconst [0]) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (MULLconst [ 1] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 1 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (MULLconst [ 3] x) + // result: (LEAL2 x x) + for { + if auxIntToInt32(v.AuxInt) != 3 { + break + } + x := v_0 + v.reset(OpAMD64LEAL2) + v.AddArg2(x, x) + return true + } + // match: (MULLconst [ 5] x) + // result: (LEAL4 x x) + for { + if auxIntToInt32(v.AuxInt) != 5 { + break + } + x := v_0 + 
v.reset(OpAMD64LEAL4) + v.AddArg2(x, x) + return true + } + // match: (MULLconst [ 7] x) + // result: (LEAL2 x (LEAL2 x x)) + for { + if auxIntToInt32(v.AuxInt) != 7 { + break + } + x := v_0 + v.reset(OpAMD64LEAL2) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULLconst [ 9] x) + // result: (LEAL8 x x) + for { + if auxIntToInt32(v.AuxInt) != 9 { + break + } + x := v_0 + v.reset(OpAMD64LEAL8) + v.AddArg2(x, x) + return true + } + // match: (MULLconst [11] x) + // result: (LEAL2 x (LEAL4 x x)) + for { + if auxIntToInt32(v.AuxInt) != 11 { + break + } + x := v_0 + v.reset(OpAMD64LEAL2) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULLconst [13] x) + // result: (LEAL4 x (LEAL2 x x)) + for { + if auxIntToInt32(v.AuxInt) != 13 { + break + } + x := v_0 + v.reset(OpAMD64LEAL4) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULLconst [19] x) + // result: (LEAL2 x (LEAL8 x x)) + for { + if auxIntToInt32(v.AuxInt) != 19 { + break + } + x := v_0 + v.reset(OpAMD64LEAL2) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULLconst [21] x) + // result: (LEAL4 x (LEAL4 x x)) + for { + if auxIntToInt32(v.AuxInt) != 21 { + break + } + x := v_0 + v.reset(OpAMD64LEAL4) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULLconst [25] x) + // result: (LEAL8 x (LEAL2 x x)) + for { + if auxIntToInt32(v.AuxInt) != 25 { + break + } + x := v_0 + v.reset(OpAMD64LEAL8) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULLconst [27] x) + // result: (LEAL8 (LEAL2 x x) (LEAL2 x x)) + for { + if auxIntToInt32(v.AuxInt) != 27 { + break + } + x := v_0 + v.reset(OpAMD64LEAL8) + v0 := 
b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) + v0.AddArg2(x, x) + v.AddArg2(v0, v0) + return true + } + // match: (MULLconst [37] x) + // result: (LEAL4 x (LEAL8 x x)) + for { + if auxIntToInt32(v.AuxInt) != 37 { + break + } + x := v_0 + v.reset(OpAMD64LEAL4) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULLconst [41] x) + // result: (LEAL8 x (LEAL4 x x)) + for { + if auxIntToInt32(v.AuxInt) != 41 { + break + } + x := v_0 + v.reset(OpAMD64LEAL8) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULLconst [45] x) + // result: (LEAL8 (LEAL4 x x) (LEAL4 x x)) + for { + if auxIntToInt32(v.AuxInt) != 45 { + break + } + x := v_0 + v.reset(OpAMD64LEAL8) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) + v0.AddArg2(x, x) + v.AddArg2(v0, v0) + return true + } + // match: (MULLconst [73] x) + // result: (LEAL8 x (LEAL8 x x)) + for { + if auxIntToInt32(v.AuxInt) != 73 { + break + } + x := v_0 + v.reset(OpAMD64LEAL8) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULLconst [81] x) + // result: (LEAL8 (LEAL8 x x) (LEAL8 x x)) + for { + if auxIntToInt32(v.AuxInt) != 81 { + break + } + x := v_0 + v.reset(OpAMD64LEAL8) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) + v0.AddArg2(x, x) + v.AddArg2(v0, v0) + return true + } + // match: (MULLconst [c] x) + // cond: isPowerOfTwo64(int64(c)+1) && c >= 15 + // result: (SUBL (SHLLconst [int8(log64(int64(c)+1))] x) x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo64(int64(c)+1) && c >= 15) { + break + } + v.reset(OpAMD64SUBL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) + v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1))) + v0.AddArg(x) + v.AddArg2(v0, x) + return true + } + // match: (MULLconst [c] x) + // cond: isPowerOfTwo32(c-1) && c >= 17 + // result: (LEAL1 (SHLLconst [int8(log32(c-1))] x) x) + 
for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(c-1) && c >= 17) { + break + } + v.reset(OpAMD64LEAL1) + v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) + v0.AuxInt = int8ToAuxInt(int8(log32(c - 1))) + v0.AddArg(x) + v.AddArg2(v0, x) + return true + } + // match: (MULLconst [c] x) + // cond: isPowerOfTwo32(c-2) && c >= 34 + // result: (LEAL2 (SHLLconst [int8(log32(c-2))] x) x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(c-2) && c >= 34) { + break + } + v.reset(OpAMD64LEAL2) + v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) + v0.AuxInt = int8ToAuxInt(int8(log32(c - 2))) + v0.AddArg(x) + v.AddArg2(v0, x) + return true + } + // match: (MULLconst [c] x) + // cond: isPowerOfTwo32(c-4) && c >= 68 + // result: (LEAL4 (SHLLconst [int8(log32(c-4))] x) x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(c-4) && c >= 68) { + break + } + v.reset(OpAMD64LEAL4) + v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) + v0.AuxInt = int8ToAuxInt(int8(log32(c - 4))) + v0.AddArg(x) + v.AddArg2(v0, x) + return true + } + // match: (MULLconst [c] x) + // cond: isPowerOfTwo32(c-8) && c >= 136 + // result: (LEAL8 (SHLLconst [int8(log32(c-8))] x) x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(c-8) && c >= 136) { + break + } + v.reset(OpAMD64LEAL8) + v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) + v0.AuxInt = int8ToAuxInt(int8(log32(c - 8))) + v0.AddArg(x) + v.AddArg2(v0, x) + return true + } + // match: (MULLconst [c] x) + // cond: c%3 == 0 && isPowerOfTwo32(c/3) + // result: (SHLLconst [int8(log32(c/3))] (LEAL2 x x)) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c%3 == 0 && isPowerOfTwo32(c/3)) { + break + } + v.reset(OpAMD64SHLLconst) + v.AuxInt = int8ToAuxInt(int8(log32(c / 3))) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + // match: (MULLconst [c] x) + // cond: c%5 == 0 && isPowerOfTwo32(c/5) + // result: 
(SHLLconst [int8(log32(c/5))] (LEAL4 x x)) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c%5 == 0 && isPowerOfTwo32(c/5)) { + break + } + v.reset(OpAMD64SHLLconst) + v.AuxInt = int8ToAuxInt(int8(log32(c / 5))) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + // match: (MULLconst [c] x) + // cond: c%9 == 0 && isPowerOfTwo32(c/9) + // result: (SHLLconst [int8(log32(c/9))] (LEAL8 x x)) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c%9 == 0 && isPowerOfTwo32(c/9)) { + break + } + v.reset(OpAMD64SHLLconst) + v.AuxInt = int8ToAuxInt(int8(log32(c / 9))) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + // match: (MULLconst [c] (MOVLconst [d])) + // result: (MOVLconst [c*d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(c * d) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MULQ x (MOVQconst [c])) + // cond: is32Bit(c) + // result: (MULQconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpAMD64MULQconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MULQconst [c] (MULQconst [d] x)) + // cond: is32Bit(int64(c)*int64(d)) + // result: (MULQconst [c * d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MULQconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(int64(c) * int64(d))) { + break + } + v.reset(OpAMD64MULQconst) + v.AuxInt = int32ToAuxInt(c * d) + 
v.AddArg(x) + return true + } + // match: (MULQconst [-9] x) + // result: (NEGQ (LEAQ8 x x)) + for { + if auxIntToInt32(v.AuxInt) != -9 { + break + } + x := v_0 + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + // match: (MULQconst [-5] x) + // result: (NEGQ (LEAQ4 x x)) + for { + if auxIntToInt32(v.AuxInt) != -5 { + break + } + x := v_0 + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + // match: (MULQconst [-3] x) + // result: (NEGQ (LEAQ2 x x)) + for { + if auxIntToInt32(v.AuxInt) != -3 { + break + } + x := v_0 + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + // match: (MULQconst [-1] x) + // result: (NEGQ x) + for { + if auxIntToInt32(v.AuxInt) != -1 { + break + } + x := v_0 + v.reset(OpAMD64NEGQ) + v.AddArg(x) + return true + } + // match: (MULQconst [ 0] _) + // result: (MOVQconst [0]) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (MULQconst [ 1] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 1 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (MULQconst [ 3] x) + // result: (LEAQ2 x x) + for { + if auxIntToInt32(v.AuxInt) != 3 { + break + } + x := v_0 + v.reset(OpAMD64LEAQ2) + v.AddArg2(x, x) + return true + } + // match: (MULQconst [ 5] x) + // result: (LEAQ4 x x) + for { + if auxIntToInt32(v.AuxInt) != 5 { + break + } + x := v_0 + v.reset(OpAMD64LEAQ4) + v.AddArg2(x, x) + return true + } + // match: (MULQconst [ 7] x) + // result: (LEAQ2 x (LEAQ2 x x)) + for { + if auxIntToInt32(v.AuxInt) != 7 { + break + } + x := v_0 + v.reset(OpAMD64LEAQ2) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULQconst [ 9] x) + // result: (LEAQ8 x x) + for { 
+ if auxIntToInt32(v.AuxInt) != 9 { + break + } + x := v_0 + v.reset(OpAMD64LEAQ8) + v.AddArg2(x, x) + return true + } + // match: (MULQconst [11] x) + // result: (LEAQ2 x (LEAQ4 x x)) + for { + if auxIntToInt32(v.AuxInt) != 11 { + break + } + x := v_0 + v.reset(OpAMD64LEAQ2) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULQconst [13] x) + // result: (LEAQ4 x (LEAQ2 x x)) + for { + if auxIntToInt32(v.AuxInt) != 13 { + break + } + x := v_0 + v.reset(OpAMD64LEAQ4) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULQconst [19] x) + // result: (LEAQ2 x (LEAQ8 x x)) + for { + if auxIntToInt32(v.AuxInt) != 19 { + break + } + x := v_0 + v.reset(OpAMD64LEAQ2) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULQconst [21] x) + // result: (LEAQ4 x (LEAQ4 x x)) + for { + if auxIntToInt32(v.AuxInt) != 21 { + break + } + x := v_0 + v.reset(OpAMD64LEAQ4) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULQconst [25] x) + // result: (LEAQ8 x (LEAQ2 x x)) + for { + if auxIntToInt32(v.AuxInt) != 25 { + break + } + x := v_0 + v.reset(OpAMD64LEAQ8) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULQconst [27] x) + // result: (LEAQ8 (LEAQ2 x x) (LEAQ2 x x)) + for { + if auxIntToInt32(v.AuxInt) != 27 { + break + } + x := v_0 + v.reset(OpAMD64LEAQ8) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) + v0.AddArg2(x, x) + v.AddArg2(v0, v0) + return true + } + // match: (MULQconst [37] x) + // result: (LEAQ4 x (LEAQ8 x x)) + for { + if auxIntToInt32(v.AuxInt) != 37 { + break + } + x := v_0 + v.reset(OpAMD64LEAQ4) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULQconst [41] 
x) + // result: (LEAQ8 x (LEAQ4 x x)) + for { + if auxIntToInt32(v.AuxInt) != 41 { + break + } + x := v_0 + v.reset(OpAMD64LEAQ8) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULQconst [45] x) + // result: (LEAQ8 (LEAQ4 x x) (LEAQ4 x x)) + for { + if auxIntToInt32(v.AuxInt) != 45 { + break + } + x := v_0 + v.reset(OpAMD64LEAQ8) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) + v0.AddArg2(x, x) + v.AddArg2(v0, v0) + return true + } + // match: (MULQconst [73] x) + // result: (LEAQ8 x (LEAQ8 x x)) + for { + if auxIntToInt32(v.AuxInt) != 73 { + break + } + x := v_0 + v.reset(OpAMD64LEAQ8) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) + v0.AddArg2(x, x) + v.AddArg2(x, v0) + return true + } + // match: (MULQconst [81] x) + // result: (LEAQ8 (LEAQ8 x x) (LEAQ8 x x)) + for { + if auxIntToInt32(v.AuxInt) != 81 { + break + } + x := v_0 + v.reset(OpAMD64LEAQ8) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) + v0.AddArg2(x, x) + v.AddArg2(v0, v0) + return true + } + // match: (MULQconst [c] x) + // cond: isPowerOfTwo64(int64(c)+1) && c >= 15 + // result: (SUBQ (SHLQconst [int8(log64(int64(c)+1))] x) x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo64(int64(c)+1) && c >= 15) { + break + } + v.reset(OpAMD64SUBQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) + v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1))) + v0.AddArg(x) + v.AddArg2(v0, x) + return true + } + // match: (MULQconst [c] x) + // cond: isPowerOfTwo32(c-1) && c >= 17 + // result: (LEAQ1 (SHLQconst [int8(log32(c-1))] x) x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(c-1) && c >= 17) { + break + } + v.reset(OpAMD64LEAQ1) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) + v0.AuxInt = int8ToAuxInt(int8(log32(c - 1))) + v0.AddArg(x) + v.AddArg2(v0, x) + return true + } + // match: (MULQconst [c] x) + // cond: isPowerOfTwo32(c-2) && c >= 34 + // result: (LEAQ2 (SHLQconst 
[int8(log32(c-2))] x) x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(c-2) && c >= 34) { + break + } + v.reset(OpAMD64LEAQ2) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) + v0.AuxInt = int8ToAuxInt(int8(log32(c - 2))) + v0.AddArg(x) + v.AddArg2(v0, x) + return true + } + // match: (MULQconst [c] x) + // cond: isPowerOfTwo32(c-4) && c >= 68 + // result: (LEAQ4 (SHLQconst [int8(log32(c-4))] x) x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(c-4) && c >= 68) { + break + } + v.reset(OpAMD64LEAQ4) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) + v0.AuxInt = int8ToAuxInt(int8(log32(c - 4))) + v0.AddArg(x) + v.AddArg2(v0, x) + return true + } + // match: (MULQconst [c] x) + // cond: isPowerOfTwo32(c-8) && c >= 136 + // result: (LEAQ8 (SHLQconst [int8(log32(c-8))] x) x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(c-8) && c >= 136) { + break + } + v.reset(OpAMD64LEAQ8) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) + v0.AuxInt = int8ToAuxInt(int8(log32(c - 8))) + v0.AddArg(x) + v.AddArg2(v0, x) + return true + } + // match: (MULQconst [c] x) + // cond: c%3 == 0 && isPowerOfTwo32(c/3) + // result: (SHLQconst [int8(log32(c/3))] (LEAQ2 x x)) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c%3 == 0 && isPowerOfTwo32(c/3)) { + break + } + v.reset(OpAMD64SHLQconst) + v.AuxInt = int8ToAuxInt(int8(log32(c / 3))) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + // match: (MULQconst [c] x) + // cond: c%5 == 0 && isPowerOfTwo32(c/5) + // result: (SHLQconst [int8(log32(c/5))] (LEAQ4 x x)) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c%5 == 0 && isPowerOfTwo32(c/5)) { + break + } + v.reset(OpAMD64SHLQconst) + v.AuxInt = int8ToAuxInt(int8(log32(c / 5))) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + // match: (MULQconst [c] x) + // cond: c%9 == 0 && 
isPowerOfTwo32(c/9) + // result: (SHLQconst [int8(log32(c/9))] (LEAQ8 x x)) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c%9 == 0 && isPowerOfTwo32(c/9)) { + break + } + v.reset(OpAMD64SHLQconst) + v.AuxInt = int8ToAuxInt(int8(log32(c / 9))) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + // match: (MULQconst [c] (MOVQconst [d])) + // result: (MOVQconst [int64(c)*d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(int64(c) * d) + return true + } + // match: (MULQconst [c] (NEGQ x)) + // cond: c != -(1<<31) + // result: (MULQconst [-c] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64NEGQ { + break + } + x := v_0.Args[0] + if !(c != -(1 << 31)) { + break + } + v.reset(OpAMD64MULQconst) + v.AuxInt = int32ToAuxInt(-c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (MULSDload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVSDload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64MULSDload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MULSDload [off1+off2] 
{sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MULSDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MULSDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + // match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) + // result: (MULSD x (MOVQi2f y)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr := v_1 + if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + if ptr != v_2.Args[0] { + break + } + v.reset(OpAMD64MULSD) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (MULSSload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVSSload { + continue + } + off := 
auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64MULSSload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (MULSSload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64MULSSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MULSSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + // match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) + // result: (MULSS x (MOVLi2f y)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr := v_1 + if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != 
sym { + break + } + y := v_2.Args[1] + if ptr != v_2.Args[0] { + break + } + v.reset(OpAMD64MULSS) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool { + v_0 := v.Args[0] + // match: (NEGL (NEGL x)) + // result: x + for { + if v_0.Op != OpAMD64NEGL { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (NEGL s:(SUBL x y)) + // cond: s.Uses == 1 + // result: (SUBL y x) + for { + s := v_0 + if s.Op != OpAMD64SUBL { + break + } + y := s.Args[1] + x := s.Args[0] + if !(s.Uses == 1) { + break + } + v.reset(OpAMD64SUBL) + v.AddArg2(y, x) + return true + } + // match: (NEGL (MOVLconst [c])) + // result: (MOVLconst [-c]) + for { + if v_0.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(-c) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool { + v_0 := v.Args[0] + // match: (NEGQ (NEGQ x)) + // result: x + for { + if v_0.Op != OpAMD64NEGQ { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (NEGQ s:(SUBQ x y)) + // cond: s.Uses == 1 + // result: (SUBQ y x) + for { + s := v_0 + if s.Op != OpAMD64SUBQ { + break + } + y := s.Args[1] + x := s.Args[0] + if !(s.Uses == 1) { + break + } + v.reset(OpAMD64SUBQ) + v.AddArg2(y, x) + return true + } + // match: (NEGQ (MOVQconst [c])) + // result: (MOVQconst [-c]) + for { + if v_0.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(-c) + return true + } + // match: (NEGQ (ADDQconst [c] (NEGQ x))) + // cond: c != -(1<<31) + // result: (ADDQconst [-c] x) + for { + if v_0.Op != OpAMD64ADDQconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64NEGQ { + break + } + x := v_0_0.Args[0] + if !(c != -(1 << 31)) { + break + } + 
v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(-c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool { + v_0 := v.Args[0] + // match: (NOTL (MOVLconst [c])) + // result: (MOVLconst [^c]) + for { + if v_0.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(^c) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool { + v_0 := v.Args[0] + // match: (NOTQ (MOVQconst [c])) + // result: (MOVQconst [^c]) + for { + if v_0.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(^c) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ORL (SHLL (MOVLconst [1]) y) x) + // result: (BTSL x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64SHLL { + continue + } + y := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 { + continue + } + x := v_1 + v.reset(OpAMD64BTSL) + v.AddArg2(x, y) + return true + } + break + } + // match: (ORL x (MOVLconst [c])) + // result: (ORLconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64ORLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (ORL x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (ORL x l:(MOVLload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ORLload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVLload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym 
:= auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64ORLload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ORLconst [c] (ORLconst [d] x)) + // result: (ORLconst [c | d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64ORLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpAMD64ORLconst) + v.AuxInt = int32ToAuxInt(c | d) + v.AddArg(x) + return true + } + // match: (ORLconst [c] x) + // cond: c==0 + // result: x + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c == 0) { + break + } + v.copyOf(x) + return true + } + // match: (ORLconst [c] _) + // cond: c==-1 + // result: (MOVLconst [-1]) + for { + c := auxIntToInt32(v.AuxInt) + if !(c == -1) { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(-1) + return true + } + // match: (ORLconst [c] (MOVLconst [d])) + // result: (MOVLconst [c|d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(c | d) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(ValAndOff(valoff1).canAdd32(off2)) { + break + } + v.reset(OpAMD64ORLconstmodify) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + 
v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + // match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) + // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ORLconstmodify) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ORLload [off1] {sym} val (ADDQconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ORLload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64ORLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && 
canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ORLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + // match: ( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) + // result: ( ORL x (MOVLf2i y)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr := v_1 + if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + if ptr != v_2.Args[0] { + break + } + v.reset(OpAMD64ORL) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ORLmodify [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64ORLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ORLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = 
symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ORQ (SHLQ (MOVQconst [1]) y) x) + // result: (BTSQ x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64SHLQ { + continue + } + y := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + continue + } + x := v_1 + v.reset(OpAMD64BTSQ) + v.AddArg2(x, y) + return true + } + break + } + // match: (ORQ (MOVQconst [c]) x) + // cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31 + // result: (BTSQconst [int8(log64(c))] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64MOVQconst { + continue + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) { + continue + } + v.reset(OpAMD64BTSQconst) + v.AuxInt = int8ToAuxInt(int8(log64(c))) + v.AddArg(x) + return true + } + break + } + // match: (ORQ x (MOVQconst [c])) + // cond: is32Bit(c) + // result: (ORQconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpAMD64ORQconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + // match: (ORQ x (MOVLconst [c])) + // result: (ORQconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64ORQconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (ORQ (SHRQ lo bits) (SHLQ hi (NEGQ bits))) + // result: (SHRDQ lo hi bits) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64SHRQ { + continue + } + bits := 
v_0.Args[1] + lo := v_0.Args[0] + if v_1.Op != OpAMD64SHLQ { + continue + } + _ = v_1.Args[1] + hi := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] { + continue + } + v.reset(OpAMD64SHRDQ) + v.AddArg3(lo, hi, bits) + return true + } + break + } + // match: (ORQ (SHLQ lo bits) (SHRQ hi (NEGQ bits))) + // result: (SHLDQ lo hi bits) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64SHLQ { + continue + } + bits := v_0.Args[1] + lo := v_0.Args[0] + if v_1.Op != OpAMD64SHRQ { + continue + } + _ = v_1.Args[1] + hi := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] { + continue + } + v.reset(OpAMD64SHLDQ) + v.AddArg3(lo, hi, bits) + return true + } + break + } + // match: (ORQ (SHRXQ lo bits) (SHLXQ hi (NEGQ bits))) + // result: (SHRDQ lo hi bits) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64SHRXQ { + continue + } + bits := v_0.Args[1] + lo := v_0.Args[0] + if v_1.Op != OpAMD64SHLXQ { + continue + } + _ = v_1.Args[1] + hi := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] { + continue + } + v.reset(OpAMD64SHRDQ) + v.AddArg3(lo, hi, bits) + return true + } + break + } + // match: (ORQ (SHLXQ lo bits) (SHRXQ hi (NEGQ bits))) + // result: (SHLDQ lo hi bits) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64SHLXQ { + continue + } + bits := v_0.Args[1] + lo := v_0.Args[0] + if v_1.Op != OpAMD64SHRXQ { + continue + } + _ = v_1.Args[1] + hi := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] { + continue + } + v.reset(OpAMD64SHLDQ) + v.AddArg3(lo, hi, bits) + return true + } + break + } + // match: (ORQ (MOVQconst [c]) (MOVQconst [d])) + // result: (MOVQconst [c|d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64MOVQconst { + continue + } + c := 
auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpAMD64MOVQconst { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(c | d) + return true + } + break + } + // match: (ORQ x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (ORQ x l:(MOVQload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ORQload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVQload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64ORQload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ORQconst [c] (ORQconst [d] x)) + // result: (ORQconst [c | d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64ORQconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpAMD64ORQconst) + v.AuxInt = int32ToAuxInt(c | d) + v.AddArg(x) + return true + } + // match: (ORQconst [0] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ORQconst [-1] _) + // result: (MOVQconst [-1]) + for { + if auxIntToInt32(v.AuxInt) != -1 { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + // match: (ORQconst [c] (MOVQconst [d])) + // result: (MOVQconst [int64(c)|d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(int64(c) | d) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORQconstmodify(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(ValAndOff(valoff1).canAdd32(off2)) { + break + } + v.reset(OpAMD64ORQconstmodify) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + // match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) + // result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ORQconstmodify) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ORQload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + 
v.reset(OpAMD64ORQload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ORQload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + // match: ( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) + // result: ( ORQ x (MOVQf2i y)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr := v_1 + if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + if ptr != v_2.Args[0] { + break + } + v.reset(OpAMD64ORQ) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (ORQmodify [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64ORQmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // 
match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ORQmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ROLB x (NEGQ y)) + // result: (RORB x y) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + y := v_1.Args[0] + v.reset(OpAMD64RORB) + v.AddArg2(x, y) + return true + } + // match: (ROLB x (NEGL y)) + // result: (RORB x y) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + y := v_1.Args[0] + v.reset(OpAMD64RORB) + v.AddArg2(x, y) + return true + } + // match: (ROLB x (MOVQconst [c])) + // result: (ROLBconst [int8(c&7) ] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64ROLBconst) + v.AuxInt = int8ToAuxInt(int8(c & 7)) + v.AddArg(x) + return true + } + // match: (ROLB x (MOVLconst [c])) + // result: (ROLBconst [int8(c&7) ] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64ROLBconst) + v.AuxInt = int8ToAuxInt(int8(c & 7)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ROLBconst x [0]) + // result: x + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64ROLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ROLL x (NEGQ y)) + // result: (RORL x y) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + y := v_1.Args[0] + v.reset(OpAMD64RORL) + v.AddArg2(x, y) + return true + } + // match: (ROLL x (NEGL y)) + // result: (RORL x y) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + y := v_1.Args[0] + v.reset(OpAMD64RORL) + v.AddArg2(x, y) + return true + } + // match: (ROLL x (MOVQconst [c])) + // result: (ROLLconst [int8(c&31)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64ROLLconst) + v.AuxInt = int8ToAuxInt(int8(c & 31)) + v.AddArg(x) + return true + } + // match: (ROLL x (MOVLconst [c])) + // result: (ROLLconst [int8(c&31)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64ROLLconst) + v.AuxInt = int8ToAuxInt(int8(c & 31)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ROLLconst x [0]) + // result: x + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ROLQ x (NEGQ y)) + // result: (RORQ x y) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + y := v_1.Args[0] + v.reset(OpAMD64RORQ) + v.AddArg2(x, y) + return true + } + // match: (ROLQ x (NEGL y)) + // result: (RORQ x y) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + y := v_1.Args[0] + v.reset(OpAMD64RORQ) + v.AddArg2(x, y) + return true + } + // match: (ROLQ x (MOVQconst [c])) + // result: (ROLQconst [int8(c&63)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64ROLQconst) + v.AuxInt = int8ToAuxInt(int8(c & 63)) + 
v.AddArg(x) + return true + } + // match: (ROLQ x (MOVLconst [c])) + // result: (ROLQconst [int8(c&63)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64ROLQconst) + v.AuxInt = int8ToAuxInt(int8(c & 63)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ROLQconst x [0]) + // result: x + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ROLW x (NEGQ y)) + // result: (RORW x y) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + y := v_1.Args[0] + v.reset(OpAMD64RORW) + v.AddArg2(x, y) + return true + } + // match: (ROLW x (NEGL y)) + // result: (RORW x y) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + y := v_1.Args[0] + v.reset(OpAMD64RORW) + v.AddArg2(x, y) + return true + } + // match: (ROLW x (MOVQconst [c])) + // result: (ROLWconst [int8(c&15)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64ROLWconst) + v.AuxInt = int8ToAuxInt(int8(c & 15)) + v.AddArg(x) + return true + } + // match: (ROLW x (MOVLconst [c])) + // result: (ROLWconst [int8(c&15)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64ROLWconst) + v.AuxInt = int8ToAuxInt(int8(c & 15)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ROLWconst x [0]) + // result: x + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64RORB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (RORB x (NEGQ y)) + // result: (ROLB x y) + for { + x 
:= v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + y := v_1.Args[0] + v.reset(OpAMD64ROLB) + v.AddArg2(x, y) + return true + } + // match: (RORB x (NEGL y)) + // result: (ROLB x y) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + y := v_1.Args[0] + v.reset(OpAMD64ROLB) + v.AddArg2(x, y) + return true + } + // match: (RORB x (MOVQconst [c])) + // result: (ROLBconst [int8((-c)&7) ] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64ROLBconst) + v.AuxInt = int8ToAuxInt(int8((-c) & 7)) + v.AddArg(x) + return true + } + // match: (RORB x (MOVLconst [c])) + // result: (ROLBconst [int8((-c)&7) ] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64ROLBconst) + v.AuxInt = int8ToAuxInt(int8((-c) & 7)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64RORL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (RORL x (NEGQ y)) + // result: (ROLL x y) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + y := v_1.Args[0] + v.reset(OpAMD64ROLL) + v.AddArg2(x, y) + return true + } + // match: (RORL x (NEGL y)) + // result: (ROLL x y) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + y := v_1.Args[0] + v.reset(OpAMD64ROLL) + v.AddArg2(x, y) + return true + } + // match: (RORL x (MOVQconst [c])) + // result: (ROLLconst [int8((-c)&31)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64ROLLconst) + v.AuxInt = int8ToAuxInt(int8((-c) & 31)) + v.AddArg(x) + return true + } + // match: (RORL x (MOVLconst [c])) + // result: (ROLLconst [int8((-c)&31)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64ROLLconst) + v.AuxInt = int8ToAuxInt(int8((-c) & 31)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64RORQ(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (RORQ x (NEGQ y)) + // result: (ROLQ x y) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + y := v_1.Args[0] + v.reset(OpAMD64ROLQ) + v.AddArg2(x, y) + return true + } + // match: (RORQ x (NEGL y)) + // result: (ROLQ x y) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + y := v_1.Args[0] + v.reset(OpAMD64ROLQ) + v.AddArg2(x, y) + return true + } + // match: (RORQ x (MOVQconst [c])) + // result: (ROLQconst [int8((-c)&63)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64ROLQconst) + v.AuxInt = int8ToAuxInt(int8((-c) & 63)) + v.AddArg(x) + return true + } + // match: (RORQ x (MOVLconst [c])) + // result: (ROLQconst [int8((-c)&63)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64ROLQconst) + v.AuxInt = int8ToAuxInt(int8((-c) & 63)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64RORW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (RORW x (NEGQ y)) + // result: (ROLW x y) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + y := v_1.Args[0] + v.reset(OpAMD64ROLW) + v.AddArg2(x, y) + return true + } + // match: (RORW x (NEGL y)) + // result: (ROLW x y) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + y := v_1.Args[0] + v.reset(OpAMD64ROLW) + v.AddArg2(x, y) + return true + } + // match: (RORW x (MOVQconst [c])) + // result: (ROLWconst [int8((-c)&15)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64ROLWconst) + v.AuxInt = int8ToAuxInt(int8((-c) & 15)) + v.AddArg(x) + return true + } + // match: (RORW x (MOVLconst [c])) + // result: (ROLWconst [int8((-c)&15)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64ROLWconst) + v.AuxInt = 
int8ToAuxInt(int8((-c) & 15)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SARB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SARB x (MOVQconst [c])) + // result: (SARBconst [int8(min(int64(c)&31,7))] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64SARBconst) + v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7))) + v.AddArg(x) + return true + } + // match: (SARB x (MOVLconst [c])) + // result: (SARBconst [int8(min(int64(c)&31,7))] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64SARBconst) + v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7))) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SARBconst x [0]) + // result: x + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SARBconst [c] (MOVQconst [d])) + // result: (MOVQconst [int64(int8(d))>>uint64(c)]) + for { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(int64(int8(d)) >> uint64(c)) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SARL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SARL x (MOVQconst [c])) + // result: (SARLconst [int8(c&31)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64SARLconst) + v.AuxInt = int8ToAuxInt(int8(c & 31)) + v.AddArg(x) + return true + } + // match: (SARL x (MOVLconst [c])) + // result: (SARLconst [int8(c&31)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64SARLconst) + v.AuxInt = int8ToAuxInt(int8(c & 31)) + v.AddArg(x) + return true 
+ } + // match: (SARL x (ADDQconst [c] y)) + // cond: c & 31 == 0 + // result: (SARL x y) + for { + x := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&31 == 0) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) + return true + } + // match: (SARL x (NEGQ (ADDQconst [c] y))) + // cond: c & 31 == 0 + // result: (SARL x (NEGQ y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ADDQconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&31 == 0) { + break + } + v.reset(OpAMD64SARL) + v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SARL x (ANDQconst [c] y)) + // cond: c & 31 == 31 + // result: (SARL x y) + for { + x := v_0 + if v_1.Op != OpAMD64ANDQconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&31 == 31) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) + return true + } + // match: (SARL x (NEGQ (ANDQconst [c] y))) + // cond: c & 31 == 31 + // result: (SARL x (NEGQ y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ANDQconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&31 == 31) { + break + } + v.reset(OpAMD64SARL) + v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SARL x (ADDLconst [c] y)) + // cond: c & 31 == 0 + // result: (SARL x y) + for { + x := v_0 + if v_1.Op != OpAMD64ADDLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&31 == 0) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) + return true + } + // match: (SARL x (NEGL (ADDLconst [c] y))) + // cond: c & 31 == 0 + // result: (SARL x (NEGL y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if 
v_1_0.Op != OpAMD64ADDLconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&31 == 0) { + break + } + v.reset(OpAMD64SARL) + v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SARL x (ANDLconst [c] y)) + // cond: c & 31 == 31 + // result: (SARL x y) + for { + x := v_0 + if v_1.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&31 == 31) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) + return true + } + // match: (SARL x (NEGL (ANDLconst [c] y))) + // cond: c & 31 == 31 + // result: (SARL x (NEGL y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&31 == 31) { + break + } + v.reset(OpAMD64SARL) + v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SARL l:(MOVLload [off] {sym} ptr mem) x) + // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) + // result: (SARXLload [off] {sym} ptr x mem) + for { + l := v_0 + if l.Op != OpAMD64MOVLload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + x := v_1 + if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64SARXLload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SARLconst x [0]) + // result: x + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SARLconst [c] (MOVQconst [d])) + // result: (MOVQconst [int64(int32(d))>>uint64(c)]) + for { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + d := auxIntToInt64(v_0.AuxInt) 
+ v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c)) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SARQ x (MOVQconst [c])) + // result: (SARQconst [int8(c&63)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64SARQconst) + v.AuxInt = int8ToAuxInt(int8(c & 63)) + v.AddArg(x) + return true + } + // match: (SARQ x (MOVLconst [c])) + // result: (SARQconst [int8(c&63)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64SARQconst) + v.AuxInt = int8ToAuxInt(int8(c & 63)) + v.AddArg(x) + return true + } + // match: (SARQ x (ADDQconst [c] y)) + // cond: c & 63 == 0 + // result: (SARQ x y) + for { + x := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&63 == 0) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) + return true + } + // match: (SARQ x (NEGQ (ADDQconst [c] y))) + // cond: c & 63 == 0 + // result: (SARQ x (NEGQ y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ADDQconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&63 == 0) { + break + } + v.reset(OpAMD64SARQ) + v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SARQ x (ANDQconst [c] y)) + // cond: c & 63 == 63 + // result: (SARQ x y) + for { + x := v_0 + if v_1.Op != OpAMD64ANDQconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&63 == 63) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) + return true + } + // match: (SARQ x (NEGQ (ANDQconst [c] y))) + // cond: c & 63 == 63 + // result: (SARQ x (NEGQ y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + t := v_1.Type + 
v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ANDQconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&63 == 63) { + break + } + v.reset(OpAMD64SARQ) + v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SARQ x (ADDLconst [c] y)) + // cond: c & 63 == 0 + // result: (SARQ x y) + for { + x := v_0 + if v_1.Op != OpAMD64ADDLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&63 == 0) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) + return true + } + // match: (SARQ x (NEGL (ADDLconst [c] y))) + // cond: c & 63 == 0 + // result: (SARQ x (NEGL y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ADDLconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&63 == 0) { + break + } + v.reset(OpAMD64SARQ) + v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SARQ x (ANDLconst [c] y)) + // cond: c & 63 == 63 + // result: (SARQ x y) + for { + x := v_0 + if v_1.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&63 == 63) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) + return true + } + // match: (SARQ x (NEGL (ANDLconst [c] y))) + // cond: c & 63 == 63 + // result: (SARQ x (NEGL y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&63 == 63) { + break + } + v.reset(OpAMD64SARQ) + v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SARQ l:(MOVQload [off] {sym} ptr mem) x) + // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) + // result: (SARXQload [off] {sym} ptr x mem) + for { + l := v_0 + if l.Op != OpAMD64MOVQload { + 
break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + x := v_1 + if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64SARXQload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SARQconst x [0]) + // result: x + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SARQconst [c] (MOVQconst [d])) + // result: (MOVQconst [d>>uint64(c)]) + for { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(d >> uint64(c)) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SARW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SARW x (MOVQconst [c])) + // result: (SARWconst [int8(min(int64(c)&31,15))] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64SARWconst) + v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15))) + v.AddArg(x) + return true + } + // match: (SARW x (MOVLconst [c])) + // result: (SARWconst [int8(min(int64(c)&31,15))] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64SARWconst) + v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15))) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SARWconst x [0]) + // result: x + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SARWconst [c] (MOVQconst [d])) + // result: (MOVQconst [int64(int16(d))>>uint64(c)]) + for { + c := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + d := 
auxIntToInt64(v_0.AuxInt) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(int64(int16(d)) >> uint64(c)) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SARXLload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SARXLload [off] {sym} ptr (MOVLconst [c]) mem) + // result: (SARLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(OpAMD64SARLconst) + v.AuxInt = int8ToAuxInt(int8(c & 31)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SARXQload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SARXQload [off] {sym} ptr (MOVQconst [c]) mem) + // result: (SARQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + v.reset(OpAMD64SARQconst) + v.AuxInt = int8ToAuxInt(int8(c & 63)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } + // match: (SARXQload [off] {sym} ptr (MOVLconst [c]) mem) + // result: (SARQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(OpAMD64SARQconst) + v.AuxInt = int8ToAuxInt(int8(c & 63)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = 
int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool { + v_0 := v.Args[0] + // match: (SBBLcarrymask (FlagEQ)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SBBLcarrymask (FlagLT_ULT)) + // result: (MOVLconst [-1]) + for { + if v_0.Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(-1) + return true + } + // match: (SBBLcarrymask (FlagLT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SBBLcarrymask (FlagGT_ULT)) + // result: (MOVLconst [-1]) + for { + if v_0.Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(-1) + return true + } + // match: (SBBLcarrymask (FlagGT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SBBQ x (MOVQconst [c]) borrow) + // cond: is32Bit(c) + // result: (SBBQconst x [int32(c)] borrow) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + borrow := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64SBBQconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(x, borrow) + return true + } + // match: (SBBQ x y (FlagEQ)) + // result: (SUBQborrow x y) + for { + x := v_0 + y := v_1 + if v_2.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64SUBQborrow) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool { + v_0 := v.Args[0] + // 
match: (SBBQcarrymask (FlagEQ)) + // result: (MOVQconst [0]) + for { + if v_0.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SBBQcarrymask (FlagLT_ULT)) + // result: (MOVQconst [-1]) + for { + if v_0.Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + // match: (SBBQcarrymask (FlagLT_UGT)) + // result: (MOVQconst [0]) + for { + if v_0.Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SBBQcarrymask (FlagGT_ULT)) + // result: (MOVQconst [-1]) + for { + if v_0.Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + // match: (SBBQcarrymask (FlagGT_UGT)) + // result: (MOVQconst [0]) + for { + if v_0.Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SBBQconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SBBQconst x [c] (FlagEQ)) + // result: (SUBQconstborrow x [c]) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64SUBQconstborrow) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETA(v *Value) bool { + v_0 := v.Args[0] + // match: (SETA (InvertFlags x)) + // result: (SETB x) + for { + if v_0.Op != OpAMD64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETB) + v.AddArg(x) + return true + } + // match: (SETA (FlagEQ)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETA (FlagLT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = 
int32ToAuxInt(0) + return true + } + // match: (SETA (FlagLT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETA (FlagGT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETA (FlagGT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool { + v_0 := v.Args[0] + // match: (SETAE (TESTQ x x)) + // result: (ConstBool [true]) + for { + if v_0.Op != OpAMD64TESTQ { + break + } + x := v_0.Args[1] + if x != v_0.Args[0] { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (SETAE (TESTL x x)) + // result: (ConstBool [true]) + for { + if v_0.Op != OpAMD64TESTL { + break + } + x := v_0.Args[1] + if x != v_0.Args[0] { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (SETAE (TESTW x x)) + // result: (ConstBool [true]) + for { + if v_0.Op != OpAMD64TESTW { + break + } + x := v_0.Args[1] + if x != v_0.Args[0] { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (SETAE (TESTB x x)) + // result: (ConstBool [true]) + for { + if v_0.Op != OpAMD64TESTB { + break + } + x := v_0.Args[1] + if x != v_0.Args[0] { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (SETAE (InvertFlags x)) + // result: (SETBE x) + for { + if v_0.Op != OpAMD64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETBE) + v.AddArg(x) + return true + } + // match: (SETAE (FlagEQ)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVLconst) + 
v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETAE (FlagLT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETAE (FlagLT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETAE (FlagGT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETAE (FlagGT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem) + // result: (SETBEstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64SETBEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SETAEstore [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (SETAEstore [off1] {sym1} 
(LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (SETAEstore [off] {sym} ptr (FlagEQ) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagEQ { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = 
int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagGT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagGT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SETAstore [off] {sym} ptr (InvertFlags x) mem) + // result: (SETBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64SETBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SETAstore [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + 
} + v.reset(OpAMD64SETAstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64SETAstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (SETAstore [off] {sym} ptr (FlagEQ) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagEQ { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETAstore [off] {sym} ptr (FlagLT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_UGT { + break + } + mem 
:= v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagGT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagGT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETB(v *Value) bool { + v_0 := v.Args[0] + // match: (SETB (TESTQ x x)) + // result: (ConstBool [false]) + for { + if v_0.Op != OpAMD64TESTQ { + break + } + x := v_0.Args[1] + if x != v_0.Args[0] { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (SETB (TESTL x x)) + // result: (ConstBool [false]) + for { + if v_0.Op != OpAMD64TESTL { + break + } + x := v_0.Args[1] + if x != v_0.Args[0] { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (SETB (TESTW x x)) + // result: (ConstBool [false]) + for { + if v_0.Op != OpAMD64TESTW { + break + } + x := v_0.Args[1] + if x != v_0.Args[0] { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: 
(SETB (TESTB x x)) + // result: (ConstBool [false]) + for { + if v_0.Op != OpAMD64TESTB { + break + } + x := v_0.Args[1] + if x != v_0.Args[0] { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (SETB (BTLconst [0] x)) + // result: (ANDLconst [1] x) + for { + if v_0.Op != OpAMD64BTLconst || auxIntToInt8(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + v.reset(OpAMD64ANDLconst) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(x) + return true + } + // match: (SETB (BTQconst [0] x)) + // result: (ANDQconst [1] x) + for { + if v_0.Op != OpAMD64BTQconst || auxIntToInt8(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + v.reset(OpAMD64ANDQconst) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(x) + return true + } + // match: (SETB (InvertFlags x)) + // result: (SETA x) + for { + if v_0.Op != OpAMD64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETA) + v.AddArg(x) + return true + } + // match: (SETB (FlagEQ)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETB (FlagLT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETB (FlagLT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETB (FlagGT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETB (FlagGT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool { + v_0 := v.Args[0] + // match: 
(SETBE (InvertFlags x)) + // result: (SETAE x) + for { + if v_0.Op != OpAMD64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETAE) + v.AddArg(x) + return true + } + // match: (SETBE (FlagEQ)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETBE (FlagLT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETBE (FlagLT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETBE (FlagGT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETBE (FlagGT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem) + // result: (SETAEstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64SETAEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SETBEstore [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + 
off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64SETBEstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64SETBEstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (SETBEstore [off] {sym} ptr (FlagEQ) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagEQ { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr 
(MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagGT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagGT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SETBstore [off] {sym} ptr (InvertFlags x) mem) + // result: (SETAstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64SETAstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: 
is32Bit(int64(off1)+int64(off2)) + // result: (SETBstore [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64SETBstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64SETBstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (SETBstore [off] {sym} ptr (FlagEQ) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagEQ { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, 
OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETBstore [off] {sym} ptr (FlagLT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETBstore [off] {sym} ptr (FlagGT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagGT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETBstore [off] {sym} ptr (FlagGT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagGT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y)) + // result: (SETAE (BTL x y)) + for { + if v_0.Op != OpAMD64TESTL { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpAMD64SHLL { + continue + } + x := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if 
v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 { + continue + } + y := v_0_1 + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) + // result: (SETAE (BTQ x y)) + for { + if v_0.Op != OpAMD64TESTQ { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpAMD64SHLQ { + continue + } + x := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { + continue + } + y := v_0_1 + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (SETEQ (TESTLconst [c] x)) + // cond: isUint32PowerOfTwo(int64(c)) + // result: (SETAE (BTLconst [int8(log32(c))] x)) + for { + if v_0.Op != OpAMD64TESTLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(isUint32PowerOfTwo(int64(c))) { + break + } + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SETEQ (TESTQconst [c] x)) + // cond: isUint64PowerOfTwo(int64(c)) + // result: (SETAE (BTQconst [int8(log32(c))] x)) + for { + if v_0.Op != OpAMD64TESTQconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(isUint64PowerOfTwo(int64(c))) { + break + } + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SETEQ (TESTQ (MOVQconst [c]) x)) + // cond: isUint64PowerOfTwo(c) + // result: (SETAE (BTQconst [int8(log64(c))] x)) + for { + if v_0.Op != OpAMD64TESTQ { + break + } + _ = v_0.Args[1] + v_0_0 := 
v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpAMD64MOVQconst { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + x := v_0_1 + if !(isUint64PowerOfTwo(c)) { + continue + } + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(log64(c))) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _))) + // result: (SETNE (CMPLconst [0] s)) + for { + if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 { + break + } + s := v_0.Args[0] + if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 { + break + } + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(s) + v.AddArg(v0) + return true + } + // match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _))) + // result: (SETNE (CMPQconst [0] s)) + for { + if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 { + break + } + s := v_0.Args[0] + if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 { + break + } + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(s) + v.AddArg(v0) + return true + } + // match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) + // cond: z1==z2 + // result: (SETAE (BTQconst [63] x)) + for { + if v_0.Op != OpAMD64TESTQ { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 
int8ToAuxInt(63) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) + // cond: z1==z2 + // result: (SETAE (BTQconst [31] x)) + for { + if v_0.Op != OpAMD64TESTL { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(31) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) + // cond: z1==z2 + // result: (SETAE (BTQconst [0] x)) + for { + if v_0.Op != OpAMD64TESTQ { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) + // cond: z1==z2 + // result: (SETAE (BTLconst [0] x)) + for { + if v_0.Op != OpAMD64TESTL { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { + continue + } + z1_0 := 
z1.Args[0] + if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2)) + // cond: z1==z2 + // result: (SETAE (BTQconst [63] x)) + for { + if v_0.Op != OpAMD64TESTQ { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { + continue + } + x := z1.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(63) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2)) + // cond: z1==z2 + // result: (SETAE (BTLconst [31] x)) + for { + if v_0.Op != OpAMD64TESTL { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { + continue + } + x := z1.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAE) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(31) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (SETEQ (InvertFlags x)) + // result: (SETEQ x) + for { + if v_0.Op != OpAMD64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETEQ) + v.AddArg(x) + return true + } + // match: (SETEQ (FlagEQ)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETEQ 
(FlagLT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETEQ (FlagLT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETEQ (FlagGT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETEQ (FlagGT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETEQ (TESTQ s:(Select0 blsr:(BLSRQ _)) s)) + // result: (SETEQ (Select1 blsr)) + for { + if v_0.Op != OpAMD64TESTQ { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + s := v_0_0 + if s.Op != OpSelect0 { + continue + } + blsr := s.Args[0] + if blsr.Op != OpAMD64BLSRQ || s != v_0_1 { + continue + } + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(blsr) + v.AddArg(v0) + return true + } + break + } + // match: (SETEQ (TESTL s:(Select0 blsr:(BLSRL _)) s)) + // result: (SETEQ (Select1 blsr)) + for { + if v_0.Op != OpAMD64TESTL { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + s := v_0_0 + if s.Op != OpSelect0 { + continue + } + blsr := s.Args[0] + if blsr.Op != OpAMD64BLSRL || s != v_0_1 { + continue + } + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(blsr) + v.AddArg(v0) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + typ := &b.Func.Config.Types + // match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) + // result: (SETAEstore [off] {sym} ptr (BTL x y) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTL { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_0.Op != OpAMD64SHLL { + continue + } + x := v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 { + continue + } + y := v_1_1 + mem := v_2 + v.reset(OpAMD64SETAEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg3(ptr, v0, mem) + return true + } + break + } + // match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) + // result: (SETAEstore [off] {sym} ptr (BTQ x y) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_0.Op != OpAMD64SHLQ { + continue + } + x := v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 { + continue + } + y := v_1_1 + mem := v_2 + v.reset(OpAMD64SETAEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg3(ptr, v0, mem) + return true + } + break + } + // match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem) + // cond: isUint32PowerOfTwo(int64(c)) + // result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTLconst { + break + } + c := 
auxIntToInt32(v_1.AuxInt) + x := v_1.Args[0] + mem := v_2 + if !(isUint32PowerOfTwo(int64(c))) { + break + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem) + // cond: isUint64PowerOfTwo(int64(c)) + // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTQconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + x := v_1.Args[0] + mem := v_2 + if !(isUint64PowerOfTwo(int64(c))) { + break + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) + // cond: isUint64PowerOfTwo(c) + // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_0.Op != OpAMD64MOVQconst { + continue + } + c := auxIntToInt64(v_1_0.AuxInt) + x := v_1_1 + mem := v_2 + if !(isUint64PowerOfTwo(c)) { + continue + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(log64(c))) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + break + } + // match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) + // result: (SETNEstore [off] {sym} 
ptr (CMPLconst [0] s) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 { + break + } + s := v_1.Args[0] + if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 { + break + } + mem := v_2 + v.reset(OpAMD64SETNEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(s) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) + // result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 { + break + } + s := v_1.Args[0] + if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 { + break + } + mem := v_2 + v.reset(OpAMD64SETNEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(s) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) + // cond: z1==z2 + // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 + if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_1_1 + mem := v_2 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := 
b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(63) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + break + } + // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) + // cond: z1==z2 + // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTL { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 + if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_1_1 + mem := v_2 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(31) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + break + } + // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) + // cond: z1==z2 + // result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_1_1 + mem := v_2 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = 
int8ToAuxInt(0) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + break + } + // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) + // cond: z1==z2 + // result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTL { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_1_1 + mem := v_2 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + break + } + // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) + // cond: z1==z2 + // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { + continue + } + x := z1.Args[0] + z2 := v_1_1 + mem := v_2 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(63) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + break + } + // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) + // cond: z1==z2 + // result: 
(SETAEstore [off] {sym} ptr (BTLconst [31] x) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTL { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { + continue + } + x := z1.Args[0] + z2 := v_1_1 + mem := v_2 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETAEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(31) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + break + } + // match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem) + // result: (SETEQstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64SETEQstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SETEQstore [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64SETEQstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { 
+ break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64SETEQstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (SETEQstore [off] {sym} ptr (FlagEQ) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagEQ { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != 
OpAMD64FlagGT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagGT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETG(v *Value) bool { + v_0 := v.Args[0] + // match: (SETG (InvertFlags x)) + // result: (SETL x) + for { + if v_0.Op != OpAMD64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETL) + v.AddArg(x) + return true + } + // match: (SETG (FlagEQ)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETG (FlagLT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETG (FlagLT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETG (FlagGT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETG (FlagGT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64SETGE(v *Value) bool { + v_0 := v.Args[0] + // match: (SETGE (InvertFlags x)) + // result: (SETLE x) + for { + if v_0.Op != OpAMD64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETLE) + v.AddArg(x) + return true + } + // match: (SETGE (FlagEQ)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETGE (FlagLT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETGE (FlagLT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETGE (FlagGT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETGE (FlagGT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem) + // result: (SETLEstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64SETLEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SETGEstore [off1+off2] {sym} base val mem) + for { + off1 := 
auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64SETGEstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64SETGEstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (SETGEstore [off] {sym} ptr (FlagEQ) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagEQ { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // 
match: (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagGT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagGT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SETGstore [off] {sym} ptr (InvertFlags x) mem) + // result: (SETLstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64SETLstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: 
(SETGstore [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SETGstore [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64SETGstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64SETGstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (SETGstore [off] {sym} ptr (FlagEQ) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagEQ { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETGstore [off] {sym} ptr (FlagLT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = 
int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETGstore [off] {sym} ptr (FlagLT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETGstore [off] {sym} ptr (FlagGT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagGT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETGstore [off] {sym} ptr (FlagGT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagGT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETL(v *Value) bool { + v_0 := v.Args[0] + // match: (SETL (InvertFlags x)) + // result: (SETG x) + for { + if v_0.Op != OpAMD64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETG) + v.AddArg(x) + return true + } + // match: (SETL (FlagEQ)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagEQ { + break + } + 
v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETL (FlagLT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETL (FlagLT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETL (FlagGT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETL (FlagGT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool { + v_0 := v.Args[0] + // match: (SETLE (InvertFlags x)) + // result: (SETGE x) + for { + if v_0.Op != OpAMD64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETGE) + v.AddArg(x) + return true + } + // match: (SETLE (FlagEQ)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETLE (FlagLT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETLE (FlagLT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETLE (FlagGT_ULT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETLE (FlagGT_UGT)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagGT_UGT { + break 
+ } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SETLEstore [off] {sym} ptr (InvertFlags x) mem) + // result: (SETGEstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64SETGEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SETLEstore [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64SETLEstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETLEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64SETLEstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (SETLEstore [off] {sym} ptr (FlagEQ) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst 
[1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagEQ { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagGT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != 
OpAMD64FlagGT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SETLstore [off] {sym} ptr (InvertFlags x) mem) + // result: (SETGstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64SETGstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SETLstore [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64SETLstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64SETLstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = 
symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (SETLstore [off] {sym} ptr (FlagEQ) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagEQ { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETLstore [off] {sym} ptr (FlagLT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETLstore [off] {sym} ptr (FlagLT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETLstore [off] {sym} ptr (FlagGT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagGT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETLstore [off] {sym} ptr 
(FlagGT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagGT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (SETNE (TESTBconst [1] x)) + // result: (ANDLconst [1] x) + for { + if v_0.Op != OpAMD64TESTBconst || auxIntToInt8(v_0.AuxInt) != 1 { + break + } + x := v_0.Args[0] + v.reset(OpAMD64ANDLconst) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(x) + return true + } + // match: (SETNE (TESTWconst [1] x)) + // result: (ANDLconst [1] x) + for { + if v_0.Op != OpAMD64TESTWconst || auxIntToInt16(v_0.AuxInt) != 1 { + break + } + x := v_0.Args[0] + v.reset(OpAMD64ANDLconst) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(x) + return true + } + // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y)) + // result: (SETB (BTL x y)) + for { + if v_0.Op != OpAMD64TESTL { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpAMD64SHLL { + continue + } + x := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 { + continue + } + y := v_0_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y)) + // result: (SETB (BTQ x y)) + for { + if v_0.Op != OpAMD64TESTQ { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpAMD64SHLQ { + continue + } + x 
:= v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { + continue + } + y := v_0_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (SETNE (TESTLconst [c] x)) + // cond: isUint32PowerOfTwo(int64(c)) + // result: (SETB (BTLconst [int8(log32(c))] x)) + for { + if v_0.Op != OpAMD64TESTLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(isUint32PowerOfTwo(int64(c))) { + break + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SETNE (TESTQconst [c] x)) + // cond: isUint64PowerOfTwo(int64(c)) + // result: (SETB (BTQconst [int8(log32(c))] x)) + for { + if v_0.Op != OpAMD64TESTQconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(isUint64PowerOfTwo(int64(c))) { + break + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SETNE (TESTQ (MOVQconst [c]) x)) + // cond: isUint64PowerOfTwo(c) + // result: (SETB (BTQconst [int8(log64(c))] x)) + for { + if v_0.Op != OpAMD64TESTQ { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpAMD64MOVQconst { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + x := v_0_1 + if !(isUint64PowerOfTwo(c)) { + continue + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(log64(c))) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _))) + // result: (SETEQ (CMPLconst [0] s)) + for { + if v_0.Op != OpAMD64CMPLconst || 
auxIntToInt32(v_0.AuxInt) != 1 { + break + } + s := v_0.Args[0] + if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 { + break + } + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(s) + v.AddArg(v0) + return true + } + // match: (SETNE (CMPQconst [1] s:(ANDQconst [1] _))) + // result: (SETEQ (CMPQconst [0] s)) + for { + if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 { + break + } + s := v_0.Args[0] + if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 { + break + } + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(s) + v.AddArg(v0) + return true + } + // match: (SETNE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) + // cond: z1==z2 + // result: (SETB (BTQconst [63] x)) + for { + if v_0.Op != OpAMD64TESTQ { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(63) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (SETNE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) + // cond: z1==z2 + // result: (SETB (BTQconst [31] x)) + for { + if v_0.Op != OpAMD64TESTL { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 { + 
continue + } + x := z1_0.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(31) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (SETNE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) + // cond: z1==z2 + // result: (SETB (BTQconst [0] x)) + for { + if v_0.Op != OpAMD64TESTQ { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) + // cond: z1==z2 + // result: (SETB (BTLconst [0] x)) + for { + if v_0.Op != OpAMD64TESTL { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (SETNE (TESTQ z1:(SHRQconst [63] x) z2)) + // cond: z1==z2 + // result: (SETB (BTQconst [63] x)) + for { + if v_0.Op != OpAMD64TESTQ { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 
<= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { + continue + } + x := z1.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(63) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (SETNE (TESTL z1:(SHRLconst [31] x) z2)) + // cond: z1==z2 + // result: (SETB (BTLconst [31] x)) + for { + if v_0.Op != OpAMD64TESTL { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { + continue + } + x := z1.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(31) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (SETNE (InvertFlags x)) + // result: (SETNE x) + for { + if v_0.Op != OpAMD64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETNE) + v.AddArg(x) + return true + } + // match: (SETNE (FlagEQ)) + // result: (MOVLconst [0]) + for { + if v_0.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SETNE (FlagLT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETNE (FlagLT_UGT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETNE (FlagGT_ULT)) + // result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETNE (FlagGT_UGT)) + 
// result: (MOVLconst [1]) + for { + if v_0.Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SETNE (TESTQ s:(Select0 blsr:(BLSRQ _)) s)) + // result: (SETNE (Select1 blsr)) + for { + if v_0.Op != OpAMD64TESTQ { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + s := v_0_0 + if s.Op != OpSelect0 { + continue + } + blsr := s.Args[0] + if blsr.Op != OpAMD64BLSRQ || s != v_0_1 { + continue + } + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(blsr) + v.AddArg(v0) + return true + } + break + } + // match: (SETNE (TESTL s:(Select0 blsr:(BLSRL _)) s)) + // result: (SETNE (Select1 blsr)) + for { + if v_0.Op != OpAMD64TESTL { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + s := v_0_0 + if s.Op != OpSelect0 { + continue + } + blsr := s.Args[0] + if blsr.Op != OpAMD64BLSRL || s != v_0_1 { + continue + } + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(blsr) + v.AddArg(v0) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) + // result: (SETBstore [off] {sym} ptr (BTL x y) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTL { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_0.Op != OpAMD64SHLL { + continue + } + x := v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 { + 
continue + } + y := v_1_1 + mem := v_2 + v.reset(OpAMD64SETBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg3(ptr, v0, mem) + return true + } + break + } + // match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) + // result: (SETBstore [off] {sym} ptr (BTQ x y) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_0.Op != OpAMD64SHLQ { + continue + } + x := v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 { + continue + } + y := v_1_1 + mem := v_2 + v.reset(OpAMD64SETBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg3(ptr, v0, mem) + return true + } + break + } + // match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem) + // cond: isUint32PowerOfTwo(int64(c)) + // result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + x := v_1.Args[0] + mem := v_2 + if !(isUint32PowerOfTwo(int64(c))) { + break + } + v.reset(OpAMD64SETBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem) + // cond: isUint64PowerOfTwo(int64(c)) + // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if 
v_1.Op != OpAMD64TESTQconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + x := v_1.Args[0] + mem := v_2 + if !(isUint64PowerOfTwo(int64(c))) { + break + } + v.reset(OpAMD64SETBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) + // cond: isUint64PowerOfTwo(c) + // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_0.Op != OpAMD64MOVQconst { + continue + } + c := auxIntToInt64(v_1_0.AuxInt) + x := v_1_1 + mem := v_2 + if !(isUint64PowerOfTwo(c)) { + continue + } + v.reset(OpAMD64SETBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(log64(c))) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + break + } + // match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) + // result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 { + break + } + s := v_1.Args[0] + if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 { + break + } + mem := v_2 + v.reset(OpAMD64SETEQstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(s) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) + // result: 
(SETEQstore [off] {sym} ptr (CMPQconst [0] s) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 { + break + } + s := v_1.Args[0] + if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 { + break + } + mem := v_2 + v.reset(OpAMD64SETEQstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(s) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) + // cond: z1==z2 + // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 + if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_1_1 + mem := v_2 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(63) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + break + } + // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) + // cond: z1==z2 + // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTL { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 + if z1.Op != OpAMD64SHLLconst || 
auxIntToInt8(z1.AuxInt) != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_1_1 + mem := v_2 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(31) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + break + } + // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) + // cond: z1==z2 + // result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_1_1 + mem := v_2 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + break + } + // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) + // cond: z1==z2 + // result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTL { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != 
OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_1_1 + mem := v_2 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + break + } + // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) + // cond: z1==z2 + // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { + continue + } + x := z1.Args[0] + z2 := v_1_1 + mem := v_2 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(63) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + break + } + // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) + // cond: z1==z2 + // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64TESTL { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z1 := v_1_0 + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { + continue + } + x := z1.Args[0] + z2 := v_1_1 + mem := v_2 + if !(z1 == z2) { + continue + } + v.reset(OpAMD64SETBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(31) + 
v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + break + } + // match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem) + // result: (SETNEstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpAMD64SETNEstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SETNEstore [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64SETNEstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64SETNEstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (SETNEstore [off] {sym} ptr (FlagEQ) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagEQ { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = 
int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagLT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagGT_ULT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64FlagGT_UGT { + break + } + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) + 
v0.AuxInt = int32ToAuxInt(1) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SHLL x (MOVQconst [c])) + // result: (SHLLconst [int8(c&31)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64SHLLconst) + v.AuxInt = int8ToAuxInt(int8(c & 31)) + v.AddArg(x) + return true + } + // match: (SHLL x (MOVLconst [c])) + // result: (SHLLconst [int8(c&31)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64SHLLconst) + v.AuxInt = int8ToAuxInt(int8(c & 31)) + v.AddArg(x) + return true + } + // match: (SHLL x (ADDQconst [c] y)) + // cond: c & 31 == 0 + // result: (SHLL x y) + for { + x := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&31 == 0) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + // match: (SHLL x (NEGQ (ADDQconst [c] y))) + // cond: c & 31 == 0 + // result: (SHLL x (NEGQ y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ADDQconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&31 == 0) { + break + } + v.reset(OpAMD64SHLL) + v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SHLL x (ANDQconst [c] y)) + // cond: c & 31 == 31 + // result: (SHLL x y) + for { + x := v_0 + if v_1.Op != OpAMD64ANDQconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&31 == 31) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + // match: (SHLL x (NEGQ (ANDQconst [c] y))) + // cond: c & 31 == 31 + // result: (SHLL x (NEGQ y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if 
v_1_0.Op != OpAMD64ANDQconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&31 == 31) { + break + } + v.reset(OpAMD64SHLL) + v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SHLL x (ADDLconst [c] y)) + // cond: c & 31 == 0 + // result: (SHLL x y) + for { + x := v_0 + if v_1.Op != OpAMD64ADDLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&31 == 0) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + // match: (SHLL x (NEGL (ADDLconst [c] y))) + // cond: c & 31 == 0 + // result: (SHLL x (NEGL y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ADDLconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&31 == 0) { + break + } + v.reset(OpAMD64SHLL) + v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SHLL x (ANDLconst [c] y)) + // cond: c & 31 == 31 + // result: (SHLL x y) + for { + x := v_0 + if v_1.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&31 == 31) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + // match: (SHLL x (NEGL (ANDLconst [c] y))) + // cond: c & 31 == 31 + // result: (SHLL x (NEGL y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&31 == 31) { + break + } + v.reset(OpAMD64SHLL) + v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SHLL l:(MOVLload [off] {sym} ptr mem) x) + // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) + // result: (SHLXLload [off] {sym} ptr x mem) + for { + l := v_0 + if l.Op != OpAMD64MOVLload { + break + } + off := 
auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + x := v_1 + if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64SHLXLload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SHLLconst [1] (SHRLconst [1] x)) + // result: (ANDLconst [-2] x) + for { + if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRLconst || auxIntToInt8(v_0.AuxInt) != 1 { + break + } + x := v_0.Args[0] + v.reset(OpAMD64ANDLconst) + v.AuxInt = int32ToAuxInt(-2) + v.AddArg(x) + return true + } + // match: (SHLLconst x [0]) + // result: x + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SHLLconst [d] (MOVLconst [c])) + // result: (MOVLconst [c << uint64(d)]) + for { + d := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(c << uint64(d)) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SHLQ x (MOVQconst [c])) + // result: (SHLQconst [int8(c&63)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64SHLQconst) + v.AuxInt = int8ToAuxInt(int8(c & 63)) + v.AddArg(x) + return true + } + // match: (SHLQ x (MOVLconst [c])) + // result: (SHLQconst [int8(c&63)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64SHLQconst) + v.AuxInt = int8ToAuxInt(int8(c & 63)) + v.AddArg(x) + return true + } + // match: (SHLQ x (ADDQconst [c] y)) + // cond: c & 63 == 0 + // result: (SHLQ x y) + for { + x := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + c := auxIntToInt32(v_1.AuxInt) 
+ y := v_1.Args[0] + if !(c&63 == 0) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) + return true + } + // match: (SHLQ x (NEGQ (ADDQconst [c] y))) + // cond: c & 63 == 0 + // result: (SHLQ x (NEGQ y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ADDQconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&63 == 0) { + break + } + v.reset(OpAMD64SHLQ) + v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SHLQ x (ANDQconst [c] y)) + // cond: c & 63 == 63 + // result: (SHLQ x y) + for { + x := v_0 + if v_1.Op != OpAMD64ANDQconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&63 == 63) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) + return true + } + // match: (SHLQ x (NEGQ (ANDQconst [c] y))) + // cond: c & 63 == 63 + // result: (SHLQ x (NEGQ y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ANDQconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&63 == 63) { + break + } + v.reset(OpAMD64SHLQ) + v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SHLQ x (ADDLconst [c] y)) + // cond: c & 63 == 0 + // result: (SHLQ x y) + for { + x := v_0 + if v_1.Op != OpAMD64ADDLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&63 == 0) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) + return true + } + // match: (SHLQ x (NEGL (ADDLconst [c] y))) + // cond: c & 63 == 0 + // result: (SHLQ x (NEGL y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ADDLconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&63 == 0) { + break + } + v.reset(OpAMD64SHLQ) + v0 := b.NewValue0(v.Pos, 
OpAMD64NEGL, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SHLQ x (ANDLconst [c] y)) + // cond: c & 63 == 63 + // result: (SHLQ x y) + for { + x := v_0 + if v_1.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&63 == 63) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) + return true + } + // match: (SHLQ x (NEGL (ANDLconst [c] y))) + // cond: c & 63 == 63 + // result: (SHLQ x (NEGL y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&63 == 63) { + break + } + v.reset(OpAMD64SHLQ) + v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SHLQ l:(MOVQload [off] {sym} ptr mem) x) + // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) + // result: (SHLXQload [off] {sym} ptr x mem) + for { + l := v_0 + if l.Op != OpAMD64MOVQload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + x := v_1 + if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64SHLXQload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SHLQconst [1] (SHRQconst [1] x)) + // result: (ANDQconst [-2] x) + for { + if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRQconst || auxIntToInt8(v_0.AuxInt) != 1 { + break + } + x := v_0.Args[0] + v.reset(OpAMD64ANDQconst) + v.AuxInt = int32ToAuxInt(-2) + v.AddArg(x) + return true + } + // match: (SHLQconst x [0]) + // result: x + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SHLQconst [d] (MOVQconst [c])) + // result: (MOVQconst [c << uint64(d)]) + 
for { + d := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(c << uint64(d)) + return true + } + // match: (SHLQconst [d] (MOVLconst [c])) + // result: (MOVQconst [int64(c) << uint64(d)]) + for { + d := auxIntToInt8(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(int64(c) << uint64(d)) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHLXLload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SHLXLload [off] {sym} ptr (MOVLconst [c]) mem) + // result: (SHLLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(OpAMD64SHLLconst) + v.AuxInt = int8ToAuxInt(int8(c & 31)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHLXQload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SHLXQload [off] {sym} ptr (MOVQconst [c]) mem) + // result: (SHLQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + v.reset(OpAMD64SHLQconst) + v.AuxInt = int8ToAuxInt(int8(c & 63)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } + // match: (SHLXQload [off] {sym} ptr (MOVLconst [c]) 
mem) + // result: (SHLQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(OpAMD64SHLQconst) + v.AuxInt = int8ToAuxInt(int8(c & 63)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SHRB x (MOVQconst [c])) + // cond: c&31 < 8 + // result: (SHRBconst [int8(c&31)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c&31 < 8) { + break + } + v.reset(OpAMD64SHRBconst) + v.AuxInt = int8ToAuxInt(int8(c & 31)) + v.AddArg(x) + return true + } + // match: (SHRB x (MOVLconst [c])) + // cond: c&31 < 8 + // result: (SHRBconst [int8(c&31)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(c&31 < 8) { + break + } + v.reset(OpAMD64SHRBconst) + v.AuxInt = int8ToAuxInt(int8(c & 31)) + v.AddArg(x) + return true + } + // match: (SHRB _ (MOVQconst [c])) + // cond: c&31 >= 8 + // result: (MOVLconst [0]) + for { + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c&31 >= 8) { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SHRB _ (MOVLconst [c])) + // cond: c&31 >= 8 + // result: (MOVLconst [0]) + for { + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(c&31 >= 8) { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SHRBconst x [0]) + // result: x + for { + if auxIntToInt8(v.AuxInt) != 0 { + break 
+ } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SHRL x (MOVQconst [c])) + // result: (SHRLconst [int8(c&31)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64SHRLconst) + v.AuxInt = int8ToAuxInt(int8(c & 31)) + v.AddArg(x) + return true + } + // match: (SHRL x (MOVLconst [c])) + // result: (SHRLconst [int8(c&31)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64SHRLconst) + v.AuxInt = int8ToAuxInt(int8(c & 31)) + v.AddArg(x) + return true + } + // match: (SHRL x (ADDQconst [c] y)) + // cond: c & 31 == 0 + // result: (SHRL x y) + for { + x := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&31 == 0) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) + return true + } + // match: (SHRL x (NEGQ (ADDQconst [c] y))) + // cond: c & 31 == 0 + // result: (SHRL x (NEGQ y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ADDQconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&31 == 0) { + break + } + v.reset(OpAMD64SHRL) + v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SHRL x (ANDQconst [c] y)) + // cond: c & 31 == 31 + // result: (SHRL x y) + for { + x := v_0 + if v_1.Op != OpAMD64ANDQconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&31 == 31) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) + return true + } + // match: (SHRL x (NEGQ (ANDQconst [c] y))) + // cond: c & 31 == 31 + // result: (SHRL x (NEGQ y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ANDQconst { 
+ break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&31 == 31) { + break + } + v.reset(OpAMD64SHRL) + v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SHRL x (ADDLconst [c] y)) + // cond: c & 31 == 0 + // result: (SHRL x y) + for { + x := v_0 + if v_1.Op != OpAMD64ADDLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&31 == 0) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) + return true + } + // match: (SHRL x (NEGL (ADDLconst [c] y))) + // cond: c & 31 == 0 + // result: (SHRL x (NEGL y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ADDLconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&31 == 0) { + break + } + v.reset(OpAMD64SHRL) + v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SHRL x (ANDLconst [c] y)) + // cond: c & 31 == 31 + // result: (SHRL x y) + for { + x := v_0 + if v_1.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&31 == 31) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) + return true + } + // match: (SHRL x (NEGL (ANDLconst [c] y))) + // cond: c & 31 == 31 + // result: (SHRL x (NEGL y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&31 == 31) { + break + } + v.reset(OpAMD64SHRL) + v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SHRL l:(MOVLload [off] {sym} ptr mem) x) + // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) + // result: (SHRXLload [off] {sym} ptr x mem) + for { + l := v_0 + if l.Op != OpAMD64MOVLload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := 
auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + x := v_1 + if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64SHRXLload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SHRLconst [1] (SHLLconst [1] x)) + // result: (ANDLconst [0x7fffffff] x) + for { + if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 { + break + } + x := v_0.Args[0] + v.reset(OpAMD64ANDLconst) + v.AuxInt = int32ToAuxInt(0x7fffffff) + v.AddArg(x) + return true + } + // match: (SHRLconst x [0]) + // result: x + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SHRQ x (MOVQconst [c])) + // result: (SHRQconst [int8(c&63)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpAMD64SHRQconst) + v.AuxInt = int8ToAuxInt(int8(c & 63)) + v.AddArg(x) + return true + } + // match: (SHRQ x (MOVLconst [c])) + // result: (SHRQconst [int8(c&63)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64SHRQconst) + v.AuxInt = int8ToAuxInt(int8(c & 63)) + v.AddArg(x) + return true + } + // match: (SHRQ x (ADDQconst [c] y)) + // cond: c & 63 == 0 + // result: (SHRQ x y) + for { + x := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&63 == 0) { + break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) + return true + } + // match: (SHRQ x (NEGQ (ADDQconst [c] y))) + // cond: c & 63 == 0 + // result: (SHRQ x (NEGQ y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] 
+ if v_1_0.Op != OpAMD64ADDQconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&63 == 0) { + break + } + v.reset(OpAMD64SHRQ) + v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SHRQ x (ANDQconst [c] y)) + // cond: c & 63 == 63 + // result: (SHRQ x y) + for { + x := v_0 + if v_1.Op != OpAMD64ANDQconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&63 == 63) { + break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) + return true + } + // match: (SHRQ x (NEGQ (ANDQconst [c] y))) + // cond: c & 63 == 63 + // result: (SHRQ x (NEGQ y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGQ { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ANDQconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&63 == 63) { + break + } + v.reset(OpAMD64SHRQ) + v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SHRQ x (ADDLconst [c] y)) + // cond: c & 63 == 0 + // result: (SHRQ x y) + for { + x := v_0 + if v_1.Op != OpAMD64ADDLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&63 == 0) { + break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) + return true + } + // match: (SHRQ x (NEGL (ADDLconst [c] y))) + // cond: c & 63 == 0 + // result: (SHRQ x (NEGL y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ADDLconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&63 == 0) { + break + } + v.reset(OpAMD64SHRQ) + v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SHRQ x (ANDLconst [c] y)) + // cond: c & 63 == 63 + // result: (SHRQ x y) + for { + x := v_0 + if v_1.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&63 == 63) { + break + } + 
v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) + return true + } + // match: (SHRQ x (NEGL (ANDLconst [c] y))) + // cond: c & 63 == 63 + // result: (SHRQ x (NEGL y)) + for { + x := v_0 + if v_1.Op != OpAMD64NEGL { + break + } + t := v_1.Type + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64ANDLconst { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + y := v_1_0.Args[0] + if !(c&63 == 63) { + break + } + v.reset(OpAMD64SHRQ) + v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SHRQ l:(MOVQload [off] {sym} ptr mem) x) + // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) + // result: (SHRXQload [off] {sym} ptr x mem) + for { + l := v_0 + if l.Op != OpAMD64MOVQload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + x := v_1 + if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(OpAMD64SHRXQload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SHRQconst [1] (SHLQconst [1] x)) + // result: (BTRQconst [63] x) + for { + if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 { + break + } + x := v_0.Args[0] + v.reset(OpAMD64BTRQconst) + v.AuxInt = int8ToAuxInt(63) + v.AddArg(x) + return true + } + // match: (SHRQconst x [0]) + // result: x + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SHRW x (MOVQconst [c])) + // cond: c&31 < 16 + // result: (SHRWconst [int8(c&31)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c&31 < 16) { + break + } + v.reset(OpAMD64SHRWconst) + v.AuxInt = int8ToAuxInt(int8(c 
& 31)) + v.AddArg(x) + return true + } + // match: (SHRW x (MOVLconst [c])) + // cond: c&31 < 16 + // result: (SHRWconst [int8(c&31)] x) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(c&31 < 16) { + break + } + v.reset(OpAMD64SHRWconst) + v.AuxInt = int8ToAuxInt(int8(c & 31)) + v.AddArg(x) + return true + } + // match: (SHRW _ (MOVQconst [c])) + // cond: c&31 >= 16 + // result: (MOVLconst [0]) + for { + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c&31 >= 16) { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SHRW _ (MOVLconst [c])) + // cond: c&31 >= 16 + // result: (MOVLconst [0]) + for { + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(c&31 >= 16) { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SHRWconst x [0]) + // result: x + for { + if auxIntToInt8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHRXLload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SHRXLload [off] {sym} ptr (MOVLconst [c]) mem) + // result: (SHRLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(OpAMD64SHRLconst) + v.AuxInt = int8ToAuxInt(int8(c & 31)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHRXQload(v *Value) bool { + v_2 := v.Args[2] + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SHRXQload [off] {sym} ptr (MOVQconst [c]) mem) + // result: (SHRQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + v.reset(OpAMD64SHRQconst) + v.AuxInt = int8ToAuxInt(int8(c & 63)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } + // match: (SHRXQload [off] {sym} ptr (MOVLconst [c]) mem) + // result: (SHRQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(OpAMD64SHRQconst) + v.AuxInt = int8ToAuxInt(int8(c & 63)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SUBL x (MOVLconst [c])) + // result: (SUBLconst x [c]) + for { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64SUBLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (SUBL (MOVLconst [c]) x) + // result: (NEGL (SUBLconst x [c])) + for { + if v_0.Op != OpAMD64MOVLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpAMD64NEGL) + v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SUBL x x) + // result: (MOVLconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = 
int32ToAuxInt(0) + return true + } + // match: (SUBL x l:(MOVLload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (SUBLload x [off] {sym} ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVLload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(OpAMD64SUBLload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SUBLconst [c] x) + // cond: c==0 + // result: x + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c == 0) { + break + } + v.copyOf(x) + return true + } + // match: (SUBLconst [c] x) + // result: (ADDLconst [-c] x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + v.reset(OpAMD64ADDLconst) + v.AuxInt = int32ToAuxInt(-c) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SUBLload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64SUBLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + 
sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64SUBLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + // match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) + // result: (SUBL x (MOVLf2i y)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr := v_1 + if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + if ptr != v_2.Args[0] { + break + } + v.reset(OpAMD64SUBL) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SUBLmodify [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64SUBLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) 
+ base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64SUBLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SUBQ x (MOVQconst [c])) + // cond: is32Bit(c) + // result: (SUBQconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64SUBQconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (SUBQ (MOVQconst [c]) x) + // cond: is32Bit(c) + // result: (NEGQ (SUBQconst x [int32(c)])) + for { + if v_0.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SUBQ x x) + // result: (MOVQconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (SUBQload x [off] {sym} ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVQload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(OpAMD64SUBQload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBQborrow(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBQborrow x (MOVQconst [c])) + // cond: 
is32Bit(c) + // result: (SUBQconstborrow x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64SUBQconstborrow) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SUBQconst [0] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SUBQconst [c] x) + // cond: c != -(1<<31) + // result: (ADDQconst [-c] x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c != -(1 << 31)) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(-c) + v.AddArg(x) + return true + } + // match: (SUBQconst (MOVQconst [d]) [c]) + // result: (MOVQconst [d-int64(c)]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(d - int64(c)) + return true + } + // match: (SUBQconst (SUBQconst x [d]) [c]) + // cond: is32Bit(int64(-c)-int64(d)) + // result: (ADDQconst [-c-d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64SUBQconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(int64(-c) - int64(d))) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(-c - d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SUBQload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := 
v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64SUBQload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64SUBQload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + // match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) + // result: (SUBQ x (MOVQf2i y)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr := v_1 + if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + if ptr != v_2.Args[0] { + break + } + v.reset(OpAMD64SUBQ) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SUBQmodify [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64SUBQmodify) + v.AuxInt = 
int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64SUBQmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (SUBSDload x [off] {sym} ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVSDload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(OpAMD64SUBSDload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SUBSDload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break 
+ } + v.reset(OpAMD64SUBSDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64SUBSDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + // match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) + // result: (SUBSD x (MOVQi2f y)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr := v_1 + if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + if ptr != v_2.Args[0] { + break + } + v.reset(OpAMD64SUBSD) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (SUBSSload x [off] {sym} ptr mem) + for { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVSSload { + break + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(OpAMD64SUBSSload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (SUBSSload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64SUBSSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64SUBSSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + // match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) + // result: (SUBSS x (MOVLi2f y)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr := v_1 + if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + if ptr != v_2.Args[0] { + break + } + v.reset(OpAMD64SUBSS) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + 
b := v.Block + // match: (TESTB (MOVLconst [c]) x) + // result: (TESTBconst [int8(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64MOVLconst { + continue + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpAMD64TESTBconst) + v.AuxInt = int8ToAuxInt(int8(c)) + v.AddArg(x) + return true + } + break + } + // match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2) + // cond: l == l2 && l.Uses == 2 && clobber(l) + // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0, off)] ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + l := v_0 + if l.Op != OpAMD64MOVBload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + l2 := v_1 + if !(l == l2 && l.Uses == 2 && clobber(l)) { + continue + } + b = l.Block + v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags) + v.copyOf(v0) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off)) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool { + v_0 := v.Args[0] + // match: (TESTBconst [-1] x) + // cond: x.Op != OpAMD64MOVLconst + // result: (TESTB x x) + for { + if auxIntToInt8(v.AuxInt) != -1 { + break + } + x := v_0 + if !(x.Op != OpAMD64MOVLconst) { + break + } + v.reset(OpAMD64TESTB) + v.AddArg2(x, x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TESTL (MOVLconst [c]) x) + // result: (TESTLconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64MOVLconst { + continue + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpAMD64TESTLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2) + // cond: l == l2 && l.Uses == 2 && clobber(l) + // result: 
@l.Block (CMPLconstload {sym} [makeValAndOff(0, off)] ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + l := v_0 + if l.Op != OpAMD64MOVLload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + l2 := v_1 + if !(l == l2 && l.Uses == 2 && clobber(l)) { + continue + } + b = l.Block + v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags) + v.copyOf(v0) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off)) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + break + } + // match: (TESTL a:(ANDLload [off] {sym} x ptr mem) a) + // cond: a.Uses == 2 && a.Block == v.Block && clobber(a) + // result: (TESTL (MOVLload [off] {sym} ptr mem) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + if a.Op != OpAMD64ANDLload { + continue + } + off := auxIntToInt32(a.AuxInt) + sym := auxToSym(a.Aux) + mem := a.Args[2] + x := a.Args[0] + ptr := a.Args[1] + if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) { + continue + } + v.reset(OpAMD64TESTL) + v0 := b.NewValue0(a.Pos, OpAMD64MOVLload, a.Type) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (TESTLconst [c] (MOVLconst [c])) + // cond: c == 0 + // result: (FlagEQ) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c == 0) { + break + } + v.reset(OpAMD64FlagEQ) + return true + } + // match: (TESTLconst [c] (MOVLconst [c])) + // cond: c < 0 + // result: (FlagLT_UGT) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c < 0) { + break + } + v.reset(OpAMD64FlagLT_UGT) + return true + } + // match: (TESTLconst [c] (MOVLconst [c])) + // cond: c > 0 + // result: (FlagGT_UGT) + 
for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c > 0) { + break + } + v.reset(OpAMD64FlagGT_UGT) + return true + } + // match: (TESTLconst [-1] x) + // cond: x.Op != OpAMD64MOVLconst + // result: (TESTL x x) + for { + if auxIntToInt32(v.AuxInt) != -1 { + break + } + x := v_0 + if !(x.Op != OpAMD64MOVLconst) { + break + } + v.reset(OpAMD64TESTL) + v.AddArg2(x, x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TESTQ (MOVQconst [c]) x) + // cond: is32Bit(c) + // result: (TESTQconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64MOVQconst { + continue + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(is32Bit(c)) { + continue + } + v.reset(OpAMD64TESTQconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + // match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2) + // cond: l == l2 && l.Uses == 2 && clobber(l) + // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0, off)] ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + l := v_0 + if l.Op != OpAMD64MOVQload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + l2 := v_1 + if !(l == l2 && l.Uses == 2 && clobber(l)) { + continue + } + b = l.Block + v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags) + v.copyOf(v0) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off)) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + break + } + // match: (TESTQ a:(ANDQload [off] {sym} x ptr mem) a) + // cond: a.Uses == 2 && a.Block == v.Block && clobber(a) + // result: (TESTQ (MOVQload [off] {sym} ptr mem) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + if a.Op != OpAMD64ANDQload { + continue + } + off := 
auxIntToInt32(a.AuxInt) + sym := auxToSym(a.Aux) + mem := a.Args[2] + x := a.Args[0] + ptr := a.Args[1] + if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) { + continue + } + v.reset(OpAMD64TESTQ) + v0 := b.NewValue0(a.Pos, OpAMD64MOVQload, a.Type) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool { + v_0 := v.Args[0] + // match: (TESTQconst [c] (MOVQconst [d])) + // cond: int64(c) == d && c == 0 + // result: (FlagEQ) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(int64(c) == d && c == 0) { + break + } + v.reset(OpAMD64FlagEQ) + return true + } + // match: (TESTQconst [c] (MOVQconst [d])) + // cond: int64(c) == d && c < 0 + // result: (FlagLT_UGT) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(int64(c) == d && c < 0) { + break + } + v.reset(OpAMD64FlagLT_UGT) + return true + } + // match: (TESTQconst [c] (MOVQconst [d])) + // cond: int64(c) == d && c > 0 + // result: (FlagGT_UGT) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(int64(c) == d && c > 0) { + break + } + v.reset(OpAMD64FlagGT_UGT) + return true + } + // match: (TESTQconst [-1] x) + // cond: x.Op != OpAMD64MOVQconst + // result: (TESTQ x x) + for { + if auxIntToInt32(v.AuxInt) != -1 { + break + } + x := v_0 + if !(x.Op != OpAMD64MOVQconst) { + break + } + v.reset(OpAMD64TESTQ) + v.AddArg2(x, x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TESTW (MOVLconst [c]) x) + // result: (TESTWconst [int16(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64MOVLconst 
{ + continue + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpAMD64TESTWconst) + v.AuxInt = int16ToAuxInt(int16(c)) + v.AddArg(x) + return true + } + break + } + // match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2) + // cond: l == l2 && l.Uses == 2 && clobber(l) + // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0, off)] ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + l := v_0 + if l.Op != OpAMD64MOVWload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + l2 := v_1 + if !(l == l2 && l.Uses == 2 && clobber(l)) { + continue + } + b = l.Block + v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags) + v.copyOf(v0) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off)) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (TESTWconst [-1] x) + // cond: x.Op != OpAMD64MOVLconst + // result: (TESTW x x) + for { + if auxIntToInt16(v.AuxInt) != -1 { + break + } + x := v_0 + if !(x.Op != OpAMD64MOVLconst) { + break + } + v.reset(OpAMD64TESTW) + v.AddArg2(x, x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (XADDLlock [off1+off2] {sym} val ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64XADDLlock) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) 
bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (XADDQlock [off1+off2] {sym} val ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64XADDQlock) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (XCHGL [off1+off2] {sym} val ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64XCHGL) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, ptr, mem) + return true + } + // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB + // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { + break + } + v.reset(OpAMD64XCHGL) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, ptr, mem) + return true + } + return 
false +} +func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (XCHGQ [off1+off2] {sym} val ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64XCHGQ) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, ptr, mem) + return true + } + // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB + // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { + break + } + v.reset(OpAMD64XCHGQ) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XORL (SHLL (MOVLconst [1]) y) x) + // result: (BTCL x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64SHLL { + continue + } + y := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 { + continue + } + x := v_1 + v.reset(OpAMD64BTCL) + v.AddArg2(x, y) + return true + } + break + } + // match: (XORL x (MOVLconst [c])) + // result: (XORLconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x 
:= v_0 + if v_1.Op != OpAMD64MOVLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64XORLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (XORL x x) + // result: (MOVLconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (XORLload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVLload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64XORLload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + // match: (XORL x (ADDLconst [-1] x)) + // cond: buildcfg.GOAMD64 >= 3 + // result: (BLSMSKL x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) { + continue + } + v.reset(OpAMD64BLSMSKL) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (XORLconst [1] (SETNE x)) + // result: (SETEQ x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETEQ) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETEQ x)) + // result: (SETNE x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETNE) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETL x)) + // result: (SETGE x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL { + 
break + } + x := v_0.Args[0] + v.reset(OpAMD64SETGE) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETGE x)) + // result: (SETL x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETL) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETLE x)) + // result: (SETG x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETG) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETG x)) + // result: (SETLE x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETLE) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETB x)) + // result: (SETAE x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETAE) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETAE x)) + // result: (SETB x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETB) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETBE x)) + // result: (SETA x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETA) + v.AddArg(x) + return true + } + // match: (XORLconst [1] (SETA x)) + // result: (SETBE x) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA { + break + } + x := v_0.Args[0] + v.reset(OpAMD64SETBE) + v.AddArg(x) + return true + } + // match: (XORLconst [c] (XORLconst [d] x)) + // result: (XORLconst [c ^ d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64XORLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpAMD64XORLconst) + v.AuxInt = int32ToAuxInt(c ^ d) + v.AddArg(x) + return true + } + // match: (XORLconst [c] x) + // cond: c==0 + // result: x + for { + c := 
auxIntToInt32(v.AuxInt) + x := v_0 + if !(c == 0) { + break + } + v.copyOf(x) + return true + } + // match: (XORLconst [c] (MOVLconst [d])) + // result: (MOVLconst [c^d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(c ^ d) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(ValAndOff(valoff1).canAdd32(off2)) { + break + } + v.reset(OpAMD64XORLconstmodify) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + // match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) + // result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64XORLconstmodify) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (XORLload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64XORLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64XORLload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + // match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) + // result: (XORL x (MOVLf2i y)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr := v_1 + if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + if ptr != v_2.Args[0] { + break + } + v.reset(OpAMD64XORL) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // 
result: (XORLmodify [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64XORLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64XORLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XORQ (SHLQ (MOVQconst [1]) y) x) + // result: (BTCQ x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64SHLQ { + continue + } + y := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + continue + } + x := v_1 + v.reset(OpAMD64BTCQ) + v.AddArg2(x, y) + return true + } + break + } + // match: (XORQ (MOVQconst [c]) x) + // cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31 + // result: (BTCQconst [int8(log64(c))] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAMD64MOVQconst { + continue + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) { + continue + } + 
v.reset(OpAMD64BTCQconst) + v.AuxInt = int8ToAuxInt(int8(log64(c))) + v.AddArg(x) + return true + } + break + } + // match: (XORQ x (MOVQconst [c])) + // cond: is32Bit(c) + // result: (XORQconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpAMD64XORQconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + // match: (XORQ x x) + // result: (MOVQconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (XORQ x l:(MOVQload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (XORQload x [off] {sym} ptr mem) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + l := v_1 + if l.Op != OpAMD64MOVQload { + continue + } + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) + mem := l.Args[1] + ptr := l.Args[0] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + continue + } + v.reset(OpAMD64XORQload) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + // match: (XORQ x (ADDQconst [-1] x)) + // cond: buildcfg.GOAMD64 >= 3 + // result: (BLSMSKQ x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) { + continue + } + v.reset(OpAMD64BLSMSKQ) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool { + v_0 := v.Args[0] + // match: (XORQconst [c] (XORQconst [d] x)) + // result: (XORQconst [c ^ d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64XORQconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + 
v.reset(OpAMD64XORQconst) + v.AuxInt = int32ToAuxInt(c ^ d) + v.AddArg(x) + return true + } + // match: (XORQconst [0] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (XORQconst [c] (MOVQconst [d])) + // result: (MOVQconst [int64(c)^d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(int64(c) ^ d) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) + // result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(ValAndOff(valoff1).canAdd32(off2)) { + break + } + v.reset(OpAMD64XORQconstmodify) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + // match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) + // result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) + for { + valoff1 := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64XORQconstmodify) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + 
return false +} +func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (XORQload [off1+off2] {sym} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64XORQload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(val, base, mem) + return true + } + // match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + val := v_0 + if v_1.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + base := v_1.Args[0] + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64XORQload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(val, base, mem) + return true + } + // match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) + // result: (XORQ x (MOVQf2i y)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr := v_1 + if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + if ptr != v_2.Args[0] { + break + } + v.reset(OpAMD64XORQ) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool { + v_2 := v.Args[2] + 
v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) + // result: (XORQmodify [off1+off2] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpAMD64XORQmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64XORQmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpAddr(v *Value) bool { + v_0 := v.Args[0] + // match: (Addr {sym} base) + // result: (LEAQ {sym} base) + for { + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } +} +func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAdd32 ptr val mem) + // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64AddTupleFirst32) + v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(val, ptr, 
mem) + v.AddArg2(val, v0) + return true + } +} +func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAdd64 ptr val mem) + // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64AddTupleFirst64) + v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) + return true + } +} +func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicAnd32 ptr val mem) + // result: (ANDLlock ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ANDLlock) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicAnd8 ptr val mem) + // result: (ANDBlock ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ANDBlock) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicCompareAndSwap32 ptr old new_ mem) + // result: (CMPXCHGLlock ptr old new_ mem) + for { + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpAMD64CMPXCHGLlock) + v.AddArg4(ptr, old, new_, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicCompareAndSwap64 ptr old new_ mem) + // result: (CMPXCHGQlock ptr old new_ mem) + for { + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpAMD64CMPXCHGQlock) + v.AddArg4(ptr, old, new_, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { + v_2 := 
v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicExchange32 ptr val mem) + // result: (XCHGL val ptr mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGL) + v.AddArg3(val, ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicExchange64 ptr val mem) + // result: (XCHGQ val ptr mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGQ) + v.AddArg3(val, ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoad32 ptr mem) + // result: (MOVLatomicload ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVLatomicload) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoad64 ptr mem) + // result: (MOVQatomicload ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVQatomicload) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoad8 ptr mem) + // result: (MOVBatomicload ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVBatomicload) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoadPtr ptr mem) + // result: (MOVQatomicload ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVQatomicload) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicOr32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicOr32 ptr val mem) + // result: (ORLlock ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ORLlock) + v.AddArg3(ptr, val, mem) + return true + } +} +func 
rewriteValueAMD64_OpAtomicOr8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicOr8 ptr val mem) + // result: (ORBlock ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ORBlock) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore32 ptr val mem) + // result: (Select1 (XCHGL val ptr mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpAtomicStore64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore64 ptr val mem) + // result: (Select1 (XCHGQ val ptr mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore8 ptr val mem) + // result: (Select1 (XCHGB val ptr mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStorePtrNoWB ptr val mem) + // result: (Select1 (XCHGQ val ptr mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := 
b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpBitLen16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen16 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSRL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) + v.AddArg(v0) + return true + } + // match: (BitLen16 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVWQZX x)))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpBitLen32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen32 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) + v2.AddArg(x) + v1.AddArg2(v2, v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (BitLen32 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL x))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + 
v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpBitLen64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen64 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) + v1 := b.NewValue0(v.Pos, OpSelect0, t) + v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v2.AddArg(x) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) + v3.AuxInt = int64ToAuxInt(-1) + v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4.AddArg(v2) + v0.AddArg3(v1, v3, v4) + v.AddArg(v0) + return true + } + // match: (BitLen64 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-64] (LZCNTQ x))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-64) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpBitLen8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen8 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSRL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) + v.AddArg(v0) + return true + } + // 
match: (BitLen8 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVBQZX x)))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpBswap16(v *Value) bool { + v_0 := v.Args[0] + // match: (Bswap16 x) + // result: (ROLWconst [8] x) + for { + x := v_0 + v.reset(OpAMD64ROLWconst) + v.AuxInt = int8ToAuxInt(8) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCeil(v *Value) bool { + v_0 := v.Args[0] + // match: (Ceil x) + // result: (ROUNDSD [2] x) + for { + x := v_0 + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpCondSelect(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CondSelect x y (SETEQ cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQEQ y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQEQ) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETNE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQNE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETL cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLT y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETL { + break + } + cond := 
v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLT) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETG cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGT y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGT) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETLE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETA cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQHI y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQHI) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETB cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQCS y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQCS) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETAE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQCC y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETAE { + 
break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQCC) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETBE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLS y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLS) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETEQF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQEQF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQEQF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETNEF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNEF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQNEF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGTF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGTF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGEF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGEF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGEF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETEQ cond)) + // cond: is32BitInt(t) + // result: (CMOVLEQ y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if 
v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLEQ) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETNE cond)) + // cond: is32BitInt(t) + // result: (CMOVLNE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETL cond)) + // cond: is32BitInt(t) + // result: (CMOVLLT y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLT) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETG cond)) + // cond: is32BitInt(t) + // result: (CMOVLGT y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGT) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETLE cond)) + // cond: is32BitInt(t) + // result: (CMOVLLE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGE cond)) + // cond: is32BitInt(t) + // result: (CMOVLGE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETA cond)) + // cond: is32BitInt(t) + // result: (CMOVLHI y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLHI) + v.AddArg3(y, x, 
cond) + return true + } + // match: (CondSelect x y (SETB cond)) + // cond: is32BitInt(t) + // result: (CMOVLCS y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLCS) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETAE cond)) + // cond: is32BitInt(t) + // result: (CMOVLCC y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLCC) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETBE cond)) + // cond: is32BitInt(t) + // result: (CMOVLLS y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLS) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETEQF cond)) + // cond: is32BitInt(t) + // result: (CMOVLEQF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLEQF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETNEF cond)) + // cond: is32BitInt(t) + // result: (CMOVLNEF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNEF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGF cond)) + // cond: is32BitInt(t) + // result: (CMOVLGTF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGTF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGEF cond)) + // cond: is32BitInt(t) + // result: (CMOVLGEF y x cond) 
+ for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGEF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETEQ cond)) + // cond: is16BitInt(t) + // result: (CMOVWEQ y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWEQ) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETNE cond)) + // cond: is16BitInt(t) + // result: (CMOVWNE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETL cond)) + // cond: is16BitInt(t) + // result: (CMOVWLT y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLT) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETG cond)) + // cond: is16BitInt(t) + // result: (CMOVWGT y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGT) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETLE cond)) + // cond: is16BitInt(t) + // result: (CMOVWLE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGE cond)) + // cond: is16BitInt(t) + // result: (CMOVWGE y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + 
break + } + v.reset(OpAMD64CMOVWGE) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETA cond)) + // cond: is16BitInt(t) + // result: (CMOVWHI y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWHI) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETB cond)) + // cond: is16BitInt(t) + // result: (CMOVWCS y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWCS) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETAE cond)) + // cond: is16BitInt(t) + // result: (CMOVWCC y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWCC) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETBE cond)) + // cond: is16BitInt(t) + // result: (CMOVWLS y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLS) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETEQF cond)) + // cond: is16BitInt(t) + // result: (CMOVWEQF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWEQF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETNEF cond)) + // cond: is16BitInt(t) + // result: (CMOVWNEF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNEF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGF cond)) + // 
cond: is16BitInt(t) + // result: (CMOVWGTF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGTF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y (SETGEF cond)) + // cond: is16BitInt(t) + // result: (CMOVWGEF y x cond) + for { + t := v.Type + x := v_0 + y := v_1 + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGEF) + v.AddArg3(y, x, cond) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 1 + // result: (CondSelect x y (MOVBQZX check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 1) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 2 + // result: (CondSelect x y (MOVWQZX check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 2) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 4 + // result: (CondSelect x y (MOVLQZX check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 4) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x 
(CMPQconst [0] check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { + break + } + v.reset(OpAMD64CMOVQNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) + // result: (CMOVLNE y x (CMPQconst [0] check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) + return true + } + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) + // result: (CMOVWNE y x (CMPQconst [0] check)) + for { + t := v.Type + x := v_0 + y := v_1 + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) + return true + } + return false +} +func rewriteValueAMD64_OpConst16(v *Value) bool { + // match: (Const16 [c]) + // result: (MOVLconst [int32(c)]) + for { + c := auxIntToInt16(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + return true + } +} +func rewriteValueAMD64_OpConst8(v *Value) bool { + // match: (Const8 [c]) + // result: (MOVLconst [int32(c)]) + for { + c := auxIntToInt8(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + return true + } +} +func rewriteValueAMD64_OpConstBool(v *Value) bool { + // match: (ConstBool [c]) + // result: (MOVLconst [b2i32(c)]) + for { + c := auxIntToBool(v.AuxInt) + 
v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(b2i32(c)) + return true + } +} +func rewriteValueAMD64_OpConstNil(v *Value) bool { + // match: (ConstNil ) + // result: (MOVQconst [0]) + for { + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(0) + return true + } +} +func rewriteValueAMD64_OpCtz16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz16 x) + // result: (BSFL (ORLconst [1<<16] x)) + for { + x := v_0 + v.reset(OpAMD64BSFL) + v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1 << 16) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool { + v_0 := v.Args[0] + // match: (Ctz16NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) + return true + } + // match: (Ctz16NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpCtz32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz32 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) + return true + } + // match: (Ctz32 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSFQ (BTSQconst [32] x))) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) + v1.AuxInt = int8ToAuxInt(32) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool { + v_0 := v.Args[0] + // match: (Ctz32NonZero x) + // 
cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) + return true + } + // match: (Ctz32NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpCtz64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz64 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTQ x) + for { + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTQ) + v.AddArg(x) + return true + } + // match: (Ctz64 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64CMOVQEQ) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3.AddArg(v1) + v.AddArg3(v0, v2, v3) + return true + } + return false +} +func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz64NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTQ x) + for { + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTQ) + v.AddArg(x) + return true + } + // match: (Ctz64NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSFQ x)) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpCtz8(v *Value) 
bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz8 x) + // result: (BSFL (ORLconst [1<<8 ] x)) + for { + x := v_0 + v.reset(OpAMD64BSFL) + v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1 << 8) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { + v_0 := v.Args[0] + // match: (Ctz8NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) + return true + } + // match: (Ctz8NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) + for { + x := v_0 + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpDiv16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 [a] x y) + // result: (Select0 (DIVW [a] x y)) + for { + a := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16u x y) + // result: (Select0 (DIVWU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32 [a] x y) + // result: (Select0 (DIVL [a] x y)) + for { + a := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) + 
v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32u x y) + // result: (Select0 (DIVLU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div64 [a] x y) + // result: (Select0 (DIVQ [a] x y)) + for { + a := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div64u x y) + // result: (Select0 (DIVQU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpDiv8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: 
(Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq16 x y) + // result: (SETEQ (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32 x y) + // result: (SETEQ (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32F x y) + // result: (SETEQF (UCOMISS x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq64 x y) + // result: (SETEQ (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq64F x y) + // result: (SETEQF (UCOMISD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return 
true + } +} +func rewriteValueAMD64_OpEq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq8 x y) + // result: (SETEQ (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (EqB x y) + // result: (SETEQ (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpEqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (EqPtr x y) + // result: (SETEQ (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpFMA(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMA x y z) + // result: (VFMADD231SD z x y) + for { + x := v_0 + y := v_1 + z := v_2 + v.reset(OpAMD64VFMADD231SD) + v.AddArg3(z, x, y) + return true + } +} +func rewriteValueAMD64_OpFloor(v *Value) bool { + v_0 := v.Args[0] + // match: (Floor x) + // result: (ROUNDSD [1] x) + for { + x := v_0 + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpGetG(v *Value) bool { + v_0 := v.Args[0] + // match: (GetG mem) + // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal + // result: (LoweredGetG mem) + for { + mem := v_0 + if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { + break + } + v.reset(OpAMD64LoweredGetG) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (HasCPUFeature {s}) + // result: (SETNE (CMPLconst [0] 
(LoweredHasCPUFeature {s}))) + for { + s := auxToSym(v.Aux) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) + v1.Aux = symToAux(s) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpIsInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (IsInBounds idx len) + // result: (SETB (CMPQ idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpIsNonNil(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (IsNonNil p) + // result: (SETNE (TESTQ p p)) + for { + p := v_0 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) + v0.AddArg2(p, p) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (IsSliceInBounds idx len) + // result: (SETBE (CMPQ idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq16 x y) + // result: (SETLE (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq16U x y) + // result: (SETBE (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpLeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32 x y) + // result: (SETLE (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32F x y) + // result: (SETGEF (UCOMISS y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETGEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32U x y) + // result: (SETBE (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64 x y) + // result: (SETLE (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64F x y) + // result: (SETGEF (UCOMISD y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETGEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64U x y) + // result: (SETBE (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq8(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq8 x y) + // result: (SETLE (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLeq8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq8U x y) + // result: (SETBE (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less16 x y) + // result: (SETL (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less16U x y) + // result: (SETB (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32 x y) + // result: (SETL (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32F x y) + // result: (SETGF (UCOMISS y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETGF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] + b := v.Block + // match: (Less32U x y) + // result: (SETB (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64 x y) + // result: (SETL (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64F x y) + // result: (SETGF (UCOMISD y x)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETGF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64U x y) + // result: (SETB (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less8 x y) + // result: (SETL (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less8U x y) + // result: (SETB (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Load ptr mem) + // 
cond: (is64BitInt(t) || isPtr(t)) + // result: (MOVQload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64MOVQload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitInt(t) + // result: (MOVLload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64MOVLload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is16BitInt(t) + // result: (MOVWload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64MOVWload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (t.IsBoolean() || is8BitInt(t)) + // result: (MOVBload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean() || is8BitInt(t)) { + break + } + v.reset(OpAMD64MOVBload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (MOVSSload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + v.reset(OpAMD64MOVSSload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (MOVSDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(OpAMD64MOVSDload) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueAMD64_OpLocalAddr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (LEAQ {sym} (SPanchored base mem)) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) + v.AddArg(v0) + return true + 
} + // match: (LocalAddr {sym} base _) + // cond: !t.Elem().HasPointers() + // result: (LEAQ {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + return false +} +func rewriteValueAMD64_OpLsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func 
rewriteValueAMD64_OpLsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + 
v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x64 
x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh64x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPWconst y [64]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(Lsh64x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPLconst y [64]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh64x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh64x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, 
OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + 
break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpLsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpMax32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Max32F x y) + // result: (Neg32F (Min32F (Neg32F x) (Neg32F y))) + for { + t := v.Type + x := v_0 + y := v_1 + 
v.reset(OpNeg32F) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMin32F, t) + v1 := b.NewValue0(v.Pos, OpNeg32F, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpNeg32F, t) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMax64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Max64F x y) + // result: (Neg64F (Min64F (Neg64F x) (Neg64F y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpNeg64F) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMin64F, t) + v1 := b.NewValue0(v.Pos, OpNeg64F, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpNeg64F, t) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMin32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Min32F x y) + // result: (POR (MINSS (MINSS x y) x) (MINSS x y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpAMD64POR) + v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t) + v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t) + v1.AddArg2(x, y) + v0.AddArg2(v1, x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueAMD64_OpMin64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Min64F x y) + // result: (POR (MINSD (MINSD x y) x) (MINSD x y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpAMD64POR) + v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t) + v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t) + v1.AddArg2(x, y) + v0.AddArg2(v1, x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueAMD64_OpMod16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16 [a] x y) + // result: (Select1 (DIVW [a] x y)) + for { + a := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func 
rewriteValueAMD64_OpMod16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16u x y) + // result: (Select1 (DIVWU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMod32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32 [a] x y) + // result: (Select1 (DIVL [a] x y)) + for { + a := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMod32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32u x y) + // result: (Select1 (DIVLU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMod64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod64 [a] x y) + // result: (Select1 (DIVQ [a] x y)) + for { + a := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMod64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod64u x y) + // result: (Select1 (DIVQU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + 
return true + } +} +func rewriteValueAMD64_OpMod8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8 x y) + // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMod8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8u x y) + // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMove(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Move [0] _ _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] dst src mem) + // result: (MOVWstore dst (MOVWload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v0 := 
b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [4] dst src mem) + // result: (MOVLstore dst (MOVLload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [8] dst src mem) + // result: (MOVQstore dst (MOVQload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [16] dst src mem) + // cond: config.useSSE + // result: (MOVOstore dst (MOVOload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + if !(config.useSSE) { + break + } + v.reset(OpAMD64MOVOstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [16] dst src mem) + // cond: !config.useSSE + // result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + if !(!config.useSSE) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [32] dst src mem) + // result: (Move [16] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) + for { + if 
auxIntToInt64(v.AuxInt) != 32 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(16) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) + return true + } + // match: (Move [48] dst src mem) + // cond: config.useSSE + // result: (Move [32] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) + for { + if auxIntToInt64(v.AuxInt) != 48 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + if !(config.useSSE) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(16) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) + return true + } + // match: (Move [64] dst src mem) + // cond: config.useSSE + // result: (Move [32] (OffPtr dst [32]) (OffPtr src [32]) (Move [32] dst src mem)) + for { + if auxIntToInt64(v.AuxInt) != 64 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + if !(config.useSSE) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(32) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(32) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(32) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) + return true + } + // match: (Move [3] dst src mem) + // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break 
+ } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [5] dst src mem) + // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 5 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [6] dst src mem) + // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [7] dst src mem) + // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 7 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(3) + 
v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [9] dst src mem) + // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 9 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [10] dst src mem) + // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 10 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [11] dst src mem) + // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 11 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(7) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(7) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + 
v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [12] dst src mem) + // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 12 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [s] dst src mem) + // cond: s >= 13 && s <= 15 + // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem)) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s >= 13 && s <= 15) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = int32ToAuxInt(int32(s - 8)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(int32(s - 8)) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 16 && s%16 != 0 && s%16 <= 8 + // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16 && s%16 != 0 && s%16 <= 8) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s % 16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v3 := 
b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE + // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s % 16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE + // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s % 16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(8) + v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v3.AuxInt = int32ToAuxInt(8) + v3.AddArg2(src, mem) + v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v5.AddArg2(src, mem) + v4.AddArg3(dst, v5, mem) + v2.AddArg3(dst, 
v3, v4) + v.AddArg3(v0, v1, v2) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s) + // result: (DUFFCOPY [s] dst src mem) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) { + break + } + v.reset(OpAMD64DUFFCOPY) + v.AuxInt = int64ToAuxInt(s) + v.AddArg3(dst, src, mem) + return true + } + // match: (Move [s] dst src mem) + // cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s) + // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !((s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)) { + break + } + v.reset(OpAMD64REPMOVSQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(s / 8) + v.AddArg4(dst, src, v0, mem) + return true + } + return false +} +func rewriteValueAMD64_OpNeg32F(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg32F x) + // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) + for { + x := v_0 + v.reset(OpAMD64PXOR) + v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) + v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpNeg64F(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg64F x) + // result: (PXOR x (MOVSDconst [math.Copysign(0, -1)])) + for { + x := v_0 + v.reset(OpAMD64PXOR) + v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) + v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueAMD64_OpNeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq16 x y) + // result: (SETNE (CMPW x y)) + for { + x := v_0 + y := 
v_1 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq32 x y) + // result: (SETNE (CMPL x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq32F x y) + // result: (SETNEF (UCOMISS x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETNEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq64 x y) + // result: (SETNE (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq64F x y) + // result: (SETNEF (UCOMISD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETNEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq8 x y) + // result: (SETNE (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNeqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (NeqB x y) + // result: (SETNE (CMPB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETNE) + v0 := 
b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNeqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (NeqPtr x y) + // result: (SETNE (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpNot(v *Value) bool { + v_0 := v.Args[0] + // match: (Not x) + // result: (XORLconst [1] x) + for { + x := v_0 + v.reset(OpAMD64XORLconst) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpOffPtr(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (OffPtr [off] ptr) + // cond: is32Bit(off) + // result: (ADDQconst [int32(off)] ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + if !(is32Bit(off)) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) + return true + } + // match: (OffPtr [off] ptr) + // result: (ADDQ (MOVQconst [off]) ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(OpAMD64ADDQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(off) + v.AddArg2(v0, ptr) + return true + } +} +func rewriteValueAMD64_OpPanicBounds(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 0 + // result: (LoweredPanicBoundsA [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 0) { + break + } + v.reset(OpAMD64LoweredPanicBoundsA) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 1 + // result: (LoweredPanicBoundsB [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + 
mem := v_2 + if !(boundsABI(kind) == 1) { + break + } + v.reset(OpAMD64LoweredPanicBoundsB) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 2 + // result: (LoweredPanicBoundsC [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 2) { + break + } + v.reset(OpAMD64LoweredPanicBoundsC) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + return false +} +func rewriteValueAMD64_OpPopCount16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount16 x) + // result: (POPCNTL (MOVWQZX x)) + for { + x := v_0 + v.reset(OpAMD64POPCNTL) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpPopCount8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount8 x) + // result: (POPCNTL (MOVBQZX x)) + for { + x := v_0 + v.reset(OpAMD64POPCNTL) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpRoundToEven(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundToEven x) + // result: (ROUNDSD [0] x) + for { + x := v_0 + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPWconst y [16]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(16) + v2.AddArg(y) 
+ v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh16Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPLconst y [16]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(16) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh16Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SHRW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPQconst y [16]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(16) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh16Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SHRW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRW) + v.AddArg2(x, y) + return true + } + return false +} +func 
rewriteValueAMD64_OpRsh16Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPBconst y [16]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(16) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh16Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16x16 x y) + // cond: !shiftIsBounded(v) + // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [16]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SARW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [16]))))) 
+ for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SARW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16x64 x y) + // cond: !shiftIsBounded(v) + // result: (SARW x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [16]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SARW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16x8 x y) + // cond: !shiftIsBounded(v) + // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [16]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.Type = t + v0 := 
b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(16) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SARW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPWconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh32Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPLconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + 
v.AddArg2(v0, v1) + return true + } + // match: (Rsh32Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPQconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh32Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPBconst y [32]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh32Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh32x16(v *Value) bool { + 
v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [32]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [32]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32x64 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORQ y 
(NOTQ (SBBQcarrymask (CMPQconst y [32]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [32]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(32) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SARL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPWconst y [64]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + 
v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh64Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPLconst y [64]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh64Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SHRQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPQconst y [64]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: 
(Rsh64Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SHRQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPBconst y [64]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh64Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64x16 x y) + // cond: !shiftIsBounded(v) + // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [64]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SARQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) + return true + } + return false +} +func 
rewriteValueAMD64_OpRsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [64]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SARQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64x64 x y) + // cond: !shiftIsBounded(v) + // result: (SARQ x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [64]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SARQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64x8 x y) + // cond: 
!shiftIsBounded(v) + // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [64]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SARQ x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPWconst y [8]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh8Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRB) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPLconst y [8]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRB, 
t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh8Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRB) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPQconst y [8]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh8Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRB) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPBconst y [8]))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(8) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh8Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SHRB x y) + for { + x 
:= v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHRB) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [8]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v3.AuxInt = int16ToAuxInt(8) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [8]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(8) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.AddArg2(x, y) + return true + } + return false +} +func 
rewriteValueAMD64_OpRsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8x64 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [8]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(8) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8x8 x y) + // cond: !shiftIsBounded(v) + // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [8]))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) + v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v3.AuxInt = int8ToAuxInt(8) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SARB x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SARB) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64_OpSelect0(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select0 (Mul64uover x y)) + 
// result: (Select0 (MULQU x y)) + for { + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (Select0 (Mul32uover x y)) + // result: (Select0 (MULLU x y)) + for { + if v_0.Op != OpMul32uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpSelect0) + v.Type = typ.UInt32 + v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (Select0 (Add64carry x y c)) + // result: (Select0 (ADCQ x y (Select1 (NEGLflags c)))) + for { + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v2.AddArg(c) + v1.AddArg(v2) + v0.AddArg3(x, y, v1) + v.AddArg(v0) + return true + } + // match: (Select0 (Sub64borrow x y c)) + // result: (Select0 (SBBQ x y (Select1 (NEGLflags c)))) + for { + if v_0.Op != OpSub64borrow { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v2.AddArg(c) + v1.AddArg(v2) + v0.AddArg3(x, y, v1) + v.AddArg(v0) + return true + } + // match: (Select0 (AddTupleFirst32 val tuple)) + // result: (ADDL val (Select0 tuple)) + for { + t := v.Type + if v_0.Op != OpAMD64AddTupleFirst32 { + break + } + tuple := v_0.Args[1] + val := v_0.Args[0] + 
v.reset(OpAMD64ADDL) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v0.AddArg(tuple) + v.AddArg2(val, v0) + return true + } + // match: (Select0 (AddTupleFirst64 val tuple)) + // result: (ADDQ val (Select0 tuple)) + for { + t := v.Type + if v_0.Op != OpAMD64AddTupleFirst64 { + break + } + tuple := v_0.Args[1] + val := v_0.Args[0] + v.reset(OpAMD64ADDQ) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v0.AddArg(tuple) + v.AddArg2(val, v0) + return true + } + return false +} +func rewriteValueAMD64_OpSelect1(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select1 (Mul64uover x y)) + // result: (SETO (Select1 (MULQU x y))) + for { + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpAMD64SETO) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select1 (Mul32uover x y)) + // result: (SETO (Select1 (MULLU x y))) + for { + if v_0.Op != OpMul32uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpAMD64SETO) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select1 (Add64carry x y c)) + // result: (NEGQ (SBBQcarrymask (Select1 (ADCQ x y (Select1 (NEGLflags c)))))) + for { + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpAMD64NEGQ) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, 
types.TypeFlags)) + v4.AddArg(c) + v3.AddArg(v4) + v2.AddArg3(x, y, v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select1 (Sub64borrow x y c)) + // result: (NEGQ (SBBQcarrymask (Select1 (SBBQ x y (Select1 (NEGLflags c)))))) + for { + if v_0.Op != OpSub64borrow { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpAMD64NEGQ) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v4.AddArg(c) + v3.AddArg(v4) + v2.AddArg3(x, y, v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select1 (NEGLflags (MOVQconst [0]))) + // result: (FlagEQ) + for { + if v_0.Op != OpAMD64NEGLflags { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 { + break + } + v.reset(OpAMD64FlagEQ) + return true + } + // match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) + // result: x + for { + if v_0.Op != OpAMD64NEGLflags { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64NEGQ { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64SBBQcarrymask { + break + } + x := v_0_0_0.Args[0] + v.copyOf(x) + return true + } + // match: (Select1 (AddTupleFirst32 _ tuple)) + // result: (Select1 tuple) + for { + if v_0.Op != OpAMD64AddTupleFirst32 { + break + } + tuple := v_0.Args[1] + v.reset(OpSelect1) + v.AddArg(tuple) + return true + } + // match: (Select1 (AddTupleFirst64 _ tuple)) + // result: (Select1 tuple) + for { + if v_0.Op != OpAMD64AddTupleFirst64 { + break + } + tuple := v_0.Args[1] + v.reset(OpSelect1) + v.AddArg(tuple) + return true + } + return false +} +func rewriteValueAMD64_OpSelectN(v *Value) bool { + v_0 := 
v.Args[0] + b := v.Block + config := b.Func.Config + // match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem))))) + // cond: sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call) + // result: (Move [sc.Val64()] dst src mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpAMD64CALLstatic || len(call.Args) != 1 { + break + } + sym := auxToCall(call.Aux) + s1 := call.Args[0] + if s1.Op != OpAMD64MOVQstoreconst { + break + } + sc := auxIntToValAndOff(s1.AuxInt) + _ = s1.Args[1] + s2 := s1.Args[1] + if s2.Op != OpAMD64MOVQstore { + break + } + _ = s2.Args[2] + src := s2.Args[1] + s3 := s2.Args[2] + if s3.Op != OpAMD64MOVQstore { + break + } + mem := s3.Args[2] + dst := s3.Args[1] + if !(sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(sc.Val64()) + v.AddArg3(dst, src, mem) + return true + } + // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem)) + // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call) + // result: (Move [sz] dst src mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpAMD64CALLstatic || len(call.Args) != 4 { + break + } + sym := auxToCall(call.Aux) + mem := call.Args[3] + dst := call.Args[0] + src := call.Args[1] + call_2 := call.Args[2] + if call_2.Op != OpAMD64MOVQconst { + break + } + sz := auxIntToInt64(call_2.AuxInt) + if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) { + break + } + v.reset(OpMove) + v.AuxInt = 
int64ToAuxInt(sz) + v.AddArg3(dst, src, mem) + return true + } + return false +} +func rewriteValueAMD64_OpSlicemask(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Slicemask x) + // result: (SARQconst (NEGQ x) [63]) + for { + t := v.Type + x := v_0 + v.reset(OpAMD64SARQconst) + v.AuxInt = int8ToAuxInt(63) + v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpSpectreIndex(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SpectreIndex x y) + // result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64CMOVQCC) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v1.AddArg2(x, y) + v.AddArg3(x, v0, v1) + return true + } +} +func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SpectreSliceIndex x y) + // result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64CMOVQHI) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v1.AddArg2(x, y) + v.AddArg3(x, v0, v1) + return true + } +} +func rewriteValueAMD64_OpStore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && t.IsFloat() + // result: (MOVSDstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && t.IsFloat()) { + break + } + v.reset(OpAMD64MOVSDstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && t.IsFloat() + // result: (MOVSSstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := 
v_2 + if !(t.Size() == 4 && t.IsFloat()) { + break + } + v.reset(OpAMD64MOVSSstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && !t.IsFloat() + // result: (MOVQstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && !t.IsFloat()) { + break + } + v.reset(OpAMD64MOVQstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && !t.IsFloat() + // result: (MOVLstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && !t.IsFloat()) { + break + } + v.reset(OpAMD64MOVLstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 2 + // result: (MOVWstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 2) { + break + } + v.reset(OpAMD64MOVWstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 1 + // result: (MOVBstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 1) { + break + } + v.reset(OpAMD64MOVBstore) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueAMD64_OpTrunc(v *Value) bool { + v_0 := v.Args[0] + // match: (Trunc x) + // result: (ROUNDSD [3] x) + for { + x := v_0 + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(3) + v.AddArg(x) + return true + } +} +func rewriteValueAMD64_OpZero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Zero [0] _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_1 + v.copyOf(mem) + return true + } + // match: (Zero [1] destptr mem) + // result: (MOVBstoreconst [makeValAndOff(0,0)] destptr mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { 
+ break + } + destptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v.AddArg2(destptr, mem) + return true + } + // match: (Zero [2] destptr mem) + // result: (MOVWstoreconst [makeValAndOff(0,0)] destptr mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v.AddArg2(destptr, mem) + return true + } + // match: (Zero [4] destptr mem) + // result: (MOVLstoreconst [makeValAndOff(0,0)] destptr mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v.AddArg2(destptr, mem) + return true + } + // match: (Zero [8] destptr mem) + // result: (MOVQstoreconst [makeValAndOff(0,0)] destptr mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v.AddArg2(destptr, mem) + return true + } + // match: (Zero [3] destptr mem) + // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [5] destptr mem) + // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 5 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) + 
v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [6] destptr mem) + // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [7] destptr mem) + // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 7 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [s] destptr mem) + // cond: s%8 != 0 && s > 8 && !config.useSSE + // result: (Zero [s-s%8] (OffPtr destptr [s%8]) (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + s := auxIntToInt64(v.AuxInt) + destptr := v_0 + mem := v_1 + if !(s%8 != 0 && s > 8 && !config.useSSE) { + break + } + v.reset(OpZero) + v.AuxInt = int64ToAuxInt(s - s%8) + v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) + v0.AuxInt = int64ToAuxInt(s % 8) + v0.AddArg(destptr) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) + v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v1.AddArg2(destptr, mem) + v.AddArg2(v0, v1) + return true + } + // match: (Zero [16] destptr mem) + // cond: !config.useSSE + // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 16 { + 
break + } + destptr := v_0 + mem := v_1 + if !(!config.useSSE) { + break + } + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [24] destptr mem) + // cond: !config.useSSE + // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))) + for { + if auxIntToInt64(v.AuxInt) != 24 { + break + } + destptr := v_0 + mem := v_1 + if !(!config.useSSE) { + break + } + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8)) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) + v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v1.AddArg2(destptr, mem) + v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [32] destptr mem) + // cond: !config.useSSE + // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)))) + for { + if auxIntToInt64(v.AuxInt) != 32 { + break + } + destptr := v_0 + mem := v_1 + if !(!config.useSSE) { + break + } + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 24)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16)) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) + v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8)) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) + v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v2.AddArg2(destptr, mem) + v1.AddArg2(destptr, v2) + 
v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [9] destptr mem) + // cond: config.useSSE + // result: (MOVBstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 9 { + break + } + destptr := v_0 + mem := v_1 + if !(config.useSSE) { + break + } + v.reset(OpAMD64MOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [10] destptr mem) + // cond: config.useSSE + // result: (MOVWstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 10 { + break + } + destptr := v_0 + mem := v_1 + if !(config.useSSE) { + break + } + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [11] destptr mem) + // cond: config.useSSE + // result: (MOVLstoreconst [makeValAndOff(0,7)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 11 { + break + } + destptr := v_0 + mem := v_1 + if !(config.useSSE) { + break + } + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 7)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [12] destptr mem) + // cond: config.useSSE + // result: (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 12 { + break + } + destptr 
:= v_0 + mem := v_1 + if !(config.useSSE) { + break + } + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [s] destptr mem) + // cond: s > 12 && s < 16 && config.useSSE + // result: (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + s := auxIntToInt64(v.AuxInt) + destptr := v_0 + mem := v_1 + if !(s > 12 && s < 16 && config.useSSE) { + break + } + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, int32(s-8))) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [s] destptr mem) + // cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE + // result: (Zero [s-s%16] (OffPtr destptr [s%16]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + s := auxIntToInt64(v.AuxInt) + destptr := v_0 + mem := v_1 + if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) { + break + } + v.reset(OpZero) + v.AuxInt = int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(destptr) + v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem) + v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v1.AddArg2(destptr, mem) + v.AddArg2(v0, v1) + return true + } + // match: (Zero [s] destptr mem) + // cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE + // result: (Zero [s-s%16] (OffPtr destptr [s%16]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + s := auxIntToInt64(v.AuxInt) + destptr := v_0 + mem := v_1 + if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) { + break + } + v.reset(OpZero) + v.AuxInt = 
int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(destptr) + v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem) + v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v1.AddArg2(destptr, mem) + v.AddArg2(v0, v1) + return true + } + // match: (Zero [16] destptr mem) + // cond: config.useSSE + // result: (MOVOstoreconst [makeValAndOff(0,0)] destptr mem) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + destptr := v_0 + mem := v_1 + if !(config.useSSE) { + break + } + v.reset(OpAMD64MOVOstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v.AddArg2(destptr, mem) + return true + } + // match: (Zero [32] destptr mem) + // cond: config.useSSE + // result: (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 32 { + break + } + destptr := v_0 + mem := v_1 + if !(config.useSSE) { + break + } + v.reset(OpAMD64MOVOstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [48] destptr mem) + // cond: config.useSSE + // result: (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))) + for { + if auxIntToInt64(v.AuxInt) != 48 { + break + } + destptr := v_0 + mem := v_1 + if !(config.useSSE) { + break + } + v.reset(OpAMD64MOVOstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16)) + v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem) + v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v1.AddArg2(destptr, mem) + v0.AddArg2(destptr, v1) + v.AddArg2(destptr, 
v0) + return true + } + // match: (Zero [64] destptr mem) + // cond: config.useSSE + // result: (MOVOstoreconst [makeValAndOff(0,48)] destptr (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)))) + for { + if auxIntToInt64(v.AuxInt) != 64 { + break + } + destptr := v_0 + mem := v_1 + if !(config.useSSE) { + break + } + v.reset(OpAMD64MOVOstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 48)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32)) + v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem) + v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16)) + v2 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem) + v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0)) + v2.AddArg2(destptr, mem) + v1.AddArg2(destptr, v2) + v0.AddArg2(destptr, v1) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [s] destptr mem) + // cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice + // result: (DUFFZERO [s] destptr mem) + for { + s := auxIntToInt64(v.AuxInt) + destptr := v_0 + mem := v_1 + if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) { + break + } + v.reset(OpAMD64DUFFZERO) + v.AuxInt = int64ToAuxInt(s) + v.AddArg2(destptr, mem) + return true + } + // match: (Zero [s] destptr mem) + // cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0 + // result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem) + for { + s := auxIntToInt64(v.AuxInt) + destptr := v_0 + mem := v_1 + if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) { + break + } + v.reset(OpAMD64REPSTOSQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(s / 8) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v.AddArg4(destptr, v0, v1, mem) + return true 
+ } + return false +} +func rewriteBlockAMD64(b *Block) bool { + typ := &b.Func.Config.Types + switch b.Kind { + case BlockAMD64EQ: + // match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y)) + // result: (UGE (BTL x y)) + for b.Controls[0].Op == OpAMD64TESTL { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpAMD64SHLL { + continue + } + x := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 { + continue + } + y := v_0_1 + v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockAMD64UGE, v0) + return true + } + break + } + // match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) + // result: (UGE (BTQ x y)) + for b.Controls[0].Op == OpAMD64TESTQ { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpAMD64SHLQ { + continue + } + x := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { + continue + } + y := v_0_1 + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockAMD64UGE, v0) + return true + } + break + } + // match: (EQ (TESTLconst [c] x)) + // cond: isUint32PowerOfTwo(int64(c)) + // result: (UGE (BTLconst [int8(log32(c))] x)) + for b.Controls[0].Op == OpAMD64TESTLconst { + v_0 := b.Controls[0] + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(isUint32PowerOfTwo(int64(c))) { + break + } + v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) + v0.AddArg(x) + b.resetWithControl(BlockAMD64UGE, v0) + return true + } + // match: (EQ (TESTQconst [c] x)) + // cond: isUint64PowerOfTwo(int64(c)) + // result: (UGE (BTQconst [int8(log32(c))] x)) + for b.Controls[0].Op 
== OpAMD64TESTQconst { + v_0 := b.Controls[0] + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(isUint64PowerOfTwo(int64(c))) { + break + } + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) + v0.AddArg(x) + b.resetWithControl(BlockAMD64UGE, v0) + return true + } + // match: (EQ (TESTQ (MOVQconst [c]) x)) + // cond: isUint64PowerOfTwo(c) + // result: (UGE (BTQconst [int8(log64(c))] x)) + for b.Controls[0].Op == OpAMD64TESTQ { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpAMD64MOVQconst { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + x := v_0_1 + if !(isUint64PowerOfTwo(c)) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(log64(c))) + v0.AddArg(x) + b.resetWithControl(BlockAMD64UGE, v0) + return true + } + break + } + // match: (EQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) + // cond: z1==z2 + // result: (UGE (BTQconst [63] x)) + for b.Controls[0].Op == OpAMD64TESTQ { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(63) + v0.AddArg(x) + b.resetWithControl(BlockAMD64UGE, v0) + return true + } + break + } + // match: (EQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) + // cond: z1==z2 + // result: (UGE (BTQconst [31] x)) + for b.Controls[0].Op == OpAMD64TESTL { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := 
v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(31) + v0.AddArg(x) + b.resetWithControl(BlockAMD64UGE, v0) + return true + } + break + } + // match: (EQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) + // cond: z1==z2 + // result: (UGE (BTQconst [0] x)) + for b.Controls[0].Op == OpAMD64TESTQ { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg(x) + b.resetWithControl(BlockAMD64UGE, v0) + return true + } + break + } + // match: (EQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) + // cond: z1==z2 + // result: (UGE (BTLconst [0] x)) + for b.Controls[0].Op == OpAMD64TESTL { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg(x) + 
b.resetWithControl(BlockAMD64UGE, v0) + return true + } + break + } + // match: (EQ (TESTQ z1:(SHRQconst [63] x) z2)) + // cond: z1==z2 + // result: (UGE (BTQconst [63] x)) + for b.Controls[0].Op == OpAMD64TESTQ { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { + continue + } + x := z1.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(63) + v0.AddArg(x) + b.resetWithControl(BlockAMD64UGE, v0) + return true + } + break + } + // match: (EQ (TESTL z1:(SHRLconst [31] x) z2)) + // cond: z1==z2 + // result: (UGE (BTLconst [31] x)) + for b.Controls[0].Op == OpAMD64TESTL { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { + continue + } + x := z1.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(31) + v0.AddArg(x) + b.resetWithControl(BlockAMD64UGE, v0) + return true + } + break + } + // match: (EQ (InvertFlags cmp) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpAMD64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64EQ, cmp) + return true + } + // match: (EQ (FlagEQ) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagEQ { + b.Reset(BlockFirst) + return true + } + // match: (EQ (FlagLT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagLT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (EQ (FlagLT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagLT_UGT { + 
b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (EQ (FlagGT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagGT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (EQ (FlagGT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagGT_UGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (EQ (TESTQ s:(Select0 blsr:(BLSRQ _)) s) yes no) + // result: (EQ (Select1 blsr) yes no) + for b.Controls[0].Op == OpAMD64TESTQ { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + s := v_0_0 + if s.Op != OpSelect0 { + continue + } + blsr := s.Args[0] + if blsr.Op != OpAMD64BLSRQ || s != v_0_1 { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(blsr) + b.resetWithControl(BlockAMD64EQ, v0) + return true + } + break + } + // match: (EQ (TESTL s:(Select0 blsr:(BLSRL _)) s) yes no) + // result: (EQ (Select1 blsr) yes no) + for b.Controls[0].Op == OpAMD64TESTL { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + s := v_0_0 + if s.Op != OpSelect0 { + continue + } + blsr := s.Args[0] + if blsr.Op != OpAMD64BLSRL || s != v_0_1 { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(blsr) + b.resetWithControl(BlockAMD64EQ, v0) + return true + } + break + } + case BlockAMD64GE: + // match: (GE (InvertFlags cmp) yes no) + // result: (LE cmp yes no) + for b.Controls[0].Op == OpAMD64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64LE, cmp) + return true + } + // match: (GE (FlagEQ) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagEQ { + b.Reset(BlockFirst) + return true + } + // match: (GE (FlagLT_ULT) yes no) + // result: (First no yes) 
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GE (FlagLT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagLT_UGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GE (FlagGT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagGT_ULT { + b.Reset(BlockFirst) + return true + } + // match: (GE (FlagGT_UGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagGT_UGT { + b.Reset(BlockFirst) + return true + } + case BlockAMD64GT: + // match: (GT (InvertFlags cmp) yes no) + // result: (LT cmp yes no) + for b.Controls[0].Op == OpAMD64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64LT, cmp) + return true + } + // match: (GT (FlagEQ) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagEQ { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GT (FlagLT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagLT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GT (FlagLT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagLT_UGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GT (FlagGT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagGT_ULT { + b.Reset(BlockFirst) + return true + } + // match: (GT (FlagGT_UGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagGT_UGT { + b.Reset(BlockFirst) + return true + } + case BlockIf: + // match: (If (SETL cmp) yes no) + // result: (LT cmp yes no) + for b.Controls[0].Op == OpAMD64SETL { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64LT, cmp) + return true + } + // match: (If (SETLE cmp) yes no) + // result: (LE cmp yes no) + for b.Controls[0].Op == OpAMD64SETLE { + v_0 := b.Controls[0] + cmp := 
v_0.Args[0] + b.resetWithControl(BlockAMD64LE, cmp) + return true + } + // match: (If (SETG cmp) yes no) + // result: (GT cmp yes no) + for b.Controls[0].Op == OpAMD64SETG { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64GT, cmp) + return true + } + // match: (If (SETGE cmp) yes no) + // result: (GE cmp yes no) + for b.Controls[0].Op == OpAMD64SETGE { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64GE, cmp) + return true + } + // match: (If (SETEQ cmp) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpAMD64SETEQ { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64EQ, cmp) + return true + } + // match: (If (SETNE cmp) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpAMD64SETNE { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64NE, cmp) + return true + } + // match: (If (SETB cmp) yes no) + // result: (ULT cmp yes no) + for b.Controls[0].Op == OpAMD64SETB { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64ULT, cmp) + return true + } + // match: (If (SETBE cmp) yes no) + // result: (ULE cmp yes no) + for b.Controls[0].Op == OpAMD64SETBE { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64ULE, cmp) + return true + } + // match: (If (SETA cmp) yes no) + // result: (UGT cmp yes no) + for b.Controls[0].Op == OpAMD64SETA { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64UGT, cmp) + return true + } + // match: (If (SETAE cmp) yes no) + // result: (UGE cmp yes no) + for b.Controls[0].Op == OpAMD64SETAE { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64UGE, cmp) + return true + } + // match: (If (SETO cmp) yes no) + // result: (OS cmp yes no) + for b.Controls[0].Op == OpAMD64SETO { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64OS, cmp) + return true + } + // match: (If (SETGF cmp) yes no) + // result: (UGT cmp 
yes no) + for b.Controls[0].Op == OpAMD64SETGF { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64UGT, cmp) + return true + } + // match: (If (SETGEF cmp) yes no) + // result: (UGE cmp yes no) + for b.Controls[0].Op == OpAMD64SETGEF { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64UGE, cmp) + return true + } + // match: (If (SETEQF cmp) yes no) + // result: (EQF cmp yes no) + for b.Controls[0].Op == OpAMD64SETEQF { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64EQF, cmp) + return true + } + // match: (If (SETNEF cmp) yes no) + // result: (NEF cmp yes no) + for b.Controls[0].Op == OpAMD64SETNEF { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64NEF, cmp) + return true + } + // match: (If cond yes no) + // result: (NE (TESTB cond cond) yes no) + for { + cond := b.Controls[0] + v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags) + v0.AddArg2(cond, cond) + b.resetWithControl(BlockAMD64NE, v0) + return true + } + case BlockJumpTable: + // match: (JumpTable idx) + // result: (JUMPTABLE {makeJumpTableSym(b)} idx (LEAQ {makeJumpTableSym(b)} (SB))) + for { + idx := b.Controls[0] + v0 := b.NewValue0(b.Pos, OpAMD64LEAQ, typ.Uintptr) + v0.Aux = symToAux(makeJumpTableSym(b)) + v1 := b.NewValue0(b.Pos, OpSB, typ.Uintptr) + v0.AddArg(v1) + b.resetWithControl2(BlockAMD64JUMPTABLE, idx, v0) + b.Aux = symToAux(makeJumpTableSym(b)) + return true + } + case BlockAMD64LE: + // match: (LE (InvertFlags cmp) yes no) + // result: (GE cmp yes no) + for b.Controls[0].Op == OpAMD64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64GE, cmp) + return true + } + // match: (LE (FlagEQ) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagEQ { + b.Reset(BlockFirst) + return true + } + // match: (LE (FlagLT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagLT_ULT { + b.Reset(BlockFirst) + return true 
+ } + // match: (LE (FlagLT_UGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagLT_UGT { + b.Reset(BlockFirst) + return true + } + // match: (LE (FlagGT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagGT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (LE (FlagGT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagGT_UGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockAMD64LT: + // match: (LT (InvertFlags cmp) yes no) + // result: (GT cmp yes no) + for b.Controls[0].Op == OpAMD64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64GT, cmp) + return true + } + // match: (LT (FlagEQ) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagEQ { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (LT (FlagLT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagLT_ULT { + b.Reset(BlockFirst) + return true + } + // match: (LT (FlagLT_UGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagLT_UGT { + b.Reset(BlockFirst) + return true + } + // match: (LT (FlagGT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagGT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (LT (FlagGT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagGT_UGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockAMD64NE: + // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) + // result: (LT cmp yes no) + for b.Controls[0].Op == OpAMD64TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SETL { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(BlockAMD64LT, cmp) + return true + } + // match: (NE (TESTB 
(SETLE cmp) (SETLE cmp)) yes no) + // result: (LE cmp yes no) + for b.Controls[0].Op == OpAMD64TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SETLE { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(BlockAMD64LE, cmp) + return true + } + // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) + // result: (GT cmp yes no) + for b.Controls[0].Op == OpAMD64TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SETG { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(BlockAMD64GT, cmp) + return true + } + // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) + // result: (GE cmp yes no) + for b.Controls[0].Op == OpAMD64TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SETGE { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(BlockAMD64GE, cmp) + return true + } + // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpAMD64TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SETEQ { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(BlockAMD64EQ, cmp) + return true + } + // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpAMD64TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SETNE { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(BlockAMD64NE, cmp) + return true 
+ } + // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) + // result: (ULT cmp yes no) + for b.Controls[0].Op == OpAMD64TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SETB { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(BlockAMD64ULT, cmp) + return true + } + // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) + // result: (ULE cmp yes no) + for b.Controls[0].Op == OpAMD64TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SETBE { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(BlockAMD64ULE, cmp) + return true + } + // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) + // result: (UGT cmp yes no) + for b.Controls[0].Op == OpAMD64TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SETA { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(BlockAMD64UGT, cmp) + return true + } + // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) + // result: (UGE cmp yes no) + for b.Controls[0].Op == OpAMD64TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SETAE { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(BlockAMD64UGE, cmp) + return true + } + // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no) + // result: (OS cmp yes no) + for b.Controls[0].Op == OpAMD64TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SETO { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] { + break + } + 
b.resetWithControl(BlockAMD64OS, cmp) + return true + } + // match: (NE (TESTL (SHLL (MOVLconst [1]) x) y)) + // result: (ULT (BTL x y)) + for b.Controls[0].Op == OpAMD64TESTL { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpAMD64SHLL { + continue + } + x := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 { + continue + } + y := v_0_1 + v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockAMD64ULT, v0) + return true + } + break + } + // match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y)) + // result: (ULT (BTQ x y)) + for b.Controls[0].Op == OpAMD64TESTQ { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpAMD64SHLQ { + continue + } + x := v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { + continue + } + y := v_0_1 + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockAMD64ULT, v0) + return true + } + break + } + // match: (NE (TESTLconst [c] x)) + // cond: isUint32PowerOfTwo(int64(c)) + // result: (ULT (BTLconst [int8(log32(c))] x)) + for b.Controls[0].Op == OpAMD64TESTLconst { + v_0 := b.Controls[0] + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(isUint32PowerOfTwo(int64(c))) { + break + } + v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) + v0.AddArg(x) + b.resetWithControl(BlockAMD64ULT, v0) + return true + } + // match: (NE (TESTQconst [c] x)) + // cond: isUint64PowerOfTwo(int64(c)) + // result: (ULT (BTQconst [int8(log32(c))] x)) + for b.Controls[0].Op == OpAMD64TESTQconst { + v_0 := b.Controls[0] + c := 
auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(isUint64PowerOfTwo(int64(c))) { + break + } + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(log32(c))) + v0.AddArg(x) + b.resetWithControl(BlockAMD64ULT, v0) + return true + } + // match: (NE (TESTQ (MOVQconst [c]) x)) + // cond: isUint64PowerOfTwo(c) + // result: (ULT (BTQconst [int8(log64(c))] x)) + for b.Controls[0].Op == OpAMD64TESTQ { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpAMD64MOVQconst { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + x := v_0_1 + if !(isUint64PowerOfTwo(c)) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(int8(log64(c))) + v0.AddArg(x) + b.resetWithControl(BlockAMD64ULT, v0) + return true + } + break + } + // match: (NE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) + // cond: z1==z2 + // result: (ULT (BTQconst [63] x)) + for b.Controls[0].Op == OpAMD64TESTQ { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(63) + v0.AddArg(x) + b.resetWithControl(BlockAMD64ULT, v0) + return true + } + break + } + // match: (NE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) + // cond: z1==z2 + // result: (ULT (BTQconst [31] x)) + for b.Controls[0].Op == OpAMD64TESTL { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = 
_i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(31) + v0.AddArg(x) + b.resetWithControl(BlockAMD64ULT, v0) + return true + } + break + } + // match: (NE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) + // cond: z1==z2 + // result: (ULT (BTQconst [0] x)) + for b.Controls[0].Op == OpAMD64TESTQ { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 { + continue + } + x := z1_0.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg(x) + b.resetWithControl(BlockAMD64ULT, v0) + return true + } + break + } + // match: (NE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) + // cond: z1==z2 + // result: (ULT (BTLconst [0] x)) + for b.Controls[0].Op == OpAMD64TESTL { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { + continue + } + z1_0 := z1.Args[0] + if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 { + continue + } + x := z1_0.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg(x) + b.resetWithControl(BlockAMD64ULT, v0) + return true + } + break + } + 
// match: (NE (TESTQ z1:(SHRQconst [63] x) z2)) + // cond: z1==z2 + // result: (ULT (BTQconst [63] x)) + for b.Controls[0].Op == OpAMD64TESTQ { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 { + continue + } + x := z1.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(63) + v0.AddArg(x) + b.resetWithControl(BlockAMD64ULT, v0) + return true + } + break + } + // match: (NE (TESTL z1:(SHRLconst [31] x) z2)) + // cond: z1==z2 + // result: (ULT (BTLconst [31] x)) + for b.Controls[0].Op == OpAMD64TESTL { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z1 := v_0_0 + if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 { + continue + } + x := z1.Args[0] + z2 := v_0_1 + if !(z1 == z2) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = int8ToAuxInt(31) + v0.AddArg(x) + b.resetWithControl(BlockAMD64ULT, v0) + return true + } + break + } + // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) + // result: (UGT cmp yes no) + for b.Controls[0].Op == OpAMD64TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SETGF { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(BlockAMD64UGT, cmp) + return true + } + // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) + // result: (UGE cmp yes no) + for b.Controls[0].Op == OpAMD64TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SETGEF { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != 
OpAMD64SETGEF || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(BlockAMD64UGE, cmp) + return true + } + // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) + // result: (EQF cmp yes no) + for b.Controls[0].Op == OpAMD64TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SETEQF { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(BlockAMD64EQF, cmp) + return true + } + // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) + // result: (NEF cmp yes no) + for b.Controls[0].Op == OpAMD64TESTB { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64SETNEF { + break + } + cmp := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] { + break + } + b.resetWithControl(BlockAMD64NEF, cmp) + return true + } + // match: (NE (InvertFlags cmp) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpAMD64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64NE, cmp) + return true + } + // match: (NE (FlagEQ) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagEQ { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (NE (FlagLT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagLT_ULT { + b.Reset(BlockFirst) + return true + } + // match: (NE (FlagLT_UGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagLT_UGT { + b.Reset(BlockFirst) + return true + } + // match: (NE (FlagGT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagGT_ULT { + b.Reset(BlockFirst) + return true + } + // match: (NE (FlagGT_UGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagGT_UGT { + b.Reset(BlockFirst) + return true + } + // match: (NE (TESTQ s:(Select0 blsr:(BLSRQ _)) s) yes no) + // result: 
(NE (Select1 blsr) yes no) + for b.Controls[0].Op == OpAMD64TESTQ { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + s := v_0_0 + if s.Op != OpSelect0 { + continue + } + blsr := s.Args[0] + if blsr.Op != OpAMD64BLSRQ || s != v_0_1 { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(blsr) + b.resetWithControl(BlockAMD64NE, v0) + return true + } + break + } + // match: (NE (TESTL s:(Select0 blsr:(BLSRL _)) s) yes no) + // result: (NE (Select1 blsr) yes no) + for b.Controls[0].Op == OpAMD64TESTL { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + s := v_0_0 + if s.Op != OpSelect0 { + continue + } + blsr := s.Args[0] + if blsr.Op != OpAMD64BLSRL || s != v_0_1 { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(blsr) + b.resetWithControl(BlockAMD64NE, v0) + return true + } + break + } + case BlockAMD64UGE: + // match: (UGE (TESTQ x x) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64TESTQ { + v_0 := b.Controls[0] + x := v_0.Args[1] + if x != v_0.Args[0] { + break + } + b.Reset(BlockFirst) + return true + } + // match: (UGE (TESTL x x) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64TESTL { + v_0 := b.Controls[0] + x := v_0.Args[1] + if x != v_0.Args[0] { + break + } + b.Reset(BlockFirst) + return true + } + // match: (UGE (TESTW x x) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64TESTW { + v_0 := b.Controls[0] + x := v_0.Args[1] + if x != v_0.Args[0] { + break + } + b.Reset(BlockFirst) + return true + } + // match: (UGE (TESTB x x) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64TESTB { + v_0 := b.Controls[0] + x := v_0.Args[1] + if x != v_0.Args[0] { + break + } + b.Reset(BlockFirst) + return true + } + // 
match: (UGE (InvertFlags cmp) yes no) + // result: (ULE cmp yes no) + for b.Controls[0].Op == OpAMD64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64ULE, cmp) + return true + } + // match: (UGE (FlagEQ) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagEQ { + b.Reset(BlockFirst) + return true + } + // match: (UGE (FlagLT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagLT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (UGE (FlagLT_UGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagLT_UGT { + b.Reset(BlockFirst) + return true + } + // match: (UGE (FlagGT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagGT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (UGE (FlagGT_UGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagGT_UGT { + b.Reset(BlockFirst) + return true + } + case BlockAMD64UGT: + // match: (UGT (InvertFlags cmp) yes no) + // result: (ULT cmp yes no) + for b.Controls[0].Op == OpAMD64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64ULT, cmp) + return true + } + // match: (UGT (FlagEQ) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagEQ { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (UGT (FlagLT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagLT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (UGT (FlagLT_UGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagLT_UGT { + b.Reset(BlockFirst) + return true + } + // match: (UGT (FlagGT_ULT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagGT_ULT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (UGT (FlagGT_UGT) yes no) + // result: (First yes no) + for 
b.Controls[0].Op == OpAMD64FlagGT_UGT { + b.Reset(BlockFirst) + return true + } + case BlockAMD64ULE: + // match: (ULE (InvertFlags cmp) yes no) + // result: (UGE cmp yes no) + for b.Controls[0].Op == OpAMD64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64UGE, cmp) + return true + } + // match: (ULE (FlagEQ) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagEQ { + b.Reset(BlockFirst) + return true + } + // match: (ULE (FlagLT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagLT_ULT { + b.Reset(BlockFirst) + return true + } + // match: (ULE (FlagLT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagLT_UGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (ULE (FlagGT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagGT_ULT { + b.Reset(BlockFirst) + return true + } + // match: (ULE (FlagGT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagGT_UGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockAMD64ULT: + // match: (ULT (TESTQ x x) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64TESTQ { + v_0 := b.Controls[0] + x := v_0.Args[1] + if x != v_0.Args[0] { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (ULT (TESTL x x) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64TESTL { + v_0 := b.Controls[0] + x := v_0.Args[1] + if x != v_0.Args[0] { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (ULT (TESTW x x) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64TESTW { + v_0 := b.Controls[0] + x := v_0.Args[1] + if x != v_0.Args[0] { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (ULT (TESTB x x) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64TESTB { + v_0 := 
b.Controls[0] + x := v_0.Args[1] + if x != v_0.Args[0] { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (ULT (InvertFlags cmp) yes no) + // result: (UGT cmp yes no) + for b.Controls[0].Op == OpAMD64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockAMD64UGT, cmp) + return true + } + // match: (ULT (FlagEQ) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagEQ { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (ULT (FlagLT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagLT_ULT { + b.Reset(BlockFirst) + return true + } + // match: (ULT (FlagLT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagLT_UGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (ULT (FlagGT_ULT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpAMD64FlagGT_ULT { + b.Reset(BlockFirst) + return true + } + // match: (ULT (FlagGT_UGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpAMD64FlagGT_UGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteAMD64latelower.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteAMD64latelower.go new file mode 100644 index 0000000000000000000000000000000000000000..11ecb0b285a22cb6effe8df9188d4374546e48aa --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteAMD64latelower.go @@ -0,0 +1,185 @@ +// Code generated from _gen/AMD64latelower.rules using 'go generate'; DO NOT EDIT. 
+ +package ssa + +import "internal/buildcfg" + +func rewriteValueAMD64latelower(v *Value) bool { + switch v.Op { + case OpAMD64MOVBQZX: + return rewriteValueAMD64latelower_OpAMD64MOVBQZX(v) + case OpAMD64MOVLQZX: + return rewriteValueAMD64latelower_OpAMD64MOVLQZX(v) + case OpAMD64MOVWQZX: + return rewriteValueAMD64latelower_OpAMD64MOVWQZX(v) + case OpAMD64SARL: + return rewriteValueAMD64latelower_OpAMD64SARL(v) + case OpAMD64SARQ: + return rewriteValueAMD64latelower_OpAMD64SARQ(v) + case OpAMD64SHLL: + return rewriteValueAMD64latelower_OpAMD64SHLL(v) + case OpAMD64SHLQ: + return rewriteValueAMD64latelower_OpAMD64SHLQ(v) + case OpAMD64SHRL: + return rewriteValueAMD64latelower_OpAMD64SHRL(v) + case OpAMD64SHRQ: + return rewriteValueAMD64latelower_OpAMD64SHRQ(v) + } + return false +} +func rewriteValueAMD64latelower_OpAMD64MOVBQZX(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVBQZX x) + // cond: zeroUpper56Bits(x,3) + // result: x + for { + x := v_0 + if !(zeroUpper56Bits(x, 3)) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64latelower_OpAMD64MOVLQZX(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVLQZX x) + // cond: zeroUpper32Bits(x,3) + // result: x + for { + x := v_0 + if !(zeroUpper32Bits(x, 3)) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64latelower_OpAMD64MOVWQZX(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVWQZX x) + // cond: zeroUpper48Bits(x,3) + // result: x + for { + x := v_0 + if !(zeroUpper48Bits(x, 3)) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueAMD64latelower_OpAMD64SARL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SARL x y) + // cond: buildcfg.GOAMD64 >= 3 + // result: (SARXL x y) + for { + x := v_0 + y := v_1 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64SARXL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64latelower_OpAMD64SARQ(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] + // match: (SARQ x y) + // cond: buildcfg.GOAMD64 >= 3 + // result: (SARXQ x y) + for { + x := v_0 + y := v_1 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64SARXQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64latelower_OpAMD64SHLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SHLL x y) + // cond: buildcfg.GOAMD64 >= 3 + // result: (SHLXL x y) + for { + x := v_0 + y := v_1 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64SHLXL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64latelower_OpAMD64SHLQ(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SHLQ x y) + // cond: buildcfg.GOAMD64 >= 3 + // result: (SHLXQ x y) + for { + x := v_0 + y := v_1 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64SHLXQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64latelower_OpAMD64SHRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SHRL x y) + // cond: buildcfg.GOAMD64 >= 3 + // result: (SHRXL x y) + for { + x := v_0 + y := v_1 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64SHRXL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueAMD64latelower_OpAMD64SHRQ(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SHRQ x y) + // cond: buildcfg.GOAMD64 >= 3 + // result: (SHRXQ x y) + for { + x := v_0 + y := v_1 + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64SHRXQ) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteBlockAMD64latelower(b *Block) bool { + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go new file mode 100644 index 0000000000000000000000000000000000000000..0dcb1b460f962732de57ad55b71f1d6e4ce386b8 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go @@ -0,0 +1,850 @@ +// Code generated from _gen/AMD64splitload.rules using 'go generate'; DO NOT EDIT. + +package ssa + +func rewriteValueAMD64splitload(v *Value) bool { + switch v.Op { + case OpAMD64CMPBconstload: + return rewriteValueAMD64splitload_OpAMD64CMPBconstload(v) + case OpAMD64CMPBconstloadidx1: + return rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v) + case OpAMD64CMPBload: + return rewriteValueAMD64splitload_OpAMD64CMPBload(v) + case OpAMD64CMPBloadidx1: + return rewriteValueAMD64splitload_OpAMD64CMPBloadidx1(v) + case OpAMD64CMPLconstload: + return rewriteValueAMD64splitload_OpAMD64CMPLconstload(v) + case OpAMD64CMPLconstloadidx1: + return rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v) + case OpAMD64CMPLconstloadidx4: + return rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v) + case OpAMD64CMPLload: + return rewriteValueAMD64splitload_OpAMD64CMPLload(v) + case OpAMD64CMPLloadidx1: + return rewriteValueAMD64splitload_OpAMD64CMPLloadidx1(v) + case OpAMD64CMPLloadidx4: + return rewriteValueAMD64splitload_OpAMD64CMPLloadidx4(v) + case OpAMD64CMPQconstload: + return rewriteValueAMD64splitload_OpAMD64CMPQconstload(v) + case OpAMD64CMPQconstloadidx1: + return rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v) + case OpAMD64CMPQconstloadidx8: + return rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v) + case OpAMD64CMPQload: + return rewriteValueAMD64splitload_OpAMD64CMPQload(v) + case OpAMD64CMPQloadidx1: + return rewriteValueAMD64splitload_OpAMD64CMPQloadidx1(v) + case OpAMD64CMPQloadidx8: + return rewriteValueAMD64splitload_OpAMD64CMPQloadidx8(v) + case OpAMD64CMPWconstload: + return rewriteValueAMD64splitload_OpAMD64CMPWconstload(v) + case OpAMD64CMPWconstloadidx1: + return rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v) + case OpAMD64CMPWconstloadidx2: + return rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v) + case 
OpAMD64CMPWload: + return rewriteValueAMD64splitload_OpAMD64CMPWload(v) + case OpAMD64CMPWloadidx1: + return rewriteValueAMD64splitload_OpAMD64CMPWloadidx1(v) + case OpAMD64CMPWloadidx2: + return rewriteValueAMD64splitload_OpAMD64CMPWloadidx2(v) + } + return false +} +func rewriteValueAMD64splitload_OpAMD64CMPBconstload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPBconstload {sym} [vo] ptr mem) + // cond: vo.Val() == 0 + // result: (TESTB x:(MOVBload {sym} [vo.Off()] ptr mem) x) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + mem := v_1 + if !(vo.Val() == 0) { + break + } + v.reset(OpAMD64TESTB) + x := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + x.AuxInt = int32ToAuxInt(vo.Off()) + x.Aux = symToAux(sym) + x.AddArg2(ptr, mem) + v.AddArg2(x, x) + return true + } + // match: (CMPBconstload {sym} [vo] ptr mem) + // cond: vo.Val() != 0 + // result: (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()]) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + mem := v_1 + if !(vo.Val() != 0) { + break + } + v.reset(OpAMD64CMPBconst) + v.AuxInt = int8ToAuxInt(vo.Val8()) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(vo.Off()) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPBconstloadidx1 {sym} [vo] ptr idx mem) + // cond: vo.Val() == 0 + // result: (TESTB x:(MOVBloadidx1 {sym} [vo.Off()] ptr idx mem) x) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + mem := v_2 + if !(vo.Val() == 0) { + break + } + v.reset(OpAMD64TESTB) + x := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, typ.UInt8) + x.AuxInt = 
int32ToAuxInt(vo.Off()) + x.Aux = symToAux(sym) + x.AddArg3(ptr, idx, mem) + v.AddArg2(x, x) + return true + } + // match: (CMPBconstloadidx1 {sym} [vo] ptr idx mem) + // cond: vo.Val() != 0 + // result: (CMPBconst (MOVBloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val8()]) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + mem := v_2 + if !(vo.Val() != 0) { + break + } + v.reset(OpAMD64CMPBconst) + v.AuxInt = int8ToAuxInt(vo.Val8()) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, typ.UInt8) + v0.AuxInt = int32ToAuxInt(vo.Off()) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, idx, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64splitload_OpAMD64CMPBload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPBload {sym} [off] ptr x mem) + // result: (CMPB (MOVBload {sym} [off] ptr mem) x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + x := v_1 + mem := v_2 + v.reset(OpAMD64CMPB) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueAMD64splitload_OpAMD64CMPBloadidx1(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPBloadidx1 {sym} [off] ptr idx x mem) + // result: (CMPB (MOVBloadidx1 {sym} [off] ptr idx mem) x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + x := v_2 + mem := v_3 + v.reset(OpAMD64CMPB) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, typ.UInt8) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, idx, mem) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueAMD64splitload_OpAMD64CMPLconstload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + typ := &b.Func.Config.Types + // match: (CMPLconstload {sym} [vo] ptr mem) + // cond: vo.Val() == 0 + // result: (TESTL x:(MOVLload {sym} [vo.Off()] ptr mem) x) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + mem := v_1 + if !(vo.Val() == 0) { + break + } + v.reset(OpAMD64TESTL) + x := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + x.AuxInt = int32ToAuxInt(vo.Off()) + x.Aux = symToAux(sym) + x.AddArg2(ptr, mem) + v.AddArg2(x, x) + return true + } + // match: (CMPLconstload {sym} [vo] ptr mem) + // cond: vo.Val() != 0 + // result: (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()]) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + mem := v_1 + if !(vo.Val() != 0) { + break + } + v.reset(OpAMD64CMPLconst) + v.AuxInt = int32ToAuxInt(vo.Val()) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(vo.Off()) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPLconstloadidx1 {sym} [vo] ptr idx mem) + // cond: vo.Val() == 0 + // result: (TESTL x:(MOVLloadidx1 {sym} [vo.Off()] ptr idx mem) x) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + mem := v_2 + if !(vo.Val() == 0) { + break + } + v.reset(OpAMD64TESTL) + x := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) + x.AuxInt = int32ToAuxInt(vo.Off()) + x.Aux = symToAux(sym) + x.AddArg3(ptr, idx, mem) + v.AddArg2(x, x) + return true + } + // match: (CMPLconstloadidx1 {sym} [vo] ptr idx mem) + // cond: vo.Val() != 0 + // result: (CMPLconst (MOVLloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val()]) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + mem := v_2 + if !(vo.Val() != 0) 
{ + break + } + v.reset(OpAMD64CMPLconst) + v.AuxInt = int32ToAuxInt(vo.Val()) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(vo.Off()) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, idx, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPLconstloadidx4 {sym} [vo] ptr idx mem) + // cond: vo.Val() == 0 + // result: (TESTL x:(MOVLloadidx4 {sym} [vo.Off()] ptr idx mem) x) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + mem := v_2 + if !(vo.Val() == 0) { + break + } + v.reset(OpAMD64TESTL) + x := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, typ.UInt32) + x.AuxInt = int32ToAuxInt(vo.Off()) + x.Aux = symToAux(sym) + x.AddArg3(ptr, idx, mem) + v.AddArg2(x, x) + return true + } + // match: (CMPLconstloadidx4 {sym} [vo] ptr idx mem) + // cond: vo.Val() != 0 + // result: (CMPLconst (MOVLloadidx4 {sym} [vo.Off()] ptr idx mem) [vo.Val()]) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + mem := v_2 + if !(vo.Val() != 0) { + break + } + v.reset(OpAMD64CMPLconst) + v.AuxInt = int32ToAuxInt(vo.Val()) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, typ.UInt32) + v0.AuxInt = int32ToAuxInt(vo.Off()) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, idx, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64splitload_OpAMD64CMPLload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPLload {sym} [off] ptr x mem) + // result: (CMPL (MOVLload {sym} [off] ptr mem) x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + x := v_1 + mem := v_2 + v.reset(OpAMD64CMPL) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = 
int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueAMD64splitload_OpAMD64CMPLloadidx1(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPLloadidx1 {sym} [off] ptr idx x mem) + // result: (CMPL (MOVLloadidx1 {sym} [off] ptr idx mem) x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + x := v_2 + mem := v_3 + v.reset(OpAMD64CMPL) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, idx, mem) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueAMD64splitload_OpAMD64CMPLloadidx4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPLloadidx4 {sym} [off] ptr idx x mem) + // result: (CMPL (MOVLloadidx4 {sym} [off] ptr idx mem) x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + x := v_2 + mem := v_3 + v.reset(OpAMD64CMPL) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, typ.UInt32) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, idx, mem) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueAMD64splitload_OpAMD64CMPQconstload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPQconstload {sym} [vo] ptr mem) + // cond: vo.Val() == 0 + // result: (TESTQ x:(MOVQload {sym} [vo.Off()] ptr mem) x) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + mem := v_1 + if !(vo.Val() == 0) { + break + } + v.reset(OpAMD64TESTQ) + x := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + x.AuxInt = int32ToAuxInt(vo.Off()) + x.Aux = symToAux(sym) + x.AddArg2(ptr, mem) + v.AddArg2(x, x) + return true + } + // match: 
(CMPQconstload {sym} [vo] ptr mem) + // cond: vo.Val() != 0 + // result: (CMPQconst (MOVQload {sym} [vo.Off()] ptr mem) [vo.Val()]) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + mem := v_1 + if !(vo.Val() != 0) { + break + } + v.reset(OpAMD64CMPQconst) + v.AuxInt = int32ToAuxInt(vo.Val()) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(vo.Off()) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPQconstloadidx1 {sym} [vo] ptr idx mem) + // cond: vo.Val() == 0 + // result: (TESTQ x:(MOVQloadidx1 {sym} [vo.Off()] ptr idx mem) x) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + mem := v_2 + if !(vo.Val() == 0) { + break + } + v.reset(OpAMD64TESTQ) + x := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) + x.AuxInt = int32ToAuxInt(vo.Off()) + x.Aux = symToAux(sym) + x.AddArg3(ptr, idx, mem) + v.AddArg2(x, x) + return true + } + // match: (CMPQconstloadidx1 {sym} [vo] ptr idx mem) + // cond: vo.Val() != 0 + // result: (CMPQconst (MOVQloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val()]) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + mem := v_2 + if !(vo.Val() != 0) { + break + } + v.reset(OpAMD64CMPQconst) + v.AuxInt = int32ToAuxInt(vo.Val()) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) + v0.AuxInt = int32ToAuxInt(vo.Off()) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, idx, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPQconstloadidx8 {sym} [vo] ptr idx 
mem) + // cond: vo.Val() == 0 + // result: (TESTQ x:(MOVQloadidx8 {sym} [vo.Off()] ptr idx mem) x) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + mem := v_2 + if !(vo.Val() == 0) { + break + } + v.reset(OpAMD64TESTQ) + x := b.NewValue0(v.Pos, OpAMD64MOVQloadidx8, typ.UInt64) + x.AuxInt = int32ToAuxInt(vo.Off()) + x.Aux = symToAux(sym) + x.AddArg3(ptr, idx, mem) + v.AddArg2(x, x) + return true + } + // match: (CMPQconstloadidx8 {sym} [vo] ptr idx mem) + // cond: vo.Val() != 0 + // result: (CMPQconst (MOVQloadidx8 {sym} [vo.Off()] ptr idx mem) [vo.Val()]) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + mem := v_2 + if !(vo.Val() != 0) { + break + } + v.reset(OpAMD64CMPQconst) + v.AuxInt = int32ToAuxInt(vo.Val()) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx8, typ.UInt64) + v0.AuxInt = int32ToAuxInt(vo.Off()) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, idx, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64splitload_OpAMD64CMPQload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPQload {sym} [off] ptr x mem) + // result: (CMPQ (MOVQload {sym} [off] ptr mem) x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + x := v_1 + mem := v_2 + v.reset(OpAMD64CMPQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueAMD64splitload_OpAMD64CMPQloadidx1(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPQloadidx1 {sym} [off] ptr idx x mem) + // result: (CMPQ (MOVQloadidx1 {sym} [off] ptr idx mem) x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + x := v_2 + 
mem := v_3 + v.reset(OpAMD64CMPQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, idx, mem) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueAMD64splitload_OpAMD64CMPQloadidx8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPQloadidx8 {sym} [off] ptr idx x mem) + // result: (CMPQ (MOVQloadidx8 {sym} [off] ptr idx mem) x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + x := v_2 + mem := v_3 + v.reset(OpAMD64CMPQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx8, typ.UInt64) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, idx, mem) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueAMD64splitload_OpAMD64CMPWconstload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPWconstload {sym} [vo] ptr mem) + // cond: vo.Val() == 0 + // result: (TESTW x:(MOVWload {sym} [vo.Off()] ptr mem) x) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + mem := v_1 + if !(vo.Val() == 0) { + break + } + v.reset(OpAMD64TESTW) + x := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + x.AuxInt = int32ToAuxInt(vo.Off()) + x.Aux = symToAux(sym) + x.AddArg2(ptr, mem) + v.AddArg2(x, x) + return true + } + // match: (CMPWconstload {sym} [vo] ptr mem) + // cond: vo.Val() != 0 + // result: (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()]) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + mem := v_1 + if !(vo.Val() != 0) { + break + } + v.reset(OpAMD64CMPWconst) + v.AuxInt = int16ToAuxInt(vo.Val16()) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(vo.Off()) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } + return false 
+} +func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPWconstloadidx1 {sym} [vo] ptr idx mem) + // cond: vo.Val() == 0 + // result: (TESTW x:(MOVWloadidx1 {sym} [vo.Off()] ptr idx mem) x) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + mem := v_2 + if !(vo.Val() == 0) { + break + } + v.reset(OpAMD64TESTW) + x := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) + x.AuxInt = int32ToAuxInt(vo.Off()) + x.Aux = symToAux(sym) + x.AddArg3(ptr, idx, mem) + v.AddArg2(x, x) + return true + } + // match: (CMPWconstloadidx1 {sym} [vo] ptr idx mem) + // cond: vo.Val() != 0 + // result: (CMPWconst (MOVWloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val16()]) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + mem := v_2 + if !(vo.Val() != 0) { + break + } + v.reset(OpAMD64CMPWconst) + v.AuxInt = int16ToAuxInt(vo.Val16()) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) + v0.AuxInt = int32ToAuxInt(vo.Off()) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, idx, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPWconstloadidx2 {sym} [vo] ptr idx mem) + // cond: vo.Val() == 0 + // result: (TESTW x:(MOVWloadidx2 {sym} [vo.Off()] ptr idx mem) x) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + mem := v_2 + if !(vo.Val() == 0) { + break + } + v.reset(OpAMD64TESTW) + x := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, typ.UInt16) + x.AuxInt = int32ToAuxInt(vo.Off()) + x.Aux = symToAux(sym) + x.AddArg3(ptr, idx, mem) + v.AddArg2(x, x) + return true + } + // match: (CMPWconstloadidx2 {sym} [vo] ptr idx mem) + // cond: 
vo.Val() != 0 + // result: (CMPWconst (MOVWloadidx2 {sym} [vo.Off()] ptr idx mem) [vo.Val16()]) + for { + vo := auxIntToValAndOff(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + mem := v_2 + if !(vo.Val() != 0) { + break + } + v.reset(OpAMD64CMPWconst) + v.AuxInt = int16ToAuxInt(vo.Val16()) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, typ.UInt16) + v0.AuxInt = int32ToAuxInt(vo.Off()) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, idx, mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64splitload_OpAMD64CMPWload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPWload {sym} [off] ptr x mem) + // result: (CMPW (MOVWload {sym} [off] ptr mem) x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + x := v_1 + mem := v_2 + v.reset(OpAMD64CMPW) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueAMD64splitload_OpAMD64CMPWloadidx1(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPWloadidx1 {sym} [off] ptr idx x mem) + // result: (CMPW (MOVWloadidx1 {sym} [off] ptr idx mem) x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + x := v_2 + mem := v_3 + v.reset(OpAMD64CMPW) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, idx, mem) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueAMD64splitload_OpAMD64CMPWloadidx2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPWloadidx2 {sym} [off] ptr idx x mem) + // result: (CMPW (MOVWloadidx2 {sym} [off] 
ptr idx mem) x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + x := v_2 + mem := v_3 + v.reset(OpAMD64CMPW) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, typ.UInt16) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, idx, mem) + v.AddArg2(v0, x) + return true + } +} +func rewriteBlockAMD64splitload(b *Block) bool { + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteARM.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteARM.go new file mode 100644 index 0000000000000000000000000000000000000000..971c9a5d552acaf1b1af4931fffb57fa9dde8313 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteARM.go @@ -0,0 +1,21838 @@ +// Code generated from _gen/ARM.rules using 'go generate'; DO NOT EDIT. + +package ssa + +import "internal/buildcfg" +import "cmd/compile/internal/types" + +func rewriteValueARM(v *Value) bool { + switch v.Op { + case OpARMADC: + return rewriteValueARM_OpARMADC(v) + case OpARMADCconst: + return rewriteValueARM_OpARMADCconst(v) + case OpARMADCshiftLL: + return rewriteValueARM_OpARMADCshiftLL(v) + case OpARMADCshiftLLreg: + return rewriteValueARM_OpARMADCshiftLLreg(v) + case OpARMADCshiftRA: + return rewriteValueARM_OpARMADCshiftRA(v) + case OpARMADCshiftRAreg: + return rewriteValueARM_OpARMADCshiftRAreg(v) + case OpARMADCshiftRL: + return rewriteValueARM_OpARMADCshiftRL(v) + case OpARMADCshiftRLreg: + return rewriteValueARM_OpARMADCshiftRLreg(v) + case OpARMADD: + return rewriteValueARM_OpARMADD(v) + case OpARMADDD: + return rewriteValueARM_OpARMADDD(v) + case OpARMADDF: + return rewriteValueARM_OpARMADDF(v) + case OpARMADDS: + return rewriteValueARM_OpARMADDS(v) + case OpARMADDSshiftLL: + return rewriteValueARM_OpARMADDSshiftLL(v) + case OpARMADDSshiftLLreg: + return rewriteValueARM_OpARMADDSshiftLLreg(v) + case OpARMADDSshiftRA: + return rewriteValueARM_OpARMADDSshiftRA(v) + case 
OpARMADDSshiftRAreg: + return rewriteValueARM_OpARMADDSshiftRAreg(v) + case OpARMADDSshiftRL: + return rewriteValueARM_OpARMADDSshiftRL(v) + case OpARMADDSshiftRLreg: + return rewriteValueARM_OpARMADDSshiftRLreg(v) + case OpARMADDconst: + return rewriteValueARM_OpARMADDconst(v) + case OpARMADDshiftLL: + return rewriteValueARM_OpARMADDshiftLL(v) + case OpARMADDshiftLLreg: + return rewriteValueARM_OpARMADDshiftLLreg(v) + case OpARMADDshiftRA: + return rewriteValueARM_OpARMADDshiftRA(v) + case OpARMADDshiftRAreg: + return rewriteValueARM_OpARMADDshiftRAreg(v) + case OpARMADDshiftRL: + return rewriteValueARM_OpARMADDshiftRL(v) + case OpARMADDshiftRLreg: + return rewriteValueARM_OpARMADDshiftRLreg(v) + case OpARMAND: + return rewriteValueARM_OpARMAND(v) + case OpARMANDconst: + return rewriteValueARM_OpARMANDconst(v) + case OpARMANDshiftLL: + return rewriteValueARM_OpARMANDshiftLL(v) + case OpARMANDshiftLLreg: + return rewriteValueARM_OpARMANDshiftLLreg(v) + case OpARMANDshiftRA: + return rewriteValueARM_OpARMANDshiftRA(v) + case OpARMANDshiftRAreg: + return rewriteValueARM_OpARMANDshiftRAreg(v) + case OpARMANDshiftRL: + return rewriteValueARM_OpARMANDshiftRL(v) + case OpARMANDshiftRLreg: + return rewriteValueARM_OpARMANDshiftRLreg(v) + case OpARMBFX: + return rewriteValueARM_OpARMBFX(v) + case OpARMBFXU: + return rewriteValueARM_OpARMBFXU(v) + case OpARMBIC: + return rewriteValueARM_OpARMBIC(v) + case OpARMBICconst: + return rewriteValueARM_OpARMBICconst(v) + case OpARMBICshiftLL: + return rewriteValueARM_OpARMBICshiftLL(v) + case OpARMBICshiftLLreg: + return rewriteValueARM_OpARMBICshiftLLreg(v) + case OpARMBICshiftRA: + return rewriteValueARM_OpARMBICshiftRA(v) + case OpARMBICshiftRAreg: + return rewriteValueARM_OpARMBICshiftRAreg(v) + case OpARMBICshiftRL: + return rewriteValueARM_OpARMBICshiftRL(v) + case OpARMBICshiftRLreg: + return rewriteValueARM_OpARMBICshiftRLreg(v) + case OpARMCMN: + return rewriteValueARM_OpARMCMN(v) + case OpARMCMNconst: + return 
rewriteValueARM_OpARMCMNconst(v) + case OpARMCMNshiftLL: + return rewriteValueARM_OpARMCMNshiftLL(v) + case OpARMCMNshiftLLreg: + return rewriteValueARM_OpARMCMNshiftLLreg(v) + case OpARMCMNshiftRA: + return rewriteValueARM_OpARMCMNshiftRA(v) + case OpARMCMNshiftRAreg: + return rewriteValueARM_OpARMCMNshiftRAreg(v) + case OpARMCMNshiftRL: + return rewriteValueARM_OpARMCMNshiftRL(v) + case OpARMCMNshiftRLreg: + return rewriteValueARM_OpARMCMNshiftRLreg(v) + case OpARMCMOVWHSconst: + return rewriteValueARM_OpARMCMOVWHSconst(v) + case OpARMCMOVWLSconst: + return rewriteValueARM_OpARMCMOVWLSconst(v) + case OpARMCMP: + return rewriteValueARM_OpARMCMP(v) + case OpARMCMPD: + return rewriteValueARM_OpARMCMPD(v) + case OpARMCMPF: + return rewriteValueARM_OpARMCMPF(v) + case OpARMCMPconst: + return rewriteValueARM_OpARMCMPconst(v) + case OpARMCMPshiftLL: + return rewriteValueARM_OpARMCMPshiftLL(v) + case OpARMCMPshiftLLreg: + return rewriteValueARM_OpARMCMPshiftLLreg(v) + case OpARMCMPshiftRA: + return rewriteValueARM_OpARMCMPshiftRA(v) + case OpARMCMPshiftRAreg: + return rewriteValueARM_OpARMCMPshiftRAreg(v) + case OpARMCMPshiftRL: + return rewriteValueARM_OpARMCMPshiftRL(v) + case OpARMCMPshiftRLreg: + return rewriteValueARM_OpARMCMPshiftRLreg(v) + case OpARMEqual: + return rewriteValueARM_OpARMEqual(v) + case OpARMGreaterEqual: + return rewriteValueARM_OpARMGreaterEqual(v) + case OpARMGreaterEqualU: + return rewriteValueARM_OpARMGreaterEqualU(v) + case OpARMGreaterThan: + return rewriteValueARM_OpARMGreaterThan(v) + case OpARMGreaterThanU: + return rewriteValueARM_OpARMGreaterThanU(v) + case OpARMLessEqual: + return rewriteValueARM_OpARMLessEqual(v) + case OpARMLessEqualU: + return rewriteValueARM_OpARMLessEqualU(v) + case OpARMLessThan: + return rewriteValueARM_OpARMLessThan(v) + case OpARMLessThanU: + return rewriteValueARM_OpARMLessThanU(v) + case OpARMMOVBUload: + return rewriteValueARM_OpARMMOVBUload(v) + case OpARMMOVBUloadidx: + return 
rewriteValueARM_OpARMMOVBUloadidx(v) + case OpARMMOVBUreg: + return rewriteValueARM_OpARMMOVBUreg(v) + case OpARMMOVBload: + return rewriteValueARM_OpARMMOVBload(v) + case OpARMMOVBloadidx: + return rewriteValueARM_OpARMMOVBloadidx(v) + case OpARMMOVBreg: + return rewriteValueARM_OpARMMOVBreg(v) + case OpARMMOVBstore: + return rewriteValueARM_OpARMMOVBstore(v) + case OpARMMOVBstoreidx: + return rewriteValueARM_OpARMMOVBstoreidx(v) + case OpARMMOVDload: + return rewriteValueARM_OpARMMOVDload(v) + case OpARMMOVDstore: + return rewriteValueARM_OpARMMOVDstore(v) + case OpARMMOVFload: + return rewriteValueARM_OpARMMOVFload(v) + case OpARMMOVFstore: + return rewriteValueARM_OpARMMOVFstore(v) + case OpARMMOVHUload: + return rewriteValueARM_OpARMMOVHUload(v) + case OpARMMOVHUloadidx: + return rewriteValueARM_OpARMMOVHUloadidx(v) + case OpARMMOVHUreg: + return rewriteValueARM_OpARMMOVHUreg(v) + case OpARMMOVHload: + return rewriteValueARM_OpARMMOVHload(v) + case OpARMMOVHloadidx: + return rewriteValueARM_OpARMMOVHloadidx(v) + case OpARMMOVHreg: + return rewriteValueARM_OpARMMOVHreg(v) + case OpARMMOVHstore: + return rewriteValueARM_OpARMMOVHstore(v) + case OpARMMOVHstoreidx: + return rewriteValueARM_OpARMMOVHstoreidx(v) + case OpARMMOVWload: + return rewriteValueARM_OpARMMOVWload(v) + case OpARMMOVWloadidx: + return rewriteValueARM_OpARMMOVWloadidx(v) + case OpARMMOVWloadshiftLL: + return rewriteValueARM_OpARMMOVWloadshiftLL(v) + case OpARMMOVWloadshiftRA: + return rewriteValueARM_OpARMMOVWloadshiftRA(v) + case OpARMMOVWloadshiftRL: + return rewriteValueARM_OpARMMOVWloadshiftRL(v) + case OpARMMOVWnop: + return rewriteValueARM_OpARMMOVWnop(v) + case OpARMMOVWreg: + return rewriteValueARM_OpARMMOVWreg(v) + case OpARMMOVWstore: + return rewriteValueARM_OpARMMOVWstore(v) + case OpARMMOVWstoreidx: + return rewriteValueARM_OpARMMOVWstoreidx(v) + case OpARMMOVWstoreshiftLL: + return rewriteValueARM_OpARMMOVWstoreshiftLL(v) + case OpARMMOVWstoreshiftRA: + return 
rewriteValueARM_OpARMMOVWstoreshiftRA(v) + case OpARMMOVWstoreshiftRL: + return rewriteValueARM_OpARMMOVWstoreshiftRL(v) + case OpARMMUL: + return rewriteValueARM_OpARMMUL(v) + case OpARMMULA: + return rewriteValueARM_OpARMMULA(v) + case OpARMMULD: + return rewriteValueARM_OpARMMULD(v) + case OpARMMULF: + return rewriteValueARM_OpARMMULF(v) + case OpARMMULS: + return rewriteValueARM_OpARMMULS(v) + case OpARMMVN: + return rewriteValueARM_OpARMMVN(v) + case OpARMMVNshiftLL: + return rewriteValueARM_OpARMMVNshiftLL(v) + case OpARMMVNshiftLLreg: + return rewriteValueARM_OpARMMVNshiftLLreg(v) + case OpARMMVNshiftRA: + return rewriteValueARM_OpARMMVNshiftRA(v) + case OpARMMVNshiftRAreg: + return rewriteValueARM_OpARMMVNshiftRAreg(v) + case OpARMMVNshiftRL: + return rewriteValueARM_OpARMMVNshiftRL(v) + case OpARMMVNshiftRLreg: + return rewriteValueARM_OpARMMVNshiftRLreg(v) + case OpARMNEGD: + return rewriteValueARM_OpARMNEGD(v) + case OpARMNEGF: + return rewriteValueARM_OpARMNEGF(v) + case OpARMNMULD: + return rewriteValueARM_OpARMNMULD(v) + case OpARMNMULF: + return rewriteValueARM_OpARMNMULF(v) + case OpARMNotEqual: + return rewriteValueARM_OpARMNotEqual(v) + case OpARMOR: + return rewriteValueARM_OpARMOR(v) + case OpARMORconst: + return rewriteValueARM_OpARMORconst(v) + case OpARMORshiftLL: + return rewriteValueARM_OpARMORshiftLL(v) + case OpARMORshiftLLreg: + return rewriteValueARM_OpARMORshiftLLreg(v) + case OpARMORshiftRA: + return rewriteValueARM_OpARMORshiftRA(v) + case OpARMORshiftRAreg: + return rewriteValueARM_OpARMORshiftRAreg(v) + case OpARMORshiftRL: + return rewriteValueARM_OpARMORshiftRL(v) + case OpARMORshiftRLreg: + return rewriteValueARM_OpARMORshiftRLreg(v) + case OpARMRSB: + return rewriteValueARM_OpARMRSB(v) + case OpARMRSBSshiftLL: + return rewriteValueARM_OpARMRSBSshiftLL(v) + case OpARMRSBSshiftLLreg: + return rewriteValueARM_OpARMRSBSshiftLLreg(v) + case OpARMRSBSshiftRA: + return rewriteValueARM_OpARMRSBSshiftRA(v) + case OpARMRSBSshiftRAreg: + 
return rewriteValueARM_OpARMRSBSshiftRAreg(v) + case OpARMRSBSshiftRL: + return rewriteValueARM_OpARMRSBSshiftRL(v) + case OpARMRSBSshiftRLreg: + return rewriteValueARM_OpARMRSBSshiftRLreg(v) + case OpARMRSBconst: + return rewriteValueARM_OpARMRSBconst(v) + case OpARMRSBshiftLL: + return rewriteValueARM_OpARMRSBshiftLL(v) + case OpARMRSBshiftLLreg: + return rewriteValueARM_OpARMRSBshiftLLreg(v) + case OpARMRSBshiftRA: + return rewriteValueARM_OpARMRSBshiftRA(v) + case OpARMRSBshiftRAreg: + return rewriteValueARM_OpARMRSBshiftRAreg(v) + case OpARMRSBshiftRL: + return rewriteValueARM_OpARMRSBshiftRL(v) + case OpARMRSBshiftRLreg: + return rewriteValueARM_OpARMRSBshiftRLreg(v) + case OpARMRSCconst: + return rewriteValueARM_OpARMRSCconst(v) + case OpARMRSCshiftLL: + return rewriteValueARM_OpARMRSCshiftLL(v) + case OpARMRSCshiftLLreg: + return rewriteValueARM_OpARMRSCshiftLLreg(v) + case OpARMRSCshiftRA: + return rewriteValueARM_OpARMRSCshiftRA(v) + case OpARMRSCshiftRAreg: + return rewriteValueARM_OpARMRSCshiftRAreg(v) + case OpARMRSCshiftRL: + return rewriteValueARM_OpARMRSCshiftRL(v) + case OpARMRSCshiftRLreg: + return rewriteValueARM_OpARMRSCshiftRLreg(v) + case OpARMSBC: + return rewriteValueARM_OpARMSBC(v) + case OpARMSBCconst: + return rewriteValueARM_OpARMSBCconst(v) + case OpARMSBCshiftLL: + return rewriteValueARM_OpARMSBCshiftLL(v) + case OpARMSBCshiftLLreg: + return rewriteValueARM_OpARMSBCshiftLLreg(v) + case OpARMSBCshiftRA: + return rewriteValueARM_OpARMSBCshiftRA(v) + case OpARMSBCshiftRAreg: + return rewriteValueARM_OpARMSBCshiftRAreg(v) + case OpARMSBCshiftRL: + return rewriteValueARM_OpARMSBCshiftRL(v) + case OpARMSBCshiftRLreg: + return rewriteValueARM_OpARMSBCshiftRLreg(v) + case OpARMSLL: + return rewriteValueARM_OpARMSLL(v) + case OpARMSLLconst: + return rewriteValueARM_OpARMSLLconst(v) + case OpARMSRA: + return rewriteValueARM_OpARMSRA(v) + case OpARMSRAcond: + return rewriteValueARM_OpARMSRAcond(v) + case OpARMSRAconst: + return 
rewriteValueARM_OpARMSRAconst(v) + case OpARMSRL: + return rewriteValueARM_OpARMSRL(v) + case OpARMSRLconst: + return rewriteValueARM_OpARMSRLconst(v) + case OpARMSRR: + return rewriteValueARM_OpARMSRR(v) + case OpARMSUB: + return rewriteValueARM_OpARMSUB(v) + case OpARMSUBD: + return rewriteValueARM_OpARMSUBD(v) + case OpARMSUBF: + return rewriteValueARM_OpARMSUBF(v) + case OpARMSUBS: + return rewriteValueARM_OpARMSUBS(v) + case OpARMSUBSshiftLL: + return rewriteValueARM_OpARMSUBSshiftLL(v) + case OpARMSUBSshiftLLreg: + return rewriteValueARM_OpARMSUBSshiftLLreg(v) + case OpARMSUBSshiftRA: + return rewriteValueARM_OpARMSUBSshiftRA(v) + case OpARMSUBSshiftRAreg: + return rewriteValueARM_OpARMSUBSshiftRAreg(v) + case OpARMSUBSshiftRL: + return rewriteValueARM_OpARMSUBSshiftRL(v) + case OpARMSUBSshiftRLreg: + return rewriteValueARM_OpARMSUBSshiftRLreg(v) + case OpARMSUBconst: + return rewriteValueARM_OpARMSUBconst(v) + case OpARMSUBshiftLL: + return rewriteValueARM_OpARMSUBshiftLL(v) + case OpARMSUBshiftLLreg: + return rewriteValueARM_OpARMSUBshiftLLreg(v) + case OpARMSUBshiftRA: + return rewriteValueARM_OpARMSUBshiftRA(v) + case OpARMSUBshiftRAreg: + return rewriteValueARM_OpARMSUBshiftRAreg(v) + case OpARMSUBshiftRL: + return rewriteValueARM_OpARMSUBshiftRL(v) + case OpARMSUBshiftRLreg: + return rewriteValueARM_OpARMSUBshiftRLreg(v) + case OpARMTEQ: + return rewriteValueARM_OpARMTEQ(v) + case OpARMTEQconst: + return rewriteValueARM_OpARMTEQconst(v) + case OpARMTEQshiftLL: + return rewriteValueARM_OpARMTEQshiftLL(v) + case OpARMTEQshiftLLreg: + return rewriteValueARM_OpARMTEQshiftLLreg(v) + case OpARMTEQshiftRA: + return rewriteValueARM_OpARMTEQshiftRA(v) + case OpARMTEQshiftRAreg: + return rewriteValueARM_OpARMTEQshiftRAreg(v) + case OpARMTEQshiftRL: + return rewriteValueARM_OpARMTEQshiftRL(v) + case OpARMTEQshiftRLreg: + return rewriteValueARM_OpARMTEQshiftRLreg(v) + case OpARMTST: + return rewriteValueARM_OpARMTST(v) + case OpARMTSTconst: + return 
rewriteValueARM_OpARMTSTconst(v) + case OpARMTSTshiftLL: + return rewriteValueARM_OpARMTSTshiftLL(v) + case OpARMTSTshiftLLreg: + return rewriteValueARM_OpARMTSTshiftLLreg(v) + case OpARMTSTshiftRA: + return rewriteValueARM_OpARMTSTshiftRA(v) + case OpARMTSTshiftRAreg: + return rewriteValueARM_OpARMTSTshiftRAreg(v) + case OpARMTSTshiftRL: + return rewriteValueARM_OpARMTSTshiftRL(v) + case OpARMTSTshiftRLreg: + return rewriteValueARM_OpARMTSTshiftRLreg(v) + case OpARMXOR: + return rewriteValueARM_OpARMXOR(v) + case OpARMXORconst: + return rewriteValueARM_OpARMXORconst(v) + case OpARMXORshiftLL: + return rewriteValueARM_OpARMXORshiftLL(v) + case OpARMXORshiftLLreg: + return rewriteValueARM_OpARMXORshiftLLreg(v) + case OpARMXORshiftRA: + return rewriteValueARM_OpARMXORshiftRA(v) + case OpARMXORshiftRAreg: + return rewriteValueARM_OpARMXORshiftRAreg(v) + case OpARMXORshiftRL: + return rewriteValueARM_OpARMXORshiftRL(v) + case OpARMXORshiftRLreg: + return rewriteValueARM_OpARMXORshiftRLreg(v) + case OpARMXORshiftRR: + return rewriteValueARM_OpARMXORshiftRR(v) + case OpAbs: + v.Op = OpARMABSD + return true + case OpAdd16: + v.Op = OpARMADD + return true + case OpAdd32: + v.Op = OpARMADD + return true + case OpAdd32F: + v.Op = OpARMADDF + return true + case OpAdd32carry: + v.Op = OpARMADDS + return true + case OpAdd32withcarry: + v.Op = OpARMADC + return true + case OpAdd64F: + v.Op = OpARMADDD + return true + case OpAdd8: + v.Op = OpARMADD + return true + case OpAddPtr: + v.Op = OpARMADD + return true + case OpAddr: + return rewriteValueARM_OpAddr(v) + case OpAnd16: + v.Op = OpARMAND + return true + case OpAnd32: + v.Op = OpARMAND + return true + case OpAnd8: + v.Op = OpARMAND + return true + case OpAndB: + v.Op = OpARMAND + return true + case OpAvg32u: + return rewriteValueARM_OpAvg32u(v) + case OpBitLen32: + return rewriteValueARM_OpBitLen32(v) + case OpBswap32: + return rewriteValueARM_OpBswap32(v) + case OpClosureCall: + v.Op = OpARMCALLclosure + return true + case 
OpCom16: + v.Op = OpARMMVN + return true + case OpCom32: + v.Op = OpARMMVN + return true + case OpCom8: + v.Op = OpARMMVN + return true + case OpConst16: + return rewriteValueARM_OpConst16(v) + case OpConst32: + return rewriteValueARM_OpConst32(v) + case OpConst32F: + return rewriteValueARM_OpConst32F(v) + case OpConst64F: + return rewriteValueARM_OpConst64F(v) + case OpConst8: + return rewriteValueARM_OpConst8(v) + case OpConstBool: + return rewriteValueARM_OpConstBool(v) + case OpConstNil: + return rewriteValueARM_OpConstNil(v) + case OpCtz16: + return rewriteValueARM_OpCtz16(v) + case OpCtz16NonZero: + v.Op = OpCtz32 + return true + case OpCtz32: + return rewriteValueARM_OpCtz32(v) + case OpCtz32NonZero: + v.Op = OpCtz32 + return true + case OpCtz8: + return rewriteValueARM_OpCtz8(v) + case OpCtz8NonZero: + v.Op = OpCtz32 + return true + case OpCvt32Fto32: + v.Op = OpARMMOVFW + return true + case OpCvt32Fto32U: + v.Op = OpARMMOVFWU + return true + case OpCvt32Fto64F: + v.Op = OpARMMOVFD + return true + case OpCvt32Uto32F: + v.Op = OpARMMOVWUF + return true + case OpCvt32Uto64F: + v.Op = OpARMMOVWUD + return true + case OpCvt32to32F: + v.Op = OpARMMOVWF + return true + case OpCvt32to64F: + v.Op = OpARMMOVWD + return true + case OpCvt64Fto32: + v.Op = OpARMMOVDW + return true + case OpCvt64Fto32F: + v.Op = OpARMMOVDF + return true + case OpCvt64Fto32U: + v.Op = OpARMMOVDWU + return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true + case OpDiv16: + return rewriteValueARM_OpDiv16(v) + case OpDiv16u: + return rewriteValueARM_OpDiv16u(v) + case OpDiv32: + return rewriteValueARM_OpDiv32(v) + case OpDiv32F: + v.Op = OpARMDIVF + return true + case OpDiv32u: + return rewriteValueARM_OpDiv32u(v) + case OpDiv64F: + v.Op = OpARMDIVD + return true + case OpDiv8: + return rewriteValueARM_OpDiv8(v) + case OpDiv8u: + return rewriteValueARM_OpDiv8u(v) + case OpEq16: + return rewriteValueARM_OpEq16(v) + case OpEq32: + return rewriteValueARM_OpEq32(v) + case OpEq32F: + 
return rewriteValueARM_OpEq32F(v) + case OpEq64F: + return rewriteValueARM_OpEq64F(v) + case OpEq8: + return rewriteValueARM_OpEq8(v) + case OpEqB: + return rewriteValueARM_OpEqB(v) + case OpEqPtr: + return rewriteValueARM_OpEqPtr(v) + case OpFMA: + return rewriteValueARM_OpFMA(v) + case OpGetCallerPC: + v.Op = OpARMLoweredGetCallerPC + return true + case OpGetCallerSP: + v.Op = OpARMLoweredGetCallerSP + return true + case OpGetClosurePtr: + v.Op = OpARMLoweredGetClosurePtr + return true + case OpHmul32: + v.Op = OpARMHMUL + return true + case OpHmul32u: + v.Op = OpARMHMULU + return true + case OpInterCall: + v.Op = OpARMCALLinter + return true + case OpIsInBounds: + return rewriteValueARM_OpIsInBounds(v) + case OpIsNonNil: + return rewriteValueARM_OpIsNonNil(v) + case OpIsSliceInBounds: + return rewriteValueARM_OpIsSliceInBounds(v) + case OpLeq16: + return rewriteValueARM_OpLeq16(v) + case OpLeq16U: + return rewriteValueARM_OpLeq16U(v) + case OpLeq32: + return rewriteValueARM_OpLeq32(v) + case OpLeq32F: + return rewriteValueARM_OpLeq32F(v) + case OpLeq32U: + return rewriteValueARM_OpLeq32U(v) + case OpLeq64F: + return rewriteValueARM_OpLeq64F(v) + case OpLeq8: + return rewriteValueARM_OpLeq8(v) + case OpLeq8U: + return rewriteValueARM_OpLeq8U(v) + case OpLess16: + return rewriteValueARM_OpLess16(v) + case OpLess16U: + return rewriteValueARM_OpLess16U(v) + case OpLess32: + return rewriteValueARM_OpLess32(v) + case OpLess32F: + return rewriteValueARM_OpLess32F(v) + case OpLess32U: + return rewriteValueARM_OpLess32U(v) + case OpLess64F: + return rewriteValueARM_OpLess64F(v) + case OpLess8: + return rewriteValueARM_OpLess8(v) + case OpLess8U: + return rewriteValueARM_OpLess8U(v) + case OpLoad: + return rewriteValueARM_OpLoad(v) + case OpLocalAddr: + return rewriteValueARM_OpLocalAddr(v) + case OpLsh16x16: + return rewriteValueARM_OpLsh16x16(v) + case OpLsh16x32: + return rewriteValueARM_OpLsh16x32(v) + case OpLsh16x64: + return rewriteValueARM_OpLsh16x64(v) + case 
OpLsh16x8: + return rewriteValueARM_OpLsh16x8(v) + case OpLsh32x16: + return rewriteValueARM_OpLsh32x16(v) + case OpLsh32x32: + return rewriteValueARM_OpLsh32x32(v) + case OpLsh32x64: + return rewriteValueARM_OpLsh32x64(v) + case OpLsh32x8: + return rewriteValueARM_OpLsh32x8(v) + case OpLsh8x16: + return rewriteValueARM_OpLsh8x16(v) + case OpLsh8x32: + return rewriteValueARM_OpLsh8x32(v) + case OpLsh8x64: + return rewriteValueARM_OpLsh8x64(v) + case OpLsh8x8: + return rewriteValueARM_OpLsh8x8(v) + case OpMod16: + return rewriteValueARM_OpMod16(v) + case OpMod16u: + return rewriteValueARM_OpMod16u(v) + case OpMod32: + return rewriteValueARM_OpMod32(v) + case OpMod32u: + return rewriteValueARM_OpMod32u(v) + case OpMod8: + return rewriteValueARM_OpMod8(v) + case OpMod8u: + return rewriteValueARM_OpMod8u(v) + case OpMove: + return rewriteValueARM_OpMove(v) + case OpMul16: + v.Op = OpARMMUL + return true + case OpMul32: + v.Op = OpARMMUL + return true + case OpMul32F: + v.Op = OpARMMULF + return true + case OpMul32uhilo: + v.Op = OpARMMULLU + return true + case OpMul64F: + v.Op = OpARMMULD + return true + case OpMul8: + v.Op = OpARMMUL + return true + case OpNeg16: + return rewriteValueARM_OpNeg16(v) + case OpNeg32: + return rewriteValueARM_OpNeg32(v) + case OpNeg32F: + v.Op = OpARMNEGF + return true + case OpNeg64F: + v.Op = OpARMNEGD + return true + case OpNeg8: + return rewriteValueARM_OpNeg8(v) + case OpNeq16: + return rewriteValueARM_OpNeq16(v) + case OpNeq32: + return rewriteValueARM_OpNeq32(v) + case OpNeq32F: + return rewriteValueARM_OpNeq32F(v) + case OpNeq64F: + return rewriteValueARM_OpNeq64F(v) + case OpNeq8: + return rewriteValueARM_OpNeq8(v) + case OpNeqB: + v.Op = OpARMXOR + return true + case OpNeqPtr: + return rewriteValueARM_OpNeqPtr(v) + case OpNilCheck: + v.Op = OpARMLoweredNilCheck + return true + case OpNot: + return rewriteValueARM_OpNot(v) + case OpOffPtr: + return rewriteValueARM_OpOffPtr(v) + case OpOr16: + v.Op = OpARMOR + return true + case 
OpOr32: + v.Op = OpARMOR + return true + case OpOr8: + v.Op = OpARMOR + return true + case OpOrB: + v.Op = OpARMOR + return true + case OpPanicBounds: + return rewriteValueARM_OpPanicBounds(v) + case OpPanicExtend: + return rewriteValueARM_OpPanicExtend(v) + case OpRotateLeft16: + return rewriteValueARM_OpRotateLeft16(v) + case OpRotateLeft32: + return rewriteValueARM_OpRotateLeft32(v) + case OpRotateLeft8: + return rewriteValueARM_OpRotateLeft8(v) + case OpRound32F: + v.Op = OpCopy + return true + case OpRound64F: + v.Op = OpCopy + return true + case OpRsh16Ux16: + return rewriteValueARM_OpRsh16Ux16(v) + case OpRsh16Ux32: + return rewriteValueARM_OpRsh16Ux32(v) + case OpRsh16Ux64: + return rewriteValueARM_OpRsh16Ux64(v) + case OpRsh16Ux8: + return rewriteValueARM_OpRsh16Ux8(v) + case OpRsh16x16: + return rewriteValueARM_OpRsh16x16(v) + case OpRsh16x32: + return rewriteValueARM_OpRsh16x32(v) + case OpRsh16x64: + return rewriteValueARM_OpRsh16x64(v) + case OpRsh16x8: + return rewriteValueARM_OpRsh16x8(v) + case OpRsh32Ux16: + return rewriteValueARM_OpRsh32Ux16(v) + case OpRsh32Ux32: + return rewriteValueARM_OpRsh32Ux32(v) + case OpRsh32Ux64: + return rewriteValueARM_OpRsh32Ux64(v) + case OpRsh32Ux8: + return rewriteValueARM_OpRsh32Ux8(v) + case OpRsh32x16: + return rewriteValueARM_OpRsh32x16(v) + case OpRsh32x32: + return rewriteValueARM_OpRsh32x32(v) + case OpRsh32x64: + return rewriteValueARM_OpRsh32x64(v) + case OpRsh32x8: + return rewriteValueARM_OpRsh32x8(v) + case OpRsh8Ux16: + return rewriteValueARM_OpRsh8Ux16(v) + case OpRsh8Ux32: + return rewriteValueARM_OpRsh8Ux32(v) + case OpRsh8Ux64: + return rewriteValueARM_OpRsh8Ux64(v) + case OpRsh8Ux8: + return rewriteValueARM_OpRsh8Ux8(v) + case OpRsh8x16: + return rewriteValueARM_OpRsh8x16(v) + case OpRsh8x32: + return rewriteValueARM_OpRsh8x32(v) + case OpRsh8x64: + return rewriteValueARM_OpRsh8x64(v) + case OpRsh8x8: + return rewriteValueARM_OpRsh8x8(v) + case OpSelect0: + return rewriteValueARM_OpSelect0(v) + 
case OpSelect1: + return rewriteValueARM_OpSelect1(v) + case OpSignExt16to32: + v.Op = OpARMMOVHreg + return true + case OpSignExt8to16: + v.Op = OpARMMOVBreg + return true + case OpSignExt8to32: + v.Op = OpARMMOVBreg + return true + case OpSignmask: + return rewriteValueARM_OpSignmask(v) + case OpSlicemask: + return rewriteValueARM_OpSlicemask(v) + case OpSqrt: + v.Op = OpARMSQRTD + return true + case OpSqrt32: + v.Op = OpARMSQRTF + return true + case OpStaticCall: + v.Op = OpARMCALLstatic + return true + case OpStore: + return rewriteValueARM_OpStore(v) + case OpSub16: + v.Op = OpARMSUB + return true + case OpSub32: + v.Op = OpARMSUB + return true + case OpSub32F: + v.Op = OpARMSUBF + return true + case OpSub32carry: + v.Op = OpARMSUBS + return true + case OpSub32withcarry: + v.Op = OpARMSBC + return true + case OpSub64F: + v.Op = OpARMSUBD + return true + case OpSub8: + v.Op = OpARMSUB + return true + case OpSubPtr: + v.Op = OpARMSUB + return true + case OpTailCall: + v.Op = OpARMCALLtail + return true + case OpTrunc16to8: + v.Op = OpCopy + return true + case OpTrunc32to16: + v.Op = OpCopy + return true + case OpTrunc32to8: + v.Op = OpCopy + return true + case OpWB: + v.Op = OpARMLoweredWB + return true + case OpXor16: + v.Op = OpARMXOR + return true + case OpXor32: + v.Op = OpARMXOR + return true + case OpXor8: + v.Op = OpARMXOR + return true + case OpZero: + return rewriteValueARM_OpZero(v) + case OpZeroExt16to32: + v.Op = OpARMMOVHUreg + return true + case OpZeroExt8to16: + v.Op = OpARMMOVBUreg + return true + case OpZeroExt8to32: + v.Op = OpARMMOVBUreg + return true + case OpZeromask: + return rewriteValueARM_OpZeromask(v) + } + return false +} +func rewriteValueARM_OpARMADC(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADC (MOVWconst [c]) x flags) + // result: (ADCconst [c] x flags) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpARMMOVWconst { + continue + } + c := 
auxIntToInt32(v_0.AuxInt) + x := v_1 + flags := v_2 + v.reset(OpARMADCconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, flags) + return true + } + break + } + // match: (ADC x (SLLconst [c] y) flags) + // result: (ADCshiftLL x y [c] flags) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSLLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + flags := v_2 + v.reset(OpARMADCshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, flags) + return true + } + break + } + // match: (ADC x (SRLconst [c] y) flags) + // result: (ADCshiftRL x y [c] flags) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + flags := v_2 + v.reset(OpARMADCshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, flags) + return true + } + break + } + // match: (ADC x (SRAconst [c] y) flags) + // result: (ADCshiftRA x y [c] flags) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRAconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + flags := v_2 + v.reset(OpARMADCshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, flags) + return true + } + break + } + // match: (ADC x (SLL y z) flags) + // result: (ADCshiftLLreg x y z flags) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSLL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + flags := v_2 + v.reset(OpARMADCshiftLLreg) + v.AddArg4(x, y, z, flags) + return true + } + break + } + // match: (ADC x (SRL y z) flags) + // result: (ADCshiftRLreg x y z flags) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + flags := v_2 + v.reset(OpARMADCshiftRLreg) + v.AddArg4(x, y, z, flags) + return true + } + break + } + 
// match: (ADC x (SRA y z) flags) + // result: (ADCshiftRAreg x y z flags) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRA { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + flags := v_2 + v.reset(OpARMADCshiftRAreg) + v.AddArg4(x, y, z, flags) + return true + } + break + } + return false +} +func rewriteValueARM_OpARMADCconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADCconst [c] (ADDconst [d] x) flags) + // result: (ADCconst [c+d] x flags) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMADDconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + flags := v_1 + v.reset(OpARMADCconst) + v.AuxInt = int32ToAuxInt(c + d) + v.AddArg2(x, flags) + return true + } + // match: (ADCconst [c] (SUBconst [d] x) flags) + // result: (ADCconst [c-d] x flags) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMSUBconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + flags := v_1 + v.reset(OpARMADCconst) + v.AuxInt = int32ToAuxInt(c - d) + v.AddArg2(x, flags) + return true + } + return false +} +func rewriteValueARM_OpARMADCshiftLL(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADCshiftLL (MOVWconst [c]) x [d] flags) + // result: (ADCconst [c] (SLLconst x [d]) flags) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + flags := v_2 + v.reset(OpARMADCconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg2(v0, flags) + return true + } + // match: (ADCshiftLL x (MOVWconst [c]) [d] flags) + // result: (ADCconst x [c< x y) flags) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + flags := v_3 + v.reset(OpARMADCconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, 
OpARMSLL, x.Type) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) + return true + } + // match: (ADCshiftLLreg x y (MOVWconst [c]) flags) + // cond: 0 <= c && c < 32 + // result: (ADCshiftLL x y [c] flags) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + flags := v_3 + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMADCshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, flags) + return true + } + return false +} +func rewriteValueARM_OpARMADCshiftRA(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADCshiftRA (MOVWconst [c]) x [d] flags) + // result: (ADCconst [c] (SRAconst x [d]) flags) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + flags := v_2 + v.reset(OpARMADCconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg2(v0, flags) + return true + } + // match: (ADCshiftRA x (MOVWconst [c]) [d] flags) + // result: (ADCconst x [c>>uint64(d)] flags) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + flags := v_2 + v.reset(OpARMADCconst) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) + v.AddArg2(x, flags) + return true + } + return false +} +func rewriteValueARM_OpARMADCshiftRAreg(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADCshiftRAreg (MOVWconst [c]) x y flags) + // result: (ADCconst [c] (SRA x y) flags) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + flags := v_3 + v.reset(OpARMADCconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) + return true + } + // match: (ADCshiftRAreg x y (MOVWconst [c]) flags) + // 
cond: 0 <= c && c < 32 + // result: (ADCshiftRA x y [c] flags) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + flags := v_3 + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMADCshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, flags) + return true + } + return false +} +func rewriteValueARM_OpARMADCshiftRL(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADCshiftRL (MOVWconst [c]) x [d] flags) + // result: (ADCconst [c] (SRLconst x [d]) flags) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + flags := v_2 + v.reset(OpARMADCconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg2(v0, flags) + return true + } + // match: (ADCshiftRL x (MOVWconst [c]) [d] flags) + // result: (ADCconst x [int32(uint32(c)>>uint64(d))] flags) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + flags := v_2 + v.reset(OpARMADCconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + v.AddArg2(x, flags) + return true + } + return false +} +func rewriteValueARM_OpARMADCshiftRLreg(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADCshiftRLreg (MOVWconst [c]) x y flags) + // result: (ADCconst [c] (SRL x y) flags) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + flags := v_3 + v.reset(OpARMADCconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) + return true + } + // match: (ADCshiftRLreg x y (MOVWconst [c]) flags) + // cond: 0 <= c && c < 32 + // result: (ADCshiftRL x y [c] flags) + for { + x := v_0 + y := v_1 + if v_2.Op 
!= OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + flags := v_3 + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMADCshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, flags) + return true + } + return false +} +func rewriteValueARM_OpARMADD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADD x (MOVWconst [c])) + // cond: !t.IsPtr() + // result: (ADDconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMMOVWconst { + continue + } + t := v_1.Type + c := auxIntToInt32(v_1.AuxInt) + if !(!t.IsPtr()) { + continue + } + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (ADD x (SLLconst [c] y)) + // result: (ADDshiftLL x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSLLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMADDshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADD x (SRLconst [c] y)) + // result: (ADDshiftRL x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMADDshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADD x (SRAconst [c] y)) + // result: (ADDshiftRA x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRAconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMADDshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADD x (SLL y z)) + // result: (ADDshiftLLreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSLL { + continue + } + z := v_1.Args[1] 
+ y := v_1.Args[0] + v.reset(OpARMADDshiftLLreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (ADD x (SRL y z)) + // result: (ADDshiftRLreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMADDshiftRLreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (ADD x (SRA y z)) + // result: (ADDshiftRAreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRA { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMADDshiftRAreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (ADD x (RSBconst [0] y)) + // result: (SUB x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMRSBconst || auxIntToInt32(v_1.AuxInt) != 0 { + continue + } + y := v_1.Args[0] + v.reset(OpARMSUB) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADD (RSBconst [c] x) (RSBconst [d] y)) + // result: (RSBconst [c+d] (ADD x y)) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpARMRSBconst { + continue + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if v_1.Op != OpARMRSBconst { + continue + } + d := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(c + d) + v0 := b.NewValue0(v.Pos, OpARMADD, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (ADD (MUL x y) a) + // result: (MULA x y a) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpARMMUL { + continue + } + y := v_0.Args[1] + x := v_0.Args[0] + a := v_1 + v.reset(OpARMMULA) + v.AddArg3(x, y, a) + return true + } + break + } + return false +} +func rewriteValueARM_OpARMADDD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDD a (MULD x y)) + // cond: a.Uses == 1 && 
buildcfg.GOARM.Version >= 6 + // result: (MULAD a x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + if v_1.Op != OpARMMULD { + continue + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) { + continue + } + v.reset(OpARMMULAD) + v.AddArg3(a, x, y) + return true + } + break + } + // match: (ADDD a (NMULD x y)) + // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6 + // result: (MULSD a x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + if v_1.Op != OpARMNMULD { + continue + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) { + continue + } + v.reset(OpARMMULSD) + v.AddArg3(a, x, y) + return true + } + break + } + return false +} +func rewriteValueARM_OpARMADDF(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDF a (MULF x y)) + // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6 + // result: (MULAF a x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + if v_1.Op != OpARMMULF { + continue + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) { + continue + } + v.reset(OpARMMULAF) + v.AddArg3(a, x, y) + return true + } + break + } + // match: (ADDF a (NMULF x y)) + // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6 + // result: (MULSF a x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + if v_1.Op != OpARMNMULF { + continue + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) { + continue + } + v.reset(OpARMMULSF) + v.AddArg3(a, x, y) + return true + } + break + } + return false +} +func rewriteValueARM_OpARMADDS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDS x (MOVWconst [c])) + // result: (ADDSconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMMOVWconst { + continue + 
} + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMADDSconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (ADDS x (SLLconst [c] y)) + // result: (ADDSshiftLL x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSLLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMADDSshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDS x (SRLconst [c] y)) + // result: (ADDSshiftRL x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMADDSshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDS x (SRAconst [c] y)) + // result: (ADDSshiftRA x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRAconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMADDSshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDS x (SLL y z)) + // result: (ADDSshiftLLreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSLL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMADDSshiftLLreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (ADDS x (SRL y z)) + // result: (ADDSshiftRLreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMADDSshiftRLreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (ADDS x (SRA y z)) + // result: (ADDSshiftRAreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRA { + continue 
+ } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMADDSshiftRAreg) + v.AddArg3(x, y, z) + return true + } + break + } + return false +} +func rewriteValueARM_OpARMADDSshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADDSshiftLL (MOVWconst [c]) x [d]) + // result: (ADDSconst [c] (SLLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMADDSconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ADDSshiftLL x (MOVWconst [c]) [d]) + // result: (ADDSconst x [c< x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMADDSconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (ADDSshiftLLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (ADDSshiftLL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMADDSshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMADDSshiftRA(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADDSshiftRA (MOVWconst [c]) x [d]) + // result: (ADDSconst [c] (SRAconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMADDSconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ADDSshiftRA x (MOVWconst [c]) [d]) + // result: (ADDSconst x [c>>uint64(d)]) + for { + d := 
auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMADDSconst) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMADDSshiftRAreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADDSshiftRAreg (MOVWconst [c]) x y) + // result: (ADDSconst [c] (SRA x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMADDSconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (ADDSshiftRAreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (ADDSshiftRA x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMADDSshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMADDSshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADDSshiftRL (MOVWconst [c]) x [d]) + // result: (ADDSconst [c] (SRLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMADDSconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ADDSshiftRL x (MOVWconst [c]) [d]) + // result: (ADDSconst x [int32(uint32(c)>>uint64(d))]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMADDSconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + v.AddArg(x) + return true + } + return false +} +func 
rewriteValueARM_OpARMADDSshiftRLreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADDSshiftRLreg (MOVWconst [c]) x y) + // result: (ADDSconst [c] (SRL x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMADDSconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (ADDSshiftRLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (ADDSshiftRL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMADDSshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMADDconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) + // result: (MOVWaddr [off1+off2] {sym} ptr) + for { + off1 := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + v.reset(OpARMMOVWaddr) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg(ptr) + return true + } + // match: (ADDconst [0] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ADDconst [c] x) + // cond: !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) + // result: (SUBconst [-c] x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(!isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c))) { + break + } + v.reset(OpARMSUBconst) + v.AuxInt = int32ToAuxInt(-c) + v.AddArg(x) + return true + } + // match: (ADDconst [c] x) + // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff + // result: (SUBconst [-c] x) + for { + c := auxIntToInt32(v.AuxInt) + x := 
v_0 + if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) { + break + } + v.reset(OpARMSUBconst) + v.AuxInt = int32ToAuxInt(-c) + v.AddArg(x) + return true + } + // match: (ADDconst [c] (MOVWconst [d])) + // result: (MOVWconst [c+d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(c + d) + return true + } + // match: (ADDconst [c] (ADDconst [d] x)) + // result: (ADDconst [c+d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMADDconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(c + d) + v.AddArg(x) + return true + } + // match: (ADDconst [c] (SUBconst [d] x)) + // result: (ADDconst [c-d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMSUBconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(c - d) + v.AddArg(x) + return true + } + // match: (ADDconst [c] (RSBconst [d] x)) + // result: (RSBconst [c+d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMRSBconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(c + d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMADDshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ADDshiftLL (MOVWconst [c]) x [d]) + // result: (ADDconst [c] (SLLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ADDshiftLL x (MOVWconst [c]) [d]) + // 
result: (ADDconst x [c< [8] (BFXU [int32(armBFAuxInt(8, 8))] x) x) + // result: (REV16 x) + for { + if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != int32(armBFAuxInt(8, 8)) { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARMREV16) + v.AddArg(x) + return true + } + // match: (ADDshiftLL [8] (SRLconst [24] (SLLconst [16] x)) x) + // cond: buildcfg.GOARM.Version>=6 + // result: (REV16 x) + for { + if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMSLLconst || auxIntToInt32(v_0_0.AuxInt) != 16 { + break + } + x := v_0_0.Args[0] + if x != v_1 || !(buildcfg.GOARM.Version >= 6) { + break + } + v.reset(OpARMREV16) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMADDshiftLLreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADDshiftLLreg (MOVWconst [c]) x y) + // result: (ADDconst [c] (SLL x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (ADDshiftLLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (ADDshiftLL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMADDshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMADDshiftRA(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADDshiftRA (MOVWconst [c]) x [d]) + // result: (ADDconst [c] (SRAconst x [d])) + for { + d := 
auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ADDshiftRA x (MOVWconst [c]) [d]) + // result: (ADDconst x [c>>uint64(d)]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMADDshiftRAreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADDshiftRAreg (MOVWconst [c]) x y) + // result: (ADDconst [c] (SRA x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (ADDshiftRAreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (ADDshiftRA x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMADDshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMADDshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADDshiftRL (MOVWconst [c]) x [d]) + // result: (ADDconst [c] (SRLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: 
(ADDshiftRL x (MOVWconst [c]) [d]) + // result: (ADDconst x [int32(uint32(c)>>uint64(d))]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMADDshiftRLreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADDshiftRLreg (MOVWconst [c]) x y) + // result: (ADDconst [c] (SRL x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (ADDshiftRLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (ADDshiftRL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMADDshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMAND(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AND x (MOVWconst [c])) + // result: (ANDconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMMOVWconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMANDconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (AND x (SLLconst [c] y)) + // result: (ANDshiftLL x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSLLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMANDshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (AND x (SRLconst [c] y)) + // 
result: (ANDshiftRL x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMANDshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (AND x (SRAconst [c] y)) + // result: (ANDshiftRA x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRAconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMANDshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (AND x (SLL y z)) + // result: (ANDshiftLLreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSLL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMANDshiftLLreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (AND x (SRL y z)) + // result: (ANDshiftRLreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMANDshiftRLreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (AND x (SRA y z)) + // result: (ANDshiftRAreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRA { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMANDshiftRAreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (AND x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (AND x (MVN y)) + // result: (BIC x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMMVN { + continue + } + y := v_1.Args[0] + v.reset(OpARMBIC) + v.AddArg2(x, y) + return true + } + break + } + // match: (AND x (MVNshiftLL y [c])) + // 
result: (BICshiftLL x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMMVNshiftLL { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMBICshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (AND x (MVNshiftRL y [c])) + // result: (BICshiftRL x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMMVNshiftRL { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMBICshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (AND x (MVNshiftRA y [c])) + // result: (BICshiftRA x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMMVNshiftRA { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMBICshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + return false +} +func rewriteValueARM_OpARMANDconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ANDconst [0] _) + // result: (MOVWconst [0]) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (ANDconst [c] x) + // cond: int32(c)==-1 + // result: x + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(int32(c) == -1) { + break + } + v.copyOf(x) + return true + } + // match: (ANDconst [c] x) + // cond: !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) + // result: (BICconst [int32(^uint32(c))] x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(!isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c))) { + break + } + v.reset(OpARMBICconst) + v.AuxInt = int32ToAuxInt(int32(^uint32(c))) + v.AddArg(x) + return true + } + // match: (ANDconst [c] x) + // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff + // 
result: (BICconst [int32(^uint32(c))] x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) { + break + } + v.reset(OpARMBICconst) + v.AuxInt = int32ToAuxInt(int32(^uint32(c))) + v.AddArg(x) + return true + } + // match: (ANDconst [c] (MOVWconst [d])) + // result: (MOVWconst [c&d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(c & d) + return true + } + // match: (ANDconst [c] (ANDconst [d] x)) + // result: (ANDconst [c&d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMANDconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARMANDconst) + v.AuxInt = int32ToAuxInt(c & d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMANDshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ANDshiftLL (MOVWconst [c]) x [d]) + // result: (ANDconst [c] (SLLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMANDconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ANDshiftLL x (MOVWconst [c]) [d]) + // result: (ANDconst x [c< x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMANDconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (ANDshiftLLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (ANDshiftLL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { 
+ break + } + v.reset(OpARMANDshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMANDshiftRA(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ANDshiftRA (MOVWconst [c]) x [d]) + // result: (ANDconst [c] (SRAconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMANDconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ANDshiftRA x (MOVWconst [c]) [d]) + // result: (ANDconst x [c>>uint64(d)]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMANDconst) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + // match: (ANDshiftRA y:(SRAconst x [c]) x [c]) + // result: y + for { + c := auxIntToInt32(v.AuxInt) + y := v_0 + if y.Op != OpARMSRAconst || auxIntToInt32(y.AuxInt) != c { + break + } + x := y.Args[0] + if x != v_1 { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueARM_OpARMANDshiftRAreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ANDshiftRAreg (MOVWconst [c]) x y) + // result: (ANDconst [c] (SRA x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMANDconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (ANDshiftRAreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (ANDshiftRA x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMANDshiftRA) + 
v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMANDshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ANDshiftRL (MOVWconst [c]) x [d]) + // result: (ANDconst [c] (SRLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMANDconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ANDshiftRL x (MOVWconst [c]) [d]) + // result: (ANDconst x [int32(uint32(c)>>uint64(d))]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMANDconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + v.AddArg(x) + return true + } + // match: (ANDshiftRL y:(SRLconst x [c]) x [c]) + // result: y + for { + c := auxIntToInt32(v.AuxInt) + y := v_0 + if y.Op != OpARMSRLconst || auxIntToInt32(y.AuxInt) != c { + break + } + x := y.Args[0] + if x != v_1 { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueARM_OpARMANDshiftRLreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ANDshiftRLreg (MOVWconst [c]) x y) + // result: (ANDconst [c] (SRL x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMANDconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (ANDshiftRLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (ANDshiftRL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMANDshiftRL) + v.AuxInt = 
int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMBFX(v *Value) bool { + v_0 := v.Args[0] + // match: (BFX [c] (MOVWconst [d])) + // result: (MOVWconst [d<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8))]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(d << (32 - uint32(c&0xff) - uint32(c>>8)) >> (32 - uint32(c>>8))) + return true + } + return false +} +func rewriteValueARM_OpARMBFXU(v *Value) bool { + v_0 := v.Args[0] + // match: (BFXU [c] (MOVWconst [d])) + // result: (MOVWconst [int32(uint32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8)))]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(int32(uint32(d) << (32 - uint32(c&0xff) - uint32(c>>8)) >> (32 - uint32(c>>8)))) + return true + } + return false +} +func rewriteValueARM_OpARMBIC(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (BIC x (MOVWconst [c])) + // result: (BICconst [c] x) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMBICconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (BIC x (SLLconst [c] y)) + // result: (BICshiftLL x y [c]) + for { + x := v_0 + if v_1.Op != OpARMSLLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMBICshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (BIC x (SRLconst [c] y)) + // result: (BICshiftRL x y [c]) + for { + x := v_0 + if v_1.Op != OpARMSRLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMBICshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (BIC x (SRAconst [c] y)) + // result: (BICshiftRA x y [c]) + for { + x := v_0 
+ if v_1.Op != OpARMSRAconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMBICshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (BIC x (SLL y z)) + // result: (BICshiftLLreg x y z) + for { + x := v_0 + if v_1.Op != OpARMSLL { + break + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMBICshiftLLreg) + v.AddArg3(x, y, z) + return true + } + // match: (BIC x (SRL y z)) + // result: (BICshiftRLreg x y z) + for { + x := v_0 + if v_1.Op != OpARMSRL { + break + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMBICshiftRLreg) + v.AddArg3(x, y, z) + return true + } + // match: (BIC x (SRA y z)) + // result: (BICshiftRAreg x y z) + for { + x := v_0 + if v_1.Op != OpARMSRA { + break + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMBICshiftRAreg) + v.AddArg3(x, y, z) + return true + } + // match: (BIC x x) + // result: (MOVWconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM_OpARMBICconst(v *Value) bool { + v_0 := v.Args[0] + // match: (BICconst [0] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (BICconst [c] _) + // cond: int32(c)==-1 + // result: (MOVWconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if !(int32(c) == -1) { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (BICconst [c] x) + // cond: !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) + // result: (ANDconst [int32(^uint32(c))] x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(!isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c))) { + break + } + v.reset(OpARMANDconst) + v.AuxInt = int32ToAuxInt(int32(^uint32(c))) + v.AddArg(x) + return true + } + // match: (BICconst [c] x) + // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && 
^uint32(c)<=0xffff + // result: (ANDconst [int32(^uint32(c))] x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) { + break + } + v.reset(OpARMANDconst) + v.AuxInt = int32ToAuxInt(int32(^uint32(c))) + v.AddArg(x) + return true + } + // match: (BICconst [c] (MOVWconst [d])) + // result: (MOVWconst [d&^c]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(d &^ c) + return true + } + // match: (BICconst [c] (BICconst [d] x)) + // result: (BICconst [c|d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMBICconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARMBICconst) + v.AuxInt = int32ToAuxInt(c | d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMBICshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (BICshiftLL x (MOVWconst [c]) [d]) + // result: (BICconst x [c<>uint64(d)]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMBICconst) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + // match: (BICshiftRA (SRAconst x [c]) x [c]) + // result: (MOVWconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM_OpARMBICshiftRAreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (BICshiftRAreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (BICshiftRA x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := 
auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMBICshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMBICshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (BICshiftRL x (MOVWconst [c]) [d]) + // result: (BICconst x [int32(uint32(c)>>uint64(d))]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMBICconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + v.AddArg(x) + return true + } + // match: (BICshiftRL (SRLconst x [c]) x [c]) + // result: (MOVWconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM_OpARMBICshiftRLreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (BICshiftRLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (BICshiftRL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMBICshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMCMN(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMN x (MOVWconst [c])) + // result: (CMNconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMMOVWconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMCMNconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (CMN x (SLLconst [c] y)) + // result: (CMNshiftLL x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op 
!= OpARMSLLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMCMNshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (CMN x (SRLconst [c] y)) + // result: (CMNshiftRL x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMCMNshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (CMN x (SRAconst [c] y)) + // result: (CMNshiftRA x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRAconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMCMNshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (CMN x (SLL y z)) + // result: (CMNshiftLLreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSLL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMCMNshiftLLreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (CMN x (SRL y z)) + // result: (CMNshiftRLreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMCMNshiftRLreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (CMN x (SRA y z)) + // result: (CMNshiftRAreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRA { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMCMNshiftRAreg) + v.AddArg3(x, y, z) + return true + } + break + } + return false +} +func rewriteValueARM_OpARMCMNconst(v *Value) bool { + v_0 := v.Args[0] + // match: (CMNconst (MOVWconst [x]) [y]) + // result: (FlagConstant [addFlags32(x,y)]) + for { + y 
:= auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMFlagConstant) + v.AuxInt = flagConstantToAuxInt(addFlags32(x, y)) + return true + } + return false +} +func rewriteValueARM_OpARMCMNshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMNshiftLL (MOVWconst [c]) x [d]) + // result: (CMNconst [c] (SLLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMCMNconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMNshiftLL x (MOVWconst [c]) [d]) + // result: (CMNconst x [c< x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMCMNconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (CMNshiftLLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (CMNshiftLL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMCMNshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMCMNshiftRA(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMNshiftRA (MOVWconst [c]) x [d]) + // result: (CMNconst [c] (SRAconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMCMNconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMNshiftRA x (MOVWconst [c]) [d]) + 
// result: (CMNconst x [c>>uint64(d)]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMCMNconst) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMCMNshiftRAreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMNshiftRAreg (MOVWconst [c]) x y) + // result: (CMNconst [c] (SRA x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMCMNconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (CMNshiftRAreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (CMNshiftRA x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMCMNshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMCMNshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMNshiftRL (MOVWconst [c]) x [d]) + // result: (CMNconst [c] (SRLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMCMNconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMNshiftRL x (MOVWconst [c]) [d]) + // result: (CMNconst x [int32(uint32(c)>>uint64(d))]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMCMNconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + v.AddArg(x) + return true + } + return false +} 
+func rewriteValueARM_OpARMCMNshiftRLreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMNshiftRLreg (MOVWconst [c]) x y) + // result: (CMNconst [c] (SRL x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMCMNconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (CMNshiftRLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (CMNshiftRL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMCMNshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMCMOVWHSconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVWHSconst _ (FlagConstant [fc]) [c]) + // cond: fc.uge() + // result: (MOVWconst [c]) + for { + c := auxIntToInt32(v.AuxInt) + if v_1.Op != OpARMFlagConstant { + break + } + fc := auxIntToFlagConstant(v_1.AuxInt) + if !(fc.uge()) { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(c) + return true + } + // match: (CMOVWHSconst x (FlagConstant [fc]) [c]) + // cond: fc.ult() + // result: x + for { + x := v_0 + if v_1.Op != OpARMFlagConstant { + break + } + fc := auxIntToFlagConstant(v_1.AuxInt) + if !(fc.ult()) { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWHSconst x (InvertFlags flags) [c]) + // result: (CMOVWLSconst x flags [c]) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMInvertFlags { + break + } + flags := v_1.Args[0] + v.reset(OpARMCMOVWLSconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, flags) + return true + } + return false +} +func rewriteValueARM_OpARMCMOVWLSconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVWLSconst _ (FlagConstant 
[fc]) [c]) + // cond: fc.ule() + // result: (MOVWconst [c]) + for { + c := auxIntToInt32(v.AuxInt) + if v_1.Op != OpARMFlagConstant { + break + } + fc := auxIntToFlagConstant(v_1.AuxInt) + if !(fc.ule()) { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(c) + return true + } + // match: (CMOVWLSconst x (FlagConstant [fc]) [c]) + // cond: fc.ugt() + // result: x + for { + x := v_0 + if v_1.Op != OpARMFlagConstant { + break + } + fc := auxIntToFlagConstant(v_1.AuxInt) + if !(fc.ugt()) { + break + } + v.copyOf(x) + return true + } + // match: (CMOVWLSconst x (InvertFlags flags) [c]) + // result: (CMOVWHSconst x flags [c]) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMInvertFlags { + break + } + flags := v_1.Args[0] + v.reset(OpARMCMOVWHSconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, flags) + return true + } + return false +} +func rewriteValueARM_OpARMCMP(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMP x (MOVWconst [c])) + // result: (CMPconst [c] x) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMCMPconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (CMP (MOVWconst [c]) x) + // result: (InvertFlags (CMPconst [c] x)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMP x y) + // cond: canonLessThan(x,y) + // result: (InvertFlags (CMP y x)) + for { + x := v_0 + y := v_1 + if !(canonLessThan(x, y)) { + break + } + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + // match: (CMP x (SLLconst [c] y)) + // result: (CMPshiftLL x y [c]) + for { + x := v_0 + if v_1.Op != OpARMSLLconst { + break + } 
+ c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMCMPshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (CMP (SLLconst [c] y) x) + // result: (InvertFlags (CMPshiftLL x y [c])) + for { + if v_0.Op != OpARMSLLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + y := v_0.Args[0] + x := v_1 + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (CMP x (SRLconst [c] y)) + // result: (CMPshiftRL x y [c]) + for { + x := v_0 + if v_1.Op != OpARMSRLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMCMPshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (CMP (SRLconst [c] y) x) + // result: (InvertFlags (CMPshiftRL x y [c])) + for { + if v_0.Op != OpARMSRLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + y := v_0.Args[0] + x := v_1 + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (CMP x (SRAconst [c] y)) + // result: (CMPshiftRA x y [c]) + for { + x := v_0 + if v_1.Op != OpARMSRAconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMCMPshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (CMP (SRAconst [c] y) x) + // result: (InvertFlags (CMPshiftRA x y [c])) + for { + if v_0.Op != OpARMSRAconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + y := v_0.Args[0] + x := v_1 + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (CMP x (SLL y z)) + // result: (CMPshiftLLreg x y z) + for { + x := v_0 + if v_1.Op != OpARMSLL { + break + } + z := v_1.Args[1] + y := v_1.Args[0] + 
v.reset(OpARMCMPshiftLLreg) + v.AddArg3(x, y, z) + return true + } + // match: (CMP (SLL y z) x) + // result: (InvertFlags (CMPshiftLLreg x y z)) + for { + if v_0.Op != OpARMSLL { + break + } + z := v_0.Args[1] + y := v_0.Args[0] + x := v_1 + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + v.AddArg(v0) + return true + } + // match: (CMP x (SRL y z)) + // result: (CMPshiftRLreg x y z) + for { + x := v_0 + if v_1.Op != OpARMSRL { + break + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMCMPshiftRLreg) + v.AddArg3(x, y, z) + return true + } + // match: (CMP (SRL y z) x) + // result: (InvertFlags (CMPshiftRLreg x y z)) + for { + if v_0.Op != OpARMSRL { + break + } + z := v_0.Args[1] + y := v_0.Args[0] + x := v_1 + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + v.AddArg(v0) + return true + } + // match: (CMP x (SRA y z)) + // result: (CMPshiftRAreg x y z) + for { + x := v_0 + if v_1.Op != OpARMSRA { + break + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMCMPshiftRAreg) + v.AddArg3(x, y, z) + return true + } + // match: (CMP (SRA y z) x) + // result: (InvertFlags (CMPshiftRAreg x y z)) + for { + if v_0.Op != OpARMSRA { + break + } + z := v_0.Args[1] + y := v_0.Args[0] + x := v_1 + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM_OpARMCMPD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMPD x (MOVDconst [0])) + // result: (CMPD0 x) + for { + x := v_0 + if v_1.Op != OpARMMOVDconst || auxIntToFloat64(v_1.AuxInt) != 0 { + break + } + v.reset(OpARMCMPD0) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMCMPF(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMPF x (MOVFconst [0])) + // result: (CMPF0 x) + for { + x := v_0 + if 
v_1.Op != OpARMMOVFconst || auxIntToFloat64(v_1.AuxInt) != 0 { + break + } + v.reset(OpARMCMPF0) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMCMPconst(v *Value) bool { + v_0 := v.Args[0] + // match: (CMPconst (MOVWconst [x]) [y]) + // result: (FlagConstant [subFlags32(x,y)]) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMFlagConstant) + v.AuxInt = flagConstantToAuxInt(subFlags32(x, y)) + return true + } + // match: (CMPconst (MOVBUreg _) [c]) + // cond: 0xff < c + // result: (FlagConstant [subFlags32(0, 1)]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVBUreg || !(0xff < c) { + break + } + v.reset(OpARMFlagConstant) + v.AuxInt = flagConstantToAuxInt(subFlags32(0, 1)) + return true + } + // match: (CMPconst (MOVHUreg _) [c]) + // cond: 0xffff < c + // result: (FlagConstant [subFlags32(0, 1)]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVHUreg || !(0xffff < c) { + break + } + v.reset(OpARMFlagConstant) + v.AuxInt = flagConstantToAuxInt(subFlags32(0, 1)) + return true + } + // match: (CMPconst (ANDconst _ [m]) [n]) + // cond: 0 <= m && m < n + // result: (FlagConstant [subFlags32(0, 1)]) + for { + n := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMANDconst { + break + } + m := auxIntToInt32(v_0.AuxInt) + if !(0 <= m && m < n) { + break + } + v.reset(OpARMFlagConstant) + v.AuxInt = flagConstantToAuxInt(subFlags32(0, 1)) + return true + } + // match: (CMPconst (SRLconst _ [c]) [n]) + // cond: 0 <= n && 0 < c && c <= 32 && (1< x [d]))) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v1 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v1.AuxInt = int32ToAuxInt(d) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // 
match: (CMPshiftLL x (MOVWconst [c]) [d]) + // result: (CMPconst x [c< x y))) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v1 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (CMPshiftLLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (CMPshiftLL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMCMPshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMCMPshiftRA(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMPshiftRA (MOVWconst [c]) x [d]) + // result: (InvertFlags (CMPconst [c] (SRAconst x [d]))) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v1 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v1.AuxInt = int32ToAuxInt(d) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (CMPshiftRA x (MOVWconst [c]) [d]) + // result: (CMPconst x [c>>uint64(d)]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMCMPconst) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMCMPshiftRAreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMPshiftRAreg (MOVWconst [c]) x y) + // result: (InvertFlags (CMPconst [c] (SRA x y))) + for { + if v_0.Op != OpARMMOVWconst { + break + 
} + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v1 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (CMPshiftRAreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (CMPshiftRA x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMCMPshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMCMPshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMPshiftRL (MOVWconst [c]) x [d]) + // result: (InvertFlags (CMPconst [c] (SRLconst x [d]))) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v1 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v1.AuxInt = int32ToAuxInt(d) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (CMPshiftRL x (MOVWconst [c]) [d]) + // result: (CMPconst x [int32(uint32(c)>>uint64(d))]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMCMPconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMCMPshiftRLreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMPshiftRLreg (MOVWconst [c]) x y) + // result: (InvertFlags (CMPconst [c] (SRL x y))) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMInvertFlags) + v0 := 
b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v1 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (CMPshiftRLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (CMPshiftRL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMCMPshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMEqual(v *Value) bool { + v_0 := v.Args[0] + // match: (Equal (FlagConstant [fc])) + // result: (MOVWconst [b2i32(fc.eq())]) + for { + if v_0.Op != OpARMFlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(b2i32(fc.eq())) + return true + } + // match: (Equal (InvertFlags x)) + // result: (Equal x) + for { + if v_0.Op != OpARMInvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARMEqual) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMGreaterEqual(v *Value) bool { + v_0 := v.Args[0] + // match: (GreaterEqual (FlagConstant [fc])) + // result: (MOVWconst [b2i32(fc.ge())]) + for { + if v_0.Op != OpARMFlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(b2i32(fc.ge())) + return true + } + // match: (GreaterEqual (InvertFlags x)) + // result: (LessEqual x) + for { + if v_0.Op != OpARMInvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARMLessEqual) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMGreaterEqualU(v *Value) bool { + v_0 := v.Args[0] + // match: (GreaterEqualU (FlagConstant [fc])) + // result: (MOVWconst [b2i32(fc.uge())]) + for { + if v_0.Op != OpARMFlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = 
int32ToAuxInt(b2i32(fc.uge())) + return true + } + // match: (GreaterEqualU (InvertFlags x)) + // result: (LessEqualU x) + for { + if v_0.Op != OpARMInvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARMLessEqualU) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMGreaterThan(v *Value) bool { + v_0 := v.Args[0] + // match: (GreaterThan (FlagConstant [fc])) + // result: (MOVWconst [b2i32(fc.gt())]) + for { + if v_0.Op != OpARMFlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(b2i32(fc.gt())) + return true + } + // match: (GreaterThan (InvertFlags x)) + // result: (LessThan x) + for { + if v_0.Op != OpARMInvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARMLessThan) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMGreaterThanU(v *Value) bool { + v_0 := v.Args[0] + // match: (GreaterThanU (FlagConstant [fc])) + // result: (MOVWconst [b2i32(fc.ugt())]) + for { + if v_0.Op != OpARMFlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(b2i32(fc.ugt())) + return true + } + // match: (GreaterThanU (InvertFlags x)) + // result: (LessThanU x) + for { + if v_0.Op != OpARMInvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARMLessThanU) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMLessEqual(v *Value) bool { + v_0 := v.Args[0] + // match: (LessEqual (FlagConstant [fc])) + // result: (MOVWconst [b2i32(fc.le())]) + for { + if v_0.Op != OpARMFlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(b2i32(fc.le())) + return true + } + // match: (LessEqual (InvertFlags x)) + // result: (GreaterEqual x) + for { + if v_0.Op != OpARMInvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARMGreaterEqual) + v.AddArg(x) + return true + } + return false +} +func 
rewriteValueARM_OpARMLessEqualU(v *Value) bool { + v_0 := v.Args[0] + // match: (LessEqualU (FlagConstant [fc])) + // result: (MOVWconst [b2i32(fc.ule())]) + for { + if v_0.Op != OpARMFlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(b2i32(fc.ule())) + return true + } + // match: (LessEqualU (InvertFlags x)) + // result: (GreaterEqualU x) + for { + if v_0.Op != OpARMInvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARMGreaterEqualU) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMLessThan(v *Value) bool { + v_0 := v.Args[0] + // match: (LessThan (FlagConstant [fc])) + // result: (MOVWconst [b2i32(fc.lt())]) + for { + if v_0.Op != OpARMFlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(b2i32(fc.lt())) + return true + } + // match: (LessThan (InvertFlags x)) + // result: (GreaterThan x) + for { + if v_0.Op != OpARMInvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARMGreaterThan) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMLessThanU(v *Value) bool { + v_0 := v.Args[0] + // match: (LessThanU (FlagConstant [fc])) + // result: (MOVWconst [b2i32(fc.ult())]) + for { + if v_0.Op != OpARMFlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(b2i32(fc.ult())) + return true + } + // match: (LessThanU (InvertFlags x)) + // result: (GreaterThanU x) + for { + if v_0.Op != OpARMInvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARMGreaterThanU) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMMOVBUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) + // result: (MOVBUload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADDconst { + 
break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + v.reset(OpARMMOVBUload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBUload [off1] {sym} (SUBconst [off2] ptr) mem) + // result: (MOVBUload [off1-off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMSUBconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + v.reset(OpARMMOVBUload) + v.AuxInt = int32ToAuxInt(off1 - off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVBUload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVBUreg x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARMMOVBstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARMMOVBUreg) + v.AddArg(x) + return true + } + // match: (MOVBUload [0] {sym} (ADD ptr idx) mem) + // cond: sym == nil + // result: (MOVBUloadidx ptr idx mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADD { + break + } + idx := v_0.Args[1] + ptr := 
v_0.Args[0] + mem := v_1 + if !(sym == nil) { + break + } + v.reset(OpARMMOVBUloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVBUload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVWconst [int32(read8(sym, int64(off)))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off)))) + return true + } + return false +} +func rewriteValueARM_OpARMMOVBUloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBUloadidx ptr idx (MOVBstoreidx ptr2 idx x _)) + // cond: isSamePtr(ptr, ptr2) + // result: (MOVBUreg x) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARMMOVBstoreidx { + break + } + x := v_2.Args[2] + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARMMOVBUreg) + v.AddArg(x) + return true + } + // match: (MOVBUloadidx ptr (MOVWconst [c]) mem) + // result: (MOVBUload [c] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(OpARMMOVBUload) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBUloadidx (MOVWconst [c]) ptr mem) + // result: (MOVBUload [c] ptr mem) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + ptr := v_1 + mem := v_2 + v.reset(OpARMMOVBUload) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVBUreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVBUreg x:(MOVBUload _ _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpARMMOVBUload { + break + } + v.reset(OpARMMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg (ANDconst [c] x)) + // result: (ANDconst [c&0xff] x) + for { + if v_0.Op != OpARMANDconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x 
:= v_0.Args[0] + v.reset(OpARMANDconst) + v.AuxInt = int32ToAuxInt(c & 0xff) + v.AddArg(x) + return true + } + // match: (MOVBUreg x:(MOVBUreg _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpARMMOVBUreg { + break + } + v.reset(OpARMMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg (MOVWconst [c])) + // result: (MOVWconst [int32(uint8(c))]) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(int32(uint8(c))) + return true + } + return false +} +func rewriteValueARM_OpARMMOVBload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) + // result: (MOVBload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + v.reset(OpARMMOVBload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBload [off1] {sym} (SUBconst [off2] ptr) mem) + // result: (MOVBload [off1-off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMSUBconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + v.reset(OpARMMOVBload) + v.AuxInt = int32ToAuxInt(off1 - off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVBload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = 
symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVBreg x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARMMOVBstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARMMOVBreg) + v.AddArg(x) + return true + } + // match: (MOVBload [0] {sym} (ADD ptr idx) mem) + // cond: sym == nil + // result: (MOVBloadidx ptr idx mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(sym == nil) { + break + } + v.reset(OpARMMOVBloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVBloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBloadidx ptr idx (MOVBstoreidx ptr2 idx x _)) + // cond: isSamePtr(ptr, ptr2) + // result: (MOVBreg x) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARMMOVBstoreidx { + break + } + x := v_2.Args[2] + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARMMOVBreg) + v.AddArg(x) + return true + } + // match: (MOVBloadidx ptr (MOVWconst [c]) mem) + // result: (MOVBload [c] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(OpARMMOVBload) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBloadidx (MOVWconst [c]) ptr mem) + // result: (MOVBload [c] ptr mem) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + ptr := v_1 + mem := v_2 + v.reset(OpARMMOVBload) + v.AuxInt 
= int32ToAuxInt(c) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVBreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVBreg x:(MOVBload _ _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpARMMOVBload { + break + } + v.reset(OpARMMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVBreg (ANDconst [c] x)) + // cond: c & 0x80 == 0 + // result: (ANDconst [c&0x7f] x) + for { + if v_0.Op != OpARMANDconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(c&0x80 == 0) { + break + } + v.reset(OpARMANDconst) + v.AuxInt = int32ToAuxInt(c & 0x7f) + v.AddArg(x) + return true + } + // match: (MOVBreg x:(MOVBreg _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpARMMOVBreg { + break + } + v.reset(OpARMMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVBreg (MOVWconst [c])) + // result: (MOVWconst [int32(int8(c))]) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(int32(int8(c))) + return true + } + return false +} +func rewriteValueARM_OpARMMOVBstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // result: (MOVBstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + v.reset(OpARMMOVBstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstore [off1] {sym} (SUBconst [off2] ptr) val mem) + // result: (MOVBstore [off1-off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMSUBconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + 
v.reset(OpARMMOVBstore) + v.AuxInt = int32ToAuxInt(off1 - off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVBstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARMMOVBreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpARMMOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARMMOVBUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpARMMOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARMMOVHreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpARMMOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := 
auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARMMOVHUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpARMMOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [0] {sym} (ADD ptr idx) val mem) + // cond: sym == nil + // result: (MOVBstoreidx ptr idx val mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(sym == nil) { + break + } + v.reset(OpARMMOVBstoreidx) + v.AddArg4(ptr, idx, val, mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVBstoreidx(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBstoreidx ptr (MOVWconst [c]) val mem) + // result: (MOVBstore [c] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + val := v_2 + mem := v_3 + v.reset(OpARMMOVBstore) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstoreidx (MOVWconst [c]) ptr val mem) + // result: (MOVBstore [c] ptr val mem) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + ptr := v_1 + val := v_2 + mem := v_3 + v.reset(OpARMMOVBstore) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVDload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) + // result: (MOVDload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + v.reset(OpARMMOVDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return 
true + } + // match: (MOVDload [off1] {sym} (SUBconst [off2] ptr) mem) + // result: (MOVDload [off1-off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMSUBconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + v.reset(OpARMMOVDload) + v.AuxInt = int32ToAuxInt(off1 - off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARMMOVDstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueARM_OpARMMOVDstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // result: (MOVDstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + v.reset(OpARMMOVDstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + 
v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVDstore [off1] {sym} (SUBconst [off2] ptr) val mem) + // result: (MOVDstore [off1-off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMSUBconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + v.reset(OpARMMOVDstore) + v.AuxInt = int32ToAuxInt(off1 - off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVDstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVFload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVFload [off1] {sym} (ADDconst [off2] ptr) mem) + // result: (MOVFload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + v.reset(OpARMMOVFload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVFload [off1] {sym} (SUBconst [off2] ptr) mem) + // result: (MOVFload [off1-off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMSUBconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + v.reset(OpARMMOVFload) + v.AuxInt = int32ToAuxInt(off1 - 
off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVFload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARMMOVFstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueARM_OpARMMOVFstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // result: (MOVFstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + v.reset(OpARMMOVFstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVFstore [off1] {sym} (SUBconst [off2] ptr) val mem) + // result: (MOVFstore [off1-off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMSUBconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val 
:= v_1 + mem := v_2 + v.reset(OpARMMOVFstore) + v.AuxInt = int32ToAuxInt(off1 - off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVFstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVHUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) + // result: (MOVHUload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + v.reset(OpARMMOVHUload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHUload [off1] {sym} (SUBconst [off2] ptr) mem) + // result: (MOVHUload [off1-off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMSUBconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + v.reset(OpARMMOVHUload) + v.AuxInt = int32ToAuxInt(off1 - off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if 
v_0.Op != OpARMMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVHUload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVHUreg x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARMMOVHstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARMMOVHUreg) + v.AddArg(x) + return true + } + // match: (MOVHUload [0] {sym} (ADD ptr idx) mem) + // cond: sym == nil + // result: (MOVHUloadidx ptr idx mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(sym == nil) { + break + } + v.reset(OpARMMOVHUloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVHUload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVWconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))) + return true + } + return false +} +func rewriteValueARM_OpARMMOVHUloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHUloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) + // cond: isSamePtr(ptr, ptr2) + // result: (MOVHUreg x) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARMMOVHstoreidx { + break + } + x := 
v_2.Args[2] + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARMMOVHUreg) + v.AddArg(x) + return true + } + // match: (MOVHUloadidx ptr (MOVWconst [c]) mem) + // result: (MOVHUload [c] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(OpARMMOVHUload) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHUloadidx (MOVWconst [c]) ptr mem) + // result: (MOVHUload [c] ptr mem) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + ptr := v_1 + mem := v_2 + v.reset(OpARMMOVHUload) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVHUreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVHUreg x:(MOVBUload _ _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpARMMOVBUload { + break + } + v.reset(OpARMMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUload _ _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpARMMOVHUload { + break + } + v.reset(OpARMMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg (ANDconst [c] x)) + // result: (ANDconst [c&0xffff] x) + for { + if v_0.Op != OpARMANDconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARMANDconst) + v.AuxInt = int32ToAuxInt(c & 0xffff) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVBUreg _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpARMMOVBUreg { + break + } + v.reset(OpARMMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUreg _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpARMMOVHUreg { + break + } + v.reset(OpARMMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg (MOVWconst [c])) + // result: (MOVWconst [int32(uint16(c))]) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := 
auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(int32(uint16(c))) + return true + } + return false +} +func rewriteValueARM_OpARMMOVHload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) + // result: (MOVHload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + v.reset(OpARMMOVHload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHload [off1] {sym} (SUBconst [off2] ptr) mem) + // result: (MOVHload [off1-off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMSUBconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + v.reset(OpARMMOVHload) + v.AuxInt = int32ToAuxInt(off1 - off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVHload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVHreg x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARMMOVHstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := 
v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARMMOVHreg) + v.AddArg(x) + return true + } + // match: (MOVHload [0] {sym} (ADD ptr idx) mem) + // cond: sym == nil + // result: (MOVHloadidx ptr idx mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(sym == nil) { + break + } + v.reset(OpARMMOVHloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVHloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) + // cond: isSamePtr(ptr, ptr2) + // result: (MOVHreg x) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARMMOVHstoreidx { + break + } + x := v_2.Args[2] + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARMMOVHreg) + v.AddArg(x) + return true + } + // match: (MOVHloadidx ptr (MOVWconst [c]) mem) + // result: (MOVHload [c] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(OpARMMOVHload) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHloadidx (MOVWconst [c]) ptr mem) + // result: (MOVHload [c] ptr mem) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + ptr := v_1 + mem := v_2 + v.reset(OpARMMOVHload) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVHreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVHreg x:(MOVBload _ _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpARMMOVBload { + break + } + v.reset(OpARMMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUload _ _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != 
OpARMMOVBUload { + break + } + v.reset(OpARMMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHload _ _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpARMMOVHload { + break + } + v.reset(OpARMMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHreg (ANDconst [c] x)) + // cond: c & 0x8000 == 0 + // result: (ANDconst [c&0x7fff] x) + for { + if v_0.Op != OpARMANDconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(c&0x8000 == 0) { + break + } + v.reset(OpARMANDconst) + v.AuxInt = int32ToAuxInt(c & 0x7fff) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBreg _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpARMMOVBreg { + break + } + v.reset(OpARMMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUreg _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpARMMOVBUreg { + break + } + v.reset(OpARMMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHreg _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpARMMOVHreg { + break + } + v.reset(OpARMMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHreg (MOVWconst [c])) + // result: (MOVWconst [int32(int16(c))]) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(int32(int16(c))) + return true + } + return false +} +func rewriteValueARM_OpARMMOVHstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // result: (MOVHstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + v.reset(OpARMMOVHstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstore [off1] 
{sym} (SUBconst [off2] ptr) val mem) + // result: (MOVHstore [off1-off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMSUBconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + v.reset(OpARMMOVHstore) + v.AuxInt = int32ToAuxInt(off1 - off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVHstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARMMOVHreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpARMMOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARMMOVHUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpARMMOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [0] {sym} (ADD ptr idx) val mem) + // cond: sym == nil + // result: (MOVHstoreidx ptr idx val mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + if v_0.Op 
!= OpARMADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(sym == nil) { + break + } + v.reset(OpARMMOVHstoreidx) + v.AddArg4(ptr, idx, val, mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVHstoreidx(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstoreidx ptr (MOVWconst [c]) val mem) + // result: (MOVHstore [c] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + val := v_2 + mem := v_3 + v.reset(OpARMMOVHstore) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstoreidx (MOVWconst [c]) ptr val mem) + // result: (MOVHstore [c] ptr val mem) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + ptr := v_1 + val := v_2 + mem := v_3 + v.reset(OpARMMOVHstore) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVWload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) + // result: (MOVWload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + v.reset(OpARMMOVWload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWload [off1] {sym} (SUBconst [off2] ptr) mem) + // result: (MOVWload [off1-off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMSUBconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + v.reset(OpARMMOVWload) + v.AuxInt = int32ToAuxInt(off1 - off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // 
match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVWload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARMMOVWstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.copyOf(x) + return true + } + // match: (MOVWload [0] {sym} (ADD ptr idx) mem) + // cond: sym == nil + // result: (MOVWloadidx ptr idx mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(sym == nil) { + break + } + v.reset(OpARMMOVWloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) + // cond: sym == nil + // result: (MOVWloadshiftLL ptr idx [c] mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADDshiftLL { + break + } + c := auxIntToInt32(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(sym == nil) { + break + } + v.reset(OpARMMOVWloadshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) + // cond: 
sym == nil + // result: (MOVWloadshiftRL ptr idx [c] mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADDshiftRL { + break + } + c := auxIntToInt32(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(sym == nil) { + break + } + v.reset(OpARMMOVWloadshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) + // cond: sym == nil + // result: (MOVWloadshiftRA ptr idx [c] mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADDshiftRA { + break + } + c := auxIntToInt32(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(sym == nil) { + break + } + v.reset(OpARMMOVWloadshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVWload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVWconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))) + return true + } + return false +} +func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _)) + // cond: isSamePtr(ptr, ptr2) + // result: x + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARMMOVWstoreidx { + break + } + x := v_2.Args[2] + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) { + break + } + v.copyOf(x) + return true + } + // match: (MOVWloadidx ptr (MOVWconst [c]) mem) + // result: (MOVWload [c] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(OpARMMOVWload) + v.AuxInt = 
int32ToAuxInt(c) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWloadidx (MOVWconst [c]) ptr mem) + // result: (MOVWload [c] ptr mem) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + ptr := v_1 + mem := v_2 + v.reset(OpARMMOVWload) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWloadidx ptr (SLLconst idx [c]) mem) + // result: (MOVWloadshiftLL ptr idx [c] mem) + for { + ptr := v_0 + if v_1.Op != OpARMSLLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + idx := v_1.Args[0] + mem := v_2 + v.reset(OpARMMOVWloadshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVWloadidx (SLLconst idx [c]) ptr mem) + // result: (MOVWloadshiftLL ptr idx [c] mem) + for { + if v_0.Op != OpARMSLLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + idx := v_0.Args[0] + ptr := v_1 + mem := v_2 + v.reset(OpARMMOVWloadshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVWloadidx ptr (SRLconst idx [c]) mem) + // result: (MOVWloadshiftRL ptr idx [c] mem) + for { + ptr := v_0 + if v_1.Op != OpARMSRLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + idx := v_1.Args[0] + mem := v_2 + v.reset(OpARMMOVWloadshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVWloadidx (SRLconst idx [c]) ptr mem) + // result: (MOVWloadshiftRL ptr idx [c] mem) + for { + if v_0.Op != OpARMSRLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + idx := v_0.Args[0] + ptr := v_1 + mem := v_2 + v.reset(OpARMMOVWloadshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVWloadidx ptr (SRAconst idx [c]) mem) + // result: (MOVWloadshiftRA ptr idx [c] mem) + for { + ptr := v_0 + if v_1.Op != OpARMSRAconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + idx := v_1.Args[0] + mem := v_2 + v.reset(OpARMMOVWloadshiftRA) + v.AuxInt = int32ToAuxInt(c) + 
v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVWloadidx (SRAconst idx [c]) ptr mem)
	// result: (MOVWloadshiftRA ptr idx [c] mem)
	for {
		if v_0.Op != OpARMSRAconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		idx := v_0.Args[0]
		ptr := v_1
		mem := v_2
		v.reset(OpARMMOVWloadshiftRA)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	return false
}

// rewriteValueARM_OpARMMOVWloadshiftLL rewrites shifted-index word loads:
// it forwards a just-stored value when the address matches, and folds a
// constant index into a plain MOVWload.
func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _))
	// cond: c==d && isSamePtr(ptr, ptr2)
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARMMOVWstoreshiftLL {
			break
		}
		d := auxIntToInt32(v_2.AuxInt)
		x := v_2.Args[2]
		ptr2 := v_2.Args[0]
		if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem)
	// result: (MOVWload [int32(uint32(c)<<uint64(d))] ptr mem)
	for {
		d := auxIntToInt32(v.AuxInt)
		ptr := v_0
		if v_1.Op != OpARMMOVWconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpARMMOVWload)
		// The effective address is ptr + (idx << d), so a constant
		// index c must be shifted LEFT (unsigned) before becoming the
		// load offset; an arithmetic right shift here was a corruption.
		v.AuxInt = int32ToAuxInt(int32(uint32(c) << uint64(d)))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _))
	// cond: c==d && isSamePtr(ptr, ptr2)
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARMMOVWstoreshiftRL {
			break
		}
		d := auxIntToInt32(v_2.AuxInt)
		x := v_2.Args[2]
		ptr2 := v_2.Args[0]
		if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem)
	// result: (MOVWload [int32(uint32(c)>>uint64(d))] ptr mem)
+ for { + d := auxIntToInt32(v.AuxInt) + ptr := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + v.reset(OpARMMOVWload) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVWnop(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVWnop (MOVWconst [c])) + // result: (MOVWconst [c]) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(c) + return true + } + return false +} +func rewriteValueARM_OpARMMOVWreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVWreg x) + // cond: x.Uses == 1 + // result: (MOVWnop x) + for { + x := v_0 + if !(x.Uses == 1) { + break + } + v.reset(OpARMMOVWnop) + v.AddArg(x) + return true + } + // match: (MOVWreg (MOVWconst [c])) + // result: (MOVWconst [c]) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(c) + return true + } + return false +} +func rewriteValueARM_OpARMMOVWstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // result: (MOVWstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + v.reset(OpARMMOVWstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [off1] {sym} (SUBconst [off2] ptr) val mem) + // result: (MOVWstore [off1-off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARMSUBconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + v.reset(OpARMMOVWstore) + 
v.AuxInt = int32ToAuxInt(off1 - off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVWstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [0] {sym} (ADD ptr idx) val mem) + // cond: sym == nil + // result: (MOVWstoreidx ptr idx val mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(sym == nil) { + break + } + v.reset(OpARMMOVWstoreidx) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) + // cond: sym == nil + // result: (MOVWstoreshiftLL ptr idx [c] val mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADDshiftLL { + break + } + c := auxIntToInt32(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(sym == nil) { + break + } + v.reset(OpARMMOVWstoreshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) + // cond: sym == nil + // result: (MOVWstoreshiftRL ptr idx [c] val mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADDshiftRL { + break + } + c := auxIntToInt32(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + 
mem := v_2 + if !(sym == nil) { + break + } + v.reset(OpARMMOVWstoreshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) + // cond: sym == nil + // result: (MOVWstoreshiftRA ptr idx [c] val mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + if v_0.Op != OpARMADDshiftRA { + break + } + c := auxIntToInt32(v_0.AuxInt) + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(sym == nil) { + break + } + v.reset(OpARMMOVWstoreshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg4(ptr, idx, val, mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWstoreidx ptr (MOVWconst [c]) val mem) + // result: (MOVWstore [c] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + val := v_2 + mem := v_3 + v.reset(OpARMMOVWstore) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstoreidx (MOVWconst [c]) ptr val mem) + // result: (MOVWstore [c] ptr val mem) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + ptr := v_1 + val := v_2 + mem := v_3 + v.reset(OpARMMOVWstore) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstoreidx ptr (SLLconst idx [c]) val mem) + // result: (MOVWstoreshiftLL ptr idx [c] val mem) + for { + ptr := v_0 + if v_1.Op != OpARMSLLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + idx := v_1.Args[0] + val := v_2 + mem := v_3 + v.reset(OpARMMOVWstoreshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVWstoreidx (SLLconst idx [c]) ptr val mem) + // result: (MOVWstoreshiftLL ptr idx [c] val mem) + for { + if v_0.Op != OpARMSLLconst { + break + } + c := 
auxIntToInt32(v_0.AuxInt)
		idx := v_0.Args[0]
		ptr := v_1
		val := v_2
		mem := v_3
		v.reset(OpARMMOVWstoreshiftLL)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg4(ptr, idx, val, mem)
		return true
	}
	// match: (MOVWstoreidx ptr (SRLconst idx [c]) val mem)
	// result: (MOVWstoreshiftRL ptr idx [c] val mem)
	for {
		ptr := v_0
		if v_1.Op != OpARMSRLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		idx := v_1.Args[0]
		val := v_2
		mem := v_3
		v.reset(OpARMMOVWstoreshiftRL)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg4(ptr, idx, val, mem)
		return true
	}
	// match: (MOVWstoreidx (SRLconst idx [c]) ptr val mem)
	// result: (MOVWstoreshiftRL ptr idx [c] val mem)
	for {
		if v_0.Op != OpARMSRLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		idx := v_0.Args[0]
		ptr := v_1
		val := v_2
		mem := v_3
		v.reset(OpARMMOVWstoreshiftRL)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg4(ptr, idx, val, mem)
		return true
	}
	// match: (MOVWstoreidx ptr (SRAconst idx [c]) val mem)
	// result: (MOVWstoreshiftRA ptr idx [c] val mem)
	for {
		ptr := v_0
		if v_1.Op != OpARMSRAconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		idx := v_1.Args[0]
		val := v_2
		mem := v_3
		v.reset(OpARMMOVWstoreshiftRA)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg4(ptr, idx, val, mem)
		return true
	}
	// match: (MOVWstoreidx (SRAconst idx [c]) ptr val mem)
	// result: (MOVWstoreshiftRA ptr idx [c] val mem)
	for {
		if v_0.Op != OpARMSRAconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		idx := v_0.Args[0]
		ptr := v_1
		val := v_2
		mem := v_3
		v.reset(OpARMMOVWstoreshiftRA)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg4(ptr, idx, val, mem)
		return true
	}
	return false
}

// rewriteValueARM_OpARMMOVWstoreshiftLL folds a constant index of a
// left-shift-indexed word store into a plain MOVWstore offset.
func rewriteValueARM_OpARMMOVWstoreshiftLL(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem)
	// result: (MOVWstore [int32(uint32(c)<<uint64(d))] ptr val mem)
	for {
		d := auxIntToInt32(v.AuxInt)
		ptr := v_0
		if v_1.Op != OpARMMOVWconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		val := v_2
		mem := v_3
		v.reset(OpARMMOVWstore)
		// Store address is ptr + (idx << d): the constant index is
		// shifted LEFT (unsigned), mirroring MOVWloadshiftLL; the
		// arithmetic right shift here was a corruption.
		v.AuxInt = int32ToAuxInt(int32(uint32(c) << uint64(d)))
		v.AddArg3(ptr, val, mem)
		return true
	}
	return false
}
func rewriteValueARM_OpARMMOVWstoreshiftRL(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem)
	// result: (MOVWstore [int32(uint32(c)>>uint64(d))] ptr val mem)
	for {
		d := auxIntToInt32(v.AuxInt)
		ptr := v_0
		if v_1.Op != OpARMMOVWconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		val := v_2
		mem := v_3
		v.reset(OpARMMOVWstore)
		v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
		v.AddArg3(ptr, val, mem)
		return true
	}
	return false
}
func rewriteValueARM_OpARMMUL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (MUL x (MOVWconst [c]))
	// cond: int32(c) == -1
	// result: (RSBconst [0] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARMMOVWconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			if !(int32(c) == -1) {
				continue
			}
			v.reset(OpARMRSBconst)
			v.AuxInt = int32ToAuxInt(0)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (MUL _ (MOVWconst [0]))
	// result: (MOVWconst [0])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
				continue
			}
			v.reset(OpARMMOVWconst)
			v.AuxInt = int32ToAuxInt(0)
			return true
		}
		break
	}
	// match: (MUL x (MOVWconst [1]))
	// result: x
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 1 {
				continue
			}
			v.copyOf(x)
			return true
		}
		break
	}
	// match: (MUL x (MOVWconst [c]))
	// cond: isPowerOfTwo32(c)
	// result: (SLLconst [int32(log32(c))] x)
	for {
		for _i0 := 0; _i0 <= 1;
_i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMMOVWconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + if !(isPowerOfTwo32(c)) { + continue + } + v.reset(OpARMSLLconst) + v.AuxInt = int32ToAuxInt(int32(log32(c))) + v.AddArg(x) + return true + } + break + } + // match: (MUL x (MOVWconst [c])) + // cond: isPowerOfTwo32(c-1) && c >= 3 + // result: (ADDshiftLL x x [int32(log32(c-1))]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMMOVWconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + if !(isPowerOfTwo32(c-1) && c >= 3) { + continue + } + v.reset(OpARMADDshiftLL) + v.AuxInt = int32ToAuxInt(int32(log32(c - 1))) + v.AddArg2(x, x) + return true + } + break + } + // match: (MUL x (MOVWconst [c])) + // cond: isPowerOfTwo32(c+1) && c >= 7 + // result: (RSBshiftLL x x [int32(log32(c+1))]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMMOVWconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + if !(isPowerOfTwo32(c+1) && c >= 7) { + continue + } + v.reset(OpARMRSBshiftLL) + v.AuxInt = int32ToAuxInt(int32(log32(c + 1))) + v.AddArg2(x, x) + return true + } + break + } + // match: (MUL x (MOVWconst [c])) + // cond: c%3 == 0 && isPowerOfTwo32(c/3) + // result: (SLLconst [int32(log32(c/3))] (ADDshiftLL x x [1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMMOVWconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + if !(c%3 == 0 && isPowerOfTwo32(c/3)) { + continue + } + v.reset(OpARMSLLconst) + v.AuxInt = int32ToAuxInt(int32(log32(c / 3))) + v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v0.AuxInt = int32ToAuxInt(1) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + break + } + // match: (MUL x (MOVWconst [c])) + // cond: c%5 == 0 && isPowerOfTwo32(c/5) + // result: (SLLconst [int32(log32(c/5))] (ADDshiftLL x x [2])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 
{ + x := v_0 + if v_1.Op != OpARMMOVWconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + if !(c%5 == 0 && isPowerOfTwo32(c/5)) { + continue + } + v.reset(OpARMSLLconst) + v.AuxInt = int32ToAuxInt(int32(log32(c / 5))) + v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + break + } + // match: (MUL x (MOVWconst [c])) + // cond: c%7 == 0 && isPowerOfTwo32(c/7) + // result: (SLLconst [int32(log32(c/7))] (RSBshiftLL x x [3])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMMOVWconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + if !(c%7 == 0 && isPowerOfTwo32(c/7)) { + continue + } + v.reset(OpARMSLLconst) + v.AuxInt = int32ToAuxInt(int32(log32(c / 7))) + v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + break + } + // match: (MUL x (MOVWconst [c])) + // cond: c%9 == 0 && isPowerOfTwo32(c/9) + // result: (SLLconst [int32(log32(c/9))] (ADDshiftLL x x [3])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMMOVWconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + if !(c%9 == 0 && isPowerOfTwo32(c/9)) { + continue + } + v.reset(OpARMSLLconst) + v.AuxInt = int32ToAuxInt(int32(log32(c / 9))) + v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + break + } + // match: (MUL (MOVWconst [c]) (MOVWconst [d])) + // result: (MOVWconst [c*d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpARMMOVWconst { + continue + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpARMMOVWconst { + continue + } + d := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(c * d) + return true + } + break + } + return false +} +func rewriteValueARM_OpARMMULA(v *Value) bool { + 
v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MULA x (MOVWconst [c]) a) + // cond: c == -1 + // result: (SUB a x) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + a := v_2 + if !(c == -1) { + break + } + v.reset(OpARMSUB) + v.AddArg2(a, x) + return true + } + // match: (MULA _ (MOVWconst [0]) a) + // result: a + for { + if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 { + break + } + a := v_2 + v.copyOf(a) + return true + } + // match: (MULA x (MOVWconst [1]) a) + // result: (ADD x a) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 1 { + break + } + a := v_2 + v.reset(OpARMADD) + v.AddArg2(x, a) + return true + } + // match: (MULA x (MOVWconst [c]) a) + // cond: isPowerOfTwo32(c) + // result: (ADD (SLLconst [int32(log32(c))] x) a) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + a := v_2 + if !(isPowerOfTwo32(c)) { + break + } + v.reset(OpARMADD) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c))) + v0.AddArg(x) + v.AddArg2(v0, a) + return true + } + // match: (MULA x (MOVWconst [c]) a) + // cond: isPowerOfTwo32(c-1) && c >= 3 + // result: (ADD (ADDshiftLL x x [int32(log32(c-1))]) a) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + a := v_2 + if !(isPowerOfTwo32(c-1) && c >= 3) { + break + } + v.reset(OpARMADD) + v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c - 1))) + v0.AddArg2(x, x) + v.AddArg2(v0, a) + return true + } + // match: (MULA x (MOVWconst [c]) a) + // cond: isPowerOfTwo32(c+1) && c >= 7 + // result: (ADD (RSBshiftLL x x [int32(log32(c+1))]) a) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + a := v_2 + if !(isPowerOfTwo32(c+1) && c >= 7) { + break + } + v.reset(OpARMADD) + v0 := 
b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c + 1))) + v0.AddArg2(x, x) + v.AddArg2(v0, a) + return true + } + // match: (MULA x (MOVWconst [c]) a) + // cond: c%3 == 0 && isPowerOfTwo32(c/3) + // result: (ADD (SLLconst [int32(log32(c/3))] (ADDshiftLL x x [1])) a) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + a := v_2 + if !(c%3 == 0 && isPowerOfTwo32(c/3)) { + break + } + v.reset(OpARMADD) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c / 3))) + v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v1.AuxInt = int32ToAuxInt(1) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg2(v0, a) + return true + } + // match: (MULA x (MOVWconst [c]) a) + // cond: c%5 == 0 && isPowerOfTwo32(c/5) + // result: (ADD (SLLconst [int32(log32(c/5))] (ADDshiftLL x x [2])) a) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + a := v_2 + if !(c%5 == 0 && isPowerOfTwo32(c/5)) { + break + } + v.reset(OpARMADD) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c / 5))) + v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v1.AuxInt = int32ToAuxInt(2) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg2(v0, a) + return true + } + // match: (MULA x (MOVWconst [c]) a) + // cond: c%7 == 0 && isPowerOfTwo32(c/7) + // result: (ADD (SLLconst [int32(log32(c/7))] (RSBshiftLL x x [3])) a) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + a := v_2 + if !(c%7 == 0 && isPowerOfTwo32(c/7)) { + break + } + v.reset(OpARMADD) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c / 7))) + v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) + v1.AuxInt = int32ToAuxInt(3) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg2(v0, a) + return true + } + // match: (MULA x (MOVWconst [c]) a) + // cond: c%9 == 0 && 
isPowerOfTwo32(c/9) + // result: (ADD (SLLconst [int32(log32(c/9))] (ADDshiftLL x x [3])) a) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + a := v_2 + if !(c%9 == 0 && isPowerOfTwo32(c/9)) { + break + } + v.reset(OpARMADD) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c / 9))) + v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v1.AuxInt = int32ToAuxInt(3) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg2(v0, a) + return true + } + // match: (MULA (MOVWconst [c]) x a) + // cond: c == -1 + // result: (SUB a x) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + a := v_2 + if !(c == -1) { + break + } + v.reset(OpARMSUB) + v.AddArg2(a, x) + return true + } + // match: (MULA (MOVWconst [0]) _ a) + // result: a + for { + if v_0.Op != OpARMMOVWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + a := v_2 + v.copyOf(a) + return true + } + // match: (MULA (MOVWconst [1]) x a) + // result: (ADD x a) + for { + if v_0.Op != OpARMMOVWconst || auxIntToInt32(v_0.AuxInt) != 1 { + break + } + x := v_1 + a := v_2 + v.reset(OpARMADD) + v.AddArg2(x, a) + return true + } + // match: (MULA (MOVWconst [c]) x a) + // cond: isPowerOfTwo32(c) + // result: (ADD (SLLconst [int32(log32(c))] x) a) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + a := v_2 + if !(isPowerOfTwo32(c)) { + break + } + v.reset(OpARMADD) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c))) + v0.AddArg(x) + v.AddArg2(v0, a) + return true + } + // match: (MULA (MOVWconst [c]) x a) + // cond: isPowerOfTwo32(c-1) && c >= 3 + // result: (ADD (ADDshiftLL x x [int32(log32(c-1))]) a) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + a := v_2 + if !(isPowerOfTwo32(c-1) && c >= 3) { + break + } + v.reset(OpARMADD) + v0 := b.NewValue0(v.Pos, 
OpARMADDshiftLL, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c - 1))) + v0.AddArg2(x, x) + v.AddArg2(v0, a) + return true + } + // match: (MULA (MOVWconst [c]) x a) + // cond: isPowerOfTwo32(c+1) && c >= 7 + // result: (ADD (RSBshiftLL x x [int32(log32(c+1))]) a) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + a := v_2 + if !(isPowerOfTwo32(c+1) && c >= 7) { + break + } + v.reset(OpARMADD) + v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c + 1))) + v0.AddArg2(x, x) + v.AddArg2(v0, a) + return true + } + // match: (MULA (MOVWconst [c]) x a) + // cond: c%3 == 0 && isPowerOfTwo32(c/3) + // result: (ADD (SLLconst [int32(log32(c/3))] (ADDshiftLL x x [1])) a) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + a := v_2 + if !(c%3 == 0 && isPowerOfTwo32(c/3)) { + break + } + v.reset(OpARMADD) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c / 3))) + v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v1.AuxInt = int32ToAuxInt(1) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg2(v0, a) + return true + } + // match: (MULA (MOVWconst [c]) x a) + // cond: c%5 == 0 && isPowerOfTwo32(c/5) + // result: (ADD (SLLconst [int32(log32(c/5))] (ADDshiftLL x x [2])) a) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + a := v_2 + if !(c%5 == 0 && isPowerOfTwo32(c/5)) { + break + } + v.reset(OpARMADD) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c / 5))) + v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v1.AuxInt = int32ToAuxInt(2) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg2(v0, a) + return true + } + // match: (MULA (MOVWconst [c]) x a) + // cond: c%7 == 0 && isPowerOfTwo32(c/7) + // result: (ADD (SLLconst [int32(log32(c/7))] (RSBshiftLL x x [3])) a) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := 
auxIntToInt32(v_0.AuxInt) + x := v_1 + a := v_2 + if !(c%7 == 0 && isPowerOfTwo32(c/7)) { + break + } + v.reset(OpARMADD) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c / 7))) + v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) + v1.AuxInt = int32ToAuxInt(3) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg2(v0, a) + return true + } + // match: (MULA (MOVWconst [c]) x a) + // cond: c%9 == 0 && isPowerOfTwo32(c/9) + // result: (ADD (SLLconst [int32(log32(c/9))] (ADDshiftLL x x [3])) a) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + a := v_2 + if !(c%9 == 0 && isPowerOfTwo32(c/9)) { + break + } + v.reset(OpARMADD) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c / 9))) + v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v1.AuxInt = int32ToAuxInt(3) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg2(v0, a) + return true + } + // match: (MULA (MOVWconst [c]) (MOVWconst [d]) a) + // result: (ADDconst [c*d] a) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpARMMOVWconst { + break + } + d := auxIntToInt32(v_1.AuxInt) + a := v_2 + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(c * d) + v.AddArg(a) + return true + } + return false +} +func rewriteValueARM_OpARMMULD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MULD (NEGD x) y) + // cond: buildcfg.GOARM.Version >= 6 + // result: (NMULD x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpARMNEGD { + continue + } + x := v_0.Args[0] + y := v_1 + if !(buildcfg.GOARM.Version >= 6) { + continue + } + v.reset(OpARMNMULD) + v.AddArg2(x, y) + return true + } + break + } + return false +} +func rewriteValueARM_OpARMMULF(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MULF (NEGF x) y) + // cond: buildcfg.GOARM.Version >= 6 + // result: (NMULF x y) + for { + for _i0 := 0; 
_i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpARMNEGF { + continue + } + x := v_0.Args[0] + y := v_1 + if !(buildcfg.GOARM.Version >= 6) { + continue + } + v.reset(OpARMNMULF) + v.AddArg2(x, y) + return true + } + break + } + return false +} +func rewriteValueARM_OpARMMULS(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MULS x (MOVWconst [c]) a) + // cond: c == -1 + // result: (ADD a x) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + a := v_2 + if !(c == -1) { + break + } + v.reset(OpARMADD) + v.AddArg2(a, x) + return true + } + // match: (MULS _ (MOVWconst [0]) a) + // result: a + for { + if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 { + break + } + a := v_2 + v.copyOf(a) + return true + } + // match: (MULS x (MOVWconst [1]) a) + // result: (RSB x a) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 1 { + break + } + a := v_2 + v.reset(OpARMRSB) + v.AddArg2(x, a) + return true + } + // match: (MULS x (MOVWconst [c]) a) + // cond: isPowerOfTwo32(c) + // result: (RSB (SLLconst [int32(log32(c))] x) a) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + a := v_2 + if !(isPowerOfTwo32(c)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c))) + v0.AddArg(x) + v.AddArg2(v0, a) + return true + } + // match: (MULS x (MOVWconst [c]) a) + // cond: isPowerOfTwo32(c-1) && c >= 3 + // result: (RSB (ADDshiftLL x x [int32(log32(c-1))]) a) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + a := v_2 + if !(isPowerOfTwo32(c-1) && c >= 3) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c - 1))) + v0.AddArg2(x, x) + v.AddArg2(v0, a) + return true + } + // match: (MULS x (MOVWconst 
[c]) a) + // cond: isPowerOfTwo32(c+1) && c >= 7 + // result: (RSB (RSBshiftLL x x [int32(log32(c+1))]) a) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + a := v_2 + if !(isPowerOfTwo32(c+1) && c >= 7) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c + 1))) + v0.AddArg2(x, x) + v.AddArg2(v0, a) + return true + } + // match: (MULS x (MOVWconst [c]) a) + // cond: c%3 == 0 && isPowerOfTwo32(c/3) + // result: (RSB (SLLconst [int32(log32(c/3))] (ADDshiftLL x x [1])) a) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + a := v_2 + if !(c%3 == 0 && isPowerOfTwo32(c/3)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c / 3))) + v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v1.AuxInt = int32ToAuxInt(1) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg2(v0, a) + return true + } + // match: (MULS x (MOVWconst [c]) a) + // cond: c%5 == 0 && isPowerOfTwo32(c/5) + // result: (RSB (SLLconst [int32(log32(c/5))] (ADDshiftLL x x [2])) a) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + a := v_2 + if !(c%5 == 0 && isPowerOfTwo32(c/5)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c / 5))) + v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v1.AuxInt = int32ToAuxInt(2) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg2(v0, a) + return true + } + // match: (MULS x (MOVWconst [c]) a) + // cond: c%7 == 0 && isPowerOfTwo32(c/7) + // result: (RSB (SLLconst [int32(log32(c/7))] (RSBshiftLL x x [3])) a) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + a := v_2 + if !(c%7 == 0 && isPowerOfTwo32(c/7)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, 
OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c / 7))) + v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) + v1.AuxInt = int32ToAuxInt(3) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg2(v0, a) + return true + } + // match: (MULS x (MOVWconst [c]) a) + // cond: c%9 == 0 && isPowerOfTwo32(c/9) + // result: (RSB (SLLconst [int32(log32(c/9))] (ADDshiftLL x x [3])) a) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + a := v_2 + if !(c%9 == 0 && isPowerOfTwo32(c/9)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c / 9))) + v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v1.AuxInt = int32ToAuxInt(3) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg2(v0, a) + return true + } + // match: (MULS (MOVWconst [c]) x a) + // cond: c == -1 + // result: (ADD a x) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + a := v_2 + if !(c == -1) { + break + } + v.reset(OpARMADD) + v.AddArg2(a, x) + return true + } + // match: (MULS (MOVWconst [0]) _ a) + // result: a + for { + if v_0.Op != OpARMMOVWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + a := v_2 + v.copyOf(a) + return true + } + // match: (MULS (MOVWconst [1]) x a) + // result: (RSB x a) + for { + if v_0.Op != OpARMMOVWconst || auxIntToInt32(v_0.AuxInt) != 1 { + break + } + x := v_1 + a := v_2 + v.reset(OpARMRSB) + v.AddArg2(x, a) + return true + } + // match: (MULS (MOVWconst [c]) x a) + // cond: isPowerOfTwo32(c) + // result: (RSB (SLLconst [int32(log32(c))] x) a) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + a := v_2 + if !(isPowerOfTwo32(c)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c))) + v0.AddArg(x) + v.AddArg2(v0, a) + return true + } + // match: (MULS (MOVWconst [c]) x a) + // cond: 
isPowerOfTwo32(c-1) && c >= 3 + // result: (RSB (ADDshiftLL x x [int32(log32(c-1))]) a) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + a := v_2 + if !(isPowerOfTwo32(c-1) && c >= 3) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c - 1))) + v0.AddArg2(x, x) + v.AddArg2(v0, a) + return true + } + // match: (MULS (MOVWconst [c]) x a) + // cond: isPowerOfTwo32(c+1) && c >= 7 + // result: (RSB (RSBshiftLL x x [int32(log32(c+1))]) a) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + a := v_2 + if !(isPowerOfTwo32(c+1) && c >= 7) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c + 1))) + v0.AddArg2(x, x) + v.AddArg2(v0, a) + return true + } + // match: (MULS (MOVWconst [c]) x a) + // cond: c%3 == 0 && isPowerOfTwo32(c/3) + // result: (RSB (SLLconst [int32(log32(c/3))] (ADDshiftLL x x [1])) a) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + a := v_2 + if !(c%3 == 0 && isPowerOfTwo32(c/3)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c / 3))) + v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v1.AuxInt = int32ToAuxInt(1) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg2(v0, a) + return true + } + // match: (MULS (MOVWconst [c]) x a) + // cond: c%5 == 0 && isPowerOfTwo32(c/5) + // result: (RSB (SLLconst [int32(log32(c/5))] (ADDshiftLL x x [2])) a) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + a := v_2 + if !(c%5 == 0 && isPowerOfTwo32(c/5)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c / 5))) + v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v1.AuxInt = 
int32ToAuxInt(2) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg2(v0, a) + return true + } + // match: (MULS (MOVWconst [c]) x a) + // cond: c%7 == 0 && isPowerOfTwo32(c/7) + // result: (RSB (SLLconst [int32(log32(c/7))] (RSBshiftLL x x [3])) a) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + a := v_2 + if !(c%7 == 0 && isPowerOfTwo32(c/7)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c / 7))) + v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) + v1.AuxInt = int32ToAuxInt(3) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg2(v0, a) + return true + } + // match: (MULS (MOVWconst [c]) x a) + // cond: c%9 == 0 && isPowerOfTwo32(c/9) + // result: (RSB (SLLconst [int32(log32(c/9))] (ADDshiftLL x x [3])) a) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + a := v_2 + if !(c%9 == 0 && isPowerOfTwo32(c/9)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(int32(log32(c / 9))) + v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v1.AuxInt = int32ToAuxInt(3) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg2(v0, a) + return true + } + // match: (MULS (MOVWconst [c]) (MOVWconst [d]) a) + // result: (SUBconst [c*d] a) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpARMMOVWconst { + break + } + d := auxIntToInt32(v_1.AuxInt) + a := v_2 + v.reset(OpARMSUBconst) + v.AuxInt = int32ToAuxInt(c * d) + v.AddArg(a) + return true + } + return false +} +func rewriteValueARM_OpARMMVN(v *Value) bool { + v_0 := v.Args[0] + // match: (MVN (MOVWconst [c])) + // result: (MOVWconst [^c]) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(^c) + return true + } + // match: (MVN (SLLconst [c] x)) + // result: (MVNshiftLL x [c]) + for 
{ + if v_0.Op != OpARMSLLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARMMVNshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MVN (SRLconst [c] x)) + // result: (MVNshiftRL x [c]) + for { + if v_0.Op != OpARMSRLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARMMVNshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MVN (SRAconst [c] x)) + // result: (MVNshiftRA x [c]) + for { + if v_0.Op != OpARMSRAconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARMMVNshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MVN (SLL x y)) + // result: (MVNshiftLLreg x y) + for { + if v_0.Op != OpARMSLL { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpARMMVNshiftLLreg) + v.AddArg2(x, y) + return true + } + // match: (MVN (SRL x y)) + // result: (MVNshiftRLreg x y) + for { + if v_0.Op != OpARMSRL { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpARMMVNshiftRLreg) + v.AddArg2(x, y) + return true + } + // match: (MVN (SRA x y)) + // result: (MVNshiftRAreg x y) + for { + if v_0.Op != OpARMSRA { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpARMMVNshiftRAreg) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMMVNshiftLL(v *Value) bool { + v_0 := v.Args[0] + // match: (MVNshiftLL (MOVWconst [c]) [d]) + // result: (MOVWconst [^(c<>uint64(d)]) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(int32(c) >> uint64(d)) + return true + } + return false +} +func rewriteValueARM_OpARMMVNshiftRAreg(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MVNshiftRAreg x (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (MVNshiftRA x [c]) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + 
break + } + c := auxIntToInt32(v_1.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMMVNshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMMVNshiftRL(v *Value) bool { + v_0 := v.Args[0] + // match: (MVNshiftRL (MOVWconst [c]) [d]) + // result: (MOVWconst [^int32(uint32(c)>>uint64(d))]) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(^int32(uint32(c) >> uint64(d))) + return true + } + return false +} +func rewriteValueARM_OpARMMVNshiftRLreg(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MVNshiftRLreg x (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (MVNshiftRL x [c]) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMMVNshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMNEGD(v *Value) bool { + v_0 := v.Args[0] + // match: (NEGD (MULD x y)) + // cond: buildcfg.GOARM.Version >= 6 + // result: (NMULD x y) + for { + if v_0.Op != OpARMMULD { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + if !(buildcfg.GOARM.Version >= 6) { + break + } + v.reset(OpARMNMULD) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMNEGF(v *Value) bool { + v_0 := v.Args[0] + // match: (NEGF (MULF x y)) + // cond: buildcfg.GOARM.Version >= 6 + // result: (NMULF x y) + for { + if v_0.Op != OpARMMULF { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + if !(buildcfg.GOARM.Version >= 6) { + break + } + v.reset(OpARMNMULF) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMNMULD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NMULD (NEGD x) y) + // result: (MULD x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 
{ + if v_0.Op != OpARMNEGD { + continue + } + x := v_0.Args[0] + y := v_1 + v.reset(OpARMMULD) + v.AddArg2(x, y) + return true + } + break + } + return false +} +func rewriteValueARM_OpARMNMULF(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NMULF (NEGF x) y) + // result: (MULF x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpARMNEGF { + continue + } + x := v_0.Args[0] + y := v_1 + v.reset(OpARMMULF) + v.AddArg2(x, y) + return true + } + break + } + return false +} +func rewriteValueARM_OpARMNotEqual(v *Value) bool { + v_0 := v.Args[0] + // match: (NotEqual (FlagConstant [fc])) + // result: (MOVWconst [b2i32(fc.ne())]) + for { + if v_0.Op != OpARMFlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(b2i32(fc.ne())) + return true + } + // match: (NotEqual (InvertFlags x)) + // result: (NotEqual x) + for { + if v_0.Op != OpARMInvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARMNotEqual) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OR x (MOVWconst [c])) + // result: (ORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMMOVWconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMORconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (OR x (SLLconst [c] y)) + // result: (ORshiftLL x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSLLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMORshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (OR x (SRLconst [c] y)) + // result: (ORshiftRL x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op 
!= OpARMSRLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMORshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (OR x (SRAconst [c] y)) + // result: (ORshiftRA x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRAconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMORshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (OR x (SLL y z)) + // result: (ORshiftLLreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSLL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMORshiftLLreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (OR x (SRL y z)) + // result: (ORshiftRLreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMORshiftRLreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (OR x (SRA y z)) + // result: (ORshiftRAreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRA { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMORshiftRAreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (OR x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueARM_OpARMORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ORconst [0] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ORconst [c] _) + // cond: int32(c)==-1 + // result: (MOVWconst [-1]) + for { + c := auxIntToInt32(v.AuxInt) + if !(int32(c) == -1) { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 
int32ToAuxInt(-1) + return true + } + // match: (ORconst [c] (MOVWconst [d])) + // result: (MOVWconst [c|d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(c | d) + return true + } + // match: (ORconst [c] (ORconst [d] x)) + // result: (ORconst [c|d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMORconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARMORconst) + v.AuxInt = int32ToAuxInt(c | d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMORshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ORshiftLL (MOVWconst [c]) x [d]) + // result: (ORconst [c] (SLLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMORconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ORshiftLL x (MOVWconst [c]) [d]) + // result: (ORconst x [c< [8] (BFXU [int32(armBFAuxInt(8, 8))] x) x) + // result: (REV16 x) + for { + if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != int32(armBFAuxInt(8, 8)) { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARMREV16) + v.AddArg(x) + return true + } + // match: (ORshiftLL [8] (SRLconst [24] (SLLconst [16] x)) x) + // cond: buildcfg.GOARM.Version>=6 + // result: (REV16 x) + for { + if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMSLLconst || auxIntToInt32(v_0_0.AuxInt) != 16 { + break + } + x := v_0_0.Args[0] 
+ if x != v_1 || !(buildcfg.GOARM.Version >= 6) { + break + } + v.reset(OpARMREV16) + v.AddArg(x) + return true + } + // match: (ORshiftLL y:(SLLconst x [c]) x [c]) + // result: y + for { + c := auxIntToInt32(v.AuxInt) + y := v_0 + if y.Op != OpARMSLLconst || auxIntToInt32(y.AuxInt) != c { + break + } + x := y.Args[0] + if x != v_1 { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueARM_OpARMORshiftLLreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ORshiftLLreg (MOVWconst [c]) x y) + // result: (ORconst [c] (SLL x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMORconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (ORshiftLLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (ORshiftLL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMORshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMORshiftRA(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ORshiftRA (MOVWconst [c]) x [d]) + // result: (ORconst [c] (SRAconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMORconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ORshiftRA x (MOVWconst [c]) [d]) + // result: (ORconst x [c>>uint64(d)]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMORconst) + v.AuxInt = 
int32ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + // match: (ORshiftRA y:(SRAconst x [c]) x [c]) + // result: y + for { + c := auxIntToInt32(v.AuxInt) + y := v_0 + if y.Op != OpARMSRAconst || auxIntToInt32(y.AuxInt) != c { + break + } + x := y.Args[0] + if x != v_1 { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueARM_OpARMORshiftRAreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ORshiftRAreg (MOVWconst [c]) x y) + // result: (ORconst [c] (SRA x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMORconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (ORshiftRAreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (ORshiftRA x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMORshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMORshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ORshiftRL (MOVWconst [c]) x [d]) + // result: (ORconst [c] (SRLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMORconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ORshiftRL x (MOVWconst [c]) [d]) + // result: (ORconst x [int32(uint32(c)>>uint64(d))]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMORconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + 
v.AddArg(x) + return true + } + // match: (ORshiftRL y:(SRLconst x [c]) x [c]) + // result: y + for { + c := auxIntToInt32(v.AuxInt) + y := v_0 + if y.Op != OpARMSRLconst || auxIntToInt32(y.AuxInt) != c { + break + } + x := y.Args[0] + if x != v_1 { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueARM_OpARMORshiftRLreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ORshiftRLreg (MOVWconst [c]) x y) + // result: (ORconst [c] (SRL x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMORconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (ORshiftRLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (ORshiftRL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMORshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMRSB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (RSB (MOVWconst [c]) x) + // result: (SUBconst [c] x) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMSUBconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (RSB x (MOVWconst [c])) + // result: (RSBconst [c] x) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (RSB x (SLLconst [c] y)) + // result: (RSBshiftLL x y [c]) + for { + x := v_0 + if v_1.Op != OpARMSLLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMRSBshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return 
true + } + // match: (RSB (SLLconst [c] y) x) + // result: (SUBshiftLL x y [c]) + for { + if v_0.Op != OpARMSLLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + y := v_0.Args[0] + x := v_1 + v.reset(OpARMSUBshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (RSB x (SRLconst [c] y)) + // result: (RSBshiftRL x y [c]) + for { + x := v_0 + if v_1.Op != OpARMSRLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMRSBshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (RSB (SRLconst [c] y) x) + // result: (SUBshiftRL x y [c]) + for { + if v_0.Op != OpARMSRLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + y := v_0.Args[0] + x := v_1 + v.reset(OpARMSUBshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (RSB x (SRAconst [c] y)) + // result: (RSBshiftRA x y [c]) + for { + x := v_0 + if v_1.Op != OpARMSRAconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMRSBshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (RSB (SRAconst [c] y) x) + // result: (SUBshiftRA x y [c]) + for { + if v_0.Op != OpARMSRAconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + y := v_0.Args[0] + x := v_1 + v.reset(OpARMSUBshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (RSB x (SLL y z)) + // result: (RSBshiftLLreg x y z) + for { + x := v_0 + if v_1.Op != OpARMSLL { + break + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMRSBshiftLLreg) + v.AddArg3(x, y, z) + return true + } + // match: (RSB (SLL y z) x) + // result: (SUBshiftLLreg x y z) + for { + if v_0.Op != OpARMSLL { + break + } + z := v_0.Args[1] + y := v_0.Args[0] + x := v_1 + v.reset(OpARMSUBshiftLLreg) + v.AddArg3(x, y, z) + return true + } + // match: (RSB x (SRL y z)) + // result: (RSBshiftRLreg x y z) + for { + x := v_0 + if v_1.Op != OpARMSRL { + break + } + z := v_1.Args[1] + 
y := v_1.Args[0] + v.reset(OpARMRSBshiftRLreg) + v.AddArg3(x, y, z) + return true + } + // match: (RSB (SRL y z) x) + // result: (SUBshiftRLreg x y z) + for { + if v_0.Op != OpARMSRL { + break + } + z := v_0.Args[1] + y := v_0.Args[0] + x := v_1 + v.reset(OpARMSUBshiftRLreg) + v.AddArg3(x, y, z) + return true + } + // match: (RSB x (SRA y z)) + // result: (RSBshiftRAreg x y z) + for { + x := v_0 + if v_1.Op != OpARMSRA { + break + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMRSBshiftRAreg) + v.AddArg3(x, y, z) + return true + } + // match: (RSB (SRA y z) x) + // result: (SUBshiftRAreg x y z) + for { + if v_0.Op != OpARMSRA { + break + } + z := v_0.Args[1] + y := v_0.Args[0] + x := v_1 + v.reset(OpARMSUBshiftRAreg) + v.AddArg3(x, y, z) + return true + } + // match: (RSB x x) + // result: (MOVWconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (RSB (MUL x y) a) + // cond: buildcfg.GOARM.Version == 7 + // result: (MULS x y a) + for { + if v_0.Op != OpARMMUL { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + a := v_1 + if !(buildcfg.GOARM.Version == 7) { + break + } + v.reset(OpARMMULS) + v.AddArg3(x, y, a) + return true + } + return false +} +func rewriteValueARM_OpARMRSBSshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RSBSshiftLL (MOVWconst [c]) x [d]) + // result: (SUBSconst [c] (SLLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMSUBSconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (RSBSshiftLL x (MOVWconst [c]) [d]) + // result: (RSBSconst x [c< x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMSUBSconst) + 
v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (RSBSshiftLLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (RSBSshiftLL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMRSBSshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMRSBSshiftRA(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RSBSshiftRA (MOVWconst [c]) x [d]) + // result: (SUBSconst [c] (SRAconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMSUBSconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (RSBSshiftRA x (MOVWconst [c]) [d]) + // result: (RSBSconst x [c>>uint64(d)]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMRSBSconst) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RSBSshiftRAreg (MOVWconst [c]) x y) + // result: (SUBSconst [c] (SRA x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMSUBSconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (RSBSshiftRAreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (RSBSshiftRA x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { 
+ break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMRSBSshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMRSBSshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RSBSshiftRL (MOVWconst [c]) x [d]) + // result: (SUBSconst [c] (SRLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMSUBSconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (RSBSshiftRL x (MOVWconst [c]) [d]) + // result: (RSBSconst x [int32(uint32(c)>>uint64(d))]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMRSBSconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RSBSshiftRLreg (MOVWconst [c]) x y) + // result: (SUBSconst [c] (SRL x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMSUBSconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (RSBSshiftRLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (RSBSshiftRL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMRSBSshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMRSBconst(v *Value) bool { + v_0 := v.Args[0] + 
// match: (RSBconst [c] (MOVWconst [d])) + // result: (MOVWconst [c-d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(c - d) + return true + } + // match: (RSBconst [c] (RSBconst [d] x)) + // result: (ADDconst [c-d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMRSBconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(c - d) + v.AddArg(x) + return true + } + // match: (RSBconst [c] (ADDconst [d] x)) + // result: (RSBconst [c-d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMADDconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(c - d) + v.AddArg(x) + return true + } + // match: (RSBconst [c] (SUBconst [d] x)) + // result: (RSBconst [c+d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMSUBconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(c + d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMRSBshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RSBshiftLL (MOVWconst [c]) x [d]) + // result: (SUBconst [c] (SLLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMSUBconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (RSBshiftLL x (MOVWconst [c]) [d]) + // result: (RSBconst x [c< x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMSUBconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + 
v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (RSBshiftLLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (RSBshiftLL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMRSBshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMRSBshiftRA(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RSBshiftRA (MOVWconst [c]) x [d]) + // result: (SUBconst [c] (SRAconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMSUBconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (RSBshiftRA x (MOVWconst [c]) [d]) + // result: (RSBconst x [c>>uint64(d)]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + // match: (RSBshiftRA (SRAconst x [c]) x [c]) + // result: (MOVWconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM_OpARMRSBshiftRAreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RSBshiftRAreg (MOVWconst [c]) x y) + // result: (SUBconst [c] (SRA x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMSUBconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + 
v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (RSBshiftRAreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (RSBshiftRA x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMRSBshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMRSBshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RSBshiftRL (MOVWconst [c]) x [d]) + // result: (SUBconst [c] (SRLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMSUBconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (RSBshiftRL x (MOVWconst [c]) [d]) + // result: (RSBconst x [int32(uint32(c)>>uint64(d))]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + v.AddArg(x) + return true + } + // match: (RSBshiftRL (SRLconst x [c]) x [c]) + // result: (MOVWconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM_OpARMRSBshiftRLreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RSBshiftRLreg (MOVWconst [c]) x y) + // result: (SUBconst [c] (SRL x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMSUBconst) + v.AuxInt = int32ToAuxInt(c) + v0 := 
b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (RSBshiftRLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (RSBshiftRL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMRSBshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMRSCconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (RSCconst [c] (ADDconst [d] x) flags) + // result: (RSCconst [c-d] x flags) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMADDconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + flags := v_1 + v.reset(OpARMRSCconst) + v.AuxInt = int32ToAuxInt(c - d) + v.AddArg2(x, flags) + return true + } + // match: (RSCconst [c] (SUBconst [d] x) flags) + // result: (RSCconst [c+d] x flags) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMSUBconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + flags := v_1 + v.reset(OpARMRSCconst) + v.AuxInt = int32ToAuxInt(c + d) + v.AddArg2(x, flags) + return true + } + return false +} +func rewriteValueARM_OpARMRSCshiftLL(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RSCshiftLL (MOVWconst [c]) x [d] flags) + // result: (SBCconst [c] (SLLconst x [d]) flags) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + flags := v_2 + v.reset(OpARMSBCconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg2(v0, flags) + return true + } + // match: (RSCshiftLL x (MOVWconst [c]) [d] flags) + // result: (RSCconst x [c< x y) flags) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := 
v_2 + flags := v_3 + v.reset(OpARMSBCconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) + return true + } + // match: (RSCshiftLLreg x y (MOVWconst [c]) flags) + // cond: 0 <= c && c < 32 + // result: (RSCshiftLL x y [c] flags) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + flags := v_3 + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMRSCshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, flags) + return true + } + return false +} +func rewriteValueARM_OpARMRSCshiftRA(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RSCshiftRA (MOVWconst [c]) x [d] flags) + // result: (SBCconst [c] (SRAconst x [d]) flags) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + flags := v_2 + v.reset(OpARMSBCconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg2(v0, flags) + return true + } + // match: (RSCshiftRA x (MOVWconst [c]) [d] flags) + // result: (RSCconst x [c>>uint64(d)] flags) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + flags := v_2 + v.reset(OpARMRSCconst) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) + v.AddArg2(x, flags) + return true + } + return false +} +func rewriteValueARM_OpARMRSCshiftRAreg(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RSCshiftRAreg (MOVWconst [c]) x y flags) + // result: (SBCconst [c] (SRA x y) flags) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + flags := v_3 + v.reset(OpARMSBCconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v0.AddArg2(x, 
y) + v.AddArg2(v0, flags) + return true + } + // match: (RSCshiftRAreg x y (MOVWconst [c]) flags) + // cond: 0 <= c && c < 32 + // result: (RSCshiftRA x y [c] flags) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + flags := v_3 + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMRSCshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, flags) + return true + } + return false +} +func rewriteValueARM_OpARMRSCshiftRL(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RSCshiftRL (MOVWconst [c]) x [d] flags) + // result: (SBCconst [c] (SRLconst x [d]) flags) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + flags := v_2 + v.reset(OpARMSBCconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg2(v0, flags) + return true + } + // match: (RSCshiftRL x (MOVWconst [c]) [d] flags) + // result: (RSCconst x [int32(uint32(c)>>uint64(d))] flags) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + flags := v_2 + v.reset(OpARMRSCconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + v.AddArg2(x, flags) + return true + } + return false +} +func rewriteValueARM_OpARMRSCshiftRLreg(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RSCshiftRLreg (MOVWconst [c]) x y flags) + // result: (SBCconst [c] (SRL x y) flags) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + flags := v_3 + v.reset(OpARMSBCconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) + return true + } + // match: (RSCshiftRLreg x y (MOVWconst [c]) flags) + // 
cond: 0 <= c && c < 32 + // result: (RSCshiftRL x y [c] flags) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + flags := v_3 + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMRSCshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, flags) + return true + } + return false +} +func rewriteValueARM_OpARMSBC(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SBC (MOVWconst [c]) x flags) + // result: (RSCconst [c] x flags) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + flags := v_2 + v.reset(OpARMRSCconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, flags) + return true + } + // match: (SBC x (MOVWconst [c]) flags) + // result: (SBCconst [c] x flags) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + flags := v_2 + v.reset(OpARMSBCconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, flags) + return true + } + // match: (SBC x (SLLconst [c] y) flags) + // result: (SBCshiftLL x y [c] flags) + for { + x := v_0 + if v_1.Op != OpARMSLLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + flags := v_2 + v.reset(OpARMSBCshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, flags) + return true + } + // match: (SBC (SLLconst [c] y) x flags) + // result: (RSCshiftLL x y [c] flags) + for { + if v_0.Op != OpARMSLLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + y := v_0.Args[0] + x := v_1 + flags := v_2 + v.reset(OpARMRSCshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, flags) + return true + } + // match: (SBC x (SRLconst [c] y) flags) + // result: (SBCshiftRL x y [c] flags) + for { + x := v_0 + if v_1.Op != OpARMSRLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + flags := v_2 + v.reset(OpARMSBCshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, flags) + return true + } + // match: (SBC (SRLconst [c] y) x flags) + // 
result: (RSCshiftRL x y [c] flags) + for { + if v_0.Op != OpARMSRLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + y := v_0.Args[0] + x := v_1 + flags := v_2 + v.reset(OpARMRSCshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, flags) + return true + } + // match: (SBC x (SRAconst [c] y) flags) + // result: (SBCshiftRA x y [c] flags) + for { + x := v_0 + if v_1.Op != OpARMSRAconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + flags := v_2 + v.reset(OpARMSBCshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, flags) + return true + } + // match: (SBC (SRAconst [c] y) x flags) + // result: (RSCshiftRA x y [c] flags) + for { + if v_0.Op != OpARMSRAconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + y := v_0.Args[0] + x := v_1 + flags := v_2 + v.reset(OpARMRSCshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, flags) + return true + } + // match: (SBC x (SLL y z) flags) + // result: (SBCshiftLLreg x y z flags) + for { + x := v_0 + if v_1.Op != OpARMSLL { + break + } + z := v_1.Args[1] + y := v_1.Args[0] + flags := v_2 + v.reset(OpARMSBCshiftLLreg) + v.AddArg4(x, y, z, flags) + return true + } + // match: (SBC (SLL y z) x flags) + // result: (RSCshiftLLreg x y z flags) + for { + if v_0.Op != OpARMSLL { + break + } + z := v_0.Args[1] + y := v_0.Args[0] + x := v_1 + flags := v_2 + v.reset(OpARMRSCshiftLLreg) + v.AddArg4(x, y, z, flags) + return true + } + // match: (SBC x (SRL y z) flags) + // result: (SBCshiftRLreg x y z flags) + for { + x := v_0 + if v_1.Op != OpARMSRL { + break + } + z := v_1.Args[1] + y := v_1.Args[0] + flags := v_2 + v.reset(OpARMSBCshiftRLreg) + v.AddArg4(x, y, z, flags) + return true + } + // match: (SBC (SRL y z) x flags) + // result: (RSCshiftRLreg x y z flags) + for { + if v_0.Op != OpARMSRL { + break + } + z := v_0.Args[1] + y := v_0.Args[0] + x := v_1 + flags := v_2 + v.reset(OpARMRSCshiftRLreg) + v.AddArg4(x, y, z, flags) + return true + } + // match: (SBC x (SRA y z) flags) + // result: 
(SBCshiftRAreg x y z flags) + for { + x := v_0 + if v_1.Op != OpARMSRA { + break + } + z := v_1.Args[1] + y := v_1.Args[0] + flags := v_2 + v.reset(OpARMSBCshiftRAreg) + v.AddArg4(x, y, z, flags) + return true + } + // match: (SBC (SRA y z) x flags) + // result: (RSCshiftRAreg x y z flags) + for { + if v_0.Op != OpARMSRA { + break + } + z := v_0.Args[1] + y := v_0.Args[0] + x := v_1 + flags := v_2 + v.reset(OpARMRSCshiftRAreg) + v.AddArg4(x, y, z, flags) + return true + } + return false +} +func rewriteValueARM_OpARMSBCconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SBCconst [c] (ADDconst [d] x) flags) + // result: (SBCconst [c-d] x flags) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMADDconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + flags := v_1 + v.reset(OpARMSBCconst) + v.AuxInt = int32ToAuxInt(c - d) + v.AddArg2(x, flags) + return true + } + // match: (SBCconst [c] (SUBconst [d] x) flags) + // result: (SBCconst [c+d] x flags) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMSUBconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + flags := v_1 + v.reset(OpARMSBCconst) + v.AuxInt = int32ToAuxInt(c + d) + v.AddArg2(x, flags) + return true + } + return false +} +func rewriteValueARM_OpARMSBCshiftLL(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SBCshiftLL (MOVWconst [c]) x [d] flags) + // result: (RSCconst [c] (SLLconst x [d]) flags) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + flags := v_2 + v.reset(OpARMRSCconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg2(v0, flags) + return true + } + // match: (SBCshiftLL x (MOVWconst [c]) [d] flags) + // result: (SBCconst x [c< x y) flags) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := 
auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + flags := v_3 + v.reset(OpARMRSCconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) + return true + } + // match: (SBCshiftLLreg x y (MOVWconst [c]) flags) + // cond: 0 <= c && c < 32 + // result: (SBCshiftLL x y [c] flags) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + flags := v_3 + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMSBCshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, flags) + return true + } + return false +} +func rewriteValueARM_OpARMSBCshiftRA(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SBCshiftRA (MOVWconst [c]) x [d] flags) + // result: (RSCconst [c] (SRAconst x [d]) flags) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + flags := v_2 + v.reset(OpARMRSCconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg2(v0, flags) + return true + } + // match: (SBCshiftRA x (MOVWconst [c]) [d] flags) + // result: (SBCconst x [c>>uint64(d)] flags) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + flags := v_2 + v.reset(OpARMSBCconst) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) + v.AddArg2(x, flags) + return true + } + return false +} +func rewriteValueARM_OpARMSBCshiftRAreg(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SBCshiftRAreg (MOVWconst [c]) x y flags) + // result: (RSCconst [c] (SRA x y) flags) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + flags := v_3 + v.reset(OpARMRSCconst) + v.AuxInt = int32ToAuxInt(c) + v0 := 
b.NewValue0(v.Pos, OpARMSRA, x.Type) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) + return true + } + // match: (SBCshiftRAreg x y (MOVWconst [c]) flags) + // cond: 0 <= c && c < 32 + // result: (SBCshiftRA x y [c] flags) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + flags := v_3 + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMSBCshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, flags) + return true + } + return false +} +func rewriteValueARM_OpARMSBCshiftRL(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SBCshiftRL (MOVWconst [c]) x [d] flags) + // result: (RSCconst [c] (SRLconst x [d]) flags) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + flags := v_2 + v.reset(OpARMRSCconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg2(v0, flags) + return true + } + // match: (SBCshiftRL x (MOVWconst [c]) [d] flags) + // result: (SBCconst x [int32(uint32(c)>>uint64(d))] flags) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + flags := v_2 + v.reset(OpARMSBCconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + v.AddArg2(x, flags) + return true + } + return false +} +func rewriteValueARM_OpARMSBCshiftRLreg(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SBCshiftRLreg (MOVWconst [c]) x y flags) + // result: (RSCconst [c] (SRL x y) flags) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + flags := v_3 + v.reset(OpARMRSCconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg2(x, y) + v.AddArg2(v0, flags) + return true + } + // 
match: (SBCshiftRLreg x y (MOVWconst [c]) flags) + // cond: 0 <= c && c < 32 + // result: (SBCshiftRL x y [c] flags) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + flags := v_3 + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMSBCshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, flags) + return true + } + return false +} +func rewriteValueARM_OpARMSLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SLL x (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (SLLconst x [c]) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMSLLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMSLLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SLLconst [c] (MOVWconst [d])) + // result: (MOVWconst [d<>uint64(c)]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(d >> uint64(c)) + return true + } + // match: (SRAconst (SLLconst x [c]) [d]) + // cond: buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31 + // result: (BFX [(d-c)|(32-d)<<8] x) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMSLLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(buildcfg.GOARM.Version == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) { + break + } + v.reset(OpARMBFX) + v.AuxInt = int32ToAuxInt((d - c) | (32-d)<<8) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMSRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRL x (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (SRLconst x [c]) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(0 <= c && c < 32) { + break + } + 
v.reset(OpARMSRLconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMSRLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SRLconst [c] (MOVWconst [d])) + // result: (MOVWconst [int32(uint32(d)>>uint64(c))]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(int32(uint32(d) >> uint64(c))) + return true + } + // match: (SRLconst (SLLconst x [c]) [d]) + // cond: buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31 + // result: (BFXU [(d-c)|(32-d)<<8] x) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMSLLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(buildcfg.GOARM.Version == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) { + break + } + v.reset(OpARMBFXU) + v.AuxInt = int32ToAuxInt((d - c) | (32-d)<<8) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMSRR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRR x (MOVWconst [c])) + // result: (SRRconst x [c&31]) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMSRRconst) + v.AuxInt = int32ToAuxInt(c & 31) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMSUB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUB (MOVWconst [c]) x) + // result: (RSBconst [c] x) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (SUB x (MOVWconst [c])) + // result: (SUBconst [c] x) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMSUBconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (SUB x (SLLconst [c] y)) + // result: 
(SUBshiftLL x y [c]) + for { + x := v_0 + if v_1.Op != OpARMSLLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMSUBshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (SUB (SLLconst [c] y) x) + // result: (RSBshiftLL x y [c]) + for { + if v_0.Op != OpARMSLLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + y := v_0.Args[0] + x := v_1 + v.reset(OpARMRSBshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (SUB x (SRLconst [c] y)) + // result: (SUBshiftRL x y [c]) + for { + x := v_0 + if v_1.Op != OpARMSRLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMSUBshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (SUB (SRLconst [c] y) x) + // result: (RSBshiftRL x y [c]) + for { + if v_0.Op != OpARMSRLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + y := v_0.Args[0] + x := v_1 + v.reset(OpARMRSBshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (SUB x (SRAconst [c] y)) + // result: (SUBshiftRA x y [c]) + for { + x := v_0 + if v_1.Op != OpARMSRAconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMSUBshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (SUB (SRAconst [c] y) x) + // result: (RSBshiftRA x y [c]) + for { + if v_0.Op != OpARMSRAconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + y := v_0.Args[0] + x := v_1 + v.reset(OpARMRSBshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (SUB x (SLL y z)) + // result: (SUBshiftLLreg x y z) + for { + x := v_0 + if v_1.Op != OpARMSLL { + break + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMSUBshiftLLreg) + v.AddArg3(x, y, z) + return true + } + // match: (SUB (SLL y z) x) + // result: (RSBshiftLLreg x y z) + for { + if v_0.Op != OpARMSLL { + break + } + z := v_0.Args[1] + y := v_0.Args[0] + x 
:= v_1 + v.reset(OpARMRSBshiftLLreg) + v.AddArg3(x, y, z) + return true + } + // match: (SUB x (SRL y z)) + // result: (SUBshiftRLreg x y z) + for { + x := v_0 + if v_1.Op != OpARMSRL { + break + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMSUBshiftRLreg) + v.AddArg3(x, y, z) + return true + } + // match: (SUB (SRL y z) x) + // result: (RSBshiftRLreg x y z) + for { + if v_0.Op != OpARMSRL { + break + } + z := v_0.Args[1] + y := v_0.Args[0] + x := v_1 + v.reset(OpARMRSBshiftRLreg) + v.AddArg3(x, y, z) + return true + } + // match: (SUB x (SRA y z)) + // result: (SUBshiftRAreg x y z) + for { + x := v_0 + if v_1.Op != OpARMSRA { + break + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMSUBshiftRAreg) + v.AddArg3(x, y, z) + return true + } + // match: (SUB (SRA y z) x) + // result: (RSBshiftRAreg x y z) + for { + if v_0.Op != OpARMSRA { + break + } + z := v_0.Args[1] + y := v_0.Args[0] + x := v_1 + v.reset(OpARMRSBshiftRAreg) + v.AddArg3(x, y, z) + return true + } + // match: (SUB x x) + // result: (MOVWconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SUB a (MUL x y)) + // cond: buildcfg.GOARM.Version == 7 + // result: (MULS x y a) + for { + a := v_0 + if v_1.Op != OpARMMUL { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(buildcfg.GOARM.Version == 7) { + break + } + v.reset(OpARMMULS) + v.AddArg3(x, y, a) + return true + } + return false +} +func rewriteValueARM_OpARMSUBD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBD a (MULD x y)) + // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6 + // result: (MULSD a x y) + for { + a := v_0 + if v_1.Op != OpARMMULD { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) { + break + } + v.reset(OpARMMULSD) + v.AddArg3(a, x, y) + return true + } + // match: (SUBD a (NMULD x y)) + // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6 + // 
result: (MULAD a x y) + for { + a := v_0 + if v_1.Op != OpARMNMULD { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) { + break + } + v.reset(OpARMMULAD) + v.AddArg3(a, x, y) + return true + } + return false +} +func rewriteValueARM_OpARMSUBF(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBF a (MULF x y)) + // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6 + // result: (MULSF a x y) + for { + a := v_0 + if v_1.Op != OpARMMULF { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) { + break + } + v.reset(OpARMMULSF) + v.AddArg3(a, x, y) + return true + } + // match: (SUBF a (NMULF x y)) + // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6 + // result: (MULAF a x y) + for { + a := v_0 + if v_1.Op != OpARMNMULF { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) { + break + } + v.reset(OpARMMULAF) + v.AddArg3(a, x, y) + return true + } + return false +} +func rewriteValueARM_OpARMSUBS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBS x (MOVWconst [c])) + // result: (SUBSconst [c] x) + for { + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMSUBSconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (SUBS x (SLLconst [c] y)) + // result: (SUBSshiftLL x y [c]) + for { + x := v_0 + if v_1.Op != OpARMSLLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMSUBSshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (SUBS (SLLconst [c] y) x) + // result: (RSBSshiftLL x y [c]) + for { + if v_0.Op != OpARMSLLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + y := v_0.Args[0] + x := v_1 + v.reset(OpARMRSBSshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (SUBS x (SRLconst [c] y)) + // result: 
(SUBSshiftRL x y [c]) + for { + x := v_0 + if v_1.Op != OpARMSRLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMSUBSshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (SUBS (SRLconst [c] y) x) + // result: (RSBSshiftRL x y [c]) + for { + if v_0.Op != OpARMSRLconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + y := v_0.Args[0] + x := v_1 + v.reset(OpARMRSBSshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (SUBS x (SRAconst [c] y)) + // result: (SUBSshiftRA x y [c]) + for { + x := v_0 + if v_1.Op != OpARMSRAconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMSUBSshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (SUBS (SRAconst [c] y) x) + // result: (RSBSshiftRA x y [c]) + for { + if v_0.Op != OpARMSRAconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + y := v_0.Args[0] + x := v_1 + v.reset(OpARMRSBSshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + // match: (SUBS x (SLL y z)) + // result: (SUBSshiftLLreg x y z) + for { + x := v_0 + if v_1.Op != OpARMSLL { + break + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMSUBSshiftLLreg) + v.AddArg3(x, y, z) + return true + } + // match: (SUBS (SLL y z) x) + // result: (RSBSshiftLLreg x y z) + for { + if v_0.Op != OpARMSLL { + break + } + z := v_0.Args[1] + y := v_0.Args[0] + x := v_1 + v.reset(OpARMRSBSshiftLLreg) + v.AddArg3(x, y, z) + return true + } + // match: (SUBS x (SRL y z)) + // result: (SUBSshiftRLreg x y z) + for { + x := v_0 + if v_1.Op != OpARMSRL { + break + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMSUBSshiftRLreg) + v.AddArg3(x, y, z) + return true + } + // match: (SUBS (SRL y z) x) + // result: (RSBSshiftRLreg x y z) + for { + if v_0.Op != OpARMSRL { + break + } + z := v_0.Args[1] + y := v_0.Args[0] + x := v_1 + v.reset(OpARMRSBSshiftRLreg) + v.AddArg3(x, y, z) + return true + 
} + // match: (SUBS x (SRA y z)) + // result: (SUBSshiftRAreg x y z) + for { + x := v_0 + if v_1.Op != OpARMSRA { + break + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMSUBSshiftRAreg) + v.AddArg3(x, y, z) + return true + } + // match: (SUBS (SRA y z) x) + // result: (RSBSshiftRAreg x y z) + for { + if v_0.Op != OpARMSRA { + break + } + z := v_0.Args[1] + y := v_0.Args[0] + x := v_1 + v.reset(OpARMRSBSshiftRAreg) + v.AddArg3(x, y, z) + return true + } + return false +} +func rewriteValueARM_OpARMSUBSshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SUBSshiftLL (MOVWconst [c]) x [d]) + // result: (RSBSconst [c] (SLLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMRSBSconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SUBSshiftLL x (MOVWconst [c]) [d]) + // result: (SUBSconst x [c< x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMRSBSconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (SUBSshiftLLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (SUBSshiftLL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMSUBSshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMSUBSshiftRA(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SUBSshiftRA (MOVWconst [c]) x [d]) + // result: (RSBSconst [c] (SRAconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } 
+ c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMRSBSconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SUBSshiftRA x (MOVWconst [c]) [d]) + // result: (SUBSconst x [c>>uint64(d)]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMSUBSconst) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SUBSshiftRAreg (MOVWconst [c]) x y) + // result: (RSBSconst [c] (SRA x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMRSBSconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (SUBSshiftRAreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (SUBSshiftRA x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMSUBSshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMSUBSshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SUBSshiftRL (MOVWconst [c]) x [d]) + // result: (RSBSconst [c] (SRLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMRSBSconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SUBSshiftRL x (MOVWconst [c]) [d]) + // result: 
(SUBSconst x [int32(uint32(c)>>uint64(d))]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMSUBSconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SUBSshiftRLreg (MOVWconst [c]) x y) + // result: (RSBSconst [c] (SRL x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMRSBSconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (SUBSshiftRLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (SUBSshiftRL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMSUBSshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMSUBconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SUBconst [off1] (MOVWaddr [off2] {sym} ptr)) + // result: (MOVWaddr [off2-off1] {sym} ptr) + for { + off1 := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + v.reset(OpARMMOVWaddr) + v.AuxInt = int32ToAuxInt(off2 - off1) + v.Aux = symToAux(sym) + v.AddArg(ptr) + return true + } + // match: (SUBconst [0] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SUBconst [c] x) + // cond: !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) + // result: (ADDconst [-c] x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(!isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c))) { + 
break + } + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(-c) + v.AddArg(x) + return true + } + // match: (SUBconst [c] x) + // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff + // result: (ADDconst [-c] x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) { + break + } + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(-c) + v.AddArg(x) + return true + } + // match: (SUBconst [c] (MOVWconst [d])) + // result: (MOVWconst [d-c]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(d - c) + return true + } + // match: (SUBconst [c] (SUBconst [d] x)) + // result: (ADDconst [-c-d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMSUBconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(-c - d) + v.AddArg(x) + return true + } + // match: (SUBconst [c] (ADDconst [d] x)) + // result: (ADDconst [-c+d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMADDconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(-c + d) + v.AddArg(x) + return true + } + // match: (SUBconst [c] (RSBconst [d] x)) + // result: (RSBconst [-c+d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMRSBconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(-c + d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMSUBshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SUBshiftLL (MOVWconst [c]) x [d]) + // result: (RSBconst [c] (SLLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break 
+ } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SUBshiftLL x (MOVWconst [c]) [d]) + // result: (SUBconst x [c< x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (SUBshiftLLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (SUBshiftLL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMSUBshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMSUBshiftRA(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SUBshiftRA (MOVWconst [c]) x [d]) + // result: (RSBconst [c] (SRAconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SUBshiftRA x (MOVWconst [c]) [d]) + // result: (SUBconst x [c>>uint64(d)]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMSUBconst) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + // match: (SUBshiftRA (SRAconst x [c]) x [c]) + // result: (MOVWconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + 
break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM_OpARMSUBshiftRAreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SUBshiftRAreg (MOVWconst [c]) x y) + // result: (RSBconst [c] (SRA x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (SUBshiftRAreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (SUBshiftRA x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMSUBshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMSUBshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SUBshiftRL (MOVWconst [c]) x [d]) + // result: (RSBconst [c] (SRLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SUBshiftRL x (MOVWconst [c]) [d]) + // result: (SUBconst x [int32(uint32(c)>>uint64(d))]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMSUBconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + v.AddArg(x) + return true + } + // match: (SUBshiftRL (SRLconst x [c]) x [c]) + // result: (MOVWconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c { + break + } + x := 
v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM_OpARMSUBshiftRLreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SUBshiftRLreg (MOVWconst [c]) x y) + // result: (RSBconst [c] (SRL x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (SUBshiftRLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (SUBshiftRL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMSUBshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMTEQ(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (TEQ x (MOVWconst [c])) + // result: (TEQconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMMOVWconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMTEQconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (TEQ x (SLLconst [c] y)) + // result: (TEQshiftLL x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSLLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMTEQshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (TEQ x (SRLconst [c] y)) + // result: (TEQshiftRL x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + 
v.reset(OpARMTEQshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (TEQ x (SRAconst [c] y)) + // result: (TEQshiftRA x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRAconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMTEQshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (TEQ x (SLL y z)) + // result: (TEQshiftLLreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSLL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMTEQshiftLLreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (TEQ x (SRL y z)) + // result: (TEQshiftRLreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMTEQshiftRLreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (TEQ x (SRA y z)) + // result: (TEQshiftRAreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRA { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMTEQshiftRAreg) + v.AddArg3(x, y, z) + return true + } + break + } + return false +} +func rewriteValueARM_OpARMTEQconst(v *Value) bool { + v_0 := v.Args[0] + // match: (TEQconst (MOVWconst [x]) [y]) + // result: (FlagConstant [logicFlags32(x^y)]) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMFlagConstant) + v.AuxInt = flagConstantToAuxInt(logicFlags32(x ^ y)) + return true + } + return false +} +func rewriteValueARM_OpARMTEQshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TEQshiftLL (MOVWconst [c]) x [d]) + // result: (TEQconst [c] (SLLconst x [d])) + for { + d := 
auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMTEQconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (TEQshiftLL x (MOVWconst [c]) [d]) + // result: (TEQconst x [c< x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMTEQconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (TEQshiftLLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (TEQshiftLL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMTEQshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMTEQshiftRA(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TEQshiftRA (MOVWconst [c]) x [d]) + // result: (TEQconst [c] (SRAconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMTEQconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (TEQshiftRA x (MOVWconst [c]) [d]) + // result: (TEQconst x [c>>uint64(d)]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMTEQconst) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMTEQshiftRAreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(TEQshiftRAreg (MOVWconst [c]) x y) + // result: (TEQconst [c] (SRA x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMTEQconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (TEQshiftRAreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (TEQshiftRA x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMTEQshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMTEQshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TEQshiftRL (MOVWconst [c]) x [d]) + // result: (TEQconst [c] (SRLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMTEQconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (TEQshiftRL x (MOVWconst [c]) [d]) + // result: (TEQconst x [int32(uint32(c)>>uint64(d))]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMTEQconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMTEQshiftRLreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TEQshiftRLreg (MOVWconst [c]) x y) + // result: (TEQconst [c] (SRL x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMTEQconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + 
v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (TEQshiftRLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (TEQshiftRL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMTEQshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMTST(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (TST x (MOVWconst [c])) + // result: (TSTconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMMOVWconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMTSTconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (TST x (SLLconst [c] y)) + // result: (TSTshiftLL x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSLLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMTSTshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (TST x (SRLconst [c] y)) + // result: (TSTshiftRL x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMTSTshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (TST x (SRAconst [c] y)) + // result: (TSTshiftRA x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRAconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMTSTshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (TST x (SLL y z)) + // result: (TSTshiftLLreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 
= _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSLL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMTSTshiftLLreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (TST x (SRL y z)) + // result: (TSTshiftRLreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMTSTshiftRLreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (TST x (SRA y z)) + // result: (TSTshiftRAreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRA { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMTSTshiftRAreg) + v.AddArg3(x, y, z) + return true + } + break + } + return false +} +func rewriteValueARM_OpARMTSTconst(v *Value) bool { + v_0 := v.Args[0] + // match: (TSTconst (MOVWconst [x]) [y]) + // result: (FlagConstant [logicFlags32(x&y)]) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + x := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMFlagConstant) + v.AuxInt = flagConstantToAuxInt(logicFlags32(x & y)) + return true + } + return false +} +func rewriteValueARM_OpARMTSTshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TSTshiftLL (MOVWconst [c]) x [d]) + // result: (TSTconst [c] (SLLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMTSTconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (TSTshiftLL x (MOVWconst [c]) [d]) + // result: (TSTconst x [c< x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMTSTconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, 
OpARMSLL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (TSTshiftLLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (TSTshiftLL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMTSTshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMTSTshiftRA(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TSTshiftRA (MOVWconst [c]) x [d]) + // result: (TSTconst [c] (SRAconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMTSTconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (TSTshiftRA x (MOVWconst [c]) [d]) + // result: (TSTconst x [c>>uint64(d)]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMTSTconst) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMTSTshiftRAreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TSTshiftRAreg (MOVWconst [c]) x y) + // result: (TSTconst [c] (SRA x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMTSTconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (TSTshiftRAreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (TSTshiftRA x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { 
+ break + } + v.reset(OpARMTSTshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMTSTshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TSTshiftRL (MOVWconst [c]) x [d]) + // result: (TSTconst [c] (SRLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMTSTconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (TSTshiftRL x (MOVWconst [c]) [d]) + // result: (TSTconst x [int32(uint32(c)>>uint64(d))]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMTSTconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMTSTshiftRLreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TSTshiftRLreg (MOVWconst [c]) x y) + // result: (TSTconst [c] (SRL x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMTSTconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (TSTshiftRLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (TSTshiftRL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMTSTshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMXOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XOR x (MOVWconst [c])) + // result: (XORconst [c] x) + for { 
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMMOVWconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMXORconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (XOR x (SLLconst [c] y)) + // result: (XORshiftLL x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSLLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMXORshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (XOR x (SRLconst [c] y)) + // result: (XORshiftRL x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRLconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMXORshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (XOR x (SRAconst [c] y)) + // result: (XORshiftRA x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRAconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMXORshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (XOR x (SRRconst [c] y)) + // result: (XORshiftRR x y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRRconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + v.reset(OpARMXORshiftRR) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + break + } + // match: (XOR x (SLL y z)) + // result: (XORshiftLLreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSLL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMXORshiftLLreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (XOR x (SRL y z)) + 
// result: (XORshiftRLreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRL { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMXORshiftRLreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (XOR x (SRA y z)) + // result: (XORshiftRAreg x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARMSRA { + continue + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARMXORshiftRAreg) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (XOR x x) + // result: (MOVWconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM_OpARMXORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (XORconst [0] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (XORconst [c] (MOVWconst [d])) + // result: (MOVWconst [c^d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(c ^ d) + return true + } + // match: (XORconst [c] (XORconst [d] x)) + // result: (XORconst [c^d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMXORconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARMXORconst) + v.AuxInt = int32ToAuxInt(c ^ d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMXORshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (XORshiftLL (MOVWconst [c]) x [d]) + // result: (XORconst [c] (SLLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMXORconst) + v.AuxInt = int32ToAuxInt(c) + v0 := 
b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (XORshiftLL x (MOVWconst [c]) [d]) + // result: (XORconst x [c< [8] (BFXU [int32(armBFAuxInt(8, 8))] x) x) + // result: (REV16 x) + for { + if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != int32(armBFAuxInt(8, 8)) { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARMREV16) + v.AddArg(x) + return true + } + // match: (XORshiftLL [8] (SRLconst [24] (SLLconst [16] x)) x) + // cond: buildcfg.GOARM.Version>=6 + // result: (REV16 x) + for { + if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMSLLconst || auxIntToInt32(v_0_0.AuxInt) != 16 { + break + } + x := v_0_0.Args[0] + if x != v_1 || !(buildcfg.GOARM.Version >= 6) { + break + } + v.reset(OpARMREV16) + v.AddArg(x) + return true + } + // match: (XORshiftLL (SLLconst x [c]) x [c]) + // result: (MOVWconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM_OpARMXORshiftLLreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (XORshiftLLreg (MOVWconst [c]) x y) + // result: (XORconst [c] (SLL x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMXORconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (XORshiftLLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (XORshiftLL x y 
[c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMXORshiftLL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMXORshiftRA(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (XORshiftRA (MOVWconst [c]) x [d]) + // result: (XORconst [c] (SRAconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMXORconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (XORshiftRA x (MOVWconst [c]) [d]) + // result: (XORconst x [c>>uint64(d)]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMXORconst) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + // match: (XORshiftRA (SRAconst x [c]) x [c]) + // result: (MOVWconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM_OpARMXORshiftRAreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (XORshiftRAreg (MOVWconst [c]) x y) + // result: (XORconst [c] (SRA x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMXORconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (XORshiftRAreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 32 + // result: (XORshiftRA x y 
[c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMXORshiftRA) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMXORshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (XORshiftRL (MOVWconst [c]) x [d]) + // result: (XORconst [c] (SRLconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMXORconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (XORshiftRL x (MOVWconst [c]) [d]) + // result: (XORconst x [int32(uint32(c)>>uint64(d))]) + for { + d := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpARMXORconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + v.AddArg(x) + return true + } + // match: (XORshiftRL (SRLconst x [c]) x [c]) + // result: (MOVWconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM_OpARMXORshiftRLreg(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (XORshiftRLreg (MOVWconst [c]) x y) + // result: (XORconst [c] (SRL x y)) + for { + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARMXORconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (XORshiftRLreg x y (MOVWconst [c])) + // cond: 0 <= c && c < 
32 + // result: (XORshiftRL x y [c]) + for { + x := v_0 + y := v_1 + if v_2.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(0 <= c && c < 32) { + break + } + v.reset(OpARMXORshiftRL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM_OpARMXORshiftRR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (XORshiftRR (MOVWconst [c]) x [d]) + // result: (XORconst [c] (SRRconst x [d])) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpARMXORconst) + v.AuxInt = int32ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARMSRRconst, x.Type) + v0.AuxInt = int32ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (XORshiftRR x (MOVWconst [c]) [d]) + // result: (XORconst x [int32(uint32(c)>>uint64(d)|uint32(c)<>uint64(d) | uint32(c)< x y) + // result: (ADD (SRLconst (SUB x y) [1]) y) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpARMADD) + v0 := b.NewValue0(v.Pos, OpARMSRLconst, t) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpARMSUB, t) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg2(v0, y) + return true + } +} +func rewriteValueARM_OpBitLen32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (BitLen32 x) + // result: (RSBconst [32] (CLZ x)) + for { + t := v.Type + x := v_0 + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpARMCLZ, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpBswap32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Bswap32 x) + // cond: buildcfg.GOARM.Version==5 + // result: (XOR (SRLconst (BICconst (XOR x (SRRconst [16] x)) [0xff0000]) [8]) (SRRconst x [8])) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOARM.Version == 5) { + break + } + v.reset(OpARMXOR) + v.Type = t + v0 := b.NewValue0(v.Pos, OpARMSRLconst, t) + v0.AuxInt = 
int32ToAuxInt(8) + v1 := b.NewValue0(v.Pos, OpARMBICconst, t) + v1.AuxInt = int32ToAuxInt(0xff0000) + v2 := b.NewValue0(v.Pos, OpARMXOR, t) + v3 := b.NewValue0(v.Pos, OpARMSRRconst, t) + v3.AuxInt = int32ToAuxInt(16) + v3.AddArg(x) + v2.AddArg2(x, v3) + v1.AddArg(v2) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpARMSRRconst, t) + v4.AuxInt = int32ToAuxInt(8) + v4.AddArg(x) + v.AddArg2(v0, v4) + return true + } + // match: (Bswap32 x) + // cond: buildcfg.GOARM.Version>=6 + // result: (REV x) + for { + x := v_0 + if !(buildcfg.GOARM.Version >= 6) { + break + } + v.reset(OpARMREV) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpConst16(v *Value) bool { + // match: (Const16 [val]) + // result: (MOVWconst [int32(val)]) + for { + val := auxIntToInt16(v.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(int32(val)) + return true + } +} +func rewriteValueARM_OpConst32(v *Value) bool { + // match: (Const32 [val]) + // result: (MOVWconst [int32(val)]) + for { + val := auxIntToInt32(v.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(int32(val)) + return true + } +} +func rewriteValueARM_OpConst32F(v *Value) bool { + // match: (Const32F [val]) + // result: (MOVFconst [float64(val)]) + for { + val := auxIntToFloat32(v.AuxInt) + v.reset(OpARMMOVFconst) + v.AuxInt = float64ToAuxInt(float64(val)) + return true + } +} +func rewriteValueARM_OpConst64F(v *Value) bool { + // match: (Const64F [val]) + // result: (MOVDconst [float64(val)]) + for { + val := auxIntToFloat64(v.AuxInt) + v.reset(OpARMMOVDconst) + v.AuxInt = float64ToAuxInt(float64(val)) + return true + } +} +func rewriteValueARM_OpConst8(v *Value) bool { + // match: (Const8 [val]) + // result: (MOVWconst [int32(val)]) + for { + val := auxIntToInt8(v.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(int32(val)) + return true + } +} +func rewriteValueARM_OpConstBool(v *Value) bool { + // match: (ConstBool [t]) + // result: (MOVWconst [b2i32(t)]) + for { + t := 
auxIntToBool(v.AuxInt) + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(b2i32(t)) + return true + } +} +func rewriteValueARM_OpConstNil(v *Value) bool { + // match: (ConstNil) + // result: (MOVWconst [0]) + for { + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } +} +func rewriteValueARM_OpCtz16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz16 x) + // cond: buildcfg.GOARM.Version<=6 + // result: (RSBconst [32] (CLZ (SUBconst (AND (ORconst [0x10000] x) (RSBconst [0] (ORconst [0x10000] x))) [1]))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOARM.Version <= 6) { + break + } + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpARMCLZ, t) + v1 := b.NewValue0(v.Pos, OpARMSUBconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpARMAND, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32) + v3.AuxInt = int32ToAuxInt(0x10000) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpARMRSBconst, typ.UInt32) + v4.AuxInt = int32ToAuxInt(0) + v4.AddArg(v3) + v2.AddArg2(v3, v4) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Ctz16 x) + // cond: buildcfg.GOARM.Version==7 + // result: (CLZ (RBIT (ORconst [0x10000] x))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOARM.Version == 7) { + break + } + v.reset(OpARMCLZ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpARMRBIT, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(0x10000) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM_OpCtz32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Ctz32 x) + // cond: buildcfg.GOARM.Version<=6 + // result: (RSBconst [32] (CLZ (SUBconst (AND x (RSBconst [0] x)) [1]))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOARM.Version <= 6) { + break + } + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(32) + v0 := 
b.NewValue0(v.Pos, OpARMCLZ, t) + v1 := b.NewValue0(v.Pos, OpARMSUBconst, t) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpARMAND, t) + v3 := b.NewValue0(v.Pos, OpARMRSBconst, t) + v3.AuxInt = int32ToAuxInt(0) + v3.AddArg(x) + v2.AddArg2(x, v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Ctz32 x) + // cond: buildcfg.GOARM.Version==7 + // result: (CLZ (RBIT x)) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOARM.Version == 7) { + break + } + v.reset(OpARMCLZ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpARMRBIT, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM_OpCtz8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz8 x) + // cond: buildcfg.GOARM.Version<=6 + // result: (RSBconst [32] (CLZ (SUBconst (AND (ORconst [0x100] x) (RSBconst [0] (ORconst [0x100] x))) [1]))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOARM.Version <= 6) { + break + } + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpARMCLZ, t) + v1 := b.NewValue0(v.Pos, OpARMSUBconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpARMAND, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32) + v3.AuxInt = int32ToAuxInt(0x100) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpARMRSBconst, typ.UInt32) + v4.AuxInt = int32ToAuxInt(0) + v4.AddArg(v3) + v2.AddArg2(v3, v4) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Ctz8 x) + // cond: buildcfg.GOARM.Version==7 + // result: (CLZ (RBIT (ORconst [0x100] x))) + for { + t := v.Type + x := v_0 + if !(buildcfg.GOARM.Version == 7) { + break + } + v.reset(OpARMCLZ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpARMRBIT, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(0x100) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM_OpDiv16(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 x y) + // result: (Div32 (SignExt16to32 x) (SignExt16to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpDiv32) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM_OpDiv16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16u x y) + // result: (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpDiv32u) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM_OpDiv32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32 x y) + // result: (SUB (XOR (Select0 (CALLudiv (SUB (XOR x (Signmask x)) (Signmask x)) (SUB (XOR y (Signmask y)) (Signmask y)))) (Signmask (XOR x y))) (Signmask (XOR x y))) + for { + x := v_0 + y := v_1 + v.reset(OpARMSUB) + v0 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) + v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v5.AddArg(x) + v4.AddArg2(x, v5) + v3.AddArg2(v4, v5) + v6 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) + v7 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) + v8 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v8.AddArg(y) + v7.AddArg2(y, v8) + v6.AddArg2(v7, v8) + v2.AddArg2(v3, v6) + v1.AddArg(v2) + v9 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v10 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) + v10.AddArg2(x, y) + v9.AddArg(v10) + v0.AddArg2(v1, 
v9) + v.AddArg2(v0, v9) + return true + } +} +func rewriteValueARM_OpDiv32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32u x y) + // result: (Select0 (CALLudiv x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v.Type = typ.UInt32 + v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpDiv8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (Div32 (SignExt8to32 x) (SignExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpDiv32) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM_OpDiv8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpDiv32u) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM_OpEq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq16 x y) + // result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARMEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpEq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32 x y) + // result: (Equal (CMP x y)) + 
for { + x := v_0 + y := v_1 + v.reset(OpARMEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpEq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32F x y) + // result: (Equal (CMPF x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMEqual) + v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpEq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq64F x y) + // result: (Equal (CMPD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMEqual) + v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpEq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq8 x y) + // result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARMEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpEqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqB x y) + // result: (XORconst [1] (XOR x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMXORconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpARMXOR, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpEqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (EqPtr x y) + // result: (Equal (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} 
+func rewriteValueARM_OpFMA(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMA x y z) + // result: (FMULAD z x y) + for { + x := v_0 + y := v_1 + z := v_2 + v.reset(OpARMFMULAD) + v.AddArg3(z, x, y) + return true + } +} +func rewriteValueARM_OpIsInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (IsInBounds idx len) + // result: (LessThanU (CMP idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpARMLessThanU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpIsNonNil(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (IsNonNil ptr) + // result: (NotEqual (CMPconst [0] ptr)) + for { + ptr := v_0 + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(ptr) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpIsSliceInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (IsSliceInBounds idx len) + // result: (LessEqualU (CMP idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpARMLessEqualU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16 x y) + // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARMLessEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLeq16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16U x y) 
+ // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARMLessEqualU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32 x y) + // result: (LessEqual (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMLessEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32F x y) + // result: (GreaterEqual (CMPF y x)) + for { + x := v_0 + y := v_1 + v.reset(OpARMGreaterEqual) + v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLeq32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32U x y) + // result: (LessEqualU (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMLessEqualU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64F x y) + // result: (GreaterEqual (CMPD y x)) + for { + x := v_0 + y := v_1 + v.reset(OpARMGreaterEqual) + v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8 x y) + // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARMLessEqual) + v0 := b.NewValue0(v.Pos, 
OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLeq8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8U x y) + // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARMLessEqualU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLess16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16 x y) + // result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARMLessThan) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLess16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16U x y) + // result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARMLessThanU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLess32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32 x y) + // result: (LessThan (CMP x y)) + for { + x := v_0 + y := 
v_1 + v.reset(OpARMLessThan) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLess32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32F x y) + // result: (GreaterThan (CMPF y x)) + for { + x := v_0 + y := v_1 + v.reset(OpARMGreaterThan) + v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLess32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32U x y) + // result: (LessThanU (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMLessThanU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLess64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64F x y) + // result: (GreaterThan (CMPD y x)) + for { + x := v_0 + y := v_1 + v.reset(OpARMGreaterThan) + v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLess8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8 x y) + // result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARMLessThan) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLess8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8U x y) + // result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARMLessThanU) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := 
b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Load ptr mem) + // cond: t.IsBoolean() + // result: (MOVBUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean()) { + break + } + v.reset(OpARMMOVBUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is8BitInt(t) && t.IsSigned()) + // result: (MOVBload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is8BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpARMMOVBload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is8BitInt(t) && !t.IsSigned()) + // result: (MOVBUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is8BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpARMMOVBUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is16BitInt(t) && t.IsSigned()) + // result: (MOVHload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpARMMOVHload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is16BitInt(t) && !t.IsSigned()) + // result: (MOVHUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpARMMOVHUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is32BitInt(t) || isPtr(t)) + // result: (MOVWload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t) || isPtr(t)) { + break + } + v.reset(OpARMMOVWload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (MOVFload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { 
+ break + } + v.reset(OpARMMOVFload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (MOVDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(OpARMMOVDload) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueARM_OpLocalAddr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (MOVWaddr {sym} (SPanchored base mem)) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(OpARMMOVWaddr) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) + v.AddArg(v0) + return true + } + // match: (LocalAddr {sym} base _) + // cond: !t.Elem().HasPointers() + // result: (MOVWaddr {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(OpARMMOVWaddr) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + return false +} +func rewriteValueARM_OpLsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x16 x y) + // result: (CMOVWHSconst (SLL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) + for { + x := v_0 + y := v_1 + v.reset(OpARMCMOVWHSconst) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(256) + v2.AddArg(v1) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueARM_OpLsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x32 x y) + // result: (CMOVWHSconst (SLL x y) (CMPconst [256] y) 
[0]) + for { + x := v_0 + y := v_1 + v.reset(OpARMCMOVWHSconst) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v1.AuxInt = int32ToAuxInt(256) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM_OpLsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Lsh16x64 x (Const64 [c])) + // cond: uint64(c) < 16 + // result: (SLLconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 16) { + break + } + v.reset(OpARMSLLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (Lsh16x64 _ (Const64 [c])) + // cond: uint64(c) >= 16 + // result: (Const16 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 16) { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM_OpLsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x8 x y) + // result: (SLL x (ZeroExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMSLL) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueARM_OpLsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x16 x y) + // result: (CMOVWHSconst (SLL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) + for { + x := v_0 + y := v_1 + v.reset(OpARMCMOVWHSconst) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(256) + v2.AddArg(v1) + v.AddArg2(v0, v2) + return true + } +} 
+func rewriteValueARM_OpLsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x32 x y) + // result: (CMOVWHSconst (SLL x y) (CMPconst [256] y) [0]) + for { + x := v_0 + y := v_1 + v.reset(OpARMCMOVWHSconst) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v1.AuxInt = int32ToAuxInt(256) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM_OpLsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Lsh32x64 x (Const64 [c])) + // cond: uint64(c) < 32 + // result: (SLLconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 32) { + break + } + v.reset(OpARMSLLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (Lsh32x64 _ (Const64 [c])) + // cond: uint64(c) >= 32 + // result: (Const32 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 32) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM_OpLsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x8 x y) + // result: (SLL x (ZeroExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMSLL) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueARM_OpLsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x16 x y) + // result: (CMOVWHSconst (SLL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) + for { + x := v_0 + y := v_1 + v.reset(OpARMCMOVWHSconst) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) 
+ v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(256) + v2.AddArg(v1) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueARM_OpLsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x32 x y) + // result: (CMOVWHSconst (SLL x y) (CMPconst [256] y) [0]) + for { + x := v_0 + y := v_1 + v.reset(OpARMCMOVWHSconst) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v1.AuxInt = int32ToAuxInt(256) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM_OpLsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Lsh8x64 x (Const64 [c])) + // cond: uint64(c) < 8 + // result: (SLLconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 8) { + break + } + v.reset(OpARMSLLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (Lsh8x64 _ (Const64 [c])) + // cond: uint64(c) >= 8 + // result: (Const8 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 8) { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM_OpLsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x8 x y) + // result: (SLL x (ZeroExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMSLL) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueARM_OpMod16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16 x y) + // result: (Mod32 (SignExt16to32 x) (SignExt16to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpMod32) + v0 := 
b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM_OpMod16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16u x y) + // result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpMod32u) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM_OpMod32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32 x y) + // result: (SUB (XOR (Select1 (CALLudiv (SUB (XOR x (Signmask x)) (Signmask x)) (SUB (XOR y (Signmask y)) (Signmask y)))) (Signmask x)) (Signmask x)) + for { + x := v_0 + y := v_1 + v.reset(OpARMSUB) + v0 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpSelect1, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32)) + v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v5.AddArg(x) + v4.AddArg2(x, v5) + v3.AddArg2(v4, v5) + v6 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32) + v7 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32) + v8 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v8.AddArg(y) + v7.AddArg2(y, v8) + v6.AddArg2(v7, v8) + v2.AddArg2(v3, v6) + v1.AddArg(v2) + v0.AddArg2(v1, v5) + v.AddArg2(v0, v5) + return true + } +} +func rewriteValueARM_OpMod32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32u x y) + // result: (Select1 (CALLudiv x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v.Type = typ.UInt32 + v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, 
typ.UInt32)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpMod8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8 x y) + // result: (Mod32 (SignExt8to32 x) (SignExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpMod32) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM_OpMod8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8u x y) + // result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpMod32u) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM_OpMove(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Move [0] _ _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBUload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARMMOVBstore) + v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore dst (MOVHUload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpARMMOVHstore) + v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16) + v0.AddArg2(src, mem) + 
v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] dst src mem) + // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARMMOVBstore) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(1) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [4] {t} dst src mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore dst (MOVWload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpARMMOVWstore) + v0 := b.NewValue0(v.Pos, OpARMMOVWload, typ.UInt32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [4] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpARMMOVHstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [4] dst src mem) + // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))) + for { 
+ if auxIntToInt64(v.AuxInt) != 4 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARMMOVBstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v2.AuxInt = int32ToAuxInt(2) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(1) + v4 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v4.AuxInt = int32ToAuxInt(1) + v4.AddArg2(src, mem) + v5 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v6 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [3] dst src mem) + // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARMMOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v2.AuxInt = int32ToAuxInt(1) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [s] {t} dst src mem) + // cond: s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s) + // result: (DUFFCOPY [8 * (128 - s/4)] dst src mem) + for { + s := 
auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) { + break + } + v.reset(OpARMDUFFCOPY) + v.AuxInt = int64ToAuxInt(8 * (128 - s/4)) + v.AddArg3(dst, src, mem) + return true + } + // match: (Move [s] {t} dst src mem) + // cond: ((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) && logLargeCopy(v, s) + // result: (LoweredMove [t.Alignment()] dst src (ADDconst src [int32(s-moveSize(t.Alignment(), config))]) mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) && logLargeCopy(v, s)) { + break + } + v.reset(OpARMLoweredMove) + v.AuxInt = int64ToAuxInt(t.Alignment()) + v0 := b.NewValue0(v.Pos, OpARMADDconst, src.Type) + v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config))) + v0.AddArg(src) + v.AddArg4(dst, src, v0, mem) + return true + } + return false +} +func rewriteValueARM_OpNeg16(v *Value) bool { + v_0 := v.Args[0] + // match: (Neg16 x) + // result: (RSBconst [0] x) + for { + x := v_0 + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpNeg32(v *Value) bool { + v_0 := v.Args[0] + // match: (Neg32 x) + // result: (RSBconst [0] x) + for { + x := v_0 + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpNeg8(v *Value) bool { + v_0 := v.Args[0] + // match: (Neg8 x) + // result: (RSBconst [0] x) + for { + x := v_0 + v.reset(OpARMRSBconst) + v.AuxInt = int32ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpNeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq16 x y) + // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + x := v_0 + y := v_1 + 
v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpNeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq32 x y) + // result: (NotEqual (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpNeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq32F x y) + // result: (NotEqual (CMPF x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpNeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq64F x y) + // result: (NotEqual (CMPD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpNeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq8 x y) + // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpNeqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (NeqPtr x y) + // result: (NotEqual (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMNotEqual) + v0 := 
b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpNot(v *Value) bool { + v_0 := v.Args[0] + // match: (Not x) + // result: (XORconst [1] x) + for { + x := v_0 + v.reset(OpARMXORconst) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpOffPtr(v *Value) bool { + v_0 := v.Args[0] + // match: (OffPtr [off] ptr:(SP)) + // result: (MOVWaddr [int32(off)] ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + if ptr.Op != OpSP { + break + } + v.reset(OpARMMOVWaddr) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) + return true + } + // match: (OffPtr [off] ptr) + // result: (ADDconst [int32(off)] ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) + return true + } +} +func rewriteValueARM_OpPanicBounds(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 0 + // result: (LoweredPanicBoundsA [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 0) { + break + } + v.reset(OpARMLoweredPanicBoundsA) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 1 + // result: (LoweredPanicBoundsB [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 1) { + break + } + v.reset(OpARMLoweredPanicBoundsB) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 2 + // result: (LoweredPanicBoundsC [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 2) { + break + } + v.reset(OpARMLoweredPanicBoundsC) + v.AuxInt = 
int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + return false +} +func rewriteValueARM_OpPanicExtend(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PanicExtend [kind] hi lo y mem) + // cond: boundsABI(kind) == 0 + // result: (LoweredPanicExtendA [kind] hi lo y mem) + for { + kind := auxIntToInt64(v.AuxInt) + hi := v_0 + lo := v_1 + y := v_2 + mem := v_3 + if !(boundsABI(kind) == 0) { + break + } + v.reset(OpARMLoweredPanicExtendA) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg4(hi, lo, y, mem) + return true + } + // match: (PanicExtend [kind] hi lo y mem) + // cond: boundsABI(kind) == 1 + // result: (LoweredPanicExtendB [kind] hi lo y mem) + for { + kind := auxIntToInt64(v.AuxInt) + hi := v_0 + lo := v_1 + y := v_2 + mem := v_3 + if !(boundsABI(kind) == 1) { + break + } + v.reset(OpARMLoweredPanicExtendB) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg4(hi, lo, y, mem) + return true + } + // match: (PanicExtend [kind] hi lo y mem) + // cond: boundsABI(kind) == 2 + // result: (LoweredPanicExtendC [kind] hi lo y mem) + for { + kind := auxIntToInt64(v.AuxInt) + hi := v_0 + lo := v_1 + y := v_2 + mem := v_3 + if !(boundsABI(kind) == 2) { + break + } + v.reset(OpARMLoweredPanicExtendC) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg4(hi, lo, y, mem) + return true + } + return false +} +func rewriteValueARM_OpRotateLeft16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft16 x (MOVWconst [c])) + // result: (Or16 (Lsh16x32 x (MOVWconst [c&15])) (Rsh16Ux32 x (MOVWconst [-c&15]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpOr16) + v0 := b.NewValue0(v.Pos, OpLsh16x32, t) + v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(c & 15) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh16Ux32, t) + v3 := b.NewValue0(v.Pos, OpARMMOVWconst, 
typ.UInt32) + v3.AuxInt = int32ToAuxInt(-c & 15) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueARM_OpRotateLeft32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RotateLeft32 x y) + // result: (SRR x (RSBconst [0] y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMSRR) + v0 := b.NewValue0(v.Pos, OpARMRSBconst, y.Type) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueARM_OpRotateLeft8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft8 x (MOVWconst [c])) + // result: (Or8 (Lsh8x32 x (MOVWconst [c&7])) (Rsh8Ux32 x (MOVWconst [-c&7]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpLsh8x32, t) + v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(c & 7) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh8Ux32, t) + v3 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v3.AuxInt = int32ToAuxInt(-c & 7) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueARM_OpRsh16Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux16 x y) + // result: (CMOVWHSconst (SRL (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) + for { + x := v_0 + y := v_1 + v.reset(OpARMCMOVWHSconst) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(256) + v3.AddArg(v2) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueARM_OpRsh16Ux32(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux32 x y) + // result: (CMOVWHSconst (SRL (ZeroExt16to32 x) y) (CMPconst [256] y) [0]) + for { + x := v_0 + y := v_1 + v.reset(OpARMCMOVWHSconst) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(256) + v2.AddArg(y) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueARM_OpRsh16Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux64 x (Const64 [c])) + // cond: uint64(c) < 16 + // result: (SRLconst (SLLconst x [16]) [int32(c+16)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 16) { + break + } + v.reset(OpARMSRLconst) + v.AuxInt = int32ToAuxInt(int32(c + 16)) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(16) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh16Ux64 _ (Const64 [c])) + // cond: uint64(c) >= 16 + // result: (Const16 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 16) { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM_OpRsh16Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux8 x y) + // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMSRL) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM_OpRsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x16 x y) + // result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARMSRAcond) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(256) + v2.AddArg(v1) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueARM_OpRsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x32 x y) + // result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMSRAcond) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v1.AuxInt = int32ToAuxInt(256) + v1.AddArg(y) + v.AddArg3(v0, y, v1) + return true + } +} +func rewriteValueARM_OpRsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x64 x (Const64 [c])) + // cond: uint64(c) < 16 + // result: (SRAconst (SLLconst x [16]) [int32(c+16)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 16) { + break + } + v.reset(OpARMSRAconst) + v.AuxInt = int32ToAuxInt(int32(c + 16)) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(16) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh16x64 x (Const64 [c])) + // cond: uint64(c) >= 16 + // result: (SRAconst (SLLconst x [16]) [31]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 16) { + break + } + v.reset(OpARMSRAconst) + v.AuxInt = int32ToAuxInt(31) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(16) + v0.AddArg(x) + 
v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM_OpRsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x8 x y) + // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMSRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM_OpRsh32Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux16 x y) + // result: (CMOVWHSconst (SRL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) + for { + x := v_0 + y := v_1 + v.reset(OpARMCMOVWHSconst) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(256) + v2.AddArg(v1) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueARM_OpRsh32Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32Ux32 x y) + // result: (CMOVWHSconst (SRL x y) (CMPconst [256] y) [0]) + for { + x := v_0 + y := v_1 + v.reset(OpARMCMOVWHSconst) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v1.AuxInt = int32ToAuxInt(256) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM_OpRsh32Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Rsh32Ux64 x (Const64 [c])) + // cond: uint64(c) < 32 + // result: (SRLconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 32) { + break + } + v.reset(OpARMSRLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + 
v.AddArg(x) + return true + } + // match: (Rsh32Ux64 _ (Const64 [c])) + // cond: uint64(c) >= 32 + // result: (Const32 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 32) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM_OpRsh32Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux8 x y) + // result: (SRL x (ZeroExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMSRL) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueARM_OpRsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x16 x y) + // result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARMSRAcond) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v1.AuxInt = int32ToAuxInt(256) + v1.AddArg(v0) + v.AddArg3(x, v0, v1) + return true + } +} +func rewriteValueARM_OpRsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32x32 x y) + // result: (SRAcond x y (CMPconst [256] y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMSRAcond) + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(256) + v0.AddArg(y) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueARM_OpRsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Rsh32x64 x (Const64 [c])) + // cond: uint64(c) < 32 + // result: (SRAconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 32) { + break + } + v.reset(OpARMSRAconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // 
match: (Rsh32x64 x (Const64 [c])) + // cond: uint64(c) >= 32 + // result: (SRAconst x [31]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 32) { + break + } + v.reset(OpARMSRAconst) + v.AuxInt = int32ToAuxInt(31) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpRsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x8 x y) + // result: (SRA x (ZeroExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMSRA) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueARM_OpRsh8Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux16 x y) + // result: (CMOVWHSconst (SRL (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) + for { + x := v_0 + y := v_1 + v.reset(OpARMCMOVWHSconst) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(256) + v3.AddArg(v2) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueARM_OpRsh8Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux32 x y) + // result: (CMOVWHSconst (SRL (ZeroExt8to32 x) y) (CMPconst [256] y) [0]) + for { + x := v_0 + y := v_1 + v.reset(OpARMCMOVWHSconst) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(256) + v2.AddArg(y) + v.AddArg2(v0, v2) + return true + 
} +} +func rewriteValueARM_OpRsh8Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux64 x (Const64 [c])) + // cond: uint64(c) < 8 + // result: (SRLconst (SLLconst x [24]) [int32(c+24)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 8) { + break + } + v.reset(OpARMSRLconst) + v.AuxInt = int32ToAuxInt(int32(c + 24)) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(24) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh8Ux64 _ (Const64 [c])) + // cond: uint64(c) >= 8 + // result: (Const8 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 8) { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM_OpRsh8Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux8 x y) + // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMSRL) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM_OpRsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x16 x y) + // result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARMSRAcond) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(256) + v2.AddArg(v1) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueARM_OpRsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x32 x y) + // result: (SRAcond (SignExt8to32 x) y (CMPconst [256] y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMSRAcond) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v1.AuxInt = int32ToAuxInt(256) + v1.AddArg(y) + v.AddArg3(v0, y, v1) + return true + } +} +func rewriteValueARM_OpRsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x64 x (Const64 [c])) + // cond: uint64(c) < 8 + // result: (SRAconst (SLLconst x [24]) [int32(c+24)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 8) { + break + } + v.reset(OpARMSRAconst) + v.AuxInt = int32ToAuxInt(int32(c + 24)) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(24) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh8x64 x (Const64 [c])) + // cond: uint64(c) >= 8 + // result: (SRAconst (SLLconst x [24]) [31]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 8) { + break + } + v.reset(OpARMSRAconst) + v.AuxInt = int32ToAuxInt(31) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(24) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM_OpRsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x8 x y) + // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpARMSRA) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM_OpSelect0(v *Value) bool { + v_0 := v.Args[0] + // match: (Select0 (CALLudiv x (MOVWconst [1]))) 
+ // result: x + for { + if v_0.Op != OpARMCALLudiv { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMMOVWconst || auxIntToInt32(v_0_1.AuxInt) != 1 { + break + } + v.copyOf(x) + return true + } + // match: (Select0 (CALLudiv x (MOVWconst [c]))) + // cond: isPowerOfTwo32(c) + // result: (SRLconst [int32(log32(c))] x) + for { + if v_0.Op != OpARMCALLudiv { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0_1.AuxInt) + if !(isPowerOfTwo32(c)) { + break + } + v.reset(OpARMSRLconst) + v.AuxInt = int32ToAuxInt(int32(log32(c))) + v.AddArg(x) + return true + } + // match: (Select0 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) + // cond: d != 0 + // result: (MOVWconst [int32(uint32(c)/uint32(d))]) + for { + if v_0.Op != OpARMCALLudiv { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0_0.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMMOVWconst { + break + } + d := auxIntToInt32(v_0_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) / uint32(d))) + return true + } + return false +} +func rewriteValueARM_OpSelect1(v *Value) bool { + v_0 := v.Args[0] + // match: (Select1 (CALLudiv _ (MOVWconst [1]))) + // result: (MOVWconst [0]) + for { + if v_0.Op != OpARMCALLudiv { + break + } + _ = v_0.Args[1] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMMOVWconst || auxIntToInt32(v_0_1.AuxInt) != 1 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (Select1 (CALLudiv x (MOVWconst [c]))) + // cond: isPowerOfTwo32(c) + // result: (ANDconst [c-1] x) + for { + if v_0.Op != OpARMCALLudiv { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0_1.AuxInt) + if !(isPowerOfTwo32(c)) { + break + } 
+ v.reset(OpARMANDconst) + v.AuxInt = int32ToAuxInt(c - 1) + v.AddArg(x) + return true + } + // match: (Select1 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) + // cond: d != 0 + // result: (MOVWconst [int32(uint32(c)%uint32(d))]) + for { + if v_0.Op != OpARMCALLudiv { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMMOVWconst { + break + } + c := auxIntToInt32(v_0_0.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMMOVWconst { + break + } + d := auxIntToInt32(v_0_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) % uint32(d))) + return true + } + return false +} +func rewriteValueARM_OpSignmask(v *Value) bool { + v_0 := v.Args[0] + // match: (Signmask x) + // result: (SRAconst x [31]) + for { + x := v_0 + v.reset(OpARMSRAconst) + v.AuxInt = int32ToAuxInt(31) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpSlicemask(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Slicemask x) + // result: (SRAconst (RSBconst [0] x) [31]) + for { + t := v.Type + x := v_0 + v.reset(OpARMSRAconst) + v.AuxInt = int32ToAuxInt(31) + v0 := b.NewValue0(v.Pos, OpARMRSBconst, t) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpStore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Store {t} ptr val mem) + // cond: t.Size() == 1 + // result: (MOVBstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 1) { + break + } + v.reset(OpARMMOVBstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 2 + // result: (MOVHstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 2) { + break + } + v.reset(OpARMMOVHstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && !t.IsFloat() + // 
result: (MOVWstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && !t.IsFloat()) { + break + } + v.reset(OpARMMOVWstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && t.IsFloat() + // result: (MOVFstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && t.IsFloat()) { + break + } + v.reset(OpARMMOVFstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && t.IsFloat() + // result: (MOVDstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && t.IsFloat()) { + break + } + v.reset(OpARMMOVDstore) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueARM_OpZero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Zero [0] _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_1 + v.copyOf(mem) + return true + } + // match: (Zero [1] ptr mem) + // result: (MOVBstore ptr (MOVWconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARMMOVBstore) + v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [2] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore ptr (MOVWconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpARMMOVHstore) + v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [2] ptr mem) + // result: (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr 
(MOVWconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARMMOVBstore) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [4] {t} ptr mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore ptr (MOVWconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpARMMOVWstore) + v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [4] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpARMMOVHstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [4] ptr mem) + // result: (MOVBstore [3] ptr (MOVWconst [0]) (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARMMOVBstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + 
v2.AuxInt = int32ToAuxInt(1) + v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(0) + v3.AddArg3(ptr, v0, mem) + v2.AddArg3(ptr, v0, v3) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [3] ptr mem) + // result: (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARMMOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(0) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [s] {t} ptr mem) + // cond: s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice + // result: (DUFFZERO [4 * (128 - s/4)] ptr (MOVWconst [0]) mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice) { + break + } + v.reset(OpARMDUFFZERO) + v.AuxInt = int64ToAuxInt(4 * (128 - s/4)) + v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [s] {t} ptr mem) + // cond: (s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0 + // result: (LoweredZero [t.Alignment()] ptr (ADDconst ptr [int32(s-moveSize(t.Alignment(), config))]) (MOVWconst [0]) mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) { + break + } + v.reset(OpARMLoweredZero) + v.AuxInt = int64ToAuxInt(t.Alignment()) + v0 := b.NewValue0(v.Pos, OpARMADDconst, ptr.Type) + 
v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config))) + v0.AddArg(ptr) + v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(0) + v.AddArg4(ptr, v0, v1, mem) + return true + } + return false +} +func rewriteValueARM_OpZeromask(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Zeromask x) + // result: (SRAconst (RSBshiftRL x x [1]) [31]) + for { + x := v_0 + v.reset(OpARMSRAconst) + v.AuxInt = int32ToAuxInt(31) + v0 := b.NewValue0(v.Pos, OpARMRSBshiftRL, typ.Int32) + v0.AuxInt = int32ToAuxInt(1) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } +} +func rewriteBlockARM(b *Block) bool { + switch b.Kind { + case BlockARMEQ: + // match: (EQ (FlagConstant [fc]) yes no) + // cond: fc.eq() + // result: (First yes no) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.eq()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (EQ (FlagConstant [fc]) yes no) + // cond: !fc.eq() + // result: (First no yes) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.eq()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (EQ (InvertFlags cmp) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpARMInvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARMEQ, cmp) + return true + } + // match: (EQ (CMP x (RSBconst [0] y))) + // result: (EQ (CMN x y)) + for b.Controls[0].Op == OpARMCMP { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 { + break + } + y := v_0_1.Args[0] + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMN x (RSBconst [0] y))) + // result: (EQ (CMP x y)) + for 
b.Controls[0].Op == OpARMCMN { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 { + continue + } + y := v_0_1.Args[0] + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + break + } + // match: (EQ (CMPconst [0] l:(SUB x y)) yes no) + // cond: l.Uses==1 + // result: (EQ (CMP x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUB { + break + } + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(MULS x y a)) yes no) + // cond: l.Uses==1 + // result: (EQ (CMP a (MUL x y)) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMMULS { + break + } + a := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(SUBconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (EQ (CMPconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + 
b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (EQ (CMPshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (EQ (CMPshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (EQ (CMPshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (EQ (CMPshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) 
!= 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (EQ (CMPshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (EQ (CMPshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(ADD x y)) yes no) + // cond: l.Uses==1 + // result: (EQ (CMN x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADD { + break + } + _ = l.Args[1] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 + if !(l.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + break + } + // match: (EQ 
(CMPconst [0] l:(MULA x y a)) yes no) + // cond: l.Uses==1 + // result: (EQ (CMN a (MUL x y)) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMMULA { + break + } + a := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(ADDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (EQ (CMNconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (EQ (CMNshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (EQ (CMNshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRL { + break + } + c := 
auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (EQ (CMNshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (EQ (CMNshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (EQ (CMNshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // 
result: (EQ (CMNshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(AND x y)) yes no) + // cond: l.Uses==1 + // result: (EQ (TST x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMAND { + break + } + _ = l.Args[1] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 + if !(l.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + break + } + // match: (EQ (CMPconst [0] l:(ANDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (EQ (TSTconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (EQ (TSTshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := 
b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (EQ (TSTshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (EQ (TSTshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (EQ (TSTshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (EQ (TSTshiftRLreg x y z) yes no) + for b.Controls[0].Op 
== OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (EQ (TSTshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(XOR x y)) yes no) + // cond: l.Uses==1 + // result: (EQ (TEQ x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXOR { + break + } + _ = l.Args[1] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 + if !(l.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + break + } + // match: (EQ (CMPconst [0] l:(XORconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (EQ (TEQconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + 
v0.AddArg(x) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(XORshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (EQ (TEQshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(XORshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (EQ (TEQshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(XORshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (EQ (TEQshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (EQ (TEQshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if 
auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (EQ (TEQshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (EQ (TEQshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMEQ, v0) + return true + } + case BlockARMGE: + // match: (GE (FlagConstant [fc]) yes no) + // cond: fc.ge() + // result: (First yes no) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.ge()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (GE (FlagConstant [fc]) yes no) + // cond: !fc.ge() + // result: (First no yes) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.ge()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } 
+ // match: (GE (InvertFlags cmp) yes no) + // result: (LE cmp yes no) + for b.Controls[0].Op == OpARMInvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARMLE, cmp) + return true + } + // match: (GE (CMPconst [0] l:(SUB x y)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (CMP x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUB { + break + } + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(MULS x y a)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (CMP a (MUL x y)) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMMULS { + break + } + a := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(SUBconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (CMPconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GEnoov (CMPshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst 
{ + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GEnoov (CMPshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (GEnoov (CMPshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (CMPshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, 
OpARMCMPshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (CMPshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (CMPshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(ADD x y)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (CMN x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADD { + break + } + _ = l.Args[1] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 + if !(l.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + break + } + // match: (GE (CMPconst [0] l:(MULA x y a)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (CMN a (MUL x y)) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] 
+ if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMMULA { + break + } + a := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(ADDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (CMNconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GEnoov (CMNshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GEnoov (CMNshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) + 
v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (GEnoov (CMNshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (CMNshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (CMNshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (CMNshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if 
auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(AND x y)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (TST x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMAND { + break + } + _ = l.Args[1] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 + if !(l.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + break + } + // match: (GE (CMPconst [0] l:(ANDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (TSTconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GEnoov (TSTshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + 
v0.AddArg2(x, y) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GEnoov (TSTshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (GEnoov (TSTshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (TSTshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (TSTshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if 
auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (TSTshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(XOR x y)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (TEQ x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXOR { + break + } + _ = l.Args[1] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 + if !(l.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + break + } + // match: (GE (CMPconst [0] l:(XORconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (TEQconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + 
b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GEnoov (TEQshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GEnoov (TEQshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (GEnoov (TEQshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (TEQshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] 
+ if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (TEQshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + // match: (GE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GEnoov (TEQshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGEnoov, v0) + return true + } + case BlockARMGEnoov: + // match: (GEnoov (FlagConstant [fc]) yes no) + // cond: fc.geNoov() + // result: (First yes no) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.geNoov()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (GEnoov (FlagConstant [fc]) yes no) + // cond: !fc.geNoov() + // result: (First no yes) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.geNoov()) { + break + } + 
b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GEnoov (InvertFlags cmp) yes no) + // result: (LEnoov cmp yes no) + for b.Controls[0].Op == OpARMInvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARMLEnoov, cmp) + return true + } + case BlockARMGT: + // match: (GT (FlagConstant [fc]) yes no) + // cond: fc.gt() + // result: (First yes no) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.gt()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (GT (FlagConstant [fc]) yes no) + // cond: !fc.gt() + // result: (First no yes) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.gt()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GT (InvertFlags cmp) yes no) + // result: (LT cmp yes no) + for b.Controls[0].Op == OpARMInvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARMLT, cmp) + return true + } + // match: (GT (CMPconst [0] l:(SUB x y)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (CMP x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUB { + break + } + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(MULS x y a)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (CMP a (MUL x y)) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMMULS { + break + } + a := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMP, 
types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(SUBconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (CMPconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GTnoov (CMPshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GTnoov (CMPshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (GTnoov (CMPshiftRA x y [c]) 
yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (CMPshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (CMPshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (CMPshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, 
types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(ADD x y)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (CMN x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADD { + break + } + _ = l.Args[1] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 + if !(l.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + break + } + // match: (GT (CMPconst [0] l:(ADDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (CMNconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GTnoov (CMNshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GTnoov (CMNshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst 
{ + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (GTnoov (CMNshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (CMNshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (CMNshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, 
types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (CMNshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(MULA x y a)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (CMN a (MUL x y)) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMMULA { + break + } + a := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(AND x y)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (TST x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMAND { + break + } + _ = l.Args[1] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 + if !(l.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + break + } + // match: (GT (CMPconst [0] l:(ANDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (TSTconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := 
b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GTnoov (TSTshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GTnoov (TSTshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (GTnoov (TSTshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, 
types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (TSTshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (TSTshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (TSTshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(XOR x y)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (TEQ x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := 
v_0.Args[0] + if l.Op != OpARMXOR { + break + } + _ = l.Args[1] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 + if !(l.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + break + } + // match: (GT (CMPconst [0] l:(XORconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (TEQconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(XORshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GTnoov (TEQshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(XORshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GTnoov (TEQshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) 
+ v0.AddArg2(x, y) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(XORshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (GTnoov (TEQshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (TEQshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (TEQshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + // match: (GT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GTnoov (TEQshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break 
+ } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMGTnoov, v0) + return true + } + case BlockARMGTnoov: + // match: (GTnoov (FlagConstant [fc]) yes no) + // cond: fc.gtNoov() + // result: (First yes no) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.gtNoov()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (GTnoov (FlagConstant [fc]) yes no) + // cond: !fc.gtNoov() + // result: (First no yes) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.gtNoov()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GTnoov (InvertFlags cmp) yes no) + // result: (LTnoov cmp yes no) + for b.Controls[0].Op == OpARMInvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARMLTnoov, cmp) + return true + } + case BlockIf: + // match: (If (Equal cc) yes no) + // result: (EQ cc yes no) + for b.Controls[0].Op == OpARMEqual { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARMEQ, cc) + return true + } + // match: (If (NotEqual cc) yes no) + // result: (NE cc yes no) + for b.Controls[0].Op == OpARMNotEqual { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARMNE, cc) + return true + } + // match: (If (LessThan cc) yes no) + // result: (LT cc yes no) + for b.Controls[0].Op == OpARMLessThan { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARMLT, cc) + return true + } + // match: (If (LessThanU cc) yes no) + // result: (ULT cc yes no) + for b.Controls[0].Op == OpARMLessThanU { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARMULT, cc) + return true + } + // match: (If 
(LessEqual cc) yes no) + // result: (LE cc yes no) + for b.Controls[0].Op == OpARMLessEqual { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARMLE, cc) + return true + } + // match: (If (LessEqualU cc) yes no) + // result: (ULE cc yes no) + for b.Controls[0].Op == OpARMLessEqualU { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARMULE, cc) + return true + } + // match: (If (GreaterThan cc) yes no) + // result: (GT cc yes no) + for b.Controls[0].Op == OpARMGreaterThan { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARMGT, cc) + return true + } + // match: (If (GreaterThanU cc) yes no) + // result: (UGT cc yes no) + for b.Controls[0].Op == OpARMGreaterThanU { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARMUGT, cc) + return true + } + // match: (If (GreaterEqual cc) yes no) + // result: (GE cc yes no) + for b.Controls[0].Op == OpARMGreaterEqual { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARMGE, cc) + return true + } + // match: (If (GreaterEqualU cc) yes no) + // result: (UGE cc yes no) + for b.Controls[0].Op == OpARMGreaterEqualU { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARMUGE, cc) + return true + } + // match: (If cond yes no) + // result: (NE (CMPconst [0] cond) yes no) + for { + cond := b.Controls[0] + v0 := b.NewValue0(cond.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(cond) + b.resetWithControl(BlockARMNE, v0) + return true + } + case BlockARMLE: + // match: (LE (FlagConstant [fc]) yes no) + // cond: fc.le() + // result: (First yes no) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.le()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (LE (FlagConstant [fc]) yes no) + // cond: !fc.le() + // result: (First no yes) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := 
auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.le()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (LE (InvertFlags cmp) yes no) + // result: (GE cmp yes no) + for b.Controls[0].Op == OpARMInvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARMGE, cmp) + return true + } + // match: (LE (CMPconst [0] l:(SUB x y)) yes no) + // cond: l.Uses==1 + // result: (LEnoov (CMP x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUB { + break + } + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(MULS x y a)) yes no) + // cond: l.Uses==1 + // result: (LEnoov (CMP a (MUL x y)) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMMULS { + break + } + a := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(SUBconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (LEnoov (CMPconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(SUBshiftLL x y 
[c])) yes no) + // cond: l.Uses==1 + // result: (LEnoov (CMPshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LEnoov (CMPshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (LEnoov (CMPshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LEnoov (CMPshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftLLreg { + 
break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LEnoov (CMPshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LEnoov (CMPshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(ADD x y)) yes no) + // cond: l.Uses==1 + // result: (LEnoov (CMN x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADD { + break + } + _ = l.Args[1] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 + if !(l.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + break + } + // match: (LE (CMPconst [0] l:(MULA x y a)) yes no) + // 
cond: l.Uses==1 + // result: (LEnoov (CMN a (MUL x y)) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMMULA { + break + } + a := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(ADDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (LEnoov (CMNconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LEnoov (CMNshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LEnoov (CMNshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := 
l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (LEnoov (CMNshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LEnoov (CMNshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LEnoov (CMNshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // 
result: (LEnoov (CMNshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(AND x y)) yes no) + // cond: l.Uses==1 + // result: (LEnoov (TST x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMAND { + break + } + _ = l.Args[1] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 + if !(l.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + break + } + // match: (LE (CMPconst [0] l:(ANDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (LEnoov (TSTconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LEnoov (TSTshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 
1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LEnoov (TSTshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (LEnoov (TSTshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LEnoov (TSTshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: 
(LEnoov (TSTshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LEnoov (TSTshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(XOR x y)) yes no) + // cond: l.Uses==1 + // result: (LEnoov (TEQ x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXOR { + break + } + _ = l.Args[1] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 + if !(l.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + break + } + // match: (LE (CMPconst [0] l:(XORconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (LEnoov (TEQconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := 
b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LEnoov (TEQshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LEnoov (TEQshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (LEnoov (TEQshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: 
(LEnoov (TEQshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LEnoov (TEQshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + // match: (LE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LEnoov (TEQshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLEnoov, v0) + return true + } + case BlockARMLEnoov: + // match: (LEnoov (FlagConstant [fc]) yes no) + // cond: fc.leNoov() + // result: (First yes no) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.leNoov()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (LEnoov (FlagConstant [fc]) yes no) + // cond: !fc.leNoov() + // result: (First no yes) + for b.Controls[0].Op == OpARMFlagConstant { + 
v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.leNoov()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (LEnoov (InvertFlags cmp) yes no) + // result: (GEnoov cmp yes no) + for b.Controls[0].Op == OpARMInvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARMGEnoov, cmp) + return true + } + case BlockARMLT: + // match: (LT (FlagConstant [fc]) yes no) + // cond: fc.lt() + // result: (First yes no) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.lt()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (LT (FlagConstant [fc]) yes no) + // cond: !fc.lt() + // result: (First no yes) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.lt()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (LT (InvertFlags cmp) yes no) + // result: (GT cmp yes no) + for b.Controls[0].Op == OpARMInvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARMGT, cmp) + return true + } + // match: (LT (CMPconst [0] l:(SUB x y)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (CMP x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUB { + break + } + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(MULS x y a)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (CMP a (MUL x y)) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMMULS { + break + } + a := l.Args[2] + x := l.Args[0] + y 
:= l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(SUBconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (CMPconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LTnoov (CMPshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LTnoov (CMPshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] 
l:(SUBshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (LTnoov (CMPshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (CMPshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (CMPshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (CMPshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y 
:= l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(ADD x y)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (CMN x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADD { + break + } + _ = l.Args[1] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 + if !(l.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + break + } + // match: (LT (CMPconst [0] l:(MULA x y a)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (CMN a (MUL x y)) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMMULA { + break + } + a := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(ADDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (CMNconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) + // cond: 
l.Uses==1 + // result: (LTnoov (CMNshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LTnoov (CMNshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (LTnoov (CMNshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (CMNshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftLLreg { + break + } + z := 
l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (CMNshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (CMNshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(AND x y)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (TST x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMAND { + break + } + _ = l.Args[1] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 + if !(l.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + break + } + // match: (LT (CMPconst [0] l:(ANDconst [c] x)) yes no) + // cond: l.Uses==1 + 
// result: (LTnoov (TSTconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LTnoov (TSTshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LTnoov (TSTshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (LTnoov (TSTshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := 
l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (TSTshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (TSTshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (TSTshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(XOR x y)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (TEQ x y) yes no) + for b.Controls[0].Op == 
OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXOR { + break + } + _ = l.Args[1] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 + if !(l.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + break + } + // match: (LT (CMPconst [0] l:(XORconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (TEQconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(XORshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LTnoov (TEQshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(XORshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LTnoov (TEQshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + 
} + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(XORshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (LTnoov (TEQshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (TEQshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (TEQshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + // match: (LT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LTnoov (TEQshiftRAreg x y z) yes no) + for 
b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMLTnoov, v0) + return true + } + case BlockARMLTnoov: + // match: (LTnoov (FlagConstant [fc]) yes no) + // cond: fc.ltNoov() + // result: (First yes no) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.ltNoov()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (LTnoov (FlagConstant [fc]) yes no) + // cond: !fc.ltNoov() + // result: (First no yes) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.ltNoov()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (LTnoov (InvertFlags cmp) yes no) + // result: (GTnoov cmp yes no) + for b.Controls[0].Op == OpARMInvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARMGTnoov, cmp) + return true + } + case BlockARMNE: + // match: (NE (CMPconst [0] (Equal cc)) yes no) + // result: (EQ cc yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMEqual { + break + } + cc := v_0_0.Args[0] + b.resetWithControl(BlockARMEQ, cc) + return true + } + // match: (NE (CMPconst [0] (NotEqual cc)) yes no) + // result: (NE cc yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMNotEqual { + break + } + cc := v_0_0.Args[0] + b.resetWithControl(BlockARMNE, cc) + return true + } + // match: (NE (CMPconst [0] (LessThan cc)) yes no) + // 
result: (LT cc yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMLessThan { + break + } + cc := v_0_0.Args[0] + b.resetWithControl(BlockARMLT, cc) + return true + } + // match: (NE (CMPconst [0] (LessThanU cc)) yes no) + // result: (ULT cc yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMLessThanU { + break + } + cc := v_0_0.Args[0] + b.resetWithControl(BlockARMULT, cc) + return true + } + // match: (NE (CMPconst [0] (LessEqual cc)) yes no) + // result: (LE cc yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMLessEqual { + break + } + cc := v_0_0.Args[0] + b.resetWithControl(BlockARMLE, cc) + return true + } + // match: (NE (CMPconst [0] (LessEqualU cc)) yes no) + // result: (ULE cc yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMLessEqualU { + break + } + cc := v_0_0.Args[0] + b.resetWithControl(BlockARMULE, cc) + return true + } + // match: (NE (CMPconst [0] (GreaterThan cc)) yes no) + // result: (GT cc yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMGreaterThan { + break + } + cc := v_0_0.Args[0] + b.resetWithControl(BlockARMGT, cc) + return true + } + // match: (NE (CMPconst [0] (GreaterThanU cc)) yes no) + // result: (UGT cc yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMGreaterThanU { + break + } + cc := v_0_0.Args[0] + b.resetWithControl(BlockARMUGT, cc) + 
return true + } + // match: (NE (CMPconst [0] (GreaterEqual cc)) yes no) + // result: (GE cc yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMGreaterEqual { + break + } + cc := v_0_0.Args[0] + b.resetWithControl(BlockARMGE, cc) + return true + } + // match: (NE (CMPconst [0] (GreaterEqualU cc)) yes no) + // result: (UGE cc yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARMGreaterEqualU { + break + } + cc := v_0_0.Args[0] + b.resetWithControl(BlockARMUGE, cc) + return true + } + // match: (NE (FlagConstant [fc]) yes no) + // cond: fc.ne() + // result: (First yes no) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.ne()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (NE (FlagConstant [fc]) yes no) + // cond: !fc.ne() + // result: (First no yes) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.ne()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (NE (InvertFlags cmp) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpARMInvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARMNE, cmp) + return true + } + // match: (NE (CMP x (RSBconst [0] y))) + // result: (NE (CMN x y)) + for b.Controls[0].Op == OpARMCMP { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 { + break + } + y := v_0_1.Args[0] + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMN x (RSBconst [0] y))) + // result: (NE (CMP x y)) + for 
b.Controls[0].Op == OpARMCMN { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 { + continue + } + y := v_0_1.Args[0] + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + break + } + // match: (NE (CMPconst [0] l:(SUB x y)) yes no) + // cond: l.Uses==1 + // result: (NE (CMP x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUB { + break + } + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(MULS x y a)) yes no) + // cond: l.Uses==1 + // result: (NE (CMP a (MUL x y)) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMMULS { + break + } + a := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(SUBconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (NE (CMPconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + 
b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (NE (CMPshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (NE (CMPshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (NE (CMPshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (NE (CMPshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) 
!= 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (NE (CMPshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (NE (CMPshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMSUBshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(ADD x y)) yes no) + // cond: l.Uses==1 + // result: (NE (CMN x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADD { + break + } + _ = l.Args[1] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 + if !(l.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + break + } + // match: (NE 
(CMPconst [0] l:(MULA x y a)) yes no) + // cond: l.Uses==1 + // result: (NE (CMN a (MUL x y)) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMMULA { + break + } + a := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(ADDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (NE (CMNconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (NE (CMNshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (NE (CMNshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRL { + break + } + c := 
auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (NE (CMNshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (NE (CMNshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (NE (CMNshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // 
result: (NE (CMNshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMADDshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(AND x y)) yes no) + // cond: l.Uses==1 + // result: (NE (TST x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMAND { + break + } + _ = l.Args[1] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 + if !(l.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + break + } + // match: (NE (CMPconst [0] l:(ANDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (NE (TSTconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg(x) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (NE (TSTshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := 
b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (NE (TSTshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (NE (TSTshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (NE (TSTshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (NE (TSTshiftRLreg x y z) yes no) + for b.Controls[0].Op 
== OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (NE (TSTshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMANDshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(XOR x y)) yes no) + // cond: l.Uses==1 + // result: (NE (TEQ x y) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXOR { + break + } + _ = l.Args[1] + l_0 := l.Args[0] + l_1 := l.Args[1] + for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 { + x := l_0 + y := l_1 + if !(l.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + break + } + // match: (NE (CMPconst [0] l:(XORconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (NE (TEQconst [c] x) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORconst { + break + } + c := auxIntToInt32(l.AuxInt) + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + 
v0.AddArg(x) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (NE (TEQshiftLL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftLL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (NE (TEQshiftRL x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRL { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (NE (TEQshiftRA x y [c]) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRA { + break + } + c := auxIntToInt32(l.AuxInt) + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (NE (TEQshiftLLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if 
auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftLLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (NE (TEQshiftRLreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRLreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (NE (TEQshiftRAreg x y z) yes no) + for b.Controls[0].Op == OpARMCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + l := v_0.Args[0] + if l.Op != OpARMXORshiftRAreg { + break + } + z := l.Args[2] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags) + v0.AddArg3(x, y, z) + b.resetWithControl(BlockARMNE, v0) + return true + } + case BlockARMUGE: + // match: (UGE (FlagConstant [fc]) yes no) + // cond: fc.uge() + // result: (First yes no) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.uge()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (UGE (FlagConstant [fc]) yes no) + // cond: !fc.uge() + // result: (First no yes) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.uge()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return 
true + } + // match: (UGE (InvertFlags cmp) yes no) + // result: (ULE cmp yes no) + for b.Controls[0].Op == OpARMInvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARMULE, cmp) + return true + } + case BlockARMUGT: + // match: (UGT (FlagConstant [fc]) yes no) + // cond: fc.ugt() + // result: (First yes no) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.ugt()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (UGT (FlagConstant [fc]) yes no) + // cond: !fc.ugt() + // result: (First no yes) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.ugt()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (UGT (InvertFlags cmp) yes no) + // result: (ULT cmp yes no) + for b.Controls[0].Op == OpARMInvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARMULT, cmp) + return true + } + case BlockARMULE: + // match: (ULE (FlagConstant [fc]) yes no) + // cond: fc.ule() + // result: (First yes no) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.ule()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (ULE (FlagConstant [fc]) yes no) + // cond: !fc.ule() + // result: (First no yes) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.ule()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (ULE (InvertFlags cmp) yes no) + // result: (UGE cmp yes no) + for b.Controls[0].Op == OpARMInvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARMUGE, cmp) + return true + } + case BlockARMULT: + // match: (ULT (FlagConstant [fc]) yes no) + // cond: fc.ult() + // result: (First yes no) + for b.Controls[0].Op == 
OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.ult()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (ULT (FlagConstant [fc]) yes no) + // cond: !fc.ult() + // result: (First no yes) + for b.Controls[0].Op == OpARMFlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.ult()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (ULT (InvertFlags cmp) yes no) + // result: (UGT cmp yes no) + for b.Controls[0].Op == OpARMInvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARMUGT, cmp) + return true + } + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteARM64.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteARM64.go new file mode 100644 index 0000000000000000000000000000000000000000..8f60f023b18d4986ee98954c41da115212dade6b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -0,0 +1,26539 @@ +// Code generated from _gen/ARM64.rules using 'go generate'; DO NOT EDIT. 
+ +package ssa + +import "cmd/compile/internal/types" + +func rewriteValueARM64(v *Value) bool { + switch v.Op { + case OpARM64ADCSflags: + return rewriteValueARM64_OpARM64ADCSflags(v) + case OpARM64ADD: + return rewriteValueARM64_OpARM64ADD(v) + case OpARM64ADDSflags: + return rewriteValueARM64_OpARM64ADDSflags(v) + case OpARM64ADDconst: + return rewriteValueARM64_OpARM64ADDconst(v) + case OpARM64ADDshiftLL: + return rewriteValueARM64_OpARM64ADDshiftLL(v) + case OpARM64ADDshiftRA: + return rewriteValueARM64_OpARM64ADDshiftRA(v) + case OpARM64ADDshiftRL: + return rewriteValueARM64_OpARM64ADDshiftRL(v) + case OpARM64AND: + return rewriteValueARM64_OpARM64AND(v) + case OpARM64ANDconst: + return rewriteValueARM64_OpARM64ANDconst(v) + case OpARM64ANDshiftLL: + return rewriteValueARM64_OpARM64ANDshiftLL(v) + case OpARM64ANDshiftRA: + return rewriteValueARM64_OpARM64ANDshiftRA(v) + case OpARM64ANDshiftRL: + return rewriteValueARM64_OpARM64ANDshiftRL(v) + case OpARM64ANDshiftRO: + return rewriteValueARM64_OpARM64ANDshiftRO(v) + case OpARM64BIC: + return rewriteValueARM64_OpARM64BIC(v) + case OpARM64BICshiftLL: + return rewriteValueARM64_OpARM64BICshiftLL(v) + case OpARM64BICshiftRA: + return rewriteValueARM64_OpARM64BICshiftRA(v) + case OpARM64BICshiftRL: + return rewriteValueARM64_OpARM64BICshiftRL(v) + case OpARM64BICshiftRO: + return rewriteValueARM64_OpARM64BICshiftRO(v) + case OpARM64CMN: + return rewriteValueARM64_OpARM64CMN(v) + case OpARM64CMNW: + return rewriteValueARM64_OpARM64CMNW(v) + case OpARM64CMNWconst: + return rewriteValueARM64_OpARM64CMNWconst(v) + case OpARM64CMNconst: + return rewriteValueARM64_OpARM64CMNconst(v) + case OpARM64CMNshiftLL: + return rewriteValueARM64_OpARM64CMNshiftLL(v) + case OpARM64CMNshiftRA: + return rewriteValueARM64_OpARM64CMNshiftRA(v) + case OpARM64CMNshiftRL: + return rewriteValueARM64_OpARM64CMNshiftRL(v) + case OpARM64CMP: + return rewriteValueARM64_OpARM64CMP(v) + case OpARM64CMPW: + return rewriteValueARM64_OpARM64CMPW(v) 
+ case OpARM64CMPWconst: + return rewriteValueARM64_OpARM64CMPWconst(v) + case OpARM64CMPconst: + return rewriteValueARM64_OpARM64CMPconst(v) + case OpARM64CMPshiftLL: + return rewriteValueARM64_OpARM64CMPshiftLL(v) + case OpARM64CMPshiftRA: + return rewriteValueARM64_OpARM64CMPshiftRA(v) + case OpARM64CMPshiftRL: + return rewriteValueARM64_OpARM64CMPshiftRL(v) + case OpARM64CSEL: + return rewriteValueARM64_OpARM64CSEL(v) + case OpARM64CSEL0: + return rewriteValueARM64_OpARM64CSEL0(v) + case OpARM64CSETM: + return rewriteValueARM64_OpARM64CSETM(v) + case OpARM64CSINC: + return rewriteValueARM64_OpARM64CSINC(v) + case OpARM64CSINV: + return rewriteValueARM64_OpARM64CSINV(v) + case OpARM64CSNEG: + return rewriteValueARM64_OpARM64CSNEG(v) + case OpARM64DIV: + return rewriteValueARM64_OpARM64DIV(v) + case OpARM64DIVW: + return rewriteValueARM64_OpARM64DIVW(v) + case OpARM64EON: + return rewriteValueARM64_OpARM64EON(v) + case OpARM64EONshiftLL: + return rewriteValueARM64_OpARM64EONshiftLL(v) + case OpARM64EONshiftRA: + return rewriteValueARM64_OpARM64EONshiftRA(v) + case OpARM64EONshiftRL: + return rewriteValueARM64_OpARM64EONshiftRL(v) + case OpARM64EONshiftRO: + return rewriteValueARM64_OpARM64EONshiftRO(v) + case OpARM64Equal: + return rewriteValueARM64_OpARM64Equal(v) + case OpARM64FADDD: + return rewriteValueARM64_OpARM64FADDD(v) + case OpARM64FADDS: + return rewriteValueARM64_OpARM64FADDS(v) + case OpARM64FCMPD: + return rewriteValueARM64_OpARM64FCMPD(v) + case OpARM64FCMPS: + return rewriteValueARM64_OpARM64FCMPS(v) + case OpARM64FMOVDfpgp: + return rewriteValueARM64_OpARM64FMOVDfpgp(v) + case OpARM64FMOVDgpfp: + return rewriteValueARM64_OpARM64FMOVDgpfp(v) + case OpARM64FMOVDload: + return rewriteValueARM64_OpARM64FMOVDload(v) + case OpARM64FMOVDloadidx: + return rewriteValueARM64_OpARM64FMOVDloadidx(v) + case OpARM64FMOVDloadidx8: + return rewriteValueARM64_OpARM64FMOVDloadidx8(v) + case OpARM64FMOVDstore: + return rewriteValueARM64_OpARM64FMOVDstore(v) + case 
OpARM64FMOVDstoreidx: + return rewriteValueARM64_OpARM64FMOVDstoreidx(v) + case OpARM64FMOVDstoreidx8: + return rewriteValueARM64_OpARM64FMOVDstoreidx8(v) + case OpARM64FMOVSload: + return rewriteValueARM64_OpARM64FMOVSload(v) + case OpARM64FMOVSloadidx: + return rewriteValueARM64_OpARM64FMOVSloadidx(v) + case OpARM64FMOVSloadidx4: + return rewriteValueARM64_OpARM64FMOVSloadidx4(v) + case OpARM64FMOVSstore: + return rewriteValueARM64_OpARM64FMOVSstore(v) + case OpARM64FMOVSstoreidx: + return rewriteValueARM64_OpARM64FMOVSstoreidx(v) + case OpARM64FMOVSstoreidx4: + return rewriteValueARM64_OpARM64FMOVSstoreidx4(v) + case OpARM64FMULD: + return rewriteValueARM64_OpARM64FMULD(v) + case OpARM64FMULS: + return rewriteValueARM64_OpARM64FMULS(v) + case OpARM64FNEGD: + return rewriteValueARM64_OpARM64FNEGD(v) + case OpARM64FNEGS: + return rewriteValueARM64_OpARM64FNEGS(v) + case OpARM64FNMULD: + return rewriteValueARM64_OpARM64FNMULD(v) + case OpARM64FNMULS: + return rewriteValueARM64_OpARM64FNMULS(v) + case OpARM64FSUBD: + return rewriteValueARM64_OpARM64FSUBD(v) + case OpARM64FSUBS: + return rewriteValueARM64_OpARM64FSUBS(v) + case OpARM64GreaterEqual: + return rewriteValueARM64_OpARM64GreaterEqual(v) + case OpARM64GreaterEqualF: + return rewriteValueARM64_OpARM64GreaterEqualF(v) + case OpARM64GreaterEqualNoov: + return rewriteValueARM64_OpARM64GreaterEqualNoov(v) + case OpARM64GreaterEqualU: + return rewriteValueARM64_OpARM64GreaterEqualU(v) + case OpARM64GreaterThan: + return rewriteValueARM64_OpARM64GreaterThan(v) + case OpARM64GreaterThanF: + return rewriteValueARM64_OpARM64GreaterThanF(v) + case OpARM64GreaterThanU: + return rewriteValueARM64_OpARM64GreaterThanU(v) + case OpARM64LDP: + return rewriteValueARM64_OpARM64LDP(v) + case OpARM64LessEqual: + return rewriteValueARM64_OpARM64LessEqual(v) + case OpARM64LessEqualF: + return rewriteValueARM64_OpARM64LessEqualF(v) + case OpARM64LessEqualU: + return rewriteValueARM64_OpARM64LessEqualU(v) + case OpARM64LessThan: + 
return rewriteValueARM64_OpARM64LessThan(v) + case OpARM64LessThanF: + return rewriteValueARM64_OpARM64LessThanF(v) + case OpARM64LessThanNoov: + return rewriteValueARM64_OpARM64LessThanNoov(v) + case OpARM64LessThanU: + return rewriteValueARM64_OpARM64LessThanU(v) + case OpARM64MADD: + return rewriteValueARM64_OpARM64MADD(v) + case OpARM64MADDW: + return rewriteValueARM64_OpARM64MADDW(v) + case OpARM64MNEG: + return rewriteValueARM64_OpARM64MNEG(v) + case OpARM64MNEGW: + return rewriteValueARM64_OpARM64MNEGW(v) + case OpARM64MOD: + return rewriteValueARM64_OpARM64MOD(v) + case OpARM64MODW: + return rewriteValueARM64_OpARM64MODW(v) + case OpARM64MOVBUload: + return rewriteValueARM64_OpARM64MOVBUload(v) + case OpARM64MOVBUloadidx: + return rewriteValueARM64_OpARM64MOVBUloadidx(v) + case OpARM64MOVBUreg: + return rewriteValueARM64_OpARM64MOVBUreg(v) + case OpARM64MOVBload: + return rewriteValueARM64_OpARM64MOVBload(v) + case OpARM64MOVBloadidx: + return rewriteValueARM64_OpARM64MOVBloadidx(v) + case OpARM64MOVBreg: + return rewriteValueARM64_OpARM64MOVBreg(v) + case OpARM64MOVBstore: + return rewriteValueARM64_OpARM64MOVBstore(v) + case OpARM64MOVBstoreidx: + return rewriteValueARM64_OpARM64MOVBstoreidx(v) + case OpARM64MOVBstorezero: + return rewriteValueARM64_OpARM64MOVBstorezero(v) + case OpARM64MOVBstorezeroidx: + return rewriteValueARM64_OpARM64MOVBstorezeroidx(v) + case OpARM64MOVDload: + return rewriteValueARM64_OpARM64MOVDload(v) + case OpARM64MOVDloadidx: + return rewriteValueARM64_OpARM64MOVDloadidx(v) + case OpARM64MOVDloadidx8: + return rewriteValueARM64_OpARM64MOVDloadidx8(v) + case OpARM64MOVDnop: + return rewriteValueARM64_OpARM64MOVDnop(v) + case OpARM64MOVDreg: + return rewriteValueARM64_OpARM64MOVDreg(v) + case OpARM64MOVDstore: + return rewriteValueARM64_OpARM64MOVDstore(v) + case OpARM64MOVDstoreidx: + return rewriteValueARM64_OpARM64MOVDstoreidx(v) + case OpARM64MOVDstoreidx8: + return rewriteValueARM64_OpARM64MOVDstoreidx8(v) + case 
OpARM64MOVDstorezero: + return rewriteValueARM64_OpARM64MOVDstorezero(v) + case OpARM64MOVDstorezeroidx: + return rewriteValueARM64_OpARM64MOVDstorezeroidx(v) + case OpARM64MOVDstorezeroidx8: + return rewriteValueARM64_OpARM64MOVDstorezeroidx8(v) + case OpARM64MOVHUload: + return rewriteValueARM64_OpARM64MOVHUload(v) + case OpARM64MOVHUloadidx: + return rewriteValueARM64_OpARM64MOVHUloadidx(v) + case OpARM64MOVHUloadidx2: + return rewriteValueARM64_OpARM64MOVHUloadidx2(v) + case OpARM64MOVHUreg: + return rewriteValueARM64_OpARM64MOVHUreg(v) + case OpARM64MOVHload: + return rewriteValueARM64_OpARM64MOVHload(v) + case OpARM64MOVHloadidx: + return rewriteValueARM64_OpARM64MOVHloadidx(v) + case OpARM64MOVHloadidx2: + return rewriteValueARM64_OpARM64MOVHloadidx2(v) + case OpARM64MOVHreg: + return rewriteValueARM64_OpARM64MOVHreg(v) + case OpARM64MOVHstore: + return rewriteValueARM64_OpARM64MOVHstore(v) + case OpARM64MOVHstoreidx: + return rewriteValueARM64_OpARM64MOVHstoreidx(v) + case OpARM64MOVHstoreidx2: + return rewriteValueARM64_OpARM64MOVHstoreidx2(v) + case OpARM64MOVHstorezero: + return rewriteValueARM64_OpARM64MOVHstorezero(v) + case OpARM64MOVHstorezeroidx: + return rewriteValueARM64_OpARM64MOVHstorezeroidx(v) + case OpARM64MOVHstorezeroidx2: + return rewriteValueARM64_OpARM64MOVHstorezeroidx2(v) + case OpARM64MOVQstorezero: + return rewriteValueARM64_OpARM64MOVQstorezero(v) + case OpARM64MOVWUload: + return rewriteValueARM64_OpARM64MOVWUload(v) + case OpARM64MOVWUloadidx: + return rewriteValueARM64_OpARM64MOVWUloadidx(v) + case OpARM64MOVWUloadidx4: + return rewriteValueARM64_OpARM64MOVWUloadidx4(v) + case OpARM64MOVWUreg: + return rewriteValueARM64_OpARM64MOVWUreg(v) + case OpARM64MOVWload: + return rewriteValueARM64_OpARM64MOVWload(v) + case OpARM64MOVWloadidx: + return rewriteValueARM64_OpARM64MOVWloadidx(v) + case OpARM64MOVWloadidx4: + return rewriteValueARM64_OpARM64MOVWloadidx4(v) + case OpARM64MOVWreg: + return rewriteValueARM64_OpARM64MOVWreg(v) + 
case OpARM64MOVWstore: + return rewriteValueARM64_OpARM64MOVWstore(v) + case OpARM64MOVWstoreidx: + return rewriteValueARM64_OpARM64MOVWstoreidx(v) + case OpARM64MOVWstoreidx4: + return rewriteValueARM64_OpARM64MOVWstoreidx4(v) + case OpARM64MOVWstorezero: + return rewriteValueARM64_OpARM64MOVWstorezero(v) + case OpARM64MOVWstorezeroidx: + return rewriteValueARM64_OpARM64MOVWstorezeroidx(v) + case OpARM64MOVWstorezeroidx4: + return rewriteValueARM64_OpARM64MOVWstorezeroidx4(v) + case OpARM64MSUB: + return rewriteValueARM64_OpARM64MSUB(v) + case OpARM64MSUBW: + return rewriteValueARM64_OpARM64MSUBW(v) + case OpARM64MUL: + return rewriteValueARM64_OpARM64MUL(v) + case OpARM64MULW: + return rewriteValueARM64_OpARM64MULW(v) + case OpARM64MVN: + return rewriteValueARM64_OpARM64MVN(v) + case OpARM64MVNshiftLL: + return rewriteValueARM64_OpARM64MVNshiftLL(v) + case OpARM64MVNshiftRA: + return rewriteValueARM64_OpARM64MVNshiftRA(v) + case OpARM64MVNshiftRL: + return rewriteValueARM64_OpARM64MVNshiftRL(v) + case OpARM64MVNshiftRO: + return rewriteValueARM64_OpARM64MVNshiftRO(v) + case OpARM64NEG: + return rewriteValueARM64_OpARM64NEG(v) + case OpARM64NEGshiftLL: + return rewriteValueARM64_OpARM64NEGshiftLL(v) + case OpARM64NEGshiftRA: + return rewriteValueARM64_OpARM64NEGshiftRA(v) + case OpARM64NEGshiftRL: + return rewriteValueARM64_OpARM64NEGshiftRL(v) + case OpARM64NotEqual: + return rewriteValueARM64_OpARM64NotEqual(v) + case OpARM64OR: + return rewriteValueARM64_OpARM64OR(v) + case OpARM64ORN: + return rewriteValueARM64_OpARM64ORN(v) + case OpARM64ORNshiftLL: + return rewriteValueARM64_OpARM64ORNshiftLL(v) + case OpARM64ORNshiftRA: + return rewriteValueARM64_OpARM64ORNshiftRA(v) + case OpARM64ORNshiftRL: + return rewriteValueARM64_OpARM64ORNshiftRL(v) + case OpARM64ORNshiftRO: + return rewriteValueARM64_OpARM64ORNshiftRO(v) + case OpARM64ORconst: + return rewriteValueARM64_OpARM64ORconst(v) + case OpARM64ORshiftLL: + return rewriteValueARM64_OpARM64ORshiftLL(v) + case 
OpARM64ORshiftRA: + return rewriteValueARM64_OpARM64ORshiftRA(v) + case OpARM64ORshiftRL: + return rewriteValueARM64_OpARM64ORshiftRL(v) + case OpARM64ORshiftRO: + return rewriteValueARM64_OpARM64ORshiftRO(v) + case OpARM64REV: + return rewriteValueARM64_OpARM64REV(v) + case OpARM64REVW: + return rewriteValueARM64_OpARM64REVW(v) + case OpARM64ROR: + return rewriteValueARM64_OpARM64ROR(v) + case OpARM64RORW: + return rewriteValueARM64_OpARM64RORW(v) + case OpARM64SBCSflags: + return rewriteValueARM64_OpARM64SBCSflags(v) + case OpARM64SLL: + return rewriteValueARM64_OpARM64SLL(v) + case OpARM64SLLconst: + return rewriteValueARM64_OpARM64SLLconst(v) + case OpARM64SRA: + return rewriteValueARM64_OpARM64SRA(v) + case OpARM64SRAconst: + return rewriteValueARM64_OpARM64SRAconst(v) + case OpARM64SRL: + return rewriteValueARM64_OpARM64SRL(v) + case OpARM64SRLconst: + return rewriteValueARM64_OpARM64SRLconst(v) + case OpARM64STP: + return rewriteValueARM64_OpARM64STP(v) + case OpARM64SUB: + return rewriteValueARM64_OpARM64SUB(v) + case OpARM64SUBconst: + return rewriteValueARM64_OpARM64SUBconst(v) + case OpARM64SUBshiftLL: + return rewriteValueARM64_OpARM64SUBshiftLL(v) + case OpARM64SUBshiftRA: + return rewriteValueARM64_OpARM64SUBshiftRA(v) + case OpARM64SUBshiftRL: + return rewriteValueARM64_OpARM64SUBshiftRL(v) + case OpARM64TST: + return rewriteValueARM64_OpARM64TST(v) + case OpARM64TSTW: + return rewriteValueARM64_OpARM64TSTW(v) + case OpARM64TSTWconst: + return rewriteValueARM64_OpARM64TSTWconst(v) + case OpARM64TSTconst: + return rewriteValueARM64_OpARM64TSTconst(v) + case OpARM64TSTshiftLL: + return rewriteValueARM64_OpARM64TSTshiftLL(v) + case OpARM64TSTshiftRA: + return rewriteValueARM64_OpARM64TSTshiftRA(v) + case OpARM64TSTshiftRL: + return rewriteValueARM64_OpARM64TSTshiftRL(v) + case OpARM64TSTshiftRO: + return rewriteValueARM64_OpARM64TSTshiftRO(v) + case OpARM64UBFIZ: + return rewriteValueARM64_OpARM64UBFIZ(v) + case OpARM64UBFX: + return 
rewriteValueARM64_OpARM64UBFX(v) + case OpARM64UDIV: + return rewriteValueARM64_OpARM64UDIV(v) + case OpARM64UDIVW: + return rewriteValueARM64_OpARM64UDIVW(v) + case OpARM64UMOD: + return rewriteValueARM64_OpARM64UMOD(v) + case OpARM64UMODW: + return rewriteValueARM64_OpARM64UMODW(v) + case OpARM64XOR: + return rewriteValueARM64_OpARM64XOR(v) + case OpARM64XORconst: + return rewriteValueARM64_OpARM64XORconst(v) + case OpARM64XORshiftLL: + return rewriteValueARM64_OpARM64XORshiftLL(v) + case OpARM64XORshiftRA: + return rewriteValueARM64_OpARM64XORshiftRA(v) + case OpARM64XORshiftRL: + return rewriteValueARM64_OpARM64XORshiftRL(v) + case OpARM64XORshiftRO: + return rewriteValueARM64_OpARM64XORshiftRO(v) + case OpAbs: + v.Op = OpARM64FABSD + return true + case OpAdd16: + v.Op = OpARM64ADD + return true + case OpAdd32: + v.Op = OpARM64ADD + return true + case OpAdd32F: + v.Op = OpARM64FADDS + return true + case OpAdd64: + v.Op = OpARM64ADD + return true + case OpAdd64F: + v.Op = OpARM64FADDD + return true + case OpAdd8: + v.Op = OpARM64ADD + return true + case OpAddPtr: + v.Op = OpARM64ADD + return true + case OpAddr: + return rewriteValueARM64_OpAddr(v) + case OpAnd16: + v.Op = OpARM64AND + return true + case OpAnd32: + v.Op = OpARM64AND + return true + case OpAnd64: + v.Op = OpARM64AND + return true + case OpAnd8: + v.Op = OpARM64AND + return true + case OpAndB: + v.Op = OpARM64AND + return true + case OpAtomicAdd32: + v.Op = OpARM64LoweredAtomicAdd32 + return true + case OpAtomicAdd32Variant: + v.Op = OpARM64LoweredAtomicAdd32Variant + return true + case OpAtomicAdd64: + v.Op = OpARM64LoweredAtomicAdd64 + return true + case OpAtomicAdd64Variant: + v.Op = OpARM64LoweredAtomicAdd64Variant + return true + case OpAtomicAnd32: + return rewriteValueARM64_OpAtomicAnd32(v) + case OpAtomicAnd32Variant: + return rewriteValueARM64_OpAtomicAnd32Variant(v) + case OpAtomicAnd8: + return rewriteValueARM64_OpAtomicAnd8(v) + case OpAtomicAnd8Variant: + return 
rewriteValueARM64_OpAtomicAnd8Variant(v) + case OpAtomicCompareAndSwap32: + v.Op = OpARM64LoweredAtomicCas32 + return true + case OpAtomicCompareAndSwap32Variant: + v.Op = OpARM64LoweredAtomicCas32Variant + return true + case OpAtomicCompareAndSwap64: + v.Op = OpARM64LoweredAtomicCas64 + return true + case OpAtomicCompareAndSwap64Variant: + v.Op = OpARM64LoweredAtomicCas64Variant + return true + case OpAtomicExchange32: + v.Op = OpARM64LoweredAtomicExchange32 + return true + case OpAtomicExchange32Variant: + v.Op = OpARM64LoweredAtomicExchange32Variant + return true + case OpAtomicExchange64: + v.Op = OpARM64LoweredAtomicExchange64 + return true + case OpAtomicExchange64Variant: + v.Op = OpARM64LoweredAtomicExchange64Variant + return true + case OpAtomicLoad32: + v.Op = OpARM64LDARW + return true + case OpAtomicLoad64: + v.Op = OpARM64LDAR + return true + case OpAtomicLoad8: + v.Op = OpARM64LDARB + return true + case OpAtomicLoadPtr: + v.Op = OpARM64LDAR + return true + case OpAtomicOr32: + return rewriteValueARM64_OpAtomicOr32(v) + case OpAtomicOr32Variant: + return rewriteValueARM64_OpAtomicOr32Variant(v) + case OpAtomicOr8: + return rewriteValueARM64_OpAtomicOr8(v) + case OpAtomicOr8Variant: + return rewriteValueARM64_OpAtomicOr8Variant(v) + case OpAtomicStore32: + v.Op = OpARM64STLRW + return true + case OpAtomicStore64: + v.Op = OpARM64STLR + return true + case OpAtomicStore8: + v.Op = OpARM64STLRB + return true + case OpAtomicStorePtrNoWB: + v.Op = OpARM64STLR + return true + case OpAvg64u: + return rewriteValueARM64_OpAvg64u(v) + case OpBitLen32: + return rewriteValueARM64_OpBitLen32(v) + case OpBitLen64: + return rewriteValueARM64_OpBitLen64(v) + case OpBitRev16: + return rewriteValueARM64_OpBitRev16(v) + case OpBitRev32: + v.Op = OpARM64RBITW + return true + case OpBitRev64: + v.Op = OpARM64RBIT + return true + case OpBitRev8: + return rewriteValueARM64_OpBitRev8(v) + case OpBswap16: + v.Op = OpARM64REV16W + return true + case OpBswap32: + v.Op = 
OpARM64REVW + return true + case OpBswap64: + v.Op = OpARM64REV + return true + case OpCeil: + v.Op = OpARM64FRINTPD + return true + case OpClosureCall: + v.Op = OpARM64CALLclosure + return true + case OpCom16: + v.Op = OpARM64MVN + return true + case OpCom32: + v.Op = OpARM64MVN + return true + case OpCom64: + v.Op = OpARM64MVN + return true + case OpCom8: + v.Op = OpARM64MVN + return true + case OpCondSelect: + return rewriteValueARM64_OpCondSelect(v) + case OpConst16: + return rewriteValueARM64_OpConst16(v) + case OpConst32: + return rewriteValueARM64_OpConst32(v) + case OpConst32F: + return rewriteValueARM64_OpConst32F(v) + case OpConst64: + return rewriteValueARM64_OpConst64(v) + case OpConst64F: + return rewriteValueARM64_OpConst64F(v) + case OpConst8: + return rewriteValueARM64_OpConst8(v) + case OpConstBool: + return rewriteValueARM64_OpConstBool(v) + case OpConstNil: + return rewriteValueARM64_OpConstNil(v) + case OpCtz16: + return rewriteValueARM64_OpCtz16(v) + case OpCtz16NonZero: + v.Op = OpCtz32 + return true + case OpCtz32: + return rewriteValueARM64_OpCtz32(v) + case OpCtz32NonZero: + v.Op = OpCtz32 + return true + case OpCtz64: + return rewriteValueARM64_OpCtz64(v) + case OpCtz64NonZero: + v.Op = OpCtz64 + return true + case OpCtz8: + return rewriteValueARM64_OpCtz8(v) + case OpCtz8NonZero: + v.Op = OpCtz32 + return true + case OpCvt32Fto32: + v.Op = OpARM64FCVTZSSW + return true + case OpCvt32Fto32U: + v.Op = OpARM64FCVTZUSW + return true + case OpCvt32Fto64: + v.Op = OpARM64FCVTZSS + return true + case OpCvt32Fto64F: + v.Op = OpARM64FCVTSD + return true + case OpCvt32Fto64U: + v.Op = OpARM64FCVTZUS + return true + case OpCvt32Uto32F: + v.Op = OpARM64UCVTFWS + return true + case OpCvt32Uto64F: + v.Op = OpARM64UCVTFWD + return true + case OpCvt32to32F: + v.Op = OpARM64SCVTFWS + return true + case OpCvt32to64F: + v.Op = OpARM64SCVTFWD + return true + case OpCvt64Fto32: + v.Op = OpARM64FCVTZSDW + return true + case OpCvt64Fto32F: + v.Op = 
OpARM64FCVTDS + return true + case OpCvt64Fto32U: + v.Op = OpARM64FCVTZUDW + return true + case OpCvt64Fto64: + v.Op = OpARM64FCVTZSD + return true + case OpCvt64Fto64U: + v.Op = OpARM64FCVTZUD + return true + case OpCvt64Uto32F: + v.Op = OpARM64UCVTFS + return true + case OpCvt64Uto64F: + v.Op = OpARM64UCVTFD + return true + case OpCvt64to32F: + v.Op = OpARM64SCVTFS + return true + case OpCvt64to64F: + v.Op = OpARM64SCVTFD + return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true + case OpDiv16: + return rewriteValueARM64_OpDiv16(v) + case OpDiv16u: + return rewriteValueARM64_OpDiv16u(v) + case OpDiv32: + return rewriteValueARM64_OpDiv32(v) + case OpDiv32F: + v.Op = OpARM64FDIVS + return true + case OpDiv32u: + v.Op = OpARM64UDIVW + return true + case OpDiv64: + return rewriteValueARM64_OpDiv64(v) + case OpDiv64F: + v.Op = OpARM64FDIVD + return true + case OpDiv64u: + v.Op = OpARM64UDIV + return true + case OpDiv8: + return rewriteValueARM64_OpDiv8(v) + case OpDiv8u: + return rewriteValueARM64_OpDiv8u(v) + case OpEq16: + return rewriteValueARM64_OpEq16(v) + case OpEq32: + return rewriteValueARM64_OpEq32(v) + case OpEq32F: + return rewriteValueARM64_OpEq32F(v) + case OpEq64: + return rewriteValueARM64_OpEq64(v) + case OpEq64F: + return rewriteValueARM64_OpEq64F(v) + case OpEq8: + return rewriteValueARM64_OpEq8(v) + case OpEqB: + return rewriteValueARM64_OpEqB(v) + case OpEqPtr: + return rewriteValueARM64_OpEqPtr(v) + case OpFMA: + return rewriteValueARM64_OpFMA(v) + case OpFloor: + v.Op = OpARM64FRINTMD + return true + case OpGetCallerPC: + v.Op = OpARM64LoweredGetCallerPC + return true + case OpGetCallerSP: + v.Op = OpARM64LoweredGetCallerSP + return true + case OpGetClosurePtr: + v.Op = OpARM64LoweredGetClosurePtr + return true + case OpHmul32: + return rewriteValueARM64_OpHmul32(v) + case OpHmul32u: + return rewriteValueARM64_OpHmul32u(v) + case OpHmul64: + v.Op = OpARM64MULH + return true + case OpHmul64u: + v.Op = OpARM64UMULH + return true + case 
OpInterCall: + v.Op = OpARM64CALLinter + return true + case OpIsInBounds: + return rewriteValueARM64_OpIsInBounds(v) + case OpIsNonNil: + return rewriteValueARM64_OpIsNonNil(v) + case OpIsSliceInBounds: + return rewriteValueARM64_OpIsSliceInBounds(v) + case OpLeq16: + return rewriteValueARM64_OpLeq16(v) + case OpLeq16U: + return rewriteValueARM64_OpLeq16U(v) + case OpLeq32: + return rewriteValueARM64_OpLeq32(v) + case OpLeq32F: + return rewriteValueARM64_OpLeq32F(v) + case OpLeq32U: + return rewriteValueARM64_OpLeq32U(v) + case OpLeq64: + return rewriteValueARM64_OpLeq64(v) + case OpLeq64F: + return rewriteValueARM64_OpLeq64F(v) + case OpLeq64U: + return rewriteValueARM64_OpLeq64U(v) + case OpLeq8: + return rewriteValueARM64_OpLeq8(v) + case OpLeq8U: + return rewriteValueARM64_OpLeq8U(v) + case OpLess16: + return rewriteValueARM64_OpLess16(v) + case OpLess16U: + return rewriteValueARM64_OpLess16U(v) + case OpLess32: + return rewriteValueARM64_OpLess32(v) + case OpLess32F: + return rewriteValueARM64_OpLess32F(v) + case OpLess32U: + return rewriteValueARM64_OpLess32U(v) + case OpLess64: + return rewriteValueARM64_OpLess64(v) + case OpLess64F: + return rewriteValueARM64_OpLess64F(v) + case OpLess64U: + return rewriteValueARM64_OpLess64U(v) + case OpLess8: + return rewriteValueARM64_OpLess8(v) + case OpLess8U: + return rewriteValueARM64_OpLess8U(v) + case OpLoad: + return rewriteValueARM64_OpLoad(v) + case OpLocalAddr: + return rewriteValueARM64_OpLocalAddr(v) + case OpLsh16x16: + return rewriteValueARM64_OpLsh16x16(v) + case OpLsh16x32: + return rewriteValueARM64_OpLsh16x32(v) + case OpLsh16x64: + return rewriteValueARM64_OpLsh16x64(v) + case OpLsh16x8: + return rewriteValueARM64_OpLsh16x8(v) + case OpLsh32x16: + return rewriteValueARM64_OpLsh32x16(v) + case OpLsh32x32: + return rewriteValueARM64_OpLsh32x32(v) + case OpLsh32x64: + return rewriteValueARM64_OpLsh32x64(v) + case OpLsh32x8: + return rewriteValueARM64_OpLsh32x8(v) + case OpLsh64x16: + return 
rewriteValueARM64_OpLsh64x16(v) + case OpLsh64x32: + return rewriteValueARM64_OpLsh64x32(v) + case OpLsh64x64: + return rewriteValueARM64_OpLsh64x64(v) + case OpLsh64x8: + return rewriteValueARM64_OpLsh64x8(v) + case OpLsh8x16: + return rewriteValueARM64_OpLsh8x16(v) + case OpLsh8x32: + return rewriteValueARM64_OpLsh8x32(v) + case OpLsh8x64: + return rewriteValueARM64_OpLsh8x64(v) + case OpLsh8x8: + return rewriteValueARM64_OpLsh8x8(v) + case OpMax32F: + v.Op = OpARM64FMAXS + return true + case OpMax64F: + v.Op = OpARM64FMAXD + return true + case OpMin32F: + v.Op = OpARM64FMINS + return true + case OpMin64F: + v.Op = OpARM64FMIND + return true + case OpMod16: + return rewriteValueARM64_OpMod16(v) + case OpMod16u: + return rewriteValueARM64_OpMod16u(v) + case OpMod32: + return rewriteValueARM64_OpMod32(v) + case OpMod32u: + v.Op = OpARM64UMODW + return true + case OpMod64: + return rewriteValueARM64_OpMod64(v) + case OpMod64u: + v.Op = OpARM64UMOD + return true + case OpMod8: + return rewriteValueARM64_OpMod8(v) + case OpMod8u: + return rewriteValueARM64_OpMod8u(v) + case OpMove: + return rewriteValueARM64_OpMove(v) + case OpMul16: + v.Op = OpARM64MULW + return true + case OpMul32: + v.Op = OpARM64MULW + return true + case OpMul32F: + v.Op = OpARM64FMULS + return true + case OpMul64: + v.Op = OpARM64MUL + return true + case OpMul64F: + v.Op = OpARM64FMULD + return true + case OpMul8: + v.Op = OpARM64MULW + return true + case OpNeg16: + v.Op = OpARM64NEG + return true + case OpNeg32: + v.Op = OpARM64NEG + return true + case OpNeg32F: + v.Op = OpARM64FNEGS + return true + case OpNeg64: + v.Op = OpARM64NEG + return true + case OpNeg64F: + v.Op = OpARM64FNEGD + return true + case OpNeg8: + v.Op = OpARM64NEG + return true + case OpNeq16: + return rewriteValueARM64_OpNeq16(v) + case OpNeq32: + return rewriteValueARM64_OpNeq32(v) + case OpNeq32F: + return rewriteValueARM64_OpNeq32F(v) + case OpNeq64: + return rewriteValueARM64_OpNeq64(v) + case OpNeq64F: + return 
rewriteValueARM64_OpNeq64F(v) + case OpNeq8: + return rewriteValueARM64_OpNeq8(v) + case OpNeqB: + v.Op = OpARM64XOR + return true + case OpNeqPtr: + return rewriteValueARM64_OpNeqPtr(v) + case OpNilCheck: + v.Op = OpARM64LoweredNilCheck + return true + case OpNot: + return rewriteValueARM64_OpNot(v) + case OpOffPtr: + return rewriteValueARM64_OpOffPtr(v) + case OpOr16: + v.Op = OpARM64OR + return true + case OpOr32: + v.Op = OpARM64OR + return true + case OpOr64: + v.Op = OpARM64OR + return true + case OpOr8: + v.Op = OpARM64OR + return true + case OpOrB: + v.Op = OpARM64OR + return true + case OpPanicBounds: + return rewriteValueARM64_OpPanicBounds(v) + case OpPopCount16: + return rewriteValueARM64_OpPopCount16(v) + case OpPopCount32: + return rewriteValueARM64_OpPopCount32(v) + case OpPopCount64: + return rewriteValueARM64_OpPopCount64(v) + case OpPrefetchCache: + return rewriteValueARM64_OpPrefetchCache(v) + case OpPrefetchCacheStreamed: + return rewriteValueARM64_OpPrefetchCacheStreamed(v) + case OpPubBarrier: + return rewriteValueARM64_OpPubBarrier(v) + case OpRotateLeft16: + return rewriteValueARM64_OpRotateLeft16(v) + case OpRotateLeft32: + return rewriteValueARM64_OpRotateLeft32(v) + case OpRotateLeft64: + return rewriteValueARM64_OpRotateLeft64(v) + case OpRotateLeft8: + return rewriteValueARM64_OpRotateLeft8(v) + case OpRound: + v.Op = OpARM64FRINTAD + return true + case OpRound32F: + v.Op = OpARM64LoweredRound32F + return true + case OpRound64F: + v.Op = OpARM64LoweredRound64F + return true + case OpRoundToEven: + v.Op = OpARM64FRINTND + return true + case OpRsh16Ux16: + return rewriteValueARM64_OpRsh16Ux16(v) + case OpRsh16Ux32: + return rewriteValueARM64_OpRsh16Ux32(v) + case OpRsh16Ux64: + return rewriteValueARM64_OpRsh16Ux64(v) + case OpRsh16Ux8: + return rewriteValueARM64_OpRsh16Ux8(v) + case OpRsh16x16: + return rewriteValueARM64_OpRsh16x16(v) + case OpRsh16x32: + return rewriteValueARM64_OpRsh16x32(v) + case OpRsh16x64: + return 
rewriteValueARM64_OpRsh16x64(v) + case OpRsh16x8: + return rewriteValueARM64_OpRsh16x8(v) + case OpRsh32Ux16: + return rewriteValueARM64_OpRsh32Ux16(v) + case OpRsh32Ux32: + return rewriteValueARM64_OpRsh32Ux32(v) + case OpRsh32Ux64: + return rewriteValueARM64_OpRsh32Ux64(v) + case OpRsh32Ux8: + return rewriteValueARM64_OpRsh32Ux8(v) + case OpRsh32x16: + return rewriteValueARM64_OpRsh32x16(v) + case OpRsh32x32: + return rewriteValueARM64_OpRsh32x32(v) + case OpRsh32x64: + return rewriteValueARM64_OpRsh32x64(v) + case OpRsh32x8: + return rewriteValueARM64_OpRsh32x8(v) + case OpRsh64Ux16: + return rewriteValueARM64_OpRsh64Ux16(v) + case OpRsh64Ux32: + return rewriteValueARM64_OpRsh64Ux32(v) + case OpRsh64Ux64: + return rewriteValueARM64_OpRsh64Ux64(v) + case OpRsh64Ux8: + return rewriteValueARM64_OpRsh64Ux8(v) + case OpRsh64x16: + return rewriteValueARM64_OpRsh64x16(v) + case OpRsh64x32: + return rewriteValueARM64_OpRsh64x32(v) + case OpRsh64x64: + return rewriteValueARM64_OpRsh64x64(v) + case OpRsh64x8: + return rewriteValueARM64_OpRsh64x8(v) + case OpRsh8Ux16: + return rewriteValueARM64_OpRsh8Ux16(v) + case OpRsh8Ux32: + return rewriteValueARM64_OpRsh8Ux32(v) + case OpRsh8Ux64: + return rewriteValueARM64_OpRsh8Ux64(v) + case OpRsh8Ux8: + return rewriteValueARM64_OpRsh8Ux8(v) + case OpRsh8x16: + return rewriteValueARM64_OpRsh8x16(v) + case OpRsh8x32: + return rewriteValueARM64_OpRsh8x32(v) + case OpRsh8x64: + return rewriteValueARM64_OpRsh8x64(v) + case OpRsh8x8: + return rewriteValueARM64_OpRsh8x8(v) + case OpSelect0: + return rewriteValueARM64_OpSelect0(v) + case OpSelect1: + return rewriteValueARM64_OpSelect1(v) + case OpSelectN: + return rewriteValueARM64_OpSelectN(v) + case OpSignExt16to32: + v.Op = OpARM64MOVHreg + return true + case OpSignExt16to64: + v.Op = OpARM64MOVHreg + return true + case OpSignExt32to64: + v.Op = OpARM64MOVWreg + return true + case OpSignExt8to16: + v.Op = OpARM64MOVBreg + return true + case OpSignExt8to32: + v.Op = OpARM64MOVBreg + 
return true + case OpSignExt8to64: + v.Op = OpARM64MOVBreg + return true + case OpSlicemask: + return rewriteValueARM64_OpSlicemask(v) + case OpSqrt: + v.Op = OpARM64FSQRTD + return true + case OpSqrt32: + v.Op = OpARM64FSQRTS + return true + case OpStaticCall: + v.Op = OpARM64CALLstatic + return true + case OpStore: + return rewriteValueARM64_OpStore(v) + case OpSub16: + v.Op = OpARM64SUB + return true + case OpSub32: + v.Op = OpARM64SUB + return true + case OpSub32F: + v.Op = OpARM64FSUBS + return true + case OpSub64: + v.Op = OpARM64SUB + return true + case OpSub64F: + v.Op = OpARM64FSUBD + return true + case OpSub8: + v.Op = OpARM64SUB + return true + case OpSubPtr: + v.Op = OpARM64SUB + return true + case OpTailCall: + v.Op = OpARM64CALLtail + return true + case OpTrunc: + v.Op = OpARM64FRINTZD + return true + case OpTrunc16to8: + v.Op = OpCopy + return true + case OpTrunc32to16: + v.Op = OpCopy + return true + case OpTrunc32to8: + v.Op = OpCopy + return true + case OpTrunc64to16: + v.Op = OpCopy + return true + case OpTrunc64to32: + v.Op = OpCopy + return true + case OpTrunc64to8: + v.Op = OpCopy + return true + case OpWB: + v.Op = OpARM64LoweredWB + return true + case OpXor16: + v.Op = OpARM64XOR + return true + case OpXor32: + v.Op = OpARM64XOR + return true + case OpXor64: + v.Op = OpARM64XOR + return true + case OpXor8: + v.Op = OpARM64XOR + return true + case OpZero: + return rewriteValueARM64_OpZero(v) + case OpZeroExt16to32: + v.Op = OpARM64MOVHUreg + return true + case OpZeroExt16to64: + v.Op = OpARM64MOVHUreg + return true + case OpZeroExt32to64: + v.Op = OpARM64MOVWUreg + return true + case OpZeroExt8to16: + v.Op = OpARM64MOVBUreg + return true + case OpZeroExt8to32: + v.Op = OpARM64MOVBUreg + return true + case OpZeroExt8to64: + v.Op = OpARM64MOVBUreg + return true + } + return false +} +func rewriteValueARM64_OpARM64ADCSflags(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // 
match: (ADCSflags x y (Select1 (ADDSconstflags [-1] (ADCzerocarry c)))) + // result: (ADCSflags x y c) + for { + x := v_0 + y := v_1 + if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpARM64ADDSconstflags || auxIntToInt64(v_2_0.AuxInt) != -1 { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpARM64ADCzerocarry || v_2_0_0.Type != typ.UInt64 { + break + } + c := v_2_0_0.Args[0] + v.reset(OpARM64ADCSflags) + v.AddArg3(x, y, c) + return true + } + // match: (ADCSflags x y (Select1 (ADDSconstflags [-1] (MOVDconst [0])))) + // result: (ADDSflags x y) + for { + x := v_0 + y := v_1 + if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpARM64ADDSconstflags || auxIntToInt64(v_2_0.AuxInt) != -1 { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 { + break + } + v.reset(OpARM64ADDSflags) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM64_OpARM64ADD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADD x (MOVDconst [c])) + // cond: !t.IsPtr() + // result: (ADDconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + t := v_1.Type + c := auxIntToInt64(v_1.AuxInt) + if !(!t.IsPtr()) { + continue + } + v.reset(OpARM64ADDconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (ADD a l:(MUL x y)) + // cond: l.Uses==1 && clobber(l) + // result: (MADD a x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + l := v_1 + if l.Op != OpARM64MUL { + continue + } + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1 && clobber(l)) { + continue + } + v.reset(OpARM64MADD) + v.AddArg3(a, x, y) + return true + } + break + } + // match: (ADD a l:(MNEG x y)) + // cond: l.Uses==1 && clobber(l) + // result: (MSUB a x y) 
+ for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + l := v_1 + if l.Op != OpARM64MNEG { + continue + } + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1 && clobber(l)) { + continue + } + v.reset(OpARM64MSUB) + v.AddArg3(a, x, y) + return true + } + break + } + // match: (ADD a l:(MULW x y)) + // cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l) + // result: (MADDW a x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + l := v_1 + if l.Op != OpARM64MULW { + continue + } + y := l.Args[1] + x := l.Args[0] + if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) { + continue + } + v.reset(OpARM64MADDW) + v.AddArg3(a, x, y) + return true + } + break + } + // match: (ADD a l:(MNEGW x y)) + // cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l) + // result: (MSUBW a x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + l := v_1 + if l.Op != OpARM64MNEGW { + continue + } + y := l.Args[1] + x := l.Args[0] + if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) { + continue + } + v.reset(OpARM64MSUBW) + v.AddArg3(a, x, y) + return true + } + break + } + // match: (ADD x (NEG y)) + // result: (SUB x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64NEG { + continue + } + y := v_1.Args[0] + v.reset(OpARM64SUB) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADD x0 x1:(SLLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (ADDshiftLL x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SLLconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + // match: (ADD x0 x1:(SRLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (ADDshiftRL x0 y [c]) + for { + for _i0 := 0; _i0 <= 
1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SRLconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ADDshiftRL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + // match: (ADD x0 x1:(SRAconst [c] y)) + // cond: clobberIfDead(x1) + // result: (ADDshiftRA x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SRAconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ADDshiftRA) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + return false +} +func rewriteValueARM64_OpARM64ADDSflags(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDSflags x (MOVDconst [c])) + // result: (ADDSconstflags [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64ADDSconstflags) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValueARM64_OpARM64ADDconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) + // cond: is32Bit(off1+int64(off2)) + // result: (MOVDaddr [int32(off1)+off2] {sym} ptr) + for { + off1 := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + if !(is32Bit(off1 + int64(off2))) { + break + } + v.reset(OpARM64MOVDaddr) + v.AuxInt = int32ToAuxInt(int32(off1) + off2) + v.Aux = symToAux(sym) + v.AddArg(ptr) + return true + } + // match: (ADDconst [c] y) + // cond: c < 0 + // result: (SUBconst [-c] y) + for { + c := auxIntToInt64(v.AuxInt) + y := v_0 + if !(c < 0) { + break + } + v.reset(OpARM64SUBconst) + 
v.AuxInt = int64ToAuxInt(-c) + v.AddArg(y) + return true + } + // match: (ADDconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ADDconst [c] (MOVDconst [d])) + // result: (MOVDconst [c+d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(c + d) + return true + } + // match: (ADDconst [c] (ADDconst [d] x)) + // result: (ADDconst [c+d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64ADDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARM64ADDconst) + v.AuxInt = int64ToAuxInt(c + d) + v.AddArg(x) + return true + } + // match: (ADDconst [c] (SUBconst [d] x)) + // result: (ADDconst [c-d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SUBconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARM64ADDconst) + v.AuxInt = int64ToAuxInt(c - d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64ADDshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ADDshiftLL (MOVDconst [c]) x [d]) + // result: (ADDconst [c] (SLLconst x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64ADDconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ADDshiftLL x (MOVDconst [c]) [d]) + // result: (ADDconst x [int64(uint64(c)< [8] (UBFX [armBFAuxInt(8, 8)] x) x) + // result: (REV16W x) + for { + if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) { + break + } + x 
:= v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARM64REV16W) + v.AddArg(x) + return true + } + // match: (ADDshiftLL [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x)) + // cond: uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff + // result: (REV16W x) + for { + if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 24) { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64ANDconst { + break + } + c1 := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if v_1.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_1.AuxInt) + if x != v_1.Args[0] || !(uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff) { + break + } + v.reset(OpARM64REV16W) + v.AddArg(x) + return true + } + // match: (ADDshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x)) + // cond: (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) + // result: (REV16 x) + for { + if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64ANDconst { + break + } + c1 := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if v_1.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_1.AuxInt) + if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) { + break + } + v.reset(OpARM64REV16) + v.AddArg(x) + return true + } + // match: (ADDshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x)) + // cond: (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) + // result: (REV16 (ANDconst [0xffffffff] x)) + for { + if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64ANDconst { + break + } + c1 := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if v_1.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_1.AuxInt) + if x != v_1.Args[0] || 
!(uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) { + break + } + v.reset(OpARM64REV16) + v0 := b.NewValue0(v.Pos, OpARM64ANDconst, x.Type) + v0.AuxInt = int64ToAuxInt(0xffffffff) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ADDshiftLL [c] (SRLconst x [64-c]) x2) + // result: (EXTRconst [64-c] x2 x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c { + break + } + x := v_0.Args[0] + x2 := v_1 + v.reset(OpARM64EXTRconst) + v.AuxInt = int64ToAuxInt(64 - c) + v.AddArg2(x2, x) + return true + } + // match: (ADDshiftLL [c] (UBFX [bfc] x) x2) + // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) + // result: (EXTRWconst [32-c] x2 x) + for { + t := v.Type + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64UBFX { + break + } + bfc := auxIntToArm64BitField(v_0.AuxInt) + x := v_0.Args[0] + x2 := v_1 + if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) { + break + } + v.reset(OpARM64EXTRWconst) + v.AuxInt = int64ToAuxInt(32 - c) + v.AddArg2(x2, x) + return true + } + return false +} +func rewriteValueARM64_OpARM64ADDshiftRA(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADDshiftRA (MOVDconst [c]) x [d]) + // result: (ADDconst [c] (SRAconst x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64ADDconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ADDshiftRA x (MOVDconst [c]) [d]) + // result: (ADDconst x [c>>uint64(d)]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64ADDconst) + v.AuxInt = int64ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64ADDshiftRL(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADDshiftRL (MOVDconst [c]) x [d]) + // result: (ADDconst [c] (SRLconst x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64ADDconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ADDshiftRL x (MOVDconst [c]) [d]) + // result: (ADDconst x [int64(uint64(c)>>uint64(d))]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64ADDconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d))) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64AND(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AND x (MOVDconst [c])) + // result: (ANDconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (AND x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (AND x (MVN y)) + // result: (BIC x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MVN { + continue + } + y := v_1.Args[0] + v.reset(OpARM64BIC) + v.AddArg2(x, y) + return true + } + break + } + // match: (AND x0 x1:(SLLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (ANDshiftLL x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SLLconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ANDshiftLL) + 
v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + // match: (AND x0 x1:(SRLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (ANDshiftRL x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SRLconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ANDshiftRL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + // match: (AND x0 x1:(SRAconst [c] y)) + // cond: clobberIfDead(x1) + // result: (ANDshiftRA x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SRAconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ANDshiftRA) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + // match: (AND x0 x1:(RORconst [c] y)) + // cond: clobberIfDead(x1) + // result: (ANDshiftRO x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64RORconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ANDshiftRO) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + return false +} +func rewriteValueARM64_OpARM64ANDconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ANDconst [0] _) + // result: (MOVDconst [0]) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (ANDconst [-1] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != -1 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ANDconst [c] (MOVDconst [d])) + // result: (MOVDconst [c&d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } 
+ d := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(c & d) + return true + } + // match: (ANDconst [c] (ANDconst [d] x)) + // result: (ANDconst [c&d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64ANDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(c & d) + v.AddArg(x) + return true + } + // match: (ANDconst [c] (MOVWUreg x)) + // result: (ANDconst [c&(1<<32-1)] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVWUreg { + break + } + x := v_0.Args[0] + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(c & (1<<32 - 1)) + v.AddArg(x) + return true + } + // match: (ANDconst [c] (MOVHUreg x)) + // result: (ANDconst [c&(1<<16-1)] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVHUreg { + break + } + x := v_0.Args[0] + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(c & (1<<16 - 1)) + v.AddArg(x) + return true + } + // match: (ANDconst [c] (MOVBUreg x)) + // result: (ANDconst [c&(1<<8-1)] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVBUreg { + break + } + x := v_0.Args[0] + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(c & (1<<8 - 1)) + v.AddArg(x) + return true + } + // match: (ANDconst [ac] (SLLconst [sc] x)) + // cond: isARM64BFMask(sc, ac, sc) + // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x) + for { + ac := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SLLconst { + break + } + sc := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(isARM64BFMask(sc, ac, sc)) { + break + } + v.reset(OpARM64UBFIZ) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc))) + v.AddArg(x) + return true + } + // match: (ANDconst [ac] (SRLconst [sc] x)) + // cond: isARM64BFMask(sc, ac, 0) + // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x) + for { + ac := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SRLconst { + break + } + sc := auxIntToInt64(v_0.AuxInt) + 
x := v_0.Args[0] + if !(isARM64BFMask(sc, ac, 0)) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0))) + v.AddArg(x) + return true + } + // match: (ANDconst [c] (UBFX [bfc] x)) + // cond: isARM64BFMask(0, c, 0) + // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), arm64BFWidth(c, 0)))] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64UBFX { + break + } + bfc := auxIntToArm64BitField(v_0.AuxInt) + x := v_0.Args[0] + if !(isARM64BFMask(0, c, 0)) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), arm64BFWidth(c, 0)))) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64ANDshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ANDshiftLL (MOVDconst [c]) x [d]) + // result: (ANDconst [c] (SLLconst x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ANDshiftLL x (MOVDconst [c]) [d]) + // result: (ANDconst x [int64(uint64(c)< x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ANDshiftRA x (MOVDconst [c]) [d]) + // result: (ANDconst x [c>>uint64(d)]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return 
true + } + // match: (ANDshiftRA y:(SRAconst x [c]) x [c]) + // result: y + for { + c := auxIntToInt64(v.AuxInt) + y := v_0 + if y.Op != OpARM64SRAconst || auxIntToInt64(y.AuxInt) != c { + break + } + x := y.Args[0] + if x != v_1 { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64ANDshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ANDshiftRL (MOVDconst [c]) x [d]) + // result: (ANDconst [c] (SRLconst x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ANDshiftRL x (MOVDconst [c]) [d]) + // result: (ANDconst x [int64(uint64(c)>>uint64(d))]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d))) + v.AddArg(x) + return true + } + // match: (ANDshiftRL y:(SRLconst x [c]) x [c]) + // result: y + for { + c := auxIntToInt64(v.AuxInt) + y := v_0 + if y.Op != OpARM64SRLconst || auxIntToInt64(y.AuxInt) != c { + break + } + x := y.Args[0] + if x != v_1 { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64ANDshiftRO(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ANDshiftRO (MOVDconst [c]) x [d]) + // result: (ANDconst [c] (RORconst x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ANDshiftRO x 
(MOVDconst [c]) [d]) + // result: (ANDconst x [rotateRight64(c, d)]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(rotateRight64(c, d)) + v.AddArg(x) + return true + } + // match: (ANDshiftRO y:(RORconst x [c]) x [c]) + // result: y + for { + c := auxIntToInt64(v.AuxInt) + y := v_0 + if y.Op != OpARM64RORconst || auxIntToInt64(y.AuxInt) != c { + break + } + x := y.Args[0] + if x != v_1 { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64BIC(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (BIC x (MOVDconst [c])) + // result: (ANDconst [^c] x) + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(^c) + v.AddArg(x) + return true + } + // match: (BIC x x) + // result: (MOVDconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (BIC x0 x1:(SLLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (BICshiftLL x0 y [c]) + for { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SLLconst { + break + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64BICshiftLL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + // match: (BIC x0 x1:(SRLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (BICshiftRL x0 y [c]) + for { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SRLconst { + break + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64BICshiftRL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + // match: (BIC x0 x1:(SRAconst [c] y)) + // cond: clobberIfDead(x1) + // result: (BICshiftRA x0 y [c]) + for { + x0 := v_0 + x1 := v_1 + if x1.Op != 
OpARM64SRAconst { + break + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64BICshiftRA) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + // match: (BIC x0 x1:(RORconst [c] y)) + // cond: clobberIfDead(x1) + // result: (BICshiftRO x0 y [c]) + for { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64RORconst { + break + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64BICshiftRO) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + return false +} +func rewriteValueARM64_OpARM64BICshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (BICshiftLL x (MOVDconst [c]) [d]) + // result: (ANDconst x [^int64(uint64(c)<>uint64(d))]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(^(c >> uint64(d))) + v.AddArg(x) + return true + } + // match: (BICshiftRA (SRAconst x [c]) x [c]) + // result: (MOVDconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64BICshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (BICshiftRL x (MOVDconst [c]) [d]) + // result: (ANDconst x [^int64(uint64(c)>>uint64(d))]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d))) + v.AddArg(x) + return true + } + // match: (BICshiftRL (SRLconst x [c]) x [c]) + // result: (MOVDconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 
c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64BICshiftRO(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (BICshiftRO x (MOVDconst [c]) [d]) + // result: (ANDconst x [^rotateRight64(c, d)]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(^rotateRight64(c, d)) + v.AddArg(x) + return true + } + // match: (BICshiftRO (RORconst x [c]) x [c]) + // result: (MOVDconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64CMN(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMN x (MOVDconst [c])) + // result: (CMNconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64CMNconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (CMN x0 x1:(SLLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (CMNshiftLL x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SLLconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64CMNshiftLL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + // match: (CMN x0 x1:(SRLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (CMNshiftRL x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != 
OpARM64SRLconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64CMNshiftRL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + // match: (CMN x0 x1:(SRAconst [c] y)) + // cond: clobberIfDead(x1) + // result: (CMNshiftRA x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SRAconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64CMNshiftRA) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + return false +} +func rewriteValueARM64_OpARM64CMNW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMNW x (MOVDconst [c])) + // result: (CMNWconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64CMNWconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValueARM64_OpARM64CMNWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (CMNWconst [c] y) + // cond: c < 0 && c != -1<<31 + // result: (CMPWconst [-c] y) + for { + c := auxIntToInt32(v.AuxInt) + y := v_0 + if !(c < 0 && c != -1<<31) { + break + } + v.reset(OpARM64CMPWconst) + v.AuxInt = int32ToAuxInt(-c) + v.AddArg(y) + return true + } + // match: (CMNWconst (MOVDconst [x]) [y]) + // result: (FlagConstant [addFlags32(int32(x),y)]) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64FlagConstant) + v.AuxInt = flagConstantToAuxInt(addFlags32(int32(x), y)) + return true + } + return false +} +func rewriteValueARM64_OpARM64CMNconst(v *Value) bool { + v_0 := v.Args[0] + // match: (CMNconst [c] y) + // cond: c < 0 && c != -1<<63 + // result: 
(CMPconst [-c] y) + for { + c := auxIntToInt64(v.AuxInt) + y := v_0 + if !(c < 0 && c != -1<<63) { + break + } + v.reset(OpARM64CMPconst) + v.AuxInt = int64ToAuxInt(-c) + v.AddArg(y) + return true + } + // match: (CMNconst (MOVDconst [x]) [y]) + // result: (FlagConstant [addFlags64(x,y)]) + for { + y := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64FlagConstant) + v.AuxInt = flagConstantToAuxInt(addFlags64(x, y)) + return true + } + return false +} +func rewriteValueARM64_OpARM64CMNshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMNshiftLL (MOVDconst [c]) x [d]) + // result: (CMNconst [c] (SLLconst x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64CMNconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMNshiftLL x (MOVDconst [c]) [d]) + // result: (CMNconst x [int64(uint64(c)< x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64CMNconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMNshiftRA x (MOVDconst [c]) [d]) + // result: (CMNconst x [c>>uint64(d)]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64CMNconst) + v.AuxInt = int64ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64CMNshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMNshiftRL (MOVDconst [c]) x [d]) + // result: (CMNconst [c] (SRLconst x 
[d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64CMNconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMNshiftRL x (MOVDconst [c]) [d]) + // result: (CMNconst x [int64(uint64(c)>>uint64(d))]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64CMNconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d))) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64CMP(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMP x (MOVDconst [c])) + // result: (CMPconst [c] x) + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64CMPconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (CMP (MOVDconst [c]) x) + // result: (InvertFlags (CMPconst [c] x)) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMP x y) + // cond: canonLessThan(x,y) + // result: (InvertFlags (CMP y x)) + for { + x := v_0 + y := v_1 + if !(canonLessThan(x, y)) { + break + } + v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + // match: (CMP x0 x1:(SLLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (CMPshiftLL x0 y [c]) + for { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SLLconst { + break + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64CMPshiftLL) + 
v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + // match: (CMP x0:(SLLconst [c] y) x1) + // cond: clobberIfDead(x0) + // result: (InvertFlags (CMPshiftLL x1 y [c])) + for { + x0 := v_0 + if x0.Op != OpARM64SLLconst { + break + } + c := auxIntToInt64(x0.AuxInt) + y := x0.Args[0] + x1 := v_1 + if !(clobberIfDead(x0)) { + break + } + v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64CMPshiftLL, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg2(x1, y) + v.AddArg(v0) + return true + } + // match: (CMP x0 x1:(SRLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (CMPshiftRL x0 y [c]) + for { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SRLconst { + break + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64CMPshiftRL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + // match: (CMP x0:(SRLconst [c] y) x1) + // cond: clobberIfDead(x0) + // result: (InvertFlags (CMPshiftRL x1 y [c])) + for { + x0 := v_0 + if x0.Op != OpARM64SRLconst { + break + } + c := auxIntToInt64(x0.AuxInt) + y := x0.Args[0] + x1 := v_1 + if !(clobberIfDead(x0)) { + break + } + v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRL, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg2(x1, y) + v.AddArg(v0) + return true + } + // match: (CMP x0 x1:(SRAconst [c] y)) + // cond: clobberIfDead(x1) + // result: (CMPshiftRA x0 y [c]) + for { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SRAconst { + break + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64CMPshiftRA) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + // match: (CMP x0:(SRAconst [c] y) x1) + // cond: clobberIfDead(x0) + // result: (InvertFlags (CMPshiftRA x1 y [c])) + for { + x0 := v_0 + if x0.Op != OpARM64SRAconst { + break + } + c := auxIntToInt64(x0.AuxInt) + y := x0.Args[0] + x1 := v_1 + if !(clobberIfDead(x0)) { + 
break + } + v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRA, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg2(x1, y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM64_OpARM64CMPW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMPW x (MOVDconst [c])) + // result: (CMPWconst [int32(c)] x) + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64CMPWconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (CMPW (MOVDconst [c]) x) + // result: (InvertFlags (CMPWconst [int32(c)] x)) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64CMPWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMPW x y) + // cond: canonLessThan(x,y) + // result: (InvertFlags (CMPW y x)) + for { + x := v_0 + y := v_1 + if !(canonLessThan(x, y)) { + break + } + v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM64_OpARM64CMPWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (CMPWconst [c] y) + // cond: c < 0 && c != -1<<31 + // result: (CMNWconst [-c] y) + for { + c := auxIntToInt32(v.AuxInt) + y := v_0 + if !(c < 0 && c != -1<<31) { + break + } + v.reset(OpARM64CMNWconst) + v.AuxInt = int32ToAuxInt(-c) + v.AddArg(y) + return true + } + // match: (CMPWconst (MOVDconst [x]) [y]) + // result: (FlagConstant [subFlags32(int32(x),y)]) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64FlagConstant) + v.AuxInt = flagConstantToAuxInt(subFlags32(int32(x), y)) + return true + } + // match: (CMPWconst (MOVBUreg _) [c]) + // cond: 
0xff < c + // result: (FlagConstant [subFlags64(0,1)]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARM64MOVBUreg || !(0xff < c) { + break + } + v.reset(OpARM64FlagConstant) + v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1)) + return true + } + // match: (CMPWconst (MOVHUreg _) [c]) + // cond: 0xffff < c + // result: (FlagConstant [subFlags64(0,1)]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARM64MOVHUreg || !(0xffff < c) { + break + } + v.reset(OpARM64FlagConstant) + v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1)) + return true + } + return false +} +func rewriteValueARM64_OpARM64CMPconst(v *Value) bool { + v_0 := v.Args[0] + // match: (CMPconst [c] y) + // cond: c < 0 && c != -1<<63 + // result: (CMNconst [-c] y) + for { + c := auxIntToInt64(v.AuxInt) + y := v_0 + if !(c < 0 && c != -1<<63) { + break + } + v.reset(OpARM64CMNconst) + v.AuxInt = int64ToAuxInt(-c) + v.AddArg(y) + return true + } + // match: (CMPconst (MOVDconst [x]) [y]) + // result: (FlagConstant [subFlags64(x,y)]) + for { + y := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64FlagConstant) + v.AuxInt = flagConstantToAuxInt(subFlags64(x, y)) + return true + } + // match: (CMPconst (MOVBUreg _) [c]) + // cond: 0xff < c + // result: (FlagConstant [subFlags64(0,1)]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVBUreg || !(0xff < c) { + break + } + v.reset(OpARM64FlagConstant) + v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1)) + return true + } + // match: (CMPconst (MOVHUreg _) [c]) + // cond: 0xffff < c + // result: (FlagConstant [subFlags64(0,1)]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVHUreg || !(0xffff < c) { + break + } + v.reset(OpARM64FlagConstant) + v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1)) + return true + } + // match: (CMPconst (MOVWUreg _) [c]) + // cond: 0xffffffff < c + // result: (FlagConstant [subFlags64(0,1)]) + for { + c := 
auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVWUreg || !(0xffffffff < c) { + break + } + v.reset(OpARM64FlagConstant) + v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1)) + return true + } + // match: (CMPconst (ANDconst _ [m]) [n]) + // cond: 0 <= m && m < n + // result: (FlagConstant [subFlags64(0,1)]) + for { + n := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64ANDconst { + break + } + m := auxIntToInt64(v_0.AuxInt) + if !(0 <= m && m < n) { + break + } + v.reset(OpARM64FlagConstant) + v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1)) + return true + } + // match: (CMPconst (SRLconst _ [c]) [n]) + // cond: 0 <= n && 0 < c && c <= 63 && (1< x [d]))) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v1.AuxInt = int64ToAuxInt(d) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (CMPshiftLL x (MOVDconst [c]) [d]) + // result: (CMPconst x [int64(uint64(c)< x [d]))) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v1 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) + v1.AuxInt = int64ToAuxInt(d) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (CMPshiftRA x (MOVDconst [c]) [d]) + // result: (CMPconst x [c>>uint64(d)]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64CMPconst) + v.AuxInt = int64ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64CMPshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + // match: (CMPshiftRL (MOVDconst [c]) x [d]) + // result: (InvertFlags (CMPconst [c] (SRLconst x [d]))) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v1 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) + v1.AuxInt = int64ToAuxInt(d) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (CMPshiftRL x (MOVDconst [c]) [d]) + // result: (CMPconst x [int64(uint64(c)>>uint64(d))]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64CMPconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d))) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64CSEL(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CSEL [cc] (MOVDconst [-1]) (MOVDconst [0]) flag) + // result: (CSETM [cc] flag) + for { + cc := auxIntToOp(v.AuxInt) + if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != -1 || v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + flag := v_2 + v.reset(OpARM64CSETM) + v.AuxInt = opToAuxInt(cc) + v.AddArg(flag) + return true + } + // match: (CSEL [cc] (MOVDconst [0]) (MOVDconst [-1]) flag) + // result: (CSETM [arm64Negate(cc)] flag) + for { + cc := auxIntToOp(v.AuxInt) + if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 || v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 { + break + } + flag := v_2 + v.reset(OpARM64CSETM) + v.AuxInt = opToAuxInt(arm64Negate(cc)) + v.AddArg(flag) + return true + } + // match: (CSEL [cc] x (MOVDconst [0]) flag) + // result: (CSEL0 [cc] x flag) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + flag := v_2 + 
v.reset(OpARM64CSEL0) + v.AuxInt = opToAuxInt(cc) + v.AddArg2(x, flag) + return true + } + // match: (CSEL [cc] (MOVDconst [0]) y flag) + // result: (CSEL0 [arm64Negate(cc)] y flag) + for { + cc := auxIntToOp(v.AuxInt) + if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + y := v_1 + flag := v_2 + v.reset(OpARM64CSEL0) + v.AuxInt = opToAuxInt(arm64Negate(cc)) + v.AddArg2(y, flag) + return true + } + // match: (CSEL [cc] x (ADDconst [1] a) flag) + // result: (CSINC [cc] x a flag) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 { + break + } + a := v_1.Args[0] + flag := v_2 + v.reset(OpARM64CSINC) + v.AuxInt = opToAuxInt(cc) + v.AddArg3(x, a, flag) + return true + } + // match: (CSEL [cc] (ADDconst [1] a) x flag) + // result: (CSINC [arm64Negate(cc)] x a flag) + for { + cc := auxIntToOp(v.AuxInt) + if v_0.Op != OpARM64ADDconst || auxIntToInt64(v_0.AuxInt) != 1 { + break + } + a := v_0.Args[0] + x := v_1 + flag := v_2 + v.reset(OpARM64CSINC) + v.AuxInt = opToAuxInt(arm64Negate(cc)) + v.AddArg3(x, a, flag) + return true + } + // match: (CSEL [cc] x (MVN a) flag) + // result: (CSINV [cc] x a flag) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MVN { + break + } + a := v_1.Args[0] + flag := v_2 + v.reset(OpARM64CSINV) + v.AuxInt = opToAuxInt(cc) + v.AddArg3(x, a, flag) + return true + } + // match: (CSEL [cc] (MVN a) x flag) + // result: (CSINV [arm64Negate(cc)] x a flag) + for { + cc := auxIntToOp(v.AuxInt) + if v_0.Op != OpARM64MVN { + break + } + a := v_0.Args[0] + x := v_1 + flag := v_2 + v.reset(OpARM64CSINV) + v.AuxInt = opToAuxInt(arm64Negate(cc)) + v.AddArg3(x, a, flag) + return true + } + // match: (CSEL [cc] x (NEG a) flag) + // result: (CSNEG [cc] x a flag) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64NEG { + break + } + a := v_1.Args[0] + flag := v_2 + v.reset(OpARM64CSNEG) + v.AuxInt = opToAuxInt(cc) + v.AddArg3(x, a, 
flag) + return true + } + // match: (CSEL [cc] (NEG a) x flag) + // result: (CSNEG [arm64Negate(cc)] x a flag) + for { + cc := auxIntToOp(v.AuxInt) + if v_0.Op != OpARM64NEG { + break + } + a := v_0.Args[0] + x := v_1 + flag := v_2 + v.reset(OpARM64CSNEG) + v.AuxInt = opToAuxInt(arm64Negate(cc)) + v.AddArg3(x, a, flag) + return true + } + // match: (CSEL [cc] x y (InvertFlags cmp)) + // result: (CSEL [arm64Invert(cc)] x y cmp) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + y := v_1 + if v_2.Op != OpARM64InvertFlags { + break + } + cmp := v_2.Args[0] + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(arm64Invert(cc)) + v.AddArg3(x, y, cmp) + return true + } + // match: (CSEL [cc] x _ flag) + // cond: ccARM64Eval(cc, flag) > 0 + // result: x + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + flag := v_2 + if !(ccARM64Eval(cc, flag) > 0) { + break + } + v.copyOf(x) + return true + } + // match: (CSEL [cc] _ y flag) + // cond: ccARM64Eval(cc, flag) < 0 + // result: y + for { + cc := auxIntToOp(v.AuxInt) + y := v_1 + flag := v_2 + if !(ccARM64Eval(cc, flag) < 0) { + break + } + v.copyOf(y) + return true + } + // match: (CSEL [cc] x y (CMPWconst [0] boolval)) + // cond: cc == OpARM64NotEqual && flagArg(boolval) != nil + // result: (CSEL [boolval.Op] x y flagArg(boolval)) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + y := v_1 + if v_2.Op != OpARM64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 { + break + } + boolval := v_2.Args[0] + if !(cc == OpARM64NotEqual && flagArg(boolval) != nil) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(boolval.Op) + v.AddArg3(x, y, flagArg(boolval)) + return true + } + // match: (CSEL [cc] x y (CMPWconst [0] boolval)) + // cond: cc == OpARM64Equal && flagArg(boolval) != nil + // result: (CSEL [arm64Negate(boolval.Op)] x y flagArg(boolval)) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + y := v_1 + if v_2.Op != OpARM64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 { + break + } + boolval := v_2.Args[0] + if !(cc == OpARM64Equal 
&& flagArg(boolval) != nil) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(arm64Negate(boolval.Op)) + v.AddArg3(x, y, flagArg(boolval)) + return true + } + return false +} +func rewriteValueARM64_OpARM64CSEL0(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CSEL0 [cc] x (InvertFlags cmp)) + // result: (CSEL0 [arm64Invert(cc)] x cmp) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64InvertFlags { + break + } + cmp := v_1.Args[0] + v.reset(OpARM64CSEL0) + v.AuxInt = opToAuxInt(arm64Invert(cc)) + v.AddArg2(x, cmp) + return true + } + // match: (CSEL0 [cc] x flag) + // cond: ccARM64Eval(cc, flag) > 0 + // result: x + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + flag := v_1 + if !(ccARM64Eval(cc, flag) > 0) { + break + } + v.copyOf(x) + return true + } + // match: (CSEL0 [cc] _ flag) + // cond: ccARM64Eval(cc, flag) < 0 + // result: (MOVDconst [0]) + for { + cc := auxIntToOp(v.AuxInt) + flag := v_1 + if !(ccARM64Eval(cc, flag) < 0) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (CSEL0 [cc] x (CMPWconst [0] boolval)) + // cond: cc == OpARM64NotEqual && flagArg(boolval) != nil + // result: (CSEL0 [boolval.Op] x flagArg(boolval)) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64CMPWconst || auxIntToInt32(v_1.AuxInt) != 0 { + break + } + boolval := v_1.Args[0] + if !(cc == OpARM64NotEqual && flagArg(boolval) != nil) { + break + } + v.reset(OpARM64CSEL0) + v.AuxInt = opToAuxInt(boolval.Op) + v.AddArg2(x, flagArg(boolval)) + return true + } + // match: (CSEL0 [cc] x (CMPWconst [0] boolval)) + // cond: cc == OpARM64Equal && flagArg(boolval) != nil + // result: (CSEL0 [arm64Negate(boolval.Op)] x flagArg(boolval)) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64CMPWconst || auxIntToInt32(v_1.AuxInt) != 0 { + break + } + boolval := v_1.Args[0] + if !(cc == OpARM64Equal && flagArg(boolval) != nil) { + break + } + 
v.reset(OpARM64CSEL0) + v.AuxInt = opToAuxInt(arm64Negate(boolval.Op)) + v.AddArg2(x, flagArg(boolval)) + return true + } + return false +} +func rewriteValueARM64_OpARM64CSETM(v *Value) bool { + v_0 := v.Args[0] + // match: (CSETM [cc] (InvertFlags cmp)) + // result: (CSETM [arm64Invert(cc)] cmp) + for { + cc := auxIntToOp(v.AuxInt) + if v_0.Op != OpARM64InvertFlags { + break + } + cmp := v_0.Args[0] + v.reset(OpARM64CSETM) + v.AuxInt = opToAuxInt(arm64Invert(cc)) + v.AddArg(cmp) + return true + } + // match: (CSETM [cc] flag) + // cond: ccARM64Eval(cc, flag) > 0 + // result: (MOVDconst [-1]) + for { + cc := auxIntToOp(v.AuxInt) + flag := v_0 + if !(ccARM64Eval(cc, flag) > 0) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + // match: (CSETM [cc] flag) + // cond: ccARM64Eval(cc, flag) < 0 + // result: (MOVDconst [0]) + for { + cc := auxIntToOp(v.AuxInt) + flag := v_0 + if !(ccARM64Eval(cc, flag) < 0) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64CSINC(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CSINC [cc] x y (InvertFlags cmp)) + // result: (CSINC [arm64Invert(cc)] x y cmp) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + y := v_1 + if v_2.Op != OpARM64InvertFlags { + break + } + cmp := v_2.Args[0] + v.reset(OpARM64CSINC) + v.AuxInt = opToAuxInt(arm64Invert(cc)) + v.AddArg3(x, y, cmp) + return true + } + // match: (CSINC [cc] x _ flag) + // cond: ccARM64Eval(cc, flag) > 0 + // result: x + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + flag := v_2 + if !(ccARM64Eval(cc, flag) > 0) { + break + } + v.copyOf(x) + return true + } + // match: (CSINC [cc] _ y flag) + // cond: ccARM64Eval(cc, flag) < 0 + // result: (ADDconst [1] y) + for { + cc := auxIntToOp(v.AuxInt) + y := v_1 + flag := v_2 + if !(ccARM64Eval(cc, flag) < 0) { + break + } + v.reset(OpARM64ADDconst) + v.AuxInt = 
int64ToAuxInt(1) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64CSINV(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CSINV [cc] x y (InvertFlags cmp)) + // result: (CSINV [arm64Invert(cc)] x y cmp) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + y := v_1 + if v_2.Op != OpARM64InvertFlags { + break + } + cmp := v_2.Args[0] + v.reset(OpARM64CSINV) + v.AuxInt = opToAuxInt(arm64Invert(cc)) + v.AddArg3(x, y, cmp) + return true + } + // match: (CSINV [cc] x _ flag) + // cond: ccARM64Eval(cc, flag) > 0 + // result: x + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + flag := v_2 + if !(ccARM64Eval(cc, flag) > 0) { + break + } + v.copyOf(x) + return true + } + // match: (CSINV [cc] _ y flag) + // cond: ccARM64Eval(cc, flag) < 0 + // result: (Not y) + for { + cc := auxIntToOp(v.AuxInt) + y := v_1 + flag := v_2 + if !(ccARM64Eval(cc, flag) < 0) { + break + } + v.reset(OpNot) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64CSNEG(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CSNEG [cc] x y (InvertFlags cmp)) + // result: (CSNEG [arm64Invert(cc)] x y cmp) + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + y := v_1 + if v_2.Op != OpARM64InvertFlags { + break + } + cmp := v_2.Args[0] + v.reset(OpARM64CSNEG) + v.AuxInt = opToAuxInt(arm64Invert(cc)) + v.AddArg3(x, y, cmp) + return true + } + // match: (CSNEG [cc] x _ flag) + // cond: ccARM64Eval(cc, flag) > 0 + // result: x + for { + cc := auxIntToOp(v.AuxInt) + x := v_0 + flag := v_2 + if !(ccARM64Eval(cc, flag) > 0) { + break + } + v.copyOf(x) + return true + } + // match: (CSNEG [cc] _ y flag) + // cond: ccARM64Eval(cc, flag) < 0 + // result: (NEG y) + for { + cc := auxIntToOp(v.AuxInt) + y := v_1 + flag := v_2 + if !(ccARM64Eval(cc, flag) < 0) { + break + } + v.reset(OpARM64NEG) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64DIV(v *Value) bool { + 
v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DIV (MOVDconst [c]) (MOVDconst [d])) + // cond: d != 0 + // result: (MOVDconst [c/d]) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpARM64MOVDconst { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(c / d) + return true + } + return false +} +func rewriteValueARM64_OpARM64DIVW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DIVW (MOVDconst [c]) (MOVDconst [d])) + // cond: d != 0 + // result: (MOVDconst [int64(uint32(int32(c)/int32(d)))]) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpARM64MOVDconst { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint32(int32(c) / int32(d)))) + return true + } + return false +} +func rewriteValueARM64_OpARM64EON(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EON x (MOVDconst [c])) + // result: (XORconst [^c] x) + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64XORconst) + v.AuxInt = int64ToAuxInt(^c) + v.AddArg(x) + return true + } + // match: (EON x x) + // result: (MOVDconst [-1]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + // match: (EON x0 x1:(SLLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (EONshiftLL x0 y [c]) + for { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SLLconst { + break + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64EONshiftLL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + // match: (EON x0 x1:(SRLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (EONshiftRL x0 y [c]) + for { + x0 := v_0 + x1 := v_1 + if 
x1.Op != OpARM64SRLconst { + break + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64EONshiftRL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + // match: (EON x0 x1:(SRAconst [c] y)) + // cond: clobberIfDead(x1) + // result: (EONshiftRA x0 y [c]) + for { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SRAconst { + break + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64EONshiftRA) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + // match: (EON x0 x1:(RORconst [c] y)) + // cond: clobberIfDead(x1) + // result: (EONshiftRO x0 y [c]) + for { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64RORconst { + break + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64EONshiftRO) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + return false +} +func rewriteValueARM64_OpARM64EONshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EONshiftLL x (MOVDconst [c]) [d]) + // result: (XORconst x [^int64(uint64(c)<>uint64(d))]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64XORconst) + v.AuxInt = int64ToAuxInt(^(c >> uint64(d))) + v.AddArg(x) + return true + } + // match: (EONshiftRA (SRAconst x [c]) x [c]) + // result: (MOVDconst [-1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + return false +} +func rewriteValueARM64_OpARM64EONshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EONshiftRL x (MOVDconst [c]) [d]) + // result: (XORconst x [^int64(uint64(c)>>uint64(d))]) + for { + d := auxIntToInt64(v.AuxInt) + x := 
v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64XORconst) + v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d))) + v.AddArg(x) + return true + } + // match: (EONshiftRL (SRLconst x [c]) x [c]) + // result: (MOVDconst [-1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + return false +} +func rewriteValueARM64_OpARM64EONshiftRO(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EONshiftRO x (MOVDconst [c]) [d]) + // result: (XORconst x [^rotateRight64(c, d)]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64XORconst) + v.AuxInt = int64ToAuxInt(^rotateRight64(c, d)) + v.AddArg(x) + return true + } + // match: (EONshiftRO (RORconst x [c]) x [c]) + // result: (MOVDconst [-1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + return false +} +func rewriteValueARM64_OpARM64Equal(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Equal (CMPconst [0] z:(AND x y))) + // cond: z.Uses == 1 + // result: (Equal (TST x y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (Equal (CMPWconst [0] x:(ANDconst [c] y))) + // cond: x.Uses == 1 + // result: (Equal (TSTWconst [int32(c)] y)) + for { + if v_0.Op != 
OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Equal (CMPWconst [0] z:(AND x y))) + // cond: z.Uses == 1 + // result: (Equal (TSTW x y)) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (Equal (CMPconst [0] x:(ANDconst [c] y))) + // cond: x.Uses == 1 + // result: (Equal (TSTconst [c] y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Equal (CMP x z:(NEG y))) + // cond: z.Uses == 1 + // result: (Equal (CMN x y)) + for { + if v_0.Op != OpARM64CMP { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpARM64NEG { + break + } + y := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (Equal (CMPW x z:(NEG y))) + // cond: z.Uses == 1 + // result: (Equal (CMNW x y)) + for { + if v_0.Op != OpARM64CMPW { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpARM64NEG { + break + } + y 
:= z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (Equal (CMPconst [0] x:(ADDconst [c] y))) + // cond: x.Uses == 1 + // result: (Equal (CMNconst [c] y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMNconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Equal (CMPWconst [0] x:(ADDconst [c] y))) + // cond: x.Uses == 1 + // result: (Equal (CMNWconst [int32(c)] y)) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMNWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Equal (CMPconst [0] z:(ADD x y))) + // cond: z.Uses == 1 + // result: (Equal (CMN x y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (Equal (CMPWconst [0] z:(ADD x y))) + // cond: z.Uses == 1 + // result: (Equal (CMNW x y)) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + 
v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (Equal (CMPconst [0] z:(MADD a x y))) + // cond: z.Uses == 1 + // result: (Equal (CMN a (MUL x y))) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADD { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (Equal (CMPconst [0] z:(MSUB a x y))) + // cond: z.Uses == 1 + // result: (Equal (CMP a (MUL x y))) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUB { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (Equal (CMPWconst [0] z:(MADDW a x y))) + // cond: z.Uses == 1 + // result: (Equal (CMNW a (MULW x y))) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADDW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (Equal (CMPWconst [0] z:(MSUBW a x y))) + // cond: z.Uses == 1 + // result: (Equal (CMPW a (MULW x y))) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != 
OpARM64MSUBW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (Equal (FlagConstant [fc])) + // result: (MOVDconst [b2i(fc.eq())]) + for { + if v_0.Op != OpARM64FlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(b2i(fc.eq())) + return true + } + // match: (Equal (InvertFlags x)) + // result: (Equal x) + for { + if v_0.Op != OpARM64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARM64Equal) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64FADDD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FADDD a (FMULD x y)) + // cond: a.Block.Func.useFMA(v) + // result: (FMADDD a x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + if v_1.Op != OpARM64FMULD { + continue + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Block.Func.useFMA(v)) { + continue + } + v.reset(OpARM64FMADDD) + v.AddArg3(a, x, y) + return true + } + break + } + // match: (FADDD a (FNMULD x y)) + // cond: a.Block.Func.useFMA(v) + // result: (FMSUBD a x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + if v_1.Op != OpARM64FNMULD { + continue + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Block.Func.useFMA(v)) { + continue + } + v.reset(OpARM64FMSUBD) + v.AddArg3(a, x, y) + return true + } + break + } + return false +} +func rewriteValueARM64_OpARM64FADDS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FADDS a (FMULS x y)) + // cond: a.Block.Func.useFMA(v) + // result: (FMADDS a x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + if v_1.Op != OpARM64FMULS { + continue + } + y := v_1.Args[1] + x := 
v_1.Args[0] + if !(a.Block.Func.useFMA(v)) { + continue + } + v.reset(OpARM64FMADDS) + v.AddArg3(a, x, y) + return true + } + break + } + // match: (FADDS a (FNMULS x y)) + // cond: a.Block.Func.useFMA(v) + // result: (FMSUBS a x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + if v_1.Op != OpARM64FNMULS { + continue + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Block.Func.useFMA(v)) { + continue + } + v.reset(OpARM64FMSUBS) + v.AddArg3(a, x, y) + return true + } + break + } + return false +} +func rewriteValueARM64_OpARM64FCMPD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FCMPD x (FMOVDconst [0])) + // result: (FCMPD0 x) + for { + x := v_0 + if v_1.Op != OpARM64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != 0 { + break + } + v.reset(OpARM64FCMPD0) + v.AddArg(x) + return true + } + // match: (FCMPD (FMOVDconst [0]) x) + // result: (InvertFlags (FCMPD0 x)) + for { + if v_0.Op != OpARM64FMOVDconst || auxIntToFloat64(v_0.AuxInt) != 0 { + break + } + x := v_1 + v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64FCMPD0, types.TypeFlags) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM64_OpARM64FCMPS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FCMPS x (FMOVSconst [0])) + // result: (FCMPS0 x) + for { + x := v_0 + if v_1.Op != OpARM64FMOVSconst || auxIntToFloat64(v_1.AuxInt) != 0 { + break + } + v.reset(OpARM64FCMPS0) + v.AddArg(x) + return true + } + // match: (FCMPS (FMOVSconst [0]) x) + // result: (InvertFlags (FCMPS0 x)) + for { + if v_0.Op != OpARM64FMOVSconst || auxIntToFloat64(v_0.AuxInt) != 0 { + break + } + x := v_1 + v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64FCMPS0, types.TypeFlags) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMOVDfpgp(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (FMOVDfpgp (Arg [off] {sym})) + 
// result: @b.Func.Entry (Arg [off] {sym}) + for { + t := v.Type + if v_0.Op != OpArg { + break + } + off := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + b = b.Func.Entry + v0 := b.NewValue0(v.Pos, OpArg, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMOVDgpfp(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (FMOVDgpfp (Arg [off] {sym})) + // result: @b.Func.Entry (Arg [off] {sym}) + for { + t := v.Type + if v_0.Op != OpArg { + break + } + off := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + b = b.Func.Entry + v0 := b.NewValue0(v.Pos, OpArg, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) + // result: (FMOVDgpfp val) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + val := v_1.Args[1] + if ptr != v_1.Args[0] { + break + } + v.reset(OpARM64FMOVDgpfp) + v.AddArg(val) + return true + } + // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (FMOVDload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64FMOVDload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (FMOVDload [off] {sym} 
(ADD ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (FMOVDloadidx ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64FMOVDloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (FMOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (FMOVDloadidx8 ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64FMOVDloadidx8) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64FMOVDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMOVDloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVDloadidx ptr (MOVDconst [c]) mem) + // cond: is32Bit(c) + // result: (FMOVDload [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c)) { + break + } + 
v.reset(OpARM64FMOVDload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (FMOVDloadidx (MOVDconst [c]) ptr mem) + // cond: is32Bit(c) + // result: (FMOVDload [int32(c)] ptr mem) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + ptr := v_1 + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64FMOVDload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (FMOVDloadidx ptr (SLLconst [3] idx) mem) + // result: (FMOVDloadidx8 ptr idx mem) + for { + ptr := v_0 + if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 { + break + } + idx := v_1.Args[0] + mem := v_2 + v.reset(OpARM64FMOVDloadidx8) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (FMOVDloadidx (SLLconst [3] idx) ptr mem) + // result: (FMOVDloadidx8 ptr idx mem) + for { + if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 { + break + } + idx := v_0.Args[0] + ptr := v_1 + mem := v_2 + v.reset(OpARM64FMOVDloadidx8) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMOVDloadidx8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVDloadidx8 ptr (MOVDconst [c]) mem) + // cond: is32Bit(c<<3) + // result: (FMOVDload ptr [int32(c)<<3] mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c << 3)) { + break + } + v.reset(OpARM64FMOVDload) + v.AuxInt = int32ToAuxInt(int32(c) << 3) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem) + // result: (MOVDstore [off] {sym} ptr val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != 
OpARM64FMOVDgpfp { + break + } + val := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVDstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (FMOVDstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64FMOVDstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (FMOVDstore [off] {sym} (ADD ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (FMOVDstoreidx ptr idx val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64FMOVDstoreidx) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (FMOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (FMOVDstoreidx8 ptr idx val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64FMOVDstoreidx8) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (FMOVDstore 
[off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64FMOVDstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMOVDstoreidx(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVDstoreidx ptr (MOVDconst [c]) val mem) + // cond: is32Bit(c) + // result: (FMOVDstore [int32(c)] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + val := v_2 + mem := v_3 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64FMOVDstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (FMOVDstoreidx (MOVDconst [c]) idx val mem) + // cond: is32Bit(c) + // result: (FMOVDstore [int32(c)] idx val mem) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + idx := v_1 + val := v_2 + mem := v_3 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64FMOVDstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(idx, val, mem) + return true + } + // match: (FMOVDstoreidx ptr (SLLconst [3] idx) val mem) + // result: (FMOVDstoreidx8 ptr idx val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 { + break + } + idx := v_1.Args[0] + val := v_2 + mem := v_3 + v.reset(OpARM64FMOVDstoreidx8) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (FMOVDstoreidx (SLLconst [3] idx) ptr val mem) + // result: (FMOVDstoreidx8 ptr idx val mem) + for { + if v_0.Op != OpARM64SLLconst || 
auxIntToInt64(v_0.AuxInt) != 3 { + break + } + idx := v_0.Args[0] + ptr := v_1 + val := v_2 + mem := v_3 + v.reset(OpARM64FMOVDstoreidx8) + v.AddArg4(ptr, idx, val, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMOVDstoreidx8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVDstoreidx8 ptr (MOVDconst [c]) val mem) + // cond: is32Bit(c<<3) + // result: (FMOVDstore [int32(c)<<3] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + val := v_2 + mem := v_3 + if !(is32Bit(c << 3)) { + break + } + v.reset(OpARM64FMOVDstore) + v.AuxInt = int32ToAuxInt(int32(c) << 3) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (FMOVSload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) + // result: (FMOVSgpfp val) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + val := v_1.Args[1] + if ptr != v_1.Args[0] { + break + } + v.reset(OpARM64FMOVSgpfp) + v.AddArg(val) + return true + } + // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (FMOVSload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64FMOVSload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (FMOVSload [off] {sym} (ADD ptr 
idx) mem) + // cond: off == 0 && sym == nil + // result: (FMOVSloadidx ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64FMOVSloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (FMOVSload [off] {sym} (ADDshiftLL [2] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (FMOVSloadidx4 ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64FMOVSloadidx4) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64FMOVSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMOVSloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVSloadidx ptr (MOVDconst [c]) mem) + // cond: is32Bit(c) + // result: (FMOVSload [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c)) { + break + } + 
v.reset(OpARM64FMOVSload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (FMOVSloadidx (MOVDconst [c]) ptr mem) + // cond: is32Bit(c) + // result: (FMOVSload [int32(c)] ptr mem) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + ptr := v_1 + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64FMOVSload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (FMOVSloadidx ptr (SLLconst [2] idx) mem) + // result: (FMOVSloadidx4 ptr idx mem) + for { + ptr := v_0 + if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 { + break + } + idx := v_1.Args[0] + mem := v_2 + v.reset(OpARM64FMOVSloadidx4) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (FMOVSloadidx (SLLconst [2] idx) ptr mem) + // result: (FMOVSloadidx4 ptr idx mem) + for { + if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 { + break + } + idx := v_0.Args[0] + ptr := v_1 + mem := v_2 + v.reset(OpARM64FMOVSloadidx4) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMOVSloadidx4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVSloadidx4 ptr (MOVDconst [c]) mem) + // cond: is32Bit(c<<2) + // result: (FMOVSload ptr [int32(c)<<2] mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c << 2)) { + break + } + v.reset(OpARM64FMOVSload) + v.AuxInt = int32ToAuxInt(int32(c) << 2) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem) + // result: (MOVWstore [off] {sym} ptr val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != 
OpARM64FMOVSgpfp { + break + } + val := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (FMOVSstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64FMOVSstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (FMOVSstore [off] {sym} (ADD ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (FMOVSstoreidx ptr idx val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64FMOVSstoreidx) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (FMOVSstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (FMOVSstoreidx4 ptr idx val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64FMOVSstoreidx4) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (FMOVSstore 
[off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64FMOVSstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMOVSstoreidx(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVSstoreidx ptr (MOVDconst [c]) val mem) + // cond: is32Bit(c) + // result: (FMOVSstore [int32(c)] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + val := v_2 + mem := v_3 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64FMOVSstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (FMOVSstoreidx (MOVDconst [c]) idx val mem) + // cond: is32Bit(c) + // result: (FMOVSstore [int32(c)] idx val mem) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + idx := v_1 + val := v_2 + mem := v_3 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64FMOVSstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(idx, val, mem) + return true + } + // match: (FMOVSstoreidx ptr (SLLconst [2] idx) val mem) + // result: (FMOVSstoreidx4 ptr idx val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 { + break + } + idx := v_1.Args[0] + val := v_2 + mem := v_3 + v.reset(OpARM64FMOVSstoreidx4) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (FMOVSstoreidx (SLLconst [2] idx) ptr val mem) + // result: (FMOVSstoreidx4 ptr idx val mem) + for { + if v_0.Op != OpARM64SLLconst || 
auxIntToInt64(v_0.AuxInt) != 2 { + break + } + idx := v_0.Args[0] + ptr := v_1 + val := v_2 + mem := v_3 + v.reset(OpARM64FMOVSstoreidx4) + v.AddArg4(ptr, idx, val, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMOVSstoreidx4(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVSstoreidx4 ptr (MOVDconst [c]) val mem) + // cond: is32Bit(c<<2) + // result: (FMOVSstore [int32(c)<<2] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + val := v_2 + mem := v_3 + if !(is32Bit(c << 2)) { + break + } + v.reset(OpARM64FMOVSstore) + v.AuxInt = int32ToAuxInt(int32(c) << 2) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64FMULD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMULD (FNEGD x) y) + // result: (FNMULD x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpARM64FNEGD { + continue + } + x := v_0.Args[0] + y := v_1 + v.reset(OpARM64FNMULD) + v.AddArg2(x, y) + return true + } + break + } + return false +} +func rewriteValueARM64_OpARM64FMULS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMULS (FNEGS x) y) + // result: (FNMULS x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpARM64FNEGS { + continue + } + x := v_0.Args[0] + y := v_1 + v.reset(OpARM64FNMULS) + v.AddArg2(x, y) + return true + } + break + } + return false +} +func rewriteValueARM64_OpARM64FNEGD(v *Value) bool { + v_0 := v.Args[0] + // match: (FNEGD (FMULD x y)) + // result: (FNMULD x y) + for { + if v_0.Op != OpARM64FMULD { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpARM64FNMULD) + v.AddArg2(x, y) + return true + } + // match: (FNEGD (FNMULD x y)) + // result: (FMULD x y) + for { + if v_0.Op != OpARM64FNMULD { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpARM64FMULD) + 
v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM64_OpARM64FNEGS(v *Value) bool { + v_0 := v.Args[0] + // match: (FNEGS (FMULS x y)) + // result: (FNMULS x y) + for { + if v_0.Op != OpARM64FMULS { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpARM64FNMULS) + v.AddArg2(x, y) + return true + } + // match: (FNEGS (FNMULS x y)) + // result: (FMULS x y) + for { + if v_0.Op != OpARM64FNMULS { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpARM64FMULS) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM64_OpARM64FNMULD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FNMULD (FNEGD x) y) + // result: (FMULD x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpARM64FNEGD { + continue + } + x := v_0.Args[0] + y := v_1 + v.reset(OpARM64FMULD) + v.AddArg2(x, y) + return true + } + break + } + return false +} +func rewriteValueARM64_OpARM64FNMULS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FNMULS (FNEGS x) y) + // result: (FMULS x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpARM64FNEGS { + continue + } + x := v_0.Args[0] + y := v_1 + v.reset(OpARM64FMULS) + v.AddArg2(x, y) + return true + } + break + } + return false +} +func rewriteValueARM64_OpARM64FSUBD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FSUBD a (FMULD x y)) + // cond: a.Block.Func.useFMA(v) + // result: (FMSUBD a x y) + for { + a := v_0 + if v_1.Op != OpARM64FMULD { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Block.Func.useFMA(v)) { + break + } + v.reset(OpARM64FMSUBD) + v.AddArg3(a, x, y) + return true + } + // match: (FSUBD (FMULD x y) a) + // cond: a.Block.Func.useFMA(v) + // result: (FNMSUBD a x y) + for { + if v_0.Op != OpARM64FMULD { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + a := v_1 + if !(a.Block.Func.useFMA(v)) { + break + } + v.reset(OpARM64FNMSUBD) + 
v.AddArg3(a, x, y) + return true + } + // match: (FSUBD a (FNMULD x y)) + // cond: a.Block.Func.useFMA(v) + // result: (FMADDD a x y) + for { + a := v_0 + if v_1.Op != OpARM64FNMULD { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Block.Func.useFMA(v)) { + break + } + v.reset(OpARM64FMADDD) + v.AddArg3(a, x, y) + return true + } + // match: (FSUBD (FNMULD x y) a) + // cond: a.Block.Func.useFMA(v) + // result: (FNMADDD a x y) + for { + if v_0.Op != OpARM64FNMULD { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + a := v_1 + if !(a.Block.Func.useFMA(v)) { + break + } + v.reset(OpARM64FNMADDD) + v.AddArg3(a, x, y) + return true + } + return false +} +func rewriteValueARM64_OpARM64FSUBS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FSUBS a (FMULS x y)) + // cond: a.Block.Func.useFMA(v) + // result: (FMSUBS a x y) + for { + a := v_0 + if v_1.Op != OpARM64FMULS { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Block.Func.useFMA(v)) { + break + } + v.reset(OpARM64FMSUBS) + v.AddArg3(a, x, y) + return true + } + // match: (FSUBS (FMULS x y) a) + // cond: a.Block.Func.useFMA(v) + // result: (FNMSUBS a x y) + for { + if v_0.Op != OpARM64FMULS { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + a := v_1 + if !(a.Block.Func.useFMA(v)) { + break + } + v.reset(OpARM64FNMSUBS) + v.AddArg3(a, x, y) + return true + } + // match: (FSUBS a (FNMULS x y)) + // cond: a.Block.Func.useFMA(v) + // result: (FMADDS a x y) + for { + a := v_0 + if v_1.Op != OpARM64FNMULS { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Block.Func.useFMA(v)) { + break + } + v.reset(OpARM64FMADDS) + v.AddArg3(a, x, y) + return true + } + // match: (FSUBS (FNMULS x y) a) + // cond: a.Block.Func.useFMA(v) + // result: (FNMADDS a x y) + for { + if v_0.Op != OpARM64FNMULS { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + a := v_1 + if !(a.Block.Func.useFMA(v)) { + break + } + v.reset(OpARM64FNMADDS) + v.AddArg3(a, x, y) + return true + } + return false 
+} +func rewriteValueARM64_OpARM64GreaterEqual(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (GreaterEqual (CMPconst [0] z:(AND x y))) + // cond: z.Uses == 1 + // result: (GreaterEqual (TST x y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64GreaterEqual) + v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (GreaterEqual (CMPWconst [0] x:(ANDconst [c] y))) + // cond: x.Uses == 1 + // result: (GreaterEqual (TSTWconst [int32(c)] y)) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64GreaterEqual) + v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (GreaterEqual (CMPWconst [0] z:(AND x y))) + // cond: z.Uses == 1 + // result: (GreaterEqual (TSTW x y)) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64GreaterEqual) + v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (GreaterEqual (CMPconst [0] x:(ANDconst [c] y))) + // cond: x.Uses == 1 + // result: (GreaterEqual (TSTconst [c] y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64GreaterEqual) + v0 := 
b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (GreaterEqual (CMPconst [0] x:(ADDconst [c] y))) + // cond: x.Uses == 1 + // result: (GreaterEqualNoov (CMNconst [c] y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64GreaterEqualNoov) + v0 := b.NewValue0(v.Pos, OpARM64CMNconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (GreaterEqual (CMPWconst [0] x:(ADDconst [c] y))) + // cond: x.Uses == 1 + // result: (GreaterEqualNoov (CMNWconst [int32(c)] y)) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64GreaterEqualNoov) + v0 := b.NewValue0(v.Pos, OpARM64CMNWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (GreaterEqual (CMPconst [0] z:(ADD x y))) + // cond: z.Uses == 1 + // result: (GreaterEqualNoov (CMN x y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64GreaterEqualNoov) + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (GreaterEqual (CMPWconst [0] z:(ADD x y))) + // cond: z.Uses == 1 + // result: (GreaterEqualNoov (CMNW x y)) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + y := z.Args[1] + x := 
z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64GreaterEqualNoov) + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (GreaterEqual (CMPconst [0] z:(MADD a x y))) + // cond: z.Uses == 1 + // result: (GreaterEqualNoov (CMN a (MUL x y))) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADD { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64GreaterEqualNoov) + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (GreaterEqual (CMPconst [0] z:(MSUB a x y))) + // cond: z.Uses == 1 + // result: (GreaterEqualNoov (CMP a (MUL x y))) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUB { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64GreaterEqualNoov) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (GreaterEqual (CMPWconst [0] z:(MADDW a x y))) + // cond: z.Uses == 1 + // result: (GreaterEqualNoov (CMNW a (MULW x y))) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADDW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64GreaterEqualNoov) + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (GreaterEqual (CMPWconst [0] z:(MSUBW a x y))) + // cond: z.Uses == 1 + // 
result: (GreaterEqualNoov (CMPW a (MULW x y))) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUBW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64GreaterEqualNoov) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (GreaterEqual (FlagConstant [fc])) + // result: (MOVDconst [b2i(fc.ge())]) + for { + if v_0.Op != OpARM64FlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(b2i(fc.ge())) + return true + } + // match: (GreaterEqual (InvertFlags x)) + // result: (LessEqual x) + for { + if v_0.Op != OpARM64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARM64LessEqual) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64GreaterEqualF(v *Value) bool { + v_0 := v.Args[0] + // match: (GreaterEqualF (InvertFlags x)) + // result: (LessEqualF x) + for { + if v_0.Op != OpARM64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARM64LessEqualF) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64GreaterEqualNoov(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualNoov (InvertFlags x)) + // result: (CSINC [OpARM64NotEqual] (LessThanNoov x) (MOVDconst [0]) x) + for { + if v_0.Op != OpARM64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARM64CSINC) + v.AuxInt = opToAuxInt(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64LessThanNoov, typ.Bool) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v.AddArg3(v0, v1, x) + return true + } + return false +} +func rewriteValueARM64_OpARM64GreaterEqualU(v *Value) bool { + v_0 := v.Args[0] + // match: 
(GreaterEqualU (FlagConstant [fc])) + // result: (MOVDconst [b2i(fc.uge())]) + for { + if v_0.Op != OpARM64FlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(b2i(fc.uge())) + return true + } + // match: (GreaterEqualU (InvertFlags x)) + // result: (LessEqualU x) + for { + if v_0.Op != OpARM64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARM64LessEqualU) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64GreaterThan(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (GreaterThan (CMPconst [0] z:(AND x y))) + // cond: z.Uses == 1 + // result: (GreaterThan (TST x y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64GreaterThan) + v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (GreaterThan (CMPWconst [0] x:(ANDconst [c] y))) + // cond: x.Uses == 1 + // result: (GreaterThan (TSTWconst [int32(c)] y)) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64GreaterThan) + v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (GreaterThan (CMPWconst [0] z:(AND x y))) + // cond: z.Uses == 1 + // result: (GreaterThan (TSTW x y)) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64GreaterThan) + v0 := b.NewValue0(v.Pos, OpARM64TSTW, 
types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (GreaterThan (CMPconst [0] x:(ANDconst [c] y))) + // cond: x.Uses == 1 + // result: (GreaterThan (TSTconst [c] y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64GreaterThan) + v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (GreaterThan (FlagConstant [fc])) + // result: (MOVDconst [b2i(fc.gt())]) + for { + if v_0.Op != OpARM64FlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(b2i(fc.gt())) + return true + } + // match: (GreaterThan (InvertFlags x)) + // result: (LessThan x) + for { + if v_0.Op != OpARM64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARM64LessThan) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64GreaterThanF(v *Value) bool { + v_0 := v.Args[0] + // match: (GreaterThanF (InvertFlags x)) + // result: (LessThanF x) + for { + if v_0.Op != OpARM64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARM64LessThanF) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64GreaterThanU(v *Value) bool { + v_0 := v.Args[0] + // match: (GreaterThanU (FlagConstant [fc])) + // result: (MOVDconst [b2i(fc.ugt())]) + for { + if v_0.Op != OpARM64FlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(b2i(fc.ugt())) + return true + } + // match: (GreaterThanU (InvertFlags x)) + // result: (LessThanU x) + for { + if v_0.Op != OpARM64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARM64LessThanU) + v.AddArg(x) + return true + } + return false +} +func 
rewriteValueARM64_OpARM64LDP(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (LDP [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (LDP [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64LDP) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (LDP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (LDP [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64LDP) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64LessEqual(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (LessEqual (CMPconst [0] z:(AND x y))) + // cond: z.Uses == 1 + // result: (LessEqual (TST x y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64LessEqual) + v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg2(x, y) + 
v.AddArg(v0) + return true + } + // match: (LessEqual (CMPWconst [0] x:(ANDconst [c] y))) + // cond: x.Uses == 1 + // result: (LessEqual (TSTWconst [int32(c)] y)) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64LessEqual) + v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (LessEqual (CMPWconst [0] z:(AND x y))) + // cond: z.Uses == 1 + // result: (LessEqual (TSTW x y)) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64LessEqual) + v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (LessEqual (CMPconst [0] x:(ANDconst [c] y))) + // cond: x.Uses == 1 + // result: (LessEqual (TSTconst [c] y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64LessEqual) + v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (LessEqual (FlagConstant [fc])) + // result: (MOVDconst [b2i(fc.le())]) + for { + if v_0.Op != OpARM64FlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(b2i(fc.le())) + return true + } + // match: (LessEqual (InvertFlags x)) + // result: (GreaterEqual x) + for { + if v_0.Op != OpARM64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARM64GreaterEqual) 
+ v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64LessEqualF(v *Value) bool { + v_0 := v.Args[0] + // match: (LessEqualF (InvertFlags x)) + // result: (GreaterEqualF x) + for { + if v_0.Op != OpARM64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARM64GreaterEqualF) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64LessEqualU(v *Value) bool { + v_0 := v.Args[0] + // match: (LessEqualU (FlagConstant [fc])) + // result: (MOVDconst [b2i(fc.ule())]) + for { + if v_0.Op != OpARM64FlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(b2i(fc.ule())) + return true + } + // match: (LessEqualU (InvertFlags x)) + // result: (GreaterEqualU x) + for { + if v_0.Op != OpARM64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARM64GreaterEqualU) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64LessThan(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (LessThan (CMPconst [0] z:(AND x y))) + // cond: z.Uses == 1 + // result: (LessThan (TST x y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64LessThan) + v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (LessThan (CMPWconst [0] x:(ANDconst [c] y))) + // cond: x.Uses == 1 + // result: (LessThan (TSTWconst [int32(c)] y)) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64LessThan) + v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + v.AddArg(v0) 
+ return true + } + // match: (LessThan (CMPWconst [0] z:(AND x y))) + // cond: z.Uses == 1 + // result: (LessThan (TSTW x y)) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64LessThan) + v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (LessThan (CMPconst [0] x:(ANDconst [c] y))) + // cond: x.Uses == 1 + // result: (LessThan (TSTconst [c] y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64LessThan) + v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (LessThan (CMPconst [0] x:(ADDconst [c] y))) + // cond: x.Uses == 1 + // result: (LessThanNoov (CMNconst [c] y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64LessThanNoov) + v0 := b.NewValue0(v.Pos, OpARM64CMNconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (LessThan (CMPWconst [0] x:(ADDconst [c] y))) + // cond: x.Uses == 1 + // result: (LessThanNoov (CMNWconst [int32(c)] y)) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64LessThanNoov) + v0 := b.NewValue0(v.Pos, OpARM64CMNWconst, types.TypeFlags) + v0.AuxInt = 
int32ToAuxInt(int32(c)) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (LessThan (CMPconst [0] z:(ADD x y))) + // cond: z.Uses == 1 + // result: (LessThanNoov (CMN x y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64LessThanNoov) + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (LessThan (CMPWconst [0] z:(ADD x y))) + // cond: z.Uses == 1 + // result: (LessThanNoov (CMNW x y)) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64LessThanNoov) + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (LessThan (CMPconst [0] z:(MADD a x y))) + // cond: z.Uses == 1 + // result: (LessThanNoov (CMN a (MUL x y))) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADD { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64LessThanNoov) + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (LessThan (CMPconst [0] z:(MSUB a x y))) + // cond: z.Uses == 1 + // result: (LessThanNoov (CMP a (MUL x y))) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUB { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64LessThanNoov) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + 
v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (LessThan (CMPWconst [0] z:(MADDW a x y))) + // cond: z.Uses == 1 + // result: (LessThanNoov (CMNW a (MULW x y))) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADDW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64LessThanNoov) + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (LessThan (CMPWconst [0] z:(MSUBW a x y))) + // cond: z.Uses == 1 + // result: (LessThanNoov (CMPW a (MULW x y))) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUBW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64LessThanNoov) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (LessThan (FlagConstant [fc])) + // result: (MOVDconst [b2i(fc.lt())]) + for { + if v_0.Op != OpARM64FlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(b2i(fc.lt())) + return true + } + // match: (LessThan (InvertFlags x)) + // result: (GreaterThan x) + for { + if v_0.Op != OpARM64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARM64GreaterThan) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64LessThanF(v *Value) bool { + v_0 := v.Args[0] + // match: (LessThanF (InvertFlags x)) + // result: (GreaterThanF x) + for { + if v_0.Op != OpARM64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARM64GreaterThanF) + 
v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64LessThanNoov(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LessThanNoov (InvertFlags x)) + // result: (CSEL0 [OpARM64NotEqual] (GreaterEqualNoov x) x) + for { + if v_0.Op != OpARM64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARM64CSEL0) + v.AuxInt = opToAuxInt(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64GreaterEqualNoov, typ.Bool) + v0.AddArg(x) + v.AddArg2(v0, x) + return true + } + return false +} +func rewriteValueARM64_OpARM64LessThanU(v *Value) bool { + v_0 := v.Args[0] + // match: (LessThanU (FlagConstant [fc])) + // result: (MOVDconst [b2i(fc.ult())]) + for { + if v_0.Op != OpARM64FlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(b2i(fc.ult())) + return true + } + // match: (LessThanU (InvertFlags x)) + // result: (GreaterThanU x) + for { + if v_0.Op != OpARM64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARM64GreaterThanU) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64MADD(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MADD a x (MOVDconst [-1])) + // result: (SUB a x) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != -1 { + break + } + v.reset(OpARM64SUB) + v.AddArg2(a, x) + return true + } + // match: (MADD a _ (MOVDconst [0])) + // result: a + for { + a := v_0 + if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 { + break + } + v.copyOf(a) + return true + } + // match: (MADD a x (MOVDconst [1])) + // result: (ADD a x) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 1 { + break + } + v.reset(OpARM64ADD) + v.AddArg2(a, x) + return true + } + // match: (MADD a x (MOVDconst [c])) + // cond: isPowerOfTwo64(c) + // result: (ADDshiftLL a x 
[log64(c)]) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(isPowerOfTwo64(c)) { + break + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = int64ToAuxInt(log64(c)) + v.AddArg2(a, x) + return true + } + // match: (MADD a x (MOVDconst [c])) + // cond: isPowerOfTwo64(c-1) && c>=3 + // result: (ADD a (ADDshiftLL x x [log64(c-1)])) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(isPowerOfTwo64(c-1) && c >= 3) { + break + } + v.reset(OpARM64ADD) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c - 1)) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MADD a x (MOVDconst [c])) + // cond: isPowerOfTwo64(c+1) && c>=7 + // result: (SUB a (SUBshiftLL x x [log64(c+1)])) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(isPowerOfTwo64(c+1) && c >= 7) { + break + } + v.reset(OpARM64SUB) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c + 1)) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MADD a x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo64(c/3) + // result: (SUBshiftLL a (SUBshiftLL x x [2]) [log64(c/3)]) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(c%3 == 0 && isPowerOfTwo64(c/3)) { + break + } + v.reset(OpARM64SUBshiftLL) + v.AuxInt = int64ToAuxInt(log64(c / 3)) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(2) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MADD a x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo64(c/5) + // result: (ADDshiftLL a (ADDshiftLL x x [2]) [log64(c/5)]) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(c%5 == 0 && 
isPowerOfTwo64(c/5)) { + break + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = int64ToAuxInt(log64(c / 5)) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(2) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MADD a x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo64(c/7) + // result: (SUBshiftLL a (SUBshiftLL x x [3]) [log64(c/7)]) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(c%7 == 0 && isPowerOfTwo64(c/7)) { + break + } + v.reset(OpARM64SUBshiftLL) + v.AuxInt = int64ToAuxInt(log64(c / 7)) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(3) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MADD a x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo64(c/9) + // result: (ADDshiftLL a (ADDshiftLL x x [3]) [log64(c/9)]) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(c%9 == 0 && isPowerOfTwo64(c/9)) { + break + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = int64ToAuxInt(log64(c / 9)) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(3) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MADD a (MOVDconst [-1]) x) + // result: (SUB a x) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 { + break + } + x := v_2 + v.reset(OpARM64SUB) + v.AddArg2(a, x) + return true + } + // match: (MADD a (MOVDconst [0]) _) + // result: a + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.copyOf(a) + return true + } + // match: (MADD a (MOVDconst [1]) x) + // result: (ADD a x) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 { + break + } + x := v_2 + v.reset(OpARM64ADD) + v.AddArg2(a, x) + return true + } + // match: (MADD a (MOVDconst [c]) x) + // cond: isPowerOfTwo64(c) + 
// result: (ADDshiftLL a x [log64(c)]) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(isPowerOfTwo64(c)) { + break + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = int64ToAuxInt(log64(c)) + v.AddArg2(a, x) + return true + } + // match: (MADD a (MOVDconst [c]) x) + // cond: isPowerOfTwo64(c-1) && c>=3 + // result: (ADD a (ADDshiftLL x x [log64(c-1)])) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(isPowerOfTwo64(c-1) && c >= 3) { + break + } + v.reset(OpARM64ADD) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c - 1)) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MADD a (MOVDconst [c]) x) + // cond: isPowerOfTwo64(c+1) && c>=7 + // result: (SUB a (SUBshiftLL x x [log64(c+1)])) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(isPowerOfTwo64(c+1) && c >= 7) { + break + } + v.reset(OpARM64SUB) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c + 1)) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MADD a (MOVDconst [c]) x) + // cond: c%3 == 0 && isPowerOfTwo64(c/3) + // result: (SUBshiftLL a (SUBshiftLL x x [2]) [log64(c/3)]) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(c%3 == 0 && isPowerOfTwo64(c/3)) { + break + } + v.reset(OpARM64SUBshiftLL) + v.AuxInt = int64ToAuxInt(log64(c / 3)) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(2) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MADD a (MOVDconst [c]) x) + // cond: c%5 == 0 && isPowerOfTwo64(c/5) + // result: (ADDshiftLL a (ADDshiftLL x x [2]) [log64(c/5)]) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(c%5 
== 0 && isPowerOfTwo64(c/5)) { + break + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = int64ToAuxInt(log64(c / 5)) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(2) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MADD a (MOVDconst [c]) x) + // cond: c%7 == 0 && isPowerOfTwo64(c/7) + // result: (SUBshiftLL a (SUBshiftLL x x [3]) [log64(c/7)]) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(c%7 == 0 && isPowerOfTwo64(c/7)) { + break + } + v.reset(OpARM64SUBshiftLL) + v.AuxInt = int64ToAuxInt(log64(c / 7)) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(3) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MADD a (MOVDconst [c]) x) + // cond: c%9 == 0 && isPowerOfTwo64(c/9) + // result: (ADDshiftLL a (ADDshiftLL x x [3]) [log64(c/9)]) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(c%9 == 0 && isPowerOfTwo64(c/9)) { + break + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = int64ToAuxInt(log64(c / 9)) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(3) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MADD (MOVDconst [c]) x y) + // result: (ADDconst [c] (MUL x y)) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARM64ADDconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (MADD a (MOVDconst [c]) (MOVDconst [d])) + // result: (ADDconst [c*d] a) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if v_2.Op != OpARM64MOVDconst { + break + } + d := auxIntToInt64(v_2.AuxInt) + v.reset(OpARM64ADDconst) + v.AuxInt = int64ToAuxInt(c * d) + v.AddArg(a) + return true + } + return 
false +} +func rewriteValueARM64_OpARM64MADDW(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MADDW a x (MOVDconst [c])) + // cond: int32(c)==-1 + // result: (MOVWUreg (SUB a x)) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(int32(c) == -1) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type) + v0.AddArg2(a, x) + v.AddArg(v0) + return true + } + // match: (MADDW a _ (MOVDconst [c])) + // cond: int32(c)==0 + // result: (MOVWUreg a) + for { + a := v_0 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(int32(c) == 0) { + break + } + v.reset(OpARM64MOVWUreg) + v.AddArg(a) + return true + } + // match: (MADDW a x (MOVDconst [c])) + // cond: int32(c)==1 + // result: (MOVWUreg (ADD a x)) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(int32(c) == 1) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type) + v0.AddArg2(a, x) + v.AddArg(v0) + return true + } + // match: (MADDW a x (MOVDconst [c])) + // cond: isPowerOfTwo64(c) + // result: (MOVWUreg (ADDshiftLL a x [log64(c)])) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(isPowerOfTwo64(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c)) + v0.AddArg2(a, x) + v.AddArg(v0) + return true + } + // match: (MADDW a x (MOVDconst [c])) + // cond: isPowerOfTwo64(c-1) && int32(c)>=3 + // result: (MOVWUreg (ADD a (ADDshiftLL x x [log64(c-1)]))) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(isPowerOfTwo64(c-1) && int32(c) >= 3) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type) + v1 := 
b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(log64(c - 1)) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MADDW a x (MOVDconst [c])) + // cond: isPowerOfTwo64(c+1) && int32(c)>=7 + // result: (MOVWUreg (SUB a (SUBshiftLL x x [log64(c+1)]))) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(isPowerOfTwo64(c+1) && int32(c) >= 7) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type) + v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(log64(c + 1)) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MADDW a x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) + // result: (MOVWUreg (SUBshiftLL a (SUBshiftLL x x [2]) [log64(c/3)])) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 3)) + v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(2) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MADDW a x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) + // result: (MOVWUreg (ADDshiftLL a (ADDshiftLL x x [2]) [log64(c/5)])) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 5)) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(2) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MADDW a x 
(MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) + // result: (MOVWUreg (SUBshiftLL a (SUBshiftLL x x [3]) [log64(c/7)])) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 7)) + v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(3) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MADDW a x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) + // result: (MOVWUreg (ADDshiftLL a (ADDshiftLL x x [3]) [log64(c/9)])) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 9)) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(3) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MADDW a (MOVDconst [c]) x) + // cond: int32(c)==-1 + // result: (MOVWUreg (SUB a x)) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(int32(c) == -1) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type) + v0.AddArg2(a, x) + v.AddArg(v0) + return true + } + // match: (MADDW a (MOVDconst [c]) _) + // cond: int32(c)==0 + // result: (MOVWUreg a) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(int32(c) == 0) { + break + } + v.reset(OpARM64MOVWUreg) + v.AddArg(a) + return true + } + // match: (MADDW a (MOVDconst [c]) x) + // cond: int32(c)==1 + // result: (MOVWUreg (ADD a x)) + for { + a := v_0 
+ if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(int32(c) == 1) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type) + v0.AddArg2(a, x) + v.AddArg(v0) + return true + } + // match: (MADDW a (MOVDconst [c]) x) + // cond: isPowerOfTwo64(c) + // result: (MOVWUreg (ADDshiftLL a x [log64(c)])) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(isPowerOfTwo64(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c)) + v0.AddArg2(a, x) + v.AddArg(v0) + return true + } + // match: (MADDW a (MOVDconst [c]) x) + // cond: isPowerOfTwo64(c-1) && int32(c)>=3 + // result: (MOVWUreg (ADD a (ADDshiftLL x x [log64(c-1)]))) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(isPowerOfTwo64(c-1) && int32(c) >= 3) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(log64(c - 1)) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MADDW a (MOVDconst [c]) x) + // cond: isPowerOfTwo64(c+1) && int32(c)>=7 + // result: (MOVWUreg (SUB a (SUBshiftLL x x [log64(c+1)]))) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(isPowerOfTwo64(c+1) && int32(c) >= 7) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type) + v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(log64(c + 1)) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MADDW a (MOVDconst [c]) x) + // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) + // result: (MOVWUreg (SUBshiftLL a (SUBshiftLL x x [2]) [log64(c/3)])) + for { + a := v_0 + 
if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 3)) + v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(2) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MADDW a (MOVDconst [c]) x) + // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) + // result: (MOVWUreg (ADDshiftLL a (ADDshiftLL x x [2]) [log64(c/5)])) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 5)) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(2) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MADDW a (MOVDconst [c]) x) + // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) + // result: (MOVWUreg (SUBshiftLL a (SUBshiftLL x x [3]) [log64(c/7)])) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 7)) + v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(3) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MADDW a (MOVDconst [c]) x) + // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) + // result: (MOVWUreg (ADDshiftLL a (ADDshiftLL x x [3]) [log64(c/9)])) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(c%9 == 0 && 
isPowerOfTwo64(c/9) && is32Bit(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 9)) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(3) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MADDW (MOVDconst [c]) x y) + // result: (MOVWUreg (ADDconst [c] (MULW x y))) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADDconst, x.Type) + v0.AuxInt = int64ToAuxInt(c) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (MADDW a (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVWUreg (ADDconst [c*d] a)) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if v_2.Op != OpARM64MOVDconst { + break + } + d := auxIntToInt64(v_2.AuxInt) + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADDconst, a.Type) + v0.AuxInt = int64ToAuxInt(c * d) + v0.AddArg(a) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MNEG(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MNEG x (MOVDconst [-1])) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 { + continue + } + v.copyOf(x) + return true + } + break + } + // match: (MNEG _ (MOVDconst [0])) + // result: (MOVDconst [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + continue + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + break + } + // match: (MNEG x (MOVDconst [1])) + // result: (NEG x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, 
v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 { + continue + } + v.reset(OpARM64NEG) + v.AddArg(x) + return true + } + break + } + // match: (MNEG x (MOVDconst [c])) + // cond: isPowerOfTwo64(c) + // result: (NEG (SLLconst [log64(c)] x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c)) { + continue + } + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (MNEG x (MOVDconst [c])) + // cond: isPowerOfTwo64(c-1) && c >= 3 + // result: (NEG (ADDshiftLL x x [log64(c-1)])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c-1) && c >= 3) { + continue + } + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c - 1)) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + break + } + // match: (MNEG x (MOVDconst [c])) + // cond: isPowerOfTwo64(c+1) && c >= 7 + // result: (NEG (ADDshiftLL (NEG x) x [log64(c+1)])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c+1) && c >= 7) { + continue + } + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c + 1)) + v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1.AddArg(x) + v0.AddArg2(v1, x) + v.AddArg(v0) + return true + } + break + } + // match: (MNEG x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo64(c/3) + // result: (SLLconst [log64(c/3)] (SUBshiftLL x x [2])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op 
!= OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(c%3 == 0 && isPowerOfTwo64(c/3)) { + continue + } + v.reset(OpARM64SLLconst) + v.Type = x.Type + v.AuxInt = int64ToAuxInt(log64(c / 3)) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(2) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + break + } + // match: (MNEG x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo64(c/5) + // result: (NEG (SLLconst [log64(c/5)] (ADDshiftLL x x [2]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(c%5 == 0 && isPowerOfTwo64(c/5)) { + continue + } + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 5)) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(2) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + break + } + // match: (MNEG x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo64(c/7) + // result: (SLLconst [log64(c/7)] (SUBshiftLL x x [3])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(c%7 == 0 && isPowerOfTwo64(c/7)) { + continue + } + v.reset(OpARM64SLLconst) + v.Type = x.Type + v.AuxInt = int64ToAuxInt(log64(c / 7)) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(3) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + break + } + // match: (MNEG x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo64(c/9) + // result: (NEG (SLLconst [log64(c/9)] (ADDshiftLL x x [3]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(c%9 == 0 && isPowerOfTwo64(c/9)) { + continue + } + v.reset(OpARM64NEG) + v0 := 
b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 9)) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(3) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + break + } + // match: (MNEG (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [-c*d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpARM64MOVDconst { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(-c * d) + return true + } + break + } + return false +} +func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MNEGW x (MOVDconst [c])) + // cond: int32(c)==-1 + // result: (MOVWUreg x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(int32(c) == -1) { + continue + } + v.reset(OpARM64MOVWUreg) + v.AddArg(x) + return true + } + break + } + // match: (MNEGW _ (MOVDconst [c])) + // cond: int32(c)==0 + // result: (MOVDconst [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(int32(c) == 0) { + continue + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + break + } + // match: (MNEGW x (MOVDconst [c])) + // cond: int32(c)==1 + // result: (MOVWUreg (NEG x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(int32(c) == 1) { + continue + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (MNEGW x (MOVDconst [c])) + // cond: 
isPowerOfTwo64(c) + // result: (NEG (SLLconst [log64(c)] x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c)) { + continue + } + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (MNEGW x (MOVDconst [c])) + // cond: isPowerOfTwo64(c-1) && int32(c) >= 3 + // result: (MOVWUreg (NEG (ADDshiftLL x x [log64(c-1)]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c-1) && int32(c) >= 3) { + continue + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(log64(c - 1)) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + break + } + // match: (MNEGW x (MOVDconst [c])) + // cond: isPowerOfTwo64(c+1) && int32(c) >= 7 + // result: (MOVWUreg (NEG (ADDshiftLL (NEG x) x [log64(c+1)]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c+1) && int32(c) >= 7) { + continue + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(log64(c + 1)) + v2 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v2.AddArg(x) + v1.AddArg2(v2, x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + break + } + // match: (MNEGW x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) + // result: (MOVWUreg (SLLconst [log64(c/3)] (SUBshiftLL x x [2]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op 
!= OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) { + continue + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 3)) + v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(2) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + break + } + // match: (MNEGW x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) + // result: (MOVWUreg (NEG (SLLconst [log64(c/5)] (ADDshiftLL x x [2])))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) { + continue + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v1.AuxInt = int64ToAuxInt(log64(c / 5)) + v2 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v2.AuxInt = int64ToAuxInt(2) + v2.AddArg2(x, x) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + break + } + // match: (MNEGW x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) + // result: (MOVWUreg (SLLconst [log64(c/7)] (SUBshiftLL x x [3]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) { + continue + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 7)) + v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(3) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + break + } + // match: (MNEGW x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) + // result: (MOVWUreg (NEG (SLLconst 
[log64(c/9)] (ADDshiftLL x x [3])))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) { + continue + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v1.AuxInt = int64ToAuxInt(log64(c / 9)) + v2 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v2.AuxInt = int64ToAuxInt(3) + v2.AddArg2(x, x) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + break + } + // match: (MNEGW (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [int64(uint32(-c*d))]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpARM64MOVDconst { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint32(-c * d))) + return true + } + break + } + return false +} +func rewriteValueARM64_OpARM64MOD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOD (MOVDconst [c]) (MOVDconst [d])) + // cond: d != 0 + // result: (MOVDconst [c%d]) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpARM64MOVDconst { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(c % d) + return true + } + return false +} +func rewriteValueARM64_OpARM64MODW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MODW (MOVDconst [c]) (MOVDconst [d])) + // cond: d != 0 + // result: (MOVDconst [int64(uint32(int32(c)%int32(d)))]) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpARM64MOVDconst { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpARM64MOVDconst) 
+ v.AuxInt = int64ToAuxInt(int64(uint32(int32(c) % int32(d)))) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVBUload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVBUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBUload [off] {sym} (ADD ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVBUloadidx ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVBUloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVBUload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, 
mem) + return true + } + // match: (MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVDconst [0]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVBstorezero { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (MOVBUload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVDconst [int64(read8(sym, int64(off)))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(read8(sym, int64(off)))) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBUloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBUloadidx ptr (MOVDconst [c]) mem) + // cond: is32Bit(c) + // result: (MOVBUload [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVBUload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBUloadidx (MOVDconst [c]) ptr mem) + // cond: is32Bit(c) + // result: (MOVBUload [int32(c)] ptr mem) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + ptr := v_1 + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVBUload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBUloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _)) + // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) + // result: (MOVDconst [0]) + for { + ptr := v_0 + 
idx := v_1 + if v_2.Op != OpARM64MOVBstorezeroidx { + break + } + idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVBUreg (ANDconst [c] x)) + // result: (ANDconst [c&(1<<8-1)] x) + for { + if v_0.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(c & (1<<8 - 1)) + v.AddArg(x) + return true + } + // match: (MOVBUreg (MOVDconst [c])) + // result: (MOVDconst [int64(uint8(c))]) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint8(c))) + return true + } + // match: (MOVBUreg x) + // cond: v.Type.Size() <= 1 + // result: x + for { + x := v_0 + if !(v.Type.Size() <= 1) { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg (SLLconst [lc] x)) + // cond: lc >= 8 + // result: (MOVDconst [0]) + for { + if v_0.Op != OpARM64SLLconst { + break + } + lc := auxIntToInt64(v_0.AuxInt) + if !(lc >= 8) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (MOVBUreg (SLLconst [lc] x)) + // cond: lc < 8 + // result: (UBFIZ [armBFAuxInt(lc, 8-lc)] x) + for { + if v_0.Op != OpARM64SLLconst { + break + } + lc := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(lc < 8) { + break + } + v.reset(OpARM64UBFIZ) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 8-lc)) + v.AddArg(x) + return true + } + // match: (MOVBUreg (SRLconst [rc] x)) + // cond: rc < 8 + // result: (UBFX [armBFAuxInt(rc, 8)] x) + for { + if v_0.Op != OpARM64SRLconst { + break + } + rc := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(rc < 8) { + break + } + v.reset(OpARM64UBFX) + 
v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8)) + v.AddArg(x) + return true + } + // match: (MOVBUreg (UBFX [bfc] x)) + // cond: bfc.getARM64BFwidth() <= 8 + // result: (UBFX [bfc] x) + for { + if v_0.Op != OpARM64UBFX { + break + } + bfc := auxIntToArm64BitField(v_0.AuxInt) + x := v_0.Args[0] + if !(bfc.getARM64BFwidth() <= 8) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BitFieldToAuxInt(bfc) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVBload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVBload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBload [off] {sym} (ADD ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVBloadidx ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVBloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + 
off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVBload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVDconst [0]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVBstorezero { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBloadidx ptr (MOVDconst [c]) mem) + // cond: is32Bit(c) + // result: (MOVBload [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVBload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBloadidx (MOVDconst [c]) ptr mem) + // cond: is32Bit(c) + // result: (MOVBload [int32(c)] ptr mem) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + ptr := v_1 + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVBload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _)) + // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) + // result: (MOVDconst [0]) + for { + ptr := v_0 + idx 
:= v_1 + if v_2.Op != OpARM64MOVBstorezeroidx { + break + } + idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVBreg (MOVDconst [c])) + // result: (MOVDconst [int64(int8(c))]) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(int8(c))) + return true + } + // match: (MOVBreg x) + // cond: v.Type.Size() <= 1 + // result: x + for { + x := v_0 + if !(v.Type.Size() <= 1) { + break + } + v.copyOf(x) + return true + } + // match: (MOVBreg (ANDconst x [c])) + // cond: uint64(c) & uint64(0xffffffffffffff80) == 0 + // result: (ANDconst x [c]) + for { + t := v.Type + if v_0.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(uint64(c)&uint64(0xffffffffffffff80) == 0) { + break + } + v.reset(OpARM64ANDconst) + v.Type = t + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVBreg (SLLconst [lc] x)) + // cond: lc < 8 + // result: (SBFIZ [armBFAuxInt(lc, 8-lc)] x) + for { + if v_0.Op != OpARM64SLLconst { + break + } + lc := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(lc < 8) { + break + } + v.reset(OpARM64SBFIZ) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 8-lc)) + v.AddArg(x) + return true + } + // match: (MOVBreg (SBFX [bfc] x)) + // cond: bfc.getARM64BFwidth() <= 8 + // result: (SBFX [bfc] x) + for { + if v_0.Op != OpARM64SBFX { + break + } + bfc := auxIntToArm64BitField(v_0.AuxInt) + x := v_0.Args[0] + if !(bfc.getARM64BFwidth() <= 8) { + break + } + v.reset(OpARM64SBFX) + v.AuxInt = arm64BitFieldToAuxInt(bfc) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool 
{ + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVBstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstore [off] {sym} (ADD ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (MOVBstoreidx ptr idx val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVBstoreidx) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVBstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr 
(MOVDconst [0]) mem) + // result: (MOVBstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpARM64MOVBstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVBreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVBUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVHreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVHUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVBstore [off] 
{sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVWUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBstoreidx ptr (MOVDconst [c]) val mem) + // cond: is32Bit(c) + // result: (MOVBstore [int32(c)] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + val := v_2 + mem := v_3 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVBstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstoreidx (MOVDconst [c]) idx val mem) + // cond: is32Bit(c) + // result: (MOVBstore [int32(c)] idx val mem) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + idx := v_1 + val := v_2 + mem := v_3 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVBstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(idx, val, mem) + return true + } + // match: (MOVBstoreidx ptr idx (MOVDconst [0]) mem) + // result: (MOVBstorezeroidx ptr idx mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 { + break + } + mem := v_3 + v.reset(OpARM64MOVBstorezeroidx) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVBstoreidx ptr idx (MOVBreg x) mem) + // result: 
(MOVBstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVBreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpARM64MOVBstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVBstoreidx ptr idx (MOVBUreg x) mem) + // result: (MOVBstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVBUreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpARM64MOVBstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVBstoreidx ptr idx (MOVHreg x) mem) + // result: (MOVBstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVHreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpARM64MOVBstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVBstoreidx ptr idx (MOVHUreg x) mem) + // result: (MOVBstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVHUreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpARM64MOVBstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVBstoreidx ptr idx (MOVWreg x) mem) + // result: (MOVBstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVWreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpARM64MOVBstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVBstoreidx ptr idx (MOVWUreg x) mem) + // result: (MOVBstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVWUreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpARM64MOVBstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVBstorezero [off1+int32(off2)] {sym} 
ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVBstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVBstorezero) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstorezero [off] {sym} (ADD ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVBstorezeroidx ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVBstorezeroidx) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBstorezeroidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBstorezeroidx ptr (MOVDconst [c]) mem) + // cond: is32Bit(c) + // result: (MOVBstorezero [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if 
!(is32Bit(c)) { + break + } + v.reset(OpARM64MOVBstorezero) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstorezeroidx (MOVDconst [c]) idx mem) + // cond: is32Bit(c) + // result: (MOVBstorezero [int32(c)] idx mem) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + idx := v_1 + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVBstorezero) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(idx, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVDload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _)) + // result: (FMOVDfpgp val) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64FMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + val := v_1.Args[1] + if ptr != v_1.Args[0] { + break + } + v.reset(OpARM64FMOVDfpgp) + v.AddArg(val) + return true + } + // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVDload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVDload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDload [off] {sym} (ADD ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVDloadidx ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if 
!(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVDloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVDloadidx8 ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVDloadidx8) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVDconst [0]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVDstorezero { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (MOVDload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVDconst [int64(read64(sym, int64(off), 
config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVDloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDloadidx ptr (MOVDconst [c]) mem) + // cond: is32Bit(c) + // result: (MOVDload [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVDload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDloadidx (MOVDconst [c]) ptr mem) + // cond: is32Bit(c) + // result: (MOVDload [int32(c)] ptr mem) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + ptr := v_1 + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVDload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDloadidx ptr (SLLconst [3] idx) mem) + // result: (MOVDloadidx8 ptr idx mem) + for { + ptr := v_0 + if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 { + break + } + idx := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVDloadidx8) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVDloadidx (SLLconst [3] idx) ptr mem) + // result: (MOVDloadidx8 ptr idx mem) + for { + if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 { + break + } + idx := v_0.Args[0] + ptr := v_1 + mem := v_2 + v.reset(OpARM64MOVDloadidx8) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVDloadidx ptr idx (MOVDstorezeroidx ptr2 idx2 _)) + // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) + // result: (MOVDconst [0]) + for { + ptr := v_0 + idx := v_1 + if v_2.Op 
!= OpARM64MOVDstorezeroidx { + break + } + idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVDloadidx8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDloadidx8 ptr (MOVDconst [c]) mem) + // cond: is32Bit(c<<3) + // result: (MOVDload [int32(c)<<3] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c << 3)) { + break + } + v.reset(OpARM64MOVDload) + v.AuxInt = int32ToAuxInt(int32(c) << 3) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDloadidx8 ptr idx (MOVDstorezeroidx8 ptr2 idx2 _)) + // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) + // result: (MOVDconst [0]) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVDstorezeroidx8 { + break + } + idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVDnop(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVDnop (MOVDconst [c])) + // result: (MOVDconst [c]) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(c) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVDreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVDreg x) + // cond: x.Uses == 1 + // result: (MOVDnop x) + for { + x := v_0 + if !(x.Uses == 1) { + break + } + v.reset(OpARM64MOVDnop) + v.AddArg(x) + return true + } + // match: (MOVDreg (MOVDconst [c])) + // result: (MOVDconst [c]) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + 
v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(c) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVDstore [off] {sym} ptr (FMOVDfpgp val) mem) + // result: (FMOVDstore [off] {sym} ptr val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64FMOVDfpgp { + break + } + val := v_1.Args[0] + mem := v_2 + v.reset(OpARM64FMOVDstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVDstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVDstore [off] {sym} (ADD ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (MOVDstoreidx ptr idx val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVDstoreidx) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (MOVDstoreidx8 ptr idx val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDshiftLL || 
auxIntToInt64(v_0.AuxInt) != 3 { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVDstoreidx8) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVDstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) + // result: (MOVDstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpARM64MOVDstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVDstoreidx(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDstoreidx ptr (MOVDconst [c]) val mem) + // cond: is32Bit(c) + // result: (MOVDstore [int32(c)] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + val := v_2 + mem := v_3 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVDstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVDstoreidx 
(MOVDconst [c]) idx val mem) + // cond: is32Bit(c) + // result: (MOVDstore [int32(c)] idx val mem) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + idx := v_1 + val := v_2 + mem := v_3 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVDstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(idx, val, mem) + return true + } + // match: (MOVDstoreidx ptr (SLLconst [3] idx) val mem) + // result: (MOVDstoreidx8 ptr idx val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 { + break + } + idx := v_1.Args[0] + val := v_2 + mem := v_3 + v.reset(OpARM64MOVDstoreidx8) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVDstoreidx (SLLconst [3] idx) ptr val mem) + // result: (MOVDstoreidx8 ptr idx val mem) + for { + if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 { + break + } + idx := v_0.Args[0] + ptr := v_1 + val := v_2 + mem := v_3 + v.reset(OpARM64MOVDstoreidx8) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVDstoreidx ptr idx (MOVDconst [0]) mem) + // result: (MOVDstorezeroidx ptr idx mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 { + break + } + mem := v_3 + v.reset(OpARM64MOVDstorezeroidx) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVDstoreidx8(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDstoreidx8 ptr (MOVDconst [c]) val mem) + // cond: is32Bit(c<<3) + // result: (MOVDstore [int32(c)<<3] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + val := v_2 + mem := v_3 + if !(is32Bit(c << 3)) { + break + } + v.reset(OpARM64MOVDstore) + v.AuxInt = int32ToAuxInt(int32(c) << 3) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVDstoreidx8 ptr idx (MOVDconst [0]) mem) + // result: (MOVDstorezeroidx8 ptr 
idx mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 { + break + } + mem := v_3 + v.reset(OpARM64MOVDstorezeroidx8) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i+8] ptr mem)) + // cond: x.Uses == 1 && setPos(v, x.Pos) && clobber(x) + // result: (MOVQstorezero {s} [i] ptr mem) + for { + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + ptr := v_0 + x := v_1 + if x.Op != OpARM64MOVDstorezero || auxIntToInt32(x.AuxInt) != i+8 || auxToSym(x.Aux) != s { + break + } + mem := x.Args[1] + if ptr != x.Args[0] || !(x.Uses == 1 && setPos(v, x.Pos) && clobber(x)) { + break + } + v.reset(OpARM64MOVQstorezero) + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i-8] ptr mem)) + // cond: x.Uses == 1 && setPos(v, x.Pos) && clobber(x) + // result: (MOVQstorezero {s} [i-8] ptr mem) + for { + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + ptr := v_0 + x := v_1 + if x.Op != OpARM64MOVDstorezero || auxIntToInt32(x.AuxInt) != i-8 || auxToSym(x.Aux) != s { + break + } + mem := x.Args[1] + if ptr != x.Args[0] || !(x.Uses == 1 && setPos(v, x.Pos) && clobber(x)) { + break + } + v.reset(OpARM64MOVQstorezero) + v.AuxInt = int32ToAuxInt(i - 8) + v.Aux = symToAux(s) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVDstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if 
!(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVDstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVDstorezero) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDstorezero [off] {sym} (ADD ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVDstorezeroidx ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVDstorezeroidx) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVDstorezero [off] {sym} (ADDshiftLL [3] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVDstorezeroidx8 ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVDstorezeroidx8) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVDstorezeroidx(v *Value) bool { + v_2 := v.Args[2] + 
v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDstorezeroidx ptr (MOVDconst [c]) mem) + // cond: is32Bit(c) + // result: (MOVDstorezero [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVDstorezero) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDstorezeroidx (MOVDconst [c]) idx mem) + // cond: is32Bit(c) + // result: (MOVDstorezero [int32(c)] idx mem) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + idx := v_1 + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVDstorezero) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(idx, mem) + return true + } + // match: (MOVDstorezeroidx ptr (SLLconst [3] idx) mem) + // result: (MOVDstorezeroidx8 ptr idx mem) + for { + ptr := v_0 + if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 { + break + } + idx := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVDstorezeroidx8) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVDstorezeroidx (SLLconst [3] idx) ptr mem) + // result: (MOVDstorezeroidx8 ptr idx mem) + for { + if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 { + break + } + idx := v_0.Args[0] + ptr := v_1 + mem := v_2 + v.reset(OpARM64MOVDstorezeroidx8) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVDstorezeroidx8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDstorezeroidx8 ptr (MOVDconst [c]) mem) + // cond: is32Bit(c<<3) + // result: (MOVDstorezero [int32(c<<3)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c << 3)) { + break + } + v.reset(OpARM64MOVDstorezero) + v.AuxInt = int32ToAuxInt(int32(c << 3)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func 
rewriteValueARM64_OpARM64MOVHUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVHUload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVHUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHUload [off] {sym} (ADD ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVHUloadidx ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVHUloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVHUload [off] {sym} (ADDshiftLL [1] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVHUloadidx2 ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVHUloadidx2) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } 
+ off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVHUload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHUload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVDconst [0]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVHstorezero { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (MOVHUload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVHUloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHUloadidx ptr (MOVDconst [c]) mem) + // cond: is32Bit(c) + // result: (MOVHUload [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVHUload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHUloadidx (MOVDconst [c]) ptr mem) + // cond: is32Bit(c) + // result: (MOVHUload [int32(c)] ptr mem) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := 
auxIntToInt64(v_0.AuxInt) + ptr := v_1 + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVHUload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHUloadidx ptr (SLLconst [1] idx) mem) + // result: (MOVHUloadidx2 ptr idx mem) + for { + ptr := v_0 + if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 { + break + } + idx := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVHUloadidx2) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVHUloadidx ptr (ADD idx idx) mem) + // result: (MOVHUloadidx2 ptr idx mem) + for { + ptr := v_0 + if v_1.Op != OpARM64ADD { + break + } + idx := v_1.Args[1] + if idx != v_1.Args[0] { + break + } + mem := v_2 + v.reset(OpARM64MOVHUloadidx2) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVHUloadidx (ADD idx idx) ptr mem) + // result: (MOVHUloadidx2 ptr idx mem) + for { + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + if idx != v_0.Args[0] { + break + } + ptr := v_1 + mem := v_2 + v.reset(OpARM64MOVHUloadidx2) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVHUloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _)) + // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) + // result: (MOVDconst [0]) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVHstorezeroidx { + break + } + idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVHUloadidx2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHUloadidx2 ptr (MOVDconst [c]) mem) + // cond: is32Bit(c<<1) + // result: (MOVHUload [int32(c)<<1] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if 
!(is32Bit(c << 1)) { + break + } + v.reset(OpARM64MOVHUload) + v.AuxInt = int32ToAuxInt(int32(c) << 1) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHUloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) + // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) + // result: (MOVDconst [0]) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVHstorezeroidx2 { + break + } + idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVHUreg (ANDconst [c] x)) + // result: (ANDconst [c&(1<<16-1)] x) + for { + if v_0.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(c & (1<<16 - 1)) + v.AddArg(x) + return true + } + // match: (MOVHUreg (MOVDconst [c])) + // result: (MOVDconst [int64(uint16(c))]) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint16(c))) + return true + } + // match: (MOVHUreg x) + // cond: v.Type.Size() <= 2 + // result: x + for { + x := v_0 + if !(v.Type.Size() <= 2) { + break + } + v.copyOf(x) + return true + } + // match: (MOVHUreg (SLLconst [lc] x)) + // cond: lc >= 16 + // result: (MOVDconst [0]) + for { + if v_0.Op != OpARM64SLLconst { + break + } + lc := auxIntToInt64(v_0.AuxInt) + if !(lc >= 16) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (MOVHUreg (SLLconst [lc] x)) + // cond: lc < 16 + // result: (UBFIZ [armBFAuxInt(lc, 16-lc)] x) + for { + if v_0.Op != OpARM64SLLconst { + break + } + lc := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(lc < 16) { + break + } + v.reset(OpARM64UBFIZ) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 16-lc)) + 
v.AddArg(x) + return true + } + // match: (MOVHUreg (SRLconst [rc] x)) + // cond: rc < 16 + // result: (UBFX [armBFAuxInt(rc, 16)] x) + for { + if v_0.Op != OpARM64SRLconst { + break + } + rc := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(rc < 16) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16)) + v.AddArg(x) + return true + } + // match: (MOVHUreg (UBFX [bfc] x)) + // cond: bfc.getARM64BFwidth() <= 16 + // result: (UBFX [bfc] x) + for { + if v_0.Op != OpARM64UBFX { + break + } + bfc := auxIntToArm64BitField(v_0.AuxInt) + x := v_0.Args[0] + if !(bfc.getARM64BFwidth() <= 16) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BitFieldToAuxInt(bfc) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVHload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVHload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVHload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHload [off] {sym} (ADD ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVHloadidx ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVHloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVHload [off] {sym} (ADDshiftLL [1] ptr idx) mem) + // cond: 
off == 0 && sym == nil + // result: (MOVHloadidx2 ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVHloadidx2) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVHload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVDconst [0]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVHstorezero { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVHloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHloadidx ptr (MOVDconst [c]) mem) + // cond: is32Bit(c) + // result: (MOVHload [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := 
auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVHload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHloadidx (MOVDconst [c]) ptr mem) + // cond: is32Bit(c) + // result: (MOVHload [int32(c)] ptr mem) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + ptr := v_1 + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVHload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHloadidx ptr (SLLconst [1] idx) mem) + // result: (MOVHloadidx2 ptr idx mem) + for { + ptr := v_0 + if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 { + break + } + idx := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVHloadidx2) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVHloadidx ptr (ADD idx idx) mem) + // result: (MOVHloadidx2 ptr idx mem) + for { + ptr := v_0 + if v_1.Op != OpARM64ADD { + break + } + idx := v_1.Args[1] + if idx != v_1.Args[0] { + break + } + mem := v_2 + v.reset(OpARM64MOVHloadidx2) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVHloadidx (ADD idx idx) ptr mem) + // result: (MOVHloadidx2 ptr idx mem) + for { + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + if idx != v_0.Args[0] { + break + } + ptr := v_1 + mem := v_2 + v.reset(OpARM64MOVHloadidx2) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVHloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _)) + // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) + // result: (MOVDconst [0]) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVHstorezeroidx { + break + } + idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func 
rewriteValueARM64_OpARM64MOVHloadidx2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHloadidx2 ptr (MOVDconst [c]) mem) + // cond: is32Bit(c<<1) + // result: (MOVHload [int32(c)<<1] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c << 1)) { + break + } + v.reset(OpARM64MOVHload) + v.AuxInt = int32ToAuxInt(int32(c) << 1) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) + // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) + // result: (MOVDconst [0]) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVHstorezeroidx2 { + break + } + idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVHreg (MOVDconst [c])) + // result: (MOVDconst [int64(int16(c))]) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(int16(c))) + return true + } + // match: (MOVHreg x) + // cond: v.Type.Size() <= 2 + // result: x + for { + x := v_0 + if !(v.Type.Size() <= 2) { + break + } + v.copyOf(x) + return true + } + // match: (MOVHreg (ANDconst x [c])) + // cond: uint64(c) & uint64(0xffffffffffff8000) == 0 + // result: (ANDconst x [c]) + for { + t := v.Type + if v_0.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(uint64(c)&uint64(0xffffffffffff8000) == 0) { + break + } + v.reset(OpARM64ANDconst) + v.Type = t + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVHreg (SLLconst [lc] x)) + // cond: lc < 16 + // result: (SBFIZ [armBFAuxInt(lc, 16-lc)] x) + for { + if v_0.Op != OpARM64SLLconst { + break + 
} + lc := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(lc < 16) { + break + } + v.reset(OpARM64SBFIZ) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 16-lc)) + v.AddArg(x) + return true + } + // match: (MOVHreg (SBFX [bfc] x)) + // cond: bfc.getARM64BFwidth() <= 16 + // result: (SBFX [bfc] x) + for { + if v_0.Op != OpARM64SBFX { + break + } + bfc := auxIntToArm64BitField(v_0.AuxInt) + x := v_0.Args[0] + if !(bfc.getARM64BFwidth() <= 16) { + break + } + v.reset(OpARM64SBFX) + v.AuxInt = arm64BitFieldToAuxInt(bfc) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVHstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstore [off] {sym} (ADD ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (MOVHstoreidx ptr idx val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVHstoreidx) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVHstore [off] {sym} (ADDshiftLL [1] ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (MOVHstoreidx2 ptr idx val mem) + for { + off := 
auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVHstoreidx2) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVHstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) + // result: (MOVHstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpARM64MOVHstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVHreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + 
for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVHUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVWUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstoreidx ptr (MOVDconst [c]) val mem) + // cond: is32Bit(c) + // result: (MOVHstore [int32(c)] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + val := v_2 + mem := v_3 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVHstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstoreidx (MOVDconst [c]) idx val mem) + // cond: is32Bit(c) + // result: (MOVHstore [int32(c)] idx val mem) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + idx := v_1 + val := v_2 + mem := v_3 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVHstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(idx, val, mem) + return true + } + 
// match: (MOVHstoreidx ptr (SLLconst [1] idx) val mem) + // result: (MOVHstoreidx2 ptr idx val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 { + break + } + idx := v_1.Args[0] + val := v_2 + mem := v_3 + v.reset(OpARM64MOVHstoreidx2) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVHstoreidx ptr (ADD idx idx) val mem) + // result: (MOVHstoreidx2 ptr idx val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64ADD { + break + } + idx := v_1.Args[1] + if idx != v_1.Args[0] { + break + } + val := v_2 + mem := v_3 + v.reset(OpARM64MOVHstoreidx2) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVHstoreidx (SLLconst [1] idx) ptr val mem) + // result: (MOVHstoreidx2 ptr idx val mem) + for { + if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 1 { + break + } + idx := v_0.Args[0] + ptr := v_1 + val := v_2 + mem := v_3 + v.reset(OpARM64MOVHstoreidx2) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVHstoreidx (ADD idx idx) ptr val mem) + // result: (MOVHstoreidx2 ptr idx val mem) + for { + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + if idx != v_0.Args[0] { + break + } + ptr := v_1 + val := v_2 + mem := v_3 + v.reset(OpARM64MOVHstoreidx2) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVHstoreidx ptr idx (MOVDconst [0]) mem) + // result: (MOVHstorezeroidx ptr idx mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 { + break + } + mem := v_3 + v.reset(OpARM64MOVHstorezeroidx) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVHstoreidx ptr idx (MOVHreg x) mem) + // result: (MOVHstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVHreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpARM64MOVHstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVHstoreidx ptr idx (MOVHUreg x) mem) + // result: (MOVHstoreidx ptr idx x mem) + 
for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVHUreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpARM64MOVHstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVHstoreidx ptr idx (MOVWreg x) mem) + // result: (MOVHstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVWreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpARM64MOVHstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVHstoreidx ptr idx (MOVWUreg x) mem) + // result: (MOVHstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVWUreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpARM64MOVHstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVHstoreidx2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstoreidx2 ptr (MOVDconst [c]) val mem) + // cond: is32Bit(c<<1) + // result: (MOVHstore [int32(c)<<1] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + val := v_2 + mem := v_3 + if !(is32Bit(c << 1)) { + break + } + v.reset(OpARM64MOVHstore) + v.AuxInt = int32ToAuxInt(int32(c) << 1) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstoreidx2 ptr idx (MOVDconst [0]) mem) + // result: (MOVHstorezeroidx2 ptr idx mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 { + break + } + mem := v_3 + v.reset(OpARM64MOVHstorezeroidx2) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVHstoreidx2 ptr idx (MOVHreg x) mem) + // result: (MOVHstoreidx2 ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVHreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpARM64MOVHstoreidx2) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVHstoreidx2 ptr idx (MOVHUreg x) mem) + // result: (MOVHstoreidx2 ptr 
idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVHUreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpARM64MOVHstoreidx2) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVHstoreidx2 ptr idx (MOVWreg x) mem) + // result: (MOVHstoreidx2 ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVWreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpARM64MOVHstoreidx2) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVHstoreidx2 ptr idx (MOVWUreg x) mem) + // result: (MOVHstoreidx2 ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVWUreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpARM64MOVHstoreidx2) + v.AddArg4(ptr, idx, x, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVHstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := 
auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVHstorezero) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHstorezero [off] {sym} (ADD ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVHstorezeroidx ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVHstorezeroidx) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVHstorezero [off] {sym} (ADDshiftLL [1] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVHstorezeroidx2 ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVHstorezeroidx2) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstorezeroidx ptr (MOVDconst [c]) mem) + // cond: is32Bit(c) + // result: (MOVHstorezero [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVHstorezero) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHstorezeroidx (MOVDconst [c]) idx mem) + // cond: is32Bit(c) + // result: (MOVHstorezero [int32(c)] idx mem) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + idx := v_1 + mem := v_2 + if 
!(is32Bit(c)) { + break + } + v.reset(OpARM64MOVHstorezero) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(idx, mem) + return true + } + // match: (MOVHstorezeroidx ptr (SLLconst [1] idx) mem) + // result: (MOVHstorezeroidx2 ptr idx mem) + for { + ptr := v_0 + if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 { + break + } + idx := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVHstorezeroidx2) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVHstorezeroidx ptr (ADD idx idx) mem) + // result: (MOVHstorezeroidx2 ptr idx mem) + for { + ptr := v_0 + if v_1.Op != OpARM64ADD { + break + } + idx := v_1.Args[1] + if idx != v_1.Args[0] { + break + } + mem := v_2 + v.reset(OpARM64MOVHstorezeroidx2) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVHstorezeroidx (SLLconst [1] idx) ptr mem) + // result: (MOVHstorezeroidx2 ptr idx mem) + for { + if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 1 { + break + } + idx := v_0.Args[0] + ptr := v_1 + mem := v_2 + v.reset(OpARM64MOVHstorezeroidx2) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVHstorezeroidx (ADD idx idx) ptr mem) + // result: (MOVHstorezeroidx2 ptr idx mem) + for { + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + if idx != v_0.Args[0] { + break + } + ptr := v_1 + mem := v_2 + v.reset(OpARM64MOVHstorezeroidx2) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVHstorezeroidx2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstorezeroidx2 ptr (MOVDconst [c]) mem) + // cond: is32Bit(c<<1) + // result: (MOVHstorezero [int32(c<<1)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c << 1)) { + break + } + v.reset(OpARM64MOVHstorezero) + v.AuxInt = int32ToAuxInt(int32(c << 1)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVQstorezero(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVQstorezero [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVQstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVQstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVQstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVQstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVQstorezero) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVWUload [off] {sym} ptr (FMOVSstore [off] {sym} ptr val _)) + // result: (FMOVSfpgp val) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64FMOVSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + val := v_1.Args[1] + if ptr != v_1.Args[0] { + break + } 
+ v.reset(OpARM64FMOVSfpgp) + v.AddArg(val) + return true + } + // match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVWUload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVWUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWUload [off] {sym} (ADD ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVWUloadidx ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVWUloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVWUload [off] {sym} (ADDshiftLL [2] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVWUloadidx4 ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVWUloadidx4) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + 
ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVWUload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVDconst [0]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVWstorezero { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (MOVWUload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVWUloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWUloadidx ptr (MOVDconst [c]) mem) + // cond: is32Bit(c) + // result: (MOVWUload [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVWUload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWUloadidx (MOVDconst [c]) ptr mem) + // cond: is32Bit(c) + // result: (MOVWUload [int32(c)] ptr mem) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + ptr := v_1 + mem := v_2 + if 
!(is32Bit(c)) { + break + } + v.reset(OpARM64MOVWUload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWUloadidx ptr (SLLconst [2] idx) mem) + // result: (MOVWUloadidx4 ptr idx mem) + for { + ptr := v_0 + if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 { + break + } + idx := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVWUloadidx4) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVWUloadidx (SLLconst [2] idx) ptr mem) + // result: (MOVWUloadidx4 ptr idx mem) + for { + if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 { + break + } + idx := v_0.Args[0] + ptr := v_1 + mem := v_2 + v.reset(OpARM64MOVWUloadidx4) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVWUloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _)) + // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) + // result: (MOVDconst [0]) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVWstorezeroidx { + break + } + idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVWUloadidx4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWUloadidx4 ptr (MOVDconst [c]) mem) + // cond: is32Bit(c<<2) + // result: (MOVWUload [int32(c)<<2] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c << 2)) { + break + } + v.reset(OpARM64MOVWUload) + v.AuxInt = int32ToAuxInt(int32(c) << 2) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWUloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) + // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) + // result: (MOVDconst [0]) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != 
OpARM64MOVWstorezeroidx4 { + break + } + idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVWUreg (ANDconst [c] x)) + // result: (ANDconst [c&(1<<32-1)] x) + for { + if v_0.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(c & (1<<32 - 1)) + v.AddArg(x) + return true + } + // match: (MOVWUreg (MOVDconst [c])) + // result: (MOVDconst [int64(uint32(c))]) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint32(c))) + return true + } + // match: (MOVWUreg x) + // cond: v.Type.Size() <= 4 + // result: x + for { + x := v_0 + if !(v.Type.Size() <= 4) { + break + } + v.copyOf(x) + return true + } + // match: (MOVWUreg (SLLconst [lc] x)) + // cond: lc >= 32 + // result: (MOVDconst [0]) + for { + if v_0.Op != OpARM64SLLconst { + break + } + lc := auxIntToInt64(v_0.AuxInt) + if !(lc >= 32) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (MOVWUreg (SLLconst [lc] x)) + // cond: lc < 32 + // result: (UBFIZ [armBFAuxInt(lc, 32-lc)] x) + for { + if v_0.Op != OpARM64SLLconst { + break + } + lc := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(lc < 32) { + break + } + v.reset(OpARM64UBFIZ) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 32-lc)) + v.AddArg(x) + return true + } + // match: (MOVWUreg (SRLconst [rc] x)) + // cond: rc < 32 + // result: (UBFX [armBFAuxInt(rc, 32)] x) + for { + if v_0.Op != OpARM64SRLconst { + break + } + rc := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(rc < 32) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32)) + 
v.AddArg(x) + return true + } + // match: (MOVWUreg (UBFX [bfc] x)) + // cond: bfc.getARM64BFwidth() <= 32 + // result: (UBFX [bfc] x) + for { + if v_0.Op != OpARM64UBFX { + break + } + bfc := auxIntToArm64BitField(v_0.AuxInt) + x := v_0.Args[0] + if !(bfc.getARM64BFwidth() <= 32) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BitFieldToAuxInt(bfc) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVWload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVWload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVWload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWload [off] {sym} (ADD ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVWloadidx ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVWloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVWload [off] {sym} (ADDshiftLL [2] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVWloadidx4 ptr idx mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + mem := v_1 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVWloadidx4) + 
v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVWload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVDconst [0]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVWstorezero { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVWloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWloadidx ptr (MOVDconst [c]) mem) + // cond: is32Bit(c) + // result: (MOVWload [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVWload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWloadidx (MOVDconst [c]) ptr mem) + // cond: is32Bit(c) + // result: (MOVWload [int32(c)] ptr mem) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := 
auxIntToInt64(v_0.AuxInt) + ptr := v_1 + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVWload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWloadidx ptr (SLLconst [2] idx) mem) + // result: (MOVWloadidx4 ptr idx mem) + for { + ptr := v_0 + if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 { + break + } + idx := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVWloadidx4) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVWloadidx (SLLconst [2] idx) ptr mem) + // result: (MOVWloadidx4 ptr idx mem) + for { + if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 { + break + } + idx := v_0.Args[0] + ptr := v_1 + mem := v_2 + v.reset(OpARM64MOVWloadidx4) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVWloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _)) + // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) + // result: (MOVDconst [0]) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVWstorezeroidx { + break + } + idx2 := v_2.Args[1] + ptr2 := v_2.Args[0] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVWloadidx4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWloadidx4 ptr (MOVDconst [c]) mem) + // cond: is32Bit(c<<2) + // result: (MOVWload [int32(c)<<2] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c << 2)) { + break + } + v.reset(OpARM64MOVWload) + v.AuxInt = int32ToAuxInt(int32(c) << 2) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) + // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) + // result: (MOVDconst [0]) + for { + 
ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVWstorezeroidx4 {
			break
		}
		idx2 := v_2.Args[1]
		ptr2 := v_2.Args[0]
		// The load reads the same ptr/idx a zero was just stored to, so the
		// result is known to be 0.
		if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	return false
}

// rewriteValueARM64_OpARM64MOVWreg applies the generated rewrite rules for
// MOVWreg (sign-extend the low 32 bits): fold constants, drop the extension
// when the result type is at most 4 bytes, and merge with ANDconst/SLLconst/
// SBFX into bitfield operations.
func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool {
	v_0 := v.Args[0]
	// match: (MOVWreg (MOVDconst [c]))
	// result: (MOVDconst [int64(int32(c))])
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(int64(int32(c)))
		return true
	}
	// match: (MOVWreg x)
	// cond: v.Type.Size() <= 4
	// result: x
	for {
		x := v_0
		if !(v.Type.Size() <= 4) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVWreg (ANDconst x [c]))
	// cond: uint64(c) & uint64(0xffffffff80000000) == 0
	// result: (ANDconst x [c])
	for {
		t := v.Type
		if v_0.Op != OpARM64ANDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		// Mask clears bit 31 and everything above it, so the AND result is a
		// non-negative 32-bit value and sign extension is a no-op.
		if !(uint64(c)&uint64(0xffffffff80000000) == 0) {
			break
		}
		v.reset(OpARM64ANDconst)
		v.Type = t
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (MOVWreg (SLLconst [lc] x))
	// cond: lc < 32
	// result: (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
	for {
		if v_0.Op != OpARM64SLLconst {
			break
		}
		lc := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if !(lc < 32) {
			break
		}
		v.reset(OpARM64SBFIZ)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 32-lc))
		v.AddArg(x)
		return true
	}
	// match: (MOVWreg (SBFX [bfc] x))
	// cond: bfc.getARM64BFwidth() <= 32
	// result: (SBFX [bfc] x)
	for {
		if v_0.Op != OpARM64SBFX {
			break
		}
		bfc := auxIntToArm64BitField(v_0.AuxInt)
		x := v_0.Args[0]
		if !(bfc.getARM64BFwidth() <= 32) {
			break
		}
		v.reset(OpARM64SBFX)
		v.AuxInt = arm64BitFieldToAuxInt(bfc)
		v.AddArg(x)
		return true
	}
	return false
}

// rewriteValueARM64_OpARM64MOVWstore applies the generated rewrite rules for
// MOVWstore: move float bit patterns to FMOVSstore, fold address arithmetic
// into the aux offset/symbol, convert to indexed forms, and turn stores of
// constant zero into MOVWstorezero.
func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
	v_2 := v.Args[2]
+ v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVWstore [off] {sym} ptr (FMOVSfpgp val) mem) + // result: (FMOVSstore [off] {sym} ptr val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64FMOVSfpgp { + break + } + val := v_1.Args[0] + mem := v_2 + v.reset(OpARM64FMOVSstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVWstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [off] {sym} (ADD ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (MOVWstoreidx ptr idx val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADD { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVWstoreidx) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVWstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (MOVWstoreidx4 ptr idx val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 { + break + } + idx := v_0.Args[1] + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(off == 0 && sym == nil) { + break + } + 
v.reset(OpARM64MOVWstoreidx4) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64MOVWstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) + // result: (MOVWstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpARM64MOVWstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVWUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpARM64MOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + 
v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWstoreidx ptr (MOVDconst [c]) val mem) + // cond: is32Bit(c) + // result: (MOVWstore [int32(c)] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + val := v_2 + mem := v_3 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVWstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstoreidx (MOVDconst [c]) idx val mem) + // cond: is32Bit(c) + // result: (MOVWstore [int32(c)] idx val mem) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + idx := v_1 + val := v_2 + mem := v_3 + if !(is32Bit(c)) { + break + } + v.reset(OpARM64MOVWstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(idx, val, mem) + return true + } + // match: (MOVWstoreidx ptr (SLLconst [2] idx) val mem) + // result: (MOVWstoreidx4 ptr idx val mem) + for { + ptr := v_0 + if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 { + break + } + idx := v_1.Args[0] + val := v_2 + mem := v_3 + v.reset(OpARM64MOVWstoreidx4) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVWstoreidx (SLLconst [2] idx) ptr val mem) + // result: (MOVWstoreidx4 ptr idx val mem) + for { + if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 { + break + } + idx := v_0.Args[0] + ptr := v_1 + val := v_2 + mem := v_3 + v.reset(OpARM64MOVWstoreidx4) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVWstoreidx ptr idx (MOVDconst [0]) mem) + // result: (MOVWstorezeroidx ptr idx mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 { + break + } + mem := v_3 + v.reset(OpARM64MOVWstorezeroidx) + v.AddArg3(ptr, idx, mem) + return true + } + // match: (MOVWstoreidx ptr idx (MOVWreg x) 
mem)
	// result: (MOVWstoreidx ptr idx x mem)
	for {
		ptr := v_0
		idx := v_1
		// Storing only 32 bits, so a sign-extension of the stored value is
		// redundant and can be dropped.
		if v_2.Op != OpARM64MOVWreg {
			break
		}
		x := v_2.Args[0]
		mem := v_3
		v.reset(OpARM64MOVWstoreidx)
		v.AddArg4(ptr, idx, x, mem)
		return true
	}
	// match: (MOVWstoreidx ptr idx (MOVWUreg x) mem)
	// result: (MOVWstoreidx ptr idx x mem)
	for {
		ptr := v_0
		idx := v_1
		// Same for a zero-extension of the stored value.
		if v_2.Op != OpARM64MOVWUreg {
			break
		}
		x := v_2.Args[0]
		mem := v_3
		v.reset(OpARM64MOVWstoreidx)
		v.AddArg4(ptr, idx, x, mem)
		return true
	}
	return false
}

// rewriteValueARM64_OpARM64MOVWstoreidx4 applies the generated rewrite rules
// for MOVWstoreidx4 (word store at ptr + 4*idx): fold constant indexes into
// the plain-offset form, turn zero stores into MOVWstorezeroidx4, and drop
// redundant 32-bit extensions of the stored value.
func rewriteValueARM64_OpARM64MOVWstoreidx4(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWstoreidx4 ptr (MOVDconst [c]) val mem)
	// cond: is32Bit(c<<2)
	// result: (MOVWstore [int32(c)<<2] ptr val mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		val := v_2
		mem := v_3
		if !(is32Bit(c << 2)) {
			break
		}
		v.reset(OpARM64MOVWstore)
		v.AuxInt = int32ToAuxInt(int32(c) << 2)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVWstoreidx4 ptr idx (MOVDconst [0]) mem)
	// result: (MOVWstorezeroidx4 ptr idx mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
			break
		}
		mem := v_3
		v.reset(OpARM64MOVWstorezeroidx4)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVWstoreidx4 ptr idx (MOVWreg x) mem)
	// result: (MOVWstoreidx4 ptr idx x mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVWreg {
			break
		}
		x := v_2.Args[0]
		mem := v_3
		v.reset(OpARM64MOVWstoreidx4)
		v.AddArg4(ptr, idx, x, mem)
		return true
	}
	// match: (MOVWstoreidx4 ptr idx (MOVWUreg x) mem)
	// result: (MOVWstoreidx4 ptr idx x mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVWUreg {
			break
		}
		x := v_2.Args[0]
		mem := v_3
		v.reset(OpARM64MOVWstoreidx4)
		v.AddArg4(ptr, idx, x, mem)
		return true
	}
	return false
}

// rewriteValueARM64_OpARM64MOVWstorezero applies the generated rewrite rules
// for MOVWstorezero: fold address arithmetic into the aux offset/symbol and
// convert ADD / ADDshiftLL addressing into the indexed forms.
func
rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool {
	// NOTE(review): this file appears to be machine-generated from the ARM64
	// rewrite rules (_gen/ARM64.rules) — change the rules and regenerate
	// rather than hand-editing here; TODO confirm against the file header.
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
	// result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADDconst {
			break
		}
		off2 := auxIntToInt64(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		// Fold the constant pointer adjustment into the store's offset; not
		// for SB-relative addresses when dynamically linking.
		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
			break
		}
		v.reset(OpARM64MOVWstorezero)
		v.AuxInt = int32ToAuxInt(off1 + int32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
	// result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpARM64MOVDaddr {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
			break
		}
		v.reset(OpARM64MOVWstorezero)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstorezero [off] {sym} (ADD ptr idx) mem)
	// cond: off == 0 && sym == nil
	// result: (MOVWstorezeroidx ptr idx mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADD {
			break
		}
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		mem := v_1
		if !(off == 0 && sym == nil) {
			break
		}
		v.reset(OpARM64MOVWstorezeroidx)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVWstorezero [off] {sym} (ADDshiftLL [2] ptr idx) mem)
	// cond: off == 0 && sym == nil
	// result: (MOVWstorezeroidx4 ptr idx mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
			break
		}
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		mem := v_1
		if !(off == 0 && sym == nil) {
			break
		}
		v.reset(OpARM64MOVWstorezeroidx4)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	return false
}

// rewriteValueARM64_OpARM64MOVWstorezeroidx applies the generated rewrite
// rules for MOVWstorezeroidx: fold constant indexes into the plain-offset
// form and convert shift-by-2 indexes into MOVWstorezeroidx4.
func rewriteValueARM64_OpARM64MOVWstorezeroidx(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWstorezeroidx ptr (MOVDconst [c]) mem)
	// cond: is32Bit(c)
	// result: (MOVWstorezero [int32(c)] ptr mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVWstorezero)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstorezeroidx (MOVDconst [c]) idx mem)
	// cond: is32Bit(c)
	// result: (MOVWstorezero [int32(c)] idx mem)
	for {
		// Symmetric case: the constant may appear as either operand of the
		// index addition.
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		idx := v_1
		mem := v_2
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVWstorezero)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg2(idx, mem)
		return true
	}
	// match: (MOVWstorezeroidx ptr (SLLconst [2] idx) mem)
	// result: (MOVWstorezeroidx4 ptr idx mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v_2
		v.reset(OpARM64MOVWstorezeroidx4)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVWstorezeroidx (SLLconst [2] idx) ptr mem)
	// result: (MOVWstorezeroidx4 ptr idx mem)
	for {
		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
			break
		}
		idx := v_0.Args[0]
		ptr := v_1
		mem := v_2
		v.reset(OpARM64MOVWstorezeroidx4)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	return false
}

// rewriteValueARM64_OpARM64MOVWstorezeroidx4 applies the generated rewrite
// rule for MOVWstorezeroidx4 (zero word store at ptr + 4*idx): fold a
// constant index into the plain-offset form.
func
rewriteValueARM64_OpARM64MOVWstorezeroidx4(v *Value) bool {
	// Rewrite rules for MOVWstorezeroidx4 — zero word store at ptr + 4*idx
	// (the index is scaled by 4, hence the c<<2 below).
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWstorezeroidx4 ptr (MOVDconst [c]) mem)
	// cond: is32Bit(c<<2)
	// result: (MOVWstorezero [int32(c<<2)] ptr mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		if !(is32Bit(c << 2)) {
			break
		}
		v.reset(OpARM64MOVWstorezero)
		v.AuxInt = int32ToAuxInt(int32(c << 2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}

// rewriteValueARM64_OpARM64MSUB applies the generated rewrite rules for
// MSUB (computes a - x*y, per the constant cases below): fold trivial
// multipliers (-1, 0, 1) and strength-reduce multiplications by small
// constants into shift/add/sub combinations.
func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (MSUB a x (MOVDconst [-1]))
	// result: (ADD a x)
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != -1 {
			break
		}
		v.reset(OpARM64ADD)
		v.AddArg2(a, x)
		return true
	}
	// match: (MSUB a _ (MOVDconst [0]))
	// result: a
	for {
		a := v_0
		if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
			break
		}
		v.copyOf(a)
		return true
	}
	// match: (MSUB a x (MOVDconst [1]))
	// result: (SUB a x)
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 1 {
			break
		}
		v.reset(OpARM64SUB)
		v.AddArg2(a, x)
		return true
	}
	// match: (MSUB a x (MOVDconst [c]))
	// cond: isPowerOfTwo64(c)
	// result: (SUBshiftLL a x [log64(c)])
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		if !(isPowerOfTwo64(c)) {
			break
		}
		v.reset(OpARM64SUBshiftLL)
		v.AuxInt = int64ToAuxInt(log64(c))
		v.AddArg2(a, x)
		return true
	}
	// match: (MSUB a x (MOVDconst [c]))
	// cond: isPowerOfTwo64(c-1) && c>=3
	// result: (SUB a (ADDshiftLL x x [log64(c-1)]))
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		// x*(2^k+1) == x + (x << k), so a - x*c becomes a single SUB of an
		// ADDshiftLL.
		if !(isPowerOfTwo64(c-1) && c >= 3) {
			break
		}
		v.reset(OpARM64SUB)
		v0 :=
b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c - 1)) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MSUB a x (MOVDconst [c])) + // cond: isPowerOfTwo64(c+1) && c>=7 + // result: (ADD a (SUBshiftLL x x [log64(c+1)])) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(isPowerOfTwo64(c+1) && c >= 7) { + break + } + v.reset(OpARM64ADD) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c + 1)) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MSUB a x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo64(c/3) + // result: (ADDshiftLL a (SUBshiftLL x x [2]) [log64(c/3)]) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(c%3 == 0 && isPowerOfTwo64(c/3)) { + break + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = int64ToAuxInt(log64(c / 3)) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(2) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MSUB a x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo64(c/5) + // result: (SUBshiftLL a (ADDshiftLL x x [2]) [log64(c/5)]) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(c%5 == 0 && isPowerOfTwo64(c/5)) { + break + } + v.reset(OpARM64SUBshiftLL) + v.AuxInt = int64ToAuxInt(log64(c / 5)) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(2) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MSUB a x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo64(c/7) + // result: (ADDshiftLL a (SUBshiftLL x x [3]) [log64(c/7)]) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(c%7 == 0 && isPowerOfTwo64(c/7)) { + break + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = 
int64ToAuxInt(log64(c / 7)) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(3) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MSUB a x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo64(c/9) + // result: (SUBshiftLL a (ADDshiftLL x x [3]) [log64(c/9)]) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(c%9 == 0 && isPowerOfTwo64(c/9)) { + break + } + v.reset(OpARM64SUBshiftLL) + v.AuxInt = int64ToAuxInt(log64(c / 9)) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(3) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MSUB a (MOVDconst [-1]) x) + // result: (ADD a x) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 { + break + } + x := v_2 + v.reset(OpARM64ADD) + v.AddArg2(a, x) + return true + } + // match: (MSUB a (MOVDconst [0]) _) + // result: a + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.copyOf(a) + return true + } + // match: (MSUB a (MOVDconst [1]) x) + // result: (SUB a x) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 { + break + } + x := v_2 + v.reset(OpARM64SUB) + v.AddArg2(a, x) + return true + } + // match: (MSUB a (MOVDconst [c]) x) + // cond: isPowerOfTwo64(c) + // result: (SUBshiftLL a x [log64(c)]) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(isPowerOfTwo64(c)) { + break + } + v.reset(OpARM64SUBshiftLL) + v.AuxInt = int64ToAuxInt(log64(c)) + v.AddArg2(a, x) + return true + } + // match: (MSUB a (MOVDconst [c]) x) + // cond: isPowerOfTwo64(c-1) && c>=3 + // result: (SUB a (ADDshiftLL x x [log64(c-1)])) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(isPowerOfTwo64(c-1) && c >= 3) { + break + } + 
v.reset(OpARM64SUB) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c - 1)) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MSUB a (MOVDconst [c]) x) + // cond: isPowerOfTwo64(c+1) && c>=7 + // result: (ADD a (SUBshiftLL x x [log64(c+1)])) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(isPowerOfTwo64(c+1) && c >= 7) { + break + } + v.reset(OpARM64ADD) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c + 1)) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MSUB a (MOVDconst [c]) x) + // cond: c%3 == 0 && isPowerOfTwo64(c/3) + // result: (ADDshiftLL a (SUBshiftLL x x [2]) [log64(c/3)]) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(c%3 == 0 && isPowerOfTwo64(c/3)) { + break + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = int64ToAuxInt(log64(c / 3)) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(2) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MSUB a (MOVDconst [c]) x) + // cond: c%5 == 0 && isPowerOfTwo64(c/5) + // result: (SUBshiftLL a (ADDshiftLL x x [2]) [log64(c/5)]) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(c%5 == 0 && isPowerOfTwo64(c/5)) { + break + } + v.reset(OpARM64SUBshiftLL) + v.AuxInt = int64ToAuxInt(log64(c / 5)) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(2) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MSUB a (MOVDconst [c]) x) + // cond: c%7 == 0 && isPowerOfTwo64(c/7) + // result: (ADDshiftLL a (SUBshiftLL x x [3]) [log64(c/7)]) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(c%7 == 0 && isPowerOfTwo64(c/7)) { + break + } + 
v.reset(OpARM64ADDshiftLL) + v.AuxInt = int64ToAuxInt(log64(c / 7)) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(3) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MSUB a (MOVDconst [c]) x) + // cond: c%9 == 0 && isPowerOfTwo64(c/9) + // result: (SUBshiftLL a (ADDshiftLL x x [3]) [log64(c/9)]) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(c%9 == 0 && isPowerOfTwo64(c/9)) { + break + } + v.reset(OpARM64SUBshiftLL) + v.AuxInt = int64ToAuxInt(log64(c / 9)) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(3) + v0.AddArg2(x, x) + v.AddArg2(a, v0) + return true + } + // match: (MSUB (MOVDconst [c]) x y) + // result: (ADDconst [c] (MNEG x y)) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARM64ADDconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64MNEG, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (MSUB a (MOVDconst [c]) (MOVDconst [d])) + // result: (SUBconst [c*d] a) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if v_2.Op != OpARM64MOVDconst { + break + } + d := auxIntToInt64(v_2.AuxInt) + v.reset(OpARM64SUBconst) + v.AuxInt = int64ToAuxInt(c * d) + v.AddArg(a) + return true + } + return false +} +func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MSUBW a x (MOVDconst [c])) + // cond: int32(c)==-1 + // result: (MOVWUreg (ADD a x)) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(int32(c) == -1) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type) + v0.AddArg2(a, x) + v.AddArg(v0) + return true + } + // match: (MSUBW a _ (MOVDconst [c])) + // cond: 
int32(c)==0 + // result: (MOVWUreg a) + for { + a := v_0 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(int32(c) == 0) { + break + } + v.reset(OpARM64MOVWUreg) + v.AddArg(a) + return true + } + // match: (MSUBW a x (MOVDconst [c])) + // cond: int32(c)==1 + // result: (MOVWUreg (SUB a x)) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(int32(c) == 1) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type) + v0.AddArg2(a, x) + v.AddArg(v0) + return true + } + // match: (MSUBW a x (MOVDconst [c])) + // cond: isPowerOfTwo64(c) + // result: (MOVWUreg (SUBshiftLL a x [log64(c)])) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(isPowerOfTwo64(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c)) + v0.AddArg2(a, x) + v.AddArg(v0) + return true + } + // match: (MSUBW a x (MOVDconst [c])) + // cond: isPowerOfTwo64(c-1) && int32(c)>=3 + // result: (MOVWUreg (SUB a (ADDshiftLL x x [log64(c-1)]))) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(isPowerOfTwo64(c-1) && int32(c) >= 3) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(log64(c - 1)) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MSUBW a x (MOVDconst [c])) + // cond: isPowerOfTwo64(c+1) && int32(c)>=7 + // result: (MOVWUreg (ADD a (SUBshiftLL x x [log64(c+1)]))) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(isPowerOfTwo64(c+1) && int32(c) >= 7) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type) + v1 := 
b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(log64(c + 1)) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MSUBW a x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) + // result: (MOVWUreg (ADDshiftLL a (SUBshiftLL x x [2]) [log64(c/3)])) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 3)) + v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(2) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MSUBW a x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) + // result: (MOVWUreg (SUBshiftLL a (ADDshiftLL x x [2]) [log64(c/5)])) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 5)) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(2) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MSUBW a x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) + // result: (MOVWUreg (ADDshiftLL a (SUBshiftLL x x [3]) [log64(c/7)])) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 7)) + v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(3) + v1.AddArg2(x, x) + 
v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MSUBW a x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) + // result: (MOVWUreg (SUBshiftLL a (ADDshiftLL x x [3]) [log64(c/9)])) + for { + a := v_0 + x := v_1 + if v_2.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 9)) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(3) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MSUBW a (MOVDconst [c]) x) + // cond: int32(c)==-1 + // result: (MOVWUreg (ADD a x)) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(int32(c) == -1) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type) + v0.AddArg2(a, x) + v.AddArg(v0) + return true + } + // match: (MSUBW a (MOVDconst [c]) _) + // cond: int32(c)==0 + // result: (MOVWUreg a) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(int32(c) == 0) { + break + } + v.reset(OpARM64MOVWUreg) + v.AddArg(a) + return true + } + // match: (MSUBW a (MOVDconst [c]) x) + // cond: int32(c)==1 + // result: (MOVWUreg (SUB a x)) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(int32(c) == 1) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type) + v0.AddArg2(a, x) + v.AddArg(v0) + return true + } + // match: (MSUBW a (MOVDconst [c]) x) + // cond: isPowerOfTwo64(c) + // result: (MOVWUreg (SUBshiftLL a x [log64(c)])) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(isPowerOfTwo64(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 
:= b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c)) + v0.AddArg2(a, x) + v.AddArg(v0) + return true + } + // match: (MSUBW a (MOVDconst [c]) x) + // cond: isPowerOfTwo64(c-1) && int32(c)>=3 + // result: (MOVWUreg (SUB a (ADDshiftLL x x [log64(c-1)]))) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(isPowerOfTwo64(c-1) && int32(c) >= 3) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(log64(c - 1)) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MSUBW a (MOVDconst [c]) x) + // cond: isPowerOfTwo64(c+1) && int32(c)>=7 + // result: (MOVWUreg (ADD a (SUBshiftLL x x [log64(c+1)]))) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(isPowerOfTwo64(c+1) && int32(c) >= 7) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type) + v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(log64(c + 1)) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MSUBW a (MOVDconst [c]) x) + // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) + // result: (MOVWUreg (ADDshiftLL a (SUBshiftLL x x [2]) [log64(c/3)])) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 3)) + v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(2) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MSUBW a (MOVDconst [c]) x) + // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) + // result: 
(MOVWUreg (SUBshiftLL a (ADDshiftLL x x [2]) [log64(c/5)])) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 5)) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(2) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MSUBW a (MOVDconst [c]) x) + // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) + // result: (MOVWUreg (ADDshiftLL a (SUBshiftLL x x [3]) [log64(c/7)])) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 7)) + v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(3) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MSUBW a (MOVDconst [c]) x) + // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) + // result: (MOVWUreg (SUBshiftLL a (ADDshiftLL x x [3]) [log64(c/9)])) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + x := v_2 + if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) { + break + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 9)) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(3) + v1.AddArg2(x, x) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (MSUBW (MOVDconst [c]) x y) + // result: (MOVWUreg (ADDconst [c] (MNEGW x y))) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + y := v_2 + v.reset(OpARM64MOVWUreg) + 
v0 := b.NewValue0(v.Pos, OpARM64ADDconst, x.Type) + v0.AuxInt = int64ToAuxInt(c) + v1 := b.NewValue0(v.Pos, OpARM64MNEGW, x.Type) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (MSUBW a (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVWUreg (SUBconst [c*d] a)) + for { + a := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if v_2.Op != OpARM64MOVDconst { + break + } + d := auxIntToInt64(v_2.AuxInt) + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SUBconst, a.Type) + v0.AuxInt = int64ToAuxInt(c * d) + v0.AddArg(a) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MUL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MUL (NEG x) y) + // result: (MNEG x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpARM64NEG { + continue + } + x := v_0.Args[0] + y := v_1 + v.reset(OpARM64MNEG) + v.AddArg2(x, y) + return true + } + break + } + // match: (MUL x (MOVDconst [-1])) + // result: (NEG x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 { + continue + } + v.reset(OpARM64NEG) + v.AddArg(x) + return true + } + break + } + // match: (MUL _ (MOVDconst [0])) + // result: (MOVDconst [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + continue + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + break + } + // match: (MUL x (MOVDconst [1])) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 { + continue + } + v.copyOf(x) + return true + } + break + } + // match: (MUL x (MOVDconst [c])) + // cond: isPowerOfTwo64(c) + // result: (SLLconst [log64(c)] x) + for { + for _i0 := 0; _i0 
<= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c)) { + continue + } + v.reset(OpARM64SLLconst) + v.AuxInt = int64ToAuxInt(log64(c)) + v.AddArg(x) + return true + } + break + } + // match: (MUL x (MOVDconst [c])) + // cond: isPowerOfTwo64(c-1) && c >= 3 + // result: (ADDshiftLL x x [log64(c-1)]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c-1) && c >= 3) { + continue + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = int64ToAuxInt(log64(c - 1)) + v.AddArg2(x, x) + return true + } + break + } + // match: (MUL x (MOVDconst [c])) + // cond: isPowerOfTwo64(c+1) && c >= 7 + // result: (ADDshiftLL (NEG x) x [log64(c+1)]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c+1) && c >= 7) { + continue + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = int64ToAuxInt(log64(c + 1)) + v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v0.AddArg(x) + v.AddArg2(v0, x) + return true + } + break + } + // match: (MUL x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo64(c/3) + // result: (SLLconst [log64(c/3)] (ADDshiftLL x x [1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(c%3 == 0 && isPowerOfTwo64(c/3)) { + continue + } + v.reset(OpARM64SLLconst) + v.AuxInt = int64ToAuxInt(log64(c / 3)) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(1) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + break + } + // match: (MUL x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo64(c/5) + // result: (SLLconst [log64(c/5)] (ADDshiftLL x x [2])) + for { + for _i0 := 0; _i0 
<= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(c%5 == 0 && isPowerOfTwo64(c/5)) { + continue + } + v.reset(OpARM64SLLconst) + v.AuxInt = int64ToAuxInt(log64(c / 5)) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(2) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + break + } + // match: (MUL x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo64(c/7) + // result: (SLLconst [log64(c/7)] (ADDshiftLL (NEG x) x [3])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(c%7 == 0 && isPowerOfTwo64(c/7)) { + continue + } + v.reset(OpARM64SLLconst) + v.AuxInt = int64ToAuxInt(log64(c / 7)) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1.AddArg(x) + v0.AddArg2(v1, x) + v.AddArg(v0) + return true + } + break + } + // match: (MUL x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo64(c/9) + // result: (SLLconst [log64(c/9)] (ADDshiftLL x x [3])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(c%9 == 0 && isPowerOfTwo64(c/9)) { + continue + } + v.reset(OpARM64SLLconst) + v.AuxInt = int64ToAuxInt(log64(c / 9)) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(3) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + break + } + // match: (MUL (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [c*d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpARM64MOVDconst { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(c * d) + 
return true + } + break + } + return false +} +func rewriteValueARM64_OpARM64MULW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MULW (NEG x) y) + // result: (MNEGW x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpARM64NEG { + continue + } + x := v_0.Args[0] + y := v_1 + v.reset(OpARM64MNEGW) + v.AddArg2(x, y) + return true + } + break + } + // match: (MULW x (MOVDconst [c])) + // cond: int32(c)==-1 + // result: (MOVWUreg (NEG x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(int32(c) == -1) { + continue + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (MULW _ (MOVDconst [c])) + // cond: int32(c)==0 + // result: (MOVDconst [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(int32(c) == 0) { + continue + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + break + } + // match: (MULW x (MOVDconst [c])) + // cond: int32(c)==1 + // result: (MOVWUreg x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(int32(c) == 1) { + continue + } + v.reset(OpARM64MOVWUreg) + v.AddArg(x) + return true + } + break + } + // match: (MULW x (MOVDconst [c])) + // cond: isPowerOfTwo64(c) + // result: (MOVWUreg (SLLconst [log64(c)] x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c)) { + continue + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = 
int64ToAuxInt(log64(c)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (MULW x (MOVDconst [c])) + // cond: isPowerOfTwo64(c-1) && int32(c) >= 3 + // result: (MOVWUreg (ADDshiftLL x x [log64(c-1)])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c-1) && int32(c) >= 3) { + continue + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c - 1)) + v0.AddArg2(x, x) + v.AddArg(v0) + return true + } + break + } + // match: (MULW x (MOVDconst [c])) + // cond: isPowerOfTwo64(c+1) && int32(c) >= 7 + // result: (MOVWUreg (ADDshiftLL (NEG x) x [log64(c+1)])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c+1) && int32(c) >= 7) { + continue + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c + 1)) + v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1.AddArg(x) + v0.AddArg2(v1, x) + v.AddArg(v0) + return true + } + break + } + // match: (MULW x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) + // result: (MOVWUreg (SLLconst [log64(c/3)] (ADDshiftLL x x [1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) { + continue + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 3)) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(1) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + break + } + // match: (MULW x (MOVDconst [c])) + // cond: c%5 == 0 && 
isPowerOfTwo64(c/5) && is32Bit(c) + // result: (MOVWUreg (SLLconst [log64(c/5)] (ADDshiftLL x x [2]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) { + continue + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 5)) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(2) + v1.AddArg2(x, x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + break + } + // match: (MULW x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) + // result: (MOVWUreg (SLLconst [log64(c/7)] (ADDshiftLL (NEG x) x [3]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) { + continue + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 7)) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(3) + v2 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v2.AddArg(x) + v1.AddArg2(v2, x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + break + } + // match: (MULW x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) + // result: (MOVWUreg (SLLconst [log64(c/9)] (ADDshiftLL x x [3]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) { + continue + } + v.reset(OpARM64MOVWUreg) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = int64ToAuxInt(log64(c / 9)) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = int64ToAuxInt(3) + v1.AddArg2(x, x) + 
v0.AddArg(v1) + v.AddArg(v0) + return true + } + break + } + // match: (MULW (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [int64(uint32(c*d))]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpARM64MOVDconst { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint32(c * d))) + return true + } + break + } + return false +} +func rewriteValueARM64_OpARM64MVN(v *Value) bool { + v_0 := v.Args[0] + // match: (MVN (XOR x y)) + // result: (EON x y) + for { + if v_0.Op != OpARM64XOR { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpARM64EON) + v.AddArg2(x, y) + return true + } + // match: (MVN (MOVDconst [c])) + // result: (MOVDconst [^c]) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(^c) + return true + } + // match: (MVN x:(SLLconst [c] y)) + // cond: clobberIfDead(x) + // result: (MVNshiftLL [c] y) + for { + x := v_0 + if x.Op != OpARM64SLLconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(clobberIfDead(x)) { + break + } + v.reset(OpARM64MVNshiftLL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(y) + return true + } + // match: (MVN x:(SRLconst [c] y)) + // cond: clobberIfDead(x) + // result: (MVNshiftRL [c] y) + for { + x := v_0 + if x.Op != OpARM64SRLconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(clobberIfDead(x)) { + break + } + v.reset(OpARM64MVNshiftRL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(y) + return true + } + // match: (MVN x:(SRAconst [c] y)) + // cond: clobberIfDead(x) + // result: (MVNshiftRA [c] y) + for { + x := v_0 + if x.Op != OpARM64SRAconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(clobberIfDead(x)) { + break + } + v.reset(OpARM64MVNshiftRA) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(y) 
+ return true + } + // match: (MVN x:(RORconst [c] y)) + // cond: clobberIfDead(x) + // result: (MVNshiftRO [c] y) + for { + x := v_0 + if x.Op != OpARM64RORconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(clobberIfDead(x)) { + break + } + v.reset(OpARM64MVNshiftRO) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64MVNshiftLL(v *Value) bool { + v_0 := v.Args[0] + // match: (MVNshiftLL (MOVDconst [c]) [d]) + // result: (MOVDconst [^int64(uint64(c)<>uint64(d))]) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(^(c >> uint64(d))) + return true + } + return false +} +func rewriteValueARM64_OpARM64MVNshiftRL(v *Value) bool { + v_0 := v.Args[0] + // match: (MVNshiftRL (MOVDconst [c]) [d]) + // result: (MOVDconst [^int64(uint64(c)>>uint64(d))]) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d))) + return true + } + return false +} +func rewriteValueARM64_OpARM64MVNshiftRO(v *Value) bool { + v_0 := v.Args[0] + // match: (MVNshiftRO (MOVDconst [c]) [d]) + // result: (MOVDconst [^rotateRight64(c, d)]) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(^rotateRight64(c, d)) + return true + } + return false +} +func rewriteValueARM64_OpARM64NEG(v *Value) bool { + v_0 := v.Args[0] + // match: (NEG (MUL x y)) + // result: (MNEG x y) + for { + if v_0.Op != OpARM64MUL { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpARM64MNEG) + v.AddArg2(x, y) + return true + } + // match: (NEG (MULW x y)) + // cond: v.Type.Size() <= 4 + // result: (MNEGW x y) + for { + if v_0.Op != OpARM64MULW { + break 
+ } + y := v_0.Args[1] + x := v_0.Args[0] + if !(v.Type.Size() <= 4) { + break + } + v.reset(OpARM64MNEGW) + v.AddArg2(x, y) + return true + } + // match: (NEG (NEG x)) + // result: x + for { + if v_0.Op != OpARM64NEG { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (NEG (MOVDconst [c])) + // result: (MOVDconst [-c]) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(-c) + return true + } + // match: (NEG x:(SLLconst [c] y)) + // cond: clobberIfDead(x) + // result: (NEGshiftLL [c] y) + for { + x := v_0 + if x.Op != OpARM64SLLconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(clobberIfDead(x)) { + break + } + v.reset(OpARM64NEGshiftLL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(y) + return true + } + // match: (NEG x:(SRLconst [c] y)) + // cond: clobberIfDead(x) + // result: (NEGshiftRL [c] y) + for { + x := v_0 + if x.Op != OpARM64SRLconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(clobberIfDead(x)) { + break + } + v.reset(OpARM64NEGshiftRL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(y) + return true + } + // match: (NEG x:(SRAconst [c] y)) + // cond: clobberIfDead(x) + // result: (NEGshiftRA [c] y) + for { + x := v_0 + if x.Op != OpARM64SRAconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(clobberIfDead(x)) { + break + } + v.reset(OpARM64NEGshiftRA) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64NEGshiftLL(v *Value) bool { + v_0 := v.Args[0] + // match: (NEGshiftLL (MOVDconst [c]) [d]) + // result: (MOVDconst [-int64(uint64(c)<>uint64(d))]) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(-(c >> uint64(d))) + return true + } + return false +} +func rewriteValueARM64_OpARM64NEGshiftRL(v *Value) 
bool { + v_0 := v.Args[0] + // match: (NEGshiftRL (MOVDconst [c]) [d]) + // result: (MOVDconst [-int64(uint64(c)>>uint64(d))]) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(-int64(uint64(c) >> uint64(d))) + return true + } + return false +} +func rewriteValueARM64_OpARM64NotEqual(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (NotEqual (CMPconst [0] z:(AND x y))) + // cond: z.Uses == 1 + // result: (NotEqual (TST x y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (NotEqual (CMPWconst [0] x:(ANDconst [c] y))) + // cond: x.Uses == 1 + // result: (NotEqual (TSTWconst [int32(c)] y)) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (NotEqual (CMPWconst [0] z:(AND x y))) + // cond: z.Uses == 1 + // result: (NotEqual (TSTW x y)) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (NotEqual (CMPconst [0] x:(ANDconst [c] y))) + // cond: x.Uses == 1 + // 
result: (NotEqual (TSTconst [c] y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (NotEqual (CMP x z:(NEG y))) + // cond: z.Uses == 1 + // result: (NotEqual (CMN x y)) + for { + if v_0.Op != OpARM64CMP { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpARM64NEG { + break + } + y := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (NotEqual (CMPW x z:(NEG y))) + // cond: z.Uses == 1 + // result: (NotEqual (CMNW x y)) + for { + if v_0.Op != OpARM64CMPW { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpARM64NEG { + break + } + y := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (NotEqual (CMPconst [0] x:(ADDconst [c] y))) + // cond: x.Uses == 1 + // result: (NotEqual (CMNconst [c] y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMNconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (NotEqual (CMPWconst [0] x:(ADDconst [c] y))) + // cond: x.Uses == 1 + // result: (NotEqual (CMNWconst [int32(c)] y)) + for { + if v_0.Op != OpARM64CMPWconst || 
auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMNWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (NotEqual (CMPconst [0] z:(ADD x y))) + // cond: z.Uses == 1 + // result: (NotEqual (CMN x y)) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (NotEqual (CMPWconst [0] z:(ADD x y))) + // cond: z.Uses == 1 + // result: (NotEqual (CMNW x y)) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + y := z.Args[1] + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (NotEqual (CMPconst [0] z:(MADD a x y))) + // cond: z.Uses == 1 + // result: (NotEqual (CMN a (MUL x y))) + for { + if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADD { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (NotEqual (CMPconst [0] z:(MSUB a x y))) + // cond: z.Uses == 1 + // result: (NotEqual (CMP a (MUL x y))) + for { + if v_0.Op != OpARM64CMPconst || 
auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUB { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (NotEqual (CMPWconst [0] z:(MADDW a x y))) + // cond: z.Uses == 1 + // result: (NotEqual (CMNW a (MULW x y))) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADDW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (NotEqual (CMPWconst [0] z:(MSUBW a x y))) + // cond: z.Uses == 1 + // result: (NotEqual (CMPW a (MULW x y))) + for { + if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUBW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + v.AddArg(v0) + return true + } + // match: (NotEqual (FlagConstant [fc])) + // result: (MOVDconst [b2i(fc.ne())]) + for { + if v_0.Op != OpARM64FlagConstant { + break + } + fc := auxIntToFlagConstant(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(b2i(fc.ne())) + return true + } + // match: (NotEqual (InvertFlags x)) + // result: (NotEqual x) + for { + if v_0.Op != OpARM64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARM64NotEqual) + v.AddArg(x) + return true + } + return false +} +func 
rewriteValueARM64_OpARM64OR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OR x (MOVDconst [c])) + // result: (ORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64ORconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (OR x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (OR x (MVN y)) + // result: (ORN x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MVN { + continue + } + y := v_1.Args[0] + v.reset(OpARM64ORN) + v.AddArg2(x, y) + return true + } + break + } + // match: (OR x0 x1:(SLLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (ORshiftLL x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SLLconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ORshiftLL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + // match: (OR x0 x1:(SRLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (ORshiftRL x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SRLconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ORshiftRL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + // match: (OR x0 x1:(SRAconst [c] y)) + // cond: clobberIfDead(x1) + // result: (ORshiftRA x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SRAconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue 
+ } + v.reset(OpARM64ORshiftRA) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + // match: (OR x0 x1:(RORconst [c] y)) + // cond: clobberIfDead(x1) + // result: (ORshiftRO x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64RORconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64ORshiftRO) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + // match: (OR (UBFIZ [bfc] x) (ANDconst [ac] y)) + // cond: ac == ^((1<>uint64(d))]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64ORconst) + v.AuxInt = int64ToAuxInt(^(c >> uint64(d))) + v.AddArg(x) + return true + } + // match: (ORNshiftRA (SRAconst x [c]) x [c]) + // result: (MOVDconst [-1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + return false +} +func rewriteValueARM64_OpARM64ORNshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ORNshiftRL x (MOVDconst [c]) [d]) + // result: (ORconst x [^int64(uint64(c)>>uint64(d))]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64ORconst) + v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d))) + v.AddArg(x) + return true + } + // match: (ORNshiftRL (SRLconst x [c]) x [c]) + // result: (MOVDconst [-1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + return false +} 
+func rewriteValueARM64_OpARM64ORNshiftRO(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ORNshiftRO x (MOVDconst [c]) [d]) + // result: (ORconst x [^rotateRight64(c, d)]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64ORconst) + v.AuxInt = int64ToAuxInt(^rotateRight64(c, d)) + v.AddArg(x) + return true + } + // match: (ORNshiftRO (RORconst x [c]) x [c]) + // result: (MOVDconst [-1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + return false +} +func rewriteValueARM64_OpARM64ORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ORconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ORconst [-1] _) + // result: (MOVDconst [-1]) + for { + if auxIntToInt64(v.AuxInt) != -1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + // match: (ORconst [c] (MOVDconst [d])) + // result: (MOVDconst [c|d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(c | d) + return true + } + // match: (ORconst [c] (ORconst [d] x)) + // result: (ORconst [c|d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64ORconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARM64ORconst) + v.AuxInt = int64ToAuxInt(c | d) + v.AddArg(x) + return true + } + // match: (ORconst [c1] (ANDconst [c2] x)) + // cond: c2|c1 == ^0 + // result: (ORconst [c1] x) + for { + c1 := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c2|c1 == 
^0) { + break + } + v.reset(OpARM64ORconst) + v.AuxInt = int64ToAuxInt(c1) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ORshiftLL (MOVDconst [c]) x [d]) + // result: (ORconst [c] (SLLconst x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64ORconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ORshiftLL x (MOVDconst [c]) [d]) + // result: (ORconst x [int64(uint64(c)< [8] (UBFX [armBFAuxInt(8, 8)] x) x) + // result: (REV16W x) + for { + if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARM64REV16W) + v.AddArg(x) + return true + } + // match: (ORshiftLL [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x)) + // cond: uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff + // result: (REV16W x) + for { + if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 24) { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64ANDconst { + break + } + c1 := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if v_1.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_1.AuxInt) + if x != v_1.Args[0] || !(uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff) { + break + } + v.reset(OpARM64REV16W) + v.AddArg(x) + return true + } + // match: (ORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x)) + // cond: (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) + // result: (REV16 x) + for { + if 
auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64ANDconst { + break + } + c1 := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if v_1.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_1.AuxInt) + if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) { + break + } + v.reset(OpARM64REV16) + v.AddArg(x) + return true + } + // match: (ORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x)) + // cond: (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) + // result: (REV16 (ANDconst [0xffffffff] x)) + for { + if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64ANDconst { + break + } + c1 := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if v_1.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_1.AuxInt) + if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) { + break + } + v.reset(OpARM64REV16) + v0 := b.NewValue0(v.Pos, OpARM64ANDconst, x.Type) + v0.AuxInt = int64ToAuxInt(0xffffffff) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: ( ORshiftLL [c] (SRLconst x [64-c]) x2) + // result: (EXTRconst [64-c] x2 x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c { + break + } + x := v_0.Args[0] + x2 := v_1 + v.reset(OpARM64EXTRconst) + v.AuxInt = int64ToAuxInt(64 - c) + v.AddArg2(x2, x) + return true + } + // match: ( ORshiftLL [c] (UBFX [bfc] x) x2) + // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) + // result: (EXTRWconst [32-c] x2 x) + for { + t := v.Type + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64UBFX { + break + } + bfc := auxIntToArm64BitField(v_0.AuxInt) + x := v_0.Args[0] + x2 := v_1 + if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) { + break + } + 
v.reset(OpARM64EXTRWconst) + v.AuxInt = int64ToAuxInt(32 - c) + v.AddArg2(x2, x) + return true + } + // match: (ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) + // cond: sc == bfc.getARM64BFwidth() + // result: (BFXIL [bfc] y x) + for { + sc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64UBFX { + break + } + bfc := auxIntToArm64BitField(v_0.AuxInt) + x := v_0.Args[0] + if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != sc { + break + } + y := v_1.Args[0] + if !(sc == bfc.getARM64BFwidth()) { + break + } + v.reset(OpARM64BFXIL) + v.AuxInt = arm64BitFieldToAuxInt(bfc) + v.AddArg2(y, x) + return true + } + return false +} +func rewriteValueARM64_OpARM64ORshiftRA(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ORshiftRA (MOVDconst [c]) x [d]) + // result: (ORconst [c] (SRAconst x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64ORconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ORshiftRA x (MOVDconst [c]) [d]) + // result: (ORconst x [c>>uint64(d)]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64ORconst) + v.AuxInt = int64ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + // match: (ORshiftRA y:(SRAconst x [c]) x [c]) + // result: y + for { + c := auxIntToInt64(v.AuxInt) + y := v_0 + if y.Op != OpARM64SRAconst || auxIntToInt64(y.AuxInt) != c { + break + } + x := y.Args[0] + if x != v_1 { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ORshiftRL (MOVDconst [c]) x [d]) + // result: (ORconst [c] (SRLconst x [d])) + for { + d := auxIntToInt64(v.AuxInt) + 
if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64ORconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ORshiftRL x (MOVDconst [c]) [d]) + // result: (ORconst x [int64(uint64(c)>>uint64(d))]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64ORconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d))) + v.AddArg(x) + return true + } + // match: (ORshiftRL y:(SRLconst x [c]) x [c]) + // result: y + for { + c := auxIntToInt64(v.AuxInt) + y := v_0 + if y.Op != OpARM64SRLconst || auxIntToInt64(y.AuxInt) != c { + break + } + x := y.Args[0] + if x != v_1 { + break + } + v.copyOf(y) + return true + } + // match: (ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y)) + // cond: lc > rc && ac == ^((1< rc && ac == ^((1< x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64ORconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ORshiftRO x (MOVDconst [c]) [d]) + // result: (ORconst x [rotateRight64(c, d)]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64ORconst) + v.AuxInt = int64ToAuxInt(rotateRight64(c, d)) + v.AddArg(x) + return true + } + // match: (ORshiftRO y:(RORconst x [c]) x [c]) + // result: y + for { + c := auxIntToInt64(v.AuxInt) + y := v_0 + if y.Op != OpARM64RORconst || auxIntToInt64(y.AuxInt) != c { + break + } + x := y.Args[0] + if x != v_1 { + break + } + v.copyOf(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64REV(v *Value) bool { + v_0 
:= v.Args[0] + // match: (REV (REV p)) + // result: p + for { + if v_0.Op != OpARM64REV { + break + } + p := v_0.Args[0] + v.copyOf(p) + return true + } + return false +} +func rewriteValueARM64_OpARM64REVW(v *Value) bool { + v_0 := v.Args[0] + // match: (REVW (REVW p)) + // result: p + for { + if v_0.Op != OpARM64REVW { + break + } + p := v_0.Args[0] + v.copyOf(p) + return true + } + return false +} +func rewriteValueARM64_OpARM64ROR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ROR x (MOVDconst [c])) + // result: (RORconst x [c&63]) + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64RORconst) + v.AuxInt = int64ToAuxInt(c & 63) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64RORW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (RORW x (MOVDconst [c])) + // result: (RORWconst x [c&31]) + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64RORWconst) + v.AuxInt = int64ToAuxInt(c & 31) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64SBCSflags(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SBCSflags x y (Select1 (NEGSflags (NEG (NGCzerocarry bo))))) + // result: (SBCSflags x y bo) + for { + x := v_0 + y := v_1 + if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpARM64NEGSflags { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpARM64NEG || v_2_0_0.Type != typ.UInt64 { + break + } + v_2_0_0_0 := v_2_0_0.Args[0] + if v_2_0_0_0.Op != OpARM64NGCzerocarry || v_2_0_0_0.Type != typ.UInt64 { + break + } + bo := v_2_0_0_0.Args[0] + v.reset(OpARM64SBCSflags) + v.AddArg3(x, y, bo) + return true + } + // match: (SBCSflags x y (Select1 (NEGSflags (MOVDconst [0])))) + // result: (SUBSflags x y) + for { 
+ x := v_0 + y := v_1 + if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpARM64NEGSflags { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 { + break + } + v.reset(OpARM64SUBSflags) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM64_OpARM64SLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SLL x (MOVDconst [c])) + // result: (SLLconst x [c&63]) + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64SLLconst) + v.AuxInt = int64ToAuxInt(c & 63) + v.AddArg(x) + return true + } + // match: (SLL x (ANDconst [63] y)) + // result: (SLL x y) + for { + x := v_0 + if v_1.Op != OpARM64ANDconst || auxIntToInt64(v_1.AuxInt) != 63 { + break + } + y := v_1.Args[0] + v.reset(OpARM64SLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM64_OpARM64SLLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SLLconst [c] (MOVDconst [d])) + // result: (MOVDconst [d<>uint64(c)]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(d >> uint64(c)) + return true + } + // match: (SRAconst [rc] (SLLconst [lc] x)) + // cond: lc > rc + // result: (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x) + for { + rc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SLLconst { + break + } + lc := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(lc > rc) { + break + } + v.reset(OpARM64SBFIZ) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc)) + v.AddArg(x) + return true + } + // match: (SRAconst [rc] (SLLconst [lc] x)) + // cond: lc <= rc + // result: (SBFX [armBFAuxInt(rc-lc, 64-rc)] x) + for { + rc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SLLconst { + break + } + lc := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(lc 
<= rc) { + break + } + v.reset(OpARM64SBFX) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc)) + v.AddArg(x) + return true + } + // match: (SRAconst [rc] (MOVWreg x)) + // cond: rc < 32 + // result: (SBFX [armBFAuxInt(rc, 32-rc)] x) + for { + rc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVWreg { + break + } + x := v_0.Args[0] + if !(rc < 32) { + break + } + v.reset(OpARM64SBFX) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32-rc)) + v.AddArg(x) + return true + } + // match: (SRAconst [rc] (MOVHreg x)) + // cond: rc < 16 + // result: (SBFX [armBFAuxInt(rc, 16-rc)] x) + for { + rc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVHreg { + break + } + x := v_0.Args[0] + if !(rc < 16) { + break + } + v.reset(OpARM64SBFX) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16-rc)) + v.AddArg(x) + return true + } + // match: (SRAconst [rc] (MOVBreg x)) + // cond: rc < 8 + // result: (SBFX [armBFAuxInt(rc, 8-rc)] x) + for { + rc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVBreg { + break + } + x := v_0.Args[0] + if !(rc < 8) { + break + } + v.reset(OpARM64SBFX) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8-rc)) + v.AddArg(x) + return true + } + // match: (SRAconst [sc] (SBFIZ [bfc] x)) + // cond: sc < bfc.getARM64BFlsb() + // result: (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x) + for { + sc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SBFIZ { + break + } + bfc := auxIntToArm64BitField(v_0.AuxInt) + x := v_0.Args[0] + if !(sc < bfc.getARM64BFlsb()) { + break + } + v.reset(OpARM64SBFIZ) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())) + v.AddArg(x) + return true + } + // match: (SRAconst [sc] (SBFIZ [bfc] x)) + // cond: sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth() + // result: (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x) + for { + sc := auxIntToInt64(v.AuxInt) + if v_0.Op != 
OpARM64SBFIZ { + break + } + bfc := auxIntToArm64BitField(v_0.AuxInt) + x := v_0.Args[0] + if !(sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) { + break + } + v.reset(OpARM64SBFX) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64SRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRL x (MOVDconst [c])) + // result: (SRLconst x [c&63]) + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64SRLconst) + v.AuxInt = int64ToAuxInt(c & 63) + v.AddArg(x) + return true + } + // match: (SRL x (ANDconst [63] y)) + // result: (SRL x y) + for { + x := v_0 + if v_1.Op != OpARM64ANDconst || auxIntToInt64(v_1.AuxInt) != 63 { + break + } + y := v_1.Args[0] + v.reset(OpARM64SRL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM64_OpARM64SRLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SRLconst [c] (MOVDconst [d])) + // result: (MOVDconst [int64(uint64(d)>>uint64(c))]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint64(d) >> uint64(c))) + return true + } + // match: (SRLconst [c] (SLLconst [c] x)) + // cond: 0 < c && c < 64 + // result: (ANDconst [1<= 32 + // result: (MOVDconst [0]) + for { + rc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVWUreg { + break + } + if !(rc >= 32) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SRLconst [rc] (MOVHUreg x)) + // cond: rc >= 16 + // result: (MOVDconst [0]) + for { + rc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVHUreg { + break + } + if !(rc >= 16) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: 
(SRLconst [rc] (MOVBUreg x)) + // cond: rc >= 8 + // result: (MOVDconst [0]) + for { + rc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVBUreg { + break + } + if !(rc >= 8) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SRLconst [rc] (SLLconst [lc] x)) + // cond: lc > rc + // result: (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x) + for { + rc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SLLconst { + break + } + lc := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(lc > rc) { + break + } + v.reset(OpARM64UBFIZ) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc)) + v.AddArg(x) + return true + } + // match: (SRLconst [rc] (SLLconst [lc] x)) + // cond: lc < rc + // result: (UBFX [armBFAuxInt(rc-lc, 64-rc)] x) + for { + rc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SLLconst { + break + } + lc := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(lc < rc) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc)) + v.AddArg(x) + return true + } + // match: (SRLconst [rc] (MOVWUreg x)) + // cond: rc < 32 + // result: (UBFX [armBFAuxInt(rc, 32-rc)] x) + for { + rc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVWUreg { + break + } + x := v_0.Args[0] + if !(rc < 32) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32-rc)) + v.AddArg(x) + return true + } + // match: (SRLconst [rc] (MOVHUreg x)) + // cond: rc < 16 + // result: (UBFX [armBFAuxInt(rc, 16-rc)] x) + for { + rc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVHUreg { + break + } + x := v_0.Args[0] + if !(rc < 16) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16-rc)) + v.AddArg(x) + return true + } + // match: (SRLconst [rc] (MOVBUreg x)) + // cond: rc < 8 + // result: (UBFX [armBFAuxInt(rc, 8-rc)] x) + for { + rc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVBUreg { + break + } + x := v_0.Args[0] + if !(rc 
< 8) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8-rc)) + v.AddArg(x) + return true + } + // match: (SRLconst [sc] (ANDconst [ac] x)) + // cond: isARM64BFMask(sc, ac, sc) + // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x) + for { + sc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64ANDconst { + break + } + ac := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(isARM64BFMask(sc, ac, sc)) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc))) + v.AddArg(x) + return true + } + // match: (SRLconst [sc] (UBFX [bfc] x)) + // cond: sc < bfc.getARM64BFwidth() + // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x) + for { + sc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64UBFX { + break + } + bfc := auxIntToArm64BitField(v_0.AuxInt) + x := v_0.Args[0] + if !(sc < bfc.getARM64BFwidth()) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)) + v.AddArg(x) + return true + } + // match: (SRLconst [sc] (UBFIZ [bfc] x)) + // cond: sc == bfc.getARM64BFlsb() + // result: (ANDconst [1< bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth() + // result: (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x) + for { + sc := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64UBFIZ { + break + } + bfc := auxIntToArm64BitField(v_0.AuxInt) + x := v_0.Args[0] + if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64STP(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // 
match: (STP [off1] {sym} (ADDconst [off2] ptr) val1 val2 mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (STP [off1+int32(off2)] {sym} ptr val1 val2 mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpARM64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val1 := v_1 + val2 := v_2 + mem := v_3 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64STP) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg4(ptr, val1, val2, mem) + return true + } + // match: (STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val1 := v_1 + val2 := v_2 + mem := v_3 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpARM64STP) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg4(ptr, val1, val2, mem) + return true + } + // match: (STP [off] {sym} ptr (MOVDconst [0]) (MOVDconst [0]) mem) + // result: (MOVQstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 || v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 { + break + } + mem := v_3 + v.reset(OpARM64MOVQstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64SUB(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SUB x (MOVDconst [c])) + // result: (SUBconst [c] x) + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64SUBconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (SUB a l:(MUL x y)) + // cond: l.Uses==1 && clobber(l) + // result: (MSUB a x y) + for { + a := v_0 + l := v_1 + if l.Op != OpARM64MUL { + break + } + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1 && clobber(l)) { + break + } + v.reset(OpARM64MSUB) + v.AddArg3(a, x, y) + return true + } + // match: (SUB a l:(MNEG x y)) + // cond: l.Uses==1 && clobber(l) + // result: (MADD a x y) + for { + a := v_0 + l := v_1 + if l.Op != OpARM64MNEG { + break + } + y := l.Args[1] + x := l.Args[0] + if !(l.Uses == 1 && clobber(l)) { + break + } + v.reset(OpARM64MADD) + v.AddArg3(a, x, y) + return true + } + // match: (SUB a l:(MULW x y)) + // cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l) + // result: (MSUBW a x y) + for { + a := v_0 + l := v_1 + if l.Op != OpARM64MULW { + break + } + y := l.Args[1] + x := l.Args[0] + if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) { + break + } + v.reset(OpARM64MSUBW) + v.AddArg3(a, x, y) + return true + } + // match: (SUB a l:(MNEGW x y)) + // cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l) + // result: (MADDW a x y) + for { + a := v_0 + l := v_1 + if l.Op != OpARM64MNEGW { + break + } + y := l.Args[1] + x := l.Args[0] + if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) { + break + } + v.reset(OpARM64MADDW) + v.AddArg3(a, x, y) + return true + } + // match: (SUB x x) + // result: (MOVDconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SUB x (SUB y z)) + // result: (SUB (ADD x z) y) + for { + x := v_0 + if v_1.Op != OpARM64SUB { + break + } + z := v_1.Args[1] + y := v_1.Args[0] + v.reset(OpARM64SUB) + v0 := 
b.NewValue0(v.Pos, OpARM64ADD, v.Type) + v0.AddArg2(x, z) + v.AddArg2(v0, y) + return true + } + // match: (SUB (SUB x y) z) + // result: (SUB x (ADD y z)) + for { + if v_0.Op != OpARM64SUB { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + z := v_1 + v.reset(OpARM64SUB) + v0 := b.NewValue0(v.Pos, OpARM64ADD, y.Type) + v0.AddArg2(y, z) + v.AddArg2(x, v0) + return true + } + // match: (SUB x0 x1:(SLLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (SUBshiftLL x0 y [c]) + for { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SLLconst { + break + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64SUBshiftLL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + // match: (SUB x0 x1:(SRLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (SUBshiftRL x0 y [c]) + for { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SRLconst { + break + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64SUBshiftRL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + // match: (SUB x0 x1:(SRAconst [c] y)) + // cond: clobberIfDead(x1) + // result: (SUBshiftRA x0 y [c]) + for { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SRAconst { + break + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64SUBshiftRA) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + return false +} +func rewriteValueARM64_OpARM64SUBconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SUBconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SUBconst [c] (MOVDconst [d])) + // result: (MOVDconst [d-c]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(d - c) + return true + } + // match: 
(SUBconst [c] (SUBconst [d] x)) + // result: (ADDconst [-c-d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SUBconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARM64ADDconst) + v.AuxInt = int64ToAuxInt(-c - d) + v.AddArg(x) + return true + } + // match: (SUBconst [c] (ADDconst [d] x)) + // result: (ADDconst [-c+d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64ADDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARM64ADDconst) + v.AuxInt = int64ToAuxInt(-c + d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64SUBshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBshiftLL x (MOVDconst [c]) [d]) + // result: (SUBconst x [int64(uint64(c)<>uint64(d)]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64SUBconst) + v.AuxInt = int64ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + // match: (SUBshiftRA (SRAconst x [c]) x [c]) + // result: (MOVDconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64SUBshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBshiftRL x (MOVDconst [c]) [d]) + // result: (SUBconst x [int64(uint64(c)>>uint64(d))]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64SUBconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d))) + v.AddArg(x) + return true + } + // match: (SUBshiftRL (SRLconst x [c]) x [c]) + // result: (MOVDconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SRLconst || 
auxIntToInt64(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64TST(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (TST x (MOVDconst [c])) + // result: (TSTconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64TSTconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (TST x0 x1:(SLLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (TSTshiftLL x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SLLconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64TSTshiftLL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + // match: (TST x0 x1:(SRLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (TSTshiftRL x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SRLconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64TSTshiftRL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + // match: (TST x0 x1:(SRAconst [c] y)) + // cond: clobberIfDead(x1) + // result: (TSTshiftRA x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SRAconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64TSTshiftRA) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + // match: (TST x0 x1:(RORconst [c] y)) + // cond: 
clobberIfDead(x1) + // result: (TSTshiftRO x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64RORconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64TSTshiftRO) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + return false +} +func rewriteValueARM64_OpARM64TSTW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (TSTW x (MOVDconst [c])) + // result: (TSTWconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64TSTWconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValueARM64_OpARM64TSTWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (TSTWconst (MOVDconst [x]) [y]) + // result: (FlagConstant [logicFlags32(int32(x)&y)]) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64FlagConstant) + v.AuxInt = flagConstantToAuxInt(logicFlags32(int32(x) & y)) + return true + } + return false +} +func rewriteValueARM64_OpARM64TSTconst(v *Value) bool { + v_0 := v.Args[0] + // match: (TSTconst (MOVDconst [x]) [y]) + // result: (FlagConstant [logicFlags64(x&y)]) + for { + y := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64FlagConstant) + v.AuxInt = flagConstantToAuxInt(logicFlags64(x & y)) + return true + } + return false +} +func rewriteValueARM64_OpARM64TSTshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TSTshiftLL (MOVDconst [c]) x [d]) + // result: (TSTconst [c] (SLLconst x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := 
auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64TSTconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (TSTshiftLL x (MOVDconst [c]) [d]) + // result: (TSTconst x [int64(uint64(c)< x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64TSTconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (TSTshiftRA x (MOVDconst [c]) [d]) + // result: (TSTconst x [c>>uint64(d)]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64TSTconst) + v.AuxInt = int64ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64TSTshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (TSTshiftRL (MOVDconst [c]) x [d]) + // result: (TSTconst [c] (SRLconst x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64TSTconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (TSTshiftRL x (MOVDconst [c]) [d]) + // result: (TSTconst x [int64(uint64(c)>>uint64(d))]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64TSTconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d))) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64TSTshiftRO(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: 
(TSTshiftRO (MOVDconst [c]) x [d]) + // result: (TSTconst [c] (RORconst x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64TSTconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (TSTshiftRO x (MOVDconst [c]) [d]) + // result: (TSTconst x [rotateRight64(c, d)]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64TSTconst) + v.AuxInt = int64ToAuxInt(rotateRight64(c, d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64UBFIZ(v *Value) bool { + v_0 := v.Args[0] + // match: (UBFIZ [bfc] (SLLconst [sc] x)) + // cond: sc < bfc.getARM64BFwidth() + // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x) + for { + bfc := auxIntToArm64BitField(v.AuxInt) + if v_0.Op != OpARM64SLLconst { + break + } + sc := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(sc < bfc.getARM64BFwidth()) { + break + } + v.reset(OpARM64UBFIZ) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64UBFX(v *Value) bool { + v_0 := v.Args[0] + // match: (UBFX [bfc] (ANDconst [c] x)) + // cond: isARM64BFMask(0, c, 0) && bfc.getARM64BFlsb() + bfc.getARM64BFwidth() <= arm64BFWidth(c, 0) + // result: (UBFX [bfc] x) + for { + bfc := auxIntToArm64BitField(v.AuxInt) + if v_0.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(isARM64BFMask(0, c, 0) && bfc.getARM64BFlsb()+bfc.getARM64BFwidth() <= arm64BFWidth(c, 0)) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BitFieldToAuxInt(bfc) + v.AddArg(x) + return true + } + // match: (UBFX [bfc] (SRLconst [sc] x)) + 
// cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64 + // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x) + for { + bfc := auxIntToArm64BitField(v.AuxInt) + if v_0.Op != OpARM64SRLconst { + break + } + sc := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())) + v.AddArg(x) + return true + } + // match: (UBFX [bfc] (SLLconst [sc] x)) + // cond: sc == bfc.getARM64BFlsb() + // result: (ANDconst [1< bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth() + // result: (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x) + for { + bfc := auxIntToArm64BitField(v.AuxInt) + if v_0.Op != OpARM64SLLconst { + break + } + sc := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) { + break + } + v.reset(OpARM64UBFIZ) + v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64UDIV(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (UDIV x (MOVDconst [1])) + // result: x + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 { + break + } + v.copyOf(x) + return true + } + // match: (UDIV x (MOVDconst [c])) + // cond: isPowerOfTwo64(c) + // result: (SRLconst [log64(c)] x) + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c)) { + break + } + v.reset(OpARM64SRLconst) + v.AuxInt = int64ToAuxInt(log64(c)) + v.AddArg(x) + return true + } + // match: (UDIV (MOVDconst [c]) (MOVDconst [d])) + // cond: d != 0 + // result: (MOVDconst [int64(uint64(c)/uint64(d))]) + for { + if v_0.Op != 
OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpARM64MOVDconst { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d))) + return true + } + return false +} +func rewriteValueARM64_OpARM64UDIVW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (UDIVW x (MOVDconst [c])) + // cond: uint32(c)==1 + // result: (MOVWUreg x) + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) == 1) { + break + } + v.reset(OpARM64MOVWUreg) + v.AddArg(x) + return true + } + // match: (UDIVW x (MOVDconst [c])) + // cond: isPowerOfTwo64(c) && is32Bit(c) + // result: (SRLconst [log64(c)] (MOVWUreg x)) + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c) && is32Bit(c)) { + break + } + v.reset(OpARM64SRLconst) + v.AuxInt = int64ToAuxInt(log64(c)) + v0 := b.NewValue0(v.Pos, OpARM64MOVWUreg, v.Type) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (UDIVW (MOVDconst [c]) (MOVDconst [d])) + // cond: d != 0 + // result: (MOVDconst [int64(uint32(c)/uint32(d))]) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpARM64MOVDconst { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint32(c) / uint32(d))) + return true + } + return false +} +func rewriteValueARM64_OpARM64UMOD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (UMOD x y) + // result: (MSUB x y (UDIV x y)) + for { + if v.Type != typ.UInt64 { + break + } + x := v_0 + y := v_1 + v.reset(OpARM64MSUB) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpARM64UDIV, typ.UInt64) + v0.AddArg2(x, y) + v.AddArg3(x, y, v0) + return true + } + // match: 
(UMOD _ (MOVDconst [1])) + // result: (MOVDconst [0]) + for { + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (UMOD x (MOVDconst [c])) + // cond: isPowerOfTwo64(c) + // result: (ANDconst [c-1] x) + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c)) { + break + } + v.reset(OpARM64ANDconst) + v.AuxInt = int64ToAuxInt(c - 1) + v.AddArg(x) + return true + } + // match: (UMOD (MOVDconst [c]) (MOVDconst [d])) + // cond: d != 0 + // result: (MOVDconst [int64(uint64(c)%uint64(d))]) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpARM64MOVDconst { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d))) + return true + } + return false +} +func rewriteValueARM64_OpARM64UMODW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (UMODW x y) + // result: (MSUBW x y (UDIVW x y)) + for { + if v.Type != typ.UInt32 { + break + } + x := v_0 + y := v_1 + v.reset(OpARM64MSUBW) + v.Type = typ.UInt32 + v0 := b.NewValue0(v.Pos, OpARM64UDIVW, typ.UInt32) + v0.AddArg2(x, y) + v.AddArg3(x, y, v0) + return true + } + // match: (UMODW _ (MOVDconst [c])) + // cond: uint32(c)==1 + // result: (MOVDconst [0]) + for { + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) == 1) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (UMODW x (MOVDconst [c])) + // cond: isPowerOfTwo64(c) && is32Bit(c) + // result: (ANDconst [c-1] x) + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c) && is32Bit(c)) { + break + } + v.reset(OpARM64ANDconst) + 
v.AuxInt = int64ToAuxInt(c - 1) + v.AddArg(x) + return true + } + // match: (UMODW (MOVDconst [c]) (MOVDconst [d])) + // cond: d != 0 + // result: (MOVDconst [int64(uint32(c)%uint32(d))]) + for { + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpARM64MOVDconst { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint32(c) % uint32(d))) + return true + } + return false +} +func rewriteValueARM64_OpARM64XOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XOR x (MOVDconst [c])) + // result: (XORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64XORconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (XOR x x) + // result: (MOVDconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (XOR x (MVN y)) + // result: (EON x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpARM64MVN { + continue + } + y := v_1.Args[0] + v.reset(OpARM64EON) + v.AddArg2(x, y) + return true + } + break + } + // match: (XOR x0 x1:(SLLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (XORshiftLL x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SLLconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64XORshiftLL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + // match: (XOR x0 x1:(SRLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (XORshiftRL x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 
+ if x1.Op != OpARM64SRLconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64XORshiftRL) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + // match: (XOR x0 x1:(SRAconst [c] y)) + // cond: clobberIfDead(x1) + // result: (XORshiftRA x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64SRAconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64XORshiftRA) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + // match: (XOR x0 x1:(RORconst [c] y)) + // cond: clobberIfDead(x1) + // result: (XORshiftRO x0 y [c]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x0 := v_0 + x1 := v_1 + if x1.Op != OpARM64RORconst { + continue + } + c := auxIntToInt64(x1.AuxInt) + y := x1.Args[0] + if !(clobberIfDead(x1)) { + continue + } + v.reset(OpARM64XORshiftRO) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(x0, y) + return true + } + break + } + return false +} +func rewriteValueARM64_OpARM64XORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (XORconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (XORconst [-1] x) + // result: (MVN x) + for { + if auxIntToInt64(v.AuxInt) != -1 { + break + } + x := v_0 + v.reset(OpARM64MVN) + v.AddArg(x) + return true + } + // match: (XORconst [c] (MOVDconst [d])) + // result: (MOVDconst [c^d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(c ^ d) + return true + } + // match: (XORconst [c] (XORconst [d] x)) + // result: (XORconst [c^d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64XORconst { + break + } + d := 
auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpARM64XORconst) + v.AuxInt = int64ToAuxInt(c ^ d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (XORshiftLL (MOVDconst [c]) x [d]) + // result: (XORconst [c] (SLLconst x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64XORconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (XORshiftLL x (MOVDconst [c]) [d]) + // result: (XORconst x [int64(uint64(c)< [8] (UBFX [armBFAuxInt(8, 8)] x) x) + // result: (REV16W x) + for { + if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARM64REV16W) + v.AddArg(x) + return true + } + // match: (XORshiftLL [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x)) + // cond: uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff + // result: (REV16W x) + for { + if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 24) { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64ANDconst { + break + } + c1 := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if v_1.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_1.AuxInt) + if x != v_1.Args[0] || !(uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff) { + break + } + v.reset(OpARM64REV16W) + v.AddArg(x) + return true + } + // match: (XORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x)) + // cond: (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) + // 
result: (REV16 x) + for { + if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64ANDconst { + break + } + c1 := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if v_1.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_1.AuxInt) + if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) { + break + } + v.reset(OpARM64REV16) + v.AddArg(x) + return true + } + // match: (XORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x)) + // cond: (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) + // result: (REV16 (ANDconst [0xffffffff] x)) + for { + if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64ANDconst { + break + } + c1 := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if v_1.Op != OpARM64ANDconst { + break + } + c2 := auxIntToInt64(v_1.AuxInt) + if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) { + break + } + v.reset(OpARM64REV16) + v0 := b.NewValue0(v.Pos, OpARM64ANDconst, x.Type) + v0.AuxInt = int64ToAuxInt(0xffffffff) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (XORshiftLL [c] (SRLconst x [64-c]) x2) + // result: (EXTRconst [64-c] x2 x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c { + break + } + x := v_0.Args[0] + x2 := v_1 + v.reset(OpARM64EXTRconst) + v.AuxInt = int64ToAuxInt(64 - c) + v.AddArg2(x2, x) + return true + } + // match: (XORshiftLL [c] (UBFX [bfc] x) x2) + // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) + // result: (EXTRWconst [32-c] x2 x) + for { + t := v.Type + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64UBFX { + break + } + bfc := auxIntToArm64BitField(v_0.AuxInt) + x := v_0.Args[0] + x2 := v_1 + if !(c < 32 && t.Size() == 4 && bfc == 
armBFAuxInt(32-c, c)) { + break + } + v.reset(OpARM64EXTRWconst) + v.AuxInt = int64ToAuxInt(32 - c) + v.AddArg2(x2, x) + return true + } + return false +} +func rewriteValueARM64_OpARM64XORshiftRA(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (XORshiftRA (MOVDconst [c]) x [d]) + // result: (XORconst [c] (SRAconst x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64XORconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (XORshiftRA x (MOVDconst [c]) [d]) + // result: (XORconst x [c>>uint64(d)]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64XORconst) + v.AuxInt = int64ToAuxInt(c >> uint64(d)) + v.AddArg(x) + return true + } + // match: (XORshiftRA (SRAconst x [c]) x [c]) + // result: (MOVDconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64XORshiftRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (XORshiftRL (MOVDconst [c]) x [d]) + // result: (XORconst [c] (SRLconst x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64XORconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (XORshiftRL x (MOVDconst [c]) [d]) + // result: (XORconst x [int64(uint64(c)>>uint64(d))]) + for { + d := auxIntToInt64(v.AuxInt) + x 
:= v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64XORconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d))) + v.AddArg(x) + return true + } + // match: (XORshiftRL (SRLconst x [c]) x [c]) + // result: (MOVDconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpARM64XORshiftRO(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (XORshiftRO (MOVDconst [c]) x [d]) + // result: (XORconst [c] (RORconst x [d])) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpARM64XORconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (XORshiftRO x (MOVDconst [c]) [d]) + // result: (XORconst x [rotateRight64(c, d)]) + for { + d := auxIntToInt64(v.AuxInt) + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpARM64XORconst) + v.AuxInt = int64ToAuxInt(rotateRight64(c, d)) + v.AddArg(x) + return true + } + // match: (XORshiftRO (RORconst x [c]) x [c]) + // result: (MOVDconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c { + break + } + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueARM64_OpAddr(v *Value) bool { + v_0 := v.Args[0] + // match: (Addr {sym} base) + // result: (MOVDaddr {sym} base) + for { + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpARM64MOVDaddr) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } +} 
+func rewriteValueARM64_OpAtomicAnd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAnd32 ptr val mem) + // result: (Select1 (LoweredAtomicAnd32 ptr val mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd32, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(ptr, val, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpAtomicAnd32Variant(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAnd32Variant ptr val mem) + // result: (Select1 (LoweredAtomicAnd32Variant ptr val mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd32Variant, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(ptr, val, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpAtomicAnd8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAnd8 ptr val mem) + // result: (Select1 (LoweredAtomicAnd8 ptr val mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd8, types.NewTuple(typ.UInt8, types.TypeMem)) + v0.AddArg3(ptr, val, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpAtomicAnd8Variant(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAnd8Variant ptr val mem) + // result: (Select1 (LoweredAtomicAnd8Variant ptr val mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd8Variant, types.NewTuple(typ.UInt8, types.TypeMem)) + v0.AddArg3(ptr, val, mem) + v.AddArg(v0) + return true + } +} +func 
rewriteValueARM64_OpAtomicOr32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicOr32 ptr val mem) + // result: (Select1 (LoweredAtomicOr32 ptr val mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr32, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(ptr, val, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpAtomicOr32Variant(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicOr32Variant ptr val mem) + // result: (Select1 (LoweredAtomicOr32Variant ptr val mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr32Variant, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(ptr, val, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpAtomicOr8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicOr8 ptr val mem) + // result: (Select1 (LoweredAtomicOr8 ptr val mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr8, types.NewTuple(typ.UInt8, types.TypeMem)) + v0.AddArg3(ptr, val, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpAtomicOr8Variant(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicOr8Variant ptr val mem) + // result: (Select1 (LoweredAtomicOr8Variant ptr val mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr8Variant, types.NewTuple(typ.UInt8, types.TypeMem)) + v0.AddArg3(ptr, val, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpAvg64u(v *Value) bool { + v_1 
:= v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Avg64u x y) + // result: (ADD (SRLconst (SUB x y) [1]) y) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpARM64ADD) + v0 := b.NewValue0(v.Pos, OpARM64SRLconst, t) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpARM64SUB, t) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg2(v0, y) + return true + } +} +func rewriteValueARM64_OpBitLen32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen32 x) + // result: (SUB (MOVDconst [32]) (CLZW x)) + for { + x := v_0 + v.reset(OpARM64SUB) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(32) + v1 := b.NewValue0(v.Pos, OpARM64CLZW, typ.Int) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM64_OpBitLen64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen64 x) + // result: (SUB (MOVDconst [64]) (CLZ x)) + for { + x := v_0 + v.reset(OpARM64SUB) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(64) + v1 := b.NewValue0(v.Pos, OpARM64CLZ, typ.Int) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM64_OpBitRev16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitRev16 x) + // result: (SRLconst [48] (RBIT x)) + for { + x := v_0 + v.reset(OpARM64SRLconst) + v.AuxInt = int64ToAuxInt(48) + v0 := b.NewValue0(v.Pos, OpARM64RBIT, typ.UInt64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpBitRev8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitRev8 x) + // result: (SRLconst [56] (RBIT x)) + for { + x := v_0 + v.reset(OpARM64SRLconst) + v.AuxInt = int64ToAuxInt(56) + v0 := b.NewValue0(v.Pos, OpARM64RBIT, typ.UInt64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpCondSelect(v *Value) bool { + v_2 := 
v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CondSelect x y boolval) + // cond: flagArg(boolval) != nil + // result: (CSEL [boolval.Op] x y flagArg(boolval)) + for { + x := v_0 + y := v_1 + boolval := v_2 + if !(flagArg(boolval) != nil) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(boolval.Op) + v.AddArg3(x, y, flagArg(boolval)) + return true + } + // match: (CondSelect x y boolval) + // cond: flagArg(boolval) == nil + // result: (CSEL [OpARM64NotEqual] x y (TSTWconst [1] boolval)) + for { + x := v_0 + y := v_1 + boolval := v_2 + if !(flagArg(boolval) == nil) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(1) + v0.AddArg(boolval) + v.AddArg3(x, y, v0) + return true + } + return false +} +func rewriteValueARM64_OpConst16(v *Value) bool { + // match: (Const16 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt16(v.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueARM64_OpConst32(v *Value) bool { + // match: (Const32 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt32(v.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueARM64_OpConst32F(v *Value) bool { + // match: (Const32F [val]) + // result: (FMOVSconst [float64(val)]) + for { + val := auxIntToFloat32(v.AuxInt) + v.reset(OpARM64FMOVSconst) + v.AuxInt = float64ToAuxInt(float64(val)) + return true + } +} +func rewriteValueARM64_OpConst64(v *Value) bool { + // match: (Const64 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt64(v.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueARM64_OpConst64F(v *Value) bool { + // match: (Const64F [val]) + // result: (FMOVDconst [float64(val)]) + for { + val := 
auxIntToFloat64(v.AuxInt) + v.reset(OpARM64FMOVDconst) + v.AuxInt = float64ToAuxInt(float64(val)) + return true + } +} +func rewriteValueARM64_OpConst8(v *Value) bool { + // match: (Const8 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt8(v.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueARM64_OpConstBool(v *Value) bool { + // match: (ConstBool [t]) + // result: (MOVDconst [b2i(t)]) + for { + t := auxIntToBool(v.AuxInt) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(b2i(t)) + return true + } +} +func rewriteValueARM64_OpConstNil(v *Value) bool { + // match: (ConstNil) + // result: (MOVDconst [0]) + for { + v.reset(OpARM64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } +} +func rewriteValueARM64_OpCtz16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz16 x) + // result: (CLZW (RBITW (ORconst [0x10000] x))) + for { + t := v.Type + x := v_0 + v.reset(OpARM64CLZW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpARM64RBITW, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpARM64ORconst, typ.UInt32) + v1.AuxInt = int64ToAuxInt(0x10000) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpCtz32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Ctz32 x) + // result: (CLZW (RBITW x)) + for { + t := v.Type + x := v_0 + v.reset(OpARM64CLZW) + v0 := b.NewValue0(v.Pos, OpARM64RBITW, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpCtz64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Ctz64 x) + // result: (CLZ (RBIT x)) + for { + t := v.Type + x := v_0 + v.reset(OpARM64CLZ) + v0 := b.NewValue0(v.Pos, OpARM64RBIT, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpCtz8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz8 x) + // result: (CLZW (RBITW (ORconst [0x100] 
x))) + for { + t := v.Type + x := v_0 + v.reset(OpARM64CLZW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpARM64RBITW, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpARM64ORconst, typ.UInt32) + v1.AuxInt = int64ToAuxInt(0x100) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpDiv16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 [false] x y) + // result: (DIVW (SignExt16to32 x) (SignExt16to32 y)) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + y := v_1 + v.reset(OpARM64DIVW) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValueARM64_OpDiv16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16u x y) + // result: (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64UDIVW) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM64_OpDiv32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Div32 [false] x y) + // result: (DIVW x y) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + y := v_1 + v.reset(OpARM64DIVW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM64_OpDiv64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Div64 [false] x y) + // result: (DIV x y) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + y := v_1 + v.reset(OpARM64DIV) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM64_OpDiv8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: 
(Div8 x y) + // result: (DIVW (SignExt8to32 x) (SignExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64DIVW) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM64_OpDiv8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64UDIVW) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM64_OpEq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq16 x y) + // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpEq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32 x y) + // result: (Equal (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpEq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32F x y) + // result: (Equal (FCMPS x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpEq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] + b := v.Block + // match: (Eq64 x y) + // result: (Equal (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpEq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq64F x y) + // result: (Equal (FCMPD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpEq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq8 x y) + // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpEqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqB x y) + // result: (XOR (MOVDconst [1]) (XOR x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64XOR) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpARM64XOR, typ.Bool) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM64_OpEqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (EqPtr x y) + // result: (Equal (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpFMA(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMA x y z) + // 
result: (FMADDD z x y) + for { + x := v_0 + y := v_1 + z := v_2 + v.reset(OpARM64FMADDD) + v.AddArg3(z, x, y) + return true + } +} +func rewriteValueARM64_OpHmul32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul32 x y) + // result: (SRAconst (MULL x y) [32]) + for { + x := v_0 + y := v_1 + v.reset(OpARM64SRAconst) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpARM64MULL, typ.Int64) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpHmul32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul32u x y) + // result: (SRAconst (UMULL x y) [32]) + for { + x := v_0 + y := v_1 + v.reset(OpARM64SRAconst) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpARM64UMULL, typ.UInt64) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpIsInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (IsInBounds idx len) + // result: (LessThanU (CMP idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpIsNonNil(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (IsNonNil ptr) + // result: (NotEqual (CMPconst [0] ptr)) + for { + ptr := v_0 + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(0) + v0.AddArg(ptr) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpIsSliceInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (IsSliceInBounds idx len) + // result: (LessEqualU (CMP idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpARM64LessEqualU) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) + return true + } +} +func 
rewriteValueARM64_OpLeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16 x y) + // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLeq16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16U x zero:(MOVDconst [0])) + // result: (Eq16 x zero) + for { + x := v_0 + zero := v_1 + if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 { + break + } + v.reset(OpEq16) + v.AddArg2(x, zero) + return true + } + // match: (Leq16U (MOVDconst [1]) x) + // result: (Neq16 (MOVDconst [0]) x) + for { + if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 { + break + } + x := v_1 + v.reset(OpNeq16) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Leq16U x y) + // result: (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessEqualU) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32 x y) + // result: (LessEqual (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func 
rewriteValueARM64_OpLeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32F x y) + // result: (LessEqualF (FCMPS x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessEqualF) + v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLeq32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq32U x zero:(MOVDconst [0])) + // result: (Eq32 x zero) + for { + x := v_0 + zero := v_1 + if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 { + break + } + v.reset(OpEq32) + v.AddArg2(x, zero) + return true + } + // match: (Leq32U (MOVDconst [1]) x) + // result: (Neq32 (MOVDconst [0]) x) + for { + if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 { + break + } + x := v_1 + v.reset(OpNeq32) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Leq32U x y) + // result: (LessEqualU (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessEqualU) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64 x y) + // result: (LessEqual (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64F x y) + // result: (LessEqualF (FCMPD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessEqualF) + v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLeq64U(v *Value) bool { + v_1 := v.Args[1] 
+ v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq64U x zero:(MOVDconst [0])) + // result: (Eq64 x zero) + for { + x := v_0 + zero := v_1 + if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 { + break + } + v.reset(OpEq64) + v.AddArg2(x, zero) + return true + } + // match: (Leq64U (MOVDconst [1]) x) + // result: (Neq64 (MOVDconst [0]) x) + for { + if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 { + break + } + x := v_1 + v.reset(OpNeq64) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Leq64U x y) + // result: (LessEqualU (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessEqualU) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8 x y) + // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLeq8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8U x zero:(MOVDconst [0])) + // result: (Eq8 x zero) + for { + x := v_0 + zero := v_1 + if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 { + break + } + v.reset(OpEq8) + v.AddArg2(x, zero) + return true + } + // match: (Leq8U (MOVDconst [1]) x) + // result: (Neq8 (MOVDconst [0]) x) + for { + if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 { + break + } + x := v_1 + v.reset(OpNeq8) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, 
typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Leq8U x y) + // result: (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessEqualU) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16 x y) + // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessThan) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16U zero:(MOVDconst [0]) x) + // result: (Neq16 zero x) + for { + zero := v_0 + if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 { + break + } + x := v_1 + v.reset(OpNeq16) + v.AddArg2(zero, x) + return true + } + // match: (Less16U x (MOVDconst [1])) + // result: (Eq16 x (MOVDconst [0])) + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 { + break + } + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(x, v0) + return true + } + // match: (Less16U x y) + // result: (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) 
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32 x y) + // result: (LessThan (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessThan) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32F x y) + // result: (LessThanF (FCMPS x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessThanF) + v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less32U zero:(MOVDconst [0]) x) + // result: (Neq32 zero x) + for { + zero := v_0 + if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 { + break + } + x := v_1 + v.reset(OpNeq32) + v.AddArg2(zero, x) + return true + } + // match: (Less32U x (MOVDconst [1])) + // result: (Eq32 x (MOVDconst [0])) + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 { + break + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(x, v0) + return true + } + // match: (Less32U x y) + // result: (LessThanU (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64 x y) + // result: (LessThan (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessThan) + v0 := b.NewValue0(v.Pos, OpARM64CMP, 
types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64F x y) + // result: (LessThanF (FCMPD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessThanF) + v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less64U zero:(MOVDconst [0]) x) + // result: (Neq64 zero x) + for { + zero := v_0 + if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 { + break + } + x := v_1 + v.reset(OpNeq64) + v.AddArg2(zero, x) + return true + } + // match: (Less64U x (MOVDconst [1])) + // result: (Eq64 x (MOVDconst [0])) + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 { + break + } + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(x, v0) + return true + } + // match: (Less64U x y) + // result: (LessThanU (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8 x y) + // result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessThan) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (Less8U zero:(MOVDconst [0]) x) + // result: (Neq8 zero x) + for { + zero := v_0 + if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 { + break + } + x := v_1 + v.reset(OpNeq8) + v.AddArg2(zero, x) + return true + } + // match: (Less8U x (MOVDconst [1])) + // result: (Eq8 x (MOVDconst [0])) + for { + x := v_0 + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 { + break + } + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(x, v0) + return true + } + // match: (Less8U x y) + // result: (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Load ptr mem) + // cond: t.IsBoolean() + // result: (MOVBUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean()) { + break + } + v.reset(OpARM64MOVBUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is8BitInt(t) && t.IsSigned()) + // result: (MOVBload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is8BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpARM64MOVBload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is8BitInt(t) && !t.IsSigned()) + // result: (MOVBUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is8BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpARM64MOVBUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is16BitInt(t) && t.IsSigned()) + // result: (MOVHload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if 
!(is16BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpARM64MOVHload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is16BitInt(t) && !t.IsSigned()) + // result: (MOVHUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpARM64MOVHUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is32BitInt(t) && t.IsSigned()) + // result: (MOVWload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpARM64MOVWload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is32BitInt(t) && !t.IsSigned()) + // result: (MOVWUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpARM64MOVWUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (MOVDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpARM64MOVDload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (FMOVSload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + v.reset(OpARM64FMOVSload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (FMOVDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(OpARM64FMOVDload) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueARM64_OpLocalAddr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (MOVDaddr {sym} (SPanchored base mem)) + for { + t := v.Type + sym := auxToSym(v.Aux) + 
base := v_0 + mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(OpARM64MOVDaddr) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) + v.AddArg(v0) + return true + } + // match: (LocalAddr {sym} base _) + // cond: !t.Elem().HasPointers() + // result: (MOVDaddr {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(OpARM64MOVDaddr) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + return false +} +func rewriteValueARM64_OpLsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SLL) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Lsh16x16 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpLsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SLL) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Lsh16x32 x y) + // 
cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpLsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SLL) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Lsh16x64 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] y)) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpLsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SLL) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Lsh16x8 x y) + // cond: !shiftIsBounded(v) + 
// result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpLsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SLL) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Lsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpLsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SLL) + v.Type = t + 
v.AddArg2(x, y) + return true + } + // match: (Lsh32x32 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpLsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SLL) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Lsh32x64 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] y)) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpLsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SLL) + v.Type = t + v.AddArg2(x, y) + return 
true + } + // match: (Lsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpLsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SLL) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Lsh64x16 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpLsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if 
!(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SLL) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Lsh64x32 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpLsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SLL) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Lsh64x64 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] y)) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpLsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + 
break + } + v.reset(OpARM64SLL) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Lsh64x8 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpLsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SLL) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Lsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpLsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x32 x y) + // cond: shiftIsBounded(v) + // result: 
(SLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SLL) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Lsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpLsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SLL) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Lsh8x64 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] y)) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpLsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + t := v.Type + 
x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SLL) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Lsh8x8 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpMod16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16 x y) + // result: (MODW (SignExt16to32 x) (SignExt16to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64MODW) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM64_OpMod16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16u x y) + // result: (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64UMODW) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM64_OpMod32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Mod32 x y) + // result: (MODW x y) + for { + x := v_0 + y := v_1 + v.reset(OpARM64MODW) + v.AddArg2(x, y) + return true + } +} +func rewriteValueARM64_OpMod64(v *Value) bool { + v_1 := v.Args[1] + 
v_0 := v.Args[0] + // match: (Mod64 x y) + // result: (MOD x y) + for { + x := v_0 + y := v_1 + v.reset(OpARM64MOD) + v.AddArg2(x, y) + return true + } +} +func rewriteValueARM64_OpMod8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8 x y) + // result: (MODW (SignExt8to32 x) (SignExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64MODW) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM64_OpMod8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8u x y) + // result: (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64UMODW) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueARM64_OpMove(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Move [0] _ _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBUload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64MOVBstore) + v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] dst src mem) + // result: (MOVHstore dst (MOVHUload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64MOVHstore) + v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) + v0.AddArg2(src, mem) + 
v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [3] dst src mem) + // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [4] dst src mem) + // result: (MOVWstore dst (MOVWUload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64MOVWstore) + v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [5] dst src mem) + // result: (MOVBstore [4] dst (MOVBUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 5 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64MOVBstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [6] dst src mem) + // result: (MOVHstore [4] dst (MOVHUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64MOVHstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := 
b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [7] dst src mem) + // result: (MOVWstore [3] dst (MOVWUload [3] src mem) (MOVWstore dst (MOVWUload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 7 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64MOVWstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [8] dst src mem) + // result: (MOVDstore dst (MOVDload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64MOVDstore) + v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [9] dst src mem) + // result: (MOVBstore [8] dst (MOVBUload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 9 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64MOVBstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [10] dst src mem) + // result: (MOVHstore [8] dst (MOVHUload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 10 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64MOVHstore) 
+ v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [11] dst src mem) + // result: (MOVDstore [3] dst (MOVDload [3] src mem) (MOVDstore dst (MOVDload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 11 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64MOVDstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [12] dst src mem) + // result: (MOVWstore [8] dst (MOVWUload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 12 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [13] dst src mem) + // result: (MOVDstore [5] dst (MOVDload [5] src mem) (MOVDstore dst (MOVDload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 13 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64MOVDstore) + v.AuxInt = int32ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(5) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, 
types.TypeMem) + v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [14] dst src mem) + // result: (MOVDstore [6] dst (MOVDload [6] src mem) (MOVDstore dst (MOVDload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 14 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64MOVDstore) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(6) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [15] dst src mem) + // result: (MOVDstore [7] dst (MOVDload [7] src mem) (MOVDstore dst (MOVDload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 15 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64MOVDstore) + v.AuxInt = int32ToAuxInt(7) + v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(7) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [16] dst src mem) + // result: (STP dst (Select0 (LDP src mem)) (Select1 (LDP src mem)) mem) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64STP) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64)) + v1.AddArg2(src, mem) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) + v2.AddArg(v1) + v.AddArg4(dst, v0, v2, mem) + return true + } + // match: (Move [32] dst src mem) + // result: (STP [16] dst (Select0 (LDP [16] src mem)) (Select1 (LDP [16] src 
mem)) (STP dst (Select0 (LDP src mem)) (Select1 (LDP src mem)) mem)) + for { + if auxIntToInt64(v.AuxInt) != 32 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64STP) + v.AuxInt = int32ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64)) + v1.AuxInt = int32ToAuxInt(16) + v1.AddArg2(src, mem) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) + v2.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v5 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64)) + v5.AddArg2(src, mem) + v4.AddArg(v5) + v6 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) + v6.AddArg(v5) + v3.AddArg4(dst, v4, v6, mem) + v.AddArg4(dst, v0, v2, v3) + return true + } + // match: (Move [48] dst src mem) + // result: (STP [32] dst (Select0 (LDP [32] src mem)) (Select1 (LDP [32] src mem)) (STP [16] dst (Select0 (LDP [16] src mem)) (Select1 (LDP [16] src mem)) (STP dst (Select0 (LDP src mem)) (Select1 (LDP src mem)) mem))) + for { + if auxIntToInt64(v.AuxInt) != 48 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64STP) + v.AuxInt = int32ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64)) + v1.AuxInt = int32ToAuxInt(32) + v1.AddArg2(src, mem) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) + v2.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v3.AuxInt = int32ToAuxInt(16) + v4 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v5 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64)) + v5.AuxInt = int32ToAuxInt(16) + v5.AddArg2(src, mem) + v4.AddArg(v5) + v6 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) + v6.AddArg(v5) + v7 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v8 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v9 := 
b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64)) + v9.AddArg2(src, mem) + v8.AddArg(v9) + v10 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) + v10.AddArg(v9) + v7.AddArg4(dst, v8, v10, mem) + v3.AddArg4(dst, v4, v6, v7) + v.AddArg4(dst, v0, v2, v3) + return true + } + // match: (Move [64] dst src mem) + // result: (STP [48] dst (Select0 (LDP [48] src mem)) (Select1 (LDP [48] src mem)) (STP [32] dst (Select0 (LDP [32] src mem)) (Select1 (LDP [32] src mem)) (STP [16] dst (Select0 (LDP [16] src mem)) (Select1 (LDP [16] src mem)) (STP dst (Select0 (LDP src mem)) (Select1 (LDP src mem)) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 64 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpARM64STP) + v.AuxInt = int32ToAuxInt(48) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64)) + v1.AuxInt = int32ToAuxInt(48) + v1.AddArg2(src, mem) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) + v2.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v3.AuxInt = int32ToAuxInt(32) + v4 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v5 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64)) + v5.AuxInt = int32ToAuxInt(32) + v5.AddArg2(src, mem) + v4.AddArg(v5) + v6 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) + v6.AddArg(v5) + v7 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v7.AuxInt = int32ToAuxInt(16) + v8 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v9 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64)) + v9.AuxInt = int32ToAuxInt(16) + v9.AddArg2(src, mem) + v8.AddArg(v9) + v10 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) + v10.AddArg(v9) + v11 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v12 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v13 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64)) + v13.AddArg2(src, mem) + v12.AddArg(v13) + v14 := b.NewValue0(v.Pos, 
OpSelect1, typ.UInt64) + v14.AddArg(v13) + v11.AddArg4(dst, v12, v14, mem) + v7.AddArg4(dst, v8, v10, v11) + v3.AddArg4(dst, v4, v6, v7) + v.AddArg4(dst, v0, v2, v3) + return true + } + // match: (Move [s] dst src mem) + // cond: s%16 != 0 && s%16 <= 8 && s > 16 + // result: (Move [8] (OffPtr dst [s-8]) (OffPtr src [s-8]) (Move [s-s%16] dst src mem)) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s%16 != 0 && s%16 <= 8 && s > 16) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s - 8) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s - 8) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(s - s%16) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) + return true + } + // match: (Move [s] dst src mem) + // cond: s%16 != 0 && s%16 > 8 && s > 16 + // result: (Move [16] (OffPtr dst [s-16]) (OffPtr src [s-16]) (Move [s-s%16] dst src mem)) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s%16 != 0 && s%16 > 8 && s > 16) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s - 16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s - 16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(s - s%16) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s) + // result: (DUFFCOPY [8 * (64 - s/16)] dst src mem) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) { + break + } + v.reset(OpARM64DUFFCOPY) + v.AuxInt = 
int64ToAuxInt(8 * (64 - s/16)) + v.AddArg3(dst, src, mem) + return true + } + // match: (Move [s] dst src mem) + // cond: s%16 == 0 && (s > 16*64 || config.noDuffDevice) && logLargeCopy(v, s) + // result: (LoweredMove dst src (ADDconst src [s-16]) mem) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s%16 == 0 && (s > 16*64 || config.noDuffDevice) && logLargeCopy(v, s)) { + break + } + v.reset(OpARM64LoweredMove) + v0 := b.NewValue0(v.Pos, OpARM64ADDconst, src.Type) + v0.AuxInt = int64ToAuxInt(s - 16) + v0.AddArg(src) + v.AddArg4(dst, src, v0, mem) + return true + } + return false +} +func rewriteValueARM64_OpNeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq16 x y) + // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpNeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq32 x y) + // result: (NotEqual (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpNeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq32F x y) + // result: (NotEqual (FCMPS x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpNeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq64 x y) + // result: (NotEqual (CMP x y)) + for { + x 
:= v_0 + y := v_1 + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpNeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq64F x y) + // result: (NotEqual (FCMPD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpNeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq8 x y) + // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpNeqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (NeqPtr x y) + // result: (NotEqual (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpNot(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Not x) + // result: (XOR (MOVDconst [1]) x) + for { + x := v_0 + v.reset(OpARM64XOR) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueARM64_OpOffPtr(v *Value) bool { + v_0 := v.Args[0] + // match: (OffPtr [off] ptr:(SP)) + // cond: is32Bit(off) + // result: (MOVDaddr [int32(off)] ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + if ptr.Op != OpSP || !(is32Bit(off)) { + break + } + v.reset(OpARM64MOVDaddr) + 
v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) + return true + } + // match: (OffPtr [off] ptr) + // result: (ADDconst [off] ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(OpARM64ADDconst) + v.AuxInt = int64ToAuxInt(off) + v.AddArg(ptr) + return true + } +} +func rewriteValueARM64_OpPanicBounds(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 0 + // result: (LoweredPanicBoundsA [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 0) { + break + } + v.reset(OpARM64LoweredPanicBoundsA) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 1 + // result: (LoweredPanicBoundsB [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 1) { + break + } + v.reset(OpARM64LoweredPanicBoundsB) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 2 + // result: (LoweredPanicBoundsC [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 2) { + break + } + v.reset(OpARM64LoweredPanicBoundsC) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + return false +} +func rewriteValueARM64_OpPopCount16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount16 x) + // result: (FMOVDfpgp (VUADDLV (VCNT (FMOVDgpfp (ZeroExt16to64 x))))) + for { + t := v.Type + x := v_0 + v.reset(OpARM64FMOVDfpgp) + v.Type = t + v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64) + v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64) + v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + 
v3.AddArg(x) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpPopCount32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount32 x) + // result: (FMOVDfpgp (VUADDLV (VCNT (FMOVDgpfp (ZeroExt32to64 x))))) + for { + t := v.Type + x := v_0 + v.reset(OpARM64FMOVDfpgp) + v.Type = t + v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64) + v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64) + v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(x) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpPopCount64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount64 x) + // result: (FMOVDfpgp (VUADDLV (VCNT (FMOVDgpfp x)))) + for { + t := v.Type + x := v_0 + v.reset(OpARM64FMOVDfpgp) + v.Type = t + v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64) + v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64) + v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpPrefetchCache(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PrefetchCache addr mem) + // result: (PRFM [0] addr mem) + for { + addr := v_0 + mem := v_1 + v.reset(OpARM64PRFM) + v.AuxInt = int64ToAuxInt(0) + v.AddArg2(addr, mem) + return true + } +} +func rewriteValueARM64_OpPrefetchCacheStreamed(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PrefetchCacheStreamed addr mem) + // result: (PRFM [1] addr mem) + for { + addr := v_0 + mem := v_1 + v.reset(OpARM64PRFM) + v.AuxInt = int64ToAuxInt(1) + v.AddArg2(addr, mem) + return true + } +} +func rewriteValueARM64_OpPubBarrier(v *Value) bool { + v_0 := v.Args[0] + // match: (PubBarrier mem) + // result: (DMB [0xe] mem) + for { + mem := v_0 + 
v.reset(OpARM64DMB) + v.AuxInt = int64ToAuxInt(0xe) + v.AddArg(mem) + return true + } +} +func rewriteValueARM64_OpRotateLeft16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft16 x (MOVDconst [c])) + // result: (Or16 (Lsh16x64 x (MOVDconst [c&15])) (Rsh16Ux64 x (MOVDconst [-c&15]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr16) + v0 := b.NewValue0(v.Pos, OpLsh16x64, t) + v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 15) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 15) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + // match: (RotateLeft16 x y) + // result: (RORW (ORshiftLL (ZeroExt16to32 x) (ZeroExt16to32 x) [16]) (NEG y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpARM64RORW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpARM64ORshiftLL, typ.UInt32) + v0.AuxInt = int64ToAuxInt(16) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) + v2 := b.NewValue0(v.Pos, OpARM64NEG, typ.Int64) + v2.AddArg(y) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueARM64_OpRotateLeft32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RotateLeft32 x y) + // result: (RORW x (NEG y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64RORW) + v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueARM64_OpRotateLeft64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RotateLeft64 x y) + // result: (ROR x (NEG y)) + for { + x := v_0 + y := v_1 + v.reset(OpARM64ROR) + v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func 
rewriteValueARM64_OpRotateLeft8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft8 x (MOVDconst [c])) + // result: (Or8 (Lsh8x64 x (MOVDconst [c&7])) (Rsh8Ux64 x (MOVDconst [-c&7]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpARM64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpLsh8x64, t) + v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 7) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 7) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + // match: (RotateLeft8 x y) + // result: (OR (SLL x (ANDconst [7] y)) (SRL (ZeroExt8to64 x) (ANDconst [7] (NEG y)))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpARM64OR) + v.Type = t + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v1 := b.NewValue0(v.Pos, OpARM64ANDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(7) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpARM64SRL, t) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpARM64ANDconst, typ.Int64) + v4.AuxInt = int64ToAuxInt(7) + v5 := b.NewValue0(v.Pos, OpARM64NEG, typ.Int64) + v5.AddArg(y) + v4.AddArg(v5) + v2.AddArg2(v3, v4) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueARM64_OpRsh16Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt16to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SRL 
(ZeroExt16to64 x) y) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } + return false +} +func rewriteValueARM64_OpRsh16Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt16to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt16to64 x) y) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } + return false +} +func rewriteValueARM64_OpRsh16Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + 
typ := &b.Func.Config.Types + // match: (Rsh16Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt16to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt16to64 x) y) (Const64 [0]) (CMPconst [64] y)) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } + return false +} +func rewriteValueARM64_OpRsh16Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt16to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt16to64 x) y) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 
int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } + return false +} +func rewriteValueARM64_OpRsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16x16 x y) + // cond: !shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + for { + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) + v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValueARM64_OpRsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16x32 x y) + // cond: !shiftIsBounded(v) + // 
result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + for { + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) + v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValueARM64_OpRsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16x64 x y) + // cond: !shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] y))) + for { + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) + v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v3.AddArg(y) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValueARM64_OpRsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x8 x y) + // 
cond: shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16x8 x y) + // cond: !shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + for { + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) + v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValueARM64_OpRsh32Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt32to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh32Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt32to64 x) y) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpConst64, 
t) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } + return false +} +func rewriteValueARM64_OpRsh32Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt32to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh32Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt32to64 x) y) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } + return false +} +func rewriteValueARM64_OpRsh32Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt32to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh32Ux64 x y) + // cond: 
!shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt32to64 x) y) (Const64 [0]) (CMPconst [64] y)) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } + return false +} +func rewriteValueARM64_OpRsh32Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt32to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh32Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt32to64 x) y) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } + return false +} +func rewriteValueARM64_OpRsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // 
match: (Rsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt32to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + for { + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) + v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValueARM64_OpRsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt32to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh32x32 x y) + // cond: !shiftIsBounded(v) + // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + for { + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) + v2 := b.NewValue0(v.Pos, OpConst64, 
y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValueARM64_OpRsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt32to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh32x64 x y) + // cond: !shiftIsBounded(v) + // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] y))) + for { + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) + v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v3.AddArg(y) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValueARM64_OpRsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt32to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (SRA (SignExt32to64 x) (CSEL 
[OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + for { + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) + v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValueARM64_OpRsh64Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRL) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Rsh64Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SRL x y) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpRsh64Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { 
+ break + } + v.reset(OpARM64SRL) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Rsh64Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SRL x y) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpRsh64Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRL) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Rsh64Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SRL x y) (Const64 [0]) (CMPconst [64] y)) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpRsh64Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRL x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + 
v.reset(OpARM64SRL) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Rsh64Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SRL x y) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } + return false +} +func rewriteValueARM64_OpRsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SRA x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Rsh64x16 x y) + // cond: !shiftIsBounded(v) + // result: (SRA x (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + for { + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v0.AuxInt = opToAuxInt(OpARM64LessThanU) + v1 := b.NewValue0(v.Pos, OpConst64, y.Type) + v1.AuxInt = int64ToAuxInt(63) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueARM64_OpRsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SRA x 
y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Rsh64x32 x y) + // cond: !shiftIsBounded(v) + // result: (SRA x (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + for { + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v0.AuxInt = opToAuxInt(OpARM64LessThanU) + v1 := b.NewValue0(v.Pos, OpConst64, y.Type) + v1.AuxInt = int64ToAuxInt(63) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueARM64_OpRsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SRA x y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Rsh64x64 x y) + // cond: !shiftIsBounded(v) + // result: (SRA x (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] y))) + for { + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v0.AuxInt = opToAuxInt(OpARM64LessThanU) + v1 := b.NewValue0(v.Pos, OpConst64, y.Type) + v1.AuxInt = int64ToAuxInt(63) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v2.AddArg(y) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueARM64_OpRsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SRA x y) + for { + t := v.Type + x 
:= v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v.Type = t + v.AddArg2(x, y) + return true + } + // match: (Rsh64x8 x y) + // cond: !shiftIsBounded(v) + // result: (SRA x (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + for { + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v0.AuxInt = opToAuxInt(OpARM64LessThanU) + v1 := b.NewValue0(v.Pos, OpConst64, y.Type) + v1.AuxInt = int64ToAuxInt(63) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueARM64_OpRsh8Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt8to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt8to64 x) y) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + 
} + return false +} +func rewriteValueARM64_OpRsh8Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt8to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt8to64 x) y) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } + return false +} +func rewriteValueARM64_OpRsh8Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt8to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt8to64 x) y) (Const64 [0]) (CMPconst [64] y)) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + 
v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } + return false +} +func rewriteValueARM64_OpRsh8Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt8to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (CSEL [OpARM64LessThanU] (SRL (ZeroExt8to64 x) y) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64CSEL) + v.AuxInt = opToAuxInt(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } + return false +} +func rewriteValueARM64_OpRsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + 
v.AddArg2(v0, y) + return true + } + // match: (Rsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + for { + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) + v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValueARM64_OpRsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + for { + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) + v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } + return 
false +} +func rewriteValueARM64_OpRsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8x64 x y) + // cond: !shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] y))) + for { + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.AuxInt = opToAuxInt(OpARM64LessThanU) + v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v3.AddArg(y) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValueARM64_OpRsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) y) + for { + t := v.Type + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8x8 x y) + // cond: !shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + for { + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.AuxInt = 
opToAuxInt(OpARM64LessThanU) + v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValueARM64_OpSelect0(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select0 (Mul64uhilo x y)) + // result: (UMULH x y) + for { + if v_0.Op != OpMul64uhilo { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpARM64UMULH) + v.AddArg2(x, y) + return true + } + // match: (Select0 (Add64carry x y c)) + // result: (Select0 (ADCSflags x y (Select1 (ADDSconstflags [-1] c)))) + for { + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpARM64ADCSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpARM64ADDSconstflags, types.NewTuple(typ.UInt64, types.TypeFlags)) + v2.AuxInt = int64ToAuxInt(-1) + v2.AddArg(c) + v1.AddArg(v2) + v0.AddArg3(x, y, v1) + v.AddArg(v0) + return true + } + // match: (Select0 (Sub64borrow x y bo)) + // result: (Select0 (SBCSflags x y (Select1 (NEGSflags bo)))) + for { + if v_0.Op != OpSub64borrow { + break + } + bo := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpARM64SBCSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpARM64NEGSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) + v2.AddArg(bo) + v1.AddArg(v2) + v0.AddArg3(x, y, v1) + v.AddArg(v0) + return true + } + // match: (Select0 (Mul64uover x y)) + // result: (MUL x y) + for { + if v_0.Op != OpMul64uover { + 
break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpARM64MUL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueARM64_OpSelect1(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select1 (Mul64uhilo x y)) + // result: (MUL x y) + for { + if v_0.Op != OpMul64uhilo { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpARM64MUL) + v.AddArg2(x, y) + return true + } + // match: (Select1 (Add64carry x y c)) + // result: (ADCzerocarry (Select1 (ADCSflags x y (Select1 (ADDSconstflags [-1] c))))) + for { + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpARM64ADCzerocarry) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpARM64ADCSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) + v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpARM64ADDSconstflags, types.NewTuple(typ.UInt64, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(-1) + v3.AddArg(c) + v2.AddArg(v3) + v1.AddArg3(x, y, v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select1 (Sub64borrow x y bo)) + // result: (NEG (NGCzerocarry (Select1 (SBCSflags x y (Select1 (NEGSflags bo)))))) + for { + if v_0.Op != OpSub64borrow { + break + } + bo := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpARM64NEG) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpARM64NGCzerocarry, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpARM64SBCSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpARM64NEGSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) + v4.AddArg(bo) + v3.AddArg(v4) + v2.AddArg3(x, y, v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select1 (Mul64uover x y)) + // result: (NotEqual (CMPconst (UMULH x 
y) [0])) + for { + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARM64UMULH, typ.UInt64) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM64_OpSelectN(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore _ src s3:(MOVDstore {t} _ dst mem))))) + // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(s1, s2, s3, call) + // result: (Move [sz] dst src mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpARM64CALLstatic || len(call.Args) != 1 { + break + } + sym := auxToCall(call.Aux) + s1 := call.Args[0] + if s1.Op != OpARM64MOVDstore { + break + } + _ = s1.Args[2] + s1_1 := s1.Args[1] + if s1_1.Op != OpARM64MOVDconst { + break + } + sz := auxIntToInt64(s1_1.AuxInt) + s2 := s1.Args[2] + if s2.Op != OpARM64MOVDstore { + break + } + _ = s2.Args[2] + src := s2.Args[1] + s3 := s2.Args[2] + if s3.Op != OpARM64MOVDstore { + break + } + mem := s3.Args[2] + dst := s3.Args[1] + if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(s1, s2, s3, call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(sz) + v.AddArg3(dst, src, mem) + return true + } + // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem)) + // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call) + // result: (Move [sz] dst src mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != 
OpARM64CALLstatic || len(call.Args) != 4 { + break + } + sym := auxToCall(call.Aux) + mem := call.Args[3] + dst := call.Args[0] + src := call.Args[1] + call_2 := call.Args[2] + if call_2.Op != OpARM64MOVDconst { + break + } + sz := auxIntToInt64(call_2.AuxInt) + if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(sz) + v.AddArg3(dst, src, mem) + return true + } + return false +} +func rewriteValueARM64_OpSlicemask(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Slicemask x) + // result: (SRAconst (NEG x) [63]) + for { + t := v.Type + x := v_0 + v.reset(OpARM64SRAconst) + v.AuxInt = int64ToAuxInt(63) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpStore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Store {t} ptr val mem) + // cond: t.Size() == 1 + // result: (MOVBstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 1) { + break + } + v.reset(OpARM64MOVBstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 2 + // result: (MOVHstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 2) { + break + } + v.reset(OpARM64MOVHstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && !t.IsFloat() + // result: (MOVWstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && !t.IsFloat()) { + break + } + v.reset(OpARM64MOVWstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && !t.IsFloat() + // result: (MOVDstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + 
mem := v_2 + if !(t.Size() == 8 && !t.IsFloat()) { + break + } + v.reset(OpARM64MOVDstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && t.IsFloat() + // result: (FMOVSstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && t.IsFloat()) { + break + } + v.reset(OpARM64FMOVSstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && t.IsFloat() + // result: (FMOVDstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && t.IsFloat()) { + break + } + v.reset(OpARM64FMOVDstore) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueARM64_OpZero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Zero [0] _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_1 + v.copyOf(mem) + return true + } + // match: (Zero [1] ptr mem) + // result: (MOVBstore ptr (MOVDconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64MOVBstore) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [2] ptr mem) + // result: (MOVHstore ptr (MOVDconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64MOVHstore) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [4] ptr mem) + // result: (MOVWstore ptr (MOVDconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64MOVWstore) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 
int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [3] ptr mem) + // result: (MOVBstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [5] ptr mem) + // result: (MOVBstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 5 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64MOVBstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [6] ptr mem) + // result: (MOVHstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64MOVHstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [7] ptr mem) + // result: (MOVWstore [3] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 7 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64MOVWstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [8] ptr mem) + // 
result: (MOVDstore ptr (MOVDconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64MOVDstore) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [9] ptr mem) + // result: (MOVBstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 9 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64MOVBstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [10] ptr mem) + // result: (MOVHstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 10 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64MOVHstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [11] ptr mem) + // result: (MOVDstore [3] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 11 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64MOVDstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [12] ptr mem) + // result: (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 12 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, 
OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [13] ptr mem) + // result: (MOVDstore [5] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 13 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64MOVDstore) + v.AuxInt = int32ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [14] ptr mem) + // result: (MOVDstore [6] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 14 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64MOVDstore) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [15] ptr mem) + // result: (MOVDstore [7] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 15 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64MOVDstore) + v.AuxInt = int32ToAuxInt(7) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [16] ptr mem) + // result: (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64STP) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg4(ptr, v0, v0, mem) + return true + } + // match: 
(Zero [32] ptr mem) + // result: (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 32 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64STP) + v.AuxInt = int32ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg4(ptr, v0, v0, mem) + v.AddArg4(ptr, v0, v0, v1) + return true + } + // match: (Zero [48] ptr mem) + // result: (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 48 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64STP) + v.AuxInt = int32ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v1.AuxInt = int32ToAuxInt(16) + v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v2.AuxInt = int32ToAuxInt(0) + v2.AddArg4(ptr, v0, v0, mem) + v1.AddArg4(ptr, v0, v0, v2) + v.AddArg4(ptr, v0, v0, v1) + return true + } + // match: (Zero [64] ptr mem) + // result: (STP [48] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 64 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpARM64STP) + v.AuxInt = int32ToAuxInt(48) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v1.AuxInt = int32ToAuxInt(32) + v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v2.AuxInt = int32ToAuxInt(16) + v3 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v3.AuxInt = int32ToAuxInt(0) + v3.AddArg4(ptr, v0, v0, mem) + v2.AddArg4(ptr, v0, v0, v3) + 
v1.AddArg4(ptr, v0, v0, v2) + v.AddArg4(ptr, v0, v0, v1) + return true + } + // match: (Zero [s] ptr mem) + // cond: s%16 != 0 && s%16 <= 8 && s > 16 + // result: (Zero [8] (OffPtr ptr [s-8]) (Zero [s-s%16] ptr mem)) + for { + s := auxIntToInt64(v.AuxInt) + ptr := v_0 + mem := v_1 + if !(s%16 != 0 && s%16 <= 8 && s > 16) { + break + } + v.reset(OpZero) + v.AuxInt = int64ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type) + v0.AuxInt = int64ToAuxInt(s - 8) + v0.AddArg(ptr) + v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v1.AuxInt = int64ToAuxInt(s - s%16) + v1.AddArg2(ptr, mem) + v.AddArg2(v0, v1) + return true + } + // match: (Zero [s] ptr mem) + // cond: s%16 != 0 && s%16 > 8 && s > 16 + // result: (Zero [16] (OffPtr ptr [s-16]) (Zero [s-s%16] ptr mem)) + for { + s := auxIntToInt64(v.AuxInt) + ptr := v_0 + mem := v_1 + if !(s%16 != 0 && s%16 > 8 && s > 16) { + break + } + v.reset(OpZero) + v.AuxInt = int64ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type) + v0.AuxInt = int64ToAuxInt(s - 16) + v0.AddArg(ptr) + v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v1.AuxInt = int64ToAuxInt(s - s%16) + v1.AddArg2(ptr, mem) + v.AddArg2(v0, v1) + return true + } + // match: (Zero [s] ptr mem) + // cond: s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice + // result: (DUFFZERO [4 * (64 - s/16)] ptr mem) + for { + s := auxIntToInt64(v.AuxInt) + ptr := v_0 + mem := v_1 + if !(s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice) { + break + } + v.reset(OpARM64DUFFZERO) + v.AuxInt = int64ToAuxInt(4 * (64 - s/16)) + v.AddArg2(ptr, mem) + return true + } + // match: (Zero [s] ptr mem) + // cond: s%16 == 0 && (s > 16*64 || config.noDuffDevice) + // result: (LoweredZero ptr (ADDconst [s-16] ptr) mem) + for { + s := auxIntToInt64(v.AuxInt) + ptr := v_0 + mem := v_1 + if !(s%16 == 0 && (s > 16*64 || config.noDuffDevice)) { + break + } + v.reset(OpARM64LoweredZero) + v0 := b.NewValue0(v.Pos, OpARM64ADDconst, ptr.Type) + v0.AuxInt = int64ToAuxInt(s - 
16) + v0.AddArg(ptr) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} +func rewriteBlockARM64(b *Block) bool { + typ := &b.Func.Config.Types + switch b.Kind { + case BlockARM64EQ: + // match: (EQ (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (EQ (TST x y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64EQ, v0) + return true + } + break + } + // match: (EQ (CMPconst [0] x:(ANDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (EQ (TSTconst [c] y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + b.resetWithControl(BlockARM64EQ, v0) + return true + } + // match: (EQ (CMPWconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (EQ (TSTW x y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64EQ, v0) + return true + } + break + } + // match: (EQ (CMPWconst [0] x:(ANDconst [c] 
y)) yes no) + // cond: x.Uses == 1 + // result: (EQ (TSTWconst [int32(c)] y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + b.resetWithControl(BlockARM64EQ, v0) + return true + } + // match: (EQ (CMPconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (EQ (CMNconst [c] y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + b.resetWithControl(BlockARM64EQ, v0) + return true + } + // match: (EQ (CMPWconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (EQ (CMNWconst [int32(c)] y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + b.resetWithControl(BlockARM64EQ, v0) + return true + } + // match: (EQ (CMPconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (EQ (CMN x y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 
1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64EQ, v0) + return true + } + break + } + // match: (EQ (CMPWconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (EQ (CMNW x y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64EQ, v0) + return true + } + break + } + // match: (EQ (CMP x z:(NEG y)) yes no) + // cond: z.Uses == 1 + // result: (EQ (CMN x y) yes no) + for b.Controls[0].Op == OpARM64CMP { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpARM64NEG { + break + } + y := z.Args[0] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64EQ, v0) + return true + } + // match: (EQ (CMPW x z:(NEG y)) yes no) + // cond: z.Uses == 1 + // result: (EQ (CMNW x y) yes no) + for b.Controls[0].Op == OpARM64CMPW { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpARM64NEG { + break + } + y := z.Args[0] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64EQ, v0) + return true + } + // match: (EQ (CMPconst [0] x) yes no) + // result: (Z x yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockARM64Z, x) + return true + } + // 
match: (EQ (CMPWconst [0] x) yes no) + // result: (ZW x yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockARM64ZW, x) + return true + } + // match: (EQ (CMPconst [0] z:(MADD a x y)) yes no) + // cond: z.Uses==1 + // result: (EQ (CMN a (MUL x y)) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADD { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64EQ, v0) + return true + } + // match: (EQ (CMPconst [0] z:(MSUB a x y)) yes no) + // cond: z.Uses==1 + // result: (EQ (CMP a (MUL x y)) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUB { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64EQ, v0) + return true + } + // match: (EQ (CMPWconst [0] z:(MADDW a x y)) yes no) + // cond: z.Uses==1 + // result: (EQ (CMNW a (MULW x y)) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADDW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64EQ, v0) + return 
true + } + // match: (EQ (CMPWconst [0] z:(MSUBW a x y)) yes no) + // cond: z.Uses==1 + // result: (EQ (CMPW a (MULW x y)) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUBW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64EQ, v0) + return true + } + // match: (EQ (TSTconst [c] x) yes no) + // cond: oneBit(c) + // result: (TBZ [int64(ntz64(c))] x yes no) + for b.Controls[0].Op == OpARM64TSTconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(oneBit(c)) { + break + } + b.resetWithControl(BlockARM64TBZ, x) + b.AuxInt = int64ToAuxInt(int64(ntz64(c))) + return true + } + // match: (EQ (TSTWconst [c] x) yes no) + // cond: oneBit(int64(uint32(c))) + // result: (TBZ [int64(ntz64(int64(uint32(c))))] x yes no) + for b.Controls[0].Op == OpARM64TSTWconst { + v_0 := b.Controls[0] + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(oneBit(int64(uint32(c)))) { + break + } + b.resetWithControl(BlockARM64TBZ, x) + b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c))))) + return true + } + // match: (EQ (FlagConstant [fc]) yes no) + // cond: fc.eq() + // result: (First yes no) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.eq()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (EQ (FlagConstant [fc]) yes no) + // cond: !fc.eq() + // result: (First no yes) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.eq()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (EQ (InvertFlags cmp) yes no) + // 
result: (EQ cmp yes no) + for b.Controls[0].Op == OpARM64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARM64EQ, cmp) + return true + } + case BlockARM64FGE: + // match: (FGE (InvertFlags cmp) yes no) + // result: (FLE cmp yes no) + for b.Controls[0].Op == OpARM64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARM64FLE, cmp) + return true + } + case BlockARM64FGT: + // match: (FGT (InvertFlags cmp) yes no) + // result: (FLT cmp yes no) + for b.Controls[0].Op == OpARM64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARM64FLT, cmp) + return true + } + case BlockARM64FLE: + // match: (FLE (InvertFlags cmp) yes no) + // result: (FGE cmp yes no) + for b.Controls[0].Op == OpARM64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARM64FGE, cmp) + return true + } + case BlockARM64FLT: + // match: (FLT (InvertFlags cmp) yes no) + // result: (FGT cmp yes no) + for b.Controls[0].Op == OpARM64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARM64FGT, cmp) + return true + } + case BlockARM64GE: + // match: (GE (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (GE (TST x y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GE, v0) + return true + } + break + } + // match: (GE (CMPconst [0] x:(ANDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (GE (TSTconst [c] y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 
0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + b.resetWithControl(BlockARM64GE, v0) + return true + } + // match: (GE (CMPWconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (GE (TSTW x y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GE, v0) + return true + } + break + } + // match: (GE (CMPWconst [0] x:(ANDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (GE (TSTWconst [int32(c)] y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + b.resetWithControl(BlockARM64GE, v0) + return true + } + // match: (GE (CMPconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (GEnoov (CMNconst [c] y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + 
b.resetWithControl(BlockARM64GEnoov, v0) + return true + } + // match: (GE (CMPWconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (GEnoov (CMNWconst [int32(c)] y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + b.resetWithControl(BlockARM64GEnoov, v0) + return true + } + // match: (GE (CMPconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (GEnoov (CMN x y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GEnoov, v0) + return true + } + break + } + // match: (GE (CMPWconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (GEnoov (CMNW x y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GEnoov, v0) + return true + } + break + } + // match: (GE (CMPconst [0] z:(MADD a x y)) yes no) + // cond: z.Uses==1 + // result: (GEnoov (CMN a (MUL x y)) yes no) + for 
b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADD { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64GEnoov, v0) + return true + } + // match: (GE (CMPconst [0] z:(MSUB a x y)) yes no) + // cond: z.Uses==1 + // result: (GEnoov (CMP a (MUL x y)) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUB { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64GEnoov, v0) + return true + } + // match: (GE (CMPWconst [0] z:(MADDW a x y)) yes no) + // cond: z.Uses==1 + // result: (GEnoov (CMNW a (MULW x y)) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADDW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64GEnoov, v0) + return true + } + // match: (GE (CMPWconst [0] z:(MSUBW a x y)) yes no) + // cond: z.Uses==1 + // result: (GEnoov (CMPW a (MULW x y)) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUBW { + break + } + y := z.Args[2] + a := z.Args[0] + x := 
z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64GEnoov, v0) + return true + } + // match: (GE (CMPWconst [0] x) yes no) + // result: (TBZ [31] x yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockARM64TBZ, x) + b.AuxInt = int64ToAuxInt(31) + return true + } + // match: (GE (CMPconst [0] x) yes no) + // result: (TBZ [63] x yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockARM64TBZ, x) + b.AuxInt = int64ToAuxInt(63) + return true + } + // match: (GE (FlagConstant [fc]) yes no) + // cond: fc.ge() + // result: (First yes no) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.ge()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (GE (FlagConstant [fc]) yes no) + // cond: !fc.ge() + // result: (First no yes) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.ge()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GE (InvertFlags cmp) yes no) + // result: (LE cmp yes no) + for b.Controls[0].Op == OpARM64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARM64LE, cmp) + return true + } + case BlockARM64GEnoov: + // match: (GEnoov (FlagConstant [fc]) yes no) + // cond: fc.geNoov() + // result: (First yes no) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.geNoov()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (GEnoov (FlagConstant [fc]) yes no) + 
// cond: !fc.geNoov() + // result: (First no yes) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.geNoov()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GEnoov (InvertFlags cmp) yes no) + // result: (LEnoov cmp yes no) + for b.Controls[0].Op == OpARM64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARM64LEnoov, cmp) + return true + } + case BlockARM64GT: + // match: (GT (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (GT (TST x y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GT, v0) + return true + } + break + } + // match: (GT (CMPconst [0] x:(ANDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (GT (TSTconst [c] y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + b.resetWithControl(BlockARM64GT, v0) + return true + } + // match: (GT (CMPWconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (GT (TSTW x y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := 
z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GT, v0) + return true + } + break + } + // match: (GT (CMPWconst [0] x:(ANDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (GT (TSTWconst [int32(c)] y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + b.resetWithControl(BlockARM64GT, v0) + return true + } + // match: (GT (CMPconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (GTnoov (CMNconst [c] y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + b.resetWithControl(BlockARM64GTnoov, v0) + return true + } + // match: (GT (CMPWconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (GTnoov (CMNWconst [int32(c)] y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + b.resetWithControl(BlockARM64GTnoov, v0) + return true + } + // match: (GT 
(CMPconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (GTnoov (CMN x y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GTnoov, v0) + return true + } + break + } + // match: (GT (CMPWconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (GTnoov (CMNW x y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64GTnoov, v0) + return true + } + break + } + // match: (GT (CMPconst [0] z:(MADD a x y)) yes no) + // cond: z.Uses==1 + // result: (GTnoov (CMN a (MUL x y)) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADD { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64GTnoov, v0) + return true + } + // match: (GT (CMPconst [0] z:(MSUB a x y)) yes no) + // cond: z.Uses==1 + // result: (GTnoov (CMP a (MUL x y)) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if 
auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUB { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64GTnoov, v0) + return true + } + // match: (GT (CMPWconst [0] z:(MADDW a x y)) yes no) + // cond: z.Uses==1 + // result: (GTnoov (CMNW a (MULW x y)) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADDW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64GTnoov, v0) + return true + } + // match: (GT (CMPWconst [0] z:(MSUBW a x y)) yes no) + // cond: z.Uses==1 + // result: (GTnoov (CMPW a (MULW x y)) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUBW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64GTnoov, v0) + return true + } + // match: (GT (FlagConstant [fc]) yes no) + // cond: fc.gt() + // result: (First yes no) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.gt()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (GT (FlagConstant [fc]) yes no) + // cond: !fc.gt() + // result: (First no yes) + for b.Controls[0].Op == OpARM64FlagConstant 
{ + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.gt()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GT (InvertFlags cmp) yes no) + // result: (LT cmp yes no) + for b.Controls[0].Op == OpARM64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARM64LT, cmp) + return true + } + case BlockARM64GTnoov: + // match: (GTnoov (FlagConstant [fc]) yes no) + // cond: fc.gtNoov() + // result: (First yes no) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.gtNoov()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (GTnoov (FlagConstant [fc]) yes no) + // cond: !fc.gtNoov() + // result: (First no yes) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.gtNoov()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GTnoov (InvertFlags cmp) yes no) + // result: (LTnoov cmp yes no) + for b.Controls[0].Op == OpARM64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARM64LTnoov, cmp) + return true + } + case BlockIf: + // match: (If (Equal cc) yes no) + // result: (EQ cc yes no) + for b.Controls[0].Op == OpARM64Equal { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64EQ, cc) + return true + } + // match: (If (NotEqual cc) yes no) + // result: (NE cc yes no) + for b.Controls[0].Op == OpARM64NotEqual { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64NE, cc) + return true + } + // match: (If (LessThan cc) yes no) + // result: (LT cc yes no) + for b.Controls[0].Op == OpARM64LessThan { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64LT, cc) + return true + } + // match: (If (LessThanU cc) yes no) + // result: (ULT cc yes no) + for b.Controls[0].Op == OpARM64LessThanU { + v_0 := 
b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64ULT, cc) + return true + } + // match: (If (LessEqual cc) yes no) + // result: (LE cc yes no) + for b.Controls[0].Op == OpARM64LessEqual { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64LE, cc) + return true + } + // match: (If (LessEqualU cc) yes no) + // result: (ULE cc yes no) + for b.Controls[0].Op == OpARM64LessEqualU { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64ULE, cc) + return true + } + // match: (If (GreaterThan cc) yes no) + // result: (GT cc yes no) + for b.Controls[0].Op == OpARM64GreaterThan { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64GT, cc) + return true + } + // match: (If (GreaterThanU cc) yes no) + // result: (UGT cc yes no) + for b.Controls[0].Op == OpARM64GreaterThanU { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64UGT, cc) + return true + } + // match: (If (GreaterEqual cc) yes no) + // result: (GE cc yes no) + for b.Controls[0].Op == OpARM64GreaterEqual { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64GE, cc) + return true + } + // match: (If (GreaterEqualU cc) yes no) + // result: (UGE cc yes no) + for b.Controls[0].Op == OpARM64GreaterEqualU { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64UGE, cc) + return true + } + // match: (If (LessThanF cc) yes no) + // result: (FLT cc yes no) + for b.Controls[0].Op == OpARM64LessThanF { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64FLT, cc) + return true + } + // match: (If (LessEqualF cc) yes no) + // result: (FLE cc yes no) + for b.Controls[0].Op == OpARM64LessEqualF { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64FLE, cc) + return true + } + // match: (If (GreaterThanF cc) yes no) + // result: (FGT cc yes no) + for b.Controls[0].Op == OpARM64GreaterThanF { + v_0 := b.Controls[0] + cc := v_0.Args[0] + 
b.resetWithControl(BlockARM64FGT, cc) + return true + } + // match: (If (GreaterEqualF cc) yes no) + // result: (FGE cc yes no) + for b.Controls[0].Op == OpARM64GreaterEqualF { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64FGE, cc) + return true + } + // match: (If cond yes no) + // result: (TBNZ [0] cond yes no) + for { + cond := b.Controls[0] + b.resetWithControl(BlockARM64TBNZ, cond) + b.AuxInt = int64ToAuxInt(0) + return true + } + case BlockJumpTable: + // match: (JumpTable idx) + // result: (JUMPTABLE {makeJumpTableSym(b)} idx (MOVDaddr {makeJumpTableSym(b)} (SB))) + for { + idx := b.Controls[0] + v0 := b.NewValue0(b.Pos, OpARM64MOVDaddr, typ.Uintptr) + v0.Aux = symToAux(makeJumpTableSym(b)) + v1 := b.NewValue0(b.Pos, OpSB, typ.Uintptr) + v0.AddArg(v1) + b.resetWithControl2(BlockARM64JUMPTABLE, idx, v0) + b.Aux = symToAux(makeJumpTableSym(b)) + return true + } + case BlockARM64LE: + // match: (LE (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (LE (TST x y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LE, v0) + return true + } + break + } + // match: (LE (CMPconst [0] x:(ANDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (LE (TSTconst [c] y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) + v0.AuxInt = 
int64ToAuxInt(c) + v0.AddArg(y) + b.resetWithControl(BlockARM64LE, v0) + return true + } + // match: (LE (CMPWconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (LE (TSTW x y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LE, v0) + return true + } + break + } + // match: (LE (CMPWconst [0] x:(ANDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (LE (TSTWconst [int32(c)] y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + b.resetWithControl(BlockARM64LE, v0) + return true + } + // match: (LE (CMPconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (LEnoov (CMNconst [c] y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + b.resetWithControl(BlockARM64LEnoov, v0) + return true + } + // match: (LE (CMPWconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (LEnoov (CMNWconst [int32(c)] y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst 
{ + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + b.resetWithControl(BlockARM64LEnoov, v0) + return true + } + // match: (LE (CMPconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (LEnoov (CMN x y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LEnoov, v0) + return true + } + break + } + // match: (LE (CMPWconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (LEnoov (CMNW x y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LEnoov, v0) + return true + } + break + } + // match: (LE (CMPconst [0] z:(MADD a x y)) yes no) + // cond: z.Uses==1 + // result: (LEnoov (CMN a (MUL x y)) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADD { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break 
+ } + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64LEnoov, v0) + return true + } + // match: (LE (CMPconst [0] z:(MSUB a x y)) yes no) + // cond: z.Uses==1 + // result: (LEnoov (CMP a (MUL x y)) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUB { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64LEnoov, v0) + return true + } + // match: (LE (CMPWconst [0] z:(MADDW a x y)) yes no) + // cond: z.Uses==1 + // result: (LEnoov (CMNW a (MULW x y)) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADDW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64LEnoov, v0) + return true + } + // match: (LE (CMPWconst [0] z:(MSUBW a x y)) yes no) + // cond: z.Uses==1 + // result: (LEnoov (CMPW a (MULW x y)) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUBW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64LEnoov, v0) + return true + } + // 
match: (LE (FlagConstant [fc]) yes no) + // cond: fc.le() + // result: (First yes no) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.le()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (LE (FlagConstant [fc]) yes no) + // cond: !fc.le() + // result: (First no yes) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.le()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (LE (InvertFlags cmp) yes no) + // result: (GE cmp yes no) + for b.Controls[0].Op == OpARM64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARM64GE, cmp) + return true + } + case BlockARM64LEnoov: + // match: (LEnoov (FlagConstant [fc]) yes no) + // cond: fc.leNoov() + // result: (First yes no) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.leNoov()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (LEnoov (FlagConstant [fc]) yes no) + // cond: !fc.leNoov() + // result: (First no yes) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.leNoov()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (LEnoov (InvertFlags cmp) yes no) + // result: (GEnoov cmp yes no) + for b.Controls[0].Op == OpARM64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARM64GEnoov, cmp) + return true + } + case BlockARM64LT: + // match: (LT (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (LT (TST x y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for 
_i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LT, v0) + return true + } + break + } + // match: (LT (CMPconst [0] x:(ANDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (LT (TSTconst [c] y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + b.resetWithControl(BlockARM64LT, v0) + return true + } + // match: (LT (CMPWconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (LT (TSTW x y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LT, v0) + return true + } + break + } + // match: (LT (CMPWconst [0] x:(ANDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (LT (TSTWconst [int32(c)] y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + b.resetWithControl(BlockARM64LT, v0) + return true + } + // match: (LT 
(CMPconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (LTnoov (CMNconst [c] y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + b.resetWithControl(BlockARM64LTnoov, v0) + return true + } + // match: (LT (CMPWconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (LTnoov (CMNWconst [int32(c)] y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + b.resetWithControl(BlockARM64LTnoov, v0) + return true + } + // match: (LT (CMPconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (LTnoov (CMN x y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LTnoov, v0) + return true + } + break + } + // match: (LT (CMPWconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (LTnoov (CMNW x y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != 
OpARM64ADD { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64LTnoov, v0) + return true + } + break + } + // match: (LT (CMPconst [0] z:(MADD a x y)) yes no) + // cond: z.Uses==1 + // result: (LTnoov (CMN a (MUL x y)) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADD { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64LTnoov, v0) + return true + } + // match: (LT (CMPconst [0] z:(MSUB a x y)) yes no) + // cond: z.Uses==1 + // result: (LTnoov (CMP a (MUL x y)) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUB { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64LTnoov, v0) + return true + } + // match: (LT (CMPWconst [0] z:(MADDW a x y)) yes no) + // cond: z.Uses==1 + // result: (LTnoov (CMNW a (MULW x y)) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADDW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v1 := 
b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64LTnoov, v0) + return true + } + // match: (LT (CMPWconst [0] z:(MSUBW a x y)) yes no) + // cond: z.Uses==1 + // result: (LTnoov (CMPW a (MULW x y)) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUBW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64LTnoov, v0) + return true + } + // match: (LT (CMPWconst [0] x) yes no) + // result: (TBNZ [31] x yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockARM64TBNZ, x) + b.AuxInt = int64ToAuxInt(31) + return true + } + // match: (LT (CMPconst [0] x) yes no) + // result: (TBNZ [63] x yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockARM64TBNZ, x) + b.AuxInt = int64ToAuxInt(63) + return true + } + // match: (LT (FlagConstant [fc]) yes no) + // cond: fc.lt() + // result: (First yes no) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.lt()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (LT (FlagConstant [fc]) yes no) + // cond: !fc.lt() + // result: (First no yes) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.lt()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (LT (InvertFlags cmp) yes no) + // result: (GT cmp yes no) + for 
b.Controls[0].Op == OpARM64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARM64GT, cmp) + return true + } + case BlockARM64LTnoov: + // match: (LTnoov (FlagConstant [fc]) yes no) + // cond: fc.ltNoov() + // result: (First yes no) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.ltNoov()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (LTnoov (FlagConstant [fc]) yes no) + // cond: !fc.ltNoov() + // result: (First no yes) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.ltNoov()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (LTnoov (InvertFlags cmp) yes no) + // result: (GTnoov cmp yes no) + for b.Controls[0].Op == OpARM64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARM64GTnoov, cmp) + return true + } + case BlockARM64NE: + // match: (NE (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (NE (TST x y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64NE, v0) + return true + } + break + } + // match: (NE (CMPconst [0] x:(ANDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (NE (TSTconst [c] y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } 
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + b.resetWithControl(BlockARM64NE, v0) + return true + } + // match: (NE (CMPWconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (NE (TSTW x y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64NE, v0) + return true + } + break + } + // match: (NE (CMPWconst [0] x:(ANDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (NE (TSTWconst [int32(c)] y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + b.resetWithControl(BlockARM64NE, v0) + return true + } + // match: (NE (CMPconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (NE (CMNconst [c] y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + b.resetWithControl(BlockARM64NE, v0) + return true + } + // match: (NE (CMPWconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (NE (CMNWconst 
[int32(c)] y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := auxIntToInt64(x.AuxInt) + y := x.Args[0] + if !(x.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + b.resetWithControl(BlockARM64NE, v0) + return true + } + // match: (NE (CMPconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (NE (CMN x y) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64NE, v0) + return true + } + break + } + // match: (NE (CMPWconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (NE (CMNW x y) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64NE, v0) + return true + } + break + } + // match: (NE (CMP x z:(NEG y)) yes no) + // cond: z.Uses == 1 + // result: (NE (CMN x y) yes no) + for b.Controls[0].Op == OpARM64CMP { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpARM64NEG { + break + } + y := z.Args[0] + if !(z.Uses == 1) { + break + } + v0 := 
b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64NE, v0) + return true + } + // match: (NE (CMPW x z:(NEG y)) yes no) + // cond: z.Uses == 1 + // result: (NE (CMNW x y) yes no) + for b.Controls[0].Op == OpARM64CMPW { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpARM64NEG { + break + } + y := z.Args[0] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARM64NE, v0) + return true + } + // match: (NE (CMPconst [0] x) yes no) + // result: (NZ x yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockARM64NZ, x) + return true + } + // match: (NE (CMPWconst [0] x) yes no) + // result: (NZW x yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockARM64NZW, x) + return true + } + // match: (NE (CMPconst [0] z:(MADD a x y)) yes no) + // cond: z.Uses==1 + // result: (NE (CMN a (MUL x y)) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADD { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64NE, v0) + return true + } + // match: (NE (CMPconst [0] z:(MSUB a x y)) yes no) + // cond: z.Uses==1 + // result: (NE (CMP a (MUL x y)) yes no) + for b.Controls[0].Op == OpARM64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUB { + break + } + y := z.Args[2] + a := 
z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64NE, v0) + return true + } + // match: (NE (CMPWconst [0] z:(MADDW a x y)) yes no) + // cond: z.Uses==1 + // result: (NE (CMNW a (MULW x y)) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MADDW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64NE, v0) + return true + } + // match: (NE (CMPWconst [0] z:(MSUBW a x y)) yes no) + // cond: z.Uses==1 + // result: (NE (CMPW a (MULW x y)) yes no) + for b.Controls[0].Op == OpARM64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpARM64MSUBW { + break + } + y := z.Args[2] + a := z.Args[0] + x := z.Args[1] + if !(z.Uses == 1) { + break + } + v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) + v1.AddArg2(x, y) + v0.AddArg2(a, v1) + b.resetWithControl(BlockARM64NE, v0) + return true + } + // match: (NE (TSTconst [c] x) yes no) + // cond: oneBit(c) + // result: (TBNZ [int64(ntz64(c))] x yes no) + for b.Controls[0].Op == OpARM64TSTconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(oneBit(c)) { + break + } + b.resetWithControl(BlockARM64TBNZ, x) + b.AuxInt = int64ToAuxInt(int64(ntz64(c))) + return true + } + // match: (NE (TSTWconst [c] x) yes no) + // cond: oneBit(int64(uint32(c))) + // result: (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no) + for b.Controls[0].Op == OpARM64TSTWconst { + v_0 := 
b.Controls[0] + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(oneBit(int64(uint32(c)))) { + break + } + b.resetWithControl(BlockARM64TBNZ, x) + b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c))))) + return true + } + // match: (NE (FlagConstant [fc]) yes no) + // cond: fc.ne() + // result: (First yes no) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.ne()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (NE (FlagConstant [fc]) yes no) + // cond: !fc.ne() + // result: (First no yes) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.ne()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (NE (InvertFlags cmp) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpARM64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARM64NE, cmp) + return true + } + case BlockARM64NZ: + // match: (NZ (Equal cc) yes no) + // result: (EQ cc yes no) + for b.Controls[0].Op == OpARM64Equal { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64EQ, cc) + return true + } + // match: (NZ (NotEqual cc) yes no) + // result: (NE cc yes no) + for b.Controls[0].Op == OpARM64NotEqual { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64NE, cc) + return true + } + // match: (NZ (LessThan cc) yes no) + // result: (LT cc yes no) + for b.Controls[0].Op == OpARM64LessThan { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64LT, cc) + return true + } + // match: (NZ (LessThanU cc) yes no) + // result: (ULT cc yes no) + for b.Controls[0].Op == OpARM64LessThanU { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64ULT, cc) + return true + } + // match: (NZ (LessEqual cc) yes no) + // result: (LE cc yes no) + for b.Controls[0].Op == OpARM64LessEqual { + v_0 := 
b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64LE, cc) + return true + } + // match: (NZ (LessEqualU cc) yes no) + // result: (ULE cc yes no) + for b.Controls[0].Op == OpARM64LessEqualU { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64ULE, cc) + return true + } + // match: (NZ (GreaterThan cc) yes no) + // result: (GT cc yes no) + for b.Controls[0].Op == OpARM64GreaterThan { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64GT, cc) + return true + } + // match: (NZ (GreaterThanU cc) yes no) + // result: (UGT cc yes no) + for b.Controls[0].Op == OpARM64GreaterThanU { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64UGT, cc) + return true + } + // match: (NZ (GreaterEqual cc) yes no) + // result: (GE cc yes no) + for b.Controls[0].Op == OpARM64GreaterEqual { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64GE, cc) + return true + } + // match: (NZ (GreaterEqualU cc) yes no) + // result: (UGE cc yes no) + for b.Controls[0].Op == OpARM64GreaterEqualU { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64UGE, cc) + return true + } + // match: (NZ (LessThanF cc) yes no) + // result: (FLT cc yes no) + for b.Controls[0].Op == OpARM64LessThanF { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64FLT, cc) + return true + } + // match: (NZ (LessEqualF cc) yes no) + // result: (FLE cc yes no) + for b.Controls[0].Op == OpARM64LessEqualF { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64FLE, cc) + return true + } + // match: (NZ (GreaterThanF cc) yes no) + // result: (FGT cc yes no) + for b.Controls[0].Op == OpARM64GreaterThanF { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockARM64FGT, cc) + return true + } + // match: (NZ (GreaterEqualF cc) yes no) + // result: (FGE cc yes no) + for b.Controls[0].Op == OpARM64GreaterEqualF { + v_0 := b.Controls[0] + cc := v_0.Args[0] + 
b.resetWithControl(BlockARM64FGE, cc) + return true + } + // match: (NZ (ANDconst [c] x) yes no) + // cond: oneBit(c) + // result: (TBNZ [int64(ntz64(c))] x yes no) + for b.Controls[0].Op == OpARM64ANDconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(oneBit(c)) { + break + } + b.resetWithControl(BlockARM64TBNZ, x) + b.AuxInt = int64ToAuxInt(int64(ntz64(c))) + return true + } + // match: (NZ (MOVDconst [0]) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpARM64MOVDconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (NZ (MOVDconst [c]) yes no) + // cond: c != 0 + // result: (First yes no) + for b.Controls[0].Op == OpARM64MOVDconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c != 0) { + break + } + b.Reset(BlockFirst) + return true + } + case BlockARM64NZW: + // match: (NZW (ANDconst [c] x) yes no) + // cond: oneBit(int64(uint32(c))) + // result: (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no) + for b.Controls[0].Op == OpARM64ANDconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(oneBit(int64(uint32(c)))) { + break + } + b.resetWithControl(BlockARM64TBNZ, x) + b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c))))) + return true + } + // match: (NZW (MOVDconst [c]) yes no) + // cond: int32(c) == 0 + // result: (First no yes) + for b.Controls[0].Op == OpARM64MOVDconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(int32(c) == 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (NZW (MOVDconst [c]) yes no) + // cond: int32(c) != 0 + // result: (First yes no) + for b.Controls[0].Op == OpARM64MOVDconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(int32(c) != 0) { + break + } + b.Reset(BlockFirst) + return true + } + case BlockARM64TBNZ: + // match: (TBNZ [0] (Equal cc) yes no) + // 
result: (EQ cc yes no) + for b.Controls[0].Op == OpARM64Equal { + v_0 := b.Controls[0] + cc := v_0.Args[0] + if auxIntToInt64(b.AuxInt) != 0 { + break + } + b.resetWithControl(BlockARM64EQ, cc) + return true + } + // match: (TBNZ [0] (NotEqual cc) yes no) + // result: (NE cc yes no) + for b.Controls[0].Op == OpARM64NotEqual { + v_0 := b.Controls[0] + cc := v_0.Args[0] + if auxIntToInt64(b.AuxInt) != 0 { + break + } + b.resetWithControl(BlockARM64NE, cc) + return true + } + // match: (TBNZ [0] (LessThan cc) yes no) + // result: (LT cc yes no) + for b.Controls[0].Op == OpARM64LessThan { + v_0 := b.Controls[0] + cc := v_0.Args[0] + if auxIntToInt64(b.AuxInt) != 0 { + break + } + b.resetWithControl(BlockARM64LT, cc) + return true + } + // match: (TBNZ [0] (LessThanU cc) yes no) + // result: (ULT cc yes no) + for b.Controls[0].Op == OpARM64LessThanU { + v_0 := b.Controls[0] + cc := v_0.Args[0] + if auxIntToInt64(b.AuxInt) != 0 { + break + } + b.resetWithControl(BlockARM64ULT, cc) + return true + } + // match: (TBNZ [0] (LessEqual cc) yes no) + // result: (LE cc yes no) + for b.Controls[0].Op == OpARM64LessEqual { + v_0 := b.Controls[0] + cc := v_0.Args[0] + if auxIntToInt64(b.AuxInt) != 0 { + break + } + b.resetWithControl(BlockARM64LE, cc) + return true + } + // match: (TBNZ [0] (LessEqualU cc) yes no) + // result: (ULE cc yes no) + for b.Controls[0].Op == OpARM64LessEqualU { + v_0 := b.Controls[0] + cc := v_0.Args[0] + if auxIntToInt64(b.AuxInt) != 0 { + break + } + b.resetWithControl(BlockARM64ULE, cc) + return true + } + // match: (TBNZ [0] (GreaterThan cc) yes no) + // result: (GT cc yes no) + for b.Controls[0].Op == OpARM64GreaterThan { + v_0 := b.Controls[0] + cc := v_0.Args[0] + if auxIntToInt64(b.AuxInt) != 0 { + break + } + b.resetWithControl(BlockARM64GT, cc) + return true + } + // match: (TBNZ [0] (GreaterThanU cc) yes no) + // result: (UGT cc yes no) + for b.Controls[0].Op == OpARM64GreaterThanU { + v_0 := b.Controls[0] + cc := v_0.Args[0] + if 
auxIntToInt64(b.AuxInt) != 0 { + break + } + b.resetWithControl(BlockARM64UGT, cc) + return true + } + // match: (TBNZ [0] (GreaterEqual cc) yes no) + // result: (GE cc yes no) + for b.Controls[0].Op == OpARM64GreaterEqual { + v_0 := b.Controls[0] + cc := v_0.Args[0] + if auxIntToInt64(b.AuxInt) != 0 { + break + } + b.resetWithControl(BlockARM64GE, cc) + return true + } + // match: (TBNZ [0] (GreaterEqualU cc) yes no) + // result: (UGE cc yes no) + for b.Controls[0].Op == OpARM64GreaterEqualU { + v_0 := b.Controls[0] + cc := v_0.Args[0] + if auxIntToInt64(b.AuxInt) != 0 { + break + } + b.resetWithControl(BlockARM64UGE, cc) + return true + } + // match: (TBNZ [0] (LessThanF cc) yes no) + // result: (FLT cc yes no) + for b.Controls[0].Op == OpARM64LessThanF { + v_0 := b.Controls[0] + cc := v_0.Args[0] + if auxIntToInt64(b.AuxInt) != 0 { + break + } + b.resetWithControl(BlockARM64FLT, cc) + return true + } + // match: (TBNZ [0] (LessEqualF cc) yes no) + // result: (FLE cc yes no) + for b.Controls[0].Op == OpARM64LessEqualF { + v_0 := b.Controls[0] + cc := v_0.Args[0] + if auxIntToInt64(b.AuxInt) != 0 { + break + } + b.resetWithControl(BlockARM64FLE, cc) + return true + } + // match: (TBNZ [0] (GreaterThanF cc) yes no) + // result: (FGT cc yes no) + for b.Controls[0].Op == OpARM64GreaterThanF { + v_0 := b.Controls[0] + cc := v_0.Args[0] + if auxIntToInt64(b.AuxInt) != 0 { + break + } + b.resetWithControl(BlockARM64FGT, cc) + return true + } + // match: (TBNZ [0] (GreaterEqualF cc) yes no) + // result: (FGE cc yes no) + for b.Controls[0].Op == OpARM64GreaterEqualF { + v_0 := b.Controls[0] + cc := v_0.Args[0] + if auxIntToInt64(b.AuxInt) != 0 { + break + } + b.resetWithControl(BlockARM64FGE, cc) + return true + } + case BlockARM64UGE: + // match: (UGE (FlagConstant [fc]) yes no) + // cond: fc.uge() + // result: (First yes no) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.uge()) { + break + } + 
b.Reset(BlockFirst) + return true + } + // match: (UGE (FlagConstant [fc]) yes no) + // cond: !fc.uge() + // result: (First no yes) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.uge()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (UGE (InvertFlags cmp) yes no) + // result: (ULE cmp yes no) + for b.Controls[0].Op == OpARM64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARM64ULE, cmp) + return true + } + case BlockARM64UGT: + // match: (UGT (FlagConstant [fc]) yes no) + // cond: fc.ugt() + // result: (First yes no) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.ugt()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (UGT (FlagConstant [fc]) yes no) + // cond: !fc.ugt() + // result: (First no yes) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.ugt()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (UGT (InvertFlags cmp) yes no) + // result: (ULT cmp yes no) + for b.Controls[0].Op == OpARM64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARM64ULT, cmp) + return true + } + case BlockARM64ULE: + // match: (ULE (FlagConstant [fc]) yes no) + // cond: fc.ule() + // result: (First yes no) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.ule()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (ULE (FlagConstant [fc]) yes no) + // cond: !fc.ule() + // result: (First no yes) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.ule()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (ULE 
(InvertFlags cmp) yes no) + // result: (UGE cmp yes no) + for b.Controls[0].Op == OpARM64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARM64UGE, cmp) + return true + } + case BlockARM64ULT: + // match: (ULT (FlagConstant [fc]) yes no) + // cond: fc.ult() + // result: (First yes no) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(fc.ult()) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (ULT (FlagConstant [fc]) yes no) + // cond: !fc.ult() + // result: (First no yes) + for b.Controls[0].Op == OpARM64FlagConstant { + v_0 := b.Controls[0] + fc := auxIntToFlagConstant(v_0.AuxInt) + if !(!fc.ult()) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (ULT (InvertFlags cmp) yes no) + // result: (UGT cmp yes no) + for b.Controls[0].Op == OpARM64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockARM64UGT, cmp) + return true + } + case BlockARM64Z: + // match: (Z (ANDconst [c] x) yes no) + // cond: oneBit(c) + // result: (TBZ [int64(ntz64(c))] x yes no) + for b.Controls[0].Op == OpARM64ANDconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(oneBit(c)) { + break + } + b.resetWithControl(BlockARM64TBZ, x) + b.AuxInt = int64ToAuxInt(int64(ntz64(c))) + return true + } + // match: (Z (MOVDconst [0]) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpARM64MOVDconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + b.Reset(BlockFirst) + return true + } + // match: (Z (MOVDconst [c]) yes no) + // cond: c != 0 + // result: (First no yes) + for b.Controls[0].Op == OpARM64MOVDconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c != 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockARM64ZW: + // match: (ZW (ANDconst [c] x) yes no) + // cond: 
oneBit(int64(uint32(c))) + // result: (TBZ [int64(ntz64(int64(uint32(c))))] x yes no) + for b.Controls[0].Op == OpARM64ANDconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(oneBit(int64(uint32(c)))) { + break + } + b.resetWithControl(BlockARM64TBZ, x) + b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c))))) + return true + } + // match: (ZW (MOVDconst [c]) yes no) + // cond: int32(c) == 0 + // result: (First yes no) + for b.Controls[0].Op == OpARM64MOVDconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(int32(c) == 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (ZW (MOVDconst [c]) yes no) + // cond: int32(c) != 0 + // result: (First no yes) + for b.Controls[0].Op == OpARM64MOVDconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(int32(c) != 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteARM64latelower.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteARM64latelower.go new file mode 100644 index 0000000000000000000000000000000000000000..6873fd79968514b92a32243d96b51b57ebbea514 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteARM64latelower.go @@ -0,0 +1,1036 @@ +// Code generated from _gen/ARM64latelower.rules using 'go generate'; DO NOT EDIT. 
+ +package ssa + +func rewriteValueARM64latelower(v *Value) bool { + switch v.Op { + case OpARM64ADDSconstflags: + return rewriteValueARM64latelower_OpARM64ADDSconstflags(v) + case OpARM64ADDconst: + return rewriteValueARM64latelower_OpARM64ADDconst(v) + case OpARM64ANDconst: + return rewriteValueARM64latelower_OpARM64ANDconst(v) + case OpARM64CMNWconst: + return rewriteValueARM64latelower_OpARM64CMNWconst(v) + case OpARM64CMNconst: + return rewriteValueARM64latelower_OpARM64CMNconst(v) + case OpARM64CMPWconst: + return rewriteValueARM64latelower_OpARM64CMPWconst(v) + case OpARM64CMPconst: + return rewriteValueARM64latelower_OpARM64CMPconst(v) + case OpARM64MOVBUreg: + return rewriteValueARM64latelower_OpARM64MOVBUreg(v) + case OpARM64MOVBreg: + return rewriteValueARM64latelower_OpARM64MOVBreg(v) + case OpARM64MOVHUreg: + return rewriteValueARM64latelower_OpARM64MOVHUreg(v) + case OpARM64MOVHreg: + return rewriteValueARM64latelower_OpARM64MOVHreg(v) + case OpARM64MOVWUreg: + return rewriteValueARM64latelower_OpARM64MOVWUreg(v) + case OpARM64MOVWreg: + return rewriteValueARM64latelower_OpARM64MOVWreg(v) + case OpARM64ORconst: + return rewriteValueARM64latelower_OpARM64ORconst(v) + case OpARM64SUBconst: + return rewriteValueARM64latelower_OpARM64SUBconst(v) + case OpARM64TSTWconst: + return rewriteValueARM64latelower_OpARM64TSTWconst(v) + case OpARM64TSTconst: + return rewriteValueARM64latelower_OpARM64TSTconst(v) + case OpARM64XORconst: + return rewriteValueARM64latelower_OpARM64XORconst(v) + } + return false +} +func rewriteValueARM64latelower_OpARM64ADDSconstflags(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ADDSconstflags [c] x) + // cond: !isARM64addcon(c) + // result: (ADDSflags x (MOVDconst [c])) + for { + c := auxIntToInt64(v.AuxInt) + x := v_0 + if !(!isARM64addcon(c)) { + break + } + v.reset(OpARM64ADDSflags) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(c) + 
v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueARM64latelower_OpARM64ADDconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ADDconst [c] x) + // cond: !isARM64addcon(c) + // result: (ADD x (MOVDconst [c])) + for { + c := auxIntToInt64(v.AuxInt) + x := v_0 + if !(!isARM64addcon(c)) { + break + } + v.reset(OpARM64ADD) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(c) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueARM64latelower_OpARM64ANDconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ANDconst [c] x) + // cond: !isARM64bitcon(uint64(c)) + // result: (AND x (MOVDconst [c])) + for { + c := auxIntToInt64(v.AuxInt) + x := v_0 + if !(!isARM64bitcon(uint64(c))) { + break + } + v.reset(OpARM64AND) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(c) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueARM64latelower_OpARM64CMNWconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMNWconst [c] x) + // cond: !isARM64addcon(int64(c)) + // result: (CMNW x (MOVDconst [int64(c)])) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(!isARM64addcon(int64(c))) { + break + } + v.reset(OpARM64CMNW) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(int64(c)) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueARM64latelower_OpARM64CMNconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMNconst [c] x) + // cond: !isARM64addcon(c) + // result: (CMN x (MOVDconst [c])) + for { + c := auxIntToInt64(v.AuxInt) + x := v_0 + if !(!isARM64addcon(c)) { + break + } + v.reset(OpARM64CMN) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(c) + v.AddArg2(x, v0) + return true 
+ } + return false +} +func rewriteValueARM64latelower_OpARM64CMPWconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPWconst [c] x) + // cond: !isARM64addcon(int64(c)) + // result: (CMPW x (MOVDconst [int64(c)])) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(!isARM64addcon(int64(c))) { + break + } + v.reset(OpARM64CMPW) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(int64(c)) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueARM64latelower_OpARM64CMPconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CMPconst [c] x) + // cond: !isARM64addcon(c) + // result: (CMP x (MOVDconst [c])) + for { + c := auxIntToInt64(v.AuxInt) + x := v_0 + if !(!isARM64addcon(c)) { + break + } + v.reset(OpARM64CMP) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(c) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueARM64latelower_OpARM64MOVBUreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVBUreg x:(Equal _)) + // result: x + for { + x := v_0 + if x.Op != OpARM64Equal { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(NotEqual _)) + // result: x + for { + x := v_0 + if x.Op != OpARM64NotEqual { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(LessThan _)) + // result: x + for { + x := v_0 + if x.Op != OpARM64LessThan { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(LessThanU _)) + // result: x + for { + x := v_0 + if x.Op != OpARM64LessThanU { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(LessThanF _)) + // result: x + for { + x := v_0 + if x.Op != OpARM64LessThanF { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(LessEqual _)) + // result: x + for { + x := v_0 + if x.Op != OpARM64LessEqual { + break + } + v.copyOf(x) + return true + } + // match: 
(MOVBUreg x:(LessEqualU _)) + // result: x + for { + x := v_0 + if x.Op != OpARM64LessEqualU { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(LessEqualF _)) + // result: x + for { + x := v_0 + if x.Op != OpARM64LessEqualF { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(GreaterThan _)) + // result: x + for { + x := v_0 + if x.Op != OpARM64GreaterThan { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(GreaterThanU _)) + // result: x + for { + x := v_0 + if x.Op != OpARM64GreaterThanU { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(GreaterThanF _)) + // result: x + for { + x := v_0 + if x.Op != OpARM64GreaterThanF { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(GreaterEqual _)) + // result: x + for { + x := v_0 + if x.Op != OpARM64GreaterEqual { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(GreaterEqualU _)) + // result: x + for { + x := v_0 + if x.Op != OpARM64GreaterEqualU { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(GreaterEqualF _)) + // result: x + for { + x := v_0 + if x.Op != OpARM64GreaterEqualF { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(MOVBUload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBUload { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg x:(MOVBUloadidx _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBUloadidx { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg x:(MOVBUreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBUreg { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64latelower_OpARM64MOVBreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVBreg x:(MOVBload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != 
OpARM64MOVBload { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVBreg x:(MOVBloadidx _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBloadidx { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVBreg x:(MOVBreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBreg { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64latelower_OpARM64MOVHUreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVHUreg x:(MOVBUload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBUload { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHUload { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVBUloadidx _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBUloadidx { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUloadidx _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHUloadidx { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUloadidx2 _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHUloadidx2 { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVBUreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBUreg { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHUreg { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64latelower_OpARM64MOVHreg(v *Value) bool { + v_0 := v.Args[0] + // 
match: (MOVHreg x:(MOVBload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBload { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBUload { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHload { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBloadidx _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBloadidx { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUloadidx _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBUloadidx { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHloadidx _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHloadidx { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHloadidx2 _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHloadidx2 { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBreg { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBUreg { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHreg { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64latelower_OpARM64MOVWUreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVWUreg x) + // 
cond: zeroUpper32Bits(x, 3) + // result: x + for { + x := v_0 + if !(zeroUpper32Bits(x, 3)) { + break + } + v.copyOf(x) + return true + } + // match: (MOVWUreg x:(MOVBUload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBUload { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVHUload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHUload { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVWUload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVWUload { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVBUloadidx _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBUloadidx { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVHUloadidx _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHUloadidx { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVWUloadidx _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVWUloadidx { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVHUloadidx2 _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHUloadidx2 { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVWUloadidx4 _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVWUloadidx4 { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVBUreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBUreg { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVHUreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHUreg { + break + } + 
v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVWUreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVWUreg { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64latelower_OpARM64MOVWreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVWreg x:(MOVBload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBload { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBUload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBUload { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHload { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHUload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHUload { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVWload { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBloadidx _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBloadidx { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBUloadidx _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBUloadidx { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHloadidx _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHloadidx { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHUloadidx _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHUloadidx { + break + } + 
v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWloadidx _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVWloadidx { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHloadidx2 _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHloadidx2 { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHUloadidx2 _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHUloadidx2 { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWloadidx4 _ _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVWloadidx4 { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBreg { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBUreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVBUreg { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVHreg { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpARM64MOVWreg { + break + } + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64latelower_OpARM64ORconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ORconst [c] x) + // cond: !isARM64bitcon(uint64(c)) + // result: (OR x (MOVDconst [c])) + for { + c := auxIntToInt64(v.AuxInt) + x := v_0 + if !(!isARM64bitcon(uint64(c))) { + break + } + v.reset(OpARM64OR) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 
int64ToAuxInt(c) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueARM64latelower_OpARM64SUBconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SUBconst [c] x) + // cond: !isARM64addcon(c) + // result: (SUB x (MOVDconst [c])) + for { + c := auxIntToInt64(v.AuxInt) + x := v_0 + if !(!isARM64addcon(c)) { + break + } + v.reset(OpARM64SUB) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(c) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueARM64latelower_OpARM64TSTWconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (TSTWconst [c] x) + // cond: !isARM64bitcon(uint64(c)|uint64(c)<<32) + // result: (TSTW x (MOVDconst [int64(c)])) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(!isARM64bitcon(uint64(c) | uint64(c)<<32)) { + break + } + v.reset(OpARM64TSTW) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(int64(c)) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueARM64latelower_OpARM64TSTconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (TSTconst [c] x) + // cond: !isARM64bitcon(uint64(c)) + // result: (TST x (MOVDconst [c])) + for { + c := auxIntToInt64(v.AuxInt) + x := v_0 + if !(!isARM64bitcon(uint64(c))) { + break + } + v.reset(OpARM64TST) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(c) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValueARM64latelower_OpARM64XORconst(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (XORconst [c] x) + // cond: !isARM64bitcon(uint64(c)) + // result: (XOR x (MOVDconst [c])) + for { + c := auxIntToInt64(v.AuxInt) + x := v_0 + if !(!isARM64bitcon(uint64(c))) { + break + } + v.reset(OpARM64XOR) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, 
typ.UInt64) + v0.AuxInt = int64ToAuxInt(c) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteBlockARM64latelower(b *Block) bool { + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteCond_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteCond_test.go new file mode 100644 index 0000000000000000000000000000000000000000..eb5c1de6de735be5082411731ef9b130e4ccea26 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteCond_test.go @@ -0,0 +1,635 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "math" + "math/rand" + "testing" +) + +var ( + x64 int64 = math.MaxInt64 - 2 + x64b int64 = math.MaxInt64 - 2 + x64c int64 = math.MaxInt64 - 2 + y64 int64 = math.MinInt64 + 1 + x32 int32 = math.MaxInt32 - 2 + x32b int32 = math.MaxInt32 - 2 + x32c int32 = math.MaxInt32 - 2 + y32 int32 = math.MinInt32 + 1 + one64 int64 = 1 + one32 int32 = 1 + v64 int64 = 11 // ensure it's not 2**n +/- 1 + v64_n int64 = -11 + v32 int32 = 11 + v32_n int32 = -11 + uv32 uint32 = 19 + uz uint8 = 1 // for lowering to SLL/SRL/SRA +) + +var crTests = []struct { + name string + tf func(t *testing.T) +}{ + {"AddConst64", testAddConst64}, + {"AddConst32", testAddConst32}, + {"AddVar64", testAddVar64}, + {"AddVar64Cset", testAddVar64Cset}, + {"AddVar32", testAddVar32}, + {"MAddVar64", testMAddVar64}, + {"MAddVar32", testMAddVar32}, + {"MSubVar64", testMSubVar64}, + {"MSubVar32", testMSubVar32}, + {"AddShift32", testAddShift32}, + {"SubShift32", testSubShift32}, +} + +var crBenches = []struct { + name string + bf func(b *testing.B) +}{ + {"SoloJump", benchSoloJump}, + {"CombJump", benchCombJump}, +} + +// Test int32/int64's add/sub/madd/msub operations with boundary values to +// ensure the optimization to 'comparing to zero' expressions of if-statements 
+// yield expected results. +// 32 rewriting rules are covered. At least two scenarios for "Canonicalize +// the order of arguments to comparisons", which helps with CSE, are covered. +// The tedious if-else structures are necessary to ensure all concerned rules +// and machine code sequences are covered. +// It's for arm64 initially, please see https://github.com/golang/go/issues/38740 +func TestCondRewrite(t *testing.T) { + for _, test := range crTests { + t.Run(test.name, test.tf) + } +} + +// Profile the aforementioned optimization from two angles: +// +// SoloJump: generated branching code has one 'jump', for '<' and '>=' +// CombJump: generated branching code has two consecutive 'jump', for '<=' and '>' +// +// We expect that 'CombJump' is generally on par with the non-optimized code, and +// 'SoloJump' demonstrates some improvement. +// It's for arm64 initially, please see https://github.com/golang/go/issues/38740 +func BenchmarkCondRewrite(b *testing.B) { + for _, bench := range crBenches { + b.Run(bench.name, bench.bf) + } +} + +// var +/- const +func testAddConst64(t *testing.T) { + if x64+11 < 0 { + } else { + t.Errorf("'%#x + 11 < 0' failed", x64) + } + + if x64+13 <= 0 { + } else { + t.Errorf("'%#x + 13 <= 0' failed", x64) + } + + if y64-11 > 0 { + } else { + t.Errorf("'%#x - 11 > 0' failed", y64) + } + + if y64-13 >= 0 { + } else { + t.Errorf("'%#x - 13 >= 0' failed", y64) + } + + if x64+19 > 0 { + t.Errorf("'%#x + 19 > 0' failed", x64) + } + + if x64+23 >= 0 { + t.Errorf("'%#x + 23 >= 0' failed", x64) + } + + if y64-19 < 0 { + t.Errorf("'%#x - 19 < 0' failed", y64) + } + + if y64-23 <= 0 { + t.Errorf("'%#x - 23 <= 0' failed", y64) + } +} + +// 32-bit var +/- const +func testAddConst32(t *testing.T) { + if x32+11 < 0 { + } else { + t.Errorf("'%#x + 11 < 0' failed", x32) + } + + if x32+13 <= 0 { + } else { + t.Errorf("'%#x + 13 <= 0' failed", x32) + } + + if y32-11 > 0 { + } else { + t.Errorf("'%#x - 11 > 0' failed", y32) + } + + if y32-13 >= 0 { + } 
else { + t.Errorf("'%#x - 13 >= 0' failed", y32) + } + + if x32+19 > 0 { + t.Errorf("'%#x + 19 > 0' failed", x32) + } + + if x32+23 >= 0 { + t.Errorf("'%#x + 23 >= 0' failed", x32) + } + + if y32-19 < 0 { + t.Errorf("'%#x - 19 < 0' failed", y32) + } + + if y32-23 <= 0 { + t.Errorf("'%#x - 23 <= 0' failed", y32) + } +} + +// var + var +func testAddVar64(t *testing.T) { + if x64+v64 < 0 { + } else { + t.Errorf("'%#x + %#x < 0' failed", x64, v64) + } + + if x64+v64 <= 0 { + } else { + t.Errorf("'%#x + %#x <= 0' failed", x64, v64) + } + + if y64+v64_n > 0 { + } else { + t.Errorf("'%#x + %#x > 0' failed", y64, v64_n) + } + + if y64+v64_n >= 0 { + } else { + t.Errorf("'%#x + %#x >= 0' failed", y64, v64_n) + } + + if x64+v64 > 0 { + t.Errorf("'%#x + %#x > 0' failed", x64, v64) + } + + if x64+v64 >= 0 { + t.Errorf("'%#x + %#x >= 0' failed", x64, v64) + } + + if y64+v64_n < 0 { + t.Errorf("'%#x + %#x < 0' failed", y64, v64_n) + } + + if y64+v64_n <= 0 { + t.Errorf("'%#x + %#x <= 0' failed", y64, v64_n) + } +} + +// var + var, cset +func testAddVar64Cset(t *testing.T) { + var a int + if x64+v64 < 0 { + a = 1 + } + if a != 1 { + t.Errorf("'%#x + %#x < 0' failed", x64, v64) + } + + a = 0 + if y64+v64_n >= 0 { + a = 1 + } + if a != 1 { + t.Errorf("'%#x + %#x >= 0' failed", y64, v64_n) + } + + a = 1 + if x64+v64 >= 0 { + a = 0 + } + if a == 0 { + t.Errorf("'%#x + %#x >= 0' failed", x64, v64) + } + + a = 1 + if y64+v64_n < 0 { + a = 0 + } + if a == 0 { + t.Errorf("'%#x + %#x < 0' failed", y64, v64_n) + } +} + +// 32-bit var+var +func testAddVar32(t *testing.T) { + if x32+v32 < 0 { + } else { + t.Errorf("'%#x + %#x < 0' failed", x32, v32) + } + + if x32+v32 <= 0 { + } else { + t.Errorf("'%#x + %#x <= 0' failed", x32, v32) + } + + if y32+v32_n > 0 { + } else { + t.Errorf("'%#x + %#x > 0' failed", y32, v32_n) + } + + if y32+v32_n >= 0 { + } else { + t.Errorf("'%#x + %#x >= 0' failed", y32, v32_n) + } + + if x32+v32 > 0 { + t.Errorf("'%#x + %#x > 0' failed", x32, v32) + } + + if 
x32+v32 >= 0 { + t.Errorf("'%#x + %#x >= 0' failed", x32, v32) + } + + if y32+v32_n < 0 { + t.Errorf("'%#x + %#x < 0' failed", y32, v32_n) + } + + if y32+v32_n <= 0 { + t.Errorf("'%#x + %#x <= 0' failed", y32, v32_n) + } +} + +// multiply-add +func testMAddVar64(t *testing.T) { + if x64+v64*one64 < 0 { + } else { + t.Errorf("'%#x + %#x*1 < 0' failed", x64, v64) + } + + if x64+v64*one64 <= 0 { + } else { + t.Errorf("'%#x + %#x*1 <= 0' failed", x64, v64) + } + + if y64+v64_n*one64 > 0 { + } else { + t.Errorf("'%#x + %#x*1 > 0' failed", y64, v64_n) + } + + if y64+v64_n*one64 >= 0 { + } else { + t.Errorf("'%#x + %#x*1 >= 0' failed", y64, v64_n) + } + + if x64+v64*one64 > 0 { + t.Errorf("'%#x + %#x*1 > 0' failed", x64, v64) + } + + if x64+v64*one64 >= 0 { + t.Errorf("'%#x + %#x*1 >= 0' failed", x64, v64) + } + + if y64+v64_n*one64 < 0 { + t.Errorf("'%#x + %#x*1 < 0' failed", y64, v64_n) + } + + if y64+v64_n*one64 <= 0 { + t.Errorf("'%#x + %#x*1 <= 0' failed", y64, v64_n) + } +} + +// 32-bit multiply-add +func testMAddVar32(t *testing.T) { + if x32+v32*one32 < 0 { + } else { + t.Errorf("'%#x + %#x*1 < 0' failed", x32, v32) + } + + if x32+v32*one32 <= 0 { + } else { + t.Errorf("'%#x + %#x*1 <= 0' failed", x32, v32) + } + + if y32+v32_n*one32 > 0 { + } else { + t.Errorf("'%#x + %#x*1 > 0' failed", y32, v32_n) + } + + if y32+v32_n*one32 >= 0 { + } else { + t.Errorf("'%#x + %#x*1 >= 0' failed", y32, v32_n) + } + + if x32+v32*one32 > 0 { + t.Errorf("'%#x + %#x*1 > 0' failed", x32, v32) + } + + if x32+v32*one32 >= 0 { + t.Errorf("'%#x + %#x*1 >= 0' failed", x32, v32) + } + + if y32+v32_n*one32 < 0 { + t.Errorf("'%#x + %#x*1 < 0' failed", y32, v32_n) + } + + if y32+v32_n*one32 <= 0 { + t.Errorf("'%#x + %#x*1 <= 0' failed", y32, v32_n) + } +} + +// multiply-sub +func testMSubVar64(t *testing.T) { + if x64-v64_n*one64 < 0 { + } else { + t.Errorf("'%#x - %#x*1 < 0' failed", x64, v64_n) + } + + if x64-v64_n*one64 <= 0 { + } else { + t.Errorf("'%#x - %#x*1 <= 0' failed", x64, v64_n) 
+ } + + if y64-v64*one64 > 0 { + } else { + t.Errorf("'%#x - %#x*1 > 0' failed", y64, v64) + } + + if y64-v64*one64 >= 0 { + } else { + t.Errorf("'%#x - %#x*1 >= 0' failed", y64, v64) + } + + if x64-v64_n*one64 > 0 { + t.Errorf("'%#x - %#x*1 > 0' failed", x64, v64_n) + } + + if x64-v64_n*one64 >= 0 { + t.Errorf("'%#x - %#x*1 >= 0' failed", x64, v64_n) + } + + if y64-v64*one64 < 0 { + t.Errorf("'%#x - %#x*1 < 0' failed", y64, v64) + } + + if y64-v64*one64 <= 0 { + t.Errorf("'%#x - %#x*1 <= 0' failed", y64, v64) + } + + if x64-x64b*one64 < 0 { + t.Errorf("'%#x - %#x*1 < 0' failed", x64, x64b) + } + + if x64-x64b*one64 >= 0 { + } else { + t.Errorf("'%#x - %#x*1 >= 0' failed", x64, x64b) + } +} + +// 32-bit multiply-sub +func testMSubVar32(t *testing.T) { + if x32-v32_n*one32 < 0 { + } else { + t.Errorf("'%#x - %#x*1 < 0' failed", x32, v32_n) + } + + if x32-v32_n*one32 <= 0 { + } else { + t.Errorf("'%#x - %#x*1 <= 0' failed", x32, v32_n) + } + + if y32-v32*one32 > 0 { + } else { + t.Errorf("'%#x - %#x*1 > 0' failed", y32, v32) + } + + if y32-v32*one32 >= 0 { + } else { + t.Errorf("'%#x - %#x*1 >= 0' failed", y32, v32) + } + + if x32-v32_n*one32 > 0 { + t.Errorf("'%#x - %#x*1 > 0' failed", x32, v32_n) + } + + if x32-v32_n*one32 >= 0 { + t.Errorf("'%#x - %#x*1 >= 0' failed", x32, v32_n) + } + + if y32-v32*one32 < 0 { + t.Errorf("'%#x - %#x*1 < 0' failed", y32, v32) + } + + if y32-v32*one32 <= 0 { + t.Errorf("'%#x - %#x*1 <= 0' failed", y32, v32) + } + + if x32-x32b*one32 < 0 { + t.Errorf("'%#x - %#x*1 < 0' failed", x32, x32b) + } + + if x32-x32b*one32 >= 0 { + } else { + t.Errorf("'%#x - %#x*1 >= 0' failed", x32, x32b) + } +} + +// 32-bit ADDshift, pick up 1~2 scenarios randomly for each condition +func testAddShift32(t *testing.T) { + if x32+v32<<1 < 0 { + } else { + t.Errorf("'%#x + %#x<<%#x < 0' failed", x32, v32, 1) + } + + if x32+v32>>1 <= 0 { + } else { + t.Errorf("'%#x + %#x>>%#x <= 0' failed", x32, v32, 1) + } + + if x32+int32(uv32>>1) > 0 { + t.Errorf("'%#x + 
int32(%#x>>%#x) > 0' failed", x32, uv32, 1) + } + + if x32+v32<= 0 { + t.Errorf("'%#x + %#x<<%#x >= 0' failed", x32, v32, uz) + } + + if x32+v32>>uz > 0 { + t.Errorf("'%#x + %#x>>%#x > 0' failed", x32, v32, uz) + } + + if x32+int32(uv32>>uz) < 0 { + } else { + t.Errorf("'%#x + int32(%#x>>%#x) < 0' failed", x32, uv32, uz) + } +} + +// 32-bit SUBshift, pick up 1~2 scenarios randomly for each condition +func testSubShift32(t *testing.T) { + if y32-v32<<1 > 0 { + } else { + t.Errorf("'%#x - %#x<<%#x > 0' failed", y32, v32, 1) + } + + if y32-v32>>1 < 0 { + t.Errorf("'%#x - %#x>>%#x < 0' failed", y32, v32, 1) + } + + if y32-int32(uv32>>1) >= 0 { + } else { + t.Errorf("'%#x - int32(%#x>>%#x) >= 0' failed", y32, uv32, 1) + } + + if y32-v32<>uz >= 0 { + } else { + t.Errorf("'%#x - %#x>>%#x >= 0' failed", y32, v32, uz) + } + + if y32-int32(uv32>>uz) <= 0 { + t.Errorf("'%#x - int32(%#x>>%#x) <= 0' failed", y32, uv32, uz) + } +} + +var rnd = rand.New(rand.NewSource(0)) +var sink int64 + +func benchSoloJump(b *testing.B) { + r1 := x64 + r2 := x64b + r3 := x64c + r4 := y64 + d := rnd.Int63n(10) + + // 6 out 10 conditions evaluate to true + for i := 0; i < b.N; i++ { + if r1+r2 < 0 { + d *= 2 + d /= 2 + } + + if r1+r3 >= 0 { + d *= 2 + d /= 2 + } + + if r1+r2*one64 < 0 { + d *= 2 + d /= 2 + } + + if r2+r3*one64 >= 0 { + d *= 2 + d /= 2 + } + + if r1-r2*v64 >= 0 { + d *= 2 + d /= 2 + } + + if r3-r4*v64 < 0 { + d *= 2 + d /= 2 + } + + if r1+11 < 0 { + d *= 2 + d /= 2 + } + + if r1+13 >= 0 { + d *= 2 + d /= 2 + } + + if r4-17 < 0 { + d *= 2 + d /= 2 + } + + if r4-19 >= 0 { + d *= 2 + d /= 2 + } + } + sink = d +} + +func benchCombJump(b *testing.B) { + r1 := x64 + r2 := x64b + r3 := x64c + r4 := y64 + d := rnd.Int63n(10) + + // 6 out 10 conditions evaluate to true + for i := 0; i < b.N; i++ { + if r1+r2 <= 0 { + d *= 2 + d /= 2 + } + + if r1+r3 > 0 { + d *= 2 + d /= 2 + } + + if r1+r2*one64 <= 0 { + d *= 2 + d /= 2 + } + + if r2+r3*one64 > 0 { + d *= 2 + d /= 2 + } + + if r1-r2*v64 > 
0 { + d *= 2 + d /= 2 + } + + if r3-r4*v64 <= 0 { + d *= 2 + d /= 2 + } + + if r1+11 <= 0 { + d *= 2 + d /= 2 + } + + if r1+13 > 0 { + d *= 2 + d /= 2 + } + + if r4-17 <= 0 { + d *= 2 + d /= 2 + } + + if r4-19 > 0 { + d *= 2 + d /= 2 + } + } + sink = d +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteLOONG64.go new file mode 100644 index 0000000000000000000000000000000000000000..edd3ffe6b9adca7c810bf64d031748eb2e119251 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteLOONG64.go @@ -0,0 +1,8037 @@ +// Code generated from _gen/LOONG64.rules using 'go generate'; DO NOT EDIT. + +package ssa + +import "cmd/compile/internal/types" + +func rewriteValueLOONG64(v *Value) bool { + switch v.Op { + case OpAdd16: + v.Op = OpLOONG64ADDV + return true + case OpAdd32: + v.Op = OpLOONG64ADDV + return true + case OpAdd32F: + v.Op = OpLOONG64ADDF + return true + case OpAdd64: + v.Op = OpLOONG64ADDV + return true + case OpAdd64F: + v.Op = OpLOONG64ADDD + return true + case OpAdd8: + v.Op = OpLOONG64ADDV + return true + case OpAddPtr: + v.Op = OpLOONG64ADDV + return true + case OpAddr: + return rewriteValueLOONG64_OpAddr(v) + case OpAnd16: + v.Op = OpLOONG64AND + return true + case OpAnd32: + v.Op = OpLOONG64AND + return true + case OpAnd64: + v.Op = OpLOONG64AND + return true + case OpAnd8: + v.Op = OpLOONG64AND + return true + case OpAndB: + v.Op = OpLOONG64AND + return true + case OpAtomicAdd32: + v.Op = OpLOONG64LoweredAtomicAdd32 + return true + case OpAtomicAdd64: + v.Op = OpLOONG64LoweredAtomicAdd64 + return true + case OpAtomicCompareAndSwap32: + return rewriteValueLOONG64_OpAtomicCompareAndSwap32(v) + case OpAtomicCompareAndSwap64: + v.Op = OpLOONG64LoweredAtomicCas64 + return true + case OpAtomicExchange32: + v.Op = OpLOONG64LoweredAtomicExchange32 + return true + case OpAtomicExchange64: + v.Op = OpLOONG64LoweredAtomicExchange64 
+ return true + case OpAtomicLoad32: + v.Op = OpLOONG64LoweredAtomicLoad32 + return true + case OpAtomicLoad64: + v.Op = OpLOONG64LoweredAtomicLoad64 + return true + case OpAtomicLoad8: + v.Op = OpLOONG64LoweredAtomicLoad8 + return true + case OpAtomicLoadPtr: + v.Op = OpLOONG64LoweredAtomicLoad64 + return true + case OpAtomicStore32: + v.Op = OpLOONG64LoweredAtomicStore32 + return true + case OpAtomicStore64: + v.Op = OpLOONG64LoweredAtomicStore64 + return true + case OpAtomicStore8: + v.Op = OpLOONG64LoweredAtomicStore8 + return true + case OpAtomicStorePtrNoWB: + v.Op = OpLOONG64LoweredAtomicStore64 + return true + case OpAvg64u: + return rewriteValueLOONG64_OpAvg64u(v) + case OpClosureCall: + v.Op = OpLOONG64CALLclosure + return true + case OpCom16: + return rewriteValueLOONG64_OpCom16(v) + case OpCom32: + return rewriteValueLOONG64_OpCom32(v) + case OpCom64: + return rewriteValueLOONG64_OpCom64(v) + case OpCom8: + return rewriteValueLOONG64_OpCom8(v) + case OpCondSelect: + return rewriteValueLOONG64_OpCondSelect(v) + case OpConst16: + return rewriteValueLOONG64_OpConst16(v) + case OpConst32: + return rewriteValueLOONG64_OpConst32(v) + case OpConst32F: + return rewriteValueLOONG64_OpConst32F(v) + case OpConst64: + return rewriteValueLOONG64_OpConst64(v) + case OpConst64F: + return rewriteValueLOONG64_OpConst64F(v) + case OpConst8: + return rewriteValueLOONG64_OpConst8(v) + case OpConstBool: + return rewriteValueLOONG64_OpConstBool(v) + case OpConstNil: + return rewriteValueLOONG64_OpConstNil(v) + case OpCvt32Fto32: + v.Op = OpLOONG64TRUNCFW + return true + case OpCvt32Fto64: + v.Op = OpLOONG64TRUNCFV + return true + case OpCvt32Fto64F: + v.Op = OpLOONG64MOVFD + return true + case OpCvt32to32F: + v.Op = OpLOONG64MOVWF + return true + case OpCvt32to64F: + v.Op = OpLOONG64MOVWD + return true + case OpCvt64Fto32: + v.Op = OpLOONG64TRUNCDW + return true + case OpCvt64Fto32F: + v.Op = OpLOONG64MOVDF + return true + case OpCvt64Fto64: + v.Op = OpLOONG64TRUNCDV + 
return true + case OpCvt64to32F: + v.Op = OpLOONG64MOVVF + return true + case OpCvt64to64F: + v.Op = OpLOONG64MOVVD + return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true + case OpDiv16: + return rewriteValueLOONG64_OpDiv16(v) + case OpDiv16u: + return rewriteValueLOONG64_OpDiv16u(v) + case OpDiv32: + return rewriteValueLOONG64_OpDiv32(v) + case OpDiv32F: + v.Op = OpLOONG64DIVF + return true + case OpDiv32u: + return rewriteValueLOONG64_OpDiv32u(v) + case OpDiv64: + return rewriteValueLOONG64_OpDiv64(v) + case OpDiv64F: + v.Op = OpLOONG64DIVD + return true + case OpDiv64u: + v.Op = OpLOONG64DIVVU + return true + case OpDiv8: + return rewriteValueLOONG64_OpDiv8(v) + case OpDiv8u: + return rewriteValueLOONG64_OpDiv8u(v) + case OpEq16: + return rewriteValueLOONG64_OpEq16(v) + case OpEq32: + return rewriteValueLOONG64_OpEq32(v) + case OpEq32F: + return rewriteValueLOONG64_OpEq32F(v) + case OpEq64: + return rewriteValueLOONG64_OpEq64(v) + case OpEq64F: + return rewriteValueLOONG64_OpEq64F(v) + case OpEq8: + return rewriteValueLOONG64_OpEq8(v) + case OpEqB: + return rewriteValueLOONG64_OpEqB(v) + case OpEqPtr: + return rewriteValueLOONG64_OpEqPtr(v) + case OpGetCallerPC: + v.Op = OpLOONG64LoweredGetCallerPC + return true + case OpGetCallerSP: + v.Op = OpLOONG64LoweredGetCallerSP + return true + case OpGetClosurePtr: + v.Op = OpLOONG64LoweredGetClosurePtr + return true + case OpHmul32: + return rewriteValueLOONG64_OpHmul32(v) + case OpHmul32u: + return rewriteValueLOONG64_OpHmul32u(v) + case OpHmul64: + v.Op = OpLOONG64MULHV + return true + case OpHmul64u: + v.Op = OpLOONG64MULHVU + return true + case OpInterCall: + v.Op = OpLOONG64CALLinter + return true + case OpIsInBounds: + return rewriteValueLOONG64_OpIsInBounds(v) + case OpIsNonNil: + return rewriteValueLOONG64_OpIsNonNil(v) + case OpIsSliceInBounds: + return rewriteValueLOONG64_OpIsSliceInBounds(v) + case OpLOONG64ADDV: + return rewriteValueLOONG64_OpLOONG64ADDV(v) + case OpLOONG64ADDVconst: + return 
rewriteValueLOONG64_OpLOONG64ADDVconst(v) + case OpLOONG64AND: + return rewriteValueLOONG64_OpLOONG64AND(v) + case OpLOONG64ANDconst: + return rewriteValueLOONG64_OpLOONG64ANDconst(v) + case OpLOONG64DIVV: + return rewriteValueLOONG64_OpLOONG64DIVV(v) + case OpLOONG64DIVVU: + return rewriteValueLOONG64_OpLOONG64DIVVU(v) + case OpLOONG64LoweredAtomicAdd32: + return rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd32(v) + case OpLOONG64LoweredAtomicAdd64: + return rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd64(v) + case OpLOONG64LoweredAtomicStore32: + return rewriteValueLOONG64_OpLOONG64LoweredAtomicStore32(v) + case OpLOONG64LoweredAtomicStore64: + return rewriteValueLOONG64_OpLOONG64LoweredAtomicStore64(v) + case OpLOONG64MASKEQZ: + return rewriteValueLOONG64_OpLOONG64MASKEQZ(v) + case OpLOONG64MASKNEZ: + return rewriteValueLOONG64_OpLOONG64MASKNEZ(v) + case OpLOONG64MOVBUload: + return rewriteValueLOONG64_OpLOONG64MOVBUload(v) + case OpLOONG64MOVBUreg: + return rewriteValueLOONG64_OpLOONG64MOVBUreg(v) + case OpLOONG64MOVBload: + return rewriteValueLOONG64_OpLOONG64MOVBload(v) + case OpLOONG64MOVBreg: + return rewriteValueLOONG64_OpLOONG64MOVBreg(v) + case OpLOONG64MOVBstore: + return rewriteValueLOONG64_OpLOONG64MOVBstore(v) + case OpLOONG64MOVBstorezero: + return rewriteValueLOONG64_OpLOONG64MOVBstorezero(v) + case OpLOONG64MOVDload: + return rewriteValueLOONG64_OpLOONG64MOVDload(v) + case OpLOONG64MOVDstore: + return rewriteValueLOONG64_OpLOONG64MOVDstore(v) + case OpLOONG64MOVFload: + return rewriteValueLOONG64_OpLOONG64MOVFload(v) + case OpLOONG64MOVFstore: + return rewriteValueLOONG64_OpLOONG64MOVFstore(v) + case OpLOONG64MOVHUload: + return rewriteValueLOONG64_OpLOONG64MOVHUload(v) + case OpLOONG64MOVHUreg: + return rewriteValueLOONG64_OpLOONG64MOVHUreg(v) + case OpLOONG64MOVHload: + return rewriteValueLOONG64_OpLOONG64MOVHload(v) + case OpLOONG64MOVHreg: + return rewriteValueLOONG64_OpLOONG64MOVHreg(v) + case OpLOONG64MOVHstore: + return 
rewriteValueLOONG64_OpLOONG64MOVHstore(v) + case OpLOONG64MOVHstorezero: + return rewriteValueLOONG64_OpLOONG64MOVHstorezero(v) + case OpLOONG64MOVVload: + return rewriteValueLOONG64_OpLOONG64MOVVload(v) + case OpLOONG64MOVVreg: + return rewriteValueLOONG64_OpLOONG64MOVVreg(v) + case OpLOONG64MOVVstore: + return rewriteValueLOONG64_OpLOONG64MOVVstore(v) + case OpLOONG64MOVVstorezero: + return rewriteValueLOONG64_OpLOONG64MOVVstorezero(v) + case OpLOONG64MOVWUload: + return rewriteValueLOONG64_OpLOONG64MOVWUload(v) + case OpLOONG64MOVWUreg: + return rewriteValueLOONG64_OpLOONG64MOVWUreg(v) + case OpLOONG64MOVWload: + return rewriteValueLOONG64_OpLOONG64MOVWload(v) + case OpLOONG64MOVWreg: + return rewriteValueLOONG64_OpLOONG64MOVWreg(v) + case OpLOONG64MOVWstore: + return rewriteValueLOONG64_OpLOONG64MOVWstore(v) + case OpLOONG64MOVWstorezero: + return rewriteValueLOONG64_OpLOONG64MOVWstorezero(v) + case OpLOONG64MULV: + return rewriteValueLOONG64_OpLOONG64MULV(v) + case OpLOONG64NEGV: + return rewriteValueLOONG64_OpLOONG64NEGV(v) + case OpLOONG64NOR: + return rewriteValueLOONG64_OpLOONG64NOR(v) + case OpLOONG64NORconst: + return rewriteValueLOONG64_OpLOONG64NORconst(v) + case OpLOONG64OR: + return rewriteValueLOONG64_OpLOONG64OR(v) + case OpLOONG64ORconst: + return rewriteValueLOONG64_OpLOONG64ORconst(v) + case OpLOONG64REMV: + return rewriteValueLOONG64_OpLOONG64REMV(v) + case OpLOONG64REMVU: + return rewriteValueLOONG64_OpLOONG64REMVU(v) + case OpLOONG64ROTR: + return rewriteValueLOONG64_OpLOONG64ROTR(v) + case OpLOONG64ROTRV: + return rewriteValueLOONG64_OpLOONG64ROTRV(v) + case OpLOONG64SGT: + return rewriteValueLOONG64_OpLOONG64SGT(v) + case OpLOONG64SGTU: + return rewriteValueLOONG64_OpLOONG64SGTU(v) + case OpLOONG64SGTUconst: + return rewriteValueLOONG64_OpLOONG64SGTUconst(v) + case OpLOONG64SGTconst: + return rewriteValueLOONG64_OpLOONG64SGTconst(v) + case OpLOONG64SLLV: + return rewriteValueLOONG64_OpLOONG64SLLV(v) + case OpLOONG64SLLVconst: + return 
rewriteValueLOONG64_OpLOONG64SLLVconst(v) + case OpLOONG64SRAV: + return rewriteValueLOONG64_OpLOONG64SRAV(v) + case OpLOONG64SRAVconst: + return rewriteValueLOONG64_OpLOONG64SRAVconst(v) + case OpLOONG64SRLV: + return rewriteValueLOONG64_OpLOONG64SRLV(v) + case OpLOONG64SRLVconst: + return rewriteValueLOONG64_OpLOONG64SRLVconst(v) + case OpLOONG64SUBV: + return rewriteValueLOONG64_OpLOONG64SUBV(v) + case OpLOONG64SUBVconst: + return rewriteValueLOONG64_OpLOONG64SUBVconst(v) + case OpLOONG64XOR: + return rewriteValueLOONG64_OpLOONG64XOR(v) + case OpLOONG64XORconst: + return rewriteValueLOONG64_OpLOONG64XORconst(v) + case OpLeq16: + return rewriteValueLOONG64_OpLeq16(v) + case OpLeq16U: + return rewriteValueLOONG64_OpLeq16U(v) + case OpLeq32: + return rewriteValueLOONG64_OpLeq32(v) + case OpLeq32F: + return rewriteValueLOONG64_OpLeq32F(v) + case OpLeq32U: + return rewriteValueLOONG64_OpLeq32U(v) + case OpLeq64: + return rewriteValueLOONG64_OpLeq64(v) + case OpLeq64F: + return rewriteValueLOONG64_OpLeq64F(v) + case OpLeq64U: + return rewriteValueLOONG64_OpLeq64U(v) + case OpLeq8: + return rewriteValueLOONG64_OpLeq8(v) + case OpLeq8U: + return rewriteValueLOONG64_OpLeq8U(v) + case OpLess16: + return rewriteValueLOONG64_OpLess16(v) + case OpLess16U: + return rewriteValueLOONG64_OpLess16U(v) + case OpLess32: + return rewriteValueLOONG64_OpLess32(v) + case OpLess32F: + return rewriteValueLOONG64_OpLess32F(v) + case OpLess32U: + return rewriteValueLOONG64_OpLess32U(v) + case OpLess64: + return rewriteValueLOONG64_OpLess64(v) + case OpLess64F: + return rewriteValueLOONG64_OpLess64F(v) + case OpLess64U: + return rewriteValueLOONG64_OpLess64U(v) + case OpLess8: + return rewriteValueLOONG64_OpLess8(v) + case OpLess8U: + return rewriteValueLOONG64_OpLess8U(v) + case OpLoad: + return rewriteValueLOONG64_OpLoad(v) + case OpLocalAddr: + return rewriteValueLOONG64_OpLocalAddr(v) + case OpLsh16x16: + return rewriteValueLOONG64_OpLsh16x16(v) + case OpLsh16x32: + return 
rewriteValueLOONG64_OpLsh16x32(v) + case OpLsh16x64: + return rewriteValueLOONG64_OpLsh16x64(v) + case OpLsh16x8: + return rewriteValueLOONG64_OpLsh16x8(v) + case OpLsh32x16: + return rewriteValueLOONG64_OpLsh32x16(v) + case OpLsh32x32: + return rewriteValueLOONG64_OpLsh32x32(v) + case OpLsh32x64: + return rewriteValueLOONG64_OpLsh32x64(v) + case OpLsh32x8: + return rewriteValueLOONG64_OpLsh32x8(v) + case OpLsh64x16: + return rewriteValueLOONG64_OpLsh64x16(v) + case OpLsh64x32: + return rewriteValueLOONG64_OpLsh64x32(v) + case OpLsh64x64: + return rewriteValueLOONG64_OpLsh64x64(v) + case OpLsh64x8: + return rewriteValueLOONG64_OpLsh64x8(v) + case OpLsh8x16: + return rewriteValueLOONG64_OpLsh8x16(v) + case OpLsh8x32: + return rewriteValueLOONG64_OpLsh8x32(v) + case OpLsh8x64: + return rewriteValueLOONG64_OpLsh8x64(v) + case OpLsh8x8: + return rewriteValueLOONG64_OpLsh8x8(v) + case OpMod16: + return rewriteValueLOONG64_OpMod16(v) + case OpMod16u: + return rewriteValueLOONG64_OpMod16u(v) + case OpMod32: + return rewriteValueLOONG64_OpMod32(v) + case OpMod32u: + return rewriteValueLOONG64_OpMod32u(v) + case OpMod64: + return rewriteValueLOONG64_OpMod64(v) + case OpMod64u: + v.Op = OpLOONG64REMVU + return true + case OpMod8: + return rewriteValueLOONG64_OpMod8(v) + case OpMod8u: + return rewriteValueLOONG64_OpMod8u(v) + case OpMove: + return rewriteValueLOONG64_OpMove(v) + case OpMul16: + v.Op = OpLOONG64MULV + return true + case OpMul32: + v.Op = OpLOONG64MULV + return true + case OpMul32F: + v.Op = OpLOONG64MULF + return true + case OpMul64: + v.Op = OpLOONG64MULV + return true + case OpMul64F: + v.Op = OpLOONG64MULD + return true + case OpMul8: + v.Op = OpLOONG64MULV + return true + case OpNeg16: + v.Op = OpLOONG64NEGV + return true + case OpNeg32: + v.Op = OpLOONG64NEGV + return true + case OpNeg32F: + v.Op = OpLOONG64NEGF + return true + case OpNeg64: + v.Op = OpLOONG64NEGV + return true + case OpNeg64F: + v.Op = OpLOONG64NEGD + return true + case OpNeg8: + v.Op = 
OpLOONG64NEGV + return true + case OpNeq16: + return rewriteValueLOONG64_OpNeq16(v) + case OpNeq32: + return rewriteValueLOONG64_OpNeq32(v) + case OpNeq32F: + return rewriteValueLOONG64_OpNeq32F(v) + case OpNeq64: + return rewriteValueLOONG64_OpNeq64(v) + case OpNeq64F: + return rewriteValueLOONG64_OpNeq64F(v) + case OpNeq8: + return rewriteValueLOONG64_OpNeq8(v) + case OpNeqB: + v.Op = OpLOONG64XOR + return true + case OpNeqPtr: + return rewriteValueLOONG64_OpNeqPtr(v) + case OpNilCheck: + v.Op = OpLOONG64LoweredNilCheck + return true + case OpNot: + return rewriteValueLOONG64_OpNot(v) + case OpOffPtr: + return rewriteValueLOONG64_OpOffPtr(v) + case OpOr16: + v.Op = OpLOONG64OR + return true + case OpOr32: + v.Op = OpLOONG64OR + return true + case OpOr64: + v.Op = OpLOONG64OR + return true + case OpOr8: + v.Op = OpLOONG64OR + return true + case OpOrB: + v.Op = OpLOONG64OR + return true + case OpPanicBounds: + return rewriteValueLOONG64_OpPanicBounds(v) + case OpRotateLeft16: + return rewriteValueLOONG64_OpRotateLeft16(v) + case OpRotateLeft32: + return rewriteValueLOONG64_OpRotateLeft32(v) + case OpRotateLeft64: + return rewriteValueLOONG64_OpRotateLeft64(v) + case OpRotateLeft8: + return rewriteValueLOONG64_OpRotateLeft8(v) + case OpRound32F: + v.Op = OpCopy + return true + case OpRound64F: + v.Op = OpCopy + return true + case OpRsh16Ux16: + return rewriteValueLOONG64_OpRsh16Ux16(v) + case OpRsh16Ux32: + return rewriteValueLOONG64_OpRsh16Ux32(v) + case OpRsh16Ux64: + return rewriteValueLOONG64_OpRsh16Ux64(v) + case OpRsh16Ux8: + return rewriteValueLOONG64_OpRsh16Ux8(v) + case OpRsh16x16: + return rewriteValueLOONG64_OpRsh16x16(v) + case OpRsh16x32: + return rewriteValueLOONG64_OpRsh16x32(v) + case OpRsh16x64: + return rewriteValueLOONG64_OpRsh16x64(v) + case OpRsh16x8: + return rewriteValueLOONG64_OpRsh16x8(v) + case OpRsh32Ux16: + return rewriteValueLOONG64_OpRsh32Ux16(v) + case OpRsh32Ux32: + return rewriteValueLOONG64_OpRsh32Ux32(v) + case OpRsh32Ux64: + 
return rewriteValueLOONG64_OpRsh32Ux64(v) + case OpRsh32Ux8: + return rewriteValueLOONG64_OpRsh32Ux8(v) + case OpRsh32x16: + return rewriteValueLOONG64_OpRsh32x16(v) + case OpRsh32x32: + return rewriteValueLOONG64_OpRsh32x32(v) + case OpRsh32x64: + return rewriteValueLOONG64_OpRsh32x64(v) + case OpRsh32x8: + return rewriteValueLOONG64_OpRsh32x8(v) + case OpRsh64Ux16: + return rewriteValueLOONG64_OpRsh64Ux16(v) + case OpRsh64Ux32: + return rewriteValueLOONG64_OpRsh64Ux32(v) + case OpRsh64Ux64: + return rewriteValueLOONG64_OpRsh64Ux64(v) + case OpRsh64Ux8: + return rewriteValueLOONG64_OpRsh64Ux8(v) + case OpRsh64x16: + return rewriteValueLOONG64_OpRsh64x16(v) + case OpRsh64x32: + return rewriteValueLOONG64_OpRsh64x32(v) + case OpRsh64x64: + return rewriteValueLOONG64_OpRsh64x64(v) + case OpRsh64x8: + return rewriteValueLOONG64_OpRsh64x8(v) + case OpRsh8Ux16: + return rewriteValueLOONG64_OpRsh8Ux16(v) + case OpRsh8Ux32: + return rewriteValueLOONG64_OpRsh8Ux32(v) + case OpRsh8Ux64: + return rewriteValueLOONG64_OpRsh8Ux64(v) + case OpRsh8Ux8: + return rewriteValueLOONG64_OpRsh8Ux8(v) + case OpRsh8x16: + return rewriteValueLOONG64_OpRsh8x16(v) + case OpRsh8x32: + return rewriteValueLOONG64_OpRsh8x32(v) + case OpRsh8x64: + return rewriteValueLOONG64_OpRsh8x64(v) + case OpRsh8x8: + return rewriteValueLOONG64_OpRsh8x8(v) + case OpSelect0: + return rewriteValueLOONG64_OpSelect0(v) + case OpSelect1: + return rewriteValueLOONG64_OpSelect1(v) + case OpSignExt16to32: + v.Op = OpLOONG64MOVHreg + return true + case OpSignExt16to64: + v.Op = OpLOONG64MOVHreg + return true + case OpSignExt32to64: + v.Op = OpLOONG64MOVWreg + return true + case OpSignExt8to16: + v.Op = OpLOONG64MOVBreg + return true + case OpSignExt8to32: + v.Op = OpLOONG64MOVBreg + return true + case OpSignExt8to64: + v.Op = OpLOONG64MOVBreg + return true + case OpSlicemask: + return rewriteValueLOONG64_OpSlicemask(v) + case OpSqrt: + v.Op = OpLOONG64SQRTD + return true + case OpSqrt32: + v.Op = OpLOONG64SQRTF + 
return true + case OpStaticCall: + v.Op = OpLOONG64CALLstatic + return true + case OpStore: + return rewriteValueLOONG64_OpStore(v) + case OpSub16: + v.Op = OpLOONG64SUBV + return true + case OpSub32: + v.Op = OpLOONG64SUBV + return true + case OpSub32F: + v.Op = OpLOONG64SUBF + return true + case OpSub64: + v.Op = OpLOONG64SUBV + return true + case OpSub64F: + v.Op = OpLOONG64SUBD + return true + case OpSub8: + v.Op = OpLOONG64SUBV + return true + case OpSubPtr: + v.Op = OpLOONG64SUBV + return true + case OpTailCall: + v.Op = OpLOONG64CALLtail + return true + case OpTrunc16to8: + v.Op = OpCopy + return true + case OpTrunc32to16: + v.Op = OpCopy + return true + case OpTrunc32to8: + v.Op = OpCopy + return true + case OpTrunc64to16: + v.Op = OpCopy + return true + case OpTrunc64to32: + v.Op = OpCopy + return true + case OpTrunc64to8: + v.Op = OpCopy + return true + case OpWB: + v.Op = OpLOONG64LoweredWB + return true + case OpXor16: + v.Op = OpLOONG64XOR + return true + case OpXor32: + v.Op = OpLOONG64XOR + return true + case OpXor64: + v.Op = OpLOONG64XOR + return true + case OpXor8: + v.Op = OpLOONG64XOR + return true + case OpZero: + return rewriteValueLOONG64_OpZero(v) + case OpZeroExt16to32: + v.Op = OpLOONG64MOVHUreg + return true + case OpZeroExt16to64: + v.Op = OpLOONG64MOVHUreg + return true + case OpZeroExt32to64: + v.Op = OpLOONG64MOVWUreg + return true + case OpZeroExt8to16: + v.Op = OpLOONG64MOVBUreg + return true + case OpZeroExt8to32: + v.Op = OpLOONG64MOVBUreg + return true + case OpZeroExt8to64: + v.Op = OpLOONG64MOVBUreg + return true + } + return false +} +func rewriteValueLOONG64_OpAddr(v *Value) bool { + v_0 := v.Args[0] + // match: (Addr {sym} base) + // result: (MOVVaddr {sym} base) + for { + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpLOONG64MOVVaddr) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } +} +func rewriteValueLOONG64_OpAtomicCompareAndSwap32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + 
v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicCompareAndSwap32 ptr old new mem) + // result: (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem) + for { + ptr := v_0 + old := v_1 + new := v_2 + mem := v_3 + v.reset(OpLOONG64LoweredAtomicCas32) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(old) + v.AddArg4(ptr, v0, new, mem) + return true + } +} +func rewriteValueLOONG64_OpAvg64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Avg64u x y) + // result: (ADDV (SRLVconst (SUBV x y) [1]) y) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64ADDV) + v0 := b.NewValue0(v.Pos, OpLOONG64SRLVconst, t) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SUBV, t) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg2(v0, y) + return true + } +} +func rewriteValueLOONG64_OpCom16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Com16 x) + // result: (NOR (MOVVconst [0]) x) + for { + x := v_0 + v.reset(OpLOONG64NOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueLOONG64_OpCom32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Com32 x) + // result: (NOR (MOVVconst [0]) x) + for { + x := v_0 + v.reset(OpLOONG64NOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueLOONG64_OpCom64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Com64 x) + // result: (NOR (MOVVconst [0]) x) + for { + x := v_0 + v.reset(OpLOONG64NOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueLOONG64_OpCom8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (Com8 x) + // result: (NOR (MOVVconst [0]) x) + for { + x := v_0 + v.reset(OpLOONG64NOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueLOONG64_OpCondSelect(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CondSelect x y cond) + // result: (OR (MASKEQZ x cond) (MASKNEZ y cond)) + for { + t := v.Type + x := v_0 + y := v_1 + cond := v_2 + v.reset(OpLOONG64OR) + v0 := b.NewValue0(v.Pos, OpLOONG64MASKEQZ, t) + v0.AddArg2(x, cond) + v1 := b.NewValue0(v.Pos, OpLOONG64MASKNEZ, t) + v1.AddArg2(y, cond) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpConst16(v *Value) bool { + // match: (Const16 [val]) + // result: (MOVVconst [int64(val)]) + for { + val := auxIntToInt16(v.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueLOONG64_OpConst32(v *Value) bool { + // match: (Const32 [val]) + // result: (MOVVconst [int64(val)]) + for { + val := auxIntToInt32(v.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueLOONG64_OpConst32F(v *Value) bool { + // match: (Const32F [val]) + // result: (MOVFconst [float64(val)]) + for { + val := auxIntToFloat32(v.AuxInt) + v.reset(OpLOONG64MOVFconst) + v.AuxInt = float64ToAuxInt(float64(val)) + return true + } +} +func rewriteValueLOONG64_OpConst64(v *Value) bool { + // match: (Const64 [val]) + // result: (MOVVconst [int64(val)]) + for { + val := auxIntToInt64(v.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueLOONG64_OpConst64F(v *Value) bool { + // match: (Const64F [val]) + // result: (MOVDconst [float64(val)]) + for { + val := auxIntToFloat64(v.AuxInt) + v.reset(OpLOONG64MOVDconst) + v.AuxInt = float64ToAuxInt(float64(val)) + return true + } +} 
+func rewriteValueLOONG64_OpConst8(v *Value) bool { + // match: (Const8 [val]) + // result: (MOVVconst [int64(val)]) + for { + val := auxIntToInt8(v.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueLOONG64_OpConstBool(v *Value) bool { + // match: (ConstBool [t]) + // result: (MOVVconst [int64(b2i(t))]) + for { + t := auxIntToBool(v.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(b2i(t))) + return true + } +} +func rewriteValueLOONG64_OpConstNil(v *Value) bool { + // match: (ConstNil) + // result: (MOVVconst [0]) + for { + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } +} +func rewriteValueLOONG64_OpDiv16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 x y) + // result: (DIVV (SignExt16to64 x) (SignExt16to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64DIVV) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpDiv16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16u x y) + // result: (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64DIVVU) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpDiv32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32 x y) + // result: (DIVV (SignExt32to64 x) (SignExt32to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64DIVV) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + 
v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpDiv32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32u x y) + // result: (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64DIVVU) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpDiv64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Div64 x y) + // result: (DIVV x y) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64DIVV) + v.AddArg2(x, y) + return true + } +} +func rewriteValueLOONG64_OpDiv8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (DIVV (SignExt8to64 x) (SignExt8to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64DIVV) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpDiv8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64DIVVU) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpEq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq16 x y) + // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = 
int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpEq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq32 x y) + // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpEq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32F x y) + // result: (FPFlagTrue (CMPEQF x y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64FPFlagTrue) + v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQF, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpEq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq64 x y) + // result: (SGTU (MOVVconst [1]) (XOR x y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpEq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq64F x y) + // result: (FPFlagTrue (CMPEQD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64FPFlagTrue) + v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQD, 
types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpEq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq8 x y) + // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpEqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqB x y) + // result: (XOR (MOVVconst [1]) (XOR x y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.Bool) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpEqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqPtr x y) + // result: (SGTU (MOVVconst [1]) (XOR x y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpHmul32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul32 x y) + // result: (SRAVconst (MULV (SignExt32to64 x) (SignExt32to64 y)) [32]) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAVconst) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpLOONG64MULV, typ.Int64) + v1 
:= b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpHmul32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul32u x y) + // result: (SRLVconst (MULV (ZeroExt32to64 x) (ZeroExt32to64 y)) [32]) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SRLVconst) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpLOONG64MULV, typ.Int64) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpIsInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsInBounds idx len) + // result: (SGTU len idx) + for { + idx := v_0 + len := v_1 + v.reset(OpLOONG64SGTU) + v.AddArg2(len, idx) + return true + } +} +func rewriteValueLOONG64_OpIsNonNil(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (IsNonNil ptr) + // result: (SGTU ptr (MOVVconst [0])) + for { + ptr := v_0 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(ptr, v0) + return true + } +} +func rewriteValueLOONG64_OpIsSliceInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (IsSliceInBounds idx len) + // result: (XOR (MOVVconst [1]) (SGTU idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v1.AddArg2(idx, len) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLOONG64ADDV(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDV 
x (MOVVconst [c])) + // cond: is32Bit(c) && !t.IsPtr() + // result: (ADDVconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + continue + } + t := v_1.Type + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c) && !t.IsPtr()) { + continue + } + v.reset(OpLOONG64ADDVconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (ADDV x (NEGV y)) + // result: (SUBV x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpLOONG64NEGV { + continue + } + y := v_1.Args[0] + v.reset(OpLOONG64SUBV) + v.AddArg2(x, y) + return true + } + break + } + return false +} +func rewriteValueLOONG64_OpLOONG64ADDVconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) + // cond: is32Bit(off1+int64(off2)) + // result: (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr) + for { + off1 := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + if !(is32Bit(off1 + int64(off2))) { + break + } + v.reset(OpLOONG64MOVVaddr) + v.AuxInt = int32ToAuxInt(int32(off1) + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg(ptr) + return true + } + // match: (ADDVconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ADDVconst [c] (MOVVconst [d])) + // result: (MOVVconst [c+d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c + d) + return true + } + // match: (ADDVconst [c] (ADDVconst [d] x)) + // cond: is32Bit(c+d) + // result: (ADDVconst [c+d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64ADDVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(c + d)) { + 
break + } + v.reset(OpLOONG64ADDVconst) + v.AuxInt = int64ToAuxInt(c + d) + v.AddArg(x) + return true + } + // match: (ADDVconst [c] (SUBVconst [d] x)) + // cond: is32Bit(c-d) + // result: (ADDVconst [c-d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64SUBVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(c - d)) { + break + } + v.reset(OpLOONG64ADDVconst) + v.AuxInt = int64ToAuxInt(c - d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64AND(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AND x (MOVVconst [c])) + // cond: is32Bit(c) + // result: (ANDconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpLOONG64ANDconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (AND x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64ANDconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ANDconst [0] _) + // result: (MOVVconst [0]) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (ANDconst [-1] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != -1 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ANDconst [c] (MOVVconst [d])) + // result: (MOVVconst [c&d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c & d) + return true + } + // match: (ANDconst [c] (ANDconst [d] x)) + // result: (ANDconst [c&d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64ANDconst { + break + } + d := 
auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpLOONG64ANDconst) + v.AuxInt = int64ToAuxInt(c & d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64DIVV(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DIVV (MOVVconst [c]) (MOVVconst [d])) + // cond: d != 0 + // result: (MOVVconst [c/d]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c / d) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64DIVVU(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (DIVVU x (MOVVconst [1])) + // result: x + for { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != 1 { + break + } + v.copyOf(x) + return true + } + // match: (DIVVU x (MOVVconst [c])) + // cond: isPowerOfTwo64(c) + // result: (SRLVconst [log64(c)] x) + for { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c)) { + break + } + v.reset(OpLOONG64SRLVconst) + v.AuxInt = int64ToAuxInt(log64(c)) + v.AddArg(x) + return true + } + // match: (DIVVU (MOVVconst [c]) (MOVVconst [d])) + // cond: d != 0 + // result: (MOVVconst [int64(uint64(c)/uint64(d))]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d))) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) + // cond: is32Bit(c) + // result: (LoweredAtomicAddconst32 [int32(c)] ptr mem) + for { + 
ptr := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpLOONG64LoweredAtomicAddconst32) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) + // cond: is32Bit(c) + // result: (LoweredAtomicAddconst64 [c] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpLOONG64LoweredAtomicAddconst64) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64LoweredAtomicStore32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LoweredAtomicStore32 ptr (MOVVconst [0]) mem) + // result: (LoweredAtomicStorezero32 ptr mem) + for { + ptr := v_0 + if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpLOONG64LoweredAtomicStorezero32) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64LoweredAtomicStore64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LoweredAtomicStore64 ptr (MOVVconst [0]) mem) + // result: (LoweredAtomicStorezero64 ptr mem) + for { + ptr := v_0 + if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpLOONG64LoweredAtomicStorezero64) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MASKEQZ(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MASKEQZ (MOVVconst [0]) cond) + // result: (MOVVconst [0]) + for { + if v_0.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + 
v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (MASKEQZ x (MOVVconst [c])) + // cond: c == 0 + // result: (MOVVconst [0]) + for { + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c == 0) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (MASKEQZ x (MOVVconst [c])) + // cond: c != 0 + // result: x + for { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c != 0) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MASKNEZ(v *Value) bool { + v_0 := v.Args[0] + // match: (MASKNEZ (MOVVconst [0]) cond) + // result: (MOVVconst [0]) + for { + if v_0.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVBUload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVBUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 
:= auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVBUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVBUreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVBUreg x:(SGT _ _)) + // result: x + for { + x := v_0 + if x.Op != OpLOONG64SGT { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(SGTU _ _)) + // result: x + for { + x := v_0 + if x.Op != OpLOONG64SGTU { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(MOVBUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg x:(MOVBUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg (MOVVconst [c])) + // result: (MOVVconst [int64(uint8(c))]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint8(c))) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVBload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := 
auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVBload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVBload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVBreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVBreg x:(MOVBload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVBreg x:(MOVBreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVBreg (MOVVconst [c])) + // result: (MOVVconst [int64(int8(c))]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(int8(c))) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVBstore [off1] 
{sym} (ADDVconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVBreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if 
v_1.Op != OpLOONG64MOVBUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVHreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVHUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVWUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) + // 
cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVBstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVBstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVDload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + 
v.reset(OpLOONG64MOVDload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVDload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVDstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr 
val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVDstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVFload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVFload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVFload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + 
v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVFstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVFstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVFstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || 
!config.ctxt.Flag_dynlink) + // result: (MOVHUload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVHUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVHUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVHUreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVHUreg x:(MOVBUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVHUload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVBUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUreg _)) + // result: 
(MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVHUreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg (MOVVconst [c])) + // result: (MOVVconst [int64(uint16(c))]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint16(c))) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVHload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVHload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVHload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func 
rewriteValueLOONG64_OpLOONG64MOVHreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVHreg x:(MOVBload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVHload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVHreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHreg (MOVVconst [c])) + // result: (MOVVconst [int64(int16(c))]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(int16(c))) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) 
+ ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVHreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVHUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := 
auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVWUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVHstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && 
is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVHstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVVload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVVload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVVload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVVreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVVreg x) + // cond: x.Uses == 1 + // result: (MOVVnop x) + for { + x := v_0 + 
if !(x.Uses == 1) { + break + } + v.reset(OpLOONG64MOVVnop) + v.AddArg(x) + return true + } + // match: (MOVVreg (MOVVconst [c])) + // result: (MOVVconst [c]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVVstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVVstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVVstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func 
rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVVstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVVstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVVstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVWUload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if 
v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVWUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVWUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVWUreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVWUreg x:(MOVBUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVHUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVHUload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVWUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVWUload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVBUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg 
x:(MOVHUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVHUreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVWUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVWUreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg (MOVVconst [c])) + // result: (MOVVconst [int64(uint32(c))]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint32(c))) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVWload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVWload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + 
break + } + v.reset(OpLOONG64MOVWload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVWreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVWreg x:(MOVBload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVHload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVHUload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVWload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVHreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVWreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg (MOVVconst [c])) + // result: 
(MOVVconst [int64(int32(c))]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(int32(c))) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVWstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVWstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := 
auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVWUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVWstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVWstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) + // result: (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != 
OpSB || !config.ctxt.Flag_dynlink)) { + break + } + v.reset(OpLOONG64MOVWstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MULV(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MULV x (MOVVconst [-1])) + // result: (NEGV x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != -1 { + continue + } + v.reset(OpLOONG64NEGV) + v.AddArg(x) + return true + } + break + } + // match: (MULV _ (MOVVconst [0])) + // result: (MOVVconst [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { + continue + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + break + } + // match: (MULV x (MOVVconst [1])) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != 1 { + continue + } + v.copyOf(x) + return true + } + break + } + // match: (MULV x (MOVVconst [c])) + // cond: isPowerOfTwo64(c) + // result: (SLLVconst [log64(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c)) { + continue + } + v.reset(OpLOONG64SLLVconst) + v.AuxInt = int64ToAuxInt(log64(c)) + v.AddArg(x) + return true + } + break + } + // match: (MULV (MOVVconst [c]) (MOVVconst [d])) + // result: (MOVVconst [c*d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLOONG64MOVVconst { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpLOONG64MOVVconst { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c * d) + return 
true + } + break + } + return false +} +func rewriteValueLOONG64_OpLOONG64NEGV(v *Value) bool { + v_0 := v.Args[0] + // match: (NEGV (MOVVconst [c])) + // result: (MOVVconst [-c]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(-c) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64NOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NOR x (MOVVconst [c])) + // cond: is32Bit(c) + // result: (NORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpLOONG64NORconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValueLOONG64_OpLOONG64NORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (NORconst [c] (MOVVconst [d])) + // result: (MOVVconst [^(c|d)]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(^(c | d)) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64OR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OR x (MOVVconst [c])) + // cond: is32Bit(c) + // result: (ORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpLOONG64ORconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (OR x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64ORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ORconst [0] x) + // result: x + for { + if 
auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ORconst [-1] _) + // result: (MOVVconst [-1]) + for { + if auxIntToInt64(v.AuxInt) != -1 { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + // match: (ORconst [c] (MOVVconst [d])) + // result: (MOVVconst [c|d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c | d) + return true + } + // match: (ORconst [c] (ORconst [d] x)) + // cond: is32Bit(c|d) + // result: (ORconst [c|d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64ORconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(c | d)) { + break + } + v.reset(OpLOONG64ORconst) + v.AuxInt = int64ToAuxInt(c | d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64REMV(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (REMV (MOVVconst [c]) (MOVVconst [d])) + // cond: d != 0 + // result: (MOVVconst [c%d]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c % d) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64REMVU(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (REMVU _ (MOVVconst [1])) + // result: (MOVVconst [0]) + for { + if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != 1 { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (REMVU x (MOVVconst [c])) + // cond: isPowerOfTwo64(c) + // result: (ANDconst [c-1] x) + for { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c)) { + break 
+ } + v.reset(OpLOONG64ANDconst) + v.AuxInt = int64ToAuxInt(c - 1) + v.AddArg(x) + return true + } + // match: (REMVU (MOVVconst [c]) (MOVVconst [d])) + // cond: d != 0 + // result: (MOVVconst [int64(uint64(c)%uint64(d))]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d))) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64ROTR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ROTR x (MOVVconst [c])) + // result: (ROTRconst x [c&31]) + for { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpLOONG64ROTRconst) + v.AuxInt = int64ToAuxInt(c & 31) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64ROTRV(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ROTRV x (MOVVconst [c])) + // result: (ROTRVconst x [c&63]) + for { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpLOONG64ROTRVconst) + v.AuxInt = int64ToAuxInt(c & 63) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SGT(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SGT (MOVVconst [c]) x) + // cond: is32Bit(c) + // result: (SGTconst [c] x) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(is32Bit(c)) { + break + } + v.reset(OpLOONG64SGTconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (SGT x x) + // result: (MOVVconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SGTU(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] + // match: (SGTU (MOVVconst [c]) x) + // cond: is32Bit(c) + // result: (SGTUconst [c] x) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(is32Bit(c)) { + break + } + v.reset(OpLOONG64SGTUconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (SGTU x x) + // result: (MOVVconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SGTUconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SGTUconst [c] (MOVVconst [d])) + // cond: uint64(c)>uint64(d) + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(uint64(c) > uint64(d)) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTUconst [c] (MOVVconst [d])) + // cond: uint64(c)<=uint64(d) + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(uint64(c) <= uint64(d)) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTUconst [c] (MOVBUreg _)) + // cond: 0xff < uint64(c) + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVBUreg || !(0xff < uint64(c)) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTUconst [c] (MOVHUreg _)) + // cond: 0xffff < uint64(c) + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVHUreg || !(0xffff < uint64(c)) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTUconst [c] (ANDconst [m] _)) + // cond: uint64(m) < uint64(c) + // result: (MOVVconst [1]) + for { + c := 
auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64ANDconst { + break + } + m := auxIntToInt64(v_0.AuxInt) + if !(uint64(m) < uint64(c)) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTUconst [c] (SRLVconst _ [d])) + // cond: 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64SRLVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SGTconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SGTconst [c] (MOVVconst [d])) + // cond: c>d + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(c > d) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVVconst [d])) + // cond: c<=d + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(c <= d) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVBreg _)) + // cond: 0x7f < c + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVBreg || !(0x7f < c) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVBreg _)) + // cond: c <= -0x80 + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVBreg || !(c <= -0x80) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVBUreg _)) + // cond: 0xff < c + // result: 
(MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVBUreg || !(0xff < c) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVBUreg _)) + // cond: c < 0 + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVBUreg || !(c < 0) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVHreg _)) + // cond: 0x7fff < c + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVHreg || !(0x7fff < c) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVHreg _)) + // cond: c <= -0x8000 + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVHreg || !(c <= -0x8000) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVHUreg _)) + // cond: 0xffff < c + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVHUreg || !(0xffff < c) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVHUreg _)) + // cond: c < 0 + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVHUreg || !(c < 0) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVWUreg _)) + // cond: c < 0 + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVWUreg || !(c < 0) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTconst [c] (ANDconst [m] _)) + // cond: 0 <= m && m < c + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64ANDconst { + break + } + m := 
auxIntToInt64(v_0.AuxInt) + if !(0 <= m && m < c) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTconst [c] (SRLVconst _ [d])) + // cond: 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64SRLVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SLLV(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SLLV _ (MOVVconst [c])) + // cond: uint64(c)>=64 + // result: (MOVVconst [0]) + for { + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 64) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SLLV x (MOVVconst [c])) + // result: (SLLVconst x [c]) + for { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpLOONG64SLLVconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SLLVconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SLLVconst [c] (MOVVconst [d])) + // result: (MOVVconst [d<=64 + // result: (SRAVconst x [63]) + for { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 64) { + break + } + v.reset(OpLOONG64SRAVconst) + v.AuxInt = int64ToAuxInt(63) + v.AddArg(x) + return true + } + // match: (SRAV x (MOVVconst [c])) + // result: (SRAVconst x [c]) + for { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpLOONG64SRAVconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func 
rewriteValueLOONG64_OpLOONG64SRAVconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SRAVconst [c] (MOVVconst [d])) + // result: (MOVVconst [d>>uint64(c)]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(d >> uint64(c)) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SRLV(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRLV _ (MOVVconst [c])) + // cond: uint64(c)>=64 + // result: (MOVVconst [0]) + for { + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 64) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SRLV x (MOVVconst [c])) + // result: (SRLVconst x [c]) + for { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpLOONG64SRLVconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SRLVconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SRLVconst [c] (MOVVconst [d])) + // result: (MOVVconst [int64(uint64(d)>>uint64(c))]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint64(d) >> uint64(c))) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SUBV(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBV x (MOVVconst [c])) + // cond: is32Bit(c) + // result: (SUBVconst [c] x) + for { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + break + } + v.reset(OpLOONG64SUBVconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (SUBV x x) + // result: (MOVVconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + 
v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SUBV (MOVVconst [0]) x) + // result: (NEGV x) + for { + if v_0.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_1 + v.reset(OpLOONG64NEGV) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SUBVconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SUBVconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SUBVconst [c] (MOVVconst [d])) + // result: (MOVVconst [d-c]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(d - c) + return true + } + // match: (SUBVconst [c] (SUBVconst [d] x)) + // cond: is32Bit(-c-d) + // result: (ADDVconst [-c-d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64SUBVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(-c - d)) { + break + } + v.reset(OpLOONG64ADDVconst) + v.AuxInt = int64ToAuxInt(-c - d) + v.AddArg(x) + return true + } + // match: (SUBVconst [c] (ADDVconst [d] x)) + // cond: is32Bit(-c+d) + // result: (ADDVconst [-c+d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64ADDVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(-c + d)) { + break + } + v.reset(OpLOONG64ADDVconst) + v.AuxInt = int64ToAuxInt(-c + d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64XOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XOR x (MOVVconst [c])) + // cond: is32Bit(c) + // result: (XORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpLOONG64XORconst) 
+ v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (XOR x x) + // result: (MOVVconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64XORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (XORconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (XORconst [-1] x) + // result: (NORconst [0] x) + for { + if auxIntToInt64(v.AuxInt) != -1 { + break + } + x := v_0 + v.reset(OpLOONG64NORconst) + v.AuxInt = int64ToAuxInt(0) + v.AddArg(x) + return true + } + // match: (XORconst [c] (MOVVconst [d])) + // result: (MOVVconst [c^d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c ^ d) + return true + } + // match: (XORconst [c] (XORconst [d] x)) + // cond: is32Bit(c^d) + // result: (XORconst [c^d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64XORconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(c ^ d)) { + break + } + v.reset(OpLOONG64XORconst) + v.AuxInt = int64ToAuxInt(c ^ d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16 x y) + // result: (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool) + v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true 
+ } +} +func rewriteValueLOONG64_OpLeq16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16U x y) + // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq32 x y) + // result: (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool) + v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32F x y) + // result: (FPFlagTrue (CMPGEF y x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64FPFlagTrue) + v0 := b.NewValue0(v.Pos, OpLOONG64CMPGEF, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpLeq32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq32U x y) + // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = 
int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq64 x y) + // result: (XOR (MOVVconst [1]) (SGT x y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64F x y) + // result: (FPFlagTrue (CMPGED y x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64FPFlagTrue) + v0 := b.NewValue0(v.Pos, OpLOONG64CMPGED, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpLeq64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq64U x y) + // result: (XOR (MOVVconst [1]) (SGTU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8 x y) + // result: (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool) + v2 := b.NewValue0(v.Pos, 
OpSignExt8to64, typ.Int64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLeq8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8U x y) + // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLess16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16 x y) + // result: (SGT (SignExt16to64 y) (SignExt16to64 x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGT) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLess16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16U x y) + // result: (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLess32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less32 x y) + // result: (SGT (SignExt32to64 y) (SignExt32to64 x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGT) + v0 := 
b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLess32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32F x y) + // result: (FPFlagTrue (CMPGTF y x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64FPFlagTrue) + v0 := b.NewValue0(v.Pos, OpLOONG64CMPGTF, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpLess32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less32U x y) + // result: (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLess64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Less64 x y) + // result: (SGT y x) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGT) + v.AddArg2(y, x) + return true + } +} +func rewriteValueLOONG64_OpLess64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64F x y) + // result: (FPFlagTrue (CMPGTD y x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64FPFlagTrue) + v0 := b.NewValue0(v.Pos, OpLOONG64CMPGTD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpLess64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Less64U x y) + // result: (SGTU y x) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v.AddArg2(y, x) + return true + } +} +func rewriteValueLOONG64_OpLess8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8 x y) + // result: (SGT (SignExt8to64 y) (SignExt8to64 
x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGT) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLess8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8U x y) + // result: (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Load ptr mem) + // cond: t.IsBoolean() + // result: (MOVBUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean()) { + break + } + v.reset(OpLOONG64MOVBUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is8BitInt(t) && t.IsSigned()) + // result: (MOVBload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is8BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpLOONG64MOVBload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is8BitInt(t) && !t.IsSigned()) + // result: (MOVBUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is8BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpLOONG64MOVBUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is16BitInt(t) && t.IsSigned()) + // result: (MOVHload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpLOONG64MOVHload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is16BitInt(t) && !t.IsSigned()) + // result: (MOVHUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t) 
&& !t.IsSigned()) { + break + } + v.reset(OpLOONG64MOVHUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is32BitInt(t) && t.IsSigned()) + // result: (MOVWload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpLOONG64MOVWload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is32BitInt(t) && !t.IsSigned()) + // result: (MOVWUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpLOONG64MOVWUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (MOVVload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpLOONG64MOVVload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (MOVFload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + v.reset(OpLOONG64MOVFload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (MOVDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(OpLOONG64MOVDload) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLocalAddr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (MOVVaddr {sym} (SPanchored base mem)) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(OpLOONG64MOVVaddr) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) + v.AddArg(v0) + return true + } + // match: (LocalAddr {sym} base _) + // 
cond: !t.Elem().HasPointers() + // result: (MOVVaddr {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(OpLOONG64MOVVaddr) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + return false +} +func rewriteValueLOONG64_OpLsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x16 x y) + // result: (MASKEQZ (SLLV x (ZeroExt16to64 y)) (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(v3, v1) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueLOONG64_OpLsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x32 x y) + // result: (MASKEQZ (SLLV x (ZeroExt32to64 y)) (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(v3, v1) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueLOONG64_OpLsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x64 x y) + // result: (MASKEQZ (SLLV x y) (SGTU (MOVVconst [64]) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v0.AddArg2(x, y) + v1 := 
b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x8 x y) + // result: (MASKEQZ (SLLV x (ZeroExt8to64 y)) (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(v3, v1) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueLOONG64_OpLsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x16 x y) + // result: (MASKEQZ (SLLV x (ZeroExt16to64 y)) (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(v3, v1) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueLOONG64_OpLsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x32 x y) + // result: (MASKEQZ (SLLV x (ZeroExt32to64 y)) (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 
:= b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(v3, v1) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueLOONG64_OpLsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x64 x y) + // result: (MASKEQZ (SLLV x y) (SGTU (MOVVconst [64]) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x8 x y) + // result: (MASKEQZ (SLLV x (ZeroExt8to64 y)) (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(v3, v1) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueLOONG64_OpLsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x16 x y) + // result: (MASKEQZ (SLLV x (ZeroExt16to64 y)) (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, 
typ.UInt64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(v3, v1) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueLOONG64_OpLsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x32 x y) + // result: (MASKEQZ (SLLV x (ZeroExt32to64 y)) (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(v3, v1) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueLOONG64_OpLsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x64 x y) + // result: (MASKEQZ (SLLV x y) (SGTU (MOVVconst [64]) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x8 x y) + // result: (MASKEQZ (SLLV x (ZeroExt8to64 y)) (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(v3, v1) + v.AddArg2(v0, v2) + return true + } 
+} +func rewriteValueLOONG64_OpLsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x16 x y) + // result: (MASKEQZ (SLLV x (ZeroExt16to64 y)) (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(v3, v1) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueLOONG64_OpLsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x32 x y) + // result: (MASKEQZ (SLLV x (ZeroExt32to64 y)) (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(v3, v1) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueLOONG64_OpLsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x64 x y) + // result: (MASKEQZ (SLLV x y) (SGTU (MOVVconst [64]) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x8 x y) + // result: (MASKEQZ (SLLV x (ZeroExt8to64 y)) (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(v3, v1) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueLOONG64_OpMod16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16 x y) + // result: (REMV (SignExt16to64 x) (SignExt16to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64REMV) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpMod16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16u x y) + // result: (REMVU (ZeroExt16to64 x) (ZeroExt16to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64REMVU) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpMod32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32 x y) + // result: (REMV (SignExt32to64 x) (SignExt32to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64REMV) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpMod32u(v *Value) bool { + v_1 := v.Args[1] + v_0 
:= v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32u x y) + // result: (REMVU (ZeroExt32to64 x) (ZeroExt32to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64REMVU) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpMod64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Mod64 x y) + // result: (REMV x y) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64REMV) + v.AddArg2(x, y) + return true + } +} +func rewriteValueLOONG64_OpMod8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8 x y) + // result: (REMV (SignExt8to64 x) (SignExt8to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64REMV) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpMod8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8u x y) + // result: (REMVU (ZeroExt8to64 x) (ZeroExt8to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64REMVU) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpMove(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Move [0] _ _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + dst := v_0 + src := 
v_1 + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore dst (MOVHload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpLOONG64MOVHstore) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] dst src mem) + // result: (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v0.AuxInt = int32ToAuxInt(1) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [4] {t} dst src mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore dst (MOVWload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpLOONG64MOVWstore) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [4] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + 
v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [4] dst src mem) + // result: (MOVBstore [3] dst (MOVBload [3] src mem) (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v2.AuxInt = int32ToAuxInt(2) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(1) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v4.AuxInt = int32ToAuxInt(1) + v4.AddArg2(src, mem) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v6 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [8] {t} dst src mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVVstore dst (MOVVload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpLOONG64MOVVstore) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [8] {t} 
dst src mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpLOONG64MOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [8] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v0.AuxInt = int32ToAuxInt(6) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v2.AuxInt = int32ToAuxInt(4) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(2) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v4.AuxInt = int32ToAuxInt(2) + v4.AddArg2(src, mem) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v6 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [3] dst src mem) + // result: 
(MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v2.AuxInt = int32ToAuxInt(1) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [6] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v2.AuxInt = int32ToAuxInt(2) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [12] {t} dst src mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst 
(MOVWload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 12 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpLOONG64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) + v2.AuxInt = int32ToAuxInt(4) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [16] {t} dst src mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpLOONG64MOVVstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [24] {t} dst src mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVVstore [16] dst (MOVVload [16] src mem) (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 24 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpLOONG64MOVVstore) + v.AuxInt = int32ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVload, 
typ.UInt64) + v0.AuxInt = int32ToAuxInt(16) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(8) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) + v2.AuxInt = int32ToAuxInt(8) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [s] {t} dst src mem) + // cond: s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s) + // result: (DUFFCOPY [16 * (128 - s/8)] dst src mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) { + break + } + v.reset(OpLOONG64DUFFCOPY) + v.AuxInt = int64ToAuxInt(16 * (128 - s/8)) + v.AddArg3(dst, src, mem) + return true + } + // match: (Move [s] {t} dst src mem) + // cond: s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 + // result: (LoweredMove [t.Alignment()] dst src (ADDVconst src [s-moveSize(t.Alignment(), config)]) mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0) { + break + } + v.reset(OpLOONG64LoweredMove) + v.AuxInt = int64ToAuxInt(t.Alignment()) + v0 := b.NewValue0(v.Pos, OpLOONG64ADDVconst, src.Type) + v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config)) + v0.AddArg(src) + v.AddArg4(dst, src, v0, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpNeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq16 x y) + // result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0])) + for { + x := v_0 + y := v_1 
+ v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpNeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq32 x y) + // result: (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0])) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpNeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq32F x y) + // result: (FPFlagFalse (CMPEQF x y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64FPFlagFalse) + v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQF, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpNeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq64 x y) + // result: (SGTU (XOR x y) (MOVVconst [0])) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpNeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq64F x y) + // result: (FPFlagFalse (CMPEQD x y)) + for { + 
x := v_0 + y := v_1 + v.reset(OpLOONG64FPFlagFalse) + v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpNeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq8 x y) + // result: (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0])) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpNeqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NeqPtr x y) + // result: (SGTU (XOR x y) (MOVVconst [0])) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpNot(v *Value) bool { + v_0 := v.Args[0] + // match: (Not x) + // result: (XORconst [1] x) + for { + x := v_0 + v.reset(OpLOONG64XORconst) + v.AuxInt = int64ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueLOONG64_OpOffPtr(v *Value) bool { + v_0 := v.Args[0] + // match: (OffPtr [off] ptr:(SP)) + // result: (MOVVaddr [int32(off)] ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + if ptr.Op != OpSP { + break + } + v.reset(OpLOONG64MOVVaddr) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) + return true + } + // match: (OffPtr [off] ptr) + // result: (ADDVconst [off] ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(OpLOONG64ADDVconst) + v.AuxInt = 
int64ToAuxInt(off) + v.AddArg(ptr) + return true + } +} +func rewriteValueLOONG64_OpPanicBounds(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 0 + // result: (LoweredPanicBoundsA [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 0) { + break + } + v.reset(OpLOONG64LoweredPanicBoundsA) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 1 + // result: (LoweredPanicBoundsB [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 1) { + break + } + v.reset(OpLOONG64LoweredPanicBoundsB) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 2 + // result: (LoweredPanicBoundsC [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 2) { + break + } + v.reset(OpLOONG64LoweredPanicBoundsC) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpRotateLeft16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft16 x (MOVVconst [c])) + // result: (Or16 (Lsh16x64 x (MOVVconst [c&15])) (Rsh16Ux64 x (MOVVconst [-c&15]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr16) + v0 := b.NewValue0(v.Pos, OpLsh16x64, t) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 15) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 15) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) 
+ return true + } + return false +} +func rewriteValueLOONG64_OpRotateLeft32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RotateLeft32 x y) + // result: (ROTR x (NEGV y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64ROTR) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueLOONG64_OpRotateLeft64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (RotateLeft64 x y) + // result: (ROTRV x (NEGV y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64ROTRV) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueLOONG64_OpRotateLeft8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft8 x (MOVVconst [c])) + // result: (Or8 (Lsh8x64 x (MOVVconst [c&7])) (Rsh8Ux64 x (MOVVconst [-c&7]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpLsh8x64, t) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 7) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 7) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueLOONG64_OpRsh16Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux16 x y) + // result: (MASKEQZ (SRLV (ZeroExt16to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + 
v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(64) + v3.AddArg2(v4, v2) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpRsh16Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux32 x y) + // result: (MASKEQZ (SRLV (ZeroExt16to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(64) + v3.AddArg2(v4, v2) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpRsh16Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux64 x y) + // result: (MASKEQZ (SRLV (ZeroExt16to64 x) y) (SGTU (MOVVconst [64]) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(v3, y) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueLOONG64_OpRsh16Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux8 x y) + // result: (MASKEQZ (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 
:= b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(64) + v3.AddArg2(v4, v2) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpRsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x16 x y) + // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x32 x y) + // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + 
v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x64 x y) + // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(63) + v3.AddArg2(y, v4) + v2.AddArg(v3) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x8 x y) + // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh32Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux16 x y) + // result: (MASKEQZ (SRLV (ZeroExt32to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, 
typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(64) + v3.AddArg2(v4, v2) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpRsh32Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux32 x y) + // result: (MASKEQZ (SRLV (ZeroExt32to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(64) + v3.AddArg2(v4, v2) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpRsh32Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux64 x y) + // result: (MASKEQZ (SRLV (ZeroExt32to64 x) y) (SGTU (MOVVconst [64]) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(v3, y) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueLOONG64_OpRsh32Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux8 x y) + // result: (MASKEQZ (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst [64]) 
(ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(64) + v3.AddArg2(v4, v2) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpRsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x16 x y) + // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x32 x y) + // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) 
+ v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x64 x y) + // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(63) + v3.AddArg2(y, v4) + v2.AddArg(v3) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x8 x y) + // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh64Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux16 x y) + // result: (MASKEQZ (SRLV x (ZeroExt16to64 y)) (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := 
b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(v3, v1) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueLOONG64_OpRsh64Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux32 x y) + // result: (MASKEQZ (SRLV x (ZeroExt32to64 y)) (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(v3, v1) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueLOONG64_OpRsh64Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux64 x y) + // result: (MASKEQZ (SRLV x y) (SGTU (MOVVconst [64]) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh64Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux8 x y) + // result: (MASKEQZ (SRLV x (ZeroExt8to64 y)) (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + 
v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(v3, v1) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueLOONG64_OpRsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x16 x y) + // result: (SRAV x (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(63) + v2.AddArg2(v3, v4) + v1.AddArg(v2) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueLOONG64_OpRsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x32 x y) + // result: (SRAV x (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(63) + v2.AddArg2(v3, v4) + v1.AddArg(v2) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueLOONG64_OpRsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x64 x y) + // result: (SRAV x (OR (NEGV (SGTU y (MOVVconst [63]))) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := 
b.NewValue0(v.Pos, OpLOONG64OR, t) + v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(63) + v2.AddArg2(y, v3) + v1.AddArg(v2) + v0.AddArg2(v1, y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueLOONG64_OpRsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x8 x y) + // result: (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(63) + v2.AddArg2(v3, v4) + v1.AddArg(v2) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueLOONG64_OpRsh8Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux16 x y) + // result: (MASKEQZ (SRLV (ZeroExt8to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(64) + v3.AddArg2(v4, v2) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpRsh8Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux32 x y) + // result: (MASKEQZ (SRLV 
(ZeroExt8to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(64) + v3.AddArg2(v4, v2) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpRsh8Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux64 x y) + // result: (MASKEQZ (SRLV (ZeroExt8to64 x) y) (SGTU (MOVVconst [64]) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(v3, y) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueLOONG64_OpRsh8Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux8 x y) + // result: (MASKEQZ (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64MASKEQZ) + v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(64) + v3.AddArg2(v4, v2) + v.AddArg2(v0, v3) + return true + } +} +func 
rewriteValueLOONG64_OpRsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x16 x y) + // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x32 x y) + // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x64 x y) + // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := 
b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(63) + v3.AddArg2(y, v4) + v2.AddArg(v3) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x8 x y) + // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpSelect0(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Select0 (Mul64uhilo x y)) + // result: (MULHVU x y) + for { + if v_0.Op != OpMul64uhilo { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLOONG64MULHVU) + v.AddArg2(x, y) + return true + } + // match: (Select0 (Mul64uover x y)) + // result: (MULV x y) + for { + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLOONG64MULV) + v.AddArg2(x, y) + return true + } + // match: (Select0 (Add64carry x y c)) + // result: (ADDV (ADDV x y) c) + for { + t := v.Type + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpLOONG64ADDV) + v0 := b.NewValue0(v.Pos, OpLOONG64ADDV, t) + v0.AddArg2(x, y) + v.AddArg2(v0, c) + return true + } + // match: (Select0 (Sub64borrow x y 
c)) + // result: (SUBV (SUBV x y) c) + for { + t := v.Type + if v_0.Op != OpSub64borrow { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpLOONG64SUBV) + v0 := b.NewValue0(v.Pos, OpLOONG64SUBV, t) + v0.AddArg2(x, y) + v.AddArg2(v0, c) + return true + } + return false +} +func rewriteValueLOONG64_OpSelect1(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select1 (Mul64uhilo x y)) + // result: (MULV x y) + for { + if v_0.Op != OpMul64uhilo { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLOONG64MULV) + v.AddArg2(x, y) + return true + } + // match: (Select1 (Mul64uover x y)) + // result: (SGTU (MULHVU x y) (MOVVconst [0])) + for { + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLOONG64SGTU) + v.Type = typ.Bool + v0 := b.NewValue0(v.Pos, OpLOONG64MULHVU, typ.UInt64) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v1) + return true + } + // match: (Select1 (Add64carry x y c)) + // result: (OR (SGTU x s:(ADDV x y)) (SGTU s (ADDV s c))) + for { + t := v.Type + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpLOONG64OR) + v0 := b.NewValue0(v.Pos, OpLOONG64SGTU, t) + s := b.NewValue0(v.Pos, OpLOONG64ADDV, t) + s.AddArg2(x, y) + v0.AddArg2(x, s) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, t) + v3 := b.NewValue0(v.Pos, OpLOONG64ADDV, t) + v3.AddArg2(s, c) + v2.AddArg2(s, v3) + v.AddArg2(v0, v2) + return true + } + // match: (Select1 (Sub64borrow x y c)) + // result: (OR (SGTU s:(SUBV x y) x) (SGTU (SUBV s c) s)) + for { + t := v.Type + if v_0.Op != OpSub64borrow { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpLOONG64OR) + v0 := b.NewValue0(v.Pos, OpLOONG64SGTU, t) + s := b.NewValue0(v.Pos, OpLOONG64SUBV, t) + s.AddArg2(x, y) + v0.AddArg2(s, x) + v2 := b.NewValue0(v.Pos, 
OpLOONG64SGTU, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SUBV, t) + v3.AddArg2(s, c) + v2.AddArg2(v3, s) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueLOONG64_OpSlicemask(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Slicemask x) + // result: (SRAVconst (NEGV x) [63]) + for { + t := v.Type + x := v_0 + v.reset(OpLOONG64SRAVconst) + v.AuxInt = int64ToAuxInt(63) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpStore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Store {t} ptr val mem) + // cond: t.Size() == 1 + // result: (MOVBstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 1) { + break + } + v.reset(OpLOONG64MOVBstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 2 + // result: (MOVHstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 2) { + break + } + v.reset(OpLOONG64MOVHstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && !t.IsFloat() + // result: (MOVWstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && !t.IsFloat()) { + break + } + v.reset(OpLOONG64MOVWstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && !t.IsFloat() + // result: (MOVVstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && !t.IsFloat()) { + break + } + v.reset(OpLOONG64MOVVstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && t.IsFloat() + // result: (MOVFstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 
&& t.IsFloat()) { + break + } + v.reset(OpLOONG64MOVFstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && t.IsFloat() + // result: (MOVDstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && t.IsFloat()) { + break + } + v.reset(OpLOONG64MOVDstore) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpZero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Zero [0] _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_1 + v.copyOf(mem) + return true + } + // match: (Zero [1] ptr mem) + // result: (MOVBstore ptr (MOVVconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpLOONG64MOVBstore) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [2] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore ptr (MOVVconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpLOONG64MOVHstore) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [2] ptr mem) + // result: (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + 
return true + } + // match: (Zero [4] {t} ptr mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore ptr (MOVVconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpLOONG64MOVWstore) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [4] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [4] ptr mem) + // result: (MOVBstore [3] ptr (MOVVconst [0]) (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(1) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(0) + v3.AddArg3(ptr, v0, mem) + v2.AddArg3(ptr, v0, v3) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [8] {t} ptr mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVVstore ptr (MOVVconst [0]) 
mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpLOONG64MOVVstore) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [8] {t} ptr mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpLOONG64MOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [8] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [6] ptr (MOVVconst [0]) (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(2) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(0) + v3.AddArg3(ptr, v0, mem) + v2.AddArg3(ptr, v0, v3) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [3] ptr mem) + // result: (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr 
(MOVVconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(0) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [6] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(0) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [12] {t} ptr mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [8] ptr (MOVVconst [0]) (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 12 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpLOONG64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(0) + 
v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [16] {t} ptr mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpLOONG64MOVVstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [24] {t} ptr mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVVstore [16] ptr (MOVVconst [0]) (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 24 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpLOONG64MOVVstore) + v.AuxInt = int32ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(8) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(0) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [s] {t} ptr mem) + // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice + // result: (DUFFZERO [8 * (128 - s/8)] ptr mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice) { + break + } + v.reset(OpLOONG64DUFFZERO) + v.AuxInt = int64ToAuxInt(8 * (128 - s/8)) + v.AddArg2(ptr, mem) + return true + } + // match: (Zero [s] {t} 
ptr mem) + // cond: (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 + // result: (LoweredZero [t.Alignment()] ptr (ADDVconst ptr [s-moveSize(t.Alignment(), config)]) mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !((s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0) { + break + } + v.reset(OpLOONG64LoweredZero) + v.AuxInt = int64ToAuxInt(t.Alignment()) + v0 := b.NewValue0(v.Pos, OpLOONG64ADDVconst, ptr.Type) + v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config)) + v0.AddArg(ptr) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} +func rewriteBlockLOONG64(b *Block) bool { + typ := &b.Func.Config.Types + switch b.Kind { + case BlockLOONG64EQ: + // match: (EQ (FPFlagTrue cmp) yes no) + // result: (FPF cmp yes no) + for b.Controls[0].Op == OpLOONG64FPFlagTrue { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockLOONG64FPF, cmp) + return true + } + // match: (EQ (FPFlagFalse cmp) yes no) + // result: (FPT cmp yes no) + for b.Controls[0].Op == OpLOONG64FPFlagFalse { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockLOONG64FPT, cmp) + return true + } + // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpLOONG64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpLOONG64SGT { + break + } + b.resetWithControl(BlockLOONG64NE, cmp) + return true + } + // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpLOONG64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpLOONG64SGTU { + break + } + b.resetWithControl(BlockLOONG64NE, cmp) + return true + } + // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpLOONG64XORconst { + v_0 := b.Controls[0] + if 
auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpLOONG64SGTconst { + break + } + b.resetWithControl(BlockLOONG64NE, cmp) + return true + } + // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpLOONG64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpLOONG64SGTUconst { + break + } + b.resetWithControl(BlockLOONG64NE, cmp) + return true + } + // match: (EQ (SGTUconst [1] x) yes no) + // result: (NE x yes no) + for b.Controls[0].Op == OpLOONG64SGTUconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockLOONG64NE, x) + return true + } + // match: (EQ (SGTU x (MOVVconst [0])) yes no) + // result: (EQ x yes no) + for b.Controls[0].Op == OpLOONG64SGTU { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { + break + } + b.resetWithControl(BlockLOONG64EQ, x) + return true + } + // match: (EQ (SGTconst [0] x) yes no) + // result: (GEZ x yes no) + for b.Controls[0].Op == OpLOONG64SGTconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockLOONG64GEZ, x) + return true + } + // match: (EQ (SGT x (MOVVconst [0])) yes no) + // result: (LEZ x yes no) + for b.Controls[0].Op == OpLOONG64SGT { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { + break + } + b.resetWithControl(BlockLOONG64LEZ, x) + return true + } + // match: (EQ (MOVVconst [0]) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + b.Reset(BlockFirst) + return true + } + // match: (EQ (MOVVconst [c]) yes no) + 
// cond: c != 0 + // result: (First no yes) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c != 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockLOONG64GEZ: + // match: (GEZ (MOVVconst [c]) yes no) + // cond: c >= 0 + // result: (First yes no) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c >= 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (GEZ (MOVVconst [c]) yes no) + // cond: c < 0 + // result: (First no yes) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c < 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockLOONG64GTZ: + // match: (GTZ (MOVVconst [c]) yes no) + // cond: c > 0 + // result: (First yes no) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c > 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (GTZ (MOVVconst [c]) yes no) + // cond: c <= 0 + // result: (First no yes) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c <= 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockIf: + // match: (If cond yes no) + // result: (NE (MOVBUreg cond) yes no) + for { + cond := b.Controls[0] + v0 := b.NewValue0(cond.Pos, OpLOONG64MOVBUreg, typ.UInt64) + v0.AddArg(cond) + b.resetWithControl(BlockLOONG64NE, v0) + return true + } + case BlockLOONG64LEZ: + // match: (LEZ (MOVVconst [c]) yes no) + // cond: c <= 0 + // result: (First yes no) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c <= 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (LEZ (MOVVconst [c]) yes no) + // cond: c > 0 + // result: (First no yes) + for 
b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c > 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockLOONG64LTZ: + // match: (LTZ (MOVVconst [c]) yes no) + // cond: c < 0 + // result: (First yes no) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c < 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (LTZ (MOVVconst [c]) yes no) + // cond: c >= 0 + // result: (First no yes) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c >= 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockLOONG64NE: + // match: (NE (FPFlagTrue cmp) yes no) + // result: (FPT cmp yes no) + for b.Controls[0].Op == OpLOONG64FPFlagTrue { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockLOONG64FPT, cmp) + return true + } + // match: (NE (FPFlagFalse cmp) yes no) + // result: (FPF cmp yes no) + for b.Controls[0].Op == OpLOONG64FPFlagFalse { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockLOONG64FPF, cmp) + return true + } + // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpLOONG64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpLOONG64SGT { + break + } + b.resetWithControl(BlockLOONG64EQ, cmp) + return true + } + // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpLOONG64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpLOONG64SGTU { + break + } + b.resetWithControl(BlockLOONG64EQ, cmp) + return true + } + // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpLOONG64XORconst 
{ + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpLOONG64SGTconst { + break + } + b.resetWithControl(BlockLOONG64EQ, cmp) + return true + } + // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpLOONG64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpLOONG64SGTUconst { + break + } + b.resetWithControl(BlockLOONG64EQ, cmp) + return true + } + // match: (NE (SGTUconst [1] x) yes no) + // result: (EQ x yes no) + for b.Controls[0].Op == OpLOONG64SGTUconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockLOONG64EQ, x) + return true + } + // match: (NE (SGTU x (MOVVconst [0])) yes no) + // result: (NE x yes no) + for b.Controls[0].Op == OpLOONG64SGTU { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { + break + } + b.resetWithControl(BlockLOONG64NE, x) + return true + } + // match: (NE (SGTconst [0] x) yes no) + // result: (LTZ x yes no) + for b.Controls[0].Op == OpLOONG64SGTconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockLOONG64LTZ, x) + return true + } + // match: (NE (SGT x (MOVVconst [0])) yes no) + // result: (GTZ x yes no) + for b.Controls[0].Op == OpLOONG64SGT { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { + break + } + b.resetWithControl(BlockLOONG64GTZ, x) + return true + } + // match: (NE (MOVVconst [0]) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return 
true + } + // match: (NE (MOVVconst [c]) yes no) + // cond: c != 0 + // result: (First yes no) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c != 0) { + break + } + b.Reset(BlockFirst) + return true + } + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteMIPS.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteMIPS.go new file mode 100644 index 0000000000000000000000000000000000000000..6a259f5a475172334fbbb7b94f6130f607f8ee46 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteMIPS.go @@ -0,0 +1,7660 @@ +// Code generated from _gen/MIPS.rules using 'go generate'; DO NOT EDIT. + +package ssa + +import "cmd/compile/internal/types" + +func rewriteValueMIPS(v *Value) bool { + switch v.Op { + case OpAbs: + v.Op = OpMIPSABSD + return true + case OpAdd16: + v.Op = OpMIPSADD + return true + case OpAdd32: + v.Op = OpMIPSADD + return true + case OpAdd32F: + v.Op = OpMIPSADDF + return true + case OpAdd32withcarry: + return rewriteValueMIPS_OpAdd32withcarry(v) + case OpAdd64F: + v.Op = OpMIPSADDD + return true + case OpAdd8: + v.Op = OpMIPSADD + return true + case OpAddPtr: + v.Op = OpMIPSADD + return true + case OpAddr: + return rewriteValueMIPS_OpAddr(v) + case OpAnd16: + v.Op = OpMIPSAND + return true + case OpAnd32: + v.Op = OpMIPSAND + return true + case OpAnd8: + v.Op = OpMIPSAND + return true + case OpAndB: + v.Op = OpMIPSAND + return true + case OpAtomicAdd32: + v.Op = OpMIPSLoweredAtomicAdd + return true + case OpAtomicAnd32: + v.Op = OpMIPSLoweredAtomicAnd + return true + case OpAtomicAnd8: + return rewriteValueMIPS_OpAtomicAnd8(v) + case OpAtomicCompareAndSwap32: + v.Op = OpMIPSLoweredAtomicCas + return true + case OpAtomicExchange32: + v.Op = OpMIPSLoweredAtomicExchange + return true + case OpAtomicLoad32: + v.Op = OpMIPSLoweredAtomicLoad32 + return true + case OpAtomicLoad8: + v.Op = 
OpMIPSLoweredAtomicLoad8 + return true + case OpAtomicLoadPtr: + v.Op = OpMIPSLoweredAtomicLoad32 + return true + case OpAtomicOr32: + v.Op = OpMIPSLoweredAtomicOr + return true + case OpAtomicOr8: + return rewriteValueMIPS_OpAtomicOr8(v) + case OpAtomicStore32: + v.Op = OpMIPSLoweredAtomicStore32 + return true + case OpAtomicStore8: + v.Op = OpMIPSLoweredAtomicStore8 + return true + case OpAtomicStorePtrNoWB: + v.Op = OpMIPSLoweredAtomicStore32 + return true + case OpAvg32u: + return rewriteValueMIPS_OpAvg32u(v) + case OpBitLen32: + return rewriteValueMIPS_OpBitLen32(v) + case OpClosureCall: + v.Op = OpMIPSCALLclosure + return true + case OpCom16: + return rewriteValueMIPS_OpCom16(v) + case OpCom32: + return rewriteValueMIPS_OpCom32(v) + case OpCom8: + return rewriteValueMIPS_OpCom8(v) + case OpConst16: + return rewriteValueMIPS_OpConst16(v) + case OpConst32: + return rewriteValueMIPS_OpConst32(v) + case OpConst32F: + v.Op = OpMIPSMOVFconst + return true + case OpConst64F: + v.Op = OpMIPSMOVDconst + return true + case OpConst8: + return rewriteValueMIPS_OpConst8(v) + case OpConstBool: + return rewriteValueMIPS_OpConstBool(v) + case OpConstNil: + return rewriteValueMIPS_OpConstNil(v) + case OpCtz32: + return rewriteValueMIPS_OpCtz32(v) + case OpCtz32NonZero: + v.Op = OpCtz32 + return true + case OpCvt32Fto32: + v.Op = OpMIPSTRUNCFW + return true + case OpCvt32Fto64F: + v.Op = OpMIPSMOVFD + return true + case OpCvt32to32F: + v.Op = OpMIPSMOVWF + return true + case OpCvt32to64F: + v.Op = OpMIPSMOVWD + return true + case OpCvt64Fto32: + v.Op = OpMIPSTRUNCDW + return true + case OpCvt64Fto32F: + v.Op = OpMIPSMOVDF + return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true + case OpDiv16: + return rewriteValueMIPS_OpDiv16(v) + case OpDiv16u: + return rewriteValueMIPS_OpDiv16u(v) + case OpDiv32: + return rewriteValueMIPS_OpDiv32(v) + case OpDiv32F: + v.Op = OpMIPSDIVF + return true + case OpDiv32u: + return rewriteValueMIPS_OpDiv32u(v) + case OpDiv64F: + v.Op = 
OpMIPSDIVD + return true + case OpDiv8: + return rewriteValueMIPS_OpDiv8(v) + case OpDiv8u: + return rewriteValueMIPS_OpDiv8u(v) + case OpEq16: + return rewriteValueMIPS_OpEq16(v) + case OpEq32: + return rewriteValueMIPS_OpEq32(v) + case OpEq32F: + return rewriteValueMIPS_OpEq32F(v) + case OpEq64F: + return rewriteValueMIPS_OpEq64F(v) + case OpEq8: + return rewriteValueMIPS_OpEq8(v) + case OpEqB: + return rewriteValueMIPS_OpEqB(v) + case OpEqPtr: + return rewriteValueMIPS_OpEqPtr(v) + case OpGetCallerPC: + v.Op = OpMIPSLoweredGetCallerPC + return true + case OpGetCallerSP: + v.Op = OpMIPSLoweredGetCallerSP + return true + case OpGetClosurePtr: + v.Op = OpMIPSLoweredGetClosurePtr + return true + case OpHmul32: + return rewriteValueMIPS_OpHmul32(v) + case OpHmul32u: + return rewriteValueMIPS_OpHmul32u(v) + case OpInterCall: + v.Op = OpMIPSCALLinter + return true + case OpIsInBounds: + return rewriteValueMIPS_OpIsInBounds(v) + case OpIsNonNil: + return rewriteValueMIPS_OpIsNonNil(v) + case OpIsSliceInBounds: + return rewriteValueMIPS_OpIsSliceInBounds(v) + case OpLeq16: + return rewriteValueMIPS_OpLeq16(v) + case OpLeq16U: + return rewriteValueMIPS_OpLeq16U(v) + case OpLeq32: + return rewriteValueMIPS_OpLeq32(v) + case OpLeq32F: + return rewriteValueMIPS_OpLeq32F(v) + case OpLeq32U: + return rewriteValueMIPS_OpLeq32U(v) + case OpLeq64F: + return rewriteValueMIPS_OpLeq64F(v) + case OpLeq8: + return rewriteValueMIPS_OpLeq8(v) + case OpLeq8U: + return rewriteValueMIPS_OpLeq8U(v) + case OpLess16: + return rewriteValueMIPS_OpLess16(v) + case OpLess16U: + return rewriteValueMIPS_OpLess16U(v) + case OpLess32: + return rewriteValueMIPS_OpLess32(v) + case OpLess32F: + return rewriteValueMIPS_OpLess32F(v) + case OpLess32U: + return rewriteValueMIPS_OpLess32U(v) + case OpLess64F: + return rewriteValueMIPS_OpLess64F(v) + case OpLess8: + return rewriteValueMIPS_OpLess8(v) + case OpLess8U: + return rewriteValueMIPS_OpLess8U(v) + case OpLoad: + return rewriteValueMIPS_OpLoad(v) + 
case OpLocalAddr: + return rewriteValueMIPS_OpLocalAddr(v) + case OpLsh16x16: + return rewriteValueMIPS_OpLsh16x16(v) + case OpLsh16x32: + return rewriteValueMIPS_OpLsh16x32(v) + case OpLsh16x64: + return rewriteValueMIPS_OpLsh16x64(v) + case OpLsh16x8: + return rewriteValueMIPS_OpLsh16x8(v) + case OpLsh32x16: + return rewriteValueMIPS_OpLsh32x16(v) + case OpLsh32x32: + return rewriteValueMIPS_OpLsh32x32(v) + case OpLsh32x64: + return rewriteValueMIPS_OpLsh32x64(v) + case OpLsh32x8: + return rewriteValueMIPS_OpLsh32x8(v) + case OpLsh8x16: + return rewriteValueMIPS_OpLsh8x16(v) + case OpLsh8x32: + return rewriteValueMIPS_OpLsh8x32(v) + case OpLsh8x64: + return rewriteValueMIPS_OpLsh8x64(v) + case OpLsh8x8: + return rewriteValueMIPS_OpLsh8x8(v) + case OpMIPSADD: + return rewriteValueMIPS_OpMIPSADD(v) + case OpMIPSADDconst: + return rewriteValueMIPS_OpMIPSADDconst(v) + case OpMIPSAND: + return rewriteValueMIPS_OpMIPSAND(v) + case OpMIPSANDconst: + return rewriteValueMIPS_OpMIPSANDconst(v) + case OpMIPSCMOVZ: + return rewriteValueMIPS_OpMIPSCMOVZ(v) + case OpMIPSCMOVZzero: + return rewriteValueMIPS_OpMIPSCMOVZzero(v) + case OpMIPSLoweredAtomicAdd: + return rewriteValueMIPS_OpMIPSLoweredAtomicAdd(v) + case OpMIPSLoweredAtomicStore32: + return rewriteValueMIPS_OpMIPSLoweredAtomicStore32(v) + case OpMIPSMOVBUload: + return rewriteValueMIPS_OpMIPSMOVBUload(v) + case OpMIPSMOVBUreg: + return rewriteValueMIPS_OpMIPSMOVBUreg(v) + case OpMIPSMOVBload: + return rewriteValueMIPS_OpMIPSMOVBload(v) + case OpMIPSMOVBreg: + return rewriteValueMIPS_OpMIPSMOVBreg(v) + case OpMIPSMOVBstore: + return rewriteValueMIPS_OpMIPSMOVBstore(v) + case OpMIPSMOVBstorezero: + return rewriteValueMIPS_OpMIPSMOVBstorezero(v) + case OpMIPSMOVDload: + return rewriteValueMIPS_OpMIPSMOVDload(v) + case OpMIPSMOVDstore: + return rewriteValueMIPS_OpMIPSMOVDstore(v) + case OpMIPSMOVFload: + return rewriteValueMIPS_OpMIPSMOVFload(v) + case OpMIPSMOVFstore: + return rewriteValueMIPS_OpMIPSMOVFstore(v) + case 
OpMIPSMOVHUload: + return rewriteValueMIPS_OpMIPSMOVHUload(v) + case OpMIPSMOVHUreg: + return rewriteValueMIPS_OpMIPSMOVHUreg(v) + case OpMIPSMOVHload: + return rewriteValueMIPS_OpMIPSMOVHload(v) + case OpMIPSMOVHreg: + return rewriteValueMIPS_OpMIPSMOVHreg(v) + case OpMIPSMOVHstore: + return rewriteValueMIPS_OpMIPSMOVHstore(v) + case OpMIPSMOVHstorezero: + return rewriteValueMIPS_OpMIPSMOVHstorezero(v) + case OpMIPSMOVWload: + return rewriteValueMIPS_OpMIPSMOVWload(v) + case OpMIPSMOVWnop: + return rewriteValueMIPS_OpMIPSMOVWnop(v) + case OpMIPSMOVWreg: + return rewriteValueMIPS_OpMIPSMOVWreg(v) + case OpMIPSMOVWstore: + return rewriteValueMIPS_OpMIPSMOVWstore(v) + case OpMIPSMOVWstorezero: + return rewriteValueMIPS_OpMIPSMOVWstorezero(v) + case OpMIPSMUL: + return rewriteValueMIPS_OpMIPSMUL(v) + case OpMIPSNEG: + return rewriteValueMIPS_OpMIPSNEG(v) + case OpMIPSNOR: + return rewriteValueMIPS_OpMIPSNOR(v) + case OpMIPSNORconst: + return rewriteValueMIPS_OpMIPSNORconst(v) + case OpMIPSOR: + return rewriteValueMIPS_OpMIPSOR(v) + case OpMIPSORconst: + return rewriteValueMIPS_OpMIPSORconst(v) + case OpMIPSSGT: + return rewriteValueMIPS_OpMIPSSGT(v) + case OpMIPSSGTU: + return rewriteValueMIPS_OpMIPSSGTU(v) + case OpMIPSSGTUconst: + return rewriteValueMIPS_OpMIPSSGTUconst(v) + case OpMIPSSGTUzero: + return rewriteValueMIPS_OpMIPSSGTUzero(v) + case OpMIPSSGTconst: + return rewriteValueMIPS_OpMIPSSGTconst(v) + case OpMIPSSGTzero: + return rewriteValueMIPS_OpMIPSSGTzero(v) + case OpMIPSSLL: + return rewriteValueMIPS_OpMIPSSLL(v) + case OpMIPSSLLconst: + return rewriteValueMIPS_OpMIPSSLLconst(v) + case OpMIPSSRA: + return rewriteValueMIPS_OpMIPSSRA(v) + case OpMIPSSRAconst: + return rewriteValueMIPS_OpMIPSSRAconst(v) + case OpMIPSSRL: + return rewriteValueMIPS_OpMIPSSRL(v) + case OpMIPSSRLconst: + return rewriteValueMIPS_OpMIPSSRLconst(v) + case OpMIPSSUB: + return rewriteValueMIPS_OpMIPSSUB(v) + case OpMIPSSUBconst: + return rewriteValueMIPS_OpMIPSSUBconst(v) + case 
OpMIPSXOR: + return rewriteValueMIPS_OpMIPSXOR(v) + case OpMIPSXORconst: + return rewriteValueMIPS_OpMIPSXORconst(v) + case OpMod16: + return rewriteValueMIPS_OpMod16(v) + case OpMod16u: + return rewriteValueMIPS_OpMod16u(v) + case OpMod32: + return rewriteValueMIPS_OpMod32(v) + case OpMod32u: + return rewriteValueMIPS_OpMod32u(v) + case OpMod8: + return rewriteValueMIPS_OpMod8(v) + case OpMod8u: + return rewriteValueMIPS_OpMod8u(v) + case OpMove: + return rewriteValueMIPS_OpMove(v) + case OpMul16: + v.Op = OpMIPSMUL + return true + case OpMul32: + v.Op = OpMIPSMUL + return true + case OpMul32F: + v.Op = OpMIPSMULF + return true + case OpMul32uhilo: + v.Op = OpMIPSMULTU + return true + case OpMul64F: + v.Op = OpMIPSMULD + return true + case OpMul8: + v.Op = OpMIPSMUL + return true + case OpNeg16: + v.Op = OpMIPSNEG + return true + case OpNeg32: + v.Op = OpMIPSNEG + return true + case OpNeg32F: + v.Op = OpMIPSNEGF + return true + case OpNeg64F: + v.Op = OpMIPSNEGD + return true + case OpNeg8: + v.Op = OpMIPSNEG + return true + case OpNeq16: + return rewriteValueMIPS_OpNeq16(v) + case OpNeq32: + return rewriteValueMIPS_OpNeq32(v) + case OpNeq32F: + return rewriteValueMIPS_OpNeq32F(v) + case OpNeq64F: + return rewriteValueMIPS_OpNeq64F(v) + case OpNeq8: + return rewriteValueMIPS_OpNeq8(v) + case OpNeqB: + v.Op = OpMIPSXOR + return true + case OpNeqPtr: + return rewriteValueMIPS_OpNeqPtr(v) + case OpNilCheck: + v.Op = OpMIPSLoweredNilCheck + return true + case OpNot: + return rewriteValueMIPS_OpNot(v) + case OpOffPtr: + return rewriteValueMIPS_OpOffPtr(v) + case OpOr16: + v.Op = OpMIPSOR + return true + case OpOr32: + v.Op = OpMIPSOR + return true + case OpOr8: + v.Op = OpMIPSOR + return true + case OpOrB: + v.Op = OpMIPSOR + return true + case OpPanicBounds: + return rewriteValueMIPS_OpPanicBounds(v) + case OpPanicExtend: + return rewriteValueMIPS_OpPanicExtend(v) + case OpRotateLeft16: + return rewriteValueMIPS_OpRotateLeft16(v) + case OpRotateLeft32: + return 
rewriteValueMIPS_OpRotateLeft32(v) + case OpRotateLeft64: + return rewriteValueMIPS_OpRotateLeft64(v) + case OpRotateLeft8: + return rewriteValueMIPS_OpRotateLeft8(v) + case OpRound32F: + v.Op = OpCopy + return true + case OpRound64F: + v.Op = OpCopy + return true + case OpRsh16Ux16: + return rewriteValueMIPS_OpRsh16Ux16(v) + case OpRsh16Ux32: + return rewriteValueMIPS_OpRsh16Ux32(v) + case OpRsh16Ux64: + return rewriteValueMIPS_OpRsh16Ux64(v) + case OpRsh16Ux8: + return rewriteValueMIPS_OpRsh16Ux8(v) + case OpRsh16x16: + return rewriteValueMIPS_OpRsh16x16(v) + case OpRsh16x32: + return rewriteValueMIPS_OpRsh16x32(v) + case OpRsh16x64: + return rewriteValueMIPS_OpRsh16x64(v) + case OpRsh16x8: + return rewriteValueMIPS_OpRsh16x8(v) + case OpRsh32Ux16: + return rewriteValueMIPS_OpRsh32Ux16(v) + case OpRsh32Ux32: + return rewriteValueMIPS_OpRsh32Ux32(v) + case OpRsh32Ux64: + return rewriteValueMIPS_OpRsh32Ux64(v) + case OpRsh32Ux8: + return rewriteValueMIPS_OpRsh32Ux8(v) + case OpRsh32x16: + return rewriteValueMIPS_OpRsh32x16(v) + case OpRsh32x32: + return rewriteValueMIPS_OpRsh32x32(v) + case OpRsh32x64: + return rewriteValueMIPS_OpRsh32x64(v) + case OpRsh32x8: + return rewriteValueMIPS_OpRsh32x8(v) + case OpRsh8Ux16: + return rewriteValueMIPS_OpRsh8Ux16(v) + case OpRsh8Ux32: + return rewriteValueMIPS_OpRsh8Ux32(v) + case OpRsh8Ux64: + return rewriteValueMIPS_OpRsh8Ux64(v) + case OpRsh8Ux8: + return rewriteValueMIPS_OpRsh8Ux8(v) + case OpRsh8x16: + return rewriteValueMIPS_OpRsh8x16(v) + case OpRsh8x32: + return rewriteValueMIPS_OpRsh8x32(v) + case OpRsh8x64: + return rewriteValueMIPS_OpRsh8x64(v) + case OpRsh8x8: + return rewriteValueMIPS_OpRsh8x8(v) + case OpSelect0: + return rewriteValueMIPS_OpSelect0(v) + case OpSelect1: + return rewriteValueMIPS_OpSelect1(v) + case OpSignExt16to32: + v.Op = OpMIPSMOVHreg + return true + case OpSignExt8to16: + v.Op = OpMIPSMOVBreg + return true + case OpSignExt8to32: + v.Op = OpMIPSMOVBreg + return true + case OpSignmask: + return 
rewriteValueMIPS_OpSignmask(v) + case OpSlicemask: + return rewriteValueMIPS_OpSlicemask(v) + case OpSqrt: + v.Op = OpMIPSSQRTD + return true + case OpSqrt32: + v.Op = OpMIPSSQRTF + return true + case OpStaticCall: + v.Op = OpMIPSCALLstatic + return true + case OpStore: + return rewriteValueMIPS_OpStore(v) + case OpSub16: + v.Op = OpMIPSSUB + return true + case OpSub32: + v.Op = OpMIPSSUB + return true + case OpSub32F: + v.Op = OpMIPSSUBF + return true + case OpSub32withcarry: + return rewriteValueMIPS_OpSub32withcarry(v) + case OpSub64F: + v.Op = OpMIPSSUBD + return true + case OpSub8: + v.Op = OpMIPSSUB + return true + case OpSubPtr: + v.Op = OpMIPSSUB + return true + case OpTailCall: + v.Op = OpMIPSCALLtail + return true + case OpTrunc16to8: + v.Op = OpCopy + return true + case OpTrunc32to16: + v.Op = OpCopy + return true + case OpTrunc32to8: + v.Op = OpCopy + return true + case OpWB: + v.Op = OpMIPSLoweredWB + return true + case OpXor16: + v.Op = OpMIPSXOR + return true + case OpXor32: + v.Op = OpMIPSXOR + return true + case OpXor8: + v.Op = OpMIPSXOR + return true + case OpZero: + return rewriteValueMIPS_OpZero(v) + case OpZeroExt16to32: + v.Op = OpMIPSMOVHUreg + return true + case OpZeroExt8to16: + v.Op = OpMIPSMOVBUreg + return true + case OpZeroExt8to32: + v.Op = OpMIPSMOVBUreg + return true + case OpZeromask: + return rewriteValueMIPS_OpZeromask(v) + } + return false +} +func rewriteValueMIPS_OpAdd32withcarry(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Add32withcarry x y c) + // result: (ADD c (ADD x y)) + for { + t := v.Type + x := v_0 + y := v_1 + c := v_2 + v.reset(OpMIPSADD) + v0 := b.NewValue0(v.Pos, OpMIPSADD, t) + v0.AddArg2(x, y) + v.AddArg2(c, v0) + return true + } +} +func rewriteValueMIPS_OpAddr(v *Value) bool { + v_0 := v.Args[0] + // match: (Addr {sym} base) + // result: (MOVWaddr {sym} base) + for { + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpMIPSMOVWaddr) + v.Aux = 
symToAux(sym) + v.AddArg(base) + return true + } +} +func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (AtomicAnd8 ptr val mem) + // cond: !config.BigEndian + // result: (LoweredAtomicAnd (AND (MOVWconst [^3]) ptr) (OR (SLL (ZeroExt8to32 val) (SLLconst [3] (ANDconst [3] ptr))) (NORconst [0] (SLL (MOVWconst [0xff]) (SLLconst [3] (ANDconst [3] ptr))))) mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + if !(!config.BigEndian) { + break + } + v.reset(OpMIPSLoweredAtomicAnd) + v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(^3) + v0.AddArg2(v1, ptr) + v2 := b.NewValue0(v.Pos, OpMIPSOR, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v4.AddArg(val) + v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) + v5.AuxInt = int32ToAuxInt(3) + v6 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32) + v6.AuxInt = int32ToAuxInt(3) + v6.AddArg(ptr) + v5.AddArg(v6) + v3.AddArg2(v4, v5) + v7 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32) + v7.AuxInt = int32ToAuxInt(0) + v8 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32) + v9 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v9.AuxInt = int32ToAuxInt(0xff) + v8.AddArg2(v9, v5) + v7.AddArg(v8) + v2.AddArg2(v3, v7) + v.AddArg3(v0, v2, mem) + return true + } + // match: (AtomicAnd8 ptr val mem) + // cond: config.BigEndian + // result: (LoweredAtomicAnd (AND (MOVWconst [^3]) ptr) (OR (SLL (ZeroExt8to32 val) (SLLconst [3] (ANDconst [3] (XORconst [3] ptr)))) (NORconst [0] (SLL (MOVWconst [0xff]) (SLLconst [3] (ANDconst [3] (XORconst [3] ptr)))))) mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + if !(config.BigEndian) { + break + } + v.reset(OpMIPSLoweredAtomicAnd) + v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr) + v1 := 
b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(^3) + v0.AddArg2(v1, ptr) + v2 := b.NewValue0(v.Pos, OpMIPSOR, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v4.AddArg(val) + v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) + v5.AuxInt = int32ToAuxInt(3) + v6 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32) + v6.AuxInt = int32ToAuxInt(3) + v7 := b.NewValue0(v.Pos, OpMIPSXORconst, typ.UInt32) + v7.AuxInt = int32ToAuxInt(3) + v7.AddArg(ptr) + v6.AddArg(v7) + v5.AddArg(v6) + v3.AddArg2(v4, v5) + v8 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32) + v8.AuxInt = int32ToAuxInt(0) + v9 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32) + v10 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v10.AuxInt = int32ToAuxInt(0xff) + v9.AddArg2(v10, v5) + v8.AddArg(v9) + v2.AddArg2(v3, v8) + v.AddArg3(v0, v2, mem) + return true + } + return false +} +func rewriteValueMIPS_OpAtomicOr8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (AtomicOr8 ptr val mem) + // cond: !config.BigEndian + // result: (LoweredAtomicOr (AND (MOVWconst [^3]) ptr) (SLL (ZeroExt8to32 val) (SLLconst [3] (ANDconst [3] ptr))) mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + if !(!config.BigEndian) { + break + } + v.reset(OpMIPSLoweredAtomicOr) + v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(^3) + v0.AddArg2(v1, ptr) + v2 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v3.AddArg(val) + v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) + v4.AuxInt = int32ToAuxInt(3) + v5 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32) + v5.AuxInt = int32ToAuxInt(3) + v5.AddArg(ptr) + v4.AddArg(v5) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v2, mem) + return true + } + // 
match: (AtomicOr8 ptr val mem) + // cond: config.BigEndian + // result: (LoweredAtomicOr (AND (MOVWconst [^3]) ptr) (SLL (ZeroExt8to32 val) (SLLconst [3] (ANDconst [3] (XORconst [3] ptr)))) mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + if !(config.BigEndian) { + break + } + v.reset(OpMIPSLoweredAtomicOr) + v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(^3) + v0.AddArg2(v1, ptr) + v2 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v3.AddArg(val) + v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) + v4.AuxInt = int32ToAuxInt(3) + v5 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32) + v5.AuxInt = int32ToAuxInt(3) + v6 := b.NewValue0(v.Pos, OpMIPSXORconst, typ.UInt32) + v6.AuxInt = int32ToAuxInt(3) + v6.AddArg(ptr) + v5.AddArg(v6) + v4.AddArg(v5) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v2, mem) + return true + } + return false +} +func rewriteValueMIPS_OpAvg32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Avg32u x y) + // result: (ADD (SRLconst (SUB x y) [1]) y) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSADD) + v0 := b.NewValue0(v.Pos, OpMIPSSRLconst, t) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpMIPSSUB, t) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg2(v0, y) + return true + } +} +func rewriteValueMIPS_OpBitLen32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen32 x) + // result: (SUB (MOVWconst [32]) (CLZ x)) + for { + t := v.Type + x := v_0 + v.reset(OpMIPSSUB) + v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(32) + v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS_OpCom16(v *Value) bool { + v_0 := v.Args[0] + // match: (Com16 x) + // result: (NORconst [0] x) + for { + x := v_0 + 
v.reset(OpMIPSNORconst) + v.AuxInt = int32ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueMIPS_OpCom32(v *Value) bool { + v_0 := v.Args[0] + // match: (Com32 x) + // result: (NORconst [0] x) + for { + x := v_0 + v.reset(OpMIPSNORconst) + v.AuxInt = int32ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueMIPS_OpCom8(v *Value) bool { + v_0 := v.Args[0] + // match: (Com8 x) + // result: (NORconst [0] x) + for { + x := v_0 + v.reset(OpMIPSNORconst) + v.AuxInt = int32ToAuxInt(0) + v.AddArg(x) + return true + } +} +func rewriteValueMIPS_OpConst16(v *Value) bool { + // match: (Const16 [val]) + // result: (MOVWconst [int32(val)]) + for { + val := auxIntToInt16(v.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(int32(val)) + return true + } +} +func rewriteValueMIPS_OpConst32(v *Value) bool { + // match: (Const32 [val]) + // result: (MOVWconst [int32(val)]) + for { + val := auxIntToInt32(v.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(int32(val)) + return true + } +} +func rewriteValueMIPS_OpConst8(v *Value) bool { + // match: (Const8 [val]) + // result: (MOVWconst [int32(val)]) + for { + val := auxIntToInt8(v.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(int32(val)) + return true + } +} +func rewriteValueMIPS_OpConstBool(v *Value) bool { + // match: (ConstBool [t]) + // result: (MOVWconst [b2i32(t)]) + for { + t := auxIntToBool(v.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(b2i32(t)) + return true + } +} +func rewriteValueMIPS_OpConstNil(v *Value) bool { + // match: (ConstNil) + // result: (MOVWconst [0]) + for { + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } +} +func rewriteValueMIPS_OpCtz32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz32 x) + // result: (SUB (MOVWconst [32]) (CLZ (SUBconst [1] (AND x (NEG x))))) + for { + t := v.Type + x := v_0 + v.reset(OpMIPSSUB) + v0 := b.NewValue0(v.Pos, 
OpMIPSMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(32) + v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t) + v2 := b.NewValue0(v.Pos, OpMIPSSUBconst, t) + v2.AuxInt = int32ToAuxInt(1) + v3 := b.NewValue0(v.Pos, OpMIPSAND, t) + v4 := b.NewValue0(v.Pos, OpMIPSNEG, t) + v4.AddArg(x) + v3.AddArg2(x, v4) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS_OpDiv16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 x y) + // result: (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpDiv16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16u x y) + // result: (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpDiv32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32 x y) + // result: (Select1 (DIV x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpDiv32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32u x y) + // 
result: (Select1 (DIVU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpDiv8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpDiv8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpEq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq16 x y) + // result: (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSGTUconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpEq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: 
(Eq32 x y) + // result: (SGTUconst [1] (XOR x y)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSGTUconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpEq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32F x y) + // result: (FPFlagTrue (CMPEQF x y)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSFPFlagTrue) + v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpEq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq64F x y) + // result: (FPFlagTrue (CMPEQD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSFPFlagTrue) + v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpEq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq8 x y) + // result: (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSGTUconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpEqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqB x y) + // result: (XORconst [1] (XOR x y)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSXORconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpEqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqPtr x y) 
+ // result: (SGTUconst [1] (XOR x y)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSGTUconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpHmul32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul32 x y) + // result: (Select0 (MULT x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMIPSMULT, types.NewTuple(typ.Int32, typ.Int32)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpHmul32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul32u x y) + // result: (Select0 (MULTU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMIPSMULTU, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpIsInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsInBounds idx len) + // result: (SGTU len idx) + for { + idx := v_0 + len := v_1 + v.reset(OpMIPSSGTU) + v.AddArg2(len, idx) + return true + } +} +func rewriteValueMIPS_OpIsNonNil(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (IsNonNil ptr) + // result: (SGTU ptr (MOVWconst [0])) + for { + ptr := v_0 + v.reset(OpMIPSSGTU) + v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg2(ptr, v0) + return true + } +} +func rewriteValueMIPS_OpIsSliceInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (IsSliceInBounds idx len) + // result: (XORconst [1] (SGTU idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpMIPSXORconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) + v0.AddArg2(idx, len) + v.AddArg(v0) + return 
true + } +} +func rewriteValueMIPS_OpLeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16 x y) + // result: (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSXORconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpLeq16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16U x y) + // result: (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSXORconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpLeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq32 x y) + // result: (XORconst [1] (SGT x y)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSXORconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpLeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32F x y) + // result: (FPFlagTrue (CMPGEF y x)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSFPFlagTrue) + v0 := b.NewValue0(v.Pos, OpMIPSCMPGEF, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpLeq32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: 
(Leq32U x y) + // result: (XORconst [1] (SGTU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSXORconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpLeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64F x y) + // result: (FPFlagTrue (CMPGED y x)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSFPFlagTrue) + v0 := b.NewValue0(v.Pos, OpMIPSCMPGED, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpLeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8 x y) + // result: (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSXORconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpLeq8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8U x y) + // result: (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSXORconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpLess16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16 x y) + // result: (SGT (SignExt16to32 y) (SignExt16to32 x)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSGT) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, 
typ.Int32) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS_OpLess16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16U x y) + // result: (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSGTU) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS_OpLess32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Less32 x y) + // result: (SGT y x) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSGT) + v.AddArg2(y, x) + return true + } +} +func rewriteValueMIPS_OpLess32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32F x y) + // result: (FPFlagTrue (CMPGTF y x)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSFPFlagTrue) + v0 := b.NewValue0(v.Pos, OpMIPSCMPGTF, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpLess32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Less32U x y) + // result: (SGTU y x) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSGTU) + v.AddArg2(y, x) + return true + } +} +func rewriteValueMIPS_OpLess64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64F x y) + // result: (FPFlagTrue (CMPGTD y x)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSFPFlagTrue) + v0 := b.NewValue0(v.Pos, OpMIPSCMPGTD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpLess8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8 x y) + // result: (SGT (SignExt8to32 y) (SignExt8to32 x)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSGT) + v0 := 
b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS_OpLess8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8U x y) + // result: (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSGTU) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Load ptr mem) + // cond: t.IsBoolean() + // result: (MOVBUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean()) { + break + } + v.reset(OpMIPSMOVBUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is8BitInt(t) && t.IsSigned()) + // result: (MOVBload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is8BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpMIPSMOVBload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is8BitInt(t) && !t.IsSigned()) + // result: (MOVBUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is8BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpMIPSMOVBUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is16BitInt(t) && t.IsSigned()) + // result: (MOVHload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpMIPSMOVHload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is16BitInt(t) && !t.IsSigned()) + // result: (MOVHUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpMIPSMOVHUload) + v.AddArg2(ptr, mem) + 
return true + } + // match: (Load ptr mem) + // cond: (is32BitInt(t) || isPtr(t)) + // result: (MOVWload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t) || isPtr(t)) { + break + } + v.reset(OpMIPSMOVWload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (MOVFload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + v.reset(OpMIPSMOVFload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (MOVDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(OpMIPSMOVDload) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS_OpLocalAddr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (MOVWaddr {sym} (SPanchored base mem)) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(OpMIPSMOVWaddr) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) + v.AddArg(v0) + return true + } + // match: (LocalAddr {sym} base _) + // cond: !t.Elem().HasPointers() + // result: (MOVWaddr {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(OpMIPSMOVWaddr) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + return false +} +func rewriteValueMIPS_OpLsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x16 x y) + // result: (CMOVZ (SLL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) + v1 
:= b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v2.AuxInt = int32ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(v1) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValueMIPS_OpLsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x32 x y) + // result: (CMOVZ (SLL x y) (MOVWconst [0]) (SGTUconst [32] y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueMIPS_OpLsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Lsh16x64 x (Const64 [c])) + // cond: uint32(c) < 16 + // result: (SLLconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) < 16) { + break + } + v.reset(OpMIPSSLLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (Lsh16x64 _ (Const64 [c])) + // cond: uint32(c) >= 16 + // result: (MOVWconst [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) >= 16) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueMIPS_OpLsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x8 x y) + // result: (CMOVZ (SLL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) + v1 := 
b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v2.AuxInt = int32ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(v1) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValueMIPS_OpLsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x16 x y) + // result: (CMOVZ (SLL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v2.AuxInt = int32ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(v1) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValueMIPS_OpLsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x32 x y) + // result: (CMOVZ (SLL x y) (MOVWconst [0]) (SGTUconst [32] y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueMIPS_OpLsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Lsh32x64 x (Const64 [c])) + // cond: uint32(c) < 32 + // result: (SLLconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) < 32) { + break + } + v.reset(OpMIPSSLLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + 
return true + } + // match: (Lsh32x64 _ (Const64 [c])) + // cond: uint32(c) >= 32 + // result: (MOVWconst [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) >= 32) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueMIPS_OpLsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x8 x y) + // result: (CMOVZ (SLL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v2.AuxInt = int32ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(v1) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValueMIPS_OpLsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x16 x y) + // result: (CMOVZ (SLL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v2.AuxInt = int32ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(v1) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValueMIPS_OpLsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x32 x y) + // result: (CMOVZ (SLL x y) (MOVWconst [0]) (SGTUconst [32] y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSCMOVZ) + v0 
:= b.NewValue0(v.Pos, OpMIPSSLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueMIPS_OpLsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Lsh8x64 x (Const64 [c])) + // cond: uint32(c) < 8 + // result: (SLLconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) < 8) { + break + } + v.reset(OpMIPSSLLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (Lsh8x64 _ (Const64 [c])) + // cond: uint32(c) >= 8 + // result: (MOVWconst [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) >= 8) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueMIPS_OpLsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x8 x y) + // result: (CMOVZ (SLL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSSLL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v2.AuxInt = int32ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(v1) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValueMIPS_OpMIPSADD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADD x (MOVWconst [c])) + // cond: !t.IsPtr() + // result: (ADDconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMIPSMOVWconst { + continue + } + t := 
v_1.Type + c := auxIntToInt32(v_1.AuxInt) + if !(!t.IsPtr()) { + continue + } + v.reset(OpMIPSADDconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (ADD x (NEG y)) + // result: (SUB x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMIPSNEG { + continue + } + y := v_1.Args[0] + v.reset(OpMIPSSUB) + v.AddArg2(x, y) + return true + } + break + } + return false +} +func rewriteValueMIPS_OpMIPSADDconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) + // result: (MOVWaddr [off1+off2] {sym} ptr) + for { + off1 := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + v.reset(OpMIPSMOVWaddr) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg(ptr) + return true + } + // match: (ADDconst [0] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ADDconst [c] (MOVWconst [d])) + // result: (MOVWconst [int32(c+d)]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(int32(c + d)) + return true + } + // match: (ADDconst [c] (ADDconst [d] x)) + // result: (ADDconst [c+d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSADDconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpMIPSADDconst) + v.AuxInt = int32ToAuxInt(c + d) + v.AddArg(x) + return true + } + // match: (ADDconst [c] (SUBconst [d] x)) + // result: (ADDconst [c-d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSSUBconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpMIPSADDconst) + v.AuxInt = int32ToAuxInt(c - d) + v.AddArg(x) + return true + } + return false +} +func 
rewriteValueMIPS_OpMIPSAND(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AND x (MOVWconst [c])) + // result: (ANDconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMIPSMOVWconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpMIPSANDconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (AND x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (AND (SGTUconst [1] x) (SGTUconst [1] y)) + // result: (SGTUconst [1] (OR x y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMIPSSGTUconst || auxIntToInt32(v_0.AuxInt) != 1 { + continue + } + x := v_0.Args[0] + if v_1.Op != OpMIPSSGTUconst || auxIntToInt32(v_1.AuxInt) != 1 { + continue + } + y := v_1.Args[0] + v.reset(OpMIPSSGTUconst) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + return false +} +func rewriteValueMIPS_OpMIPSANDconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ANDconst [0] _) + // result: (MOVWconst [0]) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (ANDconst [-1] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != -1 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ANDconst [c] (MOVWconst [d])) + // result: (MOVWconst [c&d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(c & d) + return true + } + // match: (ANDconst [c] (ANDconst [d] x)) + // result: (ANDconst [c&d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSANDconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpMIPSANDconst) 
+ v.AuxInt = int32ToAuxInt(c & d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSCMOVZ(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVZ _ f (MOVWconst [0])) + // result: f + for { + f := v_1 + if v_2.Op != OpMIPSMOVWconst || auxIntToInt32(v_2.AuxInt) != 0 { + break + } + v.copyOf(f) + return true + } + // match: (CMOVZ a _ (MOVWconst [c])) + // cond: c!=0 + // result: a + for { + a := v_0 + if v_2.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_2.AuxInt) + if !(c != 0) { + break + } + v.copyOf(a) + return true + } + // match: (CMOVZ a (MOVWconst [0]) c) + // result: (CMOVZzero a c) + for { + a := v_0 + if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 { + break + } + c := v_2 + v.reset(OpMIPSCMOVZzero) + v.AddArg2(a, c) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSCMOVZzero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CMOVZzero _ (MOVWconst [0])) + // result: (MOVWconst [0]) + for { + if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (CMOVZzero a (MOVWconst [c])) + // cond: c!=0 + // result: a + for { + a := v_0 + if v_1.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(c != 0) { + break + } + v.copyOf(a) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSLoweredAtomicAdd(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LoweredAtomicAdd ptr (MOVWconst [c]) mem) + // cond: is16Bit(int64(c)) + // result: (LoweredAtomicAddconst [c] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + mem := v_2 + if !(is16Bit(int64(c))) { + break + } + v.reset(OpMIPSLoweredAtomicAddconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg2(ptr, mem) + return true + } + return false +} +func 
rewriteValueMIPS_OpMIPSLoweredAtomicStore32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LoweredAtomicStore32 ptr (MOVWconst [0]) mem) + // result: (LoweredAtomicStorezero ptr mem) + for { + ptr := v_0 + if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpMIPSLoweredAtomicStorezero) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVBUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBUload [off1] {sym} x:(ADDconst [off2] ptr) mem) + // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1) + // result: (MOVBUload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpMIPSADDconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + mem := v_1 + if !(is16Bit(int64(off1+off2)) || x.Uses == 1) { + break + } + v.reset(OpMIPSMOVBUload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPSMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpMIPSMOVBUload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVBUreg x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVBstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := 
auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpMIPSMOVBUreg) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVBUreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVBUreg x:(MOVBUload _ _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpMIPSMOVBUload { + break + } + v.reset(OpMIPSMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg x:(MOVBUreg _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpMIPSMOVBUreg { + break + } + v.reset(OpMIPSMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg x:(MOVBload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBUload [off] {sym} ptr mem) + for { + t := v.Type + x := v_0 + if x.Op != OpMIPSMOVBload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpMIPSMOVBUload, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVBUreg (ANDconst [c] x)) + // result: (ANDconst [c&0xff] x) + for { + if v_0.Op != OpMIPSANDconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpMIPSANDconst) + v.AuxInt = int32ToAuxInt(c & 0xff) + v.AddArg(x) + return true + } + // match: (MOVBUreg (MOVWconst [c])) + // result: (MOVWconst [int32(uint8(c))]) + for { + if v_0.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(int32(uint8(c))) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVBload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem) + // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1) + // result: (MOVBload [off1+off2] 
{sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpMIPSADDconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + mem := v_1 + if !(is16Bit(int64(off1+off2)) || x.Uses == 1) { + break + } + v.reset(OpMIPSMOVBload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPSMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpMIPSMOVBload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVBreg x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVBstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpMIPSMOVBreg) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVBreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVBreg x:(MOVBload _ _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpMIPSMOVBload { + break + } + v.reset(OpMIPSMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVBreg x:(MOVBreg _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpMIPSMOVBreg { + break + } + v.reset(OpMIPSMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVBreg x:(MOVBUload [off] {sym} 
ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBload [off] {sym} ptr mem) + for { + t := v.Type + x := v_0 + if x.Op != OpMIPSMOVBUload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpMIPSMOVBload, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVBreg (ANDconst [c] x)) + // cond: c & 0x80 == 0 + // result: (ANDconst [c&0x7f] x) + for { + if v_0.Op != OpMIPSANDconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(c&0x80 == 0) { + break + } + v.reset(OpMIPSANDconst) + v.AuxInt = int32ToAuxInt(c & 0x7f) + v.AddArg(x) + return true + } + // match: (MOVBreg (MOVWconst [c])) + // result: (MOVWconst [int32(int8(c))]) + for { + if v_0.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(int32(int8(c))) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) + // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1) + // result: (MOVBstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpMIPSADDconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + val := v_1 + mem := v_2 + if !(is16Bit(int64(off1+off2)) || x.Uses == 1) { + break + } + v.reset(OpMIPSMOVBstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := 
auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPSMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpMIPSMOVBstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVWconst [0]) mem) + // result: (MOVBstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpMIPSMOVBstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVBreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPSMOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVBUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPSMOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVHreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPSMOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} 
ptr (MOVHUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVHUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPSMOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPSMOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVBstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) + // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1) + // result: (MOVBstorezero [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpMIPSADDconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + mem := v_1 + if !(is16Bit(int64(off1+off2)) || x.Uses == 1) { + break + } + v.reset(OpMIPSMOVBstorezero) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPSMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpMIPSMOVBstorezero) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } 
+ return false +} +func rewriteValueMIPS_OpMIPSMOVDload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem) + // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1) + // result: (MOVDload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpMIPSADDconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + mem := v_1 + if !(is16Bit(int64(off1+off2)) || x.Uses == 1) { + break + } + v.reset(OpMIPSMOVDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPSMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpMIPSMOVDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVDstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVDstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) + // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1) + // result: (MOVDstore 
[off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpMIPSADDconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + val := v_1 + mem := v_2 + if !(is16Bit(int64(off1+off2)) || x.Uses == 1) { + break + } + v.reset(OpMIPSMOVDstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPSMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpMIPSMOVDstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVFload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVFload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) + // result: (MOVWgpfp val) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + val := v_1.Args[1] + if ptr != v_1.Args[0] { + break + } + v.reset(OpMIPSMOVWgpfp) + v.AddArg(val) + return true + } + // match: (MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem) + // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1) + // result: (MOVFload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpMIPSADDconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + mem := v_1 + if !(is16Bit(int64(off1+off2)) || x.Uses == 1) { + break + } + v.reset(OpMIPSMOVFload) + 
v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPSMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpMIPSMOVFload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVFstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVFstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVFstore [off] {sym} ptr (MOVWgpfp val) mem) + // result: (MOVWstore [off] {sym} ptr val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVWgpfp { + break + } + val := v_1.Args[0] + mem := v_2 + v.reset(OpMIPSMOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVFstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) + // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1) + // result: (MOVFstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpMIPSADDconst { + break + } + off2 := 
auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + val := v_1 + mem := v_2 + if !(is16Bit(int64(off1+off2)) || x.Uses == 1) { + break + } + v.reset(OpMIPSMOVFstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPSMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpMIPSMOVFstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVHUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHUload [off1] {sym} x:(ADDconst [off2] ptr) mem) + // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1) + // result: (MOVHUload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpMIPSADDconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + mem := v_1 + if !(is16Bit(int64(off1+off2)) || x.Uses == 1) { + break + } + v.reset(OpMIPSMOVHUload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPSMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpMIPSMOVHUload) + v.AuxInt = 
int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVHUreg x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVHstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpMIPSMOVHUreg) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVHUreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVHUreg x:(MOVBUload _ _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpMIPSMOVBUload { + break + } + v.reset(OpMIPSMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUload _ _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpMIPSMOVHUload { + break + } + v.reset(OpMIPSMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVBUreg _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpMIPSMOVBUreg { + break + } + v.reset(OpMIPSMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUreg _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpMIPSMOVHUreg { + break + } + v.reset(OpMIPSMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHUload [off] {sym} ptr mem) + for { + t := v.Type + x := v_0 + if x.Op != OpMIPSMOVHload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpMIPSMOVHUload, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + 
return true + } + // match: (MOVHUreg (ANDconst [c] x)) + // result: (ANDconst [c&0xffff] x) + for { + if v_0.Op != OpMIPSANDconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpMIPSANDconst) + v.AuxInt = int32ToAuxInt(c & 0xffff) + v.AddArg(x) + return true + } + // match: (MOVHUreg (MOVWconst [c])) + // result: (MOVWconst [int32(uint16(c))]) + for { + if v_0.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(int32(uint16(c))) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVHload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem) + // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1) + // result: (MOVHload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpMIPSADDconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + mem := v_1 + if !(is16Bit(int64(off1+off2)) || x.Uses == 1) { + break + } + v.reset(OpMIPSMOVHload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPSMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpMIPSMOVHload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVHreg x) + for { + off := auxIntToInt32(v.AuxInt) + sym := 
auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVHstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpMIPSMOVHreg) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVHreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVHreg x:(MOVBload _ _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpMIPSMOVBload { + break + } + v.reset(OpMIPSMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUload _ _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpMIPSMOVBUload { + break + } + v.reset(OpMIPSMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHload _ _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpMIPSMOVHload { + break + } + v.reset(OpMIPSMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBreg _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpMIPSMOVBreg { + break + } + v.reset(OpMIPSMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUreg _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpMIPSMOVBUreg { + break + } + v.reset(OpMIPSMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHreg _)) + // result: (MOVWreg x) + for { + x := v_0 + if x.Op != OpMIPSMOVHreg { + break + } + v.reset(OpMIPSMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHUload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHload [off] {sym} ptr mem) + for { + t := v.Type + x := v_0 + if x.Op != OpMIPSMOVHUload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpMIPSMOVHload, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + 
v0.AddArg2(ptr, mem) + return true + } + // match: (MOVHreg (ANDconst [c] x)) + // cond: c & 0x8000 == 0 + // result: (ANDconst [c&0x7fff] x) + for { + if v_0.Op != OpMIPSANDconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(c&0x8000 == 0) { + break + } + v.reset(OpMIPSANDconst) + v.AuxInt = int32ToAuxInt(c & 0x7fff) + v.AddArg(x) + return true + } + // match: (MOVHreg (MOVWconst [c])) + // result: (MOVWconst [int32(int16(c))]) + for { + if v_0.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(int32(int16(c))) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) + // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1) + // result: (MOVHstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpMIPSADDconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + val := v_1 + mem := v_2 + if !(is16Bit(int64(off1+off2)) || x.Uses == 1) { + break + } + v.reset(OpMIPSMOVHstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPSMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpMIPSMOVHstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVWconst [0]) 
mem) + // result: (MOVHstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpMIPSMOVHstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVHreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPSMOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVHUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPSMOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPSMOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVHstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) + // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1) + // result: (MOVHstorezero [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpMIPSADDconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + mem := v_1 + if 
!(is16Bit(int64(off1+off2)) || x.Uses == 1) { + break + } + v.reset(OpMIPSMOVHstorezero) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPSMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpMIPSMOVHstorezero) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVWload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWload [off] {sym} ptr (MOVFstore [off] {sym} ptr val _)) + // result: (MOVWfpgp val) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVFstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + val := v_1.Args[1] + if ptr != v_1.Args[0] { + break + } + v.reset(OpMIPSMOVWfpgp) + v.AddArg(val) + return true + } + // match: (MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem) + // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1) + // result: (MOVWload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpMIPSADDconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + mem := v_1 + if !(is16Bit(int64(off1+off2)) || x.Uses == 1) { + break + } + v.reset(OpMIPSMOVWload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVWload [off1+off2] 
{mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPSMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpMIPSMOVWload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVWstore { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVWnop(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVWnop (MOVWconst [c])) + // result: (MOVWconst [c]) + for { + if v_0.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(c) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVWreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVWreg x) + // cond: x.Uses == 1 + // result: (MOVWnop x) + for { + x := v_0 + if !(x.Uses == 1) { + break + } + v.reset(OpMIPSMOVWnop) + v.AddArg(x) + return true + } + // match: (MOVWreg (MOVWconst [c])) + // result: (MOVWconst [c]) + for { + if v_0.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(c) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVWstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWstore [off] {sym} ptr (MOVWfpgp val) mem) + // result: (MOVFstore [off] {sym} ptr val mem) + for { + off := 
auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVWfpgp { + break + } + val := v_1.Args[0] + mem := v_2 + v.reset(OpMIPSMOVFstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) + // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1) + // result: (MOVWstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpMIPSADDconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + val := v_1 + mem := v_2 + if !(is16Bit(int64(off1+off2)) || x.Uses == 1) { + break + } + v.reset(OpMIPSMOVWstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPSMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpMIPSMOVWstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) + // result: (MOVWstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpMIPSMOVWstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + 
sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPSMOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPSMOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMOVWstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) + // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1) + // result: (MOVWstorezero [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if x.Op != OpMIPSADDconst { + break + } + off2 := auxIntToInt32(x.AuxInt) + ptr := x.Args[0] + mem := v_1 + if !(is16Bit(int64(off1+off2)) || x.Uses == 1) { + break + } + v.reset(OpMIPSMOVWstorezero) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPSMOVWaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpMIPSMOVWstorezero) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSMUL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MUL (MOVWconst [0]) _ ) + // result: (MOVWconst [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != 0 { + continue + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + break + } + // match: (MUL (MOVWconst [1]) x ) + // result: x + for { + for _i0 := 0; _i0 <= 1; 
_i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != 1 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (MUL (MOVWconst [-1]) x ) + // result: (NEG x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != -1 { + continue + } + x := v_1 + v.reset(OpMIPSNEG) + v.AddArg(x) + return true + } + break + } + // match: (MUL (MOVWconst [c]) x ) + // cond: isPowerOfTwo64(int64(uint32(c))) + // result: (SLLconst [int32(log2uint32(int64(c)))] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMIPSMOVWconst { + continue + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + if !(isPowerOfTwo64(int64(uint32(c)))) { + continue + } + v.reset(OpMIPSSLLconst) + v.AuxInt = int32ToAuxInt(int32(log2uint32(int64(c)))) + v.AddArg(x) + return true + } + break + } + // match: (MUL (MOVWconst [c]) (MOVWconst [d])) + // result: (MOVWconst [c*d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMIPSMOVWconst { + continue + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpMIPSMOVWconst { + continue + } + d := auxIntToInt32(v_1.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(c * d) + return true + } + break + } + return false +} +func rewriteValueMIPS_OpMIPSNEG(v *Value) bool { + v_0 := v.Args[0] + // match: (NEG (MOVWconst [c])) + // result: (MOVWconst [-c]) + for { + if v_0.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(-c) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSNOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NOR x (MOVWconst [c])) + // result: (NORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMIPSMOVWconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + 
v.reset(OpMIPSNORconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValueMIPS_OpMIPSNORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (NORconst [c] (MOVWconst [d])) + // result: (MOVWconst [^(c|d)]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(^(c | d)) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OR x (MOVWconst [c])) + // result: (ORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMIPSMOVWconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpMIPSORconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (OR x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (OR (SGTUzero x) (SGTUzero y)) + // result: (SGTUzero (OR x y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMIPSSGTUzero { + continue + } + x := v_0.Args[0] + if v_1.Op != OpMIPSSGTUzero { + continue + } + y := v_1.Args[0] + v.reset(OpMIPSSGTUzero) + v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + return false +} +func rewriteValueMIPS_OpMIPSORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ORconst [0] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ORconst [-1] _) + // result: (MOVWconst [-1]) + for { + if auxIntToInt32(v.AuxInt) != -1 { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(-1) + return true + } + // match: (ORconst [c] (MOVWconst [d])) + // result: (MOVWconst [c|d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != 
OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(c | d) + return true + } + // match: (ORconst [c] (ORconst [d] x)) + // result: (ORconst [c|d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSORconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpMIPSORconst) + v.AuxInt = int32ToAuxInt(c | d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSSGT(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SGT (MOVWconst [c]) x) + // result: (SGTconst [c] x) + for { + if v_0.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpMIPSSGTconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (SGT x (MOVWconst [0])) + // result: (SGTzero x) + for { + x := v_0 + if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 { + break + } + v.reset(OpMIPSSGTzero) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSSGTU(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SGTU (MOVWconst [c]) x) + // result: (SGTUconst [c] x) + for { + if v_0.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + x := v_1 + v.reset(OpMIPSSGTUconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (SGTU x (MOVWconst [0])) + // result: (SGTUzero x) + for { + x := v_0 + if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 { + break + } + v.reset(OpMIPSSGTUzero) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSSGTUconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SGTUconst [c] (MOVWconst [d])) + // cond: uint32(c) > uint32(d) + // result: (MOVWconst [1]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + if !(uint32(c) > uint32(d)) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = 
int32ToAuxInt(1) + return true + } + // match: (SGTUconst [c] (MOVWconst [d])) + // cond: uint32(c) <= uint32(d) + // result: (MOVWconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + if !(uint32(c) <= uint32(d)) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SGTUconst [c] (MOVBUreg _)) + // cond: 0xff < uint32(c) + // result: (MOVWconst [1]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVBUreg || !(0xff < uint32(c)) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SGTUconst [c] (MOVHUreg _)) + // cond: 0xffff < uint32(c) + // result: (MOVWconst [1]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVHUreg || !(0xffff < uint32(c)) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SGTUconst [c] (ANDconst [m] _)) + // cond: uint32(m) < uint32(c) + // result: (MOVWconst [1]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSANDconst { + break + } + m := auxIntToInt32(v_0.AuxInt) + if !(uint32(m) < uint32(c)) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SGTUconst [c] (SRLconst _ [d])) + // cond: uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) + // result: (MOVWconst [1]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSSRLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + if !(uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSSGTUzero(v *Value) bool { + v_0 := v.Args[0] + // match: (SGTUzero (MOVWconst [d])) + // cond: d != 0 + // result: (MOVWconst [1]) + for { + if v_0.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + if !(d != 0) { + break + } + 
v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SGTUzero (MOVWconst [d])) + // cond: d == 0 + // result: (MOVWconst [0]) + for { + if v_0.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + if !(d == 0) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSSGTconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SGTconst [c] (MOVWconst [d])) + // cond: c > d + // result: (MOVWconst [1]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + if !(c > d) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVWconst [d])) + // cond: c <= d + // result: (MOVWconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + if !(c <= d) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVBreg _)) + // cond: 0x7f < c + // result: (MOVWconst [1]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVBreg || !(0x7f < c) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVBreg _)) + // cond: c <= -0x80 + // result: (MOVWconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVBreg || !(c <= -0x80) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVBUreg _)) + // cond: 0xff < c + // result: (MOVWconst [1]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVBUreg || !(0xff < c) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVBUreg _)) + // cond: c < 0 + // result: (MOVWconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != 
OpMIPSMOVBUreg || !(c < 0) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVHreg _)) + // cond: 0x7fff < c + // result: (MOVWconst [1]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVHreg || !(0x7fff < c) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVHreg _)) + // cond: c <= -0x8000 + // result: (MOVWconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVHreg || !(c <= -0x8000) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVHUreg _)) + // cond: 0xffff < c + // result: (MOVWconst [1]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVHUreg || !(0xffff < c) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVHUreg _)) + // cond: c < 0 + // result: (MOVWconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVHUreg || !(c < 0) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SGTconst [c] (ANDconst [m] _)) + // cond: 0 <= m && m < c + // result: (MOVWconst [1]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSANDconst { + break + } + m := auxIntToInt32(v_0.AuxInt) + if !(0 <= m && m < c) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SGTconst [c] (SRLconst _ [d])) + // cond: 0 <= c && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) + // result: (MOVWconst [1]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSSRLconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + if !(0 <= c && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSSGTzero(v *Value) bool { + v_0 
:= v.Args[0] + // match: (SGTzero (MOVWconst [d])) + // cond: d > 0 + // result: (MOVWconst [1]) + for { + if v_0.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + if !(d > 0) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (SGTzero (MOVWconst [d])) + // cond: d <= 0 + // result: (MOVWconst [0]) + for { + if v_0.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + if !(d <= 0) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSSLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SLL x (MOVWconst [c])) + // result: (SLLconst x [c&31]) + for { + x := v_0 + if v_1.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpMIPSSLLconst) + v.AuxInt = int32ToAuxInt(c & 31) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSSLLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SLLconst [c] (MOVWconst [d])) + // result: (MOVWconst [d<>uint32(c)]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(d >> uint32(c)) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSSRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRL x (MOVWconst [c])) + // result: (SRLconst x [c&31]) + for { + x := v_0 + if v_1.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpMIPSSRLconst) + v.AuxInt = int32ToAuxInt(c & 31) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSSRLconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SRLconst [c] (MOVWconst [d])) + // result: (MOVWconst [int32(uint32(d)>>uint32(c))]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + 
v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(int32(uint32(d) >> uint32(c))) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSSUB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUB x (MOVWconst [c])) + // result: (SUBconst [c] x) + for { + x := v_0 + if v_1.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpMIPSSUBconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (SUB x x) + // result: (MOVWconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (SUB (MOVWconst [0]) x) + // result: (NEG x) + for { + if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_1 + v.reset(OpMIPSNEG) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSSUBconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SUBconst [0] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SUBconst [c] (MOVWconst [d])) + // result: (MOVWconst [d-c]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(d - c) + return true + } + // match: (SUBconst [c] (SUBconst [d] x)) + // result: (ADDconst [-c-d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSSUBconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpMIPSADDconst) + v.AuxInt = int32ToAuxInt(-c - d) + v.AddArg(x) + return true + } + // match: (SUBconst [c] (ADDconst [d] x)) + // result: (ADDconst [-c+d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSADDconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpMIPSADDconst) + v.AuxInt = int32ToAuxInt(-c + d) + v.AddArg(x) + return true + } + return false +} +func 
rewriteValueMIPS_OpMIPSXOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XOR x (MOVWconst [c])) + // result: (XORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMIPSMOVWconst { + continue + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpMIPSXORconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (XOR x x) + // result: (MOVWconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueMIPS_OpMIPSXORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (XORconst [0] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (XORconst [-1] x) + // result: (NORconst [0] x) + for { + if auxIntToInt32(v.AuxInt) != -1 { + break + } + x := v_0 + v.reset(OpMIPSNORconst) + v.AuxInt = int32ToAuxInt(0) + v.AddArg(x) + return true + } + // match: (XORconst [c] (MOVWconst [d])) + // result: (MOVWconst [c^d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(c ^ d) + return true + } + // match: (XORconst [c] (XORconst [d] x)) + // result: (XORconst [c^d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpMIPSXORconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpMIPSXORconst) + v.AuxInt = int32ToAuxInt(c ^ d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS_OpMod16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16 x y) + // result: (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) + v1 := b.NewValue0(v.Pos, 
OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpMod16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16u x y) + // result: (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpMod32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32 x y) + // result: (Select0 (DIV x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpMod32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32u x y) + // result: (Select0 (DIVU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpMod8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8 x y) + // result: (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32)) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true 
+ } +} +func rewriteValueMIPS_OpMod8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8u x y) + // result: (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpMove(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Move [0] _ _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBUload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMIPSMOVBstore) + v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore dst (MOVHUload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpMIPSMOVHstore) + v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] dst src mem) + // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMIPSMOVBstore) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) + v0.AuxInt = 
int32ToAuxInt(1) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [4] {t} dst src mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore dst (MOVWload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpMIPSMOVWstore) + v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [4] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpMIPSMOVHstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [4] dst src mem) + // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMIPSMOVBstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, 
OpMIPSMOVBUload, typ.UInt8) + v2.AuxInt = int32ToAuxInt(2) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(1) + v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) + v4.AuxInt = int32ToAuxInt(1) + v4.AddArg2(src, mem) + v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) + v6 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [3] dst src mem) + // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMIPSMOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) + v2.AuxInt = int32ToAuxInt(1) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [8] {t} dst src mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpMIPSMOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, 
OpMIPSMOVWload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [8] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpMIPSMOVHstore) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) + v0.AuxInt = int32ToAuxInt(6) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) + v2.AuxInt = int32ToAuxInt(4) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(2) + v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) + v4.AuxInt = int32ToAuxInt(2) + v4.AddArg2(src, mem) + v5 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) + v6 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [6] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpMIPSMOVHstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := 
b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) + v2.AuxInt = int32ToAuxInt(2) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [12] {t} dst src mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 12 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpMIPSMOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) + v2.AuxInt = int32ToAuxInt(4) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [16] {t} dst src mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [12] dst (MOVWload [12] src mem) (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpMIPSMOVWstore) + v.AuxInt = int32ToAuxInt(12) + v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(12) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(8) + v2 := 
b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) + v2.AuxInt = int32ToAuxInt(8) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(4) + v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) + v4.AuxInt = int32ToAuxInt(4) + v4.AddArg2(src, mem) + v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) + v6 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [s] {t} dst src mem) + // cond: (s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0) + // result: (LoweredMove [int32(t.Alignment())] dst src (ADDconst src [int32(s-moveSize(t.Alignment(), config))]) mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0) { + break + } + v.reset(OpMIPSLoweredMove) + v.AuxInt = int32ToAuxInt(int32(t.Alignment())) + v0 := b.NewValue0(v.Pos, OpMIPSADDconst, src.Type) + v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config))) + v0.AddArg(src) + v.AddArg4(dst, src, v0, mem) + return true + } + return false +} +func rewriteValueMIPS_OpNeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq16 x y) + // result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0])) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSGTU) + v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v3.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueMIPS_OpNeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (Neq32 x y) + // result: (SGTU (XOR x y) (MOVWconst [0])) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSGTU) + v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS_OpNeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq32F x y) + // result: (FPFlagFalse (CMPEQF x y)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSFPFlagFalse) + v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpNeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq64F x y) + // result: (FPFlagFalse (CMPEQD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSFPFlagFalse) + v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpNeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq8 x y) + // result: (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0])) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSGTU) + v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v3.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueMIPS_OpNeqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NeqPtr x y) + // result: (SGTU (XOR x y) (MOVWconst [0])) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSGTU) + v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, 
OpMIPSMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS_OpNot(v *Value) bool { + v_0 := v.Args[0] + // match: (Not x) + // result: (XORconst [1] x) + for { + x := v_0 + v.reset(OpMIPSXORconst) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueMIPS_OpOffPtr(v *Value) bool { + v_0 := v.Args[0] + // match: (OffPtr [off] ptr:(SP)) + // result: (MOVWaddr [int32(off)] ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + if ptr.Op != OpSP { + break + } + v.reset(OpMIPSMOVWaddr) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) + return true + } + // match: (OffPtr [off] ptr) + // result: (ADDconst [int32(off)] ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(OpMIPSADDconst) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) + return true + } +} +func rewriteValueMIPS_OpPanicBounds(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 0 + // result: (LoweredPanicBoundsA [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 0) { + break + } + v.reset(OpMIPSLoweredPanicBoundsA) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 1 + // result: (LoweredPanicBoundsB [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 1) { + break + } + v.reset(OpMIPSLoweredPanicBoundsB) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 2 + // result: (LoweredPanicBoundsC [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 2) { + break + } + v.reset(OpMIPSLoweredPanicBoundsC) + v.AuxInt = 
int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + return false +} +func rewriteValueMIPS_OpPanicExtend(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PanicExtend [kind] hi lo y mem) + // cond: boundsABI(kind) == 0 + // result: (LoweredPanicExtendA [kind] hi lo y mem) + for { + kind := auxIntToInt64(v.AuxInt) + hi := v_0 + lo := v_1 + y := v_2 + mem := v_3 + if !(boundsABI(kind) == 0) { + break + } + v.reset(OpMIPSLoweredPanicExtendA) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg4(hi, lo, y, mem) + return true + } + // match: (PanicExtend [kind] hi lo y mem) + // cond: boundsABI(kind) == 1 + // result: (LoweredPanicExtendB [kind] hi lo y mem) + for { + kind := auxIntToInt64(v.AuxInt) + hi := v_0 + lo := v_1 + y := v_2 + mem := v_3 + if !(boundsABI(kind) == 1) { + break + } + v.reset(OpMIPSLoweredPanicExtendB) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg4(hi, lo, y, mem) + return true + } + // match: (PanicExtend [kind] hi lo y mem) + // cond: boundsABI(kind) == 2 + // result: (LoweredPanicExtendC [kind] hi lo y mem) + for { + kind := auxIntToInt64(v.AuxInt) + hi := v_0 + lo := v_1 + y := v_2 + mem := v_3 + if !(boundsABI(kind) == 2) { + break + } + v.reset(OpMIPSLoweredPanicExtendC) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg4(hi, lo, y, mem) + return true + } + return false +} +func rewriteValueMIPS_OpRotateLeft16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft16 x (MOVWconst [c])) + // result: (Or16 (Lsh16x32 x (MOVWconst [c&15])) (Rsh16Ux32 x (MOVWconst [-c&15]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpOr16) + v0 := b.NewValue0(v.Pos, OpLsh16x32, t) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(c & 15) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh16Ux32, t) + v3 := b.NewValue0(v.Pos, 
OpMIPSMOVWconst, typ.UInt32) + v3.AuxInt = int32ToAuxInt(-c & 15) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueMIPS_OpRotateLeft32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft32 x (MOVWconst [c])) + // result: (Or32 (Lsh32x32 x (MOVWconst [c&31])) (Rsh32Ux32 x (MOVWconst [-c&31]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpOr32) + v0 := b.NewValue0(v.Pos, OpLsh32x32, t) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(c & 31) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh32Ux32, t) + v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v3.AuxInt = int32ToAuxInt(-c & 31) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueMIPS_OpRotateLeft64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft64 x (MOVWconst [c])) + // result: (Or64 (Lsh64x32 x (MOVWconst [c&63])) (Rsh64Ux32 x (MOVWconst [-c&63]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpOr64) + v0 := b.NewValue0(v.Pos, OpLsh64x32, t) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(c & 63) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh64Ux32, t) + v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v3.AuxInt = int32ToAuxInt(-c & 63) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueMIPS_OpRotateLeft8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft8 x (MOVWconst [c])) + // result: (Or8 (Lsh8x32 x (MOVWconst [c&7])) (Rsh8Ux32 x (MOVWconst [-c&7]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpMIPSMOVWconst { + 
break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpLsh8x32, t) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(c & 7) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh8Ux32, t) + v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v3.AuxInt = int32ToAuxInt(-c & 7) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueMIPS_OpRsh16Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux16 x y) + // result: (CMOVZ (SRL (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v3.AuxInt = int32ToAuxInt(0) + v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v4.AuxInt = int32ToAuxInt(32) + v4.AddArg(v2) + v.AddArg3(v0, v3, v4) + return true + } +} +func rewriteValueMIPS_OpRsh16Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux32 x y) + // result: (CMOVZ (SRL (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v2.AuxInt = int32ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValueMIPS_OpRsh16Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (Rsh16Ux64 x (Const64 [c])) + // cond: uint32(c) < 16 + // result: (SRLconst (SLLconst x [16]) [int32(c+16)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) < 16) { + break + } + v.reset(OpMIPSSRLconst) + v.AuxInt = int32ToAuxInt(int32(c + 16)) + v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(16) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh16Ux64 _ (Const64 [c])) + // cond: uint32(c) >= 16 + // result: (MOVWconst [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) >= 16) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueMIPS_OpRsh16Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux8 x y) + // result: (CMOVZ (SRL (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v3.AuxInt = int32ToAuxInt(0) + v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v4.AuxInt = int32ToAuxInt(32) + v4.AddArg(v2) + v.AddArg3(v0, v3, v4) + return true + } +} +func rewriteValueMIPS_OpRsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x16 x y) + // result: (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y)))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, 
typ.UInt32) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v3.AuxInt = int32ToAuxInt(31) + v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v4.AuxInt = int32ToAuxInt(32) + v4.AddArg(v2) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS_OpRsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x32 x y) + // result: (SRA (SignExt16to32 x) ( CMOVZ y (MOVWconst [31]) (SGTUconst [32] y))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v2.AuxInt = int32ToAuxInt(31) + v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(y) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS_OpRsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x64 x (Const64 [c])) + // cond: uint32(c) < 16 + // result: (SRAconst (SLLconst x [16]) [int32(c+16)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) < 16) { + break + } + v.reset(OpMIPSSRAconst) + v.AuxInt = int32ToAuxInt(int32(c + 16)) + v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(16) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh16x64 x (Const64 [c])) + // cond: uint32(c) >= 16 + // result: (SRAconst (SLLconst x [16]) [31]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) >= 16) { + break + } + v.reset(OpMIPSSRAconst) + v.AuxInt = int32ToAuxInt(31) + v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(16) + v0.AddArg(x) + 
v.AddArg(v0) + return true + } + return false +} +func rewriteValueMIPS_OpRsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x8 x y) + // result: (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y)))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v3.AuxInt = int32ToAuxInt(31) + v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v4.AuxInt = int32ToAuxInt(32) + v4.AddArg(v2) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS_OpRsh32Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux16 x y) + // result: (CMOVZ (SRL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v2.AuxInt = int32ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(v1) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValueMIPS_OpRsh32Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux32 x y) + // result: (CMOVZ (SRL x y) (MOVWconst [0]) (SGTUconst [32] y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(0) + v2 := b.NewValue0(v.Pos, 
OpMIPSSGTUconst, typ.Bool) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueMIPS_OpRsh32Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Rsh32Ux64 x (Const64 [c])) + // cond: uint32(c) < 32 + // result: (SRLconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) < 32) { + break + } + v.reset(OpMIPSSRLconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (Rsh32Ux64 _ (Const64 [c])) + // cond: uint32(c) >= 32 + // result: (MOVWconst [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) >= 32) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueMIPS_OpRsh32Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux8 x y) + // result: (CMOVZ (SRL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v2.AuxInt = int32ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(v1) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValueMIPS_OpRsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x16 x y) + // result: (SRA x ( CMOVZ (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y)))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSRA) + v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v2 := b.NewValue0(v.Pos, 
OpMIPSMOVWconst, typ.UInt32) + v2.AuxInt = int32ToAuxInt(31) + v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(v1) + v0.AddArg3(v1, v2, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueMIPS_OpRsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x32 x y) + // result: (SRA x ( CMOVZ y (MOVWconst [31]) (SGTUconst [32] y))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSRA) + v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(31) + v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueMIPS_OpRsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Rsh32x64 x (Const64 [c])) + // cond: uint32(c) < 32 + // result: (SRAconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) < 32) { + break + } + v.reset(OpMIPSSRAconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (Rsh32x64 x (Const64 [c])) + // cond: uint32(c) >= 32 + // result: (SRAconst x [31]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) >= 32) { + break + } + v.reset(OpMIPSSRAconst) + v.AuxInt = int32ToAuxInt(31) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS_OpRsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x8 x y) + // result: (SRA x ( CMOVZ (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y)))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSRA) + v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v2 := 
b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v2.AuxInt = int32ToAuxInt(31) + v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(v1) + v0.AddArg3(v1, v2, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueMIPS_OpRsh8Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux16 x y) + // result: (CMOVZ (SRL (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v3.AuxInt = int32ToAuxInt(0) + v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v4.AuxInt = int32ToAuxInt(32) + v4.AddArg(v2) + v.AddArg3(v0, v3, v4) + return true + } +} +func rewriteValueMIPS_OpRsh8Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux32 x y) + // result: (CMOVZ (SRL (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v2.AuxInt = int32ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValueMIPS_OpRsh8Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux64 x (Const64 [c])) + // cond: uint32(c) < 8 + // result: (SRLconst (SLLconst x [24]) [int32(c+24)]) + for { + x := v_0 + if v_1.Op != OpConst64 { 
+ break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) < 8) { + break + } + v.reset(OpMIPSSRLconst) + v.AuxInt = int32ToAuxInt(int32(c + 24)) + v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(24) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh8Ux64 _ (Const64 [c])) + // cond: uint32(c) >= 8 + // result: (MOVWconst [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) >= 8) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValueMIPS_OpRsh8Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux8 x y) + // result: (CMOVZ (SRL (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSSRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v3.AuxInt = int32ToAuxInt(0) + v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v4.AuxInt = int32ToAuxInt(32) + v4.AddArg(v2) + v.AddArg3(v0, v3, v4) + return true + } +} +func rewriteValueMIPS_OpRsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x16 x y) + // result: (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y)))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v3.AuxInt = int32ToAuxInt(31) + v4 := b.NewValue0(v.Pos, 
OpMIPSSGTUconst, typ.Bool) + v4.AuxInt = int32ToAuxInt(32) + v4.AddArg(v2) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS_OpRsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x32 x y) + // result: (SRA (SignExt16to32 x) ( CMOVZ y (MOVWconst [31]) (SGTUconst [32] y))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v2.AuxInt = int32ToAuxInt(31) + v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v3.AuxInt = int32ToAuxInt(32) + v3.AddArg(y) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS_OpRsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x64 x (Const64 [c])) + // cond: uint32(c) < 8 + // result: (SRAconst (SLLconst x [24]) [int32(c+24)]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) < 8) { + break + } + v.reset(OpMIPSSRAconst) + v.AuxInt = int32ToAuxInt(int32(c + 24)) + v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(24) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh8x64 x (Const64 [c])) + // cond: uint32(c) >= 8 + // result: (SRAconst (SLLconst x [24]) [31]) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint32(c) >= 8) { + break + } + v.reset(OpMIPSSRAconst) + v.AuxInt = int32ToAuxInt(31) + v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(24) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueMIPS_OpRsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x8 x y) 
+ // result: (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y)))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPSSRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v3.AuxInt = int32ToAuxInt(31) + v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) + v4.AuxInt = int32ToAuxInt(32) + v4.AddArg(v2) + v1.AddArg3(v2, v3, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS_OpSelect0(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select0 (Add32carry x y)) + // result: (ADD x y) + for { + if v_0.Op != OpAdd32carry { + break + } + t := v_0.Type + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpMIPSADD) + v.Type = t.FieldType(0) + v.AddArg2(x, y) + return true + } + // match: (Select0 (Sub32carry x y)) + // result: (SUB x y) + for { + if v_0.Op != OpSub32carry { + break + } + t := v_0.Type + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpMIPSSUB) + v.Type = t.FieldType(0) + v.AddArg2(x, y) + return true + } + // match: (Select0 (MULTU (MOVWconst [0]) _ )) + // result: (MOVWconst [0]) + for { + if v_0.Op != OpMIPSMULTU { + break + } + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 0 { + continue + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + break + } + // match: (Select0 (MULTU (MOVWconst [1]) _ )) + // result: (MOVWconst [0]) + for { + if v_0.Op != OpMIPSMULTU { + break + } + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 1 { + continue + } + v.reset(OpMIPSMOVWconst) + 
v.AuxInt = int32ToAuxInt(0) + return true + } + break + } + // match: (Select0 (MULTU (MOVWconst [-1]) x )) + // result: (CMOVZ (ADDconst [-1] x) (MOVWconst [0]) x) + for { + if v_0.Op != OpMIPSMULTU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != -1 { + continue + } + x := v_0_1 + v.reset(OpMIPSCMOVZ) + v0 := b.NewValue0(v.Pos, OpMIPSADDconst, x.Type) + v0.AuxInt = int32ToAuxInt(-1) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(0) + v.AddArg3(v0, v1, x) + return true + } + break + } + // match: (Select0 (MULTU (MOVWconst [c]) x )) + // cond: isPowerOfTwo64(int64(uint32(c))) + // result: (SRLconst [int32(32-log2uint32(int64(c)))] x) + for { + if v_0.Op != OpMIPSMULTU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpMIPSMOVWconst { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + x := v_0_1 + if !(isPowerOfTwo64(int64(uint32(c)))) { + continue + } + v.reset(OpMIPSSRLconst) + v.AuxInt = int32ToAuxInt(int32(32 - log2uint32(int64(c)))) + v.AddArg(x) + return true + } + break + } + // match: (Select0 (MULTU (MOVWconst [c]) (MOVWconst [d]))) + // result: (MOVWconst [int32((int64(uint32(c))*int64(uint32(d)))>>32)]) + for { + if v_0.Op != OpMIPSMULTU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpMIPSMOVWconst { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_0_1.Op != OpMIPSMOVWconst { + continue + } + d := auxIntToInt32(v_0_1.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(int32((int64(uint32(c)) * int64(uint32(d))) >> 32)) + return true + } + break + } + // match: (Select0 (DIV (MOVWconst [c]) 
(MOVWconst [d]))) + // cond: d != 0 + // result: (MOVWconst [c%d]) + for { + if v_0.Op != OpMIPSDIV { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_0_0.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(c % d) + return true + } + // match: (Select0 (DIVU (MOVWconst [c]) (MOVWconst [d]))) + // cond: d != 0 + // result: (MOVWconst [int32(uint32(c)%uint32(d))]) + for { + if v_0.Op != OpMIPSDIVU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_0_0.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) % uint32(d))) + return true + } + return false +} +func rewriteValueMIPS_OpSelect1(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select1 (Add32carry x y)) + // result: (SGTU x (ADD x y)) + for { + if v_0.Op != OpAdd32carry { + break + } + t := v_0.Type + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpMIPSSGTU) + v.Type = typ.Bool + v0 := b.NewValue0(v.Pos, OpMIPSADD, t.FieldType(0)) + v0.AddArg2(x, y) + v.AddArg2(x, v0) + return true + } + // match: (Select1 (Sub32carry x y)) + // result: (SGTU (SUB x y) x) + for { + if v_0.Op != OpSub32carry { + break + } + t := v_0.Type + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpMIPSSGTU) + v.Type = typ.Bool + v0 := b.NewValue0(v.Pos, OpMIPSSUB, t.FieldType(0)) + v0.AddArg2(x, y) + v.AddArg2(v0, x) + return true + } + // match: (Select1 (MULTU (MOVWconst [0]) _ )) + // result: (MOVWconst [0]) + for { + if v_0.Op != OpMIPSMULTU { + break + } + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = 
_i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 0 { + continue + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(0) + return true + } + break + } + // match: (Select1 (MULTU (MOVWconst [1]) x )) + // result: x + for { + if v_0.Op != OpMIPSMULTU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 1 { + continue + } + x := v_0_1 + v.copyOf(x) + return true + } + break + } + // match: (Select1 (MULTU (MOVWconst [-1]) x )) + // result: (NEG x) + for { + if v_0.Op != OpMIPSMULTU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != -1 { + continue + } + x := v_0_1 + v.reset(OpMIPSNEG) + v.Type = x.Type + v.AddArg(x) + return true + } + break + } + // match: (Select1 (MULTU (MOVWconst [c]) x )) + // cond: isPowerOfTwo64(int64(uint32(c))) + // result: (SLLconst [int32(log2uint32(int64(c)))] x) + for { + if v_0.Op != OpMIPSMULTU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpMIPSMOVWconst { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + x := v_0_1 + if !(isPowerOfTwo64(int64(uint32(c)))) { + continue + } + v.reset(OpMIPSSLLconst) + v.AuxInt = int32ToAuxInt(int32(log2uint32(int64(c)))) + v.AddArg(x) + return true + } + break + } + // match: (Select1 (MULTU (MOVWconst [c]) (MOVWconst [d]))) + // result: (MOVWconst [int32(uint32(c)*uint32(d))]) + for { + if v_0.Op != OpMIPSMULTU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpMIPSMOVWconst { + continue + } + c := 
auxIntToInt32(v_0_0.AuxInt) + if v_0_1.Op != OpMIPSMOVWconst { + continue + } + d := auxIntToInt32(v_0_1.AuxInt) + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) * uint32(d))) + return true + } + break + } + // match: (Select1 (DIV (MOVWconst [c]) (MOVWconst [d]))) + // cond: d != 0 + // result: (MOVWconst [c/d]) + for { + if v_0.Op != OpMIPSDIV { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_0_0.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(c / d) + return true + } + // match: (Select1 (DIVU (MOVWconst [c]) (MOVWconst [d]))) + // cond: d != 0 + // result: (MOVWconst [int32(uint32(c)/uint32(d))]) + for { + if v_0.Op != OpMIPSDIVU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpMIPSMOVWconst { + break + } + c := auxIntToInt32(v_0_0.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpMIPSMOVWconst { + break + } + d := auxIntToInt32(v_0_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpMIPSMOVWconst) + v.AuxInt = int32ToAuxInt(int32(uint32(c) / uint32(d))) + return true + } + return false +} +func rewriteValueMIPS_OpSignmask(v *Value) bool { + v_0 := v.Args[0] + // match: (Signmask x) + // result: (SRAconst x [31]) + for { + x := v_0 + v.reset(OpMIPSSRAconst) + v.AuxInt = int32ToAuxInt(31) + v.AddArg(x) + return true + } +} +func rewriteValueMIPS_OpSlicemask(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Slicemask x) + // result: (SRAconst (NEG x) [31]) + for { + t := v.Type + x := v_0 + v.reset(OpMIPSSRAconst) + v.AuxInt = int32ToAuxInt(31) + v0 := b.NewValue0(v.Pos, OpMIPSNEG, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS_OpStore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Store {t} ptr val mem) + // cond: 
t.Size() == 1 + // result: (MOVBstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 1) { + break + } + v.reset(OpMIPSMOVBstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 2 + // result: (MOVHstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 2) { + break + } + v.reset(OpMIPSMOVHstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && !t.IsFloat() + // result: (MOVWstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && !t.IsFloat()) { + break + } + v.reset(OpMIPSMOVWstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && t.IsFloat() + // result: (MOVFstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && t.IsFloat()) { + break + } + v.reset(OpMIPSMOVFstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && t.IsFloat() + // result: (MOVDstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && t.IsFloat()) { + break + } + v.reset(OpMIPSMOVDstore) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueMIPS_OpSub32withcarry(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Sub32withcarry x y c) + // result: (SUB (SUB x y) c) + for { + t := v.Type + x := v_0 + y := v_1 + c := v_2 + v.reset(OpMIPSSUB) + v0 := b.NewValue0(v.Pos, OpMIPSSUB, t) + v0.AddArg2(x, y) + v.AddArg2(v0, c) + return true + } +} +func rewriteValueMIPS_OpZero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Zero [0] _ 
mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_1 + v.copyOf(mem) + return true + } + // match: (Zero [1] ptr mem) + // result: (MOVBstore ptr (MOVWconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpMIPSMOVBstore) + v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [2] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore ptr (MOVWconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpMIPSMOVHstore) + v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [2] ptr mem) + // result: (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpMIPSMOVBstore) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [4] {t} ptr mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore ptr (MOVWconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpMIPSMOVWstore) + v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [4] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 4 { + 
break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpMIPSMOVHstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [4] ptr mem) + // result: (MOVBstore [3] ptr (MOVWconst [0]) (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpMIPSMOVBstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(1) + v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(0) + v3.AddArg3(ptr, v0, mem) + v2.AddArg3(ptr, v0, v3) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [3] ptr mem) + // result: (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpMIPSMOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(0) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [6] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [4] ptr (MOVWconst [0]) 
(MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpMIPSMOVHstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(0) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [8] {t} ptr mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpMIPSMOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [12] {t} ptr mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 12 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpMIPSMOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(0) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) 
+ return true + } + // match: (Zero [16] {t} ptr mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [12] ptr (MOVWconst [0]) (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpMIPSMOVWstore) + v.AuxInt = int32ToAuxInt(12) + v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(8) + v2 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(4) + v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(0) + v3.AddArg3(ptr, v0, mem) + v2.AddArg3(ptr, v0, v3) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [s] {t} ptr mem) + // cond: (s > 16 || t.Alignment()%4 != 0) + // result: (LoweredZero [int32(t.Alignment())] ptr (ADDconst ptr [int32(s-moveSize(t.Alignment(), config))]) mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(s > 16 || t.Alignment()%4 != 0) { + break + } + v.reset(OpMIPSLoweredZero) + v.AuxInt = int32ToAuxInt(int32(t.Alignment())) + v0 := b.NewValue0(v.Pos, OpMIPSADDconst, ptr.Type) + v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config))) + v0.AddArg(ptr) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} +func rewriteValueMIPS_OpZeromask(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Zeromask x) + // result: (NEG (SGTU x (MOVWconst [0]))) + for { + x := v_0 + v.reset(OpMIPSNEG) + v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) + v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(0) + v0.AddArg2(x, v1) + v.AddArg(v0) + return true + } +} +func rewriteBlockMIPS(b 
*Block) bool { + switch b.Kind { + case BlockMIPSEQ: + // match: (EQ (FPFlagTrue cmp) yes no) + // result: (FPF cmp yes no) + for b.Controls[0].Op == OpMIPSFPFlagTrue { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockMIPSFPF, cmp) + return true + } + // match: (EQ (FPFlagFalse cmp) yes no) + // result: (FPT cmp yes no) + for b.Controls[0].Op == OpMIPSFPFlagFalse { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockMIPSFPT, cmp) + return true + } + // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpMIPSXORconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpMIPSSGT { + break + } + b.resetWithControl(BlockMIPSNE, cmp) + return true + } + // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpMIPSXORconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpMIPSSGTU { + break + } + b.resetWithControl(BlockMIPSNE, cmp) + return true + } + // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpMIPSXORconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpMIPSSGTconst { + break + } + b.resetWithControl(BlockMIPSNE, cmp) + return true + } + // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpMIPSXORconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpMIPSSGTUconst { + break + } + b.resetWithControl(BlockMIPSNE, cmp) + return true + } + // match: (EQ (XORconst [1] cmp:(SGTzero _)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpMIPSXORconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if 
cmp.Op != OpMIPSSGTzero { + break + } + b.resetWithControl(BlockMIPSNE, cmp) + return true + } + // match: (EQ (XORconst [1] cmp:(SGTUzero _)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpMIPSXORconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpMIPSSGTUzero { + break + } + b.resetWithControl(BlockMIPSNE, cmp) + return true + } + // match: (EQ (SGTUconst [1] x) yes no) + // result: (NE x yes no) + for b.Controls[0].Op == OpMIPSSGTUconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 1 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockMIPSNE, x) + return true + } + // match: (EQ (SGTUzero x) yes no) + // result: (EQ x yes no) + for b.Controls[0].Op == OpMIPSSGTUzero { + v_0 := b.Controls[0] + x := v_0.Args[0] + b.resetWithControl(BlockMIPSEQ, x) + return true + } + // match: (EQ (SGTconst [0] x) yes no) + // result: (GEZ x yes no) + for b.Controls[0].Op == OpMIPSSGTconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockMIPSGEZ, x) + return true + } + // match: (EQ (SGTzero x) yes no) + // result: (LEZ x yes no) + for b.Controls[0].Op == OpMIPSSGTzero { + v_0 := b.Controls[0] + x := v_0.Args[0] + b.resetWithControl(BlockMIPSLEZ, x) + return true + } + // match: (EQ (MOVWconst [0]) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpMIPSMOVWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + b.Reset(BlockFirst) + return true + } + // match: (EQ (MOVWconst [c]) yes no) + // cond: c != 0 + // result: (First no yes) + for b.Controls[0].Op == OpMIPSMOVWconst { + v_0 := b.Controls[0] + c := auxIntToInt32(v_0.AuxInt) + if !(c != 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockMIPSGEZ: + // match: (GEZ (MOVWconst [c]) yes no) + // cond: c >= 0 + // result: (First yes no) + for b.Controls[0].Op == OpMIPSMOVWconst { 
+ v_0 := b.Controls[0] + c := auxIntToInt32(v_0.AuxInt) + if !(c >= 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (GEZ (MOVWconst [c]) yes no) + // cond: c < 0 + // result: (First no yes) + for b.Controls[0].Op == OpMIPSMOVWconst { + v_0 := b.Controls[0] + c := auxIntToInt32(v_0.AuxInt) + if !(c < 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockMIPSGTZ: + // match: (GTZ (MOVWconst [c]) yes no) + // cond: c > 0 + // result: (First yes no) + for b.Controls[0].Op == OpMIPSMOVWconst { + v_0 := b.Controls[0] + c := auxIntToInt32(v_0.AuxInt) + if !(c > 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (GTZ (MOVWconst [c]) yes no) + // cond: c <= 0 + // result: (First no yes) + for b.Controls[0].Op == OpMIPSMOVWconst { + v_0 := b.Controls[0] + c := auxIntToInt32(v_0.AuxInt) + if !(c <= 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockIf: + // match: (If cond yes no) + // result: (NE cond yes no) + for { + cond := b.Controls[0] + b.resetWithControl(BlockMIPSNE, cond) + return true + } + case BlockMIPSLEZ: + // match: (LEZ (MOVWconst [c]) yes no) + // cond: c <= 0 + // result: (First yes no) + for b.Controls[0].Op == OpMIPSMOVWconst { + v_0 := b.Controls[0] + c := auxIntToInt32(v_0.AuxInt) + if !(c <= 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (LEZ (MOVWconst [c]) yes no) + // cond: c > 0 + // result: (First no yes) + for b.Controls[0].Op == OpMIPSMOVWconst { + v_0 := b.Controls[0] + c := auxIntToInt32(v_0.AuxInt) + if !(c > 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockMIPSLTZ: + // match: (LTZ (MOVWconst [c]) yes no) + // cond: c < 0 + // result: (First yes no) + for b.Controls[0].Op == OpMIPSMOVWconst { + v_0 := b.Controls[0] + c := auxIntToInt32(v_0.AuxInt) + if !(c < 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (LTZ (MOVWconst [c]) yes no) + // cond: c 
>= 0 + // result: (First no yes) + for b.Controls[0].Op == OpMIPSMOVWconst { + v_0 := b.Controls[0] + c := auxIntToInt32(v_0.AuxInt) + if !(c >= 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockMIPSNE: + // match: (NE (FPFlagTrue cmp) yes no) + // result: (FPT cmp yes no) + for b.Controls[0].Op == OpMIPSFPFlagTrue { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockMIPSFPT, cmp) + return true + } + // match: (NE (FPFlagFalse cmp) yes no) + // result: (FPF cmp yes no) + for b.Controls[0].Op == OpMIPSFPFlagFalse { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockMIPSFPF, cmp) + return true + } + // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpMIPSXORconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpMIPSSGT { + break + } + b.resetWithControl(BlockMIPSEQ, cmp) + return true + } + // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpMIPSXORconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpMIPSSGTU { + break + } + b.resetWithControl(BlockMIPSEQ, cmp) + return true + } + // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpMIPSXORconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpMIPSSGTconst { + break + } + b.resetWithControl(BlockMIPSEQ, cmp) + return true + } + // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpMIPSXORconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpMIPSSGTUconst { + break + } + b.resetWithControl(BlockMIPSEQ, cmp) + return true + } + // match: (NE (XORconst [1] 
cmp:(SGTzero _)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpMIPSXORconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpMIPSSGTzero { + break + } + b.resetWithControl(BlockMIPSEQ, cmp) + return true + } + // match: (NE (XORconst [1] cmp:(SGTUzero _)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpMIPSXORconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpMIPSSGTUzero { + break + } + b.resetWithControl(BlockMIPSEQ, cmp) + return true + } + // match: (NE (SGTUconst [1] x) yes no) + // result: (EQ x yes no) + for b.Controls[0].Op == OpMIPSSGTUconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 1 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockMIPSEQ, x) + return true + } + // match: (NE (SGTUzero x) yes no) + // result: (NE x yes no) + for b.Controls[0].Op == OpMIPSSGTUzero { + v_0 := b.Controls[0] + x := v_0.Args[0] + b.resetWithControl(BlockMIPSNE, x) + return true + } + // match: (NE (SGTconst [0] x) yes no) + // result: (LTZ x yes no) + for b.Controls[0].Op == OpMIPSSGTconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockMIPSLTZ, x) + return true + } + // match: (NE (SGTzero x) yes no) + // result: (GTZ x yes no) + for b.Controls[0].Op == OpMIPSSGTzero { + v_0 := b.Controls[0] + x := v_0.Args[0] + b.resetWithControl(BlockMIPSGTZ, x) + return true + } + // match: (NE (MOVWconst [0]) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpMIPSMOVWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (NE (MOVWconst [c]) yes no) + // cond: c != 0 + // result: (First yes no) + for b.Controls[0].Op == OpMIPSMOVWconst { + v_0 := b.Controls[0] + c := auxIntToInt32(v_0.AuxInt) + if !(c != 0) { + break 
+ } + b.Reset(BlockFirst) + return true + } + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteMIPS64.go new file mode 100644 index 0000000000000000000000000000000000000000..764465d0b726044c422919baefaf529c9339f7fb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteMIPS64.go @@ -0,0 +1,8604 @@ +// Code generated from _gen/MIPS64.rules using 'go generate'; DO NOT EDIT. + +package ssa + +import "cmd/compile/internal/types" + +func rewriteValueMIPS64(v *Value) bool { + switch v.Op { + case OpAbs: + v.Op = OpMIPS64ABSD + return true + case OpAdd16: + v.Op = OpMIPS64ADDV + return true + case OpAdd32: + v.Op = OpMIPS64ADDV + return true + case OpAdd32F: + v.Op = OpMIPS64ADDF + return true + case OpAdd64: + v.Op = OpMIPS64ADDV + return true + case OpAdd64F: + v.Op = OpMIPS64ADDD + return true + case OpAdd8: + v.Op = OpMIPS64ADDV + return true + case OpAddPtr: + v.Op = OpMIPS64ADDV + return true + case OpAddr: + return rewriteValueMIPS64_OpAddr(v) + case OpAnd16: + v.Op = OpMIPS64AND + return true + case OpAnd32: + v.Op = OpMIPS64AND + return true + case OpAnd64: + v.Op = OpMIPS64AND + return true + case OpAnd8: + v.Op = OpMIPS64AND + return true + case OpAndB: + v.Op = OpMIPS64AND + return true + case OpAtomicAdd32: + v.Op = OpMIPS64LoweredAtomicAdd32 + return true + case OpAtomicAdd64: + v.Op = OpMIPS64LoweredAtomicAdd64 + return true + case OpAtomicAnd32: + v.Op = OpMIPS64LoweredAtomicAnd32 + return true + case OpAtomicAnd8: + return rewriteValueMIPS64_OpAtomicAnd8(v) + case OpAtomicCompareAndSwap32: + return rewriteValueMIPS64_OpAtomicCompareAndSwap32(v) + case OpAtomicCompareAndSwap64: + v.Op = OpMIPS64LoweredAtomicCas64 + return true + case OpAtomicExchange32: + v.Op = OpMIPS64LoweredAtomicExchange32 + return true + case OpAtomicExchange64: + v.Op = OpMIPS64LoweredAtomicExchange64 + return true + case 
OpAtomicLoad32: + v.Op = OpMIPS64LoweredAtomicLoad32 + return true + case OpAtomicLoad64: + v.Op = OpMIPS64LoweredAtomicLoad64 + return true + case OpAtomicLoad8: + v.Op = OpMIPS64LoweredAtomicLoad8 + return true + case OpAtomicLoadPtr: + v.Op = OpMIPS64LoweredAtomicLoad64 + return true + case OpAtomicOr32: + v.Op = OpMIPS64LoweredAtomicOr32 + return true + case OpAtomicOr8: + return rewriteValueMIPS64_OpAtomicOr8(v) + case OpAtomicStore32: + v.Op = OpMIPS64LoweredAtomicStore32 + return true + case OpAtomicStore64: + v.Op = OpMIPS64LoweredAtomicStore64 + return true + case OpAtomicStore8: + v.Op = OpMIPS64LoweredAtomicStore8 + return true + case OpAtomicStorePtrNoWB: + v.Op = OpMIPS64LoweredAtomicStore64 + return true + case OpAvg64u: + return rewriteValueMIPS64_OpAvg64u(v) + case OpClosureCall: + v.Op = OpMIPS64CALLclosure + return true + case OpCom16: + return rewriteValueMIPS64_OpCom16(v) + case OpCom32: + return rewriteValueMIPS64_OpCom32(v) + case OpCom64: + return rewriteValueMIPS64_OpCom64(v) + case OpCom8: + return rewriteValueMIPS64_OpCom8(v) + case OpConst16: + return rewriteValueMIPS64_OpConst16(v) + case OpConst32: + return rewriteValueMIPS64_OpConst32(v) + case OpConst32F: + return rewriteValueMIPS64_OpConst32F(v) + case OpConst64: + return rewriteValueMIPS64_OpConst64(v) + case OpConst64F: + return rewriteValueMIPS64_OpConst64F(v) + case OpConst8: + return rewriteValueMIPS64_OpConst8(v) + case OpConstBool: + return rewriteValueMIPS64_OpConstBool(v) + case OpConstNil: + return rewriteValueMIPS64_OpConstNil(v) + case OpCvt32Fto32: + v.Op = OpMIPS64TRUNCFW + return true + case OpCvt32Fto64: + v.Op = OpMIPS64TRUNCFV + return true + case OpCvt32Fto64F: + v.Op = OpMIPS64MOVFD + return true + case OpCvt32to32F: + v.Op = OpMIPS64MOVWF + return true + case OpCvt32to64F: + v.Op = OpMIPS64MOVWD + return true + case OpCvt64Fto32: + v.Op = OpMIPS64TRUNCDW + return true + case OpCvt64Fto32F: + v.Op = OpMIPS64MOVDF + return true + case OpCvt64Fto64: + v.Op = 
OpMIPS64TRUNCDV + return true + case OpCvt64to32F: + v.Op = OpMIPS64MOVVF + return true + case OpCvt64to64F: + v.Op = OpMIPS64MOVVD + return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true + case OpDiv16: + return rewriteValueMIPS64_OpDiv16(v) + case OpDiv16u: + return rewriteValueMIPS64_OpDiv16u(v) + case OpDiv32: + return rewriteValueMIPS64_OpDiv32(v) + case OpDiv32F: + v.Op = OpMIPS64DIVF + return true + case OpDiv32u: + return rewriteValueMIPS64_OpDiv32u(v) + case OpDiv64: + return rewriteValueMIPS64_OpDiv64(v) + case OpDiv64F: + v.Op = OpMIPS64DIVD + return true + case OpDiv64u: + return rewriteValueMIPS64_OpDiv64u(v) + case OpDiv8: + return rewriteValueMIPS64_OpDiv8(v) + case OpDiv8u: + return rewriteValueMIPS64_OpDiv8u(v) + case OpEq16: + return rewriteValueMIPS64_OpEq16(v) + case OpEq32: + return rewriteValueMIPS64_OpEq32(v) + case OpEq32F: + return rewriteValueMIPS64_OpEq32F(v) + case OpEq64: + return rewriteValueMIPS64_OpEq64(v) + case OpEq64F: + return rewriteValueMIPS64_OpEq64F(v) + case OpEq8: + return rewriteValueMIPS64_OpEq8(v) + case OpEqB: + return rewriteValueMIPS64_OpEqB(v) + case OpEqPtr: + return rewriteValueMIPS64_OpEqPtr(v) + case OpGetCallerPC: + v.Op = OpMIPS64LoweredGetCallerPC + return true + case OpGetCallerSP: + v.Op = OpMIPS64LoweredGetCallerSP + return true + case OpGetClosurePtr: + v.Op = OpMIPS64LoweredGetClosurePtr + return true + case OpHmul32: + return rewriteValueMIPS64_OpHmul32(v) + case OpHmul32u: + return rewriteValueMIPS64_OpHmul32u(v) + case OpHmul64: + return rewriteValueMIPS64_OpHmul64(v) + case OpHmul64u: + return rewriteValueMIPS64_OpHmul64u(v) + case OpInterCall: + v.Op = OpMIPS64CALLinter + return true + case OpIsInBounds: + return rewriteValueMIPS64_OpIsInBounds(v) + case OpIsNonNil: + return rewriteValueMIPS64_OpIsNonNil(v) + case OpIsSliceInBounds: + return rewriteValueMIPS64_OpIsSliceInBounds(v) + case OpLeq16: + return rewriteValueMIPS64_OpLeq16(v) + case OpLeq16U: + return 
rewriteValueMIPS64_OpLeq16U(v) + case OpLeq32: + return rewriteValueMIPS64_OpLeq32(v) + case OpLeq32F: + return rewriteValueMIPS64_OpLeq32F(v) + case OpLeq32U: + return rewriteValueMIPS64_OpLeq32U(v) + case OpLeq64: + return rewriteValueMIPS64_OpLeq64(v) + case OpLeq64F: + return rewriteValueMIPS64_OpLeq64F(v) + case OpLeq64U: + return rewriteValueMIPS64_OpLeq64U(v) + case OpLeq8: + return rewriteValueMIPS64_OpLeq8(v) + case OpLeq8U: + return rewriteValueMIPS64_OpLeq8U(v) + case OpLess16: + return rewriteValueMIPS64_OpLess16(v) + case OpLess16U: + return rewriteValueMIPS64_OpLess16U(v) + case OpLess32: + return rewriteValueMIPS64_OpLess32(v) + case OpLess32F: + return rewriteValueMIPS64_OpLess32F(v) + case OpLess32U: + return rewriteValueMIPS64_OpLess32U(v) + case OpLess64: + return rewriteValueMIPS64_OpLess64(v) + case OpLess64F: + return rewriteValueMIPS64_OpLess64F(v) + case OpLess64U: + return rewriteValueMIPS64_OpLess64U(v) + case OpLess8: + return rewriteValueMIPS64_OpLess8(v) + case OpLess8U: + return rewriteValueMIPS64_OpLess8U(v) + case OpLoad: + return rewriteValueMIPS64_OpLoad(v) + case OpLocalAddr: + return rewriteValueMIPS64_OpLocalAddr(v) + case OpLsh16x16: + return rewriteValueMIPS64_OpLsh16x16(v) + case OpLsh16x32: + return rewriteValueMIPS64_OpLsh16x32(v) + case OpLsh16x64: + return rewriteValueMIPS64_OpLsh16x64(v) + case OpLsh16x8: + return rewriteValueMIPS64_OpLsh16x8(v) + case OpLsh32x16: + return rewriteValueMIPS64_OpLsh32x16(v) + case OpLsh32x32: + return rewriteValueMIPS64_OpLsh32x32(v) + case OpLsh32x64: + return rewriteValueMIPS64_OpLsh32x64(v) + case OpLsh32x8: + return rewriteValueMIPS64_OpLsh32x8(v) + case OpLsh64x16: + return rewriteValueMIPS64_OpLsh64x16(v) + case OpLsh64x32: + return rewriteValueMIPS64_OpLsh64x32(v) + case OpLsh64x64: + return rewriteValueMIPS64_OpLsh64x64(v) + case OpLsh64x8: + return rewriteValueMIPS64_OpLsh64x8(v) + case OpLsh8x16: + return rewriteValueMIPS64_OpLsh8x16(v) + case OpLsh8x32: + return 
rewriteValueMIPS64_OpLsh8x32(v) + case OpLsh8x64: + return rewriteValueMIPS64_OpLsh8x64(v) + case OpLsh8x8: + return rewriteValueMIPS64_OpLsh8x8(v) + case OpMIPS64ADDV: + return rewriteValueMIPS64_OpMIPS64ADDV(v) + case OpMIPS64ADDVconst: + return rewriteValueMIPS64_OpMIPS64ADDVconst(v) + case OpMIPS64AND: + return rewriteValueMIPS64_OpMIPS64AND(v) + case OpMIPS64ANDconst: + return rewriteValueMIPS64_OpMIPS64ANDconst(v) + case OpMIPS64LoweredAtomicAdd32: + return rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd32(v) + case OpMIPS64LoweredAtomicAdd64: + return rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd64(v) + case OpMIPS64LoweredAtomicStore32: + return rewriteValueMIPS64_OpMIPS64LoweredAtomicStore32(v) + case OpMIPS64LoweredAtomicStore64: + return rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64(v) + case OpMIPS64MOVBUload: + return rewriteValueMIPS64_OpMIPS64MOVBUload(v) + case OpMIPS64MOVBUreg: + return rewriteValueMIPS64_OpMIPS64MOVBUreg(v) + case OpMIPS64MOVBload: + return rewriteValueMIPS64_OpMIPS64MOVBload(v) + case OpMIPS64MOVBreg: + return rewriteValueMIPS64_OpMIPS64MOVBreg(v) + case OpMIPS64MOVBstore: + return rewriteValueMIPS64_OpMIPS64MOVBstore(v) + case OpMIPS64MOVBstorezero: + return rewriteValueMIPS64_OpMIPS64MOVBstorezero(v) + case OpMIPS64MOVDload: + return rewriteValueMIPS64_OpMIPS64MOVDload(v) + case OpMIPS64MOVDstore: + return rewriteValueMIPS64_OpMIPS64MOVDstore(v) + case OpMIPS64MOVFload: + return rewriteValueMIPS64_OpMIPS64MOVFload(v) + case OpMIPS64MOVFstore: + return rewriteValueMIPS64_OpMIPS64MOVFstore(v) + case OpMIPS64MOVHUload: + return rewriteValueMIPS64_OpMIPS64MOVHUload(v) + case OpMIPS64MOVHUreg: + return rewriteValueMIPS64_OpMIPS64MOVHUreg(v) + case OpMIPS64MOVHload: + return rewriteValueMIPS64_OpMIPS64MOVHload(v) + case OpMIPS64MOVHreg: + return rewriteValueMIPS64_OpMIPS64MOVHreg(v) + case OpMIPS64MOVHstore: + return rewriteValueMIPS64_OpMIPS64MOVHstore(v) + case OpMIPS64MOVHstorezero: + return rewriteValueMIPS64_OpMIPS64MOVHstorezero(v) 
+ case OpMIPS64MOVVload: + return rewriteValueMIPS64_OpMIPS64MOVVload(v) + case OpMIPS64MOVVnop: + return rewriteValueMIPS64_OpMIPS64MOVVnop(v) + case OpMIPS64MOVVreg: + return rewriteValueMIPS64_OpMIPS64MOVVreg(v) + case OpMIPS64MOVVstore: + return rewriteValueMIPS64_OpMIPS64MOVVstore(v) + case OpMIPS64MOVVstorezero: + return rewriteValueMIPS64_OpMIPS64MOVVstorezero(v) + case OpMIPS64MOVWUload: + return rewriteValueMIPS64_OpMIPS64MOVWUload(v) + case OpMIPS64MOVWUreg: + return rewriteValueMIPS64_OpMIPS64MOVWUreg(v) + case OpMIPS64MOVWload: + return rewriteValueMIPS64_OpMIPS64MOVWload(v) + case OpMIPS64MOVWreg: + return rewriteValueMIPS64_OpMIPS64MOVWreg(v) + case OpMIPS64MOVWstore: + return rewriteValueMIPS64_OpMIPS64MOVWstore(v) + case OpMIPS64MOVWstorezero: + return rewriteValueMIPS64_OpMIPS64MOVWstorezero(v) + case OpMIPS64NEGV: + return rewriteValueMIPS64_OpMIPS64NEGV(v) + case OpMIPS64NOR: + return rewriteValueMIPS64_OpMIPS64NOR(v) + case OpMIPS64NORconst: + return rewriteValueMIPS64_OpMIPS64NORconst(v) + case OpMIPS64OR: + return rewriteValueMIPS64_OpMIPS64OR(v) + case OpMIPS64ORconst: + return rewriteValueMIPS64_OpMIPS64ORconst(v) + case OpMIPS64SGT: + return rewriteValueMIPS64_OpMIPS64SGT(v) + case OpMIPS64SGTU: + return rewriteValueMIPS64_OpMIPS64SGTU(v) + case OpMIPS64SGTUconst: + return rewriteValueMIPS64_OpMIPS64SGTUconst(v) + case OpMIPS64SGTconst: + return rewriteValueMIPS64_OpMIPS64SGTconst(v) + case OpMIPS64SLLV: + return rewriteValueMIPS64_OpMIPS64SLLV(v) + case OpMIPS64SLLVconst: + return rewriteValueMIPS64_OpMIPS64SLLVconst(v) + case OpMIPS64SRAV: + return rewriteValueMIPS64_OpMIPS64SRAV(v) + case OpMIPS64SRAVconst: + return rewriteValueMIPS64_OpMIPS64SRAVconst(v) + case OpMIPS64SRLV: + return rewriteValueMIPS64_OpMIPS64SRLV(v) + case OpMIPS64SRLVconst: + return rewriteValueMIPS64_OpMIPS64SRLVconst(v) + case OpMIPS64SUBV: + return rewriteValueMIPS64_OpMIPS64SUBV(v) + case OpMIPS64SUBVconst: + return rewriteValueMIPS64_OpMIPS64SUBVconst(v) + case 
OpMIPS64XOR: + return rewriteValueMIPS64_OpMIPS64XOR(v) + case OpMIPS64XORconst: + return rewriteValueMIPS64_OpMIPS64XORconst(v) + case OpMod16: + return rewriteValueMIPS64_OpMod16(v) + case OpMod16u: + return rewriteValueMIPS64_OpMod16u(v) + case OpMod32: + return rewriteValueMIPS64_OpMod32(v) + case OpMod32u: + return rewriteValueMIPS64_OpMod32u(v) + case OpMod64: + return rewriteValueMIPS64_OpMod64(v) + case OpMod64u: + return rewriteValueMIPS64_OpMod64u(v) + case OpMod8: + return rewriteValueMIPS64_OpMod8(v) + case OpMod8u: + return rewriteValueMIPS64_OpMod8u(v) + case OpMove: + return rewriteValueMIPS64_OpMove(v) + case OpMul16: + return rewriteValueMIPS64_OpMul16(v) + case OpMul32: + return rewriteValueMIPS64_OpMul32(v) + case OpMul32F: + v.Op = OpMIPS64MULF + return true + case OpMul64: + return rewriteValueMIPS64_OpMul64(v) + case OpMul64F: + v.Op = OpMIPS64MULD + return true + case OpMul64uhilo: + v.Op = OpMIPS64MULVU + return true + case OpMul8: + return rewriteValueMIPS64_OpMul8(v) + case OpNeg16: + v.Op = OpMIPS64NEGV + return true + case OpNeg32: + v.Op = OpMIPS64NEGV + return true + case OpNeg32F: + v.Op = OpMIPS64NEGF + return true + case OpNeg64: + v.Op = OpMIPS64NEGV + return true + case OpNeg64F: + v.Op = OpMIPS64NEGD + return true + case OpNeg8: + v.Op = OpMIPS64NEGV + return true + case OpNeq16: + return rewriteValueMIPS64_OpNeq16(v) + case OpNeq32: + return rewriteValueMIPS64_OpNeq32(v) + case OpNeq32F: + return rewriteValueMIPS64_OpNeq32F(v) + case OpNeq64: + return rewriteValueMIPS64_OpNeq64(v) + case OpNeq64F: + return rewriteValueMIPS64_OpNeq64F(v) + case OpNeq8: + return rewriteValueMIPS64_OpNeq8(v) + case OpNeqB: + v.Op = OpMIPS64XOR + return true + case OpNeqPtr: + return rewriteValueMIPS64_OpNeqPtr(v) + case OpNilCheck: + v.Op = OpMIPS64LoweredNilCheck + return true + case OpNot: + return rewriteValueMIPS64_OpNot(v) + case OpOffPtr: + return rewriteValueMIPS64_OpOffPtr(v) + case OpOr16: + v.Op = OpMIPS64OR + return true + case OpOr32: + 
v.Op = OpMIPS64OR + return true + case OpOr64: + v.Op = OpMIPS64OR + return true + case OpOr8: + v.Op = OpMIPS64OR + return true + case OpOrB: + v.Op = OpMIPS64OR + return true + case OpPanicBounds: + return rewriteValueMIPS64_OpPanicBounds(v) + case OpRotateLeft16: + return rewriteValueMIPS64_OpRotateLeft16(v) + case OpRotateLeft32: + return rewriteValueMIPS64_OpRotateLeft32(v) + case OpRotateLeft64: + return rewriteValueMIPS64_OpRotateLeft64(v) + case OpRotateLeft8: + return rewriteValueMIPS64_OpRotateLeft8(v) + case OpRound32F: + v.Op = OpCopy + return true + case OpRound64F: + v.Op = OpCopy + return true + case OpRsh16Ux16: + return rewriteValueMIPS64_OpRsh16Ux16(v) + case OpRsh16Ux32: + return rewriteValueMIPS64_OpRsh16Ux32(v) + case OpRsh16Ux64: + return rewriteValueMIPS64_OpRsh16Ux64(v) + case OpRsh16Ux8: + return rewriteValueMIPS64_OpRsh16Ux8(v) + case OpRsh16x16: + return rewriteValueMIPS64_OpRsh16x16(v) + case OpRsh16x32: + return rewriteValueMIPS64_OpRsh16x32(v) + case OpRsh16x64: + return rewriteValueMIPS64_OpRsh16x64(v) + case OpRsh16x8: + return rewriteValueMIPS64_OpRsh16x8(v) + case OpRsh32Ux16: + return rewriteValueMIPS64_OpRsh32Ux16(v) + case OpRsh32Ux32: + return rewriteValueMIPS64_OpRsh32Ux32(v) + case OpRsh32Ux64: + return rewriteValueMIPS64_OpRsh32Ux64(v) + case OpRsh32Ux8: + return rewriteValueMIPS64_OpRsh32Ux8(v) + case OpRsh32x16: + return rewriteValueMIPS64_OpRsh32x16(v) + case OpRsh32x32: + return rewriteValueMIPS64_OpRsh32x32(v) + case OpRsh32x64: + return rewriteValueMIPS64_OpRsh32x64(v) + case OpRsh32x8: + return rewriteValueMIPS64_OpRsh32x8(v) + case OpRsh64Ux16: + return rewriteValueMIPS64_OpRsh64Ux16(v) + case OpRsh64Ux32: + return rewriteValueMIPS64_OpRsh64Ux32(v) + case OpRsh64Ux64: + return rewriteValueMIPS64_OpRsh64Ux64(v) + case OpRsh64Ux8: + return rewriteValueMIPS64_OpRsh64Ux8(v) + case OpRsh64x16: + return rewriteValueMIPS64_OpRsh64x16(v) + case OpRsh64x32: + return rewriteValueMIPS64_OpRsh64x32(v) + case OpRsh64x64: + return 
rewriteValueMIPS64_OpRsh64x64(v) + case OpRsh64x8: + return rewriteValueMIPS64_OpRsh64x8(v) + case OpRsh8Ux16: + return rewriteValueMIPS64_OpRsh8Ux16(v) + case OpRsh8Ux32: + return rewriteValueMIPS64_OpRsh8Ux32(v) + case OpRsh8Ux64: + return rewriteValueMIPS64_OpRsh8Ux64(v) + case OpRsh8Ux8: + return rewriteValueMIPS64_OpRsh8Ux8(v) + case OpRsh8x16: + return rewriteValueMIPS64_OpRsh8x16(v) + case OpRsh8x32: + return rewriteValueMIPS64_OpRsh8x32(v) + case OpRsh8x64: + return rewriteValueMIPS64_OpRsh8x64(v) + case OpRsh8x8: + return rewriteValueMIPS64_OpRsh8x8(v) + case OpSelect0: + return rewriteValueMIPS64_OpSelect0(v) + case OpSelect1: + return rewriteValueMIPS64_OpSelect1(v) + case OpSignExt16to32: + v.Op = OpMIPS64MOVHreg + return true + case OpSignExt16to64: + v.Op = OpMIPS64MOVHreg + return true + case OpSignExt32to64: + v.Op = OpMIPS64MOVWreg + return true + case OpSignExt8to16: + v.Op = OpMIPS64MOVBreg + return true + case OpSignExt8to32: + v.Op = OpMIPS64MOVBreg + return true + case OpSignExt8to64: + v.Op = OpMIPS64MOVBreg + return true + case OpSlicemask: + return rewriteValueMIPS64_OpSlicemask(v) + case OpSqrt: + v.Op = OpMIPS64SQRTD + return true + case OpSqrt32: + v.Op = OpMIPS64SQRTF + return true + case OpStaticCall: + v.Op = OpMIPS64CALLstatic + return true + case OpStore: + return rewriteValueMIPS64_OpStore(v) + case OpSub16: + v.Op = OpMIPS64SUBV + return true + case OpSub32: + v.Op = OpMIPS64SUBV + return true + case OpSub32F: + v.Op = OpMIPS64SUBF + return true + case OpSub64: + v.Op = OpMIPS64SUBV + return true + case OpSub64F: + v.Op = OpMIPS64SUBD + return true + case OpSub8: + v.Op = OpMIPS64SUBV + return true + case OpSubPtr: + v.Op = OpMIPS64SUBV + return true + case OpTailCall: + v.Op = OpMIPS64CALLtail + return true + case OpTrunc16to8: + v.Op = OpCopy + return true + case OpTrunc32to16: + v.Op = OpCopy + return true + case OpTrunc32to8: + v.Op = OpCopy + return true + case OpTrunc64to16: + v.Op = OpCopy + return true + case 
OpTrunc64to32: + v.Op = OpCopy + return true + case OpTrunc64to8: + v.Op = OpCopy + return true + case OpWB: + v.Op = OpMIPS64LoweredWB + return true + case OpXor16: + v.Op = OpMIPS64XOR + return true + case OpXor32: + v.Op = OpMIPS64XOR + return true + case OpXor64: + v.Op = OpMIPS64XOR + return true + case OpXor8: + v.Op = OpMIPS64XOR + return true + case OpZero: + return rewriteValueMIPS64_OpZero(v) + case OpZeroExt16to32: + v.Op = OpMIPS64MOVHUreg + return true + case OpZeroExt16to64: + v.Op = OpMIPS64MOVHUreg + return true + case OpZeroExt32to64: + v.Op = OpMIPS64MOVWUreg + return true + case OpZeroExt8to16: + v.Op = OpMIPS64MOVBUreg + return true + case OpZeroExt8to32: + v.Op = OpMIPS64MOVBUreg + return true + case OpZeroExt8to64: + v.Op = OpMIPS64MOVBUreg + return true + } + return false +} +func rewriteValueMIPS64_OpAddr(v *Value) bool { + v_0 := v.Args[0] + // match: (Addr {sym} base) + // result: (MOVVaddr {sym} base) + for { + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpMIPS64MOVVaddr) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } +} +func rewriteValueMIPS64_OpAtomicAnd8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (AtomicAnd8 ptr val mem) + // cond: !config.BigEndian + // result: (LoweredAtomicAnd32 (AND (MOVVconst [^3]) ptr) (OR (SLLV (ZeroExt8to32 val) (SLLVconst [3] (ANDconst [3] ptr))) (NORconst [0] (SLLV (MOVVconst [0xff]) (SLLVconst [3] (ANDconst [3] ptr))))) mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + if !(!config.BigEndian) { + break + } + v.reset(OpMIPS64LoweredAtomicAnd32) + v0 := b.NewValue0(v.Pos, OpMIPS64AND, typ.UInt32Ptr) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(^3) + v0.AddArg2(v1, ptr) + v2 := b.NewValue0(v.Pos, OpMIPS64OR, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + 
v4.AddArg(val) + v5 := b.NewValue0(v.Pos, OpMIPS64SLLVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(3) + v6 := b.NewValue0(v.Pos, OpMIPS64ANDconst, typ.UInt64) + v6.AuxInt = int64ToAuxInt(3) + v6.AddArg(ptr) + v5.AddArg(v6) + v3.AddArg2(v4, v5) + v7 := b.NewValue0(v.Pos, OpMIPS64NORconst, typ.UInt64) + v7.AuxInt = int64ToAuxInt(0) + v8 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt64) + v9 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v9.AuxInt = int64ToAuxInt(0xff) + v8.AddArg2(v9, v5) + v7.AddArg(v8) + v2.AddArg2(v3, v7) + v.AddArg3(v0, v2, mem) + return true + } + // match: (AtomicAnd8 ptr val mem) + // cond: config.BigEndian + // result: (LoweredAtomicAnd32 (AND (MOVVconst [^3]) ptr) (OR (SLLV (ZeroExt8to32 val) (SLLVconst [3] (ANDconst [3] (XORconst [3] ptr)))) (NORconst [0] (SLLV (MOVVconst [0xff]) (SLLVconst [3] (ANDconst [3] (XORconst [3] ptr)))))) mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + if !(config.BigEndian) { + break + } + v.reset(OpMIPS64LoweredAtomicAnd32) + v0 := b.NewValue0(v.Pos, OpMIPS64AND, typ.UInt32Ptr) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(^3) + v0.AddArg2(v1, ptr) + v2 := b.NewValue0(v.Pos, OpMIPS64OR, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v4.AddArg(val) + v5 := b.NewValue0(v.Pos, OpMIPS64SLLVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(3) + v6 := b.NewValue0(v.Pos, OpMIPS64ANDconst, typ.UInt64) + v6.AuxInt = int64ToAuxInt(3) + v7 := b.NewValue0(v.Pos, OpMIPS64XORconst, typ.UInt64) + v7.AuxInt = int64ToAuxInt(3) + v7.AddArg(ptr) + v6.AddArg(v7) + v5.AddArg(v6) + v3.AddArg2(v4, v5) + v8 := b.NewValue0(v.Pos, OpMIPS64NORconst, typ.UInt64) + v8.AuxInt = int64ToAuxInt(0) + v9 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt64) + v10 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v10.AuxInt = int64ToAuxInt(0xff) + v9.AddArg2(v10, v5) + v8.AddArg(v9) + v2.AddArg2(v3, v8) + v.AddArg3(v0, v2, 
mem) + return true + } + return false +} +func rewriteValueMIPS64_OpAtomicCompareAndSwap32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicCompareAndSwap32 ptr old new mem) + // result: (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem) + for { + ptr := v_0 + old := v_1 + new := v_2 + mem := v_3 + v.reset(OpMIPS64LoweredAtomicCas32) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(old) + v.AddArg4(ptr, v0, new, mem) + return true + } +} +func rewriteValueMIPS64_OpAtomicOr8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (AtomicOr8 ptr val mem) + // cond: !config.BigEndian + // result: (LoweredAtomicOr32 (AND (MOVVconst [^3]) ptr) (SLLV (ZeroExt8to32 val) (SLLVconst [3] (ANDconst [3] ptr))) mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + if !(!config.BigEndian) { + break + } + v.reset(OpMIPS64LoweredAtomicOr32) + v0 := b.NewValue0(v.Pos, OpMIPS64AND, typ.UInt32Ptr) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(^3) + v0.AddArg2(v1, ptr) + v2 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v3.AddArg(val) + v4 := b.NewValue0(v.Pos, OpMIPS64SLLVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(3) + v5 := b.NewValue0(v.Pos, OpMIPS64ANDconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(3) + v5.AddArg(ptr) + v4.AddArg(v5) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v2, mem) + return true + } + // match: (AtomicOr8 ptr val mem) + // cond: config.BigEndian + // result: (LoweredAtomicOr32 (AND (MOVVconst [^3]) ptr) (SLLV (ZeroExt8to32 val) (SLLVconst [3] (ANDconst [3] (XORconst [3] ptr)))) mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + if !(config.BigEndian) { + break + } + v.reset(OpMIPS64LoweredAtomicOr32) + v0 := b.NewValue0(v.Pos, OpMIPS64AND, 
typ.UInt32Ptr) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(^3) + v0.AddArg2(v1, ptr) + v2 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v3.AddArg(val) + v4 := b.NewValue0(v.Pos, OpMIPS64SLLVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(3) + v5 := b.NewValue0(v.Pos, OpMIPS64ANDconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(3) + v6 := b.NewValue0(v.Pos, OpMIPS64XORconst, typ.UInt64) + v6.AuxInt = int64ToAuxInt(3) + v6.AddArg(ptr) + v5.AddArg(v6) + v4.AddArg(v5) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v2, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpAvg64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Avg64u x y) + // result: (ADDV (SRLVconst (SUBV x y) [1]) y) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64ADDV) + v0 := b.NewValue0(v.Pos, OpMIPS64SRLVconst, t) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpMIPS64SUBV, t) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg2(v0, y) + return true + } +} +func rewriteValueMIPS64_OpCom16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Com16 x) + // result: (NOR (MOVVconst [0]) x) + for { + x := v_0 + v.reset(OpMIPS64NOR) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueMIPS64_OpCom32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Com32 x) + // result: (NOR (MOVVconst [0]) x) + for { + x := v_0 + v.reset(OpMIPS64NOR) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueMIPS64_OpCom64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Com64 x) + // result: (NOR (MOVVconst [0]) x) + for { + x := v_0 + v.reset(OpMIPS64NOR) + 
v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueMIPS64_OpCom8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Com8 x) + // result: (NOR (MOVVconst [0]) x) + for { + x := v_0 + v.reset(OpMIPS64NOR) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueMIPS64_OpConst16(v *Value) bool { + // match: (Const16 [val]) + // result: (MOVVconst [int64(val)]) + for { + val := auxIntToInt16(v.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueMIPS64_OpConst32(v *Value) bool { + // match: (Const32 [val]) + // result: (MOVVconst [int64(val)]) + for { + val := auxIntToInt32(v.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueMIPS64_OpConst32F(v *Value) bool { + // match: (Const32F [val]) + // result: (MOVFconst [float64(val)]) + for { + val := auxIntToFloat32(v.AuxInt) + v.reset(OpMIPS64MOVFconst) + v.AuxInt = float64ToAuxInt(float64(val)) + return true + } +} +func rewriteValueMIPS64_OpConst64(v *Value) bool { + // match: (Const64 [val]) + // result: (MOVVconst [int64(val)]) + for { + val := auxIntToInt64(v.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueMIPS64_OpConst64F(v *Value) bool { + // match: (Const64F [val]) + // result: (MOVDconst [float64(val)]) + for { + val := auxIntToFloat64(v.AuxInt) + v.reset(OpMIPS64MOVDconst) + v.AuxInt = float64ToAuxInt(float64(val)) + return true + } +} +func rewriteValueMIPS64_OpConst8(v *Value) bool { + // match: (Const8 [val]) + // result: (MOVVconst [int64(val)]) + for { + val := auxIntToInt8(v.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func 
rewriteValueMIPS64_OpConstBool(v *Value) bool { + // match: (ConstBool [t]) + // result: (MOVVconst [int64(b2i(t))]) + for { + t := auxIntToBool(v.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(b2i(t))) + return true + } +} +func rewriteValueMIPS64_OpConstNil(v *Value) bool { + // match: (ConstNil) + // result: (MOVVconst [0]) + for { + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } +} +func rewriteValueMIPS64_OpDiv16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 x y) + // result: (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) + v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpDiv16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16u x y) + // result: (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpDiv32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32 x y) + // result: (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, 
OpSignExt32to64, typ.Int64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpDiv32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32u x y) + // result: (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpDiv64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div64 x y) + // result: (Select1 (DIVV x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpDiv64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div64u x y) + // result: (Select1 (DIVVU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpDiv8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) + v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpDiv8u(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpEq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq16 x y) + // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SGTU) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpEq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq32 x y) + // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SGTU) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpEq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32F x y) + // result: (FPFlagTrue (CMPEQF x y)) + for { + x := v_0 + y := v_1 + 
v.reset(OpMIPS64FPFlagTrue) + v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQF, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpEq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq64 x y) + // result: (SGTU (MOVVconst [1]) (XOR x y)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SGTU) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpEq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq64F x y) + // result: (FPFlagTrue (CMPEQD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64FPFlagTrue) + v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpEq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq8 x y) + // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SGTU) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpEqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqB x y) + // result: (XOR (MOVVconst [1]) (XOR x y)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64XOR) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.Bool) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) + 
return true + } +} +func rewriteValueMIPS64_OpEqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqPtr x y) + // result: (SGTU (MOVVconst [1]) (XOR x y)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SGTU) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpHmul32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul32 x y) + // result: (SRAVconst (Select1 (MULV (SignExt32to64 x) (SignExt32to64 y))) [32]) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SRAVconst) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpSelect1, typ.Int64) + v1 := b.NewValue0(v.Pos, OpMIPS64MULV, types.NewTuple(typ.Int64, typ.Int64)) + v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpHmul32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul32u x y) + // result: (SRLVconst (Select1 (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32]) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SRLVconst) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpHmul64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul64 x y) + // 
result: (Select0 (MULV x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMIPS64MULV, types.NewTuple(typ.Int64, typ.Int64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpHmul64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul64u x y) + // result: (Select0 (MULVU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpIsInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsInBounds idx len) + // result: (SGTU len idx) + for { + idx := v_0 + len := v_1 + v.reset(OpMIPS64SGTU) + v.AddArg2(len, idx) + return true + } +} +func rewriteValueMIPS64_OpIsNonNil(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (IsNonNil ptr) + // result: (SGTU ptr (MOVVconst [0])) + for { + ptr := v_0 + v.reset(OpMIPS64SGTU) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(ptr, v0) + return true + } +} +func rewriteValueMIPS64_OpIsSliceInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (IsSliceInBounds idx len) + // result: (XOR (MOVVconst [1]) (SGTU idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpMIPS64XOR) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v1.AddArg2(idx, len) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpLeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16 x y) + // result: (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64XOR) + v0 := 
b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool) + v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpLeq16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16U x y) + // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64XOR) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpLeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq32 x y) + // result: (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64XOR) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool) + v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpLeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32F x y) + // result: (FPFlagTrue (CMPGEF y x)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64FPFlagTrue) + v0 := b.NewValue0(v.Pos, OpMIPS64CMPGEF, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpLeq32U(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq32U x y) + // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64XOR) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpLeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq64 x y) + // result: (XOR (MOVVconst [1]) (SGT x y)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64XOR) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpLeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64F x y) + // result: (FPFlagTrue (CMPGED y x)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64FPFlagTrue) + v0 := b.NewValue0(v.Pos, OpMIPS64CMPGED, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpLeq64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq64U x y) + // result: (XOR (MOVVconst [1]) (SGTU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64XOR) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpLeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8 x y) + // result: 
(XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64XOR) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool) + v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpLeq8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8U x y) + // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64XOR) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpLess16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16 x y) + // result: (SGT (SignExt16to64 y) (SignExt16to64 x)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SGT) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpLess16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16U x y) + // result: (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SGTU) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func 
rewriteValueMIPS64_OpLess32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less32 x y) + // result: (SGT (SignExt32to64 y) (SignExt32to64 x)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SGT) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpLess32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32F x y) + // result: (FPFlagTrue (CMPGTF y x)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64FPFlagTrue) + v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTF, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpLess32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less32U x y) + // result: (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SGTU) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpLess64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Less64 x y) + // result: (SGT y x) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SGT) + v.AddArg2(y, x) + return true + } +} +func rewriteValueMIPS64_OpLess64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64F x y) + // result: (FPFlagTrue (CMPGTD y x)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64FPFlagTrue) + v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpLess64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Less64U x y) + // result: (SGTU y x) + for { + x := v_0 + y := v_1 + 
v.reset(OpMIPS64SGTU) + v.AddArg2(y, x) + return true + } +} +func rewriteValueMIPS64_OpLess8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8 x y) + // result: (SGT (SignExt8to64 y) (SignExt8to64 x)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SGT) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpLess8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8U x y) + // result: (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SGTU) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Load ptr mem) + // cond: t.IsBoolean() + // result: (MOVBUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean()) { + break + } + v.reset(OpMIPS64MOVBUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is8BitInt(t) && t.IsSigned()) + // result: (MOVBload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is8BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpMIPS64MOVBload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is8BitInt(t) && !t.IsSigned()) + // result: (MOVBUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is8BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpMIPS64MOVBUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is16BitInt(t) && t.IsSigned()) + // result: (MOVHload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t) && 
t.IsSigned()) { + break + } + v.reset(OpMIPS64MOVHload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is16BitInt(t) && !t.IsSigned()) + // result: (MOVHUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpMIPS64MOVHUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is32BitInt(t) && t.IsSigned()) + // result: (MOVWload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpMIPS64MOVWload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is32BitInt(t) && !t.IsSigned()) + // result: (MOVWUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpMIPS64MOVWUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (MOVVload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpMIPS64MOVVload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (MOVFload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + v.reset(OpMIPS64MOVFload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (MOVDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(OpMIPS64MOVDload) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpLocalAddr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (MOVVaddr {sym} (SPanchored base mem)) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + 
mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(OpMIPS64MOVVaddr) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) + v.AddArg(v0) + return true + } + // match: (LocalAddr {sym} base _) + // cond: !t.Elem().HasPointers() + // result: (MOVVaddr {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(OpMIPS64MOVVaddr) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + return false +} +func rewriteValueMIPS64_OpLsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x16 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpLsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x32 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func 
rewriteValueMIPS64_OpLsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x64 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueMIPS64_OpLsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x8 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpLsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x16 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) + 
v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpLsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x32 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpLsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x64 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueMIPS64_OpLsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x8 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + 
v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpLsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x16 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpLsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x32 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpLsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x64 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, 
typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueMIPS64_OpLsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x8 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpLsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x16 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpLsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x32 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := 
b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpLsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x64 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueMIPS64_OpLsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x8 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpMIPS64ADDV(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDV x (MOVVconst [c])) + // cond: is32Bit(c) && !t.IsPtr() + // result: (ADDVconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMIPS64MOVVconst { + continue 
+ } + t := v_1.Type + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c) && !t.IsPtr()) { + continue + } + v.reset(OpMIPS64ADDVconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (ADDV x (NEGV y)) + // result: (SUBV x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMIPS64NEGV { + continue + } + y := v_1.Args[0] + v.reset(OpMIPS64SUBV) + v.AddArg2(x, y) + return true + } + break + } + return false +} +func rewriteValueMIPS64_OpMIPS64ADDVconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) + // cond: is32Bit(off1+int64(off2)) + // result: (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr) + for { + off1 := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + if !(is32Bit(off1 + int64(off2))) { + break + } + v.reset(OpMIPS64MOVVaddr) + v.AuxInt = int32ToAuxInt(int32(off1) + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg(ptr) + return true + } + // match: (ADDVconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ADDVconst [c] (MOVVconst [d])) + // result: (MOVVconst [c+d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(c + d) + return true + } + // match: (ADDVconst [c] (ADDVconst [d] x)) + // cond: is32Bit(c+d) + // result: (ADDVconst [c+d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64ADDVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(c + d)) { + break + } + v.reset(OpMIPS64ADDVconst) + v.AuxInt = int64ToAuxInt(c + d) + v.AddArg(x) + return true + } + // match: (ADDVconst [c] (SUBVconst [d] x)) + // cond: is32Bit(c-d) + // result: (ADDVconst [c-d] x) + for { + c := 
auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64SUBVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(c - d)) { + break + } + v.reset(OpMIPS64ADDVconst) + v.AuxInt = int64ToAuxInt(c - d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64AND(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AND x (MOVVconst [c])) + // cond: is32Bit(c) + // result: (ANDconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMIPS64MOVVconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpMIPS64ANDconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (AND x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64ANDconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ANDconst [0] _) + // result: (MOVVconst [0]) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (ANDconst [-1] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != -1 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ANDconst [c] (MOVVconst [d])) + // result: (MOVVconst [c&d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(c & d) + return true + } + // match: (ANDconst [c] (ANDconst [d] x)) + // result: (ANDconst [c&d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64ANDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpMIPS64ANDconst) + v.AuxInt = int64ToAuxInt(c & d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := 
v.Args[1] + v_0 := v.Args[0] + // match: (LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) + // cond: is32Bit(c) + // result: (LoweredAtomicAddconst32 [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpMIPS64LoweredAtomicAddconst32) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) + // cond: is32Bit(c) + // result: (LoweredAtomicAddconst64 [c] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpMIPS64LoweredAtomicAddconst64) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LoweredAtomicStore32 ptr (MOVVconst [0]) mem) + // result: (LoweredAtomicStorezero32 ptr mem) + for { + ptr := v_0 + if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpMIPS64LoweredAtomicStorezero32) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LoweredAtomicStore64 ptr (MOVVconst [0]) mem) + // result: (LoweredAtomicStorezero64 ptr mem) + for { + ptr := v_0 + if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpMIPS64LoweredAtomicStorezero64) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVBUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + config := b.Func.Config + // match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBUload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVBUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVBUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVBUreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVBUreg x:(MOVBUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVBUload { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg x:(MOVBUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVBUreg { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg (MOVVconst [c])) + // result: (MOVVconst [int64(uint8(c))]) + for { + if v_0.Op != 
OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint8(c))) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVBload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVBload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVBload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVVconst [int64(read8(sym, int64(off)))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = 
int64ToAuxInt(int64(read8(sym, int64(off)))) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVBreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVBreg x:(MOVBload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVBload { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVBreg x:(MOVBreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVBreg { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVBreg (MOVVconst [c])) + // result: (MOVVconst [int64(int8(c))]) + for { + if v_0.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(int8(c))) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVBstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + 
off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVBstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) + // result: (MOVBstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpMIPS64MOVBstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVBreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPS64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVBUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPS64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVHreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPS64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore 
[off] {sym} ptr (MOVHUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVHUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPS64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPS64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVWUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPS64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVBstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] 
{sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVBstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVDload [off] {sym} ptr (MOVVstore [off] {sym} ptr val _)) + // result: (MOVVgpfp val) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVVstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + val := v_1.Args[1] + if ptr != v_1.Args[0] { + break + } + v.reset(OpMIPS64MOVVgpfp) + v.AddArg(val) + return true + } + // match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVDload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVDload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: 
canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVDload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVDstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVDstore [off] {sym} ptr (MOVVgpfp val) mem) + // result: (MOVVstore [off] {sym} ptr val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVVgpfp { + break + } + val := v_1.Args[0] + mem := v_2 + v.reset(OpMIPS64MOVVstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVDstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + // cond: 
canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVDstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVFload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVFload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) + // result: (MOVWgpfp val) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + val := v_1.Args[1] + if ptr != v_1.Args[0] { + break + } + v.reset(OpMIPS64MOVWgpfp) + v.AddArg(val) + return true + } + // match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVFload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVFload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) 
&& is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVFload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVFstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVFstore [off] {sym} ptr (MOVWgpfp val) mem) + // result: (MOVWstore [off] {sym} ptr val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVWgpfp { + break + } + val := v_1.Args[0] + mem := v_2 + v.reset(OpMIPS64MOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVFstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVFstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && 
is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVFstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVHUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHUload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVHUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && 
is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVHUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVHUreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVHUreg x:(MOVBUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVBUload { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVHUload { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVBUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVBUreg { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVHUreg { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg (MOVVconst [c])) + // result: (MOVVconst [int64(uint16(c))]) + for { + if v_0.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint16(c))) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + 
} + v.reset(OpMIPS64MOVHload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVHload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVHreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVHreg x:(MOVBload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVBload { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVBUload { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVHload { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBreg _)) + 
// result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVBreg { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVBUreg { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVHreg { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHreg (MOVVconst [c])) + // result: (MOVVconst [int64(int16(c))]) + for { + if v_0.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(int16(c))) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVHstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := 
auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVHstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) + // result: (MOVHstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpMIPS64MOVHstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVHreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPS64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVHUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPS64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPS64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] 
{sym} ptr (MOVWUreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVWUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPS64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVHstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVHstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVVload(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVVload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) + // result: (MOVVfpgp val) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + val := v_1.Args[1] + if ptr != v_1.Args[0] { + break + } + v.reset(OpMIPS64MOVVfpgp) + v.AddArg(val) + return true + } + // match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVVload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVVload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVVload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVVload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVVconst [int64(read64(sym, 
int64(off), config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVVnop(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVVnop (MOVVconst [c])) + // result: (MOVVconst [c]) + for { + if v_0.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(c) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVVreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVVreg x) + // cond: x.Uses == 1 + // result: (MOVVnop x) + for { + x := v_0 + if !(x.Uses == 1) { + break + } + v.reset(OpMIPS64MOVVnop) + v.AddArg(x) + return true + } + // match: (MOVVreg (MOVVconst [c])) + // result: (MOVVconst [c]) + for { + if v_0.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(c) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVVstore [off] {sym} ptr (MOVVfpgp val) mem) + // result: (MOVDstore [off] {sym} ptr val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVVfpgp { + break + } + val := v_1.Args[0] + mem := v_2 + v.reset(OpMIPS64MOVDstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVVstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if 
v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVVstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVVstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) + // result: (MOVVstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpMIPS64MOVVstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVVstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVVstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := 
auxToSym(v.Aux) + if v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVVstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVVstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVWUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (MOVWUload [off] {sym} ptr (MOVFstore [off] {sym} ptr val _)) + // result: (ZeroExt32to64 (MOVWfpgp val)) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVFstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + val := v_1.Args[1] + if ptr != v_1.Args[0] { + break + } + v.reset(OpZeroExt32to64) + v0 := b.NewValue0(v_1.Pos, OpMIPS64MOVWfpgp, typ.Float32) + v0.AddArg(val) + v.AddArg(v0) + return true + } + // match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: 
(MOVWUload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVWUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVWUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVWUreg x:(MOVBUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVBUload { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVHUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVHUload { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVWUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVWUload { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVBUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != 
OpMIPS64MOVBUreg { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVHUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVHUreg { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVWUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVWUreg { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg (MOVVconst [c])) + // result: (MOVVconst [int64(uint32(c))]) + for { + if v_0.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint32(c))) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVWload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if 
!(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVWload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym)) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVWreg x:(MOVBload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVBload { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVBUload { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVHload { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVHUload { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVWload { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVBreg { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != 
OpMIPS64MOVBUreg { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVHreg { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpMIPS64MOVWreg { + break + } + v.reset(OpMIPS64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg (MOVVconst [c])) + // result: (MOVVconst [int64(int32(c))]) + for { + if v_0.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(int32(c))) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVWstore [off] {sym} ptr (MOVWfpgp val) mem) + // result: (MOVFstore [off] {sym} ptr val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVWfpgp { + break + } + val := v_1.Args[0] + mem := v_2 + v.reset(OpMIPS64MOVFstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVWstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] 
{sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVWstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) + // result: (MOVWstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpMIPS64MOVWstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPS64MOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpMIPS64MOVWUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpMIPS64MOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64MOVWstorezero(v *Value) 
bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpMIPS64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVWstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpMIPS64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpMIPS64MOVWstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64NEGV(v *Value) bool { + v_0 := v.Args[0] + // match: (NEGV (MOVVconst [c])) + // result: (MOVVconst [-c]) + for { + if v_0.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(-c) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64NOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NOR x (MOVVconst [c])) + // cond: is32Bit(c) + // 
result: (NORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMIPS64MOVVconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpMIPS64NORconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValueMIPS64_OpMIPS64NORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (NORconst [c] (MOVVconst [d])) + // result: (MOVVconst [^(c|d)]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(^(c | d)) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64OR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OR x (MOVVconst [c])) + // cond: is32Bit(c) + // result: (ORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMIPS64MOVVconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpMIPS64ORconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (OR x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64ORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ORconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ORconst [-1] _) + // result: (MOVVconst [-1]) + for { + if auxIntToInt64(v.AuxInt) != -1 { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + // match: (ORconst [c] (MOVVconst [d])) + // result: (MOVVconst [c|d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(c | d) + 
return true + } + // match: (ORconst [c] (ORconst [d] x)) + // cond: is32Bit(c|d) + // result: (ORconst [c|d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64ORconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(c | d)) { + break + } + v.reset(OpMIPS64ORconst) + v.AuxInt = int64ToAuxInt(c | d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64SGT(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SGT (MOVVconst [c]) x) + // cond: is32Bit(c) + // result: (SGTconst [c] x) + for { + if v_0.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(is32Bit(c)) { + break + } + v.reset(OpMIPS64SGTconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (SGT x x) + // result: (MOVVconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64SGTU(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SGTU (MOVVconst [c]) x) + // cond: is32Bit(c) + // result: (SGTUconst [c] x) + for { + if v_0.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(is32Bit(c)) { + break + } + v.reset(OpMIPS64SGTUconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (SGTU x x) + // result: (MOVVconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64SGTUconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SGTUconst [c] (MOVVconst [d])) + // cond: uint64(c)>uint64(d) + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(uint64(c) > uint64(d)) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + 
return true + } + // match: (SGTUconst [c] (MOVVconst [d])) + // cond: uint64(c)<=uint64(d) + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(uint64(c) <= uint64(d)) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTUconst [c] (MOVBUreg _)) + // cond: 0xff < uint64(c) + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVBUreg || !(0xff < uint64(c)) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTUconst [c] (MOVHUreg _)) + // cond: 0xffff < uint64(c) + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVHUreg || !(0xffff < uint64(c)) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTUconst [c] (ANDconst [m] _)) + // cond: uint64(m) < uint64(c) + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64ANDconst { + break + } + m := auxIntToInt64(v_0.AuxInt) + if !(uint64(m) < uint64(c)) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTUconst [c] (SRLVconst _ [d])) + // cond: 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64SRLVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64SGTconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SGTconst [c] (MOVVconst [d])) + // cond: c>d + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVVconst { + break + } + d := 
auxIntToInt64(v_0.AuxInt) + if !(c > d) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVVconst [d])) + // cond: c<=d + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(c <= d) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVBreg _)) + // cond: 0x7f < c + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVBreg || !(0x7f < c) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVBreg _)) + // cond: c <= -0x80 + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVBreg || !(c <= -0x80) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVBUreg _)) + // cond: 0xff < c + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVBUreg || !(0xff < c) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVBUreg _)) + // cond: c < 0 + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVBUreg || !(c < 0) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVHreg _)) + // cond: 0x7fff < c + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVHreg || !(0x7fff < c) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVHreg _)) + // cond: c <= -0x8000 + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVHreg || !(c <= -0x8000) { + break + } + v.reset(OpMIPS64MOVVconst) + 
v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVHUreg _)) + // cond: 0xffff < c + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVHUreg || !(0xffff < c) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVHUreg _)) + // cond: c < 0 + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVHUreg || !(c < 0) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVWUreg _)) + // cond: c < 0 + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVWUreg || !(c < 0) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTconst [c] (ANDconst [m] _)) + // cond: 0 <= m && m < c + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64ANDconst { + break + } + m := auxIntToInt64(v_0.AuxInt) + if !(0 <= m && m < c) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTconst [c] (SRLVconst _ [d])) + // cond: 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64SRLVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64SLLV(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SLLV _ (MOVVconst [c])) + // cond: uint64(c)>=64 + // result: (MOVVconst [0]) + for { + if v_1.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 64) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = 
int64ToAuxInt(0) + return true + } + // match: (SLLV x (MOVVconst [c])) + // result: (SLLVconst x [c]) + for { + x := v_0 + if v_1.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpMIPS64SLLVconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64SLLVconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SLLVconst [c] (MOVVconst [d])) + // result: (MOVVconst [d<=64 + // result: (SRAVconst x [63]) + for { + x := v_0 + if v_1.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 64) { + break + } + v.reset(OpMIPS64SRAVconst) + v.AuxInt = int64ToAuxInt(63) + v.AddArg(x) + return true + } + // match: (SRAV x (MOVVconst [c])) + // result: (SRAVconst x [c]) + for { + x := v_0 + if v_1.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpMIPS64SRAVconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64SRAVconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SRAVconst [c] (MOVVconst [d])) + // result: (MOVVconst [d>>uint64(c)]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(d >> uint64(c)) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64SRLV(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRLV _ (MOVVconst [c])) + // cond: uint64(c)>=64 + // result: (MOVVconst [0]) + for { + if v_1.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 64) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SRLV x (MOVVconst [c])) + // result: (SRLVconst x [c]) + for { + x := v_0 + if v_1.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpMIPS64SRLVconst) + v.AuxInt = 
int64ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64SRLVconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SRLVconst [c] (MOVVconst [d])) + // result: (MOVVconst [int64(uint64(d)>>uint64(c))]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint64(d) >> uint64(c))) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64SUBV(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBV x (MOVVconst [c])) + // cond: is32Bit(c) + // result: (SUBVconst [c] x) + for { + x := v_0 + if v_1.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + break + } + v.reset(OpMIPS64SUBVconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (SUBV x x) + // result: (MOVVconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SUBV (MOVVconst [0]) x) + // result: (NEGV x) + for { + if v_0.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_1 + v.reset(OpMIPS64NEGV) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64SUBVconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SUBVconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SUBVconst [c] (MOVVconst [d])) + // result: (MOVVconst [d-c]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(d - c) + return true + } + // match: (SUBVconst [c] (SUBVconst [d] x)) + // cond: is32Bit(-c-d) + // result: (ADDVconst [-c-d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64SUBVconst { + break + } + d := 
auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(-c - d)) { + break + } + v.reset(OpMIPS64ADDVconst) + v.AuxInt = int64ToAuxInt(-c - d) + v.AddArg(x) + return true + } + // match: (SUBVconst [c] (ADDVconst [d] x)) + // cond: is32Bit(-c+d) + // result: (ADDVconst [-c+d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64ADDVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(-c + d)) { + break + } + v.reset(OpMIPS64ADDVconst) + v.AuxInt = int64ToAuxInt(-c + d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64XOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XOR x (MOVVconst [c])) + // cond: is32Bit(c) + // result: (XORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMIPS64MOVVconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpMIPS64XORconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (XOR x x) + // result: (MOVVconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64XORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (XORconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (XORconst [-1] x) + // result: (NORconst [0] x) + for { + if auxIntToInt64(v.AuxInt) != -1 { + break + } + x := v_0 + v.reset(OpMIPS64NORconst) + v.AuxInt = int64ToAuxInt(0) + v.AddArg(x) + return true + } + // match: (XORconst [c] (MOVVconst [d])) + // result: (MOVVconst [c^d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(c ^ d) + return true + } + // match: (XORconst [c] (XORconst [d] 
x)) + // cond: is32Bit(c^d) + // result: (XORconst [c^d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpMIPS64XORconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(c ^ d)) { + break + } + v.reset(OpMIPS64XORconst) + v.AuxInt = int64ToAuxInt(c ^ d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueMIPS64_OpMod16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16 x y) + // result: (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) + v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpMod16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16u x y) + // result: (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpMod32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32 x y) + // result: (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + 
} +} +func rewriteValueMIPS64_OpMod32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32u x y) + // result: (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpMod64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod64 x y) + // result: (Select0 (DIVV x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpMod64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod64u x y) + // result: (Select0 (DIVVU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpMod8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8 x y) + // result: (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64)) + v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpMod8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + 
// match: (Mod8u x y) + // result: (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpMove(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Move [0] _ _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMIPS64MOVBstore) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore dst (MOVHload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpMIPS64MOVHstore) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] dst src mem) + // result: (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMIPS64MOVBstore) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) + v0.AuxInt = int32ToAuxInt(1) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, 
OpMIPS64MOVBload, typ.Int8) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [4] {t} dst src mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore dst (MOVWload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpMIPS64MOVWstore) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [4] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpMIPS64MOVHstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [4] dst src mem) + // result: (MOVBstore [3] dst (MOVBload [3] src mem) (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMIPS64MOVBstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) + v2.AuxInt = int32ToAuxInt(2) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, 
types.TypeMem) + v3.AuxInt = int32ToAuxInt(1) + v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) + v4.AuxInt = int32ToAuxInt(1) + v4.AddArg2(src, mem) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) + v6 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [8] {t} dst src mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVVstore dst (MOVVload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpMIPS64MOVVstore) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [8] {t} dst src mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpMIPS64MOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [8] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + 
v.reset(OpMIPS64MOVHstore) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) + v0.AuxInt = int32ToAuxInt(6) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) + v2.AuxInt = int32ToAuxInt(4) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(2) + v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) + v4.AuxInt = int32ToAuxInt(2) + v4.AddArg2(src, mem) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) + v6 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [3] dst src mem) + // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMIPS64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) + v2.AuxInt = int32ToAuxInt(1) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [6] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + t := auxToType(v.Aux) + dst 
:= v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpMIPS64MOVHstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) + v2.AuxInt = int32ToAuxInt(2) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [12] {t} dst src mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 12 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpMIPS64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) + v2.AuxInt = int32ToAuxInt(4) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [16] {t} dst src mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%8 == 0) { + break + } + 
v.reset(OpMIPS64MOVVstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [24] {t} dst src mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVVstore [16] dst (MOVVload [16] src mem) (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 24 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpMIPS64MOVVstore) + v.AuxInt = int32ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(16) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(8) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) + v2.AuxInt = int32ToAuxInt(8) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [s] {t} dst src mem) + // cond: s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s) + // result: (DUFFCOPY [16 * (128 - s/8)] dst src mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) { + break + } + v.reset(OpMIPS64DUFFCOPY) + v.AuxInt = int64ToAuxInt(16 * (128 - s/8)) + v.AddArg3(dst, src, mem) + return true + } + // match: (Move [s] {t} dst src mem) + // cond: s > 24 && logLargeCopy(v, s) 
|| t.Alignment()%8 != 0 + // result: (LoweredMove [t.Alignment()] dst src (ADDVconst src [s-moveSize(t.Alignment(), config)]) mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0) { + break + } + v.reset(OpMIPS64LoweredMove) + v.AuxInt = int64ToAuxInt(t.Alignment()) + v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, src.Type) + v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config)) + v0.AddArg(src) + v.AddArg4(dst, src, v0, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMul16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mul16 x y) + // result: (Select1 (MULVU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpMul32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mul32 x y) + // result: (Select1 (MULVU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpMul64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mul64 x y) + // result: (Select1 (MULVU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpMul8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mul8 x y) + // result: (Select1 (MULVU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, 
OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpNeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq16 x y) + // result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0])) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SGTU) + v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueMIPS64_OpNeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq32 x y) + // result: (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0])) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SGTU) + v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueMIPS64_OpNeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq32F x y) + // result: (FPFlagFalse (CMPEQF x y)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64FPFlagFalse) + v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQF, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpNeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq64 x y) + // result: (SGTU (XOR x y) (MOVVconst [0])) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SGTU) + v0 := 
b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpNeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq64F x y) + // result: (FPFlagFalse (CMPEQD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64FPFlagFalse) + v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpNeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq8 x y) + // result: (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0])) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SGTU) + v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueMIPS64_OpNeqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NeqPtr x y) + // result: (SGTU (XOR x y) (MOVVconst [0])) + for { + x := v_0 + y := v_1 + v.reset(OpMIPS64SGTU) + v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpNot(v *Value) bool { + v_0 := v.Args[0] + // match: (Not x) + // result: (XORconst [1] x) + for { + x := v_0 + v.reset(OpMIPS64XORconst) + v.AuxInt = int64ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueMIPS64_OpOffPtr(v *Value) bool { + v_0 := v.Args[0] + // match: (OffPtr [off] ptr:(SP)) + // cond: is32Bit(off) + // result: 
(MOVVaddr [int32(off)] ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + if ptr.Op != OpSP || !(is32Bit(off)) { + break + } + v.reset(OpMIPS64MOVVaddr) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) + return true + } + // match: (OffPtr [off] ptr) + // result: (ADDVconst [off] ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(OpMIPS64ADDVconst) + v.AuxInt = int64ToAuxInt(off) + v.AddArg(ptr) + return true + } +} +func rewriteValueMIPS64_OpPanicBounds(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 0 + // result: (LoweredPanicBoundsA [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 0) { + break + } + v.reset(OpMIPS64LoweredPanicBoundsA) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 1 + // result: (LoweredPanicBoundsB [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 1) { + break + } + v.reset(OpMIPS64LoweredPanicBoundsB) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 2 + // result: (LoweredPanicBoundsC [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 2) { + break + } + v.reset(OpMIPS64LoweredPanicBoundsC) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpRotateLeft16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft16 x (MOVVconst [c])) + // result: (Or16 (Lsh16x64 x (MOVVconst [c&15])) (Rsh16Ux64 x (MOVVconst [-c&15]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpMIPS64MOVVconst { + break 
+ } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr16) + v0 := b.NewValue0(v.Pos, OpLsh16x64, t) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 15) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) + v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 15) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueMIPS64_OpRotateLeft32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft32 x (MOVVconst [c])) + // result: (Or32 (Lsh32x64 x (MOVVconst [c&31])) (Rsh32Ux64 x (MOVVconst [-c&31]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr32) + v0 := b.NewValue0(v.Pos, OpLsh32x64, t) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 31) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t) + v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 31) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueMIPS64_OpRotateLeft64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft64 x (MOVVconst [c])) + // result: (Or64 (Lsh64x64 x (MOVVconst [c&63])) (Rsh64Ux64 x (MOVVconst [-c&63]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr64) + v0 := b.NewValue0(v.Pos, OpLsh64x64, t) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 63) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t) + v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 63) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func 
rewriteValueMIPS64_OpRotateLeft8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft8 x (MOVVconst [c])) + // result: (Or8 (Lsh8x64 x (MOVVconst [c&7])) (Rsh8Ux64 x (MOVVconst [-c&7]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpLsh8x64, t) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 7) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) + v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 7) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueMIPS64_OpRsh16Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux16 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) + v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v5.AddArg(x) + v4.AddArg2(v5, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpRsh16Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux32 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, 
typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) + v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v5.AddArg(x) + v4.AddArg2(v5, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpRsh16Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux64 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt16to64 x) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(x) + v3.AddArg2(v4, y) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueMIPS64_OpRsh16Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux8 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) + v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v5.AddArg(x) + v4.AddArg2(v5, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpRsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := 
v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x16 x y) + // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) + v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpRsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x32 x y) + // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) + v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpRsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x64 x y) + // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) + v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v3 := 
b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(63) + v3.AddArg2(y, v4) + v2.AddArg(v3) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpRsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x8 x y) + // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) + v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpRsh32Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux16 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) + v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v5.AddArg(x) + v4.AddArg2(v5, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpRsh32Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: 
(Rsh32Ux32 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) + v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v5.AddArg(x) + v4.AddArg2(v5, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpRsh32Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux64 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt32to64 x) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(x) + v3.AddArg2(v4, y) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueMIPS64_OpRsh32Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux8 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + 
v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) + v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v5.AddArg(x) + v4.AddArg2(v5, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpRsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x16 x y) + // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) + v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpRsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x32 x y) + // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) + v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpRsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x64 x y) + // result: (SRAV 
(SignExt32to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) + v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(63) + v3.AddArg2(y, v4) + v2.AddArg(v3) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpRsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x8 x y) + // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) + v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpRsh64Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux16 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV x (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) + 
v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpRsh64Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux32 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV x (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpRsh64Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux64 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV x y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueMIPS64_OpRsh64Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux8 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV x (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, 
v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpRsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x16 x y) + // result: (SRAV x (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64SRAV) + v0 := b.NewValue0(v.Pos, OpMIPS64OR, t) + v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(63) + v2.AddArg2(v3, v4) + v1.AddArg(v2) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueMIPS64_OpRsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x32 x y) + // result: (SRAV x (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64SRAV) + v0 := b.NewValue0(v.Pos, OpMIPS64OR, t) + v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(63) + v2.AddArg2(v3, v4) + v1.AddArg(v2) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueMIPS64_OpRsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x64 x y) + // result: (SRAV x (OR (NEGV (SGTU y (MOVVconst [63]))) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64SRAV) + v0 := b.NewValue0(v.Pos, OpMIPS64OR, t) + v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + 
v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(63) + v2.AddArg2(y, v3) + v1.AddArg(v2) + v0.AddArg2(v1, y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueMIPS64_OpRsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x8 x y) + // result: (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64SRAV) + v0 := b.NewValue0(v.Pos, OpMIPS64OR, t) + v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(63) + v2.AddArg2(v3, v4) + v1.AddArg(v2) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueMIPS64_OpRsh8Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux16 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) + v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v5.AddArg(x) + v4.AddArg2(v5, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpRsh8Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux32 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt32to64 y))) + for { + t := 
v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) + v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v5.AddArg(x) + v4.AddArg2(v5, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpRsh8Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux64 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt8to64 x) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(x) + v3.AddArg2(v4, y) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueMIPS64_OpRsh8Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux8 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64AND) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t) + v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v5.AddArg(x) + v4.AddArg2(v5, v3) + 
v.AddArg2(v0, v4) + return true + } +} +func rewriteValueMIPS64_OpRsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x16 x y) + // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) + v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpRsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x32 x y) + // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) + v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpRsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x64 x y) + // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, 
typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) + v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(63) + v3.AddArg2(y, v4) + v2.AddArg(v3) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpRsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x8 x y) + // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpMIPS64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpMIPS64OR, t) + v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueMIPS64_OpSelect0(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select0 (Mul64uover x y)) + // result: (Select1 (MULVU x y)) + for { + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpSelect1) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (Select0 (Add64carry x y c)) + // result: (ADDV (ADDV x y) c) + for { + t := v.Type + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpMIPS64ADDV) + v0 := b.NewValue0(v.Pos, OpMIPS64ADDV, t) + v0.AddArg2(x, y) + v.AddArg2(v0, c) + return true + } + // match: (Select0 (Sub64borrow x y c)) + // result: (SUBV (SUBV x y) c) + 
for { + t := v.Type + if v_0.Op != OpSub64borrow { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpMIPS64SUBV) + v0 := b.NewValue0(v.Pos, OpMIPS64SUBV, t) + v0.AddArg2(x, y) + v.AddArg2(v0, c) + return true + } + // match: (Select0 (DIVVU _ (MOVVconst [1]))) + // result: (MOVVconst [0]) + for { + if v_0.Op != OpMIPS64DIVVU { + break + } + _ = v_0.Args[1] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Select0 (DIVVU x (MOVVconst [c]))) + // cond: isPowerOfTwo64(c) + // result: (ANDconst [c-1] x) + for { + if v_0.Op != OpMIPS64DIVVU { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_0_1.AuxInt) + if !(isPowerOfTwo64(c)) { + break + } + v.reset(OpMIPS64ANDconst) + v.AuxInt = int64ToAuxInt(c - 1) + v.AddArg(x) + return true + } + // match: (Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) + // cond: d != 0 + // result: (MOVVconst [c%d]) + for { + if v_0.Op != OpMIPS64DIVV { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_0_0.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpMIPS64MOVVconst { + break + } + d := auxIntToInt64(v_0_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(c % d) + return true + } + // match: (Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) + // cond: d != 0 + // result: (MOVVconst [int64(uint64(c)%uint64(d))]) + for { + if v_0.Op != OpMIPS64DIVVU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_0_0.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpMIPS64MOVVconst { + break + } + d := auxIntToInt64(v_0_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpMIPS64MOVVconst) + 
v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d))) + return true + } + return false +} +func rewriteValueMIPS64_OpSelect1(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select1 (Mul64uover x y)) + // result: (SGTU (Select0 (MULVU x y)) (MOVVconst [0])) + for { + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpMIPS64SGTU) + v.Type = typ.Bool + v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v1.AddArg2(x, y) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + // match: (Select1 (Add64carry x y c)) + // result: (OR (SGTU x s:(ADDV x y)) (SGTU s (ADDV s c))) + for { + t := v.Type + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpMIPS64OR) + v0 := b.NewValue0(v.Pos, OpMIPS64SGTU, t) + s := b.NewValue0(v.Pos, OpMIPS64ADDV, t) + s.AddArg2(x, y) + v0.AddArg2(x, s) + v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, t) + v3 := b.NewValue0(v.Pos, OpMIPS64ADDV, t) + v3.AddArg2(s, c) + v2.AddArg2(s, v3) + v.AddArg2(v0, v2) + return true + } + // match: (Select1 (Sub64borrow x y c)) + // result: (OR (SGTU s:(SUBV x y) x) (SGTU (SUBV s c) s)) + for { + t := v.Type + if v_0.Op != OpSub64borrow { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpMIPS64OR) + v0 := b.NewValue0(v.Pos, OpMIPS64SGTU, t) + s := b.NewValue0(v.Pos, OpMIPS64SUBV, t) + s.AddArg2(x, y) + v0.AddArg2(s, x) + v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, t) + v3 := b.NewValue0(v.Pos, OpMIPS64SUBV, t) + v3.AddArg2(s, c) + v2.AddArg2(v3, s) + v.AddArg2(v0, v2) + return true + } + // match: (Select1 (MULVU x (MOVVconst [-1]))) + // result: (NEGV x) + for { + if v_0.Op != OpMIPS64MULVU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; 
_i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != -1 { + continue + } + v.reset(OpMIPS64NEGV) + v.AddArg(x) + return true + } + break + } + // match: (Select1 (MULVU _ (MOVVconst [0]))) + // result: (MOVVconst [0]) + for { + if v_0.Op != OpMIPS64MULVU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { + continue + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + break + } + // match: (Select1 (MULVU x (MOVVconst [1]))) + // result: x + for { + if v_0.Op != OpMIPS64MULVU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 { + continue + } + v.copyOf(x) + return true + } + break + } + // match: (Select1 (MULVU x (MOVVconst [c]))) + // cond: isPowerOfTwo64(c) + // result: (SLLVconst [log64(c)] x) + for { + if v_0.Op != OpMIPS64MULVU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpMIPS64MOVVconst { + continue + } + c := auxIntToInt64(v_0_1.AuxInt) + if !(isPowerOfTwo64(c)) { + continue + } + v.reset(OpMIPS64SLLVconst) + v.AuxInt = int64ToAuxInt(log64(c)) + v.AddArg(x) + return true + } + break + } + // match: (Select1 (DIVVU x (MOVVconst [1]))) + // result: x + for { + if v_0.Op != OpMIPS64DIVVU { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 { + break + } + v.copyOf(x) + return true + } + // match: (Select1 (DIVVU x (MOVVconst [c]))) + // cond: isPowerOfTwo64(c) + // result: (SRLVconst 
[log64(c)] x) + for { + if v_0.Op != OpMIPS64DIVVU { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_0_1.AuxInt) + if !(isPowerOfTwo64(c)) { + break + } + v.reset(OpMIPS64SRLVconst) + v.AuxInt = int64ToAuxInt(log64(c)) + v.AddArg(x) + return true + } + // match: (Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) + // result: (MOVVconst [c*d]) + for { + if v_0.Op != OpMIPS64MULVU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpMIPS64MOVVconst { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_0_1.Op != OpMIPS64MOVVconst { + continue + } + d := auxIntToInt64(v_0_1.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(c * d) + return true + } + break + } + // match: (Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) + // cond: d != 0 + // result: (MOVVconst [c/d]) + for { + if v_0.Op != OpMIPS64DIVV { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_0_0.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpMIPS64MOVVconst { + break + } + d := auxIntToInt64(v_0_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(c / d) + return true + } + // match: (Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) + // cond: d != 0 + // result: (MOVVconst [int64(uint64(c)/uint64(d))]) + for { + if v_0.Op != OpMIPS64DIVVU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpMIPS64MOVVconst { + break + } + c := auxIntToInt64(v_0_0.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpMIPS64MOVVconst { + break + } + d := auxIntToInt64(v_0_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d))) + return true + } + return false +} +func rewriteValueMIPS64_OpSlicemask(v 
*Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Slicemask x) + // result: (SRAVconst (NEGV x) [63]) + for { + t := v.Type + x := v_0 + v.reset(OpMIPS64SRAVconst) + v.AuxInt = int64ToAuxInt(63) + v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueMIPS64_OpStore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Store {t} ptr val mem) + // cond: t.Size() == 1 + // result: (MOVBstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 1) { + break + } + v.reset(OpMIPS64MOVBstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 2 + // result: (MOVHstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 2) { + break + } + v.reset(OpMIPS64MOVHstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && !t.IsFloat() + // result: (MOVWstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && !t.IsFloat()) { + break + } + v.reset(OpMIPS64MOVWstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && !t.IsFloat() + // result: (MOVVstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && !t.IsFloat()) { + break + } + v.reset(OpMIPS64MOVVstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && t.IsFloat() + // result: (MOVFstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && t.IsFloat()) { + break + } + v.reset(OpMIPS64MOVFstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && t.IsFloat() + // result: (MOVDstore 
ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && t.IsFloat()) { + break + } + v.reset(OpMIPS64MOVDstore) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueMIPS64_OpZero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Zero [0] _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_1 + v.copyOf(mem) + return true + } + // match: (Zero [1] ptr mem) + // result: (MOVBstore ptr (MOVVconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpMIPS64MOVBstore) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [2] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore ptr (MOVVconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpMIPS64MOVHstore) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [2] ptr mem) + // result: (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpMIPS64MOVBstore) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [4] {t} ptr mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore ptr (MOVVconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + ptr 
:= v_0 + mem := v_1 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpMIPS64MOVWstore) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [4] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpMIPS64MOVHstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [4] ptr mem) + // result: (MOVBstore [3] ptr (MOVVconst [0]) (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpMIPS64MOVBstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(1) + v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(0) + v3.AddArg3(ptr, v0, mem) + v2.AddArg3(ptr, v0, v3) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [8] {t} ptr mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVVstore ptr (MOVVconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpMIPS64MOVVstore) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, 
typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [8] {t} ptr mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpMIPS64MOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [8] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [6] ptr (MOVVconst [0]) (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpMIPS64MOVHstore) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(2) + v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(0) + v3.AddArg3(ptr, v0, mem) + v2.AddArg3(ptr, v0, v3) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [3] ptr mem) + // result: (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpMIPS64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = 
int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(0) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [6] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpMIPS64MOVHstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(0) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [12] {t} ptr mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [8] ptr (MOVVconst [0]) (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 12 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpMIPS64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(0) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [16] {t} ptr mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem)) + for 
{ + if auxIntToInt64(v.AuxInt) != 16 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpMIPS64MOVVstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [24] {t} ptr mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVVstore [16] ptr (MOVVconst [0]) (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 24 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpMIPS64MOVVstore) + v.AuxInt = int32ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(8) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(0) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [s] {t} ptr mem) + // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice + // result: (DUFFZERO [8 * (128 - s/8)] ptr mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice) { + break + } + v.reset(OpMIPS64DUFFZERO) + v.AuxInt = int64ToAuxInt(8 * (128 - s/8)) + v.AddArg2(ptr, mem) + return true + } + // match: (Zero [s] {t} ptr mem) + // cond: (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 + // result: (LoweredZero [t.Alignment()] ptr (ADDVconst ptr [s-moveSize(t.Alignment(), config)]) mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + ptr := v_0 + mem 
:= v_1 + if !((s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0) { + break + } + v.reset(OpMIPS64LoweredZero) + v.AuxInt = int64ToAuxInt(t.Alignment()) + v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, ptr.Type) + v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config)) + v0.AddArg(ptr) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} +func rewriteBlockMIPS64(b *Block) bool { + switch b.Kind { + case BlockMIPS64EQ: + // match: (EQ (FPFlagTrue cmp) yes no) + // result: (FPF cmp yes no) + for b.Controls[0].Op == OpMIPS64FPFlagTrue { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockMIPS64FPF, cmp) + return true + } + // match: (EQ (FPFlagFalse cmp) yes no) + // result: (FPT cmp yes no) + for b.Controls[0].Op == OpMIPS64FPFlagFalse { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockMIPS64FPT, cmp) + return true + } + // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpMIPS64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpMIPS64SGT { + break + } + b.resetWithControl(BlockMIPS64NE, cmp) + return true + } + // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpMIPS64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpMIPS64SGTU { + break + } + b.resetWithControl(BlockMIPS64NE, cmp) + return true + } + // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpMIPS64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpMIPS64SGTconst { + break + } + b.resetWithControl(BlockMIPS64NE, cmp) + return true + } + // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpMIPS64XORconst { + v_0 
:= b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpMIPS64SGTUconst { + break + } + b.resetWithControl(BlockMIPS64NE, cmp) + return true + } + // match: (EQ (SGTUconst [1] x) yes no) + // result: (NE x yes no) + for b.Controls[0].Op == OpMIPS64SGTUconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockMIPS64NE, x) + return true + } + // match: (EQ (SGTU x (MOVVconst [0])) yes no) + // result: (EQ x yes no) + for b.Controls[0].Op == OpMIPS64SGTU { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { + break + } + b.resetWithControl(BlockMIPS64EQ, x) + return true + } + // match: (EQ (SGTconst [0] x) yes no) + // result: (GEZ x yes no) + for b.Controls[0].Op == OpMIPS64SGTconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockMIPS64GEZ, x) + return true + } + // match: (EQ (SGT x (MOVVconst [0])) yes no) + // result: (LEZ x yes no) + for b.Controls[0].Op == OpMIPS64SGT { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { + break + } + b.resetWithControl(BlockMIPS64LEZ, x) + return true + } + // match: (EQ (MOVVconst [0]) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpMIPS64MOVVconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + b.Reset(BlockFirst) + return true + } + // match: (EQ (MOVVconst [c]) yes no) + // cond: c != 0 + // result: (First no yes) + for b.Controls[0].Op == OpMIPS64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c != 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockMIPS64GEZ: + // match: (GEZ (MOVVconst [c]) yes no) + // cond: c >= 0 + // 
result: (First yes no) + for b.Controls[0].Op == OpMIPS64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c >= 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (GEZ (MOVVconst [c]) yes no) + // cond: c < 0 + // result: (First no yes) + for b.Controls[0].Op == OpMIPS64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c < 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockMIPS64GTZ: + // match: (GTZ (MOVVconst [c]) yes no) + // cond: c > 0 + // result: (First yes no) + for b.Controls[0].Op == OpMIPS64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c > 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (GTZ (MOVVconst [c]) yes no) + // cond: c <= 0 + // result: (First no yes) + for b.Controls[0].Op == OpMIPS64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c <= 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockIf: + // match: (If cond yes no) + // result: (NE cond yes no) + for { + cond := b.Controls[0] + b.resetWithControl(BlockMIPS64NE, cond) + return true + } + case BlockMIPS64LEZ: + // match: (LEZ (MOVVconst [c]) yes no) + // cond: c <= 0 + // result: (First yes no) + for b.Controls[0].Op == OpMIPS64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c <= 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (LEZ (MOVVconst [c]) yes no) + // cond: c > 0 + // result: (First no yes) + for b.Controls[0].Op == OpMIPS64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c > 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockMIPS64LTZ: + // match: (LTZ (MOVVconst [c]) yes no) + // cond: c < 0 + // result: (First yes no) + for b.Controls[0].Op == OpMIPS64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c < 0) { + break + } + 
b.Reset(BlockFirst) + return true + } + // match: (LTZ (MOVVconst [c]) yes no) + // cond: c >= 0 + // result: (First no yes) + for b.Controls[0].Op == OpMIPS64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c >= 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockMIPS64NE: + // match: (NE (FPFlagTrue cmp) yes no) + // result: (FPT cmp yes no) + for b.Controls[0].Op == OpMIPS64FPFlagTrue { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockMIPS64FPT, cmp) + return true + } + // match: (NE (FPFlagFalse cmp) yes no) + // result: (FPF cmp yes no) + for b.Controls[0].Op == OpMIPS64FPFlagFalse { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockMIPS64FPF, cmp) + return true + } + // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpMIPS64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpMIPS64SGT { + break + } + b.resetWithControl(BlockMIPS64EQ, cmp) + return true + } + // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpMIPS64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpMIPS64SGTU { + break + } + b.resetWithControl(BlockMIPS64EQ, cmp) + return true + } + // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpMIPS64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpMIPS64SGTconst { + break + } + b.resetWithControl(BlockMIPS64EQ, cmp) + return true + } + // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpMIPS64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != 
OpMIPS64SGTUconst { + break + } + b.resetWithControl(BlockMIPS64EQ, cmp) + return true + } + // match: (NE (SGTUconst [1] x) yes no) + // result: (EQ x yes no) + for b.Controls[0].Op == OpMIPS64SGTUconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockMIPS64EQ, x) + return true + } + // match: (NE (SGTU x (MOVVconst [0])) yes no) + // result: (NE x yes no) + for b.Controls[0].Op == OpMIPS64SGTU { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { + break + } + b.resetWithControl(BlockMIPS64NE, x) + return true + } + // match: (NE (SGTconst [0] x) yes no) + // result: (LTZ x yes no) + for b.Controls[0].Op == OpMIPS64SGTconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockMIPS64LTZ, x) + return true + } + // match: (NE (SGT x (MOVVconst [0])) yes no) + // result: (GTZ x yes no) + for b.Controls[0].Op == OpMIPS64SGT { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { + break + } + b.resetWithControl(BlockMIPS64GTZ, x) + return true + } + // match: (NE (MOVVconst [0]) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpMIPS64MOVVconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (NE (MOVVconst [c]) yes no) + // cond: c != 0 + // result: (First yes no) + for b.Controls[0].Op == OpMIPS64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c != 0) { + break + } + b.Reset(BlockFirst) + return true + } + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewritePPC64.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewritePPC64.go new 
file mode 100644 index 0000000000000000000000000000000000000000..473a8ff9d95527c949db2272e03040539177e26c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -0,0 +1,16564 @@ +// Code generated from _gen/PPC64.rules using 'go generate'; DO NOT EDIT. + +package ssa + +import "internal/buildcfg" +import "math" +import "cmd/compile/internal/types" + +func rewriteValuePPC64(v *Value) bool { + switch v.Op { + case OpAbs: + v.Op = OpPPC64FABS + return true + case OpAdd16: + v.Op = OpPPC64ADD + return true + case OpAdd32: + v.Op = OpPPC64ADD + return true + case OpAdd32F: + v.Op = OpPPC64FADDS + return true + case OpAdd64: + v.Op = OpPPC64ADD + return true + case OpAdd64F: + v.Op = OpPPC64FADD + return true + case OpAdd8: + v.Op = OpPPC64ADD + return true + case OpAddPtr: + v.Op = OpPPC64ADD + return true + case OpAddr: + return rewriteValuePPC64_OpAddr(v) + case OpAnd16: + v.Op = OpPPC64AND + return true + case OpAnd32: + v.Op = OpPPC64AND + return true + case OpAnd64: + v.Op = OpPPC64AND + return true + case OpAnd8: + v.Op = OpPPC64AND + return true + case OpAndB: + v.Op = OpPPC64AND + return true + case OpAtomicAdd32: + v.Op = OpPPC64LoweredAtomicAdd32 + return true + case OpAtomicAdd64: + v.Op = OpPPC64LoweredAtomicAdd64 + return true + case OpAtomicAnd32: + v.Op = OpPPC64LoweredAtomicAnd32 + return true + case OpAtomicAnd8: + v.Op = OpPPC64LoweredAtomicAnd8 + return true + case OpAtomicCompareAndSwap32: + return rewriteValuePPC64_OpAtomicCompareAndSwap32(v) + case OpAtomicCompareAndSwap64: + return rewriteValuePPC64_OpAtomicCompareAndSwap64(v) + case OpAtomicCompareAndSwapRel32: + return rewriteValuePPC64_OpAtomicCompareAndSwapRel32(v) + case OpAtomicExchange32: + v.Op = OpPPC64LoweredAtomicExchange32 + return true + case OpAtomicExchange64: + v.Op = OpPPC64LoweredAtomicExchange64 + return true + case OpAtomicLoad32: + return rewriteValuePPC64_OpAtomicLoad32(v) + case OpAtomicLoad64: + return 
rewriteValuePPC64_OpAtomicLoad64(v) + case OpAtomicLoad8: + return rewriteValuePPC64_OpAtomicLoad8(v) + case OpAtomicLoadAcq32: + return rewriteValuePPC64_OpAtomicLoadAcq32(v) + case OpAtomicLoadAcq64: + return rewriteValuePPC64_OpAtomicLoadAcq64(v) + case OpAtomicLoadPtr: + return rewriteValuePPC64_OpAtomicLoadPtr(v) + case OpAtomicOr32: + v.Op = OpPPC64LoweredAtomicOr32 + return true + case OpAtomicOr8: + v.Op = OpPPC64LoweredAtomicOr8 + return true + case OpAtomicStore32: + return rewriteValuePPC64_OpAtomicStore32(v) + case OpAtomicStore64: + return rewriteValuePPC64_OpAtomicStore64(v) + case OpAtomicStore8: + return rewriteValuePPC64_OpAtomicStore8(v) + case OpAtomicStoreRel32: + return rewriteValuePPC64_OpAtomicStoreRel32(v) + case OpAtomicStoreRel64: + return rewriteValuePPC64_OpAtomicStoreRel64(v) + case OpAvg64u: + return rewriteValuePPC64_OpAvg64u(v) + case OpBitLen32: + return rewriteValuePPC64_OpBitLen32(v) + case OpBitLen64: + return rewriteValuePPC64_OpBitLen64(v) + case OpBswap16: + return rewriteValuePPC64_OpBswap16(v) + case OpBswap32: + return rewriteValuePPC64_OpBswap32(v) + case OpBswap64: + return rewriteValuePPC64_OpBswap64(v) + case OpCeil: + v.Op = OpPPC64FCEIL + return true + case OpClosureCall: + v.Op = OpPPC64CALLclosure + return true + case OpCom16: + return rewriteValuePPC64_OpCom16(v) + case OpCom32: + return rewriteValuePPC64_OpCom32(v) + case OpCom64: + return rewriteValuePPC64_OpCom64(v) + case OpCom8: + return rewriteValuePPC64_OpCom8(v) + case OpCondSelect: + return rewriteValuePPC64_OpCondSelect(v) + case OpConst16: + return rewriteValuePPC64_OpConst16(v) + case OpConst32: + return rewriteValuePPC64_OpConst32(v) + case OpConst32F: + v.Op = OpPPC64FMOVSconst + return true + case OpConst64: + return rewriteValuePPC64_OpConst64(v) + case OpConst64F: + v.Op = OpPPC64FMOVDconst + return true + case OpConst8: + return rewriteValuePPC64_OpConst8(v) + case OpConstBool: + return rewriteValuePPC64_OpConstBool(v) + case OpConstNil: + return 
rewriteValuePPC64_OpConstNil(v) + case OpCopysign: + return rewriteValuePPC64_OpCopysign(v) + case OpCtz16: + return rewriteValuePPC64_OpCtz16(v) + case OpCtz32: + return rewriteValuePPC64_OpCtz32(v) + case OpCtz32NonZero: + v.Op = OpCtz32 + return true + case OpCtz64: + return rewriteValuePPC64_OpCtz64(v) + case OpCtz64NonZero: + v.Op = OpCtz64 + return true + case OpCtz8: + return rewriteValuePPC64_OpCtz8(v) + case OpCvt32Fto32: + return rewriteValuePPC64_OpCvt32Fto32(v) + case OpCvt32Fto64: + return rewriteValuePPC64_OpCvt32Fto64(v) + case OpCvt32Fto64F: + v.Op = OpCopy + return true + case OpCvt32to32F: + return rewriteValuePPC64_OpCvt32to32F(v) + case OpCvt32to64F: + return rewriteValuePPC64_OpCvt32to64F(v) + case OpCvt64Fto32: + return rewriteValuePPC64_OpCvt64Fto32(v) + case OpCvt64Fto32F: + v.Op = OpPPC64FRSP + return true + case OpCvt64Fto64: + return rewriteValuePPC64_OpCvt64Fto64(v) + case OpCvt64to32F: + return rewriteValuePPC64_OpCvt64to32F(v) + case OpCvt64to64F: + return rewriteValuePPC64_OpCvt64to64F(v) + case OpCvtBoolToUint8: + v.Op = OpCopy + return true + case OpDiv16: + return rewriteValuePPC64_OpDiv16(v) + case OpDiv16u: + return rewriteValuePPC64_OpDiv16u(v) + case OpDiv32: + return rewriteValuePPC64_OpDiv32(v) + case OpDiv32F: + v.Op = OpPPC64FDIVS + return true + case OpDiv32u: + v.Op = OpPPC64DIVWU + return true + case OpDiv64: + return rewriteValuePPC64_OpDiv64(v) + case OpDiv64F: + v.Op = OpPPC64FDIV + return true + case OpDiv64u: + v.Op = OpPPC64DIVDU + return true + case OpDiv8: + return rewriteValuePPC64_OpDiv8(v) + case OpDiv8u: + return rewriteValuePPC64_OpDiv8u(v) + case OpEq16: + return rewriteValuePPC64_OpEq16(v) + case OpEq32: + return rewriteValuePPC64_OpEq32(v) + case OpEq32F: + return rewriteValuePPC64_OpEq32F(v) + case OpEq64: + return rewriteValuePPC64_OpEq64(v) + case OpEq64F: + return rewriteValuePPC64_OpEq64F(v) + case OpEq8: + return rewriteValuePPC64_OpEq8(v) + case OpEqB: + return rewriteValuePPC64_OpEqB(v) + case 
OpEqPtr: + return rewriteValuePPC64_OpEqPtr(v) + case OpFMA: + v.Op = OpPPC64FMADD + return true + case OpFloor: + v.Op = OpPPC64FFLOOR + return true + case OpGetCallerPC: + v.Op = OpPPC64LoweredGetCallerPC + return true + case OpGetCallerSP: + v.Op = OpPPC64LoweredGetCallerSP + return true + case OpGetClosurePtr: + v.Op = OpPPC64LoweredGetClosurePtr + return true + case OpHmul32: + v.Op = OpPPC64MULHW + return true + case OpHmul32u: + v.Op = OpPPC64MULHWU + return true + case OpHmul64: + v.Op = OpPPC64MULHD + return true + case OpHmul64u: + v.Op = OpPPC64MULHDU + return true + case OpInterCall: + v.Op = OpPPC64CALLinter + return true + case OpIsInBounds: + return rewriteValuePPC64_OpIsInBounds(v) + case OpIsNonNil: + return rewriteValuePPC64_OpIsNonNil(v) + case OpIsSliceInBounds: + return rewriteValuePPC64_OpIsSliceInBounds(v) + case OpLeq16: + return rewriteValuePPC64_OpLeq16(v) + case OpLeq16U: + return rewriteValuePPC64_OpLeq16U(v) + case OpLeq32: + return rewriteValuePPC64_OpLeq32(v) + case OpLeq32F: + return rewriteValuePPC64_OpLeq32F(v) + case OpLeq32U: + return rewriteValuePPC64_OpLeq32U(v) + case OpLeq64: + return rewriteValuePPC64_OpLeq64(v) + case OpLeq64F: + return rewriteValuePPC64_OpLeq64F(v) + case OpLeq64U: + return rewriteValuePPC64_OpLeq64U(v) + case OpLeq8: + return rewriteValuePPC64_OpLeq8(v) + case OpLeq8U: + return rewriteValuePPC64_OpLeq8U(v) + case OpLess16: + return rewriteValuePPC64_OpLess16(v) + case OpLess16U: + return rewriteValuePPC64_OpLess16U(v) + case OpLess32: + return rewriteValuePPC64_OpLess32(v) + case OpLess32F: + return rewriteValuePPC64_OpLess32F(v) + case OpLess32U: + return rewriteValuePPC64_OpLess32U(v) + case OpLess64: + return rewriteValuePPC64_OpLess64(v) + case OpLess64F: + return rewriteValuePPC64_OpLess64F(v) + case OpLess64U: + return rewriteValuePPC64_OpLess64U(v) + case OpLess8: + return rewriteValuePPC64_OpLess8(v) + case OpLess8U: + return rewriteValuePPC64_OpLess8U(v) + case OpLoad: + return 
rewriteValuePPC64_OpLoad(v) + case OpLocalAddr: + return rewriteValuePPC64_OpLocalAddr(v) + case OpLsh16x16: + return rewriteValuePPC64_OpLsh16x16(v) + case OpLsh16x32: + return rewriteValuePPC64_OpLsh16x32(v) + case OpLsh16x64: + return rewriteValuePPC64_OpLsh16x64(v) + case OpLsh16x8: + return rewriteValuePPC64_OpLsh16x8(v) + case OpLsh32x16: + return rewriteValuePPC64_OpLsh32x16(v) + case OpLsh32x32: + return rewriteValuePPC64_OpLsh32x32(v) + case OpLsh32x64: + return rewriteValuePPC64_OpLsh32x64(v) + case OpLsh32x8: + return rewriteValuePPC64_OpLsh32x8(v) + case OpLsh64x16: + return rewriteValuePPC64_OpLsh64x16(v) + case OpLsh64x32: + return rewriteValuePPC64_OpLsh64x32(v) + case OpLsh64x64: + return rewriteValuePPC64_OpLsh64x64(v) + case OpLsh64x8: + return rewriteValuePPC64_OpLsh64x8(v) + case OpLsh8x16: + return rewriteValuePPC64_OpLsh8x16(v) + case OpLsh8x32: + return rewriteValuePPC64_OpLsh8x32(v) + case OpLsh8x64: + return rewriteValuePPC64_OpLsh8x64(v) + case OpLsh8x8: + return rewriteValuePPC64_OpLsh8x8(v) + case OpMod16: + return rewriteValuePPC64_OpMod16(v) + case OpMod16u: + return rewriteValuePPC64_OpMod16u(v) + case OpMod32: + return rewriteValuePPC64_OpMod32(v) + case OpMod32u: + return rewriteValuePPC64_OpMod32u(v) + case OpMod64: + return rewriteValuePPC64_OpMod64(v) + case OpMod64u: + return rewriteValuePPC64_OpMod64u(v) + case OpMod8: + return rewriteValuePPC64_OpMod8(v) + case OpMod8u: + return rewriteValuePPC64_OpMod8u(v) + case OpMove: + return rewriteValuePPC64_OpMove(v) + case OpMul16: + v.Op = OpPPC64MULLW + return true + case OpMul32: + v.Op = OpPPC64MULLW + return true + case OpMul32F: + v.Op = OpPPC64FMULS + return true + case OpMul64: + v.Op = OpPPC64MULLD + return true + case OpMul64F: + v.Op = OpPPC64FMUL + return true + case OpMul8: + v.Op = OpPPC64MULLW + return true + case OpNeg16: + v.Op = OpPPC64NEG + return true + case OpNeg32: + v.Op = OpPPC64NEG + return true + case OpNeg32F: + v.Op = OpPPC64FNEG + return true + case 
OpNeg64: + v.Op = OpPPC64NEG + return true + case OpNeg64F: + v.Op = OpPPC64FNEG + return true + case OpNeg8: + v.Op = OpPPC64NEG + return true + case OpNeq16: + return rewriteValuePPC64_OpNeq16(v) + case OpNeq32: + return rewriteValuePPC64_OpNeq32(v) + case OpNeq32F: + return rewriteValuePPC64_OpNeq32F(v) + case OpNeq64: + return rewriteValuePPC64_OpNeq64(v) + case OpNeq64F: + return rewriteValuePPC64_OpNeq64F(v) + case OpNeq8: + return rewriteValuePPC64_OpNeq8(v) + case OpNeqB: + v.Op = OpPPC64XOR + return true + case OpNeqPtr: + return rewriteValuePPC64_OpNeqPtr(v) + case OpNilCheck: + v.Op = OpPPC64LoweredNilCheck + return true + case OpNot: + return rewriteValuePPC64_OpNot(v) + case OpOffPtr: + return rewriteValuePPC64_OpOffPtr(v) + case OpOr16: + v.Op = OpPPC64OR + return true + case OpOr32: + v.Op = OpPPC64OR + return true + case OpOr64: + v.Op = OpPPC64OR + return true + case OpOr8: + v.Op = OpPPC64OR + return true + case OpOrB: + v.Op = OpPPC64OR + return true + case OpPPC64ADD: + return rewriteValuePPC64_OpPPC64ADD(v) + case OpPPC64ADDE: + return rewriteValuePPC64_OpPPC64ADDE(v) + case OpPPC64ADDconst: + return rewriteValuePPC64_OpPPC64ADDconst(v) + case OpPPC64AND: + return rewriteValuePPC64_OpPPC64AND(v) + case OpPPC64ANDCCconst: + return rewriteValuePPC64_OpPPC64ANDCCconst(v) + case OpPPC64ANDN: + return rewriteValuePPC64_OpPPC64ANDN(v) + case OpPPC64BRD: + return rewriteValuePPC64_OpPPC64BRD(v) + case OpPPC64BRH: + return rewriteValuePPC64_OpPPC64BRH(v) + case OpPPC64BRW: + return rewriteValuePPC64_OpPPC64BRW(v) + case OpPPC64CLRLSLDI: + return rewriteValuePPC64_OpPPC64CLRLSLDI(v) + case OpPPC64CMP: + return rewriteValuePPC64_OpPPC64CMP(v) + case OpPPC64CMPU: + return rewriteValuePPC64_OpPPC64CMPU(v) + case OpPPC64CMPUconst: + return rewriteValuePPC64_OpPPC64CMPUconst(v) + case OpPPC64CMPW: + return rewriteValuePPC64_OpPPC64CMPW(v) + case OpPPC64CMPWU: + return rewriteValuePPC64_OpPPC64CMPWU(v) + case OpPPC64CMPWUconst: + return 
rewriteValuePPC64_OpPPC64CMPWUconst(v) + case OpPPC64CMPWconst: + return rewriteValuePPC64_OpPPC64CMPWconst(v) + case OpPPC64CMPconst: + return rewriteValuePPC64_OpPPC64CMPconst(v) + case OpPPC64Equal: + return rewriteValuePPC64_OpPPC64Equal(v) + case OpPPC64FABS: + return rewriteValuePPC64_OpPPC64FABS(v) + case OpPPC64FADD: + return rewriteValuePPC64_OpPPC64FADD(v) + case OpPPC64FADDS: + return rewriteValuePPC64_OpPPC64FADDS(v) + case OpPPC64FCEIL: + return rewriteValuePPC64_OpPPC64FCEIL(v) + case OpPPC64FFLOOR: + return rewriteValuePPC64_OpPPC64FFLOOR(v) + case OpPPC64FGreaterEqual: + return rewriteValuePPC64_OpPPC64FGreaterEqual(v) + case OpPPC64FGreaterThan: + return rewriteValuePPC64_OpPPC64FGreaterThan(v) + case OpPPC64FLessEqual: + return rewriteValuePPC64_OpPPC64FLessEqual(v) + case OpPPC64FLessThan: + return rewriteValuePPC64_OpPPC64FLessThan(v) + case OpPPC64FMOVDload: + return rewriteValuePPC64_OpPPC64FMOVDload(v) + case OpPPC64FMOVDstore: + return rewriteValuePPC64_OpPPC64FMOVDstore(v) + case OpPPC64FMOVSload: + return rewriteValuePPC64_OpPPC64FMOVSload(v) + case OpPPC64FMOVSstore: + return rewriteValuePPC64_OpPPC64FMOVSstore(v) + case OpPPC64FNEG: + return rewriteValuePPC64_OpPPC64FNEG(v) + case OpPPC64FSQRT: + return rewriteValuePPC64_OpPPC64FSQRT(v) + case OpPPC64FSUB: + return rewriteValuePPC64_OpPPC64FSUB(v) + case OpPPC64FSUBS: + return rewriteValuePPC64_OpPPC64FSUBS(v) + case OpPPC64FTRUNC: + return rewriteValuePPC64_OpPPC64FTRUNC(v) + case OpPPC64GreaterEqual: + return rewriteValuePPC64_OpPPC64GreaterEqual(v) + case OpPPC64GreaterThan: + return rewriteValuePPC64_OpPPC64GreaterThan(v) + case OpPPC64ISEL: + return rewriteValuePPC64_OpPPC64ISEL(v) + case OpPPC64LessEqual: + return rewriteValuePPC64_OpPPC64LessEqual(v) + case OpPPC64LessThan: + return rewriteValuePPC64_OpPPC64LessThan(v) + case OpPPC64MFVSRD: + return rewriteValuePPC64_OpPPC64MFVSRD(v) + case OpPPC64MOVBZload: + return rewriteValuePPC64_OpPPC64MOVBZload(v) + case 
OpPPC64MOVBZloadidx: + return rewriteValuePPC64_OpPPC64MOVBZloadidx(v) + case OpPPC64MOVBZreg: + return rewriteValuePPC64_OpPPC64MOVBZreg(v) + case OpPPC64MOVBreg: + return rewriteValuePPC64_OpPPC64MOVBreg(v) + case OpPPC64MOVBstore: + return rewriteValuePPC64_OpPPC64MOVBstore(v) + case OpPPC64MOVBstoreidx: + return rewriteValuePPC64_OpPPC64MOVBstoreidx(v) + case OpPPC64MOVBstorezero: + return rewriteValuePPC64_OpPPC64MOVBstorezero(v) + case OpPPC64MOVDaddr: + return rewriteValuePPC64_OpPPC64MOVDaddr(v) + case OpPPC64MOVDload: + return rewriteValuePPC64_OpPPC64MOVDload(v) + case OpPPC64MOVDloadidx: + return rewriteValuePPC64_OpPPC64MOVDloadidx(v) + case OpPPC64MOVDstore: + return rewriteValuePPC64_OpPPC64MOVDstore(v) + case OpPPC64MOVDstoreidx: + return rewriteValuePPC64_OpPPC64MOVDstoreidx(v) + case OpPPC64MOVDstorezero: + return rewriteValuePPC64_OpPPC64MOVDstorezero(v) + case OpPPC64MOVHBRstore: + return rewriteValuePPC64_OpPPC64MOVHBRstore(v) + case OpPPC64MOVHZload: + return rewriteValuePPC64_OpPPC64MOVHZload(v) + case OpPPC64MOVHZloadidx: + return rewriteValuePPC64_OpPPC64MOVHZloadidx(v) + case OpPPC64MOVHZreg: + return rewriteValuePPC64_OpPPC64MOVHZreg(v) + case OpPPC64MOVHload: + return rewriteValuePPC64_OpPPC64MOVHload(v) + case OpPPC64MOVHloadidx: + return rewriteValuePPC64_OpPPC64MOVHloadidx(v) + case OpPPC64MOVHreg: + return rewriteValuePPC64_OpPPC64MOVHreg(v) + case OpPPC64MOVHstore: + return rewriteValuePPC64_OpPPC64MOVHstore(v) + case OpPPC64MOVHstoreidx: + return rewriteValuePPC64_OpPPC64MOVHstoreidx(v) + case OpPPC64MOVHstorezero: + return rewriteValuePPC64_OpPPC64MOVHstorezero(v) + case OpPPC64MOVWBRstore: + return rewriteValuePPC64_OpPPC64MOVWBRstore(v) + case OpPPC64MOVWZload: + return rewriteValuePPC64_OpPPC64MOVWZload(v) + case OpPPC64MOVWZloadidx: + return rewriteValuePPC64_OpPPC64MOVWZloadidx(v) + case OpPPC64MOVWZreg: + return rewriteValuePPC64_OpPPC64MOVWZreg(v) + case OpPPC64MOVWload: + return rewriteValuePPC64_OpPPC64MOVWload(v) + case 
OpPPC64MOVWloadidx: + return rewriteValuePPC64_OpPPC64MOVWloadidx(v) + case OpPPC64MOVWreg: + return rewriteValuePPC64_OpPPC64MOVWreg(v) + case OpPPC64MOVWstore: + return rewriteValuePPC64_OpPPC64MOVWstore(v) + case OpPPC64MOVWstoreidx: + return rewriteValuePPC64_OpPPC64MOVWstoreidx(v) + case OpPPC64MOVWstorezero: + return rewriteValuePPC64_OpPPC64MOVWstorezero(v) + case OpPPC64MTVSRD: + return rewriteValuePPC64_OpPPC64MTVSRD(v) + case OpPPC64MULLD: + return rewriteValuePPC64_OpPPC64MULLD(v) + case OpPPC64MULLW: + return rewriteValuePPC64_OpPPC64MULLW(v) + case OpPPC64NEG: + return rewriteValuePPC64_OpPPC64NEG(v) + case OpPPC64NOR: + return rewriteValuePPC64_OpPPC64NOR(v) + case OpPPC64NotEqual: + return rewriteValuePPC64_OpPPC64NotEqual(v) + case OpPPC64OR: + return rewriteValuePPC64_OpPPC64OR(v) + case OpPPC64ORN: + return rewriteValuePPC64_OpPPC64ORN(v) + case OpPPC64ORconst: + return rewriteValuePPC64_OpPPC64ORconst(v) + case OpPPC64ROTL: + return rewriteValuePPC64_OpPPC64ROTL(v) + case OpPPC64ROTLW: + return rewriteValuePPC64_OpPPC64ROTLW(v) + case OpPPC64ROTLWconst: + return rewriteValuePPC64_OpPPC64ROTLWconst(v) + case OpPPC64SETBC: + return rewriteValuePPC64_OpPPC64SETBC(v) + case OpPPC64SETBCR: + return rewriteValuePPC64_OpPPC64SETBCR(v) + case OpPPC64SLD: + return rewriteValuePPC64_OpPPC64SLD(v) + case OpPPC64SLDconst: + return rewriteValuePPC64_OpPPC64SLDconst(v) + case OpPPC64SLW: + return rewriteValuePPC64_OpPPC64SLW(v) + case OpPPC64SLWconst: + return rewriteValuePPC64_OpPPC64SLWconst(v) + case OpPPC64SRAD: + return rewriteValuePPC64_OpPPC64SRAD(v) + case OpPPC64SRAW: + return rewriteValuePPC64_OpPPC64SRAW(v) + case OpPPC64SRD: + return rewriteValuePPC64_OpPPC64SRD(v) + case OpPPC64SRW: + return rewriteValuePPC64_OpPPC64SRW(v) + case OpPPC64SRWconst: + return rewriteValuePPC64_OpPPC64SRWconst(v) + case OpPPC64SUB: + return rewriteValuePPC64_OpPPC64SUB(v) + case OpPPC64SUBE: + return rewriteValuePPC64_OpPPC64SUBE(v) + case OpPPC64SUBFCconst: + return 
rewriteValuePPC64_OpPPC64SUBFCconst(v) + case OpPPC64XOR: + return rewriteValuePPC64_OpPPC64XOR(v) + case OpPPC64XORconst: + return rewriteValuePPC64_OpPPC64XORconst(v) + case OpPanicBounds: + return rewriteValuePPC64_OpPanicBounds(v) + case OpPopCount16: + return rewriteValuePPC64_OpPopCount16(v) + case OpPopCount32: + return rewriteValuePPC64_OpPopCount32(v) + case OpPopCount64: + v.Op = OpPPC64POPCNTD + return true + case OpPopCount8: + return rewriteValuePPC64_OpPopCount8(v) + case OpPrefetchCache: + return rewriteValuePPC64_OpPrefetchCache(v) + case OpPrefetchCacheStreamed: + return rewriteValuePPC64_OpPrefetchCacheStreamed(v) + case OpPubBarrier: + v.Op = OpPPC64LoweredPubBarrier + return true + case OpRotateLeft16: + return rewriteValuePPC64_OpRotateLeft16(v) + case OpRotateLeft32: + v.Op = OpPPC64ROTLW + return true + case OpRotateLeft64: + v.Op = OpPPC64ROTL + return true + case OpRotateLeft8: + return rewriteValuePPC64_OpRotateLeft8(v) + case OpRound: + v.Op = OpPPC64FROUND + return true + case OpRound32F: + v.Op = OpPPC64LoweredRound32F + return true + case OpRound64F: + v.Op = OpPPC64LoweredRound64F + return true + case OpRsh16Ux16: + return rewriteValuePPC64_OpRsh16Ux16(v) + case OpRsh16Ux32: + return rewriteValuePPC64_OpRsh16Ux32(v) + case OpRsh16Ux64: + return rewriteValuePPC64_OpRsh16Ux64(v) + case OpRsh16Ux8: + return rewriteValuePPC64_OpRsh16Ux8(v) + case OpRsh16x16: + return rewriteValuePPC64_OpRsh16x16(v) + case OpRsh16x32: + return rewriteValuePPC64_OpRsh16x32(v) + case OpRsh16x64: + return rewriteValuePPC64_OpRsh16x64(v) + case OpRsh16x8: + return rewriteValuePPC64_OpRsh16x8(v) + case OpRsh32Ux16: + return rewriteValuePPC64_OpRsh32Ux16(v) + case OpRsh32Ux32: + return rewriteValuePPC64_OpRsh32Ux32(v) + case OpRsh32Ux64: + return rewriteValuePPC64_OpRsh32Ux64(v) + case OpRsh32Ux8: + return rewriteValuePPC64_OpRsh32Ux8(v) + case OpRsh32x16: + return rewriteValuePPC64_OpRsh32x16(v) + case OpRsh32x32: + return rewriteValuePPC64_OpRsh32x32(v) + case 
OpRsh32x64: + return rewriteValuePPC64_OpRsh32x64(v) + case OpRsh32x8: + return rewriteValuePPC64_OpRsh32x8(v) + case OpRsh64Ux16: + return rewriteValuePPC64_OpRsh64Ux16(v) + case OpRsh64Ux32: + return rewriteValuePPC64_OpRsh64Ux32(v) + case OpRsh64Ux64: + return rewriteValuePPC64_OpRsh64Ux64(v) + case OpRsh64Ux8: + return rewriteValuePPC64_OpRsh64Ux8(v) + case OpRsh64x16: + return rewriteValuePPC64_OpRsh64x16(v) + case OpRsh64x32: + return rewriteValuePPC64_OpRsh64x32(v) + case OpRsh64x64: + return rewriteValuePPC64_OpRsh64x64(v) + case OpRsh64x8: + return rewriteValuePPC64_OpRsh64x8(v) + case OpRsh8Ux16: + return rewriteValuePPC64_OpRsh8Ux16(v) + case OpRsh8Ux32: + return rewriteValuePPC64_OpRsh8Ux32(v) + case OpRsh8Ux64: + return rewriteValuePPC64_OpRsh8Ux64(v) + case OpRsh8Ux8: + return rewriteValuePPC64_OpRsh8Ux8(v) + case OpRsh8x16: + return rewriteValuePPC64_OpRsh8x16(v) + case OpRsh8x32: + return rewriteValuePPC64_OpRsh8x32(v) + case OpRsh8x64: + return rewriteValuePPC64_OpRsh8x64(v) + case OpRsh8x8: + return rewriteValuePPC64_OpRsh8x8(v) + case OpSelect0: + return rewriteValuePPC64_OpSelect0(v) + case OpSelect1: + return rewriteValuePPC64_OpSelect1(v) + case OpSelectN: + return rewriteValuePPC64_OpSelectN(v) + case OpSignExt16to32: + v.Op = OpPPC64MOVHreg + return true + case OpSignExt16to64: + v.Op = OpPPC64MOVHreg + return true + case OpSignExt32to64: + v.Op = OpPPC64MOVWreg + return true + case OpSignExt8to16: + v.Op = OpPPC64MOVBreg + return true + case OpSignExt8to32: + v.Op = OpPPC64MOVBreg + return true + case OpSignExt8to64: + v.Op = OpPPC64MOVBreg + return true + case OpSlicemask: + return rewriteValuePPC64_OpSlicemask(v) + case OpSqrt: + v.Op = OpPPC64FSQRT + return true + case OpSqrt32: + v.Op = OpPPC64FSQRTS + return true + case OpStaticCall: + v.Op = OpPPC64CALLstatic + return true + case OpStore: + return rewriteValuePPC64_OpStore(v) + case OpSub16: + v.Op = OpPPC64SUB + return true + case OpSub32: + v.Op = OpPPC64SUB + return true + case 
OpSub32F: + v.Op = OpPPC64FSUBS + return true + case OpSub64: + v.Op = OpPPC64SUB + return true + case OpSub64F: + v.Op = OpPPC64FSUB + return true + case OpSub8: + v.Op = OpPPC64SUB + return true + case OpSubPtr: + v.Op = OpPPC64SUB + return true + case OpTailCall: + v.Op = OpPPC64CALLtail + return true + case OpTrunc: + v.Op = OpPPC64FTRUNC + return true + case OpTrunc16to8: + return rewriteValuePPC64_OpTrunc16to8(v) + case OpTrunc32to16: + return rewriteValuePPC64_OpTrunc32to16(v) + case OpTrunc32to8: + return rewriteValuePPC64_OpTrunc32to8(v) + case OpTrunc64to16: + return rewriteValuePPC64_OpTrunc64to16(v) + case OpTrunc64to32: + return rewriteValuePPC64_OpTrunc64to32(v) + case OpTrunc64to8: + return rewriteValuePPC64_OpTrunc64to8(v) + case OpWB: + v.Op = OpPPC64LoweredWB + return true + case OpXor16: + v.Op = OpPPC64XOR + return true + case OpXor32: + v.Op = OpPPC64XOR + return true + case OpXor64: + v.Op = OpPPC64XOR + return true + case OpXor8: + v.Op = OpPPC64XOR + return true + case OpZero: + return rewriteValuePPC64_OpZero(v) + case OpZeroExt16to32: + v.Op = OpPPC64MOVHZreg + return true + case OpZeroExt16to64: + v.Op = OpPPC64MOVHZreg + return true + case OpZeroExt32to64: + v.Op = OpPPC64MOVWZreg + return true + case OpZeroExt8to16: + v.Op = OpPPC64MOVBZreg + return true + case OpZeroExt8to32: + v.Op = OpPPC64MOVBZreg + return true + case OpZeroExt8to64: + v.Op = OpPPC64MOVBZreg + return true + } + return false +} +func rewriteValuePPC64_OpAddr(v *Value) bool { + v_0 := v.Args[0] + // match: (Addr {sym} base) + // result: (MOVDaddr {sym} [0] base) + for { + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpPPC64MOVDaddr) + v.AuxInt = int32ToAuxInt(0) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } +} +func rewriteValuePPC64_OpAtomicCompareAndSwap32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicCompareAndSwap32 ptr old new_ mem) + // result: (LoweredAtomicCas32 [1] ptr old new_ 
mem) + for { + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpPPC64LoweredAtomicCas32) + v.AuxInt = int64ToAuxInt(1) + v.AddArg4(ptr, old, new_, mem) + return true + } +} +func rewriteValuePPC64_OpAtomicCompareAndSwap64(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicCompareAndSwap64 ptr old new_ mem) + // result: (LoweredAtomicCas64 [1] ptr old new_ mem) + for { + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpPPC64LoweredAtomicCas64) + v.AuxInt = int64ToAuxInt(1) + v.AddArg4(ptr, old, new_, mem) + return true + } +} +func rewriteValuePPC64_OpAtomicCompareAndSwapRel32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicCompareAndSwapRel32 ptr old new_ mem) + // result: (LoweredAtomicCas32 [0] ptr old new_ mem) + for { + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpPPC64LoweredAtomicCas32) + v.AuxInt = int64ToAuxInt(0) + v.AddArg4(ptr, old, new_, mem) + return true + } +} +func rewriteValuePPC64_OpAtomicLoad32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoad32 ptr mem) + // result: (LoweredAtomicLoad32 [1] ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpPPC64LoweredAtomicLoad32) + v.AuxInt = int64ToAuxInt(1) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValuePPC64_OpAtomicLoad64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoad64 ptr mem) + // result: (LoweredAtomicLoad64 [1] ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpPPC64LoweredAtomicLoad64) + v.AuxInt = int64ToAuxInt(1) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValuePPC64_OpAtomicLoad8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoad8 ptr mem) + // result: (LoweredAtomicLoad8 [1] ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpPPC64LoweredAtomicLoad8) + v.AuxInt = int64ToAuxInt(1) + v.AddArg2(ptr, mem) + 
return true + } +} +func rewriteValuePPC64_OpAtomicLoadAcq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoadAcq32 ptr mem) + // result: (LoweredAtomicLoad32 [0] ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpPPC64LoweredAtomicLoad32) + v.AuxInt = int64ToAuxInt(0) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValuePPC64_OpAtomicLoadAcq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoadAcq64 ptr mem) + // result: (LoweredAtomicLoad64 [0] ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpPPC64LoweredAtomicLoad64) + v.AuxInt = int64ToAuxInt(0) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValuePPC64_OpAtomicLoadPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoadPtr ptr mem) + // result: (LoweredAtomicLoadPtr [1] ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpPPC64LoweredAtomicLoadPtr) + v.AuxInt = int64ToAuxInt(1) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValuePPC64_OpAtomicStore32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicStore32 ptr val mem) + // result: (LoweredAtomicStore32 [1] ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpPPC64LoweredAtomicStore32) + v.AuxInt = int64ToAuxInt(1) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValuePPC64_OpAtomicStore64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicStore64 ptr val mem) + // result: (LoweredAtomicStore64 [1] ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpPPC64LoweredAtomicStore64) + v.AuxInt = int64ToAuxInt(1) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValuePPC64_OpAtomicStore8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicStore8 ptr val mem) + // result: (LoweredAtomicStore8 [1] ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + 
v.reset(OpPPC64LoweredAtomicStore8) + v.AuxInt = int64ToAuxInt(1) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValuePPC64_OpAtomicStoreRel32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicStoreRel32 ptr val mem) + // result: (LoweredAtomicStore32 [0] ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpPPC64LoweredAtomicStore32) + v.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValuePPC64_OpAtomicStoreRel64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicStoreRel64 ptr val mem) + // result: (LoweredAtomicStore64 [0] ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpPPC64LoweredAtomicStore64) + v.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValuePPC64_OpAvg64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Avg64u x y) + // result: (ADD (SRDconst (SUB x y) [1]) y) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ADD) + v0 := b.NewValue0(v.Pos, OpPPC64SRDconst, t) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpPPC64SUB, t) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg2(v0, y) + return true + } +} +func rewriteValuePPC64_OpBitLen32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen32 x) + // result: (SUBFCconst [32] (CNTLZW x)) + for { + x := v_0 + v.reset(OpPPC64SUBFCconst) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpPPC64CNTLZW, typ.Int) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpBitLen64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen64 x) + // result: (SUBFCconst [64] (CNTLZD x)) + for { + x := v_0 + v.reset(OpPPC64SUBFCconst) + v.AuxInt = int64ToAuxInt(64) + v0 := b.NewValue0(v.Pos, OpPPC64CNTLZD, typ.Int) + v0.AddArg(x) + 
v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpBswap16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Bswap16 x) + // cond: buildcfg.GOPPC64>=10 + // result: (BRH x) + for { + x := v_0 + if !(buildcfg.GOPPC64 >= 10) { + break + } + v.reset(OpPPC64BRH) + v.AddArg(x) + return true + } + // match: (Bswap16 x:(MOVHZload [off] {sym} ptr mem)) + // result: @x.Block (MOVHBRload (MOVDaddr [off] {sym} ptr) mem) + for { + x := v_0 + if x.Op != OpPPC64MOVHZload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + b = x.Block + v0 := b.NewValue0(x.Pos, OpPPC64MOVHBRload, typ.UInt16) + v.copyOf(v0) + v1 := b.NewValue0(x.Pos, OpPPC64MOVDaddr, ptr.Type) + v1.AuxInt = int32ToAuxInt(off) + v1.Aux = symToAux(sym) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + return true + } + // match: (Bswap16 x:(MOVHZloadidx ptr idx mem)) + // result: @x.Block (MOVHBRloadidx ptr idx mem) + for { + x := v_0 + if x.Op != OpPPC64MOVHZloadidx { + break + } + mem := x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + b = x.Block + v0 := b.NewValue0(v.Pos, OpPPC64MOVHBRloadidx, typ.Int16) + v.copyOf(v0) + v0.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValuePPC64_OpBswap32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Bswap32 x) + // cond: buildcfg.GOPPC64>=10 + // result: (BRW x) + for { + x := v_0 + if !(buildcfg.GOPPC64 >= 10) { + break + } + v.reset(OpPPC64BRW) + v.AddArg(x) + return true + } + // match: (Bswap32 x:(MOVWZload [off] {sym} ptr mem)) + // result: @x.Block (MOVWBRload (MOVDaddr [off] {sym} ptr) mem) + for { + x := v_0 + if x.Op != OpPPC64MOVWZload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + b = x.Block + v0 := b.NewValue0(x.Pos, OpPPC64MOVWBRload, typ.UInt32) + v.copyOf(v0) + v1 := b.NewValue0(x.Pos, OpPPC64MOVDaddr, ptr.Type) + 
v1.AuxInt = int32ToAuxInt(off) + v1.Aux = symToAux(sym) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + return true + } + // match: (Bswap32 x:(MOVWZloadidx ptr idx mem)) + // result: @x.Block (MOVWBRloadidx ptr idx mem) + for { + x := v_0 + if x.Op != OpPPC64MOVWZloadidx { + break + } + mem := x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + b = x.Block + v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRloadidx, typ.Int32) + v.copyOf(v0) + v0.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValuePPC64_OpBswap64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Bswap64 x) + // cond: buildcfg.GOPPC64>=10 + // result: (BRD x) + for { + x := v_0 + if !(buildcfg.GOPPC64 >= 10) { + break + } + v.reset(OpPPC64BRD) + v.AddArg(x) + return true + } + // match: (Bswap64 x:(MOVDload [off] {sym} ptr mem)) + // result: @x.Block (MOVDBRload (MOVDaddr [off] {sym} ptr) mem) + for { + x := v_0 + if x.Op != OpPPC64MOVDload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + b = x.Block + v0 := b.NewValue0(x.Pos, OpPPC64MOVDBRload, typ.UInt64) + v.copyOf(v0) + v1 := b.NewValue0(x.Pos, OpPPC64MOVDaddr, ptr.Type) + v1.AuxInt = int32ToAuxInt(off) + v1.Aux = symToAux(sym) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + return true + } + // match: (Bswap64 x:(MOVDloadidx ptr idx mem)) + // result: @x.Block (MOVDBRloadidx ptr idx mem) + for { + x := v_0 + if x.Op != OpPPC64MOVDloadidx { + break + } + mem := x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + b = x.Block + v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRloadidx, typ.Int64) + v.copyOf(v0) + v0.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValuePPC64_OpCom16(v *Value) bool { + v_0 := v.Args[0] + // match: (Com16 x) + // result: (NOR x x) + for { + x := v_0 + v.reset(OpPPC64NOR) + v.AddArg2(x, x) + return true + } +} +func rewriteValuePPC64_OpCom32(v *Value) bool { + v_0 := v.Args[0] + // match: (Com32 x) + // 
result: (NOR x x) + for { + x := v_0 + v.reset(OpPPC64NOR) + v.AddArg2(x, x) + return true + } +} +func rewriteValuePPC64_OpCom64(v *Value) bool { + v_0 := v.Args[0] + // match: (Com64 x) + // result: (NOR x x) + for { + x := v_0 + v.reset(OpPPC64NOR) + v.AddArg2(x, x) + return true + } +} +func rewriteValuePPC64_OpCom8(v *Value) bool { + v_0 := v.Args[0] + // match: (Com8 x) + // result: (NOR x x) + for { + x := v_0 + v.reset(OpPPC64NOR) + v.AddArg2(x, x) + return true + } +} +func rewriteValuePPC64_OpCondSelect(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (CondSelect x y (SETBC [a] cmp)) + // result: (ISEL [a] x y cmp) + for { + x := v_0 + y := v_1 + if v_2.Op != OpPPC64SETBC { + break + } + a := auxIntToInt32(v_2.AuxInt) + cmp := v_2.Args[0] + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(a) + v.AddArg3(x, y, cmp) + return true + } + // match: (CondSelect x y (SETBCR [a] cmp)) + // result: (ISEL [a+4] x y cmp) + for { + x := v_0 + y := v_1 + if v_2.Op != OpPPC64SETBCR { + break + } + a := auxIntToInt32(v_2.AuxInt) + cmp := v_2.Args[0] + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(a + 4) + v.AddArg3(x, y, cmp) + return true + } + // match: (CondSelect x y bool) + // cond: flagArg(bool) == nil + // result: (ISEL [6] x y (Select1 (ANDCCconst [1] bool))) + for { + x := v_0 + y := v_1 + bool := v_2 + if !(flagArg(bool) == nil) { + break + } + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(1) + v1.AddArg(bool) + v0.AddArg(v1) + v.AddArg3(x, y, v0) + return true + } + return false +} +func rewriteValuePPC64_OpConst16(v *Value) bool { + // match: (Const16 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt16(v.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + 
return true + } +} +func rewriteValuePPC64_OpConst32(v *Value) bool { + // match: (Const32 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt32(v.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValuePPC64_OpConst64(v *Value) bool { + // match: (Const64 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt64(v.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValuePPC64_OpConst8(v *Value) bool { + // match: (Const8 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt8(v.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValuePPC64_OpConstBool(v *Value) bool { + // match: (ConstBool [t]) + // result: (MOVDconst [b2i(t)]) + for { + t := auxIntToBool(v.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(b2i(t)) + return true + } +} +func rewriteValuePPC64_OpConstNil(v *Value) bool { + // match: (ConstNil) + // result: (MOVDconst [0]) + for { + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } +} +func rewriteValuePPC64_OpCopysign(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Copysign x y) + // result: (FCPSGN y x) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64FCPSGN) + v.AddArg2(y, x) + return true + } +} +func rewriteValuePPC64_OpCtz16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz16 x) + // result: (POPCNTW (MOVHZreg (ANDN (ADDconst [-1] x) x))) + for { + x := v_0 + v.reset(OpPPC64POPCNTW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.Int16) + v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int16) + v2.AuxInt = int64ToAuxInt(-1) + v2.AddArg(x) + v1.AddArg2(v2, x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpCtz32(v *Value) bool { + v_0 := 
v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz32 x) + // cond: buildcfg.GOPPC64<=8 + // result: (POPCNTW (MOVWZreg (ANDN (ADDconst [-1] x) x))) + for { + x := v_0 + if !(buildcfg.GOPPC64 <= 8) { + break + } + v.reset(OpPPC64POPCNTW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64) + v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.Int) + v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int) + v2.AuxInt = int64ToAuxInt(-1) + v2.AddArg(x) + v1.AddArg2(v2, x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Ctz32 x) + // result: (CNTTZW (MOVWZreg x)) + for { + x := v_0 + v.reset(OpPPC64CNTTZW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpCtz64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz64 x) + // cond: buildcfg.GOPPC64<=8 + // result: (POPCNTD (ANDN (ADDconst [-1] x) x)) + for { + x := v_0 + if !(buildcfg.GOPPC64 <= 8) { + break + } + v.reset(OpPPC64POPCNTD) + v0 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.Int64) + v1 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(-1) + v1.AddArg(x) + v0.AddArg2(v1, x) + v.AddArg(v0) + return true + } + // match: (Ctz64 x) + // result: (CNTTZD x) + for { + x := v_0 + v.reset(OpPPC64CNTTZD) + v.AddArg(x) + return true + } +} +func rewriteValuePPC64_OpCtz8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz8 x) + // result: (POPCNTB (MOVBZreg (ANDN (ADDconst [-1] x) x))) + for { + x := v_0 + v.reset(OpPPC64POPCNTB) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.UInt8) + v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.UInt8) + v2.AuxInt = int64ToAuxInt(-1) + v2.AddArg(x) + v1.AddArg2(v2, x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpCvt32Fto32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (Cvt32Fto32 x) + // result: (MFVSRD (FCTIWZ x)) + for { + x := v_0 + v.reset(OpPPC64MFVSRD) + v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, typ.Float64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpCvt32Fto64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt32Fto64 x) + // result: (MFVSRD (FCTIDZ x)) + for { + x := v_0 + v.reset(OpPPC64MFVSRD) + v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, typ.Float64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpCvt32to32F(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt32to32F x) + // result: (FCFIDS (MTVSRD (SignExt32to64 x))) + for { + x := v_0 + v.reset(OpPPC64FCFIDS) + v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpCvt32to64F(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt32to64F x) + // result: (FCFID (MTVSRD (SignExt32to64 x))) + for { + x := v_0 + v.reset(OpPPC64FCFID) + v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpCvt64Fto32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt64Fto32 x) + // result: (MFVSRD (FCTIWZ x)) + for { + x := v_0 + v.reset(OpPPC64MFVSRD) + v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, typ.Float64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpCvt64Fto64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt64Fto64 x) + // result: (MFVSRD (FCTIDZ x)) + for { + x := v_0 + v.reset(OpPPC64MFVSRD) + v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, typ.Float64) + 
v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpCvt64to32F(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt64to32F x) + // result: (FCFIDS (MTVSRD x)) + for { + x := v_0 + v.reset(OpPPC64FCFIDS) + v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpCvt64to64F(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt64to64F x) + // result: (FCFID (MTVSRD x)) + for { + x := v_0 + v.reset(OpPPC64FCFID) + v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpDiv16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 [false] x y) + // result: (DIVW (SignExt16to32 x) (SignExt16to32 y)) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + y := v_1 + v.reset(OpPPC64DIVW) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValuePPC64_OpDiv16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16u x y) + // result: (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64DIVWU) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValuePPC64_OpDiv32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Div32 [false] x y) + // result: (DIVW x y) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + y := v_1 + v.reset(OpPPC64DIVW) + v.AddArg2(x, y) + return true + } + return false +} +func 
rewriteValuePPC64_OpDiv64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Div64 [false] x y) + // result: (DIVD x y) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + y := v_1 + v.reset(OpPPC64DIVD) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValuePPC64_OpDiv8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (DIVW (SignExt8to32 x) (SignExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64DIVW) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValuePPC64_OpDiv8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64DIVWU) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValuePPC64_OpEq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq16 x y) + // cond: x.Type.IsSigned() && y.Type.IsSigned() + // result: (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + y := v_1 + if !(x.Type.IsSigned() && y.Type.IsSigned()) { + continue + } + v.reset(OpPPC64Equal) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } + break + } + // match: (Eq16 x y) + // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + x := v_0 + y 
:= v_1 + v.reset(OpPPC64Equal) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpEq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32 x y) + // result: (Equal (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64Equal) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpEq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32F x y) + // result: (Equal (FCMPU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64Equal) + v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpEq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq64 x y) + // result: (Equal (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64Equal) + v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpEq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq64F x y) + // result: (Equal (FCMPU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64Equal) + v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpEq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq8 x y) + // cond: x.Type.IsSigned() && y.Type.IsSigned() + // result: (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + y := v_1 + if !(x.Type.IsSigned() && y.Type.IsSigned()) { + continue + } 
+ v.reset(OpPPC64Equal) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } + break + } + // match: (Eq8 x y) + // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64Equal) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpEqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqB x y) + // result: (Select0 (ANDCCconst [1] (EQV x y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v.Type = typ.Int + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpPPC64EQV, typ.Int64) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpEqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (EqPtr x y) + // result: (Equal (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64Equal) + v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpIsInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (IsInBounds idx len) + // result: (LessThan (CMPU idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpPPC64LessThan) + v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpIsNonNil(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (IsNonNil ptr) + // result: 
(NotEqual (CMPconst [0] ptr)) + for { + ptr := v_0 + v.reset(OpPPC64NotEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(0) + v0.AddArg(ptr) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpIsSliceInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (IsSliceInBounds idx len) + // result: (LessEqual (CMPU idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpPPC64LessEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16 x y) + // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64LessEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLeq16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16U x y) + // result: (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64LessEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32 x y) + // result: (LessEqual (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64LessEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } 
+} +func rewriteValuePPC64_OpLeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32F x y) + // result: (FLessEqual (FCMPU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64FLessEqual) + v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLeq32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32U x y) + // result: (LessEqual (CMPWU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64LessEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64 x y) + // result: (LessEqual (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64LessEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64F x y) + // result: (FLessEqual (FCMPU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64FLessEqual) + v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLeq64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64U x y) + // result: (LessEqual (CMPU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64LessEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8 x y) + // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64LessEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, 
types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLeq8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8U x y) + // result: (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64LessEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLess16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16 x y) + // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64LessThan) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLess16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16U x y) + // result: (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64LessThan) + v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLess32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32 x y) + // result: (LessThan (CMPW x y)) + for { + 
x := v_0 + y := v_1 + v.reset(OpPPC64LessThan) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLess32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32F x y) + // result: (FLessThan (FCMPU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64FLessThan) + v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLess32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32U x y) + // result: (LessThan (CMPWU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64LessThan) + v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLess64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64 x y) + // result: (LessThan (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64LessThan) + v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLess64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64F x y) + // result: (FLessThan (FCMPU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64FLessThan) + v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLess64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64U x y) + // result: (LessThan (CMPU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64LessThan) + v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLess8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8 x y) + // 
result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64LessThan) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLess8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8U x y) + // result: (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64LessThan) + v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Load ptr mem) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (MOVDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpPPC64MOVDload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitInt(t) && t.IsSigned() + // result: (MOVWload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpPPC64MOVWload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitInt(t) && !t.IsSigned() + // result: (MOVWZload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpPPC64MOVWZload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is16BitInt(t) && t.IsSigned() + // result: (MOVHload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if 
!(is16BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpPPC64MOVHload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is16BitInt(t) && !t.IsSigned() + // result: (MOVHZload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpPPC64MOVHZload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.IsBoolean() + // result: (MOVBZload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean()) { + break + } + v.reset(OpPPC64MOVBZload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is8BitInt(t) && t.IsSigned() + // result: (MOVBreg (MOVBZload ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is8BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpPPC64MOVBreg) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } + // match: (Load ptr mem) + // cond: is8BitInt(t) && !t.IsSigned() + // result: (MOVBZload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is8BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpPPC64MOVBZload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (FMOVSload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + v.reset(OpPPC64FMOVSload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (FMOVDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(OpPPC64FMOVDload) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValuePPC64_OpLocalAddr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (MOVDaddr {sym} (SPanchored base mem)) 
+ for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(OpPPC64MOVDaddr) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) + v.AddArg(v0) + return true + } + // match: (LocalAddr {sym} base _) + // cond: !t.Elem().HasPointers() + // result: (MOVDaddr {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(OpPPC64MOVDaddr) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + return false +} +func rewriteValuePPC64_OpLsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SLD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLD) + v.AddArg2(x, y) + return true + } + // match: (Lsh16x16 x y) + // result: (ISEL [2] (SLD (MOVHZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFF0] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SLD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v4.AuxInt = int64ToAuxInt(0xFFF0) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpLsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SLD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLD) + v.AddArg2(x, y) + return true + } + // match: (Lsh16x32 x 
y) + // result: (ISEL [0] (SLD (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst y [16])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SLD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(16) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpLsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x64 x (MOVDconst [c])) + // cond: uint64(c) < 16 + // result: (SLWconst x [c]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 16) { + break + } + v.reset(OpPPC64SLWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (Lsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SLD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLD) + v.AddArg2(x, y) + return true + } + // match: (Lsh16x64 x y) + // result: (ISEL [0] (SLD (MOVHZreg x) y) (MOVDconst [0]) (CMPUconst y [16])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SLD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(16) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpLsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x8 x y) + // cond: shiftIsBounded(v) + // result: 
(SLD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLD) + v.AddArg2(x, y) + return true + } + // match: (Lsh16x8 x y) + // result: (ISEL [2] (SLD (MOVHZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00F0] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SLD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v4.AuxInt = int64ToAuxInt(0x00F0) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpLsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLW) + v.AddArg2(x, y) + return true + } + // match: (Lsh32x16 x y) + // result: (ISEL [2] (SLW x y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFE0] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(0xFFE0) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpLsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + 
for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLW) + v.AddArg2(x, y) + return true + } + // match: (Lsh32x32 x y) + // result: (ISEL [0] (SLW x y) (MOVDconst [0]) (CMPWUconst y [32])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpLsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x64 x (MOVDconst [c])) + // cond: uint64(c) < 32 + // result: (SLWconst x [c]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 32) { + break + } + v.reset(OpPPC64SLWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (Lsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLW) + v.AddArg2(x, y) + return true + } + // match: (Lsh32x64 x y) + // result: (ISEL [0] (SLW x y) (MOVDconst [0]) (CMPUconst y [32])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(32) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpLsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for 
{ + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLW) + v.AddArg2(x, y) + return true + } + // match: (Lsh32x8 x y) + // result: (ISEL [2] (SLW x y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00E0] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(0x00E0) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpLsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SLD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLD) + v.AddArg2(x, y) + return true + } + // match: (Lsh64x16 x y) + // result: (ISEL [2] (SLD x y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFC0] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SLD, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(0xFFC0) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpLsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SLD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLD) + 
v.AddArg2(x, y) + return true + } + // match: (Lsh64x32 x y) + // result: (ISEL [0] (SLD x y) (MOVDconst [0]) (CMPWUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SLD, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpLsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x64 x (MOVDconst [c])) + // cond: uint64(c) < 64 + // result: (SLDconst x [c]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 64) { + break + } + v.reset(OpPPC64SLDconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (Lsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SLD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLD) + v.AddArg2(x, y) + return true + } + // match: (Lsh64x64 x y) + // result: (ISEL [0] (SLD x y) (MOVDconst [0]) (CMPUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SLD, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpLsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SLD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLD) + 
v.AddArg2(x, y) + return true + } + // match: (Lsh64x8 x y) + // result: (ISEL [2] (SLD x y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00C0] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SLD, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(0x00C0) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpLsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SLD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLD) + v.AddArg2(x, y) + return true + } + // match: (Lsh8x16 x y) + // result: (ISEL [2] (SLD (MOVBZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFF8] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SLD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v4.AuxInt = int64ToAuxInt(0xFFF8) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpLsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SLD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLD) + v.AddArg2(x, y) + 
return true + } + // match: (Lsh8x32 x y) + // result: (ISEL [0] (SLD (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst y [8])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SLD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(8) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpLsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x64 x (MOVDconst [c])) + // cond: uint64(c) < 8 + // result: (SLWconst x [c]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 8) { + break + } + v.reset(OpPPC64SLWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (Lsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SLD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLD) + v.AddArg2(x, y) + return true + } + // match: (Lsh8x64 x y) + // result: (ISEL [0] (SLD (MOVBZreg x) y) (MOVDconst [0]) (CMPUconst y [8])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SLD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(8) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpLsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x8 x y) + // cond: 
shiftIsBounded(v) + // result: (SLD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLD) + v.AddArg2(x, y) + return true + } + // match: (Lsh8x8 x y) + // result: (ISEL [2] (SLD (MOVBZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00F8] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SLD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v4.AuxInt = int64ToAuxInt(0x00F8) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpMod16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16 x y) + // result: (Mod32 (SignExt16to32 x) (SignExt16to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpMod32) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValuePPC64_OpMod16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16u x y) + // result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpMod32u) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValuePPC64_OpMod32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32 x y) + // cond: buildcfg.GOPPC64 >= 9 + // result: (MODSW x y) + for { + x := v_0 + y := v_1 + 
if !(buildcfg.GOPPC64 >= 9) { + break + } + v.reset(OpPPC64MODSW) + v.AddArg2(x, y) + return true + } + // match: (Mod32 x y) + // cond: buildcfg.GOPPC64 <= 8 + // result: (SUB x (MULLW y (DIVW x y))) + for { + x := v_0 + y := v_1 + if !(buildcfg.GOPPC64 <= 8) { + break + } + v.reset(OpPPC64SUB) + v0 := b.NewValue0(v.Pos, OpPPC64MULLW, typ.Int32) + v1 := b.NewValue0(v.Pos, OpPPC64DIVW, typ.Int32) + v1.AddArg2(x, y) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuePPC64_OpMod32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32u x y) + // cond: buildcfg.GOPPC64 >= 9 + // result: (MODUW x y) + for { + x := v_0 + y := v_1 + if !(buildcfg.GOPPC64 >= 9) { + break + } + v.reset(OpPPC64MODUW) + v.AddArg2(x, y) + return true + } + // match: (Mod32u x y) + // cond: buildcfg.GOPPC64 <= 8 + // result: (SUB x (MULLW y (DIVWU x y))) + for { + x := v_0 + y := v_1 + if !(buildcfg.GOPPC64 <= 8) { + break + } + v.reset(OpPPC64SUB) + v0 := b.NewValue0(v.Pos, OpPPC64MULLW, typ.Int32) + v1 := b.NewValue0(v.Pos, OpPPC64DIVWU, typ.Int32) + v1.AddArg2(x, y) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuePPC64_OpMod64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod64 x y) + // cond: buildcfg.GOPPC64 >=9 + // result: (MODSD x y) + for { + x := v_0 + y := v_1 + if !(buildcfg.GOPPC64 >= 9) { + break + } + v.reset(OpPPC64MODSD) + v.AddArg2(x, y) + return true + } + // match: (Mod64 x y) + // cond: buildcfg.GOPPC64 <=8 + // result: (SUB x (MULLD y (DIVD x y))) + for { + x := v_0 + y := v_1 + if !(buildcfg.GOPPC64 <= 8) { + break + } + v.reset(OpPPC64SUB) + v0 := b.NewValue0(v.Pos, OpPPC64MULLD, typ.Int64) + v1 := b.NewValue0(v.Pos, OpPPC64DIVD, typ.Int64) + v1.AddArg2(x, y) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + return false +} +func 
rewriteValuePPC64_OpMod64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod64u x y) + // cond: buildcfg.GOPPC64 >= 9 + // result: (MODUD x y) + for { + x := v_0 + y := v_1 + if !(buildcfg.GOPPC64 >= 9) { + break + } + v.reset(OpPPC64MODUD) + v.AddArg2(x, y) + return true + } + // match: (Mod64u x y) + // cond: buildcfg.GOPPC64 <= 8 + // result: (SUB x (MULLD y (DIVDU x y))) + for { + x := v_0 + y := v_1 + if !(buildcfg.GOPPC64 <= 8) { + break + } + v.reset(OpPPC64SUB) + v0 := b.NewValue0(v.Pos, OpPPC64MULLD, typ.Int64) + v1 := b.NewValue0(v.Pos, OpPPC64DIVDU, typ.Int64) + v1.AddArg2(x, y) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuePPC64_OpMod8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8 x y) + // result: (Mod32 (SignExt8to32 x) (SignExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpMod32) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValuePPC64_OpMod8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8u x y) + // result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpMod32u) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValuePPC64_OpMove(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Move [0] _ _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBZload src 
mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpPPC64MOVBstore) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] dst src mem) + // result: (MOVHstore dst (MOVHZload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpPPC64MOVHstore) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [4] dst src mem) + // result: (MOVWstore dst (MOVWZload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpPPC64MOVWstore) + v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [8] {t} dst src mem) + // result: (MOVDstore dst (MOVDload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpPPC64MOVDstore) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, typ.Int64) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [3] dst src mem) + // result: (MOVBstore [2] dst (MOVBZload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpPPC64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpPPC64MOVHload, typ.Int16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [5] dst src mem) + // result: (MOVBstore [4] dst (MOVBZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem)) + 
for { + if auxIntToInt64(v.AuxInt) != 5 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpPPC64MOVBstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [6] dst src mem) + // result: (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpPPC64MOVHstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [7] dst src mem) + // result: (MOVBstore [6] dst (MOVBZload [6] src mem) (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 7 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpPPC64MOVBstore) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(6) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16) + v2.AuxInt = int32ToAuxInt(4) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [s] dst src mem) + // cond: 
s > 8 && buildcfg.GOPPC64 <= 8 && logLargeCopy(v, s) + // result: (LoweredMove [s] dst src mem) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 8 && buildcfg.GOPPC64 <= 8 && logLargeCopy(v, s)) { + break + } + v.reset(OpPPC64LoweredMove) + v.AuxInt = int64ToAuxInt(s) + v.AddArg3(dst, src, mem) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 8 && s <= 64 && buildcfg.GOPPC64 >= 9 + // result: (LoweredQuadMoveShort [s] dst src mem) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 8 && s <= 64 && buildcfg.GOPPC64 >= 9) { + break + } + v.reset(OpPPC64LoweredQuadMoveShort) + v.AuxInt = int64ToAuxInt(s) + v.AddArg3(dst, src, mem) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 8 && buildcfg.GOPPC64 >= 9 && logLargeCopy(v, s) + // result: (LoweredQuadMove [s] dst src mem) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 8 && buildcfg.GOPPC64 >= 9 && logLargeCopy(v, s)) { + break + } + v.reset(OpPPC64LoweredQuadMove) + v.AuxInt = int64ToAuxInt(s) + v.AddArg3(dst, src, mem) + return true + } + return false +} +func rewriteValuePPC64_OpNeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq16 x y) + // cond: x.Type.IsSigned() && y.Type.IsSigned() + // result: (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + y := v_1 + if !(x.Type.IsSigned() && y.Type.IsSigned()) { + continue + } + v.reset(OpPPC64NotEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } + break + } + // match: (Neq16 x y) + // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + x := 
v_0 + y := v_1 + v.reset(OpPPC64NotEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpNeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq32 x y) + // result: (NotEqual (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64NotEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpNeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq32F x y) + // result: (NotEqual (FCMPU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64NotEqual) + v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpNeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq64 x y) + // result: (NotEqual (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64NotEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpNeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq64F x y) + // result: (NotEqual (FCMPU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64NotEqual) + v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpNeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq8 x y) + // cond: x.Type.IsSigned() && y.Type.IsSigned() + // result: (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + y := v_1 + if 
!(x.Type.IsSigned() && y.Type.IsSigned()) { + continue + } + v.reset(OpPPC64NotEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } + break + } + // match: (Neq8 x y) + // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64NotEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpNeqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (NeqPtr x y) + // result: (NotEqual (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpPPC64NotEqual) + v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpNot(v *Value) bool { + v_0 := v.Args[0] + // match: (Not x) + // result: (XORconst [1] x) + for { + x := v_0 + v.reset(OpPPC64XORconst) + v.AuxInt = int64ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValuePPC64_OpOffPtr(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (OffPtr [off] ptr) + // result: (ADD (MOVDconst [off]) ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(OpPPC64ADD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(off) + v.AddArg2(v0, ptr) + return true + } +} +func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADD l:(MULLD x y) z) + // cond: buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) + // result: (MADDLD x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + l := v_0 + if l.Op 
!= OpPPC64MULLD { + continue + } + y := l.Args[1] + x := l.Args[0] + z := v_1 + if !(buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l)) { + continue + } + v.reset(OpPPC64MADDLD) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (ADD x (MOVDconst [c])) + // cond: is32Bit(c) && !t.IsPtr() + // result: (ADDconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + continue + } + t := v_1.Type + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c) && !t.IsPtr()) { + continue + } + v.reset(OpPPC64ADDconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuePPC64_OpPPC64ADDE(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ADDE x y (Select1 (ADDCconst (MOVDconst [0]) [-1]))) + // result: (ADDC x y) + for { + x := v_0 + y := v_1 + if v_2.Op != OpSelect1 || v_2.Type != typ.UInt64 { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpPPC64ADDCconst || auxIntToInt64(v_2_0.AuxInt) != -1 { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 { + break + } + v.reset(OpPPC64ADDC) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ADDconst [c] (ADDconst [d] x)) + // cond: is32Bit(c+d) + // result: (ADDconst [c+d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64ADDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(c + d)) { + break + } + v.reset(OpPPC64ADDconst) + v.AuxInt = int64ToAuxInt(c + d) + v.AddArg(x) + return true + } + // match: (ADDconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ADDconst [c] (MOVDaddr [d] {sym} x)) + // cond: is32Bit(c+int64(d)) + // result: 
(MOVDaddr [int32(c+int64(d))] {sym} x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64MOVDaddr { + break + } + d := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + x := v_0.Args[0] + if !(is32Bit(c + int64(d))) { + break + } + v.reset(OpPPC64MOVDaddr) + v.AuxInt = int32ToAuxInt(int32(c + int64(d))) + v.Aux = symToAux(sym) + v.AddArg(x) + return true + } + // match: (ADDconst [c] x:(SP)) + // cond: is32Bit(c) + // result: (MOVDaddr [int32(c)] x) + for { + c := auxIntToInt64(v.AuxInt) + x := v_0 + if x.Op != OpSP || !(is32Bit(c)) { + break + } + v.reset(OpPPC64MOVDaddr) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (ADDconst [c] (SUBFCconst [d] x)) + // cond: is32Bit(c+d) + // result: (SUBFCconst [c+d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64SUBFCconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(c + d)) { + break + } + v.reset(OpPPC64SUBFCconst) + v.AuxInt = int64ToAuxInt(c + d) + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64AND(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AND (MOVDconst [m]) (ROTLWconst [r] x)) + // cond: isPPC64WordRotateMask(m) + // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64MOVDconst { + continue + } + m := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpPPC64ROTLWconst { + continue + } + r := auxIntToInt64(v_1.AuxInt) + x := v_1.Args[0] + if !(isPPC64WordRotateMask(m)) { + continue + } + v.reset(OpPPC64RLWINM) + v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32)) + v.AddArg(x) + return true + } + break + } + // match: (AND (MOVDconst [m]) (ROTLW x r)) + // cond: isPPC64WordRotateMask(m) + // result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != 
OpPPC64MOVDconst { + continue + } + m := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpPPC64ROTLW { + continue + } + r := v_1.Args[1] + x := v_1.Args[0] + if !(isPPC64WordRotateMask(m)) { + continue + } + v.reset(OpPPC64RLWNM) + v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32)) + v.AddArg2(x, r) + return true + } + break + } + // match: (AND (MOVDconst [m]) (SRWconst x [s])) + // cond: mergePPC64RShiftMask(m,s,32) == 0 + // result: (MOVDconst [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64MOVDconst { + continue + } + m := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpPPC64SRWconst { + continue + } + s := auxIntToInt64(v_1.AuxInt) + if !(mergePPC64RShiftMask(m, s, 32) == 0) { + continue + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + break + } + // match: (AND (MOVDconst [m]) (SRWconst x [s])) + // cond: mergePPC64AndSrwi(m,s) != 0 + // result: (RLWINM [mergePPC64AndSrwi(m,s)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64MOVDconst { + continue + } + m := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpPPC64SRWconst { + continue + } + s := auxIntToInt64(v_1.AuxInt) + x := v_1.Args[0] + if !(mergePPC64AndSrwi(m, s) != 0) { + continue + } + v.reset(OpPPC64RLWINM) + v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s)) + v.AddArg(x) + return true + } + break + } + // match: (AND x (NOR y y)) + // result: (ANDN x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpPPC64NOR { + continue + } + y := v_1.Args[1] + if y != v_1.Args[0] { + continue + } + v.reset(OpPPC64ANDN) + v.AddArg2(x, y) + return true + } + break + } + // match: (AND (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [c&d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64MOVDconst { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpPPC64MOVDconst { + continue + } + d := 
auxIntToInt64(v_1.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(c & d) + return true + } + break + } + // match: (AND x (MOVDconst [-1])) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 { + continue + } + v.copyOf(x) + return true + } + break + } + // match: (AND x (MOVDconst [c])) + // cond: isU16Bit(c) + // result: (Select0 (ANDCCconst [c] x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isU16Bit(c)) { + continue + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (AND (MOVDconst [c]) y:(MOVWZreg _)) + // cond: c&0xFFFFFFFF == 0xFFFFFFFF + // result: y + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64MOVDconst { + continue + } + c := auxIntToInt64(v_0.AuxInt) + y := v_1 + if y.Op != OpPPC64MOVWZreg || !(c&0xFFFFFFFF == 0xFFFFFFFF) { + continue + } + v.copyOf(y) + return true + } + break + } + // match: (AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) + // result: (MOVWZreg x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0xFFFFFFFF { + continue + } + y := v_1 + if y.Op != OpPPC64MOVWreg { + continue + } + x := y.Args[0] + v.reset(OpPPC64MOVWZreg) + v.AddArg(x) + return true + } + break + } + // match: (AND (MOVDconst [c]) x:(MOVBZload _ _)) + // result: (Select0 (ANDCCconst [c&0xFF] x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64MOVDconst { + continue + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + if x.Op != OpPPC64MOVBZload { + continue + } + v.reset(OpSelect0) + v0 := 
b.NewValue0(x.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(c & 0xFF) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + return false +} +func rewriteValuePPC64_OpPPC64ANDCCconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ANDCCconst [c] (Select0 (ANDCCconst [d] x))) + // result: (ANDCCconst [c&d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpSelect0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64ANDCCconst { + break + } + d := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + v.reset(OpPPC64ANDCCconst) + v.AuxInt = int64ToAuxInt(c & d) + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64ANDN(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ANDN (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [c&^d]) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpPPC64MOVDconst { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(c &^ d) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64BRD(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BRD x:(MOVDload [off] {sym} ptr mem)) + // cond: x.Uses == 1 + // result: @x.Block (MOVDBRload (MOVDaddr [off] {sym} ptr) mem) + for { + x := v_0 + if x.Op != OpPPC64MOVDload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpPPC64MOVDBRload, typ.UInt64) + v.copyOf(v0) + v1 := b.NewValue0(x.Pos, OpPPC64MOVDaddr, ptr.Type) + v1.AuxInt = int32ToAuxInt(off) + v1.Aux = symToAux(sym) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + return true + } + // match: (BRD x:(MOVDloadidx ptr idx mem)) + // cond: x.Uses == 1 + // result: @x.Block (MOVDBRloadidx ptr idx mem) + for { + x := v_0 + if x.Op != OpPPC64MOVDloadidx { 
+ break + } + mem := x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + if !(x.Uses == 1) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRloadidx, typ.Int64) + v.copyOf(v0) + v0.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64BRH(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BRH x:(MOVHZload [off] {sym} ptr mem)) + // cond: x.Uses == 1 + // result: @x.Block (MOVHBRload (MOVDaddr [off] {sym} ptr) mem) + for { + x := v_0 + if x.Op != OpPPC64MOVHZload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpPPC64MOVHBRload, typ.UInt16) + v.copyOf(v0) + v1 := b.NewValue0(x.Pos, OpPPC64MOVDaddr, ptr.Type) + v1.AuxInt = int32ToAuxInt(off) + v1.Aux = symToAux(sym) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + return true + } + // match: (BRH x:(MOVHZloadidx ptr idx mem)) + // cond: x.Uses == 1 + // result: @x.Block (MOVHBRloadidx ptr idx mem) + for { + x := v_0 + if x.Op != OpPPC64MOVHZloadidx { + break + } + mem := x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + if !(x.Uses == 1) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpPPC64MOVHBRloadidx, typ.Int16) + v.copyOf(v0) + v0.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64BRW(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BRW x:(MOVWZload [off] {sym} ptr mem)) + // cond: x.Uses == 1 + // result: @x.Block (MOVWBRload (MOVDaddr [off] {sym} ptr) mem) + for { + x := v_0 + if x.Op != OpPPC64MOVWZload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpPPC64MOVWBRload, typ.UInt32) + v.copyOf(v0) + v1 := b.NewValue0(x.Pos, OpPPC64MOVDaddr, ptr.Type) + v1.AuxInt = 
int32ToAuxInt(off) + v1.Aux = symToAux(sym) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + return true + } + // match: (BRW x:(MOVWZloadidx ptr idx mem)) + // cond: x.Uses == 1 + // result: @x.Block (MOVWBRloadidx ptr idx mem) + for { + x := v_0 + if x.Op != OpPPC64MOVWZloadidx { + break + } + mem := x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + if !(x.Uses == 1) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRloadidx, typ.Int32) + v.copyOf(v0) + v0.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64CLRLSLDI(v *Value) bool { + v_0 := v.Args[0] + // match: (CLRLSLDI [c] (SRWconst [s] x)) + // cond: mergePPC64ClrlsldiSrw(int64(c),s) != 0 + // result: (RLWINM [mergePPC64ClrlsldiSrw(int64(c),s)] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpPPC64SRWconst { + break + } + s := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(mergePPC64ClrlsldiSrw(int64(c), s) != 0) { + break + } + v.reset(OpPPC64RLWINM) + v.AuxInt = int64ToAuxInt(mergePPC64ClrlsldiSrw(int64(c), s)) + v.AddArg(x) + return true + } + // match: (CLRLSLDI [c] i:(RLWINM [s] x)) + // cond: mergePPC64ClrlsldiRlwinm(c,s) != 0 + // result: (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x) + for { + c := auxIntToInt32(v.AuxInt) + i := v_0 + if i.Op != OpPPC64RLWINM { + break + } + s := auxIntToInt64(i.AuxInt) + x := i.Args[0] + if !(mergePPC64ClrlsldiRlwinm(c, s) != 0) { + break + } + v.reset(OpPPC64RLWINM) + v.AuxInt = int64ToAuxInt(mergePPC64ClrlsldiRlwinm(c, s)) + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64CMP(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMP x (MOVDconst [c])) + // cond: is16Bit(c) + // result: (CMPconst x [c]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64CMPconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (CMP (MOVDconst 
[c]) y) + // cond: is16Bit(c) + // result: (InvertFlags (CMPconst y [c])) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + y := v_1 + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64InvertFlags) + v0 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (CMP x y) + // cond: canonLessThan(x,y) + // result: (InvertFlags (CMP y x)) + for { + x := v_0 + y := v_1 + if !(canonLessThan(x, y)) { + break + } + v.reset(OpPPC64InvertFlags) + v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64CMPU(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMPU x (MOVDconst [c])) + // cond: isU16Bit(c) + // result: (CMPUconst x [c]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(isU16Bit(c)) { + break + } + v.reset(OpPPC64CMPUconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (CMPU (MOVDconst [c]) y) + // cond: isU16Bit(c) + // result: (InvertFlags (CMPUconst y [c])) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + y := v_1 + if !(isU16Bit(c)) { + break + } + v.reset(OpPPC64InvertFlags) + v0 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (CMPU x y) + // cond: canonLessThan(x,y) + // result: (InvertFlags (CMPU y x)) + for { + x := v_0 + y := v_1 + if !(canonLessThan(x, y)) { + break + } + v.reset(OpPPC64InvertFlags) + v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64CMPUconst(v *Value) bool { + v_0 := v.Args[0] + // match: (CMPUconst [d] (Select0 (ANDCCconst z [c]))) + // cond: uint64(d) > 
uint64(c) + // result: (FlagLT) + for { + d := auxIntToInt64(v.AuxInt) + if v_0.Op != OpSelect0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0_0.AuxInt) + if !(uint64(d) > uint64(c)) { + break + } + v.reset(OpPPC64FlagLT) + return true + } + // match: (CMPUconst (MOVDconst [x]) [y]) + // cond: x==y + // result: (FlagEQ) + for { + y := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64MOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if !(x == y) { + break + } + v.reset(OpPPC64FlagEQ) + return true + } + // match: (CMPUconst (MOVDconst [x]) [y]) + // cond: uint64(x)uint64(y) + // result: (FlagGT) + for { + y := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64MOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if !(uint64(x) > uint64(y)) { + break + } + v.reset(OpPPC64FlagGT) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64CMPW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMPW x (MOVWreg y)) + // result: (CMPW x y) + for { + x := v_0 + if v_1.Op != OpPPC64MOVWreg { + break + } + y := v_1.Args[0] + v.reset(OpPPC64CMPW) + v.AddArg2(x, y) + return true + } + // match: (CMPW (MOVWreg x) y) + // result: (CMPW x y) + for { + if v_0.Op != OpPPC64MOVWreg { + break + } + x := v_0.Args[0] + y := v_1 + v.reset(OpPPC64CMPW) + v.AddArg2(x, y) + return true + } + // match: (CMPW x (MOVDconst [c])) + // cond: is16Bit(c) + // result: (CMPWconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64CMPWconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (CMPW (MOVDconst [c]) y) + // cond: is16Bit(c) + // result: (InvertFlags (CMPWconst y [int32(c)])) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + y := v_1 + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64InvertFlags) + v0 
:= b.NewValue0(v.Pos, OpPPC64CMPWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (CMPW x y) + // cond: canonLessThan(x,y) + // result: (InvertFlags (CMPW y x)) + for { + x := v_0 + y := v_1 + if !(canonLessThan(x, y)) { + break + } + v.reset(OpPPC64InvertFlags) + v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64CMPWU(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMPWU x (MOVWZreg y)) + // result: (CMPWU x y) + for { + x := v_0 + if v_1.Op != OpPPC64MOVWZreg { + break + } + y := v_1.Args[0] + v.reset(OpPPC64CMPWU) + v.AddArg2(x, y) + return true + } + // match: (CMPWU (MOVWZreg x) y) + // result: (CMPWU x y) + for { + if v_0.Op != OpPPC64MOVWZreg { + break + } + x := v_0.Args[0] + y := v_1 + v.reset(OpPPC64CMPWU) + v.AddArg2(x, y) + return true + } + // match: (CMPWU x (MOVDconst [c])) + // cond: isU16Bit(c) + // result: (CMPWUconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(isU16Bit(c)) { + break + } + v.reset(OpPPC64CMPWUconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (CMPWU (MOVDconst [c]) y) + // cond: isU16Bit(c) + // result: (InvertFlags (CMPWUconst y [int32(c)])) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + y := v_1 + if !(isU16Bit(c)) { + break + } + v.reset(OpPPC64InvertFlags) + v0 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (CMPWU x y) + // cond: canonLessThan(x,y) + // result: (InvertFlags (CMPWU y x)) + for { + x := v_0 + y := v_1 + if !(canonLessThan(x, y)) { + break + } + v.reset(OpPPC64InvertFlags) + v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags) + v0.AddArg2(y, 
x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64CMPWUconst(v *Value) bool { + v_0 := v.Args[0] + // match: (CMPWUconst [d] (Select0 (ANDCCconst z [c]))) + // cond: uint64(d) > uint64(c) + // result: (FlagLT) + for { + d := auxIntToInt32(v.AuxInt) + if v_0.Op != OpSelect0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0_0.AuxInt) + if !(uint64(d) > uint64(c)) { + break + } + v.reset(OpPPC64FlagLT) + return true + } + // match: (CMPWUconst (MOVDconst [x]) [y]) + // cond: int32(x)==int32(y) + // result: (FlagEQ) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpPPC64MOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if !(int32(x) == int32(y)) { + break + } + v.reset(OpPPC64FlagEQ) + return true + } + // match: (CMPWUconst (MOVDconst [x]) [y]) + // cond: uint32(x)uint32(y) + // result: (FlagGT) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpPPC64MOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if !(uint32(x) > uint32(y)) { + break + } + v.reset(OpPPC64FlagGT) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64CMPWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (CMPWconst (MOVDconst [x]) [y]) + // cond: int32(x)==int32(y) + // result: (FlagEQ) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpPPC64MOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if !(int32(x) == int32(y)) { + break + } + v.reset(OpPPC64FlagEQ) + return true + } + // match: (CMPWconst (MOVDconst [x]) [y]) + // cond: int32(x)int32(y) + // result: (FlagGT) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpPPC64MOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if !(int32(x) > int32(y)) { + break + } + v.reset(OpPPC64FlagGT) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64CMPconst(v *Value) bool { + v_0 := v.Args[0] + // match: (CMPconst (MOVDconst [x]) [y]) + // cond: x==y + // result: (FlagEQ) + for { + y 
:= auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64MOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if !(x == y) { + break + } + v.reset(OpPPC64FlagEQ) + return true + } + // match: (CMPconst (MOVDconst [x]) [y]) + // cond: xy + // result: (FlagGT) + for { + y := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64MOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if !(x > y) { + break + } + v.reset(OpPPC64FlagGT) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64Equal(v *Value) bool { + v_0 := v.Args[0] + // match: (Equal (FlagEQ)) + // result: (MOVDconst [1]) + for { + if v_0.Op != OpPPC64FlagEQ { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (Equal (FlagLT)) + // result: (MOVDconst [0]) + for { + if v_0.Op != OpPPC64FlagLT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Equal (FlagGT)) + // result: (MOVDconst [0]) + for { + if v_0.Op != OpPPC64FlagGT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Equal (InvertFlags x)) + // result: (Equal x) + for { + if v_0.Op != OpPPC64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpPPC64Equal) + v.AddArg(x) + return true + } + // match: (Equal cmp) + // result: (SETBC [2] cmp) + for { + cmp := v_0 + v.reset(OpPPC64SETBC) + v.AuxInt = int32ToAuxInt(2) + v.AddArg(cmp) + return true + } +} +func rewriteValuePPC64_OpPPC64FABS(v *Value) bool { + v_0 := v.Args[0] + // match: (FABS (FMOVDconst [x])) + // result: (FMOVDconst [math.Abs(x)]) + for { + if v_0.Op != OpPPC64FMOVDconst { + break + } + x := auxIntToFloat64(v_0.AuxInt) + v.reset(OpPPC64FMOVDconst) + v.AuxInt = float64ToAuxInt(math.Abs(x)) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64FADD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FADD (FMUL x y) z) + // cond: x.Block.Func.useFMA(v) + // result: (FMADD x y z) + for { + for _i0 := 0; 
_i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64FMUL { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + z := v_1 + if !(x.Block.Func.useFMA(v)) { + continue + } + v.reset(OpPPC64FMADD) + v.AddArg3(x, y, z) + return true + } + } + break + } + return false +} +func rewriteValuePPC64_OpPPC64FADDS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FADDS (FMULS x y) z) + // cond: x.Block.Func.useFMA(v) + // result: (FMADDS x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64FMULS { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + z := v_1 + if !(x.Block.Func.useFMA(v)) { + continue + } + v.reset(OpPPC64FMADDS) + v.AddArg3(x, y, z) + return true + } + } + break + } + return false +} +func rewriteValuePPC64_OpPPC64FCEIL(v *Value) bool { + v_0 := v.Args[0] + // match: (FCEIL (FMOVDconst [x])) + // result: (FMOVDconst [math.Ceil(x)]) + for { + if v_0.Op != OpPPC64FMOVDconst { + break + } + x := auxIntToFloat64(v_0.AuxInt) + v.reset(OpPPC64FMOVDconst) + v.AuxInt = float64ToAuxInt(math.Ceil(x)) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64FFLOOR(v *Value) bool { + v_0 := v.Args[0] + // match: (FFLOOR (FMOVDconst [x])) + // result: (FMOVDconst [math.Floor(x)]) + for { + if v_0.Op != OpPPC64FMOVDconst { + break + } + x := auxIntToFloat64(v_0.AuxInt) + v.reset(OpPPC64FMOVDconst) + v.AuxInt = float64ToAuxInt(math.Floor(x)) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64FGreaterEqual(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (FGreaterEqual cmp) + // result: (OR (SETBC [2] cmp) (SETBC [1] cmp)) + for { + cmp := v_0 + v.reset(OpPPC64OR) + v0 := b.NewValue0(v.Pos, 
OpPPC64SETBC, typ.Int32) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg(cmp) + v1 := b.NewValue0(v.Pos, OpPPC64SETBC, typ.Int32) + v1.AuxInt = int32ToAuxInt(1) + v1.AddArg(cmp) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValuePPC64_OpPPC64FGreaterThan(v *Value) bool { + v_0 := v.Args[0] + // match: (FGreaterThan cmp) + // result: (SETBC [1] cmp) + for { + cmp := v_0 + v.reset(OpPPC64SETBC) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(cmp) + return true + } +} +func rewriteValuePPC64_OpPPC64FLessEqual(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (FLessEqual cmp) + // result: (OR (SETBC [2] cmp) (SETBC [0] cmp)) + for { + cmp := v_0 + v.reset(OpPPC64OR) + v0 := b.NewValue0(v.Pos, OpPPC64SETBC, typ.Int32) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg(cmp) + v1 := b.NewValue0(v.Pos, OpPPC64SETBC, typ.Int32) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg(cmp) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValuePPC64_OpPPC64FLessThan(v *Value) bool { + v_0 := v.Args[0] + // match: (FLessThan cmp) + // result: (SETBC [0] cmp) + for { + cmp := v_0 + v.reset(OpPPC64SETBC) + v.AuxInt = int32ToAuxInt(0) + v.AddArg(cmp) + return true + } +} +func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) + // result: (MTVSRD x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + x := v_1.Args[1] + if ptr != v_1.Args[0] { + break + } + v.reset(OpPPC64MTVSRD) + v.AddArg(x) + return true + } + // match: (FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) + // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + 
off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := auxIntToInt32(p.AuxInt) + sym2 := auxToSym(p.Aux) + ptr := p.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) { + break + } + v.reset(OpPPC64FMOVDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) + // result: (FMOVDload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) { + break + } + v.reset(OpPPC64FMOVDload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVDstore [off] {sym} ptr (MTVSRD x) mem) + // result: (MOVDstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MTVSRD { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVDstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) + // result: (FMOVDstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != 
OpPPC64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) { + break + } + v.reset(OpPPC64FMOVDstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) + // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := auxIntToInt32(p.AuxInt) + sym2 := auxToSym(p.Aux) + ptr := p.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) { + break + } + v.reset(OpPPC64FMOVDstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64FMOVSload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) + // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := auxIntToInt32(p.AuxInt) + sym2 := auxToSym(p.Aux) + ptr := p.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) 
{ + break + } + v.reset(OpPPC64FMOVSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) + // result: (FMOVSload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) { + break + } + v.reset(OpPPC64FMOVSload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64FMOVSstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) + // result: (FMOVSstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) { + break + } + v.reset(OpPPC64FMOVSstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) + // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + p := v_0 + if p.Op 
!= OpPPC64MOVDaddr { + break + } + off2 := auxIntToInt32(p.AuxInt) + sym2 := auxToSym(p.Aux) + ptr := p.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) { + break + } + v.reset(OpPPC64FMOVSstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64FNEG(v *Value) bool { + v_0 := v.Args[0] + // match: (FNEG (FABS x)) + // result: (FNABS x) + for { + if v_0.Op != OpPPC64FABS { + break + } + x := v_0.Args[0] + v.reset(OpPPC64FNABS) + v.AddArg(x) + return true + } + // match: (FNEG (FNABS x)) + // result: (FABS x) + for { + if v_0.Op != OpPPC64FNABS { + break + } + x := v_0.Args[0] + v.reset(OpPPC64FABS) + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64FSQRT(v *Value) bool { + v_0 := v.Args[0] + // match: (FSQRT (FMOVDconst [x])) + // cond: x >= 0 + // result: (FMOVDconst [math.Sqrt(x)]) + for { + if v_0.Op != OpPPC64FMOVDconst { + break + } + x := auxIntToFloat64(v_0.AuxInt) + if !(x >= 0) { + break + } + v.reset(OpPPC64FMOVDconst) + v.AuxInt = float64ToAuxInt(math.Sqrt(x)) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64FSUB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FSUB (FMUL x y) z) + // cond: x.Block.Func.useFMA(v) + // result: (FMSUB x y z) + for { + if v_0.Op != OpPPC64FMUL { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + z := v_1 + if !(x.Block.Func.useFMA(v)) { + continue + } + v.reset(OpPPC64FMSUB) + v.AddArg3(x, y, z) + return true + } + break + } + return false +} +func rewriteValuePPC64_OpPPC64FSUBS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FSUBS (FMULS x y) z) + // cond: 
x.Block.Func.useFMA(v) + // result: (FMSUBS x y z) + for { + if v_0.Op != OpPPC64FMULS { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + z := v_1 + if !(x.Block.Func.useFMA(v)) { + continue + } + v.reset(OpPPC64FMSUBS) + v.AddArg3(x, y, z) + return true + } + break + } + return false +} +func rewriteValuePPC64_OpPPC64FTRUNC(v *Value) bool { + v_0 := v.Args[0] + // match: (FTRUNC (FMOVDconst [x])) + // result: (FMOVDconst [math.Trunc(x)]) + for { + if v_0.Op != OpPPC64FMOVDconst { + break + } + x := auxIntToFloat64(v_0.AuxInt) + v.reset(OpPPC64FMOVDconst) + v.AuxInt = float64ToAuxInt(math.Trunc(x)) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64GreaterEqual(v *Value) bool { + v_0 := v.Args[0] + // match: (GreaterEqual (FlagEQ)) + // result: (MOVDconst [1]) + for { + if v_0.Op != OpPPC64FlagEQ { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (GreaterEqual (FlagLT)) + // result: (MOVDconst [0]) + for { + if v_0.Op != OpPPC64FlagLT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (GreaterEqual (FlagGT)) + // result: (MOVDconst [1]) + for { + if v_0.Op != OpPPC64FlagGT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (GreaterEqual (InvertFlags x)) + // result: (LessEqual x) + for { + if v_0.Op != OpPPC64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpPPC64LessEqual) + v.AddArg(x) + return true + } + // match: (GreaterEqual cmp) + // result: (SETBCR [0] cmp) + for { + cmp := v_0 + v.reset(OpPPC64SETBCR) + v.AuxInt = int32ToAuxInt(0) + v.AddArg(cmp) + return true + } +} +func rewriteValuePPC64_OpPPC64GreaterThan(v *Value) bool { + v_0 := v.Args[0] + // match: (GreaterThan (FlagEQ)) + // result: (MOVDconst [0]) + for { + if v_0.Op != OpPPC64FlagEQ { + break + } + 
v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (GreaterThan (FlagLT)) + // result: (MOVDconst [0]) + for { + if v_0.Op != OpPPC64FlagLT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (GreaterThan (FlagGT)) + // result: (MOVDconst [1]) + for { + if v_0.Op != OpPPC64FlagGT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (GreaterThan (InvertFlags x)) + // result: (LessThan x) + for { + if v_0.Op != OpPPC64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpPPC64LessThan) + v.AddArg(x) + return true + } + // match: (GreaterThan cmp) + // result: (SETBC [1] cmp) + for { + cmp := v_0 + v.reset(OpPPC64SETBC) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(cmp) + return true + } +} +func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ISEL [6] x y (Select1 (ANDCCconst [1] (SETBC [c] cmp)))) + // result: (ISEL [c] x y cmp) + for { + if auxIntToInt32(v.AuxInt) != 6 { + break + } + x := v_0 + y := v_1 + if v_2.Op != OpSelect1 { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0.AuxInt) != 1 { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpPPC64SETBC { + break + } + c := auxIntToInt32(v_2_0_0.AuxInt) + cmp := v_2_0_0.Args[0] + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, cmp) + return true + } + // match: (ISEL [6] x y (CMPconst [0] (SETBC [c] cmp))) + // result: (ISEL [c] x y cmp) + for { + if auxIntToInt32(v.AuxInt) != 6 { + break + } + x := v_0 + y := v_1 + if v_2.Op != OpPPC64CMPconst || auxIntToInt64(v_2.AuxInt) != 0 { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpPPC64SETBC { + break + } + c := auxIntToInt32(v_2_0.AuxInt) + cmp := v_2_0.Args[0] + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, cmp) + return 
true + } + // match: (ISEL [6] x y (CMPWconst [0] (SETBC [c] cmp))) + // result: (ISEL [c] x y cmp) + for { + if auxIntToInt32(v.AuxInt) != 6 { + break + } + x := v_0 + y := v_1 + if v_2.Op != OpPPC64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpPPC64SETBC { + break + } + c := auxIntToInt32(v_2_0.AuxInt) + cmp := v_2_0.Args[0] + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(c) + v.AddArg3(x, y, cmp) + return true + } + // match: (ISEL [6] x y (CMPconst [0] (SETBCR [c] cmp))) + // result: (ISEL [c+4] x y cmp) + for { + if auxIntToInt32(v.AuxInt) != 6 { + break + } + x := v_0 + y := v_1 + if v_2.Op != OpPPC64CMPconst || auxIntToInt64(v_2.AuxInt) != 0 { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpPPC64SETBCR { + break + } + c := auxIntToInt32(v_2_0.AuxInt) + cmp := v_2_0.Args[0] + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(c + 4) + v.AddArg3(x, y, cmp) + return true + } + // match: (ISEL [6] x y (CMPWconst [0] (SETBCR [c] cmp))) + // result: (ISEL [c+4] x y cmp) + for { + if auxIntToInt32(v.AuxInt) != 6 { + break + } + x := v_0 + y := v_1 + if v_2.Op != OpPPC64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpPPC64SETBCR { + break + } + c := auxIntToInt32(v_2_0.AuxInt) + cmp := v_2_0.Args[0] + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(c + 4) + v.AddArg3(x, y, cmp) + return true + } + // match: (ISEL [2] x _ (FlagEQ)) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 2 { + break + } + x := v_0 + if v_2.Op != OpPPC64FlagEQ { + break + } + v.copyOf(x) + return true + } + // match: (ISEL [2] _ y (FlagLT)) + // result: y + for { + if auxIntToInt32(v.AuxInt) != 2 { + break + } + y := v_1 + if v_2.Op != OpPPC64FlagLT { + break + } + v.copyOf(y) + return true + } + // match: (ISEL [2] _ y (FlagGT)) + // result: y + for { + if auxIntToInt32(v.AuxInt) != 2 { + break + } + y := v_1 + if v_2.Op != OpPPC64FlagGT { + break + } + v.copyOf(y) + return 
true + } + // match: (ISEL [6] _ y (FlagEQ)) + // result: y + for { + if auxIntToInt32(v.AuxInt) != 6 { + break + } + y := v_1 + if v_2.Op != OpPPC64FlagEQ { + break + } + v.copyOf(y) + return true + } + // match: (ISEL [6] x _ (FlagLT)) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 6 { + break + } + x := v_0 + if v_2.Op != OpPPC64FlagLT { + break + } + v.copyOf(x) + return true + } + // match: (ISEL [6] x _ (FlagGT)) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 6 { + break + } + x := v_0 + if v_2.Op != OpPPC64FlagGT { + break + } + v.copyOf(x) + return true + } + // match: (ISEL [0] _ y (FlagEQ)) + // result: y + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + y := v_1 + if v_2.Op != OpPPC64FlagEQ { + break + } + v.copyOf(y) + return true + } + // match: (ISEL [0] _ y (FlagGT)) + // result: y + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + y := v_1 + if v_2.Op != OpPPC64FlagGT { + break + } + v.copyOf(y) + return true + } + // match: (ISEL [0] x _ (FlagLT)) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + if v_2.Op != OpPPC64FlagLT { + break + } + v.copyOf(x) + return true + } + // match: (ISEL [5] _ x (FlagEQ)) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 5 { + break + } + x := v_1 + if v_2.Op != OpPPC64FlagEQ { + break + } + v.copyOf(x) + return true + } + // match: (ISEL [5] _ x (FlagLT)) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 5 { + break + } + x := v_1 + if v_2.Op != OpPPC64FlagLT { + break + } + v.copyOf(x) + return true + } + // match: (ISEL [5] y _ (FlagGT)) + // result: y + for { + if auxIntToInt32(v.AuxInt) != 5 { + break + } + y := v_0 + if v_2.Op != OpPPC64FlagGT { + break + } + v.copyOf(y) + return true + } + // match: (ISEL [1] _ y (FlagEQ)) + // result: y + for { + if auxIntToInt32(v.AuxInt) != 1 { + break + } + y := v_1 + if v_2.Op != OpPPC64FlagEQ { + break + } + v.copyOf(y) + return true + } + // match: (ISEL [1] _ y (FlagLT)) + // result: y + for { + 
if auxIntToInt32(v.AuxInt) != 1 { + break + } + y := v_1 + if v_2.Op != OpPPC64FlagLT { + break + } + v.copyOf(y) + return true + } + // match: (ISEL [1] x _ (FlagGT)) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 1 { + break + } + x := v_0 + if v_2.Op != OpPPC64FlagGT { + break + } + v.copyOf(x) + return true + } + // match: (ISEL [4] x _ (FlagEQ)) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 4 { + break + } + x := v_0 + if v_2.Op != OpPPC64FlagEQ { + break + } + v.copyOf(x) + return true + } + // match: (ISEL [4] x _ (FlagGT)) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 4 { + break + } + x := v_0 + if v_2.Op != OpPPC64FlagGT { + break + } + v.copyOf(x) + return true + } + // match: (ISEL [4] _ y (FlagLT)) + // result: y + for { + if auxIntToInt32(v.AuxInt) != 4 { + break + } + y := v_1 + if v_2.Op != OpPPC64FlagLT { + break + } + v.copyOf(y) + return true + } + // match: (ISEL [2] x y (CMPconst [0] (Select0 (ANDCCconst [n] z)))) + // result: (ISEL [2] x y (Select1 (ANDCCconst [n] z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 { + break + } + x := v_0 + y := v_1 + if v_2.Op != OpPPC64CMPconst || auxIntToInt64(v_2.AuxInt) != 0 { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpSelect0 { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpPPC64ANDCCconst { + break + } + n := auxIntToInt64(v_2_0_0.AuxInt) + z := v_2_0_0.Args[0] + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(n) + v1.AddArg(z) + v0.AddArg(v1) + v.AddArg3(x, y, v0) + return true + } + // match: (ISEL [2] x y (CMPWconst [0] (Select0 (ANDCCconst [n] z)))) + // result: (ISEL [2] x y (Select1 (ANDCCconst [n] z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 { + break + } + x := v_0 + y := v_1 + if v_2.Op != OpPPC64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 { + break + } + v_2_0 := 
v_2.Args[0] + if v_2_0.Op != OpSelect0 { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpPPC64ANDCCconst { + break + } + n := auxIntToInt64(v_2_0_0.AuxInt) + z := v_2_0_0.Args[0] + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(n) + v1.AddArg(z) + v0.AddArg(v1) + v.AddArg3(x, y, v0) + return true + } + // match: (ISEL [6] x y (CMPconst [0] (Select0 (ANDCCconst [n] z)))) + // result: (ISEL [6] x y (Select1 (ANDCCconst [n] z ))) + for { + if auxIntToInt32(v.AuxInt) != 6 { + break + } + x := v_0 + y := v_1 + if v_2.Op != OpPPC64CMPconst || auxIntToInt64(v_2.AuxInt) != 0 { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpSelect0 { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpPPC64ANDCCconst { + break + } + n := auxIntToInt64(v_2_0_0.AuxInt) + z := v_2_0_0.Args[0] + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(n) + v1.AddArg(z) + v0.AddArg(v1) + v.AddArg3(x, y, v0) + return true + } + // match: (ISEL [6] x y (CMPWconst [0] (Select0 (ANDCCconst [n] z)))) + // result: (ISEL [6] x y (Select1 (ANDCCconst [n] z ))) + for { + if auxIntToInt32(v.AuxInt) != 6 { + break + } + x := v_0 + y := v_1 + if v_2.Op != OpPPC64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpSelect0 { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpPPC64ANDCCconst { + break + } + n := auxIntToInt64(v_2_0_0.AuxInt) + z := v_2_0_0.Args[0] + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(n) + 
v1.AddArg(z) + v0.AddArg(v1) + v.AddArg3(x, y, v0) + return true + } + // match: (ISEL [n] x y (InvertFlags bool)) + // cond: n%4 == 0 + // result: (ISEL [n+1] x y bool) + for { + n := auxIntToInt32(v.AuxInt) + x := v_0 + y := v_1 + if v_2.Op != OpPPC64InvertFlags { + break + } + bool := v_2.Args[0] + if !(n%4 == 0) { + break + } + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(n + 1) + v.AddArg3(x, y, bool) + return true + } + // match: (ISEL [n] x y (InvertFlags bool)) + // cond: n%4 == 1 + // result: (ISEL [n-1] x y bool) + for { + n := auxIntToInt32(v.AuxInt) + x := v_0 + y := v_1 + if v_2.Op != OpPPC64InvertFlags { + break + } + bool := v_2.Args[0] + if !(n%4 == 1) { + break + } + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(n - 1) + v.AddArg3(x, y, bool) + return true + } + // match: (ISEL [n] x y (InvertFlags bool)) + // cond: n%4 == 2 + // result: (ISEL [n] x y bool) + for { + n := auxIntToInt32(v.AuxInt) + x := v_0 + y := v_1 + if v_2.Op != OpPPC64InvertFlags { + break + } + bool := v_2.Args[0] + if !(n%4 == 2) { + break + } + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(n) + v.AddArg3(x, y, bool) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64LessEqual(v *Value) bool { + v_0 := v.Args[0] + // match: (LessEqual (FlagEQ)) + // result: (MOVDconst [1]) + for { + if v_0.Op != OpPPC64FlagEQ { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (LessEqual (FlagLT)) + // result: (MOVDconst [1]) + for { + if v_0.Op != OpPPC64FlagLT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (LessEqual (FlagGT)) + // result: (MOVDconst [0]) + for { + if v_0.Op != OpPPC64FlagGT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (LessEqual (InvertFlags x)) + // result: (GreaterEqual x) + for { + if v_0.Op != OpPPC64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpPPC64GreaterEqual) + 
v.AddArg(x) + return true + } + // match: (LessEqual cmp) + // result: (SETBCR [1] cmp) + for { + cmp := v_0 + v.reset(OpPPC64SETBCR) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(cmp) + return true + } +} +func rewriteValuePPC64_OpPPC64LessThan(v *Value) bool { + v_0 := v.Args[0] + // match: (LessThan (FlagEQ)) + // result: (MOVDconst [0]) + for { + if v_0.Op != OpPPC64FlagEQ { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (LessThan (FlagLT)) + // result: (MOVDconst [1]) + for { + if v_0.Op != OpPPC64FlagLT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (LessThan (FlagGT)) + // result: (MOVDconst [0]) + for { + if v_0.Op != OpPPC64FlagGT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (LessThan (InvertFlags x)) + // result: (GreaterThan x) + for { + if v_0.Op != OpPPC64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpPPC64GreaterThan) + v.AddArg(x) + return true + } + // match: (LessThan cmp) + // result: (SETBC [0] cmp) + for { + cmp := v_0 + v.reset(OpPPC64SETBC) + v.AuxInt = int32ToAuxInt(0) + v.AddArg(cmp) + return true + } +} +func rewriteValuePPC64_OpPPC64MFVSRD(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MFVSRD (FMOVDconst [c])) + // result: (MOVDconst [int64(math.Float64bits(c))]) + for { + if v_0.Op != OpPPC64FMOVDconst { + break + } + c := auxIntToFloat64(v_0.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(math.Float64bits(c))) + return true + } + // match: (MFVSRD x:(FMOVDload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVDload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpPPC64FMOVDload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := 
b.NewValue0(x.Pos, OpPPC64MOVDload, typ.Int64) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVBZload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) + // result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := auxIntToInt32(p.AuxInt) + sym2 := auxToSym(p.Aux) + ptr := p.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) { + break + } + v.reset(OpPPC64MOVBZload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBZload [off1] {sym} (ADDconst [off2] x) mem) + // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) + // result: (MOVBZload [off1+int32(off2)] {sym} x mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + mem := v_1 + if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) { + break + } + v.reset(OpPPC64MOVBZload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(x, mem) + return true + } + // match: (MOVBZload [0] {sym} p:(ADD ptr idx) mem) + // cond: sym == nil && p.Uses == 1 + // result: (MOVBZloadidx ptr idx mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64ADD { + break + } + 
idx := p.Args[1] + ptr := p.Args[0] + mem := v_1 + if !(sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVBZloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVBZloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBZloadidx ptr (MOVDconst [c]) mem) + // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVBZload [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVBZload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBZloadidx (MOVDconst [c]) ptr mem) + // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVBZload [int32(c)] ptr mem) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + ptr := v_1 + mem := v_2 + if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVBZload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MOVBZreg y:(Select0 (ANDCCconst [c] _))) + // cond: uint64(c) <= 0xFF + // result: y + for { + y := v_0 + if y.Op != OpSelect0 { + break + } + y_0 := y.Args[0] + if y_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(y_0.AuxInt) + if !(uint64(c) <= 0xFF) { + break + } + v.copyOf(y) + return true + } + // match: (MOVBZreg (SRWconst [c] (MOVBZreg x))) + // result: (SRWconst [c] (MOVBZreg x)) + for { + if v_0.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVBZreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRWconst) + v.AuxInt = 
int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MOVBZreg (SRWconst [c] x)) + // cond: sizeof(x.Type) == 8 + // result: (SRWconst [c] x) + for { + if v_0.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(sizeof(x.Type) == 8) { + break + } + v.reset(OpPPC64SRWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVBZreg (SRDconst [c] x)) + // cond: c>=56 + // result: (SRDconst [c] x) + for { + if v_0.Op != OpPPC64SRDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c >= 56) { + break + } + v.reset(OpPPC64SRDconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVBZreg (SRWconst [c] x)) + // cond: c>=24 + // result: (SRWconst [c] x) + for { + if v_0.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c >= 24) { + break + } + v.reset(OpPPC64SRWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVBZreg y:(MOVBZreg _)) + // result: y + for { + y := v_0 + if y.Op != OpPPC64MOVBZreg { + break + } + v.copyOf(y) + return true + } + // match: (MOVBZreg (MOVBreg x)) + // result: (MOVBZreg x) + for { + if v_0.Op != OpPPC64MOVBreg { + break + } + x := v_0.Args[0] + v.reset(OpPPC64MOVBZreg) + v.AddArg(x) + return true + } + // match: (MOVBZreg (OR x (MOVWZreg y))) + // result: (MOVBZreg (OR x y)) + for { + if v_0.Op != OpPPC64OR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVWZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVBZreg) + v0 := b.NewValue0(v.Pos, OpPPC64OR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVBZreg (XOR x (MOVWZreg y))) + // result: (MOVBZreg (XOR x y)) + for { + if 
v_0.Op != OpPPC64XOR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVWZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVBZreg) + v0 := b.NewValue0(v.Pos, OpPPC64XOR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVBZreg (AND x (MOVWZreg y))) + // result: (MOVBZreg (AND x y)) + for { + if v_0.Op != OpPPC64AND { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVWZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVBZreg) + v0 := b.NewValue0(v.Pos, OpPPC64AND, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVBZreg (OR x (MOVHZreg y))) + // result: (MOVBZreg (OR x y)) + for { + if v_0.Op != OpPPC64OR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVHZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVBZreg) + v0 := b.NewValue0(v.Pos, OpPPC64OR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVBZreg (XOR x (MOVHZreg y))) + // result: (MOVBZreg (XOR x y)) + for { + if v_0.Op != OpPPC64XOR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVHZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVBZreg) + v0 := b.NewValue0(v.Pos, OpPPC64XOR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVBZreg (AND x (MOVHZreg y))) + // result: (MOVBZreg (AND x y)) + for { + if v_0.Op != OpPPC64AND { + break + } + t 
:= v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVHZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVBZreg) + v0 := b.NewValue0(v.Pos, OpPPC64AND, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVBZreg (OR x (MOVBZreg y))) + // result: (MOVBZreg (OR x y)) + for { + if v_0.Op != OpPPC64OR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVBZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVBZreg) + v0 := b.NewValue0(v.Pos, OpPPC64OR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVBZreg (XOR x (MOVBZreg y))) + // result: (MOVBZreg (XOR x y)) + for { + if v_0.Op != OpPPC64XOR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVBZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVBZreg) + v0 := b.NewValue0(v.Pos, OpPPC64XOR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVBZreg (AND x (MOVBZreg y))) + // result: (MOVBZreg (AND x y)) + for { + if v_0.Op != OpPPC64AND { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVBZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVBZreg) + v0 := b.NewValue0(v.Pos, OpPPC64AND, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVBZreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x)))) + // result: z + for { + z := v_0 + if z.Op != OpSelect0 { + break + } + z_0 := z.Args[0] + if 
z_0.Op != OpPPC64ANDCCconst { + break + } + z_0_0 := z_0.Args[0] + if z_0_0.Op != OpPPC64MOVBZload { + break + } + v.copyOf(z) + return true + } + // match: (MOVBZreg z:(AND y (MOVBZload ptr x))) + // result: z + for { + z := v_0 + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + if z_1.Op != OpPPC64MOVBZload { + continue + } + v.copyOf(z) + return true + } + break + } + // match: (MOVBZreg x:(MOVBZload _ _)) + // result: x + for { + x := v_0 + if x.Op != OpPPC64MOVBZload { + break + } + v.copyOf(x) + return true + } + // match: (MOVBZreg x:(MOVBZloadidx _ _ _)) + // result: x + for { + x := v_0 + if x.Op != OpPPC64MOVBZloadidx { + break + } + v.copyOf(x) + return true + } + // match: (MOVBZreg x:(Select0 (LoweredAtomicLoad8 _ _))) + // result: x + for { + x := v_0 + if x.Op != OpSelect0 { + break + } + x_0 := x.Args[0] + if x_0.Op != OpPPC64LoweredAtomicLoad8 { + break + } + v.copyOf(x) + return true + } + // match: (MOVBZreg x:(Arg )) + // cond: is8BitInt(t) && !t.IsSigned() + // result: x + for { + x := v_0 + if x.Op != OpArg { + break + } + t := x.Type + if !(is8BitInt(t) && !t.IsSigned()) { + break + } + v.copyOf(x) + return true + } + // match: (MOVBZreg (MOVDconst [c])) + // result: (MOVDconst [int64(uint8(c))]) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint8(c))) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVBreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MOVBreg y:(Select0 (ANDCCconst [c] _))) + // cond: uint64(c) <= 0x7F + // result: y + for { + y := v_0 + if y.Op != OpSelect0 { + break + } + y_0 := y.Args[0] + if y_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(y_0.AuxInt) + if !(uint64(c) <= 0x7F) { + break + } + v.copyOf(y) + return true + } + // match: 
(MOVBreg (SRAWconst [c] (MOVBreg x))) + // result: (SRAWconst [c] (MOVBreg x)) + for { + if v_0.Op != OpPPC64SRAWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVBreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MOVBreg (SRAWconst [c] x)) + // cond: sizeof(x.Type) == 8 + // result: (SRAWconst [c] x) + for { + if v_0.Op != OpPPC64SRAWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(sizeof(x.Type) == 8) { + break + } + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVBreg (SRDconst [c] x)) + // cond: c>56 + // result: (SRDconst [c] x) + for { + if v_0.Op != OpPPC64SRDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c > 56) { + break + } + v.reset(OpPPC64SRDconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVBreg (SRDconst [c] x)) + // cond: c==56 + // result: (SRADconst [c] x) + for { + if v_0.Op != OpPPC64SRDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c == 56) { + break + } + v.reset(OpPPC64SRADconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVBreg (SRADconst [c] x)) + // cond: c>=56 + // result: (SRADconst [c] x) + for { + if v_0.Op != OpPPC64SRADconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c >= 56) { + break + } + v.reset(OpPPC64SRADconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVBreg (SRWconst [c] x)) + // cond: c>24 + // result: (SRWconst [c] x) + for { + if v_0.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c > 24) { + break + } + v.reset(OpPPC64SRWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + 
// match: (MOVBreg (SRWconst [c] x)) + // cond: c==24 + // result: (SRAWconst [c] x) + for { + if v_0.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c == 24) { + break + } + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVBreg (SRAWconst [c] x)) + // cond: c>=24 + // result: (SRAWconst [c] x) + for { + if v_0.Op != OpPPC64SRAWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c >= 24) { + break + } + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVBreg y:(MOVBreg _)) + // result: y + for { + y := v_0 + if y.Op != OpPPC64MOVBreg { + break + } + v.copyOf(y) + return true + } + // match: (MOVBreg (MOVBZreg x)) + // result: (MOVBreg x) + for { + if v_0.Op != OpPPC64MOVBZreg { + break + } + x := v_0.Args[0] + v.reset(OpPPC64MOVBreg) + v.AddArg(x) + return true + } + // match: (MOVBreg x:(Arg )) + // cond: is8BitInt(t) && t.IsSigned() + // result: x + for { + x := v_0 + if x.Op != OpArg { + break + } + t := x.Type + if !(is8BitInt(t) && t.IsSigned()) { + break + } + v.copyOf(x) + return true + } + // match: (MOVBreg (MOVDconst [c])) + // result: (MOVDconst [int64(int8(c))]) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(int8(c))) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) + // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) + // result: (MOVBstore [off1+int32(off2)] {sym} x val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + x 
:= v_0.Args[0] + val := v_1 + mem := v_2 + if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) { + break + } + v.reset(OpPPC64MOVBstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(x, val, mem) + return true + } + // match: (MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) + // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := auxIntToInt32(p.AuxInt) + sym2 := auxToSym(p.Aux) + ptr := p.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) { + break + } + v.reset(OpPPC64MOVBstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) + // result: (MOVBstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpPPC64MOVBstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstore [0] {sym} p:(ADD ptr idx) val mem) + // cond: sym == nil && p.Uses == 1 + // result: (MOVBstoreidx ptr idx val mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64ADD { + break + } + idx := p.Args[1] + ptr := p.Args[0] + val := v_1 + mem := v_2 + if !(sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVBstoreidx) + v.AddArg4(ptr, idx, val, mem) + 
return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MOVBreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBZreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MOVBZreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MOVHreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVHZreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MOVHZreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVWZreg x) 
mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MOVWZreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVHreg x) [c]) mem) + // cond: c <= 8 + // result: (MOVBstore [off] {sym} ptr (SRWconst x [c]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVHreg { + break + } + x := v_1_0.Args[0] + mem := v_2 + if !(c <= 8) { + break + } + v.reset(OpPPC64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVHZreg x) [c]) mem) + // cond: c <= 8 + // result: (MOVBstore [off] {sym} ptr (SRWconst x [c]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVHZreg { + break + } + x := v_1_0.Args[0] + mem := v_2 + if !(c <= 8) { + break + } + v.reset(OpPPC64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVWreg x) [c]) mem) + // cond: c <= 24 + // result: (MOVBstore [off] {sym} ptr (SRWconst x [c]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v_1_0 := 
v_1.Args[0] + if v_1_0.Op != OpPPC64MOVWreg { + break + } + x := v_1_0.Args[0] + mem := v_2 + if !(c <= 24) { + break + } + v.reset(OpPPC64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVWZreg x) [c]) mem) + // cond: c <= 24 + // result: (MOVBstore [off] {sym} ptr (SRWconst x [c]) mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVWZreg { + break + } + x := v_1_0.Args[0] + mem := v_2 + if !(c <= 24) { + break + } + v.reset(OpPPC64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(x) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MOVBstoreidx ptr (MOVDconst [c]) val mem) + // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVBstore [int32(c)] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + val := v_2 + mem := v_3 + if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVBstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstoreidx (MOVDconst [c]) ptr val mem) + // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVBstore [int32(c)] ptr val mem) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + ptr := v_1 + val := v_2 + mem := 
v_3 + if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVBstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstoreidx ptr idx (MOVBreg x) mem) + // result: (MOVBstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpPPC64MOVBreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpPPC64MOVBstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVBstoreidx ptr idx (MOVBZreg x) mem) + // result: (MOVBstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpPPC64MOVBZreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpPPC64MOVBstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVBstoreidx ptr idx (MOVHreg x) mem) + // result: (MOVBstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpPPC64MOVHreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpPPC64MOVBstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVBstoreidx ptr idx (MOVHZreg x) mem) + // result: (MOVBstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpPPC64MOVHZreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpPPC64MOVBstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVBstoreidx ptr idx (MOVWreg x) mem) + // result: (MOVBstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpPPC64MOVWreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpPPC64MOVBstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVBstoreidx ptr idx (MOVWZreg x) mem) + // result: (MOVBstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpPPC64MOVWZreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpPPC64MOVBstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVBstoreidx ptr idx (SRWconst (MOVHreg x) [c]) mem) + // cond: c <= 8 + // result: (MOVBstoreidx 
ptr idx (SRWconst x [c]) mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpPPC64MOVHreg { + break + } + x := v_2_0.Args[0] + mem := v_3 + if !(c <= 8) { + break + } + v.reset(OpPPC64MOVBstoreidx) + v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(x) + v.AddArg4(ptr, idx, v0, mem) + return true + } + // match: (MOVBstoreidx ptr idx (SRWconst (MOVHZreg x) [c]) mem) + // cond: c <= 8 + // result: (MOVBstoreidx ptr idx (SRWconst x [c]) mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpPPC64MOVHZreg { + break + } + x := v_2_0.Args[0] + mem := v_3 + if !(c <= 8) { + break + } + v.reset(OpPPC64MOVBstoreidx) + v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(x) + v.AddArg4(ptr, idx, v0, mem) + return true + } + // match: (MOVBstoreidx ptr idx (SRWconst (MOVWreg x) [c]) mem) + // cond: c <= 24 + // result: (MOVBstoreidx ptr idx (SRWconst x [c]) mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpPPC64MOVWreg { + break + } + x := v_2_0.Args[0] + mem := v_3 + if !(c <= 24) { + break + } + v.reset(OpPPC64MOVBstoreidx) + v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(x) + v.AddArg4(ptr, idx, v0, mem) + return true + } + // match: (MOVBstoreidx ptr idx (SRWconst (MOVWZreg x) [c]) mem) + // cond: c <= 24 + // result: (MOVBstoreidx ptr idx (SRWconst x [c]) mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_2.AuxInt) + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpPPC64MOVWZreg { + break + } + x := v_2_0.Args[0] + mem := v_3 + if !(c <= 24) { + break + } + 
v.reset(OpPPC64MOVBstoreidx) + v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(x) + v.AddArg4(ptr, idx, v0, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) + // cond: ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2))) + // result: (MOVBstorezero [off1+int32(off2)] {sym} x mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + mem := v_1 + if !((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1) + off2))) { + break + } + v.reset(OpPPC64MOVBstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(x, mem) + return true + } + // match: (MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) + // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) + // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := auxIntToInt32(p.AuxInt) + sym2 := auxToSym(p.Aux) + x := p.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) { + break + } + v.reset(OpPPC64MOVBstorezero) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVDaddr(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVDaddr {sym} [n] p:(ADD x y)) + // cond: sym == nil && n == 0 + // result: p + for { + n := 
auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64ADD { + break + } + if !(sym == nil && n == 0) { + break + } + v.copyOf(p) + return true + } + // match: (MOVDaddr {sym} [n] ptr) + // cond: sym == nil && n == 0 && (ptr.Op == OpArgIntReg || ptr.Op == OpPhi) + // result: ptr + for { + n := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if !(sym == nil && n == 0 && (ptr.Op == OpArgIntReg || ptr.Op == OpPhi)) { + break + } + v.copyOf(ptr) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) + // result: (MFVSRD x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64FMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + x := v_1.Args[1] + if ptr != v_1.Args[0] { + break + } + v.reset(OpPPC64MFVSRD) + v.AddArg(x) + return true + } + // match: (MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) + // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := auxIntToInt32(p.AuxInt) + sym2 := auxToSym(p.Aux) + ptr := p.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) { + break + } + v.reset(OpPPC64MOVDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDload [off1] {sym} (ADDconst [off2] x) mem) + // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) + 
// result: (MOVDload [off1+int32(off2)] {sym} x mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + mem := v_1 + if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) { + break + } + v.reset(OpPPC64MOVDload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(x, mem) + return true + } + // match: (MOVDload [0] {sym} p:(ADD ptr idx) mem) + // cond: sym == nil && p.Uses == 1 + // result: (MOVDloadidx ptr idx mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64ADD { + break + } + idx := p.Args[1] + ptr := p.Args[0] + mem := v_1 + if !(sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVDloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVDloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDloadidx ptr (MOVDconst [c]) mem) + // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVDload [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVDload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDloadidx (MOVDconst [c]) ptr mem) + // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVDload [int32(c)] ptr mem) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + ptr := v_1 + mem := v_2 + if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVDload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + 
return false +} +func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MOVDstore [off] {sym} ptr (MFVSRD x) mem) + // result: (FMOVDstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MFVSRD { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64FMOVDstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) + // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) + // result: (MOVDstore [off1+int32(off2)] {sym} x val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) { + break + } + v.reset(OpPPC64MOVDstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(x, val, mem) + return true + } + // match: (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) + // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := auxIntToInt32(p.AuxInt) + sym2 := auxToSym(p.Aux) + ptr := p.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) { + break + } + v.reset(OpPPC64MOVDstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + 
v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) + // result: (MOVDstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpPPC64MOVDstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDstore [0] {sym} p:(ADD ptr idx) val mem) + // cond: sym == nil && p.Uses == 1 + // result: (MOVDstoreidx ptr idx val mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64ADD { + break + } + idx := p.Args[1] + ptr := p.Args[0] + val := v_1 + mem := v_2 + if !(sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVDstoreidx) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVDstore [off] {sym} ptr r:(BRD val) mem) + // cond: r.Uses == 1 + // result: (MOVDBRstore (MOVDaddr [off] {sym} ptr) val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + r := v_1 + if r.Op != OpPPC64BRD { + break + } + val := r.Args[0] + mem := v_2 + if !(r.Uses == 1) { + break + } + v.reset(OpPPC64MOVDBRstore) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, ptr.Type) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg(ptr) + v.AddArg3(v0, val, mem) + return true + } + // match: (MOVDstore [off] {sym} ptr (Bswap64 val) mem) + // result: (MOVDBRstore (MOVDaddr [off] {sym} ptr) val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpBswap64 { + break + } + val := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVDBRstore) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, ptr.Type) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg(ptr) + v.AddArg3(v0, val, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVDstoreidx(v *Value) 
bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDstoreidx ptr (MOVDconst [c]) val mem) + // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVDstore [int32(c)] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + val := v_2 + mem := v_3 + if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVDstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVDstoreidx (MOVDconst [c]) ptr val mem) + // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVDstore [int32(c)] ptr val mem) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + ptr := v_1 + val := v_2 + mem := v_3 + if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVDstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVDstoreidx ptr idx r:(BRD val) mem) + // cond: r.Uses == 1 + // result: (MOVDBRstoreidx ptr idx val mem) + for { + ptr := v_0 + idx := v_1 + r := v_2 + if r.Op != OpPPC64BRD { + break + } + val := r.Args[0] + mem := v_3 + if !(r.Uses == 1) { + break + } + v.reset(OpPPC64MOVDBRstoreidx) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVDstoreidx ptr idx (Bswap64 val) mem) + // result: (MOVDBRstoreidx ptr idx val mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpBswap64 { + break + } + val := v_2.Args[0] + mem := v_3 + v.reset(OpPPC64MOVDBRstoreidx) + v.AddArg4(ptr, idx, val, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) + // cond: ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || 
(is16Bit(int64(off1)+off2))) + // result: (MOVDstorezero [off1+int32(off2)] {sym} x mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + mem := v_1 + if !((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1) + off2))) { + break + } + v.reset(OpPPC64MOVDstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(x, mem) + return true + } + // match: (MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) + // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) + // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := auxIntToInt32(p.AuxInt) + sym2 := auxToSym(p.Aux) + x := p.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) { + break + } + v.reset(OpPPC64MOVDstorezero) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVHBRstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHBRstore ptr (MOVHreg x) mem) + // result: (MOVHBRstore ptr x mem) + for { + ptr := v_0 + if v_1.Op != OpPPC64MOVHreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVHBRstore) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHBRstore ptr (MOVHZreg x) mem) + // result: (MOVHBRstore ptr x mem) + for { + ptr := v_0 + if v_1.Op != OpPPC64MOVHZreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVHBRstore) + v.AddArg3(ptr, x, mem) + return true + } + // 
match: (MOVHBRstore ptr (MOVWreg x) mem) + // result: (MOVHBRstore ptr x mem) + for { + ptr := v_0 + if v_1.Op != OpPPC64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVHBRstore) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHBRstore ptr (MOVWZreg x) mem) + // result: (MOVHBRstore ptr x mem) + for { + ptr := v_0 + if v_1.Op != OpPPC64MOVWZreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVHBRstore) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVHZload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) + // result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := auxIntToInt32(p.AuxInt) + sym2 := auxToSym(p.Aux) + ptr := p.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) { + break + } + v.reset(OpPPC64MOVHZload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHZload [off1] {sym} (ADDconst [off2] x) mem) + // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) + // result: (MOVHZload [off1+int32(off2)] {sym} x mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + mem := v_1 + if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) { + break + } + v.reset(OpPPC64MOVHZload) + v.AuxInt = int32ToAuxInt(off1 
+ int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(x, mem) + return true + } + // match: (MOVHZload [0] {sym} p:(ADD ptr idx) mem) + // cond: sym == nil && p.Uses == 1 + // result: (MOVHZloadidx ptr idx mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64ADD { + break + } + idx := p.Args[1] + ptr := p.Args[0] + mem := v_1 + if !(sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVHZloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVHZloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHZloadidx ptr (MOVDconst [c]) mem) + // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVHZload [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVHZload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHZloadidx (MOVDconst [c]) ptr mem) + // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVHZload [int32(c)] ptr mem) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + ptr := v_1 + mem := v_2 + if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVHZload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MOVHZreg y:(Select0 (ANDCCconst [c] _))) + // cond: uint64(c) <= 0xFFFF + // result: y + for { + y := v_0 + if y.Op != OpSelect0 { + break + } + y_0 := y.Args[0] + if y_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(y_0.AuxInt) + if !(uint64(c) <= 0xFFFF) { + break + } + 
v.copyOf(y) + return true + } + // match: (MOVHZreg (SRWconst [c] (MOVBZreg x))) + // result: (SRWconst [c] (MOVBZreg x)) + for { + if v_0.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVBZreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRWconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MOVHZreg (SRWconst [c] (MOVHZreg x))) + // result: (SRWconst [c] (MOVHZreg x)) + for { + if v_0.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVHZreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRWconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MOVHZreg (SRWconst [c] x)) + // cond: sizeof(x.Type) <= 16 + // result: (SRWconst [c] x) + for { + if v_0.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(sizeof(x.Type) <= 16) { + break + } + v.reset(OpPPC64SRWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVHZreg (SRDconst [c] x)) + // cond: c>=48 + // result: (SRDconst [c] x) + for { + if v_0.Op != OpPPC64SRDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c >= 48) { + break + } + v.reset(OpPPC64SRDconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVHZreg (SRWconst [c] x)) + // cond: c>=16 + // result: (SRWconst [c] x) + for { + if v_0.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c >= 16) { + break + } + v.reset(OpPPC64SRWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVHZreg y:(MOVHZreg _)) + // result: y + for { + y := v_0 + if y.Op != OpPPC64MOVHZreg { + break + } + v.copyOf(y) + return true + } + 
// match: (MOVHZreg y:(MOVBZreg _)) + // result: y + for { + y := v_0 + if y.Op != OpPPC64MOVBZreg { + break + } + v.copyOf(y) + return true + } + // match: (MOVHZreg y:(MOVHBRload _ _)) + // result: y + for { + y := v_0 + if y.Op != OpPPC64MOVHBRload { + break + } + v.copyOf(y) + return true + } + // match: (MOVHZreg y:(MOVHreg x)) + // result: (MOVHZreg x) + for { + y := v_0 + if y.Op != OpPPC64MOVHreg { + break + } + x := y.Args[0] + v.reset(OpPPC64MOVHZreg) + v.AddArg(x) + return true + } + // match: (MOVHZreg (OR x (MOVWZreg y))) + // result: (MOVHZreg (OR x y)) + for { + if v_0.Op != OpPPC64OR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVWZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVHZreg) + v0 := b.NewValue0(v.Pos, OpPPC64OR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVHZreg (XOR x (MOVWZreg y))) + // result: (MOVHZreg (XOR x y)) + for { + if v_0.Op != OpPPC64XOR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVWZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVHZreg) + v0 := b.NewValue0(v.Pos, OpPPC64XOR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVHZreg (AND x (MOVWZreg y))) + // result: (MOVHZreg (AND x y)) + for { + if v_0.Op != OpPPC64AND { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVWZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVHZreg) + v0 := b.NewValue0(v.Pos, OpPPC64AND, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVHZreg (OR x 
(MOVHZreg y))) + // result: (MOVHZreg (OR x y)) + for { + if v_0.Op != OpPPC64OR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVHZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVHZreg) + v0 := b.NewValue0(v.Pos, OpPPC64OR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVHZreg (XOR x (MOVHZreg y))) + // result: (MOVHZreg (XOR x y)) + for { + if v_0.Op != OpPPC64XOR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVHZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVHZreg) + v0 := b.NewValue0(v.Pos, OpPPC64XOR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVHZreg (AND x (MOVHZreg y))) + // result: (MOVHZreg (AND x y)) + for { + if v_0.Op != OpPPC64AND { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVHZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVHZreg) + v0 := b.NewValue0(v.Pos, OpPPC64AND, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVHZreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x)))) + // result: z + for { + z := v_0 + if z.Op != OpSelect0 { + break + } + z_0 := z.Args[0] + if z_0.Op != OpPPC64ANDCCconst { + break + } + z_0_0 := z_0.Args[0] + if z_0_0.Op != OpPPC64MOVBZload { + break + } + v.copyOf(z) + return true + } + // match: (MOVHZreg z:(AND y (MOVHZload ptr x))) + // result: z + for { + z := v_0 + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + if z_1.Op 
!= OpPPC64MOVHZload { + continue + } + v.copyOf(z) + return true + } + break + } + // match: (MOVHZreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x)))) + // result: z + for { + z := v_0 + if z.Op != OpSelect0 { + break + } + z_0 := z.Args[0] + if z_0.Op != OpPPC64ANDCCconst { + break + } + z_0_0 := z_0.Args[0] + if z_0_0.Op != OpPPC64MOVHZload { + break + } + v.copyOf(z) + return true + } + // match: (MOVHZreg x:(MOVBZload _ _)) + // result: x + for { + x := v_0 + if x.Op != OpPPC64MOVBZload { + break + } + v.copyOf(x) + return true + } + // match: (MOVHZreg x:(MOVBZloadidx _ _ _)) + // result: x + for { + x := v_0 + if x.Op != OpPPC64MOVBZloadidx { + break + } + v.copyOf(x) + return true + } + // match: (MOVHZreg x:(MOVHZload _ _)) + // result: x + for { + x := v_0 + if x.Op != OpPPC64MOVHZload { + break + } + v.copyOf(x) + return true + } + // match: (MOVHZreg x:(MOVHZloadidx _ _ _)) + // result: x + for { + x := v_0 + if x.Op != OpPPC64MOVHZloadidx { + break + } + v.copyOf(x) + return true + } + // match: (MOVHZreg x:(Arg )) + // cond: (is8BitInt(t) || is16BitInt(t)) && !t.IsSigned() + // result: x + for { + x := v_0 + if x.Op != OpArg { + break + } + t := x.Type + if !((is8BitInt(t) || is16BitInt(t)) && !t.IsSigned()) { + break + } + v.copyOf(x) + return true + } + // match: (MOVHZreg (MOVDconst [c])) + // result: (MOVDconst [int64(uint16(c))]) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint16(c))) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVHload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) + // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + 
sym1 := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := auxIntToInt32(p.AuxInt) + sym2 := auxToSym(p.Aux) + ptr := p.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) { + break + } + v.reset(OpPPC64MOVHload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHload [off1] {sym} (ADDconst [off2] x) mem) + // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) + // result: (MOVHload [off1+int32(off2)] {sym} x mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + mem := v_1 + if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) { + break + } + v.reset(OpPPC64MOVHload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(x, mem) + return true + } + // match: (MOVHload [0] {sym} p:(ADD ptr idx) mem) + // cond: sym == nil && p.Uses == 1 + // result: (MOVHloadidx ptr idx mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64ADD { + break + } + idx := p.Args[1] + ptr := p.Args[0] + mem := v_1 + if !(sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVHloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVHloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHloadidx ptr (MOVDconst [c]) mem) + // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVHload [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 
&& is32Bit(c))) { + break + } + v.reset(OpPPC64MOVHload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHloadidx (MOVDconst [c]) ptr mem) + // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVHload [int32(c)] ptr mem) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + ptr := v_1 + mem := v_2 + if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVHload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MOVHreg y:(Select0 (ANDCCconst [c] _))) + // cond: uint64(c) <= 0x7FFF + // result: y + for { + y := v_0 + if y.Op != OpSelect0 { + break + } + y_0 := y.Args[0] + if y_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(y_0.AuxInt) + if !(uint64(c) <= 0x7FFF) { + break + } + v.copyOf(y) + return true + } + // match: (MOVHreg (SRAWconst [c] (MOVBreg x))) + // result: (SRAWconst [c] (MOVBreg x)) + for { + if v_0.Op != OpPPC64SRAWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVBreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MOVHreg (SRAWconst [c] (MOVHreg x))) + // result: (SRAWconst [c] (MOVHreg x)) + for { + if v_0.Op != OpPPC64SRAWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVHreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MOVHreg (SRAWconst [c] x)) + // cond: sizeof(x.Type) <= 16 + // result: 
(SRAWconst [c] x) + for { + if v_0.Op != OpPPC64SRAWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(sizeof(x.Type) <= 16) { + break + } + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVHreg (SRDconst [c] x)) + // cond: c>48 + // result: (SRDconst [c] x) + for { + if v_0.Op != OpPPC64SRDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c > 48) { + break + } + v.reset(OpPPC64SRDconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVHreg (SRDconst [c] x)) + // cond: c==48 + // result: (SRADconst [c] x) + for { + if v_0.Op != OpPPC64SRDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c == 48) { + break + } + v.reset(OpPPC64SRADconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVHreg (SRADconst [c] x)) + // cond: c>=48 + // result: (SRADconst [c] x) + for { + if v_0.Op != OpPPC64SRADconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c >= 48) { + break + } + v.reset(OpPPC64SRADconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVHreg (SRWconst [c] x)) + // cond: c>16 + // result: (SRWconst [c] x) + for { + if v_0.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c > 16) { + break + } + v.reset(OpPPC64SRWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVHreg (SRAWconst [c] x)) + // cond: c>=16 + // result: (SRAWconst [c] x) + for { + if v_0.Op != OpPPC64SRAWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c >= 16) { + break + } + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVHreg (SRWconst [c] x)) + // cond: c==16 + // result: (SRAWconst [c] x) + for { + if v_0.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := 
v_0.Args[0] + if !(c == 16) { + break + } + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVHreg y:(MOVHreg _)) + // result: y + for { + y := v_0 + if y.Op != OpPPC64MOVHreg { + break + } + v.copyOf(y) + return true + } + // match: (MOVHreg y:(MOVBreg _)) + // result: y + for { + y := v_0 + if y.Op != OpPPC64MOVBreg { + break + } + v.copyOf(y) + return true + } + // match: (MOVHreg y:(MOVHZreg x)) + // result: (MOVHreg x) + for { + y := v_0 + if y.Op != OpPPC64MOVHZreg { + break + } + x := y.Args[0] + v.reset(OpPPC64MOVHreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHload _ _)) + // result: x + for { + x := v_0 + if x.Op != OpPPC64MOVHload { + break + } + v.copyOf(x) + return true + } + // match: (MOVHreg x:(MOVHloadidx _ _ _)) + // result: x + for { + x := v_0 + if x.Op != OpPPC64MOVHloadidx { + break + } + v.copyOf(x) + return true + } + // match: (MOVHreg x:(Arg )) + // cond: (is8BitInt(t) || is16BitInt(t)) && t.IsSigned() + // result: x + for { + x := v_0 + if x.Op != OpArg { + break + } + t := x.Type + if !((is8BitInt(t) || is16BitInt(t)) && t.IsSigned()) { + break + } + v.copyOf(x) + return true + } + // match: (MOVHreg (MOVDconst [c])) + // result: (MOVDconst [int64(int16(c))]) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(int16(c))) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) + // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) + // result: (MOVHstore [off1+int32(off2)] {sym} x val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + val := 
v_1 + mem := v_2 + if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) { + break + } + v.reset(OpPPC64MOVHstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(x, val, mem) + return true + } + // match: (MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) + // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := auxIntToInt32(p.AuxInt) + sym2 := auxToSym(p.Aux) + ptr := p.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) { + break + } + v.reset(OpPPC64MOVHstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) + // result: (MOVHstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpPPC64MOVHstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHstore [0] {sym} p:(ADD ptr idx) val mem) + // cond: sym == nil && p.Uses == 1 + // result: (MOVHstoreidx ptr idx val mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64ADD { + break + } + idx := p.Args[1] + ptr := p.Args[0] + val := v_1 + mem := v_2 + if !(sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVHstoreidx) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: 
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MOVHreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MOVHZreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVWZreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MOVWZreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr r:(BRH val) mem) + // cond: r.Uses == 1 + // result: (MOVHBRstore (MOVDaddr [off] {sym} ptr) val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + r := v_1 + if r.Op != OpPPC64BRH { + break + } + val := r.Args[0] + mem := v_2 + if !(r.Uses == 1) { + break + } + v.reset(OpPPC64MOVHBRstore) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, ptr.Type) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = 
symToAux(sym) + v0.AddArg(ptr) + v.AddArg3(v0, val, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (Bswap16 val) mem) + // result: (MOVHBRstore (MOVDaddr [off] {sym} ptr) val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpBswap16 { + break + } + val := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVHBRstore) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, ptr.Type) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg(ptr) + v.AddArg3(v0, val, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstoreidx ptr (MOVDconst [c]) val mem) + // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVHstore [int32(c)] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + val := v_2 + mem := v_3 + if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVHstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstoreidx (MOVDconst [c]) ptr val mem) + // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVHstore [int32(c)] ptr val mem) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + ptr := v_1 + val := v_2 + mem := v_3 + if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVHstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstoreidx ptr idx (MOVHreg x) mem) + // result: (MOVHstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpPPC64MOVHreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpPPC64MOVHstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVHstoreidx ptr idx (MOVHZreg x) mem) 
+ // result: (MOVHstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpPPC64MOVHZreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpPPC64MOVHstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVHstoreidx ptr idx (MOVWreg x) mem) + // result: (MOVHstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpPPC64MOVWreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpPPC64MOVHstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVHstoreidx ptr idx (MOVWZreg x) mem) + // result: (MOVHstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpPPC64MOVWZreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpPPC64MOVHstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVHstoreidx ptr idx r:(BRH val) mem) + // cond: r.Uses == 1 + // result: (MOVHBRstoreidx ptr idx val mem) + for { + ptr := v_0 + idx := v_1 + r := v_2 + if r.Op != OpPPC64BRH { + break + } + val := r.Args[0] + mem := v_3 + if !(r.Uses == 1) { + break + } + v.reset(OpPPC64MOVHBRstoreidx) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVHstoreidx ptr idx (Bswap16 val) mem) + // result: (MOVHBRstoreidx ptr idx val mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpBswap16 { + break + } + val := v_2.Args[0] + mem := v_3 + v.reset(OpPPC64MOVHBRstoreidx) + v.AddArg4(ptr, idx, val, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) + // cond: ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2))) + // result: (MOVHstorezero [off1+int32(off2)] {sym} x mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + mem := v_1 + if !((supportsPPC64PCRel() && 
is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1) + off2))) { + break + } + v.reset(OpPPC64MOVHstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(x, mem) + return true + } + // match: (MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) + // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) + // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := auxIntToInt32(p.AuxInt) + sym2 := auxToSym(p.Aux) + x := p.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) { + break + } + v.reset(OpPPC64MOVHstorezero) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVWBRstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWBRstore ptr (MOVWreg x) mem) + // result: (MOVWBRstore ptr x mem) + for { + ptr := v_0 + if v_1.Op != OpPPC64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVWBRstore) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVWBRstore ptr (MOVWZreg x) mem) + // result: (MOVWBRstore ptr x mem) + for { + ptr := v_0 + if v_1.Op != OpPPC64MOVWZreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVWBRstore) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVWZload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || 
(supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) + // result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := auxIntToInt32(p.AuxInt) + sym2 := auxToSym(p.Aux) + ptr := p.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) { + break + } + v.reset(OpPPC64MOVWZload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWZload [off1] {sym} (ADDconst [off2] x) mem) + // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) + // result: (MOVWZload [off1+int32(off2)] {sym} x mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + mem := v_1 + if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) { + break + } + v.reset(OpPPC64MOVWZload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(x, mem) + return true + } + // match: (MOVWZload [0] {sym} p:(ADD ptr idx) mem) + // cond: sym == nil && p.Uses == 1 + // result: (MOVWZloadidx ptr idx mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64ADD { + break + } + idx := p.Args[1] + ptr := p.Args[0] + mem := v_1 + if !(sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVWZloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVWZloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWZloadidx ptr (MOVDconst [c]) mem) + // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVWZload 
[int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVWZload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWZloadidx (MOVDconst [c]) ptr mem) + // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVWZload [int32(c)] ptr mem) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + ptr := v_1 + mem := v_2 + if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVWZload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MOVWZreg y:(Select0 (ANDCCconst [c] _))) + // cond: uint64(c) <= 0xFFFFFFFF + // result: y + for { + y := v_0 + if y.Op != OpSelect0 { + break + } + y_0 := y.Args[0] + if y_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(y_0.AuxInt) + if !(uint64(c) <= 0xFFFFFFFF) { + break + } + v.copyOf(y) + return true + } + // match: (MOVWZreg y:(AND (MOVDconst [c]) _)) + // cond: uint64(c) <= 0xFFFFFFFF + // result: y + for { + y := v_0 + if y.Op != OpPPC64AND { + break + } + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + if y_0.Op != OpPPC64MOVDconst { + continue + } + c := auxIntToInt64(y_0.AuxInt) + if !(uint64(c) <= 0xFFFFFFFF) { + continue + } + v.copyOf(y) + return true + } + break + } + // match: (MOVWZreg (SRWconst [c] (MOVBZreg x))) + // result: (SRWconst [c] (MOVBZreg x)) + for { + if v_0.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVBZreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRWconst) + v.AuxInt = int64ToAuxInt(c) + 
v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MOVWZreg (SRWconst [c] (MOVHZreg x))) + // result: (SRWconst [c] (MOVHZreg x)) + for { + if v_0.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVHZreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRWconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MOVWZreg (SRWconst [c] (MOVWZreg x))) + // result: (SRWconst [c] (MOVWZreg x)) + for { + if v_0.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVWZreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRWconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MOVWZreg (SRWconst [c] x)) + // cond: sizeof(x.Type) <= 32 + // result: (SRWconst [c] x) + for { + if v_0.Op != OpPPC64SRWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(sizeof(x.Type) <= 32) { + break + } + v.reset(OpPPC64SRWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVWZreg (SRDconst [c] x)) + // cond: c>=32 + // result: (SRDconst [c] x) + for { + if v_0.Op != OpPPC64SRDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c >= 32) { + break + } + v.reset(OpPPC64SRDconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVWZreg y:(MOVWZreg _)) + // result: y + for { + y := v_0 + if y.Op != OpPPC64MOVWZreg { + break + } + v.copyOf(y) + return true + } + // match: (MOVWZreg y:(MOVHZreg _)) + // result: y + for { + y := v_0 + if y.Op != OpPPC64MOVHZreg { + break + } + v.copyOf(y) + return true + } + // match: (MOVWZreg y:(MOVBZreg _)) + // result: y + for { + y := v_0 + if y.Op != 
OpPPC64MOVBZreg { + break + } + v.copyOf(y) + return true + } + // match: (MOVWZreg y:(MOVHBRload _ _)) + // result: y + for { + y := v_0 + if y.Op != OpPPC64MOVHBRload { + break + } + v.copyOf(y) + return true + } + // match: (MOVWZreg y:(MOVWBRload _ _)) + // result: y + for { + y := v_0 + if y.Op != OpPPC64MOVWBRload { + break + } + v.copyOf(y) + return true + } + // match: (MOVWZreg y:(MOVWreg x)) + // result: (MOVWZreg x) + for { + y := v_0 + if y.Op != OpPPC64MOVWreg { + break + } + x := y.Args[0] + v.reset(OpPPC64MOVWZreg) + v.AddArg(x) + return true + } + // match: (MOVWZreg (OR x (MOVWZreg y))) + // result: (MOVWZreg (OR x y)) + for { + if v_0.Op != OpPPC64OR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVWZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVWZreg) + v0 := b.NewValue0(v.Pos, OpPPC64OR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVWZreg (XOR x (MOVWZreg y))) + // result: (MOVWZreg (XOR x y)) + for { + if v_0.Op != OpPPC64XOR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVWZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVWZreg) + v0 := b.NewValue0(v.Pos, OpPPC64XOR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVWZreg (AND x (MOVWZreg y))) + // result: (MOVWZreg (AND x y)) + for { + if v_0.Op != OpPPC64AND { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVWZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVWZreg) + v0 := b.NewValue0(v.Pos, OpPPC64AND, t) + v0.AddArg2(x, y) + 
v.AddArg(v0) + return true + } + break + } + // match: (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x)))) + // result: z + for { + z := v_0 + if z.Op != OpSelect0 { + break + } + z_0 := z.Args[0] + if z_0.Op != OpPPC64ANDCCconst { + break + } + z_0_0 := z_0.Args[0] + if z_0_0.Op != OpPPC64MOVBZload { + break + } + v.copyOf(z) + return true + } + // match: (MOVWZreg z:(AND y (MOVWZload ptr x))) + // result: z + for { + z := v_0 + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + if z_1.Op != OpPPC64MOVWZload { + continue + } + v.copyOf(z) + return true + } + break + } + // match: (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x)))) + // result: z + for { + z := v_0 + if z.Op != OpSelect0 { + break + } + z_0 := z.Args[0] + if z_0.Op != OpPPC64ANDCCconst { + break + } + z_0_0 := z_0.Args[0] + if z_0_0.Op != OpPPC64MOVHZload { + break + } + v.copyOf(z) + return true + } + // match: (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVWZload ptr x)))) + // result: z + for { + z := v_0 + if z.Op != OpSelect0 { + break + } + z_0 := z.Args[0] + if z_0.Op != OpPPC64ANDCCconst { + break + } + z_0_0 := z_0.Args[0] + if z_0_0.Op != OpPPC64MOVWZload { + break + } + v.copyOf(z) + return true + } + // match: (MOVWZreg x:(MOVBZload _ _)) + // result: x + for { + x := v_0 + if x.Op != OpPPC64MOVBZload { + break + } + v.copyOf(x) + return true + } + // match: (MOVWZreg x:(MOVBZloadidx _ _ _)) + // result: x + for { + x := v_0 + if x.Op != OpPPC64MOVBZloadidx { + break + } + v.copyOf(x) + return true + } + // match: (MOVWZreg x:(MOVHZload _ _)) + // result: x + for { + x := v_0 + if x.Op != OpPPC64MOVHZload { + break + } + v.copyOf(x) + return true + } + // match: (MOVWZreg x:(MOVHZloadidx _ _ _)) + // result: x + for { + x := v_0 + if x.Op != OpPPC64MOVHZloadidx { + break + } + v.copyOf(x) + return true + } + // match: (MOVWZreg x:(MOVWZload _ _)) + // result: x + for { + x := v_0 + 
if x.Op != OpPPC64MOVWZload { + break + } + v.copyOf(x) + return true + } + // match: (MOVWZreg x:(MOVWZloadidx _ _ _)) + // result: x + for { + x := v_0 + if x.Op != OpPPC64MOVWZloadidx { + break + } + v.copyOf(x) + return true + } + // match: (MOVWZreg x:(Select0 (LoweredAtomicLoad32 _ _))) + // result: x + for { + x := v_0 + if x.Op != OpSelect0 { + break + } + x_0 := x.Args[0] + if x_0.Op != OpPPC64LoweredAtomicLoad32 { + break + } + v.copyOf(x) + return true + } + // match: (MOVWZreg x:(Arg )) + // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !t.IsSigned() + // result: x + for { + x := v_0 + if x.Op != OpArg { + break + } + t := x.Type + if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !t.IsSigned()) { + break + } + v.copyOf(x) + return true + } + // match: (MOVWZreg (MOVDconst [c])) + // result: (MOVDconst [int64(uint32(c))]) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint32(c))) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) + // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := auxIntToInt32(p.AuxInt) + sym2 := auxToSym(p.Aux) + ptr := p.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) { + break + } + v.reset(OpPPC64MOVWload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: 
(MOVWload [off1] {sym} (ADDconst [off2] x) mem) + // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) + // result: (MOVWload [off1+int32(off2)] {sym} x mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + mem := v_1 + if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) { + break + } + v.reset(OpPPC64MOVWload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(x, mem) + return true + } + // match: (MOVWload [0] {sym} p:(ADD ptr idx) mem) + // cond: sym == nil && p.Uses == 1 + // result: (MOVWloadidx ptr idx mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64ADD { + break + } + idx := p.Args[1] + ptr := p.Args[0] + mem := v_1 + if !(sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVWloadidx) + v.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVWloadidx(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWloadidx ptr (MOVDconst [c]) mem) + // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVWload [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVWload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWloadidx (MOVDconst [c]) ptr mem) + // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVWload [int32(c)] ptr mem) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + ptr := v_1 + mem := v_2 + if !((is16Bit(c) && c%4 == 0) || 
(buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVWload) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MOVWreg y:(Select0 (ANDCCconst [c] _))) + // cond: uint64(c) <= 0xFFFF + // result: y + for { + y := v_0 + if y.Op != OpSelect0 { + break + } + y_0 := y.Args[0] + if y_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(y_0.AuxInt) + if !(uint64(c) <= 0xFFFF) { + break + } + v.copyOf(y) + return true + } + // match: (MOVWreg y:(AND (MOVDconst [c]) _)) + // cond: uint64(c) <= 0x7FFFFFFF + // result: y + for { + y := v_0 + if y.Op != OpPPC64AND { + break + } + y_0 := y.Args[0] + y_1 := y.Args[1] + for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { + if y_0.Op != OpPPC64MOVDconst { + continue + } + c := auxIntToInt64(y_0.AuxInt) + if !(uint64(c) <= 0x7FFFFFFF) { + continue + } + v.copyOf(y) + return true + } + break + } + // match: (MOVWreg (SRAWconst [c] (MOVBreg x))) + // result: (SRAWconst [c] (MOVBreg x)) + for { + if v_0.Op != OpPPC64SRAWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVBreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MOVWreg (SRAWconst [c] (MOVHreg x))) + // result: (SRAWconst [c] (MOVHreg x)) + for { + if v_0.Op != OpPPC64SRAWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVHreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MOVWreg (SRAWconst [c] (MOVWreg x))) + // result: (SRAWconst [c] (MOVWreg x)) 
+ for { + if v_0.Op != OpPPC64SRAWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVWreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpPPC64MOVWreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MOVWreg (SRAWconst [c] x)) + // cond: sizeof(x.Type) <= 32 + // result: (SRAWconst [c] x) + for { + if v_0.Op != OpPPC64SRAWconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(sizeof(x.Type) <= 32) { + break + } + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVWreg (SRDconst [c] x)) + // cond: c>32 + // result: (SRDconst [c] x) + for { + if v_0.Op != OpPPC64SRDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c > 32) { + break + } + v.reset(OpPPC64SRDconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVWreg (SRADconst [c] x)) + // cond: c>=32 + // result: (SRADconst [c] x) + for { + if v_0.Op != OpPPC64SRADconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c >= 32) { + break + } + v.reset(OpPPC64SRADconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVWreg (SRDconst [c] x)) + // cond: c==32 + // result: (SRADconst [c] x) + for { + if v_0.Op != OpPPC64SRDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c == 32) { + break + } + v.reset(OpPPC64SRADconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (MOVWreg y:(MOVWreg _)) + // result: y + for { + y := v_0 + if y.Op != OpPPC64MOVWreg { + break + } + v.copyOf(y) + return true + } + // match: (MOVWreg y:(MOVHreg _)) + // result: y + for { + y := v_0 + if y.Op != OpPPC64MOVHreg { + break + } + v.copyOf(y) + return true + } + // match: (MOVWreg y:(MOVBreg _)) + // result: y + for { + y := v_0 + if y.Op != OpPPC64MOVBreg 
{ + break + } + v.copyOf(y) + return true + } + // match: (MOVWreg y:(MOVWZreg x)) + // result: (MOVWreg x) + for { + y := v_0 + if y.Op != OpPPC64MOVWZreg { + break + } + x := y.Args[0] + v.reset(OpPPC64MOVWreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHload _ _)) + // result: x + for { + x := v_0 + if x.Op != OpPPC64MOVHload { + break + } + v.copyOf(x) + return true + } + // match: (MOVWreg x:(MOVHloadidx _ _ _)) + // result: x + for { + x := v_0 + if x.Op != OpPPC64MOVHloadidx { + break + } + v.copyOf(x) + return true + } + // match: (MOVWreg x:(MOVWload _ _)) + // result: x + for { + x := v_0 + if x.Op != OpPPC64MOVWload { + break + } + v.copyOf(x) + return true + } + // match: (MOVWreg x:(MOVWloadidx _ _ _)) + // result: x + for { + x := v_0 + if x.Op != OpPPC64MOVWloadidx { + break + } + v.copyOf(x) + return true + } + // match: (MOVWreg x:(Arg )) + // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && t.IsSigned() + // result: x + for { + x := v_0 + if x.Op != OpArg { + break + } + t := x.Type + if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && t.IsSigned()) { + break + } + v.copyOf(x) + return true + } + // match: (MOVWreg (MOVDconst [c])) + // result: (MOVDconst [int64(int32(c))]) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(int32(c))) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) + // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) + // result: (MOVWstore [off1+int32(off2)] {sym} x val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + val := v_1 + mem := v_2 + if 
!(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) { + break + } + v.reset(OpPPC64MOVWstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(x, val, mem) + return true + } + // match: (MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) + // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := auxIntToInt32(p.AuxInt) + sym2 := auxToSym(p.Aux) + ptr := p.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) { + break + } + v.reset(OpPPC64MOVWstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) + // result: (MOVWstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpPPC64MOVWstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstore [0] {sym} p:(ADD ptr idx) val mem) + // cond: sym == nil && p.Uses == 1 + // result: (MOVWstoreidx ptr idx val mem) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + sym := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64ADD { + break + } + idx := p.Args[1] + ptr := p.Args[0] + val := v_1 + mem := v_2 + if !(sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVWstoreidx) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVWstore [off] 
{sym} ptr (MOVWreg x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWZreg x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpPPC64MOVWZreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr r:(BRW val) mem) + // cond: r.Uses == 1 + // result: (MOVWBRstore (MOVDaddr [off] {sym} ptr) val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + r := v_1 + if r.Op != OpPPC64BRW { + break + } + val := r.Args[0] + mem := v_2 + if !(r.Uses == 1) { + break + } + v.reset(OpPPC64MOVWBRstore) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, ptr.Type) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg(ptr) + v.AddArg3(v0, val, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (Bswap32 val) mem) + // result: (MOVWBRstore (MOVDaddr [off] {sym} ptr) val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpBswap32 { + break + } + val := v_1.Args[0] + mem := v_2 + v.reset(OpPPC64MOVWBRstore) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, ptr.Type) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg(ptr) + v.AddArg3(v0, val, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVWstoreidx(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWstoreidx ptr (MOVDconst [c]) val mem) + // cond: (is16Bit(c) || 
(buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVWstore [int32(c)] ptr val mem) + for { + ptr := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + val := v_2 + mem := v_3 + if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVWstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstoreidx (MOVDconst [c]) ptr val mem) + // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) + // result: (MOVWstore [int32(c)] ptr val mem) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + ptr := v_1 + val := v_2 + mem := v_3 + if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) { + break + } + v.reset(OpPPC64MOVWstore) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstoreidx ptr idx (MOVWreg x) mem) + // result: (MOVWstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpPPC64MOVWreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpPPC64MOVWstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVWstoreidx ptr idx (MOVWZreg x) mem) + // result: (MOVWstoreidx ptr idx x mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpPPC64MOVWZreg { + break + } + x := v_2.Args[0] + mem := v_3 + v.reset(OpPPC64MOVWstoreidx) + v.AddArg4(ptr, idx, x, mem) + return true + } + // match: (MOVWstoreidx ptr idx r:(BRW val) mem) + // cond: r.Uses == 1 + // result: (MOVWBRstoreidx ptr idx val mem) + for { + ptr := v_0 + idx := v_1 + r := v_2 + if r.Op != OpPPC64BRW { + break + } + val := r.Args[0] + mem := v_3 + if !(r.Uses == 1) { + break + } + v.reset(OpPPC64MOVWBRstoreidx) + v.AddArg4(ptr, idx, val, mem) + return true + } + // match: (MOVWstoreidx ptr idx (Bswap32 val) mem) + // result: (MOVWBRstoreidx ptr idx val mem) + for { + ptr := v_0 + idx := v_1 + if v_2.Op != OpBswap32 { + break + } + val := v_2.Args[0] + mem := 
v_3 + v.reset(OpPPC64MOVWBRstoreidx) + v.AddArg4(ptr, idx, val, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) + // cond: ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2))) + // result: (MOVWstorezero [off1+int32(off2)] {sym} x mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + mem := v_1 + if !((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1) + off2))) { + break + } + v.reset(OpPPC64MOVWstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(x, mem) + return true + } + // match: (MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) + // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) + // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + p := v_0 + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := auxIntToInt32(p.AuxInt) + sym2 := auxToSym(p.Aux) + x := p.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) { + break + } + v.reset(OpPPC64MOVWstorezero) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MTVSRD(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MTVSRD (MOVDconst [c])) + // cond: !math.IsNaN(math.Float64frombits(uint64(c))) + // result: (FMOVDconst [math.Float64frombits(uint64(c))]) + for { + if v_0.Op != 
OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + if !(!math.IsNaN(math.Float64frombits(uint64(c)))) { + break + } + v.reset(OpPPC64FMOVDconst) + v.AuxInt = float64ToAuxInt(math.Float64frombits(uint64(c))) + return true + } + // match: (MTVSRD x:(MOVDload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (FMOVDload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpPPC64MOVDload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpPPC64FMOVDload, typ.Float64) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MULLD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MULLD x (MOVDconst [c])) + // cond: is16Bit(c) + // result: (MULLDconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is16Bit(c)) { + continue + } + v.reset(OpPPC64MULLDconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuePPC64_OpPPC64MULLW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MULLW x (MOVDconst [c])) + // cond: is16Bit(c) + // result: (MULLWconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is16Bit(c)) { + continue + } + v.reset(OpPPC64MULLWconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuePPC64_OpPPC64NEG(v *Value) bool { + v_0 := v.Args[0] + // match: (NEG (ADDconst [c] x)) + // cond: is32Bit(-c) + // result: (SUBFCconst [-c] x) + for { + 
if v_0.Op != OpPPC64ADDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(-c)) { + break + } + v.reset(OpPPC64SUBFCconst) + v.AuxInt = int64ToAuxInt(-c) + v.AddArg(x) + return true + } + // match: (NEG (SUBFCconst [c] x)) + // cond: is32Bit(-c) + // result: (ADDconst [-c] x) + for { + if v_0.Op != OpPPC64SUBFCconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(-c)) { + break + } + v.reset(OpPPC64ADDconst) + v.AuxInt = int64ToAuxInt(-c) + v.AddArg(x) + return true + } + // match: (NEG (SUB x y)) + // result: (SUB y x) + for { + if v_0.Op != OpPPC64SUB { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpPPC64SUB) + v.AddArg2(y, x) + return true + } + // match: (NEG (NEG x)) + // result: x + for { + if v_0.Op != OpPPC64NEG { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64NOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NOR (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [^(c|d)]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64MOVDconst { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpPPC64MOVDconst { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(^(c | d)) + return true + } + break + } + return false +} +func rewriteValuePPC64_OpPPC64NotEqual(v *Value) bool { + v_0 := v.Args[0] + // match: (NotEqual (FlagEQ)) + // result: (MOVDconst [0]) + for { + if v_0.Op != OpPPC64FlagEQ { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (NotEqual (FlagLT)) + // result: (MOVDconst [1]) + for { + if v_0.Op != OpPPC64FlagLT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (NotEqual (FlagGT)) + // result: (MOVDconst [1]) + for { + if v_0.Op != OpPPC64FlagGT { + break + } + 
v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (NotEqual (InvertFlags x)) + // result: (NotEqual x) + for { + if v_0.Op != OpPPC64InvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpPPC64NotEqual) + v.AddArg(x) + return true + } + // match: (NotEqual cmp) + // result: (SETBCR [2] cmp) + for { + cmp := v_0 + v.reset(OpPPC64SETBCR) + v.AuxInt = int32ToAuxInt(2) + v.AddArg(cmp) + return true + } +} +func rewriteValuePPC64_OpPPC64OR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OR x (NOR y y)) + // result: (ORN x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpPPC64NOR { + continue + } + y := v_1.Args[1] + if y != v_1.Args[0] { + continue + } + v.reset(OpPPC64ORN) + v.AddArg2(x, y) + return true + } + break + } + // match: (OR (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [c|d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64MOVDconst { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpPPC64MOVDconst { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(c | d) + return true + } + break + } + // match: (OR x (MOVDconst [c])) + // cond: isU32Bit(c) + // result: (ORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isU32Bit(c)) { + continue + } + v.reset(OpPPC64ORconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuePPC64_OpPPC64ORN(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ORN x (MOVDconst [-1])) + // result: x + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 { + break + } + v.copyOf(x) + return true + } + // match: (ORN (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [c|^d]) + 
for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpPPC64MOVDconst { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(c | ^d) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64ORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ORconst [c] (ORconst [d] x)) + // result: (ORconst [c|d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64ORconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpPPC64ORconst) + v.AuxInt = int64ToAuxInt(c | d) + v.AddArg(x) + return true + } + // match: (ORconst [-1] _) + // result: (MOVDconst [-1]) + for { + if auxIntToInt64(v.AuxInt) != -1 { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + // match: (ORconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64ROTL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ROTL x (MOVDconst [c])) + // result: (ROTLconst x [c&63]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpPPC64ROTLconst) + v.AuxInt = int64ToAuxInt(c & 63) + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64ROTLW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ROTLW x (MOVDconst [c])) + // result: (ROTLWconst x [c&31]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpPPC64ROTLWconst) + v.AuxInt = int64ToAuxInt(c & 31) + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64ROTLWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ROTLWconst [r] (AND (MOVDconst [m]) x)) + // cond: isPPC64WordRotateMask(m) + // result: (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x) + for { + r := 
auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64AND { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpPPC64MOVDconst { + continue + } + m := auxIntToInt64(v_0_0.AuxInt) + x := v_0_1 + if !(isPPC64WordRotateMask(m)) { + continue + } + v.reset(OpPPC64RLWINM) + v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, rotateLeft32(m, r), 32)) + v.AddArg(x) + return true + } + break + } + // match: (ROTLWconst [r] (Select0 (ANDCCconst [m] x))) + // cond: isPPC64WordRotateMask(m) + // result: (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x) + for { + r := auxIntToInt64(v.AuxInt) + if v_0.Op != OpSelect0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64ANDCCconst { + break + } + m := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if !(isPPC64WordRotateMask(m)) { + break + } + v.reset(OpPPC64RLWINM) + v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, rotateLeft32(m, r), 32)) + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SETBC(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SETBC [0] (FlagLT)) + // result: (MOVDconst [1]) + for { + if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64FlagLT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SETBC [0] (FlagGT)) + // result: (MOVDconst [0]) + for { + if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64FlagGT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SETBC [0] (FlagEQ)) + // result: (MOVDconst [0]) + for { + if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64FlagEQ { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SETBC [1] (FlagGT)) + // result: (MOVDconst [1]) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpPPC64FlagGT { + break + } + 
v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SETBC [1] (FlagLT)) + // result: (MOVDconst [0]) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpPPC64FlagLT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SETBC [1] (FlagEQ)) + // result: (MOVDconst [0]) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpPPC64FlagEQ { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SETBC [2] (FlagEQ)) + // result: (MOVDconst [1]) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64FlagEQ { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SETBC [2] (FlagLT)) + // result: (MOVDconst [0]) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64FlagLT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SETBC [2] (FlagGT)) + // result: (MOVDconst [0]) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64FlagGT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SETBC [0] (InvertFlags bool)) + // result: (SETBC [1] bool) + for { + if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64InvertFlags { + break + } + bool := v_0.Args[0] + v.reset(OpPPC64SETBC) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(bool) + return true + } + // match: (SETBC [1] (InvertFlags bool)) + // result: (SETBC [0] bool) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpPPC64InvertFlags { + break + } + bool := v_0.Args[0] + v.reset(OpPPC64SETBC) + v.AuxInt = int32ToAuxInt(0) + v.AddArg(bool) + return true + } + // match: (SETBC [2] (InvertFlags bool)) + // result: (SETBC [2] bool) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64InvertFlags { + break + } + bool := v_0.Args[0] + v.reset(OpPPC64SETBC) + v.AuxInt = int32ToAuxInt(2) + v.AddArg(bool) + return true + } + // 
match: (SETBC [n] (InvertFlags bool)) + // result: (SETBCR [n] bool) + for { + n := auxIntToInt32(v.AuxInt) + if v_0.Op != OpPPC64InvertFlags { + break + } + bool := v_0.Args[0] + v.reset(OpPPC64SETBCR) + v.AuxInt = int32ToAuxInt(n) + v.AddArg(bool) + return true + } + // match: (SETBC [2] (CMPconst [0] (Select0 (ANDCCconst [1] z)))) + // result: (XORconst [1] (Select0 (ANDCCconst [1] z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { + break + } + z := v_0_0_0.Args[0] + v.reset(OpPPC64XORconst) + v.AuxInt = int64ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(1) + v1.AddArg(z) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (SETBC [2] (CMPWconst [0] (Select0 (ANDCCconst [1] z)))) + // result: (XORconst [1] (Select0 (ANDCCconst [1] z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { + break + } + z := v_0_0_0.Args[0] + v.reset(OpPPC64XORconst) + v.AuxInt = int64ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(1) + v1.AddArg(z) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (SETBC [2] (CMPWconst [0] (Select0 (ANDCCconst [n] z)))) + // result: (SETBC [2] (Select1 (ANDCCconst [n] z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 
:= v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + n := auxIntToInt64(v_0_0_0.AuxInt) + z := v_0_0_0.Args[0] + v.reset(OpPPC64SETBC) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(n) + v1.AddArg(z) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (SETBC [2] (CMPconst [0] a:(AND y z))) + // cond: a.Uses == 1 + // result: (SETBC [2] (Select1 (ANDCC y z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + a := v_0.Args[0] + if a.Op != OpPPC64AND { + break + } + z := a.Args[1] + y := a.Args[0] + if !(a.Uses == 1) { + break + } + v.reset(OpPPC64SETBC) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags)) + v1.AddArg2(y, z) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (SETBC [2] (CMPconst [0] o:(OR y z))) + // cond: o.Uses == 1 + // result: (SETBC [2] (Select1 (ORCC y z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + o := v_0.Args[0] + if o.Op != OpPPC64OR { + break + } + z := o.Args[1] + y := o.Args[0] + if !(o.Uses == 1) { + break + } + v.reset(OpPPC64SETBC) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(y, z) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (SETBC [2] (CMPconst [0] a:(XOR y z))) + // cond: a.Uses == 1 + // result: (SETBC [2] (Select1 (XORCC y z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + a := v_0.Args[0] + 
if a.Op != OpPPC64XOR { + break + } + z := a.Args[1] + y := a.Args[0] + if !(a.Uses == 1) { + break + } + v.reset(OpPPC64SETBC) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(y, z) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SETBCR(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SETBCR [0] (FlagLT)) + // result: (MOVDconst [0]) + for { + if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64FlagLT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SETBCR [0] (FlagGT)) + // result: (MOVDconst [1]) + for { + if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64FlagGT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SETBCR [0] (FlagEQ)) + // result: (MOVDconst [1]) + for { + if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64FlagEQ { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SETBCR [1] (FlagGT)) + // result: (MOVDconst [0]) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpPPC64FlagGT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SETBCR [1] (FlagLT)) + // result: (MOVDconst [1]) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpPPC64FlagLT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SETBCR [1] (FlagEQ)) + // result: (MOVDconst [1]) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpPPC64FlagEQ { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SETBCR [2] (FlagEQ)) + // result: (MOVDconst [0]) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64FlagEQ { + break + } + 
v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SETBCR [2] (FlagLT)) + // result: (MOVDconst [1]) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64FlagLT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SETBCR [2] (FlagGT)) + // result: (MOVDconst [1]) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64FlagGT { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SETBCR [0] (InvertFlags bool)) + // result: (SETBCR [1] bool) + for { + if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64InvertFlags { + break + } + bool := v_0.Args[0] + v.reset(OpPPC64SETBCR) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(bool) + return true + } + // match: (SETBCR [1] (InvertFlags bool)) + // result: (SETBCR [0] bool) + for { + if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpPPC64InvertFlags { + break + } + bool := v_0.Args[0] + v.reset(OpPPC64SETBCR) + v.AuxInt = int32ToAuxInt(0) + v.AddArg(bool) + return true + } + // match: (SETBCR [2] (InvertFlags bool)) + // result: (SETBCR [2] bool) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64InvertFlags { + break + } + bool := v_0.Args[0] + v.reset(OpPPC64SETBCR) + v.AuxInt = int32ToAuxInt(2) + v.AddArg(bool) + return true + } + // match: (SETBCR [n] (InvertFlags bool)) + // result: (SETBC [n] bool) + for { + n := auxIntToInt32(v.AuxInt) + if v_0.Op != OpPPC64InvertFlags { + break + } + bool := v_0.Args[0] + v.reset(OpPPC64SETBC) + v.AuxInt = int32ToAuxInt(n) + v.AddArg(bool) + return true + } + // match: (SETBCR [2] (CMPconst [0] (Select0 (ANDCCconst [1] z)))) + // result: (Select0 (ANDCCconst [1] z )) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst || 
auxIntToInt64(v_0_0_0.AuxInt) != 1 { + break + } + z := v_0_0_0.Args[0] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(1) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (SETBCR [2] (CMPWconst [0] (Select0 (ANDCCconst [1] z)))) + // result: (Select0 (ANDCCconst [1] z )) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { + break + } + z := v_0_0_0.Args[0] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(1) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (SETBCR [2] (CMPWconst [0] (Select0 (ANDCCconst [n] z)))) + // result: (SETBCR [2] (Select1 (ANDCCconst [n] z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + n := auxIntToInt64(v_0_0_0.AuxInt) + z := v_0_0_0.Args[0] + v.reset(OpPPC64SETBCR) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(n) + v1.AddArg(z) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (SETBCR [2] (CMPconst [0] a:(AND y z))) + // cond: a.Uses == 1 + // result: (SETBCR [2] (Select1 (ANDCC y z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + a := v_0.Args[0] + if a.Op != OpPPC64AND { + break + } + z := a.Args[1] + y := 
a.Args[0] + if !(a.Uses == 1) { + break + } + v.reset(OpPPC64SETBCR) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags)) + v1.AddArg2(y, z) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (SETBCR [2] (CMPconst [0] o:(OR y z))) + // cond: o.Uses == 1 + // result: (SETBCR [2] (Select1 (ORCC y z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + o := v_0.Args[0] + if o.Op != OpPPC64OR { + break + } + z := o.Args[1] + y := o.Args[0] + if !(o.Uses == 1) { + break + } + v.reset(OpPPC64SETBCR) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(y, z) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (SETBCR [2] (CMPconst [0] a:(XOR y z))) + // cond: a.Uses == 1 + // result: (SETBCR [2] (Select1 (XORCC y z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + a := v_0.Args[0] + if a.Op != OpPPC64XOR { + break + } + z := a.Args[1] + y := a.Args[0] + if !(a.Uses == 1) { + break + } + v.reset(OpPPC64SETBCR) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(y, z) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SLD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SLD x (MOVDconst [c])) + // result: (SLDconst [c&63 | (c>>6&1*63)] x) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpPPC64SLDconst) + v.AuxInt = int64ToAuxInt(c&63 | (c >> 6 & 1 * 63)) + v.AddArg(x) + return true + } + return false +} +func 
rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SLDconst [l] (SRWconst [r] x)) + // cond: mergePPC64SldiSrw(l,r) != 0 + // result: (RLWINM [mergePPC64SldiSrw(l,r)] x) + for { + l := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64SRWconst { + break + } + r := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(mergePPC64SldiSrw(l, r) != 0) { + break + } + v.reset(OpPPC64RLWINM) + v.AuxInt = int64ToAuxInt(mergePPC64SldiSrw(l, r)) + v.AddArg(x) + return true + } + // match: (SLDconst [c] z:(MOVBZreg x)) + // cond: c < 8 && z.Uses == 1 + // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64MOVBZreg { + break + } + x := z.Args[0] + if !(c < 8 && z.Uses == 1) { + break + } + v.reset(OpPPC64CLRLSLDI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 56, 63, 64)) + v.AddArg(x) + return true + } + // match: (SLDconst [c] z:(MOVHZreg x)) + // cond: c < 16 && z.Uses == 1 + // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64MOVHZreg { + break + } + x := z.Args[0] + if !(c < 16 && z.Uses == 1) { + break + } + v.reset(OpPPC64CLRLSLDI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 48, 63, 64)) + v.AddArg(x) + return true + } + // match: (SLDconst [c] z:(MOVWZreg x)) + // cond: c < 32 && z.Uses == 1 + // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64MOVWZreg { + break + } + x := z.Args[0] + if !(c < 32 && z.Uses == 1) { + break + } + v.reset(OpPPC64CLRLSLDI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 32, 63, 64)) + v.AddArg(x) + return true + } + // match: (SLDconst [c] z:(Select0 (ANDCCconst [d] x))) + // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) + // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) + for { + c := 
auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpSelect0 { + break + } + z_0 := z.Args[0] + if z_0.Op != OpPPC64ANDCCconst { + break + } + d := auxIntToInt64(z_0.AuxInt) + x := z_0.Args[0] + if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))) { + break + } + v.reset(OpPPC64CLRLSLDI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 64-getPPC64ShiftMaskLength(d), 63, 64)) + v.AddArg(x) + return true + } + // match: (SLDconst [c] z:(AND (MOVDconst [d]) x)) + // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d)) + // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + if z_0.Op != OpPPC64MOVDconst { + continue + } + d := auxIntToInt64(z_0.AuxInt) + x := z_1 + if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))) { + continue + } + v.reset(OpPPC64CLRLSLDI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 64-getPPC64ShiftMaskLength(d), 63, 64)) + v.AddArg(x) + return true + } + break + } + // match: (SLDconst [c] z:(MOVWreg x)) + // cond: c < 32 && buildcfg.GOPPC64 >= 9 + // result: (EXTSWSLconst [c] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64MOVWreg { + break + } + x := z.Args[0] + if !(c < 32 && buildcfg.GOPPC64 >= 9) { + break + } + v.reset(OpPPC64EXTSWSLconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SLW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SLW x (MOVDconst [c])) + // result: (SLWconst [c&31 | (c>>5&1*31)] x) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpPPC64SLWconst) + v.AuxInt = int64ToAuxInt(c&31 | (c >> 5 & 1 * 31)) + v.AddArg(x) + return 
true + } + return false +} +func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SLWconst [c] z:(MOVBZreg x)) + // cond: z.Uses == 1 && c < 8 + // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64MOVBZreg { + break + } + x := z.Args[0] + if !(z.Uses == 1 && c < 8) { + break + } + v.reset(OpPPC64CLRLSLWI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 24, 31, 32)) + v.AddArg(x) + return true + } + // match: (SLWconst [c] z:(MOVHZreg x)) + // cond: z.Uses == 1 && c < 16 + // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64MOVHZreg { + break + } + x := z.Args[0] + if !(z.Uses == 1 && c < 16) { + break + } + v.reset(OpPPC64CLRLSLWI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 16, 31, 32)) + v.AddArg(x) + return true + } + // match: (SLWconst [c] z:(Select0 (ANDCCconst [d] x))) + // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) + // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpSelect0 { + break + } + z_0 := z.Args[0] + if z_0.Op != OpPPC64ANDCCconst { + break + } + d := auxIntToInt64(z_0.AuxInt) + x := z_0.Args[0] + if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d))) { + break + } + v.reset(OpPPC64CLRLSLWI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 32-getPPC64ShiftMaskLength(d), 31, 32)) + v.AddArg(x) + return true + } + // match: (SLWconst [c] z:(AND (MOVDconst [d]) x)) + // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) + // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; 
_i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + if z_0.Op != OpPPC64MOVDconst { + continue + } + d := auxIntToInt64(z_0.AuxInt) + x := z_1 + if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d))) { + continue + } + v.reset(OpPPC64CLRLSLWI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 32-getPPC64ShiftMaskLength(d), 31, 32)) + v.AddArg(x) + return true + } + break + } + // match: (SLWconst [c] z:(MOVWreg x)) + // cond: c < 32 && buildcfg.GOPPC64 >= 9 + // result: (EXTSWSLconst [c] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64MOVWreg { + break + } + x := z.Args[0] + if !(c < 32 && buildcfg.GOPPC64 >= 9) { + break + } + v.reset(OpPPC64EXTSWSLconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SRAD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRAD x (MOVDconst [c])) + // result: (SRADconst [c&63 | (c>>6&1*63)] x) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpPPC64SRADconst) + v.AuxInt = int64ToAuxInt(c&63 | (c >> 6 & 1 * 63)) + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SRAW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRAW x (MOVDconst [c])) + // result: (SRAWconst [c&31 | (c>>5&1*31)] x) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(c&31 | (c >> 5 & 1 * 31)) + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SRD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRD x (MOVDconst [c])) + // result: (SRDconst [c&63 | (c>>6&1*63)] x) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpPPC64SRDconst) + v.AuxInt = int64ToAuxInt(c&63 | (c >> 6 & 1 * 63)) + v.AddArg(x) + return true + } + return 
false +} +func rewriteValuePPC64_OpPPC64SRW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRW x (MOVDconst [c])) + // result: (SRWconst [c&31 | (c>>5&1*31)] x) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpPPC64SRWconst) + v.AuxInt = int64ToAuxInt(c&31 | (c >> 5 & 1 * 31)) + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SRWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SRWconst (Select0 (ANDCCconst [m] x)) [s]) + // cond: mergePPC64RShiftMask(m>>uint(s),s,32) == 0 + // result: (MOVDconst [0]) + for { + s := auxIntToInt64(v.AuxInt) + if v_0.Op != OpSelect0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64ANDCCconst { + break + } + m := auxIntToInt64(v_0_0.AuxInt) + if !(mergePPC64RShiftMask(m>>uint(s), s, 32) == 0) { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SRWconst (Select0 (ANDCCconst [m] x)) [s]) + // cond: mergePPC64AndSrwi(m>>uint(s),s) != 0 + // result: (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x) + for { + s := auxIntToInt64(v.AuxInt) + if v_0.Op != OpSelect0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64ANDCCconst { + break + } + m := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if !(mergePPC64AndSrwi(m>>uint(s), s) != 0) { + break + } + v.reset(OpPPC64RLWINM) + v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m>>uint(s), s)) + v.AddArg(x) + return true + } + // match: (SRWconst (AND (MOVDconst [m]) x) [s]) + // cond: mergePPC64RShiftMask(m>>uint(s),s,32) == 0 + // result: (MOVDconst [0]) + for { + s := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64AND { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpPPC64MOVDconst { + continue + } + m := auxIntToInt64(v_0_0.AuxInt) + if !(mergePPC64RShiftMask(m>>uint(s), s, 32) == 0) { + continue + } 
+ v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + break + } + // match: (SRWconst (AND (MOVDconst [m]) x) [s]) + // cond: mergePPC64AndSrwi(m>>uint(s),s) != 0 + // result: (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x) + for { + s := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64AND { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpPPC64MOVDconst { + continue + } + m := auxIntToInt64(v_0_0.AuxInt) + x := v_0_1 + if !(mergePPC64AndSrwi(m>>uint(s), s) != 0) { + continue + } + v.reset(OpPPC64RLWINM) + v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m>>uint(s), s)) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuePPC64_OpPPC64SUB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUB x (MOVDconst [c])) + // cond: is32Bit(-c) + // result: (ADDconst [-c] x) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(-c)) { + break + } + v.reset(OpPPC64ADDconst) + v.AuxInt = int64ToAuxInt(-c) + v.AddArg(x) + return true + } + // match: (SUB (MOVDconst [c]) x) + // cond: is32Bit(c) + // result: (SUBFCconst [c] x) + for { + if v_0.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(is32Bit(c)) { + break + } + v.reset(OpPPC64SUBFCconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SUBE(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SUBE x y (Select1 (SUBCconst (MOVDconst [0]) [0]))) + // result: (SUBC x y) + for { + x := v_0 + y := v_1 + if v_2.Op != OpSelect1 || v_2.Type != typ.UInt64 { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpPPC64SUBCconst || auxIntToInt64(v_2_0.AuxInt) != 0 { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != 
OpPPC64MOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 { + break + } + v.reset(OpPPC64SUBC) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64SUBFCconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SUBFCconst [c] (NEG x)) + // result: (ADDconst [c] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64NEG { + break + } + x := v_0.Args[0] + v.reset(OpPPC64ADDconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (SUBFCconst [c] (SUBFCconst [d] x)) + // cond: is32Bit(c-d) + // result: (ADDconst [c-d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64SUBFCconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(c - d)) { + break + } + v.reset(OpPPC64ADDconst) + v.AuxInt = int64ToAuxInt(c - d) + v.AddArg(x) + return true + } + // match: (SUBFCconst [0] x) + // result: (NEG x) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.reset(OpPPC64NEG) + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XOR (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [c^d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64MOVDconst { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpPPC64MOVDconst { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(c ^ d) + return true + } + break + } + // match: (XOR x (MOVDconst [c])) + // cond: isU32Bit(c) + // result: (XORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isU32Bit(c)) { + continue + } + v.reset(OpPPC64XORconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuePPC64_OpPPC64XORconst(v *Value) bool { 
+ v_0 := v.Args[0] + // match: (XORconst [c] (XORconst [d] x)) + // result: (XORconst [c^d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpPPC64XORconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpPPC64XORconst) + v.AuxInt = int64ToAuxInt(c ^ d) + v.AddArg(x) + return true + } + // match: (XORconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (XORconst [1] (SETBCR [n] cmp)) + // result: (SETBC [n] cmp) + for { + if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpPPC64SETBCR { + break + } + n := auxIntToInt32(v_0.AuxInt) + cmp := v_0.Args[0] + v.reset(OpPPC64SETBC) + v.AuxInt = int32ToAuxInt(n) + v.AddArg(cmp) + return true + } + // match: (XORconst [1] (SETBC [n] cmp)) + // result: (SETBCR [n] cmp) + for { + if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpPPC64SETBC { + break + } + n := auxIntToInt32(v_0.AuxInt) + cmp := v_0.Args[0] + v.reset(OpPPC64SETBCR) + v.AuxInt = int32ToAuxInt(n) + v.AddArg(cmp) + return true + } + return false +} +func rewriteValuePPC64_OpPanicBounds(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 0 + // result: (LoweredPanicBoundsA [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 0) { + break + } + v.reset(OpPPC64LoweredPanicBoundsA) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 1 + // result: (LoweredPanicBoundsB [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 1) { + break + } + v.reset(OpPPC64LoweredPanicBoundsB) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 2 + // result: 
(LoweredPanicBoundsC [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 2) { + break + } + v.reset(OpPPC64LoweredPanicBoundsC) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + return false +} +func rewriteValuePPC64_OpPopCount16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount16 x) + // result: (POPCNTW (MOVHZreg x)) + for { + x := v_0 + v.reset(OpPPC64POPCNTW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpPopCount32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount32 x) + // result: (POPCNTW (MOVWZreg x)) + for { + x := v_0 + v.reset(OpPPC64POPCNTW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpPopCount8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount8 x) + // result: (POPCNTB (MOVBZreg x)) + for { + x := v_0 + v.reset(OpPPC64POPCNTB) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpPrefetchCache(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PrefetchCache ptr mem) + // result: (DCBT ptr mem [0]) + for { + ptr := v_0 + mem := v_1 + v.reset(OpPPC64DCBT) + v.AuxInt = int64ToAuxInt(0) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValuePPC64_OpPrefetchCacheStreamed(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PrefetchCacheStreamed ptr mem) + // result: (DCBT ptr mem [16]) + for { + ptr := v_0 + mem := v_1 + v.reset(OpPPC64DCBT) + v.AuxInt = int64ToAuxInt(16) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValuePPC64_OpRotateLeft16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ 
:= &b.Func.Config.Types + // match: (RotateLeft16 x (MOVDconst [c])) + // result: (Or16 (Lsh16x64 x (MOVDconst [c&15])) (Rsh16Ux64 x (MOVDconst [-c&15]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr16) + v0 := b.NewValue0(v.Pos, OpLsh16x64, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(c & 15) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) + v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v3.AuxInt = int64ToAuxInt(-c & 15) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValuePPC64_OpRotateLeft8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft8 x (MOVDconst [c])) + // result: (Or8 (Lsh8x64 x (MOVDconst [c&7])) (Rsh8Ux64 x (MOVDconst [-c&7]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpLsh8x64, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(c & 7) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) + v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v3.AuxInt = int64ToAuxInt(-c & 7) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValuePPC64_OpRsh16Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRD (MOVHZreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16Ux16 x y) + // result: (ISEL [2] (SRD (MOVHZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFF0] y))) + for { + t := v.Type + x := v_0 + y := v_1 + 
v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SRD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v4.AuxInt = int64ToAuxInt(0xFFF0) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpRsh16Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRD (MOVHZreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16Ux32 x y) + // result: (ISEL [0] (SRD (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst y [16])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SRD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(16) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpRsh16Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux64 x (MOVDconst [c])) + // cond: uint64(c) < 16 + // result: (SRWconst (ZeroExt16to32 x) [c]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 16) { + break + } + v.reset(OpPPC64SRWconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, 
OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh16Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRD (MOVHZreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16Ux64 x y) + // result: (ISEL [0] (SRD (MOVHZreg x) y) (MOVDconst [0]) (CMPUconst y [16])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SRD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(16) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpRsh16Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRD (MOVHZreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16Ux8 x y) + // result: (ISEL [2] (SRD (MOVHZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00F0] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SRD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v4.AuxInt = int64ToAuxInt(0x00F0) + 
v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpRsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SRAD (MOVHreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16x16 x y) + // result: (ISEL [2] (SRAD (MOVHreg x) y) (SRADconst (MOVHreg x) [15]) (Select1 (ANDCCconst [0xFFF0] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t) + v2.AuxInt = int64ToAuxInt(15) + v2.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v4.AuxInt = int64ToAuxInt(0xFFF0) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpRsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SRAD (MOVHreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16x32 x y) + // result: (ISEL [0] (SRAD (MOVHreg x) y) (SRADconst (MOVHreg x) [15]) (CMPWUconst y [16])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := 
b.NewValue0(v.Pos, OpPPC64SRADconst, t) + v2.AuxInt = int64ToAuxInt(15) + v2.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(16) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpRsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x64 x (MOVDconst [c])) + // cond: uint64(c) >= 16 + // result: (SRAWconst (SignExt16to32 x) [63]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 16) { + break + } + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(63) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh16x64 x (MOVDconst [c])) + // cond: uint64(c) < 16 + // result: (SRAWconst (SignExt16to32 x) [c]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 16) { + break + } + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SRAD (MOVHreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16x64 x y) + // result: (ISEL [0] (SRAD (MOVHreg x) y) (SRADconst (MOVHreg x) [15]) (CMPUconst y [16])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t) + v2.AuxInt = int64ToAuxInt(15) + v2.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags) + 
v3.AuxInt = int64ToAuxInt(16) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpRsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SRAD (MOVHreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16x8 x y) + // result: (ISEL [2] (SRAD (MOVHreg x) y) (SRADconst (MOVHreg x) [15]) (Select1 (ANDCCconst [0x00F0] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t) + v2.AuxInt = int64ToAuxInt(15) + v2.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v4.AuxInt = int64ToAuxInt(0x00F0) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpRsh32Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRW) + v.AddArg2(x, y) + return true + } + // match: (Rsh32Ux16 x y) + // result: (ISEL [2] (SRW x y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFE0] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3 := 
b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(0xFFE0) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpRsh32Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRW) + v.AddArg2(x, y) + return true + } + // match: (Rsh32Ux32 x y) + // result: (ISEL [0] (SRW x y) (MOVDconst [0]) (CMPWUconst y [32])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux64 x (MOVDconst [c])) + // cond: uint64(c) < 32 + // result: (SRWconst x [c]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 32) { + break + } + v.reset(OpPPC64SRWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (Rsh32Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRW) + v.AddArg2(x, y) + return true + } + // match: (Rsh32Ux64 x y) + // result: (ISEL [0] (SRW x y) (MOVDconst [0]) (CMPUconst y [32])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, 
typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(32) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpRsh32Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRW) + v.AddArg2(x, y) + return true + } + // match: (Rsh32Ux8 x y) + // result: (ISEL [2] (SRW x y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00E0] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(0x00E0) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpRsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SRAW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAW) + v.AddArg2(x, y) + return true + } + // match: (Rsh32x16 x y) + // result: (ISEL [2] (SRAW x y) (SRAWconst x [31]) (Select1 (ANDCCconst [0xFFE0] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SRAW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64SRAWconst, t) + v1.AuxInt = int64ToAuxInt(31) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, 
types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(0xFFE0) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpRsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SRAW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAW) + v.AddArg2(x, y) + return true + } + // match: (Rsh32x32 x y) + // result: (ISEL [0] (SRAW x y) (SRAWconst x [31]) (CMPWUconst y [32])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SRAW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64SRAWconst, t) + v1.AuxInt = int64ToAuxInt(31) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpRsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32x64 x (MOVDconst [c])) + // cond: uint64(c) >= 32 + // result: (SRAWconst x [63]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 32) { + break + } + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(63) + v.AddArg(x) + return true + } + // match: (Rsh32x64 x (MOVDconst [c])) + // cond: uint64(c) < 32 + // result: (SRAWconst x [c]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 32) { + break + } + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (Rsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SRAW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAW) + v.AddArg2(x, y) + return true + } + // match: (Rsh32x64 x y) + // result: (ISEL [0] (SRAW x y) (SRAWconst x [31]) 
(CMPUconst y [32])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SRAW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64SRAWconst, t) + v1.AuxInt = int64ToAuxInt(31) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(32) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpRsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SRAW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAW) + v.AddArg2(x, y) + return true + } + // match: (Rsh32x8 x y) + // result: (ISEL [2] (SRAW x y) (SRAWconst x [31]) (Select1 (ANDCCconst [0x00E0] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SRAW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64SRAWconst, t) + v1.AuxInt = int64ToAuxInt(31) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(0x00E0) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpRsh64Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRD) + v.AddArg2(x, y) + return true + } + // match: (Rsh64Ux16 x y) + // result: (ISEL [2] (SRD x y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFC0] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SRD, t) + 
v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(0xFFC0) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpRsh64Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRD) + v.AddArg2(x, y) + return true + } + // match: (Rsh64Ux32 x y) + // result: (ISEL [0] (SRD x y) (MOVDconst [0]) (CMPWUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SRD, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux64 x (MOVDconst [c])) + // cond: uint64(c) < 64 + // result: (SRDconst x [c]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 64) { + break + } + v.reset(OpPPC64SRDconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (Rsh64Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRD) + v.AddArg2(x, y) + return true + } + // match: (Rsh64Ux64 x y) + // result: (ISEL [0] (SRD x y) (MOVDconst [0]) (CMPUconst y [64])) + for { + t := v.Type + x := v_0 + 
y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SRD, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpRsh64Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRD) + v.AddArg2(x, y) + return true + } + // match: (Rsh64Ux8 x y) + // result: (ISEL [2] (SRD x y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00C0] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SRD, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(0x00C0) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpRsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SRAD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAD) + v.AddArg2(x, y) + return true + } + // match: (Rsh64x16 x y) + // result: (ISEL [2] (SRAD x y) (SRADconst x [63]) (Select1 (ANDCCconst [0xFFC0] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64SRADconst, t) + v1.AuxInt = 
int64ToAuxInt(63) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(0xFFC0) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpRsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SRAD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAD) + v.AddArg2(x, y) + return true + } + // match: (Rsh64x32 x y) + // result: (ISEL [0] (SRAD x y) (SRADconst x [63]) (CMPWUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64SRADconst, t) + v1.AuxInt = int64ToAuxInt(63) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpRsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64x64 x (MOVDconst [c])) + // cond: uint64(c) >= 64 + // result: (SRADconst x [63]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 64) { + break + } + v.reset(OpPPC64SRADconst) + v.AuxInt = int64ToAuxInt(63) + v.AddArg(x) + return true + } + // match: (Rsh64x64 x (MOVDconst [c])) + // cond: uint64(c) < 64 + // result: (SRADconst x [c]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 64) { + break + } + v.reset(OpPPC64SRADconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (Rsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SRAD x y) + for { + x := v_0 + y := v_1 + if 
!(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAD) + v.AddArg2(x, y) + return true + } + // match: (Rsh64x64 x y) + // result: (ISEL [0] (SRAD x y) (SRADconst x [63]) (CMPUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64SRADconst, t) + v1.AuxInt = int64ToAuxInt(63) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags) + v2.AuxInt = int64ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpRsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SRAD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAD) + v.AddArg2(x, y) + return true + } + // match: (Rsh64x8 x y) + // result: (ISEL [2] (SRAD x y) (SRADconst x [63]) (Select1 (ANDCCconst [0x00C0] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpPPC64SRADconst, t) + v1.AuxInt = int64ToAuxInt(63) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(0x00C0) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValuePPC64_OpRsh8Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRD (MOVBZreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // 
match: (Rsh8Ux16 x y) + // result: (ISEL [2] (SRD (MOVBZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFF8] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SRD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v4.AuxInt = int64ToAuxInt(0xFFF8) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpRsh8Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRD (MOVBZreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8Ux32 x y) + // result: (ISEL [0] (SRD (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst y [8])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SRD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(8) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpRsh8Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux64 x (MOVDconst [c])) + // cond: uint64(c) < 8 + // result: (SRWconst (ZeroExt8to32 x) [c]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } 
+ c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 8) { + break + } + v.reset(OpPPC64SRWconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh8Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRD (MOVBZreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8Ux64 x y) + // result: (ISEL [0] (SRD (MOVBZreg x) y) (MOVDconst [0]) (CMPUconst y [8])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SRD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(8) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpRsh8Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRD (MOVBZreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8Ux8 x y) + // result: (ISEL [2] (SRD (MOVBZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00F8] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SRD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpSelect1, 
types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v4.AuxInt = int64ToAuxInt(0x00F8) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpRsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SRAD (MOVBreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8x16 x y) + // result: (ISEL [2] (SRAD (MOVBreg x) y) (SRADconst (MOVBreg x) [7]) (Select1 (ANDCCconst [0xFFF8] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t) + v2.AuxInt = int64ToAuxInt(7) + v2.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v4.AuxInt = int64ToAuxInt(0xFFF8) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpRsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SRAD (MOVBreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8x32 x y) + // result: (ISEL [0] (SRAD (MOVBreg x) y) (SRADconst (MOVBreg x) [7]) (CMPWUconst y [8])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := 
b.NewValue0(v.Pos, OpPPC64SRAD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t) + v2.AuxInt = int64ToAuxInt(7) + v2.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(8) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpRsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x64 x (MOVDconst [c])) + // cond: uint64(c) >= 8 + // result: (SRAWconst (SignExt8to32 x) [63]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 8) { + break + } + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(63) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh8x64 x (MOVDconst [c])) + // cond: uint64(c) < 8 + // result: (SRAWconst (SignExt8to32 x) [c]) + for { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 8) { + break + } + v.reset(OpPPC64SRAWconst) + v.AuxInt = int64ToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SRAD (MOVBreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8x64 x y) + // result: (ISEL [0] (SRAD (MOVBreg x) y) (SRADconst (MOVBreg x) [7]) (CMPUconst y [8])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, 
t) + v2.AuxInt = int64ToAuxInt(7) + v2.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags) + v3.AuxInt = int64ToAuxInt(8) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpRsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SRAD (MOVBreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAD) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8x8 x y) + // result: (ISEL [2] (SRAD (MOVBreg x) y) (SRADconst (MOVBreg x) [7]) (Select1 (ANDCCconst [0x00F8] y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t) + v1 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t) + v2.AuxInt = int64ToAuxInt(7) + v2.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v4.AuxInt = int64ToAuxInt(0x00F8) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValuePPC64_OpSelect0(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select0 (Mul64uhilo x y)) + // result: (MULHDU x y) + for { + if v_0.Op != OpMul64uhilo { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpPPC64MULHDU) + v.AddArg2(x, y) + return true + } + // match: (Select0 (Add64carry x y c)) + // result: (Select0 (ADDE x y (Select1 (ADDCconst c [-1])))) + for { + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpPPC64ADDE, types.NewTuple(typ.UInt64, typ.UInt64)) + 
v1 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpPPC64ADDCconst, types.NewTuple(typ.UInt64, typ.UInt64)) + v2.AuxInt = int64ToAuxInt(-1) + v2.AddArg(c) + v1.AddArg(v2) + v0.AddArg3(x, y, v1) + v.AddArg(v0) + return true + } + // match: (Select0 (Sub64borrow x y c)) + // result: (Select0 (SUBE x y (Select1 (SUBCconst c [0])))) + for { + if v_0.Op != OpSub64borrow { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpPPC64SUBE, types.NewTuple(typ.UInt64, typ.UInt64)) + v1 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpPPC64SUBCconst, types.NewTuple(typ.UInt64, typ.UInt64)) + v2.AuxInt = int64ToAuxInt(0) + v2.AddArg(c) + v1.AddArg(v2) + v0.AddArg3(x, y, v1) + v.AddArg(v0) + return true + } + // match: (Select0 (ANDCCconst [m] (ROTLWconst [r] x))) + // cond: isPPC64WordRotateMask(m) + // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x) + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + m := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64ROTLWconst { + break + } + r := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if !(isPPC64WordRotateMask(m)) { + break + } + v.reset(OpPPC64RLWINM) + v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32)) + v.AddArg(x) + return true + } + // match: (Select0 (ANDCCconst [m] (ROTLW x r))) + // cond: isPPC64WordRotateMask(m) + // result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r) + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + m := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64ROTLW { + break + } + r := v_0_0.Args[1] + x := v_0_0.Args[0] + if !(isPPC64WordRotateMask(m)) { + break + } + v.reset(OpPPC64RLWNM) + v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32)) + v.AddArg2(x, r) + return true + } + // match: (Select0 (ANDCCconst [m] (SRWconst x [s]))) + // cond: mergePPC64RShiftMask(m,s,32) == 0 + // result: 
(MOVDconst [0]) + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + m := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64SRWconst { + break + } + s := auxIntToInt64(v_0_0.AuxInt) + if !(mergePPC64RShiftMask(m, s, 32) == 0) { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Select0 (ANDCCconst [m] (SRWconst x [s]))) + // cond: mergePPC64AndSrwi(m,s) != 0 + // result: (RLWINM [mergePPC64AndSrwi(m,s)] x) + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + m := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64SRWconst { + break + } + s := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if !(mergePPC64AndSrwi(m, s) != 0) { + break + } + v.reset(OpPPC64RLWINM) + v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s)) + v.AddArg(x) + return true + } + // match: (Select0 (ANDCCconst [-1] x)) + // result: x + for { + if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != -1 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Select0 (ANDCCconst [0] _)) + // result: (MOVDconst [0]) + for { + if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Select0 (ANDCCconst [c] y:(MOVBZreg _))) + // cond: c&0xFF == 0xFF + // result: y + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + y := v_0.Args[0] + if y.Op != OpPPC64MOVBZreg || !(c&0xFF == 0xFF) { + break + } + v.copyOf(y) + return true + } + // match: (Select0 (ANDCCconst [0xFF] (MOVBreg x))) + // result: (MOVBZreg x) + for { + if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0xFF { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVBreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64MOVBZreg) + v.AddArg(x) + return true + } + // match: (Select0 (ANDCCconst [c] y:(MOVHZreg _))) + // cond: c&0xFFFF == 0xFFFF + 
// result: y + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + y := v_0.Args[0] + if y.Op != OpPPC64MOVHZreg || !(c&0xFFFF == 0xFFFF) { + break + } + v.copyOf(y) + return true + } + // match: (Select0 (ANDCCconst [0xFFFF] (MOVHreg x))) + // result: (MOVHZreg x) + for { + if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0xFFFF { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVHreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64MOVHZreg) + v.AddArg(x) + return true + } + // match: (Select0 (ANDCCconst [c] (MOVBZreg x))) + // result: (Select0 (ANDCCconst [c&0xFF] x)) + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVBZreg { + break + } + x := v_0_0.Args[0] + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(c & 0xFF) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Select0 (ANDCCconst [c] (MOVHZreg x))) + // result: (Select0 (ANDCCconst [c&0xFFFF] x)) + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVHZreg { + break + } + x := v_0_0.Args[0] + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(c & 0xFFFF) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Select0 (ANDCCconst [c] (MOVWZreg x))) + // result: (Select0 (ANDCCconst [c&0xFFFFFFFF] x)) + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVWZreg { + break + } + x := v_0_0.Args[0] + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: 
(Select0 (ANDCCconst [1] z:(SRADconst [63] x))) + // cond: z.Uses == 1 + // result: (SRDconst [63] x) + for { + if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 1 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64SRADconst || auxIntToInt64(z.AuxInt) != 63 { + break + } + x := z.Args[0] + if !(z.Uses == 1) { + break + } + v.reset(OpPPC64SRDconst) + v.AuxInt = int64ToAuxInt(63) + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpSelect1(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select1 (Mul64uhilo x y)) + // result: (MULLD x y) + for { + if v_0.Op != OpMul64uhilo { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpPPC64MULLD) + v.AddArg2(x, y) + return true + } + // match: (Select1 (Add64carry x y c)) + // result: (ADDZEzero (Select1 (ADDE x y (Select1 (ADDCconst c [-1]))))) + for { + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpPPC64ADDZEzero) + v0 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpPPC64ADDE, types.NewTuple(typ.UInt64, typ.UInt64)) + v2 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpPPC64ADDCconst, types.NewTuple(typ.UInt64, typ.UInt64)) + v3.AuxInt = int64ToAuxInt(-1) + v3.AddArg(c) + v2.AddArg(v3) + v1.AddArg3(x, y, v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select1 (ADDCconst n:(ADDZEzero x) [-1])) + // cond: n.Uses <= 2 + // result: x + for { + if v_0.Op != OpPPC64ADDCconst || auxIntToInt64(v_0.AuxInt) != -1 { + break + } + n := v_0.Args[0] + if n.Op != OpPPC64ADDZEzero { + break + } + x := n.Args[0] + if !(n.Uses <= 2) { + break + } + v.copyOf(x) + return true + } + // match: (Select1 (Sub64borrow x y c)) + // result: (NEG (SUBZEzero (Select1 (SUBE x y (Select1 (SUBCconst c [0])))))) + for { + if v_0.Op != OpSub64borrow { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + 
v.reset(OpPPC64NEG) + v0 := b.NewValue0(v.Pos, OpPPC64SUBZEzero, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpPPC64SUBE, types.NewTuple(typ.UInt64, typ.UInt64)) + v3 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) + v4 := b.NewValue0(v.Pos, OpPPC64SUBCconst, types.NewTuple(typ.UInt64, typ.UInt64)) + v4.AuxInt = int64ToAuxInt(0) + v4.AddArg(c) + v3.AddArg(v4) + v2.AddArg3(x, y, v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select1 (SUBCconst n:(NEG (SUBZEzero x)) [0])) + // cond: n.Uses <= 2 + // result: x + for { + if v_0.Op != OpPPC64SUBCconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + n := v_0.Args[0] + if n.Op != OpPPC64NEG { + break + } + n_0 := n.Args[0] + if n_0.Op != OpPPC64SUBZEzero { + break + } + x := n_0.Args[0] + if !(n.Uses <= 2) { + break + } + v.copyOf(x) + return true + } + // match: (Select1 (ANDCCconst [0] _)) + // result: (FlagEQ) + for { + if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v.reset(OpPPC64FlagEQ) + return true + } + return false +} +func rewriteValuePPC64_OpSelectN(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore _ src s3:(MOVDstore {t} _ dst mem))))) + // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(s1, s2, s3, call) + // result: (Move [sz] dst src mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpPPC64CALLstatic || len(call.Args) != 1 { + break + } + sym := auxToCall(call.Aux) + s1 := call.Args[0] + if s1.Op != OpPPC64MOVDstore { + break + } + _ = s1.Args[2] + s1_1 := s1.Args[1] + if s1_1.Op != OpPPC64MOVDconst { + break + } + sz := auxIntToInt64(s1_1.AuxInt) + s2 := s1.Args[2] + if s2.Op != OpPPC64MOVDstore { + break + } + _ = s2.Args[2] + 
src := s2.Args[1] + s3 := s2.Args[2] + if s3.Op != OpPPC64MOVDstore { + break + } + mem := s3.Args[2] + dst := s3.Args[1] + if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(s1, s2, s3, call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(sz) + v.AddArg3(dst, src, mem) + return true + } + // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem)) + // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call) + // result: (Move [sz] dst src mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpPPC64CALLstatic || len(call.Args) != 4 { + break + } + sym := auxToCall(call.Aux) + mem := call.Args[3] + dst := call.Args[0] + src := call.Args[1] + call_2 := call.Args[2] + if call_2.Op != OpPPC64MOVDconst { + break + } + sz := auxIntToInt64(call_2.AuxInt) + if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(sz) + v.AddArg3(dst, src, mem) + return true + } + return false +} +func rewriteValuePPC64_OpSlicemask(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Slicemask x) + // result: (SRADconst (NEG x) [63]) + for { + t := v.Type + x := v_0 + v.reset(OpPPC64SRADconst) + v.AuxInt = int64ToAuxInt(63) + v0 := b.NewValue0(v.Pos, OpPPC64NEG, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValuePPC64_OpStore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && t.IsFloat() + // result: (FMOVDstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && t.IsFloat()) { + break + } + v.reset(OpPPC64FMOVDstore) + v.AddArg3(ptr, val, mem) + 
return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && t.IsFloat() + // result: (FMOVSstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && t.IsFloat()) { + break + } + v.reset(OpPPC64FMOVSstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && !t.IsFloat() + // result: (MOVDstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && !t.IsFloat()) { + break + } + v.reset(OpPPC64MOVDstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && !t.IsFloat() + // result: (MOVWstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && !t.IsFloat()) { + break + } + v.reset(OpPPC64MOVWstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 2 + // result: (MOVHstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 2) { + break + } + v.reset(OpPPC64MOVHstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 1 + // result: (MOVBstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 1) { + break + } + v.reset(OpPPC64MOVBstore) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValuePPC64_OpTrunc16to8(v *Value) bool { + v_0 := v.Args[0] + // match: (Trunc16to8 x) + // cond: t.IsSigned() + // result: (MOVBreg x) + for { + t := v.Type + x := v_0 + if !(t.IsSigned()) { + break + } + v.reset(OpPPC64MOVBreg) + v.AddArg(x) + return true + } + // match: (Trunc16to8 x) + // result: (MOVBZreg x) + for { + x := v_0 + v.reset(OpPPC64MOVBZreg) + v.AddArg(x) + return true + } +} +func rewriteValuePPC64_OpTrunc32to16(v *Value) bool { + v_0 := 
v.Args[0] + // match: (Trunc32to16 x) + // cond: t.IsSigned() + // result: (MOVHreg x) + for { + t := v.Type + x := v_0 + if !(t.IsSigned()) { + break + } + v.reset(OpPPC64MOVHreg) + v.AddArg(x) + return true + } + // match: (Trunc32to16 x) + // result: (MOVHZreg x) + for { + x := v_0 + v.reset(OpPPC64MOVHZreg) + v.AddArg(x) + return true + } +} +func rewriteValuePPC64_OpTrunc32to8(v *Value) bool { + v_0 := v.Args[0] + // match: (Trunc32to8 x) + // cond: t.IsSigned() + // result: (MOVBreg x) + for { + t := v.Type + x := v_0 + if !(t.IsSigned()) { + break + } + v.reset(OpPPC64MOVBreg) + v.AddArg(x) + return true + } + // match: (Trunc32to8 x) + // result: (MOVBZreg x) + for { + x := v_0 + v.reset(OpPPC64MOVBZreg) + v.AddArg(x) + return true + } +} +func rewriteValuePPC64_OpTrunc64to16(v *Value) bool { + v_0 := v.Args[0] + // match: (Trunc64to16 x) + // cond: t.IsSigned() + // result: (MOVHreg x) + for { + t := v.Type + x := v_0 + if !(t.IsSigned()) { + break + } + v.reset(OpPPC64MOVHreg) + v.AddArg(x) + return true + } + // match: (Trunc64to16 x) + // result: (MOVHZreg x) + for { + x := v_0 + v.reset(OpPPC64MOVHZreg) + v.AddArg(x) + return true + } +} +func rewriteValuePPC64_OpTrunc64to32(v *Value) bool { + v_0 := v.Args[0] + // match: (Trunc64to32 x) + // cond: t.IsSigned() + // result: (MOVWreg x) + for { + t := v.Type + x := v_0 + if !(t.IsSigned()) { + break + } + v.reset(OpPPC64MOVWreg) + v.AddArg(x) + return true + } + // match: (Trunc64to32 x) + // result: (MOVWZreg x) + for { + x := v_0 + v.reset(OpPPC64MOVWZreg) + v.AddArg(x) + return true + } +} +func rewriteValuePPC64_OpTrunc64to8(v *Value) bool { + v_0 := v.Args[0] + // match: (Trunc64to8 x) + // cond: t.IsSigned() + // result: (MOVBreg x) + for { + t := v.Type + x := v_0 + if !(t.IsSigned()) { + break + } + v.reset(OpPPC64MOVBreg) + v.AddArg(x) + return true + } + // match: (Trunc64to8 x) + // result: (MOVBZreg x) + for { + x := v_0 + v.reset(OpPPC64MOVBZreg) + v.AddArg(x) + return true + } +} +func 
rewriteValuePPC64_OpZero(v *Value) bool {
	// Lowers generic Zero [size] destptr mem into PPC64 store-zero code:
	// exact small sizes (0-8, 12, 16, 24, 32 bytes) are unrolled into
	// MOV{B,H,W,D}storezero chains; any other size falls through to a
	// lowered zeroing loop chosen by target ISA level (buildcfg.GOPPC64)
	// and by size. Rules are tried in order; the first match returns.
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Zero [0] _ mem)
	// result: mem
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		mem := v_1
		v.copyOf(mem)
		return true
	}
	// match: (Zero [1] destptr mem)
	// result: (MOVBstorezero destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 1 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpPPC64MOVBstorezero)
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [2] destptr mem)
	// result: (MOVHstorezero destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 2 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpPPC64MOVHstorezero)
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [3] destptr mem)
	// result: (MOVBstorezero [2] destptr (MOVHstorezero destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 3 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpPPC64MOVBstorezero)
		v.AuxInt = int32ToAuxInt(2)
		v0 := b.NewValue0(v.Pos, OpPPC64MOVHstorezero, types.TypeMem)
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [4] destptr mem)
	// result: (MOVWstorezero destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 4 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpPPC64MOVWstorezero)
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [5] destptr mem)
	// result: (MOVBstorezero [4] destptr (MOVWstorezero destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 5 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpPPC64MOVBstorezero)
		v.AuxInt = int32ToAuxInt(4)
		v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [6] destptr mem)
	// result: (MOVHstorezero [4] destptr (MOVWstorezero destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 6 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpPPC64MOVHstorezero)
		v.AuxInt = int32ToAuxInt(4)
		v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [7] destptr mem)
	// result: (MOVBstorezero [6] destptr (MOVHstorezero [4] destptr (MOVWstorezero destptr mem)))
	for {
		if auxIntToInt64(v.AuxInt) != 7 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpPPC64MOVBstorezero)
		v.AuxInt = int32ToAuxInt(6)
		v0 := b.NewValue0(v.Pos, OpPPC64MOVHstorezero, types.TypeMem)
		v0.AuxInt = int32ToAuxInt(4)
		v1 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
		v1.AddArg2(destptr, mem)
		v0.AddArg2(destptr, v1)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [8] {t} destptr mem)
	// result: (MOVDstorezero destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 8 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpPPC64MOVDstorezero)
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [12] {t} destptr mem)
	// result: (MOVWstorezero [8] destptr (MOVDstorezero [0] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 12 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpPPC64MOVWstorezero)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [16] {t} destptr mem)
	// result: (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 16 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpPPC64MOVDstorezero)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [24] {t} destptr mem)
	// result: (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)))
	for {
		if auxIntToInt64(v.AuxInt) != 24 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpPPC64MOVDstorezero)
		v.AuxInt = int32ToAuxInt(16)
		v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
		v0.AuxInt = int32ToAuxInt(8)
		v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
		v1.AuxInt = int32ToAuxInt(0)
		v1.AddArg2(destptr, mem)
		v0.AddArg2(destptr, v1)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [32] {t} destptr mem)
	// result: (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))))
	for {
		if auxIntToInt64(v.AuxInt) != 32 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpPPC64MOVDstorezero)
		v.AuxInt = int32ToAuxInt(24)
		v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
		v0.AuxInt = int32ToAuxInt(16)
		v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
		v1.AuxInt = int32ToAuxInt(8)
		v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
		v2.AuxInt = int32ToAuxInt(0)
		v2.AddArg2(destptr, mem)
		v1.AddArg2(destptr, v2)
		v0.AddArg2(destptr, v1)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [s] ptr mem)
	// cond: buildcfg.GOPPC64 <= 8 && s < 64
	// result: (LoweredZeroShort [s] ptr mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		ptr := v_0
		mem := v_1
		if !(buildcfg.GOPPC64 <= 8 && s < 64) {
			break
		}
		v.reset(OpPPC64LoweredZeroShort)
		v.AuxInt = int64ToAuxInt(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Zero [s] ptr mem)
	// cond: buildcfg.GOPPC64 <= 8
	// result: (LoweredZero [s] ptr mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		ptr := v_0
		mem := v_1
		if !(buildcfg.GOPPC64 <= 8) {
			break
		}
		v.reset(OpPPC64LoweredZero)
		v.AuxInt = int64ToAuxInt(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Zero [s] ptr mem)
	// cond: s < 128 && buildcfg.GOPPC64 >= 9
	// result: (LoweredQuadZeroShort [s] ptr mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		ptr := v_0
		mem := v_1
		if !(s < 128 && buildcfg.GOPPC64 >= 9) {
			break
		}
		v.reset(OpPPC64LoweredQuadZeroShort)
		v.AuxInt = int64ToAuxInt(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Zero [s] ptr mem)
	// cond: buildcfg.GOPPC64 >= 9
	// result: (LoweredQuadZero [s] ptr mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		ptr := v_0
		mem := v_1
		if !(buildcfg.GOPPC64 >= 9) {
			break
		}
		v.reset(OpPPC64LoweredQuadZero)
		v.AuxInt = int64ToAuxInt(s)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}

// rewriteBlockPPC64 rewrites PPC64 control-flow blocks (flag-constant
// folding, InvertFlags canonicalization, and CMP{W}const-against-zero
// simplifications). NOTE(review): this function continues beyond this chunk.
func rewriteBlockPPC64(b *Block) bool {
	typ := &b.Func.Config.Types
	switch b.Kind {
	case BlockPPC64EQ:
		// match: (EQ (FlagEQ) yes no)
		// result: (First yes no)
		for b.Controls[0].Op == OpPPC64FlagEQ {
			b.Reset(BlockFirst)
			return true
		}
		// match: (EQ (FlagLT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpPPC64FlagLT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagGT) yes no)
		// result: (First no yes)
		for b.Controls[0].Op == OpPPC64FlagGT {
			b.Reset(BlockFirst)
			b.swapSuccessors()
			return true
		}
		// match: (EQ (InvertFlags cmp) yes no)
		// result: (EQ cmp yes no)
		for b.Controls[0].Op == OpPPC64InvertFlags {
			v_0 := b.Controls[0]
			cmp := v_0.Args[0]
			b.resetWithControl(BlockPPC64EQ, cmp)
			return true
		}
		// match: (EQ (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
		// result: (EQ (Select1 z) yes no)
		for b.Controls[0].Op == OpPPC64CMPconst {
			v_0 := b.Controls[0]
			if auxIntToInt64(v_0.AuxInt) != 0 {
				break
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpSelect0 {
				break
			}
			z := v_0_0.Args[0]
			if z.Op != OpPPC64ANDCCconst {
				break
			}
			v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(z)
			b.resetWithControl(BlockPPC64EQ, v0)
			return true
		}
		// match: (EQ (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
		// result: (EQ (Select1 z) yes no)
		for b.Controls[0].Op == OpPPC64CMPWconst {
			v_0 := b.Controls[0]
			if auxIntToInt32(v_0.AuxInt) != 0 {
				break
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op !=
OpSelect0 { + break + } + z := v_0_0.Args[0] + if z.Op != OpPPC64ANDCCconst { + break + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(z) + b.resetWithControl(BlockPPC64EQ, v0) + return true + } + // match: (EQ (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (EQ (Select1 (ANDCC x y)) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64EQ, v0) + return true + } + break + } + // match: (EQ (CMPconst [0] z:(OR x y)) yes no) + // cond: z.Uses == 1 + // result: (EQ (Select1 (ORCC x y)) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64OR { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64EQ, v0) + return true + } + break + } + // match: (EQ (CMPconst [0] z:(XOR x y)) yes no) + // cond: z.Uses == 1 + // result: (EQ (Select1 (XORCC x y)) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64XOR { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for 
_i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64EQ, v0) + return true + } + break + } + case BlockPPC64GE: + // match: (GE (FlagEQ) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpPPC64FlagEQ { + b.Reset(BlockFirst) + return true + } + // match: (GE (FlagLT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpPPC64FlagLT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GE (FlagGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpPPC64FlagGT { + b.Reset(BlockFirst) + return true + } + // match: (GE (InvertFlags cmp) yes no) + // result: (LE cmp yes no) + for b.Controls[0].Op == OpPPC64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockPPC64LE, cmp) + return true + } + // match: (GE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) + // result: (GE (Select1 z) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + z := v_0_0.Args[0] + if z.Op != OpPPC64ANDCCconst { + break + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(z) + b.resetWithControl(BlockPPC64GE, v0) + return true + } + // match: (GE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) + // result: (GE (Select1 z) yes no) + for b.Controls[0].Op == OpPPC64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + z := v_0_0.Args[0] + if z.Op != OpPPC64ANDCCconst { + break + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(z) + b.resetWithControl(BlockPPC64GE, v0) 
+ return true + } + // match: (GE (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (GE (Select1 (ANDCC x y)) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64GE, v0) + return true + } + break + } + // match: (GE (CMPconst [0] z:(OR x y)) yes no) + // cond: z.Uses == 1 + // result: (GE (Select1 (ORCC x y)) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64OR { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64GE, v0) + return true + } + break + } + // match: (GE (CMPconst [0] z:(XOR x y)) yes no) + // cond: z.Uses == 1 + // result: (GE (Select1 (XORCC x y)) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64XOR { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, 
OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64GE, v0) + return true + } + break + } + case BlockPPC64GT: + // match: (GT (FlagEQ) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpPPC64FlagEQ { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GT (FlagLT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpPPC64FlagLT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (GT (FlagGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpPPC64FlagGT { + b.Reset(BlockFirst) + return true + } + // match: (GT (InvertFlags cmp) yes no) + // result: (LT cmp yes no) + for b.Controls[0].Op == OpPPC64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockPPC64LT, cmp) + return true + } + // match: (GT (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) + // result: (GT (Select1 z) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + z := v_0_0.Args[0] + if z.Op != OpPPC64ANDCCconst { + break + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(z) + b.resetWithControl(BlockPPC64GT, v0) + return true + } + // match: (GT (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) + // result: (GT (Select1 z) yes no) + for b.Controls[0].Op == OpPPC64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + z := v_0_0.Args[0] + if z.Op != OpPPC64ANDCCconst { + break + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(z) + b.resetWithControl(BlockPPC64GT, v0) + return true + } + // match: (GT (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (GT (Select1 (ANDCC x y)) yes no) + for b.Controls[0].Op == 
OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64GT, v0) + return true + } + break + } + // match: (GT (CMPconst [0] z:(OR x y)) yes no) + // cond: z.Uses == 1 + // result: (GT (Select1 (ORCC x y)) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64OR { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64GT, v0) + return true + } + break + } + // match: (GT (CMPconst [0] z:(XOR x y)) yes no) + // cond: z.Uses == 1 + // result: (GT (Select1 (XORCC x y)) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64XOR { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64GT, v0) + return true + } + break + } + case 
BlockIf: + // match: (If (Equal cc) yes no) + // result: (EQ cc yes no) + for b.Controls[0].Op == OpPPC64Equal { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockPPC64EQ, cc) + return true + } + // match: (If (NotEqual cc) yes no) + // result: (NE cc yes no) + for b.Controls[0].Op == OpPPC64NotEqual { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockPPC64NE, cc) + return true + } + // match: (If (LessThan cc) yes no) + // result: (LT cc yes no) + for b.Controls[0].Op == OpPPC64LessThan { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockPPC64LT, cc) + return true + } + // match: (If (LessEqual cc) yes no) + // result: (LE cc yes no) + for b.Controls[0].Op == OpPPC64LessEqual { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockPPC64LE, cc) + return true + } + // match: (If (GreaterThan cc) yes no) + // result: (GT cc yes no) + for b.Controls[0].Op == OpPPC64GreaterThan { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockPPC64GT, cc) + return true + } + // match: (If (GreaterEqual cc) yes no) + // result: (GE cc yes no) + for b.Controls[0].Op == OpPPC64GreaterEqual { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockPPC64GE, cc) + return true + } + // match: (If (FLessThan cc) yes no) + // result: (FLT cc yes no) + for b.Controls[0].Op == OpPPC64FLessThan { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockPPC64FLT, cc) + return true + } + // match: (If (FLessEqual cc) yes no) + // result: (FLE cc yes no) + for b.Controls[0].Op == OpPPC64FLessEqual { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockPPC64FLE, cc) + return true + } + // match: (If (FGreaterThan cc) yes no) + // result: (FGT cc yes no) + for b.Controls[0].Op == OpPPC64FGreaterThan { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockPPC64FGT, cc) + return true + } + // match: (If (FGreaterEqual cc) yes no) + // result: (FGE cc yes 
no) + for b.Controls[0].Op == OpPPC64FGreaterEqual { + v_0 := b.Controls[0] + cc := v_0.Args[0] + b.resetWithControl(BlockPPC64FGE, cc) + return true + } + // match: (If cond yes no) + // result: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] cond))) yes no) + for { + cond := b.Controls[0] + v0 := b.NewValue0(cond.Pos, OpPPC64CMPWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(cond.Pos, OpSelect0, typ.UInt32) + v2 := b.NewValue0(cond.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2.AuxInt = int64ToAuxInt(1) + v2.AddArg(cond) + v1.AddArg(v2) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64NE, v0) + return true + } + case BlockPPC64LE: + // match: (LE (FlagEQ) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpPPC64FlagEQ { + b.Reset(BlockFirst) + return true + } + // match: (LE (FlagLT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpPPC64FlagLT { + b.Reset(BlockFirst) + return true + } + // match: (LE (FlagGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpPPC64FlagGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (LE (InvertFlags cmp) yes no) + // result: (GE cmp yes no) + for b.Controls[0].Op == OpPPC64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockPPC64GE, cmp) + return true + } + // match: (LE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) + // result: (LE (Select1 z) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + z := v_0_0.Args[0] + if z.Op != OpPPC64ANDCCconst { + break + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(z) + b.resetWithControl(BlockPPC64LE, v0) + return true + } + // match: (LE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) + // result: (LE (Select1 z) yes no) + for b.Controls[0].Op == OpPPC64CMPWconst { + v_0 
:= b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + z := v_0_0.Args[0] + if z.Op != OpPPC64ANDCCconst { + break + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(z) + b.resetWithControl(BlockPPC64LE, v0) + return true + } + // match: (LE (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (LE (Select1 (ANDCC x y)) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64LE, v0) + return true + } + break + } + // match: (LE (CMPconst [0] z:(OR x y)) yes no) + // cond: z.Uses == 1 + // result: (LE (Select1 (ORCC x y)) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64OR { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64LE, v0) + return true + } + break + } + // match: (LE (CMPconst [0] z:(XOR x y)) yes no) + // cond: z.Uses == 1 + // result: (LE (Select1 (XORCC x y)) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := 
v_0.Args[0] + if z.Op != OpPPC64XOR { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64LE, v0) + return true + } + break + } + case BlockPPC64LT: + // match: (LT (FlagEQ) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpPPC64FlagEQ { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (LT (FlagLT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpPPC64FlagLT { + b.Reset(BlockFirst) + return true + } + // match: (LT (FlagGT) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpPPC64FlagGT { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (LT (InvertFlags cmp) yes no) + // result: (GT cmp yes no) + for b.Controls[0].Op == OpPPC64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockPPC64GT, cmp) + return true + } + // match: (LT (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) + // result: (LT (Select1 z) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + z := v_0_0.Args[0] + if z.Op != OpPPC64ANDCCconst { + break + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(z) + b.resetWithControl(BlockPPC64LT, v0) + return true + } + // match: (LT (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) + // result: (LT (Select1 z) yes no) + for b.Controls[0].Op == OpPPC64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + z := v_0_0.Args[0] + if z.Op != 
OpPPC64ANDCCconst { + break + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(z) + b.resetWithControl(BlockPPC64LT, v0) + return true + } + // match: (LT (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (LT (Select1 (ANDCC x y)) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64LT, v0) + return true + } + break + } + // match: (LT (CMPconst [0] z:(OR x y)) yes no) + // cond: z.Uses == 1 + // result: (LT (Select1 (ORCC x y)) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64OR { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64LT, v0) + return true + } + break + } + // match: (LT (CMPconst [0] z:(XOR x y)) yes no) + // cond: z.Uses == 1 + // result: (LT (Select1 (XORCC x y)) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64XOR { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x 
:= z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64LT, v0) + return true + } + break + } + case BlockPPC64NE: + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (Equal cc)))) yes no) + // result: (EQ cc yes no) + for b.Controls[0].Op == OpPPC64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { + break + } + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpPPC64Equal { + break + } + cc := v_0_0_0_0.Args[0] + b.resetWithControl(BlockPPC64EQ, cc) + return true + } + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (NotEqual cc)))) yes no) + // result: (NE cc yes no) + for b.Controls[0].Op == OpPPC64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { + break + } + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpPPC64NotEqual { + break + } + cc := v_0_0_0_0.Args[0] + b.resetWithControl(BlockPPC64NE, cc) + return true + } + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (LessThan cc)))) yes no) + // result: (LT cc yes no) + for b.Controls[0].Op == OpPPC64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { + break + } + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpPPC64LessThan { + break + } + cc := v_0_0_0_0.Args[0] + 
b.resetWithControl(BlockPPC64LT, cc) + return true + } + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (LessEqual cc)))) yes no) + // result: (LE cc yes no) + for b.Controls[0].Op == OpPPC64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { + break + } + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpPPC64LessEqual { + break + } + cc := v_0_0_0_0.Args[0] + b.resetWithControl(BlockPPC64LE, cc) + return true + } + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (GreaterThan cc)))) yes no) + // result: (GT cc yes no) + for b.Controls[0].Op == OpPPC64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { + break + } + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpPPC64GreaterThan { + break + } + cc := v_0_0_0_0.Args[0] + b.resetWithControl(BlockPPC64GT, cc) + return true + } + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (GreaterEqual cc)))) yes no) + // result: (GE cc yes no) + for b.Controls[0].Op == OpPPC64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { + break + } + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpPPC64GreaterEqual { + break + } + cc := v_0_0_0_0.Args[0] + b.resetWithControl(BlockPPC64GE, cc) + return true + } + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FLessThan cc)))) yes no) + // result: (FLT cc yes no) + for b.Controls[0].Op == OpPPC64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 
0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { + break + } + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpPPC64FLessThan { + break + } + cc := v_0_0_0_0.Args[0] + b.resetWithControl(BlockPPC64FLT, cc) + return true + } + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FLessEqual cc)))) yes no) + // result: (FLE cc yes no) + for b.Controls[0].Op == OpPPC64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { + break + } + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpPPC64FLessEqual { + break + } + cc := v_0_0_0_0.Args[0] + b.resetWithControl(BlockPPC64FLE, cc) + return true + } + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FGreaterThan cc)))) yes no) + // result: (FGT cc yes no) + for b.Controls[0].Op == OpPPC64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { + break + } + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpPPC64FGreaterThan { + break + } + cc := v_0_0_0_0.Args[0] + b.resetWithControl(BlockPPC64FGT, cc) + return true + } + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FGreaterEqual cc)))) yes no) + // result: (FGE cc yes no) + for b.Controls[0].Op == OpPPC64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { + break + } + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != 
OpPPC64FGreaterEqual { + break + } + cc := v_0_0_0_0.Args[0] + b.resetWithControl(BlockPPC64FGE, cc) + return true + } + // match: (NE (FlagEQ) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpPPC64FlagEQ { + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (NE (FlagLT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpPPC64FlagLT { + b.Reset(BlockFirst) + return true + } + // match: (NE (FlagGT) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpPPC64FlagGT { + b.Reset(BlockFirst) + return true + } + // match: (NE (InvertFlags cmp) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpPPC64InvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockPPC64NE, cmp) + return true + } + // match: (NE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) + // result: (NE (Select1 z) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + z := v_0_0.Args[0] + if z.Op != OpPPC64ANDCCconst { + break + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(z) + b.resetWithControl(BlockPPC64NE, v0) + return true + } + // match: (NE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) + // result: (NE (Select1 z) yes no) + for b.Controls[0].Op == OpPPC64CMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + z := v_0_0.Args[0] + if z.Op != OpPPC64ANDCCconst { + break + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v0.AddArg(z) + b.resetWithControl(BlockPPC64NE, v0) + return true + } + // match: (NE (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (NE (Select1 (ANDCC x y)) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z 
:= v_0.Args[0] + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64NE, v0) + return true + } + break + } + // match: (NE (CMPconst [0] z:(OR x y)) yes no) + // cond: z.Uses == 1 + // result: (NE (Select1 (ORCC x y)) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64OR { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64NE, v0) + return true + } + break + } + // match: (NE (CMPconst [0] z:(XOR x y)) yes no) + // cond: z.Uses == 1 + // result: (NE (Select1 (XORCC x y)) yes no) + for b.Controls[0].Op == OpPPC64CMPconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64XOR { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + x := z_0 + y := z_1 + if !(z.Uses == 1) { + continue + } + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AddArg2(x, y) + v0.AddArg(v1) + b.resetWithControl(BlockPPC64NE, v0) + return true + } + break + } + } + return false +} diff --git 
a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewritePPC64latelower.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewritePPC64latelower.go new file mode 100644 index 0000000000000000000000000000000000000000..771dd6aaa2496d22b6cc766b7e6d995b6c272a45 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewritePPC64latelower.go @@ -0,0 +1,705 @@ +// Code generated from _gen/PPC64latelower.rules using 'go generate'; DO NOT EDIT. + +package ssa + +import "internal/buildcfg" +import "cmd/compile/internal/types" + +func rewriteValuePPC64latelower(v *Value) bool { + switch v.Op { + case OpPPC64ADD: + return rewriteValuePPC64latelower_OpPPC64ADD(v) + case OpPPC64AND: + return rewriteValuePPC64latelower_OpPPC64AND(v) + case OpPPC64CMPconst: + return rewriteValuePPC64latelower_OpPPC64CMPconst(v) + case OpPPC64ISEL: + return rewriteValuePPC64latelower_OpPPC64ISEL(v) + case OpPPC64RLDICL: + return rewriteValuePPC64latelower_OpPPC64RLDICL(v) + case OpPPC64SETBC: + return rewriteValuePPC64latelower_OpPPC64SETBC(v) + case OpPPC64SETBCR: + return rewriteValuePPC64latelower_OpPPC64SETBCR(v) + case OpSelect0: + return rewriteValuePPC64latelower_OpSelect0(v) + } + return false +} +func rewriteValuePPC64latelower_OpPPC64ADD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADD (MOVDconst [m]) x) + // cond: supportsPPC64PCRel() && (m<<30)>>30 == m + // result: (ADDconst [m] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpPPC64MOVDconst { + continue + } + m := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(supportsPPC64PCRel() && (m<<30)>>30 == m) { + continue + } + v.reset(OpPPC64ADDconst) + v.AuxInt = int64ToAuxInt(m) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuePPC64latelower_OpPPC64AND(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AND x:(MOVDconst [m]) n) + // cond: 
t.Size() <= 2 + // result: (Select0 (ANDCCconst [int64(int16(m))] n)) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if x.Op != OpPPC64MOVDconst { + continue + } + m := auxIntToInt64(x.AuxInt) + n := v_1 + if !(t.Size() <= 2) { + continue + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(int64(int16(m))) + v0.AddArg(n) + v.AddArg(v0) + return true + } + break + } + // match: (AND x:(MOVDconst [m]) n) + // cond: isPPC64ValidShiftMask(m) + // result: (RLDICL [encodePPC64RotateMask(0,m,64)] n) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if x.Op != OpPPC64MOVDconst { + continue + } + m := auxIntToInt64(x.AuxInt) + n := v_1 + if !(isPPC64ValidShiftMask(m)) { + continue + } + v.reset(OpPPC64RLDICL) + v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 64)) + v.AddArg(n) + return true + } + break + } + // match: (AND x:(MOVDconst [m]) n) + // cond: m != 0 && isPPC64ValidShiftMask(^m) + // result: (RLDICR [encodePPC64RotateMask(0,m,64)] n) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if x.Op != OpPPC64MOVDconst { + continue + } + m := auxIntToInt64(x.AuxInt) + n := v_1 + if !(m != 0 && isPPC64ValidShiftMask(^m)) { + continue + } + v.reset(OpPPC64RLDICR) + v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 64)) + v.AddArg(n) + return true + } + break + } + // match: (AND x:(MOVDconst [m]) n) + // cond: t.Size() == 4 && isPPC64WordRotateMask(m) + // result: (RLWINM [encodePPC64RotateMask(0,m,32)] n) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if x.Op != OpPPC64MOVDconst { + continue + } + m := auxIntToInt64(x.AuxInt) + n := v_1 + if !(t.Size() == 4 && isPPC64WordRotateMask(m)) { + continue + } + v.reset(OpPPC64RLWINM) + v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32)) + v.AddArg(n) + return true + } + 
break + } + return false +} +func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool { + v_0 := v.Args[0] + // match: (CMPconst [0] z:(ADD x y)) + // cond: v.Block == z.Block + // result: (CMPconst [0] convertPPC64OpToOpCC(z)) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + z := v_0 + if z.Op != OpPPC64ADD { + break + } + if !(v.Block == z.Block) { + break + } + v.reset(OpPPC64CMPconst) + v.AuxInt = int64ToAuxInt(0) + v.AddArg(convertPPC64OpToOpCC(z)) + return true + } + // match: (CMPconst [0] z:(AND x y)) + // cond: v.Block == z.Block + // result: (CMPconst [0] convertPPC64OpToOpCC(z)) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + z := v_0 + if z.Op != OpPPC64AND { + break + } + if !(v.Block == z.Block) { + break + } + v.reset(OpPPC64CMPconst) + v.AuxInt = int64ToAuxInt(0) + v.AddArg(convertPPC64OpToOpCC(z)) + return true + } + // match: (CMPconst [0] z:(ANDN x y)) + // cond: v.Block == z.Block + // result: (CMPconst [0] convertPPC64OpToOpCC(z)) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + z := v_0 + if z.Op != OpPPC64ANDN { + break + } + if !(v.Block == z.Block) { + break + } + v.reset(OpPPC64CMPconst) + v.AuxInt = int64ToAuxInt(0) + v.AddArg(convertPPC64OpToOpCC(z)) + return true + } + // match: (CMPconst [0] z:(OR x y)) + // cond: v.Block == z.Block + // result: (CMPconst [0] convertPPC64OpToOpCC(z)) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + z := v_0 + if z.Op != OpPPC64OR { + break + } + if !(v.Block == z.Block) { + break + } + v.reset(OpPPC64CMPconst) + v.AuxInt = int64ToAuxInt(0) + v.AddArg(convertPPC64OpToOpCC(z)) + return true + } + // match: (CMPconst [0] z:(SUB x y)) + // cond: v.Block == z.Block + // result: (CMPconst [0] convertPPC64OpToOpCC(z)) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + z := v_0 + if z.Op != OpPPC64SUB { + break + } + if !(v.Block == z.Block) { + break + } + v.reset(OpPPC64CMPconst) + v.AuxInt = int64ToAuxInt(0) + v.AddArg(convertPPC64OpToOpCC(z)) + return true 
+ } + // match: (CMPconst [0] z:(NOR x y)) + // cond: v.Block == z.Block + // result: (CMPconst [0] convertPPC64OpToOpCC(z)) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + z := v_0 + if z.Op != OpPPC64NOR { + break + } + if !(v.Block == z.Block) { + break + } + v.reset(OpPPC64CMPconst) + v.AuxInt = int64ToAuxInt(0) + v.AddArg(convertPPC64OpToOpCC(z)) + return true + } + // match: (CMPconst [0] z:(XOR x y)) + // cond: v.Block == z.Block + // result: (CMPconst [0] convertPPC64OpToOpCC(z)) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + z := v_0 + if z.Op != OpPPC64XOR { + break + } + if !(v.Block == z.Block) { + break + } + v.reset(OpPPC64CMPconst) + v.AuxInt = int64ToAuxInt(0) + v.AddArg(convertPPC64OpToOpCC(z)) + return true + } + // match: (CMPconst [0] z:(NEG x)) + // cond: v.Block == z.Block + // result: (CMPconst [0] convertPPC64OpToOpCC(z)) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + z := v_0 + if z.Op != OpPPC64NEG { + break + } + if !(v.Block == z.Block) { + break + } + v.reset(OpPPC64CMPconst) + v.AuxInt = int64ToAuxInt(0) + v.AddArg(convertPPC64OpToOpCC(z)) + return true + } + // match: (CMPconst [0] z:(CNTLZD x)) + // cond: v.Block == z.Block + // result: (CMPconst [0] convertPPC64OpToOpCC(z)) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + z := v_0 + if z.Op != OpPPC64CNTLZD { + break + } + if !(v.Block == z.Block) { + break + } + v.reset(OpPPC64CMPconst) + v.AuxInt = int64ToAuxInt(0) + v.AddArg(convertPPC64OpToOpCC(z)) + return true + } + // match: (CMPconst [0] z:(ADDconst [c] x)) + // cond: int64(int16(c)) == c && v.Block == z.Block + // result: (CMPconst [0] convertPPC64OpToOpCC(z)) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + z := v_0 + if z.Op != OpPPC64ADDconst { + break + } + c := auxIntToInt64(z.AuxInt) + if !(int64(int16(c)) == c && v.Block == z.Block) { + break + } + v.reset(OpPPC64CMPconst) + v.AuxInt = int64ToAuxInt(0) + v.AddArg(convertPPC64OpToOpCC(z)) + return true + } + // match: 
(CMPconst [0] (Select0 z:(ADDCC x y))) + // result: (Select1 z) + for { + t := v.Type + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64ADDCC { + break + } + v.reset(OpSelect1) + v.Type = t + v.AddArg(z) + return true + } + // match: (CMPconst [0] (Select0 z:(ANDCC x y))) + // result: (Select1 z) + for { + t := v.Type + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64ANDCC { + break + } + v.reset(OpSelect1) + v.Type = t + v.AddArg(z) + return true + } + // match: (CMPconst [0] (Select0 z:(ANDNCC x y))) + // result: (Select1 z) + for { + t := v.Type + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64ANDNCC { + break + } + v.reset(OpSelect1) + v.Type = t + v.AddArg(z) + return true + } + // match: (CMPconst [0] (Select0 z:(ORCC x y))) + // result: (Select1 z) + for { + t := v.Type + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64ORCC { + break + } + v.reset(OpSelect1) + v.Type = t + v.AddArg(z) + return true + } + // match: (CMPconst [0] (Select0 z:(SUBCC x y))) + // result: (Select1 z) + for { + t := v.Type + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64SUBCC { + break + } + v.reset(OpSelect1) + v.Type = t + v.AddArg(z) + return true + } + // match: (CMPconst [0] (Select0 z:(NORCC x y))) + // result: (Select1 z) + for { + t := v.Type + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64NORCC { + break + } + v.reset(OpSelect1) + v.Type = t + v.AddArg(z) + return true + } + // match: (CMPconst [0] (Select0 z:(XORCC x y))) + // result: (Select1 z) + for { + t := v.Type + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64XORCC { + break + } + 
v.reset(OpSelect1) + v.Type = t + v.AddArg(z) + return true + } + // match: (CMPconst [0] (Select0 z:(ADDCCconst y))) + // result: (Select1 z) + for { + t := v.Type + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64ADDCCconst { + break + } + v.reset(OpSelect1) + v.Type = t + v.AddArg(z) + return true + } + // match: (CMPconst [0] (Select0 z:(NEGCC y))) + // result: (Select1 z) + for { + t := v.Type + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64NEGCC { + break + } + v.reset(OpSelect1) + v.Type = t + v.AddArg(z) + return true + } + // match: (CMPconst [0] (Select0 z:(CNTLZDCC y))) + // result: (Select1 z) + for { + t := v.Type + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 { + break + } + z := v_0.Args[0] + if z.Op != OpPPC64CNTLZDCC { + break + } + v.reset(OpSelect1) + v.Type = t + v.AddArg(z) + return true + } + return false +} +func rewriteValuePPC64latelower_OpPPC64ISEL(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ISEL [a] x (MOVDconst [0]) z) + // result: (ISELZ [a] x z) + for { + a := auxIntToInt32(v.AuxInt) + x := v_0 + if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + z := v_2 + v.reset(OpPPC64ISELZ) + v.AuxInt = int32ToAuxInt(a) + v.AddArg2(x, z) + return true + } + // match: (ISEL [a] (MOVDconst [0]) y z) + // result: (ISELZ [a^0x4] y z) + for { + a := auxIntToInt32(v.AuxInt) + if v_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + y := v_1 + z := v_2 + v.reset(OpPPC64ISELZ) + v.AuxInt = int32ToAuxInt(a ^ 0x4) + v.AddArg2(y, z) + return true + } + return false +} +func rewriteValuePPC64latelower_OpPPC64RLDICL(v *Value) bool { + v_0 := v.Args[0] + // match: (RLDICL [em] x:(SRDconst [s] a)) + // cond: (em&0xFF0000) == 0 + // result: (RLDICL [mergePPC64RLDICLandSRDconst(em, s)] a) + for { + em := auxIntToInt64(v.AuxInt) + x := v_0 + 
if x.Op != OpPPC64SRDconst { + break + } + s := auxIntToInt64(x.AuxInt) + a := x.Args[0] + if !((em & 0xFF0000) == 0) { + break + } + v.reset(OpPPC64RLDICL) + v.AuxInt = int64ToAuxInt(mergePPC64RLDICLandSRDconst(em, s)) + v.AddArg(a) + return true + } + return false +} +func rewriteValuePPC64latelower_OpPPC64SETBC(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SETBC [2] cmp) + // cond: buildcfg.GOPPC64 <= 9 + // result: (ISELZ [2] (MOVDconst [1]) cmp) + for { + if auxIntToInt32(v.AuxInt) != 2 { + break + } + cmp := v_0 + if !(buildcfg.GOPPC64 <= 9) { + break + } + v.reset(OpPPC64ISELZ) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(1) + v.AddArg2(v0, cmp) + return true + } + // match: (SETBC [0] cmp) + // cond: buildcfg.GOPPC64 <= 9 + // result: (ISELZ [0] (MOVDconst [1]) cmp) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + cmp := v_0 + if !(buildcfg.GOPPC64 <= 9) { + break + } + v.reset(OpPPC64ISELZ) + v.AuxInt = int32ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(1) + v.AddArg2(v0, cmp) + return true + } + // match: (SETBC [1] cmp) + // cond: buildcfg.GOPPC64 <= 9 + // result: (ISELZ [1] (MOVDconst [1]) cmp) + for { + if auxIntToInt32(v.AuxInt) != 1 { + break + } + cmp := v_0 + if !(buildcfg.GOPPC64 <= 9) { + break + } + v.reset(OpPPC64ISELZ) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(1) + v.AddArg2(v0, cmp) + return true + } + return false +} +func rewriteValuePPC64latelower_OpPPC64SETBCR(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SETBCR [2] cmp) + // cond: buildcfg.GOPPC64 <= 9 + // result: (ISELZ [6] (MOVDconst [1]) cmp) + for { + if auxIntToInt32(v.AuxInt) != 2 { + break + } + cmp := v_0 + if !(buildcfg.GOPPC64 <= 9) { + break + } + v.reset(OpPPC64ISELZ) + v.AuxInt = 
int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(1) + v.AddArg2(v0, cmp) + return true + } + // match: (SETBCR [0] cmp) + // cond: buildcfg.GOPPC64 <= 9 + // result: (ISELZ [4] (MOVDconst [1]) cmp) + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + cmp := v_0 + if !(buildcfg.GOPPC64 <= 9) { + break + } + v.reset(OpPPC64ISELZ) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(1) + v.AddArg2(v0, cmp) + return true + } + // match: (SETBCR [1] cmp) + // cond: buildcfg.GOPPC64 <= 9 + // result: (ISELZ [5] (MOVDconst [1]) cmp) + for { + if auxIntToInt32(v.AuxInt) != 1 { + break + } + cmp := v_0 + if !(buildcfg.GOPPC64 <= 9) { + break + } + v.reset(OpPPC64ISELZ) + v.AuxInt = int32ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v0.AuxInt = int64ToAuxInt(1) + v.AddArg2(v0, cmp) + return true + } + return false +} +func rewriteValuePPC64latelower_OpSelect0(v *Value) bool { + v_0 := v.Args[0] + // match: (Select0 z:(ANDCCconst [m] x)) + // cond: z.Uses == 1 && isPPC64ValidShiftMask(m) + // result: (RLDICL [encodePPC64RotateMask(0,m,64)] x) + for { + z := v_0 + if z.Op != OpPPC64ANDCCconst { + break + } + m := auxIntToInt64(z.AuxInt) + x := z.Args[0] + if !(z.Uses == 1 && isPPC64ValidShiftMask(m)) { + break + } + v.reset(OpPPC64RLDICL) + v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 64)) + v.AddArg(x) + return true + } + return false +} +func rewriteBlockPPC64latelower(b *Block) bool { + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteRISCV64.go new file mode 100644 index 0000000000000000000000000000000000000000..52ddca1c7d5e9ed099b7c433d465d6804497135d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteRISCV64.go @@ -0,0 +1,9124 @@ +// Code generated from 
_gen/RISCV64.rules using 'go generate'; DO NOT EDIT. + +package ssa + +import "math" +import "cmd/compile/internal/types" + +func rewriteValueRISCV64(v *Value) bool { + switch v.Op { + case OpAbs: + v.Op = OpRISCV64FABSD + return true + case OpAdd16: + v.Op = OpRISCV64ADD + return true + case OpAdd32: + v.Op = OpRISCV64ADD + return true + case OpAdd32F: + v.Op = OpRISCV64FADDS + return true + case OpAdd64: + v.Op = OpRISCV64ADD + return true + case OpAdd64F: + v.Op = OpRISCV64FADDD + return true + case OpAdd8: + v.Op = OpRISCV64ADD + return true + case OpAddPtr: + v.Op = OpRISCV64ADD + return true + case OpAddr: + return rewriteValueRISCV64_OpAddr(v) + case OpAnd16: + v.Op = OpRISCV64AND + return true + case OpAnd32: + v.Op = OpRISCV64AND + return true + case OpAnd64: + v.Op = OpRISCV64AND + return true + case OpAnd8: + v.Op = OpRISCV64AND + return true + case OpAndB: + v.Op = OpRISCV64AND + return true + case OpAtomicAdd32: + v.Op = OpRISCV64LoweredAtomicAdd32 + return true + case OpAtomicAdd64: + v.Op = OpRISCV64LoweredAtomicAdd64 + return true + case OpAtomicAnd32: + v.Op = OpRISCV64LoweredAtomicAnd32 + return true + case OpAtomicAnd8: + return rewriteValueRISCV64_OpAtomicAnd8(v) + case OpAtomicCompareAndSwap32: + return rewriteValueRISCV64_OpAtomicCompareAndSwap32(v) + case OpAtomicCompareAndSwap64: + v.Op = OpRISCV64LoweredAtomicCas64 + return true + case OpAtomicExchange32: + v.Op = OpRISCV64LoweredAtomicExchange32 + return true + case OpAtomicExchange64: + v.Op = OpRISCV64LoweredAtomicExchange64 + return true + case OpAtomicLoad32: + v.Op = OpRISCV64LoweredAtomicLoad32 + return true + case OpAtomicLoad64: + v.Op = OpRISCV64LoweredAtomicLoad64 + return true + case OpAtomicLoad8: + v.Op = OpRISCV64LoweredAtomicLoad8 + return true + case OpAtomicLoadPtr: + v.Op = OpRISCV64LoweredAtomicLoad64 + return true + case OpAtomicOr32: + v.Op = OpRISCV64LoweredAtomicOr32 + return true + case OpAtomicOr8: + return rewriteValueRISCV64_OpAtomicOr8(v) + case OpAtomicStore32: 
+ v.Op = OpRISCV64LoweredAtomicStore32 + return true + case OpAtomicStore64: + v.Op = OpRISCV64LoweredAtomicStore64 + return true + case OpAtomicStore8: + v.Op = OpRISCV64LoweredAtomicStore8 + return true + case OpAtomicStorePtrNoWB: + v.Op = OpRISCV64LoweredAtomicStore64 + return true + case OpAvg64u: + return rewriteValueRISCV64_OpAvg64u(v) + case OpClosureCall: + v.Op = OpRISCV64CALLclosure + return true + case OpCom16: + v.Op = OpRISCV64NOT + return true + case OpCom32: + v.Op = OpRISCV64NOT + return true + case OpCom64: + v.Op = OpRISCV64NOT + return true + case OpCom8: + v.Op = OpRISCV64NOT + return true + case OpConst16: + return rewriteValueRISCV64_OpConst16(v) + case OpConst32: + return rewriteValueRISCV64_OpConst32(v) + case OpConst32F: + return rewriteValueRISCV64_OpConst32F(v) + case OpConst64: + return rewriteValueRISCV64_OpConst64(v) + case OpConst64F: + return rewriteValueRISCV64_OpConst64F(v) + case OpConst8: + return rewriteValueRISCV64_OpConst8(v) + case OpConstBool: + return rewriteValueRISCV64_OpConstBool(v) + case OpConstNil: + return rewriteValueRISCV64_OpConstNil(v) + case OpCopysign: + v.Op = OpRISCV64FSGNJD + return true + case OpCvt32Fto32: + v.Op = OpRISCV64FCVTWS + return true + case OpCvt32Fto64: + v.Op = OpRISCV64FCVTLS + return true + case OpCvt32Fto64F: + v.Op = OpRISCV64FCVTDS + return true + case OpCvt32to32F: + v.Op = OpRISCV64FCVTSW + return true + case OpCvt32to64F: + v.Op = OpRISCV64FCVTDW + return true + case OpCvt64Fto32: + v.Op = OpRISCV64FCVTWD + return true + case OpCvt64Fto32F: + v.Op = OpRISCV64FCVTSD + return true + case OpCvt64Fto64: + v.Op = OpRISCV64FCVTLD + return true + case OpCvt64to32F: + v.Op = OpRISCV64FCVTSL + return true + case OpCvt64to64F: + v.Op = OpRISCV64FCVTDL + return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true + case OpDiv16: + return rewriteValueRISCV64_OpDiv16(v) + case OpDiv16u: + return rewriteValueRISCV64_OpDiv16u(v) + case OpDiv32: + return rewriteValueRISCV64_OpDiv32(v) + case 
OpDiv32F: + v.Op = OpRISCV64FDIVS + return true + case OpDiv32u: + v.Op = OpRISCV64DIVUW + return true + case OpDiv64: + return rewriteValueRISCV64_OpDiv64(v) + case OpDiv64F: + v.Op = OpRISCV64FDIVD + return true + case OpDiv64u: + v.Op = OpRISCV64DIVU + return true + case OpDiv8: + return rewriteValueRISCV64_OpDiv8(v) + case OpDiv8u: + return rewriteValueRISCV64_OpDiv8u(v) + case OpEq16: + return rewriteValueRISCV64_OpEq16(v) + case OpEq32: + return rewriteValueRISCV64_OpEq32(v) + case OpEq32F: + v.Op = OpRISCV64FEQS + return true + case OpEq64: + return rewriteValueRISCV64_OpEq64(v) + case OpEq64F: + v.Op = OpRISCV64FEQD + return true + case OpEq8: + return rewriteValueRISCV64_OpEq8(v) + case OpEqB: + return rewriteValueRISCV64_OpEqB(v) + case OpEqPtr: + return rewriteValueRISCV64_OpEqPtr(v) + case OpFMA: + v.Op = OpRISCV64FMADDD + return true + case OpGetCallerPC: + v.Op = OpRISCV64LoweredGetCallerPC + return true + case OpGetCallerSP: + v.Op = OpRISCV64LoweredGetCallerSP + return true + case OpGetClosurePtr: + v.Op = OpRISCV64LoweredGetClosurePtr + return true + case OpHmul32: + return rewriteValueRISCV64_OpHmul32(v) + case OpHmul32u: + return rewriteValueRISCV64_OpHmul32u(v) + case OpHmul64: + v.Op = OpRISCV64MULH + return true + case OpHmul64u: + v.Op = OpRISCV64MULHU + return true + case OpInterCall: + v.Op = OpRISCV64CALLinter + return true + case OpIsInBounds: + v.Op = OpLess64U + return true + case OpIsNonNil: + v.Op = OpRISCV64SNEZ + return true + case OpIsSliceInBounds: + v.Op = OpLeq64U + return true + case OpLeq16: + return rewriteValueRISCV64_OpLeq16(v) + case OpLeq16U: + return rewriteValueRISCV64_OpLeq16U(v) + case OpLeq32: + return rewriteValueRISCV64_OpLeq32(v) + case OpLeq32F: + v.Op = OpRISCV64FLES + return true + case OpLeq32U: + return rewriteValueRISCV64_OpLeq32U(v) + case OpLeq64: + return rewriteValueRISCV64_OpLeq64(v) + case OpLeq64F: + v.Op = OpRISCV64FLED + return true + case OpLeq64U: + return rewriteValueRISCV64_OpLeq64U(v) + case 
OpLeq8: + return rewriteValueRISCV64_OpLeq8(v) + case OpLeq8U: + return rewriteValueRISCV64_OpLeq8U(v) + case OpLess16: + return rewriteValueRISCV64_OpLess16(v) + case OpLess16U: + return rewriteValueRISCV64_OpLess16U(v) + case OpLess32: + return rewriteValueRISCV64_OpLess32(v) + case OpLess32F: + v.Op = OpRISCV64FLTS + return true + case OpLess32U: + return rewriteValueRISCV64_OpLess32U(v) + case OpLess64: + v.Op = OpRISCV64SLT + return true + case OpLess64F: + v.Op = OpRISCV64FLTD + return true + case OpLess64U: + v.Op = OpRISCV64SLTU + return true + case OpLess8: + return rewriteValueRISCV64_OpLess8(v) + case OpLess8U: + return rewriteValueRISCV64_OpLess8U(v) + case OpLoad: + return rewriteValueRISCV64_OpLoad(v) + case OpLocalAddr: + return rewriteValueRISCV64_OpLocalAddr(v) + case OpLsh16x16: + return rewriteValueRISCV64_OpLsh16x16(v) + case OpLsh16x32: + return rewriteValueRISCV64_OpLsh16x32(v) + case OpLsh16x64: + return rewriteValueRISCV64_OpLsh16x64(v) + case OpLsh16x8: + return rewriteValueRISCV64_OpLsh16x8(v) + case OpLsh32x16: + return rewriteValueRISCV64_OpLsh32x16(v) + case OpLsh32x32: + return rewriteValueRISCV64_OpLsh32x32(v) + case OpLsh32x64: + return rewriteValueRISCV64_OpLsh32x64(v) + case OpLsh32x8: + return rewriteValueRISCV64_OpLsh32x8(v) + case OpLsh64x16: + return rewriteValueRISCV64_OpLsh64x16(v) + case OpLsh64x32: + return rewriteValueRISCV64_OpLsh64x32(v) + case OpLsh64x64: + return rewriteValueRISCV64_OpLsh64x64(v) + case OpLsh64x8: + return rewriteValueRISCV64_OpLsh64x8(v) + case OpLsh8x16: + return rewriteValueRISCV64_OpLsh8x16(v) + case OpLsh8x32: + return rewriteValueRISCV64_OpLsh8x32(v) + case OpLsh8x64: + return rewriteValueRISCV64_OpLsh8x64(v) + case OpLsh8x8: + return rewriteValueRISCV64_OpLsh8x8(v) + case OpMod16: + return rewriteValueRISCV64_OpMod16(v) + case OpMod16u: + return rewriteValueRISCV64_OpMod16u(v) + case OpMod32: + return rewriteValueRISCV64_OpMod32(v) + case OpMod32u: + v.Op = OpRISCV64REMUW + return true + case 
OpMod64: + return rewriteValueRISCV64_OpMod64(v) + case OpMod64u: + v.Op = OpRISCV64REMU + return true + case OpMod8: + return rewriteValueRISCV64_OpMod8(v) + case OpMod8u: + return rewriteValueRISCV64_OpMod8u(v) + case OpMove: + return rewriteValueRISCV64_OpMove(v) + case OpMul16: + return rewriteValueRISCV64_OpMul16(v) + case OpMul32: + v.Op = OpRISCV64MULW + return true + case OpMul32F: + v.Op = OpRISCV64FMULS + return true + case OpMul64: + v.Op = OpRISCV64MUL + return true + case OpMul64F: + v.Op = OpRISCV64FMULD + return true + case OpMul64uhilo: + v.Op = OpRISCV64LoweredMuluhilo + return true + case OpMul64uover: + v.Op = OpRISCV64LoweredMuluover + return true + case OpMul8: + return rewriteValueRISCV64_OpMul8(v) + case OpNeg16: + v.Op = OpRISCV64NEG + return true + case OpNeg32: + v.Op = OpRISCV64NEG + return true + case OpNeg32F: + v.Op = OpRISCV64FNEGS + return true + case OpNeg64: + v.Op = OpRISCV64NEG + return true + case OpNeg64F: + v.Op = OpRISCV64FNEGD + return true + case OpNeg8: + v.Op = OpRISCV64NEG + return true + case OpNeq16: + return rewriteValueRISCV64_OpNeq16(v) + case OpNeq32: + return rewriteValueRISCV64_OpNeq32(v) + case OpNeq32F: + v.Op = OpRISCV64FNES + return true + case OpNeq64: + return rewriteValueRISCV64_OpNeq64(v) + case OpNeq64F: + v.Op = OpRISCV64FNED + return true + case OpNeq8: + return rewriteValueRISCV64_OpNeq8(v) + case OpNeqB: + return rewriteValueRISCV64_OpNeqB(v) + case OpNeqPtr: + return rewriteValueRISCV64_OpNeqPtr(v) + case OpNilCheck: + v.Op = OpRISCV64LoweredNilCheck + return true + case OpNot: + v.Op = OpRISCV64SEQZ + return true + case OpOffPtr: + return rewriteValueRISCV64_OpOffPtr(v) + case OpOr16: + v.Op = OpRISCV64OR + return true + case OpOr32: + v.Op = OpRISCV64OR + return true + case OpOr64: + v.Op = OpRISCV64OR + return true + case OpOr8: + v.Op = OpRISCV64OR + return true + case OpOrB: + v.Op = OpRISCV64OR + return true + case OpPanicBounds: + return rewriteValueRISCV64_OpPanicBounds(v) + case 
OpPubBarrier: + v.Op = OpRISCV64LoweredPubBarrier + return true + case OpRISCV64ADD: + return rewriteValueRISCV64_OpRISCV64ADD(v) + case OpRISCV64ADDI: + return rewriteValueRISCV64_OpRISCV64ADDI(v) + case OpRISCV64AND: + return rewriteValueRISCV64_OpRISCV64AND(v) + case OpRISCV64ANDI: + return rewriteValueRISCV64_OpRISCV64ANDI(v) + case OpRISCV64FADDD: + return rewriteValueRISCV64_OpRISCV64FADDD(v) + case OpRISCV64FADDS: + return rewriteValueRISCV64_OpRISCV64FADDS(v) + case OpRISCV64FMADDD: + return rewriteValueRISCV64_OpRISCV64FMADDD(v) + case OpRISCV64FMADDS: + return rewriteValueRISCV64_OpRISCV64FMADDS(v) + case OpRISCV64FMSUBD: + return rewriteValueRISCV64_OpRISCV64FMSUBD(v) + case OpRISCV64FMSUBS: + return rewriteValueRISCV64_OpRISCV64FMSUBS(v) + case OpRISCV64FNMADDD: + return rewriteValueRISCV64_OpRISCV64FNMADDD(v) + case OpRISCV64FNMADDS: + return rewriteValueRISCV64_OpRISCV64FNMADDS(v) + case OpRISCV64FNMSUBD: + return rewriteValueRISCV64_OpRISCV64FNMSUBD(v) + case OpRISCV64FNMSUBS: + return rewriteValueRISCV64_OpRISCV64FNMSUBS(v) + case OpRISCV64FSUBD: + return rewriteValueRISCV64_OpRISCV64FSUBD(v) + case OpRISCV64FSUBS: + return rewriteValueRISCV64_OpRISCV64FSUBS(v) + case OpRISCV64MOVBUload: + return rewriteValueRISCV64_OpRISCV64MOVBUload(v) + case OpRISCV64MOVBUreg: + return rewriteValueRISCV64_OpRISCV64MOVBUreg(v) + case OpRISCV64MOVBload: + return rewriteValueRISCV64_OpRISCV64MOVBload(v) + case OpRISCV64MOVBreg: + return rewriteValueRISCV64_OpRISCV64MOVBreg(v) + case OpRISCV64MOVBstore: + return rewriteValueRISCV64_OpRISCV64MOVBstore(v) + case OpRISCV64MOVBstorezero: + return rewriteValueRISCV64_OpRISCV64MOVBstorezero(v) + case OpRISCV64MOVDload: + return rewriteValueRISCV64_OpRISCV64MOVDload(v) + case OpRISCV64MOVDnop: + return rewriteValueRISCV64_OpRISCV64MOVDnop(v) + case OpRISCV64MOVDreg: + return rewriteValueRISCV64_OpRISCV64MOVDreg(v) + case OpRISCV64MOVDstore: + return rewriteValueRISCV64_OpRISCV64MOVDstore(v) + case OpRISCV64MOVDstorezero: + 
return rewriteValueRISCV64_OpRISCV64MOVDstorezero(v) + case OpRISCV64MOVHUload: + return rewriteValueRISCV64_OpRISCV64MOVHUload(v) + case OpRISCV64MOVHUreg: + return rewriteValueRISCV64_OpRISCV64MOVHUreg(v) + case OpRISCV64MOVHload: + return rewriteValueRISCV64_OpRISCV64MOVHload(v) + case OpRISCV64MOVHreg: + return rewriteValueRISCV64_OpRISCV64MOVHreg(v) + case OpRISCV64MOVHstore: + return rewriteValueRISCV64_OpRISCV64MOVHstore(v) + case OpRISCV64MOVHstorezero: + return rewriteValueRISCV64_OpRISCV64MOVHstorezero(v) + case OpRISCV64MOVWUload: + return rewriteValueRISCV64_OpRISCV64MOVWUload(v) + case OpRISCV64MOVWUreg: + return rewriteValueRISCV64_OpRISCV64MOVWUreg(v) + case OpRISCV64MOVWload: + return rewriteValueRISCV64_OpRISCV64MOVWload(v) + case OpRISCV64MOVWreg: + return rewriteValueRISCV64_OpRISCV64MOVWreg(v) + case OpRISCV64MOVWstore: + return rewriteValueRISCV64_OpRISCV64MOVWstore(v) + case OpRISCV64MOVWstorezero: + return rewriteValueRISCV64_OpRISCV64MOVWstorezero(v) + case OpRISCV64NEG: + return rewriteValueRISCV64_OpRISCV64NEG(v) + case OpRISCV64NEGW: + return rewriteValueRISCV64_OpRISCV64NEGW(v) + case OpRISCV64OR: + return rewriteValueRISCV64_OpRISCV64OR(v) + case OpRISCV64ORI: + return rewriteValueRISCV64_OpRISCV64ORI(v) + case OpRISCV64SEQZ: + return rewriteValueRISCV64_OpRISCV64SEQZ(v) + case OpRISCV64SLL: + return rewriteValueRISCV64_OpRISCV64SLL(v) + case OpRISCV64SLLI: + return rewriteValueRISCV64_OpRISCV64SLLI(v) + case OpRISCV64SLT: + return rewriteValueRISCV64_OpRISCV64SLT(v) + case OpRISCV64SLTI: + return rewriteValueRISCV64_OpRISCV64SLTI(v) + case OpRISCV64SLTIU: + return rewriteValueRISCV64_OpRISCV64SLTIU(v) + case OpRISCV64SLTU: + return rewriteValueRISCV64_OpRISCV64SLTU(v) + case OpRISCV64SNEZ: + return rewriteValueRISCV64_OpRISCV64SNEZ(v) + case OpRISCV64SRA: + return rewriteValueRISCV64_OpRISCV64SRA(v) + case OpRISCV64SRAI: + return rewriteValueRISCV64_OpRISCV64SRAI(v) + case OpRISCV64SRAW: + return rewriteValueRISCV64_OpRISCV64SRAW(v) + 
case OpRISCV64SRL: + return rewriteValueRISCV64_OpRISCV64SRL(v) + case OpRISCV64SRLI: + return rewriteValueRISCV64_OpRISCV64SRLI(v) + case OpRISCV64SRLW: + return rewriteValueRISCV64_OpRISCV64SRLW(v) + case OpRISCV64SUB: + return rewriteValueRISCV64_OpRISCV64SUB(v) + case OpRISCV64SUBW: + return rewriteValueRISCV64_OpRISCV64SUBW(v) + case OpRISCV64XOR: + return rewriteValueRISCV64_OpRISCV64XOR(v) + case OpRotateLeft16: + return rewriteValueRISCV64_OpRotateLeft16(v) + case OpRotateLeft32: + return rewriteValueRISCV64_OpRotateLeft32(v) + case OpRotateLeft64: + return rewriteValueRISCV64_OpRotateLeft64(v) + case OpRotateLeft8: + return rewriteValueRISCV64_OpRotateLeft8(v) + case OpRound32F: + v.Op = OpRISCV64LoweredRound32F + return true + case OpRound64F: + v.Op = OpRISCV64LoweredRound64F + return true + case OpRsh16Ux16: + return rewriteValueRISCV64_OpRsh16Ux16(v) + case OpRsh16Ux32: + return rewriteValueRISCV64_OpRsh16Ux32(v) + case OpRsh16Ux64: + return rewriteValueRISCV64_OpRsh16Ux64(v) + case OpRsh16Ux8: + return rewriteValueRISCV64_OpRsh16Ux8(v) + case OpRsh16x16: + return rewriteValueRISCV64_OpRsh16x16(v) + case OpRsh16x32: + return rewriteValueRISCV64_OpRsh16x32(v) + case OpRsh16x64: + return rewriteValueRISCV64_OpRsh16x64(v) + case OpRsh16x8: + return rewriteValueRISCV64_OpRsh16x8(v) + case OpRsh32Ux16: + return rewriteValueRISCV64_OpRsh32Ux16(v) + case OpRsh32Ux32: + return rewriteValueRISCV64_OpRsh32Ux32(v) + case OpRsh32Ux64: + return rewriteValueRISCV64_OpRsh32Ux64(v) + case OpRsh32Ux8: + return rewriteValueRISCV64_OpRsh32Ux8(v) + case OpRsh32x16: + return rewriteValueRISCV64_OpRsh32x16(v) + case OpRsh32x32: + return rewriteValueRISCV64_OpRsh32x32(v) + case OpRsh32x64: + return rewriteValueRISCV64_OpRsh32x64(v) + case OpRsh32x8: + return rewriteValueRISCV64_OpRsh32x8(v) + case OpRsh64Ux16: + return rewriteValueRISCV64_OpRsh64Ux16(v) + case OpRsh64Ux32: + return rewriteValueRISCV64_OpRsh64Ux32(v) + case OpRsh64Ux64: + return 
rewriteValueRISCV64_OpRsh64Ux64(v) + case OpRsh64Ux8: + return rewriteValueRISCV64_OpRsh64Ux8(v) + case OpRsh64x16: + return rewriteValueRISCV64_OpRsh64x16(v) + case OpRsh64x32: + return rewriteValueRISCV64_OpRsh64x32(v) + case OpRsh64x64: + return rewriteValueRISCV64_OpRsh64x64(v) + case OpRsh64x8: + return rewriteValueRISCV64_OpRsh64x8(v) + case OpRsh8Ux16: + return rewriteValueRISCV64_OpRsh8Ux16(v) + case OpRsh8Ux32: + return rewriteValueRISCV64_OpRsh8Ux32(v) + case OpRsh8Ux64: + return rewriteValueRISCV64_OpRsh8Ux64(v) + case OpRsh8Ux8: + return rewriteValueRISCV64_OpRsh8Ux8(v) + case OpRsh8x16: + return rewriteValueRISCV64_OpRsh8x16(v) + case OpRsh8x32: + return rewriteValueRISCV64_OpRsh8x32(v) + case OpRsh8x64: + return rewriteValueRISCV64_OpRsh8x64(v) + case OpRsh8x8: + return rewriteValueRISCV64_OpRsh8x8(v) + case OpSelect0: + return rewriteValueRISCV64_OpSelect0(v) + case OpSelect1: + return rewriteValueRISCV64_OpSelect1(v) + case OpSignExt16to32: + v.Op = OpRISCV64MOVHreg + return true + case OpSignExt16to64: + v.Op = OpRISCV64MOVHreg + return true + case OpSignExt32to64: + v.Op = OpRISCV64MOVWreg + return true + case OpSignExt8to16: + v.Op = OpRISCV64MOVBreg + return true + case OpSignExt8to32: + v.Op = OpRISCV64MOVBreg + return true + case OpSignExt8to64: + v.Op = OpRISCV64MOVBreg + return true + case OpSlicemask: + return rewriteValueRISCV64_OpSlicemask(v) + case OpSqrt: + v.Op = OpRISCV64FSQRTD + return true + case OpSqrt32: + v.Op = OpRISCV64FSQRTS + return true + case OpStaticCall: + v.Op = OpRISCV64CALLstatic + return true + case OpStore: + return rewriteValueRISCV64_OpStore(v) + case OpSub16: + v.Op = OpRISCV64SUB + return true + case OpSub32: + v.Op = OpRISCV64SUB + return true + case OpSub32F: + v.Op = OpRISCV64FSUBS + return true + case OpSub64: + v.Op = OpRISCV64SUB + return true + case OpSub64F: + v.Op = OpRISCV64FSUBD + return true + case OpSub8: + v.Op = OpRISCV64SUB + return true + case OpSubPtr: + v.Op = OpRISCV64SUB + return true + case 
OpTailCall: + v.Op = OpRISCV64CALLtail + return true + case OpTrunc16to8: + v.Op = OpCopy + return true + case OpTrunc32to16: + v.Op = OpCopy + return true + case OpTrunc32to8: + v.Op = OpCopy + return true + case OpTrunc64to16: + v.Op = OpCopy + return true + case OpTrunc64to32: + v.Op = OpCopy + return true + case OpTrunc64to8: + v.Op = OpCopy + return true + case OpWB: + v.Op = OpRISCV64LoweredWB + return true + case OpXor16: + v.Op = OpRISCV64XOR + return true + case OpXor32: + v.Op = OpRISCV64XOR + return true + case OpXor64: + v.Op = OpRISCV64XOR + return true + case OpXor8: + v.Op = OpRISCV64XOR + return true + case OpZero: + return rewriteValueRISCV64_OpZero(v) + case OpZeroExt16to32: + v.Op = OpRISCV64MOVHUreg + return true + case OpZeroExt16to64: + v.Op = OpRISCV64MOVHUreg + return true + case OpZeroExt32to64: + v.Op = OpRISCV64MOVWUreg + return true + case OpZeroExt8to16: + v.Op = OpRISCV64MOVBUreg + return true + case OpZeroExt8to32: + v.Op = OpRISCV64MOVBUreg + return true + case OpZeroExt8to64: + v.Op = OpRISCV64MOVBUreg + return true + } + return false +} +func rewriteValueRISCV64_OpAddr(v *Value) bool { + v_0 := v.Args[0] + // match: (Addr {sym} base) + // result: (MOVaddr {sym} [0] base) + for { + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpRISCV64MOVaddr) + v.AuxInt = int32ToAuxInt(0) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } +} +func rewriteValueRISCV64_OpAtomicAnd8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAnd8 ptr val mem) + // result: (LoweredAtomicAnd32 (ANDI [^3] ptr) (NOT (SLL (XORI [0xff] (ZeroExt8to32 val)) (SLLI [3] (ANDI [3] ptr)))) mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpRISCV64LoweredAtomicAnd32) + v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Uintptr) + v0.AuxInt = int64ToAuxInt(^3) + v0.AddArg(ptr) + v1 := b.NewValue0(v.Pos, OpRISCV64NOT, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpRISCV64SLL, 
typ.UInt32) + v3 := b.NewValue0(v.Pos, OpRISCV64XORI, typ.UInt32) + v3.AuxInt = int64ToAuxInt(0xff) + v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v4.AddArg(val) + v3.AddArg(v4) + v5 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64) + v5.AuxInt = int64ToAuxInt(3) + v6 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.UInt64) + v6.AuxInt = int64ToAuxInt(3) + v6.AddArg(ptr) + v5.AddArg(v6) + v2.AddArg2(v3, v5) + v1.AddArg(v2) + v.AddArg3(v0, v1, mem) + return true + } +} +func rewriteValueRISCV64_OpAtomicCompareAndSwap32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicCompareAndSwap32 ptr old new mem) + // result: (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem) + for { + ptr := v_0 + old := v_1 + new := v_2 + mem := v_3 + v.reset(OpRISCV64LoweredAtomicCas32) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(old) + v.AddArg4(ptr, v0, new, mem) + return true + } +} +func rewriteValueRISCV64_OpAtomicOr8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicOr8 ptr val mem) + // result: (LoweredAtomicOr32 (ANDI [^3] ptr) (SLL (ZeroExt8to32 val) (SLLI [3] (ANDI [3] ptr))) mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpRISCV64LoweredAtomicOr32) + v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Uintptr) + v0.AuxInt = int64ToAuxInt(^3) + v0.AddArg(ptr) + v1 := b.NewValue0(v.Pos, OpRISCV64SLL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(val) + v3 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64) + v3.AuxInt = int64ToAuxInt(3) + v4 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.UInt64) + v4.AuxInt = int64ToAuxInt(3) + v4.AddArg(ptr) + v3.AddArg(v4) + v1.AddArg2(v2, v3) + v.AddArg3(v0, v1, mem) + return true + } +} +func rewriteValueRISCV64_OpAvg64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // 
match: (Avg64u x y) + // result: (ADD (ADD (SRLI [1] x) (SRLI [1] y)) (ANDI [1] (AND x y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpRISCV64ADD) + v0 := b.NewValue0(v.Pos, OpRISCV64ADD, t) + v1 := b.NewValue0(v.Pos, OpRISCV64SRLI, t) + v1.AuxInt = int64ToAuxInt(1) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpRISCV64SRLI, t) + v2.AuxInt = int64ToAuxInt(1) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpRISCV64ANDI, t) + v3.AuxInt = int64ToAuxInt(1) + v4 := b.NewValue0(v.Pos, OpRISCV64AND, t) + v4.AddArg2(x, y) + v3.AddArg(v4) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueRISCV64_OpConst16(v *Value) bool { + // match: (Const16 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt16(v.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueRISCV64_OpConst32(v *Value) bool { + // match: (Const32 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt32(v.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueRISCV64_OpConst32F(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Const32F [val]) + // result: (FMVSX (MOVDconst [int64(math.Float32bits(val))])) + for { + val := auxIntToFloat32(v.AuxInt) + v.reset(OpRISCV64FMVSX) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(int64(math.Float32bits(val))) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpConst64(v *Value) bool { + // match: (Const64 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt64(v.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueRISCV64_OpConst64F(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Const64F [val]) + // result: (FMVDX (MOVDconst [int64(math.Float64bits(val))])) + for { + val := 
auxIntToFloat64(v.AuxInt) + v.reset(OpRISCV64FMVDX) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(int64(math.Float64bits(val))) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpConst8(v *Value) bool { + // match: (Const8 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt8(v.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueRISCV64_OpConstBool(v *Value) bool { + // match: (ConstBool [val]) + // result: (MOVDconst [int64(b2i(val))]) + for { + val := auxIntToBool(v.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(b2i(val))) + return true + } +} +func rewriteValueRISCV64_OpConstNil(v *Value) bool { + // match: (ConstNil) + // result: (MOVDconst [0]) + for { + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } +} +func rewriteValueRISCV64_OpDiv16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 x y [false]) + // result: (DIVW (SignExt16to32 x) (SignExt16to32 y)) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + y := v_1 + v.reset(OpRISCV64DIVW) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValueRISCV64_OpDiv16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16u x y) + // result: (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64DIVUW) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueRISCV64_OpDiv32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Div32 x 
y [false]) + // result: (DIVW x y) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + y := v_1 + v.reset(OpRISCV64DIVW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpDiv64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Div64 x y [false]) + // result: (DIV x y) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + y := v_1 + v.reset(OpRISCV64DIV) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpDiv8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (DIVW (SignExt8to32 x) (SignExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64DIVW) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueRISCV64_OpDiv8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64DIVUW) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueRISCV64_OpEq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq16 x y) + // result: (SEQZ (SUB (ZeroExt16to64 x) (ZeroExt16to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64SEQZ) + v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpEq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq32 x y) + // cond: x.Type.IsSigned() + // result: (SEQZ (SUB (SignExt32to64 x) (SignExt32to64 y))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + y := v_1 + if !(x.Type.IsSigned()) { + continue + } + v.reset(OpRISCV64SEQZ) + v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } + break + } + // match: (Eq32 x y) + // cond: !x.Type.IsSigned() + // result: (SEQZ (SUB (ZeroExt32to64 x) (ZeroExt32to64 y))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + y := v_1 + if !(!x.Type.IsSigned()) { + continue + } + v.reset(OpRISCV64SEQZ) + v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } + break + } + return false +} +func rewriteValueRISCV64_OpEq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq64 x y) + // result: (SEQZ (SUB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64SEQZ) + v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpEq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq8 x y) + // result: (SEQZ (SUB (ZeroExt8to64 x) (ZeroExt8to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64SEQZ) + v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpEqB(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqB x y) + // result: (SEQZ (SUB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64SEQZ) + v0 := b.NewValue0(v.Pos, OpRISCV64SUB, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpEqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqPtr x y) + // result: (SEQZ (SUB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64SEQZ) + v0 := b.NewValue0(v.Pos, OpRISCV64SUB, typ.Uintptr) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpHmul32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul32 x y) + // result: (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64SRAI) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpHmul32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul32u x y) + // result: (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64SRLI) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpLeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16 x y) + // result: (Not (Less16 y x)) + for { + x := v_0 
+ y := v_1 + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess16, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpLeq16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16U x y) + // result: (Not (Less16U y x)) + for { + x := v_0 + y := v_1 + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess16U, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpLeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq32 x y) + // result: (Not (Less32 y x)) + for { + x := v_0 + y := v_1 + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpLeq32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq32U x y) + // result: (Not (Less32U y x)) + for { + x := v_0 + y := v_1 + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpLeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq64 x y) + // result: (Not (Less64 y x)) + for { + x := v_0 + y := v_1 + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpLeq64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq64U x y) + // result: (Not (Less64U y x)) + for { + x := v_0 + y := v_1 + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64U, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpLeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8 x y) + // 
result: (Not (Less8 y x)) + for { + x := v_0 + y := v_1 + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess8, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpLeq8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8U x y) + // result: (Not (Less8U y x)) + for { + x := v_0 + y := v_1 + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess8U, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpLess16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16 x y) + // result: (SLT (SignExt16to64 x) (SignExt16to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64SLT) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueRISCV64_OpLess16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16U x y) + // result: (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64SLTU) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueRISCV64_OpLess32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less32 x y) + // result: (SLT (SignExt32to64 x) (SignExt32to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64SLT) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueRISCV64_OpLess32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (Less32U x y) + // result: (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64SLTU) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueRISCV64_OpLess8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8 x y) + // result: (SLT (SignExt8to64 x) (SignExt8to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64SLT) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueRISCV64_OpLess8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8U x y) + // result: (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64SLTU) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueRISCV64_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Load ptr mem) + // cond: t.IsBoolean() + // result: (MOVBUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean()) { + break + } + v.reset(OpRISCV64MOVBUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: ( is8BitInt(t) && t.IsSigned()) + // result: (MOVBload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is8BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpRISCV64MOVBload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: ( is8BitInt(t) && !t.IsSigned()) + // result: (MOVBUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if 
!(is8BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpRISCV64MOVBUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is16BitInt(t) && t.IsSigned()) + // result: (MOVHload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpRISCV64MOVHload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is16BitInt(t) && !t.IsSigned()) + // result: (MOVHUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpRISCV64MOVHUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is32BitInt(t) && t.IsSigned()) + // result: (MOVWload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpRISCV64MOVWload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is32BitInt(t) && !t.IsSigned()) + // result: (MOVWUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpRISCV64MOVWUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (MOVDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpRISCV64MOVDload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (FMOVWload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + v.reset(OpRISCV64FMOVWload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (FMOVDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(OpRISCV64FMOVDload) + v.AddArg2(ptr, mem) + return true + } + return false +} +func 
rewriteValueRISCV64_OpLocalAddr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (MOVaddr {sym} (SPanchored base mem)) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(OpRISCV64MOVaddr) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) + v.AddArg(v0) + return true + } + // match: (LocalAddr {sym} base _) + // cond: !t.Elem().HasPointers() + // result: (MOVaddr {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(OpRISCV64MOVaddr) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + return false +} +func rewriteValueRISCV64_OpLsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x16 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt16to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg16, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpLsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x32 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SLL x y) 
(Neg16 (SLTIU [64] (ZeroExt32to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg16, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpLsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x64 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SLL x y) (Neg16 (SLTIU [64] y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg16, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpLsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x8 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt8to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg16, t) + v2 := b.NewValue0(v.Pos, 
OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpLsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt16to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg32, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpLsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x32 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt32to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg32, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + 
v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpLsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x64 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SLL x y) (Neg32 (SLTIU [64] y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg32, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpLsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt8to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg32, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false +} +func 
rewriteValueRISCV64_OpLsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x16 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt16to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg64, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpLsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x32 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt32to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg64, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpLsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh64x64 x y) + // cond: !shiftIsBounded(v) + 
// result: (AND (SLL x y) (Neg64 (SLTIU [64] y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg64, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpLsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x8 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt8to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg64, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpLsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt16to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg2(x, y) + v1 := 
b.NewValue0(v.Pos, OpNeg8, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpLsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt32to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg8, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpLsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x64 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SLL x y) (Neg8 (SLTIU [64] y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg8, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x64 x y) + // cond: 
shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpLsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x8 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt8to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg8, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Lsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SLL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SLL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpMod16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16 x y [false]) + // result: (REMW (SignExt16to32 x) (SignExt16to32 y)) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + y := v_1 + v.reset(OpRISCV64REMW) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValueRISCV64_OpMod16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16u x y) + // result: (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64REMUW) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, 
OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueRISCV64_OpMod32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Mod32 x y [false]) + // result: (REMW x y) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + y := v_1 + v.reset(OpRISCV64REMW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpMod64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Mod64 x y [false]) + // result: (REM x y) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + y := v_1 + v.reset(OpRISCV64REM) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpMod8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8 x y) + // result: (REMW (SignExt8to32 x) (SignExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64REMW) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueRISCV64_OpMod8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8u x y) + // result: (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64REMUW) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueRISCV64_OpMove(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Move [0] _ _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBload src mem) 
mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpRISCV64MOVBstore) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore dst (MOVHload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpRISCV64MOVHstore) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] dst src mem) + // result: (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpRISCV64MOVBstore) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8) + v0.AuxInt = int32ToAuxInt(1) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [4] {t} dst src mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore dst (MOVWload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpRISCV64MOVWstore) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [4] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + dst := v_0 + 
src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpRISCV64MOVHstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [4] dst src mem) + // result: (MOVBstore [3] dst (MOVBload [3] src mem) (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpRISCV64MOVBstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8) + v2.AuxInt = int32ToAuxInt(2) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(1) + v4 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8) + v4.AuxInt = int32ToAuxInt(1) + v4.AddArg2(src, mem) + v5 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem) + v6 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [8] {t} dst src mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVDstore dst (MOVDload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpRISCV64MOVDstore) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64) + v0.AddArg2(src, mem) + 
v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [8] {t} dst src mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpRISCV64MOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [8] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpRISCV64MOVHstore) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16) + v0.AuxInt = int32ToAuxInt(6) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16) + v2.AuxInt = int32ToAuxInt(4) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(2) + v4 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16) + v4.AuxInt = int32ToAuxInt(2) + v4.AddArg2(src, mem) + v5 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem) + v6 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + 
return true + } + // match: (Move [3] dst src mem) + // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpRISCV64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8) + v2.AuxInt = int32ToAuxInt(1) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [6] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpRISCV64MOVHstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16) + v2.AuxInt = int32ToAuxInt(2) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [12] {t} dst src mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [8] dst (MOVWload [8] src 
mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 12 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpRISCV64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32) + v2.AuxInt = int32ToAuxInt(4) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [16] {t} dst src mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpRISCV64MOVDstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [24] {t} dst src mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 24 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpRISCV64MOVDstore) + v.AuxInt = 
int32ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64) + v0.AuxInt = int32ToAuxInt(16) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(8) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64) + v2.AuxInt = int32ToAuxInt(8) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [32] {t} dst src mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVDstore [24] dst (MOVDload [24] src mem) (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 32 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpRISCV64MOVDstore) + v.AuxInt = int32ToAuxInt(24) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64) + v0.AuxInt = int32ToAuxInt(24) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(16) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64) + v2.AuxInt = int32ToAuxInt(16) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(8) + v4 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64) + v4.AuxInt = int32ToAuxInt(8) + v4.AddArg2(src, mem) + v5 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem) + v6 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [s] {t} dst src mem) + // cond: s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s) + // result: 
(DUFFCOPY [16 * (128 - s/8)] dst src mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) { + break + } + v.reset(OpRISCV64DUFFCOPY) + v.AuxInt = int64ToAuxInt(16 * (128 - s/8)) + v.AddArg3(dst, src, mem) + return true + } + // match: (Move [s] {t} dst src mem) + // cond: (s <= 16 || logLargeCopy(v, s)) + // result: (LoweredMove [t.Alignment()] dst src (ADDI [s-moveSize(t.Alignment(), config)] src) mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(s <= 16 || logLargeCopy(v, s)) { + break + } + v.reset(OpRISCV64LoweredMove) + v.AuxInt = int64ToAuxInt(t.Alignment()) + v0 := b.NewValue0(v.Pos, OpRISCV64ADDI, src.Type) + v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config)) + v0.AddArg(src) + v.AddArg4(dst, src, v0, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpMul16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mul16 x y) + // result: (MULW (SignExt16to32 x) (SignExt16to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64MULW) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueRISCV64_OpMul8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mul8 x y) + // result: (MULW (SignExt8to32 x) (SignExt8to32 y)) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64MULW) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueRISCV64_OpNeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (Neq16 x y) + // result: (Not (Eq16 x y)) + for { + x := v_0 + y := v_1 + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpEq16, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpNeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq32 x y) + // result: (Not (Eq32 x y)) + for { + x := v_0 + y := v_1 + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpNeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq64 x y) + // result: (Not (Eq64 x y)) + for { + x := v_0 + y := v_1 + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpNeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq8 x y) + // result: (Not (Eq8 x y)) + for { + x := v_0 + y := v_1 + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpEq8, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpNeqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NeqB x y) + // result: (SNEZ (SUB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpRISCV64SNEZ) + v0 := b.NewValue0(v.Pos, OpRISCV64SUB, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpNeqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NeqPtr x y) + // result: (Not (EqPtr x y)) + for { + x := v_0 + y := v_1 + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpEqPtr, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpOffPtr(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + 
typ := &b.Func.Config.Types + // match: (OffPtr [off] ptr:(SP)) + // cond: is32Bit(off) + // result: (MOVaddr [int32(off)] ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + if ptr.Op != OpSP || !(is32Bit(off)) { + break + } + v.reset(OpRISCV64MOVaddr) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) + return true + } + // match: (OffPtr [off] ptr) + // cond: is32Bit(off) + // result: (ADDI [off] ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + if !(is32Bit(off)) { + break + } + v.reset(OpRISCV64ADDI) + v.AuxInt = int64ToAuxInt(off) + v.AddArg(ptr) + return true + } + // match: (OffPtr [off] ptr) + // result: (ADD (MOVDconst [off]) ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(OpRISCV64ADD) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(off) + v.AddArg2(v0, ptr) + return true + } +} +func rewriteValueRISCV64_OpPanicBounds(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 0 + // result: (LoweredPanicBoundsA [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 0) { + break + } + v.reset(OpRISCV64LoweredPanicBoundsA) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 1 + // result: (LoweredPanicBoundsB [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 1) { + break + } + v.reset(OpRISCV64LoweredPanicBoundsB) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 2 + // result: (LoweredPanicBoundsC [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 2) { + break + } + v.reset(OpRISCV64LoweredPanicBoundsC) + 
v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64ADD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADD (MOVDconst [val]) x) + // cond: is32Bit(val) && !t.IsPtr() + // result: (ADDI [val] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpRISCV64MOVDconst { + continue + } + t := v_0.Type + val := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(is32Bit(val) && !t.IsPtr()) { + continue + } + v.reset(OpRISCV64ADDI) + v.AuxInt = int64ToAuxInt(val) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValueRISCV64_OpRISCV64ADDI(v *Value) bool { + v_0 := v.Args[0] + // match: (ADDI [c] (MOVaddr [d] {s} x)) + // cond: is32Bit(c+int64(d)) + // result: (MOVaddr [int32(c)+d] {s} x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVaddr { + break + } + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) + x := v_0.Args[0] + if !(is32Bit(c + int64(d))) { + break + } + v.reset(OpRISCV64MOVaddr) + v.AuxInt = int32ToAuxInt(int32(c) + d) + v.Aux = symToAux(s) + v.AddArg(x) + return true + } + // match: (ADDI [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ADDI [x] (MOVDconst [y])) + // cond: is32Bit(x + y) + // result: (MOVDconst [x + y]) + for { + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVDconst { + break + } + y := auxIntToInt64(v_0.AuxInt) + if !(is32Bit(x + y)) { + break + } + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(x + y) + return true + } + // match: (ADDI [x] (ADDI [y] z)) + // cond: is32Bit(x + y) + // result: (ADDI [x + y] z) + for { + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64ADDI { + break + } + y := auxIntToInt64(v_0.AuxInt) + z := v_0.Args[0] + if !(is32Bit(x + y)) { + break + } + v.reset(OpRISCV64ADDI) + v.AuxInt = int64ToAuxInt(x + y) + v.AddArg(z) + return true + } + return 
false +} +func rewriteValueRISCV64_OpRISCV64AND(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AND (MOVDconst [val]) x) + // cond: is32Bit(val) + // result: (ANDI [val] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpRISCV64MOVDconst { + continue + } + val := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(is32Bit(val)) { + continue + } + v.reset(OpRISCV64ANDI) + v.AuxInt = int64ToAuxInt(val) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValueRISCV64_OpRISCV64ANDI(v *Value) bool { + v_0 := v.Args[0] + // match: (ANDI [0] x) + // result: (MOVDconst [0]) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (ANDI [-1] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != -1 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ANDI [x] (MOVDconst [y])) + // result: (MOVDconst [x & y]) + for { + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVDconst { + break + } + y := auxIntToInt64(v_0.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(x & y) + return true + } + // match: (ANDI [x] (ANDI [y] z)) + // result: (ANDI [x & y] z) + for { + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64ANDI { + break + } + y := auxIntToInt64(v_0.AuxInt) + z := v_0.Args[0] + v.reset(OpRISCV64ANDI) + v.AuxInt = int64ToAuxInt(x & y) + v.AddArg(z) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64FADDD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FADDD a (FMULD x y)) + // cond: a.Block.Func.useFMA(v) + // result: (FMADDD x y a) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + if v_1.Op != OpRISCV64FMULD { + continue + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Block.Func.useFMA(v)) { + continue + } + v.reset(OpRISCV64FMADDD) + v.AddArg3(x, y, a) + return true + } + break + } + return false 
+} +func rewriteValueRISCV64_OpRISCV64FADDS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FADDS a (FMULS x y)) + // cond: a.Block.Func.useFMA(v) + // result: (FMADDS x y a) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + a := v_0 + if v_1.Op != OpRISCV64FMULS { + continue + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Block.Func.useFMA(v)) { + continue + } + v.reset(OpRISCV64FMADDS) + v.AddArg3(x, y, a) + return true + } + break + } + return false +} +func rewriteValueRISCV64_OpRISCV64FMADDD(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMADDD neg:(FNEGD x) y z) + // cond: neg.Uses == 1 + // result: (FNMSUBD x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + neg := v_0 + if neg.Op != OpRISCV64FNEGD { + continue + } + x := neg.Args[0] + y := v_1 + z := v_2 + if !(neg.Uses == 1) { + continue + } + v.reset(OpRISCV64FNMSUBD) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (FMADDD x y neg:(FNEGD z)) + // cond: neg.Uses == 1 + // result: (FMSUBD x y z) + for { + x := v_0 + y := v_1 + neg := v_2 + if neg.Op != OpRISCV64FNEGD { + break + } + z := neg.Args[0] + if !(neg.Uses == 1) { + break + } + v.reset(OpRISCV64FMSUBD) + v.AddArg3(x, y, z) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64FMADDS(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMADDS neg:(FNEGS x) y z) + // cond: neg.Uses == 1 + // result: (FNMSUBS x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + neg := v_0 + if neg.Op != OpRISCV64FNEGS { + continue + } + x := neg.Args[0] + y := v_1 + z := v_2 + if !(neg.Uses == 1) { + continue + } + v.reset(OpRISCV64FNMSUBS) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (FMADDS x y neg:(FNEGS z)) + // cond: neg.Uses == 1 + // result: (FMSUBS x y z) + for { + x := v_0 + y := v_1 + neg := v_2 + if neg.Op != OpRISCV64FNEGS { + break + } + z := 
neg.Args[0] + if !(neg.Uses == 1) { + break + } + v.reset(OpRISCV64FMSUBS) + v.AddArg3(x, y, z) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64FMSUBD(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMSUBD neg:(FNEGD x) y z) + // cond: neg.Uses == 1 + // result: (FNMADDD x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + neg := v_0 + if neg.Op != OpRISCV64FNEGD { + continue + } + x := neg.Args[0] + y := v_1 + z := v_2 + if !(neg.Uses == 1) { + continue + } + v.reset(OpRISCV64FNMADDD) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (FMSUBD x y neg:(FNEGD z)) + // cond: neg.Uses == 1 + // result: (FMADDD x y z) + for { + x := v_0 + y := v_1 + neg := v_2 + if neg.Op != OpRISCV64FNEGD { + break + } + z := neg.Args[0] + if !(neg.Uses == 1) { + break + } + v.reset(OpRISCV64FMADDD) + v.AddArg3(x, y, z) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64FMSUBS(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMSUBS neg:(FNEGS x) y z) + // cond: neg.Uses == 1 + // result: (FNMADDS x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + neg := v_0 + if neg.Op != OpRISCV64FNEGS { + continue + } + x := neg.Args[0] + y := v_1 + z := v_2 + if !(neg.Uses == 1) { + continue + } + v.reset(OpRISCV64FNMADDS) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (FMSUBS x y neg:(FNEGS z)) + // cond: neg.Uses == 1 + // result: (FMADDS x y z) + for { + x := v_0 + y := v_1 + neg := v_2 + if neg.Op != OpRISCV64FNEGS { + break + } + z := neg.Args[0] + if !(neg.Uses == 1) { + break + } + v.reset(OpRISCV64FMADDS) + v.AddArg3(x, y, z) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64FNMADDD(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FNMADDD neg:(FNEGD x) y z) + // cond: neg.Uses == 1 + // result: (FMSUBD x y z) + for { + for _i0 := 0; _i0 
<= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + neg := v_0 + if neg.Op != OpRISCV64FNEGD { + continue + } + x := neg.Args[0] + y := v_1 + z := v_2 + if !(neg.Uses == 1) { + continue + } + v.reset(OpRISCV64FMSUBD) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (FNMADDD x y neg:(FNEGD z)) + // cond: neg.Uses == 1 + // result: (FNMSUBD x y z) + for { + x := v_0 + y := v_1 + neg := v_2 + if neg.Op != OpRISCV64FNEGD { + break + } + z := neg.Args[0] + if !(neg.Uses == 1) { + break + } + v.reset(OpRISCV64FNMSUBD) + v.AddArg3(x, y, z) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64FNMADDS(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FNMADDS neg:(FNEGS x) y z) + // cond: neg.Uses == 1 + // result: (FMSUBS x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + neg := v_0 + if neg.Op != OpRISCV64FNEGS { + continue + } + x := neg.Args[0] + y := v_1 + z := v_2 + if !(neg.Uses == 1) { + continue + } + v.reset(OpRISCV64FMSUBS) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (FNMADDS x y neg:(FNEGS z)) + // cond: neg.Uses == 1 + // result: (FNMSUBS x y z) + for { + x := v_0 + y := v_1 + neg := v_2 + if neg.Op != OpRISCV64FNEGS { + break + } + z := neg.Args[0] + if !(neg.Uses == 1) { + break + } + v.reset(OpRISCV64FNMSUBS) + v.AddArg3(x, y, z) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64FNMSUBD(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FNMSUBD neg:(FNEGD x) y z) + // cond: neg.Uses == 1 + // result: (FMADDD x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + neg := v_0 + if neg.Op != OpRISCV64FNEGD { + continue + } + x := neg.Args[0] + y := v_1 + z := v_2 + if !(neg.Uses == 1) { + continue + } + v.reset(OpRISCV64FMADDD) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (FNMSUBD x y neg:(FNEGD z)) + // cond: neg.Uses == 1 + // result: (FNMADDD x y z) + for { + x 
:= v_0 + y := v_1 + neg := v_2 + if neg.Op != OpRISCV64FNEGD { + break + } + z := neg.Args[0] + if !(neg.Uses == 1) { + break + } + v.reset(OpRISCV64FNMADDD) + v.AddArg3(x, y, z) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64FNMSUBS(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FNMSUBS neg:(FNEGS x) y z) + // cond: neg.Uses == 1 + // result: (FMADDS x y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + neg := v_0 + if neg.Op != OpRISCV64FNEGS { + continue + } + x := neg.Args[0] + y := v_1 + z := v_2 + if !(neg.Uses == 1) { + continue + } + v.reset(OpRISCV64FMADDS) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (FNMSUBS x y neg:(FNEGS z)) + // cond: neg.Uses == 1 + // result: (FNMADDS x y z) + for { + x := v_0 + y := v_1 + neg := v_2 + if neg.Op != OpRISCV64FNEGS { + break + } + z := neg.Args[0] + if !(neg.Uses == 1) { + break + } + v.reset(OpRISCV64FNMADDS) + v.AddArg3(x, y, z) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64FSUBD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FSUBD a (FMULD x y)) + // cond: a.Block.Func.useFMA(v) + // result: (FNMSUBD x y a) + for { + a := v_0 + if v_1.Op != OpRISCV64FMULD { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Block.Func.useFMA(v)) { + break + } + v.reset(OpRISCV64FNMSUBD) + v.AddArg3(x, y, a) + return true + } + // match: (FSUBD (FMULD x y) a) + // cond: a.Block.Func.useFMA(v) + // result: (FMSUBD x y a) + for { + if v_0.Op != OpRISCV64FMULD { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + a := v_1 + if !(a.Block.Func.useFMA(v)) { + break + } + v.reset(OpRISCV64FMSUBD) + v.AddArg3(x, y, a) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64FSUBS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FSUBS a (FMULS x y)) + // cond: a.Block.Func.useFMA(v) + // result: (FNMSUBS x y a) + for { + a := v_0 + if v_1.Op != 
OpRISCV64FMULS { + break + } + y := v_1.Args[1] + x := v_1.Args[0] + if !(a.Block.Func.useFMA(v)) { + break + } + v.reset(OpRISCV64FNMSUBS) + v.AddArg3(x, y, a) + return true + } + // match: (FSUBS (FMULS x y) a) + // cond: a.Block.Func.useFMA(v) + // result: (FMSUBS x y a) + for { + if v_0.Op != OpRISCV64FMULS { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + a := v_1 + if !(a.Block.Func.useFMA(v)) { + break + } + v.reset(OpRISCV64FMSUBS) + v.AddArg3(x, y, a) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVBUload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + // match: (MOVBUload [off1] {sym} (ADDI [off2] base) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVBUload [off1+int32(off2)] {sym} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpRISCV64MOVBUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVBUreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVBUreg x:(FLES _ _)) + // result: x + for { + x := v_0 + if x.Op != 
OpRISCV64FLES { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(FLTS _ _)) + // result: x + for { + x := v_0 + if x.Op != OpRISCV64FLTS { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(FEQS _ _)) + // result: x + for { + x := v_0 + if x.Op != OpRISCV64FEQS { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(FNES _ _)) + // result: x + for { + x := v_0 + if x.Op != OpRISCV64FNES { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(FLED _ _)) + // result: x + for { + x := v_0 + if x.Op != OpRISCV64FLED { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(FLTD _ _)) + // result: x + for { + x := v_0 + if x.Op != OpRISCV64FLTD { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(FEQD _ _)) + // result: x + for { + x := v_0 + if x.Op != OpRISCV64FEQD { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(FNED _ _)) + // result: x + for { + x := v_0 + if x.Op != OpRISCV64FNED { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(SEQZ _)) + // result: x + for { + x := v_0 + if x.Op != OpRISCV64SEQZ { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(SNEZ _)) + // result: x + for { + x := v_0 + if x.Op != OpRISCV64SNEZ { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(SLT _ _)) + // result: x + for { + x := v_0 + if x.Op != OpRISCV64SLT { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(SLTU _ _)) + // result: x + for { + x := v_0 + if x.Op != OpRISCV64SLTU { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(ANDI [c] y)) + // cond: c >= 0 && int64(uint8(c)) == c + // result: x + for { + x := v_0 + if x.Op != OpRISCV64ANDI { + break + } + c := auxIntToInt64(x.AuxInt) + if !(c >= 0 && int64(uint8(c)) == c) { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg (ANDI [c] x)) + // cond: c < 0 + // result: (ANDI [int64(uint8(c))] x) + for { 
+ if v_0.Op != OpRISCV64ANDI { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c < 0) { + break + } + v.reset(OpRISCV64ANDI) + v.AuxInt = int64ToAuxInt(int64(uint8(c))) + v.AddArg(x) + return true + } + // match: (MOVBUreg (MOVDconst [c])) + // result: (MOVDconst [int64(uint8(c))]) + for { + if v_0.Op != OpRISCV64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint8(c))) + return true + } + // match: (MOVBUreg x:(MOVBUload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVBUload { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg x:(Select0 (LoweredAtomicLoad8 _ _))) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpSelect0 { + break + } + x_0 := x.Args[0] + if x_0.Op != OpRISCV64LoweredAtomicLoad8 { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg x:(Select0 (LoweredAtomicCas32 _ _ _ _))) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpSelect0 { + break + } + x_0 := x.Args[0] + if x_0.Op != OpRISCV64LoweredAtomicCas32 { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg x:(Select0 (LoweredAtomicCas64 _ _ _ _))) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpSelect0 { + break + } + x_0 := x.Args[0] + if x_0.Op != OpRISCV64LoweredAtomicCas64 { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg x:(MOVBUreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVBUreg { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg x:(MOVBload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBUload [off] {sym} ptr mem) + for { + t := v.Type + x := v_0 + if x.Op != OpRISCV64MOVBload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := 
x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpRISCV64MOVBUload, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVBload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVBload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + // match: (MOVBload [off1] {sym} (ADDI [off2] base) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVBload [off1+int32(off2)] {sym} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpRISCV64MOVBload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVBreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVBreg x:(ANDI [c] y)) + // cond: c >= 0 && int64(int8(c)) == c + // result: x + for { + x := v_0 + if x.Op != OpRISCV64ANDI { + break + } + c := auxIntToInt64(x.AuxInt) + if !(c >= 0 && int64(int8(c)) == c) { + break + } + v.copyOf(x) + return true + } + // match: (MOVBreg (MOVDconst [c])) + // result: (MOVDconst 
[int64(int8(c))]) + for { + if v_0.Op != OpRISCV64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(int8(c))) + return true + } + // match: (MOVBreg x:(MOVBload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVBload { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVBreg x:(MOVBreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVBreg { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVBreg x:(MOVBUload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBload [off] {sym} ptr mem) + for { + t := v.Type + x := v_0 + if x.Op != OpRISCV64MOVBUload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpRISCV64MOVBload, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVBstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVBstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVBstore [off1] {sym} (ADDI [off2] base) val mem) 
+ // cond: is32Bit(int64(off1)+off2) + // result: (MOVBstore [off1+int32(off2)] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpRISCV64MOVBstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) + // result: (MOVBstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpRISCV64MOVBstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpRISCV64MOVBreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpRISCV64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpRISCV64MOVHreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpRISCV64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpRISCV64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpRISCV64MOVBstore) + v.AuxInt = 
int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpRISCV64MOVBUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpRISCV64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpRISCV64MOVHUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpRISCV64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpRISCV64MOVWUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpRISCV64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVBstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpRISCV64MOVBstorezero) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, 
sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpRISCV64MOVBstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVDload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + // match: (MOVDload [off1] {sym} (ADDI [off2] base) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVDload [off1+int32(off2)] {sym} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpRISCV64MOVDload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVDnop(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVDnop 
(MOVDconst [c])) + // result: (MOVDconst [c]) + for { + if v_0.Op != OpRISCV64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(c) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVDreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVDreg x) + // cond: x.Uses == 1 + // result: (MOVDnop x) + for { + x := v_0 + if !(x.Uses == 1) { + break + } + v.reset(OpRISCV64MOVDnop) + v.AddArg(x) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVDstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVDstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVDstore [off1] {sym} (ADDI [off2] base) val mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVDstore [off1+int32(off2)] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpRISCV64MOVDstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) + // result: (MOVDstorezero [off] {sym} ptr mem) + for { + off := 
auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpRISCV64MOVDstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVDstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpRISCV64MOVDstorezero) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVDstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpRISCV64MOVDstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVHUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := 
auxToSym(v.Aux) + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVHUload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + // match: (MOVHUload [off1] {sym} (ADDI [off2] base) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVHUload [off1+int32(off2)] {sym} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpRISCV64MOVHUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVHUreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVHUreg x:(ANDI [c] y)) + // cond: c >= 0 && int64(uint16(c)) == c + // result: x + for { + x := v_0 + if x.Op != OpRISCV64ANDI { + break + } + c := auxIntToInt64(x.AuxInt) + if !(c >= 0 && int64(uint16(c)) == c) { + break + } + v.copyOf(x) + return true + } + // match: (MOVHUreg (ANDI [c] x)) + // cond: c < 0 + // result: (ANDI [int64(uint16(c))] x) + for { + if v_0.Op != OpRISCV64ANDI { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c < 0) { + break + } + v.reset(OpRISCV64ANDI) + v.AuxInt = int64ToAuxInt(int64(uint16(c))) + v.AddArg(x) + return true + } + // match: (MOVHUreg (MOVDconst [c])) + // result: (MOVDconst [int64(uint16(c))]) + for { + if v_0.Op != OpRISCV64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint16(c))) + return true + } + // match: (MOVHUreg x:(MOVBUload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != 
OpRISCV64MOVBUload { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVHUload { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVBUreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVBUreg { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVHUreg { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHUload [off] {sym} ptr mem) + for { + t := v.Type + x := v_0 + if x.Op != OpRISCV64MOVHload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpRISCV64MOVHUload, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVHload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVHload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + // match: (MOVHload 
[off1] {sym} (ADDI [off2] base) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVHload [off1+int32(off2)] {sym} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpRISCV64MOVHload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVHreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVHreg x:(ANDI [c] y)) + // cond: c >= 0 && int64(int16(c)) == c + // result: x + for { + x := v_0 + if x.Op != OpRISCV64ANDI { + break + } + c := auxIntToInt64(x.AuxInt) + if !(c >= 0 && int64(int16(c)) == c) { + break + } + v.copyOf(x) + return true + } + // match: (MOVHreg (MOVDconst [c])) + // result: (MOVDconst [int64(int16(c))]) + for { + if v_0.Op != OpRISCV64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(int16(c))) + return true + } + // match: (MOVHreg x:(MOVBload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVBload { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVBUload { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVHload { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVBreg { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != 
OpRISCV64MOVBUreg { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVHreg { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHUload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHload [off] {sym} ptr mem) + for { + t := v.Type + x := v_0 + if x.Op != OpRISCV64MOVHUload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpRISCV64MOVHload, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVHstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVHstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVHstore [off1] {sym} (ADDI [off2] base) val mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVHstore [off1+int32(off2)] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 
+ if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpRISCV64MOVHstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) + // result: (MOVHstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpRISCV64MOVHstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpRISCV64MOVHreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpRISCV64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpRISCV64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpRISCV64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpRISCV64MOVHUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpRISCV64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpRISCV64MOVWUreg { + 
break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpRISCV64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVHstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpRISCV64MOVHstorezero) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpRISCV64MOVHstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) 
+ base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVWUload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + // match: (MOVWUload [off1] {sym} (ADDI [off2] base) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVWUload [off1+int32(off2)] {sym} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpRISCV64MOVWUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVWUreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MOVWUreg x:(ANDI [c] y)) + // cond: c >= 0 && int64(uint32(c)) == c + // result: x + for { + x := v_0 + if x.Op != OpRISCV64ANDI { + break + } + c := auxIntToInt64(x.AuxInt) + if !(c >= 0 && int64(uint32(c)) == c) { + break + } + v.copyOf(x) + return true + } + // match: (MOVWUreg (ANDI [c] x)) + // cond: c < 0 + // result: (AND (MOVDconst [int64(uint32(c))]) x) + for { + if v_0.Op != OpRISCV64ANDI { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(c < 0) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(int64(uint32(c))) + v.AddArg2(v0, x) + return true + } + // match: (MOVWUreg (MOVDconst [c])) + // result: (MOVDconst [int64(uint32(c))]) + for { + if v_0.Op != OpRISCV64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint32(c))) + return true + } + // match: (MOVWUreg x:(MOVBUload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVBUload { 
+ break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVHUload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVHUload { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVWUload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVWUload { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVBUreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVBUreg { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVHUreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVHUreg { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVWUreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVWUreg { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVWload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWUload [off] {sym} ptr mem) + for { + t := v.Type + x := v_0 + if x.Op != OpRISCV64MOVWload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpRISCV64MOVWUload, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVWload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpRISCV64MOVaddr { + break + 
} + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVWload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + // match: (MOVWload [off1] {sym} (ADDI [off2] base) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVWload [off1+int32(off2)] {sym} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpRISCV64MOVWload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVWreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVWreg x:(ANDI [c] y)) + // cond: c >= 0 && int64(int32(c)) == c + // result: x + for { + x := v_0 + if x.Op != OpRISCV64ANDI { + break + } + c := auxIntToInt64(x.AuxInt) + if !(c >= 0 && int64(int32(c)) == c) { + break + } + v.copyOf(x) + return true + } + // match: (MOVWreg (MOVDconst [c])) + // result: (MOVDconst [int64(int32(c))]) + for { + if v_0.Op != OpRISCV64MOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(int32(c))) + return true + } + // match: (MOVWreg x:(MOVBload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVBload { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBUload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVBUload { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVHload { + 
break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHUload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVHUload { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWload _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVWload { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(ADDIW _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64ADDIW { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(SUBW _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64SUBW { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(NEGW _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64NEGW { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MULW _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MULW { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(DIVW _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64DIVW { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(DIVUW _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64DIVUW { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(REMW _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64REMW { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(REMUW _ _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64REMUW { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVBreg 
{ + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBUreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVBUreg { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVHreg { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWreg _)) + // result: (MOVDreg x) + for { + x := v_0 + if x.Op != OpRISCV64MOVWreg { + break + } + v.reset(OpRISCV64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWUload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWload [off] {sym} ptr mem) + for { + t := v.Type + x := v_0 + if x.Op != OpRISCV64MOVWUload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpRISCV64MOVWload, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVWstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVWstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // 
match: (MOVWstore [off1] {sym} (ADDI [off2] base) val mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVWstore [off1+int32(off2)] {sym} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpRISCV64MOVWstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) + // result: (MOVWstorezero [off] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpRISCV64MOVWstorezero) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpRISCV64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpRISCV64MOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpRISCV64MOVWUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpRISCV64MOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVWstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && 
is32Bit(int64(off1)+int64(off2)) + // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpRISCV64MOVWstorezero) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpRISCV64MOVWstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64NEG(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (NEG (SUB x y)) + // result: (SUB y x) + for { + if v_0.Op != OpRISCV64SUB { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpRISCV64SUB) + v.AddArg2(y, x) + return true + } + // match: (NEG s:(ADDI [val] (SUB x y))) + // cond: s.Uses == 1 && is32Bit(-val) + // result: (ADDI [-val] (SUB y x)) + for { + t := v.Type + s := v_0 + if s.Op != OpRISCV64ADDI { + break + } + val := auxIntToInt64(s.AuxInt) + s_0 := s.Args[0] + if s_0.Op != OpRISCV64SUB { + break + } + y := s_0.Args[1] + x := s_0.Args[0] + if !(s.Uses == 1 && is32Bit(-val)) { + break + } + v.reset(OpRISCV64ADDI) + v.AuxInt = int64ToAuxInt(-val) + v0 := b.NewValue0(v.Pos, OpRISCV64SUB, t) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + // match: (NEG (NEG x)) + // result: x + for { + if v_0.Op != 
OpRISCV64NEG { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (NEG (MOVDconst [x])) + // result: (MOVDconst [-x]) + for { + if v_0.Op != OpRISCV64MOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(-x) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64NEGW(v *Value) bool { + v_0 := v.Args[0] + // match: (NEGW (MOVDconst [x])) + // result: (MOVDconst [int64(int32(-x))]) + for { + if v_0.Op != OpRISCV64MOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(int32(-x))) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64OR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OR (MOVDconst [val]) x) + // cond: is32Bit(val) + // result: (ORI [val] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpRISCV64MOVDconst { + continue + } + val := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(is32Bit(val)) { + continue + } + v.reset(OpRISCV64ORI) + v.AuxInt = int64ToAuxInt(val) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValueRISCV64_OpRISCV64ORI(v *Value) bool { + v_0 := v.Args[0] + // match: (ORI [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ORI [-1] x) + // result: (MOVDconst [-1]) + for { + if auxIntToInt64(v.AuxInt) != -1 { + break + } + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + // match: (ORI [x] (MOVDconst [y])) + // result: (MOVDconst [x | y]) + for { + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVDconst { + break + } + y := auxIntToInt64(v_0.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(x | y) + return true + } + // match: (ORI [x] (ORI [y] z)) + // result: (ORI [x | y] z) + for { + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64ORI { + 
break + } + y := auxIntToInt64(v_0.AuxInt) + z := v_0.Args[0] + v.reset(OpRISCV64ORI) + v.AuxInt = int64ToAuxInt(x | y) + v.AddArg(z) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64SEQZ(v *Value) bool { + v_0 := v.Args[0] + // match: (SEQZ (NEG x)) + // result: (SEQZ x) + for { + if v_0.Op != OpRISCV64NEG { + break + } + x := v_0.Args[0] + v.reset(OpRISCV64SEQZ) + v.AddArg(x) + return true + } + // match: (SEQZ (SEQZ x)) + // result: (SNEZ x) + for { + if v_0.Op != OpRISCV64SEQZ { + break + } + x := v_0.Args[0] + v.reset(OpRISCV64SNEZ) + v.AddArg(x) + return true + } + // match: (SEQZ (SNEZ x)) + // result: (SEQZ x) + for { + if v_0.Op != OpRISCV64SNEZ { + break + } + x := v_0.Args[0] + v.reset(OpRISCV64SEQZ) + v.AddArg(x) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64SLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SLL x (MOVDconst [val])) + // result: (SLLI [int64(val&63)] x) + for { + x := v_0 + if v_1.Op != OpRISCV64MOVDconst { + break + } + val := auxIntToInt64(v_1.AuxInt) + v.reset(OpRISCV64SLLI) + v.AuxInt = int64ToAuxInt(int64(val & 63)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64SLLI(v *Value) bool { + v_0 := v.Args[0] + // match: (SLLI [x] (MOVDconst [y])) + // cond: is32Bit(y << uint32(x)) + // result: (MOVDconst [y << uint32(x)]) + for { + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVDconst { + break + } + y := auxIntToInt64(v_0.AuxInt) + if !(is32Bit(y << uint32(x))) { + break + } + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(y << uint32(x)) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64SLT(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SLT x (MOVDconst [val])) + // cond: val >= -2048 && val <= 2047 + // result: (SLTI [val] x) + for { + x := v_0 + if v_1.Op != OpRISCV64MOVDconst { + break + } + val := auxIntToInt64(v_1.AuxInt) + if !(val >= -2048 && val <= 2047) { + break + 
} + v.reset(OpRISCV64SLTI) + v.AuxInt = int64ToAuxInt(val) + v.AddArg(x) + return true + } + // match: (SLT x x) + // result: (MOVDconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64SLTI(v *Value) bool { + v_0 := v.Args[0] + // match: (SLTI [x] (MOVDconst [y])) + // result: (MOVDconst [b2i(int64(y) < int64(x))]) + for { + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVDconst { + break + } + y := auxIntToInt64(v_0.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(b2i(int64(y) < int64(x))) + return true + } + // match: (SLTI [x] (ANDI [y] _)) + // cond: y >= 0 && int64(y) < int64(x) + // result: (MOVDconst [1]) + for { + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64ANDI { + break + } + y := auxIntToInt64(v_0.AuxInt) + if !(y >= 0 && int64(y) < int64(x)) { + break + } + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SLTI [x] (ORI [y] _)) + // cond: y >= 0 && int64(y) >= int64(x) + // result: (MOVDconst [0]) + for { + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64ORI { + break + } + y := auxIntToInt64(v_0.AuxInt) + if !(y >= 0 && int64(y) >= int64(x)) { + break + } + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64SLTIU(v *Value) bool { + v_0 := v.Args[0] + // match: (SLTIU [x] (MOVDconst [y])) + // result: (MOVDconst [b2i(uint64(y) < uint64(x))]) + for { + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVDconst { + break + } + y := auxIntToInt64(v_0.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(b2i(uint64(y) < uint64(x))) + return true + } + // match: (SLTIU [x] (ANDI [y] _)) + // cond: y >= 0 && uint64(y) < uint64(x) + // result: (MOVDconst [1]) + for { + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64ANDI { + break + } + y := 
auxIntToInt64(v_0.AuxInt) + if !(y >= 0 && uint64(y) < uint64(x)) { + break + } + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SLTIU [x] (ORI [y] _)) + // cond: y >= 0 && uint64(y) >= uint64(x) + // result: (MOVDconst [0]) + for { + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64ORI { + break + } + y := auxIntToInt64(v_0.AuxInt) + if !(y >= 0 && uint64(y) >= uint64(x)) { + break + } + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64SLTU(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SLTU x (MOVDconst [val])) + // cond: val >= -2048 && val <= 2047 + // result: (SLTIU [val] x) + for { + x := v_0 + if v_1.Op != OpRISCV64MOVDconst { + break + } + val := auxIntToInt64(v_1.AuxInt) + if !(val >= -2048 && val <= 2047) { + break + } + v.reset(OpRISCV64SLTIU) + v.AuxInt = int64ToAuxInt(val) + v.AddArg(x) + return true + } + // match: (SLTU x x) + // result: (MOVDconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64SNEZ(v *Value) bool { + v_0 := v.Args[0] + // match: (SNEZ (NEG x)) + // result: (SNEZ x) + for { + if v_0.Op != OpRISCV64NEG { + break + } + x := v_0.Args[0] + v.reset(OpRISCV64SNEZ) + v.AddArg(x) + return true + } + // match: (SNEZ (SEQZ x)) + // result: (SEQZ x) + for { + if v_0.Op != OpRISCV64SEQZ { + break + } + x := v_0.Args[0] + v.reset(OpRISCV64SEQZ) + v.AddArg(x) + return true + } + // match: (SNEZ (SNEZ x)) + // result: (SNEZ x) + for { + if v_0.Op != OpRISCV64SNEZ { + break + } + x := v_0.Args[0] + v.reset(OpRISCV64SNEZ) + v.AddArg(x) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64SRA(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRA x (MOVDconst [val])) + // result: (SRAI [int64(val&63)] x) + for { + x := v_0 + if v_1.Op != 
OpRISCV64MOVDconst { + break + } + val := auxIntToInt64(v_1.AuxInt) + v.reset(OpRISCV64SRAI) + v.AuxInt = int64ToAuxInt(int64(val & 63)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64SRAI(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (SRAI [x] (MOVWreg y)) + // cond: x >= 0 && x <= 31 + // result: (SRAIW [int64(x)] y) + for { + t := v.Type + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVWreg { + break + } + y := v_0.Args[0] + if !(x >= 0 && x <= 31) { + break + } + v.reset(OpRISCV64SRAIW) + v.Type = t + v.AuxInt = int64ToAuxInt(int64(x)) + v.AddArg(y) + return true + } + // match: (SRAI [x] (MOVBreg y)) + // cond: x >= 8 + // result: (SRAI [63] (SLLI [56] y)) + for { + t := v.Type + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVBreg { + break + } + y := v_0.Args[0] + if !(x >= 8) { + break + } + v.reset(OpRISCV64SRAI) + v.AuxInt = int64ToAuxInt(63) + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) + v0.AuxInt = int64ToAuxInt(56) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (SRAI [x] (MOVHreg y)) + // cond: x >= 16 + // result: (SRAI [63] (SLLI [48] y)) + for { + t := v.Type + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVHreg { + break + } + y := v_0.Args[0] + if !(x >= 16) { + break + } + v.reset(OpRISCV64SRAI) + v.AuxInt = int64ToAuxInt(63) + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) + v0.AuxInt = int64ToAuxInt(48) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (SRAI [x] (MOVWreg y)) + // cond: x >= 32 + // result: (SRAIW [31] y) + for { + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVWreg { + break + } + y := v_0.Args[0] + if !(x >= 32) { + break + } + v.reset(OpRISCV64SRAIW) + v.AuxInt = int64ToAuxInt(31) + v.AddArg(y) + return true + } + // match: (SRAI [x] (MOVDconst [y])) + // result: (MOVDconst [int64(y) >> uint32(x)]) + for { + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVDconst { + break + } + y := auxIntToInt64(v_0.AuxInt) + 
v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(y) >> uint32(x)) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64SRAW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRAW x (MOVDconst [val])) + // result: (SRAIW [int64(val&31)] x) + for { + x := v_0 + if v_1.Op != OpRISCV64MOVDconst { + break + } + val := auxIntToInt64(v_1.AuxInt) + v.reset(OpRISCV64SRAIW) + v.AuxInt = int64ToAuxInt(int64(val & 31)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64SRL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRL x (MOVDconst [val])) + // result: (SRLI [int64(val&63)] x) + for { + x := v_0 + if v_1.Op != OpRISCV64MOVDconst { + break + } + val := auxIntToInt64(v_1.AuxInt) + v.reset(OpRISCV64SRLI) + v.AuxInt = int64ToAuxInt(int64(val & 63)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64SRLI(v *Value) bool { + v_0 := v.Args[0] + // match: (SRLI [x] (MOVWUreg y)) + // cond: x >= 0 && x <= 31 + // result: (SRLIW [int64(x)] y) + for { + t := v.Type + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVWUreg { + break + } + y := v_0.Args[0] + if !(x >= 0 && x <= 31) { + break + } + v.reset(OpRISCV64SRLIW) + v.Type = t + v.AuxInt = int64ToAuxInt(int64(x)) + v.AddArg(y) + return true + } + // match: (SRLI [x] (MOVBUreg y)) + // cond: x >= 8 + // result: (MOVDconst [0]) + for { + t := v.Type + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVBUreg { + break + } + if !(x >= 8) { + break + } + v.reset(OpRISCV64MOVDconst) + v.Type = t + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SRLI [x] (MOVHUreg y)) + // cond: x >= 16 + // result: (MOVDconst [0]) + for { + t := v.Type + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVHUreg { + break + } + if !(x >= 16) { + break + } + v.reset(OpRISCV64MOVDconst) + v.Type = t + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SRLI [x] (MOVWUreg y)) + // cond: x 
>= 32 + // result: (MOVDconst [0]) + for { + t := v.Type + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVWUreg { + break + } + if !(x >= 32) { + break + } + v.reset(OpRISCV64MOVDconst) + v.Type = t + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SRLI [x] (MOVDconst [y])) + // result: (MOVDconst [int64(uint64(y) >> uint32(x))]) + for { + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVDconst { + break + } + y := auxIntToInt64(v_0.AuxInt) + v.reset(OpRISCV64MOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint64(y) >> uint32(x))) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64SRLW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRLW x (MOVDconst [val])) + // result: (SRLIW [int64(val&31)] x) + for { + x := v_0 + if v_1.Op != OpRISCV64MOVDconst { + break + } + val := auxIntToInt64(v_1.AuxInt) + v.reset(OpRISCV64SRLIW) + v.AuxInt = int64ToAuxInt(int64(val & 31)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64SUB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SUB x (MOVDconst [val])) + // cond: is32Bit(-val) + // result: (ADDI [-val] x) + for { + x := v_0 + if v_1.Op != OpRISCV64MOVDconst { + break + } + val := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(-val)) { + break + } + v.reset(OpRISCV64ADDI) + v.AuxInt = int64ToAuxInt(-val) + v.AddArg(x) + return true + } + // match: (SUB (MOVDconst [val]) y) + // cond: is32Bit(-val) + // result: (NEG (ADDI [-val] y)) + for { + t := v.Type + if v_0.Op != OpRISCV64MOVDconst { + break + } + val := auxIntToInt64(v_0.AuxInt) + y := v_1 + if !(is32Bit(-val)) { + break + } + v.reset(OpRISCV64NEG) + v0 := b.NewValue0(v.Pos, OpRISCV64ADDI, t) + v0.AuxInt = int64ToAuxInt(-val) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (SUB x (MOVDconst [0])) + // result: x + for { + x := v_0 + if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.copyOf(x) + return 
true + } + // match: (SUB (MOVDconst [0]) x) + // result: (NEG x) + for { + if v_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_1 + v.reset(OpRISCV64NEG) + v.AddArg(x) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64SUBW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBW x (MOVDconst [0])) + // result: (ADDIW [0] x) + for { + x := v_0 + if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.reset(OpRISCV64ADDIW) + v.AuxInt = int64ToAuxInt(0) + v.AddArg(x) + return true + } + // match: (SUBW (MOVDconst [0]) x) + // result: (NEGW x) + for { + if v_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_1 + v.reset(OpRISCV64NEGW) + v.AddArg(x) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64XOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XOR (MOVDconst [val]) x) + // cond: is32Bit(val) + // result: (XORI [val] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpRISCV64MOVDconst { + continue + } + val := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(is32Bit(val)) { + continue + } + v.reset(OpRISCV64XORI) + v.AuxInt = int64ToAuxInt(val) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValueRISCV64_OpRotateLeft16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft16 x (MOVDconst [c])) + // result: (Or16 (Lsh16x64 x (MOVDconst [c&15])) (Rsh16Ux64 x (MOVDconst [-c&15]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpRISCV64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr16) + v0 := b.NewValue0(v.Pos, OpLsh16x64, t) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 15) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + 
v3.AuxInt = int64ToAuxInt(-c & 15) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueRISCV64_OpRotateLeft32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft32 x (MOVDconst [c])) + // result: (Or32 (Lsh32x64 x (MOVDconst [c&31])) (Rsh32Ux64 x (MOVDconst [-c&31]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpRISCV64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr32) + v0 := b.NewValue0(v.Pos, OpLsh32x64, t) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 31) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 31) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueRISCV64_OpRotateLeft64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft64 x (MOVDconst [c])) + // result: (Or64 (Lsh64x64 x (MOVDconst [c&63])) (Rsh64Ux64 x (MOVDconst [-c&63]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpRISCV64MOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr64) + v0 := b.NewValue0(v.Pos, OpLsh64x64, t) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 63) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 63) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueRISCV64_OpRotateLeft8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft8 x (MOVDconst [c])) + // result: (Or8 (Lsh8x64 x (MOVDconst [c&7])) (Rsh8Ux64 x (MOVDconst [-c&7]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpRISCV64MOVDconst { + 
break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpLsh8x64, t) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 7) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 7) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh16Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt16to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpNeg16, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v.AddArg2(v0, v2) + return true + } + // match: (Rsh16Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt16to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh16Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt32to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, 
OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpNeg16, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v.AddArg2(v0, v2) + return true + } + // match: (Rsh16Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt16to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh16Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpNeg16, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = int64ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg2(v0, v2) + return true + } + // match: (Rsh16Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt16to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh16Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt8to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + 
break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpNeg16, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v.AddArg2(v0, v2) + return true + } + // match: (Rsh16Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt16to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x16 x y) + // cond: !shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = int64ToAuxInt(-1) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b 
:= v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x32 x y) + // cond: !shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = int64ToAuxInt(-1) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x64 x y) + // cond: !shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = int64ToAuxInt(-1) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = int64ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + 
v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x8 x y) + // cond: !shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = int64ToAuxInt(-1) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt16to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh32Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SRLW x y) (Neg32 (SLTIU [32] (ZeroExt16to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg32, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(32) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } 
+ // match: (Rsh32Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRLW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRLW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh32Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SRLW x y) (Neg32 (SLTIU [32] (ZeroExt32to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg32, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(32) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh32Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRLW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRLW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh32Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SRLW x y) (Neg32 (SLTIU [32] y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg32, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh32Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRLW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRLW) + v.AddArg2(x, y) + return true + } + return false +} +func 
rewriteValueRISCV64_OpRsh32Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SRLW x y) (Neg32 (SLTIU [32] (ZeroExt8to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg32, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(32) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh32Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRLW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRLW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (SRAW x (OR y (ADDI [-1] (SLTIU [32] (ZeroExt16to64 y))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRAW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v1.AuxInt = int64ToAuxInt(-1) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v2.AuxInt = int64ToAuxInt(32) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SRAW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRAW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + 
b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x32 x y) + // cond: !shiftIsBounded(v) + // result: (SRAW x (OR y (ADDI [-1] (SLTIU [32] (ZeroExt32to64 y))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRAW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v1.AuxInt = int64ToAuxInt(-1) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v2.AuxInt = int64ToAuxInt(32) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SRAW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRAW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32x64 x y) + // cond: !shiftIsBounded(v) + // result: (SRAW x (OR y (ADDI [-1] (SLTIU [32] y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRAW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v1.AuxInt = int64ToAuxInt(-1) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v2.AuxInt = int64ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SRAW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRAW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (SRAW x (OR y (ADDI [-1] 
(SLTIU [32] (ZeroExt8to64 y))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRAW) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v1.AuxInt = int64ToAuxInt(-1) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v2.AuxInt = int64ToAuxInt(32) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SRAW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRAW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh64Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt16to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg64, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh64Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh64Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt32to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if 
!(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg64, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh64Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh64Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SRL x y) (Neg64 (SLTIU [64] y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg64, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh64Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh64Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt8to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg64, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, 
OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh64Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRL x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x16 x y) + // cond: !shiftIsBounded(v) + // result: (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v1.AuxInt = int64ToAuxInt(-1) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SRA x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x32 x y) + // cond: !shiftIsBounded(v) + // result: (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v1.AuxInt = int64ToAuxInt(-1) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + 
v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SRA x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64x64 x y) + // cond: !shiftIsBounded(v) + // result: (SRA x (OR y (ADDI [-1] (SLTIU [64] y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v1.AuxInt = int64ToAuxInt(-1) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v2.AuxInt = int64ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SRA x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x8 x y) + // cond: !shiftIsBounded(v) + // result: (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v1.AuxInt = int64ToAuxInt(-1) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64x8 x y) + // cond: shiftIsBounded(v) + // 
result: (SRA x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh8Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux16 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt16to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpNeg8, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v.AddArg2(v0, v2) + return true + } + // match: (Rsh8Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt8to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh8Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux32 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt32to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpNeg8, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + 
v.AddArg2(v0, v2) + return true + } + // match: (Rsh8Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt8to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh8Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux64 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] y))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpNeg8, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = int64ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg2(v0, v2) + return true + } + // match: (Rsh8Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt8to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh8Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux8 x y) + // cond: !shiftIsBounded(v) + // result: (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt8to64 y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpNeg8, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = int64ToAuxInt(64) + v4 := 
b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v.AddArg2(v0, v2) + return true + } + // match: (Rsh8Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRL (ZeroExt8to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRL) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = int64ToAuxInt(-1) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.Type = t + 
v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = int64ToAuxInt(-1) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x64 x y) + // cond: !shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = int64ToAuxInt(-1) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = int64ToAuxInt(64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false +} +func rewriteValueRISCV64_OpRsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x8 x y) + // cond: 
!shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) + for { + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = int64ToAuxInt(-1) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = int64ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg2(y, v2) + v.AddArg2(v0, v1) + return true + } + // match: (Rsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SRA (SignExt8to64 x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpRISCV64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + return false +} +func rewriteValueRISCV64_OpSelect0(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select0 (Add64carry x y c)) + // result: (ADD (ADD x y) c) + for { + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpRISCV64ADD) + v0 := b.NewValue0(v.Pos, OpRISCV64ADD, typ.UInt64) + v0.AddArg2(x, y) + v.AddArg2(v0, c) + return true + } + // match: (Select0 (Sub64borrow x y c)) + // result: (SUB (SUB x y) c) + for { + if v_0.Op != OpSub64borrow { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpRISCV64SUB) + v0 := b.NewValue0(v.Pos, OpRISCV64SUB, typ.UInt64) + v0.AddArg2(x, y) + v.AddArg2(v0, c) + return true + } + // match: (Select0 m:(LoweredMuluhilo x y)) + // cond: m.Uses == 1 + // result: (MULHU x y) + for { + m := v_0 + if m.Op != OpRISCV64LoweredMuluhilo { + break + } + y := m.Args[1] + x := m.Args[0] + if !(m.Uses == 1) { + break + } + v.reset(OpRISCV64MULHU) + v.AddArg2(x, y) + 
return true + } + return false +} +func rewriteValueRISCV64_OpSelect1(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select1 (Add64carry x y c)) + // result: (OR (SLTU s:(ADD x y) x) (SLTU (ADD s c) s)) + for { + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpRISCV64OR) + v0 := b.NewValue0(v.Pos, OpRISCV64SLTU, typ.UInt64) + s := b.NewValue0(v.Pos, OpRISCV64ADD, typ.UInt64) + s.AddArg2(x, y) + v0.AddArg2(s, x) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTU, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpRISCV64ADD, typ.UInt64) + v3.AddArg2(s, c) + v2.AddArg2(v3, s) + v.AddArg2(v0, v2) + return true + } + // match: (Select1 (Sub64borrow x y c)) + // result: (OR (SLTU x s:(SUB x y)) (SLTU s (SUB s c))) + for { + if v_0.Op != OpSub64borrow { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpRISCV64OR) + v0 := b.NewValue0(v.Pos, OpRISCV64SLTU, typ.UInt64) + s := b.NewValue0(v.Pos, OpRISCV64SUB, typ.UInt64) + s.AddArg2(x, y) + v0.AddArg2(x, s) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTU, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpRISCV64SUB, typ.UInt64) + v3.AddArg2(s, c) + v2.AddArg2(s, v3) + v.AddArg2(v0, v2) + return true + } + // match: (Select1 m:(LoweredMuluhilo x y)) + // cond: m.Uses == 1 + // result: (MUL x y) + for { + m := v_0 + if m.Op != OpRISCV64LoweredMuluhilo { + break + } + y := m.Args[1] + x := m.Args[0] + if !(m.Uses == 1) { + break + } + v.reset(OpRISCV64MUL) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueRISCV64_OpSlicemask(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Slicemask x) + // result: (SRAI [63] (NEG x)) + for { + t := v.Type + x := v_0 + v.reset(OpRISCV64SRAI) + v.AuxInt = int64ToAuxInt(63) + v0 := b.NewValue0(v.Pos, OpRISCV64NEG, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpStore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + 
v_0 := v.Args[0] + // match: (Store {t} ptr val mem) + // cond: t.Size() == 1 + // result: (MOVBstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 1) { + break + } + v.reset(OpRISCV64MOVBstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 2 + // result: (MOVHstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 2) { + break + } + v.reset(OpRISCV64MOVHstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && !t.IsFloat() + // result: (MOVWstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && !t.IsFloat()) { + break + } + v.reset(OpRISCV64MOVWstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && !t.IsFloat() + // result: (MOVDstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && !t.IsFloat()) { + break + } + v.reset(OpRISCV64MOVDstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && t.IsFloat() + // result: (FMOVWstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && t.IsFloat()) { + break + } + v.reset(OpRISCV64FMOVWstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && t.IsFloat() + // result: (FMOVDstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && t.IsFloat()) { + break + } + v.reset(OpRISCV64FMOVDstore) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueRISCV64_OpZero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := 
&b.Func.Config.Types + // match: (Zero [0] _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_1 + v.copyOf(mem) + return true + } + // match: (Zero [1] ptr mem) + // result: (MOVBstore ptr (MOVDconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpRISCV64MOVBstore) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [2] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore ptr (MOVDconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpRISCV64MOVHstore) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [2] ptr mem) + // result: (MOVBstore [1] ptr (MOVDconst [0]) (MOVBstore ptr (MOVDconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpRISCV64MOVBstore) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [4] {t} ptr mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore ptr (MOVDconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpRISCV64MOVWstore) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [4] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem)) + for { + if 
auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpRISCV64MOVHstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [4] ptr mem) + // result: (MOVBstore [3] ptr (MOVDconst [0]) (MOVBstore [2] ptr (MOVDconst [0]) (MOVBstore [1] ptr (MOVDconst [0]) (MOVBstore ptr (MOVDconst [0]) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpRISCV64MOVBstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(1) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem) + v3.AddArg3(ptr, v0, mem) + v2.AddArg3(ptr, v0, v3) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [8] {t} ptr mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVDstore ptr (MOVDconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpRISCV64MOVDstore) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [8] {t} ptr mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpRISCV64MOVWstore) + v.AuxInt = int32ToAuxInt(4) + 
v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [8] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [6] ptr (MOVDconst [0]) (MOVHstore [4] ptr (MOVDconst [0]) (MOVHstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpRISCV64MOVHstore) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(2) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem) + v3.AddArg3(ptr, v0, mem) + v2.AddArg3(ptr, v0, v3) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [3] ptr mem) + // result: (MOVBstore [2] ptr (MOVDconst [0]) (MOVBstore [1] ptr (MOVDconst [0]) (MOVBstore ptr (MOVDconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpRISCV64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [6] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [4] ptr (MOVDconst [0]) (MOVHstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + t := 
auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpRISCV64MOVHstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [12] {t} ptr mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [8] ptr (MOVDconst [0]) (MOVWstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 12 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpRISCV64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [16] {t} ptr mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpRISCV64MOVDstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [24] {t} ptr mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVDstore [16] ptr (MOVDconst [0]) (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) 
mem))) + for { + if auxIntToInt64(v.AuxInt) != 24 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpRISCV64MOVDstore) + v.AuxInt = int32ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(8) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [32] {t} ptr mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVDstore [24] ptr (MOVDconst [0]) (MOVDstore [16] ptr (MOVDconst [0]) (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 32 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpRISCV64MOVDstore) + v.AuxInt = int32ToAuxInt(24) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(16) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(8) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem) + v3.AddArg3(ptr, v0, mem) + v2.AddArg3(ptr, v0, v3) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [s] {t} ptr mem) + // cond: s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice + // result: (DUFFZERO [8 * (128 - s/8)] ptr mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice) { + break + } + v.reset(OpRISCV64DUFFZERO) + v.AuxInt = int64ToAuxInt(8 * (128 - s/8)) + v.AddArg2(ptr, mem) + return true + } + // match: (Zero [s] {t} ptr mem) + // result: (LoweredZero [t.Alignment()] 
ptr (ADD ptr (MOVDconst [s-moveSize(t.Alignment(), config)])) mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + v.reset(OpRISCV64LoweredZero) + v.AuxInt = int64ToAuxInt(t.Alignment()) + v0 := b.NewValue0(v.Pos, OpRISCV64ADD, ptr.Type) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config)) + v0.AddArg2(ptr, v1) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteBlockRISCV64(b *Block) bool { + typ := &b.Func.Config.Types + switch b.Kind { + case BlockRISCV64BEQ: + // match: (BEQ (MOVDconst [0]) cond yes no) + // result: (BEQZ cond yes no) + for b.Controls[0].Op == OpRISCV64MOVDconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + cond := b.Controls[1] + b.resetWithControl(BlockRISCV64BEQZ, cond) + return true + } + // match: (BEQ cond (MOVDconst [0]) yes no) + // result: (BEQZ cond yes no) + for b.Controls[1].Op == OpRISCV64MOVDconst { + cond := b.Controls[0] + v_1 := b.Controls[1] + if auxIntToInt64(v_1.AuxInt) != 0 { + break + } + b.resetWithControl(BlockRISCV64BEQZ, cond) + return true + } + case BlockRISCV64BEQZ: + // match: (BEQZ (SEQZ x) yes no) + // result: (BNEZ x yes no) + for b.Controls[0].Op == OpRISCV64SEQZ { + v_0 := b.Controls[0] + x := v_0.Args[0] + b.resetWithControl(BlockRISCV64BNEZ, x) + return true + } + // match: (BEQZ (SNEZ x) yes no) + // result: (BEQZ x yes no) + for b.Controls[0].Op == OpRISCV64SNEZ { + v_0 := b.Controls[0] + x := v_0.Args[0] + b.resetWithControl(BlockRISCV64BEQZ, x) + return true + } + // match: (BEQZ (NEG x) yes no) + // result: (BEQZ x yes no) + for b.Controls[0].Op == OpRISCV64NEG { + v_0 := b.Controls[0] + x := v_0.Args[0] + b.resetWithControl(BlockRISCV64BEQZ, x) + return true + } + // match: (BEQZ (FNES x y) yes no) + // result: (BNEZ (FEQS x y) yes no) + for b.Controls[0].Op == OpRISCV64FNES { + v_0 := b.Controls[0] + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := 
v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + v0 := b.NewValue0(v_0.Pos, OpRISCV64FEQS, t) + v0.AddArg2(x, y) + b.resetWithControl(BlockRISCV64BNEZ, v0) + return true + } + } + // match: (BEQZ (FNED x y) yes no) + // result: (BNEZ (FEQD x y) yes no) + for b.Controls[0].Op == OpRISCV64FNED { + v_0 := b.Controls[0] + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + v0 := b.NewValue0(v_0.Pos, OpRISCV64FEQD, t) + v0.AddArg2(x, y) + b.resetWithControl(BlockRISCV64BNEZ, v0) + return true + } + } + // match: (BEQZ (SUB x y) yes no) + // result: (BEQ x y yes no) + for b.Controls[0].Op == OpRISCV64SUB { + v_0 := b.Controls[0] + y := v_0.Args[1] + x := v_0.Args[0] + b.resetWithControl2(BlockRISCV64BEQ, x, y) + return true + } + // match: (BEQZ (SLT x y) yes no) + // result: (BGE x y yes no) + for b.Controls[0].Op == OpRISCV64SLT { + v_0 := b.Controls[0] + y := v_0.Args[1] + x := v_0.Args[0] + b.resetWithControl2(BlockRISCV64BGE, x, y) + return true + } + // match: (BEQZ (SLTU x y) yes no) + // result: (BGEU x y yes no) + for b.Controls[0].Op == OpRISCV64SLTU { + v_0 := b.Controls[0] + y := v_0.Args[1] + x := v_0.Args[0] + b.resetWithControl2(BlockRISCV64BGEU, x, y) + return true + } + // match: (BEQZ (SLTI [x] y) yes no) + // result: (BGE y (MOVDconst [x]) yes no) + for b.Controls[0].Op == OpRISCV64SLTI { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := v_0.Args[0] + v0 := b.NewValue0(b.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(x) + b.resetWithControl2(BlockRISCV64BGE, y, v0) + return true + } + // match: (BEQZ (SLTIU [x] y) yes no) + // result: (BGEU y (MOVDconst [x]) yes no) + for b.Controls[0].Op == OpRISCV64SLTIU { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := v_0.Args[0] + v0 := b.NewValue0(b.Pos, 
OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(x) + b.resetWithControl2(BlockRISCV64BGEU, y, v0) + return true + } + case BlockRISCV64BGE: + // match: (BGE (MOVDconst [0]) cond yes no) + // result: (BLEZ cond yes no) + for b.Controls[0].Op == OpRISCV64MOVDconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + cond := b.Controls[1] + b.resetWithControl(BlockRISCV64BLEZ, cond) + return true + } + // match: (BGE cond (MOVDconst [0]) yes no) + // result: (BGEZ cond yes no) + for b.Controls[1].Op == OpRISCV64MOVDconst { + cond := b.Controls[0] + v_1 := b.Controls[1] + if auxIntToInt64(v_1.AuxInt) != 0 { + break + } + b.resetWithControl(BlockRISCV64BGEZ, cond) + return true + } + case BlockRISCV64BLT: + // match: (BLT (MOVDconst [0]) cond yes no) + // result: (BGTZ cond yes no) + for b.Controls[0].Op == OpRISCV64MOVDconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + cond := b.Controls[1] + b.resetWithControl(BlockRISCV64BGTZ, cond) + return true + } + // match: (BLT cond (MOVDconst [0]) yes no) + // result: (BLTZ cond yes no) + for b.Controls[1].Op == OpRISCV64MOVDconst { + cond := b.Controls[0] + v_1 := b.Controls[1] + if auxIntToInt64(v_1.AuxInt) != 0 { + break + } + b.resetWithControl(BlockRISCV64BLTZ, cond) + return true + } + case BlockRISCV64BNE: + // match: (BNE (MOVDconst [0]) cond yes no) + // result: (BNEZ cond yes no) + for b.Controls[0].Op == OpRISCV64MOVDconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + cond := b.Controls[1] + b.resetWithControl(BlockRISCV64BNEZ, cond) + return true + } + // match: (BNE cond (MOVDconst [0]) yes no) + // result: (BNEZ cond yes no) + for b.Controls[1].Op == OpRISCV64MOVDconst { + cond := b.Controls[0] + v_1 := b.Controls[1] + if auxIntToInt64(v_1.AuxInt) != 0 { + break + } + b.resetWithControl(BlockRISCV64BNEZ, cond) + return true + } + case BlockRISCV64BNEZ: + // match: (BNEZ (SEQZ x) yes no) + // result: (BEQZ x yes no) 
+ for b.Controls[0].Op == OpRISCV64SEQZ { + v_0 := b.Controls[0] + x := v_0.Args[0] + b.resetWithControl(BlockRISCV64BEQZ, x) + return true + } + // match: (BNEZ (SNEZ x) yes no) + // result: (BNEZ x yes no) + for b.Controls[0].Op == OpRISCV64SNEZ { + v_0 := b.Controls[0] + x := v_0.Args[0] + b.resetWithControl(BlockRISCV64BNEZ, x) + return true + } + // match: (BNEZ (NEG x) yes no) + // result: (BNEZ x yes no) + for b.Controls[0].Op == OpRISCV64NEG { + v_0 := b.Controls[0] + x := v_0.Args[0] + b.resetWithControl(BlockRISCV64BNEZ, x) + return true + } + // match: (BNEZ (FNES x y) yes no) + // result: (BEQZ (FEQS x y) yes no) + for b.Controls[0].Op == OpRISCV64FNES { + v_0 := b.Controls[0] + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + v0 := b.NewValue0(v_0.Pos, OpRISCV64FEQS, t) + v0.AddArg2(x, y) + b.resetWithControl(BlockRISCV64BEQZ, v0) + return true + } + } + // match: (BNEZ (FNED x y) yes no) + // result: (BEQZ (FEQD x y) yes no) + for b.Controls[0].Op == OpRISCV64FNED { + v_0 := b.Controls[0] + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + v0 := b.NewValue0(v_0.Pos, OpRISCV64FEQD, t) + v0.AddArg2(x, y) + b.resetWithControl(BlockRISCV64BEQZ, v0) + return true + } + } + // match: (BNEZ (SUB x y) yes no) + // result: (BNE x y yes no) + for b.Controls[0].Op == OpRISCV64SUB { + v_0 := b.Controls[0] + y := v_0.Args[1] + x := v_0.Args[0] + b.resetWithControl2(BlockRISCV64BNE, x, y) + return true + } + // match: (BNEZ (SLT x y) yes no) + // result: (BLT x y yes no) + for b.Controls[0].Op == OpRISCV64SLT { + v_0 := b.Controls[0] + y := v_0.Args[1] + x := v_0.Args[0] + b.resetWithControl2(BlockRISCV64BLT, x, y) + return true + } + // match: (BNEZ (SLTU x y) yes no) + // result: (BLTU x y yes no) + for 
b.Controls[0].Op == OpRISCV64SLTU { + v_0 := b.Controls[0] + y := v_0.Args[1] + x := v_0.Args[0] + b.resetWithControl2(BlockRISCV64BLTU, x, y) + return true + } + // match: (BNEZ (SLTI [x] y) yes no) + // result: (BLT y (MOVDconst [x]) yes no) + for b.Controls[0].Op == OpRISCV64SLTI { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := v_0.Args[0] + v0 := b.NewValue0(b.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(x) + b.resetWithControl2(BlockRISCV64BLT, y, v0) + return true + } + // match: (BNEZ (SLTIU [x] y) yes no) + // result: (BLTU y (MOVDconst [x]) yes no) + for b.Controls[0].Op == OpRISCV64SLTIU { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := v_0.Args[0] + v0 := b.NewValue0(b.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(x) + b.resetWithControl2(BlockRISCV64BLTU, y, v0) + return true + } + case BlockIf: + // match: (If cond yes no) + // result: (BNEZ (MOVBUreg cond) yes no) + for { + cond := b.Controls[0] + v0 := b.NewValue0(cond.Pos, OpRISCV64MOVBUreg, typ.UInt64) + v0.AddArg(cond) + b.resetWithControl(BlockRISCV64BNEZ, v0) + return true + } + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteRISCV64latelower.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteRISCV64latelower.go new file mode 100644 index 0000000000000000000000000000000000000000..6dd97d65bdc06196fa2933f7b1f81b0bd2e7ca70 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteRISCV64latelower.go @@ -0,0 +1,246 @@ +// Code generated from _gen/RISCV64latelower.rules using 'go generate'; DO NOT EDIT. 
+ +package ssa + +func rewriteValueRISCV64latelower(v *Value) bool { + switch v.Op { + case OpRISCV64SLLI: + return rewriteValueRISCV64latelower_OpRISCV64SLLI(v) + case OpRISCV64SRAI: + return rewriteValueRISCV64latelower_OpRISCV64SRAI(v) + case OpRISCV64SRLI: + return rewriteValueRISCV64latelower_OpRISCV64SRLI(v) + } + return false +} +func rewriteValueRISCV64latelower_OpRISCV64SLLI(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SLLI [c] (MOVBUreg x)) + // cond: c <= 56 + // result: (SRLI [56-c] (SLLI [56] x)) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVBUreg { + break + } + x := v_0.Args[0] + if !(c <= 56) { + break + } + v.reset(OpRISCV64SRLI) + v.AuxInt = int64ToAuxInt(56 - c) + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64) + v0.AuxInt = int64ToAuxInt(56) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SLLI [c] (MOVHUreg x)) + // cond: c <= 48 + // result: (SRLI [48-c] (SLLI [48] x)) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVHUreg { + break + } + x := v_0.Args[0] + if !(c <= 48) { + break + } + v.reset(OpRISCV64SRLI) + v.AuxInt = int64ToAuxInt(48 - c) + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64) + v0.AuxInt = int64ToAuxInt(48) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SLLI [c] (MOVWUreg x)) + // cond: c <= 32 + // result: (SRLI [32-c] (SLLI [32] x)) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVWUreg { + break + } + x := v_0.Args[0] + if !(c <= 32) { + break + } + v.reset(OpRISCV64SRLI) + v.AuxInt = int64ToAuxInt(32 - c) + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64) + v0.AuxInt = int64ToAuxInt(32) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SLLI [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValueRISCV64latelower_OpRISCV64SRAI(v *Value) bool { + v_0 := v.Args[0] + b := v.Block 
+ typ := &b.Func.Config.Types + // match: (SRAI [c] (MOVBreg x)) + // cond: c < 8 + // result: (SRAI [56+c] (SLLI [56] x)) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVBreg { + break + } + x := v_0.Args[0] + if !(c < 8) { + break + } + v.reset(OpRISCV64SRAI) + v.AuxInt = int64ToAuxInt(56 + c) + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.Int64) + v0.AuxInt = int64ToAuxInt(56) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SRAI [c] (MOVHreg x)) + // cond: c < 16 + // result: (SRAI [48+c] (SLLI [48] x)) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVHreg { + break + } + x := v_0.Args[0] + if !(c < 16) { + break + } + v.reset(OpRISCV64SRAI) + v.AuxInt = int64ToAuxInt(48 + c) + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.Int64) + v0.AuxInt = int64ToAuxInt(48) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SRAI [c] (MOVWreg x)) + // cond: c < 32 + // result: (SRAI [32+c] (SLLI [32] x)) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVWreg { + break + } + x := v_0.Args[0] + if !(c < 32) { + break + } + v.reset(OpRISCV64SRAI) + v.AuxInt = int64ToAuxInt(32 + c) + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.Int64) + v0.AuxInt = int64ToAuxInt(32) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SRAI [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValueRISCV64latelower_OpRISCV64SRLI(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SRLI [c] (MOVBUreg x)) + // cond: c < 8 + // result: (SRLI [56+c] (SLLI [56] x)) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVBUreg { + break + } + x := v_0.Args[0] + if !(c < 8) { + break + } + v.reset(OpRISCV64SRLI) + v.AuxInt = int64ToAuxInt(56 + c) + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64) + v0.AuxInt = int64ToAuxInt(56) + v0.AddArg(x) + v.AddArg(v0) + return true + } + 
// match: (SRLI [c] (MOVHUreg x)) + // cond: c < 16 + // result: (SRLI [48+c] (SLLI [48] x)) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVHUreg { + break + } + x := v_0.Args[0] + if !(c < 16) { + break + } + v.reset(OpRISCV64SRLI) + v.AuxInt = int64ToAuxInt(48 + c) + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64) + v0.AuxInt = int64ToAuxInt(48) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SRLI [c] (MOVWUreg x)) + // cond: c < 32 + // result: (SRLI [32+c] (SLLI [32] x)) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVWUreg { + break + } + x := v_0.Args[0] + if !(c < 32) { + break + } + v.reset(OpRISCV64SRLI) + v.AuxInt = int64ToAuxInt(32 + c) + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64) + v0.AuxInt = int64ToAuxInt(32) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SRLI [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteBlockRISCV64latelower(b *Block) bool { + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteS390X.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteS390X.go new file mode 100644 index 0000000000000000000000000000000000000000..c2342c944d55b4221a5fce948e43b5a8b12838c0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -0,0 +1,16638 @@ +// Code generated from _gen/S390X.rules using 'go generate'; DO NOT EDIT. 
+ +package ssa + +import "math" +import "cmd/compile/internal/types" +import "cmd/internal/obj/s390x" + +func rewriteValueS390X(v *Value) bool { + switch v.Op { + case OpAdd16: + v.Op = OpS390XADDW + return true + case OpAdd32: + v.Op = OpS390XADDW + return true + case OpAdd32F: + return rewriteValueS390X_OpAdd32F(v) + case OpAdd64: + v.Op = OpS390XADD + return true + case OpAdd64F: + return rewriteValueS390X_OpAdd64F(v) + case OpAdd8: + v.Op = OpS390XADDW + return true + case OpAddPtr: + v.Op = OpS390XADD + return true + case OpAddr: + return rewriteValueS390X_OpAddr(v) + case OpAnd16: + v.Op = OpS390XANDW + return true + case OpAnd32: + v.Op = OpS390XANDW + return true + case OpAnd64: + v.Op = OpS390XAND + return true + case OpAnd8: + v.Op = OpS390XANDW + return true + case OpAndB: + v.Op = OpS390XANDW + return true + case OpAtomicAdd32: + return rewriteValueS390X_OpAtomicAdd32(v) + case OpAtomicAdd64: + return rewriteValueS390X_OpAtomicAdd64(v) + case OpAtomicAnd32: + v.Op = OpS390XLAN + return true + case OpAtomicAnd8: + return rewriteValueS390X_OpAtomicAnd8(v) + case OpAtomicCompareAndSwap32: + return rewriteValueS390X_OpAtomicCompareAndSwap32(v) + case OpAtomicCompareAndSwap64: + return rewriteValueS390X_OpAtomicCompareAndSwap64(v) + case OpAtomicExchange32: + return rewriteValueS390X_OpAtomicExchange32(v) + case OpAtomicExchange64: + return rewriteValueS390X_OpAtomicExchange64(v) + case OpAtomicLoad32: + return rewriteValueS390X_OpAtomicLoad32(v) + case OpAtomicLoad64: + return rewriteValueS390X_OpAtomicLoad64(v) + case OpAtomicLoad8: + return rewriteValueS390X_OpAtomicLoad8(v) + case OpAtomicLoadAcq32: + return rewriteValueS390X_OpAtomicLoadAcq32(v) + case OpAtomicLoadPtr: + return rewriteValueS390X_OpAtomicLoadPtr(v) + case OpAtomicOr32: + v.Op = OpS390XLAO + return true + case OpAtomicOr8: + return rewriteValueS390X_OpAtomicOr8(v) + case OpAtomicStore32: + return rewriteValueS390X_OpAtomicStore32(v) + case OpAtomicStore64: + return 
rewriteValueS390X_OpAtomicStore64(v) + case OpAtomicStore8: + return rewriteValueS390X_OpAtomicStore8(v) + case OpAtomicStorePtrNoWB: + return rewriteValueS390X_OpAtomicStorePtrNoWB(v) + case OpAtomicStoreRel32: + return rewriteValueS390X_OpAtomicStoreRel32(v) + case OpAvg64u: + return rewriteValueS390X_OpAvg64u(v) + case OpBitLen64: + return rewriteValueS390X_OpBitLen64(v) + case OpBswap16: + return rewriteValueS390X_OpBswap16(v) + case OpBswap32: + v.Op = OpS390XMOVWBR + return true + case OpBswap64: + v.Op = OpS390XMOVDBR + return true + case OpCeil: + return rewriteValueS390X_OpCeil(v) + case OpClosureCall: + v.Op = OpS390XCALLclosure + return true + case OpCom16: + v.Op = OpS390XNOTW + return true + case OpCom32: + v.Op = OpS390XNOTW + return true + case OpCom64: + v.Op = OpS390XNOT + return true + case OpCom8: + v.Op = OpS390XNOTW + return true + case OpConst16: + return rewriteValueS390X_OpConst16(v) + case OpConst32: + return rewriteValueS390X_OpConst32(v) + case OpConst32F: + v.Op = OpS390XFMOVSconst + return true + case OpConst64: + return rewriteValueS390X_OpConst64(v) + case OpConst64F: + v.Op = OpS390XFMOVDconst + return true + case OpConst8: + return rewriteValueS390X_OpConst8(v) + case OpConstBool: + return rewriteValueS390X_OpConstBool(v) + case OpConstNil: + return rewriteValueS390X_OpConstNil(v) + case OpCtz32: + return rewriteValueS390X_OpCtz32(v) + case OpCtz32NonZero: + v.Op = OpCtz32 + return true + case OpCtz64: + return rewriteValueS390X_OpCtz64(v) + case OpCtz64NonZero: + v.Op = OpCtz64 + return true + case OpCvt32Fto32: + v.Op = OpS390XCFEBRA + return true + case OpCvt32Fto32U: + v.Op = OpS390XCLFEBR + return true + case OpCvt32Fto64: + v.Op = OpS390XCGEBRA + return true + case OpCvt32Fto64F: + v.Op = OpS390XLDEBR + return true + case OpCvt32Fto64U: + v.Op = OpS390XCLGEBR + return true + case OpCvt32Uto32F: + v.Op = OpS390XCELFBR + return true + case OpCvt32Uto64F: + v.Op = OpS390XCDLFBR + return true + case OpCvt32to32F: + v.Op = 
OpS390XCEFBRA + return true + case OpCvt32to64F: + v.Op = OpS390XCDFBRA + return true + case OpCvt64Fto32: + v.Op = OpS390XCFDBRA + return true + case OpCvt64Fto32F: + v.Op = OpS390XLEDBR + return true + case OpCvt64Fto32U: + v.Op = OpS390XCLFDBR + return true + case OpCvt64Fto64: + v.Op = OpS390XCGDBRA + return true + case OpCvt64Fto64U: + v.Op = OpS390XCLGDBR + return true + case OpCvt64Uto32F: + v.Op = OpS390XCELGBR + return true + case OpCvt64Uto64F: + v.Op = OpS390XCDLGBR + return true + case OpCvt64to32F: + v.Op = OpS390XCEGBRA + return true + case OpCvt64to64F: + v.Op = OpS390XCDGBRA + return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true + case OpDiv16: + return rewriteValueS390X_OpDiv16(v) + case OpDiv16u: + return rewriteValueS390X_OpDiv16u(v) + case OpDiv32: + return rewriteValueS390X_OpDiv32(v) + case OpDiv32F: + v.Op = OpS390XFDIVS + return true + case OpDiv32u: + return rewriteValueS390X_OpDiv32u(v) + case OpDiv64: + return rewriteValueS390X_OpDiv64(v) + case OpDiv64F: + v.Op = OpS390XFDIV + return true + case OpDiv64u: + v.Op = OpS390XDIVDU + return true + case OpDiv8: + return rewriteValueS390X_OpDiv8(v) + case OpDiv8u: + return rewriteValueS390X_OpDiv8u(v) + case OpEq16: + return rewriteValueS390X_OpEq16(v) + case OpEq32: + return rewriteValueS390X_OpEq32(v) + case OpEq32F: + return rewriteValueS390X_OpEq32F(v) + case OpEq64: + return rewriteValueS390X_OpEq64(v) + case OpEq64F: + return rewriteValueS390X_OpEq64F(v) + case OpEq8: + return rewriteValueS390X_OpEq8(v) + case OpEqB: + return rewriteValueS390X_OpEqB(v) + case OpEqPtr: + return rewriteValueS390X_OpEqPtr(v) + case OpFMA: + return rewriteValueS390X_OpFMA(v) + case OpFloor: + return rewriteValueS390X_OpFloor(v) + case OpGetCallerPC: + v.Op = OpS390XLoweredGetCallerPC + return true + case OpGetCallerSP: + v.Op = OpS390XLoweredGetCallerSP + return true + case OpGetClosurePtr: + v.Op = OpS390XLoweredGetClosurePtr + return true + case OpGetG: + v.Op = OpS390XLoweredGetG + return 
true + case OpHmul32: + return rewriteValueS390X_OpHmul32(v) + case OpHmul32u: + return rewriteValueS390X_OpHmul32u(v) + case OpHmul64: + v.Op = OpS390XMULHD + return true + case OpHmul64u: + v.Op = OpS390XMULHDU + return true + case OpITab: + return rewriteValueS390X_OpITab(v) + case OpInterCall: + v.Op = OpS390XCALLinter + return true + case OpIsInBounds: + return rewriteValueS390X_OpIsInBounds(v) + case OpIsNonNil: + return rewriteValueS390X_OpIsNonNil(v) + case OpIsSliceInBounds: + return rewriteValueS390X_OpIsSliceInBounds(v) + case OpLeq16: + return rewriteValueS390X_OpLeq16(v) + case OpLeq16U: + return rewriteValueS390X_OpLeq16U(v) + case OpLeq32: + return rewriteValueS390X_OpLeq32(v) + case OpLeq32F: + return rewriteValueS390X_OpLeq32F(v) + case OpLeq32U: + return rewriteValueS390X_OpLeq32U(v) + case OpLeq64: + return rewriteValueS390X_OpLeq64(v) + case OpLeq64F: + return rewriteValueS390X_OpLeq64F(v) + case OpLeq64U: + return rewriteValueS390X_OpLeq64U(v) + case OpLeq8: + return rewriteValueS390X_OpLeq8(v) + case OpLeq8U: + return rewriteValueS390X_OpLeq8U(v) + case OpLess16: + return rewriteValueS390X_OpLess16(v) + case OpLess16U: + return rewriteValueS390X_OpLess16U(v) + case OpLess32: + return rewriteValueS390X_OpLess32(v) + case OpLess32F: + return rewriteValueS390X_OpLess32F(v) + case OpLess32U: + return rewriteValueS390X_OpLess32U(v) + case OpLess64: + return rewriteValueS390X_OpLess64(v) + case OpLess64F: + return rewriteValueS390X_OpLess64F(v) + case OpLess64U: + return rewriteValueS390X_OpLess64U(v) + case OpLess8: + return rewriteValueS390X_OpLess8(v) + case OpLess8U: + return rewriteValueS390X_OpLess8U(v) + case OpLoad: + return rewriteValueS390X_OpLoad(v) + case OpLocalAddr: + return rewriteValueS390X_OpLocalAddr(v) + case OpLsh16x16: + return rewriteValueS390X_OpLsh16x16(v) + case OpLsh16x32: + return rewriteValueS390X_OpLsh16x32(v) + case OpLsh16x64: + return rewriteValueS390X_OpLsh16x64(v) + case OpLsh16x8: + return 
rewriteValueS390X_OpLsh16x8(v) + case OpLsh32x16: + return rewriteValueS390X_OpLsh32x16(v) + case OpLsh32x32: + return rewriteValueS390X_OpLsh32x32(v) + case OpLsh32x64: + return rewriteValueS390X_OpLsh32x64(v) + case OpLsh32x8: + return rewriteValueS390X_OpLsh32x8(v) + case OpLsh64x16: + return rewriteValueS390X_OpLsh64x16(v) + case OpLsh64x32: + return rewriteValueS390X_OpLsh64x32(v) + case OpLsh64x64: + return rewriteValueS390X_OpLsh64x64(v) + case OpLsh64x8: + return rewriteValueS390X_OpLsh64x8(v) + case OpLsh8x16: + return rewriteValueS390X_OpLsh8x16(v) + case OpLsh8x32: + return rewriteValueS390X_OpLsh8x32(v) + case OpLsh8x64: + return rewriteValueS390X_OpLsh8x64(v) + case OpLsh8x8: + return rewriteValueS390X_OpLsh8x8(v) + case OpMod16: + return rewriteValueS390X_OpMod16(v) + case OpMod16u: + return rewriteValueS390X_OpMod16u(v) + case OpMod32: + return rewriteValueS390X_OpMod32(v) + case OpMod32u: + return rewriteValueS390X_OpMod32u(v) + case OpMod64: + return rewriteValueS390X_OpMod64(v) + case OpMod64u: + v.Op = OpS390XMODDU + return true + case OpMod8: + return rewriteValueS390X_OpMod8(v) + case OpMod8u: + return rewriteValueS390X_OpMod8u(v) + case OpMove: + return rewriteValueS390X_OpMove(v) + case OpMul16: + v.Op = OpS390XMULLW + return true + case OpMul32: + v.Op = OpS390XMULLW + return true + case OpMul32F: + v.Op = OpS390XFMULS + return true + case OpMul64: + v.Op = OpS390XMULLD + return true + case OpMul64F: + v.Op = OpS390XFMUL + return true + case OpMul64uhilo: + v.Op = OpS390XMLGR + return true + case OpMul8: + v.Op = OpS390XMULLW + return true + case OpNeg16: + v.Op = OpS390XNEGW + return true + case OpNeg32: + v.Op = OpS390XNEGW + return true + case OpNeg32F: + v.Op = OpS390XFNEGS + return true + case OpNeg64: + v.Op = OpS390XNEG + return true + case OpNeg64F: + v.Op = OpS390XFNEG + return true + case OpNeg8: + v.Op = OpS390XNEGW + return true + case OpNeq16: + return rewriteValueS390X_OpNeq16(v) + case OpNeq32: + return 
rewriteValueS390X_OpNeq32(v) + case OpNeq32F: + return rewriteValueS390X_OpNeq32F(v) + case OpNeq64: + return rewriteValueS390X_OpNeq64(v) + case OpNeq64F: + return rewriteValueS390X_OpNeq64F(v) + case OpNeq8: + return rewriteValueS390X_OpNeq8(v) + case OpNeqB: + return rewriteValueS390X_OpNeqB(v) + case OpNeqPtr: + return rewriteValueS390X_OpNeqPtr(v) + case OpNilCheck: + v.Op = OpS390XLoweredNilCheck + return true + case OpNot: + return rewriteValueS390X_OpNot(v) + case OpOffPtr: + return rewriteValueS390X_OpOffPtr(v) + case OpOr16: + v.Op = OpS390XORW + return true + case OpOr32: + v.Op = OpS390XORW + return true + case OpOr64: + v.Op = OpS390XOR + return true + case OpOr8: + v.Op = OpS390XORW + return true + case OpOrB: + v.Op = OpS390XORW + return true + case OpPanicBounds: + return rewriteValueS390X_OpPanicBounds(v) + case OpPopCount16: + return rewriteValueS390X_OpPopCount16(v) + case OpPopCount32: + return rewriteValueS390X_OpPopCount32(v) + case OpPopCount64: + return rewriteValueS390X_OpPopCount64(v) + case OpPopCount8: + return rewriteValueS390X_OpPopCount8(v) + case OpRotateLeft16: + return rewriteValueS390X_OpRotateLeft16(v) + case OpRotateLeft32: + v.Op = OpS390XRLL + return true + case OpRotateLeft64: + v.Op = OpS390XRLLG + return true + case OpRotateLeft8: + return rewriteValueS390X_OpRotateLeft8(v) + case OpRound: + return rewriteValueS390X_OpRound(v) + case OpRound32F: + v.Op = OpS390XLoweredRound32F + return true + case OpRound64F: + v.Op = OpS390XLoweredRound64F + return true + case OpRoundToEven: + return rewriteValueS390X_OpRoundToEven(v) + case OpRsh16Ux16: + return rewriteValueS390X_OpRsh16Ux16(v) + case OpRsh16Ux32: + return rewriteValueS390X_OpRsh16Ux32(v) + case OpRsh16Ux64: + return rewriteValueS390X_OpRsh16Ux64(v) + case OpRsh16Ux8: + return rewriteValueS390X_OpRsh16Ux8(v) + case OpRsh16x16: + return rewriteValueS390X_OpRsh16x16(v) + case OpRsh16x32: + return rewriteValueS390X_OpRsh16x32(v) + case OpRsh16x64: + return 
rewriteValueS390X_OpRsh16x64(v) + case OpRsh16x8: + return rewriteValueS390X_OpRsh16x8(v) + case OpRsh32Ux16: + return rewriteValueS390X_OpRsh32Ux16(v) + case OpRsh32Ux32: + return rewriteValueS390X_OpRsh32Ux32(v) + case OpRsh32Ux64: + return rewriteValueS390X_OpRsh32Ux64(v) + case OpRsh32Ux8: + return rewriteValueS390X_OpRsh32Ux8(v) + case OpRsh32x16: + return rewriteValueS390X_OpRsh32x16(v) + case OpRsh32x32: + return rewriteValueS390X_OpRsh32x32(v) + case OpRsh32x64: + return rewriteValueS390X_OpRsh32x64(v) + case OpRsh32x8: + return rewriteValueS390X_OpRsh32x8(v) + case OpRsh64Ux16: + return rewriteValueS390X_OpRsh64Ux16(v) + case OpRsh64Ux32: + return rewriteValueS390X_OpRsh64Ux32(v) + case OpRsh64Ux64: + return rewriteValueS390X_OpRsh64Ux64(v) + case OpRsh64Ux8: + return rewriteValueS390X_OpRsh64Ux8(v) + case OpRsh64x16: + return rewriteValueS390X_OpRsh64x16(v) + case OpRsh64x32: + return rewriteValueS390X_OpRsh64x32(v) + case OpRsh64x64: + return rewriteValueS390X_OpRsh64x64(v) + case OpRsh64x8: + return rewriteValueS390X_OpRsh64x8(v) + case OpRsh8Ux16: + return rewriteValueS390X_OpRsh8Ux16(v) + case OpRsh8Ux32: + return rewriteValueS390X_OpRsh8Ux32(v) + case OpRsh8Ux64: + return rewriteValueS390X_OpRsh8Ux64(v) + case OpRsh8Ux8: + return rewriteValueS390X_OpRsh8Ux8(v) + case OpRsh8x16: + return rewriteValueS390X_OpRsh8x16(v) + case OpRsh8x32: + return rewriteValueS390X_OpRsh8x32(v) + case OpRsh8x64: + return rewriteValueS390X_OpRsh8x64(v) + case OpRsh8x8: + return rewriteValueS390X_OpRsh8x8(v) + case OpS390XADD: + return rewriteValueS390X_OpS390XADD(v) + case OpS390XADDC: + return rewriteValueS390X_OpS390XADDC(v) + case OpS390XADDE: + return rewriteValueS390X_OpS390XADDE(v) + case OpS390XADDW: + return rewriteValueS390X_OpS390XADDW(v) + case OpS390XADDWconst: + return rewriteValueS390X_OpS390XADDWconst(v) + case OpS390XADDWload: + return rewriteValueS390X_OpS390XADDWload(v) + case OpS390XADDconst: + return rewriteValueS390X_OpS390XADDconst(v) + case 
OpS390XADDload: + return rewriteValueS390X_OpS390XADDload(v) + case OpS390XAND: + return rewriteValueS390X_OpS390XAND(v) + case OpS390XANDW: + return rewriteValueS390X_OpS390XANDW(v) + case OpS390XANDWconst: + return rewriteValueS390X_OpS390XANDWconst(v) + case OpS390XANDWload: + return rewriteValueS390X_OpS390XANDWload(v) + case OpS390XANDconst: + return rewriteValueS390X_OpS390XANDconst(v) + case OpS390XANDload: + return rewriteValueS390X_OpS390XANDload(v) + case OpS390XCMP: + return rewriteValueS390X_OpS390XCMP(v) + case OpS390XCMPU: + return rewriteValueS390X_OpS390XCMPU(v) + case OpS390XCMPUconst: + return rewriteValueS390X_OpS390XCMPUconst(v) + case OpS390XCMPW: + return rewriteValueS390X_OpS390XCMPW(v) + case OpS390XCMPWU: + return rewriteValueS390X_OpS390XCMPWU(v) + case OpS390XCMPWUconst: + return rewriteValueS390X_OpS390XCMPWUconst(v) + case OpS390XCMPWconst: + return rewriteValueS390X_OpS390XCMPWconst(v) + case OpS390XCMPconst: + return rewriteValueS390X_OpS390XCMPconst(v) + case OpS390XCPSDR: + return rewriteValueS390X_OpS390XCPSDR(v) + case OpS390XFCMP: + return rewriteValueS390X_OpS390XFCMP(v) + case OpS390XFCMPS: + return rewriteValueS390X_OpS390XFCMPS(v) + case OpS390XFMOVDload: + return rewriteValueS390X_OpS390XFMOVDload(v) + case OpS390XFMOVDstore: + return rewriteValueS390X_OpS390XFMOVDstore(v) + case OpS390XFMOVSload: + return rewriteValueS390X_OpS390XFMOVSload(v) + case OpS390XFMOVSstore: + return rewriteValueS390X_OpS390XFMOVSstore(v) + case OpS390XFNEG: + return rewriteValueS390X_OpS390XFNEG(v) + case OpS390XFNEGS: + return rewriteValueS390X_OpS390XFNEGS(v) + case OpS390XLDGR: + return rewriteValueS390X_OpS390XLDGR(v) + case OpS390XLEDBR: + return rewriteValueS390X_OpS390XLEDBR(v) + case OpS390XLGDR: + return rewriteValueS390X_OpS390XLGDR(v) + case OpS390XLOCGR: + return rewriteValueS390X_OpS390XLOCGR(v) + case OpS390XLTDBR: + return rewriteValueS390X_OpS390XLTDBR(v) + case OpS390XLTEBR: + return rewriteValueS390X_OpS390XLTEBR(v) + case 
OpS390XLoweredRound32F: + return rewriteValueS390X_OpS390XLoweredRound32F(v) + case OpS390XLoweredRound64F: + return rewriteValueS390X_OpS390XLoweredRound64F(v) + case OpS390XMOVBZload: + return rewriteValueS390X_OpS390XMOVBZload(v) + case OpS390XMOVBZreg: + return rewriteValueS390X_OpS390XMOVBZreg(v) + case OpS390XMOVBload: + return rewriteValueS390X_OpS390XMOVBload(v) + case OpS390XMOVBreg: + return rewriteValueS390X_OpS390XMOVBreg(v) + case OpS390XMOVBstore: + return rewriteValueS390X_OpS390XMOVBstore(v) + case OpS390XMOVBstoreconst: + return rewriteValueS390X_OpS390XMOVBstoreconst(v) + case OpS390XMOVDBR: + return rewriteValueS390X_OpS390XMOVDBR(v) + case OpS390XMOVDaddridx: + return rewriteValueS390X_OpS390XMOVDaddridx(v) + case OpS390XMOVDload: + return rewriteValueS390X_OpS390XMOVDload(v) + case OpS390XMOVDstore: + return rewriteValueS390X_OpS390XMOVDstore(v) + case OpS390XMOVDstoreconst: + return rewriteValueS390X_OpS390XMOVDstoreconst(v) + case OpS390XMOVDstoreidx: + return rewriteValueS390X_OpS390XMOVDstoreidx(v) + case OpS390XMOVHZload: + return rewriteValueS390X_OpS390XMOVHZload(v) + case OpS390XMOVHZreg: + return rewriteValueS390X_OpS390XMOVHZreg(v) + case OpS390XMOVHload: + return rewriteValueS390X_OpS390XMOVHload(v) + case OpS390XMOVHreg: + return rewriteValueS390X_OpS390XMOVHreg(v) + case OpS390XMOVHstore: + return rewriteValueS390X_OpS390XMOVHstore(v) + case OpS390XMOVHstoreconst: + return rewriteValueS390X_OpS390XMOVHstoreconst(v) + case OpS390XMOVHstoreidx: + return rewriteValueS390X_OpS390XMOVHstoreidx(v) + case OpS390XMOVWBR: + return rewriteValueS390X_OpS390XMOVWBR(v) + case OpS390XMOVWZload: + return rewriteValueS390X_OpS390XMOVWZload(v) + case OpS390XMOVWZreg: + return rewriteValueS390X_OpS390XMOVWZreg(v) + case OpS390XMOVWload: + return rewriteValueS390X_OpS390XMOVWload(v) + case OpS390XMOVWreg: + return rewriteValueS390X_OpS390XMOVWreg(v) + case OpS390XMOVWstore: + return rewriteValueS390X_OpS390XMOVWstore(v) + case OpS390XMOVWstoreconst: 
+ return rewriteValueS390X_OpS390XMOVWstoreconst(v) + case OpS390XMOVWstoreidx: + return rewriteValueS390X_OpS390XMOVWstoreidx(v) + case OpS390XMULLD: + return rewriteValueS390X_OpS390XMULLD(v) + case OpS390XMULLDconst: + return rewriteValueS390X_OpS390XMULLDconst(v) + case OpS390XMULLDload: + return rewriteValueS390X_OpS390XMULLDload(v) + case OpS390XMULLW: + return rewriteValueS390X_OpS390XMULLW(v) + case OpS390XMULLWconst: + return rewriteValueS390X_OpS390XMULLWconst(v) + case OpS390XMULLWload: + return rewriteValueS390X_OpS390XMULLWload(v) + case OpS390XNEG: + return rewriteValueS390X_OpS390XNEG(v) + case OpS390XNEGW: + return rewriteValueS390X_OpS390XNEGW(v) + case OpS390XNOT: + return rewriteValueS390X_OpS390XNOT(v) + case OpS390XNOTW: + return rewriteValueS390X_OpS390XNOTW(v) + case OpS390XOR: + return rewriteValueS390X_OpS390XOR(v) + case OpS390XORW: + return rewriteValueS390X_OpS390XORW(v) + case OpS390XORWconst: + return rewriteValueS390X_OpS390XORWconst(v) + case OpS390XORWload: + return rewriteValueS390X_OpS390XORWload(v) + case OpS390XORconst: + return rewriteValueS390X_OpS390XORconst(v) + case OpS390XORload: + return rewriteValueS390X_OpS390XORload(v) + case OpS390XRISBGZ: + return rewriteValueS390X_OpS390XRISBGZ(v) + case OpS390XRLL: + return rewriteValueS390X_OpS390XRLL(v) + case OpS390XRLLG: + return rewriteValueS390X_OpS390XRLLG(v) + case OpS390XSLD: + return rewriteValueS390X_OpS390XSLD(v) + case OpS390XSLDconst: + return rewriteValueS390X_OpS390XSLDconst(v) + case OpS390XSLW: + return rewriteValueS390X_OpS390XSLW(v) + case OpS390XSLWconst: + return rewriteValueS390X_OpS390XSLWconst(v) + case OpS390XSRAD: + return rewriteValueS390X_OpS390XSRAD(v) + case OpS390XSRADconst: + return rewriteValueS390X_OpS390XSRADconst(v) + case OpS390XSRAW: + return rewriteValueS390X_OpS390XSRAW(v) + case OpS390XSRAWconst: + return rewriteValueS390X_OpS390XSRAWconst(v) + case OpS390XSRD: + return rewriteValueS390X_OpS390XSRD(v) + case OpS390XSRDconst: + return 
rewriteValueS390X_OpS390XSRDconst(v) + case OpS390XSRW: + return rewriteValueS390X_OpS390XSRW(v) + case OpS390XSRWconst: + return rewriteValueS390X_OpS390XSRWconst(v) + case OpS390XSTM2: + return rewriteValueS390X_OpS390XSTM2(v) + case OpS390XSTMG2: + return rewriteValueS390X_OpS390XSTMG2(v) + case OpS390XSUB: + return rewriteValueS390X_OpS390XSUB(v) + case OpS390XSUBE: + return rewriteValueS390X_OpS390XSUBE(v) + case OpS390XSUBW: + return rewriteValueS390X_OpS390XSUBW(v) + case OpS390XSUBWconst: + return rewriteValueS390X_OpS390XSUBWconst(v) + case OpS390XSUBWload: + return rewriteValueS390X_OpS390XSUBWload(v) + case OpS390XSUBconst: + return rewriteValueS390X_OpS390XSUBconst(v) + case OpS390XSUBload: + return rewriteValueS390X_OpS390XSUBload(v) + case OpS390XSumBytes2: + return rewriteValueS390X_OpS390XSumBytes2(v) + case OpS390XSumBytes4: + return rewriteValueS390X_OpS390XSumBytes4(v) + case OpS390XSumBytes8: + return rewriteValueS390X_OpS390XSumBytes8(v) + case OpS390XXOR: + return rewriteValueS390X_OpS390XXOR(v) + case OpS390XXORW: + return rewriteValueS390X_OpS390XXORW(v) + case OpS390XXORWconst: + return rewriteValueS390X_OpS390XXORWconst(v) + case OpS390XXORWload: + return rewriteValueS390X_OpS390XXORWload(v) + case OpS390XXORconst: + return rewriteValueS390X_OpS390XXORconst(v) + case OpS390XXORload: + return rewriteValueS390X_OpS390XXORload(v) + case OpSelect0: + return rewriteValueS390X_OpSelect0(v) + case OpSelect1: + return rewriteValueS390X_OpSelect1(v) + case OpSignExt16to32: + v.Op = OpS390XMOVHreg + return true + case OpSignExt16to64: + v.Op = OpS390XMOVHreg + return true + case OpSignExt32to64: + v.Op = OpS390XMOVWreg + return true + case OpSignExt8to16: + v.Op = OpS390XMOVBreg + return true + case OpSignExt8to32: + v.Op = OpS390XMOVBreg + return true + case OpSignExt8to64: + v.Op = OpS390XMOVBreg + return true + case OpSlicemask: + return rewriteValueS390X_OpSlicemask(v) + case OpSqrt: + v.Op = OpS390XFSQRT + return true + case OpSqrt32: + v.Op = 
OpS390XFSQRTS + return true + case OpStaticCall: + v.Op = OpS390XCALLstatic + return true + case OpStore: + return rewriteValueS390X_OpStore(v) + case OpSub16: + v.Op = OpS390XSUBW + return true + case OpSub32: + v.Op = OpS390XSUBW + return true + case OpSub32F: + return rewriteValueS390X_OpSub32F(v) + case OpSub64: + v.Op = OpS390XSUB + return true + case OpSub64F: + return rewriteValueS390X_OpSub64F(v) + case OpSub8: + v.Op = OpS390XSUBW + return true + case OpSubPtr: + v.Op = OpS390XSUB + return true + case OpTailCall: + v.Op = OpS390XCALLtail + return true + case OpTrunc: + return rewriteValueS390X_OpTrunc(v) + case OpTrunc16to8: + v.Op = OpCopy + return true + case OpTrunc32to16: + v.Op = OpCopy + return true + case OpTrunc32to8: + v.Op = OpCopy + return true + case OpTrunc64to16: + v.Op = OpCopy + return true + case OpTrunc64to32: + v.Op = OpCopy + return true + case OpTrunc64to8: + v.Op = OpCopy + return true + case OpWB: + v.Op = OpS390XLoweredWB + return true + case OpXor16: + v.Op = OpS390XXORW + return true + case OpXor32: + v.Op = OpS390XXORW + return true + case OpXor64: + v.Op = OpS390XXOR + return true + case OpXor8: + v.Op = OpS390XXORW + return true + case OpZero: + return rewriteValueS390X_OpZero(v) + case OpZeroExt16to32: + v.Op = OpS390XMOVHZreg + return true + case OpZeroExt16to64: + v.Op = OpS390XMOVHZreg + return true + case OpZeroExt32to64: + v.Op = OpS390XMOVWZreg + return true + case OpZeroExt8to16: + v.Op = OpS390XMOVBZreg + return true + case OpZeroExt8to32: + v.Op = OpS390XMOVBZreg + return true + case OpZeroExt8to64: + v.Op = OpS390XMOVBZreg + return true + } + return false +} +func rewriteValueS390X_OpAdd32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Add32F x y) + // result: (Select0 (FADDS x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpS390XFADDS, types.NewTuple(typ.Float32, types.TypeFlags)) + v0.AddArg2(x, y) + v.AddArg(v0) 
+ return true + } +} +func rewriteValueS390X_OpAdd64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Add64F x y) + // result: (Select0 (FADD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpS390XFADD, types.NewTuple(typ.Float64, types.TypeFlags)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpAddr(v *Value) bool { + v_0 := v.Args[0] + // match: (Addr {sym} base) + // result: (MOVDaddr {sym} base) + for { + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpS390XMOVDaddr) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } +} +func rewriteValueS390X_OpAtomicAdd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAdd32 ptr val mem) + // result: (AddTupleFirst32 val (LAA ptr val mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpS390XAddTupleFirst32) + v0 := b.NewValue0(v.Pos, OpS390XLAA, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(ptr, val, mem) + v.AddArg2(val, v0) + return true + } +} +func rewriteValueS390X_OpAtomicAdd64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAdd64 ptr val mem) + // result: (AddTupleFirst64 val (LAAG ptr val mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpS390XAddTupleFirst64) + v0 := b.NewValue0(v.Pos, OpS390XLAAG, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(ptr, val, mem) + v.AddArg2(val, v0) + return true + } +} +func rewriteValueS390X_OpAtomicAnd8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAnd8 ptr val mem) + // result: (LANfloor ptr (RLL (ORWconst val [-1<<8]) (RXSBG {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr)) mem) + for { + ptr := v_0 + val := v_1 + mem := 
v_2 + v.reset(OpS390XLANfloor) + v0 := b.NewValue0(v.Pos, OpS390XRLL, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpS390XORWconst, typ.UInt32) + v1.AuxInt = int32ToAuxInt(-1 << 8) + v1.AddArg(val) + v2 := b.NewValue0(v.Pos, OpS390XRXSBG, typ.UInt32) + v2.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(59, 60, 3)) + v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(3 << 3) + v2.AddArg2(v3, ptr) + v0.AddArg2(v1, v2) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueS390X_OpAtomicCompareAndSwap32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicCompareAndSwap32 ptr old new_ mem) + // result: (LoweredAtomicCas32 ptr old new_ mem) + for { + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpS390XLoweredAtomicCas32) + v.AddArg4(ptr, old, new_, mem) + return true + } +} +func rewriteValueS390X_OpAtomicCompareAndSwap64(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicCompareAndSwap64 ptr old new_ mem) + // result: (LoweredAtomicCas64 ptr old new_ mem) + for { + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpS390XLoweredAtomicCas64) + v.AddArg4(ptr, old, new_, mem) + return true + } +} +func rewriteValueS390X_OpAtomicExchange32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicExchange32 ptr val mem) + // result: (LoweredAtomicExchange32 ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpS390XLoweredAtomicExchange32) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueS390X_OpAtomicExchange64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicExchange64 ptr val mem) + // result: (LoweredAtomicExchange64 ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpS390XLoweredAtomicExchange64) + v.AddArg3(ptr, val, mem) + return true + } +} +func 
rewriteValueS390X_OpAtomicLoad32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoad32 ptr mem) + // result: (MOVWZatomicload ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpS390XMOVWZatomicload) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValueS390X_OpAtomicLoad64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoad64 ptr mem) + // result: (MOVDatomicload ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpS390XMOVDatomicload) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValueS390X_OpAtomicLoad8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoad8 ptr mem) + // result: (MOVBZatomicload ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpS390XMOVBZatomicload) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValueS390X_OpAtomicLoadAcq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoadAcq32 ptr mem) + // result: (MOVWZatomicload ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpS390XMOVWZatomicload) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValueS390X_OpAtomicLoadPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicLoadPtr ptr mem) + // result: (MOVDatomicload ptr mem) + for { + ptr := v_0 + mem := v_1 + v.reset(OpS390XMOVDatomicload) + v.AddArg2(ptr, mem) + return true + } +} +func rewriteValueS390X_OpAtomicOr8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicOr8 ptr val mem) + // result: (LAOfloor ptr (SLW (MOVBZreg val) (RXSBG {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr)) mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpS390XLAOfloor) + v0 := b.NewValue0(v.Pos, OpS390XSLW, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt32) + v1.AddArg(val) + v2 := b.NewValue0(v.Pos, OpS390XRXSBG, typ.UInt32) + v2.Aux = 
s390xRotateParamsToAux(s390x.NewRotateParams(59, 60, 3)) + v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(3 << 3) + v2.AddArg2(v3, ptr) + v0.AddArg2(v1, v2) + v.AddArg3(ptr, v0, mem) + return true + } +} +func rewriteValueS390X_OpAtomicStore32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AtomicStore32 ptr val mem) + // result: (SYNC (MOVWatomicstore ptr val mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpS390XSYNC) + v0 := b.NewValue0(v.Pos, OpS390XMOVWatomicstore, types.TypeMem) + v0.AddArg3(ptr, val, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpAtomicStore64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AtomicStore64 ptr val mem) + // result: (SYNC (MOVDatomicstore ptr val mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpS390XSYNC) + v0 := b.NewValue0(v.Pos, OpS390XMOVDatomicstore, types.TypeMem) + v0.AddArg3(ptr, val, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpAtomicStore8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AtomicStore8 ptr val mem) + // result: (SYNC (MOVBatomicstore ptr val mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpS390XSYNC) + v0 := b.NewValue0(v.Pos, OpS390XMOVBatomicstore, types.TypeMem) + v0.AddArg3(ptr, val, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpAtomicStorePtrNoWB(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AtomicStorePtrNoWB ptr val mem) + // result: (SYNC (MOVDatomicstore ptr val mem)) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpS390XSYNC) + v0 := b.NewValue0(v.Pos, OpS390XMOVDatomicstore, types.TypeMem) + v0.AddArg3(ptr, val, mem) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpAtomicStoreRel32(v *Value) bool { + v_2 := 
v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AtomicStoreRel32 ptr val mem) + // result: (MOVWatomicstore ptr val mem) + for { + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpS390XMOVWatomicstore) + v.AddArg3(ptr, val, mem) + return true + } +} +func rewriteValueS390X_OpAvg64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Avg64u x y) + // result: (ADD (SRDconst (SUB x y) [1]) y) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XADD) + v0 := b.NewValue0(v.Pos, OpS390XSRDconst, t) + v0.AuxInt = uint8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpS390XSUB, t) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg2(v0, y) + return true + } +} +func rewriteValueS390X_OpBitLen64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen64 x) + // result: (SUB (MOVDconst [64]) (FLOGR x)) + for { + x := v_0 + v.reset(OpS390XSUB) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(64) + v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpBswap16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Bswap16 x:(MOVHZload [off] {sym} ptr mem)) + // result: @x.Block (MOVHZreg (MOVHBRload [off] {sym} ptr mem)) + for { + x := v_0 + if x.Op != OpS390XMOVHZload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + b = x.Block + v0 := b.NewValue0(x.Pos, OpS390XMOVHZreg, typ.UInt64) + v.copyOf(v0) + v1 := b.NewValue0(x.Pos, OpS390XMOVHBRload, typ.UInt16) + v1.AuxInt = int32ToAuxInt(off) + v1.Aux = symToAux(sym) + v1.AddArg2(ptr, mem) + v0.AddArg(v1) + return true + } + // match: (Bswap16 x:(MOVHZloadidx [off] {sym} ptr idx mem)) + // result: @x.Block (MOVHZreg (MOVHBRloadidx [off] {sym} ptr idx mem)) + for { + x := v_0 + if x.Op != OpS390XMOVHZloadidx { + break + } + off := 
auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v.copyOf(v0) + v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16) + v1.AuxInt = int32ToAuxInt(off) + v1.Aux = symToAux(sym) + v1.AddArg3(ptr, idx, mem) + v0.AddArg(v1) + return true + } + return false +} +func rewriteValueS390X_OpCeil(v *Value) bool { + v_0 := v.Args[0] + // match: (Ceil x) + // result: (FIDBR [6] x) + for { + x := v_0 + v.reset(OpS390XFIDBR) + v.AuxInt = int8ToAuxInt(6) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpConst16(v *Value) bool { + // match: (Const16 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt16(v.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueS390X_OpConst32(v *Value) bool { + // match: (Const32 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt32(v.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueS390X_OpConst64(v *Value) bool { + // match: (Const64 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt64(v.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueS390X_OpConst8(v *Value) bool { + // match: (Const8 [val]) + // result: (MOVDconst [int64(val)]) + for { + val := auxIntToInt8(v.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueS390X_OpConstBool(v *Value) bool { + // match: (ConstBool [t]) + // result: (MOVDconst [b2i(t)]) + for { + t := auxIntToBool(v.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(b2i(t)) + return true + } +} +func rewriteValueS390X_OpConstNil(v *Value) bool { + // match: (ConstNil) + // result: (MOVDconst [0]) + for { + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(0) 
+ return true + } +} +func rewriteValueS390X_OpCtz32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz32 x) + // result: (SUB (MOVDconst [64]) (FLOGR (MOVWZreg (ANDW (SUBWconst [1] x) (NOTW x))))) + for { + t := v.Type + x := v_0 + v.reset(OpS390XSUB) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(64) + v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpS390XANDW, t) + v4 := b.NewValue0(v.Pos, OpS390XSUBWconst, t) + v4.AuxInt = int32ToAuxInt(1) + v4.AddArg(x) + v5 := b.NewValue0(v.Pos, OpS390XNOTW, t) + v5.AddArg(x) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpCtz64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz64 x) + // result: (SUB (MOVDconst [64]) (FLOGR (AND (SUBconst [1] x) (NOT x)))) + for { + t := v.Type + x := v_0 + v.reset(OpS390XSUB) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(64) + v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpS390XAND, t) + v3 := b.NewValue0(v.Pos, OpS390XSUBconst, t) + v3.AuxInt = int32ToAuxInt(1) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpS390XNOT, t) + v4.AddArg(x) + v2.AddArg2(v3, v4) + v1.AddArg(v2) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpDiv16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 x y) + // result: (DIVW (MOVHreg x) (MOVHreg y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XDIVW) + v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpDiv16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block 
+ typ := &b.Func.Config.Types + // match: (Div16u x y) + // result: (DIVWU (MOVHZreg x) (MOVHZreg y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XDIVWU) + v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpDiv32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32 x y) + // result: (DIVW (MOVWreg x) y) + for { + x := v_0 + y := v_1 + v.reset(OpS390XDIVW) + v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } +} +func rewriteValueS390X_OpDiv32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32u x y) + // result: (DIVWU (MOVWZreg x) y) + for { + x := v_0 + y := v_1 + v.reset(OpS390XDIVWU) + v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } +} +func rewriteValueS390X_OpDiv64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Div64 x y) + // result: (DIVD x y) + for { + x := v_0 + y := v_1 + v.reset(OpS390XDIVD) + v.AddArg2(x, y) + return true + } +} +func rewriteValueS390X_OpDiv8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (DIVW (MOVBreg x) (MOVBreg y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XDIVW) + v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpDiv8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (DIVWU (MOVBZreg x) (MOVBZreg y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XDIVWU) + v0 := b.NewValue0(v.Pos, 
OpS390XMOVBZreg, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpEq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq16 x y) + // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Equal) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v4.AddArg(y) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpEq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq32 x y) + // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Equal) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpEq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq32F x y) + // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Equal) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := 
b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpEq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq64 x y) + // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Equal) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpEq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq64F x y) + // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Equal) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpEq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq8 x y) + // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Equal) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, 
OpS390XCMPW, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v4.AddArg(y) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpEqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqB x y) + // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Equal) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v4.AddArg(y) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpEqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqPtr x y) + // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Equal) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpFMA(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMA x y z) + // result: (FMADD z x y) + for { + x := v_0 + y := v_1 + z := v_2 + v.reset(OpS390XFMADD) + v.AddArg3(z, x, y) + return true + } +} +func rewriteValueS390X_OpFloor(v *Value) bool { + v_0 := v.Args[0] + // match: (Floor x) + // 
result: (FIDBR [7] x) + for { + x := v_0 + v.reset(OpS390XFIDBR) + v.AuxInt = int8ToAuxInt(7) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpHmul32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul32 x y) + // result: (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XSRDconst) + v.AuxInt = uint8ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpS390XMULLD, typ.Int64) + v1 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpHmul32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul32u x y) + // result: (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XSRDconst) + v.AuxInt = uint8ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpS390XMULLD, typ.Int64) + v1 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpITab(v *Value) bool { + v_0 := v.Args[0] + // match: (ITab (Load ptr mem)) + // result: (MOVDload ptr mem) + for { + if v_0.Op != OpLoad { + break + } + mem := v_0.Args[1] + ptr := v_0.Args[0] + v.reset(OpS390XMOVDload) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpIsInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (IsInBounds idx len) + // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Less) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := 
b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags) + v2.AddArg2(idx, len) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpIsNonNil(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (IsNonNil p) + // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0])) + for { + p := v_0 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.NotEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(0) + v2.AddArg(p) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpIsSliceInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (IsSliceInBounds idx len) + // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.LessOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags) + v2.AddArg2(idx, len) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16 x y) + // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.LessOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, 
OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v4.AddArg(y) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLeq16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16U x y) + // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVHZreg x) (MOVHZreg y))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.LessOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v4.AddArg(y) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq32 x y) + // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.LessOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq32F x y) + // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) 
(FCMPS x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.LessOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLeq32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq32U x y) + // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.LessOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq64 x y) + // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.LessOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq64F x y) + // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = 
s390xCCMaskToAux(s390x.LessOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLeq64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq64U x y) + // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.LessOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8 x y) + // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.LessOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v4.AddArg(y) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLeq8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8U x y) + // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) 
(CMPWU (MOVBZreg x) (MOVBZreg y))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.LessOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v4.AddArg(y) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLess16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16 x y) + // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Less) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v4.AddArg(y) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLess16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16U x y) + // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVHZreg x) (MOVHZreg y))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Less) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, 
typ.UInt64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v4.AddArg(y) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLess32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less32 x y) + // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Less) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLess32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less32F x y) + // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Less) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLess32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less32U x y) + // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Less) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) + v2.AddArg2(x, 
y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLess64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less64 x y) + // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Less) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLess64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less64F x y) + // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Less) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLess64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less64U x y) + // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Less) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLess8(v *Value) bool { + v_1 := v.Args[1] + v_0 
:= v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8 x y) + // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Less) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v4.AddArg(y) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLess8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8U x y) + // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVBZreg x) (MOVBZreg y))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.Less) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v4.AddArg(y) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Load ptr mem) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (MOVDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpS390XMOVDload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitInt(t) && t.IsSigned() + // result: (MOVWload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + 
if !(is32BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpS390XMOVWload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitInt(t) && !t.IsSigned() + // result: (MOVWZload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpS390XMOVWZload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is16BitInt(t) && t.IsSigned() + // result: (MOVHload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpS390XMOVHload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is16BitInt(t) && !t.IsSigned() + // result: (MOVHZload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t) && !t.IsSigned()) { + break + } + v.reset(OpS390XMOVHZload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is8BitInt(t) && t.IsSigned() + // result: (MOVBload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is8BitInt(t) && t.IsSigned()) { + break + } + v.reset(OpS390XMOVBload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (t.IsBoolean() || (is8BitInt(t) && !t.IsSigned())) + // result: (MOVBZload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean() || (is8BitInt(t) && !t.IsSigned())) { + break + } + v.reset(OpS390XMOVBZload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (FMOVSload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + v.reset(OpS390XFMOVSload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (FMOVDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(OpS390XFMOVDload) + v.AddArg2(ptr, mem) + return true + } + return false +} 
+func rewriteValueS390X_OpLocalAddr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (MOVDaddr {sym} (SPanchored base mem)) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(OpS390XMOVDaddr) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) + v.AddArg(v0) + return true + } + // match: (LocalAddr {sym} base _) + // cond: !t.Elem().HasPointers() + // result: (MOVDaddr {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(OpS390XMOVDaddr) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + return false +} +func rewriteValueS390X_OpLsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (Lsh16x16 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x32 x y) + 
// cond: shiftIsBounded(v) + // result: (SLW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (Lsh16x32 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPWUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (Lsh16x64 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (Lsh16x8 x y) + // result: 
(LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (Lsh32x16 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (Lsh32x32 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) 
(MOVDconst [0]) (CMPWUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (Lsh32x64 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (Lsh32x8 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSLW, 
t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SLD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSLD) + v.AddArg2(x, y) + return true + } + // match: (Lsh64x16 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SLD x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSLD, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SLD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSLD) + v.AddArg2(x, y) + return true + } + // match: (Lsh64x32 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SLD x y) (MOVDconst [0]) (CMPWUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSLD, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, 
OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SLD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSLD) + v.AddArg2(x, y) + return true + } + // match: (Lsh64x64 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SLD x y) (MOVDconst [0]) (CMPUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSLD, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SLD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSLD) + v.AddArg2(x, y) + return true + } + // match: (Lsh64x8 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SLD x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSLD, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, 
typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (Lsh8x16 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (Lsh8x32 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPWUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLsh8x64(v *Value) bool 
{ + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (Lsh8x64 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpLsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (Lsh8x8 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SLW x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpMod16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16 x y) + // result: (MODW (MOVHreg x) 
(MOVHreg y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XMODW) + v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpMod16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16u x y) + // result: (MODWU (MOVHZreg x) (MOVHZreg y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XMODWU) + v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpMod32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32 x y) + // result: (MODW (MOVWreg x) y) + for { + x := v_0 + y := v_1 + v.reset(OpS390XMODW) + v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } +} +func rewriteValueS390X_OpMod32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32u x y) + // result: (MODWU (MOVWZreg x) y) + for { + x := v_0 + y := v_1 + v.reset(OpS390XMODWU) + v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } +} +func rewriteValueS390X_OpMod64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Mod64 x y) + // result: (MODD x y) + for { + x := v_0 + y := v_1 + v.reset(OpS390XMODD) + v.AddArg2(x, y) + return true + } +} +func rewriteValueS390X_OpMod8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8 x y) + // result: (MODW (MOVBreg x) (MOVBreg y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XMODW) + v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) 
+ v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpMod8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8u x y) + // result: (MODWU (MOVBZreg x) (MOVBZreg y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XMODWU) + v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpMove(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Move [0] _ _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBZload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpS390XMOVBstore) + v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] dst src mem) + // result: (MOVHstore dst (MOVHZload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpS390XMOVHstore) + v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [4] dst src mem) + // result: (MOVWstore dst (MOVWZload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpS390XMOVWstore) + v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [8] dst src mem) + // result: (MOVDstore dst (MOVDload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + 
v.reset(OpS390XMOVDstore) + v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [16] dst src mem) + // result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpS390XMOVDstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [24] dst src mem) + // result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 24 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpS390XMOVDstore) + v.AuxInt = int32ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(16) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(8) + v2 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) + v2.AuxInt = int32ToAuxInt(8) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [3] dst src mem) + // result: (MOVBstore [2] dst (MOVBZload [2] src mem) (MOVHstore dst (MOVHZload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpS390XMOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8) + v0.AuxInt = 
int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpS390XMOVHstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [5] dst src mem) + // result: (MOVBstore [4] dst (MOVBZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 5 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpS390XMOVBstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [6] dst src mem) + // result: (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpS390XMOVHstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [7] dst src mem) + // result: (MOVBstore [6] dst (MOVBZload [6] src mem) (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 7 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpS390XMOVBstore) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(6) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpS390XMOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := 
b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) + v2.AuxInt = int32ToAuxInt(4) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 0 && s <= 256 && logLargeCopy(v, s) + // result: (MVC [makeValAndOff(int32(s), 0)] dst src mem) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 0 && s <= 256 && logLargeCopy(v, s)) { + break + } + v.reset(OpS390XMVC) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s), 0)) + v.AddArg3(dst, src, mem) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 256 && s <= 512 && logLargeCopy(v, s) + // result: (MVC [makeValAndOff(int32(s)-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 256 && s <= 512 && logLargeCopy(v, s)) { + break + } + v.reset(OpS390XMVC) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s)-256, 256)) + v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 0)) + v0.AddArg3(dst, src, mem) + v.AddArg3(dst, src, v0) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 512 && s <= 768 && logLargeCopy(v, s) + // result: (MVC [makeValAndOff(int32(s)-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 512 && s <= 768 && logLargeCopy(v, s)) { + break + } + v.reset(OpS390XMVC) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s)-512, 512)) + v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 256)) + v1 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) + v1.AuxInt = 
valAndOffToAuxInt(makeValAndOff(256, 0)) + v1.AddArg3(dst, src, mem) + v0.AddArg3(dst, src, v1) + v.AddArg3(dst, src, v0) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 768 && s <= 1024 && logLargeCopy(v, s) + // result: (MVC [makeValAndOff(int32(s)-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 768 && s <= 1024 && logLargeCopy(v, s)) { + break + } + v.reset(OpS390XMVC) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s)-768, 768)) + v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 512)) + v1 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) + v1.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 256)) + v2 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem) + v2.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 0)) + v2.AddArg3(dst, src, mem) + v1.AddArg3(dst, src, v2) + v0.AddArg3(dst, src, v1) + v.AddArg3(dst, src, v0) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 1024 && logLargeCopy(v, s) + // result: (LoweredMove [s%256] dst src (ADD src (MOVDconst [(s/256)*256])) mem) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 1024 && logLargeCopy(v, s)) { + break + } + v.reset(OpS390XLoweredMove) + v.AuxInt = int64ToAuxInt(s % 256) + v0 := b.NewValue0(v.Pos, OpS390XADD, src.Type) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt((s / 256) * 256) + v0.AddArg2(src, v1) + v.AddArg4(dst, src, v0, mem) + return true + } + return false +} +func rewriteValueS390X_OpNeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq16 x y) + // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y))) + for { + x := v_0 + y := v_1 + 
v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.NotEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v4.AddArg(y) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpNeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq32 x y) + // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.NotEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpNeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq32F x y) + // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.NotEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpNeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq64 x y) + // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x 
y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.NotEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpNeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq64F x y) + // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.NotEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpNeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq8 x y) + // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.NotEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v4.AddArg(y) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpNeqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NeqB x y) + // result: (LOCGR 
{s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.NotEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v4.AddArg(y) + v2.AddArg2(v3, v4) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpNeqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NeqPtr x y) + // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) + for { + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(s390x.NotEqual) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) + v2.AddArg2(x, y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpNot(v *Value) bool { + v_0 := v.Args[0] + // match: (Not x) + // result: (XORWconst [1] x) + for { + x := v_0 + v.reset(OpS390XXORWconst) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpOffPtr(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (OffPtr [off] ptr:(SP)) + // result: (MOVDaddr [int32(off)] ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + if ptr.Op != OpSP { + break + } + v.reset(OpS390XMOVDaddr) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) + return true + } + // match: (OffPtr [off] ptr) + // cond: is32Bit(off) + // result: (ADDconst [int32(off)] ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := 
v_0 + if !(is32Bit(off)) { + break + } + v.reset(OpS390XADDconst) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) + return true + } + // match: (OffPtr [off] ptr) + // result: (ADD (MOVDconst [off]) ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(OpS390XADD) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(off) + v.AddArg2(v0, ptr) + return true + } +} +func rewriteValueS390X_OpPanicBounds(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 0 + // result: (LoweredPanicBoundsA [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 0) { + break + } + v.reset(OpS390XLoweredPanicBoundsA) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 1 + // result: (LoweredPanicBoundsB [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 1) { + break + } + v.reset(OpS390XLoweredPanicBoundsB) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 2 + // result: (LoweredPanicBoundsC [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 2) { + break + } + v.reset(OpS390XLoweredPanicBoundsC) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + return false +} +func rewriteValueS390X_OpPopCount16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount16 x) + // result: (MOVBZreg (SumBytes2 (POPCNT x))) + for { + x := v_0 + v.reset(OpS390XMOVBZreg) + v0 := b.NewValue0(v.Pos, OpS390XSumBytes2, typ.UInt8) + v1 := b.NewValue0(v.Pos, OpS390XPOPCNT, typ.UInt16) + v1.AddArg(x) + v0.AddArg(v1) + 
v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpPopCount32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount32 x) + // result: (MOVBZreg (SumBytes4 (POPCNT x))) + for { + x := v_0 + v.reset(OpS390XMOVBZreg) + v0 := b.NewValue0(v.Pos, OpS390XSumBytes4, typ.UInt8) + v1 := b.NewValue0(v.Pos, OpS390XPOPCNT, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpPopCount64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount64 x) + // result: (MOVBZreg (SumBytes8 (POPCNT x))) + for { + x := v_0 + v.reset(OpS390XMOVBZreg) + v0 := b.NewValue0(v.Pos, OpS390XSumBytes8, typ.UInt8) + v1 := b.NewValue0(v.Pos, OpS390XPOPCNT, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpPopCount8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount8 x) + // result: (POPCNT (MOVBZreg x)) + for { + x := v_0 + v.reset(OpS390XPOPCNT) + v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpRotateLeft16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft16 x (MOVDconst [c])) + // result: (Or16 (Lsh16x64 x (MOVDconst [c&15])) (Rsh16Ux64 x (MOVDconst [-c&15]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr16) + v0 := b.NewValue0(v.Pos, OpLsh16x64, t) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 15) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) + v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 15) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func 
rewriteValueS390X_OpRotateLeft8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft8 x (MOVDconst [c])) + // result: (Or8 (Lsh8x64 x (MOVDconst [c&7])) (Rsh8Ux64 x (MOVDconst [-c&7]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpLsh8x64, t) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 7) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) + v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 7) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueS390X_OpRound(v *Value) bool { + v_0 := v.Args[0] + // match: (Round x) + // result: (FIDBR [1] x) + for { + x := v_0 + v.reset(OpS390XFIDBR) + v.AuxInt = int8ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpRoundToEven(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundToEven x) + // result: (FIDBR [4] x) + for { + x := v_0 + v.reset(OpS390XFIDBR) + v.AuxInt = int8ToAuxInt(4) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpRsh16Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRW (MOVHZreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRW) + v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16Ux16 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SRW (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSRW, t) + v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, 
typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValueS390X_OpRsh16Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRW (MOVHZreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRW) + v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16Ux32 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SRW (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSRW, t) + v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValueS390X_OpRsh16Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRW (MOVHZreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRW) + v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16Ux64 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SRW (MOVHZreg x) y) (MOVDconst [0]) (CMPUconst y [64])) + for { + t := 
v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSRW, t) + v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValueS390X_OpRsh16Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRW (MOVHZreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRW) + v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16Ux8 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SRW (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSRW, t) + v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValueS390X_OpRsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SRAW (MOVHreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, 
typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16x16 x y) + // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVHZreg y) [64]))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) + v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpRsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SRAW (MOVHreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16x32 x y) + // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst y [64]))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) + v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v3.AddArg(y) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpRsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SRAW 
(MOVHreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16x64 x y) + // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPUconst y [64]))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) + v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v3.AddArg(y) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpRsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SRAW (MOVHreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh16x8 x y) + // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVBZreg y) [64]))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) + v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpRsh32Ux16(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRW) + v.AddArg2(x, y) + return true + } + // match: (Rsh32Ux16 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SRW x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpRsh32Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRW) + v.AddArg2(x, y) + return true + } + // match: (Rsh32Ux32 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SRW x y) (MOVDconst [0]) (CMPWUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpRsh32Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux64 x y) + // cond: 
shiftIsBounded(v) + // result: (SRW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRW) + v.AddArg2(x, y) + return true + } + // match: (Rsh32Ux64 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SRW x y) (MOVDconst [0]) (CMPUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpRsh32Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRW) + v.AddArg2(x, y) + return true + } + // match: (Rsh32Ux8 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SRW x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSRW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpRsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SRAW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + 
v.reset(OpS390XSRAW) + v.AddArg2(x, y) + return true + } + // match: (Rsh32x16 x y) + // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVHZreg y) [64]))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) + v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) + v1.AuxInt = int64ToAuxInt(63) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueS390X_OpRsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SRAW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRAW) + v.AddArg2(x, y) + return true + } + // match: (Rsh32x32 x y) + // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst y [64]))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) + v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) + v1.AuxInt = int64ToAuxInt(63) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueS390X_OpRsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SRAW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRAW) + v.AddArg2(x, y) + return true + } + // match: (Rsh32x64 x y) + // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPUconst y [64]))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XSRAW) 
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) + v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) + v1.AuxInt = int64ToAuxInt(63) + v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueS390X_OpRsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SRAW x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRAW) + v.AddArg2(x, y) + return true + } + // match: (Rsh32x8 x y) + // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVBZreg y) [64]))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) + v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) + v1.AuxInt = int64ToAuxInt(63) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueS390X_OpRsh64Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRD) + v.AddArg2(x, y) + return true + } + // match: (Rsh64Ux16 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SRD x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSRD, t) + v0.AddArg2(x, y) + v1 := 
b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpRsh64Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRD) + v.AddArg2(x, y) + return true + } + // match: (Rsh64Ux32 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SRD x y) (MOVDconst [0]) (CMPWUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSRD, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpRsh64Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRD) + v.AddArg2(x, y) + return true + } + // match: (Rsh64Ux64 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SRD x y) (MOVDconst [0]) (CMPUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSRD, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, 
types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpRsh64Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRD) + v.AddArg2(x, y) + return true + } + // match: (Rsh64Ux8 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SRD x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSRD, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueS390X_OpRsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SRAD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRAD) + v.AddArg2(x, y) + return true + } + // match: (Rsh64x16 x y) + // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVHZreg y) [64]))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XSRAD) + v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) + v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) + v1.AuxInt = int64ToAuxInt(63) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v0.AddArg3(y, v1, 
v2) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueS390X_OpRsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SRAD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRAD) + v.AddArg2(x, y) + return true + } + // match: (Rsh64x32 x y) + // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst y [64]))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XSRAD) + v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) + v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) + v1.AuxInt = int64ToAuxInt(63) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueS390X_OpRsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SRAD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRAD) + v.AddArg2(x, y) + return true + } + // match: (Rsh64x64 x y) + // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPUconst y [64]))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XSRAD) + v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) + v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) + v1.AuxInt = int64ToAuxInt(63) + v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueS390X_OpRsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SRAD x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) 
{ + break + } + v.reset(OpS390XSRAD) + v.AddArg2(x, y) + return true + } + // match: (Rsh64x8 x y) + // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVBZreg y) [64]))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XSRAD) + v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) + v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) + v1.AuxInt = int64ToAuxInt(63) + v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueS390X_OpRsh8Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRW (MOVBZreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRW) + v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8Ux16 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SRW (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSRW, t) + v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValueS390X_OpRsh8Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux32 x y) + // cond: 
shiftIsBounded(v) + // result: (SRW (MOVBZreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRW) + v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8Ux32 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SRW (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSRW, t) + v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValueS390X_OpRsh8Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRW (MOVBZreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRW) + v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8Ux64 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SRW (MOVBZreg x) y) (MOVDconst [0]) (CMPUconst y [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSRW, t) + v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v3.AddArg(y) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValueS390X_OpRsh8Ux8(v *Value) bool { + v_1 
:= v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRW (MOVBZreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRW) + v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8Ux8 x y) + // result: (LOCGR {s390x.GreaterOrEqual} (SRW (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpS390XLOCGR) + v.Type = t + v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v0 := b.NewValue0(v.Pos, OpS390XSRW, t) + v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v1.AddArg(x) + v0.AddArg2(v1, y) + v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg3(v0, v2, v3) + return true + } +} +func rewriteValueS390X_OpRsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SRAW (MOVBreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8x16 x y) + // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVHZreg y) [64]))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) + v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, 
types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpRsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SRAW (MOVBreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8x32 x y) + // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst y [64]))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) + v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v3.AddArg(y) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpRsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SRAW (MOVBreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8x64 x y) + // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPUconst y [64]))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) + v1.Aux = 
s390xCCMaskToAux(s390x.GreaterOrEqual) + v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v3.AddArg(y) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpRsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SRAW (MOVBreg x) y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } + // match: (Rsh8x8 x y) + // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVBZreg y) [64]))) + for { + x := v_0 + y := v_1 + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type) + v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type) + v2.AuxInt = int64ToAuxInt(63) + v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v3.AuxInt = int32ToAuxInt(64) + v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v1.AddArg3(y, v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueS390X_OpS390XADD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADD x (MOVDconst [c])) + // cond: is32Bit(c) && !t.IsPtr() + // result: (ADDconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + continue + } + t := v_1.Type + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c) && !t.IsPtr()) { + continue + } + v.reset(OpS390XADDconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + // match: (ADD idx (MOVDaddr [c] {s} ptr)) + // cond: 
ptr.Op != OpSB + // result: (MOVDaddridx [c] {s} ptr idx) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + idx := v_0 + if v_1.Op != OpS390XMOVDaddr { + continue + } + c := auxIntToInt32(v_1.AuxInt) + s := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + if !(ptr.Op != OpSB) { + continue + } + v.reset(OpS390XMOVDaddridx) + v.AuxInt = int32ToAuxInt(c) + v.Aux = symToAux(s) + v.AddArg2(ptr, idx) + return true + } + break + } + // match: (ADD x (NEG y)) + // result: (SUB x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpS390XNEG { + continue + } + y := v_1.Args[0] + v.reset(OpS390XSUB) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADD x g:(MOVDload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) + // result: (ADDload [off] {sym} x ptr mem) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 + if g.Op != OpS390XMOVDload { + continue + } + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XADDload) + v.Type = t + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueS390X_OpS390XADDC(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDC x (MOVDconst [c])) + // cond: is16Bit(c) + // result: (ADDCconst x [int16(c)]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is16Bit(c)) { + continue + } + v.reset(OpS390XADDCconst) + v.AuxInt = int16ToAuxInt(int16(c)) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValueS390X_OpS390XADDE(v *Value) bool { + v_2 := 
v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDE x y (FlagEQ)) + // result: (ADDC x y) + for { + x := v_0 + y := v_1 + if v_2.Op != OpS390XFlagEQ { + break + } + v.reset(OpS390XADDC) + v.AddArg2(x, y) + return true + } + // match: (ADDE x y (FlagLT)) + // result: (ADDC x y) + for { + x := v_0 + y := v_1 + if v_2.Op != OpS390XFlagLT { + break + } + v.reset(OpS390XADDC) + v.AddArg2(x, y) + return true + } + // match: (ADDE x y (Select1 (ADDCconst [-1] (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) c))))) + // result: (ADDE x y c) + for { + x := v_0 + y := v_1 + if v_2.Op != OpSelect1 { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpS390XADDCconst || auxIntToInt16(v_2_0.AuxInt) != -1 { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpSelect0 { + break + } + v_2_0_0_0 := v_2_0_0.Args[0] + if v_2_0_0_0.Op != OpS390XADDE { + break + } + c := v_2_0_0_0.Args[2] + v_2_0_0_0_0 := v_2_0_0_0.Args[0] + if v_2_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_0_0_0.AuxInt) != 0 { + break + } + v_2_0_0_0_1 := v_2_0_0_0.Args[1] + if v_2_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_0_0_1.AuxInt) != 0 { + break + } + v.reset(OpS390XADDE) + v.AddArg3(x, y, c) + return true + } + return false +} +func rewriteValueS390X_OpS390XADDW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDW x (MOVDconst [c])) + // result: (ADDWconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpS390XADDWconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + // match: (ADDW x (NEGW y)) + // result: (SUBW x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpS390XNEGW { + continue + } + y := v_1.Args[0] + v.reset(OpS390XSUBW) + v.AddArg2(x, y) + return true + } + break + } + // match: (ADDW x g:(MOVWload [off] {sym} ptr mem)) 
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) + // result: (ADDWload [off] {sym} x ptr mem) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 + if g.Op != OpS390XMOVWload { + continue + } + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XADDWload) + v.Type = t + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + // match: (ADDW x g:(MOVWZload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) + // result: (ADDWload [off] {sym} x ptr mem) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 + if g.Op != OpS390XMOVWZload { + continue + } + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XADDWload) + v.Type = t + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueS390X_OpS390XADDWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ADDWconst [c] x) + // cond: int32(c)==0 + // result: x + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(int32(c) == 0) { + break + } + v.copyOf(x) + return true + } + // match: (ADDWconst [c] (MOVDconst [d])) + // result: (MOVDconst [int64(c)+d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(c) + d) + return true + } + // match: (ADDWconst [c] (ADDWconst [d] x)) + // result: (ADDWconst 
[int32(c+d)] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XADDWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpS390XADDWconst) + v.AuxInt = int32ToAuxInt(int32(c + d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XADDWload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDWload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) + // result: (ADDWload [off1+off2] {sym} x ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpS390XADDWload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + // match: (ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (ADDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XADDWload) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSym(s1, s2)) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XADDconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ADDconst [c] (MOVDaddr [d] {s} x:(SB))) + // cond: ((c+d)&1 == 0) && is32Bit(int64(c)+int64(d)) + // result: (MOVDaddr [c+d] {s} x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVDaddr { + break + } + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) 
+ x := v_0.Args[0] + if x.Op != OpSB || !(((c+d)&1 == 0) && is32Bit(int64(c)+int64(d))) { + break + } + v.reset(OpS390XMOVDaddr) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg(x) + return true + } + // match: (ADDconst [c] (MOVDaddr [d] {s} x)) + // cond: x.Op != OpSB && is20Bit(int64(c)+int64(d)) + // result: (MOVDaddr [c+d] {s} x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVDaddr { + break + } + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) + x := v_0.Args[0] + if !(x.Op != OpSB && is20Bit(int64(c)+int64(d))) { + break + } + v.reset(OpS390XMOVDaddr) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg(x) + return true + } + // match: (ADDconst [c] (MOVDaddridx [d] {s} x y)) + // cond: is20Bit(int64(c)+int64(d)) + // result: (MOVDaddridx [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVDaddridx { + break + } + d := auxIntToInt32(v_0.AuxInt) + s := auxToSym(v_0.Aux) + y := v_0.Args[1] + x := v_0.Args[0] + if !(is20Bit(int64(c) + int64(d))) { + break + } + v.reset(OpS390XMOVDaddridx) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (ADDconst [0] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ADDconst [c] (MOVDconst [d])) + // result: (MOVDconst [int64(c)+d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(c) + d) + return true + } + // match: (ADDconst [c] (ADDconst [d] x)) + // cond: is32Bit(int64(c)+int64(d)) + // result: (ADDconst [c+d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XADDconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(int64(c) + int64(d))) { + break + } + v.reset(OpS390XADDconst) + v.AuxInt = int32ToAuxInt(c + d) + v.AddArg(x) + return 
true + } + return false +} +func rewriteValueS390X_OpS390XADDload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ADDload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (ADD x (LGDR y)) + for { + t := v.Type + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr1 := v_1 + if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + ptr2 := v_2.Args[0] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XADD) + v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (ADDload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) + // result: (ADDload [off1+off2] {sym} x ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpS390XADDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + // match: (ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (ADDload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XADDload) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSym(s1, s2)) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XAND(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (AND x (MOVDconst [c])) + // cond: s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)) != nil + // result: (RISBGZ x {*s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c))}) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)) != nil) { + continue + } + v.reset(OpS390XRISBGZ) + v.Aux = s390xRotateParamsToAux(*s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c))) + v.AddArg(x) + return true + } + break + } + // match: (AND x (MOVDconst [c])) + // cond: is32Bit(c) && c < 0 + // result: (ANDconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c) && c < 0) { + continue + } + v.reset(OpS390XANDconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (AND x (MOVDconst [c])) + // cond: is32Bit(c) && c >= 0 + // result: (MOVWZreg (ANDWconst [int32(c)] x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c) && c >= 0) { + continue + } + v.reset(OpS390XMOVWZreg) + v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (AND (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [c&d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpS390XMOVDconst { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(c & d) + return true + } + break + } 
+ // match: (AND x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (AND x g:(MOVDload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) + // result: (ANDload [off] {sym} x ptr mem) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 + if g.Op != OpS390XMOVDload { + continue + } + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XANDload) + v.Type = t + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueS390X_OpS390XANDW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ANDW x (MOVDconst [c])) + // result: (ANDWconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpS390XANDWconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + // match: (ANDW x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (ANDW x g:(MOVWload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) + // result: (ANDWload [off] {sym} x ptr mem) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 + if g.Op != OpS390XMOVWload { + continue + } + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XANDWload) + v.Type = t + v.AuxInt = 
int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + // match: (ANDW x g:(MOVWZload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) + // result: (ANDWload [off] {sym} x ptr mem) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 + if g.Op != OpS390XMOVWZload { + continue + } + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XANDWload) + v.Type = t + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueS390X_OpS390XANDWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ANDWconst [c] (ANDWconst [d] x)) + // result: (ANDWconst [c&d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XANDWconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpS390XANDWconst) + v.AuxInt = int32ToAuxInt(c & d) + v.AddArg(x) + return true + } + // match: (ANDWconst [0x00ff] x) + // result: (MOVBZreg x) + for { + if auxIntToInt32(v.AuxInt) != 0x00ff { + break + } + x := v_0 + v.reset(OpS390XMOVBZreg) + v.AddArg(x) + return true + } + // match: (ANDWconst [0xffff] x) + // result: (MOVHZreg x) + for { + if auxIntToInt32(v.AuxInt) != 0xffff { + break + } + x := v_0 + v.reset(OpS390XMOVHZreg) + v.AddArg(x) + return true + } + // match: (ANDWconst [c] _) + // cond: int32(c)==0 + // result: (MOVDconst [0]) + for { + c := auxIntToInt32(v.AuxInt) + if !(int32(c) == 0) { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (ANDWconst [c] x) + // cond: int32(c)==-1 + // result: x + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(int32(c) == -1) { + break + } + v.copyOf(x) + 
return true + } + // match: (ANDWconst [c] (MOVDconst [d])) + // result: (MOVDconst [int64(c)&d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(c) & d) + return true + } + return false +} +func rewriteValueS390X_OpS390XANDWload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ANDWload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) + // result: (ANDWload [off1+off2] {sym} x ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpS390XANDWload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + // match: (ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (ANDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XANDWload) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSym(s1, s2)) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XANDconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ANDconst [c] (ANDconst [d] x)) + // result: (ANDconst [c&d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpS390XANDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + 
v.reset(OpS390XANDconst) + v.AuxInt = int64ToAuxInt(c & d) + v.AddArg(x) + return true + } + // match: (ANDconst [0] _) + // result: (MOVDconst [0]) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (ANDconst [-1] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != -1 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ANDconst [c] (MOVDconst [d])) + // result: (MOVDconst [c&d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(c & d) + return true + } + return false +} +func rewriteValueS390X_OpS390XANDload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ANDload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (AND x (LGDR y)) + for { + t := v.Type + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr1 := v_1 + if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + ptr2 := v_2.Args[0] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XAND) + v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (ANDload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) + // result: (ANDload [off1+off2] {sym} x ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpS390XANDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + // match: (ANDload 
[o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (ANDload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XANDload) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSym(s1, s2)) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XCMP(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMP x (MOVDconst [c])) + // cond: is32Bit(c) + // result: (CMPconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + break + } + v.reset(OpS390XCMPconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (CMP (MOVDconst [c]) x) + // cond: is32Bit(c) + // result: (InvertFlags (CMPconst x [int32(c)])) + for { + if v_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(is32Bit(c)) { + break + } + v.reset(OpS390XInvertFlags) + v0 := b.NewValue0(v.Pos, OpS390XCMPconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMP x y) + // cond: canonLessThan(x,y) + // result: (InvertFlags (CMP y x)) + for { + x := v_0 + y := v_1 + if !(canonLessThan(x, y)) { + break + } + v.reset(OpS390XInvertFlags) + v0 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueS390X_OpS390XCMPU(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMPU x (MOVDconst [c])) + // cond: isU32Bit(c) + // result: (CMPUconst 
x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(isU32Bit(c)) { + break + } + v.reset(OpS390XCMPUconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (CMPU (MOVDconst [c]) x) + // cond: isU32Bit(c) + // result: (InvertFlags (CMPUconst x [int32(c)])) + for { + if v_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(isU32Bit(c)) { + break + } + v.reset(OpS390XInvertFlags) + v0 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMPU x y) + // cond: canonLessThan(x,y) + // result: (InvertFlags (CMPU y x)) + for { + x := v_0 + y := v_1 + if !(canonLessThan(x, y)) { + break + } + v.reset(OpS390XInvertFlags) + v0 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueS390X_OpS390XCMPUconst(v *Value) bool { + v_0 := v.Args[0] + // match: (CMPUconst (MOVDconst [x]) [y]) + // cond: uint64(x)==uint64(y) + // result: (FlagEQ) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if !(uint64(x) == uint64(y)) { + break + } + v.reset(OpS390XFlagEQ) + return true + } + // match: (CMPUconst (MOVDconst [x]) [y]) + // cond: uint64(x)uint64(y) + // result: (FlagGT) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if !(uint64(x) > uint64(y)) { + break + } + v.reset(OpS390XFlagGT) + return true + } + // match: (CMPUconst (SRDconst _ [c]) [n]) + // cond: c > 0 && c < 64 && (1< 0 && c < 64 && (1<= 0 + // result: (CMPWUconst x [c]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVWZreg { + break + } + x := v_0.Args[0] + if x.Op != OpS390XANDWconst { + break + } + m := auxIntToInt32(x.AuxInt) + if !(int32(m) >= 0) 
{ + break + } + v.reset(OpS390XCMPWUconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (CMPUconst (MOVWreg x:(ANDWconst [m] _)) [c]) + // cond: int32(m) >= 0 + // result: (CMPWUconst x [c]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVWreg { + break + } + x := v_0.Args[0] + if x.Op != OpS390XANDWconst { + break + } + m := auxIntToInt32(x.AuxInt) + if !(int32(m) >= 0) { + break + } + v.reset(OpS390XCMPWUconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XCMPW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMPW x (MOVDconst [c])) + // result: (CMPWconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpS390XCMPWconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (CMPW (MOVDconst [c]) x) + // result: (InvertFlags (CMPWconst x [int32(c)])) + for { + if v_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpS390XInvertFlags) + v0 := b.NewValue0(v.Pos, OpS390XCMPWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMPW x y) + // cond: canonLessThan(x,y) + // result: (InvertFlags (CMPW y x)) + for { + x := v_0 + y := v_1 + if !(canonLessThan(x, y)) { + break + } + v.reset(OpS390XInvertFlags) + v0 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + // match: (CMPW x (MOVWreg y)) + // result: (CMPW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVWreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XCMPW) + v.AddArg2(x, y) + return true + } + // match: (CMPW x (MOVWZreg y)) + // result: (CMPW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVWZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XCMPW) + v.AddArg2(x, y) + return true + } + // match: 
(CMPW (MOVWreg x) y) + // result: (CMPW x y) + for { + if v_0.Op != OpS390XMOVWreg { + break + } + x := v_0.Args[0] + y := v_1 + v.reset(OpS390XCMPW) + v.AddArg2(x, y) + return true + } + // match: (CMPW (MOVWZreg x) y) + // result: (CMPW x y) + for { + if v_0.Op != OpS390XMOVWZreg { + break + } + x := v_0.Args[0] + y := v_1 + v.reset(OpS390XCMPW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueS390X_OpS390XCMPWU(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (CMPWU x (MOVDconst [c])) + // result: (CMPWUconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpS390XCMPWUconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (CMPWU (MOVDconst [c]) x) + // result: (InvertFlags (CMPWUconst x [int32(c)])) + for { + if v_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpS390XInvertFlags) + v0 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMPWU x y) + // cond: canonLessThan(x,y) + // result: (InvertFlags (CMPWU y x)) + for { + x := v_0 + y := v_1 + if !(canonLessThan(x, y)) { + break + } + v.reset(OpS390XInvertFlags) + v0 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + // match: (CMPWU x (MOVWreg y)) + // result: (CMPWU x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVWreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XCMPWU) + v.AddArg2(x, y) + return true + } + // match: (CMPWU x (MOVWZreg y)) + // result: (CMPWU x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVWZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XCMPWU) + v.AddArg2(x, y) + return true + } + // match: (CMPWU (MOVWreg x) y) + // result: (CMPWU x y) + for { + if v_0.Op != OpS390XMOVWreg { + break + } + x := v_0.Args[0] + y := 
v_1 + v.reset(OpS390XCMPWU) + v.AddArg2(x, y) + return true + } + // match: (CMPWU (MOVWZreg x) y) + // result: (CMPWU x y) + for { + if v_0.Op != OpS390XMOVWZreg { + break + } + x := v_0.Args[0] + y := v_1 + v.reset(OpS390XCMPWU) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueS390X_OpS390XCMPWUconst(v *Value) bool { + v_0 := v.Args[0] + // match: (CMPWUconst (MOVDconst [x]) [y]) + // cond: uint32(x)==uint32(y) + // result: (FlagEQ) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if !(uint32(x) == uint32(y)) { + break + } + v.reset(OpS390XFlagEQ) + return true + } + // match: (CMPWUconst (MOVDconst [x]) [y]) + // cond: uint32(x)uint32(y) + // result: (FlagGT) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if !(uint32(x) > uint32(y)) { + break + } + v.reset(OpS390XFlagGT) + return true + } + // match: (CMPWUconst (MOVBZreg _) [c]) + // cond: 0xff < c + // result: (FlagLT) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVBZreg || !(0xff < c) { + break + } + v.reset(OpS390XFlagLT) + return true + } + // match: (CMPWUconst (MOVHZreg _) [c]) + // cond: 0xffff < c + // result: (FlagLT) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVHZreg || !(0xffff < c) { + break + } + v.reset(OpS390XFlagLT) + return true + } + // match: (CMPWUconst (SRWconst _ [c]) [n]) + // cond: c > 0 && c < 32 && (1< 0 && c < 32 && (1<int32(y) + // result: (FlagGT) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if !(int32(x) > int32(y)) { + break + } + v.reset(OpS390XFlagGT) + return true + } + // match: (CMPWconst (MOVBZreg _) [c]) + // cond: 0xff < c + // result: (FlagLT) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVBZreg || !(0xff < c) { + break + } + v.reset(OpS390XFlagLT) + return true + } + // match: 
(CMPWconst (MOVHZreg _) [c]) + // cond: 0xffff < c + // result: (FlagLT) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVHZreg || !(0xffff < c) { + break + } + v.reset(OpS390XFlagLT) + return true + } + // match: (CMPWconst (SRWconst _ [c]) [n]) + // cond: c > 0 && n < 0 + // result: (FlagGT) + for { + n := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XSRWconst { + break + } + c := auxIntToUint8(v_0.AuxInt) + if !(c > 0 && n < 0) { + break + } + v.reset(OpS390XFlagGT) + return true + } + // match: (CMPWconst (ANDWconst _ [m]) [n]) + // cond: int32(m) >= 0 && int32(m) < int32(n) + // result: (FlagLT) + for { + n := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XANDWconst { + break + } + m := auxIntToInt32(v_0.AuxInt) + if !(int32(m) >= 0 && int32(m) < int32(n)) { + break + } + v.reset(OpS390XFlagLT) + return true + } + // match: (CMPWconst x:(SRWconst _ [c]) [n]) + // cond: c > 0 && n >= 0 + // result: (CMPWUconst x [n]) + for { + n := auxIntToInt32(v.AuxInt) + x := v_0 + if x.Op != OpS390XSRWconst { + break + } + c := auxIntToUint8(x.AuxInt) + if !(c > 0 && n >= 0) { + break + } + v.reset(OpS390XCMPWUconst) + v.AuxInt = int32ToAuxInt(n) + v.AddArg(x) + return true + } + // match: (CMPWconst (MOVWreg x) [c]) + // result: (CMPWconst x [c]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVWreg { + break + } + x := v_0.Args[0] + v.reset(OpS390XCMPWconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (CMPWconst (MOVWZreg x) [c]) + // result: (CMPWconst x [c]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVWZreg { + break + } + x := v_0.Args[0] + v.reset(OpS390XCMPWconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XCMPconst(v *Value) bool { + v_0 := v.Args[0] + // match: (CMPconst (MOVDconst [x]) [y]) + // cond: x==int64(y) + // result: (FlagEQ) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + 
x := auxIntToInt64(v_0.AuxInt) + if !(x == int64(y)) { + break + } + v.reset(OpS390XFlagEQ) + return true + } + // match: (CMPconst (MOVDconst [x]) [y]) + // cond: xint64(y) + // result: (FlagGT) + for { + y := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + x := auxIntToInt64(v_0.AuxInt) + if !(x > int64(y)) { + break + } + v.reset(OpS390XFlagGT) + return true + } + // match: (CMPconst (SRDconst _ [c]) [n]) + // cond: c > 0 && n < 0 + // result: (FlagGT) + for { + n := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XSRDconst { + break + } + c := auxIntToUint8(v_0.AuxInt) + if !(c > 0 && n < 0) { + break + } + v.reset(OpS390XFlagGT) + return true + } + // match: (CMPconst (RISBGZ x {r}) [c]) + // cond: c > 0 && r.OutMask() < uint64(c) + // result: (FlagLT) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XRISBGZ { + break + } + r := auxToS390xRotateParams(v_0.Aux) + if !(c > 0 && r.OutMask() < uint64(c)) { + break + } + v.reset(OpS390XFlagLT) + return true + } + // match: (CMPconst (MOVWreg x) [c]) + // result: (CMPWconst x [c]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVWreg { + break + } + x := v_0.Args[0] + v.reset(OpS390XCMPWconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (CMPconst x:(MOVHreg _) [c]) + // result: (CMPWconst x [c]) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if x.Op != OpS390XMOVHreg { + break + } + v.reset(OpS390XCMPWconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (CMPconst x:(MOVHZreg _) [c]) + // result: (CMPWconst x [c]) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if x.Op != OpS390XMOVHZreg { + break + } + v.reset(OpS390XCMPWconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (CMPconst x:(MOVBreg _) [c]) + // result: (CMPWconst x [c]) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if x.Op != OpS390XMOVBreg { + break + } + v.reset(OpS390XCMPWconst) + v.AuxInt = 
int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (CMPconst x:(MOVBZreg _) [c]) + // result: (CMPWconst x [c]) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if x.Op != OpS390XMOVBZreg { + break + } + v.reset(OpS390XCMPWconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (CMPconst (MOVWZreg x:(ANDWconst [m] _)) [c]) + // cond: int32(m) >= 0 && c >= 0 + // result: (CMPWUconst x [c]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVWZreg { + break + } + x := v_0.Args[0] + if x.Op != OpS390XANDWconst { + break + } + m := auxIntToInt32(x.AuxInt) + if !(int32(m) >= 0 && c >= 0) { + break + } + v.reset(OpS390XCMPWUconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (CMPconst (MOVWreg x:(ANDWconst [m] _)) [c]) + // cond: int32(m) >= 0 && c >= 0 + // result: (CMPWUconst x [c]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVWreg { + break + } + x := v_0.Args[0] + if x.Op != OpS390XANDWconst { + break + } + m := auxIntToInt32(x.AuxInt) + if !(int32(m) >= 0 && c >= 0) { + break + } + v.reset(OpS390XCMPWUconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (CMPconst x:(SRDconst _ [c]) [n]) + // cond: c > 0 && n >= 0 + // result: (CMPUconst x [n]) + for { + n := auxIntToInt32(v.AuxInt) + x := v_0 + if x.Op != OpS390XSRDconst { + break + } + c := auxIntToUint8(x.AuxInt) + if !(c > 0 && n >= 0) { + break + } + v.reset(OpS390XCMPUconst) + v.AuxInt = int32ToAuxInt(n) + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XCPSDR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (CPSDR y (FMOVDconst [c])) + // cond: !math.Signbit(c) + // result: (LPDFR y) + for { + y := v_0 + if v_1.Op != OpS390XFMOVDconst { + break + } + c := auxIntToFloat64(v_1.AuxInt) + if !(!math.Signbit(c)) { + break + } + v.reset(OpS390XLPDFR) + v.AddArg(y) + return true + } + // match: (CPSDR y (FMOVDconst [c])) + // cond: 
math.Signbit(c) + // result: (LNDFR y) + for { + y := v_0 + if v_1.Op != OpS390XFMOVDconst { + break + } + c := auxIntToFloat64(v_1.AuxInt) + if !(math.Signbit(c)) { + break + } + v.reset(OpS390XLNDFR) + v.AddArg(y) + return true + } + return false +} +func rewriteValueS390X_OpS390XFCMP(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FCMP x (FMOVDconst [0.0])) + // result: (LTDBR x) + for { + x := v_0 + if v_1.Op != OpS390XFMOVDconst || auxIntToFloat64(v_1.AuxInt) != 0.0 { + break + } + v.reset(OpS390XLTDBR) + v.AddArg(x) + return true + } + // match: (FCMP (FMOVDconst [0.0]) x) + // result: (InvertFlags (LTDBR x)) + for { + if v_0.Op != OpS390XFMOVDconst || auxIntToFloat64(v_0.AuxInt) != 0.0 { + break + } + x := v_1 + v.reset(OpS390XInvertFlags) + v0 := b.NewValue0(v.Pos, OpS390XLTDBR, v.Type) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueS390X_OpS390XFCMPS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (FCMPS x (FMOVSconst [0.0])) + // result: (LTEBR x) + for { + x := v_0 + if v_1.Op != OpS390XFMOVSconst || auxIntToFloat32(v_1.AuxInt) != 0.0 { + break + } + v.reset(OpS390XLTEBR) + v.AddArg(x) + return true + } + // match: (FCMPS (FMOVSconst [0.0]) x) + // result: (InvertFlags (LTEBR x)) + for { + if v_0.Op != OpS390XFMOVSconst || auxIntToFloat32(v_0.AuxInt) != 0.0 { + break + } + x := v_1 + v.reset(OpS390XInvertFlags) + v0 := b.NewValue0(v.Pos, OpS390XLTEBR, v.Type) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (LDGR x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr1 := v_0 + if v_1.Op != OpS390XMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + x := v_1.Args[1] 
+ ptr2 := v_1.Args[0] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XLDGR) + v.AddArg(x) + return true + } + // match: (FMOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) + // result: x + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr1 := v_0 + if v_1.Op != OpS390XFMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.copyOf(x) + return true + } + // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is20Bit(int64(off1)+int64(off2)) + // result: (FMOVDload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is20Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpS390XFMOVDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XFMOVDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XFMOVDstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is20Bit(int64(off1)+int64(off2)) + // result: 
(FMOVDstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is20Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpS390XFMOVDstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XFMOVDstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVSload [off] {sym} ptr1 (FMOVSstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) + // result: x + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr1 := v_0 + if v_1.Op != OpS390XFMOVSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.copyOf(x) + return true + } + // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is20Bit(int64(off1)+int64(off2)) + // result: (FMOVSload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := 
v_1 + if !(is20Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpS390XFMOVSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XFMOVSload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XFMOVSstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is20Bit(int64(off1)+int64(off2)) + // result: (FMOVSstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is20Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpS390XFMOVSstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if 
!(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XFMOVSstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XFNEG(v *Value) bool { + v_0 := v.Args[0] + // match: (FNEG (LPDFR x)) + // result: (LNDFR x) + for { + if v_0.Op != OpS390XLPDFR { + break + } + x := v_0.Args[0] + v.reset(OpS390XLNDFR) + v.AddArg(x) + return true + } + // match: (FNEG (LNDFR x)) + // result: (LPDFR x) + for { + if v_0.Op != OpS390XLNDFR { + break + } + x := v_0.Args[0] + v.reset(OpS390XLPDFR) + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XFNEGS(v *Value) bool { + v_0 := v.Args[0] + // match: (FNEGS (LPDFR x)) + // result: (LNDFR x) + for { + if v_0.Op != OpS390XLPDFR { + break + } + x := v_0.Args[0] + v.reset(OpS390XLNDFR) + v.AddArg(x) + return true + } + // match: (FNEGS (LNDFR x)) + // result: (LPDFR x) + for { + if v_0.Op != OpS390XLNDFR { + break + } + x := v_0.Args[0] + v.reset(OpS390XLPDFR) + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XLDGR(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (LDGR (RISBGZ x {r})) + // cond: r == s390x.NewRotateParams(1, 63, 0) + // result: (LPDFR (LDGR x)) + for { + t := v.Type + if v_0.Op != OpS390XRISBGZ { + break + } + r := auxToS390xRotateParams(v_0.Aux) + x := v_0.Args[0] + if !(r == s390x.NewRotateParams(1, 63, 0)) { + break + } + v.reset(OpS390XLPDFR) + v0 := b.NewValue0(v.Pos, OpS390XLDGR, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (LDGR (OR (MOVDconst [-1<<63]) x)) + // result: (LNDFR (LDGR x)) + for { + t := v.Type + if v_0.Op != OpS390XOR { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0.AuxInt) != -1<<63 { + continue + } 
+ x := v_0_1 + v.reset(OpS390XLNDFR) + v0 := b.NewValue0(v.Pos, OpS390XLDGR, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (LDGR x:(ORload [off] {sym} (MOVDconst [-1<<63]) ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (LNDFR (LDGR (MOVDload [off] {sym} ptr mem))) + for { + t := v.Type + x := v_0 + if x.Op != OpS390XORload { + break + } + t1 := x.Type + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[2] + x_0 := x.Args[0] + if x_0.Op != OpS390XMOVDconst || auxIntToInt64(x_0.AuxInt) != -1<<63 { + break + } + ptr := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpS390XLNDFR, t) + v.copyOf(v0) + v1 := b.NewValue0(x.Pos, OpS390XLDGR, t) + v2 := b.NewValue0(x.Pos, OpS390XMOVDload, t1) + v2.AuxInt = int32ToAuxInt(off) + v2.Aux = symToAux(sym) + v2.AddArg2(ptr, mem) + v1.AddArg(v2) + v0.AddArg(v1) + return true + } + // match: (LDGR (LGDR x)) + // result: x + for { + if v_0.Op != OpS390XLGDR { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XLEDBR(v *Value) bool { + v_0 := v.Args[0] + // match: (LEDBR (LPDFR (LDEBR x))) + // result: (LPDFR x) + for { + if v_0.Op != OpS390XLPDFR { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XLDEBR { + break + } + x := v_0_0.Args[0] + v.reset(OpS390XLPDFR) + v.AddArg(x) + return true + } + // match: (LEDBR (LNDFR (LDEBR x))) + // result: (LNDFR x) + for { + if v_0.Op != OpS390XLNDFR { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XLDEBR { + break + } + x := v_0_0.Args[0] + v.reset(OpS390XLNDFR) + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XLGDR(v *Value) bool { + v_0 := v.Args[0] + // match: (LGDR (LDGR x)) + // result: x + for { + if v_0.Op != OpS390XLDGR { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XLOCGR(v *Value) bool { + 
v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LOCGR {c} x y (InvertFlags cmp)) + // result: (LOCGR {c.ReverseComparison()} x y cmp) + for { + c := auxToS390xCCMask(v.Aux) + x := v_0 + y := v_1 + if v_2.Op != OpS390XInvertFlags { + break + } + cmp := v_2.Args[0] + v.reset(OpS390XLOCGR) + v.Aux = s390xCCMaskToAux(c.ReverseComparison()) + v.AddArg3(x, y, cmp) + return true + } + // match: (LOCGR {c} _ x (FlagEQ)) + // cond: c&s390x.Equal != 0 + // result: x + for { + c := auxToS390xCCMask(v.Aux) + x := v_1 + if v_2.Op != OpS390XFlagEQ || !(c&s390x.Equal != 0) { + break + } + v.copyOf(x) + return true + } + // match: (LOCGR {c} _ x (FlagLT)) + // cond: c&s390x.Less != 0 + // result: x + for { + c := auxToS390xCCMask(v.Aux) + x := v_1 + if v_2.Op != OpS390XFlagLT || !(c&s390x.Less != 0) { + break + } + v.copyOf(x) + return true + } + // match: (LOCGR {c} _ x (FlagGT)) + // cond: c&s390x.Greater != 0 + // result: x + for { + c := auxToS390xCCMask(v.Aux) + x := v_1 + if v_2.Op != OpS390XFlagGT || !(c&s390x.Greater != 0) { + break + } + v.copyOf(x) + return true + } + // match: (LOCGR {c} _ x (FlagOV)) + // cond: c&s390x.Unordered != 0 + // result: x + for { + c := auxToS390xCCMask(v.Aux) + x := v_1 + if v_2.Op != OpS390XFlagOV || !(c&s390x.Unordered != 0) { + break + } + v.copyOf(x) + return true + } + // match: (LOCGR {c} x _ (FlagEQ)) + // cond: c&s390x.Equal == 0 + // result: x + for { + c := auxToS390xCCMask(v.Aux) + x := v_0 + if v_2.Op != OpS390XFlagEQ || !(c&s390x.Equal == 0) { + break + } + v.copyOf(x) + return true + } + // match: (LOCGR {c} x _ (FlagLT)) + // cond: c&s390x.Less == 0 + // result: x + for { + c := auxToS390xCCMask(v.Aux) + x := v_0 + if v_2.Op != OpS390XFlagLT || !(c&s390x.Less == 0) { + break + } + v.copyOf(x) + return true + } + // match: (LOCGR {c} x _ (FlagGT)) + // cond: c&s390x.Greater == 0 + // result: x + for { + c := auxToS390xCCMask(v.Aux) + x := v_0 + if v_2.Op != OpS390XFlagGT || !(c&s390x.Greater == 0) { + break 
+ } + v.copyOf(x) + return true + } + // match: (LOCGR {c} x _ (FlagOV)) + // cond: c&s390x.Unordered == 0 + // result: x + for { + c := auxToS390xCCMask(v.Aux) + x := v_0 + if v_2.Op != OpS390XFlagOV || !(c&s390x.Unordered == 0) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XLTDBR(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (LTDBR (Select0 x:(FADD _ _))) + // cond: b == x.Block + // result: (Select1 x) + for { + if v_0.Op != OpSelect0 { + break + } + x := v_0.Args[0] + if x.Op != OpS390XFADD || !(b == x.Block) { + break + } + v.reset(OpSelect1) + v.AddArg(x) + return true + } + // match: (LTDBR (Select0 x:(FSUB _ _))) + // cond: b == x.Block + // result: (Select1 x) + for { + if v_0.Op != OpSelect0 { + break + } + x := v_0.Args[0] + if x.Op != OpS390XFSUB || !(b == x.Block) { + break + } + v.reset(OpSelect1) + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XLTEBR(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (LTEBR (Select0 x:(FADDS _ _))) + // cond: b == x.Block + // result: (Select1 x) + for { + if v_0.Op != OpSelect0 { + break + } + x := v_0.Args[0] + if x.Op != OpS390XFADDS || !(b == x.Block) { + break + } + v.reset(OpSelect1) + v.AddArg(x) + return true + } + // match: (LTEBR (Select0 x:(FSUBS _ _))) + // cond: b == x.Block + // result: (Select1 x) + for { + if v_0.Op != OpSelect0 { + break + } + x := v_0.Args[0] + if x.Op != OpS390XFSUBS || !(b == x.Block) { + break + } + v.reset(OpSelect1) + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XLoweredRound32F(v *Value) bool { + v_0 := v.Args[0] + // match: (LoweredRound32F x:(FMOVSconst)) + // result: x + for { + x := v_0 + if x.Op != OpS390XFMOVSconst { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XLoweredRound64F(v *Value) bool { + v_0 := v.Args[0] + // match: (LoweredRound64F x:(FMOVDconst)) + // result: x + for { + x 
:= v_0 + if x.Op != OpS390XFMOVDconst { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBZload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (MOVBZreg x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr1 := v_0 + if v_1.Op != OpS390XMOVBstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XMOVBZreg) + v.AddArg(x) + return true + } + // match: (MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is20Bit(int64(off1)+int64(off2)) + // result: (MOVBZload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is20Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpS390XMOVBZload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVBZload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: 
(MOVBZreg e:(MOVBreg x)) + // cond: clobberIfDead(e) + // result: (MOVBZreg x) + for { + e := v_0 + if e.Op != OpS390XMOVBreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVBZreg) + v.AddArg(x) + return true + } + // match: (MOVBZreg e:(MOVHreg x)) + // cond: clobberIfDead(e) + // result: (MOVBZreg x) + for { + e := v_0 + if e.Op != OpS390XMOVHreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVBZreg) + v.AddArg(x) + return true + } + // match: (MOVBZreg e:(MOVWreg x)) + // cond: clobberIfDead(e) + // result: (MOVBZreg x) + for { + e := v_0 + if e.Op != OpS390XMOVWreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVBZreg) + v.AddArg(x) + return true + } + // match: (MOVBZreg e:(MOVBZreg x)) + // cond: clobberIfDead(e) + // result: (MOVBZreg x) + for { + e := v_0 + if e.Op != OpS390XMOVBZreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVBZreg) + v.AddArg(x) + return true + } + // match: (MOVBZreg e:(MOVHZreg x)) + // cond: clobberIfDead(e) + // result: (MOVBZreg x) + for { + e := v_0 + if e.Op != OpS390XMOVHZreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVBZreg) + v.AddArg(x) + return true + } + // match: (MOVBZreg e:(MOVWZreg x)) + // cond: clobberIfDead(e) + // result: (MOVBZreg x) + for { + e := v_0 + if e.Op != OpS390XMOVWZreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVBZreg) + v.AddArg(x) + return true + } + // match: (MOVBZreg x:(MOVBZload _ _)) + // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) + // result: x + for { + x := v_0 + if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) { + break + } + v.copyOf(x) + return true + } + // match: (MOVBZreg x:(MOVBload [o] {s} p mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBZload [o] {s} p mem) + for { + t := 
v.Type + x := v_0 + if x.Op != OpS390XMOVBload { + break + } + o := auxIntToInt32(x.AuxInt) + s := auxToSym(x.Aux) + mem := x.Args[1] + p := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpS390XMOVBZload, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(o) + v0.Aux = symToAux(s) + v0.AddArg2(p, mem) + return true + } + // match: (MOVBZreg x:(Arg )) + // cond: !t.IsSigned() && t.Size() == 1 + // result: x + for { + x := v_0 + if x.Op != OpArg { + break + } + t := x.Type + if !(!t.IsSigned() && t.Size() == 1) { + break + } + v.copyOf(x) + return true + } + // match: (MOVBZreg (MOVDconst [c])) + // result: (MOVDconst [int64( uint8(c))]) + for { + if v_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint8(c))) + return true + } + // match: (MOVBZreg x:(LOCGR (MOVDconst [c]) (MOVDconst [d]) _)) + // cond: int64(uint8(c)) == c && int64(uint8(d)) == d && (!x.Type.IsSigned() || x.Type.Size() > 1) + // result: x + for { + x := v_0 + if x.Op != OpS390XLOCGR { + break + } + _ = x.Args[1] + x_0 := x.Args[0] + if x_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(x_0.AuxInt) + x_1 := x.Args[1] + if x_1.Op != OpS390XMOVDconst { + break + } + d := auxIntToInt64(x_1.AuxInt) + if !(int64(uint8(c)) == c && int64(uint8(d)) == d && (!x.Type.IsSigned() || x.Type.Size() > 1)) { + break + } + v.copyOf(x) + return true + } + // match: (MOVBZreg (RISBGZ x {r})) + // cond: r.OutMerge(0x000000ff) != nil + // result: (RISBGZ x {*r.OutMerge(0x000000ff)}) + for { + if v_0.Op != OpS390XRISBGZ { + break + } + r := auxToS390xRotateParams(v_0.Aux) + x := v_0.Args[0] + if !(r.OutMerge(0x000000ff) != nil) { + break + } + v.reset(OpS390XRISBGZ) + v.Aux = s390xRotateParamsToAux(*r.OutMerge(0x000000ff)) + v.AddArg(x) + return true + } + // match: (MOVBZreg (ANDWconst [m] x)) + // result: (MOVWZreg (ANDWconst [int32( uint8(m))] x)) + for { + if v_0.Op != 
OpS390XANDWconst { + break + } + m := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpS390XMOVWZreg) + v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(int32(uint8(m))) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVBload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (MOVBreg x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr1 := v_0 + if v_1.Op != OpS390XMOVBstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XMOVBreg) + v.AddArg(x) + return true + } + // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is20Bit(int64(off1)+int64(off2)) + // result: (MOVBload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is20Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpS390XMOVBload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVBload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + 
return true + } + return false +} +func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MOVBreg e:(MOVBreg x)) + // cond: clobberIfDead(e) + // result: (MOVBreg x) + for { + e := v_0 + if e.Op != OpS390XMOVBreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVBreg) + v.AddArg(x) + return true + } + // match: (MOVBreg e:(MOVHreg x)) + // cond: clobberIfDead(e) + // result: (MOVBreg x) + for { + e := v_0 + if e.Op != OpS390XMOVHreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVBreg) + v.AddArg(x) + return true + } + // match: (MOVBreg e:(MOVWreg x)) + // cond: clobberIfDead(e) + // result: (MOVBreg x) + for { + e := v_0 + if e.Op != OpS390XMOVWreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVBreg) + v.AddArg(x) + return true + } + // match: (MOVBreg e:(MOVBZreg x)) + // cond: clobberIfDead(e) + // result: (MOVBreg x) + for { + e := v_0 + if e.Op != OpS390XMOVBZreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVBreg) + v.AddArg(x) + return true + } + // match: (MOVBreg e:(MOVHZreg x)) + // cond: clobberIfDead(e) + // result: (MOVBreg x) + for { + e := v_0 + if e.Op != OpS390XMOVHZreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVBreg) + v.AddArg(x) + return true + } + // match: (MOVBreg e:(MOVWZreg x)) + // cond: clobberIfDead(e) + // result: (MOVBreg x) + for { + e := v_0 + if e.Op != OpS390XMOVWZreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVBreg) + v.AddArg(x) + return true + } + // match: (MOVBreg x:(MOVBload _ _)) + // cond: (x.Type.IsSigned() || x.Type.Size() == 8) + // result: x + for { + x := v_0 + if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) { + break + } + v.copyOf(x) + return true + } + // 
match: (MOVBreg x:(MOVBZload [o] {s} p mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBload [o] {s} p mem) + for { + t := v.Type + x := v_0 + if x.Op != OpS390XMOVBZload { + break + } + o := auxIntToInt32(x.AuxInt) + s := auxToSym(x.Aux) + mem := x.Args[1] + p := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpS390XMOVBload, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(o) + v0.Aux = symToAux(s) + v0.AddArg2(p, mem) + return true + } + // match: (MOVBreg x:(Arg )) + // cond: t.IsSigned() && t.Size() == 1 + // result: x + for { + x := v_0 + if x.Op != OpArg { + break + } + t := x.Type + if !(t.IsSigned() && t.Size() == 1) { + break + } + v.copyOf(x) + return true + } + // match: (MOVBreg (MOVDconst [c])) + // result: (MOVDconst [int64( int8(c))]) + for { + if v_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(int8(c))) + return true + } + // match: (MOVBreg (ANDWconst [m] x)) + // cond: int8(m) >= 0 + // result: (MOVWZreg (ANDWconst [int32( uint8(m))] x)) + for { + if v_0.Op != OpS390XANDWconst { + break + } + m := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(int8(m) >= 0) { + break + } + v.reset(OpS390XMOVWZreg) + v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(int32(uint8(m))) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpS390XMOVBreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpS390XMOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} 
ptr (MOVBZreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpS390XMOVBZreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpS390XMOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is20Bit(int64(off1)+int64(off2)) + // result: (MOVBstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is20Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpS390XMOVBstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) + // cond: is20Bit(int64(off)) && ptr.Op != OpSB + // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is20Bit(int64(off)) && ptr.Op != OpSB) { + break + } + v.reset(OpS390XMOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, 
sym2)) { + break + } + v.reset(OpS390XMOVBstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) + // cond: is20Bit(sc.Off64()+int64(off)) + // result: (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is20Bit(sc.Off64() + int64(off))) { + break + } + v.reset(OpS390XMOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(s) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) + // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) + // result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)) { + break + } + v.reset(OpS390XMOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDBR(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MOVDBR x:(MOVDload [off] {sym} ptr mem)) + // cond: x.Uses == 1 + // result: @x.Block (MOVDBRload [off] {sym} ptr mem) + for { + x := v_0 + if x.Op != OpS390XMOVDload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1) { + break + } 
+ b = x.Block + v0 := b.NewValue0(x.Pos, OpS390XMOVDBRload, typ.UInt64) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg2(ptr, mem) + return true + } + // match: (MOVDBR x:(MOVDloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 + // result: @x.Block (MOVDBRloadidx [off] {sym} ptr idx mem) + for { + x := v_0 + if x.Op != OpS390XMOVDloadidx { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + if !(x.Uses == 1) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) + v0.AddArg3(ptr, idx, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDaddridx [c] {s} (ADDconst [d] x) y) + // cond: is20Bit(int64(c)+int64(d)) + // result: (MOVDaddridx [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + y := v_1 + if !(is20Bit(int64(c) + int64(d))) { + break + } + v.reset(OpS390XMOVDaddridx) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (MOVDaddridx [c] {s} x (ADDconst [d] y)) + // cond: is20Bit(int64(c)+int64(d)) + // result: (MOVDaddridx [c+d] {s} x y) + for { + c := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XADDconst { + break + } + d := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(is20Bit(int64(c) + int64(d))) { + break + } + v.reset(OpS390XMOVDaddridx) + v.AuxInt = int32ToAuxInt(c + d) + v.Aux = symToAux(s) + v.AddArg2(x, y) + return true + } + // match: (MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (MOVDaddridx [off1+off2] 
{mergeSym(sym1,sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + x := v_0.Args[0] + y := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + break + } + v.reset(OpS390XMOVDaddridx) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + // match: (MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB + // result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XMOVDaddr { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) + y := v_1.Args[0] + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB) { + break + } + v.reset(OpS390XMOVDaddridx) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) + // result: x + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr1 := v_0 + if v_1.Op != OpS390XMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.copyOf(x) + return true + } + // match: (MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (LGDR x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr1 := v_0 + if v_1.Op != OpS390XFMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym 
{ + break + } + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XLGDR) + v.AddArg(x) + return true + } + // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is20Bit(int64(off1)+int64(off2)) + // result: (MOVDload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is20Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpS390XMOVDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) + // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + t := v_0.Type + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))) { + break + } + v.reset(OpS390XMOVDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is20Bit(int64(off1)+int64(off2)) + // result: (MOVDstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val 
:= v_1 + mem := v_2 + if !(is20Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpS390XMOVDstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) + // cond: is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB + // result: (MOVDstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB) { + break + } + v.reset(OpS390XMOVDstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) + // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + t := v_0.Type + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))) { + break + } + v.reset(OpS390XMOVDstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem)) + // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x) + // result: (STMG2 [i-8] {s} p w0 w1 mem) + for { + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + p := v_0 + w1 := v_1 + x := v_2 + 
if x.Op != OpS390XMOVDstore || auxIntToInt32(x.AuxInt) != i-8 || auxToSym(x.Aux) != s { + break + } + mem := x.Args[2] + if p != x.Args[0] { + break + } + w0 := x.Args[1] + if !(p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)) { + break + } + v.reset(OpS390XSTMG2) + v.AuxInt = int32ToAuxInt(i - 8) + v.Aux = symToAux(s) + v.AddArg4(p, w0, w1, mem) + return true + } + // match: (MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem)) + // cond: x.Uses == 1 && is20Bit(int64(i)-16) && setPos(v, x.Pos) && clobber(x) + // result: (STMG3 [i-16] {s} p w0 w1 w2 mem) + for { + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + p := v_0 + w2 := v_1 + x := v_2 + if x.Op != OpS390XSTMG2 || auxIntToInt32(x.AuxInt) != i-16 || auxToSym(x.Aux) != s { + break + } + mem := x.Args[3] + if p != x.Args[0] { + break + } + w0 := x.Args[1] + w1 := x.Args[2] + if !(x.Uses == 1 && is20Bit(int64(i)-16) && setPos(v, x.Pos) && clobber(x)) { + break + } + v.reset(OpS390XSTMG3) + v.AuxInt = int32ToAuxInt(i - 16) + v.Aux = symToAux(s) + v.AddArg5(p, w0, w1, w2, mem) + return true + } + // match: (MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem)) + // cond: x.Uses == 1 && is20Bit(int64(i)-24) && setPos(v, x.Pos) && clobber(x) + // result: (STMG4 [i-24] {s} p w0 w1 w2 w3 mem) + for { + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + p := v_0 + w3 := v_1 + x := v_2 + if x.Op != OpS390XSTMG3 || auxIntToInt32(x.AuxInt) != i-24 || auxToSym(x.Aux) != s { + break + } + mem := x.Args[4] + if p != x.Args[0] { + break + } + w0 := x.Args[1] + w1 := x.Args[2] + w2 := x.Args[3] + if !(x.Uses == 1 && is20Bit(int64(i)-24) && setPos(v, x.Pos) && clobber(x)) { + break + } + v.reset(OpS390XSTMG4) + v.AuxInt = int32ToAuxInt(i - 24) + v.Aux = symToAux(s) + v.AddArg6(p, w0, w1, w2, w3, mem) + return true + } + // match: (MOVDstore [off] {sym} ptr r:(MOVDBR x) mem) + // cond: r.Uses == 1 + // result: (MOVDBRstore [off] {sym} ptr x mem) + for { + off := 
auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + r := v_1 + if r.Op != OpS390XMOVDBR { + break + } + x := r.Args[0] + mem := v_2 + if !(r.Uses == 1) { + break + } + v.reset(OpS390XMOVDBRstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) + // cond: isU12Bit(sc.Off64()+int64(off)) + // result: (MOVDstoreconst [sc.addOffset32(off)] {s} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(isU12Bit(sc.Off64() + int64(off))) { + break + } + v.reset(OpS390XMOVDstoreconst) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(s) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) + // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) + // result: (MOVDstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)) { + break + } + v.reset(OpS390XMOVDstoreconst) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDstoreidx(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDstoreidx [off] {sym} ptr idx r:(MOVDBR x) mem) + // cond: r.Uses == 1 + // result: (MOVDBRstoreidx [off] {sym} ptr idx x mem) + for { + off := 
auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + r := v_2 + if r.Op != OpS390XMOVDBR { + break + } + x := r.Args[0] + mem := v_3 + if !(r.Uses == 1) { + break + } + v.reset(OpS390XMOVDBRstoreidx) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(ptr, idx, x, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHZload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (MOVHZreg x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr1 := v_0 + if v_1.Op != OpS390XMOVHstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XMOVHZreg) + v.AddArg(x) + return true + } + // match: (MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is20Bit(int64(off1)+int64(off2)) + // result: (MOVHZload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is20Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpS390XMOVHZload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) + // result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + t := v_0.Type + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if 
!(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) { + break + } + v.reset(OpS390XMOVHZload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MOVHZreg e:(MOVBZreg x)) + // cond: clobberIfDead(e) + // result: (MOVBZreg x) + for { + e := v_0 + if e.Op != OpS390XMOVBZreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVBZreg) + v.AddArg(x) + return true + } + // match: (MOVHZreg e:(MOVHreg x)) + // cond: clobberIfDead(e) + // result: (MOVHZreg x) + for { + e := v_0 + if e.Op != OpS390XMOVHreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVHZreg) + v.AddArg(x) + return true + } + // match: (MOVHZreg e:(MOVWreg x)) + // cond: clobberIfDead(e) + // result: (MOVHZreg x) + for { + e := v_0 + if e.Op != OpS390XMOVWreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVHZreg) + v.AddArg(x) + return true + } + // match: (MOVHZreg e:(MOVHZreg x)) + // cond: clobberIfDead(e) + // result: (MOVHZreg x) + for { + e := v_0 + if e.Op != OpS390XMOVHZreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVHZreg) + v.AddArg(x) + return true + } + // match: (MOVHZreg e:(MOVWZreg x)) + // cond: clobberIfDead(e) + // result: (MOVHZreg x) + for { + e := v_0 + if e.Op != OpS390XMOVWZreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVHZreg) + v.AddArg(x) + return true + } + // match: (MOVHZreg x:(MOVBZload _ _)) + // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) + // result: x + for { + x := v_0 + if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) 
{ + break + } + v.copyOf(x) + return true + } + // match: (MOVHZreg x:(MOVHZload _ _)) + // cond: (!x.Type.IsSigned() || x.Type.Size() > 2) + // result: x + for { + x := v_0 + if x.Op != OpS390XMOVHZload || !(!x.Type.IsSigned() || x.Type.Size() > 2) { + break + } + v.copyOf(x) + return true + } + // match: (MOVHZreg x:(MOVHload [o] {s} p mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHZload [o] {s} p mem) + for { + t := v.Type + x := v_0 + if x.Op != OpS390XMOVHload { + break + } + o := auxIntToInt32(x.AuxInt) + s := auxToSym(x.Aux) + mem := x.Args[1] + p := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpS390XMOVHZload, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(o) + v0.Aux = symToAux(s) + v0.AddArg2(p, mem) + return true + } + // match: (MOVHZreg x:(Arg )) + // cond: !t.IsSigned() && t.Size() <= 2 + // result: x + for { + x := v_0 + if x.Op != OpArg { + break + } + t := x.Type + if !(!t.IsSigned() && t.Size() <= 2) { + break + } + v.copyOf(x) + return true + } + // match: (MOVHZreg (MOVDconst [c])) + // result: (MOVDconst [int64(uint16(c))]) + for { + if v_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint16(c))) + return true + } + // match: (MOVHZreg (RISBGZ x {r})) + // cond: r.OutMerge(0x0000ffff) != nil + // result: (RISBGZ x {*r.OutMerge(0x0000ffff)}) + for { + if v_0.Op != OpS390XRISBGZ { + break + } + r := auxToS390xRotateParams(v_0.Aux) + x := v_0.Args[0] + if !(r.OutMerge(0x0000ffff) != nil) { + break + } + v.reset(OpS390XRISBGZ) + v.Aux = s390xRotateParamsToAux(*r.OutMerge(0x0000ffff)) + v.AddArg(x) + return true + } + // match: (MOVHZreg (ANDWconst [m] x)) + // result: (MOVWZreg (ANDWconst [int32(uint16(m))] x)) + for { + if v_0.Op != OpS390XANDWconst { + break + } + m := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpS390XMOVWZreg) + v0 := b.NewValue0(v.Pos, OpS390XANDWconst, 
typ.UInt32) + v0.AuxInt = int32ToAuxInt(int32(uint16(m))) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (MOVHreg x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr1 := v_0 + if v_1.Op != OpS390XMOVHstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XMOVHreg) + v.AddArg(x) + return true + } + // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is20Bit(int64(off1)+int64(off2)) + // result: (MOVHload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is20Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpS390XMOVHload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) + // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + t := v_0.Type + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) { + break + } + v.reset(OpS390XMOVHload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = 
symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MOVHreg e:(MOVBreg x)) + // cond: clobberIfDead(e) + // result: (MOVBreg x) + for { + e := v_0 + if e.Op != OpS390XMOVBreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVBreg) + v.AddArg(x) + return true + } + // match: (MOVHreg e:(MOVHreg x)) + // cond: clobberIfDead(e) + // result: (MOVHreg x) + for { + e := v_0 + if e.Op != OpS390XMOVHreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVHreg) + v.AddArg(x) + return true + } + // match: (MOVHreg e:(MOVWreg x)) + // cond: clobberIfDead(e) + // result: (MOVHreg x) + for { + e := v_0 + if e.Op != OpS390XMOVWreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVHreg) + v.AddArg(x) + return true + } + // match: (MOVHreg e:(MOVHZreg x)) + // cond: clobberIfDead(e) + // result: (MOVHreg x) + for { + e := v_0 + if e.Op != OpS390XMOVHZreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVHreg) + v.AddArg(x) + return true + } + // match: (MOVHreg e:(MOVWZreg x)) + // cond: clobberIfDead(e) + // result: (MOVHreg x) + for { + e := v_0 + if e.Op != OpS390XMOVWZreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVHreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBload _ _)) + // cond: (x.Type.IsSigned() || x.Type.Size() == 8) + // result: x + for { + x := v_0 + if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) { + break + } + v.copyOf(x) + return true + } + // match: (MOVHreg x:(MOVHload _ _)) + // cond: (x.Type.IsSigned() || x.Type.Size() == 8) + // result: x + for { + x := v_0 + if x.Op != OpS390XMOVHload || !(x.Type.IsSigned() || x.Type.Size() == 8) { + break + } + 
v.copyOf(x) + return true + } + // match: (MOVHreg x:(MOVBZload _ _)) + // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) + // result: x + for { + x := v_0 + if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) { + break + } + v.copyOf(x) + return true + } + // match: (MOVHreg x:(MOVHZload [o] {s} p mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHload [o] {s} p mem) + for { + t := v.Type + x := v_0 + if x.Op != OpS390XMOVHZload { + break + } + o := auxIntToInt32(x.AuxInt) + s := auxToSym(x.Aux) + mem := x.Args[1] + p := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpS390XMOVHload, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(o) + v0.Aux = symToAux(s) + v0.AddArg2(p, mem) + return true + } + // match: (MOVHreg x:(Arg )) + // cond: t.IsSigned() && t.Size() <= 2 + // result: x + for { + x := v_0 + if x.Op != OpArg { + break + } + t := x.Type + if !(t.IsSigned() && t.Size() <= 2) { + break + } + v.copyOf(x) + return true + } + // match: (MOVHreg (MOVDconst [c])) + // result: (MOVDconst [int64(int16(c))]) + for { + if v_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(int16(c))) + return true + } + // match: (MOVHreg (ANDWconst [m] x)) + // cond: int16(m) >= 0 + // result: (MOVWZreg (ANDWconst [int32(uint16(m))] x)) + for { + if v_0.Op != OpS390XANDWconst { + break + } + m := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(int16(m) >= 0) { + break + } + v.reset(OpS390XMOVWZreg) + v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(int32(uint16(m))) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := 
auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpS390XMOVHreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpS390XMOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpS390XMOVHZreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpS390XMOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is20Bit(int64(off1)+int64(off2)) + // result: (MOVHstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is20Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpS390XMOVHstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) + // cond: isU12Bit(int64(off)) && ptr.Op != OpSB + // result: (MOVHstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(isU12Bit(int64(off)) && ptr.Op != OpSB) { + break + } + v.reset(OpS390XMOVHstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && 
t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) + // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + t := v_0.Type + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) { + break + } + v.reset(OpS390XMOVHstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (Bswap16 val) mem) + // result: (MOVHBRstore [off] {sym} ptr val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpBswap16 { + break + } + val := v_1.Args[0] + mem := v_2 + v.reset(OpS390XMOVHBRstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) + // cond: isU12Bit(sc.Off64()+int64(off)) + // result: (MOVHstoreconst [sc.addOffset32(off)] {s} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(isU12Bit(sc.Off64() + int64(off))) { + break + } + v.reset(OpS390XMOVHstoreconst) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(s) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) + // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) + // result: (MOVHstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc 
:= auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)) { + break + } + v.reset(OpS390XMOVHstoreconst) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstoreidx [off] {sym} ptr idx (Bswap16 val) mem) + // result: (MOVHBRstoreidx [off] {sym} ptr idx val mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + if v_2.Op != OpBswap16 { + break + } + val := v_2.Args[0] + mem := v_3 + v.reset(OpS390XMOVHBRstoreidx) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(ptr, idx, val, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWBR(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MOVWBR x:(MOVWZload [off] {sym} ptr mem)) + // cond: x.Uses == 1 + // result: @x.Block (MOVWZreg (MOVWBRload [off] {sym} ptr mem)) + for { + x := v_0 + if x.Op != OpS390XMOVWZload { + break + } + off := auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[1] + ptr := x.Args[0] + if !(x.Uses == 1) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpS390XMOVWZreg, typ.UInt64) + v.copyOf(v0) + v1 := b.NewValue0(x.Pos, OpS390XMOVWBRload, typ.UInt32) + v1.AuxInt = int32ToAuxInt(off) + v1.Aux = symToAux(sym) + v1.AddArg2(ptr, mem) + v0.AddArg(v1) + return true + } + // match: (MOVWBR x:(MOVWZloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 + // result: @x.Block (MOVWZreg (MOVWBRloadidx [off] {sym} ptr idx mem)) + for { + x := v_0 + if x.Op != OpS390XMOVWZloadidx { + break + } + off := 
auxIntToInt32(x.AuxInt) + sym := auxToSym(x.Aux) + mem := x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + if !(x.Uses == 1) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) + v.copyOf(v0) + v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32) + v1.AuxInt = int32ToAuxInt(off) + v1.Aux = symToAux(sym) + v1.AddArg3(ptr, idx, mem) + v0.AddArg(v1) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWZload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (MOVWZreg x) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr1 := v_0 + if v_1.Op != OpS390XMOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XMOVWZreg) + v.AddArg(x) + return true + } + // match: (MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is20Bit(int64(off1)+int64(off2)) + // result: (MOVWZload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is20Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpS390XMOVWZload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) + // result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + t := v_0.Type + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := 
auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) { + break + } + v.reset(OpS390XMOVWZload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVWZreg e:(MOVBZreg x)) + // cond: clobberIfDead(e) + // result: (MOVBZreg x) + for { + e := v_0 + if e.Op != OpS390XMOVBZreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVBZreg) + v.AddArg(x) + return true + } + // match: (MOVWZreg e:(MOVHZreg x)) + // cond: clobberIfDead(e) + // result: (MOVHZreg x) + for { + e := v_0 + if e.Op != OpS390XMOVHZreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVHZreg) + v.AddArg(x) + return true + } + // match: (MOVWZreg e:(MOVWreg x)) + // cond: clobberIfDead(e) + // result: (MOVWZreg x) + for { + e := v_0 + if e.Op != OpS390XMOVWreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVWZreg) + v.AddArg(x) + return true + } + // match: (MOVWZreg e:(MOVWZreg x)) + // cond: clobberIfDead(e) + // result: (MOVWZreg x) + for { + e := v_0 + if e.Op != OpS390XMOVWZreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVWZreg) + v.AddArg(x) + return true + } + // match: (MOVWZreg x:(MOVBZload _ _)) + // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) + // result: x + for { + x := v_0 + if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) { + break + } + v.copyOf(x) + return true + } + // match: (MOVWZreg x:(MOVHZload _ _)) + // cond: (!x.Type.IsSigned() || x.Type.Size() > 2) + // result: x + for { + x := v_0 + if x.Op != OpS390XMOVHZload || !(!x.Type.IsSigned() || 
x.Type.Size() > 2) { + break + } + v.copyOf(x) + return true + } + // match: (MOVWZreg x:(MOVWZload _ _)) + // cond: (!x.Type.IsSigned() || x.Type.Size() > 4) + // result: x + for { + x := v_0 + if x.Op != OpS390XMOVWZload || !(!x.Type.IsSigned() || x.Type.Size() > 4) { + break + } + v.copyOf(x) + return true + } + // match: (MOVWZreg x:(MOVWload [o] {s} p mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWZload [o] {s} p mem) + for { + t := v.Type + x := v_0 + if x.Op != OpS390XMOVWload { + break + } + o := auxIntToInt32(x.AuxInt) + s := auxToSym(x.Aux) + mem := x.Args[1] + p := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpS390XMOVWZload, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(o) + v0.Aux = symToAux(s) + v0.AddArg2(p, mem) + return true + } + // match: (MOVWZreg x:(Arg )) + // cond: !t.IsSigned() && t.Size() <= 4 + // result: x + for { + x := v_0 + if x.Op != OpArg { + break + } + t := x.Type + if !(!t.IsSigned() && t.Size() <= 4) { + break + } + v.copyOf(x) + return true + } + // match: (MOVWZreg (MOVDconst [c])) + // result: (MOVDconst [int64(uint32(c))]) + for { + if v_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(uint32(c))) + return true + } + // match: (MOVWZreg (RISBGZ x {r})) + // cond: r.OutMerge(0xffffffff) != nil + // result: (RISBGZ x {*r.OutMerge(0xffffffff)}) + for { + if v_0.Op != OpS390XRISBGZ { + break + } + r := auxToS390xRotateParams(v_0.Aux) + x := v_0.Args[0] + if !(r.OutMerge(0xffffffff) != nil) { + break + } + v.reset(OpS390XRISBGZ) + v.Aux = s390xRotateParamsToAux(*r.OutMerge(0xffffffff)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (MOVWreg x) + for { + off 
:= auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr1 := v_0 + if v_1.Op != OpS390XMOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { + break + } + x := v_1.Args[1] + ptr2 := v_1.Args[0] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is20Bit(int64(off1)+int64(off2)) + // result: (MOVWload [off1+off2] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is20Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpS390XMOVWload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) + // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + t := v_0.Type + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) { + break + } + v.reset(OpS390XMOVWload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(base, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (MOVWreg e:(MOVBreg x)) + // cond: clobberIfDead(e) + // result: (MOVBreg x) + for { + e := v_0 + if e.Op != OpS390XMOVBreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + 
break + } + v.reset(OpS390XMOVBreg) + v.AddArg(x) + return true + } + // match: (MOVWreg e:(MOVHreg x)) + // cond: clobberIfDead(e) + // result: (MOVHreg x) + for { + e := v_0 + if e.Op != OpS390XMOVHreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVHreg) + v.AddArg(x) + return true + } + // match: (MOVWreg e:(MOVWreg x)) + // cond: clobberIfDead(e) + // result: (MOVWreg x) + for { + e := v_0 + if e.Op != OpS390XMOVWreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVWreg e:(MOVWZreg x)) + // cond: clobberIfDead(e) + // result: (MOVWreg x) + for { + e := v_0 + if e.Op != OpS390XMOVWZreg { + break + } + x := e.Args[0] + if !(clobberIfDead(e)) { + break + } + v.reset(OpS390XMOVWreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBload _ _)) + // cond: (x.Type.IsSigned() || x.Type.Size() == 8) + // result: x + for { + x := v_0 + if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) { + break + } + v.copyOf(x) + return true + } + // match: (MOVWreg x:(MOVHload _ _)) + // cond: (x.Type.IsSigned() || x.Type.Size() == 8) + // result: x + for { + x := v_0 + if x.Op != OpS390XMOVHload || !(x.Type.IsSigned() || x.Type.Size() == 8) { + break + } + v.copyOf(x) + return true + } + // match: (MOVWreg x:(MOVWload _ _)) + // cond: (x.Type.IsSigned() || x.Type.Size() == 8) + // result: x + for { + x := v_0 + if x.Op != OpS390XMOVWload || !(x.Type.IsSigned() || x.Type.Size() == 8) { + break + } + v.copyOf(x) + return true + } + // match: (MOVWreg x:(MOVBZload _ _)) + // cond: (!x.Type.IsSigned() || x.Type.Size() > 1) + // result: x + for { + x := v_0 + if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) { + break + } + v.copyOf(x) + return true + } + // match: (MOVWreg x:(MOVHZload _ _)) + // cond: (!x.Type.IsSigned() || x.Type.Size() > 2) + // result: x + for { + x := v_0 + if x.Op != 
OpS390XMOVHZload || !(!x.Type.IsSigned() || x.Type.Size() > 2) { + break + } + v.copyOf(x) + return true + } + // match: (MOVWreg x:(MOVWZload [o] {s} p mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWload [o] {s} p mem) + for { + t := v.Type + x := v_0 + if x.Op != OpS390XMOVWZload { + break + } + o := auxIntToInt32(x.AuxInt) + s := auxToSym(x.Aux) + mem := x.Args[1] + p := x.Args[0] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(x.Pos, OpS390XMOVWload, t) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(o) + v0.Aux = symToAux(s) + v0.AddArg2(p, mem) + return true + } + // match: (MOVWreg x:(Arg )) + // cond: t.IsSigned() && t.Size() <= 4 + // result: x + for { + x := v_0 + if x.Op != OpArg { + break + } + t := x.Type + if !(t.IsSigned() && t.Size() <= 4) { + break + } + v.copyOf(x) + return true + } + // match: (MOVWreg (MOVDconst [c])) + // result: (MOVDconst [int64(int32(c))]) + for { + if v_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(int32(c))) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpS390XMOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpS390XMOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWZreg x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpS390XMOVWZreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpS390XMOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + 
v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is20Bit(int64(off1)+int64(off2)) + // result: (MOVWstore [off1+off2] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is20Bit(int64(off1) + int64(off2))) { + break + } + v.reset(OpS390XMOVWstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) + // cond: is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB + // result: (MOVWstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB) { + break + } + v.reset(OpS390XMOVWstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) + // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + t := v_0.Type + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + base := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) { + break + } + v.reset(OpS390XMOVWstore) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = 
symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true + } + // match: (MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem)) + // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-4) && setPos(v, x.Pos) && clobber(x) + // result: (STM2 [i-4] {s} p w0 w1 mem) + for { + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + p := v_0 + w1 := v_1 + x := v_2 + if x.Op != OpS390XMOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s { + break + } + mem := x.Args[2] + if p != x.Args[0] { + break + } + w0 := x.Args[1] + if !(p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-4) && setPos(v, x.Pos) && clobber(x)) { + break + } + v.reset(OpS390XSTM2) + v.AuxInt = int32ToAuxInt(i - 4) + v.Aux = symToAux(s) + v.AddArg4(p, w0, w1, mem) + return true + } + // match: (MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem)) + // cond: x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x) + // result: (STM3 [i-8] {s} p w0 w1 w2 mem) + for { + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + p := v_0 + w2 := v_1 + x := v_2 + if x.Op != OpS390XSTM2 || auxIntToInt32(x.AuxInt) != i-8 || auxToSym(x.Aux) != s { + break + } + mem := x.Args[3] + if p != x.Args[0] { + break + } + w0 := x.Args[1] + w1 := x.Args[2] + if !(x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)) { + break + } + v.reset(OpS390XSTM3) + v.AuxInt = int32ToAuxInt(i - 8) + v.Aux = symToAux(s) + v.AddArg5(p, w0, w1, w2, mem) + return true + } + // match: (MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem)) + // cond: x.Uses == 1 && is20Bit(int64(i)-12) && setPos(v, x.Pos) && clobber(x) + // result: (STM4 [i-12] {s} p w0 w1 w2 w3 mem) + for { + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + p := v_0 + w3 := v_1 + x := v_2 + if x.Op != OpS390XSTM3 || auxIntToInt32(x.AuxInt) != i-12 || auxToSym(x.Aux) != s { + break + } + mem := x.Args[4] + if p != x.Args[0] { + break + } + w0 := x.Args[1] + w1 := x.Args[2] + w2 := x.Args[3] + if !(x.Uses 
== 1 && is20Bit(int64(i)-12) && setPos(v, x.Pos) && clobber(x)) { + break + } + v.reset(OpS390XSTM4) + v.AuxInt = int32ToAuxInt(i - 12) + v.Aux = symToAux(s) + v.AddArg6(p, w0, w1, w2, w3, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr r:(MOVWBR x) mem) + // cond: r.Uses == 1 + // result: (MOVWBRstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + r := v_1 + if r.Op != OpS390XMOVWBR { + break + } + x := r.Args[0] + mem := v_2 + if !(r.Uses == 1) { + break + } + v.reset(OpS390XMOVWBRstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) + // cond: isU12Bit(sc.Off64()+int64(off)) + // result: (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) + if v_0.Op != OpS390XADDconst { + break + } + off := auxIntToInt32(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(isU12Bit(sc.Off64() + int64(off))) { + break + } + v.reset(OpS390XMOVWstoreconst) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(s) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) + // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) + // result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpS390XMOVDaddr { + break + } + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)) { + break + } + v.reset(OpS390XMOVWstoreconst) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + 
v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWstoreidx [off] {sym} ptr idx r:(MOVWBR x) mem) + // cond: r.Uses == 1 + // result: (MOVWBRstoreidx [off] {sym} ptr idx x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + idx := v_1 + r := v_2 + if r.Op != OpS390XMOVWBR { + break + } + x := r.Args[0] + mem := v_3 + if !(r.Uses == 1) { + break + } + v.reset(OpS390XMOVWBRstoreidx) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg4(ptr, idx, x, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMULLD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MULLD x (MOVDconst [c])) + // cond: is32Bit(c) + // result: (MULLDconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpS390XMULLDconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + // match: (MULLD x g:(MOVDload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) + // result: (MULLDload [off] {sym} x ptr mem) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 + if g.Op != OpS390XMOVDload { + continue + } + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XMULLDload) + v.Type = t + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueS390X_OpS390XMULLDconst(v *Value) bool { + v_0 := v.Args[0] + 
b := v.Block + // match: (MULLDconst x [c]) + // cond: isPowerOfTwo32(c&(c-1)) + // result: (ADD (SLDconst x [uint8(log32(c&(c-1)))]) (SLDconst x [uint8(log32(c&^(c-1)))])) + for { + t := v.Type + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(c & (c - 1))) { + break + } + v.reset(OpS390XADD) + v0 := b.NewValue0(v.Pos, OpS390XSLDconst, t) + v0.AuxInt = uint8ToAuxInt(uint8(log32(c & (c - 1)))) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XSLDconst, t) + v1.AuxInt = uint8ToAuxInt(uint8(log32(c &^ (c - 1)))) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } + // match: (MULLDconst x [c]) + // cond: isPowerOfTwo32(c+(c&^(c-1))) + // result: (SUB (SLDconst x [uint8(log32(c+(c&^(c-1))))]) (SLDconst x [uint8(log32(c&^(c-1)))])) + for { + t := v.Type + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(c + (c &^ (c - 1)))) { + break + } + v.reset(OpS390XSUB) + v0 := b.NewValue0(v.Pos, OpS390XSLDconst, t) + v0.AuxInt = uint8ToAuxInt(uint8(log32(c + (c &^ (c - 1))))) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XSLDconst, t) + v1.AuxInt = uint8ToAuxInt(uint8(log32(c &^ (c - 1)))) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } + // match: (MULLDconst x [c]) + // cond: isPowerOfTwo32(-c+(-c&^(-c-1))) + // result: (SUB (SLDconst x [uint8(log32(-c&^(-c-1)))]) (SLDconst x [uint8(log32(-c+(-c&^(-c-1))))])) + for { + t := v.Type + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(-c + (-c &^ (-c - 1)))) { + break + } + v.reset(OpS390XSUB) + v0 := b.NewValue0(v.Pos, OpS390XSLDconst, t) + v0.AuxInt = uint8ToAuxInt(uint8(log32(-c &^ (-c - 1)))) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XSLDconst, t) + v1.AuxInt = uint8ToAuxInt(uint8(log32(-c + (-c &^ (-c - 1))))) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } + // match: (MULLDconst [c] (MOVDconst [d])) + // result: (MOVDconst [int64(c)*d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + 
v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(c) * d) + return true + } + return false +} +func rewriteValueS390X_OpS390XMULLDload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MULLDload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (MULLD x (LGDR y)) + for { + t := v.Type + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr1 := v_1 + if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + ptr2 := v_2.Args[0] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XMULLD) + v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (MULLDload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) + // result: (MULLDload [off1+off2] {sym} x ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpS390XMULLDload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + // match: (MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (MULLDload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XMULLDload) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSym(s1, s2)) + 
v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMULLW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MULLW x (MOVDconst [c])) + // result: (MULLWconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpS390XMULLWconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + // match: (MULLW x g:(MOVWload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) + // result: (MULLWload [off] {sym} x ptr mem) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 + if g.Op != OpS390XMOVWload { + continue + } + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XMULLWload) + v.Type = t + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + // match: (MULLW x g:(MOVWZload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) + // result: (MULLWload [off] {sym} x ptr mem) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 + if g.Op != OpS390XMOVWZload { + continue + } + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XMULLWload) + v.Type = t + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueS390X_OpS390XMULLWconst(v *Value) bool { + v_0 := 
v.Args[0] + b := v.Block + // match: (MULLWconst x [c]) + // cond: isPowerOfTwo32(c&(c-1)) + // result: (ADDW (SLWconst x [uint8(log32(c&(c-1)))]) (SLWconst x [uint8(log32(c&^(c-1)))])) + for { + t := v.Type + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(c & (c - 1))) { + break + } + v.reset(OpS390XADDW) + v0 := b.NewValue0(v.Pos, OpS390XSLWconst, t) + v0.AuxInt = uint8ToAuxInt(uint8(log32(c & (c - 1)))) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XSLWconst, t) + v1.AuxInt = uint8ToAuxInt(uint8(log32(c &^ (c - 1)))) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } + // match: (MULLWconst x [c]) + // cond: isPowerOfTwo32(c+(c&^(c-1))) + // result: (SUBW (SLWconst x [uint8(log32(c+(c&^(c-1))))]) (SLWconst x [uint8(log32(c&^(c-1)))])) + for { + t := v.Type + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(c + (c &^ (c - 1)))) { + break + } + v.reset(OpS390XSUBW) + v0 := b.NewValue0(v.Pos, OpS390XSLWconst, t) + v0.AuxInt = uint8ToAuxInt(uint8(log32(c + (c &^ (c - 1))))) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XSLWconst, t) + v1.AuxInt = uint8ToAuxInt(uint8(log32(c &^ (c - 1)))) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } + // match: (MULLWconst x [c]) + // cond: isPowerOfTwo32(-c+(-c&^(-c-1))) + // result: (SUBW (SLWconst x [uint8(log32(-c&^(-c-1)))]) (SLWconst x [uint8(log32(-c+(-c&^(-c-1))))])) + for { + t := v.Type + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(isPowerOfTwo32(-c + (-c &^ (-c - 1)))) { + break + } + v.reset(OpS390XSUBW) + v0 := b.NewValue0(v.Pos, OpS390XSLWconst, t) + v0.AuxInt = uint8ToAuxInt(uint8(log32(-c &^ (-c - 1)))) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpS390XSLWconst, t) + v1.AuxInt = uint8ToAuxInt(uint8(log32(-c + (-c &^ (-c - 1))))) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } + // match: (MULLWconst [c] (MOVDconst [d])) + // result: (MOVDconst [int64(c*int32(d))]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + d := 
auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(c * int32(d))) + return true + } + return false +} +func rewriteValueS390X_OpS390XMULLWload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MULLWload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) + // result: (MULLWload [off1+off2] {sym} x ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpS390XMULLWload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + // match: (MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (MULLWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XMULLWload) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSym(s1, s2)) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XNEG(v *Value) bool { + v_0 := v.Args[0] + // match: (NEG (MOVDconst [c])) + // result: (MOVDconst [-c]) + for { + if v_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(-c) + return true + } + // match: (NEG (ADDconst [c] (NEG x))) + // cond: c != -(1<<31) + // result: (ADDconst [-c] x) + for { + if v_0.Op != OpS390XADDconst { + break + } + c := auxIntToInt32(v_0.AuxInt) + v_0_0 
:= v_0.Args[0] + if v_0_0.Op != OpS390XNEG { + break + } + x := v_0_0.Args[0] + if !(c != -(1 << 31)) { + break + } + v.reset(OpS390XADDconst) + v.AuxInt = int32ToAuxInt(-c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XNEGW(v *Value) bool { + v_0 := v.Args[0] + // match: (NEGW (MOVDconst [c])) + // result: (MOVDconst [int64(int32(-c))]) + for { + if v_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(int32(-c))) + return true + } + return false +} +func rewriteValueS390X_OpS390XNOT(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NOT x) + // result: (XOR (MOVDconst [-1]) x) + for { + x := v_0 + v.reset(OpS390XXOR) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(-1) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueS390X_OpS390XNOTW(v *Value) bool { + v_0 := v.Args[0] + // match: (NOTW x) + // result: (XORWconst [-1] x) + for { + x := v_0 + v.reset(OpS390XXORWconst) + v.AuxInt = int32ToAuxInt(-1) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpS390XOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OR x (MOVDconst [c])) + // cond: isU32Bit(c) + // result: (ORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isU32Bit(c)) { + continue + } + v.reset(OpS390XORconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (OR (MOVDconst [-1<<63]) (LGDR x)) + // result: (LGDR (LNDFR x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0.AuxInt) != -1<<63 || v_1.Op != OpS390XLGDR { + continue + } + t := v_1.Type + x := v_1.Args[0] + v.reset(OpS390XLGDR) + v.Type = t + v0 := b.NewValue0(v.Pos, 
OpS390XLNDFR, x.Type) + v0.AddArg(x) + v.AddArg(v0) + return true + } + break + } + // match: (OR (RISBGZ (LGDR x) {r}) (LGDR (LPDFR y))) + // cond: r == s390x.NewRotateParams(0, 0, 0) + // result: (LGDR (CPSDR y x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpS390XRISBGZ { + continue + } + r := auxToS390xRotateParams(v_0.Aux) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XLGDR { + continue + } + x := v_0_0.Args[0] + if v_1.Op != OpS390XLGDR { + continue + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpS390XLPDFR { + continue + } + t := v_1_0.Type + y := v_1_0.Args[0] + if !(r == s390x.NewRotateParams(0, 0, 0)) { + continue + } + v.reset(OpS390XLGDR) + v0 := b.NewValue0(v.Pos, OpS390XCPSDR, t) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + break + } + // match: (OR (RISBGZ (LGDR x) {r}) (MOVDconst [c])) + // cond: c >= 0 && r == s390x.NewRotateParams(0, 0, 0) + // result: (LGDR (CPSDR (FMOVDconst [math.Float64frombits(uint64(c))]) x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpS390XRISBGZ { + continue + } + r := auxToS390xRotateParams(v_0.Aux) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XLGDR { + continue + } + x := v_0_0.Args[0] + if v_1.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(c >= 0 && r == s390x.NewRotateParams(0, 0, 0)) { + continue + } + v.reset(OpS390XLGDR) + v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type) + v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type) + v1.AuxInt = float64ToAuxInt(math.Float64frombits(uint64(c))) + v0.AddArg2(v1, x) + v.AddArg(v0) + return true + } + break + } + // match: (OR (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [c|d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpS390XMOVDconst { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = 
int64ToAuxInt(c | d) + return true + } + break + } + // match: (OR x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (OR x g:(MOVDload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) + // result: (ORload [off] {sym} x ptr mem) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 + if g.Op != OpS390XMOVDload { + continue + } + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XORload) + v.Type = t + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueS390X_OpS390XORW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ORW x (MOVDconst [c])) + // result: (ORWconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpS390XORWconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + // match: (ORW x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (ORW x g:(MOVWload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) + // result: (ORWload [off] {sym} x ptr mem) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 + if g.Op != OpS390XMOVWload { + continue + } + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + 
v.reset(OpS390XORWload) + v.Type = t + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + // match: (ORW x g:(MOVWZload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) + // result: (ORWload [off] {sym} x ptr mem) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 + if g.Op != OpS390XMOVWZload { + continue + } + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XORWload) + v.Type = t + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueS390X_OpS390XORWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ORWconst [c] x) + // cond: int32(c)==0 + // result: x + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(int32(c) == 0) { + break + } + v.copyOf(x) + return true + } + // match: (ORWconst [c] _) + // cond: int32(c)==-1 + // result: (MOVDconst [-1]) + for { + c := auxIntToInt32(v.AuxInt) + if !(int32(c) == -1) { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + // match: (ORWconst [c] (MOVDconst [d])) + // result: (MOVDconst [int64(c)|d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(c) | d) + return true + } + return false +} +func rewriteValueS390X_OpS390XORWload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ORWload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) + // result: (ORWload [off1+off2] {sym} x ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + 
sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpS390XORWload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + // match: (ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (ORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XORWload) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSym(s1, s2)) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ORconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ORconst [-1] _) + // result: (MOVDconst [-1]) + for { + if auxIntToInt64(v.AuxInt) != -1 { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + // match: (ORconst [c] (MOVDconst [d])) + // result: (MOVDconst [c|d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(c | d) + return true + } + return false +} +func rewriteValueS390X_OpS390XORload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (ORload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (OR x (LGDR y)) + for { + t 
:= v.Type + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr1 := v_1 + if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + ptr2 := v_2.Args[0] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XOR) + v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (ORload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) + // result: (ORload [off1+off2] {sym} x ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpS390XORload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + // match: (ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (ORload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XORload) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSym(s1, s2)) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XRISBGZ(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (RISBGZ (MOVWZreg x) {r}) + // cond: r.InMerge(0xffffffff) != nil + // result: (RISBGZ x {*r.InMerge(0xffffffff)}) + for { + r := auxToS390xRotateParams(v.Aux) + if v_0.Op != OpS390XMOVWZreg { + break + } + x := v_0.Args[0] + if !(r.InMerge(0xffffffff) 
!= nil) { + break + } + v.reset(OpS390XRISBGZ) + v.Aux = s390xRotateParamsToAux(*r.InMerge(0xffffffff)) + v.AddArg(x) + return true + } + // match: (RISBGZ (MOVHZreg x) {r}) + // cond: r.InMerge(0x0000ffff) != nil + // result: (RISBGZ x {*r.InMerge(0x0000ffff)}) + for { + r := auxToS390xRotateParams(v.Aux) + if v_0.Op != OpS390XMOVHZreg { + break + } + x := v_0.Args[0] + if !(r.InMerge(0x0000ffff) != nil) { + break + } + v.reset(OpS390XRISBGZ) + v.Aux = s390xRotateParamsToAux(*r.InMerge(0x0000ffff)) + v.AddArg(x) + return true + } + // match: (RISBGZ (MOVBZreg x) {r}) + // cond: r.InMerge(0x000000ff) != nil + // result: (RISBGZ x {*r.InMerge(0x000000ff)}) + for { + r := auxToS390xRotateParams(v.Aux) + if v_0.Op != OpS390XMOVBZreg { + break + } + x := v_0.Args[0] + if !(r.InMerge(0x000000ff) != nil) { + break + } + v.reset(OpS390XRISBGZ) + v.Aux = s390xRotateParamsToAux(*r.InMerge(0x000000ff)) + v.AddArg(x) + return true + } + // match: (RISBGZ (SRDconst x [c]) {r}) + // cond: r.InMerge(^uint64(0)>>c) != nil + // result: (RISBGZ x {(*r.InMerge(^uint64(0)>>c)).RotateLeft(-c)}) + for { + r := auxToS390xRotateParams(v.Aux) + if v_0.Op != OpS390XSRDconst { + break + } + c := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + if !(r.InMerge(^uint64(0)>>c) != nil) { + break + } + v.reset(OpS390XRISBGZ) + v.Aux = s390xRotateParamsToAux((*r.InMerge(^uint64(0) >> c)).RotateLeft(-c)) + v.AddArg(x) + return true + } + // match: (RISBGZ (RISBGZ x {y}) {z}) + // cond: z.InMerge(y.OutMask()) != nil + // result: (RISBGZ x {(*z.InMerge(y.OutMask())).RotateLeft(y.Amount)}) + for { + z := auxToS390xRotateParams(v.Aux) + if v_0.Op != OpS390XRISBGZ { + break + } + y := auxToS390xRotateParams(v_0.Aux) + x := v_0.Args[0] + if !(z.InMerge(y.OutMask()) != nil) { + break + } + v.reset(OpS390XRISBGZ) + v.Aux = s390xRotateParamsToAux((*z.InMerge(y.OutMask())).RotateLeft(y.Amount)) + v.AddArg(x) + return true + } + // match: (RISBGZ x {r}) + // cond: r.End == 63 && r.Start == -r.Amount&63 + // 
result: (SRDconst x [-r.Amount&63]) + for { + r := auxToS390xRotateParams(v.Aux) + x := v_0 + if !(r.End == 63 && r.Start == -r.Amount&63) { + break + } + v.reset(OpS390XSRDconst) + v.AuxInt = uint8ToAuxInt(-r.Amount & 63) + v.AddArg(x) + return true + } + // match: (RISBGZ x {r}) + // cond: r.Start == 0 && r.End == 63-r.Amount + // result: (SLDconst x [r.Amount]) + for { + r := auxToS390xRotateParams(v.Aux) + x := v_0 + if !(r.Start == 0 && r.End == 63-r.Amount) { + break + } + v.reset(OpS390XSLDconst) + v.AuxInt = uint8ToAuxInt(r.Amount) + v.AddArg(x) + return true + } + // match: (RISBGZ (SRADconst x [c]) {r}) + // cond: r.Start == r.End && (r.Start+r.Amount)&63 <= c + // result: (RISBGZ x {s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)}) + for { + r := auxToS390xRotateParams(v.Aux) + if v_0.Op != OpS390XSRADconst { + break + } + c := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + if !(r.Start == r.End && (r.Start+r.Amount)&63 <= c) { + break + } + v.reset(OpS390XRISBGZ) + v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)) + v.AddArg(x) + return true + } + // match: (RISBGZ x {r}) + // cond: r == s390x.NewRotateParams(56, 63, 0) + // result: (MOVBZreg x) + for { + r := auxToS390xRotateParams(v.Aux) + x := v_0 + if !(r == s390x.NewRotateParams(56, 63, 0)) { + break + } + v.reset(OpS390XMOVBZreg) + v.AddArg(x) + return true + } + // match: (RISBGZ x {r}) + // cond: r == s390x.NewRotateParams(48, 63, 0) + // result: (MOVHZreg x) + for { + r := auxToS390xRotateParams(v.Aux) + x := v_0 + if !(r == s390x.NewRotateParams(48, 63, 0)) { + break + } + v.reset(OpS390XMOVHZreg) + v.AddArg(x) + return true + } + // match: (RISBGZ x {r}) + // cond: r == s390x.NewRotateParams(32, 63, 0) + // result: (MOVWZreg x) + for { + r := auxToS390xRotateParams(v.Aux) + x := v_0 + if !(r == s390x.NewRotateParams(32, 63, 0)) { + break + } + v.reset(OpS390XMOVWZreg) + v.AddArg(x) + return true + } + // match: (RISBGZ (LGDR x) {r}) + // cond: r == 
s390x.NewRotateParams(1, 63, 0) + // result: (LGDR (LPDFR x)) + for { + r := auxToS390xRotateParams(v.Aux) + if v_0.Op != OpS390XLGDR { + break + } + t := v_0.Type + x := v_0.Args[0] + if !(r == s390x.NewRotateParams(1, 63, 0)) { + break + } + v.reset(OpS390XLGDR) + v.Type = t + v0 := b.NewValue0(v.Pos, OpS390XLPDFR, x.Type) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueS390X_OpS390XRLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (RLL x (MOVDconst [c])) + // result: (RLLconst x [uint8(c&31)]) + for { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpS390XRLLconst) + v.AuxInt = uint8ToAuxInt(uint8(c & 31)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XRLLG(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (RLLG x (MOVDconst [c])) + // result: (RISBGZ x {s390x.NewRotateParams(0, 63, uint8(c&63))}) + for { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpS390XRISBGZ) + v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(0, 63, uint8(c&63))) + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XSLD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SLD x (MOVDconst [c])) + // result: (SLDconst x [uint8(c&63)]) + for { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpS390XSLDconst) + v.AuxInt = uint8ToAuxInt(uint8(c & 63)) + v.AddArg(x) + return true + } + // match: (SLD x (RISBGZ y {r})) + // cond: r.Amount == 0 && r.OutMask()&63 == 63 + // result: (SLD x y) + for { + x := v_0 + if v_1.Op != OpS390XRISBGZ { + break + } + r := auxToS390xRotateParams(v_1.Aux) + y := v_1.Args[0] + if !(r.Amount == 0 && r.OutMask()&63 == 63) { + break + } + v.reset(OpS390XSLD) + v.AddArg2(x, y) + return true + } + // match: (SLD x (AND 
(MOVDconst [c]) y)) + // result: (SLD x (ANDWconst [int32(c&63)] y)) + for { + x := v_0 + if v_1.Op != OpS390XAND { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_0.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1_0.AuxInt) + y := v_1_1 + v.reset(OpS390XSLD) + v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(int32(c & 63)) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (SLD x (ANDWconst [c] y)) + // cond: c&63 == 63 + // result: (SLD x y) + for { + x := v_0 + if v_1.Op != OpS390XANDWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&63 == 63) { + break + } + v.reset(OpS390XSLD) + v.AddArg2(x, y) + return true + } + // match: (SLD x (MOVWreg y)) + // result: (SLD x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVWreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSLD) + v.AddArg2(x, y) + return true + } + // match: (SLD x (MOVHreg y)) + // result: (SLD x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVHreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSLD) + v.AddArg2(x, y) + return true + } + // match: (SLD x (MOVBreg y)) + // result: (SLD x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVBreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSLD) + v.AddArg2(x, y) + return true + } + // match: (SLD x (MOVWZreg y)) + // result: (SLD x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVWZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSLD) + v.AddArg2(x, y) + return true + } + // match: (SLD x (MOVHZreg y)) + // result: (SLD x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVHZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSLD) + v.AddArg2(x, y) + return true + } + // match: (SLD x (MOVBZreg y)) + // result: (SLD x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVBZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSLD) + v.AddArg2(x, y) + 
return true + } + return false +} +func rewriteValueS390X_OpS390XSLDconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SLDconst (SRDconst x [c]) [d]) + // result: (RISBGZ x {s390x.NewRotateParams(uint8(max8(0, int8(c-d))), 63-d, uint8(int8(d-c)&63))}) + for { + d := auxIntToUint8(v.AuxInt) + if v_0.Op != OpS390XSRDconst { + break + } + c := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpS390XRISBGZ) + v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(uint8(max8(0, int8(c-d))), 63-d, uint8(int8(d-c)&63))) + v.AddArg(x) + return true + } + // match: (SLDconst (RISBGZ x {r}) [c]) + // cond: s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask()) != nil + // result: (RISBGZ x {(*s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask())).RotateLeft(r.Amount)}) + for { + c := auxIntToUint8(v.AuxInt) + if v_0.Op != OpS390XRISBGZ { + break + } + r := auxToS390xRotateParams(v_0.Aux) + x := v_0.Args[0] + if !(s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask()) != nil) { + break + } + v.reset(OpS390XRISBGZ) + v.Aux = s390xRotateParamsToAux((*s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask())).RotateLeft(r.Amount)) + v.AddArg(x) + return true + } + // match: (SLDconst x [0]) + // result: x + for { + if auxIntToUint8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XSLW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SLW x (MOVDconst [c])) + // cond: c&32 == 0 + // result: (SLWconst x [uint8(c&31)]) + for { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c&32 == 0) { + break + } + v.reset(OpS390XSLWconst) + v.AuxInt = uint8ToAuxInt(uint8(c & 31)) + v.AddArg(x) + return true + } + // match: (SLW _ (MOVDconst [c])) + // cond: c&32 != 0 + // result: (MOVDconst [0]) + for { + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c&32 != 0) { + break + 
} + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SLW x (RISBGZ y {r})) + // cond: r.Amount == 0 && r.OutMask()&63 == 63 + // result: (SLW x y) + for { + x := v_0 + if v_1.Op != OpS390XRISBGZ { + break + } + r := auxToS390xRotateParams(v_1.Aux) + y := v_1.Args[0] + if !(r.Amount == 0 && r.OutMask()&63 == 63) { + break + } + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (SLW x (AND (MOVDconst [c]) y)) + // result: (SLW x (ANDWconst [int32(c&63)] y)) + for { + x := v_0 + if v_1.Op != OpS390XAND { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_0.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1_0.AuxInt) + y := v_1_1 + v.reset(OpS390XSLW) + v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(int32(c & 63)) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (SLW x (ANDWconst [c] y)) + // cond: c&63 == 63 + // result: (SLW x y) + for { + x := v_0 + if v_1.Op != OpS390XANDWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&63 == 63) { + break + } + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (SLW x (MOVWreg y)) + // result: (SLW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVWreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (SLW x (MOVHreg y)) + // result: (SLW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVHreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (SLW x (MOVBreg y)) + // result: (SLW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVBreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (SLW x (MOVWZreg y)) + // result: (SLW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVWZreg { + break + } + y := 
v_1.Args[0] + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (SLW x (MOVHZreg y)) + // result: (SLW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVHZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + // match: (SLW x (MOVBZreg y)) + // result: (SLW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVBZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSLW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueS390X_OpS390XSLWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SLWconst x [0]) + // result: x + for { + if auxIntToUint8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XSRAD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SRAD x (MOVDconst [c])) + // result: (SRADconst x [uint8(c&63)]) + for { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpS390XSRADconst) + v.AuxInt = uint8ToAuxInt(uint8(c & 63)) + v.AddArg(x) + return true + } + // match: (SRAD x (RISBGZ y {r})) + // cond: r.Amount == 0 && r.OutMask()&63 == 63 + // result: (SRAD x y) + for { + x := v_0 + if v_1.Op != OpS390XRISBGZ { + break + } + r := auxToS390xRotateParams(v_1.Aux) + y := v_1.Args[0] + if !(r.Amount == 0 && r.OutMask()&63 == 63) { + break + } + v.reset(OpS390XSRAD) + v.AddArg2(x, y) + return true + } + // match: (SRAD x (AND (MOVDconst [c]) y)) + // result: (SRAD x (ANDWconst [int32(c&63)] y)) + for { + x := v_0 + if v_1.Op != OpS390XAND { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_0.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1_0.AuxInt) + y := v_1_1 + v.reset(OpS390XSRAD) + v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(int32(c & 63)) + 
v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (SRAD x (ANDWconst [c] y)) + // cond: c&63 == 63 + // result: (SRAD x y) + for { + x := v_0 + if v_1.Op != OpS390XANDWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&63 == 63) { + break + } + v.reset(OpS390XSRAD) + v.AddArg2(x, y) + return true + } + // match: (SRAD x (MOVWreg y)) + // result: (SRAD x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVWreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRAD) + v.AddArg2(x, y) + return true + } + // match: (SRAD x (MOVHreg y)) + // result: (SRAD x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVHreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRAD) + v.AddArg2(x, y) + return true + } + // match: (SRAD x (MOVBreg y)) + // result: (SRAD x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVBreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRAD) + v.AddArg2(x, y) + return true + } + // match: (SRAD x (MOVWZreg y)) + // result: (SRAD x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVWZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRAD) + v.AddArg2(x, y) + return true + } + // match: (SRAD x (MOVHZreg y)) + // result: (SRAD x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVHZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRAD) + v.AddArg2(x, y) + return true + } + // match: (SRAD x (MOVBZreg y)) + // result: (SRAD x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVBZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRAD) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueS390X_OpS390XSRADconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SRADconst x [0]) + // result: x + for { + if auxIntToUint8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SRADconst [c] (MOVDconst [d])) + // result: (MOVDconst [d>>uint64(c)]) + for { + c := auxIntToUint8(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + 
v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(d >> uint64(c)) + return true + } + return false +} +func rewriteValueS390X_OpS390XSRAW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SRAW x (MOVDconst [c])) + // cond: c&32 == 0 + // result: (SRAWconst x [uint8(c&31)]) + for { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c&32 == 0) { + break + } + v.reset(OpS390XSRAWconst) + v.AuxInt = uint8ToAuxInt(uint8(c & 31)) + v.AddArg(x) + return true + } + // match: (SRAW x (MOVDconst [c])) + // cond: c&32 != 0 + // result: (SRAWconst x [31]) + for { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c&32 != 0) { + break + } + v.reset(OpS390XSRAWconst) + v.AuxInt = uint8ToAuxInt(31) + v.AddArg(x) + return true + } + // match: (SRAW x (RISBGZ y {r})) + // cond: r.Amount == 0 && r.OutMask()&63 == 63 + // result: (SRAW x y) + for { + x := v_0 + if v_1.Op != OpS390XRISBGZ { + break + } + r := auxToS390xRotateParams(v_1.Aux) + y := v_1.Args[0] + if !(r.Amount == 0 && r.OutMask()&63 == 63) { + break + } + v.reset(OpS390XSRAW) + v.AddArg2(x, y) + return true + } + // match: (SRAW x (AND (MOVDconst [c]) y)) + // result: (SRAW x (ANDWconst [int32(c&63)] y)) + for { + x := v_0 + if v_1.Op != OpS390XAND { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_0.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1_0.AuxInt) + y := v_1_1 + v.reset(OpS390XSRAW) + v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(int32(c & 63)) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (SRAW x (ANDWconst [c] y)) + // cond: c&63 == 63 + // result: (SRAW x y) + for { + x := v_0 + if v_1.Op != OpS390XANDWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := 
v_1.Args[0] + if !(c&63 == 63) { + break + } + v.reset(OpS390XSRAW) + v.AddArg2(x, y) + return true + } + // match: (SRAW x (MOVWreg y)) + // result: (SRAW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVWreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRAW) + v.AddArg2(x, y) + return true + } + // match: (SRAW x (MOVHreg y)) + // result: (SRAW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVHreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRAW) + v.AddArg2(x, y) + return true + } + // match: (SRAW x (MOVBreg y)) + // result: (SRAW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVBreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRAW) + v.AddArg2(x, y) + return true + } + // match: (SRAW x (MOVWZreg y)) + // result: (SRAW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVWZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRAW) + v.AddArg2(x, y) + return true + } + // match: (SRAW x (MOVHZreg y)) + // result: (SRAW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVHZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRAW) + v.AddArg2(x, y) + return true + } + // match: (SRAW x (MOVBZreg y)) + // result: (SRAW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVBZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRAW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueS390X_OpS390XSRAWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SRAWconst x [0]) + // result: x + for { + if auxIntToUint8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SRAWconst [c] (MOVDconst [d])) + // result: (MOVDconst [int64(int32(d))>>uint64(c)]) + for { + c := auxIntToUint8(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c)) + return true + } + return false +} +func rewriteValueS390X_OpS390XSRD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (SRD x (MOVDconst [c])) + // result: (SRDconst x [uint8(c&63)]) + for { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpS390XSRDconst) + v.AuxInt = uint8ToAuxInt(uint8(c & 63)) + v.AddArg(x) + return true + } + // match: (SRD x (RISBGZ y {r})) + // cond: r.Amount == 0 && r.OutMask()&63 == 63 + // result: (SRD x y) + for { + x := v_0 + if v_1.Op != OpS390XRISBGZ { + break + } + r := auxToS390xRotateParams(v_1.Aux) + y := v_1.Args[0] + if !(r.Amount == 0 && r.OutMask()&63 == 63) { + break + } + v.reset(OpS390XSRD) + v.AddArg2(x, y) + return true + } + // match: (SRD x (AND (MOVDconst [c]) y)) + // result: (SRD x (ANDWconst [int32(c&63)] y)) + for { + x := v_0 + if v_1.Op != OpS390XAND { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_0.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1_0.AuxInt) + y := v_1_1 + v.reset(OpS390XSRD) + v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(int32(c & 63)) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (SRD x (ANDWconst [c] y)) + // cond: c&63 == 63 + // result: (SRD x y) + for { + x := v_0 + if v_1.Op != OpS390XANDWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&63 == 63) { + break + } + v.reset(OpS390XSRD) + v.AddArg2(x, y) + return true + } + // match: (SRD x (MOVWreg y)) + // result: (SRD x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVWreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRD) + v.AddArg2(x, y) + return true + } + // match: (SRD x (MOVHreg y)) + // result: (SRD x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVHreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRD) + v.AddArg2(x, y) + return true + } + // match: (SRD x (MOVBreg y)) + // result: (SRD x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVBreg { + 
break + } + y := v_1.Args[0] + v.reset(OpS390XSRD) + v.AddArg2(x, y) + return true + } + // match: (SRD x (MOVWZreg y)) + // result: (SRD x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVWZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRD) + v.AddArg2(x, y) + return true + } + // match: (SRD x (MOVHZreg y)) + // result: (SRD x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVHZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRD) + v.AddArg2(x, y) + return true + } + // match: (SRD x (MOVBZreg y)) + // result: (SRD x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVBZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRD) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueS390X_OpS390XSRDconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SRDconst (SLDconst x [c]) [d]) + // result: (RISBGZ x {s390x.NewRotateParams(d, uint8(min8(63, int8(63-c+d))), uint8(int8(c-d)&63))}) + for { + d := auxIntToUint8(v.AuxInt) + if v_0.Op != OpS390XSLDconst { + break + } + c := auxIntToUint8(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpS390XRISBGZ) + v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(d, uint8(min8(63, int8(63-c+d))), uint8(int8(c-d)&63))) + v.AddArg(x) + return true + } + // match: (SRDconst (RISBGZ x {r}) [c]) + // cond: s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask()) != nil + // result: (RISBGZ x {(*s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask())).RotateLeft(r.Amount)}) + for { + c := auxIntToUint8(v.AuxInt) + if v_0.Op != OpS390XRISBGZ { + break + } + r := auxToS390xRotateParams(v_0.Aux) + x := v_0.Args[0] + if !(s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask()) != nil) { + break + } + v.reset(OpS390XRISBGZ) + v.Aux = s390xRotateParamsToAux((*s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask())).RotateLeft(r.Amount)) + v.AddArg(x) + return true + } + // match: (SRDconst x [0]) + // result: x + for { + if auxIntToUint8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return 
false +} +func rewriteValueS390X_OpS390XSRW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SRW x (MOVDconst [c])) + // cond: c&32 == 0 + // result: (SRWconst x [uint8(c&31)]) + for { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c&32 == 0) { + break + } + v.reset(OpS390XSRWconst) + v.AuxInt = uint8ToAuxInt(uint8(c & 31)) + v.AddArg(x) + return true + } + // match: (SRW _ (MOVDconst [c])) + // cond: c&32 != 0 + // result: (MOVDconst [0]) + for { + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c&32 != 0) { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SRW x (RISBGZ y {r})) + // cond: r.Amount == 0 && r.OutMask()&63 == 63 + // result: (SRW x y) + for { + x := v_0 + if v_1.Op != OpS390XRISBGZ { + break + } + r := auxToS390xRotateParams(v_1.Aux) + y := v_1.Args[0] + if !(r.Amount == 0 && r.OutMask()&63 == 63) { + break + } + v.reset(OpS390XSRW) + v.AddArg2(x, y) + return true + } + // match: (SRW x (AND (MOVDconst [c]) y)) + // result: (SRW x (ANDWconst [int32(c&63)] y)) + for { + x := v_0 + if v_1.Op != OpS390XAND { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_0.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1_0.AuxInt) + y := v_1_1 + v.reset(OpS390XSRW) + v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(int32(c & 63)) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (SRW x (ANDWconst [c] y)) + // cond: c&63 == 63 + // result: (SRW x y) + for { + x := v_0 + if v_1.Op != OpS390XANDWconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + y := v_1.Args[0] + if !(c&63 == 63) { + break + } + v.reset(OpS390XSRW) + v.AddArg2(x, y) + return true + } + // match: (SRW x (MOVWreg y)) + // 
result: (SRW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVWreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRW) + v.AddArg2(x, y) + return true + } + // match: (SRW x (MOVHreg y)) + // result: (SRW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVHreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRW) + v.AddArg2(x, y) + return true + } + // match: (SRW x (MOVBreg y)) + // result: (SRW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVBreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRW) + v.AddArg2(x, y) + return true + } + // match: (SRW x (MOVWZreg y)) + // result: (SRW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVWZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRW) + v.AddArg2(x, y) + return true + } + // match: (SRW x (MOVHZreg y)) + // result: (SRW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVHZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRW) + v.AddArg2(x, y) + return true + } + // match: (SRW x (MOVBZreg y)) + // result: (SRW x y) + for { + x := v_0 + if v_1.Op != OpS390XMOVBZreg { + break + } + y := v_1.Args[0] + v.reset(OpS390XSRW) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueS390X_OpS390XSRWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SRWconst x [0]) + // result: x + for { + if auxIntToUint8(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XSTM2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem)) + // cond: x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x) + // result: (STM4 [i-8] {s} p w0 w1 w2 w3 mem) + for { + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + p := v_0 + w2 := v_1 + w3 := v_2 + x := v_3 + if x.Op != OpS390XSTM2 || auxIntToInt32(x.AuxInt) != i-8 || auxToSym(x.Aux) != s { + break + } + mem := x.Args[3] + if p != x.Args[0] { + break + } + w0 := x.Args[1] + w1 
:= x.Args[2] + if !(x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)) { + break + } + v.reset(OpS390XSTM4) + v.AuxInt = int32ToAuxInt(i - 8) + v.Aux = symToAux(s) + v.AddArg6(p, w0, w1, w2, w3, mem) + return true + } + // match: (STM2 [i] {s} p (SRDconst [32] x) x mem) + // result: (MOVDstore [i] {s} p x mem) + for { + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + p := v_0 + if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 32 { + break + } + x := v_1.Args[0] + if x != v_2 { + break + } + mem := v_3 + v.reset(OpS390XMOVDstore) + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) + v.AddArg3(p, x, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XSTMG2(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem)) + // cond: x.Uses == 1 && is20Bit(int64(i)-16) && setPos(v, x.Pos) && clobber(x) + // result: (STMG4 [i-16] {s} p w0 w1 w2 w3 mem) + for { + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) + p := v_0 + w2 := v_1 + w3 := v_2 + x := v_3 + if x.Op != OpS390XSTMG2 || auxIntToInt32(x.AuxInt) != i-16 || auxToSym(x.Aux) != s { + break + } + mem := x.Args[3] + if p != x.Args[0] { + break + } + w0 := x.Args[1] + w1 := x.Args[2] + if !(x.Uses == 1 && is20Bit(int64(i)-16) && setPos(v, x.Pos) && clobber(x)) { + break + } + v.reset(OpS390XSTMG4) + v.AuxInt = int32ToAuxInt(i - 16) + v.Aux = symToAux(s) + v.AddArg6(p, w0, w1, w2, w3, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XSUB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SUB x (MOVDconst [c])) + // cond: is32Bit(c) + // result: (SUBconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + break + } + v.reset(OpS390XSUBconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (SUB 
(MOVDconst [c]) x) + // cond: is32Bit(c) + // result: (NEG (SUBconst x [int32(c)])) + for { + if v_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(is32Bit(c)) { + break + } + v.reset(OpS390XNEG) + v0 := b.NewValue0(v.Pos, OpS390XSUBconst, v.Type) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SUB x x) + // result: (MOVDconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SUB x g:(MOVDload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) + // result: (SUBload [off] {sym} x ptr mem) + for { + t := v.Type + x := v_0 + g := v_1 + if g.Op != OpS390XMOVDload { + break + } + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { + break + } + v.reset(OpS390XSUBload) + v.Type = t + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XSUBE(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBE x y (FlagGT)) + // result: (SUBC x y) + for { + x := v_0 + y := v_1 + if v_2.Op != OpS390XFlagGT { + break + } + v.reset(OpS390XSUBC) + v.AddArg2(x, y) + return true + } + // match: (SUBE x y (FlagOV)) + // result: (SUBC x y) + for { + x := v_0 + y := v_1 + if v_2.Op != OpS390XFlagOV { + break + } + v.reset(OpS390XSUBC) + v.AddArg2(x, y) + return true + } + // match: (SUBE x y (Select1 (SUBC (MOVDconst [0]) (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) c)))))) + // result: (SUBE x y c) + for { + x := v_0 + y := v_1 + if v_2.Op != OpSelect1 { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpS390XSUBC { + break + } + _ = v_2_0.Args[1] + v_2_0_0 := v_2_0.Args[0] + if 
v_2_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 { + break + } + v_2_0_1 := v_2_0.Args[1] + if v_2_0_1.Op != OpS390XNEG { + break + } + v_2_0_1_0 := v_2_0_1.Args[0] + if v_2_0_1_0.Op != OpSelect0 { + break + } + v_2_0_1_0_0 := v_2_0_1_0.Args[0] + if v_2_0_1_0_0.Op != OpS390XSUBE { + break + } + c := v_2_0_1_0_0.Args[2] + v_2_0_1_0_0_0 := v_2_0_1_0_0.Args[0] + if v_2_0_1_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_1_0_0_0.AuxInt) != 0 { + break + } + v_2_0_1_0_0_1 := v_2_0_1_0_0.Args[1] + if v_2_0_1_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_1_0_0_1.AuxInt) != 0 { + break + } + v.reset(OpS390XSUBE) + v.AddArg3(x, y, c) + return true + } + return false +} +func rewriteValueS390X_OpS390XSUBW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SUBW x (MOVDconst [c])) + // result: (SUBWconst x [int32(c)]) + for { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpS390XSUBWconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + // match: (SUBW (MOVDconst [c]) x) + // result: (NEGW (SUBWconst x [int32(c)])) + for { + if v_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + v.reset(OpS390XNEGW) + v0 := b.NewValue0(v.Pos, OpS390XSUBWconst, v.Type) + v0.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (SUBW x x) + // result: (MOVDconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SUBW x g:(MOVWload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) + // result: (SUBWload [off] {sym} x ptr mem) + for { + t := v.Type + x := v_0 + g := v_1 + if g.Op != OpS390XMOVWload { + break + } + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && 
is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { + break + } + v.reset(OpS390XSUBWload) + v.Type = t + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + // match: (SUBW x g:(MOVWZload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) + // result: (SUBWload [off] {sym} x ptr mem) + for { + t := v.Type + x := v_0 + g := v_1 + if g.Op != OpS390XMOVWZload { + break + } + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { + break + } + v.reset(OpS390XSUBWload) + v.Type = t + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XSUBWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SUBWconst [c] x) + // cond: int32(c) == 0 + // result: x + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(int32(c) == 0) { + break + } + v.copyOf(x) + return true + } + // match: (SUBWconst [c] x) + // result: (ADDWconst [-int32(c)] x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + v.reset(OpS390XADDWconst) + v.AuxInt = int32ToAuxInt(-int32(c)) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpS390XSUBWload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBWload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) + // result: (SUBWload [off1+off2] {sym} x ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpS390XSUBWload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + 
v.AddArg3(x, ptr, mem) + return true + } + // match: (SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (SUBWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XSUBWload) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSym(s1, s2)) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XSUBconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SUBconst [0] x) + // result: x + for { + if auxIntToInt32(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SUBconst [c] x) + // cond: c != -(1<<31) + // result: (ADDconst [-c] x) + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(c != -(1 << 31)) { + break + } + v.reset(OpS390XADDconst) + v.AuxInt = int32ToAuxInt(-c) + v.AddArg(x) + return true + } + // match: (SUBconst (MOVDconst [d]) [c]) + // result: (MOVDconst [d-int64(c)]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(d - int64(c)) + return true + } + // match: (SUBconst (SUBconst x [d]) [c]) + // cond: is32Bit(-int64(c)-int64(d)) + // result: (ADDconst [-c-d] x) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XSUBconst { + break + } + d := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(-int64(c) - int64(d))) { + break + } + v.reset(OpS390XADDconst) + v.AuxInt = int32ToAuxInt(-c - d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XSUBload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 
:= v.Args[0] + b := v.Block + // match: (SUBload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (SUB x (LGDR y)) + for { + t := v.Type + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr1 := v_1 + if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + ptr2 := v_2.Args[0] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XSUB) + v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (SUBload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) + // result: (SUBload [off1+off2] {sym} x ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpS390XSUBload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + // match: (SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (SUBload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XSUBload) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSym(s1, s2)) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XSumBytes2(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SumBytes2 x) + // result: (ADDW 
(SRWconst x [8]) x) + for { + x := v_0 + v.reset(OpS390XADDW) + v0 := b.NewValue0(v.Pos, OpS390XSRWconst, typ.UInt8) + v0.AuxInt = uint8ToAuxInt(8) + v0.AddArg(x) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueS390X_OpS390XSumBytes4(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SumBytes4 x) + // result: (SumBytes2 (ADDW (SRWconst x [16]) x)) + for { + x := v_0 + v.reset(OpS390XSumBytes2) + v0 := b.NewValue0(v.Pos, OpS390XADDW, typ.UInt16) + v1 := b.NewValue0(v.Pos, OpS390XSRWconst, typ.UInt16) + v1.AuxInt = uint8ToAuxInt(16) + v1.AddArg(x) + v0.AddArg2(v1, x) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpS390XSumBytes8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SumBytes8 x) + // result: (SumBytes4 (ADDW (SRDconst x [32]) x)) + for { + x := v_0 + v.reset(OpS390XSumBytes4) + v0 := b.NewValue0(v.Pos, OpS390XADDW, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpS390XSRDconst, typ.UInt32) + v1.AuxInt = uint8ToAuxInt(32) + v1.AddArg(x) + v0.AddArg2(v1, x) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpS390XXOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XOR x (MOVDconst [c])) + // cond: isU32Bit(c) + // result: (XORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isU32Bit(c)) { + continue + } + v.reset(OpS390XXORconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (XOR (MOVDconst [c]) (MOVDconst [d])) + // result: (MOVDconst [c^d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpS390XMOVDconst { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(c ^ d) + return true + } + break + 
} + // match: (XOR x x) + // result: (MOVDconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (XOR x g:(MOVDload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) + // result: (XORload [off] {sym} x ptr mem) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 + if g.Op != OpS390XMOVDload { + continue + } + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XXORload) + v.Type = t + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueS390X_OpS390XXORW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XORW x (MOVDconst [c])) + // result: (XORWconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpS390XMOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpS390XXORWconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + // match: (XORW x x) + // result: (MOVDconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (XORW x g:(MOVWload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) + // result: (XORWload [off] {sym} x ptr mem) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 + if g.Op != OpS390XMOVWload { + continue + } + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(int64(off)) && 
canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XXORWload) + v.Type = t + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + // match: (XORW x g:(MOVWZload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g) + // result: (XORWload [off] {sym} x ptr mem) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + g := v_1 + if g.Op != OpS390XMOVWZload { + continue + } + off := auxIntToInt32(g.AuxInt) + sym := auxToSym(g.Aux) + mem := g.Args[1] + ptr := g.Args[0] + if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) { + continue + } + v.reset(OpS390XXORWload) + v.Type = t + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + break + } + return false +} +func rewriteValueS390X_OpS390XXORWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (XORWconst [c] x) + // cond: int32(c)==0 + // result: x + for { + c := auxIntToInt32(v.AuxInt) + x := v_0 + if !(int32(c) == 0) { + break + } + v.copyOf(x) + return true + } + // match: (XORWconst [c] (MOVDconst [d])) + // result: (MOVDconst [int64(c)^d]) + for { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(int64(c) ^ d) + return true + } + return false +} +func rewriteValueS390X_OpS390XXORWload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XORWload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) + // result: (XORWload [off1+off2] {sym} x ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op 
!= OpSB && is20Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpS390XXORWload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + // match: (XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (XORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XXORWload) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSym(s1, s2)) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XXORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (XORconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (XORconst [c] (MOVDconst [d])) + // result: (MOVDconst [c^d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpS390XMOVDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(c ^ d) + return true + } + return false +} +func rewriteValueS390X_OpS390XXORload(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (XORload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (XOR x (LGDR y)) + for { + t := v.Type + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + ptr1 := v_1 + if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { + break + } + y := v_2.Args[1] + ptr2 := v_2.Args[0] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XXOR) + v0 := b.NewValue0(v_2.Pos, 
OpS390XLGDR, t) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (XORload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) + // result: (XORload [off1+off2] {sym} x ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XADDconst { + break + } + off2 := auxIntToInt32(v_1.AuxInt) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpS390XXORload) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) + v.AddArg3(x, ptr, mem) + return true + } + // match: (XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) + // result: (XORload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := auxIntToInt32(v.AuxInt) + s1 := auxToSym(v.Aux) + x := v_0 + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := auxIntToInt32(v_1.AuxInt) + s2 := auxToSym(v_1.Aux) + ptr := v_1.Args[0] + mem := v_2 + if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XXORload) + v.AuxInt = int32ToAuxInt(o1 + o2) + v.Aux = symToAux(mergeSym(s1, s2)) + v.AddArg3(x, ptr, mem) + return true + } + return false +} +func rewriteValueS390X_OpSelect0(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select0 (Add64carry x y c)) + // result: (Select0 (ADDE x y (Select1 (ADDCconst c [-1])))) + for { + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpS390XADDE, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpS390XADDCconst, types.NewTuple(typ.UInt64, types.TypeFlags)) + v2.AuxInt = int16ToAuxInt(-1) + v2.AddArg(c) + v1.AddArg(v2) + v0.AddArg3(x, y, v1) + 
v.AddArg(v0) + return true + } + // match: (Select0 (Sub64borrow x y c)) + // result: (Select0 (SUBE x y (Select1 (SUBC (MOVDconst [0]) c)))) + for { + if v_0.Op != OpSub64borrow { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpS390XSUBE, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpS390XSUBC, types.NewTuple(typ.UInt64, types.TypeFlags)) + v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(0) + v2.AddArg2(v3, c) + v1.AddArg(v2) + v0.AddArg3(x, y, v1) + v.AddArg(v0) + return true + } + // match: (Select0 (AddTupleFirst32 val tuple)) + // result: (ADDW val (Select0 tuple)) + for { + t := v.Type + if v_0.Op != OpS390XAddTupleFirst32 { + break + } + tuple := v_0.Args[1] + val := v_0.Args[0] + v.reset(OpS390XADDW) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v0.AddArg(tuple) + v.AddArg2(val, v0) + return true + } + // match: (Select0 (AddTupleFirst64 val tuple)) + // result: (ADD val (Select0 tuple)) + for { + t := v.Type + if v_0.Op != OpS390XAddTupleFirst64 { + break + } + tuple := v_0.Args[1] + val := v_0.Args[0] + v.reset(OpS390XADD) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v0.AddArg(tuple) + v.AddArg2(val, v0) + return true + } + // match: (Select0 (ADDCconst (MOVDconst [c]) [d])) + // result: (MOVDconst [c+int64(d)]) + for { + if v_0.Op != OpS390XADDCconst { + break + } + d := auxIntToInt16(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0_0.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(c + int64(d)) + return true + } + // match: (Select0 (SUBC (MOVDconst [c]) (MOVDconst [d]))) + // result: (MOVDconst [c-d]) + for { + if v_0.Op != OpS390XSUBC { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0_0.AuxInt) + 
v_0_1 := v_0.Args[1] + if v_0_1.Op != OpS390XMOVDconst { + break + } + d := auxIntToInt64(v_0_1.AuxInt) + v.reset(OpS390XMOVDconst) + v.AuxInt = int64ToAuxInt(c - d) + return true + } + // match: (Select0 (FADD (FMUL y z) x)) + // cond: x.Block.Func.useFMA(v) + // result: (FMADD x y z) + for { + if v_0.Op != OpS390XFADD { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpS390XFMUL { + continue + } + z := v_0_0.Args[1] + y := v_0_0.Args[0] + x := v_0_1 + if !(x.Block.Func.useFMA(v)) { + continue + } + v.reset(OpS390XFMADD) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (Select0 (FSUB (FMUL y z) x)) + // cond: x.Block.Func.useFMA(v) + // result: (FMSUB x y z) + for { + if v_0.Op != OpS390XFSUB { + break + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XFMUL { + break + } + z := v_0_0.Args[1] + y := v_0_0.Args[0] + if !(x.Block.Func.useFMA(v)) { + break + } + v.reset(OpS390XFMSUB) + v.AddArg3(x, y, z) + return true + } + // match: (Select0 (FADDS (FMULS y z) x)) + // cond: x.Block.Func.useFMA(v) + // result: (FMADDS x y z) + for { + if v_0.Op != OpS390XFADDS { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpS390XFMULS { + continue + } + z := v_0_0.Args[1] + y := v_0_0.Args[0] + x := v_0_1 + if !(x.Block.Func.useFMA(v)) { + continue + } + v.reset(OpS390XFMADDS) + v.AddArg3(x, y, z) + return true + } + break + } + // match: (Select0 (FSUBS (FMULS y z) x)) + // cond: x.Block.Func.useFMA(v) + // result: (FMSUBS x y z) + for { + if v_0.Op != OpS390XFSUBS { + break + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XFMULS { + break + } + z := v_0_0.Args[1] + y := v_0_0.Args[0] + if !(x.Block.Func.useFMA(v)) { + break + } + v.reset(OpS390XFMSUBS) + v.AddArg3(x, y, z) + return true + } + return 
false +} +func rewriteValueS390X_OpSelect1(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select1 (Add64carry x y c)) + // result: (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) (Select1 (ADDE x y (Select1 (ADDCconst c [-1])))))) + for { + if v_0.Op != OpAdd64carry { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpS390XADDE, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3 := b.NewValue0(v.Pos, OpS390XADDE, types.NewTuple(typ.UInt64, types.TypeFlags)) + v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v5 := b.NewValue0(v.Pos, OpS390XADDCconst, types.NewTuple(typ.UInt64, types.TypeFlags)) + v5.AuxInt = int16ToAuxInt(-1) + v5.AddArg(c) + v4.AddArg(v5) + v3.AddArg3(x, y, v4) + v2.AddArg(v3) + v0.AddArg3(v1, v1, v2) + v.AddArg(v0) + return true + } + // match: (Select1 (Sub64borrow x y c)) + // result: (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) (Select1 (SUBE x y (Select1 (SUBC (MOVDconst [0]) c))))))) + for { + if v_0.Op != OpSub64borrow { + break + } + c := v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpS390XNEG) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpS390XSUBE, types.NewTuple(typ.UInt64, types.TypeFlags)) + v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(0) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpS390XSUBE, types.NewTuple(typ.UInt64, types.TypeFlags)) + v5 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v6 := b.NewValue0(v.Pos, OpS390XSUBC, types.NewTuple(typ.UInt64, types.TypeFlags)) + v6.AddArg2(v2, c) + v5.AddArg(v6) + v4.AddArg3(x, y, v5) + v3.AddArg(v4) + v1.AddArg3(v2, v2, v3) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // 
match: (Select1 (AddTupleFirst32 _ tuple)) + // result: (Select1 tuple) + for { + if v_0.Op != OpS390XAddTupleFirst32 { + break + } + tuple := v_0.Args[1] + v.reset(OpSelect1) + v.AddArg(tuple) + return true + } + // match: (Select1 (AddTupleFirst64 _ tuple)) + // result: (Select1 tuple) + for { + if v_0.Op != OpS390XAddTupleFirst64 { + break + } + tuple := v_0.Args[1] + v.reset(OpSelect1) + v.AddArg(tuple) + return true + } + // match: (Select1 (ADDCconst (MOVDconst [c]) [d])) + // cond: uint64(c+int64(d)) >= uint64(c) && c+int64(d) == 0 + // result: (FlagEQ) + for { + if v_0.Op != OpS390XADDCconst { + break + } + d := auxIntToInt16(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0_0.AuxInt) + if !(uint64(c+int64(d)) >= uint64(c) && c+int64(d) == 0) { + break + } + v.reset(OpS390XFlagEQ) + return true + } + // match: (Select1 (ADDCconst (MOVDconst [c]) [d])) + // cond: uint64(c+int64(d)) >= uint64(c) && c+int64(d) != 0 + // result: (FlagLT) + for { + if v_0.Op != OpS390XADDCconst { + break + } + d := auxIntToInt16(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0_0.AuxInt) + if !(uint64(c+int64(d)) >= uint64(c) && c+int64(d) != 0) { + break + } + v.reset(OpS390XFlagLT) + return true + } + // match: (Select1 (SUBC (MOVDconst [c]) (MOVDconst [d]))) + // cond: uint64(d) <= uint64(c) && c-d == 0 + // result: (FlagGT) + for { + if v_0.Op != OpS390XSUBC { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0_0.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpS390XMOVDconst { + break + } + d := auxIntToInt64(v_0_1.AuxInt) + if !(uint64(d) <= uint64(c) && c-d == 0) { + break + } + v.reset(OpS390XFlagGT) + return true + } + // match: (Select1 (SUBC (MOVDconst [c]) (MOVDconst [d]))) + // cond: uint64(d) <= uint64(c) && c-d != 0 + // result: (FlagOV) + for { + if v_0.Op != OpS390XSUBC { + 
break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XMOVDconst { + break + } + c := auxIntToInt64(v_0_0.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpS390XMOVDconst { + break + } + d := auxIntToInt64(v_0_1.AuxInt) + if !(uint64(d) <= uint64(c) && c-d != 0) { + break + } + v.reset(OpS390XFlagOV) + return true + } + return false +} +func rewriteValueS390X_OpSlicemask(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Slicemask x) + // result: (SRADconst (NEG x) [63]) + for { + t := v.Type + x := v_0 + v.reset(OpS390XSRADconst) + v.AuxInt = uint8ToAuxInt(63) + v0 := b.NewValue0(v.Pos, OpS390XNEG, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpStore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && t.IsFloat() + // result: (FMOVDstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && t.IsFloat()) { + break + } + v.reset(OpS390XFMOVDstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && t.IsFloat() + // result: (FMOVSstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && t.IsFloat()) { + break + } + v.reset(OpS390XFMOVSstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && !t.IsFloat() + // result: (MOVDstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && !t.IsFloat()) { + break + } + v.reset(OpS390XMOVDstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && !t.IsFloat() + // result: (MOVWstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && !t.IsFloat()) { + break + } + v.reset(OpS390XMOVWstore) + 
v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 2 + // result: (MOVHstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 2) { + break + } + v.reset(OpS390XMOVHstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 1 + // result: (MOVBstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 1) { + break + } + v.reset(OpS390XMOVBstore) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueS390X_OpSub32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Sub32F x y) + // result: (Select0 (FSUBS x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpS390XFSUBS, types.NewTuple(typ.Float32, types.TypeFlags)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpSub64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Sub64F x y) + // result: (Select0 (FSUB x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpS390XFSUB, types.NewTuple(typ.Float64, types.TypeFlags)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpTrunc(v *Value) bool { + v_0 := v.Args[0] + // match: (Trunc x) + // result: (FIDBR [5] x) + for { + x := v_0 + v.reset(OpS390XFIDBR) + v.AuxInt = int8ToAuxInt(5) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpZero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Zero [0] _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_1 + v.copyOf(mem) + return true + } + // match: (Zero [1] destptr mem) + // result: (MOVBstoreconst [0] destptr mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + 
destptr := v_0 + mem := v_1 + v.reset(OpS390XMOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(0) + v.AddArg2(destptr, mem) + return true + } + // match: (Zero [2] destptr mem) + // result: (MOVHstoreconst [0] destptr mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpS390XMOVHstoreconst) + v.AuxInt = valAndOffToAuxInt(0) + v.AddArg2(destptr, mem) + return true + } + // match: (Zero [4] destptr mem) + // result: (MOVWstoreconst [0] destptr mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpS390XMOVWstoreconst) + v.AuxInt = valAndOffToAuxInt(0) + v.AddArg2(destptr, mem) + return true + } + // match: (Zero [8] destptr mem) + // result: (MOVDstoreconst [0] destptr mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpS390XMOVDstoreconst) + v.AuxInt = valAndOffToAuxInt(0) + v.AddArg2(destptr, mem) + return true + } + // match: (Zero [3] destptr mem) + // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVHstoreconst [0] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpS390XMOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2)) + v0 := b.NewValue0(v.Pos, OpS390XMOVHstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [5] destptr mem) + // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVWstoreconst [0] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 5 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpS390XMOVBstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4)) + v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [6] destptr mem) + // result: (MOVHstoreconst [makeValAndOff(0,4)] 
destptr (MOVWstoreconst [0] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpS390XMOVHstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4)) + v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [7] destptr mem) + // result: (MOVWstoreconst [makeValAndOff(0,3)] destptr (MOVWstoreconst [0] destptr mem)) + for { + if auxIntToInt64(v.AuxInt) != 7 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpS390XMOVWstoreconst) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3)) + v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem) + v0.AuxInt = valAndOffToAuxInt(0) + v0.AddArg2(destptr, mem) + v.AddArg2(destptr, v0) + return true + } + // match: (Zero [s] destptr mem) + // cond: s > 0 && s <= 1024 + // result: (CLEAR [makeValAndOff(int32(s), 0)] destptr mem) + for { + s := auxIntToInt64(v.AuxInt) + destptr := v_0 + mem := v_1 + if !(s > 0 && s <= 1024) { + break + } + v.reset(OpS390XCLEAR) + v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s), 0)) + v.AddArg2(destptr, mem) + return true + } + // match: (Zero [s] destptr mem) + // cond: s > 1024 + // result: (LoweredZero [s%256] destptr (ADDconst destptr [(int32(s)/256)*256]) mem) + for { + s := auxIntToInt64(v.AuxInt) + destptr := v_0 + mem := v_1 + if !(s > 1024) { + break + } + v.reset(OpS390XLoweredZero) + v.AuxInt = int64ToAuxInt(s % 256) + v0 := b.NewValue0(v.Pos, OpS390XADDconst, destptr.Type) + v0.AuxInt = int32ToAuxInt((int32(s) / 256) * 256) + v0.AddArg(destptr) + v.AddArg3(destptr, v0, mem) + return true + } + return false +} +func rewriteBlockS390X(b *Block) bool { + typ := &b.Func.Config.Types + switch b.Kind { + case BlockS390XBRC: + // match: (BRC {c} x:(CMP _ _) yes no) + // cond: c&s390x.Unordered != 0 + // result: (BRC {c&^s390x.Unordered} x yes no) + for b.Controls[0].Op == OpS390XCMP { + x 
:= b.Controls[0] + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Unordered != 0) { + break + } + b.resetWithControl(BlockS390XBRC, x) + b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered) + return true + } + // match: (BRC {c} x:(CMPW _ _) yes no) + // cond: c&s390x.Unordered != 0 + // result: (BRC {c&^s390x.Unordered} x yes no) + for b.Controls[0].Op == OpS390XCMPW { + x := b.Controls[0] + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Unordered != 0) { + break + } + b.resetWithControl(BlockS390XBRC, x) + b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered) + return true + } + // match: (BRC {c} x:(CMPU _ _) yes no) + // cond: c&s390x.Unordered != 0 + // result: (BRC {c&^s390x.Unordered} x yes no) + for b.Controls[0].Op == OpS390XCMPU { + x := b.Controls[0] + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Unordered != 0) { + break + } + b.resetWithControl(BlockS390XBRC, x) + b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered) + return true + } + // match: (BRC {c} x:(CMPWU _ _) yes no) + // cond: c&s390x.Unordered != 0 + // result: (BRC {c&^s390x.Unordered} x yes no) + for b.Controls[0].Op == OpS390XCMPWU { + x := b.Controls[0] + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Unordered != 0) { + break + } + b.resetWithControl(BlockS390XBRC, x) + b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered) + return true + } + // match: (BRC {c} x:(CMPconst _) yes no) + // cond: c&s390x.Unordered != 0 + // result: (BRC {c&^s390x.Unordered} x yes no) + for b.Controls[0].Op == OpS390XCMPconst { + x := b.Controls[0] + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Unordered != 0) { + break + } + b.resetWithControl(BlockS390XBRC, x) + b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered) + return true + } + // match: (BRC {c} x:(CMPWconst _) yes no) + // cond: c&s390x.Unordered != 0 + // result: (BRC {c&^s390x.Unordered} x yes no) + for b.Controls[0].Op == OpS390XCMPWconst { + x := b.Controls[0] + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Unordered != 0) { + break + } + b.resetWithControl(BlockS390XBRC, x) + b.Aux = 
s390xCCMaskToAux(c &^ s390x.Unordered) + return true + } + // match: (BRC {c} x:(CMPUconst _) yes no) + // cond: c&s390x.Unordered != 0 + // result: (BRC {c&^s390x.Unordered} x yes no) + for b.Controls[0].Op == OpS390XCMPUconst { + x := b.Controls[0] + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Unordered != 0) { + break + } + b.resetWithControl(BlockS390XBRC, x) + b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered) + return true + } + // match: (BRC {c} x:(CMPWUconst _) yes no) + // cond: c&s390x.Unordered != 0 + // result: (BRC {c&^s390x.Unordered} x yes no) + for b.Controls[0].Op == OpS390XCMPWUconst { + x := b.Controls[0] + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Unordered != 0) { + break + } + b.resetWithControl(BlockS390XBRC, x) + b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered) + return true + } + // match: (BRC {c} (CMP x y) yes no) + // result: (CGRJ {c&^s390x.Unordered} x y yes no) + for b.Controls[0].Op == OpS390XCMP { + v_0 := b.Controls[0] + y := v_0.Args[1] + x := v_0.Args[0] + c := auxToS390xCCMask(b.Aux) + b.resetWithControl2(BlockS390XCGRJ, x, y) + b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered) + return true + } + // match: (BRC {c} (CMPW x y) yes no) + // result: (CRJ {c&^s390x.Unordered} x y yes no) + for b.Controls[0].Op == OpS390XCMPW { + v_0 := b.Controls[0] + y := v_0.Args[1] + x := v_0.Args[0] + c := auxToS390xCCMask(b.Aux) + b.resetWithControl2(BlockS390XCRJ, x, y) + b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered) + return true + } + // match: (BRC {c} (CMPU x y) yes no) + // result: (CLGRJ {c&^s390x.Unordered} x y yes no) + for b.Controls[0].Op == OpS390XCMPU { + v_0 := b.Controls[0] + y := v_0.Args[1] + x := v_0.Args[0] + c := auxToS390xCCMask(b.Aux) + b.resetWithControl2(BlockS390XCLGRJ, x, y) + b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered) + return true + } + // match: (BRC {c} (CMPWU x y) yes no) + // result: (CLRJ {c&^s390x.Unordered} x y yes no) + for b.Controls[0].Op == OpS390XCMPWU { + v_0 := b.Controls[0] + y := v_0.Args[1] + x := 
v_0.Args[0] + c := auxToS390xCCMask(b.Aux) + b.resetWithControl2(BlockS390XCLRJ, x, y) + b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered) + return true + } + // match: (BRC {c} (CMPconst x [y]) yes no) + // cond: y == int32( int8(y)) + // result: (CGIJ {c&^s390x.Unordered} x [ int8(y)] yes no) + for b.Controls[0].Op == OpS390XCMPconst { + v_0 := b.Controls[0] + y := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + c := auxToS390xCCMask(b.Aux) + if !(y == int32(int8(y))) { + break + } + b.resetWithControl(BlockS390XCGIJ, x) + b.AuxInt = int8ToAuxInt(int8(y)) + b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered) + return true + } + // match: (BRC {c} (CMPWconst x [y]) yes no) + // cond: y == int32( int8(y)) + // result: (CIJ {c&^s390x.Unordered} x [ int8(y)] yes no) + for b.Controls[0].Op == OpS390XCMPWconst { + v_0 := b.Controls[0] + y := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + c := auxToS390xCCMask(b.Aux) + if !(y == int32(int8(y))) { + break + } + b.resetWithControl(BlockS390XCIJ, x) + b.AuxInt = int8ToAuxInt(int8(y)) + b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered) + return true + } + // match: (BRC {c} (CMPUconst x [y]) yes no) + // cond: y == int32(uint8(y)) + // result: (CLGIJ {c&^s390x.Unordered} x [uint8(y)] yes no) + for b.Controls[0].Op == OpS390XCMPUconst { + v_0 := b.Controls[0] + y := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + c := auxToS390xCCMask(b.Aux) + if !(y == int32(uint8(y))) { + break + } + b.resetWithControl(BlockS390XCLGIJ, x) + b.AuxInt = uint8ToAuxInt(uint8(y)) + b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered) + return true + } + // match: (BRC {c} (CMPWUconst x [y]) yes no) + // cond: y == int32(uint8(y)) + // result: (CLIJ {c&^s390x.Unordered} x [uint8(y)] yes no) + for b.Controls[0].Op == OpS390XCMPWUconst { + v_0 := b.Controls[0] + y := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + c := auxToS390xCCMask(b.Aux) + if !(y == int32(uint8(y))) { + break + } + b.resetWithControl(BlockS390XCLIJ, x) + b.AuxInt = uint8ToAuxInt(uint8(y)) + 
b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered) + return true + } + // match: (BRC {s390x.Less} (CMPconst x [ 128]) yes no) + // result: (CGIJ {s390x.LessOrEqual} x [ 127] yes no) + for b.Controls[0].Op == OpS390XCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 128 { + break + } + x := v_0.Args[0] + if auxToS390xCCMask(b.Aux) != s390x.Less { + break + } + b.resetWithControl(BlockS390XCGIJ, x) + b.AuxInt = int8ToAuxInt(127) + b.Aux = s390xCCMaskToAux(s390x.LessOrEqual) + return true + } + // match: (BRC {s390x.Less} (CMPWconst x [ 128]) yes no) + // result: (CIJ {s390x.LessOrEqual} x [ 127] yes no) + for b.Controls[0].Op == OpS390XCMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 128 { + break + } + x := v_0.Args[0] + if auxToS390xCCMask(b.Aux) != s390x.Less { + break + } + b.resetWithControl(BlockS390XCIJ, x) + b.AuxInt = int8ToAuxInt(127) + b.Aux = s390xCCMaskToAux(s390x.LessOrEqual) + return true + } + // match: (BRC {s390x.LessOrEqual} (CMPconst x [-129]) yes no) + // result: (CGIJ {s390x.Less} x [-128] yes no) + for b.Controls[0].Op == OpS390XCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != -129 { + break + } + x := v_0.Args[0] + if auxToS390xCCMask(b.Aux) != s390x.LessOrEqual { + break + } + b.resetWithControl(BlockS390XCGIJ, x) + b.AuxInt = int8ToAuxInt(-128) + b.Aux = s390xCCMaskToAux(s390x.Less) + return true + } + // match: (BRC {s390x.LessOrEqual} (CMPWconst x [-129]) yes no) + // result: (CIJ {s390x.Less} x [-128] yes no) + for b.Controls[0].Op == OpS390XCMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != -129 { + break + } + x := v_0.Args[0] + if auxToS390xCCMask(b.Aux) != s390x.LessOrEqual { + break + } + b.resetWithControl(BlockS390XCIJ, x) + b.AuxInt = int8ToAuxInt(-128) + b.Aux = s390xCCMaskToAux(s390x.Less) + return true + } + // match: (BRC {s390x.Greater} (CMPconst x [-129]) yes no) + // result: (CGIJ {s390x.GreaterOrEqual} x [-128] yes no) + for b.Controls[0].Op == 
OpS390XCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != -129 { + break + } + x := v_0.Args[0] + if auxToS390xCCMask(b.Aux) != s390x.Greater { + break + } + b.resetWithControl(BlockS390XCGIJ, x) + b.AuxInt = int8ToAuxInt(-128) + b.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + return true + } + // match: (BRC {s390x.Greater} (CMPWconst x [-129]) yes no) + // result: (CIJ {s390x.GreaterOrEqual} x [-128] yes no) + for b.Controls[0].Op == OpS390XCMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != -129 { + break + } + x := v_0.Args[0] + if auxToS390xCCMask(b.Aux) != s390x.Greater { + break + } + b.resetWithControl(BlockS390XCIJ, x) + b.AuxInt = int8ToAuxInt(-128) + b.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual) + return true + } + // match: (BRC {s390x.GreaterOrEqual} (CMPconst x [ 128]) yes no) + // result: (CGIJ {s390x.Greater} x [ 127] yes no) + for b.Controls[0].Op == OpS390XCMPconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 128 { + break + } + x := v_0.Args[0] + if auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual { + break + } + b.resetWithControl(BlockS390XCGIJ, x) + b.AuxInt = int8ToAuxInt(127) + b.Aux = s390xCCMaskToAux(s390x.Greater) + return true + } + // match: (BRC {s390x.GreaterOrEqual} (CMPWconst x [ 128]) yes no) + // result: (CIJ {s390x.Greater} x [ 127] yes no) + for b.Controls[0].Op == OpS390XCMPWconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 128 { + break + } + x := v_0.Args[0] + if auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual { + break + } + b.resetWithControl(BlockS390XCIJ, x) + b.AuxInt = int8ToAuxInt(127) + b.Aux = s390xCCMaskToAux(s390x.Greater) + return true + } + // match: (BRC {s390x.Less} (CMPWUconst x [256]) yes no) + // result: (CLIJ {s390x.LessOrEqual} x [255] yes no) + for b.Controls[0].Op == OpS390XCMPWUconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 256 { + break + } + x := v_0.Args[0] + if auxToS390xCCMask(b.Aux) != s390x.Less { + break + } + 
b.resetWithControl(BlockS390XCLIJ, x) + b.AuxInt = uint8ToAuxInt(255) + b.Aux = s390xCCMaskToAux(s390x.LessOrEqual) + return true + } + // match: (BRC {s390x.Less} (CMPUconst x [256]) yes no) + // result: (CLGIJ {s390x.LessOrEqual} x [255] yes no) + for b.Controls[0].Op == OpS390XCMPUconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 256 { + break + } + x := v_0.Args[0] + if auxToS390xCCMask(b.Aux) != s390x.Less { + break + } + b.resetWithControl(BlockS390XCLGIJ, x) + b.AuxInt = uint8ToAuxInt(255) + b.Aux = s390xCCMaskToAux(s390x.LessOrEqual) + return true + } + // match: (BRC {s390x.GreaterOrEqual} (CMPWUconst x [256]) yes no) + // result: (CLIJ {s390x.Greater} x [255] yes no) + for b.Controls[0].Op == OpS390XCMPWUconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 256 { + break + } + x := v_0.Args[0] + if auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual { + break + } + b.resetWithControl(BlockS390XCLIJ, x) + b.AuxInt = uint8ToAuxInt(255) + b.Aux = s390xCCMaskToAux(s390x.Greater) + return true + } + // match: (BRC {s390x.GreaterOrEqual} (CMPUconst x [256]) yes no) + // result: (CLGIJ {s390x.Greater} x [255] yes no) + for b.Controls[0].Op == OpS390XCMPUconst { + v_0 := b.Controls[0] + if auxIntToInt32(v_0.AuxInt) != 256 { + break + } + x := v_0.Args[0] + if auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual { + break + } + b.resetWithControl(BlockS390XCLGIJ, x) + b.AuxInt = uint8ToAuxInt(255) + b.Aux = s390xCCMaskToAux(s390x.Greater) + return true + } + // match: (BRC {c} (CMPconst x [y]) yes no) + // cond: y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) + // result: (CLGIJ {c} x [uint8(y)] yes no) + for b.Controls[0].Op == OpS390XCMPconst { + v_0 := b.Controls[0] + y := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + c := auxToS390xCCMask(b.Aux) + if !(y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)) { + break + } + b.resetWithControl(BlockS390XCLGIJ, x) + b.AuxInt = uint8ToAuxInt(uint8(y)) + 
b.Aux = s390xCCMaskToAux(c) + return true + } + // match: (BRC {c} (CMPWconst x [y]) yes no) + // cond: y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) + // result: (CLIJ {c} x [uint8(y)] yes no) + for b.Controls[0].Op == OpS390XCMPWconst { + v_0 := b.Controls[0] + y := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + c := auxToS390xCCMask(b.Aux) + if !(y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)) { + break + } + b.resetWithControl(BlockS390XCLIJ, x) + b.AuxInt = uint8ToAuxInt(uint8(y)) + b.Aux = s390xCCMaskToAux(c) + return true + } + // match: (BRC {c} (CMPUconst x [y]) yes no) + // cond: y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) + // result: (CGIJ {c} x [ int8(y)] yes no) + for b.Controls[0].Op == OpS390XCMPUconst { + v_0 := b.Controls[0] + y := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + c := auxToS390xCCMask(b.Aux) + if !(y == int32(int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)) { + break + } + b.resetWithControl(BlockS390XCGIJ, x) + b.AuxInt = int8ToAuxInt(int8(y)) + b.Aux = s390xCCMaskToAux(c) + return true + } + // match: (BRC {c} (CMPWUconst x [y]) yes no) + // cond: y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) + // result: (CIJ {c} x [ int8(y)] yes no) + for b.Controls[0].Op == OpS390XCMPWUconst { + v_0 := b.Controls[0] + y := auxIntToInt32(v_0.AuxInt) + x := v_0.Args[0] + c := auxToS390xCCMask(b.Aux) + if !(y == int32(int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)) { + break + } + b.resetWithControl(BlockS390XCIJ, x) + b.AuxInt = int8ToAuxInt(int8(y)) + b.Aux = s390xCCMaskToAux(c) + return true + } + // match: (BRC {c} (InvertFlags cmp) yes no) + // result: (BRC {c.ReverseComparison()} cmp yes no) + for b.Controls[0].Op == OpS390XInvertFlags { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + c := auxToS390xCCMask(b.Aux) + b.resetWithControl(BlockS390XBRC, cmp) + b.Aux = s390xCCMaskToAux(c.ReverseComparison()) + return true + } + 
// match: (BRC {c} (FlagEQ) yes no) + // cond: c&s390x.Equal != 0 + // result: (First yes no) + for b.Controls[0].Op == OpS390XFlagEQ { + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Equal != 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (BRC {c} (FlagLT) yes no) + // cond: c&s390x.Less != 0 + // result: (First yes no) + for b.Controls[0].Op == OpS390XFlagLT { + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Less != 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (BRC {c} (FlagGT) yes no) + // cond: c&s390x.Greater != 0 + // result: (First yes no) + for b.Controls[0].Op == OpS390XFlagGT { + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Greater != 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (BRC {c} (FlagOV) yes no) + // cond: c&s390x.Unordered != 0 + // result: (First yes no) + for b.Controls[0].Op == OpS390XFlagOV { + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Unordered != 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (BRC {c} (FlagEQ) yes no) + // cond: c&s390x.Equal == 0 + // result: (First no yes) + for b.Controls[0].Op == OpS390XFlagEQ { + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Equal == 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (BRC {c} (FlagLT) yes no) + // cond: c&s390x.Less == 0 + // result: (First no yes) + for b.Controls[0].Op == OpS390XFlagLT { + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Less == 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (BRC {c} (FlagGT) yes no) + // cond: c&s390x.Greater == 0 + // result: (First no yes) + for b.Controls[0].Op == OpS390XFlagGT { + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Greater == 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (BRC {c} (FlagOV) yes no) + // cond: c&s390x.Unordered == 0 + // result: (First no yes) + for b.Controls[0].Op == OpS390XFlagOV { + c := auxToS390xCCMask(b.Aux) + 
if !(c&s390x.Unordered == 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockS390XCGIJ: + // match: (CGIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Equal != 0 && int64(x) == int64(y) + // result: (First yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToInt8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Equal != 0 && int64(x) == int64(y)) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (CGIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Less != 0 && int64(x) < int64(y) + // result: (First yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToInt8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Less != 0 && int64(x) < int64(y)) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (CGIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Greater != 0 && int64(x) > int64(y) + // result: (First yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToInt8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Greater != 0 && int64(x) > int64(y)) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (CGIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Equal == 0 && int64(x) == int64(y) + // result: (First no yes) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToInt8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Equal == 0 && int64(x) == int64(y)) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (CGIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Less == 0 && int64(x) < int64(y) + // result: (First no yes) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := 
auxIntToInt8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Less == 0 && int64(x) < int64(y)) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (CGIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Greater == 0 && int64(x) > int64(y) + // result: (First no yes) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToInt8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Greater == 0 && int64(x) > int64(y)) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (CGIJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) + // result: (BRC {s390x.NoCarry} carry) + for b.Controls[0].Op == OpSelect0 { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XADDE { + break + } + carry := v_0_0.Args[2] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 { + break + } + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Equal { + break + } + b.resetWithControl(BlockS390XBRC, carry) + b.Aux = s390xCCMaskToAux(s390x.NoCarry) + return true + } + // match: (CGIJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1]) + // result: (BRC {s390x.Carry} carry) + for b.Controls[0].Op == OpSelect0 { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XADDE { + break + } + carry := v_0_0.Args[2] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 { + break + } + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.Equal { + break + } + b.resetWithControl(BlockS390XBRC, carry) + b.Aux = s390xCCMaskToAux(s390x.Carry) + return true + } + // match: (CGIJ 
{s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) + // result: (BRC {s390x.Carry} carry) + for b.Controls[0].Op == OpSelect0 { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XADDE { + break + } + carry := v_0_0.Args[2] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 { + break + } + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater { + break + } + b.resetWithControl(BlockS390XBRC, carry) + b.Aux = s390xCCMaskToAux(s390x.Carry) + return true + } + // match: (CGIJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1]) + // result: (BRC {s390x.NoCarry} carry) + for b.Controls[0].Op == OpSelect0 { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XADDE { + break + } + carry := v_0_0.Args[2] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 { + break + } + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater { + break + } + b.resetWithControl(BlockS390XBRC, carry) + b.Aux = s390xCCMaskToAux(s390x.NoCarry) + return true + } + // match: (CGIJ {s390x.Greater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) + // result: (BRC {s390x.Carry} carry) + for b.Controls[0].Op == OpSelect0 { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XADDE { + break + } + carry := v_0_0.Args[2] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 { + break + } + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Greater { + break + } + 
b.resetWithControl(BlockS390XBRC, carry) + b.Aux = s390xCCMaskToAux(s390x.Carry) + return true + } + // match: (CGIJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) + // result: (BRC {s390x.NoBorrow} borrow) + for b.Controls[0].Op == OpS390XNEG { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XSUBE { + break + } + borrow := v_0_0_0.Args[2] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 { + break + } + v_0_0_0_1 := v_0_0_0.Args[1] + if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Equal { + break + } + b.resetWithControl(BlockS390XBRC, borrow) + b.Aux = s390xCCMaskToAux(s390x.NoBorrow) + return true + } + // match: (CGIJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1]) + // result: (BRC {s390x.Borrow} borrow) + for b.Controls[0].Op == OpS390XNEG { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XSUBE { + break + } + borrow := v_0_0_0.Args[2] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 { + break + } + v_0_0_0_1 := v_0_0_0.Args[1] + if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.Equal { + break + } + b.resetWithControl(BlockS390XBRC, borrow) + b.Aux = s390xCCMaskToAux(s390x.Borrow) + return true + } + // match: (CGIJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) + // result: (BRC {s390x.Borrow} borrow) + for b.Controls[0].Op == OpS390XNEG { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != 
OpS390XSUBE { + break + } + borrow := v_0_0_0.Args[2] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 { + break + } + v_0_0_0_1 := v_0_0_0.Args[1] + if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater { + break + } + b.resetWithControl(BlockS390XBRC, borrow) + b.Aux = s390xCCMaskToAux(s390x.Borrow) + return true + } + // match: (CGIJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1]) + // result: (BRC {s390x.NoBorrow} borrow) + for b.Controls[0].Op == OpS390XNEG { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XSUBE { + break + } + borrow := v_0_0_0.Args[2] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 { + break + } + v_0_0_0_1 := v_0_0_0.Args[1] + if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater { + break + } + b.resetWithControl(BlockS390XBRC, borrow) + b.Aux = s390xCCMaskToAux(s390x.NoBorrow) + return true + } + // match: (CGIJ {s390x.Greater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) + // result: (BRC {s390x.Borrow} borrow) + for b.Controls[0].Op == OpS390XNEG { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XSUBE { + break + } + borrow := v_0_0_0.Args[2] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 { + break + } + v_0_0_0_1 := v_0_0_0.Args[1] + if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Greater { + break + } + 
b.resetWithControl(BlockS390XBRC, borrow) + b.Aux = s390xCCMaskToAux(s390x.Borrow) + return true + } + case BlockS390XCGRJ: + // match: (CGRJ {c} x (MOVDconst [y]) yes no) + // cond: is8Bit(y) + // result: (CGIJ {c} x [ int8(y)] yes no) + for b.Controls[1].Op == OpS390XMOVDconst { + x := b.Controls[0] + v_1 := b.Controls[1] + y := auxIntToInt64(v_1.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(is8Bit(y)) { + break + } + b.resetWithControl(BlockS390XCGIJ, x) + b.AuxInt = int8ToAuxInt(int8(y)) + b.Aux = s390xCCMaskToAux(c) + return true + } + // match: (CGRJ {c} (MOVDconst [x]) y yes no) + // cond: is8Bit(x) + // result: (CGIJ {c.ReverseComparison()} y [ int8(x)] yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := b.Controls[1] + c := auxToS390xCCMask(b.Aux) + if !(is8Bit(x)) { + break + } + b.resetWithControl(BlockS390XCGIJ, y) + b.AuxInt = int8ToAuxInt(int8(x)) + b.Aux = s390xCCMaskToAux(c.ReverseComparison()) + return true + } + // match: (CGRJ {c} x (MOVDconst [y]) yes no) + // cond: !is8Bit(y) && is32Bit(y) + // result: (BRC {c} (CMPconst x [int32(y)]) yes no) + for b.Controls[1].Op == OpS390XMOVDconst { + x := b.Controls[0] + v_1 := b.Controls[1] + y := auxIntToInt64(v_1.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(!is8Bit(y) && is32Bit(y)) { + break + } + v0 := b.NewValue0(x.Pos, OpS390XCMPconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(y)) + v0.AddArg(x) + b.resetWithControl(BlockS390XBRC, v0) + b.Aux = s390xCCMaskToAux(c) + return true + } + // match: (CGRJ {c} (MOVDconst [x]) y yes no) + // cond: !is8Bit(x) && is32Bit(x) + // result: (BRC {c.ReverseComparison()} (CMPconst y [int32(x)]) yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := b.Controls[1] + c := auxToS390xCCMask(b.Aux) + if !(!is8Bit(x) && is32Bit(x)) { + break + } + v0 := b.NewValue0(v_0.Pos, OpS390XCMPconst, types.TypeFlags) + v0.AuxInt = 
int32ToAuxInt(int32(x)) + v0.AddArg(y) + b.resetWithControl(BlockS390XBRC, v0) + b.Aux = s390xCCMaskToAux(c.ReverseComparison()) + return true + } + // match: (CGRJ {c} x y yes no) + // cond: x == y && c&s390x.Equal != 0 + // result: (First yes no) + for { + x := b.Controls[0] + y := b.Controls[1] + c := auxToS390xCCMask(b.Aux) + if !(x == y && c&s390x.Equal != 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (CGRJ {c} x y yes no) + // cond: x == y && c&s390x.Equal == 0 + // result: (First no yes) + for { + x := b.Controls[0] + y := b.Controls[1] + c := auxToS390xCCMask(b.Aux) + if !(x == y && c&s390x.Equal == 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockS390XCIJ: + // match: (CIJ {c} (MOVWreg x) [y] yes no) + // result: (CIJ {c} x [y] yes no) + for b.Controls[0].Op == OpS390XMOVWreg { + v_0 := b.Controls[0] + x := v_0.Args[0] + y := auxIntToInt8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + b.resetWithControl(BlockS390XCIJ, x) + b.AuxInt = int8ToAuxInt(y) + b.Aux = s390xCCMaskToAux(c) + return true + } + // match: (CIJ {c} (MOVWZreg x) [y] yes no) + // result: (CIJ {c} x [y] yes no) + for b.Controls[0].Op == OpS390XMOVWZreg { + v_0 := b.Controls[0] + x := v_0.Args[0] + y := auxIntToInt8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + b.resetWithControl(BlockS390XCIJ, x) + b.AuxInt = int8ToAuxInt(y) + b.Aux = s390xCCMaskToAux(c) + return true + } + // match: (CIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Equal != 0 && int32(x) == int32(y) + // result: (First yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToInt8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Equal != 0 && int32(x) == int32(y)) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (CIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Less != 0 && int32(x) < int32(y) + // result: (First yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + 
v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToInt8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Less != 0 && int32(x) < int32(y)) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (CIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Greater != 0 && int32(x) > int32(y) + // result: (First yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToInt8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Greater != 0 && int32(x) > int32(y)) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (CIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Equal == 0 && int32(x) == int32(y) + // result: (First no yes) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToInt8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Equal == 0 && int32(x) == int32(y)) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (CIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Less == 0 && int32(x) < int32(y) + // result: (First no yes) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToInt8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Less == 0 && int32(x) < int32(y)) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (CIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Greater == 0 && int32(x) > int32(y) + // result: (First no yes) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToInt8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Greater == 0 && int32(x) > int32(y)) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockS390XCLGIJ: + // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Equal != 0 && uint64(x) == 
uint64(y) + // result: (First yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToUint8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Equal != 0 && uint64(x) == uint64(y)) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Less != 0 && uint64(x) < uint64(y) + // result: (First yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToUint8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Less != 0 && uint64(x) < uint64(y)) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Greater != 0 && uint64(x) > uint64(y) + // result: (First yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToUint8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Greater != 0 && uint64(x) > uint64(y)) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Equal == 0 && uint64(x) == uint64(y) + // result: (First no yes) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToUint8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Equal == 0 && uint64(x) == uint64(y)) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Less == 0 && uint64(x) < uint64(y) + // result: (First no yes) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToUint8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Less == 0 && uint64(x) < uint64(y)) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (CLGIJ {c} (MOVDconst 
[x]) [y] yes no) + // cond: c&s390x.Greater == 0 && uint64(x) > uint64(y) + // result: (First no yes) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToUint8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Greater == 0 && uint64(x) > uint64(y)) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (CLGIJ {s390x.GreaterOrEqual} _ [0] yes no) + // result: (First yes no) + for { + if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual { + break + } + b.Reset(BlockFirst) + return true + } + // match: (CLGIJ {s390x.Less} _ [0] yes no) + // result: (First no yes) + for { + if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Less { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (CLGIJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) + // result: (BRC {s390x.NoCarry} carry) + for b.Controls[0].Op == OpSelect0 { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XADDE { + break + } + carry := v_0_0.Args[2] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 { + break + } + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Equal { + break + } + b.resetWithControl(BlockS390XBRC, carry) + b.Aux = s390xCCMaskToAux(s390x.NoCarry) + return true + } + // match: (CLGIJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1]) + // result: (BRC {s390x.Carry} carry) + for b.Controls[0].Op == OpSelect0 { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XADDE { + break + } + carry := v_0_0.Args[2] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 { + break + } + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != 
OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.Equal { + break + } + b.resetWithControl(BlockS390XBRC, carry) + b.Aux = s390xCCMaskToAux(s390x.Carry) + return true + } + // match: (CLGIJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) + // result: (BRC {s390x.Carry} carry) + for b.Controls[0].Op == OpSelect0 { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XADDE { + break + } + carry := v_0_0.Args[2] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 { + break + } + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater { + break + } + b.resetWithControl(BlockS390XBRC, carry) + b.Aux = s390xCCMaskToAux(s390x.Carry) + return true + } + // match: (CLGIJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1]) + // result: (BRC {s390x.NoCarry} carry) + for b.Controls[0].Op == OpSelect0 { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XADDE { + break + } + carry := v_0_0.Args[2] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 { + break + } + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater { + break + } + b.resetWithControl(BlockS390XBRC, carry) + b.Aux = s390xCCMaskToAux(s390x.NoCarry) + return true + } + // match: (CLGIJ {s390x.Greater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) + // result: (BRC {s390x.Carry} carry) + for b.Controls[0].Op == OpSelect0 { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XADDE { + break + } + carry := v_0_0.Args[2] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != 
OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 { + break + } + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Greater { + break + } + b.resetWithControl(BlockS390XBRC, carry) + b.Aux = s390xCCMaskToAux(s390x.Carry) + return true + } + // match: (CLGIJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) + // result: (BRC {s390x.NoBorrow} borrow) + for b.Controls[0].Op == OpS390XNEG { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XSUBE { + break + } + borrow := v_0_0_0.Args[2] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 { + break + } + v_0_0_0_1 := v_0_0_0.Args[1] + if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Equal { + break + } + b.resetWithControl(BlockS390XBRC, borrow) + b.Aux = s390xCCMaskToAux(s390x.NoBorrow) + return true + } + // match: (CLGIJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1]) + // result: (BRC {s390x.Borrow} borrow) + for b.Controls[0].Op == OpS390XNEG { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XSUBE { + break + } + borrow := v_0_0_0.Args[2] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 { + break + } + v_0_0_0_1 := v_0_0_0.Args[1] + if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.Equal { + break + } + b.resetWithControl(BlockS390XBRC, borrow) + b.Aux = s390xCCMaskToAux(s390x.Borrow) + return true + } + // match: (CLGIJ {s390x.LessOrGreater} (NEG (Select0 
(SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) + // result: (BRC {s390x.Borrow} borrow) + for b.Controls[0].Op == OpS390XNEG { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XSUBE { + break + } + borrow := v_0_0_0.Args[2] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 { + break + } + v_0_0_0_1 := v_0_0_0.Args[1] + if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater { + break + } + b.resetWithControl(BlockS390XBRC, borrow) + b.Aux = s390xCCMaskToAux(s390x.Borrow) + return true + } + // match: (CLGIJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1]) + // result: (BRC {s390x.NoBorrow} borrow) + for b.Controls[0].Op == OpS390XNEG { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XSUBE { + break + } + borrow := v_0_0_0.Args[2] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 { + break + } + v_0_0_0_1 := v_0_0_0.Args[1] + if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater { + break + } + b.resetWithControl(BlockS390XBRC, borrow) + b.Aux = s390xCCMaskToAux(s390x.NoBorrow) + return true + } + // match: (CLGIJ {s390x.Greater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) + // result: (BRC {s390x.Borrow} borrow) + for b.Controls[0].Op == OpS390XNEG { + v_0 := b.Controls[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XSUBE { + break + } + borrow := v_0_0_0.Args[2] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpS390XMOVDconst 
|| auxIntToInt64(v_0_0_0_0.AuxInt) != 0 { + break + } + v_0_0_0_1 := v_0_0_0.Args[1] + if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Greater { + break + } + b.resetWithControl(BlockS390XBRC, borrow) + b.Aux = s390xCCMaskToAux(s390x.Borrow) + return true + } + case BlockS390XCLGRJ: + // match: (CLGRJ {c} x (MOVDconst [y]) yes no) + // cond: isU8Bit(y) + // result: (CLGIJ {c} x [uint8(y)] yes no) + for b.Controls[1].Op == OpS390XMOVDconst { + x := b.Controls[0] + v_1 := b.Controls[1] + y := auxIntToInt64(v_1.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(isU8Bit(y)) { + break + } + b.resetWithControl(BlockS390XCLGIJ, x) + b.AuxInt = uint8ToAuxInt(uint8(y)) + b.Aux = s390xCCMaskToAux(c) + return true + } + // match: (CLGRJ {c} (MOVDconst [x]) y yes no) + // cond: isU8Bit(x) + // result: (CLGIJ {c.ReverseComparison()} y [uint8(x)] yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := b.Controls[1] + c := auxToS390xCCMask(b.Aux) + if !(isU8Bit(x)) { + break + } + b.resetWithControl(BlockS390XCLGIJ, y) + b.AuxInt = uint8ToAuxInt(uint8(x)) + b.Aux = s390xCCMaskToAux(c.ReverseComparison()) + return true + } + // match: (CLGRJ {c} x (MOVDconst [y]) yes no) + // cond: !isU8Bit(y) && isU32Bit(y) + // result: (BRC {c} (CMPUconst x [int32(y)]) yes no) + for b.Controls[1].Op == OpS390XMOVDconst { + x := b.Controls[0] + v_1 := b.Controls[1] + y := auxIntToInt64(v_1.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(!isU8Bit(y) && isU32Bit(y)) { + break + } + v0 := b.NewValue0(x.Pos, OpS390XCMPUconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(y)) + v0.AddArg(x) + b.resetWithControl(BlockS390XBRC, v0) + b.Aux = s390xCCMaskToAux(c) + return true + } + // match: (CLGRJ {c} (MOVDconst [x]) y yes no) + // cond: !isU8Bit(x) && isU32Bit(x) + // result: (BRC {c.ReverseComparison()} (CMPUconst y [int32(x)]) yes no) + for 
b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := b.Controls[1] + c := auxToS390xCCMask(b.Aux) + if !(!isU8Bit(x) && isU32Bit(x)) { + break + } + v0 := b.NewValue0(v_0.Pos, OpS390XCMPUconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(x)) + v0.AddArg(y) + b.resetWithControl(BlockS390XBRC, v0) + b.Aux = s390xCCMaskToAux(c.ReverseComparison()) + return true + } + // match: (CLGRJ {c} x y yes no) + // cond: x == y && c&s390x.Equal != 0 + // result: (First yes no) + for { + x := b.Controls[0] + y := b.Controls[1] + c := auxToS390xCCMask(b.Aux) + if !(x == y && c&s390x.Equal != 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (CLGRJ {c} x y yes no) + // cond: x == y && c&s390x.Equal == 0 + // result: (First no yes) + for { + x := b.Controls[0] + y := b.Controls[1] + c := auxToS390xCCMask(b.Aux) + if !(x == y && c&s390x.Equal == 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockS390XCLIJ: + // match: (CLIJ {s390x.LessOrGreater} (LOCGR {d} (MOVDconst [0]) (MOVDconst [x]) cmp) [0] yes no) + // cond: int32(x) != 0 + // result: (BRC {d} cmp yes no) + for b.Controls[0].Op == OpS390XLOCGR { + v_0 := b.Controls[0] + d := auxToS390xCCMask(v_0.Aux) + cmp := v_0.Args[2] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0.AuxInt) != 0 { + break + } + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpS390XMOVDconst { + break + } + x := auxIntToInt64(v_0_1.AuxInt) + if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater || !(int32(x) != 0) { + break + } + b.resetWithControl(BlockS390XBRC, cmp) + b.Aux = s390xCCMaskToAux(d) + return true + } + // match: (CLIJ {c} (MOVWreg x) [y] yes no) + // result: (CLIJ {c} x [y] yes no) + for b.Controls[0].Op == OpS390XMOVWreg { + v_0 := b.Controls[0] + x := v_0.Args[0] + y := auxIntToUint8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + b.resetWithControl(BlockS390XCLIJ, x) + b.AuxInt = 
uint8ToAuxInt(y) + b.Aux = s390xCCMaskToAux(c) + return true + } + // match: (CLIJ {c} (MOVWZreg x) [y] yes no) + // result: (CLIJ {c} x [y] yes no) + for b.Controls[0].Op == OpS390XMOVWZreg { + v_0 := b.Controls[0] + x := v_0.Args[0] + y := auxIntToUint8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + b.resetWithControl(BlockS390XCLIJ, x) + b.AuxInt = uint8ToAuxInt(y) + b.Aux = s390xCCMaskToAux(c) + return true + } + // match: (CLIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Equal != 0 && uint32(x) == uint32(y) + // result: (First yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToUint8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Equal != 0 && uint32(x) == uint32(y)) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (CLIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Less != 0 && uint32(x) < uint32(y) + // result: (First yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToUint8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Less != 0 && uint32(x) < uint32(y)) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (CLIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Greater != 0 && uint32(x) > uint32(y) + // result: (First yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToUint8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Greater != 0 && uint32(x) > uint32(y)) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (CLIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Equal == 0 && uint32(x) == uint32(y) + // result: (First no yes) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToUint8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Equal == 0 && uint32(x) == uint32(y)) { + break + } + 
b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (CLIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Less == 0 && uint32(x) < uint32(y) + // result: (First no yes) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToUint8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Less == 0 && uint32(x) < uint32(y)) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (CLIJ {c} (MOVDconst [x]) [y] yes no) + // cond: c&s390x.Greater == 0 && uint32(x) > uint32(y) + // result: (First no yes) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := auxIntToUint8(b.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(c&s390x.Greater == 0 && uint32(x) > uint32(y)) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (CLIJ {s390x.GreaterOrEqual} _ [0] yes no) + // result: (First yes no) + for { + if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual { + break + } + b.Reset(BlockFirst) + return true + } + // match: (CLIJ {s390x.Less} _ [0] yes no) + // result: (First no yes) + for { + if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Less { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockS390XCLRJ: + // match: (CLRJ {c} x (MOVDconst [y]) yes no) + // cond: isU8Bit(y) + // result: (CLIJ {c} x [uint8(y)] yes no) + for b.Controls[1].Op == OpS390XMOVDconst { + x := b.Controls[0] + v_1 := b.Controls[1] + y := auxIntToInt64(v_1.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(isU8Bit(y)) { + break + } + b.resetWithControl(BlockS390XCLIJ, x) + b.AuxInt = uint8ToAuxInt(uint8(y)) + b.Aux = s390xCCMaskToAux(c) + return true + } + // match: (CLRJ {c} (MOVDconst [x]) y yes no) + // cond: isU8Bit(x) + // result: (CLIJ {c.ReverseComparison()} y [uint8(x)] yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + 
v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := b.Controls[1] + c := auxToS390xCCMask(b.Aux) + if !(isU8Bit(x)) { + break + } + b.resetWithControl(BlockS390XCLIJ, y) + b.AuxInt = uint8ToAuxInt(uint8(x)) + b.Aux = s390xCCMaskToAux(c.ReverseComparison()) + return true + } + // match: (CLRJ {c} x (MOVDconst [y]) yes no) + // cond: !isU8Bit(y) && isU32Bit(y) + // result: (BRC {c} (CMPWUconst x [int32(y)]) yes no) + for b.Controls[1].Op == OpS390XMOVDconst { + x := b.Controls[0] + v_1 := b.Controls[1] + y := auxIntToInt64(v_1.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(!isU8Bit(y) && isU32Bit(y)) { + break + } + v0 := b.NewValue0(x.Pos, OpS390XCMPWUconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(y)) + v0.AddArg(x) + b.resetWithControl(BlockS390XBRC, v0) + b.Aux = s390xCCMaskToAux(c) + return true + } + // match: (CLRJ {c} (MOVDconst [x]) y yes no) + // cond: !isU8Bit(x) && isU32Bit(x) + // result: (BRC {c.ReverseComparison()} (CMPWUconst y [int32(x)]) yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := b.Controls[1] + c := auxToS390xCCMask(b.Aux) + if !(!isU8Bit(x) && isU32Bit(x)) { + break + } + v0 := b.NewValue0(v_0.Pos, OpS390XCMPWUconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(x)) + v0.AddArg(y) + b.resetWithControl(BlockS390XBRC, v0) + b.Aux = s390xCCMaskToAux(c.ReverseComparison()) + return true + } + // match: (CLRJ {c} x y yes no) + // cond: x == y && c&s390x.Equal != 0 + // result: (First yes no) + for { + x := b.Controls[0] + y := b.Controls[1] + c := auxToS390xCCMask(b.Aux) + if !(x == y && c&s390x.Equal != 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (CLRJ {c} x y yes no) + // cond: x == y && c&s390x.Equal == 0 + // result: (First no yes) + for { + x := b.Controls[0] + y := b.Controls[1] + c := auxToS390xCCMask(b.Aux) + if !(x == y && c&s390x.Equal == 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case 
BlockS390XCRJ: + // match: (CRJ {c} x (MOVDconst [y]) yes no) + // cond: is8Bit(y) + // result: (CIJ {c} x [ int8(y)] yes no) + for b.Controls[1].Op == OpS390XMOVDconst { + x := b.Controls[0] + v_1 := b.Controls[1] + y := auxIntToInt64(v_1.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(is8Bit(y)) { + break + } + b.resetWithControl(BlockS390XCIJ, x) + b.AuxInt = int8ToAuxInt(int8(y)) + b.Aux = s390xCCMaskToAux(c) + return true + } + // match: (CRJ {c} (MOVDconst [x]) y yes no) + // cond: is8Bit(x) + // result: (CIJ {c.ReverseComparison()} y [ int8(x)] yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := b.Controls[1] + c := auxToS390xCCMask(b.Aux) + if !(is8Bit(x)) { + break + } + b.resetWithControl(BlockS390XCIJ, y) + b.AuxInt = int8ToAuxInt(int8(x)) + b.Aux = s390xCCMaskToAux(c.ReverseComparison()) + return true + } + // match: (CRJ {c} x (MOVDconst [y]) yes no) + // cond: !is8Bit(y) && is32Bit(y) + // result: (BRC {c} (CMPWconst x [int32(y)]) yes no) + for b.Controls[1].Op == OpS390XMOVDconst { + x := b.Controls[0] + v_1 := b.Controls[1] + y := auxIntToInt64(v_1.AuxInt) + c := auxToS390xCCMask(b.Aux) + if !(!is8Bit(y) && is32Bit(y)) { + break + } + v0 := b.NewValue0(x.Pos, OpS390XCMPWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(y)) + v0.AddArg(x) + b.resetWithControl(BlockS390XBRC, v0) + b.Aux = s390xCCMaskToAux(c) + return true + } + // match: (CRJ {c} (MOVDconst [x]) y yes no) + // cond: !is8Bit(x) && is32Bit(x) + // result: (BRC {c.ReverseComparison()} (CMPWconst y [int32(x)]) yes no) + for b.Controls[0].Op == OpS390XMOVDconst { + v_0 := b.Controls[0] + x := auxIntToInt64(v_0.AuxInt) + y := b.Controls[1] + c := auxToS390xCCMask(b.Aux) + if !(!is8Bit(x) && is32Bit(x)) { + break + } + v0 := b.NewValue0(v_0.Pos, OpS390XCMPWconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(int32(x)) + v0.AddArg(y) + b.resetWithControl(BlockS390XBRC, v0) + b.Aux = s390xCCMaskToAux(c.ReverseComparison()) + 
return true + } + // match: (CRJ {c} x y yes no) + // cond: x == y && c&s390x.Equal != 0 + // result: (First yes no) + for { + x := b.Controls[0] + y := b.Controls[1] + c := auxToS390xCCMask(b.Aux) + if !(x == y && c&s390x.Equal != 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (CRJ {c} x y yes no) + // cond: x == y && c&s390x.Equal == 0 + // result: (First no yes) + for { + x := b.Controls[0] + y := b.Controls[1] + c := auxToS390xCCMask(b.Aux) + if !(x == y && c&s390x.Equal == 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockIf: + // match: (If cond yes no) + // result: (CLIJ {s390x.LessOrGreater} (MOVBZreg cond) [0] yes no) + for { + cond := b.Controls[0] + v0 := b.NewValue0(cond.Pos, OpS390XMOVBZreg, typ.Bool) + v0.AddArg(cond) + b.resetWithControl(BlockS390XCLIJ, v0) + b.AuxInt = uint8ToAuxInt(0) + b.Aux = s390xCCMaskToAux(s390x.LessOrGreater) + return true + } + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteWasm.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteWasm.go new file mode 100644 index 0000000000000000000000000000000000000000..6f83aea13afc5a656c5f98136cae6a0f7ce2b2b1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewriteWasm.go @@ -0,0 +1,4877 @@ +// Code generated from _gen/Wasm.rules using 'go generate'; DO NOT EDIT. 
+ +package ssa + +import "internal/buildcfg" +import "math" +import "cmd/compile/internal/types" + +func rewriteValueWasm(v *Value) bool { + switch v.Op { + case OpAbs: + v.Op = OpWasmF64Abs + return true + case OpAdd16: + v.Op = OpWasmI64Add + return true + case OpAdd32: + v.Op = OpWasmI64Add + return true + case OpAdd32F: + v.Op = OpWasmF32Add + return true + case OpAdd64: + v.Op = OpWasmI64Add + return true + case OpAdd64F: + v.Op = OpWasmF64Add + return true + case OpAdd8: + v.Op = OpWasmI64Add + return true + case OpAddPtr: + v.Op = OpWasmI64Add + return true + case OpAddr: + return rewriteValueWasm_OpAddr(v) + case OpAnd16: + v.Op = OpWasmI64And + return true + case OpAnd32: + v.Op = OpWasmI64And + return true + case OpAnd64: + v.Op = OpWasmI64And + return true + case OpAnd8: + v.Op = OpWasmI64And + return true + case OpAndB: + v.Op = OpWasmI64And + return true + case OpBitLen64: + return rewriteValueWasm_OpBitLen64(v) + case OpCeil: + v.Op = OpWasmF64Ceil + return true + case OpClosureCall: + v.Op = OpWasmLoweredClosureCall + return true + case OpCom16: + return rewriteValueWasm_OpCom16(v) + case OpCom32: + return rewriteValueWasm_OpCom32(v) + case OpCom64: + return rewriteValueWasm_OpCom64(v) + case OpCom8: + return rewriteValueWasm_OpCom8(v) + case OpCondSelect: + v.Op = OpWasmSelect + return true + case OpConst16: + return rewriteValueWasm_OpConst16(v) + case OpConst32: + return rewriteValueWasm_OpConst32(v) + case OpConst32F: + v.Op = OpWasmF32Const + return true + case OpConst64: + v.Op = OpWasmI64Const + return true + case OpConst64F: + v.Op = OpWasmF64Const + return true + case OpConst8: + return rewriteValueWasm_OpConst8(v) + case OpConstBool: + return rewriteValueWasm_OpConstBool(v) + case OpConstNil: + return rewriteValueWasm_OpConstNil(v) + case OpConvert: + v.Op = OpWasmLoweredConvert + return true + case OpCopysign: + v.Op = OpWasmF64Copysign + return true + case OpCtz16: + return rewriteValueWasm_OpCtz16(v) + case OpCtz16NonZero: + v.Op = 
OpWasmI64Ctz + return true + case OpCtz32: + return rewriteValueWasm_OpCtz32(v) + case OpCtz32NonZero: + v.Op = OpWasmI64Ctz + return true + case OpCtz64: + v.Op = OpWasmI64Ctz + return true + case OpCtz64NonZero: + v.Op = OpWasmI64Ctz + return true + case OpCtz8: + return rewriteValueWasm_OpCtz8(v) + case OpCtz8NonZero: + v.Op = OpWasmI64Ctz + return true + case OpCvt32Fto32: + v.Op = OpWasmI64TruncSatF32S + return true + case OpCvt32Fto32U: + v.Op = OpWasmI64TruncSatF32U + return true + case OpCvt32Fto64: + v.Op = OpWasmI64TruncSatF32S + return true + case OpCvt32Fto64F: + v.Op = OpWasmF64PromoteF32 + return true + case OpCvt32Fto64U: + v.Op = OpWasmI64TruncSatF32U + return true + case OpCvt32Uto32F: + return rewriteValueWasm_OpCvt32Uto32F(v) + case OpCvt32Uto64F: + return rewriteValueWasm_OpCvt32Uto64F(v) + case OpCvt32to32F: + return rewriteValueWasm_OpCvt32to32F(v) + case OpCvt32to64F: + return rewriteValueWasm_OpCvt32to64F(v) + case OpCvt64Fto32: + v.Op = OpWasmI64TruncSatF64S + return true + case OpCvt64Fto32F: + v.Op = OpWasmF32DemoteF64 + return true + case OpCvt64Fto32U: + v.Op = OpWasmI64TruncSatF64U + return true + case OpCvt64Fto64: + v.Op = OpWasmI64TruncSatF64S + return true + case OpCvt64Fto64U: + v.Op = OpWasmI64TruncSatF64U + return true + case OpCvt64Uto32F: + v.Op = OpWasmF32ConvertI64U + return true + case OpCvt64Uto64F: + v.Op = OpWasmF64ConvertI64U + return true + case OpCvt64to32F: + v.Op = OpWasmF32ConvertI64S + return true + case OpCvt64to64F: + v.Op = OpWasmF64ConvertI64S + return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true + case OpDiv16: + return rewriteValueWasm_OpDiv16(v) + case OpDiv16u: + return rewriteValueWasm_OpDiv16u(v) + case OpDiv32: + return rewriteValueWasm_OpDiv32(v) + case OpDiv32F: + v.Op = OpWasmF32Div + return true + case OpDiv32u: + return rewriteValueWasm_OpDiv32u(v) + case OpDiv64: + return rewriteValueWasm_OpDiv64(v) + case OpDiv64F: + v.Op = OpWasmF64Div + return true + case OpDiv64u: + v.Op = 
OpWasmI64DivU + return true + case OpDiv8: + return rewriteValueWasm_OpDiv8(v) + case OpDiv8u: + return rewriteValueWasm_OpDiv8u(v) + case OpEq16: + return rewriteValueWasm_OpEq16(v) + case OpEq32: + return rewriteValueWasm_OpEq32(v) + case OpEq32F: + v.Op = OpWasmF32Eq + return true + case OpEq64: + v.Op = OpWasmI64Eq + return true + case OpEq64F: + v.Op = OpWasmF64Eq + return true + case OpEq8: + return rewriteValueWasm_OpEq8(v) + case OpEqB: + v.Op = OpWasmI64Eq + return true + case OpEqPtr: + v.Op = OpWasmI64Eq + return true + case OpFloor: + v.Op = OpWasmF64Floor + return true + case OpGetCallerPC: + v.Op = OpWasmLoweredGetCallerPC + return true + case OpGetCallerSP: + v.Op = OpWasmLoweredGetCallerSP + return true + case OpGetClosurePtr: + v.Op = OpWasmLoweredGetClosurePtr + return true + case OpInterCall: + v.Op = OpWasmLoweredInterCall + return true + case OpIsInBounds: + v.Op = OpWasmI64LtU + return true + case OpIsNonNil: + return rewriteValueWasm_OpIsNonNil(v) + case OpIsSliceInBounds: + v.Op = OpWasmI64LeU + return true + case OpLeq16: + return rewriteValueWasm_OpLeq16(v) + case OpLeq16U: + return rewriteValueWasm_OpLeq16U(v) + case OpLeq32: + return rewriteValueWasm_OpLeq32(v) + case OpLeq32F: + v.Op = OpWasmF32Le + return true + case OpLeq32U: + return rewriteValueWasm_OpLeq32U(v) + case OpLeq64: + v.Op = OpWasmI64LeS + return true + case OpLeq64F: + v.Op = OpWasmF64Le + return true + case OpLeq64U: + v.Op = OpWasmI64LeU + return true + case OpLeq8: + return rewriteValueWasm_OpLeq8(v) + case OpLeq8U: + return rewriteValueWasm_OpLeq8U(v) + case OpLess16: + return rewriteValueWasm_OpLess16(v) + case OpLess16U: + return rewriteValueWasm_OpLess16U(v) + case OpLess32: + return rewriteValueWasm_OpLess32(v) + case OpLess32F: + v.Op = OpWasmF32Lt + return true + case OpLess32U: + return rewriteValueWasm_OpLess32U(v) + case OpLess64: + v.Op = OpWasmI64LtS + return true + case OpLess64F: + v.Op = OpWasmF64Lt + return true + case OpLess64U: + v.Op = OpWasmI64LtU 
+ return true + case OpLess8: + return rewriteValueWasm_OpLess8(v) + case OpLess8U: + return rewriteValueWasm_OpLess8U(v) + case OpLoad: + return rewriteValueWasm_OpLoad(v) + case OpLocalAddr: + return rewriteValueWasm_OpLocalAddr(v) + case OpLsh16x16: + return rewriteValueWasm_OpLsh16x16(v) + case OpLsh16x32: + return rewriteValueWasm_OpLsh16x32(v) + case OpLsh16x64: + v.Op = OpLsh64x64 + return true + case OpLsh16x8: + return rewriteValueWasm_OpLsh16x8(v) + case OpLsh32x16: + return rewriteValueWasm_OpLsh32x16(v) + case OpLsh32x32: + return rewriteValueWasm_OpLsh32x32(v) + case OpLsh32x64: + v.Op = OpLsh64x64 + return true + case OpLsh32x8: + return rewriteValueWasm_OpLsh32x8(v) + case OpLsh64x16: + return rewriteValueWasm_OpLsh64x16(v) + case OpLsh64x32: + return rewriteValueWasm_OpLsh64x32(v) + case OpLsh64x64: + return rewriteValueWasm_OpLsh64x64(v) + case OpLsh64x8: + return rewriteValueWasm_OpLsh64x8(v) + case OpLsh8x16: + return rewriteValueWasm_OpLsh8x16(v) + case OpLsh8x32: + return rewriteValueWasm_OpLsh8x32(v) + case OpLsh8x64: + v.Op = OpLsh64x64 + return true + case OpLsh8x8: + return rewriteValueWasm_OpLsh8x8(v) + case OpMod16: + return rewriteValueWasm_OpMod16(v) + case OpMod16u: + return rewriteValueWasm_OpMod16u(v) + case OpMod32: + return rewriteValueWasm_OpMod32(v) + case OpMod32u: + return rewriteValueWasm_OpMod32u(v) + case OpMod64: + return rewriteValueWasm_OpMod64(v) + case OpMod64u: + v.Op = OpWasmI64RemU + return true + case OpMod8: + return rewriteValueWasm_OpMod8(v) + case OpMod8u: + return rewriteValueWasm_OpMod8u(v) + case OpMove: + return rewriteValueWasm_OpMove(v) + case OpMul16: + v.Op = OpWasmI64Mul + return true + case OpMul32: + v.Op = OpWasmI64Mul + return true + case OpMul32F: + v.Op = OpWasmF32Mul + return true + case OpMul64: + v.Op = OpWasmI64Mul + return true + case OpMul64F: + v.Op = OpWasmF64Mul + return true + case OpMul8: + v.Op = OpWasmI64Mul + return true + case OpNeg16: + return rewriteValueWasm_OpNeg16(v) + case 
OpNeg32: + return rewriteValueWasm_OpNeg32(v) + case OpNeg32F: + v.Op = OpWasmF32Neg + return true + case OpNeg64: + return rewriteValueWasm_OpNeg64(v) + case OpNeg64F: + v.Op = OpWasmF64Neg + return true + case OpNeg8: + return rewriteValueWasm_OpNeg8(v) + case OpNeq16: + return rewriteValueWasm_OpNeq16(v) + case OpNeq32: + return rewriteValueWasm_OpNeq32(v) + case OpNeq32F: + v.Op = OpWasmF32Ne + return true + case OpNeq64: + v.Op = OpWasmI64Ne + return true + case OpNeq64F: + v.Op = OpWasmF64Ne + return true + case OpNeq8: + return rewriteValueWasm_OpNeq8(v) + case OpNeqB: + v.Op = OpWasmI64Ne + return true + case OpNeqPtr: + v.Op = OpWasmI64Ne + return true + case OpNilCheck: + v.Op = OpWasmLoweredNilCheck + return true + case OpNot: + v.Op = OpWasmI64Eqz + return true + case OpOffPtr: + v.Op = OpWasmI64AddConst + return true + case OpOr16: + v.Op = OpWasmI64Or + return true + case OpOr32: + v.Op = OpWasmI64Or + return true + case OpOr64: + v.Op = OpWasmI64Or + return true + case OpOr8: + v.Op = OpWasmI64Or + return true + case OpOrB: + v.Op = OpWasmI64Or + return true + case OpPopCount16: + return rewriteValueWasm_OpPopCount16(v) + case OpPopCount32: + return rewriteValueWasm_OpPopCount32(v) + case OpPopCount64: + v.Op = OpWasmI64Popcnt + return true + case OpPopCount8: + return rewriteValueWasm_OpPopCount8(v) + case OpRotateLeft16: + return rewriteValueWasm_OpRotateLeft16(v) + case OpRotateLeft32: + v.Op = OpWasmI32Rotl + return true + case OpRotateLeft64: + v.Op = OpWasmI64Rotl + return true + case OpRotateLeft8: + return rewriteValueWasm_OpRotateLeft8(v) + case OpRound32F: + v.Op = OpCopy + return true + case OpRound64F: + v.Op = OpCopy + return true + case OpRoundToEven: + v.Op = OpWasmF64Nearest + return true + case OpRsh16Ux16: + return rewriteValueWasm_OpRsh16Ux16(v) + case OpRsh16Ux32: + return rewriteValueWasm_OpRsh16Ux32(v) + case OpRsh16Ux64: + return rewriteValueWasm_OpRsh16Ux64(v) + case OpRsh16Ux8: + return rewriteValueWasm_OpRsh16Ux8(v) + case 
OpRsh16x16: + return rewriteValueWasm_OpRsh16x16(v) + case OpRsh16x32: + return rewriteValueWasm_OpRsh16x32(v) + case OpRsh16x64: + return rewriteValueWasm_OpRsh16x64(v) + case OpRsh16x8: + return rewriteValueWasm_OpRsh16x8(v) + case OpRsh32Ux16: + return rewriteValueWasm_OpRsh32Ux16(v) + case OpRsh32Ux32: + return rewriteValueWasm_OpRsh32Ux32(v) + case OpRsh32Ux64: + return rewriteValueWasm_OpRsh32Ux64(v) + case OpRsh32Ux8: + return rewriteValueWasm_OpRsh32Ux8(v) + case OpRsh32x16: + return rewriteValueWasm_OpRsh32x16(v) + case OpRsh32x32: + return rewriteValueWasm_OpRsh32x32(v) + case OpRsh32x64: + return rewriteValueWasm_OpRsh32x64(v) + case OpRsh32x8: + return rewriteValueWasm_OpRsh32x8(v) + case OpRsh64Ux16: + return rewriteValueWasm_OpRsh64Ux16(v) + case OpRsh64Ux32: + return rewriteValueWasm_OpRsh64Ux32(v) + case OpRsh64Ux64: + return rewriteValueWasm_OpRsh64Ux64(v) + case OpRsh64Ux8: + return rewriteValueWasm_OpRsh64Ux8(v) + case OpRsh64x16: + return rewriteValueWasm_OpRsh64x16(v) + case OpRsh64x32: + return rewriteValueWasm_OpRsh64x32(v) + case OpRsh64x64: + return rewriteValueWasm_OpRsh64x64(v) + case OpRsh64x8: + return rewriteValueWasm_OpRsh64x8(v) + case OpRsh8Ux16: + return rewriteValueWasm_OpRsh8Ux16(v) + case OpRsh8Ux32: + return rewriteValueWasm_OpRsh8Ux32(v) + case OpRsh8Ux64: + return rewriteValueWasm_OpRsh8Ux64(v) + case OpRsh8Ux8: + return rewriteValueWasm_OpRsh8Ux8(v) + case OpRsh8x16: + return rewriteValueWasm_OpRsh8x16(v) + case OpRsh8x32: + return rewriteValueWasm_OpRsh8x32(v) + case OpRsh8x64: + return rewriteValueWasm_OpRsh8x64(v) + case OpRsh8x8: + return rewriteValueWasm_OpRsh8x8(v) + case OpSignExt16to32: + return rewriteValueWasm_OpSignExt16to32(v) + case OpSignExt16to64: + return rewriteValueWasm_OpSignExt16to64(v) + case OpSignExt32to64: + return rewriteValueWasm_OpSignExt32to64(v) + case OpSignExt8to16: + return rewriteValueWasm_OpSignExt8to16(v) + case OpSignExt8to32: + return rewriteValueWasm_OpSignExt8to32(v) + case 
OpSignExt8to64: + return rewriteValueWasm_OpSignExt8to64(v) + case OpSlicemask: + return rewriteValueWasm_OpSlicemask(v) + case OpSqrt: + v.Op = OpWasmF64Sqrt + return true + case OpSqrt32: + v.Op = OpWasmF32Sqrt + return true + case OpStaticCall: + v.Op = OpWasmLoweredStaticCall + return true + case OpStore: + return rewriteValueWasm_OpStore(v) + case OpSub16: + v.Op = OpWasmI64Sub + return true + case OpSub32: + v.Op = OpWasmI64Sub + return true + case OpSub32F: + v.Op = OpWasmF32Sub + return true + case OpSub64: + v.Op = OpWasmI64Sub + return true + case OpSub64F: + v.Op = OpWasmF64Sub + return true + case OpSub8: + v.Op = OpWasmI64Sub + return true + case OpSubPtr: + v.Op = OpWasmI64Sub + return true + case OpTailCall: + v.Op = OpWasmLoweredTailCall + return true + case OpTrunc: + v.Op = OpWasmF64Trunc + return true + case OpTrunc16to8: + v.Op = OpCopy + return true + case OpTrunc32to16: + v.Op = OpCopy + return true + case OpTrunc32to8: + v.Op = OpCopy + return true + case OpTrunc64to16: + v.Op = OpCopy + return true + case OpTrunc64to32: + v.Op = OpCopy + return true + case OpTrunc64to8: + v.Op = OpCopy + return true + case OpWB: + v.Op = OpWasmLoweredWB + return true + case OpWasmF64Add: + return rewriteValueWasm_OpWasmF64Add(v) + case OpWasmF64Mul: + return rewriteValueWasm_OpWasmF64Mul(v) + case OpWasmI64Add: + return rewriteValueWasm_OpWasmI64Add(v) + case OpWasmI64AddConst: + return rewriteValueWasm_OpWasmI64AddConst(v) + case OpWasmI64And: + return rewriteValueWasm_OpWasmI64And(v) + case OpWasmI64Eq: + return rewriteValueWasm_OpWasmI64Eq(v) + case OpWasmI64Eqz: + return rewriteValueWasm_OpWasmI64Eqz(v) + case OpWasmI64LeU: + return rewriteValueWasm_OpWasmI64LeU(v) + case OpWasmI64Load: + return rewriteValueWasm_OpWasmI64Load(v) + case OpWasmI64Load16S: + return rewriteValueWasm_OpWasmI64Load16S(v) + case OpWasmI64Load16U: + return rewriteValueWasm_OpWasmI64Load16U(v) + case OpWasmI64Load32S: + return rewriteValueWasm_OpWasmI64Load32S(v) + case 
OpWasmI64Load32U: + return rewriteValueWasm_OpWasmI64Load32U(v) + case OpWasmI64Load8S: + return rewriteValueWasm_OpWasmI64Load8S(v) + case OpWasmI64Load8U: + return rewriteValueWasm_OpWasmI64Load8U(v) + case OpWasmI64LtU: + return rewriteValueWasm_OpWasmI64LtU(v) + case OpWasmI64Mul: + return rewriteValueWasm_OpWasmI64Mul(v) + case OpWasmI64Ne: + return rewriteValueWasm_OpWasmI64Ne(v) + case OpWasmI64Or: + return rewriteValueWasm_OpWasmI64Or(v) + case OpWasmI64Shl: + return rewriteValueWasm_OpWasmI64Shl(v) + case OpWasmI64ShrS: + return rewriteValueWasm_OpWasmI64ShrS(v) + case OpWasmI64ShrU: + return rewriteValueWasm_OpWasmI64ShrU(v) + case OpWasmI64Store: + return rewriteValueWasm_OpWasmI64Store(v) + case OpWasmI64Store16: + return rewriteValueWasm_OpWasmI64Store16(v) + case OpWasmI64Store32: + return rewriteValueWasm_OpWasmI64Store32(v) + case OpWasmI64Store8: + return rewriteValueWasm_OpWasmI64Store8(v) + case OpWasmI64Xor: + return rewriteValueWasm_OpWasmI64Xor(v) + case OpXor16: + v.Op = OpWasmI64Xor + return true + case OpXor32: + v.Op = OpWasmI64Xor + return true + case OpXor64: + v.Op = OpWasmI64Xor + return true + case OpXor8: + v.Op = OpWasmI64Xor + return true + case OpZero: + return rewriteValueWasm_OpZero(v) + case OpZeroExt16to32: + return rewriteValueWasm_OpZeroExt16to32(v) + case OpZeroExt16to64: + return rewriteValueWasm_OpZeroExt16to64(v) + case OpZeroExt32to64: + return rewriteValueWasm_OpZeroExt32to64(v) + case OpZeroExt8to16: + return rewriteValueWasm_OpZeroExt8to16(v) + case OpZeroExt8to32: + return rewriteValueWasm_OpZeroExt8to32(v) + case OpZeroExt8to64: + return rewriteValueWasm_OpZeroExt8to64(v) + } + return false +} +func rewriteValueWasm_OpAddr(v *Value) bool { + v_0 := v.Args[0] + // match: (Addr {sym} base) + // result: (LoweredAddr {sym} [0] base) + for { + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpWasmLoweredAddr) + v.AuxInt = int32ToAuxInt(0) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } +} +func 
rewriteValueWasm_OpBitLen64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen64 x) + // result: (I64Sub (I64Const [64]) (I64Clz x)) + for { + x := v_0 + v.reset(OpWasmI64Sub) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(64) + v1 := b.NewValue0(v.Pos, OpWasmI64Clz, typ.Int64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpCom16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Com16 x) + // result: (I64Xor x (I64Const [-1])) + for { + x := v_0 + v.reset(OpWasmI64Xor) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(-1) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpCom32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Com32 x) + // result: (I64Xor x (I64Const [-1])) + for { + x := v_0 + v.reset(OpWasmI64Xor) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(-1) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpCom64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Com64 x) + // result: (I64Xor x (I64Const [-1])) + for { + x := v_0 + v.reset(OpWasmI64Xor) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(-1) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpCom8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Com8 x) + // result: (I64Xor x (I64Const [-1])) + for { + x := v_0 + v.reset(OpWasmI64Xor) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(-1) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpConst16(v *Value) bool { + // match: (Const16 [c]) + // result: (I64Const [int64(c)]) + for { + c := auxIntToInt16(v.AuxInt) + v.reset(OpWasmI64Const) + v.AuxInt = 
int64ToAuxInt(int64(c)) + return true + } +} +func rewriteValueWasm_OpConst32(v *Value) bool { + // match: (Const32 [c]) + // result: (I64Const [int64(c)]) + for { + c := auxIntToInt32(v.AuxInt) + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(int64(c)) + return true + } +} +func rewriteValueWasm_OpConst8(v *Value) bool { + // match: (Const8 [c]) + // result: (I64Const [int64(c)]) + for { + c := auxIntToInt8(v.AuxInt) + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(int64(c)) + return true + } +} +func rewriteValueWasm_OpConstBool(v *Value) bool { + // match: (ConstBool [c]) + // result: (I64Const [b2i(c)]) + for { + c := auxIntToBool(v.AuxInt) + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(b2i(c)) + return true + } +} +func rewriteValueWasm_OpConstNil(v *Value) bool { + // match: (ConstNil) + // result: (I64Const [0]) + for { + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(0) + return true + } +} +func rewriteValueWasm_OpCtz16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz16 x) + // result: (I64Ctz (I64Or x (I64Const [0x10000]))) + for { + x := v_0 + v.reset(OpWasmI64Ctz) + v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64) + v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v1.AuxInt = int64ToAuxInt(0x10000) + v0.AddArg2(x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueWasm_OpCtz32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz32 x) + // result: (I64Ctz (I64Or x (I64Const [0x100000000]))) + for { + x := v_0 + v.reset(OpWasmI64Ctz) + v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64) + v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v1.AuxInt = int64ToAuxInt(0x100000000) + v0.AddArg2(x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueWasm_OpCtz8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz8 x) + // result: (I64Ctz (I64Or x (I64Const [0x100]))) + for { + x := v_0 + 
v.reset(OpWasmI64Ctz) + v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64) + v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v1.AuxInt = int64ToAuxInt(0x100) + v0.AddArg2(x, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueWasm_OpCvt32Uto32F(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt32Uto32F x) + // result: (F32ConvertI64U (ZeroExt32to64 x)) + for { + x := v_0 + v.reset(OpWasmF32ConvertI64U) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueWasm_OpCvt32Uto64F(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt32Uto64F x) + // result: (F64ConvertI64U (ZeroExt32to64 x)) + for { + x := v_0 + v.reset(OpWasmF64ConvertI64U) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueWasm_OpCvt32to32F(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt32to32F x) + // result: (F32ConvertI64S (SignExt32to64 x)) + for { + x := v_0 + v.reset(OpWasmF32ConvertI64S) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueWasm_OpCvt32to64F(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt32to64F x) + // result: (F64ConvertI64S (SignExt32to64 x)) + for { + x := v_0 + v.reset(OpWasmF64ConvertI64S) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueWasm_OpDiv16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 [false] x y) + // result: (I64DivS (SignExt16to64 x) (SignExt16to64 y)) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + y := v_1 + v.reset(OpWasmI64DivS) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, 
typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValueWasm_OpDiv16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16u x y) + // result: (I64DivU (ZeroExt16to64 x) (ZeroExt16to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64DivU) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpDiv32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32 [false] x y) + // result: (I64DivS (SignExt32to64 x) (SignExt32to64 y)) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + y := v_1 + v.reset(OpWasmI64DivS) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValueWasm_OpDiv32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32u x y) + // result: (I64DivU (ZeroExt32to64 x) (ZeroExt32to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64DivU) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpDiv64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Div64 [false] x y) + // result: (I64DivS x y) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + y := v_1 + v.reset(OpWasmI64DivS) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueWasm_OpDiv8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := 
&b.Func.Config.Types + // match: (Div8 x y) + // result: (I64DivS (SignExt8to64 x) (SignExt8to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64DivS) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpDiv8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (I64DivU (ZeroExt8to64 x) (ZeroExt8to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64DivU) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpEq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq16 x y) + // result: (I64Eq (ZeroExt16to64 x) (ZeroExt16to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64Eq) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpEq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq32 x y) + // result: (I64Eq (ZeroExt32to64 x) (ZeroExt32to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64Eq) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpEq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq8 x y) + // result: (I64Eq (ZeroExt8to64 x) (ZeroExt8to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64Eq) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + 
v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpIsNonNil(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (IsNonNil p) + // result: (I64Eqz (I64Eqz p)) + for { + p := v_0 + v.reset(OpWasmI64Eqz) + v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool) + v0.AddArg(p) + v.AddArg(v0) + return true + } +} +func rewriteValueWasm_OpLeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16 x y) + // result: (I64LeS (SignExt16to64 x) (SignExt16to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64LeS) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpLeq16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16U x y) + // result: (I64LeU (ZeroExt16to64 x) (ZeroExt16to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64LeU) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpLeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq32 x y) + // result: (I64LeS (SignExt32to64 x) (SignExt32to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64LeS) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpLeq32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq32U x y) + // result: (I64LeU (ZeroExt32to64 x) 
(ZeroExt32to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64LeU) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpLeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8 x y) + // result: (I64LeS (SignExt8to64 x) (SignExt8to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64LeS) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpLeq8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8U x y) + // result: (I64LeU (ZeroExt8to64 x) (ZeroExt8to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64LeU) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpLess16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16 x y) + // result: (I64LtS (SignExt16to64 x) (SignExt16to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64LtS) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpLess16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16U x y) + // result: (I64LtU (ZeroExt16to64 x) (ZeroExt16to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64LtU) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + 
v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpLess32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less32 x y) + // result: (I64LtS (SignExt32to64 x) (SignExt32to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64LtS) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpLess32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less32U x y) + // result: (I64LtU (ZeroExt32to64 x) (ZeroExt32to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64LtU) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpLess8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8 x y) + // result: (I64LtS (SignExt8to64 x) (SignExt8to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64LtS) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpLess8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8U x y) + // result: (I64LtU (ZeroExt8to64 x) (ZeroExt8to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64LtU) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // 
result: (F32Load ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + v.reset(OpWasmF32Load) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (F64Load ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(OpWasmF64Load) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 8 + // result: (I64Load ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 8) { + break + } + v.reset(OpWasmI64Load) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 4 && !t.IsSigned() + // result: (I64Load32U ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 4 && !t.IsSigned()) { + break + } + v.reset(OpWasmI64Load32U) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 4 && t.IsSigned() + // result: (I64Load32S ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 4 && t.IsSigned()) { + break + } + v.reset(OpWasmI64Load32S) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 2 && !t.IsSigned() + // result: (I64Load16U ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 2 && !t.IsSigned()) { + break + } + v.reset(OpWasmI64Load16U) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 2 && t.IsSigned() + // result: (I64Load16S ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 2 && t.IsSigned()) { + break + } + v.reset(OpWasmI64Load16S) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: t.Size() == 1 && !t.IsSigned() + // result: (I64Load8U ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 1 && !t.IsSigned()) { + break + } + v.reset(OpWasmI64Load8U) + v.AddArg2(ptr, mem) + return true + } 
+ // match: (Load ptr mem) + // cond: t.Size() == 1 && t.IsSigned() + // result: (I64Load8S ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 1 && t.IsSigned()) { + break + } + v.reset(OpWasmI64Load8S) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueWasm_OpLocalAddr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (LoweredAddr {sym} (SPanchored base mem)) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(OpWasmLoweredAddr) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) + v.AddArg(v0) + return true + } + // match: (LocalAddr {sym} base _) + // cond: !t.Elem().HasPointers() + // result: (LoweredAddr {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(OpWasmLoweredAddr) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + return false +} +func rewriteValueWasm_OpLsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x16 [c] x y) + // result: (Lsh64x64 [c] x (ZeroExt16to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpLsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpLsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x32 [c] x y) + // result: (Lsh64x64 [c] x (ZeroExt32to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpLsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, 
v0) + return true + } +} +func rewriteValueWasm_OpLsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x8 [c] x y) + // result: (Lsh64x64 [c] x (ZeroExt8to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpLsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpLsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x16 [c] x y) + // result: (Lsh64x64 [c] x (ZeroExt16to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpLsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpLsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x32 [c] x y) + // result: (Lsh64x64 [c] x (ZeroExt32to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpLsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpLsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x8 [c] x y) + // result: (Lsh64x64 [c] x (ZeroExt8to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpLsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpLsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x16 [c] x y) + // result: (Lsh64x64 [c] x (ZeroExt16to64 y)) + for { + c := 
auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpLsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpLsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x32 [c] x y) + // result: (Lsh64x64 [c] x (ZeroExt32to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpLsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpLsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (I64Shl x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpWasmI64Shl) + v.AddArg2(x, y) + return true + } + // match: (Lsh64x64 x (I64Const [c])) + // cond: uint64(c) < 64 + // result: (I64Shl x (I64Const [c])) + for { + x := v_0 + if v_1.Op != OpWasmI64Const { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 64) { + break + } + v.reset(OpWasmI64Shl) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(c) + v.AddArg2(x, v0) + return true + } + // match: (Lsh64x64 x (I64Const [c])) + // cond: uint64(c) >= 64 + // result: (I64Const [0]) + for { + if v_1.Op != OpWasmI64Const { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 64) { + break + } + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Lsh64x64 x y) + // result: (Select (I64Shl x y) (I64Const [0]) (I64LtU y (I64Const [64]))) + for { + x := v_0 + y := v_1 + v.reset(OpWasmSelect) + v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v2 := 
b.NewValue0(v.Pos, OpWasmI64LtU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(y, v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueWasm_OpLsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x8 [c] x y) + // result: (Lsh64x64 [c] x (ZeroExt8to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpLsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpLsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x16 [c] x y) + // result: (Lsh64x64 [c] x (ZeroExt16to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpLsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpLsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x32 [c] x y) + // result: (Lsh64x64 [c] x (ZeroExt32to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpLsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpLsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x8 [c] x y) + // result: (Lsh64x64 [c] x (ZeroExt8to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpLsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpMod16(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16 [false] x y) + // result: (I64RemS (SignExt16to64 x) (SignExt16to64 y)) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + y := v_1 + v.reset(OpWasmI64RemS) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValueWasm_OpMod16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16u x y) + // result: (I64RemU (ZeroExt16to64 x) (ZeroExt16to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64RemU) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpMod32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32 [false] x y) + // result: (I64RemS (SignExt32to64 x) (SignExt32to64 y)) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + y := v_1 + v.reset(OpWasmI64RemS) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValueWasm_OpMod32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32u x y) + // result: (I64RemU (ZeroExt32to64 x) (ZeroExt32to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64RemU) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpMod64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Mod64 [false] x y) + 
// result: (I64RemS x y) + for { + if auxIntToBool(v.AuxInt) != false { + break + } + x := v_0 + y := v_1 + v.reset(OpWasmI64RemS) + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueWasm_OpMod8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8 x y) + // result: (I64RemS (SignExt8to64 x) (SignExt8to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64RemS) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpMod8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8u x y) + // result: (I64RemU (ZeroExt8to64 x) (ZeroExt8to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64RemU) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpMove(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Move [0] _ _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (I64Store8 dst (I64Load8U src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpWasmI64Store8) + v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] dst src mem) + // result: (I64Store16 dst (I64Load16U src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpWasmI64Store16) + v0 := b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16) + v0.AddArg2(src, 
mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [4] dst src mem) + // result: (I64Store32 dst (I64Load32U src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpWasmI64Store32) + v0 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [8] dst src mem) + // result: (I64Store dst (I64Load src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpWasmI64Store) + v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [16] dst src mem) + // result: (I64Store [8] dst (I64Load [8] src mem) (I64Store dst (I64Load src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpWasmI64Store) + v.AuxInt = int64ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) + v0.AuxInt = int64ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [3] dst src mem) + // result: (I64Store8 [2] dst (I64Load8U [2] src mem) (I64Store16 dst (I64Load16U src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpWasmI64Store8) + v.AuxInt = int64ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8) + v0.AuxInt = int64ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpWasmI64Store16, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [5] dst src mem) + // result: (I64Store8 [4] dst (I64Load8U [4] src 
mem) (I64Store32 dst (I64Load32U src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 5 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpWasmI64Store8) + v.AuxInt = int64ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8) + v0.AuxInt = int64ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [6] dst src mem) + // result: (I64Store16 [4] dst (I64Load16U [4] src mem) (I64Store32 dst (I64Load32U src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpWasmI64Store16) + v.AuxInt = int64ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16) + v0.AuxInt = int64ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [7] dst src mem) + // result: (I64Store32 [3] dst (I64Load32U [3] src mem) (I64Store32 dst (I64Load32U src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 7 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpWasmI64Store32) + v.AuxInt = int64ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32) + v0.AuxInt = int64ToAuxInt(3) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 8 && s < 16 + // result: (I64Store [s-8] dst (I64Load [s-8] src mem) (I64Store dst (I64Load src mem) mem)) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 8 && s < 16) { 
+ break + } + v.reset(OpWasmI64Store) + v.AuxInt = int64ToAuxInt(s - 8) + v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) + v0.AuxInt = int64ToAuxInt(s - 8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [s] dst src mem) + // cond: logLargeCopy(v, s) + // result: (LoweredMove [s] dst src mem) + for { + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(logLargeCopy(v, s)) { + break + } + v.reset(OpWasmLoweredMove) + v.AuxInt = int64ToAuxInt(s) + v.AddArg3(dst, src, mem) + return true + } + return false +} +func rewriteValueWasm_OpNeg16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg16 x) + // result: (I64Sub (I64Const [0]) x) + for { + x := v_0 + v.reset(OpWasmI64Sub) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueWasm_OpNeg32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg32 x) + // result: (I64Sub (I64Const [0]) x) + for { + x := v_0 + v.reset(OpWasmI64Sub) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueWasm_OpNeg64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg64 x) + // result: (I64Sub (I64Const [0]) x) + for { + x := v_0 + v.reset(OpWasmI64Sub) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueWasm_OpNeg8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg8 x) + // result: (I64Sub (I64Const [0]) x) + for { + x := v_0 + v.reset(OpWasmI64Sub) + v0 := 
b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueWasm_OpNeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq16 x y) + // result: (I64Ne (ZeroExt16to64 x) (ZeroExt16to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64Ne) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpNeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq32 x y) + // result: (I64Ne (ZeroExt32to64 x) (ZeroExt32to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64Ne) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpNeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq8 x y) + // result: (I64Ne (ZeroExt8to64 x) (ZeroExt8to64 y)) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64Ne) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpPopCount16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount16 x) + // result: (I64Popcnt (ZeroExt16to64 x)) + for { + x := v_0 + v.reset(OpWasmI64Popcnt) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueWasm_OpPopCount32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount32 x) + // result: (I64Popcnt (ZeroExt32to64 x)) + for { + x := 
v_0 + v.reset(OpWasmI64Popcnt) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueWasm_OpPopCount8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (PopCount8 x) + // result: (I64Popcnt (ZeroExt8to64 x)) + for { + x := v_0 + v.reset(OpWasmI64Popcnt) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueWasm_OpRotateLeft16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft16 x (I64Const [c])) + // result: (Or16 (Lsh16x64 x (I64Const [c&15])) (Rsh16Ux64 x (I64Const [-c&15]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpWasmI64Const { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr16) + v0 := b.NewValue0(v.Pos, OpLsh16x64, t) + v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v1.AuxInt = int64ToAuxInt(c & 15) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) + v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v3.AuxInt = int64ToAuxInt(-c & 15) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueWasm_OpRotateLeft8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft8 x (I64Const [c])) + // result: (Or8 (Lsh8x64 x (I64Const [c&7])) (Rsh8Ux64 x (I64Const [-c&7]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpWasmI64Const { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpLsh8x64, t) + v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v1.AuxInt = int64ToAuxInt(c & 7) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) + v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v3.AuxInt = int64ToAuxInt(-c & 7) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func 
rewriteValueWasm_OpRsh16Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux16 [c] x y) + // result: (Rsh64Ux64 [c] (ZeroExt16to64 x) (ZeroExt16to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64Ux64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpRsh16Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux32 [c] x y) + // result: (Rsh64Ux64 [c] (ZeroExt16to64 x) (ZeroExt32to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64Ux64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpRsh16Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux64 [c] x y) + // result: (Rsh64Ux64 [c] (ZeroExt16to64 x) y) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64Ux64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } +} +func rewriteValueWasm_OpRsh16Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux8 [c] x y) + // result: (Rsh64Ux64 [c] (ZeroExt16to64 x) (ZeroExt8to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64Ux64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return 
true + } +} +func rewriteValueWasm_OpRsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x16 [c] x y) + // result: (Rsh64x64 [c] (SignExt16to64 x) (ZeroExt16to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpRsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x32 [c] x y) + // result: (Rsh64x64 [c] (SignExt16to64 x) (ZeroExt32to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpRsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x64 [c] x y) + // result: (Rsh64x64 [c] (SignExt16to64 x) y) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } +} +func rewriteValueWasm_OpRsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x8 [c] x y) + // result: (Rsh64x64 [c] (SignExt16to64 x) (ZeroExt8to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return 
true + } +} +func rewriteValueWasm_OpRsh32Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux16 [c] x y) + // result: (Rsh64Ux64 [c] (ZeroExt32to64 x) (ZeroExt16to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64Ux64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpRsh32Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux32 [c] x y) + // result: (Rsh64Ux64 [c] (ZeroExt32to64 x) (ZeroExt32to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64Ux64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpRsh32Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux64 [c] x y) + // result: (Rsh64Ux64 [c] (ZeroExt32to64 x) y) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64Ux64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } +} +func rewriteValueWasm_OpRsh32Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux8 [c] x y) + // result: (Rsh64Ux64 [c] (ZeroExt32to64 x) (ZeroExt8to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64Ux64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + 
v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpRsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x16 [c] x y) + // result: (Rsh64x64 [c] (SignExt32to64 x) (ZeroExt16to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpRsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x32 [c] x y) + // result: (Rsh64x64 [c] (SignExt32to64 x) (ZeroExt32to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpRsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x64 [c] x y) + // result: (Rsh64x64 [c] (SignExt32to64 x) y) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } +} +func rewriteValueWasm_OpRsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x8 [c] x y) + // result: (Rsh64x64 [c] (SignExt32to64 x) (ZeroExt8to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + 
v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpRsh64Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux16 [c] x y) + // result: (Rsh64Ux64 [c] x (ZeroExt16to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64Ux64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpRsh64Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux32 [c] x y) + // result: (Rsh64Ux64 [c] x (ZeroExt32to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64Ux64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpRsh64Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux64 x y) + // cond: shiftIsBounded(v) + // result: (I64ShrU x y) + for { + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpWasmI64ShrU) + v.AddArg2(x, y) + return true + } + // match: (Rsh64Ux64 x (I64Const [c])) + // cond: uint64(c) < 64 + // result: (I64ShrU x (I64Const [c])) + for { + x := v_0 + if v_1.Op != OpWasmI64Const { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 64) { + break + } + v.reset(OpWasmI64ShrU) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(c) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64Ux64 x (I64Const [c])) + // cond: uint64(c) >= 64 + // result: (I64Const [0]) + for { + if v_1.Op != OpWasmI64Const { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 64) { + break + } + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Rsh64Ux64 x y) + // result: (Select 
(I64ShrU x y) (I64Const [0]) (I64LtU y (I64Const [64]))) + for { + x := v_0 + y := v_1 + v.reset(OpWasmSelect) + v0 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v2 := b.NewValue0(v.Pos, OpWasmI64LtU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(y, v3) + v.AddArg3(v0, v1, v2) + return true + } +} +func rewriteValueWasm_OpRsh64Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux8 [c] x y) + // result: (Rsh64Ux64 [c] x (ZeroExt8to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64Ux64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpRsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x16 [c] x y) + // result: (Rsh64x64 [c] x (ZeroExt16to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpRsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x32 [c] x y) + // result: (Rsh64x64 [c] x (ZeroExt32to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpRsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (I64ShrS x y) + for { 
+ x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpWasmI64ShrS) + v.AddArg2(x, y) + return true + } + // match: (Rsh64x64 x (I64Const [c])) + // cond: uint64(c) < 64 + // result: (I64ShrS x (I64Const [c])) + for { + x := v_0 + if v_1.Op != OpWasmI64Const { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) < 64) { + break + } + v.reset(OpWasmI64ShrS) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(c) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64x64 x (I64Const [c])) + // cond: uint64(c) >= 64 + // result: (I64ShrS x (I64Const [63])) + for { + x := v_0 + if v_1.Op != OpWasmI64Const { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 64) { + break + } + v.reset(OpWasmI64ShrS) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(63) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64x64 x y) + // result: (I64ShrS x (Select y (I64Const [63]) (I64LtU y (I64Const [64])))) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64ShrS) + v0 := b.NewValue0(v.Pos, OpWasmSelect, typ.Int64) + v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v1.AuxInt = int64ToAuxInt(63) + v2 := b.NewValue0(v.Pos, OpWasmI64LtU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v3.AuxInt = int64ToAuxInt(64) + v2.AddArg2(y, v3) + v0.AddArg3(y, v1, v2) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpRsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x8 [c] x y) + // result: (Rsh64x64 [c] x (ZeroExt8to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpRsh8Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: 
(Rsh8Ux16 [c] x y) + // result: (Rsh64Ux64 [c] (ZeroExt8to64 x) (ZeroExt16to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64Ux64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpRsh8Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux32 [c] x y) + // result: (Rsh64Ux64 [c] (ZeroExt8to64 x) (ZeroExt32to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64Ux64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpRsh8Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux64 [c] x y) + // result: (Rsh64Ux64 [c] (ZeroExt8to64 x) y) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64Ux64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } +} +func rewriteValueWasm_OpRsh8Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux8 [c] x y) + // result: (Rsh64Ux64 [c] (ZeroExt8to64 x) (ZeroExt8to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64Ux64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpRsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: 
(Rsh8x16 [c] x y) + // result: (Rsh64x64 [c] (SignExt8to64 x) (ZeroExt16to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpRsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x32 [c] x y) + // result: (Rsh64x64 [c] (SignExt8to64 x) (ZeroExt32to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpRsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x64 [c] x y) + // result: (Rsh64x64 [c] (SignExt8to64 x) y) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg2(v0, y) + return true + } +} +func rewriteValueWasm_OpRsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x8 [c] x y) + // result: (Rsh64x64 [c] (SignExt8to64 x) (ZeroExt8to64 y)) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpRsh64x64) + v.AuxInt = boolToAuxInt(c) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpSignExt16to32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SignExt16to32 x:(I64Load16S _ 
_)) + // result: x + for { + x := v_0 + if x.Op != OpWasmI64Load16S { + break + } + v.copyOf(x) + return true + } + // match: (SignExt16to32 x) + // cond: buildcfg.GOWASM.SignExt + // result: (I64Extend16S x) + for { + x := v_0 + if !(buildcfg.GOWASM.SignExt) { + break + } + v.reset(OpWasmI64Extend16S) + v.AddArg(x) + return true + } + // match: (SignExt16to32 x) + // result: (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48])) + for { + x := v_0 + v.reset(OpWasmI64ShrS) + v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) + v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v1.AuxInt = int64ToAuxInt(48) + v0.AddArg2(x, v1) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpSignExt16to64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SignExt16to64 x:(I64Load16S _ _)) + // result: x + for { + x := v_0 + if x.Op != OpWasmI64Load16S { + break + } + v.copyOf(x) + return true + } + // match: (SignExt16to64 x) + // cond: buildcfg.GOWASM.SignExt + // result: (I64Extend16S x) + for { + x := v_0 + if !(buildcfg.GOWASM.SignExt) { + break + } + v.reset(OpWasmI64Extend16S) + v.AddArg(x) + return true + } + // match: (SignExt16to64 x) + // result: (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48])) + for { + x := v_0 + v.reset(OpWasmI64ShrS) + v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) + v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v1.AuxInt = int64ToAuxInt(48) + v0.AddArg2(x, v1) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpSignExt32to64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SignExt32to64 x:(I64Load32S _ _)) + // result: x + for { + x := v_0 + if x.Op != OpWasmI64Load32S { + break + } + v.copyOf(x) + return true + } + // match: (SignExt32to64 x) + // cond: buildcfg.GOWASM.SignExt + // result: (I64Extend32S x) + for { + x := v_0 + if !(buildcfg.GOWASM.SignExt) { + break + } + v.reset(OpWasmI64Extend32S) + v.AddArg(x) + 
return true + } + // match: (SignExt32to64 x) + // result: (I64ShrS (I64Shl x (I64Const [32])) (I64Const [32])) + for { + x := v_0 + v.reset(OpWasmI64ShrS) + v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) + v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v1.AuxInt = int64ToAuxInt(32) + v0.AddArg2(x, v1) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpSignExt8to16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SignExt8to16 x:(I64Load8S _ _)) + // result: x + for { + x := v_0 + if x.Op != OpWasmI64Load8S { + break + } + v.copyOf(x) + return true + } + // match: (SignExt8to16 x) + // cond: buildcfg.GOWASM.SignExt + // result: (I64Extend8S x) + for { + x := v_0 + if !(buildcfg.GOWASM.SignExt) { + break + } + v.reset(OpWasmI64Extend8S) + v.AddArg(x) + return true + } + // match: (SignExt8to16 x) + // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56])) + for { + x := v_0 + v.reset(OpWasmI64ShrS) + v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) + v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v1.AuxInt = int64ToAuxInt(56) + v0.AddArg2(x, v1) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpSignExt8to32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SignExt8to32 x:(I64Load8S _ _)) + // result: x + for { + x := v_0 + if x.Op != OpWasmI64Load8S { + break + } + v.copyOf(x) + return true + } + // match: (SignExt8to32 x) + // cond: buildcfg.GOWASM.SignExt + // result: (I64Extend8S x) + for { + x := v_0 + if !(buildcfg.GOWASM.SignExt) { + break + } + v.reset(OpWasmI64Extend8S) + v.AddArg(x) + return true + } + // match: (SignExt8to32 x) + // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56])) + for { + x := v_0 + v.reset(OpWasmI64ShrS) + v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) + v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v1.AuxInt = int64ToAuxInt(56) + v0.AddArg2(x, v1) + v.AddArg2(v0, v1) + 
return true + } +} +func rewriteValueWasm_OpSignExt8to64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SignExt8to64 x:(I64Load8S _ _)) + // result: x + for { + x := v_0 + if x.Op != OpWasmI64Load8S { + break + } + v.copyOf(x) + return true + } + // match: (SignExt8to64 x) + // cond: buildcfg.GOWASM.SignExt + // result: (I64Extend8S x) + for { + x := v_0 + if !(buildcfg.GOWASM.SignExt) { + break + } + v.reset(OpWasmI64Extend8S) + v.AddArg(x) + return true + } + // match: (SignExt8to64 x) + // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56])) + for { + x := v_0 + v.reset(OpWasmI64ShrS) + v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) + v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v1.AuxInt = int64ToAuxInt(56) + v0.AddArg2(x, v1) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueWasm_OpSlicemask(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Slicemask x) + // result: (I64ShrS (I64Sub (I64Const [0]) x) (I64Const [63])) + for { + x := v_0 + v.reset(OpWasmI64ShrS) + v0 := b.NewValue0(v.Pos, OpWasmI64Sub, typ.Int64) + v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v1.AuxInt = int64ToAuxInt(0) + v0.AddArg2(v1, x) + v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v2.AuxInt = int64ToAuxInt(63) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValueWasm_OpStore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Store {t} ptr val mem) + // cond: is64BitFloat(t) + // result: (F64Store ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(is64BitFloat(t)) { + break + } + v.reset(OpWasmF64Store) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: is32BitFloat(t) + // result: (F32Store ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(is32BitFloat(t)) { + break + } + 
v.reset(OpWasmF32Store) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 + // result: (I64Store ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8) { + break + } + v.reset(OpWasmI64Store) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 + // result: (I64Store32 ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4) { + break + } + v.reset(OpWasmI64Store32) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 2 + // result: (I64Store16 ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 2) { + break + } + v.reset(OpWasmI64Store16) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 1 + // result: (I64Store8 ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 1) { + break + } + v.reset(OpWasmI64Store8) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueWasm_OpWasmF64Add(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (F64Add (F64Const [x]) (F64Const [y])) + // result: (F64Const [x + y]) + for { + if v_0.Op != OpWasmF64Const { + break + } + x := auxIntToFloat64(v_0.AuxInt) + if v_1.Op != OpWasmF64Const { + break + } + y := auxIntToFloat64(v_1.AuxInt) + v.reset(OpWasmF64Const) + v.AuxInt = float64ToAuxInt(x + y) + return true + } + // match: (F64Add (F64Const [x]) y) + // cond: y.Op != OpWasmF64Const + // result: (F64Add y (F64Const [x])) + for { + if v_0.Op != OpWasmF64Const { + break + } + x := auxIntToFloat64(v_0.AuxInt) + y := v_1 + if !(y.Op != OpWasmF64Const) { + break + } + v.reset(OpWasmF64Add) + v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64) + 
v0.AuxInt = float64ToAuxInt(x) + v.AddArg2(y, v0) + return true + } + return false +} +func rewriteValueWasm_OpWasmF64Mul(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (F64Mul (F64Const [x]) (F64Const [y])) + // cond: !math.IsNaN(x * y) + // result: (F64Const [x * y]) + for { + if v_0.Op != OpWasmF64Const { + break + } + x := auxIntToFloat64(v_0.AuxInt) + if v_1.Op != OpWasmF64Const { + break + } + y := auxIntToFloat64(v_1.AuxInt) + if !(!math.IsNaN(x * y)) { + break + } + v.reset(OpWasmF64Const) + v.AuxInt = float64ToAuxInt(x * y) + return true + } + // match: (F64Mul (F64Const [x]) y) + // cond: y.Op != OpWasmF64Const + // result: (F64Mul y (F64Const [x])) + for { + if v_0.Op != OpWasmF64Const { + break + } + x := auxIntToFloat64(v_0.AuxInt) + y := v_1 + if !(y.Op != OpWasmF64Const) { + break + } + v.reset(OpWasmF64Mul) + v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64) + v0.AuxInt = float64ToAuxInt(x) + v.AddArg2(y, v0) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Add(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (I64Add (I64Const [x]) (I64Const [y])) + // result: (I64Const [x + y]) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpWasmI64Const { + break + } + y := auxIntToInt64(v_1.AuxInt) + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(x + y) + return true + } + // match: (I64Add (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const + // result: (I64Add y (I64Const [x])) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := auxIntToInt64(v_0.AuxInt) + y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } + v.reset(OpWasmI64Add) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(x) + v.AddArg2(y, v0) + return true + } + // match: (I64Add x (I64Const [y])) + // cond: !t.IsPtr() + // result: (I64AddConst [y] x) + for 
{ + x := v_0 + if v_1.Op != OpWasmI64Const { + break + } + t := v_1.Type + y := auxIntToInt64(v_1.AuxInt) + if !(!t.IsPtr()) { + break + } + v.reset(OpWasmI64AddConst) + v.AuxInt = int64ToAuxInt(y) + v.AddArg(x) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64AddConst(v *Value) bool { + v_0 := v.Args[0] + // match: (I64AddConst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (I64AddConst [off] (LoweredAddr {sym} [off2] base)) + // cond: isU32Bit(off+int64(off2)) + // result: (LoweredAddr {sym} [int32(off)+off2] base) + for { + off := auxIntToInt64(v.AuxInt) + if v_0.Op != OpWasmLoweredAddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + base := v_0.Args[0] + if !(isU32Bit(off + int64(off2))) { + break + } + v.reset(OpWasmLoweredAddr) + v.AuxInt = int32ToAuxInt(int32(off) + off2) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + // match: (I64AddConst [off] x:(SP)) + // cond: isU32Bit(off) + // result: (LoweredAddr [int32(off)] x) + for { + off := auxIntToInt64(v.AuxInt) + x := v_0 + if x.Op != OpSP || !(isU32Bit(off)) { + break + } + v.reset(OpWasmLoweredAddr) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64And(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (I64And (I64Const [x]) (I64Const [y])) + // result: (I64Const [x & y]) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpWasmI64Const { + break + } + y := auxIntToInt64(v_1.AuxInt) + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(x & y) + return true + } + // match: (I64And (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const + // result: (I64And y (I64Const [x])) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := auxIntToInt64(v_0.AuxInt) + y := v_1 + if !(y.Op != 
OpWasmI64Const) { + break + } + v.reset(OpWasmI64And) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(x) + v.AddArg2(y, v0) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Eq(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (I64Eq (I64Const [x]) (I64Const [y])) + // cond: x == y + // result: (I64Const [1]) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpWasmI64Const { + break + } + y := auxIntToInt64(v_1.AuxInt) + if !(x == y) { + break + } + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (I64Eq (I64Const [x]) (I64Const [y])) + // cond: x != y + // result: (I64Const [0]) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpWasmI64Const { + break + } + y := auxIntToInt64(v_1.AuxInt) + if !(x != y) { + break + } + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (I64Eq (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const + // result: (I64Eq y (I64Const [x])) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := auxIntToInt64(v_0.AuxInt) + y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } + v.reset(OpWasmI64Eq) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(x) + v.AddArg2(y, v0) + return true + } + // match: (I64Eq x (I64Const [0])) + // result: (I64Eqz x) + for { + x := v_0 + if v_1.Op != OpWasmI64Const || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.reset(OpWasmI64Eqz) + v.AddArg(x) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Eqz(v *Value) bool { + v_0 := v.Args[0] + // match: (I64Eqz (I64Eqz (I64Eqz x))) + // result: (I64Eqz x) + for { + if v_0.Op != OpWasmI64Eqz { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpWasmI64Eqz { + break + } + x := v_0_0.Args[0] + v.reset(OpWasmI64Eqz) + v.AddArg(x) + return 
true + } + return false +} +func rewriteValueWasm_OpWasmI64LeU(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (I64LeU x (I64Const [0])) + // result: (I64Eqz x) + for { + x := v_0 + if v_1.Op != OpWasmI64Const || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.reset(OpWasmI64Eqz) + v.AddArg(x) + return true + } + // match: (I64LeU (I64Const [1]) x) + // result: (I64Eqz (I64Eqz x)) + for { + if v_0.Op != OpWasmI64Const || auxIntToInt64(v_0.AuxInt) != 1 { + break + } + x := v_1 + v.reset(OpWasmI64Eqz) + v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Load(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (I64Load [off] (I64AddConst [off2] ptr) mem) + // cond: isU32Bit(off+off2) + // result: (I64Load [off+off2] ptr mem) + for { + off := auxIntToInt64(v.AuxInt) + if v_0.Op != OpWasmI64AddConst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(isU32Bit(off + off2)) { + break + } + v.reset(OpWasmI64Load) + v.AuxInt = int64ToAuxInt(off + off2) + v.AddArg2(ptr, mem) + return true + } + // match: (I64Load [off] (LoweredAddr {sym} [off2] (SB)) _) + // cond: symIsRO(sym) && isU32Bit(off+int64(off2)) + // result: (I64Const [int64(read64(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt64(v.AuxInt) + if v_0.Op != OpWasmLoweredAddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) { + break + } + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(int64(read64(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Load16S(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (I64Load16S [off] 
(I64AddConst [off2] ptr) mem) + // cond: isU32Bit(off+off2) + // result: (I64Load16S [off+off2] ptr mem) + for { + off := auxIntToInt64(v.AuxInt) + if v_0.Op != OpWasmI64AddConst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(isU32Bit(off + off2)) { + break + } + v.reset(OpWasmI64Load16S) + v.AuxInt = int64ToAuxInt(off + off2) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Load16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (I64Load16U [off] (I64AddConst [off2] ptr) mem) + // cond: isU32Bit(off+off2) + // result: (I64Load16U [off+off2] ptr mem) + for { + off := auxIntToInt64(v.AuxInt) + if v_0.Op != OpWasmI64AddConst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(isU32Bit(off + off2)) { + break + } + v.reset(OpWasmI64Load16U) + v.AuxInt = int64ToAuxInt(off + off2) + v.AddArg2(ptr, mem) + return true + } + // match: (I64Load16U [off] (LoweredAddr {sym} [off2] (SB)) _) + // cond: symIsRO(sym) && isU32Bit(off+int64(off2)) + // result: (I64Const [int64(read16(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt64(v.AuxInt) + if v_0.Op != OpWasmLoweredAddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) { + break + } + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(int64(read16(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Load32S(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (I64Load32S [off] (I64AddConst [off2] ptr) mem) + // cond: isU32Bit(off+off2) + // result: (I64Load32S [off+off2] ptr mem) + for { + off := auxIntToInt64(v.AuxInt) + if v_0.Op != OpWasmI64AddConst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := 
v_0.Args[0] + mem := v_1 + if !(isU32Bit(off + off2)) { + break + } + v.reset(OpWasmI64Load32S) + v.AuxInt = int64ToAuxInt(off + off2) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Load32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (I64Load32U [off] (I64AddConst [off2] ptr) mem) + // cond: isU32Bit(off+off2) + // result: (I64Load32U [off+off2] ptr mem) + for { + off := auxIntToInt64(v.AuxInt) + if v_0.Op != OpWasmI64AddConst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(isU32Bit(off + off2)) { + break + } + v.reset(OpWasmI64Load32U) + v.AuxInt = int64ToAuxInt(off + off2) + v.AddArg2(ptr, mem) + return true + } + // match: (I64Load32U [off] (LoweredAddr {sym} [off2] (SB)) _) + // cond: symIsRO(sym) && isU32Bit(off+int64(off2)) + // result: (I64Const [int64(read32(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))]) + for { + off := auxIntToInt64(v.AuxInt) + if v_0.Op != OpWasmLoweredAddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) { + break + } + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(int64(read32(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Load8S(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (I64Load8S [off] (I64AddConst [off2] ptr) mem) + // cond: isU32Bit(off+off2) + // result: (I64Load8S [off+off2] ptr mem) + for { + off := auxIntToInt64(v.AuxInt) + if v_0.Op != OpWasmI64AddConst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(isU32Bit(off + off2)) { + break + } + v.reset(OpWasmI64Load8S) + v.AuxInt = int64ToAuxInt(off + off2) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Load8U(v *Value) bool { 
+ v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (I64Load8U [off] (I64AddConst [off2] ptr) mem) + // cond: isU32Bit(off+off2) + // result: (I64Load8U [off+off2] ptr mem) + for { + off := auxIntToInt64(v.AuxInt) + if v_0.Op != OpWasmI64AddConst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(isU32Bit(off + off2)) { + break + } + v.reset(OpWasmI64Load8U) + v.AuxInt = int64ToAuxInt(off + off2) + v.AddArg2(ptr, mem) + return true + } + // match: (I64Load8U [off] (LoweredAddr {sym} [off2] (SB)) _) + // cond: symIsRO(sym) && isU32Bit(off+int64(off2)) + // result: (I64Const [int64(read8(sym, off+int64(off2)))]) + for { + off := auxIntToInt64(v.AuxInt) + if v_0.Op != OpWasmLoweredAddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) { + break + } + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(int64(read8(sym, off+int64(off2)))) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64LtU(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (I64LtU (I64Const [0]) x) + // result: (I64Eqz (I64Eqz x)) + for { + if v_0.Op != OpWasmI64Const || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_1 + v.reset(OpWasmI64Eqz) + v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (I64LtU x (I64Const [1])) + // result: (I64Eqz x) + for { + x := v_0 + if v_1.Op != OpWasmI64Const || auxIntToInt64(v_1.AuxInt) != 1 { + break + } + v.reset(OpWasmI64Eqz) + v.AddArg(x) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Mul(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (I64Mul (I64Const [x]) (I64Const [y])) + // result: (I64Const [x * y]) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := 
auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpWasmI64Const { + break + } + y := auxIntToInt64(v_1.AuxInt) + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(x * y) + return true + } + // match: (I64Mul (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const + // result: (I64Mul y (I64Const [x])) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := auxIntToInt64(v_0.AuxInt) + y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } + v.reset(OpWasmI64Mul) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(x) + v.AddArg2(y, v0) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Ne(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (I64Ne (I64Const [x]) (I64Const [y])) + // cond: x == y + // result: (I64Const [0]) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpWasmI64Const { + break + } + y := auxIntToInt64(v_1.AuxInt) + if !(x == y) { + break + } + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (I64Ne (I64Const [x]) (I64Const [y])) + // cond: x != y + // result: (I64Const [1]) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpWasmI64Const { + break + } + y := auxIntToInt64(v_1.AuxInt) + if !(x != y) { + break + } + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (I64Ne (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const + // result: (I64Ne y (I64Const [x])) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := auxIntToInt64(v_0.AuxInt) + y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } + v.reset(OpWasmI64Ne) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(x) + v.AddArg2(y, v0) + return true + } + // match: (I64Ne x (I64Const [0])) + // result: (I64Eqz (I64Eqz x)) + for { + x := v_0 + if v_1.Op != OpWasmI64Const || auxIntToInt64(v_1.AuxInt) != 
0 { + break + } + v.reset(OpWasmI64Eqz) + v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Or(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (I64Or (I64Const [x]) (I64Const [y])) + // result: (I64Const [x | y]) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpWasmI64Const { + break + } + y := auxIntToInt64(v_1.AuxInt) + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(x | y) + return true + } + // match: (I64Or (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const + // result: (I64Or y (I64Const [x])) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := auxIntToInt64(v_0.AuxInt) + y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } + v.reset(OpWasmI64Or) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(x) + v.AddArg2(y, v0) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Shl(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (I64Shl (I64Const [x]) (I64Const [y])) + // result: (I64Const [x << uint64(y)]) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpWasmI64Const { + break + } + y := auxIntToInt64(v_1.AuxInt) + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(x << uint64(y)) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64ShrS(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (I64ShrS (I64Const [x]) (I64Const [y])) + // result: (I64Const [x >> uint64(y)]) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpWasmI64Const { + break + } + y := auxIntToInt64(v_1.AuxInt) + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(x >> uint64(y)) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64ShrU(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] + // match: (I64ShrU (I64Const [x]) (I64Const [y])) + // result: (I64Const [int64(uint64(x) >> uint64(y))]) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpWasmI64Const { + break + } + y := auxIntToInt64(v_1.AuxInt) + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(int64(uint64(x) >> uint64(y))) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Store(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (I64Store [off] (I64AddConst [off2] ptr) val mem) + // cond: isU32Bit(off+off2) + // result: (I64Store [off+off2] ptr val mem) + for { + off := auxIntToInt64(v.AuxInt) + if v_0.Op != OpWasmI64AddConst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(isU32Bit(off + off2)) { + break + } + v.reset(OpWasmI64Store) + v.AuxInt = int64ToAuxInt(off + off2) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Store16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (I64Store16 [off] (I64AddConst [off2] ptr) val mem) + // cond: isU32Bit(off+off2) + // result: (I64Store16 [off+off2] ptr val mem) + for { + off := auxIntToInt64(v.AuxInt) + if v_0.Op != OpWasmI64AddConst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(isU32Bit(off + off2)) { + break + } + v.reset(OpWasmI64Store16) + v.AuxInt = int64ToAuxInt(off + off2) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Store32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (I64Store32 [off] (I64AddConst [off2] ptr) val mem) + // cond: isU32Bit(off+off2) + // result: (I64Store32 [off+off2] ptr val mem) + for { + off := auxIntToInt64(v.AuxInt) + if v_0.Op != OpWasmI64AddConst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr 
:= v_0.Args[0] + val := v_1 + mem := v_2 + if !(isU32Bit(off + off2)) { + break + } + v.reset(OpWasmI64Store32) + v.AuxInt = int64ToAuxInt(off + off2) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Store8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (I64Store8 [off] (I64AddConst [off2] ptr) val mem) + // cond: isU32Bit(off+off2) + // result: (I64Store8 [off+off2] ptr val mem) + for { + off := auxIntToInt64(v.AuxInt) + if v_0.Op != OpWasmI64AddConst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(isU32Bit(off + off2)) { + break + } + v.reset(OpWasmI64Store8) + v.AuxInt = int64ToAuxInt(off + off2) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueWasm_OpWasmI64Xor(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (I64Xor (I64Const [x]) (I64Const [y])) + // result: (I64Const [x ^ y]) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpWasmI64Const { + break + } + y := auxIntToInt64(v_1.AuxInt) + v.reset(OpWasmI64Const) + v.AuxInt = int64ToAuxInt(x ^ y) + return true + } + // match: (I64Xor (I64Const [x]) y) + // cond: y.Op != OpWasmI64Const + // result: (I64Xor y (I64Const [x])) + for { + if v_0.Op != OpWasmI64Const { + break + } + x := auxIntToInt64(v_0.AuxInt) + y := v_1 + if !(y.Op != OpWasmI64Const) { + break + } + v.reset(OpWasmI64Xor) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(x) + v.AddArg2(y, v0) + return true + } + return false +} +func rewriteValueWasm_OpZero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Zero [0] _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_1 + v.copyOf(mem) + return true + } + // match: (Zero [1] destptr mem) 
+ // result: (I64Store8 destptr (I64Const [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpWasmI64Store8) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(destptr, v0, mem) + return true + } + // match: (Zero [2] destptr mem) + // result: (I64Store16 destptr (I64Const [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpWasmI64Store16) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(destptr, v0, mem) + return true + } + // match: (Zero [4] destptr mem) + // result: (I64Store32 destptr (I64Const [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpWasmI64Store32) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(destptr, v0, mem) + return true + } + // match: (Zero [8] destptr mem) + // result: (I64Store destptr (I64Const [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpWasmI64Store) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(destptr, v0, mem) + return true + } + // match: (Zero [3] destptr mem) + // result: (I64Store8 [2] destptr (I64Const [0]) (I64Store16 destptr (I64Const [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpWasmI64Store8) + v.AuxInt = int64ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpWasmI64Store16, types.TypeMem) + v1.AddArg3(destptr, v0, mem) + v.AddArg3(destptr, v0, v1) + return true + } + // match: (Zero [5] destptr mem) + // result: (I64Store8 [4] destptr (I64Const [0]) (I64Store32 destptr (I64Const [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 5 { + break + } + destptr := v_0 + mem 
:= v_1 + v.reset(OpWasmI64Store8) + v.AuxInt = int64ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem) + v1.AddArg3(destptr, v0, mem) + v.AddArg3(destptr, v0, v1) + return true + } + // match: (Zero [6] destptr mem) + // result: (I64Store16 [4] destptr (I64Const [0]) (I64Store32 destptr (I64Const [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpWasmI64Store16) + v.AuxInt = int64ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem) + v1.AddArg3(destptr, v0, mem) + v.AddArg3(destptr, v0, v1) + return true + } + // match: (Zero [7] destptr mem) + // result: (I64Store32 [3] destptr (I64Const [0]) (I64Store32 destptr (I64Const [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 7 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpWasmI64Store32) + v.AuxInt = int64ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem) + v1.AddArg3(destptr, v0, mem) + v.AddArg3(destptr, v0, v1) + return true + } + // match: (Zero [s] destptr mem) + // cond: s%8 != 0 && s > 8 && s < 32 + // result: (Zero [s-s%8] (OffPtr destptr [s%8]) (I64Store destptr (I64Const [0]) mem)) + for { + s := auxIntToInt64(v.AuxInt) + destptr := v_0 + mem := v_1 + if !(s%8 != 0 && s > 8 && s < 32) { + break + } + v.reset(OpZero) + v.AuxInt = int64ToAuxInt(s - s%8) + v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) + v0.AuxInt = int64ToAuxInt(s % 8) + v0.AddArg(destptr) + v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v2.AuxInt = int64ToAuxInt(0) + v1.AddArg3(destptr, v2, mem) + v.AddArg2(v0, v1) + return true + } + // match: (Zero [16] destptr mem) + // result: (I64Store [8] 
destptr (I64Const [0]) (I64Store destptr (I64Const [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpWasmI64Store) + v.AuxInt = int64ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) + v1.AddArg3(destptr, v0, mem) + v.AddArg3(destptr, v0, v1) + return true + } + // match: (Zero [24] destptr mem) + // result: (I64Store [16] destptr (I64Const [0]) (I64Store [8] destptr (I64Const [0]) (I64Store destptr (I64Const [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 24 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpWasmI64Store) + v.AuxInt = int64ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) + v1.AuxInt = int64ToAuxInt(8) + v2 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) + v2.AddArg3(destptr, v0, mem) + v1.AddArg3(destptr, v0, v2) + v.AddArg3(destptr, v0, v1) + return true + } + // match: (Zero [32] destptr mem) + // result: (I64Store [24] destptr (I64Const [0]) (I64Store [16] destptr (I64Const [0]) (I64Store [8] destptr (I64Const [0]) (I64Store destptr (I64Const [0]) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 32 { + break + } + destptr := v_0 + mem := v_1 + v.reset(OpWasmI64Store) + v.AuxInt = int64ToAuxInt(24) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) + v1.AuxInt = int64ToAuxInt(16) + v2 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) + v2.AuxInt = int64ToAuxInt(8) + v3 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem) + v3.AddArg3(destptr, v0, mem) + v2.AddArg3(destptr, v0, v3) + v1.AddArg3(destptr, v0, v2) + v.AddArg3(destptr, v0, v1) + return true + } + // match: (Zero [s] destptr mem) + // result: (LoweredZero [s] destptr mem) + for { + s := auxIntToInt64(v.AuxInt) + 
destptr := v_0 + mem := v_1 + v.reset(OpWasmLoweredZero) + v.AuxInt = int64ToAuxInt(s) + v.AddArg2(destptr, mem) + return true + } +} +func rewriteValueWasm_OpZeroExt16to32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ZeroExt16to32 x:(I64Load16U _ _)) + // result: x + for { + x := v_0 + if x.Op != OpWasmI64Load16U { + break + } + v.copyOf(x) + return true + } + // match: (ZeroExt16to32 x) + // result: (I64And x (I64Const [0xffff])) + for { + x := v_0 + v.reset(OpWasmI64And) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0xffff) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpZeroExt16to64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ZeroExt16to64 x:(I64Load16U _ _)) + // result: x + for { + x := v_0 + if x.Op != OpWasmI64Load16U { + break + } + v.copyOf(x) + return true + } + // match: (ZeroExt16to64 x) + // result: (I64And x (I64Const [0xffff])) + for { + x := v_0 + v.reset(OpWasmI64And) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0xffff) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpZeroExt32to64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ZeroExt32to64 x:(I64Load32U _ _)) + // result: x + for { + x := v_0 + if x.Op != OpWasmI64Load32U { + break + } + v.copyOf(x) + return true + } + // match: (ZeroExt32to64 x) + // result: (I64And x (I64Const [0xffffffff])) + for { + x := v_0 + v.reset(OpWasmI64And) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0xffffffff) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpZeroExt8to16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ZeroExt8to16 x:(I64Load8U _ _)) + // result: x + for { + x := v_0 + if x.Op != OpWasmI64Load8U { + break + } + v.copyOf(x) + return true + } + // match: 
(ZeroExt8to16 x) + // result: (I64And x (I64Const [0xff])) + for { + x := v_0 + v.reset(OpWasmI64And) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0xff) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpZeroExt8to32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ZeroExt8to32 x:(I64Load8U _ _)) + // result: x + for { + x := v_0 + if x.Op != OpWasmI64Load8U { + break + } + v.copyOf(x) + return true + } + // match: (ZeroExt8to32 x) + // result: (I64And x (I64Const [0xff])) + for { + x := v_0 + v.reset(OpWasmI64And) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0xff) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueWasm_OpZeroExt8to64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ZeroExt8to64 x:(I64Load8U _ _)) + // result: x + for { + x := v_0 + if x.Op != OpWasmI64Load8U { + break + } + v.copyOf(x) + return true + } + // match: (ZeroExt8to64 x) + // result: (I64And x (I64Const [0xff])) + for { + x := v_0 + v.reset(OpWasmI64And) + v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v0.AuxInt = int64ToAuxInt(0xff) + v.AddArg2(x, v0) + return true + } +} +func rewriteBlockWasm(b *Block) bool { + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewrite_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewrite_test.go new file mode 100644 index 0000000000000000000000000000000000000000..357fe1183fad56da349da9af4de47cdff074c59b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewrite_test.go @@ -0,0 +1,220 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import "testing" + +// We generate memmove for copy(x[1:], x[:]), however we may change it to OpMove, +// because size is known. Check that OpMove is alias-safe, or we did call memmove. +func TestMove(t *testing.T) { + x := [...]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40} + copy(x[1:], x[:]) + for i := 1; i < len(x); i++ { + if int(x[i]) != i { + t.Errorf("Memmove got converted to OpMove in alias-unsafe way. Got %d instead of %d in position %d", int(x[i]), i, i+1) + } + } +} + +func TestMoveSmall(t *testing.T) { + x := [...]byte{1, 2, 3, 4, 5, 6, 7} + copy(x[1:], x[:]) + for i := 1; i < len(x); i++ { + if int(x[i]) != i { + t.Errorf("Memmove got converted to OpMove in alias-unsafe way. Got %d instead of %d in position %d", int(x[i]), i, i+1) + } + } +} + +func TestSubFlags(t *testing.T) { + if !subFlags32(0, 1).lt() { + t.Errorf("subFlags32(0,1).lt() returned false") + } + if !subFlags32(0, 1).ult() { + t.Errorf("subFlags32(0,1).ult() returned false") + } +} + +func TestIsPPC64WordRotateMask(t *testing.T) { + tests := []struct { + input int64 + expected bool + }{ + {0x00000001, true}, + {0x80000001, true}, + {0x80010001, false}, + {0xFFFFFFFA, false}, + {0xF0F0F0F0, false}, + {0xFFFFFFFD, true}, + {0x80000000, true}, + {0x00000000, false}, + {0xFFFFFFFF, true}, + {0x0000FFFF, true}, + {0xFF0000FF, true}, + {0x00FFFF00, true}, + } + + for _, v := range tests { + if v.expected != isPPC64WordRotateMask(v.input) { + t.Errorf("isPPC64WordRotateMask(0x%x) failed", v.input) + } + } +} + +func TestEncodeDecodePPC64WordRotateMask(t *testing.T) { + tests := []struct { + rotate int64 + mask uint64 + nbits, + mb, + me, + encoded int64 + }{ + {1, 0x00000001, 32, 31, 31, 0x20011f20}, + {2, 0x80000001, 32, 31, 0, 0x20021f01}, + {3, 0xFFFFFFFD, 32, 31, 29, 0x20031f1e}, + {4, 0x80000000, 32, 0, 0, 0x20040001}, + {5, 0xFFFFFFFF, 32, 0, 31, 0x20050020}, + 
{6, 0x0000FFFF, 32, 16, 31, 0x20061020}, + {7, 0xFF0000FF, 32, 24, 7, 0x20071808}, + {8, 0x00FFFF00, 32, 8, 23, 0x20080818}, + + {9, 0x0000000000FFFF00, 64, 40, 55, 0x40092838}, + {10, 0xFFFF000000000000, 64, 0, 15, 0x400A0010}, + {10, 0xFFFF000000000001, 64, 63, 15, 0x400A3f10}, + } + + for i, v := range tests { + result := encodePPC64RotateMask(v.rotate, int64(v.mask), v.nbits) + if result != v.encoded { + t.Errorf("encodePPC64RotateMask(%d,0x%x,%d) = 0x%x, expected 0x%x", v.rotate, v.mask, v.nbits, result, v.encoded) + } + rotate, mb, me, mask := DecodePPC64RotateMask(result) + if rotate != v.rotate || mb != v.mb || me != v.me || mask != v.mask { + t.Errorf("DecodePPC64Failure(Test %d) got (%d, %d, %d, %x) expected (%d, %d, %d, %x)", i, rotate, mb, me, mask, v.rotate, v.mb, v.me, v.mask) + } + } +} + +func TestMergePPC64ClrlsldiSrw(t *testing.T) { + tests := []struct { + clrlsldi int32 + srw int64 + valid bool + rotate int64 + mask uint64 + }{ + // ((x>>4)&0xFF)<<4 + {newPPC64ShiftAuxInt(4, 56, 63, 64), 4, true, 0, 0xFF0}, + // ((x>>4)&0xFFFF)<<4 + {newPPC64ShiftAuxInt(4, 48, 63, 64), 4, true, 0, 0xFFFF0}, + // ((x>>4)&0xFFFF)<<17 + {newPPC64ShiftAuxInt(17, 48, 63, 64), 4, false, 0, 0}, + // ((x>>4)&0xFFFF)<<16 + {newPPC64ShiftAuxInt(16, 48, 63, 64), 4, true, 12, 0xFFFF0000}, + // ((x>>32)&0xFFFF)<<17 + {newPPC64ShiftAuxInt(17, 48, 63, 64), 32, false, 0, 0}, + } + for i, v := range tests { + result := mergePPC64ClrlsldiSrw(int64(v.clrlsldi), v.srw) + if v.valid && result == 0 { + t.Errorf("mergePPC64ClrlsldiSrw(Test %d) did not merge", i) + } else if !v.valid && result != 0 { + t.Errorf("mergePPC64ClrlsldiSrw(Test %d) should return 0", i) + } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m { + t.Errorf("mergePPC64ClrlsldiSrw(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask) + } + } +} + +func TestMergePPC64ClrlsldiRlwinm(t *testing.T) { + tests := []struct { + clrlsldi int32 + rlwinm int64 + valid bool + 
rotate int64 + mask uint64 + }{ + // ((x<<4)&0xFF00)<<4 + {newPPC64ShiftAuxInt(4, 56, 63, 64), encodePPC64RotateMask(4, 0xFF00, 32), false, 0, 0}, + // ((x>>4)&0xFF)<<4 + {newPPC64ShiftAuxInt(4, 56, 63, 64), encodePPC64RotateMask(28, 0x0FFFFFFF, 32), true, 0, 0xFF0}, + // ((x>>4)&0xFFFF)<<4 + {newPPC64ShiftAuxInt(4, 48, 63, 64), encodePPC64RotateMask(28, 0xFFFF, 32), true, 0, 0xFFFF0}, + // ((x>>4)&0xFFFF)<<17 + {newPPC64ShiftAuxInt(17, 48, 63, 64), encodePPC64RotateMask(28, 0xFFFF, 32), false, 0, 0}, + // ((x>>4)&0xFFFF)<<16 + {newPPC64ShiftAuxInt(16, 48, 63, 64), encodePPC64RotateMask(28, 0xFFFF, 32), true, 12, 0xFFFF0000}, + // ((x>>4)&0xF000FFFF)<<16 + {newPPC64ShiftAuxInt(16, 48, 63, 64), encodePPC64RotateMask(28, 0xF000FFFF, 32), true, 12, 0xFFFF0000}, + } + for i, v := range tests { + result := mergePPC64ClrlsldiRlwinm(v.clrlsldi, v.rlwinm) + if v.valid && result == 0 { + t.Errorf("mergePPC64ClrlsldiRlwinm(Test %d) did not merge", i) + } else if !v.valid && result != 0 { + t.Errorf("mergePPC64ClrlsldiRlwinm(Test %d) should return 0", i) + } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m { + t.Errorf("mergePPC64ClrlsldiRlwinm(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask) + } + } +} + +func TestMergePPC64SldiSrw(t *testing.T) { + tests := []struct { + sld int64 + srw int64 + valid bool + rotate int64 + mask uint64 + }{ + {4, 4, true, 0, 0xFFFFFFF0}, + {4, 8, true, 28, 0x0FFFFFF0}, + {0, 0, true, 0, 0xFFFFFFFF}, + {8, 4, false, 0, 0}, + {0, 32, false, 0, 0}, + {0, 31, true, 1, 0x1}, + {31, 31, true, 0, 0x80000000}, + {32, 32, false, 0, 0}, + } + for i, v := range tests { + result := mergePPC64SldiSrw(v.sld, v.srw) + if v.valid && result == 0 { + t.Errorf("mergePPC64SldiSrw(Test %d) did not merge", i) + } else if !v.valid && result != 0 { + t.Errorf("mergePPC64SldiSrw(Test %d) should return 0", i) + } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m { + 
t.Errorf("mergePPC64SldiSrw(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask) + } + } +} + +func TestMergePPC64AndSrwi(t *testing.T) { + tests := []struct { + and int64 + srw int64 + valid bool + rotate int64 + mask uint64 + }{ + {0x000000FF, 8, true, 24, 0xFF}, + {0xF00000FF, 8, true, 24, 0xFF}, + {0x0F0000FF, 4, false, 0, 0}, + {0x00000000, 4, false, 0, 0}, + {0xF0000000, 4, false, 0, 0}, + {0xF0000000, 32, false, 0, 0}, + {0xFFFFFFFF, 0, true, 0, 0xFFFFFFFF}, + } + for i, v := range tests { + result := mergePPC64AndSrwi(v.and, v.srw) + if v.valid && result == 0 { + t.Errorf("mergePPC64AndSrwi(Test %d) did not merge", i) + } else if !v.valid && result != 0 { + t.Errorf("mergePPC64AndSrwi(Test %d) should return 0", i) + } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m { + t.Errorf("mergePPC64AndSrwi(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewritedec.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewritedec.go new file mode 100644 index 0000000000000000000000000000000000000000..3c481adc15879f92dac313e50c7bbbb1976e3e1b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewritedec.go @@ -0,0 +1,1094 @@ +// Code generated from _gen/dec.rules using 'go generate'; DO NOT EDIT. 
+ +package ssa + +import "cmd/compile/internal/types" + +func rewriteValuedec(v *Value) bool { + switch v.Op { + case OpArrayMake1: + return rewriteValuedec_OpArrayMake1(v) + case OpArraySelect: + return rewriteValuedec_OpArraySelect(v) + case OpComplexImag: + return rewriteValuedec_OpComplexImag(v) + case OpComplexReal: + return rewriteValuedec_OpComplexReal(v) + case OpIData: + return rewriteValuedec_OpIData(v) + case OpIMake: + return rewriteValuedec_OpIMake(v) + case OpITab: + return rewriteValuedec_OpITab(v) + case OpLoad: + return rewriteValuedec_OpLoad(v) + case OpSliceCap: + return rewriteValuedec_OpSliceCap(v) + case OpSliceLen: + return rewriteValuedec_OpSliceLen(v) + case OpSlicePtr: + return rewriteValuedec_OpSlicePtr(v) + case OpSlicePtrUnchecked: + return rewriteValuedec_OpSlicePtrUnchecked(v) + case OpStore: + return rewriteValuedec_OpStore(v) + case OpStringLen: + return rewriteValuedec_OpStringLen(v) + case OpStringPtr: + return rewriteValuedec_OpStringPtr(v) + case OpStructMake1: + return rewriteValuedec_OpStructMake1(v) + case OpStructSelect: + return rewriteValuedec_OpStructSelect(v) + } + return false +} +func rewriteValuedec_OpArrayMake1(v *Value) bool { + v_0 := v.Args[0] + // match: (ArrayMake1 x) + // cond: x.Type.IsPtrShaped() + // result: x + for { + x := v_0 + if !(x.Type.IsPtrShaped()) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValuedec_OpArraySelect(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (ArraySelect [0] x) + // cond: x.Type.IsPtrShaped() + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + if !(x.Type.IsPtrShaped()) { + break + } + v.copyOf(x) + return true + } + // match: (ArraySelect (ArrayMake1 x)) + // result: x + for { + if v_0.Op != OpArrayMake1 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (ArraySelect [0] (IData x)) + // result: (IData x) + for { + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpIData { + 
break + } + x := v_0.Args[0] + v.reset(OpIData) + v.AddArg(x) + return true + } + // match: (ArraySelect [i] x:(Load ptr mem)) + // result: @x.Block (Load (OffPtr [t.Elem().Size()*i] ptr) mem) + for { + i := auxIntToInt64(v.AuxInt) + x := v_0 + if x.Op != OpLoad { + break + } + t := x.Type + mem := x.Args[1] + ptr := x.Args[0] + b = x.Block + v0 := b.NewValue0(v.Pos, OpLoad, v.Type) + v.copyOf(v0) + v1 := b.NewValue0(v.Pos, OpOffPtr, v.Type.PtrTo()) + v1.AuxInt = int64ToAuxInt(t.Elem().Size() * i) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + return true + } + return false +} +func rewriteValuedec_OpComplexImag(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ComplexImag (ComplexMake _ imag )) + // result: imag + for { + if v_0.Op != OpComplexMake { + break + } + imag := v_0.Args[1] + v.copyOf(imag) + return true + } + // match: (ComplexImag x:(Load ptr mem)) + // cond: t.IsComplex() && t.Size() == 8 + // result: @x.Block (Load (OffPtr [4] ptr) mem) + for { + x := v_0 + if x.Op != OpLoad { + break + } + t := x.Type + mem := x.Args[1] + ptr := x.Args[0] + if !(t.IsComplex() && t.Size() == 8) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpLoad, typ.Float32) + v.copyOf(v0) + v1 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr) + v1.AuxInt = int64ToAuxInt(4) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + return true + } + // match: (ComplexImag x:(Load ptr mem)) + // cond: t.IsComplex() && t.Size() == 16 + // result: @x.Block (Load (OffPtr [8] ptr) mem) + for { + x := v_0 + if x.Op != OpLoad { + break + } + t := x.Type + mem := x.Args[1] + ptr := x.Args[0] + if !(t.IsComplex() && t.Size() == 16) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpLoad, typ.Float64) + v.copyOf(v0) + v1 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr) + v1.AuxInt = int64ToAuxInt(8) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + return true + } + return false +} +func rewriteValuedec_OpComplexReal(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + 
typ := &b.Func.Config.Types + // match: (ComplexReal (ComplexMake real _ )) + // result: real + for { + if v_0.Op != OpComplexMake { + break + } + real := v_0.Args[0] + v.copyOf(real) + return true + } + // match: (ComplexReal x:(Load ptr mem)) + // cond: t.IsComplex() && t.Size() == 8 + // result: @x.Block (Load ptr mem) + for { + x := v_0 + if x.Op != OpLoad { + break + } + t := x.Type + mem := x.Args[1] + ptr := x.Args[0] + if !(t.IsComplex() && t.Size() == 8) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpLoad, typ.Float32) + v.copyOf(v0) + v0.AddArg2(ptr, mem) + return true + } + // match: (ComplexReal x:(Load ptr mem)) + // cond: t.IsComplex() && t.Size() == 16 + // result: @x.Block (Load ptr mem) + for { + x := v_0 + if x.Op != OpLoad { + break + } + t := x.Type + mem := x.Args[1] + ptr := x.Args[0] + if !(t.IsComplex() && t.Size() == 16) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpLoad, typ.Float64) + v.copyOf(v0) + v0.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValuedec_OpIData(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (IData (IMake _ data)) + // result: data + for { + if v_0.Op != OpIMake { + break + } + data := v_0.Args[1] + v.copyOf(data) + return true + } + // match: (IData x:(Load ptr mem)) + // cond: t.IsInterface() + // result: @x.Block (Load (OffPtr [config.PtrSize] ptr) mem) + for { + x := v_0 + if x.Op != OpLoad { + break + } + t := x.Type + mem := x.Args[1] + ptr := x.Args[0] + if !(t.IsInterface()) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr) + v.copyOf(v0) + v1 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr) + v1.AuxInt = int64ToAuxInt(config.PtrSize) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + return true + } + return false +} +func rewriteValuedec_OpIMake(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IMake _typ (StructMake1 val)) + // result: (IMake _typ val) + for { + 
_typ := v_0 + if v_1.Op != OpStructMake1 { + break + } + val := v_1.Args[0] + v.reset(OpIMake) + v.AddArg2(_typ, val) + return true + } + return false +} +func rewriteValuedec_OpITab(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ITab (IMake itab _)) + // result: itab + for { + if v_0.Op != OpIMake { + break + } + itab := v_0.Args[0] + v.copyOf(itab) + return true + } + // match: (ITab x:(Load ptr mem)) + // cond: t.IsInterface() + // result: @x.Block (Load ptr mem) + for { + x := v_0 + if x.Op != OpLoad { + break + } + t := x.Type + mem := x.Args[1] + ptr := x.Args[0] + if !(t.IsInterface()) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpLoad, typ.Uintptr) + v.copyOf(v0) + v0.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValuedec_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Load ptr mem) + // cond: t.IsComplex() && t.Size() == 8 + // result: (ComplexMake (Load ptr mem) (Load (OffPtr [4] ptr) mem) ) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsComplex() && t.Size() == 8) { + break + } + v.reset(OpComplexMake) + v0 := b.NewValue0(v.Pos, OpLoad, typ.Float32) + v0.AddArg2(ptr, mem) + v1 := b.NewValue0(v.Pos, OpLoad, typ.Float32) + v2 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr) + v2.AuxInt = int64ToAuxInt(4) + v2.AddArg(ptr) + v1.AddArg2(v2, mem) + v.AddArg2(v0, v1) + return true + } + // match: (Load ptr mem) + // cond: t.IsComplex() && t.Size() == 16 + // result: (ComplexMake (Load ptr mem) (Load (OffPtr [8] ptr) mem) ) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsComplex() && t.Size() == 16) { + break + } + v.reset(OpComplexMake) + v0 := b.NewValue0(v.Pos, OpLoad, typ.Float64) + v0.AddArg2(ptr, mem) + v1 := b.NewValue0(v.Pos, OpLoad, typ.Float64) + v2 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr) + v2.AuxInt = int64ToAuxInt(8) + v2.AddArg(ptr) + 
v1.AddArg2(v2, mem) + v.AddArg2(v0, v1) + return true + } + // match: (Load ptr mem) + // cond: t.IsString() + // result: (StringMake (Load ptr mem) (Load (OffPtr [config.PtrSize] ptr) mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsString()) { + break + } + v.reset(OpStringMake) + v0 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr) + v0.AddArg2(ptr, mem) + v1 := b.NewValue0(v.Pos, OpLoad, typ.Int) + v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) + v2.AuxInt = int64ToAuxInt(config.PtrSize) + v2.AddArg(ptr) + v1.AddArg2(v2, mem) + v.AddArg2(v0, v1) + return true + } + // match: (Load ptr mem) + // cond: t.IsSlice() + // result: (SliceMake (Load ptr mem) (Load (OffPtr [config.PtrSize] ptr) mem) (Load (OffPtr [2*config.PtrSize] ptr) mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsSlice()) { + break + } + v.reset(OpSliceMake) + v0 := b.NewValue0(v.Pos, OpLoad, t.Elem().PtrTo()) + v0.AddArg2(ptr, mem) + v1 := b.NewValue0(v.Pos, OpLoad, typ.Int) + v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) + v2.AuxInt = int64ToAuxInt(config.PtrSize) + v2.AddArg(ptr) + v1.AddArg2(v2, mem) + v3 := b.NewValue0(v.Pos, OpLoad, typ.Int) + v4 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) + v4.AuxInt = int64ToAuxInt(2 * config.PtrSize) + v4.AddArg(ptr) + v3.AddArg2(v4, mem) + v.AddArg3(v0, v1, v3) + return true + } + // match: (Load ptr mem) + // cond: t.IsInterface() + // result: (IMake (Load ptr mem) (Load (OffPtr [config.PtrSize] ptr) mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsInterface()) { + break + } + v.reset(OpIMake) + v0 := b.NewValue0(v.Pos, OpLoad, typ.Uintptr) + v0.AddArg2(ptr, mem) + v1 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr) + v2 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr) + v2.AuxInt = int64ToAuxInt(config.PtrSize) + v2.AddArg(ptr) + v1.AddArg2(v2, mem) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValuedec_OpSliceCap(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ 
:= &b.Func.Config.Types + // match: (SliceCap (SliceMake _ _ cap)) + // result: cap + for { + if v_0.Op != OpSliceMake { + break + } + cap := v_0.Args[2] + v.copyOf(cap) + return true + } + // match: (SliceCap x:(Load ptr mem)) + // cond: t.IsSlice() + // result: @x.Block (Load (OffPtr [2*config.PtrSize] ptr) mem) + for { + x := v_0 + if x.Op != OpLoad { + break + } + t := x.Type + mem := x.Args[1] + ptr := x.Args[0] + if !(t.IsSlice()) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpLoad, typ.Int) + v.copyOf(v0) + v1 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) + v1.AuxInt = int64ToAuxInt(2 * config.PtrSize) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + return true + } + return false +} +func rewriteValuedec_OpSliceLen(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (SliceLen (SliceMake _ len _)) + // result: len + for { + if v_0.Op != OpSliceMake { + break + } + len := v_0.Args[1] + v.copyOf(len) + return true + } + // match: (SliceLen x:(Load ptr mem)) + // cond: t.IsSlice() + // result: @x.Block (Load (OffPtr [config.PtrSize] ptr) mem) + for { + x := v_0 + if x.Op != OpLoad { + break + } + t := x.Type + mem := x.Args[1] + ptr := x.Args[0] + if !(t.IsSlice()) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpLoad, typ.Int) + v.copyOf(v0) + v1 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) + v1.AuxInt = int64ToAuxInt(config.PtrSize) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + return true + } + return false +} +func rewriteValuedec_OpSlicePtr(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (SlicePtr (SliceMake ptr _ _ )) + // result: ptr + for { + if v_0.Op != OpSliceMake { + break + } + ptr := v_0.Args[0] + v.copyOf(ptr) + return true + } + // match: (SlicePtr x:(Load ptr mem)) + // cond: t.IsSlice() + // result: @x.Block (Load ptr mem) + for { + x := v_0 + if x.Op != OpLoad { + break + } + t := x.Type + mem := x.Args[1] + ptr := x.Args[0] + if !(t.IsSlice()) { + break + } + 
b = x.Block + v0 := b.NewValue0(v.Pos, OpLoad, t.Elem().PtrTo()) + v.copyOf(v0) + v0.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValuedec_OpSlicePtrUnchecked(v *Value) bool { + v_0 := v.Args[0] + // match: (SlicePtrUnchecked (SliceMake ptr _ _ )) + // result: ptr + for { + if v_0.Op != OpSliceMake { + break + } + ptr := v_0.Args[0] + v.copyOf(ptr) + return true + } + return false +} +func rewriteValuedec_OpStore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Store {t} _ _ mem) + // cond: t.Size() == 0 + // result: mem + for { + t := auxToType(v.Aux) + mem := v_2 + if !(t.Size() == 0) { + break + } + v.copyOf(mem) + return true + } + // match: (Store {t} dst (ComplexMake real imag) mem) + // cond: t.Size() == 8 + // result: (Store {typ.Float32} (OffPtr [4] dst) imag (Store {typ.Float32} dst real mem)) + for { + t := auxToType(v.Aux) + dst := v_0 + if v_1.Op != OpComplexMake { + break + } + imag := v_1.Args[1] + real := v_1.Args[0] + mem := v_2 + if !(t.Size() == 8) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(typ.Float32) + v0 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr) + v0.AuxInt = int64ToAuxInt(4) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(typ.Float32) + v1.AddArg3(dst, real, mem) + v.AddArg3(v0, imag, v1) + return true + } + // match: (Store {t} dst (ComplexMake real imag) mem) + // cond: t.Size() == 16 + // result: (Store {typ.Float64} (OffPtr [8] dst) imag (Store {typ.Float64} dst real mem)) + for { + t := auxToType(v.Aux) + dst := v_0 + if v_1.Op != OpComplexMake { + break + } + imag := v_1.Args[1] + real := v_1.Args[0] + mem := v_2 + if !(t.Size() == 16) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(typ.Float64) + v0 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr) + v0.AuxInt = int64ToAuxInt(8) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + 
v1.Aux = typeToAux(typ.Float64) + v1.AddArg3(dst, real, mem) + v.AddArg3(v0, imag, v1) + return true + } + // match: (Store dst (StringMake ptr len) mem) + // result: (Store {typ.Int} (OffPtr [config.PtrSize] dst) len (Store {typ.BytePtr} dst ptr mem)) + for { + dst := v_0 + if v_1.Op != OpStringMake { + break + } + len := v_1.Args[1] + ptr := v_1.Args[0] + mem := v_2 + v.reset(OpStore) + v.Aux = typeToAux(typ.Int) + v0 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) + v0.AuxInt = int64ToAuxInt(config.PtrSize) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(typ.BytePtr) + v1.AddArg3(dst, ptr, mem) + v.AddArg3(v0, len, v1) + return true + } + // match: (Store {t} dst (SliceMake ptr len cap) mem) + // result: (Store {typ.Int} (OffPtr [2*config.PtrSize] dst) cap (Store {typ.Int} (OffPtr [config.PtrSize] dst) len (Store {t.Elem().PtrTo()} dst ptr mem))) + for { + t := auxToType(v.Aux) + dst := v_0 + if v_1.Op != OpSliceMake { + break + } + cap := v_1.Args[2] + ptr := v_1.Args[0] + len := v_1.Args[1] + mem := v_2 + v.reset(OpStore) + v.Aux = typeToAux(typ.Int) + v0 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) + v0.AuxInt = int64ToAuxInt(2 * config.PtrSize) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(typ.Int) + v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) + v2.AuxInt = int64ToAuxInt(config.PtrSize) + v2.AddArg(dst) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = typeToAux(t.Elem().PtrTo()) + v3.AddArg3(dst, ptr, mem) + v1.AddArg3(v2, len, v3) + v.AddArg3(v0, cap, v1) + return true + } + // match: (Store dst (IMake itab data) mem) + // result: (Store {typ.BytePtr} (OffPtr [config.PtrSize] dst) data (Store {typ.Uintptr} dst itab mem)) + for { + dst := v_0 + if v_1.Op != OpIMake { + break + } + data := v_1.Args[1] + itab := v_1.Args[0] + mem := v_2 + v.reset(OpStore) + v.Aux = typeToAux(typ.BytePtr) + v0 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr) + v0.AuxInt = 
int64ToAuxInt(config.PtrSize) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(typ.Uintptr) + v1.AddArg3(dst, itab, mem) + v.AddArg3(v0, data, v1) + return true + } + // match: (Store dst (StructMake1 f0) mem) + // result: (Store {t.FieldType(0)} (OffPtr [0] dst) f0 mem) + for { + dst := v_0 + if v_1.Op != OpStructMake1 { + break + } + t := v_1.Type + f0 := v_1.Args[0] + mem := v_2 + v.reset(OpStore) + v.Aux = typeToAux(t.FieldType(0)) + v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) + v0.AuxInt = int64ToAuxInt(0) + v0.AddArg(dst) + v.AddArg3(v0, f0, mem) + return true + } + // match: (Store dst (StructMake2 f0 f1) mem) + // result: (Store {t.FieldType(1)} (OffPtr [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr [0] dst) f0 mem)) + for { + dst := v_0 + if v_1.Op != OpStructMake2 { + break + } + t := v_1.Type + f1 := v_1.Args[1] + f0 := v_1.Args[0] + mem := v_2 + v.reset(OpStore) + v.Aux = typeToAux(t.FieldType(1)) + v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) + v0.AuxInt = int64ToAuxInt(t.FieldOff(1)) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t.FieldType(0)) + v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) + v2.AuxInt = int64ToAuxInt(0) + v2.AddArg(dst) + v1.AddArg3(v2, f0, mem) + v.AddArg3(v0, f1, v1) + return true + } + // match: (Store dst (StructMake3 f0 f1 f2) mem) + // result: (Store {t.FieldType(2)} (OffPtr [t.FieldOff(2)] dst) f2 (Store {t.FieldType(1)} (OffPtr [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr [0] dst) f0 mem))) + for { + dst := v_0 + if v_1.Op != OpStructMake3 { + break + } + t := v_1.Type + f2 := v_1.Args[2] + f0 := v_1.Args[0] + f1 := v_1.Args[1] + mem := v_2 + v.reset(OpStore) + v.Aux = typeToAux(t.FieldType(2)) + v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) + v0.AuxInt = int64ToAuxInt(t.FieldOff(2)) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = 
typeToAux(t.FieldType(1)) + v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) + v2.AuxInt = int64ToAuxInt(t.FieldOff(1)) + v2.AddArg(dst) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = typeToAux(t.FieldType(0)) + v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) + v4.AuxInt = int64ToAuxInt(0) + v4.AddArg(dst) + v3.AddArg3(v4, f0, mem) + v1.AddArg3(v2, f1, v3) + v.AddArg3(v0, f2, v1) + return true + } + // match: (Store dst (StructMake4 f0 f1 f2 f3) mem) + // result: (Store {t.FieldType(3)} (OffPtr [t.FieldOff(3)] dst) f3 (Store {t.FieldType(2)} (OffPtr [t.FieldOff(2)] dst) f2 (Store {t.FieldType(1)} (OffPtr [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr [0] dst) f0 mem)))) + for { + dst := v_0 + if v_1.Op != OpStructMake4 { + break + } + t := v_1.Type + f3 := v_1.Args[3] + f0 := v_1.Args[0] + f1 := v_1.Args[1] + f2 := v_1.Args[2] + mem := v_2 + v.reset(OpStore) + v.Aux = typeToAux(t.FieldType(3)) + v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo()) + v0.AuxInt = int64ToAuxInt(t.FieldOff(3)) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t.FieldType(2)) + v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) + v2.AuxInt = int64ToAuxInt(t.FieldOff(2)) + v2.AddArg(dst) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = typeToAux(t.FieldType(1)) + v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) + v4.AuxInt = int64ToAuxInt(t.FieldOff(1)) + v4.AddArg(dst) + v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v5.Aux = typeToAux(t.FieldType(0)) + v6 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) + v6.AuxInt = int64ToAuxInt(0) + v6.AddArg(dst) + v5.AddArg3(v6, f0, mem) + v3.AddArg3(v4, f1, v5) + v1.AddArg3(v2, f2, v3) + v.AddArg3(v0, f3, v1) + return true + } + // match: (Store dst (ArrayMake1 e) mem) + // result: (Store {e.Type} dst e mem) + for { + dst := v_0 + if v_1.Op != OpArrayMake1 { + break + } + e := v_1.Args[0] + mem := v_2 + 
v.reset(OpStore) + v.Aux = typeToAux(e.Type) + v.AddArg3(dst, e, mem) + return true + } + return false +} +func rewriteValuedec_OpStringLen(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (StringLen (StringMake _ len)) + // result: len + for { + if v_0.Op != OpStringMake { + break + } + len := v_0.Args[1] + v.copyOf(len) + return true + } + // match: (StringLen x:(Load ptr mem)) + // cond: t.IsString() + // result: @x.Block (Load (OffPtr [config.PtrSize] ptr) mem) + for { + x := v_0 + if x.Op != OpLoad { + break + } + t := x.Type + mem := x.Args[1] + ptr := x.Args[0] + if !(t.IsString()) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpLoad, typ.Int) + v.copyOf(v0) + v1 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr) + v1.AuxInt = int64ToAuxInt(config.PtrSize) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + return true + } + return false +} +func rewriteValuedec_OpStringPtr(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (StringPtr (StringMake ptr _)) + // result: ptr + for { + if v_0.Op != OpStringMake { + break + } + ptr := v_0.Args[0] + v.copyOf(ptr) + return true + } + // match: (StringPtr x:(Load ptr mem)) + // cond: t.IsString() + // result: @x.Block (Load ptr mem) + for { + x := v_0 + if x.Op != OpLoad { + break + } + t := x.Type + mem := x.Args[1] + ptr := x.Args[0] + if !(t.IsString()) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr) + v.copyOf(v0) + v0.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValuedec_OpStructMake1(v *Value) bool { + v_0 := v.Args[0] + // match: (StructMake1 x) + // cond: x.Type.IsPtrShaped() + // result: x + for { + x := v_0 + if !(x.Type.IsPtrShaped()) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValuedec_OpStructSelect(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (StructSelect [0] (IData x)) + // result: (IData x) + for { + 
if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpIData { + break + } + x := v_0.Args[0] + v.reset(OpIData) + v.AddArg(x) + return true + } + // match: (StructSelect (StructMake1 x)) + // result: x + for { + if v_0.Op != OpStructMake1 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (StructSelect [0] (StructMake2 x _)) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake2 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (StructSelect [1] (StructMake2 _ x)) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake2 { + break + } + x := v_0.Args[1] + v.copyOf(x) + return true + } + // match: (StructSelect [0] (StructMake3 x _ _)) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake3 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (StructSelect [1] (StructMake3 _ x _)) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake3 { + break + } + x := v_0.Args[1] + v.copyOf(x) + return true + } + // match: (StructSelect [2] (StructMake3 _ _ x)) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpStructMake3 { + break + } + x := v_0.Args[2] + v.copyOf(x) + return true + } + // match: (StructSelect [0] (StructMake4 x _ _ _)) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake4 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (StructSelect [1] (StructMake4 _ x _ _)) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake4 { + break + } + x := v_0.Args[1] + v.copyOf(x) + return true + } + // match: (StructSelect [2] (StructMake4 _ _ x _)) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpStructMake4 { + break + } + x := v_0.Args[2] + v.copyOf(x) + return true + } + // match: (StructSelect [3] (StructMake4 _ _ _ x)) + // result: x + for { + if 
auxIntToInt64(v.AuxInt) != 3 || v_0.Op != OpStructMake4 { + break + } + x := v_0.Args[3] + v.copyOf(x) + return true + } + // match: (StructSelect [0] x) + // cond: x.Type.IsPtrShaped() + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + if !(x.Type.IsPtrShaped()) { + break + } + v.copyOf(x) + return true + } + // match: (StructSelect [i] x:(Load ptr mem)) + // result: @x.Block (Load (OffPtr [t.FieldOff(int(i))] ptr) mem) + for { + i := auxIntToInt64(v.AuxInt) + x := v_0 + if x.Op != OpLoad { + break + } + t := x.Type + mem := x.Args[1] + ptr := x.Args[0] + b = x.Block + v0 := b.NewValue0(v.Pos, OpLoad, v.Type) + v.copyOf(v0) + v1 := b.NewValue0(v.Pos, OpOffPtr, v.Type.PtrTo()) + v1.AuxInt = int64ToAuxInt(t.FieldOff(int(i))) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + return true + } + return false +} +func rewriteBlockdec(b *Block) bool { + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewritedec64.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewritedec64.go new file mode 100644 index 0000000000000000000000000000000000000000..901dc758c30cd3d1f33bbb8c50ef010195cbecbf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewritedec64.go @@ -0,0 +1,2537 @@ +// Code generated from _gen/dec64.rules using 'go generate'; DO NOT EDIT. 
+ +package ssa + +import "cmd/compile/internal/types" + +func rewriteValuedec64(v *Value) bool { + switch v.Op { + case OpAdd64: + return rewriteValuedec64_OpAdd64(v) + case OpAnd64: + return rewriteValuedec64_OpAnd64(v) + case OpArg: + return rewriteValuedec64_OpArg(v) + case OpBitLen64: + return rewriteValuedec64_OpBitLen64(v) + case OpBswap64: + return rewriteValuedec64_OpBswap64(v) + case OpCom64: + return rewriteValuedec64_OpCom64(v) + case OpConst64: + return rewriteValuedec64_OpConst64(v) + case OpCtz64: + return rewriteValuedec64_OpCtz64(v) + case OpCtz64NonZero: + v.Op = OpCtz64 + return true + case OpEq64: + return rewriteValuedec64_OpEq64(v) + case OpInt64Hi: + return rewriteValuedec64_OpInt64Hi(v) + case OpInt64Lo: + return rewriteValuedec64_OpInt64Lo(v) + case OpLeq64: + return rewriteValuedec64_OpLeq64(v) + case OpLeq64U: + return rewriteValuedec64_OpLeq64U(v) + case OpLess64: + return rewriteValuedec64_OpLess64(v) + case OpLess64U: + return rewriteValuedec64_OpLess64U(v) + case OpLoad: + return rewriteValuedec64_OpLoad(v) + case OpLsh16x64: + return rewriteValuedec64_OpLsh16x64(v) + case OpLsh32x64: + return rewriteValuedec64_OpLsh32x64(v) + case OpLsh64x16: + return rewriteValuedec64_OpLsh64x16(v) + case OpLsh64x32: + return rewriteValuedec64_OpLsh64x32(v) + case OpLsh64x64: + return rewriteValuedec64_OpLsh64x64(v) + case OpLsh64x8: + return rewriteValuedec64_OpLsh64x8(v) + case OpLsh8x64: + return rewriteValuedec64_OpLsh8x64(v) + case OpMul64: + return rewriteValuedec64_OpMul64(v) + case OpNeg64: + return rewriteValuedec64_OpNeg64(v) + case OpNeq64: + return rewriteValuedec64_OpNeq64(v) + case OpOr32: + return rewriteValuedec64_OpOr32(v) + case OpOr64: + return rewriteValuedec64_OpOr64(v) + case OpRotateLeft16: + return rewriteValuedec64_OpRotateLeft16(v) + case OpRotateLeft32: + return rewriteValuedec64_OpRotateLeft32(v) + case OpRotateLeft64: + return rewriteValuedec64_OpRotateLeft64(v) + case OpRotateLeft8: + return 
rewriteValuedec64_OpRotateLeft8(v) + case OpRsh16Ux64: + return rewriteValuedec64_OpRsh16Ux64(v) + case OpRsh16x64: + return rewriteValuedec64_OpRsh16x64(v) + case OpRsh32Ux64: + return rewriteValuedec64_OpRsh32Ux64(v) + case OpRsh32x64: + return rewriteValuedec64_OpRsh32x64(v) + case OpRsh64Ux16: + return rewriteValuedec64_OpRsh64Ux16(v) + case OpRsh64Ux32: + return rewriteValuedec64_OpRsh64Ux32(v) + case OpRsh64Ux64: + return rewriteValuedec64_OpRsh64Ux64(v) + case OpRsh64Ux8: + return rewriteValuedec64_OpRsh64Ux8(v) + case OpRsh64x16: + return rewriteValuedec64_OpRsh64x16(v) + case OpRsh64x32: + return rewriteValuedec64_OpRsh64x32(v) + case OpRsh64x64: + return rewriteValuedec64_OpRsh64x64(v) + case OpRsh64x8: + return rewriteValuedec64_OpRsh64x8(v) + case OpRsh8Ux64: + return rewriteValuedec64_OpRsh8Ux64(v) + case OpRsh8x64: + return rewriteValuedec64_OpRsh8x64(v) + case OpSignExt16to64: + return rewriteValuedec64_OpSignExt16to64(v) + case OpSignExt32to64: + return rewriteValuedec64_OpSignExt32to64(v) + case OpSignExt8to64: + return rewriteValuedec64_OpSignExt8to64(v) + case OpStore: + return rewriteValuedec64_OpStore(v) + case OpSub64: + return rewriteValuedec64_OpSub64(v) + case OpTrunc64to16: + return rewriteValuedec64_OpTrunc64to16(v) + case OpTrunc64to32: + return rewriteValuedec64_OpTrunc64to32(v) + case OpTrunc64to8: + return rewriteValuedec64_OpTrunc64to8(v) + case OpXor64: + return rewriteValuedec64_OpXor64(v) + case OpZeroExt16to64: + return rewriteValuedec64_OpZeroExt16to64(v) + case OpZeroExt32to64: + return rewriteValuedec64_OpZeroExt32to64(v) + case OpZeroExt8to64: + return rewriteValuedec64_OpZeroExt8to64(v) + } + return false +} +func rewriteValuedec64_OpAdd64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Add64 x y) + // result: (Int64Make (Add32withcarry (Int64Hi x) (Int64Hi y) (Select1 (Add32carry (Int64Lo x) (Int64Lo y)))) (Select0 (Add32carry (Int64Lo x) (Int64Lo y)))) + for 
{ + x := v_0 + y := v_1 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpAdd32withcarry, typ.Int32) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpAdd32carry, types.NewTuple(typ.UInt32, types.TypeFlags)) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(x) + v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v6.AddArg(y) + v4.AddArg2(v5, v6) + v3.AddArg(v4) + v0.AddArg3(v1, v2, v3) + v7 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) + v7.AddArg(v4) + v.AddArg2(v0, v7) + return true + } +} +func rewriteValuedec64_OpAnd64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (And64 x y) + // result: (Int64Make (And32 (Int64Hi x) (Int64Hi y)) (And32 (Int64Lo x) (Int64Lo y))) + for { + x := v_0 + y := v_1 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v4.AddArg(x) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(y) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValuedec64_OpArg(v *Value) bool { + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Arg {n} [off]) + // cond: is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") + // result: (Int64Make (Arg {n} [off+4]) (Arg {n} [off])) + for { + off := auxIntToInt32(v.AuxInt) + n := auxToSym(v.Aux) + if !(is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) { + break + } + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpArg, typ.Int32) + 
v0.AuxInt = int32ToAuxInt(off + 4) + v0.Aux = symToAux(n) + v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32) + v1.AuxInt = int32ToAuxInt(off) + v1.Aux = symToAux(n) + v.AddArg2(v0, v1) + return true + } + // match: (Arg {n} [off]) + // cond: is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") + // result: (Int64Make (Arg {n} [off+4]) (Arg {n} [off])) + for { + off := auxIntToInt32(v.AuxInt) + n := auxToSym(v.Aux) + if !(is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) { + break + } + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpArg, typ.UInt32) + v0.AuxInt = int32ToAuxInt(off + 4) + v0.Aux = symToAux(n) + v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32) + v1.AuxInt = int32ToAuxInt(off) + v1.Aux = symToAux(n) + v.AddArg2(v0, v1) + return true + } + // match: (Arg {n} [off]) + // cond: is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") + // result: (Int64Make (Arg {n} [off]) (Arg {n} [off+4])) + for { + off := auxIntToInt32(v.AuxInt) + n := auxToSym(v.Aux) + if !(is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) { + break + } + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpArg, typ.Int32) + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(n) + v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32) + v1.AuxInt = int32ToAuxInt(off + 4) + v1.Aux = symToAux(n) + v.AddArg2(v0, v1) + return true + } + // match: (Arg {n} [off]) + // cond: is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") + // result: (Int64Make (Arg {n} [off]) (Arg {n} [off+4])) + for { + off := auxIntToInt32(v.AuxInt) + n := auxToSym(v.Aux) + if !(is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) { + break + } + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpArg, typ.UInt32) + 
v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(n) + v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32) + v1.AuxInt = int32ToAuxInt(off + 4) + v1.Aux = symToAux(n) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValuedec64_OpBitLen64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen64 x) + // result: (Add32 (BitLen32 (Int64Hi x)) (BitLen32 (Or32 (Int64Lo x) (Zeromask (Int64Hi x))))) + for { + x := v_0 + v.reset(OpAdd32) + v.Type = typ.Int + v0 := b.NewValue0(v.Pos, OpBitLen32, typ.Int) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpBitLen32, typ.Int) + v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v4.AddArg(x) + v5 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v5.AddArg(v1) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValuedec64_OpBswap64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Bswap64 x) + // result: (Int64Make (Bswap32 (Int64Lo x)) (Bswap32 (Int64Hi x))) + for { + x := v_0 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpBswap32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpBswap32, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v3.AddArg(x) + v2.AddArg(v3) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValuedec64_OpCom64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Com64 x) + // result: (Int64Make (Com32 (Int64Hi x)) (Com32 (Int64Lo x))) + for { + x := v_0 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpCom32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpCom32, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v3.AddArg(x) + v2.AddArg(v3) + 
v.AddArg2(v0, v2) + return true + } +} +func rewriteValuedec64_OpConst64(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Const64 [c]) + // cond: t.IsSigned() + // result: (Int64Make (Const32 [int32(c>>32)]) (Const32 [int32(c)])) + for { + t := v.Type + c := auxIntToInt64(v.AuxInt) + if !(t.IsSigned()) { + break + } + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpConst32, typ.Int32) + v0.AuxInt = int32ToAuxInt(int32(c >> 32)) + v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v1.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(v0, v1) + return true + } + // match: (Const64 [c]) + // cond: !t.IsSigned() + // result: (Int64Make (Const32 [int32(c>>32)]) (Const32 [int32(c)])) + for { + t := v.Type + c := auxIntToInt64(v.AuxInt) + if !(!t.IsSigned()) { + break + } + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v0.AuxInt = int32ToAuxInt(int32(c >> 32)) + v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v1.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValuedec64_OpCtz64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz64 x) + // result: (Add32 (Ctz32 (Int64Lo x)) (And32 (Com32 (Zeromask (Int64Lo x))) (Ctz32 (Int64Hi x)))) + for { + x := v_0 + v.reset(OpAdd32) + v.Type = typ.UInt32 + v0 := b.NewValue0(v.Pos, OpCtz32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpCom32, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v4.AddArg(v1) + v3.AddArg(v4) + v5 := b.NewValue0(v.Pos, OpCtz32, typ.UInt32) + v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v6.AddArg(x) + v5.AddArg(v6) + v2.AddArg2(v3, v5) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValuedec64_OpEq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: 
(Eq64 x y) + // result: (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Eq32 (Int64Lo x) (Int64Lo y))) + for { + x := v_0 + y := v_1 + v.reset(OpAndB) + v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpEq32, typ.Bool) + v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v4.AddArg(x) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(y) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValuedec64_OpInt64Hi(v *Value) bool { + v_0 := v.Args[0] + // match: (Int64Hi (Int64Make hi _)) + // result: hi + for { + if v_0.Op != OpInt64Make { + break + } + hi := v_0.Args[0] + v.copyOf(hi) + return true + } + return false +} +func rewriteValuedec64_OpInt64Lo(v *Value) bool { + v_0 := v.Args[0] + // match: (Int64Lo (Int64Make _ lo)) + // result: lo + for { + if v_0.Op != OpInt64Make { + break + } + lo := v_0.Args[1] + v.copyOf(lo) + return true + } + return false +} +func rewriteValuedec64_OpLeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq64 x y) + // result: (OrB (Less32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Leq32U (Int64Lo x) (Int64Lo y)))) + for { + x := v_0 + y := v_1 + v.reset(OpOrB) + v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool) + v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool) + v4.AddArg2(v1, v2) + v5 := b.NewValue0(v.Pos, OpLeq32U, typ.Bool) + v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v6.AddArg(x) + v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v7.AddArg(y) + v5.AddArg2(v6, v7) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValuedec64_OpLeq64U(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq64U x y) + // result: (OrB (Less32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Leq32U (Int64Lo x) (Int64Lo y)))) + for { + x := v_0 + y := v_1 + v.reset(OpOrB) + v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool) + v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool) + v4.AddArg2(v1, v2) + v5 := b.NewValue0(v.Pos, OpLeq32U, typ.Bool) + v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v6.AddArg(x) + v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v7.AddArg(y) + v5.AddArg2(v6, v7) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValuedec64_OpLess64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less64 x y) + // result: (OrB (Less32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Less32U (Int64Lo x) (Int64Lo y)))) + for { + x := v_0 + y := v_1 + v.reset(OpOrB) + v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool) + v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool) + v4.AddArg2(v1, v2) + v5 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) + v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v6.AddArg(x) + v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v7.AddArg(y) + v5.AddArg2(v6, v7) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValuedec64_OpLess64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less64U x y) + // result: (OrB (Less32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) 
(Int64Hi y)) (Less32U (Int64Lo x) (Int64Lo y)))) + for { + x := v_0 + y := v_1 + v.reset(OpOrB) + v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool) + v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool) + v4.AddArg2(v1, v2) + v5 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) + v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v6.AddArg(x) + v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v7.AddArg(y) + v5.AddArg2(v6, v7) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValuedec64_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Load ptr mem) + // cond: is64BitInt(t) && !config.BigEndian && t.IsSigned() + // result: (Int64Make (Load (OffPtr [4] ptr) mem) (Load ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitInt(t) && !config.BigEndian && t.IsSigned()) { + break + } + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpLoad, typ.Int32) + v1 := b.NewValue0(v.Pos, OpOffPtr, typ.Int32Ptr) + v1.AuxInt = int64ToAuxInt(4) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + v2 := b.NewValue0(v.Pos, OpLoad, typ.UInt32) + v2.AddArg2(ptr, mem) + v.AddArg2(v0, v2) + return true + } + // match: (Load ptr mem) + // cond: is64BitInt(t) && !config.BigEndian && !t.IsSigned() + // result: (Int64Make (Load (OffPtr [4] ptr) mem) (Load ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitInt(t) && !config.BigEndian && !t.IsSigned()) { + break + } + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpLoad, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr) + v1.AuxInt = int64ToAuxInt(4) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + v2 := b.NewValue0(v.Pos, OpLoad, typ.UInt32) + v2.AddArg2(ptr, mem) + v.AddArg2(v0, v2) + return true + } + // match: (Load 
ptr mem) + // cond: is64BitInt(t) && config.BigEndian && t.IsSigned() + // result: (Int64Make (Load ptr mem) (Load (OffPtr [4] ptr) mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitInt(t) && config.BigEndian && t.IsSigned()) { + break + } + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpLoad, typ.Int32) + v0.AddArg2(ptr, mem) + v1 := b.NewValue0(v.Pos, OpLoad, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr) + v2.AuxInt = int64ToAuxInt(4) + v2.AddArg(ptr) + v1.AddArg2(v2, mem) + v.AddArg2(v0, v1) + return true + } + // match: (Load ptr mem) + // cond: is64BitInt(t) && config.BigEndian && !t.IsSigned() + // result: (Int64Make (Load ptr mem) (Load (OffPtr [4] ptr) mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitInt(t) && config.BigEndian && !t.IsSigned()) { + break + } + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpLoad, typ.UInt32) + v0.AddArg2(ptr, mem) + v1 := b.NewValue0(v.Pos, OpLoad, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr) + v2.AuxInt = int64ToAuxInt(4) + v2.AddArg(ptr) + v1.AddArg2(v2, mem) + v.AddArg2(v0, v1) + return true + } + return false +} +func rewriteValuedec64_OpLsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x64 _ (Int64Make (Const32 [c]) _)) + // cond: c != 0 + // result: (Const32 [0]) + for { + if v_1.Op != OpInt64Make { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c != 0) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (Lsh16x64 [c] x (Int64Make (Const32 [0]) lo)) + // result: (Lsh16x32 [c] x lo) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 { + break + } + v.reset(OpLsh16x32) + v.AuxInt = boolToAuxInt(c) + v.AddArg2(x, 
lo) + return true + } + // match: (Lsh16x64 x (Int64Make hi lo)) + // cond: hi.Op != OpConst32 + // result: (Lsh16x32 x (Or32 (Zeromask hi) lo)) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + hi := v_1.Args[0] + if !(hi.Op != OpConst32) { + break + } + v.reset(OpLsh16x32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v1.AddArg(hi) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) + return true + } + // match: (Lsh16x64 x y) + // result: (Lsh16x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) + for { + x := v_0 + y := v_1 + v.reset(OpLsh16x32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v3.AddArg(y) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValuedec64_OpLsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x64 _ (Int64Make (Const32 [c]) _)) + // cond: c != 0 + // result: (Const32 [0]) + for { + if v_1.Op != OpInt64Make { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c != 0) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (Lsh32x64 [c] x (Int64Make (Const32 [0]) lo)) + // result: (Lsh32x32 [c] x lo) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 { + break + } + v.reset(OpLsh32x32) + v.AuxInt = boolToAuxInt(c) + v.AddArg2(x, lo) + return true + } + // match: (Lsh32x64 x (Int64Make hi lo)) + // cond: hi.Op != OpConst32 + // result: (Lsh32x32 x (Or32 (Zeromask hi) lo)) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := 
v_1.Args[1] + hi := v_1.Args[0] + if !(hi.Op != OpConst32) { + break + } + v.reset(OpLsh32x32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v1.AddArg(hi) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) + return true + } + // match: (Lsh32x64 x y) + // result: (Lsh32x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) + for { + x := v_0 + y := v_1 + v.reset(OpLsh32x32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v3.AddArg(y) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValuedec64_OpLsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x16 x s) + // result: (Int64Make (Or32 (Or32 (Lsh32x16 (Int64Hi x) s) (Rsh32Ux16 (Int64Lo x) (Sub16 (Const16 [32]) s))) (Lsh32x16 (Int64Lo x) (Sub16 s (Const16 [32])))) (Lsh32x16 (Int64Lo x) s)) + for { + x := v_0 + s := v_1 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v3.AddArg(x) + v2.AddArg2(v3, s) + v4 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(x) + v6 := b.NewValue0(v.Pos, OpSub16, typ.UInt16) + v7 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v7.AuxInt = int16ToAuxInt(32) + v6.AddArg2(v7, s) + v4.AddArg2(v5, v6) + v1.AddArg2(v2, v4) + v8 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32) + v9 := b.NewValue0(v.Pos, OpSub16, typ.UInt16) + v9.AddArg2(s, v7) + v8.AddArg2(v5, v9) + v0.AddArg2(v1, v8) + v10 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32) + v10.AddArg2(v5, s) + v.AddArg2(v0, v10) + return true + } +} +func rewriteValuedec64_OpLsh64x32(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x32 x s) + // result: (Int64Make (Or32 (Or32 (Lsh32x32 (Int64Hi x) s) (Rsh32Ux32 (Int64Lo x) (Sub32 (Const32 [32]) s))) (Lsh32x32 (Int64Lo x) (Sub32 s (Const32 [32])))) (Lsh32x32 (Int64Lo x) s)) + for { + x := v_0 + s := v_1 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v3.AddArg(x) + v2.AddArg2(v3, s) + v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(x) + v6 := b.NewValue0(v.Pos, OpSub32, typ.UInt32) + v7 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v7.AuxInt = int32ToAuxInt(32) + v6.AddArg2(v7, s) + v4.AddArg2(v5, v6) + v1.AddArg2(v2, v4) + v8 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v9 := b.NewValue0(v.Pos, OpSub32, typ.UInt32) + v9.AddArg2(s, v7) + v8.AddArg2(v5, v9) + v0.AddArg2(v1, v8) + v10 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v10.AddArg2(v5, s) + v.AddArg2(v0, v10) + return true + } +} +func rewriteValuedec64_OpLsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x64 _ (Int64Make (Const32 [c]) _)) + // cond: c != 0 + // result: (Const64 [0]) + for { + if v_1.Op != OpInt64Make { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c != 0) { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Lsh64x64 [c] x (Int64Make (Const32 [0]) lo)) + // result: (Lsh64x32 [c] x lo) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 { + break + } + v.reset(OpLsh64x32) + v.AuxInt = boolToAuxInt(c) + 
v.AddArg2(x, lo) + return true + } + // match: (Lsh64x64 x (Int64Make hi lo)) + // cond: hi.Op != OpConst32 + // result: (Lsh64x32 x (Or32 (Zeromask hi) lo)) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + hi := v_1.Args[0] + if !(hi.Op != OpConst32) { + break + } + v.reset(OpLsh64x32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v1.AddArg(hi) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) + return true + } + // match: (Lsh64x64 x y) + // result: (Lsh64x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) + for { + x := v_0 + y := v_1 + v.reset(OpLsh64x32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v3.AddArg(y) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValuedec64_OpLsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x8 x s) + // result: (Int64Make (Or32 (Or32 (Lsh32x8 (Int64Hi x) s) (Rsh32Ux8 (Int64Lo x) (Sub8 (Const8 [32]) s))) (Lsh32x8 (Int64Lo x) (Sub8 s (Const8 [32])))) (Lsh32x8 (Int64Lo x) s)) + for { + x := v_0 + s := v_1 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v3.AddArg(x) + v2.AddArg2(v3, s) + v4 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(x) + v6 := b.NewValue0(v.Pos, OpSub8, typ.UInt8) + v7 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) + v7.AuxInt = int8ToAuxInt(32) + v6.AddArg2(v7, s) + v4.AddArg2(v5, v6) + v1.AddArg2(v2, v4) + v8 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32) + v9 := b.NewValue0(v.Pos, OpSub8, typ.UInt8) + v9.AddArg2(s, v7) + 
v8.AddArg2(v5, v9) + v0.AddArg2(v1, v8) + v10 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32) + v10.AddArg2(v5, s) + v.AddArg2(v0, v10) + return true + } +} +func rewriteValuedec64_OpLsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x64 _ (Int64Make (Const32 [c]) _)) + // cond: c != 0 + // result: (Const32 [0]) + for { + if v_1.Op != OpInt64Make { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c != 0) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (Lsh8x64 [c] x (Int64Make (Const32 [0]) lo)) + // result: (Lsh8x32 [c] x lo) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 { + break + } + v.reset(OpLsh8x32) + v.AuxInt = boolToAuxInt(c) + v.AddArg2(x, lo) + return true + } + // match: (Lsh8x64 x (Int64Make hi lo)) + // cond: hi.Op != OpConst32 + // result: (Lsh8x32 x (Or32 (Zeromask hi) lo)) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + hi := v_1.Args[0] + if !(hi.Op != OpConst32) { + break + } + v.reset(OpLsh8x32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v1.AddArg(hi) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) + return true + } + // match: (Lsh8x64 x y) + // result: (Lsh8x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) + for { + x := v_0 + y := v_1 + v.reset(OpLsh8x32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v3.AddArg(y) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValuedec64_OpMul64(v *Value) bool { + v_1 := v.Args[1] + v_0 := 
v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mul64 x y) + // result: (Int64Make (Add32 (Mul32 (Int64Lo x) (Int64Hi y)) (Add32 (Mul32 (Int64Hi x) (Int64Lo y)) (Select0 (Mul32uhilo (Int64Lo x) (Int64Lo y))))) (Select1 (Mul32uhilo (Int64Lo x) (Int64Lo y)))) + for { + x := v_0 + y := v_1 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v6.AddArg(x) + v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v7.AddArg(y) + v5.AddArg2(v6, v7) + v8 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) + v9 := b.NewValue0(v.Pos, OpMul32uhilo, types.NewTuple(typ.UInt32, typ.UInt32)) + v9.AddArg2(v2, v7) + v8.AddArg(v9) + v4.AddArg2(v5, v8) + v0.AddArg2(v1, v4) + v10 := b.NewValue0(v.Pos, OpSelect1, typ.UInt32) + v10.AddArg(v9) + v.AddArg2(v0, v10) + return true + } +} +func rewriteValuedec64_OpNeg64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Neg64 x) + // result: (Sub64 (Const64 [0]) x) + for { + t := v.Type + x := v_0 + v.reset(OpSub64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } +} +func rewriteValuedec64_OpNeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq64 x y) + // result: (OrB (Neq32 (Int64Hi x) (Int64Hi y)) (Neq32 (Int64Lo x) (Int64Lo y))) + for { + x := v_0 + y := v_1 + v.reset(OpOrB) + v0 := b.NewValue0(v.Pos, OpNeq32, typ.Bool) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpNeq32, typ.Bool) + v4 := 
b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v4.AddArg(x) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(y) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValuedec64_OpOr32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Or32 (Zeromask (Const32 [c])) y) + // cond: c == 0 + // result: y + for { + if v.Type != typ.UInt32 { + break + } + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpZeromask { + continue + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + y := v_1 + if !(c == 0) { + continue + } + v.copyOf(y) + return true + } + break + } + // match: (Or32 (Zeromask (Const32 [c])) y) + // cond: c != 0 + // result: (Const32 [-1]) + for { + if v.Type != typ.UInt32 { + break + } + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpZeromask { + continue + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if !(c != 0) { + continue + } + v.reset(OpConst32) + v.Type = typ.UInt32 + v.AuxInt = int32ToAuxInt(-1) + return true + } + break + } + return false +} +func rewriteValuedec64_OpOr64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Or64 x y) + // result: (Int64Make (Or32 (Int64Hi x) (Int64Hi y)) (Or32 (Int64Lo x) (Int64Lo y))) + for { + x := v_0 + y := v_1 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v4.AddArg(x) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(y) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) + return true + } +} +func 
rewriteValuedec64_OpRotateLeft16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (RotateLeft16 x (Int64Make hi lo)) + // result: (RotateLeft16 x lo) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v.reset(OpRotateLeft16) + v.AddArg2(x, lo) + return true + } + return false +} +func rewriteValuedec64_OpRotateLeft32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (RotateLeft32 x (Int64Make hi lo)) + // result: (RotateLeft32 x lo) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v.reset(OpRotateLeft32) + v.AddArg2(x, lo) + return true + } + return false +} +func rewriteValuedec64_OpRotateLeft64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (RotateLeft64 x (Int64Make hi lo)) + // result: (RotateLeft64 x lo) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v.reset(OpRotateLeft64) + v.AddArg2(x, lo) + return true + } + return false +} +func rewriteValuedec64_OpRotateLeft8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (RotateLeft8 x (Int64Make hi lo)) + // result: (RotateLeft8 x lo) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v.reset(OpRotateLeft8) + v.AddArg2(x, lo) + return true + } + return false +} +func rewriteValuedec64_OpRsh16Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux64 _ (Int64Make (Const32 [c]) _)) + // cond: c != 0 + // result: (Const32 [0]) + for { + if v_1.Op != OpInt64Make { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c != 0) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (Rsh16Ux64 [c] x (Int64Make (Const32 [0]) lo)) + // result: (Rsh16Ux32 [c] x lo) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo 
:= v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 { + break + } + v.reset(OpRsh16Ux32) + v.AuxInt = boolToAuxInt(c) + v.AddArg2(x, lo) + return true + } + // match: (Rsh16Ux64 x (Int64Make hi lo)) + // cond: hi.Op != OpConst32 + // result: (Rsh16Ux32 x (Or32 (Zeromask hi) lo)) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + hi := v_1.Args[0] + if !(hi.Op != OpConst32) { + break + } + v.reset(OpRsh16Ux32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v1.AddArg(hi) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16Ux64 x y) + // result: (Rsh16Ux32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) + for { + x := v_0 + y := v_1 + v.reset(OpRsh16Ux32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v3.AddArg(y) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValuedec64_OpRsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x64 x (Int64Make (Const32 [c]) _)) + // cond: c != 0 + // result: (Signmask (SignExt16to32 x)) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c != 0) { + break + } + v.reset(OpSignmask) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh16x64 [c] x (Int64Make (Const32 [0]) lo)) + // result: (Rsh16x32 [c] x lo) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 { + break + } + 
v.reset(OpRsh16x32) + v.AuxInt = boolToAuxInt(c) + v.AddArg2(x, lo) + return true + } + // match: (Rsh16x64 x (Int64Make hi lo)) + // cond: hi.Op != OpConst32 + // result: (Rsh16x32 x (Or32 (Zeromask hi) lo)) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + hi := v_1.Args[0] + if !(hi.Op != OpConst32) { + break + } + v.reset(OpRsh16x32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v1.AddArg(hi) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16x64 x y) + // result: (Rsh16x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) + for { + x := v_0 + y := v_1 + v.reset(OpRsh16x32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v3.AddArg(y) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValuedec64_OpRsh32Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux64 _ (Int64Make (Const32 [c]) _)) + // cond: c != 0 + // result: (Const32 [0]) + for { + if v_1.Op != OpInt64Make { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c != 0) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (Rsh32Ux64 [c] x (Int64Make (Const32 [0]) lo)) + // result: (Rsh32Ux32 [c] x lo) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 { + break + } + v.reset(OpRsh32Ux32) + v.AuxInt = boolToAuxInt(c) + v.AddArg2(x, lo) + return true + } + // match: (Rsh32Ux64 x (Int64Make hi lo)) + // cond: hi.Op != OpConst32 + // result: (Rsh32Ux32 x (Or32 (Zeromask hi) lo)) + for 
{ + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + hi := v_1.Args[0] + if !(hi.Op != OpConst32) { + break + } + v.reset(OpRsh32Ux32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v1.AddArg(hi) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32Ux64 x y) + // result: (Rsh32Ux32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) + for { + x := v_0 + y := v_1 + v.reset(OpRsh32Ux32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v3.AddArg(y) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValuedec64_OpRsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x64 x (Int64Make (Const32 [c]) _)) + // cond: c != 0 + // result: (Signmask x) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c != 0) { + break + } + v.reset(OpSignmask) + v.AddArg(x) + return true + } + // match: (Rsh32x64 [c] x (Int64Make (Const32 [0]) lo)) + // result: (Rsh32x32 [c] x lo) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 { + break + } + v.reset(OpRsh32x32) + v.AuxInt = boolToAuxInt(c) + v.AddArg2(x, lo) + return true + } + // match: (Rsh32x64 x (Int64Make hi lo)) + // cond: hi.Op != OpConst32 + // result: (Rsh32x32 x (Or32 (Zeromask hi) lo)) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + hi := v_1.Args[0] + if !(hi.Op != OpConst32) { + break + } + v.reset(OpRsh32x32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := 
b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v1.AddArg(hi) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32x64 x y) + // result: (Rsh32x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) + for { + x := v_0 + y := v_1 + v.reset(OpRsh32x32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v3.AddArg(y) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValuedec64_OpRsh64Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux16 x s) + // result: (Int64Make (Rsh32Ux16 (Int64Hi x) s) (Or32 (Or32 (Rsh32Ux16 (Int64Lo x) s) (Lsh32x16 (Int64Hi x) (Sub16 (Const16 [32]) s))) (Rsh32Ux16 (Int64Hi x) (Sub16 s (Const16 [32]))))) + for { + x := v_0 + s := v_1 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, s) + v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(x) + v4.AddArg2(v5, s) + v6 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32) + v7 := b.NewValue0(v.Pos, OpSub16, typ.UInt16) + v8 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v8.AuxInt = int16ToAuxInt(32) + v7.AddArg2(v8, s) + v6.AddArg2(v1, v7) + v3.AddArg2(v4, v6) + v9 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32) + v10 := b.NewValue0(v.Pos, OpSub16, typ.UInt16) + v10.AddArg2(s, v8) + v9.AddArg2(v1, v10) + v2.AddArg2(v3, v9) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValuedec64_OpRsh64Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux32 x s) + // result: (Int64Make (Rsh32Ux32 (Int64Hi 
x) s) (Or32 (Or32 (Rsh32Ux32 (Int64Lo x) s) (Lsh32x32 (Int64Hi x) (Sub32 (Const32 [32]) s))) (Rsh32Ux32 (Int64Hi x) (Sub32 s (Const32 [32]))))) + for { + x := v_0 + s := v_1 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, s) + v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(x) + v4.AddArg2(v5, s) + v6 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v7 := b.NewValue0(v.Pos, OpSub32, typ.UInt32) + v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v8.AuxInt = int32ToAuxInt(32) + v7.AddArg2(v8, s) + v6.AddArg2(v1, v7) + v3.AddArg2(v4, v6) + v9 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v10 := b.NewValue0(v.Pos, OpSub32, typ.UInt32) + v10.AddArg2(s, v8) + v9.AddArg2(v1, v10) + v2.AddArg2(v3, v9) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValuedec64_OpRsh64Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux64 _ (Int64Make (Const32 [c]) _)) + // cond: c != 0 + // result: (Const64 [0]) + for { + if v_1.Op != OpInt64Make { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c != 0) { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Rsh64Ux64 [c] x (Int64Make (Const32 [0]) lo)) + // result: (Rsh64Ux32 [c] x lo) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 { + break + } + v.reset(OpRsh64Ux32) + v.AuxInt = boolToAuxInt(c) + v.AddArg2(x, lo) + return true + } + // match: (Rsh64Ux64 x (Int64Make hi lo)) + // cond: hi.Op != OpConst32 + // result: (Rsh64Ux32 x (Or32 
(Zeromask hi) lo)) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + hi := v_1.Args[0] + if !(hi.Op != OpConst32) { + break + } + v.reset(OpRsh64Ux32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v1.AddArg(hi) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64Ux64 x y) + // result: (Rsh64Ux32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) + for { + x := v_0 + y := v_1 + v.reset(OpRsh64Ux32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v3.AddArg(y) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValuedec64_OpRsh64Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux8 x s) + // result: (Int64Make (Rsh32Ux8 (Int64Hi x) s) (Or32 (Or32 (Rsh32Ux8 (Int64Lo x) s) (Lsh32x8 (Int64Hi x) (Sub8 (Const8 [32]) s))) (Rsh32Ux8 (Int64Hi x) (Sub8 s (Const8 [32]))))) + for { + x := v_0 + s := v_1 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, s) + v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(x) + v4.AddArg2(v5, s) + v6 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32) + v7 := b.NewValue0(v.Pos, OpSub8, typ.UInt8) + v8 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) + v8.AuxInt = int8ToAuxInt(32) + v7.AddArg2(v8, s) + v6.AddArg2(v1, v7) + v3.AddArg2(v4, v6) + v9 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32) + v10 := b.NewValue0(v.Pos, OpSub8, typ.UInt8) + v10.AddArg2(s, v8) + v9.AddArg2(v1, v10) + v2.AddArg2(v3, v9) + v.AddArg2(v0, v2) + return 
true + } +} +func rewriteValuedec64_OpRsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x16 x s) + // result: (Int64Make (Rsh32x16 (Int64Hi x) s) (Or32 (Or32 (Rsh32Ux16 (Int64Lo x) s) (Lsh32x16 (Int64Hi x) (Sub16 (Const16 [32]) s))) (And32 (Rsh32x16 (Int64Hi x) (Sub16 s (Const16 [32]))) (Zeromask (ZeroExt16to32 (Rsh16Ux32 s (Const32 [5]))))))) + for { + x := v_0 + s := v_1 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpRsh32x16, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, s) + v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(x) + v4.AddArg2(v5, s) + v6 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32) + v7 := b.NewValue0(v.Pos, OpSub16, typ.UInt16) + v8 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v8.AuxInt = int16ToAuxInt(32) + v7.AddArg2(v8, s) + v6.AddArg2(v1, v7) + v3.AddArg2(v4, v6) + v9 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) + v10 := b.NewValue0(v.Pos, OpRsh32x16, typ.UInt32) + v11 := b.NewValue0(v.Pos, OpSub16, typ.UInt16) + v11.AddArg2(s, v8) + v10.AddArg2(v1, v11) + v12 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v13 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v14 := b.NewValue0(v.Pos, OpRsh16Ux32, typ.UInt16) + v15 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v15.AuxInt = int32ToAuxInt(5) + v14.AddArg2(s, v15) + v13.AddArg(v14) + v12.AddArg(v13) + v9.AddArg2(v10, v12) + v2.AddArg2(v3, v9) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValuedec64_OpRsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x32 x s) + // result: (Int64Make (Rsh32x32 (Int64Hi x) s) (Or32 (Or32 (Rsh32Ux32 (Int64Lo x) s) (Lsh32x32 (Int64Hi x) (Sub32 (Const32 [32]) s))) (And32 (Rsh32x32 (Int64Hi x) 
(Sub32 s (Const32 [32]))) (Zeromask (Rsh32Ux32 s (Const32 [5])))))) + for { + x := v_0 + s := v_1 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, s) + v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(x) + v4.AddArg2(v5, s) + v6 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32) + v7 := b.NewValue0(v.Pos, OpSub32, typ.UInt32) + v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v8.AuxInt = int32ToAuxInt(32) + v7.AddArg2(v8, s) + v6.AddArg2(v1, v7) + v3.AddArg2(v4, v6) + v9 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) + v10 := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32) + v11 := b.NewValue0(v.Pos, OpSub32, typ.UInt32) + v11.AddArg2(s, v8) + v10.AddArg2(v1, v11) + v12 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v13 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32) + v14 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v14.AuxInt = int32ToAuxInt(5) + v13.AddArg2(s, v14) + v12.AddArg(v13) + v9.AddArg2(v10, v12) + v2.AddArg2(v3, v9) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValuedec64_OpRsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x64 x (Int64Make (Const32 [c]) _)) + // cond: c != 0 + // result: (Int64Make (Signmask (Int64Hi x)) (Signmask (Int64Hi x))) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c != 0) { + break + } + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg2(v0, v0) + return true + } + // match: (Rsh64x64 [c] x (Int64Make (Const32 [0]) lo)) + // result: (Rsh64x32 [c] x lo) + for { + c := 
auxIntToBool(v.AuxInt) + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 { + break + } + v.reset(OpRsh64x32) + v.AuxInt = boolToAuxInt(c) + v.AddArg2(x, lo) + return true + } + // match: (Rsh64x64 x (Int64Make hi lo)) + // cond: hi.Op != OpConst32 + // result: (Rsh64x32 x (Or32 (Zeromask hi) lo)) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + hi := v_1.Args[0] + if !(hi.Op != OpConst32) { + break + } + v.reset(OpRsh64x32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v1.AddArg(hi) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64x64 x y) + // result: (Rsh64x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) + for { + x := v_0 + y := v_1 + v.reset(OpRsh64x32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v3.AddArg(y) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValuedec64_OpRsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x8 x s) + // result: (Int64Make (Rsh32x8 (Int64Hi x) s) (Or32 (Or32 (Rsh32Ux8 (Int64Lo x) s) (Lsh32x8 (Int64Hi x) (Sub8 (Const8 [32]) s))) (And32 (Rsh32x8 (Int64Hi x) (Sub8 s (Const8 [32]))) (Zeromask (ZeroExt8to32 (Rsh8Ux32 s (Const32 [5]))))))) + for { + x := v_0 + s := v_1 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpRsh32x8, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, s) + v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(x) + 
v4.AddArg2(v5, s) + v6 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32) + v7 := b.NewValue0(v.Pos, OpSub8, typ.UInt8) + v8 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) + v8.AuxInt = int8ToAuxInt(32) + v7.AddArg2(v8, s) + v6.AddArg2(v1, v7) + v3.AddArg2(v4, v6) + v9 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32) + v10 := b.NewValue0(v.Pos, OpRsh32x8, typ.UInt32) + v11 := b.NewValue0(v.Pos, OpSub8, typ.UInt8) + v11.AddArg2(s, v8) + v10.AddArg2(v1, v11) + v12 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v13 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v14 := b.NewValue0(v.Pos, OpRsh8Ux32, typ.UInt8) + v15 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v15.AuxInt = int32ToAuxInt(5) + v14.AddArg2(s, v15) + v13.AddArg(v14) + v12.AddArg(v13) + v9.AddArg2(v10, v12) + v2.AddArg2(v3, v9) + v.AddArg2(v0, v2) + return true + } +} +func rewriteValuedec64_OpRsh8Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux64 _ (Int64Make (Const32 [c]) _)) + // cond: c != 0 + // result: (Const32 [0]) + for { + if v_1.Op != OpInt64Make { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c != 0) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (Rsh8Ux64 [c] x (Int64Make (Const32 [0]) lo)) + // result: (Rsh8Ux32 [c] x lo) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 { + break + } + v.reset(OpRsh8Ux32) + v.AuxInt = boolToAuxInt(c) + v.AddArg2(x, lo) + return true + } + // match: (Rsh8Ux64 x (Int64Make hi lo)) + // cond: hi.Op != OpConst32 + // result: (Rsh8Ux32 x (Or32 (Zeromask hi) lo)) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + hi := v_1.Args[0] + if !(hi.Op != OpConst32) { + break + } + v.reset(OpRsh8Ux32) + v0 := 
b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v1.AddArg(hi) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) + return true + } + // match: (Rsh8Ux64 x y) + // result: (Rsh8Ux32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) + for { + x := v_0 + y := v_1 + v.reset(OpRsh8Ux32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v3.AddArg(y) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValuedec64_OpRsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x64 x (Int64Make (Const32 [c]) _)) + // cond: c != 0 + // result: (Signmask (SignExt8to32 x)) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c != 0) { + break + } + v.reset(OpSignmask) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh8x64 [c] x (Int64Make (Const32 [0]) lo)) + // result: (Rsh8x32 [c] x lo) + for { + c := auxIntToBool(v.AuxInt) + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 { + break + } + v.reset(OpRsh8x32) + v.AuxInt = boolToAuxInt(c) + v.AddArg2(x, lo) + return true + } + // match: (Rsh8x64 x (Int64Make hi lo)) + // cond: hi.Op != OpConst32 + // result: (Rsh8x32 x (Or32 (Zeromask hi) lo)) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + hi := v_1.Args[0] + if !(hi.Op != OpConst32) { + break + } + v.reset(OpRsh8x32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v1.AddArg(hi) + v0.AddArg2(v1, lo) + v.AddArg2(x, v0) + 
return true + } + // match: (Rsh8x64 x y) + // result: (Rsh8x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) + for { + x := v_0 + y := v_1 + v.reset(OpRsh8x32) + v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v3.AddArg(y) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValuedec64_OpSignExt16to64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SignExt16to64 x) + // result: (SignExt32to64 (SignExt16to32 x)) + for { + x := v_0 + v.reset(OpSignExt32to64) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValuedec64_OpSignExt32to64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SignExt32to64 x) + // result: (Int64Make (Signmask x) x) + for { + x := v_0 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpSignmask, typ.Int32) + v0.AddArg(x) + v.AddArg2(v0, x) + return true + } +} +func rewriteValuedec64_OpSignExt8to64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (SignExt8to64 x) + // result: (SignExt32to64 (SignExt8to32 x)) + for { + x := v_0 + v.reset(OpSignExt32to64) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValuedec64_OpStore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Store {t} dst (Int64Make hi lo) mem) + // cond: t.Size() == 8 && !config.BigEndian + // result: (Store {hi.Type} (OffPtr [4] dst) hi (Store {lo.Type} dst lo mem)) + for { + t := auxToType(v.Aux) + dst := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + hi := v_1.Args[0] + mem := v_2 + if !(t.Size() == 8 && 
!config.BigEndian) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(hi.Type) + v0 := b.NewValue0(v.Pos, OpOffPtr, hi.Type.PtrTo()) + v0.AuxInt = int64ToAuxInt(4) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(lo.Type) + v1.AddArg3(dst, lo, mem) + v.AddArg3(v0, hi, v1) + return true + } + // match: (Store {t} dst (Int64Make hi lo) mem) + // cond: t.Size() == 8 && config.BigEndian + // result: (Store {lo.Type} (OffPtr [4] dst) lo (Store {hi.Type} dst hi mem)) + for { + t := auxToType(v.Aux) + dst := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + hi := v_1.Args[0] + mem := v_2 + if !(t.Size() == 8 && config.BigEndian) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(lo.Type) + v0 := b.NewValue0(v.Pos, OpOffPtr, lo.Type.PtrTo()) + v0.AuxInt = int64ToAuxInt(4) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(hi.Type) + v1.AddArg3(dst, hi, mem) + v.AddArg3(v0, lo, v1) + return true + } + return false +} +func rewriteValuedec64_OpSub64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Sub64 x y) + // result: (Int64Make (Sub32withcarry (Int64Hi x) (Int64Hi y) (Select1 (Sub32carry (Int64Lo x) (Int64Lo y)))) (Select0 (Sub32carry (Int64Lo x) (Int64Lo y)))) + for { + x := v_0 + y := v_1 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpSub32withcarry, typ.Int32) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpSub32carry, types.NewTuple(typ.UInt32, types.TypeFlags)) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(x) + v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v6.AddArg(y) + v4.AddArg2(v5, v6) + v3.AddArg(v4) + v0.AddArg3(v1, v2, v3) + v7 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32) + v7.AddArg(v4) + v.AddArg2(v0, v7) + 
return true + } +} +func rewriteValuedec64_OpTrunc64to16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Trunc64to16 (Int64Make _ lo)) + // result: (Trunc32to16 lo) + for { + if v_0.Op != OpInt64Make { + break + } + lo := v_0.Args[1] + v.reset(OpTrunc32to16) + v.AddArg(lo) + return true + } + // match: (Trunc64to16 x) + // result: (Trunc32to16 (Int64Lo x)) + for { + x := v_0 + v.reset(OpTrunc32to16) + v0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValuedec64_OpTrunc64to32(v *Value) bool { + v_0 := v.Args[0] + // match: (Trunc64to32 (Int64Make _ lo)) + // result: lo + for { + if v_0.Op != OpInt64Make { + break + } + lo := v_0.Args[1] + v.copyOf(lo) + return true + } + // match: (Trunc64to32 x) + // result: (Int64Lo x) + for { + x := v_0 + v.reset(OpInt64Lo) + v.AddArg(x) + return true + } +} +func rewriteValuedec64_OpTrunc64to8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Trunc64to8 (Int64Make _ lo)) + // result: (Trunc32to8 lo) + for { + if v_0.Op != OpInt64Make { + break + } + lo := v_0.Args[1] + v.reset(OpTrunc32to8) + v.AddArg(lo) + return true + } + // match: (Trunc64to8 x) + // result: (Trunc32to8 (Int64Lo x)) + for { + x := v_0 + v.reset(OpTrunc32to8) + v0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValuedec64_OpXor64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Xor64 x y) + // result: (Int64Make (Xor32 (Int64Hi x) (Int64Hi y)) (Xor32 (Int64Lo x) (Int64Lo y))) + for { + x := v_0 + y := v_1 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpXor32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpXor32, typ.UInt32) + v4 := 
b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v4.AddArg(x) + v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32) + v5.AddArg(y) + v3.AddArg2(v4, v5) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValuedec64_OpZeroExt16to64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ZeroExt16to64 x) + // result: (ZeroExt32to64 (ZeroExt16to32 x)) + for { + x := v_0 + v.reset(OpZeroExt32to64) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValuedec64_OpZeroExt32to64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ZeroExt32to64 x) + // result: (Int64Make (Const32 [0]) x) + for { + x := v_0 + v.reset(OpInt64Make) + v0 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, x) + return true + } +} +func rewriteValuedec64_OpZeroExt8to64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ZeroExt8to64 x) + // result: (ZeroExt32to64 (ZeroExt8to32 x)) + for { + x := v_0 + v.reset(OpZeroExt32to64) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteBlockdec64(b *Block) bool { + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewritegeneric.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewritegeneric.go new file mode 100644 index 0000000000000000000000000000000000000000..a018ca04b635077b82ce3ae0026d0b5f1caef076 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -0,0 +1,33938 @@ +// Code generated from _gen/generic.rules using 'go generate'; DO NOT EDIT. 
+ +package ssa + +import "math" +import "cmd/internal/obj" +import "cmd/compile/internal/types" +import "cmd/compile/internal/ir" + +func rewriteValuegeneric(v *Value) bool { + switch v.Op { + case OpAdd16: + return rewriteValuegeneric_OpAdd16(v) + case OpAdd32: + return rewriteValuegeneric_OpAdd32(v) + case OpAdd32F: + return rewriteValuegeneric_OpAdd32F(v) + case OpAdd64: + return rewriteValuegeneric_OpAdd64(v) + case OpAdd64F: + return rewriteValuegeneric_OpAdd64F(v) + case OpAdd8: + return rewriteValuegeneric_OpAdd8(v) + case OpAddPtr: + return rewriteValuegeneric_OpAddPtr(v) + case OpAnd16: + return rewriteValuegeneric_OpAnd16(v) + case OpAnd32: + return rewriteValuegeneric_OpAnd32(v) + case OpAnd64: + return rewriteValuegeneric_OpAnd64(v) + case OpAnd8: + return rewriteValuegeneric_OpAnd8(v) + case OpAndB: + return rewriteValuegeneric_OpAndB(v) + case OpArraySelect: + return rewriteValuegeneric_OpArraySelect(v) + case OpCeil: + return rewriteValuegeneric_OpCeil(v) + case OpCom16: + return rewriteValuegeneric_OpCom16(v) + case OpCom32: + return rewriteValuegeneric_OpCom32(v) + case OpCom64: + return rewriteValuegeneric_OpCom64(v) + case OpCom8: + return rewriteValuegeneric_OpCom8(v) + case OpConstInterface: + return rewriteValuegeneric_OpConstInterface(v) + case OpConstSlice: + return rewriteValuegeneric_OpConstSlice(v) + case OpConstString: + return rewriteValuegeneric_OpConstString(v) + case OpConvert: + return rewriteValuegeneric_OpConvert(v) + case OpCtz16: + return rewriteValuegeneric_OpCtz16(v) + case OpCtz32: + return rewriteValuegeneric_OpCtz32(v) + case OpCtz64: + return rewriteValuegeneric_OpCtz64(v) + case OpCtz8: + return rewriteValuegeneric_OpCtz8(v) + case OpCvt32Fto32: + return rewriteValuegeneric_OpCvt32Fto32(v) + case OpCvt32Fto64: + return rewriteValuegeneric_OpCvt32Fto64(v) + case OpCvt32Fto64F: + return rewriteValuegeneric_OpCvt32Fto64F(v) + case OpCvt32to32F: + return rewriteValuegeneric_OpCvt32to32F(v) + case OpCvt32to64F: + return 
rewriteValuegeneric_OpCvt32to64F(v) + case OpCvt64Fto32: + return rewriteValuegeneric_OpCvt64Fto32(v) + case OpCvt64Fto32F: + return rewriteValuegeneric_OpCvt64Fto32F(v) + case OpCvt64Fto64: + return rewriteValuegeneric_OpCvt64Fto64(v) + case OpCvt64to32F: + return rewriteValuegeneric_OpCvt64to32F(v) + case OpCvt64to64F: + return rewriteValuegeneric_OpCvt64to64F(v) + case OpCvtBoolToUint8: + return rewriteValuegeneric_OpCvtBoolToUint8(v) + case OpDiv16: + return rewriteValuegeneric_OpDiv16(v) + case OpDiv16u: + return rewriteValuegeneric_OpDiv16u(v) + case OpDiv32: + return rewriteValuegeneric_OpDiv32(v) + case OpDiv32F: + return rewriteValuegeneric_OpDiv32F(v) + case OpDiv32u: + return rewriteValuegeneric_OpDiv32u(v) + case OpDiv64: + return rewriteValuegeneric_OpDiv64(v) + case OpDiv64F: + return rewriteValuegeneric_OpDiv64F(v) + case OpDiv64u: + return rewriteValuegeneric_OpDiv64u(v) + case OpDiv8: + return rewriteValuegeneric_OpDiv8(v) + case OpDiv8u: + return rewriteValuegeneric_OpDiv8u(v) + case OpEq16: + return rewriteValuegeneric_OpEq16(v) + case OpEq32: + return rewriteValuegeneric_OpEq32(v) + case OpEq32F: + return rewriteValuegeneric_OpEq32F(v) + case OpEq64: + return rewriteValuegeneric_OpEq64(v) + case OpEq64F: + return rewriteValuegeneric_OpEq64F(v) + case OpEq8: + return rewriteValuegeneric_OpEq8(v) + case OpEqB: + return rewriteValuegeneric_OpEqB(v) + case OpEqInter: + return rewriteValuegeneric_OpEqInter(v) + case OpEqPtr: + return rewriteValuegeneric_OpEqPtr(v) + case OpEqSlice: + return rewriteValuegeneric_OpEqSlice(v) + case OpFloor: + return rewriteValuegeneric_OpFloor(v) + case OpIMake: + return rewriteValuegeneric_OpIMake(v) + case OpInterLECall: + return rewriteValuegeneric_OpInterLECall(v) + case OpIsInBounds: + return rewriteValuegeneric_OpIsInBounds(v) + case OpIsNonNil: + return rewriteValuegeneric_OpIsNonNil(v) + case OpIsSliceInBounds: + return rewriteValuegeneric_OpIsSliceInBounds(v) + case OpLeq16: + return 
rewriteValuegeneric_OpLeq16(v) + case OpLeq16U: + return rewriteValuegeneric_OpLeq16U(v) + case OpLeq32: + return rewriteValuegeneric_OpLeq32(v) + case OpLeq32F: + return rewriteValuegeneric_OpLeq32F(v) + case OpLeq32U: + return rewriteValuegeneric_OpLeq32U(v) + case OpLeq64: + return rewriteValuegeneric_OpLeq64(v) + case OpLeq64F: + return rewriteValuegeneric_OpLeq64F(v) + case OpLeq64U: + return rewriteValuegeneric_OpLeq64U(v) + case OpLeq8: + return rewriteValuegeneric_OpLeq8(v) + case OpLeq8U: + return rewriteValuegeneric_OpLeq8U(v) + case OpLess16: + return rewriteValuegeneric_OpLess16(v) + case OpLess16U: + return rewriteValuegeneric_OpLess16U(v) + case OpLess32: + return rewriteValuegeneric_OpLess32(v) + case OpLess32F: + return rewriteValuegeneric_OpLess32F(v) + case OpLess32U: + return rewriteValuegeneric_OpLess32U(v) + case OpLess64: + return rewriteValuegeneric_OpLess64(v) + case OpLess64F: + return rewriteValuegeneric_OpLess64F(v) + case OpLess64U: + return rewriteValuegeneric_OpLess64U(v) + case OpLess8: + return rewriteValuegeneric_OpLess8(v) + case OpLess8U: + return rewriteValuegeneric_OpLess8U(v) + case OpLoad: + return rewriteValuegeneric_OpLoad(v) + case OpLsh16x16: + return rewriteValuegeneric_OpLsh16x16(v) + case OpLsh16x32: + return rewriteValuegeneric_OpLsh16x32(v) + case OpLsh16x64: + return rewriteValuegeneric_OpLsh16x64(v) + case OpLsh16x8: + return rewriteValuegeneric_OpLsh16x8(v) + case OpLsh32x16: + return rewriteValuegeneric_OpLsh32x16(v) + case OpLsh32x32: + return rewriteValuegeneric_OpLsh32x32(v) + case OpLsh32x64: + return rewriteValuegeneric_OpLsh32x64(v) + case OpLsh32x8: + return rewriteValuegeneric_OpLsh32x8(v) + case OpLsh64x16: + return rewriteValuegeneric_OpLsh64x16(v) + case OpLsh64x32: + return rewriteValuegeneric_OpLsh64x32(v) + case OpLsh64x64: + return rewriteValuegeneric_OpLsh64x64(v) + case OpLsh64x8: + return rewriteValuegeneric_OpLsh64x8(v) + case OpLsh8x16: + return rewriteValuegeneric_OpLsh8x16(v) + case 
OpLsh8x32: + return rewriteValuegeneric_OpLsh8x32(v) + case OpLsh8x64: + return rewriteValuegeneric_OpLsh8x64(v) + case OpLsh8x8: + return rewriteValuegeneric_OpLsh8x8(v) + case OpMod16: + return rewriteValuegeneric_OpMod16(v) + case OpMod16u: + return rewriteValuegeneric_OpMod16u(v) + case OpMod32: + return rewriteValuegeneric_OpMod32(v) + case OpMod32u: + return rewriteValuegeneric_OpMod32u(v) + case OpMod64: + return rewriteValuegeneric_OpMod64(v) + case OpMod64u: + return rewriteValuegeneric_OpMod64u(v) + case OpMod8: + return rewriteValuegeneric_OpMod8(v) + case OpMod8u: + return rewriteValuegeneric_OpMod8u(v) + case OpMove: + return rewriteValuegeneric_OpMove(v) + case OpMul16: + return rewriteValuegeneric_OpMul16(v) + case OpMul32: + return rewriteValuegeneric_OpMul32(v) + case OpMul32F: + return rewriteValuegeneric_OpMul32F(v) + case OpMul64: + return rewriteValuegeneric_OpMul64(v) + case OpMul64F: + return rewriteValuegeneric_OpMul64F(v) + case OpMul8: + return rewriteValuegeneric_OpMul8(v) + case OpNeg16: + return rewriteValuegeneric_OpNeg16(v) + case OpNeg32: + return rewriteValuegeneric_OpNeg32(v) + case OpNeg32F: + return rewriteValuegeneric_OpNeg32F(v) + case OpNeg64: + return rewriteValuegeneric_OpNeg64(v) + case OpNeg64F: + return rewriteValuegeneric_OpNeg64F(v) + case OpNeg8: + return rewriteValuegeneric_OpNeg8(v) + case OpNeq16: + return rewriteValuegeneric_OpNeq16(v) + case OpNeq32: + return rewriteValuegeneric_OpNeq32(v) + case OpNeq32F: + return rewriteValuegeneric_OpNeq32F(v) + case OpNeq64: + return rewriteValuegeneric_OpNeq64(v) + case OpNeq64F: + return rewriteValuegeneric_OpNeq64F(v) + case OpNeq8: + return rewriteValuegeneric_OpNeq8(v) + case OpNeqB: + return rewriteValuegeneric_OpNeqB(v) + case OpNeqInter: + return rewriteValuegeneric_OpNeqInter(v) + case OpNeqPtr: + return rewriteValuegeneric_OpNeqPtr(v) + case OpNeqSlice: + return rewriteValuegeneric_OpNeqSlice(v) + case OpNilCheck: + return rewriteValuegeneric_OpNilCheck(v) + case 
OpNot: + return rewriteValuegeneric_OpNot(v) + case OpOffPtr: + return rewriteValuegeneric_OpOffPtr(v) + case OpOr16: + return rewriteValuegeneric_OpOr16(v) + case OpOr32: + return rewriteValuegeneric_OpOr32(v) + case OpOr64: + return rewriteValuegeneric_OpOr64(v) + case OpOr8: + return rewriteValuegeneric_OpOr8(v) + case OpOrB: + return rewriteValuegeneric_OpOrB(v) + case OpPhi: + return rewriteValuegeneric_OpPhi(v) + case OpPtrIndex: + return rewriteValuegeneric_OpPtrIndex(v) + case OpRotateLeft16: + return rewriteValuegeneric_OpRotateLeft16(v) + case OpRotateLeft32: + return rewriteValuegeneric_OpRotateLeft32(v) + case OpRotateLeft64: + return rewriteValuegeneric_OpRotateLeft64(v) + case OpRotateLeft8: + return rewriteValuegeneric_OpRotateLeft8(v) + case OpRound32F: + return rewriteValuegeneric_OpRound32F(v) + case OpRound64F: + return rewriteValuegeneric_OpRound64F(v) + case OpRoundToEven: + return rewriteValuegeneric_OpRoundToEven(v) + case OpRsh16Ux16: + return rewriteValuegeneric_OpRsh16Ux16(v) + case OpRsh16Ux32: + return rewriteValuegeneric_OpRsh16Ux32(v) + case OpRsh16Ux64: + return rewriteValuegeneric_OpRsh16Ux64(v) + case OpRsh16Ux8: + return rewriteValuegeneric_OpRsh16Ux8(v) + case OpRsh16x16: + return rewriteValuegeneric_OpRsh16x16(v) + case OpRsh16x32: + return rewriteValuegeneric_OpRsh16x32(v) + case OpRsh16x64: + return rewriteValuegeneric_OpRsh16x64(v) + case OpRsh16x8: + return rewriteValuegeneric_OpRsh16x8(v) + case OpRsh32Ux16: + return rewriteValuegeneric_OpRsh32Ux16(v) + case OpRsh32Ux32: + return rewriteValuegeneric_OpRsh32Ux32(v) + case OpRsh32Ux64: + return rewriteValuegeneric_OpRsh32Ux64(v) + case OpRsh32Ux8: + return rewriteValuegeneric_OpRsh32Ux8(v) + case OpRsh32x16: + return rewriteValuegeneric_OpRsh32x16(v) + case OpRsh32x32: + return rewriteValuegeneric_OpRsh32x32(v) + case OpRsh32x64: + return rewriteValuegeneric_OpRsh32x64(v) + case OpRsh32x8: + return rewriteValuegeneric_OpRsh32x8(v) + case OpRsh64Ux16: + return 
rewriteValuegeneric_OpRsh64Ux16(v) + case OpRsh64Ux32: + return rewriteValuegeneric_OpRsh64Ux32(v) + case OpRsh64Ux64: + return rewriteValuegeneric_OpRsh64Ux64(v) + case OpRsh64Ux8: + return rewriteValuegeneric_OpRsh64Ux8(v) + case OpRsh64x16: + return rewriteValuegeneric_OpRsh64x16(v) + case OpRsh64x32: + return rewriteValuegeneric_OpRsh64x32(v) + case OpRsh64x64: + return rewriteValuegeneric_OpRsh64x64(v) + case OpRsh64x8: + return rewriteValuegeneric_OpRsh64x8(v) + case OpRsh8Ux16: + return rewriteValuegeneric_OpRsh8Ux16(v) + case OpRsh8Ux32: + return rewriteValuegeneric_OpRsh8Ux32(v) + case OpRsh8Ux64: + return rewriteValuegeneric_OpRsh8Ux64(v) + case OpRsh8Ux8: + return rewriteValuegeneric_OpRsh8Ux8(v) + case OpRsh8x16: + return rewriteValuegeneric_OpRsh8x16(v) + case OpRsh8x32: + return rewriteValuegeneric_OpRsh8x32(v) + case OpRsh8x64: + return rewriteValuegeneric_OpRsh8x64(v) + case OpRsh8x8: + return rewriteValuegeneric_OpRsh8x8(v) + case OpSelect0: + return rewriteValuegeneric_OpSelect0(v) + case OpSelect1: + return rewriteValuegeneric_OpSelect1(v) + case OpSelectN: + return rewriteValuegeneric_OpSelectN(v) + case OpSignExt16to32: + return rewriteValuegeneric_OpSignExt16to32(v) + case OpSignExt16to64: + return rewriteValuegeneric_OpSignExt16to64(v) + case OpSignExt32to64: + return rewriteValuegeneric_OpSignExt32to64(v) + case OpSignExt8to16: + return rewriteValuegeneric_OpSignExt8to16(v) + case OpSignExt8to32: + return rewriteValuegeneric_OpSignExt8to32(v) + case OpSignExt8to64: + return rewriteValuegeneric_OpSignExt8to64(v) + case OpSliceCap: + return rewriteValuegeneric_OpSliceCap(v) + case OpSliceLen: + return rewriteValuegeneric_OpSliceLen(v) + case OpSlicePtr: + return rewriteValuegeneric_OpSlicePtr(v) + case OpSlicemask: + return rewriteValuegeneric_OpSlicemask(v) + case OpSqrt: + return rewriteValuegeneric_OpSqrt(v) + case OpStaticCall: + return rewriteValuegeneric_OpStaticCall(v) + case OpStaticLECall: + return 
rewriteValuegeneric_OpStaticLECall(v) + case OpStore: + return rewriteValuegeneric_OpStore(v) + case OpStringLen: + return rewriteValuegeneric_OpStringLen(v) + case OpStringPtr: + return rewriteValuegeneric_OpStringPtr(v) + case OpStructSelect: + return rewriteValuegeneric_OpStructSelect(v) + case OpSub16: + return rewriteValuegeneric_OpSub16(v) + case OpSub32: + return rewriteValuegeneric_OpSub32(v) + case OpSub32F: + return rewriteValuegeneric_OpSub32F(v) + case OpSub64: + return rewriteValuegeneric_OpSub64(v) + case OpSub64F: + return rewriteValuegeneric_OpSub64F(v) + case OpSub8: + return rewriteValuegeneric_OpSub8(v) + case OpTrunc: + return rewriteValuegeneric_OpTrunc(v) + case OpTrunc16to8: + return rewriteValuegeneric_OpTrunc16to8(v) + case OpTrunc32to16: + return rewriteValuegeneric_OpTrunc32to16(v) + case OpTrunc32to8: + return rewriteValuegeneric_OpTrunc32to8(v) + case OpTrunc64to16: + return rewriteValuegeneric_OpTrunc64to16(v) + case OpTrunc64to32: + return rewriteValuegeneric_OpTrunc64to32(v) + case OpTrunc64to8: + return rewriteValuegeneric_OpTrunc64to8(v) + case OpXor16: + return rewriteValuegeneric_OpXor16(v) + case OpXor32: + return rewriteValuegeneric_OpXor32(v) + case OpXor64: + return rewriteValuegeneric_OpXor64(v) + case OpXor8: + return rewriteValuegeneric_OpXor8(v) + case OpZero: + return rewriteValuegeneric_OpZero(v) + case OpZeroExt16to32: + return rewriteValuegeneric_OpZeroExt16to32(v) + case OpZeroExt16to64: + return rewriteValuegeneric_OpZeroExt16to64(v) + case OpZeroExt32to64: + return rewriteValuegeneric_OpZeroExt32to64(v) + case OpZeroExt8to16: + return rewriteValuegeneric_OpZeroExt8to16(v) + case OpZeroExt8to32: + return rewriteValuegeneric_OpZeroExt8to32(v) + case OpZeroExt8to64: + return rewriteValuegeneric_OpZeroExt8to64(v) + } + return false +} +func rewriteValuegeneric_OpAdd16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Add16 (Const16 [c]) (Const16 [d])) + // 
result: (Const16 [c+d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1.AuxInt) + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(c + d) + return true + } + break + } + // match: (Add16 (Mul16 x y) (Mul16 x z)) + // result: (Mul16 x (Add16 y z)) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMul16 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if v_1.Op != OpMul16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + z := v_1_1 + v.reset(OpMul16) + v0 := b.NewValue0(v.Pos, OpAdd16, t) + v0.AddArg2(y, z) + v.AddArg2(x, v0) + return true + } + } + } + break + } + // match: (Add16 (Const16 [0]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (Add16 x (Neg16 y)) + // result: (Sub16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpNeg16 { + continue + } + y := v_1.Args[0] + v.reset(OpSub16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add16 (Com16 x) x) + // result: (Const16 [-1]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom16 { + continue + } + x := v_0.Args[0] + if x != v_1 { + continue + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(-1) + return true + } + break + } + // match: (Add16 (Sub16 x t) (Add16 t y)) + // result: (Add16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op 
!= OpSub16 { + continue + } + t := v_0.Args[1] + x := v_0.Args[0] + if v_1.Op != OpAdd16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if t != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpAdd16) + v.AddArg2(x, y) + return true + } + } + break + } + // match: (Add16 (Const16 [1]) (Com16 x)) + // result: (Neg16 x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 1 || v_1.Op != OpCom16 { + continue + } + x := v_1.Args[0] + v.reset(OpNeg16) + v.AddArg(x) + return true + } + break + } + // match: (Add16 x (Sub16 y x)) + // result: y + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpSub16 { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if x != v_1.Args[1] { + continue + } + v.copyOf(y) + return true + } + break + } + // match: (Add16 x (Add16 y (Sub16 z x))) + // result: (Add16 y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAdd16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpSub16 { + continue + } + _ = v_1_1.Args[1] + z := v_1_1.Args[0] + if x != v_1_1.Args[1] { + continue + } + v.reset(OpAdd16) + v.AddArg2(y, z) + return true + } + } + break + } + // match: (Add16 (Add16 i:(Const16 ) z) x) + // cond: (z.Op != OpConst16 && x.Op != OpConst16) + // result: (Add16 i (Add16 z x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAdd16 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst16 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst16 && x.Op != OpConst16) { + 
continue + } + v.reset(OpAdd16) + v0 := b.NewValue0(v.Pos, OpAdd16, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (Add16 (Sub16 i:(Const16 ) z) x) + // cond: (z.Op != OpConst16 && x.Op != OpConst16) + // result: (Add16 i (Sub16 x z)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpSub16 { + continue + } + z := v_0.Args[1] + i := v_0.Args[0] + if i.Op != OpConst16 { + continue + } + t := i.Type + x := v_1 + if !(z.Op != OpConst16 && x.Op != OpConst16) { + continue + } + v.reset(OpAdd16) + v0 := b.NewValue0(v.Pos, OpSub16, t) + v0.AddArg2(x, z) + v.AddArg2(i, v0) + return true + } + break + } + // match: (Add16 (Const16 [c]) (Add16 (Const16 [d]) x)) + // result: (Add16 (Const16 [c+d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpAdd16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + continue + } + d := auxIntToInt16(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpAdd16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(c + d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Add16 (Const16 [c]) (Sub16 (Const16 [d]) x)) + // result: (Sub16 (Const16 [c+d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpSub16 { + continue + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + continue + } + d := auxIntToInt16(v_1_0.AuxInt) + v.reset(OpSub16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(c + d) + v.AddArg2(v0, x) + return true + } + break + } + // match: (Add16 (Lsh16x64 x z:(Const64 [c])) 
(Rsh16Ux64 x (Const64 [d]))) + // cond: c < 16 && d == 16-c && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLsh16x64 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { + continue + } + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh16Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 16 && d == 16-c && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add16 left:(Lsh16x64 x y) right:(Rsh16Ux64 x (Sub64 (Const64 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add16 left:(Lsh16x32 x y) right:(Rsh16Ux32 x (Sub32 (Const32 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x32 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux32 { + 
continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add16 left:(Lsh16x16 x y) right:(Rsh16Ux16 x (Sub16 (Const16 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x16 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add16 left:(Lsh16x8 x y) right:(Rsh16Ux8 x (Sub8 (Const8 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || 
auxIntToInt8(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add16 right:(Rsh16Ux64 x y) left:(Lsh16x64 x z:(Sub64 (Const64 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add16 right:(Rsh16Ux32 x y) left:(Lsh16x32 x z:(Sub32 (Const32 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add16 right:(Rsh16Ux16 x y) left:(Lsh16x16 x z:(Sub16 (Const16 [16]) y))) + // cond: 
(shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux16 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add16 right:(Rsh16Ux8 x y) left:(Lsh16x8 x z:(Sub8 (Const8 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux8 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x8 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpAdd32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Add32 (Const32 [c]) (Const32 [d])) + // result: (Const32 [c+d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1.AuxInt) 
+ v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(c + d) + return true + } + break + } + // match: (Add32 (Mul32 x y) (Mul32 x z)) + // result: (Mul32 x (Add32 y z)) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMul32 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + z := v_1_1 + v.reset(OpMul32) + v0 := b.NewValue0(v.Pos, OpAdd32, t) + v0.AddArg2(y, z) + v.AddArg2(x, v0) + return true + } + } + } + break + } + // match: (Add32 (Const32 [0]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (Add32 x (Neg32 y)) + // result: (Sub32 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpNeg32 { + continue + } + y := v_1.Args[0] + v.reset(OpSub32) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add32 (Com32 x) x) + // result: (Const32 [-1]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom32 { + continue + } + x := v_0.Args[0] + if x != v_1 { + continue + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(-1) + return true + } + break + } + // match: (Add32 (Sub32 x t) (Add32 t y)) + // result: (Add32 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpSub32 { + continue + } + t := v_0.Args[1] + x := v_0.Args[0] + if v_1.Op != OpAdd32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if 
t != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpAdd32) + v.AddArg2(x, y) + return true + } + } + break + } + // match: (Add32 (Const32 [1]) (Com32 x)) + // result: (Neg32 x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 1 || v_1.Op != OpCom32 { + continue + } + x := v_1.Args[0] + v.reset(OpNeg32) + v.AddArg(x) + return true + } + break + } + // match: (Add32 x (Sub32 y x)) + // result: y + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpSub32 { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if x != v_1.Args[1] { + continue + } + v.copyOf(y) + return true + } + break + } + // match: (Add32 x (Add32 y (Sub32 z x))) + // result: (Add32 y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAdd32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpSub32 { + continue + } + _ = v_1_1.Args[1] + z := v_1_1.Args[0] + if x != v_1_1.Args[1] { + continue + } + v.reset(OpAdd32) + v.AddArg2(y, z) + return true + } + } + break + } + // match: (Add32 (Add32 i:(Const32 ) z) x) + // cond: (z.Op != OpConst32 && x.Op != OpConst32) + // result: (Add32 i (Add32 z x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAdd32 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst32 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst32 && x.Op != OpConst32) { + continue + } + v.reset(OpAdd32) + v0 := b.NewValue0(v.Pos, OpAdd32, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (Add32 (Sub32 i:(Const32 ) z) x) + // cond: (z.Op != OpConst32 && x.Op != OpConst32) + 
// result: (Add32 i (Sub32 x z)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpSub32 { + continue + } + z := v_0.Args[1] + i := v_0.Args[0] + if i.Op != OpConst32 { + continue + } + t := i.Type + x := v_1 + if !(z.Op != OpConst32 && x.Op != OpConst32) { + continue + } + v.reset(OpAdd32) + v0 := b.NewValue0(v.Pos, OpSub32, t) + v0.AddArg2(x, z) + v.AddArg2(i, v0) + return true + } + break + } + // match: (Add32 (Const32 [c]) (Add32 (Const32 [d]) x)) + // result: (Add32 (Const32 [c+d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpAdd32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst32 || v_1_0.Type != t { + continue + } + d := auxIntToInt32(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpAdd32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(c + d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Add32 (Const32 [c]) (Sub32 (Const32 [d]) x)) + // result: (Sub32 (Const32 [c+d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpSub32 { + continue + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 || v_1_0.Type != t { + continue + } + d := auxIntToInt32(v_1_0.AuxInt) + v.reset(OpSub32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(c + d) + v.AddArg2(v0, x) + return true + } + break + } + // match: (Add32 (Lsh32x64 x z:(Const64 [c])) (Rsh32Ux64 x (Const64 [d]))) + // cond: c < 32 && d == 32-c && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLsh32x64 { + continue + } + _ = v_0.Args[1] + x 
:= v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { + continue + } + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh32Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 32 && d == 32-c && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add32 left:(Lsh32x64 x y) right:(Rsh32Ux64 x (Sub64 (Const64 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh32x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add32 left:(Lsh32x32 x y) right:(Rsh32Ux32 x (Sub32 (Const32 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh32x32 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || 
auxIntToInt32(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add32 left:(Lsh32x16 x y) right:(Rsh32Ux16 x (Sub16 (Const16 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh32x16 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add32 left:(Lsh32x8 x y) right:(Rsh32Ux8 x (Sub8 (Const8 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh32x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add32 
right:(Rsh32Ux64 x y) left:(Lsh32x64 x z:(Sub64 (Const64 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh32Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add32 right:(Rsh32Ux32 x y) left:(Lsh32x32 x z:(Sub32 (Const32 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh32Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add32 right:(Rsh32Ux16 x y) left:(Lsh32x16 x z:(Sub16 (Const16 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh32Ux16 { + continue + } + y := right.Args[1] + 
x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add32 right:(Rsh32Ux8 x y) left:(Lsh32x8 x z:(Sub8 (Const8 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh32Ux8 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x8 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpAdd32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Add32F (Const32F [c]) (Const32F [d])) + // cond: c+d == c+d + // result: (Const32F [c+d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32F { + continue + } + c := auxIntToFloat32(v_0.AuxInt) + if v_1.Op != OpConst32F { + continue + } + d := auxIntToFloat32(v_1.AuxInt) + if !(c+d == c+d) { + continue + } + v.reset(OpConst32F) + v.AuxInt = float32ToAuxInt(c + d) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpAdd64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := 
b.Func.Config + // match: (Add64 (Const64 [c]) (Const64 [d])) + // result: (Const64 [c+d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(c + d) + return true + } + break + } + // match: (Add64 (Mul64 x y) (Mul64 x z)) + // result: (Mul64 x (Add64 y z)) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMul64 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if v_1.Op != OpMul64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + z := v_1_1 + v.reset(OpMul64) + v0 := b.NewValue0(v.Pos, OpAdd64, t) + v0.AddArg2(y, z) + v.AddArg2(x, v0) + return true + } + } + } + break + } + // match: (Add64 (Const64 [0]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (Add64 x (Neg64 y)) + // result: (Sub64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpNeg64 { + continue + } + y := v_1.Args[0] + v.reset(OpSub64) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add64 (Com64 x) x) + // result: (Const64 [-1]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom64 { + continue + } + x := v_0.Args[0] + if x != v_1 { + continue + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(-1) + return true + } + break + } + // match: (Add64 (Sub64 x t) (Add64 t y)) + // result: (Add64 x y) + for { + for 
_i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpSub64 { + continue + } + t := v_0.Args[1] + x := v_0.Args[0] + if v_1.Op != OpAdd64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if t != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpAdd64) + v.AddArg2(x, y) + return true + } + } + break + } + // match: (Add64 (Const64 [1]) (Com64 x)) + // result: (Neg64 x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 1 || v_1.Op != OpCom64 { + continue + } + x := v_1.Args[0] + v.reset(OpNeg64) + v.AddArg(x) + return true + } + break + } + // match: (Add64 x (Sub64 y x)) + // result: y + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpSub64 { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if x != v_1.Args[1] { + continue + } + v.copyOf(y) + return true + } + break + } + // match: (Add64 x (Add64 y (Sub64 z x))) + // result: (Add64 y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAdd64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpSub64 { + continue + } + _ = v_1_1.Args[1] + z := v_1_1.Args[0] + if x != v_1_1.Args[1] { + continue + } + v.reset(OpAdd64) + v.AddArg2(y, z) + return true + } + } + break + } + // match: (Add64 (Add64 i:(Const64 ) z) x) + // cond: (z.Op != OpConst64 && x.Op != OpConst64) + // result: (Add64 i (Add64 z x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAdd64 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst64 { + continue + } + t := i.Type + z := 
v_0_1 + x := v_1 + if !(z.Op != OpConst64 && x.Op != OpConst64) { + continue + } + v.reset(OpAdd64) + v0 := b.NewValue0(v.Pos, OpAdd64, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (Add64 (Sub64 i:(Const64 ) z) x) + // cond: (z.Op != OpConst64 && x.Op != OpConst64) + // result: (Add64 i (Sub64 x z)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpSub64 { + continue + } + z := v_0.Args[1] + i := v_0.Args[0] + if i.Op != OpConst64 { + continue + } + t := i.Type + x := v_1 + if !(z.Op != OpConst64 && x.Op != OpConst64) { + continue + } + v.reset(OpAdd64) + v0 := b.NewValue0(v.Pos, OpSub64, t) + v0.AddArg2(x, z) + v.AddArg2(i, v0) + return true + } + break + } + // match: (Add64 (Const64 [c]) (Add64 (Const64 [d]) x)) + // result: (Add64 (Const64 [c+d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpAdd64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := auxIntToInt64(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpAdd64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c + d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Add64 (Const64 [c]) (Sub64 (Const64 [d]) x)) + // result: (Sub64 (Const64 [c+d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpSub64 { + continue + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := auxIntToInt64(v_1_0.AuxInt) + v.reset(OpSub64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c + d) + v.AddArg2(v0, x) + return true + } 
+ break + } + // match: (Add64 (Lsh64x64 x z:(Const64 [c])) (Rsh64Ux64 x (Const64 [d]))) + // cond: c < 64 && d == 64-c && canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLsh64x64 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { + continue + } + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh64Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 64 && d == 64-c && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add64 left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh64x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add64 left:(Lsh64x32 x y) right:(Rsh64Ux32 x (Sub32 (Const32 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh64x32 { + continue + } + y := left.Args[1] + x := 
left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add64 left:(Lsh64x16 x y) right:(Rsh64Ux16 x (Sub16 (Const16 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh64x16 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add64 left:(Lsh64x8 x y) right:(Rsh64Ux8 x (Sub8 (Const8 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh64x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := 
right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add64 right:(Rsh64Ux64 x y) left:(Lsh64x64 x z:(Sub64 (Const64 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh64Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add64 right:(Rsh64Ux32 x y) left:(Lsh64x32 x z:(Sub32 (Const32 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh64Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add64 right:(Rsh64Ux16 x y) left:(Lsh64x16 
x z:(Sub16 (Const16 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh64Ux16 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add64 right:(Rsh64Ux8 x y) left:(Lsh64x8 x z:(Sub8 (Const8 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh64Ux8 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x8 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpAdd64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Add64F (Const64F [c]) (Const64F [d])) + // cond: c+d == c+d + // result: (Const64F [c+d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64F { + continue + } + c := auxIntToFloat64(v_0.AuxInt) + if v_1.Op != OpConst64F { + continue + } 
+ d := auxIntToFloat64(v_1.AuxInt) + if !(c+d == c+d) { + continue + } + v.reset(OpConst64F) + v.AuxInt = float64ToAuxInt(c + d) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpAdd8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Add8 (Const8 [c]) (Const8 [d])) + // result: (Const8 [c+d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1.AuxInt) + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(c + d) + return true + } + break + } + // match: (Add8 (Mul8 x y) (Mul8 x z)) + // result: (Mul8 x (Add8 y z)) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMul8 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if v_1.Op != OpMul8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + z := v_1_1 + v.reset(OpMul8) + v0 := b.NewValue0(v.Pos, OpAdd8, t) + v0.AddArg2(y, z) + v.AddArg2(x, v0) + return true + } + } + } + break + } + // match: (Add8 (Const8 [0]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (Add8 x (Neg8 y)) + // result: (Sub8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpNeg8 { + continue + } + y := v_1.Args[0] + v.reset(OpSub8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add8 (Com8 x) x) + // result: (Const8 [-1]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, 
v_0 { + if v_0.Op != OpCom8 { + continue + } + x := v_0.Args[0] + if x != v_1 { + continue + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(-1) + return true + } + break + } + // match: (Add8 (Sub8 x t) (Add8 t y)) + // result: (Add8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpSub8 { + continue + } + t := v_0.Args[1] + x := v_0.Args[0] + if v_1.Op != OpAdd8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if t != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpAdd8) + v.AddArg2(x, y) + return true + } + } + break + } + // match: (Add8 (Const8 [1]) (Com8 x)) + // result: (Neg8 x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 1 || v_1.Op != OpCom8 { + continue + } + x := v_1.Args[0] + v.reset(OpNeg8) + v.AddArg(x) + return true + } + break + } + // match: (Add8 x (Sub8 y x)) + // result: y + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpSub8 { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if x != v_1.Args[1] { + continue + } + v.copyOf(y) + return true + } + break + } + // match: (Add8 x (Add8 y (Sub8 z x))) + // result: (Add8 y z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAdd8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpSub8 { + continue + } + _ = v_1_1.Args[1] + z := v_1_1.Args[0] + if x != v_1_1.Args[1] { + continue + } + v.reset(OpAdd8) + v.AddArg2(y, z) + return true + } + } + break + } + // match: (Add8 (Add8 i:(Const8 ) z) x) + // cond: (z.Op != OpConst8 && x.Op != OpConst8) + // result: (Add8 i (Add8 z x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAdd8 
{ + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst8 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst8 && x.Op != OpConst8) { + continue + } + v.reset(OpAdd8) + v0 := b.NewValue0(v.Pos, OpAdd8, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (Add8 (Sub8 i:(Const8 ) z) x) + // cond: (z.Op != OpConst8 && x.Op != OpConst8) + // result: (Add8 i (Sub8 x z)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpSub8 { + continue + } + z := v_0.Args[1] + i := v_0.Args[0] + if i.Op != OpConst8 { + continue + } + t := i.Type + x := v_1 + if !(z.Op != OpConst8 && x.Op != OpConst8) { + continue + } + v.reset(OpAdd8) + v0 := b.NewValue0(v.Pos, OpSub8, t) + v0.AddArg2(x, z) + v.AddArg2(i, v0) + return true + } + break + } + // match: (Add8 (Const8 [c]) (Add8 (Const8 [d]) x)) + // result: (Add8 (Const8 [c+d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { + continue + } + t := v_0.Type + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpAdd8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + continue + } + d := auxIntToInt8(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpAdd8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(c + d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Add8 (Const8 [c]) (Sub8 (Const8 [d]) x)) + // result: (Sub8 (Const8 [c+d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { + continue + } + t := v_0.Type + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpSub8 { + continue + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + 
continue + } + d := auxIntToInt8(v_1_0.AuxInt) + v.reset(OpSub8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(c + d) + v.AddArg2(v0, x) + return true + } + break + } + // match: (Add8 (Lsh8x64 x z:(Const64 [c])) (Rsh8Ux64 x (Const64 [d]))) + // cond: c < 8 && d == 8-c && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLsh8x64 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { + continue + } + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh8Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 8 && d == 8-c && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add8 left:(Lsh8x64 x y) right:(Rsh8Ux64 x (Sub64 (Const64 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add8 left:(Lsh8x32 x y) right:(Rsh8Ux32 x (Sub32 (Const32 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + 
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x32 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add8 left:(Lsh8x16 x y) right:(Rsh8Ux16 x (Sub16 (Const16 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x16 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add8 left:(Lsh8x8 x y) right:(Rsh8Ux8 x (Sub8 (Const8 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + 
continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add8 right:(Rsh8Ux64 x y) left:(Lsh8x64 x z:(Sub64 (Const64 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add8 right:(Rsh8Ux32 x y) left:(Lsh8x32 x z:(Sub32 (Const32 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + 
v.AddArg2(x, z) + return true + } + break + } + // match: (Add8 right:(Rsh8Ux16 x y) left:(Lsh8x16 x z:(Sub16 (Const16 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux16 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add8 right:(Rsh8Ux8 x y) left:(Lsh8x8 x z:(Sub8 (Const8 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux8 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x8 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpAddPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddPtr x (Const64 [c])) + // result: (OffPtr x [c]) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOffPtr) + v.Type = t + 
v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (AddPtr x (Const32 [c])) + // result: (OffPtr x [int64(c)]) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpOffPtr) + v.Type = t + v.AuxInt = int64ToAuxInt(int64(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValuegeneric_OpAnd16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (And16 (Const16 [c]) (Const16 [d])) + // result: (Const16 [c&d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1.AuxInt) + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(c & d) + return true + } + break + } + // match: (And16 (Com16 x) (Com16 y)) + // result: (Com16 (Or16 x y)) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom16 { + continue + } + x := v_0.Args[0] + if v_1.Op != OpCom16 { + continue + } + y := v_1.Args[0] + v.reset(OpCom16) + v0 := b.NewValue0(v.Pos, OpOr16, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (And16 (Const16 [m]) (Rsh16Ux64 _ (Const64 [c]))) + // cond: c >= int64(16-ntz16(m)) + // result: (Const16 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + m := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpRsh16Ux64 { + continue + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c >= int64(16-ntz16(m))) { + continue + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + break + } + // match: (And16 (Const16 [m]) (Lsh16x64 _ (Const64 [c]))) + // cond: c >= int64(16-nlz16(m)) + // result: (Const16 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op 
!= OpConst16 { + continue + } + m := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpLsh16x64 { + continue + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c >= int64(16-nlz16(m))) { + continue + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + break + } + // match: (And16 x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (And16 (Const16 [-1]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (And16 (Const16 [0]) _) + // result: (Const16 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + continue + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + break + } + // match: (And16 (Com16 x) x) + // result: (Const16 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom16 { + continue + } + x := v_0.Args[0] + if x != v_1 { + continue + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + break + } + // match: (And16 x (And16 x y)) + // result: (And16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAnd16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpAnd16) + v.AddArg2(x, y) + return true + } + } + break + } + // match: (And16 (And16 i:(Const16 ) z) x) + // cond: (z.Op != OpConst16 && x.Op != OpConst16) + // result: (And16 i (And16 z x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAnd16 { + continue + } + _ = v_0.Args[1] + 
v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst16 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst16 && x.Op != OpConst16) { + continue + } + v.reset(OpAnd16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (And16 (Const16 [c]) (And16 (Const16 [d]) x)) + // result: (And16 (Const16 [c&d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpAnd16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + continue + } + d := auxIntToInt16(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpAnd16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(c & d) + v.AddArg2(v0, x) + return true + } + } + break + } + return false +} +func rewriteValuegeneric_OpAnd32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (And32 (Const32 [c]) (Const32 [d])) + // result: (Const32 [c&d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(c & d) + return true + } + break + } + // match: (And32 (Com32 x) (Com32 y)) + // result: (Com32 (Or32 x y)) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom32 { + continue + } + x := v_0.Args[0] + if v_1.Op != OpCom32 { + continue + } + y := v_1.Args[0] + v.reset(OpCom32) + v0 := b.NewValue0(v.Pos, OpOr32, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (And32 
(Const32 [m]) (Rsh32Ux64 _ (Const64 [c]))) + // cond: c >= int64(32-ntz32(m)) + // result: (Const32 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + m := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpRsh32Ux64 { + continue + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c >= int64(32-ntz32(m))) { + continue + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + break + } + // match: (And32 (Const32 [m]) (Lsh32x64 _ (Const64 [c]))) + // cond: c >= int64(32-nlz32(m)) + // result: (Const32 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + m := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpLsh32x64 { + continue + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c >= int64(32-nlz32(m))) { + continue + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + break + } + // match: (And32 x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (And32 (Const32 [-1]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (And32 (Const32 [0]) _) + // result: (Const32 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + continue + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + break + } + // match: (And32 (Com32 x) x) + // result: (Const32 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom32 { + continue + } + x := v_0.Args[0] + if x != v_1 { + continue + } + v.reset(OpConst32) + 
v.AuxInt = int32ToAuxInt(0) + return true + } + break + } + // match: (And32 x (And32 x y)) + // result: (And32 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAnd32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpAnd32) + v.AddArg2(x, y) + return true + } + } + break + } + // match: (And32 (And32 i:(Const32 ) z) x) + // cond: (z.Op != OpConst32 && x.Op != OpConst32) + // result: (And32 i (And32 z x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAnd32 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst32 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst32 && x.Op != OpConst32) { + continue + } + v.reset(OpAnd32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (And32 (Const32 [c]) (And32 (Const32 [d]) x)) + // result: (And32 (Const32 [c&d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpAnd32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst32 || v_1_0.Type != t { + continue + } + d := auxIntToInt32(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpAnd32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(c & d) + v.AddArg2(v0, x) + return true + } + } + break + } + return false +} +func rewriteValuegeneric_OpAnd64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (And64 (Const64 [c]) (Const64 [d])) 
+ // result: (Const64 [c&d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(c & d) + return true + } + break + } + // match: (And64 (Com64 x) (Com64 y)) + // result: (Com64 (Or64 x y)) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom64 { + continue + } + x := v_0.Args[0] + if v_1.Op != OpCom64 { + continue + } + y := v_1.Args[0] + v.reset(OpCom64) + v0 := b.NewValue0(v.Pos, OpOr64, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (And64 (Const64 [m]) (Rsh64Ux64 _ (Const64 [c]))) + // cond: c >= int64(64-ntz64(m)) + // result: (Const64 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + m := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpRsh64Ux64 { + continue + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c >= int64(64-ntz64(m))) { + continue + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + break + } + // match: (And64 (Const64 [m]) (Lsh64x64 _ (Const64 [c]))) + // cond: c >= int64(64-nlz64(m)) + // result: (Const64 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + m := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpLsh64x64 { + continue + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c >= int64(64-nlz64(m))) { + continue + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + break + } + // match: (And64 x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (And64 (Const64 [-1]) x) 
+ // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (And64 (Const64 [0]) _) + // result: (Const64 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + continue + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + break + } + // match: (And64 (Com64 x) x) + // result: (Const64 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom64 { + continue + } + x := v_0.Args[0] + if x != v_1 { + continue + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + break + } + // match: (And64 x (And64 x y)) + // result: (And64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAnd64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpAnd64) + v.AddArg2(x, y) + return true + } + } + break + } + // match: (And64 (And64 i:(Const64 ) z) x) + // cond: (z.Op != OpConst64 && x.Op != OpConst64) + // result: (And64 i (And64 z x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAnd64 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst64 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst64 && x.Op != OpConst64) { + continue + } + v.reset(OpAnd64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (And64 (Const64 [c]) (And64 (Const64 [d]) x)) + // result: (And64 (Const64 [c&d]) x) + for { + for _i0 
:= 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpAnd64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := auxIntToInt64(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpAnd64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c & d) + v.AddArg2(v0, x) + return true + } + } + break + } + return false +} +func rewriteValuegeneric_OpAnd8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (And8 (Const8 [c]) (Const8 [d])) + // result: (Const8 [c&d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1.AuxInt) + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(c & d) + return true + } + break + } + // match: (And8 (Com8 x) (Com8 y)) + // result: (Com8 (Or8 x y)) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom8 { + continue + } + x := v_0.Args[0] + if v_1.Op != OpCom8 { + continue + } + y := v_1.Args[0] + v.reset(OpCom8) + v0 := b.NewValue0(v.Pos, OpOr8, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (And8 (Const8 [m]) (Rsh8Ux64 _ (Const64 [c]))) + // cond: c >= int64(8-ntz8(m)) + // result: (Const8 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { + continue + } + m := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpRsh8Ux64 { + continue + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c >= int64(8-ntz8(m))) { + continue + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + break + } + // 
match: (And8 (Const8 [m]) (Lsh8x64 _ (Const64 [c]))) + // cond: c >= int64(8-nlz8(m)) + // result: (Const8 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { + continue + } + m := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpLsh8x64 { + continue + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c >= int64(8-nlz8(m))) { + continue + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + break + } + // match: (And8 x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (And8 (Const8 [-1]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (And8 (Const8 [0]) _) + // result: (Const8 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + continue + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + break + } + // match: (And8 (Com8 x) x) + // result: (Const8 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom8 { + continue + } + x := v_0.Args[0] + if x != v_1 { + continue + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + break + } + // match: (And8 x (And8 x y)) + // result: (And8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAnd8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpAnd8) + v.AddArg2(x, y) + return true + } + } + break + } + // match: (And8 (And8 i:(Const8 ) z) x) + // cond: (z.Op != OpConst8 && x.Op != OpConst8) + // 
result: (And8 i (And8 z x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAnd8 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst8 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst8 && x.Op != OpConst8) { + continue + } + v.reset(OpAnd8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (And8 (Const8 [c]) (And8 (Const8 [d]) x)) + // result: (And8 (Const8 [c&d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { + continue + } + t := v_0.Type + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpAnd8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + continue + } + d := auxIntToInt8(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpAnd8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(c & d) + v.AddArg2(v0, x) + return true + } + } + break + } + return false +} +func rewriteValuegeneric_OpAndB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (AndB (Leq64 (Const64 [c]) x) (Less64 x (Const64 [d]))) + // cond: d >= c + // result: (Less64U (Sub64 x (Const64 [c])) (Const64 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq64 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLess64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(d >= c) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, 
OpSub64, x.Type) + v1 := b.NewValue0(v.Pos, OpConst64, x.Type) + v1.AuxInt = int64ToAuxInt(c) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d - c) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) + // cond: d >= c + // result: (Leq64U (Sub64 x (Const64 [c])) (Const64 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq64 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLeq64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(d >= c) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpSub64, x.Type) + v1 := b.NewValue0(v.Pos, OpConst64, x.Type) + v1.AuxInt = int64ToAuxInt(c) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d - c) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq32 (Const32 [c]) x) (Less32 x (Const32 [d]))) + // cond: d >= c + // result: (Less32U (Sub32 x (Const32 [c])) (Const32 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq32 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLess32 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(d >= c) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpSub32, x.Type) + v1 := b.NewValue0(v.Pos, OpConst32, x.Type) + v1.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d - c) + 
v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) + // cond: d >= c + // result: (Leq32U (Sub32 x (Const32 [c])) (Const32 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq32 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLeq32 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(d >= c) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpSub32, x.Type) + v1 := b.NewValue0(v.Pos, OpConst32, x.Type) + v1.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d - c) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq16 (Const16 [c]) x) (Less16 x (Const16 [d]))) + // cond: d >= c + // result: (Less16U (Sub16 x (Const16 [c])) (Const16 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq16 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLess16 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(d >= c) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpSub16, x.Type) + v1 := b.NewValue0(v.Pos, OpConst16, x.Type) + v1.AuxInt = int16ToAuxInt(c) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d - c) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) + // cond: d >= c + // result: (Leq16U (Sub16 x (Const16 [c])) (Const16 [d-c])) + 
for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq16 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLeq16 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(d >= c) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpSub16, x.Type) + v1 := b.NewValue0(v.Pos, OpConst16, x.Type) + v1.AuxInt = int16ToAuxInt(c) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d - c) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq8 (Const8 [c]) x) (Less8 x (Const8 [d]))) + // cond: d >= c + // result: (Less8U (Sub8 x (Const8 [c])) (Const8 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq8 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLess8 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(d >= c) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpSub8, x.Type) + v1 := b.NewValue0(v.Pos, OpConst8, x.Type) + v1.AuxInt = int8ToAuxInt(c) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d - c) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) + // cond: d >= c + // result: (Leq8U (Sub8 x (Const8 [c])) (Const8 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq8 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + 
if v_1.Op != OpLeq8 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(d >= c) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpSub8, x.Type) + v1 := b.NewValue0(v.Pos, OpConst8, x.Type) + v1.AuxInt = int8ToAuxInt(c) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d - c) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d]))) + // cond: d >= c+1 && c+1 > c + // result: (Less64U (Sub64 x (Const64 [c+1])) (Const64 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess64 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLess64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(d >= c+1 && c+1 > c) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpSub64, x.Type) + v1 := b.NewValue0(v.Pos, OpConst64, x.Type) + v1.AuxInt = int64ToAuxInt(c + 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d - c - 1) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) + // cond: d >= c+1 && c+1 > c + // result: (Leq64U (Sub64 x (Const64 [c+1])) (Const64 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess64 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLeq64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { 
+ continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(d >= c+1 && c+1 > c) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpSub64, x.Type) + v1 := b.NewValue0(v.Pos, OpConst64, x.Type) + v1.AuxInt = int64ToAuxInt(c + 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d - c - 1) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less32 (Const32 [c]) x) (Less32 x (Const32 [d]))) + // cond: d >= c+1 && c+1 > c + // result: (Less32U (Sub32 x (Const32 [c+1])) (Const32 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess32 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLess32 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(d >= c+1 && c+1 > c) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpSub32, x.Type) + v1 := b.NewValue0(v.Pos, OpConst32, x.Type) + v1.AuxInt = int32ToAuxInt(c + 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d - c - 1) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) + // cond: d >= c+1 && c+1 > c + // result: (Leq32U (Sub32 x (Const32 [c+1])) (Const32 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess32 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLeq32 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(d >= c+1 && c+1 > c) { + continue + } + v.reset(OpLeq32U) + 
v0 := b.NewValue0(v.Pos, OpSub32, x.Type) + v1 := b.NewValue0(v.Pos, OpConst32, x.Type) + v1.AuxInt = int32ToAuxInt(c + 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d - c - 1) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less16 (Const16 [c]) x) (Less16 x (Const16 [d]))) + // cond: d >= c+1 && c+1 > c + // result: (Less16U (Sub16 x (Const16 [c+1])) (Const16 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess16 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLess16 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(d >= c+1 && c+1 > c) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpSub16, x.Type) + v1 := b.NewValue0(v.Pos, OpConst16, x.Type) + v1.AuxInt = int16ToAuxInt(c + 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d - c - 1) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) + // cond: d >= c+1 && c+1 > c + // result: (Leq16U (Sub16 x (Const16 [c+1])) (Const16 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess16 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLeq16 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(d >= c+1 && c+1 > c) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpSub16, x.Type) + v1 := b.NewValue0(v.Pos, OpConst16, x.Type) + v1.AuxInt = int16ToAuxInt(c 
+ 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d - c - 1) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less8 (Const8 [c]) x) (Less8 x (Const8 [d]))) + // cond: d >= c+1 && c+1 > c + // result: (Less8U (Sub8 x (Const8 [c+1])) (Const8 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess8 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLess8 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(d >= c+1 && c+1 > c) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpSub8, x.Type) + v1 := b.NewValue0(v.Pos, OpConst8, x.Type) + v1.AuxInt = int8ToAuxInt(c + 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d - c - 1) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) + // cond: d >= c+1 && c+1 > c + // result: (Leq8U (Sub8 x (Const8 [c+1])) (Const8 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess8 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLeq8 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(d >= c+1 && c+1 > c) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpSub8, x.Type) + v1 := b.NewValue0(v.Pos, OpConst8, x.Type) + v1.AuxInt = int8ToAuxInt(c + 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d - c - 1) + v.AddArg2(v0, v2) + return true + } + break + 
} + // match: (AndB (Leq64U (Const64 [c]) x) (Less64U x (Const64 [d]))) + // cond: uint64(d) >= uint64(c) + // result: (Less64U (Sub64 x (Const64 [c])) (Const64 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLess64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(uint64(d) >= uint64(c)) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpSub64, x.Type) + v1 := b.NewValue0(v.Pos, OpConst64, x.Type) + v1.AuxInt = int64ToAuxInt(c) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d - c) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) + // cond: uint64(d) >= uint64(c) + // result: (Leq64U (Sub64 x (Const64 [c])) (Const64 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLeq64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(uint64(d) >= uint64(c)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpSub64, x.Type) + v1 := b.NewValue0(v.Pos, OpConst64, x.Type) + v1.AuxInt = int64ToAuxInt(c) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d - c) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq32U (Const32 [c]) x) (Less32U x (Const32 [d]))) + // cond: uint32(d) >= uint32(c) + // result: 
(Less32U (Sub32 x (Const32 [c])) (Const32 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLess32U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(uint32(d) >= uint32(c)) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpSub32, x.Type) + v1 := b.NewValue0(v.Pos, OpConst32, x.Type) + v1.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d - c) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) + // cond: uint32(d) >= uint32(c) + // result: (Leq32U (Sub32 x (Const32 [c])) (Const32 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLeq32U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(uint32(d) >= uint32(c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpSub32, x.Type) + v1 := b.NewValue0(v.Pos, OpConst32, x.Type) + v1.AuxInt = int32ToAuxInt(c) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d - c) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq16U (Const16 [c]) x) (Less16U x (Const16 [d]))) + // cond: uint16(d) >= uint16(c) + // result: (Less16U (Sub16 x (Const16 [c])) (Const16 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if 
v_0.Op != OpLeq16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLess16U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(uint16(d) >= uint16(c)) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpSub16, x.Type) + v1 := b.NewValue0(v.Pos, OpConst16, x.Type) + v1.AuxInt = int16ToAuxInt(c) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d - c) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) + // cond: uint16(d) >= uint16(c) + // result: (Leq16U (Sub16 x (Const16 [c])) (Const16 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLeq16U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(uint16(d) >= uint16(c)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpSub16, x.Type) + v1 := b.NewValue0(v.Pos, OpConst16, x.Type) + v1.AuxInt = int16ToAuxInt(c) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d - c) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq8U (Const8 [c]) x) (Less8U x (Const8 [d]))) + // cond: uint8(d) >= uint8(c) + // result: (Less8U (Sub8 x (Const8 [c])) (Const8 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := 
auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLess8U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(uint8(d) >= uint8(c)) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpSub8, x.Type) + v1 := b.NewValue0(v.Pos, OpConst8, x.Type) + v1.AuxInt = int8ToAuxInt(c) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d - c) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Leq8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) + // cond: uint8(d) >= uint8(c) + // result: (Leq8U (Sub8 x (Const8 [c])) (Const8 [d-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLeq8U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(uint8(d) >= uint8(c)) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpSub8, x.Type) + v1 := b.NewValue0(v.Pos, OpConst8, x.Type) + v1.AuxInt = int8ToAuxInt(c) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d - c) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less64U (Const64 [c]) x) (Less64U x (Const64 [d]))) + // cond: uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c) + // result: (Less64U (Sub64 x (Const64 [c+1])) (Const64 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLess64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } 
+ v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpSub64, x.Type) + v1 := b.NewValue0(v.Pos, OpConst64, x.Type) + v1.AuxInt = int64ToAuxInt(c + 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d - c - 1) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) + // cond: uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c) + // result: (Leq64U (Sub64 x (Const64 [c+1])) (Const64 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLeq64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpSub64, x.Type) + v1 := b.NewValue0(v.Pos, OpConst64, x.Type) + v1.AuxInt = int64ToAuxInt(c + 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d - c - 1) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less32U (Const32 [c]) x) (Less32U x (Const32 [d]))) + // cond: uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c) + // result: (Less32U (Sub32 x (Const32 [c+1])) (Const32 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLess32U { + continue + } + _ = v_1.Args[1] + if x != 
v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpSub32, x.Type) + v1 := b.NewValue0(v.Pos, OpConst32, x.Type) + v1.AuxInt = int32ToAuxInt(c + 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d - c - 1) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) + // cond: uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c) + // result: (Leq32U (Sub32 x (Const32 [c+1])) (Const32 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLeq32U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpSub32, x.Type) + v1 := b.NewValue0(v.Pos, OpConst32, x.Type) + v1.AuxInt = int32ToAuxInt(c + 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d - c - 1) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less16U (Const16 [c]) x) (Less16U x (Const16 [d]))) + // cond: uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c) + // result: (Less16U (Sub16 x (Const16 [c+1])) (Const16 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLess16U { + continue + } + _ = 
v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpSub16, x.Type) + v1 := b.NewValue0(v.Pos, OpConst16, x.Type) + v1.AuxInt = int16ToAuxInt(c + 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d - c - 1) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) + // cond: uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c) + // result: (Leq16U (Sub16 x (Const16 [c+1])) (Const16 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLeq16U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpSub16, x.Type) + v1 := b.NewValue0(v.Pos, OpConst16, x.Type) + v1.AuxInt = int16ToAuxInt(c + 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d - c - 1) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less8U (Const8 [c]) x) (Less8U x (Const8 [d]))) + // cond: uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c) + // result: (Less8U (Sub8 x (Const8 [c+1])) (Const8 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLess8U { + continue + } + _ 
= v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpSub8, x.Type) + v1 := b.NewValue0(v.Pos, OpConst8, x.Type) + v1.AuxInt = int8ToAuxInt(c + 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d - c - 1) + v.AddArg2(v0, v2) + return true + } + break + } + // match: (AndB (Less8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) + // cond: uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c) + // result: (Leq8U (Sub8 x (Const8 [c+1])) (Const8 [d-c-1])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLeq8U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpSub8, x.Type) + v1 := b.NewValue0(v.Pos, OpConst8, x.Type) + v1.AuxInt = int8ToAuxInt(c + 1) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d - c - 1) + v.AddArg2(v0, v2) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpArraySelect(v *Value) bool { + v_0 := v.Args[0] + // match: (ArraySelect (ArrayMake1 x)) + // result: x + for { + if v_0.Op != OpArrayMake1 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (ArraySelect [0] (IData x)) + // result: (IData x) + for { + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpIData { + break + } + x := v_0.Args[0] + v.reset(OpIData) + v.AddArg(x) + return true + } + return false +} +func 
rewriteValuegeneric_OpCeil(v *Value) bool { + v_0 := v.Args[0] + // match: (Ceil (Const64F [c])) + // result: (Const64F [math.Ceil(c)]) + for { + if v_0.Op != OpConst64F { + break + } + c := auxIntToFloat64(v_0.AuxInt) + v.reset(OpConst64F) + v.AuxInt = float64ToAuxInt(math.Ceil(c)) + return true + } + return false +} +func rewriteValuegeneric_OpCom16(v *Value) bool { + v_0 := v.Args[0] + // match: (Com16 (Com16 x)) + // result: x + for { + if v_0.Op != OpCom16 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Com16 (Const16 [c])) + // result: (Const16 [^c]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(^c) + return true + } + // match: (Com16 (Add16 (Const16 [-1]) x)) + // result: (Neg16 x) + for { + if v_0.Op != OpAdd16 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst16 || auxIntToInt16(v_0_0.AuxInt) != -1 { + continue + } + x := v_0_1 + v.reset(OpNeg16) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpCom32(v *Value) bool { + v_0 := v.Args[0] + // match: (Com32 (Com32 x)) + // result: x + for { + if v_0.Op != OpCom32 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Com32 (Const32 [c])) + // result: (Const32 [^c]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(^c) + return true + } + // match: (Com32 (Add32 (Const32 [-1]) x)) + // result: (Neg32 x) + for { + if v_0.Op != OpAdd32 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst32 || auxIntToInt32(v_0_0.AuxInt) != -1 { + continue + } + x := v_0_1 + v.reset(OpNeg32) + v.AddArg(x) + return true + } + break + } + return 
false +} +func rewriteValuegeneric_OpCom64(v *Value) bool { + v_0 := v.Args[0] + // match: (Com64 (Com64 x)) + // result: x + for { + if v_0.Op != OpCom64 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Com64 (Const64 [c])) + // result: (Const64 [^c]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(^c) + return true + } + // match: (Com64 (Add64 (Const64 [-1]) x)) + // result: (Neg64 x) + for { + if v_0.Op != OpAdd64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != -1 { + continue + } + x := v_0_1 + v.reset(OpNeg64) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpCom8(v *Value) bool { + v_0 := v.Args[0] + // match: (Com8 (Com8 x)) + // result: x + for { + if v_0.Op != OpCom8 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Com8 (Const8 [c])) + // result: (Const8 [^c]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(^c) + return true + } + // match: (Com8 (Add8 (Const8 [-1]) x)) + // result: (Neg8 x) + for { + if v_0.Op != OpAdd8 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst8 || auxIntToInt8(v_0_0.AuxInt) != -1 { + continue + } + x := v_0_1 + v.reset(OpNeg8) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpConstInterface(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (ConstInterface) + // result: (IMake (ConstNil ) (ConstNil )) + for { + v.reset(OpIMake) + v0 := b.NewValue0(v.Pos, OpConstNil, typ.Uintptr) + v1 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr) + 
v.AddArg2(v0, v1) + return true + } +} +func rewriteValuegeneric_OpConstSlice(v *Value) bool { + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (ConstSlice) + // cond: config.PtrSize == 4 + // result: (SliceMake (ConstNil ) (Const32 [0]) (Const32 [0])) + for { + if !(config.PtrSize == 4) { + break + } + v.reset(OpSliceMake) + v0 := b.NewValue0(v.Pos, OpConstNil, v.Type.Elem().PtrTo()) + v1 := b.NewValue0(v.Pos, OpConst32, typ.Int) + v1.AuxInt = int32ToAuxInt(0) + v.AddArg3(v0, v1, v1) + return true + } + // match: (ConstSlice) + // cond: config.PtrSize == 8 + // result: (SliceMake (ConstNil ) (Const64 [0]) (Const64 [0])) + for { + if !(config.PtrSize == 8) { + break + } + v.reset(OpSliceMake) + v0 := b.NewValue0(v.Pos, OpConstNil, v.Type.Elem().PtrTo()) + v1 := b.NewValue0(v.Pos, OpConst64, typ.Int) + v1.AuxInt = int64ToAuxInt(0) + v.AddArg3(v0, v1, v1) + return true + } + return false +} +func rewriteValuegeneric_OpConstString(v *Value) bool { + b := v.Block + config := b.Func.Config + fe := b.Func.fe + typ := &b.Func.Config.Types + // match: (ConstString {str}) + // cond: config.PtrSize == 4 && str == "" + // result: (StringMake (ConstNil) (Const32 [0])) + for { + str := auxToString(v.Aux) + if !(config.PtrSize == 4 && str == "") { + break + } + v.reset(OpStringMake) + v0 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr) + v1 := b.NewValue0(v.Pos, OpConst32, typ.Int) + v1.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, v1) + return true + } + // match: (ConstString {str}) + // cond: config.PtrSize == 8 && str == "" + // result: (StringMake (ConstNil) (Const64 [0])) + for { + str := auxToString(v.Aux) + if !(config.PtrSize == 8 && str == "") { + break + } + v.reset(OpStringMake) + v0 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr) + v1 := b.NewValue0(v.Pos, OpConst64, typ.Int) + v1.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v1) + return true + } + // match: (ConstString {str}) + // cond: config.PtrSize == 4 && str != "" + // result: 
(StringMake (Addr {fe.StringData(str)} (SB)) (Const32 [int32(len(str))])) + for { + str := auxToString(v.Aux) + if !(config.PtrSize == 4 && str != "") { + break + } + v.reset(OpStringMake) + v0 := b.NewValue0(v.Pos, OpAddr, typ.BytePtr) + v0.Aux = symToAux(fe.StringData(str)) + v1 := b.NewValue0(v.Pos, OpSB, typ.Uintptr) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpConst32, typ.Int) + v2.AuxInt = int32ToAuxInt(int32(len(str))) + v.AddArg2(v0, v2) + return true + } + // match: (ConstString {str}) + // cond: config.PtrSize == 8 && str != "" + // result: (StringMake (Addr {fe.StringData(str)} (SB)) (Const64 [int64(len(str))])) + for { + str := auxToString(v.Aux) + if !(config.PtrSize == 8 && str != "") { + break + } + v.reset(OpStringMake) + v0 := b.NewValue0(v.Pos, OpAddr, typ.BytePtr) + v0.Aux = symToAux(fe.StringData(str)) + v1 := b.NewValue0(v.Pos, OpSB, typ.Uintptr) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpConst64, typ.Int) + v2.AuxInt = int64ToAuxInt(int64(len(str))) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValuegeneric_OpConvert(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Convert (Add64 (Convert ptr mem) off) mem) + // result: (AddPtr ptr off) + for { + if v_0.Op != OpAdd64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConvert { + continue + } + mem := v_0_0.Args[1] + ptr := v_0_0.Args[0] + off := v_0_1 + if mem != v_1 { + continue + } + v.reset(OpAddPtr) + v.AddArg2(ptr, off) + return true + } + break + } + // match: (Convert (Add32 (Convert ptr mem) off) mem) + // result: (AddPtr ptr off) + for { + if v_0.Op != OpAdd32 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConvert { + continue + } + mem := v_0_0.Args[1] + ptr := v_0_0.Args[0] + off := v_0_1 + if mem != v_1 { + 
continue + } + v.reset(OpAddPtr) + v.AddArg2(ptr, off) + return true + } + break + } + // match: (Convert (Convert ptr mem) mem) + // result: ptr + for { + if v_0.Op != OpConvert { + break + } + mem := v_0.Args[1] + ptr := v_0.Args[0] + if mem != v_1 { + break + } + v.copyOf(ptr) + return true + } + return false +} +func rewriteValuegeneric_OpCtz16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Ctz16 (Const16 [c])) + // cond: config.PtrSize == 4 + // result: (Const32 [int32(ntz16(c))]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + if !(config.PtrSize == 4) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(int32(ntz16(c))) + return true + } + // match: (Ctz16 (Const16 [c])) + // cond: config.PtrSize == 8 + // result: (Const64 [int64(ntz16(c))]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + if !(config.PtrSize == 8) { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(int64(ntz16(c))) + return true + } + return false +} +func rewriteValuegeneric_OpCtz32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Ctz32 (Const32 [c])) + // cond: config.PtrSize == 4 + // result: (Const32 [int32(ntz32(c))]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + if !(config.PtrSize == 4) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(int32(ntz32(c))) + return true + } + // match: (Ctz32 (Const32 [c])) + // cond: config.PtrSize == 8 + // result: (Const64 [int64(ntz32(c))]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + if !(config.PtrSize == 8) { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(int64(ntz32(c))) + return true + } + return false +} +func rewriteValuegeneric_OpCtz64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Ctz64 (Const64 [c])) + // cond: config.PtrSize == 4 + // 
result: (Const32 [int32(ntz64(c))]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + if !(config.PtrSize == 4) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(int32(ntz64(c))) + return true + } + // match: (Ctz64 (Const64 [c])) + // cond: config.PtrSize == 8 + // result: (Const64 [int64(ntz64(c))]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + if !(config.PtrSize == 8) { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(int64(ntz64(c))) + return true + } + return false +} +func rewriteValuegeneric_OpCtz8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Ctz8 (Const8 [c])) + // cond: config.PtrSize == 4 + // result: (Const32 [int32(ntz8(c))]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + if !(config.PtrSize == 4) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(int32(ntz8(c))) + return true + } + // match: (Ctz8 (Const8 [c])) + // cond: config.PtrSize == 8 + // result: (Const64 [int64(ntz8(c))]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + if !(config.PtrSize == 8) { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(int64(ntz8(c))) + return true + } + return false +} +func rewriteValuegeneric_OpCvt32Fto32(v *Value) bool { + v_0 := v.Args[0] + // match: (Cvt32Fto32 (Const32F [c])) + // result: (Const32 [int32(c)]) + for { + if v_0.Op != OpConst32F { + break + } + c := auxIntToFloat32(v_0.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(int32(c)) + return true + } + return false +} +func rewriteValuegeneric_OpCvt32Fto64(v *Value) bool { + v_0 := v.Args[0] + // match: (Cvt32Fto64 (Const32F [c])) + // result: (Const64 [int64(c)]) + for { + if v_0.Op != OpConst32F { + break + } + c := auxIntToFloat32(v_0.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(int64(c)) + return true + } + return false +} +func 
rewriteValuegeneric_OpCvt32Fto64F(v *Value) bool { + v_0 := v.Args[0] + // match: (Cvt32Fto64F (Const32F [c])) + // result: (Const64F [float64(c)]) + for { + if v_0.Op != OpConst32F { + break + } + c := auxIntToFloat32(v_0.AuxInt) + v.reset(OpConst64F) + v.AuxInt = float64ToAuxInt(float64(c)) + return true + } + return false +} +func rewriteValuegeneric_OpCvt32to32F(v *Value) bool { + v_0 := v.Args[0] + // match: (Cvt32to32F (Const32 [c])) + // result: (Const32F [float32(c)]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpConst32F) + v.AuxInt = float32ToAuxInt(float32(c)) + return true + } + return false +} +func rewriteValuegeneric_OpCvt32to64F(v *Value) bool { + v_0 := v.Args[0] + // match: (Cvt32to64F (Const32 [c])) + // result: (Const64F [float64(c)]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpConst64F) + v.AuxInt = float64ToAuxInt(float64(c)) + return true + } + return false +} +func rewriteValuegeneric_OpCvt64Fto32(v *Value) bool { + v_0 := v.Args[0] + // match: (Cvt64Fto32 (Const64F [c])) + // result: (Const32 [int32(c)]) + for { + if v_0.Op != OpConst64F { + break + } + c := auxIntToFloat64(v_0.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(int32(c)) + return true + } + return false +} +func rewriteValuegeneric_OpCvt64Fto32F(v *Value) bool { + v_0 := v.Args[0] + // match: (Cvt64Fto32F (Const64F [c])) + // result: (Const32F [float32(c)]) + for { + if v_0.Op != OpConst64F { + break + } + c := auxIntToFloat64(v_0.AuxInt) + v.reset(OpConst32F) + v.AuxInt = float32ToAuxInt(float32(c)) + return true + } + // match: (Cvt64Fto32F sqrt0:(Sqrt (Cvt32Fto64F x))) + // cond: sqrt0.Uses==1 + // result: (Sqrt32 x) + for { + sqrt0 := v_0 + if sqrt0.Op != OpSqrt { + break + } + sqrt0_0 := sqrt0.Args[0] + if sqrt0_0.Op != OpCvt32Fto64F { + break + } + x := sqrt0_0.Args[0] + if !(sqrt0.Uses == 1) { + break + } + v.reset(OpSqrt32) + v.AddArg(x) + return true + } + return 
false +} +func rewriteValuegeneric_OpCvt64Fto64(v *Value) bool { + v_0 := v.Args[0] + // match: (Cvt64Fto64 (Const64F [c])) + // result: (Const64 [int64(c)]) + for { + if v_0.Op != OpConst64F { + break + } + c := auxIntToFloat64(v_0.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(int64(c)) + return true + } + return false +} +func rewriteValuegeneric_OpCvt64to32F(v *Value) bool { + v_0 := v.Args[0] + // match: (Cvt64to32F (Const64 [c])) + // result: (Const32F [float32(c)]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpConst32F) + v.AuxInt = float32ToAuxInt(float32(c)) + return true + } + return false +} +func rewriteValuegeneric_OpCvt64to64F(v *Value) bool { + v_0 := v.Args[0] + // match: (Cvt64to64F (Const64 [c])) + // result: (Const64F [float64(c)]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpConst64F) + v.AuxInt = float64ToAuxInt(float64(c)) + return true + } + return false +} +func rewriteValuegeneric_OpCvtBoolToUint8(v *Value) bool { + v_0 := v.Args[0] + // match: (CvtBoolToUint8 (ConstBool [false])) + // result: (Const8 [0]) + for { + if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != false { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + // match: (CvtBoolToUint8 (ConstBool [true])) + // result: (Const8 [1]) + for { + if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != true { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(1) + return true + } + return false +} +func rewriteValuegeneric_OpDiv16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 (Const16 [c]) (Const16 [d])) + // cond: d != 0 + // result: (Const16 [c/d]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst16 { + break + } + d := auxIntToInt16(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpConst16) + v.AuxInt = 
int16ToAuxInt(c / d) + return true + } + // match: (Div16 n (Const16 [c])) + // cond: isNonNegative(n) && isPowerOfTwo16(c) + // result: (Rsh16Ux64 n (Const64 [log16(c)])) + for { + n := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(isNonNegative(n) && isPowerOfTwo16(c)) { + break + } + v.reset(OpRsh16Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(log16(c)) + v.AddArg2(n, v0) + return true + } + // match: (Div16 n (Const16 [c])) + // cond: c < 0 && c != -1<<15 + // result: (Neg16 (Div16 n (Const16 [-c]))) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(c < 0 && c != -1<<15) { + break + } + v.reset(OpNeg16) + v0 := b.NewValue0(v.Pos, OpDiv16, t) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = int16ToAuxInt(-c) + v0.AddArg2(n, v1) + v.AddArg(v0) + return true + } + // match: (Div16 x (Const16 [-1<<15])) + // result: (Rsh16Ux64 (And16 x (Neg16 x)) (Const64 [15])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != -1<<15 { + break + } + v.reset(OpRsh16Ux64) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v1 := b.NewValue0(v.Pos, OpNeg16, t) + v1.AddArg(x) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(15) + v.AddArg2(v0, v2) + return true + } + // match: (Div16 n (Const16 [c])) + // cond: isPowerOfTwo16(c) + // result: (Rsh16x64 (Add16 n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [int64(16-log16(c))]))) (Const64 [int64(log16(c))])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(isPowerOfTwo16(c)) { + break + } + v.reset(OpRsh16x64) + v0 := b.NewValue0(v.Pos, OpAdd16, t) + v1 := b.NewValue0(v.Pos, OpRsh16Ux64, t) + v2 := b.NewValue0(v.Pos, OpRsh16x64, t) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(15) + v2.AddArg2(n, v3) + v4 := 
b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(int64(16 - log16(c))) + v1.AddArg2(v2, v4) + v0.AddArg2(n, v1) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64ToAuxInt(int64(log16(c))) + v.AddArg2(v0, v5) + return true + } + // match: (Div16 x (Const16 [c])) + // cond: smagicOK16(c) + // result: (Sub16 (Rsh32x64 (Mul32 (Const32 [int32(smagic16(c).m)]) (SignExt16to32 x)) (Const64 [16+smagic16(c).s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(smagicOK16(c)) { + break + } + v.reset(OpSub16) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh32x64, t) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(int32(smagic16(c).m)) + v3 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v3.AddArg(x) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(16 + smagic16(c).s) + v0.AddArg2(v1, v4) + v5 := b.NewValue0(v.Pos, OpRsh32x64, t) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v6.AuxInt = int64ToAuxInt(31) + v5.AddArg2(v3, v6) + v.AddArg2(v0, v5) + return true + } + return false +} +func rewriteValuegeneric_OpDiv16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Div16u (Const16 [c]) (Const16 [d])) + // cond: d != 0 + // result: (Const16 [int16(uint16(c)/uint16(d))]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst16 { + break + } + d := auxIntToInt16(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(int16(uint16(c) / uint16(d))) + return true + } + // match: (Div16u n (Const16 [c])) + // cond: isPowerOfTwo16(c) + // result: (Rsh16Ux64 n (Const64 [log16(c)])) + for { + n := v_0 + if v_1.Op != OpConst16 { + break + } + c 
:= auxIntToInt16(v_1.AuxInt) + if !(isPowerOfTwo16(c)) { + break + } + v.reset(OpRsh16Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(log16(c)) + v.AddArg2(n, v0) + return true + } + // match: (Div16u x (Const16 [c])) + // cond: umagicOK16(c) && config.RegSize == 8 + // result: (Trunc64to16 (Rsh64Ux64 (Mul64 (Const64 [int64(1<<16+umagic16(c).m)]) (ZeroExt16to64 x)) (Const64 [16+umagic16(c).s]))) + for { + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(umagicOK16(c) && config.RegSize == 8) { + break + } + v.reset(OpTrunc64to16) + v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(int64(1<<16 + umagic16(c).m)) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(x) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(16 + umagic16(c).s) + v0.AddArg2(v1, v4) + v.AddArg(v0) + return true + } + // match: (Div16u x (Const16 [c])) + // cond: umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0 + // result: (Trunc32to16 (Rsh32Ux64 (Mul32 (Const32 [int32(1<<15+umagic16(c).m/2)]) (ZeroExt16to32 x)) (Const64 [16+umagic16(c).s-1]))) + for { + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0) { + break + } + v.reset(OpTrunc32to16) + v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(int32(1<<15 + umagic16(c).m/2)) + v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v3.AddArg(x) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 1) + v0.AddArg2(v1, v4) + v.AddArg(v0) + return true + } + // match: (Div16u x (Const16 
[c])) + // cond: umagicOK16(c) && config.RegSize == 4 && c&1 == 0 + // result: (Trunc32to16 (Rsh32Ux64 (Mul32 (Const32 [int32(1<<15+(umagic16(c).m+1)/2)]) (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) (Const64 [16+umagic16(c).s-2]))) + for { + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(umagicOK16(c) && config.RegSize == 4 && c&1 == 0) { + break + } + v.reset(OpTrunc32to16) + v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(int32(1<<15 + (umagic16(c).m+1)/2)) + v3 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) + v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v4.AddArg(x) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64ToAuxInt(1) + v3.AddArg2(v4, v5) + v1.AddArg2(v2, v3) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v6.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 2) + v0.AddArg2(v1, v6) + v.AddArg(v0) + return true + } + // match: (Div16u x (Const16 [c])) + // cond: umagicOK16(c) && config.RegSize == 4 && config.useAvg + // result: (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) (Mul32 (Const32 [int32(umagic16(c).m)]) (ZeroExt16to32 x))) (Const64 [16+umagic16(c).s-1]))) + for { + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(umagicOK16(c) && config.RegSize == 4 && config.useAvg) { + break + } + v.reset(OpTrunc32to16) + v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpAvg32u, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpLsh32x64, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(16) + v2.AddArg2(v3, v4) + v5 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v6.AuxInt = int32ToAuxInt(int32(umagic16(c).m)) + 
v5.AddArg2(v6, v3) + v1.AddArg2(v2, v5) + v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v7.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 1) + v0.AddArg2(v1, v7) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpDiv32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Div32 (Const32 [c]) (Const32 [d])) + // cond: d != 0 + // result: (Const32 [c/d]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + break + } + d := auxIntToInt32(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(c / d) + return true + } + // match: (Div32 n (Const32 [c])) + // cond: isNonNegative(n) && isPowerOfTwo32(c) + // result: (Rsh32Ux64 n (Const64 [log32(c)])) + for { + n := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(isNonNegative(n) && isPowerOfTwo32(c)) { + break + } + v.reset(OpRsh32Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(log32(c)) + v.AddArg2(n, v0) + return true + } + // match: (Div32 n (Const32 [c])) + // cond: c < 0 && c != -1<<31 + // result: (Neg32 (Div32 n (Const32 [-c]))) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(c < 0 && c != -1<<31) { + break + } + v.reset(OpNeg32) + v0 := b.NewValue0(v.Pos, OpDiv32, t) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = int32ToAuxInt(-c) + v0.AddArg2(n, v1) + v.AddArg(v0) + return true + } + // match: (Div32 x (Const32 [-1<<31])) + // result: (Rsh32Ux64 (And32 x (Neg32 x)) (Const64 [31])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != -1<<31 { + break + } + v.reset(OpRsh32Ux64) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v1 := b.NewValue0(v.Pos, OpNeg32, t) + v1.AddArg(x) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) 
+ v2.AuxInt = int64ToAuxInt(31) + v.AddArg2(v0, v2) + return true + } + // match: (Div32 n (Const32 [c])) + // cond: isPowerOfTwo32(c) + // result: (Rsh32x64 (Add32 n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [int64(32-log32(c))]))) (Const64 [int64(log32(c))])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(isPowerOfTwo32(c)) { + break + } + v.reset(OpRsh32x64) + v0 := b.NewValue0(v.Pos, OpAdd32, t) + v1 := b.NewValue0(v.Pos, OpRsh32Ux64, t) + v2 := b.NewValue0(v.Pos, OpRsh32x64, t) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(31) + v2.AddArg2(n, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(int64(32 - log32(c))) + v1.AddArg2(v2, v4) + v0.AddArg2(n, v1) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64ToAuxInt(int64(log32(c))) + v.AddArg2(v0, v5) + return true + } + // match: (Div32 x (Const32 [c])) + // cond: smagicOK32(c) && config.RegSize == 8 + // result: (Sub32 (Rsh64x64 (Mul64 (Const64 [int64(smagic32(c).m)]) (SignExt32to64 x)) (Const64 [32+smagic32(c).s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(smagicOK32(c) && config.RegSize == 8) { + break + } + v.reset(OpSub32) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh64x64, t) + v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(int64(smagic32(c).m)) + v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v3.AddArg(x) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(32 + smagic32(c).s) + v0.AddArg2(v1, v4) + v5 := b.NewValue0(v.Pos, OpRsh64x64, t) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v6.AuxInt = int64ToAuxInt(63) + v5.AddArg2(v3, v6) + v.AddArg2(v0, v5) + return true + } + // match: (Div32 x (Const32 [c])) + 
// cond: smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul + // result: (Sub32 (Rsh32x64 (Hmul32 (Const32 [int32(smagic32(c).m/2)]) x) (Const64 [smagic32(c).s-1])) (Rsh32x64 x (Const64 [31]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul) { + break + } + v.reset(OpSub32) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh32x64, t) + v1 := b.NewValue0(v.Pos, OpHmul32, t) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(int32(smagic32(c).m / 2)) + v1.AddArg2(v2, x) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(smagic32(c).s - 1) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpRsh32x64, t) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64ToAuxInt(31) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) + return true + } + // match: (Div32 x (Const32 [c])) + // cond: smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul + // result: (Sub32 (Rsh32x64 (Add32 (Hmul32 (Const32 [int32(smagic32(c).m)]) x) x) (Const64 [smagic32(c).s])) (Rsh32x64 x (Const64 [31]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul) { + break + } + v.reset(OpSub32) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh32x64, t) + v1 := b.NewValue0(v.Pos, OpAdd32, t) + v2 := b.NewValue0(v.Pos, OpHmul32, t) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(smagic32(c).m)) + v2.AddArg2(v3, x) + v1.AddArg2(v2, x) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(smagic32(c).s) + v0.AddArg2(v1, v4) + v5 := b.NewValue0(v.Pos, OpRsh32x64, t) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v6.AuxInt = int64ToAuxInt(31) + v5.AddArg2(x, v6) + 
v.AddArg2(v0, v5) + return true + } + return false +} +func rewriteValuegeneric_OpDiv32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Div32F (Const32F [c]) (Const32F [d])) + // cond: c/d == c/d + // result: (Const32F [c/d]) + for { + if v_0.Op != OpConst32F { + break + } + c := auxIntToFloat32(v_0.AuxInt) + if v_1.Op != OpConst32F { + break + } + d := auxIntToFloat32(v_1.AuxInt) + if !(c/d == c/d) { + break + } + v.reset(OpConst32F) + v.AuxInt = float32ToAuxInt(c / d) + return true + } + // match: (Div32F x (Const32F [c])) + // cond: reciprocalExact32(c) + // result: (Mul32F x (Const32F [1/c])) + for { + x := v_0 + if v_1.Op != OpConst32F { + break + } + t := v_1.Type + c := auxIntToFloat32(v_1.AuxInt) + if !(reciprocalExact32(c)) { + break + } + v.reset(OpMul32F) + v0 := b.NewValue0(v.Pos, OpConst32F, t) + v0.AuxInt = float32ToAuxInt(1 / c) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpDiv32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Div32u (Const32 [c]) (Const32 [d])) + // cond: d != 0 + // result: (Const32 [int32(uint32(c)/uint32(d))]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + break + } + d := auxIntToInt32(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(int32(uint32(c) / uint32(d))) + return true + } + // match: (Div32u n (Const32 [c])) + // cond: isPowerOfTwo32(c) + // result: (Rsh32Ux64 n (Const64 [log32(c)])) + for { + n := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(isPowerOfTwo32(c)) { + break + } + v.reset(OpRsh32Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(log32(c)) + v.AddArg2(n, v0) + return true + } + // match: (Div32u x (Const32 [c])) + // cond: umagicOK32(c) && config.RegSize == 4 && 
umagic32(c).m&1 == 0 && config.useHmul + // result: (Rsh32Ux64 (Hmul32u (Const32 [int32(1<<31+umagic32(c).m/2)]) x) (Const64 [umagic32(c).s-1])) + for { + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(umagicOK32(c) && config.RegSize == 4 && umagic32(c).m&1 == 0 && config.useHmul) { + break + } + v.reset(OpRsh32Ux64) + v.Type = typ.UInt32 + v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v1.AuxInt = int32ToAuxInt(int32(1<<31 + umagic32(c).m/2)) + v0.AddArg2(v1, x) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(umagic32(c).s - 1) + v.AddArg2(v0, v2) + return true + } + // match: (Div32u x (Const32 [c])) + // cond: umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul + // result: (Rsh32Ux64 (Hmul32u (Const32 [int32(1<<31+(umagic32(c).m+1)/2)]) (Rsh32Ux64 x (Const64 [1]))) (Const64 [umagic32(c).s-2])) + for { + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul) { + break + } + v.reset(OpRsh32Ux64) + v.Type = typ.UInt32 + v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v1.AuxInt = int32ToAuxInt(int32(1<<31 + (umagic32(c).m+1)/2)) + v2 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(1) + v2.AddArg2(x, v3) + v0.AddArg2(v1, v2) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(umagic32(c).s - 2) + v.AddArg2(v0, v4) + return true + } + // match: (Div32u x (Const32 [c])) + // cond: umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul + // result: (Rsh32Ux64 (Avg32u x (Hmul32u (Const32 [int32(umagic32(c).m)]) x)) (Const64 [umagic32(c).s-1])) + for { + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(umagicOK32(c) && 
config.RegSize == 4 && config.useAvg && config.useHmul) { + break + } + v.reset(OpRsh32Ux64) + v.Type = typ.UInt32 + v0 := b.NewValue0(v.Pos, OpAvg32u, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(int32(umagic32(c).m)) + v1.AddArg2(v2, x) + v0.AddArg2(x, v1) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(umagic32(c).s - 1) + v.AddArg2(v0, v3) + return true + } + // match: (Div32u x (Const32 [c])) + // cond: umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0 + // result: (Trunc64to32 (Rsh64Ux64 (Mul64 (Const64 [int64(1<<31+umagic32(c).m/2)]) (ZeroExt32to64 x)) (Const64 [32+umagic32(c).s-1]))) + for { + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0) { + break + } + v.reset(OpTrunc64to32) + v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(int64(1<<31 + umagic32(c).m/2)) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(x) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 1) + v0.AddArg2(v1, v4) + v.AddArg(v0) + return true + } + // match: (Div32u x (Const32 [c])) + // cond: umagicOK32(c) && config.RegSize == 8 && c&1 == 0 + // result: (Trunc64to32 (Rsh64Ux64 (Mul64 (Const64 [int64(1<<31+(umagic32(c).m+1)/2)]) (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) (Const64 [32+umagic32(c).s-2]))) + for { + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(umagicOK32(c) && config.RegSize == 8 && c&1 == 0) { + break + } + v.reset(OpTrunc64to32) + v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + 
v2.AuxInt = int64ToAuxInt(int64(1<<31 + (umagic32(c).m+1)/2)) + v3 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(x) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64ToAuxInt(1) + v3.AddArg2(v4, v5) + v1.AddArg2(v2, v3) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v6.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 2) + v0.AddArg2(v1, v6) + v.AddArg(v0) + return true + } + // match: (Div32u x (Const32 [c])) + // cond: umagicOK32(c) && config.RegSize == 8 && config.useAvg + // result: (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) (Mul64 (Const64 [int64(umagic32(c).m)]) (ZeroExt32to64 x))) (Const64 [32+umagic32(c).s-1]))) + for { + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(umagicOK32(c) && config.RegSize == 8 && config.useAvg) { + break + } + v.reset(OpTrunc64to32) + v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpAvg64u, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpLsh64x64, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(32) + v2.AddArg2(v3, v4) + v5 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt32) + v6.AuxInt = int64ToAuxInt(int64(umagic32(c).m)) + v5.AddArg2(v6, v3) + v1.AddArg2(v2, v5) + v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v7.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 1) + v0.AddArg2(v1, v7) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpDiv64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Div64 (Const64 [c]) (Const64 [d])) + // cond: d != 0 + // result: (Const64 [c/d]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + break 
+ } + d := auxIntToInt64(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(c / d) + return true + } + // match: (Div64 n (Const64 [c])) + // cond: isNonNegative(n) && isPowerOfTwo64(c) + // result: (Rsh64Ux64 n (Const64 [log64(c)])) + for { + n := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(isNonNegative(n) && isPowerOfTwo64(c)) { + break + } + v.reset(OpRsh64Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(log64(c)) + v.AddArg2(n, v0) + return true + } + // match: (Div64 n (Const64 [-1<<63])) + // cond: isNonNegative(n) + // result: (Const64 [0]) + for { + n := v_0 + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 || !(isNonNegative(n)) { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Div64 n (Const64 [c])) + // cond: c < 0 && c != -1<<63 + // result: (Neg64 (Div64 n (Const64 [-c]))) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c < 0 && c != -1<<63) { + break + } + v.reset(OpNeg64) + v0 := b.NewValue0(v.Pos, OpDiv64, t) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(-c) + v0.AddArg2(n, v1) + v.AddArg(v0) + return true + } + // match: (Div64 x (Const64 [-1<<63])) + // result: (Rsh64Ux64 (And64 x (Neg64 x)) (Const64 [63])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 { + break + } + v.reset(OpRsh64Ux64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v1 := b.NewValue0(v.Pos, OpNeg64, t) + v1.AddArg(x) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(63) + v.AddArg2(v0, v2) + return true + } + // match: (Div64 n (Const64 [c])) + // cond: isPowerOfTwo64(c) + // result: (Rsh64x64 (Add64 n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [int64(64-log64(c))]))) (Const64 [int64(log64(c))])) + for { + t := v.Type + n := 
v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c)) { + break + } + v.reset(OpRsh64x64) + v0 := b.NewValue0(v.Pos, OpAdd64, t) + v1 := b.NewValue0(v.Pos, OpRsh64Ux64, t) + v2 := b.NewValue0(v.Pos, OpRsh64x64, t) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(63) + v2.AddArg2(n, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(int64(64 - log64(c))) + v1.AddArg2(v2, v4) + v0.AddArg2(n, v1) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64ToAuxInt(int64(log64(c))) + v.AddArg2(v0, v5) + return true + } + // match: (Div64 x (Const64 [c])) + // cond: smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul + // result: (Sub64 (Rsh64x64 (Hmul64 (Const64 [int64(smagic64(c).m/2)]) x) (Const64 [smagic64(c).s-1])) (Rsh64x64 x (Const64 [63]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul) { + break + } + v.reset(OpSub64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh64x64, t) + v1 := b.NewValue0(v.Pos, OpHmul64, t) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(int64(smagic64(c).m / 2)) + v1.AddArg2(v2, x) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(smagic64(c).s - 1) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpRsh64x64, t) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v4.AddArg2(x, v5) + v.AddArg2(v0, v4) + return true + } + // match: (Div64 x (Const64 [c])) + // cond: smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul + // result: (Sub64 (Rsh64x64 (Add64 (Hmul64 (Const64 [int64(smagic64(c).m)]) x) x) (Const64 [smagic64(c).s])) (Rsh64x64 x (Const64 [63]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(smagicOK64(c) && smagic64(c).m&1 != 0 && 
config.useHmul) { + break + } + v.reset(OpSub64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh64x64, t) + v1 := b.NewValue0(v.Pos, OpAdd64, t) + v2 := b.NewValue0(v.Pos, OpHmul64, t) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(int64(smagic64(c).m)) + v2.AddArg2(v3, x) + v1.AddArg2(v2, x) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(smagic64(c).s) + v0.AddArg2(v1, v4) + v5 := b.NewValue0(v.Pos, OpRsh64x64, t) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v6.AuxInt = int64ToAuxInt(63) + v5.AddArg2(x, v6) + v.AddArg2(v0, v5) + return true + } + return false +} +func rewriteValuegeneric_OpDiv64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Div64F (Const64F [c]) (Const64F [d])) + // cond: c/d == c/d + // result: (Const64F [c/d]) + for { + if v_0.Op != OpConst64F { + break + } + c := auxIntToFloat64(v_0.AuxInt) + if v_1.Op != OpConst64F { + break + } + d := auxIntToFloat64(v_1.AuxInt) + if !(c/d == c/d) { + break + } + v.reset(OpConst64F) + v.AuxInt = float64ToAuxInt(c / d) + return true + } + // match: (Div64F x (Const64F [c])) + // cond: reciprocalExact64(c) + // result: (Mul64F x (Const64F [1/c])) + for { + x := v_0 + if v_1.Op != OpConst64F { + break + } + t := v_1.Type + c := auxIntToFloat64(v_1.AuxInt) + if !(reciprocalExact64(c)) { + break + } + v.reset(OpMul64F) + v0 := b.NewValue0(v.Pos, OpConst64F, t) + v0.AuxInt = float64ToAuxInt(1 / c) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpDiv64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Div64u (Const64 [c]) (Const64 [d])) + // cond: d != 0 + // result: (Const64 [int64(uint64(c)/uint64(d))]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(d != 0) { + break + } + 
v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d))) + return true + } + // match: (Div64u n (Const64 [c])) + // cond: isPowerOfTwo64(c) + // result: (Rsh64Ux64 n (Const64 [log64(c)])) + for { + n := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c)) { + break + } + v.reset(OpRsh64Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(log64(c)) + v.AddArg2(n, v0) + return true + } + // match: (Div64u n (Const64 [-1<<63])) + // result: (Rsh64Ux64 n (Const64 [63])) + for { + n := v_0 + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 { + break + } + v.reset(OpRsh64Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(63) + v.AddArg2(n, v0) + return true + } + // match: (Div64u x (Const64 [c])) + // cond: c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul + // result: (Add64 (Add64 (Add64 (Lsh64x64 (ZeroExt32to64 (Div32u (Trunc64to32 (Rsh64Ux64 x (Const64 [32]))) (Const32 [int32(c)]))) (Const64 [32])) (ZeroExt32to64 (Div32u (Trunc64to32 x) (Const32 [int32(c)])))) (Mul64 (ZeroExt32to64 (Mod32u (Trunc64to32 (Rsh64Ux64 x (Const64 [32]))) (Const32 [int32(c)]))) (Const64 [int64((1<<32)/c)]))) (ZeroExt32to64 (Div32u (Add32 (Mod32u (Trunc64to32 x) (Const32 [int32(c)])) (Mul32 (Mod32u (Trunc64to32 (Rsh64Ux64 x (Const64 [32]))) (Const32 [int32(c)])) (Const32 [int32((1<<32)%c)]))) (Const32 [int32(c)])))) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul) { + break + } + v.reset(OpAdd64) + v0 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpLsh64x64, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4 := b.NewValue0(v.Pos, OpDiv32u, typ.UInt32) + v5 := b.NewValue0(v.Pos, 
OpTrunc64to32, typ.UInt32) + v6 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) + v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v7.AuxInt = int64ToAuxInt(32) + v6.AddArg2(x, v7) + v5.AddArg(v6) + v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v8.AuxInt = int32ToAuxInt(int32(c)) + v4.AddArg2(v5, v8) + v3.AddArg(v4) + v2.AddArg2(v3, v7) + v9 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v10 := b.NewValue0(v.Pos, OpDiv32u, typ.UInt32) + v11 := b.NewValue0(v.Pos, OpTrunc64to32, typ.UInt32) + v11.AddArg(x) + v10.AddArg2(v11, v8) + v9.AddArg(v10) + v1.AddArg2(v2, v9) + v12 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v13 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v14 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32) + v14.AddArg2(v5, v8) + v13.AddArg(v14) + v15 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v15.AuxInt = int64ToAuxInt(int64((1 << 32) / c)) + v12.AddArg2(v13, v15) + v0.AddArg2(v1, v12) + v16 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v17 := b.NewValue0(v.Pos, OpDiv32u, typ.UInt32) + v18 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) + v19 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32) + v19.AddArg2(v11, v8) + v20 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v21 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v21.AuxInt = int32ToAuxInt(int32((1 << 32) % c)) + v20.AddArg2(v14, v21) + v18.AddArg2(v19, v20) + v17.AddArg2(v18, v8) + v16.AddArg(v17) + v.AddArg2(v0, v16) + return true + } + // match: (Div64u x (Const64 [c])) + // cond: umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul + // result: (Rsh64Ux64 (Hmul64u (Const64 [int64(1<<63+umagic64(c).m/2)]) x) (Const64 [umagic64(c).s-1])) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul) { + break + } + v.reset(OpRsh64Ux64) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpConst64, 
typ.UInt64) + v1.AuxInt = int64ToAuxInt(int64(1<<63 + umagic64(c).m/2)) + v0.AddArg2(v1, x) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(umagic64(c).s - 1) + v.AddArg2(v0, v2) + return true + } + // match: (Div64u x (Const64 [c])) + // cond: umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul + // result: (Rsh64Ux64 (Hmul64u (Const64 [int64(1<<63+(umagic64(c).m+1)/2)]) (Rsh64Ux64 x (Const64 [1]))) (Const64 [umagic64(c).s-2])) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul) { + break + } + v.reset(OpRsh64Ux64) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v1.AuxInt = int64ToAuxInt(int64(1<<63 + (umagic64(c).m+1)/2)) + v2 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(1) + v2.AddArg2(x, v3) + v0.AddArg2(v1, v2) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(umagic64(c).s - 2) + v.AddArg2(v0, v4) + return true + } + // match: (Div64u x (Const64 [c])) + // cond: umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul + // result: (Rsh64Ux64 (Avg64u x (Hmul64u (Const64 [int64(umagic64(c).m)]) x)) (Const64 [umagic64(c).s-1])) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul) { + break + } + v.reset(OpRsh64Ux64) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAvg64u, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(int64(umagic64(c).m)) + v1.AddArg2(v2, x) + v0.AddArg2(x, v1) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(umagic64(c).s - 1) + v.AddArg2(v0, v3) + return 
true + } + return false +} +func rewriteValuegeneric_OpDiv8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8 (Const8 [c]) (Const8 [d])) + // cond: d != 0 + // result: (Const8 [c/d]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst8 { + break + } + d := auxIntToInt8(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(c / d) + return true + } + // match: (Div8 n (Const8 [c])) + // cond: isNonNegative(n) && isPowerOfTwo8(c) + // result: (Rsh8Ux64 n (Const64 [log8(c)])) + for { + n := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(isNonNegative(n) && isPowerOfTwo8(c)) { + break + } + v.reset(OpRsh8Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(log8(c)) + v.AddArg2(n, v0) + return true + } + // match: (Div8 n (Const8 [c])) + // cond: c < 0 && c != -1<<7 + // result: (Neg8 (Div8 n (Const8 [-c]))) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(c < 0 && c != -1<<7) { + break + } + v.reset(OpNeg8) + v0 := b.NewValue0(v.Pos, OpDiv8, t) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = int8ToAuxInt(-c) + v0.AddArg2(n, v1) + v.AddArg(v0) + return true + } + // match: (Div8 x (Const8 [-1<<7 ])) + // result: (Rsh8Ux64 (And8 x (Neg8 x)) (Const64 [7 ])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != -1<<7 { + break + } + v.reset(OpRsh8Ux64) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v1 := b.NewValue0(v.Pos, OpNeg8, t) + v1.AddArg(x) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(7) + v.AddArg2(v0, v2) + return true + } + // match: (Div8 n (Const8 [c])) + // cond: isPowerOfTwo8(c) + // result: (Rsh8x64 (Add8 n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [int64( 8-log8(c))]))) (Const64 [int64(log8(c))])) 
+ for { + t := v.Type + n := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(isPowerOfTwo8(c)) { + break + } + v.reset(OpRsh8x64) + v0 := b.NewValue0(v.Pos, OpAdd8, t) + v1 := b.NewValue0(v.Pos, OpRsh8Ux64, t) + v2 := b.NewValue0(v.Pos, OpRsh8x64, t) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(7) + v2.AddArg2(n, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(int64(8 - log8(c))) + v1.AddArg2(v2, v4) + v0.AddArg2(n, v1) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64ToAuxInt(int64(log8(c))) + v.AddArg2(v0, v5) + return true + } + // match: (Div8 x (Const8 [c])) + // cond: smagicOK8(c) + // result: (Sub8 (Rsh32x64 (Mul32 (Const32 [int32(smagic8(c).m)]) (SignExt8to32 x)) (Const64 [8+smagic8(c).s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(smagicOK8(c)) { + break + } + v.reset(OpSub8) + v.Type = t + v0 := b.NewValue0(v.Pos, OpRsh32x64, t) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(int32(smagic8(c).m)) + v3 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v3.AddArg(x) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(8 + smagic8(c).s) + v0.AddArg2(v1, v4) + v5 := b.NewValue0(v.Pos, OpRsh32x64, t) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v6.AuxInt = int64ToAuxInt(31) + v5.AddArg2(v3, v6) + v.AddArg2(v0, v5) + return true + } + return false +} +func rewriteValuegeneric_OpDiv8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u (Const8 [c]) (Const8 [d])) + // cond: d != 0 + // result: (Const8 [int8(uint8(c)/uint8(d))]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst8 { + break + } 
+ d := auxIntToInt8(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(int8(uint8(c) / uint8(d))) + return true + } + // match: (Div8u n (Const8 [c])) + // cond: isPowerOfTwo8(c) + // result: (Rsh8Ux64 n (Const64 [log8(c)])) + for { + n := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(isPowerOfTwo8(c)) { + break + } + v.reset(OpRsh8Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(log8(c)) + v.AddArg2(n, v0) + return true + } + // match: (Div8u x (Const8 [c])) + // cond: umagicOK8(c) + // result: (Trunc32to8 (Rsh32Ux64 (Mul32 (Const32 [int32(1<<8+umagic8(c).m)]) (ZeroExt8to32 x)) (Const64 [8+umagic8(c).s]))) + for { + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(umagicOK8(c)) { + break + } + v.reset(OpTrunc32to8) + v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(int32(1<<8 + umagic8(c).m)) + v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v3.AddArg(x) + v1.AddArg2(v2, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(8 + umagic8(c).s) + v0.AddArg2(v1, v4) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpEq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Eq16 x x) + // result: (ConstBool [true]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Eq16 (Const16 [c]) (Add16 (Const16 [d]) x)) + // result: (Eq16 (Const16 [c-d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpAdd16 { + continue + } + _ = v_1.Args[1] + v_1_0 := 
v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + continue + } + d := auxIntToInt16(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(c - d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Eq16 (Const16 [c]) (Const16 [d])) + // result: (ConstBool [c == d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c == d) + return true + } + break + } + // match: (Eq16 (Mod16u x (Const16 [c])) (Const16 [0])) + // cond: x.Op != OpConst16 && udivisibleOK16(c) && !hasSmallRotate(config) + // result: (Eq32 (Mod32u (ZeroExt16to32 x) (Const32 [int32(uint16(c))])) (Const32 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMod16u { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_1.AuxInt) + if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 || !(x.Op != OpConst16 && udivisibleOK16(c) && !hasSmallRotate(config)) { + continue + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(int32(uint16(c))) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, v3) + return true + } + break + } + // match: (Eq16 (Mod16 x (Const16 [c])) (Const16 [0])) + // cond: x.Op != OpConst16 && sdivisibleOK16(c) && !hasSmallRotate(config) + // result: (Eq32 (Mod32 (SignExt16to32 x) (Const32 [int32(c)])) (Const32 [0])) + for { + for _i0 := 0; _i0 <= 
1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMod16 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_1.AuxInt) + if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 || !(x.Op != OpConst16 && sdivisibleOK16(c) && !hasSmallRotate(config)) { + continue + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpMod32, typ.Int32) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32) + v2.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpConst32, typ.Int32) + v3.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, v3) + return true + } + break + } + // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc64to16 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt16to64 x)) (Const64 [s]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s && x.Op != OpConst16 && udivisibleOK16(c) + // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int16(udivisible16(c).m)]) x) (Const16 [int16(16-udivisible16(c).k)]) ) (Const16 [int16(udivisible16(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_0.AuxInt) + if v_1_1.Op != OpTrunc64to16 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh64Ux64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if mul.Op != OpMul64 { + continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op != OpConst64 { + continue + } + m := auxIntToInt64(mul_0.AuxInt) + if mul_1.Op != OpZeroExt16to64 || x != mul_1.Args[0] 
{ + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_0_1.AuxInt) + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s && x.Op != OpConst16 && udivisibleOK16(c)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) + v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) + v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m)) + v1.AddArg2(v2, x) + v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k)) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max)) + v.AddArg2(v0, v4) + return true + } + } + } + break + } + // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x)) (Const64 [s]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c) + // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int16(udivisible16(c).m)]) x) (Const16 [int16(16-udivisible16(c).k)]) ) (Const16 [int16(udivisible16(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_0.AuxInt) + if v_1_1.Op != OpTrunc32to16 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh32Ux64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if mul.Op != OpMul32 { + continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op 
!= OpConst32 { + continue + } + m := auxIntToInt32(mul_0.AuxInt) + if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_0_1.AuxInt) + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) + v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) + v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m)) + v1.AddArg2(v2, x) + v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k)) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max)) + v.AddArg2(v0, v4) + return true + } + } + } + break + } + // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) (Const64 [s]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2 && x.Op != OpConst16 && udivisibleOK16(c) + // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int16(udivisible16(c).m)]) x) (Const16 [int16(16-udivisible16(c).k)]) ) (Const16 [int16(udivisible16(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_0.AuxInt) + if v_1_1.Op != OpTrunc32to16 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh32Ux64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if mul.Op != OpMul32 { + 
continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op != OpConst32 { + continue + } + m := auxIntToInt32(mul_0.AuxInt) + if mul_1.Op != OpRsh32Ux64 { + continue + } + _ = mul_1.Args[1] + mul_1_0 := mul_1.Args[0] + if mul_1_0.Op != OpZeroExt16to32 || x != mul_1_0.Args[0] { + continue + } + mul_1_1 := mul_1.Args[1] + if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_0_1.AuxInt) + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2 && x.Op != OpConst16 && udivisibleOK16(c)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) + v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) + v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m)) + v1.AddArg2(v2, x) + v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k)) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max)) + v.AddArg2(v0, v4) + return true + } + } + } + break + } + // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x))) (Const64 [s]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c) + // result: (Leq16U (RotateLeft16 (Mul16 (Const16 [int16(udivisible16(c).m)]) x) (Const16 [int16(16-udivisible16(c).k)]) ) (Const16 [int16(udivisible16(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul16 { + continue + } + _ = v_1.Args[1] + v_1_0 := 
v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_0.AuxInt) + if v_1_1.Op != OpTrunc32to16 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh32Ux64 { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAvg32u { + continue + } + _ = v_1_1_0_0.Args[1] + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + if v_1_1_0_0_0.Op != OpLsh32x64 { + continue + } + _ = v_1_1_0_0_0.Args[1] + v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] + if v_1_1_0_0_0_0.Op != OpZeroExt16to32 || x != v_1_1_0_0_0_0.Args[0] { + continue + } + v_1_1_0_0_0_1 := v_1_1_0_0_0.Args[1] + if v_1_1_0_0_0_1.Op != OpConst64 || auxIntToInt64(v_1_1_0_0_0_1.AuxInt) != 16 { + continue + } + mul := v_1_1_0_0.Args[1] + if mul.Op != OpMul32 { + continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op != OpConst32 { + continue + } + m := auxIntToInt32(mul_0.AuxInt) + if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_0_1.AuxInt) + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16) + v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) + v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m)) + v1.AddArg2(v2, x) + v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k)) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max)) + v.AddArg2(v0, v4) + return true + } + } + } + break + } + // match: 
(Eq16 x (Mul16 (Const16 [c]) (Sub16 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt16to32 x)) (Const64 [s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic16(c).m) && s == 16+smagic16(c).s && x.Op != OpConst16 && sdivisibleOK16(c) + // result: (Leq16U (RotateLeft16 (Add16 (Mul16 (Const16 [int16(sdivisible16(c).m)]) x) (Const16 [int16(sdivisible16(c).a)]) ) (Const16 [int16(16-sdivisible16(c).k)]) ) (Const16 [int16(sdivisible16(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_0.AuxInt) + if v_1_1.Op != OpSub16 { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh32x64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if mul.Op != OpMul32 { + continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op != OpConst32 { + continue + } + m := auxIntToInt32(mul_0.AuxInt) + if mul_1.Op != OpSignExt16to32 || x != mul_1.Args[0] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_0_1.AuxInt) + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpRsh32x64 { + continue + } + _ = v_1_1_1.Args[1] + v_1_1_1_0 := v_1_1_1.Args[0] + if v_1_1_1_0.Op != OpSignExt16to32 || x != v_1_1_1_0.Args[0] { + continue + } + v_1_1_1_1 := v_1_1_1.Args[1] + if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic16(c).m) && s == 16+smagic16(c).s && x.Op != OpConst16 && sdivisibleOK16(c)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, 
OpRotateLeft16, typ.UInt16) + v1 := b.NewValue0(v.Pos, OpAdd16, typ.UInt16) + v2 := b.NewValue0(v.Pos, OpMul16, typ.UInt16) + v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v3.AuxInt = int16ToAuxInt(int16(sdivisible16(c).m)) + v2.AddArg2(v3, x) + v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v4.AuxInt = int16ToAuxInt(int16(sdivisible16(c).a)) + v1.AddArg2(v2, v4) + v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v5.AuxInt = int16ToAuxInt(int16(16 - sdivisible16(c).k)) + v0.AddArg2(v1, v5) + v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16) + v6.AuxInt = int16ToAuxInt(int16(sdivisible16(c).max)) + v.AddArg2(v0, v6) + return true + } + } + } + break + } + // match: (Eq16 n (Lsh16x64 (Rsh16x64 (Add16 n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) + // cond: k > 0 && k < 15 && kbar == 16 - k + // result: (Eq16 (And16 n (Const16 [1< [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 + if v_1.Op != OpLsh16x64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpRsh16x64 { + continue + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAdd16 { + continue + } + t := v_1_0_0.Type + _ = v_1_0_0.Args[1] + v_1_0_0_0 := v_1_0_0.Args[0] + v_1_0_0_1 := v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { + if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh16Ux64 || v_1_0_0_1.Type != t { + continue + } + _ = v_1_0_0_1.Args[1] + v_1_0_0_1_0 := v_1_0_0_1.Args[0] + if v_1_0_0_1_0.Op != OpRsh16x64 || v_1_0_0_1_0.Type != t { + continue + } + _ = v_1_0_0_1_0.Args[1] + if n != v_1_0_0_1_0.Args[0] { + continue + } + v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] + if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 15 { + continue + } + v_1_0_0_1_1 := v_1_0_0_1.Args[1] + if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { + continue + } + kbar := 
auxIntToInt64(v_1_0_0_1_1.AuxInt) + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { + continue + } + k := auxIntToInt64(v_1_0_1.AuxInt) + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 15 && kbar == 16-k) { + continue + } + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = int16ToAuxInt(1< x (Const16 [y])) (Const16 [y])) + // cond: oneBit16(y) + // result: (Neq16 (And16 x (Const16 [y])) (Const16 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAnd16 { + continue + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpConst16 || v_0_1.Type != t { + continue + } + y := auxIntToInt16(v_0_1.AuxInt) + if v_1.Op != OpConst16 || v_1.Type != t || auxIntToInt16(v_1.AuxInt) != y || !(oneBit16(y)) { + continue + } + v.reset(OpNeq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = int16ToAuxInt(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = int16ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + return false +} +func rewriteValuegeneric_OpEq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq32 x x) + // result: (ConstBool [true]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Eq32 (Const32 [c]) (Add32 (Const32 [d]) x)) + // result: (Eq32 (Const32 [c-d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpAdd32 { + continue + } + _ = v_1.Args[1] + v_1_0 := 
v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst32 || v_1_0.Type != t { + continue + } + d := auxIntToInt32(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(c - d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Eq32 (Const32 [c]) (Const32 [d])) + // result: (ConstBool [c == d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c == d) + return true + } + break + } + // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) x) (Const64 [s])) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c) + // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int32(udivisible32(c).m)]) x) (Const32 [int32(32-udivisible32(c).k)]) ) (Const32 [int32(udivisible32(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0.AuxInt) + if v_1_1.Op != OpRsh32Ux64 { + continue + } + _ = v_1_1.Args[1] + mul := v_1_1.Args[0] + if mul.Op != OpHmul32u { + continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op != OpConst32 { + continue + } + m := auxIntToInt32(mul_0.AuxInt) + if x != mul_1 { + continue + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_1.AuxInt) + if 
!(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m)) + v1.AddArg2(v2, x) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k)) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max)) + v.AddArg2(v0, v4) + return true + } + } + } + break + } + // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) (Rsh32Ux64 x (Const64 [1]))) (Const64 [s])) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c) + // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int32(udivisible32(c).m)]) x) (Const32 [int32(32-udivisible32(c).k)]) ) (Const32 [int32(udivisible32(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0.AuxInt) + if v_1_1.Op != OpRsh32Ux64 { + continue + } + _ = v_1_1.Args[1] + mul := v_1_1.Args[0] + if mul.Op != OpHmul32u { + continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op != OpConst32 || mul_0.Type != typ.UInt32 { + continue + } + m := auxIntToInt32(mul_0.AuxInt) + if mul_1.Op != OpRsh32Ux64 { + continue + } + _ = mul_1.Args[1] + if x != mul_1.Args[0] { + continue + } + mul_1_1 := 
mul_1.Args[1] + if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 { + continue + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_1.AuxInt) + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m)) + v1.AddArg2(v2, x) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k)) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max)) + v.AddArg2(v0, v4) + return true + } + } + } + break + } + // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 (Avg32u x mul:(Hmul32u (Const32 [m]) x)) (Const64 [s])) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic32(c).m) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c) + // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int32(udivisible32(c).m)]) x) (Const32 [int32(32-udivisible32(c).k)]) ) (Const32 [int32(udivisible32(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0.AuxInt) + if v_1_1.Op != OpRsh32Ux64 { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAvg32u { + continue + } + _ = v_1_1_0.Args[1] + if x != v_1_1_0.Args[0] { + continue + } + mul := v_1_1_0.Args[1] + if mul.Op != OpHmul32u { + continue + } + _ = mul.Args[1] + mul_0 := 
mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op != OpConst32 { + continue + } + m := auxIntToInt32(mul_0.AuxInt) + if x != mul_1 { + continue + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_1.AuxInt) + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic32(c).m) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m)) + v1.AddArg2(v2, x) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k)) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max)) + v.AddArg2(v0, v4) + return true + } + } + } + break + } + // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x)) (Const64 [s]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c) + // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int32(udivisible32(c).m)]) x) (Const32 [int32(32-udivisible32(c).k)]) ) (Const32 [int32(udivisible32(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0.AuxInt) + if v_1_1.Op != OpTrunc64to32 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh64Ux64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if 
mul.Op != OpMul64 { + continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op != OpConst64 { + continue + } + m := auxIntToInt64(mul_0.AuxInt) + if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_0_1.AuxInt) + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m)) + v1.AddArg2(v2, x) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k)) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max)) + v.AddArg2(v0, v4) + return true + } + } + } + break + } + // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) (Const64 [s]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c) + // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int32(udivisible32(c).m)]) x) (Const32 [int32(32-udivisible32(c).k)]) ) (Const32 [int32(udivisible32(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0.AuxInt) + if v_1_1.Op != 
OpTrunc64to32 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh64Ux64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if mul.Op != OpMul64 { + continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op != OpConst64 { + continue + } + m := auxIntToInt64(mul_0.AuxInt) + if mul_1.Op != OpRsh64Ux64 { + continue + } + _ = mul_1.Args[1] + mul_1_0 := mul_1.Args[0] + if mul_1_0.Op != OpZeroExt32to64 || x != mul_1_0.Args[0] { + continue + } + mul_1_1 := mul_1.Args[1] + if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_0_1.AuxInt) + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m)) + v1.AddArg2(v2, x) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k)) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max)) + v.AddArg2(v0, v4) + return true + } + } + } + break + } + // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x))) (Const64 [s]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c) + // result: (Leq32U (RotateLeft32 (Mul32 (Const32 [int32(udivisible32(c).m)]) x) (Const32 [int32(32-udivisible32(c).k)]) ) (Const32 
[int32(udivisible32(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0.AuxInt) + if v_1_1.Op != OpTrunc64to32 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh64Ux64 { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAvg64u { + continue + } + _ = v_1_1_0_0.Args[1] + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + if v_1_1_0_0_0.Op != OpLsh64x64 { + continue + } + _ = v_1_1_0_0_0.Args[1] + v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] + if v_1_1_0_0_0_0.Op != OpZeroExt32to64 || x != v_1_1_0_0_0_0.Args[0] { + continue + } + v_1_1_0_0_0_1 := v_1_1_0_0_0.Args[1] + if v_1_1_0_0_0_1.Op != OpConst64 || auxIntToInt64(v_1_1_0_0_0_1.AuxInt) != 32 { + continue + } + mul := v_1_1_0_0.Args[1] + if mul.Op != OpMul64 { + continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op != OpConst64 { + continue + } + m := auxIntToInt64(mul_0.AuxInt) + if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_0_1.AuxInt) + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m)) + v1.AddArg2(v2, x) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k)) + v0.AddArg2(v1, 
v3) + v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max)) + v.AddArg2(v0, v4) + return true + } + } + } + break + } + // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh64x64 mul:(Mul64 (Const64 [m]) (SignExt32to64 x)) (Const64 [s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic32(c).m) && s == 32+smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c) + // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int32(sdivisible32(c).m)]) x) (Const32 [int32(sdivisible32(c).a)]) ) (Const32 [int32(32-sdivisible32(c).k)]) ) (Const32 [int32(sdivisible32(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0.AuxInt) + if v_1_1.Op != OpSub32 { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh64x64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if mul.Op != OpMul64 { + continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op != OpConst64 { + continue + } + m := auxIntToInt64(mul_0.AuxInt) + if mul_1.Op != OpSignExt32to64 || x != mul_1.Args[0] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_0_1.AuxInt) + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpRsh64x64 { + continue + } + _ = v_1_1_1.Args[1] + v_1_1_1_0 := v_1_1_1.Args[0] + if v_1_1_1_0.Op != OpSignExt32to64 || x != v_1_1_1_0.Args[0] { + continue + } + v_1_1_1_1 := v_1_1_1.Args[1] + if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 63 || !(v.Block.Func.pass.name != 
"opt" && mul.Uses == 1 && m == int64(smagic32(c).m) && s == 32+smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m)) + v2.AddArg2(v3, x) + v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a)) + v1.AddArg2(v2, v4) + v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v5.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k)) + v0.AddArg2(v1, v5) + v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max)) + v.AddArg2(v0, v6) + return true + } + } + } + break + } + // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 mul:(Hmul32 (Const32 [m]) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1 && x.Op != OpConst32 && sdivisibleOK32(c) + // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int32(sdivisible32(c).m)]) x) (Const32 [int32(sdivisible32(c).a)]) ) (Const32 [int32(32-sdivisible32(c).k)]) ) (Const32 [int32(sdivisible32(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0.AuxInt) + if v_1_1.Op != OpSub32 { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh32x64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if mul.Op != OpHmul32 { + continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; 
_i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op != OpConst32 { + continue + } + m := auxIntToInt32(mul_0.AuxInt) + if x != mul_1 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_0_1.AuxInt) + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpRsh32x64 { + continue + } + _ = v_1_1_1.Args[1] + if x != v_1_1_1.Args[0] { + continue + } + v_1_1_1_1 := v_1_1_1.Args[1] + if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1 && x.Op != OpConst32 && sdivisibleOK32(c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m)) + v2.AddArg2(v3, x) + v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a)) + v1.AddArg2(v2, v4) + v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v5.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k)) + v0.AddArg2(v1, v5) + v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max)) + v.AddArg2(v0, v6) + return true + } + } + } + break + } + // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 (Add32 mul:(Hmul32 (Const32 [m]) x) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m) && s == smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c) + // result: (Leq32U (RotateLeft32 (Add32 (Mul32 (Const32 [int32(sdivisible32(c).m)]) x) (Const32 [int32(sdivisible32(c).a)]) ) (Const32 [int32(32-sdivisible32(c).k)]) ) (Const32 [int32(sdivisible32(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != 
OpMul32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0.AuxInt) + if v_1_1.Op != OpSub32 { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh32x64 { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAdd32 { + continue + } + _ = v_1_1_0_0.Args[1] + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + v_1_1_0_0_1 := v_1_1_0_0.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_1_0_0_0, v_1_1_0_0_1 = _i2+1, v_1_1_0_0_1, v_1_1_0_0_0 { + mul := v_1_1_0_0_0 + if mul.Op != OpHmul32 { + continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i3 := 0; _i3 <= 1; _i3, mul_0, mul_1 = _i3+1, mul_1, mul_0 { + if mul_0.Op != OpConst32 { + continue + } + m := auxIntToInt32(mul_0.AuxInt) + if x != mul_1 || x != v_1_1_0_0_1 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_0_1.AuxInt) + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpRsh32x64 { + continue + } + _ = v_1_1_1.Args[1] + if x != v_1_1_1.Args[0] { + continue + } + v_1_1_1_1 := v_1_1_1.Args[1] + if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m) && s == smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m)) + v2.AddArg2(v3, x) + v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a)) + v1.AddArg2(v2, v4) + v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v5.AuxInt = 
int32ToAuxInt(int32(32 - sdivisible32(c).k)) + v0.AddArg2(v1, v5) + v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max)) + v.AddArg2(v0, v6) + return true + } + } + } + } + break + } + // match: (Eq32 n (Lsh32x64 (Rsh32x64 (Add32 n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) + // cond: k > 0 && k < 31 && kbar == 32 - k + // result: (Eq32 (And32 n (Const32 [1< [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 + if v_1.Op != OpLsh32x64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpRsh32x64 { + continue + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAdd32 { + continue + } + t := v_1_0_0.Type + _ = v_1_0_0.Args[1] + v_1_0_0_0 := v_1_0_0.Args[0] + v_1_0_0_1 := v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { + if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh32Ux64 || v_1_0_0_1.Type != t { + continue + } + _ = v_1_0_0_1.Args[1] + v_1_0_0_1_0 := v_1_0_0_1.Args[0] + if v_1_0_0_1_0.Op != OpRsh32x64 || v_1_0_0_1_0.Type != t { + continue + } + _ = v_1_0_0_1_0.Args[1] + if n != v_1_0_0_1_0.Args[0] { + continue + } + v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] + if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 31 { + continue + } + v_1_0_0_1_1 := v_1_0_0_1.Args[1] + if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { + continue + } + kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt) + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { + continue + } + k := auxIntToInt64(v_1_0_1.AuxInt) + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 31 && kbar == 32-k) { + continue + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v1 := b.NewValue0(v.Pos, OpConst32, t) + 
v1.AuxInt = int32ToAuxInt(1< x (Const32 [y])) (Const32 [y])) + // cond: oneBit32(y) + // result: (Neq32 (And32 x (Const32 [y])) (Const32 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAnd32 { + continue + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpConst32 || v_0_1.Type != t { + continue + } + y := auxIntToInt32(v_0_1.AuxInt) + if v_1.Op != OpConst32 || v_1.Type != t || auxIntToInt32(v_1.AuxInt) != y || !(oneBit32(y)) { + continue + } + v.reset(OpNeq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = int32ToAuxInt(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + return false +} +func rewriteValuegeneric_OpEq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Eq32F (Const32F [c]) (Const32F [d])) + // result: (ConstBool [c == d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32F { + continue + } + c := auxIntToFloat32(v_0.AuxInt) + if v_1.Op != OpConst32F { + continue + } + d := auxIntToFloat32(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c == d) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpEq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq64 x x) + // result: (ConstBool [true]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Eq64 (Const64 [c]) (Add64 (Const64 [d]) x)) + // result: (Eq64 (Const64 [c-d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != 
OpAdd64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := auxIntToInt64(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c - d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Eq64 (Const64 [c]) (Const64 [d])) + // result: (ConstBool [c == d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c == d) + return true + } + break + } + // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) x) (Const64 [s])) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic64(c).m/2) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c) + // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible64(c).m)]) x) (Const64 [64-udivisible64(c).k]) ) (Const64 [int64(udivisible64(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_0.AuxInt) + if v_1_1.Op != OpRsh64Ux64 { + continue + } + _ = v_1_1.Args[1] + mul := v_1_1.Args[0] + if mul.Op != OpHmul64u { + continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op != OpConst64 { + continue + } + m := auxIntToInt64(mul_0.AuxInt) + if x != mul_1 { + continue + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpConst64 { + continue + 
} + s := auxIntToInt64(v_1_1_1.AuxInt) + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic64(c).m/2) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m)) + v1.AddArg2(v2, x) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64 - udivisible64(c).k) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max)) + v.AddArg2(v0, v4) + return true + } + } + } + break + } + // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) (Rsh64Ux64 x (Const64 [1]))) (Const64 [s])) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic64(c).m+1)/2) && s == umagic64(c).s-2 && x.Op != OpConst64 && udivisibleOK64(c) + // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible64(c).m)]) x) (Const64 [64-udivisible64(c).k]) ) (Const64 [int64(udivisible64(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_0.AuxInt) + if v_1_1.Op != OpRsh64Ux64 { + continue + } + _ = v_1_1.Args[1] + mul := v_1_1.Args[0] + if mul.Op != OpHmul64u { + continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op != OpConst64 { + continue + } + m := auxIntToInt64(mul_0.AuxInt) + if mul_1.Op != OpRsh64Ux64 { + continue + } + _ = mul_1.Args[1] + if x != mul_1.Args[0] { + continue + } + mul_1_1 := 
mul_1.Args[1] + if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 { + continue + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_1.AuxInt) + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic64(c).m+1)/2) && s == umagic64(c).s-2 && x.Op != OpConst64 && udivisibleOK64(c)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m)) + v1.AddArg2(v2, x) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64 - udivisible64(c).k) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max)) + v.AddArg2(v0, v4) + return true + } + } + } + break + } + // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 (Avg64u x mul:(Hmul64u (Const64 [m]) x)) (Const64 [s])) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic64(c).m) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c) + // result: (Leq64U (RotateLeft64 (Mul64 (Const64 [int64(udivisible64(c).m)]) x) (Const64 [64-udivisible64(c).k]) ) (Const64 [int64(udivisible64(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_0.AuxInt) + if v_1_1.Op != OpRsh64Ux64 { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAvg64u { + continue + } + _ = v_1_1_0.Args[1] + if x != v_1_1_0.Args[0] { + continue + } + mul := v_1_1_0.Args[1] + if mul.Op != OpHmul64u { + continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := 
mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op != OpConst64 { + continue + } + m := auxIntToInt64(mul_0.AuxInt) + if x != mul_1 { + continue + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_1.AuxInt) + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic64(c).m) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m)) + v1.AddArg2(v2, x) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(64 - udivisible64(c).k) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max)) + v.AddArg2(v0, v4) + return true + } + } + } + break + } + // match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 mul:(Hmul64 (Const64 [m]) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1 && x.Op != OpConst64 && sdivisibleOK64(c) + // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible64(c).m)]) x) (Const64 [int64(sdivisible64(c).a)]) ) (Const64 [64-sdivisible64(c).k]) ) (Const64 [int64(sdivisible64(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_0.AuxInt) + if v_1_1.Op != OpSub64 { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh64x64 { + continue + } + _ = v_1_1_0.Args[1] + mul := 
v_1_1_0.Args[0] + if mul.Op != OpHmul64 { + continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op != OpConst64 { + continue + } + m := auxIntToInt64(mul_0.AuxInt) + if x != mul_1 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_0_1.AuxInt) + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpRsh64x64 { + continue + } + _ = v_1_1_1.Args[1] + if x != v_1_1_1.Args[0] { + continue + } + v_1_1_1_1 := v_1_1_1.Args[1] + if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1 && x.Op != OpConst64 && sdivisibleOK64(c)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(int64(sdivisible64(c).m)) + v2.AddArg2(v3, x) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt = int64ToAuxInt(int64(sdivisible64(c).a)) + v1.AddArg2(v2, v4) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64ToAuxInt(64 - sdivisible64(c).k) + v0.AddArg2(v1, v5) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v6.AuxInt = int64ToAuxInt(int64(sdivisible64(c).max)) + v.AddArg2(v0, v6) + return true + } + } + } + break + } + // match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 (Add64 mul:(Hmul64 (Const64 [m]) x) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m) && s == smagic64(c).s && x.Op != OpConst64 && sdivisibleOK64(c) + // result: (Leq64U (RotateLeft64 (Add64 (Mul64 (Const64 [int64(sdivisible64(c).m)]) x) (Const64 [int64(sdivisible64(c).a)]) ) (Const64 [64-sdivisible64(c).k]) ) 
(Const64 [int64(sdivisible64(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_0.AuxInt) + if v_1_1.Op != OpSub64 { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh64x64 { + continue + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpAdd64 { + continue + } + _ = v_1_1_0_0.Args[1] + v_1_1_0_0_0 := v_1_1_0_0.Args[0] + v_1_1_0_0_1 := v_1_1_0_0.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_1_0_0_0, v_1_1_0_0_1 = _i2+1, v_1_1_0_0_1, v_1_1_0_0_0 { + mul := v_1_1_0_0_0 + if mul.Op != OpHmul64 { + continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i3 := 0; _i3 <= 1; _i3, mul_0, mul_1 = _i3+1, mul_1, mul_0 { + if mul_0.Op != OpConst64 { + continue + } + m := auxIntToInt64(mul_0.AuxInt) + if x != mul_1 || x != v_1_1_0_0_1 { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_0_1.AuxInt) + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpRsh64x64 { + continue + } + _ = v_1_1_1.Args[1] + if x != v_1_1_1.Args[0] { + continue + } + v_1_1_1_1 := v_1_1_1.Args[1] + if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m) && s == smagic64(c).s && x.Op != OpConst64 && sdivisibleOK64(c)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3.AuxInt = int64ToAuxInt(int64(sdivisible64(c).m)) + v2.AddArg2(v3, x) + v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4.AuxInt 
= int64ToAuxInt(int64(sdivisible64(c).a)) + v1.AddArg2(v2, v4) + v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5.AuxInt = int64ToAuxInt(64 - sdivisible64(c).k) + v0.AddArg2(v1, v5) + v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v6.AuxInt = int64ToAuxInt(int64(sdivisible64(c).max)) + v.AddArg2(v0, v6) + return true + } + } + } + } + break + } + // match: (Eq64 n (Lsh64x64 (Rsh64x64 (Add64 n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) + // cond: k > 0 && k < 63 && kbar == 64 - k + // result: (Eq64 (And64 n (Const64 [1< [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 + if v_1.Op != OpLsh64x64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpRsh64x64 { + continue + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAdd64 { + continue + } + t := v_1_0_0.Type + _ = v_1_0_0.Args[1] + v_1_0_0_0 := v_1_0_0.Args[0] + v_1_0_0_1 := v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { + if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh64Ux64 || v_1_0_0_1.Type != t { + continue + } + _ = v_1_0_0_1.Args[1] + v_1_0_0_1_0 := v_1_0_0_1.Args[0] + if v_1_0_0_1_0.Op != OpRsh64x64 || v_1_0_0_1_0.Type != t { + continue + } + _ = v_1_0_0_1_0.Args[1] + if n != v_1_0_0_1_0.Args[0] { + continue + } + v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] + if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 63 { + continue + } + v_1_0_0_1_1 := v_1_0_0_1.Args[1] + if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { + continue + } + kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt) + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { + continue + } + k := auxIntToInt64(v_1_0_1.AuxInt) + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 63 && kbar == 64-k) { 
+ continue + } + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(1< x (Const64 [y])) (Const64 [y])) + // cond: oneBit64(y) + // result: (Neq64 (And64 x (Const64 [y])) (Const64 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAnd64 { + continue + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpConst64 || v_0_1.Type != t { + continue + } + y := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 || v_1.Type != t || auxIntToInt64(v_1.AuxInt) != y || !(oneBit64(y)) { + continue + } + v.reset(OpNeq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + return false +} +func rewriteValuegeneric_OpEq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Eq64F (Const64F [c]) (Const64F [d])) + // result: (ConstBool [c == d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64F { + continue + } + c := auxIntToFloat64(v_0.AuxInt) + if v_1.Op != OpConst64F { + continue + } + d := auxIntToFloat64(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c == d) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpEq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Eq8 x x) + // result: (ConstBool [true]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Eq8 (Const8 [c]) (Add8 (Const8 [d]) x)) + // result: (Eq8 (Const8 [c-d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, 
v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { + continue + } + t := v_0.Type + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpAdd8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + continue + } + d := auxIntToInt8(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(c - d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Eq8 (Const8 [c]) (Const8 [d])) + // result: (ConstBool [c == d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c == d) + return true + } + break + } + // match: (Eq8 (Mod8u x (Const8 [c])) (Const8 [0])) + // cond: x.Op != OpConst8 && udivisibleOK8(c) && !hasSmallRotate(config) + // result: (Eq32 (Mod32u (ZeroExt8to32 x) (Const32 [int32(uint8(c))])) (Const32 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMod8u { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_1.AuxInt) + if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 || !(x.Op != OpConst8 && udivisibleOK8(c) && !hasSmallRotate(config)) { + continue + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v2.AuxInt = int32ToAuxInt(int32(uint8(c))) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32) + v3.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, v3) + return true + } + break + } + // match: (Eq8 (Mod8 x (Const8 [c])) (Const8 [0])) + // cond: x.Op != OpConst8 && 
sdivisibleOK8(c) && !hasSmallRotate(config) + // result: (Eq32 (Mod32 (SignExt8to32 x) (Const32 [int32(c)])) (Const32 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMod8 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_1.AuxInt) + if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 || !(x.Op != OpConst8 && sdivisibleOK8(c) && !hasSmallRotate(config)) { + continue + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpMod32, typ.Int32) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32) + v2.AuxInt = int32ToAuxInt(int32(c)) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpConst32, typ.Int32) + v3.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, v3) + return true + } + break + } + // match: (Eq8 x (Mul8 (Const8 [c]) (Trunc32to8 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt8to32 x)) (Const64 [s]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<8+umagic8(c).m) && s == 8+umagic8(c).s && x.Op != OpConst8 && udivisibleOK8(c) + // result: (Leq8U (RotateLeft8 (Mul8 (Const8 [int8(udivisible8(c).m)]) x) (Const8 [int8(8-udivisible8(c).k)]) ) (Const8 [int8(udivisible8(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_0.AuxInt) + if v_1_1.Op != OpTrunc32to8 { + continue + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh32Ux64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if mul.Op != OpMul32 { + continue + } + _ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op 
!= OpConst32 { + continue + } + m := auxIntToInt32(mul_0.AuxInt) + if mul_1.Op != OpZeroExt8to32 || x != mul_1.Args[0] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_0_1.AuxInt) + if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<8+umagic8(c).m) && s == 8+umagic8(c).s && x.Op != OpConst8 && udivisibleOK8(c)) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) + v1 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) + v2 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) + v2.AuxInt = int8ToAuxInt(int8(udivisible8(c).m)) + v1.AddArg2(v2, x) + v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) + v3.AuxInt = int8ToAuxInt(int8(8 - udivisible8(c).k)) + v0.AddArg2(v1, v3) + v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) + v4.AuxInt = int8ToAuxInt(int8(udivisible8(c).max)) + v.AddArg2(v0, v4) + return true + } + } + } + break + } + // match: (Eq8 x (Mul8 (Const8 [c]) (Sub8 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt8to32 x)) (Const64 [s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31]))) ) ) + // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic8(c).m) && s == 8+smagic8(c).s && x.Op != OpConst8 && sdivisibleOK8(c) + // result: (Leq8U (RotateLeft8 (Add8 (Mul8 (Const8 [int8(sdivisible8(c).m)]) x) (Const8 [int8(sdivisible8(c).a)]) ) (Const8 [int8(8-sdivisible8(c).k)]) ) (Const8 [int8(sdivisible8(c).max)]) ) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpMul8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_0.AuxInt) + if v_1_1.Op != OpSub8 { + continue + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpRsh32x64 { + continue + } + _ = v_1_1_0.Args[1] + mul := v_1_1_0.Args[0] + if mul.Op != OpMul32 { + continue + } + 
_ = mul.Args[1] + mul_0 := mul.Args[0] + mul_1 := mul.Args[1] + for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 { + if mul_0.Op != OpConst32 { + continue + } + m := auxIntToInt32(mul_0.AuxInt) + if mul_1.Op != OpSignExt8to32 || x != mul_1.Args[0] { + continue + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpConst64 { + continue + } + s := auxIntToInt64(v_1_1_0_1.AuxInt) + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpRsh32x64 { + continue + } + _ = v_1_1_1.Args[1] + v_1_1_1_0 := v_1_1_1.Args[0] + if v_1_1_1_0.Op != OpSignExt8to32 || x != v_1_1_1_0.Args[0] { + continue + } + v_1_1_1_1 := v_1_1_1.Args[1] + if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic8(c).m) && s == 8+smagic8(c).s && x.Op != OpConst8 && sdivisibleOK8(c)) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8) + v1 := b.NewValue0(v.Pos, OpAdd8, typ.UInt8) + v2 := b.NewValue0(v.Pos, OpMul8, typ.UInt8) + v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) + v3.AuxInt = int8ToAuxInt(int8(sdivisible8(c).m)) + v2.AddArg2(v3, x) + v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) + v4.AuxInt = int8ToAuxInt(int8(sdivisible8(c).a)) + v1.AddArg2(v2, v4) + v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) + v5.AuxInt = int8ToAuxInt(int8(8 - sdivisible8(c).k)) + v0.AddArg2(v1, v5) + v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8) + v6.AuxInt = int8ToAuxInt(int8(sdivisible8(c).max)) + v.AddArg2(v0, v6) + return true + } + } + } + break + } + // match: (Eq8 n (Lsh8x64 (Rsh8x64 (Add8 n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) + // cond: k > 0 && k < 7 && kbar == 8 - k + // result: (Eq8 (And8 n (Const8 [1< [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 + if v_1.Op != OpLsh8x64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpRsh8x64 { + continue + } + _ = 
v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAdd8 { + continue + } + t := v_1_0_0.Type + _ = v_1_0_0.Args[1] + v_1_0_0_0 := v_1_0_0.Args[0] + v_1_0_0_1 := v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { + if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh8Ux64 || v_1_0_0_1.Type != t { + continue + } + _ = v_1_0_0_1.Args[1] + v_1_0_0_1_0 := v_1_0_0_1.Args[0] + if v_1_0_0_1_0.Op != OpRsh8x64 || v_1_0_0_1_0.Type != t { + continue + } + _ = v_1_0_0_1_0.Args[1] + if n != v_1_0_0_1_0.Args[0] { + continue + } + v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] + if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 7 { + continue + } + v_1_0_0_1_1 := v_1_0_0_1.Args[1] + if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { + continue + } + kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt) + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { + continue + } + k := auxIntToInt64(v_1_0_1.AuxInt) + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 7 && kbar == 8-k) { + continue + } + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = int8ToAuxInt(1< x (Const8 [y])) (Const8 [y])) + // cond: oneBit8(y) + // result: (Neq8 (And8 x (Const8 [y])) (Const8 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAnd8 { + continue + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpConst8 || v_0_1.Type != t { + continue + } + y := auxIntToInt8(v_0_1.AuxInt) + if v_1.Op != OpConst8 || v_1.Type != t || auxIntToInt8(v_1.AuxInt) != y || !(oneBit8(y)) { + continue + } + v.reset(OpNeq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v1 := b.NewValue0(v.Pos, 
OpConst8, t) + v1.AuxInt = int8ToAuxInt(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = int8ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + return false +} +func rewriteValuegeneric_OpEqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (EqB (ConstBool [c]) (ConstBool [d])) + // result: (ConstBool [c == d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConstBool { + continue + } + c := auxIntToBool(v_0.AuxInt) + if v_1.Op != OpConstBool { + continue + } + d := auxIntToBool(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c == d) + return true + } + break + } + // match: (EqB (ConstBool [false]) x) + // result: (Not x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != false { + continue + } + x := v_1 + v.reset(OpNot) + v.AddArg(x) + return true + } + break + } + // match: (EqB (ConstBool [true]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != true { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpEqInter(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqInter x y) + // result: (EqPtr (ITab x) (ITab y)) + for { + x := v_0 + y := v_1 + v.reset(OpEqPtr) + v0 := b.NewValue0(v.Pos, OpITab, typ.Uintptr) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpITab, typ.Uintptr) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValuegeneric_OpEqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqPtr x x) + // result: (ConstBool [true]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (EqPtr (Addr {x} _) 
(Addr {y} _)) + // result: (ConstBool [x == y]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAddr { + continue + } + x := auxToSym(v_0.Aux) + if v_1.Op != OpAddr { + continue + } + y := auxToSym(v_1.Aux) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(x == y) + return true + } + break + } + // match: (EqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _))) + // result: (ConstBool [x == y && o == 0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAddr { + continue + } + x := auxToSym(v_0.Aux) + if v_1.Op != OpOffPtr { + continue + } + o := auxIntToInt64(v_1.AuxInt) + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAddr { + continue + } + y := auxToSym(v_1_0.Aux) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(x == y && o == 0) + return true + } + break + } + // match: (EqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _))) + // result: (ConstBool [x == y && o1 == o2]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpOffPtr { + continue + } + o1 := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAddr { + continue + } + x := auxToSym(v_0_0.Aux) + if v_1.Op != OpOffPtr { + continue + } + o2 := auxIntToInt64(v_1.AuxInt) + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAddr { + continue + } + y := auxToSym(v_1_0.Aux) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(x == y && o1 == o2) + return true + } + break + } + // match: (EqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _)) + // result: (ConstBool [x == y]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLocalAddr { + continue + } + x := auxToSym(v_0.Aux) + if v_1.Op != OpLocalAddr { + continue + } + y := auxToSym(v_1.Aux) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(x == y) + return true + } + break + } + // match: (EqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _))) + // result: (ConstBool [x == y && o == 0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 
= _i0+1, v_1, v_0 { + if v_0.Op != OpLocalAddr { + continue + } + x := auxToSym(v_0.Aux) + if v_1.Op != OpOffPtr { + continue + } + o := auxIntToInt64(v_1.AuxInt) + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpLocalAddr { + continue + } + y := auxToSym(v_1_0.Aux) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(x == y && o == 0) + return true + } + break + } + // match: (EqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _))) + // result: (ConstBool [x == y && o1 == o2]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpOffPtr { + continue + } + o1 := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLocalAddr { + continue + } + x := auxToSym(v_0_0.Aux) + if v_1.Op != OpOffPtr { + continue + } + o2 := auxIntToInt64(v_1.AuxInt) + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpLocalAddr { + continue + } + y := auxToSym(v_1_0.Aux) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(x == y && o1 == o2) + return true + } + break + } + // match: (EqPtr (OffPtr [o1] p1) p2) + // cond: isSamePtr(p1, p2) + // result: (ConstBool [o1 == 0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpOffPtr { + continue + } + o1 := auxIntToInt64(v_0.AuxInt) + p1 := v_0.Args[0] + p2 := v_1 + if !(isSamePtr(p1, p2)) { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(o1 == 0) + return true + } + break + } + // match: (EqPtr (OffPtr [o1] p1) (OffPtr [o2] p2)) + // cond: isSamePtr(p1, p2) + // result: (ConstBool [o1 == o2]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpOffPtr { + continue + } + o1 := auxIntToInt64(v_0.AuxInt) + p1 := v_0.Args[0] + if v_1.Op != OpOffPtr { + continue + } + o2 := auxIntToInt64(v_1.AuxInt) + p2 := v_1.Args[0] + if !(isSamePtr(p1, p2)) { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(o1 == o2) + return true + } + break + } + // match: (EqPtr (Const32 [c]) (Const32 [d])) + // result: (ConstBool [c == d]) 
+ for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c == d) + return true + } + break + } + // match: (EqPtr (Const64 [c]) (Const64 [d])) + // result: (ConstBool [c == d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c == d) + return true + } + break + } + // match: (EqPtr (Convert (Addr {x} _) _) (Addr {y} _)) + // result: (ConstBool [x==y]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConvert { + continue + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAddr { + continue + } + x := auxToSym(v_0_0.Aux) + if v_1.Op != OpAddr { + continue + } + y := auxToSym(v_1.Aux) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(x == y) + return true + } + break + } + // match: (EqPtr (LocalAddr _ _) (Addr _)) + // result: (ConstBool [false]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLocalAddr || v_1.Op != OpAddr { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + break + } + // match: (EqPtr (OffPtr (LocalAddr _ _)) (Addr _)) + // result: (ConstBool [false]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpOffPtr { + continue + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLocalAddr || v_1.Op != OpAddr { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + break + } + // match: (EqPtr (LocalAddr _ _) (OffPtr (Addr _))) + // result: (ConstBool [false]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLocalAddr || v_1.Op != 
OpOffPtr { + continue + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAddr { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + break + } + // match: (EqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _))) + // result: (ConstBool [false]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpOffPtr { + continue + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLocalAddr || v_1.Op != OpOffPtr { + continue + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAddr { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + break + } + // match: (EqPtr (AddPtr p1 o1) p2) + // cond: isSamePtr(p1, p2) + // result: (Not (IsNonNil o1)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAddPtr { + continue + } + o1 := v_0.Args[1] + p1 := v_0.Args[0] + p2 := v_1 + if !(isSamePtr(p1, p2)) { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool) + v0.AddArg(o1) + v.AddArg(v0) + return true + } + break + } + // match: (EqPtr (Const32 [0]) p) + // result: (Not (IsNonNil p)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + continue + } + p := v_1 + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool) + v0.AddArg(p) + v.AddArg(v0) + return true + } + break + } + // match: (EqPtr (Const64 [0]) p) + // result: (Not (IsNonNil p)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + continue + } + p := v_1 + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool) + v0.AddArg(p) + v.AddArg(v0) + return true + } + break + } + // match: (EqPtr (ConstNil) p) + // result: (Not (IsNonNil p)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConstNil { + continue + } + p := v_1 + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpIsNonNil, 
typ.Bool) + v0.AddArg(p) + v.AddArg(v0) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpEqSlice(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqSlice x y) + // result: (EqPtr (SlicePtr x) (SlicePtr y)) + for { + x := v_0 + y := v_1 + v.reset(OpEqPtr) + v0 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValuegeneric_OpFloor(v *Value) bool { + v_0 := v.Args[0] + // match: (Floor (Const64F [c])) + // result: (Const64F [math.Floor(c)]) + for { + if v_0.Op != OpConst64F { + break + } + c := auxIntToFloat64(v_0.AuxInt) + v.reset(OpConst64F) + v.AuxInt = float64ToAuxInt(math.Floor(c)) + return true + } + return false +} +func rewriteValuegeneric_OpIMake(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IMake _typ (StructMake1 val)) + // result: (IMake _typ val) + for { + _typ := v_0 + if v_1.Op != OpStructMake1 { + break + } + val := v_1.Args[0] + v.reset(OpIMake) + v.AddArg2(_typ, val) + return true + } + // match: (IMake _typ (ArrayMake1 val)) + // result: (IMake _typ val) + for { + _typ := v_0 + if v_1.Op != OpArrayMake1 { + break + } + val := v_1.Args[0] + v.reset(OpIMake) + v.AddArg2(_typ, val) + return true + } + return false +} +func rewriteValuegeneric_OpInterLECall(v *Value) bool { + // match: (InterLECall [argsize] {auxCall} (Addr {fn} (SB)) ___) + // result: devirtLECall(v, fn.(*obj.LSym)) + for { + if len(v.Args) < 1 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpAddr { + break + } + fn := auxToSym(v_0.Aux) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSB { + break + } + v.copyOf(devirtLECall(v, fn.(*obj.LSym))) + return true + } + return false +} +func rewriteValuegeneric_OpIsInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsInBounds (ZeroExt8to32 _) (Const32 [c])) + // cond: (1 << 8) <= 
c + // result: (ConstBool [true]) + for { + if v_0.Op != OpZeroExt8to32 || v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !((1 << 8) <= c) { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (IsInBounds (ZeroExt8to64 _) (Const64 [c])) + // cond: (1 << 8) <= c + // result: (ConstBool [true]) + for { + if v_0.Op != OpZeroExt8to64 || v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !((1 << 8) <= c) { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (IsInBounds (ZeroExt16to32 _) (Const32 [c])) + // cond: (1 << 16) <= c + // result: (ConstBool [true]) + for { + if v_0.Op != OpZeroExt16to32 || v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !((1 << 16) <= c) { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (IsInBounds (ZeroExt16to64 _) (Const64 [c])) + // cond: (1 << 16) <= c + // result: (ConstBool [true]) + for { + if v_0.Op != OpZeroExt16to64 || v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !((1 << 16) <= c) { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (IsInBounds x x) + // result: (ConstBool [false]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (IsInBounds (And8 (Const8 [c]) _) (Const8 [d])) + // cond: 0 <= c && c < d + // result: (ConstBool [true]) + for { + if v_0.Op != OpAnd8 { + break + } + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1.AuxInt) + if !(0 <= c && c < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + break + } + // match: 
(IsInBounds (ZeroExt8to16 (And8 (Const8 [c]) _)) (Const16 [d])) + // cond: 0 <= c && int16(c) < d + // result: (ConstBool [true]) + for { + if v_0.Op != OpZeroExt8to16 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAnd8 { + break + } + v_0_0_0 := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 { + if v_0_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0_0.AuxInt) + if v_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1.AuxInt) + if !(0 <= c && int16(c) < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + break + } + // match: (IsInBounds (ZeroExt8to32 (And8 (Const8 [c]) _)) (Const32 [d])) + // cond: 0 <= c && int32(c) < d + // result: (ConstBool [true]) + for { + if v_0.Op != OpZeroExt8to32 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAnd8 { + break + } + v_0_0_0 := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 { + if v_0_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0_0.AuxInt) + if v_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1.AuxInt) + if !(0 <= c && int32(c) < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + break + } + // match: (IsInBounds (ZeroExt8to64 (And8 (Const8 [c]) _)) (Const64 [d])) + // cond: 0 <= c && int64(c) < d + // result: (ConstBool [true]) + for { + if v_0.Op != OpZeroExt8to64 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAnd8 { + break + } + v_0_0_0 := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 { + if v_0_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0_0.AuxInt) + if v_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1.AuxInt) + if !(0 <= c && int64(c) < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + 
break + } + // match: (IsInBounds (And16 (Const16 [c]) _) (Const16 [d])) + // cond: 0 <= c && c < d + // result: (ConstBool [true]) + for { + if v_0.Op != OpAnd16 { + break + } + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1.AuxInt) + if !(0 <= c && c < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + break + } + // match: (IsInBounds (ZeroExt16to32 (And16 (Const16 [c]) _)) (Const32 [d])) + // cond: 0 <= c && int32(c) < d + // result: (ConstBool [true]) + for { + if v_0.Op != OpZeroExt16to32 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAnd16 { + break + } + v_0_0_0 := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 { + if v_0_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0_0.AuxInt) + if v_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1.AuxInt) + if !(0 <= c && int32(c) < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + break + } + // match: (IsInBounds (ZeroExt16to64 (And16 (Const16 [c]) _)) (Const64 [d])) + // cond: 0 <= c && int64(c) < d + // result: (ConstBool [true]) + for { + if v_0.Op != OpZeroExt16to64 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAnd16 { + break + } + v_0_0_0 := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 { + if v_0_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0_0.AuxInt) + if v_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1.AuxInt) + if !(0 <= c && int64(c) < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + break + } + // match: (IsInBounds (And32 (Const32 [c]) _) (Const32 [d])) + 
// cond: 0 <= c && c < d + // result: (ConstBool [true]) + for { + if v_0.Op != OpAnd32 { + break + } + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1.AuxInt) + if !(0 <= c && c < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + break + } + // match: (IsInBounds (ZeroExt32to64 (And32 (Const32 [c]) _)) (Const64 [d])) + // cond: 0 <= c && int64(c) < d + // result: (ConstBool [true]) + for { + if v_0.Op != OpZeroExt32to64 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAnd32 { + break + } + v_0_0_0 := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 { + if v_0_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0_0.AuxInt) + if v_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1.AuxInt) + if !(0 <= c && int64(c) < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + break + } + // match: (IsInBounds (And64 (Const64 [c]) _) (Const64 [d])) + // cond: 0 <= c && c < d + // result: (ConstBool [true]) + for { + if v_0.Op != OpAnd64 { + break + } + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1.AuxInt) + if !(0 <= c && c < d) { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + break + } + // match: (IsInBounds (Const32 [c]) (Const32 [d])) + // result: (ConstBool [0 <= c && c < d]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + break + } + d := auxIntToInt32(v_1.AuxInt) + 
v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(0 <= c && c < d) + return true + } + // match: (IsInBounds (Const64 [c]) (Const64 [d])) + // result: (ConstBool [0 <= c && c < d]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(0 <= c && c < d) + return true + } + // match: (IsInBounds (Mod32u _ y) y) + // result: (ConstBool [true]) + for { + if v_0.Op != OpMod32u { + break + } + y := v_0.Args[1] + if y != v_1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (IsInBounds (Mod64u _ y) y) + // result: (ConstBool [true]) + for { + if v_0.Op != OpMod64u { + break + } + y := v_0.Args[1] + if y != v_1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (IsInBounds (ZeroExt8to64 (Rsh8Ux64 _ (Const64 [c]))) (Const64 [d])) + // cond: 0 < c && c < 8 && 1<= 0 + // result: (ConstBool [true]) + for { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 || v_1.Op != OpAnd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_1.AuxInt) + if !(c >= 0) { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + break + } + // match: (Leq16 (Const16 [0]) (Rsh16Ux64 _ (Const64 [c]))) + // cond: c > 0 + // result: (ConstBool [true]) + for { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 || v_1.Op != OpRsh16Ux64 { + break + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c > 0) { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Leq16 x (Const16 [-1])) + // result: (Less16 x (Const16 [0])) + for { + x := v_0 + 
if v_1.Op != OpConst16 { + break + } + t := v_1.Type + if auxIntToInt16(v_1.AuxInt) != -1 { + break + } + v.reset(OpLess16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(0) + v.AddArg2(x, v0) + return true + } + // match: (Leq16 (Const16 [1]) x) + // result: (Less16 (Const16 [0]) x) + for { + if v_0.Op != OpConst16 { + break + } + t := v_0.Type + if auxIntToInt16(v_0.AuxInt) != 1 { + break + } + x := v_1 + v.reset(OpLess16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + return false +} +func rewriteValuegeneric_OpLeq16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq16U (Const16 [c]) (Const16 [d])) + // result: (ConstBool [uint16(c) <= uint16(d)]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst16 { + break + } + d := auxIntToInt16(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(uint16(c) <= uint16(d)) + return true + } + // match: (Leq16U (Const16 [1]) x) + // result: (Neq16 (Const16 [0]) x) + for { + if v_0.Op != OpConst16 { + break + } + t := v_0.Type + if auxIntToInt16(v_0.AuxInt) != 1 { + break + } + x := v_1 + v.reset(OpNeq16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Leq16U (Const16 [0]) _) + // result: (ConstBool [true]) + for { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + return false +} +func rewriteValuegeneric_OpLeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32 (Const32 [c]) (Const32 [d])) + // result: (ConstBool [c <= d]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + break + } + d := auxIntToInt32(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c <= d) + return 
true + } + // match: (Leq32 (Const32 [0]) (And32 _ (Const32 [c]))) + // cond: c >= 0 + // result: (ConstBool [true]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 || v_1.Op != OpAnd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_1.AuxInt) + if !(c >= 0) { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + break + } + // match: (Leq32 (Const32 [0]) (Rsh32Ux64 _ (Const64 [c]))) + // cond: c > 0 + // result: (ConstBool [true]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 || v_1.Op != OpRsh32Ux64 { + break + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c > 0) { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Leq32 x (Const32 [-1])) + // result: (Less32 x (Const32 [0])) + for { + x := v_0 + if v_1.Op != OpConst32 { + break + } + t := v_1.Type + if auxIntToInt32(v_1.AuxInt) != -1 { + break + } + v.reset(OpLess32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg2(x, v0) + return true + } + // match: (Leq32 (Const32 [1]) x) + // result: (Less32 (Const32 [0]) x) + for { + if v_0.Op != OpConst32 { + break + } + t := v_0.Type + if auxIntToInt32(v_0.AuxInt) != 1 { + break + } + x := v_1 + v.reset(OpLess32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + return false +} +func rewriteValuegeneric_OpLeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Leq32F (Const32F [c]) (Const32F [d])) + // result: (ConstBool [c <= d]) + for { + if v_0.Op != OpConst32F { + break + } + c := auxIntToFloat32(v_0.AuxInt) + if v_1.Op != OpConst32F { + break + } + d := auxIntToFloat32(v_1.AuxInt) + 
v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c <= d) + return true + } + return false +} +func rewriteValuegeneric_OpLeq32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32U (Const32 [c]) (Const32 [d])) + // result: (ConstBool [uint32(c) <= uint32(d)]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + break + } + d := auxIntToInt32(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(uint32(c) <= uint32(d)) + return true + } + // match: (Leq32U (Const32 [1]) x) + // result: (Neq32 (Const32 [0]) x) + for { + if v_0.Op != OpConst32 { + break + } + t := v_0.Type + if auxIntToInt32(v_0.AuxInt) != 1 { + break + } + x := v_1 + v.reset(OpNeq32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Leq32U (Const32 [0]) _) + // result: (ConstBool [true]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + return false +} +func rewriteValuegeneric_OpLeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64 (Const64 [c]) (Const64 [d])) + // result: (ConstBool [c <= d]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c <= d) + return true + } + // match: (Leq64 (Const64 [0]) (And64 _ (Const64 [c]))) + // cond: c >= 0 + // result: (ConstBool [true]) + for { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 || v_1.Op != OpAnd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c >= 0) { + continue + } + v.reset(OpConstBool) + 
v.AuxInt = boolToAuxInt(true) + return true + } + break + } + // match: (Leq64 (Const64 [0]) (Rsh64Ux64 _ (Const64 [c]))) + // cond: c > 0 + // result: (ConstBool [true]) + for { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 || v_1.Op != OpRsh64Ux64 { + break + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c > 0) { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Leq64 x (Const64 [-1])) + // result: (Less64 x (Const64 [0])) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + if auxIntToInt64(v_1.AuxInt) != -1 { + break + } + v.reset(OpLess64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(x, v0) + return true + } + // match: (Leq64 (Const64 [1]) x) + // result: (Less64 (Const64 [0]) x) + for { + if v_0.Op != OpConst64 { + break + } + t := v_0.Type + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + x := v_1 + v.reset(OpLess64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + return false +} +func rewriteValuegeneric_OpLeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Leq64F (Const64F [c]) (Const64F [d])) + // result: (ConstBool [c <= d]) + for { + if v_0.Op != OpConst64F { + break + } + c := auxIntToFloat64(v_0.AuxInt) + if v_1.Op != OpConst64F { + break + } + d := auxIntToFloat64(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c <= d) + return true + } + return false +} +func rewriteValuegeneric_OpLeq64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64U (Const64 [c]) (Const64 [d])) + // result: (ConstBool [uint64(c) <= uint64(d)]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = 
boolToAuxInt(uint64(c) <= uint64(d)) + return true + } + // match: (Leq64U (Const64 [1]) x) + // result: (Neq64 (Const64 [0]) x) + for { + if v_0.Op != OpConst64 { + break + } + t := v_0.Type + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + x := v_1 + v.reset(OpNeq64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Leq64U (Const64 [0]) _) + // result: (ConstBool [true]) + for { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + return false +} +func rewriteValuegeneric_OpLeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq8 (Const8 [c]) (Const8 [d])) + // result: (ConstBool [c <= d]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst8 { + break + } + d := auxIntToInt8(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c <= d) + return true + } + // match: (Leq8 (Const8 [0]) (And8 _ (Const8 [c]))) + // cond: c >= 0 + // result: (ConstBool [true]) + for { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 || v_1.Op != OpAnd8 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_1.AuxInt) + if !(c >= 0) { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + break + } + // match: (Leq8 (Const8 [0]) (Rsh8Ux64 _ (Const64 [c]))) + // cond: c > 0 + // result: (ConstBool [true]) + for { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 || v_1.Op != OpRsh8Ux64 { + break + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c > 0) { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: 
(Leq8 x (Const8 [-1])) + // result: (Less8 x (Const8 [0])) + for { + x := v_0 + if v_1.Op != OpConst8 { + break + } + t := v_1.Type + if auxIntToInt8(v_1.AuxInt) != -1 { + break + } + v.reset(OpLess8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, v0) + return true + } + // match: (Leq8 (Const8 [1]) x) + // result: (Less8 (Const8 [0]) x) + for { + if v_0.Op != OpConst8 { + break + } + t := v_0.Type + if auxIntToInt8(v_0.AuxInt) != 1 { + break + } + x := v_1 + v.reset(OpLess8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + return false +} +func rewriteValuegeneric_OpLeq8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq8U (Const8 [c]) (Const8 [d])) + // result: (ConstBool [ uint8(c) <= uint8(d)]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst8 { + break + } + d := auxIntToInt8(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(uint8(c) <= uint8(d)) + return true + } + // match: (Leq8U (Const8 [1]) x) + // result: (Neq8 (Const8 [0]) x) + for { + if v_0.Op != OpConst8 { + break + } + t := v_0.Type + if auxIntToInt8(v_0.AuxInt) != 1 { + break + } + x := v_1 + v.reset(OpNeq8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Leq8U (Const8 [0]) _) + // result: (ConstBool [true]) + for { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + return false +} +func rewriteValuegeneric_OpLess16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less16 (Const16 [c]) (Const16 [d])) + // result: (ConstBool [c < d]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst16 { + break + } + d := auxIntToInt16(v_1.AuxInt) + v.reset(OpConstBool) + 
v.AuxInt = boolToAuxInt(c < d) + return true + } + // match: (Less16 (Const16 [0]) x) + // cond: isNonNegative(x) + // result: (Neq16 (Const16 [0]) x) + for { + if v_0.Op != OpConst16 { + break + } + t := v_0.Type + if auxIntToInt16(v_0.AuxInt) != 0 { + break + } + x := v_1 + if !(isNonNegative(x)) { + break + } + v.reset(OpNeq16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Less16 x (Const16 [1])) + // cond: isNonNegative(x) + // result: (Eq16 (Const16 [0]) x) + for { + x := v_0 + if v_1.Op != OpConst16 { + break + } + t := v_1.Type + if auxIntToInt16(v_1.AuxInt) != 1 || !(isNonNegative(x)) { + break + } + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Less16 x (Const16 [1])) + // result: (Leq16 x (Const16 [0])) + for { + x := v_0 + if v_1.Op != OpConst16 { + break + } + t := v_1.Type + if auxIntToInt16(v_1.AuxInt) != 1 { + break + } + v.reset(OpLeq16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(0) + v.AddArg2(x, v0) + return true + } + // match: (Less16 (Const16 [-1]) x) + // result: (Leq16 (Const16 [0]) x) + for { + if v_0.Op != OpConst16 { + break + } + t := v_0.Type + if auxIntToInt16(v_0.AuxInt) != -1 { + break + } + x := v_1 + v.reset(OpLeq16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + return false +} +func rewriteValuegeneric_OpLess16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less16U (Const16 [c]) (Const16 [d])) + // result: (ConstBool [uint16(c) < uint16(d)]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst16 { + break + } + d := auxIntToInt16(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(uint16(c) < uint16(d)) + return true + } + // match: (Less16U x (Const16 [1])) + // result: (Eq16 
(Const16 [0]) x) + for { + x := v_0 + if v_1.Op != OpConst16 { + break + } + t := v_1.Type + if auxIntToInt16(v_1.AuxInt) != 1 { + break + } + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Less16U _ (Const16 [0])) + // result: (ConstBool [false]) + for { + if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + return false +} +func rewriteValuegeneric_OpLess32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32 (Const32 [c]) (Const32 [d])) + // result: (ConstBool [c < d]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + break + } + d := auxIntToInt32(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c < d) + return true + } + // match: (Less32 (Const32 [0]) x) + // cond: isNonNegative(x) + // result: (Neq32 (Const32 [0]) x) + for { + if v_0.Op != OpConst32 { + break + } + t := v_0.Type + if auxIntToInt32(v_0.AuxInt) != 0 { + break + } + x := v_1 + if !(isNonNegative(x)) { + break + } + v.reset(OpNeq32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Less32 x (Const32 [1])) + // cond: isNonNegative(x) + // result: (Eq32 (Const32 [0]) x) + for { + x := v_0 + if v_1.Op != OpConst32 { + break + } + t := v_1.Type + if auxIntToInt32(v_1.AuxInt) != 1 || !(isNonNegative(x)) { + break + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Less32 x (Const32 [1])) + // result: (Leq32 x (Const32 [0])) + for { + x := v_0 + if v_1.Op != OpConst32 { + break + } + t := v_1.Type + if auxIntToInt32(v_1.AuxInt) != 1 { + break + } + v.reset(OpLeq32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg2(x, 
v0) + return true + } + // match: (Less32 (Const32 [-1]) x) + // result: (Leq32 (Const32 [0]) x) + for { + if v_0.Op != OpConst32 { + break + } + t := v_0.Type + if auxIntToInt32(v_0.AuxInt) != -1 { + break + } + x := v_1 + v.reset(OpLeq32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + return false +} +func rewriteValuegeneric_OpLess32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Less32F (Const32F [c]) (Const32F [d])) + // result: (ConstBool [c < d]) + for { + if v_0.Op != OpConst32F { + break + } + c := auxIntToFloat32(v_0.AuxInt) + if v_1.Op != OpConst32F { + break + } + d := auxIntToFloat32(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c < d) + return true + } + return false +} +func rewriteValuegeneric_OpLess32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32U (Const32 [c]) (Const32 [d])) + // result: (ConstBool [uint32(c) < uint32(d)]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + break + } + d := auxIntToInt32(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(uint32(c) < uint32(d)) + return true + } + // match: (Less32U x (Const32 [1])) + // result: (Eq32 (Const32 [0]) x) + for { + x := v_0 + if v_1.Op != OpConst32 { + break + } + t := v_1.Type + if auxIntToInt32(v_1.AuxInt) != 1 { + break + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Less32U _ (Const32 [0])) + // result: (ConstBool [false]) + for { + if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != 0 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + return false +} +func rewriteValuegeneric_OpLess64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64 (Const64 [c]) (Const64 [d])) + // result: (ConstBool [c < d]) + 
for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c < d) + return true + } + // match: (Less64 (Const64 [0]) x) + // cond: isNonNegative(x) + // result: (Neq64 (Const64 [0]) x) + for { + if v_0.Op != OpConst64 { + break + } + t := v_0.Type + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_1 + if !(isNonNegative(x)) { + break + } + v.reset(OpNeq64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Less64 x (Const64 [1])) + // cond: isNonNegative(x) + // result: (Eq64 (Const64 [0]) x) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + if auxIntToInt64(v_1.AuxInt) != 1 || !(isNonNegative(x)) { + break + } + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Less64 x (Const64 [1])) + // result: (Leq64 x (Const64 [0])) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + if auxIntToInt64(v_1.AuxInt) != 1 { + break + } + v.reset(OpLeq64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(x, v0) + return true + } + // match: (Less64 (Const64 [-1]) x) + // result: (Leq64 (Const64 [0]) x) + for { + if v_0.Op != OpConst64 { + break + } + t := v_0.Type + if auxIntToInt64(v_0.AuxInt) != -1 { + break + } + x := v_1 + v.reset(OpLeq64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + return false +} +func rewriteValuegeneric_OpLess64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Less64F (Const64F [c]) (Const64F [d])) + // result: (ConstBool [c < d]) + for { + if v_0.Op != OpConst64F { + break + } + c := auxIntToFloat64(v_0.AuxInt) + if v_1.Op != OpConst64F { + break + } + d := auxIntToFloat64(v_1.AuxInt) + 
v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c < d) + return true + } + return false +} +func rewriteValuegeneric_OpLess64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64U (Const64 [c]) (Const64 [d])) + // result: (ConstBool [uint64(c) < uint64(d)]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(uint64(c) < uint64(d)) + return true + } + // match: (Less64U x (Const64 [1])) + // result: (Eq64 (Const64 [0]) x) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + if auxIntToInt64(v_1.AuxInt) != 1 { + break + } + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Less64U _ (Const64 [0])) + // result: (ConstBool [false]) + for { + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + return false +} +func rewriteValuegeneric_OpLess8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less8 (Const8 [c]) (Const8 [d])) + // result: (ConstBool [c < d]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst8 { + break + } + d := auxIntToInt8(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c < d) + return true + } + // match: (Less8 (Const8 [0]) x) + // cond: isNonNegative(x) + // result: (Neq8 (Const8 [0]) x) + for { + if v_0.Op != OpConst8 { + break + } + t := v_0.Type + if auxIntToInt8(v_0.AuxInt) != 0 { + break + } + x := v_1 + if !(isNonNegative(x)) { + break + } + v.reset(OpNeq8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Less8 x (Const8 [1])) + // cond: isNonNegative(x) + // result: (Eq8 (Const8 [0]) x) + for { 
+ x := v_0 + if v_1.Op != OpConst8 { + break + } + t := v_1.Type + if auxIntToInt8(v_1.AuxInt) != 1 || !(isNonNegative(x)) { + break + } + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Less8 x (Const8 [1])) + // result: (Leq8 x (Const8 [0])) + for { + x := v_0 + if v_1.Op != OpConst8 { + break + } + t := v_1.Type + if auxIntToInt8(v_1.AuxInt) != 1 { + break + } + v.reset(OpLeq8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, v0) + return true + } + // match: (Less8 (Const8 [-1]) x) + // result: (Leq8 (Const8 [0]) x) + for { + if v_0.Op != OpConst8 { + break + } + t := v_0.Type + if auxIntToInt8(v_0.AuxInt) != -1 { + break + } + x := v_1 + v.reset(OpLeq8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + return false +} +func rewriteValuegeneric_OpLess8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less8U (Const8 [c]) (Const8 [d])) + // result: (ConstBool [ uint8(c) < uint8(d)]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst8 { + break + } + d := auxIntToInt8(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(uint8(c) < uint8(d)) + return true + } + // match: (Less8U x (Const8 [1])) + // result: (Eq8 (Const8 [0]) x) + for { + x := v_0 + if v_1.Op != OpConst8 { + break + } + t := v_1.Type + if auxIntToInt8(v_1.AuxInt) != 1 { + break + } + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(0) + v.AddArg2(v0, x) + return true + } + // match: (Less8U _ (Const8 [0])) + // result: (ConstBool [false]) + for { + if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + return false +} +func rewriteValuegeneric_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b 
:= v.Block + config := b.Func.Config + // match: (Load p1 (Store {t2} p2 x _)) + // cond: isSamePtr(p1, p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() + // result: x + for { + t1 := v.Type + p1 := v_0 + if v_1.Op != OpStore { + break + } + t2 := auxToType(v_1.Aux) + x := v_1.Args[1] + p2 := v_1.Args[0] + if !(isSamePtr(p1, p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size()) { + break + } + v.copyOf(x) + return true + } + // match: (Load p1 (Store {t2} p2 _ (Store {t3} p3 x _))) + // cond: isSamePtr(p1, p3) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p3, t3.Size(), p2, t2.Size()) + // result: x + for { + t1 := v.Type + p1 := v_0 + if v_1.Op != OpStore { + break + } + t2 := auxToType(v_1.Aux) + _ = v_1.Args[2] + p2 := v_1.Args[0] + v_1_2 := v_1.Args[2] + if v_1_2.Op != OpStore { + break + } + t3 := auxToType(v_1_2.Aux) + x := v_1_2.Args[1] + p3 := v_1_2.Args[0] + if !(isSamePtr(p1, p3) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p3, t3.Size(), p2, t2.Size())) { + break + } + v.copyOf(x) + return true + } + // match: (Load p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 x _)))) + // cond: isSamePtr(p1, p4) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p4, t4.Size(), p2, t2.Size()) && disjoint(p4, t4.Size(), p3, t3.Size()) + // result: x + for { + t1 := v.Type + p1 := v_0 + if v_1.Op != OpStore { + break + } + t2 := auxToType(v_1.Aux) + _ = v_1.Args[2] + p2 := v_1.Args[0] + v_1_2 := v_1.Args[2] + if v_1_2.Op != OpStore { + break + } + t3 := auxToType(v_1_2.Aux) + _ = v_1_2.Args[2] + p3 := v_1_2.Args[0] + v_1_2_2 := v_1_2.Args[2] + if v_1_2_2.Op != OpStore { + break + } + t4 := auxToType(v_1_2_2.Aux) + x := v_1_2_2.Args[1] + p4 := v_1_2_2.Args[0] + if !(isSamePtr(p1, p4) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p4, t4.Size(), p2, t2.Size()) && disjoint(p4, t4.Size(), p3, t3.Size())) { + break + } + 
v.copyOf(x) + return true + } + // match: (Load p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 x _))))) + // cond: isSamePtr(p1, p5) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p5, t5.Size(), p2, t2.Size()) && disjoint(p5, t5.Size(), p3, t3.Size()) && disjoint(p5, t5.Size(), p4, t4.Size()) + // result: x + for { + t1 := v.Type + p1 := v_0 + if v_1.Op != OpStore { + break + } + t2 := auxToType(v_1.Aux) + _ = v_1.Args[2] + p2 := v_1.Args[0] + v_1_2 := v_1.Args[2] + if v_1_2.Op != OpStore { + break + } + t3 := auxToType(v_1_2.Aux) + _ = v_1_2.Args[2] + p3 := v_1_2.Args[0] + v_1_2_2 := v_1_2.Args[2] + if v_1_2_2.Op != OpStore { + break + } + t4 := auxToType(v_1_2_2.Aux) + _ = v_1_2_2.Args[2] + p4 := v_1_2_2.Args[0] + v_1_2_2_2 := v_1_2_2.Args[2] + if v_1_2_2_2.Op != OpStore { + break + } + t5 := auxToType(v_1_2_2_2.Aux) + x := v_1_2_2_2.Args[1] + p5 := v_1_2_2_2.Args[0] + if !(isSamePtr(p1, p5) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p5, t5.Size(), p2, t2.Size()) && disjoint(p5, t5.Size(), p3, t3.Size()) && disjoint(p5, t5.Size(), p4, t4.Size())) { + break + } + v.copyOf(x) + return true + } + // match: (Load p1 (Store {t2} p2 (Const64 [x]) _)) + // cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) + // result: (Const64F [math.Float64frombits(uint64(x))]) + for { + t1 := v.Type + p1 := v_0 + if v_1.Op != OpStore { + break + } + t2 := auxToType(v_1.Aux) + _ = v_1.Args[1] + p2 := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + break + } + x := auxIntToInt64(v_1_1.AuxInt) + if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))) { + break + } + v.reset(OpConst64F) + v.AuxInt = float64ToAuxInt(math.Float64frombits(uint64(x))) + return true + } + // match: (Load p1 (Store {t2} p2 (Const32 [x]) _)) + // cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && 
is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) + // result: (Const32F [math.Float32frombits(uint32(x))]) + for { + t1 := v.Type + p1 := v_0 + if v_1.Op != OpStore { + break + } + t2 := auxToType(v_1.Aux) + _ = v_1.Args[1] + p2 := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + break + } + x := auxIntToInt32(v_1_1.AuxInt) + if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))) { + break + } + v.reset(OpConst32F) + v.AuxInt = float32ToAuxInt(math.Float32frombits(uint32(x))) + return true + } + // match: (Load p1 (Store {t2} p2 (Const64F [x]) _)) + // cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1) + // result: (Const64 [int64(math.Float64bits(x))]) + for { + t1 := v.Type + p1 := v_0 + if v_1.Op != OpStore { + break + } + t2 := auxToType(v_1.Aux) + _ = v_1.Args[1] + p2 := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64F { + break + } + x := auxIntToFloat64(v_1_1.AuxInt) + if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitInt(t1)) { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(int64(math.Float64bits(x))) + return true + } + // match: (Load p1 (Store {t2} p2 (Const32F [x]) _)) + // cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) + // result: (Const32 [int32(math.Float32bits(x))]) + for { + t1 := v.Type + p1 := v_0 + if v_1.Op != OpStore { + break + } + t2 := auxToType(v_1.Aux) + _ = v_1.Args[1] + p2 := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32F { + break + } + x := auxIntToFloat32(v_1_1.AuxInt) + if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitInt(t1)) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(int32(math.Float32bits(x))) + return true + } + // match: (Load op:(OffPtr [o1] p1) (Store {t2} p2 _ mem:(Zero [n] p3 _))) + // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) + // result: @mem.Block (Load 
(OffPtr [o1] p3) mem) + for { + t1 := v.Type + op := v_0 + if op.Op != OpOffPtr { + break + } + o1 := auxIntToInt64(op.AuxInt) + p1 := op.Args[0] + if v_1.Op != OpStore { + break + } + t2 := auxToType(v_1.Aux) + _ = v_1.Args[2] + p2 := v_1.Args[0] + mem := v_1.Args[2] + if mem.Op != OpZero { + break + } + n := auxIntToInt64(mem.AuxInt) + p3 := mem.Args[0] + if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size())) { + break + } + b = mem.Block + v0 := b.NewValue0(v.Pos, OpLoad, t1) + v.copyOf(v0) + v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type) + v1.AuxInt = int64ToAuxInt(o1) + v1.AddArg(p3) + v0.AddArg2(v1, mem) + return true + } + // match: (Load op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ mem:(Zero [n] p4 _)))) + // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) + // result: @mem.Block (Load (OffPtr [o1] p4) mem) + for { + t1 := v.Type + op := v_0 + if op.Op != OpOffPtr { + break + } + o1 := auxIntToInt64(op.AuxInt) + p1 := op.Args[0] + if v_1.Op != OpStore { + break + } + t2 := auxToType(v_1.Aux) + _ = v_1.Args[2] + p2 := v_1.Args[0] + v_1_2 := v_1.Args[2] + if v_1_2.Op != OpStore { + break + } + t3 := auxToType(v_1_2.Aux) + _ = v_1_2.Args[2] + p3 := v_1_2.Args[0] + mem := v_1_2.Args[2] + if mem.Op != OpZero { + break + } + n := auxIntToInt64(mem.AuxInt) + p4 := mem.Args[0] + if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())) { + break + } + b = mem.Block + v0 := b.NewValue0(v.Pos, OpLoad, t1) + v.copyOf(v0) + v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type) + v1.AuxInt = int64ToAuxInt(o1) + v1.AddArg(p4) + v0.AddArg2(v1, mem) + return true + } + // match: (Load op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ mem:(Zero [n] p5 _))))) + // cond: o1 >= 0 && o1+t1.Size() <= n && 
isSamePtr(p1, p5) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size()) + // result: @mem.Block (Load (OffPtr [o1] p5) mem) + for { + t1 := v.Type + op := v_0 + if op.Op != OpOffPtr { + break + } + o1 := auxIntToInt64(op.AuxInt) + p1 := op.Args[0] + if v_1.Op != OpStore { + break + } + t2 := auxToType(v_1.Aux) + _ = v_1.Args[2] + p2 := v_1.Args[0] + v_1_2 := v_1.Args[2] + if v_1_2.Op != OpStore { + break + } + t3 := auxToType(v_1_2.Aux) + _ = v_1_2.Args[2] + p3 := v_1_2.Args[0] + v_1_2_2 := v_1_2.Args[2] + if v_1_2_2.Op != OpStore { + break + } + t4 := auxToType(v_1_2_2.Aux) + _ = v_1_2_2.Args[2] + p4 := v_1_2_2.Args[0] + mem := v_1_2_2.Args[2] + if mem.Op != OpZero { + break + } + n := auxIntToInt64(mem.AuxInt) + p5 := mem.Args[0] + if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())) { + break + } + b = mem.Block + v0 := b.NewValue0(v.Pos, OpLoad, t1) + v.copyOf(v0) + v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type) + v1.AuxInt = int64ToAuxInt(o1) + v1.AddArg(p5) + v0.AddArg2(v1, mem) + return true + } + // match: (Load op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ mem:(Zero [n] p6 _)))))) + // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size()) && disjoint(op, t1.Size(), p5, t5.Size()) + // result: @mem.Block (Load (OffPtr [o1] p6) mem) + for { + t1 := v.Type + op := v_0 + if op.Op != OpOffPtr { + break + } + o1 := auxIntToInt64(op.AuxInt) + p1 := op.Args[0] + if v_1.Op != OpStore { + break + } + t2 := auxToType(v_1.Aux) + _ = v_1.Args[2] + p2 := v_1.Args[0] + v_1_2 := v_1.Args[2] + if v_1_2.Op != OpStore { + break + } + t3 := auxToType(v_1_2.Aux) + _ = 
v_1_2.Args[2] + p3 := v_1_2.Args[0] + v_1_2_2 := v_1_2.Args[2] + if v_1_2_2.Op != OpStore { + break + } + t4 := auxToType(v_1_2_2.Aux) + _ = v_1_2_2.Args[2] + p4 := v_1_2_2.Args[0] + v_1_2_2_2 := v_1_2_2.Args[2] + if v_1_2_2_2.Op != OpStore { + break + } + t5 := auxToType(v_1_2_2_2.Aux) + _ = v_1_2_2_2.Args[2] + p5 := v_1_2_2_2.Args[0] + mem := v_1_2_2_2.Args[2] + if mem.Op != OpZero { + break + } + n := auxIntToInt64(mem.AuxInt) + p6 := mem.Args[0] + if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size()) && disjoint(op, t1.Size(), p5, t5.Size())) { + break + } + b = mem.Block + v0 := b.NewValue0(v.Pos, OpLoad, t1) + v.copyOf(v0) + v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type) + v1.AuxInt = int64ToAuxInt(o1) + v1.AddArg(p6) + v0.AddArg2(v1, mem) + return true + } + // match: (Load (OffPtr [o] p1) (Zero [n] p2 _)) + // cond: t1.IsBoolean() && isSamePtr(p1, p2) && n >= o + 1 + // result: (ConstBool [false]) + for { + t1 := v.Type + if v_0.Op != OpOffPtr { + break + } + o := auxIntToInt64(v_0.AuxInt) + p1 := v_0.Args[0] + if v_1.Op != OpZero { + break + } + n := auxIntToInt64(v_1.AuxInt) + p2 := v_1.Args[0] + if !(t1.IsBoolean() && isSamePtr(p1, p2) && n >= o+1) { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (Load (OffPtr [o] p1) (Zero [n] p2 _)) + // cond: is8BitInt(t1) && isSamePtr(p1, p2) && n >= o + 1 + // result: (Const8 [0]) + for { + t1 := v.Type + if v_0.Op != OpOffPtr { + break + } + o := auxIntToInt64(v_0.AuxInt) + p1 := v_0.Args[0] + if v_1.Op != OpZero { + break + } + n := auxIntToInt64(v_1.AuxInt) + p2 := v_1.Args[0] + if !(is8BitInt(t1) && isSamePtr(p1, p2) && n >= o+1) { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + // match: (Load (OffPtr [o] p1) (Zero [n] p2 _)) + // cond: is16BitInt(t1) && isSamePtr(p1, p2) && n >= o + 2 + // 
result: (Const16 [0]) + for { + t1 := v.Type + if v_0.Op != OpOffPtr { + break + } + o := auxIntToInt64(v_0.AuxInt) + p1 := v_0.Args[0] + if v_1.Op != OpZero { + break + } + n := auxIntToInt64(v_1.AuxInt) + p2 := v_1.Args[0] + if !(is16BitInt(t1) && isSamePtr(p1, p2) && n >= o+2) { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + // match: (Load (OffPtr [o] p1) (Zero [n] p2 _)) + // cond: is32BitInt(t1) && isSamePtr(p1, p2) && n >= o + 4 + // result: (Const32 [0]) + for { + t1 := v.Type + if v_0.Op != OpOffPtr { + break + } + o := auxIntToInt64(v_0.AuxInt) + p1 := v_0.Args[0] + if v_1.Op != OpZero { + break + } + n := auxIntToInt64(v_1.AuxInt) + p2 := v_1.Args[0] + if !(is32BitInt(t1) && isSamePtr(p1, p2) && n >= o+4) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (Load (OffPtr [o] p1) (Zero [n] p2 _)) + // cond: is64BitInt(t1) && isSamePtr(p1, p2) && n >= o + 8 + // result: (Const64 [0]) + for { + t1 := v.Type + if v_0.Op != OpOffPtr { + break + } + o := auxIntToInt64(v_0.AuxInt) + p1 := v_0.Args[0] + if v_1.Op != OpZero { + break + } + n := auxIntToInt64(v_1.AuxInt) + p2 := v_1.Args[0] + if !(is64BitInt(t1) && isSamePtr(p1, p2) && n >= o+8) { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Load (OffPtr [o] p1) (Zero [n] p2 _)) + // cond: is32BitFloat(t1) && isSamePtr(p1, p2) && n >= o + 4 + // result: (Const32F [0]) + for { + t1 := v.Type + if v_0.Op != OpOffPtr { + break + } + o := auxIntToInt64(v_0.AuxInt) + p1 := v_0.Args[0] + if v_1.Op != OpZero { + break + } + n := auxIntToInt64(v_1.AuxInt) + p2 := v_1.Args[0] + if !(is32BitFloat(t1) && isSamePtr(p1, p2) && n >= o+4) { + break + } + v.reset(OpConst32F) + v.AuxInt = float32ToAuxInt(0) + return true + } + // match: (Load (OffPtr [o] p1) (Zero [n] p2 _)) + // cond: is64BitFloat(t1) && isSamePtr(p1, p2) && n >= o + 8 + // result: (Const64F [0]) + for { + t1 := v.Type + if v_0.Op != 
OpOffPtr { + break + } + o := auxIntToInt64(v_0.AuxInt) + p1 := v_0.Args[0] + if v_1.Op != OpZero { + break + } + n := auxIntToInt64(v_1.AuxInt) + p2 := v_1.Args[0] + if !(is64BitFloat(t1) && isSamePtr(p1, p2) && n >= o+8) { + break + } + v.reset(OpConst64F) + v.AuxInt = float64ToAuxInt(0) + return true + } + // match: (Load _ _) + // cond: t.IsStruct() && t.NumFields() == 0 && CanSSA(t) + // result: (StructMake0) + for { + t := v.Type + if !(t.IsStruct() && t.NumFields() == 0 && CanSSA(t)) { + break + } + v.reset(OpStructMake0) + return true + } + // match: (Load ptr mem) + // cond: t.IsStruct() && t.NumFields() == 1 && CanSSA(t) + // result: (StructMake1 (Load (OffPtr [0] ptr) mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsStruct() && t.NumFields() == 1 && CanSSA(t)) { + break + } + v.reset(OpStructMake1) + v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0)) + v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) + v1.AuxInt = int64ToAuxInt(0) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + v.AddArg(v0) + return true + } + // match: (Load ptr mem) + // cond: t.IsStruct() && t.NumFields() == 2 && CanSSA(t) + // result: (StructMake2 (Load (OffPtr [0] ptr) mem) (Load (OffPtr [t.FieldOff(1)] ptr) mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsStruct() && t.NumFields() == 2 && CanSSA(t)) { + break + } + v.reset(OpStructMake2) + v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0)) + v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) + v1.AuxInt = int64ToAuxInt(0) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1)) + v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) + v3.AuxInt = int64ToAuxInt(t.FieldOff(1)) + v3.AddArg(ptr) + v2.AddArg2(v3, mem) + v.AddArg2(v0, v2) + return true + } + // match: (Load ptr mem) + // cond: t.IsStruct() && t.NumFields() == 3 && CanSSA(t) + // result: (StructMake3 (Load (OffPtr [0] ptr) mem) (Load (OffPtr [t.FieldOff(1)] ptr) mem) (Load (OffPtr [t.FieldOff(2)] 
ptr) mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsStruct() && t.NumFields() == 3 && CanSSA(t)) { + break + } + v.reset(OpStructMake3) + v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0)) + v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) + v1.AuxInt = int64ToAuxInt(0) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1)) + v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) + v3.AuxInt = int64ToAuxInt(t.FieldOff(1)) + v3.AddArg(ptr) + v2.AddArg2(v3, mem) + v4 := b.NewValue0(v.Pos, OpLoad, t.FieldType(2)) + v5 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) + v5.AuxInt = int64ToAuxInt(t.FieldOff(2)) + v5.AddArg(ptr) + v4.AddArg2(v5, mem) + v.AddArg3(v0, v2, v4) + return true + } + // match: (Load ptr mem) + // cond: t.IsStruct() && t.NumFields() == 4 && CanSSA(t) + // result: (StructMake4 (Load (OffPtr [0] ptr) mem) (Load (OffPtr [t.FieldOff(1)] ptr) mem) (Load (OffPtr [t.FieldOff(2)] ptr) mem) (Load (OffPtr [t.FieldOff(3)] ptr) mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsStruct() && t.NumFields() == 4 && CanSSA(t)) { + break + } + v.reset(OpStructMake4) + v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0)) + v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) + v1.AuxInt = int64ToAuxInt(0) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1)) + v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) + v3.AuxInt = int64ToAuxInt(t.FieldOff(1)) + v3.AddArg(ptr) + v2.AddArg2(v3, mem) + v4 := b.NewValue0(v.Pos, OpLoad, t.FieldType(2)) + v5 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) + v5.AuxInt = int64ToAuxInt(t.FieldOff(2)) + v5.AddArg(ptr) + v4.AddArg2(v5, mem) + v6 := b.NewValue0(v.Pos, OpLoad, t.FieldType(3)) + v7 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo()) + v7.AuxInt = int64ToAuxInt(t.FieldOff(3)) + v7.AddArg(ptr) + v6.AddArg2(v7, mem) + v.AddArg4(v0, v2, v4, v6) + return true + } + // match: (Load _ 
_) + // cond: t.IsArray() && t.NumElem() == 0 + // result: (ArrayMake0) + for { + t := v.Type + if !(t.IsArray() && t.NumElem() == 0) { + break + } + v.reset(OpArrayMake0) + return true + } + // match: (Load ptr mem) + // cond: t.IsArray() && t.NumElem() == 1 && CanSSA(t) + // result: (ArrayMake1 (Load ptr mem)) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsArray() && t.NumElem() == 1 && CanSSA(t)) { + break + } + v.reset(OpArrayMake1) + v0 := b.NewValue0(v.Pos, OpLoad, t.Elem()) + v0.AddArg2(ptr, mem) + v.AddArg(v0) + return true + } + // match: (Load (OffPtr [off] (Addr {s} sb) ) _) + // cond: t.IsUintptr() && isFixedSym(s, off) + // result: (Addr {fixedSym(b.Func, s, off)} sb) + for { + t := v.Type + if v_0.Op != OpOffPtr { + break + } + off := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAddr { + break + } + s := auxToSym(v_0_0.Aux) + sb := v_0_0.Args[0] + if !(t.IsUintptr() && isFixedSym(s, off)) { + break + } + v.reset(OpAddr) + v.Aux = symToAux(fixedSym(b.Func, s, off)) + v.AddArg(sb) + return true + } + // match: (Load (OffPtr [off] (Convert (Addr {s} sb) _) ) _) + // cond: t.IsUintptr() && isFixedSym(s, off) + // result: (Addr {fixedSym(b.Func, s, off)} sb) + for { + t := v.Type + if v_0.Op != OpOffPtr { + break + } + off := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConvert { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAddr { + break + } + s := auxToSym(v_0_0_0.Aux) + sb := v_0_0_0.Args[0] + if !(t.IsUintptr() && isFixedSym(s, off)) { + break + } + v.reset(OpAddr) + v.Aux = symToAux(fixedSym(b.Func, s, off)) + v.AddArg(sb) + return true + } + // match: (Load (OffPtr [off] (ITab (IMake (Addr {s} sb) _))) _) + // cond: t.IsUintptr() && isFixedSym(s, off) + // result: (Addr {fixedSym(b.Func, s, off)} sb) + for { + t := v.Type + if v_0.Op != OpOffPtr { + break + } + off := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpITab { + break + } + v_0_0_0 := 
v_0_0.Args[0] + if v_0_0_0.Op != OpIMake { + break + } + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpAddr { + break + } + s := auxToSym(v_0_0_0_0.Aux) + sb := v_0_0_0_0.Args[0] + if !(t.IsUintptr() && isFixedSym(s, off)) { + break + } + v.reset(OpAddr) + v.Aux = symToAux(fixedSym(b.Func, s, off)) + v.AddArg(sb) + return true + } + // match: (Load (OffPtr [off] (ITab (IMake (Convert (Addr {s} sb) _) _))) _) + // cond: t.IsUintptr() && isFixedSym(s, off) + // result: (Addr {fixedSym(b.Func, s, off)} sb) + for { + t := v.Type + if v_0.Op != OpOffPtr { + break + } + off := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpITab { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpIMake { + break + } + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpConvert { + break + } + v_0_0_0_0_0 := v_0_0_0_0.Args[0] + if v_0_0_0_0_0.Op != OpAddr { + break + } + s := auxToSym(v_0_0_0_0_0.Aux) + sb := v_0_0_0_0_0.Args[0] + if !(t.IsUintptr() && isFixedSym(s, off)) { + break + } + v.reset(OpAddr) + v.Aux = symToAux(fixedSym(b.Func, s, off)) + v.AddArg(sb) + return true + } + // match: (Load (OffPtr [off] (Addr {sym} _) ) _) + // cond: t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) + // result: (Const32 [fixed32(config, sym, off)]) + for { + t := v.Type + if v_0.Op != OpOffPtr { + break + } + off := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAddr { + break + } + sym := auxToSym(v_0_0.Aux) + if !(t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(fixed32(config, sym, off)) + return true + } + // match: (Load (OffPtr [off] (Convert (Addr {sym} _) _) ) _) + // cond: t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) + // result: (Const32 [fixed32(config, sym, off)]) + for { + t := v.Type + if v_0.Op != OpOffPtr { + break + } + off := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConvert { + break + } + 
v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAddr { + break + } + sym := auxToSym(v_0_0_0.Aux) + if !(t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(fixed32(config, sym, off)) + return true + } + // match: (Load (OffPtr [off] (ITab (IMake (Addr {sym} _) _))) _) + // cond: t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) + // result: (Const32 [fixed32(config, sym, off)]) + for { + t := v.Type + if v_0.Op != OpOffPtr { + break + } + off := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpITab { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpIMake { + break + } + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpAddr { + break + } + sym := auxToSym(v_0_0_0_0.Aux) + if !(t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(fixed32(config, sym, off)) + return true + } + // match: (Load (OffPtr [off] (ITab (IMake (Convert (Addr {sym} _) _) _))) _) + // cond: t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) + // result: (Const32 [fixed32(config, sym, off)]) + for { + t := v.Type + if v_0.Op != OpOffPtr { + break + } + off := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpITab { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpIMake { + break + } + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpConvert { + break + } + v_0_0_0_0_0 := v_0_0_0_0.Args[0] + if v_0_0_0_0_0.Op != OpAddr { + break + } + sym := auxToSym(v_0_0_0_0_0.Aux) + if !(t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(fixed32(config, sym, off)) + return true + } + return false +} +func rewriteValuegeneric_OpLsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x16 x (Const16 [c])) + // result: (Lsh16x64 x (Const64 [int64(uint16(c))])) + for { + t 
:= v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + v.reset(OpLsh16x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint16(c))) + v.AddArg2(x, v0) + return true + } + // match: (Lsh16x16 (Const16 [0]) _) + // result: (Const16 [0]) + for { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpLsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x32 x (Const32 [c])) + // result: (Lsh16x64 x (Const64 [int64(uint32(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpLsh16x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint32(c))) + v.AddArg2(x, v0) + return true + } + // match: (Lsh16x32 (Const16 [0]) _) + // result: (Const16 [0]) + for { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpLsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x64 (Const16 [c]) (Const64 [d])) + // result: (Const16 [c << uint64(d)]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(c << uint64(d)) + return true + } + // match: (Lsh16x64 x (Const64 [0])) + // result: x + for { + x := v_0 + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.copyOf(x) + return true + } + // match: (Lsh16x64 (Const16 [0]) _) + // result: (Const16 [0]) + for { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + 
return true + } + // match: (Lsh16x64 _ (Const64 [c])) + // cond: uint64(c) >= 16 + // result: (Const16 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 16) { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + // match: (Lsh16x64 (Lsh16x64 x (Const64 [c])) (Const64 [d])) + // cond: !uaddOvf(c,d) + // result: (Lsh16x64 x (Const64 [c+d])) + for { + t := v.Type + if v_0.Op != OpLsh16x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(!uaddOvf(c, d)) { + break + } + v.reset(OpLsh16x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c + d) + v.AddArg2(x, v0) + return true + } + // match: (Lsh16x64 i:(Rsh16x64 x (Const64 [c])) (Const64 [c])) + // cond: c >= 0 && c < 16 && i.Uses == 1 + // result: (And16 x (Const16 [int16(-1) << c])) + for { + i := v_0 + if i.Op != OpRsh16x64 { + break + } + _ = i.Args[1] + x := i.Args[0] + i_1 := i.Args[1] + if i_1.Op != OpConst64 { + break + } + c := auxIntToInt64(i_1.AuxInt) + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 16 && i.Uses == 1) { + break + } + v.reset(OpAnd16) + v0 := b.NewValue0(v.Pos, OpConst16, v.Type) + v0.AuxInt = int16ToAuxInt(int16(-1) << c) + v.AddArg2(x, v0) + return true + } + // match: (Lsh16x64 i:(Rsh16Ux64 x (Const64 [c])) (Const64 [c])) + // cond: c >= 0 && c < 16 && i.Uses == 1 + // result: (And16 x (Const16 [int16(-1) << c])) + for { + i := v_0 + if i.Op != OpRsh16Ux64 { + break + } + _ = i.Args[1] + x := i.Args[0] + i_1 := i.Args[1] + if i_1.Op != OpConst64 { + break + } + c := auxIntToInt64(i_1.AuxInt) + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 16 && i.Uses == 1) { + break + } + v.reset(OpAnd16) + v0 := b.NewValue0(v.Pos, OpConst16, v.Type) + v0.AuxInt = 
int16ToAuxInt(int16(-1) << c) + v.AddArg2(x, v0) + return true + } + // match: (Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) + // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) + // result: (Lsh16x64 x (Const64 [c1-c2+c3])) + for { + if v_0.Op != OpRsh16Ux64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLsh16x64 { + break + } + _ = v_0_0.Args[1] + x := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpConst64 { + break + } + c1 := auxIntToInt64(v_0_0_1.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c2 := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + c3 := auxIntToInt64(v_1.AuxInt) + if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) { + break + } + v.reset(OpLsh16x64) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(c1 - c2 + c3) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpLsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh16x8 x (Const8 [c])) + // result: (Lsh16x64 x (Const64 [int64(uint8(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + v.reset(OpLsh16x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint8(c))) + v.AddArg2(x, v0) + return true + } + // match: (Lsh16x8 (Const16 [0]) _) + // result: (Const16 [0]) + for { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpLsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x16 x (Const16 [c])) + // result: (Lsh32x64 x (Const64 [int64(uint16(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) 
+ v.reset(OpLsh32x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint16(c))) + v.AddArg2(x, v0) + return true + } + // match: (Lsh32x16 (Const32 [0]) _) + // result: (Const32 [0]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpLsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x32 x (Const32 [c])) + // result: (Lsh32x64 x (Const64 [int64(uint32(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpLsh32x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint32(c))) + v.AddArg2(x, v0) + return true + } + // match: (Lsh32x32 (Const32 [0]) _) + // result: (Const32 [0]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpLsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x64 (Const32 [c]) (Const64 [d])) + // result: (Const32 [c << uint64(d)]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(c << uint64(d)) + return true + } + // match: (Lsh32x64 x (Const64 [0])) + // result: x + for { + x := v_0 + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.copyOf(x) + return true + } + // match: (Lsh32x64 (Const32 [0]) _) + // result: (Const32 [0]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (Lsh32x64 _ (Const64 [c])) + // cond: uint64(c) >= 32 + // 
result: (Const32 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 32) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (Lsh32x64 (Lsh32x64 x (Const64 [c])) (Const64 [d])) + // cond: !uaddOvf(c,d) + // result: (Lsh32x64 x (Const64 [c+d])) + for { + t := v.Type + if v_0.Op != OpLsh32x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(!uaddOvf(c, d)) { + break + } + v.reset(OpLsh32x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c + d) + v.AddArg2(x, v0) + return true + } + // match: (Lsh32x64 i:(Rsh32x64 x (Const64 [c])) (Const64 [c])) + // cond: c >= 0 && c < 32 && i.Uses == 1 + // result: (And32 x (Const32 [int32(-1) << c])) + for { + i := v_0 + if i.Op != OpRsh32x64 { + break + } + _ = i.Args[1] + x := i.Args[0] + i_1 := i.Args[1] + if i_1.Op != OpConst64 { + break + } + c := auxIntToInt64(i_1.AuxInt) + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 32 && i.Uses == 1) { + break + } + v.reset(OpAnd32) + v0 := b.NewValue0(v.Pos, OpConst32, v.Type) + v0.AuxInt = int32ToAuxInt(int32(-1) << c) + v.AddArg2(x, v0) + return true + } + // match: (Lsh32x64 i:(Rsh32Ux64 x (Const64 [c])) (Const64 [c])) + // cond: c >= 0 && c < 32 && i.Uses == 1 + // result: (And32 x (Const32 [int32(-1) << c])) + for { + i := v_0 + if i.Op != OpRsh32Ux64 { + break + } + _ = i.Args[1] + x := i.Args[0] + i_1 := i.Args[1] + if i_1.Op != OpConst64 { + break + } + c := auxIntToInt64(i_1.AuxInt) + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 32 && i.Uses == 1) { + break + } + v.reset(OpAnd32) + v0 := b.NewValue0(v.Pos, OpConst32, v.Type) + v0.AuxInt = int32ToAuxInt(int32(-1) << c) + v.AddArg2(x, v0) + return true + } + // match: (Lsh32x64 
(Rsh32Ux64 (Lsh32x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) + // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) + // result: (Lsh32x64 x (Const64 [c1-c2+c3])) + for { + if v_0.Op != OpRsh32Ux64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLsh32x64 { + break + } + _ = v_0_0.Args[1] + x := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpConst64 { + break + } + c1 := auxIntToInt64(v_0_0_1.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c2 := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + c3 := auxIntToInt64(v_1.AuxInt) + if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) { + break + } + v.reset(OpLsh32x64) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(c1 - c2 + c3) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpLsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh32x8 x (Const8 [c])) + // result: (Lsh32x64 x (Const64 [int64(uint8(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + v.reset(OpLsh32x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint8(c))) + v.AddArg2(x, v0) + return true + } + // match: (Lsh32x8 (Const32 [0]) _) + // result: (Const32 [0]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpLsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh64x16 x (Const16 [c])) + // result: (Lsh64x64 x (Const64 [int64(uint16(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + v.reset(OpLsh64x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = 
int64ToAuxInt(int64(uint16(c))) + v.AddArg2(x, v0) + return true + } + // match: (Lsh64x16 (Const64 [0]) _) + // result: (Const64 [0]) + for { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpLsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh64x32 x (Const32 [c])) + // result: (Lsh64x64 x (Const64 [int64(uint32(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpLsh64x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint32(c))) + v.AddArg2(x, v0) + return true + } + // match: (Lsh64x32 (Const64 [0]) _) + // result: (Const64 [0]) + for { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpLsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x64 (Const64 [c]) (Const64 [d])) + // result: (Const64 [c << uint64(d)]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(c << uint64(d)) + return true + } + // match: (Lsh64x64 x (Const64 [0])) + // result: x + for { + x := v_0 + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.copyOf(x) + return true + } + // match: (Lsh64x64 (Const64 [0]) _) + // result: (Const64 [0]) + for { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Lsh64x64 _ (Const64 [c])) + // cond: uint64(c) >= 64 + // result: (Const64 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := 
auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 64) { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Lsh64x64 (Lsh64x64 x (Const64 [c])) (Const64 [d])) + // cond: !uaddOvf(c,d) + // result: (Lsh64x64 x (Const64 [c+d])) + for { + t := v.Type + if v_0.Op != OpLsh64x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(!uaddOvf(c, d)) { + break + } + v.reset(OpLsh64x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c + d) + v.AddArg2(x, v0) + return true + } + // match: (Lsh64x64 i:(Rsh64x64 x (Const64 [c])) (Const64 [c])) + // cond: c >= 0 && c < 64 && i.Uses == 1 + // result: (And64 x (Const64 [int64(-1) << c])) + for { + i := v_0 + if i.Op != OpRsh64x64 { + break + } + _ = i.Args[1] + x := i.Args[0] + i_1 := i.Args[1] + if i_1.Op != OpConst64 { + break + } + c := auxIntToInt64(i_1.AuxInt) + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 64 && i.Uses == 1) { + break + } + v.reset(OpAnd64) + v0 := b.NewValue0(v.Pos, OpConst64, v.Type) + v0.AuxInt = int64ToAuxInt(int64(-1) << c) + v.AddArg2(x, v0) + return true + } + // match: (Lsh64x64 i:(Rsh64Ux64 x (Const64 [c])) (Const64 [c])) + // cond: c >= 0 && c < 64 && i.Uses == 1 + // result: (And64 x (Const64 [int64(-1) << c])) + for { + i := v_0 + if i.Op != OpRsh64Ux64 { + break + } + _ = i.Args[1] + x := i.Args[0] + i_1 := i.Args[1] + if i_1.Op != OpConst64 { + break + } + c := auxIntToInt64(i_1.AuxInt) + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 64 && i.Uses == 1) { + break + } + v.reset(OpAnd64) + v0 := b.NewValue0(v.Pos, OpConst64, v.Type) + v0.AuxInt = int64ToAuxInt(int64(-1) << c) + v.AddArg2(x, v0) + return true + } + // match: (Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) + // 
cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) + // result: (Lsh64x64 x (Const64 [c1-c2+c3])) + for { + if v_0.Op != OpRsh64Ux64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLsh64x64 { + break + } + _ = v_0_0.Args[1] + x := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpConst64 { + break + } + c1 := auxIntToInt64(v_0_0_1.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c2 := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + c3 := auxIntToInt64(v_1.AuxInt) + if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) { + break + } + v.reset(OpLsh64x64) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(c1 - c2 + c3) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpLsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh64x8 x (Const8 [c])) + // result: (Lsh64x64 x (Const64 [int64(uint8(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + v.reset(OpLsh64x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint8(c))) + v.AddArg2(x, v0) + return true + } + // match: (Lsh64x8 (Const64 [0]) _) + // result: (Const64 [0]) + for { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpLsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x16 x (Const16 [c])) + // result: (Lsh8x64 x (Const64 [int64(uint16(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + v.reset(OpLsh8x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint16(c))) + v.AddArg2(x, v0) + return true + } + // match: (Lsh8x16 
(Const8 [0]) _) + // result: (Const8 [0]) + for { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpLsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x32 x (Const32 [c])) + // result: (Lsh8x64 x (Const64 [int64(uint32(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpLsh8x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint32(c))) + v.AddArg2(x, v0) + return true + } + // match: (Lsh8x32 (Const8 [0]) _) + // result: (Const8 [0]) + for { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpLsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x64 (Const8 [c]) (Const64 [d])) + // result: (Const8 [c << uint64(d)]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(c << uint64(d)) + return true + } + // match: (Lsh8x64 x (Const64 [0])) + // result: x + for { + x := v_0 + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.copyOf(x) + return true + } + // match: (Lsh8x64 (Const8 [0]) _) + // result: (Const8 [0]) + for { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + // match: (Lsh8x64 _ (Const64 [c])) + // cond: uint64(c) >= 8 + // result: (Const8 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 8) { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + // 
match: (Lsh8x64 (Lsh8x64 x (Const64 [c])) (Const64 [d])) + // cond: !uaddOvf(c,d) + // result: (Lsh8x64 x (Const64 [c+d])) + for { + t := v.Type + if v_0.Op != OpLsh8x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(!uaddOvf(c, d)) { + break + } + v.reset(OpLsh8x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c + d) + v.AddArg2(x, v0) + return true + } + // match: (Lsh8x64 i:(Rsh8x64 x (Const64 [c])) (Const64 [c])) + // cond: c >= 0 && c < 8 && i.Uses == 1 + // result: (And8 x (Const8 [int8(-1) << c])) + for { + i := v_0 + if i.Op != OpRsh8x64 { + break + } + _ = i.Args[1] + x := i.Args[0] + i_1 := i.Args[1] + if i_1.Op != OpConst64 { + break + } + c := auxIntToInt64(i_1.AuxInt) + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 8 && i.Uses == 1) { + break + } + v.reset(OpAnd8) + v0 := b.NewValue0(v.Pos, OpConst8, v.Type) + v0.AuxInt = int8ToAuxInt(int8(-1) << c) + v.AddArg2(x, v0) + return true + } + // match: (Lsh8x64 i:(Rsh8Ux64 x (Const64 [c])) (Const64 [c])) + // cond: c >= 0 && c < 8 && i.Uses == 1 + // result: (And8 x (Const8 [int8(-1) << c])) + for { + i := v_0 + if i.Op != OpRsh8Ux64 { + break + } + _ = i.Args[1] + x := i.Args[0] + i_1 := i.Args[1] + if i_1.Op != OpConst64 { + break + } + c := auxIntToInt64(i_1.AuxInt) + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 8 && i.Uses == 1) { + break + } + v.reset(OpAnd8) + v0 := b.NewValue0(v.Pos, OpConst8, v.Type) + v0.AuxInt = int8ToAuxInt(int8(-1) << c) + v.AddArg2(x, v0) + return true + } + // match: (Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) + // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) + // result: (Lsh8x64 x (Const64 [c1-c2+c3])) + for { + if v_0.Op != OpRsh8Ux64 { + 
break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLsh8x64 { + break + } + _ = v_0_0.Args[1] + x := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpConst64 { + break + } + c1 := auxIntToInt64(v_0_0_1.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c2 := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + c3 := auxIntToInt64(v_1.AuxInt) + if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) { + break + } + v.reset(OpLsh8x64) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(c1 - c2 + c3) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpLsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Lsh8x8 x (Const8 [c])) + // result: (Lsh8x64 x (Const64 [int64(uint8(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + v.reset(OpLsh8x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint8(c))) + v.AddArg2(x, v0) + return true + } + // match: (Lsh8x8 (Const8 [0]) _) + // result: (Const8 [0]) + for { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpMod16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Mod16 (Const16 [c]) (Const16 [d])) + // cond: d != 0 + // result: (Const16 [c % d]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst16 { + break + } + d := auxIntToInt16(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(c % d) + return true + } + // match: (Mod16 n (Const16 [c])) + // cond: isNonNegative(n) && isPowerOfTwo16(c) + // result: (And16 n (Const16 [c-1])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst16 { + break + } + 
c := auxIntToInt16(v_1.AuxInt) + if !(isNonNegative(n) && isPowerOfTwo16(c)) { + break + } + v.reset(OpAnd16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(c - 1) + v.AddArg2(n, v0) + return true + } + // match: (Mod16 n (Const16 [c])) + // cond: c < 0 && c != -1<<15 + // result: (Mod16 n (Const16 [-c])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(c < 0 && c != -1<<15) { + break + } + v.reset(OpMod16) + v.Type = t + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(-c) + v.AddArg2(n, v0) + return true + } + // match: (Mod16 x (Const16 [c])) + // cond: x.Op != OpConst16 && (c > 0 || c == -1<<15) + // result: (Sub16 x (Mul16 (Div16 x (Const16 [c])) (Const16 [c]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(x.Op != OpConst16 && (c > 0 || c == -1<<15)) { + break + } + v.reset(OpSub16) + v0 := b.NewValue0(v.Pos, OpMul16, t) + v1 := b.NewValue0(v.Pos, OpDiv16, t) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = int16ToAuxInt(c) + v1.AddArg2(x, v2) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpMod16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Mod16u (Const16 [c]) (Const16 [d])) + // cond: d != 0 + // result: (Const16 [int16(uint16(c) % uint16(d))]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst16 { + break + } + d := auxIntToInt16(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(int16(uint16(c) % uint16(d))) + return true + } + // match: (Mod16u n (Const16 [c])) + // cond: isPowerOfTwo16(c) + // result: (And16 n (Const16 [c-1])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(isPowerOfTwo16(c)) { + break + } + v.reset(OpAnd16) + v0 := 
b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(c - 1) + v.AddArg2(n, v0) + return true + } + // match: (Mod16u x (Const16 [c])) + // cond: x.Op != OpConst16 && c > 0 && umagicOK16(c) + // result: (Sub16 x (Mul16 (Div16u x (Const16 [c])) (Const16 [c]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(x.Op != OpConst16 && c > 0 && umagicOK16(c)) { + break + } + v.reset(OpSub16) + v0 := b.NewValue0(v.Pos, OpMul16, t) + v1 := b.NewValue0(v.Pos, OpDiv16u, t) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = int16ToAuxInt(c) + v1.AddArg2(x, v2) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpMod32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Mod32 (Const32 [c]) (Const32 [d])) + // cond: d != 0 + // result: (Const32 [c % d]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + break + } + d := auxIntToInt32(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(c % d) + return true + } + // match: (Mod32 n (Const32 [c])) + // cond: isNonNegative(n) && isPowerOfTwo32(c) + // result: (And32 n (Const32 [c-1])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(isNonNegative(n) && isPowerOfTwo32(c)) { + break + } + v.reset(OpAnd32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(c - 1) + v.AddArg2(n, v0) + return true + } + // match: (Mod32 n (Const32 [c])) + // cond: c < 0 && c != -1<<31 + // result: (Mod32 n (Const32 [-c])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(c < 0 && c != -1<<31) { + break + } + v.reset(OpMod32) + v.Type = t + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(-c) + v.AddArg2(n, v0) + return true + } + // match: (Mod32 x 
(Const32 [c])) + // cond: x.Op != OpConst32 && (c > 0 || c == -1<<31) + // result: (Sub32 x (Mul32 (Div32 x (Const32 [c])) (Const32 [c]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(x.Op != OpConst32 && (c > 0 || c == -1<<31)) { + break + } + v.reset(OpSub32) + v0 := b.NewValue0(v.Pos, OpMul32, t) + v1 := b.NewValue0(v.Pos, OpDiv32, t) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = int32ToAuxInt(c) + v1.AddArg2(x, v2) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpMod32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Mod32u (Const32 [c]) (Const32 [d])) + // cond: d != 0 + // result: (Const32 [int32(uint32(c) % uint32(d))]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + break + } + d := auxIntToInt32(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(int32(uint32(c) % uint32(d))) + return true + } + // match: (Mod32u n (Const32 [c])) + // cond: isPowerOfTwo32(c) + // result: (And32 n (Const32 [c-1])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(isPowerOfTwo32(c)) { + break + } + v.reset(OpAnd32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(c - 1) + v.AddArg2(n, v0) + return true + } + // match: (Mod32u x (Const32 [c])) + // cond: x.Op != OpConst32 && c > 0 && umagicOK32(c) + // result: (Sub32 x (Mul32 (Div32u x (Const32 [c])) (Const32 [c]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(x.Op != OpConst32 && c > 0 && umagicOK32(c)) { + break + } + v.reset(OpSub32) + v0 := b.NewValue0(v.Pos, OpMul32, t) + v1 := b.NewValue0(v.Pos, OpDiv32u, t) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = int32ToAuxInt(c) + v1.AddArg2(x, v2) + v0.AddArg2(v1, 
v2) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpMod64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Mod64 (Const64 [c]) (Const64 [d])) + // cond: d != 0 + // result: (Const64 [c % d]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(c % d) + return true + } + // match: (Mod64 n (Const64 [c])) + // cond: isNonNegative(n) && isPowerOfTwo64(c) + // result: (And64 n (Const64 [c-1])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(isNonNegative(n) && isPowerOfTwo64(c)) { + break + } + v.reset(OpAnd64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c - 1) + v.AddArg2(n, v0) + return true + } + // match: (Mod64 n (Const64 [-1<<63])) + // cond: isNonNegative(n) + // result: n + for { + n := v_0 + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 || !(isNonNegative(n)) { + break + } + v.copyOf(n) + return true + } + // match: (Mod64 n (Const64 [c])) + // cond: c < 0 && c != -1<<63 + // result: (Mod64 n (Const64 [-c])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c < 0 && c != -1<<63) { + break + } + v.reset(OpMod64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(-c) + v.AddArg2(n, v0) + return true + } + // match: (Mod64 x (Const64 [c])) + // cond: x.Op != OpConst64 && (c > 0 || c == -1<<63) + // result: (Sub64 x (Mul64 (Div64 x (Const64 [c])) (Const64 [c]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(x.Op != OpConst64 && (c > 0 || c == -1<<63)) { + break + } + v.reset(OpSub64) + v0 := b.NewValue0(v.Pos, OpMul64, t) + v1 := b.NewValue0(v.Pos, OpDiv64, t) + v2 
:= b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(c) + v1.AddArg2(x, v2) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpMod64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Mod64u (Const64 [c]) (Const64 [d])) + // cond: d != 0 + // result: (Const64 [int64(uint64(c) % uint64(d))]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d))) + return true + } + // match: (Mod64u n (Const64 [c])) + // cond: isPowerOfTwo64(c) + // result: (And64 n (Const64 [c-1])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c)) { + break + } + v.reset(OpAnd64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c - 1) + v.AddArg2(n, v0) + return true + } + // match: (Mod64u n (Const64 [-1<<63])) + // result: (And64 n (Const64 [1<<63-1])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 { + break + } + v.reset(OpAnd64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(1<<63 - 1) + v.AddArg2(n, v0) + return true + } + // match: (Mod64u x (Const64 [c])) + // cond: x.Op != OpConst64 && c > 0 && umagicOK64(c) + // result: (Sub64 x (Mul64 (Div64u x (Const64 [c])) (Const64 [c]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(x.Op != OpConst64 && c > 0 && umagicOK64(c)) { + break + } + v.reset(OpSub64) + v0 := b.NewValue0(v.Pos, OpMul64, t) + v1 := b.NewValue0(v.Pos, OpDiv64u, t) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(c) + v1.AddArg2(x, v2) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) + return true + } + return false +} +func 
rewriteValuegeneric_OpMod8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Mod8 (Const8 [c]) (Const8 [d])) + // cond: d != 0 + // result: (Const8 [c % d]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst8 { + break + } + d := auxIntToInt8(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(c % d) + return true + } + // match: (Mod8 n (Const8 [c])) + // cond: isNonNegative(n) && isPowerOfTwo8(c) + // result: (And8 n (Const8 [c-1])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(isNonNegative(n) && isPowerOfTwo8(c)) { + break + } + v.reset(OpAnd8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(c - 1) + v.AddArg2(n, v0) + return true + } + // match: (Mod8 n (Const8 [c])) + // cond: c < 0 && c != -1<<7 + // result: (Mod8 n (Const8 [-c])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(c < 0 && c != -1<<7) { + break + } + v.reset(OpMod8) + v.Type = t + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(-c) + v.AddArg2(n, v0) + return true + } + // match: (Mod8 x (Const8 [c])) + // cond: x.Op != OpConst8 && (c > 0 || c == -1<<7) + // result: (Sub8 x (Mul8 (Div8 x (Const8 [c])) (Const8 [c]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(x.Op != OpConst8 && (c > 0 || c == -1<<7)) { + break + } + v.reset(OpSub8) + v0 := b.NewValue0(v.Pos, OpMul8, t) + v1 := b.NewValue0(v.Pos, OpDiv8, t) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = int8ToAuxInt(c) + v1.AddArg2(x, v2) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpMod8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Mod8u (Const8 [c]) (Const8 [d])) + // cond: d != 0 + // result: (Const8 
[int8(uint8(c) % uint8(d))]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst8 { + break + } + d := auxIntToInt8(v_1.AuxInt) + if !(d != 0) { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(int8(uint8(c) % uint8(d))) + return true + } + // match: (Mod8u n (Const8 [c])) + // cond: isPowerOfTwo8(c) + // result: (And8 n (Const8 [c-1])) + for { + t := v.Type + n := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(isPowerOfTwo8(c)) { + break + } + v.reset(OpAnd8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(c - 1) + v.AddArg2(n, v0) + return true + } + // match: (Mod8u x (Const8 [c])) + // cond: x.Op != OpConst8 && c > 0 && umagicOK8( c) + // result: (Sub8 x (Mul8 (Div8u x (Const8 [c])) (Const8 [c]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(x.Op != OpConst8 && c > 0 && umagicOK8(c)) { + break + } + v.reset(OpSub8) + v0 := b.NewValue0(v.Pos, OpMul8, t) + v1 := b.NewValue0(v.Pos, OpDiv8u, t) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = int8ToAuxInt(c) + v1.AddArg2(x, v2) + v0.AddArg2(v1, v2) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpMove(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Move {t} [n] dst1 src mem:(Zero {t} [n] dst2 _)) + // cond: isSamePtr(src, dst2) + // result: (Zero {t} [n] dst1 mem) + for { + n := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst1 := v_0 + src := v_1 + mem := v_2 + if mem.Op != OpZero || auxIntToInt64(mem.AuxInt) != n || auxToType(mem.Aux) != t { + break + } + dst2 := mem.Args[0] + if !(isSamePtr(src, dst2)) { + break + } + v.reset(OpZero) + v.AuxInt = int64ToAuxInt(n) + v.Aux = typeToAux(t) + v.AddArg2(dst1, mem) + return true + } + // match: (Move {t} [n] dst1 src mem:(VarDef (Zero {t} [n] dst0 _))) + // cond: 
isSamePtr(src, dst0) + // result: (Zero {t} [n] dst1 mem) + for { + n := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst1 := v_0 + src := v_1 + mem := v_2 + if mem.Op != OpVarDef { + break + } + mem_0 := mem.Args[0] + if mem_0.Op != OpZero || auxIntToInt64(mem_0.AuxInt) != n || auxToType(mem_0.Aux) != t { + break + } + dst0 := mem_0.Args[0] + if !(isSamePtr(src, dst0)) { + break + } + v.reset(OpZero) + v.AuxInt = int64ToAuxInt(n) + v.Aux = typeToAux(t) + v.AddArg2(dst1, mem) + return true + } + // match: (Move {t} [n] dst (Addr {sym} (SB)) mem) + // cond: symIsROZero(sym) + // result: (Zero {t} [n] dst mem) + for { + n := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst := v_0 + if v_1.Op != OpAddr { + break + } + sym := auxToSym(v_1.Aux) + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpSB { + break + } + mem := v_2 + if !(symIsROZero(sym)) { + break + } + v.reset(OpZero) + v.AuxInt = int64ToAuxInt(n) + v.Aux = typeToAux(t) + v.AddArg2(dst, mem) + return true + } + // match: (Move {t1} [n] dst1 src1 store:(Store {t2} op:(OffPtr [o2] dst2) _ mem)) + // cond: isSamePtr(dst1, dst2) && store.Uses == 1 && n >= o2 + t2.Size() && disjoint(src1, n, op, t2.Size()) && clobber(store) + // result: (Move {t1} [n] dst1 src1 mem) + for { + n := auxIntToInt64(v.AuxInt) + t1 := auxToType(v.Aux) + dst1 := v_0 + src1 := v_1 + store := v_2 + if store.Op != OpStore { + break + } + t2 := auxToType(store.Aux) + mem := store.Args[2] + op := store.Args[0] + if op.Op != OpOffPtr { + break + } + o2 := auxIntToInt64(op.AuxInt) + dst2 := op.Args[0] + if !(isSamePtr(dst1, dst2) && store.Uses == 1 && n >= o2+t2.Size() && disjoint(src1, n, op, t2.Size()) && clobber(store)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(n) + v.Aux = typeToAux(t1) + v.AddArg3(dst1, src1, mem) + return true + } + // match: (Move {t} [n] dst1 src1 move:(Move {t} [n] dst2 _ mem)) + // cond: move.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move) + // result: (Move {t} [n] 
dst1 src1 mem) + for { + n := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst1 := v_0 + src1 := v_1 + move := v_2 + if move.Op != OpMove || auxIntToInt64(move.AuxInt) != n || auxToType(move.Aux) != t { + break + } + mem := move.Args[2] + dst2 := move.Args[0] + if !(move.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(n) + v.Aux = typeToAux(t) + v.AddArg3(dst1, src1, mem) + return true + } + // match: (Move {t} [n] dst1 src1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem))) + // cond: move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move, vardef) + // result: (Move {t} [n] dst1 src1 (VarDef {x} mem)) + for { + n := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst1 := v_0 + src1 := v_1 + vardef := v_2 + if vardef.Op != OpVarDef { + break + } + x := auxToSym(vardef.Aux) + move := vardef.Args[0] + if move.Op != OpMove || auxIntToInt64(move.AuxInt) != n || auxToType(move.Aux) != t { + break + } + mem := move.Args[2] + dst2 := move.Args[0] + if !(move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move, vardef)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(n) + v.Aux = typeToAux(t) + v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem) + v0.Aux = symToAux(x) + v0.AddArg(mem) + v.AddArg3(dst1, src1, v0) + return true + } + // match: (Move {t} [n] dst1 src1 zero:(Zero {t} [n] dst2 mem)) + // cond: zero.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero) + // result: (Move {t} [n] dst1 src1 mem) + for { + n := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst1 := v_0 + src1 := v_1 + zero := v_2 + if zero.Op != OpZero || auxIntToInt64(zero.AuxInt) != n || auxToType(zero.Aux) != t { + break + } + mem := zero.Args[1] + dst2 := zero.Args[0] + if !(zero.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && 
clobber(zero)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(n) + v.Aux = typeToAux(t) + v.AddArg3(dst1, src1, mem) + return true + } + // match: (Move {t} [n] dst1 src1 vardef:(VarDef {x} zero:(Zero {t} [n] dst2 mem))) + // cond: zero.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero, vardef) + // result: (Move {t} [n] dst1 src1 (VarDef {x} mem)) + for { + n := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst1 := v_0 + src1 := v_1 + vardef := v_2 + if vardef.Op != OpVarDef { + break + } + x := auxToSym(vardef.Aux) + zero := vardef.Args[0] + if zero.Op != OpZero || auxIntToInt64(zero.AuxInt) != n || auxToType(zero.Aux) != t { + break + } + mem := zero.Args[1] + dst2 := zero.Args[0] + if !(zero.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero, vardef)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(n) + v.Aux = typeToAux(t) + v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem) + v0.Aux = symToAux(x) + v0.AddArg(mem) + v.AddArg3(dst1, src1, v0) + return true + } + // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [0] p3) d2 _))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && o2 == t3.Size() && n == t2.Size() + t3.Size() + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [0] dst) d2 mem)) + for { + n := auxIntToInt64(v.AuxInt) + t1 := auxToType(v.Aux) + dst := v_0 + p1 := v_1 + mem := v_2 + if mem.Op != OpStore { + break + } + t2 := auxToType(mem.Aux) + _ = mem.Args[2] + op2 := mem.Args[0] + if op2.Op != OpOffPtr { + break + } + tt2 := op2.Type + o2 := auxIntToInt64(op2.AuxInt) + p2 := op2.Args[0] + d1 := mem.Args[1] + mem_2 := mem.Args[2] + if mem_2.Op != OpStore { + break + } + t3 := auxToType(mem_2.Aux) + d2 := mem_2.Args[1] + op3 := mem_2.Args[0] + 
if op3.Op != OpOffPtr { + break + } + tt3 := op3.Type + if auxIntToInt64(op3.AuxInt) != 0 { + break + } + p3 := op3.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && o2 == t3.Size() && n == t2.Size()+t3.Size()) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t2) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = int64ToAuxInt(o2) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t3) + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) + v2.AuxInt = int64ToAuxInt(0) + v2.AddArg(dst) + v1.AddArg3(v2, d2, mem) + v.AddArg3(v0, d1, v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [0] p4) d3 _)))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size() + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [0] dst) d3 mem))) + for { + n := auxIntToInt64(v.AuxInt) + t1 := auxToType(v.Aux) + dst := v_0 + p1 := v_1 + mem := v_2 + if mem.Op != OpStore { + break + } + t2 := auxToType(mem.Aux) + _ = mem.Args[2] + op2 := mem.Args[0] + if op2.Op != OpOffPtr { + break + } + tt2 := op2.Type + o2 := auxIntToInt64(op2.AuxInt) + p2 := op2.Args[0] + d1 := mem.Args[1] + mem_2 := mem.Args[2] + if mem_2.Op != OpStore { + break + } + t3 := auxToType(mem_2.Aux) + _ = mem_2.Args[2] + op3 := mem_2.Args[0] + if op3.Op != OpOffPtr { + break + } + tt3 := op3.Type + o3 := auxIntToInt64(op3.AuxInt) + p3 := op3.Args[0] + d2 := mem_2.Args[1] + mem_2_2 := mem_2.Args[2] + if mem_2_2.Op != OpStore { + break + } + 
t4 := auxToType(mem_2_2.Aux) + d3 := mem_2_2.Args[1] + op4 := mem_2_2.Args[0] + if op4.Op != OpOffPtr { + break + } + tt4 := op4.Type + if auxIntToInt64(op4.AuxInt) != 0 { + break + } + p4 := op4.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size()+t3.Size()+t4.Size()) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t2) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = int64ToAuxInt(o2) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t3) + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) + v2.AuxInt = int64ToAuxInt(o3) + v2.AddArg(dst) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = typeToAux(t4) + v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) + v4.AuxInt = int64ToAuxInt(0) + v4.AddArg(dst) + v3.AddArg3(v4, d3, mem) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [o4] p4) d3 (Store {t5} op5:(OffPtr [0] p5) d4 _))))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == t5.Size() && o3-o4 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size() + t5.Size() + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [o4] dst) d3 (Store {t5} (OffPtr [0] dst) d4 mem)))) + for { + n := auxIntToInt64(v.AuxInt) + t1 := auxToType(v.Aux) + dst := v_0 + p1 := v_1 + mem := 
v_2 + if mem.Op != OpStore { + break + } + t2 := auxToType(mem.Aux) + _ = mem.Args[2] + op2 := mem.Args[0] + if op2.Op != OpOffPtr { + break + } + tt2 := op2.Type + o2 := auxIntToInt64(op2.AuxInt) + p2 := op2.Args[0] + d1 := mem.Args[1] + mem_2 := mem.Args[2] + if mem_2.Op != OpStore { + break + } + t3 := auxToType(mem_2.Aux) + _ = mem_2.Args[2] + op3 := mem_2.Args[0] + if op3.Op != OpOffPtr { + break + } + tt3 := op3.Type + o3 := auxIntToInt64(op3.AuxInt) + p3 := op3.Args[0] + d2 := mem_2.Args[1] + mem_2_2 := mem_2.Args[2] + if mem_2_2.Op != OpStore { + break + } + t4 := auxToType(mem_2_2.Aux) + _ = mem_2_2.Args[2] + op4 := mem_2_2.Args[0] + if op4.Op != OpOffPtr { + break + } + tt4 := op4.Type + o4 := auxIntToInt64(op4.AuxInt) + p4 := op4.Args[0] + d3 := mem_2_2.Args[1] + mem_2_2_2 := mem_2_2.Args[2] + if mem_2_2_2.Op != OpStore { + break + } + t5 := auxToType(mem_2_2_2.Aux) + d4 := mem_2_2_2.Args[1] + op5 := mem_2_2_2.Args[0] + if op5.Op != OpOffPtr { + break + } + tt5 := op5.Type + if auxIntToInt64(op5.AuxInt) != 0 { + break + } + p5 := op5.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == t5.Size() && o3-o4 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size()+t3.Size()+t4.Size()+t5.Size()) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t2) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = int64ToAuxInt(o2) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t3) + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) + v2.AuxInt = int64ToAuxInt(o3) + v2.AddArg(dst) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = typeToAux(t4) + v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) + v4.AuxInt = int64ToAuxInt(o4) + v4.AddArg(dst) + v5 
:= b.NewValue0(v.Pos, OpStore, types.TypeMem) + v5.Aux = typeToAux(t5) + v6 := b.NewValue0(v.Pos, OpOffPtr, tt5) + v6.AuxInt = int64ToAuxInt(0) + v6.AddArg(dst) + v5.AddArg3(v6, d4, mem) + v3.AddArg3(v4, d3, v5) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [0] p3) d2 _)))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && o2 == t3.Size() && n == t2.Size() + t3.Size() + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [0] dst) d2 mem)) + for { + n := auxIntToInt64(v.AuxInt) + t1 := auxToType(v.Aux) + dst := v_0 + p1 := v_1 + mem := v_2 + if mem.Op != OpVarDef { + break + } + mem_0 := mem.Args[0] + if mem_0.Op != OpStore { + break + } + t2 := auxToType(mem_0.Aux) + _ = mem_0.Args[2] + op2 := mem_0.Args[0] + if op2.Op != OpOffPtr { + break + } + tt2 := op2.Type + o2 := auxIntToInt64(op2.AuxInt) + p2 := op2.Args[0] + d1 := mem_0.Args[1] + mem_0_2 := mem_0.Args[2] + if mem_0_2.Op != OpStore { + break + } + t3 := auxToType(mem_0_2.Aux) + d2 := mem_0_2.Args[1] + op3 := mem_0_2.Args[0] + if op3.Op != OpOffPtr { + break + } + tt3 := op3.Type + if auxIntToInt64(op3.AuxInt) != 0 { + break + } + p3 := op3.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && o2 == t3.Size() && n == t2.Size()+t3.Size()) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t2) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = int64ToAuxInt(o2) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t3) + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) + v2.AuxInt = int64ToAuxInt(0) + v2.AddArg(dst) + v1.AddArg3(v2, d2, mem) + v.AddArg3(v0, d1, v1) + return true + } + // match: 
(Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [0] p4) d3 _))))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size() + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [0] dst) d3 mem))) + for { + n := auxIntToInt64(v.AuxInt) + t1 := auxToType(v.Aux) + dst := v_0 + p1 := v_1 + mem := v_2 + if mem.Op != OpVarDef { + break + } + mem_0 := mem.Args[0] + if mem_0.Op != OpStore { + break + } + t2 := auxToType(mem_0.Aux) + _ = mem_0.Args[2] + op2 := mem_0.Args[0] + if op2.Op != OpOffPtr { + break + } + tt2 := op2.Type + o2 := auxIntToInt64(op2.AuxInt) + p2 := op2.Args[0] + d1 := mem_0.Args[1] + mem_0_2 := mem_0.Args[2] + if mem_0_2.Op != OpStore { + break + } + t3 := auxToType(mem_0_2.Aux) + _ = mem_0_2.Args[2] + op3 := mem_0_2.Args[0] + if op3.Op != OpOffPtr { + break + } + tt3 := op3.Type + o3 := auxIntToInt64(op3.AuxInt) + p3 := op3.Args[0] + d2 := mem_0_2.Args[1] + mem_0_2_2 := mem_0_2.Args[2] + if mem_0_2_2.Op != OpStore { + break + } + t4 := auxToType(mem_0_2_2.Aux) + d3 := mem_0_2_2.Args[1] + op4 := mem_0_2_2.Args[0] + if op4.Op != OpOffPtr { + break + } + tt4 := op4.Type + if auxIntToInt64(op4.AuxInt) != 0 { + break + } + p4 := op4.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size()+t3.Size()+t4.Size()) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t2) + v0 := b.NewValue0(v.Pos, OpOffPtr, 
tt2) + v0.AuxInt = int64ToAuxInt(o2) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t3) + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) + v2.AuxInt = int64ToAuxInt(o3) + v2.AddArg(dst) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = typeToAux(t4) + v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) + v4.AuxInt = int64ToAuxInt(0) + v4.AddArg(dst) + v3.AddArg3(v4, d3, mem) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [o4] p4) d3 (Store {t5} op5:(OffPtr [0] p5) d4 _)))))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == t5.Size() && o3-o4 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size() + t5.Size() + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [o4] dst) d3 (Store {t5} (OffPtr [0] dst) d4 mem)))) + for { + n := auxIntToInt64(v.AuxInt) + t1 := auxToType(v.Aux) + dst := v_0 + p1 := v_1 + mem := v_2 + if mem.Op != OpVarDef { + break + } + mem_0 := mem.Args[0] + if mem_0.Op != OpStore { + break + } + t2 := auxToType(mem_0.Aux) + _ = mem_0.Args[2] + op2 := mem_0.Args[0] + if op2.Op != OpOffPtr { + break + } + tt2 := op2.Type + o2 := auxIntToInt64(op2.AuxInt) + p2 := op2.Args[0] + d1 := mem_0.Args[1] + mem_0_2 := mem_0.Args[2] + if mem_0_2.Op != OpStore { + break + } + t3 := auxToType(mem_0_2.Aux) + _ = mem_0_2.Args[2] + op3 := mem_0_2.Args[0] + if op3.Op != OpOffPtr { + break + } + tt3 := op3.Type + o3 := auxIntToInt64(op3.AuxInt) + p3 := op3.Args[0] + d2 := mem_0_2.Args[1] + mem_0_2_2 := mem_0_2.Args[2] 
+ if mem_0_2_2.Op != OpStore { + break + } + t4 := auxToType(mem_0_2_2.Aux) + _ = mem_0_2_2.Args[2] + op4 := mem_0_2_2.Args[0] + if op4.Op != OpOffPtr { + break + } + tt4 := op4.Type + o4 := auxIntToInt64(op4.AuxInt) + p4 := op4.Args[0] + d3 := mem_0_2_2.Args[1] + mem_0_2_2_2 := mem_0_2_2.Args[2] + if mem_0_2_2_2.Op != OpStore { + break + } + t5 := auxToType(mem_0_2_2_2.Aux) + d4 := mem_0_2_2_2.Args[1] + op5 := mem_0_2_2_2.Args[0] + if op5.Op != OpOffPtr { + break + } + tt5 := op5.Type + if auxIntToInt64(op5.AuxInt) != 0 { + break + } + p5 := op5.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == t5.Size() && o3-o4 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size()+t3.Size()+t4.Size()+t5.Size()) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t2) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = int64ToAuxInt(o2) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t3) + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) + v2.AuxInt = int64ToAuxInt(o3) + v2.AddArg(dst) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = typeToAux(t4) + v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) + v4.AuxInt = int64ToAuxInt(o4) + v4.AddArg(dst) + v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v5.Aux = typeToAux(t5) + v6 := b.NewValue0(v.Pos, OpOffPtr, tt5) + v6.AuxInt = int64ToAuxInt(0) + v6.AddArg(dst) + v5.AddArg3(v6, d4, mem) + v3.AddArg3(v4, d3, v5) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Zero {t3} [n] p3 _))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && 
registerizable(b, t2) && n >= o2 + t2.Size() + // result: (Store {t2} (OffPtr [o2] dst) d1 (Zero {t1} [n] dst mem)) + for { + n := auxIntToInt64(v.AuxInt) + t1 := auxToType(v.Aux) + dst := v_0 + p1 := v_1 + mem := v_2 + if mem.Op != OpStore { + break + } + t2 := auxToType(mem.Aux) + _ = mem.Args[2] + op2 := mem.Args[0] + if op2.Op != OpOffPtr { + break + } + tt2 := op2.Type + o2 := auxIntToInt64(op2.AuxInt) + p2 := op2.Args[0] + d1 := mem.Args[1] + mem_2 := mem.Args[2] + if mem_2.Op != OpZero || auxIntToInt64(mem_2.AuxInt) != n { + break + } + t3 := auxToType(mem_2.Aux) + p3 := mem_2.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && n >= o2+t2.Size()) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t2) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = int64ToAuxInt(o2) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v1.AuxInt = int64ToAuxInt(n) + v1.Aux = typeToAux(t1) + v1.AddArg2(dst, mem) + v.AddArg3(v0, d1, v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Zero {t4} [n] p4 _)))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && n >= o2 + t2.Size() && n >= o3 + t3.Size() + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Zero {t1} [n] dst mem))) + for { + n := auxIntToInt64(v.AuxInt) + t1 := auxToType(v.Aux) + dst := v_0 + p1 := v_1 + mem := v_2 + if mem.Op != OpStore { + break + } + t2 := auxToType(mem.Aux) + _ = mem.Args[2] + mem_0 := mem.Args[0] + if mem_0.Op != OpOffPtr { + break + } + tt2 := mem_0.Type + o2 := auxIntToInt64(mem_0.AuxInt) + p2 := mem_0.Args[0] + d1 := mem.Args[1] + mem_2 := mem.Args[2] + if mem_2.Op != OpStore { + break + } + 
t3 := auxToType(mem_2.Aux) + _ = mem_2.Args[2] + mem_2_0 := mem_2.Args[0] + if mem_2_0.Op != OpOffPtr { + break + } + tt3 := mem_2_0.Type + o3 := auxIntToInt64(mem_2_0.AuxInt) + p3 := mem_2_0.Args[0] + d2 := mem_2.Args[1] + mem_2_2 := mem_2.Args[2] + if mem_2_2.Op != OpZero || auxIntToInt64(mem_2_2.AuxInt) != n { + break + } + t4 := auxToType(mem_2_2.Aux) + p4 := mem_2_2.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && n >= o2+t2.Size() && n >= o3+t3.Size()) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t2) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = int64ToAuxInt(o2) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t3) + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) + v2.AuxInt = int64ToAuxInt(o3) + v2.AddArg(dst) + v3 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v3.AuxInt = int64ToAuxInt(n) + v3.Aux = typeToAux(t1) + v3.AddArg2(dst, mem) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Store {t4} (OffPtr [o4] p4) d3 (Zero {t5} [n] p5 _))))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size() + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [o4] dst) d3 (Zero {t1} [n] dst mem)))) + for { + n := auxIntToInt64(v.AuxInt) + t1 := auxToType(v.Aux) + dst := v_0 + p1 := v_1 + mem := v_2 + if mem.Op != OpStore { + break + } + t2 := 
auxToType(mem.Aux) + _ = mem.Args[2] + mem_0 := mem.Args[0] + if mem_0.Op != OpOffPtr { + break + } + tt2 := mem_0.Type + o2 := auxIntToInt64(mem_0.AuxInt) + p2 := mem_0.Args[0] + d1 := mem.Args[1] + mem_2 := mem.Args[2] + if mem_2.Op != OpStore { + break + } + t3 := auxToType(mem_2.Aux) + _ = mem_2.Args[2] + mem_2_0 := mem_2.Args[0] + if mem_2_0.Op != OpOffPtr { + break + } + tt3 := mem_2_0.Type + o3 := auxIntToInt64(mem_2_0.AuxInt) + p3 := mem_2_0.Args[0] + d2 := mem_2.Args[1] + mem_2_2 := mem_2.Args[2] + if mem_2_2.Op != OpStore { + break + } + t4 := auxToType(mem_2_2.Aux) + _ = mem_2_2.Args[2] + mem_2_2_0 := mem_2_2.Args[0] + if mem_2_2_0.Op != OpOffPtr { + break + } + tt4 := mem_2_2_0.Type + o4 := auxIntToInt64(mem_2_2_0.AuxInt) + p4 := mem_2_2_0.Args[0] + d3 := mem_2_2.Args[1] + mem_2_2_2 := mem_2_2.Args[2] + if mem_2_2_2.Op != OpZero || auxIntToInt64(mem_2_2_2.AuxInt) != n { + break + } + t5 := auxToType(mem_2_2_2.Aux) + p5 := mem_2_2_2.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2+t2.Size() && n >= o3+t3.Size() && n >= o4+t4.Size()) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t2) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = int64ToAuxInt(o2) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t3) + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) + v2.AuxInt = int64ToAuxInt(o3) + v2.AddArg(dst) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = typeToAux(t4) + v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) + v4.AuxInt = int64ToAuxInt(o4) + v4.AddArg(dst) + v5 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v5.AuxInt = int64ToAuxInt(n) + v5.Aux = typeToAux(t1) + v5.AddArg2(dst, mem) + v3.AddArg3(v4, d3, v5) + v1.AddArg3(v2, d2, v3) + 
v.AddArg3(v0, d1, v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Store {t4} (OffPtr [o4] p4) d3 (Store {t5} (OffPtr [o5] p5) d4 (Zero {t6} [n] p6 _)))))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && t6.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size() && n >= o5 + t5.Size() + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [o4] dst) d3 (Store {t5} (OffPtr [o5] dst) d4 (Zero {t1} [n] dst mem))))) + for { + n := auxIntToInt64(v.AuxInt) + t1 := auxToType(v.Aux) + dst := v_0 + p1 := v_1 + mem := v_2 + if mem.Op != OpStore { + break + } + t2 := auxToType(mem.Aux) + _ = mem.Args[2] + mem_0 := mem.Args[0] + if mem_0.Op != OpOffPtr { + break + } + tt2 := mem_0.Type + o2 := auxIntToInt64(mem_0.AuxInt) + p2 := mem_0.Args[0] + d1 := mem.Args[1] + mem_2 := mem.Args[2] + if mem_2.Op != OpStore { + break + } + t3 := auxToType(mem_2.Aux) + _ = mem_2.Args[2] + mem_2_0 := mem_2.Args[0] + if mem_2_0.Op != OpOffPtr { + break + } + tt3 := mem_2_0.Type + o3 := auxIntToInt64(mem_2_0.AuxInt) + p3 := mem_2_0.Args[0] + d2 := mem_2.Args[1] + mem_2_2 := mem_2.Args[2] + if mem_2_2.Op != OpStore { + break + } + t4 := auxToType(mem_2_2.Aux) + _ = mem_2_2.Args[2] + mem_2_2_0 := mem_2_2.Args[0] + if mem_2_2_0.Op != OpOffPtr { + break + } + tt4 := mem_2_2_0.Type + o4 := auxIntToInt64(mem_2_2_0.AuxInt) + p4 := mem_2_2_0.Args[0] + d3 := mem_2_2.Args[1] + mem_2_2_2 := mem_2_2.Args[2] + if mem_2_2_2.Op != OpStore { + break + } + t5 := auxToType(mem_2_2_2.Aux) + _ = mem_2_2_2.Args[2] + mem_2_2_2_0 := mem_2_2_2.Args[0] + 
if mem_2_2_2_0.Op != OpOffPtr { + break + } + tt5 := mem_2_2_2_0.Type + o5 := auxIntToInt64(mem_2_2_2_0.AuxInt) + p5 := mem_2_2_2_0.Args[0] + d4 := mem_2_2_2.Args[1] + mem_2_2_2_2 := mem_2_2_2.Args[2] + if mem_2_2_2_2.Op != OpZero || auxIntToInt64(mem_2_2_2_2.AuxInt) != n { + break + } + t6 := auxToType(mem_2_2_2_2.Aux) + p6 := mem_2_2_2_2.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && t6.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2+t2.Size() && n >= o3+t3.Size() && n >= o4+t4.Size() && n >= o5+t5.Size()) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t2) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = int64ToAuxInt(o2) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t3) + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) + v2.AuxInt = int64ToAuxInt(o3) + v2.AddArg(dst) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = typeToAux(t4) + v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) + v4.AuxInt = int64ToAuxInt(o4) + v4.AddArg(dst) + v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v5.Aux = typeToAux(t5) + v6 := b.NewValue0(v.Pos, OpOffPtr, tt5) + v6.AuxInt = int64ToAuxInt(o5) + v6.AddArg(dst) + v7 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v7.AuxInt = int64ToAuxInt(n) + v7.Aux = typeToAux(t1) + v7.AddArg2(dst, mem) + v5.AddArg3(v6, d4, v7) + v3.AddArg3(v4, d3, v5) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Zero {t3} [n] p3 _)))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && n >= o2 + t2.Size() + // 
result: (Store {t2} (OffPtr [o2] dst) d1 (Zero {t1} [n] dst mem)) + for { + n := auxIntToInt64(v.AuxInt) + t1 := auxToType(v.Aux) + dst := v_0 + p1 := v_1 + mem := v_2 + if mem.Op != OpVarDef { + break + } + mem_0 := mem.Args[0] + if mem_0.Op != OpStore { + break + } + t2 := auxToType(mem_0.Aux) + _ = mem_0.Args[2] + op2 := mem_0.Args[0] + if op2.Op != OpOffPtr { + break + } + tt2 := op2.Type + o2 := auxIntToInt64(op2.AuxInt) + p2 := op2.Args[0] + d1 := mem_0.Args[1] + mem_0_2 := mem_0.Args[2] + if mem_0_2.Op != OpZero || auxIntToInt64(mem_0_2.AuxInt) != n { + break + } + t3 := auxToType(mem_0_2.Aux) + p3 := mem_0_2.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && n >= o2+t2.Size()) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t2) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = int64ToAuxInt(o2) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v1.AuxInt = int64ToAuxInt(n) + v1.Aux = typeToAux(t1) + v1.AddArg2(dst, mem) + v.AddArg3(v0, d1, v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Zero {t4} [n] p4 _))))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && n >= o2 + t2.Size() && n >= o3 + t3.Size() + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Zero {t1} [n] dst mem))) + for { + n := auxIntToInt64(v.AuxInt) + t1 := auxToType(v.Aux) + dst := v_0 + p1 := v_1 + mem := v_2 + if mem.Op != OpVarDef { + break + } + mem_0 := mem.Args[0] + if mem_0.Op != OpStore { + break + } + t2 := auxToType(mem_0.Aux) + _ = mem_0.Args[2] + mem_0_0 := mem_0.Args[0] + if mem_0_0.Op != OpOffPtr { + break + } + tt2 := mem_0_0.Type + o2 := 
auxIntToInt64(mem_0_0.AuxInt) + p2 := mem_0_0.Args[0] + d1 := mem_0.Args[1] + mem_0_2 := mem_0.Args[2] + if mem_0_2.Op != OpStore { + break + } + t3 := auxToType(mem_0_2.Aux) + _ = mem_0_2.Args[2] + mem_0_2_0 := mem_0_2.Args[0] + if mem_0_2_0.Op != OpOffPtr { + break + } + tt3 := mem_0_2_0.Type + o3 := auxIntToInt64(mem_0_2_0.AuxInt) + p3 := mem_0_2_0.Args[0] + d2 := mem_0_2.Args[1] + mem_0_2_2 := mem_0_2.Args[2] + if mem_0_2_2.Op != OpZero || auxIntToInt64(mem_0_2_2.AuxInt) != n { + break + } + t4 := auxToType(mem_0_2_2.Aux) + p4 := mem_0_2_2.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && n >= o2+t2.Size() && n >= o3+t3.Size()) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t2) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = int64ToAuxInt(o2) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t3) + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) + v2.AuxInt = int64ToAuxInt(o3) + v2.AddArg(dst) + v3 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v3.AuxInt = int64ToAuxInt(n) + v3.Aux = typeToAux(t1) + v3.AddArg2(dst, mem) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Store {t4} (OffPtr [o4] p4) d3 (Zero {t5} [n] p5 _)))))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size() + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [o4] dst) d3 
(Zero {t1} [n] dst mem)))) + for { + n := auxIntToInt64(v.AuxInt) + t1 := auxToType(v.Aux) + dst := v_0 + p1 := v_1 + mem := v_2 + if mem.Op != OpVarDef { + break + } + mem_0 := mem.Args[0] + if mem_0.Op != OpStore { + break + } + t2 := auxToType(mem_0.Aux) + _ = mem_0.Args[2] + mem_0_0 := mem_0.Args[0] + if mem_0_0.Op != OpOffPtr { + break + } + tt2 := mem_0_0.Type + o2 := auxIntToInt64(mem_0_0.AuxInt) + p2 := mem_0_0.Args[0] + d1 := mem_0.Args[1] + mem_0_2 := mem_0.Args[2] + if mem_0_2.Op != OpStore { + break + } + t3 := auxToType(mem_0_2.Aux) + _ = mem_0_2.Args[2] + mem_0_2_0 := mem_0_2.Args[0] + if mem_0_2_0.Op != OpOffPtr { + break + } + tt3 := mem_0_2_0.Type + o3 := auxIntToInt64(mem_0_2_0.AuxInt) + p3 := mem_0_2_0.Args[0] + d2 := mem_0_2.Args[1] + mem_0_2_2 := mem_0_2.Args[2] + if mem_0_2_2.Op != OpStore { + break + } + t4 := auxToType(mem_0_2_2.Aux) + _ = mem_0_2_2.Args[2] + mem_0_2_2_0 := mem_0_2_2.Args[0] + if mem_0_2_2_0.Op != OpOffPtr { + break + } + tt4 := mem_0_2_2_0.Type + o4 := auxIntToInt64(mem_0_2_2_0.AuxInt) + p4 := mem_0_2_2_0.Args[0] + d3 := mem_0_2_2.Args[1] + mem_0_2_2_2 := mem_0_2_2.Args[2] + if mem_0_2_2_2.Op != OpZero || auxIntToInt64(mem_0_2_2_2.AuxInt) != n { + break + } + t5 := auxToType(mem_0_2_2_2.Aux) + p5 := mem_0_2_2_2.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2+t2.Size() && n >= o3+t3.Size() && n >= o4+t4.Size()) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t2) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = int64ToAuxInt(o2) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t3) + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) + v2.AuxInt = int64ToAuxInt(o3) + v2.AddArg(dst) + v3 := b.NewValue0(v.Pos, 
OpStore, types.TypeMem) + v3.Aux = typeToAux(t4) + v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) + v4.AuxInt = int64ToAuxInt(o4) + v4.AddArg(dst) + v5 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v5.AuxInt = int64ToAuxInt(n) + v5.Aux = typeToAux(t1) + v5.AddArg2(dst, mem) + v3.AddArg3(v4, d3, v5) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Store {t4} (OffPtr [o4] p4) d3 (Store {t5} (OffPtr [o5] p5) d4 (Zero {t6} [n] p6 _))))))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && t6.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size() && n >= o5 + t5.Size() + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [o4] dst) d3 (Store {t5} (OffPtr [o5] dst) d4 (Zero {t1} [n] dst mem))))) + for { + n := auxIntToInt64(v.AuxInt) + t1 := auxToType(v.Aux) + dst := v_0 + p1 := v_1 + mem := v_2 + if mem.Op != OpVarDef { + break + } + mem_0 := mem.Args[0] + if mem_0.Op != OpStore { + break + } + t2 := auxToType(mem_0.Aux) + _ = mem_0.Args[2] + mem_0_0 := mem_0.Args[0] + if mem_0_0.Op != OpOffPtr { + break + } + tt2 := mem_0_0.Type + o2 := auxIntToInt64(mem_0_0.AuxInt) + p2 := mem_0_0.Args[0] + d1 := mem_0.Args[1] + mem_0_2 := mem_0.Args[2] + if mem_0_2.Op != OpStore { + break + } + t3 := auxToType(mem_0_2.Aux) + _ = mem_0_2.Args[2] + mem_0_2_0 := mem_0_2.Args[0] + if mem_0_2_0.Op != OpOffPtr { + break + } + tt3 := mem_0_2_0.Type + o3 := auxIntToInt64(mem_0_2_0.AuxInt) + p3 := mem_0_2_0.Args[0] + d2 := mem_0_2.Args[1] + mem_0_2_2 := mem_0_2.Args[2] + if 
mem_0_2_2.Op != OpStore { + break + } + t4 := auxToType(mem_0_2_2.Aux) + _ = mem_0_2_2.Args[2] + mem_0_2_2_0 := mem_0_2_2.Args[0] + if mem_0_2_2_0.Op != OpOffPtr { + break + } + tt4 := mem_0_2_2_0.Type + o4 := auxIntToInt64(mem_0_2_2_0.AuxInt) + p4 := mem_0_2_2_0.Args[0] + d3 := mem_0_2_2.Args[1] + mem_0_2_2_2 := mem_0_2_2.Args[2] + if mem_0_2_2_2.Op != OpStore { + break + } + t5 := auxToType(mem_0_2_2_2.Aux) + _ = mem_0_2_2_2.Args[2] + mem_0_2_2_2_0 := mem_0_2_2_2.Args[0] + if mem_0_2_2_2_0.Op != OpOffPtr { + break + } + tt5 := mem_0_2_2_2_0.Type + o5 := auxIntToInt64(mem_0_2_2_2_0.AuxInt) + p5 := mem_0_2_2_2_0.Args[0] + d4 := mem_0_2_2_2.Args[1] + mem_0_2_2_2_2 := mem_0_2_2_2.Args[2] + if mem_0_2_2_2_2.Op != OpZero || auxIntToInt64(mem_0_2_2_2_2.AuxInt) != n { + break + } + t6 := auxToType(mem_0_2_2_2_2.Aux) + p6 := mem_0_2_2_2_2.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && t6.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2+t2.Size() && n >= o3+t3.Size() && n >= o4+t4.Size() && n >= o5+t5.Size()) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t2) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = int64ToAuxInt(o2) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t3) + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) + v2.AuxInt = int64ToAuxInt(o3) + v2.AddArg(dst) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = typeToAux(t4) + v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) + v4.AuxInt = int64ToAuxInt(o4) + v4.AddArg(dst) + v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v5.Aux = typeToAux(t5) + v6 := b.NewValue0(v.Pos, OpOffPtr, tt5) + v6.AuxInt = int64ToAuxInt(o5) + v6.AddArg(dst) + v7 := 
b.NewValue0(v.Pos, OpZero, types.TypeMem) + v7.AuxInt = int64ToAuxInt(n) + v7.Aux = typeToAux(t1) + v7.AddArg2(dst, mem) + v5.AddArg3(v6, d4, v7) + v3.AddArg3(v4, d3, v5) + v1.AddArg3(v2, d2, v3) + v.AddArg3(v0, d1, v1) + return true + } + // match: (Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _)) + // cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config)) + // result: (Move {t1} [s] dst src midmem) + for { + s := auxIntToInt64(v.AuxInt) + t1 := auxToType(v.Aux) + dst := v_0 + tmp1 := v_1 + midmem := v_2 + if midmem.Op != OpMove || auxIntToInt64(midmem.AuxInt) != s { + break + } + t2 := auxToType(midmem.Aux) + src := midmem.Args[1] + tmp2 := midmem.Args[0] + if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s) + v.Aux = typeToAux(t1) + v.AddArg3(dst, src, midmem) + return true + } + // match: (Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _))) + // cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config)) + // result: (Move {t1} [s] dst src midmem) + for { + s := auxIntToInt64(v.AuxInt) + t1 := auxToType(v.Aux) + dst := v_0 + tmp1 := v_1 + midmem := v_2 + if midmem.Op != OpVarDef { + break + } + midmem_0 := midmem.Args[0] + if midmem_0.Op != OpMove || auxIntToInt64(midmem_0.AuxInt) != s { + break + } + t2 := auxToType(midmem_0.Aux) + src := midmem_0.Args[1] + tmp2 := midmem_0.Args[0] + if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || 
isInlinableMemmove(dst, src, s, config))) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s) + v.Aux = typeToAux(t1) + v.AddArg3(dst, src, midmem) + return true + } + // match: (Move dst src mem) + // cond: isSamePtr(dst, src) + // result: mem + for { + dst := v_0 + src := v_1 + mem := v_2 + if !(isSamePtr(dst, src)) { + break + } + v.copyOf(mem) + return true + } + return false +} +func rewriteValuegeneric_OpMul16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mul16 (Const16 [c]) (Const16 [d])) + // result: (Const16 [c*d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1.AuxInt) + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(c * d) + return true + } + break + } + // match: (Mul16 (Const16 [1]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 1 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (Mul16 (Const16 [-1]) x) + // result: (Neg16 x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 { + continue + } + x := v_1 + v.reset(OpNeg16) + v.AddArg(x) + return true + } + break + } + // match: (Mul16 n (Const16 [c])) + // cond: isPowerOfTwo16(c) + // result: (Lsh16x64 n (Const64 [log16(c)])) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 + if v_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1.AuxInt) + if !(isPowerOfTwo16(c)) { + continue + } + v.reset(OpLsh16x64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(log16(c)) + v.AddArg2(n, v0) + return true + } + break + } + // match: (Mul16 n (Const16 [c])) + // cond: t.IsSigned() && 
isPowerOfTwo16(-c) + // result: (Neg16 (Lsh16x64 n (Const64 [log16(-c)]))) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 + if v_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1.AuxInt) + if !(t.IsSigned() && isPowerOfTwo16(-c)) { + continue + } + v.reset(OpNeg16) + v0 := b.NewValue0(v.Pos, OpLsh16x64, t) + v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v1.AuxInt = int64ToAuxInt(log16(-c)) + v0.AddArg2(n, v1) + v.AddArg(v0) + return true + } + break + } + // match: (Mul16 (Const16 [0]) _) + // result: (Const16 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + continue + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + break + } + // match: (Mul16 (Mul16 i:(Const16 ) z) x) + // cond: (z.Op != OpConst16 && x.Op != OpConst16) + // result: (Mul16 i (Mul16 x z)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMul16 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst16 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst16 && x.Op != OpConst16) { + continue + } + v.reset(OpMul16) + v0 := b.NewValue0(v.Pos, OpMul16, t) + v0.AddArg2(x, z) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (Mul16 (Const16 [c]) (Mul16 (Const16 [d]) x)) + // result: (Mul16 (Const16 [c*d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpMul16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + continue + } + d := auxIntToInt16(v_1_0.AuxInt) + x := 
v_1_1 + v.reset(OpMul16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(c * d) + v.AddArg2(v0, x) + return true + } + } + break + } + return false +} +func rewriteValuegeneric_OpMul32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mul32 (Const32 [c]) (Const32 [d])) + // result: (Const32 [c*d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(c * d) + return true + } + break + } + // match: (Mul32 (Const32 [1]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 1 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (Mul32 (Const32 [-1]) x) + // result: (Neg32 x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 { + continue + } + x := v_1 + v.reset(OpNeg32) + v.AddArg(x) + return true + } + break + } + // match: (Mul32 n (Const32 [c])) + // cond: isPowerOfTwo32(c) + // result: (Lsh32x64 n (Const64 [log32(c)])) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 + if v_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1.AuxInt) + if !(isPowerOfTwo32(c)) { + continue + } + v.reset(OpLsh32x64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(log32(c)) + v.AddArg2(n, v0) + return true + } + break + } + // match: (Mul32 n (Const32 [c])) + // cond: t.IsSigned() && isPowerOfTwo32(-c) + // result: (Neg32 (Lsh32x64 n (Const64 [log32(-c)]))) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 + if v_1.Op != OpConst32 { + continue + } + c := 
auxIntToInt32(v_1.AuxInt) + if !(t.IsSigned() && isPowerOfTwo32(-c)) { + continue + } + v.reset(OpNeg32) + v0 := b.NewValue0(v.Pos, OpLsh32x64, t) + v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v1.AuxInt = int64ToAuxInt(log32(-c)) + v0.AddArg2(n, v1) + v.AddArg(v0) + return true + } + break + } + // match: (Mul32 (Const32 [c]) (Add32 (Const32 [d]) x)) + // result: (Add32 (Const32 [c*d]) (Mul32 (Const32 [c]) x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpAdd32 || v_1.Type != t { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst32 || v_1_0.Type != t { + continue + } + d := auxIntToInt32(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpAdd32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(c * d) + v1 := b.NewValue0(v.Pos, OpMul32, t) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = int32ToAuxInt(c) + v1.AddArg2(v2, x) + v.AddArg2(v0, v1) + return true + } + } + break + } + // match: (Mul32 (Const32 [0]) _) + // result: (Const32 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + continue + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + break + } + // match: (Mul32 (Mul32 i:(Const32 ) z) x) + // cond: (z.Op != OpConst32 && x.Op != OpConst32) + // result: (Mul32 i (Mul32 x z)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMul32 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst32 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst32 && x.Op != OpConst32) { + continue + } + v.reset(OpMul32) + v0 
:= b.NewValue0(v.Pos, OpMul32, t) + v0.AddArg2(x, z) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (Mul32 (Const32 [c]) (Mul32 (Const32 [d]) x)) + // result: (Mul32 (Const32 [c*d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst32 || v_1_0.Type != t { + continue + } + d := auxIntToInt32(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpMul32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(c * d) + v.AddArg2(v0, x) + return true + } + } + break + } + return false +} +func rewriteValuegeneric_OpMul32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Mul32F (Const32F [c]) (Const32F [d])) + // cond: c*d == c*d + // result: (Const32F [c*d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32F { + continue + } + c := auxIntToFloat32(v_0.AuxInt) + if v_1.Op != OpConst32F { + continue + } + d := auxIntToFloat32(v_1.AuxInt) + if !(c*d == c*d) { + continue + } + v.reset(OpConst32F) + v.AuxInt = float32ToAuxInt(c * d) + return true + } + break + } + // match: (Mul32F x (Const32F [1])) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpConst32F || auxIntToFloat32(v_1.AuxInt) != 1 { + continue + } + v.copyOf(x) + return true + } + break + } + // match: (Mul32F x (Const32F [-1])) + // result: (Neg32F x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpConst32F || auxIntToFloat32(v_1.AuxInt) != -1 { + continue + } + v.reset(OpNeg32F) + v.AddArg(x) + return true + } + break + } + // match: (Mul32F x (Const32F [2])) + // result: (Add32F x x) + for { + for _i0 := 0; _i0 <= 1; _i0, 
v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpConst32F || auxIntToFloat32(v_1.AuxInt) != 2 { + continue + } + v.reset(OpAdd32F) + v.AddArg2(x, x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpMul64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mul64 (Const64 [c]) (Const64 [d])) + // result: (Const64 [c*d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(c * d) + return true + } + break + } + // match: (Mul64 (Const64 [1]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 1 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (Mul64 (Const64 [-1]) x) + // result: (Neg64 x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 { + continue + } + x := v_1 + v.reset(OpNeg64) + v.AddArg(x) + return true + } + break + } + // match: (Mul64 n (Const64 [c])) + // cond: isPowerOfTwo64(c) + // result: (Lsh64x64 n (Const64 [log64(c)])) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 + if v_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(isPowerOfTwo64(c)) { + continue + } + v.reset(OpLsh64x64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(log64(c)) + v.AddArg2(n, v0) + return true + } + break + } + // match: (Mul64 n (Const64 [c])) + // cond: t.IsSigned() && isPowerOfTwo64(-c) + // result: (Neg64 (Lsh64x64 n (Const64 [log64(-c)]))) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 + if v_1.Op != OpConst64 
{ + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(t.IsSigned() && isPowerOfTwo64(-c)) { + continue + } + v.reset(OpNeg64) + v0 := b.NewValue0(v.Pos, OpLsh64x64, t) + v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v1.AuxInt = int64ToAuxInt(log64(-c)) + v0.AddArg2(n, v1) + v.AddArg(v0) + return true + } + break + } + // match: (Mul64 (Const64 [c]) (Add64 (Const64 [d]) x)) + // result: (Add64 (Const64 [c*d]) (Mul64 (Const64 [c]) x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpAdd64 || v_1.Type != t { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := auxIntToInt64(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpAdd64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c * d) + v1 := b.NewValue0(v.Pos, OpMul64, t) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(c) + v1.AddArg2(v2, x) + v.AddArg2(v0, v1) + return true + } + } + break + } + // match: (Mul64 (Const64 [0]) _) + // result: (Const64 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + continue + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + break + } + // match: (Mul64 (Mul64 i:(Const64 ) z) x) + // cond: (z.Op != OpConst64 && x.Op != OpConst64) + // result: (Mul64 i (Mul64 x z)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMul64 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst64 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst64 && x.Op != OpConst64) { + continue + } + 
v.reset(OpMul64) + v0 := b.NewValue0(v.Pos, OpMul64, t) + v0.AddArg2(x, z) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (Mul64 (Const64 [c]) (Mul64 (Const64 [d]) x)) + // result: (Mul64 (Const64 [c*d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpMul64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := auxIntToInt64(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpMul64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c * d) + v.AddArg2(v0, x) + return true + } + } + break + } + return false +} +func rewriteValuegeneric_OpMul64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Mul64F (Const64F [c]) (Const64F [d])) + // cond: c*d == c*d + // result: (Const64F [c*d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64F { + continue + } + c := auxIntToFloat64(v_0.AuxInt) + if v_1.Op != OpConst64F { + continue + } + d := auxIntToFloat64(v_1.AuxInt) + if !(c*d == c*d) { + continue + } + v.reset(OpConst64F) + v.AuxInt = float64ToAuxInt(c * d) + return true + } + break + } + // match: (Mul64F x (Const64F [1])) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpConst64F || auxIntToFloat64(v_1.AuxInt) != 1 { + continue + } + v.copyOf(x) + return true + } + break + } + // match: (Mul64F x (Const64F [-1])) + // result: (Neg64F x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpConst64F || auxIntToFloat64(v_1.AuxInt) != -1 { + continue + } + v.reset(OpNeg64F) + v.AddArg(x) + return true + } + break + } + // match: (Mul64F x (Const64F [2])) + // result: (Add64F x x) + for { + for _i0 := 
0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpConst64F || auxIntToFloat64(v_1.AuxInt) != 2 { + continue + } + v.reset(OpAdd64F) + v.AddArg2(x, x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpMul8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mul8 (Const8 [c]) (Const8 [d])) + // result: (Const8 [c*d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1.AuxInt) + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(c * d) + return true + } + break + } + // match: (Mul8 (Const8 [1]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 1 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (Mul8 (Const8 [-1]) x) + // result: (Neg8 x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 { + continue + } + x := v_1 + v.reset(OpNeg8) + v.AddArg(x) + return true + } + break + } + // match: (Mul8 n (Const8 [c])) + // cond: isPowerOfTwo8(c) + // result: (Lsh8x64 n (Const64 [log8(c)])) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 + if v_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1.AuxInt) + if !(isPowerOfTwo8(c)) { + continue + } + v.reset(OpLsh8x64) + v.Type = t + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(log8(c)) + v.AddArg2(n, v0) + return true + } + break + } + // match: (Mul8 n (Const8 [c])) + // cond: t.IsSigned() && isPowerOfTwo8(-c) + // result: (Neg8 (Lsh8x64 n (Const64 [log8(-c)]))) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 + if v_1.Op != OpConst8 { + continue + } + c 
:= auxIntToInt8(v_1.AuxInt) + if !(t.IsSigned() && isPowerOfTwo8(-c)) { + continue + } + v.reset(OpNeg8) + v0 := b.NewValue0(v.Pos, OpLsh8x64, t) + v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v1.AuxInt = int64ToAuxInt(log8(-c)) + v0.AddArg2(n, v1) + v.AddArg(v0) + return true + } + break + } + // match: (Mul8 (Const8 [0]) _) + // result: (Const8 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + continue + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + break + } + // match: (Mul8 (Mul8 i:(Const8 ) z) x) + // cond: (z.Op != OpConst8 && x.Op != OpConst8) + // result: (Mul8 i (Mul8 x z)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMul8 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst8 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst8 && x.Op != OpConst8) { + continue + } + v.reset(OpMul8) + v0 := b.NewValue0(v.Pos, OpMul8, t) + v0.AddArg2(x, z) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (Mul8 (Const8 [c]) (Mul8 (Const8 [d]) x)) + // result: (Mul8 (Const8 [c*d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { + continue + } + t := v_0.Type + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpMul8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + continue + } + d := auxIntToInt8(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpMul8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(c * d) + v.AddArg2(v0, x) + return true + } + } + break + } + return false +} +func rewriteValuegeneric_OpNeg16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block 
+ // match: (Neg16 (Const16 [c])) + // result: (Const16 [-c]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(-c) + return true + } + // match: (Neg16 (Sub16 x y)) + // result: (Sub16 y x) + for { + if v_0.Op != OpSub16 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpSub16) + v.AddArg2(y, x) + return true + } + // match: (Neg16 (Neg16 x)) + // result: x + for { + if v_0.Op != OpNeg16 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Neg16 (Com16 x)) + // result: (Add16 (Const16 [1]) x) + for { + t := v.Type + if v_0.Op != OpCom16 { + break + } + x := v_0.Args[0] + v.reset(OpAdd16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(1) + v.AddArg2(v0, x) + return true + } + return false +} +func rewriteValuegeneric_OpNeg32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Neg32 (Const32 [c])) + // result: (Const32 [-c]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(-c) + return true + } + // match: (Neg32 (Sub32 x y)) + // result: (Sub32 y x) + for { + if v_0.Op != OpSub32 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpSub32) + v.AddArg2(y, x) + return true + } + // match: (Neg32 (Neg32 x)) + // result: x + for { + if v_0.Op != OpNeg32 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Neg32 (Com32 x)) + // result: (Add32 (Const32 [1]) x) + for { + t := v.Type + if v_0.Op != OpCom32 { + break + } + x := v_0.Args[0] + v.reset(OpAdd32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(1) + v.AddArg2(v0, x) + return true + } + return false +} +func rewriteValuegeneric_OpNeg32F(v *Value) bool { + v_0 := v.Args[0] + // match: (Neg32F (Const32F [c])) + // cond: c != 0 + // result: (Const32F [-c]) + for { + if v_0.Op != OpConst32F { + break + } + c := auxIntToFloat32(v_0.AuxInt) 
+ if !(c != 0) { + break + } + v.reset(OpConst32F) + v.AuxInt = float32ToAuxInt(-c) + return true + } + return false +} +func rewriteValuegeneric_OpNeg64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Neg64 (Const64 [c])) + // result: (Const64 [-c]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(-c) + return true + } + // match: (Neg64 (Sub64 x y)) + // result: (Sub64 y x) + for { + if v_0.Op != OpSub64 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpSub64) + v.AddArg2(y, x) + return true + } + // match: (Neg64 (Neg64 x)) + // result: x + for { + if v_0.Op != OpNeg64 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Neg64 (Com64 x)) + // result: (Add64 (Const64 [1]) x) + for { + t := v.Type + if v_0.Op != OpCom64 { + break + } + x := v_0.Args[0] + v.reset(OpAdd64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(1) + v.AddArg2(v0, x) + return true + } + return false +} +func rewriteValuegeneric_OpNeg64F(v *Value) bool { + v_0 := v.Args[0] + // match: (Neg64F (Const64F [c])) + // cond: c != 0 + // result: (Const64F [-c]) + for { + if v_0.Op != OpConst64F { + break + } + c := auxIntToFloat64(v_0.AuxInt) + if !(c != 0) { + break + } + v.reset(OpConst64F) + v.AuxInt = float64ToAuxInt(-c) + return true + } + return false +} +func rewriteValuegeneric_OpNeg8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Neg8 (Const8 [c])) + // result: (Const8 [-c]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(-c) + return true + } + // match: (Neg8 (Sub8 x y)) + // result: (Sub8 y x) + for { + if v_0.Op != OpSub8 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpSub8) + v.AddArg2(y, x) + return true + } + // match: (Neg8 (Neg8 x)) + // result: x + for { + if v_0.Op != OpNeg8 { + break + } + x := v_0.Args[0] + v.copyOf(x) 
+ return true + } + // match: (Neg8 (Com8 x)) + // result: (Add8 (Const8 [1]) x) + for { + t := v.Type + if v_0.Op != OpCom8 { + break + } + x := v_0.Args[0] + v.reset(OpAdd8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(1) + v.AddArg2(v0, x) + return true + } + return false +} +func rewriteValuegeneric_OpNeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq16 x x) + // result: (ConstBool [false]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (Neq16 (Const16 [c]) (Add16 (Const16 [d]) x)) + // result: (Neq16 (Const16 [c-d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpAdd16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + continue + } + d := auxIntToInt16(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpNeq16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(c - d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Neq16 (Const16 [c]) (Const16 [d])) + // result: (ConstBool [c != d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c != d) + return true + } + break + } + // match: (Neq16 n (Lsh16x64 (Rsh16x64 (Add16 n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) + // cond: k > 0 && k < 15 && kbar == 16 - k + // result: (Neq16 (And16 n (Const16 [1< [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 
+ if v_1.Op != OpLsh16x64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpRsh16x64 { + continue + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAdd16 { + continue + } + t := v_1_0_0.Type + _ = v_1_0_0.Args[1] + v_1_0_0_0 := v_1_0_0.Args[0] + v_1_0_0_1 := v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { + if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh16Ux64 || v_1_0_0_1.Type != t { + continue + } + _ = v_1_0_0_1.Args[1] + v_1_0_0_1_0 := v_1_0_0_1.Args[0] + if v_1_0_0_1_0.Op != OpRsh16x64 || v_1_0_0_1_0.Type != t { + continue + } + _ = v_1_0_0_1_0.Args[1] + if n != v_1_0_0_1_0.Args[0] { + continue + } + v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] + if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 15 { + continue + } + v_1_0_0_1_1 := v_1_0_0_1.Args[1] + if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { + continue + } + kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt) + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { + continue + } + k := auxIntToInt64(v_1_0_1.AuxInt) + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 15 && kbar == 16-k) { + continue + } + v.reset(OpNeq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = int16ToAuxInt(1< x (Const16 [y])) (Const16 [y])) + // cond: oneBit16(y) + // result: (Eq16 (And16 x (Const16 [y])) (Const16 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAnd16 { + continue + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpConst16 || v_0_1.Type != t { + continue + } + y := auxIntToInt16(v_0_1.AuxInt) + if v_1.Op != OpConst16 || v_1.Type 
!= t || auxIntToInt16(v_1.AuxInt) != y || !(oneBit16(y)) { + continue + } + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v1 := b.NewValue0(v.Pos, OpConst16, t) + v1.AuxInt = int16ToAuxInt(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst16, t) + v2.AuxInt = int16ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + return false +} +func rewriteValuegeneric_OpNeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq32 x x) + // result: (ConstBool [false]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (Neq32 (Const32 [c]) (Add32 (Const32 [d]) x)) + // result: (Neq32 (Const32 [c-d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpAdd32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst32 || v_1_0.Type != t { + continue + } + d := auxIntToInt32(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpNeq32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(c - d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Neq32 (Const32 [c]) (Const32 [d])) + // result: (ConstBool [c != d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c != d) + return true + } + break + } + // match: (Neq32 n (Lsh32x64 (Rsh32x64 (Add32 n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) + // cond: k > 0 && k < 31 && kbar == 32 - k + // result: (Neq32 (And32 n (Const32 [1< [0])) + for { + for 
_i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 + if v_1.Op != OpLsh32x64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpRsh32x64 { + continue + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAdd32 { + continue + } + t := v_1_0_0.Type + _ = v_1_0_0.Args[1] + v_1_0_0_0 := v_1_0_0.Args[0] + v_1_0_0_1 := v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { + if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh32Ux64 || v_1_0_0_1.Type != t { + continue + } + _ = v_1_0_0_1.Args[1] + v_1_0_0_1_0 := v_1_0_0_1.Args[0] + if v_1_0_0_1_0.Op != OpRsh32x64 || v_1_0_0_1_0.Type != t { + continue + } + _ = v_1_0_0_1_0.Args[1] + if n != v_1_0_0_1_0.Args[0] { + continue + } + v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] + if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 31 { + continue + } + v_1_0_0_1_1 := v_1_0_0_1.Args[1] + if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { + continue + } + kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt) + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { + continue + } + k := auxIntToInt64(v_1_0_1.AuxInt) + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 31 && kbar == 32-k) { + continue + } + v.reset(OpNeq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = int32ToAuxInt(1< x (Const32 [y])) (Const32 [y])) + // cond: oneBit32(y) + // result: (Eq32 (And32 x (Const32 [y])) (Const32 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAnd32 { + continue + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpConst32 || v_0_1.Type != t { + continue + } + y := 
auxIntToInt32(v_0_1.AuxInt) + if v_1.Op != OpConst32 || v_1.Type != t || auxIntToInt32(v_1.AuxInt) != y || !(oneBit32(y)) { + continue + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v1 := b.NewValue0(v.Pos, OpConst32, t) + v1.AuxInt = int32ToAuxInt(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = int32ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + return false +} +func rewriteValuegeneric_OpNeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Neq32F (Const32F [c]) (Const32F [d])) + // result: (ConstBool [c != d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32F { + continue + } + c := auxIntToFloat32(v_0.AuxInt) + if v_1.Op != OpConst32F { + continue + } + d := auxIntToFloat32(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c != d) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpNeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq64 x x) + // result: (ConstBool [false]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (Neq64 (Const64 [c]) (Add64 (Const64 [d]) x)) + // result: (Neq64 (Const64 [c-d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpAdd64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := auxIntToInt64(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpNeq64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c - d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Neq64 (Const64 [c]) (Const64 [d])) + // result: 
(ConstBool [c != d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c != d) + return true + } + break + } + // match: (Neq64 n (Lsh64x64 (Rsh64x64 (Add64 n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) + // cond: k > 0 && k < 63 && kbar == 64 - k + // result: (Neq64 (And64 n (Const64 [1< [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 + if v_1.Op != OpLsh64x64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpRsh64x64 { + continue + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAdd64 { + continue + } + t := v_1_0_0.Type + _ = v_1_0_0.Args[1] + v_1_0_0_0 := v_1_0_0.Args[0] + v_1_0_0_1 := v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { + if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh64Ux64 || v_1_0_0_1.Type != t { + continue + } + _ = v_1_0_0_1.Args[1] + v_1_0_0_1_0 := v_1_0_0_1.Args[0] + if v_1_0_0_1_0.Op != OpRsh64x64 || v_1_0_0_1_0.Type != t { + continue + } + _ = v_1_0_0_1_0.Args[1] + if n != v_1_0_0_1_0.Args[0] { + continue + } + v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] + if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 63 { + continue + } + v_1_0_0_1_1 := v_1_0_0_1.Args[1] + if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { + continue + } + kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt) + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { + continue + } + k := auxIntToInt64(v_1_0_1.AuxInt) + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 63 && kbar == 64-k) { + continue + } + 
v.reset(OpNeq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(1< x (Const64 [y])) (Const64 [y])) + // cond: oneBit64(y) + // result: (Eq64 (And64 x (Const64 [y])) (Const64 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAnd64 { + continue + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpConst64 || v_0_1.Type != t { + continue + } + y := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 || v_1.Type != t || auxIntToInt64(v_1.AuxInt) != y || !(oneBit64(y)) { + continue + } + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = int64ToAuxInt(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + return false +} +func rewriteValuegeneric_OpNeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Neq64F (Const64F [c]) (Const64F [d])) + // result: (ConstBool [c != d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64F { + continue + } + c := auxIntToFloat64(v_0.AuxInt) + if v_1.Op != OpConst64F { + continue + } + d := auxIntToFloat64(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c != d) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpNeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq8 x x) + // result: (ConstBool [false]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (Neq8 (Const8 [c]) (Add8 (Const8 [d]) x)) + // result: (Neq8 (Const8 [c-d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != 
OpConst8 { + continue + } + t := v_0.Type + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpAdd8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + continue + } + d := auxIntToInt8(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpNeq8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(c - d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Neq8 (Const8 [c]) (Const8 [d])) + // result: (ConstBool [c != d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c != d) + return true + } + break + } + // match: (Neq8 n (Lsh8x64 (Rsh8x64 (Add8 n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [kbar]))) (Const64 [k])) (Const64 [k])) ) + // cond: k > 0 && k < 7 && kbar == 8 - k + // result: (Neq8 (And8 n (Const8 [1< [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + n := v_0 + if v_1.Op != OpLsh8x64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpRsh8x64 { + continue + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAdd8 { + continue + } + t := v_1_0_0.Type + _ = v_1_0_0.Args[1] + v_1_0_0_0 := v_1_0_0.Args[0] + v_1_0_0_1 := v_1_0_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 { + if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh8Ux64 || v_1_0_0_1.Type != t { + continue + } + _ = v_1_0_0_1.Args[1] + v_1_0_0_1_0 := v_1_0_0_1.Args[0] + if v_1_0_0_1_0.Op != OpRsh8x64 || v_1_0_0_1_0.Type != t { + continue + } + _ = v_1_0_0_1_0.Args[1] + if n != v_1_0_0_1_0.Args[0] { + continue + } + v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1] + if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || 
auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 7 { + continue + } + v_1_0_0_1_1 := v_1_0_0_1.Args[1] + if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 { + continue + } + kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt) + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 { + continue + } + k := auxIntToInt64(v_1_0_1.AuxInt) + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 7 && kbar == 8-k) { + continue + } + v.reset(OpNeq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = int8ToAuxInt(1< x (Const8 [y])) (Const8 [y])) + // cond: oneBit8(y) + // result: (Eq8 (And8 x (Const8 [y])) (Const8 [0])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAnd8 { + continue + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpConst8 || v_0_1.Type != t { + continue + } + y := auxIntToInt8(v_0_1.AuxInt) + if v_1.Op != OpConst8 || v_1.Type != t || auxIntToInt8(v_1.AuxInt) != y || !(oneBit8(y)) { + continue + } + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v1 := b.NewValue0(v.Pos, OpConst8, t) + v1.AuxInt = int8ToAuxInt(y) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = int8ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + } + break + } + return false +} +func rewriteValuegeneric_OpNeqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NeqB (ConstBool [c]) (ConstBool [d])) + // result: (ConstBool [c != d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConstBool { + continue + } + c := auxIntToBool(v_0.AuxInt) + if v_1.Op != OpConstBool { + continue + } + d := auxIntToBool(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c != d) + return true + } + break + } 
+ // match: (NeqB (ConstBool [false]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != false { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (NeqB (ConstBool [true]) x) + // result: (Not x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != true { + continue + } + x := v_1 + v.reset(OpNot) + v.AddArg(x) + return true + } + break + } + // match: (NeqB (Not x) (Not y)) + // result: (NeqB x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNot { + continue + } + x := v_0.Args[0] + if v_1.Op != OpNot { + continue + } + y := v_1.Args[0] + v.reset(OpNeqB) + v.AddArg2(x, y) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpNeqInter(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NeqInter x y) + // result: (NeqPtr (ITab x) (ITab y)) + for { + x := v_0 + y := v_1 + v.reset(OpNeqPtr) + v0 := b.NewValue0(v.Pos, OpITab, typ.Uintptr) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpITab, typ.Uintptr) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValuegeneric_OpNeqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NeqPtr x x) + // result: (ConstBool [false]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (NeqPtr (Addr {x} _) (Addr {y} _)) + // result: (ConstBool [x != y]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAddr { + continue + } + x := auxToSym(v_0.Aux) + if v_1.Op != OpAddr { + continue + } + y := auxToSym(v_1.Aux) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(x != y) + return true + } + break + } + // match: (NeqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _))) + // result: 
(ConstBool [x != y || o != 0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAddr { + continue + } + x := auxToSym(v_0.Aux) + if v_1.Op != OpOffPtr { + continue + } + o := auxIntToInt64(v_1.AuxInt) + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAddr { + continue + } + y := auxToSym(v_1_0.Aux) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(x != y || o != 0) + return true + } + break + } + // match: (NeqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _))) + // result: (ConstBool [x != y || o1 != o2]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpOffPtr { + continue + } + o1 := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAddr { + continue + } + x := auxToSym(v_0_0.Aux) + if v_1.Op != OpOffPtr { + continue + } + o2 := auxIntToInt64(v_1.AuxInt) + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAddr { + continue + } + y := auxToSym(v_1_0.Aux) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(x != y || o1 != o2) + return true + } + break + } + // match: (NeqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _)) + // result: (ConstBool [x != y]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLocalAddr { + continue + } + x := auxToSym(v_0.Aux) + if v_1.Op != OpLocalAddr { + continue + } + y := auxToSym(v_1.Aux) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(x != y) + return true + } + break + } + // match: (NeqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _))) + // result: (ConstBool [x != y || o != 0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLocalAddr { + continue + } + x := auxToSym(v_0.Aux) + if v_1.Op != OpOffPtr { + continue + } + o := auxIntToInt64(v_1.AuxInt) + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpLocalAddr { + continue + } + y := auxToSym(v_1_0.Aux) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(x != y || o != 0) + return true + } + break + } + // match: (NeqPtr (OffPtr [o1] (LocalAddr 
{x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _))) + // result: (ConstBool [x != y || o1 != o2]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpOffPtr { + continue + } + o1 := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLocalAddr { + continue + } + x := auxToSym(v_0_0.Aux) + if v_1.Op != OpOffPtr { + continue + } + o2 := auxIntToInt64(v_1.AuxInt) + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpLocalAddr { + continue + } + y := auxToSym(v_1_0.Aux) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(x != y || o1 != o2) + return true + } + break + } + // match: (NeqPtr (OffPtr [o1] p1) p2) + // cond: isSamePtr(p1, p2) + // result: (ConstBool [o1 != 0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpOffPtr { + continue + } + o1 := auxIntToInt64(v_0.AuxInt) + p1 := v_0.Args[0] + p2 := v_1 + if !(isSamePtr(p1, p2)) { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(o1 != 0) + return true + } + break + } + // match: (NeqPtr (OffPtr [o1] p1) (OffPtr [o2] p2)) + // cond: isSamePtr(p1, p2) + // result: (ConstBool [o1 != o2]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpOffPtr { + continue + } + o1 := auxIntToInt64(v_0.AuxInt) + p1 := v_0.Args[0] + if v_1.Op != OpOffPtr { + continue + } + o2 := auxIntToInt64(v_1.AuxInt) + p2 := v_1.Args[0] + if !(isSamePtr(p1, p2)) { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(o1 != o2) + return true + } + break + } + // match: (NeqPtr (Const32 [c]) (Const32 [d])) + // result: (ConstBool [c != d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c != d) + return true + } + break + } + // match: (NeqPtr (Const64 [c]) (Const64 [d])) + // result: (ConstBool [c != d]) + for { 
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(c != d) + return true + } + break + } + // match: (NeqPtr (Convert (Addr {x} _) _) (Addr {y} _)) + // result: (ConstBool [x!=y]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConvert { + continue + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAddr { + continue + } + x := auxToSym(v_0_0.Aux) + if v_1.Op != OpAddr { + continue + } + y := auxToSym(v_1.Aux) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(x != y) + return true + } + break + } + // match: (NeqPtr (LocalAddr _ _) (Addr _)) + // result: (ConstBool [true]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLocalAddr || v_1.Op != OpAddr { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + break + } + // match: (NeqPtr (OffPtr (LocalAddr _ _)) (Addr _)) + // result: (ConstBool [true]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpOffPtr { + continue + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLocalAddr || v_1.Op != OpAddr { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + break + } + // match: (NeqPtr (LocalAddr _ _) (OffPtr (Addr _))) + // result: (ConstBool [true]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLocalAddr || v_1.Op != OpOffPtr { + continue + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAddr { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + break + } + // match: (NeqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _))) + // result: (ConstBool [true]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpOffPtr { + continue + } + v_0_0 := 
v_0.Args[0] + if v_0_0.Op != OpLocalAddr || v_1.Op != OpOffPtr { + continue + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAddr { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + break + } + // match: (NeqPtr (AddPtr p1 o1) p2) + // cond: isSamePtr(p1, p2) + // result: (IsNonNil o1) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAddPtr { + continue + } + o1 := v_0.Args[1] + p1 := v_0.Args[0] + p2 := v_1 + if !(isSamePtr(p1, p2)) { + continue + } + v.reset(OpIsNonNil) + v.AddArg(o1) + return true + } + break + } + // match: (NeqPtr (Const32 [0]) p) + // result: (IsNonNil p) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + continue + } + p := v_1 + v.reset(OpIsNonNil) + v.AddArg(p) + return true + } + break + } + // match: (NeqPtr (Const64 [0]) p) + // result: (IsNonNil p) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + continue + } + p := v_1 + v.reset(OpIsNonNil) + v.AddArg(p) + return true + } + break + } + // match: (NeqPtr (ConstNil) p) + // result: (IsNonNil p) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConstNil { + continue + } + p := v_1 + v.reset(OpIsNonNil) + v.AddArg(p) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpNeqSlice(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NeqSlice x y) + // result: (NeqPtr (SlicePtr x) (SlicePtr y)) + for { + x := v_0 + y := v_1 + v.reset(OpNeqPtr) + v0 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr) + v1.AddArg(y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValuegeneric_OpNilCheck(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + fe := b.Func.fe + // 
match: (NilCheck ptr:(GetG mem) mem) + // result: ptr + for { + ptr := v_0 + if ptr.Op != OpGetG { + break + } + mem := ptr.Args[0] + if mem != v_1 { + break + } + v.copyOf(ptr) + return true + } + // match: (NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _) + // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check") + // result: ptr + for { + ptr := v_0 + if ptr.Op != OpSelectN || auxIntToInt64(ptr.AuxInt) != 0 { + break + } + call := ptr.Args[0] + if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) { + break + } + v.copyOf(ptr) + return true + } + // match: (NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _) + // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check") + // result: ptr + for { + ptr := v_0 + if ptr.Op != OpOffPtr { + break + } + ptr_0 := ptr.Args[0] + if ptr_0.Op != OpSelectN || auxIntToInt64(ptr_0.AuxInt) != 0 { + break + } + call := ptr_0.Args[0] + if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) { + break + } + v.copyOf(ptr) + return true + } + // match: (NilCheck ptr:(Addr {_} (SB)) _) + // result: ptr + for { + ptr := v_0 + if ptr.Op != OpAddr { + break + } + ptr_0 := ptr.Args[0] + if ptr_0.Op != OpSB { + break + } + v.copyOf(ptr) + return true + } + // match: (NilCheck ptr:(Convert (Addr {_} (SB)) _) _) + // result: ptr + for { + ptr := v_0 + if ptr.Op != OpConvert { + break + } + ptr_0 := ptr.Args[0] + if ptr_0.Op != OpAddr { + break + } + ptr_0_0 := ptr_0.Args[0] + if ptr_0_0.Op != OpSB { + break + } + v.copyOf(ptr) + return true + } + return false +} +func rewriteValuegeneric_OpNot(v *Value) bool { + v_0 := v.Args[0] + // match: (Not (ConstBool [c])) + // result: (ConstBool [!c]) + for { + if v_0.Op != OpConstBool { 
+ break + } + c := auxIntToBool(v_0.AuxInt) + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(!c) + return true + } + // match: (Not (Eq64 x y)) + // result: (Neq64 x y) + for { + if v_0.Op != OpEq64 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpNeq64) + v.AddArg2(x, y) + return true + } + // match: (Not (Eq32 x y)) + // result: (Neq32 x y) + for { + if v_0.Op != OpEq32 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpNeq32) + v.AddArg2(x, y) + return true + } + // match: (Not (Eq16 x y)) + // result: (Neq16 x y) + for { + if v_0.Op != OpEq16 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpNeq16) + v.AddArg2(x, y) + return true + } + // match: (Not (Eq8 x y)) + // result: (Neq8 x y) + for { + if v_0.Op != OpEq8 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpNeq8) + v.AddArg2(x, y) + return true + } + // match: (Not (EqB x y)) + // result: (NeqB x y) + for { + if v_0.Op != OpEqB { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpNeqB) + v.AddArg2(x, y) + return true + } + // match: (Not (EqPtr x y)) + // result: (NeqPtr x y) + for { + if v_0.Op != OpEqPtr { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpNeqPtr) + v.AddArg2(x, y) + return true + } + // match: (Not (Eq64F x y)) + // result: (Neq64F x y) + for { + if v_0.Op != OpEq64F { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpNeq64F) + v.AddArg2(x, y) + return true + } + // match: (Not (Eq32F x y)) + // result: (Neq32F x y) + for { + if v_0.Op != OpEq32F { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpNeq32F) + v.AddArg2(x, y) + return true + } + // match: (Not (Neq64 x y)) + // result: (Eq64 x y) + for { + if v_0.Op != OpNeq64 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpEq64) + v.AddArg2(x, y) + return true + } + // match: (Not (Neq32 x y)) + // result: (Eq32 x y) + for { + if v_0.Op != OpNeq32 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpEq32) + 
v.AddArg2(x, y) + return true + } + // match: (Not (Neq16 x y)) + // result: (Eq16 x y) + for { + if v_0.Op != OpNeq16 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpEq16) + v.AddArg2(x, y) + return true + } + // match: (Not (Neq8 x y)) + // result: (Eq8 x y) + for { + if v_0.Op != OpNeq8 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpEq8) + v.AddArg2(x, y) + return true + } + // match: (Not (NeqB x y)) + // result: (EqB x y) + for { + if v_0.Op != OpNeqB { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpEqB) + v.AddArg2(x, y) + return true + } + // match: (Not (NeqPtr x y)) + // result: (EqPtr x y) + for { + if v_0.Op != OpNeqPtr { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpEqPtr) + v.AddArg2(x, y) + return true + } + // match: (Not (Neq64F x y)) + // result: (Eq64F x y) + for { + if v_0.Op != OpNeq64F { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpEq64F) + v.AddArg2(x, y) + return true + } + // match: (Not (Neq32F x y)) + // result: (Eq32F x y) + for { + if v_0.Op != OpNeq32F { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpEq32F) + v.AddArg2(x, y) + return true + } + // match: (Not (Less64 x y)) + // result: (Leq64 y x) + for { + if v_0.Op != OpLess64 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLeq64) + v.AddArg2(y, x) + return true + } + // match: (Not (Less32 x y)) + // result: (Leq32 y x) + for { + if v_0.Op != OpLess32 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLeq32) + v.AddArg2(y, x) + return true + } + // match: (Not (Less16 x y)) + // result: (Leq16 y x) + for { + if v_0.Op != OpLess16 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLeq16) + v.AddArg2(y, x) + return true + } + // match: (Not (Less8 x y)) + // result: (Leq8 y x) + for { + if v_0.Op != OpLess8 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLeq8) + v.AddArg2(y, x) + return true + } + // match: (Not (Less64U x y)) + // 
result: (Leq64U y x) + for { + if v_0.Op != OpLess64U { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLeq64U) + v.AddArg2(y, x) + return true + } + // match: (Not (Less32U x y)) + // result: (Leq32U y x) + for { + if v_0.Op != OpLess32U { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLeq32U) + v.AddArg2(y, x) + return true + } + // match: (Not (Less16U x y)) + // result: (Leq16U y x) + for { + if v_0.Op != OpLess16U { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLeq16U) + v.AddArg2(y, x) + return true + } + // match: (Not (Less8U x y)) + // result: (Leq8U y x) + for { + if v_0.Op != OpLess8U { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLeq8U) + v.AddArg2(y, x) + return true + } + // match: (Not (Leq64 x y)) + // result: (Less64 y x) + for { + if v_0.Op != OpLeq64 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLess64) + v.AddArg2(y, x) + return true + } + // match: (Not (Leq32 x y)) + // result: (Less32 y x) + for { + if v_0.Op != OpLeq32 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLess32) + v.AddArg2(y, x) + return true + } + // match: (Not (Leq16 x y)) + // result: (Less16 y x) + for { + if v_0.Op != OpLeq16 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLess16) + v.AddArg2(y, x) + return true + } + // match: (Not (Leq8 x y)) + // result: (Less8 y x) + for { + if v_0.Op != OpLeq8 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLess8) + v.AddArg2(y, x) + return true + } + // match: (Not (Leq64U x y)) + // result: (Less64U y x) + for { + if v_0.Op != OpLeq64U { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLess64U) + v.AddArg2(y, x) + return true + } + // match: (Not (Leq32U x y)) + // result: (Less32U y x) + for { + if v_0.Op != OpLeq32U { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLess32U) + v.AddArg2(y, x) + return true + } + // match: (Not (Leq16U x y)) + // result: (Less16U y x) + for { + if 
v_0.Op != OpLeq16U { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLess16U) + v.AddArg2(y, x) + return true + } + // match: (Not (Leq8U x y)) + // result: (Less8U y x) + for { + if v_0.Op != OpLeq8U { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLess8U) + v.AddArg2(y, x) + return true + } + return false +} +func rewriteValuegeneric_OpOffPtr(v *Value) bool { + v_0 := v.Args[0] + // match: (OffPtr (OffPtr p [y]) [x]) + // result: (OffPtr p [x+y]) + for { + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpOffPtr { + break + } + y := auxIntToInt64(v_0.AuxInt) + p := v_0.Args[0] + v.reset(OpOffPtr) + v.AuxInt = int64ToAuxInt(x + y) + v.AddArg(p) + return true + } + // match: (OffPtr p [0]) + // cond: v.Type.Compare(p.Type) == types.CMPeq + // result: p + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + p := v_0 + if !(v.Type.Compare(p.Type) == types.CMPeq) { + break + } + v.copyOf(p) + return true + } + return false +} +func rewriteValuegeneric_OpOr16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Or16 (Const16 [c]) (Const16 [d])) + // result: (Const16 [c|d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1.AuxInt) + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(c | d) + return true + } + break + } + // match: (Or16 (Com16 x) (Com16 y)) + // result: (Com16 (And16 x y)) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom16 { + continue + } + x := v_0.Args[0] + if v_1.Op != OpCom16 { + continue + } + y := v_1.Args[0] + v.reset(OpCom16) + v0 := b.NewValue0(v.Pos, OpAnd16, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (Or16 x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: 
(Or16 (Const16 [0]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (Or16 (Const16 [-1]) _) + // result: (Const16 [-1]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 { + continue + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(-1) + return true + } + break + } + // match: (Or16 (Com16 x) x) + // result: (Const16 [-1]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom16 { + continue + } + x := v_0.Args[0] + if x != v_1 { + continue + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(-1) + return true + } + break + } + // match: (Or16 x (Or16 x y)) + // result: (Or16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpOr16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpOr16) + v.AddArg2(x, y) + return true + } + } + break + } + // match: (Or16 (And16 x (Const16 [c2])) (Const16 [c1])) + // cond: ^(c1 | c2) == 0 + // result: (Or16 (Const16 [c1]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAnd16 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpConst16 { + continue + } + c2 := auxIntToInt16(v_0_1.AuxInt) + if v_1.Op != OpConst16 { + continue + } + t := v_1.Type + c1 := auxIntToInt16(v_1.AuxInt) + if !(^(c1 | c2) == 0) { + continue + } + v.reset(OpOr16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(c1) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Or16 
(Or16 i:(Const16 ) z) x) + // cond: (z.Op != OpConst16 && x.Op != OpConst16) + // result: (Or16 i (Or16 z x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpOr16 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst16 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst16 && x.Op != OpConst16) { + continue + } + v.reset(OpOr16) + v0 := b.NewValue0(v.Pos, OpOr16, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (Or16 (Const16 [c]) (Or16 (Const16 [d]) x)) + // result: (Or16 (Const16 [c|d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpOr16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + continue + } + d := auxIntToInt16(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpOr16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(c | d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Or16 (Lsh16x64 x z:(Const64 [c])) (Rsh16Ux64 x (Const64 [d]))) + // cond: c < 16 && d == 16-c && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLsh16x64 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { + continue + } + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh16Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 16 && d == 16-c && canRotate(config, 16)) { + continue + } + 
v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or16 left:(Lsh16x64 x y) right:(Rsh16Ux64 x (Sub64 (Const64 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or16 left:(Lsh16x32 x y) right:(Rsh16Ux32 x (Sub32 (Const32 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x32 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or16 left:(Lsh16x16 x y) right:(Rsh16Ux16 x (Sub16 (Const16 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + 
// result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x16 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or16 left:(Lsh16x8 x y) right:(Rsh16Ux8 x (Sub8 (Const8 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or16 right:(Rsh16Ux64 x y) left:(Lsh16x64 x z:(Sub64 (Const64 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x64 { + 
continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or16 right:(Rsh16Ux32 x y) left:(Lsh16x32 x z:(Sub32 (Const32 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or16 right:(Rsh16Ux16 x y) left:(Lsh16x16 x z:(Sub16 (Const16 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux16 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + 
} + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or16 right:(Rsh16Ux8 x y) left:(Lsh16x8 x z:(Sub8 (Const8 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux8 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x8 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpOr32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Or32 (Const32 [c]) (Const32 [d])) + // result: (Const32 [c|d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(c | d) + return true + } + break + } + // match: (Or32 (Com32 x) (Com32 y)) + // result: (Com32 (And32 x y)) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom32 { + continue + } + x := v_0.Args[0] + if v_1.Op != OpCom32 { + continue + } + y := v_1.Args[0] + v.reset(OpCom32) + v0 := b.NewValue0(v.Pos, OpAnd32, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (Or32 x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (Or32 (Const32 [0]) x) + // result: x + for { + for 
_i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (Or32 (Const32 [-1]) _) + // result: (Const32 [-1]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 { + continue + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(-1) + return true + } + break + } + // match: (Or32 (Com32 x) x) + // result: (Const32 [-1]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom32 { + continue + } + x := v_0.Args[0] + if x != v_1 { + continue + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(-1) + return true + } + break + } + // match: (Or32 x (Or32 x y)) + // result: (Or32 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpOr32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpOr32) + v.AddArg2(x, y) + return true + } + } + break + } + // match: (Or32 (And32 x (Const32 [c2])) (Const32 [c1])) + // cond: ^(c1 | c2) == 0 + // result: (Or32 (Const32 [c1]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAnd32 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpConst32 { + continue + } + c2 := auxIntToInt32(v_0_1.AuxInt) + if v_1.Op != OpConst32 { + continue + } + t := v_1.Type + c1 := auxIntToInt32(v_1.AuxInt) + if !(^(c1 | c2) == 0) { + continue + } + v.reset(OpOr32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(c1) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Or32 (Or32 i:(Const32 ) z) x) + // cond: (z.Op != 
OpConst32 && x.Op != OpConst32) + // result: (Or32 i (Or32 z x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpOr32 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst32 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst32 && x.Op != OpConst32) { + continue + } + v.reset(OpOr32) + v0 := b.NewValue0(v.Pos, OpOr32, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (Or32 (Const32 [c]) (Or32 (Const32 [d]) x)) + // result: (Or32 (Const32 [c|d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpOr32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst32 || v_1_0.Type != t { + continue + } + d := auxIntToInt32(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpOr32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(c | d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Or32 (Lsh32x64 x z:(Const64 [c])) (Rsh32Ux64 x (Const64 [d]))) + // cond: c < 32 && d == 32-c && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLsh32x64 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { + continue + } + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh32Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 32 && d == 32-c && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return 
true + } + break + } + // match: (Or32 left:(Lsh32x64 x y) right:(Rsh32Ux64 x (Sub64 (Const64 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh32x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or32 left:(Lsh32x32 x y) right:(Rsh32Ux32 x (Sub32 (Const32 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh32x32 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or32 left:(Lsh32x16 x y) right:(Rsh32Ux16 x (Sub16 (Const16 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) + for { + for _i0 := 
0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh32x16 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or32 left:(Lsh32x8 x y) right:(Rsh32Ux8 x (Sub8 (Const8 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh32x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or32 right:(Rsh32Ux64 x y) left:(Lsh32x64 x z:(Sub64 (Const64 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh32Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] 
{ + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or32 right:(Rsh32Ux32 x y) left:(Lsh32x32 x z:(Sub32 (Const32 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh32Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or32 right:(Rsh32Ux16 x y) left:(Lsh32x16 x z:(Sub16 (Const16 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh32Ux16 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return 
true + } + break + } + // match: (Or32 right:(Rsh32Ux8 x y) left:(Lsh32x8 x z:(Sub8 (Const8 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh32Ux8 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x8 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpOr64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Or64 (Const64 [c]) (Const64 [d])) + // result: (Const64 [c|d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(c | d) + return true + } + break + } + // match: (Or64 (Com64 x) (Com64 y)) + // result: (Com64 (And64 x y)) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom64 { + continue + } + x := v_0.Args[0] + if v_1.Op != OpCom64 { + continue + } + y := v_1.Args[0] + v.reset(OpCom64) + v0 := b.NewValue0(v.Pos, OpAnd64, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (Or64 x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (Or64 (Const64 [0]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + 
if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (Or64 (Const64 [-1]) _) + // result: (Const64 [-1]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 { + continue + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(-1) + return true + } + break + } + // match: (Or64 (Com64 x) x) + // result: (Const64 [-1]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom64 { + continue + } + x := v_0.Args[0] + if x != v_1 { + continue + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(-1) + return true + } + break + } + // match: (Or64 x (Or64 x y)) + // result: (Or64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpOr64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpOr64) + v.AddArg2(x, y) + return true + } + } + break + } + // match: (Or64 (And64 x (Const64 [c2])) (Const64 [c1])) + // cond: ^(c1 | c2) == 0 + // result: (Or64 (Const64 [c1]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAnd64 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpConst64 { + continue + } + c2 := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + continue + } + t := v_1.Type + c1 := auxIntToInt64(v_1.AuxInt) + if !(^(c1 | c2) == 0) { + continue + } + v.reset(OpOr64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c1) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Or64 (Or64 i:(Const64 ) z) x) + // cond: (z.Op != OpConst64 && x.Op != OpConst64) + // result: (Or64 i (Or64 z 
x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpOr64 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst64 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst64 && x.Op != OpConst64) { + continue + } + v.reset(OpOr64) + v0 := b.NewValue0(v.Pos, OpOr64, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (Or64 (Const64 [c]) (Or64 (Const64 [d]) x)) + // result: (Or64 (Const64 [c|d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpOr64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := auxIntToInt64(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpOr64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c | d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Or64 (Lsh64x64 x z:(Const64 [c])) (Rsh64Ux64 x (Const64 [d]))) + // cond: c < 64 && d == 64-c && canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLsh64x64 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { + continue + } + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh64Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 64 && d == 64-c && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or64 left:(Lsh64x64 x y) 
right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh64x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or64 left:(Lsh64x32 x y) right:(Rsh64Ux32 x (Sub32 (Const32 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh64x32 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or64 left:(Lsh64x16 x y) right:(Rsh64Ux16 x (Sub16 (Const16 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 
+ if left.Op != OpLsh64x16 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or64 left:(Lsh64x8 x y) right:(Rsh64Ux8 x (Sub8 (Const8 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh64x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or64 right:(Rsh64Ux64 x y) left:(Lsh64x64 x z:(Sub64 (Const64 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh64Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + 
continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or64 right:(Rsh64Ux32 x y) left:(Lsh64x32 x z:(Sub32 (Const32 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh64Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or64 right:(Rsh64Ux16 x y) left:(Lsh64x16 x z:(Sub16 (Const16 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh64Ux16 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or64 right:(Rsh64Ux8 x y) 
left:(Lsh64x8 x z:(Sub8 (Const8 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh64Ux8 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x8 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpOr8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Or8 (Const8 [c]) (Const8 [d])) + // result: (Const8 [c|d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1.AuxInt) + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(c | d) + return true + } + break + } + // match: (Or8 (Com8 x) (Com8 y)) + // result: (Com8 (And8 x y)) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom8 { + continue + } + x := v_0.Args[0] + if v_1.Op != OpCom8 { + continue + } + y := v_1.Args[0] + v.reset(OpCom8) + v0 := b.NewValue0(v.Pos, OpAnd8, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (Or8 x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (Or8 (Const8 [0]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + continue + } + x := v_1 
+ v.copyOf(x) + return true + } + break + } + // match: (Or8 (Const8 [-1]) _) + // result: (Const8 [-1]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 { + continue + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(-1) + return true + } + break + } + // match: (Or8 (Com8 x) x) + // result: (Const8 [-1]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom8 { + continue + } + x := v_0.Args[0] + if x != v_1 { + continue + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(-1) + return true + } + break + } + // match: (Or8 x (Or8 x y)) + // result: (Or8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpOr8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpOr8) + v.AddArg2(x, y) + return true + } + } + break + } + // match: (Or8 (And8 x (Const8 [c2])) (Const8 [c1])) + // cond: ^(c1 | c2) == 0 + // result: (Or8 (Const8 [c1]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAnd8 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpConst8 { + continue + } + c2 := auxIntToInt8(v_0_1.AuxInt) + if v_1.Op != OpConst8 { + continue + } + t := v_1.Type + c1 := auxIntToInt8(v_1.AuxInt) + if !(^(c1 | c2) == 0) { + continue + } + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(c1) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Or8 (Or8 i:(Const8 ) z) x) + // cond: (z.Op != OpConst8 && x.Op != OpConst8) + // result: (Or8 i (Or8 z x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpOr8 { + continue + } + _ = 
v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst8 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst8 && x.Op != OpConst8) { + continue + } + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpOr8, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (Or8 (Const8 [c]) (Or8 (Const8 [d]) x)) + // result: (Or8 (Const8 [c|d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { + continue + } + t := v_0.Type + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpOr8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + continue + } + d := auxIntToInt8(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(c | d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Or8 (Lsh8x64 x z:(Const64 [c])) (Rsh8Ux64 x (Const64 [d]))) + // cond: c < 8 && d == 8-c && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLsh8x64 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { + continue + } + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh8Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 8 && d == 8-c && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or8 left:(Lsh8x64 x y) right:(Rsh8Ux64 x (Sub64 (Const64 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x 
y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or8 left:(Lsh8x32 x y) right:(Rsh8Ux32 x (Sub32 (Const32 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x32 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or8 left:(Lsh8x16 x y) right:(Rsh8Ux16 x (Sub16 (Const16 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x16 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux16 { + continue + } + _ = right.Args[1] + if x != 
right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or8 left:(Lsh8x8 x y) right:(Rsh8Ux8 x (Sub8 (Const8 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or8 right:(Rsh8Ux64 x y) left:(Lsh8x64 x z:(Sub64 (Const64 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && 
canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or8 right:(Rsh8Ux32 x y) left:(Lsh8x32 x z:(Sub32 (Const32 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or8 right:(Rsh8Ux16 x y) left:(Lsh8x16 x z:(Sub16 (Const16 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux16 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or8 right:(Rsh8Ux8 x y) left:(Lsh8x8 x z:(Sub8 (Const8 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = 
_i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux8 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x8 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpOrB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OrB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d]))) + // cond: c >= d + // result: (Less64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess64 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLess64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c >= d) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = int64ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq64 (Const64 [c]) x) (Less64 x (Const64 [d]))) + // cond: c >= d + // result: (Leq64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq64 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLess64 { + 
continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c >= d) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = int64ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less32 (Const32 [c]) x) (Less32 x (Const32 [d]))) + // cond: c >= d + // result: (Less32U (Const32 [c-d]) (Sub32 x (Const32 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess32 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLess32 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(c >= d) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = int32ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq32 (Const32 [c]) x) (Less32 x (Const32 [d]))) + // cond: c >= d + // result: (Leq32U (Const32 [c-d]) (Sub32 x (Const32 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq32 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLess32 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(c >= d) { + 
continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = int32ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less16 (Const16 [c]) x) (Less16 x (Const16 [d]))) + // cond: c >= d + // result: (Less16U (Const16 [c-d]) (Sub16 x (Const16 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess16 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLess16 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(c >= d) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = int16ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq16 (Const16 [c]) x) (Less16 x (Const16 [d]))) + // cond: c >= d + // result: (Leq16U (Const16 [c-d]) (Sub16 x (Const16 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq16 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLess16 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(c >= d) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = int16ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, 
x.Type) + v2.AuxInt = int16ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less8 (Const8 [c]) x) (Less8 x (Const8 [d]))) + // cond: c >= d + // result: (Less8U (Const8 [c-d]) (Sub8 x (Const8 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess8 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLess8 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(c >= d) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = int8ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq8 (Const8 [c]) x) (Less8 x (Const8 [d]))) + // cond: c >= d + // result: (Leq8U (Const8 [c-d]) (Sub8 x (Const8 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq8 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLess8 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(c >= d) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = int8ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) + // cond: c >= d+1 && d+1 > d + // result: (Less64U 
(Const64 [c-d-1]) (Sub64 x (Const64 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess64 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLeq64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c >= d+1 && d+1 > d) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = int64ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) + // cond: c >= d+1 && d+1 > d + // result: (Leq64U (Const64 [c-d-1]) (Sub64 x (Const64 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq64 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLeq64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c >= d+1 && d+1 > d) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = int64ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) + // cond: c >= d+1 && d+1 > d + // result: (Less32U (Const32 [c-d-1]) (Sub32 x (Const32 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if 
v_0.Op != OpLess32 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLeq32 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(c >= d+1 && d+1 > d) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = int32ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) + // cond: c >= d+1 && d+1 > d + // result: (Leq32U (Const32 [c-d-1]) (Sub32 x (Const32 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq32 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLeq32 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(c >= d+1 && d+1 > d) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = int32ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) + // cond: c >= d+1 && d+1 > d + // result: (Less16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess16 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + 
c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLeq16 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(c >= d+1 && d+1 > d) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = int16ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) + // cond: c >= d+1 && d+1 > d + // result: (Leq16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq16 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLeq16 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(c >= d+1 && d+1 > d) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = int16ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) + // cond: c >= d+1 && d+1 > d + // result: (Less8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess8 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLeq8 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + 
v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(c >= d+1 && d+1 > d) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = int8ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) + // cond: c >= d+1 && d+1 > d + // result: (Leq8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq8 { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLeq8 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(c >= d+1 && d+1 > d) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = int8ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less64U (Const64 [c]) x) (Less64U x (Const64 [d]))) + // cond: uint64(c) >= uint64(d) + // result: (Less64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLess64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(uint64(c) >= uint64(d)) { + continue + 
} + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = int64ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq64U (Const64 [c]) x) (Less64U x (Const64 [d]))) + // cond: uint64(c) >= uint64(d) + // result: (Leq64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLess64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(uint64(c) >= uint64(d)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = int64ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less32U (Const32 [c]) x) (Less32U x (Const32 [d]))) + // cond: uint32(c) >= uint32(d) + // result: (Less32U (Const32 [c-d]) (Sub32 x (Const32 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLess32U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(uint32(c) >= uint32(d)) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = int32ToAuxInt(c - d) + v1 := 
b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq32U (Const32 [c]) x) (Less32U x (Const32 [d]))) + // cond: uint32(c) >= uint32(d) + // result: (Leq32U (Const32 [c-d]) (Sub32 x (Const32 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLess32U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(uint32(c) >= uint32(d)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = int32ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less16U (Const16 [c]) x) (Less16U x (Const16 [d]))) + // cond: uint16(c) >= uint16(d) + // result: (Less16U (Const16 [c-d]) (Sub16 x (Const16 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLess16U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(uint16(c) >= uint16(d)) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = int16ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d) + 
v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq16U (Const16 [c]) x) (Less16U x (Const16 [d]))) + // cond: uint16(c) >= uint16(d) + // result: (Leq16U (Const16 [c-d]) (Sub16 x (Const16 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLess16U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(uint16(c) >= uint16(d)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = int16ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less8U (Const8 [c]) x) (Less8U x (Const8 [d]))) + // cond: uint8(c) >= uint8(d) + // result: (Less8U (Const8 [c-d]) (Sub8 x (Const8 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLess8U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(uint8(c) >= uint8(d)) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = int8ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq8U (Const8 [c]) x) (Less8U x (Const8 [d]))) + // cond: 
uint8(c) >= uint8(d) + // result: (Leq8U (Const8 [c-d]) (Sub8 x (Const8 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLess8U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(uint8(c) >= uint8(d)) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = int8ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) + // cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d) + // result: (Less64U (Const64 [c-d-1]) (Sub64 x (Const64 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLeq64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = int64ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) + // cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d) + // result: (Leq64U 
(Const64 [c-d-1]) (Sub64 x (Const64 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLeq64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = int64ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) + // cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d) + // result: (Less32U (Const32 [c-d-1]) (Sub32 x (Const32 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLeq32U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = int32ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) + // cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d) + // 
result: (Leq32U (Const32 [c-d-1]) (Sub32 x (Const32 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLeq32U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = int32ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) + // cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) + // result: (Less16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLeq16U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = int16ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) + // cond: uint16(c) >= uint16(d+1) && uint16(d+1) > 
uint16(d) + // result: (Leq16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLeq16U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = int16ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) + // cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) + // result: (Less8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLeq8U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = int8ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) + // cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) + // result: 
(Leq8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLeq8U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = int8ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpPhi(v *Value) bool { + b := v.Block + // match: (Phi (Const8 [c]) (Const8 [c])) + // result: (Const8 [c]) + for { + if len(v.Args) != 2 { + break + } + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + v_1 := v.Args[1] + if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != c { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(c) + return true + } + // match: (Phi (Const16 [c]) (Const16 [c])) + // result: (Const16 [c]) + for { + if len(v.Args) != 2 { + break + } + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + v_1 := v.Args[1] + if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != c { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(c) + return true + } + // match: (Phi (Const32 [c]) (Const32 [c])) + // result: (Const32 [c]) + for { + if len(v.Args) != 2 { + break + } + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + v_1 := v.Args[1] + if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != c { + break + } 
+ v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(c) + return true + } + // match: (Phi (Const64 [c]) (Const64 [c])) + // result: (Const64 [c]) + for { + if len(v.Args) != 2 { + break + } + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_1 := v.Args[1] + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(c) + return true + } + // match: (Phi nx:(Not x) ny:(Not y)) + // cond: nx.Uses == 1 && ny.Uses == 1 + // result: (Not (Phi x y)) + for { + if len(v.Args) != 2 { + break + } + t := v.Type + _ = v.Args[1] + nx := v.Args[0] + if nx.Op != OpNot { + break + } + x := nx.Args[0] + ny := v.Args[1] + if ny.Op != OpNot { + break + } + y := ny.Args[0] + if !(nx.Uses == 1 && ny.Uses == 1) { + break + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpPhi, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpPtrIndex(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (PtrIndex ptr idx) + // cond: config.PtrSize == 4 && is32Bit(t.Elem().Size()) + // result: (AddPtr ptr (Mul32 idx (Const32 [int32(t.Elem().Size())]))) + for { + t := v.Type + ptr := v_0 + idx := v_1 + if !(config.PtrSize == 4 && is32Bit(t.Elem().Size())) { + break + } + v.reset(OpAddPtr) + v0 := b.NewValue0(v.Pos, OpMul32, typ.Int) + v1 := b.NewValue0(v.Pos, OpConst32, typ.Int) + v1.AuxInt = int32ToAuxInt(int32(t.Elem().Size())) + v0.AddArg2(idx, v1) + v.AddArg2(ptr, v0) + return true + } + // match: (PtrIndex ptr idx) + // cond: config.PtrSize == 8 + // result: (AddPtr ptr (Mul64 idx (Const64 [t.Elem().Size()]))) + for { + t := v.Type + ptr := v_0 + idx := v_1 + if !(config.PtrSize == 8) { + break + } + v.reset(OpAddPtr) + v0 := b.NewValue0(v.Pos, OpMul64, typ.Int) + v1 := b.NewValue0(v.Pos, OpConst64, typ.Int) + v1.AuxInt = int64ToAuxInt(t.Elem().Size()) + 
v0.AddArg2(idx, v1) + v.AddArg2(ptr, v0) + return true + } + return false +} +func rewriteValuegeneric_OpRotateLeft16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (RotateLeft16 x (Const16 [c])) + // cond: c%16 == 0 + // result: x + for { + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(c%16 == 0) { + break + } + v.copyOf(x) + return true + } + // match: (RotateLeft16 x (And64 y (Const64 [c]))) + // cond: c&15 == 15 + // result: (RotateLeft16 x y) + for { + x := v_0 + if v_1.Op != OpAnd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c&15 == 15) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft16 x (And32 y (Const32 [c]))) + // cond: c&15 == 15 + // result: (RotateLeft16 x y) + for { + x := v_0 + if v_1.Op != OpAnd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_1.AuxInt) + if !(c&15 == 15) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft16 x (And16 y (Const16 [c]))) + // cond: c&15 == 15 + // result: (RotateLeft16 x y) + for { + x := v_0 + if v_1.Op != OpAnd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_1.AuxInt) + if !(c&15 == 15) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft16 x (And8 y (Const8 [c]))) + // 
cond: c&15 == 15 + // result: (RotateLeft16 x y) + for { + x := v_0 + if v_1.Op != OpAnd8 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_1.AuxInt) + if !(c&15 == 15) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft16 x (Neg64 (And64 y (Const64 [c])))) + // cond: c&15 == 15 + // result: (RotateLeft16 x (Neg64 y)) + for { + x := v_0 + if v_1.Op != OpNeg64 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd64 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_0_1.AuxInt) + if !(c&15 == 15) { + continue + } + v.reset(OpRotateLeft16) + v0 := b.NewValue0(v.Pos, OpNeg64, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft16 x (Neg32 (And32 y (Const32 [c])))) + // cond: c&15 == 15 + // result: (RotateLeft16 x (Neg32 y)) + for { + x := v_0 + if v_1.Op != OpNeg32 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd32 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0_1.AuxInt) + if !(c&15 == 15) { + continue + } + v.reset(OpRotateLeft16) + v0 := b.NewValue0(v.Pos, OpNeg32, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft16 x (Neg16 (And16 y (Const16 [c])))) + // cond: c&15 == 15 + // result: (RotateLeft16 x (Neg16 y)) + for { + x := v_0 + if v_1.Op != OpNeg16 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd16 { + break + } + _ 
= v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_0_1.AuxInt) + if !(c&15 == 15) { + continue + } + v.reset(OpRotateLeft16) + v0 := b.NewValue0(v.Pos, OpNeg16, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft16 x (Neg8 (And8 y (Const8 [c])))) + // cond: c&15 == 15 + // result: (RotateLeft16 x (Neg8 y)) + for { + x := v_0 + if v_1.Op != OpNeg8 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd8 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_0_1.AuxInt) + if !(c&15 == 15) { + continue + } + v.reset(OpRotateLeft16) + v0 := b.NewValue0(v.Pos, OpNeg8, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft16 x (Add64 y (Const64 [c]))) + // cond: c&15 == 0 + // result: (RotateLeft16 x y) + for { + x := v_0 + if v_1.Op != OpAdd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c&15 == 0) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft16 x (Add32 y (Const32 [c]))) + // cond: c&15 == 0 + // result: (RotateLeft16 x y) + for { + x := v_0 + if v_1.Op != OpAdd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_1.AuxInt) + if !(c&15 == 0) { + continue + } + 
v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft16 x (Add16 y (Const16 [c]))) + // cond: c&15 == 0 + // result: (RotateLeft16 x y) + for { + x := v_0 + if v_1.Op != OpAdd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_1.AuxInt) + if !(c&15 == 0) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft16 x (Add8 y (Const8 [c]))) + // cond: c&15 == 0 + // result: (RotateLeft16 x y) + for { + x := v_0 + if v_1.Op != OpAdd8 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_1.AuxInt) + if !(c&15 == 0) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft16 x (Sub64 (Const64 [c]) y)) + // cond: c&15 == 0 + // result: (RotateLeft16 x (Neg64 y)) + for { + x := v_0 + if v_1.Op != OpSub64 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1_0.AuxInt) + if !(c&15 == 0) { + break + } + v.reset(OpRotateLeft16) + v0 := b.NewValue0(v.Pos, OpNeg64, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft16 x (Sub32 (Const32 [c]) y)) + // cond: c&15 == 0 + // result: (RotateLeft16 x (Neg32 y)) + for { + x := v_0 + if v_1.Op != OpSub32 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c&15 == 0) { + break + } + v.reset(OpRotateLeft16) + v0 := b.NewValue0(v.Pos, OpNeg32, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft16 x (Sub16 (Const16 [c]) y)) + // 
cond: c&15 == 0 + // result: (RotateLeft16 x (Neg16 y)) + for { + x := v_0 + if v_1.Op != OpSub16 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1_0.AuxInt) + if !(c&15 == 0) { + break + } + v.reset(OpRotateLeft16) + v0 := b.NewValue0(v.Pos, OpNeg16, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft16 x (Sub8 (Const8 [c]) y)) + // cond: c&15 == 0 + // result: (RotateLeft16 x (Neg8 y)) + for { + x := v_0 + if v_1.Op != OpSub8 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1_0.AuxInt) + if !(c&15 == 0) { + break + } + v.reset(OpRotateLeft16) + v0 := b.NewValue0(v.Pos, OpNeg8, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft16 x (Const64 [c])) + // cond: config.PtrSize == 4 + // result: (RotateLeft16 x (Const32 [int32(c)])) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + c := auxIntToInt64(v_1.AuxInt) + if !(config.PtrSize == 4) { + break + } + v.reset(OpRotateLeft16) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft16 (RotateLeft16 x c) d) + // cond: c.Type.Size() == 8 && d.Type.Size() == 8 + // result: (RotateLeft16 x (Add64 c d)) + for { + if v_0.Op != OpRotateLeft16 { + break + } + c := v_0.Args[1] + x := v_0.Args[0] + d := v_1 + if !(c.Type.Size() == 8 && d.Type.Size() == 8) { + break + } + v.reset(OpRotateLeft16) + v0 := b.NewValue0(v.Pos, OpAdd64, c.Type) + v0.AddArg2(c, d) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft16 (RotateLeft16 x c) d) + // cond: c.Type.Size() == 4 && d.Type.Size() == 4 + // result: (RotateLeft16 x (Add32 c d)) + for { + if v_0.Op != OpRotateLeft16 { + break + } + c := v_0.Args[1] + x := v_0.Args[0] + d := v_1 + if !(c.Type.Size() == 4 && d.Type.Size() == 4) { + break + } + v.reset(OpRotateLeft16) 
+ v0 := b.NewValue0(v.Pos, OpAdd32, c.Type) + v0.AddArg2(c, d) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft16 (RotateLeft16 x c) d) + // cond: c.Type.Size() == 2 && d.Type.Size() == 2 + // result: (RotateLeft16 x (Add16 c d)) + for { + if v_0.Op != OpRotateLeft16 { + break + } + c := v_0.Args[1] + x := v_0.Args[0] + d := v_1 + if !(c.Type.Size() == 2 && d.Type.Size() == 2) { + break + } + v.reset(OpRotateLeft16) + v0 := b.NewValue0(v.Pos, OpAdd16, c.Type) + v0.AddArg2(c, d) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft16 (RotateLeft16 x c) d) + // cond: c.Type.Size() == 1 && d.Type.Size() == 1 + // result: (RotateLeft16 x (Add8 c d)) + for { + if v_0.Op != OpRotateLeft16 { + break + } + c := v_0.Args[1] + x := v_0.Args[0] + d := v_1 + if !(c.Type.Size() == 1 && d.Type.Size() == 1) { + break + } + v.reset(OpRotateLeft16) + v0 := b.NewValue0(v.Pos, OpAdd8, c.Type) + v0.AddArg2(c, d) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpRotateLeft32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (RotateLeft32 x (Const32 [c])) + // cond: c%32 == 0 + // result: x + for { + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(c%32 == 0) { + break + } + v.copyOf(x) + return true + } + // match: (RotateLeft32 x (And64 y (Const64 [c]))) + // cond: c&31 == 31 + // result: (RotateLeft32 x y) + for { + x := v_0 + if v_1.Op != OpAnd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c&31 == 31) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft32 x (And32 y (Const32 [c]))) + // cond: c&31 == 31 + // result: (RotateLeft32 x y) + for { + x := v_0 + if v_1.Op != OpAnd32 { 
+ break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_1.AuxInt) + if !(c&31 == 31) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft32 x (And16 y (Const16 [c]))) + // cond: c&31 == 31 + // result: (RotateLeft32 x y) + for { + x := v_0 + if v_1.Op != OpAnd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_1.AuxInt) + if !(c&31 == 31) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft32 x (And8 y (Const8 [c]))) + // cond: c&31 == 31 + // result: (RotateLeft32 x y) + for { + x := v_0 + if v_1.Op != OpAnd8 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_1.AuxInt) + if !(c&31 == 31) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft32 x (Neg64 (And64 y (Const64 [c])))) + // cond: c&31 == 31 + // result: (RotateLeft32 x (Neg64 y)) + for { + x := v_0 + if v_1.Op != OpNeg64 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd64 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_0_1.AuxInt) + if !(c&31 == 31) { + continue + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpNeg64, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // 
match: (RotateLeft32 x (Neg32 (And32 y (Const32 [c])))) + // cond: c&31 == 31 + // result: (RotateLeft32 x (Neg32 y)) + for { + x := v_0 + if v_1.Op != OpNeg32 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd32 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0_1.AuxInt) + if !(c&31 == 31) { + continue + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpNeg32, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft32 x (Neg16 (And16 y (Const16 [c])))) + // cond: c&31 == 31 + // result: (RotateLeft32 x (Neg16 y)) + for { + x := v_0 + if v_1.Op != OpNeg16 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd16 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_0_1.AuxInt) + if !(c&31 == 31) { + continue + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpNeg16, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft32 x (Neg8 (And8 y (Const8 [c])))) + // cond: c&31 == 31 + // result: (RotateLeft32 x (Neg8 y)) + for { + x := v_0 + if v_1.Op != OpNeg8 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd8 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_0_1.AuxInt) + if !(c&31 == 31) { + continue + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpNeg8, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft32 x (Add64 y 
(Const64 [c]))) + // cond: c&31 == 0 + // result: (RotateLeft32 x y) + for { + x := v_0 + if v_1.Op != OpAdd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c&31 == 0) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft32 x (Add32 y (Const32 [c]))) + // cond: c&31 == 0 + // result: (RotateLeft32 x y) + for { + x := v_0 + if v_1.Op != OpAdd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_1.AuxInt) + if !(c&31 == 0) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft32 x (Add16 y (Const16 [c]))) + // cond: c&31 == 0 + // result: (RotateLeft32 x y) + for { + x := v_0 + if v_1.Op != OpAdd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_1.AuxInt) + if !(c&31 == 0) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft32 x (Add8 y (Const8 [c]))) + // cond: c&31 == 0 + // result: (RotateLeft32 x y) + for { + x := v_0 + if v_1.Op != OpAdd8 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_1.AuxInt) + if !(c&31 == 0) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft32 x (Sub64 (Const64 [c]) y)) + // 
cond: c&31 == 0 + // result: (RotateLeft32 x (Neg64 y)) + for { + x := v_0 + if v_1.Op != OpSub64 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1_0.AuxInt) + if !(c&31 == 0) { + break + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpNeg64, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft32 x (Sub32 (Const32 [c]) y)) + // cond: c&31 == 0 + // result: (RotateLeft32 x (Neg32 y)) + for { + x := v_0 + if v_1.Op != OpSub32 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c&31 == 0) { + break + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpNeg32, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft32 x (Sub16 (Const16 [c]) y)) + // cond: c&31 == 0 + // result: (RotateLeft32 x (Neg16 y)) + for { + x := v_0 + if v_1.Op != OpSub16 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1_0.AuxInt) + if !(c&31 == 0) { + break + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpNeg16, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft32 x (Sub8 (Const8 [c]) y)) + // cond: c&31 == 0 + // result: (RotateLeft32 x (Neg8 y)) + for { + x := v_0 + if v_1.Op != OpSub8 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1_0.AuxInt) + if !(c&31 == 0) { + break + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpNeg8, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft32 x (Const64 [c])) + // cond: config.PtrSize == 4 + // result: (RotateLeft32 x (Const32 [int32(c)])) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + c := auxIntToInt64(v_1.AuxInt) + if !(config.PtrSize == 4) { + break + } + 
v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft32 (RotateLeft32 x c) d) + // cond: c.Type.Size() == 8 && d.Type.Size() == 8 + // result: (RotateLeft32 x (Add64 c d)) + for { + if v_0.Op != OpRotateLeft32 { + break + } + c := v_0.Args[1] + x := v_0.Args[0] + d := v_1 + if !(c.Type.Size() == 8 && d.Type.Size() == 8) { + break + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpAdd64, c.Type) + v0.AddArg2(c, d) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft32 (RotateLeft32 x c) d) + // cond: c.Type.Size() == 4 && d.Type.Size() == 4 + // result: (RotateLeft32 x (Add32 c d)) + for { + if v_0.Op != OpRotateLeft32 { + break + } + c := v_0.Args[1] + x := v_0.Args[0] + d := v_1 + if !(c.Type.Size() == 4 && d.Type.Size() == 4) { + break + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpAdd32, c.Type) + v0.AddArg2(c, d) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft32 (RotateLeft32 x c) d) + // cond: c.Type.Size() == 2 && d.Type.Size() == 2 + // result: (RotateLeft32 x (Add16 c d)) + for { + if v_0.Op != OpRotateLeft32 { + break + } + c := v_0.Args[1] + x := v_0.Args[0] + d := v_1 + if !(c.Type.Size() == 2 && d.Type.Size() == 2) { + break + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpAdd16, c.Type) + v0.AddArg2(c, d) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft32 (RotateLeft32 x c) d) + // cond: c.Type.Size() == 1 && d.Type.Size() == 1 + // result: (RotateLeft32 x (Add8 c d)) + for { + if v_0.Op != OpRotateLeft32 { + break + } + c := v_0.Args[1] + x := v_0.Args[0] + d := v_1 + if !(c.Type.Size() == 1 && d.Type.Size() == 1) { + break + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpAdd8, c.Type) + v0.AddArg2(c, d) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpRotateLeft64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + 
config := b.Func.Config + // match: (RotateLeft64 x (Const64 [c])) + // cond: c%64 == 0 + // result: x + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c%64 == 0) { + break + } + v.copyOf(x) + return true + } + // match: (RotateLeft64 x (And64 y (Const64 [c]))) + // cond: c&63 == 63 + // result: (RotateLeft64 x y) + for { + x := v_0 + if v_1.Op != OpAnd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c&63 == 63) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft64 x (And32 y (Const32 [c]))) + // cond: c&63 == 63 + // result: (RotateLeft64 x y) + for { + x := v_0 + if v_1.Op != OpAnd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_1.AuxInt) + if !(c&63 == 63) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft64 x (And16 y (Const16 [c]))) + // cond: c&63 == 63 + // result: (RotateLeft64 x y) + for { + x := v_0 + if v_1.Op != OpAnd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_1.AuxInt) + if !(c&63 == 63) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft64 x (And8 y (Const8 [c]))) + // cond: c&63 == 63 + // result: (RotateLeft64 x y) + for { + x := v_0 + if v_1.Op != OpAnd8 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; 
_i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_1.AuxInt) + if !(c&63 == 63) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft64 x (Neg64 (And64 y (Const64 [c])))) + // cond: c&63 == 63 + // result: (RotateLeft64 x (Neg64 y)) + for { + x := v_0 + if v_1.Op != OpNeg64 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd64 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_0_1.AuxInt) + if !(c&63 == 63) { + continue + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpNeg64, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft64 x (Neg32 (And32 y (Const32 [c])))) + // cond: c&63 == 63 + // result: (RotateLeft64 x (Neg32 y)) + for { + x := v_0 + if v_1.Op != OpNeg32 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd32 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0_1.AuxInt) + if !(c&63 == 63) { + continue + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpNeg32, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft64 x (Neg16 (And16 y (Const16 [c])))) + // cond: c&63 == 63 + // result: (RotateLeft64 x (Neg16 y)) + for { + x := v_0 + if v_1.Op != OpNeg16 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd16 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst16 { + 
continue + } + c := auxIntToInt16(v_1_0_1.AuxInt) + if !(c&63 == 63) { + continue + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpNeg16, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft64 x (Neg8 (And8 y (Const8 [c])))) + // cond: c&63 == 63 + // result: (RotateLeft64 x (Neg8 y)) + for { + x := v_0 + if v_1.Op != OpNeg8 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd8 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_0_1.AuxInt) + if !(c&63 == 63) { + continue + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpNeg8, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft64 x (Add64 y (Const64 [c]))) + // cond: c&63 == 0 + // result: (RotateLeft64 x y) + for { + x := v_0 + if v_1.Op != OpAdd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c&63 == 0) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft64 x (Add32 y (Const32 [c]))) + // cond: c&63 == 0 + // result: (RotateLeft64 x y) + for { + x := v_0 + if v_1.Op != OpAdd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_1.AuxInt) + if !(c&63 == 0) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft64 x (Add16 y (Const16 [c]))) + // cond: c&63 == 0 + // result: (RotateLeft64 x y) + for { + x := v_0 + if v_1.Op != 
OpAdd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_1.AuxInt) + if !(c&63 == 0) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft64 x (Add8 y (Const8 [c]))) + // cond: c&63 == 0 + // result: (RotateLeft64 x y) + for { + x := v_0 + if v_1.Op != OpAdd8 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_1.AuxInt) + if !(c&63 == 0) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft64 x (Sub64 (Const64 [c]) y)) + // cond: c&63 == 0 + // result: (RotateLeft64 x (Neg64 y)) + for { + x := v_0 + if v_1.Op != OpSub64 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1_0.AuxInt) + if !(c&63 == 0) { + break + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpNeg64, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft64 x (Sub32 (Const32 [c]) y)) + // cond: c&63 == 0 + // result: (RotateLeft64 x (Neg32 y)) + for { + x := v_0 + if v_1.Op != OpSub32 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c&63 == 0) { + break + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpNeg32, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft64 x (Sub16 (Const16 [c]) y)) + // cond: c&63 == 0 + // result: (RotateLeft64 x (Neg16 y)) + for { + x := v_0 + if v_1.Op != OpSub16 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst16 { + break + } + c := 
auxIntToInt16(v_1_0.AuxInt) + if !(c&63 == 0) { + break + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpNeg16, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft64 x (Sub8 (Const8 [c]) y)) + // cond: c&63 == 0 + // result: (RotateLeft64 x (Neg8 y)) + for { + x := v_0 + if v_1.Op != OpSub8 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1_0.AuxInt) + if !(c&63 == 0) { + break + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpNeg8, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft64 x (Const64 [c])) + // cond: config.PtrSize == 4 + // result: (RotateLeft64 x (Const32 [int32(c)])) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + c := auxIntToInt64(v_1.AuxInt) + if !(config.PtrSize == 4) { + break + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft64 (RotateLeft64 x c) d) + // cond: c.Type.Size() == 8 && d.Type.Size() == 8 + // result: (RotateLeft64 x (Add64 c d)) + for { + if v_0.Op != OpRotateLeft64 { + break + } + c := v_0.Args[1] + x := v_0.Args[0] + d := v_1 + if !(c.Type.Size() == 8 && d.Type.Size() == 8) { + break + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpAdd64, c.Type) + v0.AddArg2(c, d) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft64 (RotateLeft64 x c) d) + // cond: c.Type.Size() == 4 && d.Type.Size() == 4 + // result: (RotateLeft64 x (Add32 c d)) + for { + if v_0.Op != OpRotateLeft64 { + break + } + c := v_0.Args[1] + x := v_0.Args[0] + d := v_1 + if !(c.Type.Size() == 4 && d.Type.Size() == 4) { + break + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpAdd32, c.Type) + v0.AddArg2(c, d) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft64 (RotateLeft64 x c) d) + // cond: c.Type.Size() == 2 && d.Type.Size() == 2 + 
// result: (RotateLeft64 x (Add16 c d)) + for { + if v_0.Op != OpRotateLeft64 { + break + } + c := v_0.Args[1] + x := v_0.Args[0] + d := v_1 + if !(c.Type.Size() == 2 && d.Type.Size() == 2) { + break + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpAdd16, c.Type) + v0.AddArg2(c, d) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft64 (RotateLeft64 x c) d) + // cond: c.Type.Size() == 1 && d.Type.Size() == 1 + // result: (RotateLeft64 x (Add8 c d)) + for { + if v_0.Op != OpRotateLeft64 { + break + } + c := v_0.Args[1] + x := v_0.Args[0] + d := v_1 + if !(c.Type.Size() == 1 && d.Type.Size() == 1) { + break + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpAdd8, c.Type) + v0.AddArg2(c, d) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpRotateLeft8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (RotateLeft8 x (Const8 [c])) + // cond: c%8 == 0 + // result: x + for { + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(c%8 == 0) { + break + } + v.copyOf(x) + return true + } + // match: (RotateLeft8 x (And64 y (Const64 [c]))) + // cond: c&7 == 7 + // result: (RotateLeft8 x y) + for { + x := v_0 + if v_1.Op != OpAnd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c&7 == 7) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft8 x (And32 y (Const32 [c]))) + // cond: c&7 == 7 + // result: (RotateLeft8 x y) + for { + x := v_0 + if v_1.Op != OpAnd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst32 { + continue + } + c := 
auxIntToInt32(v_1_1.AuxInt) + if !(c&7 == 7) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft8 x (And16 y (Const16 [c]))) + // cond: c&7 == 7 + // result: (RotateLeft8 x y) + for { + x := v_0 + if v_1.Op != OpAnd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_1.AuxInt) + if !(c&7 == 7) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft8 x (And8 y (Const8 [c]))) + // cond: c&7 == 7 + // result: (RotateLeft8 x y) + for { + x := v_0 + if v_1.Op != OpAnd8 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_1.AuxInt) + if !(c&7 == 7) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft8 x (Neg64 (And64 y (Const64 [c])))) + // cond: c&7 == 7 + // result: (RotateLeft8 x (Neg64 y)) + for { + x := v_0 + if v_1.Op != OpNeg64 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd64 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_0_1.AuxInt) + if !(c&7 == 7) { + continue + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpNeg64, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft8 x (Neg32 (And32 y (Const32 [c])))) + // cond: c&7 == 7 + // result: (RotateLeft8 x (Neg32 y)) + for { + x := v_0 + if v_1.Op != OpNeg32 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd32 { + break + } 
+ _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0_1.AuxInt) + if !(c&7 == 7) { + continue + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpNeg32, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft8 x (Neg16 (And16 y (Const16 [c])))) + // cond: c&7 == 7 + // result: (RotateLeft8 x (Neg16 y)) + for { + x := v_0 + if v_1.Op != OpNeg16 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd16 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_0_1.AuxInt) + if !(c&7 == 7) { + continue + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpNeg16, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft8 x (Neg8 (And8 y (Const8 [c])))) + // cond: c&7 == 7 + // result: (RotateLeft8 x (Neg8 y)) + for { + x := v_0 + if v_1.Op != OpNeg8 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd8 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_0_1.AuxInt) + if !(c&7 == 7) { + continue + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpNeg8, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft8 x (Add64 y (Const64 [c]))) + // cond: c&7 == 0 + // result: (RotateLeft8 x y) + for { + x := v_0 + if v_1.Op != OpAdd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 
{ + y := v_1_0 + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c&7 == 0) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft8 x (Add32 y (Const32 [c]))) + // cond: c&7 == 0 + // result: (RotateLeft8 x y) + for { + x := v_0 + if v_1.Op != OpAdd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_1.AuxInt) + if !(c&7 == 0) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft8 x (Add16 y (Const16 [c]))) + // cond: c&7 == 0 + // result: (RotateLeft8 x y) + for { + x := v_0 + if v_1.Op != OpAdd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_1.AuxInt) + if !(c&7 == 0) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft8 x (Add8 y (Const8 [c]))) + // cond: c&7 == 0 + // result: (RotateLeft8 x y) + for { + x := v_0 + if v_1.Op != OpAdd8 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_1.AuxInt) + if !(c&7 == 0) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft8 x (Sub64 (Const64 [c]) y)) + // cond: c&7 == 0 + // result: (RotateLeft8 x (Neg64 y)) + for { + x := v_0 + if v_1.Op != OpSub64 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1_0.AuxInt) + if !(c&7 == 0) { + break + } + 
v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpNeg64, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft8 x (Sub32 (Const32 [c]) y)) + // cond: c&7 == 0 + // result: (RotateLeft8 x (Neg32 y)) + for { + x := v_0 + if v_1.Op != OpSub32 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c&7 == 0) { + break + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpNeg32, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft8 x (Sub16 (Const16 [c]) y)) + // cond: c&7 == 0 + // result: (RotateLeft8 x (Neg16 y)) + for { + x := v_0 + if v_1.Op != OpSub16 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1_0.AuxInt) + if !(c&7 == 0) { + break + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpNeg16, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft8 x (Sub8 (Const8 [c]) y)) + // cond: c&7 == 0 + // result: (RotateLeft8 x (Neg8 y)) + for { + x := v_0 + if v_1.Op != OpSub8 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1_0.AuxInt) + if !(c&7 == 0) { + break + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpNeg8, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft8 x (Const64 [c])) + // cond: config.PtrSize == 4 + // result: (RotateLeft8 x (Const32 [int32(c)])) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + c := auxIntToInt64(v_1.AuxInt) + if !(config.PtrSize == 4) { + break + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft8 (RotateLeft8 x c) d) + // cond: c.Type.Size() == 8 && d.Type.Size() == 8 + // result: (RotateLeft8 x (Add64 c d)) + for { + if v_0.Op != 
OpRotateLeft8 { + break + } + c := v_0.Args[1] + x := v_0.Args[0] + d := v_1 + if !(c.Type.Size() == 8 && d.Type.Size() == 8) { + break + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpAdd64, c.Type) + v0.AddArg2(c, d) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft8 (RotateLeft8 x c) d) + // cond: c.Type.Size() == 4 && d.Type.Size() == 4 + // result: (RotateLeft8 x (Add32 c d)) + for { + if v_0.Op != OpRotateLeft8 { + break + } + c := v_0.Args[1] + x := v_0.Args[0] + d := v_1 + if !(c.Type.Size() == 4 && d.Type.Size() == 4) { + break + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpAdd32, c.Type) + v0.AddArg2(c, d) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft8 (RotateLeft8 x c) d) + // cond: c.Type.Size() == 2 && d.Type.Size() == 2 + // result: (RotateLeft8 x (Add16 c d)) + for { + if v_0.Op != OpRotateLeft8 { + break + } + c := v_0.Args[1] + x := v_0.Args[0] + d := v_1 + if !(c.Type.Size() == 2 && d.Type.Size() == 2) { + break + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpAdd16, c.Type) + v0.AddArg2(c, d) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft8 (RotateLeft8 x c) d) + // cond: c.Type.Size() == 1 && d.Type.Size() == 1 + // result: (RotateLeft8 x (Add8 c d)) + for { + if v_0.Op != OpRotateLeft8 { + break + } + c := v_0.Args[1] + x := v_0.Args[0] + d := v_1 + if !(c.Type.Size() == 1 && d.Type.Size() == 1) { + break + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpAdd8, c.Type) + v0.AddArg2(c, d) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpRound32F(v *Value) bool { + v_0 := v.Args[0] + // match: (Round32F x:(Const32F)) + // result: x + for { + x := v_0 + if x.Op != OpConst32F { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValuegeneric_OpRound64F(v *Value) bool { + v_0 := v.Args[0] + // match: (Round64F x:(Const64F)) + // result: x + for { + x := v_0 + if x.Op != OpConst64F { + break + } + v.copyOf(x) + return 
true + } + return false +} +func rewriteValuegeneric_OpRoundToEven(v *Value) bool { + v_0 := v.Args[0] + // match: (RoundToEven (Const64F [c])) + // result: (Const64F [math.RoundToEven(c)]) + for { + if v_0.Op != OpConst64F { + break + } + c := auxIntToFloat64(v_0.AuxInt) + v.reset(OpConst64F) + v.AuxInt = float64ToAuxInt(math.RoundToEven(c)) + return true + } + return false +} +func rewriteValuegeneric_OpRsh16Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16Ux16 x (Const16 [c])) + // result: (Rsh16Ux64 x (Const64 [int64(uint16(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + v.reset(OpRsh16Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint16(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16Ux16 (Const16 [0]) _) + // result: (Const16 [0]) + for { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh16Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16Ux32 x (Const32 [c])) + // result: (Rsh16Ux64 x (Const64 [int64(uint32(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpRsh16Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint32(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16Ux32 (Const16 [0]) _) + // result: (Const16 [0]) + for { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh16Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux64 (Const16 [c]) (Const64 [d])) + // result: (Const16 [int16(uint16(c) >> 
uint64(d))]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(int16(uint16(c) >> uint64(d))) + return true + } + // match: (Rsh16Ux64 x (Const64 [0])) + // result: x + for { + x := v_0 + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.copyOf(x) + return true + } + // match: (Rsh16Ux64 (Const16 [0]) _) + // result: (Const16 [0]) + for { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + // match: (Rsh16Ux64 _ (Const64 [c])) + // cond: uint64(c) >= 16 + // result: (Const16 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 16) { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + // match: (Rsh16Ux64 (Rsh16Ux64 x (Const64 [c])) (Const64 [d])) + // cond: !uaddOvf(c,d) + // result: (Rsh16Ux64 x (Const64 [c+d])) + for { + t := v.Type + if v_0.Op != OpRsh16Ux64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(!uaddOvf(c, d)) { + break + } + v.reset(OpRsh16Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c + d) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16Ux64 (Rsh16x64 x _) (Const64 [15])) + // result: (Rsh16Ux64 x (Const64 [15])) + for { + if v_0.Op != OpRsh16x64 { + break + } + x := v_0.Args[0] + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + if auxIntToInt64(v_1.AuxInt) != 15 { + break + } + v.reset(OpRsh16Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(15) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16Ux64 i:(Lsh16x64 x (Const64 [c])) (Const64 [c])) + // cond: c 
>= 0 && c < 16 && i.Uses == 1 + // result: (And16 x (Const16 [int16(^uint16(0)>>c)])) + for { + i := v_0 + if i.Op != OpLsh16x64 { + break + } + _ = i.Args[1] + x := i.Args[0] + i_1 := i.Args[1] + if i_1.Op != OpConst64 { + break + } + c := auxIntToInt64(i_1.AuxInt) + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 16 && i.Uses == 1) { + break + } + v.reset(OpAnd16) + v0 := b.NewValue0(v.Pos, OpConst16, v.Type) + v0.AuxInt = int16ToAuxInt(int16(^uint16(0) >> c)) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16Ux64 (Lsh16x64 (Rsh16Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) + // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) + // result: (Rsh16Ux64 x (Const64 [c1-c2+c3])) + for { + if v_0.Op != OpLsh16x64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpRsh16Ux64 { + break + } + _ = v_0_0.Args[1] + x := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpConst64 { + break + } + c1 := auxIntToInt64(v_0_0_1.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c2 := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + c3 := auxIntToInt64(v_1.AuxInt) + if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) { + break + } + v.reset(OpRsh16Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(c1 - c2 + c3) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) + // result: (ZeroExt8to16 (Trunc16to8 x)) + for { + if v_0.Op != OpLsh16x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 8 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 8 { + break + } + v.reset(OpZeroExt8to16) + v0 := b.NewValue0(v.Pos, OpTrunc16to8, typ.UInt8) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh16Ux8(v 
*Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16Ux8 x (Const8 [c])) + // result: (Rsh16Ux64 x (Const64 [int64(uint8(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + v.reset(OpRsh16Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint8(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16Ux8 (Const16 [0]) _) + // result: (Const16 [0]) + for { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16x16 x (Const16 [c])) + // result: (Rsh16x64 x (Const64 [int64(uint16(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + v.reset(OpRsh16x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint16(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16x16 (Const16 [0]) _) + // result: (Const16 [0]) + for { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16x32 x (Const32 [c])) + // result: (Rsh16x64 x (Const64 [int64(uint32(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpRsh16x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint32(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16x32 (Const16 [0]) _) + // result: (Const16 [0]) + for { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + 
return false +} +func rewriteValuegeneric_OpRsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x64 (Const16 [c]) (Const64 [d])) + // result: (Const16 [c >> uint64(d)]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(c >> uint64(d)) + return true + } + // match: (Rsh16x64 x (Const64 [0])) + // result: x + for { + x := v_0 + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.copyOf(x) + return true + } + // match: (Rsh16x64 (Const16 [0]) _) + // result: (Const16 [0]) + for { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + // match: (Rsh16x64 (Rsh16x64 x (Const64 [c])) (Const64 [d])) + // cond: !uaddOvf(c,d) + // result: (Rsh16x64 x (Const64 [c+d])) + for { + t := v.Type + if v_0.Op != OpRsh16x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(!uaddOvf(c, d)) { + break + } + v.reset(OpRsh16x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c + d) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) + // result: (SignExt8to16 (Trunc16to8 x)) + for { + if v_0.Op != OpLsh16x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 8 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 8 { + break + } + v.reset(OpSignExt8to16) + v0 := b.NewValue0(v.Pos, OpTrunc16to8, typ.Int8) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh16x8(v *Value) bool { + v_1 := 
v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh16x8 x (Const8 [c])) + // result: (Rsh16x64 x (Const64 [int64(uint8(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + v.reset(OpRsh16x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint8(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh16x8 (Const16 [0]) _) + // result: (Const16 [0]) + for { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh32Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32Ux16 x (Const16 [c])) + // result: (Rsh32Ux64 x (Const64 [int64(uint16(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + v.reset(OpRsh32Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint16(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32Ux16 (Const32 [0]) _) + // result: (Const32 [0]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh32Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32Ux32 x (Const32 [c])) + // result: (Rsh32Ux64 x (Const64 [int64(uint32(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpRsh32Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint32(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32Ux32 (Const32 [0]) _) + // result: (Const32 [0]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} 
+func rewriteValuegeneric_OpRsh32Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux64 (Const32 [c]) (Const64 [d])) + // result: (Const32 [int32(uint32(c) >> uint64(d))]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) + return true + } + // match: (Rsh32Ux64 x (Const64 [0])) + // result: x + for { + x := v_0 + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.copyOf(x) + return true + } + // match: (Rsh32Ux64 (Const32 [0]) _) + // result: (Const32 [0]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (Rsh32Ux64 _ (Const64 [c])) + // cond: uint64(c) >= 32 + // result: (Const32 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 32) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (Rsh32Ux64 (Rsh32Ux64 x (Const64 [c])) (Const64 [d])) + // cond: !uaddOvf(c,d) + // result: (Rsh32Ux64 x (Const64 [c+d])) + for { + t := v.Type + if v_0.Op != OpRsh32Ux64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(!uaddOvf(c, d)) { + break + } + v.reset(OpRsh32Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c + d) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32Ux64 (Rsh32x64 x _) (Const64 [31])) + // result: (Rsh32Ux64 x (Const64 [31])) + for { + if v_0.Op != OpRsh32x64 { + break + } + x := v_0.Args[0] + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + if auxIntToInt64(v_1.AuxInt) 
!= 31 { + break + } + v.reset(OpRsh32Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(31) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32Ux64 i:(Lsh32x64 x (Const64 [c])) (Const64 [c])) + // cond: c >= 0 && c < 32 && i.Uses == 1 + // result: (And32 x (Const32 [int32(^uint32(0)>>c)])) + for { + i := v_0 + if i.Op != OpLsh32x64 { + break + } + _ = i.Args[1] + x := i.Args[0] + i_1 := i.Args[1] + if i_1.Op != OpConst64 { + break + } + c := auxIntToInt64(i_1.AuxInt) + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 32 && i.Uses == 1) { + break + } + v.reset(OpAnd32) + v0 := b.NewValue0(v.Pos, OpConst32, v.Type) + v0.AuxInt = int32ToAuxInt(int32(^uint32(0) >> c)) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32Ux64 (Lsh32x64 (Rsh32Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) + // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) + // result: (Rsh32Ux64 x (Const64 [c1-c2+c3])) + for { + if v_0.Op != OpLsh32x64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpRsh32Ux64 { + break + } + _ = v_0_0.Args[1] + x := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpConst64 { + break + } + c1 := auxIntToInt64(v_0_0_1.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c2 := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + c3 := auxIntToInt64(v_1.AuxInt) + if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) { + break + } + v.reset(OpRsh32Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(c1 - c2 + c3) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) + // result: (ZeroExt8to32 (Trunc32to8 x)) + for { + if v_0.Op != OpLsh32x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 24 || v_1.Op != 
OpConst64 || auxIntToInt64(v_1.AuxInt) != 24 { + break + } + v.reset(OpZeroExt8to32) + v0 := b.NewValue0(v.Pos, OpTrunc32to8, typ.UInt8) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) + // result: (ZeroExt16to32 (Trunc32to16 x)) + for { + if v_0.Op != OpLsh32x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 16 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 16 { + break + } + v.reset(OpZeroExt16to32) + v0 := b.NewValue0(v.Pos, OpTrunc32to16, typ.UInt16) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh32Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32Ux8 x (Const8 [c])) + // result: (Rsh32Ux64 x (Const64 [int64(uint8(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + v.reset(OpRsh32Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint8(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32Ux8 (Const32 [0]) _) + // result: (Const32 [0]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32x16 x (Const16 [c])) + // result: (Rsh32x64 x (Const64 [int64(uint16(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + v.reset(OpRsh32x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint16(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32x16 (Const32 [0]) _) + // result: (Const32 [0]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst32) + 
v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32x32 x (Const32 [c])) + // result: (Rsh32x64 x (Const64 [int64(uint32(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpRsh32x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint32(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32x32 (Const32 [0]) _) + // result: (Const32 [0]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x64 (Const32 [c]) (Const64 [d])) + // result: (Const32 [c >> uint64(d)]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) + return true + } + // match: (Rsh32x64 x (Const64 [0])) + // result: x + for { + x := v_0 + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.copyOf(x) + return true + } + // match: (Rsh32x64 (Const32 [0]) _) + // result: (Const32 [0]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (Rsh32x64 (Rsh32x64 x (Const64 [c])) (Const64 [d])) + // cond: !uaddOvf(c,d) + // result: (Rsh32x64 x (Const64 [c+d])) + for { + t := v.Type + if v_0.Op != OpRsh32x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := 
auxIntToInt64(v_1.AuxInt) + if !(!uaddOvf(c, d)) { + break + } + v.reset(OpRsh32x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c + d) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) + // result: (SignExt8to32 (Trunc32to8 x)) + for { + if v_0.Op != OpLsh32x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 24 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 24 { + break + } + v.reset(OpSignExt8to32) + v0 := b.NewValue0(v.Pos, OpTrunc32to8, typ.Int8) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) + // result: (SignExt16to32 (Trunc32to16 x)) + for { + if v_0.Op != OpLsh32x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 16 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 16 { + break + } + v.reset(OpSignExt16to32) + v0 := b.NewValue0(v.Pos, OpTrunc32to16, typ.Int16) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh32x8 x (Const8 [c])) + // result: (Rsh32x64 x (Const64 [int64(uint8(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + v.reset(OpRsh32x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint8(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh32x8 (Const32 [0]) _) + // result: (Const32 [0]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh64Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64Ux16 x (Const16 
[c])) + // result: (Rsh64Ux64 x (Const64 [int64(uint16(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + v.reset(OpRsh64Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint16(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64Ux16 (Const64 [0]) _) + // result: (Const64 [0]) + for { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh64Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64Ux32 x (Const32 [c])) + // result: (Rsh64Ux64 x (Const64 [int64(uint32(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpRsh64Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint32(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64Ux32 (Const64 [0]) _) + // result: (Const64 [0]) + for { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh64Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux64 (Const64 [c]) (Const64 [d])) + // result: (Const64 [int64(uint64(c) >> uint64(d))]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d))) + return true + } + // match: (Rsh64Ux64 x (Const64 [0])) + // result: x + for { + x := v_0 + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.copyOf(x) + return true + } + // match: (Rsh64Ux64 (Const64 [0]) _) + // result: (Const64 [0]) + for { + if v_0.Op 
!= OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Rsh64Ux64 _ (Const64 [c])) + // cond: uint64(c) >= 64 + // result: (Const64 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 64) { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Rsh64Ux64 (Rsh64Ux64 x (Const64 [c])) (Const64 [d])) + // cond: !uaddOvf(c,d) + // result: (Rsh64Ux64 x (Const64 [c+d])) + for { + t := v.Type + if v_0.Op != OpRsh64Ux64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(!uaddOvf(c, d)) { + break + } + v.reset(OpRsh64Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c + d) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64Ux64 (Rsh64x64 x _) (Const64 [63])) + // result: (Rsh64Ux64 x (Const64 [63])) + for { + if v_0.Op != OpRsh64x64 { + break + } + x := v_0.Args[0] + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + if auxIntToInt64(v_1.AuxInt) != 63 { + break + } + v.reset(OpRsh64Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(63) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64Ux64 i:(Lsh64x64 x (Const64 [c])) (Const64 [c])) + // cond: c >= 0 && c < 64 && i.Uses == 1 + // result: (And64 x (Const64 [int64(^uint64(0)>>c)])) + for { + i := v_0 + if i.Op != OpLsh64x64 { + break + } + _ = i.Args[1] + x := i.Args[0] + i_1 := i.Args[1] + if i_1.Op != OpConst64 { + break + } + c := auxIntToInt64(i_1.AuxInt) + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 64 && i.Uses == 1) { + break + } + v.reset(OpAnd64) + v0 := b.NewValue0(v.Pos, OpConst64, v.Type) + v0.AuxInt = int64ToAuxInt(int64(^uint64(0) >> c)) + v.AddArg2(x, v0) + return true 
+ } + // match: (Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) + // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) + // result: (Rsh64Ux64 x (Const64 [c1-c2+c3])) + for { + if v_0.Op != OpLsh64x64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpRsh64Ux64 { + break + } + _ = v_0_0.Args[1] + x := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpConst64 { + break + } + c1 := auxIntToInt64(v_0_0_1.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c2 := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + c3 := auxIntToInt64(v_1.AuxInt) + if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) { + break + } + v.reset(OpRsh64Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(c1 - c2 + c3) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) + // result: (ZeroExt8to64 (Trunc64to8 x)) + for { + if v_0.Op != OpLsh64x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 56 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 56 { + break + } + v.reset(OpZeroExt8to64) + v0 := b.NewValue0(v.Pos, OpTrunc64to8, typ.UInt8) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) + // result: (ZeroExt16to64 (Trunc64to16 x)) + for { + if v_0.Op != OpLsh64x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 48 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 48 { + break + } + v.reset(OpZeroExt16to64) + v0 := b.NewValue0(v.Pos, OpTrunc64to16, typ.UInt16) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) + // result: (ZeroExt32to64 
(Trunc64to32 x)) + for { + if v_0.Op != OpLsh64x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 32 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 32 { + break + } + v.reset(OpZeroExt32to64) + v0 := b.NewValue0(v.Pos, OpTrunc64to32, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh64Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64Ux8 x (Const8 [c])) + // result: (Rsh64Ux64 x (Const64 [int64(uint8(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + v.reset(OpRsh64Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint8(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64Ux8 (Const64 [0]) _) + // result: (Const64 [0]) + for { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64x16 x (Const16 [c])) + // result: (Rsh64x64 x (Const64 [int64(uint16(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + v.reset(OpRsh64x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint16(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64x16 (Const64 [0]) _) + // result: (Const64 [0]) + for { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64x32 x (Const32 [c])) + // result: (Rsh64x64 x (Const64 [int64(uint32(c))])) + for { + t := v.Type 
+ x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpRsh64x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint32(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64x32 (Const64 [0]) _) + // result: (Const64 [0]) + for { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x64 (Const64 [c]) (Const64 [d])) + // result: (Const64 [c >> uint64(d)]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(c >> uint64(d)) + return true + } + // match: (Rsh64x64 x (Const64 [0])) + // result: x + for { + x := v_0 + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.copyOf(x) + return true + } + // match: (Rsh64x64 (Const64 [0]) _) + // result: (Const64 [0]) + for { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Rsh64x64 (Rsh64x64 x (Const64 [c])) (Const64 [d])) + // cond: !uaddOvf(c,d) + // result: (Rsh64x64 x (Const64 [c+d])) + for { + t := v.Type + if v_0.Op != OpRsh64x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(!uaddOvf(c, d)) { + break + } + v.reset(OpRsh64x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c + d) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) + // result: (SignExt8to64 (Trunc64to8 
x)) + for { + if v_0.Op != OpLsh64x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 56 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 56 { + break + } + v.reset(OpSignExt8to64) + v0 := b.NewValue0(v.Pos, OpTrunc64to8, typ.Int8) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) + // result: (SignExt16to64 (Trunc64to16 x)) + for { + if v_0.Op != OpLsh64x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 48 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 48 { + break + } + v.reset(OpSignExt16to64) + v0 := b.NewValue0(v.Pos, OpTrunc64to16, typ.Int16) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) + // result: (SignExt32to64 (Trunc64to32 x)) + for { + if v_0.Op != OpLsh64x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 32 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 32 { + break + } + v.reset(OpSignExt32to64) + v0 := b.NewValue0(v.Pos, OpTrunc64to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh64x8 x (Const8 [c])) + // result: (Rsh64x64 x (Const64 [int64(uint8(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + v.reset(OpRsh64x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint8(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh64x8 (Const64 [0]) _) + // result: (Const64 [0]) + for { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) 
+ return true + } + return false +} +func rewriteValuegeneric_OpRsh8Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8Ux16 x (Const16 [c])) + // result: (Rsh8Ux64 x (Const64 [int64(uint16(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + v.reset(OpRsh8Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint16(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh8Ux16 (Const8 [0]) _) + // result: (Const8 [0]) + for { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh8Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8Ux32 x (Const32 [c])) + // result: (Rsh8Ux64 x (Const64 [int64(uint32(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpRsh8Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint32(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh8Ux32 (Const8 [0]) _) + // result: (Const8 [0]) + for { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux64 (Const8 [c]) (Const64 [d])) + // result: (Const8 [int8(uint8(c) >> uint64(d))]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(int8(uint8(c) >> uint64(d))) + return true + } + // match: (Rsh8Ux64 x (Const64 [0])) + // result: x + for { + x := v_0 + if v_1.Op != OpConst64 || 
auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.copyOf(x) + return true + } + // match: (Rsh8Ux64 (Const8 [0]) _) + // result: (Const8 [0]) + for { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + // match: (Rsh8Ux64 _ (Const64 [c])) + // cond: uint64(c) >= 8 + // result: (Const8 [0]) + for { + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 8) { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + // match: (Rsh8Ux64 (Rsh8Ux64 x (Const64 [c])) (Const64 [d])) + // cond: !uaddOvf(c,d) + // result: (Rsh8Ux64 x (Const64 [c+d])) + for { + t := v.Type + if v_0.Op != OpRsh8Ux64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(!uaddOvf(c, d)) { + break + } + v.reset(OpRsh8Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c + d) + v.AddArg2(x, v0) + return true + } + // match: (Rsh8Ux64 (Rsh8x64 x _) (Const64 [7] )) + // result: (Rsh8Ux64 x (Const64 [7] )) + for { + if v_0.Op != OpRsh8x64 { + break + } + x := v_0.Args[0] + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + if auxIntToInt64(v_1.AuxInt) != 7 { + break + } + v.reset(OpRsh8Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(7) + v.AddArg2(x, v0) + return true + } + // match: (Rsh8Ux64 i:(Lsh8x64 x (Const64 [c])) (Const64 [c])) + // cond: c >= 0 && c < 8 && i.Uses == 1 + // result: (And8 x (Const8 [int8 (^uint8 (0)>>c)])) + for { + i := v_0 + if i.Op != OpLsh8x64 { + break + } + _ = i.Args[1] + x := i.Args[0] + i_1 := i.Args[1] + if i_1.Op != OpConst64 { + break + } + c := auxIntToInt64(i_1.AuxInt) + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 8 && i.Uses == 1) { + break + } + v.reset(OpAnd8) 
+ v0 := b.NewValue0(v.Pos, OpConst8, v.Type) + v0.AuxInt = int8ToAuxInt(int8(^uint8(0) >> c)) + v.AddArg2(x, v0) + return true + } + // match: (Rsh8Ux64 (Lsh8x64 (Rsh8Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) + // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) + // result: (Rsh8Ux64 x (Const64 [c1-c2+c3])) + for { + if v_0.Op != OpLsh8x64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpRsh8Ux64 { + break + } + _ = v_0_0.Args[1] + x := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpConst64 { + break + } + c1 := auxIntToInt64(v_0_0_1.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c2 := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + c3 := auxIntToInt64(v_1.AuxInt) + if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) { + break + } + v.reset(OpRsh8Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = int64ToAuxInt(c1 - c2 + c3) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh8Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8Ux8 x (Const8 [c])) + // result: (Rsh8Ux64 x (Const64 [int64(uint8(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + v.reset(OpRsh8Ux64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint8(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh8Ux8 (Const8 [0]) _) + // result: (Const8 [0]) + for { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8x16 x (Const16 [c])) + // result: (Rsh8x64 x (Const64 [int64(uint16(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != 
OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + v.reset(OpRsh8x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint16(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh8x16 (Const8 [0]) _) + // result: (Const8 [0]) + for { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8x32 x (Const32 [c])) + // result: (Rsh8x64 x (Const64 [int64(uint32(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpRsh8x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint32(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh8x32 (Const8 [0]) _) + // result: (Const8 [0]) + for { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8x64 (Const8 [c]) (Const64 [d])) + // result: (Const8 [c >> uint64(d)]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(c >> uint64(d)) + return true + } + // match: (Rsh8x64 x (Const64 [0])) + // result: x + for { + x := v_0 + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v.copyOf(x) + return true + } + // match: (Rsh8x64 (Const8 [0]) _) + // result: (Const8 [0]) + for { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + // match: (Rsh8x64 (Rsh8x64 x (Const64 [c])) (Const64 [d])) + // cond: 
!uaddOvf(c,d) + // result: (Rsh8x64 x (Const64 [c+d])) + for { + t := v.Type + if v_0.Op != OpRsh8x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + if !(!uaddOvf(c, d)) { + break + } + v.reset(OpRsh8x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c + d) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpRsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Rsh8x8 x (Const8 [c])) + // result: (Rsh8x64 x (Const64 [int64(uint8(c))])) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + v.reset(OpRsh8x64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(int64(uint8(c))) + v.AddArg2(x, v0) + return true + } + // match: (Rsh8x8 (Const8 [0]) _) + // result: (Const8 [0]) + for { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpSelect0(v *Value) bool { + v_0 := v.Args[0] + // match: (Select0 (Div128u (Const64 [0]) lo y)) + // result: (Div64u lo y) + for { + if v_0.Op != OpDiv128u { + break + } + y := v_0.Args[2] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != 0 { + break + } + lo := v_0.Args[1] + v.reset(OpDiv64u) + v.AddArg2(lo, y) + return true + } + // match: (Select0 (Mul32uover (Const32 [1]) x)) + // result: x + for { + if v_0.Op != OpMul32uover { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst32 || auxIntToInt32(v_0_0.AuxInt) != 1 { + continue + } + x := v_0_1 + v.copyOf(x) + return true + } + break + } + // match: (Select0 (Mul64uover (Const64 
[1]) x)) + // result: x + for { + if v_0.Op != OpMul64uover { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != 1 { + continue + } + x := v_0_1 + v.copyOf(x) + return true + } + break + } + // match: (Select0 (Mul64uover (Const64 [0]) x)) + // result: (Const64 [0]) + for { + if v_0.Op != OpMul64uover { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != 0 { + continue + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + break + } + // match: (Select0 (Mul32uover (Const32 [0]) x)) + // result: (Const32 [0]) + for { + if v_0.Op != OpMul32uover { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst32 || auxIntToInt32(v_0_0.AuxInt) != 0 { + continue + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpSelect1(v *Value) bool { + v_0 := v.Args[0] + // match: (Select1 (Div128u (Const64 [0]) lo y)) + // result: (Mod64u lo y) + for { + if v_0.Op != OpDiv128u { + break + } + y := v_0.Args[2] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != 0 { + break + } + lo := v_0.Args[1] + v.reset(OpMod64u) + v.AddArg2(lo, y) + return true + } + // match: (Select1 (Mul32uover (Const32 [1]) x)) + // result: (ConstBool [false]) + for { + if v_0.Op != OpMul32uover { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst32 || auxIntToInt32(v_0_0.AuxInt) != 1 { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + 
return true + } + break + } + // match: (Select1 (Mul64uover (Const64 [1]) x)) + // result: (ConstBool [false]) + for { + if v_0.Op != OpMul64uover { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != 1 { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + break + } + // match: (Select1 (Mul64uover (Const64 [0]) x)) + // result: (ConstBool [false]) + for { + if v_0.Op != OpMul64uover { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != 0 { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + break + } + // match: (Select1 (Mul32uover (Const32 [0]) x)) + // result: (ConstBool [false]) + for { + if v_0.Op != OpMul32uover { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst32 || auxIntToInt32(v_0_0.AuxInt) != 0 { + continue + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpSelectN(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (SelectN [0] (MakeResult x ___)) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpMakeResult || len(v_0.Args) < 1 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (SelectN [1] (MakeResult x y ___)) + // result: y + for { + if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpMakeResult || len(v_0.Args) < 2 { + break + } + y := v_0.Args[1] + v.copyOf(y) + return true + } + // match: (SelectN [2] (MakeResult x y z ___)) + // result: z + for { + if auxIntToInt64(v.AuxInt) != 2 || 
v_0.Op != OpMakeResult || len(v_0.Args) < 3 { + break + } + z := v_0.Args[2] + v.copyOf(z) + return true + } + // match: (SelectN [0] call:(StaticCall {sym} sptr (Const64 [c]) mem)) + // cond: isInlinableMemclr(config, int64(c)) && isSameCall(sym, "runtime.memclrNoHeapPointers") && call.Uses == 1 && clobber(call) + // result: (Zero {types.Types[types.TUINT8]} [int64(c)] sptr mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpStaticCall || len(call.Args) != 3 { + break + } + sym := auxToCall(call.Aux) + mem := call.Args[2] + sptr := call.Args[0] + call_1 := call.Args[1] + if call_1.Op != OpConst64 { + break + } + c := auxIntToInt64(call_1.AuxInt) + if !(isInlinableMemclr(config, int64(c)) && isSameCall(sym, "runtime.memclrNoHeapPointers") && call.Uses == 1 && clobber(call)) { + break + } + v.reset(OpZero) + v.AuxInt = int64ToAuxInt(int64(c)) + v.Aux = typeToAux(types.Types[types.TUINT8]) + v.AddArg2(sptr, mem) + return true + } + // match: (SelectN [0] call:(StaticCall {sym} sptr (Const32 [c]) mem)) + // cond: isInlinableMemclr(config, int64(c)) && isSameCall(sym, "runtime.memclrNoHeapPointers") && call.Uses == 1 && clobber(call) + // result: (Zero {types.Types[types.TUINT8]} [int64(c)] sptr mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpStaticCall || len(call.Args) != 3 { + break + } + sym := auxToCall(call.Aux) + mem := call.Args[2] + sptr := call.Args[0] + call_1 := call.Args[1] + if call_1.Op != OpConst32 { + break + } + c := auxIntToInt32(call_1.AuxInt) + if !(isInlinableMemclr(config, int64(c)) && isSameCall(sym, "runtime.memclrNoHeapPointers") && call.Uses == 1 && clobber(call)) { + break + } + v.reset(OpZero) + v.AuxInt = int64ToAuxInt(int64(c)) + v.Aux = typeToAux(types.Types[types.TUINT8]) + v.AddArg2(sptr, mem) + return true + } + // match: (SelectN [0] call:(StaticCall {sym} s1:(Store _ (Const64 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem))))) + // cond: sz >= 0 && 
isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call) + // result: (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpStaticCall || len(call.Args) != 1 { + break + } + sym := auxToCall(call.Aux) + s1 := call.Args[0] + if s1.Op != OpStore { + break + } + _ = s1.Args[2] + s1_1 := s1.Args[1] + if s1_1.Op != OpConst64 { + break + } + sz := auxIntToInt64(s1_1.AuxInt) + s2 := s1.Args[2] + if s2.Op != OpStore { + break + } + _ = s2.Args[2] + src := s2.Args[1] + s3 := s2.Args[2] + if s3.Op != OpStore { + break + } + mem := s3.Args[2] + dst := s3.Args[1] + if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(int64(sz)) + v.Aux = typeToAux(types.Types[types.TUINT8]) + v.AddArg3(dst, src, mem) + return true + } + // match: (SelectN [0] call:(StaticCall {sym} s1:(Store _ (Const32 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem))))) + // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call) + // result: (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpStaticCall || len(call.Args) != 1 { + break + } + sym := auxToCall(call.Aux) + s1 := call.Args[0] + if s1.Op != OpStore { + break + } + _ = s1.Args[2] + s1_1 := s1.Args[1] + if s1_1.Op != OpConst32 { + break + } + sz := auxIntToInt32(s1_1.AuxInt) + s2 := s1.Args[2] + if s2.Op != OpStore { + break + } + _ = s2.Args[2] + src := s2.Args[1] + s3 := s2.Args[2] + if s3.Op != OpStore { + break + } + mem := s3.Args[2] + dst := s3.Args[1] 
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(int64(sz)) + v.Aux = typeToAux(types.Types[types.TUINT8]) + v.AddArg3(dst, src, mem) + return true + } + // match: (SelectN [0] call:(StaticCall {sym} dst src (Const64 [sz]) mem)) + // cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call) + // result: (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpStaticCall || len(call.Args) != 4 { + break + } + sym := auxToCall(call.Aux) + mem := call.Args[3] + dst := call.Args[0] + src := call.Args[1] + call_2 := call.Args[2] + if call_2.Op != OpConst64 { + break + } + sz := auxIntToInt64(call_2.AuxInt) + if !(sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(int64(sz)) + v.Aux = typeToAux(types.Types[types.TUINT8]) + v.AddArg3(dst, src, mem) + return true + } + // match: (SelectN [0] call:(StaticCall {sym} dst src (Const32 [sz]) mem)) + // cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call) + // result: (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpStaticCall || len(call.Args) != 4 { + break + } + sym := auxToCall(call.Aux) + mem := call.Args[3] + dst := call.Args[0] + src := call.Args[1] + call_2 := call.Args[2] + if call_2.Op != OpConst32 { + break + } + sz := auxIntToInt32(call_2.AuxInt) + if !(sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, 
src, int64(sz), config) && clobber(call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(int64(sz)) + v.Aux = typeToAux(types.Types[types.TUINT8]) + v.AddArg3(dst, src, mem) + return true + } + // match: (SelectN [0] call:(StaticLECall {sym} dst src (Const64 [sz]) mem)) + // cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call) + // result: (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpStaticLECall || len(call.Args) != 4 { + break + } + sym := auxToCall(call.Aux) + mem := call.Args[3] + dst := call.Args[0] + src := call.Args[1] + call_2 := call.Args[2] + if call_2.Op != OpConst64 { + break + } + sz := auxIntToInt64(call_2.AuxInt) + if !(sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(int64(sz)) + v.Aux = typeToAux(types.Types[types.TUINT8]) + v.AddArg3(dst, src, mem) + return true + } + // match: (SelectN [0] call:(StaticLECall {sym} dst src (Const32 [sz]) mem)) + // cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call) + // result: (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpStaticLECall || len(call.Args) != 4 { + break + } + sym := auxToCall(call.Aux) + mem := call.Args[3] + dst := call.Args[0] + src := call.Args[1] + call_2 := call.Args[2] + if call_2.Op != OpConst32 { + break + } + sz := auxIntToInt32(call_2.AuxInt) + if !(sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(int64(sz)) + v.Aux = 
typeToAux(types.Types[types.TUINT8]) + v.AddArg3(dst, src, mem) + return true + } + // match: (SelectN [0] call:(StaticLECall {sym} a x)) + // cond: needRaceCleanup(sym, call) && clobber(call) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpStaticLECall || len(call.Args) != 2 { + break + } + sym := auxToCall(call.Aux) + x := call.Args[1] + if !(needRaceCleanup(sym, call) && clobber(call)) { + break + } + v.copyOf(x) + return true + } + // match: (SelectN [0] call:(StaticLECall {sym} x)) + // cond: needRaceCleanup(sym, call) && clobber(call) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + call := v_0 + if call.Op != OpStaticLECall || len(call.Args) != 1 { + break + } + sym := auxToCall(call.Aux) + x := call.Args[0] + if !(needRaceCleanup(sym, call) && clobber(call)) { + break + } + v.copyOf(x) + return true + } + // match: (SelectN [1] (StaticCall {sym} _ newLen:(Const64) _ _ _ _)) + // cond: v.Type.IsInteger() && isSameCall(sym, "runtime.growslice") + // result: newLen + for { + if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStaticCall || len(v_0.Args) != 6 { + break + } + sym := auxToCall(v_0.Aux) + _ = v_0.Args[1] + newLen := v_0.Args[1] + if newLen.Op != OpConst64 || !(v.Type.IsInteger() && isSameCall(sym, "runtime.growslice")) { + break + } + v.copyOf(newLen) + return true + } + // match: (SelectN [1] (StaticCall {sym} _ newLen:(Const32) _ _ _ _)) + // cond: v.Type.IsInteger() && isSameCall(sym, "runtime.growslice") + // result: newLen + for { + if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStaticCall || len(v_0.Args) != 6 { + break + } + sym := auxToCall(v_0.Aux) + _ = v_0.Args[1] + newLen := v_0.Args[1] + if newLen.Op != OpConst32 || !(v.Type.IsInteger() && isSameCall(sym, "runtime.growslice")) { + break + } + v.copyOf(newLen) + return true + } + return false +} +func rewriteValuegeneric_OpSignExt16to32(v *Value) bool { + v_0 := v.Args[0] + // match: (SignExt16to32 (Const16 [c])) 
+ // result: (Const32 [int32(c)]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(int32(c)) + return true + } + // match: (SignExt16to32 (Trunc32to16 x:(Rsh32x64 _ (Const64 [s])))) + // cond: s >= 16 + // result: x + for { + if v_0.Op != OpTrunc32to16 { + break + } + x := v_0.Args[0] + if x.Op != OpRsh32x64 { + break + } + _ = x.Args[1] + x_1 := x.Args[1] + if x_1.Op != OpConst64 { + break + } + s := auxIntToInt64(x_1.AuxInt) + if !(s >= 16) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValuegeneric_OpSignExt16to64(v *Value) bool { + v_0 := v.Args[0] + // match: (SignExt16to64 (Const16 [c])) + // result: (Const64 [int64(c)]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(int64(c)) + return true + } + // match: (SignExt16to64 (Trunc64to16 x:(Rsh64x64 _ (Const64 [s])))) + // cond: s >= 48 + // result: x + for { + if v_0.Op != OpTrunc64to16 { + break + } + x := v_0.Args[0] + if x.Op != OpRsh64x64 { + break + } + _ = x.Args[1] + x_1 := x.Args[1] + if x_1.Op != OpConst64 { + break + } + s := auxIntToInt64(x_1.AuxInt) + if !(s >= 48) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValuegeneric_OpSignExt32to64(v *Value) bool { + v_0 := v.Args[0] + // match: (SignExt32to64 (Const32 [c])) + // result: (Const64 [int64(c)]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(int64(c)) + return true + } + // match: (SignExt32to64 (Trunc64to32 x:(Rsh64x64 _ (Const64 [s])))) + // cond: s >= 32 + // result: x + for { + if v_0.Op != OpTrunc64to32 { + break + } + x := v_0.Args[0] + if x.Op != OpRsh64x64 { + break + } + _ = x.Args[1] + x_1 := x.Args[1] + if x_1.Op != OpConst64 { + break + } + s := auxIntToInt64(x_1.AuxInt) + if !(s >= 32) { + break + } + v.copyOf(x) + return true + } + 
return false +} +func rewriteValuegeneric_OpSignExt8to16(v *Value) bool { + v_0 := v.Args[0] + // match: (SignExt8to16 (Const8 [c])) + // result: (Const16 [int16(c)]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(int16(c)) + return true + } + // match: (SignExt8to16 (Trunc16to8 x:(Rsh16x64 _ (Const64 [s])))) + // cond: s >= 8 + // result: x + for { + if v_0.Op != OpTrunc16to8 { + break + } + x := v_0.Args[0] + if x.Op != OpRsh16x64 { + break + } + _ = x.Args[1] + x_1 := x.Args[1] + if x_1.Op != OpConst64 { + break + } + s := auxIntToInt64(x_1.AuxInt) + if !(s >= 8) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValuegeneric_OpSignExt8to32(v *Value) bool { + v_0 := v.Args[0] + // match: (SignExt8to32 (Const8 [c])) + // result: (Const32 [int32(c)]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(int32(c)) + return true + } + // match: (SignExt8to32 (Trunc32to8 x:(Rsh32x64 _ (Const64 [s])))) + // cond: s >= 24 + // result: x + for { + if v_0.Op != OpTrunc32to8 { + break + } + x := v_0.Args[0] + if x.Op != OpRsh32x64 { + break + } + _ = x.Args[1] + x_1 := x.Args[1] + if x_1.Op != OpConst64 { + break + } + s := auxIntToInt64(x_1.AuxInt) + if !(s >= 24) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValuegeneric_OpSignExt8to64(v *Value) bool { + v_0 := v.Args[0] + // match: (SignExt8to64 (Const8 [c])) + // result: (Const64 [int64(c)]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(int64(c)) + return true + } + // match: (SignExt8to64 (Trunc64to8 x:(Rsh64x64 _ (Const64 [s])))) + // cond: s >= 56 + // result: x + for { + if v_0.Op != OpTrunc64to8 { + break + } + x := v_0.Args[0] + if x.Op != OpRsh64x64 { + break + } + _ = x.Args[1] + x_1 := x.Args[1] + if x_1.Op != OpConst64 { + 
break + } + s := auxIntToInt64(x_1.AuxInt) + if !(s >= 56) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValuegeneric_OpSliceCap(v *Value) bool { + v_0 := v.Args[0] + // match: (SliceCap (SliceMake _ _ (Const64 [c]))) + // result: (Const64 [c]) + for { + if v_0.Op != OpSliceMake { + break + } + _ = v_0.Args[2] + v_0_2 := v_0.Args[2] + if v_0_2.Op != OpConst64 { + break + } + t := v_0_2.Type + c := auxIntToInt64(v_0_2.AuxInt) + v.reset(OpConst64) + v.Type = t + v.AuxInt = int64ToAuxInt(c) + return true + } + // match: (SliceCap (SliceMake _ _ (Const32 [c]))) + // result: (Const32 [c]) + for { + if v_0.Op != OpSliceMake { + break + } + _ = v_0.Args[2] + v_0_2 := v_0.Args[2] + if v_0_2.Op != OpConst32 { + break + } + t := v_0_2.Type + c := auxIntToInt32(v_0_2.AuxInt) + v.reset(OpConst32) + v.Type = t + v.AuxInt = int32ToAuxInt(c) + return true + } + // match: (SliceCap (SliceMake _ _ (SliceCap x))) + // result: (SliceCap x) + for { + if v_0.Op != OpSliceMake { + break + } + _ = v_0.Args[2] + v_0_2 := v_0.Args[2] + if v_0_2.Op != OpSliceCap { + break + } + x := v_0_2.Args[0] + v.reset(OpSliceCap) + v.AddArg(x) + return true + } + // match: (SliceCap (SliceMake _ _ (SliceLen x))) + // result: (SliceLen x) + for { + if v_0.Op != OpSliceMake { + break + } + _ = v_0.Args[2] + v_0_2 := v_0.Args[2] + if v_0_2.Op != OpSliceLen { + break + } + x := v_0_2.Args[0] + v.reset(OpSliceLen) + v.AddArg(x) + return true + } + return false +} +func rewriteValuegeneric_OpSliceLen(v *Value) bool { + v_0 := v.Args[0] + // match: (SliceLen (SliceMake _ (Const64 [c]) _)) + // result: (Const64 [c]) + for { + if v_0.Op != OpSliceMake { + break + } + _ = v_0.Args[1] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + t := v_0_1.Type + c := auxIntToInt64(v_0_1.AuxInt) + v.reset(OpConst64) + v.Type = t + v.AuxInt = int64ToAuxInt(c) + return true + } + // match: (SliceLen (SliceMake _ (Const32 [c]) _)) + // result: (Const32 [c]) + for { + if v_0.Op != 
OpSliceMake { + break + } + _ = v_0.Args[1] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst32 { + break + } + t := v_0_1.Type + c := auxIntToInt32(v_0_1.AuxInt) + v.reset(OpConst32) + v.Type = t + v.AuxInt = int32ToAuxInt(c) + return true + } + // match: (SliceLen (SliceMake _ (SliceLen x) _)) + // result: (SliceLen x) + for { + if v_0.Op != OpSliceMake { + break + } + _ = v_0.Args[1] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpSliceLen { + break + } + x := v_0_1.Args[0] + v.reset(OpSliceLen) + v.AddArg(x) + return true + } + // match: (SliceLen (SelectN [0] (StaticLECall {sym} _ newLen:(Const64) _ _ _ _))) + // cond: isSameCall(sym, "runtime.growslice") + // result: newLen + for { + if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpStaticLECall || len(v_0_0.Args) != 6 { + break + } + sym := auxToCall(v_0_0.Aux) + _ = v_0_0.Args[1] + newLen := v_0_0.Args[1] + if newLen.Op != OpConst64 || !(isSameCall(sym, "runtime.growslice")) { + break + } + v.copyOf(newLen) + return true + } + // match: (SliceLen (SelectN [0] (StaticLECall {sym} _ newLen:(Const32) _ _ _ _))) + // cond: isSameCall(sym, "runtime.growslice") + // result: newLen + for { + if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpStaticLECall || len(v_0_0.Args) != 6 { + break + } + sym := auxToCall(v_0_0.Aux) + _ = v_0_0.Args[1] + newLen := v_0_0.Args[1] + if newLen.Op != OpConst32 || !(isSameCall(sym, "runtime.growslice")) { + break + } + v.copyOf(newLen) + return true + } + return false +} +func rewriteValuegeneric_OpSlicePtr(v *Value) bool { + v_0 := v.Args[0] + // match: (SlicePtr (SliceMake (SlicePtr x) _ _)) + // result: (SlicePtr x) + for { + if v_0.Op != OpSliceMake { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSlicePtr { + break + } + x := v_0_0.Args[0] + v.reset(OpSlicePtr) + v.AddArg(x) + return true + } + return false +} +func rewriteValuegeneric_OpSlicemask(v 
*Value) bool { + v_0 := v.Args[0] + // match: (Slicemask (Const32 [x])) + // cond: x > 0 + // result: (Const32 [-1]) + for { + if v_0.Op != OpConst32 { + break + } + x := auxIntToInt32(v_0.AuxInt) + if !(x > 0) { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(-1) + return true + } + // match: (Slicemask (Const32 [0])) + // result: (Const32 [0]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (Slicemask (Const64 [x])) + // cond: x > 0 + // result: (Const64 [-1]) + for { + if v_0.Op != OpConst64 { + break + } + x := auxIntToInt64(v_0.AuxInt) + if !(x > 0) { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(-1) + return true + } + // match: (Slicemask (Const64 [0])) + // result: (Const64 [0]) + for { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValuegeneric_OpSqrt(v *Value) bool { + v_0 := v.Args[0] + // match: (Sqrt (Const64F [c])) + // cond: !math.IsNaN(math.Sqrt(c)) + // result: (Const64F [math.Sqrt(c)]) + for { + if v_0.Op != OpConst64F { + break + } + c := auxIntToFloat64(v_0.AuxInt) + if !(!math.IsNaN(math.Sqrt(c))) { + break + } + v.reset(OpConst64F) + v.AuxInt = float64ToAuxInt(math.Sqrt(c)) + return true + } + return false +} +func rewriteValuegeneric_OpStaticCall(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (StaticCall {callAux} p q _ mem) + // cond: isSameCall(callAux, "runtime.memequal") && isSamePtr(p, q) + // result: (MakeResult (ConstBool [true]) mem) + for { + if len(v.Args) != 4 { + break + } + callAux := auxToCall(v.Aux) + mem := v.Args[3] + p := v.Args[0] + q := v.Args[1] + if !(isSameCall(callAux, "runtime.memequal") && isSamePtr(p, q)) { + break + } + v.reset(OpMakeResult) + v0 := b.NewValue0(v.Pos, OpConstBool, typ.Bool) + v0.AuxInt = boolToAuxInt(true) + 
v.AddArg2(v0, mem) + return true + } + return false +} +func rewriteValuegeneric_OpStaticLECall(v *Value) bool { + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [1]) mem) + // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) + // result: (MakeResult (Eq8 (Load sptr mem) (Const8 [int8(read8(scon,0))])) mem) + for { + if len(v.Args) != 4 { + break + } + callAux := auxToCall(v.Aux) + mem := v.Args[3] + sptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAddr { + break + } + scon := auxToSym(v_1.Aux) + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpSB { + break + } + v_2 := v.Args[2] + if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 1 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon)) { + break + } + v.reset(OpMakeResult) + v0 := b.NewValue0(v.Pos, OpEq8, typ.Bool) + v1 := b.NewValue0(v.Pos, OpLoad, typ.Int8) + v1.AddArg2(sptr, mem) + v2 := b.NewValue0(v.Pos, OpConst8, typ.Int8) + v2.AuxInt = int8ToAuxInt(int8(read8(scon, 0))) + v0.AddArg2(v1, v2) + v.AddArg2(v0, mem) + return true + } + // match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [1]) mem) + // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) + // result: (MakeResult (Eq8 (Load sptr mem) (Const8 [int8(read8(scon,0))])) mem) + for { + if len(v.Args) != 4 { + break + } + callAux := auxToCall(v.Aux) + mem := v.Args[3] + v_0 := v.Args[0] + if v_0.Op != OpAddr { + break + } + scon := auxToSym(v_0.Aux) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSB { + break + } + sptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 1 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon)) { + break + } + v.reset(OpMakeResult) + v0 := b.NewValue0(v.Pos, OpEq8, typ.Bool) + v1 := b.NewValue0(v.Pos, OpLoad, typ.Int8) + v1.AddArg2(sptr, mem) + v2 := b.NewValue0(v.Pos, OpConst8, typ.Int8) + v2.AuxInt = int8ToAuxInt(int8(read8(scon, 0))) + 
v0.AddArg2(v1, v2) + v.AddArg2(v0, mem) + return true + } + // match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [2]) mem) + // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) + // result: (MakeResult (Eq16 (Load sptr mem) (Const16 [int16(read16(scon,0,config.ctxt.Arch.ByteOrder))])) mem) + for { + if len(v.Args) != 4 { + break + } + callAux := auxToCall(v.Aux) + mem := v.Args[3] + sptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAddr { + break + } + scon := auxToSym(v_1.Aux) + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpSB { + break + } + v_2 := v.Args[2] + if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 2 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)) { + break + } + v.reset(OpMakeResult) + v0 := b.NewValue0(v.Pos, OpEq16, typ.Bool) + v1 := b.NewValue0(v.Pos, OpLoad, typ.Int16) + v1.AddArg2(sptr, mem) + v2 := b.NewValue0(v.Pos, OpConst16, typ.Int16) + v2.AuxInt = int16ToAuxInt(int16(read16(scon, 0, config.ctxt.Arch.ByteOrder))) + v0.AddArg2(v1, v2) + v.AddArg2(v0, mem) + return true + } + // match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [2]) mem) + // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) + // result: (MakeResult (Eq16 (Load sptr mem) (Const16 [int16(read16(scon,0,config.ctxt.Arch.ByteOrder))])) mem) + for { + if len(v.Args) != 4 { + break + } + callAux := auxToCall(v.Aux) + mem := v.Args[3] + v_0 := v.Args[0] + if v_0.Op != OpAddr { + break + } + scon := auxToSym(v_0.Aux) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSB { + break + } + sptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 2 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)) { + break + } + v.reset(OpMakeResult) + v0 := b.NewValue0(v.Pos, OpEq16, typ.Bool) + v1 := b.NewValue0(v.Pos, OpLoad, typ.Int16) + v1.AddArg2(sptr, mem) + v2 := 
b.NewValue0(v.Pos, OpConst16, typ.Int16) + v2.AuxInt = int16ToAuxInt(int16(read16(scon, 0, config.ctxt.Arch.ByteOrder))) + v0.AddArg2(v1, v2) + v.AddArg2(v0, mem) + return true + } + // match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [4]) mem) + // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) + // result: (MakeResult (Eq32 (Load sptr mem) (Const32 [int32(read32(scon,0,config.ctxt.Arch.ByteOrder))])) mem) + for { + if len(v.Args) != 4 { + break + } + callAux := auxToCall(v.Aux) + mem := v.Args[3] + sptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAddr { + break + } + scon := auxToSym(v_1.Aux) + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpSB { + break + } + v_2 := v.Args[2] + if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 4 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)) { + break + } + v.reset(OpMakeResult) + v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool) + v1 := b.NewValue0(v.Pos, OpLoad, typ.Int32) + v1.AddArg2(sptr, mem) + v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32) + v2.AuxInt = int32ToAuxInt(int32(read32(scon, 0, config.ctxt.Arch.ByteOrder))) + v0.AddArg2(v1, v2) + v.AddArg2(v0, mem) + return true + } + // match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [4]) mem) + // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) + // result: (MakeResult (Eq32 (Load sptr mem) (Const32 [int32(read32(scon,0,config.ctxt.Arch.ByteOrder))])) mem) + for { + if len(v.Args) != 4 { + break + } + callAux := auxToCall(v.Aux) + mem := v.Args[3] + v_0 := v.Args[0] + if v_0.Op != OpAddr { + break + } + scon := auxToSym(v_0.Aux) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSB { + break + } + sptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 4 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)) { + break + } + v.reset(OpMakeResult) + v0 := 
b.NewValue0(v.Pos, OpEq32, typ.Bool) + v1 := b.NewValue0(v.Pos, OpLoad, typ.Int32) + v1.AddArg2(sptr, mem) + v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32) + v2.AuxInt = int32ToAuxInt(int32(read32(scon, 0, config.ctxt.Arch.ByteOrder))) + v0.AddArg2(v1, v2) + v.AddArg2(v0, mem) + return true + } + // match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [8]) mem) + // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8 + // result: (MakeResult (Eq64 (Load sptr mem) (Const64 [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])) mem) + for { + if len(v.Args) != 4 { + break + } + callAux := auxToCall(v.Aux) + mem := v.Args[3] + sptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAddr { + break + } + scon := auxToSym(v_1.Aux) + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpSB { + break + } + v_2 := v.Args[2] + if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 8 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) { + break + } + v.reset(OpMakeResult) + v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool) + v1 := b.NewValue0(v.Pos, OpLoad, typ.Int64) + v1.AddArg2(sptr, mem) + v2 := b.NewValue0(v.Pos, OpConst64, typ.Int64) + v2.AuxInt = int64ToAuxInt(int64(read64(scon, 0, config.ctxt.Arch.ByteOrder))) + v0.AddArg2(v1, v2) + v.AddArg2(v0, mem) + return true + } + // match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [8]) mem) + // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8 + // result: (MakeResult (Eq64 (Load sptr mem) (Const64 [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])) mem) + for { + if len(v.Args) != 4 { + break + } + callAux := auxToCall(v.Aux) + mem := v.Args[3] + v_0 := v.Args[0] + if v_0.Op != OpAddr { + break + } + scon := auxToSym(v_0.Aux) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSB { + break + } + sptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != 
OpConst64 || auxIntToInt64(v_2.AuxInt) != 8 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) { + break + } + v.reset(OpMakeResult) + v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool) + v1 := b.NewValue0(v.Pos, OpLoad, typ.Int64) + v1.AddArg2(sptr, mem) + v2 := b.NewValue0(v.Pos, OpConst64, typ.Int64) + v2.AuxInt = int64ToAuxInt(int64(read64(scon, 0, config.ctxt.Arch.ByteOrder))) + v0.AddArg2(v1, v2) + v.AddArg2(v0, mem) + return true + } + // match: (StaticLECall {callAux} _ _ (Const64 [0]) mem) + // cond: isSameCall(callAux, "runtime.memequal") + // result: (MakeResult (ConstBool [true]) mem) + for { + if len(v.Args) != 4 { + break + } + callAux := auxToCall(v.Aux) + mem := v.Args[3] + v_2 := v.Args[2] + if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 0 || !(isSameCall(callAux, "runtime.memequal")) { + break + } + v.reset(OpMakeResult) + v0 := b.NewValue0(v.Pos, OpConstBool, typ.Bool) + v0.AuxInt = boolToAuxInt(true) + v.AddArg2(v0, mem) + return true + } + // match: (StaticLECall {callAux} p q _ mem) + // cond: isSameCall(callAux, "runtime.memequal") && isSamePtr(p, q) + // result: (MakeResult (ConstBool [true]) mem) + for { + if len(v.Args) != 4 { + break + } + callAux := auxToCall(v.Aux) + mem := v.Args[3] + p := v.Args[0] + q := v.Args[1] + if !(isSameCall(callAux, "runtime.memequal") && isSamePtr(p, q)) { + break + } + v.reset(OpMakeResult) + v0 := b.NewValue0(v.Pos, OpConstBool, typ.Bool) + v0.AuxInt = boolToAuxInt(true) + v.AddArg2(v0, mem) + return true + } + // match: (StaticLECall {callAux} _ (Const64 [0]) (Const64 [0]) mem) + // cond: isSameCall(callAux, "runtime.makeslice") + // result: (MakeResult (Addr {ir.Syms.Zerobase} (SB)) mem) + for { + if len(v.Args) != 4 { + break + } + callAux := auxToCall(v.Aux) + mem := v.Args[3] + v_1 := v.Args[1] + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v_2 := v.Args[2] + if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) 
!= 0 || !(isSameCall(callAux, "runtime.makeslice")) { + break + } + v.reset(OpMakeResult) + v0 := b.NewValue0(v.Pos, OpAddr, v.Type.FieldType(0)) + v0.Aux = symToAux(ir.Syms.Zerobase) + v1 := b.NewValue0(v.Pos, OpSB, typ.Uintptr) + v0.AddArg(v1) + v.AddArg2(v0, mem) + return true + } + // match: (StaticLECall {callAux} _ (Const32 [0]) (Const32 [0]) mem) + // cond: isSameCall(callAux, "runtime.makeslice") + // result: (MakeResult (Addr {ir.Syms.Zerobase} (SB)) mem) + for { + if len(v.Args) != 4 { + break + } + callAux := auxToCall(v.Aux) + mem := v.Args[3] + v_1 := v.Args[1] + if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != 0 { + break + } + v_2 := v.Args[2] + if v_2.Op != OpConst32 || auxIntToInt32(v_2.AuxInt) != 0 || !(isSameCall(callAux, "runtime.makeslice")) { + break + } + v.reset(OpMakeResult) + v0 := b.NewValue0(v.Pos, OpAddr, v.Type.FieldType(0)) + v0.Aux = symToAux(ir.Syms.Zerobase) + v1 := b.NewValue0(v.Pos, OpSB, typ.Uintptr) + v0.AddArg(v1) + v.AddArg2(v0, mem) + return true + } + return false +} +func rewriteValuegeneric_OpStore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Store {t1} p1 (Load p2 mem) mem) + // cond: isSamePtr(p1, p2) && t2.Size() == t1.Size() + // result: mem + for { + t1 := auxToType(v.Aux) + p1 := v_0 + if v_1.Op != OpLoad { + break + } + t2 := v_1.Type + mem := v_1.Args[1] + p2 := v_1.Args[0] + if mem != v_2 || !(isSamePtr(p1, p2) && t2.Size() == t1.Size()) { + break + } + v.copyOf(mem) + return true + } + // match: (Store {t1} p1 (Load p2 oldmem) mem:(Store {t3} p3 _ oldmem)) + // cond: isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size()) + // result: mem + for { + t1 := auxToType(v.Aux) + p1 := v_0 + if v_1.Op != OpLoad { + break + } + t2 := v_1.Type + oldmem := v_1.Args[1] + p2 := v_1.Args[0] + mem := v_2 + if mem.Op != OpStore { + break + } + t3 := auxToType(mem.Aux) + _ = mem.Args[2] + p3 := mem.Args[0] + if oldmem != mem.Args[2] || 
!(isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size())) { + break + } + v.copyOf(mem) + return true + } + // match: (Store {t1} p1 (Load p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ oldmem))) + // cond: isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size()) && disjoint(p1, t1.Size(), p4, t4.Size()) + // result: mem + for { + t1 := auxToType(v.Aux) + p1 := v_0 + if v_1.Op != OpLoad { + break + } + t2 := v_1.Type + oldmem := v_1.Args[1] + p2 := v_1.Args[0] + mem := v_2 + if mem.Op != OpStore { + break + } + t3 := auxToType(mem.Aux) + _ = mem.Args[2] + p3 := mem.Args[0] + mem_2 := mem.Args[2] + if mem_2.Op != OpStore { + break + } + t4 := auxToType(mem_2.Aux) + _ = mem_2.Args[2] + p4 := mem_2.Args[0] + if oldmem != mem_2.Args[2] || !(isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size()) && disjoint(p1, t1.Size(), p4, t4.Size())) { + break + } + v.copyOf(mem) + return true + } + // match: (Store {t1} p1 (Load p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ oldmem)))) + // cond: isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size()) && disjoint(p1, t1.Size(), p4, t4.Size()) && disjoint(p1, t1.Size(), p5, t5.Size()) + // result: mem + for { + t1 := auxToType(v.Aux) + p1 := v_0 + if v_1.Op != OpLoad { + break + } + t2 := v_1.Type + oldmem := v_1.Args[1] + p2 := v_1.Args[0] + mem := v_2 + if mem.Op != OpStore { + break + } + t3 := auxToType(mem.Aux) + _ = mem.Args[2] + p3 := mem.Args[0] + mem_2 := mem.Args[2] + if mem_2.Op != OpStore { + break + } + t4 := auxToType(mem_2.Aux) + _ = mem_2.Args[2] + p4 := mem_2.Args[0] + mem_2_2 := mem_2.Args[2] + if mem_2_2.Op != OpStore { + break + } + t5 := auxToType(mem_2_2.Aux) + _ = mem_2_2.Args[2] + p5 := mem_2_2.Args[0] + if oldmem != mem_2_2.Args[2] || !(isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size()) && disjoint(p1, t1.Size(), p4, t4.Size()) && 
disjoint(p1, t1.Size(), p5, t5.Size())) { + break + } + v.copyOf(mem) + return true + } + // match: (Store {t} (OffPtr [o] p1) x mem:(Zero [n] p2 _)) + // cond: isConstZero(x) && o >= 0 && t.Size() + o <= n && isSamePtr(p1, p2) + // result: mem + for { + t := auxToType(v.Aux) + if v_0.Op != OpOffPtr { + break + } + o := auxIntToInt64(v_0.AuxInt) + p1 := v_0.Args[0] + x := v_1 + mem := v_2 + if mem.Op != OpZero { + break + } + n := auxIntToInt64(mem.AuxInt) + p2 := mem.Args[0] + if !(isConstZero(x) && o >= 0 && t.Size()+o <= n && isSamePtr(p1, p2)) { + break + } + v.copyOf(mem) + return true + } + // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Zero [n] p3 _))) + // cond: isConstZero(x) && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p3) && disjoint(op, t1.Size(), p2, t2.Size()) + // result: mem + for { + t1 := auxToType(v.Aux) + op := v_0 + if op.Op != OpOffPtr { + break + } + o1 := auxIntToInt64(op.AuxInt) + p1 := op.Args[0] + x := v_1 + mem := v_2 + if mem.Op != OpStore { + break + } + t2 := auxToType(mem.Aux) + _ = mem.Args[2] + p2 := mem.Args[0] + mem_2 := mem.Args[2] + if mem_2.Op != OpZero { + break + } + n := auxIntToInt64(mem_2.AuxInt) + p3 := mem_2.Args[0] + if !(isConstZero(x) && o1 >= 0 && t1.Size()+o1 <= n && isSamePtr(p1, p3) && disjoint(op, t1.Size(), p2, t2.Size())) { + break + } + v.copyOf(mem) + return true + } + // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Zero [n] p4 _)))) + // cond: isConstZero(x) && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p4) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) + // result: mem + for { + t1 := auxToType(v.Aux) + op := v_0 + if op.Op != OpOffPtr { + break + } + o1 := auxIntToInt64(op.AuxInt) + p1 := op.Args[0] + x := v_1 + mem := v_2 + if mem.Op != OpStore { + break + } + t2 := auxToType(mem.Aux) + _ = mem.Args[2] + p2 := mem.Args[0] + mem_2 := mem.Args[2] + if mem_2.Op != OpStore { + break + } + t3 := 
auxToType(mem_2.Aux) + _ = mem_2.Args[2] + p3 := mem_2.Args[0] + mem_2_2 := mem_2.Args[2] + if mem_2_2.Op != OpZero { + break + } + n := auxIntToInt64(mem_2_2.AuxInt) + p4 := mem_2_2.Args[0] + if !(isConstZero(x) && o1 >= 0 && t1.Size()+o1 <= n && isSamePtr(p1, p4) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())) { + break + } + v.copyOf(mem) + return true + } + // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Zero [n] p5 _))))) + // cond: isConstZero(x) && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p5) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size()) + // result: mem + for { + t1 := auxToType(v.Aux) + op := v_0 + if op.Op != OpOffPtr { + break + } + o1 := auxIntToInt64(op.AuxInt) + p1 := op.Args[0] + x := v_1 + mem := v_2 + if mem.Op != OpStore { + break + } + t2 := auxToType(mem.Aux) + _ = mem.Args[2] + p2 := mem.Args[0] + mem_2 := mem.Args[2] + if mem_2.Op != OpStore { + break + } + t3 := auxToType(mem_2.Aux) + _ = mem_2.Args[2] + p3 := mem_2.Args[0] + mem_2_2 := mem_2.Args[2] + if mem_2_2.Op != OpStore { + break + } + t4 := auxToType(mem_2_2.Aux) + _ = mem_2_2.Args[2] + p4 := mem_2_2.Args[0] + mem_2_2_2 := mem_2_2.Args[2] + if mem_2_2_2.Op != OpZero { + break + } + n := auxIntToInt64(mem_2_2_2.AuxInt) + p5 := mem_2_2_2.Args[0] + if !(isConstZero(x) && o1 >= 0 && t1.Size()+o1 <= n && isSamePtr(p1, p5) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())) { + break + } + v.copyOf(mem) + return true + } + // match: (Store _ (StructMake0) mem) + // result: mem + for { + if v_1.Op != OpStructMake0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Store dst (StructMake1 f0) mem) + // result: (Store {t.FieldType(0)} (OffPtr [0] dst) f0 mem) + for { + dst := v_0 + if v_1.Op != OpStructMake1 { + break + } + t 
:= v_1.Type + f0 := v_1.Args[0] + mem := v_2 + v.reset(OpStore) + v.Aux = typeToAux(t.FieldType(0)) + v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) + v0.AuxInt = int64ToAuxInt(0) + v0.AddArg(dst) + v.AddArg3(v0, f0, mem) + return true + } + // match: (Store dst (StructMake2 f0 f1) mem) + // result: (Store {t.FieldType(1)} (OffPtr [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr [0] dst) f0 mem)) + for { + dst := v_0 + if v_1.Op != OpStructMake2 { + break + } + t := v_1.Type + f1 := v_1.Args[1] + f0 := v_1.Args[0] + mem := v_2 + v.reset(OpStore) + v.Aux = typeToAux(t.FieldType(1)) + v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) + v0.AuxInt = int64ToAuxInt(t.FieldOff(1)) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t.FieldType(0)) + v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) + v2.AuxInt = int64ToAuxInt(0) + v2.AddArg(dst) + v1.AddArg3(v2, f0, mem) + v.AddArg3(v0, f1, v1) + return true + } + // match: (Store dst (StructMake3 f0 f1 f2) mem) + // result: (Store {t.FieldType(2)} (OffPtr [t.FieldOff(2)] dst) f2 (Store {t.FieldType(1)} (OffPtr [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr [0] dst) f0 mem))) + for { + dst := v_0 + if v_1.Op != OpStructMake3 { + break + } + t := v_1.Type + f2 := v_1.Args[2] + f0 := v_1.Args[0] + f1 := v_1.Args[1] + mem := v_2 + v.reset(OpStore) + v.Aux = typeToAux(t.FieldType(2)) + v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) + v0.AuxInt = int64ToAuxInt(t.FieldOff(2)) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t.FieldType(1)) + v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) + v2.AuxInt = int64ToAuxInt(t.FieldOff(1)) + v2.AddArg(dst) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = typeToAux(t.FieldType(0)) + v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) + v4.AuxInt = int64ToAuxInt(0) + v4.AddArg(dst) + v3.AddArg3(v4, f0, mem) + v1.AddArg3(v2, 
f1, v3) + v.AddArg3(v0, f2, v1) + return true + } + // match: (Store dst (StructMake4 f0 f1 f2 f3) mem) + // result: (Store {t.FieldType(3)} (OffPtr [t.FieldOff(3)] dst) f3 (Store {t.FieldType(2)} (OffPtr [t.FieldOff(2)] dst) f2 (Store {t.FieldType(1)} (OffPtr [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr [0] dst) f0 mem)))) + for { + dst := v_0 + if v_1.Op != OpStructMake4 { + break + } + t := v_1.Type + f3 := v_1.Args[3] + f0 := v_1.Args[0] + f1 := v_1.Args[1] + f2 := v_1.Args[2] + mem := v_2 + v.reset(OpStore) + v.Aux = typeToAux(t.FieldType(3)) + v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo()) + v0.AuxInt = int64ToAuxInt(t.FieldOff(3)) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t.FieldType(2)) + v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) + v2.AuxInt = int64ToAuxInt(t.FieldOff(2)) + v2.AddArg(dst) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = typeToAux(t.FieldType(1)) + v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) + v4.AuxInt = int64ToAuxInt(t.FieldOff(1)) + v4.AddArg(dst) + v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v5.Aux = typeToAux(t.FieldType(0)) + v6 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) + v6.AuxInt = int64ToAuxInt(0) + v6.AddArg(dst) + v5.AddArg3(v6, f0, mem) + v3.AddArg3(v4, f1, v5) + v1.AddArg3(v2, f2, v3) + v.AddArg3(v0, f3, v1) + return true + } + // match: (Store {t} dst (Load src mem) mem) + // cond: !CanSSA(t) + // result: (Move {t} [t.Size()] dst src mem) + for { + t := auxToType(v.Aux) + dst := v_0 + if v_1.Op != OpLoad { + break + } + mem := v_1.Args[1] + src := v_1.Args[0] + if mem != v_2 || !(!CanSSA(t)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(t.Size()) + v.Aux = typeToAux(t) + v.AddArg3(dst, src, mem) + return true + } + // match: (Store {t} dst (Load src mem) (VarDef {x} mem)) + // cond: !CanSSA(t) + // result: (Move {t} [t.Size()] dst src (VarDef {x} mem)) + for { + t := 
auxToType(v.Aux) + dst := v_0 + if v_1.Op != OpLoad { + break + } + mem := v_1.Args[1] + src := v_1.Args[0] + if v_2.Op != OpVarDef { + break + } + x := auxToSym(v_2.Aux) + if mem != v_2.Args[0] || !(!CanSSA(t)) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(t.Size()) + v.Aux = typeToAux(t) + v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem) + v0.Aux = symToAux(x) + v0.AddArg(mem) + v.AddArg3(dst, src, v0) + return true + } + // match: (Store _ (ArrayMake0) mem) + // result: mem + for { + if v_1.Op != OpArrayMake0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Store dst (ArrayMake1 e) mem) + // result: (Store {e.Type} dst e mem) + for { + dst := v_0 + if v_1.Op != OpArrayMake1 { + break + } + e := v_1.Args[0] + mem := v_2 + v.reset(OpStore) + v.Aux = typeToAux(e.Type) + v.AddArg3(dst, e, mem) + return true + } + // match: (Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call)) + // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject") + // result: mem + for { + if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + call := v_0.Args[0] + if call.Op != OpStaticLECall || len(call.Args) != 2 { + break + } + x := v_1 + mem := v_2 + if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")) { + break + } + v.copyOf(mem) + return true + } + // match: (Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call)) + // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject") + // result: mem + for { + if v_0.Op != OpOffPtr { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelectN || auxIntToInt64(v_0_0.AuxInt) != 0 { + break + } + call := v_0_0.Args[0] + if call.Op != OpStaticLECall || len(call.Args) != 2 { + break + } + x := v_1 + mem := v_2 + if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isSameCall(call.Aux, 
"runtime.newobject")) { + break + } + v.copyOf(mem) + return true + } + // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [0] p2) d2 m3:(Move [n] p3 _ mem))) + // cond: m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3) + // result: (Store {t1} op1 d1 (Store {t2} op2 d2 mem)) + for { + t1 := auxToType(v.Aux) + op1 := v_0 + if op1.Op != OpOffPtr { + break + } + o1 := auxIntToInt64(op1.AuxInt) + p1 := op1.Args[0] + d1 := v_1 + m2 := v_2 + if m2.Op != OpStore { + break + } + t2 := auxToType(m2.Aux) + _ = m2.Args[2] + op2 := m2.Args[0] + if op2.Op != OpOffPtr || auxIntToInt64(op2.AuxInt) != 0 { + break + } + p2 := op2.Args[0] + d2 := m2.Args[1] + m3 := m2.Args[2] + if m3.Op != OpMove { + break + } + n := auxIntToInt64(m3.AuxInt) + mem := m3.Args[2] + p3 := m3.Args[0] + if !(m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3)) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t1) + v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v0.Aux = typeToAux(t2) + v0.AddArg3(op2, d2, mem) + v.AddArg3(op1, d1, v0) + return true + } + // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [0] p3) d3 m4:(Move [n] p4 _ mem)))) + // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == t3.Size() && o1-o2 == t2.Size() && n == t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2, m3, m4) + // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem))) + for { + t1 := auxToType(v.Aux) + op1 := v_0 + if op1.Op != OpOffPtr { + break + } + o1 := auxIntToInt64(op1.AuxInt) + p1 := op1.Args[0] + d1 := v_1 + m2 := v_2 + if m2.Op != OpStore { + break + } + t2 := auxToType(m2.Aux) + _ = m2.Args[2] + op2 := m2.Args[0] + if op2.Op != OpOffPtr { + break + } + o2 := 
auxIntToInt64(op2.AuxInt) + p2 := op2.Args[0] + d2 := m2.Args[1] + m3 := m2.Args[2] + if m3.Op != OpStore { + break + } + t3 := auxToType(m3.Aux) + _ = m3.Args[2] + op3 := m3.Args[0] + if op3.Op != OpOffPtr || auxIntToInt64(op3.AuxInt) != 0 { + break + } + p3 := op3.Args[0] + d3 := m3.Args[1] + m4 := m3.Args[2] + if m4.Op != OpMove { + break + } + n := auxIntToInt64(m4.AuxInt) + mem := m4.Args[2] + p4 := m4.Args[0] + if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == t3.Size() && o1-o2 == t2.Size() && n == t3.Size()+t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2, m3, m4)) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t1) + v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v0.Aux = typeToAux(t2) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t3) + v1.AddArg3(op3, d3, mem) + v0.AddArg3(op2, d2, v1) + v.AddArg3(op1, d1, v0) + return true + } + // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [o3] p3) d3 m4:(Store {t4} op4:(OffPtr [0] p4) d4 m5:(Move [n] p5 _ mem))))) + // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == t4.Size() && o2-o3 == t3.Size() && o1-o2 == t2.Size() && n == t4.Size() + t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2, m3, m4, m5) + // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem)))) + for { + t1 := auxToType(v.Aux) + op1 := v_0 + if op1.Op != OpOffPtr { + break + } + o1 := auxIntToInt64(op1.AuxInt) + p1 := op1.Args[0] + d1 := v_1 + m2 := v_2 + if m2.Op != OpStore { + break + } + t2 := auxToType(m2.Aux) + _ = m2.Args[2] + op2 := m2.Args[0] + if op2.Op != OpOffPtr { + break + } + o2 := auxIntToInt64(op2.AuxInt) + p2 := op2.Args[0] + d2 := m2.Args[1] + m3 := m2.Args[2] + if m3.Op != OpStore { + break + } + t3 := auxToType(m3.Aux) + _ = m3.Args[2] 
+ op3 := m3.Args[0] + if op3.Op != OpOffPtr { + break + } + o3 := auxIntToInt64(op3.AuxInt) + p3 := op3.Args[0] + d3 := m3.Args[1] + m4 := m3.Args[2] + if m4.Op != OpStore { + break + } + t4 := auxToType(m4.Aux) + _ = m4.Args[2] + op4 := m4.Args[0] + if op4.Op != OpOffPtr || auxIntToInt64(op4.AuxInt) != 0 { + break + } + p4 := op4.Args[0] + d4 := m4.Args[1] + m5 := m4.Args[2] + if m5.Op != OpMove { + break + } + n := auxIntToInt64(m5.AuxInt) + mem := m5.Args[2] + p5 := m5.Args[0] + if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == t4.Size() && o2-o3 == t3.Size() && o1-o2 == t2.Size() && n == t4.Size()+t3.Size()+t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2, m3, m4, m5)) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t1) + v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v0.Aux = typeToAux(t2) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t3) + v2 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v2.Aux = typeToAux(t4) + v2.AddArg3(op4, d4, mem) + v1.AddArg3(op3, d3, v2) + v0.AddArg3(op2, d2, v1) + v.AddArg3(op1, d1, v0) + return true + } + // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [0] p2) d2 m3:(Zero [n] p3 mem))) + // cond: m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3) + // result: (Store {t1} op1 d1 (Store {t2} op2 d2 mem)) + for { + t1 := auxToType(v.Aux) + op1 := v_0 + if op1.Op != OpOffPtr { + break + } + o1 := auxIntToInt64(op1.AuxInt) + p1 := op1.Args[0] + d1 := v_1 + m2 := v_2 + if m2.Op != OpStore { + break + } + t2 := auxToType(m2.Aux) + _ = m2.Args[2] + op2 := m2.Args[0] + if op2.Op != OpOffPtr || auxIntToInt64(op2.AuxInt) != 0 { + break + } + p2 := op2.Args[0] + d2 := m2.Args[1] + m3 := m2.Args[2] + if m3.Op != OpZero { + break + } + n := auxIntToInt64(m3.AuxInt) + mem := m3.Args[1] + p3 := m3.Args[0] + 
if !(m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3)) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t1) + v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v0.Aux = typeToAux(t2) + v0.AddArg3(op2, d2, mem) + v.AddArg3(op1, d1, v0) + return true + } + // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [0] p3) d3 m4:(Zero [n] p4 mem)))) + // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == t3.Size() && o1-o2 == t2.Size() && n == t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2, m3, m4) + // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem))) + for { + t1 := auxToType(v.Aux) + op1 := v_0 + if op1.Op != OpOffPtr { + break + } + o1 := auxIntToInt64(op1.AuxInt) + p1 := op1.Args[0] + d1 := v_1 + m2 := v_2 + if m2.Op != OpStore { + break + } + t2 := auxToType(m2.Aux) + _ = m2.Args[2] + op2 := m2.Args[0] + if op2.Op != OpOffPtr { + break + } + o2 := auxIntToInt64(op2.AuxInt) + p2 := op2.Args[0] + d2 := m2.Args[1] + m3 := m2.Args[2] + if m3.Op != OpStore { + break + } + t3 := auxToType(m3.Aux) + _ = m3.Args[2] + op3 := m3.Args[0] + if op3.Op != OpOffPtr || auxIntToInt64(op3.AuxInt) != 0 { + break + } + p3 := op3.Args[0] + d3 := m3.Args[1] + m4 := m3.Args[2] + if m4.Op != OpZero { + break + } + n := auxIntToInt64(m4.AuxInt) + mem := m4.Args[1] + p4 := m4.Args[0] + if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == t3.Size() && o1-o2 == t2.Size() && n == t3.Size()+t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2, m3, m4)) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t1) + v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v0.Aux = typeToAux(t2) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t3) + v1.AddArg3(op3, d3, mem) + v0.AddArg3(op2, d2, 
v1) + v.AddArg3(op1, d1, v0) + return true + } + // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [o3] p3) d3 m4:(Store {t4} op4:(OffPtr [0] p4) d4 m5:(Zero [n] p5 mem))))) + // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == t4.Size() && o2-o3 == t3.Size() && o1-o2 == t2.Size() && n == t4.Size() + t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2, m3, m4, m5) + // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem)))) + for { + t1 := auxToType(v.Aux) + op1 := v_0 + if op1.Op != OpOffPtr { + break + } + o1 := auxIntToInt64(op1.AuxInt) + p1 := op1.Args[0] + d1 := v_1 + m2 := v_2 + if m2.Op != OpStore { + break + } + t2 := auxToType(m2.Aux) + _ = m2.Args[2] + op2 := m2.Args[0] + if op2.Op != OpOffPtr { + break + } + o2 := auxIntToInt64(op2.AuxInt) + p2 := op2.Args[0] + d2 := m2.Args[1] + m3 := m2.Args[2] + if m3.Op != OpStore { + break + } + t3 := auxToType(m3.Aux) + _ = m3.Args[2] + op3 := m3.Args[0] + if op3.Op != OpOffPtr { + break + } + o3 := auxIntToInt64(op3.AuxInt) + p3 := op3.Args[0] + d3 := m3.Args[1] + m4 := m3.Args[2] + if m4.Op != OpStore { + break + } + t4 := auxToType(m4.Aux) + _ = m4.Args[2] + op4 := m4.Args[0] + if op4.Op != OpOffPtr || auxIntToInt64(op4.AuxInt) != 0 { + break + } + p4 := op4.Args[0] + d4 := m4.Args[1] + m5 := m4.Args[2] + if m5.Op != OpZero { + break + } + n := auxIntToInt64(m5.AuxInt) + mem := m5.Args[1] + p5 := m5.Args[0] + if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == t4.Size() && o2-o3 == t3.Size() && o1-o2 == t2.Size() && n == t4.Size()+t3.Size()+t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2, m3, m4, m5)) { + break + } + v.reset(OpStore) + v.Aux = typeToAux(t1) + v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v0.Aux = 
typeToAux(t2) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = typeToAux(t3) + v2 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v2.Aux = typeToAux(t4) + v2.AddArg3(op4, d4, mem) + v1.AddArg3(op3, d3, v2) + v0.AddArg3(op2, d2, v1) + v.AddArg3(op1, d1, v0) + return true + } + return false +} +func rewriteValuegeneric_OpStringLen(v *Value) bool { + v_0 := v.Args[0] + // match: (StringLen (StringMake _ (Const64 [c]))) + // result: (Const64 [c]) + for { + if v_0.Op != OpStringMake { + break + } + _ = v_0.Args[1] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + t := v_0_1.Type + c := auxIntToInt64(v_0_1.AuxInt) + v.reset(OpConst64) + v.Type = t + v.AuxInt = int64ToAuxInt(c) + return true + } + return false +} +func rewriteValuegeneric_OpStringPtr(v *Value) bool { + v_0 := v.Args[0] + // match: (StringPtr (StringMake (Addr {s} base) _)) + // result: (Addr {s} base) + for { + if v_0.Op != OpStringMake { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAddr { + break + } + t := v_0_0.Type + s := auxToSym(v_0_0.Aux) + base := v_0_0.Args[0] + v.reset(OpAddr) + v.Type = t + v.Aux = symToAux(s) + v.AddArg(base) + return true + } + return false +} +func rewriteValuegeneric_OpStructSelect(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (StructSelect (StructMake1 x)) + // result: x + for { + if v_0.Op != OpStructMake1 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (StructSelect [0] (StructMake2 x _)) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake2 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (StructSelect [1] (StructMake2 _ x)) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake2 { + break + } + x := v_0.Args[1] + v.copyOf(x) + return true + } + // match: (StructSelect [0] (StructMake3 x _ _)) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake3 { + break + } + x := 
v_0.Args[0] + v.copyOf(x) + return true + } + // match: (StructSelect [1] (StructMake3 _ x _)) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake3 { + break + } + x := v_0.Args[1] + v.copyOf(x) + return true + } + // match: (StructSelect [2] (StructMake3 _ _ x)) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpStructMake3 { + break + } + x := v_0.Args[2] + v.copyOf(x) + return true + } + // match: (StructSelect [0] (StructMake4 x _ _ _)) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake4 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (StructSelect [1] (StructMake4 _ x _ _)) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake4 { + break + } + x := v_0.Args[1] + v.copyOf(x) + return true + } + // match: (StructSelect [2] (StructMake4 _ _ x _)) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpStructMake4 { + break + } + x := v_0.Args[2] + v.copyOf(x) + return true + } + // match: (StructSelect [3] (StructMake4 _ _ _ x)) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 3 || v_0.Op != OpStructMake4 { + break + } + x := v_0.Args[3] + v.copyOf(x) + return true + } + // match: (StructSelect [i] x:(Load ptr mem)) + // cond: !CanSSA(t) + // result: @x.Block (Load (OffPtr [t.FieldOff(int(i))] ptr) mem) + for { + i := auxIntToInt64(v.AuxInt) + x := v_0 + if x.Op != OpLoad { + break + } + t := x.Type + mem := x.Args[1] + ptr := x.Args[0] + if !(!CanSSA(t)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpLoad, v.Type) + v.copyOf(v0) + v1 := b.NewValue0(v.Pos, OpOffPtr, v.Type.PtrTo()) + v1.AuxInt = int64ToAuxInt(t.FieldOff(int(i))) + v1.AddArg(ptr) + v0.AddArg2(v1, mem) + return true + } + // match: (StructSelect [0] (IData x)) + // result: (IData x) + for { + if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpIData { + break + } + x := v_0.Args[0] + v.reset(OpIData) + v.AddArg(x) + return true + } 
+ return false +} +func rewriteValuegeneric_OpSub16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Sub16 (Const16 [c]) (Const16 [d])) + // result: (Const16 [c-d]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst16 { + break + } + d := auxIntToInt16(v_1.AuxInt) + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(c - d) + return true + } + // match: (Sub16 x (Const16 [c])) + // cond: x.Op != OpConst16 + // result: (Add16 (Const16 [-c]) x) + for { + x := v_0 + if v_1.Op != OpConst16 { + break + } + t := v_1.Type + c := auxIntToInt16(v_1.AuxInt) + if !(x.Op != OpConst16) { + break + } + v.reset(OpAdd16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(-c) + v.AddArg2(v0, x) + return true + } + // match: (Sub16 (Mul16 x y) (Mul16 x z)) + // result: (Mul16 x (Sub16 y z)) + for { + t := v.Type + if v_0.Op != OpMul16 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if v_1.Op != OpMul16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + z := v_1_1 + v.reset(OpMul16) + v0 := b.NewValue0(v.Pos, OpSub16, t) + v0.AddArg2(y, z) + v.AddArg2(x, v0) + return true + } + } + break + } + // match: (Sub16 x x) + // result: (Const16 [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + // match: (Sub16 (Neg16 x) (Com16 x)) + // result: (Const16 [1]) + for { + if v_0.Op != OpNeg16 { + break + } + x := v_0.Args[0] + if v_1.Op != OpCom16 || x != v_1.Args[0] { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(1) + return true + } + // match: (Sub16 (Com16 x) (Neg16 x)) + // result: (Const16 [-1]) + for { + if v_0.Op != OpCom16 { + break + } + x := 
v_0.Args[0] + if v_1.Op != OpNeg16 || x != v_1.Args[0] { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(-1) + return true + } + // match: (Sub16 (Add16 t x) (Add16 t y)) + // result: (Sub16 x y) + for { + if v_0.Op != OpAdd16 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + t := v_0_0 + x := v_0_1 + if v_1.Op != OpAdd16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if t != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpSub16) + v.AddArg2(x, y) + return true + } + } + break + } + // match: (Sub16 (Add16 x y) x) + // result: y + for { + if v_0.Op != OpAdd16 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if x != v_1 { + continue + } + v.copyOf(y) + return true + } + break + } + // match: (Sub16 (Add16 x y) y) + // result: x + for { + if v_0.Op != OpAdd16 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if y != v_1 { + continue + } + v.copyOf(x) + return true + } + break + } + // match: (Sub16 (Sub16 x y) x) + // result: (Neg16 y) + for { + if v_0.Op != OpSub16 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpNeg16) + v.AddArg(y) + return true + } + // match: (Sub16 x (Add16 x y)) + // result: (Neg16 y) + for { + x := v_0 + if v_1.Op != OpAdd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpNeg16) + v.AddArg(y) + return true + } + break + } + // match: (Sub16 x (Sub16 i:(Const16 ) z)) + // cond: (z.Op != 
OpConst16 && x.Op != OpConst16) + // result: (Sub16 (Add16 x z) i) + for { + x := v_0 + if v_1.Op != OpSub16 { + break + } + z := v_1.Args[1] + i := v_1.Args[0] + if i.Op != OpConst16 { + break + } + t := i.Type + if !(z.Op != OpConst16 && x.Op != OpConst16) { + break + } + v.reset(OpSub16) + v0 := b.NewValue0(v.Pos, OpAdd16, t) + v0.AddArg2(x, z) + v.AddArg2(v0, i) + return true + } + // match: (Sub16 x (Add16 z i:(Const16 ))) + // cond: (z.Op != OpConst16 && x.Op != OpConst16) + // result: (Sub16 (Sub16 x z) i) + for { + x := v_0 + if v_1.Op != OpAdd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z := v_1_0 + i := v_1_1 + if i.Op != OpConst16 { + continue + } + t := i.Type + if !(z.Op != OpConst16 && x.Op != OpConst16) { + continue + } + v.reset(OpSub16) + v0 := b.NewValue0(v.Pos, OpSub16, t) + v0.AddArg2(x, z) + v.AddArg2(v0, i) + return true + } + break + } + // match: (Sub16 (Sub16 i:(Const16 ) z) x) + // cond: (z.Op != OpConst16 && x.Op != OpConst16) + // result: (Sub16 i (Add16 z x)) + for { + if v_0.Op != OpSub16 { + break + } + z := v_0.Args[1] + i := v_0.Args[0] + if i.Op != OpConst16 { + break + } + t := i.Type + x := v_1 + if !(z.Op != OpConst16 && x.Op != OpConst16) { + break + } + v.reset(OpSub16) + v0 := b.NewValue0(v.Pos, OpAdd16, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + // match: (Sub16 (Add16 z i:(Const16 )) x) + // cond: (z.Op != OpConst16 && x.Op != OpConst16) + // result: (Add16 i (Sub16 z x)) + for { + if v_0.Op != OpAdd16 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z := v_0_0 + i := v_0_1 + if i.Op != OpConst16 { + continue + } + t := i.Type + x := v_1 + if !(z.Op != OpConst16 && x.Op != OpConst16) { + continue + } + v.reset(OpAdd16) + v0 := b.NewValue0(v.Pos, OpSub16, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return 
true + } + break + } + // match: (Sub16 (Const16 [c]) (Sub16 (Const16 [d]) x)) + // result: (Add16 (Const16 [c-d]) x) + for { + if v_0.Op != OpConst16 { + break + } + t := v_0.Type + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpSub16 { + break + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + break + } + d := auxIntToInt16(v_1_0.AuxInt) + v.reset(OpAdd16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(c - d) + v.AddArg2(v0, x) + return true + } + // match: (Sub16 (Const16 [c]) (Add16 (Const16 [d]) x)) + // result: (Sub16 (Const16 [c-d]) x) + for { + if v_0.Op != OpConst16 { + break + } + t := v_0.Type + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpAdd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + continue + } + d := auxIntToInt16(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpSub16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(c - d) + v.AddArg2(v0, x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpSub32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Sub32 (Const32 [c]) (Const32 [d])) + // result: (Const32 [c-d]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + break + } + d := auxIntToInt32(v_1.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(c - d) + return true + } + // match: (Sub32 x (Const32 [c])) + // cond: x.Op != OpConst32 + // result: (Add32 (Const32 [-c]) x) + for { + x := v_0 + if v_1.Op != OpConst32 { + break + } + t := v_1.Type + c := auxIntToInt32(v_1.AuxInt) + if !(x.Op != OpConst32) { + break + } + v.reset(OpAdd32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(-c) + v.AddArg2(v0, x) + return true + } + // match: (Sub32 (Mul32 x y) (Mul32 x z)) + // result: 
(Mul32 x (Sub32 y z)) + for { + t := v.Type + if v_0.Op != OpMul32 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if v_1.Op != OpMul32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + z := v_1_1 + v.reset(OpMul32) + v0 := b.NewValue0(v.Pos, OpSub32, t) + v0.AddArg2(y, z) + v.AddArg2(x, v0) + return true + } + } + break + } + // match: (Sub32 x x) + // result: (Const32 [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (Sub32 (Neg32 x) (Com32 x)) + // result: (Const32 [1]) + for { + if v_0.Op != OpNeg32 { + break + } + x := v_0.Args[0] + if v_1.Op != OpCom32 || x != v_1.Args[0] { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(1) + return true + } + // match: (Sub32 (Com32 x) (Neg32 x)) + // result: (Const32 [-1]) + for { + if v_0.Op != OpCom32 { + break + } + x := v_0.Args[0] + if v_1.Op != OpNeg32 || x != v_1.Args[0] { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(-1) + return true + } + // match: (Sub32 (Add32 t x) (Add32 t y)) + // result: (Sub32 x y) + for { + if v_0.Op != OpAdd32 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + t := v_0_0 + x := v_0_1 + if v_1.Op != OpAdd32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if t != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpSub32) + v.AddArg2(x, y) + return true + } + } + break + } + // match: (Sub32 (Add32 x y) x) + // result: y + for { + if v_0.Op != OpAdd32 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 
:= 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if x != v_1 { + continue + } + v.copyOf(y) + return true + } + break + } + // match: (Sub32 (Add32 x y) y) + // result: x + for { + if v_0.Op != OpAdd32 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if y != v_1 { + continue + } + v.copyOf(x) + return true + } + break + } + // match: (Sub32 (Sub32 x y) x) + // result: (Neg32 y) + for { + if v_0.Op != OpSub32 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpNeg32) + v.AddArg(y) + return true + } + // match: (Sub32 x (Add32 x y)) + // result: (Neg32 y) + for { + x := v_0 + if v_1.Op != OpAdd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpNeg32) + v.AddArg(y) + return true + } + break + } + // match: (Sub32 x (Sub32 i:(Const32 ) z)) + // cond: (z.Op != OpConst32 && x.Op != OpConst32) + // result: (Sub32 (Add32 x z) i) + for { + x := v_0 + if v_1.Op != OpSub32 { + break + } + z := v_1.Args[1] + i := v_1.Args[0] + if i.Op != OpConst32 { + break + } + t := i.Type + if !(z.Op != OpConst32 && x.Op != OpConst32) { + break + } + v.reset(OpSub32) + v0 := b.NewValue0(v.Pos, OpAdd32, t) + v0.AddArg2(x, z) + v.AddArg2(v0, i) + return true + } + // match: (Sub32 x (Add32 z i:(Const32 ))) + // cond: (z.Op != OpConst32 && x.Op != OpConst32) + // result: (Sub32 (Sub32 x z) i) + for { + x := v_0 + if v_1.Op != OpAdd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z := v_1_0 + i := v_1_1 + if i.Op != OpConst32 { + continue + } + t := i.Type + if !(z.Op != OpConst32 && x.Op != OpConst32) { + continue + } + v.reset(OpSub32) + v0 
:= b.NewValue0(v.Pos, OpSub32, t) + v0.AddArg2(x, z) + v.AddArg2(v0, i) + return true + } + break + } + // match: (Sub32 (Sub32 i:(Const32 ) z) x) + // cond: (z.Op != OpConst32 && x.Op != OpConst32) + // result: (Sub32 i (Add32 z x)) + for { + if v_0.Op != OpSub32 { + break + } + z := v_0.Args[1] + i := v_0.Args[0] + if i.Op != OpConst32 { + break + } + t := i.Type + x := v_1 + if !(z.Op != OpConst32 && x.Op != OpConst32) { + break + } + v.reset(OpSub32) + v0 := b.NewValue0(v.Pos, OpAdd32, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + // match: (Sub32 (Add32 z i:(Const32 )) x) + // cond: (z.Op != OpConst32 && x.Op != OpConst32) + // result: (Add32 i (Sub32 z x)) + for { + if v_0.Op != OpAdd32 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z := v_0_0 + i := v_0_1 + if i.Op != OpConst32 { + continue + } + t := i.Type + x := v_1 + if !(z.Op != OpConst32 && x.Op != OpConst32) { + continue + } + v.reset(OpAdd32) + v0 := b.NewValue0(v.Pos, OpSub32, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + break + } + // match: (Sub32 (Const32 [c]) (Sub32 (Const32 [d]) x)) + // result: (Add32 (Const32 [c-d]) x) + for { + if v_0.Op != OpConst32 { + break + } + t := v_0.Type + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpSub32 { + break + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 || v_1_0.Type != t { + break + } + d := auxIntToInt32(v_1_0.AuxInt) + v.reset(OpAdd32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(c - d) + v.AddArg2(v0, x) + return true + } + // match: (Sub32 (Const32 [c]) (Add32 (Const32 [d]) x)) + // result: (Sub32 (Const32 [c-d]) x) + for { + if v_0.Op != OpConst32 { + break + } + t := v_0.Type + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpAdd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, 
v_1_0 { + if v_1_0.Op != OpConst32 || v_1_0.Type != t { + continue + } + d := auxIntToInt32(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpSub32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(c - d) + v.AddArg2(v0, x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpSub32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Sub32F (Const32F [c]) (Const32F [d])) + // cond: c-d == c-d + // result: (Const32F [c-d]) + for { + if v_0.Op != OpConst32F { + break + } + c := auxIntToFloat32(v_0.AuxInt) + if v_1.Op != OpConst32F { + break + } + d := auxIntToFloat32(v_1.AuxInt) + if !(c-d == c-d) { + break + } + v.reset(OpConst32F) + v.AuxInt = float32ToAuxInt(c - d) + return true + } + return false +} +func rewriteValuegeneric_OpSub64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Sub64 (Const64 [c]) (Const64 [d])) + // result: (Const64 [c-d]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + break + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(c - d) + return true + } + // match: (Sub64 x (Const64 [c])) + // cond: x.Op != OpConst64 + // result: (Add64 (Const64 [-c]) x) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + c := auxIntToInt64(v_1.AuxInt) + if !(x.Op != OpConst64) { + break + } + v.reset(OpAdd64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(-c) + v.AddArg2(v0, x) + return true + } + // match: (Sub64 (Mul64 x y) (Mul64 x z)) + // result: (Mul64 x (Sub64 y z)) + for { + t := v.Type + if v_0.Op != OpMul64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if v_1.Op != OpMul64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, 
v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + z := v_1_1 + v.reset(OpMul64) + v0 := b.NewValue0(v.Pos, OpSub64, t) + v0.AddArg2(y, z) + v.AddArg2(x, v0) + return true + } + } + break + } + // match: (Sub64 x x) + // result: (Const64 [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Sub64 (Neg64 x) (Com64 x)) + // result: (Const64 [1]) + for { + if v_0.Op != OpNeg64 { + break + } + x := v_0.Args[0] + if v_1.Op != OpCom64 || x != v_1.Args[0] { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (Sub64 (Com64 x) (Neg64 x)) + // result: (Const64 [-1]) + for { + if v_0.Op != OpCom64 { + break + } + x := v_0.Args[0] + if v_1.Op != OpNeg64 || x != v_1.Args[0] { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(-1) + return true + } + // match: (Sub64 (Add64 t x) (Add64 t y)) + // result: (Sub64 x y) + for { + if v_0.Op != OpAdd64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + t := v_0_0 + x := v_0_1 + if v_1.Op != OpAdd64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if t != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpSub64) + v.AddArg2(x, y) + return true + } + } + break + } + // match: (Sub64 (Add64 x y) x) + // result: y + for { + if v_0.Op != OpAdd64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if x != v_1 { + continue + } + v.copyOf(y) + return true + } + break + } + // match: (Sub64 (Add64 x y) y) + // result: x + for { + if v_0.Op != OpAdd64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := 
v_0_0 + y := v_0_1 + if y != v_1 { + continue + } + v.copyOf(x) + return true + } + break + } + // match: (Sub64 (Sub64 x y) x) + // result: (Neg64 y) + for { + if v_0.Op != OpSub64 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpNeg64) + v.AddArg(y) + return true + } + // match: (Sub64 x (Add64 x y)) + // result: (Neg64 y) + for { + x := v_0 + if v_1.Op != OpAdd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpNeg64) + v.AddArg(y) + return true + } + break + } + // match: (Sub64 x (Sub64 i:(Const64 ) z)) + // cond: (z.Op != OpConst64 && x.Op != OpConst64) + // result: (Sub64 (Add64 x z) i) + for { + x := v_0 + if v_1.Op != OpSub64 { + break + } + z := v_1.Args[1] + i := v_1.Args[0] + if i.Op != OpConst64 { + break + } + t := i.Type + if !(z.Op != OpConst64 && x.Op != OpConst64) { + break + } + v.reset(OpSub64) + v0 := b.NewValue0(v.Pos, OpAdd64, t) + v0.AddArg2(x, z) + v.AddArg2(v0, i) + return true + } + // match: (Sub64 x (Add64 z i:(Const64 ))) + // cond: (z.Op != OpConst64 && x.Op != OpConst64) + // result: (Sub64 (Sub64 x z) i) + for { + x := v_0 + if v_1.Op != OpAdd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z := v_1_0 + i := v_1_1 + if i.Op != OpConst64 { + continue + } + t := i.Type + if !(z.Op != OpConst64 && x.Op != OpConst64) { + continue + } + v.reset(OpSub64) + v0 := b.NewValue0(v.Pos, OpSub64, t) + v0.AddArg2(x, z) + v.AddArg2(v0, i) + return true + } + break + } + // match: (Sub64 (Sub64 i:(Const64 ) z) x) + // cond: (z.Op != OpConst64 && x.Op != OpConst64) + // result: (Sub64 i (Add64 z x)) + for { + if v_0.Op != OpSub64 { + break + } + z := v_0.Args[1] + i := v_0.Args[0] + if i.Op != OpConst64 { + break + } + t := i.Type + x := v_1 + if !(z.Op 
!= OpConst64 && x.Op != OpConst64) { + break + } + v.reset(OpSub64) + v0 := b.NewValue0(v.Pos, OpAdd64, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + // match: (Sub64 (Add64 z i:(Const64 )) x) + // cond: (z.Op != OpConst64 && x.Op != OpConst64) + // result: (Add64 i (Sub64 z x)) + for { + if v_0.Op != OpAdd64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z := v_0_0 + i := v_0_1 + if i.Op != OpConst64 { + continue + } + t := i.Type + x := v_1 + if !(z.Op != OpConst64 && x.Op != OpConst64) { + continue + } + v.reset(OpAdd64) + v0 := b.NewValue0(v.Pos, OpSub64, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + break + } + // match: (Sub64 (Const64 [c]) (Sub64 (Const64 [d]) x)) + // result: (Add64 (Const64 [c-d]) x) + for { + if v_0.Op != OpConst64 { + break + } + t := v_0.Type + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpSub64 { + break + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + break + } + d := auxIntToInt64(v_1_0.AuxInt) + v.reset(OpAdd64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c - d) + v.AddArg2(v0, x) + return true + } + // match: (Sub64 (Const64 [c]) (Add64 (Const64 [d]) x)) + // result: (Sub64 (Const64 [c-d]) x) + for { + if v_0.Op != OpConst64 { + break + } + t := v_0.Type + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpAdd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := auxIntToInt64(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpSub64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c - d) + v.AddArg2(v0, x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpSub64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Sub64F 
(Const64F [c]) (Const64F [d])) + // cond: c-d == c-d + // result: (Const64F [c-d]) + for { + if v_0.Op != OpConst64F { + break + } + c := auxIntToFloat64(v_0.AuxInt) + if v_1.Op != OpConst64F { + break + } + d := auxIntToFloat64(v_1.AuxInt) + if !(c-d == c-d) { + break + } + v.reset(OpConst64F) + v.AuxInt = float64ToAuxInt(c - d) + return true + } + return false +} +func rewriteValuegeneric_OpSub8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Sub8 (Const8 [c]) (Const8 [d])) + // result: (Const8 [c-d]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst8 { + break + } + d := auxIntToInt8(v_1.AuxInt) + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(c - d) + return true + } + // match: (Sub8 x (Const8 [c])) + // cond: x.Op != OpConst8 + // result: (Add8 (Const8 [-c]) x) + for { + x := v_0 + if v_1.Op != OpConst8 { + break + } + t := v_1.Type + c := auxIntToInt8(v_1.AuxInt) + if !(x.Op != OpConst8) { + break + } + v.reset(OpAdd8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(-c) + v.AddArg2(v0, x) + return true + } + // match: (Sub8 (Mul8 x y) (Mul8 x z)) + // result: (Mul8 x (Sub8 y z)) + for { + t := v.Type + if v_0.Op != OpMul8 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if v_1.Op != OpMul8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + z := v_1_1 + v.reset(OpMul8) + v0 := b.NewValue0(v.Pos, OpSub8, t) + v0.AddArg2(y, z) + v.AddArg2(x, v0) + return true + } + } + break + } + // match: (Sub8 x x) + // result: (Const8 [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + // match: (Sub8 (Neg8 x) (Com8 x)) + // result: (Const8 [1]) + for { 
+ if v_0.Op != OpNeg8 { + break + } + x := v_0.Args[0] + if v_1.Op != OpCom8 || x != v_1.Args[0] { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(1) + return true + } + // match: (Sub8 (Com8 x) (Neg8 x)) + // result: (Const8 [-1]) + for { + if v_0.Op != OpCom8 { + break + } + x := v_0.Args[0] + if v_1.Op != OpNeg8 || x != v_1.Args[0] { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(-1) + return true + } + // match: (Sub8 (Add8 t x) (Add8 t y)) + // result: (Sub8 x y) + for { + if v_0.Op != OpAdd8 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + t := v_0_0 + x := v_0_1 + if v_1.Op != OpAdd8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if t != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpSub8) + v.AddArg2(x, y) + return true + } + } + break + } + // match: (Sub8 (Add8 x y) x) + // result: y + for { + if v_0.Op != OpAdd8 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if x != v_1 { + continue + } + v.copyOf(y) + return true + } + break + } + // match: (Sub8 (Add8 x y) y) + // result: x + for { + if v_0.Op != OpAdd8 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if y != v_1 { + continue + } + v.copyOf(x) + return true + } + break + } + // match: (Sub8 (Sub8 x y) x) + // result: (Neg8 y) + for { + if v_0.Op != OpSub8 { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + if x != v_1 { + break + } + v.reset(OpNeg8) + v.AddArg(y) + return true + } + // match: (Sub8 x (Add8 x y)) + // result: (Neg8 y) + for { + x := v_0 + if v_1.Op != OpAdd8 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + 
v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpNeg8) + v.AddArg(y) + return true + } + break + } + // match: (Sub8 x (Sub8 i:(Const8 ) z)) + // cond: (z.Op != OpConst8 && x.Op != OpConst8) + // result: (Sub8 (Add8 x z) i) + for { + x := v_0 + if v_1.Op != OpSub8 { + break + } + z := v_1.Args[1] + i := v_1.Args[0] + if i.Op != OpConst8 { + break + } + t := i.Type + if !(z.Op != OpConst8 && x.Op != OpConst8) { + break + } + v.reset(OpSub8) + v0 := b.NewValue0(v.Pos, OpAdd8, t) + v0.AddArg2(x, z) + v.AddArg2(v0, i) + return true + } + // match: (Sub8 x (Add8 z i:(Const8 ))) + // cond: (z.Op != OpConst8 && x.Op != OpConst8) + // result: (Sub8 (Sub8 x z) i) + for { + x := v_0 + if v_1.Op != OpAdd8 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + z := v_1_0 + i := v_1_1 + if i.Op != OpConst8 { + continue + } + t := i.Type + if !(z.Op != OpConst8 && x.Op != OpConst8) { + continue + } + v.reset(OpSub8) + v0 := b.NewValue0(v.Pos, OpSub8, t) + v0.AddArg2(x, z) + v.AddArg2(v0, i) + return true + } + break + } + // match: (Sub8 (Sub8 i:(Const8 ) z) x) + // cond: (z.Op != OpConst8 && x.Op != OpConst8) + // result: (Sub8 i (Add8 z x)) + for { + if v_0.Op != OpSub8 { + break + } + z := v_0.Args[1] + i := v_0.Args[0] + if i.Op != OpConst8 { + break + } + t := i.Type + x := v_1 + if !(z.Op != OpConst8 && x.Op != OpConst8) { + break + } + v.reset(OpSub8) + v0 := b.NewValue0(v.Pos, OpAdd8, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + // match: (Sub8 (Add8 z i:(Const8 )) x) + // cond: (z.Op != OpConst8 && x.Op != OpConst8) + // result: (Add8 i (Sub8 z x)) + for { + if v_0.Op != OpAdd8 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + z := v_0_0 + i := v_0_1 + if i.Op != 
OpConst8 { + continue + } + t := i.Type + x := v_1 + if !(z.Op != OpConst8 && x.Op != OpConst8) { + continue + } + v.reset(OpAdd8) + v0 := b.NewValue0(v.Pos, OpSub8, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + break + } + // match: (Sub8 (Const8 [c]) (Sub8 (Const8 [d]) x)) + // result: (Add8 (Const8 [c-d]) x) + for { + if v_0.Op != OpConst8 { + break + } + t := v_0.Type + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpSub8 { + break + } + x := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + break + } + d := auxIntToInt8(v_1_0.AuxInt) + v.reset(OpAdd8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(c - d) + v.AddArg2(v0, x) + return true + } + // match: (Sub8 (Const8 [c]) (Add8 (Const8 [d]) x)) + // result: (Sub8 (Const8 [c-d]) x) + for { + if v_0.Op != OpConst8 { + break + } + t := v_0.Type + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpAdd8 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + continue + } + d := auxIntToInt8(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpSub8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(c - d) + v.AddArg2(v0, x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpTrunc(v *Value) bool { + v_0 := v.Args[0] + // match: (Trunc (Const64F [c])) + // result: (Const64F [math.Trunc(c)]) + for { + if v_0.Op != OpConst64F { + break + } + c := auxIntToFloat64(v_0.AuxInt) + v.reset(OpConst64F) + v.AuxInt = float64ToAuxInt(math.Trunc(c)) + return true + } + return false +} +func rewriteValuegeneric_OpTrunc16to8(v *Value) bool { + v_0 := v.Args[0] + // match: (Trunc16to8 (Const16 [c])) + // result: (Const8 [int8(c)]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(int8(c)) + return true + } + // match: (Trunc16to8 
(ZeroExt8to16 x)) + // result: x + for { + if v_0.Op != OpZeroExt8to16 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Trunc16to8 (SignExt8to16 x)) + // result: x + for { + if v_0.Op != OpSignExt8to16 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Trunc16to8 (And16 (Const16 [y]) x)) + // cond: y&0xFF == 0xFF + // result: (Trunc16to8 x) + for { + if v_0.Op != OpAnd16 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst16 { + continue + } + y := auxIntToInt16(v_0_0.AuxInt) + x := v_0_1 + if !(y&0xFF == 0xFF) { + continue + } + v.reset(OpTrunc16to8) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpTrunc32to16(v *Value) bool { + v_0 := v.Args[0] + // match: (Trunc32to16 (Const32 [c])) + // result: (Const16 [int16(c)]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(int16(c)) + return true + } + // match: (Trunc32to16 (ZeroExt8to32 x)) + // result: (ZeroExt8to16 x) + for { + if v_0.Op != OpZeroExt8to32 { + break + } + x := v_0.Args[0] + v.reset(OpZeroExt8to16) + v.AddArg(x) + return true + } + // match: (Trunc32to16 (ZeroExt16to32 x)) + // result: x + for { + if v_0.Op != OpZeroExt16to32 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Trunc32to16 (SignExt8to32 x)) + // result: (SignExt8to16 x) + for { + if v_0.Op != OpSignExt8to32 { + break + } + x := v_0.Args[0] + v.reset(OpSignExt8to16) + v.AddArg(x) + return true + } + // match: (Trunc32to16 (SignExt16to32 x)) + // result: x + for { + if v_0.Op != OpSignExt16to32 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Trunc32to16 (And32 (Const32 [y]) x)) + // cond: y&0xFFFF == 0xFFFF + // result: (Trunc32to16 x) + for { + if v_0.Op != OpAnd32 { + break + } + _ = 
v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst32 { + continue + } + y := auxIntToInt32(v_0_0.AuxInt) + x := v_0_1 + if !(y&0xFFFF == 0xFFFF) { + continue + } + v.reset(OpTrunc32to16) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpTrunc32to8(v *Value) bool { + v_0 := v.Args[0] + // match: (Trunc32to8 (Const32 [c])) + // result: (Const8 [int8(c)]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(int8(c)) + return true + } + // match: (Trunc32to8 (ZeroExt8to32 x)) + // result: x + for { + if v_0.Op != OpZeroExt8to32 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Trunc32to8 (SignExt8to32 x)) + // result: x + for { + if v_0.Op != OpSignExt8to32 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Trunc32to8 (And32 (Const32 [y]) x)) + // cond: y&0xFF == 0xFF + // result: (Trunc32to8 x) + for { + if v_0.Op != OpAnd32 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst32 { + continue + } + y := auxIntToInt32(v_0_0.AuxInt) + x := v_0_1 + if !(y&0xFF == 0xFF) { + continue + } + v.reset(OpTrunc32to8) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpTrunc64to16(v *Value) bool { + v_0 := v.Args[0] + // match: (Trunc64to16 (Const64 [c])) + // result: (Const16 [int16(c)]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(int16(c)) + return true + } + // match: (Trunc64to16 (ZeroExt8to64 x)) + // result: (ZeroExt8to16 x) + for { + if v_0.Op != OpZeroExt8to64 { + break + } + x := v_0.Args[0] + v.reset(OpZeroExt8to16) + v.AddArg(x) + return true + } + // match: (Trunc64to16 
(ZeroExt16to64 x)) + // result: x + for { + if v_0.Op != OpZeroExt16to64 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Trunc64to16 (SignExt8to64 x)) + // result: (SignExt8to16 x) + for { + if v_0.Op != OpSignExt8to64 { + break + } + x := v_0.Args[0] + v.reset(OpSignExt8to16) + v.AddArg(x) + return true + } + // match: (Trunc64to16 (SignExt16to64 x)) + // result: x + for { + if v_0.Op != OpSignExt16to64 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Trunc64to16 (And64 (Const64 [y]) x)) + // cond: y&0xFFFF == 0xFFFF + // result: (Trunc64to16 x) + for { + if v_0.Op != OpAnd64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst64 { + continue + } + y := auxIntToInt64(v_0_0.AuxInt) + x := v_0_1 + if !(y&0xFFFF == 0xFFFF) { + continue + } + v.reset(OpTrunc64to16) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpTrunc64to32(v *Value) bool { + v_0 := v.Args[0] + // match: (Trunc64to32 (Const64 [c])) + // result: (Const32 [int32(c)]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(int32(c)) + return true + } + // match: (Trunc64to32 (ZeroExt8to64 x)) + // result: (ZeroExt8to32 x) + for { + if v_0.Op != OpZeroExt8to64 { + break + } + x := v_0.Args[0] + v.reset(OpZeroExt8to32) + v.AddArg(x) + return true + } + // match: (Trunc64to32 (ZeroExt16to64 x)) + // result: (ZeroExt16to32 x) + for { + if v_0.Op != OpZeroExt16to64 { + break + } + x := v_0.Args[0] + v.reset(OpZeroExt16to32) + v.AddArg(x) + return true + } + // match: (Trunc64to32 (ZeroExt32to64 x)) + // result: x + for { + if v_0.Op != OpZeroExt32to64 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Trunc64to32 (SignExt8to64 x)) + // result: (SignExt8to32 x) + for { + if v_0.Op != OpSignExt8to64 
{ + break + } + x := v_0.Args[0] + v.reset(OpSignExt8to32) + v.AddArg(x) + return true + } + // match: (Trunc64to32 (SignExt16to64 x)) + // result: (SignExt16to32 x) + for { + if v_0.Op != OpSignExt16to64 { + break + } + x := v_0.Args[0] + v.reset(OpSignExt16to32) + v.AddArg(x) + return true + } + // match: (Trunc64to32 (SignExt32to64 x)) + // result: x + for { + if v_0.Op != OpSignExt32to64 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Trunc64to32 (And64 (Const64 [y]) x)) + // cond: y&0xFFFFFFFF == 0xFFFFFFFF + // result: (Trunc64to32 x) + for { + if v_0.Op != OpAnd64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst64 { + continue + } + y := auxIntToInt64(v_0_0.AuxInt) + x := v_0_1 + if !(y&0xFFFFFFFF == 0xFFFFFFFF) { + continue + } + v.reset(OpTrunc64to32) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpTrunc64to8(v *Value) bool { + v_0 := v.Args[0] + // match: (Trunc64to8 (Const64 [c])) + // result: (Const8 [int8(c)]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(int8(c)) + return true + } + // match: (Trunc64to8 (ZeroExt8to64 x)) + // result: x + for { + if v_0.Op != OpZeroExt8to64 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Trunc64to8 (SignExt8to64 x)) + // result: x + for { + if v_0.Op != OpSignExt8to64 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Trunc64to8 (And64 (Const64 [y]) x)) + // cond: y&0xFF == 0xFF + // result: (Trunc64to8 x) + for { + if v_0.Op != OpAnd64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst64 { + continue + } + y := auxIntToInt64(v_0_0.AuxInt) + x := v_0_1 + if !(y&0xFF == 0xFF) 
{ + continue + } + v.reset(OpTrunc64to8) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpXor16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Xor16 (Const16 [c]) (Const16 [d])) + // result: (Const16 [c^d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1.AuxInt) + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(c ^ d) + return true + } + break + } + // match: (Xor16 x x) + // result: (Const16 [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + // match: (Xor16 (Const16 [0]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (Xor16 (Com16 x) x) + // result: (Const16 [-1]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom16 { + continue + } + x := v_0.Args[0] + if x != v_1 { + continue + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(-1) + return true + } + break + } + // match: (Xor16 (Const16 [-1]) x) + // result: (Com16 x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 { + continue + } + x := v_1 + v.reset(OpCom16) + v.AddArg(x) + return true + } + break + } + // match: (Xor16 x (Xor16 x y)) + // result: y + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpXor16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.copyOf(y) + return true + } + } + break 
+ } + // match: (Xor16 (Xor16 i:(Const16 ) z) x) + // cond: (z.Op != OpConst16 && x.Op != OpConst16) + // result: (Xor16 i (Xor16 z x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpXor16 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst16 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst16 && x.Op != OpConst16) { + continue + } + v.reset(OpXor16) + v0 := b.NewValue0(v.Pos, OpXor16, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (Xor16 (Const16 [c]) (Xor16 (Const16 [d]) x)) + // result: (Xor16 (Const16 [c^d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpXor16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + continue + } + d := auxIntToInt16(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpXor16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(c ^ d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Xor16 (Lsh16x64 x z:(Const64 [c])) (Rsh16Ux64 x (Const64 [d]))) + // cond: c < 16 && d == 16-c && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLsh16x64 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { + continue + } + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh16Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 16 && d == 16-c && 
canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor16 left:(Lsh16x64 x y) right:(Rsh16Ux64 x (Sub64 (Const64 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor16 left:(Lsh16x32 x y) right:(Rsh16Ux32 x (Sub32 (Const32 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x32 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor16 left:(Lsh16x16 x y) right:(Rsh16Ux16 x (Sub16 (Const16 [16]) y))) + // cond: (shiftIsBounded(left) || 
shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x16 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor16 left:(Lsh16x8 x y) right:(Rsh16Ux8 x (Sub8 (Const8 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor16 right:(Rsh16Ux64 x y) left:(Lsh16x64 x z:(Sub64 (Const64 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux64 { + continue + } + y := right.Args[1] + x := 
right.Args[0] + left := v_1 + if left.Op != OpLsh16x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor16 right:(Rsh16Ux32 x y) left:(Lsh16x32 x z:(Sub32 (Const32 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor16 right:(Rsh16Ux16 x y) left:(Lsh16x16 x z:(Sub16 (Const16 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux16 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || 
shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor16 right:(Rsh16Ux8 x y) left:(Lsh16x8 x z:(Sub8 (Const8 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux8 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x8 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpXor32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Xor32 (Const32 [c]) (Const32 [d])) + // result: (Const32 [c^d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(c ^ d) + return true + } + break + } + // match: (Xor32 x x) + // result: (Const32 [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) + return true + } + // match: (Xor32 (Const32 [0]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (Xor32 (Com32 x) x) + // result: (Const32 [-1]) + for { + for _i0 := 0; _i0 <= 
1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom32 { + continue + } + x := v_0.Args[0] + if x != v_1 { + continue + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(-1) + return true + } + break + } + // match: (Xor32 (Const32 [-1]) x) + // result: (Com32 x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 { + continue + } + x := v_1 + v.reset(OpCom32) + v.AddArg(x) + return true + } + break + } + // match: (Xor32 x (Xor32 x y)) + // result: y + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpXor32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.copyOf(y) + return true + } + } + break + } + // match: (Xor32 (Xor32 i:(Const32 ) z) x) + // cond: (z.Op != OpConst32 && x.Op != OpConst32) + // result: (Xor32 i (Xor32 z x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpXor32 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst32 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst32 && x.Op != OpConst32) { + continue + } + v.reset(OpXor32) + v0 := b.NewValue0(v.Pos, OpXor32, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (Xor32 (Const32 [c]) (Xor32 (Const32 [d]) x)) + // result: (Xor32 (Const32 [c^d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpXor32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst32 || 
v_1_0.Type != t { + continue + } + d := auxIntToInt32(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpXor32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(c ^ d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Xor32 (Lsh32x64 x z:(Const64 [c])) (Rsh32Ux64 x (Const64 [d]))) + // cond: c < 32 && d == 32-c && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLsh32x64 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { + continue + } + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh32Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 32 && d == 32-c && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor32 left:(Lsh32x64 x y) right:(Rsh32Ux64 x (Sub64 (Const64 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh32x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor32 left:(Lsh32x32 x y) right:(Rsh32Ux32 x (Sub32 (Const32 [32]) y))) + // cond: (shiftIsBounded(left) || 
shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh32x32 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor32 left:(Lsh32x16 x y) right:(Rsh32Ux16 x (Sub16 (Const16 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh32x16 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor32 left:(Lsh32x8 x y) right:(Rsh32Ux8 x (Sub8 (Const8 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh32x8 { + continue + } + y := left.Args[1] + x := left.Args[0] 
+ right := v_1 + if right.Op != OpRsh32Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor32 right:(Rsh32Ux64 x y) left:(Lsh32x64 x z:(Sub64 (Const64 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh32Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor32 right:(Rsh32Ux32 x y) left:(Lsh32x32 x z:(Sub32 (Const32 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh32Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 32 || y != 
z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor32 right:(Rsh32Ux16 x y) left:(Lsh32x16 x z:(Sub16 (Const16 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh32Ux16 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor32 right:(Rsh32Ux8 x y) left:(Lsh32x8 x z:(Sub8 (Const8 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh32Ux8 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x8 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpXor64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: 
(Xor64 (Const64 [c]) (Const64 [d])) + // result: (Const64 [c^d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(c ^ d) + return true + } + break + } + // match: (Xor64 x x) + // result: (Const64 [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Xor64 (Const64 [0]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (Xor64 (Com64 x) x) + // result: (Const64 [-1]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom64 { + continue + } + x := v_0.Args[0] + if x != v_1 { + continue + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(-1) + return true + } + break + } + // match: (Xor64 (Const64 [-1]) x) + // result: (Com64 x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 { + continue + } + x := v_1 + v.reset(OpCom64) + v.AddArg(x) + return true + } + break + } + // match: (Xor64 x (Xor64 x y)) + // result: y + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpXor64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.copyOf(y) + return true + } + } + break + } + // match: (Xor64 (Xor64 i:(Const64 ) z) x) + // cond: (z.Op != OpConst64 && x.Op != OpConst64) + // result: (Xor64 i (Xor64 z x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpXor64 { + continue + } + 
_ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst64 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst64 && x.Op != OpConst64) { + continue + } + v.reset(OpXor64) + v0 := b.NewValue0(v.Pos, OpXor64, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (Xor64 (Const64 [c]) (Xor64 (Const64 [d]) x)) + // result: (Xor64 (Const64 [c^d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpXor64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst64 || v_1_0.Type != t { + continue + } + d := auxIntToInt64(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpXor64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c ^ d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Xor64 (Lsh64x64 x z:(Const64 [c])) (Rsh64Ux64 x (Const64 [d]))) + // cond: c < 64 && d == 64-c && canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLsh64x64 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { + continue + } + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh64Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 64 && d == 64-c && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor64 left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && 
canRotate(config, 64) + // result: (RotateLeft64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh64x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor64 left:(Lsh64x32 x y) right:(Rsh64Ux32 x (Sub32 (Const32 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh64x32 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor64 left:(Lsh64x16 x y) right:(Rsh64Ux16 x (Sub16 (Const16 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh64x16 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if 
right.Op != OpRsh64Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor64 left:(Lsh64x8 x y) right:(Rsh64Ux8 x (Sub8 (Const8 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh64x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor64 right:(Rsh64Ux64 x y) left:(Lsh64x64 x z:(Sub64 (Const64 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh64Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) 
!= 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor64 right:(Rsh64Ux32 x y) left:(Lsh64x32 x z:(Sub32 (Const32 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh64Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor64 right:(Rsh64Ux16 x y) left:(Lsh64x16 x z:(Sub16 (Const16 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh64Ux16 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor64 right:(Rsh64Ux8 x y) left:(Lsh64x8 x z:(Sub8 (Const8 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) 
&& canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh64Ux8 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x8 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpXor8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Xor8 (Const8 [c]) (Const8 [d])) + // result: (Const8 [c^d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1.AuxInt) + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(c ^ d) + return true + } + break + } + // match: (Xor8 x x) + // result: (Const8 [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(0) + return true + } + // match: (Xor8 (Const8 [0]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (Xor8 (Com8 x) x) + // result: (Const8 [-1]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom8 { + continue + } + x := v_0.Args[0] + if x != v_1 { + continue + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(-1) + return true + } + break + } + // match: (Xor8 (Const8 [-1]) x) + // result: (Com8 x) + for { + for _i0 := 0; _i0 <= 1; _i0, 
v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 { + continue + } + x := v_1 + v.reset(OpCom8) + v.AddArg(x) + return true + } + break + } + // match: (Xor8 x (Xor8 x y)) + // result: y + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpXor8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.copyOf(y) + return true + } + } + break + } + // match: (Xor8 (Xor8 i:(Const8 ) z) x) + // cond: (z.Op != OpConst8 && x.Op != OpConst8) + // result: (Xor8 i (Xor8 z x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpXor8 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst8 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst8 && x.Op != OpConst8) { + continue + } + v.reset(OpXor8) + v0 := b.NewValue0(v.Pos, OpXor8, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (Xor8 (Const8 [c]) (Xor8 (Const8 [d]) x)) + // result: (Xor8 (Const8 [c^d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { + continue + } + t := v_0.Type + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpXor8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + continue + } + d := auxIntToInt8(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpXor8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(c ^ d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Xor8 (Lsh8x64 x z:(Const64 [c])) (Rsh8Ux64 x (Const64 [d]))) + // cond: c < 8 && d == 8-c && 
canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLsh8x64 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { + continue + } + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh8Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 8 && d == 8-c && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor8 left:(Lsh8x64 x y) right:(Rsh8Ux64 x (Sub64 (Const64 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor8 left:(Lsh8x32 x y) right:(Rsh8Ux32 x (Sub32 (Const32 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x32 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := 
right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor8 left:(Lsh8x16 x y) right:(Rsh8Ux16 x (Sub16 (Const16 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x16 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor8 left:(Lsh8x8 x y) right:(Rsh8Ux8 x (Sub8 (Const8 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && 
canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor8 right:(Rsh8Ux64 x y) left:(Lsh8x64 x z:(Sub64 (Const64 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor8 right:(Rsh8Ux32 x y) left:(Lsh8x32 x z:(Sub32 (Const32 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor8 right:(Rsh8Ux16 x y) left:(Lsh8x16 x z:(Sub16 (Const16 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 
= _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux16 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor8 right:(Rsh8Ux8 x y) left:(Lsh8x8 x z:(Sub8 (Const8 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux8 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x8 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpZero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call)) + // cond: isSameCall(call.Aux, "runtime.newobject") + // result: mem + for { + if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + call := v_0.Args[0] + if call.Op != OpStaticLECall || len(call.Args) != 2 { + break + } + mem := v_1 + if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isSameCall(call.Aux, "runtime.newobject")) { + break + } + 
v.copyOf(mem) + return true + } + // match: (Zero {t1} [n] p1 store:(Store {t2} (OffPtr [o2] p2) _ mem)) + // cond: isSamePtr(p1, p2) && store.Uses == 1 && n >= o2 + t2.Size() && clobber(store) + // result: (Zero {t1} [n] p1 mem) + for { + n := auxIntToInt64(v.AuxInt) + t1 := auxToType(v.Aux) + p1 := v_0 + store := v_1 + if store.Op != OpStore { + break + } + t2 := auxToType(store.Aux) + mem := store.Args[2] + store_0 := store.Args[0] + if store_0.Op != OpOffPtr { + break + } + o2 := auxIntToInt64(store_0.AuxInt) + p2 := store_0.Args[0] + if !(isSamePtr(p1, p2) && store.Uses == 1 && n >= o2+t2.Size() && clobber(store)) { + break + } + v.reset(OpZero) + v.AuxInt = int64ToAuxInt(n) + v.Aux = typeToAux(t1) + v.AddArg2(p1, mem) + return true + } + // match: (Zero {t} [n] dst1 move:(Move {t} [n] dst2 _ mem)) + // cond: move.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move) + // result: (Zero {t} [n] dst1 mem) + for { + n := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst1 := v_0 + move := v_1 + if move.Op != OpMove || auxIntToInt64(move.AuxInt) != n || auxToType(move.Aux) != t { + break + } + mem := move.Args[2] + dst2 := move.Args[0] + if !(move.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move)) { + break + } + v.reset(OpZero) + v.AuxInt = int64ToAuxInt(n) + v.Aux = typeToAux(t) + v.AddArg2(dst1, mem) + return true + } + // match: (Zero {t} [n] dst1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem))) + // cond: move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move, vardef) + // result: (Zero {t} [n] dst1 (VarDef {x} mem)) + for { + n := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst1 := v_0 + vardef := v_1 + if vardef.Op != OpVarDef { + break + } + x := auxToSym(vardef.Aux) + move := vardef.Args[0] + if move.Op != OpMove || auxIntToInt64(move.AuxInt) != n || auxToType(move.Aux) != t { + break + } + mem := move.Args[2] + dst2 := move.Args[0] + if !(move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move, 
vardef)) { + break + } + v.reset(OpZero) + v.AuxInt = int64ToAuxInt(n) + v.Aux = typeToAux(t) + v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem) + v0.Aux = symToAux(x) + v0.AddArg(mem) + v.AddArg2(dst1, v0) + return true + } + // match: (Zero {t} [s] dst1 zero:(Zero {t} [s] dst2 _)) + // cond: isSamePtr(dst1, dst2) + // result: zero + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst1 := v_0 + zero := v_1 + if zero.Op != OpZero || auxIntToInt64(zero.AuxInt) != s || auxToType(zero.Aux) != t { + break + } + dst2 := zero.Args[0] + if !(isSamePtr(dst1, dst2)) { + break + } + v.copyOf(zero) + return true + } + // match: (Zero {t} [s] dst1 vardef:(VarDef (Zero {t} [s] dst2 _))) + // cond: isSamePtr(dst1, dst2) + // result: vardef + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst1 := v_0 + vardef := v_1 + if vardef.Op != OpVarDef { + break + } + vardef_0 := vardef.Args[0] + if vardef_0.Op != OpZero || auxIntToInt64(vardef_0.AuxInt) != s || auxToType(vardef_0.Aux) != t { + break + } + dst2 := vardef_0.Args[0] + if !(isSamePtr(dst1, dst2)) { + break + } + v.copyOf(vardef) + return true + } + return false +} +func rewriteValuegeneric_OpZeroExt16to32(v *Value) bool { + v_0 := v.Args[0] + // match: (ZeroExt16to32 (Const16 [c])) + // result: (Const32 [int32(uint16(c))]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(int32(uint16(c))) + return true + } + // match: (ZeroExt16to32 (Trunc32to16 x:(Rsh32Ux64 _ (Const64 [s])))) + // cond: s >= 16 + // result: x + for { + if v_0.Op != OpTrunc32to16 { + break + } + x := v_0.Args[0] + if x.Op != OpRsh32Ux64 { + break + } + _ = x.Args[1] + x_1 := x.Args[1] + if x_1.Op != OpConst64 { + break + } + s := auxIntToInt64(x_1.AuxInt) + if !(s >= 16) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValuegeneric_OpZeroExt16to64(v *Value) bool { + v_0 := v.Args[0] + // match: (ZeroExt16to64 (Const16 
[c])) + // result: (Const64 [int64(uint16(c))]) + for { + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(int64(uint16(c))) + return true + } + // match: (ZeroExt16to64 (Trunc64to16 x:(Rsh64Ux64 _ (Const64 [s])))) + // cond: s >= 48 + // result: x + for { + if v_0.Op != OpTrunc64to16 { + break + } + x := v_0.Args[0] + if x.Op != OpRsh64Ux64 { + break + } + _ = x.Args[1] + x_1 := x.Args[1] + if x_1.Op != OpConst64 { + break + } + s := auxIntToInt64(x_1.AuxInt) + if !(s >= 48) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValuegeneric_OpZeroExt32to64(v *Value) bool { + v_0 := v.Args[0] + // match: (ZeroExt32to64 (Const32 [c])) + // result: (Const64 [int64(uint32(c))]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(int64(uint32(c))) + return true + } + // match: (ZeroExt32to64 (Trunc64to32 x:(Rsh64Ux64 _ (Const64 [s])))) + // cond: s >= 32 + // result: x + for { + if v_0.Op != OpTrunc64to32 { + break + } + x := v_0.Args[0] + if x.Op != OpRsh64Ux64 { + break + } + _ = x.Args[1] + x_1 := x.Args[1] + if x_1.Op != OpConst64 { + break + } + s := auxIntToInt64(x_1.AuxInt) + if !(s >= 32) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValuegeneric_OpZeroExt8to16(v *Value) bool { + v_0 := v.Args[0] + // match: (ZeroExt8to16 (Const8 [c])) + // result: (Const16 [int16( uint8(c))]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(int16(uint8(c))) + return true + } + // match: (ZeroExt8to16 (Trunc16to8 x:(Rsh16Ux64 _ (Const64 [s])))) + // cond: s >= 8 + // result: x + for { + if v_0.Op != OpTrunc16to8 { + break + } + x := v_0.Args[0] + if x.Op != OpRsh16Ux64 { + break + } + _ = x.Args[1] + x_1 := x.Args[1] + if x_1.Op != OpConst64 { + break + } + s := auxIntToInt64(x_1.AuxInt) + if !(s >= 8) { + 
break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValuegeneric_OpZeroExt8to32(v *Value) bool { + v_0 := v.Args[0] + // match: (ZeroExt8to32 (Const8 [c])) + // result: (Const32 [int32( uint8(c))]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(int32(uint8(c))) + return true + } + // match: (ZeroExt8to32 (Trunc32to8 x:(Rsh32Ux64 _ (Const64 [s])))) + // cond: s >= 24 + // result: x + for { + if v_0.Op != OpTrunc32to8 { + break + } + x := v_0.Args[0] + if x.Op != OpRsh32Ux64 { + break + } + _ = x.Args[1] + x_1 := x.Args[1] + if x_1.Op != OpConst64 { + break + } + s := auxIntToInt64(x_1.AuxInt) + if !(s >= 24) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValuegeneric_OpZeroExt8to64(v *Value) bool { + v_0 := v.Args[0] + // match: (ZeroExt8to64 (Const8 [c])) + // result: (Const64 [int64( uint8(c))]) + for { + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(int64(uint8(c))) + return true + } + // match: (ZeroExt8to64 (Trunc64to8 x:(Rsh64Ux64 _ (Const64 [s])))) + // cond: s >= 56 + // result: x + for { + if v_0.Op != OpTrunc64to8 { + break + } + x := v_0.Args[0] + if x.Op != OpRsh64Ux64 { + break + } + _ = x.Args[1] + x_1 := x.Args[1] + if x_1.Op != OpConst64 { + break + } + s := auxIntToInt64(x_1.AuxInt) + if !(s >= 56) { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteBlockgeneric(b *Block) bool { + switch b.Kind { + case BlockIf: + // match: (If (Not cond) yes no) + // result: (If cond no yes) + for b.Controls[0].Op == OpNot { + v_0 := b.Controls[0] + cond := v_0.Args[0] + b.resetWithControl(BlockIf, cond) + b.swapSuccessors() + return true + } + // match: (If (ConstBool [c]) yes no) + // cond: c + // result: (First yes no) + for b.Controls[0].Op == OpConstBool { + v_0 := b.Controls[0] + c := auxIntToBool(v_0.AuxInt) + if !(c) { + break + } + 
b.Reset(BlockFirst) + return true + } + // match: (If (ConstBool [c]) yes no) + // cond: !c + // result: (First no yes) + for b.Controls[0].Op == OpConstBool { + v_0 := b.Controls[0] + c := auxIntToBool(v_0.AuxInt) + if !(!c) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sccp.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sccp.go new file mode 100644 index 0000000000000000000000000000000000000000..77a6f509618af659efa1e715db7ab4f0823dbaa5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sccp.go @@ -0,0 +1,585 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "fmt" +) + +// ---------------------------------------------------------------------------- +// Sparse Conditional Constant Propagation +// +// Described in +// Mark N. Wegman, F. Kenneth Zadeck: Constant Propagation with Conditional Branches. +// TOPLAS 1991. +// +// This algorithm uses three level lattice for SSA value +// +// Top undefined +// / | \ +// .. 1 2 3 .. constant +// \ | / +// Bottom not constant +// +// It starts with optimistically assuming that all SSA values are initially Top +// and then propagates constant facts only along reachable control flow paths. +// Since some basic blocks are not visited yet, corresponding inputs of phi become +// Top, we use the meet(phi) to compute its lattice. +// +// Top ∩ any = any +// Bottom ∩ any = Bottom +// ConstantA ∩ ConstantA = ConstantA +// ConstantA ∩ ConstantB = Bottom +// +// Each lattice value is lowered most twice(Top to Constant, Constant to Bottom) +// due to lattice depth, resulting in a fast convergence speed of the algorithm. 
+// In this way, sccp can discover optimization opportunities that cannot be found +// by just combining constant folding and constant propagation and dead code +// elimination separately. + +// Three level lattice holds compile time knowledge about SSA value +const ( + top int8 = iota // undefined + constant // constant + bottom // not a constant +) + +type lattice struct { + tag int8 // lattice type + val *Value // constant value +} + +type worklist struct { + f *Func // the target function to be optimized out + edges []Edge // propagate constant facts through edges + uses []*Value // re-visiting set + visited map[Edge]bool // visited edges + latticeCells map[*Value]lattice // constant lattices + defUse map[*Value][]*Value // def-use chains for some values + defBlock map[*Value][]*Block // use blocks of def + visitedBlock []bool // visited block +} + +// sccp stands for sparse conditional constant propagation, it propagates constants +// through CFG conditionally and applies constant folding, constant replacement and +// dead code elimination all together. 
// sccp runs the sparse conditional constant propagation worklist algorithm
// over f: it alternates between draining the CFG-edge worklist (discovering
// newly reachable blocks) and the SSA-value worklist (re-evaluating lattice
// cells whose inputs changed), then rewrites proven constants into the IR.
func sccp(f *Func) {
	var t worklist
	t.f = f
	t.edges = make([]Edge, 0)
	t.visited = make(map[Edge]bool)
	// Seed with the entry edge; facts flow only along edges proven reachable.
	t.edges = append(t.edges, Edge{f.Entry, 0})
	t.defUse = make(map[*Value][]*Value)
	t.defBlock = make(map[*Value][]*Block)
	t.latticeCells = make(map[*Value]lattice)
	t.visitedBlock = f.Cache.allocBoolSlice(f.NumBlocks())
	defer f.Cache.freeBoolSlice(t.visitedBlock)

	// build it early since we rely heavily on the def-use chain later
	t.buildDefUses()

	// pick up either an edge or an SSA value from the worklist and process it
	for {
		if len(t.edges) > 0 {
			edge := t.edges[0]
			t.edges = t.edges[1:]
			if _, exist := t.visited[edge]; !exist {
				dest := edge.b
				destVisited := t.visitedBlock[dest.ID]

				// mark edge as visited
				t.visited[edge] = true
				t.visitedBlock[dest.ID] = true
				for _, val := range dest.Values {
					// Phis must be re-evaluated on every newly visited incoming
					// edge; other values only on the block's first visit.
					if val.Op == OpPhi || !destVisited {
						t.visitValue(val)
					}
				}
				// propagate constant facts through the CFG, taking the
				// condition test into account
				if !destVisited {
					t.propagate(dest)
				}
			}
			continue
		}
		if len(t.uses) > 0 {
			use := t.uses[0]
			t.uses = t.uses[1:]
			t.visitValue(use)
			continue
		}
		break
	}

	// apply optimizations based on discovered constants
	constCnt, rewireCnt := t.replaceConst()
	if f.pass.debug > 0 {
		if constCnt > 0 || rewireCnt > 0 {
			fmt.Printf("Phase SCCP for %v : %v constants, %v dce\n", f.Name, constCnt, rewireCnt)
		}
	}
}

// equals reports whether two lattice cells denote the same fact. Two constant
// cells may hold distinct *Value nodes yet still be equal: they match when
// their op and auxInt payload agree.
func equals(a, b lattice) bool {
	if a == b {
		// fast path
		return true
	}
	if a.tag != b.tag {
		return false
	}
	if a.tag == constant {
		// The same constant content may live in different *Values, so
		// compare op and auxInt instead of pointer identity.
		v1 := a.val
		v2 := b.val
		if v1.Op == v2.Op && v1.AuxInt == v2.AuxInt {
			return true
		} else {
			return false
		}
	}
	return true
}

// possibleConst checks if a Value can be folded to a const. For those Values
// that can never become constants (e.g. StaticCall), we don't make futile efforts.
func possibleConst(val *Value) bool {
	if isConst(val) {
		return true
	}
	switch val.Op {
	case OpCopy:
		return true
	case OpPhi:
		return true
	case
		// negate
		OpNeg8, OpNeg16, OpNeg32, OpNeg64, OpNeg32F, OpNeg64F,
		OpCom8, OpCom16, OpCom32, OpCom64,
		// math
		OpFloor, OpCeil, OpTrunc, OpRoundToEven, OpSqrt,
		// conversion
		OpTrunc16to8, OpTrunc32to8, OpTrunc32to16, OpTrunc64to8,
		OpTrunc64to16, OpTrunc64to32, OpCvt32to32F, OpCvt32to64F,
		OpCvt64to32F, OpCvt64to64F, OpCvt32Fto32, OpCvt32Fto64,
		OpCvt64Fto32, OpCvt64Fto64, OpCvt32Fto64F, OpCvt64Fto32F,
		OpCvtBoolToUint8,
		OpZeroExt8to16, OpZeroExt8to32, OpZeroExt8to64, OpZeroExt16to32,
		OpZeroExt16to64, OpZeroExt32to64, OpSignExt8to16, OpSignExt8to32,
		OpSignExt8to64, OpSignExt16to32, OpSignExt16to64, OpSignExt32to64,
		// bit
		OpCtz8, OpCtz16, OpCtz32, OpCtz64,
		// mask
		OpSlicemask,
		// safety check
		OpIsNonNil,
		// not
		OpNot:
		return true
	case
		// add
		OpAdd64, OpAdd32, OpAdd16, OpAdd8,
		OpAdd32F, OpAdd64F,
		// sub
		OpSub64, OpSub32, OpSub16, OpSub8,
		OpSub32F, OpSub64F,
		// mul
		OpMul64, OpMul32, OpMul16, OpMul8,
		OpMul32F, OpMul64F,
		// div
		OpDiv32F, OpDiv64F,
		OpDiv8, OpDiv16, OpDiv32, OpDiv64,
		OpDiv8u, OpDiv16u, OpDiv32u, OpDiv64u,
		OpMod8, OpMod16, OpMod32, OpMod64,
		OpMod8u, OpMod16u, OpMod32u, OpMod64u,
		// compare
		OpEq64, OpEq32, OpEq16, OpEq8,
		OpEq32F, OpEq64F,
		OpLess64, OpLess32, OpLess16, OpLess8,
		OpLess64U, OpLess32U, OpLess16U, OpLess8U,
		OpLess32F, OpLess64F,
		OpLeq64, OpLeq32, OpLeq16, OpLeq8,
		OpLeq64U, OpLeq32U, OpLeq16U, OpLeq8U,
		OpLeq32F, OpLeq64F,
		OpEqB, OpNeqB,
		// shift
		OpLsh64x64, OpRsh64x64, OpRsh64Ux64, OpLsh32x64,
		OpRsh32x64, OpRsh32Ux64, OpLsh16x64, OpRsh16x64,
		OpRsh16Ux64, OpLsh8x64, OpRsh8x64, OpRsh8Ux64,
		// safety check
		OpIsInBounds, OpIsSliceInBounds,
		// bit
		OpAnd8, OpAnd16, OpAnd32, OpAnd64,
		OpOr8, OpOr16, OpOr32, OpOr64,
		OpXor8, OpXor16, OpXor32, OpXor64:
		return true
	default:
		return false
	}
}

// getLatticeCell returns the current lattice cell recorded for val.
// Values that can never fold are pinned at Bottom; values not yet visited
// are optimistically assumed to be Top.
func (t *worklist) getLatticeCell(val *Value) lattice {
	if !possibleConst(val) {
		// they are always worst
		return lattice{bottom, nil}
	}
	lt, exist := t.latticeCells[val]
	if !exist {
		return lattice{top, nil} // optimistically for un-visited value
	}
	return lt
}

// isConst reports whether val is already a constant-producing op.
func isConst(val *Value) bool {
	switch val.Op {
	case OpConst64, OpConst32, OpConst16, OpConst8,
		OpConstBool, OpConst32F, OpConst64F:
		return true
	default:
		return false
	}
}

// buildDefUses builds the def-use chain for some values early, because once the
// lattice of a value changes, we need to update the lattices of its uses. We
// don't need all uses: only uses that can become constants are added to the
// re-visit worklist, since no matter how many times the others are revisited,
// their lattice remains unchanged, i.e. Bottom.
func (t *worklist) buildDefUses() {
	for _, block := range t.f.Blocks {
		for _, val := range block.Values {
			for _, arg := range val.Args {
				// find its uses; only uses that can become constants are taken into account
				if possibleConst(arg) && possibleConst(val) {
					if _, exist := t.defUse[arg]; !exist {
						t.defUse[arg] = make([]*Value, 0, arg.Uses)
					}
					t.defUse[arg] = append(t.defUse[arg], val)
				}
			}
		}
		for _, ctl := range block.ControlValues() {
			// for control values that can become constants, find their use blocks
			if possibleConst(ctl) {
				t.defBlock[ctl] = append(t.defBlock[ctl], block)
			}
		}
	}
}

// addUses finds all uses of value and appends them into the worklist for further processing.
func (t *worklist) addUses(val *Value) {
	for _, use := range t.defUse[val] {
		if val == use {
			// A Phi may refer to itself as a use; ignore it to avoid
			// re-visiting the phi, for performance reasons.
			continue
		}
		t.uses = append(t.uses, use)
	}
	for _, block := range t.defBlock[val] {
		// re-propagate through blocks controlled by val, but only ones
		// already proven reachable
		if t.visitedBlock[block.ID] {
			t.propagate(block)
		}
	}
}

// meet meets all of phi arguments and computes the result lattice.
func (t *worklist) meet(val *Value) lattice {
	optimisticLt := lattice{top, nil}
	for i := 0; i < len(val.Args); i++ {
		edge := Edge{val.Block, i}
		// If the incoming edge for the phi is not visited, assume Top optimistically.
		// According to the rules of meet:
		// 	Top ∩ any = any
		// Top participates in meet() but does not affect the result, so here
		// we ignore Top and only take the other lattices into consideration.
		if _, exist := t.visited[edge]; exist {
			lt := t.getLatticeCell(val.Args[i])
			if lt.tag == constant {
				if optimisticLt.tag == top {
					optimisticLt = lt
				} else {
					if !equals(optimisticLt, lt) {
						// ConstantA ∩ ConstantB = Bottom
						return lattice{bottom, nil}
					}
				}
			} else if lt.tag == bottom {
				// Bottom ∩ any = Bottom
				return lattice{bottom, nil}
			} else {
				// Top ∩ any = any
			}
		} else {
			// Top ∩ any = any
		}
	}

	// ConstantA ∩ ConstantA = ConstantA or Top ∩ any = any
	return optimisticLt
}

// computeLattice folds val with the given constant args into a new lattice cell.
func computeLattice(f *Func, val *Value, args ...*Value) lattice {
	// In general, we need to perform constant evaluation based on constant args:
	//
	//  res := lattice{constant, nil}
	// 	switch op {
	// 	case OpAdd16:
	//		res.val = newConst(argLt1.val.AuxInt16() + argLt2.val.AuxInt16())
	// 	case OpAdd32:
	// 		res.val = newConst(argLt1.val.AuxInt32() + argLt2.val.AuxInt32())
	//	case OpDiv8:
	//		if !isDivideByZero(argLt2.val.AuxInt8()) {
	//			res.val = newConst(argLt1.val.AuxInt8() / argLt2.val.AuxInt8())
	//		}
	//  ...
	// 	}
	//
	// However, this would create a huge switch for all opcodes that can be
	// evaluated during compile time. Moreover, some operations can be evaluated
	// only if their arguments satisfy additional conditions (e.g. divide by zero).
	// It's fragile and error prone. We did a trick by reusing the existing rules
	// in generic rules for compile-time evaluation.
	// But generic rules rewrite
	// the original value; this behavior is undesired, because the lattice of values
	// may change multiple times: once it is rewritten, we lose the opportunity
	// to change it permanently, which can lead to errors. For example, we cannot
	// change a value immediately after visiting a Phi, because some of its input
	// edges may still not be visited at that moment.
	constValue := f.newValue(val.Op, val.Type, f.Entry, val.Pos)
	constValue.AddArgs(args...)
	matched := rewriteValuegeneric(constValue)
	if matched {
		if isConst(constValue) {
			return lattice{constant, constValue}
		}
	}
	// Either we can not match generic rules for the given value or it does not
	// satisfy additional constraints (e.g. divide by zero); in these cases, clean
	// up the temporary value immediately in case it is not dominated by its args.
	constValue.reset(OpInvalid)
	return lattice{bottom, nil}
}

// visitValue recomputes the lattice cell of val from the cells of its
// arguments and, if the cell changed, schedules val's uses for re-visiting.
func (t *worklist) visitValue(val *Value) {
	if !possibleConst(val) {
		// fast fail for always-worst Values, i.e. no lowering happens
		// on them; their lattices must be the initially worst Bottom.
		return
	}

	oldLt := t.getLatticeCell(val)
	defer func() {
		// re-visit all uses of value if its lattice is changed
		newLt := t.getLatticeCell(val)
		if !equals(newLt, oldLt) {
			// a cell may only descend the lattice (Top -> constant -> Bottom),
			// so each value is lowered at most twice — this bounds the algorithm
			if int8(oldLt.tag) > int8(newLt.tag) {
				t.f.Fatalf("Must lower lattice\n")
			}
			t.addUses(val)
		}
	}()

	switch val.Op {
	// they are constant values, aren't they?
	case OpConst64, OpConst32, OpConst16, OpConst8,
		OpConstBool, OpConst32F, OpConst64F: //TODO: support ConstNil ConstString etc
		t.latticeCells[val] = lattice{constant, val}
	// lattice value of copy(x) actually means lattice value of (x)
	case OpCopy:
		t.latticeCells[val] = t.getLatticeCell(val.Args[0])
	// phi should be processed specially
	case OpPhi:
		t.latticeCells[val] = t.meet(val)
	// fold 1-input operations:
	case
		// negate
		OpNeg8, OpNeg16, OpNeg32, OpNeg64, OpNeg32F, OpNeg64F,
		OpCom8, OpCom16, OpCom32, OpCom64,
		// math
		OpFloor, OpCeil, OpTrunc, OpRoundToEven, OpSqrt,
		// conversion
		OpTrunc16to8, OpTrunc32to8, OpTrunc32to16, OpTrunc64to8,
		OpTrunc64to16, OpTrunc64to32, OpCvt32to32F, OpCvt32to64F,
		OpCvt64to32F, OpCvt64to64F, OpCvt32Fto32, OpCvt32Fto64,
		OpCvt64Fto32, OpCvt64Fto64, OpCvt32Fto64F, OpCvt64Fto32F,
		OpCvtBoolToUint8,
		OpZeroExt8to16, OpZeroExt8to32, OpZeroExt8to64, OpZeroExt16to32,
		OpZeroExt16to64, OpZeroExt32to64, OpSignExt8to16, OpSignExt8to32,
		OpSignExt8to64, OpSignExt16to32, OpSignExt16to64, OpSignExt32to64,
		// bit
		OpCtz8, OpCtz16, OpCtz32, OpCtz64,
		// mask
		OpSlicemask,
		// safety check
		OpIsNonNil,
		// not
		OpNot:
		lt1 := t.getLatticeCell(val.Args[0])

		if lt1.tag == constant {
			// here we take a shortcut by reusing generic rules to fold constants
			t.latticeCells[val] = computeLattice(t.f, val, lt1.val)
		} else {
			t.latticeCells[val] = lattice{lt1.tag, nil}
		}
	// fold 2-input operations
	case
		// add
		OpAdd64, OpAdd32, OpAdd16, OpAdd8,
		OpAdd32F, OpAdd64F,
		// sub
		OpSub64, OpSub32, OpSub16, OpSub8,
		OpSub32F, OpSub64F,
		// mul
		OpMul64, OpMul32, OpMul16, OpMul8,
		OpMul32F, OpMul64F,
		// div
		OpDiv32F, OpDiv64F,
		OpDiv8, OpDiv16, OpDiv32, OpDiv64,
		OpDiv8u, OpDiv16u, OpDiv32u, OpDiv64u, //TODO: support div128u
		// mod
		OpMod8, OpMod16, OpMod32, OpMod64,
		OpMod8u, OpMod16u, OpMod32u, OpMod64u,
		// compare
		OpEq64, OpEq32, OpEq16, OpEq8,
		OpEq32F, OpEq64F,
		OpLess64, OpLess32, OpLess16, OpLess8,
		OpLess64U, OpLess32U, OpLess16U, OpLess8U,
		OpLess32F, OpLess64F,
		OpLeq64, OpLeq32, OpLeq16, OpLeq8,
		OpLeq64U, OpLeq32U, OpLeq16U, OpLeq8U,
		OpLeq32F, OpLeq64F,
		OpEqB, OpNeqB,
		// shift
		OpLsh64x64, OpRsh64x64, OpRsh64Ux64, OpLsh32x64,
		OpRsh32x64, OpRsh32Ux64, OpLsh16x64, OpRsh16x64,
		OpRsh16Ux64, OpLsh8x64, OpRsh8x64, OpRsh8Ux64,
		// safety check
		OpIsInBounds, OpIsSliceInBounds,
		// bit
		OpAnd8, OpAnd16, OpAnd32, OpAnd64,
		OpOr8, OpOr16, OpOr32, OpOr64,
		OpXor8, OpXor16, OpXor32, OpXor64:
		lt1 := t.getLatticeCell(val.Args[0])
		lt2 := t.getLatticeCell(val.Args[1])

		if lt1.tag == constant && lt2.tag == constant {
			// here we take a shortcut by reusing generic rules to fold constants
			t.latticeCells[val] = computeLattice(t.f, val, lt1.val, lt2.val)
		} else {
			if lt1.tag == bottom || lt2.tag == bottom {
				t.latticeCells[val] = lattice{bottom, nil}
			} else {
				t.latticeCells[val] = lattice{top, nil}
			}
		}
	default:
		// Any other type of value cannot be a constant; they are always worst (Bottom).
	}
}

// propagate propagates constant facts through the CFG. If the block has a
// single successor, add the successor unconditionally. If the block has
// multiple successors, only add the branch destination corresponding to the
// lattice value of the condition value.
func (t *worklist) propagate(block *Block) {
	switch block.Kind {
	case BlockExit, BlockRet, BlockRetJmp, BlockInvalid:
		// control flow ends, do nothing then
		break
	case BlockDefer:
		// we know nothing about control flow, add all branch destinations
		t.edges = append(t.edges, block.Succs...)
	case BlockFirst:
		fallthrough // always takes the first branch
	case BlockPlain:
		t.edges = append(t.edges, block.Succs[0])
	case BlockIf, BlockJumpTable:
		cond := block.ControlValues()[0]
		condLattice := t.getLatticeCell(cond)
		if condLattice.tag == bottom {
			// we know nothing about control flow, add all branch destinations
			t.edges = append(t.edges, block.Succs...)
		} else if condLattice.tag == constant {
			// add only the branch destination selected by the constant condition
			var branchIdx int64
			if block.Kind == BlockIf {
				// for If, Succs[0] is the true branch, so a true (AuxInt==1)
				// condition selects index 0 and false selects index 1
				branchIdx = 1 - condLattice.val.AuxInt
			} else {
				branchIdx = condLattice.val.AuxInt
			}
			t.edges = append(t.edges, block.Succs[branchIdx])
		} else {
			// condition value is not visited yet, don't propagate it now
		}
	default:
		t.f.Fatalf("All kind of block should be processed above.")
	}
}

// rewireSuccessor rewires the corresponding successors according to the constant
// value discovered by the previous analysis. As a result, some successors become
// unreachable and thus can be removed by a later deadcode phase.
func rewireSuccessor(block *Block, constVal *Value) bool {
	switch block.Kind {
	case BlockIf:
		// drop the not-taken edge and degrade the block to a plain jump
		block.removeEdge(int(constVal.AuxInt))
		block.Kind = BlockPlain
		block.Likely = BranchUnknown
		block.ResetControls()
		return true
	case BlockJumpTable:
		// Remove everything but the known taken branch.
		idx := int(constVal.AuxInt)
		if idx < 0 || idx >= len(block.Succs) {
			// This can only happen in unreachable code,
			// as an invariant of jump tables is that their
			// input index is in range.
			// See issue 64826.
			return false
		}
		block.swapSuccessorsByIdx(0, idx)
		for len(block.Succs) > 1 {
			block.removeEdge(1)
		}
		block.Kind = BlockPlain
		block.Likely = BranchUnknown
		block.ResetControls()
		return true
	default:
		return false
	}
}

// replaceConst will replace non-constant values that have been proven by sccp
// to be constants.
+func (t *worklist) replaceConst() (int, int) { + constCnt, rewireCnt := 0, 0 + for val, lt := range t.latticeCells { + if lt.tag == constant { + if !isConst(val) { + if t.f.pass.debug > 0 { + fmt.Printf("Replace %v with %v\n", val.LongString(), lt.val.LongString()) + } + val.reset(lt.val.Op) + val.AuxInt = lt.val.AuxInt + constCnt++ + } + // If const value controls this block, rewires successors according to its value + ctrlBlock := t.defBlock[val] + for _, block := range ctrlBlock { + if rewireSuccessor(block, lt.val) { + rewireCnt++ + if t.f.pass.debug > 0 { + fmt.Printf("Rewire %v %v successors\n", block.Kind, block) + } + } + } + } + } + return constCnt, rewireCnt +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sccp_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sccp_test.go new file mode 100644 index 0000000000000000000000000000000000000000..70c23e752755a3e6e3a0747f6ed284b309416e83 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sccp_test.go @@ -0,0 +1,95 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import ( + "cmd/compile/internal/types" + "strings" + "testing" +) + +func TestSCCPBasic(t *testing.T) { + c := testConfig(t) + fun := c.Fun("b1", + Bloc("b1", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("v1", OpConst64, c.config.Types.Int64, 20, nil), + Valu("v2", OpConst64, c.config.Types.Int64, 21, nil), + Valu("v3", OpConst64F, c.config.Types.Float64, 21.0, nil), + Valu("v4", OpConstBool, c.config.Types.Bool, 1, nil), + Valu("t1", OpAdd64, c.config.Types.Int64, 0, nil, "v1", "v2"), + Valu("t2", OpDiv64, c.config.Types.Int64, 0, nil, "t1", "v1"), + Valu("t3", OpAdd64, c.config.Types.Int64, 0, nil, "t1", "t2"), + Valu("t4", OpSub64, c.config.Types.Int64, 0, nil, "t3", "v2"), + Valu("t5", OpMul64, c.config.Types.Int64, 0, nil, "t4", "v2"), + Valu("t6", OpMod64, c.config.Types.Int64, 0, nil, "t5", "v2"), + Valu("t7", OpAnd64, c.config.Types.Int64, 0, nil, "t6", "v2"), + Valu("t8", OpOr64, c.config.Types.Int64, 0, nil, "t7", "v2"), + Valu("t9", OpXor64, c.config.Types.Int64, 0, nil, "t8", "v2"), + Valu("t10", OpNeg64, c.config.Types.Int64, 0, nil, "t9"), + Valu("t11", OpCom64, c.config.Types.Int64, 0, nil, "t10"), + Valu("t12", OpNeg64, c.config.Types.Int64, 0, nil, "t11"), + Valu("t13", OpFloor, c.config.Types.Float64, 0, nil, "v3"), + Valu("t14", OpSqrt, c.config.Types.Float64, 0, nil, "t13"), + Valu("t15", OpCeil, c.config.Types.Float64, 0, nil, "t14"), + Valu("t16", OpTrunc, c.config.Types.Float64, 0, nil, "t15"), + Valu("t17", OpRoundToEven, c.config.Types.Float64, 0, nil, "t16"), + Valu("t18", OpTrunc64to32, c.config.Types.Int64, 0, nil, "t12"), + Valu("t19", OpCvt64Fto64, c.config.Types.Float64, 0, nil, "t17"), + Valu("t20", OpCtz64, c.config.Types.Int64, 0, nil, "v2"), + Valu("t21", OpSlicemask, c.config.Types.Int64, 0, nil, "t20"), + Valu("t22", OpIsNonNil, c.config.Types.Int64, 0, nil, "v2"), + Valu("t23", OpNot, c.config.Types.Bool, 0, nil, "v4"), + Valu("t24", OpEq64, c.config.Types.Bool, 0, nil, "v1", "v2"), + Valu("t25", 
OpLess64, c.config.Types.Bool, 0, nil, "v1", "v2"), + Valu("t26", OpLeq64, c.config.Types.Bool, 0, nil, "v1", "v2"), + Valu("t27", OpEqB, c.config.Types.Bool, 0, nil, "v4", "v4"), + Valu("t28", OpLsh64x64, c.config.Types.Int64, 0, nil, "v2", "v1"), + Valu("t29", OpIsInBounds, c.config.Types.Int64, 0, nil, "v2", "v1"), + Valu("t30", OpIsSliceInBounds, c.config.Types.Int64, 0, nil, "v2", "v1"), + Goto("b2")), + Bloc("b2", + Exit("mem"))) + sccp(fun.f) + CheckFunc(fun.f) + for name, value := range fun.values { + if strings.HasPrefix(name, "t") { + if !isConst(value) { + t.Errorf("Must be constant: %v", value.LongString()) + } + } + } +} + +func TestSCCPIf(t *testing.T) { + c := testConfig(t) + fun := c.Fun("b1", + Bloc("b1", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("v1", OpConst64, c.config.Types.Int64, 0, nil), + Valu("v2", OpConst64, c.config.Types.Int64, 1, nil), + Valu("cmp", OpLess64, c.config.Types.Bool, 0, nil, "v1", "v2"), + If("cmp", "b2", "b3")), + Bloc("b2", + Valu("v3", OpConst64, c.config.Types.Int64, 3, nil), + Goto("b4")), + Bloc("b3", + Valu("v4", OpConst64, c.config.Types.Int64, 4, nil), + Goto("b4")), + Bloc("b4", + Valu("merge", OpPhi, c.config.Types.Int64, 0, nil, "v3", "v4"), + Exit("mem"))) + sccp(fun.f) + CheckFunc(fun.f) + for _, b := range fun.blocks { + for _, v := range b.Values { + if v == fun.values["merge"] { + if !isConst(v) { + t.Errorf("Must be constant: %v", v.LongString()) + } + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/schedule.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/schedule.go new file mode 100644 index 0000000000000000000000000000000000000000..fb38f40d63ab11507e2d52540bed5b274e7825a6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/schedule.go @@ -0,0 +1,575 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/types" + "container/heap" + "sort" +) + +const ( + ScorePhi = iota // towards top of block + ScoreArg // must occur at the top of the entry block + ScoreInitMem // after the args - used as mark by debug info generation + ScoreReadTuple // must occur immediately after tuple-generating insn (or call) + ScoreNilCheck + ScoreMemory + ScoreReadFlags + ScoreDefault + ScoreFlags + ScoreControl // towards bottom of block +) + +type ValHeap struct { + a []*Value + score []int8 + inBlockUses []bool +} + +func (h ValHeap) Len() int { return len(h.a) } +func (h ValHeap) Swap(i, j int) { a := h.a; a[i], a[j] = a[j], a[i] } + +func (h *ValHeap) Push(x interface{}) { + // Push and Pop use pointer receivers because they modify the slice's length, + // not just its contents. + v := x.(*Value) + h.a = append(h.a, v) +} +func (h *ValHeap) Pop() interface{} { + old := h.a + n := len(old) + x := old[n-1] + h.a = old[0 : n-1] + return x +} +func (h ValHeap) Less(i, j int) bool { + x := h.a[i] + y := h.a[j] + sx := h.score[x.ID] + sy := h.score[y.ID] + if c := sx - sy; c != 0 { + return c < 0 // lower scores come earlier. + } + // Note: only scores are required for correct scheduling. + // Everything else is just heuristics. + + ix := h.inBlockUses[x.ID] + iy := h.inBlockUses[y.ID] + if ix != iy { + return ix // values with in-block uses come earlier + } + + if x.Pos != y.Pos { // Favor in-order line stepping + return x.Pos.Before(y.Pos) + } + if x.Op != OpPhi { + if c := len(x.Args) - len(y.Args); c != 0 { + return c > 0 // smaller args come later + } + } + if c := x.Uses - y.Uses; c != 0 { + return c > 0 // smaller uses come later + } + // These comparisons are fairly arbitrary. + // The goal here is stability in the face + // of unrelated changes elsewhere in the compiler. 
+ if c := x.AuxInt - y.AuxInt; c != 0 { + return c < 0 + } + if cmp := x.Type.Compare(y.Type); cmp != types.CMPeq { + return cmp == types.CMPlt + } + return x.ID < y.ID +} + +func (op Op) isLoweredGetClosurePtr() bool { + switch op { + case OpAMD64LoweredGetClosurePtr, OpPPC64LoweredGetClosurePtr, OpARMLoweredGetClosurePtr, OpARM64LoweredGetClosurePtr, + Op386LoweredGetClosurePtr, OpMIPS64LoweredGetClosurePtr, OpLOONG64LoweredGetClosurePtr, OpS390XLoweredGetClosurePtr, OpMIPSLoweredGetClosurePtr, + OpRISCV64LoweredGetClosurePtr, OpWasmLoweredGetClosurePtr: + return true + } + return false +} + +// Schedule the Values in each Block. After this phase returns, the +// order of b.Values matters and is the order in which those values +// will appear in the assembly output. For now it generates a +// reasonable valid schedule using a priority queue. TODO(khr): +// schedule smarter. +func schedule(f *Func) { + // reusable priority queue + priq := new(ValHeap) + + // "priority" for a value + score := f.Cache.allocInt8Slice(f.NumValues()) + defer f.Cache.freeInt8Slice(score) + + // maps mem values to the next live memory value + nextMem := f.Cache.allocValueSlice(f.NumValues()) + defer f.Cache.freeValueSlice(nextMem) + + // inBlockUses records whether a value is used in the block + // in which it lives. (block control values don't count as uses.) + inBlockUses := f.Cache.allocBoolSlice(f.NumValues()) + defer f.Cache.freeBoolSlice(inBlockUses) + if f.Config.optimize { + for _, b := range f.Blocks { + for _, v := range b.Values { + for _, a := range v.Args { + if a.Block == b { + inBlockUses[a.ID] = true + } + } + } + } + } + priq.inBlockUses = inBlockUses + + for _, b := range f.Blocks { + // Compute score. Larger numbers are scheduled closer to the end of the block. + for _, v := range b.Values { + switch { + case v.Op.isLoweredGetClosurePtr(): + // We also score GetLoweredClosurePtr as early as possible to ensure that the + // context register is not stomped. 
GetLoweredClosurePtr should only appear + // in the entry block where there are no phi functions, so there is no + // conflict or ambiguity here. + if b != f.Entry { + f.Fatalf("LoweredGetClosurePtr appeared outside of entry block, b=%s", b.String()) + } + score[v.ID] = ScorePhi + case opcodeTable[v.Op].nilCheck: + // Nil checks must come before loads from the same address. + score[v.ID] = ScoreNilCheck + case v.Op == OpPhi: + // We want all the phis first. + score[v.ID] = ScorePhi + case v.Op == OpArgIntReg || v.Op == OpArgFloatReg: + // In-register args must be scheduled as early as possible to ensure that they + // are not stomped (similar to the closure pointer above). + // In particular, they need to come before regular OpArg operations because + // of how regalloc places spill code (see regalloc.go:placeSpills:mustBeFirst). + if b != f.Entry { + f.Fatalf("%s appeared outside of entry block, b=%s", v.Op, b.String()) + } + score[v.ID] = ScorePhi + case v.Op == OpArg || v.Op == OpSP || v.Op == OpSB: + // We want all the args as early as possible, for better debugging. + score[v.ID] = ScoreArg + case v.Op == OpInitMem: + // Early, but after args. See debug.go:buildLocationLists + score[v.ID] = ScoreInitMem + case v.Type.IsMemory(): + // Schedule stores as early as possible. This tends to + // reduce register pressure. + score[v.ID] = ScoreMemory + case v.Op == OpSelect0 || v.Op == OpSelect1 || v.Op == OpSelectN: + // Tuple selectors need to appear immediately after the instruction + // that generates the tuple. + score[v.ID] = ScoreReadTuple + case v.hasFlagInput(): + // Schedule flag-reading ops earlier, to minimize the lifetime + // of flag values. + score[v.ID] = ScoreReadFlags + case v.isFlagOp(): + // Schedule flag register generation as late as possible. + // This makes sure that we only have one live flags + // value at a time. + // Note that this case is after the case above, so values + // which both read and generate flags are given ScoreReadFlags. 
+ score[v.ID] = ScoreFlags + default: + score[v.ID] = ScoreDefault + // If we're reading flags, schedule earlier to keep flag lifetime short. + for _, a := range v.Args { + if a.isFlagOp() { + score[v.ID] = ScoreReadFlags + } + } + } + } + for _, c := range b.ControlValues() { + // Force the control values to be scheduled at the end, + // unless they have other special priority. + if c.Block != b || score[c.ID] < ScoreReadTuple { + continue + } + if score[c.ID] == ScoreReadTuple { + score[c.Args[0].ID] = ScoreControl + continue + } + score[c.ID] = ScoreControl + } + } + priq.score = score + + // An edge represents a scheduling constraint that x must appear before y in the schedule. + type edge struct { + x, y *Value + } + edges := make([]edge, 0, 64) + + // inEdges is the number of scheduling edges incoming from values that haven't been scheduled yet. + // i.e. inEdges[y.ID] = |e in edges where e.y == y and e.x is not in the schedule yet|. + inEdges := f.Cache.allocInt32Slice(f.NumValues()) + defer f.Cache.freeInt32Slice(inEdges) + + for _, b := range f.Blocks { + edges = edges[:0] + // Standard edges: from the argument of a value to that value. + for _, v := range b.Values { + if v.Op == OpPhi { + // If a value is used by a phi, it does not induce + // a scheduling edge because that use is from the + // previous iteration. + continue + } + for _, a := range v.Args { + if a.Block == b { + edges = append(edges, edge{a, v}) + } + } + } + + // Find store chain for block. + // Store chains for different blocks overwrite each other, so + // the calculated store chain is good only for this block. + for _, v := range b.Values { + if v.Op != OpPhi && v.Op != OpInitMem && v.Type.IsMemory() { + nextMem[v.MemoryArg().ID] = v + } + } + + // Add edges to enforce that any load must come before the following store. 
+ for _, v := range b.Values { + if v.Op == OpPhi || v.Type.IsMemory() { + continue + } + w := v.MemoryArg() + if w == nil { + continue + } + if s := nextMem[w.ID]; s != nil && s.Block == b { + edges = append(edges, edge{v, s}) + } + } + + // Sort all the edges by source Value ID. + sort.Slice(edges, func(i, j int) bool { + return edges[i].x.ID < edges[j].x.ID + }) + // Compute inEdges for values in this block. + for _, e := range edges { + inEdges[e.y.ID]++ + } + + // Initialize priority queue with schedulable values. + priq.a = priq.a[:0] + for _, v := range b.Values { + if inEdges[v.ID] == 0 { + heap.Push(priq, v) + } + } + + // Produce the schedule. Pick the highest priority scheduleable value, + // add it to the schedule, add any of its uses that are now scheduleable + // to the queue, and repeat. + nv := len(b.Values) + b.Values = b.Values[:0] + for priq.Len() > 0 { + // Schedule the next schedulable value in priority order. + v := heap.Pop(priq).(*Value) + b.Values = append(b.Values, v) + + // Find all the scheduling edges out from this value. + i := sort.Search(len(edges), func(i int) bool { + return edges[i].x.ID >= v.ID + }) + j := sort.Search(len(edges), func(i int) bool { + return edges[i].x.ID > v.ID + }) + // Decrement inEdges for each target of edges from v. + for _, e := range edges[i:j] { + inEdges[e.y.ID]-- + if inEdges[e.y.ID] == 0 { + heap.Push(priq, e.y) + } + } + } + if len(b.Values) != nv { + f.Fatalf("schedule does not include all values in block %s", b) + } + } + + // Remove SPanchored now that we've scheduled. + // Also unlink nil checks now that ordering is assured + // between the nil check and the uses of the nil-checked pointer. 
+ for _, b := range f.Blocks { + for _, v := range b.Values { + for i, a := range v.Args { + if a.Op == OpSPanchored || opcodeTable[a.Op].nilCheck { + v.SetArg(i, a.Args[0]) + } + } + } + for i, c := range b.ControlValues() { + if c.Op == OpSPanchored || opcodeTable[c.Op].nilCheck { + b.ReplaceControl(i, c.Args[0]) + } + } + } + for _, b := range f.Blocks { + i := 0 + for _, v := range b.Values { + if v.Op == OpSPanchored { + // Free this value + if v.Uses != 0 { + base.Fatalf("SPAnchored still has %d uses", v.Uses) + } + v.resetArgs() + f.freeValue(v) + } else { + if opcodeTable[v.Op].nilCheck { + if v.Uses != 0 { + base.Fatalf("nilcheck still has %d uses", v.Uses) + } + // We can't delete the nil check, but we mark + // it as having void type so regalloc won't + // try to allocate a register for it. + v.Type = types.TypeVoid + } + b.Values[i] = v + i++ + } + } + b.truncateValues(i) + } + + f.scheduled = true +} + +// storeOrder orders values with respect to stores. That is, +// if v transitively depends on store s, v is ordered after s, +// otherwise v is ordered before s. +// Specifically, values are ordered like +// +// store1 +// NilCheck that depends on store1 +// other values that depends on store1 +// store2 +// NilCheck that depends on store2 +// other values that depends on store2 +// ... +// +// The order of non-store and non-NilCheck values are undefined +// (not necessarily dependency order). This should be cheaper +// than a full scheduling as done above. +// Note that simple dependency order won't work: there is no +// dependency between NilChecks and values like IsNonNil. +// Auxiliary data structures are passed in as arguments, so +// that they can be allocated in the caller and be reused. +// This function takes care of reset them. 
+func storeOrder(values []*Value, sset *sparseSet, storeNumber []int32) []*Value { + if len(values) == 0 { + return values + } + + f := values[0].Block.Func + + // find all stores + + // Members of values that are store values. + // A constant bound allows this to be stack-allocated. 64 is + // enough to cover almost every storeOrder call. + stores := make([]*Value, 0, 64) + hasNilCheck := false + sset.clear() // sset is the set of stores that are used in other values + for _, v := range values { + if v.Type.IsMemory() { + stores = append(stores, v) + if v.Op == OpInitMem || v.Op == OpPhi { + continue + } + sset.add(v.MemoryArg().ID) // record that v's memory arg is used + } + if v.Op == OpNilCheck { + hasNilCheck = true + } + } + if len(stores) == 0 || !hasNilCheck && f.pass.name == "nilcheckelim" { + // there is no store, the order does not matter + return values + } + + // find last store, which is the one that is not used by other stores + var last *Value + for _, v := range stores { + if !sset.contains(v.ID) { + if last != nil { + f.Fatalf("two stores live simultaneously: %v and %v", v, last) + } + last = v + } + } + + // We assign a store number to each value. Store number is the + // index of the latest store that this value transitively depends. + // The i-th store in the current block gets store number 3*i. A nil + // check that depends on the i-th store gets store number 3*i+1. + // Other values that depends on the i-th store gets store number 3*i+2. + // Special case: 0 -- unassigned, 1 or 2 -- the latest store it depends + // is in the previous block (or no store at all, e.g. value is Const). + // First we assign the number to all stores by walking back the store chain, + // then assign the number to other values in DFS order. 
+ count := make([]int32, 3*(len(stores)+1)) + sset.clear() // reuse sparse set to ensure that a value is pushed to stack only once + for n, w := len(stores), last; n > 0; n-- { + storeNumber[w.ID] = int32(3 * n) + count[3*n]++ + sset.add(w.ID) + if w.Op == OpInitMem || w.Op == OpPhi { + if n != 1 { + f.Fatalf("store order is wrong: there are stores before %v", w) + } + break + } + w = w.MemoryArg() + } + var stack []*Value + for _, v := range values { + if sset.contains(v.ID) { + // in sset means v is a store, or already pushed to stack, or already assigned a store number + continue + } + stack = append(stack, v) + sset.add(v.ID) + + for len(stack) > 0 { + w := stack[len(stack)-1] + if storeNumber[w.ID] != 0 { + stack = stack[:len(stack)-1] + continue + } + if w.Op == OpPhi { + // Phi value doesn't depend on store in the current block. + // Do this early to avoid dependency cycle. + storeNumber[w.ID] = 2 + count[2]++ + stack = stack[:len(stack)-1] + continue + } + + max := int32(0) // latest store dependency + argsdone := true + for _, a := range w.Args { + if a.Block != w.Block { + continue + } + if !sset.contains(a.ID) { + stack = append(stack, a) + sset.add(a.ID) + argsdone = false + break + } + if storeNumber[a.ID]/3 > max { + max = storeNumber[a.ID] / 3 + } + } + if !argsdone { + continue + } + + n := 3*max + 2 + if w.Op == OpNilCheck { + n = 3*max + 1 + } + storeNumber[w.ID] = n + count[n]++ + stack = stack[:len(stack)-1] + } + } + + // convert count to prefix sum of counts: count'[i] = sum_{j<=i} count[i] + for i := range count { + if i == 0 { + continue + } + count[i] += count[i-1] + } + if count[len(count)-1] != int32(len(values)) { + f.Fatalf("storeOrder: value is missing, total count = %d, values = %v", count[len(count)-1], values) + } + + // place values in count-indexed bins, which are in the desired store order + order := make([]*Value, len(values)) + for _, v := range values { + s := storeNumber[v.ID] + order[count[s-1]] = v + count[s-1]++ + } + + // 
Order nil checks in source order. We want the first in source order to trigger. + // If two are on the same line, we don't really care which happens first. + // See issue 18169. + if hasNilCheck { + start := -1 + for i, v := range order { + if v.Op == OpNilCheck { + if start == -1 { + start = i + } + } else { + if start != -1 { + sort.Sort(bySourcePos(order[start:i])) + start = -1 + } + } + } + if start != -1 { + sort.Sort(bySourcePos(order[start:])) + } + } + + return order +} + +// isFlagOp reports if v is an OP with the flag type. +func (v *Value) isFlagOp() bool { + if v.Type.IsFlags() || v.Type.IsTuple() && v.Type.FieldType(1).IsFlags() { + return true + } + // PPC64 carry generators put their carry in a non-flag-typed register + // in their output. + switch v.Op { + case OpPPC64SUBC, OpPPC64ADDC, OpPPC64SUBCconst, OpPPC64ADDCconst: + return true + } + return false +} + +// hasFlagInput reports whether v has a flag value as any of its inputs. +func (v *Value) hasFlagInput() bool { + for _, a := range v.Args { + if a.isFlagOp() { + return true + } + } + // PPC64 carry dependencies are conveyed through their final argument, + // so we treat those operations as taking flags as well. + switch v.Op { + case OpPPC64SUBE, OpPPC64ADDE, OpPPC64SUBZEzero, OpPPC64ADDZEzero: + return true + } + return false +} + +type bySourcePos []*Value + +func (s bySourcePos) Len() int { return len(s) } +func (s bySourcePos) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s bySourcePos) Less(i, j int) bool { return s[i].Pos.Before(s[j].Pos) } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/schedule_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/schedule_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6cf5105be1f44ef9d77ed05376be17769893c484 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/schedule_test.go @@ -0,0 +1,160 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/types" + "testing" +) + +func TestSchedule(t *testing.T) { + c := testConfig(t) + cases := []fun{ + c.Fun("entry", + Bloc("entry", + Valu("mem0", OpInitMem, types.TypeMem, 0, nil), + Valu("ptr", OpConst64, c.config.Types.Int64, 0xABCD, nil), + Valu("v", OpConst64, c.config.Types.Int64, 12, nil), + Valu("mem1", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "v", "mem0"), + Valu("mem2", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "v", "mem1"), + Valu("mem3", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "sum", "mem2"), + Valu("l1", OpLoad, c.config.Types.Int64, 0, nil, "ptr", "mem1"), + Valu("l2", OpLoad, c.config.Types.Int64, 0, nil, "ptr", "mem2"), + Valu("sum", OpAdd64, c.config.Types.Int64, 0, nil, "l1", "l2"), + Goto("exit")), + Bloc("exit", + Exit("mem3"))), + } + for _, c := range cases { + schedule(c.f) + if !isSingleLiveMem(c.f) { + t.Error("single-live-mem restriction not enforced by schedule for func:") + printFunc(c.f) + } + } +} + +func isSingleLiveMem(f *Func) bool { + for _, b := range f.Blocks { + var liveMem *Value + for _, v := range b.Values { + for _, w := range v.Args { + if w.Type.IsMemory() { + if liveMem == nil { + liveMem = w + continue + } + if w != liveMem { + return false + } + } + } + if v.Type.IsMemory() { + liveMem = v + } + } + } + return true +} + +func TestStoreOrder(t *testing.T) { + // In the function below, v2 depends on v3 and v4, v4 depends on v3, and v3 depends on store v5. + // storeOrder did not handle this case correctly. 
+ c := testConfig(t) + fun := c.Fun("entry", + Bloc("entry", + Valu("mem0", OpInitMem, types.TypeMem, 0, nil), + Valu("a", OpAdd64, c.config.Types.Int64, 0, nil, "b", "c"), // v2 + Valu("b", OpLoad, c.config.Types.Int64, 0, nil, "ptr", "mem1"), // v3 + Valu("c", OpNeg64, c.config.Types.Int64, 0, nil, "b"), // v4 + Valu("mem1", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "v", "mem0"), // v5 + Valu("mem2", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "a", "mem1"), + Valu("ptr", OpConst64, c.config.Types.Int64, 0xABCD, nil), + Valu("v", OpConst64, c.config.Types.Int64, 12, nil), + Goto("exit")), + Bloc("exit", + Exit("mem2"))) + + CheckFunc(fun.f) + order := storeOrder(fun.f.Blocks[0].Values, fun.f.newSparseSet(fun.f.NumValues()), make([]int32, fun.f.NumValues())) + + // check that v2, v3, v4 is sorted after v5 + var ai, bi, ci, si int + for i, v := range order { + switch v.ID { + case 2: + ai = i + case 3: + bi = i + case 4: + ci = i + case 5: + si = i + } + } + if ai < si || bi < si || ci < si { + t.Logf("Func: %s", fun.f) + t.Errorf("store order is wrong: got %v, want v2 v3 v4 after v5", order) + } +} + +func TestCarryChainOrder(t *testing.T) { + // In the function below, there are two carry chains that have no dependencies on each other, + // one is A1 -> A1carry -> A1Carryvalue, the other is A2 -> A2carry -> A2Carryvalue. If they + // are not scheduled properly, the carry will be clobbered, causing the carry to be regenerated. 
+ c := testConfigARM64(t) + fun := c.Fun("entry", + Bloc("entry", + Valu("mem0", OpInitMem, types.TypeMem, 0, nil), + Valu("x", OpARM64MOVDconst, c.config.Types.UInt64, 5, nil), + Valu("y", OpARM64MOVDconst, c.config.Types.UInt64, 6, nil), + Valu("z", OpARM64MOVDconst, c.config.Types.UInt64, 7, nil), + Valu("A1", OpARM64ADDSflags, types.NewTuple(c.config.Types.UInt64, types.TypeFlags), 0, nil, "x", "z"), // x+z, set flags + Valu("A1carry", OpSelect1, types.TypeFlags, 0, nil, "A1"), + Valu("A2", OpARM64ADDSflags, types.NewTuple(c.config.Types.UInt64, types.TypeFlags), 0, nil, "y", "z"), // y+z, set flags + Valu("A2carry", OpSelect1, types.TypeFlags, 0, nil, "A2"), + Valu("A1value", OpSelect0, c.config.Types.UInt64, 0, nil, "A1"), + Valu("A1Carryvalue", OpARM64ADCzerocarry, c.config.Types.UInt64, 0, nil, "A1carry"), // 0+0+A1carry + Valu("A2value", OpSelect0, c.config.Types.UInt64, 0, nil, "A2"), + Valu("A2Carryvalue", OpARM64ADCzerocarry, c.config.Types.UInt64, 0, nil, "A2carry"), // 0+0+A2carry + Valu("ValueSum", OpARM64ADD, c.config.Types.UInt64, 0, nil, "A1value", "A2value"), + Valu("CarrySum", OpARM64ADD, c.config.Types.UInt64, 0, nil, "A1Carryvalue", "A2Carryvalue"), + Valu("Sum", OpARM64AND, c.config.Types.UInt64, 0, nil, "ValueSum", "CarrySum"), + Goto("exit")), + Bloc("exit", + Exit("mem0")), + ) + + CheckFunc(fun.f) + schedule(fun.f) + + // The expected order is A1 < A1carry < A1Carryvalue < A2 < A2carry < A2Carryvalue. + // There is no dependency between the two carry chains, so it doesn't matter which + // comes first and which comes after, but the unsorted position of A1 is before A2, + // so A1Carryvalue < A2. 
+ var ai, bi, ci, di, ei, fi int + for i, v := range fun.f.Blocks[0].Values { + switch { + case fun.values["A1"] == v: + ai = i + case fun.values["A1carry"] == v: + bi = i + case fun.values["A1Carryvalue"] == v: + ci = i + case fun.values["A2"] == v: + di = i + case fun.values["A2carry"] == v: + ei = i + case fun.values["A2Carryvalue"] == v: + fi = i + } + } + if !(ai < bi && bi < ci && ci < di && di < ei && ei < fi) { + t.Logf("Func: %s", fun.f) + t.Errorf("carry chain order is wrong: got %v, want V%d after V%d after V%d after V%d after V%d after V%d,", + fun.f.Blocks[0], fun.values["A1"].ID, fun.values["A1carry"].ID, fun.values["A1Carryvalue"].ID, + fun.values["A2"].ID, fun.values["A2carry"].ID, fun.values["A2Carryvalue"].ID) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/shift_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/shift_test.go new file mode 100644 index 0000000000000000000000000000000000000000..06c2f6720ff7ff6f35ce57ef0165217516c9adff --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/shift_test.go @@ -0,0 +1,107 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import ( + "cmd/compile/internal/types" + "testing" +) + +func TestShiftConstAMD64(t *testing.T) { + c := testConfig(t) + fun := makeConstShiftFunc(c, 18, OpLsh64x64, c.config.Types.UInt64) + checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHLQconst: 1, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0}) + + fun = makeConstShiftFunc(c, 66, OpLsh64x64, c.config.Types.UInt64) + checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHLQconst: 0, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0}) + + fun = makeConstShiftFunc(c, 18, OpRsh64Ux64, c.config.Types.UInt64) + checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHRQconst: 1, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0}) + + fun = makeConstShiftFunc(c, 66, OpRsh64Ux64, c.config.Types.UInt64) + checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHRQconst: 0, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0}) + + fun = makeConstShiftFunc(c, 18, OpRsh64x64, c.config.Types.Int64) + checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SARQconst: 1, OpAMD64CMPQconst: 0}) + + fun = makeConstShiftFunc(c, 66, OpRsh64x64, c.config.Types.Int64) + checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SARQconst: 1, OpAMD64CMPQconst: 0}) +} + +func makeConstShiftFunc(c *Conf, amount int64, op Op, typ *types.Type) fun { + ptyp := c.config.Types.BytePtr + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("SP", OpSP, c.config.Types.Uintptr, 0, nil), + Valu("argptr", OpOffPtr, ptyp, 8, nil, "SP"), + Valu("resptr", OpOffPtr, ptyp, 16, nil, "SP"), + Valu("load", OpLoad, typ, 0, nil, "argptr", "mem"), + Valu("c", OpConst64, c.config.Types.UInt64, amount, nil), + Valu("shift", op, typ, 0, nil, "load", "c"), + Valu("store", OpStore, types.TypeMem, 0, c.config.Types.UInt64, "resptr", "shift", "mem"), + Exit("store"))) + Compile(fun.f) + return fun +} + +func TestShiftToExtensionAMD64(t *testing.T) { + c := testConfig(t) + // Test that eligible pairs of constant shifts are converted to extensions. 
+ // For example: + // (uint64(x) << 32) >> 32 -> uint64(uint32(x)) + ops := map[Op]int{ + OpAMD64SHLQconst: 0, OpAMD64SHLLconst: 0, + OpAMD64SHRQconst: 0, OpAMD64SHRLconst: 0, + OpAMD64SARQconst: 0, OpAMD64SARLconst: 0, + } + tests := [...]struct { + amount int64 + left, right Op + typ *types.Type + }{ + // unsigned + {56, OpLsh64x64, OpRsh64Ux64, c.config.Types.UInt64}, + {48, OpLsh64x64, OpRsh64Ux64, c.config.Types.UInt64}, + {32, OpLsh64x64, OpRsh64Ux64, c.config.Types.UInt64}, + {24, OpLsh32x64, OpRsh32Ux64, c.config.Types.UInt32}, + {16, OpLsh32x64, OpRsh32Ux64, c.config.Types.UInt32}, + {8, OpLsh16x64, OpRsh16Ux64, c.config.Types.UInt16}, + // signed + {56, OpLsh64x64, OpRsh64x64, c.config.Types.Int64}, + {48, OpLsh64x64, OpRsh64x64, c.config.Types.Int64}, + {32, OpLsh64x64, OpRsh64x64, c.config.Types.Int64}, + {24, OpLsh32x64, OpRsh32x64, c.config.Types.Int32}, + {16, OpLsh32x64, OpRsh32x64, c.config.Types.Int32}, + {8, OpLsh16x64, OpRsh16x64, c.config.Types.Int16}, + } + for _, tc := range tests { + fun := makeShiftExtensionFunc(c, tc.amount, tc.left, tc.right, tc.typ) + checkOpcodeCounts(t, fun.f, ops) + } +} + +// makeShiftExtensionFunc generates a function containing: +// +// (rshift (lshift (Const64 [amount])) (Const64 [amount])) +// +// This may be equivalent to a sign or zero extension. 
+func makeShiftExtensionFunc(c *Conf, amount int64, lshift, rshift Op, typ *types.Type) fun { + ptyp := c.config.Types.BytePtr + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("SP", OpSP, c.config.Types.Uintptr, 0, nil), + Valu("argptr", OpOffPtr, ptyp, 8, nil, "SP"), + Valu("resptr", OpOffPtr, ptyp, 16, nil, "SP"), + Valu("load", OpLoad, typ, 0, nil, "argptr", "mem"), + Valu("c", OpConst64, c.config.Types.UInt64, amount, nil), + Valu("lshift", lshift, typ, 0, nil, "load", "c"), + Valu("rshift", rshift, typ, 0, nil, "lshift", "c"), + Valu("store", OpStore, types.TypeMem, 0, c.config.Types.UInt64, "resptr", "rshift", "mem"), + Exit("store"))) + Compile(fun.f) + return fun +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/shortcircuit.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/shortcircuit.go new file mode 100644 index 0000000000000000000000000000000000000000..d7d0b6fe3335b6218f1d0789fee680fb54e2906b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/shortcircuit.go @@ -0,0 +1,513 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// shortcircuit finds situations where branch directions +// are always correlated and rewrites the CFG to take +// advantage of that fact. +// This optimization is useful for compiling && and || expressions. +func shortcircuit(f *Func) { + // Step 1: Replace a phi arg with a constant if that arg + // is the control value of a preceding If block. + // b1: + // If a goto b2 else b3 + // b2: <- b1 ... + // x = phi(a, ...) + // + // We can replace the "a" in the phi with the constant true. 
+ var ct, cf *Value + for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Op != OpPhi { + continue + } + if !v.Type.IsBoolean() { + continue + } + for i, a := range v.Args { + e := b.Preds[i] + p := e.b + if p.Kind != BlockIf { + continue + } + if p.Controls[0] != a { + continue + } + if e.i == 0 { + if ct == nil { + ct = f.ConstBool(f.Config.Types.Bool, true) + } + v.SetArg(i, ct) + } else { + if cf == nil { + cf = f.ConstBool(f.Config.Types.Bool, false) + } + v.SetArg(i, cf) + } + } + } + } + + // Step 2: Redirect control flow around known branches. + // p: + // ... goto b ... + // b: <- p ... + // v = phi(true, ...) + // if v goto t else u + // We can redirect p to go directly to t instead of b. + // (If v is not live after b). + fuse(f, fuseTypePlain|fuseTypeShortCircuit) +} + +// shortcircuitBlock checks for a CFG in which an If block +// has as its control value a Phi that has a ConstBool arg. +// In some such cases, we can rewrite the CFG into a flatter form. +// +// (1) Look for a CFG of the form +// +// p other pred(s) +// \ / +// b +// / \ +// t other succ +// +// in which b is an If block containing a single phi value with a single use (b's Control), +// which has a ConstBool arg. +// p is the predecessor corresponding to the argument slot in which the ConstBool is found. +// t is the successor corresponding to the value of the ConstBool arg. +// +// Rewrite this into +// +// p other pred(s) +// | / +// | b +// |/ \ +// t u +// +// and remove the appropriate phi arg(s). +// +// (2) Look for a CFG of the form +// +// p q +// \ / +// b +// / \ +// t u +// +// in which b is as described in (1). +// However, b may also contain other phi values. +// The CFG will be modified as described in (1). +// However, in order to handle those other phi values, +// for each other phi value w, we must be able to eliminate w from b. 
+// We can do that though a combination of moving w to a different block +// and rewriting uses of w to use a different value instead. +// See shortcircuitPhiPlan for details. +func shortcircuitBlock(b *Block) bool { + if b.Kind != BlockIf { + return false + } + // Look for control values of the form Copy(Not(Copy(Phi(const, ...)))). + // Those must be the only values in the b, and they each must be used only by b. + // Track the negations so that we can swap successors as needed later. + ctl := b.Controls[0] + nval := 1 // the control value + var swap int64 + for ctl.Uses == 1 && ctl.Block == b && (ctl.Op == OpCopy || ctl.Op == OpNot) { + if ctl.Op == OpNot { + swap = 1 ^ swap + } + ctl = ctl.Args[0] + nval++ // wrapper around control value + } + if ctl.Op != OpPhi || ctl.Block != b || ctl.Uses != 1 { + return false + } + nOtherPhi := 0 + for _, w := range b.Values { + if w.Op == OpPhi && w != ctl { + nOtherPhi++ + } + } + if nOtherPhi > 0 && len(b.Preds) != 2 { + // We rely on b having exactly two preds in shortcircuitPhiPlan + // to reason about the values of phis. + return false + } + if len(b.Values) != nval+nOtherPhi { + return false + } + if nOtherPhi > 0 { + // Check for any phi which is the argument of another phi. + // These cases are tricky, as substitutions done by replaceUses + // are no longer trivial to do in any ordering. See issue 45175. + m := make(map[*Value]bool, 1+nOtherPhi) + for _, v := range b.Values { + if v.Op == OpPhi { + m[v] = true + } + } + for v := range m { + for _, a := range v.Args { + if a != v && m[a] { + return false + } + } + } + } + + // Locate index of first const phi arg. + cidx := -1 + for i, a := range ctl.Args { + if a.Op == OpConstBool { + cidx = i + break + } + } + if cidx == -1 { + return false + } + + // p is the predecessor corresponding to cidx. + pe := b.Preds[cidx] + p := pe.b + pi := pe.i + + // t is the "taken" branch: the successor we always go to when coming in from p. 
+ ti := 1 ^ ctl.Args[cidx].AuxInt ^ swap + te := b.Succs[ti] + t := te.b + if p == b || t == b { + // This is an infinite loop; we can't remove it. See issue 33903. + return false + } + + var fixPhi func(*Value, int) + if nOtherPhi > 0 { + fixPhi = shortcircuitPhiPlan(b, ctl, cidx, ti) + if fixPhi == nil { + return false + } + } + + // We're committed. Update CFG and Phis. + // If you modify this section, update shortcircuitPhiPlan corresponding. + + // Remove b's incoming edge from p. + b.removePred(cidx) + b.removePhiArg(ctl, cidx) + + // Redirect p's outgoing edge to t. + p.Succs[pi] = Edge{t, len(t.Preds)} + + // Fix up t to have one more predecessor. + t.Preds = append(t.Preds, Edge{p, pi}) + for _, v := range t.Values { + if v.Op != OpPhi { + continue + } + v.AddArg(v.Args[te.i]) + } + + if nOtherPhi != 0 { + // Adjust all other phis as necessary. + // Use a plain for loop instead of range because fixPhi may move phis, + // thus modifying b.Values. + for i := 0; i < len(b.Values); i++ { + phi := b.Values[i] + if phi.Uses == 0 || phi == ctl || phi.Op != OpPhi { + continue + } + fixPhi(phi, i) + if phi.Block == b { + continue + } + // phi got moved to a different block with v.moveTo. + // Adjust phi values in this new block that refer + // to phi to refer to the corresponding phi arg instead. + // phi used to be evaluated prior to this block, + // and now it is evaluated in this block. + for _, v := range phi.Block.Values { + if v.Op != OpPhi || v == phi { + continue + } + for j, a := range v.Args { + if a == phi { + v.SetArg(j, phi.Args[j]) + } + } + } + if phi.Uses != 0 { + phielimValue(phi) + } else { + phi.reset(OpInvalid) + } + i-- // v.moveTo put a new value at index i; reprocess + } + + // We may have left behind some phi values with no uses + // but the wrong number of arguments. Eliminate those. + for _, v := range b.Values { + if v.Uses == 0 { + v.reset(OpInvalid) + } + } + } + + if len(b.Preds) == 0 { + // Block is now dead. 
+ b.Kind = BlockInvalid + } + + phielimValue(ctl) + return true +} + +// shortcircuitPhiPlan returns a function to handle non-ctl phi values in b, +// where b is as described in shortcircuitBlock. +// The returned function accepts a value v +// and the index i of v in v.Block: v.Block.Values[i] == v. +// If the returned function moves v to a different block, it will use v.moveTo. +// cidx is the index in ctl of the ConstBool arg. +// ti is the index in b.Succs of the always taken branch when arriving from p. +// If shortcircuitPhiPlan returns nil, there is no plan available, +// and the CFG modifications must not proceed. +// The returned function assumes that shortcircuitBlock has completed its CFG modifications. +func shortcircuitPhiPlan(b *Block, ctl *Value, cidx int, ti int64) func(*Value, int) { + // t is the "taken" branch: the successor we always go to when coming in from p. + t := b.Succs[ti].b + // u is the "untaken" branch: the successor we never go to when coming in from p. + u := b.Succs[1^ti].b + + // In the following CFG matching, ensure that b's preds are entirely distinct from b's succs. + // This is probably a stronger condition than required, but this happens extremely rarely, + // and it makes it easier to avoid getting deceived by pretty ASCII charts. See #44465. + if p0, p1 := b.Preds[0].b, b.Preds[1].b; p0 == t || p1 == t || p0 == u || p1 == u { + return nil + } + + // Look for some common CFG structures + // in which the outbound paths from b merge, + // with no other preds joining them. + // In these cases, we can reconstruct what the value + // of any phi in b must be in the successor blocks. 
+ + if len(t.Preds) == 1 && len(t.Succs) == 1 && + len(u.Preds) == 1 && len(u.Succs) == 1 && + t.Succs[0].b == u.Succs[0].b && len(t.Succs[0].b.Preds) == 2 { + // p q + // \ / + // b + // / \ + // t u + // \ / + // m + // + // After the CFG modifications, this will look like + // + // p q + // | / + // | b + // |/ \ + // t u + // \ / + // m + // + // NB: t.Preds is (b, p), not (p, b). + m := t.Succs[0].b + return func(v *Value, i int) { + // Replace any uses of v in t and u with the value v must have, + // given that we have arrived at that block. + // Then move v to m and adjust its value accordingly; + // this handles all other uses of v. + argP, argQ := v.Args[cidx], v.Args[1^cidx] + u.replaceUses(v, argQ) + phi := t.Func.newValue(OpPhi, v.Type, t, v.Pos) + phi.AddArg2(argQ, argP) + t.replaceUses(v, phi) + if v.Uses == 0 { + return + } + v.moveTo(m, i) + // The phi in m belongs to whichever pred idx corresponds to t. + if m.Preds[0].b == t { + v.SetArgs2(phi, argQ) + } else { + v.SetArgs2(argQ, phi) + } + } + } + + if len(t.Preds) == 2 && len(u.Preds) == 1 && len(u.Succs) == 1 && u.Succs[0].b == t { + // p q + // \ / + // b + // |\ + // | u + // |/ + // t + // + // After the CFG modifications, this will look like + // + // q + // / + // b + // |\ + // p | u + // \|/ + // t + // + // NB: t.Preds is (b or u, b or u, p). + return func(v *Value, i int) { + // Replace any uses of v in u. Then move v to t. + argP, argQ := v.Args[cidx], v.Args[1^cidx] + u.replaceUses(v, argQ) + v.moveTo(t, i) + v.SetArgs3(argQ, argQ, argP) + } + } + + if len(u.Preds) == 2 && len(t.Preds) == 1 && len(t.Succs) == 1 && t.Succs[0].b == u { + // p q + // \ / + // b + // /| + // t | + // \| + // u + // + // After the CFG modifications, this will look like + // + // p q + // | / + // | b + // |/| + // t | + // \| + // u + // + // NB: t.Preds is (b, p), not (p, b). + return func(v *Value, i int) { + // Replace any uses of v in t. Then move v to u. 
+ argP, argQ := v.Args[cidx], v.Args[1^cidx] + phi := t.Func.newValue(OpPhi, v.Type, t, v.Pos) + phi.AddArg2(argQ, argP) + t.replaceUses(v, phi) + if v.Uses == 0 { + return + } + v.moveTo(u, i) + v.SetArgs2(argQ, phi) + } + } + + // Look for some common CFG structures + // in which one outbound path from b exits, + // with no other preds joining. + // In these cases, we can reconstruct what the value + // of any phi in b must be in the path leading to exit, + // and move the phi to the non-exit path. + + if len(t.Preds) == 1 && len(u.Preds) == 1 && len(t.Succs) == 0 { + // p q + // \ / + // b + // / \ + // t u + // + // where t is an Exit/Ret block. + // + // After the CFG modifications, this will look like + // + // p q + // | / + // | b + // |/ \ + // t u + // + // NB: t.Preds is (b, p), not (p, b). + return func(v *Value, i int) { + // Replace any uses of v in t and x. Then move v to u. + argP, argQ := v.Args[cidx], v.Args[1^cidx] + // If there are no uses of v in t or x, this phi will be unused. + // That's OK; it's not worth the cost to prevent that. + phi := t.Func.newValue(OpPhi, v.Type, t, v.Pos) + phi.AddArg2(argQ, argP) + t.replaceUses(v, phi) + if v.Uses == 0 { + return + } + v.moveTo(u, i) + v.SetArgs1(argQ) + } + } + + if len(u.Preds) == 1 && len(t.Preds) == 1 && len(u.Succs) == 0 { + // p q + // \ / + // b + // / \ + // t u + // + // where u is an Exit/Ret block. + // + // After the CFG modifications, this will look like + // + // p q + // | / + // | b + // |/ \ + // t u + // + // NB: t.Preds is (b, p), not (p, b). + return func(v *Value, i int) { + // Replace any uses of v in u (and x). Then move v to t. + argP, argQ := v.Args[cidx], v.Args[1^cidx] + u.replaceUses(v, argQ) + v.moveTo(t, i) + v.SetArgs2(argQ, argP) + } + } + + // TODO: handle more cases; shortcircuit optimizations turn out to be reasonably high impact + return nil +} + +// replaceUses replaces all uses of old in b with new. 
+func (b *Block) replaceUses(old, new *Value) { + for _, v := range b.Values { + for i, a := range v.Args { + if a == old { + v.SetArg(i, new) + } + } + } + for i, v := range b.ControlValues() { + if v == old { + b.ReplaceControl(i, new) + } + } +} + +// moveTo moves v to dst, adjusting the appropriate Block.Values slices. +// The caller is responsible for ensuring that this is safe. +// i is the index of v in v.Block.Values. +func (v *Value) moveTo(dst *Block, i int) { + if dst.Func.scheduled { + v.Fatalf("moveTo after scheduling") + } + src := v.Block + if src.Values[i] != v { + v.Fatalf("moveTo bad index %d", v, i) + } + if src == dst { + return + } + v.Block = dst + dst.Values = append(dst.Values, v) + last := len(src.Values) - 1 + src.Values[i] = src.Values[last] + src.Values[last] = nil + src.Values = src.Values[:last] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/shortcircuit_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/shortcircuit_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b25eeb4740e6614821cbb50d26e857b50e4ce717 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/shortcircuit_test.go @@ -0,0 +1,53 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import ( + "cmd/compile/internal/types" + "testing" +) + +func TestShortCircuit(t *testing.T) { + c := testConfig(t) + + fun := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("arg1", OpArg, c.config.Types.Int64, 0, nil), + Valu("arg2", OpArg, c.config.Types.Int64, 0, nil), + Valu("arg3", OpArg, c.config.Types.Int64, 0, nil), + Goto("b1")), + Bloc("b1", + Valu("cmp1", OpLess64, c.config.Types.Bool, 0, nil, "arg1", "arg2"), + If("cmp1", "b2", "b3")), + Bloc("b2", + Valu("cmp2", OpLess64, c.config.Types.Bool, 0, nil, "arg2", "arg3"), + Goto("b3")), + Bloc("b3", + Valu("phi2", OpPhi, c.config.Types.Bool, 0, nil, "cmp1", "cmp2"), + If("phi2", "b4", "b5")), + Bloc("b4", + Valu("cmp3", OpLess64, c.config.Types.Bool, 0, nil, "arg3", "arg1"), + Goto("b5")), + Bloc("b5", + Valu("phi3", OpPhi, c.config.Types.Bool, 0, nil, "phi2", "cmp3"), + If("phi3", "b6", "b7")), + Bloc("b6", + Exit("mem")), + Bloc("b7", + Exit("mem"))) + + CheckFunc(fun.f) + shortcircuit(fun.f) + CheckFunc(fun.f) + + for _, b := range fun.f.Blocks { + for _, v := range b.Values { + if v.Op == OpPhi { + t.Errorf("phi %s remains", v) + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sizeof_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sizeof_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a27002ee3ac3b26a557decab61d33de604665398 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sizeof_test.go @@ -0,0 +1,39 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "reflect" + "testing" + "unsafe" +) + +// Assert that the size of important structures do not change unexpectedly. 
+ +func TestSizeof(t *testing.T) { + const _64bit = unsafe.Sizeof(uintptr(0)) == 8 + + var tests = []struct { + val interface{} // type as a value + _32bit uintptr // size on 32bit platforms + _64bit uintptr // size on 64bit platforms + }{ + {Value{}, 72, 112}, + {Block{}, 164, 304}, + {LocalSlot{}, 28, 40}, + {valState{}, 28, 40}, + } + + for _, tt := range tests { + want := tt._32bit + if _64bit { + want = tt._64bit + } + got := reflect.TypeOf(tt.val).Size() + if want != got { + t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/softfloat.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/softfloat.go new file mode 100644 index 0000000000000000000000000000000000000000..351f824a9f579a2f8d4675e0be4d6d2a256631f2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/softfloat.go @@ -0,0 +1,80 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import ( + "cmd/compile/internal/types" + "math" +) + +func softfloat(f *Func) { + if !f.Config.SoftFloat { + return + } + newInt64 := false + + for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Type.IsFloat() { + f.unCache(v) + switch v.Op { + case OpPhi, OpLoad, OpArg: + if v.Type.Size() == 4 { + v.Type = f.Config.Types.UInt32 + } else { + v.Type = f.Config.Types.UInt64 + } + case OpConst32F: + v.Op = OpConst32 + v.Type = f.Config.Types.UInt32 + v.AuxInt = int64(int32(math.Float32bits(auxTo32F(v.AuxInt)))) + case OpConst64F: + v.Op = OpConst64 + v.Type = f.Config.Types.UInt64 + case OpNeg32F: + arg0 := v.Args[0] + v.reset(OpXor32) + v.Type = f.Config.Types.UInt32 + v.AddArg(arg0) + mask := v.Block.NewValue0(v.Pos, OpConst32, v.Type) + mask.AuxInt = -0x80000000 + v.AddArg(mask) + case OpNeg64F: + arg0 := v.Args[0] + v.reset(OpXor64) + v.Type = f.Config.Types.UInt64 + v.AddArg(arg0) + mask := v.Block.NewValue0(v.Pos, OpConst64, v.Type) + mask.AuxInt = -0x8000000000000000 + v.AddArg(mask) + case OpRound32F: + v.Op = OpCopy + v.Type = f.Config.Types.UInt32 + case OpRound64F: + v.Op = OpCopy + v.Type = f.Config.Types.UInt64 + } + newInt64 = newInt64 || v.Type.Size() == 8 + } else if (v.Op == OpStore || v.Op == OpZero || v.Op == OpMove) && v.Aux.(*types.Type).IsFloat() { + switch size := v.Aux.(*types.Type).Size(); size { + case 4: + v.Aux = f.Config.Types.UInt32 + case 8: + v.Aux = f.Config.Types.UInt64 + newInt64 = true + default: + v.Fatalf("bad float type with size %d", size) + } + } + } + } + + if newInt64 && f.Config.RegSize == 4 { + // On 32bit arch, decompose Uint64 introduced in the switch above. 
+ decomposeBuiltIn(f) + applyRewrite(f, rewriteBlockdec64, rewriteValuedec64, removeDeadValues) + } + +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sparsemap.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sparsemap.go new file mode 100644 index 0000000000000000000000000000000000000000..9443c8b4b43aa648e26318d3631e317af5d00782 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sparsemap.go @@ -0,0 +1,89 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// from https://research.swtch.com/sparse +// in turn, from Briggs and Torczon + +type sparseEntry struct { + key ID + val int32 +} + +type sparseMap struct { + dense []sparseEntry + sparse []int32 +} + +// newSparseMap returns a sparseMap that can map +// integers between 0 and n-1 to int32s. +func newSparseMap(n int) *sparseMap { + return &sparseMap{dense: nil, sparse: make([]int32, n)} +} + +func (s *sparseMap) cap() int { + return len(s.sparse) +} + +func (s *sparseMap) size() int { + return len(s.dense) +} + +func (s *sparseMap) contains(k ID) bool { + i := s.sparse[k] + return i < int32(len(s.dense)) && s.dense[i].key == k +} + +// get returns the value for key k, or -1 if k does +// not appear in the map. 
+func (s *sparseMap) get(k ID) int32 { + i := s.sparse[k] + if i < int32(len(s.dense)) && s.dense[i].key == k { + return s.dense[i].val + } + return -1 +} + +func (s *sparseMap) set(k ID, v int32) { + i := s.sparse[k] + if i < int32(len(s.dense)) && s.dense[i].key == k { + s.dense[i].val = v + return + } + s.dense = append(s.dense, sparseEntry{k, v}) + s.sparse[k] = int32(len(s.dense)) - 1 +} + +// setBit sets the v'th bit of k's value, where 0 <= v < 32 +func (s *sparseMap) setBit(k ID, v uint) { + if v >= 32 { + panic("bit index too large.") + } + i := s.sparse[k] + if i < int32(len(s.dense)) && s.dense[i].key == k { + s.dense[i].val |= 1 << v + return + } + s.dense = append(s.dense, sparseEntry{k, 1 << v}) + s.sparse[k] = int32(len(s.dense)) - 1 +} + +func (s *sparseMap) remove(k ID) { + i := s.sparse[k] + if i < int32(len(s.dense)) && s.dense[i].key == k { + y := s.dense[len(s.dense)-1] + s.dense[i] = y + s.sparse[y.key] = i + s.dense = s.dense[:len(s.dense)-1] + } +} + +func (s *sparseMap) clear() { + s.dense = s.dense[:0] +} + +func (s *sparseMap) contents() []sparseEntry { + return s.dense +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sparsemappos.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sparsemappos.go new file mode 100644 index 0000000000000000000000000000000000000000..60bad8298bfe52098ba0d7ef842138d7f286a93e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sparsemappos.go @@ -0,0 +1,79 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import "cmd/internal/src" + +// from https://research.swtch.com/sparse +// in turn, from Briggs and Torczon + +type sparseEntryPos struct { + key ID + val int32 + pos src.XPos +} + +type sparseMapPos struct { + dense []sparseEntryPos + sparse []int32 +} + +// newSparseMapPos returns a sparseMapPos that can map +// integers between 0 and n-1 to the pair . +func newSparseMapPos(n int) *sparseMapPos { + return &sparseMapPos{dense: nil, sparse: make([]int32, n)} +} + +func (s *sparseMapPos) cap() int { + return len(s.sparse) +} + +func (s *sparseMapPos) size() int { + return len(s.dense) +} + +func (s *sparseMapPos) contains(k ID) bool { + i := s.sparse[k] + return i < int32(len(s.dense)) && s.dense[i].key == k +} + +// get returns the value for key k, or -1 if k does +// not appear in the map. +func (s *sparseMapPos) get(k ID) int32 { + i := s.sparse[k] + if i < int32(len(s.dense)) && s.dense[i].key == k { + return s.dense[i].val + } + return -1 +} + +func (s *sparseMapPos) set(k ID, v int32, a src.XPos) { + i := s.sparse[k] + if i < int32(len(s.dense)) && s.dense[i].key == k { + s.dense[i].val = v + s.dense[i].pos = a + return + } + s.dense = append(s.dense, sparseEntryPos{k, v, a}) + s.sparse[k] = int32(len(s.dense)) - 1 +} + +func (s *sparseMapPos) remove(k ID) { + i := s.sparse[k] + if i < int32(len(s.dense)) && s.dense[i].key == k { + y := s.dense[len(s.dense)-1] + s.dense[i] = y + s.sparse[y.key] = i + s.dense = s.dense[:len(s.dense)-1] + } +} + +func (s *sparseMapPos) clear() { + s.dense = s.dense[:0] +} + +func (s *sparseMapPos) contents() []sparseEntryPos { + return s.dense +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sparseset.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sparseset.go new file mode 100644 index 0000000000000000000000000000000000000000..07d40dc948cc2faabbd558e23d4635e3afe448f3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sparseset.go @@ -0,0 
+1,79 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// from https://research.swtch.com/sparse +// in turn, from Briggs and Torczon + +type sparseSet struct { + dense []ID + sparse []int32 +} + +// newSparseSet returns a sparseSet that can represent +// integers between 0 and n-1. +func newSparseSet(n int) *sparseSet { + return &sparseSet{dense: nil, sparse: make([]int32, n)} +} + +func (s *sparseSet) cap() int { + return len(s.sparse) +} + +func (s *sparseSet) size() int { + return len(s.dense) +} + +func (s *sparseSet) contains(x ID) bool { + i := s.sparse[x] + return i < int32(len(s.dense)) && s.dense[i] == x +} + +func (s *sparseSet) add(x ID) { + i := s.sparse[x] + if i < int32(len(s.dense)) && s.dense[i] == x { + return + } + s.dense = append(s.dense, x) + s.sparse[x] = int32(len(s.dense)) - 1 +} + +func (s *sparseSet) addAll(a []ID) { + for _, x := range a { + s.add(x) + } +} + +func (s *sparseSet) addAllValues(a []*Value) { + for _, v := range a { + s.add(v.ID) + } +} + +func (s *sparseSet) remove(x ID) { + i := s.sparse[x] + if i < int32(len(s.dense)) && s.dense[i] == x { + y := s.dense[len(s.dense)-1] + s.dense[i] = y + s.sparse[y] = i + s.dense = s.dense[:len(s.dense)-1] + } +} + +// pop removes an arbitrary element from the set. +// The set must be nonempty. 
+func (s *sparseSet) pop() ID { + x := s.dense[len(s.dense)-1] + s.dense = s.dense[:len(s.dense)-1] + return x +} + +func (s *sparseSet) clear() { + s.dense = s.dense[:0] +} + +func (s *sparseSet) contents() []ID { + return s.dense +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sparsetree.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sparsetree.go new file mode 100644 index 0000000000000000000000000000000000000000..6f2bd040375a83c129f69d8a021b13e4a55803f6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/sparsetree.go @@ -0,0 +1,242 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "fmt" + "strings" +) + +type SparseTreeNode struct { + child *Block + sibling *Block + parent *Block + + // Every block has 6 numbers associated with it: + // entry-1, entry, entry+1, exit-1, and exit, exit+1. + // entry and exit are conceptually the top of the block (phi functions) + // entry+1 and exit-1 are conceptually the bottom of the block (ordinary defs) + // entry-1 and exit+1 are conceptually "just before" the block (conditions flowing in) + // + // This simplifies life if we wish to query information about x + // when x is both an input to and output of a block. 
+ entry, exit int32 +} + +func (s *SparseTreeNode) String() string { + return fmt.Sprintf("[%d,%d]", s.entry, s.exit) +} + +func (s *SparseTreeNode) Entry() int32 { + return s.entry +} + +func (s *SparseTreeNode) Exit() int32 { + return s.exit +} + +const ( + // When used to lookup up definitions in a sparse tree, + // these adjustments to a block's entry (+adjust) and + // exit (-adjust) numbers allow a distinction to be made + // between assignments (typically branch-dependent + // conditionals) occurring "before" the block (e.g., as inputs + // to the block and its phi functions), "within" the block, + // and "after" the block. + AdjustBefore = -1 // defined before phi + AdjustWithin = 0 // defined by phi + AdjustAfter = 1 // defined within block +) + +// A SparseTree is a tree of Blocks. +// It allows rapid ancestor queries, +// such as whether one block dominates another. +type SparseTree []SparseTreeNode + +// newSparseTree creates a SparseTree from a block-to-parent map (array indexed by Block.ID). +func newSparseTree(f *Func, parentOf []*Block) SparseTree { + t := make(SparseTree, f.NumBlocks()) + for _, b := range f.Blocks { + n := &t[b.ID] + if p := parentOf[b.ID]; p != nil { + n.parent = p + n.sibling = t[p.ID].child + t[p.ID].child = b + } + } + t.numberBlock(f.Entry, 1) + return t +} + +// newSparseOrderedTree creates a SparseTree from a block-to-parent map (array indexed by Block.ID) +// children will appear in the reverse of their order in reverseOrder +// in particular, if reverseOrder is a dfs-reversePostOrder, then the root-to-children +// walk of the tree will yield a pre-order. 
+func newSparseOrderedTree(f *Func, parentOf, reverseOrder []*Block) SparseTree { + t := make(SparseTree, f.NumBlocks()) + for _, b := range reverseOrder { + n := &t[b.ID] + if p := parentOf[b.ID]; p != nil { + n.parent = p + n.sibling = t[p.ID].child + t[p.ID].child = b + } + } + t.numberBlock(f.Entry, 1) + return t +} + +// treestructure provides a string description of the dominator +// tree and flow structure of block b and all blocks that it +// dominates. +func (t SparseTree) treestructure(b *Block) string { + return t.treestructure1(b, 0) +} +func (t SparseTree) treestructure1(b *Block, i int) string { + s := "\n" + strings.Repeat("\t", i) + b.String() + "->[" + for i, e := range b.Succs { + if i > 0 { + s += "," + } + s += e.b.String() + } + s += "]" + if c0 := t[b.ID].child; c0 != nil { + s += "(" + for c := c0; c != nil; c = t[c.ID].sibling { + if c != c0 { + s += " " + } + s += t.treestructure1(c, i+1) + } + s += ")" + } + return s +} + +// numberBlock assigns entry and exit numbers for b and b's +// children in an in-order walk from a gappy sequence, where n +// is the first number not yet assigned or reserved. N should +// be larger than zero. For each entry and exit number, the +// values one larger and smaller are reserved to indicate +// "strictly above" and "strictly below". numberBlock returns +// the smallest number not yet assigned or reserved (i.e., the +// exit number of the last block visited, plus two, because +// last.exit+1 is a reserved value.) 
+// +// examples: +// +// single node tree Root, call with n=1 +// entry=2 Root exit=5; returns 7 +// +// two node tree, Root->Child, call with n=1 +// entry=2 Root exit=11; returns 13 +// entry=5 Child exit=8 +// +// three node tree, Root->(Left, Right), call with n=1 +// entry=2 Root exit=17; returns 19 +// entry=5 Left exit=8; entry=11 Right exit=14 +// +// This is the in-order sequence of assigned and reserved numbers +// for the last example: +// root left left right right root +// 1 2e 3 | 4 5e 6 | 7 8x 9 | 10 11e 12 | 13 14x 15 | 16 17x 18 + +func (t SparseTree) numberBlock(b *Block, n int32) int32 { + // reserve n for entry-1, assign n+1 to entry + n++ + t[b.ID].entry = n + // reserve n+1 for entry+1, n+2 is next free number + n += 2 + for c := t[b.ID].child; c != nil; c = t[c.ID].sibling { + n = t.numberBlock(c, n) // preserves n = next free number + } + // reserve n for exit-1, assign n+1 to exit + n++ + t[b.ID].exit = n + // reserve n+1 for exit+1, n+2 is next free number, returned. + return n + 2 +} + +// Sibling returns a sibling of x in the dominator tree (i.e., +// a node with the same immediate dominator) or nil if there +// are no remaining siblings in the arbitrary but repeatable +// order chosen. Because the Child-Sibling order is used +// to assign entry and exit numbers in the treewalk, those +// numbers are also consistent with this order (i.e., +// Sibling(x) has entry number larger than x's exit number). +func (t SparseTree) Sibling(x *Block) *Block { + return t[x.ID].sibling +} + +// Child returns a child of x in the dominator tree, or +// nil if there are none. The choice of first child is +// arbitrary but repeatable. +func (t SparseTree) Child(x *Block) *Block { + return t[x.ID].child +} + +// Parent returns the parent of x in the dominator tree, or +// nil if x is the function's entry. +func (t SparseTree) Parent(x *Block) *Block { + return t[x.ID].parent +} + +// IsAncestorEq reports whether x is an ancestor of or equal to y. 
+func (t SparseTree) IsAncestorEq(x, y *Block) bool { + if x == y { + return true + } + xx := &t[x.ID] + yy := &t[y.ID] + return xx.entry <= yy.entry && yy.exit <= xx.exit +} + +// isAncestor reports whether x is a strict ancestor of y. +func (t SparseTree) isAncestor(x, y *Block) bool { + if x == y { + return false + } + xx := &t[x.ID] + yy := &t[y.ID] + return xx.entry < yy.entry && yy.exit < xx.exit +} + +// domorder returns a value for dominator-oriented sorting. +// Block domination does not provide a total ordering, +// but domorder two has useful properties. +// 1. If domorder(x) > domorder(y) then x does not dominate y. +// 2. If domorder(x) < domorder(y) and domorder(y) < domorder(z) and x does not dominate y, +// then x does not dominate z. +// +// Property (1) means that blocks sorted by domorder always have a maximal dominant block first. +// Property (2) allows searches for dominated blocks to exit early. +func (t SparseTree) domorder(x *Block) int32 { + // Here is an argument that entry(x) provides the properties documented above. + // + // Entry and exit values are assigned in a depth-first dominator tree walk. + // For all blocks x and y, one of the following holds: + // + // (x-dom-y) x dominates y => entry(x) < entry(y) < exit(y) < exit(x) + // (y-dom-x) y dominates x => entry(y) < entry(x) < exit(x) < exit(y) + // (x-then-y) neither x nor y dominates the other and x walked before y => entry(x) < exit(x) < entry(y) < exit(y) + // (y-then-x) neither x nor y dominates the other and y walked before y => entry(y) < exit(y) < entry(x) < exit(x) + // + // entry(x) > entry(y) eliminates case x-dom-y. This provides property (1) above. + // + // For property (2), assume entry(x) < entry(y) and entry(y) < entry(z) and x does not dominate y. + // entry(x) < entry(y) allows cases x-dom-y and x-then-y. + // But by supposition, x does not dominate y. So we have x-then-y. + // + // For contradiction, assume x dominates z. 
+ // Then entry(x) < entry(z) < exit(z) < exit(x). + // But we know x-then-y, so entry(x) < exit(x) < entry(y) < exit(y). + // Combining those, entry(x) < entry(z) < exit(z) < exit(x) < entry(y) < exit(y). + // By supposition, entry(y) < entry(z), which allows cases y-dom-z and y-then-z. + // y-dom-z requires entry(y) < entry(z), but we have entry(z) < entry(y). + // y-then-z requires exit(y) < entry(z), but we have entry(z) < exit(y). + // We have a contradiction, so x does not dominate z, as required. + return t[x.ID].entry +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/stackalloc.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/stackalloc.go new file mode 100644 index 0000000000000000000000000000000000000000..c9ca778b3aa47785a6445f1dfdba4140ef13651c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/stackalloc.go @@ -0,0 +1,454 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO: live at start of block instead? + +package ssa + +import ( + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/src" + "fmt" +) + +type stackAllocState struct { + f *Func + + // live is the output of stackalloc. + // live[b.id] = live values at the end of block b. + live [][]ID + + // The following slices are reused across multiple users + // of stackAllocState. + values []stackValState + interfere [][]ID // interfere[v.id] = values that interfere with v. + names []LocalSlot + + nArgSlot, // Number of Values sourced to arg slot + nNotNeed, // Number of Values not needing a stack slot + nNamedSlot, // Number of Values using a named stack slot + nReuse, // Number of values reusing a stack slot + nAuto, // Number of autos allocated for stack slots. 
+ nSelfInterfere int32 // Number of self-interferences +} + +func newStackAllocState(f *Func) *stackAllocState { + s := f.Cache.stackAllocState + if s == nil { + return new(stackAllocState) + } + if s.f != nil { + f.fe.Fatalf(src.NoXPos, "newStackAllocState called without previous free") + } + return s +} + +func putStackAllocState(s *stackAllocState) { + for i := range s.values { + s.values[i] = stackValState{} + } + for i := range s.interfere { + s.interfere[i] = nil + } + for i := range s.names { + s.names[i] = LocalSlot{} + } + s.f.Cache.stackAllocState = s + s.f = nil + s.live = nil + s.nArgSlot, s.nNotNeed, s.nNamedSlot, s.nReuse, s.nAuto, s.nSelfInterfere = 0, 0, 0, 0, 0, 0 +} + +type stackValState struct { + typ *types.Type + spill *Value + needSlot bool + isArg bool +} + +// stackalloc allocates storage in the stack frame for +// all Values that did not get a register. +// Returns a map from block ID to the stack values live at the end of that block. +func stackalloc(f *Func, spillLive [][]ID) [][]ID { + if f.pass.debug > stackDebug { + fmt.Println("before stackalloc") + fmt.Println(f.String()) + } + s := newStackAllocState(f) + s.init(f, spillLive) + defer putStackAllocState(s) + + s.stackalloc() + if f.pass.stats > 0 { + f.LogStat("stack_alloc_stats", + s.nArgSlot, "arg_slots", s.nNotNeed, "slot_not_needed", + s.nNamedSlot, "named_slots", s.nAuto, "auto_slots", + s.nReuse, "reused_slots", s.nSelfInterfere, "self_interfering") + } + + return s.live +} + +func (s *stackAllocState) init(f *Func, spillLive [][]ID) { + s.f = f + + // Initialize value information. 
+ if n := f.NumValues(); cap(s.values) >= n { + s.values = s.values[:n] + } else { + s.values = make([]stackValState, n) + } + for _, b := range f.Blocks { + for _, v := range b.Values { + s.values[v.ID].typ = v.Type + s.values[v.ID].needSlot = !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && f.getHome(v.ID) == nil && !v.rematerializeable() && !v.OnWasmStack + s.values[v.ID].isArg = hasAnyArgOp(v) + if f.pass.debug > stackDebug && s.values[v.ID].needSlot { + fmt.Printf("%s needs a stack slot\n", v) + } + if v.Op == OpStoreReg { + s.values[v.Args[0].ID].spill = v + } + } + } + + // Compute liveness info for values needing a slot. + s.computeLive(spillLive) + + // Build interference graph among values needing a slot. + s.buildInterferenceGraph() +} + +func (s *stackAllocState) stackalloc() { + f := s.f + + // Build map from values to their names, if any. + // A value may be associated with more than one name (e.g. after + // the assignment i=j). This step picks one name per value arbitrarily. + if n := f.NumValues(); cap(s.names) >= n { + s.names = s.names[:n] + } else { + s.names = make([]LocalSlot, n) + } + names := s.names + empty := LocalSlot{} + for _, name := range f.Names { + // Note: not "range f.NamedValues" above, because + // that would be nondeterministic. + for _, v := range f.NamedValues[*name] { + if v.Op == OpArgIntReg || v.Op == OpArgFloatReg { + aux := v.Aux.(*AuxNameOffset) + // Never let an arg be bound to a differently named thing. 
+ if name.N != aux.Name || name.Off != aux.Offset { + if f.pass.debug > stackDebug { + fmt.Printf("stackalloc register arg %s skipping name %s\n", v, name) + } + continue + } + } else if name.N.Class == ir.PPARAM && v.Op != OpArg { + // PPARAM's only bind to OpArg + if f.pass.debug > stackDebug { + fmt.Printf("stackalloc PPARAM name %s skipping non-Arg %s\n", name, v) + } + continue + } + + if names[v.ID] == empty { + if f.pass.debug > stackDebug { + fmt.Printf("stackalloc value %s to name %s\n", v, *name) + } + names[v.ID] = *name + } + } + } + + // Allocate args to their assigned locations. + for _, v := range f.Entry.Values { + if !hasAnyArgOp(v) { + continue + } + if v.Aux == nil { + f.Fatalf("%s has nil Aux\n", v.LongString()) + } + if v.Op == OpArg { + loc := LocalSlot{N: v.Aux.(*ir.Name), Type: v.Type, Off: v.AuxInt} + if f.pass.debug > stackDebug { + fmt.Printf("stackalloc OpArg %s to %s\n", v, loc) + } + f.setHome(v, loc) + continue + } + // You might think this below would be the right idea, but you would be wrong. + // It almost works; as of 105a6e9518 - 2021-04-23, + // GOSSAHASH=11011011001011111 == cmd/compile/internal/noder.(*noder).embedded + // is compiled incorrectly. I believe the cause is one of those SSA-to-registers + // puzzles that the register allocator untangles; in the event that a register + // parameter does not end up bound to a name, "fixing" it is a bad idea. + // + //if f.DebugTest { + // if v.Op == OpArgIntReg || v.Op == OpArgFloatReg { + // aux := v.Aux.(*AuxNameOffset) + // loc := LocalSlot{N: aux.Name, Type: v.Type, Off: aux.Offset} + // if f.pass.debug > stackDebug { + // fmt.Printf("stackalloc Op%s %s to %s\n", v.Op, v, loc) + // } + // names[v.ID] = loc + // continue + // } + //} + + } + + // For each type, we keep track of all the stack slots we + // have allocated for that type. + // TODO: share slots among equivalent types. We would need to + // only share among types with the same GC signature. 
See the + // type.Equal calls below for where this matters. + locations := map[*types.Type][]LocalSlot{} + + // Each time we assign a stack slot to a value v, we remember + // the slot we used via an index into locations[v.Type]. + slots := f.Cache.allocIntSlice(f.NumValues()) + defer f.Cache.freeIntSlice(slots) + for i := range slots { + slots[i] = -1 + } + + // Pick a stack slot for each value needing one. + used := f.Cache.allocBoolSlice(f.NumValues()) + defer f.Cache.freeBoolSlice(used) + for _, b := range f.Blocks { + for _, v := range b.Values { + if !s.values[v.ID].needSlot { + s.nNotNeed++ + continue + } + if hasAnyArgOp(v) { + s.nArgSlot++ + continue // already picked + } + + // If this is a named value, try to use the name as + // the spill location. + var name LocalSlot + if v.Op == OpStoreReg { + name = names[v.Args[0].ID] + } else { + name = names[v.ID] + } + if name.N != nil && v.Type.Compare(name.Type) == types.CMPeq { + for _, id := range s.interfere[v.ID] { + h := f.getHome(id) + if h != nil && h.(LocalSlot).N == name.N && h.(LocalSlot).Off == name.Off { + // A variable can interfere with itself. + // It is rare, but it can happen. + s.nSelfInterfere++ + goto noname + } + } + if f.pass.debug > stackDebug { + fmt.Printf("stackalloc %s to %s\n", v, name) + } + s.nNamedSlot++ + f.setHome(v, name) + continue + } + + noname: + // Set of stack slots we could reuse. + locs := locations[v.Type] + // Mark all positions in locs used by interfering values. + for i := 0; i < len(locs); i++ { + used[i] = false + } + for _, xid := range s.interfere[v.ID] { + slot := slots[xid] + if slot >= 0 { + used[slot] = true + } + } + // Find an unused stack slot. + var i int + for i = 0; i < len(locs); i++ { + if !used[i] { + s.nReuse++ + break + } + } + // If there is no unused stack slot, allocate a new one. 
+ if i == len(locs) { + s.nAuto++ + locs = append(locs, LocalSlot{N: f.NewLocal(v.Pos, v.Type), Type: v.Type, Off: 0}) + locations[v.Type] = locs + } + // Use the stack variable at that index for v. + loc := locs[i] + if f.pass.debug > stackDebug { + fmt.Printf("stackalloc %s to %s\n", v, loc) + } + f.setHome(v, loc) + slots[v.ID] = i + } + } +} + +// computeLive computes a map from block ID to a list of +// stack-slot-needing value IDs live at the end of that block. +// TODO: this could be quadratic if lots of variables are live across lots of +// basic blocks. Figure out a way to make this function (or, more precisely, the user +// of this function) require only linear size & time. +func (s *stackAllocState) computeLive(spillLive [][]ID) { + s.live = make([][]ID, s.f.NumBlocks()) + var phis []*Value + live := s.f.newSparseSet(s.f.NumValues()) + defer s.f.retSparseSet(live) + t := s.f.newSparseSet(s.f.NumValues()) + defer s.f.retSparseSet(t) + + // Instead of iterating over f.Blocks, iterate over their postordering. + // Liveness information flows backward, so starting at the end + // increases the probability that we will stabilize quickly. + po := s.f.postorder() + for { + changed := false + for _, b := range po { + // Start with known live values at the end of the block + live.clear() + live.addAll(s.live[b.ID]) + + // Propagate backwards to the start of the block + phis = phis[:0] + for i := len(b.Values) - 1; i >= 0; i-- { + v := b.Values[i] + live.remove(v.ID) + if v.Op == OpPhi { + // Save phi for later. + // Note: its args might need a stack slot even though + // the phi itself doesn't. So don't use needSlot. 
+ if !v.Type.IsMemory() && !v.Type.IsVoid() { + phis = append(phis, v) + } + continue + } + for _, a := range v.Args { + if s.values[a.ID].needSlot { + live.add(a.ID) + } + } + } + + // for each predecessor of b, expand its list of live-at-end values + // invariant: s contains the values live at the start of b (excluding phi inputs) + for i, e := range b.Preds { + p := e.b + t.clear() + t.addAll(s.live[p.ID]) + t.addAll(live.contents()) + t.addAll(spillLive[p.ID]) + for _, v := range phis { + a := v.Args[i] + if s.values[a.ID].needSlot { + t.add(a.ID) + } + if spill := s.values[a.ID].spill; spill != nil { + //TODO: remove? Subsumed by SpillUse? + t.add(spill.ID) + } + } + if t.size() == len(s.live[p.ID]) { + continue + } + // grow p's live set + s.live[p.ID] = append(s.live[p.ID][:0], t.contents()...) + changed = true + } + } + + if !changed { + break + } + } + if s.f.pass.debug > stackDebug { + for _, b := range s.f.Blocks { + fmt.Printf("stacklive %s %v\n", b, s.live[b.ID]) + } + } +} + +func (f *Func) getHome(vid ID) Location { + if int(vid) >= len(f.RegAlloc) { + return nil + } + return f.RegAlloc[vid] +} + +func (f *Func) setHome(v *Value, loc Location) { + for v.ID >= ID(len(f.RegAlloc)) { + f.RegAlloc = append(f.RegAlloc, nil) + } + f.RegAlloc[v.ID] = loc +} + +func (s *stackAllocState) buildInterferenceGraph() { + f := s.f + if n := f.NumValues(); cap(s.interfere) >= n { + s.interfere = s.interfere[:n] + } else { + s.interfere = make([][]ID, n) + } + live := f.newSparseSet(f.NumValues()) + defer f.retSparseSet(live) + for _, b := range f.Blocks { + // Propagate liveness backwards to the start of the block. + // Two values interfere if one is defined while the other is live. 
+ live.clear() + live.addAll(s.live[b.ID]) + for i := len(b.Values) - 1; i >= 0; i-- { + v := b.Values[i] + if s.values[v.ID].needSlot { + live.remove(v.ID) + for _, id := range live.contents() { + // Note: args can have different types and still interfere + // (with each other or with other values). See issue 23522. + if s.values[v.ID].typ.Compare(s.values[id].typ) == types.CMPeq || hasAnyArgOp(v) || s.values[id].isArg { + s.interfere[v.ID] = append(s.interfere[v.ID], id) + s.interfere[id] = append(s.interfere[id], v.ID) + } + } + } + for _, a := range v.Args { + if s.values[a.ID].needSlot { + live.add(a.ID) + } + } + if hasAnyArgOp(v) && s.values[v.ID].needSlot { + // OpArg is an input argument which is pre-spilled. + // We add back v.ID here because we want this value + // to appear live even before this point. Being live + // all the way to the start of the entry block prevents other + // values from being allocated to the same slot and clobbering + // the input value before we have a chance to load it. + + // TODO(register args) this is apparently not wrong for register args -- is it necessary? + live.add(v.ID) + } + } + } + if f.pass.debug > stackDebug { + for vid, i := range s.interfere { + if len(i) > 0 { + fmt.Printf("v%d interferes with", vid) + for _, x := range i { + fmt.Printf(" v%d", x) + } + fmt.Println() + } + } + } +} + +func hasAnyArgOp(v *Value) bool { + return v.Op == OpArg || v.Op == OpArgIntReg || v.Op == OpArgFloatReg +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/stmtlines_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/stmtlines_test.go new file mode 100644 index 0000000000000000000000000000000000000000..79bcab08a1e6988267381c4a5283cb8afb350daf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/stmtlines_test.go @@ -0,0 +1,158 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa_test + +import ( + cmddwarf "cmd/internal/dwarf" + "cmd/internal/quoted" + "debug/dwarf" + "debug/elf" + "debug/macho" + "debug/pe" + "fmt" + "internal/platform" + "internal/testenv" + "internal/xcoff" + "io" + "os" + "runtime" + "sort" + "testing" +) + +func open(path string) (*dwarf.Data, error) { + if fh, err := elf.Open(path); err == nil { + return fh.DWARF() + } + + if fh, err := pe.Open(path); err == nil { + return fh.DWARF() + } + + if fh, err := macho.Open(path); err == nil { + return fh.DWARF() + } + + if fh, err := xcoff.Open(path); err == nil { + return fh.DWARF() + } + + return nil, fmt.Errorf("unrecognized executable format") +} + +func must(err error) { + if err != nil { + panic(err) + } +} + +type Line struct { + File string + Line int +} + +func TestStmtLines(t *testing.T) { + if !platform.ExecutableHasDWARF(runtime.GOOS, runtime.GOARCH) { + t.Skipf("skipping on %s/%s: no DWARF symbol table in executables", runtime.GOOS, runtime.GOARCH) + } + + if runtime.GOOS == "aix" { + extld := os.Getenv("CC") + if extld == "" { + extld = "gcc" + } + extldArgs, err := quoted.Split(extld) + if err != nil { + t.Fatal(err) + } + enabled, err := cmddwarf.IsDWARFEnabledOnAIXLd(extldArgs) + if err != nil { + t.Fatal(err) + } + if !enabled { + t.Skip("skipping on aix: no DWARF with ld version < 7.2.2 ") + } + } + + // Build cmd/go forcing DWARF enabled, as a large test case. 
+ dir := t.TempDir() + out, err := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags=-w=0", "-o", dir+"/test.exe", "cmd/go").CombinedOutput() + if err != nil { + t.Fatalf("go build: %v\n%s", err, out) + } + + lines := map[Line]bool{} + dw, err := open(dir + "/test.exe") + must(err) + rdr := dw.Reader() + rdr.Seek(0) + for { + e, err := rdr.Next() + must(err) + if e == nil { + break + } + if e.Tag != dwarf.TagCompileUnit { + continue + } + pkgname, _ := e.Val(dwarf.AttrName).(string) + if pkgname == "runtime" { + continue + } + if pkgname == "crypto/internal/nistec/fiat" { + continue // golang.org/issue/49372 + } + if e.Val(dwarf.AttrStmtList) == nil { + continue + } + lrdr, err := dw.LineReader(e) + must(err) + + var le dwarf.LineEntry + + for { + err := lrdr.Next(&le) + if err == io.EOF { + break + } + must(err) + fl := Line{le.File.Name, le.Line} + lines[fl] = lines[fl] || le.IsStmt + } + } + + nonStmtLines := []Line{} + for line, isstmt := range lines { + if !isstmt { + nonStmtLines = append(nonStmtLines, line) + } + } + + var m int + if runtime.GOARCH == "amd64" { + m = 1 // > 99% obtained on amd64, no backsliding + } else if runtime.GOARCH == "riscv64" { + m = 3 // XXX temporary update threshold to 97% for regabi + } else { + m = 2 // expect 98% elsewhere. 
+ } + + if len(nonStmtLines)*100 > m*len(lines) { + t.Errorf("Saw too many (%s, > %d%%) lines without statement marks, total=%d, nostmt=%d ('-run TestStmtLines -v' lists failing lines)\n", runtime.GOARCH, m, len(lines), len(nonStmtLines)) + } + t.Logf("Saw %d out of %d lines without statement marks", len(nonStmtLines), len(lines)) + if testing.Verbose() { + sort.Slice(nonStmtLines, func(i, j int) bool { + if nonStmtLines[i].File != nonStmtLines[j].File { + return nonStmtLines[i].File < nonStmtLines[j].File + } + return nonStmtLines[i].Line < nonStmtLines[j].Line + }) + for _, l := range nonStmtLines { + t.Logf("%s:%d has no DWARF is_stmt mark\n", l.File, l.Line) + } + } + t.Logf("total=%d, nostmt=%d\n", len(lines), len(nonStmtLines)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/tighten.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/tighten.go new file mode 100644 index 0000000000000000000000000000000000000000..85b6a84cc3f426c905160af30fe6622a360fc1c7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/tighten.go @@ -0,0 +1,269 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import "cmd/compile/internal/base" + +// tighten moves Values closer to the Blocks in which they are used. +// This can reduce the amount of register spilling required, +// if it doesn't also create more live values. +// A Value can be moved to any block that +// dominates all blocks in which it is used. +func tighten(f *Func) { + if base.Flag.N != 0 && len(f.Blocks) < 10000 { + // Skip the optimization in -N mode, except for huge functions. + // Too many values live across blocks can cause pathological + // behavior in the register allocator (see issue 52180). 
+ return + } + + canMove := f.Cache.allocBoolSlice(f.NumValues()) + defer f.Cache.freeBoolSlice(canMove) + + // Compute the memory states of each block. + startMem := f.Cache.allocValueSlice(f.NumBlocks()) + defer f.Cache.freeValueSlice(startMem) + endMem := f.Cache.allocValueSlice(f.NumBlocks()) + defer f.Cache.freeValueSlice(endMem) + memState(f, startMem, endMem) + + for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Op.isLoweredGetClosurePtr() { + // Must stay in the entry block. + continue + } + switch v.Op { + case OpPhi, OpArg, OpArgIntReg, OpArgFloatReg, OpSelect0, OpSelect1, OpSelectN: + // Phis need to stay in their block. + // Arg must stay in the entry block. + // Tuple selectors must stay with the tuple generator. + // SelectN is typically, ultimately, a register. + continue + } + // Count arguments which will need a register. + narg := 0 + for _, a := range v.Args { + // SP and SB are special registers and have no effect on + // the allocation of general-purpose registers. + if a.needRegister() && a.Op != OpSB && a.Op != OpSP { + narg++ + } + } + if narg >= 2 && !v.Type.IsFlags() { + // Don't move values with more than one input, as that may + // increase register pressure. + // We make an exception for flags, as we want flag generators + // moved next to uses (because we only have 1 flag register). + continue + } + canMove[v.ID] = true + } + } + + // Build data structure for fast least-common-ancestor queries. + lca := makeLCArange(f) + + // For each moveable value, record the block that dominates all uses found so far. + target := f.Cache.allocBlockSlice(f.NumValues()) + defer f.Cache.freeBlockSlice(target) + + // Grab loop information. + // We use this to make sure we don't tighten a value into a (deeper) loop. 
+ idom := f.Idom() + loops := f.loopnest() + loops.calculateDepths() + + changed := true + for changed { + changed = false + + // Reset target + for i := range target { + target[i] = nil + } + + // Compute target locations (for moveable values only). + // target location = the least common ancestor of all uses in the dominator tree. + for _, b := range f.Blocks { + for _, v := range b.Values { + for i, a := range v.Args { + if !canMove[a.ID] { + continue + } + use := b + if v.Op == OpPhi { + use = b.Preds[i].b + } + if target[a.ID] == nil { + target[a.ID] = use + } else { + target[a.ID] = lca.find(target[a.ID], use) + } + } + } + for _, c := range b.ControlValues() { + if !canMove[c.ID] { + continue + } + if target[c.ID] == nil { + target[c.ID] = b + } else { + target[c.ID] = lca.find(target[c.ID], b) + } + } + } + + // If the target location is inside a loop, + // move the target location up to just before the loop head. + for _, b := range f.Blocks { + origloop := loops.b2l[b.ID] + for _, v := range b.Values { + t := target[v.ID] + if t == nil { + continue + } + targetloop := loops.b2l[t.ID] + for targetloop != nil && (origloop == nil || targetloop.depth > origloop.depth) { + t = idom[targetloop.header.ID] + target[v.ID] = t + targetloop = loops.b2l[t.ID] + } + } + } + + // Move values to target locations. + for _, b := range f.Blocks { + for i := 0; i < len(b.Values); i++ { + v := b.Values[i] + t := target[v.ID] + if t == nil || t == b { + // v is not moveable, or is already in correct place. + continue + } + if mem := v.MemoryArg(); mem != nil { + if startMem[t.ID] != mem { + // We can't move a value with a memory arg unless the target block + // has that memory arg as its starting memory. + continue + } + } + if f.pass.debug > 0 { + b.Func.Warnl(v.Pos, "%v is moved", v.Op) + } + // Move v to the block which dominates its uses. 
+ t.Values = append(t.Values, v) + v.Block = t + last := len(b.Values) - 1 + b.Values[i] = b.Values[last] + b.Values[last] = nil + b.Values = b.Values[:last] + changed = true + i-- + } + } + } +} + +// phiTighten moves constants closer to phi users. +// This pass avoids having lots of constants live for lots of the program. +// See issue 16407. +func phiTighten(f *Func) { + for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Op != OpPhi { + continue + } + for i, a := range v.Args { + if !a.rematerializeable() { + continue // not a constant we can move around + } + if a.Block == b.Preds[i].b { + continue // already in the right place + } + // Make a copy of a, put in predecessor block. + v.SetArg(i, a.copyInto(b.Preds[i].b)) + } + } + } +} + +// memState computes the memory state at the beginning and end of each block of +// the function. The memory state is represented by a value of mem type. +// The returned result is stored in startMem and endMem, and endMem is nil for +// blocks with no successors (Exit,Ret,RetJmp blocks). This algorithm is not +// suitable for infinite loop blocks that do not contain any mem operations. +// For example: +// b1: +// +// (some values) +// +// plain -> b2 +// b2: <- b1 b2 +// Plain -> b2 +// +// Algorithm introduction: +// 1. The start memory state of a block is InitMem, a Phi node of type mem or +// an incoming memory value. +// 2. The start memory state of a block is consistent with the end memory state +// of its parent nodes. If the start memory state of a block is a Phi value, +// then the end memory state of its parent nodes is consistent with the +// corresponding argument value of the Phi node. +// 3. The algorithm first obtains the memory state of some blocks in the tree +// in the first step. Then floods the known memory state to other nodes in +// the second step. 
+func memState(f *Func, startMem, endMem []*Value) { + // This slice contains the set of blocks that have had their startMem set but this + // startMem value has not yet been propagated to the endMem of its predecessors + changed := make([]*Block, 0) + // First step, init the memory state of some blocks. + for _, b := range f.Blocks { + for _, v := range b.Values { + var mem *Value + if v.Op == OpPhi { + if v.Type.IsMemory() { + mem = v + } + } else if v.Op == OpInitMem { + mem = v // This is actually not needed. + } else if a := v.MemoryArg(); a != nil && a.Block != b { + // The only incoming memory value doesn't belong to this block. + mem = a + } + if mem != nil { + if old := startMem[b.ID]; old != nil { + if old == mem { + continue + } + f.Fatalf("func %s, startMem[%v] has different values, old %v, new %v", f.Name, b, old, mem) + } + startMem[b.ID] = mem + changed = append(changed, b) + } + } + } + + // Second step, floods the known memory state of some blocks to others. + for len(changed) != 0 { + top := changed[0] + changed = changed[1:] + mem := startMem[top.ID] + for i, p := range top.Preds { + pb := p.b + if endMem[pb.ID] != nil { + continue + } + if mem.Op == OpPhi && mem.Block == top { + endMem[pb.ID] = mem.Args[i] + } else { + endMem[pb.ID] = mem + } + if startMem[pb.ID] == nil { + startMem[pb.ID] = endMem[pb.ID] + changed = append(changed, pb) + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/trim.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/trim.go new file mode 100644 index 0000000000000000000000000000000000000000..13798c6c397eb82513cb29b68607cc7ea46e58bc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/trim.go @@ -0,0 +1,172 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import "cmd/internal/src" + +// trim removes blocks with no code in them. +// These blocks were inserted to remove critical edges. +func trim(f *Func) { + n := 0 + for _, b := range f.Blocks { + if !trimmableBlock(b) { + f.Blocks[n] = b + n++ + continue + } + + bPos := b.Pos + bIsStmt := bPos.IsStmt() == src.PosIsStmt + + // Splice b out of the graph. NOTE: `mergePhi` depends on the + // order, in which the predecessors edges are merged here. + p, i := b.Preds[0].b, b.Preds[0].i + s, j := b.Succs[0].b, b.Succs[0].i + ns := len(s.Preds) + p.Succs[i] = Edge{s, j} + s.Preds[j] = Edge{p, i} + + for _, e := range b.Preds[1:] { + p, i := e.b, e.i + p.Succs[i] = Edge{s, len(s.Preds)} + s.Preds = append(s.Preds, Edge{p, i}) + } + + // Attempt to preserve a statement boundary + if bIsStmt { + sawStmt := false + for _, v := range s.Values { + if isPoorStatementOp(v.Op) { + continue + } + if v.Pos.SameFileAndLine(bPos) { + v.Pos = v.Pos.WithIsStmt() + } + sawStmt = true + break + } + if !sawStmt && s.Pos.SameFileAndLine(bPos) { + s.Pos = s.Pos.WithIsStmt() + } + } + // If `s` had more than one predecessor, update its phi-ops to + // account for the merge. + if ns > 1 { + for _, v := range s.Values { + if v.Op == OpPhi { + mergePhi(v, j, b) + } + + } + // Remove the phi-ops from `b` if they were merged into the + // phi-ops of `s`. + k := 0 + for _, v := range b.Values { + if v.Op == OpPhi { + if v.Uses == 0 { + v.resetArgs() + continue + } + // Pad the arguments of the remaining phi-ops so + // they match the new predecessor count of `s`. + // Since s did not have a Phi op corresponding to + // the phi op in b, the other edges coming into s + // must be loopback edges from s, so v is the right + // argument to v! 
+ args := make([]*Value, len(v.Args)) + copy(args, v.Args) + v.resetArgs() + for x := 0; x < j; x++ { + v.AddArg(v) + } + v.AddArg(args[0]) + for x := j + 1; x < ns; x++ { + v.AddArg(v) + } + for _, a := range args[1:] { + v.AddArg(a) + } + } + b.Values[k] = v + k++ + } + b.Values = b.Values[:k] + } + + // Merge the blocks' values. + for _, v := range b.Values { + v.Block = s + } + k := len(b.Values) + m := len(s.Values) + for i := 0; i < k; i++ { + s.Values = append(s.Values, nil) + } + copy(s.Values[k:], s.Values[:m]) + copy(s.Values, b.Values) + } + if n < len(f.Blocks) { + f.invalidateCFG() + tail := f.Blocks[n:] + for i := range tail { + tail[i] = nil + } + f.Blocks = f.Blocks[:n] + } +} + +// emptyBlock reports whether the block does not contain actual +// instructions. +func emptyBlock(b *Block) bool { + for _, v := range b.Values { + if v.Op != OpPhi { + return false + } + } + return true +} + +// trimmableBlock reports whether the block can be trimmed from the CFG, +// subject to the following criteria: +// - it should not be the first block. +// - it should be BlockPlain. +// - it should not loop back to itself. +// - it either is the single predecessor of the successor block or +// contains no actual instructions. +func trimmableBlock(b *Block) bool { + if b.Kind != BlockPlain || b == b.Func.Entry { + return false + } + s := b.Succs[0].b + return s != b && (len(s.Preds) == 1 || emptyBlock(b)) +} + +// mergePhi adjusts the number of `v`s arguments to account for merge +// of `b`, which was `i`th predecessor of the `v`s block. +func mergePhi(v *Value, i int, b *Block) { + u := v.Args[i] + if u.Block == b { + if u.Op != OpPhi { + b.Func.Fatalf("value %s is not a phi operation", u.LongString()) + } + // If the original block contained u = φ(u0, u1, ..., un) and + // the current phi is + // v = φ(v0, v1, ..., u, ..., vk) + // then the merged phi is + // v = φ(v0, v1, ..., u0, ..., vk, u1, ..., un) + v.SetArg(i, u.Args[0]) + v.AddArgs(u.Args[1:]...) 
+ } else { + // If the original block contained u = φ(u0, u1, ..., un) and + // the current phi is + // v = φ(v0, v1, ..., vi, ..., vk) + // i.e. it does not use a value from the predecessor block, + // then the merged phi is + // v = φ(v0, v1, ..., vk, vi, vi, ...) + for j := 1; j < len(b.Preds); j++ { + v.AddArg(v.Args[i]) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/tuple.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/tuple.go new file mode 100644 index 0000000000000000000000000000000000000000..289df40431a7a7b4ce2cb3083d7e24332d45af07 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/tuple.go @@ -0,0 +1,71 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// tightenTupleSelectors ensures that tuple selectors (Select0, Select1, +// and SelectN ops) are in the same block as their tuple generator. The +// function also ensures that there are no duplicate tuple selectors. +// These properties are expected by the scheduler but may not have +// been maintained by the optimization pipeline up to this point. +// +// See issues 16741 and 39472. 
+func tightenTupleSelectors(f *Func) { + selectors := make(map[struct { + id ID + which int + }]*Value) + for _, b := range f.Blocks { + for _, selector := range b.Values { + // Key fields for de-duplication + var tuple *Value + idx := 0 + switch selector.Op { + default: + continue + case OpSelect1: + idx = 1 + fallthrough + case OpSelect0: + tuple = selector.Args[0] + if !tuple.Type.IsTuple() { + f.Fatalf("arg of tuple selector %s is not a tuple: %s", selector.String(), tuple.LongString()) + } + case OpSelectN: + tuple = selector.Args[0] + idx = int(selector.AuxInt) + if !tuple.Type.IsResults() { + f.Fatalf("arg of result selector %s is not a results: %s", selector.String(), tuple.LongString()) + } + } + + // If there is a pre-existing selector in the target block then + // use that. Do this even if the selector is already in the + // target block to avoid duplicate tuple selectors. + key := struct { + id ID + which int + }{tuple.ID, idx} + if t := selectors[key]; t != nil { + if selector != t { + selector.copyOf(t) + } + continue + } + + // If the selector is in the wrong block copy it into the target + // block. + if selector.Block != tuple.Block { + t := selector.copyInto(tuple.Block) + selector.copyOf(t) + selectors[key] = t + continue + } + + // The selector is in the target block. Add it to the map so it + // cannot be duplicated. + selectors[key] = selector + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/value.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/value.go new file mode 100644 index 0000000000000000000000000000000000000000..4eaab40354c1715850cc01d05c0e131b5b277887 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/value.go @@ -0,0 +1,620 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import ( + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/src" + "fmt" + "math" + "sort" + "strings" +) + +// A Value represents a value in the SSA representation of the program. +// The ID and Type fields must not be modified. The remainder may be modified +// if they preserve the value of the Value (e.g. changing a (mul 2 x) to an (add x x)). +type Value struct { + // A unique identifier for the value. For performance we allocate these IDs + // densely starting at 1. There is no guarantee that there won't be occasional holes, though. + ID ID + + // The operation that computes this value. See op.go. + Op Op + + // The type of this value. Normally this will be a Go type, but there + // are a few other pseudo-types, see ../types/type.go. + Type *types.Type + + // Auxiliary info for this value. The type of this information depends on the opcode and type. + // AuxInt is used for integer values, Aux is used for other values. + // Floats are stored in AuxInt using math.Float64bits(f). + // Unused portions of AuxInt are filled by sign-extending the used portion, + // even if the represented value is unsigned. + // Users of AuxInt which interpret AuxInt as unsigned (e.g. shifts) must be careful. + // Use Value.AuxUnsigned to get the zero-extended value of AuxInt. + AuxInt int64 + Aux Aux + + // Arguments of this value + Args []*Value + + // Containing basic block + Block *Block + + // Source position + Pos src.XPos + + // Use count. Each appearance in Value.Args and Block.Controls counts once. + Uses int32 + + // wasm: Value stays on the WebAssembly stack. This value will not get a "register" (WebAssembly variable) + // nor a slot on Go stack, and the generation of this value is delayed to its use time. + OnWasmStack bool + + // Is this value in the per-function constant cache? If so, remove from cache before changing it or recycling it. 
+ InCache bool + + // Storage for the first three args + argstorage [3]*Value +} + +// Examples: +// Opcode aux args +// OpAdd nil 2 +// OpConst string 0 string constant +// OpConst int64 0 int64 constant +// OpAddcq int64 1 amd64 op: v = arg[0] + constant + +// short form print. Just v#. +func (v *Value) String() string { + if v == nil { + return "nil" // should never happen, but not panicking helps with debugging + } + return fmt.Sprintf("v%d", v.ID) +} + +func (v *Value) AuxInt8() int8 { + if opcodeTable[v.Op].auxType != auxInt8 && opcodeTable[v.Op].auxType != auxNameOffsetInt8 { + v.Fatalf("op %s doesn't have an int8 aux field", v.Op) + } + return int8(v.AuxInt) +} + +func (v *Value) AuxUInt8() uint8 { + if opcodeTable[v.Op].auxType != auxUInt8 { + v.Fatalf("op %s doesn't have a uint8 aux field", v.Op) + } + return uint8(v.AuxInt) +} + +func (v *Value) AuxInt16() int16 { + if opcodeTable[v.Op].auxType != auxInt16 { + v.Fatalf("op %s doesn't have an int16 aux field", v.Op) + } + return int16(v.AuxInt) +} + +func (v *Value) AuxInt32() int32 { + if opcodeTable[v.Op].auxType != auxInt32 { + v.Fatalf("op %s doesn't have an int32 aux field", v.Op) + } + return int32(v.AuxInt) +} + +// AuxUnsigned returns v.AuxInt as an unsigned value for OpConst*. +// v.AuxInt is always sign-extended to 64 bits, even if the +// represented value is unsigned. This undoes that sign extension. 
+func (v *Value) AuxUnsigned() uint64 { + c := v.AuxInt + switch v.Op { + case OpConst64: + return uint64(c) + case OpConst32: + return uint64(uint32(c)) + case OpConst16: + return uint64(uint16(c)) + case OpConst8: + return uint64(uint8(c)) + } + v.Fatalf("op %s isn't OpConst*", v.Op) + return 0 +} + +func (v *Value) AuxFloat() float64 { + if opcodeTable[v.Op].auxType != auxFloat32 && opcodeTable[v.Op].auxType != auxFloat64 { + v.Fatalf("op %s doesn't have a float aux field", v.Op) + } + return math.Float64frombits(uint64(v.AuxInt)) +} +func (v *Value) AuxValAndOff() ValAndOff { + if opcodeTable[v.Op].auxType != auxSymValAndOff { + v.Fatalf("op %s doesn't have a ValAndOff aux field", v.Op) + } + return ValAndOff(v.AuxInt) +} + +func (v *Value) AuxArm64BitField() arm64BitField { + if opcodeTable[v.Op].auxType != auxARM64BitField { + v.Fatalf("op %s doesn't have a ValAndOff aux field", v.Op) + } + return arm64BitField(v.AuxInt) +} + +// long form print. v# = opcode [aux] args [: reg] (names) +func (v *Value) LongString() string { + if v == nil { + return "" + } + s := fmt.Sprintf("v%d = %s", v.ID, v.Op) + s += " <" + v.Type.String() + ">" + s += v.auxString() + for _, a := range v.Args { + s += fmt.Sprintf(" %v", a) + } + if v.Block == nil { + return s + } + r := v.Block.Func.RegAlloc + if int(v.ID) < len(r) && r[v.ID] != nil { + s += " : " + r[v.ID].String() + } + if reg := v.Block.Func.tempRegs[v.ID]; reg != nil { + s += " tmp=" + reg.String() + } + var names []string + for name, values := range v.Block.Func.NamedValues { + for _, value := range values { + if value == v { + names = append(names, name.String()) + break // drop duplicates. + } + } + } + if len(names) != 0 { + sort.Strings(names) // Otherwise a source of variation in debugging output. 
+ s += " (" + strings.Join(names, ", ") + ")" + } + return s +} + +func (v *Value) auxString() string { + switch opcodeTable[v.Op].auxType { + case auxBool: + if v.AuxInt == 0 { + return " [false]" + } else { + return " [true]" + } + case auxInt8: + return fmt.Sprintf(" [%d]", v.AuxInt8()) + case auxInt16: + return fmt.Sprintf(" [%d]", v.AuxInt16()) + case auxInt32: + return fmt.Sprintf(" [%d]", v.AuxInt32()) + case auxInt64, auxInt128: + return fmt.Sprintf(" [%d]", v.AuxInt) + case auxUInt8: + return fmt.Sprintf(" [%d]", v.AuxUInt8()) + case auxARM64BitField: + lsb := v.AuxArm64BitField().getARM64BFlsb() + width := v.AuxArm64BitField().getARM64BFwidth() + return fmt.Sprintf(" [lsb=%d,width=%d]", lsb, width) + case auxFloat32, auxFloat64: + return fmt.Sprintf(" [%g]", v.AuxFloat()) + case auxString: + return fmt.Sprintf(" {%q}", v.Aux) + case auxSym, auxCall, auxTyp: + if v.Aux != nil { + return fmt.Sprintf(" {%v}", v.Aux) + } + return "" + case auxSymOff, auxCallOff, auxTypSize, auxNameOffsetInt8: + s := "" + if v.Aux != nil { + s = fmt.Sprintf(" {%v}", v.Aux) + } + if v.AuxInt != 0 || opcodeTable[v.Op].auxType == auxNameOffsetInt8 { + s += fmt.Sprintf(" [%v]", v.AuxInt) + } + return s + case auxSymValAndOff: + s := "" + if v.Aux != nil { + s = fmt.Sprintf(" {%v}", v.Aux) + } + return s + fmt.Sprintf(" [%s]", v.AuxValAndOff()) + case auxCCop: + return fmt.Sprintf(" {%s}", Op(v.AuxInt)) + case auxS390XCCMask, auxS390XRotateParams: + return fmt.Sprintf(" {%v}", v.Aux) + case auxFlagConstant: + return fmt.Sprintf("[%s]", flagConstant(v.AuxInt)) + case auxNone: + return "" + default: + // If you see this, add a case above instead. + return fmt.Sprintf("[auxtype=%d AuxInt=%d Aux=%v]", opcodeTable[v.Op].auxType, v.AuxInt, v.Aux) + } +} + +// If/when midstack inlining is enabled (-l=4), the compiler gets both larger and slower. +// Not-inlining this method is a help (*Value.reset and *Block.NewValue0 are similar). 
+// +//go:noinline +func (v *Value) AddArg(w *Value) { + if v.Args == nil { + v.resetArgs() // use argstorage + } + v.Args = append(v.Args, w) + w.Uses++ +} + +//go:noinline +func (v *Value) AddArg2(w1, w2 *Value) { + if v.Args == nil { + v.resetArgs() // use argstorage + } + v.Args = append(v.Args, w1, w2) + w1.Uses++ + w2.Uses++ +} + +//go:noinline +func (v *Value) AddArg3(w1, w2, w3 *Value) { + if v.Args == nil { + v.resetArgs() // use argstorage + } + v.Args = append(v.Args, w1, w2, w3) + w1.Uses++ + w2.Uses++ + w3.Uses++ +} + +//go:noinline +func (v *Value) AddArg4(w1, w2, w3, w4 *Value) { + v.Args = append(v.Args, w1, w2, w3, w4) + w1.Uses++ + w2.Uses++ + w3.Uses++ + w4.Uses++ +} + +//go:noinline +func (v *Value) AddArg5(w1, w2, w3, w4, w5 *Value) { + v.Args = append(v.Args, w1, w2, w3, w4, w5) + w1.Uses++ + w2.Uses++ + w3.Uses++ + w4.Uses++ + w5.Uses++ +} + +//go:noinline +func (v *Value) AddArg6(w1, w2, w3, w4, w5, w6 *Value) { + v.Args = append(v.Args, w1, w2, w3, w4, w5, w6) + w1.Uses++ + w2.Uses++ + w3.Uses++ + w4.Uses++ + w5.Uses++ + w6.Uses++ +} + +func (v *Value) AddArgs(a ...*Value) { + if v.Args == nil { + v.resetArgs() // use argstorage + } + v.Args = append(v.Args, a...) + for _, x := range a { + x.Uses++ + } +} +func (v *Value) SetArg(i int, w *Value) { + v.Args[i].Uses-- + v.Args[i] = w + w.Uses++ +} +func (v *Value) SetArgs1(a *Value) { + v.resetArgs() + v.AddArg(a) +} +func (v *Value) SetArgs2(a, b *Value) { + v.resetArgs() + v.AddArg(a) + v.AddArg(b) +} +func (v *Value) SetArgs3(a, b, c *Value) { + v.resetArgs() + v.AddArg(a) + v.AddArg(b) + v.AddArg(c) +} + +func (v *Value) resetArgs() { + for _, a := range v.Args { + a.Uses-- + } + v.argstorage[0] = nil + v.argstorage[1] = nil + v.argstorage[2] = nil + v.Args = v.argstorage[:0] +} + +// reset is called from most rewrite rules. +// Allowing it to be inlined increases the size +// of cmd/compile by almost 10%, and slows it down. 
+// +//go:noinline +func (v *Value) reset(op Op) { + if v.InCache { + v.Block.Func.unCache(v) + } + v.Op = op + v.resetArgs() + v.AuxInt = 0 + v.Aux = nil +} + +// invalidateRecursively marks a value as invalid (unused) +// and after decrementing reference counts on its Args, +// also recursively invalidates any of those whose use +// count goes to zero. It returns whether any of the +// invalidated values was marked with IsStmt. +// +// BEWARE of doing this *before* you've applied intended +// updates to SSA. +func (v *Value) invalidateRecursively() bool { + lostStmt := v.Pos.IsStmt() == src.PosIsStmt + if v.InCache { + v.Block.Func.unCache(v) + } + v.Op = OpInvalid + + for _, a := range v.Args { + a.Uses-- + if a.Uses == 0 { + lost := a.invalidateRecursively() + lostStmt = lost || lostStmt + } + } + + v.argstorage[0] = nil + v.argstorage[1] = nil + v.argstorage[2] = nil + v.Args = v.argstorage[:0] + + v.AuxInt = 0 + v.Aux = nil + return lostStmt +} + +// copyOf is called from rewrite rules. +// It modifies v to be (Copy a). +// +//go:noinline +func (v *Value) copyOf(a *Value) { + if v == a { + return + } + if v.InCache { + v.Block.Func.unCache(v) + } + v.Op = OpCopy + v.resetArgs() + v.AddArg(a) + v.AuxInt = 0 + v.Aux = nil + v.Type = a.Type +} + +// copyInto makes a new value identical to v and adds it to the end of b. +// unlike copyIntoWithXPos this does not check for v.Pos being a statement. +func (v *Value) copyInto(b *Block) *Value { + c := b.NewValue0(v.Pos.WithNotStmt(), v.Op, v.Type) // Lose the position, this causes line number churn otherwise. + c.Aux = v.Aux + c.AuxInt = v.AuxInt + c.AddArgs(v.Args...) + for _, a := range v.Args { + if a.Type.IsMemory() { + v.Fatalf("can't move a value with a memory arg %s", v.LongString()) + } + } + return c +} + +// copyIntoWithXPos makes a new value identical to v and adds it to the end of b. +// The supplied position is used as the position of the new value. 
+// Because this is used for rematerialization, check for case that (rematerialized) +// input to value with position 'pos' carried a statement mark, and that the supplied +// position (of the instruction using the rematerialized value) is not marked, and +// preserve that mark if its line matches the supplied position. +func (v *Value) copyIntoWithXPos(b *Block, pos src.XPos) *Value { + if v.Pos.IsStmt() == src.PosIsStmt && pos.IsStmt() != src.PosIsStmt && v.Pos.SameFileAndLine(pos) { + pos = pos.WithIsStmt() + } + c := b.NewValue0(pos, v.Op, v.Type) + c.Aux = v.Aux + c.AuxInt = v.AuxInt + c.AddArgs(v.Args...) + for _, a := range v.Args { + if a.Type.IsMemory() { + v.Fatalf("can't move a value with a memory arg %s", v.LongString()) + } + } + return c +} + +func (v *Value) Logf(msg string, args ...interface{}) { v.Block.Logf(msg, args...) } +func (v *Value) Log() bool { return v.Block.Log() } +func (v *Value) Fatalf(msg string, args ...interface{}) { + v.Block.Func.fe.Fatalf(v.Pos, msg, args...) +} + +// isGenericIntConst reports whether v is a generic integer constant. +func (v *Value) isGenericIntConst() bool { + return v != nil && (v.Op == OpConst64 || v.Op == OpConst32 || v.Op == OpConst16 || v.Op == OpConst8) +} + +// ResultReg returns the result register assigned to v, in cmd/internal/obj/$ARCH numbering. +// It is similar to Reg and Reg0, except that it is usable interchangeably for all Value Ops. +// If you know v.Op, using Reg or Reg0 (as appropriate) will be more efficient. +func (v *Value) ResultReg() int16 { + reg := v.Block.Func.RegAlloc[v.ID] + if reg == nil { + v.Fatalf("nil reg for value: %s\n%s\n", v.LongString(), v.Block.Func) + } + if pair, ok := reg.(LocPair); ok { + reg = pair[0] + } + if reg == nil { + v.Fatalf("nil reg0 for value: %s\n%s\n", v.LongString(), v.Block.Func) + } + return reg.(*Register).objNum +} + +// Reg returns the register assigned to v, in cmd/internal/obj/$ARCH numbering. 
+func (v *Value) Reg() int16 { + reg := v.Block.Func.RegAlloc[v.ID] + if reg == nil { + v.Fatalf("nil register for value: %s\n%s\n", v.LongString(), v.Block.Func) + } + return reg.(*Register).objNum +} + +// Reg0 returns the register assigned to the first output of v, in cmd/internal/obj/$ARCH numbering. +func (v *Value) Reg0() int16 { + reg := v.Block.Func.RegAlloc[v.ID].(LocPair)[0] + if reg == nil { + v.Fatalf("nil first register for value: %s\n%s\n", v.LongString(), v.Block.Func) + } + return reg.(*Register).objNum +} + +// Reg1 returns the register assigned to the second output of v, in cmd/internal/obj/$ARCH numbering. +func (v *Value) Reg1() int16 { + reg := v.Block.Func.RegAlloc[v.ID].(LocPair)[1] + if reg == nil { + v.Fatalf("nil second register for value: %s\n%s\n", v.LongString(), v.Block.Func) + } + return reg.(*Register).objNum +} + +// RegTmp returns the temporary register assigned to v, in cmd/internal/obj/$ARCH numbering. +func (v *Value) RegTmp() int16 { + reg := v.Block.Func.tempRegs[v.ID] + if reg == nil { + v.Fatalf("nil tmp register for value: %s\n%s\n", v.LongString(), v.Block.Func) + } + return reg.objNum +} + +func (v *Value) RegName() string { + reg := v.Block.Func.RegAlloc[v.ID] + if reg == nil { + v.Fatalf("nil register for value: %s\n%s\n", v.LongString(), v.Block.Func) + } + return reg.(*Register).name +} + +// MemoryArg returns the memory argument for the Value. +// The returned value, if non-nil, will be memory-typed (or a tuple with a memory-typed second part). +// Otherwise, nil is returned. +func (v *Value) MemoryArg() *Value { + if v.Op == OpPhi { + v.Fatalf("MemoryArg on Phi") + } + na := len(v.Args) + if na == 0 { + return nil + } + if m := v.Args[na-1]; m.Type.IsMemory() { + return m + } + return nil +} + +// LackingPos indicates whether v is a value that is unlikely to have a correct +// position assigned to it. 
Ignoring such values leads to more user-friendly positions +// assigned to nearby values and the blocks containing them. +func (v *Value) LackingPos() bool { + // The exact definition of LackingPos is somewhat heuristically defined and may change + // in the future, for example if some of these operations are generated more carefully + // with respect to their source position. + return v.Op == OpVarDef || v.Op == OpVarLive || v.Op == OpPhi || + (v.Op == OpFwdRef || v.Op == OpCopy) && v.Type == types.TypeMem +} + +// removeable reports whether the value v can be removed from the SSA graph entirely +// if its use count drops to 0. +func (v *Value) removeable() bool { + if v.Type.IsVoid() { + // Void ops (inline marks), must stay. + return false + } + if opcodeTable[v.Op].nilCheck { + // Nil pointer checks must stay. + return false + } + if v.Type.IsMemory() { + // We don't need to preserve all memory ops, but we do need + // to keep calls at least (because they might have + // synchronization operations we can't see). + return false + } + if v.Op.HasSideEffects() { + // These are mostly synchronization operations. + return false + } + return true +} + +// AutoVar returns a *Name and int64 representing the auto variable and offset within it +// where v should be spilled. +func AutoVar(v *Value) (*ir.Name, int64) { + if loc, ok := v.Block.Func.RegAlloc[v.ID].(LocalSlot); ok { + if v.Type.Size() > loc.Type.Size() { + v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) + } + return loc.N, loc.Off + } + // Assume it is a register, return its spill slot, which needs to be live + nameOff := v.Aux.(*AuxNameOffset) + return nameOff.Name, nameOff.Offset +} + +// CanSSA reports whether values of type t can be represented as a Value. +func CanSSA(t *types.Type) bool { + types.CalcSize(t) + if t.Size() > int64(4*types.PtrSize) { + // 4*Widthptr is an arbitrary constant. We want it + // to be at least 3*Widthptr so slices can be registerized. 
+ // Too big and we'll introduce too much register pressure. + return false + } + switch t.Kind() { + case types.TARRAY: + // We can't do larger arrays because dynamic indexing is + // not supported on SSA variables. + // TODO: allow if all indexes are constant. + if t.NumElem() <= 1 { + return CanSSA(t.Elem()) + } + return false + case types.TSTRUCT: + if t.NumFields() > MaxStruct { + return false + } + for _, t1 := range t.Fields() { + if !CanSSA(t1.Type) { + return false + } + } + return true + default: + return true + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/writebarrier.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/writebarrier.go new file mode 100644 index 0000000000000000000000000000000000000000..1caccb7c18d3c43ca24ff9814ef7b10d0660826d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/writebarrier.go @@ -0,0 +1,804 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" + "fmt" + "internal/buildcfg" +) + +// A ZeroRegion records parts of an object which are known to be zero. +// A ZeroRegion only applies to a single memory state. +// Each bit in mask is set if the corresponding pointer-sized word of +// the base object is known to be zero. +// In other words, if mask & (1< 64*ptrSize { + // memory range goes off end of tracked offsets + return true + } + z := zeroes[mem.ID] + if ptr != z.base { + // This isn't the object we know about at this memory state. + return true + } + // Mask of bits we're asking about + m := (uint64(1)<<(size/ptrSize) - 1) << (off / ptrSize) + + if z.mask&m == m { + // All locations are known to be zero, so no heap pointers. 
+ return false + } + return true +} + +// needwb reports whether we need write barrier for store op v. +// v must be Store/Move/Zero. +// zeroes provides known zero information (keyed by ID of memory-type values). +func needwb(v *Value, zeroes map[ID]ZeroRegion) bool { + t, ok := v.Aux.(*types.Type) + if !ok { + v.Fatalf("store aux is not a type: %s", v.LongString()) + } + if !t.HasPointers() { + return false + } + dst := v.Args[0] + if IsStackAddr(dst) { + return false // writes into the stack don't need write barrier + } + // If we're writing to a place that might have heap pointers, we need + // the write barrier. + if mightContainHeapPointer(dst, t.Size(), v.MemoryArg(), zeroes) { + return true + } + // Lastly, check if the values we're writing might be heap pointers. + // If they aren't, we don't need a write barrier. + switch v.Op { + case OpStore: + if !mightBeHeapPointer(v.Args[1]) { + return false + } + case OpZero: + return false // nil is not a heap pointer + case OpMove: + if !mightContainHeapPointer(v.Args[1], t.Size(), v.Args[2], zeroes) { + return false + } + default: + v.Fatalf("store op unknown: %s", v.LongString()) + } + return true +} + +// needWBsrc reports whether GC needs to see v when it is the source of a store. +func needWBsrc(v *Value) bool { + return !IsGlobalAddr(v) +} + +// needWBdst reports whether GC needs to see what used to be in *ptr when ptr is +// the target of a pointer store. +func needWBdst(ptr, mem *Value, zeroes map[ID]ZeroRegion) bool { + // Detect storing to zeroed memory. 
+ var off int64 + for ptr.Op == OpOffPtr { + off += ptr.AuxInt + ptr = ptr.Args[0] + } + ptrSize := ptr.Block.Func.Config.PtrSize + if off%ptrSize != 0 { + return true // see issue 61187 + } + if off < 0 || off >= 64*ptrSize { + // write goes off end of tracked offsets + return true + } + z := zeroes[mem.ID] + if ptr != z.base { + return true + } + // If destination is known to be zeroed, we don't need the write barrier + // to record the old value in *ptr. + return z.mask>>uint(off/ptrSize)&1 == 0 +} + +// writebarrier pass inserts write barriers for store ops (Store, Move, Zero) +// when necessary (the condition above). It rewrites store ops to branches +// and runtime calls, like +// +// if writeBarrier.enabled { +// buf := gcWriteBarrier2() // Not a regular Go call +// buf[0] = val +// buf[1] = *ptr +// } +// *ptr = val +// +// A sequence of WB stores for many pointer fields of a single type will +// be emitted together, with a single branch. +func writebarrier(f *Func) { + if !f.fe.UseWriteBarrier() { + return + } + + // Number of write buffer entries we can request at once. + // Must match runtime/mwbbuf.go:wbMaxEntriesPerCall. + // It must also match the number of instances of runtime.gcWriteBarrier{X}. + const maxEntries = 8 + + var sb, sp, wbaddr, const0 *Value + var cgoCheckPtrWrite, cgoCheckMemmove *obj.LSym + var wbZero, wbMove *obj.LSym + var stores, after []*Value + var sset, sset2 *sparseSet + var storeNumber []int32 + + // Compute map from a value to the SelectN [1] value that uses it. 
+ select1 := f.Cache.allocValueSlice(f.NumValues()) + defer func() { f.Cache.freeValueSlice(select1) }() + for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Op != OpSelectN { + continue + } + if v.AuxInt != 1 { + continue + } + select1[v.Args[0].ID] = v + } + } + + zeroes := f.computeZeroMap(select1) + for _, b := range f.Blocks { // range loop is safe since the blocks we added contain no stores to expand + // first, identify all the stores that need to insert a write barrier. + // mark them with WB ops temporarily. record presence of WB ops. + nWBops := 0 // count of temporarily created WB ops remaining to be rewritten in the current block + for _, v := range b.Values { + switch v.Op { + case OpStore, OpMove, OpZero: + if needwb(v, zeroes) { + switch v.Op { + case OpStore: + v.Op = OpStoreWB + case OpMove: + v.Op = OpMoveWB + case OpZero: + v.Op = OpZeroWB + } + nWBops++ + } + } + } + if nWBops == 0 { + continue + } + + if wbaddr == nil { + // lazily initialize global values for write barrier test and calls + // find SB and SP values in entry block + initpos := f.Entry.Pos + sp, sb = f.spSb() + wbsym := f.fe.Syslook("writeBarrier") + wbaddr = f.Entry.NewValue1A(initpos, OpAddr, f.Config.Types.UInt32Ptr, wbsym, sb) + wbZero = f.fe.Syslook("wbZero") + wbMove = f.fe.Syslook("wbMove") + if buildcfg.Experiment.CgoCheck2 { + cgoCheckPtrWrite = f.fe.Syslook("cgoCheckPtrWrite") + cgoCheckMemmove = f.fe.Syslook("cgoCheckMemmove") + } + const0 = f.ConstInt32(f.Config.Types.UInt32, 0) + + // allocate auxiliary data structures for computing store order + sset = f.newSparseSet(f.NumValues()) + defer f.retSparseSet(sset) + sset2 = f.newSparseSet(f.NumValues()) + defer f.retSparseSet(sset2) + storeNumber = f.Cache.allocInt32Slice(f.NumValues()) + defer f.Cache.freeInt32Slice(storeNumber) + } + + // order values in store order + b.Values = storeOrder(b.Values, sset, storeNumber) + again: + // find the start and end of the last contiguous WB store sequence. 
+ // a branch will be inserted there. values after it will be moved + // to a new block. + var last *Value + var start, end int + var nonPtrStores int + values := b.Values + FindSeq: + for i := len(values) - 1; i >= 0; i-- { + w := values[i] + switch w.Op { + case OpStoreWB, OpMoveWB, OpZeroWB: + start = i + if last == nil { + last = w + end = i + 1 + } + nonPtrStores = 0 + case OpVarDef, OpVarLive: + continue + case OpStore: + if last == nil { + continue + } + nonPtrStores++ + if nonPtrStores > 2 { + break FindSeq + } + default: + if last == nil { + continue + } + break FindSeq + } + } + stores = append(stores[:0], b.Values[start:end]...) // copy to avoid aliasing + after = append(after[:0], b.Values[end:]...) + b.Values = b.Values[:start] + + // find the memory before the WB stores + mem := stores[0].MemoryArg() + pos := stores[0].Pos + + // If the source of a MoveWB is volatile (will be clobbered by a + // function call), we need to copy it to a temporary location, as + // marshaling the args of wbMove might clobber the value we're + // trying to move. + // Look for volatile source, copy it to temporary before we check + // the write barrier flag. + // It is unlikely to have more than one of them. Just do a linear + // search instead of using a map. + // See issue 15854. + type volatileCopy struct { + src *Value // address of original volatile value + tmp *Value // address of temporary we've copied the volatile value into + } + var volatiles []volatileCopy + + if !(f.ABIDefault == f.ABI1 && len(f.Config.intParamRegs) >= 3) { + // We don't need to do this if the calls we're going to do take + // all their arguments in registers. + // 3 is the magic number because it covers wbZero, wbMove, cgoCheckMemmove. 
+ copyLoop: + for _, w := range stores { + if w.Op == OpMoveWB { + val := w.Args[1] + if isVolatile(val) { + for _, c := range volatiles { + if val == c.src { + continue copyLoop // already copied + } + } + + t := val.Type.Elem() + tmp := f.NewLocal(w.Pos, t) + mem = b.NewValue1A(w.Pos, OpVarDef, types.TypeMem, tmp, mem) + tmpaddr := b.NewValue2A(w.Pos, OpLocalAddr, t.PtrTo(), tmp, sp, mem) + siz := t.Size() + mem = b.NewValue3I(w.Pos, OpMove, types.TypeMem, siz, tmpaddr, val, mem) + mem.Aux = t + volatiles = append(volatiles, volatileCopy{val, tmpaddr}) + } + } + } + } + + // Build branch point. + bThen := f.NewBlock(BlockPlain) + bEnd := f.NewBlock(b.Kind) + bThen.Pos = pos + bEnd.Pos = b.Pos + b.Pos = pos + + // Set up control flow for end block. + bEnd.CopyControls(b) + bEnd.Likely = b.Likely + for _, e := range b.Succs { + bEnd.Succs = append(bEnd.Succs, e) + e.b.Preds[e.i].b = bEnd + } + + // set up control flow for write barrier test + // load word, test word, avoiding partial register write from load byte. + cfgtypes := &f.Config.Types + flag := b.NewValue2(pos, OpLoad, cfgtypes.UInt32, wbaddr, mem) + flag = b.NewValue2(pos, OpNeq32, cfgtypes.Bool, flag, const0) + b.Kind = BlockIf + b.SetControl(flag) + b.Likely = BranchUnlikely + b.Succs = b.Succs[:0] + b.AddEdgeTo(bThen) + b.AddEdgeTo(bEnd) + bThen.AddEdgeTo(bEnd) + + // For each write barrier store, append write barrier code to bThen. 
+ memThen := mem + var curCall *Value + var curPtr *Value + addEntry := func(pos src.XPos, v *Value) { + if curCall == nil || curCall.AuxInt == maxEntries { + t := types.NewTuple(types.Types[types.TUINTPTR].PtrTo(), types.TypeMem) + curCall = bThen.NewValue1(pos, OpWB, t, memThen) + curPtr = bThen.NewValue1(pos, OpSelect0, types.Types[types.TUINTPTR].PtrTo(), curCall) + memThen = bThen.NewValue1(pos, OpSelect1, types.TypeMem, curCall) + } + // Store value in write buffer + num := curCall.AuxInt + curCall.AuxInt = num + 1 + wbuf := bThen.NewValue1I(pos, OpOffPtr, types.Types[types.TUINTPTR].PtrTo(), num*f.Config.PtrSize, curPtr) + memThen = bThen.NewValue3A(pos, OpStore, types.TypeMem, types.Types[types.TUINTPTR], wbuf, v, memThen) + } + + // Note: we can issue the write barrier code in any order. In particular, + // it doesn't matter if they are in a different order *even if* they end + // up referring to overlapping memory regions. For instance if an OpStore + // stores to a location that is later read by an OpMove. In all cases + // any pointers we must get into the write barrier buffer still make it, + // possibly in a different order and possibly a different (but definitely + // more than 0) number of times. + // In light of that, we process all the OpStoreWBs first. This minimizes + // the amount of spill/restore code we need around the Zero/Move calls. + + // srcs contains the value IDs of pointer values we've put in the write barrier buffer. + srcs := sset + srcs.clear() + // dsts contains the value IDs of locations which we've read a pointer out of + // and put the result in the write barrier buffer. 
+ dsts := sset2 + dsts.clear() + + for _, w := range stores { + if w.Op != OpStoreWB { + continue + } + pos := w.Pos + ptr := w.Args[0] + val := w.Args[1] + if !srcs.contains(val.ID) && needWBsrc(val) { + srcs.add(val.ID) + addEntry(pos, val) + } + if !dsts.contains(ptr.ID) && needWBdst(ptr, w.Args[2], zeroes) { + dsts.add(ptr.ID) + // Load old value from store target. + // Note: This turns bad pointer writes into bad + // pointer reads, which could be confusing. We could avoid + // reading from obviously bad pointers, which would + // take care of the vast majority of these. We could + // patch this up in the signal handler, or use XCHG to + // combine the read and the write. + oldVal := bThen.NewValue2(pos, OpLoad, types.Types[types.TUINTPTR], ptr, memThen) + // Save old value to write buffer. + addEntry(pos, oldVal) + } + f.fe.Func().SetWBPos(pos) + nWBops-- + } + + for _, w := range stores { + pos := w.Pos + switch w.Op { + case OpZeroWB: + dst := w.Args[0] + typ := reflectdata.TypeLinksym(w.Aux.(*types.Type)) + // zeroWB(&typ, dst) + taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb) + memThen = wbcall(pos, bThen, wbZero, sp, memThen, taddr, dst) + f.fe.Func().SetWBPos(pos) + nWBops-- + case OpMoveWB: + dst := w.Args[0] + src := w.Args[1] + if isVolatile(src) { + for _, c := range volatiles { + if src == c.src { + src = c.tmp + break + } + } + } + typ := reflectdata.TypeLinksym(w.Aux.(*types.Type)) + // moveWB(&typ, dst, src) + taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb) + memThen = wbcall(pos, bThen, wbMove, sp, memThen, taddr, dst, src) + f.fe.Func().SetWBPos(pos) + nWBops-- + } + } + + // merge memory + mem = bEnd.NewValue2(pos, OpPhi, types.TypeMem, mem, memThen) + + // Do raw stores after merge point. + for _, w := range stores { + pos := w.Pos + switch w.Op { + case OpStoreWB: + ptr := w.Args[0] + val := w.Args[1] + if buildcfg.Experiment.CgoCheck2 { + // Issue cgo checking code. 
+ mem = wbcall(pos, bEnd, cgoCheckPtrWrite, sp, mem, ptr, val) + } + mem = bEnd.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, mem) + case OpZeroWB: + dst := w.Args[0] + mem = bEnd.NewValue2I(pos, OpZero, types.TypeMem, w.AuxInt, dst, mem) + mem.Aux = w.Aux + case OpMoveWB: + dst := w.Args[0] + src := w.Args[1] + if isVolatile(src) { + for _, c := range volatiles { + if src == c.src { + src = c.tmp + break + } + } + } + if buildcfg.Experiment.CgoCheck2 { + // Issue cgo checking code. + typ := reflectdata.TypeLinksym(w.Aux.(*types.Type)) + taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb) + mem = wbcall(pos, bEnd, cgoCheckMemmove, sp, mem, taddr, dst, src) + } + mem = bEnd.NewValue3I(pos, OpMove, types.TypeMem, w.AuxInt, dst, src, mem) + mem.Aux = w.Aux + case OpVarDef, OpVarLive: + mem = bEnd.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, mem) + case OpStore: + ptr := w.Args[0] + val := w.Args[1] + mem = bEnd.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, mem) + } + } + + // The last store becomes the WBend marker. This marker is used by the liveness + // pass to determine what parts of the code are preemption-unsafe. + // All subsequent memory operations use this memory, so we have to sacrifice the + // previous last memory op to become this new value. + bEnd.Values = append(bEnd.Values, last) + last.Block = bEnd + last.reset(OpWBend) + last.Pos = last.Pos.WithNotStmt() + last.Type = types.TypeMem + last.AddArg(mem) + + // Free all the old stores, except last which became the WBend marker. + for _, w := range stores { + if w != last { + w.resetArgs() + } + } + for _, w := range stores { + if w != last { + f.freeValue(w) + } + } + + // put values after the store sequence into the end block + bEnd.Values = append(bEnd.Values, after...) 
+ for _, w := range after { + w.Block = bEnd + } + + // if we have more stores in this block, do this block again + if nWBops > 0 { + goto again + } + } +} + +// computeZeroMap returns a map from an ID of a memory value to +// a set of locations that are known to be zeroed at that memory value. +func (f *Func) computeZeroMap(select1 []*Value) map[ID]ZeroRegion { + + ptrSize := f.Config.PtrSize + // Keep track of which parts of memory are known to be zero. + // This helps with removing write barriers for various initialization patterns. + // This analysis is conservative. We only keep track, for each memory state, of + // which of the first 64 words of a single object are known to be zero. + zeroes := map[ID]ZeroRegion{} + // Find new objects. + for _, b := range f.Blocks { + for _, v := range b.Values { + if mem, ok := IsNewObject(v, select1); ok { + // While compiling package runtime itself, we might see user + // calls to newobject, which will have result type + // unsafe.Pointer instead. We can't easily infer how large the + // allocated memory is, so just skip it. + if types.LocalPkg.Path == "runtime" && v.Type.IsUnsafePtr() { + continue + } + + nptr := v.Type.Elem().Size() / ptrSize + if nptr > 64 { + nptr = 64 + } + zeroes[mem.ID] = ZeroRegion{base: v, mask: 1< 64*ptrSize { + max = 64 * ptrSize + } + // Clear bits for parts that we are writing (and hence + // will no longer necessarily be zero). + for i := min; i < max; i += ptrSize { + bit := i / ptrSize + z.mask &^= 1 << uint(bit) + } + if z.mask == 0 { + // No more known zeros - don't bother keeping. + continue + } + // Save updated known zero contents for new store. 
+ if zeroes[v.ID] != z { + zeroes[v.ID] = z + changed = true + } + } + } + if !changed { + break + } + } + if f.pass.debug > 0 { + fmt.Printf("func %s\n", f.Name) + for mem, z := range zeroes { + fmt.Printf(" memory=v%d ptr=%v zeromask=%b\n", mem, z.base, z.mask) + } + } + return zeroes +} + +// wbcall emits write barrier runtime call in b, returns memory. +func wbcall(pos src.XPos, b *Block, fn *obj.LSym, sp, mem *Value, args ...*Value) *Value { + config := b.Func.Config + typ := config.Types.Uintptr // type of all argument values + nargs := len(args) + + // TODO (register args) this is a bit of a hack. + inRegs := b.Func.ABIDefault == b.Func.ABI1 && len(config.intParamRegs) >= 3 + + if !inRegs { + // Store arguments to the appropriate stack slot. + off := config.ctxt.Arch.FixedFrameSize + for _, arg := range args { + stkaddr := b.NewValue1I(pos, OpOffPtr, typ.PtrTo(), off, sp) + mem = b.NewValue3A(pos, OpStore, types.TypeMem, typ, stkaddr, arg, mem) + off += typ.Size() + } + args = args[:0] + } + + args = append(args, mem) + + // issue call + argTypes := make([]*types.Type, nargs, 3) // at most 3 args; allows stack allocation + for i := 0; i < nargs; i++ { + argTypes[i] = typ + } + call := b.NewValue0A(pos, OpStaticCall, types.TypeResultMem, StaticAuxCall(fn, b.Func.ABIDefault.ABIAnalyzeTypes(argTypes, nil))) + call.AddArgs(args...) + call.AuxInt = int64(nargs) * typ.Size() + return b.NewValue1I(pos, OpSelectN, types.TypeMem, 0, call) +} + +// round to a multiple of r, r is a power of 2. +func round(o int64, r int64) int64 { + return (o + r - 1) &^ (r - 1) +} + +// IsStackAddr reports whether v is known to be an address of a stack slot. +func IsStackAddr(v *Value) bool { + for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy { + v = v.Args[0] + } + switch v.Op { + case OpSP, OpLocalAddr, OpSelectNAddr, OpGetCallerSP: + return true + } + return false +} + +// IsGlobalAddr reports whether v is known to be an address of a global (or nil). 
+func IsGlobalAddr(v *Value) bool { + for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy { + v = v.Args[0] + } + if v.Op == OpAddr && v.Args[0].Op == OpSB { + return true // address of a global + } + if v.Op == OpConstNil { + return true + } + if v.Op == OpLoad && IsReadOnlyGlobalAddr(v.Args[0]) { + return true // loading from a read-only global - the resulting address can't be a heap address. + } + return false +} + +// IsReadOnlyGlobalAddr reports whether v is known to be an address of a read-only global. +func IsReadOnlyGlobalAddr(v *Value) bool { + if v.Op == OpConstNil { + // Nil pointers are read only. See issue 33438. + return true + } + if v.Op == OpAddr && v.Aux != nil && v.Aux.(*obj.LSym).Type == objabi.SRODATA { + return true + } + return false +} + +// IsNewObject reports whether v is a pointer to a freshly allocated & zeroed object, +// if so, also returns the memory state mem at which v is zero. +func IsNewObject(v *Value, select1 []*Value) (mem *Value, ok bool) { + f := v.Block.Func + c := f.Config + if f.ABIDefault == f.ABI1 && len(c.intParamRegs) >= 1 { + if v.Op != OpSelectN || v.AuxInt != 0 { + return nil, false + } + mem = select1[v.Args[0].ID] + if mem == nil { + return nil, false + } + } else { + if v.Op != OpLoad { + return nil, false + } + mem = v.MemoryArg() + if mem.Op != OpSelectN { + return nil, false + } + if mem.Type != types.TypeMem { + return nil, false + } // assume it is the right selection if true + } + call := mem.Args[0] + if call.Op != OpStaticCall { + return nil, false + } + if !isSameCall(call.Aux, "runtime.newobject") { + return nil, false + } + if f.ABIDefault == f.ABI1 && len(c.intParamRegs) >= 1 { + if v.Args[0] == call { + return mem, true + } + return nil, false + } + if v.Args[0].Op != OpOffPtr { + return nil, false + } + if v.Args[0].Args[0].Op != OpSP { + return nil, false + } + if v.Args[0].AuxInt != c.ctxt.Arch.FixedFrameSize+c.RegSize { // offset of return value + return nil, false + 
} + return mem, true +} + +// IsSanitizerSafeAddr reports whether v is known to be an address +// that doesn't need instrumentation. +func IsSanitizerSafeAddr(v *Value) bool { + for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy { + v = v.Args[0] + } + switch v.Op { + case OpSP, OpLocalAddr, OpSelectNAddr: + // Stack addresses are always safe. + return true + case OpITab, OpStringPtr, OpGetClosurePtr: + // Itabs, string data, and closure fields are + // read-only once initialized. + return true + case OpAddr: + vt := v.Aux.(*obj.LSym).Type + return vt == objabi.SRODATA || vt == objabi.SLIBFUZZER_8BIT_COUNTER || vt == objabi.SCOVERAGE_COUNTER || vt == objabi.SCOVERAGE_AUXVAR + } + return false +} + +// isVolatile reports whether v is a pointer to argument region on stack which +// will be clobbered by a function call. +func isVolatile(v *Value) bool { + for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy || v.Op == OpSelectNAddr { + v = v.Args[0] + } + return v.Op == OpSP +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/writebarrier_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/writebarrier_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0b11afc84da6433863765d6ea106fa764e872b88 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/writebarrier_test.go @@ -0,0 +1,56 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import ( + "cmd/compile/internal/types" + "testing" +) + +func TestWriteBarrierStoreOrder(t *testing.T) { + // Make sure writebarrier phase works even StoreWB ops are not in dependency order + c := testConfig(t) + ptrType := c.config.Types.BytePtr + fun := c.Fun("entry", + Bloc("entry", + Valu("start", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil), + Valu("v", OpConstNil, ptrType, 0, nil), + Valu("addr1", OpAddr, ptrType, 0, nil, "sb"), + Valu("wb2", OpStore, types.TypeMem, 0, ptrType, "addr1", "v", "wb1"), + Valu("wb1", OpStore, types.TypeMem, 0, ptrType, "addr1", "v", "start"), // wb1 and wb2 are out of order + Goto("exit")), + Bloc("exit", + Exit("wb2"))) + + CheckFunc(fun.f) + writebarrier(fun.f) + CheckFunc(fun.f) +} + +func TestWriteBarrierPhi(t *testing.T) { + // Make sure writebarrier phase works for single-block loop, where + // a Phi op takes the store in the same block as argument. + // See issue #19067. 
+ c := testConfig(t) + ptrType := c.config.Types.BytePtr + fun := c.Fun("entry", + Bloc("entry", + Valu("start", OpInitMem, types.TypeMem, 0, nil), + Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil), + Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil), + Goto("loop")), + Bloc("loop", + Valu("phi", OpPhi, types.TypeMem, 0, nil, "start", "wb"), + Valu("v", OpConstNil, ptrType, 0, nil), + Valu("addr", OpAddr, ptrType, 0, nil, "sb"), + Valu("wb", OpStore, types.TypeMem, 0, ptrType, "addr", "v", "phi"), // has write barrier + Goto("loop"))) + + CheckFunc(fun.f) + writebarrier(fun.f) + CheckFunc(fun.f) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/xposmap.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/xposmap.go new file mode 100644 index 0000000000000000000000000000000000000000..93582e1373e27223b472b0b96f81e39120dfbccb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/xposmap.go @@ -0,0 +1,116 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "cmd/internal/src" + "fmt" +) + +type lineRange struct { + first, last uint32 +} + +// An xposmap is a map from fileindex and line of src.XPos to int32, +// implemented sparsely to save space (column and statement status are ignored). +// The sparse skeleton is constructed once, and then reused by ssa phases +// that (re)move values with statements attached. +type xposmap struct { + // A map from file index to maps from line range to integers (block numbers) + maps map[int32]*biasedSparseMap + // The next two fields provide a single-item cache for common case of repeated lines from same file. 
+ lastIndex int32 // -1 means no entry in cache + lastMap *biasedSparseMap // map found at maps[lastIndex] +} + +// newXposmap constructs an xposmap valid for inputs which have a file index in the keys of x, +// and line numbers in the range x[file index]. +// The resulting xposmap will panic if a caller attempts to set or add an XPos not in that range. +func newXposmap(x map[int]lineRange) *xposmap { + maps := make(map[int32]*biasedSparseMap) + for i, p := range x { + maps[int32(i)] = newBiasedSparseMap(int(p.first), int(p.last)) + } + return &xposmap{maps: maps, lastIndex: -1} // zero for the rest is okay +} + +// clear removes data from the map but leaves the sparse skeleton. +func (m *xposmap) clear() { + for _, l := range m.maps { + if l != nil { + l.clear() + } + } + m.lastIndex = -1 + m.lastMap = nil +} + +// mapFor returns the line range map for a given file index. +func (m *xposmap) mapFor(index int32) *biasedSparseMap { + if index == m.lastIndex { + return m.lastMap + } + mf := m.maps[index] + m.lastIndex = index + m.lastMap = mf + return mf +} + +// set inserts p->v into the map. +// If p does not fall within the set of fileindex->lineRange used to construct m, this will panic. +func (m *xposmap) set(p src.XPos, v int32) { + s := m.mapFor(p.FileIndex()) + if s == nil { + panic(fmt.Sprintf("xposmap.set(%d), file index not found in map\n", p.FileIndex())) + } + s.set(p.Line(), v) +} + +// get returns the int32 associated with the file index and line of p. +func (m *xposmap) get(p src.XPos) int32 { + s := m.mapFor(p.FileIndex()) + if s == nil { + return -1 + } + return s.get(p.Line()) +} + +// add adds p to m, treating m as a set instead of as a map. +// If p does not fall within the set of fileindex->lineRange used to construct m, this will panic. +// Use clear() in between set/map interpretations of m. 
+func (m *xposmap) add(p src.XPos) { + m.set(p, 0) +} + +// contains returns whether the file index and line of p are in m, +// treating m as a set instead of as a map. +func (m *xposmap) contains(p src.XPos) bool { + s := m.mapFor(p.FileIndex()) + if s == nil { + return false + } + return s.contains(p.Line()) +} + +// remove removes the file index and line for p from m, +// whether m is currently treated as a map or set. +func (m *xposmap) remove(p src.XPos) { + s := m.mapFor(p.FileIndex()) + if s == nil { + return + } + s.remove(p.Line()) +} + +// foreachEntry applies f to each (fileindex, line, value) triple in m. +func (m *xposmap) foreachEntry(f func(j int32, l uint, v int32)) { + for j, mm := range m.maps { + s := mm.size() + for i := 0; i < s; i++ { + l, v := mm.getEntry(i) + f(j, l, v) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/zcse.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/zcse.go new file mode 100644 index 0000000000000000000000000000000000000000..e08272c34580173c60929b7c9edc4183d80050f4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/zcse.go @@ -0,0 +1,79 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import "cmd/compile/internal/types" + +// zcse does an initial pass of common-subexpression elimination on the +// function for values with zero arguments to allow the more expensive cse +// to begin with a reduced number of values. Values are just relinked, +// nothing is deleted. A subsequent deadcode pass is required to actually +// remove duplicate expressions. 
+func zcse(f *Func) { + vals := make(map[vkey]*Value) + + for _, b := range f.Blocks { + for i := 0; i < len(b.Values); i++ { + v := b.Values[i] + if opcodeTable[v.Op].argLen == 0 { + key := vkey{v.Op, keyFor(v), v.Aux, v.Type} + if vals[key] == nil { + vals[key] = v + if b != f.Entry { + // Move v to the entry block so it will dominate every block + // where we might use it. This prevents the need for any dominator + // calculations in this pass. + v.Block = f.Entry + f.Entry.Values = append(f.Entry.Values, v) + last := len(b.Values) - 1 + b.Values[i] = b.Values[last] + b.Values[last] = nil + b.Values = b.Values[:last] + + i-- // process b.Values[i] again + } + } + } + } + } + + for _, b := range f.Blocks { + for _, v := range b.Values { + for i, a := range v.Args { + if opcodeTable[a.Op].argLen == 0 { + key := vkey{a.Op, keyFor(a), a.Aux, a.Type} + if rv, ok := vals[key]; ok { + v.SetArg(i, rv) + } + } + } + } + } +} + +// vkey is a type used to uniquely identify a zero arg value. +type vkey struct { + op Op + ai int64 // aux int + ax Aux // aux + t *types.Type // type +} + +// keyFor returns the AuxInt portion of a key structure uniquely identifying a +// zero arg value for the supported ops. +func keyFor(v *Value) int64 { + switch v.Op { + case OpConst64, OpConst64F, OpConst32F: + return v.AuxInt + case OpConst32: + return int64(int32(v.AuxInt)) + case OpConst16: + return int64(int16(v.AuxInt)) + case OpConst8, OpConstBool: + return int64(int8(v.AuxInt)) + default: + return v.AuxInt + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/zeroextension_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/zeroextension_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2e316214114fccdffadb8ad7832f3c02b95902b6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/zeroextension_test.go @@ -0,0 +1,34 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import "testing" + +type extTest struct { + f func(uint64, uint64) uint64 + arg1 uint64 + arg2 uint64 + res uint64 + name string +} + +var extTests = [...]extTest{ + {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 / op2)) }, arg1: 0x1, arg2: 0xfffffffeffffffff, res: 0xffffffff, name: "div"}, + {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 * op2)) }, arg1: 0x1, arg2: 0x100000001, res: 0x1, name: "mul"}, + {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 + op2)) }, arg1: 0x1, arg2: 0xeeeeeeeeffffffff, res: 0x0, name: "add"}, + {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 - op2)) }, arg1: 0x1, arg2: 0xeeeeeeeeffffffff, res: 0x2, name: "sub"}, + {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 | op2)) }, arg1: 0x100000000000001, arg2: 0xfffffffffffffff, res: 0xffffffff, name: "or"}, + {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 ^ op2)) }, arg1: 0x100000000000001, arg2: 0xfffffffffffffff, res: 0xfffffffe, name: "xor"}, + {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 & op2)) }, arg1: 0x100000000000001, arg2: 0x100000000000001, res: 0x1, name: "and"}, +} + +func TestZeroExtension(t *testing.T) { + for _, x := range extTests { + r := x.f(x.arg1, x.arg2) + if x.res != r { + t.Errorf("%s: got %d want %d", x.name, r, x.res) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssagen/abi.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssagen/abi.go new file mode 100644 index 0000000000000000000000000000000000000000..56af9ce7810e7091e7ae53e04139853a893e5138 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssagen/abi.go @@ -0,0 +1,440 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssagen + +import ( + "fmt" + "internal/buildcfg" + "log" + "os" + "strings" + + "cmd/compile/internal/abi" + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/wasm" +) + +// SymABIs records information provided by the assembler about symbol +// definition ABIs and reference ABIs. +type SymABIs struct { + defs map[string]obj.ABI + refs map[string]obj.ABISet +} + +func NewSymABIs() *SymABIs { + return &SymABIs{ + defs: make(map[string]obj.ABI), + refs: make(map[string]obj.ABISet), + } +} + +// canonicalize returns the canonical name used for a linker symbol in +// s's maps. Symbols in this package may be written either as "".X or +// with the package's import path already in the symbol. This rewrites +// both to use the full path, which matches compiler-generated linker +// symbol names. +func (s *SymABIs) canonicalize(linksym string) string { + if strings.HasPrefix(linksym, `"".`) { + panic("non-canonical symbol name: " + linksym) + } + return linksym +} + +// ReadSymABIs reads a symabis file that specifies definitions and +// references of text symbols by ABI. +// +// The symabis format is a set of lines, where each line is a sequence +// of whitespace-separated fields. The first field is a verb and is +// either "def" for defining a symbol ABI or "ref" for referencing a +// symbol using an ABI. For both "def" and "ref", the second field is +// the symbol name and the third field is the ABI name, as one of the +// named cmd/internal/obj.ABI constants. 
+func (s *SymABIs) ReadSymABIs(file string) { + data, err := os.ReadFile(file) + if err != nil { + log.Fatalf("-symabis: %v", err) + } + + for lineNum, line := range strings.Split(string(data), "\n") { + lineNum++ // 1-based + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + + parts := strings.Fields(line) + switch parts[0] { + case "def", "ref": + // Parse line. + if len(parts) != 3 { + log.Fatalf(`%s:%d: invalid symabi: syntax is "%s sym abi"`, file, lineNum, parts[0]) + } + sym, abistr := parts[1], parts[2] + abi, valid := obj.ParseABI(abistr) + if !valid { + log.Fatalf(`%s:%d: invalid symabi: unknown abi "%s"`, file, lineNum, abistr) + } + + sym = s.canonicalize(sym) + + // Record for later. + if parts[0] == "def" { + s.defs[sym] = abi + } else { + s.refs[sym] |= obj.ABISetOf(abi) + } + default: + log.Fatalf(`%s:%d: invalid symabi type "%s"`, file, lineNum, parts[0]) + } + } +} + +// GenABIWrappers applies ABI information to Funcs and generates ABI +// wrapper functions where necessary. +func (s *SymABIs) GenABIWrappers() { + // For cgo exported symbols, we tell the linker to export the + // definition ABI to C. That also means that we don't want to + // create ABI wrappers even if there's a linkname. + // + // TODO(austin): Maybe we want to create the ABI wrappers, but + // ensure the linker exports the right ABI definition under + // the unmangled name? + cgoExports := make(map[string][]*[]string) + for i, prag := range typecheck.Target.CgoPragmas { + switch prag[0] { + case "cgo_export_static", "cgo_export_dynamic": + symName := s.canonicalize(prag[1]) + pprag := &typecheck.Target.CgoPragmas[i] + cgoExports[symName] = append(cgoExports[symName], pprag) + } + } + + // Apply ABI defs and refs to Funcs and generate wrappers. + // + // This may generate new decls for the wrappers, but we + // specifically *don't* want to visit those, lest we create + // wrappers for wrappers. 
+ for _, fn := range typecheck.Target.Funcs { + nam := fn.Nname + if ir.IsBlank(nam) { + continue + } + sym := nam.Sym() + + symName := sym.Linkname + if symName == "" { + symName = sym.Pkg.Prefix + "." + sym.Name + } + symName = s.canonicalize(symName) + + // Apply definitions. + defABI, hasDefABI := s.defs[symName] + if hasDefABI { + if len(fn.Body) != 0 { + base.ErrorfAt(fn.Pos(), 0, "%v defined in both Go and assembly", fn) + } + fn.ABI = defABI + } + + if fn.Pragma&ir.CgoUnsafeArgs != 0 { + // CgoUnsafeArgs indicates the function (or its callee) uses + // offsets to dispatch arguments, which currently using ABI0 + // frame layout. Pin it to ABI0. + fn.ABI = obj.ABI0 + } + + // If cgo-exported, add the definition ABI to the cgo + // pragmas. + cgoExport := cgoExports[symName] + for _, pprag := range cgoExport { + // The export pragmas have the form: + // + // cgo_export_* [] + // + // If is omitted, it's the same as + // . + // + // Expand to + // + // cgo_export_* + if len(*pprag) == 2 { + *pprag = append(*pprag, (*pprag)[1]) + } + // Add the ABI argument. + *pprag = append(*pprag, fn.ABI.String()) + } + + // Apply references. + if abis, ok := s.refs[symName]; ok { + fn.ABIRefs |= abis + } + // Assume all functions are referenced at least as + // ABIInternal, since they may be referenced from + // other packages. + fn.ABIRefs.Set(obj.ABIInternal, true) + + // If a symbol is defined in this package (either in + // Go or assembly) and given a linkname, it may be + // referenced from another package, so make it + // callable via any ABI. It's important that we know + // it's defined in this package since other packages + // may "pull" symbols using linkname and we don't want + // to create duplicate ABI wrappers. + // + // However, if it's given a linkname for exporting to + // C, then we don't make ABI wrappers because the cgo + // tool wants the original definition. 
+ hasBody := len(fn.Body) != 0 + if sym.Linkname != "" && (hasBody || hasDefABI) && len(cgoExport) == 0 { + fn.ABIRefs |= obj.ABISetCallable + } + + // Double check that cgo-exported symbols don't get + // any wrappers. + if len(cgoExport) > 0 && fn.ABIRefs&^obj.ABISetOf(fn.ABI) != 0 { + base.Fatalf("cgo exported function %v cannot have ABI wrappers", fn) + } + + if !buildcfg.Experiment.RegabiWrappers { + continue + } + + forEachWrapperABI(fn, makeABIWrapper) + } +} + +func forEachWrapperABI(fn *ir.Func, cb func(fn *ir.Func, wrapperABI obj.ABI)) { + need := fn.ABIRefs &^ obj.ABISetOf(fn.ABI) + if need == 0 { + return + } + + for wrapperABI := obj.ABI(0); wrapperABI < obj.ABICount; wrapperABI++ { + if !need.Get(wrapperABI) { + continue + } + cb(fn, wrapperABI) + } +} + +// makeABIWrapper creates a new function that will be called with +// wrapperABI and calls "f" using f.ABI. +func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) { + if base.Debug.ABIWrap != 0 { + fmt.Fprintf(os.Stderr, "=-= %v to %v wrapper for %v\n", wrapperABI, f.ABI, f) + } + + // Q: is this needed? + savepos := base.Pos + savedcurfn := ir.CurFunc + + pos := base.AutogeneratedPos + base.Pos = pos + + // At the moment we don't support wrapping a method, we'd need machinery + // below to handle the receiver. Panic if we see this scenario. + ft := f.Nname.Type() + if ft.NumRecvs() != 0 { + base.ErrorfAt(f.Pos(), 0, "makeABIWrapper support for wrapping methods not implemented") + return + } + + // Reuse f's types.Sym to create a new ODCLFUNC/function. + // TODO(mdempsky): Means we can't set sym.Def in Declfunc, ugh. 
+ fn := ir.NewFunc(pos, pos, f.Sym(), types.NewSignature(nil, + typecheck.NewFuncParams(ft.Params()), + typecheck.NewFuncParams(ft.Results()))) + fn.ABI = wrapperABI + typecheck.DeclFunc(fn) + + fn.SetABIWrapper(true) + fn.SetDupok(true) + + // ABI0-to-ABIInternal wrappers will be mainly loading params from + // stack into registers (and/or storing stack locations back to + // registers after the wrapped call); in most cases they won't + // need to allocate stack space, so it should be OK to mark them + // as NOSPLIT in these cases. In addition, my assumption is that + // functions written in assembly are NOSPLIT in most (but not all) + // cases. In the case of an ABIInternal target that has too many + // parameters to fit into registers, the wrapper would need to + // allocate stack space, but this seems like an unlikely scenario. + // Hence: mark these wrappers NOSPLIT. + // + // ABIInternal-to-ABI0 wrappers on the other hand will be taking + // things in registers and pushing them onto the stack prior to + // the ABI0 call, meaning that they will always need to allocate + // stack space. If the compiler marks them as NOSPLIT this seems + // as though it could lead to situations where the linker's + // nosplit-overflow analysis would trigger a link failure. On the + // other hand if they not tagged NOSPLIT then this could cause + // problems when building the runtime (since there may be calls to + // asm routine in cases where it's not safe to grow the stack). In + // most cases the wrapper would be (in effect) inlined, but are + // there (perhaps) indirect calls from the runtime that could run + // into trouble here. + // FIXME: at the moment all.bash does not pass when I leave out + // NOSPLIT for these wrappers, so all are currently tagged with NOSPLIT. + fn.Pragma |= ir.Nosplit + + // Generate call. Use tail call if no params and no returns, + // but a regular call otherwise. 
+ // + // Note: ideally we would be using a tail call in cases where + // there are params but no returns for ABI0->ABIInternal wrappers, + // provided that all params fit into registers (e.g. we don't have + // to allocate any stack space). Doing this will require some + // extra work in typecheck/walk/ssa, might want to add a new node + // OTAILCALL or something to this effect. + tailcall := fn.Type().NumResults() == 0 && fn.Type().NumParams() == 0 && fn.Type().NumRecvs() == 0 + if base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink { + // cannot tailcall on PPC64 with dynamic linking, as we need + // to restore R2 after call. + tailcall = false + } + if base.Ctxt.Arch.Name == "amd64" && wrapperABI == obj.ABIInternal { + // cannot tailcall from ABIInternal to ABI0 on AMD64, as we need + // to special registers (X15) when returning to ABIInternal. + tailcall = false + } + + var tail ir.Node + call := ir.NewCallExpr(base.Pos, ir.OCALL, f.Nname, nil) + call.Args = ir.ParamNames(fn.Type()) + call.IsDDD = fn.Type().IsVariadic() + tail = call + if tailcall { + tail = ir.NewTailCallStmt(base.Pos, call) + } else if fn.Type().NumResults() > 0 { + n := ir.NewReturnStmt(base.Pos, nil) + n.Results = []ir.Node{call} + tail = n + } + fn.Body.Append(tail) + + typecheck.FinishFuncBody() + + ir.CurFunc = fn + typecheck.Stmts(fn.Body) + + // Restore previous context. + base.Pos = savepos + ir.CurFunc = savedcurfn +} + +// CreateWasmImportWrapper creates a wrapper for imported WASM functions to +// adapt them to the Go calling convention. 
The body for this function is +// generated in cmd/internal/obj/wasm/wasmobj.go +func CreateWasmImportWrapper(fn *ir.Func) bool { + if fn.WasmImport == nil { + return false + } + if buildcfg.GOARCH != "wasm" { + base.FatalfAt(fn.Pos(), "CreateWasmImportWrapper call not supported on %s: func was %v", buildcfg.GOARCH, fn) + } + + ir.InitLSym(fn, true) + + setupWasmABI(fn) + + pp := objw.NewProgs(fn, 0) + defer pp.Free() + pp.Text.To.Type = obj.TYPE_TEXTSIZE + pp.Text.To.Val = int32(types.RoundUp(fn.Type().ArgWidth(), int64(types.RegSize))) + // Wrapper functions never need their own stack frame + pp.Text.To.Offset = 0 + pp.Flush() + + return true +} + +func paramsToWasmFields(f *ir.Func, result *abi.ABIParamResultInfo, abiParams []abi.ABIParamAssignment) []obj.WasmField { + wfs := make([]obj.WasmField, len(abiParams)) + for i, p := range abiParams { + t := p.Type + switch t.Kind() { + case types.TINT32, types.TUINT32: + wfs[i].Type = obj.WasmI32 + case types.TINT64, types.TUINT64: + wfs[i].Type = obj.WasmI64 + case types.TFLOAT32: + wfs[i].Type = obj.WasmF32 + case types.TFLOAT64: + wfs[i].Type = obj.WasmF64 + case types.TUNSAFEPTR: + wfs[i].Type = obj.WasmPtr + default: + base.ErrorfAt(f.Pos(), 0, "go:wasmimport %s %s: unsupported parameter type %s", f.WasmImport.Module, f.WasmImport.Name, t.String()) + } + wfs[i].Offset = p.FrameOffset(result) + } + return wfs +} + +func resultsToWasmFields(f *ir.Func, result *abi.ABIParamResultInfo, abiParams []abi.ABIParamAssignment) []obj.WasmField { + if len(abiParams) > 1 { + base.ErrorfAt(f.Pos(), 0, "go:wasmimport %s %s: too many return values", f.WasmImport.Module, f.WasmImport.Name) + return nil + } + wfs := make([]obj.WasmField, len(abiParams)) + for i, p := range abiParams { + t := p.Type + switch t.Kind() { + case types.TINT32, types.TUINT32: + wfs[i].Type = obj.WasmI32 + case types.TINT64, types.TUINT64: + wfs[i].Type = obj.WasmI64 + case types.TFLOAT32: + wfs[i].Type = obj.WasmF32 + case types.TFLOAT64: + wfs[i].Type 
= obj.WasmF64 + default: + base.ErrorfAt(f.Pos(), 0, "go:wasmimport %s %s: unsupported result type %s", f.WasmImport.Module, f.WasmImport.Name, t.String()) + } + wfs[i].Offset = p.FrameOffset(result) + } + return wfs +} + +// setupTextLSym initializes the LSym for a with-body text symbol. +func setupWasmABI(f *ir.Func) { + wi := obj.WasmImport{ + Module: f.WasmImport.Module, + Name: f.WasmImport.Name, + } + if wi.Module == wasm.GojsModule { + // Functions that are imported from the "gojs" module use a special + // ABI that just accepts the stack pointer. + // Example: + // + // //go:wasmimport gojs add + // func importedAdd(a, b uint) uint + // + // will roughly become + // + // (import "gojs" "add" (func (param i32))) + wi.Params = []obj.WasmField{{Type: obj.WasmI32}} + } else { + // All other imported functions use the normal WASM ABI. + // Example: + // + // //go:wasmimport a_module add + // func importedAdd(a, b uint) uint + // + // will roughly become + // + // (import "a_module" "add" (func (param i32 i32) (result i32))) + abiConfig := AbiForBodylessFuncStackMap(f) + abiInfo := abiConfig.ABIAnalyzeFuncType(f.Type()) + wi.Params = paramsToWasmFields(f, abiInfo, abiInfo.InParams()) + wi.Results = resultsToWasmFields(f, abiInfo, abiInfo.OutParams()) + } + f.LSym.Func().WasmImport = &wi +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssagen/arch.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssagen/arch.go new file mode 100644 index 0000000000000000000000000000000000000000..483e45cad43c7445c46032111434fffe16cd62a7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssagen/arch.go @@ -0,0 +1,51 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssagen + +import ( + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/ssa" + "cmd/compile/internal/types" + "cmd/internal/obj" +) + +var Arch ArchInfo + +// interface to back end + +type ArchInfo struct { + LinkArch *obj.LinkArch + + REGSP int + MAXWIDTH int64 + SoftFloat bool + + PadFrame func(int64) int64 + + // ZeroRange zeroes a range of memory on stack. It is only inserted + // at function entry, and it is ok to clobber registers. + ZeroRange func(*objw.Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog + + Ginsnop func(*objw.Progs) *obj.Prog + + // SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags. + SSAMarkMoves func(*State, *ssa.Block) + + // SSAGenValue emits Prog(s) for the Value. + SSAGenValue func(*State, *ssa.Value) + + // SSAGenBlock emits end-of-block Progs. SSAGenValue should be called + // for all values in the block before SSAGenBlock. + SSAGenBlock func(s *State, b, next *ssa.Block) + + // LoadRegResult emits instructions that loads register-assigned result + // at n+off (n is PPARAMOUT) to register reg. The result is already in + // memory. Used in open-coded defer return path. + LoadRegResult func(s *State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog + + // SpillArgReg emits instructions that spill reg to n+off. + SpillArgReg func(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssagen/nowb.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssagen/nowb.go new file mode 100644 index 0000000000000000000000000000000000000000..b8756eea61092ba436b3ef56509ad38393818946 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssagen/nowb.go @@ -0,0 +1,195 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssagen + +import ( + "fmt" + "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/src" +) + +func EnableNoWriteBarrierRecCheck() { + nowritebarrierrecCheck = newNowritebarrierrecChecker() +} + +func NoWriteBarrierRecCheck() { + // Write barriers are now known. Check the + // call graph. + nowritebarrierrecCheck.check() + nowritebarrierrecCheck = nil +} + +var nowritebarrierrecCheck *nowritebarrierrecChecker + +type nowritebarrierrecChecker struct { + // extraCalls contains extra function calls that may not be + // visible during later analysis. It maps from the ODCLFUNC of + // the caller to a list of callees. + extraCalls map[*ir.Func][]nowritebarrierrecCall + + // curfn is the current function during AST walks. + curfn *ir.Func +} + +type nowritebarrierrecCall struct { + target *ir.Func // caller or callee + lineno src.XPos // line of call +} + +// newNowritebarrierrecChecker creates a nowritebarrierrecChecker. It +// must be called before walk. +func newNowritebarrierrecChecker() *nowritebarrierrecChecker { + c := &nowritebarrierrecChecker{ + extraCalls: make(map[*ir.Func][]nowritebarrierrecCall), + } + + // Find all systemstack calls and record their targets. In + // general, flow analysis can't see into systemstack, but it's + // important to handle it for this check, so we model it + // directly. This has to happen before transforming closures in walk since + // it's a lot harder to work out the argument after. + for _, n := range typecheck.Target.Funcs { + c.curfn = n + if c.curfn.ABIWrapper() { + // We only want "real" calls to these + // functions, not the generated ones within + // their own ABI wrappers. 
+ continue + } + ir.Visit(n, c.findExtraCalls) + } + c.curfn = nil + return c +} + +func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) { + if nn.Op() != ir.OCALLFUNC { + return + } + n := nn.(*ir.CallExpr) + if n.Fun == nil || n.Fun.Op() != ir.ONAME { + return + } + fn := n.Fun.(*ir.Name) + if fn.Class != ir.PFUNC || fn.Defn == nil { + return + } + if types.RuntimeSymName(fn.Sym()) != "systemstack" { + return + } + + var callee *ir.Func + arg := n.Args[0] + switch arg.Op() { + case ir.ONAME: + arg := arg.(*ir.Name) + callee = arg.Defn.(*ir.Func) + case ir.OCLOSURE: + arg := arg.(*ir.ClosureExpr) + callee = arg.Func + default: + base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg) + } + c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos()}) +} + +// recordCall records a call from ODCLFUNC node "from", to function +// symbol "to" at position pos. +// +// This should be done as late as possible during compilation to +// capture precise call graphs. The target of the call is an LSym +// because that's all we know after we start SSA. +// +// This can be called concurrently for different from Nodes. +func (c *nowritebarrierrecChecker) recordCall(fn *ir.Func, to *obj.LSym, pos src.XPos) { + // We record this information on the *Func so this is concurrent-safe. + if fn.NWBRCalls == nil { + fn.NWBRCalls = new([]ir.SymAndPos) + } + *fn.NWBRCalls = append(*fn.NWBRCalls, ir.SymAndPos{Sym: to, Pos: pos}) +} + +func (c *nowritebarrierrecChecker) check() { + // We walk the call graph as late as possible so we can + // capture all calls created by lowering, but this means we + // only get to see the obj.LSyms of calls. symToFunc lets us + // get back to the ODCLFUNCs. + symToFunc := make(map[*obj.LSym]*ir.Func) + // funcs records the back-edges of the BFS call graph walk. It + // maps from the ODCLFUNC of each function that must not have + // write barriers to the call that inhibits them. 
Functions + // that are directly marked go:nowritebarrierrec are in this + // map with a zero-valued nowritebarrierrecCall. This also + // acts as the set of marks for the BFS of the call graph. + funcs := make(map[*ir.Func]nowritebarrierrecCall) + // q is the queue of ODCLFUNC Nodes to visit in BFS order. + var q ir.NameQueue + + for _, fn := range typecheck.Target.Funcs { + symToFunc[fn.LSym] = fn + + // Make nowritebarrierrec functions BFS roots. + if fn.Pragma&ir.Nowritebarrierrec != 0 { + funcs[fn] = nowritebarrierrecCall{} + q.PushRight(fn.Nname) + } + // Check go:nowritebarrier functions. + if fn.Pragma&ir.Nowritebarrier != 0 && fn.WBPos.IsKnown() { + base.ErrorfAt(fn.WBPos, 0, "write barrier prohibited") + } + } + + // Perform a BFS of the call graph from all + // go:nowritebarrierrec functions. + enqueue := func(src, target *ir.Func, pos src.XPos) { + if target.Pragma&ir.Yeswritebarrierrec != 0 { + // Don't flow into this function. + return + } + if _, ok := funcs[target]; ok { + // Already found a path to target. + return + } + + // Record the path. + funcs[target] = nowritebarrierrecCall{target: src, lineno: pos} + q.PushRight(target.Nname) + } + for !q.Empty() { + fn := q.PopLeft().Func + + // Check fn. + if fn.WBPos.IsKnown() { + var err strings.Builder + call := funcs[fn] + for call.target != nil { + fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Nname) + call = funcs[call.target] + } + base.ErrorfAt(fn.WBPos, 0, "write barrier prohibited by caller; %v%s", fn.Nname, err.String()) + continue + } + + // Enqueue fn's calls. 
+ for _, callee := range c.extraCalls[fn] { + enqueue(fn, callee.target, callee.lineno) + } + if fn.NWBRCalls == nil { + continue + } + for _, callee := range *fn.NWBRCalls { + target := symToFunc[callee.Sym] + if target != nil { + enqueue(fn, target, callee.Pos) + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssagen/pgen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssagen/pgen.go new file mode 100644 index 0000000000000000000000000000000000000000..e7a0699641bf8ddfce8487e4f4ee9ec0ac6fc445 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssagen/pgen.go @@ -0,0 +1,364 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssagen + +import ( + "fmt" + "internal/buildcfg" + "os" + "sort" + "sync" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/ssa" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" +) + +// cmpstackvarlt reports whether the stack variable a sorts before b. +func cmpstackvarlt(a, b *ir.Name) bool { + // Sort non-autos before autos. + if needAlloc(a) != needAlloc(b) { + return needAlloc(b) + } + + // If both are non-auto (e.g., parameters, results), then sort by + // frame offset (defined by ABI). + if !needAlloc(a) { + return a.FrameOffset() < b.FrameOffset() + } + + // From here on, a and b are both autos (i.e., local variables). + + // Sort used before unused (so AllocFrame can truncate unused + // variables). + if a.Used() != b.Used() { + return a.Used() + } + + // Sort pointer-typed before non-pointer types. + // Keeps the stack's GC bitmap compact. + ap := a.Type().HasPointers() + bp := b.Type().HasPointers() + if ap != bp { + return ap + } + + // Group variables that need zeroing, so we can efficiently zero + // them altogether. 
+ ap = a.Needzero() + bp = b.Needzero() + if ap != bp { + return ap + } + + // Sort variables in descending alignment order, so we can optimally + // pack variables into the frame. + if a.Type().Alignment() != b.Type().Alignment() { + return a.Type().Alignment() > b.Type().Alignment() + } + + // Sort normal variables before open-coded-defer slots, so that the + // latter are grouped together and near the top of the frame (to + // minimize varint encoding of their varp offset). + if a.OpenDeferSlot() != b.OpenDeferSlot() { + return a.OpenDeferSlot() + } + + // If a and b are both open-coded defer slots, then order them by + // index in descending order, so they'll be laid out in the frame in + // ascending order. + // + // Their index was saved in FrameOffset in state.openDeferSave. + if a.OpenDeferSlot() { + return a.FrameOffset() > b.FrameOffset() + } + + // Tie breaker for stable results. + return a.Sym().Name < b.Sym().Name +} + +// byStackVar implements sort.Interface for []*Node using cmpstackvarlt. +type byStackVar []*ir.Name + +func (s byStackVar) Len() int { return len(s) } +func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) } +func (s byStackVar) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// needAlloc reports whether n is within the current frame, for which we need to +// allocate space. In particular, it excludes arguments and results, which are in +// the callers frame. +func needAlloc(n *ir.Name) bool { + if n.Op() != ir.ONAME { + base.FatalfAt(n.Pos(), "%v has unexpected Op %v", n, n.Op()) + } + + switch n.Class { + case ir.PAUTO: + return true + case ir.PPARAM: + return false + case ir.PPARAMOUT: + return n.IsOutputParamInRegisters() + + default: + base.FatalfAt(n.Pos(), "%v has unexpected Class %v", n, n.Class) + return false + } +} + +func (s *ssafn) AllocFrame(f *ssa.Func) { + s.stksize = 0 + s.stkptrsize = 0 + s.stkalign = int64(types.RegSize) + fn := s.curfn + + // Mark the PAUTO's unused. 
+ for _, ln := range fn.Dcl { + if ln.OpenDeferSlot() { + // Open-coded defer slots have indices that were assigned + // upfront during SSA construction, but the defer statement can + // later get removed during deadcode elimination (#61895). To + // keep their relative offsets correct, treat them all as used. + continue + } + + if needAlloc(ln) { + ln.SetUsed(false) + } + } + + for _, l := range f.RegAlloc { + if ls, ok := l.(ssa.LocalSlot); ok { + ls.N.SetUsed(true) + } + } + + for _, b := range f.Blocks { + for _, v := range b.Values { + if n, ok := v.Aux.(*ir.Name); ok { + switch n.Class { + case ir.PPARAMOUT: + if n.IsOutputParamInRegisters() && v.Op == ssa.OpVarDef { + // ignore VarDef, look for "real" uses. + // TODO: maybe do this for PAUTO as well? + continue + } + fallthrough + case ir.PPARAM, ir.PAUTO: + n.SetUsed(true) + } + } + } + } + + // Use sort.Stable instead of sort.Sort so stack layout (and thus + // compiler output) is less sensitive to frontend changes that + // introduce or remove unused variables. + sort.Stable(byStackVar(fn.Dcl)) + + // Reassign stack offsets of the locals that are used. + lastHasPtr := false + for i, n := range fn.Dcl { + if n.Op() != ir.ONAME || n.Class != ir.PAUTO && !(n.Class == ir.PPARAMOUT && n.IsOutputParamInRegisters()) { + // i.e., stack assign if AUTO, or if PARAMOUT in registers (which has no predefined spill locations) + continue + } + if !n.Used() { + fn.DebugInfo.(*ssa.FuncDebug).OptDcl = fn.Dcl[i:] + fn.Dcl = fn.Dcl[:i] + break + } + + types.CalcSize(n.Type()) + w := n.Type().Size() + if w >= types.MaxWidth || w < 0 { + base.Fatalf("bad width") + } + if w == 0 && lastHasPtr { + // Pad between a pointer-containing object and a zero-sized object. + // This prevents a pointer to the zero-sized object from being interpreted + // as a pointer to the pointer-containing object (and causing it + // to be scanned when it shouldn't be). See issue 24993. 
+ w = 1 + } + s.stksize += w + s.stksize = types.RoundUp(s.stksize, n.Type().Alignment()) + if n.Type().Alignment() > int64(types.RegSize) { + s.stkalign = n.Type().Alignment() + } + if n.Type().HasPointers() { + s.stkptrsize = s.stksize + lastHasPtr = true + } else { + lastHasPtr = false + } + n.SetFrameOffset(-s.stksize) + } + + s.stksize = types.RoundUp(s.stksize, s.stkalign) + s.stkptrsize = types.RoundUp(s.stkptrsize, s.stkalign) +} + +const maxStackSize = 1 << 30 + +// Compile builds an SSA backend function, +// uses it to generate a plist, +// and flushes that plist to machine code. +// worker indicates which of the backend workers is doing the processing. +func Compile(fn *ir.Func, worker int) { + f := buildssa(fn, worker) + // Note: check arg size to fix issue 25507. + if f.Frontend().(*ssafn).stksize >= maxStackSize || f.OwnAux.ArgWidth() >= maxStackSize { + largeStackFramesMu.Lock() + largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: f.OwnAux.ArgWidth(), pos: fn.Pos()}) + largeStackFramesMu.Unlock() + return + } + pp := objw.NewProgs(fn, worker) + defer pp.Free() + genssa(f, pp) + // Check frame size again. + // The check above included only the space needed for local variables. + // After genssa, the space needed includes local variables and the callee arg region. + // We must do this check prior to calling pp.Flush. + // If there are any oversized stack frames, + // the assembler may emit inscrutable complaints about invalid instructions. + if pp.Text.To.Offset >= maxStackSize { + largeStackFramesMu.Lock() + locals := f.Frontend().(*ssafn).stksize + largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: f.OwnAux.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()}) + largeStackFramesMu.Unlock() + return + } + + pp.Flush() // assemble, fill in boilerplate, etc. 
+ + // If we're compiling the package init function, search for any + // relocations that target global map init outline functions and + // turn them into weak relocs. + if fn.IsPackageInit() && base.Debug.WrapGlobalMapCtl != 1 { + weakenGlobalMapInitRelocs(fn) + } + + // fieldtrack must be called after pp.Flush. See issue 20014. + fieldtrack(pp.Text.From.Sym, fn.FieldTrack) +} + +// globalMapInitLsyms records the LSym of each map.init.NNN outlined +// map initializer function created by the compiler. +var globalMapInitLsyms map[*obj.LSym]struct{} + +// RegisterMapInitLsym records "s" in the set of outlined map initializer +// functions. +func RegisterMapInitLsym(s *obj.LSym) { + if globalMapInitLsyms == nil { + globalMapInitLsyms = make(map[*obj.LSym]struct{}) + } + globalMapInitLsyms[s] = struct{}{} +} + +// weakenGlobalMapInitRelocs walks through all of the relocations on a +// given a package init function "fn" and looks for relocs that target +// outlined global map initializer functions; if it finds any such +// relocs, it flags them as R_WEAK. +func weakenGlobalMapInitRelocs(fn *ir.Func) { + if globalMapInitLsyms == nil { + return + } + for i := range fn.LSym.R { + tgt := fn.LSym.R[i].Sym + if tgt == nil { + continue + } + if _, ok := globalMapInitLsyms[tgt]; !ok { + continue + } + if base.Debug.WrapGlobalMapDbg > 1 { + fmt.Fprintf(os.Stderr, "=-= weakify fn %v reloc %d %+v\n", fn, i, + fn.LSym.R[i]) + } + // set the R_WEAK bit, leave rest of reloc type intact + fn.LSym.R[i].Type |= objabi.R_WEAK + } +} + +// StackOffset returns the stack location of a LocalSlot relative to the +// stack pointer, suitable for use in a DWARF location entry. This has nothing +// to do with its offset in the user variable. 
+func StackOffset(slot ssa.LocalSlot) int32 { + n := slot.N + var off int64 + switch n.Class { + case ir.PPARAM, ir.PPARAMOUT: + if !n.IsOutputParamInRegisters() { + off = n.FrameOffset() + base.Ctxt.Arch.FixedFrameSize + break + } + fallthrough // PPARAMOUT in registers allocates like an AUTO + case ir.PAUTO: + off = n.FrameOffset() + if base.Ctxt.Arch.FixedFrameSize == 0 { + off -= int64(types.PtrSize) + } + if buildcfg.FramePointerEnabled { + off -= int64(types.PtrSize) + } + } + return int32(off + slot.Off) +} + +// fieldtrack adds R_USEFIELD relocations to fnsym to record any +// struct fields that it used. +func fieldtrack(fnsym *obj.LSym, tracked map[*obj.LSym]struct{}) { + if fnsym == nil { + return + } + if !buildcfg.Experiment.FieldTrack || len(tracked) == 0 { + return + } + + trackSyms := make([]*obj.LSym, 0, len(tracked)) + for sym := range tracked { + trackSyms = append(trackSyms, sym) + } + sort.Slice(trackSyms, func(i, j int) bool { return trackSyms[i].Name < trackSyms[j].Name }) + for _, sym := range trackSyms { + r := obj.Addrel(fnsym) + r.Sym = sym + r.Type = objabi.R_USEFIELD + } +} + +// largeStack is info about a function whose stack frame is too large (rare). +type largeStack struct { + locals int64 + args int64 + callee int64 + pos src.XPos +} + +var ( + largeStackFramesMu sync.Mutex // protects largeStackFrames + largeStackFrames []largeStack +) + +func CheckLargeStacks() { + // Check whether any of the functions we have compiled have gigantic stack frames. 
+ sort.Slice(largeStackFrames, func(i, j int) bool { + return largeStackFrames[i].pos.Before(largeStackFrames[j].pos) + }) + for _, large := range largeStackFrames { + if large.callee != 0 { + base.ErrorfAt(large.pos, 0, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20) + } else { + base.ErrorfAt(large.pos, 0, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssagen/phi.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssagen/phi.go new file mode 100644 index 0000000000000000000000000000000000000000..19b6920913d83c9f0bdd42a9bebb92cc8d01393d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssagen/phi.go @@ -0,0 +1,557 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssagen + +import ( + "container/heap" + "fmt" + + "cmd/compile/internal/ir" + "cmd/compile/internal/ssa" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +// This file contains the algorithm to place phi nodes in a function. +// For small functions, we use Braun, Buchwald, Hack, Leißa, Mallon, and Zwinkau. +// https://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf +// For large functions, we use Sreedhar & Gao: A Linear Time Algorithm for Placing Φ-Nodes. +// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.8.1979&rep=rep1&type=pdf + +const smallBlocks = 500 + +const debugPhi = false + +// fwdRefAux wraps an arbitrary ir.Node as an ssa.Aux for use with OpFwdref. +type fwdRefAux struct { + _ [0]func() // ensure ir.Node isn't compared for equality + N ir.Node +} + +func (fwdRefAux) CanBeAnSSAAux() {} + +// insertPhis finds all the places in the function where a phi is +// necessary and inserts them. 
+// Uses FwdRef ops to find all uses of variables, and s.defvars to find +// all definitions. +// Phi values are inserted, and all FwdRefs are changed to a Copy +// of the appropriate phi or definition. +// TODO: make this part of cmd/compile/internal/ssa somehow? +func (s *state) insertPhis() { + if len(s.f.Blocks) <= smallBlocks { + sps := simplePhiState{s: s, f: s.f, defvars: s.defvars} + sps.insertPhis() + return + } + ps := phiState{s: s, f: s.f, defvars: s.defvars} + ps.insertPhis() +} + +type phiState struct { + s *state // SSA state + f *ssa.Func // function to work on + defvars []map[ir.Node]*ssa.Value // defined variables at end of each block + + varnum map[ir.Node]int32 // variable numbering + + // properties of the dominator tree + idom []*ssa.Block // dominator parents + tree []domBlock // dominator child+sibling + level []int32 // level in dominator tree (0 = root or unreachable, 1 = children of root, ...) + + // scratch locations + priq blockHeap // priority queue of blocks, higher level (toward leaves) = higher priority + q []*ssa.Block // inner loop queue + queued *sparseSet // has been put in q + hasPhi *sparseSet // has a phi + hasDef *sparseSet // has a write of the variable we're processing + + // miscellaneous + placeholder *ssa.Value // value to use as a "not set yet" placeholder. +} + +func (s *phiState) insertPhis() { + if debugPhi { + fmt.Println(s.f.String()) + } + + // Find all the variables for which we need to match up reads & writes. + // This step prunes any basic-block-only variables from consideration. + // Generate a numbering for these variables. + s.varnum = map[ir.Node]int32{} + var vars []ir.Node + var vartypes []*types.Type + for _, b := range s.f.Blocks { + for _, v := range b.Values { + if v.Op != ssa.OpFwdRef { + continue + } + var_ := v.Aux.(fwdRefAux).N + + // Optimization: look back 1 block for the definition. 
+ if len(b.Preds) == 1 { + c := b.Preds[0].Block() + if w := s.defvars[c.ID][var_]; w != nil { + v.Op = ssa.OpCopy + v.Aux = nil + v.AddArg(w) + continue + } + } + + if _, ok := s.varnum[var_]; ok { + continue + } + s.varnum[var_] = int32(len(vartypes)) + if debugPhi { + fmt.Printf("var%d = %v\n", len(vartypes), var_) + } + vars = append(vars, var_) + vartypes = append(vartypes, v.Type) + } + } + + if len(vartypes) == 0 { + return + } + + // Find all definitions of the variables we need to process. + // defs[n] contains all the blocks in which variable number n is assigned. + defs := make([][]*ssa.Block, len(vartypes)) + for _, b := range s.f.Blocks { + for var_ := range s.defvars[b.ID] { // TODO: encode defvars some other way (explicit ops)? make defvars[n] a slice instead of a map. + if n, ok := s.varnum[var_]; ok { + defs[n] = append(defs[n], b) + } + } + } + + // Make dominator tree. + s.idom = s.f.Idom() + s.tree = make([]domBlock, s.f.NumBlocks()) + for _, b := range s.f.Blocks { + p := s.idom[b.ID] + if p != nil { + s.tree[b.ID].sibling = s.tree[p.ID].firstChild + s.tree[p.ID].firstChild = b + } + } + // Compute levels in dominator tree. + // With parent pointers we can do a depth-first walk without + // any auxiliary storage. + s.level = make([]int32, s.f.NumBlocks()) + b := s.f.Entry +levels: + for { + if p := s.idom[b.ID]; p != nil { + s.level[b.ID] = s.level[p.ID] + 1 + if debugPhi { + fmt.Printf("level %s = %d\n", b, s.level[b.ID]) + } + } + if c := s.tree[b.ID].firstChild; c != nil { + b = c + continue + } + for { + if c := s.tree[b.ID].sibling; c != nil { + b = c + continue levels + } + b = s.idom[b.ID] + if b == nil { + break levels + } + } + } + + // Allocate scratch locations. 
+ s.priq.level = s.level + s.q = make([]*ssa.Block, 0, s.f.NumBlocks()) + s.queued = newSparseSet(s.f.NumBlocks()) + s.hasPhi = newSparseSet(s.f.NumBlocks()) + s.hasDef = newSparseSet(s.f.NumBlocks()) + s.placeholder = s.s.entryNewValue0(ssa.OpUnknown, types.TypeInvalid) + + // Generate phi ops for each variable. + for n := range vartypes { + s.insertVarPhis(n, vars[n], defs[n], vartypes[n]) + } + + // Resolve FwdRefs to the correct write or phi. + s.resolveFwdRefs() + + // Erase variable numbers stored in AuxInt fields of phi ops. They are no longer needed. + for _, b := range s.f.Blocks { + for _, v := range b.Values { + if v.Op == ssa.OpPhi { + v.AuxInt = 0 + } + // Any remaining FwdRefs are dead code. + if v.Op == ssa.OpFwdRef { + v.Op = ssa.OpUnknown + v.Aux = nil + } + } + } +} + +func (s *phiState) insertVarPhis(n int, var_ ir.Node, defs []*ssa.Block, typ *types.Type) { + priq := &s.priq + q := s.q + queued := s.queued + queued.clear() + hasPhi := s.hasPhi + hasPhi.clear() + hasDef := s.hasDef + hasDef.clear() + + // Add defining blocks to priority queue. + for _, b := range defs { + priq.a = append(priq.a, b) + hasDef.add(b.ID) + if debugPhi { + fmt.Printf("def of var%d in %s\n", n, b) + } + } + heap.Init(priq) + + // Visit blocks defining variable n, from deepest to shallowest. + for len(priq.a) > 0 { + currentRoot := heap.Pop(priq).(*ssa.Block) + if debugPhi { + fmt.Printf("currentRoot %s\n", currentRoot) + } + // Walk subtree below definition. + // Skip subtrees we've done in previous iterations. + // Find edges exiting tree dominated by definition (the dominance frontier). + // Insert phis at target blocks. 
+ if queued.contains(currentRoot.ID) { + s.s.Fatalf("root already in queue") + } + q = append(q, currentRoot) + queued.add(currentRoot.ID) + for len(q) > 0 { + b := q[len(q)-1] + q = q[:len(q)-1] + if debugPhi { + fmt.Printf(" processing %s\n", b) + } + + currentRootLevel := s.level[currentRoot.ID] + for _, e := range b.Succs { + c := e.Block() + // TODO: if the variable is dead at c, skip it. + if s.level[c.ID] > currentRootLevel { + // a D-edge, or an edge whose target is in currentRoot's subtree. + continue + } + if hasPhi.contains(c.ID) { + continue + } + // Add a phi to block c for variable n. + hasPhi.add(c.ID) + v := c.NewValue0I(currentRoot.Pos, ssa.OpPhi, typ, int64(n)) // TODO: line number right? + // Note: we store the variable number in the phi's AuxInt field. Used temporarily by phi building. + if var_.Op() == ir.ONAME { + s.s.addNamedValue(var_.(*ir.Name), v) + } + for range c.Preds { + v.AddArg(s.placeholder) // Actual args will be filled in by resolveFwdRefs. + } + if debugPhi { + fmt.Printf("new phi for var%d in %s: %s\n", n, c, v) + } + if !hasDef.contains(c.ID) { + // There's now a new definition of this variable in block c. + // Add it to the priority queue to explore. + heap.Push(priq, c) + hasDef.add(c.ID) + } + } + + // Visit children if they have not been visited yet. + for c := s.tree[b.ID].firstChild; c != nil; c = s.tree[c.ID].sibling { + if !queued.contains(c.ID) { + q = append(q, c) + queued.add(c.ID) + } + } + } + } +} + +// resolveFwdRefs links all FwdRef uses up to their nearest dominating definition. +func (s *phiState) resolveFwdRefs() { + // Do a depth-first walk of the dominator tree, keeping track + // of the most-recently-seen value for each variable. + + // Map from variable ID to SSA value at the current point of the walk. + values := make([]*ssa.Value, len(s.varnum)) + for i := range values { + values[i] = s.placeholder + } + + // Stack of work to do. 
+ type stackEntry struct { + b *ssa.Block // block to explore + + // variable/value pair to reinstate on exit + n int32 // variable ID + v *ssa.Value + + // Note: only one of b or n,v will be set. + } + var stk []stackEntry + + stk = append(stk, stackEntry{b: s.f.Entry}) + for len(stk) > 0 { + work := stk[len(stk)-1] + stk = stk[:len(stk)-1] + + b := work.b + if b == nil { + // On exit from a block, this case will undo any assignments done below. + values[work.n] = work.v + continue + } + + // Process phis as new defs. They come before FwdRefs in this block. + for _, v := range b.Values { + if v.Op != ssa.OpPhi { + continue + } + n := int32(v.AuxInt) + // Remember the old assignment so we can undo it when we exit b. + stk = append(stk, stackEntry{n: n, v: values[n]}) + // Record the new assignment. + values[n] = v + } + + // Replace a FwdRef op with the current incoming value for its variable. + for _, v := range b.Values { + if v.Op != ssa.OpFwdRef { + continue + } + n := s.varnum[v.Aux.(fwdRefAux).N] + v.Op = ssa.OpCopy + v.Aux = nil + v.AddArg(values[n]) + } + + // Establish values for variables defined in b. + for var_, v := range s.defvars[b.ID] { + n, ok := s.varnum[var_] + if !ok { + // some variable not live across a basic block boundary. + continue + } + // Remember the old assignment so we can undo it when we exit b. + stk = append(stk, stackEntry{n: n, v: values[n]}) + // Record the new assignment. + values[n] = v + } + + // Replace phi args in successors with the current incoming value. + for _, e := range b.Succs { + c, i := e.Block(), e.Index() + for j := len(c.Values) - 1; j >= 0; j-- { + v := c.Values[j] + if v.Op != ssa.OpPhi { + break // All phis will be at the end of the block during phi building. + } + // Only set arguments that have been resolved. + // For very wide CFGs, this significantly speeds up phi resolution. + // See golang.org/issue/8225. 
+ if w := values[v.AuxInt]; w.Op != ssa.OpUnknown { + v.SetArg(i, w) + } + } + } + + // Walk children in dominator tree. + for c := s.tree[b.ID].firstChild; c != nil; c = s.tree[c.ID].sibling { + stk = append(stk, stackEntry{b: c}) + } + } +} + +// domBlock contains extra per-block information to record the dominator tree. +type domBlock struct { + firstChild *ssa.Block // first child of block in dominator tree + sibling *ssa.Block // next child of parent in dominator tree +} + +// A block heap is used as a priority queue to implement the PiggyBank +// from Sreedhar and Gao. That paper uses an array which is better +// asymptotically but worse in the common case when the PiggyBank +// holds a sparse set of blocks. +type blockHeap struct { + a []*ssa.Block // block IDs in heap + level []int32 // depth in dominator tree (static, used for determining priority) +} + +func (h *blockHeap) Len() int { return len(h.a) } +func (h *blockHeap) Swap(i, j int) { a := h.a; a[i], a[j] = a[j], a[i] } + +func (h *blockHeap) Push(x interface{}) { + v := x.(*ssa.Block) + h.a = append(h.a, v) +} +func (h *blockHeap) Pop() interface{} { + old := h.a + n := len(old) + x := old[n-1] + h.a = old[:n-1] + return x +} +func (h *blockHeap) Less(i, j int) bool { + return h.level[h.a[i].ID] > h.level[h.a[j].ID] +} + +// TODO: stop walking the iterated domininance frontier when +// the variable is dead. Maybe detect that by checking if the +// node we're on is reverse dominated by all the reads? +// Reverse dominated by the highest common successor of all the reads? + +// copy of ../ssa/sparseset.go +// TODO: move this file to ../ssa, then use sparseSet there. +type sparseSet struct { + dense []ssa.ID + sparse []int32 +} + +// newSparseSet returns a sparseSet that can represent +// integers between 0 and n-1. 
+func newSparseSet(n int) *sparseSet { + return &sparseSet{dense: nil, sparse: make([]int32, n)} +} + +func (s *sparseSet) contains(x ssa.ID) bool { + i := s.sparse[x] + return i < int32(len(s.dense)) && s.dense[i] == x +} + +func (s *sparseSet) add(x ssa.ID) { + i := s.sparse[x] + if i < int32(len(s.dense)) && s.dense[i] == x { + return + } + s.dense = append(s.dense, x) + s.sparse[x] = int32(len(s.dense)) - 1 +} + +func (s *sparseSet) clear() { + s.dense = s.dense[:0] +} + +// Variant to use for small functions. +type simplePhiState struct { + s *state // SSA state + f *ssa.Func // function to work on + fwdrefs []*ssa.Value // list of FwdRefs to be processed + defvars []map[ir.Node]*ssa.Value // defined variables at end of each block + reachable []bool // which blocks are reachable +} + +func (s *simplePhiState) insertPhis() { + s.reachable = ssa.ReachableBlocks(s.f) + + // Find FwdRef ops. + for _, b := range s.f.Blocks { + for _, v := range b.Values { + if v.Op != ssa.OpFwdRef { + continue + } + s.fwdrefs = append(s.fwdrefs, v) + var_ := v.Aux.(fwdRefAux).N + if _, ok := s.defvars[b.ID][var_]; !ok { + s.defvars[b.ID][var_] = v // treat FwdDefs as definitions. + } + } + } + + var args []*ssa.Value + +loop: + for len(s.fwdrefs) > 0 { + v := s.fwdrefs[len(s.fwdrefs)-1] + s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1] + b := v.Block + var_ := v.Aux.(fwdRefAux).N + if b == s.f.Entry { + // No variable should be live at entry. + s.s.Fatalf("value %v (%v) incorrectly live at entry", var_, v) + } + if !s.reachable[b.ID] { + // This block is dead. + // It doesn't matter what we use here as long as it is well-formed. + v.Op = ssa.OpUnknown + v.Aux = nil + continue + } + // Find variable value on each predecessor. + args = args[:0] + for _, e := range b.Preds { + args = append(args, s.lookupVarOutgoing(e.Block(), v.Type, var_, v.Pos)) + } + + // Decide if we need a phi or not. We need a phi if there + // are two different args (which are both not v). 
+ var w *ssa.Value + for _, a := range args { + if a == v { + continue // self-reference + } + if a == w { + continue // already have this witness + } + if w != nil { + // two witnesses, need a phi value + v.Op = ssa.OpPhi + v.AddArgs(args...) + v.Aux = nil + continue loop + } + w = a // save witness + } + if w == nil { + s.s.Fatalf("no witness for reachable phi %s", v) + } + // One witness. Make v a copy of w. + v.Op = ssa.OpCopy + v.Aux = nil + v.AddArg(w) + } +} + +// lookupVarOutgoing finds the variable's value at the end of block b. +func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ ir.Node, line src.XPos) *ssa.Value { + for { + if v := s.defvars[b.ID][var_]; v != nil { + return v + } + // The variable is not defined by b and we haven't looked it up yet. + // If b has exactly one predecessor, loop to look it up there. + // Otherwise, give up and insert a new FwdRef and resolve it later. + if len(b.Preds) != 1 { + break + } + b = b.Preds[0].Block() + if !s.reachable[b.ID] { + // This is rare; it happens with oddly interleaved infinite loops in dead code. + // See issue 19783. + break + } + } + // Generate a FwdRef for the variable and return that. + v := b.NewValue0A(line, ssa.OpFwdRef, t, fwdRefAux{N: var_}) + s.defvars[b.ID][var_] = v + if var_.Op() == ir.ONAME { + s.s.addNamedValue(var_.(*ir.Name), v) + } + s.fwdrefs = append(s.fwdrefs, v) + return v +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssagen/ssa.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssagen/ssa.go new file mode 100644 index 0000000000000000000000000000000000000000..c794d6ffd9d783da2e63ba89e39cb1051b0ae66d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssagen/ssa.go @@ -0,0 +1,8369 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssagen + +import ( + "bufio" + "bytes" + "fmt" + "go/constant" + "html" + "internal/buildcfg" + "os" + "path/filepath" + "sort" + "strings" + + "cmd/compile/internal/abi" + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/liveness" + "cmd/compile/internal/objw" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/ssa" + "cmd/compile/internal/staticdata" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" + "cmd/internal/sys" + + rtabi "internal/abi" +) + +var ssaConfig *ssa.Config +var ssaCaches []ssa.Cache + +var ssaDump string // early copy of $GOSSAFUNC; the func name to dump output for +var ssaDir string // optional destination for ssa dump file +var ssaDumpStdout bool // whether to dump to stdout +var ssaDumpCFG string // generate CFGs for these phases +const ssaDumpFile = "ssa.html" + +// ssaDumpInlined holds all inlined functions when ssaDump contains a function name. +var ssaDumpInlined []*ir.Func + +func DumpInline(fn *ir.Func) { + if ssaDump != "" && ssaDump == ir.FuncName(fn) { + ssaDumpInlined = append(ssaDumpInlined, fn) + } +} + +func InitEnv() { + ssaDump = os.Getenv("GOSSAFUNC") + ssaDir = os.Getenv("GOSSADIR") + if ssaDump != "" { + if strings.HasSuffix(ssaDump, "+") { + ssaDump = ssaDump[:len(ssaDump)-1] + ssaDumpStdout = true + } + spl := strings.Split(ssaDump, ":") + if len(spl) > 1 { + ssaDump = spl[0] + ssaDumpCFG = spl[1] + } + } +} + +func InitConfig() { + types_ := ssa.NewTypes() + + if Arch.SoftFloat { + softfloatInit() + } + + // Generate a few pointer types that are uncommon in the frontend but common in the backend. + // Caching is disabled in the backend, so generating these here avoids allocations. 
+ _ = types.NewPtr(types.Types[types.TINTER]) // *interface{} + _ = types.NewPtr(types.NewPtr(types.Types[types.TSTRING])) // **string + _ = types.NewPtr(types.NewSlice(types.Types[types.TINTER])) // *[]interface{} + _ = types.NewPtr(types.NewPtr(types.ByteType)) // **byte + _ = types.NewPtr(types.NewSlice(types.ByteType)) // *[]byte + _ = types.NewPtr(types.NewSlice(types.Types[types.TSTRING])) // *[]string + _ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))) // ***uint8 + _ = types.NewPtr(types.Types[types.TINT16]) // *int16 + _ = types.NewPtr(types.Types[types.TINT64]) // *int64 + _ = types.NewPtr(types.ErrorType) // *error + _ = types.NewPtr(reflectdata.MapType()) // *runtime.hmap + _ = types.NewPtr(deferstruct()) // *runtime._defer + types.NewPtrCacheEnabled = false + ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0, Arch.SoftFloat) + ssaConfig.Race = base.Flag.Race + ssaCaches = make([]ssa.Cache, base.Flag.LowerC) + + // Set up some runtime functions we'll need to call. 
+ ir.Syms.AssertE2I = typecheck.LookupRuntimeFunc("assertE2I") + ir.Syms.AssertE2I2 = typecheck.LookupRuntimeFunc("assertE2I2") + ir.Syms.AssertI2I = typecheck.LookupRuntimeFunc("assertI2I") + ir.Syms.AssertI2I2 = typecheck.LookupRuntimeFunc("assertI2I2") + ir.Syms.CgoCheckMemmove = typecheck.LookupRuntimeFunc("cgoCheckMemmove") + ir.Syms.CgoCheckPtrWrite = typecheck.LookupRuntimeFunc("cgoCheckPtrWrite") + ir.Syms.CheckPtrAlignment = typecheck.LookupRuntimeFunc("checkptrAlignment") + ir.Syms.Deferproc = typecheck.LookupRuntimeFunc("deferproc") + ir.Syms.Deferprocat = typecheck.LookupRuntimeFunc("deferprocat") + ir.Syms.DeferprocStack = typecheck.LookupRuntimeFunc("deferprocStack") + ir.Syms.Deferreturn = typecheck.LookupRuntimeFunc("deferreturn") + ir.Syms.Duffcopy = typecheck.LookupRuntimeFunc("duffcopy") + ir.Syms.Duffzero = typecheck.LookupRuntimeFunc("duffzero") + ir.Syms.GCWriteBarrier[0] = typecheck.LookupRuntimeFunc("gcWriteBarrier1") + ir.Syms.GCWriteBarrier[1] = typecheck.LookupRuntimeFunc("gcWriteBarrier2") + ir.Syms.GCWriteBarrier[2] = typecheck.LookupRuntimeFunc("gcWriteBarrier3") + ir.Syms.GCWriteBarrier[3] = typecheck.LookupRuntimeFunc("gcWriteBarrier4") + ir.Syms.GCWriteBarrier[4] = typecheck.LookupRuntimeFunc("gcWriteBarrier5") + ir.Syms.GCWriteBarrier[5] = typecheck.LookupRuntimeFunc("gcWriteBarrier6") + ir.Syms.GCWriteBarrier[6] = typecheck.LookupRuntimeFunc("gcWriteBarrier7") + ir.Syms.GCWriteBarrier[7] = typecheck.LookupRuntimeFunc("gcWriteBarrier8") + ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded") + ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice") + ir.Syms.InterfaceSwitch = typecheck.LookupRuntimeFunc("interfaceSwitch") + ir.Syms.Memmove = typecheck.LookupRuntimeFunc("memmove") + ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread") + ir.Syms.Msanwrite = typecheck.LookupRuntimeFunc("msanwrite") + ir.Syms.Msanmove = typecheck.LookupRuntimeFunc("msanmove") + ir.Syms.Asanread = 
typecheck.LookupRuntimeFunc("asanread") + ir.Syms.Asanwrite = typecheck.LookupRuntimeFunc("asanwrite") + ir.Syms.Newobject = typecheck.LookupRuntimeFunc("newobject") + ir.Syms.Newproc = typecheck.LookupRuntimeFunc("newproc") + ir.Syms.Panicdivide = typecheck.LookupRuntimeFunc("panicdivide") + ir.Syms.PanicdottypeE = typecheck.LookupRuntimeFunc("panicdottypeE") + ir.Syms.PanicdottypeI = typecheck.LookupRuntimeFunc("panicdottypeI") + ir.Syms.Panicnildottype = typecheck.LookupRuntimeFunc("panicnildottype") + ir.Syms.Panicoverflow = typecheck.LookupRuntimeFunc("panicoverflow") + ir.Syms.Panicshift = typecheck.LookupRuntimeFunc("panicshift") + ir.Syms.Racefuncenter = typecheck.LookupRuntimeFunc("racefuncenter") + ir.Syms.Racefuncexit = typecheck.LookupRuntimeFunc("racefuncexit") + ir.Syms.Raceread = typecheck.LookupRuntimeFunc("raceread") + ir.Syms.Racereadrange = typecheck.LookupRuntimeFunc("racereadrange") + ir.Syms.Racewrite = typecheck.LookupRuntimeFunc("racewrite") + ir.Syms.Racewriterange = typecheck.LookupRuntimeFunc("racewriterange") + ir.Syms.TypeAssert = typecheck.LookupRuntimeFunc("typeAssert") + ir.Syms.WBZero = typecheck.LookupRuntimeFunc("wbZero") + ir.Syms.WBMove = typecheck.LookupRuntimeFunc("wbMove") + ir.Syms.X86HasPOPCNT = typecheck.LookupRuntimeVar("x86HasPOPCNT") // bool + ir.Syms.X86HasSSE41 = typecheck.LookupRuntimeVar("x86HasSSE41") // bool + ir.Syms.X86HasFMA = typecheck.LookupRuntimeVar("x86HasFMA") // bool + ir.Syms.ARMHasVFPv4 = typecheck.LookupRuntimeVar("armHasVFPv4") // bool + ir.Syms.ARM64HasATOMICS = typecheck.LookupRuntimeVar("arm64HasATOMICS") // bool + ir.Syms.Staticuint64s = typecheck.LookupRuntimeVar("staticuint64s") + ir.Syms.Typedmemmove = typecheck.LookupRuntimeFunc("typedmemmove") + ir.Syms.Udiv = typecheck.LookupRuntimeVar("udiv") // asm func with special ABI + ir.Syms.WriteBarrier = typecheck.LookupRuntimeVar("writeBarrier") // struct { bool; ... 
} + ir.Syms.Zerobase = typecheck.LookupRuntimeVar("zerobase") + + if Arch.LinkArch.Family == sys.Wasm { + BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("goPanicIndex") + BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("goPanicIndexU") + BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("goPanicSliceAlen") + BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("goPanicSliceAlenU") + BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("goPanicSliceAcap") + BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("goPanicSliceAcapU") + BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("goPanicSliceB") + BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("goPanicSliceBU") + BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("goPanicSlice3Alen") + BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("goPanicSlice3AlenU") + BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("goPanicSlice3Acap") + BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("goPanicSlice3AcapU") + BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("goPanicSlice3B") + BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("goPanicSlice3BU") + BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("goPanicSlice3C") + BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("goPanicSlice3CU") + BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("goPanicSliceConvert") + } else { + BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("panicIndex") + BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("panicIndexU") + BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("panicSliceAlen") + BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("panicSliceAlenU") + BoundsCheckFunc[ssa.BoundsSliceAcap] = 
typecheck.LookupRuntimeFunc("panicSliceAcap") + BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("panicSliceAcapU") + BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("panicSliceB") + BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("panicSliceBU") + BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("panicSlice3Alen") + BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("panicSlice3AlenU") + BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("panicSlice3Acap") + BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("panicSlice3AcapU") + BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("panicSlice3B") + BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("panicSlice3BU") + BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("panicSlice3C") + BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("panicSlice3CU") + BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("panicSliceConvert") + } + if Arch.LinkArch.PtrSize == 4 { + ExtendCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeVar("panicExtendIndex") + ExtendCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeVar("panicExtendIndexU") + ExtendCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeVar("panicExtendSliceAlen") + ExtendCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeVar("panicExtendSliceAlenU") + ExtendCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeVar("panicExtendSliceAcap") + ExtendCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeVar("panicExtendSliceAcapU") + ExtendCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeVar("panicExtendSliceB") + ExtendCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeVar("panicExtendSliceBU") + ExtendCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeVar("panicExtendSlice3Alen") + ExtendCheckFunc[ssa.BoundsSlice3AlenU] = 
typecheck.LookupRuntimeVar("panicExtendSlice3AlenU")
		ExtendCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeVar("panicExtendSlice3Acap")
		ExtendCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeVar("panicExtendSlice3AcapU")
		ExtendCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeVar("panicExtendSlice3B")
		ExtendCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeVar("panicExtendSlice3BU")
		ExtendCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeVar("panicExtendSlice3C")
		ExtendCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeVar("panicExtendSlice3CU")
	}

	// Wasm (all asm funcs with special ABIs)
	ir.Syms.WasmDiv = typecheck.LookupRuntimeVar("wasmDiv")
	ir.Syms.WasmTruncS = typecheck.LookupRuntimeVar("wasmTruncS")
	ir.Syms.WasmTruncU = typecheck.LookupRuntimeVar("wasmTruncU")
	ir.Syms.SigPanic = typecheck.LookupRuntimeFunc("sigpanic")
}

// AbiForBodylessFuncStackMap returns the ABI for a bodyless function's stack map.
// This is not necessarily the ABI used to call it.
// Currently (1.17 dev) such a stack map is always ABI0;
// any ABI wrapper that is present is nosplit, hence a precise
// stack map is not needed there (the parameters survive only long
// enough to call the wrapped assembly function).
// This always returns a freshly copied ABI.
func AbiForBodylessFuncStackMap(fn *ir.Func) *abi.ABIConfig {
	return ssaConfig.ABI0.Copy() // No idea what races will result, be safe
}

// abiForFunc implements ABI policy for a function, but does not return a copy of the ABI.
// Passing a nil function returns the default ABI based on experiment configuration.
func abiForFunc(fn *ir.Func, abi0, abi1 *abi.ABIConfig) *abi.ABIConfig {
	if buildcfg.Experiment.RegabiArgs {
		// Select the ABI based on the function's defining ABI.
		if fn == nil {
			return abi1
		}
		switch fn.ABI {
		case obj.ABI0:
			return abi0
		case obj.ABIInternal:
			// TODO(austin): Clean up the nomenclature here.
			// It's not clear that "abi1" is ABIInternal.
			return abi1
		}
		base.Fatalf("function %v has unknown ABI %v", fn, fn.ABI)
		panic("not reachable")
	}

	// Without the register-args experiment, everything defaults to ABI0
	// unless the function explicitly opted in via the pragma below.
	a := abi0
	if fn != nil {
		if fn.Pragma&ir.RegisterParams != 0 { // TODO(register args) remove after register abi is working
			a = abi1
		}
	}
	return a
}

// emitOpenDeferInfo emits FUNCDATA information about the defers in a function
// that is using open-coded defers. This funcdata is used to determine the active
// defers in a function and execute those defers during panic processing.
//
// The funcdata is all encoded in varints (since values will almost always be less than
// 128, but stack offsets could potentially be up to 2Gbyte). All "locations" (offsets)
// for stack variables are specified as the number of bytes below varp (pointer to the
// top of the local variables) for their starting address. The format is:
//
//   - Offset of the deferBits variable
//   - Offset of the first closure slot (the rest are laid out consecutively).
func (s *state) emitOpenDeferInfo() {
	firstOffset := s.openDefers[0].closureNode.FrameOffset()

	// Verify that cmpstackvarlt laid out the slots in order.
	for i, r := range s.openDefers {
		have := r.closureNode.FrameOffset()
		want := firstOffset + int64(i)*int64(types.PtrSize)
		if have != want {
			base.FatalfAt(s.curfn.Pos(), "unexpected frame offset for open-coded defer slot #%v: have %v, want %v", i, have, want)
		}
	}

	// The funcdata symbol is content-addressable so identical layouts dedup.
	x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
	x.Set(obj.AttrContentAddressable, true)
	s.curfn.LSym.Func().OpenCodedDeferInfo = x

	// Offsets are negated so they encode as small positive varints
	// (frame offsets below varp are negative).
	off := 0
	off = objw.Uvarint(x, off, uint64(-s.deferBitsTemp.FrameOffset()))
	off = objw.Uvarint(x, off, uint64(-firstOffset))
}

// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *ir.Func, worker int) *ssa.Func {
	name := ir.FuncName(fn)

	abiSelf := abiForFunc(fn, ssaConfig.ABI0, ssaConfig.ABI1)

	// Decide whether GOSSAFUNC asked for this function to be dumped.
	printssa := false
	// match either a simple name e.g. "(*Reader).Reset", package.name e.g. "compress/gzip.(*Reader).Reset", or subpackage name "gzip.(*Reader).Reset"
	// optionally allows an ABI suffix specification in the GOSSAHASH, e.g. "(*Reader).Reset<0>" etc
	if strings.Contains(ssaDump, name) { // in all the cases the function name is entirely contained within the GOSSAFUNC string.
		nameOptABI := name
		if strings.Contains(ssaDump, ",") { // ABI specification
			nameOptABI = ssa.FuncNameABI(name, abiSelf.Which())
		} else if strings.HasSuffix(ssaDump, ">") { // if they use the linker syntax instead....
			l := len(ssaDump)
			if l >= 3 && ssaDump[l-3] == '<' {
				nameOptABI = ssa.FuncNameABI(name, abiSelf.Which())
				// Rewrite "name<N>" into the internal "name,N" form.
				ssaDump = ssaDump[:l-3] + "," + ssaDump[l-2:l-1]
			}
		}
		pkgDotName := base.Ctxt.Pkgpath + "." + nameOptABI
		printssa = nameOptABI == ssaDump || // "(*Reader).Reset"
			pkgDotName == ssaDump || // "compress/gzip.(*Reader).Reset"
			strings.HasSuffix(pkgDotName, ssaDump) && strings.HasSuffix(pkgDotName, "/"+ssaDump) // "gzip.(*Reader).Reset"
	}

	var astBuf *bytes.Buffer
	if printssa {
		astBuf = &bytes.Buffer{}
		ir.FDumpList(astBuf, "buildssa-body", fn.Body)
		if ssaDumpStdout {
			fmt.Println("generating SSA for", name)
			fmt.Print(astBuf.String())
		}
	}

	var s state
	s.pushLine(fn.Pos())
	defer s.popLine()

	s.hasdefer = fn.HasDefer()
	if fn.Pragma&ir.CgoUnsafeArgs != 0 {
		s.cgoUnsafeArgs = true
	}
	s.checkPtrEnabled = ir.ShouldCheckPtr(fn, 1)

	if base.Flag.Cfg.Instrumenting && fn.Pragma&ir.Norace == 0 && !fn.Linksym().ABIWrapper() {
		if !base.Flag.Race || !objabi.LookupPkgSpecial(fn.Sym().Pkg.Path).NoRaceFunc {
			s.instrumentMemory = true
		}
		if base.Flag.Race {
			s.instrumentEnterExit = true
		}
	}

	fe := ssafn{
		curfn: fn,
		log:   printssa && ssaDumpStdout,
	}
	s.curfn = fn

	// Per-worker value/block caches are reused across functions.
	cache := &ssaCaches[worker]
	cache.Reset()

	s.f = ssaConfig.NewFunc(&fe, cache)
	s.config = ssaConfig
	s.f.Type = fn.Type()
	s.f.Name = name
	s.f.PrintOrHtmlSSA = printssa
	if fn.Pragma&ir.Nosplit != 0 {
		s.f.NoSplit = true
	}
	s.f.ABI0 = ssaConfig.ABI0
	s.f.ABI1 = ssaConfig.ABI1
	s.f.ABIDefault = abiForFunc(nil, ssaConfig.ABI0, ssaConfig.ABI1)
	s.f.ABISelf = abiSelf

	s.panics = map[funcLine]*ssa.Block{}
	s.softFloat = s.config.SoftFloat

	// Allocate starting block
	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
	s.f.Entry.Pos = fn.Pos()

	if printssa {
		ssaDF := ssaDumpFile
		if ssaDir != "" {
			ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+s.f.NameABI()+".html")
			ssaD := filepath.Dir(ssaDF)
			// Error deliberately ignored: best-effort directory creation
			// for the debug dump only.
			os.MkdirAll(ssaD, 0755)
		}
		s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDF, s.f, ssaDumpCFG)
		// TODO: generate and print a mapping from nodes to values and blocks
		dumpSourcesColumn(s.f.HTMLWriter, fn)
		s.f.HTMLWriter.WriteAST("AST", astBuf)
	}

	// Allocate starting values
	s.labels = map[string]*ssaLabel{}
	s.fwdVars = map[ir.Node]*ssa.Value{}
	s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)

	// Decide whether this function may use open-coded defers; any of the
	// conditions below disqualifies it and forces the runtime defer path.
	s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed()
	switch {
	case base.Debug.NoOpenDefer != 0:
		s.hasOpenDefers = false
	case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && base.Ctxt.Arch.Name == "386":
		// Don't support open-coded defers for 386 ONLY when using shared
		// libraries, because there is extra code (added by rewriteToUseGot())
		// preceding the deferreturn/ret code that we don't track correctly.
		s.hasOpenDefers = false
	}
	if s.hasOpenDefers && s.instrumentEnterExit {
		// Skip doing open defers if we need to instrument function
		// returns for the race detector, since we will not generate that
		// code in the case of the extra deferreturn/ret segment.
		s.hasOpenDefers = false
	}
	if s.hasOpenDefers {
		// Similarly, skip if there are any heap-allocated result
		// parameters that need to be copied back to their stack slots.
		for _, f := range s.curfn.Type().Results() {
			if !f.Nname.(*ir.Name).OnStack() {
				s.hasOpenDefers = false
				break
			}
		}
	}
	if s.hasOpenDefers &&
		s.curfn.NumReturns*s.curfn.NumDefers > 15 {
		// Since we are generating defer calls at every exit for
		// open-coded defers, skip doing open-coded defers if there are
		// too many returns (especially if there are multiple defers).
		// Open-coded defers are most important for improving performance
		// for smaller functions (which don't have many returns).
		s.hasOpenDefers = false
	}

	s.sp = s.entryNewValue0(ssa.OpSP, types.Types[types.TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
	s.sb = s.entryNewValue0(ssa.OpSB, types.Types[types.TUINTPTR])

	s.startBlock(s.f.Entry)
	s.vars[memVar] = s.startmem
	if s.hasOpenDefers {
		// Create the deferBits variable and stack slot. deferBits is a
		// bitmask showing which of the open-coded defers in this function
		// have been activated.
		deferBitsTemp := typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8])
		deferBitsTemp.SetAddrtaken(true)
		s.deferBitsTemp = deferBitsTemp
		// For this value, AuxInt is initialized to zero by default
		startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[types.TUINT8])
		s.vars[deferBitsVar] = startDeferBits
		s.deferBitsAddr = s.addr(deferBitsTemp)
		s.store(types.Types[types.TUINT8], s.deferBitsAddr, startDeferBits)
		// Make sure that the deferBits stack slot is kept alive (for use
		// by panics) and stores to deferBits are not eliminated, even if
		// all checking code on deferBits in the function exit can be
		// eliminated, because the defer statements were all
		// unconditional.
		s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
	}

	var params *abi.ABIParamResultInfo
	params = s.f.ABISelf.ABIAnalyze(fn.Type(), true)

	// The backend's stackframe pass prunes away entries from the fn's
	// Dcl list, including PARAMOUT nodes that correspond to output
	// params passed in registers. Walk the Dcl list and capture these
	// nodes to a side list, so that we'll have them available during
	// DWARF-gen later on. See issue 48573 for more details.
	var debugInfo ssa.FuncDebug
	for _, n := range fn.Dcl {
		if n.Class == ir.PPARAMOUT && n.IsOutputParamInRegisters() {
			debugInfo.RegOutputParams = append(debugInfo.RegOutputParams, n)
		}
	}
	fn.DebugInfo = &debugInfo

	// Generate addresses of local declarations
	s.decladdrs = map[*ir.Name]*ssa.Value{}
	for _, n := range fn.Dcl {
		switch n.Class {
		case ir.PPARAM:
			// Be aware that blank and unnamed input parameters will not appear here, but do appear in the type
			s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
		case ir.PPARAMOUT:
			s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
		case ir.PAUTO:
			// processed at each use, to prevent Addr coming
			// before the decl.
		default:
			s.Fatalf("local variable with class %v unimplemented", n.Class)
		}
	}

	s.f.OwnAux = ssa.OwnAuxCall(fn.LSym, params)

	// Populate SSAable arguments.
	for _, n := range fn.Dcl {
		if n.Class == ir.PPARAM {
			if s.canSSA(n) {
				v := s.newValue0A(ssa.OpArg, n.Type(), n)
				s.vars[n] = v
				s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
			} else { // address was taken AND/OR too large for SSA
				paramAssignment := ssa.ParamAssignmentForArgName(s.f, n)
				if len(paramAssignment.Registers) > 0 {
					if ssa.CanSSA(n.Type()) { // SSA-able type, so address was taken -- receive value in OpArg, DO NOT bind to var, store immediately to memory.
						v := s.newValue0A(ssa.OpArg, n.Type(), n)
						s.store(n.Type(), s.decladdrs[n], v)
					} else { // Too big for SSA.
						// Brute force, and early, do a bunch of stores from registers
						// Note that expand calls knows about this and doesn't trouble itself with larger-than-SSA-able Args in registers.
						s.storeParameterRegsToStack(s.f.ABISelf, paramAssignment, n, s.decladdrs[n], false)
					}
				}
			}
		}
	}

	// Populate closure variables.
	if fn.Needctxt() {
		clo := s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)
		offset := int64(types.PtrSize) // PtrSize to skip past function entry PC field
		for _, n := range fn.ClosureVars {
			typ := n.Type()
			if !n.Byval() {
				typ = types.NewPtr(typ)
			}

			offset = types.RoundUp(offset, typ.Alignment())
			ptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo)
			offset += typ.Size()

			// If n is a small variable captured by value, promote
			// it to PAUTO so it can be converted to SSA.
			//
			// Note: While we never capture a variable by value if
			// the user took its address, we may have generated
			// runtime calls that did (#43701). Since we don't
			// convert Addrtaken variables to SSA anyway, no point
			// in promoting them either.
			if n.Byval() && !n.Addrtaken() && ssa.CanSSA(n.Type()) {
				n.Class = ir.PAUTO
				fn.Dcl = append(fn.Dcl, n)
				s.assign(n, s.load(n.Type(), ptr), false, 0)
				continue
			}

			if !n.Byval() {
				ptr = s.load(typ, ptr)
			}
			s.setHeapaddr(fn.Pos(), n, ptr)
		}
	}

	// Convert the AST-based IR to the SSA-based IR
	if s.instrumentEnterExit {
		s.rtcall(ir.Syms.Racefuncenter, true, nil, s.newValue0(ssa.OpGetCallerPC, types.Types[types.TUINTPTR]))
	}
	s.zeroResults()
	s.paramsToHeap()
	s.stmtList(fn.Body)

	// fallthrough to exit
	if s.curBlock != nil {
		s.pushLine(fn.Endlineno)
		s.exit()
		s.popLine()
	}

	for _, b := range s.f.Blocks {
		if b.Pos != src.NoXPos {
			s.updateUnsetPredPos(b)
		}
	}

	s.f.HTMLWriter.WritePhase("before insert phis", "before insert phis")

	s.insertPhis()

	// Main call to ssa package to compile function
	ssa.Compile(s.f)

	fe.AllocFrame(s.f)

	if len(s.openDefers) != 0 {
		s.emitOpenDeferInfo()
	}

	// Record incoming parameter spill information for morestack calls emitted in the assembler.
	// This is done here, using all the parameters (used, partially used, and unused) because
	// it mimics the behavior of the former ABI (everything stored) and because it's not 100%
	// clear if naming conventions are respected in autogenerated code.
	// TODO figure out exactly what's unused, don't spill it. Make liveness fine-grained, also.
	for _, p := range params.InParams() {
		typs, offs := p.RegisterTypesAndOffsets()
		for i, t := range typs {
			o := offs[i]                // offset within parameter
			fo := p.FrameOffset(params) // offset of parameter in frame
			reg := ssa.ObjRegForAbiReg(p.Registers[i], s.f.Config)
			s.f.RegArgs = append(s.f.RegArgs, ssa.Spill{Reg: reg, Offset: fo + o, Type: t})
		}
	}

	return s.f
}

// storeParameterRegsToStack emits stores that copy the register-resident
// pieces of parameter n (as described by paramAssignment) to its stack
// home starting at addr. If pointersOnly is true, only pointer-shaped
// pieces are stored.
func (s *state) storeParameterRegsToStack(abi *abi.ABIConfig, paramAssignment *abi.ABIParamAssignment, n *ir.Name, addr *ssa.Value, pointersOnly bool) {
	typs, offs := paramAssignment.RegisterTypesAndOffsets()
	for i, t := range typs {
		if pointersOnly && !t.IsPtrShaped() {
			continue
		}
		r := paramAssignment.Registers[i]
		o := offs[i]
		op, reg := ssa.ArgOpAndRegisterFor(r, abi)
		aux := &ssa.AuxNameOffset{Name: n, Offset: o}
		v := s.newValue0I(op, t, reg)
		v.Aux = aux
		p := s.newValue1I(ssa.OpOffPtr, types.NewPtr(t), o, addr)
		s.store(t, p, v)
	}
}

// zeroResults zeros the return values at the start of the function.
// We need to do this very early in the function. Defer might stop a
// panic and show the return values as they exist at the time of
// panic. For precise stacks, the garbage collector assumes results
// are always live, so we need to zero them before any allocations,
// even allocations to move params/results to the heap.
func (s *state) zeroResults() {
	for _, f := range s.curfn.Type().Results() {
		n := f.Nname.(*ir.Name)
		if !n.OnStack() {
			// The local which points to the return value is the
			// thing that needs zeroing. This is already handled
			// by a Needzero annotation in plive.go:(*liveness).epilogue.
			continue
		}
		// Zero the stack location containing f.
		if typ := n.Type(); ssa.CanSSA(typ) {
			s.assign(n, s.zeroVal(typ), false, 0)
		} else {
			if typ.HasPointers() {
				s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
			}
			s.zero(n.Type(), s.decladdrs[n])
		}
	}
}

// paramsToHeap produces code to allocate memory for heap-escaped parameters
// and to copy non-result parameters' values from the stack.
func (s *state) paramsToHeap() {
	do := func(params []*types.Field) {
		for _, f := range params {
			if f.Nname == nil {
				continue // anonymous or blank parameter
			}
			n := f.Nname.(*ir.Name)
			if ir.IsBlank(n) || n.OnStack() {
				continue
			}
			s.newHeapaddr(n)
			if n.Class == ir.PPARAM {
				s.move(n.Type(), s.expr(n.Heapaddr), s.decladdrs[n])
			}
		}
	}

	typ := s.curfn.Type()
	do(typ.Recvs())
	do(typ.Params())
	do(typ.Results())
}

// newHeapaddr allocates heap memory for n and sets its heap address.
func (s *state) newHeapaddr(n *ir.Name) {
	s.setHeapaddr(n.Pos(), n, s.newObject(n.Type(), nil))
}

// setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil)
// and then sets it as n's heap address.
func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) {
	if !ptr.Type.IsPtr() || !types.Identical(n.Type(), ptr.Type.Elem()) {
		base.FatalfAt(n.Pos(), "setHeapaddr %L with type %v", n, ptr.Type)
	}

	// Declare variable to hold address.
	sym := &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg}
	addr := s.curfn.NewLocal(pos, sym, types.NewPtr(n.Type()))
	addr.SetUsed(true)
	types.CalcSize(addr.Type())

	if n.Class == ir.PPARAMOUT {
		addr.SetIsOutputParamHeapAddr(true)
	}

	n.Heapaddr = addr
	s.assign(addr, ptr, false, 0)
}

// newObject returns an SSA value denoting new(typ).
func (s *state) newObject(typ *types.Type, rtype *ssa.Value) *ssa.Value {
	if typ.Size() == 0 {
		// Zero-size allocations all share the runtime's zerobase symbol.
		return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb)
	}
	if rtype == nil {
		rtype = s.reflectType(typ)
	}
	return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, rtype)[0]
}

// checkPtrAlignment emits a call to runtime.checkptrAlignment for the
// unsafe.Pointer conversion n, whose converted value is v. count, if
// non-nil, is the element count for a pointer-to-array conversion and
// must have uintptr size.
func (s *state) checkPtrAlignment(n *ir.ConvExpr, v *ssa.Value, count *ssa.Value) {
	if !n.Type().IsPtr() {
		s.Fatalf("expected pointer type: %v", n.Type())
	}
	elem, rtypeExpr := n.Type().Elem(), n.ElemRType
	if count != nil {
		if !elem.IsArray() {
			s.Fatalf("expected array type: %v", elem)
		}
		elem, rtypeExpr = elem.Elem(), n.ElemElemRType
	}
	size := elem.Size()
	// Casting from larger type to smaller one is ok, so for smallest type, do nothing.
	if elem.Alignment() == 1 && (size == 0 || size == 1 || count == nil) {
		return
	}
	if count == nil {
		count = s.constInt(types.Types[types.TUINTPTR], 1)
	}
	if count.Type.Size() != s.config.PtrSize {
		s.Fatalf("expected count fit to a uintptr size, have: %d, want: %d", count.Type.Size(), s.config.PtrSize)
	}
	var rtype *ssa.Value
	if rtypeExpr != nil {
		rtype = s.expr(rtypeExpr)
	} else {
		rtype = s.reflectType(elem)
	}
	s.rtcall(ir.Syms.CheckPtrAlignment, true, nil, v, rtype, count)
}

// reflectType returns an SSA value representing a pointer to typ's
// reflection type descriptor.
func (s *state) reflectType(typ *types.Type) *ssa.Value {
	// TODO(mdempsky): Make this Fatalf under Unified IR; frontend needs
	// to supply RType expressions.
	lsym := reflectdata.TypeLinksym(typ)
	return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(types.Types[types.TUINT8]), lsym, s.sb)
}

// dumpSourcesColumn writes the source listing of fn and of every inlined
// function into the SSA HTML writer's "sources" column.
func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Func) {
	// Read sources of target function fn.
	fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
	targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Endlineno.Line())
	if err != nil {
		writer.Logf("cannot read sources for function %v: %v", fn, err)
	}

	// Read sources of inlined functions.
	var inlFns []*ssa.FuncLines
	for _, fi := range ssaDumpInlined {
		elno := fi.Endlineno
		fname := base.Ctxt.PosTable.Pos(fi.Pos()).Filename()
		fnLines, err := readFuncLines(fname, fi.Pos().Line(), elno.Line())
		if err != nil {
			writer.Logf("cannot read sources for inlined function %v: %v", fi, err)
			continue
		}
		inlFns = append(inlFns, fnLines)
	}

	sort.Sort(ssa.ByTopo(inlFns))
	if targetFn != nil {
		// The target function's listing goes first.
		inlFns = append([]*ssa.FuncLines{targetFn}, inlFns...)
	}

	writer.WriteSources("sources", inlFns)
}

// readFuncLines returns lines start..end (1-based, inclusive) of file,
// for display in the SSA HTML dump.
func readFuncLines(file string, start, end uint) (*ssa.FuncLines, error) {
	f, err := os.Open(os.ExpandEnv(file))
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var lines []string
	ln := uint(1)
	scanner := bufio.NewScanner(f)
	// NOTE(review): scanner.Err() is not checked after the loop; a read
	// error silently truncates the listing. Debug-output only, but worth
	// confirming this is intentional.
	for scanner.Scan() && ln <= end {
		if ln >= start {
			lines = append(lines, scanner.Text())
		}
		ln++
	}
	return &ssa.FuncLines{Filename: file, StartLineno: start, Lines: lines}, nil
}

// updateUnsetPredPos propagates the earliest-value position information for b
// towards all of b's predecessors that need a position, and recurs on that
// predecessor if its position is updated. B should have a non-empty position.
func (s *state) updateUnsetPredPos(b *ssa.Block) {
	if b.Pos == src.NoXPos {
		s.Fatalf("Block %s should have a position", b)
	}
	bestPos := src.NoXPos
	for _, e := range b.Preds {
		p := e.Block()
		if !p.LackingPos() {
			continue
		}
		if bestPos == src.NoXPos {
			// Lazily compute the position to propagate: the first
			// positioned value in b, or b's own position.
			bestPos = b.Pos
			for _, v := range b.Values {
				if v.LackingPos() {
					continue
				}
				if v.Pos != src.NoXPos {
					// Assume values are still in roughly textual order;
					// TODO: could also seek minimum position?
					bestPos = v.Pos
					break
				}
			}
		}
		p.Pos = bestPos
		s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
	}
}

// Information about each open-coded defer.
type openDeferInfo struct {
	// The node representing the call of the defer
	n *ir.CallExpr
	// If defer call is closure call, the address of the argtmp where the
	// closure is stored.
	closure *ssa.Value
	// The node representing the argtmp where the closure is stored - used for
	// function, method, or interface call, to store a closure that panic
	// processing can use for this defer.
	closureNode *ir.Name
}

// state holds all the working state of a single buildssa invocation:
// the function being converted, the current block, and per-block
// variable tracking used for phi insertion.
type state struct {
	// configuration (arch) information
	config *ssa.Config

	// function we're building
	f *ssa.Func

	// Node for function
	curfn *ir.Func

	// labels in f
	labels map[string]*ssaLabel

	// unlabeled break and continue statement tracking
	breakTo    *ssa.Block // current target for plain break statement
	continueTo *ssa.Block // current target for plain continue statement

	// current location where we're interpreting the AST
	curBlock *ssa.Block

	// variable assignments in the current block (map from variable symbol to ssa value)
	// *Node is the unique identifier (an ONAME Node) for the variable.
	// TODO: keep a single varnum map, then make all of these maps slices instead?
	vars map[ir.Node]*ssa.Value

	// fwdVars are variables that are used before they are defined in the current block.
	// This map exists just to coalesce multiple references into a single FwdRef op.
	// *Node is the unique identifier (an ONAME Node) for the variable.
	fwdVars map[ir.Node]*ssa.Value

	// all defined variables at the end of each block. Indexed by block ID.
	defvars []map[ir.Node]*ssa.Value

	// addresses of PPARAM and PPARAMOUT variables on the stack.
	decladdrs map[*ir.Name]*ssa.Value

	// starting values. Memory, stack pointer, and globals pointer
	startmem *ssa.Value
	sp       *ssa.Value
	sb       *ssa.Value
	// value representing address of where deferBits autotmp is stored
	deferBitsAddr *ssa.Value
	deferBitsTemp *ir.Name

	// line number stack. The current line number is top of stack
	line []src.XPos
	// the last line number processed; it may have been popped
	lastPos src.XPos

	// list of panic calls by function name and line number.
	// Used to deduplicate panic calls.
	panics map[funcLine]*ssa.Block

	cgoUnsafeArgs       bool
	hasdefer            bool // whether the function contains a defer statement
	softFloat           bool
	hasOpenDefers       bool // whether we are doing open-coded defers
	checkPtrEnabled     bool // whether to insert checkptr instrumentation
	instrumentEnterExit bool // whether to instrument function enter/exit
	instrumentMemory    bool // whether to instrument memory operations

	// If doing open-coded defers, list of info about the defer calls in
	// scanning order. Hence, at exit we should run these defers in reverse
	// order of this list
	openDefers []*openDeferInfo
	// For open-coded defers, this is the beginning and end blocks of the last
	// defer exit code that we have generated so far. We use these to share
	// code between exits if the shareDeferExits option (disabled by default)
	// is on.
	lastDeferExit       *ssa.Block // Entry block of last defer exit code we generated
	lastDeferFinalBlock *ssa.Block // Final block of last defer exit code we generated
	lastDeferCount      int        // Number of defers encountered at that point

	prevCall *ssa.Value // the previous call; use this to tie results to the call op.
}

// funcLine identifies a panic call site for deduplication.
type funcLine struct {
	f    *obj.LSym
	base *src.PosBase
	line uint
}

type ssaLabel struct {
	target         *ssa.Block // block identified by this label
	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
}

// label returns the label associated with sym, creating it if necessary.
func (s *state) label(sym *types.Sym) *ssaLabel {
	lab := s.labels[sym.Name]
	if lab == nil {
		lab = new(ssaLabel)
		s.labels[sym.Name] = lab
	}
	return lab
}

func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
func (s *state) Log() bool                            { return s.f.Log() }
func (s *state) Fatalf(msg string, args ...interface{}) {
	s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
}
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool                                { return s.f.Frontend().Debug_checknil() }

// ssaMarker returns a synthetic ONAME node used only as a map key for
// tracking pseudo-variables (memory, temporaries) in s.vars.
func ssaMarker(name string) *ir.Name {
	return ir.NewNameAt(base.Pos, &types.Sym{Name: name}, nil)
}

var (
	// marker node for the memory variable
	memVar = ssaMarker("mem")

	// marker nodes for temporary variables
	ptrVar       = ssaMarker("ptr")
	lenVar       = ssaMarker("len")
	capVar       = ssaMarker("cap")
	typVar       = ssaMarker("typ")
	okVar        = ssaMarker("ok")
	deferBitsVar = ssaMarker("deferBits")
	hashVar      = ssaMarker("hash")
)

// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
	if s.curBlock != nil {
		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
	}
	s.curBlock = b
	s.vars = map[ir.Node]*ssa.Value{}
	for n := range s.fwdVars {
		delete(s.fwdVars, n)
	}
}

// endBlock marks the end of generating code for the current block.
// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
	b := s.curBlock
	if b == nil {
		return nil
	}
	for len(s.defvars) <= int(b.ID) {
		s.defvars = append(s.defvars, nil)
	}
	s.defvars[b.ID] = s.vars
	s.curBlock = nil
	s.vars = nil
	if b.LackingPos() {
		// Empty plain blocks get the line of their successor (handled after all blocks created),
		// except for increment blocks in For statements (handled in ssa conversion of OFOR),
		// and for blocks ending in GOTO/BREAK/CONTINUE.
		b.Pos = src.NoXPos
	} else {
		b.Pos = s.lastPos
	}
	return b
}

// pushLine pushes a line number on the line number stack.
func (s *state) pushLine(line src.XPos) {
	if !line.IsKnown() {
		// the frontend may emit node with line number missing,
		// use the parent line number in this case.
		line = s.peekPos()
		if base.Flag.K != 0 {
			base.Warn("buildssa: unknown position (line 0)")
		}
	} else {
		s.lastPos = line
	}

	s.line = append(s.line, line)
}

// popLine pops the top of the line number stack.
func (s *state) popLine() {
	s.line = s.line[:len(s.line)-1]
}

// peekPos peeks the top of the line number stack.
func (s *state) peekPos() src.XPos {
	return s.line[len(s.line)-1]
}

// newValue0 adds a new value with no arguments to the current block.
func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
	return s.curBlock.NewValue0(s.peekPos(), op, t)
}

// newValue0A adds a new value with no arguments and an aux value to the current block.
func (s *state) newValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
	return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
}

// newValue0I adds a new value with no arguments and an auxint value to the current block.
func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
	return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
}

// newValue1 adds a new value with one argument to the current block.
+func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value { + return s.curBlock.NewValue1(s.peekPos(), op, t, arg) +} + +// newValue1A adds a new value with one argument and an aux value to the current block. +func (s *state) newValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value { + return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg) +} + +// newValue1Apos adds a new value with one argument and an aux value to the current block. +// isStmt determines whether the created values may be a statement or not +// (i.e., false means never, yes means maybe). +func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value, isStmt bool) *ssa.Value { + if isStmt { + return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg) + } + return s.curBlock.NewValue1A(s.peekPos().WithNotStmt(), op, t, aux, arg) +} + +// newValue1I adds a new value with one argument and an auxint value to the current block. +func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value { + return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg) +} + +// newValue2 adds a new value with two arguments to the current block. +func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value { + return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1) +} + +// newValue2A adds a new value with two arguments and an aux value to the current block. +func (s *state) newValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value { + return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1) +} + +// newValue2Apos adds a new value with two arguments and an aux value to the current block. +// isStmt determines whether the created values may be a statement or not +// (i.e., false means never, yes means maybe). 
+func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value { + if isStmt { + return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1) + } + return s.curBlock.NewValue2A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1) +} + +// newValue2I adds a new value with two arguments and an auxint value to the current block. +func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value { + return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1) +} + +// newValue3 adds a new value with three arguments to the current block. +func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value { + return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2) +} + +// newValue3I adds a new value with three arguments and an auxint value to the current block. +func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value { + return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2) +} + +// newValue3A adds a new value with three arguments and an aux value to the current block. +func (s *state) newValue3A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value) *ssa.Value { + return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2) +} + +// newValue3Apos adds a new value with three arguments and an aux value to the current block. +// isStmt determines whether the created values may be a statement or not +// (i.e., false means never, yes means maybe). +func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value { + if isStmt { + return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2) + } + return s.curBlock.NewValue3A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1, arg2) +} + +// newValue4 adds a new value with four arguments to the current block. 
+func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value { + return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3) +} + +// newValue4I adds a new value with four arguments and an auxint value to the current block. +func (s *state) newValue4I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value { + return s.curBlock.NewValue4I(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3) +} + +func (s *state) entryBlock() *ssa.Block { + b := s.f.Entry + if base.Flag.N > 0 && s.curBlock != nil { + // If optimizations are off, allocate in current block instead. Since with -N + // we're not doing the CSE or tighten passes, putting lots of stuff in the + // entry block leads to O(n^2) entries in the live value map during regalloc. + // See issue 45897. + b = s.curBlock + } + return b +} + +// entryNewValue0 adds a new value with no arguments to the entry block. +func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value { + return s.entryBlock().NewValue0(src.NoXPos, op, t) +} + +// entryNewValue0A adds a new value with no arguments and an aux value to the entry block. +func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value { + return s.entryBlock().NewValue0A(src.NoXPos, op, t, aux) +} + +// entryNewValue1 adds a new value with one argument to the entry block. +func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value { + return s.entryBlock().NewValue1(src.NoXPos, op, t, arg) +} + +// entryNewValue1I adds a new value with one argument and an auxint value to the entry block. +func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value { + return s.entryBlock().NewValue1I(src.NoXPos, op, t, auxint, arg) +} + +// entryNewValue1A adds a new value with one argument and an aux value to the entry block. 
+func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value { + return s.entryBlock().NewValue1A(src.NoXPos, op, t, aux, arg) +} + +// entryNewValue2 adds a new value with two arguments to the entry block. +func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value { + return s.entryBlock().NewValue2(src.NoXPos, op, t, arg0, arg1) +} + +// entryNewValue2A adds a new value with two arguments and an aux value to the entry block. +func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value { + return s.entryBlock().NewValue2A(src.NoXPos, op, t, aux, arg0, arg1) +} + +// const* routines add a new const value to the entry block. +func (s *state) constSlice(t *types.Type) *ssa.Value { + return s.f.ConstSlice(t) +} +func (s *state) constInterface(t *types.Type) *ssa.Value { + return s.f.ConstInterface(t) +} +func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(t) } +func (s *state) constEmptyString(t *types.Type) *ssa.Value { + return s.f.ConstEmptyString(t) +} +func (s *state) constBool(c bool) *ssa.Value { + return s.f.ConstBool(types.Types[types.TBOOL], c) +} +func (s *state) constInt8(t *types.Type, c int8) *ssa.Value { + return s.f.ConstInt8(t, c) +} +func (s *state) constInt16(t *types.Type, c int16) *ssa.Value { + return s.f.ConstInt16(t, c) +} +func (s *state) constInt32(t *types.Type, c int32) *ssa.Value { + return s.f.ConstInt32(t, c) +} +func (s *state) constInt64(t *types.Type, c int64) *ssa.Value { + return s.f.ConstInt64(t, c) +} +func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value { + return s.f.ConstFloat32(t, c) +} +func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value { + return s.f.ConstFloat64(t, c) +} +func (s *state) constInt(t *types.Type, c int64) *ssa.Value { + if s.config.PtrSize == 8 { + return s.constInt64(t, c) + } + if int64(int32(c)) != c { + s.Fatalf("integer constant too big %d", 
c) + } + return s.constInt32(t, int32(c)) +} +func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value { + return s.f.ConstOffPtrSP(t, c, s.sp) +} + +// newValueOrSfCall* are wrappers around newValue*, which may create a call to a +// soft-float runtime function instead (when emitting soft-float code). +func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value { + if s.softFloat { + if c, ok := s.sfcall(op, arg); ok { + return c + } + } + return s.newValue1(op, t, arg) +} +func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value { + if s.softFloat { + if c, ok := s.sfcall(op, arg0, arg1); ok { + return c + } + } + return s.newValue2(op, t, arg0, arg1) +} + +type instrumentKind uint8 + +const ( + instrumentRead = iota + instrumentWrite + instrumentMove +) + +func (s *state) instrument(t *types.Type, addr *ssa.Value, kind instrumentKind) { + s.instrument2(t, addr, nil, kind) +} + +// instrumentFields instruments a read/write operation on addr. +// If it is instrumenting for MSAN or ASAN and t is a struct type, it instruments +// operation for each field, instead of for the whole struct. 
+func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrumentKind) { + if !(base.Flag.MSan || base.Flag.ASan) || !t.IsStruct() { + s.instrument(t, addr, kind) + return + } + for _, f := range t.Fields() { + if f.Sym.IsBlank() { + continue + } + offptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(f.Type), f.Offset, addr) + s.instrumentFields(f.Type, offptr, kind) + } +} + +func (s *state) instrumentMove(t *types.Type, dst, src *ssa.Value) { + if base.Flag.MSan { + s.instrument2(t, dst, src, instrumentMove) + } else { + s.instrument(t, src, instrumentRead) + s.instrument(t, dst, instrumentWrite) + } +} + +func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) { + if !s.instrumentMemory { + return + } + + w := t.Size() + if w == 0 { + return // can't race on zero-sized things + } + + if ssa.IsSanitizerSafeAddr(addr) { + return + } + + var fn *obj.LSym + needWidth := false + + if addr2 != nil && kind != instrumentMove { + panic("instrument2: non-nil addr2 for non-move instrumentation") + } + + if base.Flag.MSan { + switch kind { + case instrumentRead: + fn = ir.Syms.Msanread + case instrumentWrite: + fn = ir.Syms.Msanwrite + case instrumentMove: + fn = ir.Syms.Msanmove + default: + panic("unreachable") + } + needWidth = true + } else if base.Flag.Race && t.NumComponents(types.CountBlankFields) > 1 { + // for composite objects we have to write every address + // because a write might happen to any subobject. + // composites with only one element don't have subobjects, though. + switch kind { + case instrumentRead: + fn = ir.Syms.Racereadrange + case instrumentWrite: + fn = ir.Syms.Racewriterange + default: + panic("unreachable") + } + needWidth = true + } else if base.Flag.Race { + // for non-composite objects we can write just the start + // address, as any write must write the first byte. 
+ switch kind { + case instrumentRead: + fn = ir.Syms.Raceread + case instrumentWrite: + fn = ir.Syms.Racewrite + default: + panic("unreachable") + } + } else if base.Flag.ASan { + switch kind { + case instrumentRead: + fn = ir.Syms.Asanread + case instrumentWrite: + fn = ir.Syms.Asanwrite + default: + panic("unreachable") + } + needWidth = true + } else { + panic("unreachable") + } + + args := []*ssa.Value{addr} + if addr2 != nil { + args = append(args, addr2) + } + if needWidth { + args = append(args, s.constInt(types.Types[types.TUINTPTR], w)) + } + s.rtcall(fn, true, nil, args...) +} + +func (s *state) load(t *types.Type, src *ssa.Value) *ssa.Value { + s.instrumentFields(t, src, instrumentRead) + return s.rawLoad(t, src) +} + +func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value { + return s.newValue2(ssa.OpLoad, t, src, s.mem()) +} + +func (s *state) store(t *types.Type, dst, val *ssa.Value) { + s.vars[memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem()) +} + +func (s *state) zero(t *types.Type, dst *ssa.Value) { + s.instrument(t, dst, instrumentWrite) + store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem()) + store.Aux = t + s.vars[memVar] = store +} + +func (s *state) move(t *types.Type, dst, src *ssa.Value) { + s.moveWhichMayOverlap(t, dst, src, false) +} +func (s *state) moveWhichMayOverlap(t *types.Type, dst, src *ssa.Value, mayOverlap bool) { + s.instrumentMove(t, dst, src) + if mayOverlap && t.IsArray() && t.NumElem() > 1 && !ssa.IsInlinableMemmove(dst, src, t.Size(), s.f.Config) { + // Normally, when moving Go values of type T from one location to another, + // we don't need to worry about partial overlaps. The two Ts must either be + // in disjoint (nonoverlapping) memory or in exactly the same location. + // There are 2 cases where this isn't true: + // 1) Using unsafe you can arrange partial overlaps. + // 2) Since Go 1.17, you can use a cast from a slice to a ptr-to-array. 
+ // https://go.dev/ref/spec#Conversions_from_slice_to_array_pointer + // This feature can be used to construct partial overlaps of array types. + // var a [3]int + // p := (*[2]int)(a[:]) + // q := (*[2]int)(a[1:]) + // *p = *q + // We don't care about solving 1. Or at least, we haven't historically + // and no one has complained. + // For 2, we need to ensure that if there might be partial overlap, + // then we can't use OpMove; we must use memmove instead. + // (memmove handles partial overlap by copying in the correct + // direction. OpMove does not.) + // + // Note that we have to be careful here not to introduce a call when + // we're marshaling arguments to a call or unmarshaling results from a call. + // Cases where this is happening must pass mayOverlap to false. + // (Currently this only happens when unmarshaling results of a call.) + if t.HasPointers() { + s.rtcall(ir.Syms.Typedmemmove, true, nil, s.reflectType(t), dst, src) + // We would have otherwise implemented this move with straightline code, + // including a write barrier. Pretend we issue a write barrier here, + // so that the write barrier tests work. (Otherwise they'd need to know + // the details of IsInlineableMemmove.) + s.curfn.SetWBPos(s.peekPos()) + } else { + s.rtcall(ir.Syms.Memmove, true, nil, dst, src, s.constInt(types.Types[types.TUINTPTR], t.Size())) + } + ssa.LogLargeCopy(s.f.Name, s.peekPos(), t.Size()) + return + } + store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem()) + store.Aux = t + s.vars[memVar] = store +} + +// stmtList converts the statement list n to SSA and adds it to s. +func (s *state) stmtList(l ir.Nodes) { + for _, n := range l { + s.stmt(n) + } +} + +// stmt converts the statement n to SSA and adds it to s. +func (s *state) stmt(n ir.Node) { + s.pushLine(n.Pos()) + defer s.popLine() + + // If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere), + // then this code is dead. Stop here. 
+ if s.curBlock == nil && n.Op() != ir.OLABEL { + return + } + + s.stmtList(n.Init()) + switch n.Op() { + + case ir.OBLOCK: + n := n.(*ir.BlockStmt) + s.stmtList(n.List) + + case ir.OFALL: // no-op + + // Expression statements + case ir.OCALLFUNC: + n := n.(*ir.CallExpr) + if ir.IsIntrinsicCall(n) { + s.intrinsicCall(n) + return + } + fallthrough + + case ir.OCALLINTER: + n := n.(*ir.CallExpr) + s.callResult(n, callNormal) + if n.Op() == ir.OCALLFUNC && n.Fun.Op() == ir.ONAME && n.Fun.(*ir.Name).Class == ir.PFUNC { + if fn := n.Fun.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" || + n.Fun.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap" || fn == "panicunsafeslicelen" || fn == "panicunsafeslicenilptr" || fn == "panicunsafestringlen" || fn == "panicunsafestringnilptr") { + m := s.mem() + b := s.endBlock() + b.Kind = ssa.BlockExit + b.SetControl(m) + // TODO: never rewrite OPANIC to OCALLFUNC in the + // first place. Need to wait until all backends + // go through SSA. 
+ } + } + case ir.ODEFER: + n := n.(*ir.GoDeferStmt) + if base.Debug.Defer > 0 { + var defertype string + if s.hasOpenDefers { + defertype = "open-coded" + } else if n.Esc() == ir.EscNever { + defertype = "stack-allocated" + } else { + defertype = "heap-allocated" + } + base.WarnfAt(n.Pos(), "%s defer", defertype) + } + if s.hasOpenDefers { + s.openDeferRecord(n.Call.(*ir.CallExpr)) + } else { + d := callDefer + if n.Esc() == ir.EscNever && n.DeferAt == nil { + d = callDeferStack + } + s.call(n.Call.(*ir.CallExpr), d, false, n.DeferAt) + } + case ir.OGO: + n := n.(*ir.GoDeferStmt) + s.callResult(n.Call.(*ir.CallExpr), callGo) + + case ir.OAS2DOTTYPE: + n := n.(*ir.AssignListStmt) + var res, resok *ssa.Value + if n.Rhs[0].Op() == ir.ODOTTYPE2 { + res, resok = s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true) + } else { + res, resok = s.dynamicDottype(n.Rhs[0].(*ir.DynamicTypeAssertExpr), true) + } + deref := false + if !ssa.CanSSA(n.Rhs[0].Type()) { + if res.Op != ssa.OpLoad { + s.Fatalf("dottype of non-load") + } + mem := s.mem() + if res.Args[1] != mem { + s.Fatalf("memory no longer live from 2-result dottype load") + } + deref = true + res = res.Args[0] + } + s.assign(n.Lhs[0], res, deref, 0) + s.assign(n.Lhs[1], resok, false, 0) + return + + case ir.OAS2FUNC: + // We come here only when it is an intrinsic call returning two values. + n := n.(*ir.AssignListStmt) + call := n.Rhs[0].(*ir.CallExpr) + if !ir.IsIntrinsicCall(call) { + s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call) + } + v := s.intrinsicCall(call) + v1 := s.newValue1(ssa.OpSelect0, n.Lhs[0].Type(), v) + v2 := s.newValue1(ssa.OpSelect1, n.Lhs[1].Type(), v) + s.assign(n.Lhs[0], v1, false, 0) + s.assign(n.Lhs[1], v2, false, 0) + return + + case ir.ODCL: + n := n.(*ir.Decl) + if v := n.X; v.Esc() == ir.EscHeap { + s.newHeapaddr(v) + } + + case ir.OLABEL: + n := n.(*ir.LabelStmt) + sym := n.Label + if sym.IsBlank() { + // Nothing to do because the label isn't targetable. See issue 52278. 
+ break + } + lab := s.label(sym) + + // The label might already have a target block via a goto. + if lab.target == nil { + lab.target = s.f.NewBlock(ssa.BlockPlain) + } + + // Go to that label. + // (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.) + if s.curBlock != nil { + b := s.endBlock() + b.AddEdgeTo(lab.target) + } + s.startBlock(lab.target) + + case ir.OGOTO: + n := n.(*ir.BranchStmt) + sym := n.Label + + lab := s.label(sym) + if lab.target == nil { + lab.target = s.f.NewBlock(ssa.BlockPlain) + } + + b := s.endBlock() + b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block. + b.AddEdgeTo(lab.target) + + case ir.OAS: + n := n.(*ir.AssignStmt) + if n.X == n.Y && n.X.Op() == ir.ONAME { + // An x=x assignment. No point in doing anything + // here. In addition, skipping this assignment + // prevents generating: + // VARDEF x + // COPY x -> x + // which is bad because x is incorrectly considered + // dead before the vardef. See issue #14904. + return + } + + // mayOverlap keeps track of whether the LHS and RHS might + // refer to partially overlapping memory. Partial overlapping can + // only happen for arrays, see the comment in moveWhichMayOverlap. + // + // If both sides of the assignment are not dereferences, then partial + // overlap can't happen. Partial overlap can only occur only when the + // arrays referenced are strictly smaller parts of the same base array. + // If one side of the assignment is a full array, then partial overlap + // can't happen. (The arrays are either disjoint or identical.) + mayOverlap := n.X.Op() == ir.ODEREF && (n.Y != nil && n.Y.Op() == ir.ODEREF) + if n.Y != nil && n.Y.Op() == ir.ODEREF { + p := n.Y.(*ir.StarExpr).X + for p.Op() == ir.OCONVNOP { + p = p.(*ir.ConvExpr).X + } + if p.Op() == ir.OSPTR && p.(*ir.UnaryExpr).X.Type().IsString() { + // Pointer fields of strings point to unmodifiable memory. + // That memory can't overlap with the memory being written. 
+ mayOverlap = false + } + } + + // Evaluate RHS. + rhs := n.Y + if rhs != nil { + switch rhs.Op() { + case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT: + // All literals with nonzero fields have already been + // rewritten during walk. Any that remain are just T{} + // or equivalents. Use the zero value. + if !ir.IsZero(rhs) { + s.Fatalf("literal with nonzero value in SSA: %v", rhs) + } + rhs = nil + case ir.OAPPEND: + rhs := rhs.(*ir.CallExpr) + // Check whether we're writing the result of an append back to the same slice. + // If so, we handle it specially to avoid write barriers on the fast + // (non-growth) path. + if !ir.SameSafeExpr(n.X, rhs.Args[0]) || base.Flag.N != 0 { + break + } + // If the slice can be SSA'd, it'll be on the stack, + // so there will be no write barriers, + // so there's no need to attempt to prevent them. + if s.canSSA(n.X) { + if base.Debug.Append > 0 { // replicating old diagnostic message + base.WarnfAt(n.Pos(), "append: len-only update (in local slice)") + } + break + } + if base.Debug.Append > 0 { + base.WarnfAt(n.Pos(), "append: len-only update") + } + s.append(rhs, true) + return + } + } + + if ir.IsBlank(n.X) { + // _ = rhs + // Just evaluate rhs for side-effects. + if rhs != nil { + s.expr(rhs) + } + return + } + + var t *types.Type + if n.Y != nil { + t = n.Y.Type() + } else { + t = n.X.Type() + } + + var r *ssa.Value + deref := !ssa.CanSSA(t) + if deref { + if rhs == nil { + r = nil // Signal assign to use OpZero. + } else { + r = s.addr(rhs) + } + } else { + if rhs == nil { + r = s.zeroVal(t) + } else { + r = s.expr(rhs) + } + } + + var skip skipMask + if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && ir.SameSafeExpr(rhs.(*ir.SliceExpr).X, n.X) { + // We're assigning a slicing operation back to its source. + // Don't write back fields we aren't changing. See issue #14855. 
+ rhs := rhs.(*ir.SliceExpr) + i, j, k := rhs.Low, rhs.High, rhs.Max + if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && ir.Int64Val(i) == 0) { + // [0:...] is the same as [:...] + i = nil + } + // TODO: detect defaults for len/cap also. + // Currently doesn't really work because (*p)[:len(*p)] appears here as: + // tmp = len(*p) + // (*p)[:tmp] + // if j != nil && (j.Op == OLEN && SameSafeExpr(j.Left, n.Left)) { + // j = nil + // } + // if k != nil && (k.Op == OCAP && SameSafeExpr(k.Left, n.Left)) { + // k = nil + // } + if i == nil { + skip |= skipPtr + if j == nil { + skip |= skipLen + } + if k == nil { + skip |= skipCap + } + } + } + + s.assignWhichMayOverlap(n.X, r, deref, skip, mayOverlap) + + case ir.OIF: + n := n.(*ir.IfStmt) + if ir.IsConst(n.Cond, constant.Bool) { + s.stmtList(n.Cond.Init()) + if ir.BoolVal(n.Cond) { + s.stmtList(n.Body) + } else { + s.stmtList(n.Else) + } + break + } + + bEnd := s.f.NewBlock(ssa.BlockPlain) + var likely int8 + if n.Likely { + likely = 1 + } + var bThen *ssa.Block + if len(n.Body) != 0 { + bThen = s.f.NewBlock(ssa.BlockPlain) + } else { + bThen = bEnd + } + var bElse *ssa.Block + if len(n.Else) != 0 { + bElse = s.f.NewBlock(ssa.BlockPlain) + } else { + bElse = bEnd + } + s.condBranch(n.Cond, bThen, bElse, likely) + + if len(n.Body) != 0 { + s.startBlock(bThen) + s.stmtList(n.Body) + if b := s.endBlock(); b != nil { + b.AddEdgeTo(bEnd) + } + } + if len(n.Else) != 0 { + s.startBlock(bElse) + s.stmtList(n.Else) + if b := s.endBlock(); b != nil { + b.AddEdgeTo(bEnd) + } + } + s.startBlock(bEnd) + + case ir.ORETURN: + n := n.(*ir.ReturnStmt) + s.stmtList(n.Results) + b := s.exit() + b.Pos = s.lastPos.WithIsStmt() + + case ir.OTAILCALL: + n := n.(*ir.TailCallStmt) + s.callResult(n.Call, callTail) + call := s.mem() + b := s.endBlock() + b.Kind = ssa.BlockRetJmp // could use BlockExit. BlockRetJmp is mostly for clarity. 
+ b.SetControl(call) + + case ir.OCONTINUE, ir.OBREAK: + n := n.(*ir.BranchStmt) + var to *ssa.Block + if n.Label == nil { + // plain break/continue + switch n.Op() { + case ir.OCONTINUE: + to = s.continueTo + case ir.OBREAK: + to = s.breakTo + } + } else { + // labeled break/continue; look up the target + sym := n.Label + lab := s.label(sym) + switch n.Op() { + case ir.OCONTINUE: + to = lab.continueTarget + case ir.OBREAK: + to = lab.breakTarget + } + } + + b := s.endBlock() + b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block. + b.AddEdgeTo(to) + + case ir.OFOR: + // OFOR: for Ninit; Left; Right { Nbody } + // cond (Left); body (Nbody); incr (Right) + n := n.(*ir.ForStmt) + base.Assert(!n.DistinctVars) // Should all be rewritten before escape analysis + bCond := s.f.NewBlock(ssa.BlockPlain) + bBody := s.f.NewBlock(ssa.BlockPlain) + bIncr := s.f.NewBlock(ssa.BlockPlain) + bEnd := s.f.NewBlock(ssa.BlockPlain) + + // ensure empty for loops have correct position; issue #30167 + bBody.Pos = n.Pos() + + // first, jump to condition test + b := s.endBlock() + b.AddEdgeTo(bCond) + + // generate code to test condition + s.startBlock(bCond) + if n.Cond != nil { + s.condBranch(n.Cond, bBody, bEnd, 1) + } else { + b := s.endBlock() + b.Kind = ssa.BlockPlain + b.AddEdgeTo(bBody) + } + + // set up for continue/break in body + prevContinue := s.continueTo + prevBreak := s.breakTo + s.continueTo = bIncr + s.breakTo = bEnd + var lab *ssaLabel + if sym := n.Label; sym != nil { + // labeled for loop + lab = s.label(sym) + lab.continueTarget = bIncr + lab.breakTarget = bEnd + } + + // generate body + s.startBlock(bBody) + s.stmtList(n.Body) + + // tear down continue/break + s.continueTo = prevContinue + s.breakTo = prevBreak + if lab != nil { + lab.continueTarget = nil + lab.breakTarget = nil + } + + // done with body, goto incr + if b := s.endBlock(); b != nil { + b.AddEdgeTo(bIncr) + } + + // generate incr + s.startBlock(bIncr) + if n.Post != nil { + 
s.stmt(n.Post) + } + if b := s.endBlock(); b != nil { + b.AddEdgeTo(bCond) + // It can happen that bIncr ends in a block containing only VARKILL, + // and that muddles the debugging experience. + if b.Pos == src.NoXPos { + b.Pos = bCond.Pos + } + } + + s.startBlock(bEnd) + + case ir.OSWITCH, ir.OSELECT: + // These have been mostly rewritten by the front end into their Nbody fields. + // Our main task is to correctly hook up any break statements. + bEnd := s.f.NewBlock(ssa.BlockPlain) + + prevBreak := s.breakTo + s.breakTo = bEnd + var sym *types.Sym + var body ir.Nodes + if n.Op() == ir.OSWITCH { + n := n.(*ir.SwitchStmt) + sym = n.Label + body = n.Compiled + } else { + n := n.(*ir.SelectStmt) + sym = n.Label + body = n.Compiled + } + + var lab *ssaLabel + if sym != nil { + // labeled + lab = s.label(sym) + lab.breakTarget = bEnd + } + + // generate body code + s.stmtList(body) + + s.breakTo = prevBreak + if lab != nil { + lab.breakTarget = nil + } + + // walk adds explicit OBREAK nodes to the end of all reachable code paths. + // If we still have a current block here, then mark it unreachable. + if s.curBlock != nil { + m := s.mem() + b := s.endBlock() + b.Kind = ssa.BlockExit + b.SetControl(m) + } + s.startBlock(bEnd) + + case ir.OJUMPTABLE: + n := n.(*ir.JumpTableStmt) + + // Make blocks we'll need. + jt := s.f.NewBlock(ssa.BlockJumpTable) + bEnd := s.f.NewBlock(ssa.BlockPlain) + + // The only thing that needs evaluating is the index we're looking up. + idx := s.expr(n.Idx) + unsigned := idx.Type.IsUnsigned() + + // Extend so we can do everything in uintptr arithmetic. + t := types.Types[types.TUINTPTR] + idx = s.conv(nil, idx, idx.Type, t) + + // The ending condition for the current block decides whether we'll use + // the jump table at all. + // We check that min <= idx <= max and jump around the jump table + // if that test fails. 
+ // We implement min <= idx <= max with 0 <= idx-min <= max-min, because + // we'll need idx-min anyway as the control value for the jump table. + var min, max uint64 + if unsigned { + min, _ = constant.Uint64Val(n.Cases[0]) + max, _ = constant.Uint64Val(n.Cases[len(n.Cases)-1]) + } else { + mn, _ := constant.Int64Val(n.Cases[0]) + mx, _ := constant.Int64Val(n.Cases[len(n.Cases)-1]) + min = uint64(mn) + max = uint64(mx) + } + // Compare idx-min with max-min, to see if we can use the jump table. + idx = s.newValue2(s.ssaOp(ir.OSUB, t), t, idx, s.uintptrConstant(min)) + width := s.uintptrConstant(max - min) + cmp := s.newValue2(s.ssaOp(ir.OLE, t), types.Types[types.TBOOL], idx, width) + b := s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(cmp) + b.AddEdgeTo(jt) // in range - use jump table + b.AddEdgeTo(bEnd) // out of range - no case in the jump table will trigger + b.Likely = ssa.BranchLikely // TODO: assumes missing the table entirely is unlikely. True? + + // Build jump table block. + s.startBlock(jt) + jt.Pos = n.Pos() + if base.Flag.Cfg.SpectreIndex { + idx = s.newValue2(ssa.OpSpectreSliceIndex, t, idx, width) + } + jt.SetControl(idx) + + // Figure out where we should go for each index in the table. + table := make([]*ssa.Block, max-min+1) + for i := range table { + table[i] = bEnd // default target + } + for i := range n.Targets { + c := n.Cases[i] + lab := s.label(n.Targets[i]) + if lab.target == nil { + lab.target = s.f.NewBlock(ssa.BlockPlain) + } + var val uint64 + if unsigned { + val, _ = constant.Uint64Val(c) + } else { + vl, _ := constant.Int64Val(c) + val = uint64(vl) + } + // Overwrite the default target. 
+ table[val-min] = lab.target + } + for _, t := range table { + jt.AddEdgeTo(t) + } + s.endBlock() + + s.startBlock(bEnd) + + case ir.OINTERFACESWITCH: + n := n.(*ir.InterfaceSwitchStmt) + typs := s.f.Config.Types + + t := s.expr(n.RuntimeType) + h := s.expr(n.Hash) + d := s.newValue1A(ssa.OpAddr, typs.BytePtr, n.Descriptor, s.sb) + + // Check the cache first. + var merge *ssa.Block + if base.Flag.N == 0 && rtabi.UseInterfaceSwitchCache(Arch.LinkArch.Name) { + // Note: we can only use the cache if we have the right atomic load instruction. + // Double-check that here. + if _, ok := intrinsics[intrinsicKey{Arch.LinkArch.Arch, "runtime/internal/atomic", "Loadp"}]; !ok { + s.Fatalf("atomic load not available") + } + merge = s.f.NewBlock(ssa.BlockPlain) + cacheHit := s.f.NewBlock(ssa.BlockPlain) + cacheMiss := s.f.NewBlock(ssa.BlockPlain) + loopHead := s.f.NewBlock(ssa.BlockPlain) + loopBody := s.f.NewBlock(ssa.BlockPlain) + + // Pick right size ops. + var mul, and, add, zext ssa.Op + if s.config.PtrSize == 4 { + mul = ssa.OpMul32 + and = ssa.OpAnd32 + add = ssa.OpAdd32 + zext = ssa.OpCopy + } else { + mul = ssa.OpMul64 + and = ssa.OpAnd64 + add = ssa.OpAdd64 + zext = ssa.OpZeroExt32to64 + } + + // Load cache pointer out of descriptor, with an atomic load so + // we ensure that we see a fully written cache. + atomicLoad := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(typs.BytePtr, types.TypeMem), d, s.mem()) + cache := s.newValue1(ssa.OpSelect0, typs.BytePtr, atomicLoad) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, atomicLoad) + + // Initialize hash variable. + s.vars[hashVar] = s.newValue1(zext, typs.Uintptr, h) + + // Load mask from cache. + mask := s.newValue2(ssa.OpLoad, typs.Uintptr, cache, s.mem()) + // Jump to loop head. + b := s.endBlock() + b.AddEdgeTo(loopHead) + + // At loop head, get pointer to the cache entry. 
+ // e := &cache.Entries[hash&mask] + s.startBlock(loopHead) + entries := s.newValue2(ssa.OpAddPtr, typs.UintptrPtr, cache, s.uintptrConstant(uint64(s.config.PtrSize))) + idx := s.newValue2(and, typs.Uintptr, s.variable(hashVar, typs.Uintptr), mask) + idx = s.newValue2(mul, typs.Uintptr, idx, s.uintptrConstant(uint64(3*s.config.PtrSize))) + e := s.newValue2(ssa.OpAddPtr, typs.UintptrPtr, entries, idx) + // hash++ + s.vars[hashVar] = s.newValue2(add, typs.Uintptr, s.variable(hashVar, typs.Uintptr), s.uintptrConstant(1)) + + // Look for a cache hit. + // if e.Typ == t { goto hit } + eTyp := s.newValue2(ssa.OpLoad, typs.Uintptr, e, s.mem()) + cmp1 := s.newValue2(ssa.OpEqPtr, typs.Bool, t, eTyp) + b = s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(cmp1) + b.AddEdgeTo(cacheHit) + b.AddEdgeTo(loopBody) + + // Look for an empty entry, the tombstone for this hash table. + // if e.Typ == nil { goto miss } + s.startBlock(loopBody) + cmp2 := s.newValue2(ssa.OpEqPtr, typs.Bool, eTyp, s.constNil(typs.BytePtr)) + b = s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(cmp2) + b.AddEdgeTo(cacheMiss) + b.AddEdgeTo(loopHead) + + // On a hit, load the data fields of the cache entry. + // Case = e.Case + // Itab = e.Itab + s.startBlock(cacheHit) + eCase := s.newValue2(ssa.OpLoad, typs.Int, s.newValue1I(ssa.OpOffPtr, typs.IntPtr, s.config.PtrSize, e), s.mem()) + eItab := s.newValue2(ssa.OpLoad, typs.BytePtr, s.newValue1I(ssa.OpOffPtr, typs.BytePtrPtr, 2*s.config.PtrSize, e), s.mem()) + s.assign(n.Case, eCase, false, 0) + s.assign(n.Itab, eItab, false, 0) + b = s.endBlock() + b.AddEdgeTo(merge) + + // On a miss, call into the runtime to get the answer. + s.startBlock(cacheMiss) + } + + r := s.rtcall(ir.Syms.InterfaceSwitch, true, []*types.Type{typs.Int, typs.BytePtr}, d, t) + s.assign(n.Case, r[0], false, 0) + s.assign(n.Itab, r[1], false, 0) + + if merge != nil { + // Cache hits merge in here. 
+ b := s.endBlock() + b.Kind = ssa.BlockPlain + b.AddEdgeTo(merge) + s.startBlock(merge) + } + + case ir.OCHECKNIL: + n := n.(*ir.UnaryExpr) + p := s.expr(n.X) + _ = s.nilCheck(p) + // TODO: check that throwing away the nilcheck result is ok. + + case ir.OINLMARK: + n := n.(*ir.InlineMarkStmt) + s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Index, s.mem()) + + default: + s.Fatalf("unhandled stmt %v", n.Op()) + } +} + +// If true, share as many open-coded defer exits as possible (with the downside of +// worse line-number information) +const shareDeferExits = false + +// exit processes any code that needs to be generated just before returning. +// It returns a BlockRet block that ends the control flow. Its control value +// will be set to the final memory state. +func (s *state) exit() *ssa.Block { + if s.hasdefer { + if s.hasOpenDefers { + if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount { + if s.curBlock.Kind != ssa.BlockPlain { + panic("Block for an exit should be BlockPlain") + } + s.curBlock.AddEdgeTo(s.lastDeferExit) + s.endBlock() + return s.lastDeferFinalBlock + } + s.openDeferExit() + } else { + s.rtcall(ir.Syms.Deferreturn, true, nil) + } + } + + // Do actual return. + // These currently turn into self-copies (in many cases). + resultFields := s.curfn.Type().Results() + results := make([]*ssa.Value, len(resultFields)+1, len(resultFields)+1) + // Store SSAable and heap-escaped PPARAMOUT variables back to stack locations. + for i, f := range resultFields { + n := f.Nname.(*ir.Name) + if s.canSSA(n) { // result is in some SSA variable + if !n.IsOutputParamInRegisters() && n.Type().HasPointers() { + // We are about to store to the result slot. + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) + } + results[i] = s.variable(n, n.Type()) + } else if !n.OnStack() { // result is actually heap allocated + // We are about to copy the in-heap result to the result slot. 
+ if n.Type().HasPointers() { + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem()) + } + ha := s.expr(n.Heapaddr) + s.instrumentFields(n.Type(), ha, instrumentRead) + results[i] = s.newValue2(ssa.OpDereference, n.Type(), ha, s.mem()) + } else { // result is not SSA-able; not escaped, so not on heap, but too large for SSA. + // Before register ABI this ought to be a self-move, home=dest, + // With register ABI, it's still a self-move if parameter is on stack (i.e., too big or overflowed) + // No VarDef, as the result slot is already holding live value. + results[i] = s.newValue2(ssa.OpDereference, n.Type(), s.addr(n), s.mem()) + } + } + + // In -race mode, we need to call racefuncexit. + // Note: This has to happen after we load any heap-allocated results, + // otherwise races will be attributed to the caller instead. + if s.instrumentEnterExit { + s.rtcall(ir.Syms.Racefuncexit, true, nil) + } + + results[len(results)-1] = s.mem() + m := s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType()) + m.AddArgs(results...) 
+ + b := s.endBlock() + b.Kind = ssa.BlockRet + b.SetControl(m) + if s.hasdefer && s.hasOpenDefers { + s.lastDeferFinalBlock = b + } + return b +} + +type opAndType struct { + op ir.Op + etype types.Kind +} + +var opToSSA = map[opAndType]ssa.Op{ + {ir.OADD, types.TINT8}: ssa.OpAdd8, + {ir.OADD, types.TUINT8}: ssa.OpAdd8, + {ir.OADD, types.TINT16}: ssa.OpAdd16, + {ir.OADD, types.TUINT16}: ssa.OpAdd16, + {ir.OADD, types.TINT32}: ssa.OpAdd32, + {ir.OADD, types.TUINT32}: ssa.OpAdd32, + {ir.OADD, types.TINT64}: ssa.OpAdd64, + {ir.OADD, types.TUINT64}: ssa.OpAdd64, + {ir.OADD, types.TFLOAT32}: ssa.OpAdd32F, + {ir.OADD, types.TFLOAT64}: ssa.OpAdd64F, + + {ir.OSUB, types.TINT8}: ssa.OpSub8, + {ir.OSUB, types.TUINT8}: ssa.OpSub8, + {ir.OSUB, types.TINT16}: ssa.OpSub16, + {ir.OSUB, types.TUINT16}: ssa.OpSub16, + {ir.OSUB, types.TINT32}: ssa.OpSub32, + {ir.OSUB, types.TUINT32}: ssa.OpSub32, + {ir.OSUB, types.TINT64}: ssa.OpSub64, + {ir.OSUB, types.TUINT64}: ssa.OpSub64, + {ir.OSUB, types.TFLOAT32}: ssa.OpSub32F, + {ir.OSUB, types.TFLOAT64}: ssa.OpSub64F, + + {ir.ONOT, types.TBOOL}: ssa.OpNot, + + {ir.ONEG, types.TINT8}: ssa.OpNeg8, + {ir.ONEG, types.TUINT8}: ssa.OpNeg8, + {ir.ONEG, types.TINT16}: ssa.OpNeg16, + {ir.ONEG, types.TUINT16}: ssa.OpNeg16, + {ir.ONEG, types.TINT32}: ssa.OpNeg32, + {ir.ONEG, types.TUINT32}: ssa.OpNeg32, + {ir.ONEG, types.TINT64}: ssa.OpNeg64, + {ir.ONEG, types.TUINT64}: ssa.OpNeg64, + {ir.ONEG, types.TFLOAT32}: ssa.OpNeg32F, + {ir.ONEG, types.TFLOAT64}: ssa.OpNeg64F, + + {ir.OBITNOT, types.TINT8}: ssa.OpCom8, + {ir.OBITNOT, types.TUINT8}: ssa.OpCom8, + {ir.OBITNOT, types.TINT16}: ssa.OpCom16, + {ir.OBITNOT, types.TUINT16}: ssa.OpCom16, + {ir.OBITNOT, types.TINT32}: ssa.OpCom32, + {ir.OBITNOT, types.TUINT32}: ssa.OpCom32, + {ir.OBITNOT, types.TINT64}: ssa.OpCom64, + {ir.OBITNOT, types.TUINT64}: ssa.OpCom64, + + {ir.OIMAG, types.TCOMPLEX64}: ssa.OpComplexImag, + {ir.OIMAG, types.TCOMPLEX128}: ssa.OpComplexImag, + {ir.OREAL, types.TCOMPLEX64}: 
ssa.OpComplexReal, + {ir.OREAL, types.TCOMPLEX128}: ssa.OpComplexReal, + + {ir.OMUL, types.TINT8}: ssa.OpMul8, + {ir.OMUL, types.TUINT8}: ssa.OpMul8, + {ir.OMUL, types.TINT16}: ssa.OpMul16, + {ir.OMUL, types.TUINT16}: ssa.OpMul16, + {ir.OMUL, types.TINT32}: ssa.OpMul32, + {ir.OMUL, types.TUINT32}: ssa.OpMul32, + {ir.OMUL, types.TINT64}: ssa.OpMul64, + {ir.OMUL, types.TUINT64}: ssa.OpMul64, + {ir.OMUL, types.TFLOAT32}: ssa.OpMul32F, + {ir.OMUL, types.TFLOAT64}: ssa.OpMul64F, + + {ir.ODIV, types.TFLOAT32}: ssa.OpDiv32F, + {ir.ODIV, types.TFLOAT64}: ssa.OpDiv64F, + + {ir.ODIV, types.TINT8}: ssa.OpDiv8, + {ir.ODIV, types.TUINT8}: ssa.OpDiv8u, + {ir.ODIV, types.TINT16}: ssa.OpDiv16, + {ir.ODIV, types.TUINT16}: ssa.OpDiv16u, + {ir.ODIV, types.TINT32}: ssa.OpDiv32, + {ir.ODIV, types.TUINT32}: ssa.OpDiv32u, + {ir.ODIV, types.TINT64}: ssa.OpDiv64, + {ir.ODIV, types.TUINT64}: ssa.OpDiv64u, + + {ir.OMOD, types.TINT8}: ssa.OpMod8, + {ir.OMOD, types.TUINT8}: ssa.OpMod8u, + {ir.OMOD, types.TINT16}: ssa.OpMod16, + {ir.OMOD, types.TUINT16}: ssa.OpMod16u, + {ir.OMOD, types.TINT32}: ssa.OpMod32, + {ir.OMOD, types.TUINT32}: ssa.OpMod32u, + {ir.OMOD, types.TINT64}: ssa.OpMod64, + {ir.OMOD, types.TUINT64}: ssa.OpMod64u, + + {ir.OAND, types.TINT8}: ssa.OpAnd8, + {ir.OAND, types.TUINT8}: ssa.OpAnd8, + {ir.OAND, types.TINT16}: ssa.OpAnd16, + {ir.OAND, types.TUINT16}: ssa.OpAnd16, + {ir.OAND, types.TINT32}: ssa.OpAnd32, + {ir.OAND, types.TUINT32}: ssa.OpAnd32, + {ir.OAND, types.TINT64}: ssa.OpAnd64, + {ir.OAND, types.TUINT64}: ssa.OpAnd64, + + {ir.OOR, types.TINT8}: ssa.OpOr8, + {ir.OOR, types.TUINT8}: ssa.OpOr8, + {ir.OOR, types.TINT16}: ssa.OpOr16, + {ir.OOR, types.TUINT16}: ssa.OpOr16, + {ir.OOR, types.TINT32}: ssa.OpOr32, + {ir.OOR, types.TUINT32}: ssa.OpOr32, + {ir.OOR, types.TINT64}: ssa.OpOr64, + {ir.OOR, types.TUINT64}: ssa.OpOr64, + + {ir.OXOR, types.TINT8}: ssa.OpXor8, + {ir.OXOR, types.TUINT8}: ssa.OpXor8, + {ir.OXOR, types.TINT16}: ssa.OpXor16, + {ir.OXOR, types.TUINT16}: 
ssa.OpXor16, + {ir.OXOR, types.TINT32}: ssa.OpXor32, + {ir.OXOR, types.TUINT32}: ssa.OpXor32, + {ir.OXOR, types.TINT64}: ssa.OpXor64, + {ir.OXOR, types.TUINT64}: ssa.OpXor64, + + {ir.OEQ, types.TBOOL}: ssa.OpEqB, + {ir.OEQ, types.TINT8}: ssa.OpEq8, + {ir.OEQ, types.TUINT8}: ssa.OpEq8, + {ir.OEQ, types.TINT16}: ssa.OpEq16, + {ir.OEQ, types.TUINT16}: ssa.OpEq16, + {ir.OEQ, types.TINT32}: ssa.OpEq32, + {ir.OEQ, types.TUINT32}: ssa.OpEq32, + {ir.OEQ, types.TINT64}: ssa.OpEq64, + {ir.OEQ, types.TUINT64}: ssa.OpEq64, + {ir.OEQ, types.TINTER}: ssa.OpEqInter, + {ir.OEQ, types.TSLICE}: ssa.OpEqSlice, + {ir.OEQ, types.TFUNC}: ssa.OpEqPtr, + {ir.OEQ, types.TMAP}: ssa.OpEqPtr, + {ir.OEQ, types.TCHAN}: ssa.OpEqPtr, + {ir.OEQ, types.TPTR}: ssa.OpEqPtr, + {ir.OEQ, types.TUINTPTR}: ssa.OpEqPtr, + {ir.OEQ, types.TUNSAFEPTR}: ssa.OpEqPtr, + {ir.OEQ, types.TFLOAT64}: ssa.OpEq64F, + {ir.OEQ, types.TFLOAT32}: ssa.OpEq32F, + + {ir.ONE, types.TBOOL}: ssa.OpNeqB, + {ir.ONE, types.TINT8}: ssa.OpNeq8, + {ir.ONE, types.TUINT8}: ssa.OpNeq8, + {ir.ONE, types.TINT16}: ssa.OpNeq16, + {ir.ONE, types.TUINT16}: ssa.OpNeq16, + {ir.ONE, types.TINT32}: ssa.OpNeq32, + {ir.ONE, types.TUINT32}: ssa.OpNeq32, + {ir.ONE, types.TINT64}: ssa.OpNeq64, + {ir.ONE, types.TUINT64}: ssa.OpNeq64, + {ir.ONE, types.TINTER}: ssa.OpNeqInter, + {ir.ONE, types.TSLICE}: ssa.OpNeqSlice, + {ir.ONE, types.TFUNC}: ssa.OpNeqPtr, + {ir.ONE, types.TMAP}: ssa.OpNeqPtr, + {ir.ONE, types.TCHAN}: ssa.OpNeqPtr, + {ir.ONE, types.TPTR}: ssa.OpNeqPtr, + {ir.ONE, types.TUINTPTR}: ssa.OpNeqPtr, + {ir.ONE, types.TUNSAFEPTR}: ssa.OpNeqPtr, + {ir.ONE, types.TFLOAT64}: ssa.OpNeq64F, + {ir.ONE, types.TFLOAT32}: ssa.OpNeq32F, + + {ir.OLT, types.TINT8}: ssa.OpLess8, + {ir.OLT, types.TUINT8}: ssa.OpLess8U, + {ir.OLT, types.TINT16}: ssa.OpLess16, + {ir.OLT, types.TUINT16}: ssa.OpLess16U, + {ir.OLT, types.TINT32}: ssa.OpLess32, + {ir.OLT, types.TUINT32}: ssa.OpLess32U, + {ir.OLT, types.TINT64}: ssa.OpLess64, + {ir.OLT, types.TUINT64}: ssa.OpLess64U, 
+ {ir.OLT, types.TFLOAT64}: ssa.OpLess64F, + {ir.OLT, types.TFLOAT32}: ssa.OpLess32F, + + {ir.OLE, types.TINT8}: ssa.OpLeq8, + {ir.OLE, types.TUINT8}: ssa.OpLeq8U, + {ir.OLE, types.TINT16}: ssa.OpLeq16, + {ir.OLE, types.TUINT16}: ssa.OpLeq16U, + {ir.OLE, types.TINT32}: ssa.OpLeq32, + {ir.OLE, types.TUINT32}: ssa.OpLeq32U, + {ir.OLE, types.TINT64}: ssa.OpLeq64, + {ir.OLE, types.TUINT64}: ssa.OpLeq64U, + {ir.OLE, types.TFLOAT64}: ssa.OpLeq64F, + {ir.OLE, types.TFLOAT32}: ssa.OpLeq32F, +} + +func (s *state) concreteEtype(t *types.Type) types.Kind { + e := t.Kind() + switch e { + default: + return e + case types.TINT: + if s.config.PtrSize == 8 { + return types.TINT64 + } + return types.TINT32 + case types.TUINT: + if s.config.PtrSize == 8 { + return types.TUINT64 + } + return types.TUINT32 + case types.TUINTPTR: + if s.config.PtrSize == 8 { + return types.TUINT64 + } + return types.TUINT32 + } +} + +func (s *state) ssaOp(op ir.Op, t *types.Type) ssa.Op { + etype := s.concreteEtype(t) + x, ok := opToSSA[opAndType{op, etype}] + if !ok { + s.Fatalf("unhandled binary op %v %s", op, etype) + } + return x +} + +type opAndTwoTypes struct { + op ir.Op + etype1 types.Kind + etype2 types.Kind +} + +type twoTypes struct { + etype1 types.Kind + etype2 types.Kind +} + +type twoOpsAndType struct { + op1 ssa.Op + op2 ssa.Op + intermediateType types.Kind +} + +var fpConvOpToSSA = map[twoTypes]twoOpsAndType{ + + {types.TINT8, types.TFLOAT32}: {ssa.OpSignExt8to32, ssa.OpCvt32to32F, types.TINT32}, + {types.TINT16, types.TFLOAT32}: {ssa.OpSignExt16to32, ssa.OpCvt32to32F, types.TINT32}, + {types.TINT32, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt32to32F, types.TINT32}, + {types.TINT64, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt64to32F, types.TINT64}, + + {types.TINT8, types.TFLOAT64}: {ssa.OpSignExt8to32, ssa.OpCvt32to64F, types.TINT32}, + {types.TINT16, types.TFLOAT64}: {ssa.OpSignExt16to32, ssa.OpCvt32to64F, types.TINT32}, + {types.TINT32, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt32to64F, 
types.TINT32}, + {types.TINT64, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt64to64F, types.TINT64}, + + {types.TFLOAT32, types.TINT8}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32}, + {types.TFLOAT32, types.TINT16}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32}, + {types.TFLOAT32, types.TINT32}: {ssa.OpCvt32Fto32, ssa.OpCopy, types.TINT32}, + {types.TFLOAT32, types.TINT64}: {ssa.OpCvt32Fto64, ssa.OpCopy, types.TINT64}, + + {types.TFLOAT64, types.TINT8}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32}, + {types.TFLOAT64, types.TINT16}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32}, + {types.TFLOAT64, types.TINT32}: {ssa.OpCvt64Fto32, ssa.OpCopy, types.TINT32}, + {types.TFLOAT64, types.TINT64}: {ssa.OpCvt64Fto64, ssa.OpCopy, types.TINT64}, + // unsigned + {types.TUINT8, types.TFLOAT32}: {ssa.OpZeroExt8to32, ssa.OpCvt32to32F, types.TINT32}, + {types.TUINT16, types.TFLOAT32}: {ssa.OpZeroExt16to32, ssa.OpCvt32to32F, types.TINT32}, + {types.TUINT32, types.TFLOAT32}: {ssa.OpZeroExt32to64, ssa.OpCvt64to32F, types.TINT64}, // go wide to dodge unsigned + {types.TUINT64, types.TFLOAT32}: {ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto32F, branchy code expansion instead + + {types.TUINT8, types.TFLOAT64}: {ssa.OpZeroExt8to32, ssa.OpCvt32to64F, types.TINT32}, + {types.TUINT16, types.TFLOAT64}: {ssa.OpZeroExt16to32, ssa.OpCvt32to64F, types.TINT32}, + {types.TUINT32, types.TFLOAT64}: {ssa.OpZeroExt32to64, ssa.OpCvt64to64F, types.TINT64}, // go wide to dodge unsigned + {types.TUINT64, types.TFLOAT64}: {ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto64F, branchy code expansion instead + + {types.TFLOAT32, types.TUINT8}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32}, + {types.TFLOAT32, types.TUINT16}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32}, + {types.TFLOAT32, types.TUINT32}: {ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned + {types.TFLOAT32, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // 
Cvt32Fto64U, branchy code expansion instead + + {types.TFLOAT64, types.TUINT8}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32}, + {types.TFLOAT64, types.TUINT16}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32}, + {types.TFLOAT64, types.TUINT32}: {ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned + {types.TFLOAT64, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead + + // float + {types.TFLOAT64, types.TFLOAT32}: {ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32}, + {types.TFLOAT64, types.TFLOAT64}: {ssa.OpRound64F, ssa.OpCopy, types.TFLOAT64}, + {types.TFLOAT32, types.TFLOAT32}: {ssa.OpRound32F, ssa.OpCopy, types.TFLOAT32}, + {types.TFLOAT32, types.TFLOAT64}: {ssa.OpCvt32Fto64F, ssa.OpCopy, types.TFLOAT64}, +} + +// this map is used only for 32-bit arch, and only includes the difference +// on 32-bit arch, don't use int64<->float conversion for uint32 +var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{ + {types.TUINT32, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt32Uto32F, types.TUINT32}, + {types.TUINT32, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt32Uto64F, types.TUINT32}, + {types.TFLOAT32, types.TUINT32}: {ssa.OpCvt32Fto32U, ssa.OpCopy, types.TUINT32}, + {types.TFLOAT64, types.TUINT32}: {ssa.OpCvt64Fto32U, ssa.OpCopy, types.TUINT32}, +} + +// uint64<->float conversions, only on machines that have instructions for that +var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{ + {types.TUINT64, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt64Uto32F, types.TUINT64}, + {types.TUINT64, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt64Uto64F, types.TUINT64}, + {types.TFLOAT32, types.TUINT64}: {ssa.OpCvt32Fto64U, ssa.OpCopy, types.TUINT64}, + {types.TFLOAT64, types.TUINT64}: {ssa.OpCvt64Fto64U, ssa.OpCopy, types.TUINT64}, +} + +var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{ + {ir.OLSH, types.TINT8, types.TUINT8}: ssa.OpLsh8x8, + {ir.OLSH, types.TUINT8, types.TUINT8}: ssa.OpLsh8x8, + {ir.OLSH, types.TINT8, 
types.TUINT16}: ssa.OpLsh8x16, + {ir.OLSH, types.TUINT8, types.TUINT16}: ssa.OpLsh8x16, + {ir.OLSH, types.TINT8, types.TUINT32}: ssa.OpLsh8x32, + {ir.OLSH, types.TUINT8, types.TUINT32}: ssa.OpLsh8x32, + {ir.OLSH, types.TINT8, types.TUINT64}: ssa.OpLsh8x64, + {ir.OLSH, types.TUINT8, types.TUINT64}: ssa.OpLsh8x64, + + {ir.OLSH, types.TINT16, types.TUINT8}: ssa.OpLsh16x8, + {ir.OLSH, types.TUINT16, types.TUINT8}: ssa.OpLsh16x8, + {ir.OLSH, types.TINT16, types.TUINT16}: ssa.OpLsh16x16, + {ir.OLSH, types.TUINT16, types.TUINT16}: ssa.OpLsh16x16, + {ir.OLSH, types.TINT16, types.TUINT32}: ssa.OpLsh16x32, + {ir.OLSH, types.TUINT16, types.TUINT32}: ssa.OpLsh16x32, + {ir.OLSH, types.TINT16, types.TUINT64}: ssa.OpLsh16x64, + {ir.OLSH, types.TUINT16, types.TUINT64}: ssa.OpLsh16x64, + + {ir.OLSH, types.TINT32, types.TUINT8}: ssa.OpLsh32x8, + {ir.OLSH, types.TUINT32, types.TUINT8}: ssa.OpLsh32x8, + {ir.OLSH, types.TINT32, types.TUINT16}: ssa.OpLsh32x16, + {ir.OLSH, types.TUINT32, types.TUINT16}: ssa.OpLsh32x16, + {ir.OLSH, types.TINT32, types.TUINT32}: ssa.OpLsh32x32, + {ir.OLSH, types.TUINT32, types.TUINT32}: ssa.OpLsh32x32, + {ir.OLSH, types.TINT32, types.TUINT64}: ssa.OpLsh32x64, + {ir.OLSH, types.TUINT32, types.TUINT64}: ssa.OpLsh32x64, + + {ir.OLSH, types.TINT64, types.TUINT8}: ssa.OpLsh64x8, + {ir.OLSH, types.TUINT64, types.TUINT8}: ssa.OpLsh64x8, + {ir.OLSH, types.TINT64, types.TUINT16}: ssa.OpLsh64x16, + {ir.OLSH, types.TUINT64, types.TUINT16}: ssa.OpLsh64x16, + {ir.OLSH, types.TINT64, types.TUINT32}: ssa.OpLsh64x32, + {ir.OLSH, types.TUINT64, types.TUINT32}: ssa.OpLsh64x32, + {ir.OLSH, types.TINT64, types.TUINT64}: ssa.OpLsh64x64, + {ir.OLSH, types.TUINT64, types.TUINT64}: ssa.OpLsh64x64, + + {ir.ORSH, types.TINT8, types.TUINT8}: ssa.OpRsh8x8, + {ir.ORSH, types.TUINT8, types.TUINT8}: ssa.OpRsh8Ux8, + {ir.ORSH, types.TINT8, types.TUINT16}: ssa.OpRsh8x16, + {ir.ORSH, types.TUINT8, types.TUINT16}: ssa.OpRsh8Ux16, + {ir.ORSH, types.TINT8, types.TUINT32}: ssa.OpRsh8x32, + 
{ir.ORSH, types.TUINT8, types.TUINT32}: ssa.OpRsh8Ux32, + {ir.ORSH, types.TINT8, types.TUINT64}: ssa.OpRsh8x64, + {ir.ORSH, types.TUINT8, types.TUINT64}: ssa.OpRsh8Ux64, + + {ir.ORSH, types.TINT16, types.TUINT8}: ssa.OpRsh16x8, + {ir.ORSH, types.TUINT16, types.TUINT8}: ssa.OpRsh16Ux8, + {ir.ORSH, types.TINT16, types.TUINT16}: ssa.OpRsh16x16, + {ir.ORSH, types.TUINT16, types.TUINT16}: ssa.OpRsh16Ux16, + {ir.ORSH, types.TINT16, types.TUINT32}: ssa.OpRsh16x32, + {ir.ORSH, types.TUINT16, types.TUINT32}: ssa.OpRsh16Ux32, + {ir.ORSH, types.TINT16, types.TUINT64}: ssa.OpRsh16x64, + {ir.ORSH, types.TUINT16, types.TUINT64}: ssa.OpRsh16Ux64, + + {ir.ORSH, types.TINT32, types.TUINT8}: ssa.OpRsh32x8, + {ir.ORSH, types.TUINT32, types.TUINT8}: ssa.OpRsh32Ux8, + {ir.ORSH, types.TINT32, types.TUINT16}: ssa.OpRsh32x16, + {ir.ORSH, types.TUINT32, types.TUINT16}: ssa.OpRsh32Ux16, + {ir.ORSH, types.TINT32, types.TUINT32}: ssa.OpRsh32x32, + {ir.ORSH, types.TUINT32, types.TUINT32}: ssa.OpRsh32Ux32, + {ir.ORSH, types.TINT32, types.TUINT64}: ssa.OpRsh32x64, + {ir.ORSH, types.TUINT32, types.TUINT64}: ssa.OpRsh32Ux64, + + {ir.ORSH, types.TINT64, types.TUINT8}: ssa.OpRsh64x8, + {ir.ORSH, types.TUINT64, types.TUINT8}: ssa.OpRsh64Ux8, + {ir.ORSH, types.TINT64, types.TUINT16}: ssa.OpRsh64x16, + {ir.ORSH, types.TUINT64, types.TUINT16}: ssa.OpRsh64Ux16, + {ir.ORSH, types.TINT64, types.TUINT32}: ssa.OpRsh64x32, + {ir.ORSH, types.TUINT64, types.TUINT32}: ssa.OpRsh64Ux32, + {ir.ORSH, types.TINT64, types.TUINT64}: ssa.OpRsh64x64, + {ir.ORSH, types.TUINT64, types.TUINT64}: ssa.OpRsh64Ux64, +} + +func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op { + etype1 := s.concreteEtype(t) + etype2 := s.concreteEtype(u) + x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}] + if !ok { + s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2) + } + return x +} + +func (s *state) uintptrConstant(v uint64) *ssa.Value { + if s.config.PtrSize == 4 { + return 
s.newValue0I(ssa.OpConst32, types.Types[types.TUINTPTR], int64(v)) + } + return s.newValue0I(ssa.OpConst64, types.Types[types.TUINTPTR], int64(v)) +} + +func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value { + if ft.IsBoolean() && tt.IsKind(types.TUINT8) { + // Bool -> uint8 is generated internally when indexing into runtime.staticbyte. + return s.newValue1(ssa.OpCvtBoolToUint8, tt, v) + } + if ft.IsInteger() && tt.IsInteger() { + var op ssa.Op + if tt.Size() == ft.Size() { + op = ssa.OpCopy + } else if tt.Size() < ft.Size() { + // truncation + switch 10*ft.Size() + tt.Size() { + case 21: + op = ssa.OpTrunc16to8 + case 41: + op = ssa.OpTrunc32to8 + case 42: + op = ssa.OpTrunc32to16 + case 81: + op = ssa.OpTrunc64to8 + case 82: + op = ssa.OpTrunc64to16 + case 84: + op = ssa.OpTrunc64to32 + default: + s.Fatalf("weird integer truncation %v -> %v", ft, tt) + } + } else if ft.IsSigned() { + // sign extension + switch 10*ft.Size() + tt.Size() { + case 12: + op = ssa.OpSignExt8to16 + case 14: + op = ssa.OpSignExt8to32 + case 18: + op = ssa.OpSignExt8to64 + case 24: + op = ssa.OpSignExt16to32 + case 28: + op = ssa.OpSignExt16to64 + case 48: + op = ssa.OpSignExt32to64 + default: + s.Fatalf("bad integer sign extension %v -> %v", ft, tt) + } + } else { + // zero extension + switch 10*ft.Size() + tt.Size() { + case 12: + op = ssa.OpZeroExt8to16 + case 14: + op = ssa.OpZeroExt8to32 + case 18: + op = ssa.OpZeroExt8to64 + case 24: + op = ssa.OpZeroExt16to32 + case 28: + op = ssa.OpZeroExt16to64 + case 48: + op = ssa.OpZeroExt32to64 + default: + s.Fatalf("weird integer sign extension %v -> %v", ft, tt) + } + } + return s.newValue1(op, tt, v) + } + + if ft.IsComplex() && tt.IsComplex() { + var op ssa.Op + if ft.Size() == tt.Size() { + switch ft.Size() { + case 8: + op = ssa.OpRound32F + case 16: + op = ssa.OpRound64F + default: + s.Fatalf("weird complex conversion %v -> %v", ft, tt) + } + } else if ft.Size() == 8 && tt.Size() == 16 { + op = ssa.OpCvt32Fto64F 
+ } else if ft.Size() == 16 && tt.Size() == 8 { + op = ssa.OpCvt64Fto32F + } else { + s.Fatalf("weird complex conversion %v -> %v", ft, tt) + } + ftp := types.FloatForComplex(ft) + ttp := types.FloatForComplex(tt) + return s.newValue2(ssa.OpComplexMake, tt, + s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, v)), + s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, v))) + } + + if tt.IsComplex() { // and ft is not complex + // Needed for generics support - can't happen in normal Go code. + et := types.FloatForComplex(tt) + v = s.conv(n, v, ft, et) + return s.newValue2(ssa.OpComplexMake, tt, v, s.zeroVal(et)) + } + + if ft.IsFloat() || tt.IsFloat() { + conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}] + if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat { + if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 { + conv = conv1 + } + } + if Arch.LinkArch.Family == sys.ARM64 || Arch.LinkArch.Family == sys.Wasm || Arch.LinkArch.Family == sys.S390X || s.softFloat { + if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 { + conv = conv1 + } + } + + if Arch.LinkArch.Family == sys.MIPS && !s.softFloat { + if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() { + // tt is float32 or float64, and ft is also unsigned + if tt.Size() == 4 { + return s.uint32Tofloat32(n, v, ft, tt) + } + if tt.Size() == 8 { + return s.uint32Tofloat64(n, v, ft, tt) + } + } else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() { + // ft is float32 or float64, and tt is unsigned integer + if ft.Size() == 4 { + return s.float32ToUint32(n, v, ft, tt) + } + if ft.Size() == 8 { + return s.float64ToUint32(n, v, ft, tt) + } + } + } + + if !ok { + s.Fatalf("weird float conversion %v -> %v", ft, tt) + } + op1, op2, it := conv.op1, conv.op2, conv.intermediateType + + if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid { + // normal case, not 
tripping over unsigned 64 + if op1 == ssa.OpCopy { + if op2 == ssa.OpCopy { + return v + } + return s.newValueOrSfCall1(op2, tt, v) + } + if op2 == ssa.OpCopy { + return s.newValueOrSfCall1(op1, tt, v) + } + return s.newValueOrSfCall1(op2, tt, s.newValueOrSfCall1(op1, types.Types[it], v)) + } + // Tricky 64-bit unsigned cases. + if ft.IsInteger() { + // tt is float32 or float64, and ft is also unsigned + if tt.Size() == 4 { + return s.uint64Tofloat32(n, v, ft, tt) + } + if tt.Size() == 8 { + return s.uint64Tofloat64(n, v, ft, tt) + } + s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt) + } + // ft is float32 or float64, and tt is unsigned integer + if ft.Size() == 4 { + return s.float32ToUint64(n, v, ft, tt) + } + if ft.Size() == 8 { + return s.float64ToUint64(n, v, ft, tt) + } + s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt) + return nil + } + + s.Fatalf("unhandled OCONV %s -> %s", ft.Kind(), tt.Kind()) + return nil +} + +// expr converts the expression n to ssa, adds it to s and returns the ssa result. +func (s *state) expr(n ir.Node) *ssa.Value { + return s.exprCheckPtr(n, true) +} + +func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value { + if ir.HasUniquePos(n) { + // ONAMEs and named OLITERALs have the line number + // of the decl, not the use. See issue 14742. + s.pushLine(n.Pos()) + defer s.popLine() + } + + s.stmtList(n.Init()) + switch n.Op() { + case ir.OBYTES2STRTMP: + n := n.(*ir.ConvExpr) + slice := s.expr(n.X) + ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice) + len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) + return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len) + case ir.OSTR2BYTESTMP: + n := n.(*ir.ConvExpr) + str := s.expr(n.X) + ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str) + if !n.NonNil() { + // We need to ensure []byte("") evaluates to []byte{}, and not []byte(nil). 
+ // + // TODO(mdempsky): Investigate using "len != 0" instead of "ptr != nil". + cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], ptr, s.constNil(ptr.Type)) + zerobase := s.newValue1A(ssa.OpAddr, ptr.Type, ir.Syms.Zerobase, s.sb) + ptr = s.ternary(cond, ptr, zerobase) + } + len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str) + return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len) + case ir.OCFUNC: + n := n.(*ir.UnaryExpr) + aux := n.X.(*ir.Name).Linksym() + // OCFUNC is used to build function values, which must + // always reference ABIInternal entry points. + if aux.ABI() != obj.ABIInternal { + s.Fatalf("expected ABIInternal: %v", aux.ABI()) + } + return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb) + case ir.ONAME: + n := n.(*ir.Name) + if n.Class == ir.PFUNC { + // "value" of a function is the address of the function's closure + sym := staticdata.FuncLinksym(n) + return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb) + } + if s.canSSA(n) { + return s.variable(n, n.Type()) + } + return s.load(n.Type(), s.addr(n)) + case ir.OLINKSYMOFFSET: + n := n.(*ir.LinksymOffsetExpr) + return s.load(n.Type(), s.addr(n)) + case ir.ONIL: + n := n.(*ir.NilExpr) + t := n.Type() + switch { + case t.IsSlice(): + return s.constSlice(t) + case t.IsInterface(): + return s.constInterface(t) + default: + return s.constNil(t) + } + case ir.OLITERAL: + switch u := n.Val(); u.Kind() { + case constant.Int: + i := ir.IntVal(n.Type(), u) + switch n.Type().Size() { + case 1: + return s.constInt8(n.Type(), int8(i)) + case 2: + return s.constInt16(n.Type(), int16(i)) + case 4: + return s.constInt32(n.Type(), int32(i)) + case 8: + return s.constInt64(n.Type(), i) + default: + s.Fatalf("bad integer size %d", n.Type().Size()) + return nil + } + case constant.String: + i := constant.StringVal(u) + if i == "" { + return s.constEmptyString(n.Type()) + } + return s.entryNewValue0A(ssa.OpConstString, n.Type(), ssa.StringToAux(i)) + case 
constant.Bool: + return s.constBool(constant.BoolVal(u)) + case constant.Float: + f, _ := constant.Float64Val(u) + switch n.Type().Size() { + case 4: + return s.constFloat32(n.Type(), f) + case 8: + return s.constFloat64(n.Type(), f) + default: + s.Fatalf("bad float size %d", n.Type().Size()) + return nil + } + case constant.Complex: + re, _ := constant.Float64Val(constant.Real(u)) + im, _ := constant.Float64Val(constant.Imag(u)) + switch n.Type().Size() { + case 8: + pt := types.Types[types.TFLOAT32] + return s.newValue2(ssa.OpComplexMake, n.Type(), + s.constFloat32(pt, re), + s.constFloat32(pt, im)) + case 16: + pt := types.Types[types.TFLOAT64] + return s.newValue2(ssa.OpComplexMake, n.Type(), + s.constFloat64(pt, re), + s.constFloat64(pt, im)) + default: + s.Fatalf("bad complex size %d", n.Type().Size()) + return nil + } + default: + s.Fatalf("unhandled OLITERAL %v", u.Kind()) + return nil + } + case ir.OCONVNOP: + n := n.(*ir.ConvExpr) + to := n.Type() + from := n.X.Type() + + // Assume everything will work out, so set up our return value. + // Anything interesting that happens from here is a fatal. + x := s.expr(n.X) + if to == from { + return x + } + + // Special case for not confusing GC and liveness. + // We don't want pointers accidentally classified + // as not-pointers or vice-versa because of copy + // elision. 
+ if to.IsPtrShaped() != from.IsPtrShaped() { + return s.newValue2(ssa.OpConvert, to, x, s.mem()) + } + + v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type + + // CONVNOP closure + if to.Kind() == types.TFUNC && from.IsPtrShaped() { + return v + } + + // named <--> unnamed type or typed <--> untyped const + if from.Kind() == to.Kind() { + return v + } + + // unsafe.Pointer <--> *T + if to.IsUnsafePtr() && from.IsPtrShaped() || from.IsUnsafePtr() && to.IsPtrShaped() { + if s.checkPtrEnabled && checkPtrOK && to.IsPtr() && from.IsUnsafePtr() { + s.checkPtrAlignment(n, v, nil) + } + return v + } + + // map <--> *hmap + if to.Kind() == types.TMAP && from == types.NewPtr(reflectdata.MapType()) { + return v + } + + types.CalcSize(from) + types.CalcSize(to) + if from.Size() != to.Size() { + s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Size(), to, to.Size()) + return nil + } + if etypesign(from.Kind()) != etypesign(to.Kind()) { + s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Kind(), to, to.Kind()) + return nil + } + + if base.Flag.Cfg.Instrumenting { + // These appear to be fine, but they fail the + // integer constraint below, so okay them here. 
+ // Sample non-integer conversion: map[string]string -> *uint8 + return v + } + + if etypesign(from.Kind()) == 0 { + s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to) + return nil + } + + // integer, same width, same sign + return v + + case ir.OCONV: + n := n.(*ir.ConvExpr) + x := s.expr(n.X) + return s.conv(n, x, n.X.Type(), n.Type()) + + case ir.ODOTTYPE: + n := n.(*ir.TypeAssertExpr) + res, _ := s.dottype(n, false) + return res + + case ir.ODYNAMICDOTTYPE: + n := n.(*ir.DynamicTypeAssertExpr) + res, _ := s.dynamicDottype(n, false) + return res + + // binary ops + case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT: + n := n.(*ir.BinaryExpr) + a := s.expr(n.X) + b := s.expr(n.Y) + if n.X.Type().IsComplex() { + pt := types.FloatForComplex(n.X.Type()) + op := s.ssaOp(ir.OEQ, pt) + r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)) + i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)) + c := s.newValue2(ssa.OpAndB, types.Types[types.TBOOL], r, i) + switch n.Op() { + case ir.OEQ: + return c + case ir.ONE: + return s.newValue1(ssa.OpNot, types.Types[types.TBOOL], c) + default: + s.Fatalf("ordered complex compare %v", n.Op()) + } + } + + // Convert OGE and OGT into OLE and OLT. 
+ op := n.Op() + switch op { + case ir.OGE: + op, a, b = ir.OLE, b, a + case ir.OGT: + op, a, b = ir.OLT, b, a + } + if n.X.Type().IsFloat() { + // float comparison + return s.newValueOrSfCall2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b) + } + // integer comparison + return s.newValue2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b) + case ir.OMUL: + n := n.(*ir.BinaryExpr) + a := s.expr(n.X) + b := s.expr(n.Y) + if n.Type().IsComplex() { + mulop := ssa.OpMul64F + addop := ssa.OpAdd64F + subop := ssa.OpSub64F + pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64 + wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error + + areal := s.newValue1(ssa.OpComplexReal, pt, a) + breal := s.newValue1(ssa.OpComplexReal, pt, b) + aimag := s.newValue1(ssa.OpComplexImag, pt, a) + bimag := s.newValue1(ssa.OpComplexImag, pt, b) + + if pt != wt { // Widen for calculation + areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal) + breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal) + aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag) + bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag) + } + + xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag)) + ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal)) + + if pt != wt { // Narrow to store back + xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal) + ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag) + } + + return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag) + } + + if n.Type().IsFloat() { + return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) + } + + return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) + + case ir.ODIV: + n := n.(*ir.BinaryExpr) + a := s.expr(n.X) + b := s.expr(n.Y) + if n.Type().IsComplex() { + // TODO this is not executed 
because the front-end substitutes a runtime call. + // That probably ought to change; with modest optimization the widen/narrow + // conversions could all be elided in larger expression trees. + mulop := ssa.OpMul64F + addop := ssa.OpAdd64F + subop := ssa.OpSub64F + divop := ssa.OpDiv64F + pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64 + wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error + + areal := s.newValue1(ssa.OpComplexReal, pt, a) + breal := s.newValue1(ssa.OpComplexReal, pt, b) + aimag := s.newValue1(ssa.OpComplexImag, pt, a) + bimag := s.newValue1(ssa.OpComplexImag, pt, b) + + if pt != wt { // Widen for calculation + areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal) + breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal) + aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag) + bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag) + } + + denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag)) + xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag)) + ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag)) + + // TODO not sure if this is best done in wide precision or narrow + // Double-rounding might be an issue. + // Note that the pre-SSA implementation does the entire calculation + // in wide format, so wide is compatible. 
+ xreal = s.newValueOrSfCall2(divop, wt, xreal, denom) + ximag = s.newValueOrSfCall2(divop, wt, ximag, denom) + + if pt != wt { // Narrow to store back + xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal) + ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag) + } + return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag) + } + if n.Type().IsFloat() { + return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) + } + return s.intDivide(n, a, b) + case ir.OMOD: + n := n.(*ir.BinaryExpr) + a := s.expr(n.X) + b := s.expr(n.Y) + return s.intDivide(n, a, b) + case ir.OADD, ir.OSUB: + n := n.(*ir.BinaryExpr) + a := s.expr(n.X) + b := s.expr(n.Y) + if n.Type().IsComplex() { + pt := types.FloatForComplex(n.Type()) + op := s.ssaOp(n.Op(), pt) + return s.newValue2(ssa.OpComplexMake, n.Type(), + s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)), + s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))) + } + if n.Type().IsFloat() { + return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) + } + return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) + case ir.OAND, ir.OOR, ir.OXOR: + n := n.(*ir.BinaryExpr) + a := s.expr(n.X) + b := s.expr(n.Y) + return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b) + case ir.OANDNOT: + n := n.(*ir.BinaryExpr) + a := s.expr(n.X) + b := s.expr(n.Y) + b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b) + return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b) + case ir.OLSH, ir.ORSH: + n := n.(*ir.BinaryExpr) + a := s.expr(n.X) + b := s.expr(n.Y) + bt := b.Type + if bt.IsSigned() { + cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b) + s.check(cmp, ir.Syms.Panicshift) + bt = bt.ToUnsigned() + } + return s.newValue2(s.ssaShiftOp(n.Op(), n.Type(), bt), a.Type, a, b) + case ir.OANDAND, ir.OOROR: + // To implement OANDAND (and OOROR), we introduce a + // 
new temporary variable to hold the result. The + // variable is associated with the OANDAND node in the + // s.vars table (normally variables are only + // associated with ONAME nodes). We convert + // A && B + // to + // var = A + // if var { + // var = B + // } + // Using var in the subsequent block introduces the + // necessary phi variable. + n := n.(*ir.LogicalExpr) + el := s.expr(n.X) + s.vars[n] = el + + b := s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(el) + // In theory, we should set b.Likely here based on context. + // However, gc only gives us likeliness hints + // in a single place, for plain OIF statements, + // and passing around context is finnicky, so don't bother for now. + + bRight := s.f.NewBlock(ssa.BlockPlain) + bResult := s.f.NewBlock(ssa.BlockPlain) + if n.Op() == ir.OANDAND { + b.AddEdgeTo(bRight) + b.AddEdgeTo(bResult) + } else if n.Op() == ir.OOROR { + b.AddEdgeTo(bResult) + b.AddEdgeTo(bRight) + } + + s.startBlock(bRight) + er := s.expr(n.Y) + s.vars[n] = er + + b = s.endBlock() + b.AddEdgeTo(bResult) + + s.startBlock(bResult) + return s.variable(n, types.Types[types.TBOOL]) + case ir.OCOMPLEX: + n := n.(*ir.BinaryExpr) + r := s.expr(n.X) + i := s.expr(n.Y) + return s.newValue2(ssa.OpComplexMake, n.Type(), r, i) + + // unary ops + case ir.ONEG: + n := n.(*ir.UnaryExpr) + a := s.expr(n.X) + if n.Type().IsComplex() { + tp := types.FloatForComplex(n.Type()) + negop := s.ssaOp(n.Op(), tp) + return s.newValue2(ssa.OpComplexMake, n.Type(), + s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)), + s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a))) + } + return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a) + case ir.ONOT, ir.OBITNOT: + n := n.(*ir.UnaryExpr) + a := s.expr(n.X) + return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a) + case ir.OIMAG, ir.OREAL: + n := n.(*ir.UnaryExpr) + a := s.expr(n.X) + return s.newValue1(s.ssaOp(n.Op(), n.X.Type()), n.Type(), a) + case ir.OPLUS: + n := n.(*ir.UnaryExpr) + 
return s.expr(n.X) + + case ir.OADDR: + n := n.(*ir.AddrExpr) + return s.addr(n.X) + + case ir.ORESULT: + n := n.(*ir.ResultExpr) + if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall { + panic("Expected to see a previous call") + } + which := n.Index + if which == -1 { + panic(fmt.Errorf("ORESULT %v does not match call %s", n, s.prevCall)) + } + return s.resultOfCall(s.prevCall, which, n.Type()) + + case ir.ODEREF: + n := n.(*ir.StarExpr) + p := s.exprPtr(n.X, n.Bounded(), n.Pos()) + return s.load(n.Type(), p) + + case ir.ODOT: + n := n.(*ir.SelectorExpr) + if n.X.Op() == ir.OSTRUCTLIT { + // All literals with nonzero fields have already been + // rewritten during walk. Any that remain are just T{} + // or equivalents. Use the zero value. + if !ir.IsZero(n.X) { + s.Fatalf("literal with nonzero value in SSA: %v", n.X) + } + return s.zeroVal(n.Type()) + } + // If n is addressable and can't be represented in + // SSA, then load just the selected field. This + // prevents false memory dependencies in race/msan/asan + // instrumentation. + if ir.IsAddressable(n) && !s.canSSA(n) { + p := s.addr(n) + return s.load(n.Type(), p) + } + v := s.expr(n.X) + return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v) + + case ir.ODOTPTR: + n := n.(*ir.SelectorExpr) + p := s.exprPtr(n.X, n.Bounded(), n.Pos()) + p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p) + return s.load(n.Type(), p) + + case ir.OINDEX: + n := n.(*ir.IndexExpr) + switch { + case n.X.Type().IsString(): + if n.Bounded() && ir.IsConst(n.X, constant.String) && ir.IsConst(n.Index, constant.Int) { + // Replace "abc"[1] with 'b'. + // Delayed until now because "abc"[1] is not an ideal constant. + // See test/fixedbugs/issue11370.go. 
+ return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(ir.StringVal(n.X)[ir.Int64Val(n.Index)]))) + } + a := s.expr(n.X) + i := s.expr(n.Index) + len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a) + i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) + ptrtyp := s.f.Config.Types.BytePtr + ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a) + if ir.IsConst(n.Index, constant.Int) { + ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, ir.Int64Val(n.Index), ptr) + } else { + ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i) + } + return s.load(types.Types[types.TUINT8], ptr) + case n.X.Type().IsSlice(): + p := s.addr(n) + return s.load(n.X.Type().Elem(), p) + case n.X.Type().IsArray(): + if ssa.CanSSA(n.X.Type()) { + // SSA can handle arrays of length at most 1. + bound := n.X.Type().NumElem() + a := s.expr(n.X) + i := s.expr(n.Index) + if bound == 0 { + // Bounds check will never succeed. Might as well + // use constants for the bounds check. + z := s.constInt(types.Types[types.TINT], 0) + s.boundsCheck(z, z, ssa.BoundsIndex, false) + // The return value won't be live, return junk. + // But not quite junk, in case bounds checks are turned off. See issue 48092. 
+ return s.zeroVal(n.Type()) + } + len := s.constInt(types.Types[types.TINT], bound) + s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0 + return s.newValue1I(ssa.OpArraySelect, n.Type(), 0, a) + } + p := s.addr(n) + return s.load(n.X.Type().Elem(), p) + default: + s.Fatalf("bad type for index %v", n.X.Type()) + return nil + } + + case ir.OLEN, ir.OCAP: + n := n.(*ir.UnaryExpr) + switch { + case n.X.Type().IsSlice(): + op := ssa.OpSliceLen + if n.Op() == ir.OCAP { + op = ssa.OpSliceCap + } + return s.newValue1(op, types.Types[types.TINT], s.expr(n.X)) + case n.X.Type().IsString(): // string; not reachable for OCAP + return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.X)) + case n.X.Type().IsMap(), n.X.Type().IsChan(): + return s.referenceTypeBuiltin(n, s.expr(n.X)) + default: // array + return s.constInt(types.Types[types.TINT], n.X.Type().NumElem()) + } + + case ir.OSPTR: + n := n.(*ir.UnaryExpr) + a := s.expr(n.X) + if n.X.Type().IsSlice() { + if n.Bounded() { + return s.newValue1(ssa.OpSlicePtr, n.Type(), a) + } + return s.newValue1(ssa.OpSlicePtrUnchecked, n.Type(), a) + } else { + return s.newValue1(ssa.OpStringPtr, n.Type(), a) + } + + case ir.OITAB: + n := n.(*ir.UnaryExpr) + a := s.expr(n.X) + return s.newValue1(ssa.OpITab, n.Type(), a) + + case ir.OIDATA: + n := n.(*ir.UnaryExpr) + a := s.expr(n.X) + return s.newValue1(ssa.OpIData, n.Type(), a) + + case ir.OMAKEFACE: + n := n.(*ir.BinaryExpr) + tab := s.expr(n.X) + data := s.expr(n.Y) + return s.newValue2(ssa.OpIMake, n.Type(), tab, data) + + case ir.OSLICEHEADER: + n := n.(*ir.SliceHeaderExpr) + p := s.expr(n.Ptr) + l := s.expr(n.Len) + c := s.expr(n.Cap) + return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c) + + case ir.OSTRINGHEADER: + n := n.(*ir.StringHeaderExpr) + p := s.expr(n.Ptr) + l := s.expr(n.Len) + return s.newValue2(ssa.OpStringMake, n.Type(), p, l) + + case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR: + n := n.(*ir.SliceExpr) + check := 
s.checkPtrEnabled && n.Op() == ir.OSLICE3ARR && n.X.Op() == ir.OCONVNOP && n.X.(*ir.ConvExpr).X.Type().IsUnsafePtr() + v := s.exprCheckPtr(n.X, !check) + var i, j, k *ssa.Value + if n.Low != nil { + i = s.expr(n.Low) + } + if n.High != nil { + j = s.expr(n.High) + } + if n.Max != nil { + k = s.expr(n.Max) + } + p, l, c := s.slice(v, i, j, k, n.Bounded()) + if check { + // Emit checkptr instrumentation after bound check to prevent false positive, see #46938. + s.checkPtrAlignment(n.X.(*ir.ConvExpr), v, s.conv(n.Max, k, k.Type, types.Types[types.TUINTPTR])) + } + return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c) + + case ir.OSLICESTR: + n := n.(*ir.SliceExpr) + v := s.expr(n.X) + var i, j *ssa.Value + if n.Low != nil { + i = s.expr(n.Low) + } + if n.High != nil { + j = s.expr(n.High) + } + p, l, _ := s.slice(v, i, j, nil, n.Bounded()) + return s.newValue2(ssa.OpStringMake, n.Type(), p, l) + + case ir.OSLICE2ARRPTR: + // if arrlen > slice.len { + // panic(...) + // } + // slice.ptr + n := n.(*ir.ConvExpr) + v := s.expr(n.X) + nelem := n.Type().Elem().NumElem() + arrlen := s.constInt(types.Types[types.TINT], nelem) + cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v) + s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false) + op := ssa.OpSlicePtr + if nelem == 0 { + op = ssa.OpSlicePtrUnchecked + } + return s.newValue1(op, n.Type(), v) + + case ir.OCALLFUNC: + n := n.(*ir.CallExpr) + if ir.IsIntrinsicCall(n) { + return s.intrinsicCall(n) + } + fallthrough + + case ir.OCALLINTER: + n := n.(*ir.CallExpr) + return s.callResult(n, callNormal) + + case ir.OGETG: + n := n.(*ir.CallExpr) + return s.newValue1(ssa.OpGetG, n.Type(), s.mem()) + + case ir.OGETCALLERPC: + n := n.(*ir.CallExpr) + return s.newValue0(ssa.OpGetCallerPC, n.Type()) + + case ir.OGETCALLERSP: + n := n.(*ir.CallExpr) + return s.newValue1(ssa.OpGetCallerSP, n.Type(), s.mem()) + + case ir.OAPPEND: + return s.append(n.(*ir.CallExpr), false) + + case ir.OMIN, ir.OMAX: + return 
s.minMax(n.(*ir.CallExpr))

	case ir.OSTRUCTLIT, ir.OARRAYLIT:
		// All literals with nonzero fields have already been
		// rewritten during walk. Any that remain are just T{}
		// or equivalents. Use the zero value.
		n := n.(*ir.CompLitExpr)
		if !ir.IsZero(n) {
			s.Fatalf("literal with nonzero value in SSA: %v", n)
		}
		return s.zeroVal(n.Type())

	case ir.ONEW:
		n := n.(*ir.UnaryExpr)
		var rtype *ssa.Value
		if x, ok := n.X.(*ir.DynamicType); ok && x.Op() == ir.ODYNAMICTYPE {
			rtype = s.expr(x.RType)
		}
		return s.newObject(n.Type().Elem(), rtype)

	case ir.OUNSAFEADD:
		n := n.(*ir.BinaryExpr)
		ptr := s.expr(n.X)
		len := s.expr(n.Y)

		// Force len to uintptr to prevent misuse of garbage bits in the
		// upper part of the register (#48536).
		len = s.conv(n, len, len.Type, types.Types[types.TUINTPTR])

		return s.newValue2(ssa.OpAddPtr, n.Type(), ptr, len)

	default:
		s.Fatalf("unhandled expr %v", n.Op())
		return nil
	}
}

// resultOfCall returns the SSA value of result number which (of type t) of
// the call c. A result that is assigned to memory and cannot be represented
// in SSA form is loaded from its result slot via OpSelectNAddr; otherwise it
// is selected directly with OpSelectN.
func (s *state) resultOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
	aux := c.Aux.(*ssa.AuxCall)
	pa := aux.ParamAssignmentForResult(which)
	// TODO(register args) determine if in-memory TypeOK is better loaded early from SelectNAddr or later when SelectN is expanded.
	// SelectN is better for pattern-matching and possible call-aware analysis we might want to do in the future.
	if len(pa.Registers) == 0 && !ssa.CanSSA(t) {
		addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
		return s.rawLoad(t, addr)
	}
	return s.newValue1I(ssa.OpSelectN, t, which, c)
}

// resultAddrOfCall returns the address of result number which (of type t) of
// the call c. A memory-assigned result already has an address (OpSelectNAddr);
// a register-assigned result is first stored to a temporary and the address
// of that temporary is returned.
func (s *state) resultAddrOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
	aux := c.Aux.(*ssa.AuxCall)
	pa := aux.ParamAssignmentForResult(which)
	if len(pa.Registers) == 0 {
		return s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
	}
	_, addr := s.temp(c.Pos, t)
	rval := s.newValue1I(ssa.OpSelectN, t, which, c)
	s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, addr, rval, s.mem(), false)
	return addr
}

// append converts an OAPPEND node to SSA.
// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
// adds it to s, and returns the Value.
// If inplace is true, it writes the result of the OAPPEND expression n
// back to the slice being appended to, and returns nil.
// inplace MUST be set to false if the slice can be SSA'd.
// Note: this code only handles fixed-count appends. Dotdotdot appends
// have already been rewritten at this point (by walk).
func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value {
	// If inplace is false, process as expression "append(s, e1, e2, e3)":
	//
	// ptr, len, cap := s
	// len += 3
	// if uint(len) > uint(cap) {
	//     ptr, len, cap = growslice(ptr, len, cap, 3, typ)
	//     Note that len is unmodified by growslice.
	// }
	// // with write barriers, if needed:
	// *(ptr+(len-3)) = e1
	// *(ptr+(len-2)) = e2
	// *(ptr+(len-1)) = e3
	// return makeslice(ptr, len, cap)
	//
	//
	// If inplace is true, process as statement "s = append(s, e1, e2, e3)":
	//
	// a := &s
	// ptr, len, cap := s
	// len += 3
	// if uint(len) > uint(cap) {
	//    ptr, len, cap = growslice(ptr, len, cap, 3, typ)
	//    vardef(a)    // if necessary, advise liveness we are writing a new a
	//    *a.cap = cap // write before ptr to avoid a spill
	//    *a.ptr = ptr // with write barrier
	// }
	// *a.len = len
	// // with write barriers, if needed:
	// *(ptr+(len-3)) = e1
	// *(ptr+(len-2)) = e2
	// *(ptr+(len-1)) = e3

	et := n.Type().Elem()
	pt := types.NewPtr(et)

	// Evaluate slice
	sn := n.Args[0] // the slice node is the first in the list

	var slice, addr *ssa.Value
	if inplace {
		addr = s.addr(sn)
		slice = s.load(n.Type(), addr)
	} else {
		slice = s.expr(sn)
	}

	// Allocate new blocks
	grow := s.f.NewBlock(ssa.BlockPlain)
	assign := s.f.NewBlock(ssa.BlockPlain)

	// Decompose input slice.
	p := s.newValue1(ssa.OpSlicePtr, pt, slice)
	l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
	c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice)

	// Add number of new elements to length.
	nargs := s.constInt(types.Types[types.TINT], int64(len(n.Args)-1))
	l = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, nargs)

	// Decide if we need to grow
	cmp := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT]), types.Types[types.TBOOL], c, l)

	// Record values of ptr/len/cap before branch.

		if typ.IsFloat() {
			switch Arch.LinkArch.Family {
			case sys.AMD64, sys.ARM64:
				var op ssa.Op
				switch {
				case typ.Kind() == types.TFLOAT64 && n.Op() == ir.OMIN:
					op = ssa.OpMin64F
				case typ.Kind() == types.TFLOAT64 && n.Op() == ir.OMAX:
					op = ssa.OpMax64F
				case typ.Kind() == types.TFLOAT32 && n.Op() == ir.OMIN:
					op = ssa.OpMin32F
				case typ.Kind() == types.TFLOAT32 && n.Op() == ir.OMAX:
					op = ssa.OpMax32F
				}
				return fold(func(x, a *ssa.Value) *ssa.Value {
					return s.newValue2(op, typ, x, a)
				})
			}
		}
		// No dedicated instruction available: pick the runtime helper
		// matching the operand type and the op (min vs. max).
		var name string
		switch typ.Kind() {
		case types.TFLOAT32:
			switch n.Op() {
			case ir.OMIN:
				name = "fmin32"
			case ir.OMAX:
				name = "fmax32"
			}
		case types.TFLOAT64:
			switch n.Op() {
			case ir.OMIN:
				name = "fmin64"
			case ir.OMAX:
				name = "fmax64"
			}
		case types.TSTRING:
			switch n.Op() {
			case ir.OMIN:
				name = "strmin"
			case ir.OMAX:
				name = "strmax"
			}
		}
		fn := typecheck.LookupRuntimeFunc(name)

		return fold(func(x, a *ssa.Value) *ssa.Value {
			return s.rtcall(fn, true, []*types.Type{typ}, x, a)[0]
		})
	}

	// Integer (and other ordered non-float, non-string) case: a simple
	// compare-and-select per argument.
	lt := s.ssaOp(ir.OLT, typ)

	return fold(func(x, a *ssa.Value) *ssa.Value {
		switch n.Op() {
		case ir.OMIN:
			// a < x ? a : x
			return s.ternary(s.newValue2(lt, types.Types[types.TBOOL], a, x), a, x)
		case ir.OMAX:
			// x < a ? a : x
			return s.ternary(s.newValue2(lt, types.Types[types.TBOOL], x, a), a, x)
		}
		panic("unreachable")
	})
}

// ternary emits code to evaluate cond ? x : y.
func (s *state) ternary(cond, x, y *ssa.Value) *ssa.Value {
	// Note that we need a new ternaryVar each time (unlike okVar where we can
	// reuse the variable) because it might have a different type every time.
	ternaryVar := ssaMarker("ternary")

	bThen := s.f.NewBlock(ssa.BlockPlain)
	bElse := s.f.NewBlock(ssa.BlockPlain)
	bEnd := s.f.NewBlock(ssa.BlockPlain)

	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cond)
	b.AddEdgeTo(bThen)
	b.AddEdgeTo(bElse)

	s.startBlock(bThen)
	s.vars[ternaryVar] = x
	s.endBlock().AddEdgeTo(bEnd)

	s.startBlock(bElse)
	s.vars[ternaryVar] = y
	s.endBlock().AddEdgeTo(bEnd)

	s.startBlock(bEnd)
	r := s.variable(ternaryVar, x.Type) // generates the phi merging x and y
	delete(s.vars, ternaryVar)
	return r
}

// condBranch evaluates the boolean expression cond and branches to yes
// if cond is true and no if cond is false.
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) {
	switch cond.Op() {
	case ir.OANDAND:
		cond := cond.(*ir.LogicalExpr)
		mid := s.f.NewBlock(ssa.BlockPlain)
		s.stmtList(cond.Init())
		s.condBranch(cond.X, mid, no, max8(likely, 0))
		s.startBlock(mid)
		s.condBranch(cond.Y, yes, no, likely)
		return
		// Note: if likely==1, then both recursive calls pass 1.
		// If likely==-1, then we don't have enough information to decide
		// whether the first branch is likely or not. So we pass 0 for
		// the likeliness of the first branch.
		// TODO: have the frontend give us branch prediction hints for
		// OANDAND and OOROR nodes (if it ever has such info).
	case ir.OOROR:
		cond := cond.(*ir.LogicalExpr)
		mid := s.f.NewBlock(ssa.BlockPlain)
		s.stmtList(cond.Init())
		s.condBranch(cond.X, yes, mid, min8(likely, 0))
		s.startBlock(mid)
		s.condBranch(cond.Y, yes, no, likely)
		return
		// Note: if likely==-1, then both recursive calls pass -1.
		// If likely==1, then we don't have enough info to decide
		// the likelihood of the first branch.
	case ir.ONOT:
		cond := cond.(*ir.UnaryExpr)
		s.stmtList(cond.Init())
		// !cond: swap the branch targets and negate the likeliness hint.
		s.condBranch(cond.X, no, yes, -likely)
		return
	case ir.OCONVNOP:
		cond := cond.(*ir.ConvExpr)
		s.stmtList(cond.Init())
		s.condBranch(cond.X, yes, no, likely)
		return
	}
	c := s.expr(cond)
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(c)
	b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
	b.AddEdgeTo(yes)
	b.AddEdgeTo(no)
}

// skipMask is a bitmask recording top-level assignments that can be
// avoided; it is the type of assign's skip parameter.
type skipMask uint8

const (
	skipPtr skipMask = 1 << iota
	skipLen
	skipCap
)

// assign does left = right.
// Right has already been evaluated to ssa, left has not.
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// skip indicates assignments (at the top level) that can be avoided.
// mayOverlap indicates whether left&right might partially overlap in memory. Default is false.
func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask) {
	s.assignWhichMayOverlap(left, right, deref, skip, false)
}

// assignWhichMayOverlap is like assign, but takes an explicit mayOverlap flag
// indicating whether left and right might partially overlap in memory.
func (s *state) assignWhichMayOverlap(left ir.Node, right *ssa.Value, deref bool, skip skipMask, mayOverlap bool) {
	if left.Op() == ir.ONAME && ir.IsBlank(left) {
		// Assignment to _ has no effect.
		return
	}
	t := left.Type()
	types.CalcSize(t)
	if s.canSSA(left) {
		if deref {
			s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
		}
		if left.Op() == ir.ODOT {
			// We're assigning to a field of an ssa-able value.
			// We need to build a new structure with the new value for the
			// field we're assigning and the old values for the other fields.
			// For instance:
			//   type T struct {a, b, c int}
			//   var T x
			//   x.b = 5
			// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}

			// Grab information about the structure type.
			left := left.(*ir.SelectorExpr)
			t := left.X.Type()
			nf := t.NumFields()
			idx := fieldIdx(left)

			// Grab old value of structure.
	addr := s.addr(left)
	if ir.IsReflectHeaderDataField(left) {
		// Package unsafe's documentation says storing pointers into
		// reflect.SliceHeader and reflect.StringHeader's Data fields
		// is valid, even though they have type uintptr (#19168).
		// Mark it pointer type to signal the writebarrier pass to
		// insert a write barrier.
		t = types.Types[types.TUNSAFEPTR]
	}
	if deref {
		// Treat as a mem->mem move.
		if right == nil {
			s.zero(t, addr)
		} else {
			s.moveWhichMayOverlap(t, addr, right, mayOverlap)
		}
		return
	}
	// Treat as a store.
	s.storeType(t, addr, right, skip, !ir.IsAutoTmp(left))
}

// zeroVal returns the zero value for type t.
func (s *state) zeroVal(t *types.Type) *ssa.Value {
	switch {
	case t.IsInteger():
		switch t.Size() {
		case 1:
			return s.constInt8(t, 0)
		case 2:
			return s.constInt16(t, 0)
		case 4:
			return s.constInt32(t, 0)
		case 8:
			return s.constInt64(t, 0)
		default:
			s.Fatalf("bad sized integer type %v", t)
		}
	case t.IsFloat():
		switch t.Size() {
		case 4:
			return s.constFloat32(t, 0)
		case 8:
			return s.constFloat64(t, 0)
		default:
			s.Fatalf("bad sized float type %v", t)
		}
	case t.IsComplex():
		switch t.Size() {
		case 8:
			z := s.constFloat32(types.Types[types.TFLOAT32], 0)
			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
		case 16:
			z := s.constFloat64(types.Types[types.TFLOAT64], 0)
			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
		default:
			s.Fatalf("bad sized complex type %v", t)
		}

	case t.IsString():
		return s.constEmptyString(t)
	case t.IsPtrShaped():
		return s.constNil(t)
	case t.IsBoolean():
		return s.constBool(false)
	case t.IsInterface():
		return s.constInterface(t)
	case t.IsSlice():
		return s.constSlice(t)
	case t.IsStruct():
		// Zero each field recursively.
		n := t.NumFields()
		v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
		for i := 0; i < n; i++ {
			v.AddArg(s.zeroVal(t.FieldType(i)))
		}
		return v
	case t.IsArray():
		switch t.NumElem() {
		case 0:
			return s.entryNewValue0(ssa.OpArrayMake0, t)
		case 1:
			return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
		}
	}
	s.Fatalf("zero for type %v not implemented", t)
	return nil
}

// callKind distinguishes the different ways a call can be compiled:
// a normal call, a deferred call, a stack-allocated deferred call,
// a go statement, or a tail call.
type callKind int8

const (
	callNormal callKind = iota
	callDefer
	callDeferStack
	callGo
	callTail
)

// sfRtCallDef describes a soft-float runtime helper: the function to
// call (rtfn) and the kind of its result (rtype).
type sfRtCallDef struct {
	rtfn  *obj.LSym
	rtype types.Kind
}

// softFloatOps maps floating-point SSA ops to their soft-float runtime
// implementations. It is populated by softfloatInit.
var softFloatOps map[ssa.Op]sfRtCallDef

func softfloatInit() {
	// Some of these operations get transformed by sfcall.
	softFloatOps = map[ssa.Op]sfRtCallDef{
		ssa.OpAdd32F: {typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
		ssa.OpAdd64F: {typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
		ssa.OpSub32F: {typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
		ssa.OpSub64F: {typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
		ssa.OpMul32F: {typecheck.LookupRuntimeFunc("fmul32"), types.TFLOAT32},
		ssa.OpMul64F: {typecheck.LookupRuntimeFunc("fmul64"), types.TFLOAT64},
		ssa.OpDiv32F: {typecheck.LookupRuntimeFunc("fdiv32"), types.TFLOAT32},
		ssa.OpDiv64F: {typecheck.LookupRuntimeFunc("fdiv64"), types.TFLOAT64},

		ssa.OpEq64F:   {typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
		ssa.OpEq32F:   {typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
		ssa.OpNeq64F:  {typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
		ssa.OpNeq32F:  {typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
		ssa.OpLess64F: {typecheck.LookupRuntimeFunc("fgt64"), types.TBOOL},
		ssa.OpLess32F: {typecheck.LookupRuntimeFunc("fgt32"), types.TBOOL},
		ssa.OpLeq64F:  {typecheck.LookupRuntimeFunc("fge64"), types.TBOOL},
		ssa.OpLeq32F:  {typecheck.LookupRuntimeFunc("fge32"), types.TBOOL},

		ssa.OpCvt32to32F: {typecheck.LookupRuntimeFunc("fint32to32"), types.TFLOAT32},
		ssa.OpCvt32Fto32: {typecheck.LookupRuntimeFunc("f32toint32"), types.TINT32},
		ssa.OpCvt64to32F: {typecheck.LookupRuntimeFunc("fint64to32"), types.TFLOAT32},
		ssa.OpCvt32Fto64: {typecheck.LookupRuntimeFunc("f32toint64"), types.TINT64},
+ ssa.OpCvt64Uto32F: {typecheck.LookupRuntimeFunc("fuint64to32"), types.TFLOAT32}, + ssa.OpCvt32Fto64U: {typecheck.LookupRuntimeFunc("f32touint64"), types.TUINT64}, + ssa.OpCvt32to64F: {typecheck.LookupRuntimeFunc("fint32to64"), types.TFLOAT64}, + ssa.OpCvt64Fto32: {typecheck.LookupRuntimeFunc("f64toint32"), types.TINT32}, + ssa.OpCvt64to64F: {typecheck.LookupRuntimeFunc("fint64to64"), types.TFLOAT64}, + ssa.OpCvt64Fto64: {typecheck.LookupRuntimeFunc("f64toint64"), types.TINT64}, + ssa.OpCvt64Uto64F: {typecheck.LookupRuntimeFunc("fuint64to64"), types.TFLOAT64}, + ssa.OpCvt64Fto64U: {typecheck.LookupRuntimeFunc("f64touint64"), types.TUINT64}, + ssa.OpCvt32Fto64F: {typecheck.LookupRuntimeFunc("f32to64"), types.TFLOAT64}, + ssa.OpCvt64Fto32F: {typecheck.LookupRuntimeFunc("f64to32"), types.TFLOAT32}, + } +} + +// TODO: do not emit sfcall if operation can be optimized to constant in later +// opt phase +func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) { + f2i := func(t *types.Type) *types.Type { + switch t.Kind() { + case types.TFLOAT32: + return types.Types[types.TUINT32] + case types.TFLOAT64: + return types.Types[types.TUINT64] + } + return t + } + + if callDef, ok := softFloatOps[op]; ok { + switch op { + case ssa.OpLess32F, + ssa.OpLess64F, + ssa.OpLeq32F, + ssa.OpLeq64F: + args[0], args[1] = args[1], args[0] + case ssa.OpSub32F, + ssa.OpSub64F: + args[1] = s.newValue1(s.ssaOp(ir.ONEG, types.Types[callDef.rtype]), args[1].Type, args[1]) + } + + // runtime functions take uints for floats and returns uints. + // Convert to uints so we use the right calling convention. 
+ for i, a := range args { + if a.Type.IsFloat() { + args[i] = s.newValue1(ssa.OpCopy, f2i(a.Type), a) + } + } + + rt := types.Types[callDef.rtype] + result := s.rtcall(callDef.rtfn, true, []*types.Type{f2i(rt)}, args...)[0] + if rt.IsFloat() { + result = s.newValue1(ssa.OpCopy, rt, result) + } + if op == ssa.OpNeq32F || op == ssa.OpNeq64F { + result = s.newValue1(ssa.OpNot, result.Type, result) + } + return result, true + } + return nil, false +} + +var intrinsics map[intrinsicKey]intrinsicBuilder + +// An intrinsicBuilder converts a call node n into an ssa value that +// implements that call as an intrinsic. args is a list of arguments to the func. +type intrinsicBuilder func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value + +type intrinsicKey struct { + arch *sys.Arch + pkg string + fn string +} + +func InitTables() { + intrinsics = map[intrinsicKey]intrinsicBuilder{} + + var all []*sys.Arch + var p4 []*sys.Arch + var p8 []*sys.Arch + var lwatomics []*sys.Arch + for _, a := range &sys.Archs { + all = append(all, a) + if a.PtrSize == 4 { + p4 = append(p4, a) + } else { + p8 = append(p8, a) + } + if a.Family != sys.PPC64 { + lwatomics = append(lwatomics, a) + } + } + + // add adds the intrinsic b for pkg.fn for the given list of architectures. + add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) { + for _, a := range archs { + intrinsics[intrinsicKey{a, pkg, fn}] = b + } + } + // addF does the same as add but operates on architecture families. + addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) { + m := 0 + for _, f := range archFamilies { + if f >= 32 { + panic("too many architecture families") + } + m |= 1 << uint(f) + } + for _, a := range all { + if m>>uint(a.Family)&1 != 0 { + intrinsics[intrinsicKey{a, pkg, fn}] = b + } + } + } + // alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists. 
+ alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) { + aliased := false + for _, a := range archs { + if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok { + intrinsics[intrinsicKey{a, pkg, fn}] = b + aliased = true + } + } + if !aliased { + panic(fmt.Sprintf("attempted to alias undefined intrinsic: %s.%s", pkg, fn)) + } + } + + /******** runtime ********/ + if !base.Flag.Cfg.Instrumenting { + add("runtime", "slicebytetostringtmp", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + // Compiler frontend optimizations emit OBYTES2STRTMP nodes + // for the backend instead of slicebytetostringtmp calls + // when not instrumenting. + return s.newValue2(ssa.OpStringMake, n.Type(), args[0], args[1]) + }, + all...) + } + addF("runtime/internal/math", "MulUintptr", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if s.config.PtrSize == 4 { + return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1]) + } + return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1]) + }, + sys.AMD64, sys.I386, sys.Loong64, sys.MIPS64, sys.RISCV64, sys.ARM64) + add("runtime", "KeepAlive", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0]) + s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem()) + return nil + }, + all...) + add("runtime", "getclosureptr", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr) + }, + all...) + + add("runtime", "getcallerpc", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr) + }, + all...) 
+ + add("runtime", "getcallersp", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr, s.mem()) + }, + all...) + + addF("runtime", "publicationBarrier", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue1(ssa.OpPubBarrier, types.TypeMem, s.mem()) + return nil + }, + sys.ARM64, sys.PPC64, sys.RISCV64) + + brev_arch := []sys.ArchFamily{sys.AMD64, sys.I386, sys.ARM64, sys.ARM, sys.S390X} + if buildcfg.GOPPC64 >= 10 { + // Use only on Power10 as the new byte reverse instructions that Power10 provide + // make it worthwhile as an intrinsic + brev_arch = append(brev_arch, sys.PPC64) + } + /******** runtime/internal/sys ********/ + addF("runtime/internal/sys", "Bswap32", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0]) + }, + brev_arch...) + addF("runtime/internal/sys", "Bswap64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0]) + }, + brev_arch...) 
+ + /****** Prefetch ******/ + makePrefetchFunc := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue2(op, types.TypeMem, args[0], s.mem()) + return nil + } + } + + // Make Prefetch intrinsics for supported platforms + // On the unsupported platforms stub function will be eliminated + addF("runtime/internal/sys", "Prefetch", makePrefetchFunc(ssa.OpPrefetchCache), + sys.AMD64, sys.ARM64, sys.PPC64) + addF("runtime/internal/sys", "PrefetchStreamed", makePrefetchFunc(ssa.OpPrefetchCacheStreamed), + sys.AMD64, sys.ARM64, sys.PPC64) + + /******** runtime/internal/atomic ********/ + addF("runtime/internal/atomic", "Load", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) + }, + sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Load8", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v) + }, + sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Load64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) + }, + sys.AMD64, 
sys.ARM64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "LoadAcq", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) + }, + sys.PPC64, sys.S390X) + addF("runtime/internal/atomic", "LoadAcq64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) + }, + sys.PPC64) + addF("runtime/internal/atomic", "Loadp", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v) + }, + sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + + addF("runtime/internal/atomic", "Store", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem()) + return nil + }, + sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Store8", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem()) + return nil + }, + sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Store64", + func(s *state, n *ir.CallExpr, 
args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem()) + return nil + }, + sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "StorepNoWB", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem()) + return nil + }, + sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "StoreRel", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem()) + return nil + }, + sys.PPC64, sys.S390X) + addF("runtime/internal/atomic", "StoreRel64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem()) + return nil + }, + sys.PPC64) + + addF("runtime/internal/atomic", "Xchg", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) + }, + sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Xchg64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) + }, + sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + + type atomicOpEmitter func(s *state, n *ir.CallExpr, args 
[]*ssa.Value, op ssa.Op, typ types.Kind) + + makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.Kind, emit atomicOpEmitter) intrinsicBuilder { + + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + // Target Atomic feature is identified by dynamic detection + addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARM64HasATOMICS, s.sb) + v := s.load(types.Types[types.TBOOL], addr) + b := s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(v) + bTrue := s.f.NewBlock(ssa.BlockPlain) + bFalse := s.f.NewBlock(ssa.BlockPlain) + bEnd := s.f.NewBlock(ssa.BlockPlain) + b.AddEdgeTo(bTrue) + b.AddEdgeTo(bFalse) + b.Likely = ssa.BranchLikely + + // We have atomic instructions - use it directly. + s.startBlock(bTrue) + emit(s, n, args, op1, typ) + s.endBlock().AddEdgeTo(bEnd) + + // Use original instruction sequence. + s.startBlock(bFalse) + emit(s, n, args, op0, typ) + s.endBlock().AddEdgeTo(bEnd) + + // Merge results. + s.startBlock(bEnd) + if rtyp == types.TNIL { + return nil + } else { + return s.variable(n, types.Types[rtyp]) + } + } + } + + atomicXchgXaddEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) { + v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v) + } + addF("runtime/internal/atomic", "Xchg", + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64), + sys.ARM64) + addF("runtime/internal/atomic", "Xchg64", + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64), + sys.ARM64) + + addF("runtime/internal/atomic", "Xadd", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := 
s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) + }, + sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Xadd64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) + }, + sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + + addF("runtime/internal/atomic", "Xadd", + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64), + sys.ARM64) + addF("runtime/internal/atomic", "Xadd64", + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64), + sys.ARM64) + + addF("runtime/internal/atomic", "Cas", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) + }, + sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Cas64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, 
types.Types[types.TBOOL], v) + }, + sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "CasRel", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) + }, + sys.PPC64) + + atomicCasEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) { + v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v) + } + + addF("runtime/internal/atomic", "Cas", + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, types.TUINT32, types.TBOOL, atomicCasEmitterARM64), + sys.ARM64) + addF("runtime/internal/atomic", "Cas64", + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, types.TUINT64, types.TBOOL, atomicCasEmitterARM64), + sys.ARM64) + + addF("runtime/internal/atomic", "And8", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem()) + return nil + }, + sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "And", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem()) + return nil + }, + sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Or8", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = 
s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem()) + return nil + }, + sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Or", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem()) + return nil + }, + sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + + atomicAndOrEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) { + s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem()) + } + + addF("runtime/internal/atomic", "And8", + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64), + sys.ARM64) + addF("runtime/internal/atomic", "And", + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64), + sys.ARM64) + addF("runtime/internal/atomic", "Or8", + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64), + sys.ARM64) + addF("runtime/internal/atomic", "Or", + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64), + sys.ARM64) + + // Aliases for atomic load operations + alias("runtime/internal/atomic", "Loadint32", "runtime/internal/atomic", "Load", all...) + alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...) + alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...) + alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...) + alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...) + alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...) 
+ alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...) + alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...) + alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) + alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed + alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) + alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed + + // Aliases for atomic store operations + alias("runtime/internal/atomic", "Storeint32", "runtime/internal/atomic", "Store", all...) + alias("runtime/internal/atomic", "Storeint64", "runtime/internal/atomic", "Store64", all...) + alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...) + alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...) + alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...) + alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...) + alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) + alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed + alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) + alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed + + // Aliases for atomic swap operations + alias("runtime/internal/atomic", "Xchgint32", "runtime/internal/atomic", "Xchg", all...) + alias("runtime/internal/atomic", "Xchgint64", "runtime/internal/atomic", "Xchg64", all...) + alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...) 
+ alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...) + + // Aliases for atomic add operations + alias("runtime/internal/atomic", "Xaddint32", "runtime/internal/atomic", "Xadd", all...) + alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...) + alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...) + alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...) + + // Aliases for atomic CAS operations + alias("runtime/internal/atomic", "Casint32", "runtime/internal/atomic", "Cas", all...) + alias("runtime/internal/atomic", "Casint64", "runtime/internal/atomic", "Cas64", all...) + alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...) + alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...) + alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...) + alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...) + alias("runtime/internal/atomic", "CasRel", "runtime/internal/atomic", "Cas", lwatomics...) 
+ + /******** math ********/ + addF("math", "sqrt", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0]) + }, + sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm) + addF("math", "Trunc", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0]) + }, + sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm) + addF("math", "Ceil", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0]) + }, + sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm) + addF("math", "Floor", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0]) + }, + sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm) + addF("math", "Round", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0]) + }, + sys.ARM64, sys.PPC64, sys.S390X) + addF("math", "RoundToEven", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0]) + }, + sys.ARM64, sys.S390X, sys.Wasm) + addF("math", "Abs", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0]) + }, + sys.ARM64, sys.ARM, sys.PPC64, sys.RISCV64, sys.Wasm, sys.MIPS, sys.MIPS64) + addF("math", "Copysign", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1]) + }, + sys.PPC64, sys.RISCV64, sys.Wasm) + addF("math", "FMA", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2]) + }, + 
sys.ARM64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("math", "FMA", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if !s.config.UseFMA { + s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64] + return s.variable(n, types.Types[types.TFLOAT64]) + } + + if buildcfg.GOAMD64 >= 3 { + return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2]) + } + + v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasFMA) + b := s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(v) + bTrue := s.f.NewBlock(ssa.BlockPlain) + bFalse := s.f.NewBlock(ssa.BlockPlain) + bEnd := s.f.NewBlock(ssa.BlockPlain) + b.AddEdgeTo(bTrue) + b.AddEdgeTo(bFalse) + b.Likely = ssa.BranchLikely // >= haswell cpus are common + + // We have the intrinsic - use it directly. + s.startBlock(bTrue) + s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2]) + s.endBlock().AddEdgeTo(bEnd) + + // Call the pure Go version. + s.startBlock(bFalse) + s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64] + s.endBlock().AddEdgeTo(bEnd) + + // Merge results. + s.startBlock(bEnd) + return s.variable(n, types.Types[types.TFLOAT64]) + }, + sys.AMD64) + addF("math", "FMA", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if !s.config.UseFMA { + s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64] + return s.variable(n, types.Types[types.TFLOAT64]) + } + addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARMHasVFPv4, s.sb) + v := s.load(types.Types[types.TBOOL], addr) + b := s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(v) + bTrue := s.f.NewBlock(ssa.BlockPlain) + bFalse := s.f.NewBlock(ssa.BlockPlain) + bEnd := s.f.NewBlock(ssa.BlockPlain) + b.AddEdgeTo(bTrue) + b.AddEdgeTo(bFalse) + b.Likely = ssa.BranchLikely + + // We have the intrinsic - use it directly. 
+ s.startBlock(bTrue) + s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2]) + s.endBlock().AddEdgeTo(bEnd) + + // Call the pure Go version. + s.startBlock(bFalse) + s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64] + s.endBlock().AddEdgeTo(bEnd) + + // Merge results. + s.startBlock(bEnd) + return s.variable(n, types.Types[types.TFLOAT64]) + }, + sys.ARM) + + makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if buildcfg.GOAMD64 >= 2 { + return s.newValue1(op, types.Types[types.TFLOAT64], args[0]) + } + + v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasSSE41) + b := s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(v) + bTrue := s.f.NewBlock(ssa.BlockPlain) + bFalse := s.f.NewBlock(ssa.BlockPlain) + bEnd := s.f.NewBlock(ssa.BlockPlain) + b.AddEdgeTo(bTrue) + b.AddEdgeTo(bFalse) + b.Likely = ssa.BranchLikely // most machines have sse4.1 nowadays + + // We have the intrinsic - use it directly. + s.startBlock(bTrue) + s.vars[n] = s.newValue1(op, types.Types[types.TFLOAT64], args[0]) + s.endBlock().AddEdgeTo(bEnd) + + // Call the pure Go version. + s.startBlock(bFalse) + s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64] + s.endBlock().AddEdgeTo(bEnd) + + // Merge results. 
+ s.startBlock(bEnd) + return s.variable(n, types.Types[types.TFLOAT64]) + } + } + addF("math", "RoundToEven", + makeRoundAMD64(ssa.OpRoundToEven), + sys.AMD64) + addF("math", "Floor", + makeRoundAMD64(ssa.OpFloor), + sys.AMD64) + addF("math", "Ceil", + makeRoundAMD64(ssa.OpCeil), + sys.AMD64) + addF("math", "Trunc", + makeRoundAMD64(ssa.OpTrunc), + sys.AMD64) + + /******** math/bits ********/ + addF("math/bits", "TrailingZeros64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0]) + }, + sys.AMD64, sys.I386, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) + addF("math/bits", "TrailingZeros32", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0]) + }, + sys.AMD64, sys.I386, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) + addF("math/bits", "TrailingZeros16", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0]) + c := s.constInt32(types.Types[types.TUINT32], 1<<16) + y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c) + return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y) + }, + sys.MIPS) + addF("math/bits", "TrailingZeros16", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0]) + }, + sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm) + addF("math/bits", "TrailingZeros16", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0]) + c := s.constInt64(types.Types[types.TUINT64], 1<<16) + y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c) + return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y) + }, + sys.S390X, sys.PPC64) + addF("math/bits", "TrailingZeros8", + func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { + x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0]) + c := s.constInt32(types.Types[types.TUINT32], 1<<8) + y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c) + return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y) + }, + sys.MIPS) + addF("math/bits", "TrailingZeros8", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0]) + }, + sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm) + addF("math/bits", "TrailingZeros8", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0]) + c := s.constInt64(types.Types[types.TUINT64], 1<<8) + y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c) + return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y) + }, + sys.S390X) + alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...) + alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...) + // ReverseBytes inlines correctly, no need to intrinsify it. 
+ // Nothing special is needed for targets where ReverseBytes16 lowers to a rotate + // On Power10, 16-bit rotate is not available so use BRH instruction + if buildcfg.GOPPC64 >= 10 { + addF("math/bits", "ReverseBytes16", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBswap16, types.Types[types.TUINT], args[0]) + }, + sys.PPC64) + } + + addF("math/bits", "Len64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0]) + }, + sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) + addF("math/bits", "Len32", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0]) + }, + sys.AMD64, sys.ARM64, sys.PPC64) + addF("math/bits", "Len32", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if s.config.PtrSize == 4 { + return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0]) + } + x := s.newValue1(ssa.OpZeroExt32to64, types.Types[types.TUINT64], args[0]) + return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x) + }, + sys.ARM, sys.S390X, sys.MIPS, sys.Wasm) + addF("math/bits", "Len16", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if s.config.PtrSize == 4 { + x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0]) + return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x) + } + x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0]) + return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x) + }, + sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) + addF("math/bits", "Len16", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0]) + }, + sys.AMD64) + addF("math/bits", "Len8", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if s.config.PtrSize == 
4 { + x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0]) + return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x) + } + x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0]) + return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x) + }, + sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) + addF("math/bits", "Len8", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0]) + }, + sys.AMD64) + addF("math/bits", "Len", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if s.config.PtrSize == 4 { + return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0]) + } + return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0]) + }, + sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm) + // LeadingZeros is handled because it trivially calls Len. + addF("math/bits", "Reverse64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0]) + }, + sys.ARM64) + addF("math/bits", "Reverse32", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0]) + }, + sys.ARM64) + addF("math/bits", "Reverse16", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0]) + }, + sys.ARM64) + addF("math/bits", "Reverse8", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0]) + }, + sys.ARM64) + addF("math/bits", "Reverse", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0]) + }, + sys.ARM64) + addF("math/bits", "RotateLeft8", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return 
s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1]) + }, + sys.AMD64) + addF("math/bits", "RotateLeft16", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1]) + }, + sys.AMD64) + addF("math/bits", "RotateLeft32", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1]) + }, + sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm, sys.Loong64) + addF("math/bits", "RotateLeft64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1]) + }, + sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm, sys.Loong64) + alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...) + + makeOnesCountAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if buildcfg.GOAMD64 >= 2 { + return s.newValue1(op, types.Types[types.TINT], args[0]) + } + + v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasPOPCNT) + b := s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(v) + bTrue := s.f.NewBlock(ssa.BlockPlain) + bFalse := s.f.NewBlock(ssa.BlockPlain) + bEnd := s.f.NewBlock(ssa.BlockPlain) + b.AddEdgeTo(bTrue) + b.AddEdgeTo(bFalse) + b.Likely = ssa.BranchLikely // most machines have popcnt nowadays + + // We have the intrinsic - use it directly. + s.startBlock(bTrue) + s.vars[n] = s.newValue1(op, types.Types[types.TINT], args[0]) + s.endBlock().AddEdgeTo(bEnd) + + // Call the pure Go version. + s.startBlock(bFalse) + s.vars[n] = s.callResult(n, callNormal) // types.Types[TINT] + s.endBlock().AddEdgeTo(bEnd) + + // Merge results. 
+ s.startBlock(bEnd) + return s.variable(n, types.Types[types.TINT]) + } + } + addF("math/bits", "OnesCount64", + makeOnesCountAMD64(ssa.OpPopCount64), + sys.AMD64) + addF("math/bits", "OnesCount64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0]) + }, + sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm) + addF("math/bits", "OnesCount32", + makeOnesCountAMD64(ssa.OpPopCount32), + sys.AMD64) + addF("math/bits", "OnesCount32", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0]) + }, + sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm) + addF("math/bits", "OnesCount16", + makeOnesCountAMD64(ssa.OpPopCount16), + sys.AMD64) + addF("math/bits", "OnesCount16", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0]) + }, + sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm) + addF("math/bits", "OnesCount8", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0]) + }, + sys.S390X, sys.PPC64, sys.Wasm) + addF("math/bits", "OnesCount", + makeOnesCountAMD64(ssa.OpPopCount64), + sys.AMD64) + addF("math/bits", "Mul64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1]) + }, + sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64, sys.RISCV64, sys.Loong64) + alias("math/bits", "Mul", "math/bits", "Mul64", p8...) + alias("runtime/internal/math", "Mul64", "math/bits", "Mul64", p8...) 
+ addF("math/bits", "Add64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2]) + }, + sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.RISCV64, sys.Loong64, sys.MIPS64) + alias("math/bits", "Add", "math/bits", "Add64", p8...) + alias("runtime/internal/math", "Add64", "math/bits", "Add64", all...) + addF("math/bits", "Sub64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2]) + }, + sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.RISCV64, sys.Loong64, sys.MIPS64) + alias("math/bits", "Sub", "math/bits", "Sub64", p8...) + addF("math/bits", "Div64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + // check for divide-by-zero/overflow and panic with appropriate message + cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64])) + s.check(cmpZero, ir.Syms.Panicdivide) + cmpOverflow := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[0], args[2]) + s.check(cmpOverflow, ir.Syms.Panicoverflow) + return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2]) + }, + sys.AMD64) + alias("math/bits", "Div", "math/bits", "Div64", sys.ArchAMD64) + + alias("runtime/internal/sys", "TrailingZeros8", "math/bits", "TrailingZeros8", all...) + alias("runtime/internal/sys", "TrailingZeros32", "math/bits", "TrailingZeros32", all...) + alias("runtime/internal/sys", "TrailingZeros64", "math/bits", "TrailingZeros64", all...) + alias("runtime/internal/sys", "Len8", "math/bits", "Len8", all...) + alias("runtime/internal/sys", "Len64", "math/bits", "Len64", all...) 
+ alias("runtime/internal/sys", "OnesCount64", "math/bits", "OnesCount64", all...) + + /******** sync/atomic ********/ + + // Note: these are disabled by flag_race in findIntrinsic below. + alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...) + alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...) + alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...) + alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...) + alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...) + alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...) + alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...) + + alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...) + alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...) + // Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap. + alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...) + alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...) + alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...) + alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...) + + alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...) + alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...) + alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...) + alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...) + alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...) + alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...) + + alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...) 
+ alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...) + alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...) + alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...) + alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...) + alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...) + + alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...) + alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...) + alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...) + alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...) + alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...) + alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...) + + /******** math/big ********/ + alias("math/big", "mulWW", "math/bits", "Mul64", p8...) +} + +// findIntrinsic returns a function which builds the SSA equivalent of the +// function identified by the symbol sym. If sym is not an intrinsic call, returns nil. +func findIntrinsic(sym *types.Sym) intrinsicBuilder { + if sym == nil || sym.Pkg == nil { + return nil + } + pkg := sym.Pkg.Path + if sym.Pkg == ir.Pkgs.Runtime { + pkg = "runtime" + } + if base.Flag.Race && pkg == "sync/atomic" { + // The race detector needs to be able to intercept these calls. + // We can't intrinsify them. + return nil + } + // Skip intrinsifying math functions (which may contain hard-float + // instructions) when soft-float + if Arch.SoftFloat && pkg == "math" { + return nil + } + + fn := sym.Name + if ssa.IntrinsicsDisable { + if pkg == "runtime" && (fn == "getcallerpc" || fn == "getcallersp" || fn == "getclosureptr") { + // These runtime functions don't have definitions, must be intrinsics. 
+ } else { + return nil + } + } + return intrinsics[intrinsicKey{Arch.LinkArch.Arch, pkg, fn}] +} + +func IsIntrinsicCall(n *ir.CallExpr) bool { + if n == nil { + return false + } + name, ok := n.Fun.(*ir.Name) + if !ok { + return false + } + return findIntrinsic(name.Sym()) != nil +} + +// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation. +func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value { + v := findIntrinsic(n.Fun.Sym())(s, n, s.intrinsicArgs(n)) + if ssa.IntrinsicsDebug > 0 { + x := v + if x == nil { + x = s.mem() + } + if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 { + x = x.Args[0] + } + base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.Fun.Sym().Name, x.LongString()) + } + return v +} + +// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them. +func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value { + args := make([]*ssa.Value, len(n.Args)) + for i, n := range n.Args { + args[i] = s.expr(n) + } + return args +} + +// openDeferRecord adds code to evaluate and store the function for an open-code defer +// call, and records info about the defer, so we can generate proper code on the +// exit paths. n is the sub-node of the defer node that is the actual function +// call. We will also record funcdata information on where the function is stored +// (as well as the deferBits variable), and this will enable us to run the proper +// defer calls during panics. +func (s *state) openDeferRecord(n *ir.CallExpr) { + if len(n.Args) != 0 || n.Op() != ir.OCALLFUNC || n.Fun.Type().NumResults() != 0 { + s.Fatalf("defer call with arguments or results: %v", n) + } + + opendefer := &openDeferInfo{ + n: n, + } + fn := n.Fun + // We must always store the function value in a stack slot for the + // runtime panic code to use. But in the defer exit code, we will + // call the function directly if it is a static function. 
+ closureVal := s.expr(fn) + closure := s.openDeferSave(fn.Type(), closureVal) + opendefer.closureNode = closure.Aux.(*ir.Name) + if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) { + opendefer.closure = closure + } + index := len(s.openDefers) + s.openDefers = append(s.openDefers, opendefer) + + // Update deferBits only after evaluation and storage to stack of + // the function is successful. + bitvalue := s.constInt8(types.Types[types.TUINT8], 1<= 0; i-- { + r := s.openDefers[i] + bCond := s.f.NewBlock(ssa.BlockPlain) + bEnd := s.f.NewBlock(ssa.BlockPlain) + + deferBits := s.variable(deferBitsVar, types.Types[types.TUINT8]) + // Generate code to check if the bit associated with the current + // defer is set. + bitval := s.constInt8(types.Types[types.TUINT8], 1< 1 { + s.f.Warnl(lineno, "removed nil check") + } + return p + } + p = s.nilCheck(p) + return p +} + +// nilCheck generates nil pointer checking code. +// Used only for automatically inserted nil checks, +// not for user code like 'x != nil'. +// Returns a "definitely not nil" copy of x to ensure proper ordering +// of the uses of the post-nilcheck pointer. +func (s *state) nilCheck(ptr *ssa.Value) *ssa.Value { + if base.Debug.DisableNil != 0 || s.curfn.NilCheckDisabled() { + return ptr + } + return s.newValue2(ssa.OpNilCheck, ptr.Type, ptr, s.mem()) +} + +// boundsCheck generates bounds checking code. Checks if 0 <= idx <[=] len, branches to exit if not. +// Starts a new block on return. +// On input, len must be converted to full int width and be nonnegative. +// Returns idx converted to full int width. +// If bounded is true then caller guarantees the index is not out of bounds +// (but boundsCheck will still extend the index to full int width). 
func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
	idx = s.extendIndex(idx, len, kind, bounded)

	if bounded || base.Flag.B != 0 {
		// If bounded or bounds checking is flag-disabled, then no check necessary,
		// just return the extended index.
		//
		// Here, bounded == true if the compiler generated the index itself,
		// such as in the expansion of a slice initializer. These indexes are
		// compiler-generated, not Go program variables, so they cannot be
		// attacker-controlled, so we can omit Spectre masking as well.
		//
		// Note that we do not want to omit Spectre masking in code like:
		//
		//	if 0 <= i && i < len(x) {
		//		use(x[i])
		//	}
		//
		// Lucky for us, bounded==false for that code.
		// In that case (handled below), we emit a bound check (and Spectre mask)
		// and then the prove pass will remove the bounds check.
		// In theory the prove pass could potentially remove certain
		// Spectre masks, but it's very delicate and probably better
		// to be conservative and leave them all in.
		return idx
	}

	bNext := s.f.NewBlock(ssa.BlockPlain)
	bPanic := s.f.NewBlock(ssa.BlockExit)

	if !idx.Type.IsSigned() {
		// Use the unsigned variant of the failure kind for unsigned indexes.
		switch kind {
		case ssa.BoundsIndex:
			kind = ssa.BoundsIndexU
		case ssa.BoundsSliceAlen:
			kind = ssa.BoundsSliceAlenU
		case ssa.BoundsSliceAcap:
			kind = ssa.BoundsSliceAcapU
		case ssa.BoundsSliceB:
			kind = ssa.BoundsSliceBU
		case ssa.BoundsSlice3Alen:
			kind = ssa.BoundsSlice3AlenU
		case ssa.BoundsSlice3Acap:
			kind = ssa.BoundsSlice3AcapU
		case ssa.BoundsSlice3B:
			kind = ssa.BoundsSlice3BU
		case ssa.BoundsSlice3C:
			kind = ssa.BoundsSlice3CU
		}
	}

	// Indexing uses OpIsInBounds (idx < len); slicing uses OpIsSliceInBounds,
	// which also permits idx == len.
	var cmp *ssa.Value
	if kind == ssa.BoundsIndex || kind == ssa.BoundsIndexU {
		cmp = s.newValue2(ssa.OpIsInBounds, types.Types[types.TBOOL], idx, len)
	} else {
		cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[types.TBOOL], idx, len)
	}
	// Branch on the bounds test: the in-bounds path continues at bNext,
	// the out-of-bounds path enters the panic block.
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchLikely // in-bounds is the expected case
	b.AddEdgeTo(bNext)
	b.AddEdgeTo(bPanic)

	s.startBlock(bPanic)
	if Arch.LinkArch.Family == sys.Wasm {
		// TODO(khr): figure out how to do "register" based calling convention for bounds checks.
		// Should be similar to gcWriteBarrier, but I can't make it work.
		s.rtcall(BoundsCheckFunc[kind], false, nil, idx, len)
	} else {
		mem := s.newValue3I(ssa.OpPanicBounds, types.TypeMem, int64(kind), idx, len, s.mem())
		s.endBlock().SetControl(mem)
	}
	s.startBlock(bNext)

	// In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses.
	if base.Flag.Cfg.SpectreIndex {
		op := ssa.OpSpectreIndex
		if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU {
			op = ssa.OpSpectreSliceIndex
		}
		idx = s.newValue2(op, types.Types[types.TINT], idx, len)
	}

	return idx
}

// If cmp (a bool) is false, panic using the given function.
func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
	b := s.endBlock()
	b.Kind = ssa.BlockIf
	b.SetControl(cmp)
	b.Likely = ssa.BranchLikely // the non-panicking path is the expected one
	bNext := s.f.NewBlock(ssa.BlockPlain)
	line := s.peekPos()
	pos := base.Ctxt.PosTable.Pos(line)
	fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
	// Panic blocks are cached per (function, position) in s.panics, so
	// repeated checks for the same fn at the same source line all branch
	// to a single shared runtime call.
	bPanic := s.panics[fl]
	if bPanic == nil {
		bPanic = s.f.NewBlock(ssa.BlockPlain)
		s.panics[fl] = bPanic
		s.startBlock(bPanic)
		// The panic call takes/returns memory to ensure that the right
		// memory state is observed if the panic happens.
		s.rtcall(fn, false, nil)
	}
	b.AddEdgeTo(bNext)
	b.AddEdgeTo(bPanic)
	s.startBlock(bNext)
}

// intDivide generates code for the integer division-style operation n (with
// operands a and b), inserting a runtime divide-by-zero check unless b is
// known to be a nonzero constant.
func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value {
	needcheck := true
	switch b.Op {
	case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
		if b.AuxInt != 0 {
			// Division by a nonzero constant can never panic.
			needcheck = false
		}
	}
	if needcheck {
		// do a size-appropriate check for zero
		cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type()), types.Types[types.TBOOL], b, s.zeroVal(n.Type()))
		s.check(cmp, ir.Syms.Panicdivide)
	}
	return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}

// rtcall issues a call to the given runtime function fn with the listed args.
// Returns a slice of results of the given result types.
// The call is added to the end of the current block.
// If returns is false, the block is marked as an exit block.
func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
	s.prevCall = nil
	// Write args to the stack
	off := base.Ctxt.Arch.FixedFrameSize
	var callArgs []*ssa.Value
	var callArgTypes []*types.Type

	// Lay out each argument at its aligned offset while collecting the
	// values and types for the late-expanded call below.
	for _, arg := range args {
		t := arg.Type
		off = types.RoundUp(off, t.Alignment())
		size := t.Size()
		callArgs = append(callArgs, arg)
		callArgTypes = append(callArgTypes, t)
		off += size
	}
	off = types.RoundUp(off, int64(types.RegSize))

	// Issue call
	var call *ssa.Value
	aux := ssa.StaticAuxCall(fn, s.f.ABIDefault.ABIAnalyzeTypes(callArgTypes, results))
	callArgs = append(callArgs, s.mem())
	call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
	call.AddArgs(callArgs...)
	// Record the call's memory result (result index len(results)) as the
	// new memory state.
	s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(results)), call)

	if !returns {
		// Finish block
		b := s.endBlock()
		b.Kind = ssa.BlockExit
		b.SetControl(call)
		call.AuxInt = off - base.Ctxt.Arch.FixedFrameSize
		if len(results) > 0 {
			s.Fatalf("panic call can't have results")
		}
		return nil
	}

	// Load results
	res := make([]*ssa.Value, len(results))
	for i, t := range results {
		off = types.RoundUp(off, t.Alignment())
		res[i] = s.resultOfCall(call, int64(i), t)
		off += t.Size()
	}
	off = types.RoundUp(off, int64(types.PtrSize))

	// Remember how much callee stack space we needed.
	call.AuxInt = off

	return res
}

// do *left = right for type t.
func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) {
	s.instrument(t, left, instrumentWrite)

	if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
		// Known to not have write barrier. Store the whole type.
		s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
		return
	}

	// store scalar fields first, so write barrier stores for
	// pointer fields can be grouped together, and scalar values
	// don't need to be live across the write barrier call.
	// TODO: if the writebarrier pass knows how to reorder stores,
	// we can do a single store here as long as skip==0.
	s.storeTypeScalars(t, left, right, skip)
	if skip&skipPtr == 0 && t.HasPointers() {
		s.storeTypePtrs(t, left, right)
	}
}

// do *left = right for all scalar (non-pointer) parts of t.
// skip selects parts (length, capacity, pointer) to leave unstored.
func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
	switch {
	case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
		s.store(t, left, right)
	case t.IsPtrShaped():
		if t.IsPtr() && t.Elem().NotInHeap() {
			s.store(t, left, right) // see issue 42032
		}
		// otherwise, no scalar fields.
	case t.IsString():
		if skip&skipLen != 0 {
			return
		}
		len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], right)
		lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
		s.store(types.Types[types.TINT], lenAddr, len)
	case t.IsSlice():
		if skip&skipLen == 0 {
			len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], right)
			lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
			s.store(types.Types[types.TINT], lenAddr, len)
		}
		if skip&skipCap == 0 {
			cap := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], right)
			capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
			s.store(types.Types[types.TINT], capAddr, cap)
		}
	case t.IsInterface():
		// itab field doesn't need a write barrier (even though it is a pointer).
		itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
		s.store(types.Types[types.TUINTPTR], left, itab)
	case t.IsStruct():
		// Recurse into each field at its offset within the struct.
		n := t.NumFields()
		for i := 0; i < n; i++ {
			ft := t.FieldType(i)
			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
			s.storeTypeScalars(ft, addr, val, 0)
		}
	case t.IsArray() && t.NumElem() == 0:
		// nothing
	case t.IsArray() && t.NumElem() == 1:
		s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
	default:
		s.Fatalf("bad write barrier type %v", t)
	}
}

// do *left = right for all pointer parts of t.
func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
	switch {
	case t.IsPtrShaped():
		if t.IsPtr() && t.Elem().NotInHeap() {
			break // see issue 42032
		}
		s.store(t, left, right)
	case t.IsString():
		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
		s.store(s.f.Config.Types.BytePtr, left, ptr)
	case t.IsSlice():
		elType := types.NewPtr(t.Elem())
		ptr := s.newValue1(ssa.OpSlicePtr, elType, right)
		s.store(elType, left, ptr)
	case t.IsInterface():
		// itab field is treated as a scalar.
		idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
		idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
		s.store(s.f.Config.Types.BytePtr, idataAddr, idata)
	case t.IsStruct():
		// Recurse into fields, skipping those with no pointers at all.
		n := t.NumFields()
		for i := 0; i < n; i++ {
			ft := t.FieldType(i)
			if !ft.HasPointers() {
				continue
			}
			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
			s.storeTypePtrs(ft, addr, val)
		}
	case t.IsArray() && t.NumElem() == 0:
		// nothing
	case t.IsArray() && t.NumElem() == 1:
		s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
	default:
		s.Fatalf("bad write barrier type %v", t)
	}
}

// putArg evaluates n for the purpose of passing it as an argument to a function and returns the value for the call.
func (s *state) putArg(n ir.Node, t *types.Type) *ssa.Value {
	var a *ssa.Value
	if !ssa.CanSSA(t) {
		// Non-SSA-able values are passed by dereferencing their address.
		a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
	} else {
		a = s.expr(n)
	}
	return a
}

// storeArgWithBase evaluates n and stores it at offset off from base for use
// as a call argument. Non-SSA-able values are copied with a move; SSA-able
// values are stored via storeType.
func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) {
	pt := types.NewPtr(t)
	var addr *ssa.Value
	if base == s.sp {
		// Use special routine that avoids allocation on duplicate offsets.
		addr = s.constOffPtrSP(pt, off)
	} else {
		addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
	}

	if !ssa.CanSSA(t) {
		a := s.addr(n)
		s.move(t, addr, a)
		return
	}

	a := s.expr(n)
	s.storeType(t, addr, a, 0, false)
}

// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
// i,j,k may be nil, in which case they are set to their default value.
// v may be a slice, string or pointer to an array.
+func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) { + t := v.Type + var ptr, len, cap *ssa.Value + switch { + case t.IsSlice(): + ptr = s.newValue1(ssa.OpSlicePtr, types.NewPtr(t.Elem()), v) + len = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v) + cap = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], v) + case t.IsString(): + ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[types.TUINT8]), v) + len = s.newValue1(ssa.OpStringLen, types.Types[types.TINT], v) + cap = len + case t.IsPtr(): + if !t.Elem().IsArray() { + s.Fatalf("bad ptr to array in slice %v\n", t) + } + nv := s.nilCheck(v) + ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), nv) + len = s.constInt(types.Types[types.TINT], t.Elem().NumElem()) + cap = len + default: + s.Fatalf("bad type in slice %v\n", t) + } + + // Set default values + if i == nil { + i = s.constInt(types.Types[types.TINT], 0) + } + if j == nil { + j = len + } + three := true + if k == nil { + three = false + k = cap + } + + // Panic if slice indices are not in bounds. + // Make sure we check these in reverse order so that we're always + // comparing against a value known to be nonnegative. See issue 28797. + if three { + if k != cap { + kind := ssa.BoundsSlice3Alen + if t.IsSlice() { + kind = ssa.BoundsSlice3Acap + } + k = s.boundsCheck(k, cap, kind, bounded) + } + if j != k { + j = s.boundsCheck(j, k, ssa.BoundsSlice3B, bounded) + } + i = s.boundsCheck(i, j, ssa.BoundsSlice3C, bounded) + } else { + if j != k { + kind := ssa.BoundsSliceAlen + if t.IsSlice() { + kind = ssa.BoundsSliceAcap + } + j = s.boundsCheck(j, k, kind, bounded) + } + i = s.boundsCheck(i, j, ssa.BoundsSliceB, bounded) + } + + // Word-sized integer operations. + subOp := s.ssaOp(ir.OSUB, types.Types[types.TINT]) + mulOp := s.ssaOp(ir.OMUL, types.Types[types.TINT]) + andOp := s.ssaOp(ir.OAND, types.Types[types.TINT]) + + // Calculate the length (rlen) and capacity (rcap) of the new slice. 
+ // For strings the capacity of the result is unimportant. However, + // we use rcap to test if we've generated a zero-length slice. + // Use length of strings for that. + rlen := s.newValue2(subOp, types.Types[types.TINT], j, i) + rcap := rlen + if j != k && !t.IsString() { + rcap = s.newValue2(subOp, types.Types[types.TINT], k, i) + } + + if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 { + // No pointer arithmetic necessary. + return ptr, rlen, rcap + } + + // Calculate the base pointer (rptr) for the new slice. + // + // Generate the following code assuming that indexes are in bounds. + // The masking is to make sure that we don't generate a slice + // that points to the next object in memory. We cannot just set + // the pointer to nil because then we would create a nil slice or + // string. + // + // rcap = k - i + // rlen = j - i + // rptr = ptr + (mask(rcap) & (i * stride)) + // + // Where mask(x) is 0 if x==0 and -1 if x>0 and stride is the width + // of the element type. + stride := s.constInt(types.Types[types.TINT], ptr.Type.Elem().Size()) + + // The delta is the number of bytes to offset ptr by. + delta := s.newValue2(mulOp, types.Types[types.TINT], i, stride) + + // If we're slicing to the point where the capacity is zero, + // zero out the delta. + mask := s.newValue1(ssa.OpSlicemask, types.Types[types.TINT], rcap) + delta = s.newValue2(andOp, types.Types[types.TINT], delta, mask) + + // Compute rptr = ptr + delta. 
+ rptr := s.newValue2(ssa.OpAddPtr, ptr.Type, ptr, delta) + + return rptr, rlen, rcap +} + +type u642fcvtTab struct { + leq, cvt2F, and, rsh, or, add ssa.Op + one func(*state, *types.Type, int64) *ssa.Value +} + +var u64_f64 = u642fcvtTab{ + leq: ssa.OpLeq64, + cvt2F: ssa.OpCvt64to64F, + and: ssa.OpAnd64, + rsh: ssa.OpRsh64Ux64, + or: ssa.OpOr64, + add: ssa.OpAdd64F, + one: (*state).constInt64, +} + +var u64_f32 = u642fcvtTab{ + leq: ssa.OpLeq64, + cvt2F: ssa.OpCvt64to32F, + and: ssa.OpAnd64, + rsh: ssa.OpRsh64Ux64, + or: ssa.OpOr64, + add: ssa.OpAdd32F, + one: (*state).constInt64, +} + +func (s *state) uint64Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { + return s.uint64Tofloat(&u64_f64, n, x, ft, tt) +} + +func (s *state) uint64Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { + return s.uint64Tofloat(&u64_f32, n, x, ft, tt) +} + +func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { + // if x >= 0 { + // result = (floatY) x + // } else { + // y = uintX(x) ; y = x & 1 + // z = uintX(x) ; z = z >> 1 + // z = z | y + // result = floatY(z) + // result = result + result + // } + // + // Code borrowed from old code generator. + // What's going on: large 64-bit "unsigned" looks like + // negative number to hardware's integer-to-float + // conversion. However, because the mantissa is only + // 63 bits, we don't need the LSB, so instead we do an + // unsigned right shift (divide by two), convert, and + // double. However, before we do that, we need to be + // sure that we do not lose a "1" if that made the + // difference in the resulting rounding. Therefore, we + // preserve it, and OR (not ADD) it back in. The case + // that matters is when the eleven discarded bits are + // equal to 10000000001; that rounds up, and the 1 cannot + // be lost else it would round down if the LSB of the + // candidate mantissa is 0. 
+ cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x) + b := s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(cmp) + b.Likely = ssa.BranchLikely + + bThen := s.f.NewBlock(ssa.BlockPlain) + bElse := s.f.NewBlock(ssa.BlockPlain) + bAfter := s.f.NewBlock(ssa.BlockPlain) + + b.AddEdgeTo(bThen) + s.startBlock(bThen) + a0 := s.newValue1(cvttab.cvt2F, tt, x) + s.vars[n] = a0 + s.endBlock() + bThen.AddEdgeTo(bAfter) + + b.AddEdgeTo(bElse) + s.startBlock(bElse) + one := cvttab.one(s, ft, 1) + y := s.newValue2(cvttab.and, ft, x, one) + z := s.newValue2(cvttab.rsh, ft, x, one) + z = s.newValue2(cvttab.or, ft, z, y) + a := s.newValue1(cvttab.cvt2F, tt, z) + a1 := s.newValue2(cvttab.add, tt, a, a) + s.vars[n] = a1 + s.endBlock() + bElse.AddEdgeTo(bAfter) + + s.startBlock(bAfter) + return s.variable(n, n.Type()) +} + +type u322fcvtTab struct { + cvtI2F, cvtF2F ssa.Op +} + +var u32_f64 = u322fcvtTab{ + cvtI2F: ssa.OpCvt32to64F, + cvtF2F: ssa.OpCopy, +} + +var u32_f32 = u322fcvtTab{ + cvtI2F: ssa.OpCvt32to32F, + cvtF2F: ssa.OpCvt64Fto32F, +} + +func (s *state) uint32Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { + return s.uint32Tofloat(&u32_f64, n, x, ft, tt) +} + +func (s *state) uint32Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { + return s.uint32Tofloat(&u32_f32, n, x, ft, tt) +} + +func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { + // if x >= 0 { + // result = floatY(x) + // } else { + // result = floatY(float64(x) + (1<<32)) + // } + cmp := s.newValue2(ssa.OpLeq32, types.Types[types.TBOOL], s.zeroVal(ft), x) + b := s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(cmp) + b.Likely = ssa.BranchLikely + + bThen := s.f.NewBlock(ssa.BlockPlain) + bElse := s.f.NewBlock(ssa.BlockPlain) + bAfter := s.f.NewBlock(ssa.BlockPlain) + + b.AddEdgeTo(bThen) + s.startBlock(bThen) + a0 := s.newValue1(cvttab.cvtI2F, tt, x) + s.vars[n] = a0 + s.endBlock() + 
bThen.AddEdgeTo(bAfter) + + b.AddEdgeTo(bElse) + s.startBlock(bElse) + a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[types.TFLOAT64], x) + twoToThe32 := s.constFloat64(types.Types[types.TFLOAT64], float64(1<<32)) + a2 := s.newValue2(ssa.OpAdd64F, types.Types[types.TFLOAT64], a1, twoToThe32) + a3 := s.newValue1(cvttab.cvtF2F, tt, a2) + + s.vars[n] = a3 + s.endBlock() + bElse.AddEdgeTo(bAfter) + + s.startBlock(bAfter) + return s.variable(n, n.Type()) +} + +// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels. +func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value { + if !n.X.Type().IsMap() && !n.X.Type().IsChan() { + s.Fatalf("node must be a map or a channel") + } + // if n == nil { + // return 0 + // } else { + // // len + // return *((*int)n) + // // cap + // return *(((*int)n)+1) + // } + lenType := n.Type() + nilValue := s.constNil(types.Types[types.TUINTPTR]) + cmp := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], x, nilValue) + b := s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(cmp) + b.Likely = ssa.BranchUnlikely + + bThen := s.f.NewBlock(ssa.BlockPlain) + bElse := s.f.NewBlock(ssa.BlockPlain) + bAfter := s.f.NewBlock(ssa.BlockPlain) + + // length/capacity of a nil map/chan is zero + b.AddEdgeTo(bThen) + s.startBlock(bThen) + s.vars[n] = s.zeroVal(lenType) + s.endBlock() + bThen.AddEdgeTo(bAfter) + + b.AddEdgeTo(bElse) + s.startBlock(bElse) + switch n.Op() { + case ir.OLEN: + // length is stored in the first word for map/chan + s.vars[n] = s.load(lenType, x) + case ir.OCAP: + // capacity is stored in the second word for chan + sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Size(), x) + s.vars[n] = s.load(lenType, sw) + default: + s.Fatalf("op must be OLEN or OCAP") + } + s.endBlock() + bElse.AddEdgeTo(bAfter) + + s.startBlock(bAfter) + return s.variable(n, lenType) +} + +type f2uCvtTab struct { + ltf, cvt2U, subf, or ssa.Op + floatValue func(*state, *types.Type, float64) *ssa.Value 
+ intValue func(*state, *types.Type, int64) *ssa.Value + cutoff uint64 +} + +var f32_u64 = f2uCvtTab{ + ltf: ssa.OpLess32F, + cvt2U: ssa.OpCvt32Fto64, + subf: ssa.OpSub32F, + or: ssa.OpOr64, + floatValue: (*state).constFloat32, + intValue: (*state).constInt64, + cutoff: 1 << 63, +} + +var f64_u64 = f2uCvtTab{ + ltf: ssa.OpLess64F, + cvt2U: ssa.OpCvt64Fto64, + subf: ssa.OpSub64F, + or: ssa.OpOr64, + floatValue: (*state).constFloat64, + intValue: (*state).constInt64, + cutoff: 1 << 63, +} + +var f32_u32 = f2uCvtTab{ + ltf: ssa.OpLess32F, + cvt2U: ssa.OpCvt32Fto32, + subf: ssa.OpSub32F, + or: ssa.OpOr32, + floatValue: (*state).constFloat32, + intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) }, + cutoff: 1 << 31, +} + +var f64_u32 = f2uCvtTab{ + ltf: ssa.OpLess64F, + cvt2U: ssa.OpCvt64Fto32, + subf: ssa.OpSub64F, + or: ssa.OpOr32, + floatValue: (*state).constFloat64, + intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) }, + cutoff: 1 << 31, +} + +func (s *state) float32ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { + return s.floatToUint(&f32_u64, n, x, ft, tt) +} +func (s *state) float64ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { + return s.floatToUint(&f64_u64, n, x, ft, tt) +} + +func (s *state) float32ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { + return s.floatToUint(&f32_u32, n, x, ft, tt) +} + +func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { + return s.floatToUint(&f64_u32, n, x, ft, tt) +} + +func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { + // cutoff:=1<<(intY_Size-1) + // if x < floatX(cutoff) { + // result = uintY(x) + // } else { + // y = x - floatX(cutoff) + // z = uintY(y) + // result = z | -(cutoff) + // } + cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff)) + cmp := s.newValue2(cvttab.ltf, 
types.Types[types.TBOOL], x, cutoff) + b := s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(cmp) + b.Likely = ssa.BranchLikely + + bThen := s.f.NewBlock(ssa.BlockPlain) + bElse := s.f.NewBlock(ssa.BlockPlain) + bAfter := s.f.NewBlock(ssa.BlockPlain) + + b.AddEdgeTo(bThen) + s.startBlock(bThen) + a0 := s.newValue1(cvttab.cvt2U, tt, x) + s.vars[n] = a0 + s.endBlock() + bThen.AddEdgeTo(bAfter) + + b.AddEdgeTo(bElse) + s.startBlock(bElse) + y := s.newValue2(cvttab.subf, ft, x, cutoff) + y = s.newValue1(cvttab.cvt2U, tt, y) + z := cvttab.intValue(s, tt, int64(-cvttab.cutoff)) + a1 := s.newValue2(cvttab.or, tt, y, z) + s.vars[n] = a1 + s.endBlock() + bElse.AddEdgeTo(bAfter) + + s.startBlock(bAfter) + return s.variable(n, n.Type()) +} + +// dottype generates SSA for a type assertion node. +// commaok indicates whether to panic or return a bool. +// If commaok is false, resok will be nil. +func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) { + iface := s.expr(n.X) // input interface + target := s.reflectType(n.Type()) // target type + var targetItab *ssa.Value + if n.ITab != nil { + targetItab = s.expr(n.ITab) + } + return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, nil, target, targetItab, commaok, n.Descriptor) +} + +func (s *state) dynamicDottype(n *ir.DynamicTypeAssertExpr, commaok bool) (res, resok *ssa.Value) { + iface := s.expr(n.X) + var source, target, targetItab *ssa.Value + if n.SrcRType != nil { + source = s.expr(n.SrcRType) + } + if !n.X.Type().IsEmptyInterface() && !n.Type().IsInterface() { + byteptr := s.f.Config.Types.BytePtr + targetItab = s.expr(n.ITab) + // TODO(mdempsky): Investigate whether compiling n.RType could be + // better than loading itab.typ. 
+ target = s.load(byteptr, s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), targetItab)) // itab.typ + } else { + target = s.expr(n.RType) + } + return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, source, target, targetItab, commaok, nil) +} + +// dottype1 implements a x.(T) operation. iface is the argument (x), dst is the type we're asserting to (T) +// and src is the type we're asserting from. +// source is the *runtime._type of src +// target is the *runtime._type of dst. +// If src is a nonempty interface and dst is not an interface, targetItab is an itab representing (dst, src). Otherwise it is nil. +// commaok is true if the caller wants a boolean success value. Otherwise, the generated code panics if the conversion fails. +// descriptor is a compiler-allocated internal/abi.TypeAssert whose address is passed to runtime.typeAssert when +// the target type is a compile-time-known non-empty interface. It may be nil. +func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, source, target, targetItab *ssa.Value, commaok bool, descriptor *obj.LSym) (res, resok *ssa.Value) { + typs := s.f.Config.Types + byteptr := typs.BytePtr + if dst.IsInterface() { + if dst.IsEmptyInterface() { + // Converting to an empty interface. + // Input could be an empty or nonempty interface. + if base.Debug.TypeAssert > 0 { + base.WarnfAt(pos, "type assertion inlined") + } + + // Get itab/type field from input. + itab := s.newValue1(ssa.OpITab, byteptr, iface) + // Conversion succeeds iff that field is not nil. + cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr)) + + if src.IsEmptyInterface() && commaok { + // Converting empty interface to empty interface with ,ok is just a nil check. + return iface, cond + } + + // Branch on nilness. 
+ b := s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(cond) + b.Likely = ssa.BranchLikely + bOk := s.f.NewBlock(ssa.BlockPlain) + bFail := s.f.NewBlock(ssa.BlockPlain) + b.AddEdgeTo(bOk) + b.AddEdgeTo(bFail) + + if !commaok { + // On failure, panic by calling panicnildottype. + s.startBlock(bFail) + s.rtcall(ir.Syms.Panicnildottype, false, nil, target) + + // On success, return (perhaps modified) input interface. + s.startBlock(bOk) + if src.IsEmptyInterface() { + res = iface // Use input interface unchanged. + return + } + // Load type out of itab, build interface with existing idata. + off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab) + typ := s.load(byteptr, off) + idata := s.newValue1(ssa.OpIData, byteptr, iface) + res = s.newValue2(ssa.OpIMake, dst, typ, idata) + return + } + + s.startBlock(bOk) + // nonempty -> empty + // Need to load type from itab + off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab) + s.vars[typVar] = s.load(byteptr, off) + s.endBlock() + + // itab is nil, might as well use that as the nil result. + s.startBlock(bFail) + s.vars[typVar] = itab + s.endBlock() + + // Merge point. + bEnd := s.f.NewBlock(ssa.BlockPlain) + bOk.AddEdgeTo(bEnd) + bFail.AddEdgeTo(bEnd) + s.startBlock(bEnd) + idata := s.newValue1(ssa.OpIData, byteptr, iface) + res = s.newValue2(ssa.OpIMake, dst, s.variable(typVar, byteptr), idata) + resok = cond + delete(s.vars, typVar) // no practical effect, just to indicate typVar is no longer live. + return + } + // converting to a nonempty interface needs a runtime call. + if base.Debug.TypeAssert > 0 { + base.WarnfAt(pos, "type assertion not inlined") + } + + itab := s.newValue1(ssa.OpITab, byteptr, iface) + data := s.newValue1(ssa.OpIData, types.Types[types.TUNSAFEPTR], iface) + + // First, check for nil. 
+ bNil := s.f.NewBlock(ssa.BlockPlain) + bNonNil := s.f.NewBlock(ssa.BlockPlain) + bMerge := s.f.NewBlock(ssa.BlockPlain) + cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr)) + b := s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(cond) + b.Likely = ssa.BranchLikely + b.AddEdgeTo(bNonNil) + b.AddEdgeTo(bNil) + + s.startBlock(bNil) + if commaok { + s.vars[typVar] = itab // which will be nil + b := s.endBlock() + b.AddEdgeTo(bMerge) + } else { + // Panic if input is nil. + s.rtcall(ir.Syms.Panicnildottype, false, nil, target) + } + + // Get typ, possibly by loading out of itab. + s.startBlock(bNonNil) + typ := itab + if !src.IsEmptyInterface() { + typ = s.load(byteptr, s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)) + } + + // Check the cache first. + var d *ssa.Value + if descriptor != nil { + d = s.newValue1A(ssa.OpAddr, byteptr, descriptor, s.sb) + if base.Flag.N == 0 && rtabi.UseInterfaceSwitchCache(Arch.LinkArch.Name) { + // Note: we can only use the cache if we have the right atomic load instruction. + // Double-check that here. + if _, ok := intrinsics[intrinsicKey{Arch.LinkArch.Arch, "runtime/internal/atomic", "Loadp"}]; !ok { + s.Fatalf("atomic load not available") + } + // Pick right size ops. + var mul, and, add, zext ssa.Op + if s.config.PtrSize == 4 { + mul = ssa.OpMul32 + and = ssa.OpAnd32 + add = ssa.OpAdd32 + zext = ssa.OpCopy + } else { + mul = ssa.OpMul64 + and = ssa.OpAnd64 + add = ssa.OpAdd64 + zext = ssa.OpZeroExt32to64 + } + + loopHead := s.f.NewBlock(ssa.BlockPlain) + loopBody := s.f.NewBlock(ssa.BlockPlain) + cacheHit := s.f.NewBlock(ssa.BlockPlain) + cacheMiss := s.f.NewBlock(ssa.BlockPlain) + + // Load cache pointer out of descriptor, with an atomic load so + // we ensure that we see a fully written cache. 
+ atomicLoad := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(typs.BytePtr, types.TypeMem), d, s.mem()) + cache := s.newValue1(ssa.OpSelect0, typs.BytePtr, atomicLoad) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, atomicLoad) + + // Load hash from type or itab. + var hash *ssa.Value + if src.IsEmptyInterface() { + hash = s.newValue2(ssa.OpLoad, typs.UInt32, s.newValue1I(ssa.OpOffPtr, typs.UInt32Ptr, 2*s.config.PtrSize, typ), s.mem()) + } else { + hash = s.newValue2(ssa.OpLoad, typs.UInt32, s.newValue1I(ssa.OpOffPtr, typs.UInt32Ptr, 2*s.config.PtrSize, itab), s.mem()) + } + hash = s.newValue1(zext, typs.Uintptr, hash) + s.vars[hashVar] = hash + // Load mask from cache. + mask := s.newValue2(ssa.OpLoad, typs.Uintptr, cache, s.mem()) + // Jump to loop head. + b := s.endBlock() + b.AddEdgeTo(loopHead) + + // At loop head, get pointer to the cache entry. + // e := &cache.Entries[hash&mask] + s.startBlock(loopHead) + idx := s.newValue2(and, typs.Uintptr, s.variable(hashVar, typs.Uintptr), mask) + idx = s.newValue2(mul, typs.Uintptr, idx, s.uintptrConstant(uint64(2*s.config.PtrSize))) + idx = s.newValue2(add, typs.Uintptr, idx, s.uintptrConstant(uint64(s.config.PtrSize))) + e := s.newValue2(ssa.OpAddPtr, typs.UintptrPtr, cache, idx) + // hash++ + s.vars[hashVar] = s.newValue2(add, typs.Uintptr, s.variable(hashVar, typs.Uintptr), s.uintptrConstant(1)) + + // Look for a cache hit. + // if e.Typ == typ { goto hit } + eTyp := s.newValue2(ssa.OpLoad, typs.Uintptr, e, s.mem()) + cmp1 := s.newValue2(ssa.OpEqPtr, typs.Bool, typ, eTyp) + b = s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(cmp1) + b.AddEdgeTo(cacheHit) + b.AddEdgeTo(loopBody) + + // Look for an empty entry, the tombstone for this hash table. 
+ // if e.Typ == nil { goto miss } + s.startBlock(loopBody) + cmp2 := s.newValue2(ssa.OpEqPtr, typs.Bool, eTyp, s.constNil(typs.BytePtr)) + b = s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(cmp2) + b.AddEdgeTo(cacheMiss) + b.AddEdgeTo(loopHead) + + // On a hit, load the data fields of the cache entry. + // Itab = e.Itab + s.startBlock(cacheHit) + eItab := s.newValue2(ssa.OpLoad, typs.BytePtr, s.newValue1I(ssa.OpOffPtr, typs.BytePtrPtr, s.config.PtrSize, e), s.mem()) + s.vars[typVar] = eItab + b = s.endBlock() + b.AddEdgeTo(bMerge) + + // On a miss, call into the runtime to get the answer. + s.startBlock(cacheMiss) + } + } + + // Call into runtime to get itab for result. + if descriptor != nil { + itab = s.rtcall(ir.Syms.TypeAssert, true, []*types.Type{byteptr}, d, typ)[0] + } else { + var fn *obj.LSym + if commaok { + fn = ir.Syms.AssertE2I2 + } else { + fn = ir.Syms.AssertE2I + } + itab = s.rtcall(fn, true, []*types.Type{byteptr}, target, typ)[0] + } + s.vars[typVar] = itab + b = s.endBlock() + b.AddEdgeTo(bMerge) + + // Build resulting interface. + s.startBlock(bMerge) + itab = s.variable(typVar, byteptr) + var ok *ssa.Value + if commaok { + ok = s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr)) + } + return s.newValue2(ssa.OpIMake, dst, itab, data), ok + } + + if base.Debug.TypeAssert > 0 { + base.WarnfAt(pos, "type assertion inlined") + } + + // Converting to a concrete type. + direct := types.IsDirectIface(dst) + itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface + if base.Debug.TypeAssert > 0 { + base.WarnfAt(pos, "type assertion inlined") + } + var wantedFirstWord *ssa.Value + if src.IsEmptyInterface() { + // Looking for pointer to target type. + wantedFirstWord = target + } else { + // Looking for pointer to itab for target type and source interface. 
+ wantedFirstWord = targetItab + } + + var tmp ir.Node // temporary for use with large types + var addr *ssa.Value // address of tmp + if commaok && !ssa.CanSSA(dst) { + // unSSAable type, use temporary. + // TODO: get rid of some of these temporaries. + tmp, addr = s.temp(pos, dst) + } + + cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, wantedFirstWord) + b := s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(cond) + b.Likely = ssa.BranchLikely + + bOk := s.f.NewBlock(ssa.BlockPlain) + bFail := s.f.NewBlock(ssa.BlockPlain) + b.AddEdgeTo(bOk) + b.AddEdgeTo(bFail) + + if !commaok { + // on failure, panic by calling panicdottype + s.startBlock(bFail) + taddr := source + if taddr == nil { + taddr = s.reflectType(src) + } + if src.IsEmptyInterface() { + s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr) + } else { + s.rtcall(ir.Syms.PanicdottypeI, false, nil, itab, target, taddr) + } + + // on success, return data from interface + s.startBlock(bOk) + if direct { + return s.newValue1(ssa.OpIData, dst, iface), nil + } + p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface) + return s.load(dst, p), nil + } + + // commaok is the more complicated case because we have + // a control flow merge point. + bEnd := s.f.NewBlock(ssa.BlockPlain) + // Note that we need a new valVar each time (unlike okVar where we can + // reuse the variable) because it might have a different type every time. 
+ valVar := ssaMarker("val") + + // type assertion succeeded + s.startBlock(bOk) + if tmp == nil { + if direct { + s.vars[valVar] = s.newValue1(ssa.OpIData, dst, iface) + } else { + p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface) + s.vars[valVar] = s.load(dst, p) + } + } else { + p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface) + s.move(dst, addr, p) + } + s.vars[okVar] = s.constBool(true) + s.endBlock() + bOk.AddEdgeTo(bEnd) + + // type assertion failed + s.startBlock(bFail) + if tmp == nil { + s.vars[valVar] = s.zeroVal(dst) + } else { + s.zero(dst, addr) + } + s.vars[okVar] = s.constBool(false) + s.endBlock() + bFail.AddEdgeTo(bEnd) + + // merge point + s.startBlock(bEnd) + if tmp == nil { + res = s.variable(valVar, dst) + delete(s.vars, valVar) // no practical effect, just to indicate typVar is no longer live. + } else { + res = s.load(dst, addr) + } + resok = s.variable(okVar, types.Types[types.TBOOL]) + delete(s.vars, okVar) // ditto + return res, resok +} + +// temp allocates a temp of type t at position pos +func (s *state) temp(pos src.XPos, t *types.Type) (*ir.Name, *ssa.Value) { + tmp := typecheck.TempAt(pos, s.curfn, t) + if t.HasPointers() { + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem()) + } + addr := s.addr(tmp) + return tmp, addr +} + +// variable returns the value of a variable at the current location. +func (s *state) variable(n ir.Node, t *types.Type) *ssa.Value { + v := s.vars[n] + if v != nil { + return v + } + v = s.fwdVars[n] + if v != nil { + return v + } + + if s.curBlock == s.f.Entry { + // No variable should be live at entry. + s.f.Fatalf("value %v (%v) incorrectly live at entry", n, v) + } + // Make a FwdRef, which records a value that's live on block input. + // We'll find the matching definition as part of insertPhis. 
+ v = s.newValue0A(ssa.OpFwdRef, t, fwdRefAux{N: n}) + s.fwdVars[n] = v + if n.Op() == ir.ONAME { + s.addNamedValue(n.(*ir.Name), v) + } + return v +} + +func (s *state) mem() *ssa.Value { + return s.variable(memVar, types.TypeMem) +} + +func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) { + if n.Class == ir.Pxxx { + // Don't track our marker nodes (memVar etc.). + return + } + if ir.IsAutoTmp(n) { + // Don't track temporary variables. + return + } + if n.Class == ir.PPARAMOUT { + // Don't track named output values. This prevents return values + // from being assigned too early. See #14591 and #14762. TODO: allow this. + return + } + loc := ssa.LocalSlot{N: n, Type: n.Type(), Off: 0} + values, ok := s.f.NamedValues[loc] + if !ok { + s.f.Names = append(s.f.Names, &loc) + s.f.CanonicalLocalSlots[loc] = &loc + } + s.f.NamedValues[loc] = append(values, v) +} + +// Branch is an unresolved branch. +type Branch struct { + P *obj.Prog // branch instruction + B *ssa.Block // target +} + +// State contains state needed during Prog generation. +type State struct { + ABI obj.ABI + + pp *objw.Progs + + // Branches remembers all the branch instructions we've seen + // and where they would like to go. + Branches []Branch + + // JumpTables remembers all the jump tables we've seen. + JumpTables []*ssa.Block + + // bstart remembers where each block starts (indexed by block ID) + bstart []*obj.Prog + + maxarg int64 // largest frame size for arguments to calls made by the function + + // Map from GC safe points to liveness index, generated by + // liveness analysis. + livenessMap liveness.Map + + // partLiveArgs includes arguments that may be partially live, for which we + // need to generate instructions that spill the argument registers. + partLiveArgs map[*ir.Name]bool + + // lineRunStart records the beginning of the current run of instructions + // within a single block sharing the same line number + // Used to move statement marks to the beginning of such runs. 
+ lineRunStart *obj.Prog + + // wasm: The number of values on the WebAssembly stack. This is only used as a safeguard. + OnWasmStackSkipped int +} + +func (s *State) FuncInfo() *obj.FuncInfo { + return s.pp.CurFunc.LSym.Func() +} + +// Prog appends a new Prog. +func (s *State) Prog(as obj.As) *obj.Prog { + p := s.pp.Prog(as) + if objw.LosesStmtMark(as) { + return p + } + // Float a statement start to the beginning of any same-line run. + // lineRunStart is reset at block boundaries, which appears to work well. + if s.lineRunStart == nil || s.lineRunStart.Pos.Line() != p.Pos.Line() { + s.lineRunStart = p + } else if p.Pos.IsStmt() == src.PosIsStmt { + s.lineRunStart.Pos = s.lineRunStart.Pos.WithIsStmt() + p.Pos = p.Pos.WithNotStmt() + } + return p +} + +// Pc returns the current Prog. +func (s *State) Pc() *obj.Prog { + return s.pp.Next +} + +// SetPos sets the current source position. +func (s *State) SetPos(pos src.XPos) { + s.pp.Pos = pos +} + +// Br emits a single branch instruction and returns the instruction. +// Not all architectures need the returned instruction, but otherwise +// the boilerplate is common to all. +func (s *State) Br(op obj.As, target *ssa.Block) *obj.Prog { + p := s.Prog(op) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, Branch{P: p, B: target}) + return p +} + +// DebugFriendlySetPosFrom adjusts Pos.IsStmt subject to heuristics +// that reduce "jumpy" line number churn when debugging. +// Spill/fill/copy instructions from the register allocator, +// phi functions, and instructions with a no-pos position +// are examples of instructions that can cause churn. +func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) { + switch v.Op { + case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg: + // These are not statements + s.SetPos(v.Pos.WithNotStmt()) + default: + p := v.Pos + if p != src.NoXPos { + // If the position is defined, update the position. 
+ // Also convert default IsStmt to NotStmt; only + // explicit statement boundaries should appear + // in the generated code. + if p.IsStmt() != src.PosIsStmt { + if s.pp.Pos.IsStmt() == src.PosIsStmt && s.pp.Pos.SameFileAndLine(p) { + // If s.pp.Pos already has a statement mark, then it was set here (below) for + // the previous value. If an actual instruction had been emitted for that + // value, then the statement mark would have been reset. Since the statement + // mark of s.pp.Pos was not reset, this position (file/line) still needs a + // statement mark on an instruction. If file and line for this value are + // the same as the previous value, then the first instruction for this + // value will work to take the statement mark. Return early to avoid + // resetting the statement mark. + // + // The reset of s.pp.Pos occurs in (*Progs).Prog() -- if it emits + // an instruction, and the instruction's statement mark was set, + // and it is not one of the LosesStmtMark instructions, + // then Prog() resets the statement mark on the (*Progs).Pos. + return + } + p = p.WithNotStmt() + // Calls use the pos attached to v, but copy the statement mark from State + } + s.SetPos(p) + } else { + s.SetPos(s.pp.Pos.WithNotStmt()) + } + } +} + +// emit argument info (locations on stack) for traceback. +func emitArgInfo(e *ssafn, f *ssa.Func, pp *objw.Progs) { + ft := e.curfn.Type() + if ft.NumRecvs() == 0 && ft.NumParams() == 0 { + return + } + + x := EmitArgInfo(e.curfn, f.OwnAux.ABIInfo()) + x.Set(obj.AttrContentAddressable, true) + e.curfn.LSym.Func().ArgInfo = x + + // Emit a funcdata pointing at the arg info data. + p := pp.Prog(obj.AFUNCDATA) + p.From.SetConst(rtabi.FUNCDATA_ArgInfo) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = x +} + +// emit argument info (locations on stack) of f for traceback. 
+func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym { + x := base.Ctxt.Lookup(fmt.Sprintf("%s.arginfo%d", f.LSym.Name, f.ABI)) + // NOTE: do not set ContentAddressable here. This may be referenced from + // assembly code by name (in this case f is a declaration). + // Instead, set it in emitArgInfo above. + + PtrSize := int64(types.PtrSize) + uintptrTyp := types.Types[types.TUINTPTR] + + isAggregate := func(t *types.Type) bool { + return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() + } + + // Populate the data. + // The data is a stream of bytes, which contains the offsets and sizes of the + // non-aggregate arguments or non-aggregate fields/elements of aggregate-typed + // arguments, along with special "operators". Specifically, + // - for each non-aggrgate arg/field/element, its offset from FP (1 byte) and + // size (1 byte) + // - special operators: + // - 0xff - end of sequence + // - 0xfe - print { (at the start of an aggregate-typed argument) + // - 0xfd - print } (at the end of an aggregate-typed argument) + // - 0xfc - print ... (more args/fields/elements) + // - 0xfb - print _ (offset too large) + // These constants need to be in sync with runtime.traceback.go:printArgs. + const ( + _endSeq = 0xff + _startAgg = 0xfe + _endAgg = 0xfd + _dotdotdot = 0xfc + _offsetTooLarge = 0xfb + _special = 0xf0 // above this are operators, below this are ordinary offsets + ) + + const ( + limit = 10 // print no more than 10 args/components + maxDepth = 5 // no more than 5 layers of nesting + + // maxLen is a (conservative) upper bound of the byte stream length. For + // each arg/component, it has no more than 2 bytes of data (size, offset), + // and no more than one {, }, ... at each level (it cannot have both the + // data and ... unless it is the last one, just be conservative). Plus 1 + // for _endSeq. 
+ maxLen = (maxDepth*3+2)*limit + 1 + ) + + wOff := 0 + n := 0 + writebyte := func(o uint8) { wOff = objw.Uint8(x, wOff, o) } + + // Write one non-aggregate arg/field/element. + write1 := func(sz, offset int64) { + if offset >= _special { + writebyte(_offsetTooLarge) + } else { + writebyte(uint8(offset)) + writebyte(uint8(sz)) + } + n++ + } + + // Visit t recursively and write it out. + // Returns whether to continue visiting. + var visitType func(baseOffset int64, t *types.Type, depth int) bool + visitType = func(baseOffset int64, t *types.Type, depth int) bool { + if n >= limit { + writebyte(_dotdotdot) + return false + } + if !isAggregate(t) { + write1(t.Size(), baseOffset) + return true + } + writebyte(_startAgg) + depth++ + if depth >= maxDepth { + writebyte(_dotdotdot) + writebyte(_endAgg) + n++ + return true + } + switch { + case t.IsInterface(), t.IsString(): + _ = visitType(baseOffset, uintptrTyp, depth) && + visitType(baseOffset+PtrSize, uintptrTyp, depth) + case t.IsSlice(): + _ = visitType(baseOffset, uintptrTyp, depth) && + visitType(baseOffset+PtrSize, uintptrTyp, depth) && + visitType(baseOffset+PtrSize*2, uintptrTyp, depth) + case t.IsComplex(): + _ = visitType(baseOffset, types.FloatForComplex(t), depth) && + visitType(baseOffset+t.Size()/2, types.FloatForComplex(t), depth) + case t.IsArray(): + if t.NumElem() == 0 { + n++ // {} counts as a component + break + } + for i := int64(0); i < t.NumElem(); i++ { + if !visitType(baseOffset, t.Elem(), depth) { + break + } + baseOffset += t.Elem().Size() + } + case t.IsStruct(): + if t.NumFields() == 0 { + n++ // {} counts as a component + break + } + for _, field := range t.Fields() { + if !visitType(baseOffset+field.Offset, field.Type, depth) { + break + } + } + } + writebyte(_endAgg) + return true + } + + start := 0 + if strings.Contains(f.LSym.Name, "[") { + // Skip the dictionary argument - it is implicit and the user doesn't need to see it. 
+ start = 1 + } + + for _, a := range abiInfo.InParams()[start:] { + if !visitType(a.FrameOffset(abiInfo), a.Type, 0) { + break + } + } + writebyte(_endSeq) + if wOff > maxLen { + base.Fatalf("ArgInfo too large") + } + + return x +} + +// for wrapper, emit info of wrapped function. +func emitWrappedFuncInfo(e *ssafn, pp *objw.Progs) { + if base.Ctxt.Flag_linkshared { + // Relative reference (SymPtrOff) to another shared object doesn't work. + // Unfortunate. + return + } + + wfn := e.curfn.WrappedFunc + if wfn == nil { + return + } + + wsym := wfn.Linksym() + x := base.Ctxt.LookupInit(fmt.Sprintf("%s.wrapinfo", wsym.Name), func(x *obj.LSym) { + objw.SymPtrOff(x, 0, wsym) + x.Set(obj.AttrContentAddressable, true) + }) + e.curfn.LSym.Func().WrapInfo = x + + // Emit a funcdata pointing at the wrap info data. + p := pp.Prog(obj.AFUNCDATA) + p.From.SetConst(rtabi.FUNCDATA_WrapInfo) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = x +} + +// genssa appends entries to pp for each instruction in f. +func genssa(f *ssa.Func, pp *objw.Progs) { + var s State + s.ABI = f.OwnAux.Fn.ABI() + + e := f.Frontend().(*ssafn) + + s.livenessMap, s.partLiveArgs = liveness.Compute(e.curfn, f, e.stkptrsize, pp) + emitArgInfo(e, f, pp) + argLiveBlockMap, argLiveValueMap := liveness.ArgLiveness(e.curfn, f, pp) + + openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo + if openDeferInfo != nil { + // This function uses open-coded defers -- write out the funcdata + // info that we computed at the end of genssa. + p := pp.Prog(obj.AFUNCDATA) + p.From.SetConst(rtabi.FUNCDATA_OpenCodedDeferInfo) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = openDeferInfo + } + + emitWrappedFuncInfo(e, pp) + + // Remember where each block starts. 
+ s.bstart = make([]*obj.Prog, f.NumBlocks()) + s.pp = pp + var progToValue map[*obj.Prog]*ssa.Value + var progToBlock map[*obj.Prog]*ssa.Block + var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point. + gatherPrintInfo := f.PrintOrHtmlSSA || ssa.GenssaDump[f.Name] + if gatherPrintInfo { + progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues()) + progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks()) + f.Logf("genssa %s\n", f.Name) + progToBlock[s.pp.Next] = f.Blocks[0] + } + + if base.Ctxt.Flag_locationlists { + if cap(f.Cache.ValueToProgAfter) < f.NumValues() { + f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues()) + } + valueToProgAfter = f.Cache.ValueToProgAfter[:f.NumValues()] + for i := range valueToProgAfter { + valueToProgAfter[i] = nil + } + } + + // If the very first instruction is not tagged as a statement, + // debuggers may attribute it to previous function in program. + firstPos := src.NoXPos + for _, v := range f.Entry.Values { + if v.Pos.IsStmt() == src.PosIsStmt && v.Op != ssa.OpArg && v.Op != ssa.OpArgIntReg && v.Op != ssa.OpArgFloatReg && v.Op != ssa.OpLoadReg && v.Op != ssa.OpStoreReg { + firstPos = v.Pos + v.Pos = firstPos.WithDefaultStmt() + break + } + } + + // inlMarks has an entry for each Prog that implements an inline mark. + // It maps from that Prog to the global inlining id of the inlined body + // which should unwind to this Prog's location. + var inlMarks map[*obj.Prog]int32 + var inlMarkList []*obj.Prog + + // inlMarksByPos maps from a (column 1) source position to the set of + // Progs that are in the set above and have that source position. + var inlMarksByPos map[src.XPos][]*obj.Prog + + var argLiveIdx int = -1 // argument liveness info index + + // Emit basic blocks + for i, b := range f.Blocks { + s.bstart[b.ID] = s.pp.Next + s.lineRunStart = nil + s.SetPos(s.pp.Pos.WithNotStmt()) // It needs a non-empty Pos, but cannot be a statement boundary (yet). 
+ + if idx, ok := argLiveBlockMap[b.ID]; ok && idx != argLiveIdx { + argLiveIdx = idx + p := s.pp.Prog(obj.APCDATA) + p.From.SetConst(rtabi.PCDATA_ArgLiveIndex) + p.To.SetConst(int64(idx)) + } + + // Emit values in block + Arch.SSAMarkMoves(&s, b) + for _, v := range b.Values { + x := s.pp.Next + s.DebugFriendlySetPosFrom(v) + + if v.Op.ResultInArg0() && v.ResultReg() != v.Args[0].Reg() { + v.Fatalf("input[0] and output not in same register %s", v.LongString()) + } + + switch v.Op { + case ssa.OpInitMem: + // memory arg needs no code + case ssa.OpArg: + // input args need no code + case ssa.OpSP, ssa.OpSB: + // nothing to do + case ssa.OpSelect0, ssa.OpSelect1, ssa.OpSelectN, ssa.OpMakeResult: + // nothing to do + case ssa.OpGetG: + // nothing to do when there's a g register, + // and checkLower complains if there's not + case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive, ssa.OpWBend: + // nothing to do; already used by liveness + case ssa.OpPhi: + CheckLoweredPhi(v) + case ssa.OpConvert: + // nothing to do; no-op conversion for liveness + if v.Args[0].Reg() != v.Reg() { + v.Fatalf("OpConvert should be a no-op: %s; %s", v.Args[0].LongString(), v.LongString()) + } + case ssa.OpInlMark: + p := Arch.Ginsnop(s.pp) + if inlMarks == nil { + inlMarks = map[*obj.Prog]int32{} + inlMarksByPos = map[src.XPos][]*obj.Prog{} + } + inlMarks[p] = v.AuxInt32() + inlMarkList = append(inlMarkList, p) + pos := v.Pos.AtColumn1() + inlMarksByPos[pos] = append(inlMarksByPos[pos], p) + firstPos = src.NoXPos + + default: + // Special case for first line in function; move it to the start (which cannot be a register-valued instruction) + if firstPos != src.NoXPos && v.Op != ssa.OpArgIntReg && v.Op != ssa.OpArgFloatReg && v.Op != ssa.OpLoadReg && v.Op != ssa.OpStoreReg { + s.SetPos(firstPos) + firstPos = src.NoXPos + } + // Attach this safe point to the next + // instruction. 
+ s.pp.NextLive = s.livenessMap.Get(v) + s.pp.NextUnsafe = s.livenessMap.GetUnsafe(v) + + // let the backend handle it + Arch.SSAGenValue(&s, v) + } + + if idx, ok := argLiveValueMap[v.ID]; ok && idx != argLiveIdx { + argLiveIdx = idx + p := s.pp.Prog(obj.APCDATA) + p.From.SetConst(rtabi.PCDATA_ArgLiveIndex) + p.To.SetConst(int64(idx)) + } + + if base.Ctxt.Flag_locationlists { + valueToProgAfter[v.ID] = s.pp.Next + } + + if gatherPrintInfo { + for ; x != s.pp.Next; x = x.Link { + progToValue[x] = v + } + } + } + // If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused. + if s.bstart[b.ID] == s.pp.Next && len(b.Succs) == 1 && b.Succs[0].Block() == b { + p := Arch.Ginsnop(s.pp) + p.Pos = p.Pos.WithIsStmt() + if b.Pos == src.NoXPos { + b.Pos = p.Pos // It needs a file, otherwise a no-file non-zero line causes confusion. See #35652. + if b.Pos == src.NoXPos { + b.Pos = pp.Text.Pos // Sometimes p.Pos is empty. See #35695. + } + } + b.Pos = b.Pos.WithBogusLine() // Debuggers are not good about infinite loops, force a change in line number + } + + // Set unsafe mark for any end-of-block generated instructions + // (normally, conditional or unconditional branches). + // This is particularly important for empty blocks, as there + // are no values to inherit the unsafe mark from. + s.pp.NextUnsafe = s.livenessMap.GetUnsafeBlock(b) + + // Emit control flow instructions for block + var next *ssa.Block + if i < len(f.Blocks)-1 && base.Flag.N == 0 { + // If -N, leave next==nil so every block with successors + // ends in a JMP (except call blocks - plive doesn't like + // select{send,recv} followed by a JMP call). Helps keep + // line numbers for otherwise empty blocks. 
+ next = f.Blocks[i+1] + } + x := s.pp.Next + s.SetPos(b.Pos) + Arch.SSAGenBlock(&s, b, next) + if gatherPrintInfo { + for ; x != s.pp.Next; x = x.Link { + progToBlock[x] = b + } + } + } + if f.Blocks[len(f.Blocks)-1].Kind == ssa.BlockExit { + // We need the return address of a panic call to + // still be inside the function in question. So if + // it ends in a call which doesn't return, add a + // nop (which will never execute) after the call. + Arch.Ginsnop(pp) + } + if openDeferInfo != nil { + // When doing open-coded defers, generate a disconnected call to + // deferreturn and a return. This will be used to during panic + // recovery to unwind the stack and return back to the runtime. + s.pp.NextLive = s.livenessMap.DeferReturn + p := pp.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ir.Syms.Deferreturn + + // Load results into registers. So when a deferred function + // recovers a panic, it will return to caller with right results. + // The results are already in memory, because they are not SSA'd + // when the function has defers (see canSSAName). + for _, o := range f.OwnAux.ABIInfo().OutParams() { + n := o.Name + rts, offs := o.RegisterTypesAndOffsets() + for i := range o.Registers { + Arch.LoadRegResult(&s, f, rts[i], ssa.ObjRegForAbiReg(o.Registers[i], f.Config), n, offs[i]) + } + } + + pp.Prog(obj.ARET) + } + + if inlMarks != nil { + hasCall := false + + // We have some inline marks. Try to find other instructions we're + // going to emit anyway, and use those instructions instead of the + // inline marks. + for p := pp.Text; p != nil; p = p.Link { + if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || Arch.LinkArch.Family == sys.Wasm { + // Don't use 0-sized instructions as inline marks, because we need + // to identify inline mark instructions by pc offset. + // (Some of these instructions are sometimes zero-sized, sometimes not. 
+ // We must not use anything that even might be zero-sized.) + // TODO: are there others? + continue + } + if _, ok := inlMarks[p]; ok { + // Don't use inline marks themselves. We don't know + // whether they will be zero-sized or not yet. + continue + } + if p.As == obj.ACALL || p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO { + hasCall = true + } + pos := p.Pos.AtColumn1() + s := inlMarksByPos[pos] + if len(s) == 0 { + continue + } + for _, m := range s { + // We found an instruction with the same source position as + // some of the inline marks. + // Use this instruction instead. + p.Pos = p.Pos.WithIsStmt() // promote position to a statement + pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[m]) + // Make the inline mark a real nop, so it doesn't generate any code. + m.As = obj.ANOP + m.Pos = src.NoXPos + m.From = obj.Addr{} + m.To = obj.Addr{} + } + delete(inlMarksByPos, pos) + } + // Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction). + for _, p := range inlMarkList { + if p.As != obj.ANOP { + pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[p]) + } + } + + if e.stksize == 0 && !hasCall { + // Frameless leaf function. It doesn't need any preamble, + // so make sure its first instruction isn't from an inlined callee. + // If it is, add a nop at the start of the function with a position + // equal to the start of the function. + // This ensures that runtime.FuncForPC(uintptr(reflect.ValueOf(fn).Pointer())).Name() + // returns the right answer. See issue 58300. + for p := pp.Text; p != nil; p = p.Link { + if p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.ANOP { + continue + } + if base.Ctxt.PosTable.Pos(p.Pos).Base().InliningIndex() >= 0 { + // Make a real (not 0-sized) nop. + nop := Arch.Ginsnop(pp) + nop.Pos = e.curfn.Pos().WithIsStmt() + + // Unfortunately, Ginsnop puts the instruction at the + // end of the list. Move it up to just before p. 
+ + // Unlink from the current list. + for x := pp.Text; x != nil; x = x.Link { + if x.Link == nop { + x.Link = nop.Link + break + } + } + // Splice in right before p. + for x := pp.Text; x != nil; x = x.Link { + if x.Link == p { + nop.Link = p + x.Link = nop + break + } + } + } + break + } + } + } + + if base.Ctxt.Flag_locationlists { + var debugInfo *ssa.FuncDebug + debugInfo = e.curfn.DebugInfo.(*ssa.FuncDebug) + if e.curfn.ABI == obj.ABIInternal && base.Flag.N != 0 { + ssa.BuildFuncDebugNoOptimized(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset, debugInfo) + } else { + ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists, StackOffset, debugInfo) + } + bstart := s.bstart + idToIdx := make([]int, f.NumBlocks()) + for i, b := range f.Blocks { + idToIdx[b.ID] = i + } + // Register a callback that will be used later to fill in PCs into location + // lists. At the moment, Prog.Pc is a sequence number; it's not a real PC + // until after assembly, so the translation needs to be deferred. + debugInfo.GetPC = func(b, v ssa.ID) int64 { + switch v { + case ssa.BlockStart.ID: + if b == f.Entry.ID { + return 0 // Start at the very beginning, at the assembler-generated prologue. + // this should only happen for function args (ssa.OpArg) + } + return bstart[b].Pc + case ssa.BlockEnd.ID: + blk := f.Blocks[idToIdx[b]] + nv := len(blk.Values) + return valueToProgAfter[blk.Values[nv-1].ID].Pc + case ssa.FuncEnd.ID: + return e.curfn.LSym.Size + default: + return valueToProgAfter[v].Pc + } + } + } + + // Resolve branches, and relax DefaultStmt into NotStmt + for _, br := range s.Branches { + br.P.To.SetTarget(s.bstart[br.B.ID]) + if br.P.Pos.IsStmt() != src.PosIsStmt { + br.P.Pos = br.P.Pos.WithNotStmt() + } else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt { + br.P.Pos = br.P.Pos.WithNotStmt() + } + + } + + // Resolve jump table destinations. 
+ for _, jt := range s.JumpTables { + // Convert from *Block targets to *Prog targets. + targets := make([]*obj.Prog, len(jt.Succs)) + for i, e := range jt.Succs { + targets[i] = s.bstart[e.Block().ID] + } + // Add to list of jump tables to be resolved at assembly time. + // The assembler converts from *Prog entries to absolute addresses + // once it knows instruction byte offsets. + fi := pp.CurFunc.LSym.Func() + fi.JumpTables = append(fi.JumpTables, obj.JumpTable{Sym: jt.Aux.(*obj.LSym), Targets: targets}) + } + + if e.log { // spew to stdout + filename := "" + for p := pp.Text; p != nil; p = p.Link { + if p.Pos.IsKnown() && p.InnermostFilename() != filename { + filename = p.InnermostFilename() + f.Logf("# %s\n", filename) + } + + var s string + if v, ok := progToValue[p]; ok { + s = v.String() + } else if b, ok := progToBlock[p]; ok { + s = b.String() + } else { + s = " " // most value and branch strings are 2-3 characters long + } + f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString()) + } + } + if f.HTMLWriter != nil { // spew to ssa.html + var buf strings.Builder + buf.WriteString("") + buf.WriteString("
    ") + filename := "" + for p := pp.Text; p != nil; p = p.Link { + // Don't spam every line with the file name, which is often huge. + // Only print changes, and "unknown" is not a change. + if p.Pos.IsKnown() && p.InnermostFilename() != filename { + filename = p.InnermostFilename() + buf.WriteString("
    ") + buf.WriteString(html.EscapeString("# " + filename)) + buf.WriteString("
    ") + } + + buf.WriteString("
    ") + if v, ok := progToValue[p]; ok { + buf.WriteString(v.HTML()) + } else if b, ok := progToBlock[p]; ok { + buf.WriteString("" + b.HTML() + "") + } + buf.WriteString("
    ") + buf.WriteString("
    ") + fmt.Fprintf(&buf, "%.5d (%s) %s", p.Pc, p.InnermostLineNumber(), p.InnermostLineNumberHTML(), html.EscapeString(p.InstructionString())) + buf.WriteString("
    ") + } + buf.WriteString("
    ") + buf.WriteString("
    ") + f.HTMLWriter.WriteColumn("genssa", "genssa", "ssa-prog", buf.String()) + } + if ssa.GenssaDump[f.Name] { + fi := f.DumpFileForPhase("genssa") + if fi != nil { + + // inliningDiffers if any filename changes or if any line number except the innermost (last index) changes. + inliningDiffers := func(a, b []src.Pos) bool { + if len(a) != len(b) { + return true + } + for i := range a { + if a[i].Filename() != b[i].Filename() { + return true + } + if i != len(a)-1 && a[i].Line() != b[i].Line() { + return true + } + } + return false + } + + var allPosOld []src.Pos + var allPos []src.Pos + + for p := pp.Text; p != nil; p = p.Link { + if p.Pos.IsKnown() { + allPos = allPos[:0] + p.Ctxt.AllPos(p.Pos, func(pos src.Pos) { allPos = append(allPos, pos) }) + if inliningDiffers(allPos, allPosOld) { + for _, pos := range allPos { + fmt.Fprintf(fi, "# %s:%d\n", pos.Filename(), pos.Line()) + } + allPos, allPosOld = allPosOld, allPos // swap, not copy, so that they do not share slice storage. + } + } + + var s string + if v, ok := progToValue[p]; ok { + s = v.String() + } else if b, ok := progToBlock[p]; ok { + s = b.String() + } else { + s = " " // most value and branch strings are 2-3 characters long + } + fmt.Fprintf(fi, " %-6s\t%.5d %s\t%s\n", s, p.Pc, ssa.StmtString(p.Pos), p.InstructionString()) + } + fi.Close() + } + } + + defframe(&s, e, f) + + f.HTMLWriter.Close() + f.HTMLWriter = nil +} + +func defframe(s *State, e *ssafn, f *ssa.Func) { + pp := s.pp + + s.maxarg = types.RoundUp(s.maxarg, e.stkalign) + frame := s.maxarg + e.stksize + if Arch.PadFrame != nil { + frame = Arch.PadFrame(frame) + } + + // Fill in argument and frame size. + pp.Text.To.Type = obj.TYPE_TEXTSIZE + pp.Text.To.Val = int32(types.RoundUp(f.OwnAux.ArgWidth(), int64(types.RegSize))) + pp.Text.To.Offset = frame + + p := pp.Text + + // Insert code to spill argument registers if the named slot may be partially + // live. 
That is, the named slot is considered live by liveness analysis, + // (because a part of it is live), but we may not spill all parts into the + // slot. This can only happen with aggregate-typed arguments that are SSA-able + // and not address-taken (for non-SSA-able or address-taken arguments we always + // spill upfront). + // Note: spilling is unnecessary in the -N/no-optimize case, since all values + // will be considered non-SSAable and spilled up front. + // TODO(register args) Make liveness more fine-grained to that partial spilling is okay. + if f.OwnAux.ABIInfo().InRegistersUsed() != 0 && base.Flag.N == 0 { + // First, see if it is already spilled before it may be live. Look for a spill + // in the entry block up to the first safepoint. + type nameOff struct { + n *ir.Name + off int64 + } + partLiveArgsSpilled := make(map[nameOff]bool) + for _, v := range f.Entry.Values { + if v.Op.IsCall() { + break + } + if v.Op != ssa.OpStoreReg || v.Args[0].Op != ssa.OpArgIntReg { + continue + } + n, off := ssa.AutoVar(v) + if n.Class != ir.PPARAM || n.Addrtaken() || !ssa.CanSSA(n.Type()) || !s.partLiveArgs[n] { + continue + } + partLiveArgsSpilled[nameOff{n, off}] = true + } + + // Then, insert code to spill registers if not already. + for _, a := range f.OwnAux.ABIInfo().InParams() { + n := a.Name + if n == nil || n.Addrtaken() || !ssa.CanSSA(n.Type()) || !s.partLiveArgs[n] || len(a.Registers) <= 1 { + continue + } + rts, offs := a.RegisterTypesAndOffsets() + for i := range a.Registers { + if !rts[i].HasPointers() { + continue + } + if partLiveArgsSpilled[nameOff{n, offs[i]}] { + continue // already spilled + } + reg := ssa.ObjRegForAbiReg(a.Registers[i], f.Config) + p = Arch.SpillArgReg(pp, p, f, rts[i], reg, n, offs[i]) + } + } + } + + // Insert code to zero ambiguously live variables so that the + // garbage collector only sees initialized values when it + // looks for pointers. + var lo, hi int64 + + // Opaque state for backend to use. 
Current backends use it to + // keep track of which helper registers have been zeroed. + var state uint32 + + // Iterate through declarations. Autos are sorted in decreasing + // frame offset order. + for _, n := range e.curfn.Dcl { + if !n.Needzero() { + continue + } + if n.Class != ir.PAUTO { + e.Fatalf(n.Pos(), "needzero class %d", n.Class) + } + if n.Type().Size()%int64(types.PtrSize) != 0 || n.FrameOffset()%int64(types.PtrSize) != 0 || n.Type().Size() == 0 { + e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset_) + } + + if lo != hi && n.FrameOffset()+n.Type().Size() >= lo-int64(2*types.RegSize) { + // Merge with range we already have. + lo = n.FrameOffset() + continue + } + + // Zero old range + p = Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state) + + // Set new range. + lo = n.FrameOffset() + hi = lo + n.Type().Size() + } + + // Zero final range. + Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state) +} + +// For generating consecutive jump instructions to model a specific branching +type IndexJump struct { + Jump obj.As + Index int +} + +func (s *State) oneJump(b *ssa.Block, jump *IndexJump) { + p := s.Br(jump.Jump, b.Succs[jump.Index].Block()) + p.Pos = b.Pos +} + +// CombJump generates combinational instructions (2 at present) for a block jump, +// thereby the behaviour of non-standard condition codes could be simulated +func (s *State) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) { + switch next { + case b.Succs[0].Block(): + s.oneJump(b, &jumps[0][0]) + s.oneJump(b, &jumps[0][1]) + case b.Succs[1].Block(): + s.oneJump(b, &jumps[1][0]) + s.oneJump(b, &jumps[1][1]) + default: + var q *obj.Prog + if b.Likely != ssa.BranchUnlikely { + s.oneJump(b, &jumps[1][0]) + s.oneJump(b, &jumps[1][1]) + q = s.Br(obj.AJMP, b.Succs[1].Block()) + } else { + s.oneJump(b, &jumps[0][0]) + s.oneJump(b, &jumps[0][1]) + q = s.Br(obj.AJMP, b.Succs[0].Block()) + } + q.Pos = b.Pos + } +} + +// AddAux adds the offset in the aux fields (AuxInt and Aux) of 
v to a. +func AddAux(a *obj.Addr, v *ssa.Value) { + AddAux2(a, v, v.AuxInt) +} +func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { + if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR { + v.Fatalf("bad AddAux addr %v", a) + } + // add integer offset + a.Offset += offset + + // If no additional symbol offset, we're done. + if v.Aux == nil { + return + } + // Add symbol's offset from its base register. + switch n := v.Aux.(type) { + case *ssa.AuxCall: + a.Name = obj.NAME_EXTERN + a.Sym = n.Fn + case *obj.LSym: + a.Name = obj.NAME_EXTERN + a.Sym = n + case *ir.Name: + if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) { + a.Name = obj.NAME_PARAM + } else { + a.Name = obj.NAME_AUTO + } + a.Sym = n.Linksym() + a.Offset += n.FrameOffset() + default: + v.Fatalf("aux in %s not implemented %#v", v, v.Aux) + } +} + +// extendIndex extends v to a full int width. +// panic with the given kind if v does not fit in an int (only on 32-bit archs). +func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value { + size := idx.Type.Size() + if size == s.config.PtrSize { + return idx + } + if size > s.config.PtrSize { + // truncate 64-bit indexes on 32-bit pointer archs. Test the + // high word and branch to out-of-bounds failure if it is not 0. 
+ var lo *ssa.Value + if idx.Type.IsSigned() { + lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TINT], idx) + } else { + lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TUINT], idx) + } + if bounded || base.Flag.B != 0 { + return lo + } + bNext := s.f.NewBlock(ssa.BlockPlain) + bPanic := s.f.NewBlock(ssa.BlockExit) + hi := s.newValue1(ssa.OpInt64Hi, types.Types[types.TUINT32], idx) + cmp := s.newValue2(ssa.OpEq32, types.Types[types.TBOOL], hi, s.constInt32(types.Types[types.TUINT32], 0)) + if !idx.Type.IsSigned() { + switch kind { + case ssa.BoundsIndex: + kind = ssa.BoundsIndexU + case ssa.BoundsSliceAlen: + kind = ssa.BoundsSliceAlenU + case ssa.BoundsSliceAcap: + kind = ssa.BoundsSliceAcapU + case ssa.BoundsSliceB: + kind = ssa.BoundsSliceBU + case ssa.BoundsSlice3Alen: + kind = ssa.BoundsSlice3AlenU + case ssa.BoundsSlice3Acap: + kind = ssa.BoundsSlice3AcapU + case ssa.BoundsSlice3B: + kind = ssa.BoundsSlice3BU + case ssa.BoundsSlice3C: + kind = ssa.BoundsSlice3CU + } + } + b := s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(cmp) + b.Likely = ssa.BranchLikely + b.AddEdgeTo(bNext) + b.AddEdgeTo(bPanic) + + s.startBlock(bPanic) + mem := s.newValue4I(ssa.OpPanicExtend, types.TypeMem, int64(kind), hi, lo, len, s.mem()) + s.endBlock().SetControl(mem) + s.startBlock(bNext) + + return lo + } + + // Extend value to the required size + var op ssa.Op + if idx.Type.IsSigned() { + switch 10*size + s.config.PtrSize { + case 14: + op = ssa.OpSignExt8to32 + case 18: + op = ssa.OpSignExt8to64 + case 24: + op = ssa.OpSignExt16to32 + case 28: + op = ssa.OpSignExt16to64 + case 48: + op = ssa.OpSignExt32to64 + default: + s.Fatalf("bad signed index extension %s", idx.Type) + } + } else { + switch 10*size + s.config.PtrSize { + case 14: + op = ssa.OpZeroExt8to32 + case 18: + op = ssa.OpZeroExt8to64 + case 24: + op = ssa.OpZeroExt16to32 + case 28: + op = ssa.OpZeroExt16to64 + case 48: + op = ssa.OpZeroExt32to64 + default: + s.Fatalf("bad unsigned index extension %s", 
idx.Type) + } + } + return s.newValue1(op, types.Types[types.TINT], idx) +} + +// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values. +// Called during ssaGenValue. +func CheckLoweredPhi(v *ssa.Value) { + if v.Op != ssa.OpPhi { + v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString()) + } + if v.Type.IsMemory() { + return + } + f := v.Block.Func + loc := f.RegAlloc[v.ID] + for _, a := range v.Args { + if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead? + v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func) + } + } +} + +// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block, +// except for incoming in-register arguments. +// The output of LoweredGetClosurePtr is generally hardwired to the correct register. +// That register contains the closure pointer on closure entry. +func CheckLoweredGetClosurePtr(v *ssa.Value) { + entry := v.Block.Func.Entry + if entry != v.Block { + base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v) + } + for _, w := range entry.Values { + if w == v { + break + } + switch w.Op { + case ssa.OpArgIntReg, ssa.OpArgFloatReg: + // okay + default: + base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v) + } + } +} + +// CheckArgReg ensures that v is in the function's entry block. 
+func CheckArgReg(v *ssa.Value) { + entry := v.Block.Func.Entry + if entry != v.Block { + base.Fatalf("in %s, badly placed ArgIReg or ArgFReg: %v %v", v.Block.Func.Name, v.Block, v) + } +} + +func AddrAuto(a *obj.Addr, v *ssa.Value) { + n, off := ssa.AutoVar(v) + a.Type = obj.TYPE_MEM + a.Sym = n.Linksym() + a.Reg = int16(Arch.REGSP) + a.Offset = n.FrameOffset() + off + if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) { + a.Name = obj.NAME_PARAM + } else { + a.Name = obj.NAME_AUTO + } +} + +// Call returns a new CALL instruction for the SSA value v. +// It uses PrepareCall to prepare the call. +func (s *State) Call(v *ssa.Value) *obj.Prog { + pPosIsStmt := s.pp.Pos.IsStmt() // The statement-ness fo the call comes from ssaGenState + s.PrepareCall(v) + + p := s.Prog(obj.ACALL) + if pPosIsStmt == src.PosIsStmt { + p.Pos = v.Pos.WithIsStmt() + } else { + p.Pos = v.Pos.WithNotStmt() + } + if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil { + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = sym.Fn + } else { + // TODO(mdempsky): Can these differences be eliminated? + switch Arch.LinkArch.Family { + case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm: + p.To.Type = obj.TYPE_REG + case sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64: + p.To.Type = obj.TYPE_MEM + default: + base.Fatalf("unknown indirect call family") + } + p.To.Reg = v.Args[0].Reg() + } + return p +} + +// TailCall returns a new tail call instruction for the SSA value v. +// It is like Call, but for a tail call. +func (s *State) TailCall(v *ssa.Value) *obj.Prog { + p := s.Call(v) + p.As = obj.ARET + return p +} + +// PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping. +// It must be called immediately before emitting the actual CALL instruction, +// since it emits PCDATA for the stack map at the call (calls are safe points). 
+func (s *State) PrepareCall(v *ssa.Value) { + idx := s.livenessMap.Get(v) + if !idx.StackMapValid() { + // See Liveness.hasStackMap. + if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == ir.Syms.WBZero || sym.Fn == ir.Syms.WBMove) { + base.Fatalf("missing stack map index for %v", v.LongString()) + } + } + + call, ok := v.Aux.(*ssa.AuxCall) + + if ok { + // Record call graph information for nowritebarrierrec + // analysis. + if nowritebarrierrecCheck != nil { + nowritebarrierrecCheck.recordCall(s.pp.CurFunc, call.Fn, v.Pos) + } + } + + if s.maxarg < v.AuxInt { + s.maxarg = v.AuxInt + } +} + +// UseArgs records the fact that an instruction needs a certain amount of +// callee args space for its use. +func (s *State) UseArgs(n int64) { + if s.maxarg < n { + s.maxarg = n + } +} + +// fieldIdx finds the index of the field referred to by the ODOT node n. +func fieldIdx(n *ir.SelectorExpr) int { + t := n.X.Type() + if !t.IsStruct() { + panic("ODOT's LHS is not a struct") + } + + for i, f := range t.Fields() { + if f.Sym == n.Sel { + if f.Offset != n.Offset() { + panic("field offset doesn't match") + } + return i + } + } + panic(fmt.Sprintf("can't find field in expr %v\n", n)) + + // TODO: keep the result of this function somewhere in the ODOT Node + // so we don't have to recompute it each time we need it. +} + +// ssafn holds frontend information about a function that the backend is processing. +// It also exports a bunch of compiler services for the ssa backend. +type ssafn struct { + curfn *ir.Func + strings map[string]*obj.LSym // map from constant string to data symbols + stksize int64 // stack size for current frame + stkptrsize int64 // prefix of stack containing pointers + + // alignment for current frame. + // NOTE: when stkalign > PtrSize, currently this only ensures the offsets of + // objects in the stack frame are aligned. The stack pointer is still aligned + // only PtrSize. 
+ stkalign int64 + + log bool // print ssa debug to the stdout +} + +// StringData returns a symbol which +// is the data component of a global string constant containing s. +func (e *ssafn) StringData(s string) *obj.LSym { + if aux, ok := e.strings[s]; ok { + return aux + } + if e.strings == nil { + e.strings = make(map[string]*obj.LSym) + } + data := staticdata.StringSym(e.curfn.Pos(), s) + e.strings[s] = data + return data +} + +// SplitSlot returns a slot representing the data of parent starting at offset. +func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot { + node := parent.N + + if node.Class != ir.PAUTO || node.Addrtaken() { + // addressed things and non-autos retain their parents (i.e., cannot truly be split) + return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset} + } + + sym := &types.Sym{Name: node.Sym().Name + suffix, Pkg: types.LocalPkg} + n := e.curfn.NewLocal(parent.N.Pos(), sym, t) + n.SetUsed(true) + n.SetEsc(ir.EscNever) + types.CalcSize(t) + return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset} +} + +// Logf logs a message from the compiler. +func (e *ssafn) Logf(msg string, args ...interface{}) { + if e.log { + fmt.Printf(msg, args...) + } +} + +func (e *ssafn) Log() bool { + return e.log +} + +// Fatalf reports a compiler error and exits. +func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) { + base.Pos = pos + nargs := append([]interface{}{ir.FuncName(e.curfn)}, args...) + base.Fatalf("'%s': "+msg, nargs...) +} + +// Warnl reports a "warning", which is usually flag-triggered +// logging output for the benefit of tests. +func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) { + base.WarnfAt(pos, fmt_, args...) 
+} + +func (e *ssafn) Debug_checknil() bool { + return base.Debug.Nil != 0 +} + +func (e *ssafn) UseWriteBarrier() bool { + return base.Flag.WB +} + +func (e *ssafn) Syslook(name string) *obj.LSym { + switch name { + case "goschedguarded": + return ir.Syms.Goschedguarded + case "writeBarrier": + return ir.Syms.WriteBarrier + case "wbZero": + return ir.Syms.WBZero + case "wbMove": + return ir.Syms.WBMove + case "cgoCheckMemmove": + return ir.Syms.CgoCheckMemmove + case "cgoCheckPtrWrite": + return ir.Syms.CgoCheckPtrWrite + } + e.Fatalf(src.NoXPos, "unknown Syslook func %v", name) + return nil +} + +func (e *ssafn) Func() *ir.Func { + return e.curfn +} + +func clobberBase(n ir.Node) ir.Node { + if n.Op() == ir.ODOT { + n := n.(*ir.SelectorExpr) + if n.X.Type().NumFields() == 1 { + return clobberBase(n.X) + } + } + if n.Op() == ir.OINDEX { + n := n.(*ir.IndexExpr) + if n.X.Type().IsArray() && n.X.Type().NumElem() == 1 { + return clobberBase(n.X) + } + } + return n +} + +// callTargetLSym returns the correct LSym to call 'callee' using its ABI. +func callTargetLSym(callee *ir.Name) *obj.LSym { + if callee.Func == nil { + // TODO(austin): This happens in case of interface method I.M from imported package. + // It's ABIInternal, and would be better if callee.Func was never nil and we didn't + // need this case. + return callee.Linksym() + } + + return callee.LinksymABI(callee.Func.ABI) +} + +func min8(a, b int8) int8 { + if a < b { + return a + } + return b +} + +func max8(a, b int8) int8 { + if a > b { + return a + } + return b +} + +// deferStructFnField is the field index of _defer.fn. +const deferStructFnField = 4 + +var deferType *types.Type + +// deferstruct returns a type interchangeable with runtime._defer. +// Make sure this stays in sync with runtime/runtime2.go:_defer. 
+func deferstruct() *types.Type { + if deferType != nil { + return deferType + } + + makefield := func(name string, t *types.Type) *types.Field { + sym := (*types.Pkg)(nil).Lookup(name) + return types.NewField(src.NoXPos, sym, t) + } + + fields := []*types.Field{ + makefield("heap", types.Types[types.TBOOL]), + makefield("rangefunc", types.Types[types.TBOOL]), + makefield("sp", types.Types[types.TUINTPTR]), + makefield("pc", types.Types[types.TUINTPTR]), + // Note: the types here don't really matter. Defer structures + // are always scanned explicitly during stack copying and GC, + // so we make them uintptr type even though they are real pointers. + makefield("fn", types.Types[types.TUINTPTR]), + makefield("link", types.Types[types.TUINTPTR]), + makefield("head", types.Types[types.TUINTPTR]), + } + if name := fields[deferStructFnField].Sym.Name; name != "fn" { + base.Fatalf("deferStructFnField is %q, not fn", name) + } + + n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("_defer")) + typ := types.NewNamed(n) + n.SetType(typ) + n.SetTypecheck(1) + + // build struct holding the above fields + typ.SetUnderlying(types.NewStruct(fields)) + types.CalcStructSize(typ) + + deferType = typ + return typ +} + +// SpillSlotAddr uses LocalSlot information to initialize an obj.Addr +// The resulting addr is used in a non-standard context -- in the prologue +// of a function, before the frame has been constructed, so the standard +// addressing for the parameters will be wrong. 
+func SpillSlotAddr(spill ssa.Spill, baseReg int16, extraOffset int64) obj.Addr { + return obj.Addr{ + Name: obj.NAME_NONE, + Type: obj.TYPE_MEM, + Reg: baseReg, + Offset: spill.Offset + extraOffset, + } +} + +var ( + BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym + ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym +) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/staticdata/data.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/staticdata/data.go new file mode 100644 index 0000000000000000000000000000000000000000..78c332eeb893dcc1a437310577b4c42004d3e476 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/staticdata/data.go @@ -0,0 +1,346 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package staticdata + +import ( + "encoding/base64" + "fmt" + "go/constant" + "io" + "os" + "sort" + "strconv" + "sync" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/types" + "cmd/internal/notsha256" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" +) + +// InitAddrOffset writes the static name symbol lsym to n, it does not modify n. +// It's the caller responsibility to make sure lsym is from ONAME/PEXTERN node. +func InitAddrOffset(n *ir.Name, noff int64, lsym *obj.LSym, off int64) { + if n.Op() != ir.ONAME { + base.Fatalf("InitAddr n op %v", n.Op()) + } + if n.Sym() == nil { + base.Fatalf("InitAddr nil n sym") + } + s := n.Linksym() + s.WriteAddr(base.Ctxt, noff, types.PtrSize, lsym, off) +} + +// InitAddr is InitAddrOffset, with offset fixed to 0. +func InitAddr(n *ir.Name, noff int64, lsym *obj.LSym) { + InitAddrOffset(n, noff, lsym, 0) +} + +// InitSlice writes a static slice symbol {lsym, lencap, lencap} to n+noff, it does not modify n. +// It's the caller responsibility to make sure lsym is from ONAME node. 
+func InitSlice(n *ir.Name, noff int64, lsym *obj.LSym, lencap int64) { + s := n.Linksym() + s.WriteAddr(base.Ctxt, noff, types.PtrSize, lsym, 0) + s.WriteInt(base.Ctxt, noff+types.SliceLenOffset, types.PtrSize, lencap) + s.WriteInt(base.Ctxt, noff+types.SliceCapOffset, types.PtrSize, lencap) +} + +func InitSliceBytes(nam *ir.Name, off int64, s string) { + if nam.Op() != ir.ONAME { + base.Fatalf("InitSliceBytes %v", nam) + } + InitSlice(nam, off, slicedata(nam.Pos(), s), int64(len(s))) +} + +const ( + stringSymPrefix = "go:string." + stringSymPattern = ".gostring.%d.%s" +) + +// shortHashString converts the hash to a string for use with stringSymPattern. +// We cut it to 16 bytes and then base64-encode to make it even smaller. +func shortHashString(hash []byte) string { + return base64.StdEncoding.EncodeToString(hash[:16]) +} + +// StringSym returns a symbol containing the string s. +// The symbol contains the string data, not a string header. +func StringSym(pos src.XPos, s string) (data *obj.LSym) { + var symname string + if len(s) > 100 { + // Huge strings are hashed to avoid long names in object files. + // Indulge in some paranoia by writing the length of s, too, + // as protection against length extension attacks. + // Same pattern is known to fileStringSym below. + h := notsha256.New() + io.WriteString(h, s) + symname = fmt.Sprintf(stringSymPattern, len(s), shortHashString(h.Sum(nil))) + } else { + // Small strings get named directly by their contents. + symname = strconv.Quote(s) + } + + symdata := base.Ctxt.Lookup(stringSymPrefix + symname) + if !symdata.OnList() { + off := dstringdata(symdata, 0, s, pos, "string") + objw.Global(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL) + symdata.Set(obj.AttrContentAddressable, true) + } + + return symdata +} + +// StringSymNoCommon is like StringSym, but produces a symbol that is not content- +// addressable. 
This symbol is not supposed to appear in the final binary, it is +// only used to pass string arguments to the linker like R_USENAMEDMETHOD does. +func StringSymNoCommon(s string) (data *obj.LSym) { + var nameSym obj.LSym + nameSym.WriteString(base.Ctxt, 0, len(s), s) + objw.Global(&nameSym, int32(len(s)), obj.RODATA) + return &nameSym +} + +// maxFileSize is the maximum file size permitted by the linker +// (see issue #9862). +const maxFileSize = int64(2e9) + +// fileStringSym returns a symbol for the contents and the size of file. +// If readonly is true, the symbol shares storage with any literal string +// or other file with the same content and is placed in a read-only section. +// If readonly is false, the symbol is a read-write copy separate from any other, +// for use as the backing store of a []byte. +// The content hash of file is copied into hash. (If hash is nil, nothing is copied.) +// The returned symbol contains the data itself, not a string header. +func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.LSym, int64, error) { + f, err := os.Open(file) + if err != nil { + return nil, 0, err + } + defer f.Close() + info, err := f.Stat() + if err != nil { + return nil, 0, err + } + if !info.Mode().IsRegular() { + return nil, 0, fmt.Errorf("not a regular file") + } + size := info.Size() + if size <= 1*1024 { + data, err := io.ReadAll(f) + if err != nil { + return nil, 0, err + } + if int64(len(data)) != size { + return nil, 0, fmt.Errorf("file changed between reads") + } + var sym *obj.LSym + if readonly { + sym = StringSym(pos, string(data)) + } else { + sym = slicedata(pos, string(data)) + } + if len(hash) > 0 { + sum := notsha256.Sum256(data) + copy(hash, sum[:]) + } + return sym, size, nil + } + if size > maxFileSize { + // ggloblsym takes an int32, + // and probably the rest of the toolchain + // can't handle such big symbols either. + // See golang.org/issue/9862. 
+ return nil, 0, fmt.Errorf("file too large (%d bytes > %d bytes)", size, maxFileSize) + } + + // File is too big to read and keep in memory. + // Compute hash if needed for read-only content hashing or if the caller wants it. + var sum []byte + if readonly || len(hash) > 0 { + h := notsha256.New() + n, err := io.Copy(h, f) + if err != nil { + return nil, 0, err + } + if n != size { + return nil, 0, fmt.Errorf("file changed between reads") + } + sum = h.Sum(nil) + copy(hash, sum) + } + + var symdata *obj.LSym + if readonly { + symname := fmt.Sprintf(stringSymPattern, size, shortHashString(sum)) + symdata = base.Ctxt.Lookup(stringSymPrefix + symname) + if !symdata.OnList() { + info := symdata.NewFileInfo() + info.Name = file + info.Size = size + objw.Global(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL) + // Note: AttrContentAddressable cannot be set here, + // because the content-addressable-handling code + // does not know about file symbols. + } + } else { + // Emit a zero-length data symbol + // and then fix up length and content to use file. + symdata = slicedata(pos, "") + symdata.Size = size + symdata.Type = objabi.SNOPTRDATA + info := symdata.NewFileInfo() + info.Name = file + info.Size = size + } + + return symdata, size, nil +} + +var slicedataGen int + +func slicedata(pos src.XPos, s string) *obj.LSym { + slicedataGen++ + symname := fmt.Sprintf(".gobytes.%d", slicedataGen) + lsym := types.LocalPkg.Lookup(symname).LinksymABI(obj.ABI0) + off := dstringdata(lsym, 0, s, pos, "slice") + objw.Global(lsym, int32(off), obj.NOPTR|obj.LOCAL) + + return lsym +} + +func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int { + // Objects that are too large will cause the data section to overflow right away, + // causing a cryptic error message by the linker. Check for oversize objects here + // and provide a useful error message instead. 
+ if int64(len(t)) > 2e9 { + base.ErrorfAt(pos, 0, "%v with length %v is too big", what, len(t)) + return 0 + } + + s.WriteString(base.Ctxt, int64(off), len(t), t) + return off + len(t) +} + +var ( + funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym) + funcsyms []*ir.Name // functions that need function value symbols +) + +// FuncLinksym returns n·f, the function value symbol for n. +func FuncLinksym(n *ir.Name) *obj.LSym { + if n.Op() != ir.ONAME || n.Class != ir.PFUNC { + base.Fatalf("expected func name: %v", n) + } + s := n.Sym() + + // funcsymsmu here serves to protect not just mutations of funcsyms (below), + // but also the package lookup of the func sym name, + // since this function gets called concurrently from the backend. + // There are no other concurrent package lookups in the backend, + // except for the types package, which is protected separately. + // Reusing funcsymsmu to also cover this package lookup + // avoids a general, broader, expensive package lookup mutex. + funcsymsmu.Lock() + sf, existed := s.Pkg.LookupOK(ir.FuncSymName(s)) + if !existed { + funcsyms = append(funcsyms, n) + } + funcsymsmu.Unlock() + + return sf.Linksym() +} + +func GlobalLinksym(n *ir.Name) *obj.LSym { + if n.Op() != ir.ONAME || n.Class != ir.PEXTERN { + base.Fatalf("expected global variable: %v", n) + } + return n.Linksym() +} + +func WriteFuncSyms() { + sort.Slice(funcsyms, func(i, j int) bool { + return funcsyms[i].Linksym().Name < funcsyms[j].Linksym().Name + }) + for _, nam := range funcsyms { + s := nam.Sym() + sf := s.Pkg.Lookup(ir.FuncSymName(s)).Linksym() + + // While compiling package runtime, we might try to create + // funcsyms for functions from both types.LocalPkg and + // ir.Pkgs.Runtime. + if base.Flag.CompilingRuntime && sf.OnList() { + continue + } + + // Function values must always reference ABIInternal + // entry points. 
+ target := s.Linksym() + if target.ABI() != obj.ABIInternal { + base.Fatalf("expected ABIInternal: %v has %v", target, target.ABI()) + } + objw.SymPtr(sf, 0, target, 0) + objw.Global(sf, int32(types.PtrSize), obj.DUPOK|obj.RODATA) + } +} + +// InitConst writes the static literal c to n. +// Neither n nor c is modified. +func InitConst(n *ir.Name, noff int64, c ir.Node, wid int) { + if n.Op() != ir.ONAME { + base.Fatalf("InitConst n op %v", n.Op()) + } + if n.Sym() == nil { + base.Fatalf("InitConst nil n sym") + } + if c.Op() == ir.ONIL { + return + } + if c.Op() != ir.OLITERAL { + base.Fatalf("InitConst c op %v", c.Op()) + } + s := n.Linksym() + switch u := c.Val(); u.Kind() { + case constant.Bool: + i := int64(obj.Bool2int(constant.BoolVal(u))) + s.WriteInt(base.Ctxt, noff, wid, i) + + case constant.Int: + s.WriteInt(base.Ctxt, noff, wid, ir.IntVal(c.Type(), u)) + + case constant.Float: + f, _ := constant.Float64Val(u) + switch c.Type().Kind() { + case types.TFLOAT32: + s.WriteFloat32(base.Ctxt, noff, float32(f)) + case types.TFLOAT64: + s.WriteFloat64(base.Ctxt, noff, f) + } + + case constant.Complex: + re, _ := constant.Float64Val(constant.Real(u)) + im, _ := constant.Float64Val(constant.Imag(u)) + switch c.Type().Kind() { + case types.TCOMPLEX64: + s.WriteFloat32(base.Ctxt, noff, float32(re)) + s.WriteFloat32(base.Ctxt, noff+4, float32(im)) + case types.TCOMPLEX128: + s.WriteFloat64(base.Ctxt, noff, re) + s.WriteFloat64(base.Ctxt, noff+8, im) + } + + case constant.String: + i := constant.StringVal(u) + symdata := StringSym(n.Pos(), i) + s.WriteAddr(base.Ctxt, noff, types.PtrSize, symdata, 0) + s.WriteInt(base.Ctxt, noff+int64(types.PtrSize), types.PtrSize, int64(len(i))) + + default: + base.Fatalf("InitConst unhandled OLITERAL %v", c) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/staticdata/embed.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/staticdata/embed.go new file mode 100644 index 
0000000000000000000000000000000000000000..a4d493ce5e359ea37b6334686b37d247969a7d94 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/staticdata/embed.go @@ -0,0 +1,174 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package staticdata + +import ( + "path" + "sort" + "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/types" + "cmd/internal/obj" +) + +const ( + embedUnknown = iota + embedBytes + embedString + embedFiles +) + +func embedFileList(v *ir.Name, kind int) []string { + // Build list of files to store. + have := make(map[string]bool) + var list []string + for _, e := range *v.Embed { + for _, pattern := range e.Patterns { + files, ok := base.Flag.Cfg.Embed.Patterns[pattern] + if !ok { + base.ErrorfAt(e.Pos, 0, "invalid go:embed: build system did not map pattern: %s", pattern) + } + for _, file := range files { + if base.Flag.Cfg.Embed.Files[file] == "" { + base.ErrorfAt(e.Pos, 0, "invalid go:embed: build system did not map file: %s", file) + continue + } + if !have[file] { + have[file] = true + list = append(list, file) + } + if kind == embedFiles { + for dir := path.Dir(file); dir != "." && !have[dir]; dir = path.Dir(dir) { + have[dir] = true + list = append(list, dir+"/") + } + } + } + } + } + sort.Slice(list, func(i, j int) bool { + return embedFileLess(list[i], list[j]) + }) + + if kind == embedString || kind == embedBytes { + if len(list) > 1 { + base.ErrorfAt(v.Pos(), 0, "invalid go:embed: multiple files for type %v", v.Type()) + return nil + } + } + + return list +} + +// embedKind determines the kind of embedding variable. 
+func embedKind(typ *types.Type) int { + if typ.Sym() != nil && typ.Sym().Name == "FS" && typ.Sym().Pkg.Path == "embed" { + return embedFiles + } + if typ.Kind() == types.TSTRING { + return embedString + } + if typ.IsSlice() && typ.Elem().Kind() == types.TUINT8 { + return embedBytes + } + return embedUnknown +} + +func embedFileNameSplit(name string) (dir, elem string, isDir bool) { + if name[len(name)-1] == '/' { + isDir = true + name = name[:len(name)-1] + } + i := len(name) - 1 + for i >= 0 && name[i] != '/' { + i-- + } + if i < 0 { + return ".", name, isDir + } + return name[:i], name[i+1:], isDir +} + +// embedFileLess implements the sort order for a list of embedded files. +// See the comment inside ../../../../embed/embed.go's Files struct for rationale. +func embedFileLess(x, y string) bool { + xdir, xelem, _ := embedFileNameSplit(x) + ydir, yelem, _ := embedFileNameSplit(y) + return xdir < ydir || xdir == ydir && xelem < yelem +} + +// WriteEmbed emits the init data for a //go:embed variable, +// which is either a string, a []byte, or an embed.FS. +func WriteEmbed(v *ir.Name) { + // TODO(mdempsky): User errors should be reported by the frontend. 
+ + commentPos := (*v.Embed)[0].Pos + if base.Flag.Cfg.Embed.Patterns == nil { + base.ErrorfAt(commentPos, 0, "invalid go:embed: build system did not supply embed configuration") + return + } + kind := embedKind(v.Type()) + if kind == embedUnknown { + base.ErrorfAt(v.Pos(), 0, "go:embed cannot apply to var of type %v", v.Type()) + return + } + + files := embedFileList(v, kind) + switch kind { + case embedString, embedBytes: + file := files[0] + fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], kind == embedString, nil) + if err != nil { + base.ErrorfAt(v.Pos(), 0, "embed %s: %v", file, err) + } + sym := v.Linksym() + off := 0 + off = objw.SymPtr(sym, off, fsym, 0) // data string + off = objw.Uintptr(sym, off, uint64(size)) // len + if kind == embedBytes { + objw.Uintptr(sym, off, uint64(size)) // cap for slice + } + + case embedFiles: + slicedata := v.Sym().Pkg.Lookup(v.Sym().Name + `.files`).Linksym() + off := 0 + // []files pointed at by Files + off = objw.SymPtr(slicedata, off, slicedata, 3*types.PtrSize) // []file, pointing just past slice + off = objw.Uintptr(slicedata, off, uint64(len(files))) + off = objw.Uintptr(slicedata, off, uint64(len(files))) + + // embed/embed.go type file is: + // name string + // data string + // hash [16]byte + // Emit one of these per file in the set. 
+ const hashSize = 16 + hash := make([]byte, hashSize) + for _, file := range files { + off = objw.SymPtr(slicedata, off, StringSym(v.Pos(), file), 0) // file string + off = objw.Uintptr(slicedata, off, uint64(len(file))) + if strings.HasSuffix(file, "/") { + // entry for directory - no data + off = objw.Uintptr(slicedata, off, 0) + off = objw.Uintptr(slicedata, off, 0) + off += hashSize + } else { + fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], true, hash) + if err != nil { + base.ErrorfAt(v.Pos(), 0, "embed %s: %v", file, err) + } + off = objw.SymPtr(slicedata, off, fsym, 0) // data string + off = objw.Uintptr(slicedata, off, uint64(size)) + off = int(slicedata.WriteBytes(base.Ctxt, int64(off), hash)) + } + } + objw.Global(slicedata, int32(off), obj.RODATA|obj.LOCAL) + sym := v.Linksym() + objw.SymPtr(sym, 0, slicedata, 0) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/staticinit/sched.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/staticinit/sched.go new file mode 100644 index 0000000000000000000000000000000000000000..4191f6997ebcf47af0475bf7479271bb1fd2ace5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/staticinit/sched.go @@ -0,0 +1,1210 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package staticinit + +import ( + "fmt" + "go/constant" + "go/token" + "os" + "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/staticdata" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" +) + +type Entry struct { + Xoffset int64 // struct, array only + Expr ir.Node // bytes of run-time computed expressions +} + +type Plan struct { + E []Entry +} + +// An Schedule is used to decompose assignment statements into +// static and dynamic initialization parts. Static initializations are +// handled by populating variables' linker symbol data, while dynamic +// initializations are accumulated to be executed in order. +type Schedule struct { + // Out is the ordered list of dynamic initialization + // statements. + Out []ir.Node + + Plans map[ir.Node]*Plan + Temps map[ir.Node]*ir.Name + + // seenMutation tracks whether we've seen an initialization + // expression that may have modified other package-scope variables + // within this package. + seenMutation bool +} + +func (s *Schedule) append(n ir.Node) { + s.Out = append(s.Out, n) +} + +// StaticInit adds an initialization statement n to the schedule. +func (s *Schedule) StaticInit(n ir.Node) { + if !s.tryStaticInit(n) { + if base.Flag.Percent != 0 { + ir.Dump("StaticInit failed", n) + } + s.append(n) + } +} + +// varToMapInit holds book-keeping state for global map initialization; +// it records the init function created by the compiler to host the +// initialization code for the map in question. +var varToMapInit map[*ir.Name]*ir.Func + +// MapInitToVar is the inverse of VarToMapInit; it maintains a mapping +// from a compiler-generated init function to the map the function is +// initializing. 
+var MapInitToVar map[*ir.Func]*ir.Name + +// recordFuncForVar establishes a mapping between global map var "v" and +// outlined init function "fn" (and vice versa); so that we can use +// the mappings later on to update relocations. +func recordFuncForVar(v *ir.Name, fn *ir.Func) { + if varToMapInit == nil { + varToMapInit = make(map[*ir.Name]*ir.Func) + MapInitToVar = make(map[*ir.Func]*ir.Name) + } + varToMapInit[v] = fn + MapInitToVar[fn] = v +} + +// allBlank reports whether every node in exprs is blank. +func allBlank(exprs []ir.Node) bool { + for _, expr := range exprs { + if !ir.IsBlank(expr) { + return false + } + } + return true +} + +// tryStaticInit attempts to statically execute an initialization +// statement and reports whether it succeeded. +func (s *Schedule) tryStaticInit(n ir.Node) bool { + var lhs []ir.Node + var rhs ir.Node + + switch n.Op() { + default: + base.FatalfAt(n.Pos(), "unexpected initialization statement: %v", n) + case ir.OAS: + n := n.(*ir.AssignStmt) + lhs, rhs = []ir.Node{n.X}, n.Y + case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: + n := n.(*ir.AssignListStmt) + if len(n.Lhs) < 2 || len(n.Rhs) != 1 { + base.FatalfAt(n.Pos(), "unexpected shape for %v: %v", n.Op(), n) + } + lhs, rhs = n.Lhs, n.Rhs[0] + case ir.OCALLFUNC: + return false // outlined map init call; no mutations + } + + if !s.seenMutation { + s.seenMutation = mayModifyPkgVar(rhs) + } + + if allBlank(lhs) && !AnySideEffects(rhs) { + return true // discard + } + + // Only worry about simple "l = r" assignments. The OAS2* + // assignments mostly necessitate dynamic execution anyway. + if len(lhs) > 1 { + return false + } + + lno := ir.SetPos(n) + defer func() { base.Pos = lno }() + + nam := lhs[0].(*ir.Name) + return s.StaticAssign(nam, 0, rhs, nam.Type()) +} + +// like staticassign but we are copying an already +// initialized value r. 
+func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool { + if rn.Class == ir.PFUNC { + // TODO if roff != 0 { panic } + staticdata.InitAddr(l, loff, staticdata.FuncLinksym(rn)) + return true + } + if rn.Class != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg { + return false + } + if rn.Defn == nil { + // No explicit initialization value. Probably zeroed but perhaps + // supplied externally and of unknown value. + return false + } + if rn.Defn.Op() != ir.OAS { + return false + } + if rn.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675) + return false + } + if rn.Embed != nil { + return false + } + orig := rn + r := rn.Defn.(*ir.AssignStmt).Y + if r == nil { + // types2.InitOrder doesn't include default initializers. + base.Fatalf("unexpected initializer: %v", rn.Defn) + } + + // Variable may have been reassigned by a user-written function call + // that was invoked to initialize another global variable (#51913). + if s.seenMutation { + if base.Debug.StaticCopy != 0 { + base.WarnfAt(l.Pos(), "skipping static copy of %v+%v with %v", l, loff, r) + } + return false + } + + for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), typ) { + r = r.(*ir.ConvExpr).X + } + + switch r.Op() { + case ir.OMETHEXPR: + r = r.(*ir.SelectorExpr).FuncName() + fallthrough + case ir.ONAME: + r := r.(*ir.Name) + if s.staticcopy(l, loff, r, typ) { + return true + } + // We may have skipped past one or more OCONVNOPs, so + // use conv to ensure r is assignable to l (#13263). 
+ dst := ir.Node(l) + if loff != 0 || !types.Identical(typ, l.Type()) { + dst = ir.NewNameOffsetExpr(base.Pos, l, loff, typ) + } + s.append(ir.NewAssignStmt(base.Pos, dst, typecheck.Conv(r, typ))) + return true + + case ir.ONIL: + return true + + case ir.OLITERAL: + if ir.IsZero(r) { + return true + } + staticdata.InitConst(l, loff, r, int(typ.Size())) + return true + + case ir.OADDR: + r := r.(*ir.AddrExpr) + if a, ok := r.X.(*ir.Name); ok && a.Op() == ir.ONAME { + staticdata.InitAddr(l, loff, staticdata.GlobalLinksym(a)) + return true + } + + case ir.OPTRLIT: + r := r.(*ir.AddrExpr) + switch r.X.Op() { + case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT: + // copy pointer + staticdata.InitAddr(l, loff, staticdata.GlobalLinksym(s.Temps[r])) + return true + } + + case ir.OSLICELIT: + r := r.(*ir.CompLitExpr) + // copy slice + staticdata.InitSlice(l, loff, staticdata.GlobalLinksym(s.Temps[r]), r.Len) + return true + + case ir.OARRAYLIT, ir.OSTRUCTLIT: + r := r.(*ir.CompLitExpr) + p := s.Plans[r] + for i := range p.E { + e := &p.E[i] + typ := e.Expr.Type() + if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL { + staticdata.InitConst(l, loff+e.Xoffset, e.Expr, int(typ.Size())) + continue + } + x := e.Expr + if x.Op() == ir.OMETHEXPR { + x = x.(*ir.SelectorExpr).FuncName() + } + if x.Op() == ir.ONAME && s.staticcopy(l, loff+e.Xoffset, x.(*ir.Name), typ) { + continue + } + // Requires computation, but we're + // copying someone else's computation. + ll := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, typ) + rr := ir.NewNameOffsetExpr(base.Pos, orig, e.Xoffset, typ) + ir.SetPos(rr) + s.append(ir.NewAssignStmt(base.Pos, ll, rr)) + } + + return true + } + + return false +} + +func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Type) bool { + if r == nil { + // No explicit initialization value. Either zero or supplied + // externally. 
+ return true + } + for r.Op() == ir.OCONVNOP { + r = r.(*ir.ConvExpr).X + } + + assign := func(pos src.XPos, a *ir.Name, aoff int64, v ir.Node) { + if s.StaticAssign(a, aoff, v, v.Type()) { + return + } + var lhs ir.Node + if ir.IsBlank(a) { + // Don't use NameOffsetExpr with blank (#43677). + lhs = ir.BlankNode + } else { + lhs = ir.NewNameOffsetExpr(pos, a, aoff, v.Type()) + } + s.append(ir.NewAssignStmt(pos, lhs, v)) + } + + switch r.Op() { + case ir.ONAME: + r := r.(*ir.Name) + return s.staticcopy(l, loff, r, typ) + + case ir.OMETHEXPR: + r := r.(*ir.SelectorExpr) + return s.staticcopy(l, loff, r.FuncName(), typ) + + case ir.ONIL: + return true + + case ir.OLITERAL: + if ir.IsZero(r) { + return true + } + staticdata.InitConst(l, loff, r, int(typ.Size())) + return true + + case ir.OADDR: + r := r.(*ir.AddrExpr) + if name, offset, ok := StaticLoc(r.X); ok && name.Class == ir.PEXTERN { + staticdata.InitAddrOffset(l, loff, name.Linksym(), offset) + return true + } + fallthrough + + case ir.OPTRLIT: + r := r.(*ir.AddrExpr) + switch r.X.Op() { + case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT: + // Init pointer. + a := StaticName(r.X.Type()) + + s.Temps[r] = a + staticdata.InitAddr(l, loff, a.Linksym()) + + // Init underlying literal. + assign(base.Pos, a, 0, r.X) + return true + } + //dump("not static ptrlit", r); + + case ir.OSTR2BYTES: + r := r.(*ir.ConvExpr) + if l.Class == ir.PEXTERN && r.X.Op() == ir.OLITERAL { + sval := ir.StringVal(r.X) + staticdata.InitSliceBytes(l, loff, sval) + return true + } + + case ir.OSLICELIT: + r := r.(*ir.CompLitExpr) + s.initplan(r) + // Init slice. + ta := types.NewArray(r.Type().Elem(), r.Len) + ta.SetNoalg(true) + a := StaticName(ta) + s.Temps[r] = a + staticdata.InitSlice(l, loff, a.Linksym(), r.Len) + // Fall through to init underlying array. 
+ l = a + loff = 0 + fallthrough + + case ir.OARRAYLIT, ir.OSTRUCTLIT: + r := r.(*ir.CompLitExpr) + s.initplan(r) + + p := s.Plans[r] + for i := range p.E { + e := &p.E[i] + if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL { + staticdata.InitConst(l, loff+e.Xoffset, e.Expr, int(e.Expr.Type().Size())) + continue + } + ir.SetPos(e.Expr) + assign(base.Pos, l, loff+e.Xoffset, e.Expr) + } + + return true + + case ir.OMAPLIT: + break + + case ir.OCLOSURE: + r := r.(*ir.ClosureExpr) + if ir.IsTrivialClosure(r) { + if base.Debug.Closure > 0 { + base.WarnfAt(r.Pos(), "closure converted to global") + } + // Issue 59680: if the closure we're looking at was produced + // by inlining, it could be marked as hidden, which we don't + // want (moving the func to a static init will effectively + // hide it from escape analysis). Mark as non-hidden here. + // so that it will participated in escape analysis. + r.Func.SetIsHiddenClosure(false) + // Closures with no captured variables are globals, + // so the assignment can be done at link time. + // TODO if roff != 0 { panic } + staticdata.InitAddr(l, loff, staticdata.FuncLinksym(r.Func.Nname)) + return true + } + ir.ClosureDebugRuntimeCheck(r) + + case ir.OCONVIFACE: + // This logic is mirrored in isStaticCompositeLiteral. + // If you change something here, change it there, and vice versa. + + // Determine the underlying concrete type and value we are converting from. + r := r.(*ir.ConvExpr) + val := ir.Node(r) + for val.Op() == ir.OCONVIFACE { + val = val.(*ir.ConvExpr).X + } + + if val.Type().IsInterface() { + // val is an interface type. + // If val is nil, we can statically initialize l; + // both words are zero and so there no work to do, so report success. + // If val is non-nil, we have no concrete type to record, + // and we won't be able to statically initialize its value, so report failure. 
+ return val.Op() == ir.ONIL + } + + if val.Type().HasShape() { + // See comment in cmd/compile/internal/walk/convert.go:walkConvInterface + return false + } + + reflectdata.MarkTypeUsedInInterface(val.Type(), l.Linksym()) + + var itab *ir.AddrExpr + if typ.IsEmptyInterface() { + itab = reflectdata.TypePtrAt(base.Pos, val.Type()) + } else { + itab = reflectdata.ITabAddrAt(base.Pos, val.Type(), typ) + } + + // Create a copy of l to modify while we emit data. + + // Emit itab, advance offset. + staticdata.InitAddr(l, loff, itab.X.(*ir.LinksymOffsetExpr).Linksym) + + // Emit data. + if types.IsDirectIface(val.Type()) { + if val.Op() == ir.ONIL { + // Nil is zero, nothing to do. + return true + } + // Copy val directly into n. + ir.SetPos(val) + assign(base.Pos, l, loff+int64(types.PtrSize), val) + } else { + // Construct temp to hold val, write pointer to temp into n. + a := StaticName(val.Type()) + s.Temps[val] = a + assign(base.Pos, a, 0, val) + staticdata.InitAddr(l, loff+int64(types.PtrSize), a.Linksym()) + } + + return true + + case ir.OINLCALL: + r := r.(*ir.InlinedCallExpr) + return s.staticAssignInlinedCall(l, loff, r, typ) + } + + if base.Flag.Percent != 0 { + ir.Dump("not static", r) + } + return false +} + +func (s *Schedule) initplan(n ir.Node) { + if s.Plans[n] != nil { + return + } + p := new(Plan) + s.Plans[n] = p + switch n.Op() { + default: + base.Fatalf("initplan") + + case ir.OARRAYLIT, ir.OSLICELIT: + n := n.(*ir.CompLitExpr) + var k int64 + for _, a := range n.List { + if a.Op() == ir.OKEY { + kv := a.(*ir.KeyExpr) + k = typecheck.IndexConst(kv.Key) + if k < 0 { + base.Fatalf("initplan arraylit: invalid index %v", kv.Key) + } + a = kv.Value + } + s.addvalue(p, k*n.Type().Elem().Size(), a) + k++ + } + + case ir.OSTRUCTLIT: + n := n.(*ir.CompLitExpr) + for _, a := range n.List { + if a.Op() != ir.OSTRUCTKEY { + base.Fatalf("initplan structlit") + } + a := a.(*ir.StructKeyExpr) + if a.Sym().IsBlank() { + continue + } + s.addvalue(p, a.Field.Offset, 
a.Value) + } + + case ir.OMAPLIT: + n := n.(*ir.CompLitExpr) + for _, a := range n.List { + if a.Op() != ir.OKEY { + base.Fatalf("initplan maplit") + } + a := a.(*ir.KeyExpr) + s.addvalue(p, -1, a.Value) + } + } +} + +func (s *Schedule) addvalue(p *Plan, xoffset int64, n ir.Node) { + // special case: zero can be dropped entirely + if ir.IsZero(n) { + return + } + + // special case: inline struct and array (not slice) literals + if isvaluelit(n) { + s.initplan(n) + q := s.Plans[n] + for _, qe := range q.E { + // qe is a copy; we are not modifying entries in q.E + qe.Xoffset += xoffset + p.E = append(p.E, qe) + } + return + } + + // add to plan + p.E = append(p.E, Entry{Xoffset: xoffset, Expr: n}) +} + +func (s *Schedule) staticAssignInlinedCall(l *ir.Name, loff int64, call *ir.InlinedCallExpr, typ *types.Type) bool { + if base.Debug.InlStaticInit == 0 { + return false + } + + // Handle the special case of an inlined call of + // a function body with a single return statement, + // which turns into a single assignment plus a goto. + // + // For example code like this: + // + // type T struct{ x int } + // func F(x int) *T { return &T{x} } + // var Global = F(400) + // + // turns into IR like this: + // + // INLCALL-init + // . AS2-init + // . . DCL # x.go:18:13 + // . . . NAME-p.x Class:PAUTO Offset:0 InlFormal OnStack Used int tc(1) # x.go:14:9,x.go:18:13 + // . AS2 Def tc(1) # x.go:18:13 + // . AS2-Lhs + // . . NAME-p.x Class:PAUTO Offset:0 InlFormal OnStack Used int tc(1) # x.go:14:9,x.go:18:13 + // . AS2-Rhs + // . . LITERAL-400 int tc(1) # x.go:18:14 + // . INLMARK Index:1 # +x.go:18:13 + // INLCALL PTR-*T tc(1) # x.go:18:13 + // INLCALL-Body + // . BLOCK tc(1) # x.go:18:13 + // . BLOCK-List + // . . DCL tc(1) # x.go:18:13 + // . . . NAME-p.~R0 Class:PAUTO Offset:0 OnStack Used PTR-*T tc(1) # x.go:18:13 + // . . AS2 tc(1) # x.go:18:13 + // . . AS2-Lhs + // . . . NAME-p.~R0 Class:PAUTO Offset:0 OnStack Used PTR-*T tc(1) # x.go:18:13 + // . . AS2-Rhs + // . . . 
INLINED RETURN ARGUMENT HERE + // . . GOTO p..i1 tc(1) # x.go:18:13 + // . LABEL p..i1 # x.go:18:13 + // INLCALL-ReturnVars + // . NAME-p.~R0 Class:PAUTO Offset:0 OnStack Used PTR-*T tc(1) # x.go:18:13 + // + // In non-unified IR, the tree is slightly different: + // - if there are no arguments to the inlined function, + // the INLCALL-init omits the AS2. + // - the DCL inside BLOCK is on the AS2's init list, + // not its own statement in the top level of the BLOCK. + // + // If the init values are side-effect-free and each either only + // appears once in the function body or is safely repeatable, + // then we inline the value expressions into the return argument + // and then call StaticAssign to handle that copy. + // + // This handles simple cases like + // + // var myError = errors.New("mine") + // + // where errors.New is + // + // func New(text string) error { + // return &errorString{text} + // } + // + // We could make things more sophisticated but this kind of initializer + // is the most important case for us to get right. 
+ + init := call.Init() + var as2init *ir.AssignListStmt + if len(init) == 2 && init[0].Op() == ir.OAS2 && init[1].Op() == ir.OINLMARK { + as2init = init[0].(*ir.AssignListStmt) + } else if len(init) == 1 && init[0].Op() == ir.OINLMARK { + as2init = new(ir.AssignListStmt) + } else { + return false + } + if len(call.Body) != 2 || call.Body[0].Op() != ir.OBLOCK || call.Body[1].Op() != ir.OLABEL { + return false + } + label := call.Body[1].(*ir.LabelStmt).Label + block := call.Body[0].(*ir.BlockStmt) + list := block.List + var dcl *ir.Decl + if len(list) == 3 && list[0].Op() == ir.ODCL { + dcl = list[0].(*ir.Decl) + list = list[1:] + } + if len(list) != 2 || + list[0].Op() != ir.OAS2 || + list[1].Op() != ir.OGOTO || + list[1].(*ir.BranchStmt).Label != label { + return false + } + as2body := list[0].(*ir.AssignListStmt) + if dcl == nil { + ainit := as2body.Init() + if len(ainit) != 1 || ainit[0].Op() != ir.ODCL { + return false + } + dcl = ainit[0].(*ir.Decl) + } + if len(as2body.Lhs) != 1 || as2body.Lhs[0] != dcl.X { + return false + } + + // Can't remove the parameter variables if an address is taken. + for _, v := range as2init.Lhs { + if v.(*ir.Name).Addrtaken() { + return false + } + } + // Can't move the computation of the args if they have side effects. + for _, r := range as2init.Rhs { + if AnySideEffects(r) { + return false + } + } + + // Can only substitute arg for param if param is used + // at most once or is repeatable. + count := make(map[*ir.Name]int) + for _, x := range as2init.Lhs { + count[x.(*ir.Name)] = 0 + } + + hasNonTrivialClosure := false + ir.Visit(as2body.Rhs[0], func(n ir.Node) { + if name, ok := n.(*ir.Name); ok { + if c, ok := count[name]; ok { + count[name] = c + 1 + } + } + if clo, ok := n.(*ir.ClosureExpr); ok { + hasNonTrivialClosure = hasNonTrivialClosure || !ir.IsTrivialClosure(clo) + } + }) + + // If there's a non-trivial closure, it has captured the param, + // so we can't substitute arg for param. 
+ if hasNonTrivialClosure { + return false + } + + for name, c := range count { + if c > 1 { + // Check whether corresponding initializer can be repeated. + // Something like 1 can be; make(chan int) or &T{} cannot, + // because they need to evaluate to the same result in each use. + for i, n := range as2init.Lhs { + if n == name && !canRepeat(as2init.Rhs[i]) { + return false + } + } + } + } + + // Possible static init. + // Build tree with args substituted for params and try it. + args := make(map[*ir.Name]ir.Node) + for i, v := range as2init.Lhs { + if ir.IsBlank(v) { + continue + } + args[v.(*ir.Name)] = as2init.Rhs[i] + } + r, ok := subst(as2body.Rhs[0], args) + if !ok { + return false + } + ok = s.StaticAssign(l, loff, r, typ) + + if ok && base.Flag.Percent != 0 { + ir.Dump("static inlined-LEFT", l) + ir.Dump("static inlined-ORIG", call) + ir.Dump("static inlined-RIGHT", r) + } + return ok +} + +// from here down is the walk analysis +// of composite literals. +// most of the work is to generate +// data statements for the constant +// part of the composite literal. + +var statuniqgen int // name generator for static temps + +// StaticName returns a name backed by a (writable) static data symbol. +// Use readonlystaticname for read-only node. +func StaticName(t *types.Type) *ir.Name { + // Don't use LookupNum; it interns the resulting string, but these are all unique. + sym := typecheck.Lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)) + statuniqgen++ + + n := ir.NewNameAt(base.Pos, sym, t) + sym.Def = n + + n.Class = ir.PEXTERN + typecheck.Target.Externs = append(typecheck.Target.Externs, n) + + n.Linksym().Set(obj.AttrStatic, true) + return n +} + +// StaticLoc returns the static address of n, if n has one, or else nil. 
+func StaticLoc(n ir.Node) (name *ir.Name, offset int64, ok bool) { + if n == nil { + return nil, 0, false + } + + switch n.Op() { + case ir.ONAME: + n := n.(*ir.Name) + return n, 0, true + + case ir.OMETHEXPR: + n := n.(*ir.SelectorExpr) + return StaticLoc(n.FuncName()) + + case ir.ODOT: + n := n.(*ir.SelectorExpr) + if name, offset, ok = StaticLoc(n.X); !ok { + break + } + offset += n.Offset() + return name, offset, true + + case ir.OINDEX: + n := n.(*ir.IndexExpr) + if n.X.Type().IsSlice() { + break + } + if name, offset, ok = StaticLoc(n.X); !ok { + break + } + l := getlit(n.Index) + if l < 0 { + break + } + + // Check for overflow. + if n.Type().Size() != 0 && types.MaxWidth/n.Type().Size() <= int64(l) { + break + } + offset += int64(l) * n.Type().Size() + return name, offset, true + } + + return nil, 0, false +} + +func isSideEffect(n ir.Node) bool { + switch n.Op() { + // Assume side effects unless we know otherwise. + default: + return true + + // No side effects here (arguments are checked separately). + case ir.ONAME, + ir.ONONAME, + ir.OTYPE, + ir.OLITERAL, + ir.ONIL, + ir.OADD, + ir.OSUB, + ir.OOR, + ir.OXOR, + ir.OADDSTR, + ir.OADDR, + ir.OANDAND, + ir.OBYTES2STR, + ir.ORUNES2STR, + ir.OSTR2BYTES, + ir.OSTR2RUNES, + ir.OCAP, + ir.OCOMPLIT, + ir.OMAPLIT, + ir.OSTRUCTLIT, + ir.OARRAYLIT, + ir.OSLICELIT, + ir.OPTRLIT, + ir.OCONV, + ir.OCONVIFACE, + ir.OCONVNOP, + ir.ODOT, + ir.OEQ, + ir.ONE, + ir.OLT, + ir.OLE, + ir.OGT, + ir.OGE, + ir.OKEY, + ir.OSTRUCTKEY, + ir.OLEN, + ir.OMUL, + ir.OLSH, + ir.ORSH, + ir.OAND, + ir.OANDNOT, + ir.ONEW, + ir.ONOT, + ir.OBITNOT, + ir.OPLUS, + ir.ONEG, + ir.OOROR, + ir.OPAREN, + ir.ORUNESTR, + ir.OREAL, + ir.OIMAG, + ir.OCOMPLEX: + return false + + // Only possible side effect is division by zero. 
+ case ir.ODIV, ir.OMOD: + n := n.(*ir.BinaryExpr) + if n.Y.Op() != ir.OLITERAL || constant.Sign(n.Y.Val()) == 0 { + return true + } + + // Only possible side effect is panic on invalid size, + // but many makechan and makemap use size zero, which is definitely OK. + case ir.OMAKECHAN, ir.OMAKEMAP: + n := n.(*ir.MakeExpr) + if !ir.IsConst(n.Len, constant.Int) || constant.Sign(n.Len.Val()) != 0 { + return true + } + + // Only possible side effect is panic on invalid size. + // TODO(rsc): Merge with previous case (probably breaks toolstash -cmp). + case ir.OMAKESLICE, ir.OMAKESLICECOPY: + return true + } + return false +} + +// AnySideEffects reports whether n contains any operations that could have observable side effects. +func AnySideEffects(n ir.Node) bool { + return ir.Any(n, isSideEffect) +} + +// mayModifyPkgVar reports whether expression n may modify any +// package-scope variables declared within the current package. +func mayModifyPkgVar(n ir.Node) bool { + // safeLHS reports whether the assigned-to variable lhs is either a + // local variable or a global from another package. + safeLHS := func(lhs ir.Node) bool { + v, ok := ir.OuterValue(lhs).(*ir.Name) + return ok && v.Op() == ir.ONAME && !(v.Class == ir.PEXTERN && v.Sym().Pkg == types.LocalPkg) + } + + return ir.Any(n, func(n ir.Node) bool { + switch n.Op() { + case ir.OCALLFUNC, ir.OCALLINTER: + return !ir.IsFuncPCIntrinsic(n.(*ir.CallExpr)) + + case ir.OAPPEND, ir.OCLEAR, ir.OCOPY: + return true // could mutate a global array + + case ir.OAS: + n := n.(*ir.AssignStmt) + if !safeLHS(n.X) { + return true + } + + case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV: + n := n.(*ir.AssignListStmt) + for _, lhs := range n.Lhs { + if !safeLHS(lhs) { + return true + } + } + } + + return false + }) +} + +// canRepeat reports whether executing n multiple times has the same effect as +// assigning n to a single variable and using that variable multiple times. 
+func canRepeat(n ir.Node) bool { + bad := func(n ir.Node) bool { + if isSideEffect(n) { + return true + } + switch n.Op() { + case ir.OMAKECHAN, + ir.OMAKEMAP, + ir.OMAKESLICE, + ir.OMAKESLICECOPY, + ir.OMAPLIT, + ir.ONEW, + ir.OPTRLIT, + ir.OSLICELIT, + ir.OSTR2BYTES, + ir.OSTR2RUNES: + return true + } + return false + } + return !ir.Any(n, bad) +} + +func getlit(lit ir.Node) int { + if ir.IsSmallIntConst(lit) { + return int(ir.Int64Val(lit)) + } + return -1 +} + +func isvaluelit(n ir.Node) bool { + return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT +} + +func subst(n ir.Node, m map[*ir.Name]ir.Node) (ir.Node, bool) { + valid := true + var edit func(ir.Node) ir.Node + edit = func(x ir.Node) ir.Node { + switch x.Op() { + case ir.ONAME: + x := x.(*ir.Name) + if v, ok := m[x]; ok { + return ir.DeepCopy(v.Pos(), v) + } + return x + case ir.ONONAME, ir.OLITERAL, ir.ONIL, ir.OTYPE: + return x + } + x = ir.Copy(x) + ir.EditChildrenWithHidden(x, edit) + + // TODO: handle more operations, see details discussion in go.dev/cl/466277. + switch x.Op() { + case ir.OCONV: + x := x.(*ir.ConvExpr) + if x.X.Op() == ir.OLITERAL { + if x, ok := truncate(x.X, x.Type()); ok { + return x + } + valid = false + return x + } + case ir.OADDSTR: + return addStr(x.(*ir.AddStringExpr)) + } + return x + } + n = edit(n) + return n, valid +} + +// truncate returns the result of force converting c to type t, +// truncating its value as needed, like a conversion of a variable. +// If the conversion is too difficult, truncate returns nil, false. +func truncate(c ir.Node, t *types.Type) (ir.Node, bool) { + ct := c.Type() + cv := c.Val() + if ct.Kind() != t.Kind() { + switch { + default: + // Note: float -> float/integer and complex -> complex are valid but subtle. + // For example a float32(float64 1e300) evaluates to +Inf at runtime + // and the compiler doesn't have any concept of +Inf, so that would + // have to be left for runtime code evaluation. 
+ // For now + return nil, false + + case ct.IsInteger() && t.IsInteger(): + // truncate or sign extend + bits := t.Size() * 8 + cv = constant.BinaryOp(cv, token.AND, constant.MakeUint64(1< 0 { + fmt.Fprintf(os.Stderr, "=-= mapassign %s %v rhs size %d\n", + base.Ctxt.Pkgpath, n, rsiz) + } + + // Reject smaller candidates if not in stress mode. + if rsiz < wrapGlobalMapInitSizeThreshold && base.Debug.WrapGlobalMapCtl != 2 { + if base.Debug.WrapGlobalMapDbg > 1 { + fmt.Fprintf(os.Stderr, "=-= skipping %v size too small at %d\n", + nm, rsiz) + } + return nil + } + + // Reject right hand sides with side effects. + if AnySideEffects(as.Y) { + if base.Debug.WrapGlobalMapDbg > 0 { + fmt.Fprintf(os.Stderr, "=-= rejected %v due to side effects\n", nm) + } + return nil + } + + if base.Debug.WrapGlobalMapDbg > 1 { + fmt.Fprintf(os.Stderr, "=-= committed for: %+v\n", n) + } + + // Create a new function that will (eventually) have this form: + // + // func map.init.%d() { + // globmapvar = + // } + // + // Note: cmd/link expects the function name to contain "map.init". + minitsym := typecheck.LookupNum("map.init.", mapinitgen) + mapinitgen++ + + fn := ir.NewFunc(n.Pos(), n.Pos(), minitsym, types.NewSignature(nil, nil, nil)) + fn.SetInlinabilityChecked(true) // suppress inlining (which would defeat the point) + typecheck.DeclFunc(fn) + if base.Debug.WrapGlobalMapDbg > 0 { + fmt.Fprintf(os.Stderr, "=-= generated func is %v\n", fn) + } + + // NB: we're relying on this phase being run before inlining; + // if for some reason we need to move it after inlining, we'll + // need code here that relocates or duplicates inline temps. + + // Insert assignment into function body; mark body finished. 
+ fn.Body = []ir.Node{as} + typecheck.FinishFuncBody() + + if base.Debug.WrapGlobalMapDbg > 1 { + fmt.Fprintf(os.Stderr, "=-= mapvar is %v\n", nm) + fmt.Fprintf(os.Stderr, "=-= newfunc is %+v\n", fn) + } + + recordFuncForVar(nm, fn) + + return fn +} + +// mapinitgen is a counter used to uniquify compiler-generated +// map init functions. +var mapinitgen int + +// AddKeepRelocations adds a dummy "R_KEEP" relocation from each +// global map variable V to its associated outlined init function. +// These relocation ensure that if the map var itself is determined to +// be reachable at link time, we also mark the init function as +// reachable. +func AddKeepRelocations() { + if varToMapInit == nil { + return + } + for k, v := range varToMapInit { + // Add R_KEEP relocation from map to init function. + fs := v.Linksym() + if fs == nil { + base.Fatalf("bad: func %v has no linksym", v) + } + vs := k.Linksym() + if vs == nil { + base.Fatalf("bad: mapvar %v has no linksym", k) + } + r := obj.Addrel(vs) + r.Sym = fs + r.Type = objabi.R_KEEP + if base.Debug.WrapGlobalMapDbg > 1 { + fmt.Fprintf(os.Stderr, "=-= add R_KEEP relo from %s to %s\n", + vs.Name, fs.Name) + } + } + varToMapInit = nil +} + +// OutlineMapInits replaces global map initializers with outlined +// calls to separate "map init" functions (where possible and +// profitable), to facilitate better dead-code elimination by the +// linker. +func OutlineMapInits(fn *ir.Func) { + if base.Debug.WrapGlobalMapCtl == 1 { + return + } + + outlined := 0 + for i, stmt := range fn.Body { + // Attempt to outline stmt. If successful, replace it with a call + // to the returned wrapper function. 
+ if wrapperFn := tryWrapGlobalInit(stmt); wrapperFn != nil { + ir.WithFunc(fn, func() { + fn.Body[i] = typecheck.Call(stmt.Pos(), wrapperFn.Nname, nil, false) + }) + outlined++ + } + } + + if base.Debug.WrapGlobalMapDbg > 1 { + fmt.Fprintf(os.Stderr, "=-= outlined %v map initializations\n", outlined) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/branches.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/branches.go new file mode 100644 index 0000000000000000000000000000000000000000..3d7ffed37423ba586e6ec08aa486178e99eb7e58 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/branches.go @@ -0,0 +1,339 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syntax + +import "fmt" + +// checkBranches checks correct use of labels and branch +// statements (break, continue, fallthrough, goto) in a function body. +// It catches: +// - misplaced breaks, continues, and fallthroughs +// - bad labeled breaks and continues +// - invalid, unused, duplicate, and missing labels +// - gotos jumping over variable declarations and into blocks +func checkBranches(body *BlockStmt, errh ErrorHandler) { + if body == nil { + return + } + + // scope of all labels in this body + ls := &labelScope{errh: errh} + fwdGotos := ls.blockBranches(nil, targets{}, nil, body.Pos(), body.List) + + // If there are any forward gotos left, no matching label was + // found for them. Either those labels were never defined, or + // they are inside blocks and not reachable from the gotos. 
+ for _, fwd := range fwdGotos { + name := fwd.Label.Value + if l := ls.labels[name]; l != nil { + l.used = true // avoid "defined and not used" error + ls.err(fwd.Label.Pos(), "goto %s jumps into block starting at %s", name, l.parent.start) + } else { + ls.err(fwd.Label.Pos(), "label %s not defined", name) + } + } + + // spec: "It is illegal to define a label that is never used." + for _, l := range ls.labels { + if !l.used { + l := l.lstmt.Label + ls.err(l.Pos(), "label %s defined and not used", l.Value) + } + } +} + +type labelScope struct { + errh ErrorHandler + labels map[string]*label // all label declarations inside the function; allocated lazily +} + +type label struct { + parent *block // block containing this label declaration + lstmt *LabeledStmt // statement declaring the label + used bool // whether the label is used or not +} + +type block struct { + parent *block // immediately enclosing block, or nil + start Pos // start of block + lstmt *LabeledStmt // labeled statement associated with this block, or nil +} + +func (ls *labelScope) err(pos Pos, format string, args ...interface{}) { + ls.errh(Error{pos, fmt.Sprintf(format, args...)}) +} + +// declare declares the label introduced by s in block b and returns +// the new label. If the label was already declared, declare reports +// and error and the existing label is returned instead. +func (ls *labelScope) declare(b *block, s *LabeledStmt) *label { + name := s.Label.Value + labels := ls.labels + if labels == nil { + labels = make(map[string]*label) + ls.labels = labels + } else if alt := labels[name]; alt != nil { + ls.err(s.Label.Pos(), "label %s already defined at %s", name, alt.lstmt.Label.Pos().String()) + return alt + } + l := &label{b, s, false} + labels[name] = l + return l +} + +// gotoTarget returns the labeled statement matching the given name and +// declared in block b or any of its enclosing blocks. 
The result is nil +// if the label is not defined, or doesn't match a valid labeled statement. +func (ls *labelScope) gotoTarget(b *block, name string) *LabeledStmt { + if l := ls.labels[name]; l != nil { + l.used = true // even if it's not a valid target + for ; b != nil; b = b.parent { + if l.parent == b { + return l.lstmt + } + } + } + return nil +} + +var invalid = new(LabeledStmt) // singleton to signal invalid enclosing target + +// enclosingTarget returns the innermost enclosing labeled statement matching +// the given name. The result is nil if the label is not defined, and invalid +// if the label is defined but doesn't label a valid labeled statement. +func (ls *labelScope) enclosingTarget(b *block, name string) *LabeledStmt { + if l := ls.labels[name]; l != nil { + l.used = true // even if it's not a valid target (see e.g., test/fixedbugs/bug136.go) + for ; b != nil; b = b.parent { + if l.lstmt == b.lstmt { + return l.lstmt + } + } + return invalid + } + return nil +} + +// targets describes the target statements within which break +// or continue statements are valid. +type targets struct { + breaks Stmt // *ForStmt, *SwitchStmt, *SelectStmt, or nil + continues *ForStmt // or nil + caseIndex int // case index of immediately enclosing switch statement, or < 0 +} + +// blockBranches processes a block's body starting at start and returns the +// list of unresolved (forward) gotos. parent is the immediately enclosing +// block (or nil), ctxt provides information about the enclosing statements, +// and lstmt is the labeled statement associated with this block, or nil. 
+func (ls *labelScope) blockBranches(parent *block, ctxt targets, lstmt *LabeledStmt, start Pos, body []Stmt) []*BranchStmt { + b := &block{parent: parent, start: start, lstmt: lstmt} + + var varPos Pos + var varName Expr + var fwdGotos, badGotos []*BranchStmt + + recordVarDecl := func(pos Pos, name Expr) { + varPos = pos + varName = name + // Any existing forward goto jumping over the variable + // declaration is invalid. The goto may still jump out + // of the block and be ok, but we don't know that yet. + // Remember all forward gotos as potential bad gotos. + badGotos = append(badGotos[:0], fwdGotos...) + } + + jumpsOverVarDecl := func(fwd *BranchStmt) bool { + if varPos.IsKnown() { + for _, bad := range badGotos { + if fwd == bad { + return true + } + } + } + return false + } + + innerBlock := func(ctxt targets, start Pos, body []Stmt) { + // Unresolved forward gotos from the inner block + // become forward gotos for the current block. + fwdGotos = append(fwdGotos, ls.blockBranches(b, ctxt, lstmt, start, body)...) + } + + // A fallthrough statement counts as last statement in a statement + // list even if there are trailing empty statements; remove them. 
+ stmtList := trimTrailingEmptyStmts(body) + for stmtIndex, stmt := range stmtList { + lstmt = nil + L: + switch s := stmt.(type) { + case *DeclStmt: + for _, d := range s.DeclList { + if v, ok := d.(*VarDecl); ok { + recordVarDecl(v.Pos(), v.NameList[0]) + break // the first VarDecl will do + } + } + + case *LabeledStmt: + // declare non-blank label + if name := s.Label.Value; name != "_" { + l := ls.declare(b, s) + // resolve matching forward gotos + i := 0 + for _, fwd := range fwdGotos { + if fwd.Label.Value == name { + fwd.Target = s + l.used = true + if jumpsOverVarDecl(fwd) { + ls.err( + fwd.Label.Pos(), + "goto %s jumps over declaration of %s at %s", + name, String(varName), varPos, + ) + } + } else { + // no match - keep forward goto + fwdGotos[i] = fwd + i++ + } + } + fwdGotos = fwdGotos[:i] + lstmt = s + } + // process labeled statement + stmt = s.Stmt + goto L + + case *BranchStmt: + // unlabeled branch statement + if s.Label == nil { + switch s.Tok { + case _Break: + if t := ctxt.breaks; t != nil { + s.Target = t + } else { + ls.err(s.Pos(), "break is not in a loop, switch, or select") + } + case _Continue: + if t := ctxt.continues; t != nil { + s.Target = t + } else { + ls.err(s.Pos(), "continue is not in a loop") + } + case _Fallthrough: + msg := "fallthrough statement out of place" + if t, _ := ctxt.breaks.(*SwitchStmt); t != nil { + if _, ok := t.Tag.(*TypeSwitchGuard); ok { + msg = "cannot fallthrough in type switch" + } else if ctxt.caseIndex < 0 || stmtIndex+1 < len(stmtList) { + // fallthrough nested in a block or not the last statement + // use msg as is + } else if ctxt.caseIndex+1 == len(t.Body) { + msg = "cannot fallthrough final case in switch" + } else { + break // fallthrough ok + } + } + ls.err(s.Pos(), msg) + case _Goto: + fallthrough // should always have a label + default: + panic("invalid BranchStmt") + } + break + } + + // labeled branch statement + name := s.Label.Value + switch s.Tok { + case _Break: + // spec: "If there is a 
label, it must be that of an enclosing + // "for", "switch", or "select" statement, and that is the one + // whose execution terminates." + if t := ls.enclosingTarget(b, name); t != nil { + switch t := t.Stmt.(type) { + case *SwitchStmt, *SelectStmt, *ForStmt: + s.Target = t + default: + ls.err(s.Label.Pos(), "invalid break label %s", name) + } + } else { + ls.err(s.Label.Pos(), "break label not defined: %s", name) + } + + case _Continue: + // spec: "If there is a label, it must be that of an enclosing + // "for" statement, and that is the one whose execution advances." + if t := ls.enclosingTarget(b, name); t != nil { + if t, ok := t.Stmt.(*ForStmt); ok { + s.Target = t + } else { + ls.err(s.Label.Pos(), "invalid continue label %s", name) + } + } else { + ls.err(s.Label.Pos(), "continue label not defined: %s", name) + } + + case _Goto: + if t := ls.gotoTarget(b, name); t != nil { + s.Target = t + } else { + // label may be declared later - add goto to forward gotos + fwdGotos = append(fwdGotos, s) + } + + case _Fallthrough: + fallthrough // should never have a label + default: + panic("invalid BranchStmt") + } + + case *AssignStmt: + if s.Op == Def { + recordVarDecl(s.Pos(), s.Lhs) + } + + case *BlockStmt: + inner := targets{ctxt.breaks, ctxt.continues, -1} + innerBlock(inner, s.Pos(), s.List) + + case *IfStmt: + inner := targets{ctxt.breaks, ctxt.continues, -1} + innerBlock(inner, s.Then.Pos(), s.Then.List) + if s.Else != nil { + innerBlock(inner, s.Else.Pos(), []Stmt{s.Else}) + } + + case *ForStmt: + inner := targets{s, s, -1} + innerBlock(inner, s.Body.Pos(), s.Body.List) + + case *SwitchStmt: + inner := targets{s, ctxt.continues, -1} + for i, cc := range s.Body { + inner.caseIndex = i + innerBlock(inner, cc.Pos(), cc.Body) + } + + case *SelectStmt: + inner := targets{s, ctxt.continues, -1} + for _, cc := range s.Body { + innerBlock(inner, cc.Pos(), cc.Body) + } + } + } + + return fwdGotos +} + +func trimTrailingEmptyStmts(list []Stmt) []Stmt { + for i := 
len(list); i > 0; i-- { + if _, ok := list[i-1].(*EmptyStmt); !ok { + return list[:i] + } + } + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/dumper.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/dumper.go new file mode 100644 index 0000000000000000000000000000000000000000..d5247886dae53117ccf5934cd53dd68d499ea4ec --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/dumper.go @@ -0,0 +1,212 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements printing of syntax tree structures. + +package syntax + +import ( + "fmt" + "io" + "reflect" + "unicode" + "unicode/utf8" +) + +// Fdump dumps the structure of the syntax tree rooted at n to w. +// It is intended for debugging purposes; no specific output format +// is guaranteed. +func Fdump(w io.Writer, n Node) (err error) { + p := dumper{ + output: w, + ptrmap: make(map[Node]int), + last: '\n', // force printing of line number on first line + } + + defer func() { + if e := recover(); e != nil { + err = e.(writeError).err // re-panics if it's not a writeError + } + }() + + if n == nil { + p.printf("nil\n") + return + } + p.dump(reflect.ValueOf(n), n) + p.printf("\n") + + return +} + +type dumper struct { + output io.Writer + ptrmap map[Node]int // node -> dump line number + indent int // current indentation level + last byte // last byte processed by Write + line int // current line number +} + +var indentBytes = []byte(". 
") + +func (p *dumper) Write(data []byte) (n int, err error) { + var m int + for i, b := range data { + // invariant: data[0:n] has been written + if b == '\n' { + m, err = p.output.Write(data[n : i+1]) + n += m + if err != nil { + return + } + } else if p.last == '\n' { + p.line++ + _, err = fmt.Fprintf(p.output, "%6d ", p.line) + if err != nil { + return + } + for j := p.indent; j > 0; j-- { + _, err = p.output.Write(indentBytes) + if err != nil { + return + } + } + } + p.last = b + } + if len(data) > n { + m, err = p.output.Write(data[n:]) + n += m + } + return +} + +// writeError wraps locally caught write errors so we can distinguish +// them from genuine panics which we don't want to return as errors. +type writeError struct { + err error +} + +// printf is a convenience wrapper that takes care of print errors. +func (p *dumper) printf(format string, args ...interface{}) { + if _, err := fmt.Fprintf(p, format, args...); err != nil { + panic(writeError{err}) + } +} + +// dump prints the contents of x. +// If x is the reflect.Value of a struct s, where &s +// implements Node, then &s should be passed for n - +// this permits printing of the unexported span and +// comments fields of the embedded isNode field by +// calling the Span() and Comment() instead of using +// reflection. +func (p *dumper) dump(x reflect.Value, n Node) { + switch x.Kind() { + case reflect.Interface: + if x.IsNil() { + p.printf("nil") + return + } + p.dump(x.Elem(), nil) + + case reflect.Ptr: + if x.IsNil() { + p.printf("nil") + return + } + + // special cases for identifiers w/o attached comments (common case) + if x, ok := x.Interface().(*Name); ok { + p.printf("%s @ %v", x.Value, x.Pos()) + return + } + + p.printf("*") + // Fields may share type expressions, and declarations + // may share the same group - use ptrmap to keep track + // of nodes that have been printed already. 
+ if ptr, ok := x.Interface().(Node); ok { + if line, exists := p.ptrmap[ptr]; exists { + p.printf("(Node @ %d)", line) + return + } + p.ptrmap[ptr] = p.line + n = ptr + } + p.dump(x.Elem(), n) + + case reflect.Slice: + if x.IsNil() { + p.printf("nil") + return + } + p.printf("%s (%d entries) {", x.Type(), x.Len()) + if x.Len() > 0 { + p.indent++ + p.printf("\n") + for i, n := 0, x.Len(); i < n; i++ { + p.printf("%d: ", i) + p.dump(x.Index(i), nil) + p.printf("\n") + } + p.indent-- + } + p.printf("}") + + case reflect.Struct: + typ := x.Type() + + // if span, ok := x.Interface().(lexical.Span); ok { + // p.printf("%s", &span) + // return + // } + + p.printf("%s {", typ) + p.indent++ + + first := true + if n != nil { + p.printf("\n") + first = false + // p.printf("Span: %s\n", n.Span()) + // if c := *n.Comments(); c != nil { + // p.printf("Comments: ") + // p.dump(reflect.ValueOf(c), nil) // a Comment is not a Node + // p.printf("\n") + // } + } + + for i, n := 0, typ.NumField(); i < n; i++ { + // Exclude non-exported fields because their + // values cannot be accessed via reflection. 
+ if name := typ.Field(i).Name; isExported(name) { + if first { + p.printf("\n") + first = false + } + p.printf("%s: ", name) + p.dump(x.Field(i), nil) + p.printf("\n") + } + } + + p.indent-- + p.printf("}") + + default: + switch x := x.Interface().(type) { + case string: + // print strings in quotes + p.printf("%q", x) + default: + p.printf("%v", x) + } + } +} + +func isExported(name string) bool { + ch, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(ch) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/dumper_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/dumper_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1ba85cc8d9a9870bbbb1338355954a0ee8a1b0f0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/dumper_test.go @@ -0,0 +1,21 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syntax + +import ( + "testing" +) + +func TestDump(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + ast, _ := ParseFile(*src_, func(err error) { t.Error(err) }, nil, CheckBranches) + + if ast != nil { + Fdump(testOut(), ast) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/error_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/error_test.go new file mode 100644 index 0000000000000000000000000000000000000000..55ea6345b90fdf5525b0accb3ee1f0212ac34863 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/error_test.go @@ -0,0 +1,190 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements a regression test harness for syntax errors. 
+// The files in the testdata directory are parsed and the reported +// errors are compared against the errors declared in those files. +// +// Errors are declared in place in the form of "error comments", +// just before (or on the same line as) the offending token. +// +// Error comments must be of the form // ERROR rx or /* ERROR rx */ +// where rx is a regular expression that matches the reported error +// message. The rx text comprises the comment text after "ERROR ", +// with any white space around it stripped. +// +// If the line comment form is used, the reported error's line must +// match the line of the error comment. +// +// If the regular comment form is used, the reported error's position +// must match the position of the token immediately following the +// error comment. Thus, /* ERROR ... */ comments should appear +// immediately before the position where the error is reported. +// +// Currently, the test harness only supports one error comment per +// token. If multiple error comments appear before a token, only +// the last one is considered. + +package syntax + +import ( + "flag" + "fmt" + "internal/testenv" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + "testing" +) + +const testdata = "testdata" // directory containing test files + +var print = flag.Bool("print", false, "only print errors") + +// A position represents a source position in the current file. +type position struct { + line, col uint +} + +func (pos position) String() string { + return fmt.Sprintf("%d:%d", pos.line, pos.col) +} + +func sortedPositions(m map[position]string) []position { + list := make([]position, len(m)) + i := 0 + for pos := range m { + list[i] = pos + i++ + } + sort.Slice(list, func(i, j int) bool { + a, b := list[i], list[j] + return a.line < b.line || a.line == b.line && a.col < b.col + }) + return list +} + +// declaredErrors returns a map of source positions to error +// patterns, extracted from error comments in the given file. 
+// Error comments in the form of line comments use col = 0 +// in their position. +func declaredErrors(t *testing.T, filename string) map[position]string { + f, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + declared := make(map[position]string) + + var s scanner + var pattern string + s.init(f, func(line, col uint, msg string) { + // errors never start with '/' so they are automatically excluded here + switch { + case strings.HasPrefix(msg, "// ERROR "): + // we can't have another comment on the same line - just add it + declared[position{s.line, 0}] = strings.TrimSpace(msg[9:]) + case strings.HasPrefix(msg, "/* ERROR "): + // we may have more comments before the next token - collect them + pattern = strings.TrimSpace(msg[9 : len(msg)-2]) + } + }, comments) + + // consume file + for { + s.next() + if pattern != "" { + declared[position{s.line, s.col}] = pattern + pattern = "" + } + if s.tok == _EOF { + break + } + } + + return declared +} + +func testSyntaxErrors(t *testing.T, filename string) { + declared := declaredErrors(t, filename) + if *print { + fmt.Println("Declared errors:") + for _, pos := range sortedPositions(declared) { + fmt.Printf("%s:%s: %s\n", filename, pos, declared[pos]) + } + + fmt.Println() + fmt.Println("Reported errors:") + } + + f, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + ParseFile(filename, func(err error) { + e, ok := err.(Error) + if !ok { + return + } + + if *print { + fmt.Println(err) + return + } + + orig := position{e.Pos.Line(), e.Pos.Col()} + pos := orig + pattern, found := declared[pos] + if !found { + // try line comment (only line must match) + pos = position{e.Pos.Line(), 0} + pattern, found = declared[pos] + } + if found { + rx, err := regexp.Compile(pattern) + if err != nil { + t.Errorf("%s:%s: %v", filename, pos, err) + return + } + if match := rx.MatchString(e.Msg); !match { + t.Errorf("%s:%s: %q does not match %q", filename, pos, e.Msg, 
pattern) + return + } + // we have a match - eliminate this error + delete(declared, pos) + } else { + t.Errorf("%s:%s: unexpected error: %s", filename, orig, e.Msg) + } + }, nil, CheckBranches) + + if *print { + fmt.Println() + return // we're done + } + + // report expected but not reported errors + for pos, pattern := range declared { + t.Errorf("%s:%s: missing error: %s", filename, pos, pattern) + } +} + +func TestSyntaxErrors(t *testing.T) { + testenv.MustHaveGoBuild(t) // we need access to source (testdata) + + list, err := os.ReadDir(testdata) + if err != nil { + t.Fatal(err) + } + for _, fi := range list { + name := fi.Name() + if !fi.IsDir() && !strings.HasPrefix(name, ".") { + testSyntaxErrors(t, filepath.Join(testdata, name)) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/nodes.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/nodes.go new file mode 100644 index 0000000000000000000000000000000000000000..de277fc3d8cdabe845538ee6c6fb5629ee97932a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/nodes.go @@ -0,0 +1,487 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syntax + +// ---------------------------------------------------------------------------- +// Nodes + +type Node interface { + // Pos() returns the position associated with the node as follows: + // 1) The position of a node representing a terminal syntax production + // (Name, BasicLit, etc.) is the position of the respective production + // in the source. + // 2) The position of a node representing a non-terminal production + // (IndexExpr, IfStmt, etc.) is the position of a token uniquely + // associated with that production; usually the left-most one + // ('[' for IndexExpr, 'if' for IfStmt, etc.) 
+ Pos() Pos + SetPos(Pos) + aNode() +} + +type node struct { + // commented out for now since not yet used + // doc *Comment // nil means no comment(s) attached + pos Pos +} + +func (n *node) Pos() Pos { return n.pos } +func (n *node) SetPos(pos Pos) { n.pos = pos } +func (*node) aNode() {} + +// ---------------------------------------------------------------------------- +// Files + +// package PkgName; DeclList[0], DeclList[1], ... +type File struct { + Pragma Pragma + PkgName *Name + DeclList []Decl + EOF Pos + GoVersion string + node +} + +// ---------------------------------------------------------------------------- +// Declarations + +type ( + Decl interface { + Node + aDecl() + } + + // Path + // LocalPkgName Path + ImportDecl struct { + Group *Group // nil means not part of a group + Pragma Pragma + LocalPkgName *Name // including "."; nil means no rename present + Path *BasicLit // Path.Bad || Path.Kind == StringLit; nil means no path + decl + } + + // NameList + // NameList = Values + // NameList Type = Values + ConstDecl struct { + Group *Group // nil means not part of a group + Pragma Pragma + NameList []*Name + Type Expr // nil means no type + Values Expr // nil means no values + decl + } + + // Name Type + TypeDecl struct { + Group *Group // nil means not part of a group + Pragma Pragma + Name *Name + TParamList []*Field // nil means no type parameters + Alias bool + Type Expr + decl + } + + // NameList Type + // NameList Type = Values + // NameList = Values + VarDecl struct { + Group *Group // nil means not part of a group + Pragma Pragma + NameList []*Name + Type Expr // nil means no type + Values Expr // nil means no values + decl + } + + // func Name Type { Body } + // func Name Type + // func Receiver Name Type { Body } + // func Receiver Name Type + FuncDecl struct { + Pragma Pragma + Recv *Field // nil means regular function + Name *Name + TParamList []*Field // nil means no type parameters + Type *FuncType + Body *BlockStmt // nil means no 
body (forward declaration) + decl + } +) + +type decl struct{ node } + +func (*decl) aDecl() {} + +// All declarations belonging to the same group point to the same Group node. +type Group struct { + _ int // not empty so we are guaranteed different Group instances +} + +// ---------------------------------------------------------------------------- +// Expressions + +func NewName(pos Pos, value string) *Name { + n := new(Name) + n.pos = pos + n.Value = value + return n +} + +type ( + Expr interface { + Node + typeInfo + aExpr() + } + + // Placeholder for an expression that failed to parse + // correctly and where we can't provide a better node. + BadExpr struct { + expr + } + + // Value + Name struct { + Value string + expr + } + + // Value + BasicLit struct { + Value string + Kind LitKind + Bad bool // true means the literal Value has syntax errors + expr + } + + // Type { ElemList[0], ElemList[1], ... } + CompositeLit struct { + Type Expr // nil means no literal type + ElemList []Expr + NKeys int // number of elements with keys + Rbrace Pos + expr + } + + // Key: Value + KeyValueExpr struct { + Key, Value Expr + expr + } + + // func Type { Body } + FuncLit struct { + Type *FuncType + Body *BlockStmt + expr + } + + // (X) + ParenExpr struct { + X Expr + expr + } + + // X.Sel + SelectorExpr struct { + X Expr + Sel *Name + expr + } + + // X[Index] + // X[T1, T2, ...] (with Ti = Index.(*ListExpr).ElemList[i]) + IndexExpr struct { + X Expr + Index Expr + expr + } + + // X[Index[0] : Index[1] : Index[2]] + SliceExpr struct { + X Expr + Index [3]Expr + // Full indicates whether this is a simple or full slice expression. + // In a valid AST, this is equivalent to Index[2] != nil. + // TODO(mdempsky): This is only needed to report the "3-index + // slice of string" error when Index[2] is missing. 
+ Full bool + expr + } + + // X.(Type) + AssertExpr struct { + X Expr + Type Expr + expr + } + + // X.(type) + // Lhs := X.(type) + TypeSwitchGuard struct { + Lhs *Name // nil means no Lhs := + X Expr // X.(type) + expr + } + + Operation struct { + Op Operator + X, Y Expr // Y == nil means unary expression + expr + } + + // Fun(ArgList[0], ArgList[1], ...) + CallExpr struct { + Fun Expr + ArgList []Expr // nil means no arguments + HasDots bool // last argument is followed by ... + expr + } + + // ElemList[0], ElemList[1], ... + ListExpr struct { + ElemList []Expr + expr + } + + // [Len]Elem + ArrayType struct { + // TODO(gri) consider using Name{"..."} instead of nil (permits attaching of comments) + Len Expr // nil means Len is ... + Elem Expr + expr + } + + // []Elem + SliceType struct { + Elem Expr + expr + } + + // ...Elem + DotsType struct { + Elem Expr + expr + } + + // struct { FieldList[0] TagList[0]; FieldList[1] TagList[1]; ... } + StructType struct { + FieldList []*Field + TagList []*BasicLit // i >= len(TagList) || TagList[i] == nil means no tag for field i + expr + } + + // Name Type + // Type + Field struct { + Name *Name // nil means anonymous field/parameter (structs/parameters), or embedded element (interfaces) + Type Expr // field names declared in a list share the same Type (identical pointers) + node + } + + // interface { MethodList[0]; MethodList[1]; ... } + InterfaceType struct { + MethodList []*Field + expr + } + + FuncType struct { + ParamList []*Field + ResultList []*Field + expr + } + + // map[Key]Value + MapType struct { + Key, Value Expr + expr + } + + // chan Elem + // <-chan Elem + // chan<- Elem + ChanType struct { + Dir ChanDir // 0 means no direction + Elem Expr + expr + } +) + +type expr struct { + node + typeAndValue // After typechecking, contains the results of typechecking this expression. 
+} + +func (*expr) aExpr() {} + +type ChanDir uint + +const ( + _ ChanDir = iota + SendOnly + RecvOnly +) + +// ---------------------------------------------------------------------------- +// Statements + +type ( + Stmt interface { + Node + aStmt() + } + + SimpleStmt interface { + Stmt + aSimpleStmt() + } + + EmptyStmt struct { + simpleStmt + } + + LabeledStmt struct { + Label *Name + Stmt Stmt + stmt + } + + BlockStmt struct { + List []Stmt + Rbrace Pos + stmt + } + + ExprStmt struct { + X Expr + simpleStmt + } + + SendStmt struct { + Chan, Value Expr // Chan <- Value + simpleStmt + } + + DeclStmt struct { + DeclList []Decl + stmt + } + + AssignStmt struct { + Op Operator // 0 means no operation + Lhs, Rhs Expr // Rhs == nil means Lhs++ (Op == Add) or Lhs-- (Op == Sub) + simpleStmt + } + + BranchStmt struct { + Tok token // Break, Continue, Fallthrough, or Goto + Label *Name + // Target is the continuation of the control flow after executing + // the branch; it is computed by the parser if CheckBranches is set. + // Target is a *LabeledStmt for gotos, and a *SwitchStmt, *SelectStmt, + // or *ForStmt for breaks and continues, depending on the context of + // the branch. Target is not set for fallthroughs. + Target Stmt + stmt + } + + CallStmt struct { + Tok token // Go or Defer + Call Expr + DeferAt Expr // argument to runtime.deferprocat + stmt + } + + ReturnStmt struct { + Results Expr // nil means no explicit return values + stmt + } + + IfStmt struct { + Init SimpleStmt + Cond Expr + Then *BlockStmt + Else Stmt // either nil, *IfStmt, or *BlockStmt + stmt + } + + ForStmt struct { + Init SimpleStmt // incl. *RangeClause + Cond Expr + Post SimpleStmt + Body *BlockStmt + stmt + } + + SwitchStmt struct { + Init SimpleStmt + Tag Expr // incl. 
*TypeSwitchGuard + Body []*CaseClause + Rbrace Pos + stmt + } + + SelectStmt struct { + Body []*CommClause + Rbrace Pos + stmt + } +) + +type ( + RangeClause struct { + Lhs Expr // nil means no Lhs = or Lhs := + Def bool // means := + X Expr // range X + simpleStmt + } + + CaseClause struct { + Cases Expr // nil means default clause + Body []Stmt + Colon Pos + node + } + + CommClause struct { + Comm SimpleStmt // send or receive stmt; nil means default clause + Body []Stmt + Colon Pos + node + } +) + +type stmt struct{ node } + +func (stmt) aStmt() {} + +type simpleStmt struct { + stmt +} + +func (simpleStmt) aSimpleStmt() {} + +// ---------------------------------------------------------------------------- +// Comments + +// TODO(gri) Consider renaming to CommentPos, CommentPlacement, etc. +// Kind = Above doesn't make much sense. +type CommentKind uint + +const ( + Above CommentKind = iota + Below + Left + Right +) + +type Comment struct { + Kind CommentKind + Text string + Next *Comment +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/nodes_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/nodes_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a86ae87adf3ab842b1aa8aaa1fbeb5d644ae848b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/nodes_test.go @@ -0,0 +1,326 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syntax + +import ( + "fmt" + "strings" + "testing" +) + +// A test is a source code snippet of a particular node type. +// In the snippet, a '@' indicates the position recorded by +// the parser when creating the respective node. 
+type test struct { + nodetyp string + snippet string +} + +var decls = []test{ + // The position of declarations is always the + // position of the first token of an individual + // declaration, independent of grouping. + {"ImportDecl", `import @"math"`}, + {"ImportDecl", `import @mymath "math"`}, + {"ImportDecl", `import @. "math"`}, + {"ImportDecl", `import (@"math")`}, + {"ImportDecl", `import (@mymath "math")`}, + {"ImportDecl", `import (@. "math")`}, + + {"ConstDecl", `const @x`}, + {"ConstDecl", `const @x = 0`}, + {"ConstDecl", `const @x, y, z = 0, 1, 2`}, + {"ConstDecl", `const (@x)`}, + {"ConstDecl", `const (@x = 0)`}, + {"ConstDecl", `const (@x, y, z = 0, 1, 2)`}, + + {"TypeDecl", `type @T int`}, + {"TypeDecl", `type @T = int`}, + {"TypeDecl", `type (@T int)`}, + {"TypeDecl", `type (@T = int)`}, + + {"VarDecl", `var @x int`}, + {"VarDecl", `var @x, y, z int`}, + {"VarDecl", `var @x int = 0`}, + {"VarDecl", `var @x, y, z int = 1, 2, 3`}, + {"VarDecl", `var @x = 0`}, + {"VarDecl", `var @x, y, z = 1, 2, 3`}, + {"VarDecl", `var (@x int)`}, + {"VarDecl", `var (@x, y, z int)`}, + {"VarDecl", `var (@x int = 0)`}, + {"VarDecl", `var (@x, y, z int = 1, 2, 3)`}, + {"VarDecl", `var (@x = 0)`}, + {"VarDecl", `var (@x, y, z = 1, 2, 3)`}, + + {"FuncDecl", `func @f() {}`}, + {"FuncDecl", `func @(T) f() {}`}, + {"FuncDecl", `func @(x T) f() {}`}, +} + +var exprs = []test{ + // The position of an expression is the position + // of the left-most token that identifies the + // kind of expression. 
+ {"Name", `@x`}, + + {"BasicLit", `@0`}, + {"BasicLit", `@0x123`}, + {"BasicLit", `@3.1415`}, + {"BasicLit", `@.2718`}, + {"BasicLit", `@1i`}, + {"BasicLit", `@'a'`}, + {"BasicLit", `@"abc"`}, + {"BasicLit", "@`abc`"}, + + {"CompositeLit", `@{}`}, + {"CompositeLit", `T@{}`}, + {"CompositeLit", `struct{x, y int}@{}`}, + + {"KeyValueExpr", `"foo"@: true`}, + {"KeyValueExpr", `"a"@: b`}, + + {"FuncLit", `@func (){}`}, + {"ParenExpr", `@(x)`}, + {"SelectorExpr", `a@.b`}, + {"IndexExpr", `a@[i]`}, + + {"SliceExpr", `a@[:]`}, + {"SliceExpr", `a@[i:]`}, + {"SliceExpr", `a@[:j]`}, + {"SliceExpr", `a@[i:j]`}, + {"SliceExpr", `a@[i:j:k]`}, + + {"AssertExpr", `x@.(T)`}, + + {"Operation", `@*b`}, + {"Operation", `@+b`}, + {"Operation", `@-b`}, + {"Operation", `@!b`}, + {"Operation", `@^b`}, + {"Operation", `@&b`}, + {"Operation", `@<-b`}, + + {"Operation", `a @|| b`}, + {"Operation", `a @&& b`}, + {"Operation", `a @== b`}, + {"Operation", `a @+ b`}, + {"Operation", `a @* b`}, + + {"CallExpr", `f@()`}, + {"CallExpr", `f@(x, y, z)`}, + {"CallExpr", `obj.f@(1, 2, 3)`}, + {"CallExpr", `func(x int) int { return x + 1 }@(y)`}, + + // ListExpr: tested via multi-value const/var declarations +} + +var types = []test{ + {"Operation", `@*T`}, + {"Operation", `@*struct{}`}, + + {"ArrayType", `@[10]T`}, + {"ArrayType", `@[...]T`}, + + {"SliceType", `@[]T`}, + {"DotsType", `@...T`}, + {"StructType", `@struct{}`}, + {"InterfaceType", `@interface{}`}, + {"FuncType", `func@()`}, + {"MapType", `@map[T]T`}, + + {"ChanType", `@chan T`}, + {"ChanType", `@chan<- T`}, + {"ChanType", `@<-chan T`}, +} + +var fields = []test{ + {"Field", `@T`}, + {"Field", `@(T)`}, + {"Field", `@x T`}, + {"Field", `@x *(T)`}, + {"Field", `@x, y, z T`}, + {"Field", `@x, y, z (*T)`}, +} + +var stmts = []test{ + {"EmptyStmt", `@`}, + + {"LabeledStmt", `L@:`}, + {"LabeledStmt", `L@: ;`}, + {"LabeledStmt", `L@: f()`}, + + {"BlockStmt", `@{}`}, + + // The position of an ExprStmt is the position of the expression. 
+ {"ExprStmt", `@<-ch`}, + {"ExprStmt", `f@()`}, + {"ExprStmt", `append@(s, 1, 2, 3)`}, + + {"SendStmt", `ch @<- x`}, + + {"DeclStmt", `@const x = 0`}, + {"DeclStmt", `@const (x = 0)`}, + {"DeclStmt", `@type T int`}, + {"DeclStmt", `@type T = int`}, + {"DeclStmt", `@type (T1 = int; T2 = float32)`}, + {"DeclStmt", `@var x = 0`}, + {"DeclStmt", `@var x, y, z int`}, + {"DeclStmt", `@var (a, b = 1, 2)`}, + + {"AssignStmt", `x @= y`}, + {"AssignStmt", `a, b, x @= 1, 2, 3`}, + {"AssignStmt", `x @+= y`}, + {"AssignStmt", `x @:= y`}, + {"AssignStmt", `x, ok @:= f()`}, + {"AssignStmt", `x@++`}, + {"AssignStmt", `a[i]@--`}, + + {"BranchStmt", `@break`}, + {"BranchStmt", `@break L`}, + {"BranchStmt", `@continue`}, + {"BranchStmt", `@continue L`}, + {"BranchStmt", `@fallthrough`}, + {"BranchStmt", `@goto L`}, + + {"CallStmt", `@defer f()`}, + {"CallStmt", `@go f()`}, + + {"ReturnStmt", `@return`}, + {"ReturnStmt", `@return x`}, + {"ReturnStmt", `@return a, b, a + b*f(1, 2, 3)`}, + + {"IfStmt", `@if cond {}`}, + {"IfStmt", `@if cond { f() } else {}`}, + {"IfStmt", `@if cond { f() } else { g(); h() }`}, + {"ForStmt", `@for {}`}, + {"ForStmt", `@for { f() }`}, + {"SwitchStmt", `@switch {}`}, + {"SwitchStmt", `@switch { default: }`}, + {"SwitchStmt", `@switch { default: x++ }`}, + {"SelectStmt", `@select {}`}, + {"SelectStmt", `@select { default: }`}, + {"SelectStmt", `@select { default: ch <- false }`}, +} + +var ranges = []test{ + {"RangeClause", `@range s`}, + {"RangeClause", `i = @range s`}, + {"RangeClause", `i := @range s`}, + {"RangeClause", `_, x = @range s`}, + {"RangeClause", `i, x = @range s`}, + {"RangeClause", `_, x := @range s.f`}, + {"RangeClause", `i, x := @range f(i)`}, +} + +var guards = []test{ + {"TypeSwitchGuard", `x@.(type)`}, + {"TypeSwitchGuard", `x := x@.(type)`}, +} + +var cases = []test{ + {"CaseClause", `@case x:`}, + {"CaseClause", `@case x, y, z:`}, + {"CaseClause", `@case x == 1, y == 2:`}, + {"CaseClause", `@default:`}, +} + +var comms = []test{ + 
{"CommClause", `@case <-ch:`}, + {"CommClause", `@case x <- ch:`}, + {"CommClause", `@case x = <-ch:`}, + {"CommClause", `@case x := <-ch:`}, + {"CommClause", `@case x, ok = <-ch: f(1, 2, 3)`}, + {"CommClause", `@case x, ok := <-ch: x++`}, + {"CommClause", `@default:`}, + {"CommClause", `@default: ch <- true`}, +} + +func TestPos(t *testing.T) { + // TODO(gri) Once we have a general tree walker, we can use that to find + // the first occurrence of the respective node and we don't need to hand- + // extract the node for each specific kind of construct. + + testPos(t, decls, "package p; ", "", + func(f *File) Node { return f.DeclList[0] }, + ) + + // embed expressions in a composite literal so we can test key:value and naked composite literals + testPos(t, exprs, "package p; var _ = T{ ", " }", + func(f *File) Node { return f.DeclList[0].(*VarDecl).Values.(*CompositeLit).ElemList[0] }, + ) + + // embed types in a function signature so we can test ... types + testPos(t, types, "package p; func f(", ")", + func(f *File) Node { return f.DeclList[0].(*FuncDecl).Type.ParamList[0].Type }, + ) + + testPos(t, fields, "package p; func f(", ")", + func(f *File) Node { return f.DeclList[0].(*FuncDecl).Type.ParamList[0] }, + ) + + testPos(t, stmts, "package p; func _() { ", "; }", + func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0] }, + ) + + testPos(t, ranges, "package p; func _() { for ", " {} }", + func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*ForStmt).Init.(*RangeClause) }, + ) + + testPos(t, guards, "package p; func _() { switch ", " {} }", + func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*SwitchStmt).Tag.(*TypeSwitchGuard) }, + ) + + testPos(t, cases, "package p; func _() { switch { ", " } }", + func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*SwitchStmt).Body[0] }, + ) + + testPos(t, comms, "package p; func _() { select { ", " } }", + func(f *File) Node { return 
f.DeclList[0].(*FuncDecl).Body.List[0].(*SelectStmt).Body[0] }, + ) +} + +func testPos(t *testing.T, list []test, prefix, suffix string, extract func(*File) Node) { + for _, test := range list { + // complete source, compute @ position, and strip @ from source + src, index := stripAt(prefix + test.snippet + suffix) + if index < 0 { + t.Errorf("missing @: %s (%s)", src, test.nodetyp) + continue + } + + // build syntax tree + file, err := Parse(nil, strings.NewReader(src), nil, nil, 0) + if err != nil { + t.Errorf("parse error: %s: %v (%s)", src, err, test.nodetyp) + continue + } + + // extract desired node + node := extract(file) + if typ := typeOf(node); typ != test.nodetyp { + t.Errorf("type error: %s: type = %s, want %s", src, typ, test.nodetyp) + continue + } + + // verify node position with expected position as indicated by @ + if pos := int(node.Pos().Col()); pos != index+colbase { + t.Errorf("pos error: %s: pos = %d, want %d (%s)", src, pos, index+colbase, test.nodetyp) + continue + } + } +} + +func stripAt(s string) (string, int) { + if i := strings.Index(s, "@"); i >= 0 { + return s[:i] + s[i+1:], i + } + return s, -1 +} + +func typeOf(n Node) string { + const prefix = "*syntax." + k := fmt.Sprintf("%T", n) + return strings.TrimPrefix(k, prefix) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/operator_string.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/operator_string.go new file mode 100644 index 0000000000000000000000000000000000000000..f045d8c55243ea307922e2372d0deae119b43207 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/operator_string.go @@ -0,0 +1,46 @@ +// Code generated by "stringer -type Operator -linecomment tokens.go"; DO NOT EDIT. + +package syntax + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[Def-1] + _ = x[Not-2] + _ = x[Recv-3] + _ = x[Tilde-4] + _ = x[OrOr-5] + _ = x[AndAnd-6] + _ = x[Eql-7] + _ = x[Neq-8] + _ = x[Lss-9] + _ = x[Leq-10] + _ = x[Gtr-11] + _ = x[Geq-12] + _ = x[Add-13] + _ = x[Sub-14] + _ = x[Or-15] + _ = x[Xor-16] + _ = x[Mul-17] + _ = x[Div-18] + _ = x[Rem-19] + _ = x[And-20] + _ = x[AndNot-21] + _ = x[Shl-22] + _ = x[Shr-23] +} + +const _Operator_name = ":!<-~||&&==!=<<=>>=+-|^*/%&&^<<>>" + +var _Operator_index = [...]uint8{0, 1, 2, 4, 5, 7, 9, 11, 13, 14, 16, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31, 33} + +func (i Operator) String() string { + i -= 1 + if i >= Operator(len(_Operator_index)-1) { + return "Operator(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _Operator_name[_Operator_index[i]:_Operator_index[i+1]] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/parser.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/parser.go new file mode 100644 index 0000000000000000000000000000000000000000..1569b5e9872e1eab970c56ff81cd3675898319dc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/parser.go @@ -0,0 +1,2849 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package syntax + +import ( + "fmt" + "go/build/constraint" + "io" + "strconv" + "strings" +) + +const debug = false +const trace = false + +type parser struct { + file *PosBase + errh ErrorHandler + mode Mode + pragh PragmaHandler + scanner + + base *PosBase // current position base + first error // first error encountered + errcnt int // number of errors encountered + pragma Pragma // pragmas + goVersion string // Go version from //go:build line + + top bool // in top of file (before package clause) + fnest int // function nesting level (for error handling) + xnest int // expression nesting level (for complit ambiguity resolution) + indent []byte // tracing support +} + +func (p *parser) init(file *PosBase, r io.Reader, errh ErrorHandler, pragh PragmaHandler, mode Mode) { + p.top = true + p.file = file + p.errh = errh + p.mode = mode + p.pragh = pragh + p.scanner.init( + r, + // Error and directive handler for scanner. + // Because the (line, col) positions passed to the + // handler is always at or after the current reading + // position, it is safe to use the most recent position + // base to compute the corresponding Pos value. + func(line, col uint, msg string) { + if msg[0] != '/' { + p.errorAt(p.posAt(line, col), msg) + return + } + + // otherwise it must be a comment containing a line or go: directive. + // //line directives must be at the start of the line (column colbase). + // /*line*/ directives can be anywhere in the line. 
+ text := commentText(msg) + if (col == colbase || msg[1] == '*') && strings.HasPrefix(text, "line ") { + var pos Pos // position immediately following the comment + if msg[1] == '/' { + // line comment (newline is part of the comment) + pos = MakePos(p.file, line+1, colbase) + } else { + // regular comment + // (if the comment spans multiple lines it's not + // a valid line directive and will be discarded + // by updateBase) + pos = MakePos(p.file, line, col+uint(len(msg))) + } + p.updateBase(pos, line, col+2+5, text[5:]) // +2 to skip over // or /* + return + } + + // go: directive (but be conservative and test) + if strings.HasPrefix(text, "go:") { + if p.top && strings.HasPrefix(msg, "//go:build") { + if x, err := constraint.Parse(msg); err == nil { + p.goVersion = constraint.GoVersion(x) + } + } + if pragh != nil { + p.pragma = pragh(p.posAt(line, col+2), p.scanner.blank, text, p.pragma) // +2 to skip over // or /* + } + } + }, + directives, + ) + + p.base = file + p.first = nil + p.errcnt = 0 + p.pragma = nil + + p.fnest = 0 + p.xnest = 0 + p.indent = nil +} + +// takePragma returns the current parsed pragmas +// and clears them from the parser state. +func (p *parser) takePragma() Pragma { + prag := p.pragma + p.pragma = nil + return prag +} + +// clearPragma is called at the end of a statement or +// other Go form that does NOT accept a pragma. +// It sends the pragma back to the pragma handler +// to be reported as unused. +func (p *parser) clearPragma() { + if p.pragma != nil { + p.pragh(p.pos(), p.scanner.blank, "", p.pragma) + p.pragma = nil + } +} + +// updateBase sets the current position base to a new line base at pos. +// The base's filename, line, and column values are extracted from text +// which is positioned at (tline, tcol) (only needed for error messages). 
+func (p *parser) updateBase(pos Pos, tline, tcol uint, text string) { + i, n, ok := trailingDigits(text) + if i == 0 { + return // ignore (not a line directive) + } + // i > 0 + + if !ok { + // text has a suffix :xxx but xxx is not a number + p.errorAt(p.posAt(tline, tcol+i), "invalid line number: "+text[i:]) + return + } + + var line, col uint + i2, n2, ok2 := trailingDigits(text[:i-1]) + if ok2 { + //line filename:line:col + i, i2 = i2, i + line, col = n2, n + if col == 0 || col > PosMax { + p.errorAt(p.posAt(tline, tcol+i2), "invalid column number: "+text[i2:]) + return + } + text = text[:i2-1] // lop off ":col" + } else { + //line filename:line + line = n + } + + if line == 0 || line > PosMax { + p.errorAt(p.posAt(tline, tcol+i), "invalid line number: "+text[i:]) + return + } + + // If we have a column (//line filename:line:col form), + // an empty filename means to use the previous filename. + filename := text[:i-1] // lop off ":line" + trimmed := false + if filename == "" && ok2 { + filename = p.base.Filename() + trimmed = p.base.Trimmed() + } + + p.base = NewLineBase(pos, filename, trimmed, line, col) +} + +func commentText(s string) string { + if s[:2] == "/*" { + return s[2 : len(s)-2] // lop off /* and */ + } + + // line comment (does not include newline) + // (on Windows, the line comment may end in \r\n) + i := len(s) + if s[i-1] == '\r' { + i-- + } + return s[2:i] // lop off //, and \r at end, if any +} + +func trailingDigits(text string) (uint, uint, bool) { + i := strings.LastIndexByte(text, ':') // look from right (Windows filenames may contain ':') + if i < 0 { + return 0, 0, false // no ':' + } + // i >= 0 + n, err := strconv.ParseUint(text[i+1:], 10, 0) + return uint(i + 1), uint(n), err == nil +} + +func (p *parser) got(tok token) bool { + if p.tok == tok { + p.next() + return true + } + return false +} + +func (p *parser) want(tok token) { + if !p.got(tok) { + p.syntaxError("expected " + tokstring(tok)) + p.advance() + } +} + +// gotAssign is 
like got(_Assign) but it also accepts ":=" +// (and reports an error) for better parser error recovery. +func (p *parser) gotAssign() bool { + switch p.tok { + case _Define: + p.syntaxError("expected =") + fallthrough + case _Assign: + p.next() + return true + } + return false +} + +// ---------------------------------------------------------------------------- +// Error handling + +// posAt returns the Pos value for (line, col) and the current position base. +func (p *parser) posAt(line, col uint) Pos { + return MakePos(p.base, line, col) +} + +// errorAt reports an error at the given position. +func (p *parser) errorAt(pos Pos, msg string) { + err := Error{pos, msg} + if p.first == nil { + p.first = err + } + p.errcnt++ + if p.errh == nil { + panic(p.first) + } + p.errh(err) +} + +// syntaxErrorAt reports a syntax error at the given position. +func (p *parser) syntaxErrorAt(pos Pos, msg string) { + if trace { + p.print("syntax error: " + msg) + } + + if p.tok == _EOF && p.first != nil { + return // avoid meaningless follow-up errors + } + + // add punctuation etc. as needed to msg + switch { + case msg == "": + // nothing to do + case strings.HasPrefix(msg, "in "), strings.HasPrefix(msg, "at "), strings.HasPrefix(msg, "after "): + msg = " " + msg + case strings.HasPrefix(msg, "expected "): + msg = ", " + msg + default: + // plain error - we don't care about current token + p.errorAt(pos, "syntax error: "+msg) + return + } + + // determine token string + var tok string + switch p.tok { + case _Name, _Semi: + tok = p.lit + case _Literal: + tok = "literal " + p.lit + case _Operator: + tok = p.op.String() + case _AssignOp: + tok = p.op.String() + "=" + case _IncOp: + tok = p.op.String() + tok += tok + default: + tok = tokstring(p.tok) + } + + // TODO(gri) This may print "unexpected X, expected Y". + // Consider "got X, expected Y" in this case. 
+ p.errorAt(pos, "syntax error: unexpected "+tok+msg) +} + +// tokstring returns the English word for selected punctuation tokens +// for more readable error messages. Use tokstring (not tok.String()) +// for user-facing (error) messages; use tok.String() for debugging +// output. +func tokstring(tok token) string { + switch tok { + case _Comma: + return "comma" + case _Semi: + return "semicolon or newline" + } + return tok.String() +} + +// Convenience methods using the current token position. +func (p *parser) pos() Pos { return p.posAt(p.line, p.col) } +func (p *parser) error(msg string) { p.errorAt(p.pos(), msg) } +func (p *parser) syntaxError(msg string) { p.syntaxErrorAt(p.pos(), msg) } + +// The stopset contains keywords that start a statement. +// They are good synchronization points in case of syntax +// errors and (usually) shouldn't be skipped over. +const stopset uint64 = 1<<_Break | + 1<<_Const | + 1<<_Continue | + 1<<_Defer | + 1<<_Fallthrough | + 1<<_For | + 1<<_Go | + 1<<_Goto | + 1<<_If | + 1<<_Return | + 1<<_Select | + 1<<_Switch | + 1<<_Type | + 1<<_Var + +// advance consumes tokens until it finds a token of the stopset or followlist. +// The stopset is only considered if we are inside a function (p.fnest > 0). +// The followlist is the list of valid tokens that can follow a production; +// if it is empty, exactly one (non-EOF) token is consumed to ensure progress. 
+func (p *parser) advance(followlist ...token) { + if trace { + p.print(fmt.Sprintf("advance %s", followlist)) + } + + // compute follow set + // (not speed critical, advance is only called in error situations) + var followset uint64 = 1 << _EOF // don't skip over EOF + if len(followlist) > 0 { + if p.fnest > 0 { + followset |= stopset + } + for _, tok := range followlist { + followset |= 1 << tok + } + } + + for !contains(followset, p.tok) { + if trace { + p.print("skip " + p.tok.String()) + } + p.next() + if len(followlist) == 0 { + break + } + } + + if trace { + p.print("next " + p.tok.String()) + } +} + +// usage: defer p.trace(msg)() +func (p *parser) trace(msg string) func() { + p.print(msg + " (") + const tab = ". " + p.indent = append(p.indent, tab...) + return func() { + p.indent = p.indent[:len(p.indent)-len(tab)] + if x := recover(); x != nil { + panic(x) // skip print_trace + } + p.print(")") + } +} + +func (p *parser) print(msg string) { + fmt.Printf("%5d: %s%s\n", p.line, p.indent, msg) +} + +// ---------------------------------------------------------------------------- +// Package files +// +// Parse methods are annotated with matching Go productions as appropriate. +// The annotations are intended as guidelines only since a single Go grammar +// rule may be covered by multiple parse methods and vice versa. +// +// Excluding methods returning slices, parse methods named xOrNil may return +// nil; all others are expected to return a valid non-nil node. + +// SourceFile = PackageClause ";" { ImportDecl ";" } { TopLevelDecl ";" } . 
+func (p *parser) fileOrNil() *File { + if trace { + defer p.trace("file")() + } + + f := new(File) + f.pos = p.pos() + + // PackageClause + f.GoVersion = p.goVersion + p.top = false + if !p.got(_Package) { + p.syntaxError("package statement must be first") + return nil + } + f.Pragma = p.takePragma() + f.PkgName = p.name() + p.want(_Semi) + + // don't bother continuing if package clause has errors + if p.first != nil { + return nil + } + + // Accept import declarations anywhere for error tolerance, but complain. + // { ( ImportDecl | TopLevelDecl ) ";" } + prev := _Import + for p.tok != _EOF { + if p.tok == _Import && prev != _Import { + p.syntaxError("imports must appear before other declarations") + } + prev = p.tok + + switch p.tok { + case _Import: + p.next() + f.DeclList = p.appendGroup(f.DeclList, p.importDecl) + + case _Const: + p.next() + f.DeclList = p.appendGroup(f.DeclList, p.constDecl) + + case _Type: + p.next() + f.DeclList = p.appendGroup(f.DeclList, p.typeDecl) + + case _Var: + p.next() + f.DeclList = p.appendGroup(f.DeclList, p.varDecl) + + case _Func: + p.next() + if d := p.funcDeclOrNil(); d != nil { + f.DeclList = append(f.DeclList, d) + } + + default: + if p.tok == _Lbrace && len(f.DeclList) > 0 && isEmptyFuncDecl(f.DeclList[len(f.DeclList)-1]) { + // opening { of function declaration on next line + p.syntaxError("unexpected semicolon or newline before {") + } else { + p.syntaxError("non-declaration statement outside function body") + } + p.advance(_Import, _Const, _Type, _Var, _Func) + continue + } + + // Reset p.pragma BEFORE advancing to the next token (consuming ';') + // since comments before may set pragmas for the next function decl. 
+ p.clearPragma() + + if p.tok != _EOF && !p.got(_Semi) { + p.syntaxError("after top level declaration") + p.advance(_Import, _Const, _Type, _Var, _Func) + } + } + // p.tok == _EOF + + p.clearPragma() + f.EOF = p.pos() + + return f +} + +func isEmptyFuncDecl(dcl Decl) bool { + f, ok := dcl.(*FuncDecl) + return ok && f.Body == nil +} + +// ---------------------------------------------------------------------------- +// Declarations + +// list parses a possibly empty, sep-separated list of elements, optionally +// followed by sep, and closed by close (or EOF). sep must be one of _Comma +// or _Semi, and close must be one of _Rparen, _Rbrace, or _Rbrack. +// +// For each list element, f is called. Specifically, unless we're at close +// (or EOF), f is called at least once. After f returns true, no more list +// elements are accepted. list returns the position of the closing token. +// +// list = [ f { sep f } [sep] ] close . +func (p *parser) list(context string, sep, close token, f func() bool) Pos { + if debug && (sep != _Comma && sep != _Semi || close != _Rparen && close != _Rbrace && close != _Rbrack) { + panic("invalid sep or close argument for list") + } + + done := false + for p.tok != _EOF && p.tok != close && !done { + done = f() + // sep is optional before close + if !p.got(sep) && p.tok != close { + p.syntaxError(fmt.Sprintf("in %s; possibly missing %s or %s", context, tokstring(sep), tokstring(close))) + p.advance(_Rparen, _Rbrack, _Rbrace) + if p.tok != close { + // position could be better but we had an error so we don't care + return p.pos() + } + } + } + + pos := p.pos() + p.want(close) + return pos +} + +// appendGroup(f) = f | "(" { f ";" } ")" . // ";" is optional before ")" +func (p *parser) appendGroup(list []Decl, f func(*Group) Decl) []Decl { + if p.tok == _Lparen { + g := new(Group) + p.clearPragma() + p.next() // must consume "(" after calling clearPragma! 
+ p.list("grouped declaration", _Semi, _Rparen, func() bool { + if x := f(g); x != nil { + list = append(list, x) + } + return false + }) + } else { + if x := f(nil); x != nil { + list = append(list, x) + } + } + return list +} + +// ImportSpec = [ "." | PackageName ] ImportPath . +// ImportPath = string_lit . +func (p *parser) importDecl(group *Group) Decl { + if trace { + defer p.trace("importDecl")() + } + + d := new(ImportDecl) + d.pos = p.pos() + d.Group = group + d.Pragma = p.takePragma() + + switch p.tok { + case _Name: + d.LocalPkgName = p.name() + case _Dot: + d.LocalPkgName = NewName(p.pos(), ".") + p.next() + } + d.Path = p.oliteral() + if d.Path == nil { + p.syntaxError("missing import path") + p.advance(_Semi, _Rparen) + return d + } + if !d.Path.Bad && d.Path.Kind != StringLit { + p.syntaxErrorAt(d.Path.Pos(), "import path must be a string") + d.Path.Bad = true + } + // d.Path.Bad || d.Path.Kind == StringLit + + return d +} + +// ConstSpec = IdentifierList [ [ Type ] "=" ExpressionList ] . +func (p *parser) constDecl(group *Group) Decl { + if trace { + defer p.trace("constDecl")() + } + + d := new(ConstDecl) + d.pos = p.pos() + d.Group = group + d.Pragma = p.takePragma() + + d.NameList = p.nameList(p.name()) + if p.tok != _EOF && p.tok != _Semi && p.tok != _Rparen { + d.Type = p.typeOrNil() + if p.gotAssign() { + d.Values = p.exprList() + } + } + + return d +} + +// TypeSpec = identifier [ TypeParams ] [ "=" ] Type . +func (p *parser) typeDecl(group *Group) Decl { + if trace { + defer p.trace("typeDecl")() + } + + d := new(TypeDecl) + d.pos = p.pos() + d.Group = group + d.Pragma = p.takePragma() + + d.Name = p.name() + if p.tok == _Lbrack { + // d.Name "[" ... + // array/slice type or type parameter list + pos := p.pos() + p.next() + switch p.tok { + case _Name: + // We may have an array type or a type parameter list. 
+ // In either case we expect an expression x (which may + // just be a name, or a more complex expression) which + // we can analyze further. + // + // A type parameter list may have a type bound starting + // with a "[" as in: P []E. In that case, simply parsing + // an expression would lead to an error: P[] is invalid. + // But since index or slice expressions are never constant + // and thus invalid array length expressions, if the name + // is followed by "[" it must be the start of an array or + // slice constraint. Only if we don't see a "[" do we + // need to parse a full expression. Notably, name <- x + // is not a concern because name <- x is a statement and + // not an expression. + var x Expr = p.name() + if p.tok != _Lbrack { + // To parse the expression starting with name, expand + // the call sequence we would get by passing in name + // to parser.expr, and pass in name to parser.pexpr. + p.xnest++ + x = p.binaryExpr(p.pexpr(x, false), 0) + p.xnest-- + } + // Analyze expression x. If we can split x into a type parameter + // name, possibly followed by a type parameter type, we consider + // this the start of a type parameter list, with some caveats: + // a single name followed by "]" tilts the decision towards an + // array declaration; a type parameter type that could also be + // an ordinary expression but which is followed by a comma tilts + // the decision towards a type parameter list. + if pname, ptype := extractName(x, p.tok == _Comma); pname != nil && (ptype != nil || p.tok != _Rbrack) { + // d.Name "[" pname ... + // d.Name "[" pname ptype ... + // d.Name "[" pname ptype "," ... + d.TParamList = p.paramList(pname, ptype, _Rbrack, true) // ptype may be nil + d.Alias = p.gotAssign() + d.Type = p.typeOrNil() + } else { + // d.Name "[" pname "]" ... + // d.Name "[" x ... + d.Type = p.arrayType(pos, x) + } + case _Rbrack: + // d.Name "[" "]" ... + p.next() + d.Type = p.sliceType(pos) + default: + // d.Name "[" ... 
+ d.Type = p.arrayType(pos, nil) + } + } else { + d.Alias = p.gotAssign() + d.Type = p.typeOrNil() + } + + if d.Type == nil { + d.Type = p.badExpr() + p.syntaxError("in type declaration") + p.advance(_Semi, _Rparen) + } + + return d +} + +// extractName splits the expression x into (name, expr) if syntactically +// x can be written as name expr. The split only happens if expr is a type +// element (per the isTypeElem predicate) or if force is set. +// If x is just a name, the result is (name, nil). If the split succeeds, +// the result is (name, expr). Otherwise the result is (nil, x). +// Examples: +// +// x force name expr +// ------------------------------------ +// P*[]int T/F P *[]int +// P*E T P *E +// P*E F nil P*E +// P([]int) T/F P []int +// P(E) T P E +// P(E) F nil P(E) +// P*E|F|~G T/F P *E|F|~G +// P*E|F|G T P *E|F|G +// P*E|F|G F nil P*E|F|G +func extractName(x Expr, force bool) (*Name, Expr) { + switch x := x.(type) { + case *Name: + return x, nil + case *Operation: + if x.Y == nil { + break // unary expr + } + switch x.Op { + case Mul: + if name, _ := x.X.(*Name); name != nil && (force || isTypeElem(x.Y)) { + // x = name *x.Y + op := *x + op.X, op.Y = op.Y, nil // change op into unary *op.Y + return name, &op + } + case Or: + if name, lhs := extractName(x.X, force || isTypeElem(x.Y)); name != nil && lhs != nil { + // x = name lhs|x.Y + op := *x + op.X = lhs + return name, &op + } + } + case *CallExpr: + if name, _ := x.Fun.(*Name); name != nil { + if len(x.ArgList) == 1 && !x.HasDots && (force || isTypeElem(x.ArgList[0])) { + // x = name "(" x.ArgList[0] ")" + return name, x.ArgList[0] + } + } + } + return nil, x +} + +// isTypeElem reports whether x is a (possibly parenthesized) type element expression. +// The result is false if x could be a type element OR an ordinary (value) expression. 
+func isTypeElem(x Expr) bool { + switch x := x.(type) { + case *ArrayType, *StructType, *FuncType, *InterfaceType, *SliceType, *MapType, *ChanType: + return true + case *Operation: + return isTypeElem(x.X) || (x.Y != nil && isTypeElem(x.Y)) || x.Op == Tilde + case *ParenExpr: + return isTypeElem(x.X) + } + return false +} + +// VarSpec = IdentifierList ( Type [ "=" ExpressionList ] | "=" ExpressionList ) . +func (p *parser) varDecl(group *Group) Decl { + if trace { + defer p.trace("varDecl")() + } + + d := new(VarDecl) + d.pos = p.pos() + d.Group = group + d.Pragma = p.takePragma() + + d.NameList = p.nameList(p.name()) + if p.gotAssign() { + d.Values = p.exprList() + } else { + d.Type = p.type_() + if p.gotAssign() { + d.Values = p.exprList() + } + } + + return d +} + +// FunctionDecl = "func" FunctionName [ TypeParams ] ( Function | Signature ) . +// FunctionName = identifier . +// Function = Signature FunctionBody . +// MethodDecl = "func" Receiver MethodName ( Function | Signature ) . +// Receiver = Parameters . 
+func (p *parser) funcDeclOrNil() *FuncDecl { + if trace { + defer p.trace("funcDecl")() + } + + f := new(FuncDecl) + f.pos = p.pos() + f.Pragma = p.takePragma() + + var context string + if p.got(_Lparen) { + context = "method" + rcvr := p.paramList(nil, nil, _Rparen, false) + switch len(rcvr) { + case 0: + p.error("method has no receiver") + default: + p.error("method has multiple receivers") + fallthrough + case 1: + f.Recv = rcvr[0] + } + } + + if p.tok == _Name { + f.Name = p.name() + f.TParamList, f.Type = p.funcType(context) + } else { + f.Name = NewName(p.pos(), "_") + f.Type = new(FuncType) + f.Type.pos = p.pos() + msg := "expected name or (" + if context != "" { + msg = "expected name" + } + p.syntaxError(msg) + p.advance(_Lbrace, _Semi) + } + + if p.tok == _Lbrace { + f.Body = p.funcBody() + } + + return f +} + +func (p *parser) funcBody() *BlockStmt { + p.fnest++ + errcnt := p.errcnt + body := p.blockStmt("") + p.fnest-- + + // Don't check branches if there were syntax errors in the function + // as it may lead to spurious errors (e.g., see test/switch2.go) or + // possibly crashes due to incomplete syntax trees. + if p.mode&CheckBranches != 0 && errcnt == p.errcnt { + checkBranches(body, p.errh) + } + + return body +} + +// ---------------------------------------------------------------------------- +// Expressions + +func (p *parser) expr() Expr { + if trace { + defer p.trace("expr")() + } + + return p.binaryExpr(nil, 0) +} + +// Expression = UnaryExpr | Expression binary_op Expression . +func (p *parser) binaryExpr(x Expr, prec int) Expr { + // don't trace binaryExpr - only leads to overly nested trace output + + if x == nil { + x = p.unaryExpr() + } + for (p.tok == _Operator || p.tok == _Star) && p.prec > prec { + t := new(Operation) + t.pos = p.pos() + t.Op = p.op + tprec := p.prec + p.next() + t.X = x + t.Y = p.binaryExpr(nil, tprec) + x = t + } + return x +} + +// UnaryExpr = PrimaryExpr | unary_op UnaryExpr . 
+func (p *parser) unaryExpr() Expr { + if trace { + defer p.trace("unaryExpr")() + } + + switch p.tok { + case _Operator, _Star: + switch p.op { + case Mul, Add, Sub, Not, Xor, Tilde: + x := new(Operation) + x.pos = p.pos() + x.Op = p.op + p.next() + x.X = p.unaryExpr() + return x + + case And: + x := new(Operation) + x.pos = p.pos() + x.Op = And + p.next() + // unaryExpr may have returned a parenthesized composite literal + // (see comment in operand) - remove parentheses if any + x.X = Unparen(p.unaryExpr()) + return x + } + + case _Arrow: + // receive op (<-x) or receive-only channel (<-chan E) + pos := p.pos() + p.next() + + // If the next token is _Chan we still don't know if it is + // a channel (<-chan int) or a receive op (<-chan int(ch)). + // We only know once we have found the end of the unaryExpr. + + x := p.unaryExpr() + + // There are two cases: + // + // <-chan... => <-x is a channel type + // <-x => <-x is a receive operation + // + // In the first case, <- must be re-associated with + // the channel type parsed already: + // + // <-(chan E) => (<-chan E) + // <-(chan<-E) => (<-chan (<-E)) + + if _, ok := x.(*ChanType); ok { + // x is a channel type => re-associate <- + dir := SendOnly + t := x + for dir == SendOnly { + c, ok := t.(*ChanType) + if !ok { + break + } + dir = c.Dir + if dir == RecvOnly { + // t is type <-chan E but <-<-chan E is not permitted + // (report same error as for "type _ <-<-chan E") + p.syntaxError("unexpected <-, expected chan") + // already progressed, no need to advance + } + c.Dir = RecvOnly + t = c.Elem + } + if dir == SendOnly { + // channel dir is <- but channel element E is not a channel + // (report same error as for "type _ <-chan<-E") + p.syntaxError(fmt.Sprintf("unexpected %s, expected chan", String(t))) + // already progressed, no need to advance + } + return x + } + + // x is not a channel type => we have a receive op + o := new(Operation) + o.pos = pos + o.Op = Recv + o.X = x + return o + } + + // 
TODO(mdempsky): We need parens here so we can report an + // error for "(x) := true". It should be possible to detect + // and reject that more efficiently though. + return p.pexpr(nil, true) +} + +// callStmt parses call-like statements that can be preceded by 'defer' and 'go'. +func (p *parser) callStmt() *CallStmt { + if trace { + defer p.trace("callStmt")() + } + + s := new(CallStmt) + s.pos = p.pos() + s.Tok = p.tok // _Defer or _Go + p.next() + + x := p.pexpr(nil, p.tok == _Lparen) // keep_parens so we can report error below + if t := Unparen(x); t != x { + p.errorAt(x.Pos(), fmt.Sprintf("expression in %s must not be parenthesized", s.Tok)) + // already progressed, no need to advance + x = t + } + + s.Call = x + return s +} + +// Operand = Literal | OperandName | MethodExpr | "(" Expression ")" . +// Literal = BasicLit | CompositeLit | FunctionLit . +// BasicLit = int_lit | float_lit | imaginary_lit | rune_lit | string_lit . +// OperandName = identifier | QualifiedIdent. +func (p *parser) operand(keep_parens bool) Expr { + if trace { + defer p.trace("operand " + p.tok.String())() + } + + switch p.tok { + case _Name: + return p.name() + + case _Literal: + return p.oliteral() + + case _Lparen: + pos := p.pos() + p.next() + p.xnest++ + x := p.expr() + p.xnest-- + p.want(_Rparen) + + // Optimization: Record presence of ()'s only where needed + // for error reporting. Don't bother in other cases; it is + // just a waste of memory and time. + // + // Parentheses are not permitted around T in a composite + // literal T{}. If the next token is a {, assume x is a + // composite literal type T (it may not be, { could be + // the opening brace of a block, but we don't know yet). + if p.tok == _Lbrace { + keep_parens = true + } + + // Parentheses are also not permitted around the expression + // in a go/defer statement. In that case, operand is called + // with keep_parens set. 
+ if keep_parens { + px := new(ParenExpr) + px.pos = pos + px.X = x + x = px + } + return x + + case _Func: + pos := p.pos() + p.next() + _, ftyp := p.funcType("function type") + if p.tok == _Lbrace { + p.xnest++ + + f := new(FuncLit) + f.pos = pos + f.Type = ftyp + f.Body = p.funcBody() + + p.xnest-- + return f + } + return ftyp + + case _Lbrack, _Chan, _Map, _Struct, _Interface: + return p.type_() // othertype + + default: + x := p.badExpr() + p.syntaxError("expected expression") + p.advance(_Rparen, _Rbrack, _Rbrace) + return x + } + + // Syntactically, composite literals are operands. Because a complit + // type may be a qualified identifier which is handled by pexpr + // (together with selector expressions), complits are parsed there + // as well (operand is only called from pexpr). +} + +// pexpr parses a PrimaryExpr. +// +// PrimaryExpr = +// Operand | +// Conversion | +// PrimaryExpr Selector | +// PrimaryExpr Index | +// PrimaryExpr Slice | +// PrimaryExpr TypeAssertion | +// PrimaryExpr Arguments . +// +// Selector = "." identifier . +// Index = "[" Expression "]" . +// Slice = "[" ( [ Expression ] ":" [ Expression ] ) | +// ( [ Expression ] ":" Expression ":" Expression ) +// "]" . +// TypeAssertion = "." "(" Type ")" . +// Arguments = "(" [ ( ExpressionList | Type [ "," ExpressionList ] ) [ "..." ] [ "," ] ] ")" . +func (p *parser) pexpr(x Expr, keep_parens bool) Expr { + if trace { + defer p.trace("pexpr")() + } + + if x == nil { + x = p.operand(keep_parens) + } + +loop: + for { + pos := p.pos() + switch p.tok { + case _Dot: + p.next() + switch p.tok { + case _Name: + // pexpr '.' 
sym + t := new(SelectorExpr) + t.pos = pos + t.X = x + t.Sel = p.name() + x = t + + case _Lparen: + p.next() + if p.got(_Type) { + t := new(TypeSwitchGuard) + // t.Lhs is filled in by parser.simpleStmt + t.pos = pos + t.X = x + x = t + } else { + t := new(AssertExpr) + t.pos = pos + t.X = x + t.Type = p.type_() + x = t + } + p.want(_Rparen) + + default: + p.syntaxError("expected name or (") + p.advance(_Semi, _Rparen) + } + + case _Lbrack: + p.next() + + var i Expr + if p.tok != _Colon { + var comma bool + if p.tok == _Rbrack { + // invalid empty instance, slice or index expression; accept but complain + p.syntaxError("expected operand") + i = p.badExpr() + } else { + i, comma = p.typeList(false) + } + if comma || p.tok == _Rbrack { + p.want(_Rbrack) + // x[], x[i,] or x[i, j, ...] + t := new(IndexExpr) + t.pos = pos + t.X = x + t.Index = i + x = t + break + } + } + + // x[i:... + // For better error message, don't simply use p.want(_Colon) here (go.dev/issue/47704). + if !p.got(_Colon) { + p.syntaxError("expected comma, : or ]") + p.advance(_Comma, _Colon, _Rbrack) + } + p.xnest++ + t := new(SliceExpr) + t.pos = pos + t.X = x + t.Index[0] = i + if p.tok != _Colon && p.tok != _Rbrack { + // x[i:j... + t.Index[1] = p.expr() + } + if p.tok == _Colon { + t.Full = true + // x[i:j:...] + if t.Index[1] == nil { + p.error("middle index required in 3-index slice") + t.Index[1] = p.badExpr() + } + p.next() + if p.tok != _Rbrack { + // x[i:j:k... 
+ t.Index[2] = p.expr() + } else { + p.error("final index required in 3-index slice") + t.Index[2] = p.badExpr() + } + } + p.xnest-- + p.want(_Rbrack) + x = t + + case _Lparen: + t := new(CallExpr) + t.pos = pos + p.next() + t.Fun = x + t.ArgList, t.HasDots = p.argList() + x = t + + case _Lbrace: + // operand may have returned a parenthesized complit + // type; accept it but complain if we have a complit + t := Unparen(x) + // determine if '{' belongs to a composite literal or a block statement + complit_ok := false + switch t.(type) { + case *Name, *SelectorExpr: + if p.xnest >= 0 { + // x is possibly a composite literal type + complit_ok = true + } + case *IndexExpr: + if p.xnest >= 0 && !isValue(t) { + // x is possibly a composite literal type + complit_ok = true + } + case *ArrayType, *SliceType, *StructType, *MapType: + // x is a comptype + complit_ok = true + } + if !complit_ok { + break loop + } + if t != x { + p.syntaxError("cannot parenthesize type in composite literal") + // already progressed, no need to advance + } + n := p.complitexpr() + n.Type = x + x = n + + default: + break loop + } + } + + return x +} + +// isValue reports whether x syntactically must be a value (and not a type) expression. +func isValue(x Expr) bool { + switch x := x.(type) { + case *BasicLit, *CompositeLit, *FuncLit, *SliceExpr, *AssertExpr, *TypeSwitchGuard, *CallExpr: + return true + case *Operation: + return x.Op != Mul || x.Y != nil // *T may be a type + case *ParenExpr: + return isValue(x.X) + case *IndexExpr: + return isValue(x.X) || isValue(x.Index) + } + return false +} + +// Element = Expression | LiteralValue . +func (p *parser) bare_complitexpr() Expr { + if trace { + defer p.trace("bare_complitexpr")() + } + + if p.tok == _Lbrace { + // '{' start_complit braced_keyval_list '}' + return p.complitexpr() + } + + return p.expr() +} + +// LiteralValue = "{" [ ElementList [ "," ] ] "}" . 
+func (p *parser) complitexpr() *CompositeLit { + if trace { + defer p.trace("complitexpr")() + } + + x := new(CompositeLit) + x.pos = p.pos() + + p.xnest++ + p.want(_Lbrace) + x.Rbrace = p.list("composite literal", _Comma, _Rbrace, func() bool { + // value + e := p.bare_complitexpr() + if p.tok == _Colon { + // key ':' value + l := new(KeyValueExpr) + l.pos = p.pos() + p.next() + l.Key = e + l.Value = p.bare_complitexpr() + e = l + x.NKeys++ + } + x.ElemList = append(x.ElemList, e) + return false + }) + p.xnest-- + + return x +} + +// ---------------------------------------------------------------------------- +// Types + +func (p *parser) type_() Expr { + if trace { + defer p.trace("type_")() + } + + typ := p.typeOrNil() + if typ == nil { + typ = p.badExpr() + p.syntaxError("expected type") + p.advance(_Comma, _Colon, _Semi, _Rparen, _Rbrack, _Rbrace) + } + + return typ +} + +func newIndirect(pos Pos, typ Expr) Expr { + o := new(Operation) + o.pos = pos + o.Op = Mul + o.X = typ + return o +} + +// typeOrNil is like type_ but it returns nil if there was no type +// instead of reporting an error. +// +// Type = TypeName | TypeLit | "(" Type ")" . +// TypeName = identifier | QualifiedIdent . +// TypeLit = ArrayType | StructType | PointerType | FunctionType | InterfaceType | +// SliceType | MapType | Channel_Type . 
+func (p *parser) typeOrNil() Expr { + if trace { + defer p.trace("typeOrNil")() + } + + pos := p.pos() + switch p.tok { + case _Star: + // ptrtype + p.next() + return newIndirect(pos, p.type_()) + + case _Arrow: + // recvchantype + p.next() + p.want(_Chan) + t := new(ChanType) + t.pos = pos + t.Dir = RecvOnly + t.Elem = p.chanElem() + return t + + case _Func: + // fntype + p.next() + _, t := p.funcType("function type") + return t + + case _Lbrack: + // '[' oexpr ']' ntype + // '[' _DotDotDot ']' ntype + p.next() + if p.got(_Rbrack) { + return p.sliceType(pos) + } + return p.arrayType(pos, nil) + + case _Chan: + // _Chan non_recvchantype + // _Chan _Comm ntype + p.next() + t := new(ChanType) + t.pos = pos + if p.got(_Arrow) { + t.Dir = SendOnly + } + t.Elem = p.chanElem() + return t + + case _Map: + // _Map '[' ntype ']' ntype + p.next() + p.want(_Lbrack) + t := new(MapType) + t.pos = pos + t.Key = p.type_() + p.want(_Rbrack) + t.Value = p.type_() + return t + + case _Struct: + return p.structType() + + case _Interface: + return p.interfaceType() + + case _Name: + return p.qualifiedName(nil) + + case _Lparen: + p.next() + t := p.type_() + p.want(_Rparen) + return t + } + + return nil +} + +func (p *parser) typeInstance(typ Expr) Expr { + if trace { + defer p.trace("typeInstance")() + } + + pos := p.pos() + p.want(_Lbrack) + x := new(IndexExpr) + x.pos = pos + x.X = typ + if p.tok == _Rbrack { + p.syntaxError("expected type argument list") + x.Index = p.badExpr() + } else { + x.Index, _ = p.typeList(true) + } + p.want(_Rbrack) + return x +} + +// If context != "", type parameters are not permitted. 
+func (p *parser) funcType(context string) ([]*Field, *FuncType) { + if trace { + defer p.trace("funcType")() + } + + typ := new(FuncType) + typ.pos = p.pos() + + var tparamList []*Field + if p.got(_Lbrack) { + if context != "" { + // accept but complain + p.syntaxErrorAt(typ.pos, context+" must have no type parameters") + } + if p.tok == _Rbrack { + p.syntaxError("empty type parameter list") + p.next() + } else { + tparamList = p.paramList(nil, nil, _Rbrack, true) + } + } + + p.want(_Lparen) + typ.ParamList = p.paramList(nil, nil, _Rparen, false) + typ.ResultList = p.funcResult() + + return tparamList, typ +} + +// "[" has already been consumed, and pos is its position. +// If len != nil it is the already consumed array length. +func (p *parser) arrayType(pos Pos, len Expr) Expr { + if trace { + defer p.trace("arrayType")() + } + + if len == nil && !p.got(_DotDotDot) { + p.xnest++ + len = p.expr() + p.xnest-- + } + if p.tok == _Comma { + // Trailing commas are accepted in type parameter + // lists but not in array type declarations. + // Accept for better error handling but complain. + p.syntaxError("unexpected comma; expected ]") + p.next() + } + p.want(_Rbrack) + t := new(ArrayType) + t.pos = pos + t.Len = len + t.Elem = p.type_() + return t +} + +// "[" and "]" have already been consumed, and pos is the position of "[". +func (p *parser) sliceType(pos Pos) Expr { + t := new(SliceType) + t.pos = pos + t.Elem = p.type_() + return t +} + +func (p *parser) chanElem() Expr { + if trace { + defer p.trace("chanElem")() + } + + typ := p.typeOrNil() + if typ == nil { + typ = p.badExpr() + p.syntaxError("missing channel element type") + // assume element type is simply absent - don't advance + } + + return typ +} + +// StructType = "struct" "{" { FieldDecl ";" } "}" . 
+func (p *parser) structType() *StructType { + if trace { + defer p.trace("structType")() + } + + typ := new(StructType) + typ.pos = p.pos() + + p.want(_Struct) + p.want(_Lbrace) + p.list("struct type", _Semi, _Rbrace, func() bool { + p.fieldDecl(typ) + return false + }) + + return typ +} + +// InterfaceType = "interface" "{" { ( MethodDecl | EmbeddedElem ) ";" } "}" . +func (p *parser) interfaceType() *InterfaceType { + if trace { + defer p.trace("interfaceType")() + } + + typ := new(InterfaceType) + typ.pos = p.pos() + + p.want(_Interface) + p.want(_Lbrace) + p.list("interface type", _Semi, _Rbrace, func() bool { + var f *Field + if p.tok == _Name { + f = p.methodDecl() + } + if f == nil || f.Name == nil { + f = p.embeddedElem(f) + } + typ.MethodList = append(typ.MethodList, f) + return false + }) + + return typ +} + +// Result = Parameters | Type . +func (p *parser) funcResult() []*Field { + if trace { + defer p.trace("funcResult")() + } + + if p.got(_Lparen) { + return p.paramList(nil, nil, _Rparen, false) + } + + pos := p.pos() + if typ := p.typeOrNil(); typ != nil { + f := new(Field) + f.pos = pos + f.Type = typ + return []*Field{f} + } + + return nil +} + +func (p *parser) addField(styp *StructType, pos Pos, name *Name, typ Expr, tag *BasicLit) { + if tag != nil { + for i := len(styp.FieldList) - len(styp.TagList); i > 0; i-- { + styp.TagList = append(styp.TagList, nil) + } + styp.TagList = append(styp.TagList, tag) + } + + f := new(Field) + f.pos = pos + f.Name = name + f.Type = typ + styp.FieldList = append(styp.FieldList, f) + + if debug && tag != nil && len(styp.FieldList) != len(styp.TagList) { + panic("inconsistent struct field list") + } +} + +// FieldDecl = (IdentifierList Type | AnonymousField) [ Tag ] . +// AnonymousField = [ "*" ] TypeName . +// Tag = string_lit . 
+func (p *parser) fieldDecl(styp *StructType) { + if trace { + defer p.trace("fieldDecl")() + } + + pos := p.pos() + switch p.tok { + case _Name: + name := p.name() + if p.tok == _Dot || p.tok == _Literal || p.tok == _Semi || p.tok == _Rbrace { + // embedded type + typ := p.qualifiedName(name) + tag := p.oliteral() + p.addField(styp, pos, nil, typ, tag) + break + } + + // name1, name2, ... Type [ tag ] + names := p.nameList(name) + var typ Expr + + // Careful dance: We don't know if we have an embedded instantiated + // type T[P1, P2, ...] or a field T of array/slice type [P]E or []E. + if len(names) == 1 && p.tok == _Lbrack { + typ = p.arrayOrTArgs() + if typ, ok := typ.(*IndexExpr); ok { + // embedded type T[P1, P2, ...] + typ.X = name // name == names[0] + tag := p.oliteral() + p.addField(styp, pos, nil, typ, tag) + break + } + } else { + // T P + typ = p.type_() + } + + tag := p.oliteral() + + for _, name := range names { + p.addField(styp, name.Pos(), name, typ, tag) + } + + case _Star: + p.next() + var typ Expr + if p.tok == _Lparen { + // *(T) + p.syntaxError("cannot parenthesize embedded type") + p.next() + typ = p.qualifiedName(nil) + p.got(_Rparen) // no need to complain if missing + } else { + // *T + typ = p.qualifiedName(nil) + } + tag := p.oliteral() + p.addField(styp, pos, nil, newIndirect(pos, typ), tag) + + case _Lparen: + p.syntaxError("cannot parenthesize embedded type") + p.next() + var typ Expr + if p.tok == _Star { + // (*T) + pos := p.pos() + p.next() + typ = newIndirect(pos, p.qualifiedName(nil)) + } else { + // (T) + typ = p.qualifiedName(nil) + } + p.got(_Rparen) // no need to complain if missing + tag := p.oliteral() + p.addField(styp, pos, nil, typ, tag) + + default: + p.syntaxError("expected field name or embedded type") + p.advance(_Semi, _Rbrace) + } +} + +func (p *parser) arrayOrTArgs() Expr { + if trace { + defer p.trace("arrayOrTArgs")() + } + + pos := p.pos() + p.want(_Lbrack) + if p.got(_Rbrack) { + return p.sliceType(pos) + } + 
+ // x [n]E or x[n,], x[n1, n2], ... + n, comma := p.typeList(false) + p.want(_Rbrack) + if !comma { + if elem := p.typeOrNil(); elem != nil { + // x [n]E + t := new(ArrayType) + t.pos = pos + t.Len = n + t.Elem = elem + return t + } + } + + // x[n,], x[n1, n2], ... + t := new(IndexExpr) + t.pos = pos + // t.X will be filled in by caller + t.Index = n + return t +} + +func (p *parser) oliteral() *BasicLit { + if p.tok == _Literal { + b := new(BasicLit) + b.pos = p.pos() + b.Value = p.lit + b.Kind = p.kind + b.Bad = p.bad + p.next() + return b + } + return nil +} + +// MethodSpec = MethodName Signature | InterfaceTypeName . +// MethodName = identifier . +// InterfaceTypeName = TypeName . +func (p *parser) methodDecl() *Field { + if trace { + defer p.trace("methodDecl")() + } + + f := new(Field) + f.pos = p.pos() + name := p.name() + + const context = "interface method" + + switch p.tok { + case _Lparen: + // method + f.Name = name + _, f.Type = p.funcType(context) + + case _Lbrack: + // Careful dance: We don't know if we have a generic method m[T C](x T) + // or an embedded instantiated type T[P1, P2] (we accept generic methods + // for generality and robustness of parsing but complain with an error). + pos := p.pos() + p.next() + + // Empty type parameter or argument lists are not permitted. + // Treat as if [] were absent. + if p.tok == _Rbrack { + // name[] + pos := p.pos() + p.next() + if p.tok == _Lparen { + // name[]( + p.errorAt(pos, "empty type parameter list") + f.Name = name + _, f.Type = p.funcType(context) + } else { + p.errorAt(pos, "empty type argument list") + f.Type = name + } + break + } + + // A type argument list looks like a parameter list with only + // types. Parse a parameter list and decide afterwards. + list := p.paramList(nil, nil, _Rbrack, false) + if len(list) == 0 { + // The type parameter list is not [] but we got nothing + // due to other errors (reported by paramList). Treat + // as if [] were absent. 
+ if p.tok == _Lparen { + f.Name = name + _, f.Type = p.funcType(context) + } else { + f.Type = name + } + break + } + + // len(list) > 0 + if list[0].Name != nil { + // generic method + f.Name = name + _, f.Type = p.funcType(context) + p.errorAt(pos, "interface method must have no type parameters") + break + } + + // embedded instantiated type + t := new(IndexExpr) + t.pos = pos + t.X = name + if len(list) == 1 { + t.Index = list[0].Type + } else { + // len(list) > 1 + l := new(ListExpr) + l.pos = list[0].Pos() + l.ElemList = make([]Expr, len(list)) + for i := range list { + l.ElemList[i] = list[i].Type + } + t.Index = l + } + f.Type = t + + default: + // embedded type + f.Type = p.qualifiedName(name) + } + + return f +} + +// EmbeddedElem = MethodSpec | EmbeddedTerm { "|" EmbeddedTerm } . +func (p *parser) embeddedElem(f *Field) *Field { + if trace { + defer p.trace("embeddedElem")() + } + + if f == nil { + f = new(Field) + f.pos = p.pos() + f.Type = p.embeddedTerm() + } + + for p.tok == _Operator && p.op == Or { + t := new(Operation) + t.pos = p.pos() + t.Op = Or + p.next() + t.X = f.Type + t.Y = p.embeddedTerm() + f.Type = t + } + + return f +} + +// EmbeddedTerm = [ "~" ] Type . +func (p *parser) embeddedTerm() Expr { + if trace { + defer p.trace("embeddedTerm")() + } + + if p.tok == _Operator && p.op == Tilde { + t := new(Operation) + t.pos = p.pos() + t.Op = Tilde + p.next() + t.X = p.type_() + return t + } + + t := p.typeOrNil() + if t == nil { + t = p.badExpr() + p.syntaxError("expected ~ term or type") + p.advance(_Operator, _Semi, _Rparen, _Rbrack, _Rbrace) + } + + return t +} + +// ParameterDecl = [ IdentifierList ] [ "..." ] Type . 
+func (p *parser) paramDeclOrNil(name *Name, follow token) *Field { + if trace { + defer p.trace("paramDeclOrNil")() + } + + // type set notation is ok in type parameter lists + typeSetsOk := follow == _Rbrack + + pos := p.pos() + if name != nil { + pos = name.pos + } else if typeSetsOk && p.tok == _Operator && p.op == Tilde { + // "~" ... + return p.embeddedElem(nil) + } + + f := new(Field) + f.pos = pos + + if p.tok == _Name || name != nil { + // name + if name == nil { + name = p.name() + } + + if p.tok == _Lbrack { + // name "[" ... + f.Type = p.arrayOrTArgs() + if typ, ok := f.Type.(*IndexExpr); ok { + // name "[" ... "]" + typ.X = name + } else { + // name "[" n "]" E + f.Name = name + } + if typeSetsOk && p.tok == _Operator && p.op == Or { + // name "[" ... "]" "|" ... + // name "[" n "]" E "|" ... + f = p.embeddedElem(f) + } + return f + } + + if p.tok == _Dot { + // name "." ... + f.Type = p.qualifiedName(name) + if typeSetsOk && p.tok == _Operator && p.op == Or { + // name "." name "|" ... + f = p.embeddedElem(f) + } + return f + } + + if typeSetsOk && p.tok == _Operator && p.op == Or { + // name "|" ... + f.Type = name + return p.embeddedElem(f) + } + + f.Name = name + } + + if p.tok == _DotDotDot { + // [name] "..." ... + t := new(DotsType) + t.pos = p.pos() + p.next() + t.Elem = p.typeOrNil() + if t.Elem == nil { + t.Elem = p.badExpr() + p.syntaxError("... is missing type") + } + f.Type = t + return f + } + + if typeSetsOk && p.tok == _Operator && p.op == Tilde { + // [name] "~" ... + f.Type = p.embeddedElem(nil).Type + return f + } + + f.Type = p.typeOrNil() + if typeSetsOk && p.tok == _Operator && p.op == Or && f.Type != nil { + // [name] type "|" + f = p.embeddedElem(f) + } + if f.Name != nil || f.Type != nil { + return f + } + + p.syntaxError("expected " + tokstring(follow)) + p.advance(_Comma, follow) + return nil +} + +// Parameters = "(" [ ParameterList [ "," ] ] ")" . +// ParameterList = ParameterDecl { "," ParameterDecl } . 
+// "(" or "[" has already been consumed. +// If name != nil, it is the first name after "(" or "[". +// If typ != nil, name must be != nil, and (name, typ) is the first field in the list. +// In the result list, either all fields have a name, or no field has a name. +func (p *parser) paramList(name *Name, typ Expr, close token, requireNames bool) (list []*Field) { + if trace { + defer p.trace("paramList")() + } + + // p.list won't invoke its function argument if we're at the end of the + // parameter list. If we have a complete field, handle this case here. + if name != nil && typ != nil && p.tok == close { + p.next() + par := new(Field) + par.pos = name.pos + par.Name = name + par.Type = typ + return []*Field{par} + } + + var named int // number of parameters that have an explicit name and type + var typed int // number of parameters that have an explicit type + end := p.list("parameter list", _Comma, close, func() bool { + var par *Field + if typ != nil { + if debug && name == nil { + panic("initial type provided without name") + } + par = new(Field) + par.pos = name.pos + par.Name = name + par.Type = typ + } else { + par = p.paramDeclOrNil(name, close) + } + name = nil // 1st name was consumed if present + typ = nil // 1st type was consumed if present + if par != nil { + if debug && par.Name == nil && par.Type == nil { + panic("parameter without name or type") + } + if par.Name != nil && par.Type != nil { + named++ + } + if par.Type != nil { + typed++ + } + list = append(list, par) + } + return false + }) + + if len(list) == 0 { + return + } + + // distribute parameter types (len(list) > 0) + if named == 0 && !requireNames { + // all unnamed and we're not in a type parameter list => found names are named types + for _, par := range list { + if typ := par.Name; typ != nil { + par.Type = typ + par.Name = nil + } + } + } else if named != len(list) { + // some named or we're in a type parameter list => all must be named + var errPos Pos // left-most error position 
(or unknown) + var typ Expr // current type (from right to left) + for i := len(list) - 1; i >= 0; i-- { + par := list[i] + if par.Type != nil { + typ = par.Type + if par.Name == nil { + errPos = StartPos(typ) + par.Name = NewName(errPos, "_") + } + } else if typ != nil { + par.Type = typ + } else { + // par.Type == nil && typ == nil => we only have a par.Name + errPos = par.Name.Pos() + t := p.badExpr() + t.pos = errPos // correct position + par.Type = t + } + } + if errPos.IsKnown() { + var msg string + if requireNames { + // Not all parameters are named because named != len(list). + // If named == typed we must have parameters that have no types, + // and they must be at the end of the parameter list, otherwise + // the types would have been filled in by the right-to-left sweep + // above and we wouldn't have an error. Since we are in a type + // parameter list, the missing types are constraints. + if named == typed { + errPos = end // position error at closing ] + msg = "missing type constraint" + } else { + msg = "missing type parameter name" + // go.dev/issue/60812 + if len(list) == 1 { + msg += " or invalid array length" + } + } + } else { + msg = "mixed named and unnamed parameters" + } + p.syntaxErrorAt(errPos, msg) + } + } + + return +} + +func (p *parser) badExpr() *BadExpr { + b := new(BadExpr) + b.pos = p.pos() + return b +} + +// ---------------------------------------------------------------------------- +// Statements + +// SimpleStmt = EmptyStmt | ExpressionStmt | SendStmt | IncDecStmt | Assignment | ShortVarDecl . 
+func (p *parser) simpleStmt(lhs Expr, keyword token) SimpleStmt { + if trace { + defer p.trace("simpleStmt")() + } + + if keyword == _For && p.tok == _Range { + // _Range expr + if debug && lhs != nil { + panic("invalid call of simpleStmt") + } + return p.newRangeClause(nil, false) + } + + if lhs == nil { + lhs = p.exprList() + } + + if _, ok := lhs.(*ListExpr); !ok && p.tok != _Assign && p.tok != _Define { + // expr + pos := p.pos() + switch p.tok { + case _AssignOp: + // lhs op= rhs + op := p.op + p.next() + return p.newAssignStmt(pos, op, lhs, p.expr()) + + case _IncOp: + // lhs++ or lhs-- + op := p.op + p.next() + return p.newAssignStmt(pos, op, lhs, nil) + + case _Arrow: + // lhs <- rhs + s := new(SendStmt) + s.pos = pos + p.next() + s.Chan = lhs + s.Value = p.expr() + return s + + default: + // expr + s := new(ExprStmt) + s.pos = lhs.Pos() + s.X = lhs + return s + } + } + + // expr_list + switch p.tok { + case _Assign, _Define: + pos := p.pos() + var op Operator + if p.tok == _Define { + op = Def + } + p.next() + + if keyword == _For && p.tok == _Range { + // expr_list op= _Range expr + return p.newRangeClause(lhs, op == Def) + } + + // expr_list op= expr_list + rhs := p.exprList() + + if x, ok := rhs.(*TypeSwitchGuard); ok && keyword == _Switch && op == Def { + if lhs, ok := lhs.(*Name); ok { + // switch … lhs := rhs.(type) + x.Lhs = lhs + s := new(ExprStmt) + s.pos = x.Pos() + s.X = x + return s + } + } + + return p.newAssignStmt(pos, op, lhs, rhs) + + default: + p.syntaxError("expected := or = or comma") + p.advance(_Semi, _Rbrace) + // make the best of what we have + if x, ok := lhs.(*ListExpr); ok { + lhs = x.ElemList[0] + } + s := new(ExprStmt) + s.pos = lhs.Pos() + s.X = lhs + return s + } +} + +func (p *parser) newRangeClause(lhs Expr, def bool) *RangeClause { + r := new(RangeClause) + r.pos = p.pos() + p.next() // consume _Range + r.Lhs = lhs + r.Def = def + r.X = p.expr() + return r +} + +func (p *parser) newAssignStmt(pos Pos, op Operator, lhs, 
rhs Expr) *AssignStmt { + a := new(AssignStmt) + a.pos = pos + a.Op = op + a.Lhs = lhs + a.Rhs = rhs + return a +} + +func (p *parser) labeledStmtOrNil(label *Name) Stmt { + if trace { + defer p.trace("labeledStmt")() + } + + s := new(LabeledStmt) + s.pos = p.pos() + s.Label = label + + p.want(_Colon) + + if p.tok == _Rbrace { + // We expect a statement (incl. an empty statement), which must be + // terminated by a semicolon. Because semicolons may be omitted before + // an _Rbrace, seeing an _Rbrace implies an empty statement. + e := new(EmptyStmt) + e.pos = p.pos() + s.Stmt = e + return s + } + + s.Stmt = p.stmtOrNil() + if s.Stmt != nil { + return s + } + + // report error at line of ':' token + p.syntaxErrorAt(s.pos, "missing statement after label") + // we are already at the end of the labeled statement - no need to advance + return nil // avoids follow-on errors (see e.g., fixedbugs/bug274.go) +} + +// context must be a non-empty string unless we know that p.tok == _Lbrace. +func (p *parser) blockStmt(context string) *BlockStmt { + if trace { + defer p.trace("blockStmt")() + } + + s := new(BlockStmt) + s.pos = p.pos() + + // people coming from C may forget that braces are mandatory in Go + if !p.got(_Lbrace) { + p.syntaxError("expected { after " + context) + p.advance(_Name, _Rbrace) + s.Rbrace = p.pos() // in case we found "}" + if p.got(_Rbrace) { + return s + } + } + + s.List = p.stmtList() + s.Rbrace = p.pos() + p.want(_Rbrace) + + return s +} + +func (p *parser) declStmt(f func(*Group) Decl) *DeclStmt { + if trace { + defer p.trace("declStmt")() + } + + s := new(DeclStmt) + s.pos = p.pos() + + p.next() // _Const, _Type, or _Var + s.DeclList = p.appendGroup(nil, f) + + return s +} + +func (p *parser) forStmt() Stmt { + if trace { + defer p.trace("forStmt")() + } + + s := new(ForStmt) + s.pos = p.pos() + + s.Init, s.Cond, s.Post = p.header(_For) + s.Body = p.blockStmt("for clause") + + return s +} + +func (p *parser) header(keyword token) (init SimpleStmt, 
cond Expr, post SimpleStmt) { + p.want(keyword) + + if p.tok == _Lbrace { + if keyword == _If { + p.syntaxError("missing condition in if statement") + cond = p.badExpr() + } + return + } + // p.tok != _Lbrace + + outer := p.xnest + p.xnest = -1 + + if p.tok != _Semi { + // accept potential varDecl but complain + if p.got(_Var) { + p.syntaxError(fmt.Sprintf("var declaration not allowed in %s initializer", tokstring(keyword))) + } + init = p.simpleStmt(nil, keyword) + // If we have a range clause, we are done (can only happen for keyword == _For). + if _, ok := init.(*RangeClause); ok { + p.xnest = outer + return + } + } + + var condStmt SimpleStmt + var semi struct { + pos Pos + lit string // valid if pos.IsKnown() + } + if p.tok != _Lbrace { + if p.tok == _Semi { + semi.pos = p.pos() + semi.lit = p.lit + p.next() + } else { + // asking for a '{' rather than a ';' here leads to a better error message + p.want(_Lbrace) + if p.tok != _Lbrace { + p.advance(_Lbrace, _Rbrace) // for better synchronization (e.g., go.dev/issue/22581) + } + } + if keyword == _For { + if p.tok != _Semi { + if p.tok == _Lbrace { + p.syntaxError("expected for loop condition") + goto done + } + condStmt = p.simpleStmt(nil, 0 /* range not permitted */) + } + p.want(_Semi) + if p.tok != _Lbrace { + post = p.simpleStmt(nil, 0 /* range not permitted */) + if a, _ := post.(*AssignStmt); a != nil && a.Op == Def { + p.syntaxErrorAt(a.Pos(), "cannot declare in post statement of for loop") + } + } + } else if p.tok != _Lbrace { + condStmt = p.simpleStmt(nil, keyword) + } + } else { + condStmt = init + init = nil + } + +done: + // unpack condStmt + switch s := condStmt.(type) { + case nil: + if keyword == _If && semi.pos.IsKnown() { + if semi.lit != "semicolon" { + p.syntaxErrorAt(semi.pos, fmt.Sprintf("unexpected %s, expected { after if clause", semi.lit)) + } else { + p.syntaxErrorAt(semi.pos, "missing condition in if statement") + } + b := new(BadExpr) + b.pos = semi.pos + cond = b + } + case 
*ExprStmt: + cond = s.X + default: + // A common syntax error is to write '=' instead of '==', + // which turns an expression into an assignment. Provide + // a more explicit error message in that case to prevent + // further confusion. + var str string + if as, ok := s.(*AssignStmt); ok && as.Op == 0 { + // Emphasize complex Lhs and Rhs of assignment with parentheses to highlight '='. + str = "assignment " + emphasize(as.Lhs) + " = " + emphasize(as.Rhs) + } else { + str = String(s) + } + p.syntaxErrorAt(s.Pos(), fmt.Sprintf("cannot use %s as value", str)) + } + + p.xnest = outer + return +} + +// emphasize returns a string representation of x, with (top-level) +// binary expressions emphasized by enclosing them in parentheses. +func emphasize(x Expr) string { + s := String(x) + if op, _ := x.(*Operation); op != nil && op.Y != nil { + // binary expression + return "(" + s + ")" + } + return s +} + +func (p *parser) ifStmt() *IfStmt { + if trace { + defer p.trace("ifStmt")() + } + + s := new(IfStmt) + s.pos = p.pos() + + s.Init, s.Cond, _ = p.header(_If) + s.Then = p.blockStmt("if clause") + + if p.got(_Else) { + switch p.tok { + case _If: + s.Else = p.ifStmt() + case _Lbrace: + s.Else = p.blockStmt("") + default: + p.syntaxError("else must be followed by if or statement block") + p.advance(_Name, _Rbrace) + } + } + + return s +} + +func (p *parser) switchStmt() *SwitchStmt { + if trace { + defer p.trace("switchStmt")() + } + + s := new(SwitchStmt) + s.pos = p.pos() + + s.Init, s.Tag, _ = p.header(_Switch) + + if !p.got(_Lbrace) { + p.syntaxError("missing { after switch clause") + p.advance(_Case, _Default, _Rbrace) + } + for p.tok != _EOF && p.tok != _Rbrace { + s.Body = append(s.Body, p.caseClause()) + } + s.Rbrace = p.pos() + p.want(_Rbrace) + + return s +} + +func (p *parser) selectStmt() *SelectStmt { + if trace { + defer p.trace("selectStmt")() + } + + s := new(SelectStmt) + s.pos = p.pos() + + p.want(_Select) + if !p.got(_Lbrace) { + p.syntaxError("missing { 
after select clause") + p.advance(_Case, _Default, _Rbrace) + } + for p.tok != _EOF && p.tok != _Rbrace { + s.Body = append(s.Body, p.commClause()) + } + s.Rbrace = p.pos() + p.want(_Rbrace) + + return s +} + +func (p *parser) caseClause() *CaseClause { + if trace { + defer p.trace("caseClause")() + } + + c := new(CaseClause) + c.pos = p.pos() + + switch p.tok { + case _Case: + p.next() + c.Cases = p.exprList() + + case _Default: + p.next() + + default: + p.syntaxError("expected case or default or }") + p.advance(_Colon, _Case, _Default, _Rbrace) + } + + c.Colon = p.pos() + p.want(_Colon) + c.Body = p.stmtList() + + return c +} + +func (p *parser) commClause() *CommClause { + if trace { + defer p.trace("commClause")() + } + + c := new(CommClause) + c.pos = p.pos() + + switch p.tok { + case _Case: + p.next() + c.Comm = p.simpleStmt(nil, 0) + + // The syntax restricts the possible simple statements here to: + // + // lhs <- x (send statement) + // <-x + // lhs = <-x + // lhs := <-x + // + // All these (and more) are recognized by simpleStmt and invalid + // syntax trees are flagged later, during type checking. + + case _Default: + p.next() + + default: + p.syntaxError("expected case or default or }") + p.advance(_Colon, _Case, _Default, _Rbrace) + } + + c.Colon = p.pos() + p.want(_Colon) + c.Body = p.stmtList() + + return c +} + +// stmtOrNil parses a statement if one is present, or else returns nil. +// +// Statement = +// Declaration | LabeledStmt | SimpleStmt | +// GoStmt | ReturnStmt | BreakStmt | ContinueStmt | GotoStmt | +// FallthroughStmt | Block | IfStmt | SwitchStmt | SelectStmt | ForStmt | +// DeferStmt . +func (p *parser) stmtOrNil() Stmt { + if trace { + defer p.trace("stmt " + p.tok.String())() + } + + // Most statements (assignments) start with an identifier; + // look for it first before doing anything more expensive. 
+ if p.tok == _Name { + p.clearPragma() + lhs := p.exprList() + if label, ok := lhs.(*Name); ok && p.tok == _Colon { + return p.labeledStmtOrNil(label) + } + return p.simpleStmt(lhs, 0) + } + + switch p.tok { + case _Var: + return p.declStmt(p.varDecl) + + case _Const: + return p.declStmt(p.constDecl) + + case _Type: + return p.declStmt(p.typeDecl) + } + + p.clearPragma() + + switch p.tok { + case _Lbrace: + return p.blockStmt("") + + case _Operator, _Star: + switch p.op { + case Add, Sub, Mul, And, Xor, Not: + return p.simpleStmt(nil, 0) // unary operators + } + + case _Literal, _Func, _Lparen, // operands + _Lbrack, _Struct, _Map, _Chan, _Interface, // composite types + _Arrow: // receive operator + return p.simpleStmt(nil, 0) + + case _For: + return p.forStmt() + + case _Switch: + return p.switchStmt() + + case _Select: + return p.selectStmt() + + case _If: + return p.ifStmt() + + case _Fallthrough: + s := new(BranchStmt) + s.pos = p.pos() + p.next() + s.Tok = _Fallthrough + return s + + case _Break, _Continue: + s := new(BranchStmt) + s.pos = p.pos() + s.Tok = p.tok + p.next() + if p.tok == _Name { + s.Label = p.name() + } + return s + + case _Go, _Defer: + return p.callStmt() + + case _Goto: + s := new(BranchStmt) + s.pos = p.pos() + s.Tok = _Goto + p.next() + s.Label = p.name() + return s + + case _Return: + s := new(ReturnStmt) + s.pos = p.pos() + p.next() + if p.tok != _Semi && p.tok != _Rbrace { + s.Results = p.exprList() + } + return s + + case _Semi: + s := new(EmptyStmt) + s.pos = p.pos() + return s + } + + return nil +} + +// StatementList = { Statement ";" } . 
+func (p *parser) stmtList() (l []Stmt) { + if trace { + defer p.trace("stmtList")() + } + + for p.tok != _EOF && p.tok != _Rbrace && p.tok != _Case && p.tok != _Default { + s := p.stmtOrNil() + p.clearPragma() + if s == nil { + break + } + l = append(l, s) + // ";" is optional before "}" + if !p.got(_Semi) && p.tok != _Rbrace { + p.syntaxError("at end of statement") + p.advance(_Semi, _Rbrace, _Case, _Default) + p.got(_Semi) // avoid spurious empty statement + } + } + return +} + +// argList parses a possibly empty, comma-separated list of arguments, +// optionally followed by a comma (if not empty), and closed by ")". +// The last argument may be followed by "...". +// +// argList = [ arg { "," arg } [ "..." ] [ "," ] ] ")" . +func (p *parser) argList() (list []Expr, hasDots bool) { + if trace { + defer p.trace("argList")() + } + + p.xnest++ + p.list("argument list", _Comma, _Rparen, func() bool { + list = append(list, p.expr()) + hasDots = p.got(_DotDotDot) + return hasDots + }) + p.xnest-- + + return +} + +// ---------------------------------------------------------------------------- +// Common productions + +func (p *parser) name() *Name { + // no tracing to avoid overly verbose output + + if p.tok == _Name { + n := NewName(p.pos(), p.lit) + p.next() + return n + } + + n := NewName(p.pos(), "_") + p.syntaxError("expected name") + p.advance() + return n +} + +// IdentifierList = identifier { "," identifier } . +// The first name must be provided. +func (p *parser) nameList(first *Name) []*Name { + if trace { + defer p.trace("nameList")() + } + + if debug && first == nil { + panic("first name not provided") + } + + l := []*Name{first} + for p.got(_Comma) { + l = append(l, p.name()) + } + + return l +} + +// The first name may be provided, or nil. 
+func (p *parser) qualifiedName(name *Name) Expr { + if trace { + defer p.trace("qualifiedName")() + } + + var x Expr + switch { + case name != nil: + x = name + case p.tok == _Name: + x = p.name() + default: + x = NewName(p.pos(), "_") + p.syntaxError("expected name") + p.advance(_Dot, _Semi, _Rbrace) + } + + if p.tok == _Dot { + s := new(SelectorExpr) + s.pos = p.pos() + p.next() + s.X = x + s.Sel = p.name() + x = s + } + + if p.tok == _Lbrack { + x = p.typeInstance(x) + } + + return x +} + +// ExpressionList = Expression { "," Expression } . +func (p *parser) exprList() Expr { + if trace { + defer p.trace("exprList")() + } + + x := p.expr() + if p.got(_Comma) { + list := []Expr{x, p.expr()} + for p.got(_Comma) { + list = append(list, p.expr()) + } + t := new(ListExpr) + t.pos = x.Pos() + t.ElemList = list + x = t + } + return x +} + +// typeList parses a non-empty, comma-separated list of types, +// optionally followed by a comma. If strict is set to false, +// the first element may also be a (non-type) expression. +// If there is more than one argument, the result is a *ListExpr. +// The comma result indicates whether there was a (separating or +// trailing) comma. +// +// typeList = arg { "," arg } [ "," ] . +func (p *parser) typeList(strict bool) (x Expr, comma bool) { + if trace { + defer p.trace("typeList")() + } + + p.xnest++ + if strict { + x = p.type_() + } else { + x = p.expr() + } + if p.got(_Comma) { + comma = true + if t := p.typeOrNil(); t != nil { + list := []Expr{x, t} + for p.got(_Comma) { + if t = p.typeOrNil(); t == nil { + break + } + list = append(list, t) + } + l := new(ListExpr) + l.pos = x.Pos() // == list[0].Pos() + l.ElemList = list + x = l + } + } + p.xnest-- + return +} + +// Unparen returns e with any enclosing parentheses stripped. +func Unparen(x Expr) Expr { + for { + p, ok := x.(*ParenExpr) + if !ok { + break + } + x = p.X + } + return x +} + +// UnpackListExpr unpacks a *ListExpr into a []Expr. 
+func UnpackListExpr(x Expr) []Expr { + switch x := x.(type) { + case nil: + return nil + case *ListExpr: + return x.ElemList + default: + return []Expr{x} + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/parser_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/parser_test.go new file mode 100644 index 0000000000000000000000000000000000000000..538278b3eb89498150aa23c9491642cf4baaeb5e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/parser_test.go @@ -0,0 +1,395 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syntax + +import ( + "bytes" + "flag" + "fmt" + "internal/testenv" + "os" + "path/filepath" + "regexp" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +var ( + fast = flag.Bool("fast", false, "parse package files in parallel") + verify = flag.Bool("verify", false, "verify idempotent printing") + src_ = flag.String("src", "parser.go", "source file to parse") + skip = flag.String("skip", "", "files matching this regular expression are skipped by TestStdLib") +) + +func TestParse(t *testing.T) { + ParseFile(*src_, func(err error) { t.Error(err) }, nil, 0) +} + +func TestVerify(t *testing.T) { + ast, err := ParseFile(*src_, func(err error) { t.Error(err) }, nil, 0) + if err != nil { + return // error already reported + } + verifyPrint(t, *src_, ast) +} + +func TestStdLib(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + var skipRx *regexp.Regexp + if *skip != "" { + var err error + skipRx, err = regexp.Compile(*skip) + if err != nil { + t.Fatalf("invalid argument for -skip (%v)", err) + } + } + + var m1 runtime.MemStats + runtime.ReadMemStats(&m1) + start := time.Now() + + type parseResult struct { + filename string + lines uint + } + + goroot := testenv.GOROOT(t) + + results := make(chan parseResult) + go 
func() { + defer close(results) + for _, dir := range []string{ + filepath.Join(goroot, "src"), + filepath.Join(goroot, "misc"), + } { + if filepath.Base(dir) == "misc" { + // cmd/distpack deletes GOROOT/misc, so skip that directory if it isn't present. + // cmd/distpack also requires GOROOT/VERSION to exist, so use that to + // suppress false-positive skips. + if _, err := os.Stat(dir); os.IsNotExist(err) { + if _, err := os.Stat(filepath.Join(testenv.GOROOT(t), "VERSION")); err == nil { + fmt.Printf("%s not present; skipping\n", dir) + continue + } + } + } + + walkDirs(t, dir, func(filename string) { + if skipRx != nil && skipRx.MatchString(filename) { + // Always report skipped files since regexp + // typos can lead to surprising results. + fmt.Printf("skipping %s\n", filename) + return + } + if debug { + fmt.Printf("parsing %s\n", filename) + } + ast, err := ParseFile(filename, nil, nil, 0) + if err != nil { + t.Error(err) + return + } + if *verify { + verifyPrint(t, filename, ast) + } + results <- parseResult{filename, ast.EOF.Line()} + }) + } + }() + + var count, lines uint + for res := range results { + count++ + lines += res.lines + if testing.Verbose() { + fmt.Printf("%5d %s (%d lines)\n", count, res.filename, res.lines) + } + } + + dt := time.Since(start) + var m2 runtime.MemStats + runtime.ReadMemStats(&m2) + dm := float64(m2.TotalAlloc-m1.TotalAlloc) / 1e6 + + fmt.Printf("parsed %d lines (%d files) in %v (%d lines/s)\n", lines, count, dt, int64(float64(lines)/dt.Seconds())) + fmt.Printf("allocated %.3fMb (%.3fMb/s)\n", dm, dm/dt.Seconds()) +} + +func walkDirs(t *testing.T, dir string, action func(string)) { + entries, err := os.ReadDir(dir) + if err != nil { + t.Error(err) + return + } + + var files, dirs []string + for _, entry := range entries { + if entry.Type().IsRegular() { + if strings.HasSuffix(entry.Name(), ".go") { + path := filepath.Join(dir, entry.Name()) + files = append(files, path) + } + } else if entry.IsDir() && entry.Name() != 
"testdata" { + path := filepath.Join(dir, entry.Name()) + if !strings.HasSuffix(path, string(filepath.Separator)+"test") { + dirs = append(dirs, path) + } + } + } + + if *fast { + var wg sync.WaitGroup + wg.Add(len(files)) + for _, filename := range files { + go func(filename string) { + defer wg.Done() + action(filename) + }(filename) + } + wg.Wait() + } else { + for _, filename := range files { + action(filename) + } + } + + for _, dir := range dirs { + walkDirs(t, dir, action) + } +} + +func verifyPrint(t *testing.T, filename string, ast1 *File) { + var buf1 bytes.Buffer + _, err := Fprint(&buf1, ast1, LineForm) + if err != nil { + panic(err) + } + bytes1 := buf1.Bytes() + + ast2, err := Parse(NewFileBase(filename), &buf1, nil, nil, 0) + if err != nil { + panic(err) + } + + var buf2 bytes.Buffer + _, err = Fprint(&buf2, ast2, LineForm) + if err != nil { + panic(err) + } + bytes2 := buf2.Bytes() + + if bytes.Compare(bytes1, bytes2) != 0 { + fmt.Printf("--- %s ---\n", filename) + fmt.Printf("%s\n", bytes1) + fmt.Println() + + fmt.Printf("--- %s ---\n", filename) + fmt.Printf("%s\n", bytes2) + fmt.Println() + + t.Error("printed syntax trees do not match") + } +} + +func TestIssue17697(t *testing.T) { + _, err := Parse(nil, bytes.NewReader(nil), nil, nil, 0) // return with parser error, don't panic + if err == nil { + t.Errorf("no error reported") + } +} + +func TestParseFile(t *testing.T) { + _, err := ParseFile("", nil, nil, 0) + if err == nil { + t.Error("missing io error") + } + + var first error + _, err = ParseFile("", func(err error) { + if first == nil { + first = err + } + }, nil, 0) + if err == nil || first == nil { + t.Error("missing io error") + } + if err != first { + t.Errorf("got %v; want first error %v", err, first) + } +} + +// Make sure (PosMax + 1) doesn't overflow when converted to default +// type int (when passed as argument to fmt.Sprintf) on 32bit platforms +// (see test cases below). 
+var tooLarge int = PosMax + 1 + +func TestLineDirectives(t *testing.T) { + // valid line directives lead to a syntax error after them + const valid = "syntax error: package statement must be first" + const filename = "directives.go" + + for _, test := range []struct { + src, msg string + filename string + line, col uint // 1-based; 0 means unknown + }{ + // ignored //line directives + {"//\n", valid, filename, 2, 1}, // no directive + {"//line\n", valid, filename, 2, 1}, // missing colon + {"//line foo\n", valid, filename, 2, 1}, // missing colon + {" //line foo:\n", valid, filename, 2, 1}, // not a line start + {"// line foo:\n", valid, filename, 2, 1}, // space between // and line + + // invalid //line directives with one colon + {"//line :\n", "invalid line number: ", filename, 1, 9}, + {"//line :x\n", "invalid line number: x", filename, 1, 9}, + {"//line foo :\n", "invalid line number: ", filename, 1, 13}, + {"//line foo:x\n", "invalid line number: x", filename, 1, 12}, + {"//line foo:0\n", "invalid line number: 0", filename, 1, 12}, + {"//line foo:1 \n", "invalid line number: 1 ", filename, 1, 12}, + {"//line foo:-12\n", "invalid line number: -12", filename, 1, 12}, + {"//line C:foo:0\n", "invalid line number: 0", filename, 1, 14}, + {fmt.Sprintf("//line foo:%d\n", tooLarge), fmt.Sprintf("invalid line number: %d", tooLarge), filename, 1, 12}, + + // invalid //line directives with two colons + {"//line ::\n", "invalid line number: ", filename, 1, 10}, + {"//line ::x\n", "invalid line number: x", filename, 1, 10}, + {"//line foo::123abc\n", "invalid line number: 123abc", filename, 1, 13}, + {"//line foo::0\n", "invalid line number: 0", filename, 1, 13}, + {"//line foo:0:1\n", "invalid line number: 0", filename, 1, 12}, + + {"//line :123:0\n", "invalid column number: 0", filename, 1, 13}, + {"//line foo:123:0\n", "invalid column number: 0", filename, 1, 16}, + {fmt.Sprintf("//line foo:10:%d\n", tooLarge), fmt.Sprintf("invalid column number: %d", tooLarge), 
filename, 1, 15}, + + // effect of valid //line directives on lines + {"//line foo:123\n foo", valid, "foo", 123, 0}, + {"//line foo:123\n foo", valid, " foo", 123, 0}, + {"//line foo:123\n//line bar:345\nfoo", valid, "bar", 345, 0}, + {"//line C:foo:123\n", valid, "C:foo", 123, 0}, + {"//line /src/a/a.go:123\n foo", valid, "/src/a/a.go", 123, 0}, + {"//line :x:1\n", valid, ":x", 1, 0}, + {"//line foo ::1\n", valid, "foo :", 1, 0}, + {"//line foo:123abc:1\n", valid, "foo:123abc", 1, 0}, + {"//line foo :123:1\n", valid, "foo ", 123, 1}, + {"//line ::123\n", valid, ":", 123, 0}, + + // effect of valid //line directives on columns + {"//line :x:1:10\n", valid, ":x", 1, 10}, + {"//line foo ::1:2\n", valid, "foo :", 1, 2}, + {"//line foo:123abc:1:1000\n", valid, "foo:123abc", 1, 1000}, + {"//line foo :123:1000\n\n", valid, "foo ", 124, 1}, + {"//line ::123:1234\n", valid, ":", 123, 1234}, + + // //line directives with omitted filenames lead to empty filenames + {"//line :10\n", valid, "", 10, 0}, + {"//line :10:20\n", valid, filename, 10, 20}, + {"//line bar:1\n//line :10\n", valid, "", 10, 0}, + {"//line bar:1\n//line :10:20\n", valid, "bar", 10, 20}, + + // ignored /*line directives + {"/**/", valid, filename, 1, 5}, // no directive + {"/*line*/", valid, filename, 1, 9}, // missing colon + {"/*line foo*/", valid, filename, 1, 13}, // missing colon + {" //line foo:*/", valid, filename, 1, 16}, // not a line start + {"/* line foo:*/", valid, filename, 1, 16}, // space between // and line + + // invalid /*line directives with one colon + {"/*line :*/", "invalid line number: ", filename, 1, 9}, + {"/*line :x*/", "invalid line number: x", filename, 1, 9}, + {"/*line foo :*/", "invalid line number: ", filename, 1, 13}, + {"/*line foo:x*/", "invalid line number: x", filename, 1, 12}, + {"/*line foo:0*/", "invalid line number: 0", filename, 1, 12}, + {"/*line foo:1 */", "invalid line number: 1 ", filename, 1, 12}, + {"/*line C:foo:0*/", "invalid line number: 0", filename, 1, 
14}, + {fmt.Sprintf("/*line foo:%d*/", tooLarge), fmt.Sprintf("invalid line number: %d", tooLarge), filename, 1, 12}, + + // invalid /*line directives with two colons + {"/*line ::*/", "invalid line number: ", filename, 1, 10}, + {"/*line ::x*/", "invalid line number: x", filename, 1, 10}, + {"/*line foo::123abc*/", "invalid line number: 123abc", filename, 1, 13}, + {"/*line foo::0*/", "invalid line number: 0", filename, 1, 13}, + {"/*line foo:0:1*/", "invalid line number: 0", filename, 1, 12}, + + {"/*line :123:0*/", "invalid column number: 0", filename, 1, 13}, + {"/*line foo:123:0*/", "invalid column number: 0", filename, 1, 16}, + {fmt.Sprintf("/*line foo:10:%d*/", tooLarge), fmt.Sprintf("invalid column number: %d", tooLarge), filename, 1, 15}, + + // effect of valid /*line directives on lines + {"/*line foo:123*/ foo", valid, "foo", 123, 0}, + {"/*line foo:123*/\n//line bar:345\nfoo", valid, "bar", 345, 0}, + {"/*line C:foo:123*/", valid, "C:foo", 123, 0}, + {"/*line /src/a/a.go:123*/ foo", valid, "/src/a/a.go", 123, 0}, + {"/*line :x:1*/", valid, ":x", 1, 0}, + {"/*line foo ::1*/", valid, "foo :", 1, 0}, + {"/*line foo:123abc:1*/", valid, "foo:123abc", 1, 0}, + {"/*line foo :123:10*/", valid, "foo ", 123, 10}, + {"/*line ::123*/", valid, ":", 123, 0}, + + // effect of valid /*line directives on columns + {"/*line :x:1:10*/", valid, ":x", 1, 10}, + {"/*line foo ::1:2*/", valid, "foo :", 1, 2}, + {"/*line foo:123abc:1:1000*/", valid, "foo:123abc", 1, 1000}, + {"/*line foo :123:1000*/\n", valid, "foo ", 124, 1}, + {"/*line ::123:1234*/", valid, ":", 123, 1234}, + + // /*line directives with omitted filenames lead to the previously used filenames + {"/*line :10*/", valid, "", 10, 0}, + {"/*line :10:20*/", valid, filename, 10, 20}, + {"//line bar:1\n/*line :10*/", valid, "", 10, 0}, + {"//line bar:1\n/*line :10:20*/", valid, "bar", 10, 20}, + } { + base := NewFileBase(filename) + _, err := Parse(base, strings.NewReader(test.src), nil, nil, 0) + if err == nil { + 
t.Errorf("%s: no error reported", test.src) + continue + } + perr, ok := err.(Error) + if !ok { + t.Errorf("%s: got %v; want parser error", test.src, err) + continue + } + if msg := perr.Msg; msg != test.msg { + t.Errorf("%s: got msg = %q; want %q", test.src, msg, test.msg) + } + + pos := perr.Pos + if filename := pos.RelFilename(); filename != test.filename { + t.Errorf("%s: got filename = %q; want %q", test.src, filename, test.filename) + } + if line := pos.RelLine(); line != test.line { + t.Errorf("%s: got line = %d; want %d", test.src, line, test.line) + } + if col := pos.RelCol(); col != test.col { + t.Errorf("%s: got col = %d; want %d", test.src, col, test.col) + } + } +} + +// Test that typical uses of UnpackListExpr don't allocate. +func TestUnpackListExprAllocs(t *testing.T) { + var x Expr = NewName(Pos{}, "x") + allocs := testing.AllocsPerRun(1000, func() { + list := UnpackListExpr(x) + if len(list) != 1 || list[0] != x { + t.Fatalf("unexpected result") + } + }) + + if allocs > 0 { + errorf := t.Errorf + if testenv.OptimizationOff() { + errorf = t.Logf // noopt builder disables inlining + } + errorf("UnpackListExpr allocated %v times", allocs) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/pos.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/pos.go new file mode 100644 index 0000000000000000000000000000000000000000..dd25d4f249dee6b7b0ed8b9dc937a6e4724b00f8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/pos.go @@ -0,0 +1,211 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syntax + +import "fmt" + +// PosMax is the largest line or column value that can be represented without loss. +// Incoming values (arguments) larger than PosMax will be set to PosMax. +// +// Keep this consistent with maxLineCol in go/scanner. 
+const PosMax = 1 << 30 + +// A Pos represents an absolute (line, col) source position +// with a reference to position base for computing relative +// (to a file, or line directive) position information. +// Pos values are intentionally light-weight so that they +// can be created without too much concern about space use. +type Pos struct { + base *PosBase + line, col uint32 +} + +// MakePos returns a new Pos for the given PosBase, line and column. +func MakePos(base *PosBase, line, col uint) Pos { return Pos{base, sat32(line), sat32(col)} } + +// TODO(gri) IsKnown makes an assumption about linebase < 1. +// Maybe we should check for Base() != nil instead. + +func (pos Pos) Pos() Pos { return pos } +func (pos Pos) IsKnown() bool { return pos.line > 0 } +func (pos Pos) Base() *PosBase { return pos.base } +func (pos Pos) Line() uint { return uint(pos.line) } +func (pos Pos) Col() uint { return uint(pos.col) } + +func (pos Pos) RelFilename() string { return pos.base.Filename() } + +func (pos Pos) RelLine() uint { + b := pos.base + if b.Line() == 0 { + // base line is unknown => relative line is unknown + return 0 + } + return b.Line() + (pos.Line() - b.Pos().Line()) +} + +func (pos Pos) RelCol() uint { + b := pos.base + if b.Col() == 0 { + // base column is unknown => relative column is unknown + // (the current specification for line directives requires + // this to apply until the next PosBase/line directive, + // not just until the new newline) + return 0 + } + if pos.Line() == b.Pos().Line() { + // pos on same line as pos base => column is relative to pos base + return b.Col() + (pos.Col() - b.Pos().Col()) + } + return pos.Col() +} + +// Cmp compares the positions p and q and returns a result r as follows: +// +// r < 0: p is before q +// r == 0: p and q are the same position (but may not be identical) +// r > 0: p is after q +// +// If p and q are in different files, p is before q if the filename +// of p sorts lexicographically before the filename of q. 
+func (p Pos) Cmp(q Pos) int { + pname := p.RelFilename() + qname := q.RelFilename() + switch { + case pname < qname: + return -1 + case pname > qname: + return +1 + } + + pline := p.Line() + qline := q.Line() + switch { + case pline < qline: + return -1 + case pline > qline: + return +1 + } + + pcol := p.Col() + qcol := q.Col() + switch { + case pcol < qcol: + return -1 + case pcol > qcol: + return +1 + } + + return 0 +} + +func (pos Pos) String() string { + rel := position_{pos.RelFilename(), pos.RelLine(), pos.RelCol()} + abs := position_{pos.Base().Pos().RelFilename(), pos.Line(), pos.Col()} + s := rel.String() + if rel != abs { + s += "[" + abs.String() + "]" + } + return s +} + +// TODO(gri) cleanup: find better name, avoid conflict with position in error_test.go +type position_ struct { + filename string + line, col uint +} + +func (p position_) String() string { + if p.line == 0 { + if p.filename == "" { + return "" + } + return p.filename + } + if p.col == 0 { + return fmt.Sprintf("%s:%d", p.filename, p.line) + } + return fmt.Sprintf("%s:%d:%d", p.filename, p.line, p.col) +} + +// A PosBase represents the base for relative position information: +// At position pos, the relative position is filename:line:col. +type PosBase struct { + pos Pos + filename string + line, col uint32 + trimmed bool // whether -trimpath has been applied +} + +// NewFileBase returns a new PosBase for the given filename. +// A file PosBase's position is relative to itself, with the +// position being filename:1:1. +func NewFileBase(filename string) *PosBase { + return NewTrimmedFileBase(filename, false) +} + +// NewTrimmedFileBase is like NewFileBase, but allows specifying Trimmed. 
+func NewTrimmedFileBase(filename string, trimmed bool) *PosBase { + base := &PosBase{MakePos(nil, linebase, colbase), filename, linebase, colbase, trimmed} + base.pos.base = base + return base +} + +// NewLineBase returns a new PosBase for a line directive "line filename:line:col" +// relative to pos, which is the position of the character immediately following +// the comment containing the line directive. For a directive in a line comment, +// that position is the beginning of the next line (i.e., the newline character +// belongs to the line comment). +func NewLineBase(pos Pos, filename string, trimmed bool, line, col uint) *PosBase { + return &PosBase{pos, filename, sat32(line), sat32(col), trimmed} +} + +func (base *PosBase) IsFileBase() bool { + if base == nil { + return false + } + return base.pos.base == base +} + +func (base *PosBase) Pos() (_ Pos) { + if base == nil { + return + } + return base.pos +} + +func (base *PosBase) Filename() string { + if base == nil { + return "" + } + return base.filename +} + +func (base *PosBase) Line() uint { + if base == nil { + return 0 + } + return uint(base.line) +} + +func (base *PosBase) Col() uint { + if base == nil { + return 0 + } + return uint(base.col) +} + +func (base *PosBase) Trimmed() bool { + if base == nil { + return false + } + return base.trimmed +} + +func sat32(x uint) uint32 { + if x > PosMax { + return PosMax + } + return uint32(x) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/positions.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/positions.go new file mode 100644 index 0000000000000000000000000000000000000000..93596559a02328a3a23296c15ffa2983630a20a1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/positions.go @@ -0,0 +1,364 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This file implements helper functions for scope position computations. + +package syntax + +// StartPos returns the start position of n. +func StartPos(n Node) Pos { + // Cases for nodes which don't need a correction are commented out. + for m := n; ; { + switch n := m.(type) { + case nil: + panic("nil node") + + // packages + case *File: + // file block starts at the beginning of the file + return MakePos(n.Pos().Base(), 1, 1) + + // declarations + // case *ImportDecl: + // case *ConstDecl: + // case *TypeDecl: + // case *VarDecl: + // case *FuncDecl: + + // expressions + // case *BadExpr: + // case *Name: + // case *BasicLit: + case *CompositeLit: + if n.Type != nil { + m = n.Type + continue + } + return n.Pos() + // case *KeyValueExpr: + // case *FuncLit: + // case *ParenExpr: + case *SelectorExpr: + m = n.X + case *IndexExpr: + m = n.X + // case *SliceExpr: + case *AssertExpr: + m = n.X + case *TypeSwitchGuard: + if n.Lhs != nil { + m = n.Lhs + continue + } + m = n.X + case *Operation: + if n.Y != nil { + m = n.X + continue + } + return n.Pos() + case *CallExpr: + m = n.Fun + case *ListExpr: + if len(n.ElemList) > 0 { + m = n.ElemList[0] + continue + } + return n.Pos() + // types + // case *ArrayType: + // case *SliceType: + // case *DotsType: + // case *StructType: + // case *Field: + // case *InterfaceType: + // case *FuncType: + // case *MapType: + // case *ChanType: + + // statements + // case *EmptyStmt: + // case *LabeledStmt: + // case *BlockStmt: + // case *ExprStmt: + case *SendStmt: + m = n.Chan + // case *DeclStmt: + case *AssignStmt: + m = n.Lhs + // case *BranchStmt: + // case *CallStmt: + // case *ReturnStmt: + // case *IfStmt: + // case *ForStmt: + // case *SwitchStmt: + // case *SelectStmt: + + // helper nodes + case *RangeClause: + if n.Lhs != nil { + m = n.Lhs + continue + } + m = n.X + // case *CaseClause: + // case *CommClause: + + default: + return n.Pos() + } + } +} + +// EndPos returns the approximate end position of n in the 
source. +// For some nodes (*Name, *BasicLit) it returns the position immediately +// following the node; for others (*BlockStmt, *SwitchStmt, etc.) it +// returns the position of the closing '}'; and for some (*ParenExpr) +// the returned position is the end position of the last enclosed +// expression. +// Thus, EndPos should not be used for exact demarcation of the +// end of a node in the source; it is mostly useful to determine +// scope ranges where there is some leeway. +func EndPos(n Node) Pos { + for m := n; ; { + switch n := m.(type) { + case nil: + panic("nil node") + + // packages + case *File: + return n.EOF + + // declarations + case *ImportDecl: + m = n.Path + case *ConstDecl: + if n.Values != nil { + m = n.Values + continue + } + if n.Type != nil { + m = n.Type + continue + } + if l := len(n.NameList); l > 0 { + m = n.NameList[l-1] + continue + } + return n.Pos() + case *TypeDecl: + m = n.Type + case *VarDecl: + if n.Values != nil { + m = n.Values + continue + } + if n.Type != nil { + m = n.Type + continue + } + if l := len(n.NameList); l > 0 { + m = n.NameList[l-1] + continue + } + return n.Pos() + case *FuncDecl: + if n.Body != nil { + m = n.Body + continue + } + m = n.Type + + // expressions + case *BadExpr: + return n.Pos() + case *Name: + p := n.Pos() + return MakePos(p.Base(), p.Line(), p.Col()+uint(len(n.Value))) + case *BasicLit: + p := n.Pos() + return MakePos(p.Base(), p.Line(), p.Col()+uint(len(n.Value))) + case *CompositeLit: + return n.Rbrace + case *KeyValueExpr: + m = n.Value + case *FuncLit: + m = n.Body + case *ParenExpr: + m = n.X + case *SelectorExpr: + m = n.Sel + case *IndexExpr: + m = n.Index + case *SliceExpr: + for i := len(n.Index) - 1; i >= 0; i-- { + if x := n.Index[i]; x != nil { + m = x + continue + } + } + m = n.X + case *AssertExpr: + m = n.Type + case *TypeSwitchGuard: + m = n.X + case *Operation: + if n.Y != nil { + m = n.Y + continue + } + m = n.X + case *CallExpr: + if l := lastExpr(n.ArgList); l != nil { + m = l + 
continue + } + m = n.Fun + case *ListExpr: + if l := lastExpr(n.ElemList); l != nil { + m = l + continue + } + return n.Pos() + + // types + case *ArrayType: + m = n.Elem + case *SliceType: + m = n.Elem + case *DotsType: + m = n.Elem + case *StructType: + if l := lastField(n.FieldList); l != nil { + m = l + continue + } + return n.Pos() + // TODO(gri) need to take TagList into account + case *Field: + if n.Type != nil { + m = n.Type + continue + } + m = n.Name + case *InterfaceType: + if l := lastField(n.MethodList); l != nil { + m = l + continue + } + return n.Pos() + case *FuncType: + if l := lastField(n.ResultList); l != nil { + m = l + continue + } + if l := lastField(n.ParamList); l != nil { + m = l + continue + } + return n.Pos() + case *MapType: + m = n.Value + case *ChanType: + m = n.Elem + + // statements + case *EmptyStmt: + return n.Pos() + case *LabeledStmt: + m = n.Stmt + case *BlockStmt: + return n.Rbrace + case *ExprStmt: + m = n.X + case *SendStmt: + m = n.Value + case *DeclStmt: + if l := lastDecl(n.DeclList); l != nil { + m = l + continue + } + return n.Pos() + case *AssignStmt: + m = n.Rhs + if m == nil { + p := EndPos(n.Lhs) + return MakePos(p.Base(), p.Line(), p.Col()+2) + } + case *BranchStmt: + if n.Label != nil { + m = n.Label + continue + } + return n.Pos() + case *CallStmt: + m = n.Call + case *ReturnStmt: + if n.Results != nil { + m = n.Results + continue + } + return n.Pos() + case *IfStmt: + if n.Else != nil { + m = n.Else + continue + } + m = n.Then + case *ForStmt: + m = n.Body + case *SwitchStmt: + return n.Rbrace + case *SelectStmt: + return n.Rbrace + + // helper nodes + case *RangeClause: + m = n.X + case *CaseClause: + if l := lastStmt(n.Body); l != nil { + m = l + continue + } + return n.Colon + case *CommClause: + if l := lastStmt(n.Body); l != nil { + m = l + continue + } + return n.Colon + + default: + return n.Pos() + } + } +} + +func lastDecl(list []Decl) Decl { + if l := len(list); l > 0 { + return list[l-1] + } + return 
nil +} + +func lastExpr(list []Expr) Expr { + if l := len(list); l > 0 { + return list[l-1] + } + return nil +} + +func lastStmt(list []Stmt) Stmt { + if l := len(list); l > 0 { + return list[l-1] + } + return nil +} + +func lastField(list []*Field) *Field { + if l := len(list); l > 0 { + return list[l-1] + } + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/printer.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/printer.go new file mode 100644 index 0000000000000000000000000000000000000000..9f20db54dea5b9fe70a523aa2ffd6c3e204d85ff --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/printer.go @@ -0,0 +1,1020 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements printing of syntax trees in source format. + +package syntax + +import ( + "fmt" + "io" + "strings" +) + +// Form controls print formatting. +type Form uint + +const ( + _ Form = iota // default + LineForm // use spaces instead of linebreaks where possible + ShortForm // like LineForm but print "…" for non-empty function or composite literal bodies +) + +// Fprint prints node x to w in the specified form. +// It returns the number of bytes written, and whether there was an error. +func Fprint(w io.Writer, x Node, form Form) (n int, err error) { + p := printer{ + output: w, + form: form, + linebreaks: form == 0, + } + + defer func() { + n = p.written + if e := recover(); e != nil { + err = e.(writeError).err // re-panics if it's not a writeError + } + }() + + p.print(x) + p.flush(_EOF) + + return +} + +// String is a convenience function that prints n in ShortForm +// and returns the printed string. 
+func String(n Node) string { + var buf strings.Builder + _, err := Fprint(&buf, n, ShortForm) + if err != nil { + fmt.Fprintf(&buf, "<<< ERROR: %s", err) + } + return buf.String() +} + +type ctrlSymbol int + +const ( + none ctrlSymbol = iota + semi + blank + newline + indent + outdent + // comment + // eolComment +) + +type whitespace struct { + last token + kind ctrlSymbol + //text string // comment text (possibly ""); valid if kind == comment +} + +type printer struct { + output io.Writer + written int // number of bytes written + form Form + linebreaks bool // print linebreaks instead of semis + + indent int // current indentation level + nlcount int // number of consecutive newlines + + pending []whitespace // pending whitespace + lastTok token // last token (after any pending semi) processed by print +} + +// write is a thin wrapper around p.output.Write +// that takes care of accounting and error handling. +func (p *printer) write(data []byte) { + n, err := p.output.Write(data) + p.written += n + if err != nil { + panic(writeError{err}) + } +} + +var ( + tabBytes = []byte("\t\t\t\t\t\t\t\t") + newlineByte = []byte("\n") + blankByte = []byte(" ") +) + +func (p *printer) writeBytes(data []byte) { + if len(data) == 0 { + panic("expected non-empty []byte") + } + if p.nlcount > 0 && p.indent > 0 { + // write indentation + n := p.indent + for n > len(tabBytes) { + p.write(tabBytes) + n -= len(tabBytes) + } + p.write(tabBytes[:n]) + } + p.write(data) + p.nlcount = 0 +} + +func (p *printer) writeString(s string) { + p.writeBytes([]byte(s)) +} + +// If impliesSemi returns true for a non-blank line's final token tok, +// a semicolon is automatically inserted. Vice versa, a semicolon may +// be omitted in those cases. 
+func impliesSemi(tok token) bool { + switch tok { + case _Name, + _Break, _Continue, _Fallthrough, _Return, + /*_Inc, _Dec,*/ _Rparen, _Rbrack, _Rbrace: // TODO(gri) fix this + return true + } + return false +} + +// TODO(gri) provide table of []byte values for all tokens to avoid repeated string conversion + +func lineComment(text string) bool { + return strings.HasPrefix(text, "//") +} + +func (p *printer) addWhitespace(kind ctrlSymbol, text string) { + p.pending = append(p.pending, whitespace{p.lastTok, kind /*text*/}) + switch kind { + case semi: + p.lastTok = _Semi + case newline: + p.lastTok = 0 + // TODO(gri) do we need to handle /*-style comments containing newlines here? + } +} + +func (p *printer) flush(next token) { + // eliminate semis and redundant whitespace + sawNewline := next == _EOF + sawParen := next == _Rparen || next == _Rbrace + for i := len(p.pending) - 1; i >= 0; i-- { + switch p.pending[i].kind { + case semi: + k := semi + if sawParen { + sawParen = false + k = none // eliminate semi + } else if sawNewline && impliesSemi(p.pending[i].last) { + sawNewline = false + k = none // eliminate semi + } + p.pending[i].kind = k + case newline: + sawNewline = true + case blank, indent, outdent: + // nothing to do + // case comment: + // // A multi-line comment acts like a newline; and a "" + // // comment implies by definition at least one newline. 
+ // if text := p.pending[i].text; strings.HasPrefix(text, "/*") && strings.ContainsRune(text, '\n') { + // sawNewline = true + // } + // case eolComment: + // // TODO(gri) act depending on sawNewline + default: + panic("unreachable") + } + } + + // print pending + prev := none + for i := range p.pending { + switch p.pending[i].kind { + case none: + // nothing to do + case semi: + p.writeString(";") + p.nlcount = 0 + prev = semi + case blank: + if prev != blank { + // at most one blank + p.writeBytes(blankByte) + p.nlcount = 0 + prev = blank + } + case newline: + const maxEmptyLines = 1 + if p.nlcount <= maxEmptyLines { + p.write(newlineByte) + p.nlcount++ + prev = newline + } + case indent: + p.indent++ + case outdent: + p.indent-- + if p.indent < 0 { + panic("negative indentation") + } + // case comment: + // if text := p.pending[i].text; text != "" { + // p.writeString(text) + // p.nlcount = 0 + // prev = comment + // } + // // TODO(gri) should check that line comments are always followed by newline + default: + panic("unreachable") + } + } + + p.pending = p.pending[:0] // re-use underlying array +} + +func mayCombine(prev token, next byte) (b bool) { + return // for now + // switch prev { + // case lexical.Int: + // b = next == '.' // 1. + // case lexical.Add: + // b = next == '+' // ++ + // case lexical.Sub: + // b = next == '-' // -- + // case lexical.Quo: + // b = next == '*' // /* + // case lexical.Lss: + // b = next == '-' || next == '<' // <- or << + // case lexical.And: + // b = next == '&' || next == '^' // && or &^ + // } + // return +} + +func (p *printer) print(args ...interface{}) { + for i := 0; i < len(args); i++ { + switch x := args[i].(type) { + case nil: + // we should not reach here but don't crash + + case Node: + p.printNode(x) + + case token: + // _Name implies an immediately following string + // argument which is the actual value to print. 
+ var s string + if x == _Name { + i++ + if i >= len(args) { + panic("missing string argument after _Name") + } + s = args[i].(string) + } else { + s = x.String() + } + + // TODO(gri) This check seems at the wrong place since it doesn't + // take into account pending white space. + if mayCombine(p.lastTok, s[0]) { + panic("adjacent tokens combine without whitespace") + } + + if x == _Semi { + // delay printing of semi + p.addWhitespace(semi, "") + } else { + p.flush(x) + p.writeString(s) + p.nlcount = 0 + p.lastTok = x + } + + case Operator: + if x != 0 { + p.flush(_Operator) + p.writeString(x.String()) + } + + case ctrlSymbol: + switch x { + case none, semi /*, comment*/ : + panic("unreachable") + case newline: + // TODO(gri) need to handle mandatory newlines after a //-style comment + if !p.linebreaks { + x = blank + } + } + p.addWhitespace(x, "") + + // case *Comment: // comments are not Nodes + // p.addWhitespace(comment, x.Text) + + default: + panic(fmt.Sprintf("unexpected argument %v (%T)", x, x)) + } + } +} + +func (p *printer) printNode(n Node) { + // ncom := *n.Comments() + // if ncom != nil { + // // TODO(gri) in general we cannot make assumptions about whether + // // a comment is a /*- or a //-style comment since the syntax + // // tree may have been manipulated. Need to make sure the correct + // // whitespace is emitted. 
+ // for _, c := range ncom.Alone { + // p.print(c, newline) + // } + // for _, c := range ncom.Before { + // if c.Text == "" || lineComment(c.Text) { + // panic("unexpected empty line or //-style 'before' comment") + // } + // p.print(c, blank) + // } + // } + + p.printRawNode(n) + + // if ncom != nil && len(ncom.After) > 0 { + // for i, c := range ncom.After { + // if i+1 < len(ncom.After) { + // if c.Text == "" || lineComment(c.Text) { + // panic("unexpected empty line or //-style non-final 'after' comment") + // } + // } + // p.print(blank, c) + // } + // //p.print(newline) + // } +} + +func (p *printer) printRawNode(n Node) { + switch n := n.(type) { + case nil: + // we should not reach here but don't crash + + // expressions and types + case *BadExpr: + p.print(_Name, "") + + case *Name: + p.print(_Name, n.Value) // _Name requires actual value following immediately + + case *BasicLit: + p.print(_Name, n.Value) // _Name requires actual value following immediately + + case *FuncLit: + p.print(n.Type, blank) + if n.Body != nil { + if p.form == ShortForm { + p.print(_Lbrace) + if len(n.Body.List) > 0 { + p.print(_Name, "…") + } + p.print(_Rbrace) + } else { + p.print(n.Body) + } + } + + case *CompositeLit: + if n.Type != nil { + p.print(n.Type) + } + p.print(_Lbrace) + if p.form == ShortForm { + if len(n.ElemList) > 0 { + p.print(_Name, "…") + } + } else { + if n.NKeys > 0 && n.NKeys == len(n.ElemList) { + p.printExprLines(n.ElemList) + } else { + p.printExprList(n.ElemList) + } + } + p.print(_Rbrace) + + case *ParenExpr: + p.print(_Lparen, n.X, _Rparen) + + case *SelectorExpr: + p.print(n.X, _Dot, n.Sel) + + case *IndexExpr: + p.print(n.X, _Lbrack, n.Index, _Rbrack) + + case *SliceExpr: + p.print(n.X, _Lbrack) + if i := n.Index[0]; i != nil { + p.printNode(i) + } + p.print(_Colon) + if j := n.Index[1]; j != nil { + p.printNode(j) + } + if k := n.Index[2]; k != nil { + p.print(_Colon, k) + } + p.print(_Rbrack) + + case *AssertExpr: + p.print(n.X, _Dot, _Lparen, 
n.Type, _Rparen) + + case *TypeSwitchGuard: + if n.Lhs != nil { + p.print(n.Lhs, blank, _Define, blank) + } + p.print(n.X, _Dot, _Lparen, _Type, _Rparen) + + case *CallExpr: + p.print(n.Fun, _Lparen) + p.printExprList(n.ArgList) + if n.HasDots { + p.print(_DotDotDot) + } + p.print(_Rparen) + + case *Operation: + if n.Y == nil { + // unary expr + p.print(n.Op) + // if n.Op == lexical.Range { + // p.print(blank) + // } + p.print(n.X) + } else { + // binary expr + // TODO(gri) eventually take precedence into account + // to control possibly missing parentheses + p.print(n.X, blank, n.Op, blank, n.Y) + } + + case *KeyValueExpr: + p.print(n.Key, _Colon, blank, n.Value) + + case *ListExpr: + p.printExprList(n.ElemList) + + case *ArrayType: + var len interface{} = _DotDotDot + if n.Len != nil { + len = n.Len + } + p.print(_Lbrack, len, _Rbrack, n.Elem) + + case *SliceType: + p.print(_Lbrack, _Rbrack, n.Elem) + + case *DotsType: + p.print(_DotDotDot, n.Elem) + + case *StructType: + p.print(_Struct) + if len(n.FieldList) > 0 && p.linebreaks { + p.print(blank) + } + p.print(_Lbrace) + if len(n.FieldList) > 0 { + if p.linebreaks { + p.print(newline, indent) + p.printFieldList(n.FieldList, n.TagList, _Semi) + p.print(outdent, newline) + } else { + p.printFieldList(n.FieldList, n.TagList, _Semi) + } + } + p.print(_Rbrace) + + case *FuncType: + p.print(_Func) + p.printSignature(n) + + case *InterfaceType: + p.print(_Interface) + if p.linebreaks && len(n.MethodList) > 1 { + p.print(blank) + p.print(_Lbrace) + p.print(newline, indent) + p.printMethodList(n.MethodList) + p.print(outdent, newline) + } else { + p.print(_Lbrace) + p.printMethodList(n.MethodList) + } + p.print(_Rbrace) + + case *MapType: + p.print(_Map, _Lbrack, n.Key, _Rbrack, n.Value) + + case *ChanType: + if n.Dir == RecvOnly { + p.print(_Arrow) + } + p.print(_Chan) + if n.Dir == SendOnly { + p.print(_Arrow) + } + p.print(blank) + if e, _ := n.Elem.(*ChanType); n.Dir == 0 && e != nil && e.Dir == RecvOnly { + // 
don't print chan (<-chan T) as chan <-chan T + p.print(_Lparen) + p.print(n.Elem) + p.print(_Rparen) + } else { + p.print(n.Elem) + } + + // statements + case *DeclStmt: + p.printDecl(n.DeclList) + + case *EmptyStmt: + // nothing to print + + case *LabeledStmt: + p.print(outdent, n.Label, _Colon, indent, newline, n.Stmt) + + case *ExprStmt: + p.print(n.X) + + case *SendStmt: + p.print(n.Chan, blank, _Arrow, blank, n.Value) + + case *AssignStmt: + p.print(n.Lhs) + if n.Rhs == nil { + // TODO(gri) This is going to break the mayCombine + // check once we enable that again. + p.print(n.Op, n.Op) // ++ or -- + } else { + p.print(blank, n.Op, _Assign, blank) + p.print(n.Rhs) + } + + case *CallStmt: + p.print(n.Tok, blank, n.Call) + + case *ReturnStmt: + p.print(_Return) + if n.Results != nil { + p.print(blank, n.Results) + } + + case *BranchStmt: + p.print(n.Tok) + if n.Label != nil { + p.print(blank, n.Label) + } + + case *BlockStmt: + p.print(_Lbrace) + if len(n.List) > 0 { + p.print(newline, indent) + p.printStmtList(n.List, true) + p.print(outdent, newline) + } + p.print(_Rbrace) + + case *IfStmt: + p.print(_If, blank) + if n.Init != nil { + p.print(n.Init, _Semi, blank) + } + p.print(n.Cond, blank, n.Then) + if n.Else != nil { + p.print(blank, _Else, blank, n.Else) + } + + case *SwitchStmt: + p.print(_Switch, blank) + if n.Init != nil { + p.print(n.Init, _Semi, blank) + } + if n.Tag != nil { + p.print(n.Tag, blank) + } + p.printSwitchBody(n.Body) + + case *SelectStmt: + p.print(_Select, blank) // for now + p.printSelectBody(n.Body) + + case *RangeClause: + if n.Lhs != nil { + tok := _Assign + if n.Def { + tok = _Define + } + p.print(n.Lhs, blank, tok, blank) + } + p.print(_Range, blank, n.X) + + case *ForStmt: + p.print(_For, blank) + if n.Init == nil && n.Post == nil { + if n.Cond != nil { + p.print(n.Cond, blank) + } + } else { + if n.Init != nil { + p.print(n.Init) + // TODO(gri) clean this up + if _, ok := n.Init.(*RangeClause); ok { + p.print(blank, n.Body) + 
break + } + } + p.print(_Semi, blank) + if n.Cond != nil { + p.print(n.Cond) + } + p.print(_Semi, blank) + if n.Post != nil { + p.print(n.Post, blank) + } + } + p.print(n.Body) + + case *ImportDecl: + if n.Group == nil { + p.print(_Import, blank) + } + if n.LocalPkgName != nil { + p.print(n.LocalPkgName, blank) + } + p.print(n.Path) + + case *ConstDecl: + if n.Group == nil { + p.print(_Const, blank) + } + p.printNameList(n.NameList) + if n.Type != nil { + p.print(blank, n.Type) + } + if n.Values != nil { + p.print(blank, _Assign, blank, n.Values) + } + + case *TypeDecl: + if n.Group == nil { + p.print(_Type, blank) + } + p.print(n.Name) + if n.TParamList != nil { + p.printParameterList(n.TParamList, _Type) + } + p.print(blank) + if n.Alias { + p.print(_Assign, blank) + } + p.print(n.Type) + + case *VarDecl: + if n.Group == nil { + p.print(_Var, blank) + } + p.printNameList(n.NameList) + if n.Type != nil { + p.print(blank, n.Type) + } + if n.Values != nil { + p.print(blank, _Assign, blank, n.Values) + } + + case *FuncDecl: + p.print(_Func, blank) + if r := n.Recv; r != nil { + p.print(_Lparen) + if r.Name != nil { + p.print(r.Name, blank) + } + p.printNode(r.Type) + p.print(_Rparen, blank) + } + p.print(n.Name) + if n.TParamList != nil { + p.printParameterList(n.TParamList, _Func) + } + p.printSignature(n.Type) + if n.Body != nil { + p.print(blank, n.Body) + } + + case *printGroup: + p.print(n.Tok, blank, _Lparen) + if len(n.Decls) > 0 { + p.print(newline, indent) + for _, d := range n.Decls { + p.printNode(d) + p.print(_Semi, newline) + } + p.print(outdent) + } + p.print(_Rparen) + + // files + case *File: + p.print(_Package, blank, n.PkgName) + if len(n.DeclList) > 0 { + p.print(_Semi, newline, newline) + p.printDeclList(n.DeclList) + } + + default: + panic(fmt.Sprintf("syntax.Iterate: unexpected node type %T", n)) + } +} + +func (p *printer) printFields(fields []*Field, tags []*BasicLit, i, j int) { + if i+1 == j && fields[i].Name == nil { + // anonymous field + 
p.printNode(fields[i].Type) + } else { + for k, f := range fields[i:j] { + if k > 0 { + p.print(_Comma, blank) + } + p.printNode(f.Name) + } + p.print(blank) + p.printNode(fields[i].Type) + } + if i < len(tags) && tags[i] != nil { + p.print(blank) + p.printNode(tags[i]) + } +} + +func (p *printer) printFieldList(fields []*Field, tags []*BasicLit, sep token) { + i0 := 0 + var typ Expr + for i, f := range fields { + if f.Name == nil || f.Type != typ { + if i0 < i { + p.printFields(fields, tags, i0, i) + p.print(sep, newline) + i0 = i + } + typ = f.Type + } + } + p.printFields(fields, tags, i0, len(fields)) +} + +func (p *printer) printMethodList(methods []*Field) { + for i, m := range methods { + if i > 0 { + p.print(_Semi, newline) + } + if m.Name != nil { + p.printNode(m.Name) + p.printSignature(m.Type.(*FuncType)) + } else { + p.printNode(m.Type) + } + } +} + +func (p *printer) printNameList(list []*Name) { + for i, x := range list { + if i > 0 { + p.print(_Comma, blank) + } + p.printNode(x) + } +} + +func (p *printer) printExprList(list []Expr) { + for i, x := range list { + if i > 0 { + p.print(_Comma, blank) + } + p.printNode(x) + } +} + +func (p *printer) printExprLines(list []Expr) { + if len(list) > 0 { + p.print(newline, indent) + for _, x := range list { + p.print(x, _Comma, newline) + } + p.print(outdent) + } +} + +func groupFor(d Decl) (token, *Group) { + switch d := d.(type) { + case *ImportDecl: + return _Import, d.Group + case *ConstDecl: + return _Const, d.Group + case *TypeDecl: + return _Type, d.Group + case *VarDecl: + return _Var, d.Group + case *FuncDecl: + return _Func, nil + default: + panic("unreachable") + } +} + +type printGroup struct { + node + Tok token + Decls []Decl +} + +func (p *printer) printDecl(list []Decl) { + tok, group := groupFor(list[0]) + + if group == nil { + if len(list) != 1 { + panic("unreachable") + } + p.printNode(list[0]) + return + } + + // if _, ok := list[0].(*EmptyDecl); ok { + // if len(list) != 1 { + // 
panic("unreachable") + // } + // // TODO(gri) if there are comments inside the empty + // // group, we may need to keep the list non-nil + // list = nil + // } + + // printGroup is here for consistent comment handling + // (this is not yet used) + var pg printGroup + // *pg.Comments() = *group.Comments() + pg.Tok = tok + pg.Decls = list + p.printNode(&pg) +} + +func (p *printer) printDeclList(list []Decl) { + i0 := 0 + var tok token + var group *Group + for i, x := range list { + if s, g := groupFor(x); g == nil || g != group { + if i0 < i { + p.printDecl(list[i0:i]) + p.print(_Semi, newline) + // print empty line between different declaration groups, + // different kinds of declarations, or between functions + if g != group || s != tok || s == _Func { + p.print(newline) + } + i0 = i + } + tok, group = s, g + } + } + p.printDecl(list[i0:]) +} + +func (p *printer) printSignature(sig *FuncType) { + p.printParameterList(sig.ParamList, 0) + if list := sig.ResultList; list != nil { + p.print(blank) + if len(list) == 1 && list[0].Name == nil { + p.printNode(list[0].Type) + } else { + p.printParameterList(list, 0) + } + } +} + +// If tok != 0 print a type parameter list: tok == _Type means +// a type parameter list for a type, tok == _Func means a type +// parameter list for a func. 
+func (p *printer) printParameterList(list []*Field, tok token) { + open, close := _Lparen, _Rparen + if tok != 0 { + open, close = _Lbrack, _Rbrack + } + p.print(open) + for i, f := range list { + if i > 0 { + p.print(_Comma, blank) + } + if f.Name != nil { + p.printNode(f.Name) + if i+1 < len(list) { + f1 := list[i+1] + if f1.Name != nil && f1.Type == f.Type { + continue // no need to print type + } + } + p.print(blank) + } + p.printNode(Unparen(f.Type)) // no need for (extra) parentheses around parameter types + } + // A type parameter list [P T] where the name P and the type expression T syntactically + // combine to another valid (value) expression requires a trailing comma, as in [P *T,] + // (or an enclosing interface as in [P interface(*T)]), so that the type parameter list + // is not parsed as an array length [P*T]. + if tok == _Type && len(list) == 1 && combinesWithName(list[0].Type) { + p.print(_Comma) + } + p.print(close) +} + +// combinesWithName reports whether a name followed by the expression x +// syntactically combines to another valid (value) expression. For instance +// using *T for x, "name *T" syntactically appears as the expression x*T. +// On the other hand, using P|Q or *P|~Q for x, "name P|Q" or name *P|~Q" +// cannot be combined into a valid (value) expression. +func combinesWithName(x Expr) bool { + switch x := x.(type) { + case *Operation: + if x.Y == nil { + // name *x.X combines to name*x.X if x.X is not a type element + return x.Op == Mul && !isTypeElem(x.X) + } + // binary expressions + return combinesWithName(x.X) && !isTypeElem(x.Y) + case *ParenExpr: + // name(x) combines but we are making sure at + // the call site that x is never parenthesized. 
+ panic("unexpected parenthesized expression") + } + return false +} + +func (p *printer) printStmtList(list []Stmt, braces bool) { + for i, x := range list { + p.print(x, _Semi) + if i+1 < len(list) { + p.print(newline) + } else if braces { + // Print an extra semicolon if the last statement is + // an empty statement and we are in a braced block + // because one semicolon is automatically removed. + if _, ok := x.(*EmptyStmt); ok { + p.print(x, _Semi) + } + } + } +} + +func (p *printer) printSwitchBody(list []*CaseClause) { + p.print(_Lbrace) + if len(list) > 0 { + p.print(newline) + for i, c := range list { + p.printCaseClause(c, i+1 == len(list)) + p.print(newline) + } + } + p.print(_Rbrace) +} + +func (p *printer) printSelectBody(list []*CommClause) { + p.print(_Lbrace) + if len(list) > 0 { + p.print(newline) + for i, c := range list { + p.printCommClause(c, i+1 == len(list)) + p.print(newline) + } + } + p.print(_Rbrace) +} + +func (p *printer) printCaseClause(c *CaseClause, braces bool) { + if c.Cases != nil { + p.print(_Case, blank, c.Cases) + } else { + p.print(_Default) + } + p.print(_Colon) + if len(c.Body) > 0 { + p.print(newline, indent) + p.printStmtList(c.Body, braces) + p.print(outdent) + } +} + +func (p *printer) printCommClause(c *CommClause, braces bool) { + if c.Comm != nil { + p.print(_Case, blank) + p.print(c.Comm) + } else { + p.print(_Default) + } + p.print(_Colon) + if len(c.Body) > 0 { + p.print(newline, indent) + p.printStmtList(c.Body, braces) + p.print(outdent) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/printer_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/printer_test.go new file mode 100644 index 0000000000000000000000000000000000000000..99baf7f5b63c96741701a18c03e8ca99a68c597e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/printer_test.go @@ -0,0 +1,285 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syntax + +import ( + "fmt" + "io" + "os" + "strings" + "testing" +) + +func TestPrint(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + ast, _ := ParseFile(*src_, func(err error) { t.Error(err) }, nil, 0) + + if ast != nil { + Fprint(testOut(), ast, LineForm) + fmt.Println() + } +} + +type shortBuffer struct { + buf []byte +} + +func (w *shortBuffer) Write(data []byte) (n int, err error) { + w.buf = append(w.buf, data...) + n = len(data) + if len(w.buf) > 10 { + err = io.ErrShortBuffer + } + return +} + +func TestPrintError(t *testing.T) { + const src = "package p; var x int" + ast, err := Parse(nil, strings.NewReader(src), nil, nil, 0) + if err != nil { + t.Fatal(err) + } + + var buf shortBuffer + _, err = Fprint(&buf, ast, 0) + if err == nil || err != io.ErrShortBuffer { + t.Errorf("got err = %s, want %s", err, io.ErrShortBuffer) + } +} + +var stringTests = [][2]string{ + dup("package p"), + dup("package p; type _ int; type T1 = struct{}; type ( _ *struct{}; T2 = float32 )"), + + // generic type declarations (given type separated with blank from LHS) + dup("package p; type _[T any] struct{}"), + dup("package p; type _[A, B, C interface{m()}] struct{}"), + dup("package p; type _[T any, A, B, C interface{m()}, X, Y, Z interface{~int}] struct{}"), + + dup("package p; type _[P *struct{}] struct{}"), + dup("package p; type _[P *T,] struct{}"), + dup("package p; type _[P *T, _ any] struct{}"), + {"package p; type _[P (*T),] struct{}", "package p; type _[P *T,] struct{}"}, + {"package p; type _[P (*T), _ any] struct{}", "package p; type _[P *T, _ any] struct{}"}, + {"package p; type _[P (T),] struct{}", "package p; type _[P T] struct{}"}, + {"package p; type _[P (T), _ any] struct{}", "package p; type _[P T, _ any] struct{}"}, + + {"package p; type _[P (*struct{})] struct{}", "package p; type _[P *struct{}] struct{}"}, 
+ {"package p; type _[P ([]int)] struct{}", "package p; type _[P []int] struct{}"}, + {"package p; type _[P ([]int) | int] struct{}", "package p; type _[P []int | int] struct{}"}, + + // a type literal in an |-expression indicates a type parameter list (blank after type parameter list and type) + dup("package p; type _[P *[]int] struct{}"), + dup("package p; type _[P T | T] struct{}"), + dup("package p; type _[P T | T | T | T] struct{}"), + dup("package p; type _[P *T | T, Q T] struct{}"), + dup("package p; type _[P *[]T | T] struct{}"), + dup("package p; type _[P *T | T | T | T | ~T] struct{}"), + dup("package p; type _[P *T | T | T | ~T | T] struct{}"), + dup("package p; type _[P *T | T | struct{} | T] struct{}"), + dup("package p; type _[P <-chan int] struct{}"), + dup("package p; type _[P *T | struct{} | T] struct{}"), + + // a trailing comma always indicates a (possibly invalid) type parameter list (blank after type parameter list and type) + dup("package p; type _[P *T,] struct{}"), + dup("package p; type _[P *T | T,] struct{}"), + dup("package p; type _[P *T | <-T | T,] struct{}"), + + // slice/array type declarations (no blank between array length and element type) + dup("package p; type _ []byte"), + dup("package p; type _ [n]byte"), + dup("package p; type _ [P(T)]byte"), + dup("package p; type _ [P((T))]byte"), + dup("package p; type _ [P * *T]byte"), + dup("package p; type _ [P * T]byte"), + dup("package p; type _ [P(*T)]byte"), + dup("package p; type _ [P(**T)]byte"), + dup("package p; type _ [P * T - T]byte"), + dup("package p; type _ [P * T - T]byte"), + dup("package p; type _ [P * T | T]byte"), + dup("package p; type _ [P * T | <-T | T]byte"), + + // generic function declarations + dup("package p; func _[T any]()"), + dup("package p; func _[A, B, C interface{m()}]()"), + dup("package p; func _[T any, A, B, C interface{m()}, X, Y, Z interface{~int}]()"), + + // generic functions with elided interfaces in type constraints + dup("package p; func _[P 
*T]() {}"), + dup("package p; func _[P *T | T | T | T | ~T]() {}"), + dup("package p; func _[P *T | T | struct{} | T]() {}"), + dup("package p; func _[P ~int, Q int | string]() {}"), + dup("package p; func _[P struct{f int}, Q *P]() {}"), + + // methods with generic receiver types + dup("package p; func (R[T]) _()"), + dup("package p; func (*R[A, B, C]) _()"), + dup("package p; func (_ *R[A, B, C]) _()"), + + // channels + dup("package p; type _ chan chan int"), + dup("package p; type _ chan (<-chan int)"), + dup("package p; type _ chan chan<- int"), + + dup("package p; type _ <-chan chan int"), + dup("package p; type _ <-chan <-chan int"), + dup("package p; type _ <-chan chan<- int"), + + dup("package p; type _ chan<- chan int"), + dup("package p; type _ chan<- <-chan int"), + dup("package p; type _ chan<- chan<- int"), + + // TODO(gri) expand +} + +func TestPrintString(t *testing.T) { + for _, test := range stringTests { + ast, err := Parse(nil, strings.NewReader(test[0]), nil, nil, 0) + if err != nil { + t.Error(err) + continue + } + if got := String(ast); got != test[1] { + t.Errorf("%q: got %q", test[1], got) + } + } +} + +func testOut() io.Writer { + if testing.Verbose() { + return os.Stdout + } + return io.Discard +} + +func dup(s string) [2]string { return [2]string{s, s} } + +var exprTests = [][2]string{ + // basic type literals + dup("x"), + dup("true"), + dup("42"), + dup("3.1415"), + dup("2.71828i"), + dup(`'a'`), + dup(`"foo"`), + dup("`bar`"), + dup("any"), + + // func and composite literals + dup("func() {}"), + dup("[]int{}"), + {"func(x int) complex128 { return 0 }", "func(x int) complex128 {…}"}, + {"[]int{1, 2, 3}", "[]int{…}"}, + + // type expressions + dup("[1 << 10]byte"), + dup("[]int"), + dup("*int"), + dup("struct{x int}"), + dup("func()"), + dup("func(int, float32) string"), + dup("interface{m()}"), + dup("interface{m() string; n(x int)}"), + dup("interface{~int}"), + dup("interface{~int | ~float64 | ~string}"), + dup("interface{~int; 
m()}"), + dup("interface{~int | ~float64 | ~string; m() string; n(x int)}"), + dup("map[string]int"), + dup("chan E"), + dup("<-chan E"), + dup("chan<- E"), + + // new interfaces + dup("interface{int}"), + dup("interface{~int}"), + + // generic constraints + dup("interface{~a | ~b | ~c; ~int | ~string; float64; m()}"), + dup("interface{int | string}"), + dup("interface{~int | ~string; float64; m()}"), + dup("interface{~T[int, string] | string}"), + + // generic types + dup("x[T]"), + dup("x[N | A | S]"), + dup("x[N, A]"), + + // non-type expressions + dup("(x)"), + dup("x.f"), + dup("a[i]"), + + dup("s[:]"), + dup("s[i:]"), + dup("s[:j]"), + dup("s[i:j]"), + dup("s[:j:k]"), + dup("s[i:j:k]"), + + dup("x.(T)"), + + dup("x.([10]int)"), + dup("x.([...]int)"), + + dup("x.(struct{})"), + dup("x.(struct{x int; y, z float32; E})"), + + dup("x.(func())"), + dup("x.(func(x int))"), + dup("x.(func() int)"), + dup("x.(func(x, y int, z float32) (r int))"), + dup("x.(func(a, b, c int))"), + dup("x.(func(x ...T))"), + + dup("x.(interface{})"), + dup("x.(interface{m(); n(x int); E})"), + dup("x.(interface{m(); n(x int) T; E; F})"), + + dup("x.(map[K]V)"), + + dup("x.(chan E)"), + dup("x.(<-chan E)"), + dup("x.(chan<- chan int)"), + dup("x.(chan<- <-chan int)"), + dup("x.(<-chan chan int)"), + dup("x.(chan (<-chan int))"), + + dup("f()"), + dup("f(x)"), + dup("int(x)"), + dup("f(x, x + y)"), + dup("f(s...)"), + dup("f(a, s...)"), + + // generic functions + dup("f[T]()"), + dup("f[T](T)"), + dup("f[T, T1]()"), + dup("f[T, T1](T, T1)"), + + dup("*x"), + dup("&x"), + dup("x + y"), + dup("x + y << (2 * s)"), +} + +func TestShortString(t *testing.T) { + for _, test := range exprTests { + src := "package p; var _ = " + test[0] + ast, err := Parse(nil, strings.NewReader(src), nil, nil, 0) + if err != nil { + t.Errorf("%s: %s", test[0], err) + continue + } + x := ast.DeclList[0].(*VarDecl).Values + if got := String(x); got != test[1] { + t.Errorf("%s: got %s, want %s", test[0], got, 
test[1]) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/scanner.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/scanner.go new file mode 100644 index 0000000000000000000000000000000000000000..807d8383866dcb80849c0d1daad89962b6d11c1c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/scanner.go @@ -0,0 +1,881 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements scanner, a lexical tokenizer for +// Go source. After initialization, consecutive calls of +// next advance the scanner one token at a time. +// +// This file, source.go, tokens.go, and token_string.go are self-contained +// (`go tool compile scanner.go source.go tokens.go token_string.go` compiles) +// and thus could be made into their own package. + +package syntax + +import ( + "fmt" + "io" + "unicode" + "unicode/utf8" +) + +// The mode flags below control which comments are reported +// by calling the error handler. If no flag is set, comments +// are ignored. 
+const ( + comments uint = 1 << iota // call handler for all comments + directives // call handler for directives only +) + +type scanner struct { + source + mode uint + nlsemi bool // if set '\n' and EOF translate to ';' + + // current token, valid after calling next() + line, col uint + blank bool // line is blank up to col + tok token + lit string // valid if tok is _Name, _Literal, or _Semi ("semicolon", "newline", or "EOF"); may be malformed if bad is true + bad bool // valid if tok is _Literal, true if a syntax error occurred, lit may be malformed + kind LitKind // valid if tok is _Literal + op Operator // valid if tok is _Operator, _Star, _AssignOp, or _IncOp + prec int // valid if tok is _Operator, _Star, _AssignOp, or _IncOp +} + +func (s *scanner) init(src io.Reader, errh func(line, col uint, msg string), mode uint) { + s.source.init(src, errh) + s.mode = mode + s.nlsemi = false +} + +// errorf reports an error at the most recently read character position. +func (s *scanner) errorf(format string, args ...interface{}) { + s.error(fmt.Sprintf(format, args...)) +} + +// errorAtf reports an error at a byte column offset relative to the current token start. +func (s *scanner) errorAtf(offset int, format string, args ...interface{}) { + s.errh(s.line, s.col+uint(offset), fmt.Sprintf(format, args...)) +} + +// setLit sets the scanner state for a recognized _Literal token. +func (s *scanner) setLit(kind LitKind, ok bool) { + s.nlsemi = true + s.tok = _Literal + s.lit = string(s.segment()) + s.bad = !ok + s.kind = kind +} + +// next advances the scanner by reading the next token. +// +// If a read, source encoding, or lexical error occurs, next calls +// the installed error handler with the respective error position +// and message. The error message is guaranteed to be non-empty and +// never starts with a '/'. The error handler must exist. 
+// +// If the scanner mode includes the comments flag and a comment +// (including comments containing directives) is encountered, the +// error handler is also called with each comment position and text +// (including opening /* or // and closing */, but without a newline +// at the end of line comments). Comment text always starts with a / +// which can be used to distinguish these handler calls from errors. +// +// If the scanner mode includes the directives (but not the comments) +// flag, only comments containing a //line, /*line, or //go: directive +// are reported, in the same way as regular comments. +func (s *scanner) next() { + nlsemi := s.nlsemi + s.nlsemi = false + +redo: + // skip white space + s.stop() + startLine, startCol := s.pos() + for s.ch == ' ' || s.ch == '\t' || s.ch == '\n' && !nlsemi || s.ch == '\r' { + s.nextch() + } + + // token start + s.line, s.col = s.pos() + s.blank = s.line > startLine || startCol == colbase + s.start() + if isLetter(s.ch) || s.ch >= utf8.RuneSelf && s.atIdentChar(true) { + s.nextch() + s.ident() + return + } + + switch s.ch { + case -1: + if nlsemi { + s.lit = "EOF" + s.tok = _Semi + break + } + s.tok = _EOF + + case '\n': + s.nextch() + s.lit = "newline" + s.tok = _Semi + + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + s.number(false) + + case '"': + s.stdString() + + case '`': + s.rawString() + + case '\'': + s.rune() + + case '(': + s.nextch() + s.tok = _Lparen + + case '[': + s.nextch() + s.tok = _Lbrack + + case '{': + s.nextch() + s.tok = _Lbrace + + case ',': + s.nextch() + s.tok = _Comma + + case ';': + s.nextch() + s.lit = "semicolon" + s.tok = _Semi + + case ')': + s.nextch() + s.nlsemi = true + s.tok = _Rparen + + case ']': + s.nextch() + s.nlsemi = true + s.tok = _Rbrack + + case '}': + s.nextch() + s.nlsemi = true + s.tok = _Rbrace + + case ':': + s.nextch() + if s.ch == '=' { + s.nextch() + s.tok = _Define + break + } + s.tok = _Colon + + case '.': + s.nextch() + if isDecimal(s.ch) { + 
s.number(true) + break + } + if s.ch == '.' { + s.nextch() + if s.ch == '.' { + s.nextch() + s.tok = _DotDotDot + break + } + s.rewind() // now s.ch holds 1st '.' + s.nextch() // consume 1st '.' again + } + s.tok = _Dot + + case '+': + s.nextch() + s.op, s.prec = Add, precAdd + if s.ch != '+' { + goto assignop + } + s.nextch() + s.nlsemi = true + s.tok = _IncOp + + case '-': + s.nextch() + s.op, s.prec = Sub, precAdd + if s.ch != '-' { + goto assignop + } + s.nextch() + s.nlsemi = true + s.tok = _IncOp + + case '*': + s.nextch() + s.op, s.prec = Mul, precMul + // don't goto assignop - want _Star token + if s.ch == '=' { + s.nextch() + s.tok = _AssignOp + break + } + s.tok = _Star + + case '/': + s.nextch() + if s.ch == '/' { + s.nextch() + s.lineComment() + goto redo + } + if s.ch == '*' { + s.nextch() + s.fullComment() + if line, _ := s.pos(); line > s.line && nlsemi { + // A multi-line comment acts like a newline; + // it translates to a ';' if nlsemi is set. + s.lit = "newline" + s.tok = _Semi + break + } + goto redo + } + s.op, s.prec = Div, precMul + goto assignop + + case '%': + s.nextch() + s.op, s.prec = Rem, precMul + goto assignop + + case '&': + s.nextch() + if s.ch == '&' { + s.nextch() + s.op, s.prec = AndAnd, precAndAnd + s.tok = _Operator + break + } + s.op, s.prec = And, precMul + if s.ch == '^' { + s.nextch() + s.op = AndNot + } + goto assignop + + case '|': + s.nextch() + if s.ch == '|' { + s.nextch() + s.op, s.prec = OrOr, precOrOr + s.tok = _Operator + break + } + s.op, s.prec = Or, precAdd + goto assignop + + case '^': + s.nextch() + s.op, s.prec = Xor, precAdd + goto assignop + + case '<': + s.nextch() + if s.ch == '=' { + s.nextch() + s.op, s.prec = Leq, precCmp + s.tok = _Operator + break + } + if s.ch == '<' { + s.nextch() + s.op, s.prec = Shl, precMul + goto assignop + } + if s.ch == '-' { + s.nextch() + s.tok = _Arrow + break + } + s.op, s.prec = Lss, precCmp + s.tok = _Operator + + case '>': + s.nextch() + if s.ch == '=' { + s.nextch() + 
s.op, s.prec = Geq, precCmp + s.tok = _Operator + break + } + if s.ch == '>' { + s.nextch() + s.op, s.prec = Shr, precMul + goto assignop + } + s.op, s.prec = Gtr, precCmp + s.tok = _Operator + + case '=': + s.nextch() + if s.ch == '=' { + s.nextch() + s.op, s.prec = Eql, precCmp + s.tok = _Operator + break + } + s.tok = _Assign + + case '!': + s.nextch() + if s.ch == '=' { + s.nextch() + s.op, s.prec = Neq, precCmp + s.tok = _Operator + break + } + s.op, s.prec = Not, 0 + s.tok = _Operator + + case '~': + s.nextch() + s.op, s.prec = Tilde, 0 + s.tok = _Operator + + default: + s.errorf("invalid character %#U", s.ch) + s.nextch() + goto redo + } + + return + +assignop: + if s.ch == '=' { + s.nextch() + s.tok = _AssignOp + return + } + s.tok = _Operator +} + +func (s *scanner) ident() { + // accelerate common case (7bit ASCII) + for isLetter(s.ch) || isDecimal(s.ch) { + s.nextch() + } + + // general case + if s.ch >= utf8.RuneSelf { + for s.atIdentChar(false) { + s.nextch() + } + } + + // possibly a keyword + lit := s.segment() + if len(lit) >= 2 { + if tok := keywordMap[hash(lit)]; tok != 0 && tokStrFast(tok) == string(lit) { + s.nlsemi = contains(1<<_Break|1<<_Continue|1<<_Fallthrough|1<<_Return, tok) + s.tok = tok + return + } + } + + s.nlsemi = true + s.lit = string(lit) + s.tok = _Name +} + +// tokStrFast is a faster version of token.String, which assumes that tok +// is one of the valid tokens - and can thus skip bounds checks. +func tokStrFast(tok token) string { + return _token_name[_token_index[tok-1]:_token_index[tok]] +} + +func (s *scanner) atIdentChar(first bool) bool { + switch { + case unicode.IsLetter(s.ch) || s.ch == '_': + // ok + case unicode.IsDigit(s.ch): + if first { + s.errorf("identifier cannot begin with digit %#U", s.ch) + } + case s.ch >= utf8.RuneSelf: + s.errorf("invalid character %#U in identifier", s.ch) + default: + return false + } + return true +} + +// hash is a perfect hash function for keywords. 
+// It assumes that s has at least length 2. +func hash(s []byte) uint { + return (uint(s[0])<<4 ^ uint(s[1]) + uint(len(s))) & uint(len(keywordMap)-1) +} + +var keywordMap [1 << 6]token // size must be power of two + +func init() { + // populate keywordMap + for tok := _Break; tok <= _Var; tok++ { + h := hash([]byte(tok.String())) + if keywordMap[h] != 0 { + panic("imperfect hash") + } + keywordMap[h] = tok + } +} + +func lower(ch rune) rune { return ('a' - 'A') | ch } // returns lower-case ch iff ch is ASCII letter +func isLetter(ch rune) bool { return 'a' <= lower(ch) && lower(ch) <= 'z' || ch == '_' } +func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' } +func isHex(ch rune) bool { return '0' <= ch && ch <= '9' || 'a' <= lower(ch) && lower(ch) <= 'f' } + +// digits accepts the sequence { digit | '_' }. +// If base <= 10, digits accepts any decimal digit but records +// the index (relative to the literal start) of a digit >= base +// in *invalid, if *invalid < 0. +// digits returns a bitset describing whether the sequence contained +// digits (bit 0 is set), or separators '_' (bit 1 is set). 
+func (s *scanner) digits(base int, invalid *int) (digsep int) { + if base <= 10 { + max := rune('0' + base) + for isDecimal(s.ch) || s.ch == '_' { + ds := 1 + if s.ch == '_' { + ds = 2 + } else if s.ch >= max && *invalid < 0 { + _, col := s.pos() + *invalid = int(col - s.col) // record invalid rune index + } + digsep |= ds + s.nextch() + } + } else { + for isHex(s.ch) || s.ch == '_' { + ds := 1 + if s.ch == '_' { + ds = 2 + } + digsep |= ds + s.nextch() + } + } + return +} + +func (s *scanner) number(seenPoint bool) { + ok := true + kind := IntLit + base := 10 // number base + prefix := rune(0) // one of 0 (decimal), '0' (0-octal), 'x', 'o', or 'b' + digsep := 0 // bit 0: digit present, bit 1: '_' present + invalid := -1 // index of invalid digit in literal, or < 0 + + // integer part + if !seenPoint { + if s.ch == '0' { + s.nextch() + switch lower(s.ch) { + case 'x': + s.nextch() + base, prefix = 16, 'x' + case 'o': + s.nextch() + base, prefix = 8, 'o' + case 'b': + s.nextch() + base, prefix = 2, 'b' + default: + base, prefix = 8, '0' + digsep = 1 // leading 0 + } + } + digsep |= s.digits(base, &invalid) + if s.ch == '.' 
{ + if prefix == 'o' || prefix == 'b' { + s.errorf("invalid radix point in %s literal", baseName(base)) + ok = false + } + s.nextch() + seenPoint = true + } + } + + // fractional part + if seenPoint { + kind = FloatLit + digsep |= s.digits(base, &invalid) + } + + if digsep&1 == 0 && ok { + s.errorf("%s literal has no digits", baseName(base)) + ok = false + } + + // exponent + if e := lower(s.ch); e == 'e' || e == 'p' { + if ok { + switch { + case e == 'e' && prefix != 0 && prefix != '0': + s.errorf("%q exponent requires decimal mantissa", s.ch) + ok = false + case e == 'p' && prefix != 'x': + s.errorf("%q exponent requires hexadecimal mantissa", s.ch) + ok = false + } + } + s.nextch() + kind = FloatLit + if s.ch == '+' || s.ch == '-' { + s.nextch() + } + digsep = s.digits(10, nil) | digsep&2 // don't lose sep bit + if digsep&1 == 0 && ok { + s.errorf("exponent has no digits") + ok = false + } + } else if prefix == 'x' && kind == FloatLit && ok { + s.errorf("hexadecimal mantissa requires a 'p' exponent") + ok = false + } + + // suffix 'i' + if s.ch == 'i' { + kind = ImagLit + s.nextch() + } + + s.setLit(kind, ok) // do this now so we can use s.lit below + + if kind == IntLit && invalid >= 0 && ok { + s.errorAtf(invalid, "invalid digit %q in %s literal", s.lit[invalid], baseName(base)) + ok = false + } + + if digsep&2 != 0 && ok { + if i := invalidSep(s.lit); i >= 0 { + s.errorAtf(i, "'_' must separate successive digits") + ok = false + } + } + + s.bad = !ok // correct s.bad +} + +func baseName(base int) string { + switch base { + case 2: + return "binary" + case 8: + return "octal" + case 10: + return "decimal" + case 16: + return "hexadecimal" + } + panic("invalid base") +} + +// invalidSep returns the index of the first invalid separator in x, or -1. +func invalidSep(x string) int { + x1 := ' ' // prefix char, we only care if it's 'x' + d := '.' // digit, one of '_', '0' (a digit), or '.' 
(anything else) + i := 0 + + // a prefix counts as a digit + if len(x) >= 2 && x[0] == '0' { + x1 = lower(rune(x[1])) + if x1 == 'x' || x1 == 'o' || x1 == 'b' { + d = '0' + i = 2 + } + } + + // mantissa and exponent + for ; i < len(x); i++ { + p := d // previous digit + d = rune(x[i]) + switch { + case d == '_': + if p != '0' { + return i + } + case isDecimal(d) || x1 == 'x' && isHex(d): + d = '0' + default: + if p == '_' { + return i - 1 + } + d = '.' + } + } + if d == '_' { + return len(x) - 1 + } + + return -1 +} + +func (s *scanner) rune() { + ok := true + s.nextch() + + n := 0 + for ; ; n++ { + if s.ch == '\'' { + if ok { + if n == 0 { + s.errorf("empty rune literal or unescaped '") + ok = false + } else if n != 1 { + s.errorAtf(0, "more than one character in rune literal") + ok = false + } + } + s.nextch() + break + } + if s.ch == '\\' { + s.nextch() + if !s.escape('\'') { + ok = false + } + continue + } + if s.ch == '\n' { + if ok { + s.errorf("newline in rune literal") + ok = false + } + break + } + if s.ch < 0 { + if ok { + s.errorAtf(0, "rune literal not terminated") + ok = false + } + break + } + s.nextch() + } + + s.setLit(RuneLit, ok) +} + +func (s *scanner) stdString() { + ok := true + s.nextch() + + for { + if s.ch == '"' { + s.nextch() + break + } + if s.ch == '\\' { + s.nextch() + if !s.escape('"') { + ok = false + } + continue + } + if s.ch == '\n' { + s.errorf("newline in string") + ok = false + break + } + if s.ch < 0 { + s.errorAtf(0, "string not terminated") + ok = false + break + } + s.nextch() + } + + s.setLit(StringLit, ok) +} + +func (s *scanner) rawString() { + ok := true + s.nextch() + + for { + if s.ch == '`' { + s.nextch() + break + } + if s.ch < 0 { + s.errorAtf(0, "string not terminated") + ok = false + break + } + s.nextch() + } + // We leave CRs in the string since they are part of the + // literal (even though they are not part of the literal + // value). 
+ + s.setLit(StringLit, ok) +} + +func (s *scanner) comment(text string) { + s.errorAtf(0, "%s", text) +} + +func (s *scanner) skipLine() { + // don't consume '\n' - needed for nlsemi logic + for s.ch >= 0 && s.ch != '\n' { + s.nextch() + } +} + +func (s *scanner) lineComment() { + // opening has already been consumed + + if s.mode&comments != 0 { + s.skipLine() + s.comment(string(s.segment())) + return + } + + // are we saving directives? or is this definitely not a directive? + if s.mode&directives == 0 || (s.ch != 'g' && s.ch != 'l') { + s.stop() + s.skipLine() + return + } + + // recognize go: or line directives + prefix := "go:" + if s.ch == 'l' { + prefix = "line " + } + for _, m := range prefix { + if s.ch != m { + s.stop() + s.skipLine() + return + } + s.nextch() + } + + // directive text + s.skipLine() + s.comment(string(s.segment())) +} + +func (s *scanner) skipComment() bool { + for s.ch >= 0 { + for s.ch == '*' { + s.nextch() + if s.ch == '/' { + s.nextch() + return true + } + } + s.nextch() + } + s.errorAtf(0, "comment not terminated") + return false +} + +func (s *scanner) fullComment() { + /* opening has already been consumed */ + + if s.mode&comments != 0 { + if s.skipComment() { + s.comment(string(s.segment())) + } + return + } + + if s.mode&directives == 0 || s.ch != 'l' { + s.stop() + s.skipComment() + return + } + + // recognize line directive + const prefix = "line " + for _, m := range prefix { + if s.ch != m { + s.stop() + s.skipComment() + return + } + s.nextch() + } + + // directive text + if s.skipComment() { + s.comment(string(s.segment())) + } +} + +func (s *scanner) escape(quote rune) bool { + var n int + var base, max uint32 + + switch s.ch { + case quote, 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\': + s.nextch() + return true + case '0', '1', '2', '3', '4', '5', '6', '7': + n, base, max = 3, 8, 255 + case 'x': + s.nextch() + n, base, max = 2, 16, 255 + case 'u': + s.nextch() + n, base, max = 4, 16, unicode.MaxRune + case 'U': + s.nextch() 
+ n, base, max = 8, 16, unicode.MaxRune + default: + if s.ch < 0 { + return true // complain in caller about EOF + } + s.errorf("unknown escape") + return false + } + + var x uint32 + for i := n; i > 0; i-- { + if s.ch < 0 { + return true // complain in caller about EOF + } + d := base + if isDecimal(s.ch) { + d = uint32(s.ch) - '0' + } else if 'a' <= lower(s.ch) && lower(s.ch) <= 'f' { + d = uint32(lower(s.ch)) - 'a' + 10 + } + if d >= base { + s.errorf("invalid character %q in %s escape", s.ch, baseName(int(base))) + return false + } + // d < base + x = x*base + d + s.nextch() + } + + if x > max && base == 8 { + s.errorf("octal escape value %d > 255", x) + return false + } + + if x > max || 0xD800 <= x && x < 0xE000 /* surrogate range */ { + s.errorf("escape is invalid Unicode code point %#U", x) + return false + } + + return true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/scanner_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/scanner_test.go new file mode 100644 index 0000000000000000000000000000000000000000..450ec1ff8a55f6138318e7dc84164bceaa4ca406 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/scanner_test.go @@ -0,0 +1,767 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syntax + +import ( + "bytes" + "fmt" + "os" + "strings" + "testing" +) + +// errh is a default error handler for basic tests. +func errh(line, col uint, msg string) { + panic(fmt.Sprintf("%d:%d: %s", line, col, msg)) +} + +// Don't bother with other tests if TestSmoke doesn't pass. 
+func TestSmoke(t *testing.T) { + const src = "if (+foo\t+=..123/***/0.9_0e-0i'a'`raw`\"string\"..f;//$" + tokens := []token{_If, _Lparen, _Operator, _Name, _AssignOp, _Dot, _Literal, _Literal, _Literal, _Literal, _Literal, _Dot, _Dot, _Name, _Semi, _EOF} + + var got scanner + got.init(strings.NewReader(src), errh, 0) + for _, want := range tokens { + got.next() + if got.tok != want { + t.Errorf("%d:%d: got %s; want %s", got.line, got.col, got.tok, want) + continue + } + } +} + +// Once TestSmoke passes, run TestTokens next. +func TestTokens(t *testing.T) { + var got scanner + for _, want := range sampleTokens { + got.init(strings.NewReader(want.src), func(line, col uint, msg string) { + t.Errorf("%s:%d:%d: %s", want.src, line, col, msg) + }, 0) + got.next() + if got.tok != want.tok { + t.Errorf("%s: got %s; want %s", want.src, got.tok, want.tok) + continue + } + if (got.tok == _Name || got.tok == _Literal) && got.lit != want.src { + t.Errorf("%s: got %q; want %q", want.src, got.lit, want.src) + } + } +} + +func TestScanner(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + filename := *src_ // can be changed via -src flag + src, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + defer src.Close() + + var s scanner + s.init(src, errh, 0) + for { + s.next() + if s.tok == _EOF { + break + } + if !testing.Verbose() { + continue + } + switch s.tok { + case _Name, _Literal: + fmt.Printf("%s:%d:%d: %s => %s\n", filename, s.line, s.col, s.tok, s.lit) + case _Operator: + fmt.Printf("%s:%d:%d: %s => %s (prec = %d)\n", filename, s.line, s.col, s.tok, s.op, s.prec) + default: + fmt.Printf("%s:%d:%d: %s\n", filename, s.line, s.col, s.tok) + } + } +} + +func TestEmbeddedTokens(t *testing.T) { + // make source + var buf bytes.Buffer + for i, s := range sampleTokens { + buf.WriteString("\t\t\t\t"[:i&3]) // leading indentation + buf.WriteString(s.src) // token + buf.WriteString(" "[:i&7]) // trailing spaces + fmt.Fprintf(&buf, 
"/*line foo:%d */ // bar\n", i) // comments + newline (don't crash w/o directive handler) + } + + // scan source + var got scanner + var src string + got.init(&buf, func(line, col uint, msg string) { + t.Fatalf("%s:%d:%d: %s", src, line, col, msg) + }, 0) + got.next() + for i, want := range sampleTokens { + src = want.src + nlsemi := false + + if got.line-linebase != uint(i) { + t.Errorf("%s: got line %d; want %d", src, got.line-linebase, i) + } + + if got.tok != want.tok { + t.Errorf("%s: got tok %s; want %s", src, got.tok, want.tok) + continue + } + + switch want.tok { + case _Semi: + if got.lit != "semicolon" { + t.Errorf("%s: got %s; want semicolon", src, got.lit) + } + + case _Name, _Literal: + if got.lit != want.src { + t.Errorf("%s: got lit %q; want %q", src, got.lit, want.src) + continue + } + nlsemi = true + + case _Operator, _AssignOp, _IncOp: + if got.op != want.op { + t.Errorf("%s: got op %s; want %s", src, got.op, want.op) + continue + } + if got.prec != want.prec { + t.Errorf("%s: got prec %d; want %d", src, got.prec, want.prec) + continue + } + nlsemi = want.tok == _IncOp + + case _Rparen, _Rbrack, _Rbrace, _Break, _Continue, _Fallthrough, _Return: + nlsemi = true + } + + if nlsemi { + got.next() + if got.tok != _Semi { + t.Errorf("%s: got tok %s; want ;", src, got.tok) + continue + } + if got.lit != "newline" { + t.Errorf("%s: got %s; want newline", src, got.lit) + } + } + + got.next() + } + + if got.tok != _EOF { + t.Errorf("got %q; want _EOF", got.tok) + } +} + +var sampleTokens = [...]struct { + tok token + src string + op Operator + prec int +}{ + // name samples + {_Name, "x", 0, 0}, + {_Name, "X123", 0, 0}, + {_Name, "foo", 0, 0}, + {_Name, "Foo123", 0, 0}, + {_Name, "foo_bar", 0, 0}, + {_Name, "_", 0, 0}, + {_Name, "_foobar", 0, 0}, + {_Name, "a۰۱۸", 0, 0}, + {_Name, "foo६४", 0, 0}, + {_Name, "bar9876", 0, 0}, + {_Name, "ŝ", 0, 0}, + {_Name, "ŝfoo", 0, 0}, + + // literal samples + {_Literal, "0", 0, 0}, + {_Literal, "1", 0, 0}, + {_Literal, 
"12345", 0, 0}, + {_Literal, "123456789012345678890123456789012345678890", 0, 0}, + {_Literal, "01234567", 0, 0}, + {_Literal, "0_1_234_567", 0, 0}, + {_Literal, "0X0", 0, 0}, + {_Literal, "0xcafebabe", 0, 0}, + {_Literal, "0x_cafe_babe", 0, 0}, + {_Literal, "0O0", 0, 0}, + {_Literal, "0o000", 0, 0}, + {_Literal, "0o_000", 0, 0}, + {_Literal, "0B1", 0, 0}, + {_Literal, "0b01100110", 0, 0}, + {_Literal, "0b_0110_0110", 0, 0}, + {_Literal, "0.", 0, 0}, + {_Literal, "0.e0", 0, 0}, + {_Literal, "0.e-1", 0, 0}, + {_Literal, "0.e+123", 0, 0}, + {_Literal, ".0", 0, 0}, + {_Literal, ".0E00", 0, 0}, + {_Literal, ".0E-0123", 0, 0}, + {_Literal, ".0E+12345678901234567890", 0, 0}, + {_Literal, ".45e1", 0, 0}, + {_Literal, "3.14159265", 0, 0}, + {_Literal, "1e0", 0, 0}, + {_Literal, "1e+100", 0, 0}, + {_Literal, "1e-100", 0, 0}, + {_Literal, "2.71828e-1000", 0, 0}, + {_Literal, "0i", 0, 0}, + {_Literal, "1i", 0, 0}, + {_Literal, "012345678901234567889i", 0, 0}, + {_Literal, "123456789012345678890i", 0, 0}, + {_Literal, "0.i", 0, 0}, + {_Literal, ".0i", 0, 0}, + {_Literal, "3.14159265i", 0, 0}, + {_Literal, "1e0i", 0, 0}, + {_Literal, "1e+100i", 0, 0}, + {_Literal, "1e-100i", 0, 0}, + {_Literal, "2.71828e-1000i", 0, 0}, + {_Literal, "'a'", 0, 0}, + {_Literal, "'\\000'", 0, 0}, + {_Literal, "'\\xFF'", 0, 0}, + {_Literal, "'\\uff16'", 0, 0}, + {_Literal, "'\\U0000ff16'", 0, 0}, + {_Literal, "`foobar`", 0, 0}, + {_Literal, "`foo\tbar`", 0, 0}, + {_Literal, "`\r`", 0, 0}, + + // operators + {_Operator, "!", Not, 0}, + {_Operator, "~", Tilde, 0}, + + {_Operator, "||", OrOr, precOrOr}, + + {_Operator, "&&", AndAnd, precAndAnd}, + + {_Operator, "==", Eql, precCmp}, + {_Operator, "!=", Neq, precCmp}, + {_Operator, "<", Lss, precCmp}, + {_Operator, "<=", Leq, precCmp}, + {_Operator, ">", Gtr, precCmp}, + {_Operator, ">=", Geq, precCmp}, + + {_Operator, "+", Add, precAdd}, + {_Operator, "-", Sub, precAdd}, + {_Operator, "|", Or, precAdd}, + {_Operator, "^", Xor, precAdd}, + + {_Star, "*", 
Mul, precMul}, + {_Operator, "/", Div, precMul}, + {_Operator, "%", Rem, precMul}, + {_Operator, "&", And, precMul}, + {_Operator, "&^", AndNot, precMul}, + {_Operator, "<<", Shl, precMul}, + {_Operator, ">>", Shr, precMul}, + + // assignment operations + {_AssignOp, "+=", Add, precAdd}, + {_AssignOp, "-=", Sub, precAdd}, + {_AssignOp, "|=", Or, precAdd}, + {_AssignOp, "^=", Xor, precAdd}, + + {_AssignOp, "*=", Mul, precMul}, + {_AssignOp, "/=", Div, precMul}, + {_AssignOp, "%=", Rem, precMul}, + {_AssignOp, "&=", And, precMul}, + {_AssignOp, "&^=", AndNot, precMul}, + {_AssignOp, "<<=", Shl, precMul}, + {_AssignOp, ">>=", Shr, precMul}, + + // other operations + {_IncOp, "++", Add, precAdd}, + {_IncOp, "--", Sub, precAdd}, + {_Assign, "=", 0, 0}, + {_Define, ":=", 0, 0}, + {_Arrow, "<-", 0, 0}, + + // delimiters + {_Lparen, "(", 0, 0}, + {_Lbrack, "[", 0, 0}, + {_Lbrace, "{", 0, 0}, + {_Rparen, ")", 0, 0}, + {_Rbrack, "]", 0, 0}, + {_Rbrace, "}", 0, 0}, + {_Comma, ",", 0, 0}, + {_Semi, ";", 0, 0}, + {_Colon, ":", 0, 0}, + {_Dot, ".", 0, 0}, + {_DotDotDot, "...", 0, 0}, + + // keywords + {_Break, "break", 0, 0}, + {_Case, "case", 0, 0}, + {_Chan, "chan", 0, 0}, + {_Const, "const", 0, 0}, + {_Continue, "continue", 0, 0}, + {_Default, "default", 0, 0}, + {_Defer, "defer", 0, 0}, + {_Else, "else", 0, 0}, + {_Fallthrough, "fallthrough", 0, 0}, + {_For, "for", 0, 0}, + {_Func, "func", 0, 0}, + {_Go, "go", 0, 0}, + {_Goto, "goto", 0, 0}, + {_If, "if", 0, 0}, + {_Import, "import", 0, 0}, + {_Interface, "interface", 0, 0}, + {_Map, "map", 0, 0}, + {_Package, "package", 0, 0}, + {_Range, "range", 0, 0}, + {_Return, "return", 0, 0}, + {_Select, "select", 0, 0}, + {_Struct, "struct", 0, 0}, + {_Switch, "switch", 0, 0}, + {_Type, "type", 0, 0}, + {_Var, "var", 0, 0}, +} + +func TestComments(t *testing.T) { + type comment struct { + line, col uint // 0-based + text string + } + + for _, test := range []struct { + src string + want comment + }{ + // no comments + {"no comment 
here", comment{0, 0, ""}}, + {" /", comment{0, 0, ""}}, + {"\n /*/", comment{0, 0, ""}}, + + //-style comments + {"// line comment\n", comment{0, 0, "// line comment"}}, + {"package p // line comment\n", comment{0, 10, "// line comment"}}, + {"//\n//\n\t// want this one\r\n", comment{2, 1, "// want this one\r"}}, + {"\n\n//\n", comment{2, 0, "//"}}, + {"//", comment{0, 0, "//"}}, + + /*-style comments */ + {"123/* regular comment */", comment{0, 3, "/* regular comment */"}}, + {"package p /* regular comment", comment{0, 0, ""}}, + {"\n\n\n/*\n*//* want this one */", comment{4, 2, "/* want this one */"}}, + {"\n\n/**/", comment{2, 0, "/**/"}}, + {"/*", comment{0, 0, ""}}, + } { + var s scanner + var got comment + s.init(strings.NewReader(test.src), func(line, col uint, msg string) { + if msg[0] != '/' { + // error + if msg != "comment not terminated" { + t.Errorf("%q: %s", test.src, msg) + } + return + } + got = comment{line - linebase, col - colbase, msg} // keep last one + }, comments) + + for { + s.next() + if s.tok == _EOF { + break + } + } + + want := test.want + if got.line != want.line || got.col != want.col { + t.Errorf("%q: got position %d:%d; want %d:%d", test.src, got.line, got.col, want.line, want.col) + } + if got.text != want.text { + t.Errorf("%q: got %q; want %q", test.src, got.text, want.text) + } + } +} + +func TestNumbers(t *testing.T) { + for _, test := range []struct { + kind LitKind + src, tokens, err string + }{ + // binaries + {IntLit, "0b0", "0b0", ""}, + {IntLit, "0b1010", "0b1010", ""}, + {IntLit, "0B1110", "0B1110", ""}, + + {IntLit, "0b", "0b", "binary literal has no digits"}, + {IntLit, "0b0190", "0b0190", "invalid digit '9' in binary literal"}, + {IntLit, "0b01a0", "0b01 a0", ""}, // only accept 0-9 + + {FloatLit, "0b.", "0b.", "invalid radix point in binary literal"}, + {FloatLit, "0b.1", "0b.1", "invalid radix point in binary literal"}, + {FloatLit, "0b1.0", "0b1.0", "invalid radix point in binary literal"}, + {FloatLit, "0b1e10", 
"0b1e10", "'e' exponent requires decimal mantissa"}, + {FloatLit, "0b1P-1", "0b1P-1", "'P' exponent requires hexadecimal mantissa"}, + + {ImagLit, "0b10i", "0b10i", ""}, + {ImagLit, "0b10.0i", "0b10.0i", "invalid radix point in binary literal"}, + + // octals + {IntLit, "0o0", "0o0", ""}, + {IntLit, "0o1234", "0o1234", ""}, + {IntLit, "0O1234", "0O1234", ""}, + + {IntLit, "0o", "0o", "octal literal has no digits"}, + {IntLit, "0o8123", "0o8123", "invalid digit '8' in octal literal"}, + {IntLit, "0o1293", "0o1293", "invalid digit '9' in octal literal"}, + {IntLit, "0o12a3", "0o12 a3", ""}, // only accept 0-9 + + {FloatLit, "0o.", "0o.", "invalid radix point in octal literal"}, + {FloatLit, "0o.2", "0o.2", "invalid radix point in octal literal"}, + {FloatLit, "0o1.2", "0o1.2", "invalid radix point in octal literal"}, + {FloatLit, "0o1E+2", "0o1E+2", "'E' exponent requires decimal mantissa"}, + {FloatLit, "0o1p10", "0o1p10", "'p' exponent requires hexadecimal mantissa"}, + + {ImagLit, "0o10i", "0o10i", ""}, + {ImagLit, "0o10e0i", "0o10e0i", "'e' exponent requires decimal mantissa"}, + + // 0-octals + {IntLit, "0", "0", ""}, + {IntLit, "0123", "0123", ""}, + + {IntLit, "08123", "08123", "invalid digit '8' in octal literal"}, + {IntLit, "01293", "01293", "invalid digit '9' in octal literal"}, + {IntLit, "0F.", "0 F .", ""}, // only accept 0-9 + {IntLit, "0123F.", "0123 F .", ""}, + {IntLit, "0123456x", "0123456 x", ""}, + + // decimals + {IntLit, "1", "1", ""}, + {IntLit, "1234", "1234", ""}, + + {IntLit, "1f", "1 f", ""}, // only accept 0-9 + + {ImagLit, "0i", "0i", ""}, + {ImagLit, "0678i", "0678i", ""}, + + // decimal floats + {FloatLit, "0.", "0.", ""}, + {FloatLit, "123.", "123.", ""}, + {FloatLit, "0123.", "0123.", ""}, + + {FloatLit, ".0", ".0", ""}, + {FloatLit, ".123", ".123", ""}, + {FloatLit, ".0123", ".0123", ""}, + + {FloatLit, "0.0", "0.0", ""}, + {FloatLit, "123.123", "123.123", ""}, + {FloatLit, "0123.0123", "0123.0123", ""}, + + {FloatLit, "0e0", "0e0", 
""}, + {FloatLit, "123e+0", "123e+0", ""}, + {FloatLit, "0123E-1", "0123E-1", ""}, + + {FloatLit, "0.e+1", "0.e+1", ""}, + {FloatLit, "123.E-10", "123.E-10", ""}, + {FloatLit, "0123.e123", "0123.e123", ""}, + + {FloatLit, ".0e-1", ".0e-1", ""}, + {FloatLit, ".123E+10", ".123E+10", ""}, + {FloatLit, ".0123E123", ".0123E123", ""}, + + {FloatLit, "0.0e1", "0.0e1", ""}, + {FloatLit, "123.123E-10", "123.123E-10", ""}, + {FloatLit, "0123.0123e+456", "0123.0123e+456", ""}, + + {FloatLit, "0e", "0e", "exponent has no digits"}, + {FloatLit, "0E+", "0E+", "exponent has no digits"}, + {FloatLit, "1e+f", "1e+ f", "exponent has no digits"}, + {FloatLit, "0p0", "0p0", "'p' exponent requires hexadecimal mantissa"}, + {FloatLit, "1.0P-1", "1.0P-1", "'P' exponent requires hexadecimal mantissa"}, + + {ImagLit, "0.i", "0.i", ""}, + {ImagLit, ".123i", ".123i", ""}, + {ImagLit, "123.123i", "123.123i", ""}, + {ImagLit, "123e+0i", "123e+0i", ""}, + {ImagLit, "123.E-10i", "123.E-10i", ""}, + {ImagLit, ".123E+10i", ".123E+10i", ""}, + + // hexadecimals + {IntLit, "0x0", "0x0", ""}, + {IntLit, "0x1234", "0x1234", ""}, + {IntLit, "0xcafef00d", "0xcafef00d", ""}, + {IntLit, "0XCAFEF00D", "0XCAFEF00D", ""}, + + {IntLit, "0x", "0x", "hexadecimal literal has no digits"}, + {IntLit, "0x1g", "0x1 g", ""}, + + {ImagLit, "0xf00i", "0xf00i", ""}, + + // hexadecimal floats + {FloatLit, "0x0p0", "0x0p0", ""}, + {FloatLit, "0x12efp-123", "0x12efp-123", ""}, + {FloatLit, "0xABCD.p+0", "0xABCD.p+0", ""}, + {FloatLit, "0x.0189P-0", "0x.0189P-0", ""}, + {FloatLit, "0x1.ffffp+1023", "0x1.ffffp+1023", ""}, + + {FloatLit, "0x.", "0x.", "hexadecimal literal has no digits"}, + {FloatLit, "0x0.", "0x0.", "hexadecimal mantissa requires a 'p' exponent"}, + {FloatLit, "0x.0", "0x.0", "hexadecimal mantissa requires a 'p' exponent"}, + {FloatLit, "0x1.1", "0x1.1", "hexadecimal mantissa requires a 'p' exponent"}, + {FloatLit, "0x1.1e0", "0x1.1e0", "hexadecimal mantissa requires a 'p' exponent"}, + {FloatLit, 
"0x1.2gp1a", "0x1.2 gp1a", "hexadecimal mantissa requires a 'p' exponent"}, + {FloatLit, "0x0p", "0x0p", "exponent has no digits"}, + {FloatLit, "0xeP-", "0xeP-", "exponent has no digits"}, + {FloatLit, "0x1234PAB", "0x1234P AB", "exponent has no digits"}, + {FloatLit, "0x1.2p1a", "0x1.2p1 a", ""}, + + {ImagLit, "0xf00.bap+12i", "0xf00.bap+12i", ""}, + + // separators + {IntLit, "0b_1000_0001", "0b_1000_0001", ""}, + {IntLit, "0o_600", "0o_600", ""}, + {IntLit, "0_466", "0_466", ""}, + {IntLit, "1_000", "1_000", ""}, + {FloatLit, "1_000.000_1", "1_000.000_1", ""}, + {ImagLit, "10e+1_2_3i", "10e+1_2_3i", ""}, + {IntLit, "0x_f00d", "0x_f00d", ""}, + {FloatLit, "0x_f00d.0p1_2", "0x_f00d.0p1_2", ""}, + + {IntLit, "0b__1000", "0b__1000", "'_' must separate successive digits"}, + {IntLit, "0o60___0", "0o60___0", "'_' must separate successive digits"}, + {IntLit, "0466_", "0466_", "'_' must separate successive digits"}, + {FloatLit, "1_.", "1_.", "'_' must separate successive digits"}, + {FloatLit, "0._1", "0._1", "'_' must separate successive digits"}, + {FloatLit, "2.7_e0", "2.7_e0", "'_' must separate successive digits"}, + {ImagLit, "10e+12_i", "10e+12_i", "'_' must separate successive digits"}, + {IntLit, "0x___0", "0x___0", "'_' must separate successive digits"}, + {FloatLit, "0x1.0_p0", "0x1.0_p0", "'_' must separate successive digits"}, + } { + var s scanner + var err string + s.init(strings.NewReader(test.src), func(_, _ uint, msg string) { + if err == "" { + err = msg + } + }, 0) + + for i, want := range strings.Split(test.tokens, " ") { + err = "" + s.next() + + if err != "" && !s.bad { + t.Errorf("%q: got error but bad not set", test.src) + } + + // compute lit where s.lit is not defined + var lit string + switch s.tok { + case _Name, _Literal: + lit = s.lit + case _Dot: + lit = "." 
+ } + + if i == 0 { + if s.tok != _Literal || s.kind != test.kind { + t.Errorf("%q: got token %s (kind = %d); want literal (kind = %d)", test.src, s.tok, s.kind, test.kind) + } + if err != test.err { + t.Errorf("%q: got error %q; want %q", test.src, err, test.err) + } + } + + if lit != want { + t.Errorf("%q: got literal %q (%s); want %s", test.src, lit, s.tok, want) + } + } + + // make sure we read all + s.next() + if s.tok == _Semi { + s.next() + } + if s.tok != _EOF { + t.Errorf("%q: got %s; want EOF", test.src, s.tok) + } + } +} + +func TestScanErrors(t *testing.T) { + for _, test := range []struct { + src, err string + line, col uint // 0-based + }{ + // Note: Positions for lexical errors are the earliest position + // where the error is apparent, not the beginning of the respective + // token. + + // rune-level errors + {"fo\x00o", "invalid NUL character", 0, 2}, + {"foo\n\ufeff bar", "invalid BOM in the middle of the file", 1, 0}, + {"foo\n\n\xff ", "invalid UTF-8 encoding", 2, 0}, + + // token-level errors + {"\u00BD" /* ½ */, "invalid character U+00BD '½' in identifier", 0, 0}, + {"\U0001d736\U0001d737\U0001d738_½" /* 𝜶𝜷𝜸_½ */, "invalid character U+00BD '½' in identifier", 0, 13 /* byte offset */}, + {"\U0001d7d8" /* 𝟘 */, "identifier cannot begin with digit U+1D7D8 '𝟘'", 0, 0}, + {"foo\U0001d7d8_½" /* foo𝟘_½ */, "invalid character U+00BD '½' in identifier", 0, 8 /* byte offset */}, + + {"x + #y", "invalid character U+0023 '#'", 0, 4}, + {"foo$bar = 0", "invalid character U+0024 '$'", 0, 3}, + {"0123456789", "invalid digit '8' in octal literal", 0, 8}, + {"0123456789. 
/* foobar", "comment not terminated", 0, 12}, // valid float constant + {"0123456789e0 /*\nfoobar", "comment not terminated", 0, 13}, // valid float constant + {"var a, b = 09, 07\n", "invalid digit '9' in octal literal", 0, 12}, + + {`''`, "empty rune literal or unescaped '", 0, 1}, + {"'\n", "newline in rune literal", 0, 1}, + {`'\`, "rune literal not terminated", 0, 0}, + {`'\'`, "rune literal not terminated", 0, 0}, + {`'\x`, "rune literal not terminated", 0, 0}, + {`'\x'`, "invalid character '\\'' in hexadecimal escape", 0, 3}, + {`'\y'`, "unknown escape", 0, 2}, + {`'\x0'`, "invalid character '\\'' in hexadecimal escape", 0, 4}, + {`'\00'`, "invalid character '\\'' in octal escape", 0, 4}, + {`'\377' /*`, "comment not terminated", 0, 7}, // valid octal escape + {`'\378`, "invalid character '8' in octal escape", 0, 4}, + {`'\400'`, "octal escape value 256 > 255", 0, 5}, + {`'xx`, "rune literal not terminated", 0, 0}, + {`'xx'`, "more than one character in rune literal", 0, 0}, + + {"\n \"foo\n", "newline in string", 1, 7}, + {`"`, "string not terminated", 0, 0}, + {`"foo`, "string not terminated", 0, 0}, + {"`", "string not terminated", 0, 0}, + {"`foo", "string not terminated", 0, 0}, + {"/*/", "comment not terminated", 0, 0}, + {"/*\n\nfoo", "comment not terminated", 0, 0}, + {`"\`, "string not terminated", 0, 0}, + {`"\"`, "string not terminated", 0, 0}, + {`"\x`, "string not terminated", 0, 0}, + {`"\x"`, "invalid character '\"' in hexadecimal escape", 0, 3}, + {`"\y"`, "unknown escape", 0, 2}, + {`"\x0"`, "invalid character '\"' in hexadecimal escape", 0, 4}, + {`"\00"`, "invalid character '\"' in octal escape", 0, 4}, + {`"\377" /*`, "comment not terminated", 0, 7}, // valid octal escape + {`"\378"`, "invalid character '8' in octal escape", 0, 4}, + {`"\400"`, "octal escape value 256 > 255", 0, 5}, + + {`s := "foo\z"`, "unknown escape", 0, 10}, + {`s := "foo\z00\nbar"`, "unknown escape", 0, 10}, + {`"\x`, "string not terminated", 0, 0}, + {`"\x"`, 
"invalid character '\"' in hexadecimal escape", 0, 3}, + {`var s string = "\x"`, "invalid character '\"' in hexadecimal escape", 0, 18}, + {`return "\Uffffffff"`, "escape is invalid Unicode code point U+FFFFFFFF", 0, 18}, + + {"0b.0", "invalid radix point in binary literal", 0, 2}, + {"0x.p0\n", "hexadecimal literal has no digits", 0, 3}, + + // former problem cases + {"package p\n\n\xef", "invalid UTF-8 encoding", 2, 0}, + } { + var s scanner + var line, col uint + var err string + s.init(strings.NewReader(test.src), func(l, c uint, msg string) { + if err == "" { + line, col = l-linebase, c-colbase + err = msg + } + }, 0) + + for { + s.next() + if s.tok == _EOF { + break + } + } + + if err != "" { + if err != test.err { + t.Errorf("%q: got err = %q; want %q", test.src, err, test.err) + } + if line != test.line { + t.Errorf("%q: got line = %d; want %d", test.src, line, test.line) + } + if col != test.col { + t.Errorf("%q: got col = %d; want %d", test.src, col, test.col) + } + } else { + t.Errorf("%q: got no error; want %q", test.src, test.err) + } + } +} + +func TestDirectives(t *testing.T) { + for _, src := range []string{ + "line", + "// line", + "//line", + "//line foo", + "//line foo%bar", + + "go", + "// go:", + "//go:", + "//go :foo", + "//go:foo", + "//go:foo%bar", + } { + got := "" + var s scanner + s.init(strings.NewReader(src), func(_, col uint, msg string) { + if col != colbase { + t.Errorf("%s: got col = %d; want %d", src, col, colbase) + } + if msg == "" { + t.Errorf("%s: handler called with empty msg", src) + } + got = msg + }, directives) + + s.next() + if strings.HasPrefix(src, "//line ") || strings.HasPrefix(src, "//go:") { + // handler should have been called + if got != src { + t.Errorf("got %s; want %s", got, src) + } + } else { + // handler should not have been called + if got != "" { + t.Errorf("got %s for %s", got, src) + } + } + } +} + +func TestIssue21938(t *testing.T) { + s := "/*" + strings.Repeat(" ", 4089) + "*/ .5" + + var got scanner 
+ got.init(strings.NewReader(s), errh, 0) + got.next() + + if got.tok != _Literal || got.lit != ".5" { + t.Errorf("got %s %q; want %s %q", got.tok, got.lit, _Literal, ".5") + } +} + +func TestIssue33961(t *testing.T) { + literals := `08__ 0b.p 0b_._p 0x.e 0x.p` + for _, lit := range strings.Split(literals, " ") { + n := 0 + var got scanner + got.init(strings.NewReader(lit), func(_, _ uint, msg string) { + // fmt.Printf("%s: %s\n", lit, msg) // uncomment for debugging + n++ + }, 0) + got.next() + + if n != 1 { + t.Errorf("%q: got %d errors; want 1", lit, n) + continue + } + + if !got.bad { + t.Errorf("%q: got error but bad not set", lit) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/source.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/source.go new file mode 100644 index 0000000000000000000000000000000000000000..01b592152bb7886885e2b870f513d81d7d20ead0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/source.go @@ -0,0 +1,218 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements source, a buffered rune reader +// specialized for scanning Go code: Reading +// ASCII characters, maintaining current (line, col) +// position information, and recording of the most +// recently read source segment are highly optimized. +// This file is self-contained (go tool compile source.go +// compiles) and thus could be made into its own package. + +package syntax + +import ( + "io" + "unicode/utf8" +) + +// The source buffer is accessed using three indices b (begin), +// r (read), and e (end): +// +// - If b >= 0, it points to the beginning of a segment of most +// recently read characters (typically a Go literal). +// +// - r points to the byte immediately following the most recently +// read character ch, which starts at r-chw. 
+// +// - e points to the byte immediately following the last byte that +// was read into the buffer. +// +// The buffer content is terminated at buf[e] with the sentinel +// character utf8.RuneSelf. This makes it possible to test for +// the common case of ASCII characters with a single 'if' (see +// nextch method). +// +// +------ content in use -------+ +// v v +// buf [...read...|...segment...|ch|...unread...|s|...free...] +// ^ ^ ^ ^ +// | | | | +// b r-chw r e +// +// Invariant: -1 <= b < r <= e < len(buf) && buf[e] == sentinel + +type source struct { + in io.Reader + errh func(line, col uint, msg string) + + buf []byte // source buffer + ioerr error // pending I/O error, or nil + b, r, e int // buffer indices (see comment above) + line, col uint // source position of ch (0-based) + ch rune // most recently read character + chw int // width of ch +} + +const sentinel = utf8.RuneSelf + +func (s *source) init(in io.Reader, errh func(line, col uint, msg string)) { + s.in = in + s.errh = errh + + if s.buf == nil { + s.buf = make([]byte, nextSize(0)) + } + s.buf[0] = sentinel + s.ioerr = nil + s.b, s.r, s.e = -1, 0, 0 + s.line, s.col = 0, 0 + s.ch = ' ' + s.chw = 0 +} + +// starting points for line and column numbers +const linebase = 1 +const colbase = 1 + +// pos returns the (line, col) source position of s.ch. +func (s *source) pos() (line, col uint) { + return linebase + s.line, colbase + s.col +} + +// error reports the error msg at source position s.pos(). +func (s *source) error(msg string) { + line, col := s.pos() + s.errh(line, col, msg) +} + +// start starts a new active source segment (including s.ch). +// As long as stop has not been called, the active segment's +// bytes (excluding s.ch) may be retrieved by calling segment. 
+func (s *source) start() { s.b = s.r - s.chw } +func (s *source) stop() { s.b = -1 } +func (s *source) segment() []byte { return s.buf[s.b : s.r-s.chw] } + +// rewind rewinds the scanner's read position and character s.ch +// to the start of the currently active segment, which must not +// contain any newlines (otherwise position information will be +// incorrect). Currently, rewind is only needed for handling the +// source sequence ".."; it must not be called outside an active +// segment. +func (s *source) rewind() { + // ok to verify precondition - rewind is rarely called + if s.b < 0 { + panic("no active segment") + } + s.col -= uint(s.r - s.b) + s.r = s.b + s.nextch() +} + +func (s *source) nextch() { +redo: + s.col += uint(s.chw) + if s.ch == '\n' { + s.line++ + s.col = 0 + } + + // fast common case: at least one ASCII character + if s.ch = rune(s.buf[s.r]); s.ch < sentinel { + s.r++ + s.chw = 1 + if s.ch == 0 { + s.error("invalid NUL character") + goto redo + } + return + } + + // slower general case: add more bytes to buffer if we don't have a full rune + for s.e-s.r < utf8.UTFMax && !utf8.FullRune(s.buf[s.r:s.e]) && s.ioerr == nil { + s.fill() + } + + // EOF + if s.r == s.e { + if s.ioerr != io.EOF { + // ensure we never start with a '/' (e.g., rooted path) in the error message + s.error("I/O error: " + s.ioerr.Error()) + s.ioerr = nil + } + s.ch = -1 + s.chw = 0 + return + } + + s.ch, s.chw = utf8.DecodeRune(s.buf[s.r:s.e]) + s.r += s.chw + + if s.ch == utf8.RuneError && s.chw == 1 { + s.error("invalid UTF-8 encoding") + goto redo + } + + // BOM's are only allowed as the first character in a file + const BOM = 0xfeff + if s.ch == BOM { + if s.line > 0 || s.col > 0 { + s.error("invalid BOM in the middle of the file") + } + goto redo + } +} + +// fill reads more source bytes into s.buf. +// It returns with at least one more byte in the buffer, or with s.ioerr != nil. 
+func (s *source) fill() { + // determine content to preserve + b := s.r + if s.b >= 0 { + b = s.b + s.b = 0 // after buffer has grown or content has been moved down + } + content := s.buf[b:s.e] + + // grow buffer or move content down + if len(content)*2 > len(s.buf) { + s.buf = make([]byte, nextSize(len(s.buf))) + copy(s.buf, content) + } else if b > 0 { + copy(s.buf, content) + } + s.r -= b + s.e -= b + + // read more data: try a limited number of times + for i := 0; i < 10; i++ { + var n int + n, s.ioerr = s.in.Read(s.buf[s.e : len(s.buf)-1]) // -1 to leave space for sentinel + if n < 0 { + panic("negative read") // incorrect underlying io.Reader implementation + } + if n > 0 || s.ioerr != nil { + s.e += n + s.buf[s.e] = sentinel + return + } + // n == 0 + } + + s.buf[s.e] = sentinel + s.ioerr = io.ErrNoProgress +} + +// nextSize returns the next bigger size for a buffer of a given size. +func nextSize(size int) int { + const min = 4 << 10 // 4K: minimum buffer size + const max = 1 << 20 // 1M: maximum buffer size which is still doubled + if size < min { + return min + } + if size <= max { + return size << 1 + } + return size + max +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/syntax.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/syntax.go new file mode 100644 index 0000000000000000000000000000000000000000..83b102da9f564e72fe41dc8da27146aabd63699e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/syntax.go @@ -0,0 +1,94 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syntax + +import ( + "fmt" + "io" + "os" +) + +// Mode describes the parser mode. +type Mode uint + +// Modes supported by the parser. +const ( + CheckBranches Mode = 1 << iota // check correct use of labels, break, continue, and goto statements +) + +// Error describes a syntax error. 
Error implements the error interface. +type Error struct { + Pos Pos + Msg string +} + +func (err Error) Error() string { + return fmt.Sprintf("%s: %s", err.Pos, err.Msg) +} + +var _ error = Error{} // verify that Error implements error + +// An ErrorHandler is called for each error encountered reading a .go file. +type ErrorHandler func(err error) + +// A Pragma value augments a package, import, const, func, type, or var declaration. +// Its meaning is entirely up to the PragmaHandler, +// except that nil is used to mean “no pragma seen.” +type Pragma interface{} + +// A PragmaHandler is used to process //go: directives while scanning. +// It is passed the current pragma value, which starts out being nil, +// and it returns an updated pragma value. +// The text is the directive, with the "//" prefix stripped. +// The current pragma is saved at each package, import, const, func, type, or var +// declaration, into the File, ImportDecl, ConstDecl, FuncDecl, TypeDecl, or VarDecl node. +// +// If text is the empty string, the pragma is being returned +// to the handler unused, meaning it appeared before a non-declaration. +// The handler may wish to report an error. In this case, pos is the +// current parser position, not the position of the pragma itself. +// Blank specifies whether the line is blank before the pragma. +type PragmaHandler func(pos Pos, blank bool, text string, current Pragma) Pragma + +// Parse parses a single Go source file from src and returns the corresponding +// syntax tree. If there are errors, Parse will return the first error found, +// and a possibly partially constructed syntax tree, or nil. +// +// If errh != nil, it is called with each error encountered, and Parse will +// process as much source as possible. In this case, the returned syntax tree +// is only nil if no correct package clause was found. +// If errh is nil, Parse will terminate immediately upon encountering the first +// error, and the returned syntax tree is nil. 
+// +// If pragh != nil, it is called with each pragma encountered. +func Parse(base *PosBase, src io.Reader, errh ErrorHandler, pragh PragmaHandler, mode Mode) (_ *File, first error) { + defer func() { + if p := recover(); p != nil { + if err, ok := p.(Error); ok { + first = err + return + } + panic(p) + } + }() + + var p parser + p.init(base, src, errh, pragh, mode) + p.next() + return p.fileOrNil(), p.first +} + +// ParseFile behaves like Parse but it reads the source from the named file. +func ParseFile(filename string, errh ErrorHandler, pragh PragmaHandler, mode Mode) (*File, error) { + f, err := os.Open(filename) + if err != nil { + if errh != nil { + errh(err) + } + return nil, err + } + defer f.Close() + return Parse(NewFileBase(filename), f, errh, pragh, mode) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testing.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testing.go new file mode 100644 index 0000000000000000000000000000000000000000..202b2efc3e0d913b3aaef4fafb8e98a27bdacc6a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testing.go @@ -0,0 +1,69 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements testing support. + +package syntax + +import ( + "io" + "regexp" +) + +// CommentsDo parses the given source and calls the provided handler for each +// comment or error. If the text provided to handler starts with a '/' it is +// the comment text; otherwise it is the error message. 
+func CommentsDo(src io.Reader, handler func(line, col uint, text string)) { + var s scanner + s.init(src, handler, comments) + for s.tok != _EOF { + s.next() + } +} + +// CommentMap collects all comments in the given src with comment text +// that matches the supplied regular expression rx and returns them as +// []Error lists in a map indexed by line number. The comment text is +// the comment with any comment markers ("//", "/*", or "*/") stripped. +// The position for each Error is the position of the token immediately +// preceding the comment and the Error message is the comment text, +// with all comments that are on the same line collected in a slice, in +// source order. If there is no preceding token (the matching comment +// appears at the beginning of the file), then the recorded position +// is unknown (line, col = 0, 0). If there are no matching comments, +// the result is nil. +func CommentMap(src io.Reader, rx *regexp.Regexp) (res map[uint][]Error) { + // position of previous token + var base *PosBase + var prev struct{ line, col uint } + + var s scanner + s.init(src, func(_, _ uint, text string) { + if text[0] != '/' { + return // not a comment, ignore + } + if text[1] == '*' { + text = text[:len(text)-2] // strip trailing */ + } + text = text[2:] // strip leading // or /* + if rx.MatchString(text) { + pos := MakePos(base, prev.line, prev.col) + err := Error{pos, text} + if res == nil { + res = make(map[uint][]Error) + } + res[prev.line] = append(res[prev.line], err) + } + }, comments) + + for s.tok != _EOF { + s.next() + if s.tok == _Semi && s.lit != "semicolon" { + continue // ignore automatically inserted semicolons + } + prev.line, prev.col = s.line, s.col + } + + return +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testing_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testing_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..7e439c5523998698c632a561587c982eb29e5a26 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testing_test.go @@ -0,0 +1,48 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syntax + +import ( + "fmt" + "regexp" + "strings" + "testing" +) + +func TestCommentMap(t *testing.T) { + const src = `/* ERROR "0:0" */ /* ERROR "0:0" */ // ERROR "0:0" +// ERROR "0:0" +x /* ERROR "3:1" */ // ignore automatically inserted semicolon here +/* ERROR "3:1" */ // position of x on previous line + x /* ERROR "5:4" */ ; // do not ignore this semicolon +/* ERROR "5:24" */ // position of ; on previous line + package /* ERROR "7:2" */ // indented with tab + import /* ERROR "8:9" */ // indented with blanks +` + m := CommentMap(strings.NewReader(src), regexp.MustCompile("^ ERROR ")) + found := 0 // number of errors found + for line, errlist := range m { + for _, err := range errlist { + if err.Pos.Line() != line { + t.Errorf("%v: got map line %d; want %d", err, err.Pos.Line(), line) + continue + } + // err.Pos.Line() == line + + got := strings.TrimSpace(err.Msg[len(" ERROR "):]) + want := fmt.Sprintf(`"%d:%d"`, line, err.Pos.Col()) + if got != want { + t.Errorf("%v: got msg %q; want %q", err, got, want) + continue + } + found++ + } + } + + want := strings.Count(src, " ERROR ") + if found != want { + t.Errorf("CommentMap got %d errors; want %d", found, want) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/token_string.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/token_string.go new file mode 100644 index 0000000000000000000000000000000000000000..ef295eb24b2bc9acb4b0d44ca9893f3bcb5067c2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/token_string.go @@ -0,0 +1,70 @@ +// Code generated by "stringer -type token 
-linecomment tokens.go"; DO NOT EDIT. + +package syntax + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[_EOF-1] + _ = x[_Name-2] + _ = x[_Literal-3] + _ = x[_Operator-4] + _ = x[_AssignOp-5] + _ = x[_IncOp-6] + _ = x[_Assign-7] + _ = x[_Define-8] + _ = x[_Arrow-9] + _ = x[_Star-10] + _ = x[_Lparen-11] + _ = x[_Lbrack-12] + _ = x[_Lbrace-13] + _ = x[_Rparen-14] + _ = x[_Rbrack-15] + _ = x[_Rbrace-16] + _ = x[_Comma-17] + _ = x[_Semi-18] + _ = x[_Colon-19] + _ = x[_Dot-20] + _ = x[_DotDotDot-21] + _ = x[_Break-22] + _ = x[_Case-23] + _ = x[_Chan-24] + _ = x[_Const-25] + _ = x[_Continue-26] + _ = x[_Default-27] + _ = x[_Defer-28] + _ = x[_Else-29] + _ = x[_Fallthrough-30] + _ = x[_For-31] + _ = x[_Func-32] + _ = x[_Go-33] + _ = x[_Goto-34] + _ = x[_If-35] + _ = x[_Import-36] + _ = x[_Interface-37] + _ = x[_Map-38] + _ = x[_Package-39] + _ = x[_Range-40] + _ = x[_Return-41] + _ = x[_Select-42] + _ = x[_Struct-43] + _ = x[_Switch-44] + _ = x[_Type-45] + _ = x[_Var-46] + _ = x[tokenCount-47] +} + +const _token_name = "EOFnameliteralopop=opop=:=<-*([{)]},;:....breakcasechanconstcontinuedefaultdeferelsefallthroughforfuncgogotoifimportinterfacemappackagerangereturnselectstructswitchtypevar" + +var _token_index = [...]uint8{0, 3, 7, 14, 16, 19, 23, 24, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 42, 47, 51, 55, 60, 68, 75, 80, 84, 95, 98, 102, 104, 108, 110, 116, 125, 128, 135, 140, 146, 152, 158, 164, 168, 171, 171} + +func (i token) String() string { + i -= 1 + if i >= token(len(_token_index)-1) { + return "token(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _token_name[_token_index[i]:_token_index[i+1]] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/tokens.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/tokens.go new file mode 100644 index 
0000000000000000000000000000000000000000..b08f699582fb6595fcd6046e31ee857f3766e808 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/tokens.go @@ -0,0 +1,159 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syntax + +type Token uint + +type token = Token + +//go:generate stringer -type token -linecomment tokens.go + +const ( + _ token = iota + _EOF // EOF + + // names and literals + _Name // name + _Literal // literal + + // operators and operations + // _Operator is excluding '*' (_Star) + _Operator // op + _AssignOp // op= + _IncOp // opop + _Assign // = + _Define // := + _Arrow // <- + _Star // * + + // delimiters + _Lparen // ( + _Lbrack // [ + _Lbrace // { + _Rparen // ) + _Rbrack // ] + _Rbrace // } + _Comma // , + _Semi // ; + _Colon // : + _Dot // . + _DotDotDot // ... + + // keywords + _Break // break + _Case // case + _Chan // chan + _Const // const + _Continue // continue + _Default // default + _Defer // defer + _Else // else + _Fallthrough // fallthrough + _For // for + _Func // func + _Go // go + _Goto // goto + _If // if + _Import // import + _Interface // interface + _Map // map + _Package // package + _Range // range + _Return // return + _Select // select + _Struct // struct + _Switch // switch + _Type // type + _Var // var + + // empty line comment to exclude it from .String + tokenCount // +) + +const ( + // for BranchStmt + Break = _Break + Continue = _Continue + Fallthrough = _Fallthrough + Goto = _Goto + + // for CallStmt + Go = _Go + Defer = _Defer +) + +// Make sure we have at most 64 tokens so we can use them in a set. +const _ uint64 = 1 << (tokenCount - 1) + +// contains reports whether tok is in tokset. 
+func contains(tokset uint64, tok token) bool { + return tokset&(1< + Geq // >= + + // precAdd + Add // + + Sub // - + Or // | + Xor // ^ + + // precMul + Mul // * + Div // / + Rem // % + And // & + AndNot // &^ + Shl // << + Shr // >> +) + +// Operator precedences +const ( + _ = iota + precOrOr + precAndAnd + precCmp + precAdd + precMul +) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/type.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/type.go new file mode 100644 index 0000000000000000000000000000000000000000..53132a442d0388098ce926b552c2a4fbc35284cb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/type.go @@ -0,0 +1,75 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syntax + +import "go/constant" + +// A Type represents a type of Go. +// All types implement the Type interface. +// (This type originally lived in types2. We moved it here +// so we could depend on it from other packages without +// introducing a circularity.) +type Type interface { + // Underlying returns the underlying type of a type. + Underlying() Type + + // String returns a string representation of a type. + String() string +} + +// Expressions in the syntax package provide storage for +// the typechecker to record its results. This interface +// is the mechanism the typechecker uses to record results, +// and clients use to retrieve those results. +type typeInfo interface { + SetTypeInfo(TypeAndValue) + GetTypeInfo() TypeAndValue +} + +// A TypeAndValue records the type information, constant +// value if known, and various other flags associated with +// an expression. +// This type is similar to types2.TypeAndValue, but exposes +// none of types2's internals. 
+type TypeAndValue struct { + Type Type + Value constant.Value + exprFlags +} + +type exprFlags uint16 + +func (f exprFlags) IsVoid() bool { return f&1 != 0 } +func (f exprFlags) IsType() bool { return f&2 != 0 } +func (f exprFlags) IsBuiltin() bool { return f&4 != 0 } // a language builtin that resembles a function call, e.g., "make, append, new" +func (f exprFlags) IsValue() bool { return f&8 != 0 } +func (f exprFlags) IsNil() bool { return f&16 != 0 } +func (f exprFlags) Addressable() bool { return f&32 != 0 } +func (f exprFlags) Assignable() bool { return f&64 != 0 } +func (f exprFlags) HasOk() bool { return f&128 != 0 } +func (f exprFlags) IsRuntimeHelper() bool { return f&256 != 0 } // a runtime function called from transformed syntax + +func (f *exprFlags) SetIsVoid() { *f |= 1 } +func (f *exprFlags) SetIsType() { *f |= 2 } +func (f *exprFlags) SetIsBuiltin() { *f |= 4 } +func (f *exprFlags) SetIsValue() { *f |= 8 } +func (f *exprFlags) SetIsNil() { *f |= 16 } +func (f *exprFlags) SetAddressable() { *f |= 32 } +func (f *exprFlags) SetAssignable() { *f |= 64 } +func (f *exprFlags) SetHasOk() { *f |= 128 } +func (f *exprFlags) SetIsRuntimeHelper() { *f |= 256 } + +// a typeAndValue contains the results of typechecking an expression. +// It is embedded in expression nodes. +type typeAndValue struct { + tv TypeAndValue +} + +func (x *typeAndValue) SetTypeInfo(tv TypeAndValue) { + x.tv = tv +} +func (x *typeAndValue) GetTypeInfo() TypeAndValue { + return x.tv +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/walk.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/walk.go new file mode 100644 index 0000000000000000000000000000000000000000..b03a7c14b0d14e579fb6ec91d4334fd397735a24 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/walk.go @@ -0,0 +1,346 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements syntax tree walking. + +package syntax + +import "fmt" + +// Inspect traverses an AST in pre-order: it starts by calling f(root); +// root must not be nil. If f returns true, Inspect invokes f recursively +// for each of the non-nil children of root, followed by a call of f(nil). +// +// See Walk for caveats about shared nodes. +func Inspect(root Node, f func(Node) bool) { + Walk(root, inspector(f)) +} + +type inspector func(Node) bool + +func (v inspector) Visit(node Node) Visitor { + if v(node) { + return v + } + return nil +} + +// Walk traverses an AST in pre-order: It starts by calling +// v.Visit(node); node must not be nil. If the visitor w returned by +// v.Visit(node) is not nil, Walk is invoked recursively with visitor +// w for each of the non-nil children of node, followed by a call of +// w.Visit(nil). +// +// Some nodes may be shared among multiple parent nodes (e.g., types in +// field lists such as type T in "a, b, c T"). Such shared nodes are +// walked multiple times. +// TODO(gri) Revisit this design. It may make sense to walk those nodes +// only once. A place where this matters is types2.TestResolveIdents. +func Walk(root Node, v Visitor) { + walker{v}.node(root) +} + +// A Visitor's Visit method is invoked for each node encountered by Walk. +// If the result visitor w is not nil, Walk visits each of the children +// of node with the visitor w, followed by a call of w.Visit(nil). 
+type Visitor interface { + Visit(node Node) (w Visitor) +} + +type walker struct { + v Visitor +} + +func (w walker) node(n Node) { + if n == nil { + panic("nil node") + } + + w.v = w.v.Visit(n) + if w.v == nil { + return + } + + switch n := n.(type) { + // packages + case *File: + w.node(n.PkgName) + w.declList(n.DeclList) + + // declarations + case *ImportDecl: + if n.LocalPkgName != nil { + w.node(n.LocalPkgName) + } + w.node(n.Path) + + case *ConstDecl: + w.nameList(n.NameList) + if n.Type != nil { + w.node(n.Type) + } + if n.Values != nil { + w.node(n.Values) + } + + case *TypeDecl: + w.node(n.Name) + w.fieldList(n.TParamList) + w.node(n.Type) + + case *VarDecl: + w.nameList(n.NameList) + if n.Type != nil { + w.node(n.Type) + } + if n.Values != nil { + w.node(n.Values) + } + + case *FuncDecl: + if n.Recv != nil { + w.node(n.Recv) + } + w.node(n.Name) + w.fieldList(n.TParamList) + w.node(n.Type) + if n.Body != nil { + w.node(n.Body) + } + + // expressions + case *BadExpr: // nothing to do + case *Name: // nothing to do + case *BasicLit: // nothing to do + + case *CompositeLit: + if n.Type != nil { + w.node(n.Type) + } + w.exprList(n.ElemList) + + case *KeyValueExpr: + w.node(n.Key) + w.node(n.Value) + + case *FuncLit: + w.node(n.Type) + w.node(n.Body) + + case *ParenExpr: + w.node(n.X) + + case *SelectorExpr: + w.node(n.X) + w.node(n.Sel) + + case *IndexExpr: + w.node(n.X) + w.node(n.Index) + + case *SliceExpr: + w.node(n.X) + for _, x := range n.Index { + if x != nil { + w.node(x) + } + } + + case *AssertExpr: + w.node(n.X) + w.node(n.Type) + + case *TypeSwitchGuard: + if n.Lhs != nil { + w.node(n.Lhs) + } + w.node(n.X) + + case *Operation: + w.node(n.X) + if n.Y != nil { + w.node(n.Y) + } + + case *CallExpr: + w.node(n.Fun) + w.exprList(n.ArgList) + + case *ListExpr: + w.exprList(n.ElemList) + + // types + case *ArrayType: + if n.Len != nil { + w.node(n.Len) + } + w.node(n.Elem) + + case *SliceType: + w.node(n.Elem) + + case *DotsType: + w.node(n.Elem) + + 
case *StructType: + w.fieldList(n.FieldList) + for _, t := range n.TagList { + if t != nil { + w.node(t) + } + } + + case *Field: + if n.Name != nil { + w.node(n.Name) + } + w.node(n.Type) + + case *InterfaceType: + w.fieldList(n.MethodList) + + case *FuncType: + w.fieldList(n.ParamList) + w.fieldList(n.ResultList) + + case *MapType: + w.node(n.Key) + w.node(n.Value) + + case *ChanType: + w.node(n.Elem) + + // statements + case *EmptyStmt: // nothing to do + + case *LabeledStmt: + w.node(n.Label) + w.node(n.Stmt) + + case *BlockStmt: + w.stmtList(n.List) + + case *ExprStmt: + w.node(n.X) + + case *SendStmt: + w.node(n.Chan) + w.node(n.Value) + + case *DeclStmt: + w.declList(n.DeclList) + + case *AssignStmt: + w.node(n.Lhs) + if n.Rhs != nil { + w.node(n.Rhs) + } + + case *BranchStmt: + if n.Label != nil { + w.node(n.Label) + } + // Target points to nodes elsewhere in the syntax tree + + case *CallStmt: + w.node(n.Call) + + case *ReturnStmt: + if n.Results != nil { + w.node(n.Results) + } + + case *IfStmt: + if n.Init != nil { + w.node(n.Init) + } + w.node(n.Cond) + w.node(n.Then) + if n.Else != nil { + w.node(n.Else) + } + + case *ForStmt: + if n.Init != nil { + w.node(n.Init) + } + if n.Cond != nil { + w.node(n.Cond) + } + if n.Post != nil { + w.node(n.Post) + } + w.node(n.Body) + + case *SwitchStmt: + if n.Init != nil { + w.node(n.Init) + } + if n.Tag != nil { + w.node(n.Tag) + } + for _, s := range n.Body { + w.node(s) + } + + case *SelectStmt: + for _, s := range n.Body { + w.node(s) + } + + // helper nodes + case *RangeClause: + if n.Lhs != nil { + w.node(n.Lhs) + } + w.node(n.X) + + case *CaseClause: + if n.Cases != nil { + w.node(n.Cases) + } + w.stmtList(n.Body) + + case *CommClause: + if n.Comm != nil { + w.node(n.Comm) + } + w.stmtList(n.Body) + + default: + panic(fmt.Sprintf("internal error: unknown node type %T", n)) + } + + w.v.Visit(nil) +} + +func (w walker) declList(list []Decl) { + for _, n := range list { + w.node(n) + } +} + +func (w walker) 
exprList(list []Expr) { + for _, n := range list { + w.node(n) + } +} + +func (w walker) stmtList(list []Stmt) { + for _, n := range list { + w.node(n) + } +} + +func (w walker) nameList(list []*Name) { + for _, n := range list { + w.node(n) + } +} + +func (w walker) fieldList(list []*Field) { + for _, n := range list { + w.node(n) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/README b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/README new file mode 100644 index 0000000000000000000000000000000000000000..242ff794cb3cdef2e900afc92fa478de3f480d1b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/README @@ -0,0 +1,4 @@ +This directory holds small tests and benchmarks of code +generated by the compiler. This code is not for importing, +and the tests are intended to verify that specific optimzations +are applied and correct. diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/abiutils_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/abiutils_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b500de9f18a872aa0e2535e942ad0333571ffac3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/abiutils_test.go @@ -0,0 +1,398 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package test + +import ( + "bufio" + "cmd/compile/internal/abi" + "cmd/compile/internal/base" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/x86" + "cmd/internal/src" + "fmt" + "os" + "testing" +) + +// AMD64 registers available: +// - integer: RAX, RBX, RCX, RDI, RSI, R8, R9, r10, R11 +// - floating point: X0 - X14 +var configAMD64 = abi.NewABIConfig(9, 15, 0, 1) + +func TestMain(m *testing.M) { + ssagen.Arch.LinkArch = &x86.Linkamd64 + ssagen.Arch.REGSP = x86.REGSP + ssagen.Arch.MAXWIDTH = 1 << 50 + types.MaxWidth = ssagen.Arch.MAXWIDTH + base.Ctxt = obj.Linknew(ssagen.Arch.LinkArch) + base.Ctxt.DiagFunc = base.Errorf + base.Ctxt.DiagFlush = base.FlushErrors + base.Ctxt.Bso = bufio.NewWriter(os.Stdout) + types.LocalPkg = types.NewPkg("p", "local") + types.LocalPkg.Prefix = "p" + types.PtrSize = ssagen.Arch.LinkArch.PtrSize + types.RegSize = ssagen.Arch.LinkArch.RegSize + typecheck.InitUniverse() + os.Exit(m.Run()) +} + +func TestABIUtilsBasic1(t *testing.T) { + + // func(x int32) int32 + i32 := types.Types[types.TINT32] + ft := mkFuncType(nil, []*types.Type{i32}, []*types.Type{i32}) + + // expected results + exp := makeExpectedDump(` + IN 0: R{ I0 } spilloffset: 0 typ: int32 + OUT 0: R{ I0 } spilloffset: -1 typ: int32 + offsetToSpillArea: 0 spillAreaSize: 8 +`) + + abitest(t, ft, exp) +} + +func TestABIUtilsBasic2(t *testing.T) { + // func(p1 int8, p2 int16, p3 int32, p4 int64, + // p5 float32, p6 float32, p7 float64, p8 float64, + // p9 int8, p10 int16, p11 int32, p12 int64, + // p13 float32, p14 float32, p15 float64, p16 float64, + // p17 complex128, p18 complex128, p19 complex12, p20 complex128, + // p21 complex64, p22 int8, p23 in16, p24 int32, p25 int64, + // p26 int8, p27 in16, p28 int32, p29 int64) + // (r1 int32, r2 float64, r3 float64) { + i8 := types.Types[types.TINT8] + i16 := types.Types[types.TINT16] + i32 := types.Types[types.TINT32] + i64 := 
types.Types[types.TINT64] + f32 := types.Types[types.TFLOAT32] + f64 := types.Types[types.TFLOAT64] + c64 := types.Types[types.TCOMPLEX64] + c128 := types.Types[types.TCOMPLEX128] + ft := mkFuncType(nil, + []*types.Type{ + i8, i16, i32, i64, + f32, f32, f64, f64, + i8, i16, i32, i64, + f32, f32, f64, f64, + c128, c128, c128, c128, c64, + i8, i16, i32, i64, + i8, i16, i32, i64}, + []*types.Type{i32, f64, f64}) + exp := makeExpectedDump(` + IN 0: R{ I0 } spilloffset: 0 typ: int8 + IN 1: R{ I1 } spilloffset: 2 typ: int16 + IN 2: R{ I2 } spilloffset: 4 typ: int32 + IN 3: R{ I3 } spilloffset: 8 typ: int64 + IN 4: R{ F0 } spilloffset: 16 typ: float32 + IN 5: R{ F1 } spilloffset: 20 typ: float32 + IN 6: R{ F2 } spilloffset: 24 typ: float64 + IN 7: R{ F3 } spilloffset: 32 typ: float64 + IN 8: R{ I4 } spilloffset: 40 typ: int8 + IN 9: R{ I5 } spilloffset: 42 typ: int16 + IN 10: R{ I6 } spilloffset: 44 typ: int32 + IN 11: R{ I7 } spilloffset: 48 typ: int64 + IN 12: R{ F4 } spilloffset: 56 typ: float32 + IN 13: R{ F5 } spilloffset: 60 typ: float32 + IN 14: R{ F6 } spilloffset: 64 typ: float64 + IN 15: R{ F7 } spilloffset: 72 typ: float64 + IN 16: R{ F8 F9 } spilloffset: 80 typ: complex128 + IN 17: R{ F10 F11 } spilloffset: 96 typ: complex128 + IN 18: R{ F12 F13 } spilloffset: 112 typ: complex128 + IN 19: R{ } offset: 0 typ: complex128 + IN 20: R{ } offset: 16 typ: complex64 + IN 21: R{ I8 } spilloffset: 128 typ: int8 + IN 22: R{ } offset: 24 typ: int16 + IN 23: R{ } offset: 28 typ: int32 + IN 24: R{ } offset: 32 typ: int64 + IN 25: R{ } offset: 40 typ: int8 + IN 26: R{ } offset: 42 typ: int16 + IN 27: R{ } offset: 44 typ: int32 + IN 28: R{ } offset: 48 typ: int64 + OUT 0: R{ I0 } spilloffset: -1 typ: int32 + OUT 1: R{ F0 } spilloffset: -1 typ: float64 + OUT 2: R{ F1 } spilloffset: -1 typ: float64 + offsetToSpillArea: 56 spillAreaSize: 136 +`) + + abitest(t, ft, exp) +} + +func TestABIUtilsArrays(t *testing.T) { + // func(p1 [1]int32, p2 [0]int32, p3 [1][1]int32, p4 [2]int32) 
+ // (r1 [2]int32, r2 [1]int32, r3 [0]int32, r4 [1][1]int32) { + i32 := types.Types[types.TINT32] + ae := types.NewArray(i32, 0) + a1 := types.NewArray(i32, 1) + a2 := types.NewArray(i32, 2) + aa1 := types.NewArray(a1, 1) + ft := mkFuncType(nil, []*types.Type{a1, ae, aa1, a2}, + []*types.Type{a2, a1, ae, aa1}) + + exp := makeExpectedDump(` + IN 0: R{ I0 } spilloffset: 0 typ: [1]int32 + IN 1: R{ } offset: 0 typ: [0]int32 + IN 2: R{ I1 } spilloffset: 4 typ: [1][1]int32 + IN 3: R{ } offset: 0 typ: [2]int32 + OUT 0: R{ } offset: 8 typ: [2]int32 + OUT 1: R{ I0 } spilloffset: -1 typ: [1]int32 + OUT 2: R{ } offset: 16 typ: [0]int32 + OUT 3: R{ I1 } spilloffset: -1 typ: [1][1]int32 + offsetToSpillArea: 16 spillAreaSize: 8 +`) + + abitest(t, ft, exp) +} + +func TestABIUtilsStruct1(t *testing.T) { + // type s struct { f1 int8; f2 int8; f3 struct {}; f4 int8; f5 int16) } + // func(p1 int6, p2 s, p3 int64) + // (r1 s, r2 int8, r3 int32) { + i8 := types.Types[types.TINT8] + i16 := types.Types[types.TINT16] + i32 := types.Types[types.TINT32] + i64 := types.Types[types.TINT64] + s := mkstruct(i8, i8, mkstruct(), i8, i16) + ft := mkFuncType(nil, []*types.Type{i8, s, i64}, + []*types.Type{s, i8, i32}) + + exp := makeExpectedDump(` + IN 0: R{ I0 } spilloffset: 0 typ: int8 + IN 1: R{ I1 I2 I3 I4 } spilloffset: 2 typ: struct { int8; int8; struct {}; int8; int16 } + IN 2: R{ I5 } spilloffset: 8 typ: int64 + OUT 0: R{ I0 I1 I2 I3 } spilloffset: -1 typ: struct { int8; int8; struct {}; int8; int16 } + OUT 1: R{ I4 } spilloffset: -1 typ: int8 + OUT 2: R{ I5 } spilloffset: -1 typ: int32 + offsetToSpillArea: 0 spillAreaSize: 16 +`) + + abitest(t, ft, exp) +} + +func TestABIUtilsStruct2(t *testing.T) { + // type s struct { f1 int64; f2 struct { } } + // type fs struct { f1 float64; f2 s; f3 struct { } } + // func(p1 s, p2 s, p3 fs) + // (r1 fs, r2 fs) + f64 := types.Types[types.TFLOAT64] + i64 := types.Types[types.TINT64] + s := mkstruct(i64, mkstruct()) + fs := mkstruct(f64, s, mkstruct()) + 
ft := mkFuncType(nil, []*types.Type{s, s, fs}, + []*types.Type{fs, fs}) + + exp := makeExpectedDump(` + IN 0: R{ I0 } spilloffset: 0 typ: struct { int64; struct {} } + IN 1: R{ I1 } spilloffset: 16 typ: struct { int64; struct {} } + IN 2: R{ F0 I2 } spilloffset: 32 typ: struct { float64; struct { int64; struct {} }; struct {} } + OUT 0: R{ F0 I0 } spilloffset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} } + OUT 1: R{ F1 I1 } spilloffset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} } + offsetToSpillArea: 0 spillAreaSize: 64 +`) + + abitest(t, ft, exp) +} + +// TestABIUtilsEmptyFieldAtEndOfStruct is testing to make sure +// the abi code is doing the right thing for struct types that have +// a trailing zero-sized field (where the we need to add padding). +func TestABIUtilsEmptyFieldAtEndOfStruct(t *testing.T) { + // type s struct { f1 [2]int64; f2 struct { } } + // type s2 struct { f1 [3]int16; f2 struct { } } + // type fs struct { f1 float64; f s; f3 struct { } } + // func(p1 s, p2 s, p3 fs) (r1 fs, r2 fs) + f64 := types.Types[types.TFLOAT64] + i64 := types.Types[types.TINT64] + i16 := types.Types[types.TINT16] + tb := types.Types[types.TBOOL] + ab2 := types.NewArray(tb, 2) + a2 := types.NewArray(i64, 2) + a3 := types.NewArray(i16, 3) + empty := mkstruct() + s := mkstruct(a2, empty) + s2 := mkstruct(a3, empty) + fs := mkstruct(f64, s, empty) + ft := mkFuncType(nil, []*types.Type{s, ab2, s2, fs, fs}, + []*types.Type{fs, ab2, fs}) + + exp := makeExpectedDump(` + IN 0: R{ } offset: 0 typ: struct { [2]int64; struct {} } + IN 1: R{ } offset: 24 typ: [2]bool + IN 2: R{ } offset: 26 typ: struct { [3]int16; struct {} } + IN 3: R{ } offset: 40 typ: struct { float64; struct { [2]int64; struct {} }; struct {} } + IN 4: R{ } offset: 80 typ: struct { float64; struct { [2]int64; struct {} }; struct {} } + OUT 0: R{ } offset: 120 typ: struct { float64; struct { [2]int64; struct {} }; struct {} } + OUT 1: R{ } offset: 160 typ: [2]bool + OUT 
2: R{ } offset: 168 typ: struct { float64; struct { [2]int64; struct {} }; struct {} } + offsetToSpillArea: 208 spillAreaSize: 0 +`) + + abitest(t, ft, exp) + + // Test that NumParamRegs doesn't assign registers to trailing padding. + typ := mkstruct(i64, i64, mkstruct()) + have := configAMD64.NumParamRegs(typ) + if have != 2 { + t.Errorf("NumParams(%v): have %v, want %v", typ, have, 2) + } +} + +func TestABIUtilsSliceString(t *testing.T) { + // func(p1 []int32, p2 int8, p3 []int32, p4 int8, p5 string, + // p6 int64, p6 []intr32) (r1 string, r2 int64, r3 string, r4 []int32) + i32 := types.Types[types.TINT32] + sli32 := types.NewSlice(i32) + str := types.Types[types.TSTRING] + i8 := types.Types[types.TINT8] + i64 := types.Types[types.TINT64] + ft := mkFuncType(nil, []*types.Type{sli32, i8, sli32, i8, str, i8, i64, sli32}, + []*types.Type{str, i64, str, sli32}) + + exp := makeExpectedDump(` + IN 0: R{ I0 I1 I2 } spilloffset: 0 typ: []int32 + IN 1: R{ I3 } spilloffset: 24 typ: int8 + IN 2: R{ I4 I5 I6 } spilloffset: 32 typ: []int32 + IN 3: R{ I7 } spilloffset: 56 typ: int8 + IN 4: R{ } offset: 0 typ: string + IN 5: R{ I8 } spilloffset: 57 typ: int8 + IN 6: R{ } offset: 16 typ: int64 + IN 7: R{ } offset: 24 typ: []int32 + OUT 0: R{ I0 I1 } spilloffset: -1 typ: string + OUT 1: R{ I2 } spilloffset: -1 typ: int64 + OUT 2: R{ I3 I4 } spilloffset: -1 typ: string + OUT 3: R{ I5 I6 I7 } spilloffset: -1 typ: []int32 + offsetToSpillArea: 48 spillAreaSize: 64 +`) + + abitest(t, ft, exp) +} + +func TestABIUtilsMethod(t *testing.T) { + // type s1 struct { f1 int16; f2 int16; f3 int16 } + // func(p1 *s1, p2 [7]*s1, p3 float64, p4 int16, p5 int16, p6 int16) + // (r1 [7]*s1, r2 float64, r3 int64) + i16 := types.Types[types.TINT16] + i64 := types.Types[types.TINT64] + f64 := types.Types[types.TFLOAT64] + s1 := mkstruct(i16, i16, i16) + ps1 := types.NewPtr(s1) + a7 := types.NewArray(ps1, 7) + ft := mkFuncType(s1, []*types.Type{ps1, a7, f64, i16, i16, i16}, + []*types.Type{a7, f64, 
i64}) + + exp := makeExpectedDump(` + IN 0: R{ I0 I1 I2 } spilloffset: 0 typ: struct { int16; int16; int16 } + IN 1: R{ I3 } spilloffset: 8 typ: *struct { int16; int16; int16 } + IN 2: R{ } offset: 0 typ: [7]*struct { int16; int16; int16 } + IN 3: R{ F0 } spilloffset: 16 typ: float64 + IN 4: R{ I4 } spilloffset: 24 typ: int16 + IN 5: R{ I5 } spilloffset: 26 typ: int16 + IN 6: R{ I6 } spilloffset: 28 typ: int16 + OUT 0: R{ } offset: 56 typ: [7]*struct { int16; int16; int16 } + OUT 1: R{ F0 } spilloffset: -1 typ: float64 + OUT 2: R{ I0 } spilloffset: -1 typ: int64 + offsetToSpillArea: 112 spillAreaSize: 32 +`) + + abitest(t, ft, exp) +} + +func TestABIUtilsInterfaces(t *testing.T) { + // type s1 { f1 int16; f2 int16; f3 bool) + // type nei interface { ...() string } + // func(p1 s1, p2 interface{}, p3 interface{}, p4 nei, + // p5 *interface{}, p6 nei, p7 int64) + // (r1 interface{}, r2 nei, r3 bool) + ei := types.Types[types.TINTER] // interface{} + pei := types.NewPtr(ei) // *interface{} + fldt := mkFuncType(types.FakeRecvType(), []*types.Type{}, + []*types.Type{types.Types[types.TSTRING]}) + field := types.NewField(src.NoXPos, typecheck.Lookup("F"), fldt) + nei := types.NewInterface([]*types.Field{field}) + i16 := types.Types[types.TINT16] + tb := types.Types[types.TBOOL] + s1 := mkstruct(i16, i16, tb) + ft := mkFuncType(nil, []*types.Type{s1, ei, ei, nei, pei, nei, i16}, + []*types.Type{ei, nei, pei}) + + exp := makeExpectedDump(` + IN 0: R{ I0 I1 I2 } spilloffset: 0 typ: struct { int16; int16; bool } + IN 1: R{ I3 I4 } spilloffset: 8 typ: interface {} + IN 2: R{ I5 I6 } spilloffset: 24 typ: interface {} + IN 3: R{ I7 I8 } spilloffset: 40 typ: interface { F() string } + IN 4: R{ } offset: 0 typ: *interface {} + IN 5: R{ } offset: 8 typ: interface { F() string } + IN 6: R{ } offset: 24 typ: int16 + OUT 0: R{ I0 I1 } spilloffset: -1 typ: interface {} + OUT 1: R{ I2 I3 } spilloffset: -1 typ: interface { F() string } + OUT 2: R{ I4 } spilloffset: -1 typ: *interface {} 
+ offsetToSpillArea: 32 spillAreaSize: 56 +`) + + abitest(t, ft, exp) +} + +func TestABINumParamRegs(t *testing.T) { + i8 := types.Types[types.TINT8] + i16 := types.Types[types.TINT16] + i32 := types.Types[types.TINT32] + i64 := types.Types[types.TINT64] + f32 := types.Types[types.TFLOAT32] + f64 := types.Types[types.TFLOAT64] + c64 := types.Types[types.TCOMPLEX64] + c128 := types.Types[types.TCOMPLEX128] + + s := mkstruct(i8, i8, mkstruct(), i8, i16) + a := mkstruct(s, s, s) + + nrtest(t, i8, 1) + nrtest(t, i16, 1) + nrtest(t, i32, 1) + nrtest(t, i64, 1) + nrtest(t, f32, 1) + nrtest(t, f64, 1) + nrtest(t, c64, 2) + nrtest(t, c128, 2) + nrtest(t, s, 4) + nrtest(t, a, 12) +} + +func TestABIUtilsComputePadding(t *testing.T) { + // type s1 { f1 int8; f2 int16; f3 struct{}; f4 int32; f5 int64 } + i8 := types.Types[types.TINT8] + i16 := types.Types[types.TINT16] + i32 := types.Types[types.TINT32] + i64 := types.Types[types.TINT64] + emptys := mkstruct() + s1 := mkstruct(i8, i16, emptys, i32, i64) + // func (p1 int32, p2 s1, p3 emptys, p4 [1]int32) + a1 := types.NewArray(i32, 1) + ft := mkFuncType(nil, []*types.Type{i32, s1, emptys, a1}, nil) + + // Run abitest() just to document what we're expected to see. + exp := makeExpectedDump(` + IN 0: R{ I0 } spilloffset: 0 typ: int32 + IN 1: R{ I1 I2 I3 I4 } spilloffset: 8 typ: struct { int8; int16; struct {}; int32; int64 } + IN 2: R{ } offset: 0 typ: struct {} + IN 3: R{ I5 } spilloffset: 24 typ: [1]int32 + offsetToSpillArea: 0 spillAreaSize: 32 +`) + abitest(t, ft, exp) + + // Analyze with full set of registers, then call ComputePadding + // on the second param, verifying the results. 
+ regRes := configAMD64.ABIAnalyze(ft, false) + padding := make([]uint64, 32) + parm := regRes.InParams()[1] + padding = parm.ComputePadding(padding) + want := "[1 1 1 0]" + got := fmt.Sprintf("%+v", padding) + if got != want { + t.Errorf("padding mismatch: wanted %q got %q\n", got, want) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/abiutilsaux_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/abiutilsaux_test.go new file mode 100644 index 0000000000000000000000000000000000000000..fb1c3983a8715c304beb42dec6485fe10e5250db --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/abiutilsaux_test.go @@ -0,0 +1,131 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +// This file contains utility routines and harness infrastructure used +// by the ABI tests in "abiutils_test.go". + +import ( + "cmd/compile/internal/abi" + "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/src" + "fmt" + "strings" + "testing" + "text/scanner" +) + +func mkParamResultField(t *types.Type, s *types.Sym, which ir.Class) *types.Field { + field := types.NewField(src.NoXPos, s, t) + n := ir.NewNameAt(src.NoXPos, s, t) + n.Class = which + field.Nname = n + return field +} + +// mkstruct is a helper routine to create a struct type with fields +// of the types specified in 'fieldtypes'. 
+func mkstruct(fieldtypes ...*types.Type) *types.Type { + fields := make([]*types.Field, len(fieldtypes)) + for k, t := range fieldtypes { + if t == nil { + panic("bad -- field has no type") + } + f := types.NewField(src.NoXPos, nil, t) + fields[k] = f + } + s := types.NewStruct(fields) + return s +} + +func mkFuncType(rcvr *types.Type, ins []*types.Type, outs []*types.Type) *types.Type { + q := typecheck.Lookup("?") + inf := []*types.Field{} + for _, it := range ins { + inf = append(inf, mkParamResultField(it, q, ir.PPARAM)) + } + outf := []*types.Field{} + for _, ot := range outs { + outf = append(outf, mkParamResultField(ot, q, ir.PPARAMOUT)) + } + var rf *types.Field + if rcvr != nil { + rf = mkParamResultField(rcvr, q, ir.PPARAM) + } + return types.NewSignature(rf, inf, outf) +} + +type expectedDump struct { + dump string + file string + line int +} + +func tokenize(src string) []string { + var s scanner.Scanner + s.Init(strings.NewReader(src)) + res := []string{} + for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() { + res = append(res, s.TokenText()) + } + return res +} + +func verifyParamResultOffset(t *testing.T, f *types.Field, r abi.ABIParamAssignment, which string, idx int) int { + n := f.Nname.(*ir.Name) + if n.FrameOffset() != int64(r.Offset()) { + t.Errorf("%s %d: got offset %d wanted %d t=%v", + which, idx, r.Offset(), n.Offset_, f.Type) + return 1 + } + return 0 +} + +func makeExpectedDump(e string) expectedDump { + return expectedDump{dump: e} +} + +func difftokens(atoks []string, etoks []string) string { + if len(atoks) != len(etoks) { + return fmt.Sprintf("expected %d tokens got %d", + len(etoks), len(atoks)) + } + for i := 0; i < len(etoks); i++ { + if etoks[i] == atoks[i] { + continue + } + + return fmt.Sprintf("diff at token %d: expected %q got %q", + i, etoks[i], atoks[i]) + } + return "" +} + +func nrtest(t *testing.T, ft *types.Type, expected int) { + types.CalcSize(ft) + got := configAMD64.NumParamRegs(ft) + if got != expected { + 
t.Errorf("]\nexpected num regs = %d, got %d, type %v", expected, got, ft) + } +} + +func abitest(t *testing.T, ft *types.Type, exp expectedDump) { + + types.CalcSize(ft) + + // Analyze with full set of registers. + regRes := configAMD64.ABIAnalyze(ft, false) + regResString := strings.TrimSpace(regRes.String()) + + // Check results. + reason := difftokens(tokenize(regResString), tokenize(exp.dump)) + if reason != "" { + t.Errorf("\nexpected:\n%s\ngot:\n%s\nreason: %s", + strings.TrimSpace(exp.dump), regResString, reason) + } + +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/align_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/align_test.go new file mode 100644 index 0000000000000000000000000000000000000000..32afc92973622705d416d492c0677bc9646340fe --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/align_test.go @@ -0,0 +1,96 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test to make sure that equality functions (and hash +// functions) don't do unaligned reads on architectures +// that can't do unaligned reads. See issue 46283. 
+ +package test + +import "testing" + +type T1 struct { + x float32 + a, b, c, d int16 // memequal64 +} +type T2 struct { + x float32 + a, b, c, d int32 // memequal128 +} + +type A2 [2]byte // eq uses a 2-byte load +type A4 [4]byte // eq uses a 4-byte load +type A8 [8]byte // eq uses an 8-byte load + +//go:noinline +func cmpT1(p, q *T1) { + if *p != *q { + panic("comparison test wrong") + } +} + +//go:noinline +func cmpT2(p, q *T2) { + if *p != *q { + panic("comparison test wrong") + } +} + +//go:noinline +func cmpA2(p, q *A2) { + if *p != *q { + panic("comparison test wrong") + } +} + +//go:noinline +func cmpA4(p, q *A4) { + if *p != *q { + panic("comparison test wrong") + } +} + +//go:noinline +func cmpA8(p, q *A8) { + if *p != *q { + panic("comparison test wrong") + } +} + +func TestAlignEqual(t *testing.T) { + cmpT1(&T1{}, &T1{}) + cmpT2(&T2{}, &T2{}) + + m1 := map[T1]bool{} + m1[T1{}] = true + m1[T1{}] = false + if len(m1) != 1 { + t.Fatalf("len(m1)=%d, want 1", len(m1)) + } + m2 := map[T2]bool{} + m2[T2{}] = true + m2[T2{}] = false + if len(m2) != 1 { + t.Fatalf("len(m2)=%d, want 1", len(m2)) + } + + type X2 struct { + y byte + z A2 + } + var x2 X2 + cmpA2(&x2.z, &A2{}) + type X4 struct { + y byte + z A4 + } + var x4 X4 + cmpA4(&x4.z, &A4{}) + type X8 struct { + y byte + z A8 + } + var x8 X8 + cmpA8(&x8.z, &A8{}) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/bench_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/bench_test.go new file mode 100644 index 0000000000000000000000000000000000000000..472460009170e2f80d37915fb7feda9194cef1b6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/bench_test.go @@ -0,0 +1,124 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package test + +import "testing" + +var globl int64 +var globl32 int32 + +func BenchmarkLoadAdd(b *testing.B) { + x := make([]int64, 1024) + y := make([]int64, 1024) + for i := 0; i < b.N; i++ { + var s int64 + for i := range x { + s ^= x[i] + y[i] + } + globl = s + } +} + +// Added for ppc64 extswsli on power9 +func BenchmarkExtShift(b *testing.B) { + x := make([]int32, 1024) + for i := 0; i < b.N; i++ { + var s int64 + for i := range x { + s ^= int64(x[i]+32) * 8 + } + globl = s + } +} + +func BenchmarkModify(b *testing.B) { + a := make([]int64, 1024) + v := globl + for i := 0; i < b.N; i++ { + for j := range a { + a[j] += v + } + } +} + +func BenchmarkMullImm(b *testing.B) { + x := make([]int32, 1024) + for i := 0; i < b.N; i++ { + var s int32 + for i := range x { + s += x[i] * 100 + } + globl32 = s + } +} + +func BenchmarkConstModify(b *testing.B) { + a := make([]int64, 1024) + for i := 0; i < b.N; i++ { + for j := range a { + a[j] += 3 + } + } +} + +func BenchmarkBitSet(b *testing.B) { + const N = 64 * 8 + a := make([]uint64, N/64) + for i := 0; i < b.N; i++ { + for j := uint64(0); j < N; j++ { + a[j/64] |= 1 << (j % 64) + } + } +} + +func BenchmarkBitClear(b *testing.B) { + const N = 64 * 8 + a := make([]uint64, N/64) + for i := 0; i < b.N; i++ { + for j := uint64(0); j < N; j++ { + a[j/64] &^= 1 << (j % 64) + } + } +} + +func BenchmarkBitToggle(b *testing.B) { + const N = 64 * 8 + a := make([]uint64, N/64) + for i := 0; i < b.N; i++ { + for j := uint64(0); j < N; j++ { + a[j/64] ^= 1 << (j % 64) + } + } +} + +func BenchmarkBitSetConst(b *testing.B) { + const N = 64 + a := make([]uint64, N) + for i := 0; i < b.N; i++ { + for j := range a { + a[j] |= 1 << 37 + } + } +} + +func BenchmarkBitClearConst(b *testing.B) { + const N = 64 + a := make([]uint64, N) + for i := 0; i < b.N; i++ { + for j := range a { + a[j] &^= 1 << 37 + } + } +} + +func BenchmarkBitToggleConst(b *testing.B) { + const N = 64 + a := make([]uint64, N) + for i := 0; i < b.N; i++ { + for j 
:= range a { + a[j] ^= 1 << 37 + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/clobberdead_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/clobberdead_test.go new file mode 100644 index 0000000000000000000000000000000000000000..80d9678c082866d2cd5ca7a6cba68336614db5f4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/clobberdead_test.go @@ -0,0 +1,54 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "internal/testenv" + "os" + "path/filepath" + "testing" +) + +const helloSrc = ` +package main +import "fmt" +func main() { fmt.Println("hello") } +` + +func TestClobberDead(t *testing.T) { + // Test that clobberdead mode generates correct program. + runHello(t, "-clobberdead") +} + +func TestClobberDeadReg(t *testing.T) { + // Test that clobberdeadreg mode generates correct program. + runHello(t, "-clobberdeadreg") +} + +func runHello(t *testing.T, flag string) { + if testing.Short() { + // This test rebuilds the runtime with a special flag, which + // takes a while. 
+ t.Skip("skip in short mode") + } + testenv.MustHaveGoRun(t) + t.Parallel() + + tmpdir := t.TempDir() + src := filepath.Join(tmpdir, "x.go") + err := os.WriteFile(src, []byte(helloSrc), 0644) + if err != nil { + t.Fatalf("write file failed: %v", err) + } + + cmd := testenv.Command(t, testenv.GoToolPath(t), "run", "-gcflags=all="+flag, src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("go run failed: %v\n%s", err, out) + } + if string(out) != "hello\n" { + t.Errorf("wrong output: got %q, want %q", out, "hello\n") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/constFold_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/constFold_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7159f0ed33a375960c782f69940446d4c7ae5278 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/constFold_test.go @@ -0,0 +1,18111 @@ +// run +// Code generated by gen/constFoldGen.go. DO NOT EDIT. 
+ +package test + +import "testing" + +func TestConstFolduint64add(t *testing.T) { + var x, y, r uint64 + x = 0 + y = 0 + r = x + y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "+", r) + } + y = 1 + r = x + y + if r != 1 { + t.Errorf("0 %s 1 = %d, want 1", "+", r) + } + y = 4294967296 + r = x + y + if r != 4294967296 { + t.Errorf("0 %s 4294967296 = %d, want 4294967296", "+", r) + } + y = 18446744073709551615 + r = x + y + if r != 18446744073709551615 { + t.Errorf("0 %s 18446744073709551615 = %d, want 18446744073709551615", "+", r) + } + x = 1 + y = 0 + r = x + y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "+", r) + } + y = 1 + r = x + y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "+", r) + } + y = 4294967296 + r = x + y + if r != 4294967297 { + t.Errorf("1 %s 4294967296 = %d, want 4294967297", "+", r) + } + y = 18446744073709551615 + r = x + y + if r != 0 { + t.Errorf("1 %s 18446744073709551615 = %d, want 0", "+", r) + } + x = 4294967296 + y = 0 + r = x + y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", "+", r) + } + y = 1 + r = x + y + if r != 4294967297 { + t.Errorf("4294967296 %s 1 = %d, want 4294967297", "+", r) + } + y = 4294967296 + r = x + y + if r != 8589934592 { + t.Errorf("4294967296 %s 4294967296 = %d, want 8589934592", "+", r) + } + y = 18446744073709551615 + r = x + y + if r != 4294967295 { + t.Errorf("4294967296 %s 18446744073709551615 = %d, want 4294967295", "+", r) + } + x = 18446744073709551615 + y = 0 + r = x + y + if r != 18446744073709551615 { + t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "+", r) + } + y = 1 + r = x + y + if r != 0 { + t.Errorf("18446744073709551615 %s 1 = %d, want 0", "+", r) + } + y = 4294967296 + r = x + y + if r != 4294967295 { + t.Errorf("18446744073709551615 %s 4294967296 = %d, want 4294967295", "+", r) + } + y = 18446744073709551615 + r = x + y + if r != 18446744073709551614 { + t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 
18446744073709551614", "+", r) + } +} +func TestConstFolduint64sub(t *testing.T) { + var x, y, r uint64 + x = 0 + y = 0 + r = x - y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "-", r) + } + y = 1 + r = x - y + if r != 18446744073709551615 { + t.Errorf("0 %s 1 = %d, want 18446744073709551615", "-", r) + } + y = 4294967296 + r = x - y + if r != 18446744069414584320 { + t.Errorf("0 %s 4294967296 = %d, want 18446744069414584320", "-", r) + } + y = 18446744073709551615 + r = x - y + if r != 1 { + t.Errorf("0 %s 18446744073709551615 = %d, want 1", "-", r) + } + x = 1 + y = 0 + r = x - y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "-", r) + } + y = 1 + r = x - y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", "-", r) + } + y = 4294967296 + r = x - y + if r != 18446744069414584321 { + t.Errorf("1 %s 4294967296 = %d, want 18446744069414584321", "-", r) + } + y = 18446744073709551615 + r = x - y + if r != 2 { + t.Errorf("1 %s 18446744073709551615 = %d, want 2", "-", r) + } + x = 4294967296 + y = 0 + r = x - y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", "-", r) + } + y = 1 + r = x - y + if r != 4294967295 { + t.Errorf("4294967296 %s 1 = %d, want 4294967295", "-", r) + } + y = 4294967296 + r = x - y + if r != 0 { + t.Errorf("4294967296 %s 4294967296 = %d, want 0", "-", r) + } + y = 18446744073709551615 + r = x - y + if r != 4294967297 { + t.Errorf("4294967296 %s 18446744073709551615 = %d, want 4294967297", "-", r) + } + x = 18446744073709551615 + y = 0 + r = x - y + if r != 18446744073709551615 { + t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "-", r) + } + y = 1 + r = x - y + if r != 18446744073709551614 { + t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "-", r) + } + y = 4294967296 + r = x - y + if r != 18446744069414584319 { + t.Errorf("18446744073709551615 %s 4294967296 = %d, want 18446744069414584319", "-", r) + } + y = 18446744073709551615 + r = x - y + if r != 0 { + 
t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", "-", r) + } +} +func TestConstFolduint64div(t *testing.T) { + var x, y, r uint64 + x = 0 + y = 1 + r = x / y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "/", r) + } + y = 4294967296 + r = x / y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", "/", r) + } + y = 18446744073709551615 + r = x / y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 0", "/", r) + } + x = 1 + y = 1 + r = x / y + if r != 1 { + t.Errorf("1 %s 1 = %d, want 1", "/", r) + } + y = 4294967296 + r = x / y + if r != 0 { + t.Errorf("1 %s 4294967296 = %d, want 0", "/", r) + } + y = 18446744073709551615 + r = x / y + if r != 0 { + t.Errorf("1 %s 18446744073709551615 = %d, want 0", "/", r) + } + x = 4294967296 + y = 1 + r = x / y + if r != 4294967296 { + t.Errorf("4294967296 %s 1 = %d, want 4294967296", "/", r) + } + y = 4294967296 + r = x / y + if r != 1 { + t.Errorf("4294967296 %s 4294967296 = %d, want 1", "/", r) + } + y = 18446744073709551615 + r = x / y + if r != 0 { + t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", "/", r) + } + x = 18446744073709551615 + y = 1 + r = x / y + if r != 18446744073709551615 { + t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551615", "/", r) + } + y = 4294967296 + r = x / y + if r != 4294967295 { + t.Errorf("18446744073709551615 %s 4294967296 = %d, want 4294967295", "/", r) + } + y = 18446744073709551615 + r = x / y + if r != 1 { + t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 1", "/", r) + } +} +func TestConstFolduint64mul(t *testing.T) { + var x, y, r uint64 + x = 0 + y = 0 + r = x * y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "*", r) + } + y = 4294967296 + r = x * y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", "*", r) + } + y = 18446744073709551615 + r = x * y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 0", "*", 
r) + } + x = 1 + y = 0 + r = x * y + if r != 0 { + t.Errorf("1 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 1 { + t.Errorf("1 %s 1 = %d, want 1", "*", r) + } + y = 4294967296 + r = x * y + if r != 4294967296 { + t.Errorf("1 %s 4294967296 = %d, want 4294967296", "*", r) + } + y = 18446744073709551615 + r = x * y + if r != 18446744073709551615 { + t.Errorf("1 %s 18446744073709551615 = %d, want 18446744073709551615", "*", r) + } + x = 4294967296 + y = 0 + r = x * y + if r != 0 { + t.Errorf("4294967296 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 4294967296 { + t.Errorf("4294967296 %s 1 = %d, want 4294967296", "*", r) + } + y = 4294967296 + r = x * y + if r != 0 { + t.Errorf("4294967296 %s 4294967296 = %d, want 0", "*", r) + } + y = 18446744073709551615 + r = x * y + if r != 18446744069414584320 { + t.Errorf("4294967296 %s 18446744073709551615 = %d, want 18446744069414584320", "*", r) + } + x = 18446744073709551615 + y = 0 + r = x * y + if r != 0 { + t.Errorf("18446744073709551615 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 18446744073709551615 { + t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551615", "*", r) + } + y = 4294967296 + r = x * y + if r != 18446744069414584320 { + t.Errorf("18446744073709551615 %s 4294967296 = %d, want 18446744069414584320", "*", r) + } + y = 18446744073709551615 + r = x * y + if r != 1 { + t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 1", "*", r) + } +} +func TestConstFolduint64mod(t *testing.T) { + var x, y, r uint64 + x = 0 + y = 1 + r = x % y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "%", r) + } + y = 4294967296 + r = x % y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", "%", r) + } + y = 18446744073709551615 + r = x % y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 0", "%", r) + } + x = 1 + y = 1 + r = x % y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", "%", r) + } + y = 4294967296 + r = x % y + if r != 1 { + 
t.Errorf("1 %s 4294967296 = %d, want 1", "%", r) + } + y = 18446744073709551615 + r = x % y + if r != 1 { + t.Errorf("1 %s 18446744073709551615 = %d, want 1", "%", r) + } + x = 4294967296 + y = 1 + r = x % y + if r != 0 { + t.Errorf("4294967296 %s 1 = %d, want 0", "%", r) + } + y = 4294967296 + r = x % y + if r != 0 { + t.Errorf("4294967296 %s 4294967296 = %d, want 0", "%", r) + } + y = 18446744073709551615 + r = x % y + if r != 4294967296 { + t.Errorf("4294967296 %s 18446744073709551615 = %d, want 4294967296", "%", r) + } + x = 18446744073709551615 + y = 1 + r = x % y + if r != 0 { + t.Errorf("18446744073709551615 %s 1 = %d, want 0", "%", r) + } + y = 4294967296 + r = x % y + if r != 4294967295 { + t.Errorf("18446744073709551615 %s 4294967296 = %d, want 4294967295", "%", r) + } + y = 18446744073709551615 + r = x % y + if r != 0 { + t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", "%", r) + } +} +func TestConstFoldint64add(t *testing.T) { + var x, y, r int64 + x = -9223372036854775808 + y = -9223372036854775808 + r = x + y + if r != 0 { + t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "+", r) + } + y = -9223372036854775807 + r = x + y + if r != 1 { + t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want 1", "+", r) + } + y = -4294967296 + r = x + y + if r != 9223372032559808512 { + t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 9223372032559808512", "+", r) + } + y = -1 + r = x + y + if r != 9223372036854775807 { + t.Errorf("-9223372036854775808 %s -1 = %d, want 9223372036854775807", "+", r) + } + y = 0 + r = x + y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "+", r) + } + y = 1 + r = x + y + if r != -9223372036854775807 { + t.Errorf("-9223372036854775808 %s 1 = %d, want -9223372036854775807", "+", r) + } + y = 4294967296 + r = x + y + if r != -9223372032559808512 { + t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 
-9223372032559808512", "+", r) + } + y = 9223372036854775806 + r = x + y + if r != -2 { + t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want -2", "+", r) + } + y = 9223372036854775807 + r = x + y + if r != -1 { + t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -1", "+", r) + } + x = -9223372036854775807 + y = -9223372036854775808 + r = x + y + if r != 1 { + t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want 1", "+", r) + } + y = -9223372036854775807 + r = x + y + if r != 2 { + t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 2", "+", r) + } + y = -4294967296 + r = x + y + if r != 9223372032559808513 { + t.Errorf("-9223372036854775807 %s -4294967296 = %d, want 9223372032559808513", "+", r) + } + y = -1 + r = x + y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775807 %s -1 = %d, want -9223372036854775808", "+", r) + } + y = 0 + r = x + y + if r != -9223372036854775807 { + t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "+", r) + } + y = 1 + r = x + y + if r != -9223372036854775806 { + t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775806", "+", r) + } + y = 4294967296 + r = x + y + if r != -9223372032559808511 { + t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -9223372032559808511", "+", r) + } + y = 9223372036854775806 + r = x + y + if r != -1 { + t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want -1", "+", r) + } + y = 9223372036854775807 + r = x + y + if r != 0 { + t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want 0", "+", r) + } + x = -4294967296 + y = -9223372036854775808 + r = x + y + if r != 9223372032559808512 { + t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 9223372032559808512", "+", r) + } + y = -9223372036854775807 + r = x + y + if r != 9223372032559808513 { + t.Errorf("-4294967296 %s -9223372036854775807 = %d, want 9223372032559808513", "+", r) + } + y = -4294967296 + r = x + y + if r != 
-8589934592 { + t.Errorf("-4294967296 %s -4294967296 = %d, want -8589934592", "+", r) + } + y = -1 + r = x + y + if r != -4294967297 { + t.Errorf("-4294967296 %s -1 = %d, want -4294967297", "+", r) + } + y = 0 + r = x + y + if r != -4294967296 { + t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "+", r) + } + y = 1 + r = x + y + if r != -4294967295 { + t.Errorf("-4294967296 %s 1 = %d, want -4294967295", "+", r) + } + y = 4294967296 + r = x + y + if r != 0 { + t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "+", r) + } + y = 9223372036854775806 + r = x + y + if r != 9223372032559808510 { + t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 9223372032559808510", "+", r) + } + y = 9223372036854775807 + r = x + y + if r != 9223372032559808511 { + t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 9223372032559808511", "+", r) + } + x = -1 + y = -9223372036854775808 + r = x + y + if r != 9223372036854775807 { + t.Errorf("-1 %s -9223372036854775808 = %d, want 9223372036854775807", "+", r) + } + y = -9223372036854775807 + r = x + y + if r != -9223372036854775808 { + t.Errorf("-1 %s -9223372036854775807 = %d, want -9223372036854775808", "+", r) + } + y = -4294967296 + r = x + y + if r != -4294967297 { + t.Errorf("-1 %s -4294967296 = %d, want -4294967297", "+", r) + } + y = -1 + r = x + y + if r != -2 { + t.Errorf("-1 %s -1 = %d, want -2", "+", r) + } + y = 0 + r = x + y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "+", r) + } + y = 1 + r = x + y + if r != 0 { + t.Errorf("-1 %s 1 = %d, want 0", "+", r) + } + y = 4294967296 + r = x + y + if r != 4294967295 { + t.Errorf("-1 %s 4294967296 = %d, want 4294967295", "+", r) + } + y = 9223372036854775806 + r = x + y + if r != 9223372036854775805 { + t.Errorf("-1 %s 9223372036854775806 = %d, want 9223372036854775805", "+", r) + } + y = 9223372036854775807 + r = x + y + if r != 9223372036854775806 { + t.Errorf("-1 %s 9223372036854775807 = %d, want 9223372036854775806", "+", r) + } + x = 0 + y = 
-9223372036854775808 + r = x + y + if r != -9223372036854775808 { + t.Errorf("0 %s -9223372036854775808 = %d, want -9223372036854775808", "+", r) + } + y = -9223372036854775807 + r = x + y + if r != -9223372036854775807 { + t.Errorf("0 %s -9223372036854775807 = %d, want -9223372036854775807", "+", r) + } + y = -4294967296 + r = x + y + if r != -4294967296 { + t.Errorf("0 %s -4294967296 = %d, want -4294967296", "+", r) + } + y = -1 + r = x + y + if r != -1 { + t.Errorf("0 %s -1 = %d, want -1", "+", r) + } + y = 0 + r = x + y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "+", r) + } + y = 1 + r = x + y + if r != 1 { + t.Errorf("0 %s 1 = %d, want 1", "+", r) + } + y = 4294967296 + r = x + y + if r != 4294967296 { + t.Errorf("0 %s 4294967296 = %d, want 4294967296", "+", r) + } + y = 9223372036854775806 + r = x + y + if r != 9223372036854775806 { + t.Errorf("0 %s 9223372036854775806 = %d, want 9223372036854775806", "+", r) + } + y = 9223372036854775807 + r = x + y + if r != 9223372036854775807 { + t.Errorf("0 %s 9223372036854775807 = %d, want 9223372036854775807", "+", r) + } + x = 1 + y = -9223372036854775808 + r = x + y + if r != -9223372036854775807 { + t.Errorf("1 %s -9223372036854775808 = %d, want -9223372036854775807", "+", r) + } + y = -9223372036854775807 + r = x + y + if r != -9223372036854775806 { + t.Errorf("1 %s -9223372036854775807 = %d, want -9223372036854775806", "+", r) + } + y = -4294967296 + r = x + y + if r != -4294967295 { + t.Errorf("1 %s -4294967296 = %d, want -4294967295", "+", r) + } + y = -1 + r = x + y + if r != 0 { + t.Errorf("1 %s -1 = %d, want 0", "+", r) + } + y = 0 + r = x + y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "+", r) + } + y = 1 + r = x + y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "+", r) + } + y = 4294967296 + r = x + y + if r != 4294967297 { + t.Errorf("1 %s 4294967296 = %d, want 4294967297", "+", r) + } + y = 9223372036854775806 + r = x + y + if r != 9223372036854775807 { + t.Errorf("1 %s 9223372036854775806 = 
%d, want 9223372036854775807", "+", r) + } + y = 9223372036854775807 + r = x + y + if r != -9223372036854775808 { + t.Errorf("1 %s 9223372036854775807 = %d, want -9223372036854775808", "+", r) + } + x = 4294967296 + y = -9223372036854775808 + r = x + y + if r != -9223372032559808512 { + t.Errorf("4294967296 %s -9223372036854775808 = %d, want -9223372032559808512", "+", r) + } + y = -9223372036854775807 + r = x + y + if r != -9223372032559808511 { + t.Errorf("4294967296 %s -9223372036854775807 = %d, want -9223372032559808511", "+", r) + } + y = -4294967296 + r = x + y + if r != 0 { + t.Errorf("4294967296 %s -4294967296 = %d, want 0", "+", r) + } + y = -1 + r = x + y + if r != 4294967295 { + t.Errorf("4294967296 %s -1 = %d, want 4294967295", "+", r) + } + y = 0 + r = x + y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", "+", r) + } + y = 1 + r = x + y + if r != 4294967297 { + t.Errorf("4294967296 %s 1 = %d, want 4294967297", "+", r) + } + y = 4294967296 + r = x + y + if r != 8589934592 { + t.Errorf("4294967296 %s 4294967296 = %d, want 8589934592", "+", r) + } + y = 9223372036854775806 + r = x + y + if r != -9223372032559808514 { + t.Errorf("4294967296 %s 9223372036854775806 = %d, want -9223372032559808514", "+", r) + } + y = 9223372036854775807 + r = x + y + if r != -9223372032559808513 { + t.Errorf("4294967296 %s 9223372036854775807 = %d, want -9223372032559808513", "+", r) + } + x = 9223372036854775806 + y = -9223372036854775808 + r = x + y + if r != -2 { + t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want -2", "+", r) + } + y = -9223372036854775807 + r = x + y + if r != -1 { + t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want -1", "+", r) + } + y = -4294967296 + r = x + y + if r != 9223372032559808510 { + t.Errorf("9223372036854775806 %s -4294967296 = %d, want 9223372032559808510", "+", r) + } + y = -1 + r = x + y + if r != 9223372036854775805 { + t.Errorf("9223372036854775806 %s -1 = %d, want 
9223372036854775805", "+", r) + } + y = 0 + r = x + y + if r != 9223372036854775806 { + t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "+", r) + } + y = 1 + r = x + y + if r != 9223372036854775807 { + t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775807", "+", r) + } + y = 4294967296 + r = x + y + if r != -9223372032559808514 { + t.Errorf("9223372036854775806 %s 4294967296 = %d, want -9223372032559808514", "+", r) + } + y = 9223372036854775806 + r = x + y + if r != -4 { + t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want -4", "+", r) + } + y = 9223372036854775807 + r = x + y + if r != -3 { + t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want -3", "+", r) + } + x = 9223372036854775807 + y = -9223372036854775808 + r = x + y + if r != -1 { + t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want -1", "+", r) + } + y = -9223372036854775807 + r = x + y + if r != 0 { + t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want 0", "+", r) + } + y = -4294967296 + r = x + y + if r != 9223372032559808511 { + t.Errorf("9223372036854775807 %s -4294967296 = %d, want 9223372032559808511", "+", r) + } + y = -1 + r = x + y + if r != 9223372036854775806 { + t.Errorf("9223372036854775807 %s -1 = %d, want 9223372036854775806", "+", r) + } + y = 0 + r = x + y + if r != 9223372036854775807 { + t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "+", r) + } + y = 1 + r = x + y + if r != -9223372036854775808 { + t.Errorf("9223372036854775807 %s 1 = %d, want -9223372036854775808", "+", r) + } + y = 4294967296 + r = x + y + if r != -9223372032559808513 { + t.Errorf("9223372036854775807 %s 4294967296 = %d, want -9223372032559808513", "+", r) + } + y = 9223372036854775806 + r = x + y + if r != -3 { + t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want -3", "+", r) + } + y = 9223372036854775807 + r = x + y + if r != -2 { + t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 
-2", "+", r) + } +} +func TestConstFoldint64sub(t *testing.T) { + var x, y, r int64 + x = -9223372036854775808 + y = -9223372036854775808 + r = x - y + if r != 0 { + t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "-", r) + } + y = -9223372036854775807 + r = x - y + if r != -1 { + t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want -1", "-", r) + } + y = -4294967296 + r = x - y + if r != -9223372032559808512 { + t.Errorf("-9223372036854775808 %s -4294967296 = %d, want -9223372032559808512", "-", r) + } + y = -1 + r = x - y + if r != -9223372036854775807 { + t.Errorf("-9223372036854775808 %s -1 = %d, want -9223372036854775807", "-", r) + } + y = 0 + r = x - y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "-", r) + } + y = 1 + r = x - y + if r != 9223372036854775807 { + t.Errorf("-9223372036854775808 %s 1 = %d, want 9223372036854775807", "-", r) + } + y = 4294967296 + r = x - y + if r != 9223372032559808512 { + t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 9223372032559808512", "-", r) + } + y = 9223372036854775806 + r = x - y + if r != 2 { + t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want 2", "-", r) + } + y = 9223372036854775807 + r = x - y + if r != 1 { + t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want 1", "-", r) + } + x = -9223372036854775807 + y = -9223372036854775808 + r = x - y + if r != 1 { + t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want 1", "-", r) + } + y = -9223372036854775807 + r = x - y + if r != 0 { + t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 0", "-", r) + } + y = -4294967296 + r = x - y + if r != -9223372032559808511 { + t.Errorf("-9223372036854775807 %s -4294967296 = %d, want -9223372032559808511", "-", r) + } + y = -1 + r = x - y + if r != -9223372036854775806 { + t.Errorf("-9223372036854775807 %s -1 = %d, want -9223372036854775806", "-", r) + } + y = 0 + r = x - y + if 
r != -9223372036854775807 { + t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "-", r) + } + y = 1 + r = x - y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775808", "-", r) + } + y = 4294967296 + r = x - y + if r != 9223372032559808513 { + t.Errorf("-9223372036854775807 %s 4294967296 = %d, want 9223372032559808513", "-", r) + } + y = 9223372036854775806 + r = x - y + if r != 3 { + t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want 3", "-", r) + } + y = 9223372036854775807 + r = x - y + if r != 2 { + t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want 2", "-", r) + } + x = -4294967296 + y = -9223372036854775808 + r = x - y + if r != 9223372032559808512 { + t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 9223372032559808512", "-", r) + } + y = -9223372036854775807 + r = x - y + if r != 9223372032559808511 { + t.Errorf("-4294967296 %s -9223372036854775807 = %d, want 9223372032559808511", "-", r) + } + y = -4294967296 + r = x - y + if r != 0 { + t.Errorf("-4294967296 %s -4294967296 = %d, want 0", "-", r) + } + y = -1 + r = x - y + if r != -4294967295 { + t.Errorf("-4294967296 %s -1 = %d, want -4294967295", "-", r) + } + y = 0 + r = x - y + if r != -4294967296 { + t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "-", r) + } + y = 1 + r = x - y + if r != -4294967297 { + t.Errorf("-4294967296 %s 1 = %d, want -4294967297", "-", r) + } + y = 4294967296 + r = x - y + if r != -8589934592 { + t.Errorf("-4294967296 %s 4294967296 = %d, want -8589934592", "-", r) + } + y = 9223372036854775806 + r = x - y + if r != 9223372032559808514 { + t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 9223372032559808514", "-", r) + } + y = 9223372036854775807 + r = x - y + if r != 9223372032559808513 { + t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 9223372032559808513", "-", r) + } + x = -1 + y = -9223372036854775808 + r = x - y + if r != 9223372036854775807 { + 
t.Errorf("-1 %s -9223372036854775808 = %d, want 9223372036854775807", "-", r) + } + y = -9223372036854775807 + r = x - y + if r != 9223372036854775806 { + t.Errorf("-1 %s -9223372036854775807 = %d, want 9223372036854775806", "-", r) + } + y = -4294967296 + r = x - y + if r != 4294967295 { + t.Errorf("-1 %s -4294967296 = %d, want 4294967295", "-", r) + } + y = -1 + r = x - y + if r != 0 { + t.Errorf("-1 %s -1 = %d, want 0", "-", r) + } + y = 0 + r = x - y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "-", r) + } + y = 1 + r = x - y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "-", r) + } + y = 4294967296 + r = x - y + if r != -4294967297 { + t.Errorf("-1 %s 4294967296 = %d, want -4294967297", "-", r) + } + y = 9223372036854775806 + r = x - y + if r != -9223372036854775807 { + t.Errorf("-1 %s 9223372036854775806 = %d, want -9223372036854775807", "-", r) + } + y = 9223372036854775807 + r = x - y + if r != -9223372036854775808 { + t.Errorf("-1 %s 9223372036854775807 = %d, want -9223372036854775808", "-", r) + } + x = 0 + y = -9223372036854775808 + r = x - y + if r != -9223372036854775808 { + t.Errorf("0 %s -9223372036854775808 = %d, want -9223372036854775808", "-", r) + } + y = -9223372036854775807 + r = x - y + if r != 9223372036854775807 { + t.Errorf("0 %s -9223372036854775807 = %d, want 9223372036854775807", "-", r) + } + y = -4294967296 + r = x - y + if r != 4294967296 { + t.Errorf("0 %s -4294967296 = %d, want 4294967296", "-", r) + } + y = -1 + r = x - y + if r != 1 { + t.Errorf("0 %s -1 = %d, want 1", "-", r) + } + y = 0 + r = x - y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "-", r) + } + y = 1 + r = x - y + if r != -1 { + t.Errorf("0 %s 1 = %d, want -1", "-", r) + } + y = 4294967296 + r = x - y + if r != -4294967296 { + t.Errorf("0 %s 4294967296 = %d, want -4294967296", "-", r) + } + y = 9223372036854775806 + r = x - y + if r != -9223372036854775806 { + t.Errorf("0 %s 9223372036854775806 = %d, want -9223372036854775806", "-", r) + } + y = 
9223372036854775807 + r = x - y + if r != -9223372036854775807 { + t.Errorf("0 %s 9223372036854775807 = %d, want -9223372036854775807", "-", r) + } + x = 1 + y = -9223372036854775808 + r = x - y + if r != -9223372036854775807 { + t.Errorf("1 %s -9223372036854775808 = %d, want -9223372036854775807", "-", r) + } + y = -9223372036854775807 + r = x - y + if r != -9223372036854775808 { + t.Errorf("1 %s -9223372036854775807 = %d, want -9223372036854775808", "-", r) + } + y = -4294967296 + r = x - y + if r != 4294967297 { + t.Errorf("1 %s -4294967296 = %d, want 4294967297", "-", r) + } + y = -1 + r = x - y + if r != 2 { + t.Errorf("1 %s -1 = %d, want 2", "-", r) + } + y = 0 + r = x - y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "-", r) + } + y = 1 + r = x - y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", "-", r) + } + y = 4294967296 + r = x - y + if r != -4294967295 { + t.Errorf("1 %s 4294967296 = %d, want -4294967295", "-", r) + } + y = 9223372036854775806 + r = x - y + if r != -9223372036854775805 { + t.Errorf("1 %s 9223372036854775806 = %d, want -9223372036854775805", "-", r) + } + y = 9223372036854775807 + r = x - y + if r != -9223372036854775806 { + t.Errorf("1 %s 9223372036854775807 = %d, want -9223372036854775806", "-", r) + } + x = 4294967296 + y = -9223372036854775808 + r = x - y + if r != -9223372032559808512 { + t.Errorf("4294967296 %s -9223372036854775808 = %d, want -9223372032559808512", "-", r) + } + y = -9223372036854775807 + r = x - y + if r != -9223372032559808513 { + t.Errorf("4294967296 %s -9223372036854775807 = %d, want -9223372032559808513", "-", r) + } + y = -4294967296 + r = x - y + if r != 8589934592 { + t.Errorf("4294967296 %s -4294967296 = %d, want 8589934592", "-", r) + } + y = -1 + r = x - y + if r != 4294967297 { + t.Errorf("4294967296 %s -1 = %d, want 4294967297", "-", r) + } + y = 0 + r = x - y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", "-", r) + } + y = 1 + r = x - y + if r != 4294967295 { + 
t.Errorf("4294967296 %s 1 = %d, want 4294967295", "-", r) + } + y = 4294967296 + r = x - y + if r != 0 { + t.Errorf("4294967296 %s 4294967296 = %d, want 0", "-", r) + } + y = 9223372036854775806 + r = x - y + if r != -9223372032559808510 { + t.Errorf("4294967296 %s 9223372036854775806 = %d, want -9223372032559808510", "-", r) + } + y = 9223372036854775807 + r = x - y + if r != -9223372032559808511 { + t.Errorf("4294967296 %s 9223372036854775807 = %d, want -9223372032559808511", "-", r) + } + x = 9223372036854775806 + y = -9223372036854775808 + r = x - y + if r != -2 { + t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want -2", "-", r) + } + y = -9223372036854775807 + r = x - y + if r != -3 { + t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want -3", "-", r) + } + y = -4294967296 + r = x - y + if r != -9223372032559808514 { + t.Errorf("9223372036854775806 %s -4294967296 = %d, want -9223372032559808514", "-", r) + } + y = -1 + r = x - y + if r != 9223372036854775807 { + t.Errorf("9223372036854775806 %s -1 = %d, want 9223372036854775807", "-", r) + } + y = 0 + r = x - y + if r != 9223372036854775806 { + t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "-", r) + } + y = 1 + r = x - y + if r != 9223372036854775805 { + t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775805", "-", r) + } + y = 4294967296 + r = x - y + if r != 9223372032559808510 { + t.Errorf("9223372036854775806 %s 4294967296 = %d, want 9223372032559808510", "-", r) + } + y = 9223372036854775806 + r = x - y + if r != 0 { + t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 0", "-", r) + } + y = 9223372036854775807 + r = x - y + if r != -1 { + t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want -1", "-", r) + } + x = 9223372036854775807 + y = -9223372036854775808 + r = x - y + if r != -1 { + t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want -1", "-", r) + } + y = -9223372036854775807 + r = x - y + if r != -2 { + 
t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want -2", "-", r) + } + y = -4294967296 + r = x - y + if r != -9223372032559808513 { + t.Errorf("9223372036854775807 %s -4294967296 = %d, want -9223372032559808513", "-", r) + } + y = -1 + r = x - y + if r != -9223372036854775808 { + t.Errorf("9223372036854775807 %s -1 = %d, want -9223372036854775808", "-", r) + } + y = 0 + r = x - y + if r != 9223372036854775807 { + t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "-", r) + } + y = 1 + r = x - y + if r != 9223372036854775806 { + t.Errorf("9223372036854775807 %s 1 = %d, want 9223372036854775806", "-", r) + } + y = 4294967296 + r = x - y + if r != 9223372032559808511 { + t.Errorf("9223372036854775807 %s 4294967296 = %d, want 9223372032559808511", "-", r) + } + y = 9223372036854775806 + r = x - y + if r != 1 { + t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want 1", "-", r) + } + y = 9223372036854775807 + r = x - y + if r != 0 { + t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 0", "-", r) + } +} +func TestConstFoldint64div(t *testing.T) { + var x, y, r int64 + x = -9223372036854775808 + y = -9223372036854775808 + r = x / y + if r != 1 { + t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 1", "/", r) + } + y = -9223372036854775807 + r = x / y + if r != 1 { + t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want 1", "/", r) + } + y = -4294967296 + r = x / y + if r != 2147483648 { + t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 2147483648", "/", r) + } + y = -1 + r = x / y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775808 %s -1 = %d, want -9223372036854775808", "/", r) + } + y = 1 + r = x / y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775808 %s 1 = %d, want -9223372036854775808", "/", r) + } + y = 4294967296 + r = x / y + if r != -2147483648 { + t.Errorf("-9223372036854775808 %s 4294967296 = %d, want -2147483648", "/", r) + } + y = 
9223372036854775806 + r = x / y + if r != -1 { + t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want -1", "/", r) + } + y = 9223372036854775807 + r = x / y + if r != -1 { + t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -1", "/", r) + } + x = -9223372036854775807 + y = -9223372036854775808 + r = x / y + if r != 0 { + t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want 0", "/", r) + } + y = -9223372036854775807 + r = x / y + if r != 1 { + t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 1", "/", r) + } + y = -4294967296 + r = x / y + if r != 2147483647 { + t.Errorf("-9223372036854775807 %s -4294967296 = %d, want 2147483647", "/", r) + } + y = -1 + r = x / y + if r != 9223372036854775807 { + t.Errorf("-9223372036854775807 %s -1 = %d, want 9223372036854775807", "/", r) + } + y = 1 + r = x / y + if r != -9223372036854775807 { + t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775807", "/", r) + } + y = 4294967296 + r = x / y + if r != -2147483647 { + t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -2147483647", "/", r) + } + y = 9223372036854775806 + r = x / y + if r != -1 { + t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want -1", "/", r) + } + y = 9223372036854775807 + r = x / y + if r != -1 { + t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want -1", "/", r) + } + x = -4294967296 + y = -9223372036854775808 + r = x / y + if r != 0 { + t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 0", "/", r) + } + y = -9223372036854775807 + r = x / y + if r != 0 { + t.Errorf("-4294967296 %s -9223372036854775807 = %d, want 0", "/", r) + } + y = -4294967296 + r = x / y + if r != 1 { + t.Errorf("-4294967296 %s -4294967296 = %d, want 1", "/", r) + } + y = -1 + r = x / y + if r != 4294967296 { + t.Errorf("-4294967296 %s -1 = %d, want 4294967296", "/", r) + } + y = 1 + r = x / y + if r != -4294967296 { + t.Errorf("-4294967296 %s 1 = %d, want -4294967296", "/", r) + } + y 
= 4294967296 + r = x / y + if r != -1 { + t.Errorf("-4294967296 %s 4294967296 = %d, want -1", "/", r) + } + y = 9223372036854775806 + r = x / y + if r != 0 { + t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 0", "/", r) + } + y = 9223372036854775807 + r = x / y + if r != 0 { + t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 0", "/", r) + } + x = -1 + y = -9223372036854775808 + r = x / y + if r != 0 { + t.Errorf("-1 %s -9223372036854775808 = %d, want 0", "/", r) + } + y = -9223372036854775807 + r = x / y + if r != 0 { + t.Errorf("-1 %s -9223372036854775807 = %d, want 0", "/", r) + } + y = -4294967296 + r = x / y + if r != 0 { + t.Errorf("-1 %s -4294967296 = %d, want 0", "/", r) + } + y = -1 + r = x / y + if r != 1 { + t.Errorf("-1 %s -1 = %d, want 1", "/", r) + } + y = 1 + r = x / y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", "/", r) + } + y = 4294967296 + r = x / y + if r != 0 { + t.Errorf("-1 %s 4294967296 = %d, want 0", "/", r) + } + y = 9223372036854775806 + r = x / y + if r != 0 { + t.Errorf("-1 %s 9223372036854775806 = %d, want 0", "/", r) + } + y = 9223372036854775807 + r = x / y + if r != 0 { + t.Errorf("-1 %s 9223372036854775807 = %d, want 0", "/", r) + } + x = 0 + y = -9223372036854775808 + r = x / y + if r != 0 { + t.Errorf("0 %s -9223372036854775808 = %d, want 0", "/", r) + } + y = -9223372036854775807 + r = x / y + if r != 0 { + t.Errorf("0 %s -9223372036854775807 = %d, want 0", "/", r) + } + y = -4294967296 + r = x / y + if r != 0 { + t.Errorf("0 %s -4294967296 = %d, want 0", "/", r) + } + y = -1 + r = x / y + if r != 0 { + t.Errorf("0 %s -1 = %d, want 0", "/", r) + } + y = 1 + r = x / y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "/", r) + } + y = 4294967296 + r = x / y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", "/", r) + } + y = 9223372036854775806 + r = x / y + if r != 0 { + t.Errorf("0 %s 9223372036854775806 = %d, want 0", "/", r) + } + y = 9223372036854775807 + r = x / y + if r != 0 { + t.Errorf("0 %s 
9223372036854775807 = %d, want 0", "/", r) + } + x = 1 + y = -9223372036854775808 + r = x / y + if r != 0 { + t.Errorf("1 %s -9223372036854775808 = %d, want 0", "/", r) + } + y = -9223372036854775807 + r = x / y + if r != 0 { + t.Errorf("1 %s -9223372036854775807 = %d, want 0", "/", r) + } + y = -4294967296 + r = x / y + if r != 0 { + t.Errorf("1 %s -4294967296 = %d, want 0", "/", r) + } + y = -1 + r = x / y + if r != -1 { + t.Errorf("1 %s -1 = %d, want -1", "/", r) + } + y = 1 + r = x / y + if r != 1 { + t.Errorf("1 %s 1 = %d, want 1", "/", r) + } + y = 4294967296 + r = x / y + if r != 0 { + t.Errorf("1 %s 4294967296 = %d, want 0", "/", r) + } + y = 9223372036854775806 + r = x / y + if r != 0 { + t.Errorf("1 %s 9223372036854775806 = %d, want 0", "/", r) + } + y = 9223372036854775807 + r = x / y + if r != 0 { + t.Errorf("1 %s 9223372036854775807 = %d, want 0", "/", r) + } + x = 4294967296 + y = -9223372036854775808 + r = x / y + if r != 0 { + t.Errorf("4294967296 %s -9223372036854775808 = %d, want 0", "/", r) + } + y = -9223372036854775807 + r = x / y + if r != 0 { + t.Errorf("4294967296 %s -9223372036854775807 = %d, want 0", "/", r) + } + y = -4294967296 + r = x / y + if r != -1 { + t.Errorf("4294967296 %s -4294967296 = %d, want -1", "/", r) + } + y = -1 + r = x / y + if r != -4294967296 { + t.Errorf("4294967296 %s -1 = %d, want -4294967296", "/", r) + } + y = 1 + r = x / y + if r != 4294967296 { + t.Errorf("4294967296 %s 1 = %d, want 4294967296", "/", r) + } + y = 4294967296 + r = x / y + if r != 1 { + t.Errorf("4294967296 %s 4294967296 = %d, want 1", "/", r) + } + y = 9223372036854775806 + r = x / y + if r != 0 { + t.Errorf("4294967296 %s 9223372036854775806 = %d, want 0", "/", r) + } + y = 9223372036854775807 + r = x / y + if r != 0 { + t.Errorf("4294967296 %s 9223372036854775807 = %d, want 0", "/", r) + } + x = 9223372036854775806 + y = -9223372036854775808 + r = x / y + if r != 0 { + t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want 0", "/", r) 
+ } + y = -9223372036854775807 + r = x / y + if r != 0 { + t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want 0", "/", r) + } + y = -4294967296 + r = x / y + if r != -2147483647 { + t.Errorf("9223372036854775806 %s -4294967296 = %d, want -2147483647", "/", r) + } + y = -1 + r = x / y + if r != -9223372036854775806 { + t.Errorf("9223372036854775806 %s -1 = %d, want -9223372036854775806", "/", r) + } + y = 1 + r = x / y + if r != 9223372036854775806 { + t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775806", "/", r) + } + y = 4294967296 + r = x / y + if r != 2147483647 { + t.Errorf("9223372036854775806 %s 4294967296 = %d, want 2147483647", "/", r) + } + y = 9223372036854775806 + r = x / y + if r != 1 { + t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 1", "/", r) + } + y = 9223372036854775807 + r = x / y + if r != 0 { + t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want 0", "/", r) + } + x = 9223372036854775807 + y = -9223372036854775808 + r = x / y + if r != 0 { + t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want 0", "/", r) + } + y = -9223372036854775807 + r = x / y + if r != -1 { + t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want -1", "/", r) + } + y = -4294967296 + r = x / y + if r != -2147483647 { + t.Errorf("9223372036854775807 %s -4294967296 = %d, want -2147483647", "/", r) + } + y = -1 + r = x / y + if r != -9223372036854775807 { + t.Errorf("9223372036854775807 %s -1 = %d, want -9223372036854775807", "/", r) + } + y = 1 + r = x / y + if r != 9223372036854775807 { + t.Errorf("9223372036854775807 %s 1 = %d, want 9223372036854775807", "/", r) + } + y = 4294967296 + r = x / y + if r != 2147483647 { + t.Errorf("9223372036854775807 %s 4294967296 = %d, want 2147483647", "/", r) + } + y = 9223372036854775806 + r = x / y + if r != 1 { + t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want 1", "/", r) + } + y = 9223372036854775807 + r = x / y + if r != 1 { + 
t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 1", "/", r) + } +} +func TestConstFoldint64mul(t *testing.T) { + var x, y, r int64 + x = -9223372036854775808 + y = -9223372036854775808 + r = x * y + if r != 0 { + t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "*", r) + } + y = -9223372036854775807 + r = x * y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want -9223372036854775808", "*", r) + } + y = -4294967296 + r = x * y + if r != 0 { + t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 0", "*", r) + } + y = -1 + r = x * y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775808 %s -1 = %d, want -9223372036854775808", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("-9223372036854775808 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775808 %s 1 = %d, want -9223372036854775808", "*", r) + } + y = 4294967296 + r = x * y + if r != 0 { + t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 0", "*", r) + } + y = 9223372036854775806 + r = x * y + if r != 0 { + t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want 0", "*", r) + } + y = 9223372036854775807 + r = x * y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -9223372036854775808", "*", r) + } + x = -9223372036854775807 + y = -9223372036854775808 + r = x * y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r) + } + y = -9223372036854775807 + r = x * y + if r != 1 { + t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 1", "*", r) + } + y = -4294967296 + r = x * y + if r != -4294967296 { + t.Errorf("-9223372036854775807 %s -4294967296 = %d, want -4294967296", "*", r) + } + y = -1 + r = x * y + if r != 9223372036854775807 { + t.Errorf("-9223372036854775807 %s -1 = %d, want 
9223372036854775807", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("-9223372036854775807 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != -9223372036854775807 { + t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775807", "*", r) + } + y = 4294967296 + r = x * y + if r != 4294967296 { + t.Errorf("-9223372036854775807 %s 4294967296 = %d, want 4294967296", "*", r) + } + y = 9223372036854775806 + r = x * y + if r != 9223372036854775806 { + t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want 9223372036854775806", "*", r) + } + y = 9223372036854775807 + r = x * y + if r != -1 { + t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want -1", "*", r) + } + x = -4294967296 + y = -9223372036854775808 + r = x * y + if r != 0 { + t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 0", "*", r) + } + y = -9223372036854775807 + r = x * y + if r != -4294967296 { + t.Errorf("-4294967296 %s -9223372036854775807 = %d, want -4294967296", "*", r) + } + y = -4294967296 + r = x * y + if r != 0 { + t.Errorf("-4294967296 %s -4294967296 = %d, want 0", "*", r) + } + y = -1 + r = x * y + if r != 4294967296 { + t.Errorf("-4294967296 %s -1 = %d, want 4294967296", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("-4294967296 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != -4294967296 { + t.Errorf("-4294967296 %s 1 = %d, want -4294967296", "*", r) + } + y = 4294967296 + r = x * y + if r != 0 { + t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "*", r) + } + y = 9223372036854775806 + r = x * y + if r != 8589934592 { + t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 8589934592", "*", r) + } + y = 9223372036854775807 + r = x * y + if r != 4294967296 { + t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 4294967296", "*", r) + } + x = -1 + y = -9223372036854775808 + r = x * y + if r != -9223372036854775808 { + t.Errorf("-1 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r) + } + y = 
-9223372036854775807 + r = x * y + if r != 9223372036854775807 { + t.Errorf("-1 %s -9223372036854775807 = %d, want 9223372036854775807", "*", r) + } + y = -4294967296 + r = x * y + if r != 4294967296 { + t.Errorf("-1 %s -4294967296 = %d, want 4294967296", "*", r) + } + y = -1 + r = x * y + if r != 1 { + t.Errorf("-1 %s -1 = %d, want 1", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("-1 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", "*", r) + } + y = 4294967296 + r = x * y + if r != -4294967296 { + t.Errorf("-1 %s 4294967296 = %d, want -4294967296", "*", r) + } + y = 9223372036854775806 + r = x * y + if r != -9223372036854775806 { + t.Errorf("-1 %s 9223372036854775806 = %d, want -9223372036854775806", "*", r) + } + y = 9223372036854775807 + r = x * y + if r != -9223372036854775807 { + t.Errorf("-1 %s 9223372036854775807 = %d, want -9223372036854775807", "*", r) + } + x = 0 + y = -9223372036854775808 + r = x * y + if r != 0 { + t.Errorf("0 %s -9223372036854775808 = %d, want 0", "*", r) + } + y = -9223372036854775807 + r = x * y + if r != 0 { + t.Errorf("0 %s -9223372036854775807 = %d, want 0", "*", r) + } + y = -4294967296 + r = x * y + if r != 0 { + t.Errorf("0 %s -4294967296 = %d, want 0", "*", r) + } + y = -1 + r = x * y + if r != 0 { + t.Errorf("0 %s -1 = %d, want 0", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "*", r) + } + y = 4294967296 + r = x * y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", "*", r) + } + y = 9223372036854775806 + r = x * y + if r != 0 { + t.Errorf("0 %s 9223372036854775806 = %d, want 0", "*", r) + } + y = 9223372036854775807 + r = x * y + if r != 0 { + t.Errorf("0 %s 9223372036854775807 = %d, want 0", "*", r) + } + x = 1 + y = -9223372036854775808 + r = x * y + if r != -9223372036854775808 { + t.Errorf("1 %s -9223372036854775808 = %d, want 
-9223372036854775808", "*", r) + } + y = -9223372036854775807 + r = x * y + if r != -9223372036854775807 { + t.Errorf("1 %s -9223372036854775807 = %d, want -9223372036854775807", "*", r) + } + y = -4294967296 + r = x * y + if r != -4294967296 { + t.Errorf("1 %s -4294967296 = %d, want -4294967296", "*", r) + } + y = -1 + r = x * y + if r != -1 { + t.Errorf("1 %s -1 = %d, want -1", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("1 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 1 { + t.Errorf("1 %s 1 = %d, want 1", "*", r) + } + y = 4294967296 + r = x * y + if r != 4294967296 { + t.Errorf("1 %s 4294967296 = %d, want 4294967296", "*", r) + } + y = 9223372036854775806 + r = x * y + if r != 9223372036854775806 { + t.Errorf("1 %s 9223372036854775806 = %d, want 9223372036854775806", "*", r) + } + y = 9223372036854775807 + r = x * y + if r != 9223372036854775807 { + t.Errorf("1 %s 9223372036854775807 = %d, want 9223372036854775807", "*", r) + } + x = 4294967296 + y = -9223372036854775808 + r = x * y + if r != 0 { + t.Errorf("4294967296 %s -9223372036854775808 = %d, want 0", "*", r) + } + y = -9223372036854775807 + r = x * y + if r != 4294967296 { + t.Errorf("4294967296 %s -9223372036854775807 = %d, want 4294967296", "*", r) + } + y = -4294967296 + r = x * y + if r != 0 { + t.Errorf("4294967296 %s -4294967296 = %d, want 0", "*", r) + } + y = -1 + r = x * y + if r != -4294967296 { + t.Errorf("4294967296 %s -1 = %d, want -4294967296", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("4294967296 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 4294967296 { + t.Errorf("4294967296 %s 1 = %d, want 4294967296", "*", r) + } + y = 4294967296 + r = x * y + if r != 0 { + t.Errorf("4294967296 %s 4294967296 = %d, want 0", "*", r) + } + y = 9223372036854775806 + r = x * y + if r != -8589934592 { + t.Errorf("4294967296 %s 9223372036854775806 = %d, want -8589934592", "*", r) + } + y = 9223372036854775807 + r = x * y + if r != -4294967296 { + 
t.Errorf("4294967296 %s 9223372036854775807 = %d, want -4294967296", "*", r) + } + x = 9223372036854775806 + y = -9223372036854775808 + r = x * y + if r != 0 { + t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want 0", "*", r) + } + y = -9223372036854775807 + r = x * y + if r != 9223372036854775806 { + t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want 9223372036854775806", "*", r) + } + y = -4294967296 + r = x * y + if r != 8589934592 { + t.Errorf("9223372036854775806 %s -4294967296 = %d, want 8589934592", "*", r) + } + y = -1 + r = x * y + if r != -9223372036854775806 { + t.Errorf("9223372036854775806 %s -1 = %d, want -9223372036854775806", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("9223372036854775806 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 9223372036854775806 { + t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775806", "*", r) + } + y = 4294967296 + r = x * y + if r != -8589934592 { + t.Errorf("9223372036854775806 %s 4294967296 = %d, want -8589934592", "*", r) + } + y = 9223372036854775806 + r = x * y + if r != 4 { + t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 4", "*", r) + } + y = 9223372036854775807 + r = x * y + if r != -9223372036854775806 { + t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want -9223372036854775806", "*", r) + } + x = 9223372036854775807 + y = -9223372036854775808 + r = x * y + if r != -9223372036854775808 { + t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r) + } + y = -9223372036854775807 + r = x * y + if r != -1 { + t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want -1", "*", r) + } + y = -4294967296 + r = x * y + if r != 4294967296 { + t.Errorf("9223372036854775807 %s -4294967296 = %d, want 4294967296", "*", r) + } + y = -1 + r = x * y + if r != -9223372036854775807 { + t.Errorf("9223372036854775807 %s -1 = %d, want -9223372036854775807", "*", r) + } + y = 0 + r = x * y + if 
r != 0 { + t.Errorf("9223372036854775807 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 9223372036854775807 { + t.Errorf("9223372036854775807 %s 1 = %d, want 9223372036854775807", "*", r) + } + y = 4294967296 + r = x * y + if r != -4294967296 { + t.Errorf("9223372036854775807 %s 4294967296 = %d, want -4294967296", "*", r) + } + y = 9223372036854775806 + r = x * y + if r != -9223372036854775806 { + t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want -9223372036854775806", "*", r) + } + y = 9223372036854775807 + r = x * y + if r != 1 { + t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 1", "*", r) + } +} +func TestConstFoldint64mod(t *testing.T) { + var x, y, r int64 + x = -9223372036854775808 + y = -9223372036854775808 + r = x % y + if r != 0 { + t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "%", r) + } + y = -9223372036854775807 + r = x % y + if r != -1 { + t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want -1", "%", r) + } + y = -4294967296 + r = x % y + if r != 0 { + t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 0", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("-9223372036854775808 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "%", r) + } + y = 4294967296 + r = x % y + if r != 0 { + t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 0", "%", r) + } + y = 9223372036854775806 + r = x % y + if r != -2 { + t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want -2", "%", r) + } + y = 9223372036854775807 + r = x % y + if r != -1 { + t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -1", "%", r) + } + x = -9223372036854775807 + y = -9223372036854775808 + r = x % y + if r != -9223372036854775807 { + t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want -9223372036854775807", "%", r) + } + y = -9223372036854775807 + r = x % y + if r != 0 { + 
t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 0", "%", r) + } + y = -4294967296 + r = x % y + if r != -4294967295 { + t.Errorf("-9223372036854775807 %s -4294967296 = %d, want -4294967295", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("-9223372036854775807 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("-9223372036854775807 %s 1 = %d, want 0", "%", r) + } + y = 4294967296 + r = x % y + if r != -4294967295 { + t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -4294967295", "%", r) + } + y = 9223372036854775806 + r = x % y + if r != -1 { + t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want -1", "%", r) + } + y = 9223372036854775807 + r = x % y + if r != 0 { + t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want 0", "%", r) + } + x = -4294967296 + y = -9223372036854775808 + r = x % y + if r != -4294967296 { + t.Errorf("-4294967296 %s -9223372036854775808 = %d, want -4294967296", "%", r) + } + y = -9223372036854775807 + r = x % y + if r != -4294967296 { + t.Errorf("-4294967296 %s -9223372036854775807 = %d, want -4294967296", "%", r) + } + y = -4294967296 + r = x % y + if r != 0 { + t.Errorf("-4294967296 %s -4294967296 = %d, want 0", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("-4294967296 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("-4294967296 %s 1 = %d, want 0", "%", r) + } + y = 4294967296 + r = x % y + if r != 0 { + t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "%", r) + } + y = 9223372036854775806 + r = x % y + if r != -4294967296 { + t.Errorf("-4294967296 %s 9223372036854775806 = %d, want -4294967296", "%", r) + } + y = 9223372036854775807 + r = x % y + if r != -4294967296 { + t.Errorf("-4294967296 %s 9223372036854775807 = %d, want -4294967296", "%", r) + } + x = -1 + y = -9223372036854775808 + r = x % y + if r != -1 { + t.Errorf("-1 %s -9223372036854775808 = %d, want -1", "%", r) + } + y = -9223372036854775807 + r = 
x % y + if r != -1 { + t.Errorf("-1 %s -9223372036854775807 = %d, want -1", "%", r) + } + y = -4294967296 + r = x % y + if r != -1 { + t.Errorf("-1 %s -4294967296 = %d, want -1", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("-1 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("-1 %s 1 = %d, want 0", "%", r) + } + y = 4294967296 + r = x % y + if r != -1 { + t.Errorf("-1 %s 4294967296 = %d, want -1", "%", r) + } + y = 9223372036854775806 + r = x % y + if r != -1 { + t.Errorf("-1 %s 9223372036854775806 = %d, want -1", "%", r) + } + y = 9223372036854775807 + r = x % y + if r != -1 { + t.Errorf("-1 %s 9223372036854775807 = %d, want -1", "%", r) + } + x = 0 + y = -9223372036854775808 + r = x % y + if r != 0 { + t.Errorf("0 %s -9223372036854775808 = %d, want 0", "%", r) + } + y = -9223372036854775807 + r = x % y + if r != 0 { + t.Errorf("0 %s -9223372036854775807 = %d, want 0", "%", r) + } + y = -4294967296 + r = x % y + if r != 0 { + t.Errorf("0 %s -4294967296 = %d, want 0", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("0 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "%", r) + } + y = 4294967296 + r = x % y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", "%", r) + } + y = 9223372036854775806 + r = x % y + if r != 0 { + t.Errorf("0 %s 9223372036854775806 = %d, want 0", "%", r) + } + y = 9223372036854775807 + r = x % y + if r != 0 { + t.Errorf("0 %s 9223372036854775807 = %d, want 0", "%", r) + } + x = 1 + y = -9223372036854775808 + r = x % y + if r != 1 { + t.Errorf("1 %s -9223372036854775808 = %d, want 1", "%", r) + } + y = -9223372036854775807 + r = x % y + if r != 1 { + t.Errorf("1 %s -9223372036854775807 = %d, want 1", "%", r) + } + y = -4294967296 + r = x % y + if r != 1 { + t.Errorf("1 %s -4294967296 = %d, want 1", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("1 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + 
t.Errorf("1 %s 1 = %d, want 0", "%", r) + } + y = 4294967296 + r = x % y + if r != 1 { + t.Errorf("1 %s 4294967296 = %d, want 1", "%", r) + } + y = 9223372036854775806 + r = x % y + if r != 1 { + t.Errorf("1 %s 9223372036854775806 = %d, want 1", "%", r) + } + y = 9223372036854775807 + r = x % y + if r != 1 { + t.Errorf("1 %s 9223372036854775807 = %d, want 1", "%", r) + } + x = 4294967296 + y = -9223372036854775808 + r = x % y + if r != 4294967296 { + t.Errorf("4294967296 %s -9223372036854775808 = %d, want 4294967296", "%", r) + } + y = -9223372036854775807 + r = x % y + if r != 4294967296 { + t.Errorf("4294967296 %s -9223372036854775807 = %d, want 4294967296", "%", r) + } + y = -4294967296 + r = x % y + if r != 0 { + t.Errorf("4294967296 %s -4294967296 = %d, want 0", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("4294967296 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("4294967296 %s 1 = %d, want 0", "%", r) + } + y = 4294967296 + r = x % y + if r != 0 { + t.Errorf("4294967296 %s 4294967296 = %d, want 0", "%", r) + } + y = 9223372036854775806 + r = x % y + if r != 4294967296 { + t.Errorf("4294967296 %s 9223372036854775806 = %d, want 4294967296", "%", r) + } + y = 9223372036854775807 + r = x % y + if r != 4294967296 { + t.Errorf("4294967296 %s 9223372036854775807 = %d, want 4294967296", "%", r) + } + x = 9223372036854775806 + y = -9223372036854775808 + r = x % y + if r != 9223372036854775806 { + t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want 9223372036854775806", "%", r) + } + y = -9223372036854775807 + r = x % y + if r != 9223372036854775806 { + t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want 9223372036854775806", "%", r) + } + y = -4294967296 + r = x % y + if r != 4294967294 { + t.Errorf("9223372036854775806 %s -4294967296 = %d, want 4294967294", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("9223372036854775806 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 
0 { + t.Errorf("9223372036854775806 %s 1 = %d, want 0", "%", r) + } + y = 4294967296 + r = x % y + if r != 4294967294 { + t.Errorf("9223372036854775806 %s 4294967296 = %d, want 4294967294", "%", r) + } + y = 9223372036854775806 + r = x % y + if r != 0 { + t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 0", "%", r) + } + y = 9223372036854775807 + r = x % y + if r != 9223372036854775806 { + t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want 9223372036854775806", "%", r) + } + x = 9223372036854775807 + y = -9223372036854775808 + r = x % y + if r != 9223372036854775807 { + t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want 9223372036854775807", "%", r) + } + y = -9223372036854775807 + r = x % y + if r != 0 { + t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want 0", "%", r) + } + y = -4294967296 + r = x % y + if r != 4294967295 { + t.Errorf("9223372036854775807 %s -4294967296 = %d, want 4294967295", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("9223372036854775807 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("9223372036854775807 %s 1 = %d, want 0", "%", r) + } + y = 4294967296 + r = x % y + if r != 4294967295 { + t.Errorf("9223372036854775807 %s 4294967296 = %d, want 4294967295", "%", r) + } + y = 9223372036854775806 + r = x % y + if r != 1 { + t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want 1", "%", r) + } + y = 9223372036854775807 + r = x % y + if r != 0 { + t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 0", "%", r) + } +} +func TestConstFolduint32add(t *testing.T) { + var x, y, r uint32 + x = 0 + y = 0 + r = x + y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "+", r) + } + y = 1 + r = x + y + if r != 1 { + t.Errorf("0 %s 1 = %d, want 1", "+", r) + } + y = 4294967295 + r = x + y + if r != 4294967295 { + t.Errorf("0 %s 4294967295 = %d, want 4294967295", "+", r) + } + x = 1 + y = 0 + r = x + y + if r != 1 { + t.Errorf("1 %s 0 = %d, 
want 1", "+", r) + } + y = 1 + r = x + y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "+", r) + } + y = 4294967295 + r = x + y + if r != 0 { + t.Errorf("1 %s 4294967295 = %d, want 0", "+", r) + } + x = 4294967295 + y = 0 + r = x + y + if r != 4294967295 { + t.Errorf("4294967295 %s 0 = %d, want 4294967295", "+", r) + } + y = 1 + r = x + y + if r != 0 { + t.Errorf("4294967295 %s 1 = %d, want 0", "+", r) + } + y = 4294967295 + r = x + y + if r != 4294967294 { + t.Errorf("4294967295 %s 4294967295 = %d, want 4294967294", "+", r) + } +} +func TestConstFolduint32sub(t *testing.T) { + var x, y, r uint32 + x = 0 + y = 0 + r = x - y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "-", r) + } + y = 1 + r = x - y + if r != 4294967295 { + t.Errorf("0 %s 1 = %d, want 4294967295", "-", r) + } + y = 4294967295 + r = x - y + if r != 1 { + t.Errorf("0 %s 4294967295 = %d, want 1", "-", r) + } + x = 1 + y = 0 + r = x - y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "-", r) + } + y = 1 + r = x - y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", "-", r) + } + y = 4294967295 + r = x - y + if r != 2 { + t.Errorf("1 %s 4294967295 = %d, want 2", "-", r) + } + x = 4294967295 + y = 0 + r = x - y + if r != 4294967295 { + t.Errorf("4294967295 %s 0 = %d, want 4294967295", "-", r) + } + y = 1 + r = x - y + if r != 4294967294 { + t.Errorf("4294967295 %s 1 = %d, want 4294967294", "-", r) + } + y = 4294967295 + r = x - y + if r != 0 { + t.Errorf("4294967295 %s 4294967295 = %d, want 0", "-", r) + } +} +func TestConstFolduint32div(t *testing.T) { + var x, y, r uint32 + x = 0 + y = 1 + r = x / y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "/", r) + } + y = 4294967295 + r = x / y + if r != 0 { + t.Errorf("0 %s 4294967295 = %d, want 0", "/", r) + } + x = 1 + y = 1 + r = x / y + if r != 1 { + t.Errorf("1 %s 1 = %d, want 1", "/", r) + } + y = 4294967295 + r = x / y + if r != 0 { + t.Errorf("1 %s 4294967295 = %d, want 0", "/", r) + } + x = 4294967295 + y = 1 + r = x / y + if r != 4294967295 { + 
t.Errorf("4294967295 %s 1 = %d, want 4294967295", "/", r) + } + y = 4294967295 + r = x / y + if r != 1 { + t.Errorf("4294967295 %s 4294967295 = %d, want 1", "/", r) + } +} +func TestConstFolduint32mul(t *testing.T) { + var x, y, r uint32 + x = 0 + y = 0 + r = x * y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "*", r) + } + y = 4294967295 + r = x * y + if r != 0 { + t.Errorf("0 %s 4294967295 = %d, want 0", "*", r) + } + x = 1 + y = 0 + r = x * y + if r != 0 { + t.Errorf("1 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 1 { + t.Errorf("1 %s 1 = %d, want 1", "*", r) + } + y = 4294967295 + r = x * y + if r != 4294967295 { + t.Errorf("1 %s 4294967295 = %d, want 4294967295", "*", r) + } + x = 4294967295 + y = 0 + r = x * y + if r != 0 { + t.Errorf("4294967295 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 4294967295 { + t.Errorf("4294967295 %s 1 = %d, want 4294967295", "*", r) + } + y = 4294967295 + r = x * y + if r != 1 { + t.Errorf("4294967295 %s 4294967295 = %d, want 1", "*", r) + } +} +func TestConstFolduint32mod(t *testing.T) { + var x, y, r uint32 + x = 0 + y = 1 + r = x % y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "%", r) + } + y = 4294967295 + r = x % y + if r != 0 { + t.Errorf("0 %s 4294967295 = %d, want 0", "%", r) + } + x = 1 + y = 1 + r = x % y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", "%", r) + } + y = 4294967295 + r = x % y + if r != 1 { + t.Errorf("1 %s 4294967295 = %d, want 1", "%", r) + } + x = 4294967295 + y = 1 + r = x % y + if r != 0 { + t.Errorf("4294967295 %s 1 = %d, want 0", "%", r) + } + y = 4294967295 + r = x % y + if r != 0 { + t.Errorf("4294967295 %s 4294967295 = %d, want 0", "%", r) + } +} +func TestConstFoldint32add(t *testing.T) { + var x, y, r int32 + x = -2147483648 + y = -2147483648 + r = x + y + if r != 0 { + t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "+", r) + } + y = -2147483647 + r = x + y + if r != 1 
{ + t.Errorf("-2147483648 %s -2147483647 = %d, want 1", "+", r) + } + y = -1 + r = x + y + if r != 2147483647 { + t.Errorf("-2147483648 %s -1 = %d, want 2147483647", "+", r) + } + y = 0 + r = x + y + if r != -2147483648 { + t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "+", r) + } + y = 1 + r = x + y + if r != -2147483647 { + t.Errorf("-2147483648 %s 1 = %d, want -2147483647", "+", r) + } + y = 2147483647 + r = x + y + if r != -1 { + t.Errorf("-2147483648 %s 2147483647 = %d, want -1", "+", r) + } + x = -2147483647 + y = -2147483648 + r = x + y + if r != 1 { + t.Errorf("-2147483647 %s -2147483648 = %d, want 1", "+", r) + } + y = -2147483647 + r = x + y + if r != 2 { + t.Errorf("-2147483647 %s -2147483647 = %d, want 2", "+", r) + } + y = -1 + r = x + y + if r != -2147483648 { + t.Errorf("-2147483647 %s -1 = %d, want -2147483648", "+", r) + } + y = 0 + r = x + y + if r != -2147483647 { + t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "+", r) + } + y = 1 + r = x + y + if r != -2147483646 { + t.Errorf("-2147483647 %s 1 = %d, want -2147483646", "+", r) + } + y = 2147483647 + r = x + y + if r != 0 { + t.Errorf("-2147483647 %s 2147483647 = %d, want 0", "+", r) + } + x = -1 + y = -2147483648 + r = x + y + if r != 2147483647 { + t.Errorf("-1 %s -2147483648 = %d, want 2147483647", "+", r) + } + y = -2147483647 + r = x + y + if r != -2147483648 { + t.Errorf("-1 %s -2147483647 = %d, want -2147483648", "+", r) + } + y = -1 + r = x + y + if r != -2 { + t.Errorf("-1 %s -1 = %d, want -2", "+", r) + } + y = 0 + r = x + y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "+", r) + } + y = 1 + r = x + y + if r != 0 { + t.Errorf("-1 %s 1 = %d, want 0", "+", r) + } + y = 2147483647 + r = x + y + if r != 2147483646 { + t.Errorf("-1 %s 2147483647 = %d, want 2147483646", "+", r) + } + x = 0 + y = -2147483648 + r = x + y + if r != -2147483648 { + t.Errorf("0 %s -2147483648 = %d, want -2147483648", "+", r) + } + y = -2147483647 + r = x + y + if r != -2147483647 { + t.Errorf("0 %s 
-2147483647 = %d, want -2147483647", "+", r) + } + y = -1 + r = x + y + if r != -1 { + t.Errorf("0 %s -1 = %d, want -1", "+", r) + } + y = 0 + r = x + y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "+", r) + } + y = 1 + r = x + y + if r != 1 { + t.Errorf("0 %s 1 = %d, want 1", "+", r) + } + y = 2147483647 + r = x + y + if r != 2147483647 { + t.Errorf("0 %s 2147483647 = %d, want 2147483647", "+", r) + } + x = 1 + y = -2147483648 + r = x + y + if r != -2147483647 { + t.Errorf("1 %s -2147483648 = %d, want -2147483647", "+", r) + } + y = -2147483647 + r = x + y + if r != -2147483646 { + t.Errorf("1 %s -2147483647 = %d, want -2147483646", "+", r) + } + y = -1 + r = x + y + if r != 0 { + t.Errorf("1 %s -1 = %d, want 0", "+", r) + } + y = 0 + r = x + y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "+", r) + } + y = 1 + r = x + y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "+", r) + } + y = 2147483647 + r = x + y + if r != -2147483648 { + t.Errorf("1 %s 2147483647 = %d, want -2147483648", "+", r) + } + x = 2147483647 + y = -2147483648 + r = x + y + if r != -1 { + t.Errorf("2147483647 %s -2147483648 = %d, want -1", "+", r) + } + y = -2147483647 + r = x + y + if r != 0 { + t.Errorf("2147483647 %s -2147483647 = %d, want 0", "+", r) + } + y = -1 + r = x + y + if r != 2147483646 { + t.Errorf("2147483647 %s -1 = %d, want 2147483646", "+", r) + } + y = 0 + r = x + y + if r != 2147483647 { + t.Errorf("2147483647 %s 0 = %d, want 2147483647", "+", r) + } + y = 1 + r = x + y + if r != -2147483648 { + t.Errorf("2147483647 %s 1 = %d, want -2147483648", "+", r) + } + y = 2147483647 + r = x + y + if r != -2 { + t.Errorf("2147483647 %s 2147483647 = %d, want -2", "+", r) + } +} +func TestConstFoldint32sub(t *testing.T) { + var x, y, r int32 + x = -2147483648 + y = -2147483648 + r = x - y + if r != 0 { + t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "-", r) + } + y = -2147483647 + r = x - y + if r != -1 { + t.Errorf("-2147483648 %s -2147483647 = %d, want -1", "-", r) + } + y 
= -1 + r = x - y + if r != -2147483647 { + t.Errorf("-2147483648 %s -1 = %d, want -2147483647", "-", r) + } + y = 0 + r = x - y + if r != -2147483648 { + t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "-", r) + } + y = 1 + r = x - y + if r != 2147483647 { + t.Errorf("-2147483648 %s 1 = %d, want 2147483647", "-", r) + } + y = 2147483647 + r = x - y + if r != 1 { + t.Errorf("-2147483648 %s 2147483647 = %d, want 1", "-", r) + } + x = -2147483647 + y = -2147483648 + r = x - y + if r != 1 { + t.Errorf("-2147483647 %s -2147483648 = %d, want 1", "-", r) + } + y = -2147483647 + r = x - y + if r != 0 { + t.Errorf("-2147483647 %s -2147483647 = %d, want 0", "-", r) + } + y = -1 + r = x - y + if r != -2147483646 { + t.Errorf("-2147483647 %s -1 = %d, want -2147483646", "-", r) + } + y = 0 + r = x - y + if r != -2147483647 { + t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "-", r) + } + y = 1 + r = x - y + if r != -2147483648 { + t.Errorf("-2147483647 %s 1 = %d, want -2147483648", "-", r) + } + y = 2147483647 + r = x - y + if r != 2 { + t.Errorf("-2147483647 %s 2147483647 = %d, want 2", "-", r) + } + x = -1 + y = -2147483648 + r = x - y + if r != 2147483647 { + t.Errorf("-1 %s -2147483648 = %d, want 2147483647", "-", r) + } + y = -2147483647 + r = x - y + if r != 2147483646 { + t.Errorf("-1 %s -2147483647 = %d, want 2147483646", "-", r) + } + y = -1 + r = x - y + if r != 0 { + t.Errorf("-1 %s -1 = %d, want 0", "-", r) + } + y = 0 + r = x - y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "-", r) + } + y = 1 + r = x - y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "-", r) + } + y = 2147483647 + r = x - y + if r != -2147483648 { + t.Errorf("-1 %s 2147483647 = %d, want -2147483648", "-", r) + } + x = 0 + y = -2147483648 + r = x - y + if r != -2147483648 { + t.Errorf("0 %s -2147483648 = %d, want -2147483648", "-", r) + } + y = -2147483647 + r = x - y + if r != 2147483647 { + t.Errorf("0 %s -2147483647 = %d, want 2147483647", "-", r) + } + y = -1 + r = x - y + if 
r != 1 { + t.Errorf("0 %s -1 = %d, want 1", "-", r) + } + y = 0 + r = x - y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "-", r) + } + y = 1 + r = x - y + if r != -1 { + t.Errorf("0 %s 1 = %d, want -1", "-", r) + } + y = 2147483647 + r = x - y + if r != -2147483647 { + t.Errorf("0 %s 2147483647 = %d, want -2147483647", "-", r) + } + x = 1 + y = -2147483648 + r = x - y + if r != -2147483647 { + t.Errorf("1 %s -2147483648 = %d, want -2147483647", "-", r) + } + y = -2147483647 + r = x - y + if r != -2147483648 { + t.Errorf("1 %s -2147483647 = %d, want -2147483648", "-", r) + } + y = -1 + r = x - y + if r != 2 { + t.Errorf("1 %s -1 = %d, want 2", "-", r) + } + y = 0 + r = x - y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "-", r) + } + y = 1 + r = x - y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", "-", r) + } + y = 2147483647 + r = x - y + if r != -2147483646 { + t.Errorf("1 %s 2147483647 = %d, want -2147483646", "-", r) + } + x = 2147483647 + y = -2147483648 + r = x - y + if r != -1 { + t.Errorf("2147483647 %s -2147483648 = %d, want -1", "-", r) + } + y = -2147483647 + r = x - y + if r != -2 { + t.Errorf("2147483647 %s -2147483647 = %d, want -2", "-", r) + } + y = -1 + r = x - y + if r != -2147483648 { + t.Errorf("2147483647 %s -1 = %d, want -2147483648", "-", r) + } + y = 0 + r = x - y + if r != 2147483647 { + t.Errorf("2147483647 %s 0 = %d, want 2147483647", "-", r) + } + y = 1 + r = x - y + if r != 2147483646 { + t.Errorf("2147483647 %s 1 = %d, want 2147483646", "-", r) + } + y = 2147483647 + r = x - y + if r != 0 { + t.Errorf("2147483647 %s 2147483647 = %d, want 0", "-", r) + } +} +func TestConstFoldint32div(t *testing.T) { + var x, y, r int32 + x = -2147483648 + y = -2147483648 + r = x / y + if r != 1 { + t.Errorf("-2147483648 %s -2147483648 = %d, want 1", "/", r) + } + y = -2147483647 + r = x / y + if r != 1 { + t.Errorf("-2147483648 %s -2147483647 = %d, want 1", "/", r) + } + y = -1 + r = x / y + if r != -2147483648 { + t.Errorf("-2147483648 %s -1 = 
%d, want -2147483648", "/", r) + } + y = 1 + r = x / y + if r != -2147483648 { + t.Errorf("-2147483648 %s 1 = %d, want -2147483648", "/", r) + } + y = 2147483647 + r = x / y + if r != -1 { + t.Errorf("-2147483648 %s 2147483647 = %d, want -1", "/", r) + } + x = -2147483647 + y = -2147483648 + r = x / y + if r != 0 { + t.Errorf("-2147483647 %s -2147483648 = %d, want 0", "/", r) + } + y = -2147483647 + r = x / y + if r != 1 { + t.Errorf("-2147483647 %s -2147483647 = %d, want 1", "/", r) + } + y = -1 + r = x / y + if r != 2147483647 { + t.Errorf("-2147483647 %s -1 = %d, want 2147483647", "/", r) + } + y = 1 + r = x / y + if r != -2147483647 { + t.Errorf("-2147483647 %s 1 = %d, want -2147483647", "/", r) + } + y = 2147483647 + r = x / y + if r != -1 { + t.Errorf("-2147483647 %s 2147483647 = %d, want -1", "/", r) + } + x = -1 + y = -2147483648 + r = x / y + if r != 0 { + t.Errorf("-1 %s -2147483648 = %d, want 0", "/", r) + } + y = -2147483647 + r = x / y + if r != 0 { + t.Errorf("-1 %s -2147483647 = %d, want 0", "/", r) + } + y = -1 + r = x / y + if r != 1 { + t.Errorf("-1 %s -1 = %d, want 1", "/", r) + } + y = 1 + r = x / y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", "/", r) + } + y = 2147483647 + r = x / y + if r != 0 { + t.Errorf("-1 %s 2147483647 = %d, want 0", "/", r) + } + x = 0 + y = -2147483648 + r = x / y + if r != 0 { + t.Errorf("0 %s -2147483648 = %d, want 0", "/", r) + } + y = -2147483647 + r = x / y + if r != 0 { + t.Errorf("0 %s -2147483647 = %d, want 0", "/", r) + } + y = -1 + r = x / y + if r != 0 { + t.Errorf("0 %s -1 = %d, want 0", "/", r) + } + y = 1 + r = x / y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "/", r) + } + y = 2147483647 + r = x / y + if r != 0 { + t.Errorf("0 %s 2147483647 = %d, want 0", "/", r) + } + x = 1 + y = -2147483648 + r = x / y + if r != 0 { + t.Errorf("1 %s -2147483648 = %d, want 0", "/", r) + } + y = -2147483647 + r = x / y + if r != 0 { + t.Errorf("1 %s -2147483647 = %d, want 0", "/", r) + } + y = -1 + r = x / y + 
if r != -1 { + t.Errorf("1 %s -1 = %d, want -1", "/", r) + } + y = 1 + r = x / y + if r != 1 { + t.Errorf("1 %s 1 = %d, want 1", "/", r) + } + y = 2147483647 + r = x / y + if r != 0 { + t.Errorf("1 %s 2147483647 = %d, want 0", "/", r) + } + x = 2147483647 + y = -2147483648 + r = x / y + if r != 0 { + t.Errorf("2147483647 %s -2147483648 = %d, want 0", "/", r) + } + y = -2147483647 + r = x / y + if r != -1 { + t.Errorf("2147483647 %s -2147483647 = %d, want -1", "/", r) + } + y = -1 + r = x / y + if r != -2147483647 { + t.Errorf("2147483647 %s -1 = %d, want -2147483647", "/", r) + } + y = 1 + r = x / y + if r != 2147483647 { + t.Errorf("2147483647 %s 1 = %d, want 2147483647", "/", r) + } + y = 2147483647 + r = x / y + if r != 1 { + t.Errorf("2147483647 %s 2147483647 = %d, want 1", "/", r) + } +} +func TestConstFoldint32mul(t *testing.T) { + var x, y, r int32 + x = -2147483648 + y = -2147483648 + r = x * y + if r != 0 { + t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "*", r) + } + y = -2147483647 + r = x * y + if r != -2147483648 { + t.Errorf("-2147483648 %s -2147483647 = %d, want -2147483648", "*", r) + } + y = -1 + r = x * y + if r != -2147483648 { + t.Errorf("-2147483648 %s -1 = %d, want -2147483648", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("-2147483648 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != -2147483648 { + t.Errorf("-2147483648 %s 1 = %d, want -2147483648", "*", r) + } + y = 2147483647 + r = x * y + if r != -2147483648 { + t.Errorf("-2147483648 %s 2147483647 = %d, want -2147483648", "*", r) + } + x = -2147483647 + y = -2147483648 + r = x * y + if r != -2147483648 { + t.Errorf("-2147483647 %s -2147483648 = %d, want -2147483648", "*", r) + } + y = -2147483647 + r = x * y + if r != 1 { + t.Errorf("-2147483647 %s -2147483647 = %d, want 1", "*", r) + } + y = -1 + r = x * y + if r != 2147483647 { + t.Errorf("-2147483647 %s -1 = %d, want 2147483647", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("-2147483647 %s 0 = 
%d, want 0", "*", r) + } + y = 1 + r = x * y + if r != -2147483647 { + t.Errorf("-2147483647 %s 1 = %d, want -2147483647", "*", r) + } + y = 2147483647 + r = x * y + if r != -1 { + t.Errorf("-2147483647 %s 2147483647 = %d, want -1", "*", r) + } + x = -1 + y = -2147483648 + r = x * y + if r != -2147483648 { + t.Errorf("-1 %s -2147483648 = %d, want -2147483648", "*", r) + } + y = -2147483647 + r = x * y + if r != 2147483647 { + t.Errorf("-1 %s -2147483647 = %d, want 2147483647", "*", r) + } + y = -1 + r = x * y + if r != 1 { + t.Errorf("-1 %s -1 = %d, want 1", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("-1 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", "*", r) + } + y = 2147483647 + r = x * y + if r != -2147483647 { + t.Errorf("-1 %s 2147483647 = %d, want -2147483647", "*", r) + } + x = 0 + y = -2147483648 + r = x * y + if r != 0 { + t.Errorf("0 %s -2147483648 = %d, want 0", "*", r) + } + y = -2147483647 + r = x * y + if r != 0 { + t.Errorf("0 %s -2147483647 = %d, want 0", "*", r) + } + y = -1 + r = x * y + if r != 0 { + t.Errorf("0 %s -1 = %d, want 0", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "*", r) + } + y = 2147483647 + r = x * y + if r != 0 { + t.Errorf("0 %s 2147483647 = %d, want 0", "*", r) + } + x = 1 + y = -2147483648 + r = x * y + if r != -2147483648 { + t.Errorf("1 %s -2147483648 = %d, want -2147483648", "*", r) + } + y = -2147483647 + r = x * y + if r != -2147483647 { + t.Errorf("1 %s -2147483647 = %d, want -2147483647", "*", r) + } + y = -1 + r = x * y + if r != -1 { + t.Errorf("1 %s -1 = %d, want -1", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("1 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 1 { + t.Errorf("1 %s 1 = %d, want 1", "*", r) + } + y = 2147483647 + r = x * y + if r != 2147483647 { + t.Errorf("1 %s 2147483647 = %d, want 
2147483647", "*", r) + } + x = 2147483647 + y = -2147483648 + r = x * y + if r != -2147483648 { + t.Errorf("2147483647 %s -2147483648 = %d, want -2147483648", "*", r) + } + y = -2147483647 + r = x * y + if r != -1 { + t.Errorf("2147483647 %s -2147483647 = %d, want -1", "*", r) + } + y = -1 + r = x * y + if r != -2147483647 { + t.Errorf("2147483647 %s -1 = %d, want -2147483647", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("2147483647 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 2147483647 { + t.Errorf("2147483647 %s 1 = %d, want 2147483647", "*", r) + } + y = 2147483647 + r = x * y + if r != 1 { + t.Errorf("2147483647 %s 2147483647 = %d, want 1", "*", r) + } +} +func TestConstFoldint32mod(t *testing.T) { + var x, y, r int32 + x = -2147483648 + y = -2147483648 + r = x % y + if r != 0 { + t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "%", r) + } + y = -2147483647 + r = x % y + if r != -1 { + t.Errorf("-2147483648 %s -2147483647 = %d, want -1", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("-2147483648 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("-2147483648 %s 1 = %d, want 0", "%", r) + } + y = 2147483647 + r = x % y + if r != -1 { + t.Errorf("-2147483648 %s 2147483647 = %d, want -1", "%", r) + } + x = -2147483647 + y = -2147483648 + r = x % y + if r != -2147483647 { + t.Errorf("-2147483647 %s -2147483648 = %d, want -2147483647", "%", r) + } + y = -2147483647 + r = x % y + if r != 0 { + t.Errorf("-2147483647 %s -2147483647 = %d, want 0", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("-2147483647 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("-2147483647 %s 1 = %d, want 0", "%", r) + } + y = 2147483647 + r = x % y + if r != 0 { + t.Errorf("-2147483647 %s 2147483647 = %d, want 0", "%", r) + } + x = -1 + y = -2147483648 + r = x % y + if r != -1 { + t.Errorf("-1 %s -2147483648 = %d, want -1", "%", r) + } + y = -2147483647 + r = x % y + if r != -1 { 
+ t.Errorf("-1 %s -2147483647 = %d, want -1", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("-1 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("-1 %s 1 = %d, want 0", "%", r) + } + y = 2147483647 + r = x % y + if r != -1 { + t.Errorf("-1 %s 2147483647 = %d, want -1", "%", r) + } + x = 0 + y = -2147483648 + r = x % y + if r != 0 { + t.Errorf("0 %s -2147483648 = %d, want 0", "%", r) + } + y = -2147483647 + r = x % y + if r != 0 { + t.Errorf("0 %s -2147483647 = %d, want 0", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("0 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "%", r) + } + y = 2147483647 + r = x % y + if r != 0 { + t.Errorf("0 %s 2147483647 = %d, want 0", "%", r) + } + x = 1 + y = -2147483648 + r = x % y + if r != 1 { + t.Errorf("1 %s -2147483648 = %d, want 1", "%", r) + } + y = -2147483647 + r = x % y + if r != 1 { + t.Errorf("1 %s -2147483647 = %d, want 1", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("1 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", "%", r) + } + y = 2147483647 + r = x % y + if r != 1 { + t.Errorf("1 %s 2147483647 = %d, want 1", "%", r) + } + x = 2147483647 + y = -2147483648 + r = x % y + if r != 2147483647 { + t.Errorf("2147483647 %s -2147483648 = %d, want 2147483647", "%", r) + } + y = -2147483647 + r = x % y + if r != 0 { + t.Errorf("2147483647 %s -2147483647 = %d, want 0", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("2147483647 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("2147483647 %s 1 = %d, want 0", "%", r) + } + y = 2147483647 + r = x % y + if r != 0 { + t.Errorf("2147483647 %s 2147483647 = %d, want 0", "%", r) + } +} +func TestConstFolduint16add(t *testing.T) { + var x, y, r uint16 + x = 0 + y = 0 + r = x + y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "+", r) + } + y = 1 + r = x + y + if r != 1 { + 
t.Errorf("0 %s 1 = %d, want 1", "+", r) + } + y = 65535 + r = x + y + if r != 65535 { + t.Errorf("0 %s 65535 = %d, want 65535", "+", r) + } + x = 1 + y = 0 + r = x + y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "+", r) + } + y = 1 + r = x + y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "+", r) + } + y = 65535 + r = x + y + if r != 0 { + t.Errorf("1 %s 65535 = %d, want 0", "+", r) + } + x = 65535 + y = 0 + r = x + y + if r != 65535 { + t.Errorf("65535 %s 0 = %d, want 65535", "+", r) + } + y = 1 + r = x + y + if r != 0 { + t.Errorf("65535 %s 1 = %d, want 0", "+", r) + } + y = 65535 + r = x + y + if r != 65534 { + t.Errorf("65535 %s 65535 = %d, want 65534", "+", r) + } +} +func TestConstFolduint16sub(t *testing.T) { + var x, y, r uint16 + x = 0 + y = 0 + r = x - y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "-", r) + } + y = 1 + r = x - y + if r != 65535 { + t.Errorf("0 %s 1 = %d, want 65535", "-", r) + } + y = 65535 + r = x - y + if r != 1 { + t.Errorf("0 %s 65535 = %d, want 1", "-", r) + } + x = 1 + y = 0 + r = x - y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "-", r) + } + y = 1 + r = x - y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", "-", r) + } + y = 65535 + r = x - y + if r != 2 { + t.Errorf("1 %s 65535 = %d, want 2", "-", r) + } + x = 65535 + y = 0 + r = x - y + if r != 65535 { + t.Errorf("65535 %s 0 = %d, want 65535", "-", r) + } + y = 1 + r = x - y + if r != 65534 { + t.Errorf("65535 %s 1 = %d, want 65534", "-", r) + } + y = 65535 + r = x - y + if r != 0 { + t.Errorf("65535 %s 65535 = %d, want 0", "-", r) + } +} +func TestConstFolduint16div(t *testing.T) { + var x, y, r uint16 + x = 0 + y = 1 + r = x / y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "/", r) + } + y = 65535 + r = x / y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", "/", r) + } + x = 1 + y = 1 + r = x / y + if r != 1 { + t.Errorf("1 %s 1 = %d, want 1", "/", r) + } + y = 65535 + r = x / y + if r != 0 { + t.Errorf("1 %s 65535 = %d, want 0", "/", r) + } + x = 65535 + y = 
1 + r = x / y + if r != 65535 { + t.Errorf("65535 %s 1 = %d, want 65535", "/", r) + } + y = 65535 + r = x / y + if r != 1 { + t.Errorf("65535 %s 65535 = %d, want 1", "/", r) + } +} +func TestConstFolduint16mul(t *testing.T) { + var x, y, r uint16 + x = 0 + y = 0 + r = x * y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "*", r) + } + y = 65535 + r = x * y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", "*", r) + } + x = 1 + y = 0 + r = x * y + if r != 0 { + t.Errorf("1 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 1 { + t.Errorf("1 %s 1 = %d, want 1", "*", r) + } + y = 65535 + r = x * y + if r != 65535 { + t.Errorf("1 %s 65535 = %d, want 65535", "*", r) + } + x = 65535 + y = 0 + r = x * y + if r != 0 { + t.Errorf("65535 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 65535 { + t.Errorf("65535 %s 1 = %d, want 65535", "*", r) + } + y = 65535 + r = x * y + if r != 1 { + t.Errorf("65535 %s 65535 = %d, want 1", "*", r) + } +} +func TestConstFolduint16mod(t *testing.T) { + var x, y, r uint16 + x = 0 + y = 1 + r = x % y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "%", r) + } + y = 65535 + r = x % y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", "%", r) + } + x = 1 + y = 1 + r = x % y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", "%", r) + } + y = 65535 + r = x % y + if r != 1 { + t.Errorf("1 %s 65535 = %d, want 1", "%", r) + } + x = 65535 + y = 1 + r = x % y + if r != 0 { + t.Errorf("65535 %s 1 = %d, want 0", "%", r) + } + y = 65535 + r = x % y + if r != 0 { + t.Errorf("65535 %s 65535 = %d, want 0", "%", r) + } +} +func TestConstFoldint16add(t *testing.T) { + var x, y, r int16 + x = -32768 + y = -32768 + r = x + y + if r != 0 { + t.Errorf("-32768 %s -32768 = %d, want 0", "+", r) + } + y = -32767 + r = x + y + if r != 1 { + t.Errorf("-32768 %s -32767 = %d, want 1", "+", r) + } + y = -1 + r = x + y + if r != 32767 { + t.Errorf("-32768 %s -1 = %d, 
want 32767", "+", r) + } + y = 0 + r = x + y + if r != -32768 { + t.Errorf("-32768 %s 0 = %d, want -32768", "+", r) + } + y = 1 + r = x + y + if r != -32767 { + t.Errorf("-32768 %s 1 = %d, want -32767", "+", r) + } + y = 32766 + r = x + y + if r != -2 { + t.Errorf("-32768 %s 32766 = %d, want -2", "+", r) + } + y = 32767 + r = x + y + if r != -1 { + t.Errorf("-32768 %s 32767 = %d, want -1", "+", r) + } + x = -32767 + y = -32768 + r = x + y + if r != 1 { + t.Errorf("-32767 %s -32768 = %d, want 1", "+", r) + } + y = -32767 + r = x + y + if r != 2 { + t.Errorf("-32767 %s -32767 = %d, want 2", "+", r) + } + y = -1 + r = x + y + if r != -32768 { + t.Errorf("-32767 %s -1 = %d, want -32768", "+", r) + } + y = 0 + r = x + y + if r != -32767 { + t.Errorf("-32767 %s 0 = %d, want -32767", "+", r) + } + y = 1 + r = x + y + if r != -32766 { + t.Errorf("-32767 %s 1 = %d, want -32766", "+", r) + } + y = 32766 + r = x + y + if r != -1 { + t.Errorf("-32767 %s 32766 = %d, want -1", "+", r) + } + y = 32767 + r = x + y + if r != 0 { + t.Errorf("-32767 %s 32767 = %d, want 0", "+", r) + } + x = -1 + y = -32768 + r = x + y + if r != 32767 { + t.Errorf("-1 %s -32768 = %d, want 32767", "+", r) + } + y = -32767 + r = x + y + if r != -32768 { + t.Errorf("-1 %s -32767 = %d, want -32768", "+", r) + } + y = -1 + r = x + y + if r != -2 { + t.Errorf("-1 %s -1 = %d, want -2", "+", r) + } + y = 0 + r = x + y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "+", r) + } + y = 1 + r = x + y + if r != 0 { + t.Errorf("-1 %s 1 = %d, want 0", "+", r) + } + y = 32766 + r = x + y + if r != 32765 { + t.Errorf("-1 %s 32766 = %d, want 32765", "+", r) + } + y = 32767 + r = x + y + if r != 32766 { + t.Errorf("-1 %s 32767 = %d, want 32766", "+", r) + } + x = 0 + y = -32768 + r = x + y + if r != -32768 { + t.Errorf("0 %s -32768 = %d, want -32768", "+", r) + } + y = -32767 + r = x + y + if r != -32767 { + t.Errorf("0 %s -32767 = %d, want -32767", "+", r) + } + y = -1 + r = x + y + if r != -1 { + t.Errorf("0 %s -1 
= %d, want -1", "+", r) + } + y = 0 + r = x + y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "+", r) + } + y = 1 + r = x + y + if r != 1 { + t.Errorf("0 %s 1 = %d, want 1", "+", r) + } + y = 32766 + r = x + y + if r != 32766 { + t.Errorf("0 %s 32766 = %d, want 32766", "+", r) + } + y = 32767 + r = x + y + if r != 32767 { + t.Errorf("0 %s 32767 = %d, want 32767", "+", r) + } + x = 1 + y = -32768 + r = x + y + if r != -32767 { + t.Errorf("1 %s -32768 = %d, want -32767", "+", r) + } + y = -32767 + r = x + y + if r != -32766 { + t.Errorf("1 %s -32767 = %d, want -32766", "+", r) + } + y = -1 + r = x + y + if r != 0 { + t.Errorf("1 %s -1 = %d, want 0", "+", r) + } + y = 0 + r = x + y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "+", r) + } + y = 1 + r = x + y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "+", r) + } + y = 32766 + r = x + y + if r != 32767 { + t.Errorf("1 %s 32766 = %d, want 32767", "+", r) + } + y = 32767 + r = x + y + if r != -32768 { + t.Errorf("1 %s 32767 = %d, want -32768", "+", r) + } + x = 32766 + y = -32768 + r = x + y + if r != -2 { + t.Errorf("32766 %s -32768 = %d, want -2", "+", r) + } + y = -32767 + r = x + y + if r != -1 { + t.Errorf("32766 %s -32767 = %d, want -1", "+", r) + } + y = -1 + r = x + y + if r != 32765 { + t.Errorf("32766 %s -1 = %d, want 32765", "+", r) + } + y = 0 + r = x + y + if r != 32766 { + t.Errorf("32766 %s 0 = %d, want 32766", "+", r) + } + y = 1 + r = x + y + if r != 32767 { + t.Errorf("32766 %s 1 = %d, want 32767", "+", r) + } + y = 32766 + r = x + y + if r != -4 { + t.Errorf("32766 %s 32766 = %d, want -4", "+", r) + } + y = 32767 + r = x + y + if r != -3 { + t.Errorf("32766 %s 32767 = %d, want -3", "+", r) + } + x = 32767 + y = -32768 + r = x + y + if r != -1 { + t.Errorf("32767 %s -32768 = %d, want -1", "+", r) + } + y = -32767 + r = x + y + if r != 0 { + t.Errorf("32767 %s -32767 = %d, want 0", "+", r) + } + y = -1 + r = x + y + if r != 32766 { + t.Errorf("32767 %s -1 = %d, want 32766", "+", r) + } + y = 0 + 
r = x + y + if r != 32767 { + t.Errorf("32767 %s 0 = %d, want 32767", "+", r) + } + y = 1 + r = x + y + if r != -32768 { + t.Errorf("32767 %s 1 = %d, want -32768", "+", r) + } + y = 32766 + r = x + y + if r != -3 { + t.Errorf("32767 %s 32766 = %d, want -3", "+", r) + } + y = 32767 + r = x + y + if r != -2 { + t.Errorf("32767 %s 32767 = %d, want -2", "+", r) + } +} +func TestConstFoldint16sub(t *testing.T) { + var x, y, r int16 + x = -32768 + y = -32768 + r = x - y + if r != 0 { + t.Errorf("-32768 %s -32768 = %d, want 0", "-", r) + } + y = -32767 + r = x - y + if r != -1 { + t.Errorf("-32768 %s -32767 = %d, want -1", "-", r) + } + y = -1 + r = x - y + if r != -32767 { + t.Errorf("-32768 %s -1 = %d, want -32767", "-", r) + } + y = 0 + r = x - y + if r != -32768 { + t.Errorf("-32768 %s 0 = %d, want -32768", "-", r) + } + y = 1 + r = x - y + if r != 32767 { + t.Errorf("-32768 %s 1 = %d, want 32767", "-", r) + } + y = 32766 + r = x - y + if r != 2 { + t.Errorf("-32768 %s 32766 = %d, want 2", "-", r) + } + y = 32767 + r = x - y + if r != 1 { + t.Errorf("-32768 %s 32767 = %d, want 1", "-", r) + } + x = -32767 + y = -32768 + r = x - y + if r != 1 { + t.Errorf("-32767 %s -32768 = %d, want 1", "-", r) + } + y = -32767 + r = x - y + if r != 0 { + t.Errorf("-32767 %s -32767 = %d, want 0", "-", r) + } + y = -1 + r = x - y + if r != -32766 { + t.Errorf("-32767 %s -1 = %d, want -32766", "-", r) + } + y = 0 + r = x - y + if r != -32767 { + t.Errorf("-32767 %s 0 = %d, want -32767", "-", r) + } + y = 1 + r = x - y + if r != -32768 { + t.Errorf("-32767 %s 1 = %d, want -32768", "-", r) + } + y = 32766 + r = x - y + if r != 3 { + t.Errorf("-32767 %s 32766 = %d, want 3", "-", r) + } + y = 32767 + r = x - y + if r != 2 { + t.Errorf("-32767 %s 32767 = %d, want 2", "-", r) + } + x = -1 + y = -32768 + r = x - y + if r != 32767 { + t.Errorf("-1 %s -32768 = %d, want 32767", "-", r) + } + y = -32767 + r = x - y + if r != 32766 { + t.Errorf("-1 %s -32767 = %d, want 32766", "-", r) + } + y = -1 
+ r = x - y + if r != 0 { + t.Errorf("-1 %s -1 = %d, want 0", "-", r) + } + y = 0 + r = x - y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "-", r) + } + y = 1 + r = x - y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "-", r) + } + y = 32766 + r = x - y + if r != -32767 { + t.Errorf("-1 %s 32766 = %d, want -32767", "-", r) + } + y = 32767 + r = x - y + if r != -32768 { + t.Errorf("-1 %s 32767 = %d, want -32768", "-", r) + } + x = 0 + y = -32768 + r = x - y + if r != -32768 { + t.Errorf("0 %s -32768 = %d, want -32768", "-", r) + } + y = -32767 + r = x - y + if r != 32767 { + t.Errorf("0 %s -32767 = %d, want 32767", "-", r) + } + y = -1 + r = x - y + if r != 1 { + t.Errorf("0 %s -1 = %d, want 1", "-", r) + } + y = 0 + r = x - y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "-", r) + } + y = 1 + r = x - y + if r != -1 { + t.Errorf("0 %s 1 = %d, want -1", "-", r) + } + y = 32766 + r = x - y + if r != -32766 { + t.Errorf("0 %s 32766 = %d, want -32766", "-", r) + } + y = 32767 + r = x - y + if r != -32767 { + t.Errorf("0 %s 32767 = %d, want -32767", "-", r) + } + x = 1 + y = -32768 + r = x - y + if r != -32767 { + t.Errorf("1 %s -32768 = %d, want -32767", "-", r) + } + y = -32767 + r = x - y + if r != -32768 { + t.Errorf("1 %s -32767 = %d, want -32768", "-", r) + } + y = -1 + r = x - y + if r != 2 { + t.Errorf("1 %s -1 = %d, want 2", "-", r) + } + y = 0 + r = x - y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "-", r) + } + y = 1 + r = x - y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", "-", r) + } + y = 32766 + r = x - y + if r != -32765 { + t.Errorf("1 %s 32766 = %d, want -32765", "-", r) + } + y = 32767 + r = x - y + if r != -32766 { + t.Errorf("1 %s 32767 = %d, want -32766", "-", r) + } + x = 32766 + y = -32768 + r = x - y + if r != -2 { + t.Errorf("32766 %s -32768 = %d, want -2", "-", r) + } + y = -32767 + r = x - y + if r != -3 { + t.Errorf("32766 %s -32767 = %d, want -3", "-", r) + } + y = -1 + r = x - y + if r != 32767 { + t.Errorf("32766 %s -1 = 
%d, want 32767", "-", r) + } + y = 0 + r = x - y + if r != 32766 { + t.Errorf("32766 %s 0 = %d, want 32766", "-", r) + } + y = 1 + r = x - y + if r != 32765 { + t.Errorf("32766 %s 1 = %d, want 32765", "-", r) + } + y = 32766 + r = x - y + if r != 0 { + t.Errorf("32766 %s 32766 = %d, want 0", "-", r) + } + y = 32767 + r = x - y + if r != -1 { + t.Errorf("32766 %s 32767 = %d, want -1", "-", r) + } + x = 32767 + y = -32768 + r = x - y + if r != -1 { + t.Errorf("32767 %s -32768 = %d, want -1", "-", r) + } + y = -32767 + r = x - y + if r != -2 { + t.Errorf("32767 %s -32767 = %d, want -2", "-", r) + } + y = -1 + r = x - y + if r != -32768 { + t.Errorf("32767 %s -1 = %d, want -32768", "-", r) + } + y = 0 + r = x - y + if r != 32767 { + t.Errorf("32767 %s 0 = %d, want 32767", "-", r) + } + y = 1 + r = x - y + if r != 32766 { + t.Errorf("32767 %s 1 = %d, want 32766", "-", r) + } + y = 32766 + r = x - y + if r != 1 { + t.Errorf("32767 %s 32766 = %d, want 1", "-", r) + } + y = 32767 + r = x - y + if r != 0 { + t.Errorf("32767 %s 32767 = %d, want 0", "-", r) + } +} +func TestConstFoldint16div(t *testing.T) { + var x, y, r int16 + x = -32768 + y = -32768 + r = x / y + if r != 1 { + t.Errorf("-32768 %s -32768 = %d, want 1", "/", r) + } + y = -32767 + r = x / y + if r != 1 { + t.Errorf("-32768 %s -32767 = %d, want 1", "/", r) + } + y = -1 + r = x / y + if r != -32768 { + t.Errorf("-32768 %s -1 = %d, want -32768", "/", r) + } + y = 1 + r = x / y + if r != -32768 { + t.Errorf("-32768 %s 1 = %d, want -32768", "/", r) + } + y = 32766 + r = x / y + if r != -1 { + t.Errorf("-32768 %s 32766 = %d, want -1", "/", r) + } + y = 32767 + r = x / y + if r != -1 { + t.Errorf("-32768 %s 32767 = %d, want -1", "/", r) + } + x = -32767 + y = -32768 + r = x / y + if r != 0 { + t.Errorf("-32767 %s -32768 = %d, want 0", "/", r) + } + y = -32767 + r = x / y + if r != 1 { + t.Errorf("-32767 %s -32767 = %d, want 1", "/", r) + } + y = -1 + r = x / y + if r != 32767 { + t.Errorf("-32767 %s -1 = %d, want 
32767", "/", r) + } + y = 1 + r = x / y + if r != -32767 { + t.Errorf("-32767 %s 1 = %d, want -32767", "/", r) + } + y = 32766 + r = x / y + if r != -1 { + t.Errorf("-32767 %s 32766 = %d, want -1", "/", r) + } + y = 32767 + r = x / y + if r != -1 { + t.Errorf("-32767 %s 32767 = %d, want -1", "/", r) + } + x = -1 + y = -32768 + r = x / y + if r != 0 { + t.Errorf("-1 %s -32768 = %d, want 0", "/", r) + } + y = -32767 + r = x / y + if r != 0 { + t.Errorf("-1 %s -32767 = %d, want 0", "/", r) + } + y = -1 + r = x / y + if r != 1 { + t.Errorf("-1 %s -1 = %d, want 1", "/", r) + } + y = 1 + r = x / y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", "/", r) + } + y = 32766 + r = x / y + if r != 0 { + t.Errorf("-1 %s 32766 = %d, want 0", "/", r) + } + y = 32767 + r = x / y + if r != 0 { + t.Errorf("-1 %s 32767 = %d, want 0", "/", r) + } + x = 0 + y = -32768 + r = x / y + if r != 0 { + t.Errorf("0 %s -32768 = %d, want 0", "/", r) + } + y = -32767 + r = x / y + if r != 0 { + t.Errorf("0 %s -32767 = %d, want 0", "/", r) + } + y = -1 + r = x / y + if r != 0 { + t.Errorf("0 %s -1 = %d, want 0", "/", r) + } + y = 1 + r = x / y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "/", r) + } + y = 32766 + r = x / y + if r != 0 { + t.Errorf("0 %s 32766 = %d, want 0", "/", r) + } + y = 32767 + r = x / y + if r != 0 { + t.Errorf("0 %s 32767 = %d, want 0", "/", r) + } + x = 1 + y = -32768 + r = x / y + if r != 0 { + t.Errorf("1 %s -32768 = %d, want 0", "/", r) + } + y = -32767 + r = x / y + if r != 0 { + t.Errorf("1 %s -32767 = %d, want 0", "/", r) + } + y = -1 + r = x / y + if r != -1 { + t.Errorf("1 %s -1 = %d, want -1", "/", r) + } + y = 1 + r = x / y + if r != 1 { + t.Errorf("1 %s 1 = %d, want 1", "/", r) + } + y = 32766 + r = x / y + if r != 0 { + t.Errorf("1 %s 32766 = %d, want 0", "/", r) + } + y = 32767 + r = x / y + if r != 0 { + t.Errorf("1 %s 32767 = %d, want 0", "/", r) + } + x = 32766 + y = -32768 + r = x / y + if r != 0 { + t.Errorf("32766 %s -32768 = %d, want 0", "/", r) + } 
+ y = -32767 + r = x / y + if r != 0 { + t.Errorf("32766 %s -32767 = %d, want 0", "/", r) + } + y = -1 + r = x / y + if r != -32766 { + t.Errorf("32766 %s -1 = %d, want -32766", "/", r) + } + y = 1 + r = x / y + if r != 32766 { + t.Errorf("32766 %s 1 = %d, want 32766", "/", r) + } + y = 32766 + r = x / y + if r != 1 { + t.Errorf("32766 %s 32766 = %d, want 1", "/", r) + } + y = 32767 + r = x / y + if r != 0 { + t.Errorf("32766 %s 32767 = %d, want 0", "/", r) + } + x = 32767 + y = -32768 + r = x / y + if r != 0 { + t.Errorf("32767 %s -32768 = %d, want 0", "/", r) + } + y = -32767 + r = x / y + if r != -1 { + t.Errorf("32767 %s -32767 = %d, want -1", "/", r) + } + y = -1 + r = x / y + if r != -32767 { + t.Errorf("32767 %s -1 = %d, want -32767", "/", r) + } + y = 1 + r = x / y + if r != 32767 { + t.Errorf("32767 %s 1 = %d, want 32767", "/", r) + } + y = 32766 + r = x / y + if r != 1 { + t.Errorf("32767 %s 32766 = %d, want 1", "/", r) + } + y = 32767 + r = x / y + if r != 1 { + t.Errorf("32767 %s 32767 = %d, want 1", "/", r) + } +} +func TestConstFoldint16mul(t *testing.T) { + var x, y, r int16 + x = -32768 + y = -32768 + r = x * y + if r != 0 { + t.Errorf("-32768 %s -32768 = %d, want 0", "*", r) + } + y = -32767 + r = x * y + if r != -32768 { + t.Errorf("-32768 %s -32767 = %d, want -32768", "*", r) + } + y = -1 + r = x * y + if r != -32768 { + t.Errorf("-32768 %s -1 = %d, want -32768", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("-32768 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != -32768 { + t.Errorf("-32768 %s 1 = %d, want -32768", "*", r) + } + y = 32766 + r = x * y + if r != 0 { + t.Errorf("-32768 %s 32766 = %d, want 0", "*", r) + } + y = 32767 + r = x * y + if r != -32768 { + t.Errorf("-32768 %s 32767 = %d, want -32768", "*", r) + } + x = -32767 + y = -32768 + r = x * y + if r != -32768 { + t.Errorf("-32767 %s -32768 = %d, want -32768", "*", r) + } + y = -32767 + r = x * y + if r != 1 { + t.Errorf("-32767 %s -32767 = %d, want 1", "*", 
r) + } + y = -1 + r = x * y + if r != 32767 { + t.Errorf("-32767 %s -1 = %d, want 32767", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("-32767 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != -32767 { + t.Errorf("-32767 %s 1 = %d, want -32767", "*", r) + } + y = 32766 + r = x * y + if r != 32766 { + t.Errorf("-32767 %s 32766 = %d, want 32766", "*", r) + } + y = 32767 + r = x * y + if r != -1 { + t.Errorf("-32767 %s 32767 = %d, want -1", "*", r) + } + x = -1 + y = -32768 + r = x * y + if r != -32768 { + t.Errorf("-1 %s -32768 = %d, want -32768", "*", r) + } + y = -32767 + r = x * y + if r != 32767 { + t.Errorf("-1 %s -32767 = %d, want 32767", "*", r) + } + y = -1 + r = x * y + if r != 1 { + t.Errorf("-1 %s -1 = %d, want 1", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("-1 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", "*", r) + } + y = 32766 + r = x * y + if r != -32766 { + t.Errorf("-1 %s 32766 = %d, want -32766", "*", r) + } + y = 32767 + r = x * y + if r != -32767 { + t.Errorf("-1 %s 32767 = %d, want -32767", "*", r) + } + x = 0 + y = -32768 + r = x * y + if r != 0 { + t.Errorf("0 %s -32768 = %d, want 0", "*", r) + } + y = -32767 + r = x * y + if r != 0 { + t.Errorf("0 %s -32767 = %d, want 0", "*", r) + } + y = -1 + r = x * y + if r != 0 { + t.Errorf("0 %s -1 = %d, want 0", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "*", r) + } + y = 32766 + r = x * y + if r != 0 { + t.Errorf("0 %s 32766 = %d, want 0", "*", r) + } + y = 32767 + r = x * y + if r != 0 { + t.Errorf("0 %s 32767 = %d, want 0", "*", r) + } + x = 1 + y = -32768 + r = x * y + if r != -32768 { + t.Errorf("1 %s -32768 = %d, want -32768", "*", r) + } + y = -32767 + r = x * y + if r != -32767 { + t.Errorf("1 %s -32767 = %d, want -32767", "*", r) + } + y = -1 + r = x * y + if r != -1 { + t.Errorf("1 %s 
-1 = %d, want -1", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("1 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 1 { + t.Errorf("1 %s 1 = %d, want 1", "*", r) + } + y = 32766 + r = x * y + if r != 32766 { + t.Errorf("1 %s 32766 = %d, want 32766", "*", r) + } + y = 32767 + r = x * y + if r != 32767 { + t.Errorf("1 %s 32767 = %d, want 32767", "*", r) + } + x = 32766 + y = -32768 + r = x * y + if r != 0 { + t.Errorf("32766 %s -32768 = %d, want 0", "*", r) + } + y = -32767 + r = x * y + if r != 32766 { + t.Errorf("32766 %s -32767 = %d, want 32766", "*", r) + } + y = -1 + r = x * y + if r != -32766 { + t.Errorf("32766 %s -1 = %d, want -32766", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("32766 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 32766 { + t.Errorf("32766 %s 1 = %d, want 32766", "*", r) + } + y = 32766 + r = x * y + if r != 4 { + t.Errorf("32766 %s 32766 = %d, want 4", "*", r) + } + y = 32767 + r = x * y + if r != -32766 { + t.Errorf("32766 %s 32767 = %d, want -32766", "*", r) + } + x = 32767 + y = -32768 + r = x * y + if r != -32768 { + t.Errorf("32767 %s -32768 = %d, want -32768", "*", r) + } + y = -32767 + r = x * y + if r != -1 { + t.Errorf("32767 %s -32767 = %d, want -1", "*", r) + } + y = -1 + r = x * y + if r != -32767 { + t.Errorf("32767 %s -1 = %d, want -32767", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("32767 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 32767 { + t.Errorf("32767 %s 1 = %d, want 32767", "*", r) + } + y = 32766 + r = x * y + if r != -32766 { + t.Errorf("32767 %s 32766 = %d, want -32766", "*", r) + } + y = 32767 + r = x * y + if r != 1 { + t.Errorf("32767 %s 32767 = %d, want 1", "*", r) + } +} +func TestConstFoldint16mod(t *testing.T) { + var x, y, r int16 + x = -32768 + y = -32768 + r = x % y + if r != 0 { + t.Errorf("-32768 %s -32768 = %d, want 0", "%", r) + } + y = -32767 + r = x % y + if r != -1 { + t.Errorf("-32768 %s -32767 = %d, want -1", "%", 
r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("-32768 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("-32768 %s 1 = %d, want 0", "%", r) + } + y = 32766 + r = x % y + if r != -2 { + t.Errorf("-32768 %s 32766 = %d, want -2", "%", r) + } + y = 32767 + r = x % y + if r != -1 { + t.Errorf("-32768 %s 32767 = %d, want -1", "%", r) + } + x = -32767 + y = -32768 + r = x % y + if r != -32767 { + t.Errorf("-32767 %s -32768 = %d, want -32767", "%", r) + } + y = -32767 + r = x % y + if r != 0 { + t.Errorf("-32767 %s -32767 = %d, want 0", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("-32767 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("-32767 %s 1 = %d, want 0", "%", r) + } + y = 32766 + r = x % y + if r != -1 { + t.Errorf("-32767 %s 32766 = %d, want -1", "%", r) + } + y = 32767 + r = x % y + if r != 0 { + t.Errorf("-32767 %s 32767 = %d, want 0", "%", r) + } + x = -1 + y = -32768 + r = x % y + if r != -1 { + t.Errorf("-1 %s -32768 = %d, want -1", "%", r) + } + y = -32767 + r = x % y + if r != -1 { + t.Errorf("-1 %s -32767 = %d, want -1", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("-1 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("-1 %s 1 = %d, want 0", "%", r) + } + y = 32766 + r = x % y + if r != -1 { + t.Errorf("-1 %s 32766 = %d, want -1", "%", r) + } + y = 32767 + r = x % y + if r != -1 { + t.Errorf("-1 %s 32767 = %d, want -1", "%", r) + } + x = 0 + y = -32768 + r = x % y + if r != 0 { + t.Errorf("0 %s -32768 = %d, want 0", "%", r) + } + y = -32767 + r = x % y + if r != 0 { + t.Errorf("0 %s -32767 = %d, want 0", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("0 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "%", r) + } + y = 32766 + r = x % y + if r != 0 { + t.Errorf("0 %s 32766 = %d, want 0", "%", r) + } + y = 32767 + r = x % y + if r != 0 { + t.Errorf("0 %s 32767 = %d, want 0", 
"%", r) + } + x = 1 + y = -32768 + r = x % y + if r != 1 { + t.Errorf("1 %s -32768 = %d, want 1", "%", r) + } + y = -32767 + r = x % y + if r != 1 { + t.Errorf("1 %s -32767 = %d, want 1", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("1 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", "%", r) + } + y = 32766 + r = x % y + if r != 1 { + t.Errorf("1 %s 32766 = %d, want 1", "%", r) + } + y = 32767 + r = x % y + if r != 1 { + t.Errorf("1 %s 32767 = %d, want 1", "%", r) + } + x = 32766 + y = -32768 + r = x % y + if r != 32766 { + t.Errorf("32766 %s -32768 = %d, want 32766", "%", r) + } + y = -32767 + r = x % y + if r != 32766 { + t.Errorf("32766 %s -32767 = %d, want 32766", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("32766 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("32766 %s 1 = %d, want 0", "%", r) + } + y = 32766 + r = x % y + if r != 0 { + t.Errorf("32766 %s 32766 = %d, want 0", "%", r) + } + y = 32767 + r = x % y + if r != 32766 { + t.Errorf("32766 %s 32767 = %d, want 32766", "%", r) + } + x = 32767 + y = -32768 + r = x % y + if r != 32767 { + t.Errorf("32767 %s -32768 = %d, want 32767", "%", r) + } + y = -32767 + r = x % y + if r != 0 { + t.Errorf("32767 %s -32767 = %d, want 0", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("32767 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("32767 %s 1 = %d, want 0", "%", r) + } + y = 32766 + r = x % y + if r != 1 { + t.Errorf("32767 %s 32766 = %d, want 1", "%", r) + } + y = 32767 + r = x % y + if r != 0 { + t.Errorf("32767 %s 32767 = %d, want 0", "%", r) + } +} +func TestConstFolduint8add(t *testing.T) { + var x, y, r uint8 + x = 0 + y = 0 + r = x + y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "+", r) + } + y = 1 + r = x + y + if r != 1 { + t.Errorf("0 %s 1 = %d, want 1", "+", r) + } + y = 255 + r = x + y + if r != 255 { + t.Errorf("0 %s 255 = %d, want 255", "+", r) + 
} + x = 1 + y = 0 + r = x + y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "+", r) + } + y = 1 + r = x + y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "+", r) + } + y = 255 + r = x + y + if r != 0 { + t.Errorf("1 %s 255 = %d, want 0", "+", r) + } + x = 255 + y = 0 + r = x + y + if r != 255 { + t.Errorf("255 %s 0 = %d, want 255", "+", r) + } + y = 1 + r = x + y + if r != 0 { + t.Errorf("255 %s 1 = %d, want 0", "+", r) + } + y = 255 + r = x + y + if r != 254 { + t.Errorf("255 %s 255 = %d, want 254", "+", r) + } +} +func TestConstFolduint8sub(t *testing.T) { + var x, y, r uint8 + x = 0 + y = 0 + r = x - y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "-", r) + } + y = 1 + r = x - y + if r != 255 { + t.Errorf("0 %s 1 = %d, want 255", "-", r) + } + y = 255 + r = x - y + if r != 1 { + t.Errorf("0 %s 255 = %d, want 1", "-", r) + } + x = 1 + y = 0 + r = x - y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "-", r) + } + y = 1 + r = x - y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", "-", r) + } + y = 255 + r = x - y + if r != 2 { + t.Errorf("1 %s 255 = %d, want 2", "-", r) + } + x = 255 + y = 0 + r = x - y + if r != 255 { + t.Errorf("255 %s 0 = %d, want 255", "-", r) + } + y = 1 + r = x - y + if r != 254 { + t.Errorf("255 %s 1 = %d, want 254", "-", r) + } + y = 255 + r = x - y + if r != 0 { + t.Errorf("255 %s 255 = %d, want 0", "-", r) + } +} +func TestConstFolduint8div(t *testing.T) { + var x, y, r uint8 + x = 0 + y = 1 + r = x / y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "/", r) + } + y = 255 + r = x / y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", "/", r) + } + x = 1 + y = 1 + r = x / y + if r != 1 { + t.Errorf("1 %s 1 = %d, want 1", "/", r) + } + y = 255 + r = x / y + if r != 0 { + t.Errorf("1 %s 255 = %d, want 0", "/", r) + } + x = 255 + y = 1 + r = x / y + if r != 255 { + t.Errorf("255 %s 1 = %d, want 255", "/", r) + } + y = 255 + r = x / y + if r != 1 { + t.Errorf("255 %s 255 = %d, want 1", "/", r) + } +} +func TestConstFolduint8mul(t 
*testing.T) { + var x, y, r uint8 + x = 0 + y = 0 + r = x * y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "*", r) + } + y = 255 + r = x * y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", "*", r) + } + x = 1 + y = 0 + r = x * y + if r != 0 { + t.Errorf("1 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 1 { + t.Errorf("1 %s 1 = %d, want 1", "*", r) + } + y = 255 + r = x * y + if r != 255 { + t.Errorf("1 %s 255 = %d, want 255", "*", r) + } + x = 255 + y = 0 + r = x * y + if r != 0 { + t.Errorf("255 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 255 { + t.Errorf("255 %s 1 = %d, want 255", "*", r) + } + y = 255 + r = x * y + if r != 1 { + t.Errorf("255 %s 255 = %d, want 1", "*", r) + } +} +func TestConstFolduint8mod(t *testing.T) { + var x, y, r uint8 + x = 0 + y = 1 + r = x % y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "%", r) + } + y = 255 + r = x % y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", "%", r) + } + x = 1 + y = 1 + r = x % y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", "%", r) + } + y = 255 + r = x % y + if r != 1 { + t.Errorf("1 %s 255 = %d, want 1", "%", r) + } + x = 255 + y = 1 + r = x % y + if r != 0 { + t.Errorf("255 %s 1 = %d, want 0", "%", r) + } + y = 255 + r = x % y + if r != 0 { + t.Errorf("255 %s 255 = %d, want 0", "%", r) + } +} +func TestConstFoldint8add(t *testing.T) { + var x, y, r int8 + x = -128 + y = -128 + r = x + y + if r != 0 { + t.Errorf("-128 %s -128 = %d, want 0", "+", r) + } + y = -127 + r = x + y + if r != 1 { + t.Errorf("-128 %s -127 = %d, want 1", "+", r) + } + y = -1 + r = x + y + if r != 127 { + t.Errorf("-128 %s -1 = %d, want 127", "+", r) + } + y = 0 + r = x + y + if r != -128 { + t.Errorf("-128 %s 0 = %d, want -128", "+", r) + } + y = 1 + r = x + y + if r != -127 { + t.Errorf("-128 %s 1 = %d, want -127", "+", r) + } + y = 126 + r = x + y + if r != -2 { + t.Errorf("-128 %s 126 = %d, want -2", "+", 
r) + } + y = 127 + r = x + y + if r != -1 { + t.Errorf("-128 %s 127 = %d, want -1", "+", r) + } + x = -127 + y = -128 + r = x + y + if r != 1 { + t.Errorf("-127 %s -128 = %d, want 1", "+", r) + } + y = -127 + r = x + y + if r != 2 { + t.Errorf("-127 %s -127 = %d, want 2", "+", r) + } + y = -1 + r = x + y + if r != -128 { + t.Errorf("-127 %s -1 = %d, want -128", "+", r) + } + y = 0 + r = x + y + if r != -127 { + t.Errorf("-127 %s 0 = %d, want -127", "+", r) + } + y = 1 + r = x + y + if r != -126 { + t.Errorf("-127 %s 1 = %d, want -126", "+", r) + } + y = 126 + r = x + y + if r != -1 { + t.Errorf("-127 %s 126 = %d, want -1", "+", r) + } + y = 127 + r = x + y + if r != 0 { + t.Errorf("-127 %s 127 = %d, want 0", "+", r) + } + x = -1 + y = -128 + r = x + y + if r != 127 { + t.Errorf("-1 %s -128 = %d, want 127", "+", r) + } + y = -127 + r = x + y + if r != -128 { + t.Errorf("-1 %s -127 = %d, want -128", "+", r) + } + y = -1 + r = x + y + if r != -2 { + t.Errorf("-1 %s -1 = %d, want -2", "+", r) + } + y = 0 + r = x + y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "+", r) + } + y = 1 + r = x + y + if r != 0 { + t.Errorf("-1 %s 1 = %d, want 0", "+", r) + } + y = 126 + r = x + y + if r != 125 { + t.Errorf("-1 %s 126 = %d, want 125", "+", r) + } + y = 127 + r = x + y + if r != 126 { + t.Errorf("-1 %s 127 = %d, want 126", "+", r) + } + x = 0 + y = -128 + r = x + y + if r != -128 { + t.Errorf("0 %s -128 = %d, want -128", "+", r) + } + y = -127 + r = x + y + if r != -127 { + t.Errorf("0 %s -127 = %d, want -127", "+", r) + } + y = -1 + r = x + y + if r != -1 { + t.Errorf("0 %s -1 = %d, want -1", "+", r) + } + y = 0 + r = x + y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "+", r) + } + y = 1 + r = x + y + if r != 1 { + t.Errorf("0 %s 1 = %d, want 1", "+", r) + } + y = 126 + r = x + y + if r != 126 { + t.Errorf("0 %s 126 = %d, want 126", "+", r) + } + y = 127 + r = x + y + if r != 127 { + t.Errorf("0 %s 127 = %d, want 127", "+", r) + } + x = 1 + y = -128 + r = x + y + if r 
!= -127 { + t.Errorf("1 %s -128 = %d, want -127", "+", r) + } + y = -127 + r = x + y + if r != -126 { + t.Errorf("1 %s -127 = %d, want -126", "+", r) + } + y = -1 + r = x + y + if r != 0 { + t.Errorf("1 %s -1 = %d, want 0", "+", r) + } + y = 0 + r = x + y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "+", r) + } + y = 1 + r = x + y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "+", r) + } + y = 126 + r = x + y + if r != 127 { + t.Errorf("1 %s 126 = %d, want 127", "+", r) + } + y = 127 + r = x + y + if r != -128 { + t.Errorf("1 %s 127 = %d, want -128", "+", r) + } + x = 126 + y = -128 + r = x + y + if r != -2 { + t.Errorf("126 %s -128 = %d, want -2", "+", r) + } + y = -127 + r = x + y + if r != -1 { + t.Errorf("126 %s -127 = %d, want -1", "+", r) + } + y = -1 + r = x + y + if r != 125 { + t.Errorf("126 %s -1 = %d, want 125", "+", r) + } + y = 0 + r = x + y + if r != 126 { + t.Errorf("126 %s 0 = %d, want 126", "+", r) + } + y = 1 + r = x + y + if r != 127 { + t.Errorf("126 %s 1 = %d, want 127", "+", r) + } + y = 126 + r = x + y + if r != -4 { + t.Errorf("126 %s 126 = %d, want -4", "+", r) + } + y = 127 + r = x + y + if r != -3 { + t.Errorf("126 %s 127 = %d, want -3", "+", r) + } + x = 127 + y = -128 + r = x + y + if r != -1 { + t.Errorf("127 %s -128 = %d, want -1", "+", r) + } + y = -127 + r = x + y + if r != 0 { + t.Errorf("127 %s -127 = %d, want 0", "+", r) + } + y = -1 + r = x + y + if r != 126 { + t.Errorf("127 %s -1 = %d, want 126", "+", r) + } + y = 0 + r = x + y + if r != 127 { + t.Errorf("127 %s 0 = %d, want 127", "+", r) + } + y = 1 + r = x + y + if r != -128 { + t.Errorf("127 %s 1 = %d, want -128", "+", r) + } + y = 126 + r = x + y + if r != -3 { + t.Errorf("127 %s 126 = %d, want -3", "+", r) + } + y = 127 + r = x + y + if r != -2 { + t.Errorf("127 %s 127 = %d, want -2", "+", r) + } +} +func TestConstFoldint8sub(t *testing.T) { + var x, y, r int8 + x = -128 + y = -128 + r = x - y + if r != 0 { + t.Errorf("-128 %s -128 = %d, want 0", "-", r) + } + y = 
-127 + r = x - y + if r != -1 { + t.Errorf("-128 %s -127 = %d, want -1", "-", r) + } + y = -1 + r = x - y + if r != -127 { + t.Errorf("-128 %s -1 = %d, want -127", "-", r) + } + y = 0 + r = x - y + if r != -128 { + t.Errorf("-128 %s 0 = %d, want -128", "-", r) + } + y = 1 + r = x - y + if r != 127 { + t.Errorf("-128 %s 1 = %d, want 127", "-", r) + } + y = 126 + r = x - y + if r != 2 { + t.Errorf("-128 %s 126 = %d, want 2", "-", r) + } + y = 127 + r = x - y + if r != 1 { + t.Errorf("-128 %s 127 = %d, want 1", "-", r) + } + x = -127 + y = -128 + r = x - y + if r != 1 { + t.Errorf("-127 %s -128 = %d, want 1", "-", r) + } + y = -127 + r = x - y + if r != 0 { + t.Errorf("-127 %s -127 = %d, want 0", "-", r) + } + y = -1 + r = x - y + if r != -126 { + t.Errorf("-127 %s -1 = %d, want -126", "-", r) + } + y = 0 + r = x - y + if r != -127 { + t.Errorf("-127 %s 0 = %d, want -127", "-", r) + } + y = 1 + r = x - y + if r != -128 { + t.Errorf("-127 %s 1 = %d, want -128", "-", r) + } + y = 126 + r = x - y + if r != 3 { + t.Errorf("-127 %s 126 = %d, want 3", "-", r) + } + y = 127 + r = x - y + if r != 2 { + t.Errorf("-127 %s 127 = %d, want 2", "-", r) + } + x = -1 + y = -128 + r = x - y + if r != 127 { + t.Errorf("-1 %s -128 = %d, want 127", "-", r) + } + y = -127 + r = x - y + if r != 126 { + t.Errorf("-1 %s -127 = %d, want 126", "-", r) + } + y = -1 + r = x - y + if r != 0 { + t.Errorf("-1 %s -1 = %d, want 0", "-", r) + } + y = 0 + r = x - y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "-", r) + } + y = 1 + r = x - y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "-", r) + } + y = 126 + r = x - y + if r != -127 { + t.Errorf("-1 %s 126 = %d, want -127", "-", r) + } + y = 127 + r = x - y + if r != -128 { + t.Errorf("-1 %s 127 = %d, want -128", "-", r) + } + x = 0 + y = -128 + r = x - y + if r != -128 { + t.Errorf("0 %s -128 = %d, want -128", "-", r) + } + y = -127 + r = x - y + if r != 127 { + t.Errorf("0 %s -127 = %d, want 127", "-", r) + } + y = -1 + r = x - y + if r 
!= 1 { + t.Errorf("0 %s -1 = %d, want 1", "-", r) + } + y = 0 + r = x - y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "-", r) + } + y = 1 + r = x - y + if r != -1 { + t.Errorf("0 %s 1 = %d, want -1", "-", r) + } + y = 126 + r = x - y + if r != -126 { + t.Errorf("0 %s 126 = %d, want -126", "-", r) + } + y = 127 + r = x - y + if r != -127 { + t.Errorf("0 %s 127 = %d, want -127", "-", r) + } + x = 1 + y = -128 + r = x - y + if r != -127 { + t.Errorf("1 %s -128 = %d, want -127", "-", r) + } + y = -127 + r = x - y + if r != -128 { + t.Errorf("1 %s -127 = %d, want -128", "-", r) + } + y = -1 + r = x - y + if r != 2 { + t.Errorf("1 %s -1 = %d, want 2", "-", r) + } + y = 0 + r = x - y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "-", r) + } + y = 1 + r = x - y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", "-", r) + } + y = 126 + r = x - y + if r != -125 { + t.Errorf("1 %s 126 = %d, want -125", "-", r) + } + y = 127 + r = x - y + if r != -126 { + t.Errorf("1 %s 127 = %d, want -126", "-", r) + } + x = 126 + y = -128 + r = x - y + if r != -2 { + t.Errorf("126 %s -128 = %d, want -2", "-", r) + } + y = -127 + r = x - y + if r != -3 { + t.Errorf("126 %s -127 = %d, want -3", "-", r) + } + y = -1 + r = x - y + if r != 127 { + t.Errorf("126 %s -1 = %d, want 127", "-", r) + } + y = 0 + r = x - y + if r != 126 { + t.Errorf("126 %s 0 = %d, want 126", "-", r) + } + y = 1 + r = x - y + if r != 125 { + t.Errorf("126 %s 1 = %d, want 125", "-", r) + } + y = 126 + r = x - y + if r != 0 { + t.Errorf("126 %s 126 = %d, want 0", "-", r) + } + y = 127 + r = x - y + if r != -1 { + t.Errorf("126 %s 127 = %d, want -1", "-", r) + } + x = 127 + y = -128 + r = x - y + if r != -1 { + t.Errorf("127 %s -128 = %d, want -1", "-", r) + } + y = -127 + r = x - y + if r != -2 { + t.Errorf("127 %s -127 = %d, want -2", "-", r) + } + y = -1 + r = x - y + if r != -128 { + t.Errorf("127 %s -1 = %d, want -128", "-", r) + } + y = 0 + r = x - y + if r != 127 { + t.Errorf("127 %s 0 = %d, want 127", "-", r) + } 
+ y = 1 + r = x - y + if r != 126 { + t.Errorf("127 %s 1 = %d, want 126", "-", r) + } + y = 126 + r = x - y + if r != 1 { + t.Errorf("127 %s 126 = %d, want 1", "-", r) + } + y = 127 + r = x - y + if r != 0 { + t.Errorf("127 %s 127 = %d, want 0", "-", r) + } +} +func TestConstFoldint8div(t *testing.T) { + var x, y, r int8 + x = -128 + y = -128 + r = x / y + if r != 1 { + t.Errorf("-128 %s -128 = %d, want 1", "/", r) + } + y = -127 + r = x / y + if r != 1 { + t.Errorf("-128 %s -127 = %d, want 1", "/", r) + } + y = -1 + r = x / y + if r != -128 { + t.Errorf("-128 %s -1 = %d, want -128", "/", r) + } + y = 1 + r = x / y + if r != -128 { + t.Errorf("-128 %s 1 = %d, want -128", "/", r) + } + y = 126 + r = x / y + if r != -1 { + t.Errorf("-128 %s 126 = %d, want -1", "/", r) + } + y = 127 + r = x / y + if r != -1 { + t.Errorf("-128 %s 127 = %d, want -1", "/", r) + } + x = -127 + y = -128 + r = x / y + if r != 0 { + t.Errorf("-127 %s -128 = %d, want 0", "/", r) + } + y = -127 + r = x / y + if r != 1 { + t.Errorf("-127 %s -127 = %d, want 1", "/", r) + } + y = -1 + r = x / y + if r != 127 { + t.Errorf("-127 %s -1 = %d, want 127", "/", r) + } + y = 1 + r = x / y + if r != -127 { + t.Errorf("-127 %s 1 = %d, want -127", "/", r) + } + y = 126 + r = x / y + if r != -1 { + t.Errorf("-127 %s 126 = %d, want -1", "/", r) + } + y = 127 + r = x / y + if r != -1 { + t.Errorf("-127 %s 127 = %d, want -1", "/", r) + } + x = -1 + y = -128 + r = x / y + if r != 0 { + t.Errorf("-1 %s -128 = %d, want 0", "/", r) + } + y = -127 + r = x / y + if r != 0 { + t.Errorf("-1 %s -127 = %d, want 0", "/", r) + } + y = -1 + r = x / y + if r != 1 { + t.Errorf("-1 %s -1 = %d, want 1", "/", r) + } + y = 1 + r = x / y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", "/", r) + } + y = 126 + r = x / y + if r != 0 { + t.Errorf("-1 %s 126 = %d, want 0", "/", r) + } + y = 127 + r = x / y + if r != 0 { + t.Errorf("-1 %s 127 = %d, want 0", "/", r) + } + x = 0 + y = -128 + r = x / y + if r != 0 { + t.Errorf("0 %s 
-128 = %d, want 0", "/", r) + } + y = -127 + r = x / y + if r != 0 { + t.Errorf("0 %s -127 = %d, want 0", "/", r) + } + y = -1 + r = x / y + if r != 0 { + t.Errorf("0 %s -1 = %d, want 0", "/", r) + } + y = 1 + r = x / y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "/", r) + } + y = 126 + r = x / y + if r != 0 { + t.Errorf("0 %s 126 = %d, want 0", "/", r) + } + y = 127 + r = x / y + if r != 0 { + t.Errorf("0 %s 127 = %d, want 0", "/", r) + } + x = 1 + y = -128 + r = x / y + if r != 0 { + t.Errorf("1 %s -128 = %d, want 0", "/", r) + } + y = -127 + r = x / y + if r != 0 { + t.Errorf("1 %s -127 = %d, want 0", "/", r) + } + y = -1 + r = x / y + if r != -1 { + t.Errorf("1 %s -1 = %d, want -1", "/", r) + } + y = 1 + r = x / y + if r != 1 { + t.Errorf("1 %s 1 = %d, want 1", "/", r) + } + y = 126 + r = x / y + if r != 0 { + t.Errorf("1 %s 126 = %d, want 0", "/", r) + } + y = 127 + r = x / y + if r != 0 { + t.Errorf("1 %s 127 = %d, want 0", "/", r) + } + x = 126 + y = -128 + r = x / y + if r != 0 { + t.Errorf("126 %s -128 = %d, want 0", "/", r) + } + y = -127 + r = x / y + if r != 0 { + t.Errorf("126 %s -127 = %d, want 0", "/", r) + } + y = -1 + r = x / y + if r != -126 { + t.Errorf("126 %s -1 = %d, want -126", "/", r) + } + y = 1 + r = x / y + if r != 126 { + t.Errorf("126 %s 1 = %d, want 126", "/", r) + } + y = 126 + r = x / y + if r != 1 { + t.Errorf("126 %s 126 = %d, want 1", "/", r) + } + y = 127 + r = x / y + if r != 0 { + t.Errorf("126 %s 127 = %d, want 0", "/", r) + } + x = 127 + y = -128 + r = x / y + if r != 0 { + t.Errorf("127 %s -128 = %d, want 0", "/", r) + } + y = -127 + r = x / y + if r != -1 { + t.Errorf("127 %s -127 = %d, want -1", "/", r) + } + y = -1 + r = x / y + if r != -127 { + t.Errorf("127 %s -1 = %d, want -127", "/", r) + } + y = 1 + r = x / y + if r != 127 { + t.Errorf("127 %s 1 = %d, want 127", "/", r) + } + y = 126 + r = x / y + if r != 1 { + t.Errorf("127 %s 126 = %d, want 1", "/", r) + } + y = 127 + r = x / y + if r != 1 { + t.Errorf("127 %s 
127 = %d, want 1", "/", r) + } +} +func TestConstFoldint8mul(t *testing.T) { + var x, y, r int8 + x = -128 + y = -128 + r = x * y + if r != 0 { + t.Errorf("-128 %s -128 = %d, want 0", "*", r) + } + y = -127 + r = x * y + if r != -128 { + t.Errorf("-128 %s -127 = %d, want -128", "*", r) + } + y = -1 + r = x * y + if r != -128 { + t.Errorf("-128 %s -1 = %d, want -128", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("-128 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != -128 { + t.Errorf("-128 %s 1 = %d, want -128", "*", r) + } + y = 126 + r = x * y + if r != 0 { + t.Errorf("-128 %s 126 = %d, want 0", "*", r) + } + y = 127 + r = x * y + if r != -128 { + t.Errorf("-128 %s 127 = %d, want -128", "*", r) + } + x = -127 + y = -128 + r = x * y + if r != -128 { + t.Errorf("-127 %s -128 = %d, want -128", "*", r) + } + y = -127 + r = x * y + if r != 1 { + t.Errorf("-127 %s -127 = %d, want 1", "*", r) + } + y = -1 + r = x * y + if r != 127 { + t.Errorf("-127 %s -1 = %d, want 127", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("-127 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != -127 { + t.Errorf("-127 %s 1 = %d, want -127", "*", r) + } + y = 126 + r = x * y + if r != 126 { + t.Errorf("-127 %s 126 = %d, want 126", "*", r) + } + y = 127 + r = x * y + if r != -1 { + t.Errorf("-127 %s 127 = %d, want -1", "*", r) + } + x = -1 + y = -128 + r = x * y + if r != -128 { + t.Errorf("-1 %s -128 = %d, want -128", "*", r) + } + y = -127 + r = x * y + if r != 127 { + t.Errorf("-1 %s -127 = %d, want 127", "*", r) + } + y = -1 + r = x * y + if r != 1 { + t.Errorf("-1 %s -1 = %d, want 1", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("-1 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", "*", r) + } + y = 126 + r = x * y + if r != -126 { + t.Errorf("-1 %s 126 = %d, want -126", "*", r) + } + y = 127 + r = x * y + if r != -127 { + t.Errorf("-1 %s 127 = %d, want -127", "*", r) + } + x = 0 
+ y = -128 + r = x * y + if r != 0 { + t.Errorf("0 %s -128 = %d, want 0", "*", r) + } + y = -127 + r = x * y + if r != 0 { + t.Errorf("0 %s -127 = %d, want 0", "*", r) + } + y = -1 + r = x * y + if r != 0 { + t.Errorf("0 %s -1 = %d, want 0", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "*", r) + } + y = 126 + r = x * y + if r != 0 { + t.Errorf("0 %s 126 = %d, want 0", "*", r) + } + y = 127 + r = x * y + if r != 0 { + t.Errorf("0 %s 127 = %d, want 0", "*", r) + } + x = 1 + y = -128 + r = x * y + if r != -128 { + t.Errorf("1 %s -128 = %d, want -128", "*", r) + } + y = -127 + r = x * y + if r != -127 { + t.Errorf("1 %s -127 = %d, want -127", "*", r) + } + y = -1 + r = x * y + if r != -1 { + t.Errorf("1 %s -1 = %d, want -1", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("1 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 1 { + t.Errorf("1 %s 1 = %d, want 1", "*", r) + } + y = 126 + r = x * y + if r != 126 { + t.Errorf("1 %s 126 = %d, want 126", "*", r) + } + y = 127 + r = x * y + if r != 127 { + t.Errorf("1 %s 127 = %d, want 127", "*", r) + } + x = 126 + y = -128 + r = x * y + if r != 0 { + t.Errorf("126 %s -128 = %d, want 0", "*", r) + } + y = -127 + r = x * y + if r != 126 { + t.Errorf("126 %s -127 = %d, want 126", "*", r) + } + y = -1 + r = x * y + if r != -126 { + t.Errorf("126 %s -1 = %d, want -126", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("126 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 126 { + t.Errorf("126 %s 1 = %d, want 126", "*", r) + } + y = 126 + r = x * y + if r != 4 { + t.Errorf("126 %s 126 = %d, want 4", "*", r) + } + y = 127 + r = x * y + if r != -126 { + t.Errorf("126 %s 127 = %d, want -126", "*", r) + } + x = 127 + y = -128 + r = x * y + if r != -128 { + t.Errorf("127 %s -128 = %d, want -128", "*", r) + } + y = -127 + r = x * y + if r != -1 { + t.Errorf("127 %s -127 = %d, want 
-1", "*", r) + } + y = -1 + r = x * y + if r != -127 { + t.Errorf("127 %s -1 = %d, want -127", "*", r) + } + y = 0 + r = x * y + if r != 0 { + t.Errorf("127 %s 0 = %d, want 0", "*", r) + } + y = 1 + r = x * y + if r != 127 { + t.Errorf("127 %s 1 = %d, want 127", "*", r) + } + y = 126 + r = x * y + if r != -126 { + t.Errorf("127 %s 126 = %d, want -126", "*", r) + } + y = 127 + r = x * y + if r != 1 { + t.Errorf("127 %s 127 = %d, want 1", "*", r) + } +} +func TestConstFoldint8mod(t *testing.T) { + var x, y, r int8 + x = -128 + y = -128 + r = x % y + if r != 0 { + t.Errorf("-128 %s -128 = %d, want 0", "%", r) + } + y = -127 + r = x % y + if r != -1 { + t.Errorf("-128 %s -127 = %d, want -1", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("-128 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("-128 %s 1 = %d, want 0", "%", r) + } + y = 126 + r = x % y + if r != -2 { + t.Errorf("-128 %s 126 = %d, want -2", "%", r) + } + y = 127 + r = x % y + if r != -1 { + t.Errorf("-128 %s 127 = %d, want -1", "%", r) + } + x = -127 + y = -128 + r = x % y + if r != -127 { + t.Errorf("-127 %s -128 = %d, want -127", "%", r) + } + y = -127 + r = x % y + if r != 0 { + t.Errorf("-127 %s -127 = %d, want 0", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("-127 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("-127 %s 1 = %d, want 0", "%", r) + } + y = 126 + r = x % y + if r != -1 { + t.Errorf("-127 %s 126 = %d, want -1", "%", r) + } + y = 127 + r = x % y + if r != 0 { + t.Errorf("-127 %s 127 = %d, want 0", "%", r) + } + x = -1 + y = -128 + r = x % y + if r != -1 { + t.Errorf("-1 %s -128 = %d, want -1", "%", r) + } + y = -127 + r = x % y + if r != -1 { + t.Errorf("-1 %s -127 = %d, want -1", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("-1 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("-1 %s 1 = %d, want 0", "%", r) + } + y = 126 + r = x % y + if r != -1 { + t.Errorf("-1 %s 
126 = %d, want -1", "%", r) + } + y = 127 + r = x % y + if r != -1 { + t.Errorf("-1 %s 127 = %d, want -1", "%", r) + } + x = 0 + y = -128 + r = x % y + if r != 0 { + t.Errorf("0 %s -128 = %d, want 0", "%", r) + } + y = -127 + r = x % y + if r != 0 { + t.Errorf("0 %s -127 = %d, want 0", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("0 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "%", r) + } + y = 126 + r = x % y + if r != 0 { + t.Errorf("0 %s 126 = %d, want 0", "%", r) + } + y = 127 + r = x % y + if r != 0 { + t.Errorf("0 %s 127 = %d, want 0", "%", r) + } + x = 1 + y = -128 + r = x % y + if r != 1 { + t.Errorf("1 %s -128 = %d, want 1", "%", r) + } + y = -127 + r = x % y + if r != 1 { + t.Errorf("1 %s -127 = %d, want 1", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("1 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", "%", r) + } + y = 126 + r = x % y + if r != 1 { + t.Errorf("1 %s 126 = %d, want 1", "%", r) + } + y = 127 + r = x % y + if r != 1 { + t.Errorf("1 %s 127 = %d, want 1", "%", r) + } + x = 126 + y = -128 + r = x % y + if r != 126 { + t.Errorf("126 %s -128 = %d, want 126", "%", r) + } + y = -127 + r = x % y + if r != 126 { + t.Errorf("126 %s -127 = %d, want 126", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("126 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("126 %s 1 = %d, want 0", "%", r) + } + y = 126 + r = x % y + if r != 0 { + t.Errorf("126 %s 126 = %d, want 0", "%", r) + } + y = 127 + r = x % y + if r != 126 { + t.Errorf("126 %s 127 = %d, want 126", "%", r) + } + x = 127 + y = -128 + r = x % y + if r != 127 { + t.Errorf("127 %s -128 = %d, want 127", "%", r) + } + y = -127 + r = x % y + if r != 0 { + t.Errorf("127 %s -127 = %d, want 0", "%", r) + } + y = -1 + r = x % y + if r != 0 { + t.Errorf("127 %s -1 = %d, want 0", "%", r) + } + y = 1 + r = x % y + if r != 0 { + t.Errorf("127 
%s 1 = %d, want 0", "%", r) + } + y = 126 + r = x % y + if r != 1 { + t.Errorf("127 %s 126 = %d, want 1", "%", r) + } + y = 127 + r = x % y + if r != 0 { + t.Errorf("127 %s 127 = %d, want 0", "%", r) + } +} +func TestConstFolduint64uint64lsh(t *testing.T) { + var x, r uint64 + var y uint64 + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 4294967296 + y = 0 + r = x << y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r) + } + y = 1 + r = x << y + if r != 8589934592 { + t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("4294967296 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 18446744073709551615 + y = 0 + r = x << y + if r != 18446744073709551615 { + t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r) + } + y = 1 + r = x << y + if r != 18446744073709551614 { + t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("18446744073709551615 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 
+ r = x << y + if r != 0 { + t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", "<<", r) + } +} +func TestConstFolduint64uint64rsh(t *testing.T) { + var x, r uint64 + var y uint64 + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 4294967296 + y = 0 + r = x >> y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r) + } + y = 1 + r = x >> y + if r != 2147483648 { + t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("4294967296 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 18446744073709551615 + y = 0 + r = x >> y + if r != 18446744073709551615 { + t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r) + } + y = 1 + r = x >> y + if r != 9223372036854775807 { + t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("18446744073709551615 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 
0", ">>", r) + } +} +func TestConstFolduint64uint32lsh(t *testing.T) { + var x, r uint64 + var y uint32 + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r) + } + x = 4294967296 + y = 0 + r = x << y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r) + } + y = 1 + r = x << y + if r != 8589934592 { + t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("4294967296 %s 4294967295 = %d, want 0", "<<", r) + } + x = 18446744073709551615 + y = 0 + r = x << y + if r != 18446744073709551615 { + t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r) + } + y = 1 + r = x << y + if r != 18446744073709551614 { + t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("18446744073709551615 %s 4294967295 = %d, want 0", "<<", r) + } +} +func TestConstFolduint64uint32rsh(t *testing.T) { + var x, r uint64 + var y uint32 + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + 
t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r) + } + x = 4294967296 + y = 0 + r = x >> y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r) + } + y = 1 + r = x >> y + if r != 2147483648 { + t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("4294967296 %s 4294967295 = %d, want 0", ">>", r) + } + x = 18446744073709551615 + y = 0 + r = x >> y + if r != 18446744073709551615 { + t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r) + } + y = 1 + r = x >> y + if r != 9223372036854775807 { + t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("18446744073709551615 %s 4294967295 = %d, want 0", ">>", r) + } +} +func TestConstFolduint64uint16lsh(t *testing.T) { + var x, r uint64 + var y uint16 + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("1 %s 65535 = %d, want 0", "<<", r) + } + x = 4294967296 + y = 0 + r = x << y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r) + } + y = 1 + r = x << y + if r != 8589934592 { + t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("4294967296 %s 65535 = %d, want 0", "<<", r) + } + x = 18446744073709551615 + y = 0 + r = x << y + if r != 18446744073709551615 { + t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r) + } + y = 1 + r = x << y + if r != 18446744073709551614 { + 
t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("18446744073709551615 %s 65535 = %d, want 0", "<<", r) + } +} +func TestConstFolduint64uint16rsh(t *testing.T) { + var x, r uint64 + var y uint16 + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("1 %s 65535 = %d, want 0", ">>", r) + } + x = 4294967296 + y = 0 + r = x >> y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r) + } + y = 1 + r = x >> y + if r != 2147483648 { + t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("4294967296 %s 65535 = %d, want 0", ">>", r) + } + x = 18446744073709551615 + y = 0 + r = x >> y + if r != 18446744073709551615 { + t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r) + } + y = 1 + r = x >> y + if r != 9223372036854775807 { + t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("18446744073709551615 %s 65535 = %d, want 0", ">>", r) + } +} +func TestConstFolduint64uint8lsh(t *testing.T) { + var x, r uint64 + var y uint8 + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x 
<< y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("1 %s 255 = %d, want 0", "<<", r) + } + x = 4294967296 + y = 0 + r = x << y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r) + } + y = 1 + r = x << y + if r != 8589934592 { + t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("4294967296 %s 255 = %d, want 0", "<<", r) + } + x = 18446744073709551615 + y = 0 + r = x << y + if r != 18446744073709551615 { + t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r) + } + y = 1 + r = x << y + if r != 18446744073709551614 { + t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("18446744073709551615 %s 255 = %d, want 0", "<<", r) + } +} +func TestConstFolduint64uint8rsh(t *testing.T) { + var x, r uint64 + var y uint8 + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("1 %s 255 = %d, want 0", ">>", r) + } + x = 4294967296 + y = 0 + r = x >> y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r) + } + y = 1 + r = x >> y + if r != 2147483648 { + t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("4294967296 %s 255 = %d, want 0", ">>", r) + } + x = 18446744073709551615 + y = 0 + r = x >> y + if r != 18446744073709551615 { + t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r) + } + y = 1 + 
r = x >> y + if r != 9223372036854775807 { + t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("18446744073709551615 %s 255 = %d, want 0", ">>", r) + } +} +func TestConstFoldint64uint64lsh(t *testing.T) { + var x, r int64 + var y uint64 + x = -9223372036854775808 + y = 0 + r = x << y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("-9223372036854775808 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = -9223372036854775807 + y = 0 + r = x << y + if r != -9223372036854775807 { + t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("-9223372036854775807 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("-9223372036854775807 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = -4294967296 + y = 0 + r = x << y + if r != -4294967296 { + t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r) + } + y = 1 + r = x << y + if r != -8589934592 { + t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("-4294967296 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = -1 + y = 0 + r = x << y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("-1 %s 1 
= %d, want -2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 4294967296 + y = 0 + r = x << y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r) + } + y = 1 + r = x << y + if r != 8589934592 { + t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("4294967296 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 9223372036854775806 + y = 0 + r = x << y + if r != 9223372036854775806 { + t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r) + } + y = 1 + r = x << y + if r != -4 { + t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("9223372036854775806 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("9223372036854775806 %s 18446744073709551615 
= %d, want 0", "<<", r) + } + x = 9223372036854775807 + y = 0 + r = x << y + if r != 9223372036854775807 { + t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("9223372036854775807 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("9223372036854775807 %s 18446744073709551615 = %d, want 0", "<<", r) + } +} +func TestConstFoldint64uint64rsh(t *testing.T) { + var x, r int64 + var y uint64 + x = -9223372036854775808 + y = 0 + r = x >> y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r) + } + y = 1 + r = x >> y + if r != -4611686018427387904 { + t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r) + } + y = 4294967296 + r = x >> y + if r != -1 { + t.Errorf("-9223372036854775808 %s 4294967296 = %d, want -1", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != -1 { + t.Errorf("-9223372036854775808 %s 18446744073709551615 = %d, want -1", ">>", r) + } + x = -9223372036854775807 + y = 0 + r = x >> y + if r != -9223372036854775807 { + t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r) + } + y = 1 + r = x >> y + if r != -4611686018427387904 { + t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r) + } + y = 4294967296 + r = x >> y + if r != -1 { + t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -1", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != -1 { + t.Errorf("-9223372036854775807 %s 18446744073709551615 = %d, want -1", ">>", r) + } + x = -4294967296 + y = 0 + r = x >> y + if r != -4294967296 { + t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r) + } + y = 1 + r = x >> y + if r != -2147483648 { + t.Errorf("-4294967296 %s 1 = %d, want -2147483648", 
">>", r) + } + y = 4294967296 + r = x >> y + if r != -1 { + t.Errorf("-4294967296 %s 4294967296 = %d, want -1", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != -1 { + t.Errorf("-4294967296 %s 18446744073709551615 = %d, want -1", ">>", r) + } + x = -1 + y = 0 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", ">>", r) + } + y = 1 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", ">>", r) + } + y = 4294967296 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r) + } + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 4294967296 + y = 0 + r = x >> y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r) + } + y = 1 + r = x >> y + if r != 2147483648 { + t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("4294967296 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 9223372036854775806 + y = 0 + r = x >> y + if r != 9223372036854775806 { + 
t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r) + } + y = 1 + r = x >> y + if r != 4611686018427387903 { + t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("9223372036854775806 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("9223372036854775806 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 9223372036854775807 + y = 0 + r = x >> y + if r != 9223372036854775807 { + t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r) + } + y = 1 + r = x >> y + if r != 4611686018427387903 { + t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("9223372036854775807 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("9223372036854775807 %s 18446744073709551615 = %d, want 0", ">>", r) + } +} +func TestConstFoldint64uint32lsh(t *testing.T) { + var x, r int64 + var y uint32 + x = -9223372036854775808 + y = 0 + r = x << y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("-9223372036854775808 %s 4294967295 = %d, want 0", "<<", r) + } + x = -9223372036854775807 + y = 0 + r = x << y + if r != -9223372036854775807 { + t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("-9223372036854775807 %s 4294967295 = %d, want 0", "<<", r) + } + x = -4294967296 + y = 0 + r = x << y + if r != -4294967296 { + t.Errorf("-4294967296 %s 0 = %d, want 
-4294967296", "<<", r) + } + y = 1 + r = x << y + if r != -8589934592 { + t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("-4294967296 %s 4294967295 = %d, want 0", "<<", r) + } + x = -1 + y = 0 + r = x << y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r) + } + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r) + } + x = 4294967296 + y = 0 + r = x << y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r) + } + y = 1 + r = x << y + if r != 8589934592 { + t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("4294967296 %s 4294967295 = %d, want 0", "<<", r) + } + x = 9223372036854775806 + y = 0 + r = x << y + if r != 9223372036854775806 { + t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r) + } + y = 1 + r = x << y + if r != -4 { + t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("9223372036854775806 %s 4294967295 = %d, want 0", "<<", r) + } + x = 9223372036854775807 + y = 0 + r = x << y + if r != 9223372036854775807 { + t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + 
t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("9223372036854775807 %s 4294967295 = %d, want 0", "<<", r) + } +} +func TestConstFoldint64uint32rsh(t *testing.T) { + var x, r int64 + var y uint32 + x = -9223372036854775808 + y = 0 + r = x >> y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r) + } + y = 1 + r = x >> y + if r != -4611686018427387904 { + t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r) + } + y = 4294967295 + r = x >> y + if r != -1 { + t.Errorf("-9223372036854775808 %s 4294967295 = %d, want -1", ">>", r) + } + x = -9223372036854775807 + y = 0 + r = x >> y + if r != -9223372036854775807 { + t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r) + } + y = 1 + r = x >> y + if r != -4611686018427387904 { + t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r) + } + y = 4294967295 + r = x >> y + if r != -1 { + t.Errorf("-9223372036854775807 %s 4294967295 = %d, want -1", ">>", r) + } + x = -4294967296 + y = 0 + r = x >> y + if r != -4294967296 { + t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r) + } + y = 1 + r = x >> y + if r != -2147483648 { + t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r) + } + y = 4294967295 + r = x >> y + if r != -1 { + t.Errorf("-4294967296 %s 4294967295 = %d, want -1", ">>", r) + } + x = -1 + y = 0 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", ">>", r) + } + y = 1 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", ">>", r) + } + y = 4294967295 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r) + } + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("0 %s 4294967295 
= %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r) + } + x = 4294967296 + y = 0 + r = x >> y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r) + } + y = 1 + r = x >> y + if r != 2147483648 { + t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("4294967296 %s 4294967295 = %d, want 0", ">>", r) + } + x = 9223372036854775806 + y = 0 + r = x >> y + if r != 9223372036854775806 { + t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r) + } + y = 1 + r = x >> y + if r != 4611686018427387903 { + t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("9223372036854775806 %s 4294967295 = %d, want 0", ">>", r) + } + x = 9223372036854775807 + y = 0 + r = x >> y + if r != 9223372036854775807 { + t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r) + } + y = 1 + r = x >> y + if r != 4611686018427387903 { + t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("9223372036854775807 %s 4294967295 = %d, want 0", ">>", r) + } +} +func TestConstFoldint64uint16lsh(t *testing.T) { + var x, r int64 + var y uint16 + x = -9223372036854775808 + y = 0 + r = x << y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("-9223372036854775808 %s 65535 = %d, want 0", "<<", r) + } + x = -9223372036854775807 + y = 0 + r = x << y + if r != 
-9223372036854775807 { + t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("-9223372036854775807 %s 65535 = %d, want 0", "<<", r) + } + x = -4294967296 + y = 0 + r = x << y + if r != -4294967296 { + t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r) + } + y = 1 + r = x << y + if r != -8589934592 { + t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("-4294967296 %s 65535 = %d, want 0", "<<", r) + } + x = -1 + y = 0 + r = x << y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("-1 %s 65535 = %d, want 0", "<<", r) + } + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("1 %s 65535 = %d, want 0", "<<", r) + } + x = 4294967296 + y = 0 + r = x << y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r) + } + y = 1 + r = x << y + if r != 8589934592 { + t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("4294967296 %s 65535 = %d, want 0", "<<", r) + } + x = 9223372036854775806 + y = 0 + r = x << y + if r != 9223372036854775806 { + t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r) + } + y = 1 + r = x << y + if r != -4 { + 
t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("9223372036854775806 %s 65535 = %d, want 0", "<<", r) + } + x = 9223372036854775807 + y = 0 + r = x << y + if r != 9223372036854775807 { + t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("9223372036854775807 %s 65535 = %d, want 0", "<<", r) + } +} +func TestConstFoldint64uint16rsh(t *testing.T) { + var x, r int64 + var y uint16 + x = -9223372036854775808 + y = 0 + r = x >> y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r) + } + y = 1 + r = x >> y + if r != -4611686018427387904 { + t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r) + } + y = 65535 + r = x >> y + if r != -1 { + t.Errorf("-9223372036854775808 %s 65535 = %d, want -1", ">>", r) + } + x = -9223372036854775807 + y = 0 + r = x >> y + if r != -9223372036854775807 { + t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r) + } + y = 1 + r = x >> y + if r != -4611686018427387904 { + t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r) + } + y = 65535 + r = x >> y + if r != -1 { + t.Errorf("-9223372036854775807 %s 65535 = %d, want -1", ">>", r) + } + x = -4294967296 + y = 0 + r = x >> y + if r != -4294967296 { + t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r) + } + y = 1 + r = x >> y + if r != -2147483648 { + t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r) + } + y = 65535 + r = x >> y + if r != -1 { + t.Errorf("-4294967296 %s 65535 = %d, want -1", ">>", r) + } + x = -1 + y = 0 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", ">>", r) + } + y = 1 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", ">>", r) + } + y = 65535 + r 
= x >> y + if r != -1 { + t.Errorf("-1 %s 65535 = %d, want -1", ">>", r) + } + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("1 %s 65535 = %d, want 0", ">>", r) + } + x = 4294967296 + y = 0 + r = x >> y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r) + } + y = 1 + r = x >> y + if r != 2147483648 { + t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("4294967296 %s 65535 = %d, want 0", ">>", r) + } + x = 9223372036854775806 + y = 0 + r = x >> y + if r != 9223372036854775806 { + t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r) + } + y = 1 + r = x >> y + if r != 4611686018427387903 { + t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("9223372036854775806 %s 65535 = %d, want 0", ">>", r) + } + x = 9223372036854775807 + y = 0 + r = x >> y + if r != 9223372036854775807 { + t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r) + } + y = 1 + r = x >> y + if r != 4611686018427387903 { + t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("9223372036854775807 %s 65535 = %d, want 0", ">>", r) + } +} +func TestConstFoldint64uint8lsh(t *testing.T) { + var x, r int64 + var y uint8 + x = -9223372036854775808 + y = 0 + r = x << y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r) + } + y 
= 1 + r = x << y + if r != 0 { + t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("-9223372036854775808 %s 255 = %d, want 0", "<<", r) + } + x = -9223372036854775807 + y = 0 + r = x << y + if r != -9223372036854775807 { + t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("-9223372036854775807 %s 255 = %d, want 0", "<<", r) + } + x = -4294967296 + y = 0 + r = x << y + if r != -4294967296 { + t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r) + } + y = 1 + r = x << y + if r != -8589934592 { + t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("-4294967296 %s 255 = %d, want 0", "<<", r) + } + x = -1 + y = 0 + r = x << y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("-1 %s 255 = %d, want 0", "<<", r) + } + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("1 %s 255 = %d, want 0", "<<", r) + } + x = 4294967296 + y = 0 + r = x << y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r) + } + y = 1 + r = x << y + if r != 8589934592 { + t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("4294967296 %s 255 = 
%d, want 0", "<<", r) + } + x = 9223372036854775806 + y = 0 + r = x << y + if r != 9223372036854775806 { + t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r) + } + y = 1 + r = x << y + if r != -4 { + t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("9223372036854775806 %s 255 = %d, want 0", "<<", r) + } + x = 9223372036854775807 + y = 0 + r = x << y + if r != 9223372036854775807 { + t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("9223372036854775807 %s 255 = %d, want 0", "<<", r) + } +} +func TestConstFoldint64uint8rsh(t *testing.T) { + var x, r int64 + var y uint8 + x = -9223372036854775808 + y = 0 + r = x >> y + if r != -9223372036854775808 { + t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r) + } + y = 1 + r = x >> y + if r != -4611686018427387904 { + t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r) + } + y = 255 + r = x >> y + if r != -1 { + t.Errorf("-9223372036854775808 %s 255 = %d, want -1", ">>", r) + } + x = -9223372036854775807 + y = 0 + r = x >> y + if r != -9223372036854775807 { + t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r) + } + y = 1 + r = x >> y + if r != -4611686018427387904 { + t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r) + } + y = 255 + r = x >> y + if r != -1 { + t.Errorf("-9223372036854775807 %s 255 = %d, want -1", ">>", r) + } + x = -4294967296 + y = 0 + r = x >> y + if r != -4294967296 { + t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r) + } + y = 1 + r = x >> y + if r != -2147483648 { + t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r) + } + y = 255 + r = x >> y + if r != -1 { + t.Errorf("-4294967296 %s 255 = %d, want -1", 
">>", r) + } + x = -1 + y = 0 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", ">>", r) + } + y = 1 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", ">>", r) + } + y = 255 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 255 = %d, want -1", ">>", r) + } + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("1 %s 255 = %d, want 0", ">>", r) + } + x = 4294967296 + y = 0 + r = x >> y + if r != 4294967296 { + t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r) + } + y = 1 + r = x >> y + if r != 2147483648 { + t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("4294967296 %s 255 = %d, want 0", ">>", r) + } + x = 9223372036854775806 + y = 0 + r = x >> y + if r != 9223372036854775806 { + t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r) + } + y = 1 + r = x >> y + if r != 4611686018427387903 { + t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("9223372036854775806 %s 255 = %d, want 0", ">>", r) + } + x = 9223372036854775807 + y = 0 + r = x >> y + if r != 9223372036854775807 { + t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r) + } + y = 1 + r = x >> y + if r != 4611686018427387903 { + t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("9223372036854775807 %s 255 = %d, want 0", ">>", r) + } +} +func TestConstFolduint32uint64lsh(t *testing.T) { + var x, r 
uint32 + var y uint64 + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 4294967295 + y = 0 + r = x << y + if r != 4294967295 { + t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r) + } + y = 1 + r = x << y + if r != 4294967294 { + t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("4294967295 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("4294967295 %s 18446744073709551615 = %d, want 0", "<<", r) + } +} +func TestConstFolduint32uint64rsh(t *testing.T) { + var x, r uint32 + var y uint64 + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("1 %s 
4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 4294967295 + y = 0 + r = x >> y + if r != 4294967295 { + t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r) + } + y = 1 + r = x >> y + if r != 2147483647 { + t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("4294967295 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("4294967295 %s 18446744073709551615 = %d, want 0", ">>", r) + } +} +func TestConstFolduint32uint32lsh(t *testing.T) { + var x, r uint32 + var y uint32 + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r) + } + x = 4294967295 + y = 0 + r = x << y + if r != 4294967295 { + t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r) + } + y = 1 + r = x << y + if r != 4294967294 { + t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("4294967295 %s 4294967295 = %d, want 0", "<<", r) + } +} +func TestConstFolduint32uint32rsh(t *testing.T) { + var x, r uint32 + var y uint32 + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if 
r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r) + } + x = 4294967295 + y = 0 + r = x >> y + if r != 4294967295 { + t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r) + } + y = 1 + r = x >> y + if r != 2147483647 { + t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("4294967295 %s 4294967295 = %d, want 0", ">>", r) + } +} +func TestConstFolduint32uint16lsh(t *testing.T) { + var x, r uint32 + var y uint16 + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("1 %s 65535 = %d, want 0", "<<", r) + } + x = 4294967295 + y = 0 + r = x << y + if r != 4294967295 { + t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r) + } + y = 1 + r = x << y + if r != 4294967294 { + t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("4294967295 %s 65535 = %d, want 0", "<<", r) + } +} +func TestConstFolduint32uint16rsh(t *testing.T) { + var x, r uint32 + var y uint16 + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 
{ + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("1 %s 65535 = %d, want 0", ">>", r) + } + x = 4294967295 + y = 0 + r = x >> y + if r != 4294967295 { + t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r) + } + y = 1 + r = x >> y + if r != 2147483647 { + t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("4294967295 %s 65535 = %d, want 0", ">>", r) + } +} +func TestConstFolduint32uint8lsh(t *testing.T) { + var x, r uint32 + var y uint8 + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("1 %s 255 = %d, want 0", "<<", r) + } + x = 4294967295 + y = 0 + r = x << y + if r != 4294967295 { + t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r) + } + y = 1 + r = x << y + if r != 4294967294 { + t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("4294967295 %s 255 = %d, want 0", "<<", r) + } +} +func TestConstFolduint32uint8rsh(t *testing.T) { + var x, r uint32 + var y uint8 + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("1 %s 255 = %d, want 0", ">>", 
r) + } + x = 4294967295 + y = 0 + r = x >> y + if r != 4294967295 { + t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r) + } + y = 1 + r = x >> y + if r != 2147483647 { + t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("4294967295 %s 255 = %d, want 0", ">>", r) + } +} +func TestConstFoldint32uint64lsh(t *testing.T) { + var x, r int32 + var y uint64 + x = -2147483648 + y = 0 + r = x << y + if r != -2147483648 { + t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("-2147483648 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("-2147483648 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = -2147483647 + y = 0 + r = x << y + if r != -2147483647 { + t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("-2147483647 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("-2147483647 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = -1 + y = 0 + r = x << y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, 
want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 2147483647 + y = 0 + r = x << y + if r != 2147483647 { + t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("2147483647 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("2147483647 %s 18446744073709551615 = %d, want 0", "<<", r) + } +} +func TestConstFoldint32uint64rsh(t *testing.T) { + var x, r int32 + var y uint64 + x = -2147483648 + y = 0 + r = x >> y + if r != -2147483648 { + t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r) + } + y = 1 + r = x >> y + if r != -1073741824 { + t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r) + } + y = 4294967296 + r = x >> y + if r != -1 { + t.Errorf("-2147483648 %s 4294967296 = %d, want -1", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != -1 { + t.Errorf("-2147483648 %s 18446744073709551615 = %d, want -1", ">>", r) + } + x = -2147483647 + y = 0 + r = x >> y + if r != -2147483647 { + t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r) + } + y = 1 + r = x >> y + if r != -1073741824 { + t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r) + } + y = 4294967296 + r = x >> y + if r != -1 { + t.Errorf("-2147483647 %s 4294967296 = %d, want -1", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != -1 { + t.Errorf("-2147483647 %s 
18446744073709551615 = %d, want -1", ">>", r) + } + x = -1 + y = 0 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", ">>", r) + } + y = 1 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", ">>", r) + } + y = 4294967296 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r) + } + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 2147483647 + y = 0 + r = x >> y + if r != 2147483647 { + t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r) + } + y = 1 + r = x >> y + if r != 1073741823 { + t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("2147483647 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("2147483647 %s 18446744073709551615 = %d, want 0", ">>", r) + } +} +func TestConstFoldint32uint32lsh(t *testing.T) { + var x, r int32 + var y uint32 + x = -2147483648 + y = 0 + r = x << y + if r != -2147483648 { + t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("-2147483648 %s 1 = %d, 
want 0", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("-2147483648 %s 4294967295 = %d, want 0", "<<", r) + } + x = -2147483647 + y = 0 + r = x << y + if r != -2147483647 { + t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("-2147483647 %s 4294967295 = %d, want 0", "<<", r) + } + x = -1 + y = 0 + r = x << y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r) + } + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r) + } + x = 2147483647 + y = 0 + r = x << y + if r != 2147483647 { + t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("2147483647 %s 4294967295 = %d, want 0", "<<", r) + } +} +func TestConstFoldint32uint32rsh(t *testing.T) { + var x, r int32 + var y uint32 + x = -2147483648 + y = 0 + r = x >> y + if r != -2147483648 { + t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r) + } + y = 1 + r = x >> y + if r != -1073741824 { + t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r) + } + y = 4294967295 + r = x >> y + if r != -1 { + 
t.Errorf("-2147483648 %s 4294967295 = %d, want -1", ">>", r) + } + x = -2147483647 + y = 0 + r = x >> y + if r != -2147483647 { + t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r) + } + y = 1 + r = x >> y + if r != -1073741824 { + t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r) + } + y = 4294967295 + r = x >> y + if r != -1 { + t.Errorf("-2147483647 %s 4294967295 = %d, want -1", ">>", r) + } + x = -1 + y = 0 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", ">>", r) + } + y = 1 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", ">>", r) + } + y = 4294967295 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r) + } + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r) + } + x = 2147483647 + y = 0 + r = x >> y + if r != 2147483647 { + t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r) + } + y = 1 + r = x >> y + if r != 1073741823 { + t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("2147483647 %s 4294967295 = %d, want 0", ">>", r) + } +} +func TestConstFoldint32uint16lsh(t *testing.T) { + var x, r int32 + var y uint16 + x = -2147483648 + y = 0 + r = x << y + if r != -2147483648 { + t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("-2147483648 %s 65535 = %d, want 0", "<<", r) + } + x 
= -2147483647 + y = 0 + r = x << y + if r != -2147483647 { + t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("-2147483647 %s 65535 = %d, want 0", "<<", r) + } + x = -1 + y = 0 + r = x << y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("-1 %s 65535 = %d, want 0", "<<", r) + } + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("1 %s 65535 = %d, want 0", "<<", r) + } + x = 2147483647 + y = 0 + r = x << y + if r != 2147483647 { + t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("2147483647 %s 65535 = %d, want 0", "<<", r) + } +} +func TestConstFoldint32uint16rsh(t *testing.T) { + var x, r int32 + var y uint16 + x = -2147483648 + y = 0 + r = x >> y + if r != -2147483648 { + t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r) + } + y = 1 + r = x >> y + if r != -1073741824 { + t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r) + } + y = 65535 + r = x >> y + if r != -1 { + t.Errorf("-2147483648 %s 65535 = %d, want -1", ">>", r) + } + x = -2147483647 + y = 0 + r = x >> y + if r != -2147483647 { + t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r) + } + y = 1 + 
r = x >> y + if r != -1073741824 { + t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r) + } + y = 65535 + r = x >> y + if r != -1 { + t.Errorf("-2147483647 %s 65535 = %d, want -1", ">>", r) + } + x = -1 + y = 0 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", ">>", r) + } + y = 1 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", ">>", r) + } + y = 65535 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 65535 = %d, want -1", ">>", r) + } + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("1 %s 65535 = %d, want 0", ">>", r) + } + x = 2147483647 + y = 0 + r = x >> y + if r != 2147483647 { + t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r) + } + y = 1 + r = x >> y + if r != 1073741823 { + t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("2147483647 %s 65535 = %d, want 0", ">>", r) + } +} +func TestConstFoldint32uint8lsh(t *testing.T) { + var x, r int32 + var y uint8 + x = -2147483648 + y = 0 + r = x << y + if r != -2147483648 { + t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("-2147483648 %s 255 = %d, want 0", "<<", r) + } + x = -2147483647 + y = 0 + r = x << y + if r != -2147483647 { + t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + 
t.Errorf("-2147483647 %s 255 = %d, want 0", "<<", r) + } + x = -1 + y = 0 + r = x << y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("-1 %s 255 = %d, want 0", "<<", r) + } + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("1 %s 255 = %d, want 0", "<<", r) + } + x = 2147483647 + y = 0 + r = x << y + if r != 2147483647 { + t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("2147483647 %s 255 = %d, want 0", "<<", r) + } +} +func TestConstFoldint32uint8rsh(t *testing.T) { + var x, r int32 + var y uint8 + x = -2147483648 + y = 0 + r = x >> y + if r != -2147483648 { + t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r) + } + y = 1 + r = x >> y + if r != -1073741824 { + t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r) + } + y = 255 + r = x >> y + if r != -1 { + t.Errorf("-2147483648 %s 255 = %d, want -1", ">>", r) + } + x = -2147483647 + y = 0 + r = x >> y + if r != -2147483647 { + t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r) + } + y = 1 + r = x >> y + if r != -1073741824 { + t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r) + } + y = 255 + r = x >> y + if r != -1 { + t.Errorf("-2147483647 %s 255 = %d, want -1", ">>", r) + } + x = -1 + y = 0 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", 
">>", r) + } + y = 1 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", ">>", r) + } + y = 255 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 255 = %d, want -1", ">>", r) + } + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("1 %s 255 = %d, want 0", ">>", r) + } + x = 2147483647 + y = 0 + r = x >> y + if r != 2147483647 { + t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r) + } + y = 1 + r = x >> y + if r != 1073741823 { + t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("2147483647 %s 255 = %d, want 0", ">>", r) + } +} +func TestConstFolduint16uint64lsh(t *testing.T) { + var x, r uint16 + var y uint64 + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 65535 + y = 0 + r = x << y + if r != 65535 { + t.Errorf("65535 %s 0 = %d, want 65535", 
"<<", r) + } + y = 1 + r = x << y + if r != 65534 { + t.Errorf("65535 %s 1 = %d, want 65534", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("65535 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("65535 %s 18446744073709551615 = %d, want 0", "<<", r) + } +} +func TestConstFolduint16uint64rsh(t *testing.T) { + var x, r uint16 + var y uint64 + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 65535 + y = 0 + r = x >> y + if r != 65535 { + t.Errorf("65535 %s 0 = %d, want 65535", ">>", r) + } + y = 1 + r = x >> y + if r != 32767 { + t.Errorf("65535 %s 1 = %d, want 32767", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("65535 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("65535 %s 18446744073709551615 = %d, want 0", ">>", r) + } +} +func TestConstFolduint16uint32lsh(t *testing.T) { + var x, r uint16 + var y uint32 + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r) + } + x = 
1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r) + } + x = 65535 + y = 0 + r = x << y + if r != 65535 { + t.Errorf("65535 %s 0 = %d, want 65535", "<<", r) + } + y = 1 + r = x << y + if r != 65534 { + t.Errorf("65535 %s 1 = %d, want 65534", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("65535 %s 4294967295 = %d, want 0", "<<", r) + } +} +func TestConstFolduint16uint32rsh(t *testing.T) { + var x, r uint16 + var y uint32 + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r) + } + x = 65535 + y = 0 + r = x >> y + if r != 65535 { + t.Errorf("65535 %s 0 = %d, want 65535", ">>", r) + } + y = 1 + r = x >> y + if r != 32767 { + t.Errorf("65535 %s 1 = %d, want 32767", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("65535 %s 4294967295 = %d, want 0", ">>", r) + } +} +func TestConstFolduint16uint16lsh(t *testing.T) { + var x, r uint16 + var y uint16 + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = 
%d, want 2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("1 %s 65535 = %d, want 0", "<<", r) + } + x = 65535 + y = 0 + r = x << y + if r != 65535 { + t.Errorf("65535 %s 0 = %d, want 65535", "<<", r) + } + y = 1 + r = x << y + if r != 65534 { + t.Errorf("65535 %s 1 = %d, want 65534", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("65535 %s 65535 = %d, want 0", "<<", r) + } +} +func TestConstFolduint16uint16rsh(t *testing.T) { + var x, r uint16 + var y uint16 + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("1 %s 65535 = %d, want 0", ">>", r) + } + x = 65535 + y = 0 + r = x >> y + if r != 65535 { + t.Errorf("65535 %s 0 = %d, want 65535", ">>", r) + } + y = 1 + r = x >> y + if r != 32767 { + t.Errorf("65535 %s 1 = %d, want 32767", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("65535 %s 65535 = %d, want 0", ">>", r) + } +} +func TestConstFolduint16uint8lsh(t *testing.T) { + var x, r uint16 + var y uint8 + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("1 %s 255 = %d, want 0", "<<", r) + } + x = 65535 + y = 0 + r = x << y + if r != 65535 { + t.Errorf("65535 %s 0 = %d, 
want 65535", "<<", r) + } + y = 1 + r = x << y + if r != 65534 { + t.Errorf("65535 %s 1 = %d, want 65534", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("65535 %s 255 = %d, want 0", "<<", r) + } +} +func TestConstFolduint16uint8rsh(t *testing.T) { + var x, r uint16 + var y uint8 + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("1 %s 255 = %d, want 0", ">>", r) + } + x = 65535 + y = 0 + r = x >> y + if r != 65535 { + t.Errorf("65535 %s 0 = %d, want 65535", ">>", r) + } + y = 1 + r = x >> y + if r != 32767 { + t.Errorf("65535 %s 1 = %d, want 32767", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("65535 %s 255 = %d, want 0", ">>", r) + } +} +func TestConstFoldint16uint64lsh(t *testing.T) { + var x, r int16 + var y uint64 + x = -32768 + y = 0 + r = x << y + if r != -32768 { + t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("-32768 %s 1 = %d, want 0", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("-32768 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("-32768 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = -32767 + y = 0 + r = x << y + if r != -32767 { + t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("-32767 %s 1 = %d, want 2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("-32767 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("-32767 %s 
18446744073709551615 = %d, want 0", "<<", r) + } + x = -1 + y = 0 + r = x << y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 32766 + y = 0 + r = x << y + if r != 32766 { + t.Errorf("32766 %s 0 = %d, want 32766", "<<", r) + } + y = 1 + r = x << y + if r != -4 { + t.Errorf("32766 %s 1 = %d, want -4", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("32766 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("32766 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 32767 + y = 0 + r = x << y + if r != 32767 { + t.Errorf("32767 %s 0 = %d, want 32767", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("32767 %s 1 = %d, want -2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("32767 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if 
r != 0 { + t.Errorf("32767 %s 18446744073709551615 = %d, want 0", "<<", r) + } +} +func TestConstFoldint16uint64rsh(t *testing.T) { + var x, r int16 + var y uint64 + x = -32768 + y = 0 + r = x >> y + if r != -32768 { + t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r) + } + y = 1 + r = x >> y + if r != -16384 { + t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r) + } + y = 4294967296 + r = x >> y + if r != -1 { + t.Errorf("-32768 %s 4294967296 = %d, want -1", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != -1 { + t.Errorf("-32768 %s 18446744073709551615 = %d, want -1", ">>", r) + } + x = -32767 + y = 0 + r = x >> y + if r != -32767 { + t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r) + } + y = 1 + r = x >> y + if r != -16384 { + t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r) + } + y = 4294967296 + r = x >> y + if r != -1 { + t.Errorf("-32767 %s 4294967296 = %d, want -1", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != -1 { + t.Errorf("-32767 %s 18446744073709551615 = %d, want -1", ">>", r) + } + x = -1 + y = 0 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", ">>", r) + } + y = 1 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", ">>", r) + } + y = 4294967296 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r) + } + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 
0", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 32766 + y = 0 + r = x >> y + if r != 32766 { + t.Errorf("32766 %s 0 = %d, want 32766", ">>", r) + } + y = 1 + r = x >> y + if r != 16383 { + t.Errorf("32766 %s 1 = %d, want 16383", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("32766 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("32766 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 32767 + y = 0 + r = x >> y + if r != 32767 { + t.Errorf("32767 %s 0 = %d, want 32767", ">>", r) + } + y = 1 + r = x >> y + if r != 16383 { + t.Errorf("32767 %s 1 = %d, want 16383", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("32767 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("32767 %s 18446744073709551615 = %d, want 0", ">>", r) + } +} +func TestConstFoldint16uint32lsh(t *testing.T) { + var x, r int16 + var y uint32 + x = -32768 + y = 0 + r = x << y + if r != -32768 { + t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("-32768 %s 1 = %d, want 0", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("-32768 %s 4294967295 = %d, want 0", "<<", r) + } + x = -32767 + y = 0 + r = x << y + if r != -32767 { + t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("-32767 %s 1 = %d, want 2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("-32767 %s 4294967295 = %d, want 0", "<<", r) + } + x = -1 + y = 0 + r = x << y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "<<", r) + } + y = 4294967295 + r = x << y + if r 
!= 0 { + t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r) + } + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r) + } + x = 32766 + y = 0 + r = x << y + if r != 32766 { + t.Errorf("32766 %s 0 = %d, want 32766", "<<", r) + } + y = 1 + r = x << y + if r != -4 { + t.Errorf("32766 %s 1 = %d, want -4", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("32766 %s 4294967295 = %d, want 0", "<<", r) + } + x = 32767 + y = 0 + r = x << y + if r != 32767 { + t.Errorf("32767 %s 0 = %d, want 32767", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("32767 %s 1 = %d, want -2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("32767 %s 4294967295 = %d, want 0", "<<", r) + } +} +func TestConstFoldint16uint32rsh(t *testing.T) { + var x, r int16 + var y uint32 + x = -32768 + y = 0 + r = x >> y + if r != -32768 { + t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r) + } + y = 1 + r = x >> y + if r != -16384 { + t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r) + } + y = 4294967295 + r = x >> y + if r != -1 { + t.Errorf("-32768 %s 4294967295 = %d, want -1", ">>", r) + } + x = -32767 + y = 0 + r = x >> y + if r != -32767 { + t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r) + } + y = 1 + r = x >> y + if r != -16384 { + t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r) + } + y = 4294967295 + r = x >> y + if r != -1 { + t.Errorf("-32767 %s 4294967295 = %d, want -1", ">>", r) + } + x = -1 + y = 0 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", 
">>", r) + } + y = 1 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", ">>", r) + } + y = 4294967295 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r) + } + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r) + } + x = 32766 + y = 0 + r = x >> y + if r != 32766 { + t.Errorf("32766 %s 0 = %d, want 32766", ">>", r) + } + y = 1 + r = x >> y + if r != 16383 { + t.Errorf("32766 %s 1 = %d, want 16383", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("32766 %s 4294967295 = %d, want 0", ">>", r) + } + x = 32767 + y = 0 + r = x >> y + if r != 32767 { + t.Errorf("32767 %s 0 = %d, want 32767", ">>", r) + } + y = 1 + r = x >> y + if r != 16383 { + t.Errorf("32767 %s 1 = %d, want 16383", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("32767 %s 4294967295 = %d, want 0", ">>", r) + } +} +func TestConstFoldint16uint16lsh(t *testing.T) { + var x, r int16 + var y uint16 + x = -32768 + y = 0 + r = x << y + if r != -32768 { + t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("-32768 %s 1 = %d, want 0", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("-32768 %s 65535 = %d, want 0", "<<", r) + } + x = -32767 + y = 0 + r = x << y + if r != -32767 { + t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("-32767 %s 1 = %d, want 2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("-32767 %s 65535 = %d, 
want 0", "<<", r) + } + x = -1 + y = 0 + r = x << y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("-1 %s 65535 = %d, want 0", "<<", r) + } + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("1 %s 65535 = %d, want 0", "<<", r) + } + x = 32766 + y = 0 + r = x << y + if r != 32766 { + t.Errorf("32766 %s 0 = %d, want 32766", "<<", r) + } + y = 1 + r = x << y + if r != -4 { + t.Errorf("32766 %s 1 = %d, want -4", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("32766 %s 65535 = %d, want 0", "<<", r) + } + x = 32767 + y = 0 + r = x << y + if r != 32767 { + t.Errorf("32767 %s 0 = %d, want 32767", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("32767 %s 1 = %d, want -2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("32767 %s 65535 = %d, want 0", "<<", r) + } +} +func TestConstFoldint16uint16rsh(t *testing.T) { + var x, r int16 + var y uint16 + x = -32768 + y = 0 + r = x >> y + if r != -32768 { + t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r) + } + y = 1 + r = x >> y + if r != -16384 { + t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r) + } + y = 65535 + r = x >> y + if r != -1 { + t.Errorf("-32768 %s 65535 = %d, want -1", ">>", r) + } + x = -32767 + y = 0 + r = x >> y + if r != -32767 { + t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r) + } + y = 1 + r = x >> y + if r != -16384 { + t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r) + } + y = 65535 
+ r = x >> y + if r != -1 { + t.Errorf("-32767 %s 65535 = %d, want -1", ">>", r) + } + x = -1 + y = 0 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", ">>", r) + } + y = 1 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", ">>", r) + } + y = 65535 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 65535 = %d, want -1", ">>", r) + } + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("1 %s 65535 = %d, want 0", ">>", r) + } + x = 32766 + y = 0 + r = x >> y + if r != 32766 { + t.Errorf("32766 %s 0 = %d, want 32766", ">>", r) + } + y = 1 + r = x >> y + if r != 16383 { + t.Errorf("32766 %s 1 = %d, want 16383", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("32766 %s 65535 = %d, want 0", ">>", r) + } + x = 32767 + y = 0 + r = x >> y + if r != 32767 { + t.Errorf("32767 %s 0 = %d, want 32767", ">>", r) + } + y = 1 + r = x >> y + if r != 16383 { + t.Errorf("32767 %s 1 = %d, want 16383", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("32767 %s 65535 = %d, want 0", ">>", r) + } +} +func TestConstFoldint16uint8lsh(t *testing.T) { + var x, r int16 + var y uint8 + x = -32768 + y = 0 + r = x << y + if r != -32768 { + t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("-32768 %s 1 = %d, want 0", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("-32768 %s 255 = %d, want 0", "<<", r) + } + x = -32767 + y = 0 + r = x << y + if r != -32767 { + t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + 
t.Errorf("-32767 %s 1 = %d, want 2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("-32767 %s 255 = %d, want 0", "<<", r) + } + x = -1 + y = 0 + r = x << y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("-1 %s 255 = %d, want 0", "<<", r) + } + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("1 %s 255 = %d, want 0", "<<", r) + } + x = 32766 + y = 0 + r = x << y + if r != 32766 { + t.Errorf("32766 %s 0 = %d, want 32766", "<<", r) + } + y = 1 + r = x << y + if r != -4 { + t.Errorf("32766 %s 1 = %d, want -4", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("32766 %s 255 = %d, want 0", "<<", r) + } + x = 32767 + y = 0 + r = x << y + if r != 32767 { + t.Errorf("32767 %s 0 = %d, want 32767", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("32767 %s 1 = %d, want -2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("32767 %s 255 = %d, want 0", "<<", r) + } +} +func TestConstFoldint16uint8rsh(t *testing.T) { + var x, r int16 + var y uint8 + x = -32768 + y = 0 + r = x >> y + if r != -32768 { + t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r) + } + y = 1 + r = x >> y + if r != -16384 { + t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r) + } + y = 255 + r = x >> y + if r != -1 { + t.Errorf("-32768 %s 255 = %d, want -1", ">>", r) + } + x = -32767 + y = 0 + r = x >> y + if r != -32767 { + t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r) + } + y = 1 + r = x 
>> y + if r != -16384 { + t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r) + } + y = 255 + r = x >> y + if r != -1 { + t.Errorf("-32767 %s 255 = %d, want -1", ">>", r) + } + x = -1 + y = 0 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", ">>", r) + } + y = 1 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", ">>", r) + } + y = 255 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 255 = %d, want -1", ">>", r) + } + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("1 %s 255 = %d, want 0", ">>", r) + } + x = 32766 + y = 0 + r = x >> y + if r != 32766 { + t.Errorf("32766 %s 0 = %d, want 32766", ">>", r) + } + y = 1 + r = x >> y + if r != 16383 { + t.Errorf("32766 %s 1 = %d, want 16383", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("32766 %s 255 = %d, want 0", ">>", r) + } + x = 32767 + y = 0 + r = x >> y + if r != 32767 { + t.Errorf("32767 %s 0 = %d, want 32767", ">>", r) + } + y = 1 + r = x >> y + if r != 16383 { + t.Errorf("32767 %s 1 = %d, want 16383", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("32767 %s 255 = %d, want 0", ">>", r) + } +} +func TestConstFolduint8uint64lsh(t *testing.T) { + var x, r uint8 + var y uint64 + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 
0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 255 + y = 0 + r = x << y + if r != 255 { + t.Errorf("255 %s 0 = %d, want 255", "<<", r) + } + y = 1 + r = x << y + if r != 254 { + t.Errorf("255 %s 1 = %d, want 254", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("255 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("255 %s 18446744073709551615 = %d, want 0", "<<", r) + } +} +func TestConstFolduint8uint64rsh(t *testing.T) { + var x, r uint8 + var y uint64 + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 255 + y = 0 + r = x >> y + if r != 255 { + t.Errorf("255 %s 0 = %d, want 255", ">>", r) + } + y = 1 + r = x >> y + if r != 127 { + t.Errorf("255 %s 1 = %d, want 127", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("255 %s 4294967296 = %d, want 0", ">>", r) + } + y = 
18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("255 %s 18446744073709551615 = %d, want 0", ">>", r) + } +} +func TestConstFolduint8uint32lsh(t *testing.T) { + var x, r uint8 + var y uint32 + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r) + } + x = 255 + y = 0 + r = x << y + if r != 255 { + t.Errorf("255 %s 0 = %d, want 255", "<<", r) + } + y = 1 + r = x << y + if r != 254 { + t.Errorf("255 %s 1 = %d, want 254", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("255 %s 4294967295 = %d, want 0", "<<", r) + } +} +func TestConstFolduint8uint32rsh(t *testing.T) { + var x, r uint8 + var y uint32 + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r) + } + x = 255 + y = 0 + r = x >> y + if r != 255 { + t.Errorf("255 %s 0 = %d, want 255", ">>", r) + } + y = 1 + r = x >> y + if r != 127 { + t.Errorf("255 %s 1 = %d, want 127", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("255 %s 4294967295 = %d, want 0", ">>", r) + } +} +func TestConstFolduint8uint16lsh(t 
*testing.T) { + var x, r uint8 + var y uint16 + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("1 %s 65535 = %d, want 0", "<<", r) + } + x = 255 + y = 0 + r = x << y + if r != 255 { + t.Errorf("255 %s 0 = %d, want 255", "<<", r) + } + y = 1 + r = x << y + if r != 254 { + t.Errorf("255 %s 1 = %d, want 254", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("255 %s 65535 = %d, want 0", "<<", r) + } +} +func TestConstFolduint8uint16rsh(t *testing.T) { + var x, r uint8 + var y uint16 + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("1 %s 65535 = %d, want 0", ">>", r) + } + x = 255 + y = 0 + r = x >> y + if r != 255 { + t.Errorf("255 %s 0 = %d, want 255", ">>", r) + } + y = 1 + r = x >> y + if r != 127 { + t.Errorf("255 %s 1 = %d, want 127", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("255 %s 65535 = %d, want 0", ">>", r) + } +} +func TestConstFolduint8uint8lsh(t *testing.T) { + var x, r uint8 + var y uint8 + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 255 
+ r = x << y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("1 %s 255 = %d, want 0", "<<", r) + } + x = 255 + y = 0 + r = x << y + if r != 255 { + t.Errorf("255 %s 0 = %d, want 255", "<<", r) + } + y = 1 + r = x << y + if r != 254 { + t.Errorf("255 %s 1 = %d, want 254", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("255 %s 255 = %d, want 0", "<<", r) + } +} +func TestConstFolduint8uint8rsh(t *testing.T) { + var x, r uint8 + var y uint8 + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("1 %s 255 = %d, want 0", ">>", r) + } + x = 255 + y = 0 + r = x >> y + if r != 255 { + t.Errorf("255 %s 0 = %d, want 255", ">>", r) + } + y = 1 + r = x >> y + if r != 127 { + t.Errorf("255 %s 1 = %d, want 127", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("255 %s 255 = %d, want 0", ">>", r) + } +} +func TestConstFoldint8uint64lsh(t *testing.T) { + var x, r int8 + var y uint64 + x = -128 + y = 0 + r = x << y + if r != -128 { + t.Errorf("-128 %s 0 = %d, want -128", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("-128 %s 1 = %d, want 0", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("-128 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("-128 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = -127 + y = 0 + r 
= x << y + if r != -127 { + t.Errorf("-127 %s 0 = %d, want -127", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("-127 %s 1 = %d, want 2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("-127 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("-127 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = -1 + y = 0 + r = x << y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 126 + y = 0 + r = x << y + if r != 126 { + t.Errorf("126 %s 0 = %d, want 126", "<<", r) + } + y = 1 + r = x << y + if r != -4 { + t.Errorf("126 %s 1 = %d, want -4", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("126 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("126 %s 18446744073709551615 = %d, want 0", "<<", r) + } + x = 127 + y = 
0 + r = x << y + if r != 127 { + t.Errorf("127 %s 0 = %d, want 127", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("127 %s 1 = %d, want -2", "<<", r) + } + y = 4294967296 + r = x << y + if r != 0 { + t.Errorf("127 %s 4294967296 = %d, want 0", "<<", r) + } + y = 18446744073709551615 + r = x << y + if r != 0 { + t.Errorf("127 %s 18446744073709551615 = %d, want 0", "<<", r) + } +} +func TestConstFoldint8uint64rsh(t *testing.T) { + var x, r int8 + var y uint64 + x = -128 + y = 0 + r = x >> y + if r != -128 { + t.Errorf("-128 %s 0 = %d, want -128", ">>", r) + } + y = 1 + r = x >> y + if r != -64 { + t.Errorf("-128 %s 1 = %d, want -64", ">>", r) + } + y = 4294967296 + r = x >> y + if r != -1 { + t.Errorf("-128 %s 4294967296 = %d, want -1", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != -1 { + t.Errorf("-128 %s 18446744073709551615 = %d, want -1", ">>", r) + } + x = -127 + y = 0 + r = x >> y + if r != -127 { + t.Errorf("-127 %s 0 = %d, want -127", ">>", r) + } + y = 1 + r = x >> y + if r != -64 { + t.Errorf("-127 %s 1 = %d, want -64", ">>", r) + } + y = 4294967296 + r = x >> y + if r != -1 { + t.Errorf("-127 %s 4294967296 = %d, want -1", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != -1 { + t.Errorf("-127 %s 18446744073709551615 = %d, want -1", ">>", r) + } + x = -1 + y = 0 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", ">>", r) + } + y = 1 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", ">>", r) + } + y = 4294967296 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r) + } + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r) + } + y = 
18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 126 + y = 0 + r = x >> y + if r != 126 { + t.Errorf("126 %s 0 = %d, want 126", ">>", r) + } + y = 1 + r = x >> y + if r != 63 { + t.Errorf("126 %s 1 = %d, want 63", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("126 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("126 %s 18446744073709551615 = %d, want 0", ">>", r) + } + x = 127 + y = 0 + r = x >> y + if r != 127 { + t.Errorf("127 %s 0 = %d, want 127", ">>", r) + } + y = 1 + r = x >> y + if r != 63 { + t.Errorf("127 %s 1 = %d, want 63", ">>", r) + } + y = 4294967296 + r = x >> y + if r != 0 { + t.Errorf("127 %s 4294967296 = %d, want 0", ">>", r) + } + y = 18446744073709551615 + r = x >> y + if r != 0 { + t.Errorf("127 %s 18446744073709551615 = %d, want 0", ">>", r) + } +} +func TestConstFoldint8uint32lsh(t *testing.T) { + var x, r int8 + var y uint32 + x = -128 + y = 0 + r = x << y + if r != -128 { + t.Errorf("-128 %s 0 = %d, want -128", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("-128 %s 1 = %d, want 0", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("-128 %s 4294967295 = %d, want 0", "<<", r) + } + x = -127 + y = 0 + r = x << y + if r != -127 { + t.Errorf("-127 %s 0 = %d, want -127", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("-127 %s 1 = %d, want 2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("-127 %s 4294967295 = %d, want 0", "<<", r) + } + x = -1 + y = 
0 + r = x << y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r) + } + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r) + } + x = 126 + y = 0 + r = x << y + if r != 126 { + t.Errorf("126 %s 0 = %d, want 126", "<<", r) + } + y = 1 + r = x << y + if r != -4 { + t.Errorf("126 %s 1 = %d, want -4", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("126 %s 4294967295 = %d, want 0", "<<", r) + } + x = 127 + y = 0 + r = x << y + if r != 127 { + t.Errorf("127 %s 0 = %d, want 127", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("127 %s 1 = %d, want -2", "<<", r) + } + y = 4294967295 + r = x << y + if r != 0 { + t.Errorf("127 %s 4294967295 = %d, want 0", "<<", r) + } +} +func TestConstFoldint8uint32rsh(t *testing.T) { + var x, r int8 + var y uint32 + x = -128 + y = 0 + r = x >> y + if r != -128 { + t.Errorf("-128 %s 0 = %d, want -128", ">>", r) + } + y = 1 + r = x >> y + if r != -64 { + t.Errorf("-128 %s 1 = %d, want -64", ">>", r) + } + y = 4294967295 + r = x >> y + if r != -1 { + t.Errorf("-128 %s 4294967295 = %d, want -1", ">>", r) + } + x = -127 + y = 0 + r = x >> y + if r != -127 { + t.Errorf("-127 %s 0 = %d, want -127", ">>", r) + } + y = 1 + r = x >> y + if r != -64 { + t.Errorf("-127 %s 1 = %d, want -64", ">>", r) + } + y = 4294967295 + r = x >> y + if r != -1 { + 
t.Errorf("-127 %s 4294967295 = %d, want -1", ">>", r) + } + x = -1 + y = 0 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", ">>", r) + } + y = 1 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", ">>", r) + } + y = 4294967295 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r) + } + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r) + } + x = 126 + y = 0 + r = x >> y + if r != 126 { + t.Errorf("126 %s 0 = %d, want 126", ">>", r) + } + y = 1 + r = x >> y + if r != 63 { + t.Errorf("126 %s 1 = %d, want 63", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("126 %s 4294967295 = %d, want 0", ">>", r) + } + x = 127 + y = 0 + r = x >> y + if r != 127 { + t.Errorf("127 %s 0 = %d, want 127", ">>", r) + } + y = 1 + r = x >> y + if r != 63 { + t.Errorf("127 %s 1 = %d, want 63", ">>", r) + } + y = 4294967295 + r = x >> y + if r != 0 { + t.Errorf("127 %s 4294967295 = %d, want 0", ">>", r) + } +} +func TestConstFoldint8uint16lsh(t *testing.T) { + var x, r int8 + var y uint16 + x = -128 + y = 0 + r = x << y + if r != -128 { + t.Errorf("-128 %s 0 = %d, want -128", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("-128 %s 1 = %d, want 0", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("-128 %s 65535 = %d, want 0", "<<", r) + } + x = -127 + y = 0 + r = x << y + if r != -127 { + t.Errorf("-127 %s 0 = %d, want -127", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("-127 %s 1 = %d, want 2", "<<", 
r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("-127 %s 65535 = %d, want 0", "<<", r) + } + x = -1 + y = 0 + r = x << y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("-1 %s 65535 = %d, want 0", "<<", r) + } + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("1 %s 65535 = %d, want 0", "<<", r) + } + x = 126 + y = 0 + r = x << y + if r != 126 { + t.Errorf("126 %s 0 = %d, want 126", "<<", r) + } + y = 1 + r = x << y + if r != -4 { + t.Errorf("126 %s 1 = %d, want -4", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("126 %s 65535 = %d, want 0", "<<", r) + } + x = 127 + y = 0 + r = x << y + if r != 127 { + t.Errorf("127 %s 0 = %d, want 127", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("127 %s 1 = %d, want -2", "<<", r) + } + y = 65535 + r = x << y + if r != 0 { + t.Errorf("127 %s 65535 = %d, want 0", "<<", r) + } +} +func TestConstFoldint8uint16rsh(t *testing.T) { + var x, r int8 + var y uint16 + x = -128 + y = 0 + r = x >> y + if r != -128 { + t.Errorf("-128 %s 0 = %d, want -128", ">>", r) + } + y = 1 + r = x >> y + if r != -64 { + t.Errorf("-128 %s 1 = %d, want -64", ">>", r) + } + y = 65535 + r = x >> y + if r != -1 { + t.Errorf("-128 %s 65535 = %d, want -1", ">>", r) + } + x = -127 + y = 0 + r = x >> y + if r != -127 { + t.Errorf("-127 %s 0 = %d, want -127", ">>", r) + } + y = 1 + r = x >> y + if r != -64 { + t.Errorf("-127 %s 1 = %d, want -64", ">>", 
r) + } + y = 65535 + r = x >> y + if r != -1 { + t.Errorf("-127 %s 65535 = %d, want -1", ">>", r) + } + x = -1 + y = 0 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", ">>", r) + } + y = 1 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", ">>", r) + } + y = 65535 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 65535 = %d, want -1", ">>", r) + } + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("0 %s 65535 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("1 %s 65535 = %d, want 0", ">>", r) + } + x = 126 + y = 0 + r = x >> y + if r != 126 { + t.Errorf("126 %s 0 = %d, want 126", ">>", r) + } + y = 1 + r = x >> y + if r != 63 { + t.Errorf("126 %s 1 = %d, want 63", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("126 %s 65535 = %d, want 0", ">>", r) + } + x = 127 + y = 0 + r = x >> y + if r != 127 { + t.Errorf("127 %s 0 = %d, want 127", ">>", r) + } + y = 1 + r = x >> y + if r != 63 { + t.Errorf("127 %s 1 = %d, want 63", ">>", r) + } + y = 65535 + r = x >> y + if r != 0 { + t.Errorf("127 %s 65535 = %d, want 0", ">>", r) + } +} +func TestConstFoldint8uint8lsh(t *testing.T) { + var x, r int8 + var y uint8 + x = -128 + y = 0 + r = x << y + if r != -128 { + t.Errorf("-128 %s 0 = %d, want -128", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("-128 %s 1 = %d, want 0", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("-128 %s 255 = %d, want 0", "<<", r) + } + x = -127 + y = 0 + r = x << y + if r != -127 { + t.Errorf("-127 %s 0 = %d, want -127", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("-127 %s 1 = %d, want 2", "<<", r) + } + y = 
255 + r = x << y + if r != 0 { + t.Errorf("-127 %s 255 = %d, want 0", "<<", r) + } + x = -1 + y = 0 + r = x << y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("-1 %s 1 = %d, want -2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("-1 %s 255 = %d, want 0", "<<", r) + } + x = 0 + y = 0 + r = x << y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", "<<", r) + } + y = 1 + r = x << y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", "<<", r) + } + x = 1 + y = 0 + r = x << y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", "<<", r) + } + y = 1 + r = x << y + if r != 2 { + t.Errorf("1 %s 1 = %d, want 2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("1 %s 255 = %d, want 0", "<<", r) + } + x = 126 + y = 0 + r = x << y + if r != 126 { + t.Errorf("126 %s 0 = %d, want 126", "<<", r) + } + y = 1 + r = x << y + if r != -4 { + t.Errorf("126 %s 1 = %d, want -4", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("126 %s 255 = %d, want 0", "<<", r) + } + x = 127 + y = 0 + r = x << y + if r != 127 { + t.Errorf("127 %s 0 = %d, want 127", "<<", r) + } + y = 1 + r = x << y + if r != -2 { + t.Errorf("127 %s 1 = %d, want -2", "<<", r) + } + y = 255 + r = x << y + if r != 0 { + t.Errorf("127 %s 255 = %d, want 0", "<<", r) + } +} +func TestConstFoldint8uint8rsh(t *testing.T) { + var x, r int8 + var y uint8 + x = -128 + y = 0 + r = x >> y + if r != -128 { + t.Errorf("-128 %s 0 = %d, want -128", ">>", r) + } + y = 1 + r = x >> y + if r != -64 { + t.Errorf("-128 %s 1 = %d, want -64", ">>", r) + } + y = 255 + r = x >> y + if r != -1 { + t.Errorf("-128 %s 255 = %d, want -1", ">>", r) + } + x = -127 + y = 0 + r = x >> y + if r != -127 { + t.Errorf("-127 %s 0 = %d, want -127", ">>", r) + } + y = 1 + r = x >> y + if r != -64 { + t.Errorf("-127 %s 1 = %d, want -64", ">>", r) + } + y = 255 + r = x >> y + if r != -1 
{ + t.Errorf("-127 %s 255 = %d, want -1", ">>", r) + } + x = -1 + y = 0 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 0 = %d, want -1", ">>", r) + } + y = 1 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 1 = %d, want -1", ">>", r) + } + y = 255 + r = x >> y + if r != -1 { + t.Errorf("-1 %s 255 = %d, want -1", ">>", r) + } + x = 0 + y = 0 + r = x >> y + if r != 0 { + t.Errorf("0 %s 0 = %d, want 0", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("0 %s 1 = %d, want 0", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("0 %s 255 = %d, want 0", ">>", r) + } + x = 1 + y = 0 + r = x >> y + if r != 1 { + t.Errorf("1 %s 0 = %d, want 1", ">>", r) + } + y = 1 + r = x >> y + if r != 0 { + t.Errorf("1 %s 1 = %d, want 0", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("1 %s 255 = %d, want 0", ">>", r) + } + x = 126 + y = 0 + r = x >> y + if r != 126 { + t.Errorf("126 %s 0 = %d, want 126", ">>", r) + } + y = 1 + r = x >> y + if r != 63 { + t.Errorf("126 %s 1 = %d, want 63", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("126 %s 255 = %d, want 0", ">>", r) + } + x = 127 + y = 0 + r = x >> y + if r != 127 { + t.Errorf("127 %s 0 = %d, want 127", ">>", r) + } + y = 1 + r = x >> y + if r != 63 { + t.Errorf("127 %s 1 = %d, want 63", ">>", r) + } + y = 255 + r = x >> y + if r != 0 { + t.Errorf("127 %s 255 = %d, want 0", ">>", r) + } +} +func TestConstFoldCompareuint64(t *testing.T) { + { + var x uint64 = 0 + var y uint64 = 0 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint64 = 0 + var y uint64 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > 
%d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x uint64 = 0 + var y uint64 = 4294967296 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x uint64 = 0 + var y uint64 = 18446744073709551615 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x uint64 = 1 + var y uint64 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint64 = 1 + var y uint64 = 1 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint64 = 1 + var y uint64 = 4294967296 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x uint64 = 1 + var y uint64 = 18446744073709551615 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + 
t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x uint64 = 4294967296 + var y uint64 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint64 = 4294967296 + var y uint64 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint64 = 4294967296 + var y uint64 = 4294967296 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint64 = 4294967296 + var y uint64 = 18446744073709551615 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x uint64 = 18446744073709551615 + var y uint64 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + 
t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint64 = 18446744073709551615 + var y uint64 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint64 = 18446744073709551615 + var y uint64 = 4294967296 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint64 = 18446744073709551615 + var y uint64 = 18446744073709551615 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } +} +func TestConstFoldCompareint64(t *testing.T) { + { + var x int64 = -9223372036854775808 + var y int64 = -9223372036854775808 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = -9223372036854775808 + var y int64 = -9223372036854775807 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -9223372036854775808 + 
var y int64 = -4294967296 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -9223372036854775808 + var y int64 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -9223372036854775808 + var y int64 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -9223372036854775808 + var y int64 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -9223372036854775808 + var y int64 = 4294967296 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -9223372036854775808 + var y int64 = 9223372036854775806 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } 
+ if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -9223372036854775808 + var y int64 = 9223372036854775807 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -9223372036854775807 + var y int64 = -9223372036854775808 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = -9223372036854775807 + var y int64 = -9223372036854775807 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = -9223372036854775807 + var y int64 = -4294967296 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -9223372036854775807 + var y int64 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, 
y) + } + } + { + var x int64 = -9223372036854775807 + var y int64 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -9223372036854775807 + var y int64 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -9223372036854775807 + var y int64 = 4294967296 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -9223372036854775807 + var y int64 = 9223372036854775806 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -9223372036854775807 + var y int64 = 9223372036854775807 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -4294967296 + var y int64 = -9223372036854775808 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + 
t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = -4294967296 + var y int64 = -9223372036854775807 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = -4294967296 + var y int64 = -4294967296 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = -4294967296 + var y int64 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -4294967296 + var y int64 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -4294967296 + var y int64 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", 
x, y) + } + } + { + var x int64 = -4294967296 + var y int64 = 4294967296 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -4294967296 + var y int64 = 9223372036854775806 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -4294967296 + var y int64 = 9223372036854775807 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -1 + var y int64 = -9223372036854775808 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = -1 + var y int64 = -9223372036854775807 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = -1 + var y int64 = -4294967296 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + 
t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = -1 + var y int64 = -1 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = -1 + var y int64 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -1 + var y int64 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -1 + var y int64 = 4294967296 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -1 + var y int64 = 9223372036854775806 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = -1 + var y int64 = 9223372036854775807 + if x == y { + 
t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = 0 + var y int64 = -9223372036854775808 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 0 + var y int64 = -9223372036854775807 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 0 + var y int64 = -4294967296 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 0 + var y int64 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 0 + var y int64 = 0 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + 
t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 0 + var y int64 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = 0 + var y int64 = 4294967296 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = 0 + var y int64 = 9223372036854775806 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = 0 + var y int64 = 9223372036854775807 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = 1 + var y int64 = -9223372036854775808 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 1 + var y int64 = -9223372036854775807 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, 
y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 1 + var y int64 = -4294967296 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 1 + var y int64 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 1 + var y int64 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 1 + var y int64 = 1 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 1 + var y int64 = 4294967296 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = 1 + var y int64 = 9223372036854775806 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != 
y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = 1 + var y int64 = 9223372036854775807 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = 4294967296 + var y int64 = -9223372036854775808 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 4294967296 + var y int64 = -9223372036854775807 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 4294967296 + var y int64 = -4294967296 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 4294967296 + var y int64 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= 
y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 4294967296 + var y int64 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 4294967296 + var y int64 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 4294967296 + var y int64 = 4294967296 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 4294967296 + var y int64 = 9223372036854775806 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = 4294967296 + var y int64 = 9223372036854775807 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = 9223372036854775806 + var y int64 = -9223372036854775808 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != 
%d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 9223372036854775806 + var y int64 = -9223372036854775807 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 9223372036854775806 + var y int64 = -4294967296 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 9223372036854775806 + var y int64 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 9223372036854775806 + var y int64 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 9223372036854775806 + var y int64 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + 
t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 9223372036854775806 + var y int64 = 4294967296 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 9223372036854775806 + var y int64 = 9223372036854775806 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 9223372036854775806 + var y int64 = 9223372036854775807 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int64 = 9223372036854775807 + var y int64 = -9223372036854775808 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 9223372036854775807 + var y int64 = -9223372036854775807 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 9223372036854775807 + var y int64 = -4294967296 + if x == y { + 
t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 9223372036854775807 + var y int64 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 9223372036854775807 + var y int64 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 9223372036854775807 + var y int64 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 9223372036854775807 + var y int64 = 4294967296 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 9223372036854775807 + var y int64 = 9223372036854775806 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + 
if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int64 = 9223372036854775807 + var y int64 = 9223372036854775807 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } +} +func TestConstFoldCompareuint32(t *testing.T) { + { + var x uint32 = 0 + var y uint32 = 0 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint32 = 0 + var y uint32 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x uint32 = 0 + var y uint32 = 4294967295 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x uint32 = 1 + var y uint32 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint32 = 1 + var y uint32 = 1 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + 
} + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint32 = 1 + var y uint32 = 4294967295 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x uint32 = 4294967295 + var y uint32 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint32 = 4294967295 + var y uint32 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint32 = 4294967295 + var y uint32 = 4294967295 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } +} +func TestConstFoldCompareint32(t *testing.T) { + { + var x int32 = -2147483648 + var y int32 = -2147483648 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= 
%d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = -2147483648 + var y int32 = -2147483647 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int32 = -2147483648 + var y int32 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int32 = -2147483648 + var y int32 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int32 = -2147483648 + var y int32 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int32 = -2147483648 + var y int32 = 2147483647 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int32 = -2147483647 + var y int32 = -2147483648 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) 
+ } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = -2147483647 + var y int32 = -2147483647 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = -2147483647 + var y int32 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int32 = -2147483647 + var y int32 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int32 = -2147483647 + var y int32 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int32 = -2147483647 + var y int32 = 2147483647 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int32 = -1 + 
var y int32 = -2147483648 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = -1 + var y int32 = -2147483647 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = -1 + var y int32 = -1 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = -1 + var y int32 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int32 = -1 + var y int32 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int32 = -1 + var y int32 = 2147483647 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } 
+ if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int32 = 0 + var y int32 = -2147483648 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = 0 + var y int32 = -2147483647 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = 0 + var y int32 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = 0 + var y int32 = 0 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = 0 + var y int32 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int32 = 0 + var y int32 = 2147483647 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d 
> %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int32 = 1 + var y int32 = -2147483648 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = 1 + var y int32 = -2147483647 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = 1 + var y int32 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = 1 + var y int32 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = 1 + var y int32 = 1 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = 1 + var y int32 = 2147483647 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x 
< y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int32 = 2147483647 + var y int32 = -2147483648 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = 2147483647 + var y int32 = -2147483647 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = 2147483647 + var y int32 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = 2147483647 + var y int32 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = 2147483647 + var y int32 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int32 = 2147483647 + var y 
int32 = 2147483647 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } +} +func TestConstFoldCompareuint16(t *testing.T) { + { + var x uint16 = 0 + var y uint16 = 0 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint16 = 0 + var y uint16 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x uint16 = 0 + var y uint16 = 65535 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x uint16 = 1 + var y uint16 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint16 = 1 + var y uint16 = 1 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + 
t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint16 = 1 + var y uint16 = 65535 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x uint16 = 65535 + var y uint16 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint16 = 65535 + var y uint16 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint16 = 65535 + var y uint16 = 65535 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } +} +func TestConstFoldCompareint16(t *testing.T) { + { + var x int16 = -32768 + var y int16 = -32768 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = -32768 + var y int16 = -32767 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + 
t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = -32768 + var y int16 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = -32768 + var y int16 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = -32768 + var y int16 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = -32768 + var y int16 = 32766 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = -32768 + var y int16 = 32767 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = -32767 + 
var y int16 = -32768 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = -32767 + var y int16 = -32767 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = -32767 + var y int16 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = -32767 + var y int16 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = -32767 + var y int16 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = -32767 + var y int16 = 32766 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, 
y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = -32767 + var y int16 = 32767 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = -1 + var y int16 = -32768 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = -1 + var y int16 = -32767 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = -1 + var y int16 = -1 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = -1 + var y int16 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = -1 + var y int16 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + 
t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = -1 + var y int16 = 32766 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = -1 + var y int16 = 32767 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = 0 + var y int16 = -32768 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 0 + var y int16 = -32767 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 0 + var y int16 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 0 + var y int16 = 0 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if 
x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 0 + var y int16 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = 0 + var y int16 = 32766 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = 0 + var y int16 = 32767 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = 1 + var y int16 = -32768 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 1 + var y int16 = -32767 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 1 + var y int16 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + 
} + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 1 + var y int16 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 1 + var y int16 = 1 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 1 + var y int16 = 32766 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = 1 + var y int16 = 32767 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = 32766 + var y int16 = -32768 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 32766 + 
var y int16 = -32767 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 32766 + var y int16 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 32766 + var y int16 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 32766 + var y int16 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 32766 + var y int16 = 32766 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 32766 + var y int16 = 32767 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } 
+ if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int16 = 32767 + var y int16 = -32768 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 32767 + var y int16 = -32767 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 32767 + var y int16 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 32767 + var y int16 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 32767 + var y int16 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 32767 + var y int16 = 32766 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + 
t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int16 = 32767 + var y int16 = 32767 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } +} +func TestConstFoldCompareuint8(t *testing.T) { + { + var x uint8 = 0 + var y uint8 = 0 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint8 = 0 + var y uint8 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x uint8 = 0 + var y uint8 = 255 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x uint8 = 1 + var y uint8 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint8 = 1 + var y uint8 = 1 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x 
!= y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint8 = 1 + var y uint8 = 255 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x uint8 = 255 + var y uint8 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint8 = 255 + var y uint8 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x uint8 = 255 + var y uint8 = 255 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } +} +func TestConstFoldCompareint8(t *testing.T) { + { + var x int8 = -128 + var y int8 = -128 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } 
+ } + { + var x int8 = -128 + var y int8 = -127 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = -128 + var y int8 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = -128 + var y int8 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = -128 + var y int8 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = -128 + var y int8 = 126 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = -128 + var y int8 = 127 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, 
y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = -127 + var y int8 = -128 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = -127 + var y int8 = -127 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = -127 + var y int8 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = -127 + var y int8 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = -127 + var y int8 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = -127 + var y int8 = 126 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", 
x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = -127 + var y int8 = 127 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = -1 + var y int8 = -128 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = -1 + var y int8 = -127 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = -1 + var y int8 = -1 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = -1 + var y int8 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = -1 + var y int8 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", 
x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = -1 + var y int8 = 126 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = -1 + var y int8 = 127 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = 0 + var y int8 = -128 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 0 + var y int8 = -127 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 0 + var y int8 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 0 + var y int8 = 0 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } 
+ if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 0 + var y int8 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = 0 + var y int8 = 126 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = 0 + var y int8 = 127 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = 1 + var y int8 = -128 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 1 + var y int8 = -127 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 1 + var y int8 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != 
y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 1 + var y int8 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 1 + var y int8 = 1 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 1 + var y int8 = 126 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = 1 + var y int8 = 127 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var x int8 = 126 + var y int8 = -128 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 126 + var y int8 = -127 + if x == y { + 
t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 126 + var y int8 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 126 + var y int8 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 126 + var y int8 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 126 + var y int8 = 126 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 126 + var y int8 = 127 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if !(x < y) { + t.Errorf("!(%d < %d)", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if x >= y { + t.Errorf("%d >= %d", x, y) + } + } + { + var 
x int8 = 127 + var y int8 = -128 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 127 + var y int8 = -127 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 127 + var y int8 = -1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 127 + var y int8 = 0 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 127 + var y int8 = 1 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 127 + var y int8 = 126 + if x == y { + t.Errorf("%d == %d", x, y) + } + if !(x != y) { + t.Errorf("!(%d != %d)", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if !(x > y) { + t.Errorf("!(%d > %d)", x, y) + } + if x <= y { + t.Errorf("%d <= %d", x, y) + } + if !(x >= y) { 
+ t.Errorf("!(%d >= %d)", x, y) + } + } + { + var x int8 = 127 + var y int8 = 127 + if !(x == y) { + t.Errorf("!(%d == %d)", x, y) + } + if x != y { + t.Errorf("%d != %d", x, y) + } + if x < y { + t.Errorf("%d < %d", x, y) + } + if x > y { + t.Errorf("%d > %d", x, y) + } + if !(x <= y) { + t.Errorf("!(%d <= %d)", x, y) + } + if !(x >= y) { + t.Errorf("!(%d >= %d)", x, y) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/dep_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/dep_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d141f1074a59164f35724d7479c3876d70727bdc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/dep_test.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "internal/testenv" + "strings" + "testing" +) + +func TestDeps(t *testing.T) { + out, err := testenv.Command(t, testenv.GoToolPath(t), "list", "-f", "{{.Deps}}", "cmd/compile/internal/gc").Output() + if err != nil { + t.Fatal(err) + } + for _, dep := range strings.Fields(strings.Trim(string(out), "[]")) { + switch dep { + case "go/build", "go/scanner": + // cmd/compile/internal/importer introduces a dependency + // on go/build and go/token; cmd/compile/internal/ uses + // go/constant which uses go/token in its API. Once we + // got rid of those dependencies, enable this check again. 
+ // TODO(gri) fix this + // t.Errorf("undesired dependency on %q", dep) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/divconst_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/divconst_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9358a60374996aa29b0cf694ca002914f62c1457 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/divconst_test.go @@ -0,0 +1,325 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "testing" +) + +var boolres bool + +var i64res int64 + +func BenchmarkDivconstI64(b *testing.B) { + for i := 0; i < b.N; i++ { + i64res = int64(i) / 7 + } +} + +func BenchmarkModconstI64(b *testing.B) { + for i := 0; i < b.N; i++ { + i64res = int64(i) % 7 + } +} + +func BenchmarkDivisiblePow2constI64(b *testing.B) { + for i := 0; i < b.N; i++ { + boolres = int64(i)%16 == 0 + } +} +func BenchmarkDivisibleconstI64(b *testing.B) { + for i := 0; i < b.N; i++ { + boolres = int64(i)%7 == 0 + } +} + +func BenchmarkDivisibleWDivconstI64(b *testing.B) { + for i := 0; i < b.N; i++ { + i64res = int64(i) / 7 + boolres = int64(i)%7 == 0 + } +} + +var u64res uint64 + +func TestDivmodConstU64(t *testing.T) { + // Test division by c. 
Function f must be func(n) { return n/c, n%c } + testdiv := func(c uint64, f func(uint64) (uint64, uint64)) func(*testing.T) { + return func(t *testing.T) { + x := uint64(12345) + for i := 0; i < 10000; i++ { + x += x << 2 + q, r := f(x) + if r < 0 || r >= c || q*c+r != x { + t.Errorf("divmod(%d, %d) returned incorrect (%d, %d)", x, c, q, r) + } + } + max := uint64(1<<64-1) / c * c + xs := []uint64{0, 1, c - 1, c, c + 1, 2*c - 1, 2 * c, 2*c + 1, + c*c - 1, c * c, c*c + 1, max - 1, max, max + 1, 1<<64 - 1} + for _, x := range xs { + q, r := f(x) + if r < 0 || r >= c || q*c+r != x { + t.Errorf("divmod(%d, %d) returned incorrect (%d, %d)", x, c, q, r) + } + } + } + } + t.Run("2", testdiv(2, func(n uint64) (uint64, uint64) { return n / 2, n % 2 })) + t.Run("3", testdiv(3, func(n uint64) (uint64, uint64) { return n / 3, n % 3 })) + t.Run("4", testdiv(4, func(n uint64) (uint64, uint64) { return n / 4, n % 4 })) + t.Run("5", testdiv(5, func(n uint64) (uint64, uint64) { return n / 5, n % 5 })) + t.Run("6", testdiv(6, func(n uint64) (uint64, uint64) { return n / 6, n % 6 })) + t.Run("7", testdiv(7, func(n uint64) (uint64, uint64) { return n / 7, n % 7 })) + t.Run("8", testdiv(8, func(n uint64) (uint64, uint64) { return n / 8, n % 8 })) + t.Run("9", testdiv(9, func(n uint64) (uint64, uint64) { return n / 9, n % 9 })) + t.Run("10", testdiv(10, func(n uint64) (uint64, uint64) { return n / 10, n % 10 })) + t.Run("11", testdiv(11, func(n uint64) (uint64, uint64) { return n / 11, n % 11 })) + t.Run("12", testdiv(12, func(n uint64) (uint64, uint64) { return n / 12, n % 12 })) + t.Run("13", testdiv(13, func(n uint64) (uint64, uint64) { return n / 13, n % 13 })) + t.Run("14", testdiv(14, func(n uint64) (uint64, uint64) { return n / 14, n % 14 })) + t.Run("15", testdiv(15, func(n uint64) (uint64, uint64) { return n / 15, n % 15 })) + t.Run("16", testdiv(16, func(n uint64) (uint64, uint64) { return n / 16, n % 16 })) + t.Run("17", testdiv(17, func(n uint64) (uint64, uint64) { return n 
/ 17, n % 17 })) + t.Run("255", testdiv(255, func(n uint64) (uint64, uint64) { return n / 255, n % 255 })) + t.Run("256", testdiv(256, func(n uint64) (uint64, uint64) { return n / 256, n % 256 })) + t.Run("257", testdiv(257, func(n uint64) (uint64, uint64) { return n / 257, n % 257 })) + t.Run("65535", testdiv(65535, func(n uint64) (uint64, uint64) { return n / 65535, n % 65535 })) + t.Run("65536", testdiv(65536, func(n uint64) (uint64, uint64) { return n / 65536, n % 65536 })) + t.Run("65537", testdiv(65537, func(n uint64) (uint64, uint64) { return n / 65537, n % 65537 })) + t.Run("1<<32-1", testdiv(1<<32-1, func(n uint64) (uint64, uint64) { return n / (1<<32 - 1), n % (1<<32 - 1) })) + t.Run("1<<32+1", testdiv(1<<32+1, func(n uint64) (uint64, uint64) { return n / (1<<32 + 1), n % (1<<32 + 1) })) + t.Run("1<<64-1", testdiv(1<<64-1, func(n uint64) (uint64, uint64) { return n / (1<<64 - 1), n % (1<<64 - 1) })) +} + +func BenchmarkDivconstU64(b *testing.B) { + b.Run("3", func(b *testing.B) { + x := uint64(123456789123456789) + for i := 0; i < b.N; i++ { + x += x << 4 + u64res = uint64(x) / 3 + } + }) + b.Run("5", func(b *testing.B) { + x := uint64(123456789123456789) + for i := 0; i < b.N; i++ { + x += x << 4 + u64res = uint64(x) / 5 + } + }) + b.Run("37", func(b *testing.B) { + x := uint64(123456789123456789) + for i := 0; i < b.N; i++ { + x += x << 4 + u64res = uint64(x) / 37 + } + }) + b.Run("1234567", func(b *testing.B) { + x := uint64(123456789123456789) + for i := 0; i < b.N; i++ { + x += x << 4 + u64res = uint64(x) / 1234567 + } + }) +} + +func BenchmarkModconstU64(b *testing.B) { + for i := 0; i < b.N; i++ { + u64res = uint64(i) % 7 + } +} + +func BenchmarkDivisibleconstU64(b *testing.B) { + for i := 0; i < b.N; i++ { + boolres = uint64(i)%7 == 0 + } +} + +func BenchmarkDivisibleWDivconstU64(b *testing.B) { + for i := 0; i < b.N; i++ { + u64res = uint64(i) / 7 + boolres = uint64(i)%7 == 0 + } +} + +var i32res int32 + +func BenchmarkDivconstI32(b *testing.B) { 
+ for i := 0; i < b.N; i++ { + i32res = int32(i) / 7 + } +} + +func BenchmarkModconstI32(b *testing.B) { + for i := 0; i < b.N; i++ { + i32res = int32(i) % 7 + } +} + +func BenchmarkDivisiblePow2constI32(b *testing.B) { + for i := 0; i < b.N; i++ { + boolres = int32(i)%16 == 0 + } +} + +func BenchmarkDivisibleconstI32(b *testing.B) { + for i := 0; i < b.N; i++ { + boolres = int32(i)%7 == 0 + } +} + +func BenchmarkDivisibleWDivconstI32(b *testing.B) { + for i := 0; i < b.N; i++ { + i32res = int32(i) / 7 + boolres = int32(i)%7 == 0 + } +} + +var u32res uint32 + +func BenchmarkDivconstU32(b *testing.B) { + for i := 0; i < b.N; i++ { + u32res = uint32(i) / 7 + } +} + +func BenchmarkModconstU32(b *testing.B) { + for i := 0; i < b.N; i++ { + u32res = uint32(i) % 7 + } +} + +func BenchmarkDivisibleconstU32(b *testing.B) { + for i := 0; i < b.N; i++ { + boolres = uint32(i)%7 == 0 + } +} + +func BenchmarkDivisibleWDivconstU32(b *testing.B) { + for i := 0; i < b.N; i++ { + u32res = uint32(i) / 7 + boolres = uint32(i)%7 == 0 + } +} + +var i16res int16 + +func BenchmarkDivconstI16(b *testing.B) { + for i := 0; i < b.N; i++ { + i16res = int16(i) / 7 + } +} + +func BenchmarkModconstI16(b *testing.B) { + for i := 0; i < b.N; i++ { + i16res = int16(i) % 7 + } +} + +func BenchmarkDivisiblePow2constI16(b *testing.B) { + for i := 0; i < b.N; i++ { + boolres = int16(i)%16 == 0 + } +} + +func BenchmarkDivisibleconstI16(b *testing.B) { + for i := 0; i < b.N; i++ { + boolres = int16(i)%7 == 0 + } +} + +func BenchmarkDivisibleWDivconstI16(b *testing.B) { + for i := 0; i < b.N; i++ { + i16res = int16(i) / 7 + boolres = int16(i)%7 == 0 + } +} + +var u16res uint16 + +func BenchmarkDivconstU16(b *testing.B) { + for i := 0; i < b.N; i++ { + u16res = uint16(i) / 7 + } +} + +func BenchmarkModconstU16(b *testing.B) { + for i := 0; i < b.N; i++ { + u16res = uint16(i) % 7 + } +} + +func BenchmarkDivisibleconstU16(b *testing.B) { + for i := 0; i < b.N; i++ { + boolres = uint16(i)%7 == 0 + } +} + 
+func BenchmarkDivisibleWDivconstU16(b *testing.B) { + for i := 0; i < b.N; i++ { + u16res = uint16(i) / 7 + boolres = uint16(i)%7 == 0 + } +} + +var i8res int8 + +func BenchmarkDivconstI8(b *testing.B) { + for i := 0; i < b.N; i++ { + i8res = int8(i) / 7 + } +} + +func BenchmarkModconstI8(b *testing.B) { + for i := 0; i < b.N; i++ { + i8res = int8(i) % 7 + } +} + +func BenchmarkDivisiblePow2constI8(b *testing.B) { + for i := 0; i < b.N; i++ { + boolres = int8(i)%16 == 0 + } +} + +func BenchmarkDivisibleconstI8(b *testing.B) { + for i := 0; i < b.N; i++ { + boolres = int8(i)%7 == 0 + } +} + +func BenchmarkDivisibleWDivconstI8(b *testing.B) { + for i := 0; i < b.N; i++ { + i8res = int8(i) / 7 + boolres = int8(i)%7 == 0 + } +} + +var u8res uint8 + +func BenchmarkDivconstU8(b *testing.B) { + for i := 0; i < b.N; i++ { + u8res = uint8(i) / 7 + } +} + +func BenchmarkModconstU8(b *testing.B) { + for i := 0; i < b.N; i++ { + u8res = uint8(i) % 7 + } +} + +func BenchmarkDivisibleconstU8(b *testing.B) { + for i := 0; i < b.N; i++ { + boolres = uint8(i)%7 == 0 + } +} + +func BenchmarkDivisibleWDivconstU8(b *testing.B) { + for i := 0; i < b.N; i++ { + u8res = uint8(i) / 7 + boolres = uint8(i)%7 == 0 + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/main.go b/platform/dbops/binaries/go/go/src/cmd/compile/main.go new file mode 100644 index 0000000000000000000000000000000000000000..7d38bea7fa27a8c95338b301e1f8409cf1497856 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/main.go @@ -0,0 +1,59 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "cmd/compile/internal/amd64" + "cmd/compile/internal/arm" + "cmd/compile/internal/arm64" + "cmd/compile/internal/base" + "cmd/compile/internal/gc" + "cmd/compile/internal/loong64" + "cmd/compile/internal/mips" + "cmd/compile/internal/mips64" + "cmd/compile/internal/ppc64" + "cmd/compile/internal/riscv64" + "cmd/compile/internal/s390x" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/wasm" + "cmd/compile/internal/x86" + "fmt" + "internal/buildcfg" + "log" + "os" +) + +var archInits = map[string]func(*ssagen.ArchInfo){ + "386": x86.Init, + "amd64": amd64.Init, + "arm": arm.Init, + "arm64": arm64.Init, + "loong64": loong64.Init, + "mips": mips.Init, + "mipsle": mips.Init, + "mips64": mips64.Init, + "mips64le": mips64.Init, + "ppc64": ppc64.Init, + "ppc64le": ppc64.Init, + "riscv64": riscv64.Init, + "s390x": s390x.Init, + "wasm": wasm.Init, +} + +func main() { + // disable timestamps for reproducible output + log.SetFlags(0) + log.SetPrefix("compile: ") + + buildcfg.Check() + archInit, ok := archInits[buildcfg.GOARCH] + if !ok { + fmt.Fprintf(os.Stderr, "compile: unknown architecture %q\n", buildcfg.GOARCH) + os.Exit(2) + } + + gc.Main(archInit) + base.Exit(0) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/profile.sh b/platform/dbops/binaries/go/go/src/cmd/compile/profile.sh new file mode 100644 index 0000000000000000000000000000000000000000..37d65d84942b58739624dc4b50ae64d0dd057159 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/profile.sh @@ -0,0 +1,21 @@ +# Copyright 2023 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# This script collects a CPU profile of the compiler +# for building all targets in std and cmd, and puts +# the profile at cmd/compile/default.pgo. 
+ +dir=$(mktemp -d) +cd $dir +seed=$(date) + +for p in $(go list std cmd); do + h=$(echo $seed $p | md5sum | cut -d ' ' -f 1) + echo $p $h + go build -o /dev/null -gcflags=-cpuprofile=$PWD/prof.$h $p +done + +go tool pprof -proto prof.* > $(go env GOROOT)/src/cmd/compile/default.pgo + +rm -r $dir diff --git a/platform/dbops/binaries/go/go/src/cmd/covdata/argsmerge.go b/platform/dbops/binaries/go/go/src/cmd/covdata/argsmerge.go new file mode 100644 index 0000000000000000000000000000000000000000..8af1432d62ad3077f703f8dd59931e11cdc41e17 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/covdata/argsmerge.go @@ -0,0 +1,56 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "slices" + "strconv" +) + +type argvalues struct { + osargs []string + goos string + goarch string +} + +type argstate struct { + state argvalues + initialized bool +} + +func (a *argstate) Merge(state argvalues) { + if !a.initialized { + a.state = state + a.initialized = true + return + } + if !slices.Equal(a.state.osargs, state.osargs) { + a.state.osargs = nil + } + if state.goos != a.state.goos { + a.state.goos = "" + } + if state.goarch != a.state.goarch { + a.state.goarch = "" + } +} + +func (a *argstate) ArgsSummary() map[string]string { + m := make(map[string]string) + if len(a.state.osargs) != 0 { + m["argc"] = strconv.Itoa(len(a.state.osargs)) + for k, a := range a.state.osargs { + m[fmt.Sprintf("argv%d", k)] = a + } + } + if a.state.goos != "" { + m["GOOS"] = a.state.goos + } + if a.state.goarch != "" { + m["GOARCH"] = a.state.goarch + } + return m +} diff --git a/platform/dbops/binaries/go/go/src/cmd/covdata/covdata.go b/platform/dbops/binaries/go/go/src/cmd/covdata/covdata.go new file mode 100644 index 0000000000000000000000000000000000000000..95bc30d25dae21e74252b3e1ec8ed498f2cfb06a --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/covdata/covdata.go @@ -0,0 +1,224 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "cmd/internal/cov" + "cmd/internal/pkgpattern" + "flag" + "fmt" + "os" + "runtime" + "runtime/pprof" + "strings" +) + +var verbflag = flag.Int("v", 0, "Verbose trace output level") +var hflag = flag.Bool("h", false, "Panic on fatal errors (for stack trace)") +var hwflag = flag.Bool("hw", false, "Panic on warnings (for stack trace)") +var indirsflag = flag.String("i", "", "Input dirs to examine (comma separated)") +var pkgpatflag = flag.String("pkg", "", "Restrict output to package(s) matching specified package pattern.") +var cpuprofileflag = flag.String("cpuprofile", "", "Write CPU profile to specified file") +var memprofileflag = flag.String("memprofile", "", "Write memory profile to specified file") +var memprofilerateflag = flag.Int("memprofilerate", 0, "Set memprofile sampling rate to value") + +var matchpkg func(name string) bool + +var atExitFuncs []func() + +func atExit(f func()) { + atExitFuncs = append(atExitFuncs, f) +} + +func Exit(code int) { + for i := len(atExitFuncs) - 1; i >= 0; i-- { + f := atExitFuncs[i] + atExitFuncs = atExitFuncs[:i] + f() + } + os.Exit(code) +} + +func dbgtrace(vlevel int, s string, a ...interface{}) { + if *verbflag >= vlevel { + fmt.Printf(s, a...) + fmt.Printf("\n") + } +} + +func warn(s string, a ...interface{}) { + fmt.Fprintf(os.Stderr, "warning: ") + fmt.Fprintf(os.Stderr, s, a...) + fmt.Fprintf(os.Stderr, "\n") + if *hwflag { + panic("unexpected warning") + } +} + +func fatal(s string, a ...interface{}) { + fmt.Fprintf(os.Stderr, "error: ") + fmt.Fprintf(os.Stderr, s, a...) 
+ fmt.Fprintf(os.Stderr, "\n") + if *hflag { + panic("fatal error") + } + Exit(1) +} + +func usage(msg string) { + if len(msg) > 0 { + fmt.Fprintf(os.Stderr, "error: %s\n", msg) + } + fmt.Fprintf(os.Stderr, "usage: go tool covdata [command]\n") + fmt.Fprintf(os.Stderr, ` +Commands are: + +textfmt convert coverage data to textual format +percent output total percentage of statements covered +pkglist output list of package import paths +func output coverage profile information for each function +merge merge data files together +subtract subtract one set of data files from another set +intersect generate intersection of two sets of data files +debugdump dump data in human-readable format for debugging purposes +`) + fmt.Fprintf(os.Stderr, "\nFor help on a specific subcommand, try:\n") + fmt.Fprintf(os.Stderr, "\ngo tool covdata -help\n") + Exit(2) +} + +type covOperation interface { + cov.CovDataVisitor + Setup() + Usage(string) +} + +// Modes of operation. +const ( + funcMode = "func" + mergeMode = "merge" + intersectMode = "intersect" + subtractMode = "subtract" + percentMode = "percent" + pkglistMode = "pkglist" + textfmtMode = "textfmt" + debugDumpMode = "debugdump" +) + +func main() { + // First argument should be mode/subcommand. + if len(os.Args) < 2 { + usage("missing command selector") + } + + // Select mode + var op covOperation + cmd := os.Args[1] + switch cmd { + case mergeMode: + op = makeMergeOp() + case debugDumpMode: + op = makeDumpOp(debugDumpMode) + case textfmtMode: + op = makeDumpOp(textfmtMode) + case percentMode: + op = makeDumpOp(percentMode) + case funcMode: + op = makeDumpOp(funcMode) + case pkglistMode: + op = makeDumpOp(pkglistMode) + case subtractMode: + op = makeSubtractIntersectOp(subtractMode) + case intersectMode: + op = makeSubtractIntersectOp(intersectMode) + default: + usage(fmt.Sprintf("unknown command selector %q", cmd)) + } + + // Edit out command selector, then parse flags. + os.Args = append(os.Args[:1], os.Args[2:]...) 
+ flag.Usage = func() { + op.Usage("") + } + flag.Parse() + + // Mode-independent flag setup + dbgtrace(1, "starting mode-independent setup") + if flag.NArg() != 0 { + op.Usage("unknown extra arguments") + } + if *pkgpatflag != "" { + pats := strings.Split(*pkgpatflag, ",") + matchers := []func(name string) bool{} + for _, p := range pats { + if p == "" { + continue + } + f := pkgpattern.MatchSimplePattern(p) + matchers = append(matchers, f) + } + matchpkg = func(name string) bool { + for _, f := range matchers { + if f(name) { + return true + } + } + return false + } + } + if *cpuprofileflag != "" { + f, err := os.Create(*cpuprofileflag) + if err != nil { + fatal("%v", err) + } + if err := pprof.StartCPUProfile(f); err != nil { + fatal("%v", err) + } + atExit(pprof.StopCPUProfile) + } + if *memprofileflag != "" { + if *memprofilerateflag != 0 { + runtime.MemProfileRate = *memprofilerateflag + } + f, err := os.Create(*memprofileflag) + if err != nil { + fatal("%v", err) + } + atExit(func() { + runtime.GC() + const writeLegacyFormat = 1 + if err := pprof.Lookup("heap").WriteTo(f, writeLegacyFormat); err != nil { + fatal("%v", err) + } + }) + } else { + // Not doing memory profiling; disable it entirely. + runtime.MemProfileRate = 0 + } + + // Mode-dependent setup. + op.Setup() + + // ... off and running now. 
+ dbgtrace(1, "starting perform") + + indirs := strings.Split(*indirsflag, ",") + vis := cov.CovDataVisitor(op) + var flags cov.CovDataReaderFlags + if *hflag { + flags |= cov.PanicOnError + } + if *hwflag { + flags |= cov.PanicOnWarning + } + reader := cov.MakeCovDataReader(vis, indirs, *verbflag, flags, matchpkg) + st := 0 + if err := reader.Visit(); err != nil { + fmt.Fprintf(os.Stderr, "error: %v\n", err) + st = 1 + } + dbgtrace(1, "leaving main") + Exit(st) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/covdata/doc.go b/platform/dbops/binaries/go/go/src/cmd/covdata/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..ae2e4e4ffa9975a649a0af4421679fc0d588c47c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/covdata/doc.go @@ -0,0 +1,80 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Covdata is a program for manipulating and generating reports +from 2nd-generation coverage testing output files, those produced +from running applications or integration tests. E.g. + + $ mkdir ./profiledir + $ go build -cover -o myapp.exe . + $ GOCOVERDIR=./profiledir ./myapp.exe + $ ls ./profiledir + covcounters.cce1b350af34b6d0fb59cc1725f0ee27.821598.1663006712821344241 + covmeta.cce1b350af34b6d0fb59cc1725f0ee27 + $ + +Run covdata via "go tool covdata ", where 'mode' is a subcommand +selecting a specific reporting, merging, or data manipulation operation. +Descriptions on the various modes (run "go tool cover -help" for +specifics on usage of a given mode): + +1. Report percent of statements covered in each profiled package + + $ go tool covdata percent -i=profiledir + cov-example/p coverage: 41.1% of statements + main coverage: 87.5% of statements + $ + +2. Report import paths of packages profiled + + $ go tool covdata pkglist -i=profiledir + cov-example/p + main + $ + +3. 
Report percent statements covered by function: + + $ go tool covdata func -i=profiledir + cov-example/p/p.go:12: emptyFn 0.0% + cov-example/p/p.go:32: Small 100.0% + cov-example/p/p.go:47: Medium 90.9% + ... + $ + +4. Convert coverage data to legacy textual format: + + $ go tool covdata textfmt -i=profiledir -o=cov.txt + $ head cov.txt + mode: set + cov-example/p/p.go:12.22,13.2 0 0 + cov-example/p/p.go:15.31,16.2 1 0 + cov-example/p/p.go:16.3,18.3 0 0 + cov-example/p/p.go:19.3,21.3 0 0 + ... + $ go tool cover -html=cov.txt + $ + +5. Merge profiles together: + + $ go tool covdata merge -i=indir1,indir2 -o=outdir -modpaths=github.com/go-delve/delve + $ + +6. Subtract one profile from another + + $ go tool covdata subtract -i=indir1,indir2 -o=outdir + $ + +7. Intersect profiles + + $ go tool covdata intersect -i=indir1,indir2 -o=outdir + $ + +8. Dump a profile for debugging purposes. + + $ go tool covdata debugdump -i=indir + + $ +*/ +package main diff --git a/platform/dbops/binaries/go/go/src/cmd/covdata/dump.go b/platform/dbops/binaries/go/go/src/cmd/covdata/dump.go new file mode 100644 index 0000000000000000000000000000000000000000..a51762f0d10e2c8b4296e16dd3cb7cb9df8150ee --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/covdata/dump.go @@ -0,0 +1,357 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// This file contains functions and apis to support the "go tool +// covdata" sub-commands that relate to dumping text format summaries +// and reports: "pkglist", "func", "debugdump", "percent", and +// "textfmt". 
+ +import ( + "flag" + "fmt" + "internal/coverage" + "internal/coverage/calloc" + "internal/coverage/cformat" + "internal/coverage/cmerge" + "internal/coverage/decodecounter" + "internal/coverage/decodemeta" + "internal/coverage/pods" + "os" + "sort" + "strings" +) + +var textfmtoutflag *string +var liveflag *bool + +func makeDumpOp(cmd string) covOperation { + if cmd == textfmtMode || cmd == percentMode { + textfmtoutflag = flag.String("o", "", "Output text format to file") + } + if cmd == debugDumpMode { + liveflag = flag.Bool("live", false, "Select only live (executed) functions for dump output.") + } + d := &dstate{ + cmd: cmd, + cm: &cmerge.Merger{}, + } + // For these modes (percent, pkglist, func, etc), use a relaxed + // policy when it comes to counter mode clashes. For a percent + // report, for example, we only care whether a given line is + // executed at least once, so it's ok to (effectively) merge + // together runs derived from different counter modes. + if d.cmd == percentMode || d.cmd == funcMode || d.cmd == pkglistMode { + d.cm.SetModeMergePolicy(cmerge.ModeMergeRelaxed) + } + if d.cmd == pkglistMode { + d.pkgpaths = make(map[string]struct{}) + } + return d +} + +// dstate encapsulates state and provides methods for implementing +// various dump operations. Specifically, dstate implements the +// CovDataVisitor interface, and is designed to be used in +// concert with the CovDataReader utility, which abstracts away most +// of the grubby details of reading coverage data files. +type dstate struct { + // for batch allocation of counter arrays + calloc.BatchCounterAlloc + + // counter merging state + methods + cm *cmerge.Merger + + // counter data formatting helper + format *cformat.Formatter + + // 'mm' stores values read from a counter data file; the pkfunc key + // is a pkgid/funcid pair that uniquely identifies a function in + // instrumented application. 
+ mm map[pkfunc]decodecounter.FuncPayload + + // pkm maps package ID to the number of functions in the package + // with that ID. It is used to report inconsistencies in counter + // data (for example, a counter data entry with pkgid=N funcid=10 + // where package N only has 3 functions). + pkm map[uint32]uint32 + + // pkgpaths records all package import paths encountered while + // visiting coverage data files (used to implement the "pkglist" + // subcommand). + pkgpaths map[string]struct{} + + // Current package name and import path. + pkgName string + pkgImportPath string + + // Module path for current package (may be empty). + modulePath string + + // Dump subcommand (ex: "textfmt", "debugdump", etc). + cmd string + + // File to which we will write text format output, if enabled. + textfmtoutf *os.File + + // Total and covered statements (used by "debugdump" subcommand). + totalStmts, coveredStmts int + + // Records whether preamble has been emitted for current pkg + // (used when in "debugdump" mode) + preambleEmitted bool +} + +func (d *dstate) Usage(msg string) { + if len(msg) > 0 { + fmt.Fprintf(os.Stderr, "error: %s\n", msg) + } + fmt.Fprintf(os.Stderr, "usage: go tool covdata %s -i=\n\n", d.cmd) + flag.PrintDefaults() + fmt.Fprintf(os.Stderr, "\nExamples:\n\n") + switch d.cmd { + case pkglistMode: + fmt.Fprintf(os.Stderr, " go tool covdata pkglist -i=dir1,dir2\n\n") + fmt.Fprintf(os.Stderr, " \treads coverage data files from dir1+dirs2\n") + fmt.Fprintf(os.Stderr, " \tand writes out a list of the import paths\n") + fmt.Fprintf(os.Stderr, " \tof all compiled packages.\n") + case textfmtMode: + fmt.Fprintf(os.Stderr, " go tool covdata textfmt -i=dir1,dir2 -o=out.txt\n\n") + fmt.Fprintf(os.Stderr, " \tmerges data from input directories dir1+dir2\n") + fmt.Fprintf(os.Stderr, " \tand emits text format into file 'out.txt'\n") + case percentMode: + fmt.Fprintf(os.Stderr, " go tool covdata percent -i=dir1,dir2\n\n") + fmt.Fprintf(os.Stderr, " \tmerges data from 
input directories dir1+dir2\n") + fmt.Fprintf(os.Stderr, " \tand emits percentage of statements covered\n\n") + case funcMode: + fmt.Fprintf(os.Stderr, " go tool covdata func -i=dir1,dir2\n\n") + fmt.Fprintf(os.Stderr, " \treads coverage data files from dir1+dirs2\n") + fmt.Fprintf(os.Stderr, " \tand writes out coverage profile data for\n") + fmt.Fprintf(os.Stderr, " \teach function.\n") + case debugDumpMode: + fmt.Fprintf(os.Stderr, " go tool covdata debugdump [flags] -i=dir1,dir2\n\n") + fmt.Fprintf(os.Stderr, " \treads coverage data from dir1+dir2 and dumps\n") + fmt.Fprintf(os.Stderr, " \tcontents in human-readable form to stdout, for\n") + fmt.Fprintf(os.Stderr, " \tdebugging purposes.\n") + default: + panic("unexpected") + } + Exit(2) +} + +// Setup is called once at program startup time to vet flag values +// and do any necessary setup operations. +func (d *dstate) Setup() { + if *indirsflag == "" { + d.Usage("select input directories with '-i' option") + } + if d.cmd == textfmtMode || (d.cmd == percentMode && *textfmtoutflag != "") { + if *textfmtoutflag == "" { + d.Usage("select output file name with '-o' option") + } + var err error + d.textfmtoutf, err = os.Create(*textfmtoutflag) + if err != nil { + d.Usage(fmt.Sprintf("unable to open textfmt output file %q: %v", *textfmtoutflag, err)) + } + } + if d.cmd == debugDumpMode { + fmt.Printf("/* WARNING: the format of this dump is not stable and is\n") + fmt.Printf(" * expected to change from one Go release to the next.\n") + fmt.Printf(" *\n") + fmt.Printf(" * produced by:\n") + args := append([]string{os.Args[0]}, debugDumpMode) + args = append(args, os.Args[1:]...) 
+ fmt.Printf(" *\t%s\n", strings.Join(args, " ")) + fmt.Printf(" */\n") + } +} + +func (d *dstate) BeginPod(p pods.Pod) { + d.mm = make(map[pkfunc]decodecounter.FuncPayload) +} + +func (d *dstate) EndPod(p pods.Pod) { + if d.cmd == debugDumpMode { + d.cm.ResetModeAndGranularity() + } +} + +func (d *dstate) BeginCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) { + dbgtrace(2, "visit counter data file %s dirIdx %d", cdf, dirIdx) + if d.cmd == debugDumpMode { + fmt.Printf("data file %s", cdf) + if cdr.Goos() != "" { + fmt.Printf(" GOOS=%s", cdr.Goos()) + } + if cdr.Goarch() != "" { + fmt.Printf(" GOARCH=%s", cdr.Goarch()) + } + if len(cdr.OsArgs()) != 0 { + fmt.Printf(" program args: %+v\n", cdr.OsArgs()) + } + fmt.Printf("\n") + } +} + +func (d *dstate) EndCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) { +} + +func (d *dstate) VisitFuncCounterData(data decodecounter.FuncPayload) { + if nf, ok := d.pkm[data.PkgIdx]; !ok || data.FuncIdx > nf { + warn("func payload inconsistency: id [p=%d,f=%d] nf=%d len(ctrs)=%d in VisitFuncCounterData, ignored", data.PkgIdx, data.FuncIdx, nf, len(data.Counters)) + return + } + key := pkfunc{pk: data.PkgIdx, fcn: data.FuncIdx} + val, found := d.mm[key] + + dbgtrace(5, "ctr visit pk=%d fid=%d found=%v len(val.ctrs)=%d len(data.ctrs)=%d", data.PkgIdx, data.FuncIdx, found, len(val.Counters), len(data.Counters)) + + if len(val.Counters) < len(data.Counters) { + t := val.Counters + val.Counters = d.AllocateCounters(len(data.Counters)) + copy(val.Counters, t) + } + err, overflow := d.cm.MergeCounters(val.Counters, data.Counters) + if err != nil { + fatal("%v", err) + } + if overflow { + warn("uint32 overflow during counter merge") + } + d.mm[key] = val +} + +func (d *dstate) EndCounters() { +} + +func (d *dstate) VisitMetaDataFile(mdf string, mfr *decodemeta.CoverageMetaFileReader) { + newgran := mfr.CounterGranularity() + newmode := mfr.CounterMode() + if err := 
d.cm.SetModeAndGranularity(mdf, newmode, newgran); err != nil { + fatal("%v", err) + } + if d.cmd == debugDumpMode { + fmt.Printf("Cover mode: %s\n", newmode.String()) + fmt.Printf("Cover granularity: %s\n", newgran.String()) + } + if d.format == nil { + d.format = cformat.NewFormatter(mfr.CounterMode()) + } + + // To provide an additional layer of checking when reading counter + // data, walk the meta-data file to determine the set of legal + // package/function combinations. This will help catch bugs in the + // counter file reader. + d.pkm = make(map[uint32]uint32) + np := uint32(mfr.NumPackages()) + payload := []byte{} + for pkIdx := uint32(0); pkIdx < np; pkIdx++ { + var pd *decodemeta.CoverageMetaDataDecoder + var err error + pd, payload, err = mfr.GetPackageDecoder(pkIdx, payload) + if err != nil { + fatal("reading pkg %d from meta-file %s: %s", pkIdx, mdf, err) + } + d.pkm[pkIdx] = pd.NumFuncs() + } +} + +func (d *dstate) BeginPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) { + d.preambleEmitted = false + d.pkgImportPath = pd.PackagePath() + d.pkgName = pd.PackageName() + d.modulePath = pd.ModulePath() + if d.cmd == pkglistMode { + d.pkgpaths[d.pkgImportPath] = struct{}{} + } + d.format.SetPackage(pd.PackagePath()) +} + +func (d *dstate) EndPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) { +} + +func (d *dstate) VisitFunc(pkgIdx uint32, fnIdx uint32, fd *coverage.FuncDesc) { + var counters []uint32 + key := pkfunc{pk: pkgIdx, fcn: fnIdx} + v, haveCounters := d.mm[key] + + dbgtrace(5, "meta visit pk=%d fid=%d fname=%s file=%s found=%v len(val.ctrs)=%d", pkgIdx, fnIdx, fd.Funcname, fd.Srcfile, haveCounters, len(v.Counters)) + + suppressOutput := false + if haveCounters { + counters = v.Counters + } else if d.cmd == debugDumpMode && *liveflag { + suppressOutput = true + } + + if d.cmd == debugDumpMode && !suppressOutput { + if !d.preambleEmitted { + fmt.Printf("\nPackage path: %s\n", d.pkgImportPath) + fmt.Printf("Package name: 
%s\n", d.pkgName) + fmt.Printf("Module path: %s\n", d.modulePath) + d.preambleEmitted = true + } + fmt.Printf("\nFunc: %s\n", fd.Funcname) + fmt.Printf("Srcfile: %s\n", fd.Srcfile) + fmt.Printf("Literal: %v\n", fd.Lit) + } + for i := 0; i < len(fd.Units); i++ { + u := fd.Units[i] + var count uint32 + if counters != nil { + count = counters[i] + } + d.format.AddUnit(fd.Srcfile, fd.Funcname, fd.Lit, u, count) + if d.cmd == debugDumpMode && !suppressOutput { + fmt.Printf("%d: L%d:C%d -- L%d:C%d ", + i, u.StLine, u.StCol, u.EnLine, u.EnCol) + if u.Parent != 0 { + fmt.Printf("Parent:%d = %d\n", u.Parent, count) + } else { + fmt.Printf("NS=%d = %d\n", u.NxStmts, count) + } + } + d.totalStmts += int(u.NxStmts) + if count != 0 { + d.coveredStmts += int(u.NxStmts) + } + } +} + +func (d *dstate) Finish() { + // d.format maybe nil here if the specified input dir was empty. + if d.format != nil { + if d.cmd == percentMode { + d.format.EmitPercent(os.Stdout, "", false, false) + } + if d.cmd == funcMode { + d.format.EmitFuncs(os.Stdout) + } + if d.textfmtoutf != nil { + if err := d.format.EmitTextual(d.textfmtoutf); err != nil { + fatal("writing to %s: %v", *textfmtoutflag, err) + } + } + } + if d.textfmtoutf != nil { + if err := d.textfmtoutf.Close(); err != nil { + fatal("closing textfmt output file %s: %v", *textfmtoutflag, err) + } + } + if d.cmd == debugDumpMode { + fmt.Printf("totalStmts: %d coveredStmts: %d\n", d.totalStmts, d.coveredStmts) + } + if d.cmd == pkglistMode { + pkgs := make([]string, 0, len(d.pkgpaths)) + for p := range d.pkgpaths { + pkgs = append(pkgs, p) + } + sort.Strings(pkgs) + for _, p := range pkgs { + fmt.Printf("%s\n", p) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/covdata/export_test.go b/platform/dbops/binaries/go/go/src/cmd/covdata/export_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e4592ee8f7b19873c1719d9c4e6429e992524e9f --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/covdata/export_test.go @@ -0,0 +1,7 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func Main() { main() } diff --git a/platform/dbops/binaries/go/go/src/cmd/covdata/merge.go b/platform/dbops/binaries/go/go/src/cmd/covdata/merge.go new file mode 100644 index 0000000000000000000000000000000000000000..63e823d376a6522f4a1f6b43d61fb6a355c1a5ca --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/covdata/merge.go @@ -0,0 +1,111 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// This file contains functions and apis to support the "merge" +// subcommand of "go tool covdata". + +import ( + "flag" + "fmt" + "internal/coverage" + "internal/coverage/cmerge" + "internal/coverage/decodecounter" + "internal/coverage/decodemeta" + "internal/coverage/pods" + "os" +) + +var outdirflag *string +var pcombineflag *bool + +func makeMergeOp() covOperation { + outdirflag = flag.String("o", "", "Output directory to write") + pcombineflag = flag.Bool("pcombine", false, "Combine profiles derived from distinct program executables") + m := &mstate{ + mm: newMetaMerge(), + } + return m +} + +// mstate encapsulates state and provides methods for implementing the +// merge operation. This type implements the CovDataVisitor interface, +// and is designed to be used in concert with the CovDataReader +// utility, which abstracts away most of the grubby details of reading +// coverage data files. Most of the heavy lifting for merging is done +// using apis from 'metaMerge' (this is mainly a wrapper around that +// functionality). 
+type mstate struct { + mm *metaMerge +} + +func (m *mstate) Usage(msg string) { + if len(msg) > 0 { + fmt.Fprintf(os.Stderr, "error: %s\n", msg) + } + fmt.Fprintf(os.Stderr, "usage: go tool covdata merge -i= -o=\n\n") + flag.PrintDefaults() + fmt.Fprintf(os.Stderr, "\nExamples:\n\n") + fmt.Fprintf(os.Stderr, " go tool covdata merge -i=dir1,dir2,dir3 -o=outdir\n\n") + fmt.Fprintf(os.Stderr, " \tmerges all files in dir1/dir2/dir3\n") + fmt.Fprintf(os.Stderr, " \tinto output dir outdir\n") + Exit(2) +} + +func (m *mstate) Setup() { + if *indirsflag == "" { + m.Usage("select input directories with '-i' option") + } + if *outdirflag == "" { + m.Usage("select output directory with '-o' option") + } + m.mm.SetModeMergePolicy(cmerge.ModeMergeRelaxed) +} + +func (m *mstate) BeginPod(p pods.Pod) { + m.mm.beginPod() +} + +func (m *mstate) EndPod(p pods.Pod) { + m.mm.endPod(*pcombineflag) +} + +func (m *mstate) BeginCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) { + dbgtrace(2, "visit counter data file %s dirIdx %d", cdf, dirIdx) + m.mm.beginCounterDataFile(cdr) +} + +func (m *mstate) EndCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) { +} + +func (m *mstate) VisitFuncCounterData(data decodecounter.FuncPayload) { + m.mm.visitFuncCounterData(data) +} + +func (m *mstate) EndCounters() { +} + +func (m *mstate) VisitMetaDataFile(mdf string, mfr *decodemeta.CoverageMetaFileReader) { + m.mm.visitMetaDataFile(mdf, mfr) +} + +func (m *mstate) BeginPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) { + dbgtrace(3, "VisitPackage(pk=%d path=%s)", pkgIdx, pd.PackagePath()) + m.mm.visitPackage(pd, pkgIdx, *pcombineflag) +} + +func (m *mstate) EndPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) { +} + +func (m *mstate) VisitFunc(pkgIdx uint32, fnIdx uint32, fd *coverage.FuncDesc) { + m.mm.visitFunc(pkgIdx, fnIdx, fd, mergeMode, *pcombineflag) +} + +func (m *mstate) Finish() { + if *pcombineflag { + 
finalHash := m.mm.emitMeta(*outdirflag, true) + m.mm.emitCounters(*outdirflag, finalHash) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/covdata/metamerge.go b/platform/dbops/binaries/go/go/src/cmd/covdata/metamerge.go new file mode 100644 index 0000000000000000000000000000000000000000..6c68e0c7228d370ed5b7f880c1ba9ea453b651cb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/covdata/metamerge.go @@ -0,0 +1,422 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// This file contains functions and apis that support merging of +// meta-data information. It helps implement the "merge", "subtract", +// and "intersect" subcommands. + +import ( + "crypto/md5" + "fmt" + "internal/coverage" + "internal/coverage/calloc" + "internal/coverage/cmerge" + "internal/coverage/decodecounter" + "internal/coverage/decodemeta" + "internal/coverage/encodecounter" + "internal/coverage/encodemeta" + "internal/coverage/slicewriter" + "io" + "os" + "path/filepath" + "sort" + "time" + "unsafe" +) + +// metaMerge provides state and methods to help manage the process +// of selecting or merging meta data files. There are three cases +// of interest here: the "-pcombine" flag provided by merge, the +// "-pkg" option provided by all merge/subtract/intersect, and +// a regular vanilla merge with no package selection +// +// In the -pcombine case, we're essentially glomming together all the +// meta-data for all packages and all functions, meaning that +// everything we see in a given package needs to be added into the +// meta-data file builder; we emit a single meta-data file at the end +// of the run. +// +// In the -pkg case, we will typically emit a single meta-data file +// per input pod, where that new meta-data file contains entries for +// just the selected packages. 
+// +// In the third case (vanilla merge with no combining or package +// selection) we can carry over meta-data files without touching them +// at all (only counter data files will be merged). +type metaMerge struct { + calloc.BatchCounterAlloc + cmerge.Merger + // maps package import path to package state + pkm map[string]*pkstate + // list of packages + pkgs []*pkstate + // current package state + p *pkstate + // current pod state + pod *podstate + // counter data file osargs/goos/goarch state + astate *argstate +} + +// pkstate +type pkstate struct { + // index of package within meta-data file. + pkgIdx uint32 + // this maps function index within the package to counter data payload + ctab map[uint32]decodecounter.FuncPayload + // pointer to meta-data blob for package + mdblob []byte + // filled in only for -pcombine merges + *pcombinestate +} + +type podstate struct { + pmm map[pkfunc]decodecounter.FuncPayload + mdf string + mfr *decodemeta.CoverageMetaFileReader + fileHash [16]byte +} + +type pkfunc struct { + pk, fcn uint32 +} + +// pcombinestate +type pcombinestate struct { + // Meta-data builder for the package. + cmdb *encodemeta.CoverageMetaDataBuilder + // Maps function meta-data hash to new function index in the + // new version of the package we're building. + ftab map[[16]byte]uint32 +} + +func newMetaMerge() *metaMerge { + return &metaMerge{ + pkm: make(map[string]*pkstate), + astate: &argstate{}, + } +} + +func (mm *metaMerge) visitMetaDataFile(mdf string, mfr *decodemeta.CoverageMetaFileReader) { + dbgtrace(2, "visitMetaDataFile(mdf=%s)", mdf) + + // Record meta-data file name. + mm.pod.mdf = mdf + // Keep a pointer to the file-level reader. + mm.pod.mfr = mfr + // Record file hash. + mm.pod.fileHash = mfr.FileHash() + // Counter mode and granularity -- detect and record clashes here. 
+ newgran := mfr.CounterGranularity() + newmode := mfr.CounterMode() + if err := mm.SetModeAndGranularity(mdf, newmode, newgran); err != nil { + fatal("%v", err) + } +} + +func (mm *metaMerge) beginCounterDataFile(cdr *decodecounter.CounterDataReader) { + state := argvalues{ + osargs: cdr.OsArgs(), + goos: cdr.Goos(), + goarch: cdr.Goarch(), + } + mm.astate.Merge(state) +} + +func copyMetaDataFile(inpath, outpath string) { + inf, err := os.Open(inpath) + if err != nil { + fatal("opening input meta-data file %s: %v", inpath, err) + } + defer inf.Close() + + fi, err := inf.Stat() + if err != nil { + fatal("accessing input meta-data file %s: %v", inpath, err) + } + + outf, err := os.OpenFile(outpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode()) + if err != nil { + fatal("opening output meta-data file %s: %v", outpath, err) + } + + _, err = io.Copy(outf, inf) + outf.Close() + if err != nil { + fatal("writing output meta-data file %s: %v", outpath, err) + } +} + +func (mm *metaMerge) beginPod() { + mm.pod = &podstate{ + pmm: make(map[pkfunc]decodecounter.FuncPayload), + } +} + +// metaEndPod handles actions needed when we're done visiting all of +// the things in a pod -- counter files and meta-data file. There are +// three cases of interest here: +// +// Case 1: in an unconditional merge (we're not selecting a specific set of +// packages using "-pkg", and the "-pcombine" option is not in use), +// we can simply copy over the meta-data file from input to output. +// +// Case 2: if this is a select merge (-pkg is in effect), then at +// this point we write out a new smaller meta-data file that includes +// only the packages of interest. At this point we also emit a merged +// counter data file as well. +// +// Case 3: if "-pcombine" is in effect, we don't write anything at +// this point (all writes will happen at the end of the run). 
+func (mm *metaMerge) endPod(pcombine bool) { + if pcombine { + // Just clear out the pod data, we'll do all the + // heavy lifting at the end. + mm.pod = nil + return + } + + finalHash := mm.pod.fileHash + if matchpkg != nil { + // Emit modified meta-data file for this pod. + finalHash = mm.emitMeta(*outdirflag, pcombine) + } else { + // Copy meta-data file for this pod to the output directory. + inpath := mm.pod.mdf + mdfbase := filepath.Base(mm.pod.mdf) + outpath := filepath.Join(*outdirflag, mdfbase) + copyMetaDataFile(inpath, outpath) + } + + // Emit acccumulated counter data for this pod. + mm.emitCounters(*outdirflag, finalHash) + + // Reset package state. + mm.pkm = make(map[string]*pkstate) + mm.pkgs = nil + mm.pod = nil + + // Reset counter mode and granularity + mm.ResetModeAndGranularity() +} + +// emitMeta encodes and writes out a new coverage meta-data file as +// part of a merge operation, specifically a merge with the +// "-pcombine" flag. +func (mm *metaMerge) emitMeta(outdir string, pcombine bool) [16]byte { + fh := md5.New() + blobs := [][]byte{} + tlen := uint64(unsafe.Sizeof(coverage.MetaFileHeader{})) + for _, p := range mm.pkgs { + var blob []byte + if pcombine { + mdw := &slicewriter.WriteSeeker{} + p.cmdb.Emit(mdw) + blob = mdw.BytesWritten() + } else { + blob = p.mdblob + } + ph := md5.Sum(blob) + blobs = append(blobs, blob) + if _, err := fh.Write(ph[:]); err != nil { + panic(fmt.Sprintf("internal error: md5 sum failed: %v", err)) + } + tlen += uint64(len(blob)) + } + var finalHash [16]byte + fhh := fh.Sum(nil) + copy(finalHash[:], fhh) + + // Open meta-file for writing. + fn := fmt.Sprintf("%s.%x", coverage.MetaFilePref, finalHash) + fpath := filepath.Join(outdir, fn) + mf, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + fatal("unable to open output meta-data file %s: %v", fpath, err) + } + + // Encode and write. 
+ mfw := encodemeta.NewCoverageMetaFileWriter(fpath, mf) + err = mfw.Write(finalHash, blobs, mm.Mode(), mm.Granularity()) + if err != nil { + fatal("error writing %s: %v\n", fpath, err) + } + return finalHash +} + +func (mm *metaMerge) emitCounters(outdir string, metaHash [16]byte) { + // Open output file. The file naming scheme is intended to mimic + // that used when running a coverage-instrumented binary, for + // consistency (however the process ID is not meaningful here, so + // use a value of zero). + var dummyPID int + fn := fmt.Sprintf(coverage.CounterFileTempl, coverage.CounterFilePref, metaHash, dummyPID, time.Now().UnixNano()) + fpath := filepath.Join(outdir, fn) + cf, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + fatal("opening counter data file %s: %v", fpath, err) + } + defer func() { + if err := cf.Close(); err != nil { + fatal("error closing output meta-data file %s: %v", fpath, err) + } + }() + + args := mm.astate.ArgsSummary() + cfw := encodecounter.NewCoverageDataWriter(cf, coverage.CtrULeb128) + if err := cfw.Write(metaHash, args, mm); err != nil { + fatal("counter file write failed: %v", err) + } + mm.astate = &argstate{} +} + +// VisitFuncs is used while writing the counter data files; it +// implements the 'VisitFuncs' method required by the interface +// internal/coverage/encodecounter/CounterVisitor. +func (mm *metaMerge) VisitFuncs(f encodecounter.CounterVisitorFn) error { + if *verbflag >= 4 { + fmt.Printf("counterVisitor invoked\n") + } + // For each package, for each function, construct counter + // array and then call "f" on it. 
+ for pidx, p := range mm.pkgs { + fids := make([]int, 0, len(p.ctab)) + for fid := range p.ctab { + fids = append(fids, int(fid)) + } + sort.Ints(fids) + if *verbflag >= 4 { + fmt.Printf("fids for pk=%d: %+v\n", pidx, fids) + } + for _, fid := range fids { + fp := p.ctab[uint32(fid)] + if *verbflag >= 4 { + fmt.Printf("counter write for pk=%d fid=%d len(ctrs)=%d\n", pidx, fid, len(fp.Counters)) + } + if err := f(uint32(pidx), uint32(fid), fp.Counters); err != nil { + return err + } + } + } + return nil +} + +func (mm *metaMerge) visitPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32, pcombine bool) { + p, ok := mm.pkm[pd.PackagePath()] + if !ok { + p = &pkstate{ + pkgIdx: uint32(len(mm.pkgs)), + } + mm.pkgs = append(mm.pkgs, p) + mm.pkm[pd.PackagePath()] = p + if pcombine { + p.pcombinestate = new(pcombinestate) + cmdb, err := encodemeta.NewCoverageMetaDataBuilder(pd.PackagePath(), pd.PackageName(), pd.ModulePath()) + if err != nil { + fatal("fatal error creating meta-data builder: %v", err) + } + dbgtrace(2, "install new pkm entry for package %s pk=%d", pd.PackagePath(), pkgIdx) + p.cmdb = cmdb + p.ftab = make(map[[16]byte]uint32) + } else { + var err error + p.mdblob, err = mm.pod.mfr.GetPackagePayload(pkgIdx, nil) + if err != nil { + fatal("error extracting package %d payload from %s: %v", + pkgIdx, mm.pod.mdf, err) + } + } + p.ctab = make(map[uint32]decodecounter.FuncPayload) + } + mm.p = p +} + +func (mm *metaMerge) visitFuncCounterData(data decodecounter.FuncPayload) { + key := pkfunc{pk: data.PkgIdx, fcn: data.FuncIdx} + val := mm.pod.pmm[key] + // FIXME: in theory either A) len(val.Counters) is zero, or B) + // the two lengths are equal. Assert if not? Of course, we could + // see odd stuff if there is source file skew. 
+ if *verbflag > 4 { + fmt.Printf("visit pk=%d fid=%d len(counters)=%d\n", data.PkgIdx, data.FuncIdx, len(data.Counters)) + } + if len(val.Counters) < len(data.Counters) { + t := val.Counters + val.Counters = mm.AllocateCounters(len(data.Counters)) + copy(val.Counters, t) + } + err, overflow := mm.MergeCounters(val.Counters, data.Counters) + if err != nil { + fatal("%v", err) + } + if overflow { + warn("uint32 overflow during counter merge") + } + mm.pod.pmm[key] = val +} + +func (mm *metaMerge) visitFunc(pkgIdx uint32, fnIdx uint32, fd *coverage.FuncDesc, verb string, pcombine bool) { + if *verbflag >= 3 { + fmt.Printf("visit pk=%d fid=%d func %s\n", pkgIdx, fnIdx, fd.Funcname) + } + + var counters []uint32 + key := pkfunc{pk: pkgIdx, fcn: fnIdx} + v, haveCounters := mm.pod.pmm[key] + if haveCounters { + counters = v.Counters + } + + if pcombine { + // If the merge is running in "combine programs" mode, then hash + // the function and look it up in the package ftab to see if we've + // encountered it before. If we haven't, then register it with the + // meta-data builder. + fnhash := encodemeta.HashFuncDesc(fd) + gfidx, ok := mm.p.ftab[fnhash] + if !ok { + // We haven't seen this function before, need to add it to + // the meta data. + gfidx = uint32(mm.p.cmdb.AddFunc(*fd)) + mm.p.ftab[fnhash] = gfidx + if *verbflag >= 3 { + fmt.Printf("new meta entry for fn %s fid=%d\n", fd.Funcname, gfidx) + } + } + fnIdx = gfidx + } + if !haveCounters { + return + } + + // Install counters in package ctab. + gfp, ok := mm.p.ctab[fnIdx] + if ok { + if verb == "subtract" || verb == "intersect" { + panic("should never see this for intersect/subtract") + } + if *verbflag >= 3 { + fmt.Printf("counter merge for %s fidx=%d\n", fd.Funcname, fnIdx) + } + // Merge. 
+ err, overflow := mm.MergeCounters(gfp.Counters, counters) + if err != nil { + fatal("%v", err) + } + if overflow { + warn("uint32 overflow during counter merge") + } + mm.p.ctab[fnIdx] = gfp + } else { + if *verbflag >= 3 { + fmt.Printf("null merge for %s fidx %d\n", fd.Funcname, fnIdx) + } + gfp := v + gfp.PkgIdx = mm.p.pkgIdx + gfp.FuncIdx = fnIdx + mm.p.ctab[fnIdx] = gfp + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/covdata/subtractintersect.go b/platform/dbops/binaries/go/go/src/cmd/covdata/subtractintersect.go new file mode 100644 index 0000000000000000000000000000000000000000..5d71e3d8ecdabad84e138cef9f05c9b86ff84e46 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/covdata/subtractintersect.go @@ -0,0 +1,196 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// This file contains functions and apis to support the "subtract" and +// "intersect" subcommands of "go tool covdata". + +import ( + "flag" + "fmt" + "internal/coverage" + "internal/coverage/decodecounter" + "internal/coverage/decodemeta" + "internal/coverage/pods" + "os" + "strings" +) + +// makeSubtractIntersectOp creates a subtract or intersect operation. +// 'mode' here must be either "subtract" or "intersect". +func makeSubtractIntersectOp(mode string) covOperation { + outdirflag = flag.String("o", "", "Output directory to write") + s := &sstate{ + mode: mode, + mm: newMetaMerge(), + inidx: -1, + } + return s +} + +// sstate holds state needed to implement subtraction and intersection +// operations on code coverage data files. This type provides methods +// to implement the CovDataVisitor interface, and is designed to be +// used in concert with the CovDataReader utility, which abstracts +// away most of the grubby details of reading coverage data files. 
+type sstate struct { + mm *metaMerge + inidx int + mode string + // Used only for intersection; keyed by pkg/fn ID, it keeps track of + // just the set of functions for which we have data in the current + // input directory. + imm map[pkfunc]struct{} +} + +func (s *sstate) Usage(msg string) { + if len(msg) > 0 { + fmt.Fprintf(os.Stderr, "error: %s\n", msg) + } + fmt.Fprintf(os.Stderr, "usage: go tool covdata %s -i=dir1,dir2 -o=\n\n", s.mode) + flag.PrintDefaults() + fmt.Fprintf(os.Stderr, "\nExamples:\n\n") + op := "from" + if s.mode == intersectMode { + op = "with" + } + fmt.Fprintf(os.Stderr, " go tool covdata %s -i=dir1,dir2 -o=outdir\n\n", s.mode) + fmt.Fprintf(os.Stderr, " \t%ss dir2 %s dir1, writing result\n", s.mode, op) + fmt.Fprintf(os.Stderr, " \tinto output dir outdir.\n") + os.Exit(2) +} + +func (s *sstate) Setup() { + if *indirsflag == "" { + usage("select input directories with '-i' option") + } + indirs := strings.Split(*indirsflag, ",") + if s.mode == subtractMode && len(indirs) != 2 { + usage("supply exactly two input dirs for subtract operation") + } + if *outdirflag == "" { + usage("select output directory with '-o' option") + } +} + +func (s *sstate) BeginPod(p pods.Pod) { + s.mm.beginPod() +} + +func (s *sstate) EndPod(p pods.Pod) { + const pcombine = false + s.mm.endPod(pcombine) +} + +func (s *sstate) EndCounters() { + if s.imm != nil { + s.pruneCounters() + } +} + +// pruneCounters performs a function-level partial intersection using the +// current POD counter data (s.mm.pod.pmm) and the intersected data from +// PODs in previous dirs (s.imm). +func (s *sstate) pruneCounters() { + pkeys := make([]pkfunc, 0, len(s.mm.pod.pmm)) + for k := range s.mm.pod.pmm { + pkeys = append(pkeys, k) + } + // Remove anything from pmm not found in imm. We don't need to + // go the other way (removing things from imm not found in pmm) + // since we don't add anything to imm if there is no pmm entry. 
+ for _, k := range pkeys { + if _, found := s.imm[k]; !found { + delete(s.mm.pod.pmm, k) + } + } + s.imm = nil +} + +func (s *sstate) BeginCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) { + dbgtrace(2, "visiting counter data file %s diridx %d", cdf, dirIdx) + if s.inidx != dirIdx { + if s.inidx > dirIdx { + // We're relying on having data files presented in + // the order they appear in the inputs (e.g. first all + // data files from input dir 0, then dir 1, etc). + panic("decreasing dir index, internal error") + } + if dirIdx == 0 { + // No need to keep track of the functions in the first + // directory, since that info will be replicated in + // s.mm.pod.pmm. + s.imm = nil + } else { + // We're now starting to visit the Nth directory, N != 0. + if s.mode == intersectMode { + if s.imm != nil { + s.pruneCounters() + } + s.imm = make(map[pkfunc]struct{}) + } + } + s.inidx = dirIdx + } +} + +func (s *sstate) EndCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) { +} + +func (s *sstate) VisitFuncCounterData(data decodecounter.FuncPayload) { + key := pkfunc{pk: data.PkgIdx, fcn: data.FuncIdx} + + if *verbflag >= 5 { + fmt.Printf("ctr visit fid=%d pk=%d inidx=%d data.Counters=%+v\n", data.FuncIdx, data.PkgIdx, s.inidx, data.Counters) + } + + // If we're processing counter data from the initial (first) input + // directory, then just install it into the counter data map + // as usual. + if s.inidx == 0 { + s.mm.visitFuncCounterData(data) + return + } + + // If we're looking at counter data from a dir other than + // the first, then perform the intersect/subtract. 
+ if val, ok := s.mm.pod.pmm[key]; ok { + if s.mode == subtractMode { + for i := 0; i < len(data.Counters); i++ { + if data.Counters[i] != 0 { + val.Counters[i] = 0 + } + } + } else if s.mode == intersectMode { + s.imm[key] = struct{}{} + for i := 0; i < len(data.Counters); i++ { + if data.Counters[i] == 0 { + val.Counters[i] = 0 + } + } + } + } +} + +func (s *sstate) VisitMetaDataFile(mdf string, mfr *decodemeta.CoverageMetaFileReader) { + if s.mode == intersectMode { + s.imm = make(map[pkfunc]struct{}) + } + s.mm.visitMetaDataFile(mdf, mfr) +} + +func (s *sstate) BeginPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) { + s.mm.visitPackage(pd, pkgIdx, false) +} + +func (s *sstate) EndPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) { +} + +func (s *sstate) VisitFunc(pkgIdx uint32, fnIdx uint32, fd *coverage.FuncDesc) { + s.mm.visitFunc(pkgIdx, fnIdx, fd, s.mode, false) +} + +func (s *sstate) Finish() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/covdata/tool_test.go b/platform/dbops/binaries/go/go/src/cmd/covdata/tool_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d99113ebc556d56d070b25aa128fd5ea8ad8d1f7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/covdata/tool_test.go @@ -0,0 +1,958 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main_test + +import ( + cmdcovdata "cmd/covdata" + "flag" + "fmt" + "internal/coverage/pods" + "internal/goexperiment" + "internal/testenv" + "log" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "testing" +) + +// testcovdata returns the path to the unit test executable to be used as +// standin for 'go tool covdata'. +func testcovdata(t testing.TB) string { + exe, err := os.Executable() + if err != nil { + t.Helper() + t.Fatal(err) + } + return exe +} + +// Top level tempdir for test. 
+var testTempDir string + +// If set, this will preserve all the tmpdir files from the test run. +var preserveTmp = flag.Bool("preservetmp", false, "keep tmpdir files for debugging") + +// TestMain used here so that we can leverage the test executable +// itself as a cmd/covdata executable; compare to similar usage in +// the cmd/go tests. +func TestMain(m *testing.M) { + // When CMDCOVDATA_TEST_RUN_MAIN is set, we're reusing the test + // binary as cmd/cover. In this case we run the main func exported + // via export_test.go, and exit; CMDCOVDATA_TEST_RUN_MAIN is set below + // for actual test invocations. + if os.Getenv("CMDCOVDATA_TEST_RUN_MAIN") != "" { + cmdcovdata.Main() + os.Exit(0) + } + flag.Parse() + topTmpdir, err := os.MkdirTemp("", "cmd-covdata-test-") + if err != nil { + log.Fatal(err) + } + testTempDir = topTmpdir + if !*preserveTmp { + defer os.RemoveAll(topTmpdir) + } else { + fmt.Fprintf(os.Stderr, "debug: preserving tmpdir %s\n", topTmpdir) + } + os.Setenv("CMDCOVDATA_TEST_RUN_MAIN", "true") + os.Exit(m.Run()) +} + +var tdmu sync.Mutex +var tdcount int + +func tempDir(t *testing.T) string { + tdmu.Lock() + dir := filepath.Join(testTempDir, fmt.Sprintf("%03d", tdcount)) + tdcount++ + if err := os.Mkdir(dir, 0777); err != nil { + t.Fatal(err) + } + defer tdmu.Unlock() + return dir +} + +const debugtrace = false + +func gobuild(t *testing.T, indir string, bargs []string) { + t.Helper() + + if debugtrace { + if indir != "" { + t.Logf("in dir %s: ", indir) + } + t.Logf("cmd: %s %+v\n", testenv.GoToolPath(t), bargs) + } + cmd := testenv.Command(t, testenv.GoToolPath(t), bargs...) 
+ cmd.Dir = indir + b, err := cmd.CombinedOutput() + if len(b) != 0 { + t.Logf("## build output:\n%s", b) + } + if err != nil { + t.Fatalf("build error: %v", err) + } +} + +func emitFile(t *testing.T, dst, src string) { + payload, err := os.ReadFile(src) + if err != nil { + t.Fatalf("error reading %q: %v", src, err) + } + if err := os.WriteFile(dst, payload, 0666); err != nil { + t.Fatalf("writing %q: %v", dst, err) + } +} + +const mainPkgPath = "prog" + +func buildProg(t *testing.T, prog string, dir string, tag string, flags []string) (string, string) { + // Create subdirs. + subdir := filepath.Join(dir, prog+"dir"+tag) + if err := os.Mkdir(subdir, 0777); err != nil { + t.Fatalf("can't create outdir %s: %v", subdir, err) + } + depdir := filepath.Join(subdir, "dep") + if err := os.Mkdir(depdir, 0777); err != nil { + t.Fatalf("can't create outdir %s: %v", depdir, err) + } + + // Emit program. + insrc := filepath.Join("testdata", prog+".go") + src := filepath.Join(subdir, prog+".go") + emitFile(t, src, insrc) + indep := filepath.Join("testdata", "dep.go") + dep := filepath.Join(depdir, "dep.go") + emitFile(t, dep, indep) + + // Emit go.mod. + mod := filepath.Join(subdir, "go.mod") + modsrc := "\nmodule " + mainPkgPath + "\n\ngo 1.19\n" + if err := os.WriteFile(mod, []byte(modsrc), 0666); err != nil { + t.Fatal(err) + } + exepath := filepath.Join(subdir, prog+".exe") + bargs := []string{"build", "-cover", "-o", exepath} + bargs = append(bargs, flags...) 
+ gobuild(t, subdir, bargs) + return exepath, subdir +} + +type state struct { + dir string + exedir1 string + exedir2 string + exedir3 string + exepath1 string + exepath2 string + exepath3 string + tool string + outdirs [4]string +} + +const debugWorkDir = false + +func TestCovTool(t *testing.T) { + testenv.MustHaveGoBuild(t) + if !goexperiment.CoverageRedesign { + t.Skipf("stubbed out due to goexperiment.CoverageRedesign=false") + } + dir := tempDir(t) + if testing.Short() { + t.Skip() + } + if debugWorkDir { + // debugging + dir = "/tmp/qqq" + os.RemoveAll(dir) + os.Mkdir(dir, 0777) + } + + s := state{ + dir: dir, + } + s.exepath1, s.exedir1 = buildProg(t, "prog1", dir, "", nil) + s.exepath2, s.exedir2 = buildProg(t, "prog2", dir, "", nil) + flags := []string{"-covermode=atomic"} + s.exepath3, s.exedir3 = buildProg(t, "prog1", dir, "atomic", flags) + + // Reuse unit test executable as tool to be tested. + s.tool = testcovdata(t) + + // Create a few coverage output dirs. + for i := 0; i < 4; i++ { + d := filepath.Join(dir, fmt.Sprintf("covdata%d", i)) + s.outdirs[i] = d + if err := os.Mkdir(d, 0777); err != nil { + t.Fatalf("can't create outdir %s: %v", d, err) + } + } + + // Run instrumented program to generate some coverage data output files, + // as follows: + // + // /covdata0 -- prog1.go compiled -cover + // /covdata1 -- prog1.go compiled -cover + // /covdata2 -- prog1.go compiled -covermode=atomic + // /covdata3 -- prog1.go compiled -covermode=atomic + // + for m := 0; m < 2; m++ { + for k := 0; k < 2; k++ { + args := []string{} + if k != 0 { + args = append(args, "foo", "bar") + } + for i := 0; i <= k; i++ { + exepath := s.exepath1 + if m != 0 { + exepath = s.exepath3 + } + cmd := testenv.Command(t, exepath, args...) 
+ cmd.Env = append(cmd.Env, "GOCOVERDIR="+s.outdirs[m*2+k]) + b, err := cmd.CombinedOutput() + if len(b) != 0 { + t.Logf("## instrumented run output:\n%s", b) + } + if err != nil { + t.Fatalf("instrumented run error: %v", err) + } + } + } + } + + // At this point we can fork off a bunch of child tests + // to check different tool modes. + t.Run("MergeSimple", func(t *testing.T) { + t.Parallel() + testMergeSimple(t, s, s.outdirs[0], s.outdirs[1], "set") + testMergeSimple(t, s, s.outdirs[2], s.outdirs[3], "atomic") + }) + t.Run("MergeSelect", func(t *testing.T) { + t.Parallel() + testMergeSelect(t, s, s.outdirs[0], s.outdirs[1], "set") + testMergeSelect(t, s, s.outdirs[2], s.outdirs[3], "atomic") + }) + t.Run("MergePcombine", func(t *testing.T) { + t.Parallel() + testMergeCombinePrograms(t, s) + }) + t.Run("Dump", func(t *testing.T) { + t.Parallel() + testDump(t, s) + }) + t.Run("Percent", func(t *testing.T) { + t.Parallel() + testPercent(t, s) + }) + t.Run("PkgList", func(t *testing.T) { + t.Parallel() + testPkgList(t, s) + }) + t.Run("Textfmt", func(t *testing.T) { + t.Parallel() + testTextfmt(t, s) + }) + t.Run("Subtract", func(t *testing.T) { + t.Parallel() + testSubtract(t, s) + }) + t.Run("Intersect", func(t *testing.T) { + t.Parallel() + testIntersect(t, s, s.outdirs[0], s.outdirs[1], "set") + testIntersect(t, s, s.outdirs[2], s.outdirs[3], "atomic") + }) + t.Run("CounterClash", func(t *testing.T) { + t.Parallel() + testCounterClash(t, s) + }) + t.Run("TestEmpty", func(t *testing.T) { + t.Parallel() + testEmpty(t, s) + }) + t.Run("TestCommandLineErrors", func(t *testing.T) { + t.Parallel() + testCommandLineErrors(t, s, s.outdirs[0]) + }) +} + +const showToolInvocations = true + +func runToolOp(t *testing.T, s state, op string, args []string) []string { + // Perform tool run. + t.Helper() + args = append([]string{op}, args...) + if showToolInvocations { + t.Logf("%s cmd is: %s %+v", op, s.tool, args) + } + cmd := testenv.Command(t, s.tool, args...) 
+ b, err := cmd.CombinedOutput() + if err != nil { + fmt.Fprintf(os.Stderr, "## %s output: %s\n", op, string(b)) + t.Fatalf("%q run error: %v", op, err) + } + output := strings.TrimSpace(string(b)) + lines := strings.Split(output, "\n") + if len(lines) == 1 && lines[0] == "" { + lines = nil + } + return lines +} + +func testDump(t *testing.T, s state) { + // Run the dumper on the two dirs we generated. + dargs := []string{"-pkg=" + mainPkgPath, "-live", "-i=" + s.outdirs[0] + "," + s.outdirs[1]} + lines := runToolOp(t, s, "debugdump", dargs) + + // Sift through the output to make sure it has some key elements. + testpoints := []struct { + tag string + re *regexp.Regexp + }{ + { + "args", + regexp.MustCompile(`^data file .+ GOOS=.+ GOARCH=.+ program args: .+$`), + }, + { + "main package", + regexp.MustCompile(`^Package path: ` + mainPkgPath + `\s*$`), + }, + { + "main function", + regexp.MustCompile(`^Func: main\s*$`), + }, + } + + bad := false + for _, testpoint := range testpoints { + found := false + for _, line := range lines { + if m := testpoint.re.FindStringSubmatch(line); m != nil { + found = true + break + } + } + if !found { + t.Errorf("dump output regexp match failed for %q", testpoint.tag) + bad = true + } + } + if bad { + dumplines(lines) + } +} + +func testPercent(t *testing.T, s state) { + // Run the dumper on the two dirs we generated. + dargs := []string{"-pkg=" + mainPkgPath, "-i=" + s.outdirs[0] + "," + s.outdirs[1]} + lines := runToolOp(t, s, "percent", dargs) + + // Sift through the output to make sure it has the needful. 
+ testpoints := []struct { + tag string + re *regexp.Regexp + }{ + { + "statement coverage percent", + regexp.MustCompile(`coverage: \d+\.\d% of statements\s*$`), + }, + } + + bad := false + for _, testpoint := range testpoints { + found := false + for _, line := range lines { + if m := testpoint.re.FindStringSubmatch(line); m != nil { + found = true + break + } + } + if !found { + t.Errorf("percent output regexp match failed for %s", testpoint.tag) + bad = true + } + } + if bad { + dumplines(lines) + } +} + +func testPkgList(t *testing.T, s state) { + dargs := []string{"-i=" + s.outdirs[0] + "," + s.outdirs[1]} + lines := runToolOp(t, s, "pkglist", dargs) + + want := []string{mainPkgPath, mainPkgPath + "/dep"} + bad := false + if len(lines) != 2 { + t.Errorf("expect pkglist to return two lines") + bad = true + } else { + for i := 0; i < 2; i++ { + lines[i] = strings.TrimSpace(lines[i]) + if want[i] != lines[i] { + t.Errorf("line %d want %s got %s", i, want[i], lines[i]) + bad = true + } + } + } + if bad { + dumplines(lines) + } +} + +func testTextfmt(t *testing.T, s state) { + outf := s.dir + "/" + "t.txt" + dargs := []string{"-pkg=" + mainPkgPath, "-i=" + s.outdirs[0] + "," + s.outdirs[1], + "-o", outf} + lines := runToolOp(t, s, "textfmt", dargs) + + // No output expected. + if len(lines) != 0 { + dumplines(lines) + t.Errorf("unexpected output from go tool covdata textfmt") + } + + // Open and read the first few bits of the file. 
+ payload, err := os.ReadFile(outf) + if err != nil { + t.Errorf("opening %s: %v\n", outf, err) + } + lines = strings.Split(string(payload), "\n") + want0 := "mode: set" + if lines[0] != want0 { + dumplines(lines[0:10]) + t.Errorf("textfmt: want %s got %s", want0, lines[0]) + } + want1 := mainPkgPath + "/prog1.go:13.14,15.2 1 1" + if lines[1] != want1 { + dumplines(lines[0:10]) + t.Errorf("textfmt: want %s got %s", want1, lines[1]) + } +} + +func dumplines(lines []string) { + for i := range lines { + fmt.Fprintf(os.Stderr, "%s\n", lines[i]) + } +} + +type dumpCheck struct { + tag string + re *regexp.Regexp + negate bool + nonzero bool + zero bool +} + +// runDumpChecks examines the output of "go tool covdata debugdump" +// for a given output directory, looking for the presence or absence +// of specific markers. +func runDumpChecks(t *testing.T, s state, dir string, flags []string, checks []dumpCheck) { + dargs := []string{"-i", dir} + dargs = append(dargs, flags...) + lines := runToolOp(t, s, "debugdump", dargs) + if len(lines) == 0 { + t.Fatalf("dump run produced no output") + } + + bad := false + for _, check := range checks { + found := false + for _, line := range lines { + if m := check.re.FindStringSubmatch(line); m != nil { + found = true + if check.negate { + t.Errorf("tag %q: unexpected match", check.tag) + bad = true + + } + if check.nonzero || check.zero { + if len(m) < 2 { + t.Errorf("tag %s: submatch failed (short m)", check.tag) + bad = true + continue + } + if m[1] == "" { + t.Errorf("tag %s: submatch failed", check.tag) + bad = true + continue + } + i, err := strconv.Atoi(m[1]) + if err != nil { + t.Errorf("tag %s: match Atoi failed on %s", + check.tag, m[1]) + continue + } + if check.zero && i != 0 { + t.Errorf("tag %s: match zero failed on %s", + check.tag, m[1]) + } else if check.nonzero && i == 0 { + t.Errorf("tag %s: match nonzero failed on %s", + check.tag, m[1]) + } + } + break + } + } + if !found && !check.negate { + t.Errorf("dump output 
regexp match failed for %s", check.tag) + bad = true + } + } + if bad { + fmt.Printf("output from 'dump' run:\n") + dumplines(lines) + } +} + +func testMergeSimple(t *testing.T, s state, indir1, indir2, tag string) { + outdir := filepath.Join(s.dir, "simpleMergeOut"+tag) + if err := os.Mkdir(outdir, 0777); err != nil { + t.Fatalf("can't create outdir %s: %v", outdir, err) + } + + // Merge the two dirs into a final result. + ins := fmt.Sprintf("-i=%s,%s", indir1, indir2) + out := fmt.Sprintf("-o=%s", outdir) + margs := []string{ins, out} + lines := runToolOp(t, s, "merge", margs) + if len(lines) != 0 { + t.Errorf("merge run produced %d lines of unexpected output", len(lines)) + dumplines(lines) + } + + // We expect the merge tool to produce exactly two files: a meta + // data file and a counter file. If we get more than just this one + // pair, something went wrong. + podlist, err := pods.CollectPods([]string{outdir}, true) + if err != nil { + t.Fatal(err) + } + if len(podlist) != 1 { + t.Fatalf("expected 1 pod, got %d pods", len(podlist)) + } + ncdfs := len(podlist[0].CounterDataFiles) + if ncdfs != 1 { + t.Fatalf("expected 1 counter data file, got %d", ncdfs) + } + + // Sift through the output to make sure it has some key elements. + // In particular, we want to see entries for all three functions + // ("first", "second", and "third"). 
+ testpoints := []dumpCheck{ + { + tag: "first function", + re: regexp.MustCompile(`^Func: first\s*$`), + }, + { + tag: "second function", + re: regexp.MustCompile(`^Func: second\s*$`), + }, + { + tag: "third function", + re: regexp.MustCompile(`^Func: third\s*$`), + }, + { + tag: "third function unit 0", + re: regexp.MustCompile(`^0: L23:C23 -- L24:C12 NS=1 = (\d+)$`), + nonzero: true, + }, + { + tag: "third function unit 1", + re: regexp.MustCompile(`^1: L27:C2 -- L28:C10 NS=2 = (\d+)$`), + nonzero: true, + }, + { + tag: "third function unit 2", + re: regexp.MustCompile(`^2: L24:C12 -- L26:C3 NS=1 = (\d+)$`), + nonzero: true, + }, + } + flags := []string{"-live", "-pkg=" + mainPkgPath} + runDumpChecks(t, s, outdir, flags, testpoints) +} + +func testMergeSelect(t *testing.T, s state, indir1, indir2 string, tag string) { + outdir := filepath.Join(s.dir, "selectMergeOut"+tag) + if err := os.Mkdir(outdir, 0777); err != nil { + t.Fatalf("can't create outdir %s: %v", outdir, err) + } + + // Merge two input dirs into a final result, but filter + // based on package. + ins := fmt.Sprintf("-i=%s,%s", indir1, indir2) + out := fmt.Sprintf("-o=%s", outdir) + margs := []string{"-pkg=" + mainPkgPath + "/dep", ins, out} + lines := runToolOp(t, s, "merge", margs) + if len(lines) != 0 { + t.Errorf("merge run produced %d lines of unexpected output", len(lines)) + dumplines(lines) + } + + // Dump the files in the merged output dir and examine the result. + // We expect to see only the functions in package "dep". 
+ dargs := []string{"-i=" + outdir} + lines = runToolOp(t, s, "debugdump", dargs) + if len(lines) == 0 { + t.Fatalf("dump run produced no output") + } + want := map[string]int{ + "Package path: " + mainPkgPath + "/dep": 0, + "Func: Dep1": 0, + "Func: PDep": 0, + } + bad := false + for _, line := range lines { + if v, ok := want[line]; ok { + if v != 0 { + t.Errorf("duplicate line %s", line) + bad = true + break + } + want[line] = 1 + continue + } + // no other functions or packages expected. + if strings.HasPrefix(line, "Func:") || strings.HasPrefix(line, "Package path:") { + t.Errorf("unexpected line: %s", line) + bad = true + break + } + } + if bad { + dumplines(lines) + } +} + +func testMergeCombinePrograms(t *testing.T, s state) { + + // Run the new program, emitting output into a new set + // of outdirs. + runout := [2]string{} + for k := 0; k < 2; k++ { + runout[k] = filepath.Join(s.dir, fmt.Sprintf("newcovdata%d", k)) + if err := os.Mkdir(runout[k], 0777); err != nil { + t.Fatalf("can't create outdir %s: %v", runout[k], err) + } + args := []string{} + if k != 0 { + args = append(args, "foo", "bar") + } + cmd := testenv.Command(t, s.exepath2, args...) + cmd.Env = append(cmd.Env, "GOCOVERDIR="+runout[k]) + b, err := cmd.CombinedOutput() + if len(b) != 0 { + t.Logf("## instrumented run output:\n%s", b) + } + if err != nil { + t.Fatalf("instrumented run error: %v", err) + } + } + + // Create out dir for -pcombine merge. + moutdir := filepath.Join(s.dir, "mergeCombineOut") + if err := os.Mkdir(moutdir, 0777); err != nil { + t.Fatalf("can't create outdir %s: %v", moutdir, err) + } + + // Run a merge over both programs, using the -pcombine + // flag to do maximal combining. 
+ ins := fmt.Sprintf("-i=%s,%s,%s,%s", s.outdirs[0], s.outdirs[1], + runout[0], runout[1]) + out := fmt.Sprintf("-o=%s", moutdir) + margs := []string{"-pcombine", ins, out} + lines := runToolOp(t, s, "merge", margs) + if len(lines) != 0 { + t.Errorf("merge run produced unexpected output: %v", lines) + } + + // We expect the merge tool to produce exactly two files: a meta + // data file and a counter file. If we get more than just this one + // pair, something went wrong. + podlist, err := pods.CollectPods([]string{moutdir}, true) + if err != nil { + t.Fatal(err) + } + if len(podlist) != 1 { + t.Fatalf("expected 1 pod, got %d pods", len(podlist)) + } + ncdfs := len(podlist[0].CounterDataFiles) + if ncdfs != 1 { + t.Fatalf("expected 1 counter data file, got %d", ncdfs) + } + + // Sift through the output to make sure it has some key elements. + testpoints := []dumpCheck{ + { + tag: "first function", + re: regexp.MustCompile(`^Func: first\s*$`), + }, + { + tag: "sixth function", + re: regexp.MustCompile(`^Func: sixth\s*$`), + }, + } + + flags := []string{"-live", "-pkg=" + mainPkgPath} + runDumpChecks(t, s, moutdir, flags, testpoints) +} + +func testSubtract(t *testing.T, s state) { + // Create out dir for subtract merge. + soutdir := filepath.Join(s.dir, "subtractOut") + if err := os.Mkdir(soutdir, 0777); err != nil { + t.Fatalf("can't create outdir %s: %v", soutdir, err) + } + + // Subtract the two dirs into a final result. + ins := fmt.Sprintf("-i=%s,%s", s.outdirs[0], s.outdirs[1]) + out := fmt.Sprintf("-o=%s", soutdir) + sargs := []string{ins, out} + lines := runToolOp(t, s, "subtract", sargs) + if len(lines) != 0 { + t.Errorf("subtract run produced unexpected output: %+v", lines) + } + + // Dump the files in the subtract output dir and examine the result. + dargs := []string{"-pkg=" + mainPkgPath, "-live", "-i=" + soutdir} + lines = runToolOp(t, s, "debugdump", dargs) + if len(lines) == 0 { + t.Errorf("dump run produced no output") + } + + // Vet the output. 
+ testpoints := []dumpCheck{ + { + tag: "first function", + re: regexp.MustCompile(`^Func: first\s*$`), + }, + { + tag: "dep function", + re: regexp.MustCompile(`^Func: Dep1\s*$`), + }, + { + tag: "third function", + re: regexp.MustCompile(`^Func: third\s*$`), + }, + { + tag: "third function unit 0", + re: regexp.MustCompile(`^0: L23:C23 -- L24:C12 NS=1 = (\d+)$`), + zero: true, + }, + { + tag: "third function unit 1", + re: regexp.MustCompile(`^1: L27:C2 -- L28:C10 NS=2 = (\d+)$`), + nonzero: true, + }, + { + tag: "third function unit 2", + re: regexp.MustCompile(`^2: L24:C12 -- L26:C3 NS=1 = (\d+)$`), + zero: true, + }, + } + flags := []string{} + runDumpChecks(t, s, soutdir, flags, testpoints) +} + +func testIntersect(t *testing.T, s state, indir1, indir2, tag string) { + // Create out dir for intersection. + ioutdir := filepath.Join(s.dir, "intersectOut"+tag) + if err := os.Mkdir(ioutdir, 0777); err != nil { + t.Fatalf("can't create outdir %s: %v", ioutdir, err) + } + + // Intersect the two dirs into a final result. + ins := fmt.Sprintf("-i=%s,%s", indir1, indir2) + out := fmt.Sprintf("-o=%s", ioutdir) + sargs := []string{ins, out} + lines := runToolOp(t, s, "intersect", sargs) + if len(lines) != 0 { + t.Errorf("intersect run produced unexpected output: %+v", lines) + } + + // Dump the files in the subtract output dir and examine the result. + dargs := []string{"-pkg=" + mainPkgPath, "-live", "-i=" + ioutdir} + lines = runToolOp(t, s, "debugdump", dargs) + if len(lines) == 0 { + t.Errorf("dump run produced no output") + } + + // Vet the output. + testpoints := []dumpCheck{ + { + tag: "first function", + re: regexp.MustCompile(`^Func: first\s*$`), + negate: true, + }, + { + tag: "third function", + re: regexp.MustCompile(`^Func: third\s*$`), + }, + } + flags := []string{"-live"} + runDumpChecks(t, s, ioutdir, flags, testpoints) +} + +func testCounterClash(t *testing.T, s state) { + // Create out dir. 
+ ccoutdir := filepath.Join(s.dir, "ccOut") + if err := os.Mkdir(ccoutdir, 0777); err != nil { + t.Fatalf("can't create outdir %s: %v", ccoutdir, err) + } + + // Try to merge covdata0 (from prog1.go -countermode=set) with + // covdata1 (from prog1.go -countermode=atomic"). This should + // work properly, but result in multiple meta-data files. + ins := fmt.Sprintf("-i=%s,%s", s.outdirs[0], s.outdirs[3]) + out := fmt.Sprintf("-o=%s", ccoutdir) + args := append([]string{}, "merge", ins, out, "-pcombine") + if debugtrace { + t.Logf("cc merge command is %s %v\n", s.tool, args) + } + cmd := testenv.Command(t, s.tool, args...) + b, err := cmd.CombinedOutput() + t.Logf("%% output: %s\n", string(b)) + if err != nil { + t.Fatalf("clash merge failed: %v", err) + } + + // Ask for a textual report from the two dirs. Here we have + // to report the mode clash. + out = "-o=" + filepath.Join(ccoutdir, "file.txt") + args = append([]string{}, "textfmt", ins, out) + if debugtrace { + t.Logf("clash textfmt command is %s %v\n", s.tool, args) + } + cmd = testenv.Command(t, s.tool, args...) + b, err = cmd.CombinedOutput() + t.Logf("%% output: %s\n", string(b)) + if err == nil { + t.Fatalf("expected mode clash") + } + got := string(b) + want := "counter mode clash while reading meta-data" + if !strings.Contains(got, want) { + t.Errorf("counter clash textfmt: wanted %s got %s", want, got) + } +} + +func testEmpty(t *testing.T, s state) { + + // Create a new empty directory. + empty := filepath.Join(s.dir, "empty") + if err := os.Mkdir(empty, 0777); err != nil { + t.Fatalf("can't create dir %s: %v", empty, err) + } + + // Create out dir. + eoutdir := filepath.Join(s.dir, "emptyOut") + if err := os.Mkdir(eoutdir, 0777); err != nil { + t.Fatalf("can't create outdir %s: %v", eoutdir, err) + } + + // Run various operations (merge, dump, textfmt, and so on) + // using the empty directory. 
We're not interested in the output + // here, just making sure that you can do these runs without + // any error or crash. + + scenarios := []struct { + tag string + args []string + }{ + { + tag: "merge", + args: []string{"merge", "-o", eoutdir}, + }, + { + tag: "textfmt", + args: []string{"textfmt", "-o", filepath.Join(eoutdir, "foo.txt")}, + }, + { + tag: "func", + args: []string{"func"}, + }, + { + tag: "pkglist", + args: []string{"pkglist"}, + }, + { + tag: "debugdump", + args: []string{"debugdump"}, + }, + { + tag: "percent", + args: []string{"percent"}, + }, + } + + for _, x := range scenarios { + ins := fmt.Sprintf("-i=%s", empty) + args := append([]string{}, x.args...) + args = append(args, ins) + if false { + t.Logf("cmd is %s %v\n", s.tool, args) + } + cmd := testenv.Command(t, s.tool, args...) + b, err := cmd.CombinedOutput() + t.Logf("%% output: %s\n", string(b)) + if err != nil { + t.Fatalf("command %s %+v failed with %v", + s.tool, x.args, err) + } + } +} + +func testCommandLineErrors(t *testing.T, s state, outdir string) { + + // Create out dir. + eoutdir := filepath.Join(s.dir, "errorsOut") + if err := os.Mkdir(eoutdir, 0777); err != nil { + t.Fatalf("can't create outdir %s: %v", eoutdir, err) + } + + // Run various operations (merge, dump, textfmt, and so on) + // using the empty directory. We're not interested in the output + // here, just making sure that you can do these runs without + // any error or crash. + + scenarios := []struct { + tag string + args []string + exp string + }{ + { + tag: "input missing", + args: []string{"merge", "-o", eoutdir, "-i", "not there"}, + exp: "error: reading inputs: ", + }, + { + tag: "badv", + args: []string{"textfmt", "-i", outdir, "-v=abc"}, + }, + } + + for _, x := range scenarios { + args := append([]string{}, x.args...) + if false { + t.Logf("cmd is %s %v\n", s.tool, args) + } + cmd := testenv.Command(t, s.tool, args...) 
+ b, err := cmd.CombinedOutput() + if err == nil { + t.Logf("%% output: %s\n", string(b)) + t.Fatalf("command %s %+v unexpectedly succeeded", + s.tool, x.args) + } else { + if !strings.Contains(string(b), x.exp) { + t.Fatalf("command %s %+v:\ngot:\n%s\nwanted to see: %v\n", + s.tool, x.args, string(b), x.exp) + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/cfg_test.go b/platform/dbops/binaries/go/go/src/cmd/cover/cfg_test.go new file mode 100644 index 0000000000000000000000000000000000000000..701de615d01c6284d9cf294854fa3883e17aace2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/cfg_test.go @@ -0,0 +1,271 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main_test + +import ( + "cmd/internal/cov/covcmd" + "encoding/json" + "fmt" + "internal/testenv" + "os" + "path/filepath" + "strings" + "testing" +) + +func writeFile(t *testing.T, path string, contents []byte) { + if err := os.WriteFile(path, contents, 0666); err != nil { + t.Fatalf("os.WriteFile(%s) failed: %v", path, err) + } +} + +func writePkgConfig(t *testing.T, outdir, tag, ppath, pname string, gran string, mpath string) string { + incfg := filepath.Join(outdir, tag+"incfg.txt") + outcfg := filepath.Join(outdir, "outcfg.txt") + p := covcmd.CoverPkgConfig{ + PkgPath: ppath, + PkgName: pname, + Granularity: gran, + OutConfig: outcfg, + EmitMetaFile: mpath, + } + data, err := json.Marshal(p) + if err != nil { + t.Fatalf("json.Marshal failed: %v", err) + } + writeFile(t, incfg, data) + return incfg +} + +func writeOutFileList(t *testing.T, infiles []string, outdir, tag string) ([]string, string) { + outfilelist := filepath.Join(outdir, tag+"outfilelist.txt") + var sb strings.Builder + cv := filepath.Join(outdir, "covervars.go") + outfs := []string{cv} + fmt.Fprintf(&sb, "%s\n", cv) + for _, inf := range infiles { + base := filepath.Base(inf) + of 
:= filepath.Join(outdir, tag+".cov."+base) + outfs = append(outfs, of) + fmt.Fprintf(&sb, "%s\n", of) + } + if err := os.WriteFile(outfilelist, []byte(sb.String()), 0666); err != nil { + t.Fatalf("writing %s: %v", outfilelist, err) + } + return outfs, outfilelist +} + +func runPkgCover(t *testing.T, outdir string, tag string, incfg string, mode string, infiles []string, errExpected bool) ([]string, string, string) { + // Write the pkgcfg file. + outcfg := filepath.Join(outdir, "outcfg.txt") + + // Form up the arguments and run the tool. + outfiles, outfilelist := writeOutFileList(t, infiles, outdir, tag) + args := []string{"-pkgcfg", incfg, "-mode=" + mode, "-var=var" + tag, "-outfilelist", outfilelist} + args = append(args, infiles...) + cmd := testenv.Command(t, testcover(t), args...) + if errExpected { + errmsg := runExpectingError(cmd, t) + return nil, "", errmsg + } else { + run(cmd, t) + return outfiles, outcfg, "" + } +} + +func TestCoverWithCfg(t *testing.T) { + testenv.MustHaveGoRun(t) + + t.Parallel() + + // Subdir in testdata that has our input files of interest. + tpath := filepath.Join("testdata", "pkgcfg") + dir := tempDir(t) + instdira := filepath.Join(dir, "insta") + if err := os.Mkdir(instdira, 0777); err != nil { + t.Fatal(err) + } + + scenarios := []struct { + mode, gran string + }{ + { + mode: "count", + gran: "perblock", + }, + { + mode: "set", + gran: "perfunc", + }, + { + mode: "regonly", + gran: "perblock", + }, + } + + var incfg string + apkgfiles := []string{filepath.Join(tpath, "a", "a.go")} + for _, scenario := range scenarios { + // Instrument package "a", producing a set of instrumented output + // files and an 'output config' file to pass on to the compiler. 
+ ppath := "cfg/a" + pname := "a" + mode := scenario.mode + gran := scenario.gran + tag := mode + "_" + gran + incfg = writePkgConfig(t, instdira, tag, ppath, pname, gran, "") + ofs, outcfg, _ := runPkgCover(t, instdira, tag, incfg, mode, + apkgfiles, false) + t.Logf("outfiles: %+v\n", ofs) + + // Run the compiler on the files to make sure the result is + // buildable. + bargs := []string{"tool", "compile", "-p", "a", "-coveragecfg", outcfg} + bargs = append(bargs, ofs...) + cmd := testenv.Command(t, testenv.GoToolPath(t), bargs...) + cmd.Dir = instdira + run(cmd, t) + } + + // Do some error testing to ensure that various bad options and + // combinations are properly rejected. + + // Expect error if config file inaccessible/unreadable. + mode := "atomic" + errExpected := true + tag := "errors" + _, _, errmsg := runPkgCover(t, instdira, tag, "/not/a/file", mode, + apkgfiles, errExpected) + want := "error reading pkgconfig file" + if !strings.Contains(errmsg, want) { + t.Errorf("'bad config file' test: wanted %s got %s", want, errmsg) + } + + // Expect err if config file contains unknown stuff. + t.Logf("mangling in config") + writeFile(t, incfg, []byte("blah=foo\n")) + _, _, errmsg = runPkgCover(t, instdira, tag, incfg, mode, + apkgfiles, errExpected) + want = "error reading pkgconfig file" + if !strings.Contains(errmsg, want) { + t.Errorf("'bad config file' test: wanted %s got %s", want, errmsg) + } + + // Expect error on empty config file. + t.Logf("writing empty config") + writeFile(t, incfg, []byte("\n")) + _, _, errmsg = runPkgCover(t, instdira, tag, incfg, mode, + apkgfiles, errExpected) + if !strings.Contains(errmsg, want) { + t.Errorf("'bad config file' test: wanted %s got %s", want, errmsg) + } +} + +func TestCoverOnPackageWithNoTestFiles(t *testing.T) { + testenv.MustHaveGoRun(t) + + // For packages with no test files, the new "go test -cover" + // strategy is to run cmd/cover on the package in a special + // "EmitMetaFile" mode. 
When running in this mode, cmd/cover walks + // the package doing instrumention, but when finished, instead of + // writing out instrumented source files, it directly emits a + // meta-data file for the package in question, essentially + // simulating the effect that you would get if you added a dummy + // "no-op" x_test.go file and then did a build and run of the test. + + t.Run("YesFuncsNoTests", func(t *testing.T) { + testCoverNoTestsYesFuncs(t) + }) + t.Run("NoFuncsNoTests", func(t *testing.T) { + testCoverNoTestsNoFuncs(t) + }) +} + +func testCoverNoTestsYesFuncs(t *testing.T) { + t.Parallel() + dir := tempDir(t) + + // Run the cover command with "emit meta" enabled on a package + // with functions but no test files. + tpath := filepath.Join("testdata", "pkgcfg") + pkg1files := []string{filepath.Join(tpath, "yesFuncsNoTests", "yfnt.go")} + ppath := "cfg/yesFuncsNoTests" + pname := "yesFuncsNoTests" + mode := "count" + gran := "perblock" + tag := mode + "_" + gran + instdir := filepath.Join(dir, "inst") + if err := os.Mkdir(instdir, 0777); err != nil { + t.Fatal(err) + } + mdir := filepath.Join(dir, "meta") + if err := os.Mkdir(mdir, 0777); err != nil { + t.Fatal(err) + } + mpath := filepath.Join(mdir, "covmeta.xxx") + incfg := writePkgConfig(t, instdir, tag, ppath, pname, gran, mpath) + _, _, errmsg := runPkgCover(t, instdir, tag, incfg, mode, + pkg1files, false) + if errmsg != "" { + t.Fatalf("runPkgCover err: %q", errmsg) + } + + // Check for existence of meta-data file. + if inf, err := os.Open(mpath); err != nil { + t.Fatalf("meta-data file not created: %v", err) + } else { + inf.Close() + } + + // Make sure it is digestible. + cdargs := []string{"tool", "covdata", "percent", "-i", mdir} + cmd := testenv.Command(t, testenv.GoToolPath(t), cdargs...) + run(cmd, t) +} + +func testCoverNoTestsNoFuncs(t *testing.T) { + t.Parallel() + dir := tempDir(t) + + // Run the cover command with "emit meta" enabled on a package + // with no functions and no test files. 
+ tpath := filepath.Join("testdata", "pkgcfg") + pkgfiles := []string{filepath.Join(tpath, "noFuncsNoTests", "nfnt.go")} + pname := "noFuncsNoTests" + mode := "count" + gran := "perblock" + ppath := "cfg/" + pname + tag := mode + "_" + gran + instdir := filepath.Join(dir, "inst2") + if err := os.Mkdir(instdir, 0777); err != nil { + t.Fatal(err) + } + mdir := filepath.Join(dir, "meta2") + if err := os.Mkdir(mdir, 0777); err != nil { + t.Fatal(err) + } + mpath := filepath.Join(mdir, "covmeta.yyy") + incfg := writePkgConfig(t, instdir, tag, ppath, pname, gran, mpath) + _, _, errmsg := runPkgCover(t, instdir, tag, incfg, mode, + pkgfiles, false) + if errmsg != "" { + t.Fatalf("runPkgCover err: %q", errmsg) + } + + // We expect to see an empty meta-data file in this case. + if inf, err := os.Open(mpath); err != nil { + t.Fatalf("opening meta-data file: error %v", err) + } else { + defer inf.Close() + fi, err := inf.Stat() + if err != nil { + t.Fatalf("stat meta-data file: %v", err) + } + if fi.Size() != 0 { + t.Fatalf("want zero-sized meta-data file got size %d", + fi.Size()) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/cover.go b/platform/dbops/binaries/go/go/src/cmd/cover/cover.go new file mode 100644 index 0000000000000000000000000000000000000000..ba7694b3af561da891bcde06a2f3c0e1f6b89394 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/cover.go @@ -0,0 +1,1209 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "cmd/internal/cov/covcmd" + "encoding/json" + "flag" + "fmt" + "go/ast" + "go/parser" + "go/token" + "internal/coverage" + "internal/coverage/encodemeta" + "internal/coverage/slicewriter" + "io" + "log" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + + "cmd/internal/edit" + "cmd/internal/objabi" +) + +const usageMessage = "" + + `Usage of 'go tool cover': +Given a coverage profile produced by 'go test': + go test -coverprofile=c.out + +Open a web browser displaying annotated source code: + go tool cover -html=c.out + +Write out an HTML file instead of launching a web browser: + go tool cover -html=c.out -o coverage.html + +Display coverage percentages to stdout for each function: + go tool cover -func=c.out + +Finally, to generate modified source code with coverage annotations +for a package (what go test -cover does): + go tool cover -mode=set -var=CoverageVariableName \ + -pkgcfg= -outfilelist= file1.go ... fileN.go + +where -pkgcfg points to a file containing the package path, +package name, module path, and related info from "go build", +and -outfilelist points to a file containing the filenames +of the instrumented output files (one per input file). +See https://pkg.go.dev/cmd/internal/cov/covcmd#CoverPkgConfig for +more on the package config. 
+` + +func usage() { + fmt.Fprint(os.Stderr, usageMessage) + fmt.Fprintln(os.Stderr, "\nFlags:") + flag.PrintDefaults() + fmt.Fprintln(os.Stderr, "\n Only one of -html, -func, or -mode may be set.") + os.Exit(2) +} + +var ( + mode = flag.String("mode", "", "coverage mode: set, count, atomic") + varVar = flag.String("var", "GoCover", "name of coverage variable to generate") + output = flag.String("o", "", "file for output") + outfilelist = flag.String("outfilelist", "", "file containing list of output files (one per line) if -pkgcfg is in use") + htmlOut = flag.String("html", "", "generate HTML representation of coverage profile") + funcOut = flag.String("func", "", "output coverage profile information for each function") + pkgcfg = flag.String("pkgcfg", "", "enable full-package instrumentation mode using params from specified config file") + pkgconfig covcmd.CoverPkgConfig + outputfiles []string // list of *.cover.go instrumented outputs to write, one per input (set when -pkgcfg is in use) + profile string // The profile to read; the value of -html or -func + counterStmt func(*File, string) string + covervarsoutfile string // an additional Go source file into which we'll write definitions of coverage counter variables + meta data variables (set when -pkgcfg is in use). + cmode coverage.CounterMode + cgran coverage.CounterGranularity +) + +const ( + atomicPackagePath = "sync/atomic" + atomicPackageName = "_cover_atomic_" +) + +func main() { + objabi.AddVersionFlag() + flag.Usage = usage + objabi.Flagparse(usage) + + // Usage information when no arguments. + if flag.NFlag() == 0 && flag.NArg() == 0 { + flag.Usage() + } + + err := parseFlags() + if err != nil { + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, `For usage information, run "go tool cover -help"`) + os.Exit(2) + } + + // Generate coverage-annotated source. + if *mode != "" { + annotate(flag.Args()) + return + } + + // Output HTML or function coverage information. 
+ if *htmlOut != "" { + err = htmlOutput(profile, *output) + } else { + err = funcOutput(profile, *output) + } + + if err != nil { + fmt.Fprintf(os.Stderr, "cover: %v\n", err) + os.Exit(2) + } +} + +// parseFlags sets the profile and counterStmt globals and performs validations. +func parseFlags() error { + profile = *htmlOut + if *funcOut != "" { + if profile != "" { + return fmt.Errorf("too many options") + } + profile = *funcOut + } + + // Must either display a profile or rewrite Go source. + if (profile == "") == (*mode == "") { + return fmt.Errorf("too many options") + } + + if *varVar != "" && !token.IsIdentifier(*varVar) { + return fmt.Errorf("-var: %q is not a valid identifier", *varVar) + } + + if *mode != "" { + switch *mode { + case "set": + counterStmt = setCounterStmt + cmode = coverage.CtrModeSet + case "count": + counterStmt = incCounterStmt + cmode = coverage.CtrModeCount + case "atomic": + counterStmt = atomicCounterStmt + cmode = coverage.CtrModeAtomic + case "regonly": + counterStmt = nil + cmode = coverage.CtrModeRegOnly + case "testmain": + counterStmt = nil + cmode = coverage.CtrModeTestMain + default: + return fmt.Errorf("unknown -mode %v", *mode) + } + + if flag.NArg() == 0 { + return fmt.Errorf("missing source file(s)") + } else { + if *pkgcfg != "" { + if *output != "" { + return fmt.Errorf("please use '-outfilelist' flag instead of '-o'") + } + var err error + if outputfiles, err = readOutFileList(*outfilelist); err != nil { + return err + } + covervarsoutfile = outputfiles[0] + outputfiles = outputfiles[1:] + numInputs := len(flag.Args()) + numOutputs := len(outputfiles) + if numOutputs != numInputs { + return fmt.Errorf("number of output files (%d) not equal to number of input files (%d)", numOutputs, numInputs) + } + if err := readPackageConfig(*pkgcfg); err != nil { + return err + } + return nil + } else { + if *outfilelist != "" { + return fmt.Errorf("'-outfilelist' flag applicable only when -pkgcfg used") + } + } + if flag.NArg() == 
1 { + return nil + } + } + } else if flag.NArg() == 0 { + return nil + } + return fmt.Errorf("too many arguments") +} + +func readOutFileList(path string) ([]string, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("error reading -outfilelist file %q: %v", path, err) + } + return strings.Split(strings.TrimSpace(string(data)), "\n"), nil +} + +func readPackageConfig(path string) error { + data, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("error reading pkgconfig file %q: %v", path, err) + } + if err := json.Unmarshal(data, &pkgconfig); err != nil { + return fmt.Errorf("error reading pkgconfig file %q: %v", path, err) + } + switch pkgconfig.Granularity { + case "perblock": + cgran = coverage.CtrGranularityPerBlock + case "perfunc": + cgran = coverage.CtrGranularityPerFunc + default: + return fmt.Errorf(`%s: pkgconfig requires perblock/perfunc value`, path) + } + return nil +} + +// Block represents the information about a basic block to be recorded in the analysis. +// Note: Our definition of basic block is based on control structures; we don't break +// apart && and ||. We could but it doesn't seem important enough to bother. +type Block struct { + startByte token.Pos + endByte token.Pos + numStmt int +} + +// Package holds package-specific state. +type Package struct { + mdb *encodemeta.CoverageMetaDataBuilder + counterLengths []int +} + +// Function holds func-specific state. +type Func struct { + units []coverage.CoverableUnit + counterVar string +} + +// File is a wrapper for the state of a file used in the parser. +// The basic parse tree walker is a method of this type. +type File struct { + fset *token.FileSet + name string // Name of file. + astFile *ast.File + blocks []Block + content []byte + edit *edit.Buffer + mdb *encodemeta.CoverageMetaDataBuilder + fn Func + pkg *Package +} + +// findText finds text in the original source, starting at pos. 
+// It correctly skips over comments and assumes it need not +// handle quoted strings. +// It returns a byte offset within f.src. +func (f *File) findText(pos token.Pos, text string) int { + b := []byte(text) + start := f.offset(pos) + i := start + s := f.content + for i < len(s) { + if bytes.HasPrefix(s[i:], b) { + return i + } + if i+2 <= len(s) && s[i] == '/' && s[i+1] == '/' { + for i < len(s) && s[i] != '\n' { + i++ + } + continue + } + if i+2 <= len(s) && s[i] == '/' && s[i+1] == '*' { + for i += 2; ; i++ { + if i+2 > len(s) { + return 0 + } + if s[i] == '*' && s[i+1] == '/' { + i += 2 + break + } + } + continue + } + i++ + } + return -1 +} + +// Visit implements the ast.Visitor interface. +func (f *File) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.BlockStmt: + // If it's a switch or select, the body is a list of case clauses; don't tag the block itself. + if len(n.List) > 0 { + switch n.List[0].(type) { + case *ast.CaseClause: // switch + for _, n := range n.List { + clause := n.(*ast.CaseClause) + f.addCounters(clause.Colon+1, clause.Colon+1, clause.End(), clause.Body, false) + } + return f + case *ast.CommClause: // select + for _, n := range n.List { + clause := n.(*ast.CommClause) + f.addCounters(clause.Colon+1, clause.Colon+1, clause.End(), clause.Body, false) + } + return f + } + } + f.addCounters(n.Lbrace, n.Lbrace+1, n.Rbrace+1, n.List, true) // +1 to step past closing brace. + case *ast.IfStmt: + if n.Init != nil { + ast.Walk(f, n.Init) + } + ast.Walk(f, n.Cond) + ast.Walk(f, n.Body) + if n.Else == nil { + return nil + } + // The elses are special, because if we have + // if x { + // } else if y { + // } + // we want to cover the "if y". 
To do this, we need a place to drop the counter, + // so we add a hidden block: + // if x { + // } else { + // if y { + // } + // } + elseOffset := f.findText(n.Body.End(), "else") + if elseOffset < 0 { + panic("lost else") + } + f.edit.Insert(elseOffset+4, "{") + f.edit.Insert(f.offset(n.Else.End()), "}") + + // We just created a block, now walk it. + // Adjust the position of the new block to start after + // the "else". That will cause it to follow the "{" + // we inserted above. + pos := f.fset.File(n.Body.End()).Pos(elseOffset + 4) + switch stmt := n.Else.(type) { + case *ast.IfStmt: + block := &ast.BlockStmt{ + Lbrace: pos, + List: []ast.Stmt{stmt}, + Rbrace: stmt.End(), + } + n.Else = block + case *ast.BlockStmt: + stmt.Lbrace = pos + default: + panic("unexpected node type in if") + } + ast.Walk(f, n.Else) + return nil + case *ast.SelectStmt: + // Don't annotate an empty select - creates a syntax error. + if n.Body == nil || len(n.Body.List) == 0 { + return nil + } + case *ast.SwitchStmt: + // Don't annotate an empty switch - creates a syntax error. + if n.Body == nil || len(n.Body.List) == 0 { + if n.Init != nil { + ast.Walk(f, n.Init) + } + if n.Tag != nil { + ast.Walk(f, n.Tag) + } + return nil + } + case *ast.TypeSwitchStmt: + // Don't annotate an empty type switch - creates a syntax error. + if n.Body == nil || len(n.Body.List) == 0 { + if n.Init != nil { + ast.Walk(f, n.Init) + } + ast.Walk(f, n.Assign) + return nil + } + case *ast.FuncDecl: + // Don't annotate functions with blank names - they cannot be executed. + // Similarly for bodyless funcs. + if n.Name.Name == "_" || n.Body == nil { + return nil + } + fname := n.Name.Name + // Skip AddUint32 and StoreUint32 if we're instrumenting + // sync/atomic itself in atomic mode (out of an abundance of + // caution), since as part of the instrumentation process we + // add calls to AddUint32/StoreUint32, and we don't want to + // somehow create an infinite loop. 
+ // + // Note that in the current implementation (Go 1.20) both + // routines are assembly stubs that forward calls to the + // runtime/internal/atomic equivalents, hence the infinite + // loop scenario is purely theoretical (maybe if in some + // future implementation one of these functions might be + // written in Go). See #57445 for more details. + if atomicOnAtomic() && (fname == "AddUint32" || fname == "StoreUint32") { + return nil + } + // Determine proper function or method name. + if r := n.Recv; r != nil && len(r.List) == 1 { + t := r.List[0].Type + star := "" + if p, _ := t.(*ast.StarExpr); p != nil { + t = p.X + star = "*" + } + if p, _ := t.(*ast.Ident); p != nil { + fname = star + p.Name + "." + fname + } + } + walkBody := true + if *pkgcfg != "" { + f.preFunc(n, fname) + if pkgconfig.Granularity == "perfunc" { + walkBody = false + } + } + if walkBody { + ast.Walk(f, n.Body) + } + if *pkgcfg != "" { + flit := false + f.postFunc(n, fname, flit, n.Body) + } + return nil + case *ast.FuncLit: + // For function literals enclosed in functions, just glom the + // code for the literal in with the enclosing function (for now). + if f.fn.counterVar != "" { + return f + } + + // Hack: function literals aren't named in the go/ast representation, + // and we don't know what name the compiler will choose. For now, + // just make up a descriptive name. 
+ pos := n.Pos() + p := f.fset.File(pos).Position(pos) + fname := fmt.Sprintf("func.L%d.C%d", p.Line, p.Column) + if *pkgcfg != "" { + f.preFunc(n, fname) + } + if pkgconfig.Granularity != "perfunc" { + ast.Walk(f, n.Body) + } + if *pkgcfg != "" { + flit := true + f.postFunc(n, fname, flit, n.Body) + } + return nil + } + return f +} + +func mkCounterVarName(idx int) string { + return fmt.Sprintf("%s_%d", *varVar, idx) +} + +func mkPackageIdVar() string { + return *varVar + "P" +} + +func mkMetaVar() string { + return *varVar + "M" +} + +func mkPackageIdExpression() string { + ppath := pkgconfig.PkgPath + if hcid := coverage.HardCodedPkgID(ppath); hcid != -1 { + return fmt.Sprintf("uint32(%d)", uint32(hcid)) + } + return mkPackageIdVar() +} + +func (f *File) preFunc(fn ast.Node, fname string) { + f.fn.units = f.fn.units[:0] + + // create a new counter variable for this function. + cv := mkCounterVarName(len(f.pkg.counterLengths)) + f.fn.counterVar = cv +} + +func (f *File) postFunc(fn ast.Node, funcname string, flit bool, body *ast.BlockStmt) { + + // Tack on single counter write if we are in "perfunc" mode. + singleCtr := "" + if pkgconfig.Granularity == "perfunc" { + singleCtr = "; " + f.newCounter(fn.Pos(), fn.Pos(), 1) + } + + // record the length of the counter var required. + nc := len(f.fn.units) + coverage.FirstCtrOffset + f.pkg.counterLengths = append(f.pkg.counterLengths, nc) + + // FIXME: for windows, do we want "\" and not "/"? Need to test here. + // Currently filename is formed as packagepath + "/" + basename. + fnpos := f.fset.Position(fn.Pos()) + ppath := pkgconfig.PkgPath + filename := ppath + "/" + filepath.Base(fnpos.Filename) + + // The convention for cmd/cover is that if the go command that + // kicks off coverage specifies a local import path (e.g. "go test + // -cover ./thispackage"), the tool will capture full pathnames + // for source files instead of relative paths, which tend to work + // more smoothly for "go tool cover -html". 
See also issue #56433 + // for more details. + if pkgconfig.Local { + filename = f.name + } + + // Hand off function to meta-data builder. + fd := coverage.FuncDesc{ + Funcname: funcname, + Srcfile: filename, + Units: f.fn.units, + Lit: flit, + } + funcId := f.mdb.AddFunc(fd) + + hookWrite := func(cv string, which int, val string) string { + return fmt.Sprintf("%s[%d] = %s", cv, which, val) + } + if *mode == "atomic" { + hookWrite = func(cv string, which int, val string) string { + return fmt.Sprintf("%sStoreUint32(&%s[%d], %s)", + atomicPackagePrefix(), cv, which, val) + } + } + + // Generate the registration hook sequence for the function. This + // sequence looks like + // + // counterVar[0] = + // counterVar[1] = pkgId + // counterVar[2] = fnId + // + cv := f.fn.counterVar + regHook := hookWrite(cv, 0, strconv.Itoa(len(f.fn.units))) + " ; " + + hookWrite(cv, 1, mkPackageIdExpression()) + " ; " + + hookWrite(cv, 2, strconv.Itoa(int(funcId))) + singleCtr + + // Insert the registration sequence into the function. We want this sequence to + // appear before any counter updates, so use a hack to ensure that this edit + // applies before the edit corresponding to the prolog counter update. + + boff := f.offset(body.Pos()) + ipos := f.fset.File(body.Pos()).Pos(boff) + ip := f.offset(ipos) + f.edit.Replace(ip, ip+1, string(f.content[ipos-1])+regHook+" ; ") + + f.fn.counterVar = "" +} + +func annotate(names []string) { + var p *Package + if *pkgcfg != "" { + pp := pkgconfig.PkgPath + pn := pkgconfig.PkgName + mp := pkgconfig.ModulePath + mdb, err := encodemeta.NewCoverageMetaDataBuilder(pp, pn, mp) + if err != nil { + log.Fatalf("creating coverage meta-data builder: %v\n", err) + } + p = &Package{ + mdb: mdb, + } + } + // TODO: process files in parallel here if it matters. + for k, name := range names { + if strings.ContainsAny(name, "\r\n") { + // annotateFile uses '//line' directives, which don't permit newlines. 
+ log.Fatalf("cover: input path contains newline character: %q", name) + } + + fd := os.Stdout + isStdout := true + if *pkgcfg != "" { + var err error + fd, err = os.Create(outputfiles[k]) + if err != nil { + log.Fatalf("cover: %s", err) + } + isStdout = false + } else if *output != "" { + var err error + fd, err = os.Create(*output) + if err != nil { + log.Fatalf("cover: %s", err) + } + isStdout = false + } + p.annotateFile(name, fd) + if !isStdout { + if err := fd.Close(); err != nil { + log.Fatalf("cover: %s", err) + } + } + } + + if *pkgcfg != "" { + fd, err := os.Create(covervarsoutfile) + if err != nil { + log.Fatalf("cover: %s", err) + } + p.emitMetaData(fd) + if err := fd.Close(); err != nil { + log.Fatalf("cover: %s", err) + } + } +} + +func (p *Package) annotateFile(name string, fd io.Writer) { + fset := token.NewFileSet() + content, err := os.ReadFile(name) + if err != nil { + log.Fatalf("cover: %s: %s", name, err) + } + parsedFile, err := parser.ParseFile(fset, name, content, parser.ParseComments) + if err != nil { + log.Fatalf("cover: %s: %s", name, err) + } + + file := &File{ + fset: fset, + name: name, + content: content, + edit: edit.NewBuffer(content), + astFile: parsedFile, + } + if p != nil { + file.mdb = p.mdb + file.pkg = p + } + + if *mode == "atomic" { + // Add import of sync/atomic immediately after package clause. + // We do this even if there is an existing import, because the + // existing import may be shadowed at any given place we want + // to refer to it, and our name (_cover_atomic_) is less likely to + // be shadowed. The one exception is if we're visiting the + // sync/atomic package itself, in which case we can refer to + // functions directly without an import prefix. See also #57445. 
+ if pkgconfig.PkgPath != "sync/atomic" { + file.edit.Insert(file.offset(file.astFile.Name.End()), + fmt.Sprintf("; import %s %q", atomicPackageName, atomicPackagePath)) + } + } + if pkgconfig.PkgName == "main" { + file.edit.Insert(file.offset(file.astFile.Name.End()), + "; import _ \"runtime/coverage\"") + } + + if counterStmt != nil { + ast.Walk(file, file.astFile) + } + newContent := file.edit.Bytes() + + if strings.ContainsAny(name, "\r\n") { + // This should have been checked by the caller already, but we double check + // here just to be sure we haven't missed a caller somewhere. + panic(fmt.Sprintf("annotateFile: name contains unexpected newline character: %q", name)) + } + fmt.Fprintf(fd, "//line %s:1:1\n", name) + fd.Write(newContent) + + // After printing the source tree, add some declarations for the + // counters etc. We could do this by adding to the tree, but it's + // easier just to print the text. + file.addVariables(fd) + + // Emit a reference to the atomic package to avoid + // import and not used error when there's no code in a file. + if *mode == "atomic" { + fmt.Fprintf(fd, "\nvar _ = %sLoadUint32\n", atomicPackagePrefix()) + } +} + +// setCounterStmt returns the expression: __count[23] = 1. +func setCounterStmt(f *File, counter string) string { + return fmt.Sprintf("%s = 1", counter) +} + +// incCounterStmt returns the expression: __count[23]++. +func incCounterStmt(f *File, counter string) string { + return fmt.Sprintf("%s++", counter) +} + +// atomicCounterStmt returns the expression: atomic.AddUint32(&__count[23], 1) +func atomicCounterStmt(f *File, counter string) string { + return fmt.Sprintf("%sAddUint32(&%s, 1)", atomicPackagePrefix(), counter) +} + +// newCounter creates a new counter expression of the appropriate form. 
+func (f *File) newCounter(start, end token.Pos, numStmt int) string { + var stmt string + if *pkgcfg != "" { + slot := len(f.fn.units) + coverage.FirstCtrOffset + if f.fn.counterVar == "" { + panic("internal error: counter var unset") + } + stmt = counterStmt(f, fmt.Sprintf("%s[%d]", f.fn.counterVar, slot)) + stpos := f.fset.Position(start) + enpos := f.fset.Position(end) + stpos, enpos = dedup(stpos, enpos) + unit := coverage.CoverableUnit{ + StLine: uint32(stpos.Line), + StCol: uint32(stpos.Column), + EnLine: uint32(enpos.Line), + EnCol: uint32(enpos.Column), + NxStmts: uint32(numStmt), + } + f.fn.units = append(f.fn.units, unit) + } else { + stmt = counterStmt(f, fmt.Sprintf("%s.Count[%d]", *varVar, + len(f.blocks))) + f.blocks = append(f.blocks, Block{start, end, numStmt}) + } + return stmt +} + +// addCounters takes a list of statements and adds counters to the beginning of +// each basic block at the top level of that list. For instance, given +// +// S1 +// if cond { +// S2 +// } +// S3 +// +// counters will be added before S1 and before S3. The block containing S2 +// will be visited in a separate call. +// TODO: Nested simple blocks get unnecessary (but correct) counters +func (f *File) addCounters(pos, insertPos, blockEnd token.Pos, list []ast.Stmt, extendToClosingBrace bool) { + // Special case: make sure we add a counter to an empty block. Can't do this below + // or we will add a counter to an empty statement list after, say, a return statement. + if len(list) == 0 { + f.edit.Insert(f.offset(insertPos), f.newCounter(insertPos, blockEnd, 0)+";") + return + } + // Make a copy of the list, as we may mutate it and should leave the + // existing list intact. + list = append([]ast.Stmt(nil), list...) + // We have a block (statement list), but it may have several basic blocks due to the + // appearance of statements that affect the flow of control. + for { + // Find first statement that affects flow of control (break, continue, if, etc.). 
+ // It will be the last statement of this basic block. + var last int + end := blockEnd + for last = 0; last < len(list); last++ { + stmt := list[last] + end = f.statementBoundary(stmt) + if f.endsBasicSourceBlock(stmt) { + // If it is a labeled statement, we need to place a counter between + // the label and its statement because it may be the target of a goto + // and thus start a basic block. That is, given + // foo: stmt + // we need to create + // foo: ; stmt + // and mark the label as a block-terminating statement. + // The result will then be + // foo: COUNTER[n]++; stmt + // However, we can't do this if the labeled statement is already + // a control statement, such as a labeled for. + if label, isLabel := stmt.(*ast.LabeledStmt); isLabel && !f.isControl(label.Stmt) { + newLabel := *label + newLabel.Stmt = &ast.EmptyStmt{ + Semicolon: label.Stmt.Pos(), + Implicit: true, + } + end = label.Pos() // Previous block ends before the label. + list[last] = &newLabel + // Open a gap and drop in the old statement, now without a label. + list = append(list, nil) + copy(list[last+1:], list[last:]) + list[last+1] = label.Stmt + } + last++ + extendToClosingBrace = false // Block is broken up now. + break + } + } + if extendToClosingBrace { + end = blockEnd + } + if pos != end { // Can have no source to cover if e.g. blocks abut. + f.edit.Insert(f.offset(insertPos), f.newCounter(pos, end, last)+";") + } + list = list[last:] + if len(list) == 0 { + break + } + pos = list[0].Pos() + insertPos = pos + } +} + +// hasFuncLiteral reports the existence and position of the first func literal +// in the node, if any. If a func literal appears, it usually marks the termination +// of a basic block because the function body is itself a block. +// Therefore we draw a line at the start of the body of the first function literal we find. +// TODO: what if there's more than one? Probably doesn't matter much. 
+func hasFuncLiteral(n ast.Node) (bool, token.Pos) { + if n == nil { + return false, 0 + } + var literal funcLitFinder + ast.Walk(&literal, n) + return literal.found(), token.Pos(literal) +} + +// statementBoundary finds the location in s that terminates the current basic +// block in the source. +func (f *File) statementBoundary(s ast.Stmt) token.Pos { + // Control flow statements are easy. + switch s := s.(type) { + case *ast.BlockStmt: + // Treat blocks like basic blocks to avoid overlapping counters. + return s.Lbrace + case *ast.IfStmt: + found, pos := hasFuncLiteral(s.Init) + if found { + return pos + } + found, pos = hasFuncLiteral(s.Cond) + if found { + return pos + } + return s.Body.Lbrace + case *ast.ForStmt: + found, pos := hasFuncLiteral(s.Init) + if found { + return pos + } + found, pos = hasFuncLiteral(s.Cond) + if found { + return pos + } + found, pos = hasFuncLiteral(s.Post) + if found { + return pos + } + return s.Body.Lbrace + case *ast.LabeledStmt: + return f.statementBoundary(s.Stmt) + case *ast.RangeStmt: + found, pos := hasFuncLiteral(s.X) + if found { + return pos + } + return s.Body.Lbrace + case *ast.SwitchStmt: + found, pos := hasFuncLiteral(s.Init) + if found { + return pos + } + found, pos = hasFuncLiteral(s.Tag) + if found { + return pos + } + return s.Body.Lbrace + case *ast.SelectStmt: + return s.Body.Lbrace + case *ast.TypeSwitchStmt: + found, pos := hasFuncLiteral(s.Init) + if found { + return pos + } + return s.Body.Lbrace + } + // If not a control flow statement, it is a declaration, expression, call, etc. and it may have a function literal. + // If it does, that's tricky because we want to exclude the body of the function from this block. + // Draw a line at the start of the body of the first function literal we find. + // TODO: what if there's more than one? Probably doesn't matter much. 
+ found, pos := hasFuncLiteral(s) + if found { + return pos + } + return s.End() +} + +// endsBasicSourceBlock reports whether s changes the flow of control: break, if, etc., +// or if it's just problematic, for instance contains a function literal, which will complicate +// accounting due to the block-within-an expression. +func (f *File) endsBasicSourceBlock(s ast.Stmt) bool { + switch s := s.(type) { + case *ast.BlockStmt: + // Treat blocks like basic blocks to avoid overlapping counters. + return true + case *ast.BranchStmt: + return true + case *ast.ForStmt: + return true + case *ast.IfStmt: + return true + case *ast.LabeledStmt: + return true // A goto may branch here, starting a new basic block. + case *ast.RangeStmt: + return true + case *ast.SwitchStmt: + return true + case *ast.SelectStmt: + return true + case *ast.TypeSwitchStmt: + return true + case *ast.ExprStmt: + // Calls to panic change the flow. + // We really should verify that "panic" is the predefined function, + // but without type checking we can't and the likelihood of it being + // an actual problem is vanishingly small. + if call, ok := s.X.(*ast.CallExpr); ok { + if ident, ok := call.Fun.(*ast.Ident); ok && ident.Name == "panic" && len(call.Args) == 1 { + return true + } + } + } + found, _ := hasFuncLiteral(s) + return found +} + +// isControl reports whether s is a control statement that, if labeled, cannot be +// separated from its label. +func (f *File) isControl(s ast.Stmt) bool { + switch s.(type) { + case *ast.ForStmt, *ast.RangeStmt, *ast.SwitchStmt, *ast.SelectStmt, *ast.TypeSwitchStmt: + return true + } + return false +} + +// funcLitFinder implements the ast.Visitor pattern to find the location of any +// function literal in a subtree. +type funcLitFinder token.Pos + +func (f *funcLitFinder) Visit(node ast.Node) (w ast.Visitor) { + if f.found() { + return nil // Prune search. 
+ } + switch n := node.(type) { + case *ast.FuncLit: + *f = funcLitFinder(n.Body.Lbrace) + return nil // Prune search. + } + return f +} + +func (f *funcLitFinder) found() bool { + return token.Pos(*f) != token.NoPos +} + +// Sort interface for []block1; used for self-check in addVariables. + +type block1 struct { + Block + index int +} + +type blockSlice []block1 + +func (b blockSlice) Len() int { return len(b) } +func (b blockSlice) Less(i, j int) bool { return b[i].startByte < b[j].startByte } +func (b blockSlice) Swap(i, j int) { b[i], b[j] = b[j], b[i] } + +// offset translates a token position into a 0-indexed byte offset. +func (f *File) offset(pos token.Pos) int { + return f.fset.Position(pos).Offset +} + +// addVariables adds to the end of the file the declarations to set up the counter and position variables. +func (f *File) addVariables(w io.Writer) { + if *pkgcfg != "" { + return + } + // Self-check: Verify that the instrumented basic blocks are disjoint. + t := make([]block1, len(f.blocks)) + for i := range f.blocks { + t[i].Block = f.blocks[i] + t[i].index = i + } + sort.Sort(blockSlice(t)) + for i := 1; i < len(t); i++ { + if t[i-1].endByte > t[i].startByte { + fmt.Fprintf(os.Stderr, "cover: internal error: block %d overlaps block %d\n", t[i-1].index, t[i].index) + // Note: error message is in byte positions, not token positions. + fmt.Fprintf(os.Stderr, "\t%s:#%d,#%d %s:#%d,#%d\n", + f.name, f.offset(t[i-1].startByte), f.offset(t[i-1].endByte), + f.name, f.offset(t[i].startByte), f.offset(t[i].endByte)) + } + } + + // Declare the coverage struct as a package-level variable. + fmt.Fprintf(w, "\nvar %s = struct {\n", *varVar) + fmt.Fprintf(w, "\tCount [%d]uint32\n", len(f.blocks)) + fmt.Fprintf(w, "\tPos [3 * %d]uint32\n", len(f.blocks)) + fmt.Fprintf(w, "\tNumStmt [%d]uint16\n", len(f.blocks)) + fmt.Fprintf(w, "} {\n") + + // Initialize the position array field. 
+ fmt.Fprintf(w, "\tPos: [3 * %d]uint32{\n", len(f.blocks)) + + // A nice long list of positions. Each position is encoded as follows to reduce size: + // - 32-bit starting line number + // - 32-bit ending line number + // - (16 bit ending column number << 16) | (16-bit starting column number). + for i, block := range f.blocks { + start := f.fset.Position(block.startByte) + end := f.fset.Position(block.endByte) + + start, end = dedup(start, end) + + fmt.Fprintf(w, "\t\t%d, %d, %#x, // [%d]\n", start.Line, end.Line, (end.Column&0xFFFF)<<16|(start.Column&0xFFFF), i) + } + + // Close the position array. + fmt.Fprintf(w, "\t},\n") + + // Initialize the position array field. + fmt.Fprintf(w, "\tNumStmt: [%d]uint16{\n", len(f.blocks)) + + // A nice long list of statements-per-block, so we can give a conventional + // valuation of "percent covered". To save space, it's a 16-bit number, so we + // clamp it if it overflows - won't matter in practice. + for i, block := range f.blocks { + n := block.numStmt + if n > 1<<16-1 { + n = 1<<16 - 1 + } + fmt.Fprintf(w, "\t\t%d, // %d\n", n, i) + } + + // Close the statements-per-block array. + fmt.Fprintf(w, "\t},\n") + + // Close the struct initialization. + fmt.Fprintf(w, "}\n") +} + +// It is possible for positions to repeat when there is a line +// directive that does not specify column information and the input +// has not been passed through gofmt. +// See issues #27530 and #30746. +// Tests are TestHtmlUnformatted and TestLineDup. +// We use a map to avoid duplicates. + +// pos2 is a pair of token.Position values, used as a map key type. +type pos2 struct { + p1, p2 token.Position +} + +// seenPos2 tracks whether we have seen a token.Position pair. +var seenPos2 = make(map[pos2]bool) + +// dedup takes a token.Position pair and returns a pair that does not +// duplicate any existing pair. The returned pair will have the Offset +// fields cleared. 
+func dedup(p1, p2 token.Position) (r1, r2 token.Position) { + key := pos2{ + p1: p1, + p2: p2, + } + + // We want to ignore the Offset fields in the map, + // since cover uses only file/line/column. + key.p1.Offset = 0 + key.p2.Offset = 0 + + for seenPos2[key] { + key.p2.Column++ + } + seenPos2[key] = true + + return key.p1, key.p2 +} + +func (p *Package) emitMetaData(w io.Writer) { + if *pkgcfg == "" { + return + } + + // If the "EmitMetaFile" path has been set, invoke a helper + // that will write out a pre-cooked meta-data file for this package + // to the specified location, in effect simulating the execution + // of a test binary that doesn't do any testing to speak of. + if pkgconfig.EmitMetaFile != "" { + p.emitMetaFile(pkgconfig.EmitMetaFile) + } + + // Something went wrong if regonly/testmain mode is in effect and + // we have instrumented functions. + if counterStmt == nil && len(p.counterLengths) != 0 { + panic("internal error: seen functions with regonly/testmain") + } + + // Emit package name. + fmt.Fprintf(w, "\npackage %s\n\n", pkgconfig.PkgName) + + // Emit package ID var. + fmt.Fprintf(w, "\nvar %sP uint32\n", *varVar) + + // Emit all of the counter variables. + for k := range p.counterLengths { + cvn := mkCounterVarName(k) + fmt.Fprintf(w, "var %s [%d]uint32\n", cvn, p.counterLengths[k]) + } + + // Emit encoded meta-data. 
+ var sws slicewriter.WriteSeeker + digest, err := p.mdb.Emit(&sws) + if err != nil { + log.Fatalf("encoding meta-data: %v", err) + } + p.mdb = nil + fmt.Fprintf(w, "var %s = [...]byte{\n", mkMetaVar()) + payload := sws.BytesWritten() + for k, b := range payload { + fmt.Fprintf(w, " 0x%x,", b) + if k != 0 && k%8 == 0 { + fmt.Fprintf(w, "\n") + } + } + fmt.Fprintf(w, "}\n") + + fixcfg := covcmd.CoverFixupConfig{ + Strategy: "normal", + MetaVar: mkMetaVar(), + MetaLen: len(payload), + MetaHash: fmt.Sprintf("%x", digest), + PkgIdVar: mkPackageIdVar(), + CounterPrefix: *varVar, + CounterGranularity: pkgconfig.Granularity, + CounterMode: *mode, + } + fixdata, err := json.Marshal(fixcfg) + if err != nil { + log.Fatalf("marshal fixupcfg: %v", err) + } + if err := os.WriteFile(pkgconfig.OutConfig, fixdata, 0666); err != nil { + log.Fatalf("error writing %s: %v", pkgconfig.OutConfig, err) + } +} + +// atomicOnAtomic returns true if we're instrumenting +// the sync/atomic package AND using atomic mode. +func atomicOnAtomic() bool { + return *mode == "atomic" && pkgconfig.PkgPath == "sync/atomic" +} + +// atomicPackagePrefix returns the import path prefix used to refer to +// our special import of sync/atomic; this is either set to the +// constant atomicPackageName plus a dot or the empty string if we're +// instrumenting the sync/atomic package itself. +func atomicPackagePrefix() string { + if atomicOnAtomic() { + return "" + } + return atomicPackageName + "." +} + +func (p *Package) emitMetaFile(outpath string) { + // Open output file. + of, err := os.OpenFile(outpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + log.Fatalf("opening covmeta %s: %v", outpath, err) + } + + if len(p.counterLengths) == 0 { + // This corresponds to the case where we have no functions + // in the package to instrument. Leave the file empty file if + // this happens. 
+ if err = of.Close(); err != nil { + log.Fatalf("closing meta-data file: %v", err) + } + return + } + + // Encode meta-data. + var sws slicewriter.WriteSeeker + digest, err := p.mdb.Emit(&sws) + if err != nil { + log.Fatalf("encoding meta-data: %v", err) + } + payload := sws.BytesWritten() + blobs := [][]byte{payload} + + // Write meta-data file directly. + mfw := encodemeta.NewCoverageMetaFileWriter(outpath, of) + err = mfw.Write(digest, blobs, cmode, cgran) + if err != nil { + log.Fatalf("writing meta-data file: %v", err) + } + if err = of.Close(); err != nil { + log.Fatalf("closing meta-data file: %v", err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/cover_test.go b/platform/dbops/binaries/go/go/src/cmd/cover/cover_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7bfe2d072830483ba66d9c9da8a1d5868fb1e9a8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/cover_test.go @@ -0,0 +1,649 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main_test + +import ( + "bufio" + "bytes" + cmdcover "cmd/cover" + "flag" + "fmt" + "go/ast" + "go/parser" + "go/token" + "internal/testenv" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "sync" + "testing" +) + +const ( + // Data directory, also the package directory for the test. + testdata = "testdata" +) + +// testcover returns the path to the cmd/cover binary that we are going to +// test. At one point this was created via "go build"; we now reuse the unit +// test executable itself. +func testcover(t testing.TB) string { + exe, err := os.Executable() + if err != nil { + t.Helper() + t.Fatal(err) + } + return exe +} + +// testTempDir is a temporary directory created in TestMain. +var testTempDir string + +// If set, this will preserve all the tmpdir files from the test run. 
+var debug = flag.Bool("debug", false, "keep tmpdir files for debugging") + +// TestMain used here so that we can leverage the test executable +// itself as a cmd/cover executable; compare to similar usage in +// the cmd/go tests. +func TestMain(m *testing.M) { + if os.Getenv("CMDCOVER_TOOLEXEC") != "" { + // When CMDCOVER_TOOLEXEC is set, the test binary is also + // running as a -toolexec wrapper. + tool := strings.TrimSuffix(filepath.Base(os.Args[1]), ".exe") + if tool == "cover" { + // Inject this test binary as cmd/cover in place of the + // installed tool, so that the go command's invocations of + // cover produce coverage for the configuration in which + // the test was built. + os.Args = os.Args[1:] + cmdcover.Main() + } else { + cmd := exec.Command(os.Args[1], os.Args[2:]...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + os.Exit(1) + } + } + os.Exit(0) + } + if os.Getenv("CMDCOVER_TEST_RUN_MAIN") != "" { + // When CMDCOVER_TEST_RUN_MAIN is set, we're reusing the test + // binary as cmd/cover. In this case we run the main func exported + // via export_test.go, and exit; CMDCOVER_TEST_RUN_MAIN is set below + // for actual test invocations. 
+ cmdcover.Main() + os.Exit(0) + } + flag.Parse() + topTmpdir, err := os.MkdirTemp("", "cmd-cover-test-") + if err != nil { + log.Fatal(err) + } + testTempDir = topTmpdir + if !*debug { + defer os.RemoveAll(topTmpdir) + } else { + fmt.Fprintf(os.Stderr, "debug: preserving tmpdir %s\n", topTmpdir) + } + os.Setenv("CMDCOVER_TEST_RUN_MAIN", "normal") + os.Exit(m.Run()) +} + +var tdmu sync.Mutex +var tdcount int + +func tempDir(t *testing.T) string { + tdmu.Lock() + dir := filepath.Join(testTempDir, fmt.Sprintf("%03d", tdcount)) + tdcount++ + if err := os.Mkdir(dir, 0777); err != nil { + t.Fatal(err) + } + defer tdmu.Unlock() + return dir +} + +// TestCoverWithToolExec runs a set of subtests that all make use of a +// "-toolexec" wrapper program to invoke the cover test executable +// itself via "go test -cover". +func TestCoverWithToolExec(t *testing.T) { + testenv.MustHaveExec(t) + + toolexecArg := "-toolexec=" + testcover(t) + + t.Run("CoverHTML", func(t *testing.T) { + testCoverHTML(t, toolexecArg) + }) + t.Run("HtmlUnformatted", func(t *testing.T) { + testHtmlUnformatted(t, toolexecArg) + }) + t.Run("FuncWithDuplicateLines", func(t *testing.T) { + testFuncWithDuplicateLines(t, toolexecArg) + }) + t.Run("MissingTrailingNewlineIssue58370", func(t *testing.T) { + testMissingTrailingNewlineIssue58370(t, toolexecArg) + }) +} + +// Execute this command sequence: +// +// replace the word LINE with the line number < testdata/test.go > testdata/test_line.go +// testcover -mode=count -var=CoverTest -o ./testdata/test_cover.go testdata/test_line.go +// go run ./testdata/main.go ./testdata/test.go +func TestCover(t *testing.T) { + testenv.MustHaveGoRun(t) + t.Parallel() + dir := tempDir(t) + + // Read in the test file (testTest) and write it, with LINEs specified, to coverInput. 
+ testTest := filepath.Join(testdata, "test.go") + file, err := os.ReadFile(testTest) + if err != nil { + t.Fatal(err) + } + lines := bytes.Split(file, []byte("\n")) + for i, line := range lines { + lines[i] = bytes.ReplaceAll(line, []byte("LINE"), []byte(fmt.Sprint(i+1))) + } + + // Add a function that is not gofmt'ed. This used to cause a crash. + // We don't put it in test.go because then we would have to gofmt it. + // Issue 23927. + lines = append(lines, []byte("func unFormatted() {"), + []byte("\tif true {"), + []byte("\t}else{"), + []byte("\t}"), + []byte("}")) + lines = append(lines, []byte("func unFormatted2(b bool) {if b{}else{}}")) + + coverInput := filepath.Join(dir, "test_line.go") + if err := os.WriteFile(coverInput, bytes.Join(lines, []byte("\n")), 0666); err != nil { + t.Fatal(err) + } + + // testcover -mode=count -var=thisNameMustBeVeryLongToCauseOverflowOfCounterIncrementStatementOntoNextLineForTest -o ./testdata/test_cover.go testdata/test_line.go + coverOutput := filepath.Join(dir, "test_cover.go") + cmd := testenv.Command(t, testcover(t), "-mode=count", "-var=thisNameMustBeVeryLongToCauseOverflowOfCounterIncrementStatementOntoNextLineForTest", "-o", coverOutput, coverInput) + run(cmd, t) + + cmd = testenv.Command(t, testcover(t), "-mode=set", "-var=Not_an-identifier", "-o", coverOutput, coverInput) + err = cmd.Run() + if err == nil { + t.Error("Expected cover to fail with an error") + } + + // Copy testmain to tmpdir, so that it is in the same directory + // as coverOutput. 
+ testMain := filepath.Join(testdata, "main.go") + b, err := os.ReadFile(testMain) + if err != nil { + t.Fatal(err) + } + tmpTestMain := filepath.Join(dir, "main.go") + if err := os.WriteFile(tmpTestMain, b, 0444); err != nil { + t.Fatal(err) + } + + // go run ./testdata/main.go ./testdata/test.go + cmd = testenv.Command(t, testenv.GoToolPath(t), "run", tmpTestMain, coverOutput) + run(cmd, t) + + file, err = os.ReadFile(coverOutput) + if err != nil { + t.Fatal(err) + } + // compiler directive must appear right next to function declaration. + if got, err := regexp.MatchString(".*\n//go:nosplit\nfunc someFunction().*", string(file)); err != nil || !got { + t.Error("misplaced compiler directive") + } + // "go:linkname" compiler directive should be present. + if got, err := regexp.MatchString(`.*go\:linkname some\_name some\_name.*`, string(file)); err != nil || !got { + t.Error("'go:linkname' compiler directive not found") + } + + // Other comments should be preserved too. + c := ".*// This comment didn't appear in generated go code.*" + if got, err := regexp.MatchString(c, string(file)); err != nil || !got { + t.Errorf("non compiler directive comment %q not found", c) + } +} + +// TestDirectives checks that compiler directives are preserved and positioned +// correctly. Directives that occur before top-level declarations should remain +// above those declarations, even if they are not part of the block of +// documentation comments. +func TestDirectives(t *testing.T) { + testenv.MustHaveExec(t) + t.Parallel() + + // Read the source file and find all the directives. We'll keep + // track of whether each one has been seen in the output. 
+ testDirectives := filepath.Join(testdata, "directives.go") + source, err := os.ReadFile(testDirectives) + if err != nil { + t.Fatal(err) + } + sourceDirectives := findDirectives(source) + + // testcover -mode=atomic ./testdata/directives.go + cmd := testenv.Command(t, testcover(t), "-mode=atomic", testDirectives) + cmd.Stderr = os.Stderr + output, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + + // Check that all directives are present in the output. + outputDirectives := findDirectives(output) + foundDirective := make(map[string]bool) + for _, p := range sourceDirectives { + foundDirective[p.name] = false + } + for _, p := range outputDirectives { + if found, ok := foundDirective[p.name]; !ok { + t.Errorf("unexpected directive in output: %s", p.text) + } else if found { + t.Errorf("directive found multiple times in output: %s", p.text) + } + foundDirective[p.name] = true + } + for name, found := range foundDirective { + if !found { + t.Errorf("missing directive: %s", name) + } + } + + // Check that directives that start with the name of top-level declarations + // come before the beginning of the named declaration and after the end + // of the previous declaration. + fset := token.NewFileSet() + astFile, err := parser.ParseFile(fset, testDirectives, output, 0) + if err != nil { + t.Fatal(err) + } + + prevEnd := 0 + for _, decl := range astFile.Decls { + var name string + switch d := decl.(type) { + case *ast.FuncDecl: + name = d.Name.Name + case *ast.GenDecl: + if len(d.Specs) == 0 { + // An empty group declaration. We still want to check that + // directives can be associated with it, so we make up a name + // to match directives in the test data. 
+ name = "_empty" + } else if spec, ok := d.Specs[0].(*ast.TypeSpec); ok { + name = spec.Name.Name + } + } + pos := fset.Position(decl.Pos()).Offset + end := fset.Position(decl.End()).Offset + if name == "" { + prevEnd = end + continue + } + for _, p := range outputDirectives { + if !strings.HasPrefix(p.name, name) { + continue + } + if p.offset < prevEnd || pos < p.offset { + t.Errorf("directive %s does not appear before definition %s", p.text, name) + } + } + prevEnd = end + } +} + +type directiveInfo struct { + text string // full text of the comment, not including newline + name string // text after //go: + offset int // byte offset of first slash in comment +} + +func findDirectives(source []byte) []directiveInfo { + var directives []directiveInfo + directivePrefix := []byte("\n//go:") + offset := 0 + for { + i := bytes.Index(source[offset:], directivePrefix) + if i < 0 { + break + } + i++ // skip newline + p := source[offset+i:] + j := bytes.IndexByte(p, '\n') + if j < 0 { + // reached EOF + j = len(p) + } + directive := directiveInfo{ + text: string(p[:j]), + name: string(p[len(directivePrefix)-1 : j]), + offset: offset + i, + } + directives = append(directives, directive) + offset += i + j + } + return directives +} + +// Makes sure that `cover -func=profile.cov` reports accurate coverage. +// Issue #20515. +func TestCoverFunc(t *testing.T) { + testenv.MustHaveExec(t) + + // testcover -func ./testdata/profile.cov + coverProfile := filepath.Join(testdata, "profile.cov") + cmd := testenv.Command(t, testcover(t), "-func", coverProfile) + out, err := cmd.Output() + if err != nil { + if ee, ok := err.(*exec.ExitError); ok { + t.Logf("%s", ee.Stderr) + } + t.Fatal(err) + } + + if got, err := regexp.Match(".*total:.*100.0.*", out); err != nil || !got { + t.Logf("%s", out) + t.Errorf("invalid coverage counts. got=(%v, %v); want=(true; nil)", got, err) + } +} + +// Check that cover produces correct HTML. +// Issue #25767. 
+func testCoverHTML(t *testing.T, toolexecArg string) { + testenv.MustHaveGoRun(t) + dir := tempDir(t) + + t.Parallel() + + // go test -coverprofile testdata/html/html.cov cmd/cover/testdata/html + htmlProfile := filepath.Join(dir, "html.cov") + cmd := testenv.Command(t, testenv.GoToolPath(t), "test", toolexecArg, "-coverprofile", htmlProfile, "cmd/cover/testdata/html") + cmd.Env = append(cmd.Environ(), "CMDCOVER_TOOLEXEC=true") + run(cmd, t) + // testcover -html testdata/html/html.cov -o testdata/html/html.html + htmlHTML := filepath.Join(dir, "html.html") + cmd = testenv.Command(t, testcover(t), "-html", htmlProfile, "-o", htmlHTML) + run(cmd, t) + + // Extract the parts of the HTML with comment markers, + // and compare against a golden file. + entireHTML, err := os.ReadFile(htmlHTML) + if err != nil { + t.Fatal(err) + } + var out strings.Builder + scan := bufio.NewScanner(bytes.NewReader(entireHTML)) + in := false + for scan.Scan() { + line := scan.Text() + if strings.Contains(line, "// START") { + in = true + } + if in { + fmt.Fprintln(&out, line) + } + if strings.Contains(line, "// END") { + in = false + } + } + if scan.Err() != nil { + t.Error(scan.Err()) + } + htmlGolden := filepath.Join(testdata, "html", "html.golden") + golden, err := os.ReadFile(htmlGolden) + if err != nil { + t.Fatalf("reading golden file: %v", err) + } + // Ignore white space differences. + // Break into lines, then compare by breaking into words. + goldenLines := strings.Split(string(golden), "\n") + outLines := strings.Split(out.String(), "\n") + // Compare at the line level, stopping at first different line so + // we don't generate tons of output if there's an inserted or deleted line. + for i, goldenLine := range goldenLines { + if i >= len(outLines) { + t.Fatalf("output shorter than golden; stops before line %d: %s\n", i+1, goldenLine) + } + // Convert all white space to simple spaces, for easy comparison. 
+ goldenLine = strings.Join(strings.Fields(goldenLine), " ") + outLine := strings.Join(strings.Fields(outLines[i]), " ") + if outLine != goldenLine { + t.Fatalf("line %d differs: got:\n\t%s\nwant:\n\t%s", i+1, outLine, goldenLine) + } + } + if len(goldenLines) != len(outLines) { + t.Fatalf("output longer than golden; first extra output line %d: %q\n", len(goldenLines)+1, outLines[len(goldenLines)]) + } +} + +// Test HTML processing with a source file not run through gofmt. +// Issue #27350. +func testHtmlUnformatted(t *testing.T, toolexecArg string) { + testenv.MustHaveGoRun(t) + dir := tempDir(t) + + t.Parallel() + + htmlUDir := filepath.Join(dir, "htmlunformatted") + htmlU := filepath.Join(htmlUDir, "htmlunformatted.go") + htmlUTest := filepath.Join(htmlUDir, "htmlunformatted_test.go") + htmlUProfile := filepath.Join(htmlUDir, "htmlunformatted.cov") + htmlUHTML := filepath.Join(htmlUDir, "htmlunformatted.html") + + if err := os.Mkdir(htmlUDir, 0777); err != nil { + t.Fatal(err) + } + + if err := os.WriteFile(filepath.Join(htmlUDir, "go.mod"), []byte("module htmlunformatted\n"), 0666); err != nil { + t.Fatal(err) + } + + const htmlUContents = ` +package htmlunformatted + +var g int + +func F() { +//line x.go:1 + { { F(); goto lab } } +lab: +}` + + const htmlUTestContents = `package htmlunformatted` + + if err := os.WriteFile(htmlU, []byte(htmlUContents), 0444); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(htmlUTest, []byte(htmlUTestContents), 0444); err != nil { + t.Fatal(err) + } + + // go test -covermode=count -coverprofile TMPDIR/htmlunformatted.cov + cmd := testenv.Command(t, testenv.GoToolPath(t), "test", "-test.v", toolexecArg, "-covermode=count", "-coverprofile", htmlUProfile) + cmd.Env = append(cmd.Environ(), "CMDCOVER_TOOLEXEC=true") + cmd.Dir = htmlUDir + run(cmd, t) + + // testcover -html TMPDIR/htmlunformatted.cov -o unformatted.html + cmd = testenv.Command(t, testcover(t), "-html", htmlUProfile, "-o", htmlUHTML) + cmd.Dir = htmlUDir + 
run(cmd, t) +} + +// lineDupContents becomes linedup.go in testFuncWithDuplicateLines. +const lineDupContents = ` +package linedup + +var G int + +func LineDup(c int) { + for i := 0; i < c; i++ { +//line ld.go:100 + if i % 2 == 0 { + G++ + } + if i % 3 == 0 { + G++; G++ + } +//line ld.go:100 + if i % 4 == 0 { + G++; G++; G++ + } + if i % 5 == 0 { + G++; G++; G++; G++ + } + } +} +` + +// lineDupTestContents becomes linedup_test.go in testFuncWithDuplicateLines. +const lineDupTestContents = ` +package linedup + +import "testing" + +func TestLineDup(t *testing.T) { + LineDup(100) +} +` + +// Test -func with duplicate //line directives with different numbers +// of statements. +func testFuncWithDuplicateLines(t *testing.T, toolexecArg string) { + testenv.MustHaveGoRun(t) + dir := tempDir(t) + + t.Parallel() + + lineDupDir := filepath.Join(dir, "linedup") + lineDupGo := filepath.Join(lineDupDir, "linedup.go") + lineDupTestGo := filepath.Join(lineDupDir, "linedup_test.go") + lineDupProfile := filepath.Join(lineDupDir, "linedup.out") + + if err := os.Mkdir(lineDupDir, 0777); err != nil { + t.Fatal(err) + } + + if err := os.WriteFile(filepath.Join(lineDupDir, "go.mod"), []byte("module linedup\n"), 0666); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(lineDupGo, []byte(lineDupContents), 0444); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(lineDupTestGo, []byte(lineDupTestContents), 0444); err != nil { + t.Fatal(err) + } + + // go test -cover -covermode count -coverprofile TMPDIR/linedup.out + cmd := testenv.Command(t, testenv.GoToolPath(t), "test", toolexecArg, "-cover", "-covermode", "count", "-coverprofile", lineDupProfile) + cmd.Env = append(cmd.Environ(), "CMDCOVER_TOOLEXEC=true") + cmd.Dir = lineDupDir + run(cmd, t) + + // testcover -func=TMPDIR/linedup.out + cmd = testenv.Command(t, testcover(t), "-func", lineDupProfile) + cmd.Dir = lineDupDir + run(cmd, t) +} + +func run(c *exec.Cmd, t *testing.T) { + t.Helper() + t.Log("running", c.Args) + out, 
err := c.CombinedOutput() + if len(out) > 0 { + t.Logf("%s", out) + } + if err != nil { + t.Fatal(err) + } +} + +func runExpectingError(c *exec.Cmd, t *testing.T) string { + t.Helper() + t.Log("running", c.Args) + out, err := c.CombinedOutput() + if err == nil { + return fmt.Sprintf("unexpected pass for %+v", c.Args) + } + return string(out) +} + +// Test instrumentation of package that ends before an expected +// trailing newline following package clause. Issue #58370. +func testMissingTrailingNewlineIssue58370(t *testing.T, toolexecArg string) { + testenv.MustHaveGoBuild(t) + dir := tempDir(t) + + t.Parallel() + + noeolDir := filepath.Join(dir, "issue58370") + noeolGo := filepath.Join(noeolDir, "noeol.go") + noeolTestGo := filepath.Join(noeolDir, "noeol_test.go") + + if err := os.Mkdir(noeolDir, 0777); err != nil { + t.Fatal(err) + } + + if err := os.WriteFile(filepath.Join(noeolDir, "go.mod"), []byte("module noeol\n"), 0666); err != nil { + t.Fatal(err) + } + const noeolContents = `package noeol` + if err := os.WriteFile(noeolGo, []byte(noeolContents), 0444); err != nil { + t.Fatal(err) + } + const noeolTestContents = ` +package noeol +import "testing" +func TestCoverage(t *testing.T) { } +` + if err := os.WriteFile(noeolTestGo, []byte(noeolTestContents), 0444); err != nil { + t.Fatal(err) + } + + // go test -covermode atomic + cmd := testenv.Command(t, testenv.GoToolPath(t), "test", toolexecArg, "-covermode", "atomic") + cmd.Env = append(cmd.Environ(), "CMDCOVER_TOOLEXEC=true") + cmd.Dir = noeolDir + run(cmd, t) +} + +func TestSrcPathWithNewline(t *testing.T) { + testenv.MustHaveExec(t) + t.Parallel() + + // srcPath is intentionally not clean so that the path passed to testcover + // will not normalize the trailing / to a \ on Windows. 
+ srcPath := t.TempDir() + string(filepath.Separator) + "\npackage main\nfunc main() { panic(string([]rune{'u', 'h', '-', 'o', 'h'}))\n/*/main.go" + mainSrc := ` package main + +func main() { + /* nothing here */ + println("ok") +} +` + if err := os.MkdirAll(filepath.Dir(srcPath), 0777); err != nil { + t.Skipf("creating directory with bogus path: %v", err) + } + if err := os.WriteFile(srcPath, []byte(mainSrc), 0666); err != nil { + t.Skipf("writing file with bogus directory: %v", err) + } + + cmd := testenv.Command(t, testcover(t), "-mode=atomic", srcPath) + cmd.Stderr = new(bytes.Buffer) + out, err := cmd.Output() + t.Logf("%v:\n%s", cmd, out) + t.Logf("stderr:\n%s", cmd.Stderr) + if err == nil { + t.Errorf("unexpected success; want failure due to newline in file path") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/doc.go b/platform/dbops/binaries/go/go/src/cmd/cover/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..f5b9b1c117ac1092277a80a64c903527dbb61d92 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/doc.go @@ -0,0 +1,32 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Cover is a program for analyzing the coverage profiles generated by +'go test -coverprofile=cover.out'. + +Cover is also used by 'go test -cover' to rewrite the source code with +annotations to track which parts of each function are executed (this +is referred to "instrumentation"). Cover can operate in "legacy mode" +on a single Go source file at a time, or when invoked by the Go tool +it will process all the source files in a single package at a time +(package-scope instrumentation is enabled via "-pkgcfg" option). + +When generated instrumented code, the cover tool computes approximate +basic block information by studying the source. 
It is thus more +portable than binary-rewriting coverage tools, but also a little less +capable. For instance, it does not probe inside && and || expressions, +and can be mildly confused by single statements with multiple function +literals. + +When computing coverage of a package that uses cgo, the cover tool +must be applied to the output of cgo preprocessing, not the input, +because cover deletes comments that are significant to cgo. + +For usage information, please see: + + go help testflag + go tool cover -help +*/ +package main diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/export_test.go b/platform/dbops/binaries/go/go/src/cmd/cover/export_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e4592ee8f7b19873c1719d9c4e6429e992524e9f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/export_test.go @@ -0,0 +1,7 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func Main() { main() } diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/func.go b/platform/dbops/binaries/go/go/src/cmd/cover/func.go new file mode 100644 index 0000000000000000000000000000000000000000..dffd3c1a0553ac2e907028abdee5d046f3d746da --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/func.go @@ -0,0 +1,248 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements the visitor that computes the (line, column)-(line-column) range for each function. 
+ +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "strings" + "text/tabwriter" + + "golang.org/x/tools/cover" +) + +// funcOutput takes two file names as arguments, a coverage profile to read as input and an output +// file to write ("" means to write to standard output). The function reads the profile and produces +// as output the coverage data broken down by function, like this: +// +// fmt/format.go:30: init 100.0% +// fmt/format.go:57: clearflags 100.0% +// ... +// fmt/scan.go:1046: doScan 100.0% +// fmt/scan.go:1075: advance 96.2% +// fmt/scan.go:1119: doScanf 96.8% +// total: (statements) 91.9% + +func funcOutput(profile, outputFile string) error { + profiles, err := cover.ParseProfiles(profile) + if err != nil { + return err + } + + dirs, err := findPkgs(profiles) + if err != nil { + return err + } + + var out *bufio.Writer + if outputFile == "" { + out = bufio.NewWriter(os.Stdout) + } else { + fd, err := os.Create(outputFile) + if err != nil { + return err + } + defer fd.Close() + out = bufio.NewWriter(fd) + } + defer out.Flush() + + tabber := tabwriter.NewWriter(out, 1, 8, 1, '\t', 0) + defer tabber.Flush() + + var total, covered int64 + for _, profile := range profiles { + fn := profile.FileName + file, err := findFile(dirs, fn) + if err != nil { + return err + } + funcs, err := findFuncs(file) + if err != nil { + return err + } + // Now match up functions and profile blocks. + for _, f := range funcs { + c, t := f.coverage(profile) + fmt.Fprintf(tabber, "%s:%d:\t%s\t%.1f%%\n", fn, f.startLine, f.name, percent(c, t)) + total += t + covered += c + } + } + fmt.Fprintf(tabber, "total:\t(statements)\t%.1f%%\n", percent(covered, total)) + + return nil +} + +// findFuncs parses the file and returns a slice of FuncExtent descriptors. 
+func findFuncs(name string) ([]*FuncExtent, error) { + fset := token.NewFileSet() + parsedFile, err := parser.ParseFile(fset, name, nil, 0) + if err != nil { + return nil, err + } + visitor := &FuncVisitor{ + fset: fset, + name: name, + astFile: parsedFile, + } + ast.Walk(visitor, visitor.astFile) + return visitor.funcs, nil +} + +// FuncExtent describes a function's extent in the source by file and position. +type FuncExtent struct { + name string + startLine int + startCol int + endLine int + endCol int +} + +// FuncVisitor implements the visitor that builds the function position list for a file. +type FuncVisitor struct { + fset *token.FileSet + name string // Name of file. + astFile *ast.File + funcs []*FuncExtent +} + +// Visit implements the ast.Visitor interface. +func (v *FuncVisitor) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.FuncDecl: + if n.Body == nil { + // Do not count declarations of assembly functions. + break + } + start := v.fset.Position(n.Pos()) + end := v.fset.Position(n.End()) + fe := &FuncExtent{ + name: n.Name.Name, + startLine: start.Line, + startCol: start.Column, + endLine: end.Line, + endCol: end.Column, + } + v.funcs = append(v.funcs, fe) + } + return v +} + +// coverage returns the fraction of the statements in the function that were covered, as a numerator and denominator. +func (f *FuncExtent) coverage(profile *cover.Profile) (num, den int64) { + // We could avoid making this n^2 overall by doing a single scan and annotating the functions, + // but the sizes of the data structures is never very large and the scan is almost instantaneous. + var covered, total int64 + // The blocks are sorted, so we can stop counting as soon as we reach the end of the relevant block. + for _, b := range profile.Blocks { + if b.StartLine > f.endLine || (b.StartLine == f.endLine && b.StartCol >= f.endCol) { + // Past the end of the function. 
+ break + } + if b.EndLine < f.startLine || (b.EndLine == f.startLine && b.EndCol <= f.startCol) { + // Before the beginning of the function + continue + } + total += int64(b.NumStmt) + if b.Count > 0 { + covered += int64(b.NumStmt) + } + } + return covered, total +} + +// Pkg describes a single package, compatible with the JSON output from 'go list'; see 'go help list'. +type Pkg struct { + ImportPath string + Dir string + Error *struct { + Err string + } +} + +func findPkgs(profiles []*cover.Profile) (map[string]*Pkg, error) { + // Run go list to find the location of every package we care about. + pkgs := make(map[string]*Pkg) + var list []string + for _, profile := range profiles { + if strings.HasPrefix(profile.FileName, ".") || filepath.IsAbs(profile.FileName) { + // Relative or absolute path. + continue + } + pkg := path.Dir(profile.FileName) + if _, ok := pkgs[pkg]; !ok { + pkgs[pkg] = nil + list = append(list, pkg) + } + } + + if len(list) == 0 { + return pkgs, nil + } + + // Note: usually run as "go tool cover" in which case $GOROOT is set, + // in which case runtime.GOROOT() does exactly what we want. + goTool := filepath.Join(runtime.GOROOT(), "bin/go") + cmd := exec.Command(goTool, append([]string{"list", "-e", "-json"}, list...)...) + var stderr bytes.Buffer + cmd.Stderr = &stderr + stdout, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("cannot run go list: %v\n%s", err, stderr.Bytes()) + } + dec := json.NewDecoder(bytes.NewReader(stdout)) + for { + var pkg Pkg + err := dec.Decode(&pkg) + if err == io.EOF { + break + } + if err != nil { + return nil, fmt.Errorf("decoding go list json: %v", err) + } + pkgs[pkg.ImportPath] = &pkg + } + return pkgs, nil +} + +// findFile finds the location of the named file in GOROOT, GOPATH etc. +func findFile(pkgs map[string]*Pkg, file string) (string, error) { + if strings.HasPrefix(file, ".") || filepath.IsAbs(file) { + // Relative or absolute path. 
+ return file, nil + } + pkg := pkgs[path.Dir(file)] + if pkg != nil { + if pkg.Dir != "" { + return filepath.Join(pkg.Dir, path.Base(file)), nil + } + if pkg.Error != nil { + return "", errors.New(pkg.Error.Err) + } + } + return "", fmt.Errorf("did not find package for %s in go list output", file) +} + +func percent(covered, total int64) float64 { + if total == 0 { + total = 1 // Avoid zero denominator. + } + return 100.0 * float64(covered) / float64(total) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/html.go b/platform/dbops/binaries/go/go/src/cmd/cover/html.go new file mode 100644 index 0000000000000000000000000000000000000000..400a7d879d0f6572d0825e46a0beb95c60fbcebc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/html.go @@ -0,0 +1,306 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "cmd/internal/browser" + "fmt" + "html/template" + "io" + "math" + "os" + "path/filepath" + "strings" + + "golang.org/x/tools/cover" +) + +// htmlOutput reads the profile data from profile and generates an HTML +// coverage report, writing it to outfile. If outfile is empty, +// it writes the report to a temporary file and opens it in a web browser. 
+func htmlOutput(profile, outfile string) error { + profiles, err := cover.ParseProfiles(profile) + if err != nil { + return err + } + + var d templateData + + dirs, err := findPkgs(profiles) + if err != nil { + return err + } + + for _, profile := range profiles { + fn := profile.FileName + if profile.Mode == "set" { + d.Set = true + } + file, err := findFile(dirs, fn) + if err != nil { + return err + } + src, err := os.ReadFile(file) + if err != nil { + return fmt.Errorf("can't read %q: %v", fn, err) + } + var buf strings.Builder + err = htmlGen(&buf, src, profile.Boundaries(src)) + if err != nil { + return err + } + d.Files = append(d.Files, &templateFile{ + Name: fn, + Body: template.HTML(buf.String()), + Coverage: percentCovered(profile), + }) + } + + var out *os.File + if outfile == "" { + var dir string + dir, err = os.MkdirTemp("", "cover") + if err != nil { + return err + } + out, err = os.Create(filepath.Join(dir, "coverage.html")) + } else { + out, err = os.Create(outfile) + } + if err != nil { + return err + } + err = htmlTemplate.Execute(out, d) + if err2 := out.Close(); err == nil { + err = err2 + } + if err != nil { + return err + } + + if outfile == "" { + if !browser.Open("file://" + out.Name()) { + fmt.Fprintf(os.Stderr, "HTML output written to %s\n", out.Name()) + } + } + + return nil +} + +// percentCovered returns, as a percentage, the fraction of the statements in +// the profile covered by the test run. +// In effect, it reports the coverage of a given source file. +func percentCovered(p *cover.Profile) float64 { + var total, covered int64 + for _, b := range p.Blocks { + total += int64(b.NumStmt) + if b.Count > 0 { + covered += int64(b.NumStmt) + } + } + if total == 0 { + return 0 + } + return float64(covered) / float64(total) * 100 +} + +// htmlGen generates an HTML coverage report with the provided filename, +// source code, and tokens, and writes it to the given Writer. 
+func htmlGen(w io.Writer, src []byte, boundaries []cover.Boundary) error { + dst := bufio.NewWriter(w) + for i := range src { + for len(boundaries) > 0 && boundaries[0].Offset == i { + b := boundaries[0] + if b.Start { + n := 0 + if b.Count > 0 { + n = int(math.Floor(b.Norm*9)) + 1 + } + fmt.Fprintf(dst, ``, n, b.Count) + } else { + dst.WriteString("") + } + boundaries = boundaries[1:] + } + switch b := src[i]; b { + case '>': + dst.WriteString(">") + case '<': + dst.WriteString("<") + case '&': + dst.WriteString("&") + case '\t': + dst.WriteString(" ") + default: + dst.WriteByte(b) + } + } + return dst.Flush() +} + +// rgb returns an rgb value for the specified coverage value +// between 0 (no coverage) and 10 (max coverage). +func rgb(n int) string { + if n == 0 { + return "rgb(192, 0, 0)" // Red + } + // Gradient from gray to green. + r := 128 - 12*(n-1) + g := 128 + 12*(n-1) + b := 128 + 3*(n-1) + return fmt.Sprintf("rgb(%v, %v, %v)", r, g, b) +} + +// colors generates the CSS rules for coverage colors. +func colors() template.CSS { + var buf strings.Builder + for i := 0; i < 11; i++ { + fmt.Fprintf(&buf, ".cov%v { color: %v }\n", i, rgb(i)) + } + return template.CSS(buf.String()) +} + +var htmlTemplate = template.Must(template.New("html").Funcs(template.FuncMap{ + "colors": colors, +}).Parse(tmplHTML)) + +type templateData struct { + Files []*templateFile + Set bool +} + +// PackageName returns a name for the package being shown. +// It does this by choosing the penultimate element of the path +// name, so foo.bar/baz/foo.go chooses 'baz'. This is cheap +// and easy, avoids parsing the Go file, and gets a better answer +// for package main. It returns the empty string if there is +// a problem. +func (td templateData) PackageName() string { + if len(td.Files) == 0 { + return "" + } + fileName := td.Files[0].Name + elems := strings.Split(fileName, "/") // Package path is always slash-separated. + // Return the penultimate non-empty element. 
+ for i := len(elems) - 2; i >= 0; i-- { + if elems[i] != "" { + return elems[i] + } + } + return "" +} + +type templateFile struct { + Name string + Body template.HTML + Coverage float64 +} + +const tmplHTML = ` + + + + + {{$pkg := .PackageName}}{{if $pkg}}{{$pkg}}: {{end}}Go Coverage Report + + + +
    + +
    + not tracked + {{if .Set}} + not covered + covered + {{else}} + no coverage + low coverage + * + * + * + * + * + * + * + * + high coverage + {{end}} +
    +
    +
    + {{range $i, $f := .Files}} + + {{end}} +
    + + + +` diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/pkgname_test.go b/platform/dbops/binaries/go/go/src/cmd/cover/pkgname_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1c731ad779ed29ed416f6b797a826c1e0fef7121 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/pkgname_test.go @@ -0,0 +1,31 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "testing" + +func TestPackageName(t *testing.T) { + var tests = []struct { + fileName, pkgName string + }{ + {"", ""}, + {"///", ""}, + {"fmt", ""}, // No Go file, improper form. + {"fmt/foo.go", "fmt"}, + {"encoding/binary/foo.go", "binary"}, + {"encoding/binary/////foo.go", "binary"}, + } + var tf templateFile + for _, test := range tests { + tf.Name = test.fileName + td := templateData{ + Files: []*templateFile{&tf}, + } + got := td.PackageName() + if got != test.pkgName { + t.Errorf("%s: got %s want %s", test.fileName, got, test.pkgName) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/README b/platform/dbops/binaries/go/go/src/cmd/dist/README new file mode 100644 index 0000000000000000000000000000000000000000..0f99284e6680cf27409c0c8e48760c2b2f541622 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/README @@ -0,0 +1,21 @@ +This program, dist, is the bootstrapping tool for the Go distribution. + +As of Go 1.5, dist and other parts of the compiler toolchain are written +in Go, making bootstrapping a little more involved than in the past. +The approach is to build the current release of Go with an earlier one. + +The process to install Go 1.x, for x ≥ 22, is: + +1. Build cmd/dist with Go 1.20.6. +2. Using dist, build Go 1.x compiler toolchain with Go 1.20.6. +3. Using dist, rebuild Go 1.x compiler toolchain with itself. +4. 
Using dist, build Go 1.x cmd/go (as go_bootstrap) with Go 1.x compiler toolchain. +5. Using go_bootstrap, build the remaining Go 1.x standard library and commands. + +Because of backward compatibility, although the steps above say Go 1.20.6, +in practice any release ≥ Go 1.20.6 but < Go 1.x will work as the bootstrap base. +Releases ≥ Go 1.x are very likely to work as well. + +See https://go.dev/s/go15bootstrap for more details about the original bootstrap +and https://go.dev/issue/54265 for details about later bootstrap version bumps. + diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/build.go b/platform/dbops/binaries/go/go/src/cmd/dist/build.go new file mode 100644 index 0000000000000000000000000000000000000000..32e59b446a5d9be5be970271cb3e9b7a0c59304f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/build.go @@ -0,0 +1,1952 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/fs" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + "time" +) + +// Initialization for any invocation. + +// The usual variables. +var ( + goarch string + gorootBin string + gorootBinGo string + gohostarch string + gohostos string + goos string + goarm string + go386 string + goamd64 string + gomips string + gomips64 string + goppc64 string + goroot string + goroot_final string + goextlinkenabled string + gogcflags string // For running built compiler + goldflags string + goexperiment string + workdir string + tooldir string + oldgoos string + oldgoarch string + oldgocache string + exe string + defaultcc map[string]string + defaultcxx map[string]string + defaultpkgconfig string + defaultldso string + + rebuildall bool + noOpt bool + isRelease bool + + vflag int // verbosity +) + +// The known architectures. 
+var okgoarch = []string{ + "386", + "amd64", + "arm", + "arm64", + "loong64", + "mips", + "mipsle", + "mips64", + "mips64le", + "ppc64", + "ppc64le", + "riscv64", + "s390x", + "sparc64", + "wasm", +} + +// The known operating systems. +var okgoos = []string{ + "darwin", + "dragonfly", + "illumos", + "ios", + "js", + "wasip1", + "linux", + "android", + "solaris", + "freebsd", + "nacl", // keep; + "netbsd", + "openbsd", + "plan9", + "windows", + "aix", +} + +// find reports the first index of p in l[0:n], or else -1. +func find(p string, l []string) int { + for i, s := range l { + if p == s { + return i + } + } + return -1 +} + +// xinit handles initialization of the various global state, like goroot and goarch. +func xinit() { + b := os.Getenv("GOROOT") + if b == "" { + fatalf("$GOROOT must be set") + } + goroot = filepath.Clean(b) + gorootBin = pathf("%s/bin", goroot) + + // Don't run just 'go' because the build infrastructure + // runs cmd/dist inside go/bin often, and on Windows + // it will be found in the current directory and refuse to exec. + // All exec calls rewrite "go" into gorootBinGo. 
+ gorootBinGo = pathf("%s/bin/go", goroot) + + b = os.Getenv("GOROOT_FINAL") + if b == "" { + b = goroot + } + goroot_final = b + + b = os.Getenv("GOOS") + if b == "" { + b = gohostos + } + goos = b + if find(goos, okgoos) < 0 { + fatalf("unknown $GOOS %s", goos) + } + + b = os.Getenv("GOARM") + if b == "" { + b = xgetgoarm() + } + goarm = b + + b = os.Getenv("GO386") + if b == "" { + b = "sse2" + } + go386 = b + + b = os.Getenv("GOAMD64") + if b == "" { + b = "v1" + } + goamd64 = b + + b = os.Getenv("GOMIPS") + if b == "" { + b = "hardfloat" + } + gomips = b + + b = os.Getenv("GOMIPS64") + if b == "" { + b = "hardfloat" + } + gomips64 = b + + b = os.Getenv("GOPPC64") + if b == "" { + b = "power8" + } + goppc64 = b + + if p := pathf("%s/src/all.bash", goroot); !isfile(p) { + fatalf("$GOROOT is not set correctly or not exported\n"+ + "\tGOROOT=%s\n"+ + "\t%s does not exist", goroot, p) + } + + b = os.Getenv("GOHOSTARCH") + if b != "" { + gohostarch = b + } + if find(gohostarch, okgoarch) < 0 { + fatalf("unknown $GOHOSTARCH %s", gohostarch) + } + + b = os.Getenv("GOARCH") + if b == "" { + b = gohostarch + } + goarch = b + if find(goarch, okgoarch) < 0 { + fatalf("unknown $GOARCH %s", goarch) + } + + b = os.Getenv("GO_EXTLINK_ENABLED") + if b != "" { + if b != "0" && b != "1" { + fatalf("unknown $GO_EXTLINK_ENABLED %s", b) + } + goextlinkenabled = b + } + + goexperiment = os.Getenv("GOEXPERIMENT") + // TODO(mdempsky): Validate known experiments? + + gogcflags = os.Getenv("BOOT_GO_GCFLAGS") + goldflags = os.Getenv("BOOT_GO_LDFLAGS") + + defaultcc = compilerEnv("CC", "") + defaultcxx = compilerEnv("CXX", "") + + b = os.Getenv("PKG_CONFIG") + if b == "" { + b = "pkg-config" + } + defaultpkgconfig = b + + defaultldso = os.Getenv("GO_LDSO") + + // For tools being invoked but also for os.ExpandEnv. 
+ os.Setenv("GO386", go386) + os.Setenv("GOAMD64", goamd64) + os.Setenv("GOARCH", goarch) + os.Setenv("GOARM", goarm) + os.Setenv("GOHOSTARCH", gohostarch) + os.Setenv("GOHOSTOS", gohostos) + os.Setenv("GOOS", goos) + os.Setenv("GOMIPS", gomips) + os.Setenv("GOMIPS64", gomips64) + os.Setenv("GOPPC64", goppc64) + os.Setenv("GOROOT", goroot) + os.Setenv("GOROOT_FINAL", goroot_final) + + // Set GOBIN to GOROOT/bin. The meaning of GOBIN has drifted over time + // (see https://go.dev/issue/3269, https://go.dev/cl/183058, + // https://go.dev/issue/31576). Since we want binaries installed by 'dist' to + // always go to GOROOT/bin anyway. + os.Setenv("GOBIN", gorootBin) + + // Make the environment more predictable. + os.Setenv("LANG", "C") + os.Setenv("LANGUAGE", "en_US.UTF8") + os.Unsetenv("GO111MODULE") + os.Setenv("GOENV", "off") + os.Unsetenv("GOFLAGS") + os.Setenv("GOWORK", "off") + + workdir = xworkdir() + if err := os.WriteFile(pathf("%s/go.mod", workdir), []byte("module bootstrap"), 0666); err != nil { + fatalf("cannot write stub go.mod: %s", err) + } + xatexit(rmworkdir) + + tooldir = pathf("%s/pkg/tool/%s_%s", goroot, gohostos, gohostarch) + + goversion := findgoversion() + isRelease = strings.HasPrefix(goversion, "release.") || strings.HasPrefix(goversion, "go") +} + +// compilerEnv returns a map from "goos/goarch" to the +// compiler setting to use for that platform. +// The entry for key "" covers any goos/goarch not explicitly set in the map. +// For example, compilerEnv("CC", "gcc") returns the C compiler settings +// read from $CC, defaulting to gcc. +// +// The result is a map because additional environment variables +// can be set to change the compiler based on goos/goarch settings. +// The following applies to all envNames but CC is assumed to simplify +// the presentation. +// +// If no environment variables are set, we use def for all goos/goarch. +// $CC, if set, applies to all goos/goarch but is overridden by the following. 
+// $CC_FOR_TARGET, if set, applies to all goos/goarch except gohostos/gohostarch, +// but is overridden by the following. +// If gohostos=goos and gohostarch=goarch, then $CC_FOR_TARGET applies even for gohostos/gohostarch. +// $CC_FOR_goos_goarch, if set, applies only to goos/goarch. +func compilerEnv(envName, def string) map[string]string { + m := map[string]string{"": def} + + if env := os.Getenv(envName); env != "" { + m[""] = env + } + if env := os.Getenv(envName + "_FOR_TARGET"); env != "" { + if gohostos != goos || gohostarch != goarch { + m[gohostos+"/"+gohostarch] = m[""] + } + m[""] = env + } + + for _, goos := range okgoos { + for _, goarch := range okgoarch { + if env := os.Getenv(envName + "_FOR_" + goos + "_" + goarch); env != "" { + m[goos+"/"+goarch] = env + } + } + } + + return m +} + +// clangos lists the operating systems where we prefer clang to gcc. +var clangos = []string{ + "darwin", "ios", // macOS 10.9 and later require clang + "freebsd", // FreeBSD 10 and later do not ship gcc + "openbsd", // OpenBSD ships with GCC 4.2, which is now quite old. +} + +// compilerEnvLookup returns the compiler settings for goos/goarch in map m. +// kind is "CC" or "CXX". +func compilerEnvLookup(kind string, m map[string]string, goos, goarch string) string { + if !needCC() { + return "" + } + if cc := m[goos+"/"+goarch]; cc != "" { + return cc + } + if cc := m[""]; cc != "" { + return cc + } + for _, os := range clangos { + if goos == os { + if kind == "CXX" { + return "clang++" + } + return "clang" + } + } + if kind == "CXX" { + return "g++" + } + return "gcc" +} + +// rmworkdir deletes the work directory. +func rmworkdir() { + if vflag > 1 { + errprintf("rm -rf %s\n", workdir) + } + xremoveall(workdir) +} + +// Remove trailing spaces. +func chomp(s string) string { + return strings.TrimRight(s, " \t\r\n") +} + +// findgoversion determines the Go version to use in the version string. +// It also parses any other metadata found in the version file. 
+func findgoversion() string { + // The $GOROOT/VERSION file takes priority, for distributions + // without the source repo. + path := pathf("%s/VERSION", goroot) + if isfile(path) { + b := chomp(readfile(path)) + + // Starting in Go 1.21 the VERSION file starts with the + // version on a line by itself but then can contain other + // metadata about the release, one item per line. + if i := strings.Index(b, "\n"); i >= 0 { + rest := b[i+1:] + b = chomp(b[:i]) + for _, line := range strings.Split(rest, "\n") { + f := strings.Fields(line) + if len(f) == 0 { + continue + } + switch f[0] { + default: + fatalf("VERSION: unexpected line: %s", line) + case "time": + if len(f) != 2 { + fatalf("VERSION: unexpected time line: %s", line) + } + _, err := time.Parse(time.RFC3339, f[1]) + if err != nil { + fatalf("VERSION: bad time: %s", err) + } + } + } + } + + // Commands such as "dist version > VERSION" will cause + // the shell to create an empty VERSION file and set dist's + // stdout to its fd. dist in turn looks at VERSION and uses + // its content if available, which is empty at this point. + // Only use the VERSION file if it is non-empty. + if b != "" { + return b + } + } + + // The $GOROOT/VERSION.cache file is a cache to avoid invoking + // git every time we run this command. Unlike VERSION, it gets + // deleted by the clean command. + path = pathf("%s/VERSION.cache", goroot) + if isfile(path) { + return chomp(readfile(path)) + } + + // Show a nicer error message if this isn't a Git repo. + if !isGitRepo() { + fatalf("FAILED: not a Git repo; must put a VERSION file in $GOROOT") + } + + // Otherwise, use Git. + // + // Include 1.x base version, hash, and date in the version. + // + // Note that we lightly parse internal/goversion/goversion.go to + // obtain the base version. We can't just import the package, + // because cmd/dist is built with a bootstrap GOROOT which could + // be an entirely different version of Go. 
We assume + // that the file contains "const Version = ". + goversionSource := readfile(pathf("%s/src/internal/goversion/goversion.go", goroot)) + m := regexp.MustCompile(`(?m)^const Version = (\d+)`).FindStringSubmatch(goversionSource) + if m == nil { + fatalf("internal/goversion/goversion.go does not contain 'const Version = ...'") + } + version := fmt.Sprintf("devel go1.%s-", m[1]) + version += chomp(run(goroot, CheckExit, "git", "log", "-n", "1", "--format=format:%h %cd", "HEAD")) + + // Cache version. + writefile(version, path, 0) + + return version +} + +// isGitRepo reports whether the working directory is inside a Git repository. +func isGitRepo() bool { + // NB: simply checking the exit code of `git rev-parse --git-dir` would + // suffice here, but that requires deviating from the infrastructure + // provided by `run`. + gitDir := chomp(run(goroot, 0, "git", "rev-parse", "--git-dir")) + if !filepath.IsAbs(gitDir) { + gitDir = filepath.Join(goroot, gitDir) + } + return isdir(gitDir) +} + +/* + * Initial tree setup. + */ + +// The old tools that no longer live in $GOBIN or $GOROOT/bin. +var oldtool = []string{ + "5a", "5c", "5g", "5l", + "6a", "6c", "6g", "6l", + "8a", "8c", "8g", "8l", + "9a", "9c", "9g", "9l", + "6cov", + "6nm", + "6prof", + "cgo", + "ebnflint", + "goapi", + "gofix", + "goinstall", + "gomake", + "gopack", + "gopprof", + "gotest", + "gotype", + "govet", + "goyacc", + "quietgcc", +} + +// Unreleased directories (relative to $GOROOT) that should +// not be in release branches. +var unreleased = []string{ + "src/cmd/newlink", + "src/cmd/objwriter", + "src/debug/goobj", + "src/old", +} + +// setup sets up the tree for the initial build. +func setup() { + // Create bin directory. + if p := pathf("%s/bin", goroot); !isdir(p) { + xmkdir(p) + } + + // Create package directory. 
+ if p := pathf("%s/pkg", goroot); !isdir(p) { + xmkdir(p) + } + + goosGoarch := pathf("%s/pkg/%s_%s", goroot, gohostos, gohostarch) + if rebuildall { + xremoveall(goosGoarch) + } + xmkdirall(goosGoarch) + xatexit(func() { + if files := xreaddir(goosGoarch); len(files) == 0 { + xremove(goosGoarch) + } + }) + + if goos != gohostos || goarch != gohostarch { + p := pathf("%s/pkg/%s_%s", goroot, goos, goarch) + if rebuildall { + xremoveall(p) + } + xmkdirall(p) + } + + // Create object directory. + // We used to use it for C objects. + // Now we use it for the build cache, to separate dist's cache + // from any other cache the user might have, and for the location + // to build the bootstrap versions of the standard library. + obj := pathf("%s/pkg/obj", goroot) + if !isdir(obj) { + xmkdir(obj) + } + xatexit(func() { xremove(obj) }) + + // Create build cache directory. + objGobuild := pathf("%s/pkg/obj/go-build", goroot) + if rebuildall { + xremoveall(objGobuild) + } + xmkdirall(objGobuild) + xatexit(func() { xremoveall(objGobuild) }) + + // Create directory for bootstrap versions of standard library .a files. + objGoBootstrap := pathf("%s/pkg/obj/go-bootstrap", goroot) + if rebuildall { + xremoveall(objGoBootstrap) + } + xmkdirall(objGoBootstrap) + xatexit(func() { xremoveall(objGoBootstrap) }) + + // Create tool directory. + // We keep it in pkg/, just like the object directory above. + if rebuildall { + xremoveall(tooldir) + } + xmkdirall(tooldir) + + // Remove tool binaries from before the tool/gohostos_gohostarch + xremoveall(pathf("%s/bin/tool", goroot)) + + // Remove old pre-tool binaries. + for _, old := range oldtool { + xremove(pathf("%s/bin/%s", goroot, old)) + } + + // Special release-specific setup. + if isRelease { + // Make sure release-excluded things are excluded. 
+ for _, dir := range unreleased { + if p := pathf("%s/%s", goroot, dir); isdir(p) { + fatalf("%s should not exist in release build", p) + } + } + } +} + +/* + * Tool building + */ + +// mustLinkExternal is a copy of internal/platform.MustLinkExternal, +// duplicated here to avoid version skew in the MustLinkExternal function +// during bootstrapping. +func mustLinkExternal(goos, goarch string, cgoEnabled bool) bool { + if cgoEnabled { + switch goarch { + case "loong64", "mips", "mipsle", "mips64", "mips64le": + // Internally linking cgo is incomplete on some architectures. + // https://golang.org/issue/14449 + return true + case "arm64": + if goos == "windows" { + // windows/arm64 internal linking is not implemented. + return true + } + case "ppc64": + // Big Endian PPC64 cgo internal linking is not implemented for aix or linux. + if goos == "aix" || goos == "linux" { + return true + } + } + + switch goos { + case "android": + return true + case "dragonfly": + // It seems that on Dragonfly thread local storage is + // set up by the dynamic linker, so internal cgo linking + // doesn't work. Test case is "go test runtime/cgo". + return true + } + } + + switch goos { + case "android": + if goarch != "arm64" { + return true + } + case "ios": + if goarch == "arm64" { + return true + } + } + return false +} + +// depsuffix records the allowed suffixes for source files. +var depsuffix = []string{ + ".s", + ".go", +} + +// gentab records how to generate some trivial files. +// Files listed here should also be listed in ../distpack/pack.go's srcArch.Remove list. 
+var gentab = []struct { + pkg string // Relative to $GOROOT/src + file string + gen func(dir, file string) +}{ + {"go/build", "zcgo.go", mkzcgo}, + {"cmd/go/internal/cfg", "zdefaultcc.go", mkzdefaultcc}, + {"runtime/internal/sys", "zversion.go", mkzversion}, + {"time/tzdata", "zzipdata.go", mktzdata}, +} + +// installed maps from a dir name (as given to install) to a chan +// closed when the dir's package is installed. +var installed = make(map[string]chan struct{}) +var installedMu sync.Mutex + +func install(dir string) { + <-startInstall(dir) +} + +func startInstall(dir string) chan struct{} { + installedMu.Lock() + ch := installed[dir] + if ch == nil { + ch = make(chan struct{}) + installed[dir] = ch + go runInstall(dir, ch) + } + installedMu.Unlock() + return ch +} + +// runInstall installs the library, package, or binary associated with pkg, +// which is relative to $GOROOT/src. +func runInstall(pkg string, ch chan struct{}) { + if pkg == "net" || pkg == "os/user" || pkg == "crypto/x509" { + fatalf("go_bootstrap cannot depend on cgo package %s", pkg) + } + + defer close(ch) + + if pkg == "unsafe" { + return + } + + if vflag > 0 { + if goos != gohostos || goarch != gohostarch { + errprintf("%s (%s/%s)\n", pkg, goos, goarch) + } else { + errprintf("%s\n", pkg) + } + } + + workdir := pathf("%s/%s", workdir, pkg) + xmkdirall(workdir) + + var clean []string + defer func() { + for _, name := range clean { + xremove(name) + } + }() + + // dir = full path to pkg. + dir := pathf("%s/src/%s", goroot, pkg) + name := filepath.Base(dir) + + // ispkg predicts whether the package should be linked as a binary, based + // on the name. There should be no "main" packages in vendor, since + // 'go mod vendor' will only copy imported packages there. + ispkg := !strings.HasPrefix(pkg, "cmd/") || strings.Contains(pkg, "/internal/") || strings.Contains(pkg, "/vendor/") + + // Start final link command line. + // Note: code below knows that link.p[targ] is the target. 
+ var ( + link []string + targ int + ispackcmd bool + ) + if ispkg { + // Go library (package). + ispackcmd = true + link = []string{"pack", packagefile(pkg)} + targ = len(link) - 1 + xmkdirall(filepath.Dir(link[targ])) + } else { + // Go command. + elem := name + if elem == "go" { + elem = "go_bootstrap" + } + link = []string{pathf("%s/link", tooldir)} + if goos == "android" { + link = append(link, "-buildmode=pie") + } + if goldflags != "" { + link = append(link, goldflags) + } + link = append(link, "-extld="+compilerEnvLookup("CC", defaultcc, goos, goarch)) + link = append(link, "-L="+pathf("%s/pkg/obj/go-bootstrap/%s_%s", goroot, goos, goarch)) + link = append(link, "-o", pathf("%s/%s%s", tooldir, elem, exe)) + targ = len(link) - 1 + } + ttarg := mtime(link[targ]) + + // Gather files that are sources for this target. + // Everything in that directory, and any target-specific + // additions. + files := xreaddir(dir) + + // Remove files beginning with . or _, + // which are likely to be editor temporary files. + // This is the same heuristic build.ScanDir uses. + // There do exist real C files beginning with _, + // so limit that check to just Go files. + files = filter(files, func(p string) bool { + return !strings.HasPrefix(p, ".") && (!strings.HasPrefix(p, "_") || !strings.HasSuffix(p, ".go")) + }) + + // Add generated files for this package. + for _, gt := range gentab { + if gt.pkg == pkg { + files = append(files, gt.file) + } + } + files = uniq(files) + + // Convert to absolute paths. + for i, p := range files { + if !filepath.IsAbs(p) { + files[i] = pathf("%s/%s", dir, p) + } + } + + // Is the target up-to-date? 
+ var gofiles, sfiles []string + stale := rebuildall + files = filter(files, func(p string) bool { + for _, suf := range depsuffix { + if strings.HasSuffix(p, suf) { + goto ok + } + } + return false + ok: + t := mtime(p) + if !t.IsZero() && !strings.HasSuffix(p, ".a") && !shouldbuild(p, pkg) { + return false + } + if strings.HasSuffix(p, ".go") { + gofiles = append(gofiles, p) + } else if strings.HasSuffix(p, ".s") { + sfiles = append(sfiles, p) + } + if t.After(ttarg) { + stale = true + } + return true + }) + + // If there are no files to compile, we're done. + if len(files) == 0 { + return + } + + if !stale { + return + } + + // For package runtime, copy some files into the work space. + if pkg == "runtime" { + xmkdirall(pathf("%s/pkg/include", goroot)) + // For use by assembly and C files. + copyfile(pathf("%s/pkg/include/textflag.h", goroot), + pathf("%s/src/runtime/textflag.h", goroot), 0) + copyfile(pathf("%s/pkg/include/funcdata.h", goroot), + pathf("%s/src/runtime/funcdata.h", goroot), 0) + copyfile(pathf("%s/pkg/include/asm_ppc64x.h", goroot), + pathf("%s/src/runtime/asm_ppc64x.h", goroot), 0) + copyfile(pathf("%s/pkg/include/asm_amd64.h", goroot), + pathf("%s/src/runtime/asm_amd64.h", goroot), 0) + } + + // Generate any missing files; regenerate existing ones. + for _, gt := range gentab { + if gt.pkg != pkg { + continue + } + p := pathf("%s/%s", dir, gt.file) + if vflag > 1 { + errprintf("generate %s\n", p) + } + gt.gen(dir, p) + // Do not add generated file to clean list. + // In runtime, we want to be able to + // build the package with the go tool, + // and it assumes these generated files already + // exist (it does not know how to build them). + // The 'clean' command can remove + // the generated files. + } + + // Resolve imported packages to actual package paths. + // Make sure they're installed. 
+ importMap := make(map[string]string) + for _, p := range gofiles { + for _, imp := range readimports(p) { + if imp == "C" { + fatalf("%s imports C", p) + } + importMap[imp] = resolveVendor(imp, dir) + } + } + sortedImports := make([]string, 0, len(importMap)) + for imp := range importMap { + sortedImports = append(sortedImports, imp) + } + sort.Strings(sortedImports) + + for _, dep := range importMap { + if dep == "C" { + fatalf("%s imports C", pkg) + } + startInstall(dep) + } + for _, dep := range importMap { + install(dep) + } + + if goos != gohostos || goarch != gohostarch { + // We've generated the right files; the go command can do the build. + if vflag > 1 { + errprintf("skip build for cross-compile %s\n", pkg) + } + return + } + + asmArgs := []string{ + pathf("%s/asm", tooldir), + "-I", workdir, + "-I", pathf("%s/pkg/include", goroot), + "-D", "GOOS_" + goos, + "-D", "GOARCH_" + goarch, + "-D", "GOOS_GOARCH_" + goos + "_" + goarch, + "-p", pkg, + } + if goarch == "mips" || goarch == "mipsle" { + // Define GOMIPS_value from gomips. + asmArgs = append(asmArgs, "-D", "GOMIPS_"+gomips) + } + if goarch == "mips64" || goarch == "mips64le" { + // Define GOMIPS64_value from gomips64. + asmArgs = append(asmArgs, "-D", "GOMIPS64_"+gomips64) + } + if goarch == "ppc64" || goarch == "ppc64le" { + // We treat each powerpc version as a superset of functionality. + switch goppc64 { + case "power10": + asmArgs = append(asmArgs, "-D", "GOPPC64_power10") + fallthrough + case "power9": + asmArgs = append(asmArgs, "-D", "GOPPC64_power9") + fallthrough + default: // This should always be power8. + asmArgs = append(asmArgs, "-D", "GOPPC64_power8") + } + } + goasmh := pathf("%s/go_asm.h", workdir) + + // Collect symabis from assembly code. + var symabis string + if len(sfiles) > 0 { + symabis = pathf("%s/symabis", workdir) + var wg sync.WaitGroup + asmabis := append(asmArgs[:len(asmArgs):len(asmArgs)], "-gensymabis", "-o", symabis) + asmabis = append(asmabis, sfiles...) 
+ if err := os.WriteFile(goasmh, nil, 0666); err != nil { + fatalf("cannot write empty go_asm.h: %s", err) + } + bgrun(&wg, dir, asmabis...) + bgwait(&wg) + } + + // Build an importcfg file for the compiler. + buf := &bytes.Buffer{} + for _, imp := range sortedImports { + if imp == "unsafe" { + continue + } + dep := importMap[imp] + if imp != dep { + fmt.Fprintf(buf, "importmap %s=%s\n", imp, dep) + } + fmt.Fprintf(buf, "packagefile %s=%s\n", dep, packagefile(dep)) + } + importcfg := pathf("%s/importcfg", workdir) + if err := os.WriteFile(importcfg, buf.Bytes(), 0666); err != nil { + fatalf("cannot write importcfg file: %v", err) + } + + var archive string + // The next loop will compile individual non-Go files. + // Hand the Go files to the compiler en masse. + // For packages containing assembly, this writes go_asm.h, which + // the assembly files will need. + pkgName := pkg + if strings.HasPrefix(pkg, "cmd/") && strings.Count(pkg, "/") == 1 { + pkgName = "main" + } + b := pathf("%s/_go_.a", workdir) + clean = append(clean, b) + if !ispackcmd { + link = append(link, b) + } else { + archive = b + } + + // Compile Go code. + compile := []string{pathf("%s/compile", tooldir), "-std", "-pack", "-o", b, "-p", pkgName, "-importcfg", importcfg} + if gogcflags != "" { + compile = append(compile, strings.Fields(gogcflags)...) + } + if len(sfiles) > 0 { + compile = append(compile, "-asmhdr", goasmh) + } + if symabis != "" { + compile = append(compile, "-symabis", symabis) + } + if goos == "android" { + compile = append(compile, "-shared") + } + + compile = append(compile, gofiles...) + var wg sync.WaitGroup + // We use bgrun and immediately wait for it instead of calling run() synchronously. + // This executes all jobs through the bgwork channel and allows the process + // to exit cleanly in case an error occurs. + bgrun(&wg, dir, compile...) + bgwait(&wg) + + // Compile the files. + for _, p := range sfiles { + // Assembly file for a Go package. 
+ compile := asmArgs[:len(asmArgs):len(asmArgs)] + + doclean := true + b := pathf("%s/%s", workdir, filepath.Base(p)) + + // Change the last character of the output file (which was c or s). + b = b[:len(b)-1] + "o" + compile = append(compile, "-o", b, p) + bgrun(&wg, dir, compile...) + + link = append(link, b) + if doclean { + clean = append(clean, b) + } + } + bgwait(&wg) + + if ispackcmd { + xremove(link[targ]) + dopack(link[targ], archive, link[targ+1:]) + return + } + + // Remove target before writing it. + xremove(link[targ]) + bgrun(&wg, "", link...) + bgwait(&wg) +} + +// packagefile returns the path to a compiled .a file for the given package +// path. Paths may need to be resolved with resolveVendor first. +func packagefile(pkg string) string { + return pathf("%s/pkg/obj/go-bootstrap/%s_%s/%s.a", goroot, goos, goarch, pkg) +} + +// unixOS is the set of GOOS values matched by the "unix" build tag. +// This is the same list as in go/build/syslist.go and +// cmd/go/internal/imports/build.go. +var unixOS = map[string]bool{ + "aix": true, + "android": true, + "darwin": true, + "dragonfly": true, + "freebsd": true, + "hurd": true, + "illumos": true, + "ios": true, + "linux": true, + "netbsd": true, + "openbsd": true, + "solaris": true, +} + +// matchtag reports whether the tag matches this build. +func matchtag(tag string) bool { + switch tag { + case "gc", "cmd_go_bootstrap", "go1.1": + return true + case "linux": + return goos == "linux" || goos == "android" + case "solaris": + return goos == "solaris" || goos == "illumos" + case "darwin": + return goos == "darwin" || goos == "ios" + case goos, goarch: + return true + case "unix": + return unixOS[goos] + default: + return false + } +} + +// shouldbuild reports whether we should build this file. +// It applies the same rules that are used with context tags +// in package go/build, except it's less picky about the order +// of GOOS and GOARCH. +// We also allow the special tag cmd_go_bootstrap. 
+// See ../go/bootstrap.go and package go/build. +func shouldbuild(file, pkg string) bool { + // Check file name for GOOS or GOARCH. + name := filepath.Base(file) + excluded := func(list []string, ok string) bool { + for _, x := range list { + if x == ok || (ok == "android" && x == "linux") || (ok == "illumos" && x == "solaris") || (ok == "ios" && x == "darwin") { + continue + } + i := strings.Index(name, x) + if i <= 0 || name[i-1] != '_' { + continue + } + i += len(x) + if i == len(name) || name[i] == '.' || name[i] == '_' { + return true + } + } + return false + } + if excluded(okgoos, goos) || excluded(okgoarch, goarch) { + return false + } + + // Omit test files. + if strings.Contains(name, "_test") { + return false + } + + // Check file contents for //go:build lines. + for _, p := range strings.Split(readfile(file), "\n") { + p = strings.TrimSpace(p) + if p == "" { + continue + } + code := p + i := strings.Index(code, "//") + if i > 0 { + code = strings.TrimSpace(code[:i]) + } + if code == "package documentation" { + return false + } + if code == "package main" && pkg != "cmd/go" && pkg != "cmd/cgo" { + return false + } + if !strings.HasPrefix(p, "//") { + break + } + if strings.HasPrefix(p, "//go:build ") { + matched, err := matchexpr(p[len("//go:build "):]) + if err != nil { + errprintf("%s: %v", file, err) + } + return matched + } + } + + return true +} + +// copyfile copies the file src to dst, via memory (so only good for small files). +func copyfile(dst, src string, flag int) { + if vflag > 1 { + errprintf("cp %s %s\n", src, dst) + } + writefile(readfile(src), dst, flag) +} + +// dopack copies the package src to dst, +// appending the files listed in extra. +// The archive format is the traditional Unix ar format. 
+func dopack(dst, src string, extra []string) { + bdst := bytes.NewBufferString(readfile(src)) + for _, file := range extra { + b := readfile(file) + // find last path element for archive member name + i := strings.LastIndex(file, "/") + 1 + j := strings.LastIndex(file, `\`) + 1 + if i < j { + i = j + } + fmt.Fprintf(bdst, "%-16.16s%-12d%-6d%-6d%-8o%-10d`\n", file[i:], 0, 0, 0, 0644, len(b)) + bdst.WriteString(b) + if len(b)&1 != 0 { + bdst.WriteByte(0) + } + } + writefile(bdst.String(), dst, 0) +} + +func clean() { + generated := []byte(generatedHeader) + + // Remove generated source files. + filepath.WalkDir(pathf("%s/src", goroot), func(path string, d fs.DirEntry, err error) error { + switch { + case err != nil: + // ignore + case d.IsDir() && (d.Name() == "vendor" || d.Name() == "testdata"): + return filepath.SkipDir + case d.IsDir() && d.Name() != "dist": + // Remove generated binary named for directory, but not dist out from under us. + exe := filepath.Join(path, d.Name()) + if info, err := os.Stat(exe); err == nil && !info.IsDir() { + xremove(exe) + } + xremove(exe + ".exe") + case !d.IsDir() && strings.HasPrefix(d.Name(), "z"): + // Remove generated file, identified by marker string. + head := make([]byte, 512) + if f, err := os.Open(path); err == nil { + io.ReadFull(f, head) + f.Close() + } + if bytes.HasPrefix(head, generated) { + xremove(path) + } + } + return nil + }) + + if rebuildall { + // Remove object tree. + xremoveall(pathf("%s/pkg/obj/%s_%s", goroot, gohostos, gohostarch)) + + // Remove installed packages and tools. + xremoveall(pathf("%s/pkg/%s_%s", goroot, gohostos, gohostarch)) + xremoveall(pathf("%s/pkg/%s_%s", goroot, goos, goarch)) + xremoveall(pathf("%s/pkg/%s_%s_race", goroot, gohostos, gohostarch)) + xremoveall(pathf("%s/pkg/%s_%s_race", goroot, goos, goarch)) + xremoveall(tooldir) + + // Remove cached version info. + xremove(pathf("%s/VERSION.cache", goroot)) + + // Remove distribution packages. 
+ xremoveall(pathf("%s/pkg/distpack", goroot)) + } +} + +/* + * command implementations + */ + +// The env command prints the default environment. +func cmdenv() { + path := flag.Bool("p", false, "emit updated PATH") + plan9 := flag.Bool("9", gohostos == "plan9", "emit plan 9 syntax") + windows := flag.Bool("w", gohostos == "windows", "emit windows syntax") + xflagparse(0) + + format := "%s=\"%s\";\n" // Include ; to separate variables when 'dist env' output is used with eval. + switch { + case *plan9: + format = "%s='%s'\n" + case *windows: + format = "set %s=%s\r\n" + } + + xprintf(format, "GO111MODULE", "") + xprintf(format, "GOARCH", goarch) + xprintf(format, "GOBIN", gorootBin) + xprintf(format, "GODEBUG", os.Getenv("GODEBUG")) + xprintf(format, "GOENV", "off") + xprintf(format, "GOFLAGS", "") + xprintf(format, "GOHOSTARCH", gohostarch) + xprintf(format, "GOHOSTOS", gohostos) + xprintf(format, "GOOS", goos) + xprintf(format, "GOPROXY", os.Getenv("GOPROXY")) + xprintf(format, "GOROOT", goroot) + xprintf(format, "GOTMPDIR", os.Getenv("GOTMPDIR")) + xprintf(format, "GOTOOLDIR", tooldir) + if goarch == "arm" { + xprintf(format, "GOARM", goarm) + } + if goarch == "386" { + xprintf(format, "GO386", go386) + } + if goarch == "amd64" { + xprintf(format, "GOAMD64", goamd64) + } + if goarch == "mips" || goarch == "mipsle" { + xprintf(format, "GOMIPS", gomips) + } + if goarch == "mips64" || goarch == "mips64le" { + xprintf(format, "GOMIPS64", gomips64) + } + if goarch == "ppc64" || goarch == "ppc64le" { + xprintf(format, "GOPPC64", goppc64) + } + xprintf(format, "GOWORK", "off") + + if *path { + sep := ":" + if gohostos == "windows" { + sep = ";" + } + xprintf(format, "PATH", fmt.Sprintf("%s%s%s", gorootBin, sep, os.Getenv("PATH"))) + + // Also include $DIST_UNMODIFIED_PATH with the original $PATH + // for the internal needs of "dist banner", along with export + // so that it reaches the dist process. See its comment below. 
+ var exportFormat string + if !*windows && !*plan9 { + exportFormat = "export " + format + } else { + exportFormat = format + } + xprintf(exportFormat, "DIST_UNMODIFIED_PATH", os.Getenv("PATH")) + } +} + +var ( + timeLogEnabled = os.Getenv("GOBUILDTIMELOGFILE") != "" + timeLogMu sync.Mutex + timeLogFile *os.File + timeLogStart time.Time +) + +func timelog(op, name string) { + if !timeLogEnabled { + return + } + timeLogMu.Lock() + defer timeLogMu.Unlock() + if timeLogFile == nil { + f, err := os.OpenFile(os.Getenv("GOBUILDTIMELOGFILE"), os.O_RDWR|os.O_APPEND, 0666) + if err != nil { + log.Fatal(err) + } + buf := make([]byte, 100) + n, _ := f.Read(buf) + s := string(buf[:n]) + if i := strings.Index(s, "\n"); i >= 0 { + s = s[:i] + } + i := strings.Index(s, " start") + if i < 0 { + log.Fatalf("time log %s does not begin with start line", os.Getenv("GOBUILDTIMELOGFILE")) + } + t, err := time.Parse(time.UnixDate, s[:i]) + if err != nil { + log.Fatalf("cannot parse time log line %q: %v", s, err) + } + timeLogStart = t + timeLogFile = f + } + t := time.Now() + fmt.Fprintf(timeLogFile, "%s %+.1fs %s %s\n", t.Format(time.UnixDate), t.Sub(timeLogStart).Seconds(), op, name) +} + +// toolenv returns the environment to use when building commands in cmd. +// +// This is a function instead of a variable because the exact toolenv depends +// on the GOOS and GOARCH, and (at least for now) those are modified in place +// to switch between the host and target configurations when cross-compiling. +func toolenv() []string { + var env []string + if !mustLinkExternal(goos, goarch, false) { + // Unless the platform requires external linking, + // we disable cgo to get static binaries for cmd/go and cmd/pprof, + // so that they work on systems without the same dynamic libraries + // as the original build system. + env = append(env, "CGO_ENABLED=0") + } + if isRelease || os.Getenv("GO_BUILDER_NAME") != "" { + // Add -trimpath for reproducible builds of releases. 
+ // Include builders so that -trimpath is well-tested ahead of releases. + // Do not include local development, so that people working in the + // main branch for day-to-day work on the Go toolchain itself can + // still have full paths for stack traces for compiler crashes and the like. + env = append(env, "GOFLAGS=-trimpath -ldflags=-w -gcflags=cmd/...=-dwarf=false") + } + return env +} + +var toolchain = []string{"cmd/asm", "cmd/cgo", "cmd/compile", "cmd/link"} + +// The bootstrap command runs a build from scratch, +// stopping at having installed the go_bootstrap command. +// +// WARNING: This command runs after cmd/dist is built with the Go bootstrap toolchain. +// It rebuilds and installs cmd/dist with the new toolchain, so other +// commands (like "go tool dist test" in run.bash) can rely on bug fixes +// made since the Go bootstrap version, but this function cannot. +func cmdbootstrap() { + timelog("start", "dist bootstrap") + defer timelog("end", "dist bootstrap") + + var debug, distpack, force, noBanner, noClean bool + flag.BoolVar(&rebuildall, "a", rebuildall, "rebuild all") + flag.BoolVar(&debug, "d", debug, "enable debugging of bootstrap process") + flag.BoolVar(&distpack, "distpack", distpack, "write distribution files to pkg/distpack") + flag.BoolVar(&force, "force", force, "build even if the port is marked as broken") + flag.BoolVar(&noBanner, "no-banner", noBanner, "do not print banner") + flag.BoolVar(&noClean, "no-clean", noClean, "print deprecation warning") + + xflagparse(0) + + if noClean { + xprintf("warning: --no-clean is deprecated and has no effect; use 'go install std cmd' instead\n") + } + + // Don't build broken ports by default. + if broken[goos+"/"+goarch] && !force { + fatalf("build stopped because the port %s/%s is marked as broken\n\n"+ + "Use the -force flag to build anyway.\n", goos, goarch) + } + + // Set GOPATH to an internal directory. 
We shouldn't actually + // need to store files here, since the toolchain won't + // depend on modules outside of vendor directories, but if + // GOPATH points somewhere else (e.g., to GOROOT), the + // go tool may complain. + os.Setenv("GOPATH", pathf("%s/pkg/obj/gopath", goroot)) + + // Use a build cache separate from the default user one. + // Also one that will be wiped out during startup, so that + // make.bash really does start from a clean slate. + oldgocache = os.Getenv("GOCACHE") + os.Setenv("GOCACHE", pathf("%s/pkg/obj/go-build", goroot)) + + // Disable GOEXPERIMENT when building toolchain1 and + // go_bootstrap. We don't need any experiments for the + // bootstrap toolchain, and this lets us avoid duplicating the + // GOEXPERIMENT-related build logic from cmd/go here. If the + // bootstrap toolchain is < Go 1.17, it will ignore this + // anyway since GOEXPERIMENT is baked in; otherwise it will + // pick it up from the environment we set here. Once we're + // using toolchain1 with dist as the build system, we need to + // override this to keep the experiments assumed by the + // toolchain and by dist consistent. Once go_bootstrap takes + // over the build process, we'll set this back to the original + // GOEXPERIMENT. + os.Setenv("GOEXPERIMENT", "none") + + if debug { + // cmd/buildid is used in debug mode. + toolchain = append(toolchain, "cmd/buildid") + } + + if isdir(pathf("%s/src/pkg", goroot)) { + fatalf("\n\n"+ + "The Go package sources have moved to $GOROOT/src.\n"+ + "*** %s still exists. ***\n"+ + "It probably contains stale files that may confuse the build.\n"+ + "Please (check what's there and) remove it and try again.\n"+ + "See https://golang.org/s/go14nopkg\n", + pathf("%s/src/pkg", goroot)) + } + + if rebuildall { + clean() + } + + setup() + + timelog("build", "toolchain1") + checkCC() + bootstrapBuildTools() + + // Remember old content of $GOROOT/bin for comparison below. 
+ oldBinFiles, err := filepath.Glob(pathf("%s/bin/*", goroot)) + if err != nil { + fatalf("glob: %v", err) + } + + // For the main bootstrap, building for host os/arch. + oldgoos = goos + oldgoarch = goarch + goos = gohostos + goarch = gohostarch + os.Setenv("GOHOSTARCH", gohostarch) + os.Setenv("GOHOSTOS", gohostos) + os.Setenv("GOARCH", goarch) + os.Setenv("GOOS", goos) + + timelog("build", "go_bootstrap") + xprintf("Building Go bootstrap cmd/go (go_bootstrap) using Go toolchain1.\n") + install("runtime") // dependency not visible in sources; also sets up textflag.h + install("time/tzdata") // no dependency in sources; creates generated file + install("cmd/go") + if vflag > 0 { + xprintf("\n") + } + + gogcflags = os.Getenv("GO_GCFLAGS") // we were using $BOOT_GO_GCFLAGS until now + setNoOpt() + goldflags = os.Getenv("GO_LDFLAGS") // we were using $BOOT_GO_LDFLAGS until now + goBootstrap := pathf("%s/go_bootstrap", tooldir) + if debug { + run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full") + copyfile(pathf("%s/compile1", tooldir), pathf("%s/compile", tooldir), writeExec) + } + + // To recap, so far we have built the new toolchain + // (cmd/asm, cmd/cgo, cmd/compile, cmd/link) + // using the Go bootstrap toolchain and go command. + // Then we built the new go command (as go_bootstrap) + // using the new toolchain and our own build logic (above). + // + // toolchain1 = mk(new toolchain, go1.17 toolchain, go1.17 cmd/go) + // go_bootstrap = mk(new cmd/go, toolchain1, cmd/dist) + // + // The toolchain1 we built earlier is built from the new sources, + // but because it was built using cmd/go it has no build IDs. 
+ // The eventually installed toolchain needs build IDs, so we need + // to do another round: + // + // toolchain2 = mk(new toolchain, toolchain1, go_bootstrap) + // + timelog("build", "toolchain2") + if vflag > 0 { + xprintf("\n") + } + xprintf("Building Go toolchain2 using go_bootstrap and Go toolchain1.\n") + os.Setenv("CC", compilerEnvLookup("CC", defaultcc, goos, goarch)) + // Now that cmd/go is in charge of the build process, enable GOEXPERIMENT. + os.Setenv("GOEXPERIMENT", goexperiment) + // No need to enable PGO for toolchain2. + goInstall(toolenv(), goBootstrap, append([]string{"-pgo=off"}, toolchain...)...) + if debug { + run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full") + copyfile(pathf("%s/compile2", tooldir), pathf("%s/compile", tooldir), writeExec) + } + + // Toolchain2 should be semantically equivalent to toolchain1, + // but it was built using the newly built compiler instead of the Go bootstrap compiler, + // so it should at the least run faster. Also, toolchain1 had no build IDs + // in the binaries, while toolchain2 does. In non-release builds, the + // toolchain's build IDs feed into constructing the build IDs of built targets, + // so in non-release builds, everything now looks out-of-date due to + // toolchain2 having build IDs - that is, due to the go command seeing + // that there are new compilers. In release builds, the toolchain's reported + // version is used in place of the build ID, and the go command does not + // see that change from toolchain1 to toolchain2, so in release builds, + // nothing looks out of date. + // To keep the behavior the same in both non-release and release builds, + // we force-install everything here. + // + // toolchain3 = mk(new toolchain, toolchain2, go_bootstrap) + // + timelog("build", "toolchain3") + if vflag > 0 { + xprintf("\n") + } + xprintf("Building Go toolchain3 using go_bootstrap and Go toolchain2.\n") + goInstall(toolenv(), goBootstrap, append([]string{"-a"}, toolchain...)...) 
+ if debug { + run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full") + copyfile(pathf("%s/compile3", tooldir), pathf("%s/compile", tooldir), writeExec) + } + + // Now that toolchain3 has been built from scratch, its compiler and linker + // should have accurate build IDs suitable for caching. + // Now prime the build cache with the rest of the standard library for + // testing, and so that the user can run 'go install std cmd' to quickly + // iterate on local changes without waiting for a full rebuild. + if _, err := os.Stat(pathf("%s/VERSION", goroot)); err == nil { + // If we have a VERSION file, then we use the Go version + // instead of build IDs as a cache key, and there is no guarantee + // that code hasn't changed since the last time we ran a build + // with this exact VERSION file (especially if someone is working + // on a release branch). We must not fall back to the shared build cache + // in this case. Leave $GOCACHE alone. + } else { + os.Setenv("GOCACHE", oldgocache) + } + + if goos == oldgoos && goarch == oldgoarch { + // Common case - not setting up for cross-compilation. + timelog("build", "toolchain") + if vflag > 0 { + xprintf("\n") + } + xprintf("Building packages and commands for %s/%s.\n", goos, goarch) + } else { + // GOOS/GOARCH does not match GOHOSTOS/GOHOSTARCH. + // Finish GOHOSTOS/GOHOSTARCH installation and then + // run GOOS/GOARCH installation. 
+ timelog("build", "host toolchain") + if vflag > 0 { + xprintf("\n") + } + xprintf("Building commands for host, %s/%s.\n", goos, goarch) + goInstall(toolenv(), goBootstrap, "cmd") + checkNotStale(toolenv(), goBootstrap, "cmd") + checkNotStale(toolenv(), gorootBinGo, "cmd") + + timelog("build", "target toolchain") + if vflag > 0 { + xprintf("\n") + } + goos = oldgoos + goarch = oldgoarch + os.Setenv("GOOS", goos) + os.Setenv("GOARCH", goarch) + os.Setenv("CC", compilerEnvLookup("CC", defaultcc, goos, goarch)) + xprintf("Building packages and commands for target, %s/%s.\n", goos, goarch) + } + goInstall(nil, goBootstrap, "std") + goInstall(toolenv(), goBootstrap, "cmd") + checkNotStale(toolenv(), goBootstrap, toolchain...) + checkNotStale(nil, goBootstrap, "std") + checkNotStale(toolenv(), goBootstrap, "cmd") + checkNotStale(nil, gorootBinGo, "std") + checkNotStale(toolenv(), gorootBinGo, "cmd") + if debug { + run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full") + checkNotStale(toolenv(), goBootstrap, toolchain...) + copyfile(pathf("%s/compile4", tooldir), pathf("%s/compile", tooldir), writeExec) + } + + // Check that there are no new files in $GOROOT/bin other than + // go and gofmt and $GOOS_$GOARCH (target bin when cross-compiling). + binFiles, err := filepath.Glob(pathf("%s/bin/*", goroot)) + if err != nil { + fatalf("glob: %v", err) + } + + ok := map[string]bool{} + for _, f := range oldBinFiles { + ok[f] = true + } + for _, f := range binFiles { + if gohostos == "darwin" && filepath.Base(f) == ".DS_Store" { + continue // unfortunate but not unexpected + } + elem := strings.TrimSuffix(filepath.Base(f), ".exe") + if !ok[f] && elem != "go" && elem != "gofmt" && elem != goos+"_"+goarch { + fatalf("unexpected new file in $GOROOT/bin: %s", elem) + } + } + + // Remove go_bootstrap now that we're done. + xremove(pathf("%s/go_bootstrap"+exe, tooldir)) + + if goos == "android" { + // Make sure the exec wrapper will sync a fresh $GOROOT to the device. 
+ xremove(pathf("%s/go_android_exec-adb-sync-status", os.TempDir())) + } + + if wrapperPath := wrapperPathFor(goos, goarch); wrapperPath != "" { + oldcc := os.Getenv("CC") + os.Setenv("GOOS", gohostos) + os.Setenv("GOARCH", gohostarch) + os.Setenv("CC", compilerEnvLookup("CC", defaultcc, gohostos, gohostarch)) + goCmd(nil, gorootBinGo, "build", "-o", pathf("%s/go_%s_%s_exec%s", gorootBin, goos, goarch, exe), wrapperPath) + // Restore environment. + // TODO(elias.naur): support environment variables in goCmd? + os.Setenv("GOOS", goos) + os.Setenv("GOARCH", goarch) + os.Setenv("CC", oldcc) + } + + if distpack { + xprintf("Packaging archives for %s/%s.\n", goos, goarch) + run("", ShowOutput|CheckExit, pathf("%s/distpack", tooldir)) + } + + // Print trailing banner unless instructed otherwise. + if !noBanner { + banner() + } +} + +func wrapperPathFor(goos, goarch string) string { + switch { + case goos == "android": + if gohostos != "android" { + return pathf("%s/misc/go_android_exec/main.go", goroot) + } + case goos == "ios": + if gohostos != "ios" { + return pathf("%s/misc/ios/go_ios_exec.go", goroot) + } + } + return "" +} + +func goInstall(env []string, goBinary string, args ...string) { + goCmd(env, goBinary, "install", args...) +} + +func appendCompilerFlags(args []string) []string { + if gogcflags != "" { + args = append(args, "-gcflags=all="+gogcflags) + } + if goldflags != "" { + args = append(args, "-ldflags=all="+goldflags) + } + return args +} + +func goCmd(env []string, goBinary string, cmd string, args ...string) { + goCmd := []string{goBinary, cmd} + if noOpt { + goCmd = append(goCmd, "-tags=noopt") + } + goCmd = appendCompilerFlags(goCmd) + if vflag > 0 { + goCmd = append(goCmd, "-v") + } + + // Force only one process at a time on vx32 emulation. + if gohostos == "plan9" && os.Getenv("sysname") == "vx32" { + goCmd = append(goCmd, "-p=1") + } + + runEnv(workdir, ShowOutput|CheckExit, env, append(goCmd, args...)...) 
+} + +func checkNotStale(env []string, goBinary string, targets ...string) { + goCmd := []string{goBinary, "list"} + if noOpt { + goCmd = append(goCmd, "-tags=noopt") + } + goCmd = appendCompilerFlags(goCmd) + goCmd = append(goCmd, "-f={{if .Stale}}\tSTALE {{.ImportPath}}: {{.StaleReason}}{{end}}") + + out := runEnv(workdir, CheckExit, env, append(goCmd, targets...)...) + if strings.Contains(out, "\tSTALE ") { + os.Setenv("GODEBUG", "gocachehash=1") + for _, target := range []string{"runtime/internal/sys", "cmd/dist", "cmd/link"} { + if strings.Contains(out, "STALE "+target) { + run(workdir, ShowOutput|CheckExit, goBinary, "list", "-f={{.ImportPath}} {{.Stale}}", target) + break + } + } + fatalf("unexpected stale targets reported by %s list -gcflags=\"%s\" -ldflags=\"%s\" for %v (consider rerunning with GOMAXPROCS=1 GODEBUG=gocachehash=1):\n%s", goBinary, gogcflags, goldflags, targets, out) + } +} + +// Cannot use go/build directly because cmd/dist for a new release +// builds against an old release's go/build, which may be out of sync. +// To reduce duplication, we generate the list for go/build from this. +// +// We list all supported platforms in this list, so that this is the +// single point of truth for supported platforms. This list is used +// by 'go tool dist list'. 
+var cgoEnabled = map[string]bool{ + "aix/ppc64": true, + "darwin/amd64": true, + "darwin/arm64": true, + "dragonfly/amd64": true, + "freebsd/386": true, + "freebsd/amd64": true, + "freebsd/arm": true, + "freebsd/arm64": true, + "freebsd/riscv64": true, + "illumos/amd64": true, + "linux/386": true, + "linux/amd64": true, + "linux/arm": true, + "linux/arm64": true, + "linux/loong64": true, + "linux/ppc64": false, + "linux/ppc64le": true, + "linux/mips": true, + "linux/mipsle": true, + "linux/mips64": true, + "linux/mips64le": true, + "linux/riscv64": true, + "linux/s390x": true, + "linux/sparc64": true, + "android/386": true, + "android/amd64": true, + "android/arm": true, + "android/arm64": true, + "ios/arm64": true, + "ios/amd64": true, + "js/wasm": false, + "wasip1/wasm": false, + "netbsd/386": true, + "netbsd/amd64": true, + "netbsd/arm": true, + "netbsd/arm64": true, + "openbsd/386": true, + "openbsd/amd64": true, + "openbsd/arm": true, + "openbsd/arm64": true, + "openbsd/mips64": true, + "openbsd/ppc64": false, + "openbsd/riscv64": false, + "plan9/386": false, + "plan9/amd64": false, + "plan9/arm": false, + "solaris/amd64": true, + "windows/386": true, + "windows/amd64": true, + "windows/arm": false, + "windows/arm64": true, +} + +// List of platforms that are marked as broken ports. +// These require -force flag to build, and also +// get filtered out of cgoEnabled for 'dist list'. +// See go.dev/issue/56679. +var broken = map[string]bool{ + "linux/sparc64": true, // An incomplete port. See CL 132155. + "openbsd/mips64": true, // Broken: go.dev/issue/58110. + "openbsd/riscv64": true, // An incomplete port: go.dev/issue/55999. +} + +// List of platforms which are first class ports. See go.dev/issue/38874. 
+var firstClass = map[string]bool{ + "darwin/amd64": true, + "darwin/arm64": true, + "linux/386": true, + "linux/amd64": true, + "linux/arm": true, + "linux/arm64": true, + "windows/386": true, + "windows/amd64": true, +} + +// We only need CC if cgo is forced on, or if the platform requires external linking. +// Otherwise the go command will automatically disable it. +func needCC() bool { + return os.Getenv("CGO_ENABLED") == "1" || mustLinkExternal(gohostos, gohostarch, false) +} + +func checkCC() { + if !needCC() { + return + } + cc1 := defaultcc[""] + if cc1 == "" { + cc1 = "gcc" + for _, os := range clangos { + if gohostos == os { + cc1 = "clang" + break + } + } + } + cc, err := quotedSplit(cc1) + if err != nil { + fatalf("split CC: %v", err) + } + var ccHelp = append(cc, "--help") + + if output, err := exec.Command(ccHelp[0], ccHelp[1:]...).CombinedOutput(); err != nil { + outputHdr := "" + if len(output) > 0 { + outputHdr = "\nCommand output:\n\n" + } + fatalf("cannot invoke C compiler %q: %v\n\n"+ + "Go needs a system C compiler for use with cgo.\n"+ + "To set a C compiler, set CC=the-compiler.\n"+ + "To disable cgo, set CGO_ENABLED=0.\n%s%s", cc, err, outputHdr, output) + } +} + +func defaulttarg() string { + // xgetwd might return a path with symlinks fully resolved, and if + // there happens to be symlinks in goroot, then the hasprefix test + // will never succeed. Instead, we use xrealwd to get a canonical + // goroot/src before the comparison to avoid this problem. + pwd := xgetwd() + src := pathf("%s/src/", goroot) + real_src := xrealwd(src) + if !strings.HasPrefix(pwd, real_src) { + fatalf("current directory %s is not under %s", pwd, real_src) + } + pwd = pwd[len(real_src):] + // guard against xrealwd returning the directory without the trailing / + pwd = strings.TrimPrefix(pwd, "/") + + return pwd +} + +// Install installs the list of packages named on the command line. 
+func cmdinstall() { + xflagparse(-1) + + if flag.NArg() == 0 { + install(defaulttarg()) + } + + for _, arg := range flag.Args() { + install(arg) + } +} + +// Clean deletes temporary objects. +func cmdclean() { + xflagparse(0) + clean() +} + +// Banner prints the 'now you've installed Go' banner. +func cmdbanner() { + xflagparse(0) + banner() +} + +func banner() { + if vflag > 0 { + xprintf("\n") + } + xprintf("---\n") + xprintf("Installed Go for %s/%s in %s\n", goos, goarch, goroot) + xprintf("Installed commands in %s\n", gorootBin) + + if !xsamefile(goroot_final, goroot) { + // If the files are to be moved, don't check that gobin + // is on PATH; assume they know what they are doing. + } else if gohostos == "plan9" { + // Check that GOROOT/bin is bound before /bin. + pid := strings.Replace(readfile("#c/pid"), " ", "", -1) + ns := fmt.Sprintf("/proc/%s/ns", pid) + if !strings.Contains(readfile(ns), fmt.Sprintf("bind -b %s /bin", gorootBin)) { + xprintf("*** You need to bind %s before /bin.\n", gorootBin) + } + } else { + // Check that GOROOT/bin appears in $PATH. + pathsep := ":" + if gohostos == "windows" { + pathsep = ";" + } + path := os.Getenv("PATH") + if p, ok := os.LookupEnv("DIST_UNMODIFIED_PATH"); ok { + // Scripts that modify $PATH and then run dist should also provide + // dist with an unmodified copy of $PATH via $DIST_UNMODIFIED_PATH. + // Use it here when determining if the user still needs to update + // their $PATH. See go.dev/issue/42563. + path = p + } + if !strings.Contains(pathsep+path+pathsep, pathsep+gorootBin+pathsep) { + xprintf("*** You need to add %s to your PATH.\n", gorootBin) + } + } + + if !xsamefile(goroot_final, goroot) { + xprintf("\n"+ + "The binaries expect %s to be copied or moved to %s\n", + goroot, goroot_final) + } +} + +// Version prints the Go version. +func cmdversion() { + xflagparse(0) + xprintf("%s\n", findgoversion()) +} + +// cmdlist lists all supported platforms. 
+func cmdlist() { + jsonFlag := flag.Bool("json", false, "produce JSON output") + brokenFlag := flag.Bool("broken", false, "include broken ports") + xflagparse(0) + + var plats []string + for p := range cgoEnabled { + if broken[p] && !*brokenFlag { + continue + } + plats = append(plats, p) + } + sort.Strings(plats) + + if !*jsonFlag { + for _, p := range plats { + xprintf("%s\n", p) + } + return + } + + type jsonResult struct { + GOOS string + GOARCH string + CgoSupported bool + FirstClass bool + Broken bool `json:",omitempty"` + } + var results []jsonResult + for _, p := range plats { + fields := strings.Split(p, "/") + results = append(results, jsonResult{ + GOOS: fields[0], + GOARCH: fields[1], + CgoSupported: cgoEnabled[p], + FirstClass: firstClass[p], + Broken: broken[p], + }) + } + out, err := json.MarshalIndent(results, "", "\t") + if err != nil { + fatalf("json marshal error: %v", err) + } + if _, err := os.Stdout.Write(out); err != nil { + fatalf("write failed: %v", err) + } +} + +func setNoOpt() { + for _, gcflag := range strings.Split(gogcflags, " ") { + if gcflag == "-N" || gcflag == "-l" { + noOpt = true + break + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/build_test.go b/platform/dbops/binaries/go/go/src/cmd/dist/build_test.go new file mode 100644 index 0000000000000000000000000000000000000000..158ac2678d404122ab54f746239c7b7af219fbe8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/build_test.go @@ -0,0 +1,26 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "internal/platform" + "testing" +) + +// TestMustLinkExternal verifies that the mustLinkExternal helper +// function matches internal/platform.MustLinkExternal. 
+func TestMustLinkExternal(t *testing.T) { + for _, goos := range okgoos { + for _, goarch := range okgoarch { + for _, cgoEnabled := range []bool{true, false} { + got := mustLinkExternal(goos, goarch, cgoEnabled) + want := platform.MustLinkExternal(goos, goarch, cgoEnabled) + if got != want { + t.Errorf("mustLinkExternal(%q, %q, %v) = %v; want %v", goos, goarch, cgoEnabled, got, want) + } + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/buildgo.go b/platform/dbops/binaries/go/go/src/cmd/dist/buildgo.go new file mode 100644 index 0000000000000000000000000000000000000000..884e9d729a6a3596c7713a30ebeee413fd2b3245 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/buildgo.go @@ -0,0 +1,162 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" +) + +/* + * Helpers for building cmd/go and cmd/cgo. + */ + +// generatedHeader is the string that all source files generated by dist start with. +// +// DO NOT CHANGE THIS STRING. If this string is changed then during +// +// ./make.bash +// git checkout other-rev +// ./make.bash +// +// the second make.bash will not find the files generated by the first make.bash +// and will not clean up properly. +const generatedHeader = "// Code generated by go tool dist; DO NOT EDIT.\n\n" + +// writeHeader emits the standard "generated by" header for all files generated +// by dist. 
+func writeHeader(w io.Writer) { + fmt.Fprint(w, generatedHeader) +} + +// mkzdefaultcc writes zdefaultcc.go: +// +// package main +// const defaultCC = +// const defaultCXX = +// const defaultPkgConfig = +// +// It is invoked to write cmd/go/internal/cfg/zdefaultcc.go +// but we also write cmd/cgo/zdefaultcc.go +func mkzdefaultcc(dir, file string) { + if strings.Contains(file, filepath.FromSlash("go/internal/cfg")) { + var buf strings.Builder + writeHeader(&buf) + fmt.Fprintf(&buf, "package cfg\n") + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "const DefaultPkgConfig = `%s`\n", defaultpkgconfig) + buf.WriteString(defaultCCFunc("DefaultCC", defaultcc)) + buf.WriteString(defaultCCFunc("DefaultCXX", defaultcxx)) + writefile(buf.String(), file, writeSkipSame) + return + } + + var buf strings.Builder + writeHeader(&buf) + fmt.Fprintf(&buf, "package main\n") + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "const defaultPkgConfig = `%s`\n", defaultpkgconfig) + buf.WriteString(defaultCCFunc("defaultCC", defaultcc)) + buf.WriteString(defaultCCFunc("defaultCXX", defaultcxx)) + writefile(buf.String(), file, writeSkipSame) +} + +func defaultCCFunc(name string, defaultcc map[string]string) string { + var buf strings.Builder + + fmt.Fprintf(&buf, "func %s(goos, goarch string) string {\n", name) + fmt.Fprintf(&buf, "\tswitch goos+`/`+goarch {\n") + var keys []string + for k := range defaultcc { + if k != "" { + keys = append(keys, k) + } + } + sort.Strings(keys) + for _, k := range keys { + fmt.Fprintf(&buf, "\tcase %s:\n\t\treturn %s\n", quote(k), quote(defaultcc[k])) + } + fmt.Fprintf(&buf, "\t}\n") + if cc := defaultcc[""]; cc != "" { + fmt.Fprintf(&buf, "\treturn %s\n", quote(cc)) + } else { + clang, gcc := "clang", "gcc" + if strings.HasSuffix(name, "CXX") { + clang, gcc = "clang++", "g++" + } + fmt.Fprintf(&buf, "\tswitch goos {\n") + fmt.Fprintf(&buf, "\tcase ") + for i, os := range clangos { + if i > 0 { + fmt.Fprintf(&buf, ", ") + } + fmt.Fprintf(&buf, "%s", quote(os)) + } + 
fmt.Fprintf(&buf, ":\n") + fmt.Fprintf(&buf, "\t\treturn %s\n", quote(clang)) + fmt.Fprintf(&buf, "\t}\n") + fmt.Fprintf(&buf, "\treturn %s\n", quote(gcc)) + } + fmt.Fprintf(&buf, "}\n") + + return buf.String() +} + +// mkzcgo writes zcgo.go for the go/build package: +// +// package build +// const defaultCGO_ENABLED = +// +// It is invoked to write go/build/zcgo.go. +func mkzcgo(dir, file string) { + var buf strings.Builder + writeHeader(&buf) + fmt.Fprintf(&buf, "package build\n") + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "const defaultCGO_ENABLED = %s\n", quote(os.Getenv("CGO_ENABLED"))) + + writefile(buf.String(), file, writeSkipSame) +} + +// mktzdata src/time/tzdata/zzipdata.go: +// +// package tzdata +// const zipdata = "PK..." +func mktzdata(dir, file string) { + zip := readfile(filepath.Join(dir, "../../../lib/time/zoneinfo.zip")) + + var buf strings.Builder + writeHeader(&buf) + fmt.Fprintf(&buf, "package tzdata\n") + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "const zipdata = %s\n", quote(zip)) + + writefile(buf.String(), file, writeSkipSame) +} + +// quote is like strconv.Quote but simpler and has output +// that does not depend on the exact Go bootstrap version. +func quote(s string) string { + const hex = "0123456789abcdef" + var out strings.Builder + out.WriteByte('"') + for i := 0; i < len(s); i++ { + c := s[i] + if 0x20 <= c && c <= 0x7E && c != '"' && c != '\\' { + out.WriteByte(c) + } else { + out.WriteByte('\\') + out.WriteByte('x') + out.WriteByte(hex[c>>4]) + out.WriteByte(hex[c&0xf]) + } + } + out.WriteByte('"') + return out.String() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/buildruntime.go b/platform/dbops/binaries/go/go/src/cmd/dist/buildruntime.go new file mode 100644 index 0000000000000000000000000000000000000000..1de78f0fdb2eb10f5384a49081de95d9021c565d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/buildruntime.go @@ -0,0 +1,81 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "strings" +) + +/* + * Helpers for building runtime. + */ + +// mkzversion writes zversion.go: +// +// package sys +// +// (Nothing right now!) +func mkzversion(dir, file string) { + var buf strings.Builder + writeHeader(&buf) + fmt.Fprintf(&buf, "package sys\n") + writefile(buf.String(), file, writeSkipSame) +} + +// mkbuildcfg writes internal/buildcfg/zbootstrap.go: +// +// package buildcfg +// +// const defaultGOROOT = +// const defaultGO386 = +// ... +// const defaultGOOS = runtime.GOOS +// const defaultGOARCH = runtime.GOARCH +// +// The use of runtime.GOOS and runtime.GOARCH makes sure that +// a cross-compiled compiler expects to compile for its own target +// system. That is, if on a Mac you do: +// +// GOOS=linux GOARCH=ppc64 go build cmd/compile +// +// the resulting compiler will default to generating linux/ppc64 object files. +// This is more useful than having it default to generating objects for the +// original target (in this example, a Mac). 
+func mkbuildcfg(file string) { + var buf strings.Builder + writeHeader(&buf) + fmt.Fprintf(&buf, "package buildcfg\n") + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "import \"runtime\"\n") + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "const defaultGO386 = `%s`\n", go386) + fmt.Fprintf(&buf, "const defaultGOAMD64 = `%s`\n", goamd64) + fmt.Fprintf(&buf, "const defaultGOARM = `%s`\n", goarm) + fmt.Fprintf(&buf, "const defaultGOMIPS = `%s`\n", gomips) + fmt.Fprintf(&buf, "const defaultGOMIPS64 = `%s`\n", gomips64) + fmt.Fprintf(&buf, "const defaultGOPPC64 = `%s`\n", goppc64) + fmt.Fprintf(&buf, "const defaultGOEXPERIMENT = `%s`\n", goexperiment) + fmt.Fprintf(&buf, "const defaultGO_EXTLINK_ENABLED = `%s`\n", goextlinkenabled) + fmt.Fprintf(&buf, "const defaultGO_LDSO = `%s`\n", defaultldso) + fmt.Fprintf(&buf, "const version = `%s`\n", findgoversion()) + fmt.Fprintf(&buf, "const defaultGOOS = runtime.GOOS\n") + fmt.Fprintf(&buf, "const defaultGOARCH = runtime.GOARCH\n") + + writefile(buf.String(), file, writeSkipSame) +} + +// mkobjabi writes cmd/internal/objabi/zbootstrap.go: +// +// package objabi +// +// (Nothing right now!) +func mkobjabi(file string) { + var buf strings.Builder + writeHeader(&buf) + fmt.Fprintf(&buf, "package objabi\n") + + writefile(buf.String(), file, writeSkipSame) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/buildtag.go b/platform/dbops/binaries/go/go/src/cmd/dist/buildtag.go new file mode 100644 index 0000000000000000000000000000000000000000..24776a0aaf735f14fe79caf53510248f09112742 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/buildtag.go @@ -0,0 +1,133 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "strings" +) + +// exprParser is a //go:build expression parser and evaluator. 
+// The parser is a trivial precedence-based parser which is still +// almost overkill for these very simple expressions. +type exprParser struct { + x string + t exprToken // upcoming token +} + +// val is the value type result of parsing. +// We don't keep a parse tree, just the value of the expression. +type val bool + +// exprToken describes a single token in the input. +// Prefix operators define a prefix func that parses the +// upcoming value. Binary operators define an infix func +// that combines two values according to the operator. +// In that case, the parsing loop parses the two values. +type exprToken struct { + tok string + prec int + prefix func(*exprParser) val + infix func(val, val) val +} + +var exprTokens []exprToken + +func init() { // init to break init cycle + exprTokens = []exprToken{ + {tok: "&&", prec: 1, infix: func(x, y val) val { return x && y }}, + {tok: "||", prec: 2, infix: func(x, y val) val { return x || y }}, + {tok: "!", prec: 3, prefix: (*exprParser).not}, + {tok: "(", prec: 3, prefix: (*exprParser).paren}, + {tok: ")"}, + } +} + +// matchexpr parses and evaluates the //go:build expression x. +func matchexpr(x string) (matched bool, err error) { + defer func() { + if e := recover(); e != nil { + matched = false + err = fmt.Errorf("parsing //go:build line: %v", e) + } + }() + + p := &exprParser{x: x} + p.next() + v := p.parse(0) + if p.t.tok != "end of expression" { + panic("unexpected " + p.t.tok) + } + return bool(v), nil +} + +// parse parses an expression, including binary operators at precedence >= prec. +func (p *exprParser) parse(prec int) val { + if p.t.prefix == nil { + panic("unexpected " + p.t.tok) + } + v := p.t.prefix(p) + for p.t.prec >= prec && p.t.infix != nil { + t := p.t + p.next() + v = t.infix(v, p.parse(t.prec+1)) + } + return v +} + +// not is the prefix parser for a ! token. +func (p *exprParser) not() val { + p.next() + return !p.parse(100) +} + +// paren is the prefix parser for a ( token. 
+func (p *exprParser) paren() val { + p.next() + v := p.parse(0) + if p.t.tok != ")" { + panic("missing )") + } + p.next() + return v +} + +// next advances the parser to the next token, +// leaving the token in p.t. +func (p *exprParser) next() { + p.x = strings.TrimSpace(p.x) + if p.x == "" { + p.t = exprToken{tok: "end of expression"} + return + } + for _, t := range exprTokens { + if strings.HasPrefix(p.x, t.tok) { + p.x = p.x[len(t.tok):] + p.t = t + return + } + } + + i := 0 + for i < len(p.x) && validtag(p.x[i]) { + i++ + } + if i == 0 { + panic(fmt.Sprintf("syntax error near %#q", rune(p.x[i]))) + } + tag := p.x[:i] + p.x = p.x[i:] + p.t = exprToken{ + tok: "tag", + prefix: func(p *exprParser) val { + p.next() + return val(matchtag(tag)) + }, + } +} + +func validtag(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '.' || c == '_' +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/buildtag_test.go b/platform/dbops/binaries/go/go/src/cmd/dist/buildtag_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f64abfd1f18c252ed1b8a8a9cbbbea5a286abc03 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/buildtag_test.go @@ -0,0 +1,43 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" + "reflect" + "testing" +) + +var buildParserTests = []struct { + x string + matched bool + err error +}{ + {"gc", true, nil}, + {"gccgo", false, nil}, + {"!gc", false, nil}, + {"gc && gccgo", false, nil}, + {"gc || gccgo", true, nil}, + {"gc || (gccgo && !gccgo)", true, nil}, + {"gc && (gccgo || !gccgo)", true, nil}, + {"!(gc && (gccgo || !gccgo))", false, nil}, + {"gccgo || gc", true, nil}, + {"!(!(!(gccgo || gc)))", false, nil}, + {"compiler_bootstrap", false, nil}, + {"cmd_go_bootstrap", true, nil}, + {"syntax(error", false, fmt.Errorf("parsing //go:build line: unexpected (")}, + {"(gc", false, fmt.Errorf("parsing //go:build line: missing )")}, + {"gc gc", false, fmt.Errorf("parsing //go:build line: unexpected tag")}, + {"(gc))", false, fmt.Errorf("parsing //go:build line: unexpected )")}, +} + +func TestBuildParser(t *testing.T) { + for _, tt := range buildParserTests { + matched, err := matchexpr(tt.x) + if matched != tt.matched || !reflect.DeepEqual(err, tt.err) { + t.Errorf("matchexpr(%q) = %v, %v; want %v, %v", tt.x, matched, err, tt.matched, tt.err) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/buildtool.go b/platform/dbops/binaries/go/go/src/cmd/dist/buildtool.go new file mode 100644 index 0000000000000000000000000000000000000000..3232896f262564f66d0565854eb249fd01e3e0af --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/buildtool.go @@ -0,0 +1,334 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Build toolchain using Go bootstrap version. +// +// The general strategy is to copy the source files we need into +// a new GOPATH workspace, adjust import paths appropriately, +// invoke the Go bootstrap toolchains go command to build those sources, +// and then copy the binaries back. 
+ +package main + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "strings" +) + +// bootstrapDirs is a list of directories holding code that must be +// compiled with the Go bootstrap toolchain to produce the bootstrapTargets. +// All directories in this list are relative to and must be below $GOROOT/src. +// +// The list has two kinds of entries: names beginning with cmd/ with +// no other slashes, which are commands, and other paths, which are packages +// supporting the commands. Packages in the standard library can be listed +// if a newer copy needs to be substituted for the Go bootstrap copy when used +// by the command packages. Paths ending with /... automatically +// include all packages within subdirectories as well. +// These will be imported during bootstrap as bootstrap/name, like bootstrap/math/big. +var bootstrapDirs = []string{ + "cmp", + "cmd/asm", + "cmd/asm/internal/...", + "cmd/cgo", + "cmd/compile", + "cmd/compile/internal/...", + "cmd/internal/archive", + "cmd/internal/bio", + "cmd/internal/codesign", + "cmd/internal/dwarf", + "cmd/internal/edit", + "cmd/internal/gcprog", + "cmd/internal/goobj", + "cmd/internal/notsha256", + "cmd/internal/obj/...", + "cmd/internal/objabi", + "cmd/internal/pkgpath", + "cmd/internal/quoted", + "cmd/internal/src", + "cmd/internal/sys", + "cmd/link", + "cmd/link/internal/...", + "compress/flate", + "compress/zlib", + "container/heap", + "debug/dwarf", + "debug/elf", + "debug/macho", + "debug/pe", + "go/build/constraint", + "go/constant", + "go/version", + "internal/abi", + "internal/coverage", + "cmd/internal/cov/covcmd", + "internal/bisect", + "internal/buildcfg", + "internal/goarch", + "internal/godebugs", + "internal/goexperiment", + "internal/goroot", + "internal/gover", + "internal/goversion", + // internal/lazyregexp is provided by Go 1.17, which permits it to + // be imported by other packages in this list, but is not provided + // by the Go 1.17 version of gccgo. 
It's on this list only to + // support gccgo, and can be removed if we require gccgo 14 or later. + "internal/lazyregexp", + "internal/pkgbits", + "internal/platform", + "internal/profile", + "internal/race", + "internal/saferio", + "internal/syscall/unix", + "internal/types/errors", + "internal/unsafeheader", + "internal/xcoff", + "internal/zstd", + "math/bits", + "sort", +} + +// File prefixes that are ignored by go/build anyway, and cause +// problems with editor generated temporary files (#18931). +var ignorePrefixes = []string{ + ".", + "_", + "#", +} + +// File suffixes that use build tags introduced since Go 1.17. +// These must not be copied into the bootstrap build directory. +// Also ignore test files. +var ignoreSuffixes = []string{ + "_test.s", + "_test.go", + // Skip PGO profile. No need to build toolchain1 compiler + // with PGO. And as it is not a text file the import path + // rewrite will break it. + ".pgo", +} + +var tryDirs = []string{ + "sdk/go1.17", + "go1.17", +} + +func bootstrapBuildTools() { + goroot_bootstrap := os.Getenv("GOROOT_BOOTSTRAP") + if goroot_bootstrap == "" { + home := os.Getenv("HOME") + goroot_bootstrap = pathf("%s/go1.4", home) + for _, d := range tryDirs { + if p := pathf("%s/%s", home, d); isdir(p) { + goroot_bootstrap = p + } + } + } + xprintf("Building Go toolchain1 using %s.\n", goroot_bootstrap) + + mkbuildcfg(pathf("%s/src/internal/buildcfg/zbootstrap.go", goroot)) + mkobjabi(pathf("%s/src/cmd/internal/objabi/zbootstrap.go", goroot)) + + // Use $GOROOT/pkg/bootstrap as the bootstrap workspace root. + // We use a subdirectory of $GOROOT/pkg because that's the + // space within $GOROOT where we store all generated objects. + // We could use a temporary directory outside $GOROOT instead, + // but it is easier to debug on failure if the files are in a known location. 
+ workspace := pathf("%s/pkg/bootstrap", goroot) + xremoveall(workspace) + xatexit(func() { xremoveall(workspace) }) + base := pathf("%s/src/bootstrap", workspace) + xmkdirall(base) + + // Copy source code into $GOROOT/pkg/bootstrap and rewrite import paths. + writefile("module bootstrap\ngo 1.20\n", pathf("%s/%s", base, "go.mod"), 0) + for _, dir := range bootstrapDirs { + recurse := strings.HasSuffix(dir, "/...") + dir = strings.TrimSuffix(dir, "/...") + filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + fatalf("walking bootstrap dirs failed: %v: %v", path, err) + } + + name := filepath.Base(path) + src := pathf("%s/src/%s", goroot, path) + dst := pathf("%s/%s", base, path) + + if info.IsDir() { + if !recurse && path != dir || name == "testdata" { + return filepath.SkipDir + } + + xmkdirall(dst) + if path == "cmd/cgo" { + // Write to src because we need the file both for bootstrap + // and for later in the main build. + mkzdefaultcc("", pathf("%s/zdefaultcc.go", src)) + mkzdefaultcc("", pathf("%s/zdefaultcc.go", dst)) + } + return nil + } + + for _, pre := range ignorePrefixes { + if strings.HasPrefix(name, pre) { + return nil + } + } + for _, suf := range ignoreSuffixes { + if strings.HasSuffix(name, suf) { + return nil + } + } + + text := bootstrapRewriteFile(src) + writefile(text, dst, 0) + return nil + }) + } + + // Set up environment for invoking Go bootstrap toolchains go command. + // GOROOT points at Go bootstrap GOROOT, + // GOPATH points at our bootstrap workspace, + // GOBIN is empty, so that binaries are installed to GOPATH/bin, + // and GOOS, GOHOSTOS, GOARCH, and GOHOSTOS are empty, + // so that Go bootstrap toolchain builds whatever kind of binary it knows how to build. + // Restore GOROOT, GOPATH, and GOBIN when done. + // Don't bother with GOOS, GOHOSTOS, GOARCH, and GOHOSTARCH, + // because setup will take care of those when bootstrapBuildTools returns. 
+ + defer os.Setenv("GOROOT", os.Getenv("GOROOT")) + os.Setenv("GOROOT", goroot_bootstrap) + + defer os.Setenv("GOPATH", os.Getenv("GOPATH")) + os.Setenv("GOPATH", workspace) + + defer os.Setenv("GOBIN", os.Getenv("GOBIN")) + os.Setenv("GOBIN", "") + + os.Setenv("GOOS", "") + os.Setenv("GOHOSTOS", "") + os.Setenv("GOARCH", "") + os.Setenv("GOHOSTARCH", "") + + // Run Go bootstrap to build binaries. + // Use the math_big_pure_go build tag to disable the assembly in math/big + // which may contain unsupported instructions. + // Use the purego build tag to disable other assembly code, + // such as in cmd/internal/notsha256. + cmd := []string{ + pathf("%s/bin/go", goroot_bootstrap), + "install", + "-tags=math_big_pure_go compiler_bootstrap purego", + } + if vflag > 0 { + cmd = append(cmd, "-v") + } + if tool := os.Getenv("GOBOOTSTRAP_TOOLEXEC"); tool != "" { + cmd = append(cmd, "-toolexec="+tool) + } + cmd = append(cmd, "bootstrap/cmd/...") + run(base, ShowOutput|CheckExit, cmd...) + + // Copy binaries into tool binary directory. + for _, name := range bootstrapDirs { + if !strings.HasPrefix(name, "cmd/") { + continue + } + name = name[len("cmd/"):] + if !strings.Contains(name, "/") { + copyfile(pathf("%s/%s%s", tooldir, name, exe), pathf("%s/bin/%s%s", workspace, name, exe), writeExec) + } + } + + if vflag > 0 { + xprintf("\n") + } +} + +var ssaRewriteFileSubstring = filepath.FromSlash("src/cmd/compile/internal/ssa/rewrite") + +// isUnneededSSARewriteFile reports whether srcFile is a +// src/cmd/compile/internal/ssa/rewriteARCHNAME.go file for an +// architecture that isn't for the given GOARCH. +// +// When unneeded is true archCaps is the rewrite base filename without +// the "rewrite" prefix or ".go" suffix: AMD64, 386, ARM, ARM64, etc. 
+func isUnneededSSARewriteFile(srcFile, goArch string) (archCaps string, unneeded bool) { + if !strings.Contains(srcFile, ssaRewriteFileSubstring) { + return "", false + } + fileArch := strings.TrimSuffix(strings.TrimPrefix(filepath.Base(srcFile), "rewrite"), ".go") + if fileArch == "" { + return "", false + } + b := fileArch[0] + if b == '_' || ('a' <= b && b <= 'z') { + return "", false + } + archCaps = fileArch + fileArch = strings.ToLower(fileArch) + fileArch = strings.TrimSuffix(fileArch, "splitload") + fileArch = strings.TrimSuffix(fileArch, "latelower") + if fileArch == goArch { + return "", false + } + if fileArch == strings.TrimSuffix(goArch, "le") { + return "", false + } + return archCaps, true +} + +func bootstrapRewriteFile(srcFile string) string { + // During bootstrap, generate dummy rewrite files for + // irrelevant architectures. We only need to build a bootstrap + // binary that works for the current gohostarch. + // This saves 6+ seconds of bootstrap. + if archCaps, ok := isUnneededSSARewriteFile(srcFile, gohostarch); ok { + return fmt.Sprintf(`%spackage ssa + +func rewriteValue%s(v *Value) bool { panic("unused during bootstrap") } +func rewriteBlock%s(b *Block) bool { panic("unused during bootstrap") } +`, generatedHeader, archCaps, archCaps) + } + + return bootstrapFixImports(srcFile) +} + +func bootstrapFixImports(srcFile string) string { + text := readfile(srcFile) + if !strings.Contains(srcFile, "/cmd/") && !strings.Contains(srcFile, `\cmd\`) { + text = regexp.MustCompile(`\bany\b`).ReplaceAllString(text, "interface{}") + } + lines := strings.SplitAfter(text, "\n") + inBlock := false + for i, line := range lines { + if strings.HasPrefix(line, "import (") { + inBlock = true + continue + } + if inBlock && strings.HasPrefix(line, ")") { + inBlock = false + continue + } + if strings.HasPrefix(line, `import "`) || strings.HasPrefix(line, `import . "`) || + inBlock && (strings.HasPrefix(line, "\t\"") || strings.HasPrefix(line, "\t. 
\"") || strings.HasPrefix(line, "\texec \"") || strings.HasPrefix(line, "\trtabi \"")) { + line = strings.Replace(line, `"cmd/`, `"bootstrap/cmd/`, -1) + for _, dir := range bootstrapDirs { + if strings.HasPrefix(dir, "cmd/") { + continue + } + line = strings.Replace(line, `"`+dir+`"`, `"bootstrap/`+dir+`"`, -1) + } + lines[i] = line + } + } + + lines[0] = generatedHeader + "// This is a bootstrap copy of " + srcFile + "\n\n//line " + srcFile + ":1\n" + lines[0] + + return strings.Join(lines, "") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/doc.go b/platform/dbops/binaries/go/go/src/cmd/dist/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..ad26aa2dc06ee295fc95c1bc4eecfb9d3ed50348 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/doc.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Dist helps bootstrap, build, and test the Go distribution. +// +// Usage: +// +// go tool dist [command] +// +// The commands are: +// +// banner print installation banner +// bootstrap rebuild everything +// clean deletes all built files +// env [-p] print environment (-p: include $PATH) +// install [dir] install individual directory +// list [-json] list all supported platforms +// test [-h] run Go test(s) +// version print Go version +package main diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/exec.go b/platform/dbops/binaries/go/go/src/cmd/dist/exec.go new file mode 100644 index 0000000000000000000000000000000000000000..602b812b002af210d4c8ae7221d4c9bbe60154a3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/exec.go @@ -0,0 +1,40 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "os/exec" + "strings" +) + +// setDir sets cmd.Dir to dir, and also adds PWD=dir to cmd's environment. +func setDir(cmd *exec.Cmd, dir string) { + cmd.Dir = dir + if cmd.Env != nil { + // os/exec won't set PWD automatically. + setEnv(cmd, "PWD", dir) + } +} + +// setEnv sets cmd.Env so that key = value. +func setEnv(cmd *exec.Cmd, key, value string) { + cmd.Env = append(cmd.Environ(), key+"="+value) +} + +// unsetEnv sets cmd.Env so that key is not present in the environment. +func unsetEnv(cmd *exec.Cmd, key string) { + cmd.Env = cmd.Environ() + + prefix := key + "=" + newEnv := []string{} + for _, entry := range cmd.Env { + if strings.HasPrefix(entry, prefix) { + continue + } + newEnv = append(newEnv, entry) + // key may appear multiple times, so keep going. + } + cmd.Env = newEnv +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/imports.go b/platform/dbops/binaries/go/go/src/cmd/dist/imports.go new file mode 100644 index 0000000000000000000000000000000000000000..05dd84d0f12a0a72691b401d6c0b685c30069180 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/imports.go @@ -0,0 +1,276 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is forked from go/build/read.go. +// (cmd/dist must not import go/build because we do not want it to be +// sensitive to the specific version of go/build present in $GOROOT_BOOTSTRAP.) 
+ +package main + +import ( + "bufio" + "errors" + "fmt" + "io" + "path" + "path/filepath" + "strconv" + "strings" + "unicode/utf8" +) + +type importReader struct { + b *bufio.Reader + buf []byte + peek byte + err error + eof bool + nerr int +} + +func isIdent(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c >= utf8.RuneSelf +} + +var ( + errSyntax = errors.New("syntax error") + errNUL = errors.New("unexpected NUL in input") +) + +// syntaxError records a syntax error, but only if an I/O error has not already been recorded. +func (r *importReader) syntaxError() { + if r.err == nil { + r.err = errSyntax + } +} + +// readByte reads the next byte from the input, saves it in buf, and returns it. +// If an error occurs, readByte records the error in r.err and returns 0. +func (r *importReader) readByte() byte { + c, err := r.b.ReadByte() + if err == nil { + r.buf = append(r.buf, c) + if c == 0 { + err = errNUL + } + } + if err != nil { + if err == io.EOF { + r.eof = true + } else if r.err == nil { + r.err = err + } + c = 0 + } + return c +} + +// peekByte returns the next byte from the input reader but does not advance beyond it. +// If skipSpace is set, peekByte skips leading spaces and comments. +func (r *importReader) peekByte(skipSpace bool) byte { + if r.err != nil { + if r.nerr++; r.nerr > 10000 { + panic("go/build: import reader looping") + } + return 0 + } + + // Use r.peek as first input byte. + // Don't just return r.peek here: it might have been left by peekByte(false) + // and this might be peekByte(true). + c := r.peek + if c == 0 { + c = r.readByte() + } + for r.err == nil && !r.eof { + if skipSpace { + // For the purposes of this reader, semicolons are never necessary to + // understand the input and are treated as spaces. 
+ switch c { + case ' ', '\f', '\t', '\r', '\n', ';': + c = r.readByte() + continue + + case '/': + c = r.readByte() + if c == '/' { + for c != '\n' && r.err == nil && !r.eof { + c = r.readByte() + } + } else if c == '*' { + var c1 byte + for (c != '*' || c1 != '/') && r.err == nil { + if r.eof { + r.syntaxError() + } + c, c1 = c1, r.readByte() + } + } else { + r.syntaxError() + } + c = r.readByte() + continue + } + } + break + } + r.peek = c + return r.peek +} + +// nextByte is like peekByte but advances beyond the returned byte. +func (r *importReader) nextByte(skipSpace bool) byte { + c := r.peekByte(skipSpace) + r.peek = 0 + return c +} + +// readKeyword reads the given keyword from the input. +// If the keyword is not present, readKeyword records a syntax error. +func (r *importReader) readKeyword(kw string) { + r.peekByte(true) + for i := 0; i < len(kw); i++ { + if r.nextByte(false) != kw[i] { + r.syntaxError() + return + } + } + if isIdent(r.peekByte(false)) { + r.syntaxError() + } +} + +// readIdent reads an identifier from the input. +// If an identifier is not present, readIdent records a syntax error. +func (r *importReader) readIdent() { + c := r.peekByte(true) + if !isIdent(c) { + r.syntaxError() + return + } + for isIdent(r.peekByte(false)) { + r.peek = 0 + } +} + +// readString reads a quoted string literal from the input. +// If an identifier is not present, readString records a syntax error. 
+func (r *importReader) readString(save *[]string) { + switch r.nextByte(true) { + case '`': + start := len(r.buf) - 1 + for r.err == nil { + if r.nextByte(false) == '`' { + if save != nil { + *save = append(*save, string(r.buf[start:])) + } + break + } + if r.eof { + r.syntaxError() + } + } + case '"': + start := len(r.buf) - 1 + for r.err == nil { + c := r.nextByte(false) + if c == '"' { + if save != nil { + *save = append(*save, string(r.buf[start:])) + } + break + } + if r.eof || c == '\n' { + r.syntaxError() + } + if c == '\\' { + r.nextByte(false) + } + } + default: + r.syntaxError() + } +} + +// readImport reads an import clause - optional identifier followed by quoted string - +// from the input. +func (r *importReader) readImport(imports *[]string) { + c := r.peekByte(true) + if c == '.' { + r.peek = 0 + } else if isIdent(c) { + r.readIdent() + } + r.readString(imports) +} + +// readComments is like ioutil.ReadAll, except that it only reads the leading +// block of comments in the file. +func readComments(f io.Reader) ([]byte, error) { + r := &importReader{b: bufio.NewReader(f)} + r.peekByte(true) + if r.err == nil && !r.eof { + // Didn't reach EOF, so must have found a non-space byte. Remove it. + r.buf = r.buf[:len(r.buf)-1] + } + return r.buf, r.err +} + +// readimports returns the imports found in the named file. 
+func readimports(file string) []string { + var imports []string + r := &importReader{b: bufio.NewReader(strings.NewReader(readfile(file)))} + r.readKeyword("package") + r.readIdent() + for r.peekByte(true) == 'i' { + r.readKeyword("import") + if r.peekByte(true) == '(' { + r.nextByte(false) + for r.peekByte(true) != ')' && r.err == nil { + r.readImport(&imports) + } + r.nextByte(false) + } else { + r.readImport(&imports) + } + } + + for i := range imports { + unquoted, err := strconv.Unquote(imports[i]) + if err != nil { + fatalf("reading imports from %s: %v", file, err) + } + imports[i] = unquoted + } + + return imports +} + +// resolveVendor returns a unique package path imported with the given import +// path from srcDir. +// +// resolveVendor assumes that a package is vendored if and only if its first +// path component contains a dot. If a package is vendored, its import path +// is returned with a "vendor" or "cmd/vendor" prefix, depending on srcDir. +// Otherwise, the import path is returned verbatim. +func resolveVendor(imp, srcDir string) string { + var first string + if i := strings.Index(imp, "/"); i < 0 { + first = imp + } else { + first = imp[:i] + } + isStandard := !strings.Contains(first, ".") + if isStandard { + return imp + } + + if strings.HasPrefix(srcDir, filepath.Join(goroot, "src", "cmd")) { + return path.Join("cmd", "vendor", imp) + } else if strings.HasPrefix(srcDir, filepath.Join(goroot, "src")) { + return path.Join("vendor", imp) + } else { + panic(fmt.Sprintf("srcDir %q not in GOOROT/src", srcDir)) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/main.go b/platform/dbops/binaries/go/go/src/cmd/dist/main.go new file mode 100644 index 0000000000000000000000000000000000000000..f3425a9dd8578e6114916bdf527c926d6d3c3e90 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/main.go @@ -0,0 +1,194 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "flag" + "fmt" + "os" + "runtime" + "strings" +) + +func usage() { + xprintf(`usage: go tool dist [command] +Commands are: + +banner print installation banner +bootstrap rebuild everything +clean deletes all built files +env [-p] print environment (-p: include $PATH) +install [dir] install individual directory +list [-json] [-broken] list all supported platforms +test [-h] run Go test(s) +version print Go version + +All commands take -v flags to emit extra information. +`) + xexit(2) +} + +// commands records the available commands. +var commands = map[string]func(){ + "banner": cmdbanner, + "bootstrap": cmdbootstrap, + "clean": cmdclean, + "env": cmdenv, + "install": cmdinstall, + "list": cmdlist, + "test": cmdtest, + "version": cmdversion, +} + +// main takes care of OS-specific startup and dispatches to xmain. +func main() { + os.Setenv("TERM", "dumb") // disable escape codes in clang errors + + // provide -check-armv6k first, before checking for $GOROOT so that + // it is possible to run this check without having $GOROOT available. + if len(os.Args) > 1 && os.Args[1] == "-check-armv6k" { + useARMv6K() // might fail with SIGILL + println("ARMv6K supported.") + os.Exit(0) + } + + gohostos = runtime.GOOS + switch gohostos { + case "aix": + // uname -m doesn't work under AIX + gohostarch = "ppc64" + case "plan9": + gohostarch = os.Getenv("objtype") + if gohostarch == "" { + fatalf("$objtype is unset") + } + case "solaris", "illumos": + // Solaris and illumos systems have multi-arch userlands, and + // "uname -m" reports the machine hardware name; e.g., + // "i86pc" on both 32- and 64-bit x86 systems. 
Check for the + // native (widest) instruction set on the running kernel: + out := run("", CheckExit, "isainfo", "-n") + if strings.Contains(out, "amd64") { + gohostarch = "amd64" + } + if strings.Contains(out, "i386") { + gohostarch = "386" + } + case "windows": + exe = ".exe" + } + + sysinit() + + if gohostarch == "" { + // Default Unix system. + out := run("", CheckExit, "uname", "-m") + outAll := run("", CheckExit, "uname", "-a") + switch { + case strings.Contains(outAll, "RELEASE_ARM64"): + // MacOS prints + // Darwin p1.local 21.1.0 Darwin Kernel Version 21.1.0: Wed Oct 13 17:33:01 PDT 2021; root:xnu-8019.41.5~1/RELEASE_ARM64_T6000 x86_64 + // on ARM64 laptops when there is an x86 parent in the + // process tree. Look for the RELEASE_ARM64 to avoid being + // confused into building an x86 toolchain. + gohostarch = "arm64" + case strings.Contains(out, "x86_64"), strings.Contains(out, "amd64"): + gohostarch = "amd64" + case strings.Contains(out, "86"): + gohostarch = "386" + if gohostos == "darwin" { + // Even on 64-bit platform, some versions of macOS uname -m prints i386. + // We don't support any of the OS X versions that run on 32-bit-only hardware anymore. 
+ gohostarch = "amd64" + } + case strings.Contains(out, "aarch64"), strings.Contains(out, "arm64"): + gohostarch = "arm64" + case strings.Contains(out, "arm"): + gohostarch = "arm" + if gohostos == "netbsd" && strings.Contains(run("", CheckExit, "uname", "-p"), "aarch64") { + gohostarch = "arm64" + } + case strings.Contains(out, "ppc64le"): + gohostarch = "ppc64le" + case strings.Contains(out, "ppc64"): + gohostarch = "ppc64" + case strings.Contains(out, "mips64"): + gohostarch = "mips64" + if elfIsLittleEndian(os.Args[0]) { + gohostarch = "mips64le" + } + case strings.Contains(out, "mips"): + gohostarch = "mips" + if elfIsLittleEndian(os.Args[0]) { + gohostarch = "mipsle" + } + case strings.Contains(out, "loongarch64"): + gohostarch = "loong64" + case strings.Contains(out, "riscv64"): + gohostarch = "riscv64" + case strings.Contains(out, "s390x"): + gohostarch = "s390x" + case gohostos == "darwin", gohostos == "ios": + if strings.Contains(run("", CheckExit, "uname", "-v"), "RELEASE_ARM64_") { + gohostarch = "arm64" + } + case gohostos == "freebsd": + if strings.Contains(run("", CheckExit, "uname", "-p"), "riscv64") { + gohostarch = "riscv64" + } + case gohostos == "openbsd" && strings.Contains(out, "powerpc64"): + gohostarch = "ppc64" + case gohostos == "openbsd": + if strings.Contains(run("", CheckExit, "uname", "-p"), "mips64") { + gohostarch = "mips64" + } + default: + fatalf("unknown architecture: %s", out) + } + } + + if gohostarch == "arm" || gohostarch == "mips64" || gohostarch == "mips64le" { + maxbg = min(maxbg, runtime.NumCPU()) + } + // For deterministic make.bash debugging and for smallest-possible footprint, + // pay attention to GOMAXPROCS=1. This was a bad idea for 1.4 bootstrap, but + // the bootstrap version is now 1.17+ and thus this is fine. 
+ if runtime.GOMAXPROCS(0) == 1 { + maxbg = 1 + } + bginit() + + if len(os.Args) > 1 && os.Args[1] == "-check-goarm" { + useVFPv1() // might fail with SIGILL + println("VFPv1 OK.") + useVFPv3() // might fail with SIGILL + println("VFPv3 OK.") + os.Exit(0) + } + + xinit() + xmain() + xexit(0) +} + +// The OS-specific main calls into the portable code here. +func xmain() { + if len(os.Args) < 2 { + usage() + } + cmd := os.Args[1] + os.Args = os.Args[1:] // for flag parsing during cmd + flag.Usage = func() { + fmt.Fprintf(os.Stderr, "usage: go tool dist %s [options]\n", cmd) + flag.PrintDefaults() + os.Exit(2) + } + if f, ok := commands[cmd]; ok { + f() + } else { + xprintf("unknown command %s\n", cmd) + usage() + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/notgo120.go b/platform/dbops/binaries/go/go/src/cmd/dist/notgo120.go new file mode 100644 index 0000000000000000000000000000000000000000..0b89ab3c022a5d514f6f0503f3dd89d28ee1b302 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/notgo120.go @@ -0,0 +1,21 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Go 1.22 and later requires Go 1.20 as the bootstrap toolchain. +// If cmd/dist is built using an earlier Go version, this file will be +// included in the build and cause an error like: +// +// % GOROOT_BOOTSTRAP=$HOME/sdk/go1.16 ./make.bash +// Building Go cmd/dist using /Users/rsc/sdk/go1.16. (go1.16 darwin/amd64) +// found packages main (build.go) and building_Go_requires_Go_1_20_6_or_later (notgo120.go) in /Users/rsc/go/src/cmd/dist +// % +// +// which is the best we can do under the circumstances. +// +// See go.dev/issue/44505 for more background on +// why Go moved on from Go 1.4 for bootstrap. 
+ +//go:build !go1.20 + +package building_Go_requires_Go_1_20_6_or_later diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/quoted.go b/platform/dbops/binaries/go/go/src/cmd/dist/quoted.go new file mode 100644 index 0000000000000000000000000000000000000000..9f3058198e55089b7e3d649c5582b84e805a4ea5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/quoted.go @@ -0,0 +1,53 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "fmt" + +// quotedSplit is a verbatim copy from cmd/internal/quoted.go:Split and its +// dependencies (isSpaceByte). Since this package is built using the host's +// Go compiler, it cannot use `cmd/internal/...`. We also don't want to export +// it to all Go users. +// +// Please keep those in sync. +func quotedSplit(s string) ([]string, error) { + // Split fields allowing '' or "" around elements. + // Quotes further inside the string do not count. + var f []string + for len(s) > 0 { + for len(s) > 0 && isSpaceByte(s[0]) { + s = s[1:] + } + if len(s) == 0 { + break + } + // Accepted quoted string. No unescaping inside. 
+ if s[0] == '"' || s[0] == '\'' { + quote := s[0] + s = s[1:] + i := 0 + for i < len(s) && s[i] != quote { + i++ + } + if i >= len(s) { + return nil, fmt.Errorf("unterminated %c string", quote) + } + f = append(f, s[:i]) + s = s[i+1:] + continue + } + i := 0 + for i < len(s) && !isSpaceByte(s[i]) { + i++ + } + f = append(f, s[:i]) + s = s[i:] + } + return f, nil +} + +func isSpaceByte(c byte) bool { + return c == ' ' || c == '\t' || c == '\n' || c == '\r' +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/supported_test.go b/platform/dbops/binaries/go/go/src/cmd/dist/supported_test.go new file mode 100644 index 0000000000000000000000000000000000000000..27c0b92514b3c7aca5382432566c157d8c8136b5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/supported_test.go @@ -0,0 +1,48 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "internal/platform" + "testing" +) + +// TestSupportedBuildModes tests that dist and the main tools agree on +// which build modes are supported for a given target. We do things +// this way because the dist tool needs to be buildable directly by +// the bootstrap compiler, and as such can't import internal packages. 
+func TestSupported(t *testing.T) { + defer func(a, o string) { + goarch = a + goos = o + }(goarch, goos) + + var modes = []string{ + // we assume that "exe" and "archive" always work + "pie", + "c-archive", + "c-shared", + "shared", + "plugin", + } + + for _, a := range okgoarch { + goarch = a + for _, o := range okgoos { + if _, ok := cgoEnabled[o+"/"+a]; !ok { + continue + } + goos = o + for _, mode := range modes { + var dt tester + dist := dt.supportedBuildmode(mode) + std := platform.BuildModeSupported("gc", mode, o, a) + if dist != std { + t.Errorf("discrepancy for %s-%s %s: dist says %t, standard library says %t", o, a, mode, dist, std) + } + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/sys_default.go b/platform/dbops/binaries/go/go/src/cmd/dist/sys_default.go new file mode 100644 index 0000000000000000000000000000000000000000..ae102270c3401991ec8f823e7405ec2ce3ea8200 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/sys_default.go @@ -0,0 +1,10 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows + +package main + +func sysinit() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/sys_windows.go b/platform/dbops/binaries/go/go/src/cmd/dist/sys_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..37dffb8541447eaf4f962d9f998160030069e997 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/sys_windows.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetSystemInfo = modkernel32.NewProc("GetSystemInfo") +) + +// see https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/ns-sysinfoapi-system_info +type systeminfo struct { + wProcessorArchitecture uint16 + wReserved uint16 + dwPageSize uint32 + lpMinimumApplicationAddress uintptr + lpMaximumApplicationAddress uintptr + dwActiveProcessorMask uintptr + dwNumberOfProcessors uint32 + dwProcessorType uint32 + dwAllocationGranularity uint32 + wProcessorLevel uint16 + wProcessorRevision uint16 +} + +// See https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/ns-sysinfoapi-system_info +const ( + PROCESSOR_ARCHITECTURE_AMD64 = 9 + PROCESSOR_ARCHITECTURE_INTEL = 0 + PROCESSOR_ARCHITECTURE_ARM = 5 + PROCESSOR_ARCHITECTURE_ARM64 = 12 + PROCESSOR_ARCHITECTURE_IA64 = 6 +) + +var sysinfo systeminfo + +func sysinit() { + syscall.Syscall(procGetSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(&sysinfo)), 0, 0) + switch sysinfo.wProcessorArchitecture { + case PROCESSOR_ARCHITECTURE_AMD64: + gohostarch = "amd64" + case PROCESSOR_ARCHITECTURE_INTEL: + gohostarch = "386" + case PROCESSOR_ARCHITECTURE_ARM: + gohostarch = "arm" + case PROCESSOR_ARCHITECTURE_ARM64: + gohostarch = "arm64" + default: + fatalf("unknown processor architecture") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/test.go b/platform/dbops/binaries/go/go/src/cmd/dist/test.go new file mode 100644 index 0000000000000000000000000000000000000000..5e62bbf4c22c66a86e93f3a0a8055a9648277c1f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/test.go @@ -0,0 +1,1672 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/fs" + "log" + "os" + "os/exec" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "time" +) + +func cmdtest() { + gogcflags = os.Getenv("GO_GCFLAGS") + setNoOpt() + + var t tester + + var noRebuild bool + flag.BoolVar(&t.listMode, "list", false, "list available tests") + flag.BoolVar(&t.rebuild, "rebuild", false, "rebuild everything first") + flag.BoolVar(&noRebuild, "no-rebuild", false, "overrides -rebuild (historical dreg)") + flag.BoolVar(&t.keepGoing, "k", false, "keep going even when error occurred") + flag.BoolVar(&t.race, "race", false, "run in race builder mode (different set of tests)") + flag.BoolVar(&t.compileOnly, "compile-only", false, "compile tests, but don't run them") + flag.StringVar(&t.banner, "banner", "##### ", "banner prefix; blank means no section banners") + flag.StringVar(&t.runRxStr, "run", "", + "run only those tests matching the regular expression; empty means to run all. "+ + "Special exception: if the string begins with '!', the match is inverted.") + flag.BoolVar(&t.msan, "msan", false, "run in memory sanitizer builder mode") + flag.BoolVar(&t.asan, "asan", false, "run in address sanitizer builder mode") + flag.BoolVar(&t.json, "json", false, "report test results in JSON") + + xflagparse(-1) // any number of args + if noRebuild { + t.rebuild = false + } + + t.run() +} + +// tester executes cmdtest. 
+type tester struct { + race bool + msan bool + asan bool + listMode bool + rebuild bool + failed bool + keepGoing bool + compileOnly bool // just try to compile all tests, but no need to run + runRxStr string + runRx *regexp.Regexp + runRxWant bool // want runRx to match (true) or not match (false) + runNames []string // tests to run, exclusive with runRx; empty means all + banner string // prefix, or "" for none + lastHeading string // last dir heading printed + + short bool + cgoEnabled bool + json bool + + tests []distTest // use addTest to extend + testNames map[string]bool + timeoutScale int + + worklist []*work +} + +// work tracks command execution for a test. +type work struct { + dt *distTest // unique test name, etc. + cmd *exec.Cmd // must write stdout/stderr to out + flush func() // if non-nil, called after cmd.Run + start chan bool // a true means to start, a false means to skip + out bytes.Buffer // combined stdout/stderr from cmd + err error // work result + end chan struct{} // a value means cmd ended (or was skipped) +} + +// printSkip prints a skip message for all of work. +func (w *work) printSkip(t *tester, msg string) { + if t.json { + synthesizeSkipEvent(json.NewEncoder(&w.out), w.dt.name, msg) + return + } + fmt.Fprintln(&w.out, msg) +} + +// A distTest is a test run by dist test. +// Each test has a unique name and belongs to a group (heading) +type distTest struct { + name string // unique test name; may be filtered with -run flag + heading string // group section; this header is printed before the test is run. 
+ fn func(*distTest) error +} + +func (t *tester) run() { + timelog("start", "dist test") + + os.Setenv("PATH", fmt.Sprintf("%s%c%s", gorootBin, os.PathListSeparator, os.Getenv("PATH"))) + + t.short = true + if v := os.Getenv("GO_TEST_SHORT"); v != "" { + short, err := strconv.ParseBool(v) + if err != nil { + fatalf("invalid GO_TEST_SHORT %q: %v", v, err) + } + t.short = short + } + + cmd := exec.Command(gorootBinGo, "env", "CGO_ENABLED") + cmd.Stderr = new(bytes.Buffer) + slurp, err := cmd.Output() + if err != nil { + fatalf("Error running %s: %v\n%s", cmd, err, cmd.Stderr) + } + parts := strings.Split(string(slurp), "\n") + if nlines := len(parts) - 1; nlines < 1 { + fatalf("Error running %s: output contains <1 lines\n%s", cmd, cmd.Stderr) + } + t.cgoEnabled, _ = strconv.ParseBool(parts[0]) + + if flag.NArg() > 0 && t.runRxStr != "" { + fatalf("the -run regular expression flag is mutually exclusive with test name arguments") + } + + t.runNames = flag.Args() + + // Set GOTRACEBACK to system if the user didn't set a level explicitly. + // Since we're running tests for Go, we want as much detail as possible + // if something goes wrong. + // + // Set it before running any commands just in case something goes wrong. + if ok := isEnvSet("GOTRACEBACK"); !ok { + if err := os.Setenv("GOTRACEBACK", "system"); err != nil { + if t.keepGoing { + log.Printf("Failed to set GOTRACEBACK: %v", err) + } else { + fatalf("Failed to set GOTRACEBACK: %v", err) + } + } + } + + if t.rebuild { + t.out("Building packages and commands.") + // Force rebuild the whole toolchain. + goInstall(toolenv(), gorootBinGo, append([]string{"-a"}, toolchain...)...) + } + + if !t.listMode { + if builder := os.Getenv("GO_BUILDER_NAME"); builder == "" { + // Ensure that installed commands are up to date, even with -no-rebuild, + // so that tests that run commands end up testing what's actually on disk. + // If everything is up-to-date, this is a no-op. 
+ // We first build the toolchain twice to allow it to converge, + // as when we first bootstrap. + // See cmdbootstrap for a description of the overall process. + // + // On the builders, we skip this step: we assume that 'dist test' is + // already using the result of a clean build, and because of test sharding + // and virtualization we usually start with a clean GOCACHE, so we would + // end up rebuilding large parts of the standard library that aren't + // otherwise relevant to the actual set of packages under test. + goInstall(toolenv(), gorootBinGo, toolchain...) + goInstall(toolenv(), gorootBinGo, toolchain...) + goInstall(toolenv(), gorootBinGo, "cmd") + } + } + + t.timeoutScale = 1 + if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" { + t.timeoutScale, err = strconv.Atoi(s) + if err != nil { + fatalf("failed to parse $GO_TEST_TIMEOUT_SCALE = %q as integer: %v", s, err) + } + } + + if t.runRxStr != "" { + if t.runRxStr[0] == '!' { + t.runRxWant = false + t.runRxStr = t.runRxStr[1:] + } else { + t.runRxWant = true + } + t.runRx = regexp.MustCompile(t.runRxStr) + } + + t.registerTests() + if t.listMode { + for _, tt := range t.tests { + fmt.Println(tt.name) + } + return + } + + for _, name := range t.runNames { + if !t.testNames[name] { + fatalf("unknown test %q", name) + } + } + + // On a few builders, make GOROOT unwritable to catch tests writing to it. + if strings.HasPrefix(os.Getenv("GO_BUILDER_NAME"), "linux-") { + if os.Getuid() == 0 { + // Don't bother making GOROOT unwritable: + // we're running as root, so permissions would have no effect. 
+ } else { + xatexit(t.makeGOROOTUnwritable()) + } + } + + if !t.json { + if err := t.maybeLogMetadata(); err != nil { + t.failed = true + if t.keepGoing { + log.Printf("Failed logging metadata: %v", err) + } else { + fatalf("Failed logging metadata: %v", err) + } + } + } + + var anyIncluded, someExcluded bool + for _, dt := range t.tests { + if !t.shouldRunTest(dt.name) { + someExcluded = true + continue + } + anyIncluded = true + dt := dt // dt used in background after this iteration + if err := dt.fn(&dt); err != nil { + t.runPending(&dt) // in case that hasn't been done yet + t.failed = true + if t.keepGoing { + log.Printf("Failed: %v", err) + } else { + fatalf("Failed: %v", err) + } + } + } + t.runPending(nil) + timelog("end", "dist test") + + if !t.json { + if t.failed { + fmt.Println("\nFAILED") + } else if !anyIncluded { + fmt.Println() + errprintf("go tool dist: warning: %q matched no tests; use the -list flag to list available tests\n", t.runRxStr) + fmt.Println("NO TESTS TO RUN") + } else if someExcluded { + fmt.Println("\nALL TESTS PASSED (some were excluded)") + } else { + fmt.Println("\nALL TESTS PASSED") + } + } + if t.failed { + xexit(1) + } +} + +func (t *tester) shouldRunTest(name string) bool { + if t.runRx != nil { + return t.runRx.MatchString(name) == t.runRxWant + } + if len(t.runNames) == 0 { + return true + } + for _, runName := range t.runNames { + if runName == name { + return true + } + } + return false +} + +func (t *tester) maybeLogMetadata() error { + if t.compileOnly { + // We need to run a subprocess to log metadata. Don't do that + // on compile-only runs. + return nil + } + t.out("Test execution environment.") + // Helper binary to print system metadata (CPU model, etc). This is a + // separate binary from dist so it need not build with the bootstrap + // toolchain. + // + // TODO(prattmic): If we split dist bootstrap and dist test then this + // could be simplified to directly use internal/sysinfo here. 
+ return t.dirCmd(filepath.Join(goroot, "src/cmd/internal/metadata"), gorootBinGo, []string{"run", "main.go"}).Run() +} + +// testName returns the dist test name for a given package and variant. +func testName(pkg, variant string) string { + name := pkg + if variant != "" { + name += ":" + variant + } + return name +} + +// goTest represents all options to a "go test" command. The final command will +// combine configuration from goTest and tester flags. +type goTest struct { + timeout time.Duration // If non-zero, override timeout + short bool // If true, force -short + tags []string // Build tags + race bool // Force -race + bench bool // Run benchmarks (briefly), not tests. + runTests string // Regexp of tests to run + cpu string // If non-empty, -cpu flag + + gcflags string // If non-empty, build with -gcflags=all=X + ldflags string // If non-empty, build with -ldflags=X + buildmode string // If non-empty, -buildmode flag + + env []string // Environment variables to add, as KEY=VAL. KEY= unsets a variable + + runOnHost bool // When cross-compiling, run this test on the host instead of guest + + // variant, if non-empty, is a name used to distinguish different + // configurations of the same test package(s). If set and omitVariant is false, + // the Package field in test2json output is rewritten to pkg:variant. + variant string + // omitVariant indicates that variant is used solely for the dist test name and + // that the set of test names run by each variant (including empty) of a package + // is non-overlapping. + omitVariant bool + + // We have both pkg and pkgs as a convenience. Both may be set, in which + // case they will be combined. At least one must be set. + pkgs []string // Multiple packages to test + pkg string // A single package to test + + testFlags []string // Additional flags accepted by this test +} + +// bgCommand returns a go test Cmd and a post-Run flush function. The result +// will write its output to stdout and stderr. 
If stdout==stderr, bgCommand +// ensures Writes are serialized. The caller should call flush() after Cmd exits. +func (opts *goTest) bgCommand(t *tester, stdout, stderr io.Writer) (cmd *exec.Cmd, flush func()) { + build, run, pkgs, testFlags, setupCmd := opts.buildArgs(t) + + // Combine the flags. + args := append([]string{"test"}, build...) + if t.compileOnly { + args = append(args, "-c", "-o", os.DevNull) + } else { + args = append(args, run...) + } + args = append(args, pkgs...) + if !t.compileOnly { + args = append(args, testFlags...) + } + + cmd = exec.Command(gorootBinGo, args...) + setupCmd(cmd) + if t.json && opts.variant != "" && !opts.omitVariant { + // Rewrite Package in the JSON output to be pkg:variant. When omitVariant + // is true, pkg.TestName is already unambiguous, so we don't need to + // rewrite the Package field. + // + // We only want to process JSON on the child's stdout. Ideally if + // stdout==stderr, we would also use the same testJSONFilter for + // cmd.Stdout and cmd.Stderr in order to keep the underlying + // interleaving of writes, but then it would see even partial writes + // interleaved, which would corrupt the JSON. So, we only process + // cmd.Stdout. This has another consequence though: if stdout==stderr, + // we have to serialize Writes in case the Writer is not concurrent + // safe. If we were just passing stdout/stderr through to exec, it would + // do this for us, but since we're wrapping stdout, we have to do it + // ourselves. + if stdout == stderr { + stdout = &lockedWriter{w: stdout} + stderr = stdout + } + f := &testJSONFilter{w: stdout, variant: opts.variant} + cmd.Stdout = f + flush = f.Flush + } else { + cmd.Stdout = stdout + flush = func() {} + } + cmd.Stderr = stderr + + return cmd, flush +} + +// run runs a go test and returns an error if it does not succeed. 
+func (opts *goTest) run(t *tester) error { + cmd, flush := opts.bgCommand(t, os.Stdout, os.Stderr) + err := cmd.Run() + flush() + return err +} + +// buildArgs is in internal helper for goTest that constructs the elements of +// the "go test" command line. build is the flags for building the test. run is +// the flags for running the test. pkgs is the list of packages to build and +// run. testFlags is the list of flags to pass to the test package. +// +// The caller must call setupCmd on the resulting exec.Cmd to set its directory +// and environment. +func (opts *goTest) buildArgs(t *tester) (build, run, pkgs, testFlags []string, setupCmd func(*exec.Cmd)) { + run = append(run, "-count=1") // Disallow caching + if opts.timeout != 0 { + d := opts.timeout * time.Duration(t.timeoutScale) + run = append(run, "-timeout="+d.String()) + } else if t.timeoutScale != 1 { + const goTestDefaultTimeout = 10 * time.Minute // Default value of go test -timeout flag. + run = append(run, "-timeout="+(goTestDefaultTimeout*time.Duration(t.timeoutScale)).String()) + } + if opts.short || t.short { + run = append(run, "-short") + } + var tags []string + if t.iOS() { + tags = append(tags, "lldb") + } + if noOpt { + tags = append(tags, "noopt") + } + tags = append(tags, opts.tags...) + if len(tags) > 0 { + build = append(build, "-tags="+strings.Join(tags, ",")) + } + if t.race || opts.race { + build = append(build, "-race") + } + if t.msan { + build = append(build, "-msan") + } + if t.asan { + build = append(build, "-asan") + } + if opts.bench { + // Run no tests. + run = append(run, "-run=^$") + // Run benchmarks briefly as a smoke test. 
+ run = append(run, "-bench=.*", "-benchtime=.1s") + } else if opts.runTests != "" { + run = append(run, "-run="+opts.runTests) + } + if opts.cpu != "" { + run = append(run, "-cpu="+opts.cpu) + } + if t.json { + run = append(run, "-json") + } + + if opts.gcflags != "" { + build = append(build, "-gcflags=all="+opts.gcflags) + } + if opts.ldflags != "" { + build = append(build, "-ldflags="+opts.ldflags) + } + if opts.buildmode != "" { + build = append(build, "-buildmode="+opts.buildmode) + } + + pkgs = opts.packages() + + runOnHost := opts.runOnHost && (goarch != gohostarch || goos != gohostos) + needTestFlags := len(opts.testFlags) > 0 || runOnHost + if needTestFlags { + testFlags = append([]string{"-args"}, opts.testFlags...) + } + if runOnHost { + // -target is a special flag understood by tests that can run on the host + testFlags = append(testFlags, "-target="+goos+"/"+goarch) + } + + setupCmd = func(cmd *exec.Cmd) { + setDir(cmd, filepath.Join(goroot, "src")) + if len(opts.env) != 0 { + for _, kv := range opts.env { + if i := strings.Index(kv, "="); i < 0 { + unsetEnv(cmd, kv[:len(kv)-1]) + } else { + setEnv(cmd, kv[:i], kv[i+1:]) + } + } + } + if runOnHost { + setEnv(cmd, "GOARCH", gohostarch) + setEnv(cmd, "GOOS", gohostos) + } + } + + return +} + +// packages returns the full list of packages to be run by this goTest. This +// will always include at least one package. +func (opts *goTest) packages() []string { + pkgs := opts.pkgs + if opts.pkg != "" { + pkgs = append(pkgs[:len(pkgs):len(pkgs)], opts.pkg) + } + if len(pkgs) == 0 { + panic("no packages") + } + return pkgs +} + +// printSkip prints a skip message for all of goTest. +func (opts *goTest) printSkip(t *tester, msg string) { + if t.json { + enc := json.NewEncoder(os.Stdout) + for _, pkg := range opts.packages() { + synthesizeSkipEvent(enc, pkg, msg) + } + return + } + fmt.Println(msg) +} + +// ranGoTest and stdMatches are state closed over by the stdlib +// testing func in registerStdTest below. 
// The tests are run sequentially, so there's no need for locks.
//
// ranGoBench and benchMatches are the same, but are only used
// in -race mode.
var (
	ranGoTest  bool
	stdMatches []string

	ranGoBench   bool
	benchMatches []string
)

func (t *tester) registerStdTest(pkg string) {
	const stdTestHeading = "Testing packages." // known to addTest for a safety check
	gcflags := gogcflags
	name := testName(pkg, "")
	if t.runRx == nil || t.runRx.MatchString(name) == t.runRxWant {
		stdMatches = append(stdMatches, pkg)
	}
	t.addTest(name, stdTestHeading, func(dt *distTest) error {
		// All std packages run in one batched "go test" invocation; the first
		// registered test to execute runs the whole batch and the rest are
		// no-ops.
		if ranGoTest {
			return nil
		}
		t.runPending(dt)
		timelog("start", dt.name)
		defer timelog("end", dt.name)
		ranGoTest = true

		timeoutSec := 180 * time.Second
		// cmd/go is known to be slow; give the whole batch extra time.
		for _, pkg := range stdMatches {
			if pkg == "cmd/go" {
				timeoutSec *= 3
				break
			}
		}
		return (&goTest{
			timeout: timeoutSec,
			gcflags: gcflags,
			pkgs:    stdMatches,
		}).run(t)
	})
}

func (t *tester) registerRaceBenchTest(pkg string) {
	const raceBenchHeading = "Running benchmarks briefly." // known to addTest for a safety check
	name := testName(pkg, "racebench")
	if t.runRx == nil || t.runRx.MatchString(name) == t.runRxWant {
		benchMatches = append(benchMatches, pkg)
	}
	t.addTest(name, raceBenchHeading, func(dt *distTest) error {
		// Like registerStdTest, all benchmarks run in one batched invocation.
		if ranGoBench {
			return nil
		}
		t.runPending(dt)
		timelog("start", dt.name)
		defer timelog("end", dt.name)
		ranGoBench = true
		return (&goTest{
			variant:     "racebench",
			omitVariant: true,               // The only execution of benchmarks in dist; benchmark names are guaranteed not to overlap with test names.
			timeout:     1200 * time.Second, // longer timeout for race with benchmarks
			race:        true,
			bench:       true,
			cpu:         "4",
			pkgs:        benchMatches,
		}).run(t)
	})
}

func (t *tester) registerTests() {
	// registerStdTestSpecially tracks import paths in the standard library
	// whose test registration happens in a special way.
	//
	// These tests *must* be able to run normally as part of "go test std cmd",
	// even if they are also registered separately by dist, because users often
	// run go test directly. Use skips or build tags in preference to expanding
	// this list.
	registerStdTestSpecially := map[string]bool{
		// testdir can run normally as part of "go test std cmd", but because
		// it's a very large test, we register it specially as several shards to
		// enable better load balancing on sharded builders. Ideally the build
		// system would know how to shard any large test package.
		"cmd/internal/testdir": true,
	}

	// Fast path to avoid the ~1 second of `go list std cmd` when
	// the caller lists specific tests to run. (as the continuous
	// build coordinator does).
	if len(t.runNames) > 0 {
		for _, name := range t.runNames {
			if !strings.Contains(name, ":") {
				t.registerStdTest(name)
			} else if strings.HasSuffix(name, ":racebench") {
				t.registerRaceBenchTest(strings.TrimSuffix(name, ":racebench"))
			}
		}
	} else {
		// Use 'go list std cmd' to get a list of all Go packages
		// that running 'go test std cmd' could find problems in.
		// (In race test mode, also set -tags=race.)
		//
		// In long test mode, this includes vendored packages and other
		// packages without tests so that 'dist test' finds if any of
		// them don't build, have a problem reported by high-confidence
		// vet checks that come with 'go test', and anything else it
		// may check in the future. See go.dev/issue/60463.
		cmd := exec.Command(gorootBinGo, "list")
		if t.short {
			// In short test mode, use a format string to only
			// list packages and commands that have tests.
			const format = "{{if (or .TestGoFiles .XTestGoFiles)}}{{.ImportPath}}{{end}}"
			cmd.Args = append(cmd.Args, "-f", format)
		}
		if t.race {
			cmd.Args = append(cmd.Args, "-tags=race")
		}
		cmd.Args = append(cmd.Args, "std", "cmd")
		cmd.Stderr = new(bytes.Buffer)
		all, err := cmd.Output()
		if err != nil {
			fatalf("Error running go list std cmd: %v:\n%s", err, cmd.Stderr)
		}
		pkgs := strings.Fields(string(all))
		for _, pkg := range pkgs {
			if registerStdTestSpecially[pkg] {
				continue
			}
			t.registerStdTest(pkg)
		}
		if t.race {
			for _, pkg := range pkgs {
				if t.packageHasBenchmarks(pkg) {
					t.registerRaceBenchTest(pkg)
				}
			}
		}
	}

	// In race mode, only the std tests and race benchmarks above are run.
	if t.race {
		return
	}

	// Test the os/user package in the pure-Go mode too.
	if !t.compileOnly {
		t.registerTest("os/user with tag osusergo",
			&goTest{
				variant: "osusergo",
				timeout: 300 * time.Second,
				tags:    []string{"osusergo"},
				pkg:     "os/user",
			})
		t.registerTest("hash/maphash purego implementation",
			&goTest{
				variant: "purego",
				timeout: 300 * time.Second,
				tags:    []string{"purego"},
				pkg:     "hash/maphash",
			})
	}

	// Test ios/amd64 for the iOS simulator.
	if goos == "darwin" && goarch == "amd64" && t.cgoEnabled {
		t.registerTest("GOOS=ios on darwin/amd64",
			&goTest{
				variant:  "amd64ios",
				timeout:  300 * time.Second,
				runTests: "SystemRoots",
				env:      []string{"GOOS=ios", "CGO_ENABLED=1"},
				pkg:      "crypto/x509",
			})
	}

	// Runtime CPU tests.
	if !t.compileOnly && t.hasParallelism() {
		t.registerTest("GOMAXPROCS=2 runtime -cpu=1,2,4 -quick",
			&goTest{
				variant:   "cpu124",
				timeout:   300 * time.Second,
				cpu:       "1,2,4",
				short:     true,
				testFlags: []string{"-quick"},
				// We set GOMAXPROCS=2 in addition to -cpu=1,2,4 in order to test runtime bootstrap code,
				// creation of first goroutines and first garbage collections in the parallel setting.
				env: []string{"GOMAXPROCS=2"},
				pkg: "runtime",
			})
	}

	// GOEXPERIMENT=rangefunc tests
	if !t.compileOnly {
		t.registerTest("GOEXPERIMENT=rangefunc go test iter",
			&goTest{
				variant: "iter",
				short:   t.short,
				env:     []string{"GOEXPERIMENT=rangefunc"},
				pkg:     "iter",
			})
	}

	// GODEBUG=gcstoptheworld=2 tests. We only run these in long-test
	// mode (with GO_TEST_SHORT=0) because this is just testing a
	// non-critical debug setting.
	if !t.compileOnly && !t.short {
		t.registerTest("GODEBUG=gcstoptheworld=2 archive/zip",
			&goTest{
				variant: "runtime:gcstoptheworld2",
				timeout: 300 * time.Second,
				short:   true,
				env:     []string{"GODEBUG=gcstoptheworld=2"},
				pkg:     "archive/zip",
			})
	}

	// morestack tests. We only run these in long-test mode
	// (with GO_TEST_SHORT=0) because the runtime test is
	// already quite long and mayMoreStackMove makes it about
	// twice as slow.
	if !t.compileOnly && !t.short {
		// hooks is the set of maymorestack hooks to test with.
		hooks := []string{"mayMoreStackPreempt", "mayMoreStackMove"}
		// hookPkgs is the set of package patterns to apply
		// the maymorestack hook to.
		hookPkgs := []string{"runtime/...", "reflect", "sync"}
		// unhookPkgs is the set of package patterns to
		// exclude from hookPkgs.
		unhookPkgs := []string{"runtime/testdata/..."}
		for _, hook := range hooks {
			// Construct the build flags to use the
			// maymorestack hook in the compiler and
			// assembler. We pass this via the GOFLAGS
			// environment variable so that it applies to
			// both the test itself and to binaries built
			// by the test.
			// Apply the hook via both -gcflags and -asmflags to the hooked
			// package patterns, and explicitly clear it for the excluded ones.
			goFlagsList := []string{}
			for _, flag := range []string{"-gcflags", "-asmflags"} {
				for _, hookPkg := range hookPkgs {
					goFlagsList = append(goFlagsList, flag+"="+hookPkg+"=-d=maymorestack=runtime."+hook)
				}
				for _, unhookPkg := range unhookPkgs {
					goFlagsList = append(goFlagsList, flag+"="+unhookPkg+"=")
				}
			}
			goFlags := strings.Join(goFlagsList, " ")

			t.registerTest("maymorestack="+hook,
				&goTest{
					variant: hook,
					timeout: 600 * time.Second,
					short:   true,
					env:     []string{"GOFLAGS=" + goFlags},
					pkgs:    []string{"runtime", "reflect", "sync"},
				})
		}
	}

	// Test that internal linking of standard packages does not
	// require libgcc. This ensures that we can install a Go
	// release on a system that does not have a C compiler
	// installed and still build Go programs (that don't use cgo).
	for _, pkg := range cgoPackages {
		if !t.internalLink() {
			break
		}

		// ARM libgcc may be Thumb, which internal linking does not support.
		if goarch == "arm" {
			break
		}

		// What matters is that the tests build and start up.
		// Skip expensive tests, especially x509 TestSystemRoots.
		run := "^Test[^CS]"
		if pkg == "net" {
			run = "TestTCPStress"
		}
		t.registerTest("Testing without libgcc.",
			&goTest{
				variant:  "nolibgcc",
				ldflags:  "-linkmode=internal -libgcc=none",
				runTests: run,
				pkg:      pkg,
			})
	}

	// Stub out following test on alpine until 54354 resolved.
	builderName := os.Getenv("GO_BUILDER_NAME")
	disablePIE := strings.HasSuffix(builderName, "-alpine")

	// Test internal linking of PIE binaries where it is supported.
	if t.internalLinkPIE() && !disablePIE {
		t.registerTest("internal linking of -buildmode=pie",
			&goTest{
				variant:   "pie_internal",
				timeout:   60 * time.Second,
				buildmode: "pie",
				ldflags:   "-linkmode=internal",
				env:       []string{"CGO_ENABLED=0"},
				pkg:       "reflect",
			})
		// Also test a cgo package.
		if t.cgoEnabled && t.internalLink() && !disablePIE {
			t.registerTest("internal linking of -buildmode=pie",
				&goTest{
					variant:   "pie_internal",
					timeout:   60 * time.Second,
					buildmode: "pie",
					ldflags:   "-linkmode=internal",
					pkg:       "os/user",
				})
		}
	}

	// sync tests
	if t.hasParallelism() {
		t.registerTest("sync -cpu=10",
			&goTest{
				variant: "cpu10",
				timeout: 120 * time.Second,
				cpu:     "10",
				pkg:     "sync",
			})
	}

	if t.raceDetectorSupported() {
		t.registerRaceTests()
	}

	const cgoHeading = "Testing cgo"
	if t.cgoEnabled {
		t.registerCgoTests(cgoHeading)
	}

	if goos == "wasip1" {
		t.registerTest("wasip1 host tests",
			&goTest{
				variant:   "host",
				pkg:       "runtime/internal/wasitest",
				timeout:   1 * time.Minute,
				runOnHost: true,
			})
	}

	if goos != "android" && !t.iOS() {
		// Only start multiple test dir shards on builders,
		// where they get distributed to multiple machines.
		// See issues 20141 and 31834.
		nShards := 1
		if os.Getenv("GO_BUILDER_NAME") != "" {
			nShards = 10
		}
		if n, err := strconv.Atoi(os.Getenv("GO_TEST_SHARDS")); err == nil {
			nShards = n
		}
		for shard := 0; shard < nShards; shard++ {
			id := fmt.Sprintf("%d_%d", shard, nShards)
			t.registerTest("../test",
				&goTest{
					variant:     id,
					omitVariant: true, // Shards of the same Go package; tests are guaranteed not to overlap.
					pkg:         "cmd/internal/testdir",
					testFlags:   []string{fmt.Sprintf("-shard=%d", shard), fmt.Sprintf("-shards=%d", nShards)},
					runOnHost:   true,
				},
			)
		}
	}
	// Only run the API check on fast development platforms.
	// Every platform checks the API on every GOOS/GOARCH/CGO_ENABLED combination anyway,
	// so we really only need to run this check once anywhere to get adequate coverage.
	// To help developers avoid trybot-only failures, we try to run on typical developer machines
	// which is darwin,linux,windows/amd64 and darwin/arm64.
	if goos == "darwin" || ((goos == "linux" || goos == "windows") && goarch == "amd64") {
		t.registerTest("API check", &goTest{variant: "check", pkg: "cmd/api", timeout: 5 * time.Minute, testFlags: []string{"-check"}})
	}
}

// addTest adds an arbitrary test callback to the test list.
//
// name must uniquely identify the test and heading must be non-empty.
func (t *tester) addTest(name, heading string, fn func(*distTest) error) {
	if t.testNames[name] {
		panic("duplicate registered test name " + name)
	}
	if heading == "" {
		panic("empty heading")
	}
	// Two simple checks for cases that would conflict with the fast path in registerTests.
	if !strings.Contains(name, ":") && heading != "Testing packages." {
		panic("empty variant is reserved exclusively for registerStdTest")
	} else if strings.HasSuffix(name, ":racebench") && heading != "Running benchmarks briefly." {
		panic("racebench variant is reserved exclusively for registerRaceBenchTest")
	}
	if t.testNames == nil {
		t.testNames = make(map[string]bool)
	}
	t.testNames[name] = true
	t.tests = append(t.tests, distTest{
		name:    name,
		heading: heading,
		fn:      fn,
	})
}

// registerTestOpt is implemented by options accepted by registerTest.
type registerTestOpt interface {
	isRegisterTestOpt()
}

// rtSkipFunc is a registerTest option that runs a skip check function before
// running the test.
type rtSkipFunc struct {
	skip func(*distTest) (string, bool) // Return message, true to skip the test
}

func (rtSkipFunc) isRegisterTestOpt() {}

// registerTest registers a test that runs the given goTest.
//
// Each Go package in goTest will have a corresponding test
// "<pkg>:<variant>", which must uniquely identify the test.
//
// heading and test.variant must be non-empty.
+func (t *tester) registerTest(heading string, test *goTest, opts ...registerTestOpt) { + var skipFunc func(*distTest) (string, bool) + for _, opt := range opts { + switch opt := opt.(type) { + case rtSkipFunc: + skipFunc = opt.skip + } + } + // Register each test package as a separate test. + register1 := func(test *goTest) { + if test.variant == "" { + panic("empty variant") + } + name := testName(test.pkg, test.variant) + t.addTest(name, heading, func(dt *distTest) error { + if skipFunc != nil { + msg, skip := skipFunc(dt) + if skip { + test.printSkip(t, msg) + return nil + } + } + w := &work{dt: dt} + w.cmd, w.flush = test.bgCommand(t, &w.out, &w.out) + t.worklist = append(t.worklist, w) + return nil + }) + } + if test.pkg != "" && len(test.pkgs) == 0 { + // Common case. Avoid copying. + register1(test) + return + } + // TODO(dmitshur,austin): It might be better to unify the execution of 'go test pkg' + // invocations for the same variant to be done with a single 'go test pkg1 pkg2 pkg3' + // command, just like it's already done in registerStdTest and registerRaceBenchTest. + // Those methods accumulate matched packages in stdMatches and benchMatches slices, + // and we can extend that mechanism to work for all other equal variant registrations. + // Do the simple thing to start with. + for _, pkg := range test.packages() { + test1 := *test + test1.pkg, test1.pkgs = pkg, nil + register1(&test1) + } +} + +// dirCmd constructs a Cmd intended to be run in the foreground. +// The command will be run in dir, and Stdout and Stderr will go to os.Stdout +// and os.Stderr. +func (t *tester) dirCmd(dir string, cmdline ...interface{}) *exec.Cmd { + bin, args := flattenCmdline(cmdline) + cmd := exec.Command(bin, args...) 
+ if filepath.IsAbs(dir) { + setDir(cmd, dir) + } else { + setDir(cmd, filepath.Join(goroot, dir)) + } + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if vflag > 1 { + errprintf("%s\n", strings.Join(cmd.Args, " ")) + } + return cmd +} + +// flattenCmdline flattens a mixture of string and []string as single list +// and then interprets it as a command line: first element is binary, then args. +func flattenCmdline(cmdline []interface{}) (bin string, args []string) { + var list []string + for _, x := range cmdline { + switch x := x.(type) { + case string: + list = append(list, x) + case []string: + list = append(list, x...) + default: + panic("invalid dirCmd argument type: " + reflect.TypeOf(x).String()) + } + } + + bin = list[0] + if !filepath.IsAbs(bin) { + panic("command is not absolute: " + bin) + } + return bin, list[1:] +} + +func (t *tester) iOS() bool { + return goos == "ios" +} + +func (t *tester) out(v string) { + if t.json { + return + } + if t.banner == "" { + return + } + fmt.Println("\n" + t.banner + v) +} + +// extLink reports whether the current goos/goarch supports +// external linking. This should match the test in determineLinkMode +// in cmd/link/internal/ld/config.go. +func (t *tester) extLink() bool { + if goarch == "ppc64" && goos != "aix" { + return false + } + return true +} + +func (t *tester) internalLink() bool { + if gohostos == "dragonfly" { + // linkmode=internal fails on dragonfly since errno is a TLS relocation. + return false + } + if goos == "android" { + return false + } + if goos == "ios" { + return false + } + if goos == "windows" && goarch == "arm64" { + return false + } + // Internally linking cgo is incomplete on some architectures. + // https://golang.org/issue/10373 + // https://golang.org/issue/14449 + if goarch == "loong64" || goarch == "mips64" || goarch == "mips64le" || goarch == "mips" || goarch == "mipsle" || goarch == "riscv64" { + return false + } + if goos == "aix" { + // linkmode=internal isn't supported. 
		return false
	}
	return true
}

// internalLinkPIE reports whether internal linking of -buildmode=pie
// binaries is supported on the current platform.
func (t *tester) internalLinkPIE() bool {
	switch goos + "-" + goarch {
	case "darwin-amd64", "darwin-arm64",
		"linux-amd64", "linux-arm64", "linux-ppc64le",
		"android-arm64",
		"windows-amd64", "windows-386", "windows-arm":
		return true
	}
	return false
}

// supportedBuildmode reports whether the given build mode is supported.
func (t *tester) supportedBuildmode(mode string) bool {
	switch mode {
	case "c-archive", "c-shared", "shared", "plugin", "pie":
	default:
		fatalf("internal error: unknown buildmode %s", mode)
		return false
	}

	return buildModeSupported("gc", mode, goos, goarch)
}

func (t *tester) registerCgoTests(heading string) {
	// cgoTest builds and registers one cgo test variant and returns the
	// goTest so the caller can tweak it further (ldflags, env).
	cgoTest := func(variant string, subdir, linkmode, buildmode string, opts ...registerTestOpt) *goTest {
		gt := &goTest{
			variant:   variant,
			pkg:       "cmd/cgo/internal/" + subdir,
			buildmode: buildmode,
		}
		var ldflags []string
		if linkmode != "auto" {
			// "auto" is the default, so avoid cluttering the command line for "auto"
			ldflags = append(ldflags, "-linkmode="+linkmode)
		}

		if linkmode == "internal" {
			gt.tags = append(gt.tags, "internal")
			if buildmode == "pie" {
				gt.tags = append(gt.tags, "internal_pie")
			}
		}
		if buildmode == "static" {
			// This isn't actually a Go buildmode, just a convenient way to tell
			// cgoTest we want static linking.
			gt.buildmode = ""
			if linkmode == "external" {
				ldflags = append(ldflags, `-extldflags "-static -pthread"`)
			} else if linkmode == "auto" {
				gt.env = append(gt.env, "CGO_LDFLAGS=-static -pthread")
			} else {
				panic("unknown linkmode with static build: " + linkmode)
			}
			gt.tags = append(gt.tags, "static")
		}
		gt.ldflags = strings.Join(ldflags, " ")

		t.registerTest(heading, gt, opts...)
		return gt
	}

	// test, testtls, and testnocgo are run with linkmode="auto", buildmode=""
	// as part of go test cmd. Here we only have to register the non-default
	// build modes of these tests.

	// Stub out various buildmode=pie tests on alpine until 54354 resolved.
	builderName := os.Getenv("GO_BUILDER_NAME")
	disablePIE := strings.HasSuffix(builderName, "-alpine")

	if t.internalLink() {
		cgoTest("internal", "test", "internal", "")
	}

	// NOTE: "os" here deliberately shadows the os package for the rest of
	// this function; use it only as the host OS string.
	os := gohostos
	p := gohostos + "/" + goarch
	switch {
	case os == "darwin", os == "windows":
		if !t.extLink() {
			break
		}
		// test linkmode=external, but __thread not supported, so skip testtls.
		cgoTest("external", "test", "external", "")

		gt := cgoTest("external-s", "test", "external", "")
		gt.ldflags += " -s"

		if t.supportedBuildmode("pie") && !disablePIE {
			cgoTest("auto-pie", "test", "auto", "pie")
			if t.internalLink() && t.internalLinkPIE() {
				cgoTest("internal-pie", "test", "internal", "pie")
			}
		}

	case os == "aix", os == "android", os == "dragonfly", os == "freebsd", os == "linux", os == "netbsd", os == "openbsd":
		gt := cgoTest("external-g0", "test", "external", "")
		gt.env = append(gt.env, "CGO_CFLAGS=-g0 -fdiagnostics-color")

		cgoTest("external", "testtls", "external", "")
		switch {
		case os == "aix":
			// no static linking
		case p == "freebsd/arm":
			// -fPIC compiled tls code will use __tls_get_addr instead
			// of __aeabi_read_tp, however, on FreeBSD/ARM, __tls_get_addr
			// is implemented in rtld-elf, so -fPIC isn't compatible with
			// static linking on FreeBSD/ARM with clang. (cgo depends on
			// -fPIC fundamentally.)
		default:
			// Check for static linking support
			var staticCheck rtSkipFunc
			ccName := compilerEnvLookup("CC", defaultcc, goos, goarch)
			cc, err := exec.LookPath(ccName)
			if err != nil {
				// No C compiler found at all; skip with an explanatory message.
				staticCheck.skip = func(*distTest) (string, bool) {
					return fmt.Sprintf("$CC (%q) not found, skip cgo static linking test.", ccName), true
				}
			} else {
				// Probe whether the toolchain can statically link a trivial
				// C program (some systems lack libc.a).
				cmd := t.dirCmd("src/cmd/cgo/internal/test", cc, "-xc", "-o", "/dev/null", "-static", "-")
				cmd.Stdin = strings.NewReader("int main() {}")
				cmd.Stdout, cmd.Stderr = nil, nil // Discard output
				if err := cmd.Run(); err != nil {
					// Skip these tests
					staticCheck.skip = func(*distTest) (string, bool) {
						return "No support for static linking found (lacks libc.a?), skip cgo static linking test.", true
					}
				}
			}

			// Doing a static link with boringcrypto gets
			// a C linker warning on Linux.
			// in function `bio_ip_and_port_to_socket_and_addr':
			// warning: Using 'getaddrinfo' in statically linked applications requires at runtime the shared libraries from the glibc version used for linking
			if staticCheck.skip == nil && goos == "linux" && strings.Contains(goexperiment, "boringcrypto") {
				staticCheck.skip = func(*distTest) (string, bool) {
					return "skipping static linking check on Linux when using boringcrypto to avoid C linker warning about getaddrinfo", true
				}
			}

			// Static linking tests
			if goos != "android" && p != "netbsd/arm" {
				// TODO(#56629): Why does this fail on netbsd-arm?
				cgoTest("static", "testtls", "external", "static", staticCheck)
			}
			cgoTest("external", "testnocgo", "external", "", staticCheck)
			if goos != "android" {
				cgoTest("static", "testnocgo", "external", "static", staticCheck)
				cgoTest("static", "test", "external", "static", staticCheck)
				// -static in CGO_LDFLAGS triggers a different code path
				// than -static in -extldflags, so test both.
				// See issue #16651.
				if goarch != "loong64" {
					// TODO(#56623): Why does this fail on loong64?
					cgoTest("auto-static", "test", "auto", "static", staticCheck)
				}
			}

			// PIE linking tests
			if t.supportedBuildmode("pie") && !disablePIE {
				cgoTest("auto-pie", "test", "auto", "pie")
				if t.internalLink() && t.internalLinkPIE() {
					cgoTest("internal-pie", "test", "internal", "pie")
				}
				cgoTest("auto-pie", "testtls", "auto", "pie")
				cgoTest("auto-pie", "testnocgo", "auto", "pie")
			}
		}
	}
}

// runPending runs pending test commands, in parallel, emitting headers as appropriate.
// When finished, it emits header for nextTest, which is going to run after the
// pending commands are done (and runPending returns).
// A test should call runPending if it wants to make sure that it is not
// running in parallel with earlier tests, or if it has some other reason
// for needing the earlier tests to be done.
func (t *tester) runPending(nextTest *distTest) {
	worklist := t.worklist
	t.worklist = nil
	for _, w := range worklist {
		w.start = make(chan bool)
		w.end = make(chan struct{})
		// w.cmd must be set up to write to w.out. We can't check that, but we
		// can check for easy mistakes.
		if w.cmd.Stdout == nil || w.cmd.Stdout == os.Stdout || w.cmd.Stderr == nil || w.cmd.Stderr == os.Stderr {
			panic("work.cmd.Stdout/Stderr must be redirected")
		}
		go func(w *work) {
			// Wait for permission to start. A false value means an earlier
			// test failed (and -k was not set), so this one is skipped.
			if !<-w.start {
				timelog("skip", w.dt.name)
				w.printSkip(t, "skipped due to earlier error")
			} else {
				timelog("start", w.dt.name)
				w.err = w.cmd.Run()
				if w.flush != nil {
					w.flush()
				}
				if w.err != nil {
					if isUnsupportedVMASize(w) {
						// The race runtime cannot work on this VMA
						// configuration; treat as a skip, not a failure.
						timelog("skip", w.dt.name)
						w.out.Reset()
						w.printSkip(t, "skipped due to unsupported VMA")
						w.err = nil
					}
				}
			}
			timelog("end", w.dt.name)
			w.end <- struct{}{}
		}(w)
	}

	// Start up to maxbg commands at a time, and reap them strictly in
	// submission order so the printed output stays deterministic.
	started := 0
	ended := 0
	var last *distTest
	for ended < len(worklist) {
		for started < len(worklist) && started-ended < maxbg {
			w := worklist[started]
			started++
			w.start <- !t.failed || t.keepGoing
		}
		w := worklist[ended]
		dt := w.dt
		if t.lastHeading != dt.heading {
			t.lastHeading = dt.heading
			t.out(dt.heading)
		}
		if dt != last {
			// Assumes all the entries for a single dt are in one worklist.
			last = w.dt
			if vflag > 0 {
				fmt.Printf("# go tool dist test -run=^%s$\n", dt.name)
			}
		}
		if vflag > 1 {
			errprintf("%s\n", strings.Join(w.cmd.Args, " "))
		}
		ended++
		<-w.end
		os.Stdout.Write(w.out.Bytes())
		// We no longer need the output, so drop the buffer.
		w.out = bytes.Buffer{}
		if w.err != nil {
			log.Printf("Failed: %v", w.err)
			t.failed = true
		}
	}
	if t.failed && !t.keepGoing {
		fatalf("FAILED")
	}

	if dt := nextTest; dt != nil {
		if t.lastHeading != dt.heading {
			t.lastHeading = dt.heading
			t.out(dt.heading)
		}
		if vflag > 0 {
			fmt.Printf("# go tool dist test -run=^%s$\n", dt.name)
		}
	}
}

// hasBash reports whether the host has a usable bash shell.
func (t *tester) hasBash() bool {
	switch gohostos {
	case "windows", "plan9":
		return false
	}
	return true
}

// hasParallelism is a copy of the function
// internal/testenv.HasParallelism, which can't be used here
// because cmd/dist can not import internal packages during bootstrap.
func (t *tester) hasParallelism() bool {
	switch goos {
	case "js", "wasip1":
		return false
	}
	return true
}

// raceDetectorSupported reports whether this tester's configuration can use
// the race detector: host matches target, cgo is enabled, and the platform
// itself supports the race runtime.
func (t *tester) raceDetectorSupported() bool {
	if gohostos != goos {
		return false
	}
	if !t.cgoEnabled {
		return false
	}
	if !raceDetectorSupported(goos, goarch) {
		return false
	}
	// The race detector doesn't work on Alpine Linux:
	// golang.org/issue/14481
	if isAlpineLinux() {
		return false
	}
	// NetBSD support is unfinished.
	// golang.org/issue/26403
	if goos == "netbsd" {
		return false
	}
	return true
}

// isAlpineLinux reports whether we are running on Alpine Linux, detected
// via the presence of /etc/alpine-release.
func isAlpineLinux() bool {
	if runtime.GOOS != "linux" {
		return false
	}
	fi, err := os.Lstat("/etc/alpine-release")
	return err == nil && fi.Mode().IsRegular()
}

func (t *tester) registerRaceTests() {
	hdr := "Testing race detector"
	t.registerTest(hdr,
		&goTest{
			variant:  "race",
			race:     true,
			runTests: "Output",
			pkg:      "runtime/race",
		})
	t.registerTest(hdr,
		&goTest{
			variant:  "race",
			race:     true,
			runTests: "TestParse|TestEcho|TestStdinCloseRace|TestClosedPipeRace|TestTypeRace|TestFdRace|TestFdReadRace|TestFileCloseRace",
			pkgs:     []string{"flag", "net", "os", "os/exec", "encoding/gob"},
		})
	// We don't want the following line, because it
	// slows down all.bash (by 10 seconds on my laptop).
	// The race builder should catch any error here, but doesn't.
	// TODO(iant): Figure out how to catch this.
	// t.registerTest(hdr, &goTest{variant: "race", race: true, runTests: "TestParallelTest", pkg: "cmd/go"})
	if t.cgoEnabled {
		// Building cmd/cgo/internal/test takes a long time.
		// There are already cgo-enabled packages being tested with the race detector.
		// We shouldn't need to redo all of cmd/cgo/internal/test too.
		// The race builder will take care of this.
		// t.registerTest(hdr, &goTest{variant: "race", race: true, env: []string{"GOTRACEBACK=2"}, pkg: "cmd/cgo/internal/test"})
	}
	if t.extLink() {
		// Test with external linking; see issue 9133.
		t.registerTest(hdr,
			&goTest{
				variant:  "race-external",
				race:     true,
				ldflags:  "-linkmode=external",
				runTests: "TestParse|TestEcho|TestStdinCloseRace",
				pkgs:     []string{"flag", "os/exec"},
			})
	}
}

// cgoPackages is the standard packages that use cgo.
var cgoPackages = []string{
	"net",
	"os/user",
}

// funcBenchmark is the byte pattern that marks a top-level benchmark
// declaration in a _test.go file.
var funcBenchmark = []byte("\nfunc Benchmark")

// packageHasBenchmarks reports whether pkg has benchmarks.
// On any error, it conservatively returns true.
//
// This exists just to eliminate work on the builders, since compiling
// a test in race mode just to discover it has no benchmarks costs a
// second or two per package, and this function returns false for
// about 100 packages.
func (t *tester) packageHasBenchmarks(pkg string) bool {
	pkgDir := filepath.Join(goroot, "src", pkg)
	d, err := os.Open(pkgDir)
	if err != nil {
		return true // conservatively
	}
	defer d.Close()
	names, err := d.Readdirnames(-1)
	if err != nil {
		return true // conservatively
	}
	for _, name := range names {
		if !strings.HasSuffix(name, "_test.go") {
			continue
		}
		slurp, err := os.ReadFile(filepath.Join(pkgDir, name))
		if err != nil {
			return true // conservatively
		}
		if bytes.Contains(slurp, funcBenchmark) {
			return true
		}
	}
	return false
}

// makeGOROOTUnwritable makes all $GOROOT files & directories non-writable to
// check that no tests accidentally write to $GOROOT.
func (t *tester) makeGOROOTUnwritable() (undo func()) {
	dir := os.Getenv("GOROOT")
	if dir == "" {
		panic("GOROOT not set")
	}

	type pathMode struct {
		path string
		mode os.FileMode
	}
	var dirs []pathMode // in lexical order

	// undo restores the recorded modes. It is safe to call even if only a
	// prefix of dirs was actually changed (see the partial-failure path below).
	undo = func() {
		for i := range dirs {
			os.Chmod(dirs[i].path, dirs[i].mode) // best effort
		}
	}

	filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
		if suffix := strings.TrimPrefix(path, dir+string(filepath.Separator)); suffix != "" {
			if suffix == ".git" {
				// Leave Git metadata in whatever state it was in. It may contain a lot
				// of files, and it is highly unlikely that a test will try to modify
				// anything within that directory.
				return filepath.SkipDir
			}
		}
		if err != nil {
			return nil
		}

		info, err := d.Info()
		if err != nil {
			return nil
		}

		// Record only writable regular files and directories; everything
		// else is left untouched.
		mode := info.Mode()
		if mode&0222 != 0 && (mode.IsDir() || mode.IsRegular()) {
			dirs = append(dirs, pathMode{path, mode})
		}
		return nil
	})

	// Run over list backward to chmod children before parents.
	for i := len(dirs) - 1; i >= 0; i-- {
		err := os.Chmod(dirs[i].path, dirs[i].mode&^0222)
		if err != nil {
			dirs = dirs[i:] // Only undo what we did so far.
			undo()
			fatalf("failed to make GOROOT read-only: %v", err)
		}
	}

	return undo
}

// raceDetectorSupported is a copy of the function
// internal/platform.RaceDetectorSupported, which can't be used here
// because cmd/dist can not import internal packages during bootstrap.
// The race detector only supports 48-bit VMA on arm64. But we don't have
// a good solution to check VMA size (see https://go.dev/issue/29948).
// raceDetectorSupported will always return true for arm64. But race
// detector tests may abort on non 48-bit VMA configuration, the tests
// will be marked as "skipped" in this case.
+func raceDetectorSupported(goos, goarch string) bool { + switch goos { + case "linux": + return goarch == "amd64" || goarch == "ppc64le" || goarch == "arm64" || goarch == "s390x" + case "darwin": + return goarch == "amd64" || goarch == "arm64" + case "freebsd", "netbsd", "openbsd", "windows": + return goarch == "amd64" + default: + return false + } +} + +// buildModeSupports is a copy of the function +// internal/platform.BuildModeSupported, which can't be used here +// because cmd/dist can not import internal packages during bootstrap. +func buildModeSupported(compiler, buildmode, goos, goarch string) bool { + if compiler == "gccgo" { + return true + } + + platform := goos + "/" + goarch + + switch buildmode { + case "archive": + return true + + case "c-archive": + switch goos { + case "aix", "darwin", "ios", "windows": + return true + case "linux": + switch goarch { + case "386", "amd64", "arm", "armbe", "arm64", "arm64be", "loong64", "ppc64le", "riscv64", "s390x": + // linux/ppc64 not supported because it does + // not support external linking mode yet. + return true + default: + // Other targets do not support -shared, + // per ParseFlags in + // cmd/compile/internal/base/flag.go. + // For c-archive the Go tool passes -shared, + // so that the result is suitable for inclusion + // in a PIE or shared library. 
+ return false + } + case "freebsd": + return goarch == "amd64" + } + return false + + case "c-shared": + switch platform { + case "linux/amd64", "linux/arm", "linux/arm64", "linux/loong64", "linux/386", "linux/ppc64le", "linux/riscv64", "linux/s390x", + "android/amd64", "android/arm", "android/arm64", "android/386", + "freebsd/amd64", + "darwin/amd64", "darwin/arm64", + "windows/amd64", "windows/386", "windows/arm64": + return true + } + return false + + case "default": + return true + + case "exe": + return true + + case "pie": + switch platform { + case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/loong64", "linux/ppc64le", "linux/riscv64", "linux/s390x", + "android/amd64", "android/arm", "android/arm64", "android/386", + "freebsd/amd64", + "darwin/amd64", "darwin/arm64", + "ios/amd64", "ios/arm64", + "aix/ppc64", + "windows/386", "windows/amd64", "windows/arm", "windows/arm64": + return true + } + return false + + case "shared": + switch platform { + case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x": + return true + } + return false + + case "plugin": + switch platform { + case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", "linux/loong64", "linux/s390x", "linux/ppc64le", + "android/amd64", "android/386", + "darwin/amd64", "darwin/arm64", + "freebsd/amd64": + return true + } + return false + + default: + return false + } +} + +// isUnsupportedVMASize reports whether the failure is caused by an unsupported +// VMA for the race detector (for example, running the race detector on an +// arm64 machine configured with 39-bit VMA). +func isUnsupportedVMASize(w *work) bool { + unsupportedVMA := []byte("unsupported VMA range") + return strings.Contains(w.dt.name, ":race") && bytes.Contains(w.out.Bytes(), unsupportedVMA) +} + +// isEnvSet reports whether the environment variable evar is +// set in the environment. 
+func isEnvSet(evar string) bool { + evarEq := evar + "=" + for _, e := range os.Environ() { + if strings.HasPrefix(e, evarEq) { + return true + } + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/testjson.go b/platform/dbops/binaries/go/go/src/cmd/dist/testjson.go new file mode 100644 index 0000000000000000000000000000000000000000..62045932a9f92b99d065e0629eb06d8e2e976744 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/testjson.go @@ -0,0 +1,204 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "sync" + "time" +) + +// lockedWriter serializes Write calls to an underlying Writer. +type lockedWriter struct { + lock sync.Mutex + w io.Writer +} + +func (w *lockedWriter) Write(b []byte) (int, error) { + w.lock.Lock() + defer w.lock.Unlock() + return w.w.Write(b) +} + +// testJSONFilter is an io.Writer filter that replaces the Package field in +// test2json output. +type testJSONFilter struct { + w io.Writer // Underlying writer + variant string // Add ":variant" to Package field + + lineBuf bytes.Buffer // Buffer for incomplete lines +} + +func (f *testJSONFilter) Write(b []byte) (int, error) { + bn := len(b) + + // Process complete lines, and buffer any incomplete lines. + for len(b) > 0 { + nl := bytes.IndexByte(b, '\n') + if nl < 0 { + f.lineBuf.Write(b) + break + } + var line []byte + if f.lineBuf.Len() > 0 { + // We have buffered data. Add the rest of the line from b and + // process the complete line. + f.lineBuf.Write(b[:nl+1]) + line = f.lineBuf.Bytes() + } else { + // Process a complete line from b. + line = b[:nl+1] + } + b = b[nl+1:] + f.process(line) + f.lineBuf.Reset() + } + + return bn, nil +} + +func (f *testJSONFilter) Flush() { + // Write any remaining partial line to the underlying writer. 
+ if f.lineBuf.Len() > 0 { + f.w.Write(f.lineBuf.Bytes()) + f.lineBuf.Reset() + } +} + +func (f *testJSONFilter) process(line []byte) { + if len(line) > 0 && line[0] == '{' { + // Plausible test2json output. Parse it generically. + // + // We go to some effort here to preserve key order while doing this + // generically. This will stay robust to changes in the test2json + // struct, or other additions outside of it. If humans are ever looking + // at the output, it's really nice to keep field order because it + // preserves a lot of regularity in the output. + dec := json.NewDecoder(bytes.NewBuffer(line)) + dec.UseNumber() + val, err := decodeJSONValue(dec) + if err == nil && val.atom == json.Delim('{') { + // Rewrite the Package field. + found := false + for i := 0; i < len(val.seq); i += 2 { + if val.seq[i].atom == "Package" { + if pkg, ok := val.seq[i+1].atom.(string); ok { + val.seq[i+1].atom = pkg + ":" + f.variant + found = true + break + } + } + } + if found { + data, err := json.Marshal(val) + if err != nil { + // Should never happen. + panic(fmt.Sprintf("failed to round-trip JSON %q: %s", string(line), err)) + } + f.w.Write(data) + // Copy any trailing text. We expect at most a "\n" here, but + // there could be other text and we want to feed that through. + io.Copy(f.w, dec.Buffered()) + return + } + } + } + + // Something went wrong. Just pass the line through. 
+ f.w.Write(line) +} + +type jsonValue struct { + atom json.Token // If json.Delim, then seq will be filled + seq []jsonValue // If atom == json.Delim('{'), alternating pairs +} + +var jsonPop = errors.New("end of JSON sequence") + +func decodeJSONValue(dec *json.Decoder) (jsonValue, error) { + t, err := dec.Token() + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return jsonValue{}, err + } + + switch t := t.(type) { + case json.Delim: + if t == '}' || t == ']' { + return jsonValue{}, jsonPop + } + + var seq []jsonValue + for { + val, err := decodeJSONValue(dec) + if err == jsonPop { + break + } else if err != nil { + return jsonValue{}, err + } + seq = append(seq, val) + } + return jsonValue{t, seq}, nil + default: + return jsonValue{t, nil}, nil + } +} + +func (v jsonValue) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + var marshal1 func(v jsonValue) error + marshal1 = func(v jsonValue) error { + if t, ok := v.atom.(json.Delim); ok { + buf.WriteRune(rune(t)) + for i, v2 := range v.seq { + if t == '{' && i%2 == 1 { + buf.WriteByte(':') + } else if i > 0 { + buf.WriteByte(',') + } + if err := marshal1(v2); err != nil { + return err + } + } + if t == '{' { + buf.WriteByte('}') + } else { + buf.WriteByte(']') + } + return nil + } + bytes, err := json.Marshal(v.atom) + if err != nil { + return err + } + buf.Write(bytes) + return nil + } + err := marshal1(v) + return buf.Bytes(), err +} + +func synthesizeSkipEvent(enc *json.Encoder, pkg, msg string) { + type event struct { + Time time.Time + Action string + Package string + Output string `json:",omitempty"` + } + ev := event{Time: time.Now(), Package: pkg, Action: "start"} + enc.Encode(ev) + ev.Action = "output" + ev.Output = msg + enc.Encode(ev) + ev.Action = "skip" + ev.Output = "" + enc.Encode(ev) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/testjson_test.go b/platform/dbops/binaries/go/go/src/cmd/dist/testjson_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..0a52aec273eada2d662a27eaf785b49e4252b66b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/testjson_test.go @@ -0,0 +1,85 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "strings" + "testing" +) + +func TestJSONFilterRewritePackage(t *testing.T) { + const in = `{"Package":"abc"} +{"Field1":"1","Package":"abc","Field3":"3"} +{"Package":123} +{} +{"Package":"abc","Unexpected":[null,true,false,99999999999999999999]} +` + want := strings.ReplaceAll(in, `"Package":"abc"`, `"Package":"abc:variant"`) + + checkJSONFilter(t, in, want) +} + +func TestJSONFilterMalformed(t *testing.T) { + const in = `unexpected text +{"Package":"abc"} +more text +{"Package":"abc"}trailing text +{not json} +no newline` + const want = `unexpected text +{"Package":"abc:variant"} +more text +{"Package":"abc:variant"}trailing text +{not json} +no newline` + checkJSONFilter(t, in, want) +} + +func TestJSONFilterBoundaries(t *testing.T) { + const in = `{"Package":"abc"} +{"Package":"def"} +{"Package":"ghi"} +` + want := strings.ReplaceAll(in, `"}`, `:variant"}`) + + // Write one bytes at a time. + t.Run("bytes", func(t *testing.T) { + checkJSONFilterWith(t, want, func(f *testJSONFilter) { + for i := 0; i < len(in); i++ { + f.Write([]byte{in[i]}) + } + }) + }) + // Write a block containing a whole line bordered by two partial lines. 
+ t.Run("bytes", func(t *testing.T) { + checkJSONFilterWith(t, want, func(f *testJSONFilter) { + const b1 = 5 + const b2 = len(in) - 5 + f.Write([]byte(in[:b1])) + f.Write([]byte(in[b1:b2])) + f.Write([]byte(in[b2:])) + }) + }) +} + +func checkJSONFilter(t *testing.T, in, want string) { + t.Helper() + checkJSONFilterWith(t, want, func(f *testJSONFilter) { + f.Write([]byte(in)) + }) +} + +func checkJSONFilterWith(t *testing.T, want string, write func(*testJSONFilter)) { + t.Helper() + + out := new(strings.Builder) + f := &testJSONFilter{w: out, variant: "variant"} + write(f) + f.Flush() + got := out.String() + if want != got { + t.Errorf("want:\n%s\ngot:\n%s", want, got) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/util.go b/platform/dbops/binaries/go/go/src/cmd/dist/util.go new file mode 100644 index 0000000000000000000000000000000000000000..2eeab18a93f1b6ffc92acd30c04a4c6465d45f48 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/util.go @@ -0,0 +1,475 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "flag" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +// pathf is fmt.Sprintf for generating paths +// (on windows it turns / into \ after the printf). +func pathf(format string, args ...interface{}) string { + return filepath.Clean(fmt.Sprintf(format, args...)) +} + +// filter returns a slice containing the elements x from list for which f(x) == true. +func filter(list []string, f func(string) bool) []string { + var out []string + for _, x := range list { + if f(x) { + out = append(out, x) + } + } + return out +} + +// uniq returns a sorted slice containing the unique elements of list. 
+func uniq(list []string) []string { + out := make([]string, len(list)) + copy(out, list) + sort.Strings(out) + keep := out[:0] + for _, x := range out { + if len(keep) == 0 || keep[len(keep)-1] != x { + keep = append(keep, x) + } + } + return keep +} + +const ( + CheckExit = 1 << iota + ShowOutput + Background +) + +var outputLock sync.Mutex + +// run is like runEnv with no additional environment. +func run(dir string, mode int, cmd ...string) string { + return runEnv(dir, mode, nil, cmd...) +} + +// runEnv runs the command line cmd in dir with additional environment env. +// If mode has ShowOutput set and Background unset, run passes cmd's output to +// stdout/stderr directly. Otherwise, run returns cmd's output as a string. +// If mode has CheckExit set and the command fails, run calls fatalf. +// If mode has Background set, this command is being run as a +// Background job. Only bgrun should use the Background mode, +// not other callers. +func runEnv(dir string, mode int, env []string, cmd ...string) string { + if vflag > 1 { + errprintf("run: %s\n", strings.Join(cmd, " ")) + } + + xcmd := exec.Command(cmd[0], cmd[1:]...) + if env != nil { + xcmd.Env = append(os.Environ(), env...) + } + setDir(xcmd, dir) + var data []byte + var err error + + // If we want to show command output and this is not + // a background command, assume it's the only thing + // running, so we can just let it write directly stdout/stderr + // as it runs without fear of mixing the output with some + // other command's output. Not buffering lets the output + // appear as it is printed instead of once the command exits. + // This is most important for the invocation of 'go build -v bootstrap/...'. 
+ if mode&(Background|ShowOutput) == ShowOutput { + xcmd.Stdout = os.Stdout + xcmd.Stderr = os.Stderr + err = xcmd.Run() + } else { + data, err = xcmd.CombinedOutput() + } + if err != nil && mode&CheckExit != 0 { + outputLock.Lock() + if len(data) > 0 { + xprintf("%s\n", data) + } + outputLock.Unlock() + if mode&Background != 0 { + // Prevent fatalf from waiting on our own goroutine's + // bghelper to exit: + bghelpers.Done() + } + fatalf("FAILED: %v: %v", strings.Join(cmd, " "), err) + } + if mode&ShowOutput != 0 { + outputLock.Lock() + os.Stdout.Write(data) + outputLock.Unlock() + } + if vflag > 2 { + errprintf("run: %s DONE\n", strings.Join(cmd, " ")) + } + return string(data) +} + +var maxbg = 4 /* maximum number of jobs to run at once */ + +var ( + bgwork = make(chan func(), 1e5) + + bghelpers sync.WaitGroup + + dieOnce sync.Once // guards close of dying + dying = make(chan struct{}) +) + +func bginit() { + bghelpers.Add(maxbg) + for i := 0; i < maxbg; i++ { + go bghelper() + } +} + +func bghelper() { + defer bghelpers.Done() + for { + select { + case <-dying: + return + case w := <-bgwork: + // Dying takes precedence over doing more work. + select { + case <-dying: + return + default: + w() + } + } + } +} + +// bgrun is like run but runs the command in the background. +// CheckExit|ShowOutput mode is implied (since output cannot be returned). +// bgrun adds 1 to wg immediately, and calls Done when the work completes. +func bgrun(wg *sync.WaitGroup, dir string, cmd ...string) { + wg.Add(1) + bgwork <- func() { + defer wg.Done() + run(dir, CheckExit|ShowOutput|Background, cmd...) + } +} + +// bgwait waits for pending bgruns to finish. +// bgwait must be called from only a single goroutine at a time. +func bgwait(wg *sync.WaitGroup) { + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-dying: + // Don't return to the caller, to avoid reporting additional errors + // to the user. 
+ select {} + } +} + +// xgetwd returns the current directory. +func xgetwd() string { + wd, err := os.Getwd() + if err != nil { + fatalf("%s", err) + } + return wd +} + +// xrealwd returns the 'real' name for the given path. +// real is defined as what xgetwd returns in that directory. +func xrealwd(path string) string { + old := xgetwd() + if err := os.Chdir(path); err != nil { + fatalf("chdir %s: %v", path, err) + } + real := xgetwd() + if err := os.Chdir(old); err != nil { + fatalf("chdir %s: %v", old, err) + } + return real +} + +// isdir reports whether p names an existing directory. +func isdir(p string) bool { + fi, err := os.Stat(p) + return err == nil && fi.IsDir() +} + +// isfile reports whether p names an existing file. +func isfile(p string) bool { + fi, err := os.Stat(p) + return err == nil && fi.Mode().IsRegular() +} + +// mtime returns the modification time of the file p. +func mtime(p string) time.Time { + fi, err := os.Stat(p) + if err != nil { + return time.Time{} + } + return fi.ModTime() +} + +// readfile returns the content of the named file. +func readfile(file string) string { + data, err := os.ReadFile(file) + if err != nil { + fatalf("%v", err) + } + return string(data) +} + +const ( + writeExec = 1 << iota + writeSkipSame +) + +// writefile writes text to the named file, creating it if needed. +// if exec is non-zero, marks the file as executable. +// If the file already exists and has the expected content, +// it is not rewritten, to avoid changing the time stamp. +func writefile(text, file string, flag int) { + new := []byte(text) + if flag&writeSkipSame != 0 { + old, err := os.ReadFile(file) + if err == nil && bytes.Equal(old, new) { + return + } + } + mode := os.FileMode(0666) + if flag&writeExec != 0 { + mode = 0777 + } + xremove(file) // in case of symlink tricks by misc/reboot test + err := os.WriteFile(file, new, mode) + if err != nil { + fatalf("%v", err) + } +} + +// xmkdir creates the directory p. 
+func xmkdir(p string) { + err := os.Mkdir(p, 0777) + if err != nil { + fatalf("%v", err) + } +} + +// xmkdirall creates the directory p and its parents, as needed. +func xmkdirall(p string) { + err := os.MkdirAll(p, 0777) + if err != nil { + fatalf("%v", err) + } +} + +// xremove removes the file p. +func xremove(p string) { + if vflag > 2 { + errprintf("rm %s\n", p) + } + os.Remove(p) +} + +// xremoveall removes the file or directory tree rooted at p. +func xremoveall(p string) { + if vflag > 2 { + errprintf("rm -r %s\n", p) + } + os.RemoveAll(p) +} + +// xreaddir replaces dst with a list of the names of the files and subdirectories in dir. +// The names are relative to dir; they are not full paths. +func xreaddir(dir string) []string { + f, err := os.Open(dir) + if err != nil { + fatalf("%v", err) + } + defer f.Close() + names, err := f.Readdirnames(-1) + if err != nil { + fatalf("reading %s: %v", dir, err) + } + return names +} + +// xworkdir creates a new temporary directory to hold object files +// and returns the name of that directory. +func xworkdir() string { + name, err := os.MkdirTemp(os.Getenv("GOTMPDIR"), "go-tool-dist-") + if err != nil { + fatalf("%v", err) + } + return name +} + +// fatalf prints an error message to standard error and exits. +func fatalf(format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, "go tool dist: %s\n", fmt.Sprintf(format, args...)) + + dieOnce.Do(func() { close(dying) }) + + // Wait for background goroutines to finish, + // so that exit handler that removes the work directory + // is not fighting with active writes or open files. + bghelpers.Wait() + + xexit(2) +} + +var atexits []func() + +// xexit exits the process with return code n. +func xexit(n int) { + for i := len(atexits) - 1; i >= 0; i-- { + atexits[i]() + } + os.Exit(n) +} + +// xatexit schedules the exit-handler f to be run when the program exits. 
+func xatexit(f func()) { + atexits = append(atexits, f) +} + +// xprintf prints a message to standard output. +func xprintf(format string, args ...interface{}) { + fmt.Printf(format, args...) +} + +// errprintf prints a message to standard output. +func errprintf(format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, format, args...) +} + +// xsamefile reports whether f1 and f2 are the same file (or dir). +func xsamefile(f1, f2 string) bool { + fi1, err1 := os.Stat(f1) + fi2, err2 := os.Stat(f2) + if err1 != nil || err2 != nil { + return f1 == f2 + } + return os.SameFile(fi1, fi2) +} + +func xgetgoarm() string { + // If we're building on an actual arm system, and not building + // a cross-compiling toolchain, try to exec ourselves + // to detect whether VFP is supported and set the default GOARM. + // Windows requires ARMv7, so we can skip the check. + // We've always assumed Android is ARMv7 too. + if gohostarch == "arm" && goarch == "arm" && goos == gohostos && goos != "windows" && goos != "android" { + // Try to exec ourselves in a mode to detect VFP support. + // Seeing how far it gets determines which instructions failed. + // The test is OS-agnostic. + out := run("", 0, os.Args[0], "-check-goarm") + v1ok := strings.Contains(out, "VFPv1 OK.") + v3ok := strings.Contains(out, "VFPv3 OK.") + if v1ok && v3ok { + return "7" + } + if v1ok { + return "6" + } + return "5" + } + + // Otherwise, in the absence of local information, assume GOARM=7. + // + // We used to assume GOARM=5 in certain contexts but not others, + // which produced inconsistent results. For example if you cross-compiled + // for linux/arm from a windows/amd64 machine, you got GOARM=7 binaries, + // but if you cross-compiled for linux/arm from a linux/amd64 machine, + // you got GOARM=5 binaries. Now the default is independent of the + // host operating system, for better reproducibility of builds. 
+ return "7" +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +// elfIsLittleEndian detects if the ELF file is little endian. +func elfIsLittleEndian(fn string) bool { + // read the ELF file header to determine the endianness without using the + // debug/elf package. + file, err := os.Open(fn) + if err != nil { + fatalf("failed to open file to determine endianness: %v", err) + } + defer file.Close() + var hdr [16]byte + if _, err := io.ReadFull(file, hdr[:]); err != nil { + fatalf("failed to read ELF header to determine endianness: %v", err) + } + // hdr[5] is EI_DATA byte, 1 is ELFDATA2LSB and 2 is ELFDATA2MSB + switch hdr[5] { + default: + fatalf("unknown ELF endianness of %s: EI_DATA = %d", fn, hdr[5]) + case 1: + return true + case 2: + return false + } + panic("unreachable") +} + +// count is a flag.Value that is like a flag.Bool and a flag.Int. +// If used as -name, it increments the count, but -name=x sets the count. +// Used for verbose flag -v. +type count int + +func (c *count) String() string { + return fmt.Sprint(int(*c)) +} + +func (c *count) Set(s string) error { + switch s { + case "true": + *c++ + case "false": + *c = 0 + default: + n, err := strconv.Atoi(s) + if err != nil { + return fmt.Errorf("invalid count %q", s) + } + *c = count(n) + } + return nil +} + +func (c *count) IsBoolFlag() bool { + return true +} + +func xflagparse(maxargs int) { + flag.Var((*count)(&vflag), "v", "verbosity") + flag.Parse() + if maxargs >= 0 && flag.NArg() > maxargs { + flag.Usage() + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/util_gc.go b/platform/dbops/binaries/go/go/src/cmd/dist/util_gc.go new file mode 100644 index 0000000000000000000000000000000000000000..6efdf23e6049ebd290d83c464af15482f63c5717 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/util_gc.go @@ -0,0 +1,20 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +package main + +// useVFPv1 tries to execute one VFPv1 instruction on ARM. +// It will crash the current process if VFPv1 is missing. +func useVFPv1() + +// useVFPv3 tries to execute one VFPv3 instruction on ARM. +// It will crash the current process if VFPv3 is missing. +func useVFPv3() + +// useARMv6K tries to run ARMv6K instructions on ARM. +// It will crash the current process if it doesn't implement +// ARMv6K or above. +func useARMv6K() diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/util_gccgo.go b/platform/dbops/binaries/go/go/src/cmd/dist/util_gccgo.go new file mode 100644 index 0000000000000000000000000000000000000000..2f7af7ed6628b3fce1e8511669ea1df476a4e1da --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/util_gccgo.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gccgo + +package main + +func useVFPv1() {} + +func useVFPv3() {} + +func useARMv6K() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/vfp_arm.s b/platform/dbops/binaries/go/go/src/cmd/dist/vfp_arm.s new file mode 100644 index 0000000000000000000000000000000000000000..37fb4061af9122f4da626553cdfdaa52c7260862 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/vfp_arm.s @@ -0,0 +1,26 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build gc + +#include "textflag.h" + +// try to run "vmov.f64 d0, d0" instruction +TEXT ·useVFPv1(SB),NOSPLIT,$0 + WORD $0xeeb00b40 // vmov.f64 d0, d0 + RET + +// try to run VFPv3-only "vmov.f64 d0, #112" instruction +TEXT ·useVFPv3(SB),NOSPLIT,$0 + WORD $0xeeb70b00 // vmov.f64 d0, #112 + RET + +// try to run ARMv6K (or above) "ldrexd" instruction +TEXT ·useARMv6K(SB),NOSPLIT,$32 + MOVW R13, R2 + BIC $15, R13 + WORD $0xe1bd0f9f // ldrexd r0, r1, [sp] + WORD $0xf57ff01f // clrex + MOVW R2, R13 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/dist/vfp_default.s b/platform/dbops/binaries/go/go/src/cmd/dist/vfp_default.s new file mode 100644 index 0000000000000000000000000000000000000000..a766edac286e70cb70fc4f07094f61f94602042c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/dist/vfp_default.s @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !arm + +#include "textflag.h" + +TEXT ·useVFPv1(SB),NOSPLIT,$0 + RET + +TEXT ·useVFPv3(SB),NOSPLIT,$0 + RET + +TEXT ·useARMv6K(SB),NOSPLIT,$0 + RET diff --git a/platform/dbops/binaries/go/go/src/cmd/distpack/archive.go b/platform/dbops/binaries/go/go/src/cmd/distpack/archive.go new file mode 100644 index 0000000000000000000000000000000000000000..e52dae13a7e45dfa51310a9f7fd5bc1b5d262414 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/distpack/archive.go @@ -0,0 +1,227 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "io/fs" + "log" + "os" + "path" + "path/filepath" + "sort" + "strings" + "time" +) + +// An Archive describes an archive to write: a collection of files. +// Directories are implied by the files and not explicitly listed. 
+type Archive struct { + Files []File +} + +// A File describes a single file to write to an archive. +type File struct { + Name string // name in archive + Time time.Time // modification time + Mode fs.FileMode + Size int64 + Src string // source file in OS file system +} + +// Info returns a FileInfo about the file, for use with tar.FileInfoHeader +// and zip.FileInfoHeader. +func (f *File) Info() fs.FileInfo { + return fileInfo{f} +} + +// A fileInfo is an implementation of fs.FileInfo describing a File. +type fileInfo struct { + f *File +} + +func (i fileInfo) Name() string { return path.Base(i.f.Name) } +func (i fileInfo) ModTime() time.Time { return i.f.Time } +func (i fileInfo) Mode() fs.FileMode { return i.f.Mode } +func (i fileInfo) IsDir() bool { return i.f.Mode&fs.ModeDir != 0 } +func (i fileInfo) Size() int64 { return i.f.Size } +func (i fileInfo) Sys() any { return nil } + +func (i fileInfo) String() string { + return fs.FormatFileInfo(i) +} + +// NewArchive returns a new Archive containing all the files in the directory dir. +// The archive can be amended afterward using methods like Add and Filter. +func NewArchive(dir string) (*Archive, error) { + a := new(Archive) + err := fs.WalkDir(os.DirFS(dir), ".", func(name string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() { + return nil + } + info, err := d.Info() + if err != nil { + return err + } + a.Add(name, filepath.Join(dir, name), info) + return nil + }) + if err != nil { + return nil, err + } + a.Sort() + return a, nil +} + +// Add adds a file with the given name and info to the archive. +// The content of the file comes from the operating system file src. +// After a sequence of one or more calls to Add, +// the caller should invoke Sort to re-sort the archive's files. 
+func (a *Archive) Add(name, src string, info fs.FileInfo) { + a.Files = append(a.Files, File{ + Name: name, + Time: info.ModTime(), + Mode: info.Mode(), + Size: info.Size(), + Src: src, + }) +} + +func nameLess(x, y string) bool { + for i := 0; i < len(x) && i < len(y); i++ { + if x[i] != y[i] { + // foo/bar/baz before foo/bar.go, because foo/bar is before foo/bar.go + if x[i] == '/' { + return true + } + if y[i] == '/' { + return false + } + return x[i] < y[i] + } + } + return len(x) < len(y) +} + +// Sort sorts the files in the archive. +// It is only necessary to call Sort after calling Add or RenameGoMod. +// NewArchive returns a sorted archive, and the other methods +// preserve the sorting of the archive. +func (a *Archive) Sort() { + sort.Slice(a.Files, func(i, j int) bool { + return nameLess(a.Files[i].Name, a.Files[j].Name) + }) +} + +// Clone returns a copy of the Archive. +// Method calls like Add and Filter invoked on the copy do not affect the original, +// nor do calls on the original affect the copy. +func (a *Archive) Clone() *Archive { + b := &Archive{ + Files: make([]File, len(a.Files)), + } + copy(b.Files, a.Files) + return b +} + +// AddPrefix adds a prefix to all file names in the archive. +func (a *Archive) AddPrefix(prefix string) { + for i := range a.Files { + a.Files[i].Name = path.Join(prefix, a.Files[i].Name) + } +} + +// Filter removes files from the archive for which keep(name) returns false. +func (a *Archive) Filter(keep func(name string) bool) { + files := a.Files[:0] + for _, f := range a.Files { + if keep(f.Name) { + files = append(files, f) + } + } + a.Files = files +} + +// SetMode changes the mode of every file in the archive +// to be mode(name, m), where m is the file's current mode. +func (a *Archive) SetMode(mode func(name string, m fs.FileMode) fs.FileMode) { + for i := range a.Files { + a.Files[i].Mode = mode(a.Files[i].Name, a.Files[i].Mode) + } +} + +// Remove removes files matching any of the patterns from the archive. 
+// The patterns use the syntax of path.Match, with an extension of allowing +// a leading **/ or trailing /**, which match any number of path elements +// (including no path elements) before or after the main match. +func (a *Archive) Remove(patterns ...string) { + a.Filter(func(name string) bool { + for _, pattern := range patterns { + match, err := amatch(pattern, name) + if err != nil { + log.Fatalf("archive remove: %v", err) + } + if match { + return false + } + } + return true + }) +} + +// SetTime sets the modification time of all files in the archive to t. +func (a *Archive) SetTime(t time.Time) { + for i := range a.Files { + a.Files[i].Time = t + } +} + +// RenameGoMod renames the go.mod files in the archive to _go.mod, +// for use with the module form, which cannot contain other go.mod files. +func (a *Archive) RenameGoMod() { + for i, f := range a.Files { + if strings.HasSuffix(f.Name, "/go.mod") { + a.Files[i].Name = strings.TrimSuffix(f.Name, "go.mod") + "_go.mod" + } + } +} + +func amatch(pattern, name string) (bool, error) { + // firstN returns the prefix of name corresponding to the first n path elements. + // If n <= 0, firstN returns the entire name. + firstN := func(name string, n int) string { + for i := 0; i < len(name); i++ { + if name[i] == '/' { + if n--; n == 0 { + return name[:i] + } + } + } + return name + } + + // lastN returns the suffix of name corresponding to the last n path elements. + // If n <= 0, lastN returns the entire name. 
+ lastN := func(name string, n int) string { + for i := len(name) - 1; i >= 0; i-- { + if name[i] == '/' { + if n--; n == 0 { + return name[i+1:] + } + } + } + return name + } + + if p, ok := strings.CutPrefix(pattern, "**/"); ok { + return path.Match(p, lastN(name, 1+strings.Count(p, "/"))) + } + if p, ok := strings.CutSuffix(pattern, "/**"); ok { + return path.Match(p, firstN(name, 1+strings.Count(p, "/"))) + } + return path.Match(pattern, name) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/distpack/archive_test.go b/platform/dbops/binaries/go/go/src/cmd/distpack/archive_test.go new file mode 100644 index 0000000000000000000000000000000000000000..620b970aeb262ed1c32658e0346a938050f3e07a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/distpack/archive_test.go @@ -0,0 +1,39 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "testing" + +var amatchTests = []struct { + pattern string + name string + ok bool +}{ + {"a", "a", true}, + {"a", "b", false}, + {"a/**", "a", true}, + {"a/**", "b", false}, + {"a/**", "a/b", true}, + {"a/**", "b/b", false}, + {"a/**", "a/b/c/d/e/f", true}, + {"a/**", "z/a/b/c/d/e/f", false}, + {"**/a", "a", true}, + {"**/a", "b", false}, + {"**/a", "x/a", true}, + {"**/a", "x/a/b", false}, + {"**/a", "x/y/z/a", true}, + {"**/a", "x/y/z/a/b", false}, + + {"go/pkg/tool/*/compile", "go/pkg/tool/darwin_amd64/compile", true}, +} + +func TestAmatch(t *testing.T) { + for _, tt := range amatchTests { + ok, err := amatch(tt.pattern, tt.name) + if ok != tt.ok || err != nil { + t.Errorf("amatch(%q, %q) = %v, %v, want %v, nil", tt.pattern, tt.name, ok, err, tt.ok) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/distpack/pack.go b/platform/dbops/binaries/go/go/src/cmd/distpack/pack.go new file mode 100644 index 
0000000000000000000000000000000000000000..cf507edb4de229335489002c72939d4f80dd8bbe --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/distpack/pack.go @@ -0,0 +1,434 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Distpack creates the tgz and zip files for a Go distribution. +// It writes into GOROOT/pkg/distpack: +// +// - a binary distribution (tgz or zip) for the current GOOS and GOARCH +// - a source distribution that is independent of GOOS/GOARCH +// - the module mod, info, and zip files for a distribution in module form +// (as used by GOTOOLCHAIN support in the go command). +// +// Distpack is typically invoked by the -distpack flag to make.bash. +// A cross-compiled distribution for goos/goarch can be built using: +// +// GOOS=goos GOARCH=goarch ./make.bash -distpack +// +// To test that the module downloads are usable with the go command: +// +// ./make.bash -distpack +// mkdir -p /tmp/goproxy/golang.org/toolchain/ +// ln -sf $(pwd)/../pkg/distpack /tmp/goproxy/golang.org/toolchain/@v +// GOPROXY=file:///tmp/goproxy GOTOOLCHAIN=$(sed 1q ../VERSION) gotip version +// +// gotip can be replaced with an older released Go version once there is one. +// It just can't be the one make.bash built, because it knows it is already that +// version and will skip the download. 
+package main + +import ( + "archive/tar" + "archive/zip" + "compress/flate" + "compress/gzip" + "crypto/sha256" + "flag" + "fmt" + "io" + "io/fs" + "log" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "time" +) + +func usage() { + fmt.Fprintf(os.Stderr, "usage: distpack\n") + os.Exit(2) +} + +const ( + modPath = "golang.org/toolchain" + modVersionPrefix = "v0.0.1" +) + +var ( + goroot string + gohostos string + gohostarch string + goos string + goarch string +) + +func main() { + log.SetPrefix("distpack: ") + log.SetFlags(0) + flag.Usage = usage + flag.Parse() + if flag.NArg() != 0 { + usage() + } + + // Load context. + goroot = runtime.GOROOT() + if goroot == "" { + log.Fatalf("missing $GOROOT") + } + gohostos = runtime.GOOS + gohostarch = runtime.GOARCH + goos = os.Getenv("GOOS") + if goos == "" { + goos = gohostos + } + goarch = os.Getenv("GOARCH") + if goarch == "" { + goarch = gohostarch + } + goosUnderGoarch := goos + "_" + goarch + goosDashGoarch := goos + "-" + goarch + exe := "" + if goos == "windows" { + exe = ".exe" + } + version, versionTime := readVERSION(goroot) + + // Start with files from GOROOT, filtering out non-distribution files. + base, err := NewArchive(goroot) + if err != nil { + log.Fatal(err) + } + base.SetTime(versionTime) + base.SetMode(mode) + base.Remove( + ".git/**", + ".gitattributes", + ".github/**", + ".gitignore", + "VERSION.cache", + "misc/cgo/*/_obj/**", + "**/.DS_Store", + "**/*.exe~", // go.dev/issue/23894 + // Generated during make.bat/make.bash. + "src/cmd/dist/dist", + "src/cmd/dist/dist.exe", + ) + + // The source distribution removes files generated during the release build. + // See ../dist/build.go's deptab. + srcArch := base.Clone() + srcArch.Remove( + "bin/**", + "pkg/**", + + // Generated during cmd/dist. See ../dist/build.go:/gentab. 
+ "src/cmd/go/internal/cfg/zdefaultcc.go", + "src/go/build/zcgo.go", + "src/runtime/internal/sys/zversion.go", + "src/time/tzdata/zzipdata.go", + + // Generated during cmd/dist by bootstrapBuildTools. + "src/cmd/cgo/zdefaultcc.go", + "src/cmd/internal/objabi/zbootstrap.go", + "src/internal/buildcfg/zbootstrap.go", + + // Generated by earlier versions of cmd/dist . + "src/cmd/go/internal/cfg/zosarch.go", + ) + srcArch.AddPrefix("go") + testSrc(srcArch) + + // The binary distribution includes only a subset of bin and pkg. + binArch := base.Clone() + binArch.Filter(func(name string) bool { + // Discard bin/ for now, will add back later. + if strings.HasPrefix(name, "bin/") { + return false + } + // Discard most of pkg. + if strings.HasPrefix(name, "pkg/") { + // Keep pkg/include. + if strings.HasPrefix(name, "pkg/include/") { + return true + } + // Discard other pkg except pkg/tool. + if !strings.HasPrefix(name, "pkg/tool/") { + return false + } + // Inside pkg/tool, keep only $GOOS_$GOARCH. + if !strings.HasPrefix(name, "pkg/tool/"+goosUnderGoarch+"/") { + return false + } + // Inside pkg/tool/$GOOS_$GOARCH, discard helper tools. + switch strings.TrimSuffix(path.Base(name), ".exe") { + case "api", "dist", "distpack", "metadata": + return false + } + } + return true + }) + + // Add go and gofmt to bin, using cross-compiled binaries + // if this is a cross-compiled distribution. 
+ binExes := []string{ + "go", + "gofmt", + } + crossBin := "bin" + if goos != gohostos || goarch != gohostarch { + crossBin = "bin/" + goosUnderGoarch + } + for _, b := range binExes { + name := "bin/" + b + exe + src := filepath.Join(goroot, crossBin, b+exe) + info, err := os.Stat(src) + if err != nil { + log.Fatal(err) + } + binArch.Add(name, src, info) + } + binArch.Sort() + binArch.SetTime(versionTime) // fix added files + binArch.SetMode(mode) // fix added files + + zipArch := binArch.Clone() + zipArch.AddPrefix("go") + testZip(zipArch) + + // The module distribution is the binary distribution with unnecessary files removed + // and file names using the necessary prefix for the module. + modArch := binArch.Clone() + modArch.Remove( + "api/**", + "doc/**", + "misc/**", + "test/**", + ) + modVers := modVersionPrefix + "-" + version + "." + goosDashGoarch + modArch.AddPrefix(modPath + "@" + modVers) + modArch.RenameGoMod() + modArch.Sort() + testMod(modArch) + + // distpack returns the full path to name in the distpack directory. + distpack := func(name string) string { + return filepath.Join(goroot, "pkg/distpack", name) + } + if err := os.MkdirAll(filepath.Join(goroot, "pkg/distpack"), 0777); err != nil { + log.Fatal(err) + } + + writeTgz(distpack(version+".src.tar.gz"), srcArch) + + if goos == "windows" { + writeZip(distpack(version+"."+goos+"-"+goarch+".zip"), zipArch) + } else { + writeTgz(distpack(version+"."+goos+"-"+goarch+".tar.gz"), zipArch) + } + + writeZip(distpack(modVers+".zip"), modArch) + writeFile(distpack(modVers+".mod"), + []byte(fmt.Sprintf("module %s\n", modPath))) + writeFile(distpack(modVers+".info"), + []byte(fmt.Sprintf("{%q:%q, %q:%q}\n", + "Version", modVers, + "Time", versionTime.Format(time.RFC3339)))) +} + +// mode computes the mode for the given file name. 
+func mode(name string, _ fs.FileMode) fs.FileMode { + if strings.HasPrefix(name, "bin/") || + strings.HasPrefix(name, "pkg/tool/") || + strings.HasSuffix(name, ".bash") || + strings.HasSuffix(name, ".sh") || + strings.HasSuffix(name, ".pl") || + strings.HasSuffix(name, ".rc") { + return 0o755 + } else if ok, _ := amatch("**/go_?*_?*_exec", name); ok { + return 0o755 + } + return 0o644 +} + +// readVERSION reads the VERSION file. +// The first line of the file is the Go version. +// Additional lines are 'key value' pairs setting other data. +// The only valid key at the moment is 'time', which sets the modification time for file archives. +func readVERSION(goroot string) (version string, t time.Time) { + data, err := os.ReadFile(filepath.Join(goroot, "VERSION")) + if err != nil { + log.Fatal(err) + } + version, rest, _ := strings.Cut(string(data), "\n") + for _, line := range strings.Split(rest, "\n") { + f := strings.Fields(line) + if len(f) == 0 { + continue + } + switch f[0] { + default: + log.Fatalf("VERSION: unexpected line: %s", line) + case "time": + if len(f) != 2 { + log.Fatalf("VERSION: unexpected time line: %s", line) + } + t, err = time.ParseInLocation(time.RFC3339, f[1], time.UTC) + if err != nil { + log.Fatalf("VERSION: bad time: %s", err) + } + } + } + return version, t +} + +// writeFile writes a file with the given name and data or fatals. +func writeFile(name string, data []byte) { + if err := os.WriteFile(name, data, 0666); err != nil { + log.Fatal(err) + } + reportHash(name) +} + +// check panics if err is not nil. Otherwise it returns x. +// It is only meant to be used in a function that has deferred +// a function to recover appropriately from the panic. +func check[T any](x T, err error) T { + check1(err) + return x +} + +// check1 panics if err is not nil. +// It is only meant to be used in a function that has deferred +// a function to recover appropriately from the panic. 
+func check1(err error) { + if err != nil { + panic(err) + } +} + +// writeTgz writes the archive in tgz form to the file named name. +func writeTgz(name string, a *Archive) { + out, err := os.Create(name) + if err != nil { + log.Fatal(err) + } + + var f File + defer func() { + if err := recover(); err != nil { + extra := "" + if f.Name != "" { + extra = " " + f.Name + } + log.Fatalf("writing %s%s: %v", name, extra, err) + } + }() + + zw := check(gzip.NewWriterLevel(out, gzip.BestCompression)) + tw := tar.NewWriter(zw) + + // Find the mode and mtime to use for directory entries, + // based on the mode and mtime of the first file we see. + // We know that modes and mtimes are uniform across the archive. + var dirMode fs.FileMode + var mtime time.Time + for _, f := range a.Files { + dirMode = fs.ModeDir | f.Mode | (f.Mode&0444)>>2 // copy r bits down to x bits + mtime = f.Time + break + } + + // mkdirAll ensures that the tar file contains directory + // entries for dir and all its parents. Some programs reading + // these tar files expect that. See go.dev/issue/61862. + haveDir := map[string]bool{".": true} + var mkdirAll func(string) + mkdirAll = func(dir string) { + if dir == "/" { + panic("mkdirAll /") + } + if haveDir[dir] { + return + } + haveDir[dir] = true + mkdirAll(path.Dir(dir)) + df := &File{ + Name: dir + "/", + Time: mtime, + Mode: dirMode, + } + h := check(tar.FileInfoHeader(df.Info(), "")) + h.Name = dir + "/" + if err := tw.WriteHeader(h); err != nil { + panic(err) + } + } + + for _, f = range a.Files { + h := check(tar.FileInfoHeader(f.Info(), "")) + mkdirAll(path.Dir(f.Name)) + h.Name = f.Name + if err := tw.WriteHeader(h); err != nil { + panic(err) + } + r := check(os.Open(f.Src)) + check(io.Copy(tw, r)) + check1(r.Close()) + } + f.Name = "" + check1(tw.Close()) + check1(zw.Close()) + check1(out.Close()) + reportHash(name) +} + +// writeZip writes the archive in zip form to the file named name. 
+func writeZip(name string, a *Archive) { + out, err := os.Create(name) + if err != nil { + log.Fatal(err) + } + + var f File + defer func() { + if err := recover(); err != nil { + extra := "" + if f.Name != "" { + extra = " " + f.Name + } + log.Fatalf("writing %s%s: %v", name, extra, err) + } + }() + + zw := zip.NewWriter(out) + zw.RegisterCompressor(zip.Deflate, func(out io.Writer) (io.WriteCloser, error) { + return flate.NewWriter(out, flate.BestCompression) + }) + for _, f = range a.Files { + h := check(zip.FileInfoHeader(f.Info())) + h.Name = f.Name + h.Method = zip.Deflate + w := check(zw.CreateHeader(h)) + r := check(os.Open(f.Src)) + check(io.Copy(w, r)) + check1(r.Close()) + } + f.Name = "" + check1(zw.Close()) + check1(out.Close()) + reportHash(name) +} + +func reportHash(name string) { + f, err := os.Open(name) + if err != nil { + log.Fatal(err) + } + h := sha256.New() + io.Copy(h, f) + f.Close() + fmt.Printf("distpack: %x %s\n", h.Sum(nil)[:8], filepath.Base(name)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/distpack/test.go b/platform/dbops/binaries/go/go/src/cmd/distpack/test.go new file mode 100644 index 0000000000000000000000000000000000000000..22b54b5fe123bba472ba7e9c6162486e0b6d5a6d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/distpack/test.go @@ -0,0 +1,170 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains tests applied to the archives before they are written. 
+ +package main + +import ( + "bytes" + "fmt" + "log" + "os" + "path" + "path/filepath" + "strings" +) + +type testRule struct { + name string + goos string + exclude bool +} + +var srcRules = []testRule{ + {name: "go/VERSION"}, + {name: "go/src/cmd/go/main.go"}, + {name: "go/src/bytes/bytes.go"}, + {name: "**/.DS_Store", exclude: true}, + {name: "go/.git", exclude: true}, + {name: "go/.gitattributes", exclude: true}, + {name: "go/.github", exclude: true}, + {name: "go/VERSION.cache", exclude: true}, + {name: "go/bin/**", exclude: true}, + {name: "go/pkg/**", exclude: true}, + {name: "go/src/cmd/dist/dist", exclude: true}, + {name: "go/src/cmd/dist/dist.exe", exclude: true}, + {name: "go/src/runtime/internal/sys/zversion.go", exclude: true}, + {name: "go/src/time/tzdata/zzipdata.go", exclude: true}, +} + +var zipRules = []testRule{ + {name: "go/VERSION"}, + {name: "go/src/cmd/go/main.go"}, + {name: "go/src/bytes/bytes.go"}, + + {name: "**/.DS_Store", exclude: true}, + {name: "go/.git", exclude: true}, + {name: "go/.gitattributes", exclude: true}, + {name: "go/.github", exclude: true}, + {name: "go/VERSION.cache", exclude: true}, + {name: "go/bin", exclude: true}, + {name: "go/pkg", exclude: true}, + {name: "go/src/cmd/dist/dist", exclude: true}, + {name: "go/src/cmd/dist/dist.exe", exclude: true}, + + {name: "go/bin/go", goos: "linux"}, + {name: "go/bin/go", goos: "darwin"}, + {name: "go/bin/go", goos: "windows", exclude: true}, + {name: "go/bin/go.exe", goos: "windows"}, + {name: "go/bin/gofmt", goos: "linux"}, + {name: "go/bin/gofmt", goos: "darwin"}, + {name: "go/bin/gofmt", goos: "windows", exclude: true}, + {name: "go/bin/gofmt.exe", goos: "windows"}, + {name: "go/pkg/tool/*/compile", goos: "linux"}, + {name: "go/pkg/tool/*/compile", goos: "darwin"}, + {name: "go/pkg/tool/*/compile", goos: "windows", exclude: true}, + {name: "go/pkg/tool/*/compile.exe", goos: "windows"}, +} + +var modRules = []testRule{ + {name: "golang.org/toolchain@*/VERSION"}, + {name: 
"golang.org/toolchain@*/src/cmd/go/main.go"}, + {name: "golang.org/toolchain@*/src/bytes/bytes.go"}, + + {name: "**/.DS_Store", exclude: true}, + {name: "golang.org/toolchain@*/.git", exclude: true}, + {name: "golang.org/toolchain@*/.gitattributes", exclude: true}, + {name: "golang.org/toolchain@*/.github", exclude: true}, + {name: "golang.org/toolchain@*/VERSION.cache", exclude: true}, + {name: "golang.org/toolchain@*/bin", exclude: true}, + {name: "golang.org/toolchain@*/pkg", exclude: true}, + {name: "golang.org/toolchain@*/src/cmd/dist/dist", exclude: true}, + {name: "golang.org/toolchain@*/src/cmd/dist/dist.exe", exclude: true}, + + {name: "golang.org/toolchain@*/bin/go", goos: "linux"}, + {name: "golang.org/toolchain@*/bin/go", goos: "darwin"}, + {name: "golang.org/toolchain@*/bin/go", goos: "windows", exclude: true}, + {name: "golang.org/toolchain@*/bin/go.exe", goos: "windows"}, + {name: "golang.org/toolchain@*/bin/gofmt", goos: "linux"}, + {name: "golang.org/toolchain@*/bin/gofmt", goos: "darwin"}, + {name: "golang.org/toolchain@*/bin/gofmt", goos: "windows", exclude: true}, + {name: "golang.org/toolchain@*/bin/gofmt.exe", goos: "windows"}, + {name: "golang.org/toolchain@*/pkg/tool/*/compile", goos: "linux"}, + {name: "golang.org/toolchain@*/pkg/tool/*/compile", goos: "darwin"}, + {name: "golang.org/toolchain@*/pkg/tool/*/compile", goos: "windows", exclude: true}, + {name: "golang.org/toolchain@*/pkg/tool/*/compile.exe", goos: "windows"}, + + // go.mod are renamed to _go.mod. + {name: "**/go.mod", exclude: true}, + {name: "**/_go.mod"}, +} + +func testSrc(a *Archive) { + test("source", a, srcRules) + + // Check that no generated files slip in, even if new ones are added. 
+ for _, f := range a.Files { + if strings.HasPrefix(path.Base(f.Name), "z") { + data, err := os.ReadFile(filepath.Join(goroot, strings.TrimPrefix(f.Name, "go/"))) + if err != nil { + log.Fatalf("checking source archive: %v", err) + } + if strings.Contains(string(data), "generated by go tool dist; DO NOT EDIT") { + log.Fatalf("unexpected source archive file: %s (generated by dist)", f.Name) + } + } + } +} + +func testZip(a *Archive) { test("binary", a, zipRules) } +func testMod(a *Archive) { test("module", a, modRules) } + +func test(kind string, a *Archive, rules []testRule) { + ok := true + have := make([]bool, len(rules)) + for _, f := range a.Files { + for i, r := range rules { + if r.goos != "" && r.goos != goos { + continue + } + match, err := amatch(r.name, f.Name) + if err != nil { + log.Fatal(err) + } + if match { + if r.exclude { + ok = false + if !have[i] { + log.Printf("unexpected %s archive file: %s", kind, f.Name) + have[i] = true // silence future prints for excluded directory + } + } else { + have[i] = true + } + } + } + } + missing := false + for i, r := range rules { + if r.goos != "" && r.goos != goos { + continue + } + if !r.exclude && !have[i] { + missing = true + log.Printf("missing %s archive file: %s", kind, r.name) + } + } + if missing { + ok = false + var buf bytes.Buffer + for _, f := range a.Files { + fmt.Fprintf(&buf, "\n\t%s", f.Name) + } + log.Printf("archive contents: %d files%s", len(a.Files), buf.Bytes()) + } + if !ok { + log.Fatalf("bad archive file") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/doc/dirs.go b/platform/dbops/binaries/go/go/src/cmd/doc/dirs.go new file mode 100644 index 0000000000000000000000000000000000000000..60ad6d30e6a99b89307db38f902abee15408d19c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/doc/dirs.go @@ -0,0 +1,320 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "sync" + + "golang.org/x/mod/semver" +) + +// A Dir describes a directory holding code by specifying +// the expected import path and the file system directory. +type Dir struct { + importPath string // import path for that dir + dir string // file system directory + inModule bool +} + +// Dirs is a structure for scanning the directory tree. +// Its Next method returns the next Go source directory it finds. +// Although it can be used to scan the tree multiple times, it +// only walks the tree once, caching the data it finds. +type Dirs struct { + scan chan Dir // Directories generated by walk. + hist []Dir // History of reported Dirs. + offset int // Counter for Next. +} + +var dirs Dirs + +// dirsInit starts the scanning of package directories in GOROOT and GOPATH. Any +// extra paths passed to it are included in the channel. +func dirsInit(extra ...Dir) { + if buildCtx.GOROOT == "" { + stdout, err := exec.Command("go", "env", "GOROOT").Output() + if err != nil { + if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { + log.Fatalf("failed to determine GOROOT: $GOROOT is not set and 'go env GOROOT' failed:\n%s", ee.Stderr) + } + log.Fatalf("failed to determine GOROOT: $GOROOT is not set and could not run 'go env GOROOT':\n\t%s", err) + } + buildCtx.GOROOT = string(bytes.TrimSpace(stdout)) + } + + dirs.hist = make([]Dir, 0, 1000) + dirs.hist = append(dirs.hist, extra...) + dirs.scan = make(chan Dir) + go dirs.walk(codeRoots()) +} + +// goCmd returns the "go" command path corresponding to buildCtx.GOROOT. +func goCmd() string { + if buildCtx.GOROOT == "" { + return "go" + } + return filepath.Join(buildCtx.GOROOT, "bin", "go") +} + +// Reset puts the scan back at the beginning. +func (d *Dirs) Reset() { + d.offset = 0 +} + +// Next returns the next directory in the scan. The boolean +// is false when the scan is done. 
+func (d *Dirs) Next() (Dir, bool) { + if d.offset < len(d.hist) { + dir := d.hist[d.offset] + d.offset++ + return dir, true + } + dir, ok := <-d.scan + if !ok { + return Dir{}, false + } + d.hist = append(d.hist, dir) + d.offset++ + return dir, ok +} + +// walk walks the trees in GOROOT and GOPATH. +func (d *Dirs) walk(roots []Dir) { + for _, root := range roots { + d.bfsWalkRoot(root) + } + close(d.scan) +} + +// bfsWalkRoot walks a single directory hierarchy in breadth-first lexical order. +// Each Go source directory it finds is delivered on d.scan. +func (d *Dirs) bfsWalkRoot(root Dir) { + root.dir = filepath.Clean(root.dir) // because filepath.Join will do it anyway + + // this is the queue of directories to examine in this pass. + this := []string{} + // next is the queue of directories to examine in the next pass. + next := []string{root.dir} + + for len(next) > 0 { + this, next = next, this[0:0] + for _, dir := range this { + fd, err := os.Open(dir) + if err != nil { + log.Print(err) + continue + } + entries, err := fd.Readdir(0) + fd.Close() + if err != nil { + log.Print(err) + continue + } + hasGoFiles := false + for _, entry := range entries { + name := entry.Name() + // For plain files, remember if this directory contains any .go + // source files, but ignore them otherwise. + if !entry.IsDir() { + if !hasGoFiles && strings.HasSuffix(name, ".go") { + hasGoFiles = true + } + continue + } + // Entry is a directory. + + // The go tool ignores directories starting with ., _, or named "testdata". + if name[0] == '.' || name[0] == '_' || name == "testdata" { + continue + } + // When in a module, ignore vendor directories and stop at module boundaries. + if root.inModule { + if name == "vendor" { + continue + } + if fi, err := os.Stat(filepath.Join(dir, name, "go.mod")); err == nil && !fi.IsDir() { + continue + } + } + // Remember this (fully qualified) directory for the next pass. 
+ next = append(next, filepath.Join(dir, name)) + } + if hasGoFiles { + // It's a candidate. + importPath := root.importPath + if len(dir) > len(root.dir) { + if importPath != "" { + importPath += "/" + } + importPath += filepath.ToSlash(dir[len(root.dir)+1:]) + } + d.scan <- Dir{importPath, dir, root.inModule} + } + } + + } +} + +var testGOPATH = false // force GOPATH use for testing + +// codeRoots returns the code roots to search for packages. +// In GOPATH mode this is GOROOT/src and GOPATH/src, with empty import paths. +// In module mode, this is each module root, with an import path set to its module path. +func codeRoots() []Dir { + codeRootsCache.once.Do(func() { + codeRootsCache.roots = findCodeRoots() + }) + return codeRootsCache.roots +} + +var codeRootsCache struct { + once sync.Once + roots []Dir +} + +var usingModules bool + +func findCodeRoots() []Dir { + var list []Dir + if !testGOPATH { + // Check for use of modules by 'go env GOMOD', + // which reports a go.mod file path if modules are enabled. + stdout, _ := exec.Command(goCmd(), "env", "GOMOD").Output() + gomod := string(bytes.TrimSpace(stdout)) + + usingModules = len(gomod) > 0 + if usingModules && buildCtx.GOROOT != "" { + list = append(list, + Dir{dir: filepath.Join(buildCtx.GOROOT, "src"), inModule: true}, + Dir{importPath: "cmd", dir: filepath.Join(buildCtx.GOROOT, "src", "cmd"), inModule: true}) + } + + if gomod == os.DevNull { + // Modules are enabled, but the working directory is outside any module. + // We can still access std, cmd, and packages specified as source files + // on the command line, but there are no module roots. + // Avoid 'go list -m all' below, since it will not work. 
+ return list + } + } + + if !usingModules { + if buildCtx.GOROOT != "" { + list = append(list, Dir{dir: filepath.Join(buildCtx.GOROOT, "src")}) + } + for _, root := range splitGopath() { + list = append(list, Dir{dir: filepath.Join(root, "src")}) + } + return list + } + + // Find module root directories from go list. + // Eventually we want golang.org/x/tools/go/packages + // to handle the entire file system search and become go/packages, + // but for now enumerating the module roots lets us fit modules + // into the current code with as few changes as possible. + mainMod, vendorEnabled, err := vendorEnabled() + if err != nil { + return list + } + if vendorEnabled { + // Add the vendor directory to the search path ahead of "std". + // That way, if the main module *is* "std", we will identify the path + // without the "vendor/" prefix before the one with that prefix. + list = append([]Dir{{dir: filepath.Join(mainMod.Dir, "vendor"), inModule: false}}, list...) + if mainMod.Path != "std" { + list = append(list, Dir{importPath: mainMod.Path, dir: mainMod.Dir, inModule: true}) + } + return list + } + + cmd := exec.Command(goCmd(), "list", "-m", "-f={{.Path}}\t{{.Dir}}", "all") + cmd.Stderr = os.Stderr + out, _ := cmd.Output() + for _, line := range strings.Split(string(out), "\n") { + path, dir, _ := strings.Cut(line, "\t") + if dir != "" { + list = append(list, Dir{importPath: path, dir: dir, inModule: true}) + } + } + + return list +} + +// The functions below are derived from x/tools/internal/imports at CL 203017. + +type moduleJSON struct { + Path, Dir, GoVersion string +} + +var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) + +// vendorEnabled indicates if vendoring is enabled. 
+// Inspired by setDefaultBuildMod in modload/init.go +func vendorEnabled() (*moduleJSON, bool, error) { + mainMod, go114, err := getMainModuleAnd114() + if err != nil { + return nil, false, err + } + + stdout, _ := exec.Command(goCmd(), "env", "GOFLAGS").Output() + goflags := string(bytes.TrimSpace(stdout)) + matches := modFlagRegexp.FindStringSubmatch(goflags) + var modFlag string + if len(matches) != 0 { + modFlag = matches[1] + } + if modFlag != "" { + // Don't override an explicit '-mod=' argument. + return mainMod, modFlag == "vendor", nil + } + if mainMod == nil || !go114 { + return mainMod, false, nil + } + // Check 1.14's automatic vendor mode. + if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { + if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { + // The Go version is at least 1.14, and a vendor directory exists. + // Set -mod=vendor by default. + return mainMod, true, nil + } + } + return mainMod, false, nil +} + +// getMainModuleAnd114 gets the main module's information and whether the +// go command in use is 1.14+. This is the information needed to figure out +// if vendoring should be enabled. +func getMainModuleAnd114() (*moduleJSON, bool, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoVersion}} +{{range context.ReleaseTags}}{{if eq . 
"go1.14"}}{{.}}{{end}}{{end}} +` + cmd := exec.Command(goCmd(), "list", "-m", "-f", format) + cmd.Stderr = os.Stderr + stdout, err := cmd.Output() + if err != nil { + return nil, false, nil + } + lines := strings.Split(string(stdout), "\n") + if len(lines) < 5 { + return nil, false, fmt.Errorf("unexpected stdout: %q", stdout) + } + mod := &moduleJSON{ + Path: lines[0], + Dir: lines[1], + GoVersion: lines[2], + } + return mod, lines[3] == "go1.14", nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/doc/doc_test.go b/platform/dbops/binaries/go/go/src/cmd/doc/doc_test.go new file mode 100644 index 0000000000000000000000000000000000000000..354adc87af1c49fc92d7235f9593aa3a26e8c23b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/doc/doc_test.go @@ -0,0 +1,1113 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "flag" + "go/build" + "internal/testenv" + "log" + "os" + "path/filepath" + "regexp" + "runtime" + "strings" + "testing" +) + +func TestMain(m *testing.M) { + // Clear GOPATH so we don't access the user's own packages in the test. + buildCtx.GOPATH = "" + testGOPATH = true // force GOPATH mode; module test is in cmd/go/testdata/script/mod_doc.txt + + // Set GOROOT in case runtime.GOROOT is wrong (for example, if the test was + // built with -trimpath). dirsInit would identify it using 'go env GOROOT', + // but we can't be sure that the 'go' in $PATH is the right one either. + buildCtx.GOROOT = testenv.GOROOT(nil) + build.Default.GOROOT = testenv.GOROOT(nil) + + // Add $GOROOT/src/cmd/doc/testdata explicitly so we can access its contents in the test. + // Normally testdata directories are ignored, but sending it to dirs.scan directly is + // a hack that works around the check. 
+ testdataDir, err := filepath.Abs("testdata") + if err != nil { + panic(err) + } + dirsInit( + Dir{importPath: "testdata", dir: testdataDir}, + Dir{importPath: "testdata/nested", dir: filepath.Join(testdataDir, "nested")}, + Dir{importPath: "testdata/nested/nested", dir: filepath.Join(testdataDir, "nested", "nested")}) + + os.Exit(m.Run()) +} + +func maybeSkip(t *testing.T) { + if runtime.GOOS == "ios" { + t.Skip("iOS does not have a full file tree") + } +} + +type isDotSlashTest struct { + str string + result bool +} + +var isDotSlashTests = []isDotSlashTest{ + {``, false}, + {`x`, false}, + {`...`, false}, + {`.../`, false}, + {`...\`, false}, + + {`.`, true}, + {`./`, true}, + {`.\`, true}, + {`./x`, true}, + {`.\x`, true}, + + {`..`, true}, + {`../`, true}, + {`..\`, true}, + {`../x`, true}, + {`..\x`, true}, +} + +func TestIsDotSlashPath(t *testing.T) { + for _, test := range isDotSlashTests { + if result := isDotSlash(test.str); result != test.result { + t.Errorf("isDotSlash(%q) = %t; expected %t", test.str, result, test.result) + } + } +} + +type test struct { + name string + args []string // Arguments to "[go] doc". + yes []string // Regular expressions that should match. + no []string // Regular expressions that should not match. +} + +const p = "cmd/doc/testdata" + +var tests = []test{ + // Sanity check. + { + "sanity check", + []string{p}, + []string{`type ExportedType struct`}, + nil, + }, + + // Package dump includes import, package statement. + { + "package clause", + []string{p}, + []string{`package pkg.*cmd/doc/testdata`}, + nil, + }, + + // Constants. + // Package dump + { + "full package", + []string{p}, + []string{ + `Package comment`, + `const ExportedConstant = 1`, // Simple constant. + `const ConstOne = 1`, // First entry in constant block. + `const ConstFive ...`, // From block starting with unexported constant. + `var ExportedVariable = 1`, // Simple variable. + `var VarOne = 1`, // First entry in variable block. 
+ `func ExportedFunc\(a int\) bool`, // Function. + `func ReturnUnexported\(\) unexportedType`, // Function with unexported return type. + `type ExportedType struct{ ... }`, // Exported type. + `const ExportedTypedConstant ExportedType = iota`, // Typed constant. + `const ExportedTypedConstant_unexported unexportedType`, // Typed constant, exported for unexported type. + `const ConstLeft2 uint64 ...`, // Typed constant using unexported iota. + `const ConstGroup1 unexportedType = iota ...`, // Typed constant using unexported type. + `const ConstGroup4 ExportedType = ExportedType{}`, // Typed constant using exported type. + `const MultiLineConst = ...`, // Multi line constant. + `var MultiLineVar = map\[struct{ ... }\]struct{ ... }{ ... }`, // Multi line variable. + `func MultiLineFunc\(x interface{ ... }\) \(r struct{ ... }\)`, // Multi line function. + `var LongLine = newLongLine\(("someArgument[1-4]", ){4}...\)`, // Long list of arguments. + `type T1 = T2`, // Type alias + `type SimpleConstraint interface{ ... }`, + `type TildeConstraint interface{ ... }`, + `type StructConstraint interface{ ... }`, + }, + []string{ + `const internalConstant = 2`, // No internal constants. + `var internalVariable = 2`, // No internal variables. + `func internalFunc(a int) bool`, // No internal functions. + `Comment about exported constant`, // No comment for single constant. + `Comment about exported variable`, // No comment for single variable. + `Comment about block of constants`, // No comment for constant block. + `Comment about block of variables`, // No comment for variable block. + `Comment before ConstOne`, // No comment for first entry in constant block. + `Comment before VarOne`, // No comment for first entry in variable block. + `ConstTwo = 2`, // No second entry in constant block. + `VarTwo = 2`, // No second entry in variable block. + `VarFive = 5`, // From block starting with unexported variable. + `type unexportedType`, // No unexported type. 
+ `unexportedTypedConstant`, // No unexported typed constant. + `\bField`, // No fields. + `Method`, // No methods. + `someArgument[5-8]`, // No truncated arguments. + `type T1 T2`, // Type alias does not display as type declaration. + `ignore:directive`, // Directives should be dropped. + }, + }, + // Package dump -all + { + "full package", + []string{"-all", p}, + []string{ + `package pkg .*import`, + `Package comment`, + `CONSTANTS`, + `Comment before ConstOne`, + `ConstOne = 1`, + `ConstTwo = 2 // Comment on line with ConstTwo`, + `ConstFive`, + `ConstSix`, + `Const block where first entry is unexported`, + `ConstLeft2, constRight2 uint64`, + `constLeft3, ConstRight3`, + `ConstLeft4, ConstRight4`, + `Duplicate = iota`, + `const CaseMatch = 1`, + `const Casematch = 2`, + `const ExportedConstant = 1`, + `const MultiLineConst = `, + `MultiLineString1`, + `VARIABLES`, + `Comment before VarOne`, + `VarOne = 1`, + `Comment about block of variables`, + `VarFive = 5`, + `var ExportedVariable = 1`, + `var ExportedVarOfUnExported unexportedType`, + `var LongLine = newLongLine\(`, + `var MultiLineVar = map\[struct {`, + `FUNCTIONS`, + `func ExportedFunc\(a int\) bool`, + `Comment about exported function`, + `func MultiLineFunc\(x interface`, + `func ReturnUnexported\(\) unexportedType`, + `TYPES`, + `type ExportedInterface interface`, + `type ExportedStructOneField struct`, + `type ExportedType struct`, + `Comment about exported type`, + `const ConstGroup4 ExportedType = ExportedType`, + `ExportedTypedConstant ExportedType = iota`, + `Constants tied to ExportedType`, + `func ExportedTypeConstructor\(\) \*ExportedType`, + `Comment about constructor for exported type`, + `func ReturnExported\(\) ExportedType`, + `func \(ExportedType\) ExportedMethod\(a int\) bool`, + `Comment about exported method`, + `type T1 = T2`, + `type T2 int`, + `type SimpleConstraint interface {`, + `type TildeConstraint interface {`, + `type StructConstraint interface {`, + `BUG: function body 
note`, + }, + []string{ + `constThree`, + `_, _ uint64 = 2 \* iota, 1 << iota`, + `constLeft1, constRight1`, + `duplicate`, + `varFour`, + `func internalFunc`, + `unexportedField`, + `func \(unexportedType\)`, + `ignore:directive`, + }, + }, + // Package with just the package declaration. Issue 31457. + { + "only package declaration", + []string{"-all", p + "/nested/empty"}, + []string{`package empty .*import`}, + nil, + }, + // Package dump -short + { + "full package with -short", + []string{`-short`, p}, + []string{ + `const ExportedConstant = 1`, // Simple constant. + `func ReturnUnexported\(\) unexportedType`, // Function with unexported return type. + }, + []string{ + `MultiLine(String|Method|Field)`, // No data from multi line portions. + }, + }, + // Package dump -u + { + "full package with u", + []string{`-u`, p}, + []string{ + `const ExportedConstant = 1`, // Simple constant. + `const internalConstant = 2`, // Internal constants. + `func internalFunc\(a int\) bool`, // Internal functions. + `func ReturnUnexported\(\) unexportedType`, // Function with unexported return type. + }, + []string{ + `Comment about exported constant`, // No comment for simple constant. + `Comment about block of constants`, // No comment for constant block. + `Comment about internal function`, // No comment for internal function. + `MultiLine(String|Method|Field)`, // No data from multi line portions. 
+ `ignore:directive`, + }, + }, + // Package dump -u -all + { + "full package", + []string{"-u", "-all", p}, + []string{ + `package pkg .*import`, + `Package comment`, + `CONSTANTS`, + `Comment before ConstOne`, + `ConstOne += 1`, + `ConstTwo += 2 // Comment on line with ConstTwo`, + `constThree = 3 // Comment on line with constThree`, + `ConstFive`, + `const internalConstant += 2`, + `Comment about internal constant`, + `VARIABLES`, + `Comment before VarOne`, + `VarOne += 1`, + `Comment about block of variables`, + `varFour += 4`, + `VarFive += 5`, + `varSix += 6`, + `var ExportedVariable = 1`, + `var LongLine = newLongLine\(`, + `var MultiLineVar = map\[struct {`, + `var internalVariable = 2`, + `Comment about internal variable`, + `FUNCTIONS`, + `func ExportedFunc\(a int\) bool`, + `Comment about exported function`, + `func MultiLineFunc\(x interface`, + `func internalFunc\(a int\) bool`, + `Comment about internal function`, + `func newLongLine\(ss .*string\)`, + `TYPES`, + `type ExportedType struct`, + `type T1 = T2`, + `type T2 int`, + `type unexportedType int`, + `Comment about unexported type`, + `ConstGroup1 unexportedType = iota`, + `ConstGroup2`, + `ConstGroup3`, + `ExportedTypedConstant_unexported unexportedType = iota`, + `Constants tied to unexportedType`, + `const unexportedTypedConstant unexportedType = 1`, + `func ReturnUnexported\(\) unexportedType`, + `func \(unexportedType\) ExportedMethod\(\) bool`, + `func \(unexportedType\) unexportedMethod\(\) bool`, + }, + []string{ + `ignore:directive`, + }, + }, + + // Single constant. + { + "single constant", + []string{p, `ExportedConstant`}, + []string{ + `Comment about exported constant`, // Include comment. + `const ExportedConstant = 1`, + }, + nil, + }, + // Single constant -u. + { + "single constant with -u", + []string{`-u`, p, `internalConstant`}, + []string{ + `Comment about internal constant`, // Include comment. + `const internalConstant = 2`, + }, + nil, + }, + // Block of constants. 
+ { + "block of constants", + []string{p, `ConstTwo`}, + []string{ + `Comment before ConstOne.\n.*ConstOne = 1`, // First... + `ConstTwo = 2.*Comment on line with ConstTwo`, // And second show up. + `Comment about block of constants`, // Comment does too. + }, + []string{ + `constThree`, // No unexported constant. + }, + }, + // Block of constants -u. + { + "block of constants with -u", + []string{"-u", p, `constThree`}, + []string{ + `constThree = 3.*Comment on line with constThree`, + }, + nil, + }, + // Block of constants -src. + { + "block of constants with -src", + []string{"-src", p, `ConstTwo`}, + []string{ + `Comment about block of constants`, // Top comment. + `ConstOne.*=.*1`, // Each constant seen. + `ConstTwo.*=.*2.*Comment on line with ConstTwo`, + `constThree`, // Even unexported constants. + }, + nil, + }, + // Block of constants with carryover type from unexported field. + { + "block of constants with carryover type", + []string{p, `ConstLeft2`}, + []string{ + `ConstLeft2, constRight2 uint64`, + `constLeft3, ConstRight3`, + `ConstLeft4, ConstRight4`, + }, + nil, + }, + // Block of constants -u with carryover type from unexported field. + { + "block of constants with carryover type", + []string{"-u", p, `ConstLeft2`}, + []string{ + `_, _ uint64 = 2 \* iota, 1 << iota`, + `constLeft1, constRight1`, + `ConstLeft2, constRight2`, + `constLeft3, ConstRight3`, + `ConstLeft4, ConstRight4`, + }, + nil, + }, + + // Single variable. + { + "single variable", + []string{p, `ExportedVariable`}, + []string{ + `ExportedVariable`, // Include comment. + `var ExportedVariable = 1`, + }, + nil, + }, + // Single variable -u. + { + "single variable with -u", + []string{`-u`, p, `internalVariable`}, + []string{ + `Comment about internal variable`, // Include comment. + `var internalVariable = 2`, + }, + nil, + }, + // Block of variables. + { + "block of variables", + []string{p, `VarTwo`}, + []string{ + `Comment before VarOne.\n.*VarOne = 1`, // First... 
+ `VarTwo = 2.*Comment on line with VarTwo`, // And second show up. + `Comment about block of variables`, // Comment does too. + }, + []string{ + `varThree= 3`, // No unexported variable. + }, + }, + // Block of variables -u. + { + "block of variables with -u", + []string{"-u", p, `varThree`}, + []string{ + `varThree = 3.*Comment on line with varThree`, + }, + nil, + }, + + // Function. + { + "function", + []string{p, `ExportedFunc`}, + []string{ + `Comment about exported function`, // Include comment. + `func ExportedFunc\(a int\) bool`, + }, + nil, + }, + // Function -u. + { + "function with -u", + []string{"-u", p, `internalFunc`}, + []string{ + `Comment about internal function`, // Include comment. + `func internalFunc\(a int\) bool`, + }, + nil, + }, + // Function with -src. + { + "function with -src", + []string{"-src", p, `ExportedFunc`}, + []string{ + `Comment about exported function`, // Include comment. + `func ExportedFunc\(a int\) bool`, + `return true != false`, // Include body. + }, + nil, + }, + + // Type. + { + "type", + []string{p, `ExportedType`}, + []string{ + `Comment about exported type`, // Include comment. + `type ExportedType struct`, // Type definition. + `Comment before exported field.*\n.*ExportedField +int` + + `.*Comment on line with exported field`, + `ExportedEmbeddedType.*Comment on line with exported embedded field`, + `Has unexported fields`, + `func \(ExportedType\) ExportedMethod\(a int\) bool`, + `const ExportedTypedConstant ExportedType = iota`, // Must include associated constant. + `func ExportedTypeConstructor\(\) \*ExportedType`, // Must include constructor. + `io.Reader.*Comment on line with embedded Reader`, + }, + []string{ + `unexportedField`, // No unexported field. + `int.*embedded`, // No unexported embedded field. + `Comment about exported method`, // No comment about exported method. + `unexportedMethod`, // No unexported method. + `unexportedTypedConstant`, // No unexported constant. 
+ `error`, // No embedded error. + }, + }, + // Type with -src. Will see unexported fields. + { + "type", + []string{"-src", p, `ExportedType`}, + []string{ + `Comment about exported type`, // Include comment. + `type ExportedType struct`, // Type definition. + `Comment before exported field`, + `ExportedField.*Comment on line with exported field`, + `ExportedEmbeddedType.*Comment on line with exported embedded field`, + `unexportedType.*Comment on line with unexported embedded field`, + `func \(ExportedType\) ExportedMethod\(a int\) bool`, + `const ExportedTypedConstant ExportedType = iota`, // Must include associated constant. + `func ExportedTypeConstructor\(\) \*ExportedType`, // Must include constructor. + `io.Reader.*Comment on line with embedded Reader`, + }, + []string{ + `Comment about exported method`, // No comment about exported method. + `unexportedMethod`, // No unexported method. + `unexportedTypedConstant`, // No unexported constant. + }, + }, + // Type -all. + { + "type", + []string{"-all", p, `ExportedType`}, + []string{ + `type ExportedType struct {`, // Type definition as source. + `Comment about exported type`, // Include comment afterwards. + `const ConstGroup4 ExportedType = ExportedType\{\}`, // Related constants. + `ExportedTypedConstant ExportedType = iota`, + `Constants tied to ExportedType`, + `func ExportedTypeConstructor\(\) \*ExportedType`, + `Comment about constructor for exported type.`, + `func ReturnExported\(\) ExportedType`, + `func \(ExportedType\) ExportedMethod\(a int\) bool`, + `Comment about exported method.`, + `func \(ExportedType\) Uncommented\(a int\) bool\n\n`, // Ensure line gap after method with no comment + }, + []string{ + `unexportedType`, + }, + }, + // Type T1 dump (alias). + { + "type T1", + []string{p + ".T1"}, + []string{ + `type T1 = T2`, + }, + []string{ + `type T1 T2`, + `type ExportedType`, + }, + }, + // Type -u with unexported fields. 
+ { + "type with unexported fields and -u", + []string{"-u", p, `ExportedType`}, + []string{ + `Comment about exported type`, // Include comment. + `type ExportedType struct`, // Type definition. + `Comment before exported field.*\n.*ExportedField +int`, + `unexportedField.*int.*Comment on line with unexported field`, + `ExportedEmbeddedType.*Comment on line with exported embedded field`, + `\*ExportedEmbeddedType.*Comment on line with exported embedded \*field`, + `\*qualified.ExportedEmbeddedType.*Comment on line with exported embedded \*selector.field`, + `unexportedType.*Comment on line with unexported embedded field`, + `\*unexportedType.*Comment on line with unexported embedded \*field`, + `io.Reader.*Comment on line with embedded Reader`, + `error.*Comment on line with embedded error`, + `func \(ExportedType\) unexportedMethod\(a int\) bool`, + `unexportedTypedConstant`, + }, + []string{ + `Has unexported fields`, + }, + }, + // Unexported type with -u. + { + "unexported type with -u", + []string{"-u", p, `unexportedType`}, + []string{ + `Comment about unexported type`, // Include comment. + `type unexportedType int`, // Type definition. + `func \(unexportedType\) ExportedMethod\(\) bool`, + `func \(unexportedType\) unexportedMethod\(\) bool`, + `ExportedTypedConstant_unexported unexportedType = iota`, + `const unexportedTypedConstant unexportedType = 1`, + }, + nil, + }, + + // Interface. + { + "interface type", + []string{p, `ExportedInterface`}, + []string{ + `Comment about exported interface`, // Include comment. + `type ExportedInterface interface`, // Interface definition. 
+ `Comment before exported method.\n.*//\n.*// // Code block showing how to use ExportedMethod\n.*// func DoSomething\(\) error {\n.*// ExportedMethod\(\)\n.*// return nil\n.*// }\n.*//.*\n.*ExportedMethod\(\)` + + `.*Comment on line with exported method`, + `io.Reader.*Comment on line with embedded Reader`, + `error.*Comment on line with embedded error`, + `Has unexported methods`, + }, + []string{ + `unexportedField`, // No unexported field. + `Comment about exported method`, // No comment about exported method. + `unexportedMethod`, // No unexported method. + `unexportedTypedConstant`, // No unexported constant. + }, + }, + // Interface -u with unexported methods. + { + "interface type with unexported methods and -u", + []string{"-u", p, `ExportedInterface`}, + []string{ + `Comment about exported interface`, // Include comment. + `type ExportedInterface interface`, // Interface definition. + `Comment before exported method.\n.*//\n.*// // Code block showing how to use ExportedMethod\n.*// func DoSomething\(\) error {\n.*// ExportedMethod\(\)\n.*// return nil\n.*// }\n.*//.*\n.*ExportedMethod\(\)` + `.*Comment on line with exported method`, + `unexportedMethod\(\).*Comment on line with unexported method`, + `io.Reader.*Comment on line with embedded Reader`, + `error.*Comment on line with embedded error`, + }, + []string{ + `Has unexported methods`, + }, + }, + + // Interface method. + { + "interface method", + []string{p, `ExportedInterface.ExportedMethod`}, + []string{ + `Comment before exported method.\n.*//\n.*// // Code block showing how to use ExportedMethod\n.*// func DoSomething\(\) error {\n.*// ExportedMethod\(\)\n.*// return nil\n.*// }\n.*//.*\n.*ExportedMethod\(\)` + + `.*Comment on line with exported method`, + }, + []string{ + `Comment about exported interface`, + }, + }, + // Interface method at package level. 
+ { + "interface method at package level", + []string{p, `ExportedMethod`}, + []string{ + `func \(ExportedType\) ExportedMethod\(a int\) bool`, + `Comment about exported method`, + }, + []string{ + `Comment before exported method.*\n.*ExportedMethod\(\)` + + `.*Comment on line with exported method`, + }, + }, + + // Method. + { + "method", + []string{p, `ExportedType.ExportedMethod`}, + []string{ + `func \(ExportedType\) ExportedMethod\(a int\) bool`, + `Comment about exported method`, + }, + nil, + }, + // Method with -u. + { + "method with -u", + []string{"-u", p, `ExportedType.unexportedMethod`}, + []string{ + `func \(ExportedType\) unexportedMethod\(a int\) bool`, + `Comment about unexported method`, + }, + nil, + }, + // Method with -src. + { + "method with -src", + []string{"-src", p, `ExportedType.ExportedMethod`}, + []string{ + `func \(ExportedType\) ExportedMethod\(a int\) bool`, + `Comment about exported method`, + `return true != true`, + }, + nil, + }, + + // Field. + { + "field", + []string{p, `ExportedType.ExportedField`}, + []string{ + `type ExportedType struct`, + `ExportedField int`, + `Comment before exported field`, + `Comment on line with exported field`, + `other fields elided`, + }, + nil, + }, + + // Field with -u. + { + "method with -u", + []string{"-u", p, `ExportedType.unexportedField`}, + []string{ + `unexportedField int`, + `Comment on line with unexported field`, + }, + nil, + }, + + // Field of struct with only one field. + { + "single-field struct", + []string{p, `ExportedStructOneField.OnlyField`}, + []string{`the only field`}, + []string{`other fields elided`}, + }, + + // Case matching off. + { + "case matching off", + []string{p, `casematch`}, + []string{ + `CaseMatch`, + `Casematch`, + }, + nil, + }, + + // Case matching on. + { + "case matching on", + []string{"-c", p, `Casematch`}, + []string{ + `Casematch`, + }, + []string{ + `CaseMatch`, + }, + }, + + // Merging comments with -src. 
+ { + "merge comments with -src A", + []string{"-src", p + "/merge", `A`}, + []string{ + `A doc`, + `func A`, + `A comment`, + }, + []string{ + `Package A doc`, + `Package B doc`, + `B doc`, + `B comment`, + `B doc`, + }, + }, + { + "merge comments with -src B", + []string{"-src", p + "/merge", `B`}, + []string{ + `B doc`, + `func B`, + `B comment`, + }, + []string{ + `Package A doc`, + `Package B doc`, + `A doc`, + `A comment`, + `A doc`, + }, + }, + + // No dups with -u. Issue 21797. + { + "case matching on, no dups", + []string{"-u", p, `duplicate`}, + []string{ + `Duplicate`, + `duplicate`, + }, + []string{ + "\\)\n+const", // This will appear if the const decl appears twice. + }, + }, + { + "non-imported: pkg.sym", + []string{"nested.Foo"}, + []string{"Foo struct"}, + nil, + }, + { + "non-imported: pkg only", + []string{"nested"}, + []string{"Foo struct"}, + nil, + }, + { + "non-imported: pkg sym", + []string{"nested", "Foo"}, + []string{"Foo struct"}, + nil, + }, + { + "formatted doc on function", + []string{p, "ExportedFormattedDoc"}, + []string{ + `func ExportedFormattedDoc\(a int\) bool`, + ` Comment about exported function with formatting\. + + Example + + fmt\.Println\(FormattedDoc\(\)\) + + Text after pre-formatted block\.`, + }, + nil, + }, + { + "formatted doc on type field", + []string{p, "ExportedFormattedType.ExportedField"}, + []string{ + `type ExportedFormattedType struct`, + ` // Comment before exported field with formatting\. + //[ ] + // Example + //[ ] + // a\.ExportedField = 123 + //[ ] + // Text after pre-formatted block\.`, + `ExportedField int`, + }, + []string{"ignore:directive"}, + }, + { + "formatted doc on entire type", + []string{p, "ExportedFormattedType"}, + []string{ + `type ExportedFormattedType struct`, + ` // Comment before exported field with formatting\. 
+ // + // Example + // + // a\.ExportedField = 123 + // + // Text after pre-formatted block\.`, + `ExportedField int`, + }, + []string{"ignore:directive"}, + }, + { + "formatted doc on entire type with -all", + []string{"-all", p, "ExportedFormattedType"}, + []string{ + `type ExportedFormattedType struct`, + ` // Comment before exported field with formatting\. + // + // Example + // + // a\.ExportedField = 123 + // + // Text after pre-formatted block\.`, + `ExportedField int`, + }, + []string{"ignore:directive"}, + }, +} + +func TestDoc(t *testing.T) { + maybeSkip(t) + defer log.SetOutput(log.Writer()) + for _, test := range tests { + var b bytes.Buffer + var flagSet flag.FlagSet + var logbuf bytes.Buffer + log.SetOutput(&logbuf) + err := do(&b, &flagSet, test.args) + if err != nil { + t.Fatalf("%s %v: %s\n", test.name, test.args, err) + } + if logbuf.Len() > 0 { + t.Errorf("%s %v: unexpected log messages:\n%s", test.name, test.args, logbuf.Bytes()) + } + output := b.Bytes() + failed := false + for j, yes := range test.yes { + re, err := regexp.Compile(yes) + if err != nil { + t.Fatalf("%s.%d: compiling %#q: %s", test.name, j, yes, err) + } + if !re.Match(output) { + t.Errorf("%s.%d: no match for %s %#q", test.name, j, test.args, yes) + failed = true + } + } + for j, no := range test.no { + re, err := regexp.Compile(no) + if err != nil { + t.Fatalf("%s.%d: compiling %#q: %s", test.name, j, no, err) + } + if re.Match(output) { + t.Errorf("%s.%d: incorrect match for %s %#q", test.name, j, test.args, no) + failed = true + } + } + if bytes.Count(output, []byte("TYPES\n")) > 1 { + t.Fatalf("%s: repeating headers", test.name) + } + if failed { + t.Logf("\n%s", output) + } + } +} + +// Test the code to try multiple packages. Our test case is +// +// go doc rand.Float64 +// +// This needs to find math/rand.Float64; however crypto/rand, which doesn't +// have the symbol, usually appears first in the directory listing. 
+func TestMultiplePackages(t *testing.T) { + if testing.Short() { + t.Skip("scanning file system takes too long") + } + maybeSkip(t) + var b bytes.Buffer // We don't care about the output. + // Make sure crypto/rand does not have the symbol. + { + var flagSet flag.FlagSet + err := do(&b, &flagSet, []string{"crypto/rand.float64"}) + if err == nil { + t.Errorf("expected error from crypto/rand.float64") + } else if !strings.Contains(err.Error(), "no symbol float64") { + t.Errorf("unexpected error %q from crypto/rand.float64", err) + } + } + // Make sure math/rand does have the symbol. + { + var flagSet flag.FlagSet + err := do(&b, &flagSet, []string{"math/rand.float64"}) + if err != nil { + t.Errorf("unexpected error %q from math/rand.float64", err) + } + } + // Try the shorthand. + { + var flagSet flag.FlagSet + err := do(&b, &flagSet, []string{"rand.float64"}) + if err != nil { + t.Errorf("unexpected error %q from rand.float64", err) + } + } + // Now try a missing symbol. We should see both packages in the error. + { + var flagSet flag.FlagSet + err := do(&b, &flagSet, []string{"rand.doesnotexit"}) + if err == nil { + t.Errorf("expected error from rand.doesnotexit") + } else { + errStr := err.Error() + if !strings.Contains(errStr, "no symbol") { + t.Errorf("error %q should contain 'no symbol", errStr) + } + if !strings.Contains(errStr, "crypto/rand") { + t.Errorf("error %q should contain crypto/rand", errStr) + } + if !strings.Contains(errStr, "math/rand") { + t.Errorf("error %q should contain math/rand", errStr) + } + } + } +} + +// Test the code to look up packages when given two args. First test case is +// +// go doc binary BigEndian +// +// This needs to find encoding/binary.BigEndian, which means +// finding the package encoding/binary given only "binary". +// Second case is +// +// go doc rand Float64 +// +// which again needs to find math/rand and not give up after crypto/rand, +// which has no such function. 
+func TestTwoArgLookup(t *testing.T) { + if testing.Short() { + t.Skip("scanning file system takes too long") + } + maybeSkip(t) + var b bytes.Buffer // We don't care about the output. + { + var flagSet flag.FlagSet + err := do(&b, &flagSet, []string{"binary", "BigEndian"}) + if err != nil { + t.Errorf("unexpected error %q from binary BigEndian", err) + } + } + { + var flagSet flag.FlagSet + err := do(&b, &flagSet, []string{"rand", "Float64"}) + if err != nil { + t.Errorf("unexpected error %q from rand Float64", err) + } + } + { + var flagSet flag.FlagSet + err := do(&b, &flagSet, []string{"bytes", "Foo"}) + if err == nil { + t.Errorf("expected error from bytes Foo") + } else if !strings.Contains(err.Error(), "no symbol Foo") { + t.Errorf("unexpected error %q from bytes Foo", err) + } + } + { + var flagSet flag.FlagSet + err := do(&b, &flagSet, []string{"nosuchpackage", "Foo"}) + if err == nil { + // actually present in the user's filesystem + } else if !strings.Contains(err.Error(), "no such package") { + t.Errorf("unexpected error %q from nosuchpackage Foo", err) + } + } +} + +// Test the code to look up packages when the first argument starts with "./". +// Our test case is in effect "cd src/text; doc ./template". This should get +// text/template but before Issue 23383 was fixed would give html/template. +func TestDotSlashLookup(t *testing.T) { + if testing.Short() { + t.Skip("scanning file system takes too long") + } + maybeSkip(t) + where, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + defer func() { + if err := os.Chdir(where); err != nil { + t.Fatal(err) + } + }() + if err := os.Chdir(filepath.Join(buildCtx.GOROOT, "src", "text")); err != nil { + t.Fatal(err) + } + var b strings.Builder + var flagSet flag.FlagSet + err = do(&b, &flagSet, []string{"./template"}) + if err != nil { + t.Errorf("unexpected error %q from ./template", err) + } + // The output should contain information about the text/template package. 
+ const want = `package template // import "text/template"` + output := b.String() + if !strings.HasPrefix(output, want) { + t.Fatalf("wrong package: %.*q...", len(want), output) + } +} + +// Test that we don't print spurious package clauses +// when there should be no output at all. Issue 37969. +func TestNoPackageClauseWhenNoMatch(t *testing.T) { + maybeSkip(t) + var b strings.Builder + var flagSet flag.FlagSet + err := do(&b, &flagSet, []string{"template.ZZZ"}) + // Expect an error. + if err == nil { + t.Error("expect an error for template.zzz") + } + // And the output should not contain any package clauses. + const dontWant = `package template // import ` + output := b.String() + if strings.Contains(output, dontWant) { + t.Fatalf("improper package clause printed:\n%s", output) + } +} + +type trimTest struct { + path string + prefix string + result string + ok bool +} + +var trimTests = []trimTest{ + {"", "", "", true}, + {"/usr/gopher", "/usr/gopher", "/usr/gopher", true}, + {"/usr/gopher/bar", "/usr/gopher", "bar", true}, + {"/usr/gopherflakes", "/usr/gopher", "/usr/gopherflakes", false}, + {"/usr/gopher/bar", "/usr/zot", "/usr/gopher/bar", false}, +} + +func TestTrim(t *testing.T) { + for _, test := range trimTests { + result, ok := trim(test.path, test.prefix) + if ok != test.ok { + t.Errorf("%s %s expected %t got %t", test.path, test.prefix, test.ok, ok) + continue + } + if result != test.result { + t.Errorf("%s %s expected %q got %q", test.path, test.prefix, test.result, result) + continue + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/doc/main.go b/platform/dbops/binaries/go/go/src/cmd/doc/main.go new file mode 100644 index 0000000000000000000000000000000000000000..273d7febbc37cca568c30811e8aac8b88879f704 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/doc/main.go @@ -0,0 +1,412 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Doc (usually run as go doc) accepts zero, one or two arguments. +// +// Zero arguments: +// +// go doc +// +// Show the documentation for the package in the current directory. +// +// One argument: +// +// go doc +// go doc [.] +// go doc [.][.] +// go doc [.][.] +// +// The first item in this list that succeeds is the one whose documentation +// is printed. If there is a symbol but no package, the package in the current +// directory is chosen. However, if the argument begins with a capital +// letter it is always assumed to be a symbol in the current directory. +// +// Two arguments: +// +// go doc [.] +// +// Show the documentation for the package, symbol, and method or field. The +// first argument must be a full package path. This is similar to the +// command-line usage for the godoc command. +// +// For commands, unless the -cmd flag is present "go doc command" +// shows only the package-level docs for the package. +// +// The -src flag causes doc to print the full source code for the symbol, such +// as the body of a struct, function or method. +// +// The -all flag causes doc to print all documentation for the package and +// all its visible symbols. The argument must identify a package. +// +// For complete documentation, run "go help doc". +package main + +import ( + "bytes" + "flag" + "fmt" + "go/build" + "go/token" + "io" + "log" + "os" + "path" + "path/filepath" + "strings" +) + +var ( + unexported bool // -u flag + matchCase bool // -c flag + chdir string // -C flag + showAll bool // -all flag + showCmd bool // -cmd flag + showSrc bool // -src flag + short bool // -short flag +) + +// usage is a replacement usage function for the flags package. 
+func usage() { + fmt.Fprintf(os.Stderr, "Usage of [go] doc:\n") + fmt.Fprintf(os.Stderr, "\tgo doc\n") + fmt.Fprintf(os.Stderr, "\tgo doc \n") + fmt.Fprintf(os.Stderr, "\tgo doc [.]\n") + fmt.Fprintf(os.Stderr, "\tgo doc [.][.]\n") + fmt.Fprintf(os.Stderr, "\tgo doc [.][.]\n") + fmt.Fprintf(os.Stderr, "\tgo doc [.]\n") + fmt.Fprintf(os.Stderr, "For more information run\n") + fmt.Fprintf(os.Stderr, "\tgo help doc\n\n") + fmt.Fprintf(os.Stderr, "Flags:\n") + flag.PrintDefaults() + os.Exit(2) +} + +func main() { + log.SetFlags(0) + log.SetPrefix("doc: ") + dirsInit() + err := do(os.Stdout, flag.CommandLine, os.Args[1:]) + if err != nil { + log.Fatal(err) + } +} + +// do is the workhorse, broken out of main to make testing easier. +func do(writer io.Writer, flagSet *flag.FlagSet, args []string) (err error) { + flagSet.Usage = usage + unexported = false + matchCase = false + flagSet.StringVar(&chdir, "C", "", "change to `dir` before running command") + flagSet.BoolVar(&unexported, "u", false, "show unexported symbols as well as exported") + flagSet.BoolVar(&matchCase, "c", false, "symbol matching honors case (paths not affected)") + flagSet.BoolVar(&showAll, "all", false, "show all documentation for package") + flagSet.BoolVar(&showCmd, "cmd", false, "show symbols with package docs even if package is a command") + flagSet.BoolVar(&showSrc, "src", false, "show source code for symbol") + flagSet.BoolVar(&short, "short", false, "one-line representation for each symbol") + flagSet.Parse(args) + if chdir != "" { + if err := os.Chdir(chdir); err != nil { + return err + } + } + var paths []string + var symbol, method string + // Loop until something is printed. + dirs.Reset() + for i := 0; ; i++ { + buildPackage, userPath, sym, more := parseArgs(flagSet.Args()) + if i > 0 && !more { // Ignore the "more" bit on the first iteration. 
+ return failMessage(paths, symbol, method) + } + if buildPackage == nil { + return fmt.Errorf("no such package: %s", userPath) + } + + // The builtin package needs special treatment: its symbols are lower + // case but we want to see them, always. + if buildPackage.ImportPath == "builtin" { + unexported = true + } + + symbol, method = parseSymbol(sym) + pkg := parsePackage(writer, buildPackage, userPath) + paths = append(paths, pkg.prettyPath()) + + defer func() { + pkg.flush() + e := recover() + if e == nil { + return + } + pkgError, ok := e.(PackageError) + if ok { + err = pkgError + return + } + panic(e) + }() + + switch { + case symbol == "": + pkg.packageDoc() // The package exists, so we got some output. + return + case method == "": + if pkg.symbolDoc(symbol) { + return + } + case pkg.printMethodDoc(symbol, method): + return + case pkg.printFieldDoc(symbol, method): + return + } + } +} + +// failMessage creates a nicely formatted error message when there is no result to show. +func failMessage(paths []string, symbol, method string) error { + var b bytes.Buffer + if len(paths) > 1 { + b.WriteString("s") + } + b.WriteString(" ") + for i, path := range paths { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(path) + } + if method == "" { + return fmt.Errorf("no symbol %s in package%s", symbol, &b) + } + return fmt.Errorf("no method or field %s.%s in package%s", symbol, method, &b) +} + +// parseArgs analyzes the arguments (if any) and returns the package +// it represents, the part of the argument the user used to identify +// the path (or "" if it's the current package) and the symbol +// (possibly with a .method) within that package. +// parseSymbol is used to analyze the symbol itself. +// The boolean final argument reports whether it is possible that +// there may be more directories worth looking at. It will only +// be true if the package path is a partial match for some directory +// and there may be more matches. 
For example, if the argument +// is rand.Float64, we must scan both crypto/rand and math/rand +// to find the symbol, and the first call will return crypto/rand, true. +func parseArgs(args []string) (pkg *build.Package, path, symbol string, more bool) { + wd, err := os.Getwd() + if err != nil { + log.Fatal(err) + } + if len(args) == 0 { + // Easy: current directory. + return importDir(wd), "", "", false + } + arg := args[0] + // We have an argument. If it is a directory name beginning with . or .., + // use the absolute path name. This discriminates "./errors" from "errors" + // if the current directory contains a non-standard errors package. + if isDotSlash(arg) { + arg = filepath.Join(wd, arg) + } + switch len(args) { + default: + usage() + case 1: + // Done below. + case 2: + // Package must be findable and importable. + pkg, err := build.Import(args[0], wd, build.ImportComment) + if err == nil { + return pkg, args[0], args[1], false + } + for { + packagePath, ok := findNextPackage(arg) + if !ok { + break + } + if pkg, err := build.ImportDir(packagePath, build.ImportComment); err == nil { + return pkg, arg, args[1], true + } + } + return nil, args[0], args[1], false + } + // Usual case: one argument. + // If it contains slashes, it begins with either a package path + // or an absolute directory. + // First, is it a complete package path as it is? If so, we are done. + // This avoids confusion over package paths that have other + // package paths as their prefix. + var importErr error + if filepath.IsAbs(arg) { + pkg, importErr = build.ImportDir(arg, build.ImportComment) + if importErr == nil { + return pkg, arg, "", false + } + } else { + pkg, importErr = build.Import(arg, wd, build.ImportComment) + if importErr == nil { + return pkg, arg, "", false + } + } + // Another disambiguator: If the argument starts with an upper + // case letter, it can only be a symbol in the current directory. 
+ // Kills the problem caused by case-insensitive file systems + // matching an upper case name as a package name. + if !strings.ContainsAny(arg, `/\`) && token.IsExported(arg) { + pkg, err := build.ImportDir(".", build.ImportComment) + if err == nil { + return pkg, "", arg, false + } + } + // If it has a slash, it must be a package path but there is a symbol. + // It's the last package path we care about. + slash := strings.LastIndex(arg, "/") + // There may be periods in the package path before or after the slash + // and between a symbol and method. + // Split the string at various periods to see what we find. + // In general there may be ambiguities but this should almost always + // work. + var period int + // slash+1: if there's no slash, the value is -1 and start is 0; otherwise + // start is the byte after the slash. + for start := slash + 1; start < len(arg); start = period + 1 { + period = strings.Index(arg[start:], ".") + symbol := "" + if period < 0 { + period = len(arg) + } else { + period += start + symbol = arg[period+1:] + } + // Have we identified a package already? + pkg, err := build.Import(arg[0:period], wd, build.ImportComment) + if err == nil { + return pkg, arg[0:period], symbol, false + } + // See if we have the basename or tail of a package, as in json for encoding/json + // or ivy/value for robpike.io/ivy/value. + pkgName := arg[:period] + for { + path, ok := findNextPackage(pkgName) + if !ok { + break + } + if pkg, err = build.ImportDir(path, build.ImportComment); err == nil { + return pkg, arg[0:period], symbol, true + } + } + dirs.Reset() // Next iteration of for loop must scan all the directories again. + } + // If it has a slash, we've failed. + if slash >= 0 { + // build.Import should always include the path in its error message, + // and we should avoid repeating it. Unfortunately, build.Import doesn't + // return a structured error. 
That can't easily be fixed, since it + // invokes 'go list' and returns the error text from the loaded package. + // TODO(golang.org/issue/34750): load using golang.org/x/tools/go/packages + // instead of go/build. + importErrStr := importErr.Error() + if strings.Contains(importErrStr, arg[:period]) { + log.Fatal(importErrStr) + } else { + log.Fatalf("no such package %s: %s", arg[:period], importErrStr) + } + } + // Guess it's a symbol in the current directory. + return importDir(wd), "", arg, false +} + +// dotPaths lists all the dotted paths legal on Unix-like and +// Windows-like file systems. We check them all, as the chance +// of error is minute and even on Windows people will use ./ +// sometimes. +var dotPaths = []string{ + `./`, + `../`, + `.\`, + `..\`, +} + +// isDotSlash reports whether the path begins with a reference +// to the local . or .. directory. +func isDotSlash(arg string) bool { + if arg == "." || arg == ".." { + return true + } + for _, dotPath := range dotPaths { + if strings.HasPrefix(arg, dotPath) { + return true + } + } + return false +} + +// importDir is just an error-catching wrapper for build.ImportDir. +func importDir(dir string) *build.Package { + pkg, err := build.ImportDir(dir, build.ImportComment) + if err != nil { + log.Fatal(err) + } + return pkg +} + +// parseSymbol breaks str apart into a symbol and method. +// Both may be missing or the method may be missing. +// If present, each must be a valid Go identifier. +func parseSymbol(str string) (symbol, method string) { + if str == "" { + return + } + elem := strings.Split(str, ".") + switch len(elem) { + case 1: + case 2: + method = elem[1] + default: + log.Printf("too many periods in symbol specification") + usage() + } + symbol = elem[0] + return +} + +// isExported reports whether the name is an exported identifier. +// If the unexported flag (-u) is true, isExported returns true because +// it means that we treat the name as if it is exported. 
+func isExported(name string) bool { + return unexported || token.IsExported(name) +} + +// findNextPackage returns the next full file name path that matches the +// (perhaps partial) package path pkg. The boolean reports if any match was found. +func findNextPackage(pkg string) (string, bool) { + if filepath.IsAbs(pkg) { + if dirs.offset == 0 { + dirs.offset = -1 + return pkg, true + } + return "", false + } + if pkg == "" || token.IsExported(pkg) { // Upper case symbol cannot be a package name. + return "", false + } + pkg = path.Clean(pkg) + pkgSuffix := "/" + pkg + for { + d, ok := dirs.Next() + if !ok { + return "", false + } + if d.importPath == pkg || strings.HasSuffix(d.importPath, pkgSuffix) { + return d.dir, true + } + } +} + +var buildCtx = build.Default + +// splitGopath splits $GOPATH into a list of roots. +func splitGopath() []string { + return filepath.SplitList(buildCtx.GOPATH) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/doc/pkg.go b/platform/dbops/binaries/go/go/src/cmd/doc/pkg.go new file mode 100644 index 0000000000000000000000000000000000000000..a21d8a4688233476a7648a60d8a4ee5690d5b6de --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/doc/pkg.go @@ -0,0 +1,1167 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "bytes" + "fmt" + "go/ast" + "go/build" + "go/doc" + "go/format" + "go/parser" + "go/printer" + "go/token" + "io" + "io/fs" + "log" + "path/filepath" + "strings" + "unicode" + "unicode/utf8" +) + +const ( + punchedCardWidth = 80 + indent = " " +) + +type Package struct { + writer io.Writer // Destination for output. + name string // Package name, json for encoding/json. + userPath string // String the user used to find this package. + pkg *ast.Package // Parsed package. 
+ file *ast.File // Merged from all files in the package + doc *doc.Package + build *build.Package + typedValue map[*doc.Value]bool // Consts and vars related to types. + constructor map[*doc.Func]bool // Constructors. + fs *token.FileSet // Needed for printing. + buf pkgBuffer +} + +func (pkg *Package) ToText(w io.Writer, text, prefix, codePrefix string) { + d := pkg.doc.Parser().Parse(text) + pr := pkg.doc.Printer() + pr.TextPrefix = prefix + pr.TextCodePrefix = codePrefix + w.Write(pr.Text(d)) +} + +// pkgBuffer is a wrapper for bytes.Buffer that prints a package clause the +// first time Write is called. +type pkgBuffer struct { + pkg *Package + printed bool // Prevent repeated package clauses. + bytes.Buffer +} + +func (pb *pkgBuffer) Write(p []byte) (int, error) { + pb.packageClause() + return pb.Buffer.Write(p) +} + +func (pb *pkgBuffer) packageClause() { + if !pb.printed { + pb.printed = true + // Only show package clause for commands if requested explicitly. + if pb.pkg.pkg.Name != "main" || showCmd { + pb.pkg.packageClause() + } + } +} + +type PackageError string // type returned by pkg.Fatalf. + +func (p PackageError) Error() string { + return string(p) +} + +// prettyPath returns a version of the package path that is suitable for an +// error message. It obeys the import comment if present. Also, since +// pkg.build.ImportPath is sometimes the unhelpful "" or ".", it looks for a +// directory name in GOROOT or GOPATH if that happens. +func (pkg *Package) prettyPath() string { + path := pkg.build.ImportComment + if path == "" { + path = pkg.build.ImportPath + } + if path != "." && path != "" { + return path + } + // Convert the source directory into a more useful path. + // Also convert everything to slash-separated paths for uniform handling. + path = filepath.Clean(filepath.ToSlash(pkg.build.Dir)) + // Can we find a decent prefix? 
+ if buildCtx.GOROOT != "" { + goroot := filepath.Join(buildCtx.GOROOT, "src") + if p, ok := trim(path, filepath.ToSlash(goroot)); ok { + return p + } + } + for _, gopath := range splitGopath() { + if p, ok := trim(path, filepath.ToSlash(gopath)); ok { + return p + } + } + return path +} + +// trim trims the directory prefix from the path, paying attention +// to the path separator. If they are the same string or the prefix +// is not present the original is returned. The boolean reports whether +// the prefix is present. That path and prefix have slashes for separators. +func trim(path, prefix string) (string, bool) { + if !strings.HasPrefix(path, prefix) { + return path, false + } + if path == prefix { + return path, true + } + if path[len(prefix)] == '/' { + return path[len(prefix)+1:], true + } + return path, false // Textual prefix but not a path prefix. +} + +// pkg.Fatalf is like log.Fatalf, but panics so it can be recovered in the +// main do function, so it doesn't cause an exit. Allows testing to work +// without running a subprocess. The log prefix will be added when +// logged in main; it is not added here. +func (pkg *Package) Fatalf(format string, args ...any) { + panic(PackageError(fmt.Sprintf(format, args...))) +} + +// parsePackage turns the build package we found into a parsed package +// we can then use to generate documentation. +func parsePackage(writer io.Writer, pkg *build.Package, userPath string) *Package { + // include tells parser.ParseDir which files to include. + // That means the file must be in the build package's GoFiles or CgoFiles + // list only (no tag-ignored files, tests, swig or other non-Go files). 
+ include := func(info fs.FileInfo) bool { + for _, name := range pkg.GoFiles { + if name == info.Name() { + return true + } + } + for _, name := range pkg.CgoFiles { + if name == info.Name() { + return true + } + } + return false + } + fset := token.NewFileSet() + pkgs, err := parser.ParseDir(fset, pkg.Dir, include, parser.ParseComments) + if err != nil { + log.Fatal(err) + } + // Make sure they are all in one package. + if len(pkgs) == 0 { + log.Fatalf("no source-code package in directory %s", pkg.Dir) + } + if len(pkgs) > 1 { + log.Fatalf("multiple packages in directory %s", pkg.Dir) + } + astPkg := pkgs[pkg.Name] + + // TODO: go/doc does not include typed constants in the constants + // list, which is what we want. For instance, time.Sunday is of type + // time.Weekday, so it is defined in the type but not in the + // Consts list for the package. This prevents + // go doc time.Sunday + // from finding the symbol. Work around this for now, but we + // should fix it in go/doc. + // A similar story applies to factory functions. + mode := doc.AllDecls + if showSrc { + mode |= doc.PreserveAST // See comment for Package.emit. + } + docPkg := doc.New(astPkg, pkg.ImportPath, mode) + typedValue := make(map[*doc.Value]bool) + constructor := make(map[*doc.Func]bool) + for _, typ := range docPkg.Types { + docPkg.Consts = append(docPkg.Consts, typ.Consts...) + docPkg.Vars = append(docPkg.Vars, typ.Vars...) + docPkg.Funcs = append(docPkg.Funcs, typ.Funcs...) + if isExported(typ.Name) { + for _, value := range typ.Consts { + typedValue[value] = true + } + for _, value := range typ.Vars { + typedValue[value] = true + } + for _, fun := range typ.Funcs { + // We don't count it as a constructor bound to the type + // if the type itself is not exported. 
+ constructor[fun] = true + } + } + } + + p := &Package{ + writer: writer, + name: pkg.Name, + userPath: userPath, + pkg: astPkg, + file: ast.MergePackageFiles(astPkg, 0), + doc: docPkg, + typedValue: typedValue, + constructor: constructor, + build: pkg, + fs: fset, + } + p.buf.pkg = p + return p +} + +func (pkg *Package) Printf(format string, args ...any) { + fmt.Fprintf(&pkg.buf, format, args...) +} + +func (pkg *Package) flush() { + _, err := pkg.writer.Write(pkg.buf.Bytes()) + if err != nil { + log.Fatal(err) + } + pkg.buf.Reset() // Not needed, but it's a flush. +} + +var newlineBytes = []byte("\n\n") // We never ask for more than 2. + +// newlines guarantees there are n newlines at the end of the buffer. +func (pkg *Package) newlines(n int) { + for !bytes.HasSuffix(pkg.buf.Bytes(), newlineBytes[:n]) { + pkg.buf.WriteRune('\n') + } +} + +// emit prints the node. If showSrc is true, it ignores the provided comment, +// assuming the comment is in the node itself. Otherwise, the go/doc package +// clears the stuff we don't want to print anyway. It's a bit of a magic trick. +func (pkg *Package) emit(comment string, node ast.Node) { + if node != nil { + var arg any = node + if showSrc { + // Need an extra little dance to get internal comments to appear. + arg = &printer.CommentedNode{ + Node: node, + Comments: pkg.file.Comments, + } + } + err := format.Node(&pkg.buf, pkg.fs, arg) + if err != nil { + log.Fatal(err) + } + if comment != "" && !showSrc { + pkg.newlines(1) + pkg.ToText(&pkg.buf, comment, indent, indent+indent) + pkg.newlines(2) // Blank line after comment to separate from next item. + } else { + pkg.newlines(1) + } + } +} + +// oneLineNode returns a one-line summary of the given input node. +func (pkg *Package) oneLineNode(node ast.Node) string { + const maxDepth = 10 + return pkg.oneLineNodeDepth(node, maxDepth) +} + +// oneLineNodeDepth returns a one-line summary of the given input node. 
+// The depth specifies the maximum depth when traversing the AST. +func (pkg *Package) oneLineNodeDepth(node ast.Node, depth int) string { + const dotDotDot = "..." + if depth == 0 { + return dotDotDot + } + depth-- + + switch n := node.(type) { + case nil: + return "" + + case *ast.GenDecl: + // Formats const and var declarations. + trailer := "" + if len(n.Specs) > 1 { + trailer = " " + dotDotDot + } + + // Find the first relevant spec. + typ := "" + for i, spec := range n.Specs { + valueSpec := spec.(*ast.ValueSpec) // Must succeed; we can't mix types in one GenDecl. + + // The type name may carry over from a previous specification in the + // case of constants and iota. + if valueSpec.Type != nil { + typ = fmt.Sprintf(" %s", pkg.oneLineNodeDepth(valueSpec.Type, depth)) + } else if len(valueSpec.Values) > 0 { + typ = "" + } + + if !isExported(valueSpec.Names[0].Name) { + continue + } + val := "" + if i < len(valueSpec.Values) && valueSpec.Values[i] != nil { + val = fmt.Sprintf(" = %s", pkg.oneLineNodeDepth(valueSpec.Values[i], depth)) + } + return fmt.Sprintf("%s %s%s%s%s", n.Tok, valueSpec.Names[0], typ, val, trailer) + } + return "" + + case *ast.FuncDecl: + // Formats func declarations. 
+ name := n.Name.Name + recv := pkg.oneLineNodeDepth(n.Recv, depth) + if len(recv) > 0 { + recv = "(" + recv + ") " + } + fnc := pkg.oneLineNodeDepth(n.Type, depth) + fnc = strings.TrimPrefix(fnc, "func") + return fmt.Sprintf("func %s%s%s", recv, name, fnc) + + case *ast.TypeSpec: + sep := " " + if n.Assign.IsValid() { + sep = " = " + } + tparams := pkg.formatTypeParams(n.TypeParams, depth) + return fmt.Sprintf("type %s%s%s%s", n.Name.Name, tparams, sep, pkg.oneLineNodeDepth(n.Type, depth)) + + case *ast.FuncType: + var params []string + if n.Params != nil { + for _, field := range n.Params.List { + params = append(params, pkg.oneLineField(field, depth)) + } + } + needParens := false + var results []string + if n.Results != nil { + needParens = needParens || len(n.Results.List) > 1 + for _, field := range n.Results.List { + needParens = needParens || len(field.Names) > 0 + results = append(results, pkg.oneLineField(field, depth)) + } + } + + tparam := pkg.formatTypeParams(n.TypeParams, depth) + param := joinStrings(params) + if len(results) == 0 { + return fmt.Sprintf("func%s(%s)", tparam, param) + } + result := joinStrings(results) + if !needParens { + return fmt.Sprintf("func%s(%s) %s", tparam, param, result) + } + return fmt.Sprintf("func%s(%s) (%s)", tparam, param, result) + + case *ast.StructType: + if n.Fields == nil || len(n.Fields.List) == 0 { + return "struct{}" + } + return "struct{ ... }" + + case *ast.InterfaceType: + if n.Methods == nil || len(n.Methods.List) == 0 { + return "interface{}" + } + return "interface{ ... }" + + case *ast.FieldList: + if n == nil || len(n.List) == 0 { + return "" + } + if len(n.List) == 1 { + return pkg.oneLineField(n.List[0], depth) + } + return dotDotDot + + case *ast.FuncLit: + return pkg.oneLineNodeDepth(n.Type, depth) + " { ... 
}" + + case *ast.CompositeLit: + typ := pkg.oneLineNodeDepth(n.Type, depth) + if len(n.Elts) == 0 { + return fmt.Sprintf("%s{}", typ) + } + return fmt.Sprintf("%s{ %s }", typ, dotDotDot) + + case *ast.ArrayType: + length := pkg.oneLineNodeDepth(n.Len, depth) + element := pkg.oneLineNodeDepth(n.Elt, depth) + return fmt.Sprintf("[%s]%s", length, element) + + case *ast.MapType: + key := pkg.oneLineNodeDepth(n.Key, depth) + value := pkg.oneLineNodeDepth(n.Value, depth) + return fmt.Sprintf("map[%s]%s", key, value) + + case *ast.CallExpr: + fnc := pkg.oneLineNodeDepth(n.Fun, depth) + var args []string + for _, arg := range n.Args { + args = append(args, pkg.oneLineNodeDepth(arg, depth)) + } + return fmt.Sprintf("%s(%s)", fnc, joinStrings(args)) + + case *ast.UnaryExpr: + return fmt.Sprintf("%s%s", n.Op, pkg.oneLineNodeDepth(n.X, depth)) + + case *ast.Ident: + return n.Name + + default: + // As a fallback, use default formatter for all unknown node types. + buf := new(strings.Builder) + format.Node(buf, pkg.fs, node) + s := buf.String() + if strings.Contains(s, "\n") { + return dotDotDot + } + return s + } +} + +func (pkg *Package) formatTypeParams(list *ast.FieldList, depth int) string { + if list.NumFields() == 0 { + return "" + } + var tparams []string + for _, field := range list.List { + tparams = append(tparams, pkg.oneLineField(field, depth)) + } + return "[" + joinStrings(tparams) + "]" +} + +// oneLineField returns a one-line summary of the field. +func (pkg *Package) oneLineField(field *ast.Field, depth int) string { + var names []string + for _, name := range field.Names { + names = append(names, name.Name) + } + if len(names) == 0 { + return pkg.oneLineNodeDepth(field.Type, depth) + } + return joinStrings(names) + " " + pkg.oneLineNodeDepth(field.Type, depth) +} + +// joinStrings formats the input as a comma-separated list, +// but truncates the list at some reasonable length if necessary. 
+func joinStrings(ss []string) string { + var n int + for i, s := range ss { + n += len(s) + len(", ") + if n > punchedCardWidth { + ss = append(ss[:i:i], "...") + break + } + } + return strings.Join(ss, ", ") +} + +// printHeader prints a header for the section named s, adding a blank line on each side. +func (pkg *Package) printHeader(s string) { + pkg.Printf("\n%s\n\n", s) +} + +// constsDoc prints all const documentation, if any, including a header. +// The one argument is the valueDoc registry. +func (pkg *Package) constsDoc(printed map[*ast.GenDecl]bool) { + var header bool + for _, value := range pkg.doc.Consts { + // Constants and variables come in groups, and valueDoc prints + // all the items in the group. We only need to find one exported symbol. + for _, name := range value.Names { + if isExported(name) && !pkg.typedValue[value] { + if !header { + pkg.printHeader("CONSTANTS") + header = true + } + pkg.valueDoc(value, printed) + break + } + } + } +} + +// varsDoc prints all var documentation, if any, including a header. +// Printed is the valueDoc registry. +func (pkg *Package) varsDoc(printed map[*ast.GenDecl]bool) { + var header bool + for _, value := range pkg.doc.Vars { + // Constants and variables come in groups, and valueDoc prints + // all the items in the group. We only need to find one exported symbol. + for _, name := range value.Names { + if isExported(name) && !pkg.typedValue[value] { + if !header { + pkg.printHeader("VARIABLES") + header = true + } + pkg.valueDoc(value, printed) + break + } + } + } +} + +// funcsDoc prints all func documentation, if any, including a header. +func (pkg *Package) funcsDoc() { + var header bool + for _, fun := range pkg.doc.Funcs { + if isExported(fun.Name) && !pkg.constructor[fun] { + if !header { + pkg.printHeader("FUNCTIONS") + header = true + } + pkg.emit(fun.Doc, fun.Decl) + } + } +} + +// funcsDoc prints all type documentation, if any, including a header. 
+func (pkg *Package) typesDoc() { + var header bool + for _, typ := range pkg.doc.Types { + if isExported(typ.Name) { + if !header { + pkg.printHeader("TYPES") + header = true + } + pkg.typeDoc(typ) + } + } +} + +// packageDoc prints the docs for the package. +func (pkg *Package) packageDoc() { + pkg.Printf("") // Trigger the package clause; we know the package exists. + if showAll || !short { + pkg.ToText(&pkg.buf, pkg.doc.Doc, "", indent) + pkg.newlines(1) + } + + switch { + case showAll: + printed := make(map[*ast.GenDecl]bool) // valueDoc registry + pkg.constsDoc(printed) + pkg.varsDoc(printed) + pkg.funcsDoc() + pkg.typesDoc() + + case pkg.pkg.Name == "main" && !showCmd: + // Show only package docs for commands. + return + + default: + if !short { + pkg.newlines(2) // Guarantee blank line before the components. + } + pkg.valueSummary(pkg.doc.Consts, false) + pkg.valueSummary(pkg.doc.Vars, false) + pkg.funcSummary(pkg.doc.Funcs, false) + pkg.typeSummary() + } + + if !short { + pkg.bugs() + } +} + +// packageClause prints the package clause. +func (pkg *Package) packageClause() { + if short { + return + } + importPath := pkg.build.ImportComment + if importPath == "" { + importPath = pkg.build.ImportPath + } + + // If we're using modules, the import path derived from module code locations wins. + // If we did a file system scan, we knew the import path when we found the directory. + // But if we started with a directory name, we never knew the import path. + // Either way, we don't know it now, and it's cheap to (re)compute it. 
+ if usingModules { + for _, root := range codeRoots() { + if pkg.build.Dir == root.dir { + importPath = root.importPath + break + } + if strings.HasPrefix(pkg.build.Dir, root.dir+string(filepath.Separator)) { + suffix := filepath.ToSlash(pkg.build.Dir[len(root.dir)+1:]) + if root.importPath == "" { + importPath = suffix + } else { + importPath = root.importPath + "/" + suffix + } + break + } + } + } + + pkg.Printf("package %s // import %q\n\n", pkg.name, importPath) + if !usingModules && importPath != pkg.build.ImportPath { + pkg.Printf("WARNING: package source is installed in %q\n", pkg.build.ImportPath) + } +} + +// valueSummary prints a one-line summary for each set of values and constants. +// If all the types in a constant or variable declaration belong to the same +// type they can be printed by typeSummary, and so can be suppressed here. +func (pkg *Package) valueSummary(values []*doc.Value, showGrouped bool) { + var isGrouped map[*doc.Value]bool + if !showGrouped { + isGrouped = make(map[*doc.Value]bool) + for _, typ := range pkg.doc.Types { + if !isExported(typ.Name) { + continue + } + for _, c := range typ.Consts { + isGrouped[c] = true + } + for _, v := range typ.Vars { + isGrouped[v] = true + } + } + } + + for _, value := range values { + if !isGrouped[value] { + if decl := pkg.oneLineNode(value.Decl); decl != "" { + pkg.Printf("%s\n", decl) + } + } + } +} + +// funcSummary prints a one-line summary for each function. Constructors +// are printed by typeSummary, below, and so can be suppressed here. +func (pkg *Package) funcSummary(funcs []*doc.Func, showConstructors bool) { + for _, fun := range funcs { + // Exported functions only. The go/doc package does not include methods here. + if isExported(fun.Name) { + if showConstructors || !pkg.constructor[fun] { + pkg.Printf("%s\n", pkg.oneLineNode(fun.Decl)) + } + } + } +} + +// typeSummary prints a one-line summary for each type, followed by its constructors. 
+func (pkg *Package) typeSummary() { + for _, typ := range pkg.doc.Types { + for _, spec := range typ.Decl.Specs { + typeSpec := spec.(*ast.TypeSpec) // Must succeed. + if isExported(typeSpec.Name.Name) { + pkg.Printf("%s\n", pkg.oneLineNode(typeSpec)) + // Now print the consts, vars, and constructors. + for _, c := range typ.Consts { + if decl := pkg.oneLineNode(c.Decl); decl != "" { + pkg.Printf(indent+"%s\n", decl) + } + } + for _, v := range typ.Vars { + if decl := pkg.oneLineNode(v.Decl); decl != "" { + pkg.Printf(indent+"%s\n", decl) + } + } + for _, constructor := range typ.Funcs { + if isExported(constructor.Name) { + pkg.Printf(indent+"%s\n", pkg.oneLineNode(constructor.Decl)) + } + } + } + } + } +} + +// bugs prints the BUGS information for the package. +// TODO: Provide access to TODOs and NOTEs as well (very noisy so off by default)? +func (pkg *Package) bugs() { + if pkg.doc.Notes["BUG"] == nil { + return + } + pkg.Printf("\n") + for _, note := range pkg.doc.Notes["BUG"] { + pkg.Printf("%s: %v\n", "BUG", note.Body) + } +} + +// findValues finds the doc.Values that describe the symbol. +func (pkg *Package) findValues(symbol string, docValues []*doc.Value) (values []*doc.Value) { + for _, value := range docValues { + for _, name := range value.Names { + if match(symbol, name) { + values = append(values, value) + } + } + } + return +} + +// findFuncs finds the doc.Funcs that describes the symbol. +func (pkg *Package) findFuncs(symbol string) (funcs []*doc.Func) { + for _, fun := range pkg.doc.Funcs { + if match(symbol, fun.Name) { + funcs = append(funcs, fun) + } + } + return +} + +// findTypes finds the doc.Types that describes the symbol. +// If symbol is empty, it finds all exported types. 
+func (pkg *Package) findTypes(symbol string) (types []*doc.Type) { + for _, typ := range pkg.doc.Types { + if symbol == "" && isExported(typ.Name) || match(symbol, typ.Name) { + types = append(types, typ) + } + } + return +} + +// findTypeSpec returns the ast.TypeSpec within the declaration that defines the symbol. +// The name must match exactly. +func (pkg *Package) findTypeSpec(decl *ast.GenDecl, symbol string) *ast.TypeSpec { + for _, spec := range decl.Specs { + typeSpec := spec.(*ast.TypeSpec) // Must succeed. + if symbol == typeSpec.Name.Name { + return typeSpec + } + } + return nil +} + +// symbolDoc prints the docs for symbol. There may be multiple matches. +// If symbol matches a type, output includes its methods factories and associated constants. +// If there is no top-level symbol, symbolDoc looks for methods that match. +func (pkg *Package) symbolDoc(symbol string) bool { + found := false + // Functions. + for _, fun := range pkg.findFuncs(symbol) { + // Symbol is a function. + decl := fun.Decl + pkg.emit(fun.Doc, decl) + found = true + } + // Constants and variables behave the same. + values := pkg.findValues(symbol, pkg.doc.Consts) + values = append(values, pkg.findValues(symbol, pkg.doc.Vars)...) + printed := make(map[*ast.GenDecl]bool) // valueDoc registry + for _, value := range values { + pkg.valueDoc(value, printed) + found = true + } + // Types. + for _, typ := range pkg.findTypes(symbol) { + pkg.typeDoc(typ) + found = true + } + if !found { + // See if there are methods. + if !pkg.printMethodDoc("", symbol) { + return false + } + } + return true +} + +// valueDoc prints the docs for a constant or variable. The printed map records +// which values have been printed already to avoid duplication. Otherwise, a +// declaration like: +// +// const ( c = 1; C = 2 ) +// +// … could be printed twice if the -u flag is set, as it matches twice. 
+func (pkg *Package) valueDoc(value *doc.Value, printed map[*ast.GenDecl]bool) { + if printed[value.Decl] { + return + } + // Print each spec only if there is at least one exported symbol in it. + // (See issue 11008.) + // TODO: Should we elide unexported symbols from a single spec? + // It's an unlikely scenario, probably not worth the trouble. + // TODO: Would be nice if go/doc did this for us. + specs := make([]ast.Spec, 0, len(value.Decl.Specs)) + var typ ast.Expr + for _, spec := range value.Decl.Specs { + vspec := spec.(*ast.ValueSpec) + + // The type name may carry over from a previous specification in the + // case of constants and iota. + if vspec.Type != nil { + typ = vspec.Type + } + + for _, ident := range vspec.Names { + if showSrc || isExported(ident.Name) { + if vspec.Type == nil && vspec.Values == nil && typ != nil { + // This a standalone identifier, as in the case of iota usage. + // Thus, assume the type comes from the previous type. + vspec.Type = &ast.Ident{ + Name: pkg.oneLineNode(typ), + NamePos: vspec.End() - 1, + } + } + + specs = append(specs, vspec) + typ = nil // Only inject type on first exported identifier + break + } + } + } + if len(specs) == 0 { + return + } + value.Decl.Specs = specs + pkg.emit(value.Doc, value.Decl) + printed[value.Decl] = true +} + +// typeDoc prints the docs for a type, including constructors and other items +// related to it. +func (pkg *Package) typeDoc(typ *doc.Type) { + decl := typ.Decl + spec := pkg.findTypeSpec(decl, typ.Name) + trimUnexportedElems(spec) + // If there are multiple types defined, reduce to just this one. + if len(decl.Specs) > 1 { + decl.Specs = []ast.Spec{spec} + } + pkg.emit(typ.Doc, decl) + pkg.newlines(2) + // Show associated methods, constants, etc. + if showAll { + printed := make(map[*ast.GenDecl]bool) // valueDoc registry + // We can use append here to print consts, then vars. Ditto for funcs and methods. + values := typ.Consts + values = append(values, typ.Vars...) 
+ for _, value := range values { + for _, name := range value.Names { + if isExported(name) { + pkg.valueDoc(value, printed) + break + } + } + } + funcs := typ.Funcs + funcs = append(funcs, typ.Methods...) + for _, fun := range funcs { + if isExported(fun.Name) { + pkg.emit(fun.Doc, fun.Decl) + if fun.Doc == "" { + pkg.newlines(2) + } + } + } + } else { + pkg.valueSummary(typ.Consts, true) + pkg.valueSummary(typ.Vars, true) + pkg.funcSummary(typ.Funcs, true) + pkg.funcSummary(typ.Methods, true) + } +} + +// trimUnexportedElems modifies spec in place to elide unexported fields from +// structs and methods from interfaces (unless the unexported flag is set or we +// are asked to show the original source). +func trimUnexportedElems(spec *ast.TypeSpec) { + if showSrc { + return + } + switch typ := spec.Type.(type) { + case *ast.StructType: + typ.Fields = trimUnexportedFields(typ.Fields, false) + case *ast.InterfaceType: + typ.Methods = trimUnexportedFields(typ.Methods, true) + } +} + +// trimUnexportedFields returns the field list trimmed of unexported fields. +func trimUnexportedFields(fields *ast.FieldList, isInterface bool) *ast.FieldList { + what := "methods" + if !isInterface { + what = "fields" + } + + trimmed := false + list := make([]*ast.Field, 0, len(fields.List)) + for _, field := range fields.List { + // When printing fields we normally print field.Doc. + // Here we are going to pass the AST to go/format, + // which will print the comments from the AST, + // not field.Doc which is from go/doc. + // The two are similar but not identical; + // for example, field.Doc does not include directives. + // In order to consistently print field.Doc, + // we replace the comment in the AST with field.Doc. + // That will cause go/format to print what we want. + // See issue #56592. + if field.Doc != nil { + doc := field.Doc + text := doc.Text() + + trailingBlankLine := len(doc.List[len(doc.List)-1].Text) == 2 + if !trailingBlankLine { + // Remove trailing newline. 
+ lt := len(text) + if lt > 0 && text[lt-1] == '\n' { + text = text[:lt-1] + } + } + + start := doc.List[0].Slash + doc.List = doc.List[:0] + for _, line := range strings.Split(text, "\n") { + prefix := "// " + if len(line) > 0 && line[0] == '\t' { + prefix = "//" + } + doc.List = append(doc.List, &ast.Comment{ + Text: prefix + line, + }) + } + doc.List[0].Slash = start + } + + names := field.Names + if len(names) == 0 { + // Embedded type. Use the name of the type. It must be of the form ident or + // pkg.ident (for structs and interfaces), or *ident or *pkg.ident (structs only). + // Or a type embedded in a constraint. + // Nothing else is allowed. + ty := field.Type + if se, ok := field.Type.(*ast.StarExpr); !isInterface && ok { + // The form *ident or *pkg.ident is only valid on + // embedded types in structs. + ty = se.X + } + constraint := false + switch ident := ty.(type) { + case *ast.Ident: + if isInterface && ident.Name == "error" && ident.Obj == nil { + // For documentation purposes, we consider the builtin error + // type special when embedded in an interface, such that it + // always gets shown publicly. + list = append(list, field) + continue + } + names = []*ast.Ident{ident} + case *ast.SelectorExpr: + // An embedded type may refer to a type in another package. + names = []*ast.Ident{ident.Sel} + default: + // An approximation or union or type + // literal in an interface. + constraint = true + } + if names == nil && !constraint { + // Can only happen if AST is incorrect. Safe to continue with a nil list. + log.Print("invalid program: unexpected type for embedded field") + } + } + // Trims if any is unexported. Good enough in practice. 
+ ok := true + if !unexported { + for _, name := range names { + if !isExported(name.Name) { + trimmed = true + ok = false + break + } + } + } + if ok { + list = append(list, field) + } + } + if !trimmed { + return fields + } + unexportedField := &ast.Field{ + Type: &ast.Ident{ + // Hack: printer will treat this as a field with a named type. + // Setting Name and NamePos to ("", fields.Closing-1) ensures that + // when Pos and End are called on this field, they return the + // position right before closing '}' character. + Name: "", + NamePos: fields.Closing - 1, + }, + Comment: &ast.CommentGroup{ + List: []*ast.Comment{{Text: fmt.Sprintf("// Has unexported %s.\n", what)}}, + }, + } + return &ast.FieldList{ + Opening: fields.Opening, + List: append(list, unexportedField), + Closing: fields.Closing, + } +} + +// printMethodDoc prints the docs for matches of symbol.method. +// If symbol is empty, it prints all methods for any concrete type +// that match the name. It reports whether it found any methods. +func (pkg *Package) printMethodDoc(symbol, method string) bool { + types := pkg.findTypes(symbol) + if types == nil { + if symbol == "" { + return false + } + pkg.Fatalf("symbol %s is not a type in package %s installed in %q", symbol, pkg.name, pkg.build.ImportPath) + } + found := false + for _, typ := range types { + if len(typ.Methods) > 0 { + for _, meth := range typ.Methods { + if match(method, meth.Name) { + decl := meth.Decl + pkg.emit(meth.Doc, decl) + found = true + } + } + continue + } + if symbol == "" { + continue + } + // Type may be an interface. The go/doc package does not attach + // an interface's methods to the doc.Type. We need to dig around. + spec := pkg.findTypeSpec(typ.Decl, typ.Name) + inter, ok := spec.Type.(*ast.InterfaceType) + if !ok { + // Not an interface type. + continue + } + + // Collect and print only the methods that match. 
+ var methods []*ast.Field + for _, iMethod := range inter.Methods.List { + // This is an interface, so there can be only one name. + // TODO: Anonymous methods (embedding) + if len(iMethod.Names) == 0 { + continue + } + name := iMethod.Names[0].Name + if match(method, name) { + methods = append(methods, iMethod) + found = true + } + } + if found { + pkg.Printf("type %s ", spec.Name) + inter.Methods.List, methods = methods, inter.Methods.List + err := format.Node(&pkg.buf, pkg.fs, inter) + if err != nil { + log.Fatal(err) + } + pkg.newlines(1) + // Restore the original methods. + inter.Methods.List = methods + } + } + return found +} + +// printFieldDoc prints the docs for matches of symbol.fieldName. +// It reports whether it found any field. +// Both symbol and fieldName must be non-empty or it returns false. +func (pkg *Package) printFieldDoc(symbol, fieldName string) bool { + if symbol == "" || fieldName == "" { + return false + } + types := pkg.findTypes(symbol) + if types == nil { + pkg.Fatalf("symbol %s is not a type in package %s installed in %q", symbol, pkg.name, pkg.build.ImportPath) + } + found := false + numUnmatched := 0 + for _, typ := range types { + // Type must be a struct. + spec := pkg.findTypeSpec(typ.Decl, typ.Name) + structType, ok := spec.Type.(*ast.StructType) + if !ok { + // Not a struct type. + continue + } + for _, field := range structType.Fields.List { + // TODO: Anonymous fields. + for _, name := range field.Names { + if !match(fieldName, name.Name) { + numUnmatched++ + continue + } + if !found { + pkg.Printf("type %s struct {\n", typ.Name) + } + if field.Doc != nil { + // To present indented blocks in comments correctly, process the comment as + // a unit before adding the leading // to each line. 
+ docBuf := new(bytes.Buffer) + pkg.ToText(docBuf, field.Doc.Text(), "", indent) + scanner := bufio.NewScanner(docBuf) + for scanner.Scan() { + fmt.Fprintf(&pkg.buf, "%s// %s\n", indent, scanner.Bytes()) + } + } + s := pkg.oneLineNode(field.Type) + lineComment := "" + if field.Comment != nil { + lineComment = fmt.Sprintf(" %s", field.Comment.List[0].Text) + } + pkg.Printf("%s%s %s%s\n", indent, name, s, lineComment) + found = true + } + } + } + if found { + if numUnmatched > 0 { + pkg.Printf("\n // ... other fields elided ...\n") + } + pkg.Printf("}\n") + } + return found +} + +// match reports whether the user's symbol matches the program's. +// A lower-case character in the user's string matches either case in the program's. +// The program string must be exported. +func match(user, program string) bool { + if !isExported(program) { + return false + } + if matchCase { + return user == program + } + for _, u := range user { + p, w := utf8.DecodeRuneInString(program) + program = program[w:] + if u == p { + continue + } + if unicode.IsLower(u) && simpleFold(u) == simpleFold(p) { + continue + } + return false + } + return program == "" +} + +// simpleFold returns the minimum rune equivalent to r +// under Unicode-defined simple case folding. +func simpleFold(r rune) rune { + for { + r1 := unicode.SimpleFold(r) + if r1 <= r { + return r1 // wrapped around, found min + } + r = r1 + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/buildtag.go b/platform/dbops/binaries/go/go/src/cmd/fix/buildtag.go new file mode 100644 index 0000000000000000000000000000000000000000..5f4fbfef16f15b99d78ff34d2f2c0a02334760db --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/buildtag.go @@ -0,0 +1,51 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "go/ast" + "strings" +) + +func init() { + register(buildtagFix) +} + +const buildtagGoVersionCutoff = 1_18 + +var buildtagFix = fix{ + name: "buildtag", + date: "2021-08-25", + f: buildtag, + desc: `Remove +build comments from modules using Go 1.18 or later`, +} + +func buildtag(f *ast.File) bool { + if goVersion < buildtagGoVersionCutoff { + return false + } + + // File is already gofmt-ed, so we know that if there are +build lines, + // they are in a comment group that starts with a //go:build line followed + // by a blank line. While we cannot delete comments from an AST and + // expect consistent output in general, this specific case - deleting only + // some lines from a comment block - does format correctly. + fixed := false + for _, g := range f.Comments { + sawGoBuild := false + for i, c := range g.List { + if strings.HasPrefix(c.Text, "//go:build ") { + sawGoBuild = true + } + if sawGoBuild && strings.HasPrefix(c.Text, "// +build ") { + g.List = g.List[:i] + fixed = true + break + } + } + } + + return fixed +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/buildtag_test.go b/platform/dbops/binaries/go/go/src/cmd/fix/buildtag_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1c6efbe9e0326232a59a9548e2cde10fb73e2bc0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/buildtag_test.go @@ -0,0 +1,34 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +func init() { + addTestCases(buildtagTests, buildtag) +} + +var buildtagTests = []testCase{ + { + Name: "buildtag.oldGo", + Version: 1_10, + In: `//go:build yes +// +build yes + +package main +`, + }, + { + Name: "buildtag.new", + Version: 1_99, + In: `//go:build yes +// +build yes + +package main +`, + Out: `//go:build yes + +package main +`, + }, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/cftype.go b/platform/dbops/binaries/go/go/src/cmd/fix/cftype.go new file mode 100644 index 0000000000000000000000000000000000000000..d4fcc4485e5791b9c00725de145ea5bfe4845c28 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/cftype.go @@ -0,0 +1,147 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "go/ast" + "go/token" + "reflect" + "strings" +) + +func init() { + register(cftypeFix) +} + +var cftypeFix = fix{ + name: "cftype", + date: "2017-09-27", + f: cftypefix, + desc: `Fixes initializers and casts of C.*Ref and JNI types`, + disabled: false, +} + +// Old state: +// +// type CFTypeRef unsafe.Pointer +// +// New state: +// +// type CFTypeRef uintptr +// +// and similar for other *Ref types. +// This fix finds nils initializing these types and replaces the nils with 0s. +func cftypefix(f *ast.File) bool { + return typefix(f, func(s string) bool { + return strings.HasPrefix(s, "C.") && strings.HasSuffix(s, "Ref") && s != "C.CFAllocatorRef" + }) +} + +// typefix replaces nil with 0 for all nils whose type, when passed to badType, returns true. +func typefix(f *ast.File, badType func(string) bool) bool { + if !imports(f, "C") { + return false + } + typeof, _ := typecheck(&TypeConfig{}, f) + changed := false + + // step 1: Find all the nils with the offending types. + // Compute their replacement. 
+ badNils := map[any]ast.Expr{} + walk(f, func(n any) { + if i, ok := n.(*ast.Ident); ok && i.Name == "nil" && badType(typeof[n]) { + badNils[n] = &ast.BasicLit{ValuePos: i.NamePos, Kind: token.INT, Value: "0"} + } + }) + + // step 2: find all uses of the bad nils, replace them with 0. + // There's no easy way to map from an ast.Expr to all the places that use them, so + // we use reflect to find all such references. + if len(badNils) > 0 { + exprType := reflect.TypeFor[ast.Expr]() + exprSliceType := reflect.TypeFor[[]ast.Expr]() + walk(f, func(n any) { + if n == nil { + return + } + v := reflect.ValueOf(n) + if v.Type().Kind() != reflect.Pointer { + return + } + if v.IsNil() { + return + } + v = v.Elem() + if v.Type().Kind() != reflect.Struct { + return + } + for i := 0; i < v.NumField(); i++ { + f := v.Field(i) + if f.Type() == exprType { + if r := badNils[f.Interface()]; r != nil { + f.Set(reflect.ValueOf(r)) + changed = true + } + } + if f.Type() == exprSliceType { + for j := 0; j < f.Len(); j++ { + e := f.Index(j) + if r := badNils[e.Interface()]; r != nil { + e.Set(reflect.ValueOf(r)) + changed = true + } + } + } + } + }) + } + + // step 3: fix up invalid casts. + // It used to be ok to cast between *unsafe.Pointer and *C.CFTypeRef in a single step. + // Now we need unsafe.Pointer as an intermediate cast. + // (*unsafe.Pointer)(x) where x is type *bad -> (*unsafe.Pointer)(unsafe.Pointer(x)) + // (*bad.type)(x) where x is type *unsafe.Pointer -> (*bad.type)(unsafe.Pointer(x)) + walk(f, func(n any) { + if n == nil { + return + } + // Find pattern like (*a.b)(x) + c, ok := n.(*ast.CallExpr) + if !ok { + return + } + if len(c.Args) != 1 { + return + } + p, ok := c.Fun.(*ast.ParenExpr) + if !ok { + return + } + s, ok := p.X.(*ast.StarExpr) + if !ok { + return + } + t, ok := s.X.(*ast.SelectorExpr) + if !ok { + return + } + pkg, ok := t.X.(*ast.Ident) + if !ok { + return + } + dst := pkg.Name + "." 
+ t.Sel.Name + src := typeof[c.Args[0]] + if badType(dst) && src == "*unsafe.Pointer" || + dst == "unsafe.Pointer" && strings.HasPrefix(src, "*") && badType(src[1:]) { + c.Args[0] = &ast.CallExpr{ + Fun: &ast.SelectorExpr{X: &ast.Ident{Name: "unsafe"}, Sel: &ast.Ident{Name: "Pointer"}}, + Args: []ast.Expr{c.Args[0]}, + } + changed = true + } + }) + + return changed +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/cftype_test.go b/platform/dbops/binaries/go/go/src/cmd/fix/cftype_test.go new file mode 100644 index 0000000000000000000000000000000000000000..cde47f28a3bbf11898fdf8153781333c5923ce7c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/cftype_test.go @@ -0,0 +1,241 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func init() { + addTestCases(cftypeTests, cftypefix) +} + +var cftypeTests = []testCase{ + { + Name: "cftype.localVariable", + In: `package main + +// typedef const void *CFTypeRef; +import "C" + +func f() { + var x C.CFTypeRef = nil + x = nil + x, x = nil, nil +} +`, + Out: `package main + +// typedef const void *CFTypeRef; +import "C" + +func f() { + var x C.CFTypeRef = 0 + x = 0 + x, x = 0, 0 +} +`, + }, + { + Name: "cftype.globalVariable", + In: `package main + +// typedef const void *CFTypeRef; +import "C" + +var x C.CFTypeRef = nil + +func f() { + x = nil +} +`, + Out: `package main + +// typedef const void *CFTypeRef; +import "C" + +var x C.CFTypeRef = 0 + +func f() { + x = 0 +} +`, + }, + { + Name: "cftype.EqualArgument", + In: `package main + +// typedef const void *CFTypeRef; +import "C" + +var x C.CFTypeRef +var y = x == nil +var z = x != nil +`, + Out: `package main + +// typedef const void *CFTypeRef; +import "C" + +var x C.CFTypeRef +var y = x == 0 +var z = x != 0 +`, + }, + { + Name: "cftype.StructField", + In: `package main + +// typedef const void *CFTypeRef; +import "C" + +type 
T struct { + x C.CFTypeRef +} + +var t = T{x: nil} +`, + Out: `package main + +// typedef const void *CFTypeRef; +import "C" + +type T struct { + x C.CFTypeRef +} + +var t = T{x: 0} +`, + }, + { + Name: "cftype.FunctionArgument", + In: `package main + +// typedef const void *CFTypeRef; +import "C" + +func f(x C.CFTypeRef) { +} + +func g() { + f(nil) +} +`, + Out: `package main + +// typedef const void *CFTypeRef; +import "C" + +func f(x C.CFTypeRef) { +} + +func g() { + f(0) +} +`, + }, + { + Name: "cftype.ArrayElement", + In: `package main + +// typedef const void *CFTypeRef; +import "C" + +var x = [3]C.CFTypeRef{nil, nil, nil} +`, + Out: `package main + +// typedef const void *CFTypeRef; +import "C" + +var x = [3]C.CFTypeRef{0, 0, 0} +`, + }, + { + Name: "cftype.SliceElement", + In: `package main + +// typedef const void *CFTypeRef; +import "C" + +var x = []C.CFTypeRef{nil, nil, nil} +`, + Out: `package main + +// typedef const void *CFTypeRef; +import "C" + +var x = []C.CFTypeRef{0, 0, 0} +`, + }, + { + Name: "cftype.MapKey", + In: `package main + +// typedef const void *CFTypeRef; +import "C" + +var x = map[C.CFTypeRef]int{nil: 0} +`, + Out: `package main + +// typedef const void *CFTypeRef; +import "C" + +var x = map[C.CFTypeRef]int{0: 0} +`, + }, + { + Name: "cftype.MapValue", + In: `package main + +// typedef const void *CFTypeRef; +import "C" + +var x = map[int]C.CFTypeRef{0: nil} +`, + Out: `package main + +// typedef const void *CFTypeRef; +import "C" + +var x = map[int]C.CFTypeRef{0: 0} +`, + }, + { + Name: "cftype.Conversion1", + In: `package main + +// typedef const void *CFTypeRef; +import "C" + +var x C.CFTypeRef +var y = (*unsafe.Pointer)(&x) +`, + Out: `package main + +// typedef const void *CFTypeRef; +import "C" + +var x C.CFTypeRef +var y = (*unsafe.Pointer)(unsafe.Pointer(&x)) +`, + }, + { + Name: "cftype.Conversion2", + In: `package main + +// typedef const void *CFTypeRef; +import "C" + +var x unsafe.Pointer +var y = (*C.CFTypeRef)(&x) +`, + 
Out: `package main + +// typedef const void *CFTypeRef; +import "C" + +var x unsafe.Pointer +var y = (*C.CFTypeRef)(unsafe.Pointer(&x)) +`, + }, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/context.go b/platform/dbops/binaries/go/go/src/cmd/fix/context.go new file mode 100644 index 0000000000000000000000000000000000000000..1107f4d66c0ceb221d9e89f6ad836546d78d362e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/context.go @@ -0,0 +1,25 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "go/ast" +) + +func init() { + register(contextFix) +} + +var contextFix = fix{ + name: "context", + date: "2016-09-09", + f: ctxfix, + desc: `Change imports of golang.org/x/net/context to context`, + disabled: false, +} + +func ctxfix(f *ast.File) bool { + return rewriteImport(f, "golang.org/x/net/context", "context") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/context_test.go b/platform/dbops/binaries/go/go/src/cmd/fix/context_test.go new file mode 100644 index 0000000000000000000000000000000000000000..935d0d723589164798eee287ed296c309091e21c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/context_test.go @@ -0,0 +1,42 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +func init() { + addTestCases(contextTests, ctxfix) +} + +var contextTests = []testCase{ + { + Name: "context.0", + In: `package main + +import "golang.org/x/net/context" + +var _ = "golang.org/x/net/context" +`, + Out: `package main + +import "context" + +var _ = "golang.org/x/net/context" +`, + }, + { + Name: "context.1", + In: `package main + +import ctx "golang.org/x/net/context" + +var _ = ctx.Background() +`, + Out: `package main + +import ctx "context" + +var _ = ctx.Background() +`, + }, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/doc.go b/platform/dbops/binaries/go/go/src/cmd/fix/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..062eb792856285585c3144b6fe08c5dcc0aa5c10 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/doc.go @@ -0,0 +1,37 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Fix finds Go programs that use old APIs and rewrites them to use +newer ones. After you update to a new Go release, fix helps make +the necessary changes to your programs. + +Usage: + + go tool fix [-r name,...] [path ...] + +Without an explicit path, fix reads standard input and writes the +result to standard output. + +If the named path is a file, fix rewrites the named files in place. +If the named path is a directory, fix rewrites all .go files in that +directory tree. When fix rewrites a file, it prints a line to standard +error giving the name of the file and the rewrite applied. + +If the -diff flag is set, no files are rewritten. Instead fix prints +the differences a rewrite would introduce. + +The -r flag restricts the set of rewrites considered to those in the +named list. By default fix considers all known rewrites. Fix's +rewrites are idempotent, so that it is safe to apply fix to updated +or partially updated code even without using the -r flag. 
+ +Fix prints the full list of fixes it can apply in its help output; +to see them, run go tool fix -help. + +Fix does not make backup copies of the files that it edits. +Instead, use a version control system's “diff” functionality to inspect +the changes that fix makes before committing them. +*/ +package main diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/egltype.go b/platform/dbops/binaries/go/go/src/cmd/fix/egltype.go new file mode 100644 index 0000000000000000000000000000000000000000..a096db6665a5fbe19ec5efb3ae7897ad5d8957cd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/egltype.go @@ -0,0 +1,60 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "go/ast" +) + +func init() { + register(eglFixDisplay) + register(eglFixConfig) +} + +var eglFixDisplay = fix{ + name: "egl", + date: "2018-12-15", + f: eglfixDisp, + desc: `Fixes initializers of EGLDisplay`, + disabled: false, +} + +// Old state: +// +// type EGLDisplay unsafe.Pointer +// +// New state: +// +// type EGLDisplay uintptr +// +// This fix finds nils initializing these types and replaces the nils with 0s. +func eglfixDisp(f *ast.File) bool { + return typefix(f, func(s string) bool { + return s == "C.EGLDisplay" + }) +} + +var eglFixConfig = fix{ + name: "eglconf", + date: "2020-05-30", + f: eglfixConfig, + desc: `Fixes initializers of EGLConfig`, + disabled: false, +} + +// Old state: +// +// type EGLConfig unsafe.Pointer +// +// New state: +// +// type EGLConfig uintptr +// +// This fix finds nils initializing these types and replaces the nils with 0s. 
+func eglfixConfig(f *ast.File) bool { + return typefix(f, func(s string) bool { + return s == "C.EGLConfig" + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/egltype_test.go b/platform/dbops/binaries/go/go/src/cmd/fix/egltype_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c44525c0539e8b9fe699799232f0626bff51ea98 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/egltype_test.go @@ -0,0 +1,214 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "strings" + +func init() { + addTestCases(eglTestsFor("EGLDisplay"), eglfixDisp) + addTestCases(eglTestsFor("EGLConfig"), eglfixConfig) +} + +func eglTestsFor(tname string) []testCase { + var eglTests = []testCase{ + { + Name: "egl.localVariable", + In: `package main + +// typedef void *$EGLTYPE; +import "C" + +func f() { + var x C.$EGLTYPE = nil + x = nil + x, x = nil, nil +} +`, + Out: `package main + +// typedef void *$EGLTYPE; +import "C" + +func f() { + var x C.$EGLTYPE = 0 + x = 0 + x, x = 0, 0 +} +`, + }, + { + Name: "egl.globalVariable", + In: `package main + +// typedef void *$EGLTYPE; +import "C" + +var x C.$EGLTYPE = nil + +func f() { + x = nil +} +`, + Out: `package main + +// typedef void *$EGLTYPE; +import "C" + +var x C.$EGLTYPE = 0 + +func f() { + x = 0 +} +`, + }, + { + Name: "egl.EqualArgument", + In: `package main + +// typedef void *$EGLTYPE; +import "C" + +var x C.$EGLTYPE +var y = x == nil +var z = x != nil +`, + Out: `package main + +// typedef void *$EGLTYPE; +import "C" + +var x C.$EGLTYPE +var y = x == 0 +var z = x != 0 +`, + }, + { + Name: "egl.StructField", + In: `package main + +// typedef void *$EGLTYPE; +import "C" + +type T struct { + x C.$EGLTYPE +} + +var t = T{x: nil} +`, + Out: `package main + +// typedef void *$EGLTYPE; +import "C" + +type T struct { + x C.$EGLTYPE +} + +var t = T{x: 0} +`, + }, + { 
+ Name: "egl.FunctionArgument", + In: `package main + +// typedef void *$EGLTYPE; +import "C" + +func f(x C.$EGLTYPE) { +} + +func g() { + f(nil) +} +`, + Out: `package main + +// typedef void *$EGLTYPE; +import "C" + +func f(x C.$EGLTYPE) { +} + +func g() { + f(0) +} +`, + }, + { + Name: "egl.ArrayElement", + In: `package main + +// typedef void *$EGLTYPE; +import "C" + +var x = [3]C.$EGLTYPE{nil, nil, nil} +`, + Out: `package main + +// typedef void *$EGLTYPE; +import "C" + +var x = [3]C.$EGLTYPE{0, 0, 0} +`, + }, + { + Name: "egl.SliceElement", + In: `package main + +// typedef void *$EGLTYPE; +import "C" + +var x = []C.$EGLTYPE{nil, nil, nil} +`, + Out: `package main + +// typedef void *$EGLTYPE; +import "C" + +var x = []C.$EGLTYPE{0, 0, 0} +`, + }, + { + Name: "egl.MapKey", + In: `package main + +// typedef void *$EGLTYPE; +import "C" + +var x = map[C.$EGLTYPE]int{nil: 0} +`, + Out: `package main + +// typedef void *$EGLTYPE; +import "C" + +var x = map[C.$EGLTYPE]int{0: 0} +`, + }, + { + Name: "egl.MapValue", + In: `package main + +// typedef void *$EGLTYPE; +import "C" + +var x = map[int]C.$EGLTYPE{0: nil} +`, + Out: `package main + +// typedef void *$EGLTYPE; +import "C" + +var x = map[int]C.$EGLTYPE{0: 0} +`, + }, + } + for i := range eglTests { + t := &eglTests[i] + t.In = strings.ReplaceAll(t.In, "$EGLTYPE", tname) + t.Out = strings.ReplaceAll(t.Out, "$EGLTYPE", tname) + } + return eglTests +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/fix.go b/platform/dbops/binaries/go/go/src/cmd/fix/fix.go new file mode 100644 index 0000000000000000000000000000000000000000..7abdab28a8df21095aebc8ea009919406dd03128 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/fix.go @@ -0,0 +1,566 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" + "go/ast" + "go/token" + "path" + "strconv" +) + +type fix struct { + name string + date string // date that fix was introduced, in YYYY-MM-DD format + f func(*ast.File) bool + desc string + disabled bool // whether this fix should be disabled by default +} + +// main runs sort.Sort(byName(fixes)) before printing list of fixes. +type byName []fix + +func (f byName) Len() int { return len(f) } +func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } +func (f byName) Less(i, j int) bool { return f[i].name < f[j].name } + +// main runs sort.Sort(byDate(fixes)) before applying fixes. +type byDate []fix + +func (f byDate) Len() int { return len(f) } +func (f byDate) Swap(i, j int) { f[i], f[j] = f[j], f[i] } +func (f byDate) Less(i, j int) bool { return f[i].date < f[j].date } + +var fixes []fix + +func register(f fix) { + fixes = append(fixes, f) +} + +// walk traverses the AST x, calling visit(y) for each node y in the tree but +// also with a pointer to each ast.Expr, ast.Stmt, and *ast.BlockStmt, +// in a bottom-up traversal. +func walk(x any, visit func(any)) { + walkBeforeAfter(x, nop, visit) +} + +func nop(any) {} + +// walkBeforeAfter is like walk but calls before(x) before traversing +// x's children and after(x) afterward. 
+func walkBeforeAfter(x any, before, after func(any)) { + before(x) + + switch n := x.(type) { + default: + panic(fmt.Errorf("unexpected type %T in walkBeforeAfter", x)) + + case nil: + + // pointers to interfaces + case *ast.Decl: + walkBeforeAfter(*n, before, after) + case *ast.Expr: + walkBeforeAfter(*n, before, after) + case *ast.Spec: + walkBeforeAfter(*n, before, after) + case *ast.Stmt: + walkBeforeAfter(*n, before, after) + + // pointers to struct pointers + case **ast.BlockStmt: + walkBeforeAfter(*n, before, after) + case **ast.CallExpr: + walkBeforeAfter(*n, before, after) + case **ast.FieldList: + walkBeforeAfter(*n, before, after) + case **ast.FuncType: + walkBeforeAfter(*n, before, after) + case **ast.Ident: + walkBeforeAfter(*n, before, after) + case **ast.BasicLit: + walkBeforeAfter(*n, before, after) + + // pointers to slices + case *[]ast.Decl: + walkBeforeAfter(*n, before, after) + case *[]ast.Expr: + walkBeforeAfter(*n, before, after) + case *[]*ast.File: + walkBeforeAfter(*n, before, after) + case *[]*ast.Ident: + walkBeforeAfter(*n, before, after) + case *[]ast.Spec: + walkBeforeAfter(*n, before, after) + case *[]ast.Stmt: + walkBeforeAfter(*n, before, after) + + // These are ordered and grouped to match ../../go/ast/ast.go + case *ast.Field: + walkBeforeAfter(&n.Names, before, after) + walkBeforeAfter(&n.Type, before, after) + walkBeforeAfter(&n.Tag, before, after) + case *ast.FieldList: + for _, field := range n.List { + walkBeforeAfter(field, before, after) + } + case *ast.BadExpr: + case *ast.Ident: + case *ast.Ellipsis: + walkBeforeAfter(&n.Elt, before, after) + case *ast.BasicLit: + case *ast.FuncLit: + walkBeforeAfter(&n.Type, before, after) + walkBeforeAfter(&n.Body, before, after) + case *ast.CompositeLit: + walkBeforeAfter(&n.Type, before, after) + walkBeforeAfter(&n.Elts, before, after) + case *ast.ParenExpr: + walkBeforeAfter(&n.X, before, after) + case *ast.SelectorExpr: + walkBeforeAfter(&n.X, before, after) + case *ast.IndexExpr: 
+ walkBeforeAfter(&n.X, before, after) + walkBeforeAfter(&n.Index, before, after) + case *ast.IndexListExpr: + walkBeforeAfter(&n.X, before, after) + walkBeforeAfter(&n.Indices, before, after) + case *ast.SliceExpr: + walkBeforeAfter(&n.X, before, after) + if n.Low != nil { + walkBeforeAfter(&n.Low, before, after) + } + if n.High != nil { + walkBeforeAfter(&n.High, before, after) + } + case *ast.TypeAssertExpr: + walkBeforeAfter(&n.X, before, after) + walkBeforeAfter(&n.Type, before, after) + case *ast.CallExpr: + walkBeforeAfter(&n.Fun, before, after) + walkBeforeAfter(&n.Args, before, after) + case *ast.StarExpr: + walkBeforeAfter(&n.X, before, after) + case *ast.UnaryExpr: + walkBeforeAfter(&n.X, before, after) + case *ast.BinaryExpr: + walkBeforeAfter(&n.X, before, after) + walkBeforeAfter(&n.Y, before, after) + case *ast.KeyValueExpr: + walkBeforeAfter(&n.Key, before, after) + walkBeforeAfter(&n.Value, before, after) + + case *ast.ArrayType: + walkBeforeAfter(&n.Len, before, after) + walkBeforeAfter(&n.Elt, before, after) + case *ast.StructType: + walkBeforeAfter(&n.Fields, before, after) + case *ast.FuncType: + if n.TypeParams != nil { + walkBeforeAfter(&n.TypeParams, before, after) + } + walkBeforeAfter(&n.Params, before, after) + if n.Results != nil { + walkBeforeAfter(&n.Results, before, after) + } + case *ast.InterfaceType: + walkBeforeAfter(&n.Methods, before, after) + case *ast.MapType: + walkBeforeAfter(&n.Key, before, after) + walkBeforeAfter(&n.Value, before, after) + case *ast.ChanType: + walkBeforeAfter(&n.Value, before, after) + + case *ast.BadStmt: + case *ast.DeclStmt: + walkBeforeAfter(&n.Decl, before, after) + case *ast.EmptyStmt: + case *ast.LabeledStmt: + walkBeforeAfter(&n.Stmt, before, after) + case *ast.ExprStmt: + walkBeforeAfter(&n.X, before, after) + case *ast.SendStmt: + walkBeforeAfter(&n.Chan, before, after) + walkBeforeAfter(&n.Value, before, after) + case *ast.IncDecStmt: + walkBeforeAfter(&n.X, before, after) + case 
*ast.AssignStmt: + walkBeforeAfter(&n.Lhs, before, after) + walkBeforeAfter(&n.Rhs, before, after) + case *ast.GoStmt: + walkBeforeAfter(&n.Call, before, after) + case *ast.DeferStmt: + walkBeforeAfter(&n.Call, before, after) + case *ast.ReturnStmt: + walkBeforeAfter(&n.Results, before, after) + case *ast.BranchStmt: + case *ast.BlockStmt: + walkBeforeAfter(&n.List, before, after) + case *ast.IfStmt: + walkBeforeAfter(&n.Init, before, after) + walkBeforeAfter(&n.Cond, before, after) + walkBeforeAfter(&n.Body, before, after) + walkBeforeAfter(&n.Else, before, after) + case *ast.CaseClause: + walkBeforeAfter(&n.List, before, after) + walkBeforeAfter(&n.Body, before, after) + case *ast.SwitchStmt: + walkBeforeAfter(&n.Init, before, after) + walkBeforeAfter(&n.Tag, before, after) + walkBeforeAfter(&n.Body, before, after) + case *ast.TypeSwitchStmt: + walkBeforeAfter(&n.Init, before, after) + walkBeforeAfter(&n.Assign, before, after) + walkBeforeAfter(&n.Body, before, after) + case *ast.CommClause: + walkBeforeAfter(&n.Comm, before, after) + walkBeforeAfter(&n.Body, before, after) + case *ast.SelectStmt: + walkBeforeAfter(&n.Body, before, after) + case *ast.ForStmt: + walkBeforeAfter(&n.Init, before, after) + walkBeforeAfter(&n.Cond, before, after) + walkBeforeAfter(&n.Post, before, after) + walkBeforeAfter(&n.Body, before, after) + case *ast.RangeStmt: + walkBeforeAfter(&n.Key, before, after) + walkBeforeAfter(&n.Value, before, after) + walkBeforeAfter(&n.X, before, after) + walkBeforeAfter(&n.Body, before, after) + + case *ast.ImportSpec: + case *ast.ValueSpec: + walkBeforeAfter(&n.Type, before, after) + walkBeforeAfter(&n.Values, before, after) + walkBeforeAfter(&n.Names, before, after) + case *ast.TypeSpec: + if n.TypeParams != nil { + walkBeforeAfter(&n.TypeParams, before, after) + } + walkBeforeAfter(&n.Type, before, after) + + case *ast.BadDecl: + case *ast.GenDecl: + walkBeforeAfter(&n.Specs, before, after) + case *ast.FuncDecl: + if n.Recv != nil { + 
walkBeforeAfter(&n.Recv, before, after) + } + walkBeforeAfter(&n.Type, before, after) + if n.Body != nil { + walkBeforeAfter(&n.Body, before, after) + } + + case *ast.File: + walkBeforeAfter(&n.Decls, before, after) + + case *ast.Package: + walkBeforeAfter(&n.Files, before, after) + + case []*ast.File: + for i := range n { + walkBeforeAfter(&n[i], before, after) + } + case []ast.Decl: + for i := range n { + walkBeforeAfter(&n[i], before, after) + } + case []ast.Expr: + for i := range n { + walkBeforeAfter(&n[i], before, after) + } + case []*ast.Ident: + for i := range n { + walkBeforeAfter(&n[i], before, after) + } + case []ast.Stmt: + for i := range n { + walkBeforeAfter(&n[i], before, after) + } + case []ast.Spec: + for i := range n { + walkBeforeAfter(&n[i], before, after) + } + } + after(x) +} + +// imports reports whether f imports path. +func imports(f *ast.File, path string) bool { + return importSpec(f, path) != nil +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err == nil { + return t + } + return "" +} + +// declImports reports whether gen contains an import of path. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// isTopName reports whether n is a top-level unresolved identifier with the given name. 
+func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// renameTop renames all references to the top-level name old. +// It reports whether it makes any changes. +func renameTop(f *ast.File, old, new string) bool { + var fixed bool + + // Rename any conflicting imports + // (assuming package name is last element of path). + for _, s := range f.Imports { + if s.Name != nil { + if s.Name.Name == old { + s.Name.Name = new + fixed = true + } + } else { + _, thisName := path.Split(importPath(s)) + if thisName == old { + s.Name = ast.NewIdent(new) + fixed = true + } + } + } + + // Rename any top-level declarations. + for _, d := range f.Decls { + switch d := d.(type) { + case *ast.FuncDecl: + if d.Recv == nil && d.Name.Name == old { + d.Name.Name = new + d.Name.Obj.Name = new + fixed = true + } + case *ast.GenDecl: + for _, s := range d.Specs { + switch s := s.(type) { + case *ast.TypeSpec: + if s.Name.Name == old { + s.Name.Name = new + s.Name.Obj.Name = new + fixed = true + } + case *ast.ValueSpec: + for _, n := range s.Names { + if n.Name == old { + n.Name = new + n.Obj.Name = new + fixed = true + } + } + } + } + } + } + + // Rename top-level old to new, both unresolved names + // (probably defined in another file) and names that resolve + // to a declaration we renamed. + walk(f, func(n any) { + id, ok := n.(*ast.Ident) + if ok && isTopName(id, old) { + id.Name = new + fixed = true + } + if ok && id.Obj != nil && id.Name == old && id.Obj.Name == new { + id.Name = id.Obj.Name + fixed = true + } + }) + + return fixed +} + +// matchLen returns the length of the longest prefix shared by x and y. +func matchLen(x, y string) int { + i := 0 + for i < len(x) && i < len(y) && x[i] == y[i] { + i++ + } + return i +} + +// addImport adds the import path to the file f, if absent. 
+func addImport(f *ast.File, ipath string) (added bool) { + if imports(f, ipath) { + return false + } + + // Determine name of import. + // Assume added imports follow convention of using last element. + _, name := path.Split(ipath) + + // Rename any conflicting top-level references from name to name_. + renameTop(f, name, name+"_") + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(ipath), + }, + } + + // Find an import decl to add to. + var ( + bestMatch = -1 + lastImport = -1 + impDecl *ast.GenDecl + impIndex = -1 + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. + if declImports(gen, "C") { + continue + } + + // Compute longest shared prefix with imports in this block. + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + n := matchLen(importPath(impspec), ipath) + if n > bestMatch { + bestMatch = n + impDecl = gen + impIndex = j + } + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Ensure the import decl has parentheses, if needed. + if len(impDecl.Specs) > 0 && !impDecl.Lparen.IsValid() { + impDecl.Lparen = impDecl.Pos() + } + + insertAt := impIndex + 1 + if insertAt == 0 { + insertAt = len(impDecl.Specs) + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + if insertAt > 0 { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. 
+ prev := impDecl.Specs[insertAt-1] + newImport.Path.ValuePos = prev.Pos() + newImport.EndPos = prev.Pos() + } + + f.Imports = append(f.Imports, newImport) + return true +} + +// deleteImport deletes the import path from the file f, if present. +func deleteImport(f *ast.File, path string) (deleted bool) { + oldImport := importSpec(f, path) + + // Find the import node that imports path, if any. + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if oldImport != impspec { + continue + } + + // We found an import spec that imports path. + // Delete it. + deleted = true + copy(gen.Specs[j:], gen.Specs[j+1:]) + gen.Specs = gen.Specs[:len(gen.Specs)-1] + + // If this was the last import spec in this decl, + // delete the decl, too. + if len(gen.Specs) == 0 { + copy(f.Decls[i:], f.Decls[i+1:]) + f.Decls = f.Decls[:len(f.Decls)-1] + } else if len(gen.Specs) == 1 { + gen.Lparen = token.NoPos // drop parens + } + if j > 0 { + // We deleted an entry but now there will be + // a blank line-sized hole where the import was. + // Close the hole by making the previous + // import appear to "end" where this one did. + gen.Specs[j-1].(*ast.ImportSpec).EndPos = impspec.End() + } + break + } + } + + // Delete it from f.Imports. + for i, imp := range f.Imports { + if imp == oldImport { + copy(f.Imports[i:], f.Imports[i+1:]) + f.Imports = f.Imports[:len(f.Imports)-1] + break + } + } + + return +} + +// rewriteImport rewrites any import of path oldPath to path newPath. +func rewriteImport(f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. 
+ imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/gotypes.go b/platform/dbops/binaries/go/go/src/cmd/fix/gotypes.go new file mode 100644 index 0000000000000000000000000000000000000000..6085816ada45c0d9b7f706574b07298aacad3155 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/gotypes.go @@ -0,0 +1,75 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "go/ast" + "strconv" +) + +func init() { + register(gotypesFix) +} + +var gotypesFix = fix{ + name: "gotypes", + date: "2015-07-16", + f: gotypes, + desc: `Change imports of golang.org/x/tools/go/{exact,types} to go/{constant,types}`, +} + +func gotypes(f *ast.File) bool { + fixed := fixGoTypes(f) + if fixGoExact(f) { + fixed = true + } + return fixed +} + +func fixGoTypes(f *ast.File) bool { + return rewriteImport(f, "golang.org/x/tools/go/types", "go/types") +} + +func fixGoExact(f *ast.File) bool { + // This one is harder because the import name changes. + // First find the import spec. + var importSpec *ast.ImportSpec + walk(f, func(n any) { + if importSpec != nil { + return + } + spec, ok := n.(*ast.ImportSpec) + if !ok { + return + } + path, err := strconv.Unquote(spec.Path.Value) + if err != nil { + return + } + if path == "golang.org/x/tools/go/exact" { + importSpec = spec + } + + }) + if importSpec == nil { + return false + } + + // We are about to rename exact.* to constant.*, but constant is a common + // name. See if it will conflict. This is a hack but it is effective. + exists := renameTop(f, "constant", "constant") + suffix := "" + if exists { + suffix = "_" + } + // Now we need to rename all the uses of the import. RewriteImport + // affects renameTop, but not vice versa, so do them in this order. 
+ renameTop(f, "exact", "constant"+suffix) + rewriteImport(f, "golang.org/x/tools/go/exact", "go/constant") + // renameTop will also rewrite the imported package name. Fix that; + // we know it should be missing. + importSpec.Name = nil + return true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/gotypes_test.go b/platform/dbops/binaries/go/go/src/cmd/fix/gotypes_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9248fffd246bb00b94bb07cb197da3245936f72c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/gotypes_test.go @@ -0,0 +1,89 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func init() { + addTestCases(gotypesTests, gotypes) +} + +var gotypesTests = []testCase{ + { + Name: "gotypes.0", + In: `package main + +import "golang.org/x/tools/go/types" +import "golang.org/x/tools/go/exact" + +var _ = exact.Kind + +func f() { + _ = exact.MakeBool(true) +} +`, + Out: `package main + +import "go/types" +import "go/constant" + +var _ = constant.Kind + +func f() { + _ = constant.MakeBool(true) +} +`, + }, + { + Name: "gotypes.1", + In: `package main + +import "golang.org/x/tools/go/types" +import foo "golang.org/x/tools/go/exact" + +var _ = foo.Kind + +func f() { + _ = foo.MakeBool(true) +} +`, + Out: `package main + +import "go/types" +import "go/constant" + +var _ = foo.Kind + +func f() { + _ = foo.MakeBool(true) +} +`, + }, + { + Name: "gotypes.0", + In: `package main + +import "golang.org/x/tools/go/types" +import "golang.org/x/tools/go/exact" + +var _ = exact.Kind +var constant = 23 // Use of new package name. + +func f() { + _ = exact.MakeBool(true) +} +`, + Out: `package main + +import "go/types" +import "go/constant" + +var _ = constant_.Kind +var constant = 23 // Use of new package name. 
+ +func f() { + _ = constant_.MakeBool(true) +} +`, + }, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/import_test.go b/platform/dbops/binaries/go/go/src/cmd/fix/import_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8644e28f85020ef5edd4ace83583b8f447535d87 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/import_test.go @@ -0,0 +1,458 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "go/ast" + +func init() { + addTestCases(importTests, nil) +} + +var importTests = []testCase{ + { + Name: "import.0", + Fn: addImportFn("os"), + In: `package main + +import ( + "os" +) +`, + Out: `package main + +import ( + "os" +) +`, + }, + { + Name: "import.1", + Fn: addImportFn("os"), + In: `package main +`, + Out: `package main + +import "os" +`, + }, + { + Name: "import.2", + Fn: addImportFn("os"), + In: `package main + +// Comment +import "C" +`, + Out: `package main + +// Comment +import "C" +import "os" +`, + }, + { + Name: "import.3", + Fn: addImportFn("os"), + In: `package main + +// Comment +import "C" + +import ( + "io" + "utf8" +) +`, + Out: `package main + +// Comment +import "C" + +import ( + "io" + "os" + "utf8" +) +`, + }, + { + Name: "import.4", + Fn: deleteImportFn("os"), + In: `package main + +import ( + "os" +) +`, + Out: `package main +`, + }, + { + Name: "import.5", + Fn: deleteImportFn("os"), + In: `package main + +// Comment +import "C" +import "os" +`, + Out: `package main + +// Comment +import "C" +`, + }, + { + Name: "import.6", + Fn: deleteImportFn("os"), + In: `package main + +// Comment +import "C" + +import ( + "io" + "os" + "utf8" +) +`, + Out: `package main + +// Comment +import "C" + +import ( + "io" + "utf8" +) +`, + }, + { + Name: "import.7", + Fn: deleteImportFn("io"), + In: `package main + +import ( + "io" // a + "os" // b + "utf8" // c +) +`, + Out: 
`package main + +import ( + // a + "os" // b + "utf8" // c +) +`, + }, + { + Name: "import.8", + Fn: deleteImportFn("os"), + In: `package main + +import ( + "io" // a + "os" // b + "utf8" // c +) +`, + Out: `package main + +import ( + "io" // a + // b + "utf8" // c +) +`, + }, + { + Name: "import.9", + Fn: deleteImportFn("utf8"), + In: `package main + +import ( + "io" // a + "os" // b + "utf8" // c +) +`, + Out: `package main + +import ( + "io" // a + "os" // b + // c +) +`, + }, + { + Name: "import.10", + Fn: deleteImportFn("io"), + In: `package main + +import ( + "io" + "os" + "utf8" +) +`, + Out: `package main + +import ( + "os" + "utf8" +) +`, + }, + { + Name: "import.11", + Fn: deleteImportFn("os"), + In: `package main + +import ( + "io" + "os" + "utf8" +) +`, + Out: `package main + +import ( + "io" + "utf8" +) +`, + }, + { + Name: "import.12", + Fn: deleteImportFn("utf8"), + In: `package main + +import ( + "io" + "os" + "utf8" +) +`, + Out: `package main + +import ( + "io" + "os" +) +`, + }, + { + Name: "import.13", + Fn: rewriteImportFn("utf8", "encoding/utf8"), + In: `package main + +import ( + "io" + "os" + "utf8" // thanks ken +) +`, + Out: `package main + +import ( + "encoding/utf8" // thanks ken + "io" + "os" +) +`, + }, + { + Name: "import.14", + Fn: rewriteImportFn("asn1", "encoding/asn1"), + In: `package main + +import ( + "asn1" + "crypto" + "crypto/rsa" + _ "crypto/sha1" + "crypto/x509" + "crypto/x509/pkix" + "time" +) + +var x = 1 +`, + Out: `package main + +import ( + "crypto" + "crypto/rsa" + _ "crypto/sha1" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "time" +) + +var x = 1 +`, + }, + { + Name: "import.15", + Fn: rewriteImportFn("url", "net/url"), + In: `package main + +import ( + "bufio" + "net" + "path" + "url" +) + +var x = 1 // comment on x, not on url +`, + Out: `package main + +import ( + "bufio" + "net" + "net/url" + "path" +) + +var x = 1 // comment on x, not on url +`, + }, + { + Name: "import.16", + Fn: 
rewriteImportFn("http", "net/http", "template", "text/template"), + In: `package main + +import ( + "flag" + "http" + "log" + "template" +) + +var addr = flag.String("addr", ":1718", "http service address") // Q=17, R=18 +`, + Out: `package main + +import ( + "flag" + "log" + "net/http" + "text/template" +) + +var addr = flag.String("addr", ":1718", "http service address") // Q=17, R=18 +`, + }, + { + Name: "import.17", + Fn: addImportFn("x/y/z", "x/a/c"), + In: `package main + +// Comment +import "C" + +import ( + "a" + "b" + + "x/w" + + "d/f" +) +`, + Out: `package main + +// Comment +import "C" + +import ( + "a" + "b" + + "x/a/c" + "x/w" + "x/y/z" + + "d/f" +) +`, + }, + { + Name: "import.18", + Fn: addDelImportFn("e", "o"), + In: `package main + +import ( + "f" + "o" + "z" +) +`, + Out: `package main + +import ( + "e" + "f" + "z" +) +`, + }, +} + +func addImportFn(path ...string) func(*ast.File) bool { + return func(f *ast.File) bool { + fixed := false + for _, p := range path { + if !imports(f, p) { + addImport(f, p) + fixed = true + } + } + return fixed + } +} + +func deleteImportFn(path string) func(*ast.File) bool { + return func(f *ast.File) bool { + if imports(f, path) { + deleteImport(f, path) + return true + } + return false + } +} + +func addDelImportFn(p1 string, p2 string) func(*ast.File) bool { + return func(f *ast.File) bool { + fixed := false + if !imports(f, p1) { + addImport(f, p1) + fixed = true + } + if imports(f, p2) { + deleteImport(f, p2) + fixed = true + } + return fixed + } +} + +func rewriteImportFn(oldnew ...string) func(*ast.File) bool { + return func(f *ast.File) bool { + fixed := false + for i := 0; i < len(oldnew); i += 2 { + if imports(f, oldnew[i]) { + rewriteImport(f, oldnew[i], oldnew[i+1]) + fixed = true + } + } + return fixed + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/jnitype.go b/platform/dbops/binaries/go/go/src/cmd/fix/jnitype.go new file mode 100644 index 
0000000000000000000000000000000000000000..111be8e70c6be38c9cd03529e151639d22b10005 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/jnitype.go @@ -0,0 +1,69 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "go/ast" +) + +func init() { + register(jniFix) +} + +var jniFix = fix{ + name: "jni", + date: "2017-12-04", + f: jnifix, + desc: `Fixes initializers of JNI's jobject and subtypes`, + disabled: false, +} + +// Old state: +// +// type jobject *_jobject +// +// New state: +// +// type jobject uintptr +// +// and similar for subtypes of jobject. +// This fix finds nils initializing these types and replaces the nils with 0s. +func jnifix(f *ast.File) bool { + return typefix(f, func(s string) bool { + switch s { + case "C.jobject": + return true + case "C.jclass": + return true + case "C.jthrowable": + return true + case "C.jstring": + return true + case "C.jarray": + return true + case "C.jbooleanArray": + return true + case "C.jbyteArray": + return true + case "C.jcharArray": + return true + case "C.jshortArray": + return true + case "C.jintArray": + return true + case "C.jlongArray": + return true + case "C.jfloatArray": + return true + case "C.jdoubleArray": + return true + case "C.jobjectArray": + return true + case "C.jweak": + return true + } + return false + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/jnitype_test.go b/platform/dbops/binaries/go/go/src/cmd/fix/jnitype_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ecf01408c7c84058488602461502ac912d2184b1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/jnitype_test.go @@ -0,0 +1,203 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +func init() { + addTestCases(jniTests, jnifix) +} + +var jniTests = []testCase{ + { + Name: "jni.localVariable", + In: `package main + +// typedef struct _jobject* jobject; +import "C" + +func f() { + var x C.jobject = nil + x = nil + x, x = nil, nil +} +`, + Out: `package main + +// typedef struct _jobject* jobject; +import "C" + +func f() { + var x C.jobject = 0 + x = 0 + x, x = 0, 0 +} +`, + }, + { + Name: "jni.globalVariable", + In: `package main + +// typedef struct _jobject* jobject; +import "C" + +var x C.jobject = nil + +func f() { + x = nil +} +`, + Out: `package main + +// typedef struct _jobject* jobject; +import "C" + +var x C.jobject = 0 + +func f() { + x = 0 +} +`, + }, + { + Name: "jni.EqualArgument", + In: `package main + +// typedef struct _jobject* jobject; +import "C" + +var x C.jobject +var y = x == nil +var z = x != nil +`, + Out: `package main + +// typedef struct _jobject* jobject; +import "C" + +var x C.jobject +var y = x == 0 +var z = x != 0 +`, + }, + { + Name: "jni.StructField", + In: `package main + +// typedef struct _jobject* jobject; +import "C" + +type T struct { + x C.jobject +} + +var t = T{x: nil} +`, + Out: `package main + +// typedef struct _jobject* jobject; +import "C" + +type T struct { + x C.jobject +} + +var t = T{x: 0} +`, + }, + { + Name: "jni.FunctionArgument", + In: `package main + +// typedef struct _jobject* jobject; +import "C" + +func f(x C.jobject) { +} + +func g() { + f(nil) +} +`, + Out: `package main + +// typedef struct _jobject* jobject; +import "C" + +func f(x C.jobject) { +} + +func g() { + f(0) +} +`, + }, + { + Name: "jni.ArrayElement", + In: `package main + +// typedef struct _jobject* jobject; +import "C" + +var x = [3]C.jobject{nil, nil, nil} +`, + Out: `package main + +// typedef struct _jobject* jobject; +import "C" + +var x = [3]C.jobject{0, 0, 0} +`, + }, + { + Name: "jni.SliceElement", + In: `package main + +// typedef struct _jobject* jobject; +import "C" + +var x = 
[]C.jobject{nil, nil, nil} +`, + Out: `package main + +// typedef struct _jobject* jobject; +import "C" + +var x = []C.jobject{0, 0, 0} +`, + }, + { + Name: "jni.MapKey", + In: `package main + +// typedef struct _jobject* jobject; +import "C" + +var x = map[C.jobject]int{nil: 0} +`, + Out: `package main + +// typedef struct _jobject* jobject; +import "C" + +var x = map[C.jobject]int{0: 0} +`, + }, + { + Name: "jni.MapValue", + In: `package main + +// typedef struct _jobject* jobject; +import "C" + +var x = map[int]C.jobject{0: nil} +`, + Out: `package main + +// typedef struct _jobject* jobject; +import "C" + +var x = map[int]C.jobject{0: 0} +`, + }, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/main.go b/platform/dbops/binaries/go/go/src/cmd/fix/main.go new file mode 100644 index 0000000000000000000000000000000000000000..0f36fcc3123202c4f23f68c229a3a100f7ccfce0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/main.go @@ -0,0 +1,273 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/scanner" + "go/token" + "internal/diff" + "io" + "io/fs" + "os" + "path/filepath" + "sort" + "strconv" + "strings" +) + +var ( + fset = token.NewFileSet() + exitCode = 0 +) + +var allowedRewrites = flag.String("r", "", + "restrict the rewrites to this comma-separated list") + +var forceRewrites = flag.String("force", "", + "force these fixes to run even if the code looks updated") + +var allowed, force map[string]bool + +var ( + doDiff = flag.Bool("diff", false, "display diffs instead of rewriting files") + goVersionStr = flag.String("go", "", "go language version for files") + + goVersion int // 115 for go1.15 +) + +// enable for debugging fix failures +const debug = false // display incorrectly reformatted source and exit + +func usage() { + fmt.Fprintf(os.Stderr, "usage: go tool fix [-diff] [-r fixname,...] [-force fixname,...] [path ...]\n") + flag.PrintDefaults() + fmt.Fprintf(os.Stderr, "\nAvailable rewrites are:\n") + sort.Sort(byName(fixes)) + for _, f := range fixes { + if f.disabled { + fmt.Fprintf(os.Stderr, "\n%s (disabled)\n", f.name) + } else { + fmt.Fprintf(os.Stderr, "\n%s\n", f.name) + } + desc := strings.TrimSpace(f.desc) + desc = strings.ReplaceAll(desc, "\n", "\n\t") + fmt.Fprintf(os.Stderr, "\t%s\n", desc) + } + os.Exit(2) +} + +func main() { + flag.Usage = usage + flag.Parse() + + if *goVersionStr != "" { + if !strings.HasPrefix(*goVersionStr, "go") { + report(fmt.Errorf("invalid -go=%s", *goVersionStr)) + os.Exit(exitCode) + } + majorStr := (*goVersionStr)[len("go"):] + minorStr := "0" + if before, after, found := strings.Cut(majorStr, "."); found { + majorStr, minorStr = before, after + } + major, err1 := strconv.Atoi(majorStr) + minor, err2 := strconv.Atoi(minorStr) + if err1 != nil || err2 != nil || major < 0 || major >= 100 || minor < 0 || minor >= 100 { + report(fmt.Errorf("invalid -go=%s", *goVersionStr)) + os.Exit(exitCode) + } + + 
goVersion = major*100 + minor + } + + sort.Sort(byDate(fixes)) + + if *allowedRewrites != "" { + allowed = make(map[string]bool) + for _, f := range strings.Split(*allowedRewrites, ",") { + allowed[f] = true + } + } + + if *forceRewrites != "" { + force = make(map[string]bool) + for _, f := range strings.Split(*forceRewrites, ",") { + force[f] = true + } + } + + if flag.NArg() == 0 { + if err := processFile("standard input", true); err != nil { + report(err) + } + os.Exit(exitCode) + } + + for i := 0; i < flag.NArg(); i++ { + path := flag.Arg(i) + switch dir, err := os.Stat(path); { + case err != nil: + report(err) + case dir.IsDir(): + walkDir(path) + default: + if err := processFile(path, false); err != nil { + report(err) + } + } + } + + os.Exit(exitCode) +} + +const parserMode = parser.ParseComments + +func gofmtFile(f *ast.File) ([]byte, error) { + var buf bytes.Buffer + if err := format.Node(&buf, fset, f); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func processFile(filename string, useStdin bool) error { + var f *os.File + var err error + var fixlog strings.Builder + + if useStdin { + f = os.Stdin + } else { + f, err = os.Open(filename) + if err != nil { + return err + } + defer f.Close() + } + + src, err := io.ReadAll(f) + if err != nil { + return err + } + + file, err := parser.ParseFile(fset, filename, src, parserMode) + if err != nil { + return err + } + + // Make sure file is in canonical format. + // This "fmt" pseudo-fix cannot be disabled. + newSrc, err := gofmtFile(file) + if err != nil { + return err + } + if !bytes.Equal(newSrc, src) { + newFile, err := parser.ParseFile(fset, filename, newSrc, parserMode) + if err != nil { + return err + } + file = newFile + fmt.Fprintf(&fixlog, " fmt") + } + + // Apply all fixes to file. 
+ newFile := file + fixed := false + for _, fix := range fixes { + if allowed != nil && !allowed[fix.name] { + continue + } + if fix.disabled && !force[fix.name] { + continue + } + if fix.f(newFile) { + fixed = true + fmt.Fprintf(&fixlog, " %s", fix.name) + + // AST changed. + // Print and parse, to update any missing scoping + // or position information for subsequent fixers. + newSrc, err := gofmtFile(newFile) + if err != nil { + return err + } + newFile, err = parser.ParseFile(fset, filename, newSrc, parserMode) + if err != nil { + if debug { + fmt.Printf("%s", newSrc) + report(err) + os.Exit(exitCode) + } + return err + } + } + } + if !fixed { + return nil + } + fmt.Fprintf(os.Stderr, "%s: fixed %s\n", filename, fixlog.String()[1:]) + + // Print AST. We did that after each fix, so this appears + // redundant, but it is necessary to generate gofmt-compatible + // source code in a few cases. The official gofmt style is the + // output of the printer run on a standard AST generated by the parser, + // but the source we generated inside the loop above is the + // output of the printer run on a mangled AST generated by a fixer. 
+ newSrc, err = gofmtFile(newFile) + if err != nil { + return err + } + + if *doDiff { + os.Stdout.Write(diff.Diff(filename, src, "fixed/"+filename, newSrc)) + return nil + } + + if useStdin { + os.Stdout.Write(newSrc) + return nil + } + + return os.WriteFile(f.Name(), newSrc, 0) +} + +func gofmt(n any) string { + var gofmtBuf strings.Builder + if err := format.Node(&gofmtBuf, fset, n); err != nil { + return "<" + err.Error() + ">" + } + return gofmtBuf.String() +} + +func report(err error) { + scanner.PrintError(os.Stderr, err) + exitCode = 2 +} + +func walkDir(path string) { + filepath.WalkDir(path, visitFile) +} + +func visitFile(path string, f fs.DirEntry, err error) error { + if err == nil && isGoFile(f) { + err = processFile(path, false) + } + if err != nil { + report(err) + } + return nil +} + +func isGoFile(f fs.DirEntry) bool { + // ignore non-Go files + name := f.Name() + return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/main_test.go b/platform/dbops/binaries/go/go/src/cmd/fix/main_test.go new file mode 100644 index 0000000000000000000000000000000000000000..cafd116cfd6b357f5737d2a77db7bb4bd6a0cb93 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/main_test.go @@ -0,0 +1,166 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "go/ast" + "go/parser" + "internal/diff" + "internal/testenv" + "strings" + "testing" +) + +type testCase struct { + Name string + Fn func(*ast.File) bool + Version int + In string + Out string +} + +var testCases []testCase + +func addTestCases(t []testCase, fn func(*ast.File) bool) { + // Fill in fn to avoid repetition in definitions. + if fn != nil { + for i := range t { + if t[i].Fn == nil { + t[i].Fn = fn + } + } + } + testCases = append(testCases, t...) 
+} + +func fnop(*ast.File) bool { return false } + +func parseFixPrint(t *testing.T, fn func(*ast.File) bool, desc, in string, mustBeGofmt bool) (out string, fixed, ok bool) { + file, err := parser.ParseFile(fset, desc, in, parserMode) + if err != nil { + t.Errorf("parsing: %v", err) + return + } + + outb, err := gofmtFile(file) + if err != nil { + t.Errorf("printing: %v", err) + return + } + if s := string(outb); in != s && mustBeGofmt { + t.Errorf("not gofmt-formatted.\n--- %s\n%s\n--- %s | gofmt\n%s", + desc, in, desc, s) + tdiff(t, "want", in, "have", s) + return + } + + if fn == nil { + for _, fix := range fixes { + if fix.f(file) { + fixed = true + } + } + } else { + fixed = fn(file) + } + + outb, err = gofmtFile(file) + if err != nil { + t.Errorf("printing: %v", err) + return + } + + return string(outb), fixed, true +} + +func TestRewrite(t *testing.T) { + // If cgo is enabled, enforce that cgo commands invoked by cmd/fix + // do not fail during testing. + if testenv.HasCGO() { + testenv.MustHaveGoBuild(t) // Really just 'go tool cgo', but close enough. + + // The reportCgoError hook is global, so we can't set it per-test + // if we want to be able to run those tests in parallel. + // Instead, simply set it to panic on error: the goroutine dump + // from the panic should help us determine which test failed. + prevReportCgoError := reportCgoError + reportCgoError = func(err error) { + panic(fmt.Sprintf("unexpected cgo error: %v", err)) + } + t.Cleanup(func() { reportCgoError = prevReportCgoError }) + } + + for _, tt := range testCases { + tt := tt + t.Run(tt.Name, func(t *testing.T) { + if tt.Version == 0 { + if testing.Verbose() { + // Don't run in parallel: cmd/fix sometimes writes directly to stderr, + // and since -v prints which test is currently running we want that + // information to accurately correlate with the stderr output. 
+ } else { + t.Parallel() + } + } else { + old := goVersion + goVersion = tt.Version + defer func() { + goVersion = old + }() + } + + // Apply fix: should get tt.Out. + out, fixed, ok := parseFixPrint(t, tt.Fn, tt.Name, tt.In, true) + if !ok { + return + } + + // reformat to get printing right + out, _, ok = parseFixPrint(t, fnop, tt.Name, out, false) + if !ok { + return + } + + if tt.Out == "" { + tt.Out = tt.In + } + if out != tt.Out { + t.Errorf("incorrect output.\n") + if !strings.HasPrefix(tt.Name, "testdata/") { + t.Errorf("--- have\n%s\n--- want\n%s", out, tt.Out) + } + tdiff(t, "have", out, "want", tt.Out) + return + } + + if changed := out != tt.In; changed != fixed { + t.Errorf("changed=%v != fixed=%v", changed, fixed) + return + } + + // Should not change if run again. + out2, fixed2, ok := parseFixPrint(t, tt.Fn, tt.Name+" output", out, true) + if !ok { + return + } + + if fixed2 { + t.Errorf("applied fixes during second round") + return + } + + if out2 != out { + t.Errorf("changed output after second round of fixes.\n--- output after first round\n%s\n--- output after second round\n%s", + out, out2) + tdiff(t, "first", out, "second", out2) + } + }) + } +} + +func tdiff(t *testing.T, aname, a, bname, b string) { + t.Errorf("%s", diff.Diff(aname, []byte(a), bname, []byte(b))) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/netipv6zone.go b/platform/dbops/binaries/go/go/src/cmd/fix/netipv6zone.go new file mode 100644 index 0000000000000000000000000000000000000000..199fcf5bf5976520857e7c194341e5443f60684a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/netipv6zone.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "go/ast" + +func init() { + register(netipv6zoneFix) +} + +var netipv6zoneFix = fix{ + name: "netipv6zone", + date: "2012-11-26", + f: netipv6zone, + desc: `Adapt element key to IPAddr, UDPAddr or TCPAddr composite literals. + +https://codereview.appspot.com/6849045/ +`, +} + +func netipv6zone(f *ast.File) bool { + if !imports(f, "net") { + return false + } + + fixed := false + walk(f, func(n any) { + cl, ok := n.(*ast.CompositeLit) + if !ok { + return + } + se, ok := cl.Type.(*ast.SelectorExpr) + if !ok { + return + } + if !isTopName(se.X, "net") || se.Sel == nil { + return + } + switch ss := se.Sel.String(); ss { + case "IPAddr", "UDPAddr", "TCPAddr": + for i, e := range cl.Elts { + if _, ok := e.(*ast.KeyValueExpr); ok { + break + } + switch i { + case 0: + cl.Elts[i] = &ast.KeyValueExpr{ + Key: ast.NewIdent("IP"), + Value: e, + } + case 1: + if elit, ok := e.(*ast.BasicLit); ok && elit.Value == "0" { + cl.Elts = append(cl.Elts[:i], cl.Elts[i+1:]...) + } else { + cl.Elts[i] = &ast.KeyValueExpr{ + Key: ast.NewIdent("Port"), + Value: e, + } + } + } + fixed = true + } + } + }) + return fixed +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/netipv6zone_test.go b/platform/dbops/binaries/go/go/src/cmd/fix/netipv6zone_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5b8d964d4134b1a153f477421c3c9adf686f8a11 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/netipv6zone_test.go @@ -0,0 +1,43 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +func init() { + addTestCases(netipv6zoneTests, netipv6zone) +} + +var netipv6zoneTests = []testCase{ + { + Name: "netipv6zone.0", + In: `package main + +import "net" + +func f() net.Addr { + a := &net.IPAddr{ip1} + sub(&net.UDPAddr{ip2, 12345}) + c := &net.TCPAddr{IP: ip3, Port: 54321} + d := &net.TCPAddr{ip4, 0} + p := 1234 + e := &net.TCPAddr{ip4, p} + return &net.TCPAddr{ip5}, nil +} +`, + Out: `package main + +import "net" + +func f() net.Addr { + a := &net.IPAddr{IP: ip1} + sub(&net.UDPAddr{IP: ip2, Port: 12345}) + c := &net.TCPAddr{IP: ip3, Port: 54321} + d := &net.TCPAddr{IP: ip4} + p := 1234 + e := &net.TCPAddr{IP: ip4, Port: p} + return &net.TCPAddr{IP: ip5}, nil +} +`, + }, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/printerconfig.go b/platform/dbops/binaries/go/go/src/cmd/fix/printerconfig.go new file mode 100644 index 0000000000000000000000000000000000000000..bad6953196421408fbf905f169342dffc51f1313 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/printerconfig.go @@ -0,0 +1,61 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "go/ast" + +func init() { + register(printerconfigFix) +} + +var printerconfigFix = fix{ + name: "printerconfig", + date: "2012-12-11", + f: printerconfig, + desc: `Add element keys to Config composite literals.`, +} + +func printerconfig(f *ast.File) bool { + if !imports(f, "go/printer") { + return false + } + + fixed := false + walk(f, func(n any) { + cl, ok := n.(*ast.CompositeLit) + if !ok { + return + } + se, ok := cl.Type.(*ast.SelectorExpr) + if !ok { + return + } + if !isTopName(se.X, "printer") || se.Sel == nil { + return + } + + if ss := se.Sel.String(); ss == "Config" { + for i, e := range cl.Elts { + if _, ok := e.(*ast.KeyValueExpr); ok { + break + } + switch i { + case 0: + cl.Elts[i] = &ast.KeyValueExpr{ + Key: ast.NewIdent("Mode"), + Value: e, + } + case 1: + cl.Elts[i] = &ast.KeyValueExpr{ + Key: ast.NewIdent("Tabwidth"), + Value: e, + } + } + fixed = true + } + } + }) + return fixed +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/printerconfig_test.go b/platform/dbops/binaries/go/go/src/cmd/fix/printerconfig_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e485c137b7bbebfb03482c9a9d7c483c7cbe568d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/printerconfig_test.go @@ -0,0 +1,37 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +func init() { + addTestCases(printerconfigTests, printerconfig) +} + +var printerconfigTests = []testCase{ + { + Name: "printerconfig.0", + In: `package main + +import "go/printer" + +func f() printer.Config { + b := printer.Config{0, 8} + c := &printer.Config{0} + d := &printer.Config{Tabwidth: 8, Mode: 0} + return printer.Config{0, 8} +} +`, + Out: `package main + +import "go/printer" + +func f() printer.Config { + b := printer.Config{Mode: 0, Tabwidth: 8} + c := &printer.Config{Mode: 0} + d := &printer.Config{Tabwidth: 8, Mode: 0} + return printer.Config{Mode: 0, Tabwidth: 8} +} +`, + }, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/fix/typecheck.go b/platform/dbops/binaries/go/go/src/cmd/fix/typecheck.go new file mode 100644 index 0000000000000000000000000000000000000000..b115987390cb5221f482ee8a6e22f1a92aeb6ab1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/fix/typecheck.go @@ -0,0 +1,813 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "os" + "os/exec" + "path/filepath" + "reflect" + "runtime" + "strings" +) + +// Partial type checker. +// +// The fact that it is partial is very important: the input is +// an AST and a description of some type information to +// assume about one or more packages, but not all the +// packages that the program imports. The checker is +// expected to do as much as it can with what it has been +// given. There is not enough information supplied to do +// a full type check, but the type checker is expected to +// apply information that can be derived from variable +// declarations, function and method returns, and type switches +// as far as it can, so that the caller can still tell the types +// of expression relevant to a particular fix. +// +// TODO(rsc,gri): Replace with go/typechecker. 
+// Doing that could be an interesting test case for go/typechecker: +// the constraints about working with partial information will +// likely exercise it in interesting ways. The ideal interface would +// be to pass typecheck a map from importpath to package API text +// (Go source code), but for now we use data structures (TypeConfig, Type). +// +// The strings mostly use gofmt form. +// +// A Field or FieldList has as its type a comma-separated list +// of the types of the fields. For example, the field list +// x, y, z int +// has type "int, int, int". + +// The prefix "type " is the type of a type. +// For example, given +// var x int +// type T int +// x's type is "int" but T's type is "type int". +// mkType inserts the "type " prefix. +// getType removes it. +// isType tests for it. + +func mkType(t string) string { + return "type " + t +} + +func getType(t string) string { + if !isType(t) { + return "" + } + return t[len("type "):] +} + +func isType(t string) bool { + return strings.HasPrefix(t, "type ") +} + +// TypeConfig describes the universe of relevant types. +// For ease of creation, the types are all referred to by string +// name (e.g., "reflect.Value"). TypeByName is the only place +// where the strings are resolved. + +type TypeConfig struct { + Type map[string]*Type + Var map[string]string + Func map[string]string + + // External maps from a name to its type. + // It provides additional typings not present in the Go source itself. + // For now, the only additional typings are those generated by cgo. + External map[string]string +} + +// typeof returns the type of the given name, which may be of +// the form "x" or "p.X". +func (cfg *TypeConfig) typeof(name string) string { + if cfg.Var != nil { + if t := cfg.Var[name]; t != "" { + return t + } + } + if cfg.Func != nil { + if t := cfg.Func[name]; t != "" { + return "func()" + t + } + } + return "" +} + +// Type describes the Fields and Methods of a type. 
+// If the field or method cannot be found there, it is next +// looked for in the Embed list. +type Type struct { + Field map[string]string // map field name to type + Method map[string]string // map method name to comma-separated return types (should start with "func ") + Embed []string // list of types this type embeds (for extra methods) + Def string // definition of named type +} + +// dot returns the type of "typ.name", making its decision +// using the type information in cfg. +func (typ *Type) dot(cfg *TypeConfig, name string) string { + if typ.Field != nil { + if t := typ.Field[name]; t != "" { + return t + } + } + if typ.Method != nil { + if t := typ.Method[name]; t != "" { + return t + } + } + + for _, e := range typ.Embed { + etyp := cfg.Type[e] + if etyp != nil { + if t := etyp.dot(cfg, name); t != "" { + return t + } + } + } + + return "" +} + +// typecheck type checks the AST f assuming the information in cfg. +// It returns two maps with type information: +// typeof maps AST nodes to type information in gofmt string form. +// assign maps type strings to lists of expressions that were assigned +// to values of another type that were assigned to that type. +func typecheck(cfg *TypeConfig, f *ast.File) (typeof map[any]string, assign map[string][]any) { + typeof = make(map[any]string) + assign = make(map[string][]any) + cfg1 := &TypeConfig{} + *cfg1 = *cfg // make copy so we can add locally + copied := false + + // If we import "C", add types of cgo objects. + cfg.External = map[string]string{} + cfg1.External = cfg.External + if imports(f, "C") { + // Run cgo on gofmtFile(f) + // Parse, extract decls from _cgo_gotypes.go + // Map _Ctype_* types to C.* types. 
+ err := func() error { + txt, err := gofmtFile(f) + if err != nil { + return err + } + dir, err := os.MkdirTemp(os.TempDir(), "fix_cgo_typecheck") + if err != nil { + return err + } + defer os.RemoveAll(dir) + err = os.WriteFile(filepath.Join(dir, "in.go"), txt, 0600) + if err != nil { + return err + } + goCmd := "go" + if goroot := runtime.GOROOT(); goroot != "" { + goCmd = filepath.Join(goroot, "bin", "go") + } + cmd := exec.Command(goCmd, "tool", "cgo", "-objdir", dir, "-srcdir", dir, "in.go") + if reportCgoError != nil { + // Since cgo command errors will be reported, also forward the error + // output from the command for debugging. + cmd.Stderr = os.Stderr + } + err = cmd.Run() + if err != nil { + return err + } + out, err := os.ReadFile(filepath.Join(dir, "_cgo_gotypes.go")) + if err != nil { + return err + } + cgo, err := parser.ParseFile(token.NewFileSet(), "cgo.go", out, 0) + if err != nil { + return err + } + for _, decl := range cgo.Decls { + fn, ok := decl.(*ast.FuncDecl) + if !ok { + continue + } + if strings.HasPrefix(fn.Name.Name, "_Cfunc_") { + var params, results []string + for _, p := range fn.Type.Params.List { + t := gofmt(p.Type) + t = strings.ReplaceAll(t, "_Ctype_", "C.") + params = append(params, t) + } + for _, r := range fn.Type.Results.List { + t := gofmt(r.Type) + t = strings.ReplaceAll(t, "_Ctype_", "C.") + results = append(results, t) + } + cfg.External["C."+fn.Name.Name[7:]] = joinFunc(params, results) + } + } + return nil + }() + if err != nil { + if reportCgoError == nil { + fmt.Fprintf(os.Stderr, "go fix: warning: no cgo types: %s\n", err) + } else { + reportCgoError(err) + } + } + } + + // gather function declarations + for _, decl := range f.Decls { + fn, ok := decl.(*ast.FuncDecl) + if !ok { + continue + } + typecheck1(cfg, fn.Type, typeof, assign) + t := typeof[fn.Type] + if fn.Recv != nil { + // The receiver must be a type. 
+ rcvr := typeof[fn.Recv] + if !isType(rcvr) { + if len(fn.Recv.List) != 1 { + continue + } + rcvr = mkType(gofmt(fn.Recv.List[0].Type)) + typeof[fn.Recv.List[0].Type] = rcvr + } + rcvr = getType(rcvr) + if rcvr != "" && rcvr[0] == '*' { + rcvr = rcvr[1:] + } + typeof[rcvr+"."+fn.Name.Name] = t + } else { + if isType(t) { + t = getType(t) + } else { + t = gofmt(fn.Type) + } + typeof[fn.Name] = t + + // Record typeof[fn.Name.Obj] for future references to fn.Name. + typeof[fn.Name.Obj] = t + } + } + + // gather struct declarations + for _, decl := range f.Decls { + d, ok := decl.(*ast.GenDecl) + if ok { + for _, s := range d.Specs { + switch s := s.(type) { + case *ast.TypeSpec: + if cfg1.Type[s.Name.Name] != nil { + break + } + if !copied { + copied = true + // Copy map lazily: it's time. + cfg1.Type = make(map[string]*Type) + for k, v := range cfg.Type { + cfg1.Type[k] = v + } + } + t := &Type{Field: map[string]string{}} + cfg1.Type[s.Name.Name] = t + switch st := s.Type.(type) { + case *ast.StructType: + for _, f := range st.Fields.List { + for _, n := range f.Names { + t.Field[n.Name] = gofmt(f.Type) + } + } + case *ast.ArrayType, *ast.StarExpr, *ast.MapType: + t.Def = gofmt(st) + } + } + } + } + } + + typecheck1(cfg1, f, typeof, assign) + return typeof, assign +} + +// reportCgoError, if non-nil, reports a non-nil error from running the "cgo" +// tool. (Set to a non-nil hook during testing if cgo is expected to work.) +var reportCgoError func(err error) + +func makeExprList(a []*ast.Ident) []ast.Expr { + var b []ast.Expr + for _, x := range a { + b = append(b, x) + } + return b +} + +// typecheck1 is the recursive form of typecheck. +// It is like typecheck but adds to the information in typeof +// instead of allocating a new map. +func typecheck1(cfg *TypeConfig, f any, typeof map[any]string, assign map[string][]any) { + // set sets the type of n to typ. + // If isDecl is true, n is being declared. 
+ set := func(n ast.Expr, typ string, isDecl bool) { + if typeof[n] != "" || typ == "" { + if typeof[n] != typ { + assign[typ] = append(assign[typ], n) + } + return + } + typeof[n] = typ + + // If we obtained typ from the declaration of x + // propagate the type to all the uses. + // The !isDecl case is a cheat here, but it makes + // up in some cases for not paying attention to + // struct fields. The real type checker will be + // more accurate so we won't need the cheat. + if id, ok := n.(*ast.Ident); ok && id.Obj != nil && (isDecl || typeof[id.Obj] == "") { + typeof[id.Obj] = typ + } + } + + // Type-check an assignment lhs = rhs. + // If isDecl is true, this is := so we can update + // the types of the objects that lhs refers to. + typecheckAssign := func(lhs, rhs []ast.Expr, isDecl bool) { + if len(lhs) > 1 && len(rhs) == 1 { + if _, ok := rhs[0].(*ast.CallExpr); ok { + t := split(typeof[rhs[0]]) + // Lists should have same length but may not; pair what can be paired. + for i := 0; i < len(lhs) && i < len(t); i++ { + set(lhs[i], t[i], isDecl) + } + return + } + } + if len(lhs) == 1 && len(rhs) == 2 { + // x = y, ok + rhs = rhs[:1] + } else if len(lhs) == 2 && len(rhs) == 1 { + // x, ok = y + lhs = lhs[:1] + } + + // Match as much as we can. + for i := 0; i < len(lhs) && i < len(rhs); i++ { + x, y := lhs[i], rhs[i] + if typeof[y] != "" { + set(x, typeof[y], isDecl) + } else { + set(y, typeof[x], false) + } + } + } + + expand := func(s string) string { + typ := cfg.Type[s] + if typ != nil && typ.Def != "" { + return typ.Def + } + return s + } + + // The main type check is a recursive algorithm implemented + // by walkBeforeAfter(n, before, after). + // Most of it is bottom-up, but in a few places we need + // to know the type of the function we are checking. + // The before function records that information on + // the curfn stack. 
+ var curfn []*ast.FuncType + + before := func(n any) { + // push function type on stack + switch n := n.(type) { + case *ast.FuncDecl: + curfn = append(curfn, n.Type) + case *ast.FuncLit: + curfn = append(curfn, n.Type) + } + } + + // After is the real type checker. + after := func(n any) { + if n == nil { + return + } + if false && reflect.TypeOf(n).Kind() == reflect.Pointer { // debugging trace + defer func() { + if t := typeof[n]; t != "" { + pos := fset.Position(n.(ast.Node).Pos()) + fmt.Fprintf(os.Stderr, "%s: typeof[%s] = %s\n", pos, gofmt(n), t) + } + }() + } + + switch n := n.(type) { + case *ast.FuncDecl, *ast.FuncLit: + // pop function type off stack + curfn = curfn[:len(curfn)-1] + + case *ast.FuncType: + typeof[n] = mkType(joinFunc(split(typeof[n.Params]), split(typeof[n.Results]))) + + case *ast.FieldList: + // Field list is concatenation of sub-lists. + t := "" + for _, field := range n.List { + if t != "" { + t += ", " + } + t += typeof[field] + } + typeof[n] = t + + case *ast.Field: + // Field is one instance of the type per name. + all := "" + t := typeof[n.Type] + if !isType(t) { + // Create a type, because it is typically *T or *p.T + // and we might care about that type. + t = mkType(gofmt(n.Type)) + typeof[n.Type] = t + } + t = getType(t) + if len(n.Names) == 0 { + all = t + } else { + for _, id := range n.Names { + if all != "" { + all += ", " + } + all += t + typeof[id.Obj] = t + typeof[id] = t + } + } + typeof[n] = all + + case *ast.ValueSpec: + // var declaration. Use type if present. + if n.Type != nil { + t := typeof[n.Type] + if !isType(t) { + t = mkType(gofmt(n.Type)) + typeof[n.Type] = t + } + t = getType(t) + for _, id := range n.Names { + set(id, t, true) + } + } + // Now treat same as assignment. + typecheckAssign(makeExprList(n.Names), n.Values, true) + + case *ast.AssignStmt: + typecheckAssign(n.Lhs, n.Rhs, n.Tok == token.DEFINE) + + case *ast.Ident: + // Identifier can take its type from underlying object. 
+ if t := typeof[n.Obj]; t != "" { + typeof[n] = t + } + + case *ast.SelectorExpr: + // Field or method. + name := n.Sel.Name + if t := typeof[n.X]; t != "" { + t = strings.TrimPrefix(t, "*") // implicit * + if typ := cfg.Type[t]; typ != nil { + if t := typ.dot(cfg, name); t != "" { + typeof[n] = t + return + } + } + tt := typeof[t+"."+name] + if isType(tt) { + typeof[n] = getType(tt) + return + } + } + // Package selector. + if x, ok := n.X.(*ast.Ident); ok && x.Obj == nil { + str := x.Name + "." + name + if cfg.Type[str] != nil { + typeof[n] = mkType(str) + return + } + if t := cfg.typeof(x.Name + "." + name); t != "" { + typeof[n] = t + return + } + } + + case *ast.CallExpr: + // make(T) has type T. + if isTopName(n.Fun, "make") && len(n.Args) >= 1 { + typeof[n] = gofmt(n.Args[0]) + return + } + // new(T) has type *T + if isTopName(n.Fun, "new") && len(n.Args) == 1 { + typeof[n] = "*" + gofmt(n.Args[0]) + return + } + // Otherwise, use type of function to determine arguments. + t := typeof[n.Fun] + if t == "" { + t = cfg.External[gofmt(n.Fun)] + } + in, out := splitFunc(t) + if in == nil && out == nil { + return + } + typeof[n] = join(out) + for i, arg := range n.Args { + if i >= len(in) { + break + } + if typeof[arg] == "" { + typeof[arg] = in[i] + } + } + + case *ast.TypeAssertExpr: + // x.(type) has type of x. + if n.Type == nil { + typeof[n] = typeof[n.X] + return + } + // x.(T) has type T. + if t := typeof[n.Type]; isType(t) { + typeof[n] = getType(t) + } else { + typeof[n] = gofmt(n.Type) + } + + case *ast.SliceExpr: + // x[i:j] has type of x. + typeof[n] = typeof[n.X] + + case *ast.IndexExpr: + // x[i] has key type of x's type. + t := expand(typeof[n.X]) + if strings.HasPrefix(t, "[") || strings.HasPrefix(t, "map[") { + // Lazy: assume there are no nested [] in the array + // length or map key type. + if _, elem, ok := strings.Cut(t, "]"); ok { + typeof[n] = elem + } + } + + case *ast.StarExpr: + // *x for x of type *T has type T when x is an expr. 
+ // We don't use the result when *x is a type, but + // compute it anyway. + t := expand(typeof[n.X]) + if isType(t) { + typeof[n] = "type *" + getType(t) + } else if strings.HasPrefix(t, "*") { + typeof[n] = t[len("*"):] + } + + case *ast.UnaryExpr: + // &x for x of type T has type *T. + t := typeof[n.X] + if t != "" && n.Op == token.AND { + typeof[n] = "*" + t + } + + case *ast.CompositeLit: + // T{...} has type T. + typeof[n] = gofmt(n.Type) + + // Propagate types down to values used in the composite literal. + t := expand(typeof[n]) + if strings.HasPrefix(t, "[") { // array or slice + // Lazy: assume there are no nested [] in the array length. + if _, et, ok := strings.Cut(t, "]"); ok { + for _, e := range n.Elts { + if kv, ok := e.(*ast.KeyValueExpr); ok { + e = kv.Value + } + if typeof[e] == "" { + typeof[e] = et + } + } + } + } + if strings.HasPrefix(t, "map[") { // map + // Lazy: assume there are no nested [] in the map key type. + if kt, vt, ok := strings.Cut(t[len("map["):], "]"); ok { + for _, e := range n.Elts { + if kv, ok := e.(*ast.KeyValueExpr); ok { + if typeof[kv.Key] == "" { + typeof[kv.Key] = kt + } + if typeof[kv.Value] == "" { + typeof[kv.Value] = vt + } + } + } + } + } + if typ := cfg.Type[t]; typ != nil && len(typ.Field) > 0 { // struct + for _, e := range n.Elts { + if kv, ok := e.(*ast.KeyValueExpr); ok { + if ft := typ.Field[fmt.Sprintf("%s", kv.Key)]; ft != "" { + if typeof[kv.Value] == "" { + typeof[kv.Value] = ft + } + } + } + } + } + + case *ast.ParenExpr: + // (x) has type of x. 
+ typeof[n] = typeof[n.X] + + case *ast.RangeStmt: + t := expand(typeof[n.X]) + if t == "" { + return + } + var key, value string + if t == "string" { + key, value = "int", "rune" + } else if strings.HasPrefix(t, "[") { + key = "int" + _, value, _ = strings.Cut(t, "]") + } else if strings.HasPrefix(t, "map[") { + if k, v, ok := strings.Cut(t[len("map["):], "]"); ok { + key, value = k, v + } + } + changed := false + if n.Key != nil && key != "" { + changed = true + set(n.Key, key, n.Tok == token.DEFINE) + } + if n.Value != nil && value != "" { + changed = true + set(n.Value, value, n.Tok == token.DEFINE) + } + // Ugly failure of vision: already type-checked body. + // Do it again now that we have that type info. + if changed { + typecheck1(cfg, n.Body, typeof, assign) + } + + case *ast.TypeSwitchStmt: + // Type of variable changes for each case in type switch, + // but go/parser generates just one variable. + // Repeat type check for each case with more precise + // type information. + as, ok := n.Assign.(*ast.AssignStmt) + if !ok { + return + } + varx, ok := as.Lhs[0].(*ast.Ident) + if !ok { + return + } + t := typeof[varx] + for _, cas := range n.Body.List { + cas := cas.(*ast.CaseClause) + if len(cas.List) == 1 { + // Variable has specific type only when there is + // exactly one type in the case list. + if tt := typeof[cas.List[0]]; isType(tt) { + tt = getType(tt) + typeof[varx] = tt + typeof[varx.Obj] = tt + typecheck1(cfg, cas.Body, typeof, assign) + } + } + } + // Restore t. + typeof[varx] = t + typeof[varx.Obj] = t + + case *ast.ReturnStmt: + if len(curfn) == 0 { + // Probably can't happen. + return + } + f := curfn[len(curfn)-1] + res := n.Results + if f.Results != nil { + t := split(typeof[f.Results]) + for i := 0; i < len(res) && i < len(t); i++ { + set(res[i], t[i], false) + } + } + + case *ast.BinaryExpr: + // Propagate types across binary ops that require two args of the same type. + switch n.Op { + case token.EQL, token.NEQ: // TODO: more cases. 
This is enough for the cftype fix. + if typeof[n.X] != "" && typeof[n.Y] == "" { + typeof[n.Y] = typeof[n.X] + } + if typeof[n.X] == "" && typeof[n.Y] != "" { + typeof[n.X] = typeof[n.Y] + } + } + } + } + walkBeforeAfter(f, before, after) +} + +// Convert between function type strings and lists of types. +// Using strings makes this a little harder, but it makes +// a lot of the rest of the code easier. This will all go away +// when we can use go/typechecker directly. + +// splitFunc splits "func(x,y,z) (a,b,c)" into ["x", "y", "z"] and ["a", "b", "c"]. +func splitFunc(s string) (in, out []string) { + if !strings.HasPrefix(s, "func(") { + return nil, nil + } + + i := len("func(") // index of beginning of 'in' arguments + nparen := 0 + for j := i; j < len(s); j++ { + switch s[j] { + case '(': + nparen++ + case ')': + nparen-- + if nparen < 0 { + // found end of parameter list + out := strings.TrimSpace(s[j+1:]) + if len(out) >= 2 && out[0] == '(' && out[len(out)-1] == ')' { + out = out[1 : len(out)-1] + } + return split(s[i:j]), split(out) + } + } + } + return nil, nil +} + +// joinFunc is the inverse of splitFunc. +func joinFunc(in, out []string) string { + outs := "" + if len(out) == 1 { + outs = " " + out[0] + } else if len(out) > 1 { + outs = " (" + join(out) + ")" + } + return "func(" + join(in) + ")" + outs +} + +// split splits "int, float" into ["int", "float"] and splits "" into []. +func split(s string) []string { + out := []string{} + i := 0 // current type being scanned is s[i:j]. + nparen := 0 + for j := 0; j < len(s); j++ { + switch s[j] { + case ' ': + if i == j { + i++ + } + case '(': + nparen++ + case ')': + nparen-- + if nparen < 0 { + // probably can't happen + return nil + } + case ',': + if nparen == 0 { + if i < j { + out = append(out, s[i:j]) + } + i = j + 1 + } + } + } + if nparen != 0 { + // probably can't happen + return nil + } + if i < len(s) { + out = append(out, s[i:]) + } + return out +} + +// join is the inverse of split. 
+func join(x []string) string { + return strings.Join(x, ", ") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/alldocs.go b/platform/dbops/binaries/go/go/src/cmd/go/alldocs.go new file mode 100644 index 0000000000000000000000000000000000000000..e61e865c84a8f77b06196c584f29f5f8bda3640b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/alldocs.go @@ -0,0 +1,3357 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by 'go test cmd/go -v -run=^TestDocsUpToDate$ -fixdocs'; DO NOT EDIT. +// Edit the documentation in other files and then execute 'go generate cmd/go' to generate this one. + +// Go is a tool for managing Go source code. +// +// Usage: +// +// go [arguments] +// +// The commands are: +// +// bug start a bug report +// build compile packages and dependencies +// clean remove object files and cached files +// doc show documentation for package or symbol +// env print Go environment information +// fix update packages to use new APIs +// fmt gofmt (reformat) package sources +// generate generate Go files by processing source +// get add dependencies to current module and install them +// install compile and install packages and dependencies +// list list packages or modules +// mod module maintenance +// work workspace maintenance +// run compile and run Go program +// test test packages +// tool run specified go tool +// version print Go version +// vet report likely mistakes in packages +// +// Use "go help " for more information about a command. 
+// +// Additional help topics: +// +// buildconstraint build constraints +// buildmode build modes +// c calling between Go and C +// cache build and test caching +// environment environment variables +// filetype file types +// go.mod the go.mod file +// gopath GOPATH environment variable +// goproxy module proxy protocol +// importpath import path syntax +// modules modules, module versions, and more +// module-auth module authentication using go.sum +// packages package lists and patterns +// private configuration for downloading non-public code +// testflag testing flags +// testfunc testing functions +// vcs controlling version control with GOVCS +// +// Use "go help " for more information about that topic. +// +// # Start a bug report +// +// Usage: +// +// go bug +// +// Bug opens the default browser and starts a new bug report. +// The report includes useful system information. +// +// # Compile packages and dependencies +// +// Usage: +// +// go build [-o output] [build flags] [packages] +// +// Build compiles the packages named by the import paths, +// along with their dependencies, but it does not install the results. +// +// If the arguments to build are a list of .go files from a single directory, +// build treats them as a list of source files specifying a single package. +// +// When compiling packages, build ignores files that end in '_test.go'. +// +// When compiling a single main package, build writes the resulting +// executable to an output file named after the last non-major-version +// component of the package import path. The '.exe' suffix is added +// when writing a Windows executable. +// So 'go build example/sam' writes 'sam' or 'sam.exe'. +// 'go build example.com/foo/v2' writes 'foo' or 'foo.exe', not 'v2.exe'. +// +// When compiling a package from a list of .go files, the executable +// is named after the first source file. +// 'go build ed.go rx.go' writes 'ed' or 'ed.exe'. 
+// +// When compiling multiple packages or a single non-main package, +// build compiles the packages but discards the resulting object, +// serving only as a check that the packages can be built. +// +// The -o flag forces build to write the resulting executable or object +// to the named output file or directory, instead of the default behavior described +// in the last two paragraphs. If the named output is an existing directory or +// ends with a slash or backslash, then any resulting executables +// will be written to that directory. +// +// The build flags are shared by the build, clean, get, install, list, run, +// and test commands: +// +// -C dir +// Change to dir before running the command. +// Any files named on the command line are interpreted after +// changing directories. +// If used, this flag must be the first one in the command line. +// -a +// force rebuilding of packages that are already up-to-date. +// -n +// print the commands but do not run them. +// -p n +// the number of programs, such as build commands or +// test binaries, that can be run in parallel. +// The default is GOMAXPROCS, normally the number of CPUs available. +// -race +// enable data race detection. +// Supported only on linux/amd64, freebsd/amd64, darwin/amd64, darwin/arm64, windows/amd64, +// linux/ppc64le and linux/arm64 (only for 48-bit VMA). +// -msan +// enable interoperation with memory sanitizer. +// Supported only on linux/amd64, linux/arm64, linux/loong64, freebsd/amd64 +// and only with Clang/LLVM as the host C compiler. +// PIE build mode will be used on all platforms except linux/amd64. +// -asan +// enable interoperation with address sanitizer. +// Supported only on linux/arm64, linux/amd64, linux/loong64. +// Supported on linux/amd64 or linux/arm64 and only with GCC 7 and higher +// or Clang/LLVM 9 and higher. +// And supported on linux/loong64 only with Clang/LLVM 16 and higher. +// -cover +// enable code coverage instrumentation. 
+// -covermode set,count,atomic +// set the mode for coverage analysis. +// The default is "set" unless -race is enabled, +// in which case it is "atomic". +// The values: +// set: bool: does this statement run? +// count: int: how many times does this statement run? +// atomic: int: count, but correct in multithreaded tests; +// significantly more expensive. +// Sets -cover. +// -coverpkg pattern1,pattern2,pattern3 +// For a build that targets package 'main' (e.g. building a Go +// executable), apply coverage analysis to each package matching +// the patterns. The default is to apply coverage analysis to +// packages in the main Go module. See 'go help packages' for a +// description of package patterns. Sets -cover. +// -v +// print the names of packages as they are compiled. +// -work +// print the name of the temporary work directory and +// do not delete it when exiting. +// -x +// print the commands. +// -asmflags '[pattern=]arg list' +// arguments to pass on each go tool asm invocation. +// -buildmode mode +// build mode to use. See 'go help buildmode' for more. +// -buildvcs +// Whether to stamp binaries with version control information +// ("true", "false", or "auto"). By default ("auto"), version control +// information is stamped into a binary if the main package, the main module +// containing it, and the current directory are all in the same repository. +// Use -buildvcs=false to always omit version control information, or +// -buildvcs=true to error out if version control information is available but +// cannot be included due to a missing tool or ambiguous directory structure. +// -compiler name +// name of compiler to use, as in runtime.Compiler (gccgo or gc). +// -gccgoflags '[pattern=]arg list' +// arguments to pass on each gccgo compiler/linker invocation. +// -gcflags '[pattern=]arg list' +// arguments to pass on each go tool compile invocation. 
+// -installsuffix suffix +// a suffix to use in the name of the package installation directory, +// in order to keep output separate from default builds. +// If using the -race flag, the install suffix is automatically set to race +// or, if set explicitly, has _race appended to it. Likewise for the -msan +// and -asan flags. Using a -buildmode option that requires non-default compile +// flags has a similar effect. +// -ldflags '[pattern=]arg list' +// arguments to pass on each go tool link invocation. +// -linkshared +// build code that will be linked against shared libraries previously +// created with -buildmode=shared. +// -mod mode +// module download mode to use: readonly, vendor, or mod. +// By default, if a vendor directory is present and the go version in go.mod +// is 1.14 or higher, the go command acts as if -mod=vendor were set. +// Otherwise, the go command acts as if -mod=readonly were set. +// See https://golang.org/ref/mod#build-commands for details. +// -modcacherw +// leave newly-created directories in the module cache read-write +// instead of making them read-only. +// -modfile file +// in module aware mode, read (and possibly write) an alternate go.mod +// file instead of the one in the module root directory. A file named +// "go.mod" must still be present in order to determine the module root +// directory, but it is not accessed. When -modfile is specified, an +// alternate go.sum file is also used: its path is derived from the +// -modfile flag by trimming the ".mod" extension and appending ".sum". +// -overlay file +// read a JSON config file that provides an overlay for build operations. +// The file is a JSON struct with a single field, named 'Replace', that +// maps each disk file path (a string) to its backing file path, so that +// a build will run as if the disk file path exists with the contents +// given by the backing file paths, or as if the disk file path does not +// exist if its backing file path is empty. 
Support for the -overlay flag +// has some limitations: importantly, cgo files included from outside the +// include path must be in the same directory as the Go package they are +// included from, and overlays will not appear when binaries and tests are +// run through go run and go test respectively. +// -pgo file +// specify the file path of a profile for profile-guided optimization (PGO). +// When the special name "auto" is specified, for each main package in the +// build, the go command selects a file named "default.pgo" in the package's +// directory if that file exists, and applies it to the (transitive) +// dependencies of the main package (other packages are not affected). +// Special name "off" turns off PGO. The default is "auto". +// -pkgdir dir +// install and load all packages from dir instead of the usual locations. +// For example, when building with a non-standard configuration, +// use -pkgdir to keep generated packages in a separate location. +// -tags tag,list +// a comma-separated list of additional build tags to consider satisfied +// during the build. For more information about build tags, see +// 'go help buildconstraint'. (Earlier versions of Go used a +// space-separated list, and that form is deprecated but still recognized.) +// -trimpath +// remove all file system paths from the resulting executable. +// Instead of absolute file system paths, the recorded file names +// will begin either a module path@version (when using modules), +// or a plain import path (when using the standard library, or GOPATH). +// -toolexec 'cmd args' +// a program to use to invoke toolchain programs like vet and asm. +// For example, instead of running asm, the go command will run +// 'cmd args /path/to/asm '. +// The TOOLEXEC_IMPORTPATH environment variable will be set, +// matching 'go list -f {{.ImportPath}}' for the package being built. 
+// +// The -asmflags, -gccgoflags, -gcflags, and -ldflags flags accept a +// space-separated list of arguments to pass to an underlying tool +// during the build. To embed spaces in an element in the list, surround +// it with either single or double quotes. The argument list may be +// preceded by a package pattern and an equal sign, which restricts +// the use of that argument list to the building of packages matching +// that pattern (see 'go help packages' for a description of package +// patterns). Without a pattern, the argument list applies only to the +// packages named on the command line. The flags may be repeated +// with different patterns in order to specify different arguments for +// different sets of packages. If a package matches patterns given in +// multiple flags, the latest match on the command line wins. +// For example, 'go build -gcflags=-S fmt' prints the disassembly +// only for package fmt, while 'go build -gcflags=all=-S fmt' +// prints the disassembly for fmt and all its dependencies. +// +// For more about specifying packages, see 'go help packages'. +// For more about where packages and binaries are installed, +// run 'go help gopath'. +// For more about calling between Go and C/C++, run 'go help c'. +// +// Note: Build adheres to certain conventions such as those described +// by 'go help gopath'. Not all projects can follow these conventions, +// however. Installations that have their own conventions or that use +// a separate software build system may choose to use lower-level +// invocations such as 'go tool compile' and 'go tool link' to avoid +// some of the overheads and design decisions of the build tool. +// +// See also: go install, go get, go clean. +// +// # Remove object files and cached files +// +// Usage: +// +// go clean [clean flags] [build flags] [packages] +// +// Clean removes object files from package source directories. 
+// The go command builds most objects in a temporary directory, +// so go clean is mainly concerned with object files left by other +// tools or by manual invocations of go build. +// +// If a package argument is given or the -i or -r flag is set, +// clean removes the following files from each of the +// source directories corresponding to the import paths: +// +// _obj/ old object directory, left from Makefiles +// _test/ old test directory, left from Makefiles +// _testmain.go old gotest file, left from Makefiles +// test.out old test log, left from Makefiles +// build.out old test log, left from Makefiles +// *.[568ao] object files, left from Makefiles +// +// DIR(.exe) from go build +// DIR.test(.exe) from go test -c +// MAINFILE(.exe) from go build MAINFILE.go +// *.so from SWIG +// +// In the list, DIR represents the final path element of the +// directory, and MAINFILE is the base name of any Go source +// file in the directory that is not included when building +// the package. +// +// The -i flag causes clean to remove the corresponding installed +// archive or binary (what 'go install' would create). +// +// The -n flag causes clean to print the remove commands it would execute, +// but not run them. +// +// The -r flag causes clean to be applied recursively to all the +// dependencies of the packages named by the import paths. +// +// The -x flag causes clean to print remove commands as it executes them. +// +// The -cache flag causes clean to remove the entire go build cache. +// +// The -testcache flag causes clean to expire all test results in the +// go build cache. +// +// The -modcache flag causes clean to remove the entire module +// download cache, including unpacked source code of versioned +// dependencies. +// +// The -fuzzcache flag causes clean to remove files stored in the Go build +// cache for fuzz testing. 
The fuzzing engine caches files that expand +// code coverage, so removing them may make fuzzing less effective until +// new inputs are found that provide the same coverage. These files are +// distinct from those stored in testdata directory; clean does not remove +// those files. +// +// For more about build flags, see 'go help build'. +// +// For more about specifying packages, see 'go help packages'. +// +// # Show documentation for package or symbol +// +// Usage: +// +// go doc [doc flags] [package|[package.]symbol[.methodOrField]] +// +// Doc prints the documentation comments associated with the item identified by its +// arguments (a package, const, func, type, var, method, or struct field) +// followed by a one-line summary of each of the first-level items "under" +// that item (package-level declarations for a package, methods for a type, +// etc.). +// +// Doc accepts zero, one, or two arguments. +// +// Given no arguments, that is, when run as +// +// go doc +// +// it prints the package documentation for the package in the current directory. +// If the package is a command (package main), the exported symbols of the package +// are elided from the presentation unless the -cmd flag is provided. +// +// When run with one argument, the argument is treated as a Go-syntax-like +// representation of the item to be documented. What the argument selects depends +// on what is installed in GOROOT and GOPATH, as well as the form of the argument, +// which is schematically one of these: +// +// go doc +// go doc [.] +// go doc [.][.] +// go doc [.][.] +// +// The first item in this list matched by the argument is the one whose documentation +// is printed. (See the examples below.) However, if the argument starts with a capital +// letter it is assumed to identify a symbol or method in the current directory. +// +// For packages, the order of scanning is determined lexically in breadth-first order. 
+// That is, the package presented is the one that matches the search and is nearest +// the root and lexically first at its level of the hierarchy. The GOROOT tree is +// always scanned in its entirety before GOPATH. +// +// If there is no package specified or matched, the package in the current +// directory is selected, so "go doc Foo" shows the documentation for symbol Foo in +// the current package. +// +// The package path must be either a qualified path or a proper suffix of a +// path. The go tool's usual package mechanism does not apply: package path +// elements like . and ... are not implemented by go doc. +// +// When run with two arguments, the first is a package path (full path or suffix), +// and the second is a symbol, or symbol with method or struct field: +// +// go doc [.] +// +// In all forms, when matching symbols, lower-case letters in the argument match +// either case but upper-case letters match exactly. This means that there may be +// multiple matches of a lower-case argument in a package if different symbols have +// different cases. If this occurs, documentation for all matches is printed. +// +// Examples: +// +// go doc +// Show documentation for current package. +// go doc Foo +// Show documentation for Foo in the current package. +// (Foo starts with a capital letter so it cannot match +// a package path.) +// go doc encoding/json +// Show documentation for the encoding/json package. +// go doc json +// Shorthand for encoding/json. +// go doc json.Number (or go doc json.number) +// Show documentation and method summary for json.Number. +// go doc json.Number.Int64 (or go doc json.number.int64) +// Show documentation for json.Number's Int64 method. +// go doc cmd/doc +// Show package docs for the doc command. +// go doc -cmd cmd/doc +// Show package docs and exported symbols within the doc command. +// go doc template.new +// Show documentation for html/template's New function. 
+// (html/template is lexically before text/template) +// go doc text/template.new # One argument +// Show documentation for text/template's New function. +// go doc text/template new # Two arguments +// Show documentation for text/template's New function. +// +// At least in the current tree, these invocations all print the +// documentation for json.Decoder's Decode method: +// +// go doc json.Decoder.Decode +// go doc json.decoder.decode +// go doc json.decode +// cd go/src/encoding/json; go doc decode +// +// Flags: +// +// -all +// Show all the documentation for the package. +// -c +// Respect case when matching symbols. +// -cmd +// Treat a command (package main) like a regular package. +// Otherwise package main's exported symbols are hidden +// when showing the package's top-level documentation. +// -short +// One-line representation for each symbol. +// -src +// Show the full source code for the symbol. This will +// display the full Go source of its declaration and +// definition, such as a function definition (including +// the body), type declaration or enclosing const +// block. The output may therefore include unexported +// details. +// -u +// Show documentation for unexported as well as exported +// symbols, methods, and fields. +// +// # Print Go environment information +// +// Usage: +// +// go env [-json] [-u] [-w] [var ...] +// +// Env prints Go environment information. +// +// By default env prints information as a shell script +// (on Windows, a batch file). If one or more variable +// names is given as arguments, env prints the value of +// each named variable on its own line. +// +// The -json flag prints the environment in JSON format +// instead of as a shell script. +// +// The -u flag requires one or more arguments and unsets +// the default setting for the named environment variables, +// if one has been set with 'go env -w'. 
+// +// The -w flag requires one or more arguments of the +// form NAME=VALUE and changes the default settings +// of the named environment variables to the given values. +// +// For more about environment variables, see 'go help environment'. +// +// # Update packages to use new APIs +// +// Usage: +// +// go fix [-fix list] [packages] +// +// Fix runs the Go fix command on the packages named by the import paths. +// +// The -fix flag sets a comma-separated list of fixes to run. +// The default is all known fixes. +// (Its value is passed to 'go tool fix -r'.) +// +// For more about fix, see 'go doc cmd/fix'. +// For more about specifying packages, see 'go help packages'. +// +// To run fix with other options, run 'go tool fix'. +// +// See also: go fmt, go vet. +// +// # Gofmt (reformat) package sources +// +// Usage: +// +// go fmt [-n] [-x] [packages] +// +// Fmt runs the command 'gofmt -l -w' on the packages named +// by the import paths. It prints the names of the files that are modified. +// +// For more about gofmt, see 'go doc cmd/gofmt'. +// For more about specifying packages, see 'go help packages'. +// +// The -n flag prints commands that would be executed. +// The -x flag prints commands as they are executed. +// +// The -mod flag's value sets which module download mode +// to use: readonly or vendor. See 'go help modules' for more. +// +// To run gofmt with specific options, run gofmt itself. +// +// See also: go fix, go vet. +// +// # Generate Go files by processing source +// +// Usage: +// +// go generate [-run regexp] [-n] [-v] [-x] [build flags] [file.go... | packages] +// +// Generate runs commands described by directives within existing +// files. Those commands can run any process but the intent is to +// create or update Go source files. +// +// Go generate is never run automatically by go build, go test, +// and so on. It must be run explicitly. 
+// +// Go generate scans the file for directives, which are lines of +// the form, +// +// //go:generate command argument... +// +// (note: no leading spaces and no space in "//go") where command +// is the generator to be run, corresponding to an executable file +// that can be run locally. It must either be in the shell path +// (gofmt), a fully qualified path (/usr/you/bin/mytool), or a +// command alias, described below. +// +// Note that go generate does not parse the file, so lines that look +// like directives in comments or multiline strings will be treated +// as directives. +// +// The arguments to the directive are space-separated tokens or +// double-quoted strings passed to the generator as individual +// arguments when it is run. +// +// Quoted strings use Go syntax and are evaluated before execution; a +// quoted string appears as a single argument to the generator. +// +// To convey to humans and machine tools that code is generated, +// generated source should have a line that matches the following +// regular expression (in Go syntax): +// +// ^// Code generated .* DO NOT EDIT\.$ +// +// This line must appear before the first non-comment, non-blank +// text in the file. +// +// Go generate sets several variables when it runs the generator: +// +// $GOARCH +// The execution architecture (arm, amd64, etc.) +// $GOOS +// The execution operating system (linux, windows, etc.) +// $GOFILE +// The base name of the file. +// $GOLINE +// The line number of the directive in the source file. +// $GOPACKAGE +// The name of the package of the file containing the directive. +// $GOROOT +// The GOROOT directory for the 'go' command that invoked the +// generator, containing the Go toolchain and standard library. +// $DOLLAR +// A dollar sign. +// $PATH +// The $PATH of the parent process, with $GOROOT/bin +// placed at the beginning. This causes generators +// that execute 'go' commands to use the same 'go' +// as the parent 'go generate' command. 
+// +// Other than variable substitution and quoted-string evaluation, no +// special processing such as "globbing" is performed on the command +// line. +// +// As a last step before running the command, any invocations of any +// environment variables with alphanumeric names, such as $GOFILE or +// $HOME, are expanded throughout the command line. The syntax for +// variable expansion is $NAME on all operating systems. Due to the +// order of evaluation, variables are expanded even inside quoted +// strings. If the variable NAME is not set, $NAME expands to the +// empty string. +// +// A directive of the form, +// +// //go:generate -command xxx args... +// +// specifies, for the remainder of this source file only, that the +// string xxx represents the command identified by the arguments. This +// can be used to create aliases or to handle multiword generators. +// For example, +// +// //go:generate -command foo go tool foo +// +// specifies that the command "foo" represents the generator +// "go tool foo". +// +// Generate processes packages in the order given on the command line, +// one at a time. If the command line lists .go files from a single directory, +// they are treated as a single package. Within a package, generate processes the +// source files in a package in file name order, one at a time. Within +// a source file, generate runs generators in the order they appear +// in the file, one at a time. The go generate tool also sets the build +// tag "generate" so that files may be examined by go generate but ignored +// during build. +// +// For packages with invalid code, generate processes only source files with a +// valid package clause. +// +// If any generator returns an error exit status, "go generate" skips +// all further processing for that package. +// +// The generator is run in the package's source directory. 
+// +// Go generate accepts two specific flags: +// +// -run="" +// if non-empty, specifies a regular expression to select +// directives whose full original source text (excluding +// any trailing spaces and final newline) matches the +// expression. +// +// -skip="" +// if non-empty, specifies a regular expression to suppress +// directives whose full original source text (excluding +// any trailing spaces and final newline) matches the +// expression. If a directive matches both the -run and +// the -skip arguments, it is skipped. +// +// It also accepts the standard build flags including -v, -n, and -x. +// The -v flag prints the names of packages and files as they are +// processed. +// The -n flag prints commands that would be executed. +// The -x flag prints commands as they are executed. +// +// For more about build flags, see 'go help build'. +// +// For more about specifying packages, see 'go help packages'. +// +// # Add dependencies to current module and install them +// +// Usage: +// +// go get [-t] [-u] [-v] [build flags] [packages] +// +// Get resolves its command-line arguments to packages at specific module versions, +// updates go.mod to require those versions, and downloads source code into the +// module cache. +// +// To add a dependency for a package or upgrade it to its latest version: +// +// go get example.com/pkg +// +// To upgrade or downgrade a package to a specific version: +// +// go get example.com/pkg@v1.2.3 +// +// To remove a dependency on a module and downgrade modules that require it: +// +// go get example.com/mod@none +// +// To upgrade the minimum required Go version to the latest released Go version: +// +// go get go@latest +// +// To upgrade the Go toolchain to the latest patch release of the current Go toolchain: +// +// go get toolchain@patch +// +// See https://golang.org/ref/mod#go-get for details. +// +// In earlier versions of Go, 'go get' was used to build and install packages. 
+// Now, 'go get' is dedicated to adjusting dependencies in go.mod. 'go install' +// may be used to build and install commands instead. When a version is specified, +// 'go install' runs in module-aware mode and ignores the go.mod file in the +// current directory. For example: +// +// go install example.com/pkg@v1.2.3 +// go install example.com/pkg@latest +// +// See 'go help install' or https://golang.org/ref/mod#go-install for details. +// +// 'go get' accepts the following flags. +// +// The -t flag instructs get to consider modules needed to build tests of +// packages specified on the command line. +// +// The -u flag instructs get to update modules providing dependencies +// of packages named on the command line to use newer minor or patch +// releases when available. +// +// The -u=patch flag (not -u patch) also instructs get to update dependencies, +// but changes the default to select patch releases. +// +// When the -t and -u flags are used together, get will update +// test dependencies as well. +// +// The -x flag prints commands as they are executed. This is useful for +// debugging version control commands when a module is downloaded directly +// from a repository. +// +// For more about modules, see https://golang.org/ref/mod. +// +// For more about using 'go get' to update the minimum Go version and +// suggested Go toolchain, see https://go.dev/doc/toolchain. +// +// For more about specifying packages, see 'go help packages'. +// +// This text describes the behavior of get using modules to manage source +// code and dependencies. If instead the go command is running in GOPATH +// mode, the details of get's flags and effects change, as does 'go help get'. +// See 'go help gopath-get'. +// +// See also: go build, go install, go clean, go mod. +// +// # Compile and install packages and dependencies +// +// Usage: +// +// go install [build flags] [packages] +// +// Install compiles and installs the packages named by the import paths. 
+// +// Executables are installed in the directory named by the GOBIN environment +// variable, which defaults to $GOPATH/bin or $HOME/go/bin if the GOPATH +// environment variable is not set. Executables in $GOROOT +// are installed in $GOROOT/bin or $GOTOOLDIR instead of $GOBIN. +// +// If the arguments have version suffixes (like @latest or @v1.0.0), "go install" +// builds packages in module-aware mode, ignoring the go.mod file in the current +// directory or any parent directory, if there is one. This is useful for +// installing executables without affecting the dependencies of the main module. +// To eliminate ambiguity about which module versions are used in the build, the +// arguments must satisfy the following constraints: +// +// - Arguments must be package paths or package patterns (with "..." wildcards). +// They must not be standard packages (like fmt), meta-patterns (std, cmd, +// all), or relative or absolute file paths. +// +// - All arguments must have the same version suffix. Different queries are not +// allowed, even if they refer to the same version. +// +// - All arguments must refer to packages in the same module at the same version. +// +// - Package path arguments must refer to main packages. Pattern arguments +// will only match main packages. +// +// - No module is considered the "main" module. If the module containing +// packages named on the command line has a go.mod file, it must not contain +// directives (replace and exclude) that would cause it to be interpreted +// differently than if it were the main module. The module must not require +// a higher version of itself. +// +// - Vendor directories are not used in any module. (Vendor directories are not +// included in the module zip files downloaded by 'go install'.) +// +// If the arguments don't have version suffixes, "go install" may run in +// module-aware mode or GOPATH mode, depending on the GO111MODULE environment +// variable and the presence of a go.mod file. 
See 'go help modules' for details. +// If module-aware mode is enabled, "go install" runs in the context of the main +// module. +// +// When module-aware mode is disabled, non-main packages are installed in the +// directory $GOPATH/pkg/$GOOS_$GOARCH. When module-aware mode is enabled, +// non-main packages are built and cached but not installed. +// +// Before Go 1.20, the standard library was installed to +// $GOROOT/pkg/$GOOS_$GOARCH. +// Starting in Go 1.20, the standard library is built and cached but not installed. +// Setting GODEBUG=installgoroot=all restores the use of +// $GOROOT/pkg/$GOOS_$GOARCH. +// +// For more about build flags, see 'go help build'. +// +// For more about specifying packages, see 'go help packages'. +// +// See also: go build, go get, go clean. +// +// # List packages or modules +// +// Usage: +// +// go list [-f format] [-json] [-m] [list flags] [build flags] [packages] +// +// List lists the named packages, one per line. +// The most commonly-used flags are -f and -json, which control the form +// of the output printed for each package. Other list flags, documented below, +// control more specific details. +// +// The default output shows the package import path: +// +// bytes +// encoding/json +// github.com/gorilla/mux +// golang.org/x/net/html +// +// The -f flag specifies an alternate format for the list, using the +// syntax of package template. The default output is equivalent +// to -f '{{.ImportPath}}'. The struct being passed to the template is: +// +// type Package struct { +// Dir string // directory containing package sources +// ImportPath string // import path of package in dir +// ImportComment string // path in import comment on package statement +// Name string // package name +// Doc string // package documentation string +// Target string // install path +// Shlib string // the shared library that contains this package (only set when -linkshared) +// Goroot bool // is this package in the Go root? 
+// Standard bool // is this package part of the standard Go library? +// Stale bool // would 'go install' do anything for this package? +// StaleReason string // explanation for Stale==true +// Root string // Go root or Go path dir containing this package +// ConflictDir string // this directory shadows Dir in $GOPATH +// BinaryOnly bool // binary-only package (no longer supported) +// ForTest string // package is only for use in named test +// Export string // file containing export data (when using -export) +// BuildID string // build ID of the compiled package (when using -export) +// Module *Module // info about package's containing module, if any (can be nil) +// Match []string // command-line patterns matching this package +// DepOnly bool // package is only a dependency, not explicitly listed +// DefaultGODEBUG string // default GODEBUG setting, for main packages +// +// // Source files +// GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles) +// CgoFiles []string // .go source files that import "C" +// CompiledGoFiles []string // .go files presented to compiler (when using -compiled) +// IgnoredGoFiles []string // .go source files ignored due to build constraints +// IgnoredOtherFiles []string // non-.go source files ignored due to build constraints +// CFiles []string // .c source files +// CXXFiles []string // .cc, .cxx and .cpp source files +// MFiles []string // .m source files +// HFiles []string // .h, .hh, .hpp and .hxx source files +// FFiles []string // .f, .F, .for and .f90 Fortran source files +// SFiles []string // .s source files +// SwigFiles []string // .swig files +// SwigCXXFiles []string // .swigcxx files +// SysoFiles []string // .syso object files to add to archive +// TestGoFiles []string // _test.go files in package +// XTestGoFiles []string // _test.go files outside package +// +// // Embedded files +// EmbedPatterns []string // //go:embed patterns +// EmbedFiles []string // files matched by 
EmbedPatterns +// TestEmbedPatterns []string // //go:embed patterns in TestGoFiles +// TestEmbedFiles []string // files matched by TestEmbedPatterns +// XTestEmbedPatterns []string // //go:embed patterns in XTestGoFiles +// XTestEmbedFiles []string // files matched by XTestEmbedPatterns +// +// // Cgo directives +// CgoCFLAGS []string // cgo: flags for C compiler +// CgoCPPFLAGS []string // cgo: flags for C preprocessor +// CgoCXXFLAGS []string // cgo: flags for C++ compiler +// CgoFFLAGS []string // cgo: flags for Fortran compiler +// CgoLDFLAGS []string // cgo: flags for linker +// CgoPkgConfig []string // cgo: pkg-config names +// +// // Dependency information +// Imports []string // import paths used by this package +// ImportMap map[string]string // map from source import to ImportPath (identity entries omitted) +// Deps []string // all (recursively) imported dependencies +// TestImports []string // imports from TestGoFiles +// XTestImports []string // imports from XTestGoFiles +// +// // Error information +// Incomplete bool // this package or a dependency has an error +// Error *PackageError // error loading package +// DepsErrors []*PackageError // errors loading dependencies +// } +// +// Packages stored in vendor directories report an ImportPath that includes the +// path to the vendor directory (for example, "d/vendor/p" instead of "p"), +// so that the ImportPath uniquely identifies a given copy of a package. +// The Imports, Deps, TestImports, and XTestImports lists also contain these +// expanded import paths. See golang.org/s/go15vendor for more about vendoring. +// +// The error information, if any, is +// +// type PackageError struct { +// ImportStack []string // shortest path from package named on command line to this one +// Pos string // position of error (if present, file:line:col) +// Err string // the error itself +// } +// +// The module information is a Module struct, defined in the discussion +// of list -m below. 
+// +// The template function "join" calls strings.Join. +// +// The template function "context" returns the build context, defined as: +// +// type Context struct { +// GOARCH string // target architecture +// GOOS string // target operating system +// GOROOT string // Go root +// GOPATH string // Go path +// CgoEnabled bool // whether cgo can be used +// UseAllFiles bool // use files regardless of //go:build lines, file names +// Compiler string // compiler to assume when computing target paths +// BuildTags []string // build constraints to match in //go:build lines +// ToolTags []string // toolchain-specific build constraints +// ReleaseTags []string // releases the current release is compatible with +// InstallSuffix string // suffix to use in the name of the install dir +// } +// +// For more information about the meaning of these fields see the documentation +// for the go/build package's Context type. +// +// The -json flag causes the package data to be printed in JSON format +// instead of using the template format. The JSON flag can optionally be +// provided with a set of comma-separated required field names to be output. +// If so, those required fields will always appear in JSON output, but +// others may be omitted to save work in computing the JSON struct. +// +// The -compiled flag causes list to set CompiledGoFiles to the Go source +// files presented to the compiler. Typically this means that it repeats +// the files listed in GoFiles and then also adds the Go code generated +// by processing CgoFiles and SwigFiles. The Imports list contains the +// union of all imports from both GoFiles and CompiledGoFiles. +// +// The -deps flag causes list to iterate over not just the named packages +// but also all their dependencies. It visits them in a depth-first post-order +// traversal, so that a package is listed only after all its dependencies. +// Packages not explicitly listed on the command line will have the DepOnly +// field set to true. 
+// +// The -e flag changes the handling of erroneous packages, those that +// cannot be found or are malformed. By default, the list command +// prints an error to standard error for each erroneous package and +// omits the packages from consideration during the usual printing. +// With the -e flag, the list command never prints errors to standard +// error and instead processes the erroneous packages with the usual +// printing. Erroneous packages will have a non-empty ImportPath and +// a non-nil Error field; other information may or may not be missing +// (zeroed). +// +// The -export flag causes list to set the Export field to the name of a +// file containing up-to-date export information for the given package, +// and the BuildID field to the build ID of the compiled package. +// +// The -find flag causes list to identify the named packages but not +// resolve their dependencies: the Imports and Deps lists will be empty. +// With the -find flag, the -deps, -test and -export commands cannot be +// used. +// +// The -test flag causes list to report not only the named packages +// but also their test binaries (for packages with tests), to convey to +// source code analysis tools exactly how test binaries are constructed. +// The reported import path for a test binary is the import path of +// the package followed by a ".test" suffix, as in "math/rand.test". +// When building a test, it is sometimes necessary to rebuild certain +// dependencies specially for that test (most commonly the tested +// package itself). The reported import path of a package recompiled +// for a particular test binary is followed by a space and the name of +// the test binary in brackets, as in "math/rand [math/rand.test]" +// or "regexp [sort.test]". The ForTest field is also set to the name +// of the package being tested ("math/rand" or "sort" in the previous +// examples). +// +// The Dir, Target, Shlib, Root, ConflictDir, and Export file paths +// are all absolute paths. 
+// +// By default, the lists GoFiles, CgoFiles, and so on hold names of files in Dir +// (that is, paths relative to Dir, not absolute paths). +// The generated files added when using the -compiled and -test flags +// are absolute paths referring to cached copies of generated Go source files. +// Although they are Go source files, the paths may not end in ".go". +// +// The -m flag causes list to list modules instead of packages. +// +// When listing modules, the -f flag still specifies a format template +// applied to a Go struct, but now a Module struct: +// +// type Module struct { +// Path string // module path +// Query string // version query corresponding to this version +// Version string // module version +// Versions []string // available module versions +// Replace *Module // replaced by this module +// Time *time.Time // time version was created +// Update *Module // available update (with -u) +// Main bool // is this the main module? +// Indirect bool // module is only indirectly needed by main module +// Dir string // directory holding local copy of files, if any +// GoMod string // path to go.mod file describing module, if any +// GoVersion string // go version used in module +// Retracted []string // retraction information, if any (with -retracted or -u) +// Deprecated string // deprecation message, if any (with -u) +// Error *ModuleError // error loading module +// Origin any // provenance of module +// Reuse bool // reuse of old module info is safe +// } +// +// type ModuleError struct { +// Err string // the error itself +// } +// +// The file GoMod refers to may be outside the module directory if the +// module is in the module cache or if the -modfile flag is used. +// +// The default output is to print the module path and then +// information about the version and replacement if any. 
+// For example, 'go list -m all' might print: +// +// my/main/module +// golang.org/x/text v0.3.0 => /tmp/text +// rsc.io/pdf v0.1.1 +// +// The Module struct has a String method that formats this +// line of output, so that the default format is equivalent +// to -f '{{.String}}'. +// +// Note that when a module has been replaced, its Replace field +// describes the replacement module, and its Dir field is set to +// the replacement's source code, if present. (That is, if Replace +// is non-nil, then Dir is set to Replace.Dir, with no access to +// the replaced source code.) +// +// The -u flag adds information about available upgrades. +// When the latest version of a given module is newer than +// the current one, list -u sets the Module's Update field +// to information about the newer module. list -u will also set +// the module's Retracted field if the current version is retracted. +// The Module's String method indicates an available upgrade by +// formatting the newer version in brackets after the current version. +// If a version is retracted, the string "(retracted)" will follow it. +// For example, 'go list -m -u all' might print: +// +// my/main/module +// golang.org/x/text v0.3.0 [v0.4.0] => /tmp/text +// rsc.io/pdf v0.1.1 (retracted) [v0.1.2] +// +// (For tools, 'go list -m -u -json all' may be more convenient to parse.) +// +// The -versions flag causes list to set the Module's Versions field +// to a list of all known versions of that module, ordered according +// to semantic versioning, earliest to latest. The flag also changes +// the default output format to display the module path followed by the +// space-separated version list. +// +// The -retracted flag causes list to report information about retracted +// module versions. When -retracted is used with -f or -json, the Retracted +// field will be set to a string explaining why the version was retracted. 
+// The string is taken from comments on the retract directive in the +// module's go.mod file. When -retracted is used with -versions, retracted +// versions are listed together with unretracted versions. The -retracted +// flag may be used with or without -m. +// +// The arguments to list -m are interpreted as a list of modules, not packages. +// The main module is the module containing the current directory. +// The active modules are the main module and its dependencies. +// With no arguments, list -m shows the main module. +// With arguments, list -m shows the modules specified by the arguments. +// Any of the active modules can be specified by its module path. +// The special pattern "all" specifies all the active modules, first the main +// module and then dependencies sorted by module path. +// A pattern containing "..." specifies the active modules whose +// module paths match the pattern. +// A query of the form path@version specifies the result of that query, +// which is not limited to active modules. +// See 'go help modules' for more about module queries. +// +// The template function "module" takes a single string argument +// that must be a module path or query and returns the specified +// module as a Module struct. If an error occurs, the result will +// be a Module struct with a non-nil Error field. +// +// When using -m, the -reuse=old.json flag accepts the name of file containing +// the JSON output of a previous 'go list -m -json' invocation with the +// same set of modifier flags (such as -u, -retracted, and -versions). +// The go command may use this file to determine that a module is unchanged +// since the previous invocation and avoid redownloading information about it. +// Modules that are not redownloaded will be marked in the new output by +// setting the Reuse field to true. Normally the module cache provides this +// kind of reuse automatically; the -reuse flag can be useful on systems that +// do not preserve the module cache. 
+//
+// For more about build flags, see 'go help build'.
+//
+// For more about specifying packages, see 'go help packages'.
+//
+// For more about modules, see https://golang.org/ref/mod.
+//
+// # Module maintenance
+//
+// Go mod provides access to operations on modules.
+//
+// Note that support for modules is built into all the go commands,
+// not just 'go mod'. For example, day-to-day adding, removing, upgrading,
+// and downgrading of dependencies should be done using 'go get'.
+// See 'go help modules' for an overview of module functionality.
+//
+// Usage:
+//
+// go mod [arguments]
+//
+// The commands are:
+//
+// download download modules to local cache
+// edit edit go.mod from tools or scripts
+// graph print module requirement graph
+// init initialize new module in current directory
+// tidy add missing and remove unused modules
+// vendor make vendored copy of dependencies
+// verify verify dependencies have expected content
+// why explain why packages or modules are needed
+//
+// Use "go help mod <command>" for more information about a command.
+//
+// # Download modules to local cache
+//
+// Usage:
+//
+// go mod download [-x] [-json] [-reuse=old.json] [modules]
+//
+// Download downloads the named modules, which can be module patterns selecting
+// dependencies of the main module or module queries of the form path@version.
+//
+// With no arguments, download applies to the modules needed to build and test
+// the packages in the main module: the modules explicitly required by the main
+// module if it is at 'go 1.17' or higher, or all transitively-required modules
+// if at 'go 1.16' or lower.
+//
+// The go command will automatically download modules as needed during ordinary
+// execution. The "go mod download" command is useful mainly for pre-filling
+// the local cache or to compute the answers for a Go module proxy.
+//
+// By default, download writes nothing to standard output. It may print progress
+// messages and errors to standard error.
+// +// The -json flag causes download to print a sequence of JSON objects +// to standard output, describing each downloaded module (or failure), +// corresponding to this Go struct: +// +// type Module struct { +// Path string // module path +// Query string // version query corresponding to this version +// Version string // module version +// Error string // error loading module +// Info string // absolute path to cached .info file +// GoMod string // absolute path to cached .mod file +// Zip string // absolute path to cached .zip file +// Dir string // absolute path to cached source root directory +// Sum string // checksum for path, version (as in go.sum) +// GoModSum string // checksum for go.mod (as in go.sum) +// Origin any // provenance of module +// Reuse bool // reuse of old module info is safe +// } +// +// The -reuse flag accepts the name of file containing the JSON output of a +// previous 'go mod download -json' invocation. The go command may use this +// file to determine that a module is unchanged since the previous invocation +// and avoid redownloading it. Modules that are not redownloaded will be marked +// in the new output by setting the Reuse field to true. Normally the module +// cache provides this kind of reuse automatically; the -reuse flag can be +// useful on systems that do not preserve the module cache. +// +// The -x flag causes download to print the commands download executes. +// +// See https://golang.org/ref/mod#go-mod-download for more about 'go mod download'. +// +// See https://golang.org/ref/mod#version-queries for more about version queries. +// +// # Edit go.mod from tools or scripts +// +// Usage: +// +// go mod edit [editing flags] [-fmt|-print|-json] [go.mod] +// +// Edit provides a command-line interface for editing go.mod, +// for use primarily by tools or scripts. It reads only go.mod; +// it does not look up information about the modules involved. 
+// By default, edit reads and writes the go.mod file of the main module, +// but a different target file can be specified after the editing flags. +// +// The editing flags specify a sequence of editing operations. +// +// The -fmt flag reformats the go.mod file without making other changes. +// This reformatting is also implied by any other modifications that use or +// rewrite the go.mod file. The only time this flag is needed is if no other +// flags are specified, as in 'go mod edit -fmt'. +// +// The -module flag changes the module's path (the go.mod file's module line). +// +// The -require=path@version and -droprequire=path flags +// add and drop a requirement on the given module path and version. +// Note that -require overrides any existing requirements on path. +// These flags are mainly for tools that understand the module graph. +// Users should prefer 'go get path@version' or 'go get path@none', +// which make other go.mod adjustments as needed to satisfy +// constraints imposed by other modules. +// +// The -exclude=path@version and -dropexclude=path@version flags +// add and drop an exclusion for the given module path and version. +// Note that -exclude=path@version is a no-op if that exclusion already exists. +// +// The -replace=old[@v]=new[@v] flag adds a replacement of the given +// module path and version pair. If the @v in old@v is omitted, a +// replacement without a version on the left side is added, which applies +// to all versions of the old module path. If the @v in new@v is omitted, +// the new path should be a local module root directory, not a module +// path. Note that -replace overrides any redundant replacements for old[@v], +// so omitting @v will drop existing replacements for specific versions. +// +// The -dropreplace=old[@v] flag drops a replacement of the given +// module path and version pair. If the @v is omitted, a replacement without +// a version on the left side is dropped. 
+// +// The -retract=version and -dropretract=version flags add and drop a +// retraction on the given version. The version may be a single version +// like "v1.2.3" or a closed interval like "[v1.1.0,v1.1.9]". Note that +// -retract=version is a no-op if that retraction already exists. +// +// The -require, -droprequire, -exclude, -dropexclude, -replace, +// -dropreplace, -retract, and -dropretract editing flags may be repeated, +// and the changes are applied in the order given. +// +// The -go=version flag sets the expected Go language version. +// +// The -toolchain=name flag sets the Go toolchain to use. +// +// The -print flag prints the final go.mod in its text format instead of +// writing it back to go.mod. +// +// The -json flag prints the final go.mod file in JSON format instead of +// writing it back to go.mod. The JSON output corresponds to these Go types: +// +// type Module struct { +// Path string +// Version string +// } +// +// type GoMod struct { +// Module ModPath +// Go string +// Toolchain string +// Require []Require +// Exclude []Module +// Replace []Replace +// Retract []Retract +// } +// +// type ModPath struct { +// Path string +// Deprecated string +// } +// +// type Require struct { +// Path string +// Version string +// Indirect bool +// } +// +// type Replace struct { +// Old Module +// New Module +// } +// +// type Retract struct { +// Low string +// High string +// Rationale string +// } +// +// Retract entries representing a single version (not an interval) will have +// the "Low" and "High" fields set to the same value. +// +// Note that this only describes the go.mod file itself, not other modules +// referred to indirectly. For the full set of modules available to a build, +// use 'go list -m -json all'. +// +// Edit also provides the -C, -n, and -x build flags. +// +// See https://golang.org/ref/mod#go-mod-edit for more about 'go mod edit'. 
+// +// # Print module requirement graph +// +// Usage: +// +// go mod graph [-go=version] [-x] +// +// Graph prints the module requirement graph (with replacements applied) +// in text form. Each line in the output has two space-separated fields: a module +// and one of its requirements. Each module is identified as a string of the form +// path@version, except for the main module, which has no @version suffix. +// +// The -go flag causes graph to report the module graph as loaded by the +// given Go version, instead of the version indicated by the 'go' directive +// in the go.mod file. +// +// The -x flag causes graph to print the commands graph executes. +// +// See https://golang.org/ref/mod#go-mod-graph for more about 'go mod graph'. +// +// # Initialize new module in current directory +// +// Usage: +// +// go mod init [module-path] +// +// Init initializes and writes a new go.mod file in the current directory, in +// effect creating a new module rooted at the current directory. The go.mod file +// must not already exist. +// +// Init accepts one optional argument, the module path for the new module. If the +// module path argument is omitted, init will attempt to infer the module path +// using import comments in .go files, vendoring tool configuration files (like +// Gopkg.lock), and the current directory (if in GOPATH). +// +// See https://golang.org/ref/mod#go-mod-init for more about 'go mod init'. +// +// # Add missing and remove unused modules +// +// Usage: +// +// go mod tidy [-e] [-v] [-x] [-go=version] [-compat=version] +// +// Tidy makes sure go.mod matches the source code in the module. +// It adds any missing modules necessary to build the current module's +// packages and dependencies, and it removes unused modules that +// don't provide any relevant packages. It also adds any missing entries +// to go.sum and removes any unnecessary ones. +// +// The -v flag causes tidy to print information about removed modules +// to standard error. 
+// +// The -e flag causes tidy to attempt to proceed despite errors +// encountered while loading packages. +// +// The -go flag causes tidy to update the 'go' directive in the go.mod +// file to the given version, which may change which module dependencies +// are retained as explicit requirements in the go.mod file. +// (Go versions 1.17 and higher retain more requirements in order to +// support lazy module loading.) +// +// The -compat flag preserves any additional checksums needed for the +// 'go' command from the indicated major Go release to successfully load +// the module graph, and causes tidy to error out if that version of the +// 'go' command would load any imported package from a different module +// version. By default, tidy acts as if the -compat flag were set to the +// version prior to the one indicated by the 'go' directive in the go.mod +// file. +// +// The -x flag causes tidy to print the commands download executes. +// +// See https://golang.org/ref/mod#go-mod-tidy for more about 'go mod tidy'. +// +// # Make vendored copy of dependencies +// +// Usage: +// +// go mod vendor [-e] [-v] [-o outdir] +// +// Vendor resets the main module's vendor directory to include all packages +// needed to build and test all the main module's packages. +// It does not include test code for vendored packages. +// +// The -v flag causes vendor to print the names of vendored +// modules and packages to standard error. +// +// The -e flag causes vendor to attempt to proceed despite errors +// encountered while loading packages. +// +// The -o flag causes vendor to create the vendor directory at the given +// path instead of "vendor". The go command can only use a vendor directory +// named "vendor" within the module root directory, so this flag is +// primarily useful for other tools. +// +// See https://golang.org/ref/mod#go-mod-vendor for more about 'go mod vendor'. 
+// +// # Verify dependencies have expected content +// +// Usage: +// +// go mod verify +// +// Verify checks that the dependencies of the current module, +// which are stored in a local downloaded source cache, have not been +// modified since being downloaded. If all the modules are unmodified, +// verify prints "all modules verified." Otherwise it reports which +// modules have been changed and causes 'go mod' to exit with a +// non-zero status. +// +// See https://golang.org/ref/mod#go-mod-verify for more about 'go mod verify'. +// +// # Explain why packages or modules are needed +// +// Usage: +// +// go mod why [-m] [-vendor] packages... +// +// Why shows a shortest path in the import graph from the main module to +// each of the listed packages. If the -m flag is given, why treats the +// arguments as a list of modules and finds a path to any package in each +// of the modules. +// +// By default, why queries the graph of packages matched by "go list all", +// which includes tests for reachable packages. The -vendor flag causes why +// to exclude tests of dependencies. +// +// The output is a sequence of stanzas, one for each package or module +// name on the command line, separated by blank lines. Each stanza begins +// with a comment line "# package" or "# module" giving the target +// package or module. Subsequent lines give a path through the import +// graph, one package per line. If the package or module is not +// referenced from the main module, the stanza will display a single +// parenthesized note indicating that fact. +// +// For example: +// +// $ go mod why golang.org/x/text/language golang.org/x/text/encoding +// # golang.org/x/text/language +// rsc.io/quote +// rsc.io/sampler +// golang.org/x/text/language +// +// # golang.org/x/text/encoding +// (main module does not need package golang.org/x/text/encoding) +// $ +// +// See https://golang.org/ref/mod#go-mod-why for more about 'go mod why'. 
+// +// # Workspace maintenance +// +// Work provides access to operations on workspaces. +// +// Note that support for workspaces is built into many other commands, not +// just 'go work'. +// +// See 'go help modules' for information about Go's module system of which +// workspaces are a part. +// +// See https://go.dev/ref/mod#workspaces for an in-depth reference on +// workspaces. +// +// See https://go.dev/doc/tutorial/workspaces for an introductory +// tutorial on workspaces. +// +// A workspace is specified by a go.work file that specifies a set of +// module directories with the "use" directive. These modules are used as +// root modules by the go command for builds and related operations. A +// workspace that does not specify modules to be used cannot be used to do +// builds from local modules. +// +// go.work files are line-oriented. Each line holds a single directive, +// made up of a keyword followed by arguments. For example: +// +// go 1.18 +// +// use ../foo/bar +// use ./baz +// +// replace example.com/foo v1.2.3 => example.com/bar v1.4.5 +// +// The leading keyword can be factored out of adjacent lines to create a block, +// like in Go imports. +// +// use ( +// ../foo/bar +// ./baz +// ) +// +// The use directive specifies a module to be included in the workspace's +// set of main modules. The argument to the use directive is the directory +// containing the module's go.mod file. +// +// The go directive specifies the version of Go the file was written at. It +// is possible there may be future changes in the semantics of workspaces +// that could be controlled by this version, but for now the version +// specified has no effect. +// +// The replace directive has the same syntax as the replace directive in a +// go.mod file and takes precedence over replaces in go.mod files. It is +// primarily intended to override conflicting replaces in different workspace +// modules. 
+//
+// To determine whether the go command is operating in workspace mode, use
+// the "go env GOWORK" command. This will specify the workspace file being
+// used.
+//
+// Usage:
+//
+// go work [arguments]
+//
+// The commands are:
+//
+// edit edit go.work from tools or scripts
+// init initialize workspace file
+// sync sync workspace build list to modules
+// use add modules to workspace file
+// vendor make vendored copy of dependencies
+//
+// Use "go help work <command>" for more information about a command.
+//
+// # Edit go.work from tools or scripts
+//
+// Usage:
+//
+// go work edit [editing flags] [go.work]
+//
+// Edit provides a command-line interface for editing go.work,
+// for use primarily by tools or scripts. It only reads go.work;
+// it does not look up information about the modules involved.
+// If no file is specified, Edit looks for a go.work file in the current
+// directory and its parent directories
+//
+// The editing flags specify a sequence of editing operations.
+//
+// The -fmt flag reformats the go.work file without making other changes.
+// This reformatting is also implied by any other modifications that use or
+// rewrite the go.mod file. The only time this flag is needed is if no other
+// flags are specified, as in 'go work edit -fmt'.
+//
+// The -use=path and -dropuse=path flags
+// add and drop a use directive from the go.work file's set of module directories.
+//
+// The -replace=old[@v]=new[@v] flag adds a replacement of the given
+// module path and version pair. If the @v in old@v is omitted, a
+// replacement without a version on the left side is added, which applies
+// to all versions of the old module path. If the @v in new@v is omitted,
+// the new path should be a local module root directory, not a module
+// path. Note that -replace overrides any redundant replacements for old[@v],
+// so omitting @v will drop existing replacements for specific versions.
+// +// The -dropreplace=old[@v] flag drops a replacement of the given +// module path and version pair. If the @v is omitted, a replacement without +// a version on the left side is dropped. +// +// The -use, -dropuse, -replace, and -dropreplace, +// editing flags may be repeated, and the changes are applied in the order given. +// +// The -go=version flag sets the expected Go language version. +// +// The -toolchain=name flag sets the Go toolchain to use. +// +// The -print flag prints the final go.work in its text format instead of +// writing it back to go.mod. +// +// The -json flag prints the final go.work file in JSON format instead of +// writing it back to go.mod. The JSON output corresponds to these Go types: +// +// type GoWork struct { +// Go string +// Toolchain string +// Use []Use +// Replace []Replace +// } +// +// type Use struct { +// DiskPath string +// ModulePath string +// } +// +// type Replace struct { +// Old Module +// New Module +// } +// +// type Module struct { +// Path string +// Version string +// } +// +// See the workspaces reference at https://go.dev/ref/mod#workspaces +// for more information. +// +// # Initialize workspace file +// +// Usage: +// +// go work init [moddirs] +// +// Init initializes and writes a new go.work file in the +// current directory, in effect creating a new workspace at the current +// directory. +// +// go work init optionally accepts paths to the workspace modules as +// arguments. If the argument is omitted, an empty workspace with no +// modules will be created. +// +// Each argument path is added to a use directive in the go.work file. The +// current go version will also be listed in the go.work file. +// +// See the workspaces reference at https://go.dev/ref/mod#workspaces +// for more information. 
+// +// # Sync workspace build list to modules +// +// Usage: +// +// go work sync +// +// Sync syncs the workspace's build list back to the +// workspace's modules +// +// The workspace's build list is the set of versions of all the +// (transitive) dependency modules used to do builds in the workspace. go +// work sync generates that build list using the Minimal Version Selection +// algorithm, and then syncs those versions back to each of modules +// specified in the workspace (with use directives). +// +// The syncing is done by sequentially upgrading each of the dependency +// modules specified in a workspace module to the version in the build list +// if the dependency module's version is not already the same as the build +// list's version. Note that Minimal Version Selection guarantees that the +// build list's version of each module is always the same or higher than +// that in each workspace module. +// +// See the workspaces reference at https://go.dev/ref/mod#workspaces +// for more information. +// +// # Add modules to workspace file +// +// Usage: +// +// go work use [-r] [moddirs] +// +// Use provides a command-line interface for adding +// directories, optionally recursively, to a go.work file. +// +// A use directive will be added to the go.work file for each argument +// directory listed on the command line go.work file, if it exists, +// or removed from the go.work file if it does not exist. +// Use fails if any remaining use directives refer to modules that +// do not exist. +// +// Use updates the go line in go.work to specify a version at least as +// new as all the go lines in the used modules, both preexisting ones +// and newly added ones. With no arguments, this update is the only +// thing that go work use does. 
+// +// The -r flag searches recursively for modules in the argument +// directories, and the use command operates as if each of the directories +// were specified as arguments: namely, use directives will be added for +// directories that exist, and removed for directories that do not exist. +// +// See the workspaces reference at https://go.dev/ref/mod#workspaces +// for more information. +// +// # Make vendored copy of dependencies +// +// Usage: +// +// go work vendor [-e] [-v] [-o outdir] +// +// Vendor resets the workspace's vendor directory to include all packages +// needed to build and test all the workspace's packages. +// It does not include test code for vendored packages. +// +// The -v flag causes vendor to print the names of vendored +// modules and packages to standard error. +// +// The -e flag causes vendor to attempt to proceed despite errors +// encountered while loading packages. +// +// The -o flag causes vendor to create the vendor directory at the given +// path instead of "vendor". The go command can only use a vendor directory +// named "vendor" within the module root directory, so this flag is +// primarily useful for other tools. +// +// # Compile and run Go program +// +// Usage: +// +// go run [build flags] [-exec xprog] package [arguments...] +// +// Run compiles and runs the named main Go package. +// Typically the package is specified as a list of .go source files from a single +// directory, but it may also be an import path, file system path, or pattern +// matching a single known package, as in 'go run .' or 'go run my/cmd'. +// +// If the package argument has a version suffix (like @latest or @v1.0.0), +// "go run" builds the program in module-aware mode, ignoring the go.mod file in +// the current directory or any parent directory, if there is one. This is useful +// for running programs without affecting the dependencies of the main module. 
+// +// If the package argument doesn't have a version suffix, "go run" may run in +// module-aware mode or GOPATH mode, depending on the GO111MODULE environment +// variable and the presence of a go.mod file. See 'go help modules' for details. +// If module-aware mode is enabled, "go run" runs in the context of the main +// module. +// +// By default, 'go run' runs the compiled binary directly: 'a.out arguments...'. +// If the -exec flag is given, 'go run' invokes the binary using xprog: +// +// 'xprog a.out arguments...'. +// +// If the -exec flag is not given, GOOS or GOARCH is different from the system +// default, and a program named go_$GOOS_$GOARCH_exec can be found +// on the current search path, 'go run' invokes the binary using that program, +// for example 'go_js_wasm_exec a.out arguments...'. This allows execution of +// cross-compiled programs when a simulator or other execution method is +// available. +// +// By default, 'go run' compiles the binary without generating the information +// used by debuggers, to reduce build time. To include debugger information in +// the binary, use 'go build'. +// +// The exit status of Run is not the exit status of the compiled binary. +// +// For more about build flags, see 'go help build'. +// For more about specifying packages, see 'go help packages'. +// +// See also: go build. +// +// # Test packages +// +// Usage: +// +// go test [build/test flags] [packages] [build/test flags & test binary flags] +// +// 'Go test' automates testing the packages named by the import paths. +// It prints a summary of the test results in the format: +// +// ok archive/tar 0.011s +// FAIL archive/zip 0.022s +// ok compress/gzip 0.033s +// ... +// +// followed by detailed output for each failed package. +// +// 'Go test' recompiles each package along with any files with names matching +// the file pattern "*_test.go". +// These additional files can contain test functions, benchmark functions, fuzz +// tests and example functions. 
See 'go help testfunc' for more. +// Each listed package causes the execution of a separate test binary. +// Files whose names begin with "_" (including "_test.go") or "." are ignored. +// +// Test files that declare a package with the suffix "_test" will be compiled as a +// separate package, and then linked and run with the main test binary. +// +// The go tool will ignore a directory named "testdata", making it available +// to hold ancillary data needed by the tests. +// +// As part of building a test binary, go test runs go vet on the package +// and its test source files to identify significant problems. If go vet +// finds any problems, go test reports those and does not run the test +// binary. Only a high-confidence subset of the default go vet checks are +// used. That subset is: atomic, bool, buildtags, directive, errorsas, +// ifaceassert, nilfunc, printf, and stringintconv. You can see +// the documentation for these and other vet tests via "go doc cmd/vet". +// To disable the running of go vet, use the -vet=off flag. To run all +// checks, use the -vet=all flag. +// +// All test output and summary lines are printed to the go command's +// standard output, even if the test printed them to its own standard +// error. (The go command's standard error is reserved for printing +// errors building the tests.) +// +// The go command places $GOROOT/bin at the beginning of $PATH +// in the test's environment, so that tests that execute +// 'go' commands use the same 'go' as the parent 'go test' command. +// +// Go test runs in two different modes: +// +// The first, called local directory mode, occurs when go test is +// invoked with no package arguments (for example, 'go test' or 'go +// test -v'). In this mode, go test compiles the package sources and +// tests found in the current directory and then runs the resulting +// test binary. In this mode, caching (discussed below) is disabled. 
+// After the package test finishes, go test prints a summary line +// showing the test status ('ok' or 'FAIL'), package name, and elapsed +// time. +// +// The second, called package list mode, occurs when go test is invoked +// with explicit package arguments (for example 'go test math', 'go +// test ./...', and even 'go test .'). In this mode, go test compiles +// and tests each of the packages listed on the command line. If a +// package test passes, go test prints only the final 'ok' summary +// line. If a package test fails, go test prints the full test output. +// If invoked with the -bench or -v flag, go test prints the full +// output even for passing package tests, in order to display the +// requested benchmark results or verbose logging. After the package +// tests for all of the listed packages finish, and their output is +// printed, go test prints a final 'FAIL' status if any package test +// has failed. +// +// In package list mode only, go test caches successful package test +// results to avoid unnecessary repeated running of tests. When the +// result of a test can be recovered from the cache, go test will +// redisplay the previous output instead of running the test binary +// again. When this happens, go test prints '(cached)' in place of the +// elapsed time in the summary line. +// +// The rule for a match in the cache is that the run involves the same +// test binary and the flags on the command line come entirely from a +// restricted set of 'cacheable' test flags, defined as -benchtime, -cpu, +// -list, -parallel, -run, -short, -timeout, -failfast, and -v. +// If a run of go test has any test or non-test flags outside this set, +// the result is not cached. To disable test caching, use any test flag +// or argument other than the cacheable flags. The idiomatic way to disable +// test caching explicitly is to use -count=1. 
Tests that open files within +// the package's source root (usually $GOPATH) or that consult environment +// variables only match future runs in which the files and environment +// variables are unchanged. A cached test result is treated as executing +// in no time at all, so a successful package test result will be cached and +// reused regardless of -timeout setting. +// +// In addition to the build flags, the flags handled by 'go test' itself are: +// +// -args +// Pass the remainder of the command line (everything after -args) +// to the test binary, uninterpreted and unchanged. +// Because this flag consumes the remainder of the command line, +// the package list (if present) must appear before this flag. +// +// -c +// Compile the test binary to pkg.test in the current directory but do not run it +// (where pkg is the last element of the package's import path). +// The file name or target directory can be changed with the -o flag. +// +// -exec xprog +// Run the test binary using xprog. The behavior is the same as +// in 'go run'. See 'go help run' for details. +// +// -json +// Convert test output to JSON suitable for automated processing. +// See 'go doc test2json' for the encoding details. +// +// -o file +// Compile the test binary to the named file. +// The test still runs (unless -c or -i is specified). +// If file ends in a slash or names an existing directory, +// the test is written to pkg.test in that directory. +// +// The test binary also accepts flags that control execution of the test; these +// flags are also accessible by 'go test'. See 'go help testflag' for details. +// +// For more about build flags, see 'go help build'. +// For more about specifying packages, see 'go help packages'. +// +// See also: go build, go vet. +// +// # Run specified go tool +// +// Usage: +// +// go tool [-n] command [args...] +// +// Tool runs the go tool command identified by the arguments. +// With no arguments it prints the list of known tools. 
+// +// The -n flag causes tool to print the command that would be +// executed but not execute it. +// +// For more about each tool command, see 'go doc cmd/'. +// +// # Print Go version +// +// Usage: +// +// go version [-m] [-v] [file ...] +// +// Version prints the build information for Go binary files. +// +// Go version reports the Go version used to build each of the named files. +// +// If no files are named on the command line, go version prints its own +// version information. +// +// If a directory is named, go version walks that directory, recursively, +// looking for recognized Go binaries and reporting their versions. +// By default, go version does not report unrecognized files found +// during a directory scan. The -v flag causes it to report unrecognized files. +// +// The -m flag causes go version to print each file's embedded +// module version information, when available. In the output, the module +// information consists of multiple lines following the version line, each +// indented by a leading tab character. +// +// See also: go doc runtime/debug.BuildInfo. +// +// # Report likely mistakes in packages +// +// Usage: +// +// go vet [build flags] [-vettool prog] [vet flags] [packages] +// +// Vet runs the Go vet command on the packages named by the import paths. +// +// For more about vet and its flags, see 'go doc cmd/vet'. +// For more about specifying packages, see 'go help packages'. +// For a list of checkers and their flags, see 'go tool vet help'. +// For details of a specific checker such as 'printf', see 'go tool vet help printf'. +// +// The -vettool=prog flag selects a different analysis tool with alternative +// or additional checks. 
+// For example, the 'shadow' analyzer can be built and run using these commands: +// +// go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow@latest +// go vet -vettool=$(which shadow) +// +// The build flags supported by go vet are those that control package resolution +// and execution, such as -C, -n, -x, -v, -tags, and -toolexec. +// For more about these flags, see 'go help build'. +// +// See also: go fmt, go fix. +// +// # Build constraints +// +// A build constraint, also known as a build tag, is a condition under which a +// file should be included in the package. Build constraints are given by a +// line comment that begins +// +// //go:build +// +// Constraints may appear in any kind of source file (not just Go), but +// they must appear near the top of the file, preceded +// only by blank lines and other comments. These rules mean that in Go +// files a build constraint must appear before the package clause. +// +// To distinguish build constraints from package documentation, +// a build constraint should be followed by a blank line. +// +// A build constraint comment is evaluated as an expression containing +// build tags combined by ||, &&, and ! operators and parentheses. +// Operators have the same meaning as in Go. +// +// For example, the following build constraint constrains a file to +// build when the "linux" and "386" constraints are satisfied, or when +// "darwin" is satisfied and "cgo" is not: +// +// //go:build (linux && 386) || (darwin && !cgo) +// +// It is an error for a file to have more than one //go:build line. +// +// During a particular build, the following build tags are satisfied: +// +// - the target operating system, as spelled by runtime.GOOS, set with the +// GOOS environment variable. +// - the target architecture, as spelled by runtime.GOARCH, set with the +// GOARCH environment variable. +// - any architecture features, in the form GOARCH.feature +// (for example, "amd64.v2"), as detailed below. 
+// - "unix", if GOOS is a Unix or Unix-like system. +// - the compiler being used, either "gc" or "gccgo" +// - "cgo", if the cgo command is supported (see CGO_ENABLED in +// 'go help environment'). +// - a term for each Go major release, through the current version: +// "go1.1" from Go version 1.1 onward, "go1.12" from Go 1.12, and so on. +// - any additional tags given by the -tags flag (see 'go help build'). +// +// There are no separate build tags for beta or minor releases. +// +// If a file's name, after stripping the extension and a possible _test suffix, +// matches any of the following patterns: +// +// *_GOOS +// *_GOARCH +// *_GOOS_GOARCH +// +// (example: source_windows_amd64.go) where GOOS and GOARCH represent +// any known operating system and architecture values respectively, then +// the file is considered to have an implicit build constraint requiring +// those terms (in addition to any explicit constraints in the file). +// +// Using GOOS=android matches build tags and files as for GOOS=linux +// in addition to android tags and files. +// +// Using GOOS=illumos matches build tags and files as for GOOS=solaris +// in addition to illumos tags and files. +// +// Using GOOS=ios matches build tags and files as for GOOS=darwin +// in addition to ios tags and files. +// +// The defined architecture feature build tags are: +// +// - For GOARCH=386, GO386=387 and GO386=sse2 +// set the 386.387 and 386.sse2 build tags, respectively. +// - For GOARCH=amd64, GOAMD64=v1, v2, and v3 +// correspond to the amd64.v1, amd64.v2, and amd64.v3 feature build tags. +// - For GOARCH=arm, GOARM=5, 6, and 7 +// correspond to the arm.5, arm.6, and arm.7 feature build tags. +// - For GOARCH=mips or mipsle, +// GOMIPS=hardfloat and softfloat +// correspond to the mips.hardfloat and mips.softfloat +// (or mipsle.hardfloat and mipsle.softfloat) feature build tags. 
+// - For GOARCH=mips64 or mips64le, +// GOMIPS64=hardfloat and softfloat +// correspond to the mips64.hardfloat and mips64.softfloat +// (or mips64le.hardfloat and mips64le.softfloat) feature build tags. +// - For GOARCH=ppc64 or ppc64le, +// GOPPC64=power8, power9, and power10 correspond to the +// ppc64.power8, ppc64.power9, and ppc64.power10 +// (or ppc64le.power8, ppc64le.power9, and ppc64le.power10) +// feature build tags. +// - For GOARCH=wasm, GOWASM=satconv and signext +// correspond to the wasm.satconv and wasm.signext feature build tags. +// +// For GOARCH=amd64, arm, ppc64, and ppc64le, a particular feature level +// sets the feature build tags for all previous levels as well. +// For example, GOAMD64=v2 sets the amd64.v1 and amd64.v2 feature flags. +// This ensures that code making use of v2 features continues to compile +// when, say, GOAMD64=v4 is introduced. +// Code handling the absence of a particular feature level +// should use a negation: +// +// //go:build !amd64.v2 +// +// To keep a file from being considered for any build: +// +// //go:build ignore +// +// (Any other unsatisfied word will work as well, but "ignore" is conventional.) +// +// To build a file only when using cgo, and only on Linux and OS X: +// +// //go:build cgo && (linux || darwin) +// +// Such a file is usually paired with another file implementing the +// default functionality for other systems, which in this case would +// carry the constraint: +// +// //go:build !(cgo && (linux || darwin)) +// +// Naming a file dns_windows.go will cause it to be included only when +// building the package for Windows; similarly, math_386.s will be included +// only when building the package for 32-bit x86. +// +// Go versions 1.16 and earlier used a different syntax for build constraints, +// with a "// +build" prefix. The gofmt command will add an equivalent //go:build +// constraint when encountering the older syntax. 
+// +// # Build modes +// +// The 'go build' and 'go install' commands take a -buildmode argument which +// indicates which kind of object file is to be built. Currently supported values +// are: +// +// -buildmode=archive +// Build the listed non-main packages into .a files. Packages named +// main are ignored. +// +// -buildmode=c-archive +// Build the listed main package, plus all packages it imports, +// into a C archive file. The only callable symbols will be those +// functions exported using a cgo //export comment. Requires +// exactly one main package to be listed. +// +// -buildmode=c-shared +// Build the listed main package, plus all packages it imports, +// into a C shared library. The only callable symbols will +// be those functions exported using a cgo //export comment. +// Requires exactly one main package to be listed. +// +// -buildmode=default +// Listed main packages are built into executables and listed +// non-main packages are built into .a files (the default +// behavior). +// +// -buildmode=shared +// Combine all the listed non-main packages into a single shared +// library that will be used when building with the -linkshared +// option. Packages named main are ignored. +// +// -buildmode=exe +// Build the listed main packages and everything they import into +// executables. Packages not named main are ignored. +// +// -buildmode=pie +// Build the listed main packages and everything they import into +// position independent executables (PIE). Packages not named +// main are ignored. +// +// -buildmode=plugin +// Build the listed main packages, plus all packages that they +// import, into a Go plugin. Packages not named main are ignored. +// +// On AIX, when linking a C program that uses a Go archive built with +// -buildmode=c-archive, you must pass -Wl,-bnoobjreorder to the C compiler. +// +// # Calling between Go and C +// +// There are two different ways to call between Go and C/C++ code. 
+// +// The first is the cgo tool, which is part of the Go distribution. For +// information on how to use it see the cgo documentation (go doc cmd/cgo). +// +// The second is the SWIG program, which is a general tool for +// interfacing between languages. For information on SWIG see +// http://swig.org/. When running go build, any file with a .swig +// extension will be passed to SWIG. Any file with a .swigcxx extension +// will be passed to SWIG with the -c++ option. +// +// When either cgo or SWIG is used, go build will pass any .c, .m, .s, .S +// or .sx files to the C compiler, and any .cc, .cpp, .cxx files to the C++ +// compiler. The CC or CXX environment variables may be set to determine +// the C or C++ compiler, respectively, to use. +// +// # Build and test caching +// +// The go command caches build outputs for reuse in future builds. +// The default location for cache data is a subdirectory named go-build +// in the standard user cache directory for the current operating system. +// Setting the GOCACHE environment variable overrides this default, +// and running 'go env GOCACHE' prints the current cache directory. +// +// The go command periodically deletes cached data that has not been +// used recently. Running 'go clean -cache' deletes all cached data. +// +// The build cache correctly accounts for changes to Go source files, +// compilers, compiler options, and so on: cleaning the cache explicitly +// should not be necessary in typical use. However, the build cache +// does not detect changes to C libraries imported with cgo. +// If you have made changes to the C libraries on your system, you +// will need to clean the cache explicitly or else use the -a build flag +// (see 'go help build') to force rebuilding of packages that +// depend on the updated C libraries. +// +// The go command also caches successful package test results. +// See 'go help test' for details. 
Running 'go clean -testcache' removes +// all cached test results (but not cached build results). +// +// The go command also caches values used in fuzzing with 'go test -fuzz', +// specifically, values that expanded code coverage when passed to a +// fuzz function. These values are not used for regular building and +// testing, but they're stored in a subdirectory of the build cache. +// Running 'go clean -fuzzcache' removes all cached fuzzing values. +// This may make fuzzing less effective, temporarily. +// +// The GODEBUG environment variable can enable printing of debugging +// information about the state of the cache: +// +// GODEBUG=gocacheverify=1 causes the go command to bypass the +// use of any cache entries and instead rebuild everything and check +// that the results match existing cache entries. +// +// GODEBUG=gocachehash=1 causes the go command to print the inputs +// for all of the content hashes it uses to construct cache lookup keys. +// The output is voluminous but can be useful for debugging the cache. +// +// GODEBUG=gocachetest=1 causes the go command to print details of its +// decisions about whether to reuse a cached test result. +// +// # Environment variables +// +// The go command and the tools it invokes consult environment variables +// for configuration. If an environment variable is unset or empty, the go +// command uses a sensible default setting. To see the effective setting of +// the variable <NAME>, run 'go env <NAME>'. To change the default setting, +// run 'go env -w <NAME>=<VALUE>'. Defaults changed using 'go env -w' +// are recorded in a Go environment configuration file stored in the +// per-user configuration directory, as reported by os.UserConfigDir. +// The location of the configuration file can be changed by setting +// the environment variable GOENV, and 'go env GOENV' prints the +// effective location, but 'go env -w' cannot change the default location. +// See 'go help env' for details. 
+// +// General-purpose environment variables: +// +// GO111MODULE +// Controls whether the go command runs in module-aware mode or GOPATH mode. +// May be "off", "on", or "auto". +// See https://golang.org/ref/mod#mod-commands. +// GCCGO +// The gccgo command to run for 'go build -compiler=gccgo'. +// GOARCH +// The architecture, or processor, for which to compile code. +// Examples are amd64, 386, arm, ppc64. +// GOBIN +// The directory where 'go install' will install a command. +// GOCACHE +// The directory where the go command will store cached +// information for reuse in future builds. +// GOMODCACHE +// The directory where the go command will store downloaded modules. +// GODEBUG +// Enable various debugging facilities. See https://go.dev/doc/godebug +// for details. +// GOENV +// The location of the Go environment configuration file. +// Cannot be set using 'go env -w'. +// Setting GOENV=off in the environment disables the use of the +// default configuration file. +// GOFLAGS +// A space-separated list of -flag=value settings to apply +// to go commands by default, when the given flag is known by +// the current command. Each entry must be a standalone flag. +// Because the entries are space-separated, flag values must +// not contain spaces. Flags listed on the command line +// are applied after this list and therefore override it. +// GOINSECURE +// Comma-separated list of glob patterns (in the syntax of Go's path.Match) +// of module path prefixes that should always be fetched in an insecure +// manner. Only applies to dependencies that are being fetched directly. +// GOINSECURE does not disable checksum database validation. GOPRIVATE or +// GONOSUMDB may be used to achieve that. +// GOOS +// The operating system for which to compile code. +// Examples are linux, darwin, windows, netbsd. +// GOPATH +// Controls where various files are stored. See: 'go help gopath'. +// GOPROXY +// URL of Go module proxy. 
See https://golang.org/ref/mod#environment-variables +// and https://golang.org/ref/mod#module-proxy for details. +// GOPRIVATE, GONOPROXY, GONOSUMDB +// Comma-separated list of glob patterns (in the syntax of Go's path.Match) +// of module path prefixes that should always be fetched directly +// or that should not be compared against the checksum database. +// See https://golang.org/ref/mod#private-modules. +// GOROOT +// The root of the go tree. +// GOSUMDB +// The name of checksum database to use and optionally its public key and +// URL. See https://golang.org/ref/mod#authenticating. +// GOTOOLCHAIN +// Controls which Go toolchain is used. See https://go.dev/doc/toolchain. +// GOTMPDIR +// The directory where the go command will write +// temporary source files, packages, and binaries. +// GOVCS +// Lists version control commands that may be used with matching servers. +// See 'go help vcs'. +// GOWORK +// In module aware mode, use the given go.work file as a workspace file. +// By default or when GOWORK is "auto", the go command searches for a +// file named go.work in the current directory and then containing directories +// until one is found. If a valid go.work file is found, the modules +// specified will collectively be used as the main modules. If GOWORK +// is "off", or a go.work file is not found in "auto" mode, workspace +// mode is disabled. +// +// Environment variables for use with cgo: +// +// AR +// The command to use to manipulate library archives when +// building with the gccgo compiler. +// The default is 'ar'. +// CC +// The command to use to compile C code. +// CGO_ENABLED +// Whether the cgo command is supported. Either 0 or 1. +// CGO_CFLAGS +// Flags that cgo will pass to the compiler when compiling +// C code. +// CGO_CFLAGS_ALLOW +// A regular expression specifying additional flags to allow +// to appear in #cgo CFLAGS source code directives. +// Does not apply to the CGO_CFLAGS environment variable. 
+// CGO_CFLAGS_DISALLOW +// A regular expression specifying flags that must be disallowed +// from appearing in #cgo CFLAGS source code directives. +// Does not apply to the CGO_CFLAGS environment variable. +// CGO_CPPFLAGS, CGO_CPPFLAGS_ALLOW, CGO_CPPFLAGS_DISALLOW +// Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, +// but for the C preprocessor. +// CGO_CXXFLAGS, CGO_CXXFLAGS_ALLOW, CGO_CXXFLAGS_DISALLOW +// Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, +// but for the C++ compiler. +// CGO_FFLAGS, CGO_FFLAGS_ALLOW, CGO_FFLAGS_DISALLOW +// Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, +// but for the Fortran compiler. +// CGO_LDFLAGS, CGO_LDFLAGS_ALLOW, CGO_LDFLAGS_DISALLOW +// Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, +// but for the linker. +// CXX +// The command to use to compile C++ code. +// FC +// The command to use to compile Fortran code. +// PKG_CONFIG +// Path to pkg-config tool. +// +// Architecture-specific environment variables: +// +// GOARM +// For GOARCH=arm, the ARM architecture for which to compile. +// Valid values are 5, 6, 7. +// The value can be followed by an option specifying how to implement floating point instructions. +// Valid options are ,softfloat (default for 5) and ,hardfloat (default for 6 and 7). +// GO386 +// For GOARCH=386, how to implement floating point instructions. +// Valid values are sse2 (default), softfloat. +// GOAMD64 +// For GOARCH=amd64, the microarchitecture level for which to compile. +// Valid values are v1 (default), v2, v3, v4. +// See https://golang.org/wiki/MinimumRequirements#amd64 +// GOMIPS +// For GOARCH=mips{,le}, whether to use floating point instructions. +// Valid values are hardfloat (default), softfloat. +// GOMIPS64 +// For GOARCH=mips64{,le}, whether to use floating point instructions. +// Valid values are hardfloat (default), softfloat. +// GOPPC64 +// For GOARCH=ppc64{,le}, the target ISA (Instruction Set Architecture). 
+// Valid values are power8 (default), power9, power10. +// GOWASM +// For GOARCH=wasm, comma-separated list of experimental WebAssembly features to use. +// Valid values are satconv, signext. +// +// Environment variables for use with code coverage: +// +// GOCOVERDIR +// Directory into which to write code coverage data files +// generated by running a "go build -cover" binary. +// Requires that GOEXPERIMENT=coverageredesign is enabled. +// +// Special-purpose environment variables: +// +// GCCGOTOOLDIR +// If set, where to find gccgo tools, such as cgo. +// The default is based on how gccgo was configured. +// GOEXPERIMENT +// Comma-separated list of toolchain experiments to enable or disable. +// The list of available experiments may change arbitrarily over time. +// See src/internal/goexperiment/flags.go for currently valid values. +// Warning: This variable is provided for the development and testing +// of the Go toolchain itself. Use beyond that purpose is unsupported. +// GOROOT_FINAL +// The root of the installed Go tree, when it is +// installed in a location other than where it is built. +// File names in stack traces are rewritten from GOROOT to +// GOROOT_FINAL. +// GO_EXTLINK_ENABLED +// Whether the linker should use external linking mode +// when using -linkmode=auto with code that uses cgo. +// Set to 0 to disable external linking mode, 1 to enable it. +// GIT_ALLOW_PROTOCOL +// Defined by Git. A colon-separated list of schemes that are allowed +// to be used with git fetch/clone. If set, any scheme not explicitly +// mentioned will be considered insecure by 'go get'. +// Because the variable is defined by Git, the default value cannot +// be set using 'go env -w'. +// +// Additional information available from 'go env' but not read from the environment: +// +// GOEXE +// The executable file name suffix (".exe" on Windows, "" on other systems). +// GOGCCFLAGS +// A space-separated list of arguments supplied to the CC command. 
+// GOHOSTARCH +// The architecture (GOARCH) of the Go toolchain binaries. +// GOHOSTOS +// The operating system (GOOS) of the Go toolchain binaries. +// GOMOD +// The absolute path to the go.mod of the main module. +// If module-aware mode is enabled, but there is no go.mod, GOMOD will be +// os.DevNull ("/dev/null" on Unix-like systems, "NUL" on Windows). +// If module-aware mode is disabled, GOMOD will be the empty string. +// GOTOOLDIR +// The directory where the go tools (compile, cover, doc, etc...) are installed. +// GOVERSION +// The version of the installed Go tree, as reported by runtime.Version. +// +// # File types +// +// The go command examines the contents of a restricted set of files +// in each directory. It identifies which files to examine based on +// the extension of the file name. These extensions are: +// +// .go +// Go source files. +// .c, .h +// C source files. +// If the package uses cgo or SWIG, these will be compiled with the +// OS-native compiler (typically gcc); otherwise they will +// trigger an error. +// .cc, .cpp, .cxx, .hh, .hpp, .hxx +// C++ source files. Only useful with cgo or SWIG, and always +// compiled with the OS-native compiler. +// .m +// Objective-C source files. Only useful with cgo, and always +// compiled with the OS-native compiler. +// .s, .S, .sx +// Assembler source files. +// If the package uses cgo or SWIG, these will be assembled with the +// OS-native assembler (typically gcc (sic)); otherwise they +// will be assembled with the Go assembler. +// .swig, .swigcxx +// SWIG definition files. +// .syso +// System object files. +// +// Files of each of these types except .syso may contain build +// constraints, but the go command stops scanning for build constraints +// at the first item in the file that is not a blank line or //-style +// line comment. See the go/build package documentation for +// more details. 
+// +// # The go.mod file +// +// A module version is defined by a tree of source files, with a go.mod +// file in its root. When the go command is run, it looks in the current +// directory and then successive parent directories to find the go.mod +// marking the root of the main (current) module. +// +// The go.mod file format is described in detail at +// https://golang.org/ref/mod#go-mod-file. +// +// To create a new go.mod file, use 'go mod init'. For details see +// 'go help mod init' or https://golang.org/ref/mod#go-mod-init. +// +// To add missing module requirements or remove unneeded requirements, +// use 'go mod tidy'. For details, see 'go help mod tidy' or +// https://golang.org/ref/mod#go-mod-tidy. +// +// To add, upgrade, downgrade, or remove a specific module requirement, use +// 'go get'. For details, see 'go help module-get' or +// https://golang.org/ref/mod#go-get. +// +// To make other changes or to parse go.mod as JSON for use by other tools, +// use 'go mod edit'. See 'go help mod edit' or +// https://golang.org/ref/mod#go-mod-edit. +// +// # GOPATH environment variable +// +// The Go path is used to resolve import statements. +// It is implemented by and documented in the go/build package. +// +// The GOPATH environment variable lists places to look for Go code. +// On Unix, the value is a colon-separated string. +// On Windows, the value is a semicolon-separated string. +// On Plan 9, the value is a list. +// +// If the environment variable is unset, GOPATH defaults +// to a subdirectory named "go" in the user's home directory +// ($HOME/go on Unix, %USERPROFILE%\go on Windows), +// unless that directory holds a Go distribution. +// Run "go env GOPATH" to see the current GOPATH. +// +// See https://golang.org/wiki/SettingGOPATH to set a custom GOPATH. +// +// Each directory listed in GOPATH must have a prescribed structure: +// +// The src directory holds source code. The path below src +// determines the import path or executable name. 
+// +// The pkg directory holds installed package objects. +// As in the Go tree, each target operating system and +// architecture pair has its own subdirectory of pkg +// (pkg/GOOS_GOARCH). +// +// If DIR is a directory listed in the GOPATH, a package with +// source in DIR/src/foo/bar can be imported as "foo/bar" and +// has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a". +// +// The bin directory holds compiled commands. +// Each command is named for its source directory, but only +// the final element, not the entire path. That is, the +// command with source in DIR/src/foo/quux is installed into +// DIR/bin/quux, not DIR/bin/foo/quux. The "foo/" prefix is stripped +// so that you can add DIR/bin to your PATH to get at the +// installed commands. If the GOBIN environment variable is +// set, commands are installed to the directory it names instead +// of DIR/bin. GOBIN must be an absolute path. +// +// Here's an example directory layout: +// +// GOPATH=/home/user/go +// +// /home/user/go/ +// src/ +// foo/ +// bar/ (go code in package bar) +// x.go +// quux/ (go code in package main) +// y.go +// bin/ +// quux (installed command) +// pkg/ +// linux_amd64/ +// foo/ +// bar.a (installed package object) +// +// Go searches each directory listed in GOPATH to find source code, +// but new packages are always downloaded into the first directory +// in the list. +// +// See https://golang.org/doc/code.html for an example. +// +// # GOPATH and Modules +// +// When using modules, GOPATH is no longer used for resolving imports. +// However, it is still used to store downloaded source code (in GOPATH/pkg/mod) +// and compiled commands (in GOPATH/bin). +// +// # Internal Directories +// +// Code in or below a directory named "internal" is importable only +// by code in the directory tree rooted at the parent of "internal". 
+// Here's an extended version of the directory layout above: +// +// /home/user/go/ +// src/ +// crash/ +// bang/ (go code in package bang) +// b.go +// foo/ (go code in package foo) +// f.go +// bar/ (go code in package bar) +// x.go +// internal/ +// baz/ (go code in package baz) +// z.go +// quux/ (go code in package main) +// y.go +// +// The code in z.go is imported as "foo/internal/baz", but that +// import statement can only appear in source files in the subtree +// rooted at foo. The source files foo/f.go, foo/bar/x.go, and +// foo/quux/y.go can all import "foo/internal/baz", but the source file +// crash/bang/b.go cannot. +// +// See https://golang.org/s/go14internal for details. +// +// # Vendor Directories +// +// Go 1.6 includes support for using local copies of external dependencies +// to satisfy imports of those dependencies, often referred to as vendoring. +// +// Code below a directory named "vendor" is importable only +// by code in the directory tree rooted at the parent of "vendor", +// and only using an import path that omits the prefix up to and +// including the vendor element. +// +// Here's the example from the previous section, +// but with the "internal" directory renamed to "vendor" +// and a new foo/vendor/crash/bang directory added: +// +// /home/user/go/ +// src/ +// crash/ +// bang/ (go code in package bang) +// b.go +// foo/ (go code in package foo) +// f.go +// bar/ (go code in package bar) +// x.go +// vendor/ +// crash/ +// bang/ (go code in package bang) +// b.go +// baz/ (go code in package baz) +// z.go +// quux/ (go code in package main) +// y.go +// +// The same visibility rules apply as for internal, but the code +// in z.go is imported as "baz", not as "foo/vendor/baz". +// +// Code in vendor directories deeper in the source tree shadows +// code in higher directories. Within the subtree rooted at foo, an import +// of "crash/bang" resolves to "foo/vendor/crash/bang", not the +// top-level "crash/bang". 
+// +// Code in vendor directories is not subject to import path +// checking (see 'go help importpath'). +// +// When 'go get' checks out or updates a git repository, it now also +// updates submodules. +// +// Vendor directories do not affect the placement of new repositories +// being checked out for the first time by 'go get': those are always +// placed in the main GOPATH, never in a vendor subtree. +// +// See https://golang.org/s/go15vendor for details. +// +// # Module proxy protocol +// +// A Go module proxy is any web server that can respond to GET requests for +// URLs of a specified form. The requests have no query parameters, so even +// a site serving from a fixed file system (including a file:/// URL) +// can be a module proxy. +// +// For details on the GOPROXY protocol, see +// https://golang.org/ref/mod#goproxy-protocol. +// +// # Import path syntax +// +// An import path (see 'go help packages') denotes a package stored in the local +// file system. In general, an import path denotes either a standard package (such +// as "unicode/utf8") or a package found in one of the work spaces (For more +// details see: 'go help gopath'). +// +// # Relative import paths +// +// An import path beginning with ./ or ../ is called a relative path. +// The toolchain supports relative import paths as a shortcut in two ways. +// +// First, a relative path can be used as a shorthand on the command line. +// If you are working in the directory containing the code imported as +// "unicode" and want to run the tests for "unicode/utf8", you can type +// "go test ./utf8" instead of needing to specify the full path. +// Similarly, in the reverse situation, "go test .." will test "unicode" from +// the "unicode/utf8" directory. Relative patterns are also allowed, like +// "go test ./..." to test all subdirectories. See 'go help packages' for details +// on the pattern syntax. 
+// +// Second, if you are compiling a Go program not in a work space, +// you can use a relative path in an import statement in that program +// to refer to nearby code also not in a work space. +// This makes it easy to experiment with small multipackage programs +// outside of the usual work spaces, but such programs cannot be +// installed with "go install" (there is no work space in which to install them), +// so they are rebuilt from scratch each time they are built. +// To avoid ambiguity, Go programs cannot use relative import paths +// within a work space. +// +// # Remote import paths +// +// Certain import paths also +// describe how to obtain the source code for the package using +// a revision control system. +// +// A few common code hosting sites have special syntax: +// +// Bitbucket (Git, Mercurial) +// +// import "bitbucket.org/user/project" +// import "bitbucket.org/user/project/sub/directory" +// +// GitHub (Git) +// +// import "github.com/user/project" +// import "github.com/user/project/sub/directory" +// +// Launchpad (Bazaar) +// +// import "launchpad.net/project" +// import "launchpad.net/project/series" +// import "launchpad.net/project/series/sub/directory" +// +// import "launchpad.net/~user/project/branch" +// import "launchpad.net/~user/project/branch/sub/directory" +// +// IBM DevOps Services (Git) +// +// import "hub.jazz.net/git/user/project" +// import "hub.jazz.net/git/user/project/sub/directory" +// +// For code hosted on other servers, import paths may either be qualified +// with the version control type, or the go tool can dynamically fetch +// the import path over https/http and discover where the code resides +// from a tag in the HTML. +// +// To declare the code location, an import path of the form +// +// repository.vcs/path +// +// specifies the given repository, with or without the .vcs suffix, +// using the named version control system, and then the path inside +// that repository. 
The supported version control systems are: +// +// Bazaar .bzr +// Fossil .fossil +// Git .git +// Mercurial .hg +// Subversion .svn +// +// For example, +// +// import "example.org/user/foo.hg" +// +// denotes the root directory of the Mercurial repository at +// example.org/user/foo or foo.hg, and +// +// import "example.org/repo.git/foo/bar" +// +// denotes the foo/bar directory of the Git repository at +// example.org/repo or repo.git. +// +// When a version control system supports multiple protocols, +// each is tried in turn when downloading. For example, a Git +// download tries https://, then git+ssh://. +// +// By default, downloads are restricted to known secure protocols +// (e.g. https, ssh). To override this setting for Git downloads, the +// GIT_ALLOW_PROTOCOL environment variable can be set (For more details see: +// 'go help environment'). +// +// If the import path is not a known code hosting site and also lacks a +// version control qualifier, the go tool attempts to fetch the import +// over https/http and looks for a tag in the document's HTML +// . +// +// The meta tag has the form: +// +// +// +// The import-prefix is the import path corresponding to the repository +// root. It must be a prefix or an exact match of the package being +// fetched with "go get". If it's not an exact match, another http +// request is made at the prefix to verify the tags match. +// +// The meta tag should appear as early in the file as possible. +// In particular, it should appear before any raw JavaScript or CSS, +// to avoid confusing the go command's restricted parser. +// +// The vcs is one of "bzr", "fossil", "git", "hg", "svn". +// +// The repo-root is the root of the version control system +// containing a scheme and not containing a .vcs qualifier. 
+// +// For example, +// +// import "example.org/pkg/foo" +// +// will result in the following requests: +// +// https://example.org/pkg/foo?go-get=1 (preferred) +// http://example.org/pkg/foo?go-get=1 (fallback, only with use of correctly set GOINSECURE) +// +// If that page contains the meta tag +// +// +// +// the go tool will verify that https://example.org/?go-get=1 contains the +// same meta tag and then git clone https://code.org/r/p/exproj into +// GOPATH/src/example.org. +// +// When using GOPATH, downloaded packages are written to the first directory +// listed in the GOPATH environment variable. +// (See 'go help gopath-get' and 'go help gopath'.) +// +// When using modules, downloaded packages are stored in the module cache. +// See https://golang.org/ref/mod#module-cache. +// +// When using modules, an additional variant of the go-import meta tag is +// recognized and is preferred over those listing version control systems. +// That variant uses "mod" as the vcs in the content value, as in: +// +// +// +// This tag means to fetch modules with paths beginning with example.org +// from the module proxy available at the URL https://code.org/moduleproxy. +// See https://golang.org/ref/mod#goproxy-protocol for details about the +// proxy protocol. +// +// # Import path checking +// +// When the custom import path feature described above redirects to a +// known code hosting site, each of the resulting packages has two possible +// import paths, using the custom domain or the known hosting site. +// +// A package statement is said to have an "import comment" if it is immediately +// followed (before the next newline) by a comment of one of these two forms: +// +// package math // import "path" +// package math /* import "path" */ +// +// The go command will refuse to install a package with an import comment +// unless it is being referred to by that import path. 
In this way, import comments +// let package authors make sure the custom import path is used and not a +// direct path to the underlying code hosting site. +// +// Import path checking is disabled for code found within vendor trees. +// This makes it possible to copy code into alternate locations in vendor trees +// without needing to update import comments. +// +// Import path checking is also disabled when using modules. +// Import path comments are obsoleted by the go.mod file's module statement. +// +// See https://golang.org/s/go14customimport for details. +// +// # Modules, module versions, and more +// +// Modules are how Go manages dependencies. +// +// A module is a collection of packages that are released, versioned, and +// distributed together. Modules may be downloaded directly from version control +// repositories or from module proxy servers. +// +// For a series of tutorials on modules, see +// https://golang.org/doc/tutorial/create-module. +// +// For a detailed reference on modules, see https://golang.org/ref/mod. +// +// By default, the go command may download modules from https://proxy.golang.org. +// It may authenticate modules using the checksum database at +// https://sum.golang.org. Both services are operated by the Go team at Google. +// The privacy policies for these services are available at +// https://proxy.golang.org/privacy and https://sum.golang.org/privacy, +// respectively. +// +// The go command's download behavior may be configured using GOPROXY, GOSUMDB, +// GOPRIVATE, and other environment variables. See 'go help environment' +// and https://golang.org/ref/mod#private-module-privacy for more information. +// +// # Module authentication using go.sum +// +// When the go command downloads a module zip file or go.mod file into the +// module cache, it computes a cryptographic hash and compares it with a known +// value to verify the file hasn't changed since it was first downloaded. 
Known +// hashes are stored in a file in the module root directory named go.sum. Hashes +// may also be downloaded from the checksum database depending on the values of +// GOSUMDB, GOPRIVATE, and GONOSUMDB. +// +// For details, see https://golang.org/ref/mod#authenticating. +// +// # Package lists and patterns +// +// Many commands apply to a set of packages: +// +// go [packages] +// +// Usually, [packages] is a list of import paths. +// +// An import path that is a rooted path or that begins with +// a . or .. element is interpreted as a file system path and +// denotes the package in that directory. +// +// Otherwise, the import path P denotes the package found in +// the directory DIR/src/P for some DIR listed in the GOPATH +// environment variable (For more details see: 'go help gopath'). +// +// If no import paths are given, the action applies to the +// package in the current directory. +// +// There are four reserved names for paths that should not be used +// for packages to be built with the go tool: +// +// - "main" denotes the top-level package in a stand-alone executable. +// +// - "all" expands to all packages found in all the GOPATH +// trees. For example, 'go list all' lists all the packages on the local +// system. When using modules, "all" expands to all packages in +// the main module and their dependencies, including dependencies +// needed by tests of any of those. +// +// - "std" is like all but expands to just the packages in the standard +// Go library. +// +// - "cmd" expands to the Go repository's commands and their +// internal libraries. +// +// Import paths beginning with "cmd/" only match source code in +// the Go repository. +// +// An import path is a pattern if it includes one or more "..." wildcards, +// each of which can match any string, including the empty string and +// strings containing slashes. Such a pattern expands to all package +// directories found in the GOPATH trees with names matching the +// patterns. 
+// +// To make common patterns more convenient, there are two special cases. +// First, /... at the end of the pattern can match an empty string, +// so that net/... matches both net and packages in its subdirectories, like net/http. +// Second, any slash-separated pattern element containing a wildcard never +// participates in a match of the "vendor" element in the path of a vendored +// package, so that ./... does not match packages in subdirectories of +// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. +// Note, however, that a directory named vendor that itself contains code +// is not a vendored package: cmd/vendor would be a command named vendor, +// and the pattern cmd/... matches it. +// See golang.org/s/go15vendor for more about vendoring. +// +// An import path can also name a package to be downloaded from +// a remote repository. Run 'go help importpath' for details. +// +// Every package in a program must have a unique import path. +// By convention, this is arranged by starting each path with a +// unique prefix that belongs to you. For example, paths used +// internally at Google all begin with 'google', and paths +// denoting remote repositories begin with the path to the code, +// such as 'github.com/user/repo'. +// +// Packages in a program need not have unique package names, +// but there are two reserved package names with special meaning. +// The name main indicates a command, not a library. +// Commands are built into binaries and cannot be imported. +// The name documentation indicates documentation for +// a non-Go program in the directory. Files in package documentation +// are ignored by the go command. +// +// As a special case, if the package list is a list of .go files from a +// single directory, the command is applied to a single synthesized +// package made up of exactly those files, ignoring any build constraints +// in those files and ignoring any other files in the directory. 
+// +// Directory and file names that begin with "." or "_" are ignored +// by the go tool, as are directories named "testdata". +// +// # Configuration for downloading non-public code +// +// The go command defaults to downloading modules from the public Go module +// mirror at proxy.golang.org. It also defaults to validating downloaded modules, +// regardless of source, against the public Go checksum database at sum.golang.org. +// These defaults work well for publicly available source code. +// +// The GOPRIVATE environment variable controls which modules the go command +// considers to be private (not available publicly) and should therefore not use +// the proxy or checksum database. The variable is a comma-separated list of +// glob patterns (in the syntax of Go's path.Match) of module path prefixes. +// For example, +// +// GOPRIVATE=*.corp.example.com,rsc.io/private +// +// causes the go command to treat as private any module with a path prefix +// matching either pattern, including git.corp.example.com/xyzzy, rsc.io/private, +// and rsc.io/private/quux. +// +// For fine-grained control over module download and validation, the GONOPROXY +// and GONOSUMDB environment variables accept the same kind of glob list +// and override GOPRIVATE for the specific decision of whether to use the proxy +// and checksum database, respectively. +// +// For example, if a company ran a module proxy serving private modules, +// users would configure go using: +// +// GOPRIVATE=*.corp.example.com +// GOPROXY=proxy.example.com +// GONOPROXY=none +// +// The GOPRIVATE variable is also used to define the "public" and "private" +// patterns for the GOVCS variable; see 'go help vcs'. For that usage, +// GOPRIVATE applies even in GOPATH mode. In that case, it matches import paths +// instead of module paths. +// +// The 'go env -w' command (see 'go help env') can be used to set these variables +// for future go command invocations. 
+// +// For more details, see https://golang.org/ref/mod#private-modules. +// +// # Testing flags +// +// The 'go test' command takes both flags that apply to 'go test' itself +// and flags that apply to the resulting test binary. +// +// Several of the flags control profiling and write an execution profile +// suitable for "go tool pprof"; run "go tool pprof -h" for more +// information. The --alloc_space, --alloc_objects, and --show_bytes +// options of pprof control how the information is presented. +// +// The following flags are recognized by the 'go test' command and +// control the execution of any test: +// +// -bench regexp +// Run only those benchmarks matching a regular expression. +// By default, no benchmarks are run. +// To run all benchmarks, use '-bench .' or '-bench=.'. +// The regular expression is split by unbracketed slash (/) +// characters into a sequence of regular expressions, and each +// part of a benchmark's identifier must match the corresponding +// element in the sequence, if any. Possible parents of matches +// are run with b.N=1 to identify sub-benchmarks. For example, +// given -bench=X/Y, top-level benchmarks matching X are run +// with b.N=1 to find any sub-benchmarks matching Y, which are +// then run in full. +// +// -benchtime t +// Run enough iterations of each benchmark to take t, specified +// as a time.Duration (for example, -benchtime 1h30s). +// The default is 1 second (1s). +// The special syntax Nx means to run the benchmark N times +// (for example, -benchtime 100x). +// +// -count n +// Run each test, benchmark, and fuzz seed n times (default 1). +// If -cpu is set, run n times for each GOMAXPROCS value. +// Examples are always run once. -count does not apply to +// fuzz tests matched by -fuzz. +// +// -cover +// Enable coverage analysis. 
+// Note that because coverage works by annotating the source +// code before compilation, compilation and test failures with +// coverage enabled may report line numbers that don't correspond +// to the original sources. +// +// -covermode set,count,atomic +// Set the mode for coverage analysis for the package[s] +// being tested. The default is "set" unless -race is enabled, +// in which case it is "atomic". +// The values: +// set: bool: does this statement run? +// count: int: how many times does this statement run? +// atomic: int: count, but correct in multithreaded tests; +// significantly more expensive. +// Sets -cover. +// +// -coverpkg pattern1,pattern2,pattern3 +// Apply coverage analysis in each test to packages matching the patterns. +// The default is for each test to analyze only the package being tested. +// See 'go help packages' for a description of package patterns. +// Sets -cover. +// +// -cpu 1,2,4 +// Specify a list of GOMAXPROCS values for which the tests, benchmarks or +// fuzz tests should be executed. The default is the current value +// of GOMAXPROCS. -cpu does not apply to fuzz tests matched by -fuzz. +// +// -failfast +// Do not start new tests after the first test failure. +// +// -fullpath +// Show full file names in the error messages. +// +// -fuzz regexp +// Run the fuzz test matching the regular expression. When specified, +// the command line argument must match exactly one package within the +// main module, and regexp must match exactly one fuzz test within +// that package. Fuzzing will occur after tests, benchmarks, seed corpora +// of other fuzz tests, and examples have completed. See the Fuzzing +// section of the testing package documentation for details. +// +// -fuzztime t +// Run enough iterations of the fuzz target during fuzzing to take t, +// specified as a time.Duration (for example, -fuzztime 1h30s). +// The default is to run forever. 
+// The special syntax Nx means to run the fuzz target N times +// (for example, -fuzztime 1000x). +// +// -fuzzminimizetime t +// Run enough iterations of the fuzz target during each minimization +// attempt to take t, as specified as a time.Duration (for example, +// -fuzzminimizetime 30s). +// The default is 60s. +// The special syntax Nx means to run the fuzz target N times +// (for example, -fuzzminimizetime 100x). +// +// -json +// Log verbose output and test results in JSON. This presents the +// same information as the -v flag in a machine-readable format. +// +// -list regexp +// List tests, benchmarks, fuzz tests, or examples matching the regular +// expression. No tests, benchmarks, fuzz tests, or examples will be run. +// This will only list top-level tests. No subtest or subbenchmarks will be +// shown. +// +// -parallel n +// Allow parallel execution of test functions that call t.Parallel, and +// fuzz targets that call t.Parallel when running the seed corpus. +// The value of this flag is the maximum number of tests to run +// simultaneously. +// While fuzzing, the value of this flag is the maximum number of +// subprocesses that may call the fuzz function simultaneously, regardless of +// whether T.Parallel is called. +// By default, -parallel is set to the value of GOMAXPROCS. +// Setting -parallel to values higher than GOMAXPROCS may cause degraded +// performance due to CPU contention, especially when fuzzing. +// Note that -parallel only applies within a single test binary. +// The 'go test' command may run tests for different packages +// in parallel as well, according to the setting of the -p flag +// (see 'go help build'). +// +// -run regexp +// Run only those tests, examples, and fuzz tests matching the regular +// expression. 
For tests, the regular expression is split by unbracketed +// slash (/) characters into a sequence of regular expressions, and each +// part of a test's identifier must match the corresponding element in +// the sequence, if any. Note that possible parents of matches are +// run too, so that -run=X/Y matches and runs and reports the result +// of all tests matching X, even those without sub-tests matching Y, +// because it must run them to look for those sub-tests. +// See also -skip. +// +// -short +// Tell long-running tests to shorten their run time. +// It is off by default but set during all.bash so that installing +// the Go tree can run a sanity check but not spend time running +// exhaustive tests. +// +// -shuffle off,on,N +// Randomize the execution order of tests and benchmarks. +// It is off by default. If -shuffle is set to on, then it will seed +// the randomizer using the system clock. If -shuffle is set to an +// integer N, then N will be used as the seed value. In both cases, +// the seed will be reported for reproducibility. +// +// -skip regexp +// Run only those tests, examples, fuzz tests, and benchmarks that +// do not match the regular expression. Like for -run and -bench, +// for tests and benchmarks, the regular expression is split by unbracketed +// slash (/) characters into a sequence of regular expressions, and each +// part of a test's identifier must match the corresponding element in +// the sequence, if any. +// +// -timeout d +// If a test binary runs longer than duration d, panic. +// If d is 0, the timeout is disabled. +// The default is 10 minutes (10m). +// +// -v +// Verbose output: log all tests as they are run. Also print all +// text from Log and Logf calls even if the test succeeds. +// +// -vet list +// Configure the invocation of "go vet" during "go test" +// to use the comma-separated list of vet checks. 
+// If list is empty, "go test" runs "go vet" with a curated list of +// checks believed to be always worth addressing. +// If list is "off", "go test" does not run "go vet" at all. +// +// The following flags are also recognized by 'go test' and can be used to +// profile the tests during execution: +// +// -benchmem +// Print memory allocation statistics for benchmarks. +// +// -blockprofile block.out +// Write a goroutine blocking profile to the specified file +// when all tests are complete. +// Writes test binary as -c would. +// +// -blockprofilerate n +// Control the detail provided in goroutine blocking profiles by +// calling runtime.SetBlockProfileRate with n. +// See 'go doc runtime.SetBlockProfileRate'. +// The profiler aims to sample, on average, one blocking event every +// n nanoseconds the program spends blocked. By default, +// if -test.blockprofile is set without this flag, all blocking events +// are recorded, equivalent to -test.blockprofilerate=1. +// +// -coverprofile cover.out +// Write a coverage profile to the file after all tests have passed. +// Sets -cover. +// +// -cpuprofile cpu.out +// Write a CPU profile to the specified file before exiting. +// Writes test binary as -c would. +// +// -memprofile mem.out +// Write an allocation profile to the file after all tests have passed. +// Writes test binary as -c would. +// +// -memprofilerate n +// Enable more precise (and expensive) memory allocation profiles by +// setting runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'. +// To profile all memory allocations, use -test.memprofilerate=1. +// +// -mutexprofile mutex.out +// Write a mutex contention profile to the specified file +// when all tests are complete. +// Writes test binary as -c would. +// +// -mutexprofilefraction n +// Sample 1 in n stack traces of goroutines holding a +// contended mutex. 
+// +// -outputdir directory +// Place output files from profiling in the specified directory, +// by default the directory in which "go test" is running. +// +// -trace trace.out +// Write an execution trace to the specified file before exiting. +// +// Each of these flags is also recognized with an optional 'test.' prefix, +// as in -test.v. When invoking the generated test binary (the result of +// 'go test -c') directly, however, the prefix is mandatory. +// +// The 'go test' command rewrites or removes recognized flags, +// as appropriate, both before and after the optional package list, +// before invoking the test binary. +// +// For instance, the command +// +// go test -v -myflag testdata -cpuprofile=prof.out -x +// +// will compile the test binary and then run it as +// +// pkg.test -test.v -myflag testdata -test.cpuprofile=prof.out +// +// (The -x flag is removed because it applies only to the go command's +// execution, not to the test itself.) +// +// The test flags that generate profiles (other than for coverage) also +// leave the test binary in pkg.test for use when analyzing the profiles. +// +// When 'go test' runs a test binary, it does so from within the +// corresponding package's source code directory. Depending on the test, +// it may be necessary to do the same when invoking a generated test +// binary directly. Because that directory may be located within the +// module cache, which may be read-only and is verified by checksums, the +// test must not write to it or any other directory within the module +// unless explicitly requested by the user (such as with the -fuzz flag, +// which writes failures to testdata/fuzz). +// +// The command-line package list, if present, must appear before any +// flag not known to the go test command. Continuing the example above, +// the package list would have to appear before -myflag, but could appear +// on either side of -v. 
+// +// When 'go test' runs in package list mode, 'go test' caches successful +// package test results to avoid unnecessary repeated running of tests. To +// disable test caching, use any test flag or argument other than the +// cacheable flags. The idiomatic way to disable test caching explicitly +// is to use -count=1. +// +// To keep an argument for a test binary from being interpreted as a +// known flag or a package name, use -args (see 'go help test') which +// passes the remainder of the command line through to the test binary +// uninterpreted and unaltered. +// +// For instance, the command +// +// go test -v -args -x -v +// +// will compile the test binary and then run it as +// +// pkg.test -test.v -x -v +// +// Similarly, +// +// go test -args math +// +// will compile the test binary and then run it as +// +// pkg.test math +// +// In the first example, the -x and the second -v are passed through to the +// test binary unchanged and with no effect on the go command itself. +// In the second example, the argument math is passed through to the test +// binary, instead of being interpreted as the package list. +// +// # Testing functions +// +// The 'go test' command expects to find test, benchmark, and example functions +// in the "*_test.go" files corresponding to the package under test. +// +// A test function is one named TestXxx (where Xxx does not start with a +// lower case letter) and should have the signature, +// +// func TestXxx(t *testing.T) { ... } +// +// A benchmark function is one named BenchmarkXxx and should have the signature, +// +// func BenchmarkXxx(b *testing.B) { ... } +// +// A fuzz test is one named FuzzXxx and should have the signature, +// +// func FuzzXxx(f *testing.F) { ... } +// +// An example function is similar to a test function but, instead of using +// *testing.T to report success or failure, prints output to os.Stdout. 
+// If the last comment in the function starts with "Output:" then the output +// is compared exactly against the comment (see examples below). If the last +// comment begins with "Unordered output:" then the output is compared to the +// comment, however the order of the lines is ignored. An example with no such +// comment is compiled but not executed. An example with no text after +// "Output:" is compiled, executed, and expected to produce no output. +// +// Godoc displays the body of ExampleXxx to demonstrate the use +// of the function, constant, or variable Xxx. An example of a method M with +// receiver type T or *T is named ExampleT_M. There may be multiple examples +// for a given function, constant, or variable, distinguished by a trailing _xxx, +// where xxx is a suffix not beginning with an upper case letter. +// +// Here is an example of an example: +// +// func ExamplePrintln() { +// Println("The output of\nthis example.") +// // Output: The output of +// // this example. +// } +// +// Here is another example where the ordering of the output is ignored: +// +// func ExamplePerm() { +// for _, value := range Perm(4) { +// fmt.Println(value) +// } +// +// // Unordered output: 4 +// // 2 +// // 1 +// // 3 +// // 0 +// } +// +// The entire test file is presented as the example when it contains a single +// example function, at least one other function, type, variable, or constant +// declaration, and no tests, benchmarks, or fuzz tests. +// +// See the documentation of the testing package for more information. +// +// # Controlling version control with GOVCS +// +// The 'go get' command can run version control commands like git +// to download imported code. This functionality is critical to the decentralized +// Go package ecosystem, in which code can be imported from any server, +// but it is also a potential security problem, if a malicious server finds a +// way to cause the invoked version control command to run unintended code. 
+// +// To balance the functionality and security concerns, the 'go get' command +// by default will only use git and hg to download code from public servers. +// But it will use any known version control system (bzr, fossil, git, hg, svn) +// to download code from private servers, defined as those hosting packages +// matching the GOPRIVATE variable (see 'go help private'). The rationale behind +// allowing only Git and Mercurial is that these two systems have had the most +// attention to issues of being run as clients of untrusted servers. In contrast, +// Bazaar, Fossil, and Subversion have primarily been used in trusted, +// authenticated environments and are not as well scrutinized as attack surfaces. +// +// The version control command restrictions only apply when using direct version +// control access to download code. When downloading modules from a proxy, +// 'go get' uses the proxy protocol instead, which is always permitted. +// By default, the 'go get' command uses the Go module mirror (proxy.golang.org) +// for public packages and only falls back to version control for private +// packages or when the mirror refuses to serve a public package (typically for +// legal reasons). Therefore, clients can still access public code served from +// Bazaar, Fossil, or Subversion repositories by default, because those downloads +// use the Go module mirror, which takes on the security risk of running the +// version control commands using a custom sandbox. +// +// The GOVCS variable can be used to change the allowed version control systems +// for specific packages (identified by a module or import path). +// The GOVCS variable applies when building package in both module-aware mode +// and GOPATH mode. When using modules, the patterns match against the module path. +// When using GOPATH, the patterns match against the import path corresponding to +// the root of the version control repository. 
+// +// The general form of the GOVCS setting is a comma-separated list of +// pattern:vcslist rules. The pattern is a glob pattern that must match +// one or more leading elements of the module or import path. The vcslist +// is a pipe-separated list of allowed version control commands, or "all" +// to allow use of any known command, or "off" to disallow all commands. +// Note that if a module matches a pattern with vcslist "off", it may still be +// downloaded if the origin server uses the "mod" scheme, which instructs the +// go command to download the module using the GOPROXY protocol. +// The earliest matching pattern in the list applies, even if later patterns +// might also match. +// +// For example, consider: +// +// GOVCS=github.com:git,evil.com:off,*:git|hg +// +// With this setting, code with a module or import path beginning with +// github.com/ can only use git; paths on evil.com cannot use any version +// control command, and all other paths (* matches everything) can use +// only git or hg. +// +// The special patterns "public" and "private" match public and private +// module or import paths. A path is private if it matches the GOPRIVATE +// variable; otherwise it is public. +// +// If no rules in the GOVCS variable match a particular module or import path, +// the 'go get' command applies its default rule, which can now be summarized +// in GOVCS notation as 'public:git|hg,private:all'. +// +// To allow unfettered use of any version control system for any package, use: +// +// GOVCS=*:all +// +// To disable all use of version control, use: +// +// GOVCS=*:off +// +// The 'go env -w' command (see 'go help env') can be used to set the GOVCS +// variable for future go command invocations. 
+package main diff --git a/platform/dbops/binaries/go/go/src/cmd/go/chdir_test.go b/platform/dbops/binaries/go/go/src/cmd/go/chdir_test.go new file mode 100644 index 0000000000000000000000000000000000000000..44cbb9c3f7339cd456140389293e47f673670158 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/chdir_test.go @@ -0,0 +1,49 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "cmd/go/internal/base" + "os" + "strings" + "testing" +) + +func TestChdir(t *testing.T) { + // We want -C to apply to every go subcommand. + // Test that every command either has a -C flag registered + // or has CustomFlags set. In the latter case, the command + // must be explicitly tested in TestScript/chdir. + script, err := os.ReadFile("testdata/script/chdir.txt") + if err != nil { + t.Fatal(err) + } + + var walk func(string, *base.Command) + walk = func(name string, cmd *base.Command) { + if len(cmd.Commands) > 0 { + for _, sub := range cmd.Commands { + walk(name+" "+sub.Name(), sub) + } + return + } + if !cmd.Runnable() { + return + } + if cmd.CustomFlags { + if !strings.Contains(string(script), "# "+name+"\n") { + t.Errorf("%s has custom flags, not tested in testdata/script/chdir.txt", name) + } + return + } + f := cmd.Flag.Lookup("C") + if f == nil { + t.Errorf("%s has no -C flag", name) + } else if f.Usage != "AddChdirFlag" { + t.Errorf("%s has -C flag but not from AddChdirFlag", name) + } + } + walk("go", base.Go) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/export_test.go b/platform/dbops/binaries/go/go/src/cmd/go/export_test.go new file mode 100644 index 0000000000000000000000000000000000000000..155ab8c1bbeb19f9ec81b3f5912c96fbc23c83eb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/export_test.go @@ -0,0 +1,7 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func Main() { main() } diff --git a/platform/dbops/binaries/go/go/src/cmd/go/go11.go b/platform/dbops/binaries/go/go/src/cmd/go/go11.go new file mode 100644 index 0000000000000000000000000000000000000000..9faa7cba42e097b094ca17d41c47dbbb5f54ba62 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/go11.go @@ -0,0 +1,10 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.1 + +package main + +// Test that go1.1 tag above is included in builds. main.go refers to this definition. +const go11tag = true diff --git a/platform/dbops/binaries/go/go/src/cmd/go/go_boring_test.go b/platform/dbops/binaries/go/go/src/cmd/go/go_boring_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ed0fbf3d53d75b19133c3b9f59c1d1b8fb4cc8cc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/go_boring_test.go @@ -0,0 +1,22 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build boringcrypto + +package main_test + +import "testing" + +func TestBoringInternalLink(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempFile("main.go", `package main + import "crypto/sha1" + func main() { + sha1.New() + }`) + tg.run("build", "-ldflags=-w -extld=false", tg.path("main.go")) + tg.run("build", "-ldflags=-extld=false", tg.path("main.go")) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/go_test.go b/platform/dbops/binaries/go/go/src/cmd/go/go_test.go new file mode 100644 index 0000000000000000000000000000000000000000..32822950f10a42f9515be05565144e2f8bad9961 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/go_test.go @@ -0,0 +1,2803 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main_test + +import ( + "bytes" + "debug/elf" + "debug/macho" + "debug/pe" + "encoding/binary" + "flag" + "fmt" + "go/format" + "internal/godebug" + "internal/platform" + "internal/testenv" + "io" + "io/fs" + "log" + "math" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "testing" + "time" + + "cmd/go/internal/base" + "cmd/go/internal/cache" + "cmd/go/internal/cfg" + "cmd/go/internal/gover" + "cmd/go/internal/robustio" + "cmd/go/internal/search" + "cmd/go/internal/toolchain" + "cmd/go/internal/vcs" + "cmd/go/internal/vcweb/vcstest" + "cmd/go/internal/web" + "cmd/go/internal/work" + "cmd/internal/sys" + + cmdgo "cmd/go" +) + +func init() { + // GOVCS defaults to public:git|hg,private:all, + // which breaks many tests here - they can't use non-git, non-hg VCS at all! + // Change to fully permissive. + // The tests of the GOVCS setting itself are in ../../testdata/script/govcs.txt. 
+ os.Setenv("GOVCS", "*:all") +} + +var ( + canRace = false // whether we can run the race detector + canMSan = false // whether we can run the memory sanitizer + canASan = false // whether we can run the address sanitizer +) + +var ( + goHostOS, goHostArch string + cgoEnabled string // raw value from 'go env CGO_ENABLED' +) + +// netTestSem is a semaphore limiting the number of tests that may use the +// external network in parallel. If non-nil, it contains one buffer slot per +// test (send to acquire), with a low enough limit that the overall number of +// connections (summed across subprocesses) stays at or below base.NetLimit. +var netTestSem chan struct{} + +var exeSuffix string = func() string { + if runtime.GOOS == "windows" { + return ".exe" + } + return "" +}() + +func tooSlow(t *testing.T, reason string) { + if testing.Short() { + t.Helper() + t.Skipf("skipping test in -short mode: %s", reason) + } +} + +// testGOROOT is the GOROOT to use when running testgo, a cmd/go binary +// build from this process's current GOROOT, but run from a different +// (temp) directory. +var testGOROOT string + +// testGOROOT_FINAL is the GOROOT_FINAL with which the test binary is assumed to +// have been built. +var testGOROOT_FINAL = os.Getenv("GOROOT_FINAL") + +var testGOCACHE string + +var testGo string +var testTmpDir string +var testBin string + +// The TestMain function creates a go command for testing purposes and +// deletes it after the tests have been run. +func TestMain(m *testing.M) { + // When CMDGO_TEST_RUN_MAIN is set, we're reusing the test binary as cmd/go. + // Enable the special behavior needed in cmd/go/internal/work, + // run the main func exported via export_test.go, and exit. + // We set CMDGO_TEST_RUN_MAIN via os.Setenv and testScript.setup. 
+ if os.Getenv("CMDGO_TEST_RUN_MAIN") != "" { + cfg.SetGOROOT(cfg.GOROOT, true) + gover.TestVersion = os.Getenv("TESTGO_VERSION") + toolchain.TestVersionSwitch = os.Getenv("TESTGO_VERSION_SWITCH") + if v := os.Getenv("TESTGO_TOOLCHAIN_VERSION"); v != "" { + work.ToolchainVersion = v + } + + if testGOROOT := os.Getenv("TESTGO_GOROOT"); testGOROOT != "" { + // Disallow installs to the GOROOT from which testgo was built. + // Installs to other GOROOTs — such as one set explicitly within a test — are ok. + work.AllowInstall = func(a *work.Action) error { + if cfg.BuildN { + return nil + } + + rel := search.InDir(a.Target, testGOROOT) + if rel == "" { + return nil + } + + callerPos := "" + if _, file, line, ok := runtime.Caller(1); ok { + if shortFile := search.InDir(file, filepath.Join(testGOROOT, "src")); shortFile != "" { + file = shortFile + } + callerPos = fmt.Sprintf("%s:%d: ", file, line) + } + notice := "This error error can occur if GOROOT is stale, in which case rerunning make.bash will fix it." 
+ return fmt.Errorf("%stestgo must not write to GOROOT (installing to %s) (%v)", callerPos, filepath.Join("GOROOT", rel), notice) + } + } + + if vcsTestHost := os.Getenv("TESTGO_VCSTEST_HOST"); vcsTestHost != "" { + vcs.VCSTestRepoURL = "http://" + vcsTestHost + vcs.VCSTestHosts = vcstest.Hosts + vcsTestTLSHost := os.Getenv("TESTGO_VCSTEST_TLS_HOST") + vcsTestClient, err := vcstest.TLSClient(os.Getenv("TESTGO_VCSTEST_CERT")) + if err != nil { + fmt.Fprintf(os.Stderr, "loading certificates from $TESTGO_VCSTEST_CERT: %v", err) + } + var interceptors []web.Interceptor + for _, host := range vcstest.Hosts { + interceptors = append(interceptors, + web.Interceptor{Scheme: "http", FromHost: host, ToHost: vcsTestHost}, + web.Interceptor{Scheme: "https", FromHost: host, ToHost: vcsTestTLSHost, Client: vcsTestClient}) + } + web.EnableTestHooks(interceptors) + } + + cmdgo.Main() + os.Exit(0) + } + os.Setenv("CMDGO_TEST_RUN_MAIN", "true") + + // $GO_GCFLAGS a compiler debug flag known to cmd/dist, make.bash, etc. + // It is not a standard go command flag; use os.Getenv, not cfg.Getenv. + if os.Getenv("GO_GCFLAGS") != "" { + fmt.Fprintf(os.Stderr, "testing: warning: no tests to run\n") // magic string for cmd/go + fmt.Printf("cmd/go test is not compatible with $GO_GCFLAGS being set\n") + fmt.Printf("SKIP\n") + return + } + + flag.Parse() + + if *proxyAddr != "" { + StartProxy() + select {} + } + + // Run with a temporary TMPDIR to check that the tests don't + // leave anything behind. 
+ topTmpdir, err := os.MkdirTemp("", "cmd-go-test-") + if err != nil { + log.Fatal(err) + } + if !*testWork { + defer removeAll(topTmpdir) + } else { + fmt.Fprintf(os.Stderr, "TESTWORK: preserving top level tempdir %s\n", topTmpdir) + } + os.Setenv(tempEnvName(), topTmpdir) + + dir, err := os.MkdirTemp(topTmpdir, "tmpdir") + if err != nil { + log.Fatal(err) + } + testTmpDir = dir + if !*testWork { + defer removeAll(testTmpDir) + } + + testGOCACHE = cache.DefaultDir() + if testenv.HasGoBuild() { + testBin = filepath.Join(testTmpDir, "testbin") + if err := os.Mkdir(testBin, 0777); err != nil { + log.Fatal(err) + } + testGo = filepath.Join(testBin, "go"+exeSuffix) + gotool, err := testenv.GoTool() + if err != nil { + fmt.Fprintln(os.Stderr, "locating go tool: ", err) + os.Exit(2) + } + + goEnv := func(name string) string { + out, err := exec.Command(gotool, "env", name).CombinedOutput() + if err != nil { + fmt.Fprintf(os.Stderr, "go env %s: %v\n%s", name, err, out) + os.Exit(2) + } + return strings.TrimSpace(string(out)) + } + testGOROOT = goEnv("GOROOT") + os.Setenv("TESTGO_GOROOT", testGOROOT) + // Ensure that GOROOT is set explicitly. + // Otherwise, if the toolchain was built with GOROOT_FINAL set but has not + // yet been moved to its final location, programs that invoke runtime.GOROOT + // may accidentally use the wrong path. + os.Setenv("GOROOT", testGOROOT) + + // The whole GOROOT/pkg tree was installed using the GOHOSTOS/GOHOSTARCH + // toolchain (installed in GOROOT/pkg/tool/GOHOSTOS_GOHOSTARCH). + // The testgo.exe we are about to create will be built for GOOS/GOARCH, + // which means it will use the GOOS/GOARCH toolchain + // (installed in GOROOT/pkg/tool/GOOS_GOARCH). 
+ // If these are not the same toolchain, then the entire standard library + // will look out of date (the compilers in those two different tool directories + // are built for different architectures and have different build IDs), + // which will cause many tests to do unnecessary rebuilds and some + // tests to attempt to overwrite the installed standard library. + // Bail out entirely in this case. + goHostOS = goEnv("GOHOSTOS") + os.Setenv("TESTGO_GOHOSTOS", goHostOS) + goHostArch = goEnv("GOHOSTARCH") + os.Setenv("TESTGO_GOHOSTARCH", goHostArch) + + cgoEnabled = goEnv("CGO_ENABLED") + + // Duplicate the test executable into the path at testGo, for $PATH. + // If the OS supports symlinks, use them instead of copying bytes. + testExe, err := os.Executable() + if err != nil { + log.Fatal(err) + } + if err := os.Symlink(testExe, testGo); err != nil { + // Otherwise, copy the bytes. + src, err := os.Open(testExe) + if err != nil { + log.Fatal(err) + } + defer src.Close() + + dst, err := os.OpenFile(testGo, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o777) + if err != nil { + log.Fatal(err) + } + + _, err = io.Copy(dst, src) + if closeErr := dst.Close(); err == nil { + err = closeErr + } + if err != nil { + log.Fatal(err) + } + } + + out, err := exec.Command(gotool, "env", "GOCACHE").CombinedOutput() + if err != nil { + fmt.Fprintf(os.Stderr, "could not find testing GOCACHE: %v\n%s", err, out) + os.Exit(2) + } + testGOCACHE = strings.TrimSpace(string(out)) + + canMSan = testenv.HasCGO() && platform.MSanSupported(runtime.GOOS, runtime.GOARCH) + canASan = testenv.HasCGO() && platform.ASanSupported(runtime.GOOS, runtime.GOARCH) + canRace = testenv.HasCGO() && platform.RaceDetectorSupported(runtime.GOOS, runtime.GOARCH) + // The race detector doesn't work on Alpine Linux: + // golang.org/issue/14481 + // gccgo does not support the race detector. 
+ if isAlpineLinux() || runtime.Compiler == "gccgo" { + canRace = false + } + } + + if n, limited := base.NetLimit(); limited && n > 0 { + // Split the network limit into chunks, so that each parallel script can + // have one chunk. We want to run as many parallel scripts as possible, but + // also want to give each script as high a limit as possible. + // We arbitrarily split by sqrt(n) to try to balance those two goals. + netTestLimit := int(math.Sqrt(float64(n))) + netTestSem = make(chan struct{}, netTestLimit) + reducedLimit := fmt.Sprintf(",%s=%d", base.NetLimitGodebug.Name(), n/netTestLimit) + os.Setenv("GODEBUG", os.Getenv("GODEBUG")+reducedLimit) + } + + // Don't let these environment variables confuse the test. + os.Setenv("GOENV", "off") + os.Unsetenv("GOFLAGS") + os.Unsetenv("GOBIN") + os.Unsetenv("GOPATH") + os.Unsetenv("GIT_ALLOW_PROTOCOL") + os.Setenv("HOME", "/test-go-home-does-not-exist") + // On some systems the default C compiler is ccache. + // Setting HOME to a non-existent directory will break + // those systems. Disable ccache and use real compiler. Issue 17668. + os.Setenv("CCACHE_DISABLE", "1") + if cfg.Getenv("GOCACHE") == "" { + os.Setenv("GOCACHE", testGOCACHE) // because $HOME is gone + } + + if testenv.Builder() != "" || os.Getenv("GIT_TRACE_CURL") == "1" { + // To help diagnose https://go.dev/issue/52545, + // enable tracing for Git HTTPS requests. + os.Setenv("GIT_TRACE_CURL", "1") + os.Setenv("GIT_TRACE_CURL_NO_DATA", "1") + os.Setenv("GIT_REDACT_COOKIES", "o,SSO,GSSO_Uberproxy") + } + + r := m.Run() + if !*testWork { + removeAll(testTmpDir) // os.Exit won't run defer + } + + if !*testWork { + // There shouldn't be anything left in topTmpdir. 
+ var extraFiles, extraDirs []string + err := filepath.WalkDir(topTmpdir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if path == topTmpdir { + return nil + } + + if rel, err := filepath.Rel(topTmpdir, path); err == nil { + path = rel + } + if d.IsDir() { + extraDirs = append(extraDirs, path) + } else { + extraFiles = append(extraFiles, path) + } + return nil + }) + if err != nil { + log.Fatal(err) + } + + if len(extraFiles) > 0 { + log.Fatalf("unexpected files left in tmpdir: %q", extraFiles) + } else if len(extraDirs) > 0 { + log.Fatalf("unexpected subdirectories left in tmpdir: %q", extraDirs) + } + + removeAll(topTmpdir) + } + + os.Exit(r) +} + +func isAlpineLinux() bool { + if runtime.GOOS != "linux" { + return false + } + fi, err := os.Lstat("/etc/alpine-release") + return err == nil && fi.Mode().IsRegular() +} + +// The length of an mtime tick on this system. This is an estimate of +// how long we need to sleep to ensure that the mtime of two files is +// different. +// We used to try to be clever but that didn't always work (see golang.org/issue/12205). +var mtimeTick time.Duration = 1 * time.Second + +// Manage a single run of the testgo binary. +type testgoData struct { + t *testing.T + temps []string + env []string + tempdir string + ran bool + inParallel bool + stdout, stderr bytes.Buffer + execDir string // dir for tg.run +} + +// skipIfGccgo skips the test if using gccgo. +func skipIfGccgo(t *testing.T, msg string) { + if runtime.Compiler == "gccgo" { + t.Skipf("skipping test not supported on gccgo: %s", msg) + } +} + +// testgo sets up for a test that runs testgo. +func testgo(t *testing.T) *testgoData { + t.Helper() + testenv.MustHaveGoBuild(t) + testenv.SkipIfShortAndSlow(t) + + return &testgoData{t: t} +} + +// must gives a fatal error if err is not nil. 
+func (tg *testgoData) must(err error) { + tg.t.Helper() + if err != nil { + tg.t.Fatal(err) + } +} + +// check gives a test non-fatal error if err is not nil. +func (tg *testgoData) check(err error) { + tg.t.Helper() + if err != nil { + tg.t.Error(err) + } +} + +// parallel runs the test in parallel by calling t.Parallel. +func (tg *testgoData) parallel() { + tg.t.Helper() + if tg.ran { + tg.t.Fatal("internal testsuite error: call to parallel after run") + } + for _, e := range tg.env { + if strings.HasPrefix(e, "GOROOT=") || strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") { + val := e[strings.Index(e, "=")+1:] + if strings.HasPrefix(val, "testdata") || strings.HasPrefix(val, "./testdata") { + tg.t.Fatalf("internal testsuite error: call to parallel with testdata in environment (%s)", e) + } + } + } + tg.inParallel = true + tg.t.Parallel() +} + +// pwd returns the current directory. +func (tg *testgoData) pwd() string { + tg.t.Helper() + wd, err := os.Getwd() + if err != nil { + tg.t.Fatalf("could not get working directory: %v", err) + } + return wd +} + +// sleep sleeps for one tick, where a tick is a conservative estimate +// of how long it takes for a file modification to get a different +// mtime. +func (tg *testgoData) sleep() { + time.Sleep(mtimeTick) +} + +// setenv sets an environment variable to use when running the test go +// command. +func (tg *testgoData) setenv(name, val string) { + tg.t.Helper() + tg.unsetenv(name) + tg.env = append(tg.env, name+"="+val) +} + +// unsetenv removes an environment variable. +func (tg *testgoData) unsetenv(name string) { + if tg.env == nil { + tg.env = append([]string(nil), os.Environ()...) + tg.env = append(tg.env, "GO111MODULE=off", "TESTGONETWORK=panic") + if testing.Short() { + tg.env = append(tg.env, "TESTGOVCS=panic") + } + } + for i, v := range tg.env { + if strings.HasPrefix(v, name+"=") { + tg.env = append(tg.env[:i], tg.env[i+1:]...) 
+ break + } + } +} + +func (tg *testgoData) goTool() string { + return testGo +} + +// doRun runs the test go command, recording stdout and stderr and +// returning exit status. +func (tg *testgoData) doRun(args []string) error { + tg.t.Helper() + if tg.inParallel { + for _, arg := range args { + if strings.HasPrefix(arg, "testdata") || strings.HasPrefix(arg, "./testdata") { + tg.t.Fatal("internal testsuite error: parallel run using testdata") + } + } + } + + hasGoroot := false + for _, v := range tg.env { + if strings.HasPrefix(v, "GOROOT=") { + hasGoroot = true + break + } + } + prog := tg.goTool() + if !hasGoroot { + tg.setenv("GOROOT", testGOROOT) + } + + tg.t.Logf("running testgo %v", args) + cmd := testenv.Command(tg.t, prog, args...) + tg.stdout.Reset() + tg.stderr.Reset() + cmd.Dir = tg.execDir + cmd.Stdout = &tg.stdout + cmd.Stderr = &tg.stderr + cmd.Env = tg.env + status := cmd.Run() + if tg.stdout.Len() > 0 { + tg.t.Log("standard output:") + tg.t.Log(tg.stdout.String()) + } + if tg.stderr.Len() > 0 { + tg.t.Log("standard error:") + tg.t.Log(tg.stderr.String()) + } + tg.ran = true + return status +} + +// run runs the test go command, and expects it to succeed. +func (tg *testgoData) run(args ...string) { + tg.t.Helper() + if status := tg.doRun(args); status != nil { + wd, _ := os.Getwd() + tg.t.Logf("go %v failed unexpectedly in %s: %v", args, wd, status) + tg.t.FailNow() + } +} + +// runFail runs the test go command, and expects it to fail. +func (tg *testgoData) runFail(args ...string) { + tg.t.Helper() + if status := tg.doRun(args); status == nil { + tg.t.Fatal("testgo succeeded unexpectedly") + } else { + tg.t.Log("testgo failed as expected:", status) + } +} + +// getStdout returns standard output of the testgo run as a string. 
+func (tg *testgoData) getStdout() string { + tg.t.Helper() + if !tg.ran { + tg.t.Fatal("internal testsuite error: stdout called before run") + } + return tg.stdout.String() +} + +// getStderr returns standard error of the testgo run as a string. +func (tg *testgoData) getStderr() string { + tg.t.Helper() + if !tg.ran { + tg.t.Fatal("internal testsuite error: stdout called before run") + } + return tg.stderr.String() +} + +// doGrepMatch looks for a regular expression in a buffer, and returns +// whether it is found. The regular expression is matched against +// each line separately, as with the grep command. +func (tg *testgoData) doGrepMatch(match string, b *bytes.Buffer) bool { + tg.t.Helper() + if !tg.ran { + tg.t.Fatal("internal testsuite error: grep called before run") + } + re := regexp.MustCompile(match) + for _, ln := range bytes.Split(b.Bytes(), []byte{'\n'}) { + if re.Match(ln) { + return true + } + } + return false +} + +// doGrep looks for a regular expression in a buffer and fails if it +// is not found. The name argument is the name of the output we are +// searching, "output" or "error". The msg argument is logged on +// failure. +func (tg *testgoData) doGrep(match string, b *bytes.Buffer, name, msg string) { + tg.t.Helper() + if !tg.doGrepMatch(match, b) { + tg.t.Log(msg) + tg.t.Logf("pattern %v not found in standard %s", match, name) + tg.t.FailNow() + } +} + +// grepStdout looks for a regular expression in the test run's +// standard output and fails, logging msg, if it is not found. +func (tg *testgoData) grepStdout(match, msg string) { + tg.t.Helper() + tg.doGrep(match, &tg.stdout, "output", msg) +} + +// grepStderr looks for a regular expression in the test run's +// standard error and fails, logging msg, if it is not found. 
+func (tg *testgoData) grepStderr(match, msg string) { + tg.t.Helper() + tg.doGrep(match, &tg.stderr, "error", msg) +} + +// grepBoth looks for a regular expression in the test run's standard +// output or stand error and fails, logging msg, if it is not found. +func (tg *testgoData) grepBoth(match, msg string) { + tg.t.Helper() + if !tg.doGrepMatch(match, &tg.stdout) && !tg.doGrepMatch(match, &tg.stderr) { + tg.t.Log(msg) + tg.t.Logf("pattern %v not found in standard output or standard error", match) + tg.t.FailNow() + } +} + +// doGrepNot looks for a regular expression in a buffer and fails if +// it is found. The name and msg arguments are as for doGrep. +func (tg *testgoData) doGrepNot(match string, b *bytes.Buffer, name, msg string) { + tg.t.Helper() + if tg.doGrepMatch(match, b) { + tg.t.Log(msg) + tg.t.Logf("pattern %v found unexpectedly in standard %s", match, name) + tg.t.FailNow() + } +} + +// grepStdoutNot looks for a regular expression in the test run's +// standard output and fails, logging msg, if it is found. +func (tg *testgoData) grepStdoutNot(match, msg string) { + tg.t.Helper() + tg.doGrepNot(match, &tg.stdout, "output", msg) +} + +// grepStderrNot looks for a regular expression in the test run's +// standard error and fails, logging msg, if it is found. +func (tg *testgoData) grepStderrNot(match, msg string) { + tg.t.Helper() + tg.doGrepNot(match, &tg.stderr, "error", msg) +} + +// grepBothNot looks for a regular expression in the test run's +// standard output or standard error and fails, logging msg, if it is +// found. +func (tg *testgoData) grepBothNot(match, msg string) { + tg.t.Helper() + if tg.doGrepMatch(match, &tg.stdout) || tg.doGrepMatch(match, &tg.stderr) { + tg.t.Log(msg) + tg.t.Fatalf("pattern %v found unexpectedly in standard output or standard error", match) + } +} + +// doGrepCount counts the number of times a regexp is seen in a buffer. 
+func (tg *testgoData) doGrepCount(match string, b *bytes.Buffer) int { + tg.t.Helper() + if !tg.ran { + tg.t.Fatal("internal testsuite error: doGrepCount called before run") + } + re := regexp.MustCompile(match) + c := 0 + for _, ln := range bytes.Split(b.Bytes(), []byte{'\n'}) { + if re.Match(ln) { + c++ + } + } + return c +} + +// grepCountBoth returns the number of times a regexp is seen in both +// standard output and standard error. +func (tg *testgoData) grepCountBoth(match string) int { + tg.t.Helper() + return tg.doGrepCount(match, &tg.stdout) + tg.doGrepCount(match, &tg.stderr) +} + +// creatingTemp records that the test plans to create a temporary file +// or directory. If the file or directory exists already, it will be +// removed. When the test completes, the file or directory will be +// removed if it exists. +func (tg *testgoData) creatingTemp(path string) { + tg.t.Helper() + if filepath.IsAbs(path) && !strings.HasPrefix(path, tg.tempdir) { + tg.t.Fatalf("internal testsuite error: creatingTemp(%q) with absolute path not in temporary directory", path) + } + tg.must(robustio.RemoveAll(path)) + tg.temps = append(tg.temps, path) +} + +// makeTempdir makes a temporary directory for a run of testgo. If +// the temporary directory was already created, this does nothing. +func (tg *testgoData) makeTempdir() { + tg.t.Helper() + if tg.tempdir == "" { + var err error + tg.tempdir, err = os.MkdirTemp("", "gotest") + tg.must(err) + } +} + +// tempFile adds a temporary file for a run of testgo. +func (tg *testgoData) tempFile(path, contents string) { + tg.t.Helper() + tg.makeTempdir() + tg.must(os.MkdirAll(filepath.Join(tg.tempdir, filepath.Dir(path)), 0755)) + bytes := []byte(contents) + if strings.HasSuffix(path, ".go") { + formatted, err := format.Source(bytes) + if err == nil { + bytes = formatted + } + } + tg.must(os.WriteFile(filepath.Join(tg.tempdir, path), bytes, 0644)) +} + +// tempDir adds a temporary directory for a run of testgo. 
+func (tg *testgoData) tempDir(path string) { + tg.t.Helper() + tg.makeTempdir() + if err := os.MkdirAll(filepath.Join(tg.tempdir, path), 0755); err != nil && !os.IsExist(err) { + tg.t.Fatal(err) + } +} + +// path returns the absolute pathname to file with the temporary +// directory. +func (tg *testgoData) path(name string) string { + tg.t.Helper() + if tg.tempdir == "" { + tg.t.Fatalf("internal testsuite error: path(%q) with no tempdir", name) + } + if name == "." { + return tg.tempdir + } + return filepath.Join(tg.tempdir, name) +} + +// mustExist fails if path does not exist. +func (tg *testgoData) mustExist(path string) { + tg.t.Helper() + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + tg.t.Fatalf("%s does not exist but should", path) + } + tg.t.Fatalf("%s stat failed: %v", path, err) + } +} + +// mustNotExist fails if path exists. +func (tg *testgoData) mustNotExist(path string) { + tg.t.Helper() + if _, err := os.Stat(path); err == nil || !os.IsNotExist(err) { + tg.t.Fatalf("%s exists but should not (%v)", path, err) + } +} + +// wantExecutable fails with msg if path is not executable. +func (tg *testgoData) wantExecutable(path, msg string) { + tg.t.Helper() + if st, err := os.Stat(path); err != nil { + if !os.IsNotExist(err) { + tg.t.Log(err) + } + tg.t.Fatal(msg) + } else { + if runtime.GOOS != "windows" && st.Mode()&0111 == 0 { + tg.t.Fatalf("binary %s exists but is not executable", path) + } + } +} + +// isStale reports whether pkg is stale, and why +func (tg *testgoData) isStale(pkg string) (bool, string) { + tg.t.Helper() + tg.run("list", "-f", "{{.Stale}}:{{.StaleReason}}", pkg) + v := strings.TrimSpace(tg.getStdout()) + f := strings.SplitN(v, ":", 2) + if len(f) == 2 { + switch f[0] { + case "true": + return true, f[1] + case "false": + return false, f[1] + } + } + tg.t.Fatalf("unexpected output checking staleness of package %v: %v", pkg, v) + panic("unreachable") +} + +// wantStale fails with msg if pkg is not stale. 
+func (tg *testgoData) wantStale(pkg, reason, msg string) { + tg.t.Helper() + stale, why := tg.isStale(pkg) + if !stale { + tg.t.Fatal(msg) + } + // We always accept the reason as being "not installed but + // available in build cache", because when that is the case go + // list doesn't try to sort out the underlying reason why the + // package is not installed. + if reason == "" && why != "" || !strings.Contains(why, reason) && !strings.Contains(why, "not installed but available in build cache") { + tg.t.Errorf("wrong reason for Stale=true: %q, want %q", why, reason) + } +} + +// wantNotStale fails with msg if pkg is stale. +func (tg *testgoData) wantNotStale(pkg, reason, msg string) { + tg.t.Helper() + stale, why := tg.isStale(pkg) + if stale { + tg.t.Fatal(msg) + } + if reason == "" && why != "" || !strings.Contains(why, reason) { + tg.t.Errorf("wrong reason for Stale=false: %q, want %q", why, reason) + } +} + +// If -testwork is specified, the test prints the name of the temp directory +// and does not remove it when done, so that a programmer can +// poke at the test file tree afterward. +var testWork = flag.Bool("testwork", false, "") + +// cleanup cleans up a test that runs testgo. +func (tg *testgoData) cleanup() { + tg.t.Helper() + if *testWork { + if tg.tempdir != "" { + tg.t.Logf("TESTWORK=%s\n", tg.path(".")) + } + return + } + for _, path := range tg.temps { + tg.check(removeAll(path)) + } + if tg.tempdir != "" { + tg.check(removeAll(tg.tempdir)) + } +} + +func removeAll(dir string) error { + // module cache has 0444 directories; + // make them writable in order to remove content. + filepath.WalkDir(dir, func(path string, info fs.DirEntry, err error) error { + // chmod not only directories, but also things that we couldn't even stat + // due to permission errors: they may also be unreadable directories. 
+ if err != nil || info.IsDir() { + os.Chmod(path, 0777) + } + return nil + }) + return robustio.RemoveAll(dir) +} + +func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) { + if testing.Short() { + t.Skip("skipping lengthy test in short mode") + } + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + + // Set GOCACHE to an empty directory so that a previous run of + // this test does not affect the staleness of the packages it builds. + tg.tempDir("gocache") + tg.setenv("GOCACHE", tg.path("gocache")) + + // Copy the runtime packages into a temporary GOROOT + // so that we can change files. + var dirs []string + tg.run("list", "-deps", "runtime") + pkgs := strings.Split(strings.TrimSpace(tg.getStdout()), "\n") + for _, pkg := range pkgs { + dirs = append(dirs, filepath.Join("src", pkg)) + } + dirs = append(dirs, + filepath.Join("pkg/tool", goHostOS+"_"+goHostArch), + "pkg/include", + ) + for _, copydir := range dirs { + srcdir := filepath.Join(testGOROOT, copydir) + tg.tempDir(filepath.Join("goroot", copydir)) + err := filepath.WalkDir(srcdir, + func(path string, info fs.DirEntry, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + srcrel, err := filepath.Rel(srcdir, path) + if err != nil { + return err + } + dest := filepath.Join("goroot", copydir, srcrel) + if _, err := os.Stat(dest); err == nil { + return nil + } + data, err := os.ReadFile(path) + if err != nil { + return err + } + tg.tempFile(dest, string(data)) + if strings.Contains(copydir, filepath.Join("pkg", "tool")) { + os.Chmod(tg.path(dest), 0777) + } + return nil + }) + if err != nil { + t.Fatal(err) + } + } + tg.setenv("GOROOT", tg.path("goroot")) + + addVar := func(name string, idx int) (restore func()) { + data, err := os.ReadFile(name) + if err != nil { + t.Fatal(err) + } + old := data + data = append(data, fmt.Sprintf("var DummyUnusedVar%d bool\n", idx)...) 
+ if err := os.WriteFile(name, append(data, '\n'), 0666); err != nil { + t.Fatal(err) + } + tg.sleep() + return func() { + if err := os.WriteFile(name, old, 0666); err != nil { + t.Fatal(err) + } + } + } + + // Every main package depends on the "runtime". + tg.tempFile("d1/src/p1/p1.go", `package main; func main(){}`) + tg.setenv("GOPATH", tg.path("d1")) + // Pass -i flag to rebuild everything outdated. + tg.run("install", "p1") + tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, before any changes") + + // Changing mtime of runtime/internal/sys/sys.go + // should have no effect: only the content matters. + // In fact this should be true even outside a release branch. + sys := tg.path("goroot/src/runtime/internal/sys/sys.go") + tg.sleep() + restore := addVar(sys, 0) + restore() + tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after updating mtime of runtime/internal/sys/sys.go") + + // But changing content of any file should have an effect. + // Previously zversion.go was the only one that mattered; + // now they all matter, so keep using sys.go. + restore = addVar(sys, 1) + defer restore() + tg.wantStale("p1", "stale dependency: runtime/internal", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go") + restore() + tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after changing back to old release") + addVar(sys, 2) + tg.wantStale("p1", "stale dependency: runtime", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go again") + tg.run("install", "p1") + tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with new release") + + // Restore to "old" release. 
+ restore() + tg.wantStale("p1", "stale dependency: runtime/internal", "./testgo list claims p1 is NOT stale, incorrectly, after restoring sys.go") + tg.run("install", "p1") + tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with old release") +} + +func TestPackageMainTestCompilerFlags(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOPATH", tg.path(".")) + tg.tempFile("src/p1/p1.go", "package main\n") + tg.tempFile("src/p1/p1_test.go", "package main\nimport \"testing\"\nfunc Test(t *testing.T){}\n") + tg.run("test", "-c", "-n", "p1") + tg.grepBothNot(`([\\/]compile|gccgo).* (-p main|-fgo-pkgpath=main).*p1\.go`, "should not have run compile -p main p1.go") + tg.grepStderr(`([\\/]compile|gccgo).* (-p p1|-fgo-pkgpath=p1).*p1\.go`, "should have run compile -p p1 p1.go") +} + +// Issue 4104. +func TestGoTestWithPackageListedMultipleTimes(t *testing.T) { + tooSlow(t, "links and runs a test") + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.run("test", "errors", "errors", "errors", "errors", "errors") + if strings.Contains(strings.TrimSpace(tg.getStdout()), "\n") { + t.Error("go test errors errors errors errors errors tested the same package multiple times") + } +} + +func TestGoListHasAConsistentOrder(t *testing.T) { + tooSlow(t, "walks all of GOROOT/src twice") + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.run("list", "std") + first := tg.getStdout() + tg.run("list", "std") + if first != tg.getStdout() { + t.Error("go list std ordering is inconsistent") + } +} + +func TestGoListStdDoesNotIncludeCommands(t *testing.T) { + tooSlow(t, "walks all of GOROOT/src") + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.run("list", "std") + tg.grepStdoutNot("cmd/", "go list std shows commands") +} + +func TestGoListCmdOnlyShowsCommands(t *testing.T) { + skipIfGccgo(t, "gccgo does not have GOROOT") + tooSlow(t, "walks all of GOROOT/src/cmd") + + tg := testgo(t) 
+ defer tg.cleanup() + tg.parallel() + tg.run("list", "cmd") + out := strings.TrimSpace(tg.getStdout()) + for _, line := range strings.Split(out, "\n") { + if !strings.Contains(line, "cmd/") { + t.Error("go list cmd shows non-commands") + break + } + } +} + +func TestGoListDeps(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempDir("src/p1/p2/p3/p4") + tg.setenv("GOPATH", tg.path(".")) + tg.tempFile("src/p1/p.go", "package p1\nimport _ \"p1/p2\"\n") + tg.tempFile("src/p1/p2/p.go", "package p2\nimport _ \"p1/p2/p3\"\n") + tg.tempFile("src/p1/p2/p3/p.go", "package p3\nimport _ \"p1/p2/p3/p4\"\n") + tg.tempFile("src/p1/p2/p3/p4/p.go", "package p4\n") + tg.run("list", "-f", "{{.Deps}}", "p1") + tg.grepStdout("p1/p2/p3/p4", "Deps(p1) does not mention p4") + + tg.run("list", "-deps", "p1") + tg.grepStdout("p1/p2/p3/p4", "-deps p1 does not mention p4") + + if runtime.Compiler != "gccgo" { + // Check the list is in dependency order. + tg.run("list", "-deps", "math") + want := "internal/cpu\nunsafe\nmath/bits\nmath\n" + out := tg.stdout.String() + if !strings.Contains(out, "internal/cpu") { + // Some systems don't use internal/cpu. 
+ want = "unsafe\nmath/bits\nmath\n" + } + if tg.stdout.String() != want { + t.Fatalf("list -deps math: wrong order\nhave %q\nwant %q", tg.stdout.String(), want) + } + } +} + +func TestGoListTest(t *testing.T) { + skipIfGccgo(t, "gccgo does not have standard packages") + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOCACHE", tg.tempdir) + + tg.run("list", "-test", "-deps", "sort") + tg.grepStdout(`^sort.test$`, "missing test main") + tg.grepStdout(`^sort$`, "missing real sort") + tg.grepStdout(`^sort \[sort.test\]$`, "missing test copy of sort") + tg.grepStdout(`^testing \[sort.test\]$`, "missing test copy of testing") + tg.grepStdoutNot(`^testing$`, "unexpected real copy of testing") + + tg.run("list", "-test", "sort") + tg.grepStdout(`^sort.test$`, "missing test main") + tg.grepStdout(`^sort$`, "missing real sort") + tg.grepStdout(`^sort \[sort.test\]$`, "unexpected test copy of sort") + tg.grepStdoutNot(`^testing \[sort.test\]$`, "unexpected test copy of testing") + tg.grepStdoutNot(`^testing$`, "unexpected real copy of testing") + + tg.run("list", "-test", "cmd/buildid", "cmd/doc") + tg.grepStdout(`^cmd/buildid$`, "missing cmd/buildid") + tg.grepStdout(`^cmd/doc$`, "missing cmd/doc") + tg.grepStdout(`^cmd/doc\.test$`, "missing cmd/doc test") + tg.grepStdoutNot(`^cmd/buildid\.test$`, "unexpected cmd/buildid test") + tg.grepStdoutNot(`^testing`, "unexpected testing") + + tg.run("list", "-test", "runtime/cgo") + tg.grepStdout(`^runtime/cgo$`, "missing runtime/cgo") + + tg.run("list", "-deps", "-f", "{{if .DepOnly}}{{.ImportPath}}{{end}}", "sort") + tg.grepStdout(`^internal/reflectlite$`, "missing internal/reflectlite") + tg.grepStdoutNot(`^sort`, "unexpected sort") +} + +func TestGoListCompiledCgo(t *testing.T) { + tooSlow(t, "compiles cgo files") + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOCACHE", tg.tempdir) + + tg.run("list", "-f", `{{join .CgoFiles "\n"}}`, "net") + if 
tg.stdout.String() == "" { + t.Skip("net does not use cgo") + } + if strings.Contains(tg.stdout.String(), tg.tempdir) { + t.Fatalf(".CgoFiles unexpectedly mentioned cache %s", tg.tempdir) + } + tg.run("list", "-compiled", "-f", `{{.Dir}}{{"\n"}}{{join .CompiledGoFiles "\n"}}`, "net") + if !strings.Contains(tg.stdout.String(), tg.tempdir) { + t.Fatalf(".CompiledGoFiles with -compiled did not mention cache %s", tg.tempdir) + } + dir := "" + for _, file := range strings.Split(tg.stdout.String(), "\n") { + if file == "" { + continue + } + if dir == "" { + dir = file + continue + } + if !strings.Contains(file, "/") && !strings.Contains(file, `\`) { + file = filepath.Join(dir, file) + } + if _, err := os.Stat(file); err != nil { + t.Fatalf("cannot find .CompiledGoFiles result %s: %v", file, err) + } + } +} + +func TestGoListExport(t *testing.T) { + skipIfGccgo(t, "gccgo does not have standard packages") + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOCACHE", tg.tempdir) + + tg.run("list", "-f", "{{.Export}}", "strings") + if tg.stdout.String() != "" { + t.Fatalf(".Export without -export unexpectedly set") + } + tg.run("list", "-export", "-f", "{{.Export}}", "strings") + file := strings.TrimSpace(tg.stdout.String()) + if file == "" { + t.Fatalf(".Export with -export was empty") + } + if _, err := os.Stat(file); err != nil { + t.Fatalf("cannot find .Export result %s: %v", file, err) + } + + tg.run("list", "-export", "-f", "{{.BuildID}}", "strings") + buildID := strings.TrimSpace(tg.stdout.String()) + if buildID == "" { + t.Fatalf(".BuildID with -export was empty") + } + + tg.run("tool", "buildid", file) + toolBuildID := strings.TrimSpace(tg.stdout.String()) + if buildID != toolBuildID { + t.Fatalf(".BuildID with -export %q disagrees with 'go tool buildid' %q", buildID, toolBuildID) + } +} + +// Issue 4096. Validate the output of unsuccessful go install foo/quxx. 
+func TestUnsuccessfulGoInstallShouldMentionMissingPackage(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.runFail("install", "foo/quxx") + if tg.grepCountBoth(`cannot find package "foo/quxx" in any of`) != 1 { + t.Error(`go install foo/quxx expected error: .*cannot find package "foo/quxx" in any of`) + } +} + +func TestGOROOTSearchFailureReporting(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.runFail("install", "foo/quxx") + if tg.grepCountBoth(regexp.QuoteMeta(filepath.Join("foo", "quxx"))+` \(from \$GOROOT\)$`) != 1 { + t.Error(`go install foo/quxx expected error: .*foo/quxx (from $GOROOT)`) + } +} + +func TestMultipleGOPATHEntriesReportedSeparately(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + sep := string(filepath.ListSeparator) + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata", "a")+sep+filepath.Join(tg.pwd(), "testdata", "b")) + tg.runFail("install", "foo/quxx") + if tg.grepCountBoth(`testdata[/\\].[/\\]src[/\\]foo[/\\]quxx`) != 2 { + t.Error(`go install foo/quxx expected error: .*testdata/a/src/foo/quxx (from $GOPATH)\n.*testdata/b/src/foo/quxx`) + } +} + +// Test (from $GOPATH) annotation is reported for the first GOPATH entry, +func TestMentionGOPATHInFirstGOPATHEntry(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + sep := string(filepath.ListSeparator) + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata", "a")+sep+filepath.Join(tg.pwd(), "testdata", "b")) + tg.runFail("install", "foo/quxx") + if tg.grepCountBoth(regexp.QuoteMeta(filepath.Join("testdata", "a", "src", "foo", "quxx"))+` \(from \$GOPATH\)$`) != 1 { + t.Error(`go install foo/quxx expected error: .*testdata/a/src/foo/quxx (from $GOPATH)`) + } +} + +// but not on the second. 
+func TestMentionGOPATHNotOnSecondEntry(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + sep := string(filepath.ListSeparator) + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata", "a")+sep+filepath.Join(tg.pwd(), "testdata", "b")) + tg.runFail("install", "foo/quxx") + if tg.grepCountBoth(regexp.QuoteMeta(filepath.Join("testdata", "b", "src", "foo", "quxx"))+`$`) != 1 { + t.Error(`go install foo/quxx expected error: .*testdata/b/src/foo/quxx`) + } +} + +func homeEnvName() string { + switch runtime.GOOS { + case "windows": + return "USERPROFILE" + case "plan9": + return "home" + default: + return "HOME" + } +} + +func tempEnvName() string { + switch runtime.GOOS { + case "windows": + return "TMP" + case "plan9": + return "TMPDIR" // actually plan 9 doesn't have one at all but this is fine + default: + return "TMPDIR" + } +} + +func pathEnvName() string { + switch runtime.GOOS { + case "plan9": + return "path" + default: + return "PATH" + } +} + +func TestDefaultGOPATH(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempDir("home/go") + tg.setenv(homeEnvName(), tg.path("home")) + + tg.run("env", "GOPATH") + tg.grepStdout(regexp.QuoteMeta(tg.path("home/go")), "want GOPATH=$HOME/go") + + tg.setenv("GOROOT", tg.path("home/go")) + tg.run("env", "GOPATH") + tg.grepStdoutNot(".", "want unset GOPATH because GOROOT=$HOME/go") + + tg.setenv("GOROOT", tg.path("home/go")+"/") + tg.run("env", "GOPATH") + tg.grepStdoutNot(".", "want unset GOPATH because GOROOT=$HOME/go/") +} + +func TestDefaultGOPATHPrintedSearchList(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.setenv("GOPATH", "") + tg.tempDir("home") + tg.setenv(homeEnvName(), tg.path("home")) + + tg.runFail("install", "github.com/golang/example/hello") + tg.grepStderr(regexp.QuoteMeta(tg.path("home/go/src/github.com/golang/example/hello"))+`.*from \$GOPATH`, "expected default GOPATH") +} + +func TestLdflagsArgumentsWithSpacesIssue3941(t 
*testing.T) { + skipIfGccgo(t, "gccgo does not support -ldflags -X") + tooSlow(t, "compiles and links a binary") + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempFile("main.go", `package main + var extern string + func main() { + println(extern) + }`) + tg.run("run", "-ldflags", `-X "main.extern=hello world"`, tg.path("main.go")) + tg.grepStderr("^hello world", `ldflags -X "main.extern=hello world"' failed`) +} + +func TestLdFlagsLongArgumentsIssue42295(t *testing.T) { + // Test the extremely long command line arguments that contain '\n' characters + // get encoded and passed correctly. + skipIfGccgo(t, "gccgo does not support -ldflags -X") + tooSlow(t, "compiles and links a binary") + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempFile("main.go", `package main + var extern string + func main() { + print(extern) + }`) + testStr := "test test test test test \n\\ " + var buf strings.Builder + for buf.Len() < sys.ExecArgLengthLimit+1 { + buf.WriteString(testStr) + } + tg.run("run", "-ldflags", fmt.Sprintf(`-X "main.extern=%s"`, buf.String()), tg.path("main.go")) + if tg.stderr.String() != buf.String() { + t.Errorf("strings differ") + } +} + +func TestGoTestDashCDashOControlsBinaryLocation(t *testing.T) { + skipIfGccgo(t, "gccgo has no standard packages") + tooSlow(t, "compiles and links a test binary") + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.run("test", "-c", "-o", tg.path("myerrors.test"+exeSuffix), "errors") + tg.wantExecutable(tg.path("myerrors.test"+exeSuffix), "go test -c -o myerrors.test did not create myerrors.test") +} + +func TestGoTestDashOWritesBinary(t *testing.T) { + skipIfGccgo(t, "gccgo has no standard packages") + tooSlow(t, "compiles and runs a test binary") + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.run("test", "-o", tg.path("myerrors.test"+exeSuffix), "errors") + tg.wantExecutable(tg.path("myerrors.test"+exeSuffix), "go test -o 
myerrors.test did not create myerrors.test") +} + +// Issue 4515. +func TestInstallWithTags(t *testing.T) { + tooSlow(t, "compiles and links binaries") + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempDir("bin") + tg.tempFile("src/example/a/main.go", `package main + func main() {}`) + tg.tempFile("src/example/b/main.go", `// +build mytag + + package main + func main() {}`) + tg.setenv("GOPATH", tg.path(".")) + tg.run("install", "-tags", "mytag", "example/a", "example/b") + tg.wantExecutable(tg.path("bin/a"+exeSuffix), "go install example/a example/b did not install binaries") + tg.wantExecutable(tg.path("bin/b"+exeSuffix), "go install example/a example/b did not install binaries") + tg.must(os.Remove(tg.path("bin/a" + exeSuffix))) + tg.must(os.Remove(tg.path("bin/b" + exeSuffix))) + tg.run("install", "-tags", "mytag", "example/...") + tg.wantExecutable(tg.path("bin/a"+exeSuffix), "go install example/... did not install binaries") + tg.wantExecutable(tg.path("bin/b"+exeSuffix), "go install example/... did not install binaries") + tg.run("list", "-tags", "mytag", "example/b...") + if strings.TrimSpace(tg.getStdout()) != "example/b" { + t.Error("go list example/b did not find example/b") + } +} + +// Issue 17451, 17662. 
+func TestSymlinkWarning(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOPATH", tg.path(".")) + + tg.tempDir("src/example/xx") + tg.tempDir("yy/zz") + tg.tempFile("yy/zz/zz.go", "package zz\n") + if err := os.Symlink(tg.path("yy"), tg.path("src/example/xx/yy")); err != nil { + t.Skipf("symlink failed: %v", err) + } + tg.run("list", "example/xx/z...") + tg.grepStdoutNot(".", "list should not have matched anything") + tg.grepStderr("matched no packages", "list should have reported that pattern matched no packages") + tg.grepStderrNot("symlink", "list should not have reported symlink") + + tg.run("list", "example/xx/...") + tg.grepStdoutNot(".", "list should not have matched anything") + tg.grepStderr("matched no packages", "list should have reported that pattern matched no packages") + tg.grepStderr("ignoring symlink", "list should have reported symlink") +} + +func TestCgoShowsFullPathNames(t *testing.T) { + testenv.MustHaveCGO(t) + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempFile("src/x/y/dirname/foo.go", ` + package foo + import "C" + func f() {`) + tg.setenv("GOPATH", tg.path(".")) + tg.runFail("build", "x/y/dirname") + tg.grepBoth("x/y/dirname", "error did not use full path") +} + +func TestCgoHandlesWlORIGIN(t *testing.T) { + tooSlow(t, "compiles cgo files") + testenv.MustHaveCGO(t) + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempFile("src/origin/origin.go", `package origin + // #cgo !darwin,!windows LDFLAGS: -Wl,-rpath,$ORIGIN + // void f(void) {} + import "C" + func f() { C.f() }`) + tg.setenv("GOPATH", tg.path(".")) + tg.run("build", "origin") +} + +func TestCgoPkgConfig(t *testing.T) { + tooSlow(t, "compiles cgo files") + testenv.MustHaveCGO(t) + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + + tg.run("env", "PKG_CONFIG") + pkgConfig := strings.TrimSpace(tg.getStdout()) + testenv.MustHaveExecPath(t, pkgConfig) + if out, err := testenv.Command(t, 
pkgConfig, "--atleast-pkgconfig-version", "0.24").CombinedOutput(); err != nil { + t.Skipf("%s --atleast-pkgconfig-version 0.24: %v\n%s", pkgConfig, err, out) + } + + // OpenBSD's pkg-config is strict about whitespace and only + // supports backslash-escaped whitespace. It does not support + // quotes, which the normal freedesktop.org pkg-config does + // support. See https://man.openbsd.org/pkg-config.1 + tg.tempFile("foo.pc", ` +Name: foo +Description: The foo library +Version: 1.0.0 +Cflags: -Dhello=10 -Dworld=+32 -DDEFINED_FROM_PKG_CONFIG=hello\ world +`) + tg.tempFile("foo.go", `package main + +/* +#cgo pkg-config: foo +int value() { + return DEFINED_FROM_PKG_CONFIG; +} +*/ +import "C" +import "os" + +func main() { + if C.value() != 42 { + println("value() =", C.value(), "wanted 42") + os.Exit(1) + } +} +`) + tg.setenv("PKG_CONFIG_PATH", tg.path(".")) + tg.run("run", tg.path("foo.go")) + + // test for ldflags + tg.tempFile("bar.pc", ` +Name: bar +Description: The bar library +Version: 1.0.0 +Libs: -Wl,-rpath=/path\ with\ spaces/bin +`) + tg.tempFile("bar.go", `package main +/* +#cgo pkg-config: bar +*/ +import "C" +func main() {} +`) + tg.run("run", tg.path("bar.go")) +} + +func TestListTemplateContextFunction(t *testing.T) { + t.Parallel() + for _, tt := range []struct { + v string + want string + }{ + {"GOARCH", runtime.GOARCH}, + {"GOOS", runtime.GOOS}, + {"GOROOT", testGOROOT}, + {"GOPATH", os.Getenv("GOPATH")}, + {"CgoEnabled", ""}, + {"UseAllFiles", ""}, + {"Compiler", ""}, + {"BuildTags", ""}, + {"ReleaseTags", ""}, + {"InstallSuffix", ""}, + } { + tt := tt + t.Run(tt.v, func(t *testing.T) { + tg := testgo(t) + tg.parallel() + defer tg.cleanup() + tmpl := "{{context." 
+ tt.v + "}}" + tg.run("list", "-f", tmpl) + if tt.want == "" { + return + } + if got := strings.TrimSpace(tg.getStdout()); got != tt.want { + t.Errorf("go list -f %q: got %q; want %q", tmpl, got, tt.want) + } + }) + } +} + +// Test that you cannot use a local import in a package +// accessed by a non-local import (found in a GOPATH/GOROOT). +// See golang.org/issue/17475. +func TestImportLocal(t *testing.T) { + tooSlow(t, "builds a lot of sequential packages") + + tg := testgo(t) + tg.parallel() + defer tg.cleanup() + + tg.tempFile("src/dir/x/x.go", `package x + var X int + `) + tg.setenv("GOPATH", tg.path(".")) + tg.run("build", "dir/x") + + // Ordinary import should work. + tg.tempFile("src/dir/p0/p.go", `package p0 + import "dir/x" + var _ = x.X + `) + tg.run("build", "dir/p0") + + // Relative import should not. + tg.tempFile("src/dir/p1/p.go", `package p1 + import "../x" + var _ = x.X + `) + tg.runFail("build", "dir/p1") + tg.grepStderr("local import.*in non-local package", "did not diagnose local import") + + // ... even in a test. + tg.tempFile("src/dir/p2/p.go", `package p2 + `) + tg.tempFile("src/dir/p2/p_test.go", `package p2 + import "../x" + import "testing" + var _ = x.X + func TestFoo(t *testing.T) {} + `) + tg.run("build", "dir/p2") + tg.runFail("test", "dir/p2") + tg.grepStderr("local import.*in non-local package", "did not diagnose local import") + + // ... even in an xtest. + tg.tempFile("src/dir/p2/p_test.go", `package p2_test + import "../x" + import "testing" + var _ = x.X + func TestFoo(t *testing.T) {} + `) + tg.run("build", "dir/p2") + tg.runFail("test", "dir/p2") + tg.grepStderr("local import.*in non-local package", "did not diagnose local import") + + // Relative import starting with ./ should not work either. + tg.tempFile("src/dir/d.go", `package dir + import "./x" + var _ = x.X + `) + tg.runFail("build", "dir") + tg.grepStderr("local import.*in non-local package", "did not diagnose local import") + + // ... even in a test. 
+ tg.tempFile("src/dir/d.go", `package dir + `) + tg.tempFile("src/dir/d_test.go", `package dir + import "./x" + import "testing" + var _ = x.X + func TestFoo(t *testing.T) {} + `) + tg.run("build", "dir") + tg.runFail("test", "dir") + tg.grepStderr("local import.*in non-local package", "did not diagnose local import") + + // ... even in an xtest. + tg.tempFile("src/dir/d_test.go", `package dir_test + import "./x" + import "testing" + var _ = x.X + func TestFoo(t *testing.T) {} + `) + tg.run("build", "dir") + tg.runFail("test", "dir") + tg.grepStderr("local import.*in non-local package", "did not diagnose local import") + + // Relative import plain ".." should not work. + tg.tempFile("src/dir/x/y/y.go", `package dir + import ".." + var _ = x.X + `) + tg.runFail("build", "dir/x/y") + tg.grepStderr("local import.*in non-local package", "did not diagnose local import") + + // ... even in a test. + tg.tempFile("src/dir/x/y/y.go", `package y + `) + tg.tempFile("src/dir/x/y/y_test.go", `package y + import ".." + import "testing" + var _ = x.X + func TestFoo(t *testing.T) {} + `) + tg.run("build", "dir/x/y") + tg.runFail("test", "dir/x/y") + tg.grepStderr("local import.*in non-local package", "did not diagnose local import") + + // ... even in an x test. + tg.tempFile("src/dir/x/y/y_test.go", `package y_test + import ".." + import "testing" + var _ = x.X + func TestFoo(t *testing.T) {} + `) + tg.run("build", "dir/x/y") + tg.runFail("test", "dir/x/y") + tg.grepStderr("local import.*in non-local package", "did not diagnose local import") + + // Relative import "." should not work. + tg.tempFile("src/dir/x/xx.go", `package x + import "." + var _ = x.X + `) + tg.runFail("build", "dir/x") + tg.grepStderr("cannot import current directory", "did not diagnose import current directory") + + // ... even in a test. + tg.tempFile("src/dir/x/xx.go", `package x + `) + tg.tempFile("src/dir/x/xx_test.go", `package x + import "." 
+ import "testing" + var _ = x.X + func TestFoo(t *testing.T) {} + `) + tg.run("build", "dir/x") + tg.runFail("test", "dir/x") + tg.grepStderr("cannot import current directory", "did not diagnose import current directory") + + // ... even in an xtest. + tg.tempFile("src/dir/x/xx.go", `package x + `) + tg.tempFile("src/dir/x/xx_test.go", `package x_test + import "." + import "testing" + var _ = x.X + func TestFoo(t *testing.T) {} + `) + tg.run("build", "dir/x") + tg.runFail("test", "dir/x") + tg.grepStderr("cannot import current directory", "did not diagnose import current directory") +} + +func TestGoInstallPkgdir(t *testing.T) { + skipIfGccgo(t, "gccgo has no standard packages") + tooSlow(t, "builds a package with cgo dependencies") + // Only the stdlib packages that use cgo have install + // targets, (we're using net below) so cgo is required + // for the install. + testenv.MustHaveCGO(t) + + tg := testgo(t) + tg.parallel() + tg.setenv("GODEBUG", "installgoroot=all") + defer tg.cleanup() + tg.makeTempdir() + pkg := tg.path(".") + tg.run("install", "-pkgdir", pkg, "net") + tg.mustExist(filepath.Join(pkg, "net.a")) + tg.mustNotExist(filepath.Join(pkg, "runtime/cgo.a")) +} + +// For issue 14337. 
+func TestParallelTest(t *testing.T) { + tooSlow(t, "links and runs test binaries") + + tg := testgo(t) + tg.parallel() + defer tg.cleanup() + tg.makeTempdir() + const testSrc = `package package_test + import ( + "testing" + ) + func TestTest(t *testing.T) { + }` + tg.tempFile("src/p1/p1_test.go", strings.Replace(testSrc, "package_test", "p1_test", 1)) + tg.tempFile("src/p2/p2_test.go", strings.Replace(testSrc, "package_test", "p2_test", 1)) + tg.tempFile("src/p3/p3_test.go", strings.Replace(testSrc, "package_test", "p3_test", 1)) + tg.tempFile("src/p4/p4_test.go", strings.Replace(testSrc, "package_test", "p4_test", 1)) + tg.setenv("GOPATH", tg.path(".")) + tg.run("test", "-p=4", "p1", "p2", "p3", "p4") +} + +func TestBinaryOnlyPackages(t *testing.T) { + tooSlow(t, "compiles several packages sequentially") + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOPATH", tg.path(".")) + + tg.tempFile("src/p1/p1.go", `//go:binary-only-package + + package p1 + `) + tg.wantStale("p1", "binary-only packages are no longer supported", "p1 is binary-only, and this message should always be printed") + tg.runFail("install", "p1") + tg.grepStderr("binary-only packages are no longer supported", "did not report attempt to compile binary-only package") + + tg.tempFile("src/p1/p1.go", ` + package p1 + import "fmt" + func F(b bool) { fmt.Printf("hello from p1\n"); if b { F(false) } } + `) + tg.run("install", "p1") + os.Remove(tg.path("src/p1/p1.go")) + tg.mustNotExist(tg.path("src/p1/p1.go")) + + tg.tempFile("src/p2/p2.go", `//go:binary-only-packages-are-not-great + + package p2 + import "p1" + func F() { p1.F(true) } + `) + tg.runFail("install", "p2") + tg.grepStderr("no Go files", "did not complain about missing sources") + + tg.tempFile("src/p1/missing.go", `//go:binary-only-package + + package p1 + import _ "fmt" + func G() + `) + tg.wantStale("p1", "binary-only package", "should NOT want to rebuild p1 (first)") + tg.runFail("install", "p2") + 
tg.grepStderr("p1: binary-only packages are no longer supported", "did not report error for binary-only p1") + + tg.run("list", "-deps", "-f", "{{.ImportPath}}: {{.BinaryOnly}}", "p2") + tg.grepStdout("p1: true", "p1 not listed as BinaryOnly") + tg.grepStdout("p2: false", "p2 listed as BinaryOnly") +} + +// Issue 16050 and 21884. +func TestLinkSysoFiles(t *testing.T) { + if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" { + t.Skip("not linux/amd64") + } + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempDir("src/syso") + tg.tempFile("src/syso/a.syso", ``) + tg.tempFile("src/syso/b.go", `package syso`) + tg.setenv("GOPATH", tg.path(".")) + + // We should see the .syso file regardless of the setting of + // CGO_ENABLED. + + tg.setenv("CGO_ENABLED", "1") + tg.run("list", "-f", "{{.SysoFiles}}", "syso") + tg.grepStdout("a.syso", "missing syso file with CGO_ENABLED=1") + + tg.setenv("CGO_ENABLED", "0") + tg.run("list", "-f", "{{.SysoFiles}}", "syso") + tg.grepStdout("a.syso", "missing syso file with CGO_ENABLED=0") + + tg.setenv("CGO_ENABLED", "1") + tg.run("list", "-msan", "-f", "{{.SysoFiles}}", "syso") + tg.grepStdoutNot("a.syso", "unexpected syso file with -msan") +} + +// Issue 16120. 
+func TestGenerateUsesBuildContext(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("this test won't run under Windows") + } + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempDir("src/gen") + tg.tempFile("src/gen/gen.go", "package gen\n//go:generate echo $GOOS $GOARCH\n") + tg.setenv("GOPATH", tg.path(".")) + + tg.setenv("GOOS", "linux") + tg.setenv("GOARCH", "amd64") + tg.run("generate", "gen") + tg.grepStdout("linux amd64", "unexpected GOOS/GOARCH combination") + + tg.setenv("GOOS", "darwin") + tg.setenv("GOARCH", "arm64") + tg.run("generate", "gen") + tg.grepStdout("darwin arm64", "unexpected GOOS/GOARCH combination") +} + +func TestGoEnv(t *testing.T) { + tg := testgo(t) + tg.parallel() + defer tg.cleanup() + tg.setenv("GOOS", "freebsd") // to avoid invalid pair errors + tg.setenv("GOARCH", "arm") + tg.run("env", "GOARCH") + tg.grepStdout("^arm$", "GOARCH not honored") + + tg.run("env", "GCCGO") + tg.grepStdout(".", "GCCGO unexpectedly empty") + + tg.run("env", "CGO_CFLAGS") + tg.grepStdout(".", "default CGO_CFLAGS unexpectedly empty") + + tg.setenv("CGO_CFLAGS", "-foobar") + tg.run("env", "CGO_CFLAGS") + tg.grepStdout("^-foobar$", "CGO_CFLAGS not honored") + + tg.setenv("CC", "gcc -fmust -fgo -ffaster") + tg.run("env", "CC") + tg.grepStdout("gcc", "CC not found") + tg.run("env", "GOGCCFLAGS") + tg.grepStdout("-ffaster", "CC arguments not found") + + tg.run("env", "GOVERSION") + envVersion := strings.TrimSpace(tg.stdout.String()) + + tg.run("version") + cmdVersion := strings.TrimSpace(tg.stdout.String()) + + // If 'go version' is "go version /", then + // 'go env GOVERSION' is just "". + if cmdVersion == envVersion || !strings.Contains(cmdVersion, envVersion) { + t.Fatalf("'go env GOVERSION' %q should be a shorter substring of 'go version' %q", envVersion, cmdVersion) + } +} + +const ( + noMatchesPattern = `(?m)^ok.*\[no tests to run\]` + okPattern = `(?m)^ok` +) + +// Issue 18044. 
+func TestLdBindNow(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.setenv("LD_BIND_NOW", "1") + tg.run("help") +} + +// Issue 18225. +// This is really a cmd/asm issue but this is a convenient place to test it. +func TestConcurrentAsm(t *testing.T) { + skipIfGccgo(t, "gccgo does not use cmd/asm") + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + asm := `DATA ·constants<>+0x0(SB)/8,$0 +GLOBL ·constants<>(SB),8,$8 +` + tg.tempFile("go/src/p/a.s", asm) + tg.tempFile("go/src/p/b.s", asm) + tg.tempFile("go/src/p/p.go", `package p`) + tg.setenv("GOPATH", tg.path("go")) + tg.run("build", "p") +} + +// Issue 18975. +func TestFFLAGS(t *testing.T) { + testenv.MustHaveCGO(t) + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + + tg.tempFile("p/src/p/main.go", `package main + // #cgo FFLAGS: -no-such-fortran-flag + import "C" + func main() {} + `) + tg.tempFile("p/src/p/a.f", `! comment`) + tg.setenv("GOPATH", tg.path("p")) + + // This should normally fail because we are passing an unknown flag, + // but issue #19080 points to Fortran compilers that succeed anyhow. + // To work either way we call doRun directly rather than run or runFail. + tg.doRun([]string{"build", "-x", "p"}) + + tg.grepStderr("no-such-fortran-flag", `missing expected "-no-such-fortran-flag"`) +} + +// Issue 19198. +// This is really a cmd/link issue but this is a convenient place to test it. 
+func TestDuplicateGlobalAsmSymbols(t *testing.T) { + skipIfGccgo(t, "gccgo does not use cmd/asm") + tooSlow(t, "links a binary with cgo dependencies") + if runtime.GOARCH != "386" && runtime.GOARCH != "amd64" { + t.Skipf("skipping test on %s", runtime.GOARCH) + } + testenv.MustHaveCGO(t) + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + + asm := ` +#include "textflag.h" + +DATA sym<>+0x0(SB)/8,$0 +GLOBL sym<>(SB),(NOPTR+RODATA),$8 + +TEXT ·Data(SB),NOSPLIT,$0 + MOVB sym<>(SB), AX + MOVB AX, ret+0(FP) + RET +` + tg.tempFile("go/src/a/a.s", asm) + tg.tempFile("go/src/a/a.go", `package a; func Data() uint8`) + tg.tempFile("go/src/b/b.s", asm) + tg.tempFile("go/src/b/b.go", `package b; func Data() uint8`) + tg.tempFile("go/src/p/p.go", ` +package main +import "a" +import "b" +import "C" +func main() { + _ = a.Data() + b.Data() +} +`) + tg.setenv("GOPATH", tg.path("go")) + exe := tg.path("p.exe") + tg.creatingTemp(exe) + tg.run("build", "-o", exe, "p") +} + +func copyFile(src, dst string, perm fs.FileMode) error { + sf, err := os.Open(src) + if err != nil { + return err + } + defer sf.Close() + + df, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + + _, err = io.Copy(df, sf) + err2 := df.Close() + if err != nil { + return err + } + return err2 +} + +func TestNeedVersion(t *testing.T) { + skipIfGccgo(t, "gccgo does not use cmd/compile") + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempFile("goversion.go", `package main; func main() {}`) + path := tg.path("goversion.go") + tg.setenv("TESTGO_TOOLCHAIN_VERSION", "go1.testgo") + tg.runFail("run", path) + tg.grepStderr("compile", "does not match go tool version") +} + +func TestBuildmodePIE(t *testing.T) { + tooSlow(t, "links binaries") + + if !platform.BuildModeSupported(runtime.Compiler, "pie", runtime.GOOS, runtime.GOARCH) { + t.Skipf("skipping test because buildmode=pie is not supported on %s/%s", runtime.GOOS, runtime.GOARCH) + } + // Skip on 
alpine until https://go.dev/issues/54354 resolved. + if strings.HasSuffix(testenv.Builder(), "-alpine") { + t.Skip("skipping PIE tests on alpine; see https://go.dev/issues/54354") + } + t.Run("non-cgo", func(t *testing.T) { + testBuildmodePIE(t, false, true) + }) + t.Run("cgo", func(t *testing.T) { + testenv.MustHaveCGO(t) + testBuildmodePIE(t, true, true) + }) +} + +func TestWindowsDefaultBuildmodIsPIE(t *testing.T) { + if runtime.GOOS != "windows" { + t.Skip("skipping windows only test") + } + tooSlow(t, "links binaries") + + t.Run("non-cgo", func(t *testing.T) { + testBuildmodePIE(t, false, false) + }) + t.Run("cgo", func(t *testing.T) { + testenv.MustHaveCGO(t) + testBuildmodePIE(t, true, false) + }) +} + +func testBuildmodePIE(t *testing.T, useCgo, setBuildmodeToPIE bool) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + + var s string + if useCgo { + s = `import "C";` + } + tg.tempFile("main.go", fmt.Sprintf(`package main;%s func main() { print("hello") }`, s)) + src := tg.path("main.go") + obj := tg.path("main.exe") + args := []string{"build"} + if setBuildmodeToPIE { + args = append(args, "-buildmode=pie") + } + args = append(args, "-o", obj, src) + tg.run(args...) 
+ + switch runtime.GOOS { + case "linux", "android", "freebsd": + f, err := elf.Open(obj) + if err != nil { + t.Fatal(err) + } + defer f.Close() + if f.Type != elf.ET_DYN { + t.Errorf("PIE type must be ET_DYN, but %s", f.Type) + } + case "darwin", "ios": + f, err := macho.Open(obj) + if err != nil { + t.Fatal(err) + } + defer f.Close() + if f.Flags&macho.FlagDyldLink == 0 { + t.Error("PIE must have DyldLink flag, but not") + } + if f.Flags&macho.FlagPIE == 0 { + t.Error("PIE must have PIE flag, but not") + } + case "windows": + f, err := pe.Open(obj) + if err != nil { + t.Fatal(err) + } + defer f.Close() + if f.Section(".reloc") == nil { + t.Error(".reloc section is not present") + } + if (f.FileHeader.Characteristics & pe.IMAGE_FILE_RELOCS_STRIPPED) != 0 { + t.Error("IMAGE_FILE_RELOCS_STRIPPED flag is set") + } + var dc uint16 + switch oh := f.OptionalHeader.(type) { + case *pe.OptionalHeader32: + dc = oh.DllCharacteristics + case *pe.OptionalHeader64: + dc = oh.DllCharacteristics + if (dc & pe.IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA) == 0 { + t.Error("IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA flag is not set") + } + default: + t.Fatalf("unexpected optional header type of %T", f.OptionalHeader) + } + if (dc & pe.IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) == 0 { + t.Error("IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE flag is not set") + } + if useCgo { + // Test that only one symbol is exported (#40795). + // PIE binaries don´t require .edata section but unfortunately + // binutils doesn´t generate a .reloc section unless there is + // at least one symbol exported. 
+ // See https://sourceware.org/bugzilla/show_bug.cgi?id=19011 + section := f.Section(".edata") + if section == nil { + t.Skip(".edata section is not present") + } + // TODO: deduplicate this struct from cmd/link/internal/ld/pe.go + type IMAGE_EXPORT_DIRECTORY struct { + _ [2]uint32 + _ [2]uint16 + _ [2]uint32 + NumberOfFunctions uint32 + NumberOfNames uint32 + _ [3]uint32 + } + var e IMAGE_EXPORT_DIRECTORY + if err := binary.Read(section.Open(), binary.LittleEndian, &e); err != nil { + t.Fatalf("binary.Read failed: %v", err) + } + + // Only _cgo_dummy_export should be exported + if e.NumberOfFunctions != 1 { + t.Fatalf("got %d exported functions; want 1", e.NumberOfFunctions) + } + if e.NumberOfNames != 1 { + t.Fatalf("got %d exported names; want 1", e.NumberOfNames) + } + } + default: + // testBuildmodePIE opens object files, so it needs to understand the object + // file format. + t.Skipf("skipping test: test helper does not support %s", runtime.GOOS) + } + + out, err := testenv.Command(t, obj).CombinedOutput() + if err != nil { + t.Fatal(err) + } + + if string(out) != "hello" { + t.Errorf("got %q; want %q", out, "hello") + } +} + +func TestUpxCompression(t *testing.T) { + if runtime.GOOS != "linux" || + (runtime.GOARCH != "amd64" && runtime.GOARCH != "386") { + t.Skipf("skipping upx test on %s/%s", runtime.GOOS, runtime.GOARCH) + } + + testenv.MustHaveExecPath(t, "upx") + out, err := testenv.Command(t, "upx", "--version").CombinedOutput() + if err != nil { + t.Fatalf("upx --version failed: %v", err) + } + + // upx --version prints `upx ` in the first line of output: + // upx 3.94 + // [...] 
+ re := regexp.MustCompile(`([[:digit:]]+)\.([[:digit:]]+)`) + upxVersion := re.FindStringSubmatch(string(out)) + if len(upxVersion) != 3 { + t.Fatalf("bad upx version string: %s", upxVersion) + } + + major, err1 := strconv.Atoi(upxVersion[1]) + minor, err2 := strconv.Atoi(upxVersion[2]) + if err1 != nil || err2 != nil { + t.Fatalf("bad upx version string: %s", upxVersion[0]) + } + + // Anything below 3.94 is known not to work with go binaries + if (major < 3) || (major == 3 && minor < 94) { + t.Skipf("skipping because upx version %v.%v is too old", major, minor) + } + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + + tg.tempFile("main.go", `package main; import "fmt"; func main() { fmt.Print("hello upx") }`) + src := tg.path("main.go") + obj := tg.path("main") + tg.run("build", "-o", obj, src) + + out, err = testenv.Command(t, "upx", obj).CombinedOutput() + if err != nil { + t.Logf("executing upx\n%s\n", out) + t.Fatalf("upx failed with %v", err) + } + + out, err = testenv.Command(t, obj).CombinedOutput() + if err != nil { + t.Logf("%s", out) + t.Fatalf("running compressed go binary failed with error %s", err) + } + if string(out) != "hello upx" { + t.Fatalf("bad output from compressed go binary:\ngot %q; want %q", out, "hello upx") + } +} + +var gocacheverify = godebug.New("#gocacheverify") + +func TestCacheListStale(t *testing.T) { + tooSlow(t, "links a binary") + if gocacheverify.Value() == "1" { + t.Skip("GODEBUG gocacheverify") + } + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOCACHE", tg.path("cache")) + tg.tempFile("gopath/src/p/p.go", "package p; import _ \"q\"; func F(){}\n") + tg.tempFile("gopath/src/q/q.go", "package q; func F(){}\n") + tg.tempFile("gopath/src/m/m.go", "package main; import _ \"q\"; func main(){}\n") + + tg.setenv("GOPATH", tg.path("gopath")) + tg.run("install", "p", "m") + tg.run("list", "-f={{.ImportPath}} {{.Stale}}", "m", "q", "p") + tg.grepStdout("^m false", "m should not be 
stale") + tg.grepStdout("^q true", "q should be stale") + tg.grepStdout("^p false", "p should not be stale") +} + +func TestCacheCoverage(t *testing.T) { + tooSlow(t, "links and runs a test binary with coverage enabled") + if gocacheverify.Value() == "1" { + t.Skip("GODEBUG gocacheverify") + } + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) + tg.makeTempdir() + + tg.setenv("GOCACHE", tg.path("c1")) + tg.run("test", "-cover", "-short", "strings") + tg.run("test", "-cover", "-short", "math", "strings") +} + +func TestIssue22588(t *testing.T) { + // Don't get confused by stderr coming from tools. + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + + tg.wantNotStale("runtime", "", "must be non-stale to compare staleness under -toolexec") + + if _, err := os.Stat("/usr/bin/time"); err != nil { + t.Skip(err) + } + + tg.run("list", "-f={{.Stale}}", "runtime") + tg.run("list", "-toolexec=/usr/bin/time", "-f={{.Stale}}", "runtime") + tg.grepStdout("false", "incorrectly reported runtime as stale") +} + +func TestIssue22531(t *testing.T) { + tooSlow(t, "links binaries") + if gocacheverify.Value() == "1" { + t.Skip("GODEBUG gocacheverify") + } + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOPATH", tg.tempdir) + tg.setenv("GOCACHE", tg.path("cache")) + tg.tempFile("src/m/main.go", "package main /* c1 */; func main() {}\n") + tg.run("install", "-x", "m") + tg.run("list", "-f", "{{.Stale}}", "m") + tg.grepStdout("false", "reported m as stale after install") + tg.run("tool", "buildid", tg.path("bin/m"+exeSuffix)) + + // The link action ID did not include the full main build ID, + // even though the full main build ID is written into the + // eventual binary. That caused the following install to + // be a no-op, thinking the gofmt binary was up-to-date, + // even though .Stale could see it was not. 
+ tg.tempFile("src/m/main.go", "package main /* c2 */; func main() {}\n") + tg.run("install", "-x", "m") + tg.run("list", "-f", "{{.Stale}}", "m") + tg.grepStdout("false", "reported m as stale after reinstall") + tg.run("tool", "buildid", tg.path("bin/m"+exeSuffix)) +} + +func TestIssue22596(t *testing.T) { + tooSlow(t, "links binaries") + if gocacheverify.Value() == "1" { + t.Skip("GODEBUG gocacheverify") + } + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOCACHE", tg.path("cache")) + tg.tempFile("gopath1/src/p/p.go", "package p; func F(){}\n") + tg.tempFile("gopath2/src/p/p.go", "package p; func F(){}\n") + + tg.setenv("GOPATH", tg.path("gopath1")) + tg.run("list", "-f={{.Target}}", "p") + target1 := strings.TrimSpace(tg.getStdout()) + tg.run("install", "p") + tg.wantNotStale("p", "", "p stale after install") + + tg.setenv("GOPATH", tg.path("gopath2")) + tg.run("list", "-f={{.Target}}", "p") + target2 := strings.TrimSpace(tg.getStdout()) + tg.must(os.MkdirAll(filepath.Dir(target2), 0777)) + tg.must(copyFile(target1, target2, 0666)) + tg.wantStale("p", "build ID mismatch", "p not stale after copy from gopath1") + tg.run("install", "p") + tg.wantNotStale("p", "", "p stale after install2") +} + +func TestTestCache(t *testing.T) { + tooSlow(t, "links and runs test binaries") + if gocacheverify.Value() == "1" { + t.Skip("GODEBUG gocacheverify") + } + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOPATH", tg.tempdir) + tg.setenv("GOCACHE", tg.path("cache")) + + // The -p=1 in the commands below just makes the -x output easier to read. 
+ + t.Log("\n\nINITIAL\n\n") + + tg.tempFile("src/p1/p1.go", "package p1\nvar X = 1\n") + tg.tempFile("src/p2/p2.go", "package p2\nimport _ \"p1\"\nvar X = 1\n") + tg.tempFile("src/t/t1/t1_test.go", "package t\nimport \"testing\"\nfunc Test1(*testing.T) {}\n") + tg.tempFile("src/t/t2/t2_test.go", "package t\nimport _ \"p1\"\nimport \"testing\"\nfunc Test2(*testing.T) {}\n") + tg.tempFile("src/t/t3/t3_test.go", "package t\nimport \"p1\"\nimport \"testing\"\nfunc Test3(t *testing.T) {t.Log(p1.X)}\n") + tg.tempFile("src/t/t4/t4_test.go", "package t\nimport \"p2\"\nimport \"testing\"\nfunc Test4(t *testing.T) {t.Log(p2.X)}") + tg.run("test", "-x", "-v", "-short", "t/...") + + t.Log("\n\nREPEAT\n\n") + + tg.run("test", "-x", "-v", "-short", "t/...") + tg.grepStdout(`ok \tt/t1\t\(cached\)`, "did not cache t1") + tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t2") + tg.grepStdout(`ok \tt/t3\t\(cached\)`, "did not cache t3") + tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t4") + tg.grepStderrNot(`[\\/](compile|gccgo) `, "incorrectly ran compiler") + tg.grepStderrNot(`[\\/](link|gccgo) `, "incorrectly ran linker") + tg.grepStderrNot(`p[0-9]\.test`, "incorrectly ran test") + + t.Log("\n\nCOMMENT\n\n") + + // Changing the program text without affecting the compiled package + // should result in the package being rebuilt but nothing more. 
+ tg.tempFile("src/p1/p1.go", "package p1\nvar X = 01\n") + tg.run("test", "-p=1", "-x", "-v", "-short", "t/...") + tg.grepStdout(`ok \tt/t1\t\(cached\)`, "did not cache t1") + tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t2") + tg.grepStdout(`ok \tt/t3\t\(cached\)`, "did not cache t3") + tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t4") + tg.grepStderrNot(`([\\/](compile|gccgo) ).*t[0-9]_test\.go`, "incorrectly ran compiler") + tg.grepStderrNot(`[\\/](link|gccgo) `, "incorrectly ran linker") + tg.grepStderrNot(`t[0-9]\.test.*test\.short`, "incorrectly ran test") + + t.Log("\n\nCHANGE\n\n") + + // Changing the actual package should have limited effects. + tg.tempFile("src/p1/p1.go", "package p1\nvar X = 02\n") + tg.run("test", "-p=1", "-x", "-v", "-short", "t/...") + + // p2 should have been rebuilt. + tg.grepStderr(`([\\/]compile|gccgo).*p2.go`, "did not recompile p2") + + // t1 does not import anything, should not have been rebuilt. + tg.grepStderrNot(`([\\/]compile|gccgo).*t1_test.go`, "incorrectly recompiled t1") + tg.grepStderrNot(`([\\/]link|gccgo).*t1_test`, "incorrectly relinked t1_test") + tg.grepStdout(`ok \tt/t1\t\(cached\)`, "did not cache t/t1") + + // t2 imports p1 and must be rebuilt and relinked, + // but the change should not have any effect on the test binary, + // so the test should not have been rerun. + tg.grepStderr(`([\\/]compile|gccgo).*t2_test.go`, "did not recompile t2") + tg.grepStderr(`([\\/]link|gccgo).*t2\.test`, "did not relink t2_test") + // This check does not currently work with gccgo, as garbage + // collection of unused variables is not turned on by default. + if runtime.Compiler != "gccgo" { + tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t/t2") + } + + // t3 imports p1, and changing X changes t3's test binary. 
+ tg.grepStderr(`([\\/]compile|gccgo).*t3_test.go`, "did not recompile t3") + tg.grepStderr(`([\\/]link|gccgo).*t3\.test`, "did not relink t3_test") + tg.grepStderr(`t3\.test.*-test.short`, "did not rerun t3_test") + tg.grepStdoutNot(`ok \tt/t3\t\(cached\)`, "reported cached t3_test result") + + // t4 imports p2, but p2 did not change, so t4 should be relinked, not recompiled, + // and not rerun. + tg.grepStderrNot(`([\\/]compile|gccgo).*t4_test.go`, "incorrectly recompiled t4") + tg.grepStderr(`([\\/]link|gccgo).*t4\.test`, "did not relink t4_test") + // This check does not currently work with gccgo, as garbage + // collection of unused variables is not turned on by default. + if runtime.Compiler != "gccgo" { + tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t/t4") + } +} + +func TestTestSkipVetAfterFailedBuild(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + + tg.tempFile("x_test.go", `package x + func f() { + return 1 + } + `) + + tg.runFail("test", tg.path("x_test.go")) + tg.grepStderrNot(`vet`, "vet should be skipped after the failed build") +} + +func TestTestVetRebuild(t *testing.T) { + tooSlow(t, "links and runs test binaries") + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + + // golang.org/issue/23701. + // b_test imports b with augmented method from export_test.go. + // b_test also imports a, which imports b. + // Must not accidentally see un-augmented b propagate through a to b_test. + tg.tempFile("src/a/a.go", `package a + import "b" + type Type struct{} + func (*Type) M() b.T {return 0} + `) + tg.tempFile("src/b/b.go", `package b + type T int + type I interface {M() T} + `) + tg.tempFile("src/b/export_test.go", `package b + func (*T) Method() *T { return nil } + `) + tg.tempFile("src/b/b_test.go", `package b_test + import ( + "testing" + "a" + . 
"b" + ) + func TestBroken(t *testing.T) { + x := new(T) + x.Method() + _ = new(a.Type) + } + `) + + tg.setenv("GOPATH", tg.path(".")) + tg.run("test", "b") + tg.run("vet", "b") +} + +func TestInstallDeps(t *testing.T) { + tooSlow(t, "links a binary") + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOPATH", tg.tempdir) + + tg.tempFile("src/p1/p1.go", "package p1\nvar X = 1\n") + tg.tempFile("src/p2/p2.go", "package p2\nimport _ \"p1\"\n") + tg.tempFile("src/main1/main.go", "package main\nimport _ \"p2\"\nfunc main() {}\n") + + tg.run("list", "-f={{.Target}}", "p1") + p1 := strings.TrimSpace(tg.getStdout()) + tg.run("list", "-f={{.Target}}", "p2") + p2 := strings.TrimSpace(tg.getStdout()) + tg.run("list", "-f={{.Target}}", "main1") + main1 := strings.TrimSpace(tg.getStdout()) + + tg.run("install", "main1") + + tg.mustExist(main1) + tg.mustNotExist(p2) + tg.mustNotExist(p1) + + tg.run("install", "p2") + tg.mustExist(p2) + tg.mustNotExist(p1) +} + +// Issue 22986. 
+func TestImportPath(t *testing.T) { + tooSlow(t, "links and runs a test binary") + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + + tg.tempFile("src/a/a.go", ` +package main + +import ( + "log" + p "a/p-1.0" +) + +func main() { + if !p.V { + log.Fatal("false") + } +}`) + + tg.tempFile("src/a/a_test.go", ` +package main_test + +import ( + p "a/p-1.0" + "testing" +) + +func TestV(t *testing.T) { + if !p.V { + t.Fatal("false") + } +}`) + + tg.tempFile("src/a/p-1.0/p.go", ` +package p + +var V = true + +func init() {} +`) + + tg.setenv("GOPATH", tg.path(".")) + tg.run("build", "-o", tg.path("a.exe"), "a") + tg.run("test", "a") +} + +func TestBadCommandLines(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + + tg.tempFile("src/x/x.go", "package x\n") + tg.setenv("GOPATH", tg.path(".")) + + tg.run("build", "x") + + tg.tempFile("src/x/@y.go", "package x\n") + tg.runFail("build", "x") + tg.grepStderr("invalid input file name \"@y.go\"", "did not reject @y.go") + tg.must(os.Remove(tg.path("src/x/@y.go"))) + + tg.tempFile("src/x/-y.go", "package x\n") + tg.runFail("build", "x") + tg.grepStderr("invalid input file name \"-y.go\"", "did not reject -y.go") + tg.must(os.Remove(tg.path("src/x/-y.go"))) + + if runtime.Compiler == "gccgo" { + tg.runFail("build", "-gccgoflags=all=@x", "x") + } else { + tg.runFail("build", "-gcflags=all=@x", "x") + } + tg.grepStderr("invalid command-line argument @x in command", "did not reject @x during exec") + + tg.tempFile("src/@x/x.go", "package x\n") + tg.setenv("GOPATH", tg.path(".")) + tg.runFail("build", "@x") + tg.grepStderr("invalid input directory name \"@x\"|can only use path@version syntax with 'go get' and 'go install' in module-aware mode", "did not reject @x directory") + + tg.tempFile("src/@x/y/y.go", "package y\n") + tg.setenv("GOPATH", tg.path(".")) + tg.runFail("build", "@x/y") + tg.grepStderr("invalid import path \"@x/y\"|can only use path@version syntax with 'go get' and 'go install' in 
module-aware mode", "did not reject @x/y import path") + + tg.tempFile("src/-x/x.go", "package x\n") + tg.setenv("GOPATH", tg.path(".")) + tg.runFail("build", "--", "-x") + tg.grepStderr("invalid import path \"-x\"", "did not reject -x import path") + + tg.tempFile("src/-x/y/y.go", "package y\n") + tg.setenv("GOPATH", tg.path(".")) + tg.runFail("build", "--", "-x/y") + tg.grepStderr("invalid import path \"-x/y\"", "did not reject -x/y import path") +} + +func TestTwoPkgConfigs(t *testing.T) { + testenv.MustHaveCGO(t) + if runtime.GOOS == "windows" || runtime.GOOS == "plan9" { + t.Skipf("no shell scripts on %s", runtime.GOOS) + } + tooSlow(t, "builds a package with cgo dependencies") + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempFile("src/x/a.go", `package x + // #cgo pkg-config: --static a + import "C" + `) + tg.tempFile("src/x/b.go", `package x + // #cgo pkg-config: --static a + import "C" + `) + tg.tempFile("pkg-config.sh", `#!/bin/sh +echo $* >>`+tg.path("pkg-config.out")) + tg.must(os.Chmod(tg.path("pkg-config.sh"), 0755)) + tg.setenv("GOPATH", tg.path(".")) + tg.setenv("PKG_CONFIG", tg.path("pkg-config.sh")) + tg.run("build", "x") + out, err := os.ReadFile(tg.path("pkg-config.out")) + tg.must(err) + out = bytes.TrimSpace(out) + want := "--cflags --static --static -- a a\n--libs --static --static -- a a" + if !bytes.Equal(out, []byte(want)) { + t.Errorf("got %q want %q", out, want) + } +} + +func TestCgoCache(t *testing.T) { + testenv.MustHaveCGO(t) + tooSlow(t, "builds a package with cgo dependencies") + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempFile("src/x/a.go", `package main + // #ifndef VAL + // #define VAL 0 + // #endif + // int val = VAL; + import "C" + import "fmt" + func main() { fmt.Println(C.val) } + `) + tg.setenv("GOPATH", tg.path(".")) + exe := tg.path("x.exe") + tg.run("build", "-o", exe, "x") + tg.setenv("CGO_LDFLAGS", "-lnosuchlibraryexists") + tg.runFail("build", "-o", exe, "x") + 
tg.grepStderr(`nosuchlibraryexists`, "did not run linker with changed CGO_LDFLAGS") +} + +// Issue 23982 +func TestFilepathUnderCwdFormat(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.run("test", "-x", "-cover", "log") + tg.grepStderrNot(`\.log\.cover\.go`, "-x output should contain correctly formatted filepath under cwd") +} + +// Issue 24396. +func TestDontReportRemoveOfEmptyDir(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempFile("src/a/a.go", `package a`) + tg.setenv("GOPATH", tg.path(".")) + tg.run("install", "-x", "a") + tg.run("install", "-x", "a") + // The second install should have printed only a WORK= line, + // nothing else. + if bytes.Count(tg.stdout.Bytes(), []byte{'\n'})+bytes.Count(tg.stderr.Bytes(), []byte{'\n'}) > 1 { + t.Error("unnecessary output when installing installed package") + } +} + +// Issue 24704. +func TestLinkerTmpDirIsDeleted(t *testing.T) { + skipIfGccgo(t, "gccgo does not use cmd/link") + testenv.MustHaveCGO(t) + tooSlow(t, "builds a package with cgo dependencies") + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempFile("a.go", `package main; import "C"; func main() {}`) + tg.run("build", "-ldflags", "-v", "-o", os.DevNull, tg.path("a.go")) + // Find line that has "host link:" in linker output. + stderr := tg.getStderr() + var hostLinkLine string + for _, line := range strings.Split(stderr, "\n") { + if !strings.Contains(line, "host link:") { + continue + } + hostLinkLine = line + break + } + if hostLinkLine == "" { + t.Fatal(`fail to find with "host link:" string in linker output`) + } + // Find parameter, like "/tmp/go-link-408556474/go.o" inside of + // "host link:" line, and extract temp directory /tmp/go-link-408556474 + // out of it. 
+ tmpdir := hostLinkLine + i := strings.Index(tmpdir, `go.o"`) + if i == -1 { + t.Fatalf(`fail to find "go.o" in "host link:" line %q`, hostLinkLine) + } + tmpdir = tmpdir[:i-1] + i = strings.LastIndex(tmpdir, `"`) + if i == -1 { + t.Fatalf(`fail to find " in "host link:" line %q`, hostLinkLine) + } + tmpdir = tmpdir[i+1:] + // Verify that temp directory has been removed. + _, err := os.Stat(tmpdir) + if err == nil { + t.Fatalf("temp directory %q has not been removed", tmpdir) + } + if !os.IsNotExist(err) { + t.Fatalf("Stat(%q) returns unexpected error: %v", tmpdir, err) + } +} + +// Issue 25093. +func TestCoverpkgTestOnly(t *testing.T) { + skipIfGccgo(t, "gccgo has no cover tool") + tooSlow(t, "links and runs a test binary with coverage enabled") + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempFile("src/a/a.go", `package a + func F(i int) int { + return i*i + }`) + tg.tempFile("src/atest/a_test.go", ` + package a_test + import ( "a"; "testing" ) + func TestF(t *testing.T) { a.F(2) } + `) + tg.setenv("GOPATH", tg.path(".")) + tg.run("test", "-coverpkg=a", "atest") + tg.grepStderrNot("no packages being tested depend on matches", "bad match message") + tg.grepStdout("coverage: 100", "no coverage") +} + +// Regression test for golang.org/issue/34499: version command should not crash +// when executed in a deleted directory on Linux. +func TestExecInDeletedDir(t *testing.T) { + switch runtime.GOOS { + case "windows", "plan9", + "aix", // Fails with "device busy". + "solaris", "illumos": // Fails with "invalid argument". 
+ t.Skipf("%v does not support removing the current working directory", runtime.GOOS) + } + tg := testgo(t) + defer tg.cleanup() + + wd, err := os.Getwd() + tg.check(err) + tg.makeTempdir() + tg.check(os.Chdir(tg.tempdir)) + defer func() { tg.check(os.Chdir(wd)) }() + + tg.check(os.Remove(tg.tempdir)) + + // `go version` should not fail + tg.run("version") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/go_unix_test.go b/platform/dbops/binaries/go/go/src/cmd/go/go_unix_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a6b21b86d020994cc093102569c5d91b01ba8009 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/go_unix_test.go @@ -0,0 +1,137 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package main_test + +import ( + "bufio" + "context" + "internal/testenv" + "io" + "os" + "os/exec" + "slices" + "strings" + "syscall" + "testing" +) + +func TestGoBuildUmask(t *testing.T) { + // Do not use tg.parallel; avoid other tests seeing umask manipulation. + mask := syscall.Umask(0077) // prohibit low bits + defer syscall.Umask(mask) + + tg := testgo(t) + defer tg.cleanup() + tg.tempFile("x.go", `package main; func main() {}`) + + // We have set a umask, but if the parent directory happens to have a default + // ACL, the umask may be ignored. To prevent spurious failures from an ACL, + // we compare the file created by "go build" against a file written explicitly + // by os.WriteFile. + // + // (See https://go.dev/issue/62724, https://go.dev/issue/17909.) 
+ control := tg.path("control") + tg.creatingTemp(control) + if err := os.WriteFile(control, []byte("#!/bin/sh\nexit 0"), 0777); err != nil { + t.Fatal(err) + } + cfi, err := os.Stat(control) + if err != nil { + t.Fatal(err) + } + + exe := tg.path("x") + tg.creatingTemp(exe) + tg.run("build", "-o", exe, tg.path("x.go")) + fi, err := os.Stat(exe) + if err != nil { + t.Fatal(err) + } + got, want := fi.Mode(), cfi.Mode() + if got == want { + t.Logf("wrote x with mode %v", got) + } else { + t.Fatalf("wrote x with mode %v, wanted no 0077 bits (%v)", got, want) + } +} + +// TestTestInterrupt verifies the fix for issue #60203. +// +// If the whole process group for a 'go test' invocation receives +// SIGINT (as would be sent by pressing ^C on a console), +// it should return quickly, not deadlock. +func TestTestInterrupt(t *testing.T) { + if testing.Short() { + t.Skipf("skipping in short mode: test executes many subprocesses") + } + // Don't run this test in parallel, for the same reason. + + tg := testgo(t) + defer tg.cleanup() + tg.setenv("GOROOT", testGOROOT) + + ctx, cancel := context.WithCancel(context.Background()) + cmd := testenv.CommandContext(t, ctx, tg.goTool(), "test", "std", "-short", "-count=1") + cmd.Dir = tg.execDir + + // Override $TMPDIR when running the tests: since we're terminating the tests + // with a signal they might fail to clean up some temp files, and we don't + // want that to cause an "unexpected files" failure at the end of the run. 
+ cmd.Env = append(slices.Clip(tg.env), tempEnvName()+"="+t.TempDir()) + + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: true, + } + cmd.Cancel = func() error { + pgid := cmd.Process.Pid + return syscall.Kill(-pgid, syscall.SIGINT) + } + + pipe, err := cmd.StdoutPipe() + if err != nil { + t.Fatal(err) + } + + t.Logf("running %v", cmd) + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + stdout := new(strings.Builder) + r := bufio.NewReader(pipe) + line, err := r.ReadString('\n') + if err != nil { + t.Fatal(err) + } + stdout.WriteString(line) + + // The output line for some test was written, so we know things are in progress. + // + // Cancel the rest of the run by sending SIGINT to the process group: + // it should finish up and exit with a nonzero status, + // not have to be killed with SIGKILL. + cancel() + + io.Copy(stdout, r) + if stdout.Len() > 0 { + t.Logf("stdout:\n%s", stdout) + } + err = cmd.Wait() + + ee, _ := err.(*exec.ExitError) + if ee == nil { + t.Fatalf("unexpectedly finished with nonzero status") + } + if len(ee.Stderr) > 0 { + t.Logf("stderr:\n%s", ee.Stderr) + } + if !ee.Exited() { + t.Fatalf("'go test' did not exit after interrupt: %v", err) + } + + t.Logf("interrupted tests without deadlocking") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/go_windows_test.go b/platform/dbops/binaries/go/go/src/cmd/go/go_windows_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0c443eb64dcdf555eb33eb2e0084b0971d0243c0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/go_windows_test.go @@ -0,0 +1,50 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main_test + +import ( + "internal/testenv" + "os" + "path/filepath" + "strings" + "testing" + + "cmd/go/internal/robustio" +) + +func TestAbsolutePath(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + + tmp, err := os.MkdirTemp("", "TestAbsolutePath") + if err != nil { + t.Fatal(err) + } + defer robustio.RemoveAll(tmp) + + file := filepath.Join(tmp, "a.go") + err = os.WriteFile(file, []byte{}, 0644) + if err != nil { + t.Fatal(err) + } + dir := filepath.Join(tmp, "dir") + err = os.Mkdir(dir, 0777) + if err != nil { + t.Fatal(err) + } + + noVolume := file[len(filepath.VolumeName(file)):] + wrongPath := filepath.Join(dir, noVolume) + cmd := testenv.Command(t, tg.goTool(), "build", noVolume) + cmd.Dir = dir + output, err := cmd.CombinedOutput() + if err == nil { + t.Fatal("build should fail") + } + if strings.Contains(string(output), wrongPath) { + t.Fatalf("wrong output found: %v %v", err, string(output)) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/help_test.go b/platform/dbops/binaries/go/go/src/cmd/go/help_test.go new file mode 100644 index 0000000000000000000000000000000000000000..de3b96694c56d4c56ffb0923340869c5a4f4247e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/help_test.go @@ -0,0 +1,63 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main_test + +import ( + "flag" + "go/format" + "internal/diff" + "internal/testenv" + "os" + "strings" + "testing" +) + +var fixDocs = flag.Bool("fixdocs", false, "if true, update alldocs.go") + +func TestDocsUpToDate(t *testing.T) { + testenv.MustHaveGoBuild(t) + if !*fixDocs { + t.Parallel() + } + + // We run 'go help documentation' as a subprocess instead of + // calling help.Help directly because it may be sensitive to + // init-time configuration + cmd := testenv.Command(t, testGo, "help", "documentation") + // Unset GO111MODULE so that the 'go get' section matches + // the default 'go get' implementation. + cmd.Env = append(cmd.Environ(), "GO111MODULE=") + cmd.Stderr = new(strings.Builder) + out, err := cmd.Output() + if err != nil { + t.Fatalf("%v: %v\n%s", cmd, err, cmd.Stderr) + } + + alldocs, err := format.Source(out) + if err != nil { + t.Fatalf("format.Source($(%v)): %v", cmd, err) + } + + const srcPath = `alldocs.go` + old, err := os.ReadFile(srcPath) + if err != nil { + t.Fatalf("error reading %s: %v", srcPath, err) + } + diff := diff.Diff(srcPath, old, "go help documentation | gofmt", alldocs) + if diff == nil { + t.Logf("%s is up to date.", srcPath) + return + } + + if *fixDocs { + if err := os.WriteFile(srcPath, alldocs, 0666); err != nil { + t.Fatal(err) + } + t.Logf("wrote %d bytes to %s", len(alldocs), srcPath) + } else { + t.Logf("\n%s", diff) + t.Errorf("%s is stale. To update, run 'go generate cmd/go'.", srcPath) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/init_test.go b/platform/dbops/binaries/go/go/src/cmd/go/init_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f76425d06e8dd966e8acd65030479f54c95adfe1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/init_test.go @@ -0,0 +1,41 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main_test + +import ( + "internal/testenv" + "sync/atomic" + "testing" +) + +// BenchmarkExecGoEnv measures how long it takes for 'go env GOARCH' to run. +// Since 'go' is executed, remember to run 'go install cmd/go' before running +// the benchmark if any changes were done. +func BenchmarkExecGoEnv(b *testing.B) { + testenv.MustHaveExec(b) + gotool, err := testenv.GoTool() + if err != nil { + b.Fatal(err) + } + + // We collect extra metrics. + var n, userTime, systemTime int64 + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + cmd := testenv.Command(b, gotool, "env", "GOARCH") + + if err := cmd.Run(); err != nil { + b.Fatal(err) + } + atomic.AddInt64(&n, 1) + atomic.AddInt64(&userTime, int64(cmd.ProcessState.UserTime())) + atomic.AddInt64(&systemTime, int64(cmd.ProcessState.SystemTime())) + } + }) + b.ReportMetric(float64(userTime)/float64(n), "user-ns/op") + b.ReportMetric(float64(systemTime)/float64(n), "sys-ns/op") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/main.go b/platform/dbops/binaries/go/go/src/cmd/go/main.go new file mode 100644 index 0000000000000000000000000000000000000000..d380aae489436f8e531803beaef4f86dfd4c1bb6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/main.go @@ -0,0 +1,327 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:generate go test cmd/go -v -run=^TestDocsUpToDate$ -fixdocs + +package main + +import ( + "cmd/go/internal/toolchain" + "cmd/go/internal/workcmd" + "context" + "flag" + "fmt" + "internal/buildcfg" + "log" + "os" + "path/filepath" + rtrace "runtime/trace" + "slices" + "strings" + + "cmd/go/internal/base" + "cmd/go/internal/bug" + "cmd/go/internal/cfg" + "cmd/go/internal/clean" + "cmd/go/internal/doc" + "cmd/go/internal/envcmd" + "cmd/go/internal/fix" + "cmd/go/internal/fmtcmd" + "cmd/go/internal/generate" + "cmd/go/internal/help" + "cmd/go/internal/list" + "cmd/go/internal/modcmd" + "cmd/go/internal/modfetch" + "cmd/go/internal/modget" + "cmd/go/internal/modload" + "cmd/go/internal/run" + "cmd/go/internal/test" + "cmd/go/internal/tool" + "cmd/go/internal/trace" + "cmd/go/internal/version" + "cmd/go/internal/vet" + "cmd/go/internal/work" +) + +func init() { + base.Go.Commands = []*base.Command{ + bug.CmdBug, + work.CmdBuild, + clean.CmdClean, + doc.CmdDoc, + envcmd.CmdEnv, + fix.CmdFix, + fmtcmd.CmdFmt, + generate.CmdGenerate, + modget.CmdGet, + work.CmdInstall, + list.CmdList, + modcmd.CmdMod, + workcmd.CmdWork, + run.CmdRun, + test.CmdTest, + tool.CmdTool, + version.CmdVersion, + vet.CmdVet, + + help.HelpBuildConstraint, + help.HelpBuildmode, + help.HelpC, + help.HelpCache, + help.HelpEnvironment, + help.HelpFileType, + modload.HelpGoMod, + help.HelpGopath, + modfetch.HelpGoproxy, + help.HelpImportPath, + modload.HelpModules, + modfetch.HelpModuleAuth, + help.HelpPackages, + modfetch.HelpPrivate, + test.HelpTestflag, + test.HelpTestfunc, + modget.HelpVCS, + } +} + +var _ = go11tag + +func main() { + log.SetFlags(0) + handleChdirFlag() + toolchain.Select() + + flag.Usage = base.Usage + flag.Parse() + + args := flag.Args() + if len(args) < 1 { + base.Usage() + } + + cfg.CmdName = args[0] // for error messages + if args[0] == "help" { + help.Help(os.Stdout, args[1:]) + return + } + + if cfg.GOROOT == "" { + fmt.Fprintf(os.Stderr, "go: cannot find GOROOT 
directory: 'go' binary is trimmed and GOROOT is not set\n") + os.Exit(2) + } + if fi, err := os.Stat(cfg.GOROOT); err != nil || !fi.IsDir() { + fmt.Fprintf(os.Stderr, "go: cannot find GOROOT directory: %v\n", cfg.GOROOT) + os.Exit(2) + } + + // Diagnose common mistake: GOPATH==GOROOT. + // This setting is equivalent to not setting GOPATH at all, + // which is not what most people want when they do it. + if gopath := cfg.BuildContext.GOPATH; filepath.Clean(gopath) == filepath.Clean(cfg.GOROOT) { + fmt.Fprintf(os.Stderr, "warning: GOPATH set to GOROOT (%s) has no effect\n", gopath) + } else { + for _, p := range filepath.SplitList(gopath) { + // Some GOPATHs have empty directory elements - ignore them. + // See issue 21928 for details. + if p == "" { + continue + } + // Note: using HasPrefix instead of Contains because a ~ can appear + // in the middle of directory elements, such as /tmp/git-1.8.2~rc3 + // or C:\PROGRA~1. Only ~ as a path prefix has meaning to the shell. + if strings.HasPrefix(p, "~") { + fmt.Fprintf(os.Stderr, "go: GOPATH entry cannot start with shell metacharacter '~': %q\n", p) + os.Exit(2) + } + if !filepath.IsAbs(p) { + if cfg.Getenv("GOPATH") == "" { + // We inferred $GOPATH from $HOME and did a bad job at it. + // Instead of dying, uninfer it. + cfg.BuildContext.GOPATH = "" + } else { + fmt.Fprintf(os.Stderr, "go: GOPATH entry is relative; must be absolute path: %q.\nFor more details see: 'go help gopath'\n", p) + os.Exit(2) + } + } + } + } + + cmd, used := lookupCmd(args) + cfg.CmdName = strings.Join(args[:used], " ") + if len(cmd.Commands) > 0 { + if used >= len(args) { + help.PrintUsage(os.Stderr, cmd) + base.SetExitStatus(2) + base.Exit() + } + if args[used] == "help" { + // Accept 'go mod help' and 'go mod help foo' for 'go help mod' and 'go help mod foo'. 
+ help.Help(os.Stdout, append(slices.Clip(args[:used]), args[used+1:]...)) + base.Exit() + } + helpArg := "" + if used > 0 { + helpArg += " " + strings.Join(args[:used], " ") + } + cmdName := cfg.CmdName + if cmdName == "" { + cmdName = args[0] + } + fmt.Fprintf(os.Stderr, "go %s: unknown command\nRun 'go help%s' for usage.\n", cmdName, helpArg) + base.SetExitStatus(2) + base.Exit() + } + invoke(cmd, args[used-1:]) + base.Exit() +} + +// lookupCmd interprets the initial elements of args +// to find a command to run (cmd.Runnable() == true) +// or else a command group that ran out of arguments +// or had an unknown subcommand (len(cmd.Commands) > 0). +// It returns that command and the number of elements of args +// that it took to arrive at that command. +func lookupCmd(args []string) (cmd *base.Command, used int) { + cmd = base.Go + for used < len(args) { + c := cmd.Lookup(args[used]) + if c == nil { + break + } + if c.Runnable() { + cmd = c + used++ + break + } + if len(c.Commands) > 0 { + cmd = c + used++ + if used >= len(args) || args[0] == "help" { + break + } + continue + } + // len(c.Commands) == 0 && !c.Runnable() => help text; stop at "help" + break + } + return cmd, used +} + +func invoke(cmd *base.Command, args []string) { + // 'go env' handles checking the build config + if cmd != envcmd.CmdEnv { + buildcfg.Check() + if cfg.ExperimentErr != nil { + base.Fatal(cfg.ExperimentErr) + } + } + + // Set environment (GOOS, GOARCH, etc) explicitly. + // In theory all the commands we invoke should have + // the same default computation of these as we do, + // but in practice there might be skew + // This makes sure we all agree. 
+ cfg.OrigEnv = toolchain.FilterEnv(os.Environ()) + cfg.CmdEnv = envcmd.MkEnv() + for _, env := range cfg.CmdEnv { + if os.Getenv(env.Name) != env.Value { + os.Setenv(env.Name, env.Value) + } + } + + cmd.Flag.Usage = func() { cmd.Usage() } + if cmd.CustomFlags { + args = args[1:] + } else { + base.SetFromGOFLAGS(&cmd.Flag) + cmd.Flag.Parse(args[1:]) + args = cmd.Flag.Args() + } + + if cfg.DebugRuntimeTrace != "" { + f, err := os.Create(cfg.DebugRuntimeTrace) + if err != nil { + base.Fatalf("creating trace file: %v", err) + } + if err := rtrace.Start(f); err != nil { + base.Fatalf("starting event trace: %v", err) + } + defer func() { + rtrace.Stop() + }() + } + + ctx := maybeStartTrace(context.Background()) + ctx, span := trace.StartSpan(ctx, fmt.Sprint("Running ", cmd.Name(), " command")) + cmd.Run(ctx, cmd, args) + span.Done() +} + +func init() { + base.Usage = mainUsage +} + +func mainUsage() { + help.PrintUsage(os.Stderr, base.Go) + os.Exit(2) +} + +func maybeStartTrace(pctx context.Context) context.Context { + if cfg.DebugTrace == "" { + return pctx + } + + ctx, close, err := trace.Start(pctx, cfg.DebugTrace) + if err != nil { + base.Fatalf("failed to start trace: %v", err) + } + base.AtExit(func() { + if err := close(); err != nil { + base.Fatalf("failed to stop trace: %v", err) + } + }) + + return ctx +} + +// handleChdirFlag handles the -C flag before doing anything else. +// The -C flag must be the first flag on the command line, to make it easy to find +// even with commands that have custom flag parsing. +// handleChdirFlag handles the flag by chdir'ing to the directory +// and then removing that flag from the command line entirely. +// +// We have to handle the -C flag this way for two reasons: +// +// 1. Toolchain selection needs to be in the right directory to look for go.mod and go.work. +// +// 2. A toolchain switch later on reinvokes the new go command with the same arguments. 
+// The parent toolchain has already done the chdir; the child must not try to do it again. +func handleChdirFlag() { + _, used := lookupCmd(os.Args[1:]) + used++ // because of [1:] + if used >= len(os.Args) { + return + } + + var dir string + switch a := os.Args[used]; { + default: + return + + case a == "-C", a == "--C": + if used+1 >= len(os.Args) { + return + } + dir = os.Args[used+1] + os.Args = slices.Delete(os.Args, used, used+2) + + case strings.HasPrefix(a, "-C="), strings.HasPrefix(a, "--C="): + _, dir, _ = strings.Cut(a, "=") + os.Args = slices.Delete(os.Args, used, used+1) + } + + if err := os.Chdir(dir); err != nil { + base.Fatalf("go: %v", err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/note_test.go b/platform/dbops/binaries/go/go/src/cmd/go/note_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ba7ec2a47bcc43f5faaaf3f6b489b66b32b6e1e0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/note_test.go @@ -0,0 +1,72 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main_test + +import ( + "internal/testenv" + "runtime" + "testing" + + "cmd/internal/buildid" +) + +func TestNoteReading(t *testing.T) { + // cmd/internal/buildid already has tests that the basic reading works. + // This test is essentially checking that -ldflags=-buildid=XXX works, + // both in internal and external linking mode. 
+ tg := testgo(t) + defer tg.cleanup() + tg.parallel() + + tg.tempFile("hello.go", `package main; func main() { print("hello, world\n") }`) + const buildID = "TestNoteReading-Build-ID" + tg.run("build", "-ldflags", "-buildid="+buildID, "-o", tg.path("hello.exe"), tg.path("hello.go")) + id, err := buildid.ReadFile(tg.path("hello.exe")) + if err != nil { + t.Fatalf("reading build ID from hello binary: %v", err) + } + if id != buildID { + t.Fatalf("buildID in hello binary = %q, want %q", id, buildID) + } + + switch { + case !testenv.HasCGO(): + t.Skipf("skipping - no cgo, so assuming external linking not available") + case runtime.GOOS == "plan9": + t.Skipf("skipping - external linking not supported") + } + + tg.run("build", "-ldflags", "-buildid="+buildID+" -linkmode=external", "-o", tg.path("hello2.exe"), tg.path("hello.go")) + id, err = buildid.ReadFile(tg.path("hello2.exe")) + if err != nil { + t.Fatalf("reading build ID from hello binary (linkmode=external): %v", err) + } + if id != buildID { + t.Fatalf("buildID in hello binary = %q, want %q (linkmode=external)", id, buildID) + } + + switch runtime.GOOS { + case "dragonfly", "freebsd", "linux", "netbsd", "openbsd": + // Test while forcing use of the gold linker, since in the past + // we've had trouble reading the notes generated by gold. + err := tg.doRun([]string{"build", "-ldflags", "-buildid=" + buildID + " -linkmode=external -extldflags=-fuse-ld=gold", "-o", tg.path("hello3.exe"), tg.path("hello.go")}) + if err != nil { + if tg.grepCountBoth("(invalid linker|gold|cannot find [‘']ld[’'])") > 0 { + // It's not an error if gold isn't there. gcc claims it "cannot find 'ld'" if + // ld.gold is missing, see issue #22340. 
+ t.Log("skipping gold test") + break + } + t.Fatalf("building hello binary: %v", err) + } + id, err = buildid.ReadFile(tg.path("hello3.exe")) + if err != nil { + t.Fatalf("reading build ID from hello binary (linkmode=external -extldflags=-fuse-ld=gold): %v", err) + } + if id != buildID { + t.Fatalf("buildID in hello binary = %q, want %q (linkmode=external -extldflags=-fuse-ld=gold)", id, buildID) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/proxy_test.go b/platform/dbops/binaries/go/go/src/cmd/go/proxy_test.go new file mode 100644 index 0000000000000000000000000000000000000000..cb3d9f92f19b88662e299c2505a378de1a438a40 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/proxy_test.go @@ -0,0 +1,487 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main_test + +import ( + "archive/zip" + "bytes" + "encoding/json" + "errors" + "flag" + "fmt" + "internal/txtar" + "io" + "io/fs" + "log" + "net" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "testing" + + "cmd/go/internal/modfetch/codehost" + "cmd/go/internal/par" + + "golang.org/x/mod/module" + "golang.org/x/mod/semver" + "golang.org/x/mod/sumdb" + "golang.org/x/mod/sumdb/dirhash" +) + +var ( + proxyAddr = flag.String("proxy", "", "run proxy on this network address instead of running any tests") + proxyURL string +) + +var proxyOnce sync.Once + +// StartProxy starts the Go module proxy running on *proxyAddr (like "localhost:1234") +// and sets proxyURL to the GOPROXY setting to use to access the proxy. +// Subsequent calls are no-ops. +// +// The proxy serves from testdata/mod. See testdata/mod/README. 
+func StartProxy() { + proxyOnce.Do(func() { + readModList() + addr := *proxyAddr + if addr == "" { + addr = "localhost:0" + } + l, err := net.Listen("tcp", addr) + if err != nil { + log.Fatal(err) + } + *proxyAddr = l.Addr().String() + proxyURL = "http://" + *proxyAddr + "/mod" + fmt.Fprintf(os.Stderr, "go test proxy running at GOPROXY=%s\n", proxyURL) + go func() { + log.Fatalf("go proxy: http.Serve: %v", http.Serve(l, http.HandlerFunc(proxyHandler))) + }() + + // Prepopulate main sumdb. + for _, mod := range modList { + sumdbOps.Lookup(nil, mod) + } + }) +} + +var modList []module.Version + +func readModList() { + files, err := os.ReadDir("testdata/mod") + if err != nil { + log.Fatal(err) + } + for _, f := range files { + name := f.Name() + if !strings.HasSuffix(name, ".txt") { + continue + } + name = strings.TrimSuffix(name, ".txt") + i := strings.LastIndex(name, "_v") + if i < 0 { + continue + } + encPath := strings.ReplaceAll(name[:i], "_", "/") + path, err := module.UnescapePath(encPath) + if err != nil { + if testing.Verbose() && encPath != "example.com/invalidpath/v1" { + fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err) + } + continue + } + encVers := name[i+1:] + vers, err := module.UnescapeVersion(encVers) + if err != nil { + fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err) + continue + } + modList = append(modList, module.Version{Path: path, Version: vers}) + } +} + +var zipCache par.ErrCache[*txtar.Archive, []byte] + +const ( + testSumDBName = "localhost.localdev/sumdb" + testSumDBVerifierKey = "localhost.localdev/sumdb+00000c67+AcTrnkbUA+TU4heY3hkjiSES/DSQniBqIeQ/YppAUtK6" + testSumDBSignerKey = "PRIVATE+KEY+localhost.localdev/sumdb+00000c67+AXu6+oaVaOYuQOFrf1V59JK1owcFlJcHwwXHDfDGxSPk" +) + +var ( + sumdbOps = sumdb.NewTestServer(testSumDBSignerKey, proxyGoSum) + sumdbServer = sumdb.NewServer(sumdbOps) + + sumdbWrongOps = sumdb.NewTestServer(testSumDBSignerKey, proxyGoSumWrong) + sumdbWrongServer = sumdb.NewServer(sumdbWrongOps) +) + +// 
proxyHandler serves the Go module proxy protocol. +// See the proxy section of https://research.swtch.com/vgo-module. +func proxyHandler(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.URL.Path, "/mod/") { + http.NotFound(w, r) + return + } + path := r.URL.Path[len("/mod/"):] + + // /mod/invalid returns faulty responses. + if strings.HasPrefix(path, "invalid/") { + w.Write([]byte("invalid")) + return + } + + // Next element may opt into special behavior. + if j := strings.Index(path, "/"); j >= 0 { + n, err := strconv.Atoi(path[:j]) + if err == nil && n >= 200 { + w.WriteHeader(n) + return + } + if strings.HasPrefix(path, "sumdb-") { + n, err := strconv.Atoi(path[len("sumdb-"):j]) + if err == nil && n >= 200 { + if strings.HasPrefix(path[j:], "/sumdb/") { + w.WriteHeader(n) + return + } + path = path[j+1:] + } + } + } + + // Request for $GOPROXY/sumdb-direct is direct sumdb access. + // (Client thinks it is talking directly to a sumdb.) + if strings.HasPrefix(path, "sumdb-direct/") { + r.URL.Path = path[len("sumdb-direct"):] + sumdbServer.ServeHTTP(w, r) + return + } + + // Request for $GOPROXY/sumdb-wrong is direct sumdb access + // but all the hashes are wrong. + // (Client thinks it is talking directly to a sumdb.) + if strings.HasPrefix(path, "sumdb-wrong/") { + r.URL.Path = path[len("sumdb-wrong"):] + sumdbWrongServer.ServeHTTP(w, r) + return + } + + // Request for $GOPROXY/redirect//... goes to redirects. + if strings.HasPrefix(path, "redirect/") { + path = path[len("redirect/"):] + if j := strings.Index(path, "/"); j >= 0 { + count, err := strconv.Atoi(path[:j]) + if err != nil { + return + } + + // The last redirect. + if count <= 1 { + http.Redirect(w, r, fmt.Sprintf("/mod/%s", path[j+1:]), 302) + return + } + http.Redirect(w, r, fmt.Sprintf("/mod/redirect/%d/%s", count-1, path[j+1:]), 302) + return + } + } + + // Request for $GOPROXY/sumdb//supported + // is checking whether it's OK to access sumdb via the proxy. 
+ if path == "sumdb/"+testSumDBName+"/supported" { + w.WriteHeader(200) + return + } + + // Request for $GOPROXY/sumdb//... goes to sumdb. + if sumdbPrefix := "sumdb/" + testSumDBName + "/"; strings.HasPrefix(path, sumdbPrefix) { + r.URL.Path = path[len(sumdbPrefix)-1:] + sumdbServer.ServeHTTP(w, r) + return + } + + // Module proxy request: /mod/path/@latest + // Rewrite to /mod/path/@v/.info where is the semantically + // latest version, including pseudo-versions. + if i := strings.LastIndex(path, "/@latest"); i >= 0 { + enc := path[:i] + modPath, err := module.UnescapePath(enc) + if err != nil { + if testing.Verbose() { + fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err) + } + http.NotFound(w, r) + return + } + + // Imitate what "latest" does in direct mode and what proxy.golang.org does. + // Use the latest released version. + // If there is no released version, use the latest prereleased version. + // Otherwise, use the latest pseudoversion. + var latestRelease, latestPrerelease, latestPseudo string + for _, m := range modList { + if m.Path != modPath { + continue + } + if module.IsPseudoVersion(m.Version) && (latestPseudo == "" || semver.Compare(latestPseudo, m.Version) > 0) { + latestPseudo = m.Version + } else if semver.Prerelease(m.Version) != "" && (latestPrerelease == "" || semver.Compare(latestPrerelease, m.Version) > 0) { + latestPrerelease = m.Version + } else if latestRelease == "" || semver.Compare(latestRelease, m.Version) > 0 { + latestRelease = m.Version + } + } + var latest string + if latestRelease != "" { + latest = latestRelease + } else if latestPrerelease != "" { + latest = latestPrerelease + } else if latestPseudo != "" { + latest = latestPseudo + } else { + http.NotFound(w, r) + return + } + + encVers, err := module.EscapeVersion(latest) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + path = fmt.Sprintf("%s/@v/%s.info", enc, encVers) + } + + // Module proxy request: 
/mod/path/@v/version[.suffix] + i := strings.Index(path, "/@v/") + if i < 0 { + http.NotFound(w, r) + return + } + enc, file := path[:i], path[i+len("/@v/"):] + path, err := module.UnescapePath(enc) + if err != nil { + if testing.Verbose() { + fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err) + } + http.NotFound(w, r) + return + } + if file == "list" { + // list returns a list of versions, not including pseudo-versions. + // If the module has no tagged versions, we should serve an empty 200. + // If the module doesn't exist, we should serve 404 or 410. + found := false + for _, m := range modList { + if m.Path != path { + continue + } + found = true + if !module.IsPseudoVersion(m.Version) { + if err := module.Check(m.Path, m.Version); err == nil { + fmt.Fprintf(w, "%s\n", m.Version) + } + } + } + if !found { + http.NotFound(w, r) + } + return + } + + i = strings.LastIndex(file, ".") + if i < 0 { + http.NotFound(w, r) + return + } + encVers, ext := file[:i], file[i+1:] + vers, err := module.UnescapeVersion(encVers) + if err != nil { + fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err) + http.NotFound(w, r) + return + } + + if codehost.AllHex(vers) { + var best string + // Convert commit hash (only) to known version. + // Use latest version in semver priority, to match similar logic + // in the repo-based module server (see modfetch.(*codeRepo).convert). 
+ for _, m := range modList { + if m.Path == path && semver.Compare(best, m.Version) < 0 { + var hash string + if module.IsPseudoVersion(m.Version) { + hash = m.Version[strings.LastIndex(m.Version, "-")+1:] + } else { + hash = findHash(m) + } + if strings.HasPrefix(hash, vers) || strings.HasPrefix(vers, hash) { + best = m.Version + } + } + } + if best != "" { + vers = best + } + } + + a, err := readArchive(path, vers) + if err != nil { + if testing.Verbose() { + fmt.Fprintf(os.Stderr, "go proxy: no archive %s %s: %v\n", path, vers, err) + } + if errors.Is(err, fs.ErrNotExist) { + http.NotFound(w, r) + } else { + http.Error(w, "cannot load archive", 500) + } + return + } + + switch ext { + case "info", "mod": + want := "." + ext + for _, f := range a.Files { + if f.Name == want { + w.Write(f.Data) + return + } + } + + case "zip": + zipBytes, err := zipCache.Do(a, func() ([]byte, error) { + var buf bytes.Buffer + z := zip.NewWriter(&buf) + for _, f := range a.Files { + if f.Name == ".info" || f.Name == ".mod" || f.Name == ".zip" { + continue + } + var zipName string + if strings.HasPrefix(f.Name, "/") { + zipName = f.Name[1:] + } else { + zipName = path + "@" + vers + "/" + f.Name + } + zf, err := z.Create(zipName) + if err != nil { + return nil, err + } + if _, err := zf.Write(f.Data); err != nil { + return nil, err + } + } + if err := z.Close(); err != nil { + return nil, err + } + return buf.Bytes(), nil + }) + + if err != nil { + if testing.Verbose() { + fmt.Fprintf(os.Stderr, "go proxy: %v\n", err) + } + http.Error(w, err.Error(), 500) + return + } + w.Write(zipBytes) + return + + } + http.NotFound(w, r) +} + +func findHash(m module.Version) string { + a, err := readArchive(m.Path, m.Version) + if err != nil { + return "" + } + var data []byte + for _, f := range a.Files { + if f.Name == ".info" { + data = f.Data + break + } + } + var info struct{ Short string } + json.Unmarshal(data, &info) + return info.Short +} + +var archiveCache par.Cache[string, 
*txtar.Archive] + +var cmdGoDir, _ = os.Getwd() + +func readArchive(path, vers string) (*txtar.Archive, error) { + enc, err := module.EscapePath(path) + if err != nil { + return nil, err + } + encVers, err := module.EscapeVersion(vers) + if err != nil { + return nil, err + } + + prefix := strings.ReplaceAll(enc, "/", "_") + name := filepath.Join(cmdGoDir, "testdata/mod", prefix+"_"+encVers+".txt") + a := archiveCache.Do(name, func() *txtar.Archive { + a, err := txtar.ParseFile(name) + if err != nil { + if testing.Verbose() || !os.IsNotExist(err) { + fmt.Fprintf(os.Stderr, "go proxy: %v\n", err) + } + a = nil + } + return a + }) + if a == nil { + return nil, fs.ErrNotExist + } + return a, nil +} + +// proxyGoSum returns the two go.sum lines for path@vers. +func proxyGoSum(path, vers string) ([]byte, error) { + a, err := readArchive(path, vers) + if err != nil { + return nil, err + } + var names []string + files := make(map[string][]byte) + var gomod []byte + for _, f := range a.Files { + if strings.HasPrefix(f.Name, ".") { + if f.Name == ".mod" { + gomod = f.Data + } + continue + } + name := path + "@" + vers + "/" + f.Name + names = append(names, name) + files[name] = f.Data + } + h1, err := dirhash.Hash1(names, func(name string) (io.ReadCloser, error) { + data := files[name] + return io.NopCloser(bytes.NewReader(data)), nil + }) + if err != nil { + return nil, err + } + h1mod, err := dirhash.Hash1([]string{"go.mod"}, func(string) (io.ReadCloser, error) { + return io.NopCloser(bytes.NewReader(gomod)), nil + }) + if err != nil { + return nil, err + } + data := []byte(fmt.Sprintf("%s %s %s\n%s %s/go.mod %s\n", path, vers, h1, path, vers, h1mod)) + return data, nil +} + +// proxyGoSumWrong returns the wrong lines. 
+func proxyGoSumWrong(path, vers string) ([]byte, error) { + data := []byte(fmt.Sprintf("%s %s %s\n%s %s/go.mod %s\n", path, vers, "h1:wrong", path, vers, "h1:wrong")) + return data, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/script_test.go b/platform/dbops/binaries/go/go/src/cmd/go/script_test.go new file mode 100644 index 0000000000000000000000000000000000000000..624c5bf50106202ef24b6f1b90517cb63e71c8f9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/script_test.go @@ -0,0 +1,360 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Script-driven tests. +// See testdata/script/README for an overview. + +//go:generate go test cmd/go -v -run=TestScript/README --fixreadme + +package main_test + +import ( + "bufio" + "bytes" + "context" + "flag" + "internal/testenv" + "internal/txtar" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "cmd/go/internal/cfg" + "cmd/go/internal/gover" + "cmd/go/internal/script" + "cmd/go/internal/script/scripttest" + "cmd/go/internal/vcweb/vcstest" +) + +var testSum = flag.String("testsum", "", `may be tidy, listm, or listall. If set, TestScript generates a go.sum file at the beginning of each test and updates test files if they pass.`) + +// TestScript runs the tests in testdata/script/*.txt. 
+func TestScript(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.SkipIfShortAndSlow(t) + + srv, err := vcstest.NewServer() + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := srv.Close(); err != nil { + t.Fatal(err) + } + }) + certFile, err := srv.WriteCertificateFile() + if err != nil { + t.Fatal(err) + } + + StartProxy() + + var ( + ctx = context.Background() + gracePeriod = 100 * time.Millisecond + ) + if deadline, ok := t.Deadline(); ok { + timeout := time.Until(deadline) + + // If time allows, increase the termination grace period to 5% of the + // remaining time. + if gp := timeout / 20; gp > gracePeriod { + gracePeriod = gp + } + + // When we run commands that execute subprocesses, we want to reserve two + // grace periods to clean up. We will send the first termination signal when + // the context expires, then wait one grace period for the process to + // produce whatever useful output it can (such as a stack trace). After the + // first grace period expires, we'll escalate to os.Kill, leaving the second + // grace period for the test function to record its output before the test + // process itself terminates. 
+ timeout -= 2 * gracePeriod + + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, timeout) + t.Cleanup(cancel) + } + + env, err := scriptEnv(srv, certFile) + if err != nil { + t.Fatal(err) + } + engine := &script.Engine{ + Conds: scriptConditions(), + Cmds: scriptCommands(quitSignal(), gracePeriod), + Quiet: !testing.Verbose(), + } + + t.Run("README", func(t *testing.T) { + checkScriptReadme(t, engine, env) + }) + + files, err := filepath.Glob("testdata/script/*.txt") + if err != nil { + t.Fatal(err) + } + for _, file := range files { + file := file + name := strings.TrimSuffix(filepath.Base(file), ".txt") + t.Run(name, func(t *testing.T) { + t.Parallel() + StartProxy() + + workdir, err := os.MkdirTemp(testTmpDir, name) + if err != nil { + t.Fatal(err) + } + if !*testWork { + defer removeAll(workdir) + } + + s, err := script.NewState(tbContext(ctx, t), workdir, env) + if err != nil { + t.Fatal(err) + } + + // Unpack archive. + a, err := txtar.ParseFile(file) + if err != nil { + t.Fatal(err) + } + initScriptDirs(t, s) + if err := s.ExtractFiles(a); err != nil { + t.Fatal(err) + } + + t.Log(time.Now().UTC().Format(time.RFC3339)) + work, _ := s.LookupEnv("WORK") + t.Logf("$WORK=%s", work) + + // With -testsum, if a go.mod file is present in the test's initial + // working directory, run 'go mod tidy'. + if *testSum != "" { + if updateSum(t, engine, s, a) { + defer func() { + if t.Failed() { + return + } + data := txtar.Format(a) + if err := os.WriteFile(file, data, 0666); err != nil { + t.Errorf("rewriting test file: %v", err) + } + }() + } + } + + // Note: Do not use filepath.Base(file) here: + // editors that can jump to file:line references in the output + // will work better seeing the full path relative to cmd/go + // (where the "go test" command is usually run). + scripttest.Run(t, engine, s, file, bytes.NewReader(a.Comment)) + }) + } +} + +// testingTBKey is the Context key for a testing.TB. 
+type testingTBKey struct{} + +// tbContext returns a Context derived from ctx and associated with t. +func tbContext(ctx context.Context, t testing.TB) context.Context { + return context.WithValue(ctx, testingTBKey{}, t) +} + +// tbFromContext returns the testing.TB associated with ctx, if any. +func tbFromContext(ctx context.Context) (testing.TB, bool) { + t := ctx.Value(testingTBKey{}) + if t == nil { + return nil, false + } + return t.(testing.TB), true +} + +// initScriptState creates the initial directory structure in s for unpacking a +// cmd/go script. +func initScriptDirs(t testing.TB, s *script.State) { + must := func(err error) { + if err != nil { + t.Helper() + t.Fatal(err) + } + } + + work := s.Getwd() + must(s.Setenv("WORK", work)) + + must(os.MkdirAll(filepath.Join(work, "tmp"), 0777)) + must(s.Setenv(tempEnvName(), filepath.Join(work, "tmp"))) + + gopath := filepath.Join(work, "gopath") + must(s.Setenv("GOPATH", gopath)) + gopathSrc := filepath.Join(gopath, "src") + must(os.MkdirAll(gopathSrc, 0777)) + must(s.Chdir(gopathSrc)) +} + +func scriptEnv(srv *vcstest.Server, srvCertFile string) ([]string, error) { + httpURL, err := url.Parse(srv.HTTP.URL) + if err != nil { + return nil, err + } + httpsURL, err := url.Parse(srv.HTTPS.URL) + if err != nil { + return nil, err + } + env := []string{ + pathEnvName() + "=" + testBin + string(filepath.ListSeparator) + os.Getenv(pathEnvName()), + homeEnvName() + "=/no-home", + "CCACHE_DISABLE=1", // ccache breaks with non-existent HOME + "GOARCH=" + runtime.GOARCH, + "TESTGO_GOHOSTARCH=" + goHostArch, + "GOCACHE=" + testGOCACHE, + "GOCOVERDIR=" + os.Getenv("GOCOVERDIR"), + "GODEBUG=" + os.Getenv("GODEBUG"), + "GOEXE=" + cfg.ExeSuffix, + "GOEXPERIMENT=" + os.Getenv("GOEXPERIMENT"), + "GOOS=" + runtime.GOOS, + "TESTGO_GOHOSTOS=" + goHostOS, + "GOPROXY=" + proxyURL, + "GOPRIVATE=", + "GOROOT=" + testGOROOT, + "GOROOT_FINAL=" + testGOROOT_FINAL, // causes spurious rebuilds and breaks the "stale" built-in if not 
propagated + "GOTRACEBACK=system", + "TESTGONETWORK=panic", // allow only local connections by default; the [net] condition resets this + "TESTGO_GOROOT=" + testGOROOT, + "TESTGO_EXE=" + testGo, + "TESTGO_VCSTEST_HOST=" + httpURL.Host, + "TESTGO_VCSTEST_TLS_HOST=" + httpsURL.Host, + "TESTGO_VCSTEST_CERT=" + srvCertFile, + "TESTGONETWORK=panic", // cleared by the [net] condition + "GOSUMDB=" + testSumDBVerifierKey, + "GONOPROXY=", + "GONOSUMDB=", + "GOVCS=*:all", + "devnull=" + os.DevNull, + "goversion=" + gover.Local(), + "CMDGO_TEST_RUN_MAIN=true", + "HGRCPATH=", + "GOTOOLCHAIN=auto", + "newline=\n", + } + + if testenv.Builder() != "" || os.Getenv("GIT_TRACE_CURL") == "1" { + // To help diagnose https://go.dev/issue/52545, + // enable tracing for Git HTTPS requests. + env = append(env, + "GIT_TRACE_CURL=1", + "GIT_TRACE_CURL_NO_DATA=1", + "GIT_REDACT_COOKIES=o,SSO,GSSO_Uberproxy") + } + if testing.Short() { + // VCS commands are always somewhat slow: they either require access to external hosts, + // or they require our intercepted vcs-test.golang.org to regenerate the repository. + // Require all tests that use VCS commands to be skipped in short mode. + env = append(env, "TESTGOVCS=panic") + } + + if os.Getenv("CGO_ENABLED") != "" || runtime.GOOS != goHostOS || runtime.GOARCH != goHostArch { + // If the actual CGO_ENABLED might not match the cmd/go default, set it + // explicitly in the environment. Otherwise, leave it unset so that we also + // cover the default behaviors. 
+ env = append(env, "CGO_ENABLED="+cgoEnabled) + } + + for _, key := range extraEnvKeys { + if val, ok := os.LookupEnv(key); ok { + env = append(env, key+"="+val) + } + } + + return env, nil +} + +var extraEnvKeys = []string{ + "SYSTEMROOT", // must be preserved on Windows to find DLLs; golang.org/issue/25210 + "WINDIR", // must be preserved on Windows to be able to run PowerShell command; golang.org/issue/30711 + "LD_LIBRARY_PATH", // must be preserved on Unix systems to find shared libraries + "LIBRARY_PATH", // allow override of non-standard static library paths + "C_INCLUDE_PATH", // allow override non-standard include paths + "CC", // don't lose user settings when invoking cgo + "GO_TESTING_GOTOOLS", // for gccgo testing + "GCCGO", // for gccgo testing + "GCCGOTOOLDIR", // for gccgo testing +} + +// updateSum runs 'go mod tidy', 'go list -mod=mod -m all', or +// 'go list -mod=mod all' in the test's current directory if a file named +// "go.mod" is present after the archive has been extracted. updateSum modifies +// archive and returns true if go.mod or go.sum were changed. 
+func updateSum(t testing.TB, e *script.Engine, s *script.State, archive *txtar.Archive) (rewrite bool) { + gomodIdx, gosumIdx := -1, -1 + for i := range archive.Files { + switch archive.Files[i].Name { + case "go.mod": + gomodIdx = i + case "go.sum": + gosumIdx = i + } + } + if gomodIdx < 0 { + return false + } + + var cmd string + switch *testSum { + case "tidy": + cmd = "go mod tidy" + case "listm": + cmd = "go list -m -mod=mod all" + case "listall": + cmd = "go list -mod=mod all" + default: + t.Fatalf(`unknown value for -testsum %q; may be "tidy", "listm", or "listall"`, *testSum) + } + + log := new(strings.Builder) + err := e.Execute(s, "updateSum", bufio.NewReader(strings.NewReader(cmd)), log) + if log.Len() > 0 { + t.Logf("%s", log) + } + if err != nil { + t.Fatal(err) + } + + newGomodData, err := os.ReadFile(s.Path("go.mod")) + if err != nil { + t.Fatalf("reading go.mod after -testsum: %v", err) + } + if !bytes.Equal(newGomodData, archive.Files[gomodIdx].Data) { + archive.Files[gomodIdx].Data = newGomodData + rewrite = true + } + + newGosumData, err := os.ReadFile(s.Path("go.sum")) + if err != nil && !os.IsNotExist(err) { + t.Fatalf("reading go.sum after -testsum: %v", err) + } + switch { + case os.IsNotExist(err) && gosumIdx >= 0: + // go.sum was deleted. + rewrite = true + archive.Files = append(archive.Files[:gosumIdx], archive.Files[gosumIdx+1:]...) + case err == nil && gosumIdx < 0: + // go.sum was created. + rewrite = true + gosumIdx = gomodIdx + 1 + archive.Files = append(archive.Files, txtar.File{}) + copy(archive.Files[gosumIdx+1:], archive.Files[gosumIdx:]) + archive.Files[gosumIdx] = txtar.File{Name: "go.sum", Data: newGosumData} + case err == nil && gosumIdx >= 0 && !bytes.Equal(newGosumData, archive.Files[gosumIdx].Data): + // go.sum was changed. 
+ rewrite = true + archive.Files[gosumIdx].Data = newGosumData + } + return rewrite +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/scriptcmds_test.go b/platform/dbops/binaries/go/go/src/cmd/go/scriptcmds_test.go new file mode 100644 index 0000000000000000000000000000000000000000..db5e6cafdafe51d5a94f97e0841f1c3e9fa62a27 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/scriptcmds_test.go @@ -0,0 +1,109 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main_test + +import ( + "cmd/go/internal/script" + "cmd/go/internal/script/scripttest" + "cmd/go/internal/work" + "errors" + "fmt" + "os" + "os/exec" + "strings" + "time" +) + +func scriptCommands(interrupt os.Signal, waitDelay time.Duration) map[string]script.Cmd { + cmds := scripttest.DefaultCmds() + + // Customize the "exec" interrupt signal and grace period. + var cancel func(cmd *exec.Cmd) error + if interrupt != nil { + cancel = func(cmd *exec.Cmd) error { + return cmd.Process.Signal(interrupt) + } + } + + cmdExec := script.Exec(cancel, waitDelay) + cmds["exec"] = cmdExec + + add := func(name string, cmd script.Cmd) { + if _, ok := cmds[name]; ok { + panic(fmt.Sprintf("command %q is already registered", name)) + } + cmds[name] = cmd + } + + add("cc", scriptCC(cmdExec)) + cmdGo := scriptGo(cancel, waitDelay) + add("go", cmdGo) + add("stale", scriptStale(cmdGo)) + + return cmds +} + +// scriptCC runs the C compiler along with platform specific options. +func scriptCC(cmdExec script.Cmd) script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "run the platform C compiler", + Args: "args...", + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + b := work.NewBuilder(s.Getwd()) + wait, err := cmdExec.Run(s, append(b.GccCmd(".", ""), args...)...) 
+ if err != nil { + return wait, err + } + waitAndClean := func(s *script.State) (stdout, stderr string, err error) { + stdout, stderr, err = wait(s) + if closeErr := b.Close(); err == nil { + err = closeErr + } + return stdout, stderr, err + } + return waitAndClean, nil + }) +} + +// scriptGo runs the go command. +func scriptGo(cancel func(*exec.Cmd) error, waitDelay time.Duration) script.Cmd { + return script.Program(testGo, cancel, waitDelay) +} + +// scriptStale checks that the named build targets are stale. +func scriptStale(cmdGo script.Cmd) script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "check that build targets are stale", + Args: "target...", + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + if len(args) == 0 { + return nil, script.ErrUsage + } + tmpl := "{{if .Error}}{{.ImportPath}}: {{.Error.Err}}" + + "{{else}}{{if not .Stale}}{{.ImportPath}} ({{.Target}}) is not stale{{end}}" + + "{{end}}" + + wait, err := cmdGo.Run(s, append([]string{"list", "-e", "-f=" + tmpl}, args...)...) + if err != nil { + return nil, err + } + + stdout, stderr, err := wait(s) + if len(stderr) != 0 { + s.Logf("%s", stderr) + } + if err != nil { + return nil, err + } + if out := strings.TrimSpace(stdout); out != "" { + return nil, errors.New(out) + } + return nil, nil + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/scriptconds_test.go b/platform/dbops/binaries/go/go/src/cmd/go/scriptconds_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8dd9b0d1cd1a971ba4a87124b31cb960e29c82c6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/scriptconds_test.go @@ -0,0 +1,235 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main_test + +import ( + "cmd/go/internal/cfg" + "cmd/go/internal/script" + "cmd/go/internal/script/scripttest" + "errors" + "fmt" + "internal/buildcfg" + "internal/platform" + "internal/testenv" + "os" + "os/exec" + "path/filepath" + "runtime" + "runtime/debug" + "strings" + "sync" +) + +func scriptConditions() map[string]script.Cond { + conds := scripttest.DefaultConds() + + add := func(name string, cond script.Cond) { + if _, ok := conds[name]; ok { + panic(fmt.Sprintf("condition %q is already registered", name)) + } + conds[name] = cond + } + + lazyBool := func(summary string, f func() bool) script.Cond { + return script.OnceCondition(summary, func() (bool, error) { return f(), nil }) + } + + add("abscc", script.Condition("default $CC path is absolute and exists", defaultCCIsAbsolute)) + add("asan", sysCondition("-asan", platform.ASanSupported, true)) + add("buildmode", script.PrefixCondition("go supports -buildmode=", hasBuildmode)) + add("case-sensitive", script.OnceCondition("$WORK filesystem is case-sensitive", isCaseSensitive)) + add("cc", script.PrefixCondition("go env CC = (ignoring the go/env file)", ccIs)) + add("cgo", script.BoolCondition("host CGO_ENABLED", testenv.HasCGO())) + add("cgolinkext", script.Condition("platform requires external linking for cgo", cgoLinkExt)) + add("cross", script.BoolCondition("cmd/go GOOS/GOARCH != GOHOSTOS/GOHOSTARCH", goHostOS != runtime.GOOS || goHostArch != runtime.GOARCH)) + add("fuzz", sysCondition("-fuzz", platform.FuzzSupported, false)) + add("fuzz-instrumented", sysCondition("-fuzz with instrumentation", platform.FuzzInstrumented, false)) + add("git", lazyBool("the 'git' executable exists and provides the standard CLI", hasWorkingGit)) + add("GODEBUG", script.PrefixCondition("GODEBUG contains ", hasGodebug)) + add("GOEXPERIMENT", script.PrefixCondition("GOEXPERIMENT is enabled", hasGoexperiment)) + add("go-builder", script.BoolCondition("GO_BUILDER_NAME is non-empty", testenv.Builder() != "")) + 
add("link", lazyBool("testenv.HasLink()", testenv.HasLink)) + add("mismatched-goroot", script.Condition("test's GOROOT_FINAL does not match the real GOROOT", isMismatchedGoroot)) + add("msan", sysCondition("-msan", platform.MSanSupported, true)) + add("mustlinkext", script.Condition("platform always requires external linking", mustLinkExt)) + add("net", script.PrefixCondition("can connect to external network host ", hasNet)) + add("race", sysCondition("-race", platform.RaceDetectorSupported, true)) + add("symlink", lazyBool("testenv.HasSymlink()", testenv.HasSymlink)) + add("trimpath", script.OnceCondition("test binary was built with -trimpath", isTrimpath)) + + return conds +} + +func defaultCCIsAbsolute(s *script.State) (bool, error) { + GOOS, _ := s.LookupEnv("GOOS") + GOARCH, _ := s.LookupEnv("GOARCH") + defaultCC := cfg.DefaultCC(GOOS, GOARCH) + if filepath.IsAbs(defaultCC) { + if _, err := exec.LookPath(defaultCC); err == nil { + return true, nil + } + } + return false, nil +} + +func ccIs(s *script.State, want string) (bool, error) { + CC, _ := s.LookupEnv("CC") + if CC != "" { + return CC == want, nil + } + GOOS, _ := s.LookupEnv("GOOS") + GOARCH, _ := s.LookupEnv("GOARCH") + return cfg.DefaultCC(GOOS, GOARCH) == want, nil +} + +func isMismatchedGoroot(s *script.State) (bool, error) { + gorootFinal, _ := s.LookupEnv("GOROOT_FINAL") + if gorootFinal == "" { + gorootFinal, _ = s.LookupEnv("GOROOT") + } + return gorootFinal != testGOROOT, nil +} + +func sysCondition(flag string, f func(goos, goarch string) bool, needsCgo bool) script.Cond { + return script.Condition( + "GOOS/GOARCH supports "+flag, + func(s *script.State) (bool, error) { + GOOS, _ := s.LookupEnv("GOOS") + GOARCH, _ := s.LookupEnv("GOARCH") + cross := goHostOS != GOOS || goHostArch != GOARCH + return (!needsCgo || (testenv.HasCGO() && !cross)) && f(GOOS, GOARCH), nil + }) +} + +func hasBuildmode(s *script.State, mode string) (bool, error) { + GOOS, _ := s.LookupEnv("GOOS") + GOARCH, _ := 
s.LookupEnv("GOARCH") + return platform.BuildModeSupported(runtime.Compiler, mode, GOOS, GOARCH), nil +} + +var scriptNetEnabled sync.Map // testing.TB → already enabled + +func hasNet(s *script.State, host string) (bool, error) { + if !testenv.HasExternalNetwork() { + return false, nil + } + + // TODO(bcmills): Add a flag or environment variable to allow skipping tests + // for specific hosts and/or skipping all net tests except for specific hosts. + + t, ok := tbFromContext(s.Context()) + if !ok { + return false, errors.New("script Context unexpectedly missing testing.TB key") + } + + if netTestSem != nil { + // When the number of external network connections is limited, we limit the + // number of net tests that can run concurrently so that the overall number + // of network connections won't exceed the limit. + _, dup := scriptNetEnabled.LoadOrStore(t, true) + if !dup { + // Acquire a net token for this test until the test completes. + netTestSem <- struct{}{} + t.Cleanup(func() { + <-netTestSem + scriptNetEnabled.Delete(t) + }) + } + } + + // Since we have confirmed that the network is available, + // allow cmd/go to use it. 
+ s.Setenv("TESTGONETWORK", "") + return true, nil +} + +func hasGodebug(s *script.State, value string) (bool, error) { + godebug, _ := s.LookupEnv("GODEBUG") + for _, p := range strings.Split(godebug, ",") { + if strings.TrimSpace(p) == value { + return true, nil + } + } + return false, nil +} + +func hasGoexperiment(s *script.State, value string) (bool, error) { + GOOS, _ := s.LookupEnv("GOOS") + GOARCH, _ := s.LookupEnv("GOARCH") + goexp, _ := s.LookupEnv("GOEXPERIMENT") + flags, err := buildcfg.ParseGOEXPERIMENT(GOOS, GOARCH, goexp) + if err != nil { + return false, err + } + for _, exp := range flags.All() { + if value == exp { + return true, nil + } + if strings.TrimPrefix(value, "no") == strings.TrimPrefix(exp, "no") { + return false, nil + } + } + return false, fmt.Errorf("unrecognized GOEXPERIMENT %q", value) +} + +func isCaseSensitive() (bool, error) { + tmpdir, err := os.MkdirTemp(testTmpDir, "case-sensitive") + if err != nil { + return false, fmt.Errorf("failed to create directory to determine case-sensitivity: %w", err) + } + defer os.RemoveAll(tmpdir) + + fcap := filepath.Join(tmpdir, "FILE") + if err := os.WriteFile(fcap, []byte{}, 0644); err != nil { + return false, fmt.Errorf("error writing file to determine case-sensitivity: %w", err) + } + + flow := filepath.Join(tmpdir, "file") + _, err = os.ReadFile(flow) + switch { + case err == nil: + return false, nil + case os.IsNotExist(err): + return true, nil + default: + return false, fmt.Errorf("unexpected error reading file when determining case-sensitivity: %w", err) + } +} + +func isTrimpath() (bool, error) { + info, _ := debug.ReadBuildInfo() + if info == nil { + return false, errors.New("missing build info") + } + + for _, s := range info.Settings { + if s.Key == "-trimpath" && s.Value == "true" { + return true, nil + } + } + return false, nil +} + +func hasWorkingGit() bool { + if runtime.GOOS == "plan9" { + // The Git command is usually not the real Git on Plan 9. 
+ // See https://golang.org/issues/29640. + return false + } + _, err := exec.LookPath("git") + return err == nil +} + +func cgoLinkExt(s *script.State) (bool, error) { + GOOS, _ := s.LookupEnv("GOOS") + GOARCH, _ := s.LookupEnv("GOARCH") + return platform.MustLinkExternal(GOOS, GOARCH, true), nil +} + +func mustLinkExt(s *script.State) (bool, error) { + GOOS, _ := s.LookupEnv("GOOS") + GOARCH, _ := s.LookupEnv("GOARCH") + return platform.MustLinkExternal(GOOS, GOARCH, false), nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/scriptreadme_test.go b/platform/dbops/binaries/go/go/src/cmd/go/scriptreadme_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2a842fbc0f7b636d92ce644586b1e3e5c7084dd3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/scriptreadme_test.go @@ -0,0 +1,267 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main_test + +import ( + "bytes" + "cmd/go/internal/script" + "flag" + "internal/diff" + "internal/testenv" + "os" + "strings" + "testing" + "text/template" +) + +var fixReadme = flag.Bool("fixreadme", false, "if true, update ../testdata/script/README") + +func checkScriptReadme(t *testing.T, engine *script.Engine, env []string) { + var args struct { + Language string + Commands string + Conditions string + } + + cmds := new(strings.Builder) + if err := engine.ListCmds(cmds, true); err != nil { + t.Fatal(err) + } + args.Commands = cmds.String() + + conds := new(strings.Builder) + if err := engine.ListConds(conds, nil); err != nil { + t.Fatal(err) + } + args.Conditions = conds.String() + + doc := new(strings.Builder) + cmd := testenv.Command(t, testGo, "doc", "cmd/go/internal/script") + cmd.Env = env + cmd.Stdout = doc + if err := cmd.Run(); err != nil { + t.Fatal(cmd, ":", err) + } + _, lang, ok := strings.Cut(doc.String(), "# Script Language\n\n") + if !ok { + t.Fatalf("%q 
did not include Script Language section", cmd) + } + lang, _, ok = strings.Cut(lang, "\n\nvar ") + if !ok { + t.Fatalf("%q did not include vars after Script Language section", cmd) + } + args.Language = lang + + tmpl := template.Must(template.New("README").Parse(readmeTmpl[1:])) + buf := new(bytes.Buffer) + if err := tmpl.Execute(buf, args); err != nil { + t.Fatal(err) + } + + const readmePath = "testdata/script/README" + old, err := os.ReadFile(readmePath) + if err != nil { + t.Fatal(err) + } + diff := diff.Diff(readmePath, old, "readmeTmpl", buf.Bytes()) + if diff == nil { + t.Logf("%s is up to date.", readmePath) + return + } + + if *fixReadme { + if err := os.WriteFile(readmePath, buf.Bytes(), 0666); err != nil { + t.Fatal(err) + } + t.Logf("wrote %d bytes to %s", buf.Len(), readmePath) + } else { + t.Logf("\n%s", diff) + t.Errorf("%s is stale. To update, run 'go generate cmd/go'.", readmePath) + } +} + +const readmeTmpl = ` +This file is generated by 'go generate cmd/go'. DO NOT EDIT. + +This directory holds test scripts *.txt run during 'go test cmd/go'. +To run a specific script foo.txt + + go test cmd/go -run=Script/^foo$ + +In general script files should have short names: a few words, not whole sentences. +The first word should be the general category of behavior being tested, +often the name of a go subcommand (list, build, test, ...) or concept (vendor, pattern). + +Each script is a text archive (go doc internal/txtar). +The script begins with an actual command script to run +followed by the content of zero or more supporting files to +create in the script's temporary file system before it starts executing. + +As an example, run_hello.txt says: + + # hello world + go run hello.go + stderr 'hello world' + ! stdout . + + -- hello.go -- + package main + func main() { println("hello world") } + +Each script runs in a fresh temporary work directory tree, available to scripts as $WORK. 
+Scripts also have access to other environment variables, including: + + GOARCH= + GOCACHE= + GOEXE= + GOOS= + GOPATH=$WORK/gopath + GOPROXY= + GOROOT= + GOROOT_FINAL= + TESTGO_GOROOT= + HOME=/no-home + PATH= + TMPDIR=$WORK/tmp + GODEBUG= + devnull= + goversion= + +On Plan 9, the variables $path and $home are set instead of $PATH and $HOME. +On Windows, the variables $USERPROFILE and $TMP are set instead of +$HOME and $TMPDIR. + +The lines at the top of the script are a sequence of commands to be executed by +a small script engine configured in ../../script_test.go (not the system shell). + +The scripts' supporting files are unpacked relative to $GOPATH/src +(aka $WORK/gopath/src) and then the script begins execution in that directory as +well. Thus the example above runs in $WORK/gopath/src with GOPATH=$WORK/gopath +and $WORK/gopath/src/hello.go containing the listed contents. + +{{.Language}} + +When TestScript runs a script and the script fails, by default TestScript shows +the execution of the most recent phase of the script (since the last # comment) +and only shows the # comments for earlier phases. For example, here is a +multi-phase script with a bug in it: + + # GOPATH with p1 in d2, p2 in d2 + env GOPATH=$WORK${/}d1${:}$WORK${/}d2 + + # build & install p1 + env + go install -i p1 + ! stale p1 + ! stale p2 + + # modify p2 - p1 should appear stale + cp $WORK/p2x.go $WORK/d2/src/p2/p2.go + stale p1 p2 + + # build & install p1 again + go install -i p11 + ! stale p1 + ! stale p2 + + -- $WORK/d1/src/p1/p1.go -- + package p1 + import "p2" + func F() { p2.F() } + -- $WORK/d2/src/p2/p2.go -- + package p2 + func F() {} + -- $WORK/p2x.go -- + package p2 + func F() {} + func G() {} + +The bug is that the final phase installs p11 instead of p1. 
The test failure looks like: + + $ go test -run=Script + --- FAIL: TestScript (3.75s) + --- FAIL: TestScript/install_rebuild_gopath (0.16s) + script_test.go:223: + # GOPATH with p1 in d2, p2 in d2 (0.000s) + # build & install p1 (0.087s) + # modify p2 - p1 should appear stale (0.029s) + # build & install p1 again (0.022s) + > go install -i p11 + [stderr] + can't load package: package p11: cannot find package "p11" in any of: + /Users/rsc/go/src/p11 (from $GOROOT) + $WORK/d1/src/p11 (from $GOPATH) + $WORK/d2/src/p11 + [exit status 1] + FAIL: unexpected go command failure + + script_test.go:73: failed at testdata/script/install_rebuild_gopath.txt:15 in $WORK/gopath/src + + FAIL + exit status 1 + FAIL cmd/go 4.875s + $ + +Note that the commands in earlier phases have been hidden, so that the relevant +commands are more easily found, and the elapsed time for a completed phase +is shown next to the phase heading. To see the entire execution, use "go test -v", +which also adds an initial environment dump to the beginning of the log. + +Note also that in reported output, the actual name of the per-script temporary directory +has been consistently replaced with the literal string $WORK. 
+ +The cmd/go test flag -testwork (which must appear on the "go test" command line after +standard test flags) causes each test to log the name of its $WORK directory and other +environment variable settings and also to leave that directory behind when it exits, +for manual debugging of failing tests: + + $ go test -run=Script -work + --- FAIL: TestScript (3.75s) + --- FAIL: TestScript/install_rebuild_gopath (0.16s) + script_test.go:223: + WORK=/tmp/cmd-go-test-745953508/script-install_rebuild_gopath + GOARCH= + GOCACHE=/Users/rsc/Library/Caches/go-build + GOOS= + GOPATH=$WORK/gopath + GOROOT=/Users/rsc/go + HOME=/no-home + TMPDIR=$WORK/tmp + exe= + + # GOPATH with p1 in d2, p2 in d2 (0.000s) + # build & install p1 (0.085s) + # modify p2 - p1 should appear stale (0.030s) + # build & install p1 again (0.019s) + > go install -i p11 + [stderr] + can't load package: package p11: cannot find package "p11" in any of: + /Users/rsc/go/src/p11 (from $GOROOT) + $WORK/d1/src/p11 (from $GOPATH) + $WORK/d2/src/p11 + [exit status 1] + FAIL: unexpected go command failure + + script_test.go:73: failed at testdata/script/install_rebuild_gopath.txt:15 in $WORK/gopath/src + + FAIL + exit status 1 + FAIL cmd/go 4.875s + $ + + $ WORK=/tmp/cmd-go-test-745953508/script-install_rebuild_gopath + $ cd $WORK/d1/src/p1 + $ cat p1.go + package p1 + import "p2" + func F() { p2.F() } + $ + +The available commands are: +{{.Commands}} + +The available conditions are: +{{.Conditions}} +` diff --git a/platform/dbops/binaries/go/go/src/cmd/go/stop_other_test.go b/platform/dbops/binaries/go/go/src/cmd/go/stop_other_test.go new file mode 100644 index 0000000000000000000000000000000000000000..cb4569b91d4a7791761830e8b2cbc05ae7b89a39 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/stop_other_test.go @@ -0,0 +1,24 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !(unix || (js && wasm)) + +package main_test + +import ( + "os" + "runtime" +) + +// quitSignal returns the appropriate signal to use to request that a process +// quit execution. +func quitSignal() os.Signal { + if runtime.GOOS == "windows" { + // Per https://golang.org/pkg/os/#Signal, “Interrupt is not implemented on + // Windows; using it with os.Process.Signal will return an error.” + // Fall back to Kill instead. + return os.Kill + } + return os.Interrupt +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/stop_unix_test.go b/platform/dbops/binaries/go/go/src/cmd/go/stop_unix_test.go new file mode 100644 index 0000000000000000000000000000000000000000..baa1427465d796e918cb8d776092e95d39e6e1be --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/stop_unix_test.go @@ -0,0 +1,16 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || (js && wasm) + +package main_test + +import ( + "os" + "syscall" +) + +func quitSignal() os.Signal { + return syscall.SIGQUIT +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/terminal_test.go b/platform/dbops/binaries/go/go/src/cmd/go/terminal_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a5ad9191c2a470880d4e12d36bd964364055eab6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/terminal_test.go @@ -0,0 +1,130 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main_test + +import ( + "errors" + "internal/testenv" + "internal/testpty" + "io" + "os" + "testing" + + "golang.org/x/term" +) + +func TestTerminalPassthrough(t *testing.T) { + // Check that if 'go test' is run with a terminal connected to stdin/stdout, + // then the go command passes that terminal down to the test binary + // invocation (rather than, e.g., putting a pipe in the way). + // + // See issue 18153. + testenv.MustHaveGoBuild(t) + + // Start with a "self test" to make sure that if we *don't* pass in a + // terminal, the test can correctly detect that. (cmd/go doesn't guarantee + // that it won't add a terminal in the middle, but that would be pretty weird.) + t.Run("pipe", func(t *testing.T) { + r, w, err := os.Pipe() + if err != nil { + t.Fatalf("pipe failed: %s", err) + } + defer r.Close() + defer w.Close() + stdout, stderr := runTerminalPassthrough(t, r, w) + if stdout { + t.Errorf("stdout is unexpectedly a terminal") + } + if stderr { + t.Errorf("stderr is unexpectedly a terminal") + } + }) + + // Now test with a read PTY. 
+ t.Run("pty", func(t *testing.T) { + r, processTTY, err := testpty.Open() + if errors.Is(err, testpty.ErrNotSupported) { + t.Skipf("%s", err) + } else if err != nil { + t.Fatalf("failed to open test PTY: %s", err) + } + defer r.Close() + w, err := os.OpenFile(processTTY, os.O_RDWR, 0) + if err != nil { + t.Fatal(err) + } + defer w.Close() + stdout, stderr := runTerminalPassthrough(t, r, w) + if !stdout { + t.Errorf("stdout is not a terminal") + } + if !stderr { + t.Errorf("stderr is not a terminal") + } + }) +} + +func runTerminalPassthrough(t *testing.T, r, w *os.File) (stdout, stderr bool) { + cmd := testenv.Command(t, testGo, "test", "-run=^$") + cmd.Env = append(cmd.Environ(), "GO_TEST_TERMINAL_PASSTHROUGH=1") + cmd.Stdout = w + cmd.Stderr = w + + // The behavior of reading from a PTY after the child closes it is very + // strange: on Linux, Read returns EIO, and on at least some versions of + // macOS, unread output may be discarded (see https://go.dev/issue/57141). + // + // To avoid that situation, we keep the child process running until the + // parent has finished reading from the PTY, at which point we unblock the + // child by closing its stdin pipe. 
+ stdin, err := cmd.StdinPipe() + if err != nil { + t.Fatal(err) + } + + t.Logf("running %s", cmd) + err = cmd.Start() + if err != nil { + t.Fatalf("starting subprocess: %s", err) + } + w.Close() + t.Cleanup(func() { + stdin.Close() + if err := cmd.Wait(); err != nil { + t.Errorf("suprocess failed with: %s", err) + } + }) + + buf := make([]byte, 2) + n, err := io.ReadFull(r, buf) + if err != nil || !(buf[0] == '1' || buf[0] == 'X') || !(buf[1] == '2' || buf[1] == 'X') { + t.Logf("read error: %v", err) + t.Fatalf("expected 2 bytes matching `[1X][2X]`; got %q", buf[:n]) + } + return buf[0] == '1', buf[1] == '2' +} + +func init() { + if os.Getenv("GO_TEST_TERMINAL_PASSTHROUGH") == "" { + return + } + + if term.IsTerminal(1) { + os.Stdout.WriteString("1") + } else { + os.Stdout.WriteString("X") + } + if term.IsTerminal(2) { + os.Stdout.WriteString("2") + } else { + os.Stdout.WriteString("X") + } + + // Before exiting, wait for the parent process to read the PTY output, + // at which point it will close stdin. + io.Copy(io.Discard, os.Stdin) + + os.Exit(0) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/gofmt/doc.go b/platform/dbops/binaries/go/go/src/cmd/gofmt/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..8ac9c6a931711df3c65b58611d77fc000578175d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/gofmt/doc.go @@ -0,0 +1,106 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Gofmt formats Go programs. +It uses tabs for indentation and blanks for alignment. +Alignment assumes that an editor is using a fixed-width font. + +Without an explicit path, it processes the standard input. Given a file, +it operates on that file; given a directory, it operates on all .go files in +that directory, recursively. (Files starting with a period are ignored.) +By default, gofmt prints the reformatted sources to standard output. 
+ +Usage: + + gofmt [flags] [path ...] + +The flags are: + + -d + Do not print reformatted sources to standard output. + If a file's formatting is different than gofmt's, print diffs + to standard output. + -e + Print all (including spurious) errors. + -l + Do not print reformatted sources to standard output. + If a file's formatting is different from gofmt's, print its name + to standard output. + -r rule + Apply the rewrite rule to the source before reformatting. + -s + Try to simplify code (after applying the rewrite rule, if any). + -w + Do not print reformatted sources to standard output. + If a file's formatting is different from gofmt's, overwrite it + with gofmt's version. If an error occurred during overwriting, + the original file is restored from an automatic backup. + +Debugging support: + + -cpuprofile filename + Write cpu profile to the specified file. + +The rewrite rule specified with the -r flag must be a string of the form: + + pattern -> replacement + +Both pattern and replacement must be valid Go expressions. +In the pattern, single-character lowercase identifiers serve as +wildcards matching arbitrary sub-expressions; those expressions +will be substituted for the same identifiers in the replacement. + +When gofmt reads from standard input, it accepts either a full Go program +or a program fragment. A program fragment must be a syntactically +valid declaration list, statement list, or expression. When formatting +such a fragment, gofmt preserves leading indentation as well as leading +and trailing spaces, so that individual sections of a Go program can be +formatted by piping them through gofmt. 
+ +# Examples + +To check files for unnecessary parentheses: + + gofmt -r '(a) -> a' -l *.go + +To remove the parentheses: + + gofmt -r '(a) -> a' -w *.go + +To convert the package tree from explicit slice upper bounds to implicit ones: + + gofmt -r 'α[β:len(α)] -> α[β:]' -w $GOROOT/src + +# The simplify command + +When invoked with -s gofmt will make the following source transformations where possible. + + An array, slice, or map composite literal of the form: + []T{T{}, T{}} + will be simplified to: + []T{{}, {}} + + A slice expression of the form: + s[a:len(s)] + will be simplified to: + s[a:] + + A range of the form: + for x, _ = range v {...} + will be simplified to: + for x = range v {...} + + A range of the form: + for _ = range v {...} + will be simplified to: + for range v {...} + +This may result in changes that are incompatible with earlier versions of Go. +*/ +package main + +// BUG(rsc): The implementation of -r is a bit slow. +// BUG(gri): If -w fails, the restored original file may not have some of the +// original file attributes. diff --git a/platform/dbops/binaries/go/go/src/cmd/gofmt/gofmt.go b/platform/dbops/binaries/go/go/src/cmd/gofmt/gofmt.go new file mode 100644 index 0000000000000000000000000000000000000000..f4fb6bff84473e76afbad37c8ea3ec8d88795088 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/gofmt/gofmt.go @@ -0,0 +1,570 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "context" + "flag" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/scanner" + "go/token" + "internal/diff" + "io" + "io/fs" + "math/rand" + "os" + "path/filepath" + "runtime" + "runtime/pprof" + "strconv" + "strings" + + "golang.org/x/sync/semaphore" +) + +var ( + // main operation modes + list = flag.Bool("l", false, "list files whose formatting differs from gofmt's") + write = flag.Bool("w", false, "write result to (source) file instead of stdout") + rewriteRule = flag.String("r", "", "rewrite rule (e.g., 'a[b:len(a)] -> a[b:]')") + simplifyAST = flag.Bool("s", false, "simplify code") + doDiff = flag.Bool("d", false, "display diffs instead of rewriting files") + allErrors = flag.Bool("e", false, "report all errors (not just the first 10 on different lines)") + + // debugging + cpuprofile = flag.String("cpuprofile", "", "write cpu profile to this file") +) + +// Keep these in sync with go/format/format.go. +const ( + tabWidth = 8 + printerMode = printer.UseSpaces | printer.TabIndent | printerNormalizeNumbers + + // printerNormalizeNumbers means to canonicalize number literal prefixes + // and exponents while printing. See https://golang.org/doc/go1.13#gofmt. + // + // This value is defined in go/printer specifically for go/format and cmd/gofmt. + printerNormalizeNumbers = 1 << 30 +) + +// fdSem guards the number of concurrently-open file descriptors. +// +// For now, this is arbitrarily set to 200, based on the observation that many +// platforms default to a kernel limit of 256. Ideally, perhaps we should derive +// it from rlimit on platforms that support that system call. +// +// File descriptors opened from outside of this package are not tracked, +// so this limit may be approximate. 
+var fdSem = make(chan bool, 200) + +var ( + rewrite func(*token.FileSet, *ast.File) *ast.File + parserMode parser.Mode +) + +func usage() { + fmt.Fprintf(os.Stderr, "usage: gofmt [flags] [path ...]\n") + flag.PrintDefaults() +} + +func initParserMode() { + parserMode = parser.ParseComments + if *allErrors { + parserMode |= parser.AllErrors + } + // It's only -r that makes use of go/ast's object resolution, + // so avoid the unnecessary work if the flag isn't used. + if *rewriteRule == "" { + parserMode |= parser.SkipObjectResolution + } +} + +func isGoFile(f fs.DirEntry) bool { + // ignore non-Go files + name := f.Name() + return !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") && !f.IsDir() +} + +// A sequencer performs concurrent tasks that may write output, but emits that +// output in a deterministic order. +type sequencer struct { + maxWeight int64 + sem *semaphore.Weighted // weighted by input bytes (an approximate proxy for memory overhead) + prev <-chan *reporterState // 1-buffered +} + +// newSequencer returns a sequencer that allows concurrent tasks up to maxWeight +// and writes tasks' output to out and err. +func newSequencer(maxWeight int64, out, err io.Writer) *sequencer { + sem := semaphore.NewWeighted(maxWeight) + prev := make(chan *reporterState, 1) + prev <- &reporterState{out: out, err: err} + return &sequencer{ + maxWeight: maxWeight, + sem: sem, + prev: prev, + } +} + +// exclusive is a weight that can be passed to a sequencer to cause +// a task to be executed without any other concurrent tasks. +const exclusive = -1 + +// Add blocks until the sequencer has enough weight to spare, then adds f as a +// task to be executed concurrently. +// +// If the weight is either negative or larger than the sequencer's maximum +// weight, Add blocks until all other tasks have completed, then the task +// executes exclusively (blocking all other calls to Add until it completes). 
+// +// f may run concurrently in a goroutine, but its output to the passed-in +// reporter will be sequential relative to the other tasks in the sequencer. +// +// If f invokes a method on the reporter, execution of that method may block +// until the previous task has finished. (To maximize concurrency, f should +// avoid invoking the reporter until it has finished any parallelizable work.) +// +// If f returns a non-nil error, that error will be reported after f's output +// (if any) and will cause a nonzero final exit code. +func (s *sequencer) Add(weight int64, f func(*reporter) error) { + if weight < 0 || weight > s.maxWeight { + weight = s.maxWeight + } + if err := s.sem.Acquire(context.TODO(), weight); err != nil { + // Change the task from "execute f" to "report err". + weight = 0 + f = func(*reporter) error { return err } + } + + r := &reporter{prev: s.prev} + next := make(chan *reporterState, 1) + s.prev = next + + // Start f in parallel: it can run until it invokes a method on r, at which + // point it will block until the previous task releases the output state. + go func() { + if err := f(r); err != nil { + r.Report(err) + } + next <- r.getState() // Release the next task. + s.sem.Release(weight) + }() +} + +// AddReport prints an error to s after the output of any previously-added +// tasks, causing the final exit code to be nonzero. +func (s *sequencer) AddReport(err error) { + s.Add(0, func(*reporter) error { return err }) +} + +// GetExitCode waits for all previously-added tasks to complete, then returns an +// exit code for the sequence suitable for passing to os.Exit. +func (s *sequencer) GetExitCode() int { + c := make(chan int, 1) + s.Add(0, func(r *reporter) error { + c <- r.ExitCode() + return nil + }) + return <-c +} + +// A reporter reports output, warnings, and errors. +type reporter struct { + prev <-chan *reporterState + state *reporterState +} + +// reporterState carries the state of a reporter instance. 
+// +// Only one reporter at a time may have access to a reporterState. +type reporterState struct { + out, err io.Writer + exitCode int +} + +// getState blocks until any prior reporters are finished with the reporter +// state, then returns the state for manipulation. +func (r *reporter) getState() *reporterState { + if r.state == nil { + r.state = <-r.prev + } + return r.state +} + +// Warnf emits a warning message to the reporter's error stream, +// without changing its exit code. +func (r *reporter) Warnf(format string, args ...any) { + fmt.Fprintf(r.getState().err, format, args...) +} + +// Write emits a slice to the reporter's output stream. +// +// Any error is returned to the caller, and does not otherwise affect the +// reporter's exit code. +func (r *reporter) Write(p []byte) (int, error) { + return r.getState().out.Write(p) +} + +// Report emits a non-nil error to the reporter's error stream, +// changing its exit code to a nonzero value. +func (r *reporter) Report(err error) { + if err == nil { + panic("Report with nil error") + } + st := r.getState() + scanner.PrintError(st.err, err) + st.exitCode = 2 +} + +func (r *reporter) ExitCode() int { + return r.getState().exitCode +} + +// If info == nil, we are formatting stdin instead of a file. +// If in == nil, the source is the contents of the file with the given filename. +func processFile(filename string, info fs.FileInfo, in io.Reader, r *reporter) error { + src, err := readFile(filename, info, in) + if err != nil { + return err + } + + fileSet := token.NewFileSet() + // If we are formatting stdin, we accept a program fragment in lieu of a + // complete source file. 
+ fragmentOk := info == nil + file, sourceAdj, indentAdj, err := parse(fileSet, filename, src, fragmentOk) + if err != nil { + return err + } + + if rewrite != nil { + if sourceAdj == nil { + file = rewrite(fileSet, file) + } else { + r.Warnf("warning: rewrite ignored for incomplete programs\n") + } + } + + ast.SortImports(fileSet, file) + + if *simplifyAST { + simplify(file) + } + + res, err := format(fileSet, file, sourceAdj, indentAdj, src, printer.Config{Mode: printerMode, Tabwidth: tabWidth}) + if err != nil { + return err + } + + if !bytes.Equal(src, res) { + // formatting has changed + if *list { + fmt.Fprintln(r, filename) + } + if *write { + if info == nil { + panic("-w should not have been allowed with stdin") + } + + perm := info.Mode().Perm() + if err := writeFile(filename, src, res, perm, info.Size()); err != nil { + return err + } + } + if *doDiff { + newName := filepath.ToSlash(filename) + oldName := newName + ".orig" + r.Write(diff.Diff(oldName, src, newName, res)) + } + } + + if !*list && !*write && !*doDiff { + _, err = r.Write(res) + } + + return err +} + +// readFile reads the contents of filename, described by info. +// If in is non-nil, readFile reads directly from it. +// Otherwise, readFile opens and reads the file itself, +// with the number of concurrently-open files limited by fdSem. +func readFile(filename string, info fs.FileInfo, in io.Reader) ([]byte, error) { + if in == nil { + fdSem <- true + var err error + f, err := os.Open(filename) + if err != nil { + return nil, err + } + in = f + defer func() { + f.Close() + <-fdSem + }() + } + + // Compute the file's size and read its contents with minimal allocations. + // + // If we have the FileInfo from filepath.WalkDir, use it to make + // a buffer of the right size and avoid ReadAll's reallocations. + // + // If the size is unknown (or bogus, or overflows an int), fall back to + // a size-independent ReadAll. 
+ size := -1 + if info != nil && info.Mode().IsRegular() && int64(int(info.Size())) == info.Size() { + size = int(info.Size()) + } + if size+1 <= 0 { + // The file is not known to be regular, so we don't have a reliable size for it. + var err error + src, err := io.ReadAll(in) + if err != nil { + return nil, err + } + return src, nil + } + + // We try to read size+1 bytes so that we can detect modifications: if we + // read more than size bytes, then the file was modified concurrently. + // (If that happens, we could, say, append to src to finish the read, or + // proceed with a truncated buffer — but the fact that it changed at all + // indicates a possible race with someone editing the file, so we prefer to + // stop to avoid corrupting it.) + src := make([]byte, size+1) + n, err := io.ReadFull(in, src) + switch err { + case nil, io.EOF, io.ErrUnexpectedEOF: + // io.ReadFull returns io.EOF (for an empty file) or io.ErrUnexpectedEOF + // (for a non-empty file) if the file was changed unexpectedly. Continue + // with comparing file sizes in those cases. + default: + return nil, err + } + if n < size { + return nil, fmt.Errorf("error: size of %s changed during reading (from %d to %d bytes)", filename, size, n) + } else if n > size { + return nil, fmt.Errorf("error: size of %s changed during reading (from %d to >=%d bytes)", filename, size, len(src)) + } + return src[:n], nil +} + +func main() { + // Arbitrarily limit in-flight work to 2MiB times the number of threads. + // + // The actual overhead for the parse tree and output will depend on the + // specifics of the file, but this at least keeps the footprint of the process + // roughly proportional to GOMAXPROCS. + maxWeight := (2 << 20) * int64(runtime.GOMAXPROCS(0)) + s := newSequencer(maxWeight, os.Stdout, os.Stderr) + + // call gofmtMain in a separate function + // so that it can use defer and have them + // run before the exit. 
+ gofmtMain(s) + os.Exit(s.GetExitCode()) +} + +func gofmtMain(s *sequencer) { + flag.Usage = usage + flag.Parse() + + if *cpuprofile != "" { + fdSem <- true + f, err := os.Create(*cpuprofile) + if err != nil { + s.AddReport(fmt.Errorf("creating cpu profile: %s", err)) + return + } + defer func() { + f.Close() + <-fdSem + }() + pprof.StartCPUProfile(f) + defer pprof.StopCPUProfile() + } + + initParserMode() + initRewrite() + + args := flag.Args() + if len(args) == 0 { + if *write { + s.AddReport(fmt.Errorf("error: cannot use -w with standard input")) + return + } + s.Add(0, func(r *reporter) error { + return processFile("", nil, os.Stdin, r) + }) + return + } + + for _, arg := range args { + switch info, err := os.Stat(arg); { + case err != nil: + s.AddReport(err) + case !info.IsDir(): + // Non-directory arguments are always formatted. + arg := arg + s.Add(fileWeight(arg, info), func(r *reporter) error { + return processFile(arg, info, nil, r) + }) + default: + // Directories are walked, ignoring non-Go files. + err := filepath.WalkDir(arg, func(path string, f fs.DirEntry, err error) error { + if err != nil || !isGoFile(f) { + return err + } + info, err := f.Info() + if err != nil { + s.AddReport(err) + return nil + } + s.Add(fileWeight(path, info), func(r *reporter) error { + return processFile(path, info, nil, r) + }) + return nil + }) + if err != nil { + s.AddReport(err) + } + } + } +} + +func fileWeight(path string, info fs.FileInfo) int64 { + if info == nil { + return exclusive + } + if info.Mode().Type() == fs.ModeSymlink { + var err error + info, err = os.Stat(path) + if err != nil { + return exclusive + } + } + if !info.Mode().IsRegular() { + // For non-regular files, FileInfo.Size is system-dependent and thus not a + // reliable indicator of weight. + return exclusive + } + return info.Size() +} + +// writeFile updates a file with the new formatted data. 
+func writeFile(filename string, orig, formatted []byte, perm fs.FileMode, size int64) error { + // Make a temporary backup file before rewriting the original file. + bakname, err := backupFile(filename, orig, perm) + if err != nil { + return err + } + + fdSem <- true + defer func() { <-fdSem }() + + fout, err := os.OpenFile(filename, os.O_WRONLY, perm) + if err != nil { + // We couldn't even open the file, so it should + // not have changed. + os.Remove(bakname) + return err + } + defer fout.Close() // for error paths + + restoreFail := func(err error) { + fmt.Fprintf(os.Stderr, "gofmt: %s: error restoring file to original: %v; backup in %s\n", filename, err, bakname) + } + + n, err := fout.Write(formatted) + if err == nil && int64(n) < size { + err = fout.Truncate(int64(n)) + } + + if err != nil { + // Rewriting the file failed. + + if n == 0 { + // Original file unchanged. + os.Remove(bakname) + return err + } + + // Try to restore the original contents. + + no, erro := fout.WriteAt(orig, 0) + if erro != nil { + // That failed too. + restoreFail(erro) + return err + } + + if no < n { + // Original file is shorter. Truncate. + if erro = fout.Truncate(int64(no)); erro != nil { + restoreFail(erro) + return err + } + } + + if erro := fout.Close(); erro != nil { + restoreFail(erro) + return err + } + + // Original contents restored. + os.Remove(bakname) + return err + } + + if err := fout.Close(); err != nil { + restoreFail(err) + return err + } + + // File updated. + os.Remove(bakname) + return nil +} + +// backupFile writes data to a new file named filename with permissions perm, +// with randomly chosen such that the file name is unique. backupFile returns +// the chosen file name. 
+func backupFile(filename string, data []byte, perm fs.FileMode) (string, error) { + fdSem <- true + defer func() { <-fdSem }() + + nextRandom := func() string { + return strconv.Itoa(rand.Int()) + } + + dir, base := filepath.Split(filename) + var ( + bakname string + f *os.File + ) + for { + bakname = filepath.Join(dir, base+"."+nextRandom()) + var err error + f, err = os.OpenFile(bakname, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm) + if err == nil { + break + } + if err != nil && !os.IsExist(err) { + return "", err + } + } + + // write data to backup file + _, err := f.Write(data) + if err1 := f.Close(); err == nil { + err = err1 + } + + return bakname, err +} diff --git a/platform/dbops/binaries/go/go/src/cmd/gofmt/gofmt_test.go b/platform/dbops/binaries/go/go/src/cmd/gofmt/gofmt_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6b80673af148f58a2dea38d71536d8c5755c1e6e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/gofmt/gofmt_test.go @@ -0,0 +1,195 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "flag" + "internal/diff" + "os" + "path/filepath" + "strings" + "testing" + "text/scanner" +) + +var update = flag.Bool("update", false, "update .golden files") + +// gofmtFlags looks for a comment of the form +// +// //gofmt flags +// +// within the first maxLines lines of the given file, +// and returns the flags string, if any. Otherwise it +// returns the empty string. 
+func gofmtFlags(filename string, maxLines int) string { + f, err := os.Open(filename) + if err != nil { + return "" // ignore errors - they will be found later + } + defer f.Close() + + // initialize scanner + var s scanner.Scanner + s.Init(f) + s.Error = func(*scanner.Scanner, string) {} // ignore errors + s.Mode = scanner.GoTokens &^ scanner.SkipComments // want comments + + // look for //gofmt comment + for s.Line <= maxLines { + switch s.Scan() { + case scanner.Comment: + const prefix = "//gofmt " + if t := s.TokenText(); strings.HasPrefix(t, prefix) { + return strings.TrimSpace(t[len(prefix):]) + } + case scanner.EOF: + return "" + } + } + + return "" +} + +func runTest(t *testing.T, in, out string) { + // process flags + *simplifyAST = false + *rewriteRule = "" + info, err := os.Lstat(in) + if err != nil { + t.Error(err) + return + } + for _, flag := range strings.Split(gofmtFlags(in, 20), " ") { + elts := strings.SplitN(flag, "=", 2) + name := elts[0] + value := "" + if len(elts) == 2 { + value = elts[1] + } + switch name { + case "": + // no flags + case "-r": + *rewriteRule = value + case "-s": + *simplifyAST = true + case "-stdin": + // fake flag - pretend input is from stdin + info = nil + default: + t.Errorf("unrecognized flag name: %s", name) + } + } + + initParserMode() + initRewrite() + + const maxWeight = 2 << 20 + var buf, errBuf bytes.Buffer + s := newSequencer(maxWeight, &buf, &errBuf) + s.Add(fileWeight(in, info), func(r *reporter) error { + return processFile(in, info, nil, r) + }) + if errBuf.Len() > 0 { + t.Logf("%q", errBuf.Bytes()) + } + if s.GetExitCode() != 0 { + t.Fail() + } + + expected, err := os.ReadFile(out) + if err != nil { + t.Error(err) + return + } + + if got := buf.Bytes(); !bytes.Equal(got, expected) { + if *update { + if in != out { + if err := os.WriteFile(out, got, 0666); err != nil { + t.Error(err) + } + return + } + // in == out: don't accidentally destroy input + t.Errorf("WARNING: -update did not rewrite input file 
%s", in) + } + + t.Errorf("(gofmt %s) != %s (see %s.gofmt)\n%s", in, out, in, + diff.Diff("expected", expected, "got", got)) + if err := os.WriteFile(in+".gofmt", got, 0666); err != nil { + t.Error(err) + } + } +} + +// TestRewrite processes testdata/*.input files and compares them to the +// corresponding testdata/*.golden files. The gofmt flags used to process +// a file must be provided via a comment of the form +// +// //gofmt flags +// +// in the processed file within the first 20 lines, if any. +func TestRewrite(t *testing.T) { + // determine input files + match, err := filepath.Glob("testdata/*.input") + if err != nil { + t.Fatal(err) + } + + // add larger examples + match = append(match, "gofmt.go", "gofmt_test.go") + + for _, in := range match { + name := filepath.Base(in) + t.Run(name, func(t *testing.T) { + out := in // for files where input and output are identical + if strings.HasSuffix(in, ".input") { + out = in[:len(in)-len(".input")] + ".golden" + } + runTest(t, in, out) + if in != out && !t.Failed() { + // Check idempotence. + runTest(t, out, out) + } + }) + } +} + +// Test case for issue 3961. 
+func TestCRLF(t *testing.T) { + const input = "testdata/crlf.input" // must contain CR/LF's + const golden = "testdata/crlf.golden" // must not contain any CR's + + data, err := os.ReadFile(input) + if err != nil { + t.Error(err) + } + if !bytes.Contains(data, []byte("\r\n")) { + t.Errorf("%s contains no CR/LF's", input) + } + + data, err = os.ReadFile(golden) + if err != nil { + t.Error(err) + } + if bytes.Contains(data, []byte("\r")) { + t.Errorf("%s contains CR's", golden) + } +} + +func TestBackupFile(t *testing.T) { + dir, err := os.MkdirTemp("", "gofmt_test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + name, err := backupFile(filepath.Join(dir, "foo.go"), []byte(" package main"), 0644) + if err != nil { + t.Fatal(err) + } + t.Logf("Created: %s", name) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/gofmt/gofmt_unix_test.go b/platform/dbops/binaries/go/go/src/cmd/gofmt/gofmt_unix_test.go new file mode 100644 index 0000000000000000000000000000000000000000..fec514380f6354f23760c295f8ef88d7113a2d9b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/gofmt/gofmt_unix_test.go @@ -0,0 +1,67 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package main + +import ( + "os" + "path/filepath" + "strings" + "testing" + "time" +) + +func TestPermissions(t *testing.T) { + if os.Getuid() == 0 { + t.Skip("skipping permission test when running as root") + } + + dir := t.TempDir() + fn := filepath.Join(dir, "perm.go") + + // Create a file that needs formatting without write permission. + if err := os.WriteFile(filepath.Join(fn), []byte(" package main"), 0o400); err != nil { + t.Fatal(err) + } + + // Set mtime of the file in the past. 
+ past := time.Now().Add(-time.Hour) + if err := os.Chtimes(fn, past, past); err != nil { + t.Fatal(err) + } + + info, err := os.Stat(fn) + if err != nil { + t.Fatal(err) + } + + defer func() { *write = false }() + *write = true + + initParserMode() + initRewrite() + + const maxWeight = 2 << 20 + var buf, errBuf strings.Builder + s := newSequencer(maxWeight, &buf, &errBuf) + s.Add(fileWeight(fn, info), func(r *reporter) error { + return processFile(fn, info, nil, r) + }) + if s.GetExitCode() == 0 { + t.Fatal("rewrite of read-only file succeeded unexpectedly") + } + if errBuf.Len() > 0 { + t.Log(errBuf) + } + + info, err = os.Stat(fn) + if err != nil { + t.Fatal(err) + } + if !info.ModTime().Equal(past) { + t.Errorf("after rewrite mod time is %v, want %v", info.ModTime(), past) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/gofmt/internal.go b/platform/dbops/binaries/go/go/src/cmd/gofmt/internal.go new file mode 100644 index 0000000000000000000000000000000000000000..058158ad4032d4290e751abf19006a6deeedee80 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/gofmt/internal.go @@ -0,0 +1,176 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO(gri): This file and the file src/go/format/internal.go are +// the same (but for this comment and the package name). Do not modify +// one without the other. Determine if we can factor out functionality +// in a public API. See also #11844 for context. + +package main + +import ( + "bytes" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "strings" +) + +// parse parses src, which was read from the named file, +// as a Go source file, declaration, or statement list. +func parse(fset *token.FileSet, filename string, src []byte, fragmentOk bool) ( + file *ast.File, + sourceAdj func(src []byte, indent int) []byte, + indentAdj int, + err error, +) { + // Try as whole source file. 
+ file, err = parser.ParseFile(fset, filename, src, parserMode) + // If there's no error, return. If the error is that the source file didn't begin with a + // package line and source fragments are ok, fall through to + // try as a source fragment. Stop and return on any other error. + if err == nil || !fragmentOk || !strings.Contains(err.Error(), "expected 'package'") { + return + } + + // If this is a declaration list, make it a source file + // by inserting a package clause. + // Insert using a ';', not a newline, so that the line numbers + // in psrc match the ones in src. + psrc := append([]byte("package p;"), src...) + file, err = parser.ParseFile(fset, filename, psrc, parserMode) + if err == nil { + sourceAdj = func(src []byte, indent int) []byte { + // Remove the package clause. + // Gofmt has turned the ';' into a '\n'. + src = src[indent+len("package p\n"):] + return bytes.TrimSpace(src) + } + return + } + // If the error is that the source file didn't begin with a + // declaration, fall through to try as a statement list. + // Stop and return on any other error. + if !strings.Contains(err.Error(), "expected declaration") { + return + } + + // If this is a statement list, make it a source file + // by inserting a package clause and turning the list + // into a function body. This handles expressions too. + // Insert using a ';', not a newline, so that the line numbers + // in fsrc match the ones in src. Add an extra '\n' before the '}' + // to make sure comments are flushed before the '}'. + fsrc := append(append([]byte("package p; func _() {"), src...), '\n', '\n', '}') + file, err = parser.ParseFile(fset, filename, fsrc, parserMode) + if err == nil { + sourceAdj = func(src []byte, indent int) []byte { + // Cap adjusted indent to zero. + if indent < 0 { + indent = 0 + } + // Remove the wrapping. + // Gofmt has turned the "; " into a "\n\n". + // There will be two non-blank lines with indent, hence 2*indent. 
+ src = src[2*indent+len("package p\n\nfunc _() {"):] + // Remove only the "}\n" suffix: remaining whitespaces will be trimmed anyway + src = src[:len(src)-len("}\n")] + return bytes.TrimSpace(src) + } + // Gofmt has also indented the function body one level. + // Adjust that with indentAdj. + indentAdj = -1 + } + + // Succeeded, or out of options. + return +} + +// format formats the given package file originally obtained from src +// and adjusts the result based on the original source via sourceAdj +// and indentAdj. +func format( + fset *token.FileSet, + file *ast.File, + sourceAdj func(src []byte, indent int) []byte, + indentAdj int, + src []byte, + cfg printer.Config, +) ([]byte, error) { + if sourceAdj == nil { + // Complete source file. + var buf bytes.Buffer + err := cfg.Fprint(&buf, fset, file) + if err != nil { + return nil, err + } + return buf.Bytes(), nil + } + + // Partial source file. + // Determine and prepend leading space. + i, j := 0, 0 + for j < len(src) && isSpace(src[j]) { + if src[j] == '\n' { + i = j + 1 // byte offset of last line in leading space + } + j++ + } + var res []byte + res = append(res, src[:i]...) + + // Determine and prepend indentation of first code line. + // Spaces are ignored unless there are no tabs, + // in which case spaces count as one tab. + indent := 0 + hasSpace := false + for _, b := range src[i:j] { + switch b { + case ' ': + hasSpace = true + case '\t': + indent++ + } + } + if indent == 0 && hasSpace { + indent = 1 + } + for i := 0; i < indent; i++ { + res = append(res, '\t') + } + + // Format the source. + // Write it without any leading and trailing space. + cfg.Indent = indent + indentAdj + var buf bytes.Buffer + err := cfg.Fprint(&buf, fset, file) + if err != nil { + return nil, err + } + out := sourceAdj(buf.Bytes(), cfg.Indent) + + // If the adjusted output is empty, the source + // was empty but (possibly) for white space. + // The result is the incoming source. 
+ if len(out) == 0 { + return src, nil + } + + // Otherwise, append output to leading space. + res = append(res, out...) + + // Determine and append trailing space. + i = len(src) + for i > 0 && isSpace(src[i-1]) { + i-- + } + return append(res, src[i:]...), nil +} + +// isSpace reports whether the byte is a space character. +// isSpace defines a space as being among the following bytes: ' ', '\t', '\n' and '\r'. +func isSpace(b byte) bool { + return b == ' ' || b == '\t' || b == '\n' || b == '\r' +} diff --git a/platform/dbops/binaries/go/go/src/cmd/gofmt/long_test.go b/platform/dbops/binaries/go/go/src/cmd/gofmt/long_test.go new file mode 100644 index 0000000000000000000000000000000000000000..21a01196cf6cc285b4e36f99ef5269cce3a79d11 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/gofmt/long_test.go @@ -0,0 +1,172 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This test applies gofmt to all Go files under -root. +// To test specific files provide a list of comma-separated +// filenames via the -files flag: go test -files=gofmt.go . 
+ +package main + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/printer" + "go/token" + "internal/testenv" + "io" + "io/fs" + "os" + "path/filepath" + "runtime" + "strings" + "testing" +) + +var ( + root = flag.String("root", runtime.GOROOT(), "test root directory") + files = flag.String("files", "", "comma-separated list of files to test") + ngo = flag.Int("n", runtime.NumCPU(), "number of goroutines used") + verbose = flag.Bool("verbose", false, "verbose mode") + nfiles int // number of files processed +) + +func gofmt(fset *token.FileSet, filename string, src *bytes.Buffer) error { + f, _, _, err := parse(fset, filename, src.Bytes(), false) + if err != nil { + return err + } + ast.SortImports(fset, f) + src.Reset() + return (&printer.Config{Mode: printerMode, Tabwidth: tabWidth}).Fprint(src, fset, f) +} + +func testFile(t *testing.T, b1, b2 *bytes.Buffer, filename string) { + // open file + f, err := os.Open(filename) + if err != nil { + t.Error(err) + return + } + + // read file + b1.Reset() + _, err = io.Copy(b1, f) + f.Close() + if err != nil { + t.Error(err) + return + } + + // exclude files w/ syntax errors (typically test cases) + fset := token.NewFileSet() + if _, _, _, err = parse(fset, filename, b1.Bytes(), false); err != nil { + if *verbose { + fmt.Fprintf(os.Stderr, "ignoring %s\n", err) + } + return + } + + // gofmt file + if err = gofmt(fset, filename, b1); err != nil { + t.Errorf("1st gofmt failed: %v", err) + return + } + + // make a copy of the result + b2.Reset() + b2.Write(b1.Bytes()) + + // gofmt result again + if err = gofmt(fset, filename, b2); err != nil { + t.Errorf("2nd gofmt failed: %v", err) + return + } + + // the first and 2nd result should be identical + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + // A known instance of gofmt not being idempotent + // (see Issue #24472) + if strings.HasSuffix(filename, "issue22662.go") { + t.Log("known gofmt idempotency bug (Issue #24472)") + return + } + t.Errorf("gofmt %s not idempotent", 
filename) + } +} + +func testFiles(t *testing.T, filenames <-chan string, done chan<- int) { + b1 := new(bytes.Buffer) + b2 := new(bytes.Buffer) + for filename := range filenames { + testFile(t, b1, b2, filename) + } + done <- 0 +} + +func genFilenames(t *testing.T, filenames chan<- string) { + defer close(filenames) + + handleFile := func(filename string, d fs.DirEntry, err error) error { + if err != nil { + t.Error(err) + return nil + } + // don't descend into testdata directories + if isGoFile(d) && !strings.Contains(filepath.ToSlash(filename), "/testdata/") { + filenames <- filename + nfiles++ + } + return nil + } + + // test Go files provided via -files, if any + if *files != "" { + for _, filename := range strings.Split(*files, ",") { + fi, err := os.Stat(filename) + handleFile(filename, fs.FileInfoToDirEntry(fi), err) + } + return // ignore files under -root + } + + // otherwise, test all Go files under *root + goroot := *root + if goroot == "" { + goroot = testenv.GOROOT(t) + } + filepath.WalkDir(goroot, handleFile) +} + +func TestAll(t *testing.T) { + if testing.Short() { + return + } + + if *ngo < 1 { + *ngo = 1 // make sure test is run + } + if *verbose { + fmt.Printf("running test using %d goroutines\n", *ngo) + } + + // generate filenames + filenames := make(chan string, 32) + go genFilenames(t, filenames) + + // launch test goroutines + done := make(chan int) + for i := 0; i < *ngo; i++ { + go testFiles(t, filenames, done) + } + + // wait for all test goroutines to complete + for i := 0; i < *ngo; i++ { + <-done + } + + if *verbose { + fmt.Printf("processed %d files\n", nfiles) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/gofmt/rewrite.go b/platform/dbops/binaries/go/go/src/cmd/gofmt/rewrite.go new file mode 100644 index 0000000000000000000000000000000000000000..8ed093041c120ee1f1b3f13b1bc4ac527aa66281 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/gofmt/rewrite.go @@ -0,0 +1,309 @@ +// Copyright 2009 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "os" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +func initRewrite() { + if *rewriteRule == "" { + rewrite = nil // disable any previous rewrite + return + } + f := strings.Split(*rewriteRule, "->") + if len(f) != 2 { + fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n") + os.Exit(2) + } + pattern := parseExpr(f[0], "pattern") + replace := parseExpr(f[1], "replacement") + rewrite = func(fset *token.FileSet, p *ast.File) *ast.File { + return rewriteFile(fset, pattern, replace, p) + } +} + +// parseExpr parses s as an expression. +// It might make sense to expand this to allow statement patterns, +// but there are problems with preserving formatting and also +// with what a wildcard for a statement looks like. +func parseExpr(s, what string) ast.Expr { + x, err := parser.ParseExpr(s) + if err != nil { + fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err) + os.Exit(2) + } + return x +} + +// Keep this function for debugging. +/* +func dump(msg string, val reflect.Value) { + fmt.Printf("%s:\n", msg) + ast.Print(fileSet, val.Interface()) + fmt.Println() +} +*/ + +// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file. 
+func rewriteFile(fileSet *token.FileSet, pattern, replace ast.Expr, p *ast.File) *ast.File { + cmap := ast.NewCommentMap(fileSet, p, p.Comments) + m := make(map[string]reflect.Value) + pat := reflect.ValueOf(pattern) + repl := reflect.ValueOf(replace) + + var rewriteVal func(val reflect.Value) reflect.Value + rewriteVal = func(val reflect.Value) reflect.Value { + // don't bother if val is invalid to start with + if !val.IsValid() { + return reflect.Value{} + } + val = apply(rewriteVal, val) + clear(m) + if match(m, pat, val) { + val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos())) + } + return val + } + + r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File) + r.Comments = cmap.Filter(r).Comments() // recreate comments list + return r +} + +// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y. +func set(x, y reflect.Value) { + // don't bother if x cannot be set or y is invalid + if !x.CanSet() || !y.IsValid() { + return + } + defer func() { + if x := recover(); x != nil { + if s, ok := x.(string); ok && + (strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) { + // x cannot be set to y - ignore this rewrite + return + } + panic(x) + } + }() + x.Set(y) +} + +// Values/types for special cases. +var ( + objectPtrNil = reflect.ValueOf((*ast.Object)(nil)) + scopePtrNil = reflect.ValueOf((*ast.Scope)(nil)) + + identType = reflect.TypeOf((*ast.Ident)(nil)) + objectPtrType = reflect.TypeOf((*ast.Object)(nil)) + positionType = reflect.TypeOf(token.NoPos) + callExprType = reflect.TypeOf((*ast.CallExpr)(nil)) + scopePtrType = reflect.TypeOf((*ast.Scope)(nil)) +) + +// apply replaces each AST field x in val with f(x), returning val. +// To avoid extra conversions, f operates on the reflect.Value form. 
+func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value { + if !val.IsValid() { + return reflect.Value{} + } + + // *ast.Objects introduce cycles and are likely incorrect after + // rewrite; don't follow them but replace with nil instead + if val.Type() == objectPtrType { + return objectPtrNil + } + + // similarly for scopes: they are likely incorrect after a rewrite; + // replace them with nil + if val.Type() == scopePtrType { + return scopePtrNil + } + + switch v := reflect.Indirect(val); v.Kind() { + case reflect.Slice: + for i := 0; i < v.Len(); i++ { + e := v.Index(i) + set(e, f(e)) + } + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + e := v.Field(i) + set(e, f(e)) + } + case reflect.Interface: + e := v.Elem() + set(v, f(e)) + } + return val +} + +func isWildcard(s string) bool { + rune, size := utf8.DecodeRuneInString(s) + return size == len(s) && unicode.IsLower(rune) +} + +// match reports whether pattern matches val, +// recording wildcard submatches in m. +// If m == nil, match checks whether pattern == val. +func match(m map[string]reflect.Value, pattern, val reflect.Value) bool { + // Wildcard matches any expression. If it appears multiple + // times in the pattern, it must match the same expression + // each time. + if m != nil && pattern.IsValid() && pattern.Type() == identType { + name := pattern.Interface().(*ast.Ident).Name + if isWildcard(name) && val.IsValid() { + // wildcards only match valid (non-nil) expressions. + if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() { + if old, ok := m[name]; ok { + return match(nil, old, val) + } + m[name] = val + return true + } + } + } + + // Otherwise, pattern and val must match recursively. + if !pattern.IsValid() || !val.IsValid() { + return !pattern.IsValid() && !val.IsValid() + } + if pattern.Type() != val.Type() { + return false + } + + // Special cases. 
+ switch pattern.Type() { + case identType: + // For identifiers, only the names need to match + // (and none of the other *ast.Object information). + // This is a common case, handle it all here instead + // of recursing down any further via reflection. + p := pattern.Interface().(*ast.Ident) + v := val.Interface().(*ast.Ident) + return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name + case objectPtrType, positionType: + // object pointers and token positions always match + return true + case callExprType: + // For calls, the Ellipsis fields (token.Pos) must + // match since that is how f(x) and f(x...) are different. + // Check them here but fall through for the remaining fields. + p := pattern.Interface().(*ast.CallExpr) + v := val.Interface().(*ast.CallExpr) + if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() { + return false + } + } + + p := reflect.Indirect(pattern) + v := reflect.Indirect(val) + if !p.IsValid() || !v.IsValid() { + return !p.IsValid() && !v.IsValid() + } + + switch p.Kind() { + case reflect.Slice: + if p.Len() != v.Len() { + return false + } + for i := 0; i < p.Len(); i++ { + if !match(m, p.Index(i), v.Index(i)) { + return false + } + } + return true + + case reflect.Struct: + for i := 0; i < p.NumField(); i++ { + if !match(m, p.Field(i), v.Field(i)) { + return false + } + } + return true + + case reflect.Interface: + return match(m, p.Elem(), v.Elem()) + } + + // Handle token integers, etc. + return p.Interface() == v.Interface() +} + +// subst returns a copy of pattern with values from m substituted in place +// of wildcards and pos used as the position of tokens from the pattern. +// if m == nil, subst returns a copy of pattern and doesn't change the line +// number information. +func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value { + if !pattern.IsValid() { + return reflect.Value{} + } + + // Wildcard gets replaced with map value. 
+ if m != nil && pattern.Type() == identType { + name := pattern.Interface().(*ast.Ident).Name + if isWildcard(name) { + if old, ok := m[name]; ok { + return subst(nil, old, reflect.Value{}) + } + } + } + + if pos.IsValid() && pattern.Type() == positionType { + // use new position only if old position was valid in the first place + if old := pattern.Interface().(token.Pos); !old.IsValid() { + return pattern + } + return pos + } + + // Otherwise copy. + switch p := pattern; p.Kind() { + case reflect.Slice: + if p.IsNil() { + // Do not turn nil slices into empty slices. go/ast + // guarantees that certain lists will be nil if not + // populated. + return reflect.Zero(p.Type()) + } + v := reflect.MakeSlice(p.Type(), p.Len(), p.Len()) + for i := 0; i < p.Len(); i++ { + v.Index(i).Set(subst(m, p.Index(i), pos)) + } + return v + + case reflect.Struct: + v := reflect.New(p.Type()).Elem() + for i := 0; i < p.NumField(); i++ { + v.Field(i).Set(subst(m, p.Field(i), pos)) + } + return v + + case reflect.Pointer: + v := reflect.New(p.Type()).Elem() + if elem := p.Elem(); elem.IsValid() { + v.Set(subst(m, elem, pos).Addr()) + } + return v + + case reflect.Interface: + v := reflect.New(p.Type()).Elem() + if elem := p.Elem(); elem.IsValid() { + v.Set(subst(m, elem, pos)) + } + return v + } + + return pattern +} diff --git a/platform/dbops/binaries/go/go/src/cmd/gofmt/simplify.go b/platform/dbops/binaries/go/go/src/cmd/gofmt/simplify.go new file mode 100644 index 0000000000000000000000000000000000000000..eb55daabc1db86740cdd63bb938c2f1e7d3d166f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/gofmt/simplify.go @@ -0,0 +1,169 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 

package main

import (
	"go/ast"
	"go/token"
	"reflect"
)

// simplifier is an ast.Visitor that applies gofmt's -s simplifications
// by mutating the AST in place as it walks.
type simplifier struct{}

// Visit simplifies the node it is given (composite literals, slice
// expressions, range statements) and returns s to continue the walk,
// or nil when the subtree has already been handled.
func (s simplifier) Visit(node ast.Node) ast.Visitor {
	switch n := node.(type) {
	case *ast.CompositeLit:
		// array, slice, and map composite literals may be simplified
		outer := n
		var keyType, eltType ast.Expr
		switch typ := outer.Type.(type) {
		case *ast.ArrayType:
			eltType = typ.Elt
		case *ast.MapType:
			keyType = typ.Key
			eltType = typ.Value
		}

		if eltType != nil {
			var ktyp reflect.Value
			if keyType != nil {
				ktyp = reflect.ValueOf(keyType)
			}
			typ := reflect.ValueOf(eltType)
			for i, x := range outer.Elts {
				px := &outer.Elts[i]
				// look at value of indexed/named elements
				if t, ok := x.(*ast.KeyValueExpr); ok {
					if keyType != nil {
						s.simplifyLiteral(ktyp, keyType, t.Key, &t.Key)
					}
					x = t.Value
					px = &t.Value
				}
				s.simplifyLiteral(typ, eltType, x, px)
			}
			// node was simplified - stop walk (there are no subnodes to simplify)
			return nil
		}

	case *ast.SliceExpr:
		// a slice expression of the form: s[a:len(s)]
		// can be simplified to: s[a:]
		// if s is "simple enough" (for now we only accept identifiers)
		//
		// Note: This may not be correct because len may have been redeclared in
		//       the same package. However, this is extremely unlikely and so far
		//       (April 2022, after years of supporting this rewrite feature)
		//       has never come up, so let's keep it working as is (see also #15153).
		//
		// Also note that this code used to use go/ast's object tracking,
		// which was removed in exchange for go/parser.Mode.SkipObjectResolution.
		// False positives are extremely unlikely as described above,
		// and go/ast's object tracking is incomplete in any case.
		if n.Max != nil {
			// - 3-index slices always require the 2nd and 3rd index
			break
		}
		// NOTE: the identifier s below shadows the simplifier receiver s
		// within this branch; that is how the upstream source is written.
		if s, _ := n.X.(*ast.Ident); s != nil {
			// the array/slice object is a single identifier
			if call, _ := n.High.(*ast.CallExpr); call != nil && len(call.Args) == 1 && !call.Ellipsis.IsValid() {
				// the high expression is a function call with a single argument
				if fun, _ := call.Fun.(*ast.Ident); fun != nil && fun.Name == "len" {
					// the function called is "len"
					if arg, _ := call.Args[0].(*ast.Ident); arg != nil && arg.Name == s.Name {
						// the len argument is the array/slice object
						n.High = nil
					}
				}
			}
		}
		// Note: We could also simplify slice expressions of the form s[0:b] to s[:b]
		//       but we leave them as is since sometimes we want to be very explicit
		//       about the lower bound.
		// An example where the 0 helps:
		//       x, y, z := b[0:2], b[2:4], b[4:6]
		// An example where it does not:
		//       x, y := b[:n], b[n:]

	case *ast.RangeStmt:
		// - a range of the form: for x, _ = range v {...}
		// can be simplified to: for x = range v {...}
		// - a range of the form: for _ = range v {...}
		// can be simplified to: for range v {...}
		if isBlank(n.Value) {
			n.Value = nil
		}
		if isBlank(n.Key) && n.Value == nil {
			n.Key = nil
		}
	}

	return s
}

// simplifyLiteral simplifies a single composite-literal element x (pointed to
// by px so it can be replaced in place): it walks x, drops an inner literal
// type identical to the outer element type, and collapses &T{...} to {...}
// when the outer element type is *T.
func (s simplifier) simplifyLiteral(typ reflect.Value, astType, x ast.Expr, px *ast.Expr) {
	ast.Walk(s, x) // simplify x

	// if the element is a composite literal and its literal type
	// matches the outer literal's element type exactly, the inner
	// literal type may be omitted
	if inner, ok := x.(*ast.CompositeLit); ok {
		// match with m == nil performs a pure equality comparison
		// (see rewrite.go).
		if match(nil, typ, reflect.ValueOf(inner.Type)) {
			inner.Type = nil
		}
	}
	// if the outer literal's element type is a pointer type *T
	// and the element is & of a composite literal of type T,
	// the inner &T may be omitted.
	if ptr, ok := astType.(*ast.StarExpr); ok {
		if addr, ok := x.(*ast.UnaryExpr); ok && addr.Op == token.AND {
			if inner, ok := addr.X.(*ast.CompositeLit); ok {
				if match(nil, reflect.ValueOf(ptr.X), reflect.ValueOf(inner.Type)) {
					inner.Type = nil // drop T
					*px = inner      // drop &
				}
			}
		}
	}
}

// isBlank reports whether x is the blank identifier "_".
func isBlank(x ast.Expr) bool {
	ident, ok := x.(*ast.Ident)
	return ok && ident.Name == "_"
}

// simplify applies all gofmt -s simplifications to f in place.
func simplify(f *ast.File) {
	// remove empty declarations such as "const ()", etc
	removeEmptyDeclGroups(f)

	var s simplifier
	ast.Walk(s, f)
}

// removeEmptyDeclGroups filters f.Decls in place, dropping GenDecls that
// isEmpty considers empty.
func removeEmptyDeclGroups(f *ast.File) {
	i := 0
	for _, d := range f.Decls {
		if g, ok := d.(*ast.GenDecl); !ok || !isEmpty(f, g) {
			f.Decls[i] = d
			i++
		}
	}
	f.Decls = f.Decls[:i]
}

// isEmpty reports whether the declaration group g carries no specs, no doc
// comment, and no comment anywhere inside its source range.
func isEmpty(f *ast.File, g *ast.GenDecl) bool {
	if g.Doc != nil || g.Specs != nil {
		return false
	}

	for _, c := range f.Comments {
		// if there is a comment in the declaration, it is not considered empty
		if g.Pos() <= c.Pos() && c.End() <= g.End() {
			return false
		}
	}

	return true
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/link/cgo_test.go b/platform/dbops/binaries/go/go/src/cmd/link/cgo_test.go new file mode 100644 index 0000000000000000000000000000000000000000..52db70e1ad8d4c16ce9c21c4239236e4caa9baf2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/cgo_test.go @@ -0,0 +1,148 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"bytes"
	"internal/testenv"
	"os"
	"path/filepath"
	"strconv"
	"testing"
)

// Issues 43830, 46295
// TestCGOLTO builds small cgo programs with -flto in CGO_CFLAGS and verifies
// the link either succeeds or fails only because the C toolchain lacks LTO
// support (in which case the subtest is skipped).
func TestCGOLTO(t *testing.T) {
	testenv.MustHaveCGO(t)
	testenv.MustHaveGoBuild(t)

	t.Parallel()

	// goEnv returns the trimmed output of "go env <arg>".
	goEnv := func(arg string) string {
		cmd := testenv.Command(t, testenv.GoToolPath(t), "env", arg)
		cmd.Stderr = new(bytes.Buffer)

		line, err := cmd.Output()
		if err != nil {
			t.Fatalf("%v: %v\n%s", cmd, err, cmd.Stderr)
		}
		out := string(bytes.TrimSpace(line))
		t.Logf("%v: %q", cmd, out)
		return out
	}

	cc := goEnv("CC")
	cgoCflags := goEnv("CGO_CFLAGS")

	for test := 0; test < 2; test++ {
		t.Run(strconv.Itoa(test), func(t *testing.T) {
			testCGOLTO(t, cc, cgoCflags, test)
		})
	}
}

const test1_main = `
package main

/*
extern int myadd(int, int);
int c_add(int a, int b) {
	return myadd(a, b);
}
*/
import "C"

func main() {
	println(C.c_add(1, 2))
}
`

const test1_add = `
package main

import "C"

/* test */

//export myadd
func myadd(a C.int, b C.int) C.int {
	return a + b
}
`

// NOTE(review): the #include directive below appears to have lost its header
// name (upstream likely reads "#include <stdio.h>") — the literal is preserved
// byte-for-byte here; verify against the upstream Go distribution.
const test2_main = `
package main

import "fmt"

/*
#include

void hello(void) {
	printf("hello\n");
}
*/
import "C"

func main() {
	hello := C.hello
	fmt.Printf("%v\n", hello)
}
`

// testCGOLTO runs one LTO build case (0: two cgo files with an exported
// function; 1: a single cgo file taking a C function's address) in a fresh
// module directory, skipping when the C compiler rejects -flto.
func testCGOLTO(t *testing.T, cc, cgoCflags string, test int) {
	t.Parallel()

	dir := t.TempDir()

	writeTempFile := func(name, contents string) {
		if err := os.WriteFile(filepath.Join(dir, name), []byte(contents), 0644); err != nil {
			t.Fatal(err)
		}
	}

	writeTempFile("go.mod", "module cgolto\n")

	switch test {
	case 0:
		writeTempFile("main.go", test1_main)
		writeTempFile("add.go", test1_add)
	case 1:
		writeTempFile("main.go", test2_main)
	default:
		t.Fatalf("bad case %d", test)
	}

	cmd := testenv.Command(t, testenv.GoToolPath(t), "build")
	cmd.Dir = dir
	cgoCflags += " -flto"
	cmd.Env = append(cmd.Environ(), "CGO_CFLAGS="+cgoCflags)

	t.Logf("CGO_CFLAGS=%q %v", cgoCflags, cmd)
	out, err := cmd.CombinedOutput()
	t.Logf("%s", out)

	if err != nil {
		t.Logf("go build failed: %v", err)

		// Error messages we've seen indicating that LTO is not supported.
		// These errors come from GCC or clang, not Go.
		var noLTO = []string{
			`unrecognized command line option "-flto"`,
			"unable to pass LLVM bit-code files to linker",
			"file not recognized: File format not recognized",
			"LTO support has not been enabled",
			"linker command failed with exit code",
			"gcc: can't load library",
		}
		for _, msg := range noLTO {
			if bytes.Contains(out, []byte(msg)) {
				t.Skipf("C compiler %v does not support LTO", cc)
			}
		}

		t.Error("failed")
	}
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/link/doc.go b/platform/dbops/binaries/go/go/src/cmd/link/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..b0f2700ac1d2536dcf2463f4d6aeca916e84e135 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/doc.go @@ -0,0 +1,123 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
Link, typically invoked as “go tool link”, reads the Go archive or object
for a package main, along with its dependencies, and combines them
into an executable binary.

# Command Line

Usage:

	go tool link [flags] main.a

Flags:

	-B note
		Add an ELF_NT_GNU_BUILD_ID note when using ELF.
		The value should start with 0x and be an even number of hex digits.
		Alternatively, you can pass "gobuildid" in order to derive the
		GNU build ID from the Go build ID.
	-E entry
		Set entry symbol name.
	-H type
		Set executable format type.
		The default format is inferred from GOOS and GOARCH.
		On Windows, -H windowsgui writes a "GUI binary" instead of a "console binary."
	-I interpreter
		Set the ELF dynamic linker to use.
+ -L dir1 -L dir2 + Search for imported packages in dir1, dir2, etc, + after consulting $GOROOT/pkg/$GOOS_$GOARCH. + -R quantum + Set address rounding quantum. + -T address + Set the start address of text symbols. + -V + Print linker version and exit. + -X importpath.name=value + Set the value of the string variable in importpath named name to value. + This is only effective if the variable is declared in the source code either uninitialized + or initialized to a constant string expression. -X will not work if the initializer makes + a function call or refers to other variables. + Note that before Go 1.5 this option took two separate arguments. + -asan + Link with C/C++ address sanitizer support. + -aslr + Enable ASLR for buildmode=c-shared on windows (default true). + -buildid id + Record id as Go toolchain build id. + -buildmode mode + Set build mode (default exe). + -c + Dump call graphs. + -compressdwarf + Compress DWARF if possible (default true). + -cpuprofile file + Write CPU profile to file. + -d + Disable generation of dynamic executables. + The emitted code is the same in either case; the option + controls only whether a dynamic header is included. + The dynamic header is on by default, even without any + references to dynamic libraries, because many common + system tools now assume the presence of the header. + -dumpdep + Dump symbol dependency graph. + -extar ar + Set the external archive program (default "ar"). + Used only for -buildmode=c-archive. + -extld linker + Set the external linker (default "clang" or "gcc"). + -extldflags flags + Set space-separated flags to pass to the external linker. + -f + Ignore version mismatch in the linked archives. + -g + Disable Go package data checks. + -importcfg file + Read import configuration from file. + In the file, set packagefile, packageshlib to specify import resolution. + -installsuffix suffix + Look for packages in $GOROOT/pkg/$GOOS_$GOARCH_suffix + instead of $GOROOT/pkg/$GOOS_$GOARCH. 
+ -k symbol + Set field tracking symbol. Use this flag when GOEXPERIMENT=fieldtrack is set. + -libgcc file + Set name of compiler support library. + This is only used in internal link mode. + If not set, default value comes from running the compiler, + which may be set by the -extld option. + Set to "none" to use no support library. + -linkmode mode + Set link mode (internal, external, auto). + This sets the linking mode as described in cmd/cgo/doc.go. + -linkshared + Link against installed Go shared libraries (experimental). + -memprofile file + Write memory profile to file. + -memprofilerate rate + Set runtime.MemProfileRate to rate. + -msan + Link with C/C++ memory sanitizer support. + -o file + Write output to file (default a.out, or a.out.exe on Windows). + -pluginpath path + The path name used to prefix exported plugin symbols. + -r dir1:dir2:... + Set the ELF dynamic linker search path. + -race + Link with race detection libraries. + -s + Omit the symbol table and debug information. + -tmpdir dir + Write temporary files to dir. + Temporary files are only used in external linking mode. + -v + Print trace of linker operations. + -w + Omit the DWARF symbol table. +*/ +package main diff --git a/platform/dbops/binaries/go/go/src/cmd/link/dwarf_test.go b/platform/dbops/binaries/go/go/src/cmd/link/dwarf_test.go new file mode 100644 index 0000000000000000000000000000000000000000..124c91538cc804b88d2f4ad358f70a58e4278a58 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/dwarf_test.go @@ -0,0 +1,234 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 

package main

import (
	"bytes"
	cmddwarf "cmd/internal/dwarf"
	"cmd/internal/objfile"
	"cmd/internal/quoted"
	"debug/dwarf"
	"internal/platform"
	"internal/testenv"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"runtime"
	"strings"
	"testing"
)

// TestMain allows this test binary to run as a -toolexec wrapper for the 'go'
// command. If LINK_TEST_TOOLEXEC is set, TestMain runs the binary as if it were
// cmd/link, and otherwise runs the requested tool as a subprocess.
//
// This allows the test to verify the behavior of the current contents of the
// cmd/link package even if the installed cmd/link binary is stale.
func TestMain(m *testing.M) {
	if os.Getenv("LINK_TEST_TOOLEXEC") == "" {
		// Not running as a -toolexec wrapper. Just run the tests.
		os.Exit(m.Run())
	}

	if strings.TrimSuffix(filepath.Base(os.Args[1]), ".exe") == "link" {
		// Running as a -toolexec linker, and the tool is cmd/link.
		// Substitute this test binary for the linker.
		os.Args = os.Args[1:]
		main()
		os.Exit(0)
	}

	// Any other tool: run it as a plain subprocess, forwarding stdio.
	cmd := exec.Command(os.Args[1], os.Args[2:]...)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
	os.Exit(0)
}

// testDWARF builds testprog and testprogcgo in the given buildmode (with this
// test binary substituted for cmd/link via -toolexec) and checks that the
// resulting binary does or does not carry DWARF, per expectDWARF, and that the
// DWARF line table maps main.main to the expected file:line.
func testDWARF(t *testing.T, buildmode string, expectDWARF bool, env ...string) {
	testenv.MustHaveCGO(t)
	testenv.MustHaveGoBuild(t)

	if !platform.ExecutableHasDWARF(runtime.GOOS, runtime.GOARCH) {
		t.Skipf("skipping on %s/%s: no DWARF symbol table in executables", runtime.GOOS, runtime.GOARCH)
	}

	t.Parallel()

	for _, prog := range []string{"testprog", "testprogcgo"} {
		prog := prog
		expectDWARF := expectDWARF
		if runtime.GOOS == "aix" && prog == "testprogcgo" {
			// On AIX, whether DWARF survives depends on the external linker.
			extld := os.Getenv("CC")
			if extld == "" {
				extld = "gcc"
			}
			extldArgs, err := quoted.Split(extld)
			if err != nil {
				t.Fatal(err)
			}
			expectDWARF, err = cmddwarf.IsDWARFEnabledOnAIXLd(extldArgs)
			if err != nil {
				t.Fatal(err)
			}
		}

		t.Run(prog, func(t *testing.T) {
			t.Parallel()

			tmpDir := t.TempDir()

			exe := filepath.Join(tmpDir, prog+".exe")
			dir := "../../runtime/testdata/" + prog
			cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-toolexec", os.Args[0], "-o", exe)
			if buildmode != "" {
				cmd.Args = append(cmd.Args, "-buildmode", buildmode)
			}
			cmd.Args = append(cmd.Args, dir)
			cmd.Env = append(os.Environ(), env...)
			cmd.Env = append(cmd.Env, "CGO_CFLAGS=") // ensure CGO_CFLAGS does not contain any flags. Issue #35459
			cmd.Env = append(cmd.Env, "LINK_TEST_TOOLEXEC=1")
			out, err := cmd.CombinedOutput()
			if err != nil {
				t.Fatalf("go build -o %v %v: %v\n%s", exe, dir, err, out)
			}

			if buildmode == "c-archive" {
				// Extract the archive and use the go.o object within.
				cmd := testenv.Command(t, "ar", "-x", exe)
				cmd.Dir = tmpDir
				if out, err := cmd.CombinedOutput(); err != nil {
					t.Fatalf("ar -x %s: %v\n%s", exe, err, out)
				}
				exe = filepath.Join(tmpDir, "go.o")
			}

			darwinSymbolTestIsTooFlaky := true // Turn this off, it is too flaky -- See #32218
			if runtime.GOOS == "darwin" && !darwinSymbolTestIsTooFlaky {
				if _, err = exec.LookPath("symbols"); err == nil {
					// Ensure Apple's tooling can parse our object for symbols.
					out, err = testenv.Command(t, "symbols", exe).CombinedOutput()
					if err != nil {
						t.Fatalf("symbols %v: %v: %s", filepath.Base(exe), err, out)
					} else {
						if bytes.HasPrefix(out, []byte("Unable to find file")) {
							// This failure will cause the App Store to reject our binaries.
							t.Fatalf("symbols %v: failed to parse file", filepath.Base(exe))
						} else if bytes.Contains(out, []byte(", Empty]")) {
							t.Fatalf("symbols %v: parsed as empty", filepath.Base(exe))
						}
					}
				}
			}

			f, err := objfile.Open(exe)
			if err != nil {
				t.Fatal(err)
			}
			defer f.Close()

			syms, err := f.Symbols()
			if err != nil {
				t.Fatal(err)
			}

			var addr uint64
			for _, sym := range syms {
				if sym.Name == "main.main" {
					addr = sym.Addr
					break
				}
			}
			if addr == 0 {
				t.Fatal("cannot find main.main in symbols")
			}

			d, err := f.DWARF()
			if err != nil {
				if expectDWARF {
					t.Fatal(err)
				}
				return
			} else {
				if !expectDWARF {
					t.Fatal("unexpected DWARF section")
				}
			}

			// TODO: We'd like to use filepath.Join here.
			// Also related: golang.org/issue/19784.
			wantFile := path.Join(prog, "main.go")
			// NOTE(review): assumes main.main sits at line 24 of the
			// testdata program's main.go — confirm against testdata
			// if this ever fails.
			wantLine := 24
			r := d.Reader()
			entry, err := r.SeekPC(addr)
			if err != nil {
				t.Fatal(err)
			}
			lr, err := d.LineReader(entry)
			if err != nil {
				t.Fatal(err)
			}
			var line dwarf.LineEntry
			if err := lr.SeekPC(addr, &line); err == dwarf.ErrUnknownPC {
				t.Fatalf("did not find file:line for %#x (main.main)", addr)
			} else if err != nil {
				t.Fatal(err)
			}
			if !strings.HasSuffix(line.File.Name, wantFile) || line.Line != wantLine {
				t.Errorf("%#x is %s:%d, want %s:%d", addr, line.File.Name, line.Line, filepath.Join("...", wantFile), wantLine)
			}
		})
	}
}

// TestDWARF checks the default build and, when not in short mode, the
// c-archive buildmode on platforms that support it.
func TestDWARF(t *testing.T) {
	testDWARF(t, "", true)
	if !testing.Short() {
		if runtime.GOOS == "windows" {
			t.Skip("skipping Windows/c-archive; see Issue 35512 for more.")
		}
		if !platform.BuildModeSupported(runtime.Compiler, "c-archive", runtime.GOOS, runtime.GOARCH) {
			t.Skipf("skipping c-archive test on unsupported platform %s-%s", runtime.GOOS, runtime.GOARCH)
		}
		t.Run("c-archive", func(t *testing.T) {
			testDWARF(t, "c-archive", true)
		})
	}
}

func TestDWARFiOS(t *testing.T) {
	// Normally we run TestDWARF on native platform. But on iOS we don't have
	// go build, so we do this test with a cross build.
	// Only run this on darwin/amd64, where we can cross build for iOS.
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	if runtime.GOARCH != "amd64" || runtime.GOOS != "darwin" {
		t.Skip("skipping on non-darwin/amd64 platform")
	}
	if err := testenv.Command(t, "xcrun", "--help").Run(); err != nil {
		t.Skipf("error running xcrun, required for iOS cross build: %v", err)
	}
	// Check to see if the ios tools are installed. It's possible to have the command line tools
	// installed without the iOS sdk.
	if output, err := testenv.Command(t, "xcodebuild", "-showsdks").CombinedOutput(); err != nil {
		t.Skipf("error running xcodebuild, required for iOS cross build: %v", err)
	} else if !strings.Contains(string(output), "iOS SDK") {
		t.Skipf("iOS SDK not detected.")
	}
	cc := "CC=" + runtime.GOROOT() + "/misc/ios/clangwrap.sh"
	// iOS doesn't allow unmapped segments, so iOS executables don't have DWARF.
	t.Run("exe", func(t *testing.T) {
		testDWARF(t, "", false, cc, "CGO_ENABLED=1", "GOOS=ios", "GOARCH=arm64")
	})
	// However, c-archive iOS objects have embedded DWARF.
	t.Run("c-archive", func(t *testing.T) {
		testDWARF(t, "c-archive", true, cc, "CGO_ENABLED=1", "GOOS=ios", "GOARCH=arm64")
	})
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/link/elf_test.go b/platform/dbops/binaries/go/go/src/cmd/link/elf_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5dcef1cc22b20cd3c2354674e177affaef84aa22 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/elf_test.go @@ -0,0 +1,562 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build dragonfly || freebsd || linux || netbsd || openbsd

package main

import (
	"bytes"
	"cmd/internal/buildid"
	"cmd/internal/notsha256"
	"cmd/link/internal/ld"
	"debug/elf"
	"fmt"
	"internal/platform"
	"internal/testenv"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"testing"
	"text/template"
)

// getCCAndCCFLAGS returns the C compiler (go env CC) and the compiler flags
// (go env GOGCCFLAGS, split into fields) for the given environment.
func getCCAndCCFLAGS(t *testing.T, env []string) (string, []string) {
	goTool := testenv.GoToolPath(t)
	cmd := testenv.Command(t, goTool, "env", "CC")
	cmd.Env = env
	ccb, err := cmd.Output()
	if err != nil {
		t.Fatal(err)
	}
	cc := strings.TrimSpace(string(ccb))

	cmd = testenv.Command(t, goTool, "env", "GOGCCFLAGS")
	cmd.Env = env
	cflagsb, err := cmd.Output()
	if err != nil {
		t.Fatal(err)
	}
	cflags := strings.Fields(string(cflagsb))

	return cc, cflags
}

// Assembly fixture declaring two distinct text sections; objcopy later renames
// .text2 to .text1 to produce duplicate section names.
var asmSource = `
	.section .text1,"ax"
s1:
	.byte 0
	.section .text2,"ax"
s2:
	.byte 0
`

var goSource = `
package main
func main() {}
`

// The linker used to crash if an ELF input file had multiple text sections
// with the same name.
func TestSectionsWithSameName(t *testing.T) {
	testenv.MustHaveGoBuild(t)
	testenv.MustHaveCGO(t)
	t.Parallel()

	objcopy, err := exec.LookPath("objcopy")
	if err != nil {
		t.Skipf("can't find objcopy: %v", err)
	}

	dir := t.TempDir()

	gopath := filepath.Join(dir, "GOPATH")
	env := append(os.Environ(), "GOPATH="+gopath)

	if err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module elf_test\n"), 0666); err != nil {
		t.Fatal(err)
	}

	asmFile := filepath.Join(dir, "x.s")
	if err := os.WriteFile(asmFile, []byte(asmSource), 0444); err != nil {
		t.Fatal(err)
	}

	goTool := testenv.GoToolPath(t)
	cc, cflags := getCCAndCCFLAGS(t, env)

	asmObj := filepath.Join(dir, "x.o")
	t.Logf("%s %v -c -o %s %s", cc, cflags, asmObj, asmFile)
	if out, err := testenv.Command(t, cc, append(cflags, "-c", "-o", asmObj, asmFile)...).CombinedOutput(); err != nil {
		t.Logf("%s", out)
		t.Fatal(err)
	}

	// Produce a .syso with two sections both named .text1.
	asm2Obj := filepath.Join(dir, "x2.syso")
	t.Logf("%s --rename-section .text2=.text1 %s %s", objcopy, asmObj, asm2Obj)
	if out, err := testenv.Command(t, objcopy, "--rename-section", ".text2=.text1", asmObj, asm2Obj).CombinedOutput(); err != nil {
		t.Logf("%s", out)
		t.Fatal(err)
	}

	// Remove intermediates so only the .syso is picked up by the build.
	for _, s := range []string{asmFile, asmObj} {
		if err := os.Remove(s); err != nil {
			t.Fatal(err)
		}
	}

	goFile := filepath.Join(dir, "main.go")
	if err := os.WriteFile(goFile, []byte(goSource), 0444); err != nil {
		t.Fatal(err)
	}

	cmd := testenv.Command(t, goTool, "build")
	cmd.Dir = dir
	cmd.Env = env
	t.Logf("%s build", goTool)
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Logf("%s", out)
		t.Fatal(err)
	}
}

// Two C files each defining a local static symbol with the same name.
var cSources35779 = []string{`
static int blah() { return 42; }
int Cfunc1() { return blah(); }
`, `
static int blah() { return 42; }
int Cfunc2() { return blah(); }
`,
}

// TestMinusRSymsWithSameName tests a corner case in the new
// loader. Prior to the fix this failed with the error 'loadelf:
// $WORK/b001/_pkg_.a(ldr.syso): duplicate symbol reference: blah in
// both main(.text) and main(.text)'. See issue #35779.
func TestMinusRSymsWithSameName(t *testing.T) {
	testenv.MustHaveGoBuild(t)
	testenv.MustHaveCGO(t)
	t.Parallel()

	dir := t.TempDir()

	gopath := filepath.Join(dir, "GOPATH")
	env := append(os.Environ(), "GOPATH="+gopath)

	if err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module elf_test\n"), 0666); err != nil {
		t.Fatal(err)
	}

	goTool := testenv.GoToolPath(t)
	cc, cflags := getCCAndCCFLAGS(t, env)

	objs := []string{}
	csrcs := []string{}
	for i, content := range cSources35779 {
		csrcFile := filepath.Join(dir, fmt.Sprintf("x%d.c", i))
		csrcs = append(csrcs, csrcFile)
		if err := os.WriteFile(csrcFile, []byte(content), 0444); err != nil {
			t.Fatal(err)
		}

		obj := filepath.Join(dir, fmt.Sprintf("x%d.o", i))
		objs = append(objs, obj)
		t.Logf("%s %v -c -o %s %s", cc, cflags, obj, csrcFile)
		if out, err := testenv.Command(t, cc, append(cflags, "-c", "-o", obj, csrcFile)...).CombinedOutput(); err != nil {
			t.Logf("%s", out)
			t.Fatal(err)
		}
	}

	// Relocatable (-r) link of both objects into one .syso, preserving the
	// duplicate local "blah" symbols.
	sysoObj := filepath.Join(dir, "ldr.syso")
	t.Logf("%s %v -nostdlib -r -o %s %v", cc, cflags, sysoObj, objs)
	if out, err := testenv.Command(t, cc, append(cflags, "-nostdlib", "-r", "-o", sysoObj, objs[0], objs[1])...).CombinedOutput(); err != nil {
		t.Logf("%s", out)
		t.Fatal(err)
	}

	// Remove intermediates so only the .syso participates in the build.
	cruft := [][]string{objs, csrcs}
	for _, sl := range cruft {
		for _, s := range sl {
			if err := os.Remove(s); err != nil {
				t.Fatal(err)
			}
		}
	}

	goFile := filepath.Join(dir, "main.go")
	if err := os.WriteFile(goFile, []byte(goSource), 0444); err != nil {
		t.Fatal(err)
	}

	t.Logf("%s build", goTool)
	cmd := testenv.Command(t, goTool, "build")
	cmd.Dir = dir
	cmd.Env = env
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Logf("%s", out)
		t.Fatal(err)
	}
}

// TestGNUBuildIDDerivedFromGoBuildID checks that -B gobuildid derives the GNU
// build ID note from the Go build id (here, the first 20 bytes of
// notsha256.Sum256 of the -buildid value).
func TestGNUBuildIDDerivedFromGoBuildID(t *testing.T) {
	testenv.MustHaveGoBuild(t)

	t.Parallel()

	goFile := filepath.Join(t.TempDir(), "notes.go")
	if err := os.WriteFile(goFile, []byte(goSource), 0444); err != nil {
		t.Fatal(err)
	}
	outFile := filepath.Join(t.TempDir(), "notes.exe")
	goTool := testenv.GoToolPath(t)

	cmd := testenv.Command(t, goTool, "build", "-o", outFile, "-ldflags", "-buildid 0x1234 -B gobuildid", goFile)
	cmd.Dir = t.TempDir()

	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Logf("%s", out)
		t.Fatal(err)
	}

	expectedGoBuildID := notsha256.Sum256([]byte("0x1234"))

	gnuBuildID, err := buildid.ReadELFNote(outFile, string(ld.ELF_NOTE_BUILDINFO_NAME), ld.ELF_NOTE_BUILDINFO_TAG)
	if err != nil || gnuBuildID == nil {
		t.Fatalf("can't read GNU build ID")
	}

	if !bytes.Equal(gnuBuildID, expectedGoBuildID[:20]) {
		t.Fatalf("build id not matching")
	}
}

// TestMergeNoteSections verifies that the GNU and Go build-id notes end up in
// the expected number of PT_NOTE segments (one where they can be merged, two
// on OSes that require independent segments).
func TestMergeNoteSections(t *testing.T) {
	testenv.MustHaveGoBuild(t)
	expected := 1

	switch runtime.GOOS {
	case "linux", "dragonfly":
	case "openbsd", "netbsd", "freebsd":
		// These OSes require independent segment
		expected = 2
	default:
		t.Skip("We should only test on elf output.")
	}
	t.Parallel()

	goFile := filepath.Join(t.TempDir(), "notes.go")
	if err := os.WriteFile(goFile, []byte(goSource), 0444); err != nil {
		t.Fatal(err)
	}
	outFile := filepath.Join(t.TempDir(), "notes.exe")
	goTool := testenv.GoToolPath(t)
	// sha1sum of "gopher"
	id := "0xf4e8cd51ce8bae2996dc3b74639cdeaa1f7fee5f"
	cmd := testenv.Command(t, goTool, "build", "-o", outFile, "-ldflags",
		"-B "+id, goFile)
	cmd.Dir = t.TempDir()
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Logf("%s", out)
		t.Fatal(err)
	}

	ef, err := elf.Open(outFile)
	if err != nil {
		t.Fatalf("open elf file failed:%v", err)
	}
	defer ef.Close()
	sec := ef.Section(".note.gnu.build-id")
	if sec == nil {
		t.Fatalf("can't find gnu build id")
	}

	sec = ef.Section(".note.go.buildid")
	if sec == nil {
		t.Fatalf("can't find go build id")
	}
	cnt := 0
	for _, ph := range ef.Progs {
		if ph.Type == elf.PT_NOTE {
			cnt += 1
		}
	}
	if cnt != expected {
		t.Fatalf("want %d PT_NOTE segment, got %d", expected, cnt)
	}
}

const pieSourceTemplate = `
package main

import "fmt"

// Force the creation of a lot of type descriptors that will go into
// the .data.rel.ro section.
{{range $index, $element := .}}var V{{$index}} interface{} = [{{$index}}]int{}
{{end}}

func main() {
{{range $index, $element := .}}	fmt.Println(V{{$index}})
{{end}}
}
`

func TestPIESize(t *testing.T) {
	testenv.MustHaveGoBuild(t)

	// We don't want to test -linkmode=external if cgo is not supported.
	// On some systems -buildmode=pie implies -linkmode=external, so just
	// always skip the test if cgo is not supported.
	testenv.MustHaveCGO(t)

	if !platform.BuildModeSupported(runtime.Compiler, "pie", runtime.GOOS, runtime.GOARCH) {
		t.Skip("-buildmode=pie not supported")
	}

	t.Parallel()

	tmpl := template.Must(template.New("pie").Parse(pieSourceTemplate))

	// writeGo renders pieSourceTemplate into dir/pie.go.
	writeGo := func(t *testing.T, dir string) {
		f, err := os.Create(filepath.Join(dir, "pie.go"))
		if err != nil {
			t.Fatal(err)
		}

		// Passing a 100-element slice here will cause
		// pieSourceTemplate to create 100 variables with
		// different types.
+ if err := tmpl.Execute(f, make([]byte, 100)); err != nil { + t.Fatal(err) + } + + if err := f.Close(); err != nil { + t.Fatal(err) + } + } + + for _, external := range []bool{false, true} { + external := external + + name := "TestPieSize-" + if external { + name += "external" + } else { + name += "internal" + } + t.Run(name, func(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + + writeGo(t, dir) + + binexe := filepath.Join(dir, "exe") + binpie := filepath.Join(dir, "pie") + if external { + binexe += "external" + binpie += "external" + } + + build := func(bin, mode string) error { + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", bin, "-buildmode="+mode) + if external { + cmd.Args = append(cmd.Args, "-ldflags=-linkmode=external") + } + cmd.Args = append(cmd.Args, "pie.go") + cmd.Dir = dir + t.Logf("%v", cmd.Args) + out, err := cmd.CombinedOutput() + if len(out) > 0 { + t.Logf("%s", out) + } + if err != nil { + t.Log(err) + } + return err + } + + var errexe, errpie error + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + errexe = build(binexe, "exe") + }() + go func() { + defer wg.Done() + errpie = build(binpie, "pie") + }() + wg.Wait() + if errexe != nil || errpie != nil { + if runtime.GOOS == "android" && runtime.GOARCH == "arm64" { + testenv.SkipFlaky(t, 58806) + } + t.Fatal("link failed") + } + + var sizeexe, sizepie uint64 + if fi, err := os.Stat(binexe); err != nil { + t.Fatal(err) + } else { + sizeexe = uint64(fi.Size()) + } + if fi, err := os.Stat(binpie); err != nil { + t.Fatal(err) + } else { + sizepie = uint64(fi.Size()) + } + + elfexe, err := elf.Open(binexe) + if err != nil { + t.Fatal(err) + } + defer elfexe.Close() + + elfpie, err := elf.Open(binpie) + if err != nil { + t.Fatal(err) + } + defer elfpie.Close() + + // The difference in size between exe and PIE + // should be approximately the difference in + // size of the .text section plus the size of + // the PIE dynamic data sections plus the + // 
difference in size of the .got and .plt + // sections if they exist. + // We ignore unallocated sections. + // There may be gaps between non-writeable and + // writable PT_LOAD segments. We also skip those + // gaps (see issue #36023). + + textsize := func(ef *elf.File, name string) uint64 { + for _, s := range ef.Sections { + if s.Name == ".text" { + return s.Size + } + } + t.Fatalf("%s: no .text section", name) + return 0 + } + textexe := textsize(elfexe, binexe) + textpie := textsize(elfpie, binpie) + + dynsize := func(ef *elf.File) uint64 { + var ret uint64 + for _, s := range ef.Sections { + if s.Flags&elf.SHF_ALLOC == 0 { + continue + } + switch s.Type { + case elf.SHT_DYNSYM, elf.SHT_STRTAB, elf.SHT_REL, elf.SHT_RELA, elf.SHT_HASH, elf.SHT_GNU_HASH, elf.SHT_GNU_VERDEF, elf.SHT_GNU_VERNEED, elf.SHT_GNU_VERSYM: + ret += s.Size + } + if s.Flags&elf.SHF_WRITE != 0 && (strings.Contains(s.Name, ".got") || strings.Contains(s.Name, ".plt")) { + ret += s.Size + } + } + return ret + } + + dynexe := dynsize(elfexe) + dynpie := dynsize(elfpie) + + extrasize := func(ef *elf.File) uint64 { + var ret uint64 + // skip unallocated sections + for _, s := range ef.Sections { + if s.Flags&elf.SHF_ALLOC == 0 { + ret += s.Size + } + } + // also skip gaps between PT_LOAD segments + var prev *elf.Prog + for _, seg := range ef.Progs { + if seg.Type != elf.PT_LOAD { + continue + } + if prev != nil { + ret += seg.Off - prev.Off - prev.Filesz + } + prev = seg + } + return ret + } + + extraexe := extrasize(elfexe) + extrapie := extrasize(elfpie) + + if sizepie < sizeexe || sizepie-extrapie < sizeexe-extraexe { + return + } + diffReal := (sizepie - extrapie) - (sizeexe - extraexe) + diffExpected := (textpie + dynpie) - (textexe + dynexe) + + t.Logf("real size difference %#x, expected %#x", diffReal, diffExpected) + + if diffReal > (diffExpected + diffExpected/10) { + t.Errorf("PIE unexpectedly large: got difference of %d (%d - %d), expected difference %d", diffReal, sizepie, sizeexe, 
diffExpected) + } + }) + } +} + +func TestIssue51939(t *testing.T) { + testenv.MustHaveGoBuild(t) + t.Parallel() + td := t.TempDir() + goFile := filepath.Join(td, "issue51939.go") + if err := os.WriteFile(goFile, []byte(goSource), 0444); err != nil { + t.Fatal(err) + } + outFile := filepath.Join(td, "issue51939.exe") + goTool := testenv.GoToolPath(t) + cmd := testenv.Command(t, goTool, "build", "-o", outFile, goFile) + if out, err := cmd.CombinedOutput(); err != nil { + t.Logf("%s", out) + t.Fatal(err) + } + + ef, err := elf.Open(outFile) + if err != nil { + t.Fatal(err) + } + + for _, s := range ef.Sections { + if s.Flags&elf.SHF_ALLOC == 0 && s.Addr != 0 { + t.Errorf("section %s should not allocated with addr %x", s.Name, s.Addr) + } + } +} + +func TestFlagR(t *testing.T) { + // Test that using the -R flag to specify a (large) alignment generates + // a working binary. + // (Test only on ELF for now. The alignment allowed differs from platform + // to platform.) + testenv.MustHaveGoBuild(t) + t.Parallel() + tmpdir := t.TempDir() + src := filepath.Join(tmpdir, "x.go") + if err := os.WriteFile(src, []byte(goSource), 0444); err != nil { + t.Fatal(err) + } + exe := filepath.Join(tmpdir, "x.exe") + + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags=-R=0x100000", "-o", exe, src) + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("build failed: %v, output:\n%s", err, out) + } + + cmd = testenv.Command(t, exe) + if out, err := cmd.CombinedOutput(); err != nil { + t.Errorf("executable failed to run: %v\n%s", err, out) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/link_test.go b/platform/dbops/binaries/go/go/src/cmd/link/link_test.go new file mode 100644 index 0000000000000000000000000000000000000000..897607c4fad5e06f5af410fbf9f6b23e15502cfc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/link_test.go @@ -0,0 +1,1377 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "bytes" + "debug/macho" + "errors" + "internal/platform" + "internal/testenv" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strings" + "testing" + + "cmd/internal/sys" +) + +var AuthorPaidByTheColumnInch struct { + fog int `text:"London. Michaelmas term lately over, and the Lord Chancellor sitting in Lincoln’s Inn Hall. Implacable November weather. As much mud in the streets as if the waters had but newly retired from the face of the earth, and it would not be wonderful to meet a Megalosaurus, forty feet long or so, waddling like an elephantine lizard up Holborn Hill. Smoke lowering down from chimney-pots, making a soft black drizzle, with flakes of soot in it as big as full-grown snowflakes—gone into mourning, one might imagine, for the death of the sun. Dogs, undistinguishable in mire. Horses, scarcely better; splashed to their very blinkers. Foot passengers, jostling one another’s umbrellas in a general infection of ill temper, and losing their foot-hold at street-corners, where tens of thousands of other foot passengers have been slipping and sliding since the day broke (if this day ever broke), adding new deposits to the crust upon crust of mud, sticking at those points tenaciously to the pavement, and accumulating at compound interest. Fog everywhere. Fog up the river, where it flows among green aits and meadows; fog down the river, where it rolls defiled among the tiers of shipping and the waterside pollutions of a great (and dirty) city. Fog on the Essex marshes, fog on the Kentish heights. Fog creeping into the cabooses of collier-brigs; fog lying out on the yards and hovering in the rigging of great ships; fog drooping on the gunwales of barges and small boats. 
Fog in the eyes and throats of ancient Greenwich pensioners, wheezing by the firesides of their wards; fog in the stem and bowl of the afternoon pipe of the wrathful skipper, down in his close cabin; fog cruelly pinching the toes and fingers of his shivering little ‘prentice boy on deck. Chance people on the bridges peeping over the parapets into a nether sky of fog, with fog all round them, as if they were up in a balloon and hanging in the misty clouds. Gas looming through the fog in divers places in the streets, much as the sun may, from the spongey fields, be seen to loom by husbandman and ploughboy. Most of the shops lighted two hours before their time—as the gas seems to know, for it has a haggard and unwilling look. The raw afternoon is rawest, and the dense fog is densest, and the muddy streets are muddiest near that leaden-headed old obstruction, appropriate ornament for the threshold of a leaden-headed old corporation, Temple Bar. And hard by Temple Bar, in Lincoln’s Inn Hall, at the very heart of the fog, sits the Lord High Chancellor in his High Court of Chancery."` + + wind int `text:"It was grand to see how the wind awoke, and bent the trees, and drove the rain before it like a cloud of smoke; and to hear the solemn thunder, and to see the lightning; and while thinking with awe of the tremendous powers by which our little lives are encompassed, to consider how beneficent they are, and how upon the smallest flower and leaf there was already a freshness poured from all this seeming rage, which seemed to make creation new again."` + + jarndyce int `text:"Jarndyce and Jarndyce drones on. This scarecrow of a suit has, over the course of time, become so complicated, that no man alive knows what it means. The parties to it understand it least; but it has been observed that no two Chancery lawyers can talk about it for five minutes, without coming to a total disagreement as to all the premises. 
Innumerable children have been born into the cause; innumerable young people have married into it; innumerable old people have died out of it. Scores of persons have deliriously found themselves made parties in Jarndyce and Jarndyce, without knowing how or why; whole families have inherited legendary hatreds with the suit. The little plaintiff or defendant, who was promised a new rocking-horse when Jarndyce and Jarndyce should be settled, has grown up, possessed himself of a real horse, and trotted away into the other world. Fair wards of court have faded into mothers and grandmothers; a long procession of Chancellors has come in and gone out; the legion of bills in the suit have been transformed into mere bills of mortality; there are not three Jarndyces left upon the earth perhaps, since old Tom Jarndyce in despair blew his brains out at a coffee-house in Chancery Lane; but Jarndyce and Jarndyce still drags its dreary length before the Court, perennially hopeless."` + + principle int `text:"The one great principle of the English law is, to make business for itself. There is no other principle distinctly, certainly, and consistently maintained through all its narrow turnings. Viewed by this light it becomes a coherent scheme, and not the monstrous maze the laity are apt to think it. Let them but once clearly perceive that its grand principle is to make business for itself at their expense, and surely they will cease to grumble."` +} + +func TestLargeSymName(t *testing.T) { + // The compiler generates a symbol name using the string form of the + // type. This tests that the linker can read symbol names larger than + // the bufio buffer. Issue #15104. 
+ _ = AuthorPaidByTheColumnInch +} + +func TestIssue21703(t *testing.T) { + t.Parallel() + + testenv.MustHaveGoBuild(t) + testenv.MustInternalLink(t, false) + + const source = ` +package main +const X = "\n!\n" +func main() {} +` + + tmpdir := t.TempDir() + main := filepath.Join(tmpdir, "main.go") + + err := os.WriteFile(main, []byte(source), 0666) + if err != nil { + t.Fatalf("failed to write main.go: %v\n", err) + } + + importcfgfile := filepath.Join(tmpdir, "importcfg") + testenv.WriteImportcfg(t, importcfgfile, nil, main) + + cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "compile", "-importcfg="+importcfgfile, "-p=main", "main.go") + cmd.Dir = tmpdir + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("failed to compile main.go: %v, output: %s\n", err, out) + } + + cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "link", "-importcfg="+importcfgfile, "main.o") + cmd.Dir = tmpdir + out, err = cmd.CombinedOutput() + if err != nil { + if runtime.GOOS == "android" && runtime.GOARCH == "arm64" { + testenv.SkipFlaky(t, 58806) + } + t.Fatalf("failed to link main.o: %v, output: %s\n", err, out) + } +} + +// TestIssue28429 ensures that the linker does not attempt to link +// sections not named *.o. Such sections may be used by a build system +// to, for example, save facts produced by a modular static analysis +// such as golang.org/x/tools/go/analysis. +func TestIssue28429(t *testing.T) { + t.Parallel() + + testenv.MustHaveGoBuild(t) + testenv.MustInternalLink(t, false) + + tmpdir := t.TempDir() + + write := func(name, content string) { + err := os.WriteFile(filepath.Join(tmpdir, name), []byte(content), 0666) + if err != nil { + t.Fatal(err) + } + } + + runGo := func(args ...string) { + cmd := testenv.Command(t, testenv.GoToolPath(t), args...) 
+ cmd.Dir = tmpdir + out, err := cmd.CombinedOutput() + if err != nil { + if len(args) >= 2 && args[1] == "link" && runtime.GOOS == "android" && runtime.GOARCH == "arm64" { + testenv.SkipFlaky(t, 58806) + } + t.Fatalf("'go %s' failed: %v, output: %s", + strings.Join(args, " "), err, out) + } + } + + // Compile a main package. + write("main.go", "package main; func main() {}") + importcfgfile := filepath.Join(tmpdir, "importcfg") + testenv.WriteImportcfg(t, importcfgfile, nil, filepath.Join(tmpdir, "main.go")) + runGo("tool", "compile", "-importcfg="+importcfgfile, "-p=main", "main.go") + runGo("tool", "pack", "c", "main.a", "main.o") + + // Add an extra section with a short, non-.o name. + // This simulates an alternative build system. + write(".facts", "this is not an object file") + runGo("tool", "pack", "r", "main.a", ".facts") + + // Verify that the linker does not attempt + // to compile the extra section. + runGo("tool", "link", "-importcfg="+importcfgfile, "main.a") +} + +func TestUnresolved(t *testing.T) { + testenv.MustHaveGoBuild(t) + + t.Parallel() + + tmpdir := t.TempDir() + + write := func(name, content string) { + err := os.WriteFile(filepath.Join(tmpdir, name), []byte(content), 0666) + if err != nil { + t.Fatal(err) + } + } + + // Test various undefined references. Because of issue #29852, + // this used to give confusing error messages because the + // linker would find an undefined reference to "zero" created + // by the runtime package. 
+ + write("go.mod", "module testunresolved\n") + write("main.go", `package main + +func main() { + x() +} + +func x() +`) + write("main.s", ` +TEXT ·x(SB),0,$0 + MOVD zero<>(SB), AX + MOVD zero(SB), AX + MOVD ·zero(SB), AX + RET +`) + cmd := testenv.Command(t, testenv.GoToolPath(t), "build") + cmd.Dir = tmpdir + cmd.Env = append(os.Environ(), + "GOARCH=amd64", "GOOS=linux", "GOPATH="+filepath.Join(tmpdir, "_gopath")) + out, err := cmd.CombinedOutput() + if err == nil { + t.Fatalf("expected build to fail, but it succeeded") + } + out = regexp.MustCompile("(?m)^#.*\n").ReplaceAll(out, nil) + got := string(out) + want := `main.x: relocation target zero not defined +main.x: relocation target zero not defined +main.x: relocation target main.zero not defined +` + if want != got { + t.Fatalf("want:\n%sgot:\n%s", want, got) + } +} + +func TestIssue33979(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + testenv.MustInternalLink(t, true) + + t.Parallel() + + tmpdir := t.TempDir() + + write := func(name, content string) { + err := os.WriteFile(filepath.Join(tmpdir, name), []byte(content), 0666) + if err != nil { + t.Fatal(err) + } + } + + run := func(name string, args ...string) string { + cmd := testenv.Command(t, name, args...) + cmd.Dir = tmpdir + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("'go %s' failed: %v, output: %s", strings.Join(args, " "), err, out) + } + return string(out) + } + runGo := func(args ...string) string { + return run(testenv.GoToolPath(t), args...) + } + + // Test object with undefined reference that was not generated + // by Go, resulting in an SXREF symbol being loaded during linking. + // Because of issue #33979, the SXREF symbol would be found during + // error reporting, resulting in confusing error messages. + + write("main.go", `package main +func main() { + x() +} +func x() +`) + // The following assembly must work on all architectures. 
+ write("x.s", ` +TEXT ·x(SB),0,$0 + CALL foo(SB) + RET +`) + write("x.c", ` +void undefined(); + +void foo() { + undefined(); +} +`) + + cc := strings.TrimSpace(runGo("env", "CC")) + cflags := strings.Fields(runGo("env", "GOGCCFLAGS")) + + importcfgfile := filepath.Join(tmpdir, "importcfg") + testenv.WriteImportcfg(t, importcfgfile, nil, "runtime") + + // Compile, assemble and pack the Go and C code. + runGo("tool", "asm", "-p=main", "-gensymabis", "-o", "symabis", "x.s") + runGo("tool", "compile", "-importcfg="+importcfgfile, "-symabis", "symabis", "-p=main", "-o", "x1.o", "main.go") + runGo("tool", "asm", "-p=main", "-o", "x2.o", "x.s") + run(cc, append(cflags, "-c", "-o", "x3.o", "x.c")...) + runGo("tool", "pack", "c", "x.a", "x1.o", "x2.o", "x3.o") + + // Now attempt to link using the internal linker. + cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "link", "-importcfg="+importcfgfile, "-linkmode=internal", "x.a") + cmd.Dir = tmpdir + out, err := cmd.CombinedOutput() + if err == nil { + t.Fatalf("expected link to fail, but it succeeded") + } + re := regexp.MustCompile(`(?m)^main\(.*text\): relocation target undefined not defined$`) + if !re.Match(out) { + t.Fatalf("got:\n%q\nwant:\n%s", out, re) + } +} + +func TestBuildForTvOS(t *testing.T) { + testenv.MustHaveCGO(t) + testenv.MustHaveGoBuild(t) + + // Only run this on darwin, where we can cross build for tvOS. 
+ if runtime.GOOS != "darwin" { + t.Skip("skipping on non-darwin platform") + } + if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" { + t.Skip("skipping in -short mode with $GO_BUILDER_NAME empty") + } + if err := testenv.Command(t, "xcrun", "--help").Run(); err != nil { + t.Skipf("error running xcrun, required for iOS cross build: %v", err) + } + + t.Parallel() + + sdkPath, err := testenv.Command(t, "xcrun", "--sdk", "appletvos", "--show-sdk-path").Output() + if err != nil { + t.Skip("failed to locate appletvos SDK, skipping") + } + CC := []string{ + "clang", + "-arch", + "arm64", + "-isysroot", strings.TrimSpace(string(sdkPath)), + "-mtvos-version-min=12.0", + "-fembed-bitcode", + } + CGO_LDFLAGS := []string{"-framework", "CoreFoundation"} + lib := filepath.Join("testdata", "testBuildFortvOS", "lib.go") + tmpDir := t.TempDir() + + ar := filepath.Join(tmpDir, "lib.a") + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-buildmode=c-archive", "-o", ar, lib) + env := []string{ + "CGO_ENABLED=1", + "GOOS=ios", + "GOARCH=arm64", + "CC=" + strings.Join(CC, " "), + "CGO_CFLAGS=", // ensure CGO_CFLAGS does not contain any flags. Issue #35459 + "CGO_LDFLAGS=" + strings.Join(CGO_LDFLAGS, " "), + } + cmd.Env = append(os.Environ(), env...) + t.Logf("%q %v", env, cmd) + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("%v: %v:\n%s", cmd.Args, err, out) + } + + link := testenv.Command(t, CC[0], CC[1:]...) + link.Args = append(link.Args, CGO_LDFLAGS...) + link.Args = append(link.Args, "-o", filepath.Join(tmpDir, "a.out")) // Avoid writing to package directory. 
+ link.Args = append(link.Args, ar, filepath.Join("testdata", "testBuildFortvOS", "main.m")) + t.Log(link) + if out, err := link.CombinedOutput(); err != nil { + t.Fatalf("%v: %v:\n%s", link.Args, err, out) + } +} + +var testXFlagSrc = ` +package main +var X = "hello" +var Z = [99999]int{99998:12345} // make it large enough to be mmaped +func main() { println(X) } +` + +func TestXFlag(t *testing.T) { + testenv.MustHaveGoBuild(t) + + t.Parallel() + + tmpdir := t.TempDir() + + src := filepath.Join(tmpdir, "main.go") + err := os.WriteFile(src, []byte(testXFlagSrc), 0666) + if err != nil { + t.Fatal(err) + } + + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags=-X=main.X=meow", "-o", filepath.Join(tmpdir, "main"), src) + if out, err := cmd.CombinedOutput(); err != nil { + t.Errorf("%v: %v:\n%s", cmd.Args, err, out) + } +} + +var testMachOBuildVersionSrc = ` +package main +func main() { } +` + +func TestMachOBuildVersion(t *testing.T) { + testenv.MustHaveGoBuild(t) + + t.Parallel() + + tmpdir := t.TempDir() + + src := filepath.Join(tmpdir, "main.go") + err := os.WriteFile(src, []byte(testMachOBuildVersionSrc), 0666) + if err != nil { + t.Fatal(err) + } + + exe := filepath.Join(tmpdir, "main") + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags=-linkmode=internal", "-o", exe, src) + cmd.Env = append(os.Environ(), + "CGO_ENABLED=0", + "GOOS=darwin", + "GOARCH=amd64", + ) + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("%v: %v:\n%s", cmd.Args, err, out) + } + exef, err := os.Open(exe) + if err != nil { + t.Fatal(err) + } + defer exef.Close() + exem, err := macho.NewFile(exef) + if err != nil { + t.Fatal(err) + } + found := false + const LC_BUILD_VERSION = 0x32 + checkMin := func(ver uint32) { + major, minor := (ver>>16)&0xff, (ver>>8)&0xff + if major != 10 || minor < 9 { + t.Errorf("LC_BUILD_VERSION version %d.%d < 10.9", major, minor) + } + } + for _, cmd := range exem.Loads { + raw := cmd.Raw() + type_ := 
exem.ByteOrder.Uint32(raw) + if type_ != LC_BUILD_VERSION { + continue + } + osVer := exem.ByteOrder.Uint32(raw[12:]) + checkMin(osVer) + sdkVer := exem.ByteOrder.Uint32(raw[16:]) + checkMin(sdkVer) + found = true + break + } + if !found { + t.Errorf("no LC_BUILD_VERSION load command found") + } +} + +const Issue34788src = ` + +package blah + +func Blah(i int) int { + a := [...]int{1, 2, 3, 4, 5, 6, 7, 8} + return a[i&7] +} +` + +func TestIssue34788Android386TLSSequence(t *testing.T) { + testenv.MustHaveGoBuild(t) + + // This is a cross-compilation test, so it doesn't make + // sense to run it on every GOOS/GOARCH combination. Limit + // the test to amd64 + darwin/linux. + if runtime.GOARCH != "amd64" || + (runtime.GOOS != "darwin" && runtime.GOOS != "linux") { + t.Skip("skipping on non-{linux,darwin}/amd64 platform") + } + + t.Parallel() + + tmpdir := t.TempDir() + + src := filepath.Join(tmpdir, "blah.go") + err := os.WriteFile(src, []byte(Issue34788src), 0666) + if err != nil { + t.Fatal(err) + } + + obj := filepath.Join(tmpdir, "blah.o") + cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "compile", "-p=blah", "-o", obj, src) + cmd.Env = append(os.Environ(), "GOARCH=386", "GOOS=android") + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("failed to compile blah.go: %v, output: %s\n", err, out) + } + + // Run objdump on the resulting object. + cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "objdump", obj) + out, oerr := cmd.CombinedOutput() + if oerr != nil { + t.Fatalf("failed to objdump blah.o: %v, output: %s\n", oerr, out) + } + + // Sift through the output; we should not be seeing any R_TLS_LE relocs. 
+ scanner := bufio.NewScanner(bytes.NewReader(out)) + for scanner.Scan() { + line := scanner.Text() + if strings.Contains(line, "R_TLS_LE") { + t.Errorf("objdump output contains unexpected R_TLS_LE reloc: %s", line) + } + } +} + +const testStrictDupGoSrc = ` +package main +func f() +func main() { f() } +` + +const testStrictDupAsmSrc1 = ` +#include "textflag.h" +TEXT ·f(SB), NOSPLIT|DUPOK, $0-0 + RET +` + +const testStrictDupAsmSrc2 = ` +#include "textflag.h" +TEXT ·f(SB), NOSPLIT|DUPOK, $0-0 + JMP 0(PC) +` + +const testStrictDupAsmSrc3 = ` +#include "textflag.h" +GLOBL ·rcon(SB), RODATA|DUPOK, $64 +` + +const testStrictDupAsmSrc4 = ` +#include "textflag.h" +GLOBL ·rcon(SB), RODATA|DUPOK, $32 +` + +func TestStrictDup(t *testing.T) { + // Check that -strictdups flag works. + testenv.MustHaveGoBuild(t) + + asmfiles := []struct { + fname string + payload string + }{ + {"a", testStrictDupAsmSrc1}, + {"b", testStrictDupAsmSrc2}, + {"c", testStrictDupAsmSrc3}, + {"d", testStrictDupAsmSrc4}, + } + + t.Parallel() + + tmpdir := t.TempDir() + + src := filepath.Join(tmpdir, "x.go") + err := os.WriteFile(src, []byte(testStrictDupGoSrc), 0666) + if err != nil { + t.Fatal(err) + } + for _, af := range asmfiles { + src = filepath.Join(tmpdir, af.fname+".s") + err = os.WriteFile(src, []byte(af.payload), 0666) + if err != nil { + t.Fatal(err) + } + } + src = filepath.Join(tmpdir, "go.mod") + err = os.WriteFile(src, []byte("module teststrictdup\n"), 0666) + if err != nil { + t.Fatal(err) + } + + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags=-strictdups=1") + cmd.Dir = tmpdir + out, err := cmd.CombinedOutput() + if err != nil { + t.Errorf("linking with -strictdups=1 failed: %v\n%s", err, string(out)) + } + if !bytes.Contains(out, []byte("mismatched payload")) { + t.Errorf("unexpected output:\n%s", out) + } + + cmd = testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags=-strictdups=2") + cmd.Dir = tmpdir + out, err = cmd.CombinedOutput() + if err == nil { 
+ t.Errorf("linking with -strictdups=2 did not fail") + } + // NB: on amd64 we get the 'new length' error, on arm64 the 'different + // contents' error. + if !(bytes.Contains(out, []byte("mismatched payload: new length")) || + bytes.Contains(out, []byte("mismatched payload: same length but different contents"))) || + !bytes.Contains(out, []byte("mismatched payload: different sizes")) { + t.Errorf("unexpected output:\n%s", out) + } +} + +const testFuncAlignSrc = ` +package main +import ( + "fmt" +) +func alignPc() +var alignPcFnAddr uintptr + +func main() { + if alignPcFnAddr % 512 != 0 { + fmt.Printf("expected 512 bytes alignment, got %v\n", alignPcFnAddr) + } else { + fmt.Printf("PASS") + } +} +` + +var testFuncAlignAsmSources = map[string]string{ + "arm64": ` +#include "textflag.h" + +TEXT ·alignPc(SB),NOSPLIT, $0-0 + MOVD $2, R0 + PCALIGN $512 + MOVD $3, R1 + RET + +GLOBL ·alignPcFnAddr(SB),RODATA,$8 +DATA ·alignPcFnAddr(SB)/8,$·alignPc(SB) +`, + "loong64": ` +#include "textflag.h" + +TEXT ·alignPc(SB),NOSPLIT, $0-0 + MOVV $2, R4 + PCALIGN $512 + MOVV $3, R5 + RET + +GLOBL ·alignPcFnAddr(SB),RODATA,$8 +DATA ·alignPcFnAddr(SB)/8,$·alignPc(SB) +`, +} + +// TestFuncAlign verifies that the address of a function can be aligned +// with a specific value on arm64 and loong64. 
+func TestFuncAlign(t *testing.T) { + testFuncAlignAsmSrc := testFuncAlignAsmSources[runtime.GOARCH] + if len(testFuncAlignAsmSrc) == 0 || runtime.GOOS != "linux" { + t.Skip("skipping on non-linux/{arm64,loong64} platform") + } + testenv.MustHaveGoBuild(t) + + t.Parallel() + + tmpdir := t.TempDir() + + src := filepath.Join(tmpdir, "go.mod") + err := os.WriteFile(src, []byte("module cmd/link/TestFuncAlign/falign"), 0666) + if err != nil { + t.Fatal(err) + } + src = filepath.Join(tmpdir, "falign.go") + err = os.WriteFile(src, []byte(testFuncAlignSrc), 0666) + if err != nil { + t.Fatal(err) + } + src = filepath.Join(tmpdir, "falign.s") + err = os.WriteFile(src, []byte(testFuncAlignAsmSrc), 0666) + if err != nil { + t.Fatal(err) + } + + // Build and run with old object file format. + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", "falign") + cmd.Dir = tmpdir + out, err := cmd.CombinedOutput() + if err != nil { + t.Errorf("build failed: %v", err) + } + cmd = testenv.Command(t, tmpdir+"/falign") + out, err = cmd.CombinedOutput() + if err != nil { + t.Errorf("failed to run with err %v, output: %s", err, out) + } + if string(out) != "PASS" { + t.Errorf("unexpected output: %s\n", out) + } +} + +const testTrampSrc = ` +package main +import "fmt" +func main() { + fmt.Println("hello") + + defer func(){ + if e := recover(); e == nil { + panic("did not panic") + } + }() + f1() +} + +// Test deferreturn trampolines. See issue #39049. +func f1() { defer f2() } +func f2() { panic("XXX") } +` + +func TestTrampoline(t *testing.T) { + // Test that trampoline insertion works as expected. + // For stress test, we set -debugtramp=2 flag, which sets a very low + // threshold for trampoline generation, and essentially all cross-package + // calls will use trampolines. + buildmodes := []string{"default"} + switch runtime.GOARCH { + case "arm", "arm64", "ppc64": + case "ppc64le": + // Trampolines are generated differently when internal linking PIE, test them too. 
+ buildmodes = append(buildmodes, "pie") + default: + t.Skipf("trampoline insertion is not implemented on %s", runtime.GOARCH) + } + + testenv.MustHaveGoBuild(t) + + t.Parallel() + + tmpdir := t.TempDir() + + src := filepath.Join(tmpdir, "hello.go") + err := os.WriteFile(src, []byte(testTrampSrc), 0666) + if err != nil { + t.Fatal(err) + } + exe := filepath.Join(tmpdir, "hello.exe") + + for _, mode := range buildmodes { + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-buildmode="+mode, "-ldflags=-debugtramp=2", "-o", exe, src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("build (%s) failed: %v\n%s", mode, err, out) + } + cmd = testenv.Command(t, exe) + out, err = cmd.CombinedOutput() + if err != nil { + t.Errorf("executable failed to run (%s): %v\n%s", mode, err, out) + } + if string(out) != "hello\n" { + t.Errorf("unexpected output (%s):\n%s", mode, out) + } + } +} + +const testTrampCgoSrc = ` +package main + +// #include +// void CHello() { printf("hello\n"); fflush(stdout); } +import "C" + +func main() { + C.CHello() +} +` + +func TestTrampolineCgo(t *testing.T) { + // Test that trampoline insertion works for cgo code. + // For stress test, we set -debugtramp=2 flag, which sets a very low + // threshold for trampoline generation, and essentially all cross-package + // calls will use trampolines. + buildmodes := []string{"default"} + switch runtime.GOARCH { + case "arm", "arm64", "ppc64": + case "ppc64le": + // Trampolines are generated differently when internal linking PIE, test them too. 
+ buildmodes = append(buildmodes, "pie") + default: + t.Skipf("trampoline insertion is not implemented on %s", runtime.GOARCH) + } + + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + + t.Parallel() + + tmpdir := t.TempDir() + + src := filepath.Join(tmpdir, "hello.go") + err := os.WriteFile(src, []byte(testTrampCgoSrc), 0666) + if err != nil { + t.Fatal(err) + } + exe := filepath.Join(tmpdir, "hello.exe") + + for _, mode := range buildmodes { + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-buildmode="+mode, "-ldflags=-debugtramp=2", "-o", exe, src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("build (%s) failed: %v\n%s", mode, err, out) + } + cmd = testenv.Command(t, exe) + out, err = cmd.CombinedOutput() + if err != nil { + t.Errorf("executable failed to run (%s): %v\n%s", mode, err, out) + } + if string(out) != "hello\n" && string(out) != "hello\r\n" { + t.Errorf("unexpected output (%s):\n%s", mode, out) + } + + // Test internal linking mode. + + if !testenv.CanInternalLink(true) { + continue + } + cmd = testenv.Command(t, testenv.GoToolPath(t), "build", "-buildmode="+mode, "-ldflags=-debugtramp=2 -linkmode=internal", "-o", exe, src) + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("build (%s) failed: %v\n%s", mode, err, out) + } + cmd = testenv.Command(t, exe) + out, err = cmd.CombinedOutput() + if err != nil { + t.Errorf("executable failed to run (%s): %v\n%s", mode, err, out) + } + if string(out) != "hello\n" && string(out) != "hello\r\n" { + t.Errorf("unexpected output (%s):\n%s", mode, out) + } + } +} + +func TestIndexMismatch(t *testing.T) { + // Test that index mismatch will cause a link-time error (not run-time error). + // This shouldn't happen with "go build". We invoke the compiler and the linker + // manually, and try to "trick" the linker with an inconsistent object file. 
+ testenv.MustHaveGoBuild(t) + testenv.MustInternalLink(t, false) + + t.Parallel() + + tmpdir := t.TempDir() + + aSrc := filepath.Join("testdata", "testIndexMismatch", "a.go") + bSrc := filepath.Join("testdata", "testIndexMismatch", "b.go") + mSrc := filepath.Join("testdata", "testIndexMismatch", "main.go") + aObj := filepath.Join(tmpdir, "a.o") + mObj := filepath.Join(tmpdir, "main.o") + exe := filepath.Join(tmpdir, "main.exe") + + importcfgFile := filepath.Join(tmpdir, "runtime.importcfg") + testenv.WriteImportcfg(t, importcfgFile, nil, "runtime") + importcfgWithAFile := filepath.Join(tmpdir, "witha.importcfg") + testenv.WriteImportcfg(t, importcfgWithAFile, map[string]string{"a": aObj}, "runtime") + + // Build a program with main package importing package a. + cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "compile", "-importcfg="+importcfgFile, "-p=a", "-o", aObj, aSrc) + t.Log(cmd) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("compiling a.go failed: %v\n%s", err, out) + } + cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "compile", "-importcfg="+importcfgWithAFile, "-p=main", "-I", tmpdir, "-o", mObj, mSrc) + t.Log(cmd) + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("compiling main.go failed: %v\n%s", err, out) + } + cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "link", "-importcfg="+importcfgWithAFile, "-L", tmpdir, "-o", exe, mObj) + t.Log(cmd) + out, err = cmd.CombinedOutput() + if err != nil { + if runtime.GOOS == "android" && runtime.GOARCH == "arm64" { + testenv.SkipFlaky(t, 58806) + } + t.Errorf("linking failed: %v\n%s", err, out) + } + + // Now, overwrite a.o with the object of b.go. This should + // result in an index mismatch. 
+ cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "compile", "-importcfg="+importcfgFile, "-p=a", "-o", aObj, bSrc) + t.Log(cmd) + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("compiling a.go failed: %v\n%s", err, out) + } + cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "link", "-importcfg="+importcfgWithAFile, "-L", tmpdir, "-o", exe, mObj) + t.Log(cmd) + out, err = cmd.CombinedOutput() + if err == nil { + t.Fatalf("linking didn't fail") + } + if !bytes.Contains(out, []byte("fingerprint mismatch")) { + t.Errorf("did not see expected error message. out:\n%s", out) + } +} + +func TestPErsrcBinutils(t *testing.T) { + // Test that PE rsrc section is handled correctly (issue 39658). + testenv.MustHaveGoBuild(t) + + if (runtime.GOARCH != "386" && runtime.GOARCH != "amd64") || runtime.GOOS != "windows" { + // This test is limited to amd64 and 386, because binutils is limited as such + t.Skipf("this is only for windows/amd64 and windows/386") + } + + t.Parallel() + + tmpdir := t.TempDir() + + pkgdir := filepath.Join("testdata", "pe-binutils") + exe := filepath.Join(tmpdir, "a.exe") + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", exe) + cmd.Dir = pkgdir + // cmd.Env = append(os.Environ(), "GOOS=windows", "GOARCH=amd64") // uncomment if debugging in a cross-compiling environment + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("building failed: %v, output:\n%s", err, out) + } + + // Check that the binary contains the rsrc data + b, err := os.ReadFile(exe) + if err != nil { + t.Fatalf("reading output failed: %v", err) + } + if !bytes.Contains(b, []byte("Hello Gophers!")) { + t.Fatalf("binary does not contain expected content") + } +} + +func TestPErsrcLLVM(t *testing.T) { + // Test that PE rsrc section is handled correctly (issue 39658). 
+ testenv.MustHaveGoBuild(t) + + if runtime.GOOS != "windows" { + t.Skipf("this is a windows-only test") + } + + t.Parallel() + + tmpdir := t.TempDir() + + pkgdir := filepath.Join("testdata", "pe-llvm") + exe := filepath.Join(tmpdir, "a.exe") + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", exe) + cmd.Dir = pkgdir + // cmd.Env = append(os.Environ(), "GOOS=windows", "GOARCH=amd64") // uncomment if debugging in a cross-compiling environment + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("building failed: %v, output:\n%s", err, out) + } + + // Check that the binary contains the rsrc data + b, err := os.ReadFile(exe) + if err != nil { + t.Fatalf("reading output failed: %v", err) + } + if !bytes.Contains(b, []byte("resname RCDATA a.rc")) { + t.Fatalf("binary does not contain expected content") + } +} + +func TestContentAddressableSymbols(t *testing.T) { + // Test that the linker handles content-addressable symbols correctly. + testenv.MustHaveGoBuild(t) + + t.Parallel() + + src := filepath.Join("testdata", "testHashedSyms", "p.go") + cmd := testenv.Command(t, testenv.GoToolPath(t), "run", src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Errorf("command %s failed: %v\n%s", cmd, err, out) + } +} + +func TestReadOnly(t *testing.T) { + // Test that read-only data is indeed read-only. + testenv.MustHaveGoBuild(t) + + t.Parallel() + + src := filepath.Join("testdata", "testRO", "x.go") + cmd := testenv.Command(t, testenv.GoToolPath(t), "run", src) + out, err := cmd.CombinedOutput() + if err == nil { + t.Errorf("running test program did not fail. output:\n%s", out) + } +} + +const testIssue38554Src = ` +package main + +type T [10<<20]byte + +//go:noinline +func f() T { + return T{} // compiler will make a large stmp symbol, but not used. 
+} + +func main() { + x := f() + println(x[1]) +} +` + +func TestIssue38554(t *testing.T) { + testenv.MustHaveGoBuild(t) + + t.Parallel() + + tmpdir := t.TempDir() + + src := filepath.Join(tmpdir, "x.go") + err := os.WriteFile(src, []byte(testIssue38554Src), 0666) + if err != nil { + t.Fatalf("failed to write source file: %v", err) + } + exe := filepath.Join(tmpdir, "x.exe") + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", exe, src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("build failed: %v\n%s", err, out) + } + + fi, err := os.Stat(exe) + if err != nil { + t.Fatalf("failed to stat output file: %v", err) + } + + // The test program is not much different from a helloworld, which is + // typically a little over 1 MB. We allow 5 MB. If the bad stmp is live, + // it will be over 10 MB. + const want = 5 << 20 + if got := fi.Size(); got > want { + t.Errorf("binary too big: got %d, want < %d", got, want) + } +} + +const testIssue42396src = ` +package main + +//go:noinline +//go:nosplit +func callee(x int) { +} + +func main() { + callee(9) +} +` + +func TestIssue42396(t *testing.T) { + testenv.MustHaveGoBuild(t) + + if !platform.RaceDetectorSupported(runtime.GOOS, runtime.GOARCH) { + t.Skip("no race detector support") + } + + t.Parallel() + + tmpdir := t.TempDir() + + src := filepath.Join(tmpdir, "main.go") + err := os.WriteFile(src, []byte(testIssue42396src), 0666) + if err != nil { + t.Fatalf("failed to write source file: %v", err) + } + exe := filepath.Join(tmpdir, "main.exe") + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-gcflags=-race", "-o", exe, src) + out, err := cmd.CombinedOutput() + if err == nil { + t.Fatalf("build unexpectedly succeeded") + } + + // Check to make sure that we see a reasonable error message + // and not a panic. 
+ if strings.Contains(string(out), "panic:") { + t.Fatalf("build should not fail with panic:\n%s", out) + } + const want = "reference to undefined builtin" + if !strings.Contains(string(out), want) { + t.Fatalf("error message incorrect: expected it to contain %q but instead got:\n%s\n", want, out) + } +} + +const testLargeRelocSrc = ` +package main + +var x = [1<<25]byte{1<<23: 23, 1<<24: 24} + +var addr = [...]*byte{ + &x[1<<23-1], + &x[1<<23], + &x[1<<23+1], + &x[1<<24-1], + &x[1<<24], + &x[1<<24+1], +} + +func main() { + // check relocations in instructions + check(x[1<<23-1], 0) + check(x[1<<23], 23) + check(x[1<<23+1], 0) + check(x[1<<24-1], 0) + check(x[1<<24], 24) + check(x[1<<24+1], 0) + + // check absolute address relocations in data + check(*addr[0], 0) + check(*addr[1], 23) + check(*addr[2], 0) + check(*addr[3], 0) + check(*addr[4], 24) + check(*addr[5], 0) +} + +func check(x, y byte) { + if x != y { + panic("FAIL") + } +} +` + +func TestLargeReloc(t *testing.T) { + // Test that large relocation addend is handled correctly. + // In particular, on darwin/arm64 when external linking, + // Mach-O relocation has only 24-bit addend. See issue #42738. + testenv.MustHaveGoBuild(t) + t.Parallel() + + tmpdir := t.TempDir() + + src := filepath.Join(tmpdir, "x.go") + err := os.WriteFile(src, []byte(testLargeRelocSrc), 0666) + if err != nil { + t.Fatalf("failed to write source file: %v", err) + } + cmd := testenv.Command(t, testenv.GoToolPath(t), "run", src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Errorf("build failed: %v. output:\n%s", err, out) + } + + if testenv.HasCGO() { // currently all targets that support cgo can external link + cmd = testenv.Command(t, testenv.GoToolPath(t), "run", "-ldflags=-linkmode=external", src) + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("build failed: %v. output:\n%s", err, out) + } + } +} + +func TestUnlinkableObj(t *testing.T) { + // Test that the linker emits an error with unlinkable object. 
+ testenv.MustHaveGoBuild(t) + t.Parallel() + + if true /* was buildcfg.Experiment.Unified */ { + t.Skip("TODO(mdempsky): Fix ICE when importing unlinkable objects for GOEXPERIMENT=unified") + } + + tmpdir := t.TempDir() + + xSrc := filepath.Join(tmpdir, "x.go") + pSrc := filepath.Join(tmpdir, "p.go") + xObj := filepath.Join(tmpdir, "x.o") + pObj := filepath.Join(tmpdir, "p.o") + exe := filepath.Join(tmpdir, "x.exe") + importcfgfile := filepath.Join(tmpdir, "importcfg") + testenv.WriteImportcfg(t, importcfgfile, map[string]string{"p": pObj}) + err := os.WriteFile(xSrc, []byte("package main\nimport _ \"p\"\nfunc main() {}\n"), 0666) + if err != nil { + t.Fatalf("failed to write source file: %v", err) + } + err = os.WriteFile(pSrc, []byte("package p\n"), 0666) + if err != nil { + t.Fatalf("failed to write source file: %v", err) + } + cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "compile", "-importcfg="+importcfgfile, "-o", pObj, pSrc) // without -p + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("compile p.go failed: %v. output:\n%s", err, out) + } + cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "compile", "-importcfg="+importcfgfile, "-p=main", "-o", xObj, xSrc) + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("compile x.go failed: %v. output:\n%s", err, out) + } + cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "link", "-importcfg="+importcfgfile, "-o", exe, xObj) + out, err = cmd.CombinedOutput() + if err == nil { + t.Fatalf("link did not fail") + } + if !bytes.Contains(out, []byte("unlinkable object")) { + t.Errorf("did not see expected error message. out:\n%s", out) + } + + // It is okay to omit -p for (only) main package. + cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "compile", "-importcfg="+importcfgfile, "-p=p", "-o", pObj, pSrc) + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("compile p.go failed: %v. 
output:\n%s", err, out) + } + cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "compile", "-importcfg="+importcfgfile, "-o", xObj, xSrc) // without -p + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("compile failed: %v. output:\n%s", err, out) + } + + cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "link", "-importcfg="+importcfgfile, "-o", exe, xObj) + out, err = cmd.CombinedOutput() + if err != nil { + t.Errorf("link failed: %v. output:\n%s", err, out) + } +} + +func TestExtLinkCmdlineDeterminism(t *testing.T) { + // Test that we pass flags in deterministic order to the external linker + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) // this test requires -linkmode=external + t.Parallel() + + // test source code, with some cgo exports + testSrc := ` +package main +import "C" +//export F1 +func F1() {} +//export F2 +func F2() {} +//export F3 +func F3() {} +func main() {} +` + + tmpdir := t.TempDir() + src := filepath.Join(tmpdir, "x.go") + if err := os.WriteFile(src, []byte(testSrc), 0666); err != nil { + t.Fatal(err) + } + exe := filepath.Join(tmpdir, "x.exe") + + // Use a deterministc tmp directory so the temporary file paths are + // deterministc. + linktmp := filepath.Join(tmpdir, "linktmp") + if err := os.Mkdir(linktmp, 0777); err != nil { + t.Fatal(err) + } + + // Link with -v -linkmode=external to see the flags we pass to the + // external linker. 
+ ldflags := "-ldflags=-v -linkmode=external -tmpdir=" + linktmp + var out0 []byte + for i := 0; i < 5; i++ { + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", ldflags, "-o", exe, src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("build failed: %v, output:\n%s", err, out) + } + if err := os.Remove(exe); err != nil { + t.Fatal(err) + } + + // extract the "host link" invocaton + j := bytes.Index(out, []byte("\nhost link:")) + if j == -1 { + t.Fatalf("host link step not found, output:\n%s", out) + } + out = out[j+1:] + k := bytes.Index(out, []byte("\n")) + if k == -1 { + t.Fatalf("no newline after host link, output:\n%s", out) + } + out = out[:k] + + // filter out output file name, which is passed by the go + // command and is nondeterministic. + fs := bytes.Fields(out) + for i, f := range fs { + if bytes.Equal(f, []byte(`"-o"`)) && i+1 < len(fs) { + fs[i+1] = []byte("a.out") + break + } + } + out = bytes.Join(fs, []byte{' '}) + + if i == 0 { + out0 = out + continue + } + if !bytes.Equal(out0, out) { + t.Fatalf("output differ:\n%s\n==========\n%s", out0, out) + } + } +} + +// TestResponseFile tests that creating a response file to pass to the +// external linker works correctly. +func TestResponseFile(t *testing.T) { + t.Parallel() + + testenv.MustHaveGoBuild(t) + + // This test requires -linkmode=external. Currently all + // systems that support cgo support -linkmode=external. + testenv.MustHaveCGO(t) + + tmpdir := t.TempDir() + + src := filepath.Join(tmpdir, "x.go") + if err := os.WriteFile(src, []byte(`package main; import "C"; func main() {}`), 0666); err != nil { + t.Fatal(err) + } + + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", "output", "x.go") + cmd.Dir = tmpdir + + // Add enough arguments to push cmd/link into creating a response file. 
+ var sb strings.Builder + sb.WriteString(`'-ldflags=all="-extldflags=`) + for i := 0; i < sys.ExecArgLengthLimit/len("-g"); i++ { + if i > 0 { + sb.WriteString(" ") + } + sb.WriteString("-g") + } + sb.WriteString(`"'`) + cmd = testenv.CleanCmdEnv(cmd) + cmd.Env = append(cmd.Env, "GOFLAGS="+sb.String()) + + out, err := cmd.CombinedOutput() + if len(out) > 0 { + t.Logf("%s", out) + } + if err != nil { + t.Error(err) + } +} + +func TestDynimportVar(t *testing.T) { + // Test that we can access dynamically imported variables. + // Currently darwin only. + if runtime.GOOS != "darwin" { + t.Skip("skip on non-darwin platform") + } + + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) + + t.Parallel() + + tmpdir := t.TempDir() + exe := filepath.Join(tmpdir, "a.exe") + src := filepath.Join("testdata", "dynimportvar", "main.go") + + for _, mode := range []string{"internal", "external"} { + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags=-linkmode="+mode, "-o", exe, src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("build (linkmode=%s) failed: %v\n%s", mode, err, out) + } + cmd = testenv.Command(t, exe) + out, err = cmd.CombinedOutput() + if err != nil { + t.Errorf("executable failed to run (%s): %v\n%s", mode, err, out) + } + } +} + +const helloSrc = ` +package main +var X = 42 +var Y int +func main() { println("hello", X, Y) } +` + +func TestFlagS(t *testing.T) { + // Test that the -s flag strips the symbol table. 
+ testenv.MustHaveGoBuild(t) + + t.Parallel() + + tmpdir := t.TempDir() + exe := filepath.Join(tmpdir, "a.exe") + src := filepath.Join(tmpdir, "a.go") + err := os.WriteFile(src, []byte(helloSrc), 0666) + if err != nil { + t.Fatal(err) + } + + modes := []string{"auto"} + if testenv.HasCGO() { + modes = append(modes, "external") + } + + // check a text symbol, a data symbol, and a BSS symbol + syms := []string{"main.main", "main.X", "main.Y"} + + for _, mode := range modes { + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags=-s -linkmode="+mode, "-o", exe, src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("build (linkmode=%s) failed: %v\n%s", mode, err, out) + } + cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "nm", exe) + out, err = cmd.CombinedOutput() + if err != nil && !errors.As(err, new(*exec.ExitError)) { + // Error exit is fine as it may have no symbols. + // On darwin we need to emit dynamic symbol references so it + // actually has some symbols, and nm succeeds. + t.Errorf("(mode=%s) go tool nm failed: %v\n%s", mode, err, out) + } + for _, s := range syms { + if bytes.Contains(out, []byte(s)) { + t.Errorf("(mode=%s): unexpected symbol %s", mode, s) + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/linkbig_test.go b/platform/dbops/binaries/go/go/src/cmd/link/linkbig_test.go new file mode 100644 index 0000000000000000000000000000000000000000..45cb1b3ab6784fdcb27c7128be2a28bb342389b3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/linkbig_test.go @@ -0,0 +1,111 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This program generates a test to verify that a program can be +// successfully linked even when there are very large text +// sections present. 
+ +package main + +import ( + "bytes" + "fmt" + "internal/buildcfg" + "internal/testenv" + "os" + "testing" +) + +func TestLargeText(t *testing.T) { + if testing.Short() || (buildcfg.GOARCH != "ppc64le" && buildcfg.GOARCH != "ppc64" && buildcfg.GOARCH != "arm") { + t.Skipf("Skipping large text section test in short mode or on %s", buildcfg.GOARCH) + } + testenv.MustHaveGoBuild(t) + + var w bytes.Buffer + const FN = 4 + tmpdir := t.TempDir() + + if err := os.WriteFile(tmpdir+"/go.mod", []byte("module big_test\n"), 0666); err != nil { + t.Fatal(err) + } + + // Generate the scenario where the total amount of text exceeds the + // limit for the jmp/call instruction, on RISC architectures like ppc64le, + // which is 2^26. When that happens the call requires special trampolines or + // long branches inserted by the linker where supported. + // Multiple .s files are generated instead of one. + instOnArch := map[string]string{ + "ppc64": "\tMOVD\tR0,R3\n", + "ppc64le": "\tMOVD\tR0,R3\n", + "arm": "\tMOVW\tR0,R1\n", + } + inst := instOnArch[buildcfg.GOARCH] + for j := 0; j < FN; j++ { + testname := fmt.Sprintf("bigfn%d", j) + fmt.Fprintf(&w, "TEXT ·%s(SB),$0\n", testname) + for i := 0; i < 2200000; i++ { + fmt.Fprintf(&w, inst) + } + fmt.Fprintf(&w, "\tRET\n") + err := os.WriteFile(tmpdir+"/"+testname+".s", w.Bytes(), 0666) + if err != nil { + t.Fatalf("can't write output: %v\n", err) + } + w.Reset() + } + fmt.Fprintf(&w, "package main\n") + fmt.Fprintf(&w, "\nimport (\n") + fmt.Fprintf(&w, "\t\"os\"\n") + fmt.Fprintf(&w, "\t\"fmt\"\n") + fmt.Fprintf(&w, ")\n\n") + + for i := 0; i < FN; i++ { + fmt.Fprintf(&w, "func bigfn%d()\n", i) + } + fmt.Fprintf(&w, "\nfunc main() {\n") + + // There are lots of dummy code generated in the .s files just to generate a lot + // of text. Link them in but guard their call so their code is not executed but + // the main part of the program can be run. 
+ fmt.Fprintf(&w, "\tif os.Getenv(\"LINKTESTARG\") != \"\" {\n") + for i := 0; i < FN; i++ { + fmt.Fprintf(&w, "\t\tbigfn%d()\n", i) + } + fmt.Fprintf(&w, "\t}\n") + fmt.Fprintf(&w, "\tfmt.Printf(\"PASS\\n\")\n") + fmt.Fprintf(&w, "}") + err := os.WriteFile(tmpdir+"/bigfn.go", w.Bytes(), 0666) + if err != nil { + t.Fatalf("can't write output: %v\n", err) + } + + // Build and run with internal linking. + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", "bigtext") + cmd.Dir = tmpdir + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Build failed for big text program with internal linking: %v, output: %s", err, out) + } + cmd = testenv.Command(t, "./bigtext") + cmd.Dir = tmpdir + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("Program built with internal linking failed to run with err %v, output: %s", err, out) + } + + // Build and run with external linking + cmd = testenv.Command(t, testenv.GoToolPath(t), "build", "-o", "bigtext", "-ldflags", "-linkmode=external") + cmd.Dir = tmpdir + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("Build failed for big text program with external linking: %v, output: %s", err, out) + } + cmd = testenv.Command(t, "./bigtext") + cmd.Dir = tmpdir + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("Program built with external linking failed to run with err %v, output: %s", err, out) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/link/main.go b/platform/dbops/binaries/go/go/src/cmd/link/main.go new file mode 100644 index 0000000000000000000000000000000000000000..16e5a0115132f93b2f7fad0ab9628cd5d40504d1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/link/main.go @@ -0,0 +1,73 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "cmd/internal/sys" + "cmd/link/internal/amd64" + "cmd/link/internal/arm" + "cmd/link/internal/arm64" + "cmd/link/internal/ld" + "cmd/link/internal/loong64" + "cmd/link/internal/mips" + "cmd/link/internal/mips64" + "cmd/link/internal/ppc64" + "cmd/link/internal/riscv64" + "cmd/link/internal/s390x" + "cmd/link/internal/wasm" + "cmd/link/internal/x86" + "fmt" + "internal/buildcfg" + "os" +) + +// The bulk of the linker implementation lives in cmd/link/internal/ld. +// Architecture-specific code lives in cmd/link/internal/GOARCH. +// +// Program initialization: +// +// Before any argument parsing is done, the Init function of relevant +// architecture package is called. The only job done in Init is +// configuration of the architecture-specific variables. +// +// Then control flow passes to ld.Main, which parses flags, makes +// some configuration decisions, and then gives the architecture +// packages a second chance to modify the linker's configuration +// via the ld.Arch.Archinit function. 
+ +func main() { + var arch *sys.Arch + var theArch ld.Arch + + buildcfg.Check() + switch buildcfg.GOARCH { + default: + fmt.Fprintf(os.Stderr, "link: unknown architecture %q\n", buildcfg.GOARCH) + os.Exit(2) + case "386": + arch, theArch = x86.Init() + case "amd64": + arch, theArch = amd64.Init() + case "arm": + arch, theArch = arm.Init() + case "arm64": + arch, theArch = arm64.Init() + case "loong64": + arch, theArch = loong64.Init() + case "mips", "mipsle": + arch, theArch = mips.Init() + case "mips64", "mips64le": + arch, theArch = mips64.Init() + case "ppc64", "ppc64le": + arch, theArch = ppc64.Init() + case "riscv64": + arch, theArch = riscv64.Init() + case "s390x": + arch, theArch = s390x.Init() + case "wasm": + arch, theArch = wasm.Init() + } + ld.Main(arch, theArch) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/nm/doc.go b/platform/dbops/binaries/go/go/src/cmd/nm/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..b11a2a845659917e33e9fe7f12d4f326d3ed940c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/nm/doc.go @@ -0,0 +1,41 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Nm lists the symbols defined or used by an object file, archive, or executable. +// +// Usage: +// +// go tool nm [options] file... +// +// The default output prints one line per symbol, with three space-separated +// fields giving the address (in hexadecimal), type (a character), and name of +// the symbol. 
The types are: +// +// T text (code) segment symbol +// t static text segment symbol +// R read-only data segment symbol +// r static read-only data segment symbol +// D data segment symbol +// d static data segment symbol +// B bss segment symbol +// b static bss segment symbol +// C constant address +// U referenced but undefined symbol +// +// Following established convention, the address is omitted for undefined +// symbols (type U). +// +// The options control the printed output: +// +// -n +// an alias for -sort address (numeric), +// for compatibility with other nm commands +// -size +// print symbol size in decimal between address and type +// -sort {address,name,none,size} +// sort output in the given order (default name) +// size orders from largest to smallest +// -type +// print symbol type after name +package main diff --git a/platform/dbops/binaries/go/go/src/cmd/nm/nm.go b/platform/dbops/binaries/go/go/src/cmd/nm/nm.go new file mode 100644 index 0000000000000000000000000000000000000000..78fa60014b55395ea33d8e576864e258d71757a8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/nm/nm.go @@ -0,0 +1,167 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "flag" + "fmt" + "log" + "os" + "sort" + + "cmd/internal/objfile" +) + +const helpText = `usage: go tool nm [options] file... 
+ -n + an alias for -sort address (numeric), + for compatibility with other nm commands + -size + print symbol size in decimal between address and type + -sort {address,name,none,size} + sort output in the given order (default name) + size orders from largest to smallest + -type + print symbol type after name +` + +func usage() { + fmt.Fprint(os.Stderr, helpText) + os.Exit(2) +} + +var ( + sortOrder = flag.String("sort", "name", "") + printSize = flag.Bool("size", false, "") + printType = flag.Bool("type", false, "") + + filePrefix = false +) + +func init() { + flag.Var(nflag(0), "n", "") // alias for -sort address +} + +type nflag int + +func (nflag) IsBoolFlag() bool { + return true +} + +func (nflag) Set(value string) error { + if value == "true" { + *sortOrder = "address" + } + return nil +} + +func (nflag) String() string { + if *sortOrder == "address" { + return "true" + } + return "false" +} + +func main() { + log.SetFlags(0) + flag.Usage = usage + flag.Parse() + + switch *sortOrder { + case "address", "name", "none", "size": + // ok + default: + fmt.Fprintf(os.Stderr, "nm: unknown sort order %q\n", *sortOrder) + os.Exit(2) + } + + args := flag.Args() + filePrefix = len(args) > 1 + if len(args) == 0 { + flag.Usage() + } + + for _, file := range args { + nm(file) + } + + os.Exit(exitCode) +} + +var exitCode = 0 + +func errorf(format string, args ...any) { + log.Printf(format, args...) 
+ exitCode = 1 +} + +func nm(file string) { + f, err := objfile.Open(file) + if err != nil { + errorf("%v", err) + return + } + defer f.Close() + + w := bufio.NewWriter(os.Stdout) + + entries := f.Entries() + + var found bool + + for _, e := range entries { + syms, err := e.Symbols() + if err != nil { + errorf("reading %s: %v", file, err) + } + if len(syms) == 0 { + continue + } + + found = true + + switch *sortOrder { + case "address": + sort.Slice(syms, func(i, j int) bool { return syms[i].Addr < syms[j].Addr }) + case "name": + sort.Slice(syms, func(i, j int) bool { return syms[i].Name < syms[j].Name }) + case "size": + sort.Slice(syms, func(i, j int) bool { return syms[i].Size > syms[j].Size }) + } + + for _, sym := range syms { + if len(entries) > 1 { + name := e.Name() + if name == "" { + fmt.Fprintf(w, "%s(%s):\t", file, "_go_.o") + } else { + fmt.Fprintf(w, "%s(%s):\t", file, name) + } + } else if filePrefix { + fmt.Fprintf(w, "%s:\t", file) + } + if sym.Code == 'U' { + fmt.Fprintf(w, "%8s", "") + } else { + fmt.Fprintf(w, "%8x", sym.Addr) + } + if *printSize { + fmt.Fprintf(w, " %10d", sym.Size) + } + fmt.Fprintf(w, " %c %s", sym.Code, sym.Name) + if *printType && sym.Type != "" { + fmt.Fprintf(w, " %s", sym.Type) + } + fmt.Fprintf(w, "\n") + } + } + + if !found { + errorf("reading %s: no symbols", file) + } + + w.Flush() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/nm/nm_cgo_test.go b/platform/dbops/binaries/go/go/src/cmd/nm/nm_cgo_test.go new file mode 100644 index 0000000000000000000000000000000000000000..face58c311f7b50a937c7a2d49a959d8acff5e78 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/nm/nm_cgo_test.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "internal/testenv" + "testing" +) + +func TestInternalLinkerCgoExec(t *testing.T) { + testenv.MustHaveCGO(t) + testenv.MustInternalLink(t, true) + testGoExec(t, true, false) +} + +func TestExternalLinkerCgoExec(t *testing.T) { + testenv.MustHaveCGO(t) + testGoExec(t, true, true) +} + +func TestCgoLib(t *testing.T) { + testenv.MustHaveCGO(t) + testGoLib(t, true) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/nm/nm_test.go b/platform/dbops/binaries/go/go/src/cmd/nm/nm_test.go new file mode 100644 index 0000000000000000000000000000000000000000..530a720f2b9eb70fee4ca1b980d3bb6bf4366adc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/nm/nm_test.go @@ -0,0 +1,368 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "internal/obscuretestdata" + "internal/platform" + "internal/testenv" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "testing" + "text/template" +) + +// TestMain executes the test binary as the nm command if +// GO_NMTEST_IS_NM is set, and runs the tests otherwise. +func TestMain(m *testing.M) { + if os.Getenv("GO_NMTEST_IS_NM") != "" { + main() + os.Exit(0) + } + + os.Setenv("GO_NMTEST_IS_NM", "1") // Set for subprocesses to inherit. + os.Exit(m.Run()) +} + +// nmPath returns the path to the "nm" binary to run. 
+func nmPath(t testing.TB) string { + t.Helper() + testenv.MustHaveExec(t) + + nmPathOnce.Do(func() { + nmExePath, nmPathErr = os.Executable() + }) + if nmPathErr != nil { + t.Fatal(nmPathErr) + } + return nmExePath +} + +var ( + nmPathOnce sync.Once + nmExePath string + nmPathErr error +) + +func TestNonGoExecs(t *testing.T) { + t.Parallel() + testfiles := []string{ + "debug/elf/testdata/gcc-386-freebsd-exec", + "debug/elf/testdata/gcc-amd64-linux-exec", + "debug/macho/testdata/gcc-386-darwin-exec.base64", // golang.org/issue/34986 + "debug/macho/testdata/gcc-amd64-darwin-exec.base64", // golang.org/issue/34986 + // "debug/pe/testdata/gcc-amd64-mingw-exec", // no symbols! + "debug/pe/testdata/gcc-386-mingw-exec", + "debug/plan9obj/testdata/amd64-plan9-exec", + "debug/plan9obj/testdata/386-plan9-exec", + "internal/xcoff/testdata/gcc-ppc64-aix-dwarf2-exec", + } + for _, f := range testfiles { + exepath := filepath.Join(testenv.GOROOT(t), "src", f) + if strings.HasSuffix(f, ".base64") { + tf, err := obscuretestdata.DecodeToTempFile(exepath) + if err != nil { + t.Errorf("obscuretestdata.DecodeToTempFile(%s): %v", exepath, err) + continue + } + defer os.Remove(tf) + exepath = tf + } + + cmd := testenv.Command(t, nmPath(t), exepath) + out, err := cmd.CombinedOutput() + if err != nil { + t.Errorf("go tool nm %v: %v\n%s", exepath, err, string(out)) + } + } +} + +func testGoExec(t *testing.T, iscgo, isexternallinker bool) { + t.Parallel() + tmpdir, err := os.MkdirTemp("", "TestGoExec") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + src := filepath.Join(tmpdir, "a.go") + file, err := os.Create(src) + if err != nil { + t.Fatal(err) + } + err = template.Must(template.New("main").Parse(testexec)).Execute(file, iscgo) + if e := file.Close(); err == nil { + err = e + } + if err != nil { + t.Fatal(err) + } + + exe := filepath.Join(tmpdir, "a.exe") + args := []string{"build", "-o", exe} + if iscgo { + linkmode := "internal" + if isexternallinker { + linkmode 
= "external" + } + args = append(args, "-ldflags", "-linkmode="+linkmode) + } + args = append(args, src) + out, err := testenv.Command(t, testenv.GoToolPath(t), args...).CombinedOutput() + if err != nil { + t.Fatalf("building test executable failed: %s %s", err, out) + } + + out, err = testenv.Command(t, exe).CombinedOutput() + if err != nil { + t.Fatalf("running test executable failed: %s %s", err, out) + } + names := make(map[string]string) + for _, line := range strings.Split(string(out), "\n") { + if line == "" { + continue + } + f := strings.Split(line, "=") + if len(f) != 2 { + t.Fatalf("unexpected output line: %q", line) + } + names["main."+f[0]] = f[1] + } + + runtimeSyms := map[string]string{ + "runtime.text": "T", + "runtime.etext": "T", + "runtime.rodata": "R", + "runtime.erodata": "R", + "runtime.epclntab": "R", + "runtime.noptrdata": "D", + } + + if runtime.GOOS == "aix" && iscgo { + // pclntab is moved to .data section on AIX. + runtimeSyms["runtime.epclntab"] = "D" + } + + out, err = testenv.Command(t, nmPath(t), exe).CombinedOutput() + if err != nil { + t.Fatalf("go tool nm: %v\n%s", err, string(out)) + } + + relocated := func(code string) bool { + if runtime.GOOS == "aix" { + // On AIX, .data and .bss addresses are changed by the loader. + // Therefore, the values returned by the exec aren't the same + // than the ones inside the symbol table. + // In case of cgo, .text symbols are also changed. + switch code { + case "T", "t", "R", "r": + return iscgo + case "D", "d", "B", "b": + return true + } + } + if platform.DefaultPIE(runtime.GOOS, runtime.GOARCH, false) { + // Code is always relocated if the default buildmode is PIE. 
+ return true + } + return false + } + + dups := make(map[string]bool) + for _, line := range strings.Split(string(out), "\n") { + f := strings.Fields(line) + if len(f) < 3 { + continue + } + name := f[2] + if addr, found := names[name]; found { + if want, have := addr, "0x"+f[0]; have != want { + if !relocated(f[1]) { + t.Errorf("want %s address for %s symbol, but have %s", want, name, have) + } + } + delete(names, name) + } + if _, found := dups[name]; found { + t.Errorf("duplicate name of %q is found", name) + } + if stype, found := runtimeSyms[name]; found { + if runtime.GOOS == "plan9" && stype == "R" { + // no read-only data segment symbol on Plan 9 + stype = "D" + } + if want, have := stype, strings.ToUpper(f[1]); have != want { + if runtime.GOOS == "android" && name == "runtime.epclntab" && have == "D" { + // TODO(#58807): Figure out why this fails and fix up the test. + t.Logf("(ignoring on %s) want %s type for %s symbol, but have %s", runtime.GOOS, want, name, have) + } else { + t.Errorf("want %s type for %s symbol, but have %s", want, name, have) + } + } + delete(runtimeSyms, name) + } + } + if len(names) > 0 { + t.Errorf("executable is missing %v symbols", names) + } + if len(runtimeSyms) > 0 { + t.Errorf("executable is missing %v symbols", runtimeSyms) + } +} + +func TestGoExec(t *testing.T) { + testGoExec(t, false, false) +} + +func testGoLib(t *testing.T, iscgo bool) { + t.Parallel() + tmpdir, err := os.MkdirTemp("", "TestGoLib") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + gopath := filepath.Join(tmpdir, "gopath") + libpath := filepath.Join(gopath, "src", "mylib") + + err = os.MkdirAll(libpath, 0777) + if err != nil { + t.Fatal(err) + } + src := filepath.Join(libpath, "a.go") + file, err := os.Create(src) + if err != nil { + t.Fatal(err) + } + err = template.Must(template.New("mylib").Parse(testlib)).Execute(file, iscgo) + if e := file.Close(); err == nil { + err = e + } + if err == nil { + err = 
os.WriteFile(filepath.Join(libpath, "go.mod"), []byte("module mylib\n"), 0666) + } + if err != nil { + t.Fatal(err) + } + + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-buildmode=archive", "-o", "mylib.a", ".") + cmd.Dir = libpath + cmd.Env = append(os.Environ(), "GOPATH="+gopath) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("building test lib failed: %s %s", err, out) + } + mylib := filepath.Join(libpath, "mylib.a") + + out, err = testenv.Command(t, nmPath(t), mylib).CombinedOutput() + if err != nil { + t.Fatalf("go tool nm: %v\n%s", err, string(out)) + } + type symType struct { + Type string + Name string + CSym bool + Found bool + } + var syms = []symType{ + {"B", "mylib.Testdata", false, false}, + {"T", "mylib.Testfunc", false, false}, + } + if iscgo { + syms = append(syms, symType{"B", "mylib.TestCgodata", false, false}) + syms = append(syms, symType{"T", "mylib.TestCgofunc", false, false}) + if runtime.GOOS == "darwin" || runtime.GOOS == "ios" || (runtime.GOOS == "windows" && runtime.GOARCH == "386") { + syms = append(syms, symType{"D", "_cgodata", true, false}) + syms = append(syms, symType{"T", "_cgofunc", true, false}) + } else if runtime.GOOS == "aix" { + syms = append(syms, symType{"D", "cgodata", true, false}) + syms = append(syms, symType{"T", ".cgofunc", true, false}) + } else { + syms = append(syms, symType{"D", "cgodata", true, false}) + syms = append(syms, symType{"T", "cgofunc", true, false}) + } + } + + for _, line := range strings.Split(string(out), "\n") { + f := strings.Fields(line) + var typ, name string + var csym bool + if iscgo { + if len(f) < 4 { + continue + } + csym = !strings.Contains(f[0], "_go_.o") + typ = f[2] + name = f[3] + } else { + if len(f) < 3 { + continue + } + typ = f[1] + name = f[2] + } + for i := range syms { + sym := &syms[i] + if sym.Type == typ && sym.Name == name && sym.CSym == csym { + if sym.Found { + t.Fatalf("duplicate symbol %s %s", sym.Type, sym.Name) + } + sym.Found = true + } 
+ } + } + for _, sym := range syms { + if !sym.Found { + t.Errorf("cannot found symbol %s %s", sym.Type, sym.Name) + } + } +} + +func TestGoLib(t *testing.T) { + testGoLib(t, false) +} + +const testexec = ` +package main + +import "fmt" +{{if .}}import "C" +{{end}} + +func main() { + testfunc() +} + +var testdata uint32 + +func testfunc() { + fmt.Printf("main=%p\n", main) + fmt.Printf("testfunc=%p\n", testfunc) + fmt.Printf("testdata=%p\n", &testdata) +} +` + +const testlib = ` +package mylib + +{{if .}} +// int cgodata = 5; +// void cgofunc(void) {} +import "C" + +var TestCgodata = C.cgodata + +func TestCgofunc() { + C.cgofunc() +} +{{end}} + +var Testdata uint32 + +func Testfunc() {} +` diff --git a/platform/dbops/binaries/go/go/src/cmd/objdump/main.go b/platform/dbops/binaries/go/go/src/cmd/objdump/main.go new file mode 100644 index 0000000000000000000000000000000000000000..6605f8a60ce18e500f639075da72e0fc99918853 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/objdump/main.go @@ -0,0 +1,105 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Objdump disassembles executable files. +// +// Usage: +// +// go tool objdump [-s symregexp] binary +// +// Objdump prints a disassembly of all text symbols (code) in the binary. +// If the -s option is present, objdump only disassembles +// symbols with names matching the regular expression. +// +// Alternate usage: +// +// go tool objdump binary start end +// +// In this mode, objdump disassembles the binary starting at the start address and +// stopping at the end address. The start and end addresses are program +// counters written in hexadecimal with optional leading 0x prefix. +// In this mode, objdump prints a sequence of stanzas of the form: +// +// file:line +// address: assembly +// address: assembly +// ... 
// usage prints the objdump invocation synopsis followed by the defaults
// of all registered flags, then exits with status 2.
func usage() {
	fmt.Fprint(os.Stderr, "usage: go tool objdump [-S] [-gnu] [-s symregexp] binary [start end]\n\n")
	flag.PrintDefaults()
	os.Exit(2)
}
// mustHaveDisasm skips the test on architectures for which the
// disassembler has no support; the referenced issues track the gaps.
func mustHaveDisasm(t *testing.T) {
	arch := runtime.GOARCH
	switch arch {
	case "loong64":
		t.Skipf("skipping on %s", arch)
	case "mips", "mipsle", "mips64", "mips64le":
		t.Skipf("skipping on %s, issue 12559", arch)
	case "riscv64":
		t.Skipf("skipping on %s, issue 36738", arch)
	case "s390x":
		t.Skipf("skipping on %s, issue 15255", arch)
	}
}
+ cmd.Env = append(os.Environ(), "GOROOT_FINAL=") + t.Logf("Running %v", cmd.Args) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("go build %s: %v\n%s", srcfname, err, out) + } + need := []string{ + "TEXT main.main(SB)", + } + + if printCode { + need = append(need, ` Println("hello, world")`) + } else { + need = append(need, srcfname+":6") + } + + switch goarch { + case "amd64", "386": + need = append(need, x86Need...) + case "arm": + need = append(need, armNeed...) + case "arm64": + need = append(need, arm64Need...) + case "ppc64", "ppc64le": + var pie bool + for _, flag := range flags { + if flag == "-buildmode=pie" { + pie = true + break + } + } + if pie { + // In PPC64 PIE binaries we use a "local entry point" which is + // function symbol address + 8. Currently we don't symbolize that. + // Expect a different output. + need = append(need, ppcPIENeed...) + } else { + need = append(need, ppcNeed...) + } + } + + if printGnuAsm { + switch goarch { + case "amd64": + need = append(need, amd64GnuNeed...) + case "386": + need = append(need, i386GnuNeed...) + case "arm", "arm64": + need = append(need, armGnuNeed...) + case "ppc64", "ppc64le": + need = append(need, ppcGnuNeed...) + } + } + args = []string{ + "-s", "main.main", + hello, + } + + if printCode { + args = append([]string{"-S"}, args...) + } + + if printGnuAsm { + args = append([]string{"-gnu"}, args...) + } + cmd = testenv.Command(t, objdumpPath(t), args...) 
+ cmd.Dir = "testdata" // "Bad line" bug #36683 is sensitive to being run in the source directory + out, err = cmd.CombinedOutput() + t.Logf("Running %v", cmd.Args) + + if err != nil { + exename := srcfname[:len(srcfname)-len(filepath.Ext(srcfname))] + ".exe" + t.Fatalf("objdump %q: %v\n%s", exename, err, out) + } + + text := string(out) + ok := true + for _, s := range need { + if !strings.Contains(text, s) { + t.Errorf("disassembly missing '%s'", s) + ok = false + } + } + if goarch == "386" { + if strings.Contains(text, "(IP)") { + t.Errorf("disassembly contains PC-Relative addressing on 386") + ok = false + } + } + + if !ok || testing.Verbose() { + t.Logf("full disassembly:\n%s", text) + } +} + +func testGoAndCgoDisasm(t *testing.T, printCode bool, printGnuAsm bool) { + t.Parallel() + testDisasm(t, "fmthello.go", printCode, printGnuAsm) + if testenv.HasCGO() { + testDisasm(t, "fmthellocgo.go", printCode, printGnuAsm) + } +} + +func TestDisasm(t *testing.T) { + testGoAndCgoDisasm(t, false, false) +} + +func TestDisasmCode(t *testing.T) { + testGoAndCgoDisasm(t, true, false) +} + +func TestDisasmGnuAsm(t *testing.T) { + testGoAndCgoDisasm(t, false, true) +} + +func TestDisasmExtld(t *testing.T) { + testenv.MustHaveCGO(t) + switch runtime.GOOS { + case "plan9": + t.Skipf("skipping on %s", runtime.GOOS) + } + t.Parallel() + testDisasm(t, "fmthello.go", false, false, "-ldflags=-linkmode=external") +} + +func TestDisasmPIE(t *testing.T) { + if !platform.BuildModeSupported("gc", "pie", runtime.GOOS, runtime.GOARCH) { + t.Skipf("skipping on %s/%s, PIE buildmode not supported", runtime.GOOS, runtime.GOARCH) + } + if !platform.InternalLinkPIESupported(runtime.GOOS, runtime.GOARCH) { + // require cgo on platforms that PIE needs external linking + testenv.MustHaveCGO(t) + } + t.Parallel() + testDisasm(t, "fmthello.go", false, false, "-buildmode=pie") +} + +func TestDisasmGoobj(t *testing.T) { + mustHaveDisasm(t) + testenv.MustHaveGoBuild(t) + + tmp := t.TempDir() + + 
importcfgfile := filepath.Join(tmp, "hello.importcfg") + testenv.WriteImportcfg(t, importcfgfile, nil, "testdata/fmthello.go") + + hello := filepath.Join(tmp, "hello.o") + args := []string{"tool", "compile", "-p=main", "-importcfg=" + importcfgfile, "-o", hello} + args = append(args, "testdata/fmthello.go") + out, err := testenv.Command(t, testenv.GoToolPath(t), args...).CombinedOutput() + if err != nil { + t.Fatalf("go tool compile fmthello.go: %v\n%s", err, out) + } + need := []string{ + "main(SB)", + "fmthello.go:6", + } + + args = []string{ + "-s", "main", + hello, + } + + out, err = testenv.Command(t, objdumpPath(t), args...).CombinedOutput() + if err != nil { + t.Fatalf("objdump fmthello.o: %v\n%s", err, out) + } + + text := string(out) + ok := true + for _, s := range need { + if !strings.Contains(text, s) { + t.Errorf("disassembly missing '%s'", s) + ok = false + } + } + if runtime.GOARCH == "386" { + if strings.Contains(text, "(IP)") { + t.Errorf("disassembly contains PC-Relative addressing on 386") + ok = false + } + } + if !ok { + t.Logf("full disassembly:\n%s", text) + } +} + +func TestGoobjFileNumber(t *testing.T) { + // Test that file table in Go object file is parsed correctly. 
+ testenv.MustHaveGoBuild(t) + mustHaveDisasm(t) + + t.Parallel() + + tmp := t.TempDir() + + obj := filepath.Join(tmp, "p.a") + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", obj) + cmd.Dir = filepath.Join("testdata/testfilenum") + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("build failed: %v\n%s", err, out) + } + + cmd = testenv.Command(t, objdumpPath(t), obj) + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("objdump failed: %v\n%s", err, out) + } + + text := string(out) + for _, s := range []string{"a.go", "b.go", "c.go"} { + if !strings.Contains(text, s) { + t.Errorf("output missing '%s'", s) + } + } + + if t.Failed() { + t.Logf("output:\n%s", text) + } +} + +func TestGoObjOtherVersion(t *testing.T) { + testenv.MustHaveExec(t) + t.Parallel() + + obj := filepath.Join("testdata", "go116.o") + cmd := testenv.Command(t, objdumpPath(t), obj) + out, err := cmd.CombinedOutput() + if err == nil { + t.Fatalf("objdump go116.o succeeded unexpectedly") + } + if !strings.Contains(string(out), "go object of a different version") { + t.Errorf("unexpected error message:\n%s", out) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/pack/doc.go b/platform/dbops/binaries/go/go/src/cmd/pack/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..22c361ee09ef537251ceb032b3fe2208ed9a067a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/pack/doc.go @@ -0,0 +1,40 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Pack is a simple version of the traditional Unix ar tool. +It implements only the operations needed by Go. + +Usage: + + go tool pack op file.a [name...] + +Pack applies the operation to the archive, using the names as arguments to the operation. 
+ +The operation op is given by one of these letters: + + c append files (from the file system) to a new archive + p print files from the archive + r append files (from the file system) to the archive + t list files from the archive + x extract files from the archive + +The archive argument to the c command must be non-existent or a +valid archive file, which will be cleared before adding new entries. It +is an error if the file exists but is not an archive. + +For the p, t, and x commands, listing no names on the command line +causes the operation to apply to all files in the archive. + +In contrast to Unix ar, the r operation always appends to the archive, +even if a file with the given name already exists in the archive. In this way +pack's r operation is more like Unix ar's rq operation. + +Adding the letter v to an operation, as in pv or rv, enables verbose operation: +For the c and r commands, names are printed as files are added. +For the p command, each file is prefixed by the name on a line by itself. +For the t command, the listing includes additional file metadata. +For the x command, names are printed as files are extracted. +*/ +package main diff --git a/platform/dbops/binaries/go/go/src/cmd/pack/pack.go b/platform/dbops/binaries/go/go/src/cmd/pack/pack.go new file mode 100644 index 0000000000000000000000000000000000000000..412ea36d60fa905ffbe3987d301640b5d21f666a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/pack/pack.go @@ -0,0 +1,342 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "cmd/internal/archive" + "fmt" + "io" + "io/fs" + "log" + "os" + "path/filepath" +) + +const usageMessage = `Usage: pack op file.a [name....] +Where op is one of cprtx optionally followed by v for verbose output. +For compatibility with old Go build environments the op string grc is +accepted as a synonym for c. 
+ +For more information, run + go doc cmd/pack` + +func usage() { + fmt.Fprintln(os.Stderr, usageMessage) + os.Exit(2) +} + +func main() { + log.SetFlags(0) + log.SetPrefix("pack: ") + // need "pack op archive" at least. + if len(os.Args) < 3 { + log.Print("not enough arguments") + fmt.Fprintln(os.Stderr) + usage() + } + setOp(os.Args[1]) + var ar *Archive + switch op { + case 'p': + ar = openArchive(os.Args[2], os.O_RDONLY, os.Args[3:]) + ar.scan(ar.printContents) + case 'r': + ar = openArchive(os.Args[2], os.O_RDWR|os.O_CREATE, os.Args[3:]) + ar.addFiles() + case 'c': + ar = openArchive(os.Args[2], os.O_RDWR|os.O_TRUNC|os.O_CREATE, os.Args[3:]) + ar.addPkgdef() + ar.addFiles() + case 't': + ar = openArchive(os.Args[2], os.O_RDONLY, os.Args[3:]) + ar.scan(ar.tableOfContents) + case 'x': + ar = openArchive(os.Args[2], os.O_RDONLY, os.Args[3:]) + ar.scan(ar.extractContents) + default: + log.Printf("invalid operation %q", os.Args[1]) + fmt.Fprintln(os.Stderr) + usage() + } + if len(ar.files) > 0 { + log.Fatalf("file %q not in archive", ar.files[0]) + } +} + +// The unusual ancestry means the arguments are not Go-standard. +// These variables hold the decoded operation specified by the first argument. +// op holds the operation we are doing (prtx). +// verbose tells whether the 'v' option was specified. +var ( + op rune + verbose bool +) + +// setOp parses the operation string (first argument). +func setOp(arg string) { + // Recognize 'go tool pack grc' because that was the + // formerly canonical way to build a new archive + // from a set of input files. Accepting it keeps old + // build systems working with both Go 1.2 and Go 1.3. + if arg == "grc" { + arg = "c" + } + + for _, r := range arg { + switch r { + case 'c', 'p', 'r', 't', 'x': + if op != 0 { + // At most one can be set. + usage() + } + op = r + case 'v': + if verbose { + // Can be set only once. 
+ usage() + } + verbose = true + default: + usage() + } + } +} + +const ( + arHeader = "!\n" +) + +// An Archive represents an open archive file. It is always scanned sequentially +// from start to end, without backing up. +type Archive struct { + a *archive.Archive + files []string // Explicit list of files to be processed. + pad int // Padding bytes required at end of current archive file + matchAll bool // match all files in archive +} + +// archive opens (and if necessary creates) the named archive. +func openArchive(name string, mode int, files []string) *Archive { + f, err := os.OpenFile(name, mode, 0666) + if err != nil { + log.Fatal(err) + } + var a *archive.Archive + if mode&os.O_TRUNC != 0 { // the c command + a, err = archive.New(f) + } else { + a, err = archive.Parse(f, verbose) + if err != nil && mode&os.O_CREATE != 0 { // the r command + a, err = archive.New(f) + } + } + if err != nil { + log.Fatal(err) + } + return &Archive{ + a: a, + files: files, + matchAll: len(files) == 0, + } +} + +// scan scans the archive and executes the specified action on each entry. +func (ar *Archive) scan(action func(*archive.Entry)) { + for i := range ar.a.Entries { + e := &ar.a.Entries[i] + action(e) + } +} + +// listEntry prints to standard output a line describing the entry. +func listEntry(e *archive.Entry, verbose bool) { + if verbose { + fmt.Fprintf(stdout, "%s\n", e.String()) + } else { + fmt.Fprintf(stdout, "%s\n", e.Name) + } +} + +// output copies the entry to the specified writer. +func (ar *Archive) output(e *archive.Entry, w io.Writer) { + r := io.NewSectionReader(ar.a.File(), e.Offset, e.Size) + n, err := io.Copy(w, r) + if err != nil { + log.Fatal(err) + } + if n != e.Size { + log.Fatal("short file") + } +} + +// match reports whether the entry matches the argument list. +// If it does, it also drops the file from the to-be-processed list. 
+func (ar *Archive) match(e *archive.Entry) bool { + if ar.matchAll { + return true + } + for i, name := range ar.files { + if e.Name == name { + copy(ar.files[i:], ar.files[i+1:]) + ar.files = ar.files[:len(ar.files)-1] + return true + } + } + return false +} + +// addFiles adds files to the archive. The archive is known to be +// sane and we are positioned at the end. No attempt is made +// to check for existing files. +func (ar *Archive) addFiles() { + if len(ar.files) == 0 { + usage() + } + for _, file := range ar.files { + if verbose { + fmt.Printf("%s\n", file) + } + + f, err := os.Open(file) + if err != nil { + log.Fatal(err) + } + aro, err := archive.Parse(f, false) + if err != nil || !isGoCompilerObjFile(aro) { + f.Seek(0, io.SeekStart) + ar.addFile(f) + goto close + } + + for _, e := range aro.Entries { + if e.Type != archive.EntryGoObj || e.Name != "_go_.o" { + continue + } + ar.a.AddEntry(archive.EntryGoObj, filepath.Base(file), 0, 0, 0, 0644, e.Size, io.NewSectionReader(f, e.Offset, e.Size)) + } + close: + f.Close() + } + ar.files = nil +} + +// FileLike abstracts the few methods we need, so we can test without needing real files. +type FileLike interface { + Name() string + Stat() (fs.FileInfo, error) + Read([]byte) (int, error) + Close() error +} + +// addFile adds a single file to the archive +func (ar *Archive) addFile(fd FileLike) { + // Format the entry. + // First, get its info. + info, err := fd.Stat() + if err != nil { + log.Fatal(err) + } + // mtime, uid, gid are all zero so repeated builds produce identical output. + mtime := int64(0) + uid := 0 + gid := 0 + ar.a.AddEntry(archive.EntryNativeObj, info.Name(), mtime, uid, gid, info.Mode(), info.Size(), fd) +} + +// addPkgdef adds the __.PKGDEF file to the archive, copied +// from the first Go object file on the file list, if any. +// The archive is known to be empty. 
+func (ar *Archive) addPkgdef() { + done := false + for _, file := range ar.files { + f, err := os.Open(file) + if err != nil { + log.Fatal(err) + } + aro, err := archive.Parse(f, false) + if err != nil || !isGoCompilerObjFile(aro) { + goto close + } + + for _, e := range aro.Entries { + if e.Type != archive.EntryPkgDef { + continue + } + if verbose { + fmt.Printf("__.PKGDEF # %s\n", file) + } + ar.a.AddEntry(archive.EntryPkgDef, "__.PKGDEF", 0, 0, 0, 0644, e.Size, io.NewSectionReader(f, e.Offset, e.Size)) + done = true + } + close: + f.Close() + if done { + break + } + } +} + +// Finally, the actual commands. Each is an action. + +// can be modified for testing. +var stdout io.Writer = os.Stdout + +// printContents implements the 'p' command. +func (ar *Archive) printContents(e *archive.Entry) { + ar.extractContents1(e, stdout) +} + +// tableOfContents implements the 't' command. +func (ar *Archive) tableOfContents(e *archive.Entry) { + if ar.match(e) { + listEntry(e, verbose) + } +} + +// extractContents implements the 'x' command. +func (ar *Archive) extractContents(e *archive.Entry) { + ar.extractContents1(e, nil) +} + +func (ar *Archive) extractContents1(e *archive.Entry, out io.Writer) { + if ar.match(e) { + if verbose { + listEntry(e, false) + } + if out == nil { + f, err := os.OpenFile(e.Name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0444 /*e.Mode*/) + if err != nil { + log.Fatal(err) + } + defer f.Close() + out = f + } + ar.output(e, out) + } +} + +// isGoCompilerObjFile reports whether file is an object file created +// by the Go compiler, which is an archive file with exactly one entry +// of __.PKGDEF, or _go_.o, or both entries. 
+func isGoCompilerObjFile(a *archive.Archive) bool { + switch len(a.Entries) { + case 1: + return (a.Entries[0].Type == archive.EntryGoObj && a.Entries[0].Name == "_go_.o") || + (a.Entries[0].Type == archive.EntryPkgDef && a.Entries[0].Name == "__.PKGDEF") + case 2: + var foundPkgDef, foundGo bool + for _, e := range a.Entries { + if e.Type == archive.EntryPkgDef && e.Name == "__.PKGDEF" { + foundPkgDef = true + } + if e.Type == archive.EntryGoObj && e.Name == "_go_.o" { + foundGo = true + } + } + return foundPkgDef && foundGo + default: + return false + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/pack/pack_test.go b/platform/dbops/binaries/go/go/src/cmd/pack/pack_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c3a63424dd9d26f2eaaf07c8f09c90684b6d8fc9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/pack/pack_test.go @@ -0,0 +1,515 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "cmd/internal/archive" + "fmt" + "internal/testenv" + "io" + "io/fs" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +// TestMain executes the test binary as the pack command if +// GO_PACKTEST_IS_PACK is set, and runs the tests otherwise. +func TestMain(m *testing.M) { + if os.Getenv("GO_PACKTEST_IS_PACK") != "" { + main() + os.Exit(0) + } + + os.Setenv("GO_PACKTEST_IS_PACK", "1") // Set for subprocesses to inherit. + os.Exit(m.Run()) +} + +// packPath returns the path to the "pack" binary to run. 
// testCreate creates a fresh archive in dir, adds helloFile to it by
// hand, then reopens the archive and checks that verbose printing
// reproduces both the entry name and its contents.
func testCreate(t *testing.T, dir string) {
	name := filepath.Join(dir, "pack.a")
	ar := openArchive(name, os.O_RDWR|os.O_CREATE, nil)
	// Add an entry by hand.
	ar.addFile(helloFile.Reset())
	ar.a.File().Close()
	// Now check it.
	ar = openArchive(name, os.O_RDONLY, []string{helloFile.name})
	var buf strings.Builder
	// Redirect the package-level stdout and force verbose mode for the
	// duration of the scan; restore both before returning.
	stdout = &buf
	verbose = true
	defer func() {
		stdout = os.Stdout
		verbose = false
	}()
	ar.scan(ar.printContents)
	ar.a.File().Close()
	result := buf.String()
	// Expect verbose output plus file contents.
	expect := fmt.Sprintf("%s\n%s", helloFile.name, helloFile.contents)
	if result != expect {
		t.Fatalf("expected %q got %q", expect, result)
	}
}
+ var buf strings.Builder + stdout = &buf + verbose = true + defer func() { + stdout = os.Stdout + verbose = false + }() + ar = openArchive(name, os.O_RDONLY, nil) + ar.scan(ar.tableOfContents) + ar.a.File().Close() + result := buf.String() + // Expect verbose listing. + expect := fmt.Sprintf("%s\n%s\n", helloFile.Entry(), goodbyeFile.Entry()) + if result != expect { + t.Fatalf("expected %q got %q", expect, result) + } + + // Do it again without verbose. + verbose = false + buf.Reset() + ar = openArchive(name, os.O_RDONLY, nil) + ar.scan(ar.tableOfContents) + ar.a.File().Close() + result = buf.String() + // Expect non-verbose listing. + expect = fmt.Sprintf("%s\n%s\n", helloFile.name, goodbyeFile.name) + if result != expect { + t.Fatalf("expected %q got %q", expect, result) + } + + // Do it again with file list arguments. + verbose = false + buf.Reset() + ar = openArchive(name, os.O_RDONLY, []string{helloFile.name}) + ar.scan(ar.tableOfContents) + ar.a.File().Close() + result = buf.String() + // Expect only helloFile. + expect = fmt.Sprintf("%s\n", helloFile.name) + if result != expect { + t.Fatalf("expected %q got %q", expect, result) + } +} + +// Test that we can create an archive, put some files in it, and get back a file. +// Tests the x command. +func TestExtract(t *testing.T) { + dir := t.TempDir() + name := filepath.Join(dir, "pack.a") + ar := openArchive(name, os.O_RDWR|os.O_CREATE, nil) + // Add some entries by hand. + ar.addFile(helloFile.Reset()) + ar.addFile(goodbyeFile.Reset()) + ar.a.File().Close() + // Now extract one file. We chdir to the directory of the archive for simplicity. 
+ pwd, err := os.Getwd() + if err != nil { + t.Fatal("os.Getwd: ", err) + } + err = os.Chdir(dir) + if err != nil { + t.Fatal("os.Chdir: ", err) + } + defer func() { + err := os.Chdir(pwd) + if err != nil { + t.Fatal("os.Chdir: ", err) + } + }() + ar = openArchive(name, os.O_RDONLY, []string{goodbyeFile.name}) + ar.scan(ar.extractContents) + ar.a.File().Close() + data, err := os.ReadFile(goodbyeFile.name) + if err != nil { + t.Fatal(err) + } + // Expect contents of file. + result := string(data) + expect := goodbyeFile.contents + if result != expect { + t.Fatalf("expected %q got %q", expect, result) + } +} + +// Test that pack-created archives can be understood by the tools. +func TestHello(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustInternalLink(t, false) + + dir := t.TempDir() + hello := filepath.Join(dir, "hello.go") + prog := ` + package main + func main() { + println("hello world") + } + ` + err := os.WriteFile(hello, []byte(prog), 0666) + if err != nil { + t.Fatal(err) + } + + run := func(args ...string) string { + return doRun(t, dir, args...) + } + + importcfgfile := filepath.Join(dir, "hello.importcfg") + testenv.WriteImportcfg(t, importcfgfile, nil, hello) + + goBin := testenv.GoToolPath(t) + run(goBin, "tool", "compile", "-importcfg="+importcfgfile, "-p=main", "hello.go") + run(packPath(t), "grc", "hello.a", "hello.o") + run(goBin, "tool", "link", "-importcfg="+importcfgfile, "-o", "a.out", "hello.a") + out := run("./a.out") + if out != "hello world\n" { + t.Fatalf("incorrect output: %q, want %q", out, "hello world\n") + } +} + +// Test that pack works with very long lines in PKGDEF. 
+func TestLargeDefs(t *testing.T) { + if testing.Short() { + t.Skip("skipping in -short mode") + } + testenv.MustHaveGoBuild(t) + + dir := t.TempDir() + large := filepath.Join(dir, "large.go") + f, err := os.Create(large) + if err != nil { + t.Fatal(err) + } + b := bufio.NewWriter(f) + + printf := func(format string, args ...any) { + _, err := fmt.Fprintf(b, format, args...) + if err != nil { + t.Fatalf("Writing to %s: %v", large, err) + } + } + + printf("package large\n\ntype T struct {\n") + for i := 0; i < 1000; i++ { + printf("f%d int `tag:\"", i) + for j := 0; j < 100; j++ { + printf("t%d=%d,", j, j) + } + printf("\"`\n") + } + printf("}\n") + if err = b.Flush(); err != nil { + t.Fatal(err) + } + if err = f.Close(); err != nil { + t.Fatal(err) + } + + main := filepath.Join(dir, "main.go") + prog := ` + package main + import "large" + var V large.T + func main() { + println("ok") + } + ` + err = os.WriteFile(main, []byte(prog), 0666) + if err != nil { + t.Fatal(err) + } + + run := func(args ...string) string { + return doRun(t, dir, args...) + } + + importcfgfile := filepath.Join(dir, "hello.importcfg") + testenv.WriteImportcfg(t, importcfgfile, nil) + + goBin := testenv.GoToolPath(t) + run(goBin, "tool", "compile", "-importcfg="+importcfgfile, "-p=large", "large.go") + run(packPath(t), "grc", "large.a", "large.o") + testenv.WriteImportcfg(t, importcfgfile, map[string]string{"large": filepath.Join(dir, "large.o")}, "runtime") + run(goBin, "tool", "compile", "-importcfg="+importcfgfile, "-p=main", "main.go") + run(goBin, "tool", "link", "-importcfg="+importcfgfile, "-L", ".", "-o", "a.out", "main.o") + out := run("./a.out") + if out != "ok\n" { + t.Fatalf("incorrect output: %q, want %q", out, "ok\n") + } +} + +// Test that "\n!\n" inside export data doesn't result in a truncated +// package definition when creating a .a archive from a .o Go object. 
+func TestIssue21703(t *testing.T) { + testenv.MustHaveGoBuild(t) + + dir := t.TempDir() + + const aSrc = `package a; const X = "\n!\n"` + err := os.WriteFile(filepath.Join(dir, "a.go"), []byte(aSrc), 0666) + if err != nil { + t.Fatal(err) + } + + const bSrc = `package b; import _ "a"` + err = os.WriteFile(filepath.Join(dir, "b.go"), []byte(bSrc), 0666) + if err != nil { + t.Fatal(err) + } + + run := func(args ...string) string { + return doRun(t, dir, args...) + } + + goBin := testenv.GoToolPath(t) + run(goBin, "tool", "compile", "-p=a", "a.go") + run(packPath(t), "c", "a.a", "a.o") + run(goBin, "tool", "compile", "-p=b", "-I", ".", "b.go") +} + +// Test the "c" command can "see through" the archive generated by the compiler. +// This is peculiar. (See issue #43271) +func TestCreateWithCompilerObj(t *testing.T) { + testenv.MustHaveGoBuild(t) + + dir := t.TempDir() + src := filepath.Join(dir, "p.go") + prog := "package p; var X = 42\n" + err := os.WriteFile(src, []byte(prog), 0666) + if err != nil { + t.Fatal(err) + } + + run := func(args ...string) string { + return doRun(t, dir, args...) + } + + goBin := testenv.GoToolPath(t) + run(goBin, "tool", "compile", "-pack", "-p=p", "-o", "p.a", "p.go") + run(packPath(t), "c", "packed.a", "p.a") + fi, err := os.Stat(filepath.Join(dir, "p.a")) + if err != nil { + t.Fatalf("stat p.a failed: %v", err) + } + fi2, err := os.Stat(filepath.Join(dir, "packed.a")) + if err != nil { + t.Fatalf("stat packed.a failed: %v", err) + } + // For compiler-generated object file, the "c" command is + // expected to get (essentially) the same file back, instead + // of packing it into a new archive with a single entry. + if want, got := fi.Size(), fi2.Size(); want != got { + t.Errorf("packed file with different size: want %d, got %d", want, got) + } + + // Test -linkobj flag as well. 
+ run(goBin, "tool", "compile", "-p=p", "-linkobj", "p2.a", "-o", "p.x", "p.go") + run(packPath(t), "c", "packed2.a", "p2.a") + fi, err = os.Stat(filepath.Join(dir, "p2.a")) + if err != nil { + t.Fatalf("stat p2.a failed: %v", err) + } + fi2, err = os.Stat(filepath.Join(dir, "packed2.a")) + if err != nil { + t.Fatalf("stat packed2.a failed: %v", err) + } + if want, got := fi.Size(), fi2.Size(); want != got { + t.Errorf("packed file with different size: want %d, got %d", want, got) + } + + run(packPath(t), "c", "packed3.a", "p.x") + fi, err = os.Stat(filepath.Join(dir, "p.x")) + if err != nil { + t.Fatalf("stat p.x failed: %v", err) + } + fi2, err = os.Stat(filepath.Join(dir, "packed3.a")) + if err != nil { + t.Fatalf("stat packed3.a failed: %v", err) + } + if want, got := fi.Size(), fi2.Size(); want != got { + t.Errorf("packed file with different size: want %d, got %d", want, got) + } +} + +// Test the "r" command creates the output file if it does not exist. +func TestRWithNonexistentFile(t *testing.T) { + testenv.MustHaveGoBuild(t) + + dir := t.TempDir() + src := filepath.Join(dir, "p.go") + prog := "package p; var X = 42\n" + err := os.WriteFile(src, []byte(prog), 0666) + if err != nil { + t.Fatal(err) + } + + run := func(args ...string) string { + return doRun(t, dir, args...) + } + + goBin := testenv.GoToolPath(t) + run(goBin, "tool", "compile", "-p=p", "-o", "p.o", "p.go") + run(packPath(t), "r", "p.a", "p.o") // should succeed +} + +// doRun runs a program in a directory and returns the output. +func doRun(t *testing.T, dir string, args ...string) string { + cmd := testenv.Command(t, args[0], args[1:]...) + cmd.Dir = dir + out, err := cmd.CombinedOutput() + if err != nil { + if t.Name() == "TestHello" && runtime.GOOS == "android" && runtime.GOARCH == "arm64" { + testenv.SkipFlaky(t, 58806) + } + t.Fatalf("%v: %v\n%s", args, err, string(out)) + } + return string(out) +} + +// Fake implementation of files. 
+ +var helloFile = &FakeFile{ + name: "hello", + contents: "hello world", // 11 bytes, an odd number. + mode: 0644, +} + +var goodbyeFile = &FakeFile{ + name: "goodbye", + contents: "Sayonara, Jim", // 13 bytes, another odd number. + mode: 0644, +} + +// FakeFile implements FileLike and also fs.FileInfo. +type FakeFile struct { + name string + contents string + mode fs.FileMode + offset int +} + +// Reset prepares a FakeFile for reuse. +func (f *FakeFile) Reset() *FakeFile { + f.offset = 0 + return f +} + +// FileLike methods. + +func (f *FakeFile) Name() string { + // A bit of a cheat: we only have a basename, so that's also ok for FileInfo. + return f.name +} + +func (f *FakeFile) Stat() (fs.FileInfo, error) { + return f, nil +} + +func (f *FakeFile) Read(p []byte) (int, error) { + if f.offset >= len(f.contents) { + return 0, io.EOF + } + n := copy(p, f.contents[f.offset:]) + f.offset += n + return n, nil +} + +func (f *FakeFile) Close() error { + return nil +} + +// fs.FileInfo methods. + +func (f *FakeFile) Size() int64 { + return int64(len(f.contents)) +} + +func (f *FakeFile) Mode() fs.FileMode { + return f.mode +} + +func (f *FakeFile) ModTime() time.Time { + return time.Time{} +} + +func (f *FakeFile) IsDir() bool { + return false +} + +func (f *FakeFile) Sys() any { + return nil +} + +func (f *FakeFile) String() string { + return fs.FormatFileInfo(f) +} + +// Special helpers. + +func (f *FakeFile) Entry() *archive.Entry { + return &archive.Entry{ + Name: f.name, + Mtime: 0, // Defined to be zero. + Uid: 0, // Ditto. + Gid: 0, // Ditto. 
+ Mode: f.mode, + Data: archive.Data{Size: int64(len(f.contents))}, + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/pprof/README b/platform/dbops/binaries/go/go/src/cmd/pprof/README new file mode 100644 index 0000000000000000000000000000000000000000..612dc644f295baf7b7f26711595e1757bd812049 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/pprof/README @@ -0,0 +1,18 @@ +This directory is the copy of Google's pprof shipped as part of the Go distribution. +The bulk of the code is vendored from github.com/google/pprof and is in +../vendor/github.com/google/pprof. + +Two important notes: + +1. Using github.com/google/pprof directly (for example, after installing +with "go get") should work with Go programs, but we cannot guarantee that. +What we test is that the "go tool pprof" shipped with each Go release works +with programs from that release. + +2. Pprof is used inside Google for C++, Java, and Go programs. +Because it was developed for that broader context, it is overgeneralized +when used here for the specific use case of profiling standard Go programs. +However, we've left the abstractions intact in order to share updates +between our vendored copy and Google's internal one. +Please do not take the level of abstraction in this program as an example +to follow in your own. diff --git a/platform/dbops/binaries/go/go/src/cmd/pprof/doc.go b/platform/dbops/binaries/go/go/src/cmd/pprof/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..59e1a47cd19ff2d1552bc63d8322e3ec61c58306 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/pprof/doc.go @@ -0,0 +1,16 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Pprof interprets and displays profiles of Go programs. 
+// +// Basic usage: +// +// go tool pprof binary profile +// +// For detailed usage information: +// +// go tool pprof -h +// +// For an example, see https://blog.golang.org/profiling-go-programs. +package main diff --git a/platform/dbops/binaries/go/go/src/cmd/pprof/pprof.go b/platform/dbops/binaries/go/go/src/cmd/pprof/pprof.go new file mode 100644 index 0000000000000000000000000000000000000000..bc1a4cf9f75a186916bf9a76a9460c03201ed2dd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/pprof/pprof.go @@ -0,0 +1,378 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// pprof is a tool for visualization of profile.data. It is based on +// the upstream version at github.com/google/pprof, with minor +// modifications specific to the Go distribution. Please consider +// upstreaming any modifications to these packages. + +package main + +import ( + "crypto/tls" + "debug/dwarf" + "fmt" + "io" + "net/http" + "net/url" + "os" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "cmd/internal/objfile" + + "github.com/google/pprof/driver" + "github.com/google/pprof/profile" +) + +func main() { + options := &driver.Options{ + Fetch: new(fetcher), + Obj: new(objTool), + UI: newUI(), + } + if err := driver.PProf(options); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(2) + } +} + +type fetcher struct { +} + +func (f *fetcher) Fetch(src string, duration, timeout time.Duration) (*profile.Profile, string, error) { + sourceURL, timeout := adjustURL(src, duration, timeout) + if sourceURL == "" { + // Could not recognize URL, let regular pprof attempt to fetch the profile (eg. from a file) + return nil, "", nil + } + fmt.Fprintln(os.Stderr, "Fetching profile over HTTP from", sourceURL) + if duration > 0 { + fmt.Fprintf(os.Stderr, "Please wait... 
(%v)\n", duration) + } + p, err := getProfile(sourceURL, timeout) + return p, sourceURL, err +} + +func getProfile(source string, timeout time.Duration) (*profile.Profile, error) { + url, err := url.Parse(source) + if err != nil { + return nil, err + } + + var tlsConfig *tls.Config + if url.Scheme == "https+insecure" { + tlsConfig = &tls.Config{ + InsecureSkipVerify: true, + } + url.Scheme = "https" + source = url.String() + } + + client := &http.Client{ + Transport: &http.Transport{ + ResponseHeaderTimeout: timeout + 5*time.Second, + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: tlsConfig, + }, + } + resp, err := client.Get(source) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + defer resp.Body.Close() + return nil, statusCodeError(resp) + } + return profile.Parse(resp.Body) +} + +func statusCodeError(resp *http.Response) error { + if resp.Header.Get("X-Go-Pprof") != "" && strings.Contains(resp.Header.Get("Content-Type"), "text/plain") { + // error is from pprof endpoint + if body, err := io.ReadAll(resp.Body); err == nil { + return fmt.Errorf("server response: %s - %s", resp.Status, body) + } + } + return fmt.Errorf("server response: %s", resp.Status) +} + +// cpuProfileHandler is the Go pprof CPU profile handler URL. +const cpuProfileHandler = "/debug/pprof/profile" + +// adjustURL applies the duration/timeout values and Go specific defaults. +func adjustURL(source string, duration, timeout time.Duration) (string, time.Duration) { + u, err := url.Parse(source) + if err != nil || (u.Host == "" && u.Scheme != "" && u.Scheme != "file") { + // Try adding http:// to catch sources of the form hostname:port/path. + // url.Parse treats "hostname" as the scheme. + u, err = url.Parse("http://" + source) + } + if err != nil || u.Host == "" { + return "", 0 + } + + if u.Path == "" || u.Path == "/" { + u.Path = cpuProfileHandler + } + + // Apply duration/timeout overrides to URL. 
+ values := u.Query() + if duration > 0 { + values.Set("seconds", fmt.Sprint(int(duration.Seconds()))) + } else { + if urlSeconds := values.Get("seconds"); urlSeconds != "" { + if us, err := strconv.ParseInt(urlSeconds, 10, 32); err == nil { + duration = time.Duration(us) * time.Second + } + } + } + if timeout <= 0 { + if duration > 0 { + timeout = duration + duration/2 + } else { + timeout = 60 * time.Second + } + } + u.RawQuery = values.Encode() + return u.String(), timeout +} + +// objTool implements driver.ObjTool using Go libraries +// (instead of invoking GNU binutils). +type objTool struct { + mu sync.Mutex + disasmCache map[string]*objfile.Disasm +} + +func (*objTool) Open(name string, start, limit, offset uint64, relocationSymbol string) (driver.ObjFile, error) { + of, err := objfile.Open(name) + if err != nil { + return nil, err + } + f := &file{ + name: name, + file: of, + } + if start != 0 { + if load, err := of.LoadAddress(); err == nil { + f.offset = start - load + } + } + return f, nil +} + +func (*objTool) Demangle(names []string) (map[string]string, error) { + // No C++, nothing to demangle. 
+ return make(map[string]string), nil +} + +func (t *objTool) Disasm(file string, start, end uint64, intelSyntax bool) ([]driver.Inst, error) { + if intelSyntax { + return nil, fmt.Errorf("printing assembly in Intel syntax is not supported") + } + d, err := t.cachedDisasm(file) + if err != nil { + return nil, err + } + var asm []driver.Inst + d.Decode(start, end, nil, false, func(pc, size uint64, file string, line int, text string) { + asm = append(asm, driver.Inst{Addr: pc, File: file, Line: line, Text: text}) + }) + return asm, nil +} + +func (t *objTool) cachedDisasm(file string) (*objfile.Disasm, error) { + t.mu.Lock() + defer t.mu.Unlock() + if t.disasmCache == nil { + t.disasmCache = make(map[string]*objfile.Disasm) + } + d := t.disasmCache[file] + if d != nil { + return d, nil + } + f, err := objfile.Open(file) + if err != nil { + return nil, err + } + d, err = f.Disasm() + f.Close() + if err != nil { + return nil, err + } + t.disasmCache[file] = d + return d, nil +} + +func (*objTool) SetConfig(config string) { + // config is usually used to say what binaries to invoke. + // Ignore entirely. +} + +// file implements driver.ObjFile using Go libraries +// (instead of invoking GNU binutils). +// A file represents a single executable being analyzed. +type file struct { + name string + offset uint64 + sym []objfile.Sym + file *objfile.File + pcln objfile.Liner + + triedDwarf bool + dwarf *dwarf.Data +} + +func (f *file) Name() string { + return f.name +} + +func (f *file) ObjAddr(addr uint64) (uint64, error) { + return addr - f.offset, nil +} + +func (f *file) BuildID() string { + // No support for build ID. 
+ return "" +} + +func (f *file) SourceLine(addr uint64) ([]driver.Frame, error) { + if f.pcln == nil { + pcln, err := f.file.PCLineTable() + if err != nil { + return nil, err + } + f.pcln = pcln + } + addr -= f.offset + file, line, fn := f.pcln.PCToLine(addr) + if fn != nil { + frame := []driver.Frame{ + { + Func: fn.Name, + File: file, + Line: line, + }, + } + return frame, nil + } + + frames := f.dwarfSourceLine(addr) + if frames != nil { + return frames, nil + } + + return nil, fmt.Errorf("no line information for PC=%#x", addr) +} + +// dwarfSourceLine tries to get file/line information using DWARF. +// This is for C functions that appear in the profile. +// Returns nil if there is no information available. +func (f *file) dwarfSourceLine(addr uint64) []driver.Frame { + if f.dwarf == nil && !f.triedDwarf { + // Ignore any error--we don't care exactly why there + // is no DWARF info. + f.dwarf, _ = f.file.DWARF() + f.triedDwarf = true + } + + if f.dwarf != nil { + r := f.dwarf.Reader() + unit, err := r.SeekPC(addr) + if err == nil { + if frames := f.dwarfSourceLineEntry(r, unit, addr); frames != nil { + return frames + } + } + } + + return nil +} + +// dwarfSourceLineEntry tries to get file/line information from a +// DWARF compilation unit. Returns nil if it doesn't find anything. +func (f *file) dwarfSourceLineEntry(r *dwarf.Reader, entry *dwarf.Entry, addr uint64) []driver.Frame { + lines, err := f.dwarf.LineReader(entry) + if err != nil { + return nil + } + var lentry dwarf.LineEntry + if err := lines.SeekPC(addr, &lentry); err != nil { + return nil + } + + // Try to find the function name. + name := "" +FindName: + for entry, err := r.Next(); entry != nil && err == nil; entry, err = r.Next() { + if entry.Tag == dwarf.TagSubprogram { + ranges, err := f.dwarf.Ranges(entry) + if err != nil { + return nil + } + for _, pcs := range ranges { + if pcs[0] <= addr && addr < pcs[1] { + var ok bool + // TODO: AT_linkage_name, AT_MIPS_linkage_name. 
+ name, ok = entry.Val(dwarf.AttrName).(string) + if ok { + break FindName + } + } + } + } + } + + // TODO: Report inlined functions. + + frames := []driver.Frame{ + { + Func: name, + File: lentry.File.Name, + Line: lentry.Line, + }, + } + + return frames +} + +func (f *file) Symbols(r *regexp.Regexp, addr uint64) ([]*driver.Sym, error) { + if f.sym == nil { + sym, err := f.file.Symbols() + if err != nil { + return nil, err + } + f.sym = sym + } + var out []*driver.Sym + for _, s := range f.sym { + // Ignore a symbol with address 0 and size 0. + // An ELF STT_FILE symbol will look like that. + if s.Addr == 0 && s.Size == 0 { + continue + } + if (r == nil || r.MatchString(s.Name)) && (addr == 0 || s.Addr <= addr && addr < s.Addr+uint64(s.Size)) { + out = append(out, &driver.Sym{ + Name: []string{s.Name}, + File: f.name, + Start: s.Addr, + End: s.Addr + uint64(s.Size) - 1, + }) + } + } + return out, nil +} + +func (f *file) Close() error { + f.file.Close() + return nil +} + +// newUI will be set in readlineui.go in some platforms +// for interactive readline functionality. +var newUI = func() driver.UI { return nil } diff --git a/platform/dbops/binaries/go/go/src/cmd/pprof/pprof_test.go b/platform/dbops/binaries/go/go/src/cmd/pprof/pprof_test.go new file mode 100644 index 0000000000000000000000000000000000000000..494cd8f24c00c2659dd6e1c6e15ac68d1ec61e00 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/pprof/pprof_test.go @@ -0,0 +1,129 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "internal/testenv" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "testing" +) + +// TestMain executes the test binary as the pprof command if +// GO_PPROFTEST_IS_PPROF is set, and runs the tests otherwise. 
+func TestMain(m *testing.M) { + if os.Getenv("GO_PPROFTEST_IS_PPROF") != "" { + main() + os.Exit(0) + } + + os.Setenv("GO_PPROFTEST_IS_PPROF", "1") // Set for subprocesses to inherit. + os.Exit(m.Run()) +} + +// pprofPath returns the path to the "pprof" binary to run. +func pprofPath(t testing.TB) string { + t.Helper() + testenv.MustHaveExec(t) + + pprofPathOnce.Do(func() { + pprofExePath, pprofPathErr = os.Executable() + }) + if pprofPathErr != nil { + t.Fatal(pprofPathErr) + } + return pprofExePath +} + +var ( + pprofPathOnce sync.Once + pprofExePath string + pprofPathErr error +) + +// See also runtime/pprof.cpuProfilingBroken. +func mustHaveCPUProfiling(t *testing.T) { + switch runtime.GOOS { + case "plan9": + t.Skipf("skipping on %s, unimplemented", runtime.GOOS) + case "aix": + t.Skipf("skipping on %s, issue 45170", runtime.GOOS) + case "ios", "dragonfly", "netbsd", "illumos", "solaris": + t.Skipf("skipping on %s, issue 13841", runtime.GOOS) + case "openbsd": + if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { + t.Skipf("skipping on %s/%s, issue 13841", runtime.GOOS, runtime.GOARCH) + } + } +} + +func mustHaveDisasm(t *testing.T) { + switch runtime.GOARCH { + case "loong64": + t.Skipf("skipping on %s.", runtime.GOARCH) + case "mips", "mipsle", "mips64", "mips64le": + t.Skipf("skipping on %s, issue 12559", runtime.GOARCH) + case "riscv64": + t.Skipf("skipping on %s, issue 36738", runtime.GOARCH) + case "s390x": + t.Skipf("skipping on %s, issue 15255", runtime.GOARCH) + } + + // pprof can only disassemble PIE on some platforms. + // Skip the ones it can't handle yet. + if runtime.GOOS == "android" && runtime.GOARCH == "arm" { + t.Skipf("skipping on %s/%s, issue 46639", runtime.GOOS, runtime.GOARCH) + } +} + +// TestDisasm verifies that cmd/pprof can successfully disassemble functions. +// +// This is a regression test for issue 46636. 
+func TestDisasm(t *testing.T) { + mustHaveCPUProfiling(t) + mustHaveDisasm(t) + testenv.MustHaveGoBuild(t) + + tmpdir := t.TempDir() + cpuExe := filepath.Join(tmpdir, "cpu.exe") + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", cpuExe, "cpu.go") + cmd.Dir = "testdata/" + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("build failed: %v\n%s", err, out) + } + + profile := filepath.Join(tmpdir, "cpu.pprof") + cmd = testenv.Command(t, cpuExe, "-output", profile) + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("cpu failed: %v\n%s", err, out) + } + + cmd = testenv.Command(t, pprofPath(t), "-disasm", "main.main", cpuExe, profile) + out, err = cmd.CombinedOutput() + if err != nil { + t.Errorf("pprof -disasm failed: %v\n%s", err, out) + + // Try to print out profile content for debugging. + cmd = testenv.Command(t, pprofPath(t), "-raw", cpuExe, profile) + out, err = cmd.CombinedOutput() + if err != nil { + t.Logf("pprof -raw failed: %v\n%s", err, out) + } else { + t.Logf("profile content:\n%s", out) + } + return + } + + sout := string(out) + want := "ROUTINE ======================== main.main" + if !strings.Contains(sout, want) { + t.Errorf("pprof -disasm got %s want contains %q", sout, want) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/pprof/readlineui.go b/platform/dbops/binaries/go/go/src/cmd/pprof/readlineui.go new file mode 100644 index 0000000000000000000000000000000000000000..b0f998f719559d8fbd994f80d2d0ed54b3e526d0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/pprof/readlineui.go @@ -0,0 +1,118 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains a driver.UI implementation +// that provides the readline functionality if possible. 
+ +//go:build (darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows) && !appengine && !android + +package main + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/google/pprof/driver" + "golang.org/x/term" +) + +func init() { + newUI = newReadlineUI +} + +// readlineUI implements driver.UI interface using the +// golang.org/x/term package. +// The upstream pprof command implements the same functionality +// using the github.com/chzyer/readline package. +type readlineUI struct { + term *term.Terminal +} + +func newReadlineUI() driver.UI { + // disable readline UI in dumb terminal. (golang.org/issue/26254) + if v := strings.ToLower(os.Getenv("TERM")); v == "" || v == "dumb" { + return nil + } + // test if we can use term.ReadLine + // that assumes operation in the raw mode. + oldState, err := term.MakeRaw(0) + if err != nil { + return nil + } + term.Restore(0, oldState) + + rw := struct { + io.Reader + io.Writer + }{os.Stdin, os.Stderr} + return &readlineUI{term: term.NewTerminal(rw, "")} +} + +// ReadLine returns a line of text (a command) read from the user. +// prompt is printed before reading the command. +func (r *readlineUI) ReadLine(prompt string) (string, error) { + r.term.SetPrompt(prompt) + + // skip error checking because we tested it + // when creating this readlineUI initially. + oldState, _ := term.MakeRaw(0) + defer term.Restore(0, oldState) + + s, err := r.term.ReadLine() + return s, err +} + +// Print shows a message to the user. +// It formats the text as fmt.Print would and adds a final \n if not already present. +// For line-based UI, Print writes to standard error. +// (Standard output is reserved for report data.) +func (r *readlineUI) Print(args ...any) { + r.print(false, args...) +} + +// PrintErr shows an error message to the user. +// It formats the text as fmt.Print would and adds a final \n if not already present. +// For line-based UI, PrintErr writes to standard error. 
+func (r *readlineUI) PrintErr(args ...any) { + r.print(true, args...) +} + +func (r *readlineUI) print(withColor bool, args ...any) { + text := fmt.Sprint(args...) + if !strings.HasSuffix(text, "\n") { + text += "\n" + } + if withColor { + text = colorize(text) + } + fmt.Fprint(r.term, text) +} + +// colorize prints the msg in red using ANSI color escapes. +func colorize(msg string) string { + const red = 31 + var colorEscape = fmt.Sprintf("\033[0;%dm", red) + var colorResetEscape = "\033[0m" + return colorEscape + msg + colorResetEscape +} + +// IsTerminal reports whether the UI is known to be tied to an +// interactive terminal (as opposed to being redirected to a file). +func (r *readlineUI) IsTerminal() bool { + const stdout = 1 + return term.IsTerminal(stdout) +} + +// WantBrowser indicates whether browser should be opened with the -http option. +func (r *readlineUI) WantBrowser() bool { + return r.IsTerminal() +} + +// SetAutoComplete instructs the UI to call complete(cmd) to obtain +// the auto-completion of cmd, if the UI supports auto-completion at all. +func (r *readlineUI) SetAutoComplete(complete func(string) string) { + // TODO: Implement auto-completion support. +} diff --git a/platform/dbops/binaries/go/go/src/cmd/test2json/main.go b/platform/dbops/binaries/go/go/src/cmd/test2json/main.go new file mode 100644 index 0000000000000000000000000000000000000000..09d5fcec79cb499c8b239ffe7fae7154b9edffef --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/test2json/main.go @@ -0,0 +1,161 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test2json converts go test output to a machine-readable JSON stream. 
+// +// Usage: +// +// go tool test2json [-p pkg] [-t] [./pkg.test -test.v=test2json] +// +// Test2json runs the given test command and converts its output to JSON; +// with no command specified, test2json expects test output on standard input. +// It writes a corresponding stream of JSON events to standard output. +// There is no unnecessary input or output buffering, so that +// the JSON stream can be read for “live updates” of test status. +// +// The -p flag sets the package reported in each test event. +// +// The -t flag requests that time stamps be added to each test event. +// +// The test should be invoked with -test.v=test2json. Using only -test.v +// (or -test.v=true) is permissible but produces lower fidelity results. +// +// Note that "go test -json" takes care of invoking test2json correctly, +// so "go tool test2json" is only needed when a test binary is being run +// separately from "go test". Use "go test -json" whenever possible. +// +// Note also that test2json is only intended for converting a single test +// binary's output. To convert the output of a "go test" command that +// runs multiple packages, again use "go test -json". +// +// # Output Format +// +// The JSON stream is a newline-separated sequence of TestEvent objects +// corresponding to the Go struct: +// +// type TestEvent struct { +// Time time.Time // encodes as an RFC3339-format string +// Action string +// Package string +// Test string +// Elapsed float64 // seconds +// Output string +// } +// +// The Time field holds the time the event happened. +// It is conventionally omitted for cached test results. 
+// +// The Action field is one of a fixed set of action descriptions: +// +// start - the test binary is about to be executed +// run - the test has started running +// pause - the test has been paused +// cont - the test has continued running +// pass - the test passed +// bench - the benchmark printed log output but did not fail +// fail - the test or benchmark failed +// output - the test printed output +// skip - the test was skipped or the package contained no tests +// +// Every JSON stream begins with a "start" event. +// +// The Package field, if present, specifies the package being tested. +// When the go command runs parallel tests in -json mode, events from +// different tests are interlaced; the Package field allows readers to +// separate them. +// +// The Test field, if present, specifies the test, example, or benchmark +// function that caused the event. Events for the overall package test +// do not set Test. +// +// The Elapsed field is set for "pass" and "fail" events. It gives the time +// elapsed for the specific test or the overall package test that passed or failed. +// +// The Output field is set for Action == "output" and is a portion of the test's output +// (standard output and standard error merged together). The output is +// unmodified except that invalid UTF-8 output from a test is coerced +// into valid UTF-8 by use of replacement characters. With that one exception, +// the concatenation of the Output fields of all output events is the exact +// output of the test execution. +// +// When a benchmark runs, it typically produces a single line of output +// giving timing results. That line is reported in an event with Action == "output" +// and no Test field. If a benchmark logs output or reports a failure +// (for example, by using b.Log or b.Error), that extra output is reported +// as a sequence of events with Test set to the benchmark name, terminated +// by a final event with Action == "bench" or "fail". 
+// Benchmarks have no events with Action == "pause". +package main + +import ( + "flag" + "fmt" + "io" + "os" + "os/exec" + "os/signal" + + "cmd/internal/test2json" +) + +var ( + flagP = flag.String("p", "", "report `pkg` as the package being tested in each event") + flagT = flag.Bool("t", false, "include timestamps in events") +) + +func usage() { + fmt.Fprintf(os.Stderr, "usage: go tool test2json [-p pkg] [-t] [./pkg.test -test.v]\n") + os.Exit(2) +} + +// ignoreSignals ignore the interrupt signals. +func ignoreSignals() { + signal.Ignore(signalsToIgnore...) +} + +func main() { + flag.Usage = usage + flag.Parse() + + var mode test2json.Mode + if *flagT { + mode |= test2json.Timestamp + } + c := test2json.NewConverter(os.Stdout, *flagP, mode) + defer c.Close() + + if flag.NArg() == 0 { + io.Copy(c, os.Stdin) + } else { + args := flag.Args() + cmd := exec.Command(args[0], args[1:]...) + w := &countWriter{0, c} + cmd.Stdout = w + cmd.Stderr = w + ignoreSignals() + err := cmd.Run() + if err != nil { + if w.n > 0 { + // Assume command printed why it failed. + } else { + fmt.Fprintf(c, "test2json: %v\n", err) + } + } + c.Exited(err) + if err != nil { + c.Close() + os.Exit(1) + } + } +} + +type countWriter struct { + n int64 + w io.Writer +} + +func (w *countWriter) Write(b []byte) (int, error) { + w.n += int64(len(b)) + return w.w.Write(b) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/test2json/signal_notunix.go b/platform/dbops/binaries/go/go/src/cmd/test2json/signal_notunix.go new file mode 100644 index 0000000000000000000000000000000000000000..e5a73be8cc23c4ac00a49a7383beb3c31c80c3f0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/test2json/signal_notunix.go @@ -0,0 +1,13 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build plan9 || windows + +package main + +import ( + "os" +) + +var signalsToIgnore = []os.Signal{os.Interrupt} diff --git a/platform/dbops/binaries/go/go/src/cmd/test2json/signal_unix.go b/platform/dbops/binaries/go/go/src/cmd/test2json/signal_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..2b4e44cb91a67a6e4aac765f596fcd7c13279e2c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/test2json/signal_unix.go @@ -0,0 +1,14 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || js || wasip1 + +package main + +import ( + "os" + "syscall" +) + +var signalsToIgnore = []os.Signal{os.Interrupt, syscall.SIGQUIT} diff --git a/platform/dbops/binaries/go/go/src/cmd/tools/tools.go b/platform/dbops/binaries/go/go/src/cmd/tools/tools.go new file mode 100644 index 0000000000000000000000000000000000000000..5e0f2774c2fc8144b78092ffff6ec9123c25499d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/tools/tools.go @@ -0,0 +1,11 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build tools + +package tools + +// Arrange to vendor the bisect command for use +// by the internal/godebug package test. +import _ "golang.org/x/tools/cmd/bisect" diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/annotations.go b/platform/dbops/binaries/go/go/src/cmd/trace/annotations.go new file mode 100644 index 0000000000000000000000000000000000000000..df194a759808e6ca7b9ccd01800a510d443918fa --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/annotations.go @@ -0,0 +1,1196 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "fmt" + "html/template" + "internal/trace" + "internal/trace/traceviewer" + "log" + "net/http" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +func init() { + http.HandleFunc("/usertasks", httpUserTasks) + http.HandleFunc("/usertask", httpUserTask) + http.HandleFunc("/userregions", httpUserRegions) + http.HandleFunc("/userregion", httpUserRegion) +} + +// httpUserTasks reports all tasks found in the trace. +func httpUserTasks(w http.ResponseWriter, r *http.Request) { + res, err := analyzeAnnotations() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + tasks := res.tasks + summary := make(map[string]taskStats) + for _, task := range tasks { + stats, ok := summary[task.name] + if !ok { + stats.Type = task.name + } + + stats.add(task) + summary[task.name] = stats + } + + // Sort tasks by type. + userTasks := make([]taskStats, 0, len(summary)) + for _, stats := range summary { + userTasks = append(userTasks, stats) + } + sort.Slice(userTasks, func(i, j int) bool { + return userTasks[i].Type < userTasks[j].Type + }) + + // Emit table. 
+ err = templUserTaskTypes.Execute(w, userTasks) + if err != nil { + http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) + return + } +} + +func httpUserRegions(w http.ResponseWriter, r *http.Request) { + res, err := analyzeAnnotations() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + allRegions := res.regions + + summary := make(map[regionTypeID]regionStats) + for id, regions := range allRegions { + stats, ok := summary[id] + if !ok { + stats.regionTypeID = id + } + for _, s := range regions { + stats.add(s) + } + summary[id] = stats + } + // Sort regions by pc and name + userRegions := make([]regionStats, 0, len(summary)) + for _, stats := range summary { + userRegions = append(userRegions, stats) + } + sort.Slice(userRegions, func(i, j int) bool { + if userRegions[i].Type != userRegions[j].Type { + return userRegions[i].Type < userRegions[j].Type + } + return userRegions[i].Frame.PC < userRegions[j].Frame.PC + }) + // Emit table. 
+ err = templUserRegionTypes.Execute(w, userRegions) + if err != nil { + http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) + return + } +} + +func httpUserRegion(w http.ResponseWriter, r *http.Request) { + filter, err := newRegionFilter(r) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + res, err := analyzeAnnotations() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + allRegions := res.regions + + var data []regionDesc + + var maxTotal int64 + for id, regions := range allRegions { + for _, s := range regions { + if !filter.match(id, s) { + continue + } + data = append(data, s) + if maxTotal < s.TotalTime { + maxTotal = s.TotalTime + } + } + } + + sortby := r.FormValue("sortby") + _, ok := reflect.TypeOf(regionDesc{}).FieldByNameFunc(func(s string) bool { + return s == sortby + }) + if !ok { + sortby = "TotalTime" + } + sort.Slice(data, func(i, j int) bool { + ival := reflect.ValueOf(data[i]).FieldByName(sortby).Int() + jval := reflect.ValueOf(data[j]).FieldByName(sortby).Int() + return ival > jval + }) + + err = templUserRegionType.Execute(w, struct { + MaxTotal int64 + Data []regionDesc + Name string + Filter *regionFilter + }{ + MaxTotal: maxTotal, + Data: data, + Name: filter.name, + Filter: filter, + }) + if err != nil { + http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) + return + } +} + +// httpUserTask presents the details of the selected tasks. 
+func httpUserTask(w http.ResponseWriter, r *http.Request) { + filter, err := newTaskFilter(r) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + res, err := analyzeAnnotations() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + tasks := res.tasks + + type event struct { + WhenString string + Elapsed time.Duration + Go uint64 + What string + // TODO: include stack trace of creation time + } + type entry struct { + WhenString string + ID uint64 + Duration time.Duration + Complete bool + Events []event + Start, End time.Duration // Time since the beginning of the trace + GCTime time.Duration + } + + base := time.Duration(firstTimestamp()) * time.Nanosecond // trace start + + var data []entry + + for _, task := range tasks { + if !filter.match(task) { + continue + } + // merge events in the task.events and task.regions.Start + rawEvents := append([]*trace.Event{}, task.events...) + for _, s := range task.regions { + if s.Start != nil { + rawEvents = append(rawEvents, s.Start) + } + } + sort.SliceStable(rawEvents, func(i, j int) bool { return rawEvents[i].Ts < rawEvents[j].Ts }) + + var events []event + var last time.Duration + for i, ev := range rawEvents { + when := time.Duration(ev.Ts)*time.Nanosecond - base + elapsed := time.Duration(ev.Ts)*time.Nanosecond - last + if i == 0 { + elapsed = 0 + } + + what := describeEvent(ev) + if what != "" { + events = append(events, event{ + WhenString: fmt.Sprintf("%2.9f", when.Seconds()), + Elapsed: elapsed, + What: what, + Go: ev.G, + }) + last = time.Duration(ev.Ts) * time.Nanosecond + } + } + + data = append(data, entry{ + WhenString: fmt.Sprintf("%2.9fs", (time.Duration(task.firstTimestamp())*time.Nanosecond - base).Seconds()), + Duration: task.duration(), + ID: task.id, + Complete: task.complete(), + Events: events, + Start: time.Duration(task.firstTimestamp()) * time.Nanosecond, + End: time.Duration(task.endTimestamp()) * time.Nanosecond, + 
GCTime: task.overlappingGCDuration(res.gcEvents), + }) + } + sort.Slice(data, func(i, j int) bool { + return data[i].Duration < data[j].Duration + }) + + // Emit table. + err = templUserTaskType.Execute(w, struct { + Name string + Entry []entry + }{ + Name: filter.name, + Entry: data, + }) + if err != nil { + log.Printf("failed to execute template: %v", err) + http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) + return + } +} + +type annotationAnalysisResult struct { + tasks map[uint64]*taskDesc // tasks + regions map[regionTypeID][]regionDesc // regions + gcEvents []*trace.Event // GCStartevents, sorted +} + +type regionTypeID struct { + Frame trace.Frame // top frame + Type string +} + +// analyzeAnnotations analyzes user annotation events and +// returns the task descriptors keyed by internal task id. +func analyzeAnnotations() (annotationAnalysisResult, error) { + res, err := parseTrace() + if err != nil { + return annotationAnalysisResult{}, fmt.Errorf("failed to parse trace: %v", err) + } + + events := res.Events + if len(events) == 0 { + return annotationAnalysisResult{}, fmt.Errorf("empty trace") + } + + tasks := allTasks{} + regions := map[regionTypeID][]regionDesc{} + var gcEvents []*trace.Event + + for _, ev := range events { + switch typ := ev.Type; typ { + case trace.EvUserTaskCreate, trace.EvUserTaskEnd, trace.EvUserLog: + taskid := ev.Args[0] + task := tasks.task(taskid) + task.addEvent(ev) + + // retrieve parent task information + if typ == trace.EvUserTaskCreate { + if parentID := ev.Args[1]; parentID != 0 { + parentTask := tasks.task(parentID) + task.parent = parentTask + if parentTask != nil { + parentTask.children = append(parentTask.children, task) + } + } + } + + case trace.EvGCStart: + gcEvents = append(gcEvents, ev) + } + } + // combine region info. + analyzeGoroutines(events) + for goid, stats := range gs { + // gs is a global var defined in goroutines.go as a result + // of analyzeGoroutines. 
TODO(hyangah): fix this not to depend + // on a 'global' var. + for _, s := range stats.Regions { + if s.TaskID != 0 { + task := tasks.task(s.TaskID) + task.goroutines[goid] = struct{}{} + task.regions = append(task.regions, regionDesc{UserRegionDesc: s, G: goid}) + } + var frame trace.Frame + if s.Start != nil { + frame = *s.Start.Stk[0] + } + id := regionTypeID{Frame: frame, Type: s.Name} + regions[id] = append(regions[id], regionDesc{UserRegionDesc: s, G: goid}) + } + } + + // sort regions in tasks based on the timestamps. + for _, task := range tasks { + sort.SliceStable(task.regions, func(i, j int) bool { + si, sj := task.regions[i].firstTimestamp(), task.regions[j].firstTimestamp() + if si != sj { + return si < sj + } + return task.regions[i].lastTimestamp() < task.regions[j].lastTimestamp() + }) + } + return annotationAnalysisResult{tasks: tasks, regions: regions, gcEvents: gcEvents}, nil +} + +// taskDesc represents a task. +type taskDesc struct { + name string // user-provided task name + id uint64 // internal task id + events []*trace.Event // sorted based on timestamp. + regions []regionDesc // associated regions, sorted based on the start timestamp and then the last timestamp. 
+ goroutines map[uint64]struct{} // involved goroutines + + create *trace.Event // Task create event + end *trace.Event // Task end event + + parent *taskDesc + children []*taskDesc +} + +func newTaskDesc(id uint64) *taskDesc { + return &taskDesc{ + id: id, + goroutines: make(map[uint64]struct{}), + } +} + +func (task *taskDesc) String() string { + if task == nil { + return "task " + } + wb := new(strings.Builder) + fmt.Fprintf(wb, "task %d:\t%s\n", task.id, task.name) + fmt.Fprintf(wb, "\tstart: %v end: %v complete: %t\n", task.firstTimestamp(), task.endTimestamp(), task.complete()) + fmt.Fprintf(wb, "\t%d goroutines\n", len(task.goroutines)) + fmt.Fprintf(wb, "\t%d regions:\n", len(task.regions)) + for _, s := range task.regions { + fmt.Fprintf(wb, "\t\t%s(goid=%d)\n", s.Name, s.G) + } + if task.parent != nil { + fmt.Fprintf(wb, "\tparent: %s\n", task.parent.name) + } + fmt.Fprintf(wb, "\t%d children:\n", len(task.children)) + for _, c := range task.children { + fmt.Fprintf(wb, "\t\t%s\n", c.name) + } + + return wb.String() +} + +// regionDesc represents a region. +type regionDesc struct { + *trace.UserRegionDesc + G uint64 // id of goroutine where the region was defined +} + +type allTasks map[uint64]*taskDesc + +func (tasks allTasks) task(taskID uint64) *taskDesc { + if taskID == 0 { + return nil // notask + } + + t, ok := tasks[taskID] + if ok { + return t + } + + t = newTaskDesc(taskID) + tasks[taskID] = t + return t +} + +func (task *taskDesc) addEvent(ev *trace.Event) { + if task == nil { + return + } + + task.events = append(task.events, ev) + task.goroutines[ev.G] = struct{}{} + + switch typ := ev.Type; typ { + case trace.EvUserTaskCreate: + task.name = ev.SArgs[0] + task.create = ev + case trace.EvUserTaskEnd: + task.end = ev + } +} + +// complete is true only if both start and end events of this task +// are present in the trace. 
+func (task *taskDesc) complete() bool { + if task == nil { + return false + } + return task.create != nil && task.end != nil +} + +// descendants returns all the task nodes in the subtree rooted from this task. +func (task *taskDesc) descendants() []*taskDesc { + if task == nil { + return nil + } + res := []*taskDesc{task} + for i := 0; len(res[i:]) > 0; i++ { + t := res[i] + res = append(res, t.children...) + } + return res +} + +// firstTimestamp returns the first timestamp of this task found in +// this trace. If the trace does not contain the task creation event, +// the first timestamp of the trace will be returned. +func (task *taskDesc) firstTimestamp() int64 { + if task != nil && task.create != nil { + return task.create.Ts + } + return firstTimestamp() +} + +// lastTimestamp returns the last timestamp of this task in this +// trace. If the trace does not contain the task end event, the last +// timestamp of the trace will be returned. +func (task *taskDesc) lastTimestamp() int64 { + endTs := task.endTimestamp() + if last := task.lastEvent(); last != nil && last.Ts > endTs { + return last.Ts + } + return endTs +} + +// endTimestamp returns the timestamp of this task's end event. +// If the trace does not contain the task end event, the last +// timestamp of the trace will be returned. +func (task *taskDesc) endTimestamp() int64 { + if task != nil && task.end != nil { + return task.end.Ts + } + return lastTimestamp() +} + +func (task *taskDesc) duration() time.Duration { + return time.Duration(task.endTimestamp()-task.firstTimestamp()) * time.Nanosecond +} + +func (region *regionDesc) duration() time.Duration { + return time.Duration(region.lastTimestamp()-region.firstTimestamp()) * time.Nanosecond +} + +// overlappingGCDuration returns the sum of GC period overlapping with the task's lifetime. 
+func (task *taskDesc) overlappingGCDuration(evs []*trace.Event) (overlapping time.Duration) { + for _, ev := range evs { + // make sure we only consider the global GC events. + if typ := ev.Type; typ != trace.EvGCStart { + continue + } + + if o, overlapped := task.overlappingDuration(ev); overlapped { + overlapping += o + } + } + return overlapping +} + +// overlappingInstant reports whether the instantaneous event, ev, occurred during +// any of the task's region if ev is a goroutine-local event, or overlaps with the +// task's lifetime if ev is a global event. +func (task *taskDesc) overlappingInstant(ev *trace.Event) bool { + if _, ok := isUserAnnotationEvent(ev); ok && task.id != ev.Args[0] { + return false // not this task's user event. + } + + ts := ev.Ts + taskStart := task.firstTimestamp() + taskEnd := task.endTimestamp() + if ts < taskStart || taskEnd < ts { + return false + } + if ev.P == trace.GCP { + return true + } + + // Goroutine local event. Check whether there are regions overlapping with the event. + goid := ev.G + for _, region := range task.regions { + if region.G != goid { + continue + } + if region.firstTimestamp() <= ts && ts <= region.lastTimestamp() { + return true + } + } + return false +} + +// overlappingDuration reports whether the durational event, ev, overlaps with +// any of the task's region if ev is a goroutine-local event, or overlaps with +// the task's lifetime if ev is a global event. It returns the overlapping time +// as well. 
+func (task *taskDesc) overlappingDuration(ev *trace.Event) (time.Duration, bool) { + start := ev.Ts + end := lastTimestamp() + if ev.Link != nil { + end = ev.Link.Ts + } + + if start > end { + return 0, false + } + + goid := ev.G + goid2 := ev.G + if ev.Link != nil { + goid2 = ev.Link.G + } + + // This event is a global GC event + if ev.P == trace.GCP { + taskStart := task.firstTimestamp() + taskEnd := task.endTimestamp() + o := overlappingDuration(taskStart, taskEnd, start, end) + return o, o > 0 + } + + // Goroutine local event. Check whether there are regions overlapping with the event. + var overlapping time.Duration + var lastRegionEnd int64 // the end of previous overlapping region + for _, region := range task.regions { + if region.G != goid && region.G != goid2 { + continue + } + regionStart, regionEnd := region.firstTimestamp(), region.lastTimestamp() + if regionStart < lastRegionEnd { // skip nested regions + continue + } + + if o := overlappingDuration(regionStart, regionEnd, start, end); o > 0 { + // overlapping. + lastRegionEnd = regionEnd + overlapping += o + } + } + return overlapping, overlapping > 0 +} + +// overlappingDuration returns the overlapping time duration between +// two time intervals [start1, end1] and [start2, end2] where +// start, end parameters are all int64 representing nanoseconds. +func overlappingDuration(start1, end1, start2, end2 int64) time.Duration { + // assume start1 <= end1 and start2 <= end2 + if end1 < start2 || end2 < start1 { + return 0 + } + + if start1 < start2 { // choose the later one + start1 = start2 + } + if end1 > end2 { // choose the earlier one + end1 = end2 + } + return time.Duration(end1 - start1) +} + +func (task *taskDesc) lastEvent() *trace.Event { + if task == nil { + return nil + } + + if n := len(task.events); n > 0 { + return task.events[n-1] + } + return nil +} + +// firstTimestamp returns the timestamp of region start event. 
+// If the region's start event is not present in the trace, +// the first timestamp of the trace will be returned. +func (region *regionDesc) firstTimestamp() int64 { + if region.Start != nil { + return region.Start.Ts + } + return firstTimestamp() +} + +// lastTimestamp returns the timestamp of region end event. +// If the region's end event is not present in the trace, +// the last timestamp of the trace will be returned. +func (region *regionDesc) lastTimestamp() int64 { + if region.End != nil { + return region.End.Ts + } + return lastTimestamp() +} + +// RelatedGoroutines returns IDs of goroutines related to the task. A goroutine +// is related to the task if user annotation activities for the task occurred. +// If non-zero depth is provided, this searches all events with BFS and includes +// goroutines unblocked any of related goroutines to the result. +func (task *taskDesc) RelatedGoroutines(events []*trace.Event, depth int) map[uint64]bool { + start, end := task.firstTimestamp(), task.endTimestamp() + + gmap := map[uint64]bool{} + for k := range task.goroutines { + gmap[k] = true + } + + for i := 0; i < depth; i++ { + gmap1 := make(map[uint64]bool) + for g := range gmap { + gmap1[g] = true + } + for _, ev := range events { + if ev.Ts < start || ev.Ts > end { + continue + } + if ev.Type == trace.EvGoUnblock && gmap[ev.Args[0]] { + gmap1[ev.G] = true + } + gmap = gmap1 + } + } + gmap[0] = true // for GC events (goroutine id = 0) + return gmap +} + +type taskFilter struct { + name string + cond []func(*taskDesc) bool +} + +func (f *taskFilter) match(t *taskDesc) bool { + if t == nil { + return false + } + for _, c := range f.cond { + if !c(t) { + return false + } + } + return true +} + +func newTaskFilter(r *http.Request) (*taskFilter, error) { + if err := r.ParseForm(); err != nil { + return nil, err + } + + var name []string + var conditions []func(*taskDesc) bool + + param := r.Form + if typ, ok := param["type"]; ok && len(typ) > 0 { + name = append(name, 
"type="+typ[0]) + conditions = append(conditions, func(t *taskDesc) bool { + return t.name == typ[0] + }) + } + if complete := r.FormValue("complete"); complete == "1" { + name = append(name, "complete") + conditions = append(conditions, func(t *taskDesc) bool { + return t.complete() + }) + } else if complete == "0" { + name = append(name, "incomplete") + conditions = append(conditions, func(t *taskDesc) bool { + return !t.complete() + }) + } + if lat, err := time.ParseDuration(r.FormValue("latmin")); err == nil { + name = append(name, fmt.Sprintf("latency >= %s", lat)) + conditions = append(conditions, func(t *taskDesc) bool { + return t.complete() && t.duration() >= lat + }) + } + if lat, err := time.ParseDuration(r.FormValue("latmax")); err == nil { + name = append(name, fmt.Sprintf("latency <= %s", lat)) + conditions = append(conditions, func(t *taskDesc) bool { + return t.complete() && t.duration() <= lat + }) + } + if text := r.FormValue("logtext"); text != "" { + name = append(name, fmt.Sprintf("log contains %q", text)) + conditions = append(conditions, func(t *taskDesc) bool { + return taskMatches(t, text) + }) + } + + return &taskFilter{name: strings.Join(name, ","), cond: conditions}, nil +} + +func taskMatches(t *taskDesc, text string) bool { + for _, ev := range t.events { + switch ev.Type { + case trace.EvUserTaskCreate, trace.EvUserRegion, trace.EvUserLog: + for _, s := range ev.SArgs { + if strings.Contains(s, text) { + return true + } + } + } + } + return false +} + +type regionFilter struct { + name string + params url.Values + cond []func(regionTypeID, regionDesc) bool +} + +func (f *regionFilter) match(id regionTypeID, s regionDesc) bool { + for _, c := range f.cond { + if !c(id, s) { + return false + } + } + return true +} + +func newRegionFilter(r *http.Request) (*regionFilter, error) { + if err := r.ParseForm(); err != nil { + return nil, err + } + + var name []string + var conditions []func(regionTypeID, regionDesc) bool + filterParams := 
make(url.Values) + + param := r.Form + if typ, ok := param["type"]; ok && len(typ) > 0 { + name = append(name, "type="+typ[0]) + conditions = append(conditions, func(id regionTypeID, s regionDesc) bool { + return id.Type == typ[0] + }) + filterParams.Add("type", typ[0]) + } + if pc, err := strconv.ParseUint(r.FormValue("pc"), 16, 64); err == nil { + encPC := fmt.Sprintf("%x", pc) + name = append(name, "pc="+encPC) + conditions = append(conditions, func(id regionTypeID, s regionDesc) bool { + return id.Frame.PC == pc + }) + filterParams.Add("pc", encPC) + } + + if lat, err := time.ParseDuration(r.FormValue("latmin")); err == nil { + name = append(name, fmt.Sprintf("latency >= %s", lat)) + conditions = append(conditions, func(_ regionTypeID, s regionDesc) bool { + return s.duration() >= lat + }) + filterParams.Add("latmin", lat.String()) + } + if lat, err := time.ParseDuration(r.FormValue("latmax")); err == nil { + name = append(name, fmt.Sprintf("latency <= %s", lat)) + conditions = append(conditions, func(_ regionTypeID, s regionDesc) bool { + return s.duration() <= lat + }) + filterParams.Add("latmax", lat.String()) + } + + return ®ionFilter{ + name: strings.Join(name, ","), + cond: conditions, + params: filterParams, + }, nil +} + +type regionStats struct { + regionTypeID + Histogram traceviewer.TimeHistogram +} + +func (s *regionStats) UserRegionURL() func(min, max time.Duration) string { + return func(min, max time.Duration) string { + return fmt.Sprintf("/userregion?type=%s&pc=%x&latmin=%v&latmax=%v", template.URLQueryEscaper(s.Type), s.Frame.PC, template.URLQueryEscaper(min), template.URLQueryEscaper(max)) + } +} + +func (s *regionStats) add(region regionDesc) { + s.Histogram.Add(region.duration()) +} + +var templUserRegionTypes = template.Must(template.New("").Parse(` + + + + + + + + + +{{range $}} + + + + + +{{end}} +
    Region typeCountDuration distribution (complete tasks)
    {{.Type}}
    {{.Frame.Fn}}
    {{.Frame.File}}:{{.Frame.Line}}
    {{.Histogram.Count}}{{.Histogram.ToHTML (.UserRegionURL)}}
    + + +`)) + +type taskStats struct { + Type string + Count int // Complete + incomplete tasks + Histogram traceviewer.TimeHistogram // Complete tasks only +} + +func (s *taskStats) UserTaskURL(complete bool) func(min, max time.Duration) string { + return func(min, max time.Duration) string { + return fmt.Sprintf("/usertask?type=%s&complete=%v&latmin=%v&latmax=%v", template.URLQueryEscaper(s.Type), template.URLQueryEscaper(complete), template.URLQueryEscaper(min), template.URLQueryEscaper(max)) + } +} + +func (s *taskStats) add(task *taskDesc) { + s.Count++ + if task.complete() { + s.Histogram.Add(task.duration()) + } +} + +var templUserTaskTypes = template.Must(template.New("").Parse(` + + + +Search log text:

    + + + + + + +{{range $}} + + + + + +{{end}} +
    Task typeCountDuration distribution (complete tasks)
    {{.Type}}{{.Count}}{{.Histogram.ToHTML (.UserTaskURL true)}}
    + + +`)) + +var templUserTaskType = template.Must(template.New("userTask").Funcs(template.FuncMap{ + "elapsed": elapsed, + "asMillisecond": asMillisecond, + "trimSpace": strings.TrimSpace, +}).Parse(` + + User Task: {{.Name}} + + + +

    User Task: {{.Name}}

    + +Search log text:
    + +

    + + + + {{range $el := $.Entry}} + + + + + + + {{range $el.Events}} + + + + + + + {{end}} + + + + + + {{end}} + + +`)) + +func elapsed(d time.Duration) string { + b := fmt.Appendf(nil, "%.9f", d.Seconds()) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +func asMillisecond(d time.Duration) float64 { + return float64(d.Nanoseconds()) / 1e6 +} + +func formatUserLog(ev *trace.Event) string { + k, v := ev.SArgs[0], ev.SArgs[1] + if k == "" { + return v + } + if v == "" { + return k + } + return fmt.Sprintf("%v=%v", k, v) +} + +func describeEvent(ev *trace.Event) string { + switch ev.Type { + case trace.EvGoCreate: + goid := ev.Args[0] + return fmt.Sprintf("new goroutine %d: %s", goid, gs[goid].Name) + case trace.EvGoEnd, trace.EvGoStop: + return "goroutine stopped" + case trace.EvUserLog: + return formatUserLog(ev) + case trace.EvUserRegion: + if ev.Args[1] == 0 { + duration := "unknown" + if ev.Link != nil { + duration = (time.Duration(ev.Link.Ts-ev.Ts) * time.Nanosecond).String() + } + return fmt.Sprintf("region %s started (duration: %v)", ev.SArgs[0], duration) + } + return fmt.Sprintf("region %s ended", ev.SArgs[0]) + case trace.EvUserTaskCreate: + return fmt.Sprintf("task %v (id %d, parent %d) created", ev.SArgs[0], ev.Args[0], ev.Args[1]) + // TODO: add child task creation events into the parent task events + case trace.EvUserTaskEnd: + return "task end" + } + return "" +} + +func isUserAnnotationEvent(ev *trace.Event) (taskID uint64, ok bool) { + switch ev.Type { + case trace.EvUserLog, trace.EvUserRegion, trace.EvUserTaskCreate, trace.EvUserTaskEnd: + return ev.Args[0], true + } + return 0, false +} + +var templUserRegionType = 
template.Must(template.New("").Funcs(template.FuncMap{ + "prettyDuration": func(nsec int64) template.HTML { + d := time.Duration(nsec) * time.Nanosecond + return template.HTML(d.String()) + }, + "percent": func(dividend, divisor int64) template.HTML { + if divisor == 0 { + return "" + } + return template.HTML(fmt.Sprintf("(%.1f%%)", float64(dividend)/float64(divisor)*100)) + }, + "barLen": func(dividend, divisor int64) template.HTML { + if divisor == 0 { + return "0" + } + return template.HTML(fmt.Sprintf("%.2f%%", float64(dividend)/float64(divisor)*100)) + }, + "unknownTime": func(desc regionDesc) int64 { + sum := desc.ExecTime + desc.IOTime + desc.BlockTime + desc.SyscallTime + desc.SchedWaitTime + if sum < desc.TotalTime { + return desc.TotalTime - sum + } + return 0 + }, + "filterParams": func(f *regionFilter) template.URL { + return template.URL(f.params.Encode()) + }, +}).Parse(` + +User Region {{.Name}} + + + + +

    {{.Name}}

    + +{{ with $p := filterParams .Filter}} +
    WhenElapsedGoroutine IDEvents
    {{$el.WhenString}}{{$el.Duration}} +Task {{$el.ID}} +(goroutine view) +({{if .Complete}}complete{{else}}incomplete{{end}})
    {{.WhenString}}{{elapsed .Elapsed}}{{.Go}}{{.What}}
    GC:{{$el.GCTime}}
    + + + + +
    Network Wait Time: graph(download)
    Sync Block Time: graph(download)
    Blocking Syscall Time: graph(download)
    Scheduler Wait Time: graph(download)
    +{{ end }} +

    + + + + + + + + + + + + + + +{{range .Data}} + + + + + + + + + + + + + +{{end}} +
    Goroutine Task Total Execution Network wait Sync block Blocking syscall Scheduler wait GC sweeping GC pause
    {{.G}} {{if .TaskID}}{{.TaskID}}{{end}} {{prettyDuration .TotalTime}} +
    + {{if unknownTime .}} {{end}} + {{if .ExecTime}} {{end}} + {{if .IOTime}} {{end}} + {{if .BlockTime}} {{end}} + {{if .SyscallTime}} {{end}} + {{if .SchedWaitTime}} {{end}} +
    +
    {{prettyDuration .ExecTime}} {{prettyDuration .IOTime}} {{prettyDuration .BlockTime}} {{prettyDuration .SyscallTime}} {{prettyDuration .SchedWaitTime}} {{prettyDuration .SweepTime}} {{percent .SweepTime .TotalTime}} {{prettyDuration .GCTime}} {{percent .GCTime .TotalTime}}
    +

    +`)) diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/annotations_test.go b/platform/dbops/binaries/go/go/src/cmd/trace/annotations_test.go new file mode 100644 index 0000000000000000000000000000000000000000..36d3ec9d6dc15801f67cec4bdae1dc28f0d25a58 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/annotations_test.go @@ -0,0 +1,396 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !js + +package main + +import ( + "bytes" + "context" + "flag" + "fmt" + "internal/goexperiment" + traceparser "internal/trace" + "os" + "reflect" + "runtime/debug" + "runtime/trace" + "sort" + "sync" + "testing" + "time" +) + +var saveTraces = flag.Bool("savetraces", false, "save traces collected by tests") + +func TestOverlappingDuration(t *testing.T) { + cases := []struct { + start0, end0, start1, end1 int64 + want time.Duration + }{ + { + 1, 10, 11, 20, 0, + }, + { + 1, 10, 5, 20, 5 * time.Nanosecond, + }, + { + 1, 10, 2, 8, 6 * time.Nanosecond, + }, + } + + for _, tc := range cases { + s0, e0, s1, e1 := tc.start0, tc.end0, tc.start1, tc.end1 + if got := overlappingDuration(s0, e0, s1, e1); got != tc.want { + t.Errorf("overlappingDuration(%d, %d, %d, %d)=%v; want %v", s0, e0, s1, e1, got, tc.want) + } + if got := overlappingDuration(s1, e1, s0, e0); got != tc.want { + t.Errorf("overlappingDuration(%d, %d, %d, %d)=%v; want %v", s1, e1, s0, e0, got, tc.want) + } + } +} + +// prog0 starts three goroutines. +// +// goroutine 1: taskless region +// goroutine 2: starts task0, do work in task0.region0, starts task1 which ends immediately. 
+// goroutine 3: do work in task0.region1 and task0.region2, ends task0 +func prog0() { + ctx := context.Background() + + var wg sync.WaitGroup + + wg.Add(1) + go func() { // goroutine 1 + defer wg.Done() + trace.WithRegion(ctx, "taskless.region", func() { + trace.Log(ctx, "key0", "val0") + }) + }() + + wg.Add(1) + go func() { // goroutine 2 + defer wg.Done() + ctx, task := trace.NewTask(ctx, "task0") + trace.WithRegion(ctx, "task0.region0", func() { + wg.Add(1) + go func() { // goroutine 3 + defer wg.Done() + defer task.End() + trace.WithRegion(ctx, "task0.region1", func() { + trace.WithRegion(ctx, "task0.region2", func() { + trace.Log(ctx, "key2", "val2") + }) + trace.Log(ctx, "key1", "val1") + }) + }() + }) + ctx2, task2 := trace.NewTask(ctx, "task1") + trace.Log(ctx2, "key3", "val3") + task2.End() + }() + wg.Wait() +} + +func TestAnalyzeAnnotations(t *testing.T) { + // TODO: classify taskless regions + + // Run prog0 and capture the execution trace. + if err := traceProgram(t, prog0, "TestAnalyzeAnnotations"); err != nil { + t.Fatalf("failed to trace the program: %v", err) + } + + res, err := analyzeAnnotations() + if err != nil { + t.Fatalf("failed to analyzeAnnotations: %v", err) + } + + // For prog0, we expect + // - task with name = "task0", with three regions. + // - task with name = "task1", with no region. 
+ wantTasks := map[string]struct { + complete bool + goroutines int + regions []string + }{ + "task0": { + complete: true, + goroutines: 2, + regions: []string{"task0.region0", "", "task0.region1", "task0.region2"}, + }, + "task1": { + complete: true, + goroutines: 1, + }, + } + + for _, task := range res.tasks { + want, ok := wantTasks[task.name] + if !ok { + t.Errorf("unexpected task: %s", task) + continue + } + if task.complete() != want.complete || len(task.goroutines) != want.goroutines || !reflect.DeepEqual(regionNames(task), want.regions) { + t.Errorf("got task %v; want %+v", task, want) + } + + delete(wantTasks, task.name) + } + if len(wantTasks) > 0 { + t.Errorf("no more tasks; want %+v", wantTasks) + } + + wantRegions := []string{ + "", // an auto-created region for the goroutine 3 + "taskless.region", + "task0.region0", + "task0.region1", + "task0.region2", + } + var gotRegions []string + for regionID := range res.regions { + gotRegions = append(gotRegions, regionID.Type) + } + + sort.Strings(wantRegions) + sort.Strings(gotRegions) + if !reflect.DeepEqual(gotRegions, wantRegions) { + t.Errorf("got regions %q, want regions %q", gotRegions, wantRegions) + } +} + +// prog1 creates a task hierarchy consisting of three tasks. +func prog1() { + ctx := context.Background() + ctx1, task1 := trace.NewTask(ctx, "task1") + defer task1.End() + trace.WithRegion(ctx1, "task1.region", func() { + ctx2, task2 := trace.NewTask(ctx1, "task2") + defer task2.End() + trace.WithRegion(ctx2, "task2.region", func() { + ctx3, task3 := trace.NewTask(ctx2, "task3") + defer task3.End() + trace.WithRegion(ctx3, "task3.region", func() { + }) + }) + }) +} + +func TestAnalyzeAnnotationTaskTree(t *testing.T) { + // Run prog1 and capture the execution trace. 
+ if err := traceProgram(t, prog1, "TestAnalyzeAnnotationTaskTree"); err != nil { + t.Fatalf("failed to trace the program: %v", err) + } + + res, err := analyzeAnnotations() + if err != nil { + t.Fatalf("failed to analyzeAnnotations: %v", err) + } + tasks := res.tasks + + // For prog0, we expect + // - task with name = "", with taskless.region in regions. + // - task with name = "task0", with three regions. + wantTasks := map[string]struct { + parent string + children []string + regions []string + }{ + "task1": { + parent: "", + children: []string{"task2"}, + regions: []string{"task1.region"}, + }, + "task2": { + parent: "task1", + children: []string{"task3"}, + regions: []string{"task2.region"}, + }, + "task3": { + parent: "task2", + children: nil, + regions: []string{"task3.region"}, + }, + } + + for _, task := range tasks { + want, ok := wantTasks[task.name] + if !ok { + t.Errorf("unexpected task: %s", task) + continue + } + delete(wantTasks, task.name) + + if parentName(task) != want.parent || + !reflect.DeepEqual(childrenNames(task), want.children) || + !reflect.DeepEqual(regionNames(task), want.regions) { + t.Errorf("got %v; want %+v", task, want) + } + } + + if len(wantTasks) > 0 { + t.Errorf("no more tasks; want %+v", wantTasks) + } +} + +// prog2 starts two tasks; "taskWithGC" that overlaps with GC +// and "taskWithoutGC" that doesn't. In order to run this reliably, +// the caller needs to set up to prevent GC from running automatically. +// prog2 returns the upper-bound gc time that overlaps with the first task. 
+func prog2() (gcTime time.Duration) { + ch := make(chan bool) + ctx1, task := trace.NewTask(context.Background(), "taskWithGC") + trace.WithRegion(ctx1, "taskWithGC.region1", func() { + go func() { + defer trace.StartRegion(ctx1, "taskWithGC.region2").End() + <-ch + }() + s := time.Now() + debug.FreeOSMemory() // task1 affected by gc + gcTime = time.Since(s) + close(ch) + }) + task.End() + + ctx2, task2 := trace.NewTask(context.Background(), "taskWithoutGC") + trace.WithRegion(ctx2, "taskWithoutGC.region1", func() { + // do nothing. + }) + task2.End() + return gcTime +} + +func TestAnalyzeAnnotationGC(t *testing.T) { + err := traceProgram(t, func() { + oldGC := debug.SetGCPercent(10000) // gc, and effectively disable GC + defer debug.SetGCPercent(oldGC) + prog2() + }, "TestAnalyzeAnnotationGC") + if err != nil { + t.Fatalf("failed to trace the program: %v", err) + } + + res, err := analyzeAnnotations() + if err != nil { + t.Fatalf("failed to analyzeAnnotations: %v", err) + } + + // Check collected GC Start events are all sorted and non-overlapping. + lastTS := int64(0) + for i, ev := range res.gcEvents { + if ev.Type != traceparser.EvGCStart { + t.Errorf("unwanted event in gcEvents: %v", ev) + } + if i > 0 && lastTS > ev.Ts { + t.Errorf("overlapping GC events:\n%d: %v\n%d: %v", i-1, res.gcEvents[i-1], i, res.gcEvents[i]) + } + if ev.Link != nil { + lastTS = ev.Link.Ts + } + } + + // Check whether only taskWithGC reports overlapping duration. + for _, task := range res.tasks { + got := task.overlappingGCDuration(res.gcEvents) + switch task.name { + case "taskWithoutGC": + if got != 0 { + t.Errorf("%s reported %v as overlapping GC time; want 0: %v", task.name, got, task) + } + case "taskWithGC": + upperBound := task.duration() + // TODO(hyangah): a tighter upper bound is gcTime, but + // use of it will make the test flaky due to the issue + // described in golang.org/issue/16755. 
Tighten the upper + // bound when the issue with the timestamp computed + // based on clockticks is resolved. + if got <= 0 || got > upperBound { + t.Errorf("%s reported %v as overlapping GC time; want (0, %v):\n%v", task.name, got, upperBound, task) + buf := new(bytes.Buffer) + fmt.Fprintln(buf, "GC Events") + for _, ev := range res.gcEvents { + fmt.Fprintf(buf, " %s -> %s\n", ev, ev.Link) + } + fmt.Fprintln(buf, "Events in Task") + for i, ev := range task.events { + fmt.Fprintf(buf, " %d: %s\n", i, ev) + } + + t.Logf("\n%s", buf) + } + } + } +} + +// traceProgram runs the provided function while tracing is enabled, +// parses the captured trace, and sets the global trace loader to +// point to the parsed trace. +// +// If savetraces flag is set, the captured trace will be saved in the named file. +func traceProgram(t *testing.T, f func(), name string) error { + t.Helper() + if goexperiment.ExecTracer2 { + t.Skip("skipping because test programs are covered elsewhere for the new tracer") + } + buf := new(bytes.Buffer) + if err := trace.Start(buf); err != nil { + return err + } + f() + trace.Stop() + + saveTrace(buf, name) + res, err := traceparser.Parse(buf, name+".faketrace") + if err == traceparser.ErrTimeOrder { + t.Skipf("skipping due to golang.org/issue/16755: %v", err) + } else if err != nil { + return err + } + + swapLoaderData(res, err) + return nil +} + +func regionNames(task *taskDesc) (ret []string) { + for _, s := range task.regions { + ret = append(ret, s.Name) + } + return ret +} + +func parentName(task *taskDesc) string { + if task.parent != nil { + return task.parent.name + } + return "" +} + +func childrenNames(task *taskDesc) (ret []string) { + for _, s := range task.children { + ret = append(ret, s.name) + } + return ret +} + +func swapLoaderData(res traceparser.ParseResult, err error) { + // swap loader's data. + parseTrace() // fool loader.once. + + loader.res = res + loader.err = err + + analyzeGoroutines(nil) // fool gsInit once. 
+ gs = traceparser.GoroutineStats(res.Events) + +} + +func saveTrace(buf *bytes.Buffer, name string) { + if !*saveTraces { + return + } + if err := os.WriteFile(name+".trace", buf.Bytes(), 0600); err != nil { + panic(fmt.Errorf("failed to write trace file: %v", err)) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/doc.go b/platform/dbops/binaries/go/go/src/cmd/trace/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..26a96fac2f1e4d6f1a5420412697d624f3568f1d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/doc.go @@ -0,0 +1,41 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Trace is a tool for viewing trace files. + +Trace files can be generated with: + - runtime/trace.Start + - net/http/pprof package + - go test -trace + +Example usage: +Generate a trace file with 'go test': + + go test -trace trace.out pkg + +View the trace in a web browser: + + go tool trace trace.out + +Generate a pprof-like profile from the trace: + + go tool trace -pprof=TYPE trace.out > TYPE.pprof + +Supported profile types are: + - net: network blocking profile + - sync: synchronization blocking profile + - syscall: syscall blocking profile + - sched: scheduler latency profile + +Then, you can use the pprof tool to analyze the profile: + + go tool pprof TYPE.pprof + +Note that while the various profiles available when launching +'go tool trace' work on every browser, the trace viewer itself +(the 'view trace' page) comes from the Chrome/Chromium project +and is only actively tested on that browser. 
+*/ +package main diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/goroutines.go b/platform/dbops/binaries/go/go/src/cmd/trace/goroutines.go new file mode 100644 index 0000000000000000000000000000000000000000..28eace82fa85d12e164869475d0df6ecd461294e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/goroutines.go @@ -0,0 +1,302 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Goroutine-related profiles. + +package main + +import ( + "fmt" + "html/template" + "internal/trace" + "log" + "net/http" + "reflect" + "sort" + "strconv" + "sync" + "time" +) + +func init() { + http.HandleFunc("/goroutines", httpGoroutines) + http.HandleFunc("/goroutine", httpGoroutine) +} + +// gtype describes a group of goroutines grouped by start PC. +type gtype struct { + ID uint64 // Unique identifier (PC). + Name string // Start function. + N int // Total number of goroutines in this group. + ExecTime int64 // Total execution time of all goroutines in this group. +} + +var ( + gsInit sync.Once + gs map[uint64]*trace.GDesc +) + +// analyzeGoroutines generates statistics about execution of all goroutines and stores them in gs. +func analyzeGoroutines(events []*trace.Event) { + gsInit.Do(func() { + gs = trace.GoroutineStats(events) + }) +} + +// httpGoroutines serves list of goroutine groups. +func httpGoroutines(w http.ResponseWriter, r *http.Request) { + events, err := parseEvents() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + analyzeGoroutines(events) + gss := make(map[uint64]gtype) + for _, g := range gs { + gs1 := gss[g.PC] + gs1.ID = g.PC + gs1.Name = g.Name + gs1.N++ + gs1.ExecTime += g.ExecTime + gss[g.PC] = gs1 + } + var glist []gtype + for k, v := range gss { + v.ID = k + // If goroutine didn't run during the trace (no sampled PC), + // the v.ID and v.Name will be zero value. 
+ if v.ID == 0 && v.Name == "" { + v.Name = "(Inactive, no stack trace sampled)" + } + glist = append(glist, v) + } + sort.Slice(glist, func(i, j int) bool { return glist[i].ExecTime > glist[j].ExecTime }) + w.Header().Set("Content-Type", "text/html;charset=utf-8") + if err := templGoroutines.Execute(w, glist); err != nil { + log.Printf("failed to execute template: %v", err) + return + } +} + +var templGoroutines = template.Must(template.New("").Parse(` + + +Goroutines:
    +{{range $}} + {{.Name}} N={{.N}}
    +{{end}} + + +`)) + +// httpGoroutine serves list of goroutines in a particular group. +func httpGoroutine(w http.ResponseWriter, r *http.Request) { + // TODO(hyangah): support format=csv (raw data) + + events, err := parseEvents() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + pc, err := strconv.ParseUint(r.FormValue("id"), 10, 64) + if err != nil { + http.Error(w, fmt.Sprintf("failed to parse id parameter '%v': %v", r.FormValue("id"), err), http.StatusInternalServerError) + return + } + analyzeGoroutines(events) + var ( + glist []*trace.GDesc + name string + totalExecTime, execTime int64 + maxTotalTime int64 + ) + + for _, g := range gs { + totalExecTime += g.ExecTime + + if g.PC != pc { + continue + } + glist = append(glist, g) + name = g.Name + execTime += g.ExecTime + if maxTotalTime < g.TotalTime { + maxTotalTime = g.TotalTime + } + } + + execTimePercent := "" + if totalExecTime > 0 { + execTimePercent = fmt.Sprintf("%.2f%%", float64(execTime)/float64(totalExecTime)*100) + } + + sortby := r.FormValue("sortby") + _, ok := reflect.TypeOf(trace.GDesc{}).FieldByNameFunc(func(s string) bool { + return s == sortby + }) + if !ok { + sortby = "TotalTime" + } + + sort.Slice(glist, func(i, j int) bool { + ival := reflect.ValueOf(glist[i]).Elem().FieldByName(sortby).Int() + jval := reflect.ValueOf(glist[j]).Elem().FieldByName(sortby).Int() + return ival > jval + }) + + err = templGoroutine.Execute(w, struct { + Name string + PC uint64 + N int + ExecTimePercent string + MaxTotal int64 + GList []*trace.GDesc + }{ + Name: name, + PC: pc, + N: len(glist), + ExecTimePercent: execTimePercent, + MaxTotal: maxTotalTime, + GList: glist}) + if err != nil { + http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) + return + } +} + +var templGoroutine = template.Must(template.New("").Funcs(template.FuncMap{ + "prettyDuration": func(nsec int64) template.HTML { + d := time.Duration(nsec) * 
time.Nanosecond + return template.HTML(d.String()) + }, + "percent": func(dividend, divisor int64) template.HTML { + if divisor == 0 { + return "" + } + return template.HTML(fmt.Sprintf("(%.1f%%)", float64(dividend)/float64(divisor)*100)) + }, + "barLen": func(dividend, divisor int64) template.HTML { + if divisor == 0 { + return "0" + } + return template.HTML(fmt.Sprintf("%.2f%%", float64(dividend)/float64(divisor)*100)) + }, + "unknownTime": func(desc *trace.GDesc) int64 { + sum := desc.ExecTime + desc.IOTime + desc.BlockTime + desc.SyscallTime + desc.SchedWaitTime + if sum < desc.TotalTime { + return desc.TotalTime - sum + } + return 0 + }, +}).Parse(` + +Goroutine {{.Name}} + + + + + + + + + + + + +
    Goroutine Name:{{.Name}}
    Number of Goroutines:{{.N}}
    Execution Time:{{.ExecTimePercent}} of total program execution time
    Network Wait Time: graph(download)
    Sync Block Time: graph(download)
    Blocking Syscall Time: graph(download)
    Scheduler Wait Time: graph(download)
    +

    + + + + + + + + + + + + + +{{range .GList}} + + + + + + + + + + + + +{{end}} +
    Goroutine Total Execution Network wait Sync block Blocking syscall Scheduler wait GC sweeping GC pause
    {{.ID}} {{prettyDuration .TotalTime}} +
    + {{if unknownTime .}} {{end}} + {{if .ExecTime}} {{end}} + {{if .IOTime}} {{end}} + {{if .BlockTime}} {{end}} + {{if .SyscallTime}} {{end}} + {{if .SchedWaitTime}} {{end}} +
    +
    {{prettyDuration .ExecTime}} {{prettyDuration .IOTime}} {{prettyDuration .BlockTime}} {{prettyDuration .SyscallTime}} {{prettyDuration .SchedWaitTime}} {{prettyDuration .SweepTime}} {{percent .SweepTime .TotalTime}} {{prettyDuration .GCTime}} {{percent .GCTime .TotalTime}}
    +`)) diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/main.go b/platform/dbops/binaries/go/go/src/cmd/trace/main.go new file mode 100644 index 0000000000000000000000000000000000000000..5f0d6f612b692e666ccdcc1e1dadb42404340551 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/main.go @@ -0,0 +1,248 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "cmd/internal/browser" + cmdv2 "cmd/trace/v2" + "flag" + "fmt" + "internal/trace" + "internal/trace/traceviewer" + "log" + "net" + "net/http" + "os" + "runtime" + "runtime/debug" + "sync" + + _ "net/http/pprof" // Required to use pprof +) + +const usageMessage = "" + + `Usage of 'go tool trace': +Given a trace file produced by 'go test': + go test -trace=trace.out pkg + +Open a web browser displaying trace: + go tool trace [flags] [pkg.test] trace.out + +Generate a pprof-like profile from the trace: + go tool trace -pprof=TYPE [pkg.test] trace.out + +[pkg.test] argument is required for traces produced by Go 1.6 and below. +Go 1.7 does not require the binary argument. + +Supported profile types are: + - net: network blocking profile + - sync: synchronization blocking profile + - syscall: syscall blocking profile + - sched: scheduler latency profile + +Flags: + -http=addr: HTTP service address (e.g., ':6060') + -pprof=type: print a pprof-like profile instead + -d=int: print debug info such as parsed events (1 for high-level, 2 for low-level) + +Note that while the various profiles available when launching +'go tool trace' work on every browser, the trace viewer itself +(the 'view trace' page) comes from the Chrome/Chromium project +and is only actively tested on that browser. 
+` + +var ( + httpFlag = flag.String("http", "localhost:0", "HTTP service address (e.g., ':6060')") + pprofFlag = flag.String("pprof", "", "print a pprof-like profile instead") + debugFlag = flag.Int("d", 0, "print debug information (1 for basic debug info, 2 for lower-level info)") + + // The binary file name, left here for serveSVGProfile. + programBinary string + traceFile string +) + +func main() { + flag.Usage = func() { + fmt.Fprint(os.Stderr, usageMessage) + os.Exit(2) + } + flag.Parse() + + // Go 1.7 traces embed symbol info and does not require the binary. + // But we optionally accept binary as first arg for Go 1.5 traces. + switch flag.NArg() { + case 1: + traceFile = flag.Arg(0) + case 2: + programBinary = flag.Arg(0) + traceFile = flag.Arg(1) + default: + flag.Usage() + } + + if isTraceV2(traceFile) { + if err := cmdv2.Main(traceFile, *httpFlag, *pprofFlag, *debugFlag); err != nil { + dief("%s\n", err) + } + return + } + + var pprofFunc traceviewer.ProfileFunc + switch *pprofFlag { + case "net": + pprofFunc = pprofByGoroutine(computePprofIO) + case "sync": + pprofFunc = pprofByGoroutine(computePprofBlock) + case "syscall": + pprofFunc = pprofByGoroutine(computePprofSyscall) + case "sched": + pprofFunc = pprofByGoroutine(computePprofSched) + } + if pprofFunc != nil { + records, err := pprofFunc(&http.Request{}) + if err != nil { + dief("failed to generate pprof: %v\n", err) + } + if err := traceviewer.BuildProfile(records).Write(os.Stdout); err != nil { + dief("failed to generate pprof: %v\n", err) + } + os.Exit(0) + } + if *pprofFlag != "" { + dief("unknown pprof type %s\n", *pprofFlag) + } + + ln, err := net.Listen("tcp", *httpFlag) + if err != nil { + dief("failed to create server socket: %v\n", err) + } + + log.Print("Parsing trace...") + res, err := parseTrace() + if err != nil { + dief("%v\n", err) + } + + if *debugFlag != 0 { + trace.Print(res.Events) + os.Exit(0) + } + reportMemoryUsage("after parsing trace") + debug.FreeOSMemory() + + 
log.Print("Splitting trace...") + ranges = splitTrace(res) + reportMemoryUsage("after splitting trace") + debug.FreeOSMemory() + + addr := "http://" + ln.Addr().String() + log.Printf("Opening browser. Trace viewer is listening on %s", addr) + browser.Open(addr) + + // Install MMU handler. + http.HandleFunc("/mmu", traceviewer.MMUHandlerFunc(ranges, mutatorUtil)) + + // Install main handler. + http.Handle("/", traceviewer.MainHandler([]traceviewer.View{ + {Type: traceviewer.ViewProc, Ranges: ranges}, + })) + + // Start http server. + err = http.Serve(ln, nil) + dief("failed to start http server: %v\n", err) +} + +// isTraceV2 returns true if filename holds a v2 trace. +func isTraceV2(filename string) bool { + file, err := os.Open(filename) + if err != nil { + return false + } + defer file.Close() + + ver, _, err := trace.ReadVersion(file) + if err != nil { + return false + } + return ver >= 1022 +} + +var ranges []traceviewer.Range + +var loader struct { + once sync.Once + res trace.ParseResult + err error +} + +// parseEvents is a compatibility wrapper that returns only +// the Events part of trace.ParseResult returned by parseTrace. +func parseEvents() ([]*trace.Event, error) { + res, err := parseTrace() + if err != nil { + return nil, err + } + return res.Events, err +} + +func parseTrace() (trace.ParseResult, error) { + loader.once.Do(func() { + tracef, err := os.Open(traceFile) + if err != nil { + loader.err = fmt.Errorf("failed to open trace file: %v", err) + return + } + defer tracef.Close() + + // Parse and symbolize. + res, err := trace.Parse(bufio.NewReader(tracef), programBinary) + if err != nil { + loader.err = fmt.Errorf("failed to parse trace: %v", err) + return + } + loader.res = res + }) + return loader.res, loader.err +} + +func dief(msg string, args ...any) { + fmt.Fprintf(os.Stderr, msg, args...) 
+ os.Exit(1) +} + +var debugMemoryUsage bool + +func init() { + v := os.Getenv("DEBUG_MEMORY_USAGE") + debugMemoryUsage = v != "" +} + +func reportMemoryUsage(msg string) { + if !debugMemoryUsage { + return + } + var s runtime.MemStats + runtime.ReadMemStats(&s) + w := os.Stderr + fmt.Fprintf(w, "%s\n", msg) + fmt.Fprintf(w, " Alloc:\t%d Bytes\n", s.Alloc) + fmt.Fprintf(w, " Sys:\t%d Bytes\n", s.Sys) + fmt.Fprintf(w, " HeapReleased:\t%d Bytes\n", s.HeapReleased) + fmt.Fprintf(w, " HeapSys:\t%d Bytes\n", s.HeapSys) + fmt.Fprintf(w, " HeapInUse:\t%d Bytes\n", s.HeapInuse) + fmt.Fprintf(w, " HeapAlloc:\t%d Bytes\n", s.HeapAlloc) + var dummy string + fmt.Printf("Enter to continue...") + fmt.Scanf("%s", &dummy) +} + +func mutatorUtil(flags trace.UtilFlags) ([][]trace.MutatorUtil, error) { + events, err := parseEvents() + if err != nil { + return nil, err + } + return trace.MutatorUtilization(events, flags), nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/pprof.go b/platform/dbops/binaries/go/go/src/cmd/trace/pprof.go new file mode 100644 index 0000000000000000000000000000000000000000..3722b37ab8da871ef14c62cd8afa70000e3f85ff --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/pprof.go @@ -0,0 +1,263 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Serving of pprof-like profiles. 
+ +package main + +import ( + "fmt" + "internal/trace" + "internal/trace/traceviewer" + "net/http" + "sort" + "strconv" + "time" +) + +func init() { + http.HandleFunc("/io", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofIO))) + http.HandleFunc("/block", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofBlock))) + http.HandleFunc("/syscall", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofSyscall))) + http.HandleFunc("/sched", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofSched))) + + http.HandleFunc("/regionio", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofIO))) + http.HandleFunc("/regionblock", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofBlock))) + http.HandleFunc("/regionsyscall", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofSyscall))) + http.HandleFunc("/regionsched", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofSched))) +} + +// interval represents a time interval in the trace. +type interval struct { + begin, end int64 // nanoseconds. +} + +func pprofByGoroutine(compute computePprofFunc) traceviewer.ProfileFunc { + return func(r *http.Request) ([]traceviewer.ProfileRecord, error) { + id := r.FormValue("id") + events, err := parseEvents() + if err != nil { + return nil, err + } + gToIntervals, err := pprofMatchingGoroutines(id, events) + if err != nil { + return nil, err + } + return compute(gToIntervals, events) + } +} + +func pprofByRegion(compute computePprofFunc) traceviewer.ProfileFunc { + return func(r *http.Request) ([]traceviewer.ProfileRecord, error) { + filter, err := newRegionFilter(r) + if err != nil { + return nil, err + } + gToIntervals, err := pprofMatchingRegions(filter) + if err != nil { + return nil, err + } + events, _ := parseEvents() + + return compute(gToIntervals, events) + } +} + +// pprofMatchingGoroutines parses the goroutine type id string (i.e. 
pc) +// and returns the ids of goroutines of the matching type and its interval. +// If the id string is empty, returns nil without an error. +func pprofMatchingGoroutines(id string, events []*trace.Event) (map[uint64][]interval, error) { + if id == "" { + return nil, nil + } + pc, err := strconv.ParseUint(id, 10, 64) // id is string + if err != nil { + return nil, fmt.Errorf("invalid goroutine type: %v", id) + } + analyzeGoroutines(events) + var res map[uint64][]interval + for _, g := range gs { + if g.PC != pc { + continue + } + if res == nil { + res = make(map[uint64][]interval) + } + endTime := g.EndTime + if g.EndTime == 0 { + endTime = lastTimestamp() // the trace doesn't include the goroutine end event. Use the trace end time. + } + res[g.ID] = []interval{{begin: g.StartTime, end: endTime}} + } + if len(res) == 0 && id != "" { + return nil, fmt.Errorf("failed to find matching goroutines for id: %s", id) + } + return res, nil +} + +// pprofMatchingRegions returns the time intervals of matching regions +// grouped by the goroutine id. If the filter is nil, returns nil without an error. +func pprofMatchingRegions(filter *regionFilter) (map[uint64][]interval, error) { + res, err := analyzeAnnotations() + if err != nil { + return nil, err + } + if filter == nil { + return nil, nil + } + + gToIntervals := make(map[uint64][]interval) + for id, regions := range res.regions { + for _, s := range regions { + if filter.match(id, s) { + gToIntervals[s.G] = append(gToIntervals[s.G], interval{begin: s.firstTimestamp(), end: s.lastTimestamp()}) + } + } + } + + for g, intervals := range gToIntervals { + // in order to remove nested regions and + // consider only the outermost regions, + // first, we sort based on the start time + // and then scan through to select only the outermost regions. 
+ sort.Slice(intervals, func(i, j int) bool { + x := intervals[i].begin + y := intervals[j].begin + if x == y { + return intervals[i].end < intervals[j].end + } + return x < y + }) + var lastTimestamp int64 + var n int + // select only the outermost regions. + for _, i := range intervals { + if lastTimestamp <= i.begin { + intervals[n] = i // new non-overlapping region starts. + lastTimestamp = i.end + n++ + } // otherwise, skip because this region overlaps with a previous region. + } + gToIntervals[g] = intervals[:n] + } + return gToIntervals, nil +} + +type computePprofFunc func(gToIntervals map[uint64][]interval, events []*trace.Event) ([]traceviewer.ProfileRecord, error) + +// computePprofIO generates IO pprof-like profile (time spent in IO wait, currently only network blocking event). +func computePprofIO(gToIntervals map[uint64][]interval, events []*trace.Event) ([]traceviewer.ProfileRecord, error) { + prof := make(map[uint64]traceviewer.ProfileRecord) + for _, ev := range events { + if ev.Type != trace.EvGoBlockNet || ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 { + continue + } + overlapping := pprofOverlappingDuration(gToIntervals, ev) + if overlapping > 0 { + rec := prof[ev.StkID] + rec.Stack = ev.Stk + rec.Count++ + rec.Time += overlapping + prof[ev.StkID] = rec + } + } + return recordsOf(prof), nil +} + +// computePprofBlock generates blocking pprof-like profile (time spent blocked on synchronization primitives). +func computePprofBlock(gToIntervals map[uint64][]interval, events []*trace.Event) ([]traceviewer.ProfileRecord, error) { + prof := make(map[uint64]traceviewer.ProfileRecord) + for _, ev := range events { + switch ev.Type { + case trace.EvGoBlockSend, trace.EvGoBlockRecv, trace.EvGoBlockSelect, + trace.EvGoBlockSync, trace.EvGoBlockCond, trace.EvGoBlockGC: + // TODO(hyangah): figure out why EvGoBlockGC should be here. + // EvGoBlockGC indicates the goroutine blocks on GC assist, not + // on synchronization primitives. 
+ default: + continue + } + if ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 { + continue + } + overlapping := pprofOverlappingDuration(gToIntervals, ev) + if overlapping > 0 { + rec := prof[ev.StkID] + rec.Stack = ev.Stk + rec.Count++ + rec.Time += overlapping + prof[ev.StkID] = rec + } + } + return recordsOf(prof), nil +} + +// computePprofSyscall generates syscall pprof-like profile (time spent blocked in syscalls). +func computePprofSyscall(gToIntervals map[uint64][]interval, events []*trace.Event) ([]traceviewer.ProfileRecord, error) { + prof := make(map[uint64]traceviewer.ProfileRecord) + for _, ev := range events { + if ev.Type != trace.EvGoSysCall || ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 { + continue + } + overlapping := pprofOverlappingDuration(gToIntervals, ev) + if overlapping > 0 { + rec := prof[ev.StkID] + rec.Stack = ev.Stk + rec.Count++ + rec.Time += overlapping + prof[ev.StkID] = rec + } + } + return recordsOf(prof), nil +} + +// computePprofSched generates scheduler latency pprof-like profile +// (time between a goroutine become runnable and actually scheduled for execution). +func computePprofSched(gToIntervals map[uint64][]interval, events []*trace.Event) ([]traceviewer.ProfileRecord, error) { + prof := make(map[uint64]traceviewer.ProfileRecord) + for _, ev := range events { + if (ev.Type != trace.EvGoUnblock && ev.Type != trace.EvGoCreate) || + ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 { + continue + } + overlapping := pprofOverlappingDuration(gToIntervals, ev) + if overlapping > 0 { + rec := prof[ev.StkID] + rec.Stack = ev.Stk + rec.Count++ + rec.Time += overlapping + prof[ev.StkID] = rec + } + } + return recordsOf(prof), nil +} + +// pprofOverlappingDuration returns the overlapping duration between +// the time intervals in gToIntervals and the specified event. +// If gToIntervals is nil, this simply returns the event's duration. 
+func pprofOverlappingDuration(gToIntervals map[uint64][]interval, ev *trace.Event) time.Duration { + if gToIntervals == nil { // No filtering. + return time.Duration(ev.Link.Ts-ev.Ts) * time.Nanosecond + } + intervals := gToIntervals[ev.G] + if len(intervals) == 0 { + return 0 + } + + var overlapping time.Duration + for _, i := range intervals { + if o := overlappingDuration(i.begin, i.end, ev.Ts, ev.Link.Ts); o > 0 { + overlapping += o + } + } + return overlapping +} + +func recordsOf(records map[uint64]traceviewer.ProfileRecord) []traceviewer.ProfileRecord { + result := make([]traceviewer.ProfileRecord, 0, len(records)) + for _, record := range records { + result = append(result, record) + } + return result +} diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/trace.go b/platform/dbops/binaries/go/go/src/cmd/trace/trace.go new file mode 100644 index 0000000000000000000000000000000000000000..438b8dd32853769700a376834d804ae41ed82223 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/trace.go @@ -0,0 +1,810 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "internal/trace" + "internal/trace/traceviewer" + "log" + "math" + "net/http" + "runtime/debug" + "sort" + "strconv" + "time" + + "internal/trace/traceviewer/format" +) + +func init() { + http.HandleFunc("/trace", httpTrace) + http.HandleFunc("/jsontrace", httpJsonTrace) + http.Handle("/static/", traceviewer.StaticHandler()) +} + +// httpTrace serves either whole trace (goid==0) or trace for goid goroutine. +func httpTrace(w http.ResponseWriter, r *http.Request) { + _, err := parseTrace() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + traceviewer.TraceHandler().ServeHTTP(w, r) +} + +// httpJsonTrace serves json trace, requested from within templTrace HTML. 
+func httpJsonTrace(w http.ResponseWriter, r *http.Request) { + defer debug.FreeOSMemory() + defer reportMemoryUsage("after httpJsonTrace") + // This is an AJAX handler, so instead of http.Error we use log.Printf to log errors. + res, err := parseTrace() + if err != nil { + log.Printf("failed to parse trace: %v", err) + return + } + + params := &traceParams{ + parsed: res, + endTime: math.MaxInt64, + } + + if goids := r.FormValue("goid"); goids != "" { + // If goid argument is present, we are rendering a trace for this particular goroutine. + goid, err := strconv.ParseUint(goids, 10, 64) + if err != nil { + log.Printf("failed to parse goid parameter %q: %v", goids, err) + return + } + analyzeGoroutines(res.Events) + g, ok := gs[goid] + if !ok { + log.Printf("failed to find goroutine %d", goid) + return + } + params.mode = traceviewer.ModeGoroutineOriented + params.startTime = g.StartTime + if g.EndTime != 0 { + params.endTime = g.EndTime + } else { // The goroutine didn't end. + params.endTime = lastTimestamp() + } + params.maing = goid + params.gs = trace.RelatedGoroutines(res.Events, goid) + } else if taskids := r.FormValue("taskid"); taskids != "" { + taskid, err := strconv.ParseUint(taskids, 10, 64) + if err != nil { + log.Printf("failed to parse taskid parameter %q: %v", taskids, err) + return + } + annotRes, _ := analyzeAnnotations() + task, ok := annotRes.tasks[taskid] + if !ok || len(task.events) == 0 { + log.Printf("failed to find task with id %d", taskid) + return + } + goid := task.events[0].G + params.mode = traceviewer.ModeGoroutineOriented | traceviewer.ModeTaskOriented + params.startTime = task.firstTimestamp() - 1 + params.endTime = task.lastTimestamp() + 1 + params.maing = goid + params.tasks = task.descendants() + gs := map[uint64]bool{} + for _, t := range params.tasks { + // find only directly involved goroutines + for k, v := range t.RelatedGoroutines(res.Events, 0) { + gs[k] = v + } + } + params.gs = gs + } else if taskids := 
r.FormValue("focustask"); taskids != "" { + taskid, err := strconv.ParseUint(taskids, 10, 64) + if err != nil { + log.Printf("failed to parse focustask parameter %q: %v", taskids, err) + return + } + annotRes, _ := analyzeAnnotations() + task, ok := annotRes.tasks[taskid] + if !ok || len(task.events) == 0 { + log.Printf("failed to find task with id %d", taskid) + return + } + params.mode = traceviewer.ModeTaskOriented + params.startTime = task.firstTimestamp() - 1 + params.endTime = task.lastTimestamp() + 1 + params.tasks = task.descendants() + } + + start := int64(0) + end := int64(math.MaxInt64) + if startStr, endStr := r.FormValue("start"), r.FormValue("end"); startStr != "" && endStr != "" { + // If start/end arguments are present, we are rendering a range of the trace. + start, err = strconv.ParseInt(startStr, 10, 64) + if err != nil { + log.Printf("failed to parse start parameter %q: %v", startStr, err) + return + } + end, err = strconv.ParseInt(endStr, 10, 64) + if err != nil { + log.Printf("failed to parse end parameter %q: %v", endStr, err) + return + } + } + + c := traceviewer.ViewerDataTraceConsumer(w, start, end) + if err := generateTrace(params, c); err != nil { + log.Printf("failed to generate trace: %v", err) + return + } +} + +// splitTrace splits the trace into a number of ranges, +// each resulting in approx 100MB of json output +// (trace viewer can hardly handle more). 
+func splitTrace(res trace.ParseResult) []traceviewer.Range { + params := &traceParams{ + parsed: res, + endTime: math.MaxInt64, + } + s, c := traceviewer.SplittingTraceConsumer(100 << 20) // 100M + if err := generateTrace(params, c); err != nil { + dief("%v\n", err) + } + return s.Ranges +} + +type traceParams struct { + parsed trace.ParseResult + mode traceviewer.Mode + startTime int64 + endTime int64 + maing uint64 // for goroutine-oriented view, place this goroutine on the top row + gs map[uint64]bool // Goroutines to be displayed for goroutine-oriented or task-oriented view + tasks []*taskDesc // Tasks to be displayed. tasks[0] is the top-most task +} + +type traceContext struct { + *traceParams + consumer traceviewer.TraceConsumer + emitter *traceviewer.Emitter + arrowSeq uint64 + gcount uint64 + regionID int // last emitted region id. incremented in each emitRegion call. +} + +type gInfo struct { + state traceviewer.GState // current state + name string // name chosen for this goroutine at first EvGoStart + isSystemG bool + start *trace.Event // most recent EvGoStart + markAssist *trace.Event // if non-nil, the mark assist currently running. +} + +type NameArg struct { + Name string `json:"name"` +} + +type TaskArg struct { + ID uint64 `json:"id"` + StartG uint64 `json:"start_g,omitempty"` + EndG uint64 `json:"end_g,omitempty"` +} + +type RegionArg struct { + TaskID uint64 `json:"taskid,omitempty"` +} + +type SortIndexArg struct { + Index int `json:"sort_index"` +} + +// generateTrace generates json trace for trace-viewer: +// https://github.com/google/trace-viewer +// Trace format is described at: +// https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/view +// If mode==goroutineMode, generate trace for goroutine goid, otherwise whole trace. +// startTime, endTime determine part of the trace that we are interested in. +// gset restricts goroutines that are included in the resulting trace. 
+func generateTrace(params *traceParams, consumer traceviewer.TraceConsumer) error { + emitter := traceviewer.NewEmitter( + consumer, + time.Duration(params.startTime), + time.Duration(params.endTime), + ) + if params.mode&traceviewer.ModeGoroutineOriented != 0 { + emitter.SetResourceType("G") + } else { + emitter.SetResourceType("PROCS") + } + defer emitter.Flush() + + ctx := &traceContext{traceParams: params, emitter: emitter} + ctx.consumer = consumer + + maxProc := 0 + ginfos := make(map[uint64]*gInfo) + stacks := params.parsed.Stacks + + getGInfo := func(g uint64) *gInfo { + info, ok := ginfos[g] + if !ok { + info = &gInfo{} + ginfos[g] = info + } + return info + } + + // Since we make many calls to setGState, we record a sticky + // error in setGStateErr and check it after every event. + var setGStateErr error + setGState := func(ev *trace.Event, g uint64, oldState, newState traceviewer.GState) { + info := getGInfo(g) + if oldState == traceviewer.GWaiting && info.state == traceviewer.GWaitingGC { + // For checking, traceviewer.GWaiting counts as any traceviewer.GWaiting*. + oldState = info.state + } + if info.state != oldState && setGStateErr == nil { + setGStateErr = fmt.Errorf("expected G %d to be in state %d, but got state %d", g, oldState, info.state) + } + + emitter.GoroutineTransition(time.Duration(ev.Ts), info.state, newState) + info.state = newState + } + + for _, ev := range ctx.parsed.Events { + // Handle state transitions before we filter out events. 
+ switch ev.Type { + case trace.EvGoStart, trace.EvGoStartLabel: + setGState(ev, ev.G, traceviewer.GRunnable, traceviewer.GRunning) + info := getGInfo(ev.G) + info.start = ev + case trace.EvProcStart: + emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateRunning, 1) + case trace.EvProcStop: + emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateRunning, -1) + case trace.EvGoCreate: + newG := ev.Args[0] + info := getGInfo(newG) + if info.name != "" { + return fmt.Errorf("duplicate go create event for go id=%d detected at offset %d", newG, ev.Off) + } + + stk, ok := stacks[ev.Args[1]] + if !ok || len(stk) == 0 { + return fmt.Errorf("invalid go create event: missing stack information for go id=%d at offset %d", newG, ev.Off) + } + + fname := stk[0].Fn + info.name = fmt.Sprintf("G%v %s", newG, fname) + info.isSystemG = trace.IsSystemGoroutine(fname) + + ctx.gcount++ + setGState(ev, newG, traceviewer.GDead, traceviewer.GRunnable) + case trace.EvGoEnd: + ctx.gcount-- + setGState(ev, ev.G, traceviewer.GRunning, traceviewer.GDead) + case trace.EvGoUnblock: + setGState(ev, ev.Args[0], traceviewer.GWaiting, traceviewer.GRunnable) + case trace.EvGoSysExit: + setGState(ev, ev.G, traceviewer.GWaiting, traceviewer.GRunnable) + if getGInfo(ev.G).isSystemG { + emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateInSyscallRuntime, -1) + } else { + emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateInSyscall, -1) + } + case trace.EvGoSysBlock: + setGState(ev, ev.G, traceviewer.GRunning, traceviewer.GWaiting) + if getGInfo(ev.G).isSystemG { + emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateInSyscallRuntime, 1) + } else { + emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateInSyscall, 1) + } + case trace.EvGoSched, trace.EvGoPreempt: + setGState(ev, ev.G, traceviewer.GRunning, traceviewer.GRunnable) + case trace.EvGoStop, + trace.EvGoSleep, trace.EvGoBlock, 
trace.EvGoBlockSend, trace.EvGoBlockRecv, + trace.EvGoBlockSelect, trace.EvGoBlockSync, trace.EvGoBlockCond, trace.EvGoBlockNet: + setGState(ev, ev.G, traceviewer.GRunning, traceviewer.GWaiting) + case trace.EvGoBlockGC: + setGState(ev, ev.G, traceviewer.GRunning, traceviewer.GWaitingGC) + case trace.EvGCMarkAssistStart: + getGInfo(ev.G).markAssist = ev + case trace.EvGCMarkAssistDone: + getGInfo(ev.G).markAssist = nil + case trace.EvGoWaiting: + setGState(ev, ev.G, traceviewer.GRunnable, traceviewer.GWaiting) + case trace.EvGoInSyscall: + // Cancel out the effect of EvGoCreate at the beginning. + setGState(ev, ev.G, traceviewer.GRunnable, traceviewer.GWaiting) + if getGInfo(ev.G).isSystemG { + emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateInSyscallRuntime, 1) + } else { + emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateInSyscall, 1) + } + case trace.EvHeapAlloc: + emitter.HeapAlloc(time.Duration(ev.Ts), ev.Args[0]) + case trace.EvHeapGoal: + emitter.HeapGoal(time.Duration(ev.Ts), ev.Args[0]) + } + if setGStateErr != nil { + return setGStateErr + } + + if err := emitter.Err(); err != nil { + return fmt.Errorf("invalid state after processing %v: %s", ev, err) + } + + // Ignore events that are from uninteresting goroutines + // or outside of the interesting timeframe. + if ctx.gs != nil && ev.P < trace.FakeP && !ctx.gs[ev.G] { + continue + } + if !withinTimeRange(ev, ctx.startTime, ctx.endTime) { + continue + } + + if ev.P < trace.FakeP && ev.P > maxProc { + maxProc = ev.P + } + + // Emit trace objects. 
+ switch ev.Type { + case trace.EvProcStart: + if ctx.mode&traceviewer.ModeGoroutineOriented != 0 { + continue + } + ctx.emitInstant(ev, "proc start", "") + case trace.EvProcStop: + if ctx.mode&traceviewer.ModeGoroutineOriented != 0 { + continue + } + ctx.emitInstant(ev, "proc stop", "") + case trace.EvGCStart: + ctx.emitSlice(ev, "GC") + case trace.EvGCDone: + case trace.EvSTWStart: + if ctx.mode&traceviewer.ModeGoroutineOriented != 0 { + continue + } + ctx.emitSlice(ev, fmt.Sprintf("STW (%s)", ev.SArgs[0])) + case trace.EvSTWDone: + case trace.EvGCMarkAssistStart: + // Mark assists can continue past preemptions, so truncate to the + // whichever comes first. We'll synthesize another slice if + // necessary in EvGoStart. + markFinish := ev.Link + goFinish := getGInfo(ev.G).start.Link + fakeMarkStart := *ev + text := "MARK ASSIST" + if markFinish == nil || markFinish.Ts > goFinish.Ts { + fakeMarkStart.Link = goFinish + text = "MARK ASSIST (unfinished)" + } + ctx.emitSlice(&fakeMarkStart, text) + case trace.EvGCSweepStart: + slice := ctx.makeSlice(ev, "SWEEP") + if done := ev.Link; done != nil && done.Args[0] != 0 { + slice.Arg = struct { + Swept uint64 `json:"Swept bytes"` + Reclaimed uint64 `json:"Reclaimed bytes"` + }{done.Args[0], done.Args[1]} + } + ctx.emit(slice) + case trace.EvGoStart, trace.EvGoStartLabel: + info := getGInfo(ev.G) + if ev.Type == trace.EvGoStartLabel { + ctx.emitSlice(ev, ev.SArgs[0]) + } else { + ctx.emitSlice(ev, info.name) + } + if info.markAssist != nil { + // If we're in a mark assist, synthesize a new slice, ending + // either when the mark assist ends or when we're descheduled. 
+ markFinish := info.markAssist.Link + goFinish := ev.Link + fakeMarkStart := *ev + text := "MARK ASSIST (resumed, unfinished)" + if markFinish != nil && markFinish.Ts < goFinish.Ts { + fakeMarkStart.Link = markFinish + text = "MARK ASSIST (resumed)" + } + ctx.emitSlice(&fakeMarkStart, text) + } + case trace.EvGoCreate: + ctx.emitArrow(ev, "go") + case trace.EvGoUnblock: + ctx.emitArrow(ev, "unblock") + case trace.EvGoSysCall: + ctx.emitInstant(ev, "syscall", "") + case trace.EvGoSysExit: + ctx.emitArrow(ev, "sysexit") + case trace.EvUserLog: + ctx.emitInstant(ev, formatUserLog(ev), "user event") + case trace.EvUserTaskCreate: + ctx.emitInstant(ev, "task start", "user event") + case trace.EvUserTaskEnd: + ctx.emitInstant(ev, "task end", "user event") + case trace.EvCPUSample: + if ev.P >= 0 { + // only show in this UI when there's an associated P + ctx.emitInstant(ev, "CPU profile sample", "") + } + } + } + + // Display task and its regions if we are in task-oriented presentation mode. + if ctx.mode&traceviewer.ModeTaskOriented != 0 { + // sort tasks based on the task start time. + sortedTask := make([]*taskDesc, len(ctx.tasks)) + copy(sortedTask, ctx.tasks) + sort.SliceStable(sortedTask, func(i, j int) bool { + ti, tj := sortedTask[i], sortedTask[j] + if ti.firstTimestamp() == tj.firstTimestamp() { + return ti.lastTimestamp() < tj.lastTimestamp() + } + return ti.firstTimestamp() < tj.firstTimestamp() + }) + + for i, task := range sortedTask { + ctx.emitTask(task, i) + + // If we are in goroutine-oriented mode, we draw regions. + // TODO(hyangah): add this for task/P-oriented mode (i.e., focustask view) too. + if ctx.mode&traceviewer.ModeGoroutineOriented != 0 { + for _, s := range task.regions { + ctx.emitRegion(s) + } + } + } + } + + // Display goroutine rows if we are either in goroutine-oriented mode. 
+ if ctx.mode&traceviewer.ModeGoroutineOriented != 0 { + for k, v := range ginfos { + if !ctx.gs[k] { + continue + } + emitter.Resource(k, v.name) + } + emitter.Focus(ctx.maing) + + // Row for GC or global state (specified with G=0) + ctx.emitFooter(&format.Event{Name: "thread_sort_index", Phase: "M", PID: format.ProcsSection, TID: 0, Arg: &SortIndexArg{-1}}) + } else { + // Display rows for Ps if we are in the default trace view mode. + for i := 0; i <= maxProc; i++ { + emitter.Resource(uint64(i), fmt.Sprintf("Proc %v", i)) + } + } + + return nil +} + +func (ctx *traceContext) emit(e *format.Event) { + ctx.consumer.ConsumeViewerEvent(e, false) +} + +func (ctx *traceContext) emitFooter(e *format.Event) { + ctx.consumer.ConsumeViewerEvent(e, true) +} +func (ctx *traceContext) time(ev *trace.Event) float64 { + // Trace viewer wants timestamps in microseconds. + return float64(ev.Ts) / 1000 +} + +func withinTimeRange(ev *trace.Event, s, e int64) bool { + if evEnd := ev.Link; evEnd != nil { + return ev.Ts <= e && evEnd.Ts >= s + } + return ev.Ts >= s && ev.Ts <= e +} + +func tsWithinRange(ts, s, e int64) bool { + return s <= ts && ts <= e +} + +func (ctx *traceContext) proc(ev *trace.Event) uint64 { + if ctx.mode&traceviewer.ModeGoroutineOriented != 0 && ev.P < trace.FakeP { + return ev.G + } else { + return uint64(ev.P) + } +} + +func (ctx *traceContext) emitSlice(ev *trace.Event, name string) { + ctx.emit(ctx.makeSlice(ev, name)) +} + +func (ctx *traceContext) makeSlice(ev *trace.Event, name string) *format.Event { + // If ViewerEvent.Dur is not a positive value, + // trace viewer handles it as a non-terminating time interval. + // Avoid it by setting the field with a small value. 
+ durationUsec := ctx.time(ev.Link) - ctx.time(ev) + if ev.Link.Ts-ev.Ts <= 0 { + durationUsec = 0.0001 // 0.1 nanoseconds + } + sl := &format.Event{ + Name: name, + Phase: "X", + Time: ctx.time(ev), + Dur: durationUsec, + TID: ctx.proc(ev), + Stack: ctx.emitter.Stack(ev.Stk), + EndStack: ctx.emitter.Stack(ev.Link.Stk), + } + + // grey out non-overlapping events if the event is not a global event (ev.G == 0) + if ctx.mode&traceviewer.ModeTaskOriented != 0 && ev.G != 0 { + // include P information. + if t := ev.Type; t == trace.EvGoStart || t == trace.EvGoStartLabel { + type Arg struct { + P int + } + sl.Arg = &Arg{P: ev.P} + } + // grey out non-overlapping events. + overlapping := false + for _, task := range ctx.tasks { + if _, overlapped := task.overlappingDuration(ev); overlapped { + overlapping = true + break + } + } + if !overlapping { + sl.Cname = colorLightGrey + } + } + return sl +} + +func (ctx *traceContext) emitTask(task *taskDesc, sortIndex int) { + taskRow := uint64(task.id) + taskName := task.name + durationUsec := float64(task.lastTimestamp()-task.firstTimestamp()) / 1e3 + + ctx.emitter.Task(taskRow, taskName, sortIndex) + ts := float64(task.firstTimestamp()) / 1e3 + sl := &format.Event{ + Name: taskName, + Phase: "X", + Time: ts, + Dur: durationUsec, + PID: format.TasksSection, + TID: taskRow, + Cname: pickTaskColor(task.id), + } + targ := TaskArg{ID: task.id} + if task.create != nil { + sl.Stack = ctx.emitter.Stack(task.create.Stk) + targ.StartG = task.create.G + } + if task.end != nil { + sl.EndStack = ctx.emitter.Stack(task.end.Stk) + targ.EndG = task.end.G + } + sl.Arg = targ + ctx.emit(sl) + + if task.create != nil && task.create.Type == trace.EvUserTaskCreate && task.create.Args[1] != 0 { + ctx.arrowSeq++ + ctx.emit(&format.Event{Name: "newTask", Phase: "s", TID: task.create.Args[1], ID: ctx.arrowSeq, Time: ts, PID: format.TasksSection}) + ctx.emit(&format.Event{Name: "newTask", Phase: "t", TID: taskRow, ID: ctx.arrowSeq, Time: ts, PID: 
format.TasksSection}) + } +} + +func (ctx *traceContext) emitRegion(s regionDesc) { + if s.Name == "" { + return + } + + if !tsWithinRange(s.firstTimestamp(), ctx.startTime, ctx.endTime) && + !tsWithinRange(s.lastTimestamp(), ctx.startTime, ctx.endTime) { + return + } + + ctx.regionID++ + regionID := ctx.regionID + + id := s.TaskID + scopeID := fmt.Sprintf("%x", id) + name := s.Name + + sl0 := &format.Event{ + Category: "Region", + Name: name, + Phase: "b", + Time: float64(s.firstTimestamp()) / 1e3, + TID: s.G, // only in goroutine-oriented view + ID: uint64(regionID), + Scope: scopeID, + Cname: pickTaskColor(s.TaskID), + } + if s.Start != nil { + sl0.Stack = ctx.emitter.Stack(s.Start.Stk) + } + ctx.emit(sl0) + + sl1 := &format.Event{ + Category: "Region", + Name: name, + Phase: "e", + Time: float64(s.lastTimestamp()) / 1e3, + TID: s.G, + ID: uint64(regionID), + Scope: scopeID, + Cname: pickTaskColor(s.TaskID), + Arg: RegionArg{TaskID: s.TaskID}, + } + if s.End != nil { + sl1.Stack = ctx.emitter.Stack(s.End.Stk) + } + ctx.emit(sl1) +} + +func (ctx *traceContext) emitInstant(ev *trace.Event, name, category string) { + if !tsWithinRange(ev.Ts, ctx.startTime, ctx.endTime) { + return + } + + cname := "" + if ctx.mode&traceviewer.ModeTaskOriented != 0 { + taskID, isUserAnnotation := isUserAnnotationEvent(ev) + + show := false + for _, task := range ctx.tasks { + if isUserAnnotation && task.id == taskID || task.overlappingInstant(ev) { + show = true + break + } + } + // grey out or skip if non-overlapping instant. + if !show { + if isUserAnnotation { + return // don't display unrelated user annotation events. 
+ } + cname = colorLightGrey + } + } + var arg any + if ev.Type == trace.EvProcStart { + type Arg struct { + ThreadID uint64 + } + arg = &Arg{ev.Args[0]} + } + ctx.emit(&format.Event{ + Name: name, + Category: category, + Phase: "I", + Scope: "t", + Time: ctx.time(ev), + TID: ctx.proc(ev), + Stack: ctx.emitter.Stack(ev.Stk), + Cname: cname, + Arg: arg}) +} + +func (ctx *traceContext) emitArrow(ev *trace.Event, name string) { + if ev.Link == nil { + // The other end of the arrow is not captured in the trace. + // For example, a goroutine was unblocked but was not scheduled before trace stop. + return + } + if ctx.mode&traceviewer.ModeGoroutineOriented != 0 && (!ctx.gs[ev.Link.G] || ev.Link.Ts < ctx.startTime || ev.Link.Ts > ctx.endTime) { + return + } + + if ev.P == trace.NetpollP || ev.P == trace.TimerP || ev.P == trace.SyscallP { + // Trace-viewer discards arrows if they don't start/end inside of a slice or instant. + // So emit a fake instant at the start of the arrow. + ctx.emitInstant(&trace.Event{P: ev.P, Ts: ev.Ts}, "unblock", "") + } + + color := "" + if ctx.mode&traceviewer.ModeTaskOriented != 0 { + overlapping := false + // skip non-overlapping arrows. + for _, task := range ctx.tasks { + if _, overlapped := task.overlappingDuration(ev); overlapped { + overlapping = true + break + } + } + if !overlapping { + return + } + } + + ctx.arrowSeq++ + ctx.emit(&format.Event{Name: name, Phase: "s", TID: ctx.proc(ev), ID: ctx.arrowSeq, Time: ctx.time(ev), Stack: ctx.emitter.Stack(ev.Stk), Cname: color}) + ctx.emit(&format.Event{Name: name, Phase: "t", TID: ctx.proc(ev.Link), ID: ctx.arrowSeq, Time: ctx.time(ev.Link), Cname: color}) +} + +// firstTimestamp returns the timestamp of the first event record. +func firstTimestamp() int64 { + res, _ := parseTrace() + if len(res.Events) > 0 { + return res.Events[0].Ts + } + return 0 +} + +// lastTimestamp returns the timestamp of the last event record. 
+func lastTimestamp() int64 { + res, _ := parseTrace() + if n := len(res.Events); n > 1 { + return res.Events[n-1].Ts + } + return 0 +} + +// Mapping from more reasonable color names to the reserved color names in +// https://github.com/catapult-project/catapult/blob/master/tracing/tracing/base/color_scheme.html#L50 +// The chrome trace viewer allows only those as cname values. +const ( + colorLightMauve = "thread_state_uninterruptible" // 182, 125, 143 + colorOrange = "thread_state_iowait" // 255, 140, 0 + colorSeafoamGreen = "thread_state_running" // 126, 200, 148 + colorVistaBlue = "thread_state_runnable" // 133, 160, 210 + colorTan = "thread_state_unknown" // 199, 155, 125 + colorIrisBlue = "background_memory_dump" // 0, 180, 180 + colorMidnightBlue = "light_memory_dump" // 0, 0, 180 + colorDeepMagenta = "detailed_memory_dump" // 180, 0, 180 + colorBlue = "vsync_highlight_color" // 0, 0, 255 + colorGrey = "generic_work" // 125, 125, 125 + colorGreen = "good" // 0, 125, 0 + colorDarkGoldenrod = "bad" // 180, 125, 0 + colorPeach = "terrible" // 180, 0, 0 + colorBlack = "black" // 0, 0, 0 + colorLightGrey = "grey" // 221, 221, 221 + colorWhite = "white" // 255, 255, 255 + colorYellow = "yellow" // 255, 255, 0 + colorOlive = "olive" // 100, 100, 0 + colorCornflowerBlue = "rail_response" // 67, 135, 253 + colorSunsetOrange = "rail_animation" // 244, 74, 63 + colorTangerine = "rail_idle" // 238, 142, 0 + colorShamrockGreen = "rail_load" // 13, 168, 97 + colorGreenishYellow = "startup" // 230, 230, 0 + colorDarkGrey = "heap_dump_stack_frame" // 128, 128, 128 + colorTawny = "heap_dump_child_node_arrow" // 204, 102, 0 + colorLemon = "cq_build_running" // 255, 255, 119 + colorLime = "cq_build_passed" // 153, 238, 102 + colorPink = "cq_build_failed" // 238, 136, 136 + colorSilver = "cq_build_abandoned" // 187, 187, 187 + colorManzGreen = "cq_build_attempt_runnig" // 222, 222, 75 + colorKellyGreen = "cq_build_attempt_passed" // 108, 218, 35 + colorAnotherGrey = 
"cq_build_attempt_failed" // 187, 187, 187 +) + +var colorForTask = []string{ + colorLightMauve, + colorOrange, + colorSeafoamGreen, + colorVistaBlue, + colorTan, + colorMidnightBlue, + colorIrisBlue, + colorDeepMagenta, + colorGreen, + colorDarkGoldenrod, + colorPeach, + colorOlive, + colorCornflowerBlue, + colorSunsetOrange, + colorTangerine, + colorShamrockGreen, + colorTawny, + colorLemon, + colorLime, + colorPink, + colorSilver, + colorManzGreen, + colorKellyGreen, +} + +func pickTaskColor(id uint64) string { + idx := id % uint64(len(colorForTask)) + return colorForTask[idx] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/trace_test.go b/platform/dbops/binaries/go/go/src/cmd/trace/trace_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d315fad47179f56771961e95ea2a635470a03655 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/trace_test.go @@ -0,0 +1,270 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !js + +package main + +import ( + "context" + "internal/trace" + "internal/trace/traceviewer" + "internal/trace/traceviewer/format" + "io" + rtrace "runtime/trace" + "strings" + "sync" + "testing" + "time" +) + +// stacks is a fake stack map populated for test. +type stacks map[uint64][]*trace.Frame + +// add adds a stack with a single frame whose Fn field is +// set to the provided fname and returns a unique stack id. +func (s *stacks) add(fname string) uint64 { + if *s == nil { + *s = make(map[uint64][]*trace.Frame) + } + + id := uint64(len(*s)) + (*s)[id] = []*trace.Frame{{Fn: fname}} + return id +} + +// TestGoroutineCount tests runnable/running goroutine counts computed by generateTrace +// remain in the valid range. +// - the counts must not be negative. generateTrace will return an error. +// - the counts must not include goroutines blocked waiting on channels or in syscall. 
+func TestGoroutineCount(t *testing.T) { + w := trace.NewWriter() + w.Emit(trace.EvBatch, 0, 0) // start of per-P batch event [pid, timestamp] + w.Emit(trace.EvFrequency, 1) // [ticks per second] + + var s stacks + + // In this test, we assume a valid trace contains EvGoWaiting or EvGoInSyscall + // event for every blocked goroutine. + + // goroutine 10: blocked + w.Emit(trace.EvGoCreate, 1, 10, s.add("pkg.f1"), s.add("main.f1")) // [timestamp, new goroutine id, new stack id, stack id] + w.Emit(trace.EvGoWaiting, 1, 10) // [timestamp, goroutine id] + + // goroutine 20: in syscall + w.Emit(trace.EvGoCreate, 1, 20, s.add("pkg.f2"), s.add("main.f2")) + w.Emit(trace.EvGoInSyscall, 1, 20) // [timestamp, goroutine id] + + // goroutine 30: runnable + w.Emit(trace.EvGoCreate, 1, 30, s.add("pkg.f3"), s.add("main.f3")) + + w.Emit(trace.EvProcStart, 2, 0) // [timestamp, thread id] + + // goroutine 40: runnable->running->runnable + w.Emit(trace.EvGoCreate, 1, 40, s.add("pkg.f4"), s.add("main.f4")) + w.Emit(trace.EvGoStartLocal, 1, 40) // [timestamp, goroutine id] + w.Emit(trace.EvGoSched, 1, s.add("main.f4")) // [timestamp, stack] + + res, err := trace.Parse(w, "") + if err != nil { + t.Fatalf("failed to parse test trace: %v", err) + } + res.Stacks = s // use fake stacks. + + params := &traceParams{ + parsed: res, + endTime: int64(1<<63 - 1), + } + + // Use the default viewerDataTraceConsumer but replace + // consumeViewerEvent to intercept the ViewerEvents for testing. + c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1) + c.ConsumeViewerEvent = func(ev *format.Event, _ bool) { + if ev.Name == "Goroutines" { + cnt := ev.Arg.(*format.GoroutineCountersArg) + if cnt.Runnable+cnt.Running > 2 { + t.Errorf("goroutine count=%+v; want no more than 2 goroutines in runnable/running state", cnt) + } + t.Logf("read %+v %+v", ev, cnt) + } + } + + // If the counts drop below 0, generateTrace will return an error. 
+ if err := generateTrace(params, c); err != nil { + t.Fatalf("generateTrace failed: %v", err) + } +} + +func TestGoroutineFilter(t *testing.T) { + // Test that we handle state changes to selected goroutines + // caused by events on goroutines that are not selected. + + var s stacks + + w := trace.NewWriter() + w.Emit(trace.EvBatch, 0, 0) // start of per-P batch event [pid, timestamp] + w.Emit(trace.EvFrequency, 1) // [ticks per second] + + // goroutine 10: blocked + w.Emit(trace.EvGoCreate, 1, 10, s.add("pkg.f1"), s.add("main.f1")) // [timestamp, new goroutine id, new stack id, stack id] + w.Emit(trace.EvGoWaiting, 1, 10) // [timestamp, goroutine id] + + // goroutine 20: runnable->running->unblock 10 + w.Emit(trace.EvGoCreate, 1, 20, s.add("pkg.f2"), s.add("main.f2")) + w.Emit(trace.EvGoStartLocal, 1, 20) // [timestamp, goroutine id] + w.Emit(trace.EvGoUnblockLocal, 1, 10, s.add("pkg.f2")) // [timestamp, goroutine id, stack] + w.Emit(trace.EvGoEnd, 1) // [timestamp] + + // goroutine 10: runnable->running->block + w.Emit(trace.EvGoStartLocal, 1, 10) // [timestamp, goroutine id] + w.Emit(trace.EvGoBlock, 1, s.add("pkg.f3")) // [timestamp, stack] + + res, err := trace.Parse(w, "") + if err != nil { + t.Fatalf("failed to parse test trace: %v", err) + } + res.Stacks = s // use fake stacks + + params := &traceParams{ + parsed: res, + endTime: int64(1<<63 - 1), + gs: map[uint64]bool{10: true}, + } + + c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1) + if err := generateTrace(params, c); err != nil { + t.Fatalf("generateTrace failed: %v", err) + } +} + +func TestPreemptedMarkAssist(t *testing.T) { + w := trace.NewWriter() + w.Emit(trace.EvBatch, 0, 0) // start of per-P batch event [pid, timestamp] + w.Emit(trace.EvFrequency, 1) // [ticks per second] + + var s stacks + // goroutine 9999: running -> mark assisting -> preempted -> assisting -> running -> block + w.Emit(trace.EvGoCreate, 1, 9999, s.add("pkg.f1"), s.add("main.f1")) // [timestamp, new goroutine 
id, new stack id, stack id] + w.Emit(trace.EvGoStartLocal, 1, 9999) // [timestamp, goroutine id] + w.Emit(trace.EvGCMarkAssistStart, 1, s.add("main.f1")) // [timestamp, stack] + w.Emit(trace.EvGoPreempt, 1, s.add("main.f1")) // [timestamp, stack] + w.Emit(trace.EvGoStartLocal, 1, 9999) // [timestamp, goroutine id] + w.Emit(trace.EvGCMarkAssistDone, 1) // [timestamp] + w.Emit(trace.EvGoBlock, 1, s.add("main.f2")) // [timestamp, stack] + + res, err := trace.Parse(w, "") + if err != nil { + t.Fatalf("failed to parse test trace: %v", err) + } + res.Stacks = s // use fake stacks + + params := &traceParams{ + parsed: res, + endTime: int64(1<<63 - 1), + } + + c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1) + + marks := 0 + c.ConsumeViewerEvent = func(ev *format.Event, _ bool) { + if strings.Contains(ev.Name, "MARK ASSIST") { + marks++ + } + } + if err := generateTrace(params, c); err != nil { + t.Fatalf("generateTrace failed: %v", err) + } + + if marks != 2 { + t.Errorf("Got %v MARK ASSIST events, want %v", marks, 2) + } +} + +func TestFoo(t *testing.T) { + prog0 := func() { + ctx, task := rtrace.NewTask(context.Background(), "ohHappyDay") + rtrace.Log(ctx, "", "log before task ends") + task.End() + rtrace.Log(ctx, "", "log after task ends") // log after task ends + } + if err := traceProgram(t, prog0, "TestFoo"); err != nil { + t.Fatalf("failed to trace the program: %v", err) + } + res, err := parseTrace() + if err != nil { + t.Fatalf("failed to parse the trace: %v", err) + } + annotRes, _ := analyzeAnnotations() + var task *taskDesc + for _, t := range annotRes.tasks { + if t.name == "ohHappyDay" { + task = t + break + } + } + if task == nil { + t.Fatal("failed to locate expected task event") + } + + params := &traceParams{ + parsed: res, + mode: traceviewer.ModeTaskOriented, + startTime: task.firstTimestamp() - 1, + endTime: task.lastTimestamp() + 1, + tasks: []*taskDesc{task}, + } + + c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1) + 
+ var logBeforeTaskEnd, logAfterTaskEnd bool + c.ConsumeViewerEvent = func(ev *format.Event, _ bool) { + if ev.Name == "log before task ends" { + logBeforeTaskEnd = true + } + if ev.Name == "log after task ends" { + logAfterTaskEnd = true + } + } + if err := generateTrace(params, c); err != nil { + t.Fatalf("generateTrace failed: %v", err) + } + if !logBeforeTaskEnd { + t.Error("failed to find 'log before task ends'") + } + if !logAfterTaskEnd { + t.Error("failed to find 'log after task ends'") + } + +} + +func TestDirectSemaphoreHandoff(t *testing.T) { + prog0 := func() { + var mu sync.Mutex + var wg sync.WaitGroup + mu.Lock() + // This is modeled after src/sync/mutex_test.go to trigger Mutex + // starvation mode, in which the goroutine that calls Unlock hands off + // both the semaphore and its remaining time slice. See issue 36186. + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < 100; i++ { + mu.Lock() + time.Sleep(100 * time.Microsecond) + mu.Unlock() + } + }() + } + mu.Unlock() + wg.Wait() + } + if err := traceProgram(t, prog0, "TestDirectSemaphoreHandoff"); err != nil { + t.Fatalf("failed to trace the program: %v", err) + } + _, err := parseTrace() + if err != nil { + t.Fatalf("failed to parse the trace: %v", err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/trace/trace_unix_test.go b/platform/dbops/binaries/go/go/src/cmd/trace/trace_unix_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e6346354276184d7ee2999972c018cfc4d80db0c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/trace/trace_unix_test.go @@ -0,0 +1,108 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris + +package main + +import ( + "bytes" + "internal/goexperiment" + traceparser "internal/trace" + "internal/trace/traceviewer" + "internal/trace/traceviewer/format" + "io" + "runtime" + "runtime/trace" + "sync" + "syscall" + "testing" + "time" +) + +// TestGoroutineInSyscall tests threads for timer goroutines +// that preexisted when the tracing started were not counted +// as threads in syscall. See golang.org/issues/22574. +func TestGoroutineInSyscall(t *testing.T) { + if goexperiment.ExecTracer2 { + t.Skip("skipping because this test is obsolete and incompatible with the new tracer") + } + // Start one goroutine blocked in syscall. + // + // TODO: syscall.Pipe used to cause the goroutine to + // remain blocked in syscall is not portable. Replace + // it with a more portable way so this test can run + // on non-unix architecture e.g. Windows. + var p [2]int + if err := syscall.Pipe(p[:]); err != nil { + t.Fatalf("failed to create pipe: %v", err) + } + + var wg sync.WaitGroup + defer func() { + syscall.Write(p[1], []byte("a")) + wg.Wait() + + syscall.Close(p[0]) + syscall.Close(p[1]) + }() + wg.Add(1) + go func() { + var tmp [1]byte + syscall.Read(p[0], tmp[:]) + wg.Done() + }() + + // Start multiple timer goroutines. + allTimers := make([]*time.Timer, 2*runtime.GOMAXPROCS(0)) + defer func() { + for _, timer := range allTimers { + timer.Stop() + } + }() + + var timerSetup sync.WaitGroup + for i := range allTimers { + timerSetup.Add(1) + go func(i int) { + defer timerSetup.Done() + allTimers[i] = time.AfterFunc(time.Hour, nil) + }(i) + } + timerSetup.Wait() + + // Collect and parse trace. 
+ buf := new(bytes.Buffer) + if err := trace.Start(buf); err != nil { + t.Fatalf("failed to start tracing: %v", err) + } + trace.Stop() + + res, err := traceparser.Parse(buf, "") + if err == traceparser.ErrTimeOrder { + t.Skipf("skipping due to golang.org/issue/16755 (timestamps are unreliable): %v", err) + } else if err != nil { + t.Fatalf("failed to parse trace: %v", err) + } + + // Check only one thread for the pipe read goroutine is + // considered in-syscall. + c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1) + c.ConsumeViewerEvent = func(ev *format.Event, _ bool) { + if ev.Name == "Threads" { + arg := ev.Arg.(*format.ThreadCountersArg) + if arg.InSyscall > 1 { + t.Errorf("%d threads in syscall at time %v; want less than 1 thread in syscall", arg.InSyscall, ev.Time) + } + } + } + + param := &traceParams{ + parsed: res, + endTime: int64(1<<63 - 1), + } + if err := generateTrace(param, c); err != nil { + t.Fatalf("failed to generate ViewerData: %v", err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vendor/modules.txt b/platform/dbops/binaries/go/go/src/cmd/vendor/modules.txt new file mode 100644 index 0000000000000000000000000000000000000000..d2caf1ffb0a7bc553c12677e3e1df7eced38e7fe --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vendor/modules.txt @@ -0,0 +1,99 @@ +# github.com/google/pprof v0.0.0-20230811205829-9131a7e9cc17 +## explicit; go 1.19 +github.com/google/pprof/driver +github.com/google/pprof/internal/binutils +github.com/google/pprof/internal/driver +github.com/google/pprof/internal/elfexec +github.com/google/pprof/internal/graph +github.com/google/pprof/internal/measurement +github.com/google/pprof/internal/plugin +github.com/google/pprof/internal/report +github.com/google/pprof/internal/symbolizer +github.com/google/pprof/internal/symbolz +github.com/google/pprof/internal/transport +github.com/google/pprof/profile +github.com/google/pprof/third_party/d3flamegraph +github.com/google/pprof/third_party/svgpan +# 
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab +## explicit; go 1.13 +github.com/ianlancetaylor/demangle +# golang.org/x/arch v0.6.0 +## explicit; go 1.18 +golang.org/x/arch/arm/armasm +golang.org/x/arch/arm64/arm64asm +golang.org/x/arch/ppc64/ppc64asm +golang.org/x/arch/x86/x86asm +# golang.org/x/mod v0.14.0 +## explicit; go 1.18 +golang.org/x/mod/internal/lazyregexp +golang.org/x/mod/modfile +golang.org/x/mod/module +golang.org/x/mod/semver +golang.org/x/mod/sumdb +golang.org/x/mod/sumdb/dirhash +golang.org/x/mod/sumdb/note +golang.org/x/mod/sumdb/tlog +golang.org/x/mod/zip +# golang.org/x/sync v0.5.0 +## explicit; go 1.18 +golang.org/x/sync/semaphore +# golang.org/x/sys v0.15.0 +## explicit; go 1.18 +golang.org/x/sys/plan9 +golang.org/x/sys/unix +golang.org/x/sys/windows +# golang.org/x/term v0.15.0 +## explicit; go 1.18 +golang.org/x/term +# golang.org/x/tools v0.16.2-0.20231218185909-83bceaf2424d +## explicit; go 1.18 +golang.org/x/tools/cmd/bisect +golang.org/x/tools/cover +golang.org/x/tools/go/analysis +golang.org/x/tools/go/analysis/internal/analysisflags +golang.org/x/tools/go/analysis/passes/appends +golang.org/x/tools/go/analysis/passes/asmdecl +golang.org/x/tools/go/analysis/passes/assign +golang.org/x/tools/go/analysis/passes/atomic +golang.org/x/tools/go/analysis/passes/bools +golang.org/x/tools/go/analysis/passes/buildtag +golang.org/x/tools/go/analysis/passes/cgocall +golang.org/x/tools/go/analysis/passes/composite +golang.org/x/tools/go/analysis/passes/copylock +golang.org/x/tools/go/analysis/passes/ctrlflow +golang.org/x/tools/go/analysis/passes/defers +golang.org/x/tools/go/analysis/passes/directive +golang.org/x/tools/go/analysis/passes/errorsas +golang.org/x/tools/go/analysis/passes/framepointer +golang.org/x/tools/go/analysis/passes/httpresponse +golang.org/x/tools/go/analysis/passes/ifaceassert +golang.org/x/tools/go/analysis/passes/inspect +golang.org/x/tools/go/analysis/passes/internal/analysisutil 
+golang.org/x/tools/go/analysis/passes/loopclosure +golang.org/x/tools/go/analysis/passes/lostcancel +golang.org/x/tools/go/analysis/passes/nilfunc +golang.org/x/tools/go/analysis/passes/printf +golang.org/x/tools/go/analysis/passes/shift +golang.org/x/tools/go/analysis/passes/sigchanyzer +golang.org/x/tools/go/analysis/passes/slog +golang.org/x/tools/go/analysis/passes/stdmethods +golang.org/x/tools/go/analysis/passes/stringintconv +golang.org/x/tools/go/analysis/passes/structtag +golang.org/x/tools/go/analysis/passes/testinggoroutine +golang.org/x/tools/go/analysis/passes/tests +golang.org/x/tools/go/analysis/passes/timeformat +golang.org/x/tools/go/analysis/passes/unmarshal +golang.org/x/tools/go/analysis/passes/unreachable +golang.org/x/tools/go/analysis/passes/unsafeptr +golang.org/x/tools/go/analysis/passes/unusedresult +golang.org/x/tools/go/analysis/unitchecker +golang.org/x/tools/go/ast/astutil +golang.org/x/tools/go/ast/inspector +golang.org/x/tools/go/cfg +golang.org/x/tools/go/types/objectpath +golang.org/x/tools/go/types/typeutil +golang.org/x/tools/internal/analysisinternal +golang.org/x/tools/internal/bisect +golang.org/x/tools/internal/facts +golang.org/x/tools/internal/typeparams +golang.org/x/tools/internal/versions diff --git a/platform/dbops/binaries/go/go/src/cmd/vet/README b/platform/dbops/binaries/go/go/src/cmd/vet/README new file mode 100644 index 0000000000000000000000000000000000000000..5ab75494d3ee2b7a37025aedb63de765a7962ca7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vet/README @@ -0,0 +1,33 @@ +Vet is a tool that checks correctness of Go programs. It runs a suite of tests, +each tailored to check for a particular class of errors. Examples include incorrect +Printf format verbs and malformed build tags. + +Over time many checks have been added to vet's suite, but many more have been +rejected as not appropriate for the tool. 
The criteria applied when selecting which +checks to add are: + +Correctness: + +Vet's checks are about correctness, not style. A vet check must identify real or +potential bugs that could cause incorrect compilation or execution. A check that +only identifies stylistic points or alternative correct approaches to a situation +is not acceptable. + +Frequency: + +Vet is run every day by many programmers, often as part of every compilation or +submission. The cost in execution time is considerable, especially in aggregate, +so checks must be likely enough to find real problems that they are worth the +overhead of the added check. A new check that finds only a handful of problems +across all existing programs, even if the problem is significant, is not worth +adding to the suite everyone runs daily. + +Precision: + +Most of vet's checks are heuristic and can generate both false positives (flagging +correct programs) and false negatives (not flagging incorrect ones). The rate of +both these failures must be very small. A check that is too noisy will be ignored +by the programmer overwhelmed by the output; a check that misses too many of the +cases it's looking for will give a false sense of security. Neither is acceptable. +A vet check must be accurate enough that everything it reports is worth examining, +and complete enough to encourage real confidence. diff --git a/platform/dbops/binaries/go/go/src/cmd/vet/doc.go b/platform/dbops/binaries/go/go/src/cmd/vet/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..5b2fa3d72fd1d1bf31ba06ac36d571e29e60e8be --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vet/doc.go @@ -0,0 +1,80 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Vet examines Go source code and reports suspicious constructs, such as Printf +calls whose arguments do not align with the format string. 
Vet uses heuristics +that do not guarantee all reports are genuine problems, but it can find errors +not caught by the compilers. + +Vet is normally invoked through the go command. +This command vets the package in the current directory: + + go vet + +whereas this one vets the packages whose path is provided: + + go vet my/project/... + +Use "go help packages" to see other ways of specifying which packages to vet. + +Vet's exit code is non-zero for erroneous invocation of the tool or if a +problem was reported, and 0 otherwise. Note that the tool does not +check every possible problem and depends on unreliable heuristics, +so it should be used as guidance only, not as a firm indicator of +program correctness. + +To list the available checks, run "go tool vet help": + + appends check for missing values after append + asmdecl report mismatches between assembly files and Go declarations + assign check for useless assignments + atomic check for common mistakes using the sync/atomic package + bools check for common mistakes involving boolean operators + buildtag check //go:build and // +build directives + cgocall detect some violations of the cgo pointer passing rules + composites check for unkeyed composite literals + copylocks check for locks erroneously passed by value + defers report common mistakes in defer statements + directive check Go toolchain directives such as //go:debug + errorsas report passing non-pointer or non-error values to errors.As + framepointer report assembly that clobbers the frame pointer before saving it + httpresponse check for mistakes using HTTP responses + ifaceassert detect impossible interface-to-interface type assertions + loopclosure check references to loop variables from within nested functions + lostcancel check cancel func returned by context.WithCancel is called + nilfunc check for useless comparisons between functions and nil + printf check consistency of Printf format strings and arguments + shift check for shifts that equal or 
exceed the width of the integer + sigchanyzer check for unbuffered channel of os.Signal + slog check for invalid structured logging calls + stdmethods check signature of methods of well-known interfaces + stringintconv check for string(int) conversions + structtag check that struct field tags conform to reflect.StructTag.Get + testinggoroutine report calls to (*testing.T).Fatal from goroutines started by a test + tests check for common mistaken usages of tests and examples + timeformat check for calls of (time.Time).Format or time.Parse with 2006-02-01 + unmarshal report passing non-pointer or non-interface values to unmarshal + unreachable check for unreachable code + unsafeptr check for invalid conversions of uintptr to unsafe.Pointer + unusedresult check for unused results of calls to some functions + +For details and flags of a particular check, such as printf, run "go tool vet help printf". + +By default, all checks are performed. +If any flags are explicitly set to true, only those tests are run. +Conversely, if any flag is explicitly set to false, only those tests are disabled. +Thus -printf=true runs the printf check, +and -printf=false runs all checks except the printf check. + +For information on writing a new check, see golang.org/x/tools/go/analysis. + +Core flags: + + -c=N + display offending line plus N lines of surrounding context + -json + emit analysis diagnostics (and errors) in JSON format +*/ +package main diff --git a/platform/dbops/binaries/go/go/src/cmd/vet/main.go b/platform/dbops/binaries/go/go/src/cmd/vet/main.go new file mode 100644 index 0000000000000000000000000000000000000000..c5197284b56165159ffb942e35cf0ffcf0add9ce --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vet/main.go @@ -0,0 +1,83 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "cmd/internal/objabi" + + "golang.org/x/tools/go/analysis/unitchecker" + + "golang.org/x/tools/go/analysis/passes/appends" + "golang.org/x/tools/go/analysis/passes/asmdecl" + "golang.org/x/tools/go/analysis/passes/assign" + "golang.org/x/tools/go/analysis/passes/atomic" + "golang.org/x/tools/go/analysis/passes/bools" + "golang.org/x/tools/go/analysis/passes/buildtag" + "golang.org/x/tools/go/analysis/passes/cgocall" + "golang.org/x/tools/go/analysis/passes/composite" + "golang.org/x/tools/go/analysis/passes/copylock" + "golang.org/x/tools/go/analysis/passes/defers" + "golang.org/x/tools/go/analysis/passes/directive" + "golang.org/x/tools/go/analysis/passes/errorsas" + "golang.org/x/tools/go/analysis/passes/framepointer" + "golang.org/x/tools/go/analysis/passes/httpresponse" + "golang.org/x/tools/go/analysis/passes/ifaceassert" + "golang.org/x/tools/go/analysis/passes/loopclosure" + "golang.org/x/tools/go/analysis/passes/lostcancel" + "golang.org/x/tools/go/analysis/passes/nilfunc" + "golang.org/x/tools/go/analysis/passes/printf" + "golang.org/x/tools/go/analysis/passes/shift" + "golang.org/x/tools/go/analysis/passes/sigchanyzer" + "golang.org/x/tools/go/analysis/passes/slog" + "golang.org/x/tools/go/analysis/passes/stdmethods" + "golang.org/x/tools/go/analysis/passes/stringintconv" + "golang.org/x/tools/go/analysis/passes/structtag" + "golang.org/x/tools/go/analysis/passes/testinggoroutine" + "golang.org/x/tools/go/analysis/passes/tests" + "golang.org/x/tools/go/analysis/passes/timeformat" + "golang.org/x/tools/go/analysis/passes/unmarshal" + "golang.org/x/tools/go/analysis/passes/unreachable" + "golang.org/x/tools/go/analysis/passes/unsafeptr" + "golang.org/x/tools/go/analysis/passes/unusedresult" +) + +func main() { + objabi.AddVersionFlag() + + unitchecker.Main( + appends.Analyzer, + asmdecl.Analyzer, + assign.Analyzer, + atomic.Analyzer, + bools.Analyzer, + buildtag.Analyzer, + cgocall.Analyzer, + composite.Analyzer, + 
copylock.Analyzer, + defers.Analyzer, + directive.Analyzer, + errorsas.Analyzer, + framepointer.Analyzer, + httpresponse.Analyzer, + ifaceassert.Analyzer, + loopclosure.Analyzer, + lostcancel.Analyzer, + nilfunc.Analyzer, + printf.Analyzer, + shift.Analyzer, + sigchanyzer.Analyzer, + slog.Analyzer, + stdmethods.Analyzer, + stringintconv.Analyzer, + structtag.Analyzer, + tests.Analyzer, + testinggoroutine.Analyzer, + timeformat.Analyzer, + unmarshal.Analyzer, + unreachable.Analyzer, + unsafeptr.Analyzer, + unusedresult.Analyzer, + ) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/vet/vet_test.go b/platform/dbops/binaries/go/go/src/cmd/vet/vet_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4bb0de00b3b2b21245d9bf4e3f3c7f36aa2f3b6d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/vet/vet_test.go @@ -0,0 +1,400 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "errors" + "fmt" + "internal/testenv" + "log" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "testing" +) + +// TestMain executes the test binary as the vet command if +// GO_VETTEST_IS_VET is set, and runs the tests otherwise. +func TestMain(m *testing.M) { + if os.Getenv("GO_VETTEST_IS_VET") != "" { + main() + os.Exit(0) + } + + os.Setenv("GO_VETTEST_IS_VET", "1") // Set for subprocesses to inherit. + os.Exit(m.Run()) +} + +// vetPath returns the path to the "vet" binary to run. 
+func vetPath(t testing.TB) string { + t.Helper() + testenv.MustHaveExec(t) + + vetPathOnce.Do(func() { + vetExePath, vetPathErr = os.Executable() + }) + if vetPathErr != nil { + t.Fatal(vetPathErr) + } + return vetExePath +} + +var ( + vetPathOnce sync.Once + vetExePath string + vetPathErr error +) + +func vetCmd(t *testing.T, arg, pkg string) *exec.Cmd { + cmd := testenv.Command(t, testenv.GoToolPath(t), "vet", "-vettool="+vetPath(t), arg, path.Join("cmd/vet/testdata", pkg)) + cmd.Env = os.Environ() + return cmd +} + +func TestVet(t *testing.T) { + t.Parallel() + for _, pkg := range []string{ + "appends", + "asm", + "assign", + "atomic", + "bool", + "buildtag", + "cgo", + "composite", + "copylock", + "deadcode", + "directive", + "httpresponse", + "lostcancel", + "method", + "nilfunc", + "print", + "rangeloop", + "shift", + "slog", + "structtag", + "testingpkg", + // "testtag" has its own test + "unmarshal", + "unsafeptr", + "unused", + } { + pkg := pkg + t.Run(pkg, func(t *testing.T) { + t.Parallel() + + // Skip cgo test on platforms without cgo. + if pkg == "cgo" && !cgoEnabled(t) { + return + } + + cmd := vetCmd(t, "-printfuncs=Warn,Warnf", pkg) + + // The asm test assumes amd64. + if pkg == "asm" { + cmd.Env = append(cmd.Env, "GOOS=linux", "GOARCH=amd64") + } + + dir := filepath.Join("testdata", pkg) + gos, err := filepath.Glob(filepath.Join(dir, "*.go")) + if err != nil { + t.Fatal(err) + } + asms, err := filepath.Glob(filepath.Join(dir, "*.s")) + if err != nil { + t.Fatal(err) + } + var files []string + files = append(files, gos...) + files = append(files, asms...) + + errchk(cmd, files, t) + }) + } +} + +func cgoEnabled(t *testing.T) bool { + // Don't trust build.Default.CgoEnabled as it is false for + // cross-builds unless CGO_ENABLED is explicitly specified. + // That's fine for the builders, but causes commands like + // 'GOARCH=386 go test .' to fail. + // Instead, we ask the go command. 
+ cmd := testenv.Command(t, testenv.GoToolPath(t), "list", "-f", "{{context.CgoEnabled}}") + out, _ := cmd.CombinedOutput() + return string(out) == "true\n" +} + +func errchk(c *exec.Cmd, files []string, t *testing.T) { + output, err := c.CombinedOutput() + if _, ok := err.(*exec.ExitError); !ok { + t.Logf("vet output:\n%s", output) + t.Fatal(err) + } + fullshort := make([]string, 0, len(files)*2) + for _, f := range files { + fullshort = append(fullshort, f, filepath.Base(f)) + } + err = errorCheck(string(output), false, fullshort...) + if err != nil { + t.Errorf("error check failed: %s", err) + } +} + +// TestTags verifies that the -tags argument controls which files to check. +func TestTags(t *testing.T) { + t.Parallel() + for tag, wantFile := range map[string]int{ + "testtag": 1, // file1 + "x testtag y": 1, + "othertag": 2, + } { + tag, wantFile := tag, wantFile + t.Run(tag, func(t *testing.T) { + t.Parallel() + t.Logf("-tags=%s", tag) + cmd := vetCmd(t, "-tags="+tag, "tagtest") + output, err := cmd.CombinedOutput() + + want := fmt.Sprintf("file%d.go", wantFile) + dontwant := fmt.Sprintf("file%d.go", 3-wantFile) + + // file1 has testtag and file2 has !testtag. + if !bytes.Contains(output, []byte(filepath.Join("tagtest", want))) { + t.Errorf("%s: %s was excluded, should be included", tag, want) + } + if bytes.Contains(output, []byte(filepath.Join("tagtest", dontwant))) { + t.Errorf("%s: %s was included, should be excluded", tag, dontwant) + } + if t.Failed() { + t.Logf("err=%s, output=<<%s>>", err, output) + } + }) + } +} + +// All declarations below were adapted from test/run.go. + +// errorCheck matches errors in outStr against comments in source files. +// For each line of the source files which should generate an error, +// there should be a comment of the form // ERROR "regexp". +// If outStr has an error for a line which has no such comment, +// this function will report an error. 
+// Likewise if outStr does not have an error for a line which has a comment, +// or if the error message does not match the . +// The syntax is Perl but it's best to stick to egrep. +// +// Sources files are supplied as fullshort slice. +// It consists of pairs: full path to source file and its base name. +func errorCheck(outStr string, wantAuto bool, fullshort ...string) (err error) { + var errs []error + out := splitOutput(outStr, wantAuto) + // Cut directory name. + for i := range out { + for j := 0; j < len(fullshort); j += 2 { + full, short := fullshort[j], fullshort[j+1] + out[i] = strings.ReplaceAll(out[i], full, short) + } + } + + var want []wantedError + for j := 0; j < len(fullshort); j += 2 { + full, short := fullshort[j], fullshort[j+1] + want = append(want, wantedErrors(full, short)...) + } + for _, we := range want { + var errmsgs []string + if we.auto { + errmsgs, out = partitionStrings("", out) + } else { + errmsgs, out = partitionStrings(we.prefix, out) + } + if len(errmsgs) == 0 { + errs = append(errs, fmt.Errorf("%s:%d: missing error %q", we.file, we.lineNum, we.reStr)) + continue + } + matched := false + n := len(out) + for _, errmsg := range errmsgs { + // Assume errmsg says "file:line: foo". + // Cut leading "file:line: " to avoid accidental matching of file name instead of message. 
+ text := errmsg + if _, suffix, ok := strings.Cut(text, " "); ok { + text = suffix + } + if we.re.MatchString(text) { + matched = true + } else { + out = append(out, errmsg) + } + } + if !matched { + errs = append(errs, fmt.Errorf("%s:%d: no match for %#q in:\n\t%s", we.file, we.lineNum, we.reStr, strings.Join(out[n:], "\n\t"))) + continue + } + } + + if len(out) > 0 { + errs = append(errs, fmt.Errorf("Unmatched Errors:")) + for _, errLine := range out { + errs = append(errs, fmt.Errorf("%s", errLine)) + } + } + + if len(errs) == 0 { + return nil + } + if len(errs) == 1 { + return errs[0] + } + var buf strings.Builder + fmt.Fprintf(&buf, "\n") + for _, err := range errs { + fmt.Fprintf(&buf, "%s\n", err.Error()) + } + return errors.New(buf.String()) +} + +func splitOutput(out string, wantAuto bool) []string { + // gc error messages continue onto additional lines with leading tabs. + // Split the output at the beginning of each line that doesn't begin with a tab. + // lines are impossible to match so those are filtered out. + var res []string + for _, line := range strings.Split(out, "\n") { + line = strings.TrimSuffix(line, "\r") // normalize Windows output + if strings.HasPrefix(line, "\t") { + res[len(res)-1] += "\n" + line + } else if strings.HasPrefix(line, "go tool") || strings.HasPrefix(line, "#") || !wantAuto && strings.HasPrefix(line, "") { + continue + } else if strings.TrimSpace(line) != "" { + res = append(res, line) + } + } + return res +} + +// matchPrefix reports whether s starts with file name prefix followed by a :, +// and possibly preceded by a directory name. 
+func matchPrefix(s, prefix string) bool { + i := strings.Index(s, ":") + if i < 0 { + return false + } + j := strings.LastIndex(s[:i], "/") + s = s[j+1:] + if len(s) <= len(prefix) || s[:len(prefix)] != prefix { + return false + } + if s[len(prefix)] == ':' { + return true + } + return false +} + +func partitionStrings(prefix string, strs []string) (matched, unmatched []string) { + for _, s := range strs { + if matchPrefix(s, prefix) { + matched = append(matched, s) + } else { + unmatched = append(unmatched, s) + } + } + return +} + +type wantedError struct { + reStr string + re *regexp.Regexp + lineNum int + auto bool // match line + file string + prefix string +} + +var ( + errRx = regexp.MustCompile(`// (?:GC_)?ERROR(NEXT)? (.*)`) + errAutoRx = regexp.MustCompile(`// (?:GC_)?ERRORAUTO(NEXT)? (.*)`) + errQuotesRx = regexp.MustCompile(`"([^"]*)"`) + lineRx = regexp.MustCompile(`LINE(([+-])(\d+))?`) +) + +// wantedErrors parses expected errors from comments in a file. +func wantedErrors(file, short string) (errs []wantedError) { + cache := make(map[string]*regexp.Regexp) + + src, err := os.ReadFile(file) + if err != nil { + log.Fatal(err) + } + for i, line := range strings.Split(string(src), "\n") { + lineNum := i + 1 + if strings.Contains(line, "////") { + // double comment disables ERROR + continue + } + var auto bool + m := errAutoRx.FindStringSubmatch(line) + if m != nil { + auto = true + } else { + m = errRx.FindStringSubmatch(line) + } + if m == nil { + continue + } + if m[1] == "NEXT" { + lineNum++ + } + all := m[2] + mm := errQuotesRx.FindAllStringSubmatch(all, -1) + if mm == nil { + log.Fatalf("%s:%d: invalid errchk line: %s", file, lineNum, line) + } + for _, m := range mm { + replacedOnce := false + rx := lineRx.ReplaceAllStringFunc(m[1], func(m string) string { + if replacedOnce { + return m + } + replacedOnce = true + n := lineNum + if strings.HasPrefix(m, "LINE+") { + delta, _ := strconv.Atoi(m[5:]) + n += delta + } else if strings.HasPrefix(m, 
"LINE-") { + delta, _ := strconv.Atoi(m[5:]) + n -= delta + } + return fmt.Sprintf("%s:%d", short, n) + }) + re := cache[rx] + if re == nil { + var err error + re, err = regexp.Compile(rx) + if err != nil { + log.Fatalf("%s:%d: invalid regexp \"%#q\" in ERROR line: %v", file, lineNum, rx, err) + } + cache[rx] = re + } + prefix := fmt.Sprintf("%s:%d", short, lineNum) + errs = append(errs, wantedError{ + reStr: rx, + re: re, + prefix: prefix, + auto: auto, + lineNum: lineNum, + file: short, + }) + } + } + + return +}